Computer Vision Nanodegree Project: Image Captioning
---
The Microsoft **C**ommon **O**bjects in **CO**ntext (MS COCO) dataset is a large-scale dataset for scene understanding. The dataset is commonly used to train and benchmark object detection, segmentation, and captioning algorithms.

![Sample Dog Output](images/coco-examples.jpg)

You can read more about the dataset on the [website](http://cocodataset.org/home) or in the [research paper](https://arxiv.org/pdf/1405.0312.pdf).

In this notebook, you will explore this dataset, in preparation for the project.

Step 1: Initialize the COCO API
We begin by initializing the [COCO API](https://github.com/cocodataset/cocoapi) that you will use to obtain the data.

import os
from pycocotools.coco import COCO

# Initialize COCO API for instance annotations.
dataDir = os.path.join('..', '..', 'Data', 'cocoapi') dataType = 'val2014' instances_annFile = os.path.join(dataDir, 'annotations', 'instances_{}.json'.format(dataType)) coco = COCO(instances_annFile) # Initialize COCO API for caption annotations. captions_annFile = os.path.join(dataDir, 'annotations', 'captions_{}.json'.format(dataType)) coco_caps = COCO(captions_annFile) # Get image ids. ids = list(coco.anns.keys())loading annotations into memory... Done (t=3.29s) creating index... index created! loading annotations into memory... Done (t=0.25s) creating index... index created!Step 2: Plot a Sample ImageNext, we plot a random image from the dataset, along with its five corresponding captions. Each time you run the code cell below, a different image is selected.In the project, you will use this dataset to train your own model to generate captions from images!import numpy as np import skimage.io as io import matplotlib.pyplot as plt %matplotlib inline # Pick a random image and obtain the corresponding URL. ann_id = np.random.choice(ids) img_id = coco.anns[ann_id]['image_id'] img = coco.loadImgs(img_id)[0] url = img['coco_url'] # Print URL and visualize corresponding image. print(url) I = io.imread(url) plt.axis('off') plt.imshow(I) plt.show() # Load and display captions. annIds = coco_caps.getAnnIds(imgIds=img['id']) anns = coco_caps.loadAnns(annIds) coco_caps.showAnns(anns)http://images.cocodataset.org/val2014/COCO_val2014_000000151978.jpgReaction featuresfeatures_reactions = pd.DataFrame(index=cc.index) features_reactions['n_up'] = cc['Actions.Agree.Total'] features_reactions['n_down'] = cc['Actions.Disagree.Total'] features_reactions['n_reply'] = cc['Actions.Comment.Total'] sns.pairplot(features_reactions)Post date featuresfeatures_date = pd.DataFrame(index=cc.index) features_date['t_week'] = cc.Timestamp.dt.week features_date['t_dow'] = cc.Timestamp.dt.dayofweek features_date['t_hour'] = cc.Timestamp.dt.hour features_date['t_day'] = cc.Timestamp.dt.day sns.pairplot(features_date)Spacy NLP ...import spacy # See "Installing spaCy" nlp = spacy.load('en') # You are here. 
spacy_docs = pd.DataFrame(index=cc.index) docs = cc.Body.apply(nlp) vec = docs.apply(lambda x: x.vector) feature_word_vec = pd.DataFrame(vec.tolist(), columns=['spacy_%s'%i for i in range(300)]) feature_word_vec['spacy_sent'] = docs.apply(lambda x: x.sentiment) # tfidf ''' Author: Python: 3.6.0 Date: 24/6/2017 ''' import pandas as pd import numpy as np from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score from scipy.sparse import csr_matrix text = cc['Body'] # create a stopset (words that occur to many times) stopset = set(stopwords.words('english')) vectorizer = TfidfVectorizer(use_idf=True, lowercase=True, strip_accents='ascii', stop_words=stopset) features_tfidf = pd.DataFrame(vectorizer.fit_transform(text).toarray())Targettargets = pd.read_csv('./btc-ind.csv') targets['date'] = pd.to_datetime(targets['Date']) targets = targets.set_index('date') del targets['Date'] targets.tail() join_by_date = pd.DataFrame(index=cc.index) join_by_date['date'] = cc.Timestamp.dt.round(freq="d") Y_all = join_by_date.join(targets, on='date').dropna() groups = Y_all['date'] del Y_all['date'] cols = Y_all.columns index = Y_all.index #Y_all = pd.DataFrame(normalize(Y_all, axis=1, norm='l2'), columns=cols, index=index) Y_all = Y_all - Y_all.mean() Y_all = Y_all/Y_all.std() #Y_all.plot()Combine features#features = pd.concat([features_date, features_tfidf, features_reactions, feature_word_vec], axis=1) features = pd.concat([features_date, features_reactions, feature_word_vec], axis=1) X_all = features.ix[Y_all.index] X_all.shape from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor from sklearn.preprocessing import normalize from xgboost.sklearn import XGBRegressor from sklearn.linear_model import LinearRegression, Lasso rf = RandomForestRegressor(n_estimators=10, max_depth=3, criterion='mse') xgb = XGBRegressor(n_estimators=10) regressors = [rf, Lasso()] target_scores = {} for indicator in targets.columns: Y =Y_all[indicator] for reg in regressors: tag = indicator+':'+str(reg)[:15] scores = cross_val_score(reg, X_all, Y, cv=4, groups=groups, scoring='neg_mean_squared_error') print np.mean(scores), tag target_scores[tag] = scores cv_score = pd.DataFrame(target_scores) ms = cv_score.mean(axis=0) ms.sort_values(ascending=False) indicator = 'BTC_cbrt_dv_T1:Lasso(alpha=1.0' indicator = indicator.split(":")[0] Y = Y_all[indicator] reg = XGBRegressor(n_estimators=100) reg.fit(X_all, Y) Y_t = reg.predict(X_all) error = abs(Y - Y_t) error.hist() # DROP THE BULL$HIT itruth = error < error.quantile(0.3) X = X_all[itruth] Y = Y_all[indicator][itruth] G = groups[itruth] reg = XGBRegressor(n_estimators=100, max_depth=8) scores = cross_val_score(reg, X, Y, cv=4, groups=G, scoring='neg_mean_squared_error') print sorted(scores) ax = groups.hist(figsize=(12,5)) G.hist(ax=ax) reg = XGBRegressor(n_estimators=100, max_depth=8) reg.fit(X,Y) Y_ = reg.predict(X) truth_df = pd.DataFrame({'date': G, 'Y': Y_}) def get_stats(group): return {'min': group.min(), 'max': group.max(), 'count': group.count(), 'mean': group.mean()} ax = targets.BTC_cbrt_dv_T1.plot() truth.plot(ax=ax) truth def drop_bs(indicator, q=0.3): Y = Y_all[indicator] reg = XGBRegressor(n_estimators=100) reg.fit(X_all, Y) Y_t = reg.predict(X_all) error = abs(Y - Y_t) error.hist() itruth = error < error.quantile(q) X = X_all[itruth] 
Y = Y_all[indicator][itruth] G = groups[itruth] reg = XGBRegressor(n_estimators=30, max_depth=5) scores = cross_val_score(reg, X, Y, cv=4, groups=G, scoring='neg_mean_squared_error') print sorted(scores) print "MEAN CV SCORE: ", np.mean(scores) reg = XGBRegressor(n_estimators=100, max_depth=8) reg.fit(X,Y) Y_ = reg.predict(X) agg = pd.Series(Y_).groupby(G) truthscore = agg.mean() impact_count = agg.count() truth_max = agg.max() return pd.DataFrame(dict(truthscore=truthscore, impact_count=impact_count, truth_max=truth_max, date=truthscore.index)) dv = drop_bs('BTC_cbrt_dv_T1', 0.4) import json def to_json(df, path): a = [] for i,d in list(df.iterrows()): d = d.to_dict() d['date'] = str(d['date']) a.append(d) with open(path, 'w') as f: json.dump(a, f) to_json(dv, '../bitcoin-daily-bars/out-truth-volume.json') impactfull = cc.ix[itruth.index][itruth] impactfull.head() f = 'Cryptopian.Name' a = impactfull.groupby(f).size() b = cc.groupby(f).size() c = pd.DataFrame(dict(a=a,b=b)) c = c[c.a>1] c['impact'] = c.a/c.b c.sort_values('impact', ascending=False) dv.truthscore.plot() target_scEllipsoidal nested samplingThis example demonstrates how to use ellipsoidal nested rejection sampling [1] to sample from the posterior distribution for a logistic model fitted to model-simulated data.[1] "A nested sampling algorithm for cosmological model selection", , and , [arXiv:astro-ph/0508461v2](https://arxiv.org/abs/astro-ph/0508461). First create fake data.import pints import pints.toy as toy import numpy as np import matplotlib.pyplot as plt # Load a forward model model = toy.LogisticModel() # Create some toy data r = 0.015 k = 500 real_parameters = [r, k] times = np.linspace(0, 1000, 100) signal_values = model.simulate(real_parameters, times) # Add independent Gaussian noise sigma = 10 observed_values = signal_values + pints.noise.independent(sigma, signal_values.shape) # Plot plt.plot(times,signal_values,label = 'signal') plt.plot(times,observed_values,label = 'observed') plt.xlabel('Time') plt.ylabel('Values') plt.legend() plt.show()Create the nested sampler that will be used to sample from the posterior.# Create an object with links to the model and time series problem = pints.SingleOutputProblem(model, times, observed_values) # Create a log-likelihood function (adds an extra parameter!) log_likelihood = pints.GaussianLogLikelihood(problem) # Create a uniform prior over both the parameters and the new noise variable log_prior = pints.UniformLogPrior( [0.01, 400, sigma * 0.5], [0.02, 600, sigma * 1.5]) # Create a nested ellipsoidal rejectection sampler sampler = pints.NestedController(log_likelihood, log_prior, method=pints.NestedEllipsoidSampler) # Set number of iterations sampler.set_iterations(8000) # Set the number of posterior samples to generate sampler.set_n_posterior_samples(1600) # Do proposals in parallel sampler.set_parallel(True) # Use dynamic enlargement factor sampler._sampler.set_dynamic_enlargement_factor(1)Run the sampler!samples = sampler.run() print('Done!')Running Nested ellipsoidal sampler Number of active points: 400 Total number of iterations: 8000 Total number of posterior samples: 1600 Iter. Eval. 
Time m:s Delta_log(z) Acceptance rate 0 1 0:05.6 -inf 1 0 2 0:05.6 -inf 1 0 21 0:05.6 -inf 1 0 41 0:05.7 -inf 1 0 61 0:05.7 -inf 1 0 81 0:05.8 -inf 1 0 101 0:05.8 -inf 1 0 121 0:05.8 -inf 1 0 141 0:05.9 -inf 1 0 161 0:05.9 -inf 1 0 181 0:06.0 -inf 1 0 201 0:06.0 -inf 1 0 221 0:06.1 -inf 1 0 241 0:06.1 -inf 1 0 261 0:06.2 -inf 1 0 281 0:06.2 -inf 1 0 301 [...]Plot posterior samples versus true parameter values (dashed lines)# Plot output import pints.plot pints.plot.histogram([samples], ref_parameters=[r, k, sigma]) plt.show()Plot posterior predictive simulations versus the observed datapints.plot.series(samples[:100], problem) plt.show()Marginal likelihood estimateprint('marginal log-likelihood = ' + str(sampler.marginal_log_likelihood()) + ' ± ' + str(sampler.marginal_log_likelihood_standard_deviation()))marginal log-likelihood = -375.348895962891 ± 0.07908966826933428Effective sample sizeprint('effective sample size = ' + str(sampler.effective_sample_size()))effective sample size = 1597.4248390822784Parameter analysis: kernel shapeWe analyse how the parameters $\tau_{{\sf half}}$ and $\sigma_{{\sf gauss}}$ affect the shape of the kernel $F = G \ast E$.We use the original processing code and empirically obtain impulse responses to illustrate the shape of the kernelsimport numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import cm %matplotlib inline import pandas as pd mpl.rcParams['savefig.dpi'] = 150 # for print, go to 600 def compute_sig_smooth_diff_exp(signal, sigma_gauss, tau_half): """ Smooth the signal with gaussian kernel, form the discrete differential, and convolve it with an exponential kernel. sigma_gauss - width of the gaussian kernel tau_half - half-life of the exponential kernel """ windowsize = int(10 * sigma_gauss) # 5 sigma windowsize sig_smooth = pd.Series(signal).rolling(windowsize, win_type='gaussian').mean(std=sigma_gauss) sig_diff = np.diff(sig_smooth) sig_diff_ewma = pd.Series(sig_diff).ewm(halflife=tau_half, adjust=False, ignore_na=True).mean() return sig_diff_ewma def compute_sig_smooth_exp(signal, sigma_gauss, tau_half): """ Smooth the signal with gaussian kernel, and convolve it with an exponential kernel (no differential formed). sigma_gauss - width of the gaussian kernel tau_half - half-life of the exponential kernel """ windowsize = int(10 * sigma_gauss) # 5 sigma windowsize sig_smooth = pd.Series(signal).rolling(windowsize, win_type='gaussian').mean(std=sigma_gauss) sig_ewma = pd.ewma(sig_smooth, halflife=tau_half, adjust=False, ignore_na=True) return sig_ewma #construct a signal of length 2000, equivalent to 20 seconds at a sampling rate of 100 Hz as in the real data. sigma_gauss = 30 # 0.3 s tau_half = 40 # 0.4 s siglength = 2000 # 20 s signal = np.zeros(siglength) signal[500] = 1. # impulse at 5.0 sImpulse response and spectral analysis of the kernelshls = np.arange(5.,51.,20.) std = 25. colors = cm.gray(np.linspace(0,1,4)) f = plt.figure(figsize=(8,5)) gs = mpl.gridspec.GridSpec(2,2, hspace=0.6, wspace=0.4) axa = f.add_subplot(gs[0,0]) axb = f.add_subplot(gs[0,1]) for i,h in enumerate(hls): kernel_gde = compute_sig_smooth_diff_exp(signal, sigma_gauss=std, tau_half=h) kernel_gde[np.isnan(kernel_gde)] = 0. # replace nans with 0 kernel_gde_norm = kernel_gde / np.nanmax(kernel_gde) xaxis = np.arange(len(kernel_gde_norm))/100. 
axa.plot(xaxis, kernel_gde_norm, color=colors[i], label="{:.2f}".format(h/100.)) spec = np.fft.fft(kernel_gde_norm) xaxis_spec = np.fft.fftfreq(len(kernel_gde_norm), 0.01) axb.semilogx(xaxis_spec[:int(siglength/2.-1)], np.abs(spec)[:int(siglength/2.-1)], color=colors[i], label="{:.2f}".format(h/100.)) axa.set_frame_on(False) axa.xaxis.set_ticks_position('bottom') axa.yaxis.set_ticks_position('left') axa.set_xlim(5,9) axa.set_ylim(-1.05, 1.05) axa.set_xlabel('time [s]') axa.set_title(r"$\sigma_{{\sf smooth}}$= {:.2f} s".format(std/100.)) axa.legend(frameon=False, title=r"$\tau_{{\sf half}}$ [s]", fontsize=8, loc=(0.8, 0.7)) axa.spines['left'].set_position(('outward',6)) axa.spines['bottom'].set_position(('outward',5)) f.text(0.07, 0.93, "A)", weight='bold', fontsize=12) axb.set_xlim(0,4.) axb.set_xlabel('f [Hz]') axb.set_ylabel("rel. power [a.u]") miny = axb.get_yticks()[0] maxy = axb.get_yticks()[-1] axb.set_yticks([miny, maxy/2, maxy]) axb.set_frame_on(False) axb.xaxis.set_ticks_position('bottom') axb.yaxis.set_ticks_position('left') axb.legend(frameon=False, title=r"$\tau_{{\sf half}}$ [s]", fontsize=8, loc=(0.8, 0.7)) axb.spines['left'].set_position(('outward',6)) axb.spines['bottom'].set_position(('outward',5)) f.text(0.07, 0.45, "C)", weight='bold', fontsize=12) stds = np.arange(5.,51.,20.) hl = 25. axc = f.add_subplot(gs[1,0]) axd = f.add_subplot(gs[1,1]) for i,s in enumerate(stds): kernel_gde = compute_sig_smooth_diff_exp(signal, sigma_gauss=s, tau_half=hl) kernel_gde[np.isnan(kernel_gde)] = 0. # replace nans with 0 kernel_gde_norm = kernel_gde / np.nanmax(kernel_gde) xaxis = np.arange(len(kernel_gde_norm))/100. axc.plot(xaxis, kernel_gde_norm, color=colors[i], label="{:.2f}".format(s/100.)) spec = np.fft.fft(kernel_gde_norm) xaxis_spec = np.fft.fftfreq(len(kernel_gde_norm), 0.01) axd.semilogx(xaxis_spec[:int(siglength/2.-1)], np.abs(spec)[:int(siglength/2.-1)], color=colors[i], label="{:.2f}".format(s/100.)) axc.set_frame_on(False) axc.xaxis.set_ticks_position('bottom') axc.yaxis.set_ticks_position('left') axc.set_xlim(5,9) axc.set_ylim(-1.05, 1.05) axc.set_xlabel('time [s]') axc.set_title(r"$\tau_{{\sf half}}$= {:.2f} s".format(hl/100.)) axc.legend(frameon=False, title=r"$\sigma_{{\sf smooth}}$ [s]", fontsize=8, loc=(0.8, 0.7)) axc.spines['left'].set_position(('outward',6)) axc.spines['bottom'].set_position(('outward',5)) f.text(0.50, 0.93, "B)", weight='bold', fontsize=12) axd.set_xlim(0,4.) axd.set_xlabel('f [Hz]') axd.set_ylabel("rel. power [a.u]") maxy = axd.get_yticks()[-1] axd.set_yticks([miny, maxy/2, maxy]) axd.set_frame_on(False) axd.xaxis.set_ticks_position('bottom') axd.yaxis.set_ticks_position('left') axd.set_title(r" ") axd.legend(frameon=False, title=r"$\sigma_{{\sf smooth}}$ [s]", fontsize=8, loc=(0.8, 0.7)) axd.spines['left'].set_position(('outward',6)) axd.spines['bottom'].set_position(('outward',5)) f.text(0.50, 0.45, "D)", weight='bold', fontsize=12) f.savefig("Figures/Fig. 
11 - Kernel shapes.png", dpi=600)scRFE Tutorial# visit: https://github.com/czbiohub/scRFE/blob/master/scripts/scRFE.ipynb # for how the scRFE function is builtTranscription Factor Analysisfrom scRFE.scRFE import scRFE #import the scRFE function from the scRFE .py file from the scRFE directory from anndata import read_h5ad #to read in Anndata object import pandas as pd #to read in csv list of TFs # read in AnnData/.h5ad file adata = read_h5ad('/data/madeline/src3/01_figure_1/tabula-muris-senis-droplet-processed-official-annotations.h5ad') # read in GO mouse transcription factor list mouse_tfs = pd.read_csv("/home/angela/maca/GO_term_summary_20171110_222852.csv") # to see what possibilities exist for classOfInterest, the value to split # observations by, run set(adata.obs) # subset the anndata object to only consider features that exist in the GO mouse TF list adataTF = adata[:,adata.var_names[adata.var_names.isin(mouse_tfs['Symbol'])]] # call scRFE with adataTF as your AnnData object, splitting observations # by cell_ontology_class, or cell type TFtopFeaturesDF, TFscore = scRFE(adata = adataTF, classOfInterest = 'cell_ontology_class')Aging Analysisfrom scRFE.scRFE import scRFE #import the scRFE function from the scRFE .py file from the scRFE directory from anndata import read_h5ad #to read in Anndata object import pandas as pd #to read in csv list of TFs # read in AnnData/.h5ad file adata = read_h5ad('/data/madeline/src3/01_figure_1/tabula-muris-senis-droplet-processed-official-annotations.h5ad') # call scRFE with adata and splitting obs by age topFeaturesDF, score = scRFE(adata = adata, classOfInterest = 'age')basic spatial subset - compute the model only in this domainbox_model = dict(latitude=slice(40, 28), longitude=slice(-95, -85)) glofas = glofas.sel(box_model) era5 = era5.sel(box_model) dis = glofas['dis'] z_glofas = static['z'].isel(time=0)/9.81 # converting to m approx. z_glofas = z_glofas.interp(latitude=glofas.latitude, longitude=glofas.longitude) z_glofas = z_glofas.drop('time') # time is misleading as the topography does not change tp = (era5['cp']+era5['lsp'])*1000 tp.name = 'total precip [mm]' tp = tp.interp(latitude=glofas.latitude, longitude=glofas.longitude)Preprocessing routines - Summarydef add_shifted_predictors(ds, shifts, variables='all'): """Adds additional variables to an array which are shifted in time. 
Parameters ---------- ds : xr.Dataset shifts : list of integers variables : str or list """ if variables == 'all': variables = ds.data_vars for var in variables: for i in shifts: if i == 0: continue # makes no sense to shift by zero newvar = var+'-'+str(i) ds[newvar] = ds[var].shift(time=i) return ds def correlate(da_3d, da_timeseries, timelag=False): a = da_3d - da_3d.mean('time') b = da_timeseries - da_timeseries.mean('time') N = len(b.coords['time']) if timelag: b = b.drop('time') a = a.drop('time') out = b.dot(a)/a.std('time')/b.std()/N out.name = 'correlation coefficient' return out def select_river(dis): river = dis.min('time') > 5 river.name = 'river mask [0/1]' return river def select_upstream_river(dis_box, dis_point, z_box, z_point, rivermask, pct): lags = [-1, 1] timelag_corrs = np.full((len(lags), len(dis_box.latitude), len(dis_box.longitude)), np.nan) for t, lag in enumerate(lags): if lag > 0: # dis_box with data from previous timesteps cntr = dis_point[lag:] dis_box_shift = dis_box[:-lag] elif lag < 0: # dis_box with data from future timesteps cntr = dis_point[:lag] dis_box_shift = dis_box[-lag:] dis_box_relevant = dis_box_shift.where(rivermask==1) timelag_corrs[t,:,:] = correlate(dis_box_relevant, cntr, timelag=True) lag_influencing = timelag_corrs[1,:,:]>timelag_corrs[0,:,:] #plt.imshow(lag_influencing) influencer = (dis_box.mean('time') > pct*dis_point.mean('time')) \ &(z_box > z_point) \ &(rivermask==1) & lag_influencing influencer.name = 'gridpoints influencing discharge [0/1]' #influencer.plot() return influencer shifts = range(1,4) X_dis = add_shifted_predictors(glofas, shifts, variables='all') X_dis = X_dis.drop('dis') # we actually want to predict (t) with (t-1, t-2, t-3) y_dis = glofas['dis'] i, j = 70, 38 di = 20 dj = 20 pct = 0.1 # influencing gridpoint must have mean discharge more than this percentage i0, i1 = i-di, i+di j0, j1 = j-dj, j+dj tp_box = tp[:, i0:i1, j0:j1] few_precip = tp_box.mean(['longitude', 'latitude']) < 0.1 print('percentage:',sum(few_precip.astype(int))/few_precip.size) # .plot() #.plot() #'#' print(few_precip) fig, ax = plt.subplots(figsize=(25,5)) few_precip.astype(int).to_pandas().plot(ax=ax) tp_box.where(few_precip).isel(time=0).plot() dis = dis.load() disf = dis.where(few_precip) dispre-selection of upstream river gridpointsdis_point = dis[:,i,j] dis_box = dis[:, i0:i1, j0:j1] z_point = z_glofas[i,j] z_box = z_glofas[i0:i1,j0:j1] rivermask = select_river(dis_box) upstream = select_upstream_river(dis_box, dis_point, z_box, z_point, rivermask, pct) upstream.plot() print(upstream.sum()) array(26)Parameter-tuning for reasonable performancedef preprocess_reshape(X_dis, y_dis, upstream, i, j): X_dis = X_dis.where(upstream) X_dis = X_dis.to_array(dim='time_feature') X_dis = X_dis.stack(features=['latitude', 'longitude', 'time_feature']) Xar = X_dis.dropna('features', how='all') yar = y_dis[:,i,j] yar = yar.drop(['latitude', 'longitude']) yar.coords['features'] = 'dis' Xy = xr.concat([Xar, yar], dim='features') Xyt = Xy.dropna('time', how='any') # drop them as we cannot train on nan values time = Xyt.time Xda = Xyt[:,:-1] yda = Xyt[:,-1] return Xda, yda, time Xda, yda, time = preprocess_reshape(X_dis, y_dis, upstream, i,j) Xdafor p in range(Xda.shape[1]): print(p, float(Xda[:,p].mean()))X_dis.where(upstream) #.plot() Xda = Xda.values yda = yda.values[:, np.newaxis] print('yda.shape:', Xda.shape, 'yda.shape:', yda.shape) class KerasDenseNN(object): def __init__(self, **kwargs): model = keras.models.Sequential() self.cfg = kwargs 
model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Dense(8, kernel_initializer='normal', bias_initializer='zeros', activation='relu')) #('sigmoid')) #model.add(Dropout(self.cfg.get('dropout'))) #model.add(keras.layers.Dense(32)) #model.add(keras.layers.Activation('sigmoid')) #model.add(Dropout(self.cfg.get('dropout'))) #model.add(keras.layers.Dense(16)) #model.add(keras.layers.Activation('sigmoid')) #model.add(Dropout(self.cfg.get('dropout'))) #model.add(keras.layers.Dense(8)) #model.add(keras.layers.Activation('sigmoid')) #model.add(Dropout(self.cfg.get('dropout'))) model.add(keras.layers.Dense(1, activation='linear')) # bias_initializer=keras.initializers.Constant(value=9000))) #ha = self.cfg.get('hidden_activation') #for N_nodes in self.cfg.get('N_hidden_nodes'): # # model.add(hidden) # model.add(ha.copy()) # # if self.cfg.get('dropout'): # model.add(Dropout(self.cfg.get('dropout')))# #outputlayer = keras.layers.Dense(1, activation='linear') #optimizer_name, options_dict = self.cfg.get('optimizer') #optimizer = getattr(keras.optimizers, optimizer_name)(**options_dict) #optimizer = keras.optimizers.SGD(lr=0.01) rmsprop = keras.optimizers.RMSprop(lr=.1) sgd = keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.5, nesterov=True) model.compile(loss=self.cfg.get('loss'), optimizer=rmsprop) self.model = model self.callbacks = [keras.callbacks.EarlyStopping(monitor='loss', min_delta=1, patience=100, verbose=0, mode='auto', baseline=None, restore_best_weights=True),] def predict(self, X): return self.model.predict(X).squeeze() def fit(self, X, y, **kwargs): return self.model.fit(X, y, epochs=self.cfg.get('epochs', None), batch_size=self.cfg.get('batch_size', None), callbacks=self.callbacks, verbose=1, **kwargs) mlp_kws = dict(optimizer=('sgd', dict(lr=1)), loss='mean_squared_error', #N_hidden_nodes=(4,4), #hidden_activation=keras.layers.Activation('sigmoid'), #keras.layers.ReLU(), #-LeakyReLU(alpha=0.3), #'relu', #output_activation='linear', #bias_initializer='random_uniform', batch_size=128, dropout=0., #.25, epochs=1000, ) linear_kws = dict(C=.1, n_jobs=-1, max_iter=10000, verbose=True) if False: pipe = Pipeline([('scaler', StandardScaler()), ('pca', PCA(n_components=4)), ('model', LinearRegression(**linear_kws)),], verbose=True) if True: pipe = Pipeline([#('scaler', StandardScaler()), #('pca', PCA(n_components=2)), ('model', KerasDenseNN(**mlp_kws)),], verbose=False) pipe type(Xda) history = pipe.fit(Xda, yda) keras.utils.print_summary(pipe.named_steps['model'].model) h = history.named_steps['model'].model.history # Plot training & validation loss values plt.plot(h.history['loss']) #plt.plot(h.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.gca().set_yscale('log') plt.show()Test it on the same datadef add_time(vector, time, name=None): """Converts arrays to xarrays with a time coordinate.""" return xr.DataArray(vector, dims=('time'), coords={'time': time}, name=name) with ProgressBar(): ytest = pipe.predict(Xda) ytest.shape ytest = add_time(ytest, time, name='dis-forecast') ytest_dis = ytest #.cumsum('time') ytest_dis.values #ytest_dis += y[0] # initial state + changes = timeseries of forecasted discharge fig, ax = plt.subplots(figsize=(24,5)) #for p in range(Xda.shape[1]): #print(p) #print(Xda[:,p]) minpred = add_time(Xda.min(axis=1), time) maxpred = add_time(Xda.max(axis=1), time) minpred.plot(ax=ax, linestyle='--', label='predictor-min') maxpred.plot(ax=ax, linestyle='--', 
label='predictor-max') obs = dis[:,i,j].to_pandas() fcst = ytest_dis.to_pandas() obs.plot(ax=ax, label='dis-reanalysis') fcst.plot(ax=ax, label='dis-forecast') plt.legend() #plt.gca().set_xlim(dt.datetime(1981,1,1), dt.datetime(1982,1,1)) ((fcst-obs)/obs*100).plot() y = add_time(yda.squeeze(), time) ((maxpred-y)/y*100).plot() s = pipe.named_steps['model'].model.to_json() import json with open('flowmodel.json', 'w') as f: json.dump(s, f, indent=4)CIFAR 10%matplotlib inline %reload_ext autoreload %autoreload 2 from fastai.conv_learner import * PATH = Path("data/cifar10/") # PATH = Path("/home/ubuntu/data/cifar10/") os.makedirs(PATH,exist_ok=True) from torchvision import transforms, datasets torch.cuda.set_device(0) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159])) bs=256 num_workers = num_cpus() num_workers = 16 traindir = str(PATH/'train') valdir = str(PATH/'test') tfms = [transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))] train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), ] + tfms)) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=bs, shuffle=True, num_workers=num_workers, pin_memory=True) val_dataset = datasets.ImageFolder(valdir, transforms.Compose(tfms)) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=bs*2, shuffle=False, num_workers=num_workers, pin_memory=True) data = ModelData(PATH, train_loader, val_loader) data.sz=32 from models.wideresnet import WideResNet m = WideResNet(3, 3, num_classes=10, k=6, drop_p=0.) m = nn.DataParallel(m, [0,1,2,3]) lr = 1.3 learn = ConvLearner.from_model_data(m, data) learn.crit = nn.CrossEntropyLoss() learn.metrics = [accuracy] wd=1e-4 # DP: m = WideResNet(depth=22, num_classes=10, widen_factor=6, dropRate=0.) learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8)) %time learn.fit(lr, 1, wds=wd, cycle_len=30, use_clr_beta=(20, 20, 0.95, 0.85)) # DP: m = WideResNet(depth=22, num_classes=10, widen_factor=6, dropRate=0.) learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8)) %time learn.fit(lr, 1, wds=wd, cycle_len=30, use_clr_beta=(20, 20, 0.95, 0.85)) learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8)) %time learn.fit(lr, 1, wds=wd, cycle_len=40, use_clr_beta=(10, 15, 0.95, 0.85)) learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8)) %time learn.fit(1., 1, wds=wd, cycle_len=30, use_clr_beta=(10, 25, 0.95, 0.85)) learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8)) %time learn.fit(1., 1, wds=wd, cycle_len=30, use_clr_beta=(10, 25, 0.95, 0.85)) %time learn.fit(lr, 1, wds=wd, cycle_len=40, use_clr_beta=(100, 15, 0.95, 0.85)) # darknet 2222 lr 1.3 65 cl %time learn.fit(lr, 1, wds=wd, cycle_len=65, use_clr_beta=(30, 20, 0.95, 0.85))LegendsThere are different types of legends we can use in our visualization. These legends depend on the geometry of the layer and on the type of the data it is being represented. Legend TypesWhen we set the legend, we have to indicate the type of the legend. Current types are:* Color * `color-category` * `color-bins` * `color-continuous`* Size * `size-category` * `size-bins` * `size-continuous`By default, each of the previous types detects the geometry of the layer. 
However, we can set the geometry explicitly, which is very useful when there are several legends and we want to identify the type of each one.* Color Category * [color-category-point](Color-Category-Point) * [color-category-line](Color-Category-Line) * [color-category-polygon](Color-Category-Polygon)* Color Bins * [color-bins-point](Color-Bins-Point) * [color-bins-line](Color-Bins-Line) * [color-bins-polygon](Color-Bins-Polygon)* Color Continuous * [color-continuous-point](Color-Continuous-Point) * [color-continuous-line](Color-Continuous-Line) * [color-continuous-polygon](Color-Continuous-Polygon)* Size Category * [size-category-point](Size-Category-Point) * [size-category-line](Size-Category-Line)* Size Bins * [size-bins-point](Size-Bins-Point) * [size-bins-line](Size-Bins-Line)* Size Continuous * [size-continuous-point](Size-Continuous-Point) * [size-continuous-line](Size-Continuous-Line) Classified by propertyAs we can see in the previous list of legend types, there are two types of legend regarding style: **color** and **size**. * **Color** types represent color style properties: `color` and `strokeColor`.* **Size** types represent size style properties: `width` and `strokeWidth`. Classified by data typeEach color and size legend type has three options: **category**, **bins**, and **continuous**.* **Category** legends: for use with **categorical** data.* **Bins** legends: for use with **numeric** data.* **Continuous** legends: for use with **numeric** data.from cartoframes.auth import set_default_credentials from cartoframes.viz import Map, Layer set_default_credentials('cartovl')Default LegendMap( Layer('populated_places'), default_legend=True )Legend InfoMap( Layer( 'populated_places', legend={ 'title': 'Populated Places', 'description': 'This is a common dataset.', 'footer': 'Source: CARTO' } ) )Color Legend Color Category Legend Color Category PointBy default, the style property represented in the legend is the `color`. But we may want to represent the `strokeColor` property instead, in which case we should specify it by using the 'prop' key in the Legend:Map( Layer( 'populated_places', 'color: ramp(top($adm0name, 5), bold)', legend={ 'type': 'color-category-point' } ) ) Map( Layer( 'populated_places', ''' color: transparent width: 10 strokeWidth: 2 strokeColor: ramp(top($adm0name, 5), bold) ''', legend={ 'type': 'color-category-point', 'prop': 'strokeColor' } ) )Color Category LineMap( Layer( 'sfcta_congestion_roads', 'color: ramp(top($direction, 5), bold)', legend={ 'type': 'color-category-line' } ) )Color Category PolygonMap( Layer( 'sf_neighborhoods', 'color: ramp(top($name, 5), bold)', legend={ 'type': 'color-category-polygon' } ) )Color Bins LegendFor *points* and *lines*, these legends are equivalent to `color-category-point` and `color-category-line` respectively. Color Bins PointMap( Layer( 'populated_places', 'color: ramp(globalQuantiles($scalerank, 5), purpor)', legend={ 'type': 'color-bins-point' } ) )Color Bins LineMap( Layer( 'sfcta_congestion_roads', 'color: ramp(globalEqIntervals($auto_speed, 5), purpor)', legend={ 'type': 'color-bins-line' } ) )Color Bins PolygonMap( Layer( 'sf_neighborhoods', 'color: ramp(globalQuantiles($cartodb_id, 5), purpor)', legend={ 'type': 'color-bins-polygon', #'color-bins-polygon' } ) )Color Continuous LegendFor *points* and *lines*, these legends are also equivalent to `color-category-point` and `color-category-line` respectively.
Color Continuous PointMap( Layer( 'populated_places', 'color: ramp(linear($scalerank), purpor)', legend={ 'type': 'color-continuous-point' } ) )Color Continuous LineMap( Layer( 'sfcta_congestion_roads', ''' color: ramp(linear($auto_speed, 1, 20), bold) ''', legend={ 'type': 'color-continuous-line' } ) )Color Continuous PolygonMap( Layer( 'sf_neighborhoods', 'color: ramp(linear($cartodb_id), sunset)', legend={ 'type': 'color-continuous-polygon', 'prop': 'color' } ) )Size Legend Size Category Legend Size Category PointMap( Layer( 'populated_places', ''' width: ramp(buckets($adm_0_cap_name, ["Capital", "City"]), [25, 5]) strokeWidth: 1 strokeColor: white ''', legend={ 'type': 'size-category-point' } ) )Size Category LineMap( Layer( 'ne_50m_rivers_lake_centerlines', 'width: ramp(buckets($featurecla, ["Lake Centerline", "River"]), [4, 1])', legend={ 'type': 'size-category-line' } ) )Size Bins Legend Size Bins PointMap( Layer( 'county_points_with_population', ''' width: ramp(globalEqIntervals($estimate_total,5), [2, 40]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.2) ''', legend={ 'type': 'size-bins-point' } ) )Size Bins LineMap( Layer( 'sfcta_congestion_roads', ''' width: ramp(globalEqIntervals($auto_speed, 5), [1, 10]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.2) ''', legend={ 'type': 'size-bins-line' } ) )Size Continuous Legend Size Continuous PointMap( Layer( 'county_points_with_population', ''' width: ramp(linear($estimate_total), [1, 80]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.4) order: asc(width()) ''', legend={ 'type': 'size-continuous-point', 'title':'Population by County' } ) )Size Continuous LineMap( Layer( 'sfcta_congestion_roads', ''' width: ramp(linear($auto_speed), [1, 10]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.2) ''', legend={ 'type': 'size-continuous-line' } ) )Multiple layersFor multiple layers visualization, the legends are placed from top to bottom.Map([ Layer( 'sf_neighborhoods', 'color: ramp(linear($cartodb_id), sunsetdark)', legend={ 'type': 'color-continuous-polygon', 'prop': 'color' } ), Layer( 'sfcta_congestion_roads', ''' width: ramp(globalEqIntervals($auto_speed, 5), [1, 10]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.2) ''', legend={ 'type': 'size-bins-line' } ) ]) Map([ Layer( 'maximum_heat_index', 'color: ramp(linear($value), purpor)', legend={ 'type': 'color-continuous-point', 'title': 'Heat Index Values' } ), Layer( 'county_points_with_population', ''' width: ramp(globalEqIntervals($estimate_total,5), [2, 40]) color: opacity(turquoise, 0.8) strokeWidth: 0.5 strokeColor: opacity(blue,0.2) ''', legend={ 'type': 'size-continuous-point', 'title': 'Population by Counti' } ) ])Use py-earth to smoothen data and peakutils to find peaksmatx_filename = 'threegaussian_broadpeak.txt' datanm, datatime, dataz_matx = loaddata(matx_filename) ## get the peak position dataframe of true data set true_df, smooth_df = earth_peak_matrix(datanm, dataz_matx, 0.1, 0, 10) from syntheticdata_threegaussian_noshift_broadpeak import gaussian, monoexp, data_matrix def make_data_matrix(nm_array, time_coeff_array, spectrum): """ generates a two-way data matrix based on a known spectrum and the spectrum's decay Args: nm_array: wavelength array time_coeff_array: an array that describes the decay spectrum: an array of the spectrum Returns: data_matrix: a matrix that contains the spectrum at each time """ data_matrix = 
np.empty((np.shape(nm_array)[0], np.shape(time_coeff_array)[0])) for i in range(np.shape(time_coeff_array)[0]): data_matrix[:, i] = time_coeff_array[i] * spectrum return data_matrix #create time array time = np.arange(0, 5000, 1) #create wavelength array nm = np.arange(850, 1600, 1) a1 = 1 #center and FWHM of the gaussian x0_1 = 950 sigma_1 = 30 #life-time of the gaussian tau1 = 10 #create a second gaussian a2 = 0.3 x0_2 = 1300 sigma_2 = 100 tau2 = 5000 #create gaussian #3 a3 = 0.2 x0_3 = 1100 sigma_3 = 600 tau3 = 5000 #generate a gaussian model species_1 = gaussian(nm, a1, x0_1, sigma_1) #generate an array of time-coefficients #describing a mono-exponential decay with a given lifetime time_coeff_1 = monoexp(time, tau1) #generate a data matrix that contains a gaussian model at each #time and decays mono-exponentially data_matrix_1 = make_data_matrix(nm, time_coeff_1, species_1) #generate a second data matrix that contains a gaussian model #at each time and decays mono-exponentially species_2 = gaussian(nm, a2, x0_2, sigma_2) time_coeff_2 = monoexp(time, tau2) data_matrix_2 = make_data_matrix(nm, time_coeff_2, species_2) #generate the third data matrix that contains a gaussian model #at each time and decays mono-exponentially species_3 = gaussian(nm, a3, x0_3, sigma_3) time_coeff_3 = monoexp(time, tau3) data_matrix_3 = make_data_matrix(nm, time_coeff_3, species_3) #generate a two-gaussian mixture model by adding #the two gaussians above data_matrix_sum = data_matrix_1 + data_matrix_2 + data_matrix_3 def plot_timeslice(nm, time, select_times, data): select_times_idx = [find_nearest(time, x) for x in select_times] num_times = len(select_times_idx) data_timeslice = np.empty((len(nm), num_times)) plt.figure() plt.xlabel('Wavelength (nm)') plt.ylabel('Intensity (a.u.)') for i in range(num_times): data_timeslice_i = data[:, select_times_idx[i]] data_timeslice[:, i] = data_timeslice_i plt.plot(nm, data_timeslice_i, label = select_times[i]) plt.legend() return data_timeslice def find_nearest(array,value): idx = (np.abs(array-value)).argmin() return idx def get_timeslice(nm, time, select_time, data): select_times_idx = find_nearest(time, select_time) data_timeslice = data[:, select_times_idx] data_timeslice_norm = data_timeslice / np.max(data_timeslice) return data_timeslice_normPlot time-slices#select time points to plot times = [1, 10, 100, 1000, 3000, 5000] #plot 1d time-slice timeslice_matx = plot_timeslice(datanm, datatime, times, data_matrix_sum)Plot normalized individual spectrumtime = 1000 timeslice_matx_1 = get_timeslice(datanm, datatime, time, data_matrix_1) timeslice_matx_2 = get_timeslice(datanm, datatime, time, data_matrix_2) timeslice_matx_3 = get_timeslice(datanm, datatime, time, data_matrix_3) timeslice_matx_sum = get_timeslice(datanm, datatime, time, data_matrix_sum) plt.figure() plt.plot(datanm, timeslice_matx_1) plt.plot(datanm, timeslice_matx_2) plt.plot(datanm, timeslice_matx_3) plt.plot(datanm, timeslice_matx_sum) plt.legend(('feature 1', 'feature 2', 'feature 3', 'sum'))Peak-finding doesn't work in this scenariotrue_df smooth_dfPower functionnum_basis = 10 x = np.linspace(-1, 1, 100) f_basis = [] for i in range(num_basis): f_basis.append(np.power(x, 1 + i)) f_basis = np.array(f_basis) plot_kernel(f_basis, xd=-0.5)Gaussiannum_basis = 11 f_basis = [] for i in range(num_basis): mu = (-1 + 2. 
/ (num_basis - 1) * i) sigma = 0.2 f_basis.append(np.exp(-np.square((x - mu) / sigma) / 2)) f_basis = np.array(f_basis) plot_kernel(f_basis, xd=0.0)Sigmoidnum_basis = 11 f_basis = [] for i in range(num_basis): mu = (-1 + 2. / (num_basis - 1) * i) sigma = 0.05 f_basis.append(1. / (1 + np.exp(-(x - mu) / sigma))) f_basis = np.array(f_basis) plot_kernel(f_basis, xd=0.0)Complexity AnalysisIn order to express the complexity of an algorithm, we can count the number of machine instructions or basic operations that will execute as a function of the input parameters. Counting the operations can give us the running time $T(n)$. This can be simplfied to give us the complexity, $\mathcal{O}$ or big-Oh, by removing constants from the equation that would have a small impact on the overall complexity for very large values of $n$. 1. Complexity in PythonThe complexity of an algorithm depends on the number of basic operations it contains. In this first exercise you will evaluate the complexity of simple Python functions - instead of algorithms. The idea is the same as algorithm analysis: identify the operations, count them. Take particular care of the loops.Here is an example:def function_a (n) : print("hello") for i in range(n): print("n = ", n, "; i=", i) print (" bye ") function_a(4)hello n = 4 ; i= 0 n = 4 ; i= 1 n = 4 ; i= 2 n = 4 ; i= 3 byeThe program enters n times in the loop \- checking the loop condition takes 1 operation (incrementing *i*). The program does only 1 operation every time it enters the loop \- we have 1 * n operations here. Outside the loop there are 2 simple instructions (one before and one after) which do only 1 operation each.def function_a(n): print("hello") #1 operation for i in range(n): #2 operation (assignment) per loop, N loops print("n = ", n) #1 operation per loop, N operations print("bye") #1 operationThe running time of this algorithm is computed as follows: every operation with a "per loop" gives a factor of n: here 2 operations, so *3n*; every operation without a "per loop" gives you a value without an _n_: here 2. So in total, T(n)=3n + 2 meaning it is $\mathcal{O}$(n).In the cell below we have a _magic_ command. These are notebook and not python commands. %%time will request that the OS times the execution of the cell for us and returns it in the cells output.%%time def function_b(n): print("starting for",n, "loops") #1 operation for i in range(n): #2 operation (assignment) per loop, N loops for j in range(n): #2 operation (assignment) per loop, N loops max=n #1 operation per loop, N operations print("max n=",n) #1 operation a=3000 function_b(a)starting for 3000 loops max n= 3000 CPU times: user 267 ms, sys: 5.83 ms, total: 273 ms Wall time: 271 msWe can run the cell repeatedly, changing the value for a. I have done 7 example tests below and recoded the times. I then used the matplotlib (which intalls by default as part of anaconda) to plot $n$ against $t$.n=[10,100,1000,3000,5000,8000, 10000] t=[444e-6,619e-6,37.3e-3,340e-3,907e-3,2.45,3.72] %matplotlib inline import matplotlib.pyplot as plt plt.plot(n,t) plt.show()Now do the same for the following 5 programs, labelling the operations and running for various values of $n$ and $m$ to validate the big-$\mathcal{O}$ experimentally. 
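As a concrete example of such an experimental check, the sketch below times function_b from above, whose nested loop we expect to be $\mathcal{O}(n^2)$, and overlays the measurements with a reference curve $c \cdot n^2$ scaled through the last point. This is only an illustration: the input sizes and the scaling choice are assumptions made here, not part of the original exercise.

import time
import numpy as np
import matplotlib.pyplot as plt

# sizes chosen for illustration; function_b performs roughly n*n inner steps
sizes = [500, 1000, 2000, 3000, 4000]
measured = []
for n in sizes:
    start = time.perf_counter()
    function_b(n)  # the nested-loop example defined above
    measured.append(time.perf_counter() - start)

# scale an n^2 reference curve so it passes through the last measurement
sizes_arr = np.array(sizes, dtype=float)
reference = measured[-1] * (sizes_arr / sizes_arr[-1]) ** 2

plt.plot(sizes, measured, marker='o', label='measured function_b')
plt.plot(sizes, reference, linestyle='--', label='c * n^2 reference')
plt.xlabel('n')
plt.ylabel('time [s]')
plt.legend()
plt.show()

If the measured times track the $n^2$ reference as $n$ grows, the quadratic model is a good description of the running time.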
When you analyse the five programs below, watch out for the loop definitions.def function_1(n): for i in range(11): print(i*n) def function_2(n): s=1 for i in range(n): s=s+i**2 return s def function_3(n): s=1 for i in range(n): for j in range(n): s=s+i*j return s def function_4(n,m): i=1;j=1 opcount=2 while i<=n and j<=m: i=i+1 j=j+1 opcount+=2 return opcount def function_5(n,m): i=1;j=1;s=1 while i<=n: if j<=m: s=s*j j=j+1 else: s=s+i i=i+1 return s Run these programs with different values of the $n$ and $m$ parameters. Pick one and experiment with plotting the times. Does the graph match the big-$\mathcal{O}$ you expected? Automatic Time (Run-time performance) measuring Note that the candidate functions share some patterns in their signatures (the signature describes what inputs, and how many, a function expects). For instance, functions 1 to 3 expect only 1 input; functions 4 and 5 expect 2 numbers. So it is possible to construct a looping program to test all input ranges for all functions, so that we don't have to run the tests and record the running times manually.import time def automatic_time(input_array, func_name): # input_array contains all elements of input, # preferably sorted from small to big, # each element could also be a tuple, which is required # for functions that need more than 1 input # func_name is the name of the function (as an object, not a str) # this function also suppresses printed output from func_name ret = [] for val in input_array: start = time.time() from IPython.utils import io as iuio with iuio.capture_output() as suppressed: if isinstance(val, tuple): _ = func_name(*val) else: _ = func_name(val) ret.append((time.time() - start)*1e9) # 1e9 converts the unit from seconds to nanoseconds return ret my_input_arr = [10,100,1000,3000,5000,8000, 10000] print("testing function_1 with the automatic measure") print("run-time for function 1 result in nanoseconds: \n{}".format(automatic_time(my_input_arr, function_1)))testing function_1 with the automatic measure run-time for function 1 result in nanoseconds: [5800724.029541016, 3230571.746826172, 3318786.62109375, 2918243.408203125, 2870559.6923828125, 2851486.2060546875, 2820491.7907714844]So now we can construct a bigger loop to automate the rest:input_arr = [i for i in range(1000, 100000, 1000)] # input_arr_t = [(i,i) for i in range(1, 1000)] # fun1 = automatic_time(input_arr, function_1) fun2 = automatic_time(input_arr, function_2) # fun3 = automatic_time(input_arr, function_3) # fun4 = automatic_time(input_arr_t, function_4) # fun5 = automatic_time(input_arr_t, function_5) plt.plot(input_arr, fun2) plt.xlabel("input size") plt.ylabel("running time in nanoseconds") plt.show()Data Science Connect nbdev talk> The nbdev project contains our talk on using nbdev in production at Mariner.#hide from nbdev.showdoc import * #export def my_exciting_function(arg): return arg ! nbdev_build_lib ! nbdev_build_docsConverted 00_core.ipynb. Converted index.ipynb.
converting: /Users/stephenwelch/dsc_nbdev_talk/00_core.ipynb converting: /Users/stephenwelch/dsc_nbdev_talk/index.ipynb converting /Users/stephenwelch/dsc_nbdev_talk/index.ipynb to README.md Description: Given an array A of positive integers, A[i] is the score of the i-th sightseeing spot, and the distance between two spots i and j is j - i. The score of a pair of spots (i < j) is A[i] + A[j] + i - j: the sum of the two scores minus the distance between them. Return the maximum score a pair of sightseeing spots can achieve. Example: Input: [8,1,5,2,6] Output: 11 Explanation: i = 0, j = 2, A[i] + A[j] + i - j = 8 + 5 + 0 - 2 = 11 Constraints: 1. 2 <= A.length <= 50000 2. 1 <= A[i] <= 1000 class Solution: def maxScoreSightseeingPair(self, A) -> int: max_val = -float('inf') res = [] for i in range(len(A)): for j in range(i+1, len(A)): max_val = max(max_val, A[i] + A[j] + i - j) return max_val # To maximize the expression A[i] + i + A[j] - j, note that for any index j the score is maximized by the best value of A[i] + i seen so far (since A[j] - j is fixed). # As a result, we can make a single O(n) pass, remembering only the maximum of A[i] + i in O(1) space, and updating the best score observed so far (the variable 'best'). class Solution: def maxScoreSightseeingPair(self, A) -> int: K = A[0] best = -float('inf') for j in range(1, len(A)): x = A[j] best = max(best, K + x - j) # A[i] + i + A[j] - j K = max(K, x + j) # A[j] + j return best class Solution: def maxScoreSightseeingPair(self, A) -> int: arr = [A[0] + A[j] - j for j in range(1, len(A))] # scores of spot 0 paired with every other spot max_dif = 0 ans = max(arr) print(arr, ans) for j in range(1, len(arr)): max_dif = max(max_dif, A[j] + j - A[0]) # A[0] + A[j] + i - j print(A[j], j, A[0], A[j] + j - A[0]) ans = max(ans, arr[j] + max_dif) # arr[j] return ans class Solution: def maxScoreSightseeingPair(self, A) -> int: res = 0 max_so_far = A[0] + 0 for i in range(1, len(A)): res = max(res, max_so_far + A[i] - i) # A[0] + A[i] + i - j max_so_far = max(max_so_far, A[i] + i) return res solution = Solution() solution.maxScoreSightseeingPair([8,1,5,2,6])8 11 7 10Delete Dataimport os from glob import glob[Table of Contents](table-of-contents) 1. [About](about)2. [User Inputs](user-inputs)3. [Delete Files and Folders](delete-files-and-folders) - 3.1 [Delete Raw and Supplementary Datasets](delete-raw-and-supplementary-datasets) 1. About This notebook will delete all data files that were downloaded and used in the data analysis performed in this project. Although we would normally delete both raw and processed data files, this project only has raw data files, so we will just delete those here. 2. User Inputspath_to_raw_data_dir = "data/raw"3. Delete Files and Folders We'll now delete the raw extracted CSV files and any data folders that were downloaded and stored in `data/raw`. 3.1. Delete Raw and Supplementary Datasets As mentioned above, since we only have raw data files (no raw data folders or processed data files), we'll only delete those files in this section.
Get a list of all the CSV files of bikeshare ridership that were downloadedraw_data_files = glob(f"{path_to_raw_data_dir}/*.csv")Delete all the downloaded CSV filesfor f in raw_data_files: os.remove(f)re-building this hapfreq map without gdal dependenciesimport pandas as pd import seaborn as sns import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cfeature import numpy as np from matplotlib.lines import Line2D import matplotlib.patches as patches import matplotlib.patheffects as PathEffects %run setup.ipynb %run ../agam-report-base/src/python/ag1k/phase2_ar1.py %matplotlib inlinesetup datadf_meta = pd.read_csv('../data/samples.meta.txt', sep="\t") df_meta.rename(columns={"location":"region"}, inplace=True) df_meta.columns df_haplotypes = pd.read_csv('../data/haplotypes.autosomes.meta.txt', sep='\t') df_haplotypes['region'] = df_haplotypes['region'].str.replace('_', " ") df_haplotypes['region'] = df_haplotypes['region'].str.replace('Gado-Badzere', "Gado Badzere") df_haplotypes['region'] = df_haplotypes['region'].str.replace('Zembe-Borongo', "Zembe Borongo") df_haplotypes.head() # use the network membership to define haplotype groups vgsc_clusters = np.load('../data/median_joining_network_membership.npy').astype('U') clust_dict = {(l if l else 'wt'): set(np.nonzero(vgsc_clusters == l)[0]) for l in np.unique(vgsc_clusters)} clust_dict.keys() # merge the "other resistant" groups clust_dict['OR'] = clust_dict['FX'] | clust_dict['SX'] del clust_dict['FX'] del clust_dict['SX'] clust_dict.keys() hap_labels = sorted(clust_dict) # reorder for aesthetics hap_labels = ( [l for l in hap_labels if l.startswith('F')] + [l for l in hap_labels if l.startswith('S')] + # [l for l in hap_labels if l.startswith('L')] + ['OR', 'wt'] ) hap_labels def make_df_pops(): global df_pops tbl_pops = ( etl .wrap([ ['pop', 'long_label', 'short_label', 'query'], ['AOcol', 'Angola $coluzzii$', 'AO$Ac$', 'population == "AOcol"'], ['GHcol', 'Ghana $coluzzii$', 'GH$Ac$', 'population == "GHcol"'], ['BFcol', 'Burkina Faso $coluzzii$', 'BF$Ac$', 'population == "BFcol"'], ['CIcol', "Côte d'Ivoire $coluzzii$", 'CI$Ac$', 'population == "CIcol"'], ['GNcol', "Guinea $coluzzii$", 'GN$Ac$', 'population == "GNcol"'], ['CMgam', 'Cameroon $gambiae$', 'CM$Ag$', 'population == "CMgam"'], ['CMgam_savanna', 'Cameroon (savanna) $gambiae$', 'CM$Ag$', 'population == "CMgam" and (region == "Gado Badzere" or region == "Zembe Borongo")'], ['CMgam_transition', 'Cameroon (transition) $gambiae$', '', 'population == "CMgam" and region == "Daiguene"'], ['CMgam_forest', 'Cameroon (forest) $gambiae$', '', 'population == "CMgam" and region == "Mayos"'], ['GHgam', 'Ghana $gambiae$', 'GH$Ag$', 'population == "GHgam"'], ['BFgam', 'Burkina Faso $gambiae$', 'BF$Ag$', 'population == "BFgam"'], ['GNgam', 'Guinea $gambiae$', 'GN$Ag$', 'population == "GNgam"'], ['GW', 'Guinea-Bissau', 'GW', 'population == "GW"'], ['GM', 'The Gambia', 'GM', 'population == "GM"'], ['GAgam', 'Gabon $gambiae$', 'GA$Ag$', 'population == "GAgam"'], ['UGgam', 'Uganda $gambiae$', 'UG$Ag$', 'population == "UGgam"'], ['FRgam', 'Mayotte $gambiae$', 'FR$Ag$', 'population == "FRgam"'], ['GQgam', 'Bioko $gambiae$', 'GQ$Ag$', 'population == "GQgam"'], ['KE', 'Kenya', 'KE', 'population == "KE"'], ]) .addfield('latitude', lambda row: df_meta.query(row.query).latitude.mean()) .addfield('longitude', lambda row: df_meta.query(row.query).longitude.mean()) .addfield('n_haps', lambda row: len(df_meta.query(row.query)) * 2) ) df_pops = tbl_pops.todataframe() df_pops = 
df_pops.set_index('pop') make_df_pops() df_popscalculate hap freqs across populations - produce df for plottingdef compute_hap_freqs(): global df n_pops = len(df_pops) n_haps = len(hap_labels) hap_frequencies = np.zeros([n_pops, n_haps], dtype=int) # then loop through clusters for i, pop in enumerate(df_pops.index): pop_query = df_pops.loc[pop].query pop_hap_ixs = set(df_haplotypes.query(pop_query).index.values) for j, label in enumerate(hap_labels): core_hap_ixs = clust_dict[label] isec = pop_hap_ixs.intersection(core_hap_ixs) hap_frequencies[i, j] = len(isec) counts = df_pops.n_haps counts # make df for plotting df = pd.DataFrame(data=hap_frequencies, index=df_pops.index, columns=hap_labels) df['other'] = counts - df.sum(axis=1).values df['total'] = counts df['lon'] = df_pops['longitude'] df['lat'] = df_pops['latitude'] compute_hap_freqs()collapse GN into one pop as GNcol is only 8 haplotypes and they are the same as GNgamdf.loc['GNgam', 'F1'] = 86 df.loc['GNgam', 'other_resistant'] = 1 df.loc['GNgam', 'wt'] = 1 df.loc['GNgam', 'total'] = 88 df.rename(index={'GNgam':'GN'},inplace=True) df.drop("GNcol", axis=0, inplace=True) dfmap funcrs_lonlat = ccrs.PlateCarree() ratios = np.asarray([0.5, 0.5]) sum(ratios) crs_lonlat = ccrs.PlateCarree() #drop CM transect df.drop("CMgam_savanna", axis=0, inplace=True) df.drop("CMgam_transition", axis=0, inplace=True) df.drop("CMgam_forest", axis=0, inplace=True)offsets for piesdf.reset_index(inplace=True) #make lonlat offsets, label position exceptions df['offset_lon'] = np.zeros(len(df)) df['offset_lat'] = np.zeros(len(df)) df['label'] = 'top' #ghana col df.loc[df['pop'] == 'GHcol', 'offset_lat'] = -3 df.loc[df['pop'] == 'GHcol', 'offset_lon'] = -2.6 #df.loc[df['pop'] == 'GHcol', 'label'] = 'bottom' #ghana gam df.loc[df['pop'] == 'GHgam', 'offset_lat'] = -3 df.loc[df['pop'] == 'GHgam', 'offset_lon'] = 2.6 #df.loc[df['pop'] == 'GHgam', 'label'] = 'bottom' #BFgam df.loc[df['pop'] == 'BFgam', 'offset_lat'] = 3 df.loc[df['pop'] == 'BFgam', 'offset_lon'] = 2.6 df.loc[df['pop'] == 'BFgam', 'label'] = 'bottom' #BFcol df.loc[df['pop'] == 'BFcol', 'offset_lat'] = 3 df.loc[df['pop'] == 'BFcol', 'offset_lon'] = -2.6 df.loc[df['pop'] == 'BFcol', 'label'] = 'bottom' #Uganda df.loc[df['pop'] == 'UGgam', 'offset_lat'] = 3 df.loc[df['pop'] == 'UGgam', 'offset_lon'] = 0 df.loc[df['pop'] == 'UGgam', 'label'] = 'bottom' #Gambia df.loc[df['pop'] == 'GM', 'offset_lat'] = 0 df.loc[df['pop'] == 'GM', 'offset_lon'] = -3 df.loc[df['pop'] == 'GM', 'label'] = 'bottom' #GB df.loc[df['pop'] == 'GW', 'offset_lat'] = -2 df.loc[df['pop'] == 'GW', 'offset_lon'] = -2 #Kenya df.loc[df['pop'] == 'KE', 'offset_lat'] = -1 df.loc[df['pop'] == 'KE', 'offset_lon'] = 2 df.loc[df['pop'] == 'KE', 'label'] = 'bottom' #Cam df.loc[df['pop'] == 'CMgam', 'offset_lat'] = 3 df.loc[df['pop'] == 'CMgam', 'offset_lon'] = 3 df.loc[df['pop'] == 'CMgam', 'label'] = 'bottom' #mayotte df.loc[df['pop'] == 'FRgam', 'offset_lat'] = 2 df.loc[df['pop'] == 'FRgam', 'offset_lon'] = -1 df.loc[df['pop'] == 'FRgam', 'label'] = 'bottom' #Equatorial Guinea df.loc[df['pop'] == 'GQgam', 'offset_lat'] = -1 df.loc[df['pop'] == 'GQgam', 'offset_lon'] = -2 df.loc[df['pop'] == 'GQgam', 'label'] = 'bottom' #Angola df.loc[df['pop'] == 'AOcol', 'offset_lat'] = 0 df.loc[df['pop'] == 'AOcol', 'offset_lon'] = 3 #Gabon # df.loc[df['pop'] == 'Gabon', 'offset_lat'] = -1.5 # df.loc[df['pop'] == 'Gabon', 'offset_lon'] = 3 df.loc[df['pop'] == 'GAgam', 'offset_lat'] = -2 df.loc[df['pop'] == 'GAgam', 'offset_lon'] = 2.5 #Guinea 
df.loc[df['pop'] == 'GN', 'offset_lat'] = 2 df.loc[df['pop'] == 'GN', 'offset_lon'] = -1 df.loc[df['pop'] == 'GN', 'label'] = 'bottom' # #Guinea # df.loc[df['pop'] == 'Guinea', 'offset_lat'] = 2 # df.loc[df['pop'] == 'Guinea', 'offset_lon'] = -1 #Cote df.loc[df['pop'] == "CIcol", 'offset_lat'] = 2 df.loc[df['pop'] == "CIcol", 'offset_lon'] = -2.5 #df.loc[df['pop'] == "CIcol", 'label'] = 'bottom' df palette = sns.color_palette('nipy_spectral', n_colors=len(hap_labels) - 2, desat=0.8) # add a colour for other_resistant palette.append((0, 0, 0)) # add a colour for wt palette.append((0.9, 0.9, 0.9)) # check sns.palplot(palette) plt.gca().set_xticklabels(hap_labels); # for legend f_colors = {l: c for l, c in zip(hap_labels, palette)} f_colors def plot_map(fn=None): pie_scale_factor=0.02 subplot_kw = dict(projection=ccrs.PlateCarree()) # figsize here is the entire figure size in inches, reduced later by bbox_inches # This dpi setting affects the display size of the figure in the notebook fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=subplot_kw) ax.add_feature(cfeature.LAND, linewidth=1, zorder=1, edgecolor='gray') ax.add_feature(cfeature.BORDERS, linewidth=0.5, zorder=2, edgecolor='gray') # def plot_shiny_map(fn=None, dpi=150): # subplot_kw = dict(projection=crs_lonlat) # fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=subplot_kw) # ax.set_extent(extent_lonlat, crs=crs_lonlat) # ax.imshow(data, cmap=color_map, extent=extent_lonlat, origin='upper', alpha=0.3) # ax.coastlines(resolution='50m', linewidth=1.5, zorder=1) # ax.add_feature(cfeature.BORDERS, linewidth=1.5, zorder=4) #add pies for _, row in df.iterrows(): ratios = np.asarray([row[k]/row.total for k in hap_labels]) ratios = np.append(ratios, 1 - np.sum(ratios)) # wedgeprops is used here just to pass the zorder command centre = (row.lon + row.offset_lon, row.lat + row.offset_lat) #radius = np.sqrt(row.total * .02) radius=np.sqrt(row.total * pie_scale_factor) ax.pie(ratios, wedgeprops=dict(zorder=7, linewidth=0), colors=palette, center=centre, radius=radius, shadow=True) ax.add_patch(plt.Circle(xy=centre, radius=radius, facecolor='none', edgecolor='k', zorder=8, lw=2)) lbl = row['pop'] if row.label == 'bottom': ax.text(centre[0], centre[1] + (radius + (0.1 * radius)), lbl, ha='center', va='bottom', fontsize=10, fontweight='bold', bbox=dict(edgecolor='w', facecolor='w', pad=1, alpha=.8, ), zorder=6) if row.label == 'top': ax.text(centre[0], centre[1] - (radius + (0.1 * radius)), lbl, ha='center', va='top', fontsize=10, fontweight='bold', bbox=dict(edgecolor='w', facecolor='w', pad=1, alpha=.8, ), zorder=6) if row.offset_lat != 0 or row.offset_lon != 0: ax.plot([row.lon, row.lon + row.offset_lon],[row.lat, row.lat + row.offset_lat], 'k-', lw=2, zorder=5) #cover tiny island plt.plot([-15, -5],[-5, -30], 'w', lw=20, zorder=2) ax.set_extent([-22.5, 48.5, -13, 15.5]) handles = [mpl.patches.Patch(facecolor=f_colors[k], edgecolor='k', label=k) for k in hap_labels] leg = ax.legend(handles=handles, bbox_to_anchor=(0, 0), loc='lower left', title="Haplotype group", ncol=4, prop={'size': 10}, frameon=False) #leg._legend_box.align = "left" #un hash to build second legend # #Sample size legend # plt.text(-15.7, -5, 'Sample size', fontsize=10, fontweight='bold') # # Markers of same size as circles (below) used for spacing, hidden by alpha # l1 = Line2D([], [], marker = 'o', markersize=10, markerfacecolor='k', color='k', alpha=0) # l2 = Line2D([], [], marker = 'o', markersize=20, markerfacecolor='k', color='k', alpha=0) # l3 = Line2D([], [], 
marker = 'o', markersize=50, markerfacecolor='k', color='k', alpha=0) # labelz = ["n=50", " n=250", " n=500"] # leg = ax.legend([l1, l2, l3], labelz, ncol=3, frameon=False, fontsize=12, handlelength=1, loc=3, # borderpad=1.6, handletextpad=1.6, bbox_to_anchor=(-0.05, 0.0)) # #lw=2 to match the circles added around the pies # ax.add_patch(plt.Circle(xy=(-20,-10.5), radius=np.sqrt(50 * pie_scale_factor), facecolor='k', zorder=12, lw=0)) # ax.add_patch(plt.Circle(xy=(-9,-10.5), radius=np.sqrt(200 * pie_scale_factor), facecolor='k', zorder=12, lw=0)) # ax.add_patch(plt.Circle(xy=(2.8,-10.5), radius=np.sqrt(500 * pie_scale_factor), facecolor='k', zorder=12, lw=0)) if fn: fig.savefig(fn, format="svg", bbox_inches='tight'); plot_map(fn='../artwork/figure_Phase_2_vgsc_haplotype_frequency_new.svg') #adds the correctly scaled pies to copy into the plot above in inkscape plot_map(fn='../artwork/figure_Phase_2_vgsc_haplotype_frequency_pie_legend.svg')Ex3 - Getting and Knowing your Data This time we are going to pull data directly from the internet.Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries!conda install --yes pandas import pandas as pdCollecting package metadata (current_repodata.json): done Solving environment: done ==> WARNING: A newer version of conda exists. <== current version: 4.7.12 latest version: 4.8.0 Please update conda by running $ conda update -n base conda # All requested packages already installed.Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). Step 3. Assign it to a variable called users and use the 'user_id' as indexdata = pd.read_csv("https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user", sep="|", index_col="user_id")Step 4. See the first 25 entriesdata.head(25)Step 5. See the last 10 entriesdata.tail(10)Step 6. What is the number of observations in the dataset?data.shape[0]Step 7. What is the number of columns in the dataset?data.shape[1]Step 8. Print the name of all the columns.for column in data.columns: print(column)age gender occupation zip_codeStep 9. How is the dataset indexed?data.indexStep 10. What is the data type of each column?data.dtypesStep 11. Print only the occupation columndata.occupationStep 12. How many different occupations there are in this dataset?data.occupation.nunique()Step 13. What is the most frequent occupation?data.occupation.value_counts().head(1).index[0]Step 14. Summarize the DataFrame.data.describe()Step 15. Summarize all the columnsdata.describe(include = "all")Step 16. Summarize only the occupation columndata.occupation.describe()Step 17. What is the mean age of users?round(data.age.mean())Step 18. What is the age with least occurrence?data.age.value_counts().tail()Corona Virus Infections**Please note:** _I am not an epidemiologist, scientist or public health professional. I don't know shit. 
These calculations are an experiment, they may be wrong by a significant amount and should not be used as a basis for decision making._**If you have any feedback or suggestions:** please create an issue or PR on GitHub at [samuelcolvin/covid-19-calcs](https://github.com/samuelcolvin/covid-19-calcs).I'm sitting around in Wiltshire self-isolating and getting frustrated by the media's insistence on using the number of confirmed cases as a cornerstone of their coverage of COVID-19 (from here on CV), when that's clearly an awful measure since the UK stopped testing most people ages ago.Here's an alternative way of calculating the number of cases: take the number of deaths (which is likely to be relatively accurate) and work back to guess how many people would have needed to be infected some time ago to cause that number of deaths. Use that as an estimate of the number of cases then. Projecting the number of deaths forward allows an estimate of the number of infections today.Assumptions and sources (more details of potential sources of error at the bottom of this document):* deaths per day from CV by country from [our world in data](https://ourworldindata.org/coronavirus-source-data)* **mortality rate: 0.9%** from [wikipedia](https://en.wikipedia.org/wiki/Coronavirus_disease_2019#cite_note-126) and in turn [this paper from imperial](https://web.archive.org/web/20200210105717/https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-2019-nCoV-severity-10-02-2020.pdf)* average time from **contracting CV to death: 17** days from [this now infamous medium blog](https://medium.com/@tomaspueyo/coronavirus-act-today-or-people-will-die-f4d3d9cd99ca), which in turn seems to refer to [this](https://github.com/midas-network/COVID-19/tree/master/parameter_estimates/2019_novel_coronavirus#time-from-hospitalization-to-discharge) list of papers, but doesn't make it clear exactly where that number comes from. This is really significant since slightly varying the time enormously impacts the estimated number of cases, see below.* the number of cases and deaths is currently increasing exponentially in Europe, and an exponential curve appears to be a reasonable fit for the data. Exponential growth is a very good approximation for the beginning of a logistic curve, see [this excellent video on logistic curves and exponential curves in relation to CV](https://www.youtube.com/watch?v=Kas0tIxDvrg)* I'm looking just at cases in Europe since I'm primarily interested in the UK and Europe seems the most similar to the UK in terms of health services, willingness to institute draconian measures and knowledge of the virus.
I'm not using just the UK since the small number of deaths (so far) makes estimating the best fit exponential curve difficultEnough flirting, let's do some python:# import some stuff and setup matplotlib (you can probably ignore all this) from datetime import datetime import pandas as pd import matplotlib.pyplot as plt import numpy as np plt.rcParams['figure.figsize'] = [16, 5] # set some constants and download the raw data # see discussion above time_to_die = 17 # assuming a mortality rate of 0.9% mortality_rate = 0.009 infection_multiple = 1 / mortality_rate df_all = pd.read_csv('https://covid.ourworldindata.org/data/total_deaths.csv', parse_dates=['date']) df_all = df_all.fillna(0).set_index('date') latest_date = df_all.index[-1] print(f'downloaded data on total deaths from {df_all.index[0]:%Y-%m-%d} to {latest_date:%Y-%m-%d}') # from manually going through the above CSV, here are countries in europe. # It might not be perfect but it's good enough for this purpose european_countries = [ 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Gibraltar', 'Greece', 'Guernsey', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Malta', 'Monaco', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'San Marino', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Ukraine', 'United Kingdom', 'Vatican', ] # add a new column with the number of deaths in the whole of europe df_all['europe'] = sum(df_all[c] for c in european_countries) # create a new dataframe with just the date column and number of deaths in europe as 'deaths_actual' # we've also cut off the beginning of the dataframe since there were no deaths in europe before the 16th of Feb df = df_all[['europe']].truncate(before=datetime(2020, 2, 15)).rename(columns={'europe': 'deaths_actual'}) # print out the raw table to make sure it makes sense df # set's set up a function to plot our results so we can reuse it # (I'm a software developer not a data scientist if you hadn't already noticed) def log_lin_plots(df, title, ylabel=None): """ Plot the data from a linear and log scale with some title and optional label for the y axis """ fig, axes = plt.subplots(ncols=2) fig.suptitle(f'{title} (linear & log scales)') legend = not ylabel df.plot(ax=axes[0], legend=legend) df.plot(logy=True, ax=axes[1], legend=legend) for ax in axes.flat: ax.set(ylabel=ylabel, xlabel=None) plt.show() log_lin_plots(df, 'Cumulative deaths in Europe due to Covid-19', ylabel='Number of Deaths') # let's fit an exponential curve to the data curve above def to_day(ts): """ Convert a timestamp into an integer. The actual magnitude of the number doesn't matter, hence choosing the beginning of 2020 as epoch in this calculation. 
""" return (ts.to_pydatetime() - datetime(2020, 1, 1)).days # now eyeballing the numbers above, let's truncate the data to only doing the fitting on data # from the 26th onwards when the curve can realistically be considered exponential # (the line on the log plot above is straight) df_fit = df.truncate(before=datetime(2020, 2, 26)) # next, convert the timestamp index into ints x = np.array([to_day(v) for v in df_fit.index]) # now we can use numpy.polyfit to do a simple linear regression (that's the 1 argument) # against the log of the actual death numbers, you can think of this as simply a posh way # of measuring the gradient (b) and y intercept (a) of the straight part of the line on the right above b, log_a = np.polyfit(x, np.log(df_fit['deaths_actual']), 1) a = np.exp(log_a) def deaths_fit(day): """ estimate total number of deaths on a given day based on a regression on the actual deaths each day """ return a * np.exp(b * day) # okay, let's see if our prediction looks like it's any good # we're going to plot our prediction of the number of deaths against the actual numbers days = np.array([to_day(v) for v in df.index]) df_predictions = pd.DataFrame( {'deaths_actual': df['deaths_actual'], 'deaths_prediction': deaths_fit(days)}, index=df.index, ) log_lin_plots(df_predictions, 'Actual deaths and predictions from best fit line') # this looks over all like a pretty good match # side note: Some postive news, I first than these numbers with data up to the 13th, since then # the exponential curve has started to out strip the actual data. We could perhaps therefore tentatively # start to suggest that europe has just begun to curb the rate of infection! # lots of messing around, now we can actually estimate the number of infections in Europe def deaths_per_day(day): """ Calculate the number of people who die on a given day by differentiating the above deaths fit equation. (I hope you remember your AS level maths!) """ return a * b * np.exp(b * day) def calc_infection(day): """ Calculate the number of people infected on this day based on the number expected to die on some day in the future who contracted the virus on this day. 
""" deaths_hence = deaths_per_day(day + time_to_die) return deaths_hence * infection_multiple # to plot the estimate over time, calculate for every day in the time period we're using df_infections = pd.DataFrame({'infections': np.cumsum(calc_infection(days))}, index=df.index) log_lin_plots(df_infections, 'Estimated infections (past and present) in Europe', ylabel='Number of infections') # and print out the number of estimated cases on the last day with data europe_infections = df_infections['infections'][latest_date] print( f'Estimated number of infections (past and present) in Europe on ' f'{latest_date:%Y-%m-%d}: {europe_infections:0,.0f}' ) # to estimate the number of infections in the UK (which is my (selfish) primary interest) # I'm assuming the proportion of infections mirrors the proportion of deaths latest_deaths_eu = df_all['europe'][latest_date] latest_deaths_uk = df_all['United Kingdom'][latest_date] print( f'There were {latest_deaths_eu:0,.0f} deaths in Europe on {latest_date:%Y-%m-%d} ' f'and {latest_deaths_uk:0,.0f} in the UK' ) uk_infections = europe_infections * latest_deaths_uk / latest_deaths_eu print(f'Estimated number of infections (past and present) on {latest_date:%Y-%m-%d} in UK: {uk_infections:0,.0f}')There were 2,333 deaths in Europe on 2020-03-16 and 35 in the UK Estimated number of infections (past and present) on 2020-03-16 in UK: 771,454Sensibility Matrix - G Este [IPython Notebook](http://ipython.org/videos.htmlthe-ipython-notebook) utiliza a biblioteca de código aberto [Fatiando a Terra](http://fatiando.org/)#%matplotlib inline import numpy as np from scipy.misc import derivative import scipy as spy from scipy import interpolate import matplotlib matplotlib.use('TkAgg', force=True) import matplotlib.pyplot as plt import math import cPickle as pickle import datetime #from IPython.display import Image as img #from IPython.display import Markdown as md #from IPython.display import display as dp import string as st from scipy.misc import imread from __future__ import division from fatiando import gravmag, mesher, utils, gridder from fatiando.mesher import Prism, Polygon from fatiando.gravmag import prism from fatiando.utils import ang2vec, si2nt, contaminate from fatiando.gridder import regular from fatiando.vis import mpl from numpy.testing import assert_almost_equal from numpy.testing import assert_array_almost_equal from pytest import raises plt.rc('font', size=16) import functions as fc # Model`s limits ymin = 0.0 ymax = 100000.0 zmin = -1000.0 zmax = 100000.0 xmin = -100000.0 xmax = 100000.0 area = [ymin, ymax, zmax, zmin] ny = 10 # number of observation datas and number of prisms along the profile # coordinates defining the horizontal boundaries of the # adjacent columns along the profile y = np.linspace(ymin, ymax, ny) # coordinates of the center of the columns forming the # interpretation model n = ny - 1 dy = (ymax - ymin)/n ycmin = ymin + 0.5*dy ycmax = ymax - 0.5*dy yc = np.reshape(np.linspace(ycmin, ycmax, n),(n,1)) x = np.zeros_like(yc) z = np.zeros_like(yc)-150.0 ## Edge extension (observation coordinates) sigma = 0.5 edge = sigma*dy*n # Model densities # Indices and polygons relationship: # cc = continental crust layer # oc = ocean crust layer # w = water layer # s = sediment layer # m = mantle layer dw = np.array([1030.0]) ds0 = np.array([2550.0]) dcc = np.array([2670.0]) doc = np.array([2840.0]) dm = np.array([3200.0]) #dc = dcc # coordinate defining the horizontal boundaries of the continent-ocean boundary COT = 60000.0 # list defining crust 
density variance dc = np.zeros_like(yc) aux = yc <= COT for i in range(len(yc[aux])): dc[i] = dcc for i in range(len(yc[aux]),n): dc[i] = doc # defining sediments layers density matrix ds = np.reshape(np.repeat(ds0,n),(n,1)) # S0 => isostatic compensation surface (Airy's model) # SR = S0+dS0 => reference Moho (Forward modeling) S0 = np.array([40000.0]) dS0 = np.array([5000.0]) pjmin = np.array([-50.0]) pjmax = np.array([100000.0]) dp = 1000. A = 500. B = 3500. c = 0.00009 D = 20000. bathymetry = fc.bathymetry_function(A,B,c,D,yc) tw = bathymetry - 0.0mpl.close('all')axes = mpl.figure().add_subplot(1,1,1)mpl.ylim(zmax, zmin)mpl.xlim(ymin, ymax)mpl.xlabel('y (m)')mpl.ylabel('z (m)')mpl.paths([[ymin, 0.0]], [[ymax, 0.0]], style='-k', linewidth=1)mpl.plot(yc, bathymetry, '-b', linewidth=1)mpl.plot(yc, sediments_limit, '--r', linewidth=1)draw = mpl.draw_polygon(area, axes, color='r') drawsediments = np.array([[ 6250. , 19331.16883117], [ 21774.19354839, 17418.29004329], [ 38911.29032258, 16325.21645022], [ 63306.4516129 , 14685.60606061], [ 92741.93548387, 13592.53246753]]) # change the coordinates of the extremum points in order to # avoid problems for constructing the interpolator sediments[0,0] = ymin sediments[-1,0] = ymax sediments_interpolated = fc.surface_interpolate_function(sediments,yc) ts = sediments_interpolated - tw S = fc.moho_function(S0,dw,ds,dcc,dm,dc,tw,ts) tm = S0 - S toc = S - tw - ts #thickness of the oceanic crust along the profile print ts.shape, tm.shape, S0.shape, dS0.shape psyn = [] psyn = np.vstack((ts, tm, dS0)) # prisms calculation by prism_w_syn = fc.prism_w_function(xmax,xmin,dy,edge,dw,dcc,tw,yc) prism_s_syn = fc.prism_s_function(xmax,xmin,dy,edge,ds,dcc,tw,psyn,yc) prism_c_syn = fc.prism_c_function(xmax,xmin,dy,edge,S0,dcc,dc,tw,psyn,yc) prism_m_syn = fc.prism_m_function(xmax,xmin,dy,edge,S0,dcc,dm,psyn,yc) # z component of gravity calculation by gzw_syn = prism.gz(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),prism_w_syn) gzs_syn = prism.gz(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),prism_s_syn[0]) gzc_syn = prism.gz(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),prism_c_syn) gzm_syn = prism.gz(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),prism_m_syn) #Observed data calculation: gsyn = gzw_syn + gzs_syn + gzc_syn + gzm_syn #gsyn = fc.g_function(x,yc,z,gzw_syn,prism_s_syn,prism_c_syn,prism_m_syn)Calcula matriz de sensibilidade com plot da variacao do parametroG1 = fc.G_matrix_function(xmax,xmin,dy,edge,1000.,1000.,S0,dw,ds,dm,dcc,dc,tw,psyn,yc) %matplotlib inline G2 = fc.G_matrix_function_all(xmax,xmin,dy,edge,1000.,1000.,COT,S0,dw,ds,dm,dcc,dc,tw,psyn,yc,area,11)Compara dois metodos de calculo da matriz sensibilidadeassert_array_almost_equal(G1, G2, decimal=3)Summary--- Importsimport os import shlex import subprocess import tempfile from pathlib import Path import optuna import concurrent.futures import itertools import lightgbm import json import lightgbm as lgb import string import numpy as np import pandas as pd import matplotlib.pyplot as plt import time import math import pyarrow as pa import pyarrow.parquet as pq import torch from scipy import stats from sklearn.decomposition import PCA from sklearn.model_selection import PredefinedSplit from tqdm.notebook import tqdm import multiprocessing as mp pd.set_option("max_columns", 1000)ParamtersNOTEBOOK_DIR = Path("04_train_model").resolve() NOTEBOOK_DIR.mkdir(exist_ok=True) NOTEBOOK_DIR COI = "interface" DATASET_VERSION = "v2" if "DATAPKG_OUTPUT_DIR" in 
os.environ: OUTPUT_DIR = Path(os.getenv("DATAPKG_OUTPUT_DIR")).joinpath("elaspic2").resolve() else: OUTPUT_DIR = NOTEBOOK_DIR.parent OUTPUT_DIR.mkdir(exist_ok=True) OUTPUT_DIR if (slurm_tmpdir := os.getenv("SLURM_TMPDIR")) is not None: os.environ["TMPDIR"] = slurm_tmpdir print(tempfile.gettempdir()) if COI == "core": datasets = [ "elaspic-training-set-core", "protherm-dagger-core", "rocklin-2017-core", "dunham-2020-core", "starr-2020-core", "cagi5-frataxin-core", "huang-2020-core", ] else: assert COI == "interface" datasets = [ "elaspic-training-set-interface", "skempi-v2-interface", # "intact-mutations-interface", "dunham-2020-interface", "starr-2020-interface", ] feature_generators = [ "02_run_rosetta_ddg", "02_run_proteinsolver", "02_run_protbert", ]Load datadef expand_mutations(df): results = [] for row in df.itertuples(): for idx in range(len(row.mutation)): row_mut = { "unique_id": row.unique_id, "dataset": row.dataset, "name": row.name, "mutation": row.mutation[idx], "effect": row.effect[idx], "effect_type": row.effect_type, } for column in ["provean_score", "foldx_score", "elaspic_score"]: if hasattr(row, column): row_mut[column] = getattr(row, column)[idx] results.append(row_mut) return pd.DataFrame(results) def add_mutation_complement(df): df = df.copy() df["rev"] = False df_comp = df.copy() df_comp["rev"] = True df_comp["mutation"] = ( df_comp["mutation"].str[-1] + df_comp["mutation"].str[1:-1] + df_comp["mutation"].str[0] ) for column in ["effect", "provean_score", "foldx_score", "elaspic_score"]: if column in df_comp: df_comp[column] = -df_comp[column] for column in df_comp: if column.endswith("_wt"): column_mut = column[:-3] + "_mut" df_comp[column], df_comp[column_mut] = ( df_comp[column_mut].copy(), df_comp[column].copy(), ) df_out = pd.concat([df, df_comp], ignore_index=True) return df_out tmp_df = pd.DataFrame( [[0, "M1A", 1.234, "wt score", "mut score"], [1, "M2C", -0.05, "wt score 2", "mut score 2"]], columns=["unique_id", "mutation", "effect", "feature_wt", "feature_mut"], ) tmp2_df = add_mutation_complement(tmp_df) display(tmp_df) display(tmp2_df) def merge_feature_dfs(feature_dfs): def _clean_df(df): df = df.copy() assert len(df) == len(df[["unique_id", "mutation"]].drop_duplicates()) for column in ["effect", "effect_type", "provean_score", "foldx_score", "elaspic_score"]: if column in df: del df[column] return df if not feature_dfs: return None df = _clean_df(feature_dfs[0]) for other_df in feature_dfs[1:]: df = df.merge( _clean_df(other_df), how="outer", on=["unique_id", "mutation", "rev"] ) return df input_data = {} for dataset_name in datasets: input_file = OUTPUT_DIR.joinpath("01_load_data", f"{dataset_name}.parquet") pfile = pq.ParquetFile(input_file) task_count = pfile.num_row_groups df = pfile.read().to_pandas(integer_object_nulls=True) expanded_df = ( add_mutation_complement(expand_mutations(df)) # expand_mutations(df) .drop_duplicates(subset=["unique_id", "mutation"]) .sort_values(["unique_id", "mutation"]) ) # expanded_df["rev"] = False sequence_df = df[["unique_id", "protein_sequence", "ligand_sequence"]].drop_duplicates() keys = set(tuple(x) for x in expanded_df[["unique_id", "mutation", "rev"]].values) features = {} for feature_generator in feature_generators: output_dir = OUTPUT_DIR.joinpath(feature_generator) feature_dfs = [] for task_id in range(1, task_count + 1): output_file_template = "{dataset_name}-{task_prefix}{task_id}{task_suffix}-{task_count}.parquet" if feature_generator in ["02_run_rosetta_ddg"]: task_prefix_rev_list = [("wt2mut-", False), 
("mut2wt-", True)] else: task_prefix_rev_list = [("", None)] for (task_prefix, rev) in task_prefix_rev_list: output_file_kwargs = dict( dataset_name=dataset_name, task_prefix=task_prefix, task_id=task_id, task_count=task_count, ) output_file = OUTPUT_DIR.joinpath( feature_generator, output_file_template.format(task_suffix="", **output_file_kwargs) ).resolve() if output_file.is_file(): feature_df = pq.read_table(output_file).to_pandas(integer_object_nulls=True) else: subtask_feature_dfs = [] subtask_missing_files = [] for subtask_idx in range(20): subtask_output_file = OUTPUT_DIR.joinpath( feature_generator, output_file_template.format(task_suffix=string.ascii_lowercase[subtask_idx], **output_file_kwargs) ).resolve() if subtask_output_file.is_file(): feature_df = pq.read_table(subtask_output_file).to_pandas(integer_object_nulls=True) subtask_feature_dfs.append(feature_df) else: subtask_missing_files.append(subtask_output_file) if subtask_feature_dfs: feature_df = pd.concat(subtask_feature_dfs, ignore_index=True) if subtask_missing_files: for subtask_missing_file in subtask_missing_files: print(f"File {subtask_missing_file} is missing. Skipping...") else: print(f"File {output_file} is missing. Skipping...") continue if feature_df.empty: print(f"File {output_file} contains no data. Skipping...") continue if rev in [True, False]: feature_df["rev"] = rev else: feature_df = add_mutation_complement(feature_df) if rev is True: feature_df["unique_id"] = -feature_df["unique_id"].values assert not set(tuple(x) for x in feature_df[["unique_id", "mutation", "rev"]].values) - keys, (dataset_name, feature_generator, task_id) feature_dfs.append(feature_df) if not feature_dfs: print( f"No data collected for dataset {dataset_name} and feature generator {feature_generator}." ) continue final_feature_df = pd.concat(feature_dfs, ignore_index=True) features[feature_generator] = final_feature_df input_data[dataset_name] = { "expanded_df": expanded_df, "sequence_df": sequence_df, "feature_df": merge_feature_dfs(list(features.values())), }Merge togetherexpanded_df = pd.concat( [d["expanded_df"] for d in input_data.values() if d["feature_df"] is not None] ) sequence_df = pd.concat( [d["sequence_df"] for d in input_data.values() if d["feature_df"] is not None] ) features_df = pd.concat( [d["feature_df"] for d in input_data.values() if d["feature_df"] is not None] ).sort_values(["unique_id", "mutation"]) assert features_df["unique_id"].min() >= 0 len(features_df) input_wn_df = expanded_df.merge(features_df, on=["unique_id", "mutation", "rev"], validate="1:1", how="outer") # assert len(input_wn_df) == len(features_df), (len(expanded_df), len(features_df), len(input_wn_df)) assert input_wn_df["dataset"].notnull().all() print( f"Lost {len(expanded_df) - len(features_df):,} out of {len(expanded_df):,} rows due to missing features." 
) # Correct the sign on some features for dataset, effect_type in [ ("protherm-dagger-core", "-ΔΔG"), ("rocklin-2017-core", "Stability score change"), ("dunham_2020_tianyu", "Deep mutation scan"), ("starr_2020_tianyu", "Deep mutation scan"), ]: mask = (input_wn_df["dataset"] == dataset) & (input_wn_df["effect_type"] == effect_type) if mask.any(): print(f"Reversing sign for {dataset} ({effect_type})...") input_wn_df.loc[mask, "effect"] = -input_wn_df.loc[mask, "effect"] if effect_type == "-ΔΔG": input_wn_df.loc[mask, "effect_type"] = "ΔΔG" len(input_wn_df) columns = [c for c in input_wn_df if c.startswith("protbert_")] input_wn_df[columns].isnull().sum() # 194 columns = [c for c in input_wn_df if c.startswith("proteinsolver_")] input_wn_df[columns].isnull().sum() # 308 columns = [c for c in input_wn_df if c.startswith("rosetta_")] input_wn_df[columns].isnull().sum().head() # 79,025Remove rows with missing valuesinput_df = input_wn_df.dropna( subset=[ c for c in input_wn_df if c.startswith("protbert_") or c.startswith("proteinsolver_") ] ) print( f"Lost {len(input_wn_df) - len(input_df):,} out of {len(input_wn_df):,} rows due to missing features." ) _before = len(input_df) input_df = input_df[~input_df["effect"].isnull()] print( f"Lost {_before - len(input_df):,} out of {_before:,} rows due to missing effect values." ) input_df = input_df.copy() len(input_df) # Core: 642160 assert not input_df["effect"].isnull().any()Feature engineeringdef assign_delta(input_df, column, column_ref, column_change): pca_columns = [] value_sample = input_df[column].iloc[0] if isinstance(value_sample, (list, np.ndarray)): input_df[column_change] = input_df[column] - input_df[column_ref] return True else: input_df[column_change] = input_df[column] - input_df[column_ref] return False pca_columns = [] for column in sorted(input_df): if column.endswith("_mut") and "_core2interface_" not in column: print(column, "(wt → mut)") column_ref = column[:-4] + "_wt" column_change = column[:-4] + "_change" if assign_delta(input_df, column, column_ref, column_change): pca_columns.extend([column_ref, column_change]) for column in sorted(input_df): if "_interface_" in column and not column.endswith("_mut"): print(column, "(core → interface)") column_ref = column.replace("_interface_", "_core_") column_change = column.replace("_interface_", "_core2interface_") if assign_delta(input_df, column, column_ref, column_change): pca_columns.extend([column_change]) pca_columnsRemove invalid datasetsinput_df["dataset"].value_counts() # CORE # cosmic 469802 # ... 
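To make the wt → mut delta construction in the feature-engineering cell above concrete, here is a small self-contained sketch on a toy DataFrame; the column names follow the `*_wt` / `*_mut` naming pattern used in this notebook, but the values are invented purely for illustration:

import numpy as np
import pandas as pd

# Toy data only: one scalar feature and one array-valued feature, each as a wt/mut pair.
toy_df = pd.DataFrame({
    "protbert_core_score_wt": [0.10, 0.30],
    "protbert_core_score_mut": [0.25, 0.05],
    "protbert_core_features_wt": [np.array([1.0, 2.0]), np.array([0.5, 0.5])],
    "protbert_core_features_mut": [np.array([1.5, 1.0]), np.array([0.0, 1.5])],
})

for column in list(toy_df):
    if column.endswith("_mut"):
        column_ref = column[:-4] + "_wt"
        column_change = column[:-4] + "_change"
        # Scalar columns give a scalar delta; array-valued columns subtract elementwise.
        # It is the array-valued "_change" columns that are later compressed with PCA.
        toy_df[column_change] = toy_df[column] - toy_df[column_ref]

print(toy_df[["protbert_core_score_change", "protbert_core_features_change"]])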
if COI == "core": datasets_to_drop = { "cagi4_sumo_ligase", "benedix_et_al", "hiv_escape_mutations", "ab_bind", "skempiskempi", "taipale_ppi", # "cosmic", } else: datasets_to_drop = { "cagi4_sumo_ligase", "benedix_et_al", "hiv_escape_mutations", "taipale", } input_df = input_df[~input_df["dataset"].isin(datasets_to_drop)] input_df["dataset"].value_counts() for (dataset, effect_type), gp in input_df.groupby(["dataset", "effect_type"]): gp = gp.copy() gp_sub = gp.dropna(subset=["effect", "protbert_core_score_change"]) corr1 = stats.spearmanr(gp_sub["effect"], gp_sub["protbert_core_score_change"]) gp_sub = gp_sub[gp_sub["rev"] == False] corr2 = stats.spearmanr(gp_sub["effect"], gp_sub["protbert_core_score_change"]) if corr1[0] > 0 or corr2[0] > 0: print(dataset, effect_type) for column in [ "provean_score", "foldx_score", "elaspic_score", "protbert_core_score_change", "proteinsolver_core_score_change", ]: gp_sub = gp.dropna(subset=["effect", column]) corr = stats.spearmanr(gp_sub["effect"], gp_sub[column]) print(f"{column:30s} {corr[0]:+.4} {corr[1]:.4}") gp_sub = gp_sub[gp_sub["rev"] == False] corr = stats.spearmanr(gp_sub["effect"], gp_sub[column]) print(f"{column:30s} {corr[0]:+.4} {corr[1]:.4}") print() for (dataset, effect_type), gp in input_df.groupby(["dataset", "effect_type"]): gp = gp.dropna(subset=["effect", "protbert_core_score_change"]) assert len(gp) corr = stats.spearmanr(gp["effect"], gp["protbert_core_score_change"]) assert corr[0] <= 0, (dataset, effect_type)Remove duplicateshumsavar_unique_ids = set(input_df[input_df["dataset"] == "humsavar"]["unique_id"].unique()) humsavar_sequences = set(tuple(s) for s in sequence_df[sequence_df["unique_id"].isin(humsavar_unique_ids)][["protein_sequence", "ligand_sequence"]].values) len(input_df) # 638184 clinvar_unique_ids = set(input_df[input_df["dataset"] == "clinvar"]["unique_id"].unique()) _before = len(clinvar_unique_ids) clinvar_unique_ids = { uid for uid, pseq, lseq in sequence_df[sequence_df["unique_id"].isin(clinvar_unique_ids)][["unique_id", "protein_sequence", "ligand_sequence"]].values if (pseq, lseq) not in humsavar_sequences } print(f"Removed {_before - len(clinvar_unique_ids)} clinvar unique ids.") input_df = input_df[(input_df["dataset"] != "clinvar") | (input_df["unique_id"].isin(clinvar_unique_ids))] len(input_df) # 617500 clinvar_sequences = set(tuple(s) for s in sequence_df[sequence_df["unique_id"].isin(clinvar_unique_ids)][["protein_sequence", "ligand_sequence"]].values) cosmic_unique_ids = set(input_df[input_df["dataset"] == "cosmic"]["unique_id"].unique()) _before = len(cosmic_unique_ids) cosmic_unique_ids = { uid for uid, pseq, lseq in sequence_df[sequence_df["unique_id"].isin(cosmic_unique_ids)][["unique_id", "protein_sequence", "ligand_sequence"]].values if (pseq, lseq) not in humsavar_sequences and (pseq, lseq) not in clinvar_sequences } print(f"Removed {_before - len(cosmic_unique_ids)} cosmic unique ids.") input_df = input_df[(input_df["dataset"] != "cosmic") | (input_df["unique_id"].isin(cosmic_unique_ids))] len(input_df) # 516344 input_df["dataset"].value_counts()Cluster by sequence identitydef obtain_clusters(input_sequences, min_seq_id=0.3): with tempfile.TemporaryDirectory() as tmp_dir: input_dir = Path(tmp_dir, "input") input_dir.mkdir() output_dir = Path(tmp_dir, "output") output_dir.mkdir() scratch_dir = Path(tmp_dir, "scratch") scratch_dir.mkdir() with input_dir.joinpath("input.fasta").open("wt") as fout: for tup in input_sequences.itertuples(): 
fout.write(f">{tup.unique_id}\n{tup.protein_sequence}\n") system_command = f"mmseqs easy-cluster --min-seq-id {min_seq_id} '{input_dir}/input.fasta' '{output_dir}/result' '{scratch_dir}'" print(system_command) proc = subprocess.run(shlex.split(system_command), capture_output=True, check=True) cluster_df = pd.read_csv( output_dir.joinpath("result_cluster.tsv"), sep="\t", names=["cluster_id", "unique_id"] ) assert len(cluster_df) == len(cluster_df["unique_id"].unique()) return cluster_df input_sequences = sequence_df.merge(input_df[["unique_id"]].drop_duplicates()) len(input_sequences) # CORE: 13779 cluster_df = obtain_clusters(input_sequences) cluster_df.head() if "cluster_id" in input_df: del input_df["cluster_id"] input_df = input_df.merge(cluster_df, on="unique_id", how="outer", validate="m:1") assert input_df["cluster_id"].notnull().all()Extract out independent test dataif COI == "core": test_datasets = { "starr_2020_tianyu", "huang_2020", "cagi5_frataxin", } else: test_datasets = { "starr_2020_tianyu", } input_test_df = input_df[input_df["dataset"].isin(test_datasets)].copy() print(input_test_df["dataset"].unique()) print(len(input_test_df)) # test_cluster_ids = set(input_test_df["cluster_id"]) # TODO: input_train_df = input_df[~input_df["dataset"].isin(test_datasets)].copy() print(input_train_df["dataset"].unique()) print(len(input_train_df))Train / validation splitimport heapq from dataclasses import dataclass, field from typing import Any def _update_mapping(df, mapping, num_folds): @dataclass(order=True) class PrioritizedItem: priority: int idx: int = field(compare=False) data: Any = field(compare=False) pq = [PrioritizedItem(0, i, []) for i in range(num_folds)] for cluster_id, gp in df.groupby("cluster_id"): if cluster_id in mapping: item_idx = mapping[cluster_id] item = next(item for item in pq if item.idx == item_idx) item.priority += len(gp) item.data.append(cluster_id) heapq.heapify(pq) else: item = heapq.heappop(pq) item.priority += len(gp) item.data.append(cluster_id) heapq.heappush(pq, item) for item in pq: for cluster_id in item.data: if cluster_id in mapping: assert mapping[cluster_id] == item.idx else: mapping[cluster_id] = item.idx return mapping def map_to_test_fold(input_df, effect_types, num_folds): dfs = [input_df[input_df["effect_type"] == effect_type] for effect_type in effect_types] assert sum(len(df) for df in dfs) == len(input_df) mapping = {} for df in dfs: mapping = _update_mapping(df, mapping, num_folds) return mapping input_train_df["effect_type"].unique() if COI == "core": num_folds = 6 else: num_folds = 6 cluster_id_to_test_fold_mapping = map_to_test_fold( input_train_df, ["ΔΔG", "ΔΔG (from Kon/Koff)", "ΔΔG (from affinity)", "Stability score change", "Deep mutation scan", "Deleteriousness score", "Deleteriousness class"], num_folds=num_folds) input_train_df["test_fold"] = input_train_df["cluster_id"].map(cluster_id_to_test_fold_mapping) assert input_train_df["test_fold"].notnull().all() assert len(input_train_df["test_fold"].unique()) == num_folds input_train_df["test_fold"].value_counts()Train PCA modelsn_components = 10 for column in pca_columns: print(column) values = np.vstack(input_train_df[column].values) pickle_file = NOTEBOOK_DIR.joinpath(f"pca-{column}-{COI}.pickle") if pickle_file.is_file(): pca = torch.load(pickle_file) else: pca = PCA(n_components=n_components) pca.fit(values) torch.save(pca, pickle_file) values_out = pca.transform(values) for i in range(n_components): new_column = f"{column}_{i}_pc" input_train_df[new_column] = values_out[:, 
i] train_test_splits = [] ps = PredefinedSplit(input_train_df["test_fold"]) for split_idx, (train, test) in enumerate(tqdm(ps.split(), total=n_components)): train_df = input_train_df.iloc[train].sample(frac=1.0, replace=False).sort_values(["unique_id"]).copy() test_df = input_train_df.iloc[test].sample(frac=1.0, replace=False).sort_values(["unique_id"]).copy() assert not set(train_df["cluster_id"]) & set(test_df["cluster_id"]) first_row = train_df.iloc[0] for column in list(train_df): value = first_row[column] if isinstance(value, (list, tuple, np.ndarray)): del train_df[column], test_df[column] train_test_splits.append((train_df, test_df))Save resultsNOTEBOOK_DIR with NOTEBOOK_DIR.parent.joinpath("04_train_model", f"pca-columns-{COI}.{DATASET_VERSION}.parquet").open("wt") as fout: json.dump(pca_columns, fout) output_file = NOTEBOOK_DIR.parent.joinpath("04_train_model", f"sequences-{COI}.{DATASET_VERSION}.parquet") pq.write_table( pa.Table.from_pandas(sequence_df, preserve_index=False), output_file, row_group_size=1_000, ) output_file = NOTEBOOK_DIR.parent.joinpath("04_train_model", f"input-train-{COI}.{DATASET_VERSION}.parquet") pq.write_table( pa.Table.from_pandas(input_train_df, preserve_index=False), output_file, row_group_size=10_000, ) output_file = NOTEBOOK_DIR.parent.joinpath("04_train_model", f"input-test-{COI}.{DATASET_VERSION}.parquet") pq.write_table( pa.Table.from_pandas(input_test_df, preserve_index=False), output_file, row_group_size=10_000, ) # for idx, (train_df, test_df) in enumerate(train_test_splits): # print(idx) # output_file = NOTEBOOK_DIR.parent.joinpath("04_train_model", f"xval-train-{COI}-{idx}.{DATASET_VERSION}.parquet") # pq.write_table( # pa.Table.from_pandas(train_df, preserve_index=False), # output_file, # row_group_size=10_000, # ) # output_file = NOTEBOOK_DIR.parent.joinpath("04_train_model", f"xval-test-{COI}-{idx}.{DATASET_VERSION}.parquet") # pq.write_table( # pa.Table.from_pandas(test_df, preserve_index=False), # output_file, # row_group_size=10_000, # )Optimize labelsfeature_columns = [ c for c in list(train_test_splits[0][0]) if (c.endswith("_wt") or c.endswith("_mut") or c.endswith("_change") or c.endswith("_pc")) and not (c.endswith("dg_change") or c.startswith("rosetta_")) ] # feature_columns other_columns = [c for c in list(train_test_splits[0][0]) if c not in feature_columns] # other_columns def get_label(df): effect = df["effect"].values.copy() mask = df["effect_type"].str.startswith("ΔΔG") effect[mask] *= 0.8 mask = df["effect_type"] == "Deleteriousness class" effect[mask] *= 1 mask = df["effect_type"] == "Stability score change" effect[mask] *= 5 mask = df["effect_type"] == "Deleteriousness score" if mask.any(): assert effect[mask].min() >= -5 and effect[mask].max() <= 5 mask = df["effect_type"] == "Deep mutation scan" effect[mask] *= 4 effect = np.rint(np.clip(effect, -5, 5) * 100 + 500) return effect input_train_df["effect_type"].value_counts() _ = plt.hist(get_label(input_train_df), bins=100) _ = plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'Deleteriousness score']), bins=100) _ = plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'Stability score change']), bins=100) _ = plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'Deep mutation scan']), bins=100) _ = plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'ΔΔG']), bins=100) _ = plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'ΔΔG (from affinity)']), bins=100) _ = 
plt.hist(get_label(input_train_df[input_train_df["effect_type"] == 'ΔΔG (from Kon/Koff)']), bins=100)Optimize groupsdef assert_get_group_valid(df): assert df["unique_id"].is_monotonic_increasing prev = None for unique_id, rev in df[['unique_id', "rev"]].values: if prev is not None: if not rev: assert unique_id != prev[0] or not prev[1], (unique_id, rev, prev) else: assert unique_id == prev[0] prev = (unique_id, rev) def get_group(df, max_group_size=100): assert df["unique_id"].is_monotonic_increasing vc = df["unique_id"].value_counts() groups = [vc[uid] for uid in df["unique_id"].unique()] if max_group_size: old_groups, groups = groups, [] for idx, group in enumerate(old_groups): if group <= max_group_size: groups.append(group) else: num_subgroups = math.ceil(group / max_group_size) num_per_group = math.floor(group / num_subgroups) subgroups = [num_per_group] * num_subgroups if (remainder := group - sum(subgroups)): assert remainder < num_subgroups for remainder_idx in range(remainder): subgroups[remainder_idx] += 1 groups.extend(subgroups) assert sum(groups) == len(df), (sum(groups), len(df)) assert not max_group_size or max(groups) <= max_group_size return np.array(groups) if COI == "core": max_group_size = 100 else: max_group_size = 100 plt.hist(np.clip(get_group(input_train_df.sort_values(["unique_id"]), max_group_size), 0, max_group_size), bins=100) NoneTrain modeldef train_model(input, param, early_stopping_rounds=10): train_df, test_df = input train_ds = lgb.Dataset( train_df[feature_columns], label=get_label(train_df), group=get_group(train_df, max_group_size=max_group_size), ) valid_ds = lgb.Dataset( test_df[feature_columns], label=get_label(test_df), group=get_group(test_df, max_group_size=max_group_size), reference=train_ds, ) bst = lgb.train( param, train_ds, valid_sets=[valid_ds], num_boost_round=100, verbose_eval=False, # feval=my_feval, # early_stopping_rounds=early_stopping_rounds, ) return bst skempi_unique_ids = set(input_train_df[input_train_df["dataset"] == "skempi++"]["unique_id"].unique()) skempi_sequences = set(tuple(s) for s in sequence_df[sequence_df["unique_id"].isin(skempi_unique_ids)][["protein_sequence", "ligand_sequence"]].values) skempi_v2_unique_ids = set(input_train_df[input_train_df["dataset"] == "skempi-v2"]["unique_id"].unique()) skempi_v2_unique_ids = { uid for uid, pseq, lseq in sequence_df[sequence_df["unique_id"].isin(skempi_v2_unique_ids)][["unique_id", "protein_sequence", "ligand_sequence"]].values if (pseq, lseq) not in skempi_sequences } def get_aggregate_spearmanr(result_df, datasets): corrs = [] for dataset, effect_type, *_ in datasets: df = result_df[ (result_df["dataset"] == dataset) & (result_df["effect_type"] == effect_type) & (result_df["rev"] == False) ] if dataset == "skempi-v2": df = df[df["unique_id"].isin(skempi_v2_unique_ids)] df = df.dropna(subset=["effect", "ddg_pred"]) corr = stats.spearmanr(df["effect"], df["ddg_pred"])[0] corrs.append(corr) return sum(corrs) / len(corrs) if COI == "core": columns_full = [ "ddg_pred", "elaspic_score", "foldx_score", "rosetta_dg_change", ] datasets_eval = [ ["protherm++", "ΔΔG", columns_full], ["humsavar", "Deleteriousness class", columns_full], ["clinvar", "Deleteriousness class", columns_full], ["cosmic", "Deleteriousness class", columns_full], ["taipale", "ΔΔG", columns_full], # ["taipale_gpca", "ΔΔG", columns_full], # ["cagi5_frataxin", "ΔΔG", ["ddg_pred"]], ["rocklin-2017-core", "Stability score change", ["ddg_pred", "rosetta_dg_change"]], ["dunham_2020_tianyu", "Deep mutation scan", 
["ddg_pred", "rosetta_dg_change"]], # ["protherm-dagger-core", "ΔΔG", ["ddg_pred", "rosetta_dg_change"]], ] else: columns_full = [ "ddg_pred", "elaspic_score", "foldx_score", "rosetta_complex_dg_change", ] datasets_eval = [ ["skempi++", "ΔΔG", columns_full], ["humsavar", "Deleteriousness class", columns_full], ["clinvar", "Deleteriousness class", columns_full], ["cosmic", "Deleteriousness class", columns_full], ["ab_bind", "ΔΔG", ["ddg_pred", "elaspic_score", "foldx_score"]], # ["taipale", "ΔΔG", eval_columns], ["skempi-v2", "ΔΔG (from affinity)", ["ddg_pred", "rosetta_complex_dg_change"]], # ["skempi-v2", "ΔΔG (from Kon/Koff)", ["ddg_pred", "rosetta_complex_dg_change"]], ["dunham_2020_tianyu", "Deep mutation scan", ["ddg_pred", "rosetta_complex_dg_change"]], ] const_param = { "objective": "lambdarank", "metric": "ndcg", "verbosity": -1, "eval_at": 1_000_000, "label_gain": [np.log2(2 + i) for i in range(0, 1_001)], "force_col_wise": True, "num_threads": 40, }def objective(trial): param = { **const_param, num_trees = 100 "learning_rate": trial.suggest_loguniform("lambda_l1", 1e-3, 1.0), "num_iterations": trial.suggest_int("num_leaves", 64, 256), "max_bin": trial.suggest_categorical("max_bin", [255, 511]), 255 "num_leaves": trial.suggest_int("num_leaves", 2, 512), 256 "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 5, 200), 100 "lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0), "lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0), "feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0), "bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.4, 1.0), "bagging_freq": trial.suggest_int("bagging_freq", 1, 7), } bsts = [] result_dfs = [] for train_df, test_df in train_test_splits: assert not set(train_df["cluster_id"]) & set(test_df["cluster_id"]) bst = train_model((train_df, test_df), param) bsts.append(bst) test_df = test_df.copy() test_df["ddg_pred"] = bst.predict( test_df[feature_columns], num_iteration=bst.best_iteration ) result_dfs.append(test_df) result_df = pd.concat(result_dfs, ignore_index=True) score = get_aggregate_spearmanr(result_df, datasets_eval) return scorestart_time = time.perf_counter()study = optuna.create_study(direction="maximize")study.optimize(objective, n_trials=100, n_jobs=2)print(f"Elaspsed: {time.perf_counter() - start_time}.")if COI =="core": best_params = {'num_leaves': 131, 'lambda_l1': 0.06090843013079758, 'lambda_l2': 1.682306739340599, 'feature_fraction': 0.6427647079708247, 'bagging_fraction': 0.5908679308527225, 'bagging_freq': 6, 'min_child_samples': 47} else: best_params = { 'max_bin': 511, 'num_leaves': 64, 'min_data_in_leaf': 168, 'lambda_l1': 1.8149466697376564e-05, 'lambda_l2': 4.3022548294881256e-07, 'feature_fraction': 0.6326206839855546, 'bagging_fraction': 0.7398095524057099, 'bagging_freq': 6, } best_params = {"max_bin": 255, "learning_rate": 0.1} param = { **const_param, # **{"max_bin": 255, "learning_rate": 0.1, "force_col_wise": True}, **best_params, # **study.best_params, "num_threads": 80, "verbosity": 1, } start_time = time.perf_counter() bsts = [] result_dfs = [] for split_idx, (train_df, test_df) in enumerate(train_test_splits): print(split_idx, len(train_df), len(test_df)) assert not set(train_df["cluster_id"]) & set(test_df["cluster_id"]) bst = train_model((train_df, test_df), param, early_stopping_rounds=10) bsts.append(bst) test_df = test_df.copy() test_df["ddg_pred"] = bst.predict( test_df[feature_columns], num_iteration=bst.best_iteration ) result_dfs.append(test_df) result_df 
= pd.concat(result_dfs, ignore_index=True) print(f"Elaspsed: {time.perf_counter() - start_time}.") score = get_aggregate_spearmanr(result_df, datasets_eval) score # Interface: 0.325 # Core: 0.3565635315814614300:200: 0.3910785927589155100: 0.4002496796653158len(feature_columns) import json with open(f"05_feature_elimination/feature-columns-{COI}-0.json", "wt") as fout: json.dump(feature_columns, fout) import json with open(f"05_feature_elimination/feature-columns-interface-0.json", "rt") as fin: print(len(json.load(fin))) for split_idx, bst in enumerate(tqdm(bsts, total=n_components)): print(split_idx) for column in pca_columns: pickle_file = NOTEBOOK_DIR.joinpath(f"pca-{column}-{COI}.pickle") pca = torch.load(pickle_file) values = np.vstack(input_test_df[column].values) values_out = pca.transform(values) for i in range(n_components): new_column = f"{column}_{i}_pc" input_test_df[new_column] = values_out[:, i] input_test_df[f"ddg_pred_{split_idx}"] = bst.predict( input_test_df[feature_columns], num_iteration=bst.best_iteration ) input_test_df[f"ddg_pred"] = input_test_df[[f"ddg_pred_{i}" for i in range(6)]].max(axis=1) def get_spearman_corrs_global(df, feature_columns, target_column, drop_na=True): if drop_na: df = df.dropna(subset=feature_columns + [target_column]) corrs = {} for column in feature_columns: sign = -1 if any(column.startswith(prefix) for prefix in ["provean_", "protbert_", "proteinsolver_"]) else 1 df_nna = df.dropna(subset=[column, target_column]) corr = stats.spearmanr(sign * df_nna[column], df_nna[target_column]) corrs[column] = (corr[0], corr[1], len(df_nna)) # print(f"{column:30s} {corr[0]:+.4} {corr[1]:.4}") return corrs def get_spearman_corrs_perseq(df, feature_columns, target_column, min_gp_size=6, drop_na=True): if drop_na: df = df.dropna(subset=feature_columns + [target_column]) results = {c: [] for c in feature_columns} for _, gp in df.groupby("unique_id"): if len(gp) < min_gp_size or len(set(gp[target_column])) < 2: continue for column in feature_columns: sign = -1 if any(column.startswith(prefix) for prefix in ["provean_", "protbert_", "proteinsolver_"]) else 1 gp_nna = gp.dropna(subset=[column, target_column]) corr = stats.spearmanr(sign * gp_nna[column], gp_nna[target_column]) results[column].append(corr[0]) return results def print_spearman_corrs(corrs): for column, corr in corrs.items(): print(f"{column:30s} {corr[0]:+.4} {corr[1]:.4} ({corr[2]})") import matplotlib.pyplot as plt from IPython.display import set_matplotlib_formats set_matplotlib_formats("png") FIGURE_OUTPUT_DIR = Path(f"05_model_validation_{COI}").resolve() FIGURE_OUTPUT_DIR.mkdir(exist_ok=True) FIGURE_OUTPUT_DIR cmap = plt.cm.get_cmap("tab20") result_df[["dataset", "effect_type"]].drop_duplicates() def get_spearman_corrs_global_xxx(df, feature_columns, target_column, drop_na=True): if drop_na: df = df.dropna(subset=feature_columns + [target_column]) corrs = {} for column in feature_columns: sign = -1 if any(column.startswith(prefix) for prefix in ["provean_", "protbert_", "proteinsolver_"]) else 1 df_nna = df.dropna(subset=[column, target_column]) feature_values = sign * df_nna[column].values feature_values = np.hstack([feature_values, -feature_values]) target_values = df_nna[target_column] target_values = np.hstack([target_values, -target_values]) corr = stats.spearmanr(feature_values, target_values) corrs[column] = (corr[0], corr[1], len(df_nna)) # print(f"{column:30s} {corr[0]:+.4} {corr[1]:.4}") return corrs rev = [False] if rev == [False]: suffix = "" else: assert rev == [False, True] 
suffix = "-rev" from matplotlib.ticker import FormatStrFormatter fg, axs = plt.subplots(2, len(datasets_eval), figsize=(12, 8)) for idx, (dataset, effect_type, eval_columns) in enumerate(datasets_eval): df = result_df[ (result_df["effect_type"] == effect_type) & (result_df["dataset"] == dataset) & (result_df["rev"].isin(rev)) ] if dataset == "skempi-v2": df = df[df["unique_id"].isin(skempi_v2_unique_ids)] corrs = get_spearman_corrs_global(df, eval_columns, "effect") per_sequence_stats = get_spearman_corrs_perseq(df, eval_columns, "effect", min_gp_size=8) ax = axs[0, idx] x = np.arange(len(corrs)) y = [c[0] for c in corrs.values()] out = ax.bar(x, y, color=cmap(1), edgecolor="k") _ = ax.set_xticks(x) _ = ax.set_xticklabels([""] * len(x), rotation="vertical") ax.set_title(f"{dataset}") ax.set_ylim(-0.025, 0.825) if idx == 0: ax.set_ylabel("Global Spearman's ρ") ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = axs[1, idx] out = ax.boxplot( per_sequence_stats.values(), patch_artist=True, boxprops={"facecolor": cmap(1)}, medianprops={"color": cmap(0)}, ) bp = ax.set_xticklabels(per_sequence_stats.keys(), rotation="vertical") ax.set_ylim(-1.05, 1.05) if idx == 0: ax.set_ylabel("Per-protein Spearman's ρ") ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) fg.subplots_adjust(top=0.95, right=0.98, bottom=0.38) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-xval-{COI}{suffix}.svg"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-xval-{COI}{suffix}.png"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-xval-{COI}{suffix}.pdf"), dpi=300) plt.plot(df[df["rev"] == False]["effect"], df[df["rev"] == False]["ddg_pred"], 'r.', alpha=0.3) plt.plot(-df[df["rev"] == False]["effect"], -df[df["rev"] == False]["ddg_pred"], 'g.', alpha=0.3) plt.plot(df[df["rev"] == True]["effect"], df[df["rev"] == True]["ddg_pred"], 'b.', alpha=0.3) plt.xlabel("effect") if COI == "core": eval_columns = [ "ddg_pred", # "elaspic_score", # "foldx_score", # "rosetta_dg_change", # "provean_score", "protbert_core_score_change", "proteinsolver_core_score_change", ] else: eval_columns = [ "ddg_pred", # "elaspic_score", # "foldx_score", # "rosetta_complex_dg_change", # "provean_score", "protbert_core_score_change", "proteinsolver_core_score_change", # # "rosetta_opt_apart_dg_change", # "rosetta_apart_dg_change", # "rosetta_opt_bind_dg_change", # "rosetta_bind_dg_change", ] dataset, effect_type = ("huang_2020", "ΔΔG") dataset, effect_type = ("starr_2020_tianyu", "Deep mutation scan") # dataset, effect_type = ("cagi5_frataxin", "ΔΔG") rev = [False, True] # df = result_df[ # (result_df["effect_type"] == effect_type) # & (result_df["dataset"] == dataset) # & (result_df["rev"].isin(rev)) # ] df = input_test_df[ (input_test_df["effect_type"] == effect_type) & (input_test_df["dataset"] == dataset) & (input_test_df["rev"].isin(rev)) ] suffix = f"-{dataset}" if rev != [False, True]: assert rev == [False] suffix += "-norev" from matplotlib.ticker import FormatStrFormatter corrs = get_spearman_corrs_global(df, eval_columns, "effect") per_sequence_stats = get_spearman_corrs_perseq(df, eval_columns, "effect", min_gp_size=6) fg, axs = plt.subplots(2, 1, figsize=(3, 8)) ax = axs[0] x = np.arange(len(corrs)) y = [c[0] for c in corrs.values()] out = ax.bar(x, y, color=cmap(1), edgecolor="k") _ = ax.set_xticks(x) _ = ax.set_xticklabels([""] * len(x), rotation="vertical") ax.set_ylabel("Global Spearman's ρ") ax.set_title(f"{dataset} - {effect_type}") ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) ax = axs[1] out = 
ax.boxplot( per_sequence_stats.values(), patch_artist=True, boxprops={"facecolor": cmap(1)}, medianprops={"color": cmap(0)}, ) bp = ax.set_xticklabels(per_sequence_stats.keys(), rotation="vertical") ax.set_ylabel("Per-protein Spearman's ρ") ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) fg.subplots_adjust(top=0.95, right=0.98, bottom=0.38) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-perseq{suffix}.svg"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-perseq{suffix}.png"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath(f"corrs-perseq{suffix}.pdf"), dpi=300) plt.plot(df[df["rev"] == False]["effect"], df[df["rev"] == False]["ddg_pred"], 'r.', alpha=0.3) # plt.plot(-df[df["rev"] == False]["effect"], -df[df["rev"] == False]["ddg_pred"], 'g.', alpha=0.3) # plt.plot(df[df["rev"] == True]["effect"], df[df["rev"] == True]["ddg_pred"], 'b.', alpha=0.3) df2 = df[df["rev"] == False][["mutation", "ddg_pred"]] df2["mutation"] = df2["mutation"].str[-1] + df2["mutation"].str[1:-1] + df2["mutation"].str[0] df2["ddg_pred"] = -df2["ddg_pred"] df2 = df2.merge(df[df["rev"] == True][["mutation", "ddg_pred"]], on=["mutation"]) df2.head() stats.spearmanr(df2["ddg_pred_x"], df2["ddg_pred_y"]) plt.plot(df[df["rev"] == False]["effect"], df[df["rev"] == False]["ddg_pred"], 'r.', alpha=0.3) plt.plot(df[df["rev"] == True]["effect"], df[df["rev"] == True]["ddg_pred"], 'b.', alpha=0.3) plt.hist(df[df["rev"] == False]["effect"], bins=40) def my_feval(preds, train_data): labels = train_data.get_label() groups = train_data.get_group() if len(set(preds)) < 2 or len(set(labels)) < 2: global_corr = 0 else: global_corr = stats.spearmanr(preds, labels)[0] weighted_corr_total = 0 weight_total = 0 start = 0 for group in groups: stop = start + group preds_slice = preds[start:stop] labels_slice = labels[start:stop] start = stop weight = math.sqrt(group) if group < 2: continue elif len(set(labels_slice)) < 2: continue elif len(set(preds_slice)) < 2: group_corr = 0 else: group_corr = stats.spearmanr(preds_slice, labels_slice)[0] weighted_corr_total += weight * group_corr weight_total += weight assert start == sum(groups) pergroup_corr = weighted_corr_total / weight_total eval_name = "wavg_spearman_rho" # eval_result = (global_corr / pergroup_corr) / 2 eval_result = pergroup_corr is_higher_better = True return eval_name, eval_result, is_higher_better def calculate_score(df): corr_global = stats.spearmanr(df["ddg_pred"], df["effect"])[0] perseq_score = 0 perseq_weight = 0 for _, gp in df.groupby("unique_id"): if len(set(gp["effect"])) < 2: continue elif len(set(gp["ddg_pred"])) < 2: weight = math.sqrt(len(gp)) corr = 0 else: weight = math.sqrt(len(gp)) corr = stats.spearmanr(gp["ddg_pred"], gp["effect"])[0] perseq_score += corr * weight perseq_weight += weight corr_perseq = perseq_score / perseq_weight return (corr_global + corr_perseq) / 2 df = result_df[ (result_df["effect_type"] == "ΔΔG") & (result_df["dataset"] == "skempi++") & (result_df["rev"].isin([False])) ] corrs = get_spearman_corrs_global(df, eval_columns, "effect") fg, ax = plt.subplots() x = np.arange(len(corrs)) y = [c[0] for c in corrs.values()] out = ax.bar(x, y, color=cmap(1), edgecolor="k") _ = ax.set_xticks(x) _ = ax.set_xticklabels(corrs.keys(), rotation="vertical") ax.set_ylabel("Spearman's ρ") ax.set_title("Global correlations") fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-global-skempi-norev.svg"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-global-skempi.png"), dpi=300) 
fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-global-skempi.pdf"), dpi=300) per_sequence_stats = get_spearman_corrs_perseq(result_df, eval_columns, "effect", min_gp_size=6) fg, ax = plt.subplots() out = ax.boxplot( per_sequence_stats.values(), patch_artist=True, boxprops={"facecolor": cmap(1)}, medianprops={"color": cmap(0)}, ) bp = ax.set_xticklabels(per_sequence_stats.keys(), rotation="vertical") ax.set_ylabel("Spearman's ρ") ax.set_title("Per-protein correlations") fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-perseq-skempi.svg"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-perseq-skempi.png"), dpi=300) fg.savefig(FIGURE_OUTPUT_DIR.joinpath("corrs-perseq-skempi.pdf"), dpi=300) print_spearman_stats( result_df[ (result_df["effect_type"] == "Deleteriousness class") & (result_df["rev"].isin([True, False])) ], eval_columns, "effect", ) # 0.488 result_df[ (result_df["effect_type"] == "Deleteriousness class") & (result_df["rev"].isin([True, False])) ]["dataset"].unique() print_spearman_stats( result_df[ (result_df["effect_type"] == "Deleteriousness score") & (result_df["rev"].isin([True, False])) ], eval_columns, "effect", ) # 0.4128 print_spearman_stats(result_df, ["ddg_pred", "rosetta_dg_change"], "label") # 0.4646 print_spearman_stats(result_df[result_df["effect_type"] == "Deleteriousness score"], eval_columns, "label") # 0.4077 print_spearman_stats(result_df[result_df["effect_type"] == "ΔΔG"], eval_columns, "effect") def compute_per_sequence_stats(df, feature_columns, target_column, min_gp_size=6): df = df.dropna(subset=feature_columns + [target_column]) results = {c: [] for c in feature_columns} for _, gp in df.groupby("unique_id"): if len(gp) < min_gp_size or len(set(gp[target_column])) < 2: continue for column in feature_columns: corr = stats.spearmanr(gp[column], gp[target_column]) results[column].append(corr[0]) return results import matplotlib.pyplot as plt per_sequence_stats = compute_per_sequence_stats(result_df, eval_columns, "effect", 6) fg, ax = plt.subplots() out = ax.boxplot(per_sequence_stats.values()) _ = ax.set_xticklabels(per_sequence_stats.keys(), rotation="vertical") # ax.set_ylim(-1, 1) # fg.tight_layout() per_sequence_stats_ddg = compute_per_sequence_stats( result_df[result_df["effect_type"] == "Deleteriousness class"], eval_columns, "effect", 18 ) fg, ax = plt.subplots() out = ax.boxplot(per_sequence_stats_ddg.values()) _ = ax.set_xticklabels(per_sequence_stats_ddg.keys(), rotation="vertical") # ax.set_ylim(-1, 1) # fg.tight_layout() per_sequence_stats_ddg = compute_per_sequence_stats( result_df[result_df["effect_type"] == "Deleteriousness score"], eval_columns, "effect", 18 ) fg, ax = plt.subplots() out = ax.boxplot(per_sequence_stats_ddg.values()) _ = ax.set_xticklabels(per_sequence_stats_ddg.keys(), rotation="vertical") # ax.set_ylim(-1, 1) # fg.tight_layout() out.keys() palette = ["r", "g", "b", "y"] for x, val, c in zip(xs, vals, palette): plt.scatter(x, val, alpha=0.4, color=c) plt.show() train_df[(train_df["effect"] * 1_000).astype(np.int) > 300_000] import matplotlib.pyplot as plt _ = plt.hist(input_df["effect"], bins=100, range=(-5, 5)) param = { "objective": "lambdarank", "metric": "ndcg", "ndcg_eval_at": 1000000000000, "max_bin": 255, } bst = lgb.train(param, train_ds, num_boost_round=100, valid_sets=[valid_ds]) ypred = bst.predict(test_df.drop(columns_to_drop, axis=1), num_iteration=bst.best_iteration) ypred = bst.predict(test_df.drop(columns_to_drop, axis=1), num_iteration=bst.best_iteration) test_df = test_df.copy() test_df["ddg_pred"] = ypred 
stats.spearmanr(test_df["effect"], test_df["ddg_pred"]) stats.spearmanr(test_df["effect"], test_df["foldx_score"]) stats.spearmanr(test_df["effect"], test_df["provean_score"])colab!nvidia-smi # ドライブをマウント import sys if 'google.colab' in sys.modules: from google.colab import drive drive.mount('/content/drive') import os, sys if "google.colab" in sys.modules: CP_DIR = f"/content/drive/MyDrive/Work/probspace_religious_art/notebook/{NAME}_colab/output" INPUT_DIR = "./eda_output/output" sys.path.append("/content/drive/MyDrive/Work/probspace_religious_art/code") elif "kaggle_web_client" in sys.modules: pass elif "/kqi/output" in os.getcwd(): pass else: # local CP_DIR = "output" INPUT_DIR = "../../eda/output" sys.path.append("../../../code") sys.path.append('../../../Git/Ranger-Deep-Learning-Optimizer') sys.path.append('../../../Git/pytorch-optimizer') from mix_aug import cutmix, fmix, snapmix, SnapMixLoss, resizemix # driveからzipコピーしてくる if os.getcwd() == "/content" and os.path.exists(INPUT_DIR) == False: !mkdir -p "./eda_output" !cp -r "/content/drive/MyDrive/Work/probspace_religious_art/notebook/eda/output.zip" "./eda_output" !unzip -qq "./eda_output/output.zip" -d "./eda_output" pass # colabで足りないライブラリinstall import os, sys if ("google.colab" in sys.modules) or ("kaggle_web_client" in sys.modules) or ("/kqi/output" in os.getcwd()): !pip install --upgrade albumentations !pip install --upgrade timm !pip install torch-optimizer passdata loadimport pandas as pd # ==================================================== # Data Load # ==================================================== def get_test_file_path(image_id): return f"{INPUT_DIR}/test/{str(image_id)}.jpg" test = pd.read_csv(INPUT_DIR + "/test.csv") test["file_path"] = test["image_id"].apply(get_test_file_path)influenceCFGs = [] NUM_CLASSES = 13 TTA_ROUND = 2 import yaml # ==================================================== # Param # ==================================================== cfg_yml = CP_DIR + "/cfg.yaml" with open(cfg_yml, "r") as f: cfg_dict = yaml.safe_load(f) cfg_dict["batch_size"] = cfg_dict["batch_size"] * 2 cfg_dict["num_workers"] = os.cpu_count() if ("google.colab" in sys.modules) or ("kaggle_web_client" in sys.modules) or ("/kqi/output" in os.getcwd()) else 0 cfg_dict["name"] = NAME class Cfg: def __init__(self, **entries): self.__dict__.update(entries) CFG = Cfg(**cfg_dict) print("CFG:", CFG.__dict__) CFGs.append(CFG) import glob from sklearn.metrics import accuracy_score for seed in CFG.seeds: oof_df = pd.read_csv(CP_DIR + f"/oof_seed{seed}.csv") score = accuracy_score(oof_df['label'].values, oof_df['pred'].values) print("seed:", seed, ", oof score:", score) import os import sys import cv2 import numpy as np import pandas as pd import matplotlib.pyplot as plt import torch from torch.utils.data import DataLoader, Dataset import albumentations as A from albumentations import Compose from albumentations.pytorch import ToTensorV2 # ==================================================== # Dataset # ==================================================== class TestDataset(Dataset): def __init__(self, df, transform=None): super().__init__() self.df = df self.file_paths = df["file_path"].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_path = self.file_paths[idx] image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32) if self.transform: augmented = self.transform(image=image) image = augmented["image"] return image def get_transforms(*, 
data): if data == "train": pass elif data == "valid": return Compose( [ A.Resize(CFG.size, CFG.size), A.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2(), ] ) def collate(batch): """DataLoaderに追加可能なbatchを加工する関数""" images, labels = list(zip(*batch)) images = torch.stack(images) labels = torch.stack(labels) return images, labels.long() # ==================================================== # Library # ==================================================== import sys import os import gc import re import math import time import random import yaml import shutil import glob import pickle import pathlib from pathlib import Path from contextlib import contextmanager from collections import defaultdict, Counter import scipy as sp import numpy as np import pandas as pd from tqdm.auto import tqdm from functools import partial import cv2 from PIL import Image import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.utils.data import DataLoader, Dataset from torch.cuda.amp import autocast, GradScaler sys.path.append(r'C:\Users\81908\Git\Ranger-Deep-Learning-Optimizer') sys.path.append(r'C:\Users\81908\Git\pytorch-optimizer') from torch_optimizer import RAdam, Lookahead import timm print("timm version:", timm.__version__) import warnings warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) # ==================================================== # Helper functions # ==================================================== def tta(img, ops): # input: NxCxHxW if ops == 0: pass elif ops == 1: img = torch.flip(img, [-1]) elif ops == 2: img = torch.flip(img, [-2]) elif ops == 3: img = torch.flip(img, [-1, -2]) elif ops == 4: img = torch.rot90(img, 1, [2, 3]) elif ops == 5: img = torch.rot90(img, 3, [2, 3]) else: pass return img def test_fn(test_loader, model, device, tta_round=1): # switch to evaluation mode model.eval() preds = [] tk0 = tqdm(test_loader, total=len(test_loader)) for step, (images) in enumerate(tk0): images = images.to(device) batch_size = images.size(0) with torch.no_grad(): with autocast(): if tta_round == 1: predictions = model.forward_softmax(images) # 確信度 else: predictions = torch.zeros([images.shape[0], NUM_CLASSES], device=device) for i, x in enumerate(images): for ops in range(tta_round): xi = torch.unsqueeze(tta(x, ops), 0) predictions[i] += model.forward_softmax(xi)[0] / tta_round # 確信度 pred = predictions.detach().cpu().numpy() preds.append(pred) preds = np.concatenate(preds) return preds # ==================================================== # Utils # ==================================================== def seed_torch(seed=42): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True # ==================================================== # Model # ==================================================== class TimmModel(nn.Module): def __init__(self, n_classes, model_name="resnet18", pretrained=True): super().__init__() self.cnn = timm.create_model(model_name, pretrained=pretrained) if "efficient" in model_name: self.cnn.classifier = nn.Linear(self.cnn.classifier.in_features, n_classes) elif "nfnet" in model_name: self.cnn.head.fc = nn.Linear(self.cnn.head.fc.in_features, n_classes) elif "vit" in model_name: self.cnn.head = nn.Linear(self.cnn.head.in_features, n_classes) elif "tnt" in 
model_name: self.cnn.head = nn.Linear(self.cnn.head.in_features, n_classes) elif "swin" in model_name: self.cnn.head = nn.Linear(self.cnn.head.in_features, n_classes) elif "cait" in model_name: self.cnn.head = nn.Linear(self.cnn.head.in_features, n_classes) elif "mixer" in model_name: self.cnn.head = nn.Linear(self.cnn.head.in_features, n_classes) else: self.cnn.fc = nn.Linear(self.cnn.fc.in_features, n_classes) def forward(self, x): return self.cnn(x) def forward_softmax(self, x): return torch.softmax(self.cnn(x), 1) def forward_argmax(self, x): return self.cnn(x).argmax(1) # ==================================================== # main # ==================================================== def main(test, CFGs): Y_pred_ens = np.zeros((len(test), NUM_CLASSES)) for CFG in CFGs: print(f"========== {CFG.name} testing ==========") Y_pred_seed = np.zeros((len(test), CFG.n_classes)) for seed in CFG.seeds: seed_torch(seed=seed) print(f"========== seed: {seed} testing ==========") Y_pred = np.zeros((len(test), CFG.n_classes)) #Y_pred = pd.DataFrame(Y_pred, index=test.index) # , columns=Y.columns if CFG.debug: test = test.sample(n=1000, random_state=seed).reset_index(drop=True) Y_pred = np.zeros((len(test), CFG.n_classes)) for fold in CFG.trn_fold: print(f"========== fold: {fold} testing ==========") #states = torch.load(f'fold{CFG.trn_fold[fold]}_seed{seed}_best.pth', map_location=torch.device('cpu')) states = torch.load(CP_DIR + f'/fold{CFG.trn_fold[fold]}_seed{seed}_best.pth', map_location=torch.device('cpu')) model = TimmModel(CFG.n_classes, model_name=CFG.model_name, pretrained=False) model.to(device) model.load_state_dict(states['model']) del states; gc.collect() test_dataset = TestDataset(test, transform=get_transforms(data='valid')) test_loader = DataLoader(test_dataset, batch_size=CFG.batch_size, shuffle=False, num_workers=CFG.num_workers) Y_pred += test_fn(test_loader, model, device, tta_round=TTA_ROUND) / len(CFG.trn_fold) del test_loader, model; gc.collect() _test = test.copy() _test["label"] = Y_pred.argmax(1) # label = class id with the highest soft-averaged probability _test = pd.concat([_test, pd.DataFrame(Y_pred)], axis=1) # also keep the per-class confidences _test.to_csv(CP_DIR+f"/{NAME}_inf_test_seed{seed}.csv", index=False) print(f"OUTPUT: {CP_DIR}/{NAME}_inf_test_seed{seed}.csv") Y_pred_seed += Y_pred / len(CFG.seeds) Y_pred_ens += Y_pred_seed / len(CFGs) test["label"] = Y_pred_ens.argmax(1) # label = class id with the highest soft-averaged probability test = pd.concat([test, pd.DataFrame(Y_pred_ens)], axis=1) # also keep the per-class confidences test.to_csv(CP_DIR+f"/{NAME}_inf_test_seed_mean.csv", index=False) print(f"OUTPUT: {CP_DIR}/{NAME}_inf_test_seed_mean.csv") return test if __name__ == "__main__": # ==================================================== # make submission.csv # ==================================================== test = main(test, CFGs) print("test.shape: " + str(test.shape)) #display(test.head()) print("\ntest finish!!!") make submission.csv sub = pd.read_csv(f"{CP_DIR}/{NAME}_inf_test_seed_mean.csv")[['image_id', 'label']] sub = sub.rename(columns={'image_id':"id", 'label':"y"}) sub.to_csv(f"{CP_DIR}/{NAME}_seed_mean_submission.csv", index=False) sub Potential Energy Methodology A couple of different ways of estimating potential energy come up in the literature, so this notebook is dedicated to testing and comparing a couple of the methods.
The chosen method will be loaded into the final work. # Load Data and relevant modules - this is common to both ways %matplotlib inline import numpy as np import scipy.signal as sig import matplotlib.pyplot as plt import data_load import gsw import oceans as oc import pandas as pd import internal_waves_calculations as iwc import warnings import cmocean # Probably shouldn't do this, but the warnings annoy me warnings.simplefilter("ignore") pd.options.display.max_rows = 3000 pd.options.display.max_columns = 22 # load data and cut off the bottom (it's all NaNs) ladcp, ctd = data_load.load_data() strain = np.genfromtxt('strain.csv', delimiter=',') wl_max = 900 wl_min = 400 ctd_bin_size = 1024 ladcp_bin_size = 1024 nfft = 1024 U, V, p_ladcp = oc.loadLADCP(ladcp) S, T, p_ctd, lat, lon = oc.loadCTD(ctd) rho = gsw.density.rho(S, T, p_ctd) maxDepth = 4000 idx_ladcp = p_ladcp[:, -1] <= maxDepth idx_ctd = p_ctd[:, -1] <= maxDepth strain = strain[idx_ctd, :] S = S[idx_ctd,:] T = T[idx_ctd,:] rho = rho[idx_ctd,:] p_ctd = p_ctd[idx_ctd, :] U = U[idx_ladcp, :] V = V[idx_ladcp, :] p_ladcp = p_ladcp[idx_ladcp, :] # Bin CTD data ctd_bins = oc.binData(S, p_ctd[:, 0], ctd_bin_size) # Bin LADCP data ladcp_bins = oc.binData(U, p_ladcp[:, 0], ladcp_bin_size) # Depth and lat/long grids (For plots) depths = np.vstack([np.nanmean(p_ctd[binIn]) for binIn in ctd_bins]) dist = gsw.distance(lon, lat) dist = np.cumsum(dist)/1000 dist = np.append(0,dist) **WARNING : The adiabatic leveling is a super slow function so try to avoid running it a bunch of times for no reason** # Calculate potential energy density spectrum using adiabatic leveling # --- This is the part that needs tweaking I think # Adiabatic leveling following Bray and Fofonoff 1981 - # the actual code is a python version of 's Matlab code # Order = order of polynomial fit to use order = 1 # Pressure window - See Bray and Fofonoff 1981 for details pressure_range = 400 # The depth increases on axis 0 axis = 0 # Use Adiabatic Leveling from the Oceans Library N2_ref, N2, strain, p_mid, rho_bar = oc.adiabatic_level(S, T, p_ctd, lat, pressure_range=pressure_range, order=order, axis=axis, ) Isopycnal displacements from density surfaces **V1 - Waterman et al. 2013 - using density surfaces** This uses the equation: $$ \eta = \frac{\rho - \rho_{ref}}{\frac{d\rho_{ref}}{dz}} $$ where the reference density is neutral density. I am going to try it with neutral density and with the adiabatically leveled density surfaces and see how different it is and why. $\frac{d\rho}{dz}$ is computed by differencing the reference density surfaces over a 400 meter vertical window. According to *Waterman et al. 2013* this window should not make a big difference.
For the reference density, the mean of all the adiabatically leveled profiles is taken, as the stratification varies significantly across the profile. rho_ref = np.nanmean(rho_bar, axis=1) # Stick a value on the top to make it the same size as the rho array rho_ref = np.hstack((0, rho_ref)) # set difference window to 400 meters win = 400 # since all the data is on a normalized pressure grid use a single vertical vector to make it easier to handle z = -1*gsw.z_from_p(p_ctd[:,0], lat[:,0]) dz = np.nanmean(np.diff(z)) step = int(np.floor(.5*win/dz)) eta = np.full_like(rho, np.nan) for i in range(rho.shape[0]): # If in the TOP half of the profile the window needs to be adjusted if i - step < 0: lower = 0 upper = int(2*step) # If in the BOTTOM half of the profile the window needs to be adjusted elif i + step > (rho.shape[0] - 1): lower = int(rho.shape[0] - 2*step) upper = -1 else: upper = i + step lower = i - step drefdz = (rho_ref[upper] - rho_ref[lower])/win eta[i,:] = (rho[i,:] - rho_ref[i])/drefdz Get Spectrum to see how it worked The kinetic energy calculations seem straightforward, so they are just loaded in using the internal waves KE calculation function rather than showing the whole code. Using these, the wave components are calculated to see the effects of the different potential energy calculations. **See the main lee_wave.ipynb notebook for full details** # Calculate KE spectra (m2/s2) z_ladcp = -1*gsw.z_from_p(p_ladcp, lat) KE, KE_grid, KE_psd, Uprime, Vprime, ke_peaks = iwc.KE_UV(U, V, z_ladcp, ladcp_bins, wl_min, wl_max, lc=wl_min-50, nfft=1024, detrend='constant') # Calculate PE spectrum using eta from above (m2/s2) z_ctd = -1*gsw.z_from_p(p_ctd, lat) PE, PE_grid, eta_psd, N2mean, pe_peaks = iwc.PE(N2, z, eta, wl_min, wl_max, ctd_bins, nfft=1024, detrend=False) # Plot spectra to see what happened m_plot = np.array([(1)/wl_max, (1)/wl_max, (1)/wl_min, (1)/wl_min]) plt.figure(figsize=[12,6]) plt.loglog(KE_grid, KE_psd.T, linewidth=.6, c='b', alpha=.05) plt.loglog(KE_grid, np.nanmean(KE_psd, axis=0).T, lw=2, c='b') ylims = plt.gca().get_ylim() ylim1 = np.array([ylims[0], ylims[1]]) plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) plt.ylabel('Kinetic Energy Density') plt.xlabel('Vertical Wavenumber') plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.loglog(PE_grid, .5*np.nanmean(N2)*eta_psd.T, lw=.6, c='r', alpha=.05) plt.loglog(PE_grid, .5*np.nanmean(N2)*np.nanmean(eta_psd, axis=0).T, lw=2, c='r') plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) # plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.ylabel('Potential Energy Density (m2/s2)') plt.xlabel('Vertical Wavenumber') # plt.xlim(.0005, .01) Get Wave Components (Frequency, horizontal wavenumber) Etotal = 1027*(KE + PE) # Multiply by density to get Joules # wave components f = np.nanmean(gsw.f(lat)) # version 2 omega calculation - where did this come from? omega = f*np.sqrt(((KE+PE)/(KE-PE))) # Waterman et al. 2012 (I think) m = (2*np.pi)/800 kh = m*np.sqrt(((f**2 - omega**2)/(omega**2 - N2mean))) # Waterman et al. 2012 kh2 = (m/np.sqrt(N2mean))*(np.sqrt(omega**2 - f**2)) # Where from? (Meyer, I think) lambdaH = 1e-3*(2*np.pi)/kh lambdaH2 = 1e-3*(2*np.pi)/kh2 # version 2 omega calculation Rw = KE/PE # Unsure what to do with this just yet.
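A note on the ω estimate above (the "where did this come from?" comment): for linear internal waves the polarization relations give a kinetic-to-potential energy ratio that depends only on frequency,

$$ \frac{KE}{PE} = \frac{\omega^2 + f^2}{\omega^2 - f^2}, $$

which inverts to

$$ \omega = f\sqrt{\frac{KE + PE}{KE - PE}}, $$

matching the code. The horizontal wavenumber then follows from the internal-wave dispersion relation,

$$ k_h = m\sqrt{\frac{f^2 - \omega^2}{\omega^2 - N^2}}, $$

with $m$ fixed by the assumed 800 m vertical wavelength; the second estimate `kh2` is the same relation in the hydrostatic limit $\omega \ll N$. This matches the approach attributed to Waterman et al. above, but treat this note as an interpretation rather than a confirmed source for the exact equations used here.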
table = oc.display(lambdaH, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') Attempt with neutral densities and polyfit # Neutral Densities rho_neutral = np.genfromtxt('neutral_densities.csv', delimiter=',') rho_n = rho_neutral[idx_ctd,:] # Poly fit to neutral density to get reference profiles ref = [] for cast in rho_n.T: fitrev = oc.vert_polyFit2(cast, p_ctd[:, 0], 100, deg=2) ref.append(fitrev) ref = np.vstack(ref).T eta = oc.isopycnal_displacements(rho_n, ref, p_ctd, lat) ref = np.nanmean(ref, axis=1) # recalculate spectrum z_ctd = -1*gsw.z_from_p(p_ctd, lat) PE, PE_grid, eta_psd, N2mean, pe_peaks = iwc.PE(N2, z, eta, wl_min, wl_max, ctd_bins, nfft=1024, detrend=False) # Plot spectra to see what happened m_plot = np.array([(1)/wl_max, (1)/wl_max, (1)/wl_min, (1)/wl_min]) plt.figure(figsize=[12,6]) plt.loglog(KE_grid, KE_psd.T, linewidth=.6, c='b', alpha=.05) plt.loglog(KE_grid, np.nanmean(KE_psd, axis=0).T, lw=2, c='b') ylims = plt.gca().get_ylim() ylim1 = np.array([ylims[0], ylims[1]]) plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) plt.ylabel('Kinetic Energy Density') plt.xlabel('Vertical Wavenumber') plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.loglog(PE_grid, .5*np.nanmean(N2)*eta_psd.T, lw=.6, c='r', alpha=.05) plt.loglog(PE_grid, .5*np.nanmean(N2)*np.nanmean(eta_psd, axis=0).T, lw=2, c='r') plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) # plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.ylabel('Potential Energy Density (m2/s2)') plt.xlabel('Vertical Wavenumber') # plt.xlim(.0005, .01) Etotal = 1027*(KE + PE) # Multiply by density to get Joules # wave components f = np.nanmean(gsw.f(lat)) # version 2 omega calculation - where did this come from? omega = f*np.sqrt(((KE+PE)/(KE-PE))) # Waterman et al. 2012 (I think) m = (2*np.pi)/800 kh = m*np.sqrt(((f**2 - omega**2)/(omega**2 - N2mean))) # Waterman et al. 2012 kh2 = (m/np.sqrt(N2mean))*(np.sqrt(omega**2 - f**2)) # Where from? (Meyer, I think) lambdaH = 1e-3*(2*np.pi)/kh lambdaH2 = 1e-3*(2*np.pi)/kh2 # version 2 omega calculation Rw = KE/PE # Unsure what to do with this just yet.
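Since the point of this notebook is to compare the two displacement estimates, a quick quantitative check can complement the spectra plots above. A minimal sketch, assuming a copy of the adiabatic-leveling displacements was saved (e.g., as `eta_leveled`, a hypothetical name not defined in this notebook) before `eta` was overwritten with the neutral-density/polyfit version:

# Sketch only: compare the two isopycnal displacement estimates point by point.
# `eta_leveled` is a hypothetical copy of the first method's eta, saved before it was overwritten.
import numpy as np
from scipy import stats

mask = np.isfinite(eta_leveled) & np.isfinite(eta)
rho_corr, p_val = stats.spearmanr(eta_leveled[mask], eta[mask])
rms_diff = np.sqrt(np.mean((eta_leveled[mask] - eta[mask]) ** 2))
print(f"Spearman rho between methods: {rho_corr:.3f} (p = {p_val:.2g})")
print(f"RMS displacement difference: {rms_diff:.1f} m")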
table = oc.display(Etotal, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') table = oc.display(lambdaH, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') plt.rcParams.update({'font.size':12}) ref = np.nanmean(ref, axis=1) ref.shapeLoad Beijing Air Qualitybeijing_17_18_aq = pd.read_csv(DATA_PATH + 'raw/Beijing/beijing_17_18_aq.csv', parse_dates=['utc_time']) beijing_802_803_aq = pd.read_csv(DATA_PATH + 'raw/Beijing/beijing_201802_201803_aq.csv', parse_dates=['utc_time']) beijing_17_18_aq.head() beijing_17_18_aq.utc_time.min(), beijing_17_18_aq.utc_time.max() beijing_802_803_aq.head() beijing_802_803_aq.utc_time.min(), beijing_802_803_aq.utc_time.max()Union Beijing Air Quality Datadef rename_aq_df(df): return df.rename(columns={'stationId': 'station_id', 'utc_time': 'utc_datetime'})Rename and concatenate both airquality data tablesbeijing_17_18_aq = rename_aq_df(beijing_17_18_aq) beijing_802_803_aq = rename_aq_df(beijing_802_803_aq) beijing_aq_data = pd.concat([beijing_17_18_aq, beijing_802_803_aq], ignore_index=True) beijing_aq_data.shapeDrop duplicated rowsbeijing_aq_data = beijing_aq_data.drop_duplicates() beijing_aq_data.shapeSave as interim feather file# `to_feather` won't accept the index after `drop_duplicates`. Had to `reset_index()` and drop the current index. beijing_aq_data.reset_index().drop('index', axis=1).to_feather(DATA_PATH + 'interim/beijing_aq_union.feather') beijing_aq_data = pd.read_feather(DATA_PATH + 'interim/beijing_aq_union.feather') beijing_aq_data.head() beijing_aq_data.utc_datetime.min(), beijing_aq_data.utc_datetime.max()Loading Grid Weather Databeijing_meo_grid = pd.read_csv(DATA_PATH + 'raw/Beijing/Beijing_historical_meo_grid.csv', parse_dates=['utc_time']) beijing_meo_grid.head() beijing_meo_grid.utc_time.min(), beijing_meo_grid.utc_time.max()Rename Grid Weather Datadef rename_weather_data(df): return df.rename(columns={'stationName': 'station_id', 'utc_time': 'utc_datetime', 'wind_speed/kph': 'wind_speed'}) beijing_meo_grid = rename_weather_data(beijing_meo_grid) beijing_meo_grid = beijing_meo_grid.drop_duplicates() beijing_meo_grid.head()Save as interim feather file# `to_feather` won't accept the index after `drop_duplicates`. Had to `reset_index()` and drop the current index. 
beijing_meo_grid.reset_index().drop('index', axis=1).to_feather(DATA_PATH + 'interim/beijing_meo_grid.feather') beijing_meo_grid = pd.read_feather(DATA_PATH + 'interim/beijing_meo_grid.feather')Loading Observed Weather Databeijing_meo_observed_17_18 = pd.read_csv(DATA_PATH + 'raw/Beijing/beijing_17_18_meo.csv', parse_dates=['utc_time']) beijing_meo_observed_802_803 = pd.read_csv(DATA_PATH + 'raw/Beijing/beijing_201802_201803_me.csv', parse_dates=['utc_time']) beijing_meo_observed_17_18.head() beijing_meo_observed_17_18.station_id.unique() beijing_meo_observed_17_18.utc_time.min(), beijing_meo_observed_17_18.utc_time.max() beijing_meo_observed_802_803.head() beijing_meo_observed_802_803.utc_time.min(), beijing_meo_observed_802_803.utc_time.max() beijing_meo_observed_802_803.station_id.unique()Union observed weather databeijing_meo_observed_17_18 = rename_weather_data(beijing_meo_observed_17_18) beijing_meo_observed_802_803 = rename_weather_data(beijing_meo_observed_802_803) beijing_observed_meo_union = pd.concat([beijing_meo_observed_17_18, beijing_meo_observed_802_803], sort=False) beijing_observed_meo_union = beijing_observed_meo_union.drop_duplicates() beijing_observed_meo_union.head()Save as interim feather filebeijing_observed_meo_union.reset_index().drop('index', axis=1).to_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_observed_meo_union = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_observed_meo_union.head()Remove/correct outliers Weather outliersbeijing_observed_meo_union.describe() # TODO: Show outliers using boxplot of temperature, pressure, humidity, wind_direction and wind_speed beijing_observed_meo_union.temperature.plot.box() beijing_observed_meo_union[beijing_observed_meo_union.temperature > 800000]Wind direction is measured in degrees clockwise from due north and so a wind coming from the south has a wind direction of 180 degrees; one from the east is 90 degrees, etc. **If the wind speed is less than 0.5m/s (nearly no wind), the value of the wind_direction is 999017.**beijing_observed_meo_union[beijing_observed_meo_union.wind_direction == 999017].head() def process_observed_meo_outliers(df): df = df[df.temperature < 800000] df.loc[:, 'wind_direction'] = df.replace(999017, 0) return df beijing_observed_meo_union = process_observed_meo_outliers(beijing_observed_meo_union) beijing_observed_meo_union.describe()Save as interim feather filebeijing_observed_meo_union.reset_index().drop('index', axis=1).to_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_observed_meo_union = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_observed_meo_union.head()Air Quality Outliersbeijing_aq_data.describe() beijing_aq_data['PM2.5'].plot.box() beijing_aq_data['PM10'].plot.box() beijing_aq_data[beijing_aq_data['PM2.5'] > 1400] ## PM2.5 reading for this day seems to be an outlier. We will drop it beijing_aq_data.loc[beijing_aq_data[beijing_aq_data.utc_datetime == '2018-3-29 14:00:00'].index].head(10) beijing_aq_data[beijing_aq_data['PM10'] > 3000] # PM10 is high for that day so we don't drop it. 
beijing_aq_data.loc[beijing_aq_data[beijing_aq_data.utc_datetime == '2018-3-28'].index].head(10) def process_air_quality_outliers(df): outliers_index = df[beijing_aq_data['PM2.5'] > 1400].index df = df.drop(outliers_index) return df beijing_aq_data = process_air_quality_outliers(beijing_aq_data)Save as interim feather filebeijing_aq_data.reset_index().drop('index', axis=1).to_feather(DATA_PATH + 'interim/beijing_aq_union.feather') beijing_aq_data = pd.read_feather(DATA_PATH + 'interim/beijing_aq_union.feather') beijing_aq_data.head()Merge Air Quality and Weather Data Load and process air quality stations excel filebeijing_aq_stations = pd.read_excel(DATA_PATH + '/raw/Beijing/Beijing_AirQuality_Stations_en.xlsx', names=['station_id', 'longitude', 'latitude']) def process_aq_stations(df): df = df.loc[11:].dropna().reset_index().drop('index', axis=1) return df beijing_aq_stations = process_aq_stations(beijing_aq_stations) beijing_aq_stations.to_feather(DATA_PATH + 'interim/beijing_aq_stations.feather') beijing_aq_stations = pd.read_feather(DATA_PATH + 'interim/beijing_aq_stations.feather') beijing_aq_stations.head()Load and process weather grid stationsbeijing_meo_grid_stations = pd.read_csv(DATA_PATH + 'raw/Beijing/Beijing_grid_weather_station.csv', names=['station_id', 'latitude', 'longitude']) beijing_meo_grid_stations.head()Create weather observed stations listbeijing_observed_meo_union = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_observed_meo_union.head() beijing_meo_observed_stations = beijing_observed_meo_union[['station_id', 'latitude', 'longitude']].drop_duplicates().dropna().reset_index().drop('index', axis=1) beijing_meo_observed_stations.head()Save observed weather stations listbeijing_meo_observed_stations.to_feather(DATA_PATH + 'interim/beijing_meo_observed_stations.feather') beijing_meo_observed_stations = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_stations.feather') beijing_meo_observed_stations.head()Merge air quality station data with it's nearest neighbor weather station datafrom sklearn.neighbors import NearestNeighbors import itertools import os # Create directory to save merged air quality and weather data os.makedirs(DATA_PATH + 'interim/aq_station_merged', exist_ok=True) # Load beijing air quality, grid and observed weather stations. 
beijing_aq_stations = pd.read_feather(DATA_PATH + 'interim/beijing_aq_stations.feather') grid_weather_stations = pd.read_csv(DATA_PATH + 'raw/Beijing/Beijing_grid_weather_station.csv', names=['station_id', 'latitude', 'longitude']) grid_weather_data = pd.read_feather(DATA_PATH + 'interim/beijing_meo_grid.feather') observed_weather_stations = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_stations.feather') observed_weather_data = pd.read_feather(DATA_PATH + 'interim/beijing_meo_observed_union.feather') beijing_aq_data = pd.read_feather(DATA_PATH + 'interim/beijing_aq_union.feather') ## Fit NearestNeighbor model to grid and observed weather station's coordinates grid_nn_model = NearestNeighbors(n_neighbors=2, metric='haversine', n_jobs=-1) grid_nn_model.fit(grid_weather_stations.set_index('station_id')) observed_nn_model = NearestNeighbors(n_neighbors=1, metric='haversine', n_jobs=-1) observed_nn_model.fit(observed_weather_stations.set_index('station_id')) def process_weather_station_data(df, station_name): df = df.iloc[:, 3:].copy() new_names = [(i,f"{station_name}_{i}") for i in df.iloc[:, 1:].columns.values] return df.rename(columns = dict(new_names)) def output_nn_weather_data(aq_station, latitude, longitude, nn_model, weather_stations, weather_data): nn_result = nn_model.kneighbors([[latitude, longitude]], return_distance=False) nn_index = nn_result[0] nn_station_names = weather_stations.iloc[nn_index].station_id weather_data_dfs = [weather_data[weather_data.station_id == weather_station_id] for weather_station_id in nn_station_names] weather_data_dfs = [process_weather_station_data(df, station_name) for station_name, df in zip(nn_station_names, weather_data_dfs)] return weather_data_dfs # Iterate through each air quality station for index, aq_station_info in beijing_aq_stations.iterrows(): # Fetch KNN grid station weather data nn_grid_weather_data = output_nn_weather_data(aq_station_info['station_id'], aq_station_info['latitude'], aq_station_info['longitude'], grid_nn_model, grid_weather_stations, grid_weather_data) # Fetch KNN observed station weather data nn_observed_weather_data = output_nn_weather_data(aq_station_info['station_id'], aq_station_info['latitude'], aq_station_info['longitude'], observed_nn_model, observed_weather_stations, observed_weather_data) # Merge to air station air quality data station_aq_data = beijing_aq_data[beijing_aq_data.station_id == aq_station_info['station_id']] for weather_df in itertools.chain(nn_grid_weather_data, nn_observed_weather_data): station_aq_data = station_aq_data.merge(weather_df, on='utc_datetime', how='inner') # Fill-in missing hourly values to impute later. 
date_range_index = pd.date_range(start=station_aq_data.utc_datetime.min(), end=station_aq_data.utc_datetime.max(), freq='H') station_aq_data = station_aq_data.set_index('utc_datetime').reindex(date_range_index) # TODO: Add imputation method here # Save to interim feather file station_aq_data = station_aq_data.drop('station_id', axis=1).reset_index().rename(columns={'index': 'utc_datetime'}) station_name = aq_station_info['station_id'] station_aq_data.to_feather(DATA_PATH + f"interim/aq_station_merged/{station_name}_merged_weather.feather")Restore the whole energysystem with resultsenergysystem = solph.EnergySystem() energysystem.restore(dpath=None, filename=None)Convert keys to strings and print all keysstring_results = solph.views.convert_keys_to_strings(energysystem.results['main']) print(string_results.keys())Use the outputlib to collect all the flows into and out of the electricity busCollect all flows into and out of the electricity bus by using solph.views.node()node_results_bel = solph.views.node(energysystem.results['main'], 'bel')What we will be working with now is a pandas dataframe. Have a look at these links to learn about pandas, especially the last one (pandas in 10min): https://pandas.pydata.org/ http://pandas.pydata.org/pandas-docs/stable/ http://pandas.pydata.org/pandas-docs/stable/10min.htmldf = node_results_bel['sequences'] df.head(2)Use pandas functionality to create a plot of all the columns of the dataframeax = df.plot(kind='line', drawstyle='steps-post') ax.set_xlabel('Time [h]') ax.set_ylabel('Energy [MWh]') ax.set_title('Flows into and out of bel') ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) # place legend outside of plot plt.show()oemof visio provides a function that collects the column names for in and outflows as lists in a dictionaryin_out_dictionary = oev.plot.divide_bus_columns('bel', df.columns) in_cols = in_out_dictionary['in_cols'] out_cols = in_out_dictionary['out_cols']This allows us to get the all the columns that are outflows: We can get any column of the dataframe by providing its label as a listbel_to_demand_el = [(('bel', 'demand_el'), 'flow')] # this is a list with one element df[bel_to_demand_el].head(2)Plot only outflowsax = df[out_cols].plot(kind='line', drawstyle='steps-post') ax.set_xlabel('Time [h]') ax.set_ylabel('Energy [MWh]') ax.set_title('Flows into or out of bel') ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) # place legend outside of plot plt.show()Use the functions of oemof_visio to create plotsSee also: oemof_examples/examples/oemof_0.2/plotting_examples/storage_investment_plot.py Use color palette generators to generate a suitable color list, e.g.: http://javier.xyz/cohesive-colors/ https://colourco.de/ http://seaborn.pydata.org/tutorial/color_palettes.htmlinorder = [(('pp_chp', 'bel'), 'flow'), (('pp_coal', 'bel'), 'flow'), (('pp_gas', 'bel'), 'flow'), (('pp_lig', 'bel'), 'flow'), (('pp_oil', 'bel'), 'flow'), (('pv', 'bel'), 'flow'), (('wind', 'bel'), 'flow')] outorder = [(('bel', 'demand_el'), 'flow'), (('bel', 'excess_el'), 'flow'), (('bel', 'heat_pump'), 'flow')] cdict = {(('pp_chp', 'bel'), 'flow'): '#eeac7e', (('pp_coal', 'bel'), 'flow'): '#0f2e2e', (('pp_gas', 'bel'), 'flow'): '#c76c56', (('pp_lig', 'bel'), 'flow'): '#56201d', (('pp_oil', 'bel'), 'flow'): '#494a19', (('pv', 'bel'), 'flow'): '#ffde32', (('wind', 'bel'), 'flow'): '#4ca7c3', (('bel', 'demand_el'), 'flow'): '#ce4aff', (('bel', 'excess_el'), 'flow'): '#555555', (('bel', 'heat_pump'), 'flow'): '#42c77a'} fig = plt.figure(figsize=(13, 5)) 
my_plot = oev.plot.io_plot('bel', df, inorder=inorder, outorder=outorder, cdict=cdict, ax=fig.add_subplot(1, 1, 1), smooth=False) ax = my_plot['ax'] oev.plot.set_datetime_ticks(ax, df.index, tick_distance=32, date_format='%d-%m-%H', offset=12) my_plot['ax'].set_ylabel('Power in MW') my_plot['ax'].set_xlabel('2012') my_plot['ax'].set_title("Electricity bus") legend = ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) # place legend outside of plot # save figure fig = ax.get_figure() fig.savefig('myplot.png', bbox_inches='tight')Homework: Grade Distributions The ProblemProfessor is at it again. He would like you to improve on the program you wrote for him last week. As you may recall this program, when given an input of a student points earned in the course, would output a letter grade and grade points.Prof. Xavier would like the program to input multiple points earned as part of a sentinel controlled loop, and as each student total point score is entered calculate the following statistics:- count of student point scores in "A" range, and percentage of total- count of student point scores in "B" range, and percentage of total- count of student point scores in "C" range, and percentage of total- count of student point scores in "D" range, and percentage of total- count of student point scores in "F" range, and percentage of total- total count of point scores enteredNOTE: The statistics are only displayed when the command `stats` is entered.Once again, here is the grading scale he uses: Point Range Grade Grade Points 125-150 A 4.0 100-124 B 3.0 75-99 C 2.0 50-74 D 1.0 0-49 F 0.0 Sample Run: Enter student's total points, type 'stats' or type 'quit': 130 Enter student's total points, type 'stats' or type 'quit': 111 Enter student's total points, type 'stats' or type 'quit': 145 Enter student's total points, type 'stats' or type 'quit': stats Scores : 3 A's : 2 (66%) B's : 1 (33%) C's : 0 (0%) D's : 0 (0%) F's : 0 (0%) Enter student's total points, type 'stats' or type 'quit': 100 Enter student's total points, type 'stats' or type 'quit': 60 Enter student's total points, type 'stats' or type 'quit': stats Scores : 5 A's : 2 (40%) B's : 2 (40%) C's : 0 (0%) D's : 1 (20%) F's : 0 (0%) Enter student's total points, type 'stats' or type 'quit': quit Goodbye! HINTS: **use problem simplification** like we did in small group and the lab!- Start small: just get it to count scores, then maybe A's etc.- if you cannot figure out the 'stats' command, print stats each time, then try to add it afterwards. If you want in the X-men you need to suck up to Prof. Xavier!!!Print your stats using a text-only tech bar chart, like this. This is easy to do in Python with the `*` string operator. Look it up and make it your own! Scores : 30 A's | ************ 12 (40%) B's | ********* 9 (30%) C's | *** 3 (10%) D's | ****** 6 (20%) F's | (0%) Part 1: Problem AnalysisInputs: ```TODO: Inputs```Outputs:```TODO: Outputs```Algorithm (Steps in Program): ```TODO:Steps Here``` Part 2: Code SolutionYou may write your code in several cells, but place the complete, final working copy of your code solution within this single cell below. Only the within this cell will be considered your solution.# Step 2: Write code herePart 3: Questions1. Did you use a while or for loop? Explain.`--== Double-Click and Write Your Answer Below This Line ==--` 2. What is the most challenging aspect of testing a program like this?`--== Double-Click and Write Your Answer Below This Line ==--` 3. 
How easy is it to modify the grading scale in this program?`--== Double-Click and Write Your Answer Below This Line ==--` Part 4: ReflectionReflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements?To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise.Keep your response to between 100 and 250 words.`--== Double-Click and Write Your Reflection Below Here ==--`# run this code to turn in your work! from coursetools.submission import Submission Submission().submit()HOG# Define a function to compute binned color features def bin_spatial(img, size=(32, 32)): # Use cv2.resize().ravel() to create the feature vector features = cv2.resize(img, size).ravel() # Return the feature vector return features # Define a function to compute color histogram features def color_hist(img, nbins=32, bins_range=(0, 256)): # Compute the histogram of the color channels separately channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range) channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range) channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range) # Concatenate the histograms into a single feature vector hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0])) # Return the individual histograms, bin_centers and feature vector return hist_features def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True): # Call with two outputs if vis==True if vis == True: features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell), cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, visualise=vis, feature_vector=feature_vec) return features, hog_image # Otherwise call with one output else: features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell), cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, visualise=vis, feature_vector=feature_vec) return features def extract_features(imgs, color_space='RGB', spatial_size=(32, 32), hist_bins=32, orient=9, pix_per_cell=8, cell_per_block=2, hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True): #1) Define an empty list to receive features features = [] for img in imgs: img = img[:x, :y] img_features = [] #2) Apply color conversion if other than 'RGB' if color_space != 'RGB': if color_space == 'HSV': feature_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) elif color_space == 'LUV': feature_image = cv2.cvtColor(img, cv2.COLOR_BGR2LUV) elif color_space == 'HLS': feature_image = cv2.cvtColor(img, cv2.COLOR_BGR2HLS) elif color_space == 'YUV': feature_image = cv2.cvtColor(img, cv2.COLOR_BGR2YUV) elif color_space == 'YCrCb': feature_image = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb) else: feature_image = img[:,:,::-1] #3) Compute spatial features if flag is set if spatial_feat == True: spatial_features 
= bin_spatial(feature_image, size=spatial_size) #4) Append features to list img_features.append(spatial_features) print('spatial shape', spatial_features.shape) #5) Compute histogram features if flag is set if hist_feat == True: hist_features = color_hist(feature_image, nbins=hist_bins) print('hist shape', hist_features.shape) #6) Append features to list img_features.append(hist_features) #7) Compute HOG features if flag is set if hog_feat == True: print('feature image shape', feature_image.shape) if hog_channel == 'ALL': hog_features = [] for channel in range(feature_image.shape[2]): hog_features.extend(get_hog_features(feature_image[:,:,channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True)) print('hog shape', len(hog_features)) else: hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True) #8) Append features to list img_features.append(hog_features) features.append(np.concatenate(img_features)) return featuresTrain a classifier# Load data vehicle_path = os.path.expanduser("~/Downloads/tl_classifier_exceptsmall/simulator/") nonvehicle_path = os.path.expanduser("~/Downloads/tl_classifier_exceptsmall/simulator/") train_vehicles = [] train_nonvehicles = [] test_vehicles = [] test_nonvehicles = [] counter = 0 x = y = 300 for vehicle_file in glob.glob(vehicle_path+'Red/*.png'): img = cv2.imread(vehicle_file) train_vehicles.append(img) if img.shape[0] < x: x = img.shape[0] if img.shape[1] < y: y = img.shape[1] nonvehicle_folders = ['Green', 'Yellow', 'NoTrafficLight'] for folder in nonvehicle_folders: for nonvehicle_file in glob.glob(nonvehicle_path+folder+'/*.png'): img = cv2.imread(nonvehicle_file) train_nonvehicles.append(img) if img.shape[0] < x: x = img.shape[0] if img.shape[1] < y: y = img.shape[1] print(len(train_vehicles)) len(train_nonvehicles) y = 43 # Balance # train_vehicles = train_vehicles[:len(test_vehicles)] # test_nonvehicles = test_nonvehicles[:len(test_vehicles)] # extract features color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 7 # HOG orientations pix_per_cell = 16 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (16, 16) # Spatial binning dimensions hist_bins = 16 # Number of histogram bins spatial_feat = True # Spatial features on or off hist_feat = True # Histogram features on or off hog_feat = True # HOG features on or off y_start_stop = [500, 720] # Min and max in y to search in slide_window() train_car_features = extract_features(train_vehicles, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) train_notcar_features = extract_features(train_nonvehicles, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat) # Create an array stack of feature vectors X_train = np.vstack((train_car_features, train_notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X_train) # Apply the scaler to X train_scaled_X = X_scaler.transform(X_train) # create labels y = np.hstack((np.ones(len(train_car_features)), np.zeros(len(train_notcar_features)))) # shuffle rand_state = np.random.randint(0, 
100) X_train, X_test, y_train, y_test = train_test_split(train_scaled_X, y, test_size=0.2, random_state=rand_state) y = 43 # Use a linear SVC svc = LinearSVC() # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() xOut = open('X_scaler.pkl','wb') pickle.dump(X_scaler, xOut) xOut.close() # save the classifier svcOut = open('svc.pkl', 'wb') pickle.dump(svc, svcOut) svcOut.close()Sliding Window Searchdef convert_color(img, conv='RGB2YCrCb'): if conv == 'RGB2YCrCb': return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb) if conv == 'BGR2YCrCb': return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb) if conv == 'BGR2LUV': return cv2.cvtColor(img, cv2.COLOR_BGR2LUV) if conv == 'BGR2HSV': return cv2.cvtColor(img, cv2.COLOR_BGR2HSV) if conv == 'BGR2HLS': return cv2.cvtColor(img, cv2.COLOR_BGR2HLS) if conv == 'BGR2YUV': return cv2.cvtColor(img, cv2.COLOR_BGR2YUV) if conv == 'BGR2RGB': return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins): draw_img = np.copy(img) #img = img.astype(np.float32)/255 # only for jpeg img_tosearch = img[ystart:ystop,:,:] ctrans_tosearch = convert_color(img_tosearch, conv='BGR2YCrCb') if scale != 1: imshape = ctrans_tosearch.shape ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale))) ch1 = ctrans_tosearch[:,:,0] ch2 = ctrans_tosearch[:,:,1] ch3 = ctrans_tosearch[:,:,2] # Define blocks and steps as above nxblocks = (ch1.shape[1] // pix_per_cell)-1 nyblocks = (ch1.shape[0] // pix_per_cell)-1 nfeat_per_block = orient*cell_per_block**2 # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell window = 64 nblocks_per_window = (window // pix_per_cell)-1 cells_per_step = 2 # Instead of overlap, define how many cells to step nxsteps = (nxblocks - nblocks_per_window) // cells_per_step nysteps = (nyblocks - nblocks_per_window) // cells_per_step # Compute individual channel HOG features for the entire image hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False) hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False) hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False) boxes = [] for xb in range(nxsteps): for yb in range(nysteps): ypos = yb*cells_per_step xpos = xb*cells_per_step xleft = xpos*pix_per_cell ytop = ypos*pix_per_cell #print('image block', ytop, ytop+x, xleft, xleft+y) if ytop+x < 600 or xleft+y < 800: pass # Extract HOG for this patch #hog_feat1 = get_hog_features(ch1[ytop:ytop+x, xleft:xleft+y], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() #hog_feat2 = get_hog_features(ch2[ytop:ytop+x, xleft:xleft+y], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() #hog_feat3 = get_hog_features(ch3[ytop:ytop+x, xleft:xleft+y], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() #hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() #hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() #print('hog features img size', nblocks_per_window, nblocks_per_window) #hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3)) # Extract the image patch #subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, 
xleft:xleft+window], (window,window)) subimg = cv2.resize(ctrans_tosearch[ytop:ytop+x, xleft:xleft+y], (x,y)) #print('sub image shape', subimg.shape) # HOG features hog_feat1 = get_hog_features(subimg[:,:,0], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() hog_feat2 = get_hog_features(subimg[:,:,1], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() hog_feat3 = get_hog_features(subimg[:,:,2], orient, pix_per_cell, cell_per_block, feature_vec=True).ravel() #print('hog features img size', nblocks_per_window, nblocks_per_window) hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3)) # Get color features spatial_features = bin_spatial(subimg, size=spatial_size) hist_features = color_hist(subimg, nbins=hist_bins) # Scale features and make a prediction try: test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) except: print('ERROR') plt.imshow(subimg) #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) test_prediction = svc.predict(test_features) if test_prediction == 1: xbox_left = np.int(xleft*scale) ytop_draw = np.int(ytop*scale) win_draw = np.int(window*scale) boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart))) print('RED LIGHT!') return boxes #cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6) print('NOT RED LIGHT!') return boxes # heatmap def add_heat(heatmap, bbox_list): # Iterate through list of bboxes for box in bbox_list: # Add += 1 for all pixels inside each bbox # Assuming each "box" takes the form ((x1, y1), (x2, y2)) heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1 # Return updated heatmap return heatmap def apply_threshold(heatmap, threshold): # Zero out pixels below the threshold heatmap[heatmap <= threshold] = 0 # Return thresholded map return heatmap def draw_labeled_bboxes(img, labels): bboxes = [] # Iterate through all detected cars for car_number in range(1, labels[1]+1): # Find pixels with each car_number label value nonzero = (labels[0] == car_number).nonzero() # Identify x and y values of those pixels nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Define a bounding box based on min/max x and y bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy))) # Draw the box on the image bboxes.append(bbox) cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6) # Return the image return bboxes, img orient = 7 # HOG orientations pix_per_cell = 16 # HOG pixels per cell cell_per_block = 2 # HOG cells per block hog_channel = "ALL" # Can be 0, 1, 2, or "ALL" spatial_size = (16, 16) # Spatial binning dimensions hist_bins = 16 # Number of histogram bins ystart = [0] ystop = [600] scale = [2] print('get image') img = cv2.imread('test/test_186.png') box_list = [] for i in range(len(ystart)): result = find_cars(img, ystart[i], ystop[i], scale[i], svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) box_list.extend(result) heat = np.zeros_like(img[:,:,0]).astype(np.float) heat = add_heat(heat,box_list) heat = apply_threshold(heat,1) heatmap = np.clip(heat, 0, 255) labels = label(heatmap) box, draw_img = draw_labeled_bboxes(np.copy(img), labels) fig = plt.figure() plt.subplot(121) plt.imshow(draw_img) plt.title('Car Positions') plt.subplot(122) plt.imshow(heatmap, cmap='hot') plt.title('Heat Map') fig.tight_layout() plt.savefig('heatmap.jpg') ''' spatial shape (3072,) hist shape (96,) hog shape 4032 
''' yCDES Honours - Test 1 Instructions* Read all the instructions carefully.* The test consists of **50 Marks**, with **two** hours available.* The written section is to be answered in the book provided.* You must only access Moodle tests and NOT Moodle.* The programming section is to be answered within this Jupyter notebook and resubmitted to Moodle.* Do not rename the notebook, simply answer the questions and resubmit the file to Moodle.* The moodle submission link will expire at exactly **11:15** and **NO** late submission will be accepted. Please make sure you submit timeously!* You may use the **Numpy** help file at: https://docs.scipy.org/doc/numpy-1.15.4/reference/* **NB!!!** Anyone caught using Moodle (and its file storing), flash drives or external notes will be awarded zero and reported to the legal office. Written Section* Answer the following questions in the answer book provided. Question 1 (15 Marks)1. Consider the following equation:$$v_t = a v_{xx}.$$ 1. Set up an **implicit** scheme using a forward difference approximation for time and a Crank-Nicolson difference approximation for the spatial variable. (5 marks) 2. Using the von Neumann stability analysis, discuss the stability of the scheme given in (A). You may assume that $a >0$. (10 marks) Question 2 (5 Marks)2. Given the equation produced by the operation $P = \dfrac{\partial}{\partial t} + \alpha\dfrac{\partial}{\partial x}$:$$Pu = u_t + \alpha u_{x}, \qquad \alpha > 0.$$Evaluate the consistency of the FTFS scheme with the difference operator $P_{\Delta t, \Delta x}$. Programming Section Question 1Given the heat equation where:$$u_t = u_{xx}, \quad 0 < x < 1, \quad t \geq 0, \quad u(0, t) = u(1, t) = 0, \quad u(x, 0) = \sin(\pi x),$$and:$$\Delta x = 0.1, \quad \Delta t = 0.0005.$$Note: the analytical solution is:$$u(x, t) = e^{-\pi^2 t}\sin(\pi x)$$Implement an FTCS scheme to find an approximation at $t = 0.5$. The function should return the solution matrix $U$.def heat_eq(dt, dx, tf, f, D, alpha, beta): # YOUR CODE HERE raise NotImplementedError() # Run this test cell to check your code # Do not delete this cell # 1 mark # Unit test dx = 0.1 dt = 0.005 tf = 0.5 f = lambda x: np.sin(np.pi * x) D = 1 alpha = 0 beta = 0 tans = np.array([0. , 0.309017 , 0.5877853, 0.809017 , 0.9510565, 1. , 0.9510565, 0.809017 , 0.5877853, 0.309017 , 0. ]) ans = heat_eq(dt, dx, tf, f, D, alpha, beta) nt.assert_array_almost_equal(tans, ans[0]) print('Test case passed!!!') # Hidden test # No output will be produced # 9 marksQuestion 2Implement a Crank-Nicolson scheme on the heat equation given in Question 1 and find an approximation to $t = 0.5$. The function should return the solution matrix $U$.def crankNicolson(dt, dx, tf, f, D, alpha, beta): # YOUR CODE HERE raise NotImplementedError() # Run this test cell to check your code # Do not delete this cell # 1 mark # Unit test dx = 0.1 dt = 0.005 tf = 0.5 f = lambda x: np.sin(np.pi * x) D = 1 alpha = 0 beta = 0 tans = np.array([0. , 0.309017 , 0.5877853, 0.809017 , 0.9510565, 1. , 0.9510565, 0.809017 , 0.5877853, 0.309017 , 0. ]) ans = crankNicolson(dt, dx, tf, f, D, alpha, beta) nt.assert_array_almost_equal(tans, ans[0]) print('Test case passed!!!') # Hidden test # No output will be produced # 9 marksQuestion 31. Write a function which computes the absolute error and relative error (across the whole vector) at the final time `tf` of the two schemes in Questions 1 and 2. 
The function should also compute the norm at `tf` of the absolute difference between the approximate solution (of each scheme) and the true solution. The function should return 6 outputs, namely; `errExpAbs, errExpRel, errCNAbs, errCNRel, normExp, normCN`.dx = 0.1 dt = 0.005 tf = 0.5 f = lambda x: np.sin(np.pi * x) D = 1 alpha = 0 beta = 0 def errorFunction(): # YOUR CODE HERE raise NotImplementedError() # Run this test cell to check your code # Do not delete this cell # 1 mark # Unit test ans = errorFunction() # test output1 is vector assert(ans[0].shape[0] > 1) # test output2 is vector assert(ans[1].shape[0] > 1) # test output3 is vector assert(ans[2].shape[0] > 1) # test output4 is vector assert(ans[3].shape[0] > 1) # test output5 is scalar assert(ans[4].shape == ()) # test output6 is scalar assert(ans[5].shape == ()) print('Test case passed!!!') # Hidden test # No output will be produced # 1marks # Hidden test # No output will be produced # 1 marks # Hidden test # No output will be produced # 1 marks # Hidden test # No output will be produced # 1 marks # Hidden test # No output will be produced # 1 marks # Hidden test # No output will be produced # 1 marks2. Using the information of Question 3(1), plot the absolute and relative errors for both schemes on the same set of axis.# 3 marks # YOUR CODE HERE raise NotImplementedError()K-means( random initialization)import numpy as np import math # % Standard K-means clustering # % Inputs: data, n*d data matrix with n instances and d features # % K, number of clusters # % Ouputs: labels, n*1 indicator matrix with elements from 1 to K # % centroids, K*d centroid matrix # % objV, objective function value of K-means # % Jie TANG by 04/20/2020 def myKmeans(data, K, Maxiter=50, ObjV =math.inf, threshold = 0.01, centroids = False): centroids = initializeCentroid(data, K) for i in range(Maxiter): labels, New_ObjV = assign_label(data, centroids) centroids = update_centroid(data, K, labels) if ObjV-New_ObjV > threshold: ObjV = New_ObjV return labels, centroids, ObjV ## initialize k centroids randomly def initializeCentroid(data, K): return data[np.random.choice(len(data), K, replace=False)] ## assign labels to each point def assign_label(data, centroids): return np.array([np.argmin(np.sqrt(np.sum((data[i]-centroids)**2, axis=1))) for i in range(len(data))]),\ np.sum([np.sum((data[i]-centroids)**2) for i in range(len(data))]) ## update the centroids def update_centroid(data, K, labels): return np.array([np.mean(data[labels==k], axis=0) for k in range(K)])Testing## importing packages import pandas as pd import numpy as np import matplotlib.pylab as plt ## loading data three_globs = pd.read_csv('/Users/tjmask/Desktop/Semester2/DataMining/HW1/hw1/Homework_1/Datasets/three_globs.csv') three_globs = np.array(three_globs) labels, centroids, ObjV = myKmeans(three_globs, 3) plt.scatter(three_globs[:, 0], three_globs[:, 1], marker='.', c = labels) plt.scatter(centroids[:, 0], centroids[:,1], c='r') plt.show()K-means++import numpy as np ## K-means++ initialization def myKmeans_pp(data, K): ## get the data shape and creat an empty centriod array d = data.shape[1] centroids = np.zeros([K,d]) index = np.zeros(K) ## randomly choose the first centroid index[0] = np.random.choice(len(data),1, replace=False) initial_centroid = data[int(index[0])] centroids[0] = initial_centroid ## choose next centroids till all centroids are chosen for k in range(1, K): ## calculating the distance distance = np.array([(np.sum((data[i]-centroids)**2)) for i in range(len(data))]) ## get 
the index of the point which has the max distance index[k] = np.argmax(distance) ## update centroids centroids[k] = (data[int(index[k])]) return centroids, indexTestingKmeans_pp_initial, index = myKmeans_pp(three_globs, 3) Kmeans_pp_initial, index plt.scatter(three_globs[:, 0], three_globs[:, 1], marker='.') plt.scatter(Kmeans_pp_initial[:, 0], Kmeans_pp_initial[:,1], c='r') plt.show()Greedy K-meansfrom sklearn.cluster import KMeans def Greedy_Kmeans(data, K): # % Standard K-means clustering # % Inputs: data, n*d data matrix with n instances and d features; # K, number of clusters; # % Outputs: greedy_initial, the greedy initialization k*d data matrix; # % 04/20/2020 ## get the data shape and create an empty centroid array d = data.shape[1] ## choose the mean of all points as the initialized centroid initial_centroid = np.mean(data, axis=0) centroid_k = [] centroid_k.append(initial_centroid) for k in range(2, K+1): tmp_score = np.zeros(len(data)) tmp_centroid = [] for i in range(len(data)): ## use my own K-means to select the centroids which has the lowest ObjV labels, centroids, ObjV = myKmeans(data, k, centroids=centroid_k[k-2]) tmp_centroid.append(centroids) tmp_score[i] = ObjV centroid_k.append(tmp_centroid[np.argmin(tmp_score)]) greedy_initial = centroid_k[k-1] return greedy_initialTestinggreedy_initial = Greedy_Kmeans(three_globs, 3) greedy_initial plt.scatter(three_globs[:, 0], three_globs[:, 1], marker='.') plt.scatter(greedy_initial[:, 0], greedy_initial[:,1], c='r') plt.show()Kernel K-meansfrom sklearn.cluster import KMeans from utils import * from math import exp, inf def myKernalKmeans(data, K, sigma): # % Standard K-means clustering # % Inputs: data, n*d data matrix with n instances and d features; # K, number of clusters; # sigma, the variance we want the kernel to have; # % Outputs: labels, n*1 indicator matrix with elements from 1 to K # % objV, objective function value of K-means # % 04/20/2020 maxIter = 100; threshold = 0.04; old_ObjV = inf; n = len(data) labels = np.zeros([n]) for i in range(n): labels[i] = np.random.choice(K, 1, replace = True) ## generate the Kernel matrix Kmatrix = kernel(data, sigma) for i in range(maxIter): labels, ObjV = assignLabel(data, labels,Kmatrix, n, K) ## update labels if old_ObjV-ObjV > threshold: old_ObjV = ObjV return labels, ObjV def assignLabel(data, labels, Kmatrix, n, K): ObjV = 0 distance = np.zeros([n, K]) for i in range(n): for k in range(K): ## computing distance based on objective functions distance[i,k] = -2*np.sum(Kmatrix[i,np.where(labels==k)[0]])/np.sum(labels==k) +\ np.sum(Kmatrix[np.where(labels==k)[0],np.where(labels==k)[0]])/(np.sum(labels==k)**2) value = np.min(distance[i,:]) ObjV += value labels[i] = np.argmin(distance[i,:]) return labels, ObjV def squaredDistance(a, b): return np.linalg.norm(a-b)**2 def kernel(data, sigma): """ Inputs: data, n*d data matrix with n instances and d features K, number of clusters; #####sigma, the variance we want the kernel to have;##### Outputs: Kmatrix, n*n Kernel matrix """ nData = len(data) Kmatrix = np.zeros([nData,nData]) # nData x nData matrix # Calculate the Kmatrix matrix for i in range(nData): for j in range(i,nData): if i != j: # diagonal element of matrix = 0 # ############# Polynomial Kernel ############# # # kernel: K(xi,xj) = xi*xj**2 # Kmatrix[i][j] = abs(data[i,0]*data[j,0]) + abs(data[i,1]*data[j,1]) # Kmatrix[j][i] = Kmatrix[i][j] ############## Gaussian Kernel ############## # kernel: K(xi,xj) = exp( -|xi-xj|**2 / (2*sigma**2) ) square_dist = squaredDistance(data[i],data[j]) 
base = 2.0 * sigma**2 Kmatrix[i][j] = exp(-square_dist/base) Kmatrix[j][i] = Kmatrix[i][j] return KmatrixTesting## loading data import pandas as pd import numpy as np import matplotlib.pylab as plt eye = pd.read_csv('/Users/tjmask/Desktop/Semester2/DataMining/HW1/hw1/Homework_1/Datasets/eye.csv') eye = np.array(eye.iloc[:,1:]) labels, ObjV = myKernalKmeans(eye, 2) plt.scatter(eye[:, 0], eye[:, 1], marker='.', c = labels) plt.title('Polynomial Kernel Result') plt.show() labels, ObjV = myKernalKmeans(eye, 2, 0.05) plt.scatter(eye[:, 0], eye[:, 1], marker='.', c = labels) plt.title('Gussian Kernel Result') plt.show()Debugging **pdb** implements an interactive debugging environment for Python programs. It includes features to let you pause your program, look at the values of variables, and watch program execution step-by-step, so you can understand what your program actually does and find bugs in the logic. Starting the Debugger **From the Command Line**def seq(n): for i in range(n): print(i) return seq(5)0 1 2 3 4**From Within Your Program**import pdb #interactive debugging def seq(n): for i in range(n): pdb.set_trace() # breakpoint print(i) return seq(5) # c : continue # q: quit # h: help # list # p: print # p locals() # p globals()> (7)seq() -> print(i) (Pdb) list 2 3 #interactive debugging 4 def seq(n): 5 for i in range(n): 6 pdb.set_trace() # breakpoint 7 -> print(i) 8 return 9 10 seq(5) 11 12 (Pdb) p i 0 (Pdb) p n 5 (Pdb) p locals() {'i': 0, 'n': 5} (Pdb) c 0 > (6)seq() -> pdb.set_trace() # breakpoint (Pdb) list 1 import pdb 2 3 #interactive debugging 4 def seq(n): 5 for i in range(n): 6 -> pdb.set_trace() # breakpoint 7 print(i) 8 return 9 10 seq(5) 11 (Pdb) p locals() {'i': 1, 'n': 5} (Pdb) c 1 > (7)seq() -> print(i) (Pdb) p locals() {'i': 2, 'n': 5} (Pdb) c 2 > (6)seq() -> pdb.set_trace() # breakpoint (Pdb) p locals() {'i': 3, 'n': 5} (Pdb) h Documented commands (type help ): ======================================== EOF c d h [...]CS-S109A Introduction to Data Science Lecture 11: NNs and Visualizating Prediction Models**Harvard University****Summer 2020****Instructors:** **Authors:** , , , , , ---## RUN THIS CELL TO GET THE RIGHT FORMATTING import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles)Table of Contents Learning Goals Brief Tree Review Architecture of Artificial Neural Networks (ANNs) Variable Importances Interpreting Prediction Modelsimport pandas as pd import sys import numpy as np import scipy as sp import sklearn as sk import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.decomposition import PCA from sklearn import tree from sklearn import ensemble # Here are the decision trees from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier import tensorflow as tf print(tf.__version__) # You should see a 2.0.0 here! # sns.set(style="ticks") # %matplotlib inline2.2.0Learning GoalsThis Jupyter notebook accompanies Lecture 11. By the end of this lecture, you should be able to:- have a better grasp of neural network archetecture- interpret a few different types of variable importances- interpret a prediction model by exploring the relationships of predictors with the response through prediction plots. 
Part 0: Data WranglingFor this notebook we will be using the heart data set we've used all semester for performing classification:heart_df = pd.read_csv('../data/Heart.csv') print(heart_df.shape) heart_df.head() heart_df.describe() # Split into X and y X = heart_df[['Age','Sex','ChestPain','RestBP','Chol','Fbs','RestECG','MaxHR','ExAng','Oldpeak','Slope','Ca','Thal']] y = 1*(heart_df['AHD']=='Yes') # fix categorical data types for machine learning methods...don't worry about the warning message X['ChestPain']=X['ChestPain'].astype('category') X['ChestPain']=X['ChestPain'].cat.codes X['Thal']=X['Thal'].astype('category') X['Thal']=X['Thal'].cat.codes X.dtypes # imputing zeroes for the missing values in `CA` X['Ca']=X['Ca'].fillna(0) X.describe() # split into train and test from sklearn.model_selection import train_test_split itrain, itest = train_test_split(range(X.shape[0]), train_size=0.80) X_train = X.iloc[itrain, :] X_test = X.iloc[itest, :] y_train = y.iloc[itrain] y_test = y.iloc[itest]Part 1: tree-based modelsBelow `max_depth=3` and `max_depth=10` decision trees are fit.#fit the simple (depth = 3) decision tree classifier dt3= tree.DecisionTreeClassifier(max_depth = 3) dt3.fit(X_train,y_train) #fit an overfit (depth = 10) decision tree classifier dt10 = tree.DecisionTreeClassifier(max_depth = 10) dt10.fit(X_train,y_train)**Q1.1** Calculate the AUC on both train and test, and interpret the results###### # Your code here ###### print("AUC on train for dt10:",sk.metrics.roc_auc_score(y_train,dt10.predict_proba(X_train)[:,1])) print("AUC on test for dt10:",sk.metrics.roc_auc_score(y_test,dt10.predict_proba(X_test)[:,1])) print("AUC on train for dt3:",sk.metrics.roc_auc_score(y_train,dt3.predict_proba(X_train)[:,1])) print("AUC on test for dt3:",sk.metrics.roc_auc_score(y_test,dt3.predict_proba(X_test)[:,1])) roc_dt3_train = sk.metrics.roc_curve(y_train,dt3.predict_proba(X_train)[:,1], drop_intermediate=False) plt.plot(roc_dt3_train[0],roc_dt3_train[1]); roc_dt3_test = sk.metrics.roc_curve(y_test,dt3.predict_proba(X_test)[:,1], drop_intermediate=False) plt.plot(roc_dt3_test[0],roc_dt3_test[1]); roc_dt10_train = sk.metrics.roc_curve(y_train,dt10.predict_proba(X_train)[:,1], drop_intermediate=False) plt.plot(roc_dt10_train[0],roc_dt10_train[1]); roc_dt10_test = sk.metrics.roc_curve(y_test,dt10.predict_proba(X_test)[:,1], drop_intermediate=False) plt.plot(roc_dt10_test[0],roc_dt10_test[1]);**Solution:**The AUC is higher in train for the more complex tree with depth 10, but is lower in the test set. This is a clear illustration of overfitting: the model performs well with the same set of data that it was fit on, but performs poorly with out-of-sample data. We continue fitting tree-based models: first with a random forest, and then a boosted tree model. 
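Before moving on, the overfitting seen above could also be diagnosed more systematically by cross-validating over `max_depth`. A minimal sketch, assuming the `tree` import and the `X_train`/`y_train` split from Part 0 (the depth grid and `cv=5` are illustrative choices, not from the original lecture):

from sklearn.model_selection import cross_val_score
# Sketch: mean 5-fold cross-validated AUC for a few candidate tree depths
for depth in [2, 3, 5, 10]:
    dt = tree.DecisionTreeClassifier(max_depth=depth)
    cv_auc = cross_val_score(dt, X_train, y_train, cv=5, scoring='roc_auc')
    print("max_depth =", depth, "mean CV AUC =", round(cv_auc.mean(), 3))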
Note: these are untuned.np.random.seed(109) randomforest = RandomForestClassifier(n_estimators=100, max_features='sqrt', max_depth=10) randomforest.fit(X_train,y_train); adaboost = AdaBoostClassifier( base_estimator=DecisionTreeClassifier(max_depth=4), n_estimators=500, learning_rate=.75) adaboost.fit(X_train,y_train); # evaluating print("AUC on train for randomforest:",sk.metrics.roc_auc_score(y_train,randomforest.predict_proba(X_train)[:,1])) print("AUC on test for randomforest:",sk.metrics.roc_auc_score(y_test,randomforest.predict_proba(X_test)[:,1])) print("AUC on train for adaboost:",sk.metrics.roc_auc_score(y_train,adaboost.predict_proba(X_train)[:,1])) print("AUC on test for adaboost:",sk.metrics.roc_auc_score(y_test,adaboost.predict_proba(X_test)[:,1]))AUC on train for randomforest: 1.0 AUC on test for randomforest: 0.9308278867102396 AUC on train for adaboost: 1.0 AUC on test for adaboost: 0.9052287581699346**Q1.2** What would happen to the above AUC on train and test (random forest and adaboost) if the number of estimators (base trees) were increased for each? **Solution:**The AUC cannot get better on the train set for either model (1.0 is the maximum), but on the test there is a different story in expectation: the random forest would likely have a very similar AUC on test (may increase slightly) but the adaboost model would eventually get worse and worse as the model overfit more to the train set. Part 2: NN modelBelow we build our first NN model for these datamodel_NN = tf.keras.models.Sequential([ tf.keras.layers.Dense(100, input_shape=(pd.DataFrame(X_train).shape[1],), activation='relu'), tf.keras.layers.Dense(25, activation='tanh'), tf.keras.layers.Dense(1, activation='linear'), ]) model_NN.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 100) 1400 _________________________________________________________________ dense_1 (Dense) (None, 25) 2525 _________________________________________________________________ dense_2 (Dense) (None, 1) 26 ================================================================= Total params: 3,951 Trainable params: 3,951 Non-trainable params: 0 _________________________________________________________________**Q2.1** How many hidden layers does this model have? What should be the loss function for this model? What is incorrect in the model architecture above? Be sure to fix it... **Solution:**This model has 2 hidden layers (and one output layer). Since this is a classification problem, the loss should be `binary_crossentropy` and the activation in the last (output) layer should be `sigmoid`, not `linear`.This model has 3951 parameters:1. The 100 nodes in layer-1 has 13 inputs (predictors) plus a bias each, so 100*(13+1)=1400 parameters in this layer.2. The 25 nodes in layer-2 has 100 inputs (nodes from layer-1) plus a bias each, so 25*(100+1)=2525 parameters in this layer.3. 
The single node in the output layer has 25 inputs (nodes from layer-2) plus a bias term, so 26 parameters in this layer.from numpy.random import seed seed(109) tf.random.set_seed(121) # now fit the model, and evaluate: model_NN = tf.keras.models.Sequential([ tf.keras.layers.Dense(100, input_shape=(pd.DataFrame(X_train).shape[1],), activation='relu'), tf.keras.layers.Dense(25, activation='tanh'), tf.keras.layers.Dense(1, activation='sigmoid'), ]) model_NN.compile(optimizer='ADAM', loss='binary_crossentropy', metrics=['acc']) history = model_NN.fit(X_train, y_train, epochs=100, batch_size=64, verbose=0) print("AUC on train for NN_model:",sk.metrics.roc_auc_score(y_train,model_NN.predict(X_train))) print("AUC on test for NN_model:",sk.metrics.roc_auc_score(y_test,model_NN.predict(X_test)))AUC on train for NN_model: 0.8735233516483517 AUC on test for NN_model: 0.863834422657952**Q2.2** Create a new NN model called `model_NN2` that improves upon the fixed model above. Why do you suppose it is doing a better job?###### # your code here ###### # now fit the model, and evaluate: model_NN2 = tf.keras.models.Sequential([ tf.keras.layers.Dense(100, input_shape=(pd.DataFrame(X_train).shape[1],), activation='relu'), tf.keras.layers.Dense(25, activation='tanh'), tf.keras.layers.Dense(100, activation='relu'), tf.keras.layers.Dense(50, activation='tanh'), tf.keras.layers.Dense(1, activation='sigmoid'), ]) model_NN2.compile(optimizer='ADAM', loss='binary_crossentropy', metrics=['acc']) history = model_NN2.fit(X_train, y_train, epochs=200, batch_size=128, verbose=0) print("AUC on train for NN_model:",sk.metrics.roc_auc_score(y_train,model_NN2.predict(X_train))) print("AUC on test for NN_model:",sk.metrics.roc_auc_score(y_test,model_NN2.predict(X_test)))AUC on train for NN_model: 0.9425824175824176 AUC on test for NN_model: 0.8006535947712419**Solution:**This model added two intermediate layer that repeated the hidden layers from before, and increased the number of epochs to 200. This model may be performing better on the test set since it has more flexibility to approximate the true signal better; the first NN model may have been a little biased. Part 3: Variable ImportanceBelow the variable importances are created for the 4 tree-based models:#Default Variable Importance plt.figure(figsize=(24,6)) #plt.set_xticks() #plt.set_xticklabels(X.columns) num=10 plt.subplot(1, 4, 1) dt3_importances = dt3.feature_importances_ order = np.flip(np.argsort(dt3_importances))[0:num] plt.barh(range(num),dt3_importances[order],tick_label=X.columns[order]); plt.title("Relative Variable Importance for dt3") plt.subplot(1, 4, 2) dt10_importances = dt10.feature_importances_ order = np.flip(np.argsort(dt10_importances))[0:num] plt.barh(range(num),dt10_importances[order],tick_label=X.columns[order]); plt.title("Relative Variable Importance for dt10") plt.subplot(1, 4, 3) rf_importances = randomforest.feature_importances_ order = np.flip(np.argsort(rf_importances))[0:num] plt.barh(range(num),rf_importances[order],tick_label=X.columns[order]); plt.title("Relative Variable Importance for rf") plt.subplot(1, 4, 4) adaboost_importances = adaboost.feature_importances_ order = np.flip(np.argsort(adaboost_importances))[0:num] plt.barh(range(num),adaboost_importances[order],tick_label=X.columns[order]); plt.title("Relative Variable Importance for adaboost");**Q3.1** Interpret the plots above: why do they make sense? How would the random forest variable imporance change if `max_features` was altered? 
**Solution:**The plots make sense: the decision tree of depth 3 has all of its splits defined by just a handful of variables as there are only 7 splits in this tree. The tree of depth 10 has a lot of importance on just one variable (`ChestPain`, with relative importance of about 0.26), which defines the most influential splits on MSE when building the tree. The random forest and adaboost both have a lot more uniformly spread-out importance, which makes sense since in the random forest there are some base trees that do not consider `ChestPain` in the first splits, and in adaboost, the learning rate reduces the influence that any one single split contributes to the reduction of MSE. Below we use the [`eli5`](https://eli5.readthedocs.io/en/latest/autodocs/sklearn.html#eli5.sklearn.permutation_importance.PermutationImportance) package to perform permutation importance for the random forest model.#pip install eli5 #permutation importance import eli5 from eli5.sklearn import PermutationImportance from eli5.permutation_importance import get_score_importances perm = PermutationImportance(randomforest).fit(X_test, y_test) #eli5.show_weights(perm,feature_names=X.columns) print(X.columns) eli5.show_weights(perm, feature_names = X_train.columns.tolist())/Users/kevinrader/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.scorer module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API. warnings.warn(message, FutureWarning) /Users/kevinrader/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.feature_selection.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.feature_selection. Anything that cannot be imported from sklearn.feature_selection is now part of the private API. warnings.warn(message, FutureWarning)**Q3.2** How do the permutation importances compare to the default feature importance? What is the difference in interpretation? `ChestPain` is still the most influential, while `Thal`, which was the second most important based on relative importance, is near the bottom in the permutation importance. This indicates (and can be explained by the fact) that this variable is highly correlated with other predictors used in the model.#Note: eli5 does not behave well with Keras, by default. perm = PermutationImportance(model_NN, random_state=1).fit(X_train,y_train) #Note: eli5 does not behave well with Keras, by default. But a user-defined scoring function can be passed in: def score(model,X, y): y_pred = model.predict(X) return sk.metrics.accuracy_score(y, y_pred>0.5) perm = PermutationImportance(model_NN, scoring=score, random_state=1).fit(X_train,y_train) eli5.show_weights(perm, feature_names = X_train.columns.tolist())Part 4: Plotting PredictionsBelow we start to interpret relationships from various models based on the predictions from those modelsyhat_rf_train = randomforest.predict_proba(X_train)[:,1] plt.scatter(X_train[['Age']],yhat_rf_train); yhat_rf_test = randomforest.predict_proba(X_test)[:,1] plt.scatter(X_test[['Age']],yhat_rf_test,marker='x'); plt.title("Predicted Probabilities vs. Age from the RF in train and test");**Q4.1** What is the above plot showing? How can it be interpreted? 
**Solution:**This plot shows that there is not much directional relationship being predicted between Age and AHD in the random forest model. It does suggest there are 2 subgroups being predicted in the train set (one group near a probability of zero and one group near 1) which suggests a categorical predictor is causing these differences in predicted values (likely `ChestPain` is the culprit). **Q4.2** Reproduce the above plot for your neural network model. How does it compare? What does it say about Age's relationship with Cardiac Arrest?###### # Your code here # Note: we also show it for a stronger predictor: MaxHR ###### plt.figure(figsize=(14,6)) plt.subplot(1, 2, 1) yhat_nn_train = model_NN2.predict(X_train) plt.scatter(X_train[['Age']],yhat_nn_train); yhat_nn_test = model_NN2.predict(X_test) plt.scatter(X_test[['Age']],yhat_nn_test,marker='x'); plt.title("Predicted Probabilities vs. Age from the NN in train and test"); plt.subplot(1, 2, 2) yhat_nn_train = model_NN2.predict(X_train) plt.scatter(X_train[['MaxHR']],yhat_nn_train); yhat_nn_test = model_NN2.predict(X_test) plt.scatter(X_test[['MaxHR']],yhat_nn_test,marker='x'); plt.title("Predicted Probabilities vs. MaxHR from the NN in train and test");**Q4.3** Fit a logistic regression to the predicted response from your NN model based on Age (in train). Interpret the resultfrom sklearn.linear_model import LogisticRegression ###### # your code here ###### logit = LogisticRegression(C=10000) logit.fit(X_test[['Age']],np.ravel(1*(yhat_nn_test>0.5))) logit.coef_**Solution:**The estimated $\hat{\beta}_1$ is 0.10255, which suggests a positive relationship, on average. A 1-year increase is associated with a $e^{0.10255}=1.108$ multiplicative increase (a 10.8% increase) in the odds of having a cardiac event on average based on the Neural Network model. Below, several different plots are created:1. The predicted probabilities vs. age for any reasonable value of age at the mean values for the other predictors2. The predicted probabilities for each individual vs. Age (sometimes called profile plots) and the averaged individual probabilities vs. Age.3. The median of these individual predicted probability curves, along with the middle 95% ranges at any particular value of Age.means1 = X_train.mean(axis = 0) #means1 =pd.Series(means) means_df = (means1.to_frame()).transpose() #df_repeated = pd.concat(means*3) #print(df_repeated) Ages = np.arange(np.min(X['Age']),np.max(X['Age'])) means_df = pd.concat([means_df]*Ages.size,ignore_index=True) means_df['Age'] = Ages means_df.head() #plots at means yhat_nn = model_NN2.predict_proba(means_df) plt.scatter(X_train['Age'],y_train) plt.plot(means_df['Age'],yhat_nn,color="red") plt.title("Predicted Probabilities vs. Age from NN in train"); #Plots for all observations. 
And then averaged means1 = X_train.mean(axis = 0) #means1 =pd.Series(means) means_df = (means1.to_frame()).transpose() #df_repeated = pd.concat(means*3) #print(df_repeated) Ages = np.arange(np.min(X['Age']),np.max(X['Age'])) means_df = pd.concat([means_df]*Ages.size,ignore_index=True) means_df['Age'] = Ages yhat_nns = [] for i in range(0,X_train.shape[0]): obs = X_train.iloc[i,:].to_frame().transpose() obs_df = pd.concat([obs]*Ages.size,ignore_index=True) obs_df['Age'] = Ages yhat_nn = model_NN2.predict_proba(obs_df) yhat_nns.append(yhat_nn.transpose()) plt.plot(obs_df['Age'],yhat_nn,color='blue',alpha=0.05) plt.plot(obs_df['Age'],np.mean(yhat_nns,axis=0)[0],color='red',linewidth=2); plt.ylim(0,1) plt.title("Predicted Probabilities vs. Age from NN in train for all observations"); plt.plot(obs_df['Age'],np.median(yhat_nns,axis=0)[0],color='red'); plt.plot(obs_df['Age'],np.quantile(yhat_nns,q=.05,axis=0)[0],color='blue'); plt.plot(obs_df['Age'],np.quantile(yhat_nns,q=.95,axis=0)[0],color='blue');형태소 분석han = Hannanum() #불용어 제거 안함. han.nouns(synoplist[0]) n_synoplist = [] for synop in synoplist: s_nouns = ' '.join(han.nouns(synop)) #print(s_nouns) n_synoplist.append(s_nouns) metadata.head() n_synop = n_synoplist새 데이터프레임, csv파일 저장metadata['n_synop'] = n_synop metadata.drop(['synop'], 1) metadata.to_csv('data_all_nsynop_dayoung_ver2.csv', encoding='utf-8')Lale: Type-Driven Auto-ML with Scikit-Learn https://github.com/ibm/lale Example Dataset!pip install 'liac-arff>=2.4.0' import lale.datasets.openml import pandas as pd (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch( 'credit-g', 'classification', preprocess=False) print(f'train_X.shape {train_X.shape}') pd.concat([train_y.tail(), train_X.tail()], axis=1)train_X.shape (670, 20)Algorithm Selection and Hyperparameter Tuningfrom sklearn.preprocessing import Normalizer as Norm from sklearn.preprocessing import OneHotEncoder as OneHot from sklearn.linear_model import LogisticRegression as LR from xgboost import XGBClassifier as XGBoost from sklearn.svm import LinearSVC from lale.operators import make_pipeline, make_union from lale.lib.lale import Project, ConcatFeatures, NoOp lale.wrap_imported_operators() project_nums = Project(columns={'type': 'number'}) project_cats = Project(columns={'type': 'string'}) planned_pipeline = ( (project_nums >> (Norm | NoOp) & project_cats >> OneHot) >> ConcatFeatures >> (LR | LinearSVC(dual=False)| XGBoost)) planned_pipeline.visualize() import sklearn.metrics from lale.lib.lale import Hyperopt auto_optimizer = Hyperopt(estimator=planned_pipeline, cv=3, max_evals=5) auto_trained = auto_optimizer.fit(train_X, train_y) auto_y = auto_trained.predict(test_X) print(f'accuracy {sklearn.metrics.accuracy_score(test_y, auto_y):.1%}')100%|█████████| 5/5 [01:08<00:00, 13.74s/trial, best loss: -0.7507273649370062] accuracy 72.1%Displaying Automation Resultsbest_pipeline = auto_trained.get_pipeline() best_pipeline.visualize() from lale.pretty_print import ipython_display ipython_display(best_pipeline, show_imports=False)JSON Schemashttps://json-schema.org/ipython_display(XGBoost.hyperparam_schema('n_estimators')) ipython_display(XGBoost.hyperparam_schema('booster')) import jsonschema import sys try: XGBoost(n_estimators=0.5, booster='gbtree') except jsonschema.ValidationError as e: print(e.message, file=sys.stderr)Invalid configuration for XGBoost(n_estimators=0.5, booster='gbtree') due to invalid value n_estimators=0.5. 
Schema of argument n_estimators: { "description": "Number of trees to fit.", "type": "integer", "default": 1000, "minimumForOptimizer": 500, "maximumForOptimizer": 1500, } Value: 0.5Customizing Schemasimport lale.schemas as schemas Grove = XGBoost.customize_schema( n_estimators=schemas.Int(minimum=2, maximum=10), booster=schemas.Enum(['gbtree'], default='gbtree')) grove_planned = ( Project(columns={'type': 'number'}) >> Norm & Project(columns={'type': 'string'}) >> OneHot ) >> ConcatFeatures >> Grove grove_optimizer = Hyperopt(estimator=grove_planned, cv=3, max_evals=10) grove_trained = grove_optimizer.fit(train_X, train_y) grove_y = grove_trained.predict(test_X) print(f'accuracy {sklearn.metrics.accuracy_score(test_y, grove_y):.1%}') grove_best = grove_trained.get_pipeline() ipython_display(grove_best, show_imports=False)내외부 판별- 어떤 점의 관하여 다각형의 내외부 판별import matplotlib as mpl import matplotlib.pyplot as plt from shapely.geometry import * import random as rd import math다각형 관련 함수 from Making_Polygon and Centroid# 다각형의 점 만들기(랜덤범위1, 랜덤범위2, 점 개수) def making_pts(r1, r2, n): pts = [] while len(pts) < n: x, y = rd.randint(r1, r2), rd.randint(r1, r2) if (x, y) not in pts: pts.append((x, y)) return pts # 각도 계산(두 점) def calculating_Angle(pt1, pt2): dx, dy = (pt2[0] - pt1[0]), (pt2[1] - pt1[1]) if dx >= 0 and dy == 0: angle = 0 else: angle = math.atan2(dy, dx) * 180 / math.pi return angle # 단순 다각형 만들기(포인트 리스트) def making_Polygon(pts): angle_list = [] result = [0 for _ in range(len(pts))] pts = sorted(pts, key=lambda x: (x[1], x[0])) standard_pt = pts[0] for pt in pts[1:]: angle_of_pt = calculating_Angle(standard_pt, pt) angle_list.append([angle_of_pt, pt]) angle_list = sorted(angle_list, key = lambda x: (x[0], x[1][0], -x[1][1])) result[0] = standard_pt for idx, i in enumerate(angle_list): result[idx+1] = i[1] return result # 다각형의 면적 구하기(포인트 리스트) def area_Polygon(pts): poly_area = 0 for i in range(len(pts)-1): poly_area += (pts[i][0] * pts[i+1][1]) - (pts[i+1][0] * pts[i][1]) return abs(poly_area) / 2 # 다각형의 무게중심 구하기(포인트 리스트) def centroid_Polygon(pts): A = area_Polygon(pts) x = 0 y = 0 for i in range(len(pts)-1): x += (pts[i][0] + pts[i+1][0]) * ((pts[i][0] * pts[i+1][1]) - (pts[i+1][0] * pts[i][1])) y += (pts[i][1] + pts[i+1][1]) * ((pts[i][0] * pts[i+1][1]) - (pts[i+1][0] * pts[i][1])) centroid_x = 1/6 / A * x centroid_y = 1/6 / A * y return [centroid_x, centroid_y]다각형 내부의 점 판단 방법1. The Crossing Number (cn) 교차 횟수 방법 :점 p에서 출발한 ray or vector(직선, 광선)이 폴리곤 경계선을 몇번 교차해 가는지 세는 것. "교차해간 경계선 수 가 짝수 일때 이 점은 바깥에 존재한다"라고 판단한다. 홀수 일땐 점은 내부에 있다고 판단한다.2. The Winding Number (wn) 감싸는 횟수 방법 : 점 p를 폴리곤이 몇번 감아 도는지 세는것, "감싸도는 횟수" wn = 0일때에만 점은 바깥에 존재한다. 다른 경우, 점은 내부에 있다고 판단한다. 두 방법의 약점wn의 경우, 복잡한 닫힌 폴리곤 내부에 점이 있는지도 정확하게 판단하지만 도넛모양 처럼 2개 이상의 폴리곤이 감싸고 있고 도넛의 안쪽의 외부영역에 대해서도 내부로 판단한다.cn의 경우, ray or vector가 폴리곤의 꼭지점을 통과할 경우 일반적인 방법으로는 꼭지점에 걸친 2선분에 대해 교차한다고 판단하므로 이를 처리해줘야 한다. 동시에 선분이 ray와 기울기가 같은 수평일 경우에도 이를 어떻게 판단해야 할지 정해줘야 한다.경계를 교차할 때 규칙- 위쪽 경계선에는 처음 끝점은 포함되고 마지막 끝점은 제외된다.(단, 위쪽과 아래쪽을 어떻게 구분할지에 대한 방법론 필요)- 아래쪽 경계선에는 처음 끝점은 제외되고 마지막 끝점은 포함된다.- 수평 경계선들은 다루지 않는다.- 경계선-ray의 교차 지점은 정확하게 점 p의 오른쪽 방향에서만 존재해야 한다. 
(오른쪽 경계선에 걸친 점들은 외부로 취급되고, 왼쪽 경계에 걸친 점들은 내부로 취급되는 결과를 줌) 결론도넛형태가 아닌 닫힌 폴리곤의 모든 경우에 wn이 보다 정확하므로, 임의의 폴리곤에 대해 점이 내외부에 있는지 판별할때는 wn 알고리즘이 우선 고려되어야 한다.wn을 먼저 적용한 뒤, wn 검토 값이 2이상일때 cn을 적용하여 내외부 판단을 하는것이 가장 정확한 판단이 가능할 것으로 예상된다.# 교차점 검토 def location_cross_point(pts): # x1, y1, x2, y2, x3, y3, x4, y4 = pts[0][0], pts[0][1], pts[1][0], pts[1][1], pts[2][0], pts[2][1], pts[3][0], pts[3][1] x1, y1, x2, y2, x3, y3, x4, y4 = pts[0], pts[1], pts[2], pts[3], pts[4], pts[5], pts[6], pts[7] mn = ((x1 - x2)*(y3- y4)) - ((y1 - y2)*(x3 - x4)) # mn = 0 을 만족하면 두 직선은 평행 또는 일치 if mn != 0: cx = (((x1*y2)-(y1*x2))*(x3-x4) - (x1-x2)*((x3*y4)-(y3*x4))) / mn cy = (((x1*y2)-(y1*x2))*(y3-y4) - (y1-y2)*((x3*y4)-(y3*x4))) / mn return [cx, cy] return False def ccw(pts): x1, y1, x2, y2, x3, y3 = pts[0][0], pts[0][1], pts[1][0], pts[1][1], pts[2][0], pts[2][1] cross_product = ((x2-x1)*(y3-y1)) - ((x3-x1)*(y2-y1)) if cross_product < 0: return -1 elif cross_product > 0: return 1 else: return 0 def finding_bigger(pts): pts = sorted(pts, key= lambda x: (x[0], x[1])) bigger, smaller = pts[1], pts[0] return bigger, smaller def angle_of_line(s1, b1): if (s1[0]-b1[0]) != 0: angle = (s1[1]-b1[1])/(s1[0]-b1[0]) else: angle = 90 return angle def check_cross(pts): pts = pts p1, p2, p3, p4 = [pts[0], pts[1]], [pts[2], pts[3]], [pts[4], pts[5]], [pts[6], pts[7]] sp, ep = [pts[4], pts[5]], [pts[6], pts[7]] v1 = ccw([p1, p2, p3]) * ccw([p1, p2, p4]) v2 = ccw([p3, p4, p1]) * ccw([p3, p4, p2]) b1, s1 = finding_bigger([p1, p2]) b2, s2 = finding_bigger([p3, p4]) ang1 = angle_of_line(s1, b1) ang2 = angle_of_line(s2, b2) # 두 선분이 동일 선상에 있을 경우 # ex) [[0,0], [5,0], [6,0], [10,0]] if v1 == 0 and v2 == 0 and ang1 == ang2: # 두 선분이 동일 선상에 있으면서 두 선분 중 동일한 좌표가 있을경우, 그 좌표가 교차점이 됨 if b1 == s2: intersection_point = b1 elif b2 == s1: intersection_point = b2 # 두 선분이 동일 선상에 있으면서, 일정부분 겹칠 경우 elif b2 == b1 or s2 == s1: intersection_point = 'Much' elif b1 >= s2 and b2 >= s1: intersection_point = 'Much' else: intersection_point = 'None' # 교차하지 않음 elif v1 <= 0 and v2 <= 0: intersection_point = location_cross_point(pts) else: intersection_point = 'None' # 교차점이 없거나 다수일 경우 if intersection_point == 'Much' or intersection_point == 'None': return False # 교차점이 1개인 경우 else: # 교차점이 시작점일 경우 Ture if intersection_point == sp: return True # 교차점이 끝점일 경우 False elif intersection_point == ep: return False # 교차점이 그 외 위치일 경우 True else: return True # Crossing_Number # 선에 걸칠경우 내부에 속하는것으로 # 교차점 판별에 시작점 혹은 끝점 하나만 들어가게 해야함 # 첫점과 끝점 동일하게 def crossing_number(point, polygon_pts): number = 0 px, py = point.x, point.y polygon = Polygon(polygon_pts) minx, miny, maxx, maxy = polygon.bounds for i in range(len(polygon_pts)-1): result = check_cross([px, py, maxx, py, polygon_pts[i][0], polygon_pts[i][1], polygon_pts[i+1][0], polygon_pts[i+1][1]]) if result == True: number += 1 if number % 2 == 1: return True, number else: return False, number # 랜덤 폴리곤 생성 pts_1 = making_pts(0, 10, 5) pts_2 = making_Polygon(pts_1) pts_2.append(pts_2[0]) polygon = Polygon(pts_2) # 랜덤 점 생성 p = Point(rd.randint(-1, 11), rd.randint(-1, 11)) fig, ax = plt.subplots(1, 3, figsize=(12, 4)) for i in range(3): ax[i].set_xlim(-2, 12) ax[i].set_ylim(-2, 12) ax[0].scatter(p.x, p.y) ax[1].plot(*polygon.exterior.xy) ax[2].scatter(p.x, p.y) ax[2].plot(*polygon.exterior.xy) plt.show() result = crossing_number(p, pts_2) print(f'다각형 내부의 점 판단 : {result} (내부 : True, 외부 : False)') fig, ax = plt.subplots(1, 4, figsize=(18, 4)) pt = Point(3, 3) polygons = [[[0,0], [5,0], [6,3], [3,7], [0,5], [0,0]], [[0,0], [6,0], [6,3], [9,3], [9,6], 
[0,6], [0,0]], [[0,0], [3,0], [3,3], [6,3], [6,6], [0,6], [0,0]], [[0,0], [4,0], [4,3], [5,3], [5,4], [7,4], [7,3], [9,3], [9,6], [0,6], [0,0]]] for i in range(4): ax[i].set_xlim(-2, 12) ax[i].set_ylim(-2, 12) ax[i].scatter(pt.x, pt.y) polygon = Polygon(polygons[i]) ax[i].plot(*polygon.boundary.xy) result, number = crossing_number(pt, polygons[i]) ax[i].set_title(f'Result : {result}, Crossing Number : {number}') plt.show() # Winding_Number def is_left(P0, P1, P2): return (P1[0] - P0[0]) * (P2[1] - P0[1]) - (P2[0] - P0[0]) * (P1[1] - P0[1]) def winding_number(point, polygon_pts): wn = 0 # the winding number counter px, py = point.x, point.y point = [px, py] # repeat the first vertex at end # V = tuple(V[:]) + (V[0],) for i in range(len(polygon_pts)-1): if polygon_pts[i][1] <= point[1]: if polygon_pts[i+1][1] > point[1]: if is_left(polygon_pts[i], polygon_pts[i+1], point) > 0: wn += 1 else: if polygon_pts[i+1][1] <= point[1]: if is_left(polygon_pts[i], polygon_pts[i+1], point) < 0: wn -= 1 if wn <= 0 : return False, wn else: return True, wn fig, ax = plt.subplots(1, 4, figsize=(18, 4)) pt = Point(3, 3) polygons = [[[0,0], [5,0], [6,3], [3,7], [0,5], [0,0]], [[0,0], [6,0], [6,3], [9,3], [9,6], [0,6], [0,0]], [[0,0], [3,0], [3,3], [6,3], [6,6], [0,6], [0,0]], [[0,0], [4,0], [4,3], [5,3], [5,4], [7,4], [7,3], [9,3], [9,6], [0,6], [0,0]]] for i in range(4): ax[i].set_xlim(-2, 12) ax[i].set_ylim(-2, 12) ax[i].scatter(pt.x, pt.y) polygon = Polygon(polygons[i]) ax[i].plot(*polygon.boundary.xy) # result, number = crossing_number(pt, polygons[i]) result, number = winding_number(pt, polygons[i]) ax[i].set_title(f'Result : {result}, Winding Number : {number}') plt.show() polygon_pts = [[0,0], [10,0], [10,7], [2,7], [2,4], [8,4], [8,2], [4,2], [4,9], [0,9], [0,0]] polygon = Polygon(polygon_pts) pts = [[-1,-1], [3,3], [3,5], [6,3]] fig, ax = plt.subplots(1, 4, figsize=(18, 4)) for i in range(4): point = Point(pts[i]) ax[i].set_xlim(-2, 12) ax[i].set_ylim(-2, 12) ax[i].plot(*polygon.boundary.xy) ax[i].fill(*polygon.boundary.xy, color='orange', alpha=0.5) ax[i].scatter(point.x, point.y) # result, number = crossing_number(pt, polygons[i]) result, number = winding_number(point, polygon_pts) ax[i].set_title(f'Result : {result}, Winding Number : {number}') plt.show() fig, ax = plt.subplots(1, 2, figsize=(8, 4)) polygon_pts = [[0,0], [10,0], [10,7], [2,7], [2,4], [8,4], [8,2], [4,2], [4,9], [0,9], [0,0]] polygon = Polygon(polygon_pts) br = Polygon([[0,0], [10,0], [10,7], [4,7], [4,9], [0,9], [0,0]]) sr1 = Polygon([[4,2], [8,2], [8,4], [4,4], [4,2]]) sr2 = Polygon([[2,4], [4,4], [4,7], [2,7], [2,4]]) pts = [[3,3], [3,5], [6,3]] ax[0].fill(*br.boundary.xy, color='orange', alpha=0.5) ax[0].fill(*sr1.boundary.xy, color='white', zorder=1) ax[0].fill(*sr2.boundary.xy, color='white', zorder=1) ax[1].fill(*polygon.boundary.xy, color='orange', alpha=0.5) for i in range(2): for j in range(len(pts)): ax[i].set_xlim(-2, 12) ax[i].set_ylim(-2, 12) ax[i].plot(*polygon.boundary.xy, color='k', linewidth=1) ax[i].scatter(pts[j][0], pts[j][1], color='navy', zorder=2) ax[0].set_title("Crossing Number Method") ax[1].set_title("Winding Number Method") plt.show()만약 폴리곤이 간단하다면(예, 스스로 겹치는 경우가 없음), 이 두 방법은 모든 점들에 대해 같은 결과를 줄 것이다. 하지만 위의 도형처럼 시계방향 or 반시계방향처럼 하나의 방향으로 계속 전개되지 않고, 방향이 변화하는 폴리곤이면서 그로 인해 겹치는 영역이 발생하는 경우 폴리곤의 내외부의 판단이 두가지 경우로 가능하다.[[2,4], [4,4], [4,7], [2,7], [2,4]] 부분에 대해 Crossing Number를 사용한 경우 바깥에 있다고 나오지만, Winding Number를 사용할 시 내부에 존재한다. 또한 위와 같이 색이나 다른 방법으로 구분하지 않는다면 해당 부분에 대한 내외부 판단이 더욱 어려울 것이다. 
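As a quick cross-check on both implementations, the results can be compared against shapely's own point-in-polygon test; a minimal sketch, assuming the crossing_number and winding_number functions defined above (note that shapely's Polygon.contains treats boundary points as outside, so it may differ from the boundary convention used here):

# Sketch: compare the custom tests with shapely's built-in containment test
poly_pts = [[0,0], [5,0], [6,3], [3,7], [0,5], [0,0]]
poly = Polygon(poly_pts)
for xy in [(3, 3), (11, 3), (0, 0)]:
    p = Point(xy)
    print(xy, crossing_number(p, poly_pts)[0], winding_number(p, poly_pts)[0], poly.contains(p))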
폴리곤의 전체 좌표를 통해 겹친영역을 구분한다고 하면, 겹친 영역에서의 점들은 wn=2를 가지며 폴리곤 안에 두번 중복해서 내부에 위치함을 나타낸다. 즉, wn이 cn보다 더 직관적인 답을 준다고 볼 수 있다.관련 refer : https://www.ijemin.com/blog 정확도 분석단순 폴리곤에 대해 어느정도의 정확도의 내외부 판단을 하는지에 대해,가장 정확한 측정을 위해서는 모든 판단 경우에 대해 답을 가지고 진행해야 하지만, 그 과정이 너무 많은 시간과 노력이 소모되고 위의 과정에서 오판단이 우려되는 일부 경우들을 먼저 시험했기 때문에 cn(crossing number method)과 wn(winding number method) 두 방법이 모두 틀렸다는 상황을 제외하고 진행함첫번째로 100번의 랜덤 폴리곤(꼭지점의 개수는 고정)과 랜덤 점을 생성하고 각 경우에 대해 cn과 wn을 수행하여 서로 결과값이 다를 경우(True & False) 해당 랜덤 폴리곤과 랜덤 점의 좌표를 리스트에 저장하고 완료된 리스트들을 다시 판단하는 방법으로 진행i = 0 diff_list = [] while i < 100: # 랜덤 폴리곤 생성 pts_1 = making_pts(0, 10, 5) pts_2 = making_Polygon(pts_1) pts_2.append(pts_2[0]) polygon = Polygon(pts_2) # 랜덤 점 생성 p = [rd.randint(-1, 11), rd.randint(-1, 11)] point = Point(p) # 점, 폴리곤 내외부 판단 함수 수행(cn, wn ) result_cn, cn = crossing_number(point, pts_2) result_wn, wn = winding_number(point, pts_2) # cn, wn 값이 다를 경우 리스트에 추가 if result_cn != result_wn: diff_list.append([p, pts_2, result_cn, result_wn]) i += 1 # len(diff_list)에 따른 출력 플롯의 4 * n의 n 도출 if len(diff_list) % 4 == 0: n = len(diff_list) // 4 else: n = (len(diff_list) // 4) + 1 fig, ax = plt.subplots(n, 4, figsize=(16, n*4)) for i in range(n): for j in range(4): if (i*4)+j < len(diff_list): pt = diff_list[(i*4)+j][0] polygon_pts = diff_list[(i*4)+j][1] result_cn = diff_list[(i*4)+j][2] result_wn = diff_list[(i*4)+j][3] point = Point(pt) polygon = Polygon(polygon_pts) ax[i][j].plot(*polygon.boundary.xy) ax[i][j].scatter(point.x, point.y) ax[i][j].set_xlim(-2, 12) ax[i][j].set_ylim(-2, 12) ax[i][j].set_xticks([]) ax[i][j].set_yticks([]) ax[i][j].set_title(f'result_cn : {result_cn}, result_wn : {result_wn}') plt.show()서로 다른경우1. 선에 걸친 경우 (cn과 wn의 값이 비일관적임)cn == False and wn == True : 1, 8 cn == True and wn == False : 4, 11 2. 
폴리곤 외부에 있는 점의 수평선(vector)이 폴리곤 어느 꼭지점과 동일선상일때 (일관적)cn == True and wn == False: 2, 3, 5, 6, 7, 9, 10, 12, 13, 14, 15 보통 점의 우측 방향 즉, +x 축으로 수평선을 진행시키는데 점의 x좌표가 폴리곤의 x최대값보다 클 때도 폴리곤의 x최대값과 점의 x값을 연결하기 때문에 -x방향의 수평선이 생기고 cn 검토를 함.이를 해결하기 위해 점의 좌표와 폴리곤의 maxx, maxy, minx, miny 좌표들의 관계 선검토가 필요# 수정 def crossing_number(point, polygon_pts): number = 0 px, py = point.x, point.y polygon = Polygon(polygon_pts) minx, miny, maxx, maxy = polygon.bounds # px > maxx 경우 점은 폴리곤 외부에 위치 if px < minx or px > maxx: return False, number # py < minx or py > maxx 경우 폴리곤 외부에 위치 if py < minx or py > maxx: return False, number for i in range(len(polygon_pts)-1): result = check_cross([px, py, maxx, py, polygon_pts[i][0], polygon_pts[i][1], polygon_pts[i+1][0], polygon_pts[i+1][1]]) if result == True: number += 1 if number % 2 == 1: return True, number else: return False, number i = 0 diff_list = [] while i < 100: # 랜덤 폴리곤 생성 pts_1 = making_pts(0, 10, 5) pts_2 = making_Polygon(pts_1) pts_2.append(pts_2[0]) polygon = Polygon(pts_2) # 랜덤 점 생성 p = [rd.randint(-1, 11), rd.randint(-1, 11)] point = Point(p) # 점, 폴리곤 내외부 판단 함수 수행(cn, wn ) result_cn, cn = crossing_number(point, pts_2) result_wn, wn = winding_number(point, pts_2) # cn, wn 값이 다를 경우 리스트에 추가 if result_cn != result_wn: diff_list.append([p, pts_2, result_cn, result_wn]) i += 1 if len(diff_list) % 4 == 0: n = len(diff_list) // 4 else: n = (len(diff_list) // 4) + 1 fig, ax = plt.subplots(n, 4, figsize=(16, n*4)) for i in range(n): for j in range(4): if (i*4)+j < len(diff_list): pt = diff_list[(i*4)+j][0] polygon_pts = diff_list[(i*4)+j][1] result_cn = diff_list[(i*4)+j][2] result_wn = diff_list[(i*4)+j][3] point = Point(pt) polygon = Polygon(polygon_pts) ax[i][j].plot(*polygon.boundary.xy) ax[i][j].scatter(point.x, point.y) ax[i][j].set_xlim(-2, 12) ax[i][j].set_ylim(-2, 12) ax[i][j].set_xticks([]) ax[i][j].set_yticks([]) ax[i][j].set_title(f'result_cn : {result_cn}, result_wn : {result_wn}') plt.show()Two Qubit Circuitsfrom qiskit import * from math import pi import numpy as np from qiskit.visualization import * import matplotlib.pyplot as plt from qutip import *2. Double qubit circuit Basis states: $|00 \rangle, |01 \rangle, |10 \rangle,|11 \rangle $$ \psi _{2q} = \alpha |00 \rangle + \beta |01 \rangle + \gamma |10 \rangle + \delta |11 \rangle $ where $\alpha,\beta,\gamma, \delta \in \mathcal{C}^{4}$$\alpha^{2} + \beta^{2} + \gamma^{2} + \delta^{2} = 1$ 2.1. $I \otimes U$qc = QuantumCircuit(2) qc.h(0) qc.draw(output='mpl') backend = Aer.get_backend('unitary_simulator') job = execute(qc, backend) result = job.result() print(result.get_unitary(qc, decimals=3)) H = np.array([[ 0.707+0.j, 0.707-0.j],[ 0.707+0.j, -0.707+0.j]]) I = np.eye(2) np.kron(I,H)2.2. $U \otimes I$qc = QuantumCircuit(2) qc.h(1) qc.draw(output='mpl') backend = Aer.get_backend('unitary_simulator') job = execute(qc, backend) result = job.result() print(result.get_unitary(qc, decimals=3)) H = np.array([[ 0.707+0.j, 0.707-0.j],[ 0.707+0.j, -0.707+0.j]]) I = np.eye(2) np.kron(H,I)2.3 $U \otimes U$q = QuantumRegister(2) qc = QuantumCircuit(q) qc.h(0) qc.h(1) qc.draw(output='mpl') backend = Aer.get_backend('unitary_simulator') job = execute(qc, backend) result = job.result() print(result.get_unitary(qc, decimals=3)) H = np.array([[ 0.707+0.j, 0.707-0.j],[ 0.707+0.j, -0.707+0.j]]) np.kron(H,H)2.4. 
$(U \otimes U) \times (U\otimes U)$qc = QuantumCircuit(2) qc.barrier() qc0 = qc.copy() qc.h(0) qc.x(1) qc.barrier() qc1 = qc.copy() qc.y(0) qc.h(1) qc.barrier() qc2 = qc.copy() qc.draw(output='mpl') backend = Aer.get_backend('unitary_simulator') job = execute(qc, backend) result = job.result() print(result.get_unitary(qc, decimals=3))[[-0.-0.5j 0.+0.5j 0.-0.5j 0.+0.5j] [ 0.+0.5j 0.+0.5j 0.+0.5j 0.+0.5j] [ 0.+0.5j -0.-0.5j 0.-0.5j 0.+0.5j] [-0.-0.5j -0.-0.5j 0.+0.5j 0.+0.5j]]Operator : $(H \otimes Y) \times (X\otimes H)$H = np.array([[ 0.707+0.j, 0.707-0.j],[ 0.707+0.j, -0.707+0.j]]) X = sigmax() Y = sigmay() XKH = np.kron(X,H) HKY = np.kron(H,Y) np.dot(HKY,XKH) backend = Aer.get_backend('statevector_simulator') qc_vec = [] for qc in [qc0,qc1,qc2]: out = execute(qc,backend).result().get_statevector() qc_vec.append(out) print(out)[1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0. +0.j 0. +0.j 0.70710678+0.j 0.70710678+0.j] [-6.123234e-17-0.5j 6.123234e-17+0.5j 6.123234e-17+0.5j -6.123234e-17-0.5j]State Vectorsqca = QuantumCircuit(1) qca.barrier() qca0 = qca.copy() qca.h(0) qca.barrier() qca1 = qca.copy() qca.y(0) qca.barrier() qca2 = qca.copy() qca.draw(output='mpl') backend = Aer.get_backend('statevector_simulator') qca_vec = [] for qc in [qca0,qca1,qca2]: out = execute(qc,backend).result().get_statevector() qca_vec.append(out) print(out) qcb = QuantumCircuit(1) qcb.barrier() qcb0 = qcb.copy() qcb.x(0) qcb.barrier() qcb1 = qcb.copy() qcb.h(0) qcb.barrier() qcb2 = qcb.copy() qcb.draw(output='mpl') backend = Aer.get_backend('statevector_simulator') qcb_vec = [] for qc in [qcb0,qcb1,qcb2]: out = execute(qc,backend).result().get_statevector() qcb_vec.append(out) print(out)[1.+0.j 0.+0.j] [0.+0.j 1.+0.j] [ 0.70710678-8.65956056e-17j -0.70710678+8.65956056e-17j]$\psi_{2q} = \psi_{q} \otimes \psi_{q}$for qcv,qcav,qcbv in zip(qc_vec,qca_vec,qcb_vec): print(qcv,"|", np.kron(qcbv,qcav))[1.+0.j 0.+0.j 0.+0.j 0.+0.j] | [1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0. +0.j 0. +0.j 0.70710678+0.j 0.70710678+0.j] | [0. +0.j 0. +0.j 0.70710678+0.j 0.70710678+0.j] [-6.123234e-17-0.5j 6.123234e-17+0.5j 6.123234e-17+0.5j -6.123234e-17-0.5j] | [-6.123234e-17-0.5j 6.123234e-17+0.5j 6.123234e-17+0.5j -6.123234e-17-0.5j]Graded quiz. Confidence intervalsimport pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as sts import seaborn as sns sns.set() sns.set_style("whitegrid") color_palette = sns.color_palette('deep') + sns.color_palette('husl', 6) + sns.color_palette('bright') + sns.color_palette('pastel') %matplotlib inline sns.palplot(color_palette) def ndprint(a, precision=3): with np.printoptions(precision=precision, suppress=True): print(a) def r4(*kargs): return [round(x, 4) for x in kargs] if len(kargs) > 1 else round(kargs[0], 4) def r4s(a): return r4(*a)01. Question 1Давайте уточним правило трёх сигм. Утверждение: 99.7% вероятностной массы случайной величины $X \sim N (\mu,\sigma^2)$ лежит в интервале $\mu \pm c \cdot \sigma$. Чему равно точное значение константы c? Округлите ответ до четырёх знаков после десятичной точки.alpha = 1 - 0.997 r4(sts.norm.ppf(1 - alpha/2))02. Question 2Пусть $X \sim N (\mu, \sigma^2)$. Какое распределение имеет величина $\frac{\bar{X}_n - \mu}{S_n / \sqrt{n}}$?__answered__ $St(n-1)$ 03. Question 3Выберите все распределения с несимметричной функцией плотности 04. Question 4Какое из выражений задаёт доверительный интервал для разности долей в связанных выборках? 05. 
Question 5В пятилетнем рандомизированном исследовании Гарвардской медицинской школы 11037 испытуемых через день принимали аспирин, а ещё 11034 — плацебо. Исследование было слепым, то есть, испытуемые не знали, что именно они принимают.За 5 лет инфаркт случился у 104 испытуемых, принимавших аспирин, и у 189 принимавших плацебо.Оцените, насколько вероятность инфаркта снижается при приёме аспирина. Округлите ответ до четырёх знаков после десятичной точки.n_aspirin = 11037 n_placebo = 11034 heart_attack_aspirin = 104 heart_attack_placebo = 189 p_aspirin = float(heart_attack_aspirin) / n_aspirin p_placebo = float(heart_attack_placebo) / n_placebo print r4(p_aspirin, p_placebo, p_placebo - p_aspirin)[0.0094, 0.0171, 0.0077]06. Question 6Постройте теперь 95% доверительный интервал для снижения вероятности инфаркта при приёме аспирина. Чему равна его верхняя граница? Округлите ответ до четырёх знаков после десятичной точки. Берем из практических занятий формулу для разности распределений (см переменную error_margin)def proportions_confint_diff_ind(p1, n1, p2, n2, alpha = 0.05): z = sts.norm.ppf(1 - alpha / 2.) error_margin = z * np.sqrt(p1 * (1 - p1)/ n1 + p2 * (1 - p2)/ n2) left_boundary = (p1 - p2) - error_margin right_boundary = (p1 - p2) + error_margin return (left_boundary, right_boundary) confint = r4s(proportions_confint_diff_ind(p_placebo, n_placebo, p_aspirin, n_aspirin)) print confint print confint[1][0.0047, 0.0107] 0.010707. Question 7Продолжим анализировать данные эксперимента Гарвардской медицинской школы.Для бернуллиевских случайных величин $X \sim Ber(p)$ часто вычисляют величину $\frac{p}{1-p}$, которая называется шансами (odds). Чтобы оценить шансы по выборке, вместо $p$ нужно подставить $\hat{p}$.Например, шансы инфаркта в контрольной группе, принимавшей плацебо, можно оценить как $\frac{\frac{189}{11034}}{1-\frac{189}{11034}} = \frac{189}{11034-189}\approx 0.0174$Оцените, во сколько раз понижаются шансы инфаркта при регулярном приёме аспирина. Округлите ответ до четырёх знаков после десятичной точки.def odds(p): return p / (1 - p) odds_arr = list(map(odds, [p_placebo, p_aspirin])) print r4s(odds_arr) print r4(odds_arr[0] / odds_arr[1])[0.0174, 0.0095] 1.832108. Question 8Величина, которую вы оценили в предыдущем вопросе, называется отношением шансов. Постройте для отношения шансов 95% доверительный интервал с помощью бутстрепа. Чему равна его нижняя граница? 
Prepare Tourist Site data

# import needed libraries
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd

Web scraping www.tripadvisor.com.ph to extract tourist site data. Disclaimers:
- None of the extracted data is my own; it is the property of www.tripadvisor.com.ph
- Scraped data are not used for commercial purposes and are purely for personal education purposes
- The HTML, format, tags, parameters, and other website markup used as reference are working as of this writing and may be changed at any time by the website owner/administrators, which may break this code
- The code blocks may take 0-2 minutes depending on hardware/software/network capabilities.
If you want to rerun, patience is appreciated- Imported time module to avoid overloading the site and get blocked Use Search Url and create BeautifulSoup object# set headers for scraping headers = { 'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'GET', 'Access-Control-Allow-Headers': 'Content-Type', 'Access-Control-Max-Age': '3600', "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36" } # get html text and create BeautifulSoup object search_url = 'https://www.tripadvisor.com.ph/Attractions-g298449-Activities-a_allAttractions.true-zft11309-Metro_Manila_Luzon.html' search_html = requests.get(search_url, headers = headers) search_soup = BeautifulSoup(search_html.text, 'html.parser') # extract tourist site links based from the search result page # Use 3 lines below this to find the link_class #a_tags = search_soup.find_all('a') #for tag in a_tags : #print(tag) links_class = {'class' : 'FmrIP _R w _Z P0 M0 Gm ddFHE'} a_tags = search_soup.find_all('a', links_class) # find a tags main_url = 'https://www.tripadvisor.com.ph' # iterate and append all links in a list tour_links_lst = [] for tag in a_tags : tour_links_lst.append(main_url + tag.get('href') ) print('Scraping Complete!') print('Count of Tourist Site Links : ', len(tour_links_lst) ) print('Sample Links:') for link in tour_links_lst[0:3] : print(link) # iterate over each tourist site link to scrape the data of each tour_main_dict = {} for tour_link in tour_links_lst : tour_details_dict = {} tour_html = requests.get(tour_link, headers = headers) tour_soup = BeautifulSoup(tour_html.text, 'lxml') # Extract attraction name and add to details dictionary name = tour_soup.find('h1', {'class' : 'WlYyy cPsXC GeSzT'} ).get_text() tour_details_dict['tourist_site_name'] = name # Extract tourist site classification and add to details dictionary type_tag = tour_soup.find_all('div', {'class' : 'WlYyy diXIH dDKKM'}) for tag in type_tag[4:5] : tour_classification = tag.get_text() tour_details_dict['tourist_site_classification'] = tour_classification # Extract tourist site location and add to details dictionary loc_tag = tour_soup.find('div', {'class' : 'dIDBU MJ'} ) if loc_tag != None : tour_loc = loc_tag.get_text().split('Address')[-1] else : tour_loc = tour_link.split('-')[-1].replace('_', ' ').split('.html')[0] tour_details_dict['tourist_site_location'] = tour_loc # Extract tourist site other info (overall rating) and add to details dictionary other_tag = tour_soup.find_all('div', {'class' : 'WlYyy cPsXC fksET cMKSg'} ) for tag in other_tag : other_info = tag.get_text() + '/5.0' tour_details_dict['tourist_site_other_info_overall_rating'] = other_info # Extract reviews and add to details dictionary reviews_tag = tour_soup.find_all('div', {'class' : 'pIRBV _T KRIav'}) rev_count = 0 tour_review = '' for tag in reviews_tag[1:4] : if tag.get_text() == '' : continue rev_count += 1 tour_review = tour_review = tour_review + '\n' + 'Review #' + str(rev_count) + '\n' + tag.get_text() tour_details_dict['tourist_site_reviews'] = tour_review.lstrip() tour_main_dict[name] = tour_details_dict # add all scraped details of each tourist site time.sleep(1) print('Scraping Complete!') print('Count of Hotels:', len(tour_main_dict)) # Create Dataframe from tour_main_dict df = pd.DataFrame.from_dict(tour_main_dict, orient = 'index') df.info() df.head(1) # lean location data def clean_loc(s) : if ',' not in s : r = s.split(' ')[0] else : r = s.split(',')[-2] return r.strip() 
+ ' City' df['tourist_site_location'] = df['tourist_site_location'].apply(clean_loc) df.tourist_site_location.loc[df['tourist_site_location'] == 'Quezon City City'] = 'Quezon City' df.tourist_site_location.loc[df['tourist_site_location'] == 'San City'] = 'San Juan City' df.tourist_site_location.loc[df['tourist_site_location'] == 'Taguig City City'] = 'Taguig City' #for loc in df.tourist_site_location.values : #print(loc) df.tourist_site_location.value_counts() # Number of Visitors per year and entrance fee are based on research that are available # PSA, or other review site doesn't usually provide these data # See tourist_site_visitors_and_price.csv for references import_df = pd.read_csv('tourist_site_visitors_and_price.csv', index_col = 'tourist_site_name') import_df.head(3) import_df = import_df.drop(['references'], axis = 1) import_df.head(3) # join 2 df, save to csv df.set_index('tourist_site_name', inplace = True) final_df = df.merge(import_df, how = 'left', on = 'tourist_site_name' ) final_df.info() final_df final_df.to_csv('Tourist_site.csv', index = True)Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.# Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data_df = pd.read_csv(file_to_load) purchase_data_df.head()Player Count * Display the total number of players#length to get total number of players count_df = purchase_data_df["SN"].nunique() #create Dataframe using dictionary of list total_df = pd.DataFrame({"Total Players": [count_df]}) #set new index to Total Players total_df = total_df.set_index("Total Players") #display total_dfPurchasing Analysis (Total) * Run basic calculations to obtain number of unique items, average price, etc.* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame#.nunique() function return number of unique elements in the object items = purchase_data_df["Item Name"].nunique() #.mean function return average average_price = purchase_data_df["Price"].mean() #number of purchases by counting the length of the dataframe number_of_purchase = len(purchase_data_df) #.sum function returns the total total_revenue = purchase_data_df["Price"].sum() #create Dataframe using lists of dictionaries purchasing_analysis_df =pd.DataFrame([{"Number of Unique Items": items, "Average Price": average_price, "Number of Purchases": number_of_purchase, "Total Revenue": total_revenue}]) #use mapping to format dollar amounts purchasing_analysis_df["Average Price"] = purchasing_analysis_df["Average Price"].map("${:.2f}".format) purchasing_analysis_df["Total Revenue"] = purchasing_analysis_df["Total Revenue"].map("${:.2f}".format) #set new index to Number of Unique Items purchasing_analysis_df = purchasing_analysis_df.set_index("Number of Unique Items") purchasing_analysis_dfGender Demographics * Percentage and Count of Male Players* Percentage and Count of Female Players* Percentage and Count of Other / Non-Disclosed# Calculate the Number of Unique Players by gender and sn andded age for later in the code demographic_df = purchase_data_df.loc[:, ["Gender", "SN", "Age" ]] demographic_df = demographic_df.drop_duplicates() gender_count = demographic_df.count()["Gender"] # Display the total number of players by gender gender_total = 
demographic_df["Gender"].value_counts() gender_percent = gender_total / gender_count * 100 demographic_breakdown_df = pd.DataFrame({"Total Count": gender_total, "Percentage of Players": gender_percent}) demographic_breakdown_df["Percentage of Players"] = demographic_breakdown_df["Percentage of Players"].map("{0:.2f}%".format) demographic_breakdown_dfPurchasing Analysis (Gender) * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame#calculate the count average and sum by gender gender_group = purchase_data_df.groupby(["Gender"]) gender_purchase_total = gender_group["SN"].count() gender_average_price = purchase_data_df.groupby(["Gender"])["Price"].mean() gender_total_price = purchase_data_df.groupby(["Gender"])["Price"].sum() gender_per_person = gender_total_price / gender_total #create Dataframe gender_purchasing_analysis_df =pd.DataFrame({"Purchase Count": gender_purchase_total, "Average Purchase Price": gender_average_price, "Total Purchase Value": gender_total_price, "Avg Total Purchase per Person": gender_per_person}) #use mapping to formatting gender_purchasing_analysis_df["Average Purchase Price"] = gender_purchasing_analysis_df["Average Purchase Price"].map("${:.2f}".format) gender_purchasing_analysis_df["Total Purchase Value"] = gender_purchasing_analysis_df["Total Purchase Value"].map("${:.2f}".format) gender_purchasing_analysis_df["Avg Total Purchase per Person"] = gender_purchasing_analysis_df["Avg Total Purchase per Person"].map("${:.2f}".format) #Display gender_purchasing_analysis_dfAge Demographics * Establish bins for ages* Categorize the existing players using the age bins. Hint: use pd.cut()* Calculate the numbers and percentages by age group* Create a summary data frame to hold the results* Optional: round the percentage column to two decimal points* Display Age Demographics Table#create bins bins = [0, 9.90, 14.90, 19.90, 24.90, 29.90, 34.90, 39.90, 99999] group_players = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"] #Categorize the existing players using the age bins demographic_df[ "Age Groups"] = pd.cut(demographic_df["Age"], bins, labels = group_players) #count age groups and calculate percentages age_demographic_count = demographic_df["Age Groups"].value_counts() age_demographic_percent = age_demographic_count / gender_count * 100 #use mapping to format age_breakdown_df = pd.DataFrame({"Total Count": age_demographic_count,"Percentage of Player": age_demographic_percent}) #use mapping to format age_breakdown_df["Percentage of Player"] = age_breakdown_df["Percentage of Player"].map("{0:.2f}%".format) #sort by age group and display age_breakdown_df.sort_index()Purchasing Analysis (Age) * Bin the purchase_data data frame by age* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table below* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame#Categorize the existing players using the age bins purchase_data_df[ "Age Groups"] = pd.cut(purchase_data_df["Age"], bins, labels = group_players) #calculate the count average and sum by age gender_group = purchase_data_df.groupby(["Age Groups"]) age_purchase_total = gender_group["Age"].count() age_average_price = purchase_data_df.groupby(["Age Groups"])["Price"].mean() age_total_price = purchase_data_df.groupby(["Age Groups"])["Price"].sum() age_per_person = age_total_price / age_demographic_count #create Dataframe age_purchasing_analysis_df =pd.DataFrame({"Purchase Count": age_purchase_total, "Average Purchase Price": age_average_price, "Total Purchase Value": age_total_price, "Avg Total Purchase per Person": age_per_person}) #use mapping to formatting age_purchasing_analysis_df["Average Purchase Price"] = age_purchasing_analysis_df["Average Purchase Price"].map("${:.2f}".format) age_purchasing_analysis_df["Total Purchase Value"] = age_purchasing_analysis_df["Total Purchase Value"].map("${:.2f}".format) age_purchasing_analysis_df["Avg Total Purchase per Person"] = age_purchasing_analysis_df["Avg Total Purchase per Person"].map("${:.2f}".format) #Display age_purchasing_analysis_dfTop Spenders * Run basic calculations to obtain the results in the table below* Create a summary data frame to hold the results* Sort the total purchase value column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data frame#calculate the count average and sum by gender user_group = purchase_data_df.groupby(["SN"]) user_purchase_total = user_group["Price"].count() user_average_price = purchase_data_df.groupby(["SN"])["Price"].mean() user_total_price = purchase_data_df.groupby(["SN"])["Price"].sum() user_per_person = gender_total_price / gender_total #create Dataframe user_purchasing_analysis_df =pd.DataFrame({"Purchase Count": user_purchase_total, "Average Purchase Price": user_average_price, "Total Purchase Value": user_total_price}) #sort by purchase count then purchase value sorted_user_purchasing_analysis_df = user_purchasing_analysis_df.sort_values("Total Purchase Value", ascending = False) #use mapping to formatting sorted_user_purchasing_analysis_df["Average Purchase Price"] = sorted_user_purchasing_analysis_df["Average Purchase Price"].map("${:.2f}".format) sorted_user_purchasing_analysis_df["Total Purchase Value"] = sorted_user_purchasing_analysis_df["Total Purchase Value"].map("${:.2f}".format) #Display first 10 rows sorted_user_purchasing_analysis_df.head(10)Most Popular Items * Retrieve the Item ID, Item Name, and Item Price columns* Group by Item ID and Item Name. 
Perform calculations to obtain purchase count, average item price, and total purchase value* Create a summary data frame to hold the results* Sort the purchase count column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data frame#calculate the count average and sum by item item_group = purchase_data_df.groupby(["Item ID", "Item Name"]) item_purchase_total = item_group["Purchase ID"].count() item_price = item_group["Price"].mean() item_total_price = item_group["Price"].sum() #create Dataframe item_purchasing_analysis_df =pd.DataFrame({"Purchase Count": item_purchase_total, "Item Price": item_price, "Total Purchase Value": item_total_price}) #sort by purchase count then purchase count then total purchase value sorted_item_purchasing_analysis_df = item_purchasing_analysis_df.sort_values(by=["Purchase Count","Total Purchase Value"], ascending = [False,False]) #use mapping to formatting sorted_item_purchasing_analysis_df["Item Price"] = sorted_item_purchasing_analysis_df["Item Price"].map("${:.2f}".format) sorted_item_purchasing_analysis_df["Total Purchase Value"] = sorted_item_purchasing_analysis_df["Total Purchase Value"].map("${:.2f}".format) #Display header sorted_item_purchasing_analysis_df.head()Most Profitable Items * Sort the above table by total purchase value in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the data frame#calculate the count average and sum by item item_group = purchase_data_df.groupby(["Item ID", "Item Name"]) item_purchase_total = item_group["Purchase ID"].count() item_price = item_group["Price"].mean() item_total_price = item_group["Price"].sum() #create Dataframe item_purchasing_analysis_df =pd.DataFrame({"Purchase Count": item_purchase_total, "Item Price": item_price, "Total Purchase Value": item_total_price}) #sort by total purchase value sorted_item_purchasing_analysis_df = item_purchasing_analysis_df.sort_values("Total Purchase Value", ascending = False) #use mapping to formatting sorted_item_purchasing_analysis_df["Item Price"] = sorted_item_purchasing_analysis_df["Item Price"].map("${:.2f}".format) sorted_item_purchasing_analysis_df["Total Purchase Value"] = sorted_item_purchasing_analysis_df["Total Purchase Value"].map("${:.2f}".format) #Display header sorted_item_purchasing_analysis_df.head()Function to add correlation circlefrom sklearn.preprocessing import normalize def add_correlation_circle(figure, coeffs, texts, normalization=True, add_circle=True): if add_circle: figure.add_shape(type="circle", xref="x", yref="y", x0=-1, y0=-1, x1=1, y1=1, line_color="blue" ) if normalization: coeffs = normalize(coeffs, axis=0) for i in range(coeffs.shape[1]): figure.add_annotation( x=coeffs[0,i], # arrows' head y=coeffs[1,i], # arrows' head ax=0, # arrows' tail ay=0, # arrows' tail xref='x', yref='y', axref='x', ayref='y', text='', # if you want only the arrow showarrow=True, arrowhead=3, arrowsize=1, arrowwidth=3, arrowcolor='red' ) figure.add_annotation( x=coeffs[0,i]*1.25, y=coeffs[1,i]*1.25, text=texts[i], showarrow=False, font=dict(size=20,color="red") ) return figureLoad datairis = datasets.load_iris() X = iris.data y = iris.targetPerfrom PCAscaler = StandardScaler() scaler.fit(X) X=scaler.transform(X) pca = PCA(n_components=2) pca.fit(X) x_new = pd.DataFrame(pca.transform(X)) x_new['species'] = y x_new['species'] = x_new['species'].astype('object')Visualizationfig = px.scatter(x_new, x=0, y=1, color='species',width=800, height=600) 
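# Added explanatory comments (not in the original notebook): the call below overlays
# a PCA correlation circle on the scatter of the first two principal components.
# Here pca.components_ has shape (2, 4), so column i holds the loadings of the i-th
# iris feature on PC1 and PC2; add_correlation_circle normalizes each column and
# draws it as a red arrow inside the unit circle, labelled "Var 1" through "Var 4".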
add_correlation_circle(fig, coeffs=pca.components_, texts=[f"Var {i}" for i in range(1,5)], normalization=True, add_circle=True)Venture Funding with Deep LearningYou work as a risk management associate at Alphabet Soup, a venture capital firm. Alphabet Soup’s business team receives many funding applications from startups every day. This team has asked you to help them create a model that predicts whether applicants will be successful if funded by Alphabet Soup.The business team has given you a CSV containing more than 34,000 organizations that have received funding from Alphabet Soup over the years. With your knowledge of machine learning and neural networks, you decide to use the features in the provided dataset to create a binary classifier model that will predict whether an applicant will become a successful business. The CSV file contains a variety of information about these businesses, including whether or not they ultimately became successful. Instructions:The steps for this challenge are broken out into the following sections:* Prepare the data for use on a neural network model.* Compile and evaluate a binary classification model using a neural network.* Optimize the neural network model. Prepare the Data for Use on a Neural Network Model Using your knowledge of Pandas and scikit-learn’s `StandardScaler()`, preprocess the dataset so that you can use it to compile and evaluate the neural network model later.Open the starter code file, and complete the following data preparation steps:1. Read the `applicants_data.csv` file into a Pandas DataFrame. Review the DataFrame, looking for categorical variables that will need to be encoded, as well as columns that could eventually define your features and target variables. 2. Drop the “EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model. 3. Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame.4. Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables.> **Note** To complete this step, you will employ the Pandas `concat()` function that was introduced earlier in this course. 5. Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. The remaining columns should define the features dataset. 6. Split the features and target sets into training and testing datasets.7. Use scikit-learn's `StandardScaler` to scale the features data. Compile and Evaluate a Binary Classification Model Using a Neural NetworkUse your knowledge of TensorFlow to design a binary classification deep neural network model. This model should use the dataset’s features to predict whether an Alphabet Soup–funded startup will be successful based on the features in the dataset. Consider the number of inputs before determining the number of layers that your model will contain or the number of neurons on each layer. Then, compile and fit your model. Finally, evaluate your binary classification model to calculate the model’s loss and accuracy. To do so, complete the following steps:1. Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras.> **Hint** You can start with a two-layer deep neural network model that uses the `relu` activation function for both layers.2. 
Compile and fit the model using the `binary_crossentropy` loss function, the `adam` optimizer, and the `accuracy` evaluation metric.> **Hint** When fitting the model, start with a small number of epochs, such as 20, 50, or 100.3. Evaluate the model using the test data to determine the model’s loss and accuracy.4. Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`. Optimize the Neural Network ModelUsing your knowledge of TensorFlow and Keras, optimize your model to improve the model's accuracy. Even if you do not successfully achieve a better accuracy, you'll need to demonstrate at least two attempts to optimize the model. You can include these attempts in your existing notebook. Or, you can make copies of the starter notebook in the same folder, rename them, and code each model optimization in a new notebook. > **Note** You will not lose points if your model does not achieve a high accuracy, as long as you make at least two attempts to optimize the model.To do so, complete the following steps:1. Define at least three new deep neural network models (the original plus 2 optimization attempts). With each, try to improve on your first model’s predictive accuracy.> **Rewind** Recall that perfect accuracy has a value of 1, so accuracy improves as its value moves closer to 1. To optimize your model for a predictive accuracy as close to 1 as possible, you can use any or all of the following techniques:>> * Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model.>> * Add more neurons (nodes) to a hidden layer.>> * Add more hidden layers.>> * Use different activation functions for the hidden layers.>> * Add to or reduce the number of epochs in the training regimen.2. After finishing your models, display the accuracy scores achieved by each model, and compare the results.3. Save each of your models as an HDF5 file.# Imports import pandas as pd from pathlib import Path import tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler,OneHotEncoderC:\Users\nehaja\Anaconda3\envs\dev\lib\site-packages\numpy\_distributor_init.py:32: UserWarning: loaded more than 1 DLL from .libs: C:\Users\nehaja\Anaconda3\envs\dev\lib\site-packages\numpy\.libs\libopenblas.TXA6YQSD3GCQQC22GEQ54J2UDCXDXHWN.gfortran-win_amd64.dll C:\Users\nehaja\Anaconda3\envs\dev\lib\site-packages\numpy\.libs\libopenblas.WCDJNK7YVMPZQ2ME2ZZHJJRJ3JIKNDB7.gfortran-win_amd64.dll stacklevel=1)--- Prepare the data to be used on a neural network model Step 1: Read the `applicants_data.csv` file into a Pandas DataFrame. 
Review the DataFrame, looking for categorical variables that will need to be encoded, as well as columns that could eventually define your features and target variables.# Read the applicants_data.csv file from the Resources folder into a Pandas DataFrame applicant_data_df = pd.read_csv(Path("./Resources/applicants_data.csv")) # Review the DataFrame applicant_data_df.head() # Review the data types associated with the columns applicant_data_df.dtypesStep 2: Drop the “EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model.# Drop the 'EIN' and 'NAME' columns from the DataFrame applicant_data_df = applicant_data_df.drop(columns=['EIN','NAME']) # Review the DataFrame applicant_data_dfStep 3: Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame.# Create a list of categorical variables categorical_variables = list(applicant_data_df.dtypes[applicant_data_df.dtypes == "object"].index) # Display the categorical variables list display(categorical_variables) # Create a OneHotEncoder instance enc = OneHotEncoder(sparse=False) # Encode the categorcal variables using OneHotEncoder encoded_data = enc.fit_transform(applicant_data_df[categorical_variables]) # Create a DataFrame with the encoded variables encoded_df =pd.DataFrame( encoded_data, columns = enc.get_feature_names(categorical_variables) ) # Review the DataFrame encoded_df.head()Step 4: Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables.> **Note** To complete this step, you will employ the Pandas `concat()` function that was introduced earlier in this course.# Add the numerical variables from the original DataFrame to the one-hot encoding DataFrame encoded_df = pd.concat([encoded_df, applicant_data_df.drop(columns=categorical_variables)], axis = 1) # Review the Dataframe encoded_df.head()Step 5: Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. 
The remaining columns should define the features dataset.# Define the target set y using the IS_SUCCESSFUL column y =encoded_df["IS_SUCCESSFUL"] # Display a sample of y y[:5] # Define features set X by selecting all columns but IS_SUCCESSFUL X = encoded_df.drop(columns=["IS_SUCCESSFUL"]) # Review the features DataFrame X.head()Step 6: Split the features and target sets into training and testing datasets.# Split the preprocessed data into a training and testing dataset # Assign the function a random_state equal to 1 X_train, X_test, y_train, y_test = train_test_split(X,y, random_state=1)Step 7: Use scikit-learn's `StandardScaler` to scale the features data.# Create a StandardScaler instance scaler = StandardScaler() # Fit the scaler to the features training dataset X_scaler = scaler.fit(X_train) # Fit the scaler to the features training dataset X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test)--- Compile and Evaluate a Binary Classification Model Using a Neural Network Step 1: Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras.> **Hint** You can start with a two-layer deep neural network model that uses the `relu` activation function for both layers.# Define the the number of inputs (features) to the model number_input_features = len(X_train.iloc[0]) # Review the number of features number_input_features # Define the number of neurons in the output layer number_output_neurons = 1 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1 = (number_input_features+number_output_neurons) // 2 # Review the number hidden nodes in the first layer hidden_nodes_layer1 # Define the number of hidden nodes for the second hidden layer hidden_nodes_layer2 = (hidden_nodes_layer1+number_output_neurons) // 2 # Review the number hidden nodes in the second layer hidden_nodes_layer2 # Create the Sequential model instance nn = Sequential() # Add the first hidden layer nn.add(Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation="relu")) # Add the second hidden layer nn.add(Dense(units=hidden_nodes_layer2, activation="relu")) # Add the output layer to the model specifying the number of output neurons and activation function nn.add(Dense(units=number_output_neurons, activation="sigmoid")) # Display the Sequential model summary nn.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 58) 6786 _________________________________________________________________ dense_1 (Dense) (None, 29) 1711 _________________________________________________________________ dense_2 (Dense) (None, 1) 30 ================================================================= Total params: 8,527 Trainable params: 8,527 Non-trainable params: 0 _________________________________________________________________Step 2: Compile and fit the model using the `binary_crossentropy` loss function, the `adam` optimizer, and the `accuracy` evaluation metric.# Compile the Sequential model nn.compile(loss="binary_crossentropy",optimizer ="adam", metrics=["accuracy"]) # Fit the model using 50 epochs and the training data fit_model = nn.fit(X_train_scaled, y_train, epochs =50)Epoch 1/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5756 - accuracy: 0.7203 Epoch 2/50 804/804 
[==============================] - 1s 2ms/step - loss: 0.5539 - accuracy: 0.7298 Epoch 3/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5501 - accuracy: 0.7305 Epoch 4/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5477 - accuracy: 0.7325 Epoch 5/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5464 - accuracy: 0.7328 Epoch 6/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5449 - accuracy: 0.7340 Epoch 7/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5442 - accuracy: 0.7343 Epoch 8/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5432 - accuracy: 0.7329 Epoch 9/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5422 - accuracy: 0.7345 Epoch 10/50 804/804 [==============================] - 1s 2ms/step - loss: 0.5426 - accuracy: 0.7350[...]Step 3: Evaluate the model using the test data to determine the model’s loss and accuracy.# Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")268/268 - 0s - loss: 0.5569 - accuracy: 0.7301 Loss: 0.5568558573722839, Accuracy: 0.7301457524299622Step 4: Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`.# Set the model's file path file_path = Path("Resources/AlphabetSoup.h5") # Export your model to a HDF5 file nn.save(file_path)--- Optimize the neural network model Step 1: Define at least three new deep neural network models (resulting in the original plus 3 optimization attempts). With each, try to improve on your first model’s predictive accuracy.> **Rewind** Recall that perfect accuracy has a value of 1, so accuracy improves as its value moves closer to 1. To optimize your model for a predictive accuracy as close to 1 as possible, you can use any or all of the following techniques:>> * Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model.>> * Add more neurons (nodes) to a hidden layer.>> * Add more hidden layers.>> * Use different activation functions for the hidden layers.>> * Add to or reduce the number of epochs in the training regimen. 
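The alternative models below apply these techniques by hand. As a hedged, minimal sketch (not part of the original notebook), the same experiments could also be driven by a small helper that builds, compiles, and fits a candidate model from a list of hidden-layer sizes and an activation name; it assumes the X_train_scaled, y_train, and number_input_features variables defined above.

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

def build_and_fit(hidden_sizes, activation="relu", epochs=50):
    # Assemble a feed-forward binary classifier from the given layer sizes
    model = Sequential()
    model.add(Dense(units=hidden_sizes[0], input_dim=number_input_features, activation=activation))
    for units in hidden_sizes[1:]:
        model.add(Dense(units=units, activation=activation))
    model.add(Dense(units=1, activation="sigmoid"))
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.fit(X_train_scaled, y_train, epochs=epochs, verbose=0)
    return model

# Example: a wider two-layer tanh variant trained for 50 epochs
# candidate = build_and_fit([58, 29], activation="tanh", epochs=50)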
Alternative Model 1# Define the the number of inputs (features) to the model number_input_features_A1 = len(X_train.iloc[0]) # Review the number of features number_input_features_A1 # Define the number of neurons in the output layer number_output_neurons_A1 = 1 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1_A1 = (number_input_features_A1+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer1_A1 # Define the number of hidden nodes for the additional hidden layers hidden_nodes_layer2_A1 = (hidden_nodes_layer1_A1+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the additional layer hidden_nodes_layer2_A1 # Define the number of hidden nodes for the additional hidden layers hidden_nodes_layer3_A1 = (hidden_nodes_layer2_A1+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the additional layer hidden_nodes_layer3_A1 # Define the number of hidden nodes for the additional hidden layers hidden_nodes_layer4_A1 = (hidden_nodes_layer3_A1+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the additional layer hidden_nodes_layer4_A1 # Define the number of hidden nodes for the additional hidden layers hidden_nodes_layer5_A1 = (hidden_nodes_layer4_A1+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the additional layer hidden_nodes_layer5_A1 # Create the Sequential model instance nn_A1 = Sequential() # First hidden layer nn_A1.add(Dense(units=hidden_nodes_layer1_A1, input_dim=number_input_features, activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer2_A1, activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer3_A1, activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer4_A1, activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer5_A1, activation="relu")) # Output layer nn_A1.add(Dense(units=number_output_neurons, activation="sigmoid")) # Check the structure of the model nn_A1.summary() # Compile the Sequential model nn_A1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # Fit the model using 50 epochs and the training data fit_model_A1 = nn_A1.fit(X_train_scaled, y_train, epochs=50)Epoch 1/50 804/804 [==============================] - 3s 2ms/step - loss: 0.5810 - accuracy: 0.7155 Epoch 2/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5527 - accuracy: 0.7304 Epoch 3/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5494 - accuracy: 0.7329 Epoch 4/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5469 - accuracy: 0.7335 Epoch 5/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5453 - accuracy: 0.7329 Epoch 6/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5445 - accuracy: 0.7350 Epoch 7/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5431 - accuracy: 0.7352 Epoch 8/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5429 - accuracy: 0.7354 Epoch 9/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5419 - accuracy: 0.7363 Epoch 10/50 804/804 [==============================] - 2s 2ms/step - loss: 0.5413 - accuracy: 0.7360[...]Alternative Model 2# Define the the number of inputs (features) to the model number_input_features_A2 = len(X_train.iloc[0]) # Review the number of features number_input_features_A2 # Define the number of neurons in the output layer number_output_neurons_A2 = 1 # Define the number of hidden nodes for the first hidden layer 
hidden_nodes_layer1_A2 = (number_input_features_A2+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer1_A2 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer2_A2 = (hidden_nodes_layer1_A2+number_output_neurons_A1) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer2_A2 # Create the Sequential model instance nn_A2 = Sequential() # First hidden layer nn_A2.add(Dense(units=hidden_nodes_layer1_A2, input_dim=number_input_features, activation="relu")) nn_A2.add(Dense(units=hidden_nodes_layer2_A2, activation="relu")) # Output layer nn_A2.add(Dense(units=number_output_neurons, activation="linear")) # Check the structure of the model nn_A2.summary() # Compile the model nn_A2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # Fit the model fit_model_A2 = nn_A2.fit(X_train_scaled, y_train, epochs=100) #### Alternative Model 3 # Define the the number of inputs (features) to the model number_input_features_A3 = len(X_train.iloc[0]) # Review the number of features number_input_features_A3 # Define the number of neurons in the output layer number_output_neurons_A3 = 1 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1_A3 = (number_input_features_A3 + number_output_neurons_A3) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer1_A3 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer2_A3 = (hidden_nodes_layer1_A3 + number_output_neurons_A3) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer2_A3 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer3_A3 = (hidden_nodes_layer2_A3 + number_output_neurons_A3) // 2 # Review the number of hidden nodes in the first layer hidden_nodes_layer3_A3 # Create the Sequential model instance nn_A3 = Sequential() # First hidden layer nn_A3.add(Dense(units=hidden_nodes_layer1_A3, input_dim=number_input_features, activation="LeakyReLU")) # Output layer nn_A3.add(Dense(units=1, activation="sigmoid")) # Check the structure of the model nn_A3.summary() # Compile the model nn_A3.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Fit the model fit_model_A3 = nn_A3.fit(X_train_scaled, y_train, epochs=80) #### Alternative Model 4 # Define the the number of inputs (features) to the model number_input_features_A4 = len(X_train.iloc[0]) # Review the number of features number_input_features_A4 # Define the number of neurons in the output layer number_output_neurons_A4 = 1 # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1_A4 = (number_input_features_A4 + number_output_neurons_A4) // 3 # Review the number of hidden nodes in the first layer hidden_nodes_layer1_A4 # Create the Sequential model instance nn_A4 = Sequential() # First hidden layer nn_A4.add(Dense(units=hidden_nodes_layer1_A4, input_dim=number_input_features, activation="relu")) # Output layer nn_A4.add(Dense(units=1, activation="sigmoid")) # Check the structure of the model nn_A4.summary() # Compile the model nn_A4.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Fit the model fit_model_A4 = nn_A4.fit(X_train_scaled, y_train, epochs=120)Epoch 1/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5887 - accuracy: 0.7114 Epoch 2/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5584 - accuracy: 0.7276: 0s - loss: 0.562 Epoch 3/120 
804/804 [==============================] - 1s 1ms/step - loss: 0.5532 - accuracy: 0.7306 Epoch 4/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5516 - accuracy: 0.7311 Epoch 5/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5506 - accuracy: 0.7307 Epoch 6/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5494 - accuracy: 0.7314 Epoch 7/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5486 - accuracy: 0.7318 Epoch 8/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5475 - accuracy: 0.7311: 0s - loss: 0.5459 - accuracy: Epoch 9/120 804/804 [==============================] - 1s 1ms/step - loss: 0.5472 - accuracy: 0.7311 Epoch 10/120 804/804 [===================[...]Step 2: After finishing your models, display the accuracy scores achieved by each model, and compare the results.print("Original Model Results") # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") print("Alternative Model 1 Results") # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn_A1.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") print("Alternative Model 2 Results") # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn_A2.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") print("Alternative Model 3 Results") # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn_A3.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") print("Alternative Model 4 Results") # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn_A4.evaluate(X_test_scaled, y_test, verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")Alternative Model 4 Results 268/268 - 0s - loss: 0.5638 - accuracy: 0.7294 Loss: 0.5638464689254761, Accuracy: 0.7294460535049438Step 3: Save each of your alternative models as an HDF5 file.# Set the file path for the first alternative model file_path = Path("Resources/alternative1.h5") # Export your model to a HDF5 file nn_A1.save(file_path) # Set the file path for the second alternative model file_path =Path("Resources/alternative2.h5") # Export your model to a HDF5 file nn_A2.save(file_path) # Set the file path for the third alternative model file_path =Path("Resources/alternative3.h5") # Export your model to a HDF5 file nn_A3.save(file_path) # Set the file path for the fourth alternative model file_path =Path("Resources/alternative4.h5") # Export your model to a HDF5 file nn_A4.save(file_path)# !pip install quandl # import quandl # !pip install yfinance # import yfinance as yf import datetime as dt !pip install quandl import quandl import matplotlib.pyplot as plt import matplotlib.transforms as transform import matplotlib.gridspec as gridspec import pandas as pd from pandas import DataFrame ticker = 
"CHRIS/CME_NG1" authtoken="" class DataBase(): def __init__(self, ticker, days): self.ticker = ticker data =quandl.get(ticker, authtoken=authtoken, start_date='2020-01-01', ) self.df = pd.DataFrame(data) pd.set_option("display.max_columns", None) self.df[self.df.index.dayofweek < 5] self.df= self.df[-days:] def quote(self): return self.df db = DataBase(ticker, 252) df = db.quote() df.tail() pivot_h1 = df['High'][-21:-1].max() # 4 weeks pivot_h2 = df['High'][-55:-22].max() # 6 weeks post 4 weeks pivot_l1 = df['Low'][-21:-1].min() # 4 weeks pivot_l2 = df['Low'][-55:-22].min() # 6 weeks post 4 weeks a = [df['High'][-21:-1].idxmax(), pivot_h1] b = [df['High'][-55:-22].idxmax(), pivot_h2] a1 = [df['Low'][-21:-1].idxmin(), pivot_l1] b1 = [df['Low'][-55:-22].idxmin(), pivot_l2] x1_high_values = [a[0], b[0]] y1_high_values = [a[1], b[1]] x1_low_values = [a1[0], b1[0]] y1_low_values = [a1[1], b1[1]] plt.rcParams.update({'font.size': 10}) # plt.style.use('dark_background') fig, ax1 = plt.subplots(figsize = (15,6)) ax1.set_ylabel("Price in US$") ax1.set_xlabel("Timestamp") ax1.set_title("Henry Hub Spot Price \n Support-Resistance Breakout") ax1.plot('Settle', data=df, label = 'Daily Adj Close price', ) # if we need thinner line ax1.plot(x1_high_values, y1_high_values, color='g', linestyle ='--', label = 'Trend Resistance') ax1.plot(x1_low_values, y1_low_values, color='r', linestyle ='--', label = 'Trend Support') ax1.axhline(y= pivot_h1, color = 'g', label = "first resistance line") ax1.axhline(y= pivot_l1, color = 'r', label = "first support line") trans = transform.blended_transform_factory(ax1.get_yticklabels()[0].get_transform(), ax1.transData) ax1.text(0, pivot_h1, "{:.2f}".format(pivot_h1), color = 'g', transform=trans, ha='right', va='center') ax1.text(0, pivot_l1, "{:.2f}".format(pivot_l1), color = 'r', transform=trans, ha='right', va='center') ax1.legend(); ax1.grid(); plt.show() data =quandl.get(ticker, authtoken=authtoken, start_date='2015-01-01', ) data['roll_max'] = data['Settle'].rolling(window=252).max() data['roll_min'] = data['Settle'].rolling(window=252).min() pivot_h1 = data['High'][-21:-1].max() # 4 weeks pivot_h2 = data['High'][-55:-22].max() # 6 weeks post 4 weeks pivot_l1 = data['Low'][-21:-1].min() # 4 weeks pivot_l2 = data['Low'][-55:-22].min() # 6 weeks post 4 weeks a = [data['High'][-21:-1].idxmax(), pivot_h1] b = [data['High'][-55:-22].idxmax(), pivot_h2] a1 = [data['Low'][-21:-1].idxmin(), pivot_l1] b1 = [data['Low'][-55:-22].idxmin(), pivot_l2] x1_high_values = [a[0], b[0]] y1_high_values = [a[1], b[1]] x1_low_values = [a1[0], b1[0]] y1_low_values = [a1[1], b1[1]] plt.rcParams.update({'font.size': 10}) # plt.style.use('dark_background') fig, ax1 = plt.subplots(figsize = (15,6)) ax1.set_ylabel("Price in US$") ax1.set_xlabel("Timestamp") ax1.set_title("Henry Hub Spot Price \n Support-Resistance Breakout") ax1.plot('Settle', data=data, label = 'Daily Adj Close price', color = 'b', LineWidth = 0.5) # if we need thinner line ax1.plot('roll_max', data=data, label = "rolling maxima" ) # if we need thinner line ax1.plot('roll_min', data=data, label = "rolling minima" ) # if we need thinner line ax1.plot(x1_high_values, y1_high_values, color='g', linestyle ='--', label = 'Trend Resistance') ax1.plot(x1_low_values, y1_low_values, color='r', linestyle ='--', label = 'Trend Support') ax1.axhline(y= pivot_h1, color = 'g', label = "first resistance line") ax1.axhline(y= pivot_l1, color = 'r', label = "first support line") trans = 
transform.blended_transform_factory(ax1.get_yticklabels()[0].get_transform(), ax1.transData) ax1.text(0, pivot_h1, "{:.2f}".format(pivot_h1), color = 'g', transform=trans, ha='right', va='center') ax1.text(0, pivot_l1, "{:.2f}".format(pivot_l1), color = 'r', transform=trans, ha='right', va='center') ax1.legend(); ax1.grid(); plt.show() data['roll_max'] = data['Settle'].rolling(window=252).max().dropna() data['roll_min'] = data['Settle'].rolling(window=252).min().dropna() print(data) data['roll_max'] = data['Settle'].rolling(window=252).max().dropna() data['roll_min'] = data['Settle'].rolling(window=252).min().dropna()The example starts with NG data and implements a mean-reversion strategy on the basis of an SMA of 20 days and a threshold value of 0.5 for the absolute deviation of the current price to deviate from the SMA to signal a positioning.import numpy as np import matplotlib.pyplot as plt ticker = "CHRIS/CME_NG1" authtoken="" start_date = dt.date(2015,1,1); end_date = dt.date.today() class DataBase(): def __init__(self, ticker): self.ticker = ticker data =quandl.get(ticker, authtoken=authtoken, start_date= start_date, end_date = end_date ) self.df = pd.DataFrame(data) pd.set_option("display.max_columns", None) self.df[self.df.index.dayofweek < 5] # self.df= self.df[-days:] def quote(self): return self.df db = DataBase(ticker) NG = db.quote() NG.tail(); print() NG['returns'] = np.log(NG['Last'] / NG['Last'].shift(1)) SMA = 20 NG['SMA'] = NG['Last'].rolling(SMA).mean() threshold = 0.5 NG['distance'] = NG['Last'] - NG['SMA'] NG['distance'].dropna().plot(figsize=(15, 6), legend=True) plt.axhline(threshold, color='green') plt.axhline(-threshold, color='r') plt.axhline(0, color='k'); plt.title("Differences between current price of NG and the SMA \n+ve and -ve threshold value to generate sell and buy signals"); plt.grid(True) import numpy as np import matplotlib.pyplot as plt NG = quandl.get("CHRIS/CME_NG1", authtoken="L", start_date='2015-01-01', ) # natural gas continuous contract 1 NG['returns'] = np.log(NG['Last'] / NG['Last'].shift(1)) SMA = 20 NG['SMA'] = NG['Last'].rolling(SMA).mean() threshold = 0.5 NG['distance'] = NG['Last'] - NG['SMA'] NG['distance'].dropna().plot(figsize=(15, 6), legend=True) plt.axhline(threshold, color='green') plt.axhline(-threshold, color='r') plt.axhline(0, color='k'); plt.title("Differences between current price of NG and the SMA \n+ve and -ve threshold value to generate sell and buy signals"); plt.grid(True)differences between the current price of NG and the SMA, as well as the positive and negative threshold value to generate sell and buy signals, respectively:NG['position'] = np.where(NG['distance'] > threshold, -1, np.nan) NG['position'] = np.where(NG['distance'] < -threshold, 1, NG['position']) NG['position'] = np.where(NG['distance'] * NG['distance'].shift(1) < 0, 0, NG['position']) NG['position'] = NG['position'].ffill().fillna(0) NG['position'].iloc[SMA:].plot(ylim=[-1.1, 1.1], figsize=(15, 6)); plt.grid(True)- if the distance value is greater than the threshold value, go short (set –1 in the new column position), otherwise set NaN.- If the distance value is lower than the negative threshold value, go long (set 1), otherwise keep the column position unchanged.- If there is a change in the sign of the distance value, go market neutral (set 0), otherwise keep the column position unchanged.NG['strategy'] = NG['position'].shift(1) * NG['returns'] NG[['returns', 'strategy']].dropna().cumsum( ).apply(np.exp).plot(figsize=(15, 6)); 
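# Added explanatory comments (not in the original notebook): shifting the position by
# one day means the position decided at the close of day t-1 earns the return of day t,
# which avoids look-ahead bias. Because 'returns' holds log returns, taking the
# cumulative sum and then np.exp turns both series into gross cumulative performance,
# so the plot compares buy-and-hold against the mean-reversion strategy.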
plt.title("Mean-reversion strategy \n(SMA = 20, threshold = 0.5)") plt.grid(True) # class to store tick data class tickData(object): """ Stores a single unit of data """ def __init__(self,timestamp='',symbol='',open_price=0,close_price=0,total_volume=0): self.symbol = symbol self.timestamp = timestamp self.open_price = open_price self.close_price = close_price self.total_volume = total_volume # class to store price data class data(object): def __init__(self): self.recent_ticks = dict() # indexed by symbol def add_tick_data(self, tick_data): self.recent_ticks[tick_data.symbol] = tick_data def get_open_price(self,symbol): return self.get_tick_data(symbol).open_price def get_close_price(self,symbol): return self.get_tick_data(symbol).close_price def get_tick_data(self,symbol): return self.recent_ticks.get(symbol, tickData()) def get_timestamp(self,symbol): return self.recent_ticks[symbol].timestamp # class to generate data sources class data_source(object): def __init__(self, symbol, tick_event_handler = None, start='', end=''): self.data = data() self.symbol = symbol self.tick_event_handler = tick_event_handler self.start, self.end = start, end self.NG = None def retrieve_time_series(self): # historical Henry Hub prices # df = yf.download(self.symbol, start_date=self.start,end_date=self.end) # Update your Quandl API key here... QUANDL_API_KEY="" quandl.ApiConfig.api_key=QUANDL_API_KEY NG = quandl.get(self.symbol, start_date = self.start, end_date = self.end) return NG def run(self): if self.NG is None: self.NG = self.retrieve_time_series() total_ticks =len(self.NG) print('Processing total_ticks:',total_ticks) for timestamp, row in self.NG.iterrows(): open_price = row['Open'] close_price = row['Last'] volume = row['Volume'] print(timestamp.date(),'TICK', self.symbol, 'open:', open_price, 'close:', close_price) tick_data = tickData(timestamp, self.symbol, open_price, close_price, volume) self.data.add_tick_data(tick_data) if self.tick_event_handler: self.tick_event_handler(self.data) class Order(object): def __init__(self, timestamp, symbol, qty, is_buy, order, price=0): self.timestamp = timestamp self.symbol = symbol self.qty = qty self.price = price self.is_buy = is_buy self.order = order self.is_filled = False self.filled_price = 0 self.filled_time = None self.filled_qty = 0 class Position(object): def __init__(self,symbol=''): self.symbol = symbol self.buys = self.sells = self.net=0 self.retpnl = 0 self.position_value = 0 def position_event(self, is_buy, qty, price): if is_buy: self.buys += qty else: self.sells += qty self.net = self.buys - self.sells changed_value = qty * price * (-1 if is_buy else 1) self.position_value += changed_value if self.net == 0: self.retpnl = self.position_value self.position_value = 0 def unrealized_pnl(self, price): if self.net == 0: return 0 value = self.net*price unrpnl = self.position_value + value # unr = unrealized return unrpnl from abc import abstractmethod class Strategy: def __init__(self, send_order_event_handler): self.send_order_event_handler = send_order_event_handler @abstractmethod def tick_event(self, data): raise NotImplementedError('Method is required!') @abstractmethod def position_event(self, positions): raise NotImplementedError('Method is required!') def send_order(self, symbol, qty, is_buy, timestamp): if self.send_order_event_handler: order = Order(timestamp, symbol, qty, is_buy, order = True, price = 0,) self.send_order_event_handler(order) import pandas as pd # MRStrat = mean reversion strategy class MRStrat(Strategy): def 
__init__(self, symbol, trade_qty, send_order_event_handler = None, lookback_intervals = 20, buy_threshold = -0.5, sell_threshold = 0.5): super(MRStrat, self).__init__(send_order_event_handler) self.symbol = symbol self.trade_qty = trade_qty self.lookback_intervals = lookback_intervals self.buy_threshold = buy_threshold self.sell_threshold = sell_threshold self.prices = DataFrame() self.is_long = self.is_short = False def position_event(self, positions): position = positions.get(self.symbol) self.is_long = position and position.net > 0 self.is_short = position and position.net < 0 def tick_event(self, data): self.store_prices(data) if len(self.prices) < self.lookback_intervals: return self.generate_signals_and_send_order(data) def store_prices(self, data): timestamp = data.get_timestamp(self.symbol) close_price = data.get_close_price(self.symbol) self.prices.loc[timestamp,'close'] = close_price def generate_signals_and_send_order(self, data): signal_value = self.calculate_z_score() timestamp = data.get_timestamp(self.symbol) if self.buy_threshold > signal_value and not self.is_long: print(timestamp.date(),'BUY signal') self.send_order(self.symbol, self.trade_qty, True, timestamp) elif self.sell_threshold < signal_value and not self.is_short: print(timestamp.date(),'SELL signal') self.send_order(self.symbol, self.trade_qty, False, timestamp) def calculate_z_score(self): self.prices = self.prices[-self.lookback_intervals:] returns = self.prices['close'].pct_change().dropna() z_score = ((returns - returns.mean()) / returns.std())[-1] return z_score class back_test: def __init__(self, symbol, trade_qty, start='', end=''): self.symbol = symbol self.trade_qty = trade_qty self.data_source = data_source(symbol, tick_event_handler = self.tick_event, start = start, end=end) self.strategy = None self.unfilled_orders = [] self.positions = dict() self.NG_retpnl = None def start(self, **kwargs): print('Backtest started...') self.unfilled_orders = [] self.positions = dict() self.NG_retpnl = DataFrame() self.strategy = MRStrat(self.symbol, self.trade_qty, send_order_event_handler = self.order_received, **kwargs) self.data_source.run() print('---------------Backtest completed-----------------') def order_received(self, order): """ Adds an order to the order book """ print(order.timestamp.date(),'ORDER', 'BUY' if order.is_buy else 'SELL', order.symbol, order.qty) self.unfilled_orders.append(order) def tick_event(self, data): self.match_order_book(data) self.strategy.tick_event(data) self.print_position_status(data) def match_order_book(self, data): if len(self.unfilled_orders) > 0: self.unfilled_orders = [order for order in self.unfilled_orders if self.match_unfilled_orders(order, data)] def match_unfilled_orders(self, order, data): symbol = order.symbol timestamp = data.get_timestamp(symbol) """ Order is matched and filled """ if order.order and timestamp > order.timestamp: open_price = data.get_open_price(symbol) order.is_filled = True order.filled_timestamp = timestamp order.filled_price = open_price self.order_filled(symbol, order.qty, order.is_buy, open_price, timestamp) return False return True def order_filled(self, symbol, qty, is_buy, filled_price, timestamp): position = self.get_position(symbol) position.position_event(is_buy, qty, filled_price) self.NG_retpnl.loc[timestamp, "ret_pnl"] = position.retpnl self.strategy.position_event(self.positions) print(timestamp.date(),'FILLED',"BUY" if is_buy else "SELL", qty, symbol, 'at', filled_price) def get_position(self, symbol): if symbol not in 
self.positions: self.positions[symbol] = Position(symbol) return self.positions[symbol] def print_position_status(self, data): for symbol, position in self.positions.items(): close_price = data.get_close_price(symbol) timestamp = data.get_timestamp(symbol) unrpnl = position.unrealized_pnl(close_price) print(timestamp.date(),'POSITION','value:%.3f'% position.position_value,'un-realized_pnl:%.3f'%unrpnl,'ret_pnl:%.3f'%position.retpnl) strategy = back_test("CHRIS/CME_NG1",1, start='2020-01-01', end='2021-07-09') strategy.start(lookback_intervals = 20, buy_threshold=-0.5, sell_threshold = 0.5) %matplotlib inline import matplotlib.pyplot as plt strategy.NG_retpnl.plot(figsize=(15,6)); plt.grid(b=True, color='k', linestyle='-', linewidth=0.2); ax.minorticks_on();plt.show() THRESHOLDS=[(-0.5,0.5), (-1.5,1.5), (-2.5,2.0), (-1.5,2.5), ] fig, axes = plt.subplots(nrows = len(THRESHOLDS) // 2, ncols = 2, figsize=(15, 6), ) fig.subplots_adjust(hspace = 0.4) for i, (buy_threshold, sell_threshold) in enumerate(THRESHOLDS): strategy.start(lookback_intervals = 15, buy_threshold = buy_threshold, sell_threshold = sell_threshold) NG_retpnls=strategy.NG_retpnl ax=axes[i // 2, i % 2] ax.set_title('Buy-Sell thresholds:(%s,%s)'%(buy_threshold, sell_threshold)) ax.grid(b=True, which='major', color='b', linestyle='-') NG_retpnls.plot(ax=ax) plt.tight_layout()Backtest started... Processing total_ticks: 376 2020-01-02 TICK CHRIS/CME_NG1 open: 2.184 close: 2.13 2020-01-03 TICK CHRIS/CME_NG1 open: 2.129 close: 2.114 2020-01-06 TICK CHRIS/CME_NG1 open: 2.112 close: 2.149 2020-01-07 TICK CHRIS/CME_NG1 open: 2.149 close: 2.154 2020-01-08 TICK CHRIS/CME_NG1 open: 2.153 close: 2.151 2020-01-09 TICK CHRIS/CME_NG1 open: 2.151 close: 2.167 2020-01-10 TICK CHRIS/CME_NG1 open: 2.166 close: 2.207 2020-01-13 TICK CHRIS/CME_NG1 open: 2.21 close: 2.19 2020-01-14 TICK CHRIS/CME_NG1 open: 2.188 close: 2.179 2020-01-15 TICK CHRIS/CME_NG1 open: 2.181 close: 2.124 2020-01-16 TICK CHRIS/CME_NG1 open: 2.134 close: 2.076 2020-01-17 TICK CHRIS/CME_NG1 open: 2.077 close: 2.003 2020-01-21 TICK CHRIS/CME_NG1 open: 1.97 close: 1.908 2020-01-22 TICK CHRIS/CME_NG1 open: 1.912 close: 1.913 2020-01-23 TICK CHRIS/CME_NG1 open: 1.915 close: 1.938 2020-01-23 SELL signal 2020-01-23 ORDER SELL CHRIS/CME_NG1 1 2020-01-24 TICK CHRIS/CME_NG1 open: 1.938 close: 1.886 2020-01-24 FILL[...]**Quantum Spy Hunter**import numpy as np # Importing standard Qiskit libraries from qiskit import QuantumCircuit, transpile, Aer, IBMQ, QuantumRegister, ClassicalRegister, execute, BasicAer from qiskit.tools.jupyter import * from qiskit.visualization import * from ibm_quantum_widgets import * from qiskit.providers.aer import QasmSimulator # Loading your IBM Quantum account(s) provider = IBMQ.load_account() import math %matplotlib inline # Set up the program alice = QuantumRegister(1, name='alice') fiber = QuantumRegister(1, name='fiber') bob = QuantumRegister(1, name='bob') alice_had = ClassicalRegister(1, name='ahad') alice_val = ClassicalRegister(1, name='aval') fiber_val = ClassicalRegister(1, name='fval') bob_had = ClassicalRegister(1, name='bhad') bob_val = ClassicalRegister(1, name='bval') qc = QuantumCircuit(alice, fiber, bob, alice_had, alice_val, fiber_val, bob_had, bob_val) # Use Alice's QPU to generate two random bits qc.reset(alice) # write the value 0 qc.h(alice) qc.measure(alice, alice_had) qc.reset(alice) # write the value 0 qc.h(alice) qc.measure(alice, alice_val) # Prepare Alice's qubit qc.reset(alice) # write the value 0 qc.x(alice).c_if(alice_val, 1) 
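# Together with the conditional H that follows, this step appears to prepare Alice's qubit BB84-style:
# the qc.x(alice).c_if(alice_val, 1) above encodes her random value bit as |0> or |1>, and applying H
# when her random "had" bit is 1 rotates that value into the |+>/|-> basis before the qubit is swapped
# onto the fiber. (Descriptive comment added for clarity; interpretation of the surrounding code.)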
qc.h(alice).c_if(alice_had, 1) # Send the qubit! qc.swap(alice, fiber) # Activate the spy spy_is_present = True if spy_is_present: qc.barrier() spy_had = True if spy_had: qc.h(fiber) qc.measure(fiber, fiber_val) qc.reset(fiber) qc.x(fiber).c_if(fiber_val, 1) if spy_had: qc.h(fiber) qc.barrier() # Use Bob's QPU to generate a random bit qc.reset(bob) qc.h(bob) qc.measure(bob, bob_had) # Receive the qubit! qc.swap(fiber, bob) qc.h(bob).c_if(bob_had, 1) qc.measure(bob, bob_val) backend = BasicAer.get_backend('statevector_simulator') job = execute(qc, backend) result = job.result() # Now Alice emails Bob to tell # him her had setting and value. # If the had setting matches and the # value does not, there's a spy! counts = result.get_counts(qc) print('counts:',counts) caught = False for key,val in counts.items(): ahad,aval,f,bhad,bval = (int(x) for x in key.split(' ')) if ahad == bhad: if aval != bval: print('Caught a spy!') caught = True if not caught: print('No spies detected.') outputstate = result.get_statevector(qc, decimals=3) print(outputstate) qc.draw() # draw the circuitcounts: {'1 1 1 1 1': 1} No spies detected. [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.-0.j 0.+0.j]TME2 : AppariementL'appariement est l'action qui consiste à rassembler par paires des choses qui sont naturellement compatibles.import import_ipynb import tme1Indexation as tm import math import numpy as npimporting Jupyter notebook from tme1Indexation.ipynbExercice 1 – Exercice de compréhension : modèle de RI simpleOn considère la collection de documents (cisi.txt et cacm.txt) et la liste des stopwords (TextRepresenter.py) du TME1. L’objectif dans cet exercice est d’estimer le score des documents pour la requˆete "home sales top" question 1) Quels index faut-il interroger pour avoir un calcule du score pertinent? On utilise l'index inversé , car il nous donne accès aux informations dont on a besoin pour calculer le score plus rapidement. question 1.2) Ecrire le code qui permet de calculer le score des documents à partir du modèle booléen ScoreBoolean retourne les documents qui contiennent tous les termes de la requête.Ca veut dire que j'ai considéré que les temes de la requête sont relié par des AND entre eux.Remarque:si j'écris q="t1 AND (t2 OR NOT T3)", si ça c'est ma requête alors je dois retourner les documents qui contiennent t1 et ( t2 ou t3).Du coup ça dépend de la syntaxe de la requête. 
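As a minimal, self-contained sketch of this AND semantics (illustrative only: `toy_inverted_index` and `boolean_and` below are placeholders standing in for the inverted index built by `IndexerSimple`, which maps each term to the documents containing it):

```python
# Illustrative sketch only -- toy_inverted_index stands in for the inverted
# index produced by IndexerSimple (term -> {doc_id: term frequency}).
toy_inverted_index = {
    "home": {"1": 2, "3": 1},
    "sale": {"1": 1, "2": 4},
    "top":  {"1": 3, "2": 1},
}

def boolean_and(query_terms, inverted_index):
    """Return the ids of documents that contain every query term."""
    postings = [set(inverted_index.get(t, {})) for t in query_terms]
    return set.intersection(*postings) if postings else set()

print(boolean_and(["home", "sale", "top"], toy_inverted_index))  # {'1'}
```

Note that the `intersection` helper defined just below removes elements from the list `M` while iterating over it, which can silently skip documents; building the result with a set intersection, as above, avoids that pitfall.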
intersection retourne la liste des documents qui contiennent tous les mots de la requete.def intersection(indexReverse,s): L=[] for c,v in s.items(): L.append(indexReverse[c]) d=L[0] M=[] for c,v in d.items(): M.append(c) for c in M: b=False for D in L: if (c not in D): b=True if(b): M.remove(c) return M def ScoreBoolean(q,T): I=tm.IndexerSimple() index,indexReverse=I.indexation(T) PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(q) return intersection(indexReverse,s)On récupère la liste des documents qui contiennent tous les mots de la requêteq="home sales top" T="cisi/cisi.txt" #print(ScoreBoolean(q,T))question 1.3) On calcule le score des documents à partir du modèle vectoriel (produit cart´esien) dans le cas d’une pond´eration tf Modèle Vectoriel= Vector Space Model (VSM).On représente les documents et les requêtes sous forme de vecteurs dans l’espace vectoriel engendré par tous les termes de la collection de documents:T (un terme = une dimension).Document : dj= (w1j, w2j, …, wMj) Requête : q= (w1q, w2q, …, wMq) wij: poids du terme ti dans le document dj : tf*idf On parle aussi de Modèle sac de mots:La représentation vectorielle ne tient pas compte de l’ordre des mots.« Un garçon manque une pomme » est représenté par le même vecteur que « une pomme mange un garçon » c’est ce que l’on appelle « Sac de mots » (Bag of words) Une collection de n documents et M termes distincts peut être représentée sous forme de matrice ( ligne : M document , colonnes: M terme de la collection) La requête est également représentée par un vecteur#I=tm.IndexerSimple() #index,indexReverse=I.indexation("cisi/cisi.txt") #les termes de l'index (les termes normalisé présents dans la collection) """ T=[] for c,v in indexReverse.items(): T.append(c) #La liste de tous les Documents la collection D=[] for c,v in index.items(): D.append(c) #Construction de la matrice de vecteurs de document M={} for d in D: di={} #vecteur associé à chaque document r=I.getTfIDFsForDoc(d) for w in T: if (w in r): di[w]=r[w] else: di[w]=0 M[d]=di """ #print(M["2"])Construction du vecteur requete. Je normalise les termes de la reqête parceque les termes de l'indexation ont tous été normalisé""" q="home sales top" PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(q) Q={} for w in T: if (w in s): Q[w]=s[w] else: Q[w]=0 """ #print(Q)La pertinence est traduite en une similarité vectorielle:un document est d'autant plus pertinent à une requête que le vecteur associé est similaire à celui de la requête. 
Le degré de correspondance de R(d,q):Produit scalaire des deux vecteurs:R(di,qk) = somme des produit des poids des termes de la reqête et ceux du document di , pour i=1,....,n""" Scores={} for c,v in M.items(): p=0 for w in T: p=p+(v[w]*Q[w]) Scores[c]=p """ #print(Scores)Exercice 2 – Projet ”Moteur de Recherche” : Etape Appariemen 1) Représentation pondérée des documents et de la requêteUne classe générique est une classe qui peut être réutilisée pour des objets de différents typesclass Weighter: def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection #getWeightsForDoc retourne les poids des termes pour un document dont l’identifiant est idDoc def getWeightsForDoc(self,idDoc): raise NotImplementedError #getWeightsForStem retourne les poids du terme stem pour tous les documents qui le contiennent def getWeightsForStem(self,stem): raise NotImplementedError #getWeightsForQuery retourne les poids des termes de la requˆete def getWeightsForQuery(self,query): raise NotImplementedErrorw t,d = tf t,d et w t,q = 1 si t ∈ q, O sinon;class Weighter1(Weighter): def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection def getWeightsForDoc(self,idDoc): d=self.index[idDoc] for m in self.mots: #tous les termes de la collection qui ne sont pas dans ce doc ont un poid de 0 if(m not in d): d[m]=0 return d def getWeightsForStem(self,stem): return self.indexReverse[stem] def getWeightsForQuery(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) q={} for m in self.mots: if(m in s): q[m]=1 else: q[m]=0 return q #w=Weighter1("cisi/cisi.txt")Test:on affiche le poid de tous les terme (normalisé) de la collection pour le document dont l'id est 1#print(w.getWeightsForDoc("1"))on affiche le poid du mot "present" "seulement" pour les documents qui le contient#print(w.getWeightsForStem("present"))on affiche pour chaque terme de la collection le poid de ce terme pour la reqête#print(w.getWeightsForQuery("home saled top"))w t,d = tf t,d et w t,q = tf t,qclass Weighter2(Weighter): def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection def getWeightsForDoc(self,idDoc): d=self.index[idDoc] for m in self.mots: #tous les termes de la collection qui ne sont pas dans ce doc ont un poid de 0 if(m not in d): d[m]=0 return d def getWeightsForStem(self,stem): return self.indexReverse[stem] def getWeightsForQuery(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) q={} for m in self.mots: if(m in s): q[m]=s[m] #il y a que cette ligne qui a changé else: q[m]=0 return qw t,d = tf t,d et w t,q = idf t si t ∈ q, 0 sinon;class Weighter3(Weighter): def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la 
liste de tous les id des documents de la collection def getWeightsForDoc(self,idDoc): d=self.index[idDoc] for m in self.mots: #tous les termes de la collection qui ne sont pas dans ce doc ont un poid de 0 if(m not in d): d[m]=0 return d def getWeightsForStem(self,stem): return self.indexReverse[stem] def getWeightsForQuery(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) q={} N=len(self.docs) for m in self.mots: if(m in s): idf=math.log( (1+N) / (1+len(self.indexReverse[m])) ) q[m]=idf else: q[m]=0 return q #w=Weighter3("cisi/cisi.txt") #print(w.getWeightsForQuery("home saled top"))w t,d = 1 + log(tf t,d) si t ∈ d, 0 sinon; et w t,q = idf t si t ∈ q, 0 sinonclass Weighter4(Weighter): def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection def getWeightsForDoc(self,idDoc): d=self.index[idDoc] di={} for m in self.mots: #tous lew=Weighter3("cisi/cisi.txt")s termes de la collection qui ne sont pas dans ce doc ont un poid de 0 if(m in d): di[m]=1+math.log(d[m]) else: di[m]=0 return di def getWeightsForStem(self,stem): r=self.indexReverse[stem] d={} for c,v in r.items(): d[c]=1+math.log(v) return d def getWeightsForQuery(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) q={} N=len(self.docs) for m in self.mots: if(m in s): idf=math.log( (1+N) / (1+len(self.indexReverse[m])) ) q[m]=idf else: q[m]=0 return q #w=Weighter4("cisi/cisi.txt") #print(w.getWeightsForDoc("1")) #print(w.getWeightsForStem("present"))wt,d = (1 + log(tf t,d))x idf t si t ∈ d, 0 sinon; et wt,q = (1 + log(tf t,q))x idf t si t ∈ q, 0.class Weighter5(Weighter): def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=self.I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection def getWeightsForDoc(self,idDoc): d=self.index[idDoc] di={} N=len(self.docs) for m in self.mots: #tous lew=Weighter3("cisi/cisi.txt")s termes de la collection qui ne sont pas dans ce doc ont un poid de 0 if(m in d): idf=math.log( (1+N) / (1+len(self.indexReverse[m])) ) di[m]=(1+math.log(d[m]))*idf else: di[m]=0 return di def getWeightsForStem(self,stem): r=self.indexReverse[stem] d={} N=len(self.docs) idf=math.log( (1+N) / (1+len(r)) ) for c,v in r.items(): d[c]=(1+math.log(v))*idf return d def getWeightsForQuery(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) q={} N=len(self.docs) for m in self.mots: if(m in s): idf=math.log( (1+N) / (1+len(self.indexReverse[m])) ) q[m]=(1+math.log(s[m]))*idf else: q[m]=0 return q2) Modèles de RIclass IRModel: def __init__(self,T): self.I=tm.IndexerSimple() self.index,self.indexReverse=I.indexation(T) self.mots=[c for c,v in self.indexReverse.items()] #récupère tous les mots de la collection self.docs=[c for c,v in self.index.items()] #répère la liste de tous les id des documents de la collection def getScores(self,query): # retourne les scores des documents pour une requête raise NotImplementedError def getRanking(self,query): # retourne une liste de couples (document-score) ordonn´ee par score d´ecroissante scores=self.getScores(query) return sorted(scores.items(), key=lambda t: t[1],reverse=True)question 2.2) Définir une classe Vectoriel 
qui hérite de IRModel. Elle comporte en paramètres un Weighter défini dans l’´etape précédente ainsi qu’un booléen normalized permettant de définir la fonction de score (produit scalaire si faux et score cosinus si vrai). ( les normes des vecteurs des documents ne doivent pas être calculés à chaque nouvelle requête ).class Vectoriel(IRModel): def __init__(self,weighter, normalized): self.weighter=weighter self. normalized= normalized self.nd={} self.M={} for d in self.weighter.docs: self.M[d]=self.weighter.getWeightsForDoc(d) # pour chaque document de la collection , on récupère la liste des poids de chaque termes de la collection for c,v in self.M.items(): d2=[] for ca,va in v.items(): d2.append(va*va) self.nd[c]=math.sqrt(sum(d2)) #on calcule la norme de chaque document def getScores(self,query): Scores={} q=self.weighter.getWeightsForQuery(query) for c,v in self.M.items(): p=0 for w in self.weighter.mots: p=p+(v[w]*q[w]) Scores[c]=p if(self.normalized): q2=[] for c,v in q.items(): q2.append(v*v) nq=math.sqrt(sum(q2)) Scores2={} for c,v in Scores.items(): Scores2[c]=v/(self.nd[c]*nq) Scores=Scores2 return Scores #V=Vectoriel(w,False) #S=V.getScores("home saled top") #print(S) #V=Vectoriel(w,True) #S=V.getScores("home saled top") #print(S) #print(V.getRanking("home saled top"))question 2.3) Définir les classes ModeleLangue et Okapi permettant de calculer les scores pour respectivement le modèle de langue (lissage Jelinek-Mercer) et Okapi-BM25 Modèle de langue :Un modèle de langue est défini par son vocabulaire (mots simples, séquence de mots).Chaque mot (m)/séquence de mots(m1m2..mn) a une probabilité d’être généré(e).Le but est de calculer --> P(s|M): -s une observation (séquence de mots/texte) quelconque -Probabilité d’observer s dans le modèle (la langue) MDéfinir la taille des séquences générées par le modèle ? --> Séquence de 1 mot, 2 mots, 3 mots, … Estimer le modèle à probabilité de chaque séquence générée ? Calculer la probabilité d’une observation (un texte) quelconque? On choisit : Séquence d’un mot --> modèle unigram . soit s une observation (un texte) de n mots s=m1 m2…mn .Unigram – (M génère des séquences de 1 mot) . P(S|M)= P(m1m2....mn)= produit des P(mi|M) , pour i=1,....,n.UNigram : P(mi | M) = tf (mi) /nombre total de mots dans M.:Problème des fréquences nulles (zéro) : -Si un événement (un mot de la séquence) n’apparait pas dans le modèle, le modèle lui assigne une probabilité 0 -Solution : assigner des probabilités différentes de zéro aux événements (mots) absentsLissage par interpolation : - Les méthodes de « discounting » traitent les mots qui n’apparaissent pas dans le corpus de la même manière. Or, il y a des mots qui peuvent être plus fréquents que d’autres -Solution – Interpoler le modèle en utilisant d’autres sources d’évidence (par exemple la collection de documents) Interpolation (Jelinek-Mercer) : -RSV(Q,d)= Produit ( (1-lambda) * p(t|Mc) + lambda*p(t|Md)), pourtous les termes t de la requête. 
P(t|Mc)=p(t)=frequence du terme t dans la collection / somme des fréquence de tous les termes de la collection -(λ=0.8 pour les requêtes courtes et 0.2 pour les requêtes longues)class ModeleLangue(IRModel): def __init__(self,weighter,l): self.weighter=weighter L=[sum(v.values()) for c,v in self.weighter.indexReverse.items()] self.total=sum(L) self.l=l def ptD(self,t,d): s=sum(d.values()) if(t in d): return d[t]/s else: return 0 def ptC(self,t): if(t in self.weighter.indexReverse): return sum(self.weighter.indexReverse[t].values()) / self.total else: return 0 def getScores(self,query): PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) Scores={} for d in self.weighter.docs: p=1 for m,v in s.items(): p=p*( (1-self.l) * self.ptC(m) + self.l*self.ptD(m,self.weighter.index[d])) Scores[d]=p return Scores #M=ModeleLangue(w,0.8) #print(M.getScores("home saled top")) #print(M.getRanking("home saled top"))Okapi-BM25class Okapi(IRModel): def __init__(self,weighter,k,b): self.weighter=weighter self.k=k self.b=b def getScores(self,query): Scores={} PS=tm.tr.PorterStemmer() s=PS.getTextRepresentation(query) avgdl=0 L=[] for c,v in self.weighter.index.items(): t=sum(v.values()) L.append(t) avgdl=(sum(L)/len(self.weighter.index)) for d in self.weighter.docs: t=self.weighter.index[d] D=sum(t.values()) for c,v in s.items(): sc=0 #ATTENTION : SI LE MOT DE LA REQUETE NE FIGURE PAS DANS L'INDEXINVERSE if(c not in self.weighter.indexReverse ): n=0 else: n=self.weighter.indexReverse[c] idf=math.log( (1+len(self.weighter.docs)) / (1+len(n))) if(c in t): tf=t[c] else: tf=0 if( (tf + self.k * (1-self.b+self.b*(D/avgdl))) != 0 ): sc=sc + idf * ( ( tf*(self.k+1) ) / ( tf + self.k * (1-self.b+self.b*(D/avgdl)) ) ) Scores[d]=sc return Scores #M=Okapi(w,1.2,0.75) #print(M.getRanking("home saled top"))3 Bonus - Très fortement conseillés question 2.4)Q=["convention depth week profil","object issu assign", "inter feeder result " , "emphasi length demonstrat organ" , "group receipt connect relev", "run fundament assign librari", "vindic howel baxter data", "mechan timeli physical negoti", "mari historiograph desper demonstrat", "home saled top"]On sépare l’ensemble des requêtes en deux ensembles (train/test)train= Q[:int(len(Q)/2)] test= Q[int(len(Q)/2):] #print(train) #print(test)On définit une grille de valeurs à tester (de 0 à 1 par pas de 0.1). On expérimente chaque combinaison possible pour chaque modèle sur le jeu de données train Rappel : il y a 4 classe weighter différentes , chacune a sa façon de calculer le poid d'un terme pourla requête et le poid d'un terme pour le document Test sur l'ensemble "train" Test Modele LangueDans le modèle de langue il y a un seul paramètre qui varie (c'est le lambda). (Voir en haut à quoi correspond ce lambda)#w=Weighter1("cisi/cisi.txt")Pour chaque valeur de lambda , on cré un modele de langue parametré avec ce lambda. 
Ensuite pour ce modèle de langue , on parcourt chaque requête dans l'ensemble train , et on calcule pour chacune de ces reqête le score associé à chaque document de la collection "cisi.txt"""" G={} for i in np.arange(0,1.1,0.1): ML=ModeleLangue(w,i) d={} for c in train: scores=ML.getRanking(c) d[c]=scores G[i]=d """ #print(G[0.1])Observation : pour chaque valeur de lambda , on obtient pour chaque requête un ensemble de scores de documents différent Test Modele VectorielSur ce modèle il n'y a pas de paramètres.Mais il peut être intéressant de changer notre weighter , car chaque weighter a sa manière de calculer le vecteur de poid des documents et de la requête""" w1=Weighter1("cisi/cisi.txt") w2=Weighter2("cisi/cisi.txt") w3=Weighter3("cisi/cisi.txt") w4=Weighter4("cisi/cisi.txt") w5=Weighter5("cisi/cisi.txt") W=[w1,w2,w3,w4,w5] """ """ Gw={} i=1 for w in W: V=Vectoriel(w,False) # normalized = false , on a utilisé la similarité produit scalaire d={} for c in train: scores=V.getRanking(c) d[c]=scores Gw[str(i)]=d i+=1 """ #print(Gw['4']) #print(Gw['5'])Observation : pour des weighter différents , sur chaque modele Vectoriel construit , on remarque que l'ordre des documents classé selon leur score, obtenue sur chaque reqête , est à peu près le même Test Modele Okapi Pour chaque valeur de k et b , on cré un modele Okapi parametré avec ce k et ce b. Ensuite pour ce modèle Okapi , on parcourt chaque requête dans l'ensemble train , et on calcule pour chacune de ces reqête le score associé à chaque document de la collection "cisi.txt"""" G={} for k in np.arange(0,1.1,0.1): for b in np.arange(0,1.1,0.1): Mo=Okapi(w,k,b) d={} for c in train: scores=Mo.getRanking(c) d[c]=scores G[(k,b)]=d """ #print(G[(0.0,0.0)]) #print(G[(1.0,0.8)])On remarque que pour des valeurs de paramètres différents sur k et b , on obtient des scores de documents différents sur chacune des requêtes, mais l'ordre de pertinence des documents sur chacune des reqêtes n'a pas l'air de changer on pourra tester dans le TME 3 la combinaison qui obtient la meilleure valeur de métrique (MAP, Pr´ecision, ...)on applique ces valeurs sur le jeu de test Test sur l'ensemble "test" Test Model Langue""" G={} for i in np.arange(0,1.1,0.1): ML=ModeleLangue(w,i) d={} for c in test: scores=ML.getRanking(c) d[c]=scores G[i]=d """ #print(G[0.8]) #print(G[0.9])Observation : pour des valeurs de lambda différentes , on obtient à peu près le même ordre score de pertinence.C'est la valeur associé à chacun des scores qui change , mais cela n'a pas l'air de modifier l'ordre de pertience des documents sur chaque requête. 
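One way to put this "the ordering barely changes across lambda" observation on a quantitative footing, sketched purely as an illustration (`topk_overlap` is a hypothetical helper, not part of the TME code):

```python
# Illustrative helper: fraction of shared documents in the top-k of two
# rankings, where each ranking is a list of (doc_id, score) pairs as
# returned by getRanking().
def topk_overlap(ranking_a, ranking_b, k=10):
    top_a = {doc for doc, _ in ranking_a[:k]}
    top_b = {doc for doc, _ in ranking_b[:k]}
    return len(top_a & top_b) / k

# e.g. compare ModeleLangue rankings for two lambda values on one query q:
# topk_overlap(ModeleLangue(w, 0.2).getRanking(q), ModeleLangue(w, 0.8).getRanking(q))
```

A value close to 1.0 would support the observation that changing lambda mostly rescales the scores without reordering the documents.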
Test Model Okapi""" G={} for k in np.arange(0,1.1,0.1): for b in np.arange(0,1.1,0.1): Mo=Okapi(w,k,b) d={} for c in test: scores=Mo.getRanking(c) d[c]=scores G[(k,b)]=d """ #print(G[(0.5,0.2)]) #print(G[(0.9,0.4)])CTDCdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/CTDC_d.csv') data.tail() classModel(data) accuracy_scoresCTDTdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/CTDT_d.csv') data.tail() classModel(data) accuracy_scoresCTriaddata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/CTriad_d.csv') data.tail() classModel(data) accuracy_scoresAACdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/AAC_d.csv') data.tail() classModel(data) accuracy_scoresGAACdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/GAAC_d.csv') data.tail() classModel(data) accuracy_scoresCombined Featuresdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/Combined_d.csv') data.tail() classModel(data) accuracy_scores import matplotlib.pyplot as plt features= ['CTDC','CTDT','CTriad','AAC','GAAC', 'Combined'] accuracy_scores = np.array(accuracy_scores) accuracy_scores=accuracy_scores*100PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/output_csv_iFeature/output_PSSM.csv') data.tail() data = data.drop(data.columns[[0]],axis = 1) data.tail() classes = pd.read_csv("/content/drive/MyDrive/Project/classes_d_len80_forpssm2.csv") classes.tail() data = pd.concat([data, classes], axis=1) data.tail() data.to_csv("/content/drive/MyDrive/Project/output_csv_iFeature/PSSM_d.csv",index=False) classModel(data)PCA Pssm - ncomponent 10data = pd.read_csv("/content/drive/MyDrive/Project/output_csv_iFeature/output_pca_pssm_10.csv") data.tail() classModel(data) data = pd.read_csv("/content/drive/MyDrive/Project/output_csv_iFeature/output_pca_pssm_3.csv") data.tail() classModel(data)/usr/local/lib/python3.7/dist-packages/xgboost/sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. warnings.warn(label_encoder_deprecation_msg, UserWarning)AAC_PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/AAC_PSSM.csv') data.tail() data=data.drop(columns=['d1'],axis=1) data.info() classModel(data)[14:12:12] WARNING: ../src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.GREY_PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/GREY_PSSM.csv') data.tail() classModel(data)/usr/local/lib/python3.7/dist-packages/xgboost/sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. 
warnings.warn(label_encoder_deprecation_msg, UserWarning)AB_PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/AB_PSSM.csv') data.tail() classModel(data)/usr/local/lib/python3.7/dist-packages/xgboost/sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. warnings.warn(label_encoder_deprecation_msg, UserWarning)DISCRETE_COSINE_PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/DISCRETE_COSINE_PSSM.csv') data.tail() classModel(data)/usr/local/lib/python3.7/dist-packages/xgboost/sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. warnings.warn(label_encoder_deprecation_msg, UserWarning)PSSM400data = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/PSSM400.csv') data.tail() classModel(data)/usr/local/lib/python3.7/dist-packages/xgboost/sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. warnings.warn(label_encoder_deprecation_msg, UserWarning)RPSSM_PSSMdata = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/RPSSM_PSSM.csv') data.tail() classModel(data) def classModel2(data): X= data.iloc[:,:-1].values y= data.iloc[:,-1].values le = LabelEncoder() print(y) y= le.fit_transform(y) print(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 5) model = XGBClassifier(objective='multi:softmax') print(model.objective) model.fit(X_train, y_train) y_pred = model.predict(X_test) print('\nClassification report:\n', classification_report(y_test,y_pred)) print('Confusion matrix: \n', confusion_matrix(y_test,y_pred)) accuracy = accuracy_score(y_test,y_pred) # accuracy_scores.append(accuracy) print("\n\nAccuracy:",accuracy_score(y_test, y_pred)) print("\n\nPrecision:",precision_score(y_test, y_pred, average=None)) print("\nRecall:",recall_score(y_test, y_pred, average=None)) print("\nF1 score:",f1_score(y_test, y_pred, average=None)) data = pd.read_csv('/content/drive/MyDrive/Project/Features_PSSM/RPSSM_PSSM.csv') data.tail() classModel2(data) !pwd %cd /content/drive/MyDrive/Project/Features_PSSM !pwd files= ['AADP_PSSM.csv', 'DPC_PSSM.csv', 'DFMCA_PSSM.csv', 'LPC_PSSM.csv', 'MBMGAC_PSSM.csv'] def automation(filenames): size= len(filenames) for i in range(size): print() print() print("Feature Name: ", filenames[i]) print() data = pd.read_csv(filenames[i]) classModel2(data) automation(files) output_files= ['SOMA_PSSM.csv', 'SINGLE_AVERAGE_PSSM.csv', 'SVD_PSSM.csv'] automation(output_files) files= ['AATP_TPCC.csv', 'K_SEPERATED_BIGRAME.csv', 'CS_PSE_PSSM.csv', 'DWT_PSSM.csv', 'EDP_MEDP.csv', 'FPSSM.csv', 'PSSMBLOCK.csv', 'SCSH2.csv', 'TRIGRAME_PSSM.csv' ] automation(files) df1= pd.read_csv('MBMGAC_PSSM.csv') df1.tail() df2= pd.read_csv( 'CS_PSE_PSSM.csv') df2.tail() 
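# Note: df2 (CS_PSE_PSSM features) still carries its 'class' label column at this point; it is
# dropped below so that, after the axis=1 concat with df1 (MBMGAC_PSSM, whose class column is
# presumably last), the combined frame keeps a single label column -- classModel2/automation read
# the label from the last column (y = data.iloc[:, -1]). (Descriptive comment added for clarity.)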
df2=df2.drop(columns=['class'], axis=1) df2.tail() df3=pd.concat([df2,df1], axis=1) df3.tail() df3.to_csv('combined_CS_PSE_MBMGAC.csv', index=False) automation(['combined_CS_PSE_MBMGAC.csv'])Feature Name: combined_CS_PSE_MBMGAC.csv ['d' 'd' 'd' ... 'd' 'd' 'b'] [3 3 3 ... 3 3 1] multi:softmaxUsing combined of some features which gave >=70automation(['combined_pssm.csv']) !pip install -U xgboost import pickle data=pd.read_csv('combined_pssm.csv') data.tail()using gpu on combined featuresX= data.iloc[:,:-1].values y= data.iloc[:,-1].values le = LabelEncoder() print(y) y= le.fit_transform(y) print(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 5) model = XGBClassifier(tree_method='gpu_hist',use_label_encoder=False) model.fit(X_train, y_train) y_pred = model.predict(X_test) print('\nClassification report:\n', classification_report(y_test,y_pred)) print('Confusion matrix: \n', confusion_matrix(y_test,y_pred)) accuracy = accuracy_score(y_test,y_pred) # accuracy_scores.append(accuracy) print("\n\nAccuracy:",accuracy_score(y_test, y_pred)) print("\n\nPrecision:",precision_score(y_test, y_pred, average=None)) print("\nRecall:",recall_score(y_test, y_pred, average=None)) print("\nF1 score:",f1_score(y_test, y_pred, average=None))['d' 'd' 'd' ... 'd' 'd' 'b'] [3 3 3 ... 3 3 1] [16:27:44] WARNING: ../src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior. Classification report: precision recall f1-score support 0 0.90 0.89 0.89 772 1 0.84 0.89 0.86 825 2 0.92 0.91 0.91 1293 3 0.79 0.76 0.77 1021 accuracy 0.86 3911 macro avg 0.86 0.86 0.86 3911 weighted avg 0.86 0.86 0.86 3911 Confusion matrix: [[ 686 9 20 57] [ 13 732 7 73] [ 15 22 1175 81] [ 52 111 81 777]] Accuracy: 0.8616722065967783 Precision: [0.89556136 0.8375286 0.91582229 0.78643725] Recall: [0.88860104 0.88727273 0.90[...]some change in parameter of modelaccuracy_scores=[] %cd /content/drive/MyDrive/Project/Features_PSSM def classModel3(data): X= data.iloc[:,:-1].values y= data.iloc[:,-1].values le = LabelEncoder() print(y) y= le.fit_transform(y) print(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 5) model = XGBClassifier(tree_method='gpu_hist',use_label_encoder=False) model.fit(X_train, y_train) y_pred = model.predict(X_test) print('\nClassification report:\n', classification_report(y_test,y_pred)) print('Confusion matrix: \n', confusion_matrix(y_test,y_pred)) accuracy = accuracy_score(y_test,y_pred) accuracy_scores.append(accuracy) print("\n\nAccuracy:",accuracy_score(y_test, y_pred)) print("\n\nPrecision:",precision_score(y_test, y_pred, average=None)) print("\nRecall:",recall_score(y_test, y_pred, average=None)) print("\nF1 score:",f1_score(y_test, y_pred, average=None)) def automation(filenames): size= len(filenames) for i in range(size): print() print() print("Feature Name: ", filenames[i]) print() data = pd.read_csv(filenames[i]) classModel3(data) files= ['AADP_PSSM.csv', 'DPC_PSSM.csv', 'DFMCA_PSSM.csv', 'LPC_PSSM.csv', 'MBMGAC_PSSM.csv', 'SOMA_PSSM.csv', 'SINGLE_AVERAGE_PSSM.csv', 'SVD_PSSM.csv', 'AATP_TPCC.csv', 'K_SEPERATED_BIGRAME.csv', 'CS_PSE_PSSM.csv', 'DWT_PSSM.csv', 'EDP_MEDP.csv', 'FPSSM.csv', 'PSSMBLOCK.csv', 'SCSH2.csv', 'TRIGRAME_PSSM.csv', 'combined_CS_PSE_MBMGAC.csv', 'combined_pssm.csv' ] automation(files) import matplotlib.pyplot as plt plt.bar(files, 
accuracy_scores, color ='orange',width = 0.2) plt.plot(files,accuracy_scores) plt.scatter(files,accuracy_scores)Matplotlib linestyle standard examplesIn this notebook I will generate a plot with all available linestyle options to draw a line plot with matplotlib.The final figure is this one:![matplotlib_linestyles.png](attachment:matplotlib_linestyles.png) Then I show the step-by-step process used to obtain the above graph.--- ImportsLet's start by importing sugar, spice and everything nice:- *matplotlib* to draw the graph;- *numpy* to generate some data;import matplotlib.pyplot as plt import numpy as npChecking versions Python```pythonimport platformprint('Python version: ' + platform.python_version())```Python version: 3.7.3 IPython```pythonimport IPythonprint('IPython version: ' + IPython.__version__)```IPython version: 7.22.0 matplotlib```pythonimport matplotlib as mplprint('matplotlib version: ' + mpl.__version__)```matplotlib version: 3.3.4 NumPy```pythonimport numpy as npprint('NumPy version: ' + np.__version__)```NumPy version: 1.19.2 Accessing all available linestylesBefore generating the graph, we need to obtain the linestyles from matplotlib.One way to obtain them is through the ```lines``` module ([documentation](https://matplotlib.org/stable/api/lines_api.html)). First, we import this module:from matplotlib import linesAnd then assign the linestyles to a variable:lines_options = lines.lineStyles lines_optionsThe linestyles are stored in a dictionary, where the keys contain the representations of the lines, and the values contain a simple description of the respective linestyle.Now, I'm going to create two lists, one with the symbol of the linestyles (keys) and another with the name of the linestyles (values):line_styles = list(lines_options.keys()) line_description = list(lines_options.values())And I'm going to create another list with both description and symbol, which will be used in the plot legend. However, the underscore at the beginning of the name will cause problems when generating the legend, raising the following error:```No handles with labels found to put in legend.```To work around this issue, I will add a blank space before the underscore.lines_description = [] for i in range(len(line_styles)): lines_description.append(' ' + line_description[i] + ' ( "' + line_styles[i] + '" ) ') lines_description[0]Generating the dataSince I only need lines to demonstrate the style of the lines, I will create a variable for the *x* axis with only two points, and another variable for the *y* axis also with two points, but filled with ones:x = np.arange(1,3) y = np.ones(2)Generating the plotTo plot the graph, I will create a figure: ```python plt.figure(figsize=(12,6))```Then I will add each linestyle with a ```plt.plot()``` in a for loop, which will iterate through all items in lines_description:```python for i in range(len(lines_description)): plt.plot(x, y+i, linewidth=2, linestyle=line_styles[i], label=lines_description[i])```The ```x``` is the values for the *x* axis;The ```y + i ``` is the values for the *y* axis.
In each iteration, the value of y is increased by the value of i, which prevents the lines from overlapping and leaves a homogeneous spacing between the lines;The ```linewidth = 2``` changes the line width to 2;The ```linestyle=line_styles[i]``` changes the line style based on the ```line_style``` *list*;The ```label=lines_description[i]``` changes the label for each line based on the ```lines_description``` *list*;After this, I will change the range of the *y* axis to improve readability:```python plt.ylim(0,i+1) ```The, I will remove the ticks of the *x* and *y* axis:```python plt.xticks([]) removendo os ticks do eixo xplt.yticks([]) removendo os ticks do eixo x```And I Will also add a title:```python plt.title("Matplotlib linestyle standard options")```Then I will add the legend outside the figure:```python plt.legend(bbox_to_anchor=(1,.8)) ```The ```bbox_to_anchor=(1,.8)``` changes the legend position;Then I save the plot:```python plt.savefig("matplotlib_linestyles.png", dpi=100, bbox_inches='tight')```The ```dpi``` sets the pixel density;The ```bbox_inches='tight'``` makes the legend not to be cut off in the saved version of the graph.Finally, I show the plot:```python plt.show()```plt.figure(figsize=(12,6)) for i in range(len(lines_description)): plt.plot(x, y + i, linewidth = 2, linestyle = line_styles[i], label=lines_description[i]) plt.ylim(0,i+1) plt.xticks([]) plt.yticks([]) plt.title("Matplotlib linestyle standard options") plt.legend(bbox_to_anchor=(1,.8)) plt.savefig("matplotlib_linestyles.png", dpi=100, bbox_inches='tight') plt.show()so we should have a grid spacing of 10' in the x direction and 12' in the y direction, divided by 8'1"def Rmat(theta): S, C = np.sin(theta), np.cos(theta) return np.array([ [C, S], [-S, C] ]) def RdRmat(theta): S, C = np.sin(theta), np.cos(theta) return np.array([ [C, S], [-S, C] ]), np.array([ [-S, C], [-C, -S] ]) xspc = 10/8.1 yspc = 12/8.1 xypts = pts[:, (gray[ceilmask] > 240)] xypts = np.dot(Rmat(0.0), xypts) xydist = ((xypts.T + [0.25 + xspc/2, 0.85 + yspc/2]) % [xspc, yspc]) - [xspc/2, yspc/2] plt.scatter(*xydist.T) plt.axis('equal') plt.xlim(-xspc/2, xspc) #plt.ylim(-yspc/2, yspc) def mkgrid(xspc, yspc, N, u, v, theta): mg = (np.mgrid[:N, :N].reshape(2, -1).T - [(N-1)/2, (N-1)/2]) * [xspc, yspc] - [u, v] mg = np.dot(Rmat(theta), mg.T) mg = np.vstack([mg, np.ones((1, mg.shape[1]))]) mg = np.dot(cv2.Rodrigues(np.array([0, (-22.)*np.pi/180., 0]))[0], mg) mg = mg[:, mg[2] > 0] mg /= mg[2] return cv2.fisheye.distortPoints(mg.T[None, :, :2].astype(np.float32), K, dist) plt.figure(figsize=(12, 10)) plt.imshow(gray*ceilmask > 240, cmap='gray') plt.plot(*mkgrid(xspc, yspc, 13, 0.25564975, -0.67761807, 0.00103883)[0].T, '+') plt.ylim(480, 0) def moddist(x, q): return (x+q/2)%q - q/2 plt.plot(np.arange(-30, 30), moddist(np.arange(-30, 30), 10)) # pixel cost: # Rxy R.xy # cost = 1/2 [moddist(Rx - u, xspc)**2, moddist(Ry - v, yspc)**2] # dcost/duv = [-(Rx - u), 0 ] # [ 0, -(Ry, v)] # dcost/dtheta = [(Rx - u)*d(Rx)/dtheta, (Ry - v)*d(Ry)/dtheta] def moddist(x, q): return (x+q/2)%q - q/2 def match(gray): return pts[:, gray[ceilmask] > 240] def cost(xy, u, v, theta): N = xy.shape[1] x = xy[0] y = xy[1] S = np.sin(theta) C = np.cos(theta) dRx = x*S - C*y dRy = x*C + S*y dx = moddist(x*C + y*S - u, xspc) dy = moddist(-x*S + y*C - v, yspc) S2 = np.sum(dRx) S3 = np.sum(dRy) JTJ = np.array([[N, 0, S2], [0, N, S3], [S2, S3, np.sum(x**2 + y**2)]]) JTr = np.array([-np.sum(dx), -np.sum(dy), -np.sum(dx*dRx + dy*dRy)]) return 0.5*np.sum(dx**2 + dy**2), 
-np.linalg.solve(JTJ + np.eye(3), JTr) xy = match(gray) print(cost(xy, -.248, .145, 0.0126)) %timeit cost(xy, -.248, .145, 0.0126) #e = 1e-6 #s, *_ = cost(gray, -2.25, 0.7, 0.03) #s1, *_ = cost(gray, -2.25+e, 0.7, 0.03) #s2, *_ = cost(gray, -2.25, 0.7+e, 0.03) #s3, *_ = cost(gray, -2.25, 0.7, 0.03+e) #print(np.sum(s)) #print(np.sum(Ju, axis=1) + np.sum(Jv, axis=1)) #print(np.sum(s1-s)/e, np.sum(s2-s)/e, np.sum(s3-s)/e) # 8.0 159701.53572861606 # 8.1 150074.75218936958 # 8.2 143326.9812395357 # 8.3 139387.52407613743 # 8.4 138146.47735365925 # 8.5 139404.41798315747 ceilheight = 8.0 xspc = 10/ceilheight #xspc = 2/ceilheight yspc = 12/ceilheight b = np.zeros(3) xy = match(gray) for i in range(10): c, dB = cost(xy, *b) print(b, c, dB) b += dB print(b) f = open("../../logs/cl20190707/cycloid-20190707-162426.rec", "rb") track = [] gyroz = [] b = np.zeros(3) totalc = 0 for frame in recordreader.RecordIterator(f): gray = frame['yuv420'][:480] gyroz.append(frame['carstate'][3][2]) xy = match(gray) for i in range(6): c, dB = cost(xy, *b) b += dB track.append(b.copy()) totalc += c f.close() print(totalc) track = np.array(track) plt.figure(figsize=(12,10)) plt.plot(*track[:, :2].T, '.') mg = (np.mgrid[:8, :4].reshape(2, -1).T - [5, 0]) * [xspc, yspc] plt.plot(mg[:, 0], mg[:, 1], '+') plt.plot(*track[0, :2].T, 'x') plt.axis('equal') plt.plot(-30*(track[1:, 2] - track[:-1, 2])) plt.plot(gyroz) floorheight = 0.2 / ceilheight # it stands 90mm off the ground, which is almost exactly 0.3 feet (4") floorlut = frontend.genlut()[0] fxy = floorlut[:2] / floorlut[2] #floormask = (floorlut[2] < 0) & (np.sum(fxy**2, axis=0) < 128**2) & (np.sum(origpts**2, axis=2) < 6**2) & (gray > 50) floormask = (floorlut[2] < 0) & (np.sum(fxy**2, axis=0) < 128**2) & (np.sum(origpts**2, axis=2) < 6**2) & (gray > 50) plt.figure(figsize=(12,10)) plt.imshow(bgr[:, :, ::-1]*floormask[:, :, None] + 170*(1-floormask[:, :, None])) fpts = floorheight * fxy[:, floormask] plt.axis('equal') plt.scatter(*np.dot(Rmat(0.), fpts), s=1, c=bgr[floormask, ::-1]/255.0) vidout = cv2.VideoWriter("mapping.mp4", cv2.VideoWriter_fourcc('X', '2', '6', '4'), 30, (640, 360), True) ceilheight = 8.25 xspc = 10/ceilheight yspc = 12/ceilheight #xspc = 2/ceilheight #yspc = 12/ceilheight floorheight = 0.23 / ceilheight # it stands 90mm off the ground, which is almost exactly 0.3 feet (4") floorlut, origpts = frontend.genlut() fxy = floorlut[:2] / floorlut[2] floormask = (floorlut[2] < 0) & (np.sum(fxy**2, axis=0) < 16**2) & (np.sum(origpts**2, axis=2) < 6**2) & (gray > 50) fpts = floorheight * fxy[:, floormask] f = open("../../logs/cl20190707/cycloid-20190707-183147.rec", "rb") #f = open("../../logs/cl20190707/cycloid-20190707-162426.rec", "rb") track = [] totalc = 0 mapsz = 360 floormapbgr = np.zeros((mapsz, mapsz, 3)) floormapN = np.ones((mapsz, mapsz)) Z = 40 b = np.float32([230/Z, 40/Z, 0]) cmapsz = 50 ceilmapY = np.zeros((cmapsz, cmapsz)) ceilmapN = np.ones((cmapsz, cmapsz)) track = [] for frame in recordreader.RecordIterator(f): gray = frame['yuv420'][:480] bgr = cv2.cvtColor(frame['yuv420'], cv2.COLOR_YUV2BGR_I420) gyroz.append(frame['carstate'][3][2]) # use wheel odometry for correction # b[2] -= frame['carstate'][3][2] / 30.0 xy = match(gray) for i in range(6): c, dB = cost(xy, *b) b += dB track.append(b.copy()) totalc += c p = Z*(np.dot(Rmat(b[2]), fpts).T + b[:2]) mask2 = (p[:, 0] >= 0) & (p[:, 1] >= 0) & (p[:, 0] < mapsz-1) & (p[:, 1] < mapsz-1) p = p[mask2] pi = p.astype(np.int) idxs = pi[:, 1] * mapsz + pi[:, 0] floormapN[:] += np.bincount(idxs, 
np.ones(len(idxs)), mapsz*mapsz).reshape((-1, mapsz)) for i in range(3): floormapbgr[:, :, i] += np.bincount(idxs, bgr[floormask, i][mask2], mapsz*mapsz).reshape((-1, mapsz)) #floormapbgr[pi[:, 1], pi[:, 0], :] = bgr[floormask][mask2] fview = (floormapbgr / floormapN[:, :, None]).astype(np.uint8) cv2.line(fview, (int(Z*b[0]), int(Z*b[1])), (int(Z*b[0] - 20*np.cos(b[2])), int(Z*b[1] + 20*np.sin(b[2]))), (0, 255, 0), 1) p = (-np.dot(Rmat(b[2]), pts).T + b[:2])/xspc p[:, 0] += 8.5 p[:, 1] += 20 ceilp = p mask2 = (p[:, 0] >= 0) & (p[:, 1] >= 0) & (p[:, 0] < cmapsz-1) & (p[:, 1] < cmapsz-1) p = p[mask2] pi = p.astype(np.int) idxs = pi[:, 1] * cmapsz + pi[:, 0] ceilmapN[:] += np.bincount(idxs, np.ones(len(idxs)), cmapsz*cmapsz).reshape((-1, cmapsz)) ceilmapY[:, :] += np.bincount(idxs, 255 * (gray[ceilmask][mask2] > 240), cmapsz*cmapsz).reshape((-1, cmapsz)) Knew = np.array([ [-250, 0, 320], [0, 250, 260], [0, 0, 1] ]) R=cv2.Rodrigues(np.array([0, (22.-90)*np.pi/180., 0]))[0] R = np.dot(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]), R) for gp in mkgrid(xspc, yspc, 101, *-b)[0]: cv2.circle(bgr, (int(gp[0]), int(gp[1])), 3, (255, 0, 0), 1) bgr[~(ceilmask | floormask)] //= 3 vframe = np.zeros((360, 640, 3), np.uint8) vframe[:, :480] = cv2.resize(bgr, (480, 360), cv2.INTER_LINEAR) # remaining area is 160x360 vframe[:, 480:] = fview[:160, :, :].transpose((1, 0, 2)) vidout.write(vframe) cv2.imshow("vframe", vframe) #cv2.imshow("ceilmap", cv2.resize( # (ceilmapY / ceilmapN).astype(np.uint8), (cmapsz*16, cmapsz*16), interpolation=cv2.INTER_NEAREST)) #cv2.imshow("front", cv2.fisheye.undistortImage(bgr, K=K, D=dist, Knew=np.dot(Knew, R), new_size=(640, 360))) #cv2.imshow("map", fview[::-1]) k = cv2.waitKey(1) if k == ord('q'): break f.close() del vidout print(totalc) cv2.destroyAllWindows() cv2.waitKey(1) plt.imshow(floormapbgr / floormapN[:, :, None] / 255.0) plt.scatter(p[:, 0], p[:, 1]) p = ceilp #p = (np.dot(Rmat(b[2]), pts).T - b[:2])/xspc p.shape plt.scatter(p[mask2, 0], p[mask2, 1], s=1, c=bgr[ceilmask, :][mask2]/255.0) #plt.axis('equal') for i in range(19, 22): plt.axvline(i) for i in range(-1, 2): plt.axhline(i) plt.figure(figsize=(12,10)) plt.imshow((ceilmapY / np.sqrt(ceilmapN))[10:42] > 10000) plt.plot(*(track[0]/xspc + [8.5, 20, 0])[:2], 'o') # okay, this is the final light map we'll use from now on lightmap = (((ceilmapY / np.sqrt(ceilmapN)) > 10000).astype(np.int)[10:42]) # now we need to generate a distance function to find the closest point on the grid # run Dijkstra's algorithm on this to get minimum (x, y) distance (manhattan metric) # then add the vector distance minus 1, so that: # if the pixel is white and expected to be white, the distance is zero, # but if it's white and not expected to be, it's the distance to the nearest white edge. # we're doing squared x and y distances separately, so we precompute the x, y vector at each point on the map # if the nearest is in the positive direction, use dist - fpart(u) # if it's negative, use dist + fpart(u)-1 # ... or something like that? 
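# One reading of the notes above, as a clarifying comment: the two quick plots below check the
# sub-pixel correction numerically -- the integer grid distance jumps in unit steps, and subtracting
# (or adding) the fractional part of u is meant to turn it into a roughly continuous distance to the
# nearest lit grid cell. (Descriptive comment added for clarity; interpretation of the sketch above.)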
u = np.linspace(0.1, 4.9, 100) uf = u - u.astype(np.int) dist = (5 - u).astype(np.int) + 1 plt.plot(u, dist) plt.plot(u, dist - uf) dist = (u).astype(np.int) plt.show() plt.plot(u, dist) plt.plot(u, dist + uf - 1) # dijkstra's algorithm on lightmap ldist = np.zeros((lightmap.shape[0], lightmap.shape[1], 2)) ldist[lightmap != 1] = 1000 while np.any(ldist == 1000): #for i in range(100): cost = np.sum(np.abs(ldist)**2, axis=2) # now propagate cost in four directions: left = cost[:, 1:] < cost[:, :-1] ldist[:, :-1][left] = ldist[:, 1:][left] + [1, 0] right = cost[:, :-1] < cost[:, 1:] ldist[:, 1:][right] = ldist[:, :-1][right] + [-1, 0] up = cost[1:] < cost[:-1] ldist[:-1, :][up] = ldist[1:, :][up] + [0, 1] down = cost[:-1] < cost[1:] ldist[1:, :][down] = ldist[:-1, :][down] + [0, -1] #plt.imshow(cost[:, :, 0] == cost[:, :, 0]) plt.imshow(np.clip(ldist[:, :, [0, 0, 1]] + 5*(cost == 0)[:, :, None], -10, 10) / 20 + 0.5) plt.imshow(cost == 0) with open("../../logs/cl20190707/cycloid-20190707-162426.rec", "rb") as f: for frame in recordreader.RecordIterator(f): bgr = cv2.cvtColor(frame['yuv420'], cv2.COLOR_YUV2BGR_I420) break def undistortMap(): CAM_TILT = np.array([0, 22. * np.pi / 180., 0]) global K, dist K = np.load("../../tools/camcal/camera_matrix.npy") dist = np.load("../../tools/camcal/dist_coeffs.npy") K[:2] /= 4.05 fx, fy = np.diag(K)[:2] cx, cy = K[:2, 2] uv = np.mgrid[:480, :640][[1, 0]].transpose(1, 2, 0).astype(np.float32) R = cv2.Rodrigues(CAM_TILT)[0] R = np.dot(cv2.Rodrigues(np.array([0.015, 0, 0]))[0], R) origpts = cv2.fisheye.undistortPoints(uv, K=K, D=dist) pts = np.stack([origpts[:, :, 0], origpts[:, :, 1], np.ones((480, 640))]) return np.dot(R, pts.transpose(1, 0, 2)), origpts pts, _ = undistortMap() horizonang = np.tan(5*np.pi/180) horizonmask = np.abs(pts[2]) <= horizonang #plt.imshow(horizonmask[:, :, None]*bgr[:, :, ::-1]) Knew = np.array([ [-250/3, 0, 320], [0, 250*5, 260], [0, 0, 1] ]) R=cv2.Rodrigues(np.array([0, (22.-90)*np.pi/180., 0]))[0] R = np.dot(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]), R) #plt.figure(figsize=(12, 6)) #plt.imshow(cv2.fisheye.undistortImage(horizonmask[:, :, None]*bgr, K=K, D=dist, Knew=np.dot(Knew, R), new_size=(640, 360))[:, :, ::-1]) with open("../../logs/cl20190707/cycloid-20190707-162426.rec", "rb") as f: for frame in recordreader.RecordIterator(f): bgr = cv2.cvtColor(frame['yuv420'], cv2.COLOR_YUV2BGR_I420) im = cv2.fisheye.undistortImage(horizonmask[:, :, None]*bgr, K=K, D=dist, Knew=np.dot(Knew, R), new_size=(640, 360)) #im = bgr * horizonmask[:, :, None] cv2.imshow("im", im) k = cv2.waitKey(1) if k == ord('q'): break cv2.destroyAllWindows() cv2.waitKey(1)Profiling BatchFlow code A profile is a set of statistics that describes how often and for how long various parts of the program executed.This notebooks shows how to profile various parts of BatchFlow: namely, pipelines and models.import sys sys.path.append("../../..") from batchflow import B, V, W from batchflow.opensets import MNIST from batchflow.models.torch import ResNet18 dataset = MNIST()To collect information about model training times (both on CPU and GPU), one must set `profile` option in the model configuration to `True`:model_config = { 'inputs/labels/classes': 10, 'loss': 'ce', 'profile': True, } pipeline = (dataset.train.p .init_variable('loss_history', []) .to_array(channels='first', dtype='float32') .multiply(multiplier=1/255., preserve_type=False) .init_model('dynamic', ResNet18, 'resnet', config=model_config) .train_model('resnet', B.images, B.labels, fetches='loss', 
save_to=V('loss_history', mode='a')) )To gather statistics about how long each action takes, we must set `profile` to `True` inside `run` call:BATCH_SIZE = 64 N_ITERS = 50 pipeline.run(BATCH_SIZE, n_iters=N_ITERS, bar=True, profile=True, bar_desc=W(V('loss_history')[-1].format('Loss is {:7.7}')))Loss is 0.1426592: 100%|██████████| 50/50 [01:22<00:00, 1.29s/it]Pipeline profiling First of all, there is an `elapsed_time` attribute inside every instance of `Pipeline`: it stores total time of running the pipeline (even if it was used multiple times):pipeline.elapsed_timeNote that `elapsed_time` attribute is created whether or not we set `profile` to `True`.After running with `profile=True`, pipeline has attribute `profile_info`: this `DataFrame` holds collected information:pipeline.profile_info.head()Note that there is a detailed information about exact methods that are called inside each of the actions. That is a lot of data which can give us precise understanding of parts of the code, that are our bottlenecks.Columns of the `profile_info`:- `action`, `iter`, `batch_id` and `start_time` are pretty self-explainable- `id` allows to identify exact method with great details: it is a concatenation of `method_name`, `file_name`, `line_number` and `callee`- `total_time` is a time taken by an action- `pipeline_time` is `total_time` plus time of processing the profiling table at each iteration- `tottime` is a time taken by a method inside action- `cumtime` is a time take by a method and all of the methods that are called inside this methodMore often than not, though, we don't need such granularity. Pipeline method `show_profile_info` makes some handy aggregations:**Note:** by default, results are sorted on `total_time` or `tottime`, depending on level of details.# timings for each action pipeline.show_profile_info(per_iter=False, detailed=False) # for each action show 2 of the slowest methods, based on maximum `ncalls` pipeline.show_profile_info(per_iter=False, detailed=True, sortby=('ncalls', 'max'), limit=2) # timings for each action for each iter pipeline.show_profile_info(per_iter=True, detailed=False,) # for each iter each action show 3 of the slowest methods, based on maximum `ncalls` pipeline.show_profile_info(per_iter=True, detailed=True, sortby='tottime', limit=3)Model profilingmodel = pipeline.m('resnet')There is an `info` property that, unsurprisingly, shows a lot of interesting details regarding model itself or the training process:model.info##### Config: {'benchmark': True, 'body': {'encoder': {'blocks': {'base': , 'bottleneck': False, 'downsample': [False, True, True, True], 'filters': [64, 128, 256, 512], 'layout': 'cnacn', 'n_reps': [2, 2, 2, 2], 'se': False}, 'downsample': {'layout': 'p', 'pool_size': 2, 'pool_strides': 2}, 'num_stages': 4, 'order': ['skip', 'block']}}, 'common': {'data_format': 'channels_first'}, 'decay': None, 'device': None, 'head': {'classes': 10, 'dropout_rate': 0.4, 'filters': 10, 'layout': 'Vdf', 'target_shape': (64,), 'units': 10}, 'initial_block': {'filters'[...]As with pipeline, there is a `profile_info` attribute, as well as `show_profile_info` method. 
Depending on type of the used device (`CPU` or `GPU`)# one row for every operation inside model; limit at 5 rows model.show_profile_info(per_iter=False, limit=5) # for each iteration show 3 of the slowest operations model.show_profile_info(per_iter=True, limit=3)Saildrone - March 25/26**Observations**- see notes beyond issues addressed here- Latitude = 37.781094 Longitude = -122.305265 Data format really wants something more refined than a pandas csv read but with a lot of delimiters specified, it works... its not speedy thoughimport os import glob #python >= 3.5 import datetime import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt %matplotlib inline ### specify primary bulk figure parameters fontsize = 20 labelsize = 16 #plotstyle = 'seaborn' plt.style.use('seaborn-ticks') mpl.rcParams['svg.fonttype'] = 'none' mpl.rcParams['ps.fonttype'] = 42 #truetype/type2 fonts instead of type3 mpl.rcParams['pdf.fonttype'] = 42 #truetype/type2 fonts instead of type3 mpl.rcParams['axes.grid'] = False mpl.rcParams['axes.edgecolor'] = 'black' mpl.rcParams['axes.linewidth'] = 1.5 mpl.rcParams['axes.labelcolor'] = 'black' mpl.rcParams['grid.linestyle'] = '--' mpl.rcParams['grid.linestyle'] = '--' mpl.rcParams['xtick.major.size'] = 4 mpl.rcParams['xtick.minor.size'] = 1 mpl.rcParams['xtick.major.width'] = 2 mpl.rcParams['xtick.minor.width'] = 1 mpl.rcParams['ytick.major.size'] = 4 mpl.rcParams['ytick.minor.size'] = 1 mpl.rcParams['ytick.major.width'] = 2 mpl.rcParams['ytick.minor.width'] = 1 mpl.rcParams['ytick.direction'] = 'in' mpl.rcParams['xtick.direction'] = 'in' mpl.rcParams['ytick.color'] = 'black' mpl.rcParams['xtick.color'] = 'black' root_path = '/Users/bell/in_and_outbox/Ongoing_Analysis/ITAE_Projects/SPN1/2019_SailDrone_RadsIntercomparison/SaildroneRad_Data_20190326/' swdata = {} for filename in sorted(glob.iglob(root_path+'/*SW*.csv', recursive=True), reverse=True): print(filename) swdata.update({filename.split('/')[-1]: pd.read_csv(filename,header=None, sep='"|,|SW Rn', engine='python', usecols=[0,2],names=['time','sw'], parse_dates=True,index_col='time')}) swdata['SW1905_20190327000107.csv'].index=swdata['SW1905_20190327000107.csv'].index.tz_localize('Etc/GMT-7') lwdata = {} for filename in sorted(glob.iglob(root_path+'/*LW*.csv', recursive=True), reverse=True): lwdata.update({filename.split('/')[-1]: pd.read_csv(filename,header=None, sep='"|,|Rd|D|C|LW Rn', engine='python', usecols=[0,2,4,6,8],names=['time','lwnet','c1','c2','lw'], parse_dates=True,index_col='time')}) spn1 = {} for filename in sorted(glob.iglob(root_path+'/*A*.csv', recursive=True), reverse=True): print(filename) spn1.update({filename.split('/')[-1]: pd.read_csv(filename,delimiter=',F|,|\t', engine='python', header=None, names=['time','total','diffuse','sun','c0','c1','c2','c3', 'c4','c5','c6','c7','temp','something',''], error_bad_lines=False, parse_dates=True,index_col='time')}) from pysolar.solar import * import datetime ### example below from dateutil import tz solar_hypothetical = pd.DataFrame(columns=('Datetime', 'deg', 'rad','irr')) for i in range(60*24): date = datetime.datetime(2019, 3, 26, 12, tzinfo=datetime.timezone.utc) + datetime.timedelta(minutes=i - 1) altitude_deg = get_altitude(37.781094, -122.305265, date) rad = radiation.get_radiation_direct(date, altitude_deg) irr = rad*np.cos(np.deg2rad(90-altitude_deg)) solar_hypothetical.loc[i] = [date,altitude_deg,rad,irr] solar_hypothetical = solar_hypothetical.set_index('Datetime') for unit, value in spn1.items(): count=0 
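# The try/except below appears to handle both cases: tz_localize('UTC') works for a timezone-naive
# index but raises once the index is already timezone-aware, in which case tz_convert('UTC') is used
# instead. (Descriptive comment added for clarity.)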
    value['sza'] = np.nan
    try:
        value = value.tz_localize('UTC')
    except:
        value = value.tz_convert('UTC')
    spn1[unit] = value

for unit, value in swdata.items():
    count = 0
    value['sza'] = np.nan
    try:
        value = value.tz_localize('UTC')
    except:
        value = value.tz_convert('UTC')
    swdata[unit] = value

solar_hypothetical['sza'] = np.nan
try:
    solar_hypothetical = solar_hypothetical.tz_localize('UTC')
except:
    solar_hypothetical = solar_hypothetical.tz_convert('UTC')

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
plt.plot(solar_hypothetical.rad)

Plot Timeseries of Each Instrument including temperatures

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in swdata.items():
    plt.plot(value.sw, label=unit)
plt.legend()
plt.plot(solar_hypothetical.irr)
ax1.set_xlim(['2019-03-26 13','2019-03-27 03'])

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in lwdata.items():
    plt.plot(value.lw, label=unit)
plt.legend()

fig = plt.figure(2, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in lwdata.items():
    plt.plot(value.c1, label=unit)
    plt.plot(value.c2, label=unit)
plt.legend()

Difference Plots

fig = plt.figure(1, figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
plt.plot(lwdata['LW1903_20190327000124.csv'].lw - lwdata['LW1902_20190327000209.csv'].lw, label='lwr1903-lwr1902')
plt.plot(lwdata['LW1903_20190327000124.csv'].lw - lwdata['LW1901_20190327000142.csv'].lw, label='lwr1903-lwr1901')
ax1.set_ylim([-15,7])
plt.legend()

tmp = (lwdata['LW1903_20190327000124.csv'].lw - lwdata['LW1902_20190327000209.csv'].lw)
print("blue \n median:{} w/m^2 \n std:{} w/m^2".format(tmp.median(), tmp.std()))
tmp = (lwdata['LW1903_20190327000124.csv'].lw - lwdata['LW1901_20190327000142.csv'].lw)
print("orange \n median:{} w/m^2 \n std:{} w/m^2".format(tmp.median(), tmp.std()))

blue
 median:3.8000000000000114 w/m^2
 std:1.176678171960742 w/m^2
orange
 median:0.8999999999999773 w/m^2
 std:1.5844557421137118 w/m^2

One-One Plots

fig = plt.figure(1, figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
plt.plot(lwdata['LW1903_20190327000124.csv']['2019-3-27 00:15':'2019-3-27 06:10']['lw'].resample('1s').mean(),
         lwdata['LW1901_20190327000142.csv']['2019-3-27 00:15':'2019-3-27 06:10']['lw'].resample('1s').mean(),
         '.r', markersize=.5, label='lwr38812:lwr38810')
plt.plot(lwdata['LW1903_20190327000124.csv']['2019-3-27 00:15':'2019-3-27 06:10']['lw'].resample('1s').mean(),
         lwdata['LW1902_20190327000209.csv']['2019-3-27 00:15':'2019-3-27 06:10']['lw'].resample('1s').mean(),
         '.g', markersize=.5, label='lwr38812:lwr38810')
plt.plot(range(250,400), range(250,400), 'k--')
plt.legend()
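The median/std summary printed in the Difference Plots cell is easy to repeat for any pair of radiometers by wrapping it in a small helper. A minimal sketch; the function name is illustrative and it only assumes the two series are time-indexed and overlap, so pandas can align them on subtraction:

def offset_summary(a, b, label=''):
    # difference of two time-indexed series; pandas aligns on the shared DatetimeIndex
    diff = a - b
    return '{} median:{:.2f} w/m^2  std:{:.2f} w/m^2'.format(label, diff.median(), diff.std())

print(offset_summary(lwdata['LW1903_20190327000124.csv'].lw,
                     lwdata['LW1902_20190327000209.csv'].lw, 'lwr1903-lwr1902'))
print(offset_summary(lwdata['LW1903_20190327000124.csv'].lw,
                     lwdata['LW1901_20190327000142.csv'].lw, 'lwr1903-lwr1901'))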
fig = plt.figure(1, figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
t1 = '2019-03-26 17:30:00'
t2 = '2019-03-26 23:00:00'
t1tz = '2019-03-27 00:30:00'
t2tz = '2019-03-27 6:00:00'
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2]['sw'].resample('1s').mean(),
         spn1['SPN1_A1915_20190327000218.csv'][t1tz:t2tz]['total'].resample('1s').mean(),
         '.r', markersize=.5, label='A1915')
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2]['sw'].resample('1s').mean(),
         spn1['SPN1_A1916_20190327000730.csv'][t1tz:t2tz]['total'].resample('1s').mean(),
         '.g', markersize=.5, label='A1916')
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2]['sw'].resample('1s').mean()[0:-1],
         spn1['SPN1_A1913_20190327000225.csv'][t1tz:t2tz]['total'].resample('1s').mean(),
         '.g', markersize=.5, label='A1917')
plt.plot(range(0,1200), range(0,1200), 'k--')
plt.legend()
plt.xlabel('SW1905')
plt.ylabel('SPN1')

Calculate SZA as this is a more comparable parameter than irradiance

### alameda, ca
lat = 37.781094
lon = -122.305265

for unit, value in swdata.items():
    count = 0
    value['sza'] = np.nan
    for index, row in value.iterrows():
        value['sza'][index] = get_altitude(lat, lon, index)
        if (count % 3600) == 0:
            print(row)
        count += 1
    swdata[unit] = value

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in swdata.items():
    plt.plot(value.sza, value.sw, '.', markersize=.25, label=unit)
ax1.set_xlim([0,60])
plt.legend()

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
t1 = '2019-03-26 17:30:00'
t2 = '2019-03-26 23:00:00'
t1tz = '2019-03-27 00:30:00'
t2tz = '2019-03-27 6:00:00'
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sza,
         spn1['SPN1_A1915_20190327000218.csv'][t1tz:t2tz].resample('1s').mean().total/spn1['SPN1_A1916_20190327000730.csv'][t1tz:t2tz].resample('1s').mean().total,
         label='')
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sza,
         spn1['SPN1_A1915_20190327000218.csv'][t1tz:t2tz].resample('1s').mean().total/spn1['SPN1_A1913_20190327000225.csv'][t1tz:t2tz].resample('1s').mean().total,
         label='swr38100/swr38425')
ax1.set_xlim([0,55])
ax1.set_ylim([.9,1.1])
plt.legend()

Load/Plot collocated SPN1's (2x)

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in spn1.items():
    plt.plot(value.total, label=unit)
plt.legend()

fig = plt.figure(1, figsize=(9,2.125))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in spn1.items():
    plt.plot(value.diffuse, label=unit)
plt.legend()

fig = plt.figure(1, figsize=(18,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in spn1.items():
    plt.plot(value.total, label=unit)
for unit, value in spn1.items():
    plt.plot(value.diffuse, label=unit)
for unit, value in swdata.items():
    plt.plot(value.index + datetime.timedelta(hours=7), value.sw, label=unit)
plt.legend()
#ax1.set_xlim(['2019-03-27 3:00:00','2019-03-27 4:00:00'])

### Choose 38100 as truth (matches a recently cal'd spn1 and is in the middle of data envelope)
fig = plt.figure(1, figsize=(9,4.5))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sza,
         (swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sw/
          spn1['SPN1_A1915_20190327000218.csv'][t1tz:t2tz].resample('1s').mean().interpolate().total.values),
         'b.', markersize=.25, label='38100/38425')
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sza,
         (swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sw/
          spn1['SPN1_A1916_20190327000730.csv'][t1tz:t2tz].resample('1s').mean().interpolate().total.values),
         'g.', markersize=.25, label='38100/38425')
plt.plot(swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sza[0:-1],
         (swdata['SW1905_20190327000107.csv'][t1:t2].resample('1s').mean().sw[0:-1]/
          spn1['SPN1_A1913_20190327000225.csv'][t1tz:t2tz].resample('1s').mean().interpolate().total.values),
         'r.', markersize=.25, label='38100/38425')
ax1.set_ylim([0.9,1.075])
ax1.set_xlim([0,55])
plt.legend()
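One naming note on the SZA cells above: pysolar's `get_altitude` returns the solar elevation angle in degrees, so the `sza` column as computed actually holds elevation; the true solar zenith angle is simply its complement. A minimal sketch for a single timestamp, assuming `lat`, `lon` and the UTC-indexed `swdata` frames defined earlier:

elev = get_altitude(lat, lon, swdata['SW1905_20190327000107.csv'].index[0])
sza_true = 90.0 - elev   # zenith angle is the complement of the elevation angle
print('elevation: {:.2f} deg   zenith: {:.2f} deg'.format(elev, sza_true))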
Now make life tough by comparing to on drone rads

#load saildrone 1min data
def dateparse(time_in_secs):
    return datetime.datetime.fromtimestamp(float(time_in_secs)/1e3)

sd1030 = pd.read_csv(root_path + '/sd/sd1030_1min.csv', parse_dates=['timestamp'], date_parser=dateparse, index_col='timestamp')
sd1034 = pd.read_csv(root_path + '/sd/sd1034_1min.csv', parse_dates=['timestamp'], date_parser=dateparse, index_col='timestamp')

fig = plt.figure(1, figsize=(11,4.25))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in swdata.items():
    plt.plot(value.index + datetime.timedelta(hours=-7), value.sw, 'lightblue', label=unit)
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_shaded_radiometer_total_filtered'], 'k-')
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_unshaded_radiometer_center_detector_filtered'], 'k--')
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_unshaded_radiometer_average_detector_filtered'], 'grey', linestyle='--')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_shaded_radiometer_total_filtered'], 'r-')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_unshaded_radiometer_center_detector_filtered'], 'r--')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_unshaded_radiometer_average_detector_filtered'], 'orange', linestyle='--')
plt.plot(sd1034.index, sd1034['report_payload:payload:ir_thermo_temperature_filtered'])
ax1.set_ylim([200,1200])
ax1.set_xlim(['2019-03-26 13:30:00','2019-03-26 16:30:00'])

fig = plt.figure(1, figsize=(11,4.25))
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
for unit, value in swdata.items():
    plt.plot(value.index + datetime.timedelta(hours=-7), value.sw, 'lightblue', label=unit)
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_shaded_radiometer_total_filtered'], 'k-')
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_shaded_radiometer_diffuse_filtered'], 'k--')
plt.plot(sd1030.index, sd1030['report_payload:payload:sw_unshaded_radiometer_average_detector_filtered'], 'grey', linestyle='--')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_shaded_radiometer_total_filtered'], 'r-')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_shaded_radiometer_diffuse_filtered'], 'r--')
plt.plot(sd1034.index, sd1034['report_payload:payload:sw_unshaded_radiometer_average_detector_filtered'], 'orange', linestyle='--')
plt.plot(sd1034.index, sd1034['report_payload:payload:ir_thermo_temperature_filtered'])
ax1.set_xlim(['2019-03-26 09:30:00','2019-03-26 16:30:00'])

Scikit-Learn-style API

This example demonstrates compatibility with scikit-learn's basic `fit` API. For demonstration, we'll use the perennial NYC taxi cab dataset.

import os
import s3fs
import pandas as pd
import dask.array as da
import dask.dataframe as dd
from distributed import Client
from dask import persist
from dask_glm.estimators import LogisticRegression

if not os.path.exists('trip.csv'):
    s3 = s3fs.S3FileSystem(anon=True)
    s3.get("dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv", "trip.csv")

client = Client()
ddf = dd.read_csv("trip.csv")

We can use the `dask.dataframe` API to explore the dataset, and notice that some of the values look suspicious:

ddf[['trip_distance', 'fare_amount']].describe().compute()

Scikit-learn doesn't currently support filtering observations inside a pipeline ([yet](https://github.com/scikit-learn/scikit-learn/issues/3855)), so we'll do this before anything else.

# these filter out less than 1% of the observations
ddf = ddf[(ddf.trip_distance < 20) & (ddf.fare_amount < 150)]
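The "less than 1%" claim in the comment above can be checked directly by counting rows before and after the filter. A minimal sketch that simply re-reads the file for the "before" count (the variable names are illustrative, not part of the original example):

n_total = len(dd.read_csv("trip.csv"))   # rows before filtering (len triggers a compute)
n_kept = len(ddf)                        # rows after the filter applied above
print("dropped {:.3%} of the observations".format(1 - n_kept / n_total))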
Now, we'll split our DataFrame into a train and test set, and select our feature matrix and target column (whether the passenger tipped). To ensure this example runs quickly for the documentation, we'll make the training set smaller than usual.

df_train, df_test = ddf.random_split([0.05, 0.95], random_state=2)

columns = ['VendorID', 'passenger_count', 'trip_distance', 'payment_type', 'fare_amount']

X_train, y_train = df_train[columns], df_train['tip_amount'] > 0
X_test, y_test = df_test[columns], df_test['tip_amount'] > 0

X_train = X_train.repartition(npartitions=2)
y_train = y_train.repartition(npartitions=2)

X_train, y_train, X_test, y_test = persist(
    X_train, y_train, X_test, y_test
)

With our training data in hand, we fit our logistic regression. Nothing here should be surprising to those familiar with `scikit-learn`.

%%time
# this is a *dask-glm* LogisticRegression, not scikit-learn
lm = LogisticRegression(fit_intercept=False)
lm.fit(X_train.values, y_train.values)

CPU times: user 4.99 s, sys: 1.48 s, total: 6.47 s
Wall time: 57.7 s

Again, following the lead of scikit-learn, we can measure the performance of the estimator on the training dataset:

lm.score(X_train.values, y_train.values).compute()

and on the test dataset:

lm.score(X_test.values, y_test.values).compute()

**Question:** ![q3.png](q3.png)
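For context on the scores above, it helps to compare against the majority-class baseline. A minimal sketch, assuming `y_test` is the boolean dask Series constructed earlier (this check is not part of the original example):

tip_rate = y_test.mean().compute()        # share of rides with a tip in the test set
baseline = max(tip_rate, 1 - tip_rate)    # accuracy of always predicting the majority class
print("tip rate: {:.3f}   majority-class accuracy: {:.3f}".format(tip_rate, baseline))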
UU1YO4IIUQTPcaVEEII+RtRKpX45JNPMGvWLPTp0wfbt29HREQETpw48bqzRgj5H0F3IAghr51EIsHOnTuRnp5e5ziUSiXOnDmD3bt3o7i42IC5q06hUOD06dPYuXMnUlJSAFRMQTl8+DDu3r3boGm/DgkJCfDz82vQNGQyGb777juNMH9//3rH+yrbRV1kZWXh4MGD8Pf3h1wu58JjY2Px3//+l/u7avmIRCLMnDkTrVu3BgBYWFjAysoKwD+7HRJC3hw0gCCEGERycjKWLl0KJycnDB8+HDt27IBYLAZQcVKzYsUKODo6Yvbs2YiLi+P2U6vV8PPzg5ubGxwdHTXi/P333zFy5EhYW1vj/fffx/PnzwEARUVF8PHxgZ2dHSZNmoQ7d+7g3r17MDU1xc2bN5GVlVWrvNcmHQC4fv062rZti9zcXERGRgIABAIBxo0bh5CQELx48aJOZfgmysnJwaFDh/Dee+81aDpqtRpSqVQjTCaTad02NjYW8+fPh62tLXr16oUzZ85wn23fvh12dnZ455134OfnV692UVuFhYXYsGEDunTpglatWuH777/njunu3bvo2rUrOnXqhEWLFgGoGIiGhYVh8ODBCA0N1chfeXm5Rnm8XD59+/aFo6MjcnJycOHCBcyZMwfA37MdisViBAQEYN++ffDz80NmZubrzhIhpAY0gCCEGETbtm3x/vvvo1GjRli6dCk+++wzWFpaAqg4sWrZsiXOnTsHf39/dOvWjdsvKioKOTk5GDBgQLU433//fWzYsAHm5uYYOnQo2rVrBwAwNjaGq6srTp8+jaCgIHh5ecHLywumpqYwNjaGs7NzrfJem3QAwNvbGy4uLkhMTETHjh25eCwtLeHt7Y39+/e/0Qt2a+PQoUPw9PSEra3t684Kp3v37ti8eTN69eqFNm3aYNiwYdxn9vb2+P7773H16lXMmzevXu2itqytrbFkyRLMnz8fQqEQ//rXv2BmZgagom1MnDgRt27d4tbNGBsbY9q0aSgqKoKRkRFatGhRq/RKS0uxa9cufPrpp+jXrx8X/ndqh1KpFF9++SUaN26MuXPn4p133sE333zT4IM9Qkj90ACCEGIwsbGxMDMzQ6dOnbiwhIQEXLx4ETNnzkSPHj2q7XPhwgWMHDkSAoH27qhr164YPHgwDh8+DLFYDIVCgYCAAEyaNAmenp4a2545cwaDBg2q0/SN2qQDABkZGVAoFNVOSj09PZGbm/u3ufqrT25uLpKSkjBo0KDXnZVqzMzMMH36dNy+fRv3798HAFy5cgVCoRDz5s2DUCjktq1Pu6iLsWPHwsTEBCdPngQApKWlISwsDN988w1sbGyqbR8XF4cOHTpwgw0+ZDIZAgMD8cEHH6Bv3744cuSIxuevoh2Wlpbi3Llz3J3Gurh9+zYyMzMxfPhwAEDnzp1hbm6Os2fPGiqbhJAGQAMIQojB3LhxA506dYKtrS3UajUuX76MlJQUvP/++9zdiKqys7ORlJSEzp0764xTKBRi+vTpePz4MSIiInDs2DF4eHigS5cuGttVPsoyNTUVdXk2BN90KsXHx8PW1rbaCaGpqSmcnZ0RHR1d6zzUlUqlglKprBauVqs1wqvOsa/cr7Lc1Gp1tc+jo6NhbW2Npk2bGiSfCoXCoI8cHTp0KOzt7XH06FHcunULOTk5GD9+vMY29W0XddG2bVuMHTsWgYGBePToEUJCQuDj46P1OwBUlLOHhwfv+NVqNdauXYulS5di2LBhcHJyqjY4qms7rE0dmZmZQSAQ4IsvvsCPP/5Yp7sG165dg7W1NSwsLLgwZ2dnhIWF1TouQsirI3rdGSCE/DMUFBTgwYMH8PHxgUwmw9q1a+Ho6IjPP/9c5z5PnjyBsbGx1quyVfXp0wdubm5YvXo1tm/fjp49e1bbRiAQ4Mcff0R5eTlMTU3rdAx80lEqlRAIBLhz547WOxNAxd2Ms2fPYurUqXXKB1/p6enYv38/WrduDZFIhEaNGmHy5MkAgMjISKSmpiI+Ph7W1tYwMTGBWCxGZmYmtmzZgri4OFy7dg1xcXEYP348MjIykJ+fj5KSEixduhRmZma4ffs2XF1dDZLPhQsX4ty5c2jbti2OHj2Kbt26QalUIiQkBBMmTKhTvC1atMCUKVOwefNmdOzYEf/5z3+q3ckyRLuoi6lTp+Lo0aPYtGkT1q9fj2bNmlXbRqFQQCKR4K+//tKY1lcTgUCAn376CT/99JPe7WrTDutSRwKBACNGjMDw4cNx584drFq1ClZWVpg7dy5cXFx4HUtqaiqaNGmiEdasWTOkpaXx2p8Q8nrQAIIQYhDJyckoKCiAvb09goKCkJycjDt37mDWrFk6r7zm5eWhWbNmOqcvVbKwsECfPn3w66+/6p3mIRQKNaau1FZN6SgUCsyfPx8dOnRASUkJxo4dqzUeGxsb5OXl6UwnIiIChw4dqvFKr729PX788Uetn2VkZGDOnDn49ttvMWjQIEgkEmzduhVSqRQZGRnIysrClClTEBsbi6lTp2Lfvn0Qi8XcwuR79+7hvffeQ1BQEG7duoXVq1dDKpXCx8cHkZGR8Pb2Rn5+fq3n5b+stLQUGzZswPTp07F7925uHcyGDRuQkJBQ7eSxtjw9PaFQKKBSqSASaf9Jq2+7qIt27drByckJGRkZWtt/QkICVqxYwU2da9++vcHzUFM7rFTfOhIIBOjTpw/69OmDhIQE7N27F1KpFLNmzeLWDelSVFRU7eEJQqEQZWVlNR8gIeS1oQEEIcQgYmNjIZFIIJFI8OGHH6Jt27bw8fHBrVu38O6772rdJzc3l9e877Nnz6Jdu3Zo2rQpgoKCtN4ZMISa0jE2NsaWLVsglUphb2+vMx5LS8tq04GqGjhwIAYOHFivvG7evBnm5ubo3LkzoqOjERERAS8vL5iZmaFRo0bo378/gIqBnaOjI7p37w4LCwu8++67UCgU6N27N6RSKWQyGaZNmwaBQADGGEpLS5GdnQ2lUomioiKNqSVAxdz7CxcuoLCwUG/+nJyc4O3tjdjYWLz33nsYOnQoAGDGjBmwtLREWloaYmJiMGrUqDqXQVJSEuLj4zFmzBgcPXoUM2bMgLW1da3jqe0x1UQqleL333+Hj48P1q1bh+jo6Gr17erqiu3bt8PS0lLnALu+amqHlQxZR66urti8eTPS09Pxyy+/YM+ePfjoo4/Qt29frduLRKJqgwWJRAIjIyPeaRJCXj1aA0EIMYibN2+ib9++mDlzJkxMTNC7d290794dAQEBOp8Ew+cJMVeuXIFcLsecOXMwceJEnDx5EhkZGYbOPu90rKys9A4eKtV0V6U+SktLce/ePXTs2BF3795Fo0aN8Pnnn3MngG3atOHuHERFRaFHjx4aAwFjY2O4ubkhPj4eVlZW3BXg7Oxs5Ofnw8nJCWq1WusdEiMjI5iYmMDU1FTvP2NjYwAVjxutzFel9u3b486dO7C0tETLli3rVAapqakIDQ3F9OnTMWfOHKSkpCAiIqJOcdX2mPSRy+UICAjAwIEDMX36dDg7
OyMgIEBrWTo4ODTY4KESn3bYEHVUXl6O8vJyMMb05qFJkybV3s9RXFxcbeBKCHmz0B0IQki9VV3/UDmNxMzMDNOmTcOyZcsQHx+vdY538+bNkZiYqDPemzdvIj09Hf/+978BVDxu9eDBgwgNDcXs2bMNln9Dp1NaWgpzc3Odn9d3CpNYLEZZWRlGjRql9fG3laRSKWJiYjBr1iwAFSdmEokEDg4OACqegNOjRw8ur5GRkbCyskLXrl1hbGwMc3Pzak/YMTExwYgRI/TmuyZNmjRBbGws5s2bV6f9s7KycPLkSXz44YewtLSEp6cnevTogcOHD2PUqFG8TvSrMsQxARXrYwICAtCzZ09u8b2Pjw+2bduGp0+f6n1YQEOoqR3qU9c6io2NhZ+fH8rLyzF37lz07t1b7/Y9e/ZEaGgoVCoVN81MLBY32F1GQohh0ACCEFJvz58/R15eHtzd3TXCvb298d///hdBQUFaBxC2trYoLi6GWq2udpXy0qVLiI+Px4IFC7jP3NzcMGDAABw+fBjvv/8+r6uUkZGRSEtLw8SJE7XOkTdUOlXl5eVxbwbWpr5TmKytreHg4ID8/HwuTCKR4PTp0xg+fDiOHz+OTp06oWnTpnjx4gX3qNnIyEi0atUKDg4OkEgkiI6O5hbYFhYW4uTJk1i0aBG34Nfe3h65ubl1zqcuRkZGEIlEdXovQ1paGvz8/DB//nxu8b2ZmRk+/PBDLFmyBA8ePNC5uL0hyeVy7NmzB926ddN4otLYsWOxc+dOBAcH49tvv32leXq5HWZlZeHUqVOYOHFijWtbalNHarUaN27cwMGDB9G8eXMsXLiQ95qOAQMGIDg4GLm5ubCzs4NcLkdycjI36M3OzkZQUBAmTZr0Rr2LhJD/dTSFiRBSZzKZDH5+flizZg3EYjFOnz6NhIQEABXTk06dOoX8/Hz8+uuvWL9+fbU3DXfp0gXl5eUaCz3Pnj2LsWPH4v3330dSUpLGY0gjIiLw7Nkz3L17F5999hn3Zmh9wsPD8f3331d7Hr6h06kqPj4eb7/9dq32qQ1TU1MsXrwY169fx7Vr1/DHH3/gxIkTGDhwIMzNzfHw4UM8e/YMT58+xfz58xEeHo7g4GA0atQIbm5uAIC//voLJSUlyMnJwenTp7Fx40ZMnz4d48aN49IZMGAA4uPjDZ7//Px89OnTp1YLm58/f4758+ejf//+CA0N1XhLdWpqKi5cuICSkhIsXrwYhw4d0vpY24YglUrh6+uL/v37Y926dSgoKOA+k8vlOHnyJMrKyrB3716sWbMG2dnZryRfQPV2mJycDF9fX1y4cKHGffnUkUqlwunTpzFr1izcvHkTP/30E9avX1+rBeFubm6YMGEC/Pz8UFhYiICAALi7u3PrppKTk+Hn54ekpCTecRJCXgFGCCGv0fLly1lERESDphEZGckSEhIaNI1KZWVl7NNPP2XPnz9v8LTKy8tZSkoKk8vlGuFKpZJlZ2czlUrFGGOsoKCASSQSjW1OnDjBhg8fzkpLS1leXh5TKpXV4s/KymLz5s1jBQUFBs33kSNH2I0bNzTCJBIJ++KLLzTCdu3aZdB0/07u3r3LVq1axf2trXz00dUO09PT2aVLl2rcX1sdvUwsFrPz588zsVjMO1+6pKens9OnT7OYmBiu3RJC3lx0B4IQ8lqNHj0aFy5cMOgLxl6WnZ1d58W6tRUVFYWWLVuiTZs2DZ6WSCRC69atq835FwqFaNmyJTcly9rautrTrm7fvo2ePXvC3NwcNjY2Wq8029rawtXVFdeuXTNYntVqNR4/flyn6UuEP13tMDs7u8b3rvCtIwsLC4wYMcIgC54dHBwwduxY9OxbjDcDAAAgAElEQVTZs0EfQEAIMQz6lhJCXitPT0/Y2tri+vXrDRL/o0ePYGpqqvVFXoZWXFyMmzdv4qOPPnpjT4KUSiWCgoJw6tQp5OTk4PHjx3q3nzZtGu7evYucnByDpJ+bmwuBQEDz2RuQrnZYXFyMhIQEdOrUSe/+VEeEkJq8mb9whJD/GQKBAHPmzMGff/6J9PR0g8ffpUsXne+hMCS1Wo3Tp09jxIgRb/SJl0gkwjvvvIOrV69i9erVNT6StkWLFpg2bRpCQkIMkr6FhQVmzpxZbUG7QCCo9khTPu8I+acyNjbWKA9t5aONvnZoZWWFqVOn1liuuuqIEEIqGTHG2OvOBCGEEEIIIeTvge5AEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAEEIIIYQQQnijAQQhhBBCCCGENxpAkL8FlUqF3NxcFBUVNVgacrkcmZmZUCgUDZYG+d9S2abkcvnrzso/hkqlQk5ODsRi8evOyhtLLBYjKysLarX6texfH4WFhbhy5QpiY2NfS/p/J3y+C6/it9OQ/m75/V9GAwii1cOHD/HVV1/ByckJnp6e8PX1RWFhIQAgOjoakydPRtu2bfHFF18gPT29QfMik8mwdetWuLu7w9/fv0HSeP78OT766CN4enoiMTGxQdL4p4qJiUFgYKBB4kpLS8O+ffuwZcsWrF69GvHx8QaJ11CuX7+OLl26wMfHB0uWLMGCBQvQrl07TJw4EcuXL8f8+fPRo0cPrFq1Ci9evMAnn3wCDw8Pvcfh7+8PPz+/OuWnPvv+HUmlUmzcuBE9e/bE4cOHufA3uRxiYmKwbNkyFBQUvJL0wsLCMGrUKEydOhUymeyV789HUVERVqxYgejoaC5MpVJh9+7d2LBhA1q1agWxWIwvv/wSpaWlDZIHXfn4uygtLcW6devQrVs3nf2vXC7H9u3b4e7ujl9++eUV5/D/xMbG4rPPPoOdnR28vLywcuVKrFixAgsWLMCMGTNw5cqVNyq/hB8aQBCtevTogZUrV8Le3h5Dhw7F4sWLYW1tDQDw8PDAjh07MGfOHGzbtg2Ojo4GTTsuLg7nz5/n/jY1NcXXX3+NgQMHGjSdqtq1a4c1a9bAzs6uVvv98ccfSEpKaqBc6SeTyfDzzz9DpVLx2v7AgQNYuHChwfMRGxuL/Pz8eseTkZEBf39/TJ48GV9++SU8PT0xadIkPHjwQO9+if+PvXOPi6r4//8LFpZ1AZGb3FRQEC+pFCIqKEmmZmpqWZrl1zI/n0+f+mTaRbv4UbObZd7N1Aot1BTRUBM1QAVBFFQU5I5cBZb7Lsved8/8/uDB+bKwZ3dBlPr+5vkXnHNm5j3zfs97zsx5z2xREZ577jnU1NR0qTy1Wo1ly5bh7NmzZqdpbGzEO++8g99++w3ffPMNVq9eDZ1Oh/nz5+OLL77A3r178euvv0KlUsHHxwfr16+Hp6cnZ34Mw6C4uBj37t0zW489kba79Ka9A4BQKMSaNWswadIk9lpvtENXqK2tRXZ2NmQ
y2SMp7+mnn8bKlSthZWXVK+nNQaFQIDs7W6/Ppqen4+TJk1i9ejX8/f3BMAxKSkqgUCh6pMyO4wqXHH8X7Ozs8Mknn2D8+PGcz9jY2GDVqlWYOnXqI5SsM2PGjMEXX3yBwYMH45lnnsHGjRvx+eef4/vvv8e///1v/Pvf/0ZMTEy35TWkW8rD5+F5CMrfHgsLC1haWoLH43W6x+fz4ezs/FDKzczMNLjyxefzH0p5bfB4PIN15YJhGCQmJmL48OEPUSpuqqurcefOHbOeZRgG8fHxCAwM7HE5li5d2iP5FBUV4eDBg5g5cyaCg4Px2GOPQaFQIDU1FY8//jhnulu3bkGtVsPBwaFL5VVXVyMnJwcrVqwwO421tTUmT56sd83CwgIWFhbs/yNGjMCAAQPAMAx4PJ7RFzFLS0t88cUXXZK7J9J2h9629/a09wWPuh26yowZMzBjxoxHWqaVlRUsLbu/Pvig6U3h4eGBkydP6l27d+8e+vbtCycnJwDA5MmTO/W1B8HQuGJIjr8TlpaWZk30rK2tH4E0xrG0tISlpaWerwSAoKAg+Pj44MCBA5g9ezasrKy6LC/XOwPl4UK/QFB6lJqaGlRVVUGn03VaDRSJRJDL5UbTNzY24vDhwyZXEpVKJeczEomkSyviWq0WWq2W835TUxOqqqo6xeNev34dycnJXU7HRVfajmEYHD9+3Ozwsfr6epSXl2Ps2LF619vXW6vVcsrK1T46na5Tmo7XzI3/nzBhAs6dO8dOcurq6sAwDIYMGWI03dWrVxEUFASBQKB3nWEYPbk7ypGXlwehUAhvb2+z5AMAFxcXuLm5GX2Gz+dj+PDhndqMYRiD9m/oektLC0pKSqBWq43uyTGUVqvVoqysDGKx2GQ/YhgGVVVVbHiiMUzZe1doaGiARCLpUhqGYThtyVA7KJVKMAxjMF13/Ef7/Lj8GFcfNlReW9sb0m97vXPJ2hXdGaOhoQG1tbUmnzNVHpfNcl3XarV6eiGEdCpPIpEYbLeKigqDoU1cPtfYuNJRjja4bLS9/rnSGkujUqk49dn2jCGbMKfPdHWvlaExuWP92vsxY7bfXdp01XFiYQhDbWDuOwOl56FfICg9QmlpKY4cOYLQ0FBoNBqsW7cOc+fOxZw5c3DlyhWcOHEC06dPR3p6Ovh8Pt577z3Y2Njo5SEWi7Fv3z4UFBTg3LlzqKqqwqhRo7BgwQL2mTZnwefzcebMGTz//POYN28egFZnuGXLFgQEBECr1eLatWv45JNPMGjQIIMyl5eX4/vvv4ePjw8cHBxQW1ur57hVKhWOHDkCW1tbWFlZ4fTp01i1ahUCAgKQlZWFgwcPorKyEnv37oWTkxNeeOEFjBo1ymi6nmq7mJgYXLp0CcXFxdi4cSMcHR3x+uuvd1qFl8vlOHToENLS0lBeXo7Y2FiUlJTg1VdfxYkTJ1BQUAChUAh/f3/U1dUhPj4en3/+OfvSnpCQgCtXrmDEiBEQi8WoqqrCe++9BwcHB6SkpKC4uBjp6enYsGEDnJyccOPGDVy/fh35+fl45ZVXcPfuXQiFQiQnJ+O9996Dr68vpw21vXi3yR0dHY0333wTU6ZMMfh8XFwcLl26hNOnTyMsLAzffvstlixZAg8PD6SmpqK8vBw5OTlwdHSEjY0NpFIpqqursWLFChw9ehR//vkn1Go1duzYgWnTpiE0NJRTtjaCg4NNPgO0hoG0JysrCzk5OZDJZEhKSsLnn3+OwYMHIykpCR9//DGsra0RGxsLHo+H/fv3w8PDA15eXoiKikJ9fT02b97cqYyOaYVCIc6fP4/CwkIEBwcjNTUVMTExOHr0qEEZy8vLERMTA39/f+Tl5aGiogLr1q0z+CWHy97lcjk+//xz5Ofn46233sKAAQOwe/duKBQKfPXVV/D29sbbb7+NlpYWfPvtt+jbty8OHTqE0NBQKJVKZGZm4p///CdnHwVaX6D379+P5uZm+Pn5QSaToaqqirMdamtrsX79ely+fBmbN2+GTqeDUqnExYsXsWLFChQUFIDP5yM2NhbPPPMMFi5cCIDbfzAM0yk/qVSqp0euPjx27FisW7cO586dw6lTpxAUFASdTofjx4+jsLAQEydORG5uLgDgH//4BwBg165d2LZtG9555x14e3uDz+fj9OnTer6uK7rjIjc3F7t370ZoaCisra1RVVWFJUuWsF8AzLUVlUpl0Ga/+OILg9e//vprdq/Du+++i9WrV+PMmTM4deoUcnNzsWHDBjAMg7t37yI9PR0XLlzAyJEjwTAMTpw4gWvXriE8PBwikQh2dnZYtGiRUZ/LNa7Mmzevkxxtts5lo+fPn8eGDRvQt29fvPXWW9BqtWyI5X//+99OYxoAxMTE4PPPP4e1tTVeffVV2Nvbs+F2q1evhoODA2JiYvDZZ5/B19cXc+bMwblz5zBixAisX7/eqDxt5OTk4ODBg7C3t0dmZiYee+wxLFiwgPMrEte4cunSpU71y83NhUwmw4IFC5Cbmwu1Wo34+Hh89tln8Pf3N9veuLh58ybKy8uxefNmzq8pXG3Qt29fk+8MlIcIoVA4EIvFZPz48WTt2rWd7jU0NJA9e/aw/2/bto1ERESw/58/f57ExsaS0tJSEhYWRm7fvk0IIUShUJD58+eTc+fOGSxTp9OR5557juzbt6/Tvddee43Mnz+fiMViQgghv/zyC5kxYwZRKBREo9GQ5cuXk23btrHPb9++naxcuZKzbs899xw5deoUe+3atWtk2LBhJDs7mxBCyK1bt0h4eDgpLCxky3v++eeJUqkkhBBSWFhIAgMDSU5Ojl7eptJ1pLttd+DAATJz5kyi1WoN5tueH374gbzwwgvss/fu3SNnzpwhp0+fJsOGDSO3b98mhYWFZNWqVUQkErFyzJ07l9TW1hJCCNm9ezcJDw8nMpmM3L9/n/z++++kqKiIjB8/nuTk5BCNRkMOHjxISkpKyNixY8lXX33Flrds2TLy008/mZSTEEKuXr1KPvzwQ/L222+zuuAiOzubBAUFkaKiIvZaYWEhOXnyJCGEkDt37pARI0aQlJQUcv78efLZZ58RQghRqVRkzpw55ODBg2bJxEVpaSnx9vYmkZGRBu/fv3+fjBo1iuzYsYMQ0mrfzz//vJ6dRkdHk6lTpxKZTEYyMjLI8uXL2Xarqqoimzdv5iy/fVqZTEaWLl1K8vPz2ftbtmzhTLt9+3aybNkyolKpiFarJQsXLiR79+7lfJ7L3tPT08m4ceNYHRw+fJgsWLCAqFQqQgghp06dIqmpqaS6uprMnDmTtWdCCPnzzz/JwoULSUtLC2e5X3/9NVmxYgXbJlKplEyePFnP/7RvB0Ja9TJy5Ejyww8/EEL+t90XLlxImpqaCCGEHDt2jEybNo20tLSY9B9c+bU9z9WHCSGkvLycBAcHk/T0dEIIIbGxsWTBggWsrIQQsnr1atZGCCHk5ZdfJosXLzbo69pkM6a7mJgYMm3aNL0y2tPQ0ECmT59Ozp49SwghpKmpiUybNo1ER0cbTG+sPC6bNWXLS5
YsId988w37f2RkJJk9ezbR6XRsmwcFBbE+IDExkYSHh5Pq6mpCCCEpKSkkJCSEEGLa5xobV9rLYY6NHj16lAwbNoxkZmYSQggpKioigYGBemk68vPPP5PHH3+c9a1arZb885//JB988AFb3x9//JH4+/uTzMxMkpaWRhITE82SZ/78+eT9999n82nT7fnz59k0b7zxBltHU+OKofr5+/uTXbt2sfm98sorerozh+bmZhISEkJmz55Ndu3aRbZt20Y++eQT8vbbb5OUlBS9Z9vLa6oNjOmW8nChIUyUHmHIkCHYtGkTPv30UyQmJiI4OBiTJk1CQkIClEol3NzcIJfLwTAMfH19cf369W6V4+/vz66yOTs7Q6VSQalUori4GCkpKRg9ejTkcjnkcjmGDRuGvLw8g/lcuXIF9+/f19uA5ubmBjs7O/b/oUOH4t1334Wbmxt0Oh1sbW1RW1tr8lSQrqZ72G3HMAxSUlIQEhLC7vEQCAQICgpCfn4+JkyYgICAAPj5+WHr1q1wc3ODWCzGt99+i5dffhmurq4AWr+UjB07FkKhEJaWlpg4cSKuX78ONzc3eHt7Q6VSYfz48WhoaIC1tTVeeeUV8Hg8tLS04P79+2w+ppg4cSK+/fZbvPrqq3jhhRfYEzoMcffuXTg7O+tt5Le2tmY32ZaUlMDLywtjxozBjBkzsG7dOgCt+x9qa2sxZsyYLrVld+Dz+Rg9ejSA1jhgNzc3vRCZ9vHmrq6uyMzMxGuvvYbjx4/D0tISr7/+Omfe7dPa2NjAzs4Oy5Ytw+7du5Gbm2s07axZs7B48WLw+XyoVCo4Ojp26wSykSNHwtXVlQ1vkslkKCwsRGFhIXQ6HSQSCcaMGYP4+HgolUr4+fmxaUeNGoWioiLOU3BEIhGio6Mxffp01nbt7OzQv39/znZo+18gEGDYsGEA/rfdXV1d0a9fPwCAk5MTNBqNWf6DK782PXL1YQB6+2AYhkFERASCgoIgFApZeUNCQhAVFcX+b2Njg4EDBxr0dcCD6+7KlSuorq5m9xbZ29tj7dq1CAsLM/i8sfK4bNaULZva09Zx/92vv/6KIUOGsAddDB8+HF999RWA7vvqjnKYY6N8Ph+Ojo6sz7GzswOfz0dzc7PRMvr378/uG+TxeJg3bx7Onj2LiooKAK0+WSgUwtPTE+PGjUNYWJjZfWbIkCGs/Ts5OWHo0KH45ZdfDMpialwxVD97e3u9r8fu7u6or6831bQGGTVqFP7zn/9g5cqV+PLLL7F7926EhIRwPt8dv0F5NNAQJgonbYOmobhKjUajF3M4a9Ys6HQ6HDx4ED/99BM8PT0RGRmJsrIyaLVapKSksINBaGhotzdi9u3bV+9/QggIIaitrYVcLkdeXp7emdgff/yxwXxKS0thY2PTKW6+PUKhEFqtFh9//DGeeOIJNnbVVKxlV9M97Larr69HWVkZ3njjDfaap6cntFotrl69ilmzZnVK03YySdueCblcjtu3b+PNN98E0Lr5UK1W4/fff8ecOXPYl6Hhw4cjIiICnp6e7OlD9+/fR3NzM/vyxYVOpwMhhH3ZGjZsGBwcHLBlyxY89dRTBtOkpKQgMDBQT4/t9zSkpaUhICBAb2IItIZvdHX/Q3exsrLSe1m0sLDoFO/dhpeXFyIiIrBz506sXbsWjY2N+Oabb7Bs2TKT5fB4PPYkqCNHjuDTTz/FggUL8PPPPxt83sXFBdHR0YiPj8e4ceOgVCoNhmCYQigUYubMmTh9+jSeffZZCAQCTJw4ERcvXkSfPn3g5OQEoVCIwsJCWFtb65XB5/PBMAzy8/OxZcsWvUMBvv32Wzz22GOQSqXsS39XMNTu7UN82uvBHP9hTI9cfXjUqFF6MjU3N6OyshLh4eF6121tbTu9kHH5OuDBdVdeXg6hUMjWh8fjcU4eTJVnzGa7a8sdUalUqKio0FvwcXJyYk/m666v7ogxG2170QeAPn366E08jPVpLlxdXaFWq1FTU8P6IU9PT/a0w67I05FBgwYhKyvL4D1zxpWO9ePz+bC1tX2g+naX7rYB5eFDJxAUTmxtbeHn54fi4uJO9+rq6vQcXWJiIkJDQzF//nyoVCp89NFHOHDgAB5//HFYWloiPDzcYGytKeLi4jBt2jSTz3l4eMDW1hYjRozgfNlsj5eXFzQajdENqlFRUdixYwdOnjwJDw8PJCQkwNLSkh3M2iOTyRAfH4+5c+eaTDdw4EC9tD3Rdm1xqkFBQZ3uFRcXQ6vVwt/fHyqVClVVVRg8eDBqampQXV1tcG9GWVkZ7O3t4eLiAuB/JwGjR49GaWkpBg4ciPz8fFRVVeGpp55CeXk5G5ObnJyM8ePHsxOBpKQkeHl5YeDAgSgtLYWPj4/BOrz77rvIysrC6dOn4eDgAAcHB7i5uaGurs7g8y0tLcjKymKPpi0pKYGnpyc70MjlcmRkZLCrnhKJBDKZDJ6enrhx4wZ8fX3h5OTE5m/uF5KHSV5eHtRqNfbt2wegNX56x44dZr10yWQyJCcn46OPPsJHH32EsrIyzhOy1Go13n//fdjZ2WHr1q3g8XhITU2FRqNBXV0drKys9Pq3obLa7B0AwsPD8csvvyAuLg6+vr5wdnbG/v374e3tzcZJDx48GJcvX4ZSqWQndCqVCoQQ+Pr64syZM53KqayshK2tbY9v3OxIV/1HR7j68JYtW/Ses7e3h7u7e6eNyFKp1Oz9C+bozhQDBw6ETCaDQqEwOTkzVV5xcTH4fH4nmw0JCem2LXfExsYGAwYM0Nv70h5TPrfjUeNc44oxG/Xw8Oiy3Maor68Hn8/XO5Sh456F7sojkUg4j4/28/N7oDH5UdPVNjD3nYHy4NAQJopRli1bhvLycr1QIIZhkJSUpLcalJ6ejpSUFACtzn7u3LmwtrbG1KlTYW9vr/dDWrm5uZxn71taWsLFxYX99Nz+1yjbTkBpo/0KiI+PD6ZPn46kpCT2mlwux8GDBw2WExoaCgcHB2RnZ7PXqqur0dzczK5aJSUlYejQoayTqqqqgkKhQGNjI+7cuQOhUAiBQAClUgmpVKr3wmwsXUe623b9+vUDwzDQaDSQSCScX1OKi4vh5uYGNzc3pKSkoLq6GgBQUFAAgUCAwYMHd0rj6OgIJycnNs9z587B1tYWffv2RWJiIng8Hq5duwYPDw84OTmx7d7Y2IjS0lJ2IsMwDC5duoTp06ejtLTUaJiFl5cXnn/+efZFqqGhAZWVlezn7ZqaGnz//ffsme319fWQy+UYPnw4KioqkJycDEtLS+zZswcJCQnIzc3Vm7CkpqayP+aVmZmJiRMnQqvV4o8//mBt6dq1a4iMjDR6Kld7CCFgGIZzpbPNZtvbascTYtryIISgoaEBx44dY/MLCwszqB9DabVaLU6dOsVObr29vTutdLfR2NiIW7duYfLkyeDxeNBqtaioqIBWq0V+fr7B07247B1oDSEZOHAgoqOj4efnh3HjxkEikSAlJYWVf+rUqeDxeHq+5NatWxg0aBDGjRtnUM6236FJSkpi200ikUAkE
kGj0Rhsh7Y2NtTu3fUfpvTI1Yc7ls3j8bBkyRKkpaWxvwvRdjxu2wZpU7Kao7uO7dGR0NBQuLq6Ii0tjb2WmZmJ2NjYTu1pqryCggKDNmvKltvKMFbn9nVYvHhxJ9s8ceIEANM+19i40l4Oc2y0Y7t2rAcXYrGYLZ9hGJw6dQozZ87EgAEDDNbfXHmA1gWeNpqampCRkYH/+Z//4ayjsXGFq37GfJg5frMtD3Paqis6MaZbysOFt2HDhg29LQTlr0tbHO4PP/wAGxsbVFdX45dffsETTzyht9p97do15ObmsoNLcnIyFi5cCF9fXwQEBODAgQOQSqXIyspCWVkZnn32Wc4YWDs7Oxw7doz9TD5kyBD8+OOPOH78OEpKSmBvbw+JRILdu3cjIyMDCoUC06ZNQ3BwMFJSUpCRkYG6ujokJSVhypQpBn+vwtbWFgEBATh06BC0Wi2KioqQkZGBpKQklJeXY+TIkRgyZAjOnj0LjUaDxsZGCIVCiEQiFBQU4Mknn8SQIUNQUVGB1NRU1NXVISAgAP3794e9vb3RdB1/rK67bdevXz/ExcWhvr4etbW1mDRpksFJhIWFBTIzM2FhYQGtVovw8HBYWlri/Pnz0Gq1eOmllzql8fDwwO3bt1FTU4OcnBz0798fVVVV0Gg0GDNmDDw9PSEWi3H//n1IJBJMnjwZjo6OKC4uxrlz5/Cvf/0L9vb2sLCwwL1796BWqyEWizFt2jTY2NjAy8sL//3vf/Hll1/iyy+/xJ9//omvvvoK8fHxqK6uBsMw2Lt3L6ytrfH555/Dzs4Od+/excaNGxESEoKBAweCz+fj7t27sLKyQmlpKWbNmgUrKyscPHgQfD4fMpkMI0eORFFREerq6tCnTx9MmDABQOsgU11djZqaGgQEBLCnTh06dAinT5/G3LlzjYa3tZ1GFB0djYyMDFRXV6OsrAwA2AlLWVkZtmzZgsuXL6O+vh4+Pj44d+4cfvvtNxQXF0MgEEAsFmPXrl3Izs6GlZUV+vfvj8uXL0Or1UIsFiMuLg6jR4/uFAoDAJcvX9ZLO3z4cFy4cAEymQxyuRw3b96EWCw2eJZ+nz59IJFIEBcXBzs7OxQUFGD06NG4cOECbGxsEBYWhj59+uilEQqFBu0daA2BafvBr7lz58LW1haFhYXw9/dn/YSDgwMee+wxREREQCqVIj09HcnJyVi/fj3naqqFhQWCgoKQmpqKgoICSKVSpKWl4datW7hx4wacnZ1RW1ur1w6Ojo7YuXMnZ7vb2tqipaUFO3fuxK1btyCXyxEQEIDw8HCD/qOlpcWkHpVKpcE+rFQq2bQtLS0YNWoUgoODoVarceLECSgUCpw4cQL29vZYsWIFGIYx6etmz55tVHeWlpbYv38/MjMzoVar8fjjj3eyZVtbWwQGBuLAgQNoampCQUEB7t27h1mzZiE1NRW7du1i00+cOBEqlYqzvMGDB+PKlSudbNbR0RHx8fGdrg8dOhQ//fQToqKiUFFRAQ8PDxQWFmLPnj3Iy8uDTqeDo6MjduzYgaSkJDQ0NGD48OHsUc2//vordDodkpOT4eDggGHDhpnlczuOK76+vp3kCAoKMmqjly5dwq5du5CVlQW1Wg03Nzfs3bsXCQkJEIlEGDJkiMEfVs3MzMSVK1fQr18/NDU14fDhw+DxePjkk08gFAoRGxuLPXv2oKCgAI2NjfD29oaTk5NZfebmzZvsyYDl5eWIjIzEjBkzMHfuXGg0GraOxcXFcHBwwMSJEznHlZSUFIP1i4+PR11dHby9vREXF4fIyEiUlJTAxsYGY8aMwZEjR4z6zaysLOzYsQNXrlyBSCRCc3Mzhg8frhcSCLR+Wegob0hIiMk26KjbkSNHGvQnlJ7FgjyqQDbK3xqpVIqbN29Cp9Nh/PjxnWLK1Wo1rKys0NDQAIVCgQEDBnT6HFtXVwc+n2/Wp3qpVAqpVGr0V3y5kMlkaGlpMXlef3u5bG1twefzcf/+fTZ8pk3+uro62NvbQyAQsKso7Tf3iUQiCASCTqEAptK18SBtp9VqUVVVBXd3d6ObEpVKJbRarZ7e2j4Dc70oMwyDuro6ODs7w8rKCkqlEkqlUq+eUqkUNjY2bNkMw0CtVnfKs6mpSVOlw3QAACAASURBVK9NFQqF3ooWj8eDjY0NdDod7t69i+LiYvj6+uKxxx4z+uN+DMNALBbrfYrX6XRoaGiAi4sLLC0t0dTUBBsbm06DlVgshlAofOg/UNgV2vaBqNVqiEQieHp6Gp3IdESlUsHKygr379+HnZ2dyR971Gq1qK+vR//+/WFpaQmdTsf+gCQXXPbecQ+LVquFhYWFQf3V1dXB2tq6S3sbWlpaoFar4eTkhKqqKnblsad/Mbmr/gMwrw93RKfTQSQSwcXFpVt7T7qjO0OY65e5ymvbm9HRZh/UlrnQ6XSoqamBi4tLp75ryud2ZVzpjo1ycejQIURGRuLMmTOsL+xqW5iSp6N+zM3T3DH5r4CxNniQdwZK96ATCAqFQqFQKJSHRGRkJA4dOoQzZ878pRYsKJQHge6BoFAoFAqFQnkIXLt2DSdPnkReXh6OHDny0A8EoFAeFfQLBIVCoVAoFMpDoLGxkd0fpNPp4OXlZTQsk0L5u0AnEBQKhUKhUCgUCsVsaAgThUKhUCgUCoVCMRs6gaBQKBQKhUKhUChmQycQFAqFQqFQKBQKxWzoBIJCoVAoFAqFQqGYDZ1AUCgUCoVCoVAoFLOhEwgKhUKhUCgUCoViNnQCQaFQKBQKhUKhUMyGTiAoXUalUqG6uhoqlaq3RaFwIJVKUVNT09tiUB4iXdFxY2MjEhIScOnSJcjlcpSVlSExMREymewhS9l7SKVSiEQiMAzT26L8baC+vWfQ6XSoq6uDWCzubVG6jE6nQ21tLaRSaW+LQvmLQycQlC5RWlqKt956C+PGjUNOTk5vi2MSsViM//73v0hPTzf5bEREBH788cdHINXDJTY2FtOmTcNrr70GrVbb2+J0iYyMDHz66adobGzsbVHMpjdkNqRjQ7au0+mwa9cubN68GT4+PtBqtXjyySexdetW3Lx5E2+//TZ0Ot0jk/tRER8fj2effRaLFi2CUqnsFRmamprwzTff4IknnsCAAQOwatUqbNiwAatWrcIbb7yBr7/++i/1kvZ38+1c9LYfV6lU2LlzJ5544gns37//LyOXOcjlcnz33Xd4/PHHcejQoQfOz1zf2JVxmvLXgU4gKJwolUrs27dP7wXDx8cH69evh6enZy9KZj4KhQLZ2dl6K7WG6sUwDIqLi3Hv3r2//QvVs88+ixUrVsDS8q/dvX///Xfcu3dP71ptbS2ys7P/VivjvSGzIR0bsvW0tDScPn0aH374IXx9feHl5QWJRII5c+bg+eefx5tvvgkej/fI5G7DkO57kqeffhorV66ElZXVQyvDFI6OjlizZg2ee+45DBo0COvXr8eGDRuwbds27Nu3D4QQvPzyy6ioqOgV+Trq4O/m2w3RVT+elZWFc+fO9agMNjY2WLVqFaZOndptuXoLoVCI
NWvWYNKkST2Sn7m+0ZDvovz16T3vSvnLU11djTt37nS6zuPxenVg7goeHh44efKk3jVD9bK0tMQXX3zxKEV7qFhbW/+lJxAMwyAxMRHDhw/Xuz5jxgzMmDGjl6TqHr0lc0cdG7L14uJi2Nvbw8nJCQBga2sLZ2dnDBkyBD4+PvDx8XmUIgPg1n1PY2Vl9ZfoAzweDxYWFrCwsGCvWVlZYc2aNVi6dCm2bNmCLVu2PNKJHJcO/k6+3RBd9eOZmZkP7QuVtbU1+/ffbXzh8/k9ko+5vtGQ76L89el970r5S8IwDI4fP47KykqTz8nlcs77IpHI6P02lEolGIaBTqczGn/LMAyqqqqgVqs73WtpaUFJSQnUarXefa1Wy+ZprF6m6sKFRCJBQ0OD2fIYQqVSoayszGDd28vVvi5c+Zgb821uvmq1GlVVVQbzba+vlpYWZGZm6uVfVVWFpqamTumuX7+O5ORkg+Uplcour9JxyajValFWVgaxWGwyT6VSqRfypdPpOr1c1NTUoKqqCjqdTi+/jjK317mx+qhUKvZeRUWFWavyxnTcUY+EkE7PdHyZbcNQX21vI4Zs2FQaQ3ZlTPdcNDQ0oLa2lvOeRCIxOy+5XA6RSNTpurG6dqUvmwuPx8OcOXNw8uRJ3L171ywZ23hQezdHB93x7abshctXGsKY3zGGIbkN6a+xsRGHDx8229cY82fdlatt3OvuOGruuGGOzTIMY3LvizH9yeVyVFRUdAqb7ej/uGyUawzi6t9dGRcpDwfehg0bNvS2EJS/HlFRUYiJiUFhYSEaGhpw584djBgxAgKBAFKpFMePH8egQYOQnZ2NmzdvYtu2bXjiiSfg6OgIALhy5Qq2bdsGoVCI48ePIzU1FcHBwZ1Wt/Lz8/Gf//wH77//PlQqFQoLC3H9+nXs378fvr6+6N+/P4DWl7k2mQQCAc6fP4+0tDQEBARAp9Phhx9+QE1NDSwtLXHs2DH88ccfeOqpp7B7924sWLAAABAaGspZr7S0NLzyyis4evQoXnrpJYjFYmzcuBH//ve/kZSUhHHjxsHGxgYffPAB3n//fWg0Gvj4+OCzzz6DRCJBXl4e9u7dizFjxkAgEBiUZ/r06Qbb+vLly0hISAAA7N69G9XV1XjiiScAAOfPn8eSJUvw+++/w97eHjk5OYiKisLFixcRGhrKtmd8fDx++OEHNDQ0ICMjA7dv30ZdXR0WLVpkcBXWnHzVajX27duHixcvgmEYbN++HTweD0OHDoVWq8X333+PuLg45ObmYu/evWhqakJZWRnGjRuH8vJy/Prrr9BqtUhISMDx48cxYcIECAQCZGVlYf/+/bhx4wYUCgXS0tLg4uICrVaLVatW4Z133sGUKVNgbW1tUgeBgYGcMp4/fx4XLlyAg4MDbty4ga+++oq1hfaUlpZi1apVWLFiBcLCwtC/f3/s2rULL730Epqbm/HUU0+htLQUe/bsgVAoRE1NDTZt2gQejwd7e3s9mZ2cnLBt2zYsWrQIOp2OnRR8++23sLCwYFd8RSIR1q9fj/Lycpw9exZRUVGws7PDhQsXMGXKFIN2YkzHDMN0svUzZ84gKioKOTk5qK+vx+XLl5GcnIyrV6+iubkZJSUlGDlyJK5fv26wr/7xxx9YvHgxEhMToVAosHnzZmRnZ2PKlCmc/Ts+Pt6oXeXm5hrUfVs/70hubi42bNgAtVqNiooKxMfHw9/fH3369EFWVha2bt3K+pHffvsNfn5+cHBwYK+lp6dj0aJFsLa2hlgsxnfffYeSkhIoFApERkZCq9XC19cXMTExBus6ceLELvVlQyQmJuLevXt45ZVXIBAI9O5ZWFggMjIS48ePx8iRI43KCMBonzTX3rn6X//+/R/ItxuzF5FIZNBXtumqPVx1tLW1xddff43//Oc/+P333xEeHo7CwkI888wzOHXqFOzt7VFbW6vnxxmGMai/4OBg7N27F/Hx8WhubkZeXh7q6+sxcuRIgzo05s+4OH36NJydnREaGoqkpCQ9uSorK/Huu+/igw8+wIABA5CVlYUbN250aRw1Nm5w2bMh36JUKrFnzx7Ex8ejtrYW169fx9WrV/HYY49h3LhxAGBUf1KpFJs2bcLt27fBMAzOnTvHfsFo7xs9PT0N2ui8efM6+a42O+Xq3+aOi5SHDKFQODhw4ACZOXMm0Wq1etfv379PRo0aRXbs2EEIIUSn05Hnn3+ebNu2jRBCSGlpKQkLCyO3b98mhBCiUCjI/Pnzyblz5wyWk5+fT/z9/UlsbCx77cSJEyQsLIyIRCJCCCGxsbFkwYIFRCaTsc+sXr2a7Nixg2RkZJDly5ezclZVVZHNmzezzy1ZsoR88803JusVHR1Npk6dypah0WjIK6+8QtavX88+k5KSQk6dOkU0Gg1Zvnw5W2dCCNm+fTtZuXKlSXnao1AoyNy5c8mRI0cIIYQUFRWRiRMnkuzsbPaZo0ePkmHDhpHMzEz2mcDAQLZ9r127RqZOnUoqKyvZNF9++SV55plniEajMViuOfkeO3aMzJ07l22P27dvk+nTpxNCCLl8+TIJDw8n9fX1hBBC3n//fbJ27Vq9tli2bBlRqVREq9WShQsXkr1797L3CwsLSWBgIMnJydGTqby8nAQHB5P09HSTOjAmo0wmI0uXLiX5+flsui1btnC2RVlZGRk7diy5du0ae+2NN94gn376KSGEkG3btpGIiAj23vnz51l77SgzIYS8/PLLZPHixUQsFhNCCPnll1/IjBkziEKhIIQQ8sknn5APP/yQEEJIbW0tCQ8PJ4mJiZzymavjjrYeGRlJZs+eTXQ6HSGktW8GBweTvLw89n9jffXHH38k/v7+JDMzk6SlpZHExESTaUzZFZfuO9LQ0ECmT59Ozp49SwghpKmpiUybNo1ER0eT6upqMnPmTDZPQgj5888/ycKFC0lLSwshhJCYmBgybdo0IpPJiFarJR988AHZvn07+3xNTQ2ZPXs2ycjI4KxrV/oyF5999hkJCQlhbaE9RUVFxMvLi3z77bdmydhT9s6lgwf17Yba0JivNIQxv0MIIb/99hsJCwsjlZWVJC8vj2zevJnVOSH6ftyY/nQ6HXnuuefIvn37ONupvbzG/Jkh3njjDb2+2HF8KS0tJSNHjiQ//PADK4+5bW3OuGFIF4b4+uuvyYoVK9g2kkqlZPLkyWTPnj2EEGJSf+vWrSMrV65kfczGjRvJsmXLCCH6vtGUjbb3Xeb0b1N+hvLwoSFMlG7B5/MxevRoAK3xnW5ubuynzYSEBCiVSri5uUEul4NhGPj6+uL69esG87K2toadnR0GDRrEXgsNDYVUKsWlS5fAMAwiIiIQFBQEoVDIPhMSEoKoqCi4uroiMzMTr732Go4fPw5LS0u8/vrrerKaQ8eYaSsrKyxcuBBxcXGoq6sD0LpaPXHiRBQXFyMlJQWjR4+GXC6HXC7HsGHDkJeXZ1Ke9ggEArzzzjsYO3Ys25YMw+iFL/D5fDg6OsLLywsAYGdnBz6fj+bmZgDAgQMH4O/vr7f50dvb22C
YSnuM5avT6XDkyBF2RU4ul8PV1ZX95NymaxsbGwCtG0ZLSkrYvGfNmoXFixeDz+dDpVLB0dERRUVFRuUBOsdgG9OBMRltbGxgZ2eHZcuWYffu3cjNzeXUQVu5Hb/UtNUNAIYMGYJNmzbh008/RWJiIoKDg9mNhobixm1sbDBw4EB2hdXZ2RkqlYoNiaqqqmLv9enTBwCMbiA0V8ddjV021VcFAgGEQiE8PT0xbtw4hIWFmUxjyl7N5cqVK6iursbjjz8OALC3t8fatWsRFhaG+Ph4KJVK+Pn5sc+PGjUKRUVFBk9yKSsrQ3x8PEJCQthr/fv3h7u7O44ePcpZ16705e4gk8mg0+ng6elpUsaetHdjPIhvN9SGxnxlR0z5HQB48cUXERoaivfeew+XLl3CP/7xD9ja2rL32/vxntJfd/1ZewyNLwKBAMOGDQPQtbY2Z9wwpIuOiEQiREdHY/r06eweHDs7O70vgsb0V1VVhZiYGDz55JNs3V544QWsXLkSgL5vNGWj7X2XOf27p/wMpfvQ7zyUbmFlZaX3Mm9hYcHGW5eVlUGr1SIlJYV1SqGhoV3aNGlnZ4e+ffuipKQEzc3NqKysRHh4uN4ztra2qK+vh5eXFyIiIrBz506sXbsWjY2N+Oabb7Bs2bIHrmdoaCh4PB6uXLmCcePGgc/nw9XVFfn5+ZDL5cjLy9M7ivHjjz/usjzu7u74/vvvMWDAADY8SKPR6D3Tp08fPQfb1t4qlQolJSWYOHFit+rHla9UKkVtbS08PDzw559/svdXrVoFAAgKCoKVlRXu3bsHPz8/ZGVlYeHChexzLi4uiI6ORnx8PMaNGwelUqn3Qt4VuHQgFos5ZeTxePjiiy+wd+9eHDlyBJ9++ikWLFiAn3/+mbMcYxOuWbNmQafT4eDBg/jpp5/g6emJyMhIjBo1ijNN37599f4nhLB9ZM6cOTh27BhkMhmKioogEAgwfvx4g/k8qI6NYU5f9fT0ZEMqzE3DZVddoby8HEKhkPUzPB6PfQkqLCyEtbW1nk3x+XwwDGPwVKOqqirI5XI9nwW06qigoICzrg/TtwCtp9RYW1vD19fXpIzG+mR37J2LB/XtHduwtraW01d2xJTfAVrtYNWqVZgzZw4kEonBMKg2ekp/PenP2vMgbW3OuNFRFx2pq6uDVCpFv379OJ8xpr+GhgYoFAo4Ozuz17nCwLpio+b2757wM5TuQycQFLPIzc2FTCZDUFCQyWf9/PxgaWmJ8PBw9vSXriKTydDc3IzBgwfD3t4e7u7unTavSaVSODg4IC8vj42bBVrjP3fs2GHWIGGqXk5OTnj22WcRFRUFgUDAOm8PDw/Y2tpixIgReOqpp/TSdEWekpISLF26FBs2bMDs2bPR0NDAxtWWlpZiwIABRuW3sbGBl5dXj6+69O3bF56enujbty/mzZvX6b6FhQWef/55pKSkICUlBStWrGBXTtVqNd5//33Y2dlh69at4PF4SE1NhUajQV1dXafVeplMhvj4eMydO9egLFw6MCajTCZDcnIyPvroI3z00UcoKyvD0qVLjda54wSi/QbqxMREhIaGYv78+VCpVPjoo49w4MABbNmyxWieXDg7O+Ppp59GREQEbGxs8OOPP7IraR15WDoGzOurHb/M9ET/bsOY7gcOHAiZTAaFQtHpBWfw4MG4fPkylEol7OzsALROtAgh8PDw6JSXu7s7bGxsOv3uQlNTk95JVB3r+iC+xRQMw7B7tQIDA1FeXm5Uxp629/ZpjfW/9nTHXoz5yo6Y8jttXL9+HW+//Tb27NmD4ODgTotLbXRFf3FxcZg2bVqn6+b4M2Mv6d3FWFubO26YOoXMyckJtra2RjdwG9NfVVUVBAKBWb+B0xUb7Wr/pvQONISJwkm/fv3AMAw0Gg0kEgnroNpOjWg/029/UsbUqVPZjU1t5Obm4uzZs5xlKRQKvdMdrl69Cjs7Ozz55JPg8XhYsmQJ0tLS2POk244hnDdvHhoaGnDs2DH2M3dYWBgGDx7M5kUI0ZOPq15tz3VcwZg9ezZyc3Nx9epV+Pv7A2g9M3369OlISkpin5PL5Th48KBJedqTk5MDpVKJgIAAAK0/qNPY2AiGYZCSkgK1Wt1Jpo71efnll5Gdna3nxO/duweNRmP0hBFj+VpaWuK1115DZmam3sTtzJkzAFpPNSkrK8OiRYvwz3/+Uy/sorGxEbdu3cLkyZPB4/Gg1WrZ0zny8/NRWVkJoVAIgUAApVIJqVSqN6losy9TOjAmo1arxalTp9jVKm9vb86XDKB1dYvP50OhUABoPW2kqKiIXdFLT09HSkoKgNYX+rlz5+od09hR5o7/d7Sp8vJy8Pl8vPHGG1i+fDnn5KENc3Xc0TYMtWX7a6b6qqH05qQxZq/GdN+e0NBQuLq6Ii0tjb2WmZmJ2NhYTJ06FTweTy8M5tatWxg0aBC78bN9f/bx8cHkyZP1Th6qrq5GRUUFXnrpJc66GuvLNTU1+P77702eXd8mQ0d/+fPPPyM7Oxtr164Fn883KWNP2juXDh7UtxtqQ2O+siOm/A4AXLhwAU1NTViyZAnWrFmDDRs2oKysjL3fXu/G9GdpaQkXFxe0tLQAAOevRpvjzwzR0e47ji8P0tbmjhumTrDy8PDA008/jaSkJPZZiUQCkUjE+j5j+nN3d8esWbPw559/sm0slUoRERGhVyeGYUzaaPv2Mqd/m/IzlIcPPYWJwkm/fv0QFxeH+vp61NbWYtKkSaipqcGWLVtw+fJl1NfXw8fHB+fOncNvv/2G4uJiCAQCTJ48GQEBAThw4ACkUimysrJQVlaGZ5991mCMtlgsxtGjR9G3b18oFApcvXoVFy5cwMaNG9kYyKFDh0KtVuPEiRNQKBQ4ceIE7O3tsWLFCtTW1iI+Ph5arRZisRhxcXEYPXo0hg4dip9++glRUVGoqKiAh4cHhg4darBe165dw65du5CdnQ0rKysEBASwsjo5OSEjIwOhoaEYM2YMgNbBJzg4GCkpKcjIyEBdXR2SkpIwZcoUKJVKg/IYCnfp168fbt68iaKiIlhZWaGyshIDBw7E2bNnMWrUKIjFYuzatQtZWVlQq9Vwc3PD3r17kZCQAJFIhCFDhiAkJAQWFhY4c+YMLCwskJaWhoKCAsTHx0Oj0eDJJ5/sVO6lS5dM5tsWLnL8+HGoVCokJibCyckJfn5+6NOnD/bv349169Zh69at+OKLL3D27Fm4u7tjzJgxkEgkiIuLg52dHQoKCjB69GhcuHABNjY2CAsLg7OzMyoqKpCamoq6ujoEBARAqVSyttXS0oJRo0axK2+GdACAPZ2mo4wDBw7EmTNnIJPJIJfLcfPmTYjFYkyePNmgrdva2qKlpQU3btwAn89HRkYGamtrkZCQAHt7eyiVSuTm5rIvD8nJyVi4cGEnmYcOHYro6GgcP34cJSUlsLe3h0Qiwe7du5GRkQGFQoGpU6eCYRi8++672LFjB7766its3boVd+7cQWBgoMFwAh8fH6M6njBhQidbLywsxJ49e5CXlwedTg
cHBwfs2LEDycnJqK+vh7W1NcaOHcvZV+Pj47Fnzx4UFBSgsbER3t7ecHJygr29PWealJQUk3Y1dOjQTro3dAqTra0tAgMDceDAATQ1NaGgoAD37t3DrFmz4OrqisceewwRERGQSqVIT09HcnIy1q9fDw8PD1y+fBm7du1CZmYm1Go1AgMDMWXKFFy8eBFZWVmorq7G4cOHsWjRIkydOhWxsbEG61pZWcnZl+/evYuNGzciJCQEAwcO7CR/U1MTdu7ciWPHjqGiogJNTU1ITU3FmTNnEB0dDYZh8N133+mtFk+YMIFTxp60d6FQ2EkHCoXigXw7l70Y85XtQ19M9WkLCwusXr0amzdvxsiRIzFp0iRUVlbixx9/ZF9gW1pa9Px4//79cfnyZU5fbGdnh2PHjrEhSYbCb/r06WPSn7XtYwJaV8rb+mJxcTEcHBxQX1+vJ5ejoyN27tzZ7bbu37+/0XGjurraoC46YmFhgaCgIKSmprJhcmlpabh16xZu3LgBZ2dnjBo1ilN/Li4uCAoKws2bN9nT3ZKSkhAWFqZnTy0tLfD390dycnInGw0ODu7ku4KCgoz2b3PGL1OLMpQHx4LQgDGKEbRaLaqqquDu7t6tH5epq6sDn883GqdaUlKCBQsW4Ndff8WgQYOg0Wg4P4/rdDqIRCK4uLiw8ZE6nQ6EEKjVaohEInh6eho9Xq879VKr1eDxeAZ/7Ekmk6GlpQVubm7dlqelpQVqtZqtt1ar7fJRdFqtFvX19exxjGKxGK6urp1iqruKTqdDTU0NXFxc2DjUjRs3IigoCLNnzwbQuhqUkJCATZs2sUcLtpfH0tISOp0OFhYWep/VRSIRBAKB0RjcNozpoKOMQOtAbmVlhfv378POzs7gy0pH2s45HzBgAMRiMbRaLVxcXAC0xiu3xfwOGDCg2z9S1naE69q1a9kXT7lcju+++w4ikQh79uzhTPuwdAyY11d7Ik0bXdG9sXLq6upgbW1tVj7A/4ZHurm5mdRhd/pyT2BKxp6y967ooCPd0X1HX2kMQ3XsKuboTyqVQiqVmvwFbnP82cOCq617YtwwlFdVVRX7haZ9fsb0J5fLIZVKjeq2Ozba1f5NeXTQCQSl1ykuLmYnEMY2pVL+GkgkErz99tv47LPP2NXCtutvvfUWDh8+3IvS/fW5evUqfvnlF+zZs0dvMpSSkoJff/2VjdemUCgUCuWvCg1hovQqlZWViIiIwJ9//gkejwd/f3+60vAXRyAQwMHBAYcPH4aVlRV4PB4SExNx6NAhhIaGsnG5FMO4ubmhtLQUly5dQr9+/dDU1ISYmBgkJCTgX//6F9zd3XtbRAqFQqFQjEK/QFB6FblcjoaGBvB4PKjVari6uuqd6U3566LVapGTk4OKigq4uLhg9OjRPRJK8/8LEokEWVlZkEql8PHxwbBhwx5JOASFQqFQKA8KnUBQKBQKhUKhUCgUs6HLXRQKhUKhUCgUCsVs6ASCQqFQKBQKhUKhmA2dQFAoFAqFQqFQKBSzoRMICoVCoVAoFAqFYjZ0AkGhUCgUCoVCoVDMhk4gKBQKhUKhUCgUitnQCQSFQqFQKBQKhUIxGzqBoHAikUhQU1PT22IYRCqV9rpsOp0OGRkZSE9Ph06neyRlSqVSiESiR1JW+zJ7u627Q2NjI+Lj45GYmIjGxsbeFodCoVAolP8z0AkExSAxMTGYOnUqli9fDoZhelscPWJjYzFt2jS89tpr0Gq1vSKDWq3GJ598ghs3buC7777DgQMHHnqZFy5cwMyZM7F48WIolUqDz4jFYvz3v/9Fenp6j5T5V2jr7pCUlIRjx45h8ODB6Nu3L5YvX44TJ070tlgUCoVCofyfgE4gKAaZN28e3nrrrd4WwyDPPvssVqxYAUvL3jPf27dvIyMjAwsWLMCqVaswderUHs1fqVRi3759el82ZsyYgXfffRdWVlac6RQKBbKzs3vsi0F32tqQ7A+T33//Hffu3WP/V6lU2LNnD/r16wdfX1888cQTCAkJwY4dOx6JPBQKhUKh/F+H+02E8v89fD6/t0XgxNraulcnECKRCNbW1hAKhZgwYUKP519dXY07d+50um5lZQULCwvOdB4eHjh58mSPytLVtuaS/WHAMAwSExMxfPhw9pqNjQ1eeOEF+Pj4sNckEgkcHBweiUwUCoVCofxfh36BoJiFVqs1uqIsl8sNxuYzDAO5XK53TaVS6YXDtH9Gq9VCpVJxlqNSqThDqhiGQVVVFZqamjhlZBgGWq32gcOyCCFGZVCr1Z3uqdVqqNVqVgYuGIbB8ePHUVlZaVQGrrYydL2mpgZVVVXQ6XRmfxnoTlubkt2UjozJKZFI0NDQoHft+vXrSE5O7pTPiy++iHHjxgEA7ty5g5SUFKxbt85wRSkUDNKvdwAAIABJREFUCoVCoXQJ3oYNGzb0thCUvyaZmZlITk4Gj8dDcXExLl26hLi4OAQEBEAgEABojbn/7rvvUFJSAoVCgcjISGi1Wvj6+uLChQtYunQpjhw5ghdffBF1dXV477338Oabb2LixInw9vbG+fPnsWTJEvz++++wt7dHTk4OoqKicPHiRYSGhrLhOvHx8fjhhx/Q0NCAjIwM3L59G3V1dVi0aBEsLS1RXl6OX3/9FVqtFgkJCTh+/DgmTJgAgUCA/Px8vP322/j8889hZ2eHmJgYbN++HX5+fnj33XexZs0aNDc3Y/z48SguLsaCBQtw5MgRjBgxAp6enp3aJSEhAUeOHEFmZiYaGxshk8ng5+eHqKgoxMTEQCAQ4Pz580hLS0NAQACUSiU2bdqEV199FRYWFrh16xbWrFmD119/3WC7t+VTWFiIhoYG3LlzByNGjIBAIEBeXh6uX78Oe3t75OXl6bUVAOzevRsLFiwAAISGhqK0tBR79uyBUChETU0NNm3aBB6Ph2HDhnHq/UHa2pjsxtIZk1MkEuGzzz6DRCJBXl4e9u7dizFjxqC8vBz79+/HjRs3oFAokJaWBhcXF/Tv35+ti1KpxObNm/Hhhx8iKCioex2BQqFQKBSKPoRC4SAyMpIEBgaSmpoa9tratWvJhx9+SHQ6HdFqteSDDz4g27dvZ+/X1NSQ2bNnk4yMDEIIIVFRUWTKlClEKpUSQgiprKwkY8eOJYmJiWyao0ePkmHDhpHMzExCCCFFRUUkMDCQ3L59mxBCyLVr18jUqVNJZWUlm+bLL78kzzzzDNFoNIQQQrZv306WLVtGVCoV0Wq1ZOHChWTv3r3s83l5eWTo0KFk3759RCQSkePHjxNCCCkuLiZjx44lV65cYZ/9+eefSWlpqdG2iYmJIdOmTSMymYwQQkhsbCxZsGAB+z8hhKxevZrs2LGDEEKIVqslc+fOJQsXLiRisZicPHnSaP4HDhwgM2fOJFqtVu/6yZMniZ+fH9u+HduKEEKWLFlCvvnmG0IIIdu2bSMRERHsvfPnz5PY2FjOcnuirblkN5aOS06NRkOWL19Otm3bppfPypUrCSGEFBYWksDAQJKTk2OwPhkZGeTIkSOc9aVQKBQKhdJ1a
CwkIkJSXhzJkzCA4ORmRk5EiHRVG3hKVLl2LevHlYu3btSIdCXSduxJysUCiwdu1abN261eR3bhQKBdasWYP09PQRjG54iEQiHDhwAJs3bx7pUKgRRC+ipm5Zra2t+PXXX7lbkF7vOyqKuhlotVocP34cf/75JzQaDaKjoy0uO6NuLTdiTvb29sazzz6L5ORkPPTQQ4iIiMDFixdx9uzZfreGvlnU1NTcEG1DDS96BoK6pTU2NqKzsxPR0dEOnbqnKGpgWJZFa2srCCFgGAZCodDi9QXUredGzckMw6CiogJisRi+vr6Ijo7udzvdm0VzczOEQqHZW/BStw46gaAoiqIoiqIoym43zvSeoiiKoiiKoqgRRycQFEVRFEVRFEXZjU4gKIqiKIqiKIqyG51AUBRFURRFURRlNzqBoCiKoiiKoijKbnQCQVEURVEURVGU3egEgqIoiqIoiqIou9EJBEVRFEVRFEVRdqMTCIqiKIqiKIqi7EYnEBRFURRFURRF2e3/APtpnh0hF8XCAAAAAElFTkSuQmCC) **Answer:** **Data Analysis** ***Read Training Set***# import libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import svm # read training set 3 and convert into pandas dataframe df = pd.read_csv('train_3.txt', delim_whitespace=' ', header=None) # display the data print(df)0 1 2 0 -0.158986 0.423977 1 1 -0.347926 0.470760 1 2 -0.504608 0.353801 1 3 -0.596774 0.114035 1 4 -0.518433 -0.172515 1 .. ... ... .. 206 -0.399885 -0.621930 1 207 -0.124078 -0.126608 1 208 -0.316935 -0.228947 1 209 -0.294124 -0.134795 -1 210 -0.153111 0.184503 -1 [211 rows x 3 columns]***Check for Null Values***print(df.info()) RangeIndex: 211 entries, 0 to 210 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 0 211 non-null float64 1 1 211 non-null float64 2 2 211 non-null int64 dtypes: float64(2), int64(1) memory usage: 5.1 KB None* **No null value** is present in the dataset ***Basic Details***# rows and columns print(df.shape)(211, 3)* Number of **Rows = 211*** Number of **Columns = 3*** Number of **Features = 2**# basic statistical details print(df.describe())0 1 2 count 211.000000 211.000000 211.000000 mean -0.130245 -0.059743 0.004739 std 0.200516 0.314751 1.002367 min -0.596774 -0.657895 -1.000000 25% -0.287903 -0.313889 -1.000000 50% -0.130300 -0.052924 1.000000 75% 0.013825 0.192690 1.000000 max 0.297235 0.573392 1.000000* There is a considerable **difference between the maximum values** of the two features (0.29 and 0.57), therefore performing **standardisation** is better before proceeding into training of the model. ***Features Distribution***plt.figure(figsize=(14,4)) # plot the histogram of 1st feature data plt.subplot(121) sns.histplot(data=df, x=0, kde=True) plt.xlabel('X1') # plot the histogram of 2nd feature data plt.subplot(122) sns.histplot(data=df, x=1, kde=True) plt.xlabel('X2')***Feature Scaling***df[0] = (df[0] - np.mean(df[0])) / np.std(df[0]) df[1] = (df[1] - np.mean(df[1])) / np.std(df[1]) plt.figure(figsize=(14,4)) # plot the histogram of 1st feature data plt.subplot(121) sns.histplot(data=df, x=0, kde=True) plt.xlabel('X1') # plot the histogram of 2nd feature data plt.subplot(122) sns.histplot(data=df, x=1, kde=True) plt.xlabel('X2')* After feature scaling, both features have values between -2 and +2 with mean 0. **Data Visualization**# scatter plot # output +1 => green and '+' # output -1 => red and '-' plt.figure(figsize=(9,7)) df1 = df.loc[df[2]==1] df2 = df.loc[df[2]==-1] plt.scatter(df1[0], df1[1], color='green', marker='+', s=60) plt.scatter(df2[0], df2[1], color='red', marker='_', s=60) plt.legend(['+1 data','-1 data']) plt.xlabel('X1') plt.ylabel('X2') plt.title('Training Data')* The data is **not linearly separable**. 
**SVM Implementation** **Function to Plot**# this function will provide the scatter plots def plot_fun(model, df, color1, color2): # separating +1 and -1 data df1 = df.loc[df[2]==1] df2 = df.loc[df[2]==-1] plt.scatter(df1[0], df1[1], color=color1, marker='+', s=60) plt.scatter(df2[0], df2[1], color=color2, marker='_', s=60) plt.legend(['+1 data','-1 data']) plt.xlabel('X1') plt.ylabel('X2') # plot the decision function ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # create grid to evaluate model xx = np.linspace(xlim[0], xlim[1], 30) yy = np.linspace(ylim[0], ylim[1], 30) XX, YY = np.meshgrid(xx, yy) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = model.decision_function(xy).reshape(XX.shape) # plot decision boundary and margins ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) # plot support vectors ax.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k')**Function to Find Error**# This function will provide the error def err_fun(model, df): # prediction with the learned model predicted_labels = model.predict(df.iloc[:,:-1]) error_count = 0 # comparison with actual label for i in range(df.shape[0]): if predicted_labels[i] != df.iloc[i,-1]: error_count = error_count + 1 # returns the error percentage return (error_count * 100 / df.shape[0])**Function to Train SVM: Linear Kernel**# This function will train the SVM and do all other needed operations def svm_link(df, c): # training model = svm.SVC(kernel='linear', C = c) model.fit(df.iloc[:,:-1], df.iloc[:,-1]) plt.figure(figsize=(9,7)) plt.title('Training Data, C = %s'%(c)) plot_fun(model, df, 'green', 'red') # support vector details print(f"{30*'==='}\n") print(f"Softmargin SVM with C = {c}\n") print(f"There are {len(model.support_vectors_)} support vectors in total.") # print(f"\nThey are as follows:\n") # for i in range(len(model.support_vectors_)): # print(f"{i+1}. {model.support_vectors_[i]}\tLamda = \ # {model.dual_coef_[0][i]/(df.iloc[model.support_[i],-1])}") print(f"\nTraining Error = {err_fun(model, df)} %\n")**Function to Train SVM: Non-linear Kernel (RBF)**# This function will train the SVM and do all other needed operations def svm_rbf(df, g): # training model = svm.SVC(kernel='rbf', gamma = g, C=1) model.fit(df.iloc[:,:-1], df.iloc[:,-1]) plt.figure(figsize=(9,7)) plt.title('Training Data, Gamma = %s'%(g)) plot_fun(model, df, 'blue', 'magenta') # support vector details print(f"{30*'==='}\n") print(f"SVM with RBF Kernel, Gamma = {g}\n") print(f"There are {len(model.support_vectors_)} support vectors in total.") # print(f"\nThey are as follows:\n") # for i in range(len(model.support_vectors_)): # print(f"{i+1}. {model.support_vectors_[i]}\tLamda = \ # {model.dual_coef_[0][i]/(df.iloc[model.support_[i],-1])}") print(f"\nTraining Error = {err_fun(model, df)} %\n")**Softmargin SVM with Linear Kernel, C = 1**svm_link(df, 1)========================================================================================== Softmargin SVM with C = 1 There are 59 support vectors in total. Training Error = 6.6350710900473935 %* Even though our data is not linearly separable, by allowing few misclassifications, we can come up with a linear model as above. Though not the best, this is a good model for our data. **RBF Kernel, Gamma = 1000**svm_rbf(df, 1000)========================================================================================== SVM with RBF Kernel, Gamma = 1000 There are 211 support vectors in total. 
Training Error = 0.0 %

* **All the datapoints were considered as support vectors** and **zero training error** is obtained, which is nothing but **overfitting** of the data.

**RBF Kernel, Gamma = 100**

svm_rbf(df, 100)

==========================================================================================

SVM with RBF Kernel, Gamma = 100

There are 209 support vectors in total.

Training Error = 0.0 %

* **The majority of the datapoints are regarded as support vectors** and the **training error is zero**, which again indicates **overfitting** of the data.

**RBF Kernel, Gamma = 10**

svm_rbf(df, 10)

==========================================================================================

SVM with RBF Kernel, Gamma = 10

There are 159 support vectors in total.

Training Error = 5.213270142180095 %

* With around 5% training error this is not a perfect overfit, but again the **majority of the datapoints are considered as support vectors**, so this model **would not perform well on unseen data**.

**RBF Kernel, Gamma = 1**

svm_rbf(df, 1)

==========================================================================================

SVM with RBF Kernel, Gamma = 1

There are 70 support vectors in total.

Training Error = 5.687203791469194 %

* Here there is around 5.6% training error, which indicates **no overfitting** of the data, and **only 70 datapoints were regarded as support vectors**, which suggests this is a good model. Also, a **non-linear hypersurface** is clearly visible on the plot, which gives an **intuition that this model will perform well on unseen data** too.

**RBF Kernel, Gamma = 0.1**

svm_rbf(df, .1)

==========================================================================================

SVM with RBF Kernel, Gamma = 0.1

There are 83 support vectors in total.

Training Error = 7.109004739336493 %

* Compared to the previous case, **more training error and more support vectors** were observed.

**RBF Kernel, Gamma = 0.01**

svm_rbf(df, .01)

==========================================================================================

SVM with RBF Kernel, Gamma = 0.01

There are 132 support vectors in total.

Training Error = 10.42654028436019 %

Find longest word in dictionary that is a subsequence of a given string

code-challenge
https://techdevguide.withgoogle.com/paths/foundational/find-longest-word-in-dictionary-that-subsequence-of-given-stringcode-challenge

The Challenge

Given a string S and a set of words D, find the longest word in D that is a subsequence of S. Word W is a subsequence of S if some number of characters, possibly zero, can be deleted from S to form W, without reordering the remaining characters.

Note: D can appear in any format (list, hash table, prefix tree, etc.).

For example, given the input of S = "abppplee" and D = {"able", "ale", "apple", "bale", "kangaroo"} the correct output would be "apple".

* The words "able" and "ale" are both subsequences of S, but they are shorter than "apple".
* The word "bale" is not a subsequence of S because even though S has all the right letters, they are not in the right order.
* The word "kangaroo" is the longest word in D, but it isn't a subsequence of S.

S = "abppplee"

# Sorting the set based on the length of the words
D = {"able", "ale", "apple", "bale", "kangaroo", "abppple"}
D = sorted(D, key=len, reverse=True)
print(D)

# Check each word in D to see if it is a possible subsequence...
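# The loop below is a greedy two-pointer scan: it walks through S once and advances
# the word pointer only on a character match, so the word is a subsequence of S
# exactly when the word pointer reaches the end of the word. Because D was sorted
# by length in descending order, the first match can be printed and the loop
# stopped early.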
for word in D:
    word_index = 0
    string_index = 0
    while word_index < len(word) and string_index < len(S):
        if word[word_index] == S[string_index]:
            word_index += 1
            string_index += 1
        else:
            string_index += 1
    if word_index == len(word) and string_index <= len(S):
        print(word)
        break

['kangaroo', 'abppple', 'apple', 'able', 'bale', 'ale']
abppple

```
#!/usr/bin/env python
import collections
import sys


def find_longest_word_in_string(letters, words):
    letter_positions = collections.defaultdict(list)

    # For each letter in 'letters', collect all the indices at which it appears.
    # O(letters) space and speed.
    for index, letter in enumerate(letters):
        letter_positions[letter].append(index)

    # For words, in descending order by length...
    # Bails out early on the first matched word, and within a word on impossible
    # letter/position combinations, but worst case is O(words * avg-len) *
    # O(letters / 26) time; constant space. With some work, this could be
    # O(words * avg-len) * log2(letters / 26), but since binary search has more
    # overhead than simple iteration, log2(letters) is about as expensive as
    # simple iteration as long as the array for each letter is "small". If
    # letters are randomly present in the search string, the log2 is about equal
    # in speed to simple traversal up to lengths of a few hundred characters.
    for word in sorted(words, key=lambda w: len(w), reverse=True):
        pos = 0
        for letter in word:
            if letter not in letter_positions:
                break

            # Find any remaining valid positions in the search string where this
            # letter appears. It would be better to do this with binary search,
            # but this is very Python-ic.
            possible_positions = [p for p in letter_positions[letter] if p >= pos]
            if not possible_positions:
                break
            pos = possible_positions[0] + 1
        else:
            # We didn't break out of the loop, so all letters have valid positions.
            return word


if __name__ == '__main__':
    print(find_longest_word_in_string(sys.argv[1], sys.argv[2:]))
```

string_splosion

Given a non-empty string like "Code" return a string like "CCoCodCode".

string_splosion('Code') → 'CCoCodCode'
string_splosion('abc') → 'aababc'
string_splosion('ab') → 'aab'

def string_splosion(str):
    output = ""
    for i in range(len(str)):
        output += str[:i+1]
    return output

print("string_splosion('Code') → 'CCoCodCode'")
print(f"output: {string_splosion('Code')}")
print("string_splosion('abc') → 'aababc'")
print(f"output: {string_splosion('abc')}")
print("string_splosion('ab') → 'aab'")
print(f"output: {string_splosion('ab')}")

string_splosion('Code') → 'CCoCodCode'
output: CCoCodCode
string_splosion('abc') → 'aababc'
output: aababc
string_splosion('ab') → 'aab'
output: aab

maxSpan

Consider the leftmost and rightmost appearances of some value in an array. We'll say that the "span" is the number of elements between the two, inclusive. A single value has a span of 1. Returns the largest span found in the given array.
(Efficiency is not a priority.)maxSpan([1, 2, 1, 1, 3]) → 4maxSpan([1, 4, 2, 1, 4, 1, 4]) → 6maxSpan([1, 4, 2, 1, 4, 4, 4]) → 6import collections def maxSpan(input): number_positions = collections.defaultdict(list) for index, number in enumerate(input): number_positions[number].append(index) output = 0 for number in number_positions: current_list = number_positions[number] if len(current_list) > 1: current_span = current_list[-1] - current_list[0] + 1 else: current_span = 1 if current_span > output: output = current_span return output print("maxSpan([1, 2, 1, 1, 3]) → 4") print(f"output: {maxSpan([1, 2, 1, 1, 3])}") print("maxSpan([1, 4, 2, 1, 4, 1, 4]) → 6") print(f"output: {maxSpan([1, 4, 2, 1, 4, 1, 4])}") print("maxSpan([1, 4, 2, 1, 4, 4, 4]) → 6") print(f"output: {maxSpan([1, 4, 2, 1, 4, 4, 4])}")maxSpan([1, 2, 1, 1, 3]) → 4 output: 4 maxSpan([1, 4, 2, 1, 4, 1, 4]) → 6 output: 6 maxSpan([1, 4, 2, 1, 4, 4, 4]) → 6 output: 6withoutStringGiven two strings, base and remove, return a version of the base string where all instances of the remove string have been removed (not case sensitive). You may assume that the remove string is length 1 or more. Remove only non-overlapping instances, so with "xxx" removing "xx" leaves "x".withoutString("Hello there", "llo") → "He there"withoutString("Hello there", "e") → "Hllo thr"withoutString("Hello there", "x") → "Hello there"def withoutString(base, remove): output = "" base_case_removed = base.lower() remove_case_removed = remove.lower() i = 0 while i < len(base_case_removed): if base_case_removed[i] != remove_case_removed[0]: output += base[i] i += 1 else: for j in range(len(remove_case_removed)): if base_case_removed[i+j] != remove_case_removed[j]: output += base[i] i += 1 break # If all the characters matched, dont append them to output if j == len(remove_case_removed)-1: i = i+j+1 return output print('withoutString("Hello there", "llo") → "He there"') print(f'output: {withoutString("Hello there", "llo")}') print('withoutString("Hello there", "e") → "Hllo thr"') print(f'output: {withoutString("Hello there", "e")}') print('withoutString("Hello there", "x") → "Hello there"') print(f'output: {withoutString("Hello there", "x")}')withoutString("Hello there", "llo") → "He there" output: He there withoutString("Hello there", "e") → "Hllo thr" output: Hllo thr withoutString("Hello there", "x") → "Hello there" output: Hello theresumNumbers Given a string, return the sum of the numbers appearing in the string, ignoring all other characters. A number is a series of 1 or more digit chars in a row. (Note: Character.isDigit(char) tests if a char is one of the chars '0', '1', .. '9'. 
Integer.parseInt(string) converts a string to an int.)sumNumbers("abc123xyz") → 123sumNumbers("aa11b33") → 44sumNumbers("7 11") → 18numerical_character = [str(i) for i in range(0, 10)] def sumNumbers(input_str): start_index = None sum = 0 for index, value in enumerate(input_str): # if start_index: if start_index is not None: if value not in numerical_character: sum += int(input_str[start_index: index]) start_index = None elif index == len(input_str)-1: sum += int(input_str[start_index:]) start_index = None elif value in numerical_character: start_index = index return sum print('sumNumbers("abc123xyz") → 123') print(f'output: {sumNumbers("abc123xyz")}' ) print('sumNumbers("aa11b33") → 44') print(f'output: {sumNumbers("aa11b33")}') print('sumNumbers("7 11") → 18') print(f'output: {sumNumbers("7 11")}')sumNumbers("abc123xyz") → 123 output: 123 sumNumbers("aa11b33") → 44 output: 44 sumNumbers("7 11") → 18 output: 18canBalanceGiven a non-empty array, return true if there is a place to split the array so that the sum of the numbers on one side is equal to the sum of the numbers on the other side.canBalance([1, 1, 1, 2, 1]) → truecanBalance([2, 1, 1, 2, 1]) → falsecanBalance([10, 10]) → truedef canBalance(input_list): total_sum = 0 for _ in input_list: total_sum += _ forward_sum = 0 for i in range(len(input_list)): forward_sum += input_list[i] if forward_sum == (total_sum - forward_sum): return True return False print('canBalance([1, 1, 1, 2, 1]) → true') print(f'output: {canBalance([1, 1, 1, 2, 1])}') print('canBalance([2, 1, 1, 2, 1]) → false') print(f'output: {canBalance([2, 1, 1, 2, 1])}') print('canBalance([10, 10]) → true') print(f'output: {canBalance([10, 10])}')canBalance([1, 1, 1, 2, 1]) → true output: True canBalance([2, 1, 1, 2, 1]) → false output: False canBalance([10, 10]) → true output: TrueProblem of the Day:Given an encoded string, return its decoded string.The encoding rule is: `k[encoded_string]`, where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like `3a or 2[4]`.```Example: Input: "3[a]2[bc]" Output: "aaabcbc" Input: "2[abc]3[cd]ef" Output: "abcabccdcdcdef" Input: "3[a2[c]]" Output: "accaccacc"```def encoded_string(input): stack = [] numbers = [str(i) for i in range(10)] for ch in input: if ch != ']': stack.append(ch) else: # Read non-numerical characters substring = [] popch = None if len(stack): popch = stack.pop() while popch != '[': substring += popch if len(stack): popch = stack.pop() substring = substring[::-1] # print(substring) # Read numerical characters k = [] popch = None if len(stack): popch = stack.pop() while popch in numbers: k += popch if len(stack): popch = stack.pop() else: break # Re-insert the last pop if stack is not empty. 
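# (Push-back step: the digit-reading loop above always pops one character beyond
#  the number whenever the stack still has content underneath it; that extra,
#  non-digit character belongs to the enclosing string, so it is returned to the
#  stack here before the decoded substring is appended.)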
if len(stack): stack.append(popch) k = int(''.join(k[::-1])) # print(k) tempstr = substring * k # print(tempstr) stack += tempstr return ''.join(stack)

import unittest

class TestStringMethods(unittest.TestCase):
    def test_case1(self):
        self.assertEqual(encoded_string("3[a]2[bc]"), "aaabcbc")
    def test_case2(self):
        self.assertEqual(encoded_string("2[abc]3[cd]ef"), "abcabccdcdcdef")
    def test_case3(self):
        self.assertEqual(encoded_string("3[a2[c]]"), "accaccacc")

if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)

...
----------------------------------------------------------------------
Ran 3 tests in 0.004s

OK

Copyright Pierian Data Inc. For more information, visit us at www.pieriandata.com

KNN Project Exercise - Solutions

Due to the simplicity of KNN for Classification, let's focus on using a Pipeline and a GridSearchCV tool, since these skills can be generalized for any model.

The Sonar Data: Detecting a Rock or a Mine

Sonar (sound navigation ranging) is a technique that uses sound propagation (usually underwater, as in submarine navigation) to navigate, communicate with or detect objects on or under the surface of the water, such as other vessels. The data set contains the response metrics for 60 separate sonar frequencies sent out against a known mine field (and known rocks). These frequencies are then labeled with the known object they were beaming the sound at (either a rock or a mine). Our main goal is to create a machine learning model capable of detecting the difference between a rock and a mine based on the response of the 60 separate sonar frequencies.

Data Source: https://archive.ics.uci.edu/ml/datasets/Connectionist+Bench+(Sonar,+Mines+vs.+Rocks)

Complete the Tasks in bold

**TASK: Run the cells below to load the data.**

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('../DATA/sonar.all-data.csv')
df.head()

Data Exploration

**TASK: Create a heatmap of the correlation between the different frequency responses.**

# CODE HERE
plt.figure(figsize=(8,6))
sns.heatmap(df.corr(),cmap='coolwarm')

**TASK: What are the top 5 correlated frequencies with the target/label?**

*Note: You may need to map the label to 0s and 1s.*

*Additional Note: We're looking for **absolute** correlation values.*

#CODE HERE
df['Target'] = df['Label'].map({'R':0,'M':1})
np.abs(df.corr()['Target']).sort_values().tail(6)

Train | Test Split

Our approach here will be one of using Cross Validation on 90% of the dataset, and then judging our results on a final test set of 10% to evaluate our model.

**TASK: Split the data into features and labels, and then split into a training set and test set, with 90% for Cross-Validation training, and 10% for a final test set.**

# CODE HERE
from sklearn.model_selection import train_test_split

X = df.drop(['Target','Label'],axis=1)
y = df['Label']
X_cv, X_test, y_cv, y_test = train_test_split(X, y, test_size=0.1, random_state=42)

**TASK: Create a Pipeline that contains both a StandardScaler and a KNN model**

from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

scaler = StandardScaler()
knn = KNeighborsClassifier()
operations = [('scaler',scaler),('knn',knn)]

from sklearn.pipeline import Pipeline
pipe = Pipeline(operations)

**TASK: Perform a grid-search with the pipeline to test various values of k and report back the best performing parameters.**

from sklearn.model_selection import GridSearchCV

k_values = list(range(1,30))
param_grid = {'knn__n_neighbors': k_values}
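# Note on the parameter name: when a Pipeline is passed to GridSearchCV, grid keys
# use the "<step name>__<parameter>" convention, so 'knn__n_neighbors' targets the
# n_neighbors parameter of the step registered above as ('knn', knn). Each candidate
# k is then evaluated with 5-fold cross-validation, with scaling refit inside each fold.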
full_cv_classifier = GridSearchCV(pipe,param_grid,cv=5,scoring='accuracy') full_cv_classifier.fit(X_cv,y_cv) full_cv_classifier.best_estimator_.get_params()**(HARD) TASK: Using the .cv_results_ dictionary, see if you can create a plot of the mean test scores per K value.**#CODE HERE full_cv_classifier.cv_results_['mean_test_score'] scores = full_cv_classifier.cv_results_['mean_test_score'] plt.plot(k_values,scores,'o-') plt.xlabel("K") plt.ylabel("Accuracy")Final Model Evaluation**TASK: Using the grid classifier object from the previous step, get a final performance classification report and confusion matrix.**#Code Here pred = full_cv_classifier.predict(X_test) from sklearn.metrics import classification_report,confusion_matrix,accuracy_score confusion_matrix(y_test,pred) print(classification_report(y_test,pred))precision recall f1-score support M 0.92 0.92 0.92 13 R 0.88 0.88 0.88 8 accuracy 0.90 21 macro avg 0.90 0.90 0.90 21 weighted avg 0.90 0.90 0.90 21Inference notebook for the ResNet-50 model trained on ImageNetThis notebook demonstrates inference with a ResNet-50 model trained on the ImageNet dataset. In this notebook you will be able to run inference on some sample images and measure the latency of the model First let's set some variablesFill in below the values for the path to your code and modle checkpoint, which GPU you will be using (specify -1 to use the CPU). You can refer to the training recipe to access a checkpoint if you haven't trained the model yourself.# specify the gpu to use import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' # path to tensorflow/models repository import sys sys.path.append("/ebs/code/models/") import tensorflow as tf import numpy as np CHECKPOINT_PATH = "./output-v4/" # select checkpoint# selec checkpoint_file = os.path.join(CHECKPOINT_PATH, 'checkpoint') with open(checkpoint_file,'r') as chkf: last_chkpnt = chkf.readline().split(' ')[1][1:-2] last_chkpnt = os.path.join(CHECKPOINT_PATH, last_chkpnt) print("loading checkpoint", last_chkpnt)loading checkpoint ./output-v4/model.ckpt-500000Load the modelfrom official.resnet import resnet_model from preprocess import preprocess_for_eval, _DEFAULT_IMAGE_SIZE, _RESIZE_SIDE_MIN image_data = tf.placeholder(tf.string) features = preprocess_for_eval(image_data, _DEFAULT_IMAGE_SIZE, _DEFAULT_IMAGE_SIZE, _RESIZE_SIDE_MIN) features = tf.expand_dims(features, 0) model = resnet_model.Model( resnet_size=50, bottleneck=True, num_classes=1000, num_filters=64, kernel_size=7, conv_stride=2, first_pool_size=3, first_pool_stride=2, block_sizes=[3, 4, 6, 3], block_strides=[1, 2, 2, 2], final_size=2048, resnet_version=2, data_format=None, dtype=tf.float32) logits = model(features, False) sess = tf.Session() saver = tf.train.Saver() saver.restore(sess, last_chkpnt) # load imagenet information with open("/ebs/code/models/research/inception/inception/data/imagenet_lsvrc_2015_synsets.txt", "r") as fh: synsets = [l.strip() for l in fh] with open("/ebs/code/models/research/inception/inception/data/imagenet_metadata.txt", "r") as fh: synset2name = dict([l.strip().split("\t") for l in fh])Run inference on sample imagesModify the filename below to try out the model on some sample images. 
We will show the topK predictions of the model.IMAGE_FILENAME = "./puppy.jpg" from IPython.display import Image, display display(Image(filename=IMAGE_FILENAME, width=500)) with open(IMAGE_FILENAME, "rb") as fh: x = fh.read() r = sess.run([logits], feed_dict={image_data: x}) l = r[0].flatten() for i in np.argsort(l)[::-1][:5]: print('%.2f\t%s' % (l[i], synset2name[synsets[i+1]]))Measure the model latencyWe measure how long it takes to run inference on the model with a batch size of 1. This scenario is close to how the model is deployed in production. In order to benchmark performance on the CPU, you can set the CUDA_VISIBLE_DEVICES environment variable to -1.Measurements on an AWS p3.2xlarge machine* GPU: avg time = 11ms (+/- 0ms)from time import time as now num_steps = 20 num_steps_burn_in = 5 durations = [] for i in range(num_steps + num_steps_burn_in): start_time = now() _ = sess.run([logits], feed_dict={image_data: x}) duration = now() - start_time if i >= num_steps_burn_in: durations.append(duration) durations = np.array(durations) print("avg time = %dms (+/- %dms)" % (1000*durations.mean(), 1000*durations.std()))avg time = 11ms (+/- 0ms)Run this cell to set your notebook up on Google Colab!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 !git clone https://github.com/yfletberliac/rlss2019-hands-on.git > /dev/null 2>&1 !pip install -q torch==1.1.0 torchvision pyvirtualdisplay piglet > /dev/null 2>&1Deep Q Networks------------You can find the original paper [here](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf). Preliminaries: Q Learning Q-Value **Q-Value** is a measure of the overall expected reward assuming the agent is in state $s$ and performs action $a$, and then continues playing until the end of the episode following some policy $\pi$. It is defined mathematically as:\begin{equation}Q^{\pi}\left(s_{t}, a_{t}\right)=E\left[R_{t+1}+\gamma R_{t+2}+\gamma^{2} R_{t+3}+\ldots | s_{t}, a_{t}\right]\end{equation}where $R_{t+1}$ is the immediate reward received after performing action $a_{t}$ in state $s_{t}$ and $\gamma$ is the discount factor and controls the importance of the future rewards versus the immediate ones: the lower the discount factor is, the less important future rewards are. Bellman Optimality Equation Formally, the Bellman equation defines the relationships between a given state (or, in our case, a **state-action pair**) and its successors. 
While many forms exist, one of the most common is the **Bellman Optimality Equation** for the optimal **Q-Value**, which is given by:\begin{equation}Q^{*}(s, a)=\sum_{s^{\prime}, r} p\left(s^{\prime}, r | s, a\right)\left[r+\gamma \max _{a^{\prime}} Q^{*}\left(s^{\prime}, a^{\prime}\right)\right]\end{equation}Of course, when no uncertainty exists (transition probabilities are either 0 or 1), we have:\begin{equation}Q^{*}(s, a)=r(s, a)+\gamma \max _{a^{\prime}} Q^{*}\left(s^{\prime}, a^{\prime}\right)\end{equation} Q-Value Iteration We define the corresponding Bellman backup operator:\begin{equation}[\mathcal{T} Q]\left(s, a\right)=r(s, a)+\gamma \max _{a^{\prime}} Q\left(s^{\prime}, a^{\prime}\right)\end{equation} $Q^{*}$ is a fixed point of $\mathcal{T}$:\begin{equation}\mathcal{T} Q^{*}=Q^{*}\end{equation} If we apply the Bellman operator $\mathcal{T}$ repeatedly to any initial $Q$, the sequence converges to $Q^{*}$:\begin{equation}Q, \mathcal{T} Q, \mathcal{T}^{2} Q, \cdots \rightarrow Q^{*}\end{equation} Importsimport sys sys.path.insert(0, './rlss2019-hands-on/utils') # If using the Docker image, replace by: # sys.path.insert(0, '../utils') import gym, random, os.path, math, glob, csv, base64 from pathlib import Path from timeit import default_timer as timer from datetime import timedelta import numpy as np import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import matplotlib %matplotlib inline from qfettes_plot import plot_all_data from qfettes_wrappers import * from openai_wrappers import make_atari, wrap_deepmind from gym.wrappers import Monitor from pyvirtualdisplay import Display from IPython import display as ipythondisplay from IPython.display import clear_output------------ Deep Q learning Usually in Deep RL, the **Q-Value** is defined as $Q(s,a;\theta)$ where $\theta$ represents the parameters of the function approximation used. For *MuJoCo* or *Roboschool* environments, we usually use a simple 2- or 3-layer MLP, whereas when using **raw pixels for observations** such as in *Atari 2600* games, we usually use a 1-, 2- or 3-layer CNN. In our case, since we want to train DQN on *CartPole*, we will use a 3-layer perceptron for our function approximation. Network declaration In this section, we build the $Q(s,a;\theta)$ function approximation. Since the input is composed of 4 scalars, namely [position of cart, velocity of cart, angle of pole, rotation rate of pole], we build a FCN -> ReLU -> FCN -> ReLU -> FCN neural network. As an exercise, change the architecture of the network: 1. Change the 1st fully-connected layer from 8 hidden neurons to 16. 2. Create `self.fc2` in `__init__` with 16 neurons. 3. Create `self.fc3` with `self.num_actions` as the output size. 4. Add it to the network in `forward` with no activation function.class DQN(nn.Module): def __init__(self, input_shape, num_actions): super().__init__() self.input_shape = input_shape self.num_actions = num_actions self.fc1 = nn.Linear(self.input_shape[0], 8) self.fc2 = ... self.fc3 = ... def forward(self, x): x = F.relu(self.fc2(F.relu(self.fc1(x)))) x = ... return xSafety checks Network architecture As a *safety check*, inspect the resulting network in the next cell. For instance, the total number of trainable parameters should change with the architecture.
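For reference, one possible completion of the blanks above is sketched below (not the official solution). The layer sizes here are chosen to match the safety-check output printed in the next cell (8 hidden units per layer, 130 parameters); swap in 16 units if you follow the exercise instructions instead.
```
import torch.nn as nn
import torch.nn.functional as F

class DQN(nn.Module):
    def __init__(self, input_shape, num_actions):
        super().__init__()
        self.input_shape = input_shape
        self.num_actions = num_actions
        # 4 observation scalars -> 8 hidden units (use 16 to follow the exercise text)
        self.fc1 = nn.Linear(self.input_shape[0], 8)
        # Second hidden layer
        self.fc2 = nn.Linear(8, 8)
        # One output per action, no activation: raw Q-values
        self.fc3 = nn.Linear(8, self.num_actions)

    def forward(self, x):
        x = F.relu(self.fc2(F.relu(self.fc1(x))))
        x = self.fc3(x)
        return x
```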
Check the correctness of `in_features` and `out_features`.env_id = 'CartPole-v0' env = gym.make(env_id) network = DQN(env.observation_space.shape, env.action_space.n) print("Observation space:\n", env.observation_space.shape, "\n") print("Network architecture:\n", network, "\n") model_parameters = filter(lambda p: p.requires_grad, network.parameters()) print("Total number of trainable parameters:\n", sum([np.prod(p.size()) for p in model_parameters]))Observation space: (4,) Network architecture: DQN( (fc1): Linear(in_features=4, out_features=8, bias=True) (fc2): Linear(in_features=8, out_features=8, bias=True) (fc3): Linear(in_features=8, out_features=2, bias=True) ) Total number of trainable parameters: 130Run a Policy with Random Actions What does the working environment look like? It's always useful to know the details of the environment you train your policy on: its dynamics, the size of the action and observation spaces, etc. Below we display a couple of episodes of a random policy on `CartPole-v0`.display = Display(visible=0, size=(1400, 900)) display.start() def show_video(): html = [] for mp4 in Path("videos").glob("*.mp4"): video_b64 = base64.b64encode(mp4.read_bytes()) html.append('''<video alt="{}" autoplay loop controls style="height: 400px;"><source src="data:video/mp4;base64,{}" type="video/mp4" /></video>'''.format(mp4, video_b64.decode('ascii'))) ipythondisplay.display(ipythondisplay.HTML(data="<br>
".join(html))) env = Monitor(env, './videos', force=True, video_callable=lambda episode: True) for episode in range(2): done = False obs = env.reset() while not done: action = env.action_space.sample() obs, reward, done, info = env.step(action) env.close() show_video()xdpyinfo was not found, X start can not be checked! Please install xdpyinfo!We can see the episode ending prematurely because the pole drops. -----**Question**:It is also important to identify some of the characteristics of the problem. `CartPole-v0` can be described as a **fully-observable**, **deterministic**, **continuous state space**, with a **discrete action space** and **frequent rewards**. Take some time to understand each of these terms :-) Try to find the opposite term for each of them, e.g. deterministic stochastic. Experience Replay Memory As usual RL tasks have no pre-generated training sets which they can learn from, in off-policy learning, our agent must keep records of all the state-transitions it encountered so it can **learn from them later**. The memory-buffer used to store this is often referred to as the **Experience Replay Memory**. There are several types and architectures of these memory buffers — but some very common ones are:- the *cyclic memory buffers*: they make sure the agent keeps training over its new behavior rather than things that might no longer be relevant- the *reservoir-sampling-based memory buffers*: they guarantee each state-transition recorded has an even probability to be inserted to the bufferWe use a combination of both.In `push`:1. Append the transition to memory2. Create the if statement which deletes an old transition from the memoryclass ExperienceReplayMemory: def __init__(self, capacity): self.capacity = capacity self.memory = [] def push(self, transition): # Append the transition below ... # Now, we need an `if` statement in order to keep the capacity to its limit. Write it below. # Hint: `del something` will delete something if something is an array if ...: raise NotImplementedError def sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory)------------ Now we have:- the **DQN** network,- the **ExperienceReplayMemory**.Let's build the **Agent** class ! Agent declaration In the cell below:1. Create `self.target_model` in `declare_networks`2. Complete the epsilon-greedy algorithm in `get_action`class Agent(object): def __init__(self, config, env, log_dir='/tmp/gym'): self.log_dir = log_dir self.rewards = [] self.action_log_frequency = config.ACTION_SELECTION_COUNT_FREQUENCY self.action_selections = [0 for _ in range(env.action_space.n)] # Define the DQN networks def declare_networks(self): self.model = DQN(self.num_feats, self.num_actions) # Create `self.target_model` with the same network architecture self.target_model = ... 
raise NotImplementedError # Define the Replay Memory def declare_memory(self): self.memory = ExperienceReplayMemory(self.experience_replay_size) # Append the new transition to the Replay Memory def append_to_replay(self, s, a, r, s_): self.memory.push((s, a, r, s_)) # Sample transitions from the Replay Memory def sample_minibatch(self): transitions = self.memory.sample(self.batch_size) batch_state, batch_action, batch_reward, batch_next_state = zip(*transitions) shape = (-1,)+self.num_feats batch_state = torch.tensor(batch_state, device=self.device, dtype=torch.float).view(shape) batch_action = torch.tensor(batch_action, device=self.device, dtype=torch.long).squeeze().view(-1, 1) batch_reward = torch.tensor(batch_reward, device=self.device, dtype=torch.float).squeeze().view(-1, 1) non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch_next_state)), device=self.device, dtype=torch.uint8) # Sometimes all next states are false try: non_final_next_states = torch.tensor([s for s in batch_next_state if s is not None], device=self.device, dtype=torch.float).view(shape) empty_next_state_values = False except: non_final_next_states = None empty_next_state_values = True return batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values # Sample action def get_action(self, s, eps=0.1): with torch.no_grad(): # Epsilon-greedy if np.random.random() >= eps: X = torch.tensor([s], device=self.device, dtype=torch.float) a = self.model(X).max(1)[1].view(1, 1) return a.item() else: ...-----**Question**:Remember we define the objective function as\begin{equation}J=\left(r+\gamma \max _{a^{\prime}} Q\left(s^{\prime}, a^{\prime}, \mathbf{\theta}^{-}\right)-Q(s, a, \mathbf{\theta})\right)^{2},\end{equation}where $\theta^{-}$ are the target parameters.Why do we need a target network in the first place ? Learning In the cell below, and from the above objective fonction:1. Write the value `expected_q_values`2. Write `diff`3. The `update` function needs some workclass Learning(Agent): def __init__(self, env=None, config=None, log_dir='/tmp/gym'): super().__init__(config=config, env=env, log_dir=log_dir) # Compute loss from the Bellman Optimality Equation def compute_loss(self, batch_vars): batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values = batch_vars # Estimate current_q_values = self.model(batch_state).gather(1, batch_action) # Target with torch.no_grad(): max_next_q_values = torch.zeros(self.batch_size, device=self.device, dtype=torch.float).unsqueeze(dim=1) if not empty_next_state_values: max_next_action = self.get_max_next_state_action(non_final_next_states) max_next_q_values[non_final_mask] = self.target_model(non_final_next_states).gather(1, max_next_action) # From the equation above, write the value `expected_q_values`. expected_q_values = ... # From the equation above, write the value `diff`. diff = ... loss = self.MSE(diff) loss = loss.mean() raise NotImplementedError return loss # Update both networks (the agent and the target) def update(self, s, a, r, s_, sample_idx=0): self.append_to_replay(s, a, r, s_) # When not to update ? # There is a concise way to write to skip the update, fill in the 2 blanks in the `if` statement below. # Hint: the sample count should be < the learn_start hyperparameter and respect the update_freq. if ... 
or ...: raise NotImplementedError return None batch_vars = self.sample_minibatch() loss = self.compute_loss(batch_vars) # Optimize the model self.optimizer.zero_grad() loss.backward() for param in self.model.parameters(): param.grad.data.clamp_(-1, 1) self.optimizer.step() self.update_target_model() self.save_td(loss.item(), sample_idx) def update_target_model(self): # Copy weights from model to target_model following `target_net_update_freq`. self.update_count+=1 if self.update_count % self.target_net_update_freq == 0: self.target_model.load_state_dict(self.model.state_dict())Model declarationclass Model(Learning): def __init__(self, env=None, config=None, log_dir='/tmp/gym'): super().__init__(config=config, env=env, log_dir=log_dir) self.device = config.device # Hyperparameters self.gamma = config.GAMMA self.target_net_update_freq = config.TARGET_NET_UPDATE_FREQ self.experience_replay_size = config.EXP_REPLAY_SIZE self.batch_size = config.BATCH_SIZE self.learn_start = config.LEARN_START self.update_freq = config.UPDATE_FREQ # Environment specific parameters self.num_feats = env.observation_space.shape self.num_actions = env.action_space.n self.env = env self.declare_networks() self.declare_memory() self.target_model.load_state_dict(self.model.state_dict()) self.optimizer = optim.Adam(self.model.parameters(), lr=config.LR) # Move to correct device self.model = self.model.to(self.device) self.target_model.to(self.device) self.model.train() self.target_model.train() self.update_count = 0 def save_td(self, td, tstep): with open(os.path.join(self.log_dir, 'td.csv'), 'a') as f: writer = csv.writer(f) writer.writerow((tstep, td)) def get_max_next_state_action(self, next_states): return self.target_model(next_states).max(dim=1)[1].view(-1, 1) def MSE(self, x): return 0.5 * x.pow(2) def save_reward(self, reward): self.rewards.append(reward) def save_action(self, action, tstep): self.action_selections[int(action)] += 1.0/self.action_log_frequency if (tstep+1) % self.action_log_frequency == 0: with open(os.path.join(self.log_dir, 'action_log.csv'), 'a') as f: writer = csv.writer(f) writer.writerow(list([tstep]+self.action_selections)) self.action_selections = [0 for _ in range(len(self.action_selections))] def save_w(self): if not os.path.exists("../saved_agents"): os.makedirs("../saved_agents") torch.save(self.model.state_dict(), '../saved_agents/model.dump') torch.save(self.optimizer.state_dict(), '../saved_agents/optim.dump')Hyperparametersclass Config(object): def __init__(self): self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Main agent variables self.GAMMA=0.99 self.LR=1e-3 # Epsilon variables self.epsilon_start = 1.0 self.epsilon_final = 0.01 self.epsilon_decay = 10000 self.epsilon_by_sample = lambda sample_idx: config.epsilon_final + (config.epsilon_start - config.epsilon_final) * math.exp(-1. 
* sample_idx / config.epsilon_decay) # Memory self.TARGET_NET_UPDATE_FREQ = 1000 self.EXP_REPLAY_SIZE = 10000 self.BATCH_SIZE = 64 # Learning control variables self.LEARN_START = 1000 self.MAX_SAMPLES = 50000 self.UPDATE_FREQ = 1 # Data logging parameters self.ACTION_SELECTION_COUNT_FREQUENCY = 1000 config = Config()Trainingimport gym from openai_monitor import Monitor from IPython import display import matplotlib import matplotlib.pyplot as plt %matplotlib inline start=timer() log_dir = "/tmp/gym/" try: os.makedirs(log_dir) except OSError: files = glob.glob(os.path.join(log_dir, '*.monitor.csv')) \ + glob.glob(os.path.join(log_dir, '*td.csv')) \ + glob.glob(os.path.join(log_dir, '*action_log.csv')) for f in files: os.remove(f) env_id = 'CartPole-v0' env = gym.make(env_id) env = Monitor(env, os.path.join(log_dir, env_id)) model = Model(env=env, config=config, log_dir=log_dir) episode_reward = 0 observation = env.reset() for sample_idx in range(1, config.MAX_SAMPLES + 1): epsilon = config.epsilon_by_sample(sample_idx) action = model.get_action(observation, epsilon) # Log action selection model.save_action(action, sample_idx) prev_observation=observation observation, reward, done, _ = env.step(action) observation = None if done else observation model.update(prev_observation, action, reward, observation, sample_idx) episode_reward += reward if done: observation = env.reset() model.save_reward(episode_reward) episode_reward = 0 if sample_idx % 1000 == 0: try: clear_output(True) plot_all_data(log_dir, env_id, 'DQN', config.MAX_SAMPLES, bin_size=(10, 100, 100, 1), smooth=1, time=timedelta(seconds=int(timer()-start)), ipynb=True) except IOError: pass model.save_w() env.close()By observing the plots, does the learning appear to be stable?If your answer is *yes*, then start a second run, and a third, with the same hyperparameters. ;-)You have just faced reproducibility concerns, which is quite a serious problem in deep RL and which can be dealt with by e.g. running your experiments on a sufficient number of seeds (~ 6-8 min.) Visualize the agentfrom gym.wrappers import Monitor # Loading the agent fname_model = "../saved_agents/model.dump" fname_optim = "../saved_agents/optim.dump" log_dir = "/tmp/gym/" model = Model(env=env, config=config, log_dir=log_dir) if os.path.isfile(fname_model): model.model.load_state_dict(torch.load(fname_model)) model.target_model.load_state_dict(model.model.state_dict()) if os.path.isfile(fname_optim): model.optimizer.load_state_dict(torch.load(fname_optim)) env_id = 'CartPole-v0' env = gym.make(env_id) env = Monitor(env, './videos', force=True, video_callable=lambda episode: True) for episode in range(3): done = False obs = env.reset() while not done: action = model.get_action(obs) obs, _, done, _ = env.step(action) env.close() show_video()Продвинутое машинное обучение Домашнее задание 3 Третье домашнее задание посвящено достаточно простой, но, надеюсь, интересной задаче, в которой потребуется творчески применить методы сэмплирования. Как и раньше, в качестве решения **ожидается ссылка на jupyter-ноутбук на вашем github (или публичный, или с доступом для snikolenko); ссылку обязательно нужно прислать в виде сданного домашнего задания на портале Академии**. Как всегда, любые комментарии, новые идеи и рассуждения на тему категорически приветствуются. В этом небольшом домашнем задании мы **попробуем улучшить метод Шерлока Холмса**. 
Как известно, в рассказе *The Adventure of the Dancing Men* великий сыщик расшифровал загадочные письмена, которые выглядели как пляшущие человечки. Пользовался он для этого так называемым частотным методом: смотрел, какие буквы чаще встречаются в зашифрованных текстах, и пытался подставить буквы в соответствии с частотной таблицей: E — самая частая и так далее. В этом задании мы будем разрабатывать более современный и продвинутый вариант такого частотного метода. В качестве корпусов текстов для подсчётов частот можете взять что угодно, но для удобства вот вам “Война и мир” по-русски и по-английски:https://www.dropbox.com/s/k23enjvr3fb40o5/corpora.zip Часть 1 Задание Реализуйте базовый частотный метод по Шерлоку Холмсу:* подсчитайте частоты букв по корпусам (пунктуацию и капитализацию можно просто опустить, а вот пробелы лучше оставить);* возьмите какие-нибудь тестовые тексты (нужно взять по меньшей мере 2-3 предложения, иначе вряд ли сработает), зашифруйте их посредством случайной перестановки символов;* расшифруйте их таким частотным методом. Решение Импорт библиотекimport os import re import random from collections import Counter, defaultdict from copy import copy import numpy as np import pandas as pd from nltk import everygrams from nltk.tokenize import RegexpTokenizer from tqdm.notebook import tqdm np.random.seed(4)Загрузка данныхif not os.path.exists("/content/corpora.zip"): !wget -q https://www.dropbox.com/s/k23enjvr3fb40o5/corpora.zip !unzip -oq corpora.zip FILES = ["AnnaKarenina.txt", "WarAndPeace.txt"] corpus = [] for filename in FILES: with open(filename, "r") as fin: corpus += fin.readlines() corpus = " ".join(corpus)РасшифровкаALPHABET = " абвгдежзийклмнопрстуфхцчшщъыьэюя" def tokenize(text, alphabet=ALPHABET, tokenizer=RegexpTokenizer(r"\w+")): text = text.lower() # Filter characters not in alphabet: text = "".join([c for c in text if c in alphabet]) return " ".join(tokenizer.tokenize(text)) def get_ngram_freqs(text, n_gram=1): if n_gram > 1: text = [ "".join(ngram) for ngram in everygrams(text, min_len=n_gram, max_len=n_gram) ] freqs = { k: v / len(text) for k, v in Counter(text).items() if v > 0 # remove zeros, because why do we need them? 
} return freqs def generate_mapping(freqs): original = list(freqs.keys()) replacements = np.random.choice(original, replace=False, size=len(freqs)) mapping = { original_char: replacement_char for original_char, replacement_char in zip(original, replacements) } return mapping def apply_mapping(text, mapping): return "".join([mapping.get(c, "ь") for c in text]) def get_reverse_mapping(corpus_freqs, text_freqs): """Нахождение ближайшего по частотности символа.""" corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) text_freqs_sorted = sorted(text_freqs.items(), key=lambda x: x[1], reverse=True) reverse_mapping = {} for text_char, text_freq in text_freqs_sorted: min_diff = 1.0 # maximum possible frequency best_char = None for corpus_char, corpus_freq in corpus_freqs_sorted: diff = abs(corpus_freq - text_freq) if diff < min_diff: best_char = corpus_char min_diff = diff reverse_mapping[text_char] = best_char corpus_freqs_sorted = [ (char, freq) for char, freq in corpus_freqs_sorted if char != best_char ] return reverse_mapping def character_accuracy(text1, text2): assert len(text1) == len(text2) matching_chars = sum((c1 == c2) for c1, c2 in zip(text1, text2)) return matching_chars / len(text1) tokenized_corpus = tokenize(corpus) corpus_freqs = get_ngram_freqs(tokenized_corpus, n_gram=1) mapping = generate_mapping(corpus_freqs) text = """ Наша работа во тьме —\n Мы делаем, что умеем,\n Мы отдаем, что имеем,\n Наша работа – во тьме.\n Сомнения стали страстью,\n А страсть стала судьбой.\n Все остальное — искусство\n В безумии быть собой.\n \n Хочется закрыть глаза. Это нормально. Цветной калейдоскоп, блестки, искрящийся звездный вихрь – красиво, но я знаю, что стоит за этой красотой.\n Глубина. Ее называют «дип», но мне кажется, что по-русски слово звучит правильнее. Заменяет красивый ярлычок предупреждением. Глубина! Здесь водятся акулы и спруты. Здесь тихо – и давит, давит, давит бесконечное пространство, которого на самом деле нет.\n В общем-то она добрая, глубина. По-своему, конечно. Она принимает любого. Чтобы нырнуть, нужно не много сил. Чтобы достичь дна и вернуться – куда больше. В первую очередь надо помнить – глубина мертва без нас. Надо и верить в нее, и не верить. Иначе настанет день, когда не удастся вынырнуть. """ tokenized_text = tokenize(" ".join(text.split("\n"))) encoded_text = apply_mapping(tokenized_text, mapping) text_freqs = get_ngram_freqs(encoded_text) tokenized_text encoded_text reverse_mapping = get_reverse_mapping(corpus_freqs, text_freqs) decoded_text = apply_mapping(encoded_text, reverse_mapping) decoded_text character_accuracy(tokenized_text, decoded_text)Текст нечитаем, посимвольная точность расшифровки оставляет желать много лучшего. Часть 2 Задание Вряд ли в результате получилась такая уж хорошая расшифровка, разве что если вы брали в качестве тестовых данных целые рассказы. Но и Шерлок Холмс был не так уж прост: после буквы E, которая действительно выделяется частотой, дальше он анализировал уже конкретные слова и пытался угадать, какими они могли бы быть. Я не знаю, как запрограммировать такой интуитивный анализ, так что давайте просто сделаем следующий логический шаг:* подсчитайте частоты *биграмм* (т.е. пар последовательных букв) по корпусам;* проведите тестирование аналогично п. 1, но при помощи биграмм. 
В качестве естественной метрики качества можно взять долю правильно расшифрованных букв или, если хочется совсем математически изощриться, расстояние между двумя перестановками, правильной и полученной из модели; но, честно говоря, в этом задании следить за численными метриками не так уж обязательно, будет и глазами всё видно. Решениеcorpus_freqs_bigram = get_ngram_freqs(tokenized_corpus, n_gram=2) text_freqs_bigram = get_ngram_freqs(encoded_text, n_gram=2) corpus_freqs_sorted = sorted( corpus_freqs_bigram.items(), key=lambda x: x[1], reverse=True ) text_freqs_sorted = sorted(text_freqs_bigram.items(), key=lambda x: x[1], reverse=True) def get_reverse_mapping_ngram(corpus_freqs, text_freqs, n_gram=1): corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) text_freqs_sorted = sorted(text_freqs.items(), key=lambda x: x[1], reverse=True) # Iterate over n-grams starting from the most frequent. On each iteration # we take into account already decoded symbols: reverse_mapping = {} for i, (text_ngram, text_freq) in enumerate(text_freqs_sorted): filtered_freqs = copy(corpus_freqs_sorted) for j in range(n_gram): if text_ngram[j] in reverse_mapping: filtered_freqs = [ (ngram, freq) for ngram, freq in filtered_freqs if ngram[j] == reverse_mapping[text_ngram[j]] ] min_diff = 1.0 # maximum possible frequency best_ngram = None for ngram, freq in filtered_freqs: diff = abs(freq - text_freq) if diff < min_diff: best_ngram = ngram min_diff = diff for j in range(n_gram): if text_ngram[j] not in reverse_mapping: reverse_mapping[text_ngram[j]] = best_ngram[j] return reverse_mapping reverse_mapping_bigram = get_reverse_mapping_ngram( corpus_freqs_bigram, text_freqs_bigram, n_gram=2 ) decoded_text = apply_mapping(encoded_text, reverse_mapping_bigram) decoded_text character_accuracy(tokenized_text, decoded_text)Качество слегка ухудшилось. Объяснить это можно тем, что биграмм намного больше, чем униграмм, попасть по частоте в правильные — сложнее. Часть 3 Задание Но и это ещё не всё: биграммы скорее всего тоже далеко не всегда работают. Основная часть задания — в том, как можно их улучшить:* предложите метод обучения перестановки символов в этом задании, основанный на MCMC-сэмплировании, но по-прежнему работающий на основе статистики биграмм;* реализуйте и протестируйте его, убедитесь, что результаты улучшились. Решение Текст, разбитый на биграммы — это, по сути, марковская цепь. Частота биграмм — вероятность перехода между состояниями цепи.Для обучения перестановки будем считать вероятность порождения именно такого текста как произведение вероятностей всех биграмм, в него входящих.Всего перестановок очень много, поэтому будем использовать жадный алгоритм, основанный на идее MCMC-сэмплирования.Алгоритм:1. Инициализируем перестановки, восстанавливаем текст и вычисляем логарифм правдоподобия $p_{current}$.2. Меняем местами пару символов для перестановки.3. Восстанавливаем текст с новой перестановкой и вычисляем $p_{proposed}$.4. Принимаем новую перестановку с "вероятностью" $\displaystyle p_{accept} = \frac{p_{proposed}}{p_{current}}$.5. 
Возвращаемся к пункту 2 и повторяем цикл.Мы можем уйти немного не туда и застрять не в том максимуме, поэтому на каждой итерации алгоритма будем делать много попыток и брать в качестве окончательного результата лучшую из них.def get_ngram_freqs_smoothed(text, n_gram=2): vocab_size = len(set(text)) ** n_gram if n_gram > 1: text = [ "".join(ngram) for ngram in everygrams(text, min_len=n_gram, max_len=n_gram) ] freqs = { k: (v + 1) / (len(text) + vocab_size) # сглаживаем, чтобы не было нулей for k, v in Counter(text).items() } return freqs def get_text_proba(text, mapping, freqs, n_gram=2, alphabet=ALPHABET): decoded_text = apply_mapping(text, mapping) log_proba = 0.0 for i in range(len(decoded_text) - n_gram): ngram = decoded_text[i : i + n_gram] ngram_proba = freqs.get( ngram, 1 / (len(text) + len(alphabet) ** n_gram) ) # сглаживаем, чтобы не было нулей log_proba += np.log(ngram_proba) return log_proba def get_reverse_mapping_mcmc( encoded_text, alphabet_encoded, alphabet_corpus, freqs_corpus, n_iters=10000, n_trials=10, n_gram=2, ): accept_count = 0 best_reverse_mapping = None all_mappings = [] best_log_likelihood = -np.inf for trial in tqdm(range(n_trials), leave=False, position=0, total=n_trials): alphabet_encoded = list(alphabet_encoded) alphabet_iter = list(alphabet_corpus) reverse_mapping = { k: v for k, v in zip(alphabet_encoded, alphabet_iter[: len(alphabet_encoded)]) } log_proba_current = get_text_proba( encoded_text, reverse_mapping, freqs_corpus, n_gram=n_gram ) for i in range(n_iters): alphabet_proposal = alphabet_iter[:] idx1, idx2 = np.random.choice(len(alphabet_proposal), replace=False, size=2) alphabet_proposal[idx1], alphabet_proposal[idx2] = ( alphabet_proposal[idx2], alphabet_proposal[idx1], ) reverse_mapping_proposal = { k: v for k, v in zip( alphabet_encoded, alphabet_proposal[: len(alphabet_encoded)] ) } log_proba_proposal = get_text_proba( encoded_text, reverse_mapping_proposal, freqs_corpus, n_gram=n_gram ) p_accept = np.exp(log_proba_proposal - log_proba_current) if p_accept > np.random.rand(): accept_count += 1 alphabet_iter = alphabet_proposal log_proba_current = log_proba_proposal reverse_mapping = reverse_mapping_proposal if log_proba_current > best_log_likelihood: best_log_likelihood = log_proba_current best_reverse_mapping = reverse_mapping all_mappings.append(reverse_mapping) print(f"Best likelihood: {best_log_likelihood}") print(f"Accept ratio: {accept_count / (n_iters * n_trials)}") return best_reverse_mapping freqs_corpus = get_ngram_freqs_smoothed(tokenized_corpus, n_gram=2) best_reverse_mapping = get_reverse_mapping_mcmc( encoded_text, alphabet_encoded=ALPHABET, alphabet_corpus=ALPHABET, freqs_corpus=freqs_corpus, ) decoded_text = apply_mapping(encoded_text, best_reverse_mapping) decoded_text character_accuracy(tokenized_text, decoded_text)Получилось! 
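A side note on step 4 of the algorithm above: since the code works with log-likelihoods, the ratio p_proposed / p_current becomes a difference of logs, and comparing it to a uniform random draw gives the Metropolis rule (proposals that increase the likelihood are always accepted, worse ones only sometimes). A tiny illustration with made-up numbers:
```
import numpy as np

rng = np.random.default_rng(0)

log_p_current = -1203.7    # made-up log-likelihood of the current permutation
log_p_proposed = -1205.0   # made-up log-likelihood after swapping two symbols

# Metropolis rule: accept with probability min(1, p_proposed / p_current).
# Working in log-space avoids underflow; capping at 0 before exponentiating
# only prevents overflow and does not change the accept/reject decision.
p_accept = np.exp(min(0.0, log_p_proposed - log_p_current))
accepted = p_accept > rng.random()
print(f"acceptance probability: {p_accept:.3f}, accepted: {accepted}")
```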
Часть 4 Задание Расшифруйте сообщение:`←⇠⇒↟↹↷⇊↹↷↟↤↟↨←↹↝⇛⇯↳⇴⇒⇈↝⇊↾↹↟⇒↟↹⇷⇛⇞↨↟↹↝⇛⇯↳⇴⇒⇈↝⇊↾↹↨←⇌⇠↨↹⇙↹⇸↨⇛↙⇛↹⇠⇛⇛↲⇆←↝↟↞↹⇌⇛↨⇛⇯⇊↾↹⇒←↙⇌⇛↹⇷⇯⇛⇞↟↨⇴↨⇈↹⇠⇌⇛⇯←←↹↷⇠←↙⇛↹↷⇊↹↷⇠←↹⇠↤←⇒⇴⇒↟↹⇷⇯⇴↷↟⇒⇈↝⇛↹↟↹⇷⇛⇒⇙⇞↟↨←↹↳⇴⇌⇠↟↳⇴⇒⇈↝⇊↾↹↲⇴⇒⇒↹⇰⇴↹⇷⇛⇠⇒←↤↝←←↹⇞←↨↷←⇯↨⇛←↹⇰⇴↤⇴↝↟←↹⇌⇙⇯⇠⇴↹↘⇛↨↞↹⇌⇛↝←⇞↝⇛↹↞↹↝↟⇞←↙⇛↹↝←↹⇛↲←⇆⇴⇏`Или это (они одинаковые, второй вариант просто на случай проблем с юникодом):`დჳჵჂႨშႼႨშჂხჂჲდႨსႹႭჾႣჵისႼჰႨჂჵჂႨႲႹႧჲჂႨსႹႭჾႣჵისႼჰႨჲდႩჳჲႨჇႨႠჲႹქႹႨჳႹႹჱჶდსჂႽႨႩႹჲႹႭႼჰႨჵდქႩႹႨႲႭႹႧჂჲႣჲიႨჳႩႹႭდდႨშჳდქႹႨშႼႨშჳდႨჳხდჵႣჵჂႨႲႭႣშჂჵისႹႨჂႨႲႹჵჇႧჂჲდႨჾႣႩჳჂჾႣჵისႼჰႨჱႣჵჵႨეႣႨႲႹჳჵდხსდდႨႧდჲშდႭჲႹდႨეႣხႣსჂდႨႩჇႭჳႣႨႾႹჲႽႨႩႹსდႧსႹႨႽႨსჂႧდქႹႨსდႨႹჱდჶႣნ` Решениеmessage = "←⇠⇒↟↹↷⇊↹↷↟↤↟↨←↹↝⇛⇯↳⇴⇒⇈↝⇊↾↹↟⇒↟↹⇷⇛⇞↨↟↹↝⇛⇯↳⇴⇒⇈↝⇊↾↹↨←⇌⇠↨↹⇙↹⇸↨⇛↙⇛↹⇠⇛⇛↲⇆←↝↟↞↹⇌⇛↨⇛⇯⇊↾↹⇒←↙⇌⇛↹⇷⇯⇛⇞↟↨⇴↨⇈↹⇠⇌⇛⇯←←↹↷⇠←↙⇛↹↷⇊↹↷⇠←↹⇠↤←⇒⇴⇒↟↹⇷⇯⇴↷↟⇒⇈↝⇛↹↟↹⇷⇛⇒⇙⇞↟↨←↹↳⇴⇌⇠↟↳⇴⇒⇈↝⇊↾↹↲⇴⇒⇒↹⇰⇴↹⇷⇛⇠⇒←↤↝←←↹⇞←↨↷←⇯↨⇛←↹⇰⇴↤⇴↝↟←↹⇌⇙⇯⇠⇴↹↘⇛↨↞↹⇌⇛↝←⇞↝⇛↹↞↹↝↟⇞←↙⇛↹↝←↹⇛↲←⇆⇴⇏" message_freqs = get_ngram_freqs(message, n_gram=1) corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) message_freqs_sorted = sorted(message_freqs.items(), key=lambda x: x[1], reverse=True) alphabet_corpus = "".join([c for c, _ in corpus_freqs_sorted]) alphabet_message = "".join([c for c, _ in message_freqs_sorted]) alphabet_corpus, alphabet_message freqs_corpus = get_ngram_freqs_smoothed(tokenized_corpus, n_gram=2) best_reverse_mapping = get_reverse_mapping_mcmc( message, alphabet_encoded=alphabet_message, alphabet_corpus=alphabet_corpus, freqs_corpus=freqs_corpus, n_iters=10000, n_trials=50, ) encoded_message = apply_mapping(message, best_reverse_mapping) encoded_messageТут тоже получилось. :) Посмотрим на посимвольную ошибку:original_message = "если вы видите нормальный или почти нормальный текст у этого сообщения который легко прочитать скорее всего вы все сделали правильно и получите максимальный балл за последнее четвертое задание курса хотя конечно я ничего не обещаю" character_accuracy(original_message, encoded_message)Часть 5 Задание *Бонус*: а что если от биграмм перейти к триграммам (тройкам букв) или даже больше? Улучшатся ли результаты? Когда улучшатся, а когда нет? Чтобы ответить на этот вопрос эмпирически, уже может понадобиться погенерировать много тестовых перестановок и последить за метриками, глазами может быть и не видно. Решение Зашифрованное сообщениеcorpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) message_freqs_sorted = sorted(message_freqs.items(), key=lambda x: x[1], reverse=True) abc_corpus = "".join([c for c, _ in corpus_freqs_sorted]) abc_message = "".join([c for c, _ in message_freqs_sorted]) abc_corpus, abc_message freqs_corpus_trigram = get_ngram_freqs_smoothed(tokenized_corpus, n_gram=3) best_reverse_mapping = get_reverse_mapping_mcmc( message, alphabet_encoded=alphabet_message, alphabet_corpus=alphabet_corpus, freqs_corpus=freqs_corpus_trigram, n_gram=3, n_iters=10000, n_trials=20, ) encoded_message = apply_mapping(message, best_reverse_mapping) encoded_message original_message = "если вы видите нормальный или почти нормальный текст у этого сообщения который легко прочитать скорее всего вы все сделали правильно и получите максимальный балл за последнее четвертое задание курса хотя конечно я ничего не обещаю" character_accuracy(original_message, encoded_message)Почти идеально! 
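The Part 2 task also mentioned the distance between the true and recovered permutations as an alternative metric. Character accuracy is used throughout instead, but one simple version of that distance, counting the alphabet symbols the two mappings send to different characters, could look like the sketch below; `permutation_distance` is a hypothetical helper, not part of the notebook.
```
def permutation_distance(mapping_a, mapping_b):
    """Count alphabet symbols that the two mappings send to different characters."""
    keys = set(mapping_a) | set(mapping_b)
    return sum(mapping_a.get(k) != mapping_b.get(k) for k in keys)

# Toy example over a three-letter alphabet: the mappings disagree on two symbols.
print(permutation_distance({"а": "б", "б": "в", "в": "а"},
                           {"а": "б", "б": "а", "в": "в"}))  # -> 2
```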
Более длинные тексты Сравним качество дешифровки при переходе от биграмм к триграммам на более длинных, чем зашифрованное сообщение, текстах — например, на тестовом примере.def test_ngrams(num_tries, n_gram, freqs_corpus, text): accuracy_arr = [] for _ in range(num_tries): best_reverse_mapping = get_reverse_mapping_mcmc( encoded_text, alphabet_encoded=alphabet_text, alphabet_corpus=alphabet_corpus, freqs_corpus=freqs_corpus, n_gram=n_gram, n_iters=10000, n_trials=5, ) decoded_text = apply_mapping(encoded_text, best_reverse_mapping) accuracy_arr.append(character_accuracy(text, decoded_text)) print( f"\nCharacter accuracy: {np.mean(accuracy_arr):.3f}±{np.std(accuracy_arr):.3f}" ) mapping = generate_mapping(corpus_freqs) encoded_text = apply_mapping(tokenized_text, mapping) text_freqs = get_ngram_freqs(encoded_text) corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) text_freqs_sorted = sorted(text_freqs.items(), key=lambda x: x[1], reverse=True) alphabet_corpus = "".join([c for c, _ in corpus_freqs_sorted]) alphabet_text = "".join([c for c, _ in text_freqs_sorted]) alphabet_corpus, alphabet_text NUM_TRIES = 1 test_ngrams(num_tries=NUM_TRIES, n_gram=2, freqs_corpus=freqs_corpus, text=tokenized_text) test_ngrams( num_tries=NUM_TRIES, n_gram=3, freqs_corpus=freqs_corpus_trigram, text=tokenized_text ) freqs_corpus_fourgram = get_ngram_freqs_smoothed(tokenized_corpus, n_gram=4) test_ngrams( num_tries=NUM_TRIES, n_gram=4, freqs_corpus=freqs_corpus_fourgram, text=tokenized_text )Качество практически не изменилось. Более короткие тексты Теперь попробуем наоборот уменьшить размер текста относительно размера зашифрованного сообщения.shorter_text = 'короткий текст для проверки качества декодирования последовательностями символов разной длины' encoded_text = apply_mapping(shorter_text, mapping) text_freqs = get_ngram_freqs(encoded_text) corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) text_freqs_sorted = sorted(text_freqs.items(), key=lambda x: x[1], reverse=True) alphabet_corpus = "".join([c for c, _ in corpus_freqs_sorted]) alphabet_text = "".join([c for c, _ in text_freqs_sorted]) alphabet_corpus, alphabet_text print(f'Text alphabet to corpus alphabet ratio: ' \ f'{len(alphabet_text) / len(alphabet_corpus) * 100:.2f}%') NUM_TRIES = 5 # Биграммы: test_ngrams(num_tries=NUM_TRIES, n_gram=2, freqs_corpus=freqs_corpus, text=shorter_text) # Триграммы: test_ngrams(num_tries=NUM_TRIES, n_gram=3, freqs_corpus=freqs_corpus_trigram, text=shorter_text) # 4-граммы: test_ngrams(num_tries=NUM_TRIES, n_gram=4, freqs_corpus=freqs_corpus_fourgram, text=shorter_text) longer_text = 'немного более длинный текст для проверки качества декодирования последовательностями символов разной длины который также пытается задействовать больше различных символов алфавита' encoded_text = apply_mapping(longer_text, mapping) text_freqs = get_ngram_freqs(encoded_text) corpus_freqs_sorted = sorted(corpus_freqs.items(), key=lambda x: x[1], reverse=True) text_freqs_sorted = sorted(text_freqs.items(), key=lambda x: x[1], reverse=True) alphabet_corpus = "".join([c for c, _ in corpus_freqs_sorted]) alphabet_text = "".join([c for c, _ in text_freqs_sorted]) alphabet_corpus, alphabet_text print(f'Text alphabet to corpus alphabet ratio: ' \ f'{len(alphabet_text) / len(alphabet_corpus) * 100:.2f}%') # Биграммы: test_ngrams(num_tries=NUM_TRIES, n_gram=2, freqs_corpus=freqs_corpus, text=longer_text) # Триграммы: test_ngrams(num_tries=NUM_TRIES, n_gram=3, 
freqs_corpus=freqs_corpus_trigram, text=longer_text) # 4-граммы: test_ngrams(num_tries=NUM_TRIES, n_gram=4, freqs_corpus=freqs_corpus_fourgram, text=longer_text)Train [DALLE-pytorch](github.com/lucidrains/DALLE-pytorch) from [lucidrains](https:/github.com/lucidrains/)Notebook by [afiaka87](https://github.com/afiaka87) With help from:valteralfred, robvanvolt, rom1504, janEbert, mehdidc, gabriel_syme, robvanvolt, mega b# @title Licensed under the MIT License # Copyright (c) 2021 # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.Installation + Weights & Biases LoginA free tier account for `https://wandb.ai` is required for experiment tracking.%%writefile download_cah.sh mkdir /content/CAH cd /content/CAH wget --continue https://the-eye.eu/public/AI/cah/laion400m-dat-release.torrent aria2c --continue --seed-time=0 --file-allocation='trunc' --select-file=1-100 laion400m-dat-release.torrent cd /content !source download_cah.sh #@title (Advanced) - DALLE-pytorch Repository and Branch Name #@markdown Install `DALLE-pytorch` from source. #@markdown Defaults to latest branch. repository_url = "https://github.com/lucidrains/DALLE-pytorch.git" #@param {type: "string"} branch = "main" #@param {type: "string"} !git clone -b "$branch" --single-branch "$repository_url" !git clone https://github.com/mehdidc/DALLE-pytorch %pip install wandb %cd /content/DALLE-pytorch !python setup.py install %cd /content/ #@title Log in to Weights & Biasess #@markdown DALLE-pytorch uses W&B (https://www.wandb.ai) for experiment tracking. #@markdown If you don't want to make a free account; you can find generations in `/content/wandb` in the folder for your specific run. 
#@markdown **It's recommended to use W&B.** !wandb loginTrain DALLE-pytorch%pip uninstall taming-transformers-rom1504 %pip install omegaconf !git clone https://github.com/CompVis/taming-transformers %pip install -e taming-transformers %pip install pytorch-lightning !python /content/DALLE-pytorch/train_dalle.py --dim 512 --truncate_captions --depth 8 --shift_tokens --random_resize_crop_lower_ratio 1.0 --wandb_name "train_dalle_colab" --rotary_emb --wds=jpg,txt --image_text_folder=/content/CAH/laion400m-dat-release(c) - Beginners Machine Learning - London Introduction to Unsupervised Machine Learning with AWS SagemakerIn this interesting 3hr workshop, you will take the massive dataset of UFO sightings (80,000 reports over the past century) from [National UFO Reporting Center (NUFORC)](http://www.nuforc.org/) and use Amazon's machine learning services ([AWS Sagemaker](https://aws.amazon.com/sagemaker/)) to identify the top 10 locations that are most likely to have UFO sightings. To do so, you will need to use an unsupervised machine learning algorithm.You will then take your trained model, deserialise it, convert its output to a csv format and visualise it on a map using AWS [Quicksight](https://aws.amazon.com/quicksight/) to see where these locations are. Then you can try correlating these locations with landmarks.The general machine learning workflow with AWS Sagemaker is shown below. For this assignment we will not evaluate or deploy the model but only use its output to visualise the results on a world map. What is Unsupervised Machine Learning? With unsupervised learning, data features are fed into the learning algorithm, which determines how to label them (usually with numbers 0,1,2..) and based on what. This “based on what” part dictates which unsupervised learning algorithm to follow.Most unsupervised learning-based applications utilize the sub-field called **Clustering**. One of the most famous topics under the realm of Unsupervised Learning in Machine Learning is k-Means Clustering. Even though this clustering algorithm is fairly simple, it can look challenging to newcomers into the field. What is the difference between supervised and unsupervised machine learning?The main difference between Supervised and Unsupervised learning algorithms is the absence of data labels in the latter. What does clustering mean?**Clustering** is the process of grouping data samples together into clusters based on a certain feature that they share — exactly the purpose of unsupervised learning in the first place.Source: [Clustering using K-means algorithm](https://towardsdatascience.com/clustering-using-k-means-algorithm-81da00f156f6) How does the K-Means Algorithm work?Being a clustering algorithm, **k-Means** takes data points as input and groups them into `k` clusters. This process of grouping is the training phase of the learning algorithm. 
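To make the training and inference phases concrete, here is a minimal, purely illustrative sketch using scikit-learn's `KMeans` on toy 2-D points; scikit-learn appears here only for illustration, since the workshop itself trains with the SageMaker built-in k-means algorithm.
```
import numpy as np
from sklearn.cluster import KMeans

# Toy "coordinates": two well-separated blobs of 2-D points.
points = np.array([[0.0, 0.1], [0.2, 0.0], [0.1, 0.2],
                   [5.0, 5.1], [5.2, 4.9], [4.9, 5.2]])

# Training phase: group the points into k=2 clusters.
kmeans = KMeans(n_clusters=2, n_init=10, random_state=0).fit(points)
print(kmeans.cluster_centers_)       # one centroid per cluster

# Inference phase: assign a new point to the nearest centroid.
print(kmeans.predict([[4.8, 5.0]]))  # cluster index of the new sample
```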
The result would be a model that takes a data sample as input and returns the cluster that the new data point belongs to, according the training that the model went through.Source - [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0)Source: [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0)Check out the the two articles below to learn more about how the K-Means Algorithm work:- [Clustering using K-means algorithm](https://towardsdatascience.com/clustering-using-k-means-algorithm-81da00f156f6)- [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0) Where can you use k-means?The **k-means algorithm** can be a good fit for finding patterns or groups in large datasets that have not been explicitly labeled. Here are some example use cases in different domains:**E-commerce**- Classifying customers by purchase history or clickstream activity.**Healthcare**- Detecting patterns for diseases or success treatment scenarios.- Grouping similar images for image detection.**Finance**- Detecting fraud by detecting anomalies in the dataset. For example, detecting credit card frauds by abnormal purchase patterns.**Technology**- Building a network intrusion detection system that aims to identify attacks or malicious activity.**Meteorology**- Detecting anomalies in sensor data collection such as storm forecasting. Step 1: Importing Data For this part of the assignment, we need to import the following packages: - **Amazon SageMaker Python SDK**: Amazon SageMaker Python SDK is an open source library for training and deploying machine-learned models on Amazon SageMaker. See [Documentation](https://sagemaker.readthedocs.io/en/stable/index.html)- **Python Built-in Library** [datetime](https://docs.python.org/2/library/datetime.html)- **Numpy** and **Pandas**# TODO: Import the above packages below import ____ as pd import ____ as np import ____ from ____ import datetime> **Exercise:** Construct a url to the the dataset location in your S3 bucket using the following expression and save it to `data_location`.# TODO: Construct the url path to your dataset file that you have just uploaded to your newly created S3 bucket bucket = "____" prefix = "ufo_dataset" data_key = "ufo_complete.csv" # Construct a url string and save it to data_location variable data_location = "s3://{}/{}/{}".format(bucket, prefix, data_key) # print data_location print(data_location) # Internally do not process the file in chunks when loading the csv onto a dataframe # to ensure avoid mixed type inferences when importing the large UFO dataset. df = pd.read_csv(data_location, low_memory= False) # Inspect the tail of the dataframe df.tail() # Inspect the shape of the dataframe df.shapeStep 2: Clearning, transforming and preparing the data# TODO: Select the 'latitude' and 'longitude' columns and save it as a new dataframe `df_geo` with .copy(). df_geo = df[["____", "____"]].copy() # Inspect the tail of df_geo df_geo.tail() # Fully inspect the df_geo dataframe df_geo.info()Upon successfull inspection of the above dataframe, you should note the following with this dataframe:- There are no `null` or missing values in both columns. However, we still need to check for other incorrect entries that are not **coordinates**. 
Examples include: `0`, `string`s, etc.- The `latitude` column has a `dtype` of `object`. This means the column may have missing or string values where the rest of the values are numbers. If the entries in the column are non-homogenous, pandas will store the column as a `string` or `object` data type. To clean the data in this column we can use pandas' `pd.to_numeric()` method to convert the data in this column to `float` for processing. The machine learning algorithm expects the data passed in to it to be numerical digits `float`s or `int`s not `string`s. - See [Documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_numeric.html) on how to use this method.> **Exercise:** Convert the `latitude` column's datatype to `float`. You can pass in the `errors = "coerce"` option to `pd.to_numeric()` method to enforce the conversion. When conversion is not possible - i.e. values are `string`s - these strings will be replaced with `NaNs`. Therefore, you need to use a `.dropna()` method to drop rows where `NaNs` exist. Then check whether the column formats have been converted to numerical data types `float` and if any missing values are still present. **Note**: You can pass in `inplace = True` argument to `.dropna()` methods so that operations are performed in place and to avoid re-assignments.# TODO: Convert the column values to numeric and whenever this is not possible replace the value with NaNs df_geo["latitude"] = pd.____(df_geo.____, errors = "____") # Count the number of null values in the dataframe - Expecting this to be non-zero print("Number of null values in the dataframe before dropping rows is {}".format(df_geo.isnull().any().sum())) # TODO: Drop all rows that NaN Values df_geo.____(inplace=____) # Count the number of null values in the dataframe - Expecting this to be zero print("Number of null values in the dataframe before dropping rows is {}". format(df_geo.isnull().any().sum())) # Count how many rows in the df have 0 values print(df_geo[(df_geo.longitude == 0) | (df_geo.latitude == 0) ].count()) # TODO: Select all rows that have non-zero coordinate values and re-assign the selection to df_geo df_geo = df_geo[(df_geo.longitude != ____) &(df_geo.latitude != ____) ] # Check that the there are no coordinate values in the df_geo dataframe with 0 print(df_geo[(df_geo.longitude == 0) &(df_geo.latitude == 0)]) # Re-checking the dataframe to ensure both columns have numerical datatype such as `float` or `int`. df_geo.info() # Check if we have any missing values (NaNs) in our dataframe missing_values = df_geo.isnull().values.any() print("Are there any missing values? {}".format(missing_values)) # If there are any missing values in the dataframe, show them if (missing_values): df_geo[df_geo.isnull().any(axis = 1)] # TODO: store the cleaned up dataframe column values as a 2D numpy array (matrix) with datatype of float32 data_train = df_geo.values.astype("____") # Print the 2D numpy array data_trainStep 3: Visualising the last 5000 reports of the data on the map One of the useful packages for visualising the data on a map is called **plotly**. We can import the following module from plotly package as `px`:- **plotly**'s [express](https://plot.ly/python/plotly-express/) - Plotly Express is a terse, consistent, high-level wrapper around `plotly.graph_objects` for rapid data exploration and figure generation.For data available as a tidy pandas DataFrame, we can use the Plotly Express function `px.scatter_geo` for a geographical scatter plot. 
The `color` argument is used to set the color of the markers from a given column of the DataFrame.import plotly.express as px # Showing only the last 5000 rows only on a map fig = px.scatter_geo(df_geo.iloc[-5000: -1, :], lat="latitude", lon = "longitude", title="UFO Reports by Latitude/Longitude in the world - Last 5000 Reports", color = "longitude") fig.show()You may notice that most of the recent 5000 UFO reports have been located in the United States. Let's take a closer look at United States by using `plotly`'s `geo` layout feature to show sightings on the US map.from plotly.offline import iplot data = [dict( type = 'scattergeo', locationmode = 'USA-states', lat = df_geo.iloc[-5000:-1, 0], lon = df_geo.iloc[-5000:-1, 1], mode = 'markers', marker = dict( size = 5.5, opacity = 0.75, color = 'rgb(0, 163, 81)', line = dict(color = 'rgb(255, 255, 255)', width = 1)) )] layout = dict( title = 'UFO Reports by Latitude/Longitude in United States - Last 5000 Reports', geo = dict( scope = 'usa', projection = dict(type = 'albers usa'), showland = True, landcolor = 'rgb(250, 250, 250)', subunitwidth = 1, subunitcolor = 'rgb(217, 217, 217)', countrywidth = 1, countrycolor = 'rgb(217, 217, 217)', showlakes = True, lakecolor = 'rgb(255, 255, 255)') ) figure = dict(data = data, layout = layout) iplot(figure)Step 3: Create and train our model# Define number of clusters and output location URL to save the trained model num_clusters = 10 output_location = "s3://" + bucket + "/model-artifacts"To pass a training command to Amazon Sagemaker, we need to grab the details of the current execution role **ARN ID** whose credentials we are using to call the Sagemaker API. > **Exercise:** Grab the ARN ID of your current Execution role using the `sagemaker` SDK - See [Documentation](https://sagemaker.readthedocs.io/en/stable/session.html?highlight=get%20executionsagemaker.session.get_execution_role)# TODO: Get the execution role ARN ID to pass to the sagemaker API later on role = sagemaker.____() # Check that you have this step correctly performed print(role)We now can use Amazon's built-in K-means ML algorithm to find `k` clusters of data in our unlabeled UFO dataset.Amazon SageMaker uses a modified version of the web-scale k-means clustering algorithm. Compared with the original version of the algorithm, the version used by Amazon SageMaker is more accurate. Like the original algorithm, it scales to massive datasets and delivers improvements in training time. To do this, the version used by Amazon SageMaker streams mini-batches (small, random subsets) of the training data. The k-means algorithm expects tabular data, where rows represent the observations that you want to cluster, and the columns represent attributes of the observations. See [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/k-means.html)To ask AWS sagemaker for training a model using this algorithm we need to define a **K-means Estimator**. KMeans estimators can be configured by setting **hyperparameters**. These hyperparameters are arguments passed into the estimator's Constructor Function. This estimator requires the following hyperparameters to be passed in `sagemaker.KMeans()`:- `role` (str) – An AWS IAM role (either name or full ARN)- `train_instance_count` (int) – Number of Amazon EC2 instances to use for training. We only need 1 for this exercise.- `train_instance_type` (str) – Type of EC2 instance to use for training, for example, ‘ml.c4.xlarge’. 
This is the **compute resources** that you want Amazon SageMaker to use for model training. Compute resources are ML compute instances that are managed by Amazon SageMaker.- `k` (int) – The number of clusters to produce. We need to 10 for this exercise.- `output_path` (str) - The URL of the S3 bucket where you want to store the output of the job.# TODO: Define the training API request to AWS Sagemaker kmeans = sagemaker.____(role = ____, train_instance_count = ____, train_instance_type = "____", output_path = ____, k = ____)The following diagram shows how you train and deploy a model with Amazon SageMakern - See [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html) For this assignment, Amazon SageMaker provides training algorithms that are great out-of-the-box solution for quick model training. We have used some helper code above to clean and prepare the dataset and configure AWS Sagemaker API calls but do not need to specify training code or even a training code image from EC2 Container Registry. We only need to pass in the dataset for training with AWS's KMeans default algorithm. If we wanted to specify our own algorithms or use one of the popular deep learning frameworks - tensorflow/etc. - then we provide additional training code.To train a model in Amazon SageMaker, you create a **training job** using the `kmeans.fit()` method. - See [Documentation](https://sagemaker.readthedocs.io/en/stable/kmeans.html?highlight=kmeans.fitsagemaker.KMeans.fit)The training job requires the following information passed in to `.fit()` method:- `record_set(data_train)` - The training records to train the KMeans Estimator on. Here `data_train` must be passed in to the `kmeans.record_set()` method to convert our 2D numpy array data to a `RecordSet` object that is required by the algorithm. - See [Documentation](https://sagemaker.readthedocs.io/en/stable/sagemaker.amazon.amazon_estimator.html?highlight=record_set()sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.record_set)- `job_name` (str) - Training job name. If not specified, the estimator generates a default job name, based on the training image name and current timestamp.Amazon SageMaker then launches the ML compute instances and uses the training dataset to train the model. It saves the resulting model artifacts and other output in the S3 bucket you specified for that purpose.Here we are going to construct a job name using the following expression and Python's built-in `datetime` module. This ensures our `job_name` is unique. Each training job requires a **unique** `job_name`. Otherwise, AWS will throw an error.# Construct a unique job_name using datetime module job_name = "kmeans-geo-job-{}".format(datetime.now().strftime("%Y%m%d%H%M%S")) # Print job_name print("Here is the job name: {}".format(job_name))> **Exercise**: Create a training job using `kmeans.fit()`. Use the AWS documentation links above to figure out how to pass in the arguments to `kmeans.fit()` for the training job to commence. 
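If you get stuck, here is a hedged sketch of what the completed call might look like, reusing the `kmeans`, `data_train` and `job_name` objects defined in this notebook:
```
# Sketch of the completed training call (check against the documentation above):
# record_set() wraps the float32 numpy matrix in the RecordSet format the
# built-in algorithm expects, and job_name must be unique per training job.
kmeans.fit(kmeans.record_set(data_train), job_name=job_name)
```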
If you do this step right, you should see outputs like this appear underneath the code cell:```2019-07-29 00:54:46 Starting - Starting the training job...2019-07-29 00:54:47 Starting - Launching requested ML instances...2019-07-29 00:55:44 Starting - Preparing the instances for training......2019-07-29 00:56:24 Downloading - Downloading input data...2019-07-29 00:57:05 Training - Downloading the training image.....2019-07-29 00:57:31 Uploading - Uploading generated training model2019-07-29 00:57:31 Completed - Training job completedBillable seconds: 68CPU times: user 1.78 s, sys: 18.7 ms, total: 1.8 sWall time: 3min 13s```%%time # TODO: Create a training job and time it. Running this code cell will send a training job request to AWS Sagemaker kmeans.fit(kmeans.record_set(_____), job_name= _____)**Congratulations** on building and training a model on the cloud using an unsupervised machine learning algorithm and saving it! Next we are going to deserialise the model so that we can use its output. Step 4: Model Deserialisation To deserialise the compressed model output saved on our S3 bucket we need to import the following packages.- **Boto** is the Amazon Web Services (AWS) SDK for Python. It enables Python developers to create, configure, and manage AWS services, such as EC2 and S3. Boto provides an easy to use, object-oriented API, as well as low-level access to AWS services. See [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html)> **Exercise**: Import the `boto3` package, then use the AWS Python SDK boto3 to download the compressed model from the S3 bucket to a file. You will need to construct a url to the model and save it to the `path_to_model` variable. Then pass `path_to_model` to the following command `boto3.resource("s3").Bucket(bucket).download_file(path_to_model, file_name_to_save_to)`. - See [boto3 Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html?highlight=s3.objectS3.Client.download_file)# TODO: Import boto3 import ____ # Construct a url to the model. Compressed model is saved under the model-artifacts folder path_to_model = "model-artifacts/" + job_name + "/output/model.tar.gz" # TODO: Use the AWS Python SDK boto3 to download the compressed model output from the S3 bucket into the `model.tar.gz` file. boto3.____("____").____(____).download_file(____, "model.tar.gz")To deserialise the compressed model output saved on our S3 bucket we need to import the following packages.- **Python's Built-in module** `os` - See [Documentation](https://docs.python.org/2/library/os.htmlos.system)Python's built-in system module `os.system()` can be used to execute the shell command `tar -zxvf` on the `model.tar.gz` compressed gzipped file. This shell command extracts tar files out of a `tar.gz` archive. The `-zxvf` flags can be passed in to `os.system()` to perform the following commands: - `-z` - The file is a “gzipped” file- `-x` - Extract files- `-v` - Verbose, print the file names as they are extracted one by one- `-f` - Use the following tar archive for the operation. See [Linux's tar Man Pages](https://linux.die.net/man/1/tar) for more details on the `tar` shell command.
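As an aside, the same extraction can be done without shelling out by using Python's standard-library `tarfile` module; this is only an alternative sketch, and the exercise below still asks for `os.system()` with the `tar` flags:

```python
import tarfile

# Extract the contents of the gzipped archive (including model_algo-1)
# into the current working directory without calling the tar binary.
with tarfile.open("model.tar.gz", "r:gz") as archive:
    archive.extractall()
```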
> **Exercise:** Use the `os.system()` method to run the `tar` command on the compressed gzip file `model.tar.gz` with the above flags.# TODO: Import the required packages for deserialisation import os # TODO: Use Python's built-in os package to open the compressed model output os.system("____ -____ model.tar.gz")`os.system()` can later be used to execute the `unzip` shell command on `model_algo-1`. The `unzip` shell command lists, tests, or extracts files from a ZIP archive. See [Linux unzip Man Pages](https://linux.die.net/man/1/unzip) for more details on the `unzip` command.> **Exercise:** Use the `os.system()` method to unzip `model_algo-1`.# TODO: Use Python's built-in os package to unzip the model_algo-1 file. os.system("____ model_algo-1")To load the unzipped model output parameters, we need to install the `mxnet` package.> **Exercise**: Use `!pip install` to install `mxnet`.# TODO: Install mxnet package !pip install ____To load the model output parameters we need to import the following package:- **MXNet**: A flexible and efficient library for deep learning. - See [Documentation](https://mxnet.apache.org/versions/master/api/python/index.html) > **Exercise**: Use `mxnet`'s `.ndarray.load()` method to load the model output parameters and assign it to the `Kmeans_model_params` variable - See [Documentation](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html)# TODO: Import mxnet import ____ as mx # TODO: Use mxnet to load the model parameters Kmeans_model_params = mx.____.____("model_algo-1")> **Exercise**: Convert the model parameters to a dataframe called `cluster_centroids_kmeans` using `pd.DataFrame()`. You can grab the model output parameters using `Kmeans_model_params[0].asnumpy()` to pass to `pd.DataFrame()`.# TODO: Convert the Kmeans_model_params to a dataframe using pandas and numpy: cluster_centroids_kmeans cluster_centroids_kmeans = pd.____(____[0].____()) # TODO: Set the column names of the cluster_centroids_kmeans dataframe to match the df_geo column names cluster_centroids_kmeans.____ = df_geo.____ # Print cluster_centroids_kmeans print(cluster_centroids_kmeans)To write the content of the model output using an in-memory stream for text I/O we need to import the following package:- **Python's Built-in Package** `io` - See [Documentation](https://docs.python.org/3/library/io.htmlio.StringIO)# TODO: Import Python's built-in package io import ____ # When a csv_buffer object is created, it is initialized using the StringIO() constructor # Here no string is given to the StringIO() so the csv_buffer object is empty. csv_buffer = io.StringIO() # TODO: Use pandas .to_csv() method to write the cluster_centroids_kmeans dataframe to a csv file cluster_centroids_kmeans.____(csv_buffer, index = False) # TODO: Let's use Amazon S3 s3_resource = boto3.resource("____") # Use the .Object() method to upload an object in the given `bucket` # Save the content of the csv_buffer file using the .put() method s3_resource.Object(bucket, "results/ten_locations_kmeans.csv").put(Body = csv_buffer.getvalue())Let's quickly visualise where these top 10 coordinates are!
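Before plotting, here is a compact recap of the deserialisation steps above as one possible end-to-end sketch; it assumes the `bucket`, `job_name`, and `df_geo` variables defined earlier in this notebook and that `mxnet` has been installed:

```python
import os
import boto3
import mxnet as mx
import pandas as pd

# Download the compressed model artifact from S3
path_to_model = "model-artifacts/" + job_name + "/output/model.tar.gz"
boto3.resource("s3").Bucket(bucket).download_file(path_to_model, "model.tar.gz")

# Unpack the archive and the zipped parameter file it contains
os.system("tar -zxvf model.tar.gz")
os.system("unzip model_algo-1")

# Load the cluster centroids and give them the same column names as df_geo
Kmeans_model_params = mx.ndarray.load("model_algo-1")
cluster_centroids_kmeans = pd.DataFrame(Kmeans_model_params[0].asnumpy())
cluster_centroids_kmeans.columns = df_geo.columns
```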
We will use **AWS QuickSight** later on for reporting these locations.# TODO: Visualise the top 10 locations in the world most likely to have UFO Sightings fig = px.scatter_geo(cluster_centroids_kmeans, lat="____", lon = "____", title="Top 10 Locations in the world most likely to have UFO Sightings", color = "longitude") fig.show() # TODO: Visualise the top locations in the US most likely to have UFO Sightings data = [dict( type = '____', locationmode = 'USA-states', lat = ____.iloc[:, 0], lon = ____.iloc[:, 1], mode = 'markers', marker = dict( size = 5.5, opacity = 0.75, color = 'rgb(0, 163, 81)', line = dict(color = 'rgb(255, 255, 255)', width = 1)) )] layout = dict( title = 'Top locations in the United States most likely to have UFO Sightings', geo = dict( scope = '____', projection = dict(type = 'albers usa'), showland = True, landcolor = 'rgb(250, 250, 250)', subunitwidth = 1, subunitcolor = 'rgb(217, 217, 217)', countrywidth = 1, countrycolor = 'rgb(217, 217, 217)', showlakes = True, lakecolor = 'rgb(255, 255, 255)') ) figure = dict(data = ____, layout = ____) iplot(____)Interesting findings! Now answer the following questions:- Which cities are the closest to these top 10 locations?- Which states in the United States are these top coordinates located in?- What landmarks - airports, research centres, etc. - do these coordinates correlate with?# TODO: Your answers here cities = ["___", "___", "___", "___", "___", "___", "___", "___", "___", "___"] us_states = ["___", "___", "___", "___", "___", "___"] landmarks = ["___", "___", "___", "___", "___", "___", "___", "___", "___", "___"]Description: Define a function f(s) over a non-empty string s that counts the frequency of the smallest character in s. For example, if s = "dcce", then f(s) = 2, because the smallest character is "c" and its frequency is 2. Now, given the string arrays queries and words, return an integer array answer, where each answer[i] is the number of words W in words such that f(queries[i]) < f(W).Example 1: Input: queries = ["cbd"], words = ["zaaaz"] Output: [1] Explanation: On the first query we have f("cbd") = 1, f("zaaaz") = 3 so f("cbd") < f("zaaaz").Example 2: Input: queries = ["bbb","cc"], words = ["a","aa","aaa","aaaa"] Output: [1,2] Explanation: On the first query only f("bbb") < f("aaaa").
On the second query, both f("aaa") and f("aaaa") are > f("cc").Constraints: 1. 1 <= queries.length <= 2000 2. 1 <= words.length <= 2000 3. 1 <= queries[i].length, words[i].length <= 10 4. queries[i][j], words[i][j] are English lowercase letters.from collections import Counter class Solution: def numSmallerByFrequency(self, queries, words): ans = [] q_freq = [self.helper(word) for word in queries] w_freq = [self.helper(word) for word in words] print(q_freq, w_freq) for i in range(len(q_freq)): cnt = 0 for j in range(len(w_freq)): if q_freq[i] < w_freq[j]: cnt += 1 ans.append(cnt) return ans def helper(self, word): # return the frequency of the smallest character in word w_freq = Counter(word) w_freq = sorted(w_freq.items(), key=lambda x: x[0]) return w_freq[0][1] solution = Solution() solution.numSmallerByFrequency(queries = ["bba","abaaaaaa","aaaaaa","bbabbabaab","aba","aa","baab","bbbbbb","aab","bbabbaabb"], words = ["aaabbb","aab","babbab","babbbb","b","bbbbbbbbab","a","bbbbbbbbbb","baaabbaab","aa"]) a = [6,1,1,2,3,3,3,1,3,2] print(len(a)) q = [1, 7, 6, 4, 2, 2, 2, 6, 2, 3] w = [3, 2, 2, 1, 1, 1, 1, 10, 5, 2] # f(queries[i]) < f(W) class Solution: def numSmallerByFrequency(self, queries, words): def str2num(ls): q = [] for i in range(len(ls)): min_char = 'z' count = 0 for j in range(len(ls[i])): if ord(ls[i][j]) < ord(min_char): count = 1 min_char = ls[i][j] elif ord(ls[i][j]) == ord(min_char): count += 1 q.append(count) return q def bin_search(a): l, r = 0, len(w) while l < r: mid = (l+r)//2 if w[mid] <= a: l = mid + 1 else: r = mid return l q = str2num(queries) w = str2num(words) w.sort() res = [] for i in range(len(q)): res.append(len(w) - bin_search(q[i])) return res solution = Solution() solution.numSmallerByFrequency(queries = ["bba","abaaaaaa","aaaaaa","bbabbabaab","aba","aa","baab","bbbbbb","aab","bbabbaabb"], words = ["aaabbb","aab","babbab","babbbb","b","bbbbbbbbab","a","bbbbbbbbbb","baaabbaab","aa"])[1, 7, 6, 4, 2, 2, 2, 6, 2, 3] [3, 2, 2, 1, 1, 1, 1, 10, 5, 2]Horner's MethodWhat is the best way to evaluate \\[ f(x) = x^3 + 4x^2 - 10 \\]at $ x = \dfrac{1}{2} $? The traditional and direct approach is\\[ f(\dfrac{1}{2}) = \dfrac{1}{2} * \dfrac{1}{2} * \dfrac{1}{2} + 4 * \dfrac{1}{2} * \dfrac{1}{2} - 10 \\]This procedure takes 4 multiplications and 2 additions, where a subtraction can be interpreted as adding a negative number. Altogether it takes 6 operations to evaluate the function. Is there a way to reduce the number of operations? Rewrite the polynomial in such a way that the variable $x$ is factored out: \begin{align}f(x) & = -10 + 4x^2 + x^3 \\ & = -10 + x * (0 + 4x + x^2 ) \\ & = -10 + x * (0 + x * (4 + x))\end{align}As you can see, it takes a total of 5 operations to evaluate the function; 3 additions and 2 multiplications. This method is called **Horner's Method**.The point of this notebook is to check the absolute error and relative error and to test the efficiency of using Horner's Method for numerical computations. In this case, we explore its application to find the root of the equation using the Bisection Method.
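As a quick sanity check before the notebook defines these expressions as functions, the direct and the nested (Horner) forms give the same value at $x = \dfrac{1}{2}$ — a minimal sketch:

```python
x = 0.5

direct = x * x * x + 4 * x * x - 10   # 4 multiplications, 2 additions
nested = -10 + x * (0 + x * (4 + x))  # 2 multiplications, 3 additions

print(direct, nested)  # both print -8.875
```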
Absolute and Relative ErrorTo calculate the error between the two functions we need to find the absolute and the relative error where $x^*$ is the approximate function and $x$ is the true function.**Absolute Error** $= |x^* - x| $**Relative Error** $= \dfrac{|x^* - x|}{|x|} $def error_analysis(true_fx, approx_fx): absolute_error = abs(approx_fx - true_fx) relative_error = absolute_error / abs(true_fx) return absolute_error, relative_errorTrue Function - Naive Methoddef f(x): return (x ** 3) + (4 * (x ** 2)) - 10Approximate Function - Horner's Methoddef f_a(x): return -10 + 𝑥 * (0 + 𝑥 * (4 + 𝑥))Bisection MethodBisection method is a root-finding algorithm based from the intermediate-value theorem from calculus.You need to verify if a root exist by making sure the two endpoints of the interval $ [a,b] $ or $\{f(a),f(b)\}$ have different signs. If function $ f $ is continuous, then there will be a root $r$ between $a$ and $b$ such that $f(r) = 0$ and $a < r < b$.import numpy as np def bisection_method(a,b): if f(a) * f(b) < 0: roots = [] while (b - a) / 2 > 1e-7: c = (a + b) / 2 if f(a) * f(c) < 0: b = c else: a = c roots.append(c) return np.array(roots) %%timeit roots = bisection_method(1,2) roots = bisection_method(1,2) def bisection_method_hm(a,b): if f_a(a) * f_a(b) < 0: roots = [] while (b - a) / 2 > 1e-7: c = (a + b) / 2 if f_a(a) * f_a(c) < 0: b = c else: a = c roots.append(c) return np.array(roots) %%timeit roots_a = bisection_method_hm(1,2) roots_a = bisection_method(1,2)According to the benchmark an implementation of the Horner's Method perform faster than the naive method. Now let us look at the error analysis to find out whether results are different on a precision level.absolute_error, relative_error = error_analysis(roots, roots_a) absolute_error relative_errorLuckily errors were not found in this example, however polynomials should always be expressed in the nested form before performing an evaluation, because this form minimizes the number of arithmetic calculations, as a result, the errors are reduced. The example \begin{align}f(x) & = -10 + 4x^2 + x^3 \\ & = -10 + x * (0 + 4x + x^2 ) \\ & = -10 + x * (0 + x * (4 + x))\end{align}has already placed the coefficients and the variables in the rightful place. Notice we placed an additional coefficient $0$ to fill-in the missing degree. Evaluation using Horner's MethodAs an engineer we can write an algorithm to evaluate `x` given only the `degree` (or the number of terms in a polynomial) and its `coefficients`. This way we don't have to rewrite a function everytime a new polynomial is introduced.Consider the following polynomial $ P(x) = a_{k}x^{k} + a_{k-1}x^{k-1} + a_{k-2}x^{k-2} + ... + a_{1}x + a_{0} $ we can evaluate `x` using this algorithm below.def evaluate(x, k, c, b=None): ''' Evaluate `x` from a polynomial using Horner's Method. Parameters ---------- x : int or float The value of x k : int degrees or the number of terms c : int or float coefficients b : int or float base points Returns ------- y : int or float The output of the polynomial ''' y = c[0] if b is None: for i in range(1, k): y = c[i] + (x * y) else: for i in range(1, k): y = (y * (x - b[i])) + c[i] return y %%timeit evaluate(2, 4, [1, 4, 0, -10]) %%timeit f_a(2) %%timeit f(2)581 ns ± 9.06 ns per loop (mean ± std. dev. 
of 7 runs, 1000000 loops each)IntroductionAs a last step of checking nucleotide metabolism, I will now visit the (d)NTP nodes and check that they are properly formed, and consumed.N.B.: I will not look into ATP, as this is a very long list of reactions that requires seperate investigationimport cameo import pandas as pd import cobra.io import escher from escher import Builder from cobra import Reaction, Metabolite model = cobra.io.read_sbml_model('../model/p-thermo.xml') model_e_coli = cameo.load_model ('iML1515') model_b_sub = cameo.load_model('iYO844')__CTP__model.reactions.CMS.id = 'MEPCT' #remove reaction model.remove_reactions(model.reactions.CTPT)__GTP__#remove reaction GTPT model.remove_reactions(model.reactions.GTPT)__UTP__#i will not remove this yet, it will come with the RNA/DNA node with model: model.remove_reactions(model.reactions.UTPT) print (model.optimize().objective_value) model.reactions.GALT.bounds = (0,1000) model.reactions.GALUi.bounds = (0,1000) #save & commit cobra.io.write_sbml_model(model,'../model/p-thermo.xml')__dCTP__ Should remove DCTPT reaction, but cannot right now. It will come back in this DNA/RNA node, so I will leave it for now. __dGTP__ Seems fine, except this DGTPT reaction. Also this becomes a dead end now. __DTTP__#remove reaction model.remove_reactions(model.reactions.DTTPT)__DATP__model.remove_reactions(model.reactions.DATPT) #save&commit cobra.io.write_sbml_model(model,'../model/p-thermo.xml')![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) 8. Keyword Extraction YAKE v3.0.1 Keyword Extraction with YAKEfrom pyspark.sql import functions as F from pyspark.sql.functions import lit from pyspark.sql.types import StringType, DataType,ArrayType from sparknlp_jsl.annotator import YakeModel from pyspark.sql.functions import udf, struct from IPython.core.display import display, HTML import re import sparknlp from sparknlp.base import * from sparknlp.annotator import * print("Spark NLP version", sparknlp.version()) print("Apache Spark version:", spark.version) spark stopwords = StopWordsCleaner().getStopWords() stopwords[:5]YAKE Keyword ExtractorYake is an Unsupervised, Corpus-Independent, Domain and Language-Independent and Single-Document keyword extraction algorithm.Extracting keywords from texts has become a challenge for individuals and organizations as the information grows in complexity and size. The need to automate this task so that text can be processed in a timely and adequate manner has led to the emergence of automatic keyword extraction tools. Yake is a novel feature-based system for multi-lingual keyword extraction, which supports texts of different sizes, domain or languages. Unlike other approaches, Yake does not rely on dictionaries nor thesauri, neither is trained against any corpora. Instead, it follows an unsupervised approach which builds upon features extracted from the text, making it thus applicable to documents written in different languages without the need for further knowledge. This can be beneficial for a large number of tasks and a plethora of situations where access to training corpora is either limited or restricted.The algorithm makes use of the position of a sentence and token. 
Therefore, to use the annotator, the text should be first sent through a Sentence Boundary Detector and then a tokenizer.You can tweak the following parameters to get the best result from the annotator.- *setMinNGrams(int)* Select the minimum length of a extracted keyword- *setMaxNGrams(int)* Select the maximum length of a extracted keyword- *setNKeywords(int)* Extract the top N keywords- *setStopWords(list)* Set the list of stop words- *setThreshold(float)* Each keyword will be given a keyword score greater than 0. (Lower the score better the keyword) Set an upper bound for the keyword score from this method.- *setWindowSize(int)* Yake will construct a co-occurence matrix. You can set the window size for the cooccurence matrix construction from this method. ex: windowSize=2 will look at two words to both left and right of a candidate word.References., ., ., ., ., . and . (2020). YAKE! Keyword Extraction from Single Documents using Multiple Local Features. In Information Sciences Journal. Elsevier, Vol 509, pp 257-289. [pdf](https://doi.org/10.1016/j.ins.2019.09.013)document = DocumentAssembler() \ .setInputCol("text") \ .setOutputCol("document") sentenceDetector = SentenceDetector() \ .setInputCols("document") \ .setOutputCol("sentence") token = Tokenizer() \ .setInputCols("sentence") \ .setOutputCol("token") \ .setContextChars(["(", ")", "?", "!", ".", ","]) keywords = YakeModel() \ .setInputCols("token") \ .setOutputCol("keywords") \ .setMinNGrams(1) \ .setMaxNGrams(3)\ .setNKeywords(20)\ .setStopWords(stopwords) yake_pipeline = Pipeline(stages=[document, sentenceDetector, token, keywords]) empty_df = spark.createDataFrame([['']]).toDF("text") yake_Model = yake_pipeline.fit(empty_df) # LightPipeline light_model = LightPipeline(yake_Model) text = ''' google is acquiring data science community kaggle. Sources tell us that google is acquiring kaggle, a platform that hosts data science and machine learning competitions. Details about the transaction remain somewhat vague , but given that google is hosting its Cloud Next conference in san francisco this week, the official announcement could come as early as tomorrow. Reached by phone, kaggle co-founder declined to deny that the acquisition is happening. google itself declined 'to comment on rumors'. kaggle, which has about half a million data scientists on its platform, was founded by Goldbloom and in 2010. The service got an early start and even though it has a few competitors like DrivenData, TopCoder and HackerRank, it has managed to stay well ahead of them by focusing on its specific niche. The service is basically the de facto home for running data science and machine learning competitions. With kaggle, google is buying one of the largest and most active communities for data scientists - and with that, it will get increased mindshare in this community, too (though it already has plenty of that thanks to Tensorflow and other projects). kaggle has a bit of a history with google, too, but that's pretty recent. Earlier this month, google and kaggle teamed up to host a $100,000 machine learning competition around classifying YouTube videos. That competition had some deep integrations with the google Cloud platform, too. Our understanding is that google will keep the service running - likely under its current name. While the acquisition is probably more about Kaggle's community than technology, kaggle did build some interesting tools for hosting its competition and 'kernels', too. 
On kaggle, kernels are basically the source code for analyzing data sets and developers can share this code on the platform (the company previously called them 'scripts'). Like similar competition-centric sites, kaggle also runs a job board, too. It's unclear what google will do with that part of the service. According to Crunchbase, kaggle raised $12.5 million (though PitchBook says it's $12.75) since its launch in 2010. Investors in kaggle include Index Ventures, , , , google chief economist , and ''' light_result = light_model.fullAnnotate(text)[0] [(s.metadata['sentence'], s.result) for s in light_result['sentence']] import pandas as pd keys_df = pd.DataFrame([(k.result, k.begin, k.end, k.metadata['score'], k.metadata['sentence']) for k in light_result['keywords']], columns = ['keywords','begin','end','score','sentence']) keys_df['score'] = keys_df['score'].astype(float) # ordered by relevance keys_df.sort_values(['sentence','score']).head(30)Getting keywords from dataframe! wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed_sample_text_small.csv df = spark.read\ .option("header", "true")\ .csv("/pubmed_sample_text_small.csv") df.show(truncate=50) result = yake_pipeline.fit(df).transform(df) result = result.withColumn('unique_keywords', F.array_distinct("keywords.result")) result.show(1) result.select('keywords.result').show(1,truncate=False)A custom Keras callback that adjusts the learning rate at specific points in trainingfrom tensorflow.keras.callbacks import Callback from tensorflow.keras import backend as K # Define a custom callback by inheriting from Callback. class CustomLearningLateCallback(Callback): def __init__(self): pass # Reduce the learning rate by a factor of 0.1. def down_lr(self, current_lr): return current_lr * 0.1 # Example hook points. # They are not used in this example. def on_train_begin(self, logs = None): pass def on_train_end(self, logs = None): pass def on_train_batch_begin(self, batch, logs = None): pass def on_train_batch_end(self, batch, logs = None): pass def on_epoch_begin(self, epoch, logs = None): current_lr = self.model.optimizer.lr if(epoch > 1): # Reduce the learning rate at the 5th, 8th and 10th epochs. if((epoch == 4) or (epoch == 7) or (epoch == 9)): current_lr = self.down_lr(current_lr) # Set the reduced learning rate on the model's current optimizer. K.set_value(self.model.optimizer.lr, current_lr) print('\nEpoch %03d: learning rate change! %s.' % (epoch + 1, current_lr.numpy())) def on_epoch_end(self, epoch, logs = None): passTraining a model with the custom Keras callbackfrom tensorflow.keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data(path='mnist.npz') from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777) x_train = (x_train.reshape(-1, 28, 28, 1)) / 255 x_val = (x_val.reshape(-1, 28, 28, 1)) / 255 x_test = (x_test.reshape(-1, 28, 28, 1)) / 255 from tensorflow.keras.utils import to_categorical y_train = to_categorical(y_train) y_val = to_categorical(y_val) y_test = to_categorical(y_test) from tensorflow.keras.models import Model from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dense from tensorflow.keras.layers import Input inputs = Input(shape = (28, 28, 1)) x = Conv2D(32, (3, 3), activation = 'relu')(inputs) x = Conv2D(32, (3, 3), activation = 'relu')(x) x = MaxPooling2D(strides = 2)(x) x = GlobalAveragePooling2D()(x) x = Dense(10, activation = 'softmax')(x) model = Model(inputs = inputs, outputs = x) # Use the defined loss function.
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc']) model.fit(x_train, y_train, batch_size = 32, validation_data = (x_val, y_val), epochs = 10, callbacks = [CustomLearningLateCallback()])(Not covered in the book) CosineAnnealing learning rate + custom Keras callback; it is used in the same way as the callback above.import math from tensorflow.keras import backend class CosineAnnealingLearningRateSchedule(Callback): def __init__(self, n_epochs, init_lr, T_mult = 1, eta_min = 0,restart_decay = 0, verbose = 0): self.T_max = n_epochs self.T_mult = T_mult self.cycle_cnt = 0 self.restart_decay = restart_decay self.init_lr = init_lr self.eta_min = eta_min self.lrates = list() # calculate learning rate for an epoch def cosine_annealing(self, epoch): lr = self.eta_min + (self.init_lr - self.eta_min) * (1 + math.cos(math.pi * (epoch / self.T_max))) / 2 if(epoch == self.T_max): self.cycle_cnt += 1 self.T_max = self.T_mult * self.T_max if(self.restart_decay >0): self.init_lr *= self.restart_decay print('change init learning rate {}'.format(self.init_lr)) return lr # calculate and set learning rate at the start of the epoch def on_epoch_begin(self, epoch, logs = None): lr = self.cosine_annealing(epoch) print('\nEpoch %05d: CosineAnnealingScheduler setting learning rate to %s.' % (epoch + 1, lr)) # set learning rate backend.set_value(self.model.optimizer.lr, lr) # log value self.lrates.append(lr)load and autoreloadfrom IPython import get_ipython # noinspection PyBroadException try: _ipython = get_ipython() _magic = _ipython.magic _magic('load_ext autoreload') _magic('autoreload 2') except: pass from sectional_v2.constants import get_outdata_path version = '_noresmv21' path_in = get_outdata_path('eusaar') file_in = path_in + 'Nd_cat_sources_timeseries%s.csv' % version plot_path = get_plotpath('eusaar') version = '_noresmv21_both'Read csv:df = pd.read_csv(file_in, index_col=0, parse_dates=True)Add month to dataframe:df['time'] = pd.to_datetime(df['time']) df['season'] = df.to_xarray()['time.season'].to_series() # df = pd.read_csv(file_in, index_col=0, parse_dates=True) df_median = df.groupby(by=['source', 'station', 'season']).median()Add flagsdf_median = df_median.drop(['flag_gd'], axis=1)Get percentiles and mask:def get_med_percs(df_masked, lis=None, q_low=0.16, q_high=0.84, by=None): if by is None: by = ['source', 'station', 'month'] if lis is None: lis = ['station', 'N30-50', 'N50', 'N100', 'N250', 'N50-100', 'N30-100', 'source', 'month'] df_median_month = df_masked.groupby(by=by).median() df_perc16_month = df_masked[lis].groupby(by=by).quantile(q=q_low) df_perc84_month = df_masked[lis].groupby(by=by).quantile(q=q_high) return df_median_month, df_perc16_month, df_perc84_month df['month'] = df.to_xarray()['time.month'].to_series() df_masked = df[df['flag_gd']] df_median_month, df_perc16_month, df_perc84_month = get_med_percs(df_masked)Various functionsdef make_new_cat(): """ Make new category """ coll_ltr = collocate_locations.transpose() td = { 'Low altitude sites (less than 1000 m a.s.l.)': 'LA', 'High altitude sites (over 1000 m a.s.l.)': 'HA' } coll_ltr['AC'] = coll_ltr['Altitude category'].apply(lambda x: td[x]) coll_ltr['new_cat'] = coll_ltr['AC'] + ': ' + coll_ltr['Region'] coll_ltr = coll_ltr.sort_values('new_cat', ascending=False) return coll_ltr def get_ordered_stations(): coll_ltr = make_new_cat() return coll_ltr.index list(get_ordered_stations()) def plot_month_med_save(df_median_month, df_perc16_month, df_perc84_month, station_ls=None, var='N50-100', figname_ext='all_stations', ncols=4, nrows=6, figsize=None, ylim=None): _df = df_median_month.reset_index() #
.sort_values()' _df_16 = df_perc16_month.reset_index() # .sort_values()' _df_84 = df_perc84_month.reset_index() # .sort_values()' sns.set_style("whitegrid") # var = 'N50-100' if figsize is None: figsize = [10 / 4 * ncols, 10 / 6 * nrows] fig, figname_ext, labels, lines = plot_med_stations_month(_df, _df_16, _df_84, var, station_ls, figname_ext, figsize, ncols, nrows, ylim) fig.tight_layout() print(labels) lgn = fig.legend(lines, labels, bbox_to_anchor=(0, 1., 1, 0.5), # (0, -0.04, 1., .1), loc='lower center', ncol=4, # mode="expand", borderaxespad=0., fontsize=11, frameon=False) # bbox_to_anchor=(0, 1., 1, 0.5)) fn = plot_path + '/Nd/' + var + 'monthly_variation%s_%s.' % (version, figname_ext) fig.savefig(fn + 'png', bbox_extra_artists=(lgn,), bbox_inches='tight') fig.savefig(fn + 'pdf', bbox_extra_artists=(lgn,), bbox_inches='tight') plt.show() def plot_month_med_save_cat(df_median_month, df_perc16_month, df_perc84_month, station_ls=None, var='N50-100', cat='all_stations', ncols=4, nrows=6, figsize=None, ylim=None): _df = df_median_month.reset_index() # .sort_values()' _df_16 = df_perc16_month.reset_index() # .sort_values()' _df_84 = df_perc84_month.reset_index() # .sort_values()' sns.set_style("whitegrid") # var = 'N50-100' if figsize is None: figsize = [10 / 4 * ncols, 10 / 6 * nrows] fig, cat, labels, lines = plot_med_stations_month(_df, _df_16, _df_84, var, station_ls, cat, figsize, ncols, nrows, ylim) fig.tight_layout() lgn = fig.legend(lines, labels, bbox_to_anchor=(0, -0.01, 1, 0.5), # (0, -0.04, 1., .1), loc='lower center', ncol=4, # mode="expand", borderaxespad=0., fontsize=11, frameon=False) # bbox_to_anchor=(0, 1., 1, 0.5)) # fig.tight_layout(ad)#rect=(0,-0.5,1,1.5)) # fig.tight_layout() su = plt.suptitle(cat, va='bottom') # , y=-.05) # fig.subplots_adjust(top=.9) fn = plot_path + '/Nd/' + var + 'monthly_variation%s_%s.' 
% (version, cat) fig.savefig(fn + 'png', bbox_extra_artists=(lgn, su,), bbox_inches='tight') fig.savefig(fn + 'pdf', bbox_extra_artists=(lgn,), bbox_inches='tight') plt.show() def plot_med_stations_month(_df, _df_16, _df_84, var, station_ls, figname_ext, figsize, ncols, nrows, ylim): fig, axs = plt.subplots(nrows, ncols, sharex=True, sharey=True, figsize=figsize) if nrows == 1 and ncols == 1: axf = [axs] else: axf = axs.flatten() if station_ls is None: station_ls = list(get_ordered_stations()) figname_ext = 'all_stations' for st, ax in zip(station_ls, axf): labels, lines = plt_station(_df, _df_16, _df_84, ax, st, var, ylim) if st =='ZEP': axins = ax.inset_axes([0.23, 0.2, 0.67, 0.67])#zoomed_inset_axes(ax, zoom=1, loc='upper right') labels, lines = plt_station(_df, _df_16, _df_84, axins, st, var, [0,300]) axins.set_xticklabels('') axins.grid(False) axins.yaxis.label.set_color('red') axins.tick_params(axis='y', colors='red') #axins.tick_params(axis=u'both', which=u'both',length=1) ax.indicate_inset_zoom(axins,edgecolor='r',) axins.spines['left'].set_color('r') axins.spines['right'].set_color('r') axins.spines['bottom'].set_color('r') axins.spines['top'].set_color('r') axins.set_title('') ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) if nrows == 1: axl = axs else: axl = axs[-1, :] if nrows == 1 and ncols == 1: axl = [axs] for ax in axl: ax.set_xlabel('Month') if nrows == 1 and ncols == 1: axl = [axs] elif nrows == 1: axl = [axs[0]] elif ncols == 1: axl = axs else: axl = axs[:, 0] for ax in axl: ax.set_ylabel('%s [#/cm$^3$]' % var) return fig, figname_ext, labels, lines def plt_station(_df, _df_16, _df_84, ax, st, var, ylim): ax.set_xticks(range(1, 12, 2)) _df_s = _df[_df['station'] == st] _df_s16 = _df_16[_df_16['station'] == st] _df_s84 = _df_84[_df_84['station'] == st] x = _df_s['month'] y = _df_s lines = [] labels = [] for source in _df_s['source'].unique(): _df_ss = _df_s[_df_s['source'] == source].set_index('month').reindex( range(1, 13)) # .set_index('month').reindex(range(1,13)) # print(_df_ss[var]) line = ax.plot(_df_ss.index, _df_ss[var], color=get_case_col(source), label=source, marker='*', markersize=4) lines = lines + line # s[0] labels.append(source) _df_ss16 = _df_s16[_df_s16['source'] == source].set_index('month').reindex(range(1, 13)) _df_ss84 = _df_s84[_df_s84['source'] == source].set_index('month').reindex(range(1, 13)) ax.fill_between(_df_ss16.index, _df_ss16[var], _df_ss84[var], color=get_case_col(source), alpha=0.3) ax.set_title(st) if ylim is not None: ax.set_ylim(ylim) ax.set_xlim([.8,12.2]) return labels, lines plot_month_med_save(df_median_month, df_perc16_month, df_perc84_month, var='N50-100', ylim=[0, 4000]) coll_ltr= make_new_cat() cats = coll_ltr['new_cat'].unique() for cat in cats: st_ls = (coll_ltr[coll_ltr['new_cat'] == cat].index) nrows = int(np.ceil(len(st_ls) / 3)) plot_month_med_save_cat(df_median_month, df_perc16_month, df_perc84_month, station_ls=st_ls, cat=cat, ncols=3, nrows=nrows)Importsimport sys import os import numpy as np import pickle import matplotlib.pyplot as plt import time from scipy.io import loadmat from glob import glob from sklearn.model_selection import train_test_split from keras.callbacks import TensorBoard from keras.optimizers import Adadelta from keras.utils import multi_gpu_model from os.path import join, basename, isdir sys.path.append('scripts') from data_helpers import * from model_helpers import * %load_ext autoreload %autoreload 2 
os.environ['CUDA_VISIBLE_DEVICES'] = '0' OUT_PATH_MODEL = 'saved'Hyperparameters# Data hyperparameters ratio_0_1 = 1 excluded_classes = [1, 2, 3] target_size = (512, 512) test_ratio = 0.25 # Model hyperparameters mobilenet_width = 1 dense_layers = [128, 64, 32] # Train hyperparameters num_epochs = 500 batch_size = 8Load datafiles = sorted(glob('data/imgs/*jpg')) classes = loadmat('data/imagelabels.mat')['labels'][0].tolist() unique_labels = set(classes) grouped = [[(data[0], data[1]) for data in zip(files, classes) if data[1] == label] for label in unique_labels if label not in excluded_classes] exc_grouped = sorted([(data[0], data[1]) for data in zip(files, classes) for label in unique_labels if label in excluded_classes if data[1] == label], key=lambda x: x[1]) same_pairs, diff_pairs, rest_data = create_pairs(grouped, ratio_0_1=ratio_0_1) print('Number of same pairs: {} - Number of different pairs: {}'.format(len(same_pairs), len(diff_pairs))) pairs = same_pairs + diff_pairs file_pairs = [(pair[0][0], pair[1][0]) for pair in pairs] class_pairs = [(pair[0][1], pair[1][1]) for pair in pairs] labels = [int(not (pair[0] == pair[1])) for pair in class_pairs] train_files, test_files, c_train, c_test, y_train, y_test = train_test_split( file_pairs, class_pairs, labels, test_size=test_ratio, shuffle=True, stratify=labels) img_pairss = [] for mode, files in zip(['train', 'test'], [train_files, test_files]): img_pairs = [] i = 0 for file_pair in files: img_pair = [cv2.imread(img_file, -1)[:, :, ::-1] for img_file in file_pair] img_pair = [preprocess_img(img, target_size, canvas_color=(255, 255, 255), normalize=mode == 'test') for img in img_pair] img_pairs.append(img_pair) if i % 100 == 0: if i == 0: start = time.time() else: end = time.time() print('Progressed time: {:.2f} sec - ETA: {:.2f} sec'.format( end - start, (len(file_pairs) - i) * ((end - start) / i))) i += 1 img_pairss.append(img_pairs) print('Loaded {} data!'.format(mode)) x_train, x_test = img_pairss[0], img_pairss[1]Prepare data to visualize embeddingsexc_files = [data[0] for data in exc_grouped] exc_imgs = [cv2.imread(data[0], -1)[:, :, ::-1] for data in exc_grouped] exc_imgs = np.array([preprocess_img(img, target_size, canvas_color=(255, 255, 255), normalize=False) for img in exc_imgs]) exc_classes = [data[1] for data in exc_grouped] vis_files = [pair[0] for pair in test_files] + [pair[1] for pair in test_files] vis_imgs = np.array([pair[0] for pair in x_test] + [pair[1] for pair in x_test]) vis_classes = [pair[0] for pair in c_test] + [pair[1] for pair in c_test]Create metadata for tensorboard embeddingcreate_metadata('saved/logs/test_metadata.tsv', vis_files, vis_classes) create_metadata('saved/logs/excluded_metadata.tsv', exc_files, exc_classes) num_vis = 10 fig, ax = plt.subplots(nrows=num_vis, ncols=2, figsize=(15, 50)) for row in range(num_vis): for col in range(2): ax[row][col].set_title('Class: {} - Label: {}'.format(c_train[row][col], y_train[row])) ax[row][col].imshow(x_train[row][col]) ax[row][col].axis('off') training_generator = DataGenerator( x_train, y_train, batch_size=batch_size, augment=True, shuffle=True)Create and compile models# Create models encoder = create_mobile_net_encoder((*target_size, 3), dense_layers, mobilenet_width=mobilenet_width) siamese_model = create_siamese_model(encoder, distance_func=euclidean_distance) siamese_model.summary() # Compile siamese model siamese_model.compile(loss=contrastive_loss, optimizer=Adadelta(), metrics=[siamese_accuracy])Save hyperparameters in 
modelsiamese_model.train_files = train_files siamese_model.test_files = test_files siamese_model.c_train = c_train siamese_model.c_test = c_test siamese_model.y_train = y_train siamese_model.y_test = y_test siamese_model.ratio_0_1 = ratio_0_1 siamese_model.excluded_classes = excluded_classes siamese_model.target_size = target_size siamese_model.test_ratio = test_ratio siamese_model.mobilenet_width = mobilenet_width siamese_model.dense_layers = dense_layers siamese_model.num_epochs = num_epochs siamese_model.batch_size = batch_sizeCreate callbackscheckpointer = CustomModelCheckpoint( join(OUT_PATH_MODEL, 'logs'), monitor='val_loss', verbose=0, save_weights_only=False, mode='auto', period=1) graphs = TensorBoard( log_dir=join(OUT_PATH_MODEL, 'logs'), batch_size=batch_size, write_graph=False) embeddings = TensorBoardEmbeddings( log_dir=join(OUT_PATH_MODEL, 'logs'), encoder, {'test': vis_imgs, 'excluded': exc_imgs}, ['saved/logs/test_metadata.tsv', 'saved/logs/excluded_metadata.tsv'], vis_every=1)Train model# Start training siamese_model.fit_generator( generator=training_generator, validation_data=(split_imgs(np.array(x_test)), y_test), epochs=num_epochs, verbose=1, shuffle=False, use_multiprocessing=True, workers=10, callbacks=[checkpointer, graphs, embeddings])Midterm Exam – DATA 3401 (Fall 2021) Start Date: 10/20 Due Date: 10/22 (at the **end** of the lab period (5:00pm))There will be **no Lab on Friday 10/22** Midterm RulesThis midterm exam is essentially like a short-term lab. Please work the exercises below **on your own**. When you have completed the exam, you should push your completed jupyter notebook to your GitHub repo for this class in the **Exams->Midterm** folder.You may not discuss the problems with **anyone else**, including persons on an online internet forum. Consulting an outside source like this will be considered an academic integrity violation.You may use all class resources including previous labs and lectures, and anything posted on the course GitHub repo. You may not use any function that trivializes a problem. For example, if I ask you to write a `max` function that computes the maximum entry in a list, you are not allowed to use the pre-defined Python function `max`; you must write your own. Exercise 11. Write a script or function that asks the user to input 3 numbers, and output the product of the 3 numbers.1. Your code should display an error message if one of the inputs is not a number.1. Test your code on the following inputs: 1. (5, -10, 20.0) 1. (1, "blah", 5)## Write your code here ## Test your code hereExercise 2Suppose the first element of a sequence is $a_0 = 1$, the second element is $a_1 = 3$, and for every $i>1$, the $i$--th element of the sequence is defined by the recursive relation $a_i = \frac{(a_{i-1}-a_{i-2})^2}{3}$. (So $a_2 = 4/3$)1. Write a function which outputs the $n$--th element of this sequence for a given positive integer $n$2. Test your function for $n = 1, 5, 10, 15,$ and $20$# Write your function here # Test your function hereExercise 31. Write a function which takes a list as input and outputs the largest **integer** entry. If there is none, return `None`.1. Test your function on the following lists 1. \[ 1, 9, 10.2, 6, -2\] 1. \[1.2, 3/4, "Hello World"\]# Write your function here # Test your function hereExercise 41. Create a program that will make a deck of cards. 
Each card should be represented as a tuple of the form `(value, suit)`, where `value` is a number between 2 and 10, Jack, Queen, King, or Ace, and `suit` is one of Spades, Clubs, Diamonds, or Hearts. (So at the end, you should have a list of 52 tuples representing a 52 card deck)1. Create a function that takes in a hand (list) of 5 cards, and determines whether or not the hand is a "flush", meaning that all 5 cards are of the same suit.1. Test your function by inputting the following hands: 1. Ace of Hearts, King of Hearts, Queen of Hearts, Jack of Hearts, 2 of Hearts 1. 2 of Clubs, 3 of Spades, 4 of Clubs, 10 of Diamonds, Jack of Spades# Write your functions here # Test your functions hereExercise 51. Write a function or script that takes in a string and whose output is the input string minus all of a specified character. For example, an input of "Hello World" where "l" is removed should result in "Heo Word"1. Test your function by specifying "i" to be removed from "Supercalifragilisticexpialidocious"1. Test your function by specifying "data" to be removed from "data 3401 introduction to python for data science"# Write your function here # Test your function hereCS 109A/STAT 121A/AC 209A/CSCI E-109A: Midterm - 2017**Harvard University****Fall 2017****Instructors**: , , , --- INSTRUCTIONS- You must submit the Midterm on your own. ** No group submissions are allowed**. You may use any print or online resources but ** you may not work or consult with others**.- Restart the kernel and run the whole notebook again before you submit. - Please submit both a notebook and a pdf. --- Flight DelaysThe U.S. Department of Transportation's (DOT) Bureau of Transportation Statistics tracks the on-time performance of domestic flights operated by large air carriers. Summary information on the number of on-time, delayed, canceled, and diverted flights are published in DOT's monthly Air Travel Consumer Report and in this dataset of 2015 flight delays and cancellations. DataEach entry of the flights.csv file corresponds to a flight. More than 5,800,000 flights were recorded in 2015. These flights are described according to 31 variables. Further details of these variables can be found here, if you are interested (not needed to answer these questions). 
| Name | Type | DESCRIPTION |
|---|---|---|
| DATE | object | The date in python datetime format |
| MONTH | int64 | The month of the year (1-12) |
| DAY | int64 | The day of the month |
| DAY_OF_WEEK | int64 | The day of the week (1-7, MON-SUN) |
| AIRLINE | object | An identifier for the airline |
| FLIGHT_NUMBER | int64 | The flight number |
| TAIL_NUMBER | object | The tail number (aircraft) corresponding to this flight |
| ORIGIN_AIRPORT | object | The code for the origin airport |
| DESTINATION_AIRPORT | object | The code for the destination airport |
| SCHED_DEP | object | The departure time in python datetime.time format |
| SCHED_ARR | object | The arrival time in python datetime.time format |
| DEPARTURE_DELAY | float64 | The delay incurred at the origin (mins) |
| ARRIVAL_DELAY | float64 | The delay when the flight reached the destination (mins) |
| DISTANCE | int64 | Distance in miles between origin and destination |
| SCHEDULED_TIME | float64 | Scheduled time of flight (minutes) |
| ELAPSED_TIME | float64 | Actual time of flight (minutes) |
| AIR_SYSTEM_DELAY | float64 | What part of the delay was NASD? (mins) |
| SECURITY_DELAY | float64 | What part of the delay was due to security problems? (mins) |
| AIRLINE_DELAY | float64 | What part of the delay is due to the airline? (mins) |
| LATE_AIRCRAFT_DELAY | float64 | What part of the delay is due to previous flight(s) being late (mins) |
| WEATHER_DELAY | float64 | Delay due to extreme weather events (mins) |

You can read more about the various weather delays [here](https://www.rita.dot.gov/bts/help/aviation/html/understanding.html) if you are so inclined. Data/CaveatsThe data file, flights.csv, is found here (note, it is about 70MB). This data is already preprocessed, reduced, partially cleaned and therefore not identical to the original dataset.import numpy as np import pandas as pd from datetime import datetime import time import matplotlib import matplotlib.pyplot as plt from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegressionCV import sklearn.metrics as metrics from sklearn.preprocessing import PolynomialFeatures from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA from sklearn.neighbors import KNeighborsClassifier as KNN from sklearn.tree import DecisionTreeClassifier as DecisionTree from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score from sklearn import preprocessing from sklearn.tree import export_graphviz from IPython.display import Image from IPython.display import display from sklearn.linear_model import LinearRegression from sklearn.linear_model import LassoCV from sklearn.linear_model import RidgeCV from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings("ignore") %matplotlib inlineProblem Description We will build two separate models: one model that classifies whether a flight will be delayed and a second model that predicts the length of delay given that a flight is truly delayed. Only consider models taught in class so far. **Consider the following:** This is a large dataset; think of strategies on how to solve this problem.
Create a manageable subsample of the data that you can use to train and test/validate, but eventually you should predict on all the data (excluding the training set). Questions1. (5pts) Create a new variable, `DELAY_OR_NOT`: a boolean/indicator variable which indicates any arrival delay under 15 mins as a 0, and any delay at or above 15 mins as a 1 (`ARRIVAL_DELAY >= 15`).2. (5pts) Make sure you understand the data variable descriptions before you start the analysis. Consider all the columns and determine and list which of these predictors should not be used. 3. (15pts) Perform EDA to gain intuition of the factors that affect delay and provide visuals: do delays vary across airlines, or time of departure, or airport (do, at the very least, Chicago (ORD), Boston (BOS), and your favorite another airport), or airport traffic?4. (20pts) Build a classification model that classifies delays according to `DELAY_OR_NOT`. This is an unbalanced dataset, thus consider the appropriate performance metric when reporting your results. 5. (5pts) Given your model, comment on the importance of factors as related to whether a flight is delayed.6. (5pts) Evaluate your model(s) on your test set, and finally provide a visual to show which airlines are predicted to have the most delays using all the data excluding the training and test set. 7. (15pts) Build a regression model that predicts the length of delay (on the log scale) given that a flight is truly delayed.8. (20pts) Write a report (in the last markdown cell in your notebook with your findings (without code)). Describe the main design decisions you have made with justifications. Clearly explain your methodology and results. This should not be more than 300 words. You may use up to 5 diagrams. **Question 1**(5pts) Create a new variable, DELAY_OR_NOT: a boolean/indicator variable which indicates any arrival delay under 15 mins as a 0, and any delay at or above 15 mins as a 1 (ARRIVAL_DELAY >= 15).#Load the data df = pd.read_csv("cs109a_midterm.csv") #Display the first couple columns df.head() #Let's first clean our dataframe #See where the missing values are cols_missing_values = df.columns[df.isnull().any()] cols_missing_values #Let's see if we have any missing values in the 'ARRIVAL_DELAY' column np.sum([df.ARRIVAL_DELAY.isnull()]) #Create the variable 'DELAY_OR_NOT' in our DataFrame based on if 'ARRIVAL_DELAY_>=15 (min) #Since we saw we do not have any missing values in ARRIVAL_DELAY column, we can proceed. DELAY_OR_NOT = (df.ARRIVAL_DELAY >= 15).astype(int) df['DELAY_OR_NOT'] = DELAY_OR_NOT**Question 2**(5pts) Make sure you understand the data variable descriptions before you start the analysis. Consider all the columns and determine and list which of these predictors should not be used. **I believe there is several columns which do not help in our analysis** DATE since we already have variables MONTH and DAY and we know all the data points are from 2015 SCHEDULED_TIMEis duplicate information since SCHEDULED_TIME = SCHED_ARRIVAL - SCHED_DEPARTURE ELAPSED_TIME is duplicate information since ELAPSED_TIME = SCHED_ARRIVAL + ARRIVAL_DELAY - (SCHED_DEPARTURE + DEPARTURE DELAY) FLIGHT NUMBERIt is redundant since we have info about origin/destination and scheduled dep/arr time TAIL NUMBERThis is indicative of each unique plane. When we have a dataset with 800,000 observations and 4819 planes, it is very unlikely our regression model would be able to recognize if there is a particular plane which causes delay. 
All the delay featuresGiven the fact we already have a "delay_or_not" response we are seeking, our "types" of delays are not predictors of whether there will be a delay or not. In other words, a type of "delay" will predict the length of the delay but not whether there will be an event of a delay. **So, we will get rid of those in our analysis in this subproblem. I already looked for missing values in the previous subquestion and the missing values happen to be only in the columns I am dropping.**#We copy the dataframe, since it's 2017 and we all have enough memory,right? new_df = df.copy(deep=True) #drop all the columns based on the analysis in the cell above del new_df['DATE'] del new_df['SCHEDULED_TIME'] del new_df['ELAPSED_TIME'] del new_df['FLIGHT_NUMBER'] del new_df['TAIL_NUMBER'] del new_df['ARRIVAL_DELAY'] del new_df['DEPARTURE_DELAY'] del new_df['AIR_SYSTEM_DELAY'] del new_df['AIRLINE_DELAY'] del new_df['SECURITY_DELAY'] del new_df['LATE_AIRCRAFT_DELAY'] del new_df['WEATHER_DELAY']**Question 3** (15pts) Perform EDA to gain intuition of the factors that affect delay and provide visuals: do delays vary across airlines, or time of departure, or airport (do, at the very least, Chicago (ORD), Boston (BOS), and your favorite another airport), or airport traffic?#We will ONLY look at the visualisation in the training data we will later use to build our model #(I am splitting train/test here just for purposes for visualisation, I am later using the same random seed #to do it again after preproccessing) #Subsample the data to reduce the # of observations. np.random.seed(9001) new_df_for_viz = new_df.sample(n=100000,random_state=6) #Split the data in test and training (see Lecture 5) train_df_viz, data_test_NOT_USE = train_test_split(new_df_for_viz,test_size=0.33, random_state=0) #Let's see what airlines had the highest percentage of delays in 2015 list_of_airlines = [i for i in train_df_viz.AIRLINE.value_counts().index] airlines_delays = [] for i in list_of_airlines: delayed = train_df_viz[train_df_viz.AIRLINE==i][train_df_viz.DELAY_OR_NOT==1].shape[0] not_delayed = train_df_viz[train_df_viz.AIRLINE==i][train_df_viz.DELAY_OR_NOT==0].shape[0] airlines_delays.append(delayed/(delayed+not_delayed)) plt.figure(figsize=(20,10)) plt.bar([i for i in range(len(airlines_delays))],airlines_delays,color='red',label = "Percentage of delayed flights by Airline") plt.xticks([i for i in range(len(airlines_delays))],list_of_airlines); mean_delays=np.mean(airlines_delays) plt.axhline(mean_delays,label='Mean percentage of delay accross airlines') plt.legend(); plt.xlabel("Airline") plt.ylabel("Ratio of delayed flights");**We see significant differences in the ratio of delayed flights among airlines. For example, "HA" seems to have relatively less delayed flights than "NK"**#Let's see which airline had the most delays in 2015 as well as how many flights were not delayed for that airlines (total number) import seaborn as sns plt.figure(figsize=(20,10)) sns.set(style="darkgrid") sns.countplot(x='AIRLINE',hue='DELAY_OR_NOT',data=train_df_viz,palette="Set2");**This graph is related to the one above. 
However, we are just plotting the absolute number of flights here.**#Let's see what days of the week had the most delays in 2015 weekday_delays = [] for i in range(1,8): delays = train_df_viz[train_df_viz.DAY_OF_WEEK==i][train_df_viz.DELAY_OR_NOT==1].shape[0] total = train_df_viz[train_df_viz.DAY_OF_WEEK==i].shape[0] weekday_delays.append(delays/total) plt.figure(figsize=(20,10)) plt.bar([i for i in range(7)],weekday_delays,color='green',label = "Delay in a weekday") plt.xticks([i for i in range(7)],['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']); mean_delays=np.mean(weekday_delays) plt.axhline(mean_delays,label='Mean delay across weekdays',color='gold') plt.legend(); plt.xlabel("Day of the week") plt.ylabel("Percentage of delays");**This visualisation indicates that certain days have a higher ratio of delayed flights than others. For example, Thursdays have the highest percentage of delayed flights while Saturdays the smallest.**#Let's visualize the total delays per day of the week sns.set(style="darkgrid") plt.figure(figsize=(20,10)) sns.countplot(x='DAY_OF_WEEK',hue='DELAY_OR_NOT',data=train_df_viz,palette="Set1") plt.xticks([i for i in range(7)],['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']);**This graph is related to the one above. However, we are just plotting the absolute number of flights per day of the week here.**#Let's take a look at how many flights in total were delayed in each month sns.set(style="darkgrid") plt.figure(figsize=(20,10)) sns.countplot(x='MONTH',data=train_df_viz[train_df_viz.DELAY_OR_NOT==1],palette="Set1");**In this graph, we plot the absolute number of delayed flights in any given month. We see that the lowest number of delayed flights is in September/October, while the most delayed flights are in June/July timeframe.**#Let's take a look at the percentage of flights delayed in each month months = train_df_viz.groupby('MONTH').mean() plt.figure(figsize=(20,10)) plt.bar(months.index,months.DELAY_OR_NOT,color='gray',label = "Percentage delay in a month") plt.axhline(mean_delays,label='Mean percentage delay across months',color='gold') plt.legend(bbox_to_anchor=(1, 1)); plt.xlabel("Month") plt.ylabel("Percentage of delays");**Here, we plot the ratio of flights delayed each month. 
The trends reflect what the previous graph with absolute numbers displayed.**#Number of delayed/not delayed flights departing every hour, also showing the overall traffic at the airports at a given hour from datetime import datetime import time # string to time tuple date_str = list(train_df_viz.SCHED_DEP) time_tuples = [] for i in date_str: time_tuple = time.strptime(i, "%H:%M:%S") time_tuples.append(time_tuple.tm_hour) train_df_viz['hour_of_dep'] = np.array(time_tuples) plt.figure(figsize=(20,10)) sns.countplot(x='hour_of_dep',hue='DELAY_OR_NOT',data=train_df_viz,palette="Set1"); plt.xlabel("hour of the day");**Here we can see the highest number of flights departs in the morning while being the lest delayed, while evening flights are the most delayed and there is less of them than in the morning hours**#Now, let's plot the percentage of delayed flights dependent on the time departure hour_delays=[] for i in range(24): delays = train_df_viz[train_df_viz.hour_of_dep==i][train_df_viz.DELAY_OR_NOT==1].shape[0] total = train_df_viz[train_df_viz.hour_of_dep==i].shape[0] hour_delays.append(delays/total) plt.figure(figsize=(20,10)) plt.bar([i for i in range(24)],hour_delays,color='gray',label = "Percentage delay in a month") plt.xticks([i for i in range(24)],[i for i in range(24)]); mean_delays=np.mean(weekday_delays) plt.axhline(mean_delays,label='Mean percentage delay across day hours',color='gold') plt.legend(); plt.xlabel("Hour of the day") plt.ylabel("Percentage of delays") del train_df_viz['hour_of_dep'];**Seems like most flights are delayed in the evening hours with the peak around 8pm, while morning flights have the least delays.**#Let's look at delays at 15 most busy airports (from left as the busiest) frequent_airports = train_df_viz.ORIGIN_AIRPORT.value_counts().index[0:15] airport_delays = [] for i in frequent_airports: delays = train_df_viz[train_df_viz.ORIGIN_AIRPORT==i][train_df_viz.DELAY_OR_NOT==1].shape[0] total = train_df_viz[train_df_viz.ORIGIN_AIRPORT==i].shape[0] airport_delays.append(delays/total) plt.figure(figsize=(20,10)) plt.bar([i for i in range(len(frequent_airports))],airport_delays,color='orange',label = "Percentage delay origin aiport") plt.xticks([i for i in range(len(frequent_airports))],[i for i in frequent_airports]); mean_delays=np.mean(weekday_delays) plt.axhline(mean_delays,label='Mean percentage delay across {} most frequent airports'.format(len(frequent_airports)),color='black') plt.legend(); plt.xlabel("Origin airport") plt.ylabel("Percentage of delays");**Here, we look at the 15 busiest airports (defined as how many flights depart from it in total in 2015). We can see that ORD or MCO have a high percentage of delayed flights while ATL or SEA have a low percentage of delayed flights.** **Question 4**(20pts) Build a classification model that classifies delays according to DELAY_OR_NOT. This is an unbalanced dataset, thus consider the appropriate performance metric when reporting your results.#Now, we need to convert our categorical variables cols_to_change =['MONTH','DAY','DAY_OF_WEEK','AIRLINE','ORIGIN_AIRPORT','DESTINATION_AIRPORT'] #hot-encode new_df_dummy = pd.get_dummies(new_df,columns=cols_to_change) #Subsample the data to reduce the # of observations. 
new_df_sample = new_df_dummy.sample(n=100000,random_state=6) #Split the data in test and training (see Lecture 5) np.random.seed(9001) data_train, data_test = train_test_split(new_df_sample,test_size=0.33, random_state=0) #Check we actually got the right shapes print(new_df_sample.shape,df.shape,data_train.shape,data_test.shape) #now create a remaining df to test in Question 6 remaining_df = new_df_dummy.drop(new_df_sample.index)**Change the scheduled time of dep and arr to decimals**# string to time tuple TRAIN date_str_dep = list(data_train.SCHED_DEP) date_str_arr = list(data_train.SCHED_ARR) time_tuples_dep = [] time_tuples_arr = [] for i,j in zip(date_str_dep,date_str_arr): time_tuple_dep = time.strptime(i, "%H:%M:%S") time_tuple_arr = time.strptime(j, "%H:%M:%S") time_tuples_dep.append(time_tuple_dep.tm_hour+time_tuple_dep.tm_min/60) time_tuples_arr.append(time_tuple_arr.tm_hour+time_tuple_arr.tm_min/60) data_train['SCHED_DEP'] = np.array(time_tuples_dep) data_train['SCHED_ARR'] = np.array(time_tuples_arr) # string to time tuple TEST date_str_dep = list(data_test.SCHED_DEP) date_str_arr = list(data_test.SCHED_ARR) time_tuples_dep = [] time_tuples_arr = [] for i,j in zip(date_str_dep,date_str_arr): time_tuple_dep = time.strptime(i, "%H:%M:%S") time_tuple_arr = time.strptime(j, "%H:%M:%S") time_tuples_dep.append(time_tuple_dep.tm_hour+time_tuple_dep.tm_min/60) time_tuples_arr.append(time_tuple_arr.tm_hour+time_tuple_arr.tm_min/60) data_test['SCHED_DEP'] = np.array(time_tuples_dep) data_test['SCHED_ARR'] = np.array(time_tuples_arr) # string to time REMAINING df date_str_dep = list(remaining_df.SCHED_DEP) date_str_arr = list(remaining_df.SCHED_ARR) time_tuples_dep = [] time_tuples_arr = [] for i,j in zip(date_str_dep,date_str_arr): time_tuple_dep = time.strptime(i, "%H:%M:%S") time_tuple_arr = time.strptime(j, "%H:%M:%S") time_tuples_dep.append(time_tuple_dep.tm_hour+time_tuple_dep.tm_min/60) time_tuples_arr.append(time_tuple_arr.tm_hour+time_tuple_arr.tm_min/60) remaining_df['SCHED_DEP'] = np.array(time_tuples_dep) remaining_df['SCHED_ARR'] = np.array(time_tuples_arr) #check we converted time to decimals correctly in SCHED_DEP and SCHED_ARR features data_train.head()**Let's take a look at how unbalanced our data (delayed/not delayed) is - I see that only ~10% of observations are delayed flights.**print(data_train[data_train.DELAY_OR_NOT==1].shape) print(data_train[data_train.DELAY_OR_NOT==0].shape)(6929, 1323) (60071, 1323)**Finally, before I start building out models, I need to standardize all of our continuous variables**p_data_train=data_train.copy(deep=True) p_data_train.loc[:,"SCHED_DEP":"DISTANCE"]=(data_train.loc[:,"SCHED_DEP":"DISTANCE"]-data_train.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train.loc[:,"SCHED_DEP":"DISTANCE"].std() #test set - use mean and std of the training set p_data_test=data_test.copy(deep=True) p_data_test.loc[:,"SCHED_DEP":"DISTANCE"]=(data_test.loc[:,"SCHED_DEP":"DISTANCE"]-data_train.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train.loc[:,"SCHED_DEP":"DISTANCE"].std() p_remaining_df = remaining_df.copy(deep=True) p_remaining_df.loc[:,"SCHED_DEP":"DISTANCE"]=(remaining_df.loc[:,"SCHED_DEP":"DISTANCE"]-data_train.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train.loc[:,"SCHED_DEP":"DISTANCE"].std() p_data_train.shape**Now, I am ready to try different classification models**x_train = p_data_train.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_train = p_data_train.DELAY_OR_NOT x_test = p_data_test.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_test = 
p_data_test.DELAY_OR_NOT**I will use the classification models comparison code from Tuesday section 8 which reviewed last years midterm. However, I will throw out the kNN classification since calculating Euclidean distances between over 1000 predictors would be too computationally expensive**#following lamda function scores predictions for two-way classifier score = lambda model, x_train, y_train: pd.Series([model.score(x_train, y_train), model.score(x_train[y_train==0], y_train[y_train==0]), model.score(x_train[y_train==1], y_train[y_train==1])], index=['overall accuracy', 'accuracy on class 0', 'accuracy on class 1']) def out_of_box_class(x,y): #Unweighted logistic regression unweighted_logistic = LogisticRegression(C=1000) unweighted_logistic.fit(x, y) unweighted_log_scores = score(unweighted_logistic, x, y) print('unweighted log') #Weighted logistic regression weighted_logistic = LogisticRegression(C=1000, class_weight='balanced') weighted_logistic.fit(x, y) weighted_log_scores = score(weighted_logistic, x, y) print('weighted log') #LDA lda = LDA() lda.fit(x, y) lda_scores = score(lda, x, y) print('lda') #QDA qda = QDA() qda.fit(x, y) qda_scores = score(qda, x, y) print('qda') #Decision Tree tree = DecisionTree(max_depth=50, class_weight='balanced', criterion='entropy') tree.fit(x, y) tree_scores = score(tree, x, y) print('tree') score_df = pd.DataFrame({'unweighted logistic': unweighted_log_scores, 'weighted logistic': weighted_log_scores, 'lda': lda_scores, 'qda': qda_scores, 'tree': tree_scores}) return score_df out_of_box_class(x_train,y_train)unweighted log weighted log lda qda tree**From running these accuracy tests, I can see that the data is likely not multivariate normally (MVN) distributed (see LDA/QDA accuracy score on the class 0/1). Thus, LDA/QDA is not a good fit given the fact they require MVN property. Additionally, the decision tree is clearly overfitting. Thus we will choose weighted logistic regression as our classification model. Further justification is that only ~10% of my training data has DELAY==1 response, thus there needs to be weight assigned to the "positive DELAY" samples to ensure better accuracy of our model. Finally, the overall score of the weighted logit regression (still with non-tuned C parameter) is fairly high.**# Fit cross-validated weighted L2 logistic regression clf = LogisticRegressionCV(fit_intercept=True, penalty='l2',class_weight='balanced') clf.fit(x_train, y_train) y_train_hat = clf.predict(x_train) print("Training set prediction accuracy of logistic regression =", accuracy_score(y_train, y_train_hat)) clf.Cs**After tuning the hyperparameter C on my weighted logistic regression, we got C = 10. 
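**One caveat before interpreting the tuned model: `clf.Cs` evaluated above is only the constructor argument of LogisticRegressionCV (an integer, 10 by default, giving the number of grid points to search), not the selected regularization strength. The chosen value is stored in the fitted attribute `clf.C_`, and the grid actually searched in `clf.Cs_`. A minimal check, assuming the fitted `clf` from the cell above:**
```python
# inspect what cross-validation actually selected, rather than the size of the search grid
print("candidate C values searched:", clf.Cs_)
print("selected C:", clf.C_)  # best C per class found by cross-validation
```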
In logistic regression, as seen in class, C is the inverse of the regularization parameter $\lambda$, so a larger C means weaker regularization.**print(x_test.shape,x_train.shape) clf_test_score = score(clf,x_test,y_test) print("Accuracy on the test set achieved:") print(clf_test_score) auc_logreg_test = metrics.roc_auc_score(y_test, clf.predict_proba(x_test)[:,1]) print("The AUC score on the test set auc_logreg",auc_logreg_test)The AUC score on the test set auc_logreg 0.820862632096**I was able to beat the 60% benchmark AUC on the test set with the chosen weighted logistic model.** **Question 5**(5pts) Given your model, comment on the importance of factors as related to whether a flight is delayed.#Let's display some coefficients of our weighted logistic regression clf.coef_[0][0:7] #the order of coefficients is the same as the order of columns in my x_train data set x_train.head()**The importance of each feature for flight delays can be read from the coefficient values. We have 1,322 coefficients, one for each of the 1,322 features. Each coefficient indicates how strongly that feature pushes the predicted probability of a delay up or down. For example, the first 7 coefficients are:**

| SCHED_DEP | SCHED_ARR | DISTANCE | MONTH_1 | MONTH_2 | MONTH_3 | MONTH_4 |
|---|---|---|---|---|---|---|
| 0.94856692 | 0.03805954 | 0.02356447 | 0.42227514 | 0.60139052 | 0.11210393 | -0.21800069 |

**Among the displayed features, SCHED_DEP is clearly the most important predictor of delay events, SCHED_ARR and DISTANCE contribute very little, and MONTH_4 is the only one with a negative coefficient. This agrees with my visualisation showing that the percentage of delayed flights is much higher in the evening than in the morning. Note that a negative coefficient does not mean a predictor is unimportant: it means the predictor lowers the predicted odds of a delay (it enters the exponential with a negative sign), so importance is better judged by the magnitude of the coefficient.** **We could bootstrap our training data to obtain, say, 100 estimates of each coefficient and build confidence intervals from them, but this would be too computationally expensive: we have more than 1,300 features, a single LogisticRegressionCV fit takes roughly 10 minutes, and there are only 36 hours to complete the midterm.**
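**For reference, a rough sketch of what such a bootstrap could look like on a small subsample; the subsample size, number of replicates and the plain weighted LogisticRegression below are illustrative choices made to keep it cheap, not the tuned model above.**
```python
# rough bootstrap sketch for coefficient confidence intervals on a small subsample
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
n_boot, n_sub = 100, 5000                    # illustrative sizes, chosen for speed
coefs = []
for b in range(n_boot):
    idx = rng.choice(len(x_train), size=n_sub, replace=True)   # resample rows with replacement
    lr = LogisticRegression(C=1, class_weight='balanced', max_iter=200)
    lr.fit(x_train.iloc[idx], y_train.iloc[idx])
    coefs.append(lr.coef_[0])
coefs = np.array(coefs)

# 95% percentile intervals for the first few predictors
lo, hi = np.percentile(coefs, [2.5, 97.5], axis=0)
for name, l, h in list(zip(x_train.columns, lo, hi))[:5]:
    print(f"{name}: [{l:.3f}, {h:.3f}]")
```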
#Now let's take a look at which predictors are most important in predicting a delay #and which ones are the least important X = x_train.columns Y= clf.coef_[0] Z = [x for _,x in sorted(zip(Y,X))] print("Several features the least important for prediction of delay \n(chose most negative coefficients in the logit regression)") print(Z[0:10]) print("\n") print("Most important features predicting delay events (chose most positive coefficients in the logit regression)") print(Z[-10:])Several features the least important for prediction of delay (chose most negative coefficients in the logit regression) ['DESTINATION_AIRPORT_LFT', 'DESTINATION_AIRPORT_12191', 'ORIGIN_AIRPORT_11697', 'MONTH_9', 'ORIGIN_AIRPORT_TLH', 'DESTINATION_AIRPORT_DLH', 'ORIGIN_AIRPORT_MGM', 'ORIGIN_AIRPORT_15016', 'ORIGIN_AIRPORT_ABI', 'ORIGIN_AIRPORT_11618'] Most important features predicting delay events (chose most positive coefficients in the logit regression) ['DESTINATION_AIRPORT_14842', 'ORIGIN_AIRPORT_10529', 'ORIGIN_AIRPORT_13244', 'ORIGIN_AIRPORT_12441', 'ORIGIN_AIRPORT_12954', 'DESTINATION_AIRPORT_11057', 'DESTINATION_AIRPORT_12173', 'AIRLINE_NK', 'DESTINATION_AIRPORT_LGA', 'DESTINATION_AIRPORT_12478']**As we can see above, MONTH_9 receives one of the most negative coefficients, i.e. the weighted logistic model predicts a lower probability of delay for September flights, which is consistent with our visualisation showing that September (together with October) had the fewest delays.** **QUESTION 6**(5pts) Evaluate your model(s) on your test set, and finally provide a visual to show which airlines are predicted to have the most delays using all the data excluding the training and test set.
**The model was already evaluated on the test set in the end of questions 4 but I will list it here again.**clf_test_score = score(clf,x_test,y_test) print("Accuracy on the test set achieved:") print("Accuracy", clf_test_score) print("The AUC score on the test set auc_logreg",auc_logreg_test) #Visual showing which airlines are predicted to have the most delays using all data except training and test set p_remaining_df.head() y_test_remain = p_remaining_df.DELAY_OR_NOT x_test_remain = p_remaining_df.drop('DELAY_OR_NOT',axis=1) #now predict on the full remaining data_set y_pred_remain = clf.predict(x_test_remain) score(clf,x_test_remain,y_test_remain) auc_logreg_rem = metrics.roc_auc_score(y_test_remain, clf.predict_proba(x_test_remain)[:,1]) print("The AUC score for the remaining whole data set is",auc_logreg_rem)The AUC score for the remaining whole data set is 0.823005059613**The accuracy of prediction achieved on the remaining full dataset is 0.754205 and AUC is 0.823**print("Predicted delayed",sum(y_pred_remain==1),"; true delayed",sum(y_test_remain)) original_dataframe = new_df.drop(new_df_sample.index) #getting the rest of indices not used in train/test predicted_dataframe = new_df.drop(new_df_sample.index) #getting the rest of indices not used in train/test sns.countplot(x='AIRLINE',hue='DELAY_OR_NOT',data=original_dataframe, palette="Set1"); plt.xlabel("Airline") plt.ylabel("Truth value count of delayed flights") plt.title("Truth values"); #Let's look at the mean delay % for each airline (truth) original_dataframe.groupby("AIRLINE").mean() predicted_dataframe['DELAY_OR_NOT'] = y_pred_remain sns.countplot(x='AIRLINE',hue='DELAY_OR_NOT',data=predicted_dataframe,palette="Set1"); plt.xlabel("Airline") plt.ylabel("Predicted count of delayed flights") plt.title("Prediction"); #Let's look at the mean delay % for each airline (PREDICTION) predicted_dataframe.groupby("AIRLINE").mean() #Now take a look at how many flights we classify FALSE POSITIVE and FALSE NEGATIVE from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve print(confusion_matrix(y_test_remain,y_pred_remain)) tn_logreg1, fp_logreg1, fn_logreg1, tp_logreg1 = metrics.confusion_matrix(y_test_remain,y_pred_remain).astype('float').ravel() print("\nThe true positive rate for the weighted logistic regression model:",tp_logreg1/(tp_logreg1 + fn_logreg1))[[478467 154626] [ 18645 53203]] The true positive rate for the weighted logistic regression model: 0.740493820287**Comparing the two plots above (truth and prediction, respectively) we see that our model predicts more delayed flights than there actually are: Predicted delayed 207829 (TP:53203,FP:154626), True delayed total 71,848 out of 700,000 flights in the remaining dataset (see the confusion matrix above). In other words, we have more FPRs than FNRs which makes sense as our data is highly unbalanced towards not delayed flights and our model is balanced in a way to put extra weight on the positive classification.** **E.g. for airlines "NK" and "F9", more flights are predicted to be delayed than on time, than in the truth baseline. However, this is to be expected given our AUC of the classification model is ~80%.****Airlines with most delays: 'NK' has 76%/37% delayed flights (predicted/ground truth) followed by 'F9': 60%/22% and 'B6': 47%/16% . 
We can see that the percentage of delayed flights predicted is higher than ground truth in all cases, which is consistent with the high false positive ratio I have with my model.****Speculation: if the user of my model preferred to get more false positives/false negatives (depending whether the user is more concerned about learning that a flight is predicted to be delayed when it's on time (FPR) or a flight is predicted to be on time when it is delayed - FNR. I would imagine for e.g., a businessman, being on the side of caution and having more FPRs than FNRs and taking precations to not e.g., miss a business meeting, might be a more useful use-case.** **Question 7**(15pts) Build a regression model that predicts the length of delay (on the log scale) given that a flight is truly delayed.#First, we want to look at lenght of flight GIVEN the flight is delayed - only select observations when a flight is truly delayed new_df_regre = df.copy(deep=True)[df.DELAY_OR_NOT==1] #drop everything as it was, but keep ARRIVAL_TIME as that will be our response variable del new_df_regre['DATE'] del new_df_regre['SCHEDULED_TIME'] del new_df_regre['ELAPSED_TIME'] del new_df_regre['FLIGHT_NUMBER'] del new_df_regre['TAIL_NUMBER'] del new_df_regre['DEPARTURE_DELAY'] del new_df_regre['AIR_SYSTEM_DELAY'] del new_df_regre['AIRLINE_DELAY'] del new_df_regre['SECURITY_DELAY'] del new_df_regre['LATE_AIRCRAFT_DELAY'] del new_df_regre['WEATHER_DELAY'] new_df_regre.head() new_df_regre.shape #Change time to decimal in scheduled dep and arr date_str_dep = list(new_df_regre.SCHED_DEP) date_str_arr = list(new_df_regre.SCHED_ARR) time_tuples_dep = [] time_tuples_arr = [] for i,j in zip(date_str_dep,date_str_arr): time_tuple_dep = time.strptime(i, "%H:%M:%S") time_tuple_arr = time.strptime(j, "%H:%M:%S") time_tuples_dep.append(time_tuple_dep.tm_hour+time_tuple_dep.tm_min/60) time_tuples_arr.append(time_tuple_arr.tm_hour+time_tuple_arr.tm_min/60) new_df_regre['SCHED_DEP'] = np.array(time_tuples_dep) new_df_regre['SCHED_ARR'] = np.array(time_tuples_arr) #Check we have continuos time in decimals (hours) and that we only have DELAYED == True observations new_df_regre.head() #create a log response variable ARRIVAL TIME new_df_regre['ARRIVAL_DELAY']=np.log(new_df_regre['ARRIVAL_DELAY']) #Now, we need to convert our categorical variables cols_to_change =['MONTH','DAY','DAY_OF_WEEK','AIRLINE','ORIGIN_AIRPORT','DESTINATION_AIRPORT'] new_df_dummy_regre = pd.get_dummies(new_df_regre,columns=cols_to_change) #Subsample the data to reduce the # of observations. 
new_df_sample_regre = new_df_dummy_regre.sample(n=16000,random_state=6) #Split the data in test and training (see Lecture 5) np.random.seed(9001) data_train_regre, data_test_regre = train_test_split(new_df_sample_regre,test_size=0.33, random_state=0) #Check we actually got the right shapes print(new_df_sample_regre.shape,df.shape,data_train_regre.shape,data_test_regre.shape) data_train_regre.head() #now we standardize our continuous variables p_data_train_regre = data_train_regre.copy(deep=True) p_data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"] = (data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"] - data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"].std() #test set - use mean and std of the training set p_data_test_regre=data_test_regre.copy(deep=True) p_data_test_regre.loc[:,"SCHED_DEP":"DISTANCE"]=(data_test_regre.loc[:,"SCHED_DEP":"DISTANCE"]-data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train_regre.loc[:,"SCHED_DEP":"DISTANCE"].std() #create x-train, y-train x_train_regre = p_data_train_regre.copy(deep=True).drop('ARRIVAL_DELAY',axis=1) y_train_regre = p_data_train_regre.ARRIVAL_DELAY x_test_regre = p_data_test_regre.copy(deep=True).drop('ARRIVAL_DELAY',axis=1) y_test_regre = p_data_test_regre.ARRIVAL_DELAY #We create our regression model by performing regularized linear regression - Lasso. Lasso_pipe = Pipeline([ ('mlr', LassoCV(cv=3) )]) Lasso_pipe.fit(x_train_regre, y_train_regre) from sklearn.metrics import r2_score y_pred_train = Lasso_pipe.predict(x_train_regre) y_pred = Lasso_pipe.predict(x_test_regre) print("R^2 accuracy prediction on the training set") print(r2_score(y_train_regre,y_pred_train)) print("R^2 accuracy prediction on the test set") print(r2_score(y_test_regre,y_pred))R^2 accuracy prediction on the training set 0.0538784085414 R^2 accuracy prediction on the test set 0.0327098717967**I achieved R^2 over 3% on the test set so I am in the clear! I used Lasso regression with cross validation as it allows to regularize features coefficients and also removes not-significant coefficients.** 209 Additional questions(10pts) Engineer two additional features that will help improve the classification model's performance. First engineered feature#Let's take a look to find how some of my features are correlated. x_train.columns[53:67] add = [i for i in range(53,100)] features_to_look_at = [i for i in range(15)] for i in add: features_to_look_at.append(i) a=np.corrcoef(x_train.iloc[:,features_to_look_at].T) plt.figure(figsize=(20,15)) sns.heatmap(a,xticklabels=list(x_train.iloc[:,features_to_look_at].columns), yticklabels=list(x_train.iloc[:,features_to_look_at].columns));**It is hard to tell which features are correlated from the correlation matrix above (we can only display a small subset of the predictors given our predictor space is too large. We get some blank values since certain airports do not have enough flights to calculate the correlation matrix**#We will add the interaction feature between day of the week - Thursday and scheduled departure time. #We saw in the visualisation that Thursday has the largest % of delayed flights #and also that evening-hours flights have a higher percentage of delayed than morning ones #It is computationally not viable to go through all the different interaction terms (even of degree 2), given our large feature space #So we have to think about which combinations might make logical sense. 
x_train_eng1 = p_data_train.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_train_eng1 = p_data_train.DELAY_OR_NOT x_test_eng1 = p_data_test.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_test_eng1 = p_data_test.DELAY_OR_NOT #Creating the interaction featured between Thursday and time of departure x_train_eng1['inter_time_day']= x_train_eng1.DAY_OF_WEEK_4*x_train_eng1.SCHED_DEP x_test_eng1['inter_time_day']= x_test_eng1.DAY_OF_WEEK_4*x_test_eng1.SCHED_DEP # Fit cross-validated weighted 'L2'-penalized logistic regression clf_eng = LogisticRegressionCV(fit_intercept=True, penalty='l2',class_weight='balanced') clf_eng.fit(x_train_eng1, y_train_eng1) y_train_hat = clf_eng.predict(x_train_eng1) print("Training set prediction accuracy of logistic regression =", accuracy_score(y_train_eng1, y_train_hat)) auc_logreg_eng = metrics.roc_auc_score(y_test_eng1, clf_eng.predict_proba(x_test_eng1)[:,1]) print("The AUC score without engineered features on the test set ",auc_logreg_test) print("The AUC score with a engineered feature on the test set",auc_logreg_eng)The AUC score without engineered features on the test set 0.820862632096 The AUC score with a engineered feature on the test set 0.821622046718**We achieved slight improvemenent from AUC of 0.821 to 0.822 on the test set, which may be small, but given the fact we have over 1300 features, this interaction feature still adds a bit of accuracy. I observed most delayed flights happen on Thursdays and also in the evening from the visualisation, thus combining these two features added a bit of classification accuracy makes a logical sense.**#Now test the model with the engineered feature on the entire dataset x_test_remain['inter_time_day'] = x_test_remain.DAY_OF_WEEK_4*x_test_remain.SCHED_DEP #Calculate the AUC score auc_logreg_eng_rem = metrics.roc_auc_score(y_test_remain, clf_eng.predict_proba(x_test_remain)[:,1]) print("AUC on the remaining data without engineered features",auc_logreg_rem) print("AUC on the remaining data WITH a engineered features",auc_logreg_eng_rem)AUC on the remaining data without engineered features 0.823005059613 AUC on the remaining data WITH a engineered features 0.822875047292**Despite achieving a minimal improvement with the first engineered feature on the test set, we actually did slightly worse on the remaining dataset.** Second engineered feature#Let's get rid of the airports which have less than 100 flights in a year # - to reduce the complexity of our model by engineering the ORIGIN_AIRPORT feature new_df_eng = df.copy(deep=True) del new_df_eng['DATE'] del new_df_eng['SCHEDULED_TIME'] del new_df_eng['ELAPSED_TIME'] del new_df_eng['FLIGHT_NUMBER'] del new_df_eng['TAIL_NUMBER'] del new_df_eng['ARRIVAL_DELAY'] del new_df_eng['DEPARTURE_DELAY'] del new_df_eng['AIR_SYSTEM_DELAY'] del new_df_eng['AIRLINE_DELAY'] del new_df_eng['SECURITY_DELAY'] del new_df_eng['LATE_AIRCRAFT_DELAY'] del new_df_eng['WEATHER_DELAY'] #Change time to decimal in scheduled dep and arr date_str_dep = list(new_df_eng.SCHED_DEP) date_str_arr = list(new_df_eng.SCHED_ARR) time_tuples_dep = [] time_tuples_arr = [] for i,j in zip(date_str_dep,date_str_arr): time_tuple_dep = time.strptime(i, "%H:%M:%S") time_tuple_arr = time.strptime(j, "%H:%M:%S") time_tuples_dep.append(time_tuple_dep.tm_hour+time_tuple_dep.tm_min/60) time_tuples_arr.append(time_tuple_arr.tm_hour+time_tuple_arr.tm_min/60) new_df_eng['SCHED_DEP'] = np.array(time_tuples_dep) new_df_eng['SCHED_ARR'] = np.array(time_tuples_arr) #Now, we need to convert our categorical variables cols_to_change 
=['MONTH','DAY','DAY_OF_WEEK','AIRLINE','ORIGIN_AIRPORT','DESTINATION_AIRPORT'] new_df_dummy = pd.get_dummies(new_df_eng,columns=cols_to_change) #get the airports with only more than 100 flights indices_to_keep = new_df.ORIGIN_AIRPORT.value_counts()[list(new_df.ORIGIN_AIRPORT.value_counts()>100)].index collection = [] for c in new_df_dummy.columns: if "ORIGIN_AIRPORT" in c or "DESTINATION_AIRPORT" in c: tag = c.replace("ORIGIN_AIRPORT_", "").replace("DESTINATION_AIRPORT_","") if not tag in indices_to_keep: collection.append(c) new_df_dummy=new_df_dummy.drop(collection, axis=1) #Subsample the data to reduce the # of observations. new_df_sample = new_df_dummy.sample(n=100000,random_state=6) #Split the data in test and training (see Lecture 5) np.random.seed(9001) data_train_eng, data_test_eng = train_test_split(new_df_sample,test_size=0.33, random_state=0) #Check we actually got the right shapes print(new_df_sample.shape,df.shape,data_train_eng.shape,data_test_eng.shape) data_train_eng.head() #now we standardize our continuous variables p_data_train_eng = data_train_eng.copy(deep=True) p_data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"] = (data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"] - data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"].std() #test set - use mean and std of the training set p_data_test_eng=data_test_eng.copy(deep=True) p_data_test_eng.loc[:,"SCHED_DEP":"DISTANCE"]=(data_test_eng.loc[:,"SCHED_DEP":"DISTANCE"]-data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"].mean())/data_train_eng.loc[:,"SCHED_DEP":"DISTANCE"].std() p_data_train_eng.head() x_train_eng2 = p_data_train_eng.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_train_eng2 = p_data_train_eng.DELAY_OR_NOT x_test_eng2 = p_data_test_eng.copy(deep=True).drop('DELAY_OR_NOT',axis=1) y_test_eng2 = p_data_test_eng.DELAY_OR_NOT # Fit cross-validated weighted L2 logistic regression clf_eng2 = LogisticRegressionCV(fit_intercept=True, penalty='l2',class_weight='balanced') clf_eng2.fit(x_train_eng2, y_train_eng2) y_train_hat = clf_eng2.predict(x_train_eng2) print("Training set prediction accuracy of logistic regression =", accuracy_score(y_train_eng2, y_train_hat)) auc_logreg_eng2 = metrics.roc_auc_score(y_test_eng2, clf_eng2.predict_proba(x_test_eng2)[:,1]) print("The AUC score without engineered features on the test set ",auc_logreg_test) print("The AUC score with a engineered feature (number of airports) on the test set",auc_logreg_eng2)The AUC score without engineered features on the test set 0.820862632096 The AUC score with a engineered feature (number of airports) on the test set 0.821096182128**We can see we improved our accuracy by engineering the "ORIGIN_AIRPORTS" feature by a bit, but most importantly we reduced the feature space from 1,300 to about 700, which improves the computational speed significantly.** (5pts) Add one additional feature from a data source not given to you. Do this only after you complete the rest of the exam. **Given lack of time, I will just add the "diverted" occurences feature and only look at January as I won't have enough time to clean and concatenate all of the months. I will add the feature called "DIVERTED" to see whether DIVERTED affected delay flights**#read the dataset I downloaded from https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236&DB_Short_Name=On-Time jan_df = pd.read_csv("Jan_15_flights.csv") jan_df.head() #fill in for 0s, we will only be using ARR_DEL15 as our response which has NaNs, other variables with NaN;s will be dropped. 
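#optional, illustrative check (commented out, not part of the recorded run): see which columns
#of the downloaded BTS file actually contain NaNs before filling them with 0
#print(jan_df.isna().sum().sort_values(ascending=False))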
my_df = jan_df.fillna(0) my_df.columns del my_df['ACTUAL_ELAPSED_TIME'] del my_df['DIV_AIRPORT_LANDINGS'] #duplicate to DIVERTED del my_df['Unnamed: 13'] my_df.head() cols_to_change =['MONTH','DAY_OF_MONTH','DAY_OF_WEEK','AIRLINE_ID','ORIGIN','DEST'] #hot-encode new_df_dummy = pd.get_dummies(my_df,columns=cols_to_change) #Subsample the data to reduce the # of observations. new_df_sample = new_df_dummy.sample(n=50000,random_state=6) #Split the data in test and training (see Lecture 5) np.random.seed(9001) data_train, data_test = train_test_split(new_df_sample,test_size=0.33, random_state=0) #Check we actually got the right shapes print(new_df_sample.shape,df.shape,data_train.shape,data_test.shape) data_train.head() #standardize the data p_data_train=data_train.copy(deep=True) p_data_train.iloc[:,[0,1,4]]=(data_train.iloc[:,[0,1,4]]-data_train.iloc[:,[0,1,4]].mean())/data_train.iloc[:,[0,1,4]].std() #test set - use mean and std of the training set p_data_test=data_test.copy(deep=True) p_data_test.iloc[:,[0,1,4]]=(data_test.iloc[:,[0,1,4]]-data_train.iloc[:,[0,1,4]].mean())/data_train.iloc[:,[0,1,4]].std() p_data_train.head() x_train = p_data_train.copy(deep=True).drop('ARR_DEL15',axis=1) y_train = p_data_train.ARR_DEL15 x_test = p_data_test.copy(deep=True).drop('ARR_DEL15',axis=1) y_test = p_data_test.ARR_DEL15 # Fit cross-validated weighted L2 logistic regression clf = LogisticRegressionCV(fit_intercept=True, penalty='l2',class_weight='balanced') clf.fit(x_train, y_train) y_train_hat = clf.predict(x_train) print("Training set prediction accuracy of logistic regression =", accuracy_score(y_train, y_train_hat)) clf_test_score = score(clf,x_test,y_test) print("Accuracy on the test set achieved:") print(clf_test_score) auc_logreg_test = metrics.roc_auc_score(y_test, clf.predict_proba(x_test)[:,1]) print("The AUC score on the test set auc_logreg",auc_logreg_test)The AUC score on the test set auc_logreg 0.711382941648First we'll generate some random images and save them to disk to emulate having a real dataset.n_images = 100 assert n_images % 10 == 0 images = np.random.randint(0, 255, (n_images, 224, 224, 3), dtype=np.uint8) labels = np.repeat(np.arange(n_images // 10), n_images // 10) for i, (img, label) in enumerate(zip(images, labels)): img_path = Path(f"fake-dataset/{label}/{i}.jpg") img_path.parent.mkdir(exist_ok=True, parents=True) with img_path.open("wb") as f: f.write(img_to_jpeg_bytes(img)) !tree fake-datasetfake-dataset ├── 0 │   ├── 0.jpg │   ├── 1.jpg │   ├── 2.jpg │   ├── 3.jpg │   ├── 4.jpg │   ├── 5.jpg │   ├── 6.jpg │   ├── 7.jpg │   ├── 8.jpg │   └── 9.jpg ├── 1 │   ├── 10.jpg │   ├── 11.jpg │   ├── 12.jpg │   ├── 13.jpg │   ├── 14.jpg │   ├── 15.jpg │   ├── 16.jpg │   ├── 17.jpg │   ├── 18.jpg │   └── 19.jpg ├── 2 │   ├── 20.jpg │   ├── 21.jpg │   ├── 22.jpg │   ├── 23.jpg │   ├── 24.jpg │   ├── 25.jpg │   ├── 26.jpg │   ├── 27.jpg │   ├── 28.jpg │   └── 29.jpg ├── 3 │   ├── 30.jpg │   ├── 31.jpg │   ├── 32.jpg │   ├── 33.jpg │   ├── 34.jpg │   ├── 35.jpg │   ├── 36.jpg[00[...]Now we'll gulp the dataset%%bash rm -rf fake-dataset-gulp gulp2_image_folder \ --images_per_chunk 100 \ --num_workers 1 \ --image_size 120 \ --shuffle \ fake-dataset/ \ fake-dataset-gulp/{'--help': False, '--image_size': '120', '--images_per_chunk': '100', '--num_workers': '1', '--shuffle': True, '--version': False, '': 'fake-dataset/', '': 'fake-dataset-gulp/'}Now we have ingested our images into a gulp directory `fake-dataset-gulp`, let's see what's inside!tree fake-dataset-gulpfake-dataset-gulp ├── 
data_0.gulp ├── label2idx.json └── meta_0.gmeta 0 directories, 3 filesNow we can load data and write a torch dataset classgulp_dir = GulpDirectory('fake-dataset-gulp') class GulpImageDataset: def __init__(self, gulp_dir: GulpDirectory, transform=None): self.gulp_dir = gulp_dir self.transform = transform if transform is not None else lambda x: x self.example_ids = list(gulp_dir.merged_meta_dict.keys()) def __getitem__(self, idx): if isinstance(idx, int): example_id = self.example_ids[idx] else: example_id = idx imgs, meta = self.gulp_dir[example_id] return self.transform(imgs[0]), meta def __len__(self): return len(self.gulp_dir.merged_meta_dict) dataset = GulpImageDataset(gulp_dir) print(len(dataset)) img, meta = dataset[0] img.shape, metaAbout![prjpic](doc/media_main/story.png)This is the main demo of this repo, it is about a concept study on the MIMII dataset to detect anomalies of machines or machine parts like fans, slider, pump and valves by means of classic machine learning and deep learning methods.In runs through the essentials to demonstrate the steps* feature extraction* indvidual model training within the ensemble* ensemble evaluation* summary and scoresFor more in depth information and some insights about the decisions we made, take a look at the subnotebooks and utilities as mentioned in the README. Imports#=============================================== # Basic Imports BASE_FOLDER = './' TARGET_FOLDER_FE = r'\dataset\extdia_v1_essential' # output folder for ffeat. extraction # import the repo-local utility py files %run -i utility\feature_extractor\JupyterLoad_feature_extractor.py %run -i utility\modeling\JupyterLoad_modeling.py # feature extraction diagram %run -i feature_extraction_diagrams\extdia_v1_essential %run -i utility\extractor_batch.py # helper from tqdm.auto import tqdm import glob import gc # sklearn from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA, FastICA feat_ext_folder = os.path.abspath(BASE_FOLDER + TARGET_FOLDER_FE)load feature_extractor_mother load feature_extractor_mel_spectra load feature_extractor_psd load feature_extractor_ICA2 load feature_extractore_pre_nnFilterDenoise load extractor_diagram_mother load Simple_FIR_HP load TimeSliceAppendActivation load load_data Load split_data Load anomaly_detection_models Load pseudo_supervised_models Load tensorflow models Load detection_pipe load # extractor diagram V1 essential load extractor_batchGeneral configTo customize the following notebook execution, set the specifications here:The execution time on a local desktop PC for all four IDs of one machine and one SNR is circa 1 hour. We recommend only executing a set of 4 variations at a time (for instance over one machine).#=============================================== # Possible variations # # SNRs = ['6dB', '0dB', 'min6dB'] # machines = ['pump', 'valve', 'fan', 'slider'] # IDs = ['00', '02', '04', '06'] SNRs = ['6dB', 'min6dB'] machines = ['pump'] IDs = ['04', '06'] # if we want to include augmented model (SVM) set True aug = True # note: increase n_jobs to max. 
CPUs you have use all hyperthreading cores (there is no auto detect just now) n_jobs = 7Utility wrapper functionsdef feat_ext_process_set(FileFindDict, main_channel=0, sporadic=False, augment=False,FileCountLimit=None, n_jobs=4): if sporadic: dt = 1 # 1 means time slicing else: dt = 0 if augment: ag = 0 # augment only normal operation else: ag = -2 # not existing class = no augment extractor_batch(base_folder= BASE_FOLDER, target_folder=TARGET_FOLDER_FE, extdia = extdia_v1_essential, FileFindDict = FileFindDict, n_jobs = n_jobs, target_class_map = {'abnormal':1, 'normal': 0}, FileCountLimit = FileCountLimit, datset_folder_from_base = 'dataset', fHP=120, DeviceType=dt, main_channel = main_channel, augment=ag) gc.collect() def find_data_file(SNR, machine, ID): ''' Function to find existing feature data files. If one is found, it returns the path. Multiple files will raise error. ''' # list all the files that match the mask path = glob.glob(BASE_FOLDER + '/dataset/extdia_v1_essential/{}{}{}_EDiaV1HP'.format(machine, SNR, ID) + "*pandaDisc*.pkl", recursive=True) if len(path) == 0: return None elif len(path) == 1: return path[0] else: raise Exception('More than one file found:', path)Feat Extraction Feature extraction diagram![exdia](doc/media_feature_extraction/exdia_v1_essential.png)in order to modify the diagram go the class definition: /feature_extraction_diagrams/extdia_v1_essential.py Note to the main_channelThe main channel is picking one microphone out of the 8, this can be seen as if the demo version is strictly in working monoOr a DOA could be used to find the main direction see : feature_extraction_diagrams/A21_DirectionOfArrival_DOA/pyroomacustic_DOA.ipynbif not find_data_file('6dB', 'pump', '02'): ExampleFileFilter = {'SNR': ['6dB'],'machine': ['pump'],'ID': ['02']} # create some feat_ext_process_set(ExampleFileFilter, main_channel=2, sporadic = False, augment = True, FileCountLimit= None, n_jobs=n_jobs)Spot Check the output# This code reloads pkl files that have been stored # in the step above - notice only created files can be loaded # then a plot is made form n and n+1 output ports # this cell is ment as a spot check before running the batch that might, # take much more time ! 
d_MEL_den = pickle.load( open( feat_ext_folder + r'\pump6dB02_EDiaV1HPaug0_outpMEL_den.pkl', "rb" )) d_MEL_raw = pickle.load( open( feat_ext_folder + r'\pump6dB02_EDiaV1HPaug0_outpMEL_raw.pkl', "rb" )) d_PSD_raw = pickle.load( open( feat_ext_folder + r'\pump6dB02_EDiaV1HPaug0_outpPSD_raw.pkl', "rb" )) n1=2 n2=7 plt.figure(figsize=(16,9)) plt.subplot(321) feature_extractor_from_dict(d_MEL_raw[n1],BASE_FOLDER).plot(False) plt.subplot(322) feature_extractor_from_dict(d_MEL_raw[n2],BASE_FOLDER).plot(False) plt.subplot(323) feature_extractor_from_dict(d_MEL_den[n1],BASE_FOLDER).plot(False) plt.subplot(324) feature_extractor_from_dict(d_MEL_den[n2],BASE_FOLDER).plot(False) plt.subplot(325) feature_extractor_from_dict(d_PSD_raw[n1],BASE_FOLDER).plot(True) plt.subplot(326) feature_extractor_from_dict(d_PSD_raw[n2],BASE_FOLDER).plot(True) plt.tight_layout()Batch creation of feature data# Create the batch of feature data # note: there is still a deepcopy issue you may experience memory leak : https://github.com/BA-HanseML/NF_Prj_MIMII_Dataset/issues/58 for SNR in SNRs: for machine in machines: # some metaparameters for feature generation according to each machine # main_channel is the channel in direction of the machine (according to MIMII-paper setup) # for sporadic machinery we use the activation time detection if machine == 'valve': main_channel = 0 sporadic = True elif machine == 'pump': main_channel = 2 sporadic = False elif machine == 'fan': main_channel = 4 sporadic = False elif machine == 'slider': main_channel = 6 sporadic = True for ID in IDs: # check if files already exist if not find_data_file(SNR, machine, ID): BatchFileFilter = {'SNR': SNR,'machine': machine,'ID': ID} feat_ext_process_set(BatchFileFilter, main_channel=main_channel, sporadic=sporadic, augment=True, n_jobs=n_jobs)Modeling Ensemble![diagram](doc\media_modeling\ensemble_structure.png)The above diagram shows the final ensemble structure. We have four individual pipes with different sets of feature data, preprocessing steps and models. The class definition below will combine them in an ensemble. This class definition is only used to evaluate the found ensemble structure. 
As input pre extracted feature data is used.class uni_Ensemble(object): def __init__(self, SNR, machine, ID, aug=True): self.SNR = SNR self.machine = machine self.ID = ID # individual model weights for the blender self.weights = [1.3, 1.0, 0.9] # feature constellation for the individual models feature_constellation = [('MEL_den', {'function':'frame', 'frames':5}), # Autoencoder MEL spectrum ('MEL_den', {'function':'frame', 'frames':5}), # Isolation Forest MEL spectrum ('PSD_raw', {'function':'flat'}), # Isolation Forest Welch method ('PSD_raw', {'function':'flat'})] # Isolation Forest Welch method # augmented is optional if aug: feature_constellation.append( ('PSD_raw', {'function':'flat'})) self.weights.append(0.8) # set up all the model tasks self.tasks = [{ 'path_descr': find_data_file(SNR, machine, ID), 'feat': feature[1], 'feat_col':feature[0], 'SNR':SNR, 'machine':machine, 'ID':ID, 'BASE_FOLDER':BASE_FOLDER } for feature in feature_constellation] # set up all the individual model pipelines as shown in the ensemble diagram self.pipes = [ Pipe(preprocessing_steps=[(PCA, {'n_components':64}),(StandardScaler, {})], modeling_step=(uni_AutoEncoder, {'epochs':50}), pseudo_sup=False), # Autoencoder MEL spectrum Pipe(preprocessing_steps=[(PCA, {'n_components':64}),(StandardScaler, {})], modeling_step=(uni_IsolationForest, {'n_estimators':64, 'max_features':4}), pseudo_sup=False), # Isolation Forest MEL spectrum Pipe(preprocessing_steps=[(StandardScaler, {})], modeling_step=(uni_IsolationForest, {'n_estimators':200, 'max_features':1}), pseudo_sup=False), # Isolation Forest Welch method ] # again augmented is optional if aug: self.pipes.append( Pipe(preprocessing_steps=[(StandardScaler, {})], modeling_step=(uni_svm, {'C': 0.1, 'degree':3,'kernel':'rbf'}), pseudo_sup=True) # SVM augmented Welch method ) def fit(self): # training of all the individual models for pipe, task in zip(self.pipes, self.tasks): # set up the task pipe.task = task # split data into train and testset pipe.split_data() # get the data print('...loading data') data_train, data_test = pipe.get_data() print('data loading completed\n\n...preprocessing data') # preprocessing data_train, data_test = pipe.preprocess(data_train, data_test) print('data preprocessing finished\n\n...fitting the model') # fitting the model pipe.fit_model(data_train) print('model fitted successfully\n\n...fitting the prediction scaler') # fitting the prediction scaler pipe.fit_aggr_score_scaler(data_train, pipe.df_train.path) print('prediction scaler fitted successfully\n\n...evaluating model') # evaluating over ground truth pipe.evaluate(data_test) print('evaluation successfull, roc_auc:', pipe.roc_auc) def evaluate(self): # evaluate the individual models for i, (pipe, weight) in enumerate(zip(self.pipes, self.weights)): # get the data _, data_test = pipe.get_data() # preprocess according to individual pipe data_test = pipe.preprocess_post(data_test) # append up weighted predictions if i == 0: predictions = pipe.predict_aggr_score( data_test, pipe.df_test.path)*weight ground_truth = pipe.median_by_file( pipe.ground_truth, pipe.df_test.path) else: predictions = np.append(predictions, pipe.predict_aggr_score( data_test, pipe.df_test.path)*weight, axis=1) # sum up weighted predictions prediction = np.sum(predictions, axis=1) # evaluate predictions return roc_auc_score(ground_truth, prediction) filepath = 'results.dataframe' columns = ['SNR', 'machine', 'ID', 'augmented', 'roc_auc'] # if the dataframe file exists, load it # if not, create an empty dataframe 
if os.path.exists(filepath): results = pd.read_pickle(filepath) else: results = pd.DataFrame(data=None, columns=columns) # iterate through all the wanted machines for SNR in SNRs: for machine in machines: for ID in IDs: # check if ensemble result already exists within the dataframe # if not, run training and evaluation mask = (results.SNR==SNR) & (results.machine==machine) & (results.ID==ID) & (results.augmented==aug) if results[mask].shape[0]==0: # instantiate ensemble ensemble = uni_Ensemble(SNR, machine, ID, aug=aug) # fit ensemble ensemble.fit() # evaluate ensemble roc_auc = ensemble.evaluate() # create results row for dataframe and append df_tmp = pd.DataFrame(data=[[ SNR, machine, ID, aug, roc_auc ]], columns=columns) results = results.append(df_tmp) # save to file results.to_pickle(filepath)Summary / Results The scores can now be investigated. The results dataframe is pushed to GitHub with precalculated results. Running this notebook with the config of your interest should reproduce these results.results.groupby(by=['augmented', 'SNR']).roc_auc.mean() resultsDatabasesDatabases can serve as a data source, as a destination for our results, or both. Let's see how to read and write data from Python. For these examples we will use a local `sqlite` database.import sqlite3 import pandas as pdConnectionThis part varies depending on the database you are connecting to:
* sqlite: requires the `sqlite3` module, which we will use
* PostgreSQL: requires the `psycopg2` module
* MySQL: several alternatives exist, such as `pymysql` or `mysqlclient`
conn = sqlite3.connect('11_bd_temporal.sqlite')WritingWe can dump a dataframe into a database table with [`DataFrame.to_sql`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html).alquiler = pd.read_csv('dat/alquiler-madrid-distritos.csv') alquiler.head() alquiler.to_sql('alquiler', conn)QueriesTo run queries against the database and get the result as a dataframe, we can use [`pd.read_sql()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html)query = ''' select distrito, ano, quarter, precio from alquiler where distrito = 'Retiro' and ano between 2012 and 2014 ''' pd.read_sql(query, conn)Parameterizing queries is important to avoid SQL injection. This matters especially when the values are supplied by users. [xkcd](http://imgs.xkcd.com/comics/exploits_of_a_mom.png) Loading resultsAlthough we have seen that a table can be created and populated on the fly with `to_sql`, it is more common for the database to already have its tables created (with a well-defined schema, indexes, foreign keys, ...) and for us to simply append data to them.Let's create a table by defining its schema, to see how we could append the data from a dataframe without creating the table from scratch with `to_sql`.c = conn.cursor() c.execute('''create table alquiler_2 (distrito text, ano integer, quarter integer, precio real)''')We check that it is emptypd.read_sql('select * from alquiler_2', conn)We append the data to the existing tablealquiler.to_sql('alquiler_2', conn, index=False, if_exists='append') pd.read_sql('select * from alquiler_2 limit 5', conn)Closing the connectionconn.close()Chapter 1. Getting Started with Keras Keras is a library for implementing artificial intelligence in Python. This chapter covers how to install Keras and an example of implementing a simple artificial neural network. 1.3 Using Keras Let's learn how to use Keras in plain Python (text) mode and in Jupyter notebook mode through a hands-on exercise in which an AI predicts numbers. The exercise is number prediction.
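Since the chapter mentions installing Keras, a typical install cell in a pip-based Jupyter environment (the exact package choice depends on the backend, TensorFlow being the usual one) would look like:
```python
!pip install keras tensorflow
```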
Example 1-1: A Keras example that trains on 2 of 5 numbers and predicts the remaining 3 import keras import numpy x = numpy.array([0, 1, 2, 3, 4]) y = x * 2 + 1 model = keras.models.Sequential() model.add(keras.layers.Dense(1,input_shape=(1,))) model.compile('SGD', 'mse') model.fit(x[:2], y[:2], epochs=1000, verbose=0) print(model.predict(x))[[1.0194799] [2.9879603] [4.956441 ] [6.924921 ] [8.893402 ]]A1: core module notebook Introduction This notebook includes a description of the 'core' python module in the JBEI Quantitative Metabolic Modeling (QMM) library. A description and demonstration of the different classes can be found below. Setup First, we need to set the path and environment variable properly:
Hence, the stoichiometry of the metabolite and the labeling pattern in that reaction are included:R_ala = core.Reactant(ala, 1, 'abc')Notice that the stoichiometry information (1, meaning in the reaction only 1 molecule participates in the reaction) and the labeling data ('abc', one part of the labeling pattern, see below) only make sense in the context of a reaction, so they are not included in the metabolite class.Both classes are derived from metabolites, so they inherit their methods:R_ala.generateEMU([2,3])Reaction related classes reaction class The *reaction* class produces a reaction instance:# Create reactant metabolites coa_c = core.Metabolite('coa_c') nad_c = core.Metabolite('nad_c') pyr_c = core.Metabolite('pyr_c') # Convert into reactants Rcoa_c = core.Reactant(coa_c, 1.0) Rnad_c = core.Reactant(nad_c, 1.0) Rpyr_c = core.Reactant(pyr_c, 1.0) # Create product metabolites accoa_c = core.Metabolite('accoa_c') co2_c = core.Metabolite('co2_c') nadh_c = core.Metabolite('nadh_c') # Convert into products Raccoa_c = core.Product(accoa_c, 1.0) Rco2_c = core.Product(co2_c, 1.0) Rnadh_c = core.Product(nadh_c, 1.0) # Create reaction PDH = core.Reaction('PDH',reactants=[Rcoa_c,Rnad_c,Rpyr_c] , products=[Raccoa_c,Rco2_c,Rnadh_c] ,subsystem='S_GlycolysisGluconeogenesis')Reactions can also initialized from a string:PDH2 = core.Reaction.from_string('PDH : coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c ')The *reaction* class contains some useful functions such as: **stoichLine** to obtain the stoichiometric line for the reaction:print PDH.stoichLine() print PDH2.stoichLine()PDH : coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c PDH : coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c**getReactDict** produces a dictionary of reactants:PDH.getReactDict()**getProdDict** produces a dictionary of products:PDH.getProdDict()Elementary Metabolite Unit (EMU) related classes Elementary Metabolite Units (or EMUs) of a compound are the molecule parts (moieties) comprising any distinct subset of the compound’s atoms (Antoniewicz MR, , Stephanopoulos G: Elementary metabolite units (EMU): a novel framework for modeling isotopic distributions. Metab Eng 2007, 9:68-86.). For example, cit$_{123}$ represents the first 3 carbon atoms in the citrate molecule. EMU class The EMU class provides a class to hold and manipulate EMUs:cit321= core.EMU('cit_3_2_1')The method **findnCarbons** produces the number of carbons in the EMU:print cit321.findnCarbons()3.0The method **getMetName** produces the name of the corresponding metabolite:print cit321.getMetName() str(cit321.getMetName()) == 'cit'The method **getIndices** produces the indices:print cit321.getIndices()[3, 2, 1]**getSortedName** sorts the indices in the EMU name:print cit321.getSortedName()cit_1_2_3**getEmuInSBML** produces the name of the EMU in SBML format:print cit321.getEmuInSBML()cit_c_3_2_1Transitions related classes Transitions contain the information on how carbon (or other) atoms are passed in each reaction. Atom transitions describe, for example, the fate of each carbon in a reaction, whereas EMU transitions describe this information by using EMUs, as described below. AtomTransition class Atom transitions represent the fate of each carbon in a reaction (. (2001) 13C metabolic flux analysis. Metabolic engineering 3: 195-206). 
For example, in:AKGDH akg --> succoa + co2 abcde : bcde + aakg gets split into succoa and co2, with the first 4 carbons going to succoa and the remaining carbon going to co2.AT = core.AtomTransition('AKGDH akg --> succoa + co2 abcde : bcde + a') print ATAKGDH akg --> succoa + co2 abcde : bcde + aThe method **findEMUtransition** provides for a given input EMU (e.g. succoa_1_2_3_4), which EMU it comes from in the form of a EMU transition:emu1 = core.EMU('co2_1') print AT.findEMUtransition(emu1) emu2 = core.EMU('succoa_1_2_3_4') print AT.findEMUtransition(emu2)['AKGDH, akg_1 --> co2_1'] ['AKGDH, akg_2_3_4_5 --> succoa_1_2_3_4']This is done through the method **findEMUs**, which finds the emus from which the input emanates in the given atom transition:print emu2.name print AT.findEMUs(emu2) for emus in AT.findEMUs(emu2): for emu_ in emus: print emu_.namesuccoa_1_2_3_4 [[]] akg_2_3_4_5which in turn, uses the method **getOriginDictionary** which provides for a given input EMU the originating metabolite and the correspondance in indices:AT.getOriginDictionary(emu2)EMUTransition class Class for EMU transitions that contain information on how different EMUs transform intto each other. For example: TA1_b, TAC3_c_1_2_3 + g3p_c_1_2_3 --> f6p_c_1_2_3_4_5_6 indicating that TAC3_c_1_2_3 and g3p_c_1_2_3 combine to produce f6p_c_1_2_3_4_5_6 in reaction TA1_b (backward reaction of TA1), or: SSALy, (0.5) sucsal_c_4 --> (0.5) succ_c_4 which indicates that the fourth atom of sucsal_c becomes the fourth atom of succ_c. The (0.5) contribution coefficient indicates that reaction SSALy contains a symmetric molecule and two labeling correspondences are equally likely. Hence this transition only contributes half the flux to the final labeling.emuTrans = core.EMUTransition('TA1_b, TAC3_c_1_2_3 + g3p_c_1_2_3 --> f6p_c_1_2_3_4_5_6') print emuTrans str(emuTrans) == 'TA1_b, TAC3_c_1_2_3 + g3p_c_1_2_3 --> f6p_c_1_2_3_4_5_6'Ranged number class The *rangedNumber* class describes floating point numbers for which a confidence interval is available. For example, fluxes obtained through 2S-$^{13}$C MFA are described through the flux that best fits the data and the highest and lowest values that are found to be compatible with labeling data (see equations 16-23 in Garcia Martin *et al* 2015). However, this class has been abstracted out so it can be used with other ranged intervals. Ranged numbers can used as follows:number = core.rangedNumber(0.3,0.6,0.9) # 0.3 lowest, 0.6 best fit, 0.9 highestRanged numbers can be printed:print number[0.3 : 0.6 : 0.9]and added, substracted, multiplied and divided following the standard error propagation rules(https://en.wikipedia.org/wiki/Propagation_of_uncertainty):A = core.rangedNumber(0.3,0.6,0.9) B = core.rangedNumber(0.1,0.15,0.18) print A+B print A-B print 2*A print B/3[0.0333333333333 : 0.05 : 0.06]Flux class The flux class describes fluxes attached to a reaction. 
For example, if the net flux is described by the ranged number A and the exchange flux by the ranged number B, the corresponding flux would be:netFlux = A exchangeFlux = B flux1 = core.flux(net_exc_tup=(netFlux,exchangeFlux)) print flux1Forward: [0.445861873485 : 0.75 : 1.05149626863] Backward: [0.1 : 0.15 : 0.18] Net: [0.3 : 0.6 : 0.9] Exchange: [0.1 : 0.15 : 0.18]Fluxes can easily multiplied:print 3*flux1Forward: [1.33758562046 : 2.25 : 3.1544888059] Backward: [0.3 : 0.45 : 0.54] Net: [0.9 : 1.8 : 2.7] Exchange: [0.3 : 0.45 : 0.54]Update environment We use [graphviz](https://graphviz.gitlab.io/download/) to draw schemas.- Using conda, this seems to solve the `dot not found` exception.!conda install python-graphvizSolving environment: done # All requested packages already installed.- Update pip!pip install --upgrade pipRequirement already up-to-date: pip in /anaconda/lib/python3.6/site-packages- The python [PROV](https://pypi.python.org/pypi/prov) library is an implementation of the [Provenance Data Model](http://www.w3.org/TR/prov-dm/) by the World Wide Web Consortium. A tutorial can be found [here](http://nbviewer.jupyter.org/github/trungdong/notebooks/blob/master/PROV%20Tutorial.ipynb).!pip install provRequirement already satisfied: prov in /anaconda/lib/python3.6/site-packages Requirement already satisfied: rdflib>=4.2.1 in /anaconda/lib/python3.6/site-packages (from prov) Requirement already satisfied: networkx>=2.0 in /anaconda/lib/python3.6/site-packages (from prov) Requirement already satisfied: six>=1.9.0 in /anaconda/lib/python3.6/site-packages (from prov) Requirement already satisfied: python-dateutil>=2.2 in /anaconda/lib/python3.6/site-packages (from prov) Requirement already satisfied: lxml>=3.3.5 in /anaconda/lib/python3.6/site-packages (from prov) Requirement already satisfied: isodate in /anaconda/lib/python3.6/site-packages (from rdflib>=4.2.1->prov) Requirement already satisfied: pyparsing in /anaconda/lib/python3.6/site-packages (from rdflib>=4.2.1->prov) Requirement already satisfied: decorator>=4.1.0 in /anaconda/lib/python3.6/site-packages (from networkx>=2.0->prov)- A Python interface to GraphViz and the DOT language: [dot](https://pypi.python.org/pypi/pydot).!pip install pydotRequirement already satisfied: pydot in /anaconda/lib/python3.6/site-packages Requirement already satisfied: pyparsing>=2.1.4 in /anaconda/lib/python3.6/site-packages (from pydot)- Just to make sure: [graphviz](https://pypi.python.org/pypi/graphviz) once again!pip install graphvizRequirement already satisfied: graphviz in /anaconda/lib/python3.6/site-packagesParity check matricesIn LDPC error correction codes are represented in terms of their parity check matrix stored in `numpy.ndarray` format. As an example, the parity check matrix for the repetition code can be loaded from the `ldpc.codes` submodule as follows:import numpy as np from ldpc.codes import rep_code n=5 #specifies the lenght of the repetition code H=rep_code(n) #returns the repetition code parity check matrix print(H)[[1 1 0 0 0] [0 1 1 0 0] [0 0 1 1 0] [0 0 0 1 1]]To compute the [n,k,d] code parameters we can use functions from the `ldpc.mod2` and `ldpc.code_util` submodules. 
Below is an example showing how to calculate the code parameters of the Hamming code:from ldpc.codes import hamming_code #function for generating Hamming codes from ldpc.mod2 import rank #function for calcuting the mod2 rank from ldpc.code_util import compute_code_distance #function for calculting the code distance H=hamming_code(3) print(H) n=H.shape[1] #block length of the code k=n-rank(H) #the dimension of the code computed using the rank-nullity theorem. d=compute_code_distance(H) #computes the code distance print(f"Hamming code parameters: [n={n},k={k},d={d}]")[[0 0 0 1 1 1 1] [0 1 1 0 0 1 1] [1 0 1 0 1 0 1]] Hamming code parameters: [n=7,k=4,d=3]JSC370 Recommendation Systems Assignment , January 2021 This report uses the MovieLens movie ratings dataset collected by a research group at the University of Minnesota in 1997-1998. The dataset consists of 100,000 movie ratings from 943 users on 1,682 movies. The data was cleaned to exclude users who rated fewer than 20 different movies or who did not provide their demographic information. For this assignment we are assuming the role of data scientists in a movie streaming service company. The purpose of this report is to design a recommender system which would be able to recommend movies to a user based on how the user has rated other movies for a movie streaming service. An effective recommender system could contribute to acquiring new customers and to retaining existing ones as it would likely increase customer satisfaction. In light of this, we could evaluate the effectiveness of our recommender system by comparing customer churn rates before and after the system has been implemented. That is, we could compare how long users stay subscribed to the service after first signing up. Furthermore, we could compare the number of minutes users spend watching movies before and after as finding enjoyable movies more easily should contribute to users using the service more. In the long run, as word spreads about the effectiveness of the system, we would also expect more new users to purchase the movie streaming service, but the effect on new users would be very difficult to evaluate due to many possible confounding variables. Imports# import required libraries !pip install wget import os import os.path import matplotlib.pyplot as plt import seaborn as sns import wget import jax.numpy as jnp import numpy.random as npr from jax.api import jit, grad import pandas as pd import numpy as np from scipy.stats import mode # Matplotlib visual settings plt.rcParams['figure.figsize'] = [15,7] plt.style.use("seaborn-muted") MOVIELENS_DIR = "ml-100k" if not os.path.isdir(MOVIELENS_DIR): wget.download("https://github.com/MIE451-1513-2019/course-datasets/raw/master/ml-100k.zip") !unzip ml-100k.zip def getData(folder_path, file_name): fields = ['userID', 'itemID', 'rating', 'timestamp'] data = pd.read_csv(os.path.join(folder_path, file_name), sep='\t', names=fields) return data rating_df = getData(MOVIELENS_DIR, 'u.data') num_users = len(rating_df.userID.unique()) num_items = len(rating_df.itemID.unique()) print("Number of users:", num_users) print("Number of items:", num_items)Number of users: 943 Number of items: 1682Exploratory Data Analysissns.countplot(x=rating_df.rating) plt.xlabel("Rating", fontsize=16) plt.ylabel("Count", fontsize=16) plt.title("Ratings Given by Users", fontsize=24) plt.show() rating_df["rating"].describe()The majority of ratings given by users are greater than 3. 
This can speak to the tendency of users generally having positive opinions about movies, or to users simply not rating movies they have not enjoyed. It might also be due to how different users perceive ratings; some users might consider a 3 star rating to be average, while others might consider it terrible. The imbalance of ratings may pose a challenge: a simple algorithm that always predicts a rating between 3 and 5 may already be quite effective, and might be hard for our recommender system to beat.movies = rating_df.groupby('itemID').agg({'rating': ['count', np.mean]}) plt.hist(movies[('rating', 'count')], bins=50) plt.xticks(np.arange(0, max(movies[('rating', 'count')])+1, 20)) plt.xlabel("Number of Ratings", fontsize=16) plt.ylabel("Count", fontsize=16) plt.title("Number of Ratings Given to Movies", fontsize=24) plt.show()The majority of movies have received fewer than 40 ratings from users, which may make learning the latent features of movies a challenge. Still, a sizeable number of movies have received many ratings.plt.hist(movies[('rating', 'mean')], bins=20) plt.xlabel("Rating", fontsize=16) plt.ylabel("Count", fontsize=16) plt.title("Average Movie Ratings", fontsize=24) plt.show()The above plot corresponds with what we observed when looking at the ratings users generally give: the average movie ratings also tend to be above 3.0, but here the imbalance is not as severe as before. This suggests that a movie that has received many ratings has received both high and low ones.users = rating_df.groupby('userID').agg({'rating': ['count', np.mean, lambda x: x.value_counts().index[0]]}) plt.hist(users[('rating', 'count')], bins=50) plt.xticks(np.arange(0, max(movies[('rating', 'count')])+1, 20)) plt.xlabel("Number of Ratings", fontsize=16) plt.ylabel("Count", fontsize=16) plt.title("Number of Ratings Given by Users", fontsize=24) plt.show()Similarly to movies, most users have given relatively few ratings, but many users have given a large number.plt.hist(users[('rating', 'mean')], bins=20) plt.xlabel("Rating", fontsize=16) plt.ylabel("Count", fontsize=16) plt.title("Average Rating Given per User", fontsize=24) plt.show()The average ratings users give seem to approximately follow a normal distribution centered around 3.5.sns.countplot(users[('rating', '')]) plt.title("Mode Rating Given by Users", fontsize=24) plt.xlabel("Rating", fontsize=16) plt.ylabel("Count", fontsize=16) plt.show()/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarningPer user, the most common rating given is clearly 4, but there are users who most frequently give out 3s and 5s. Very few users most often give movies a 1 or a 2. Brainstorming In reality, there are many different variables that could affect a user's rating for a given movie. The interface a user sees for providing ratings can affect the rating itself: exposure to other users' or critics' ratings can sway the user's opinion, while a very inconvenient user interface may mean users only bother to rate movies when they have strong feelings about them. Similar to critics' opinions, a user's rating can also be affected by the box office success of the movie.
A user's rating for a movie does not depend only on what the user thinks of the movie itself; it can also be affected by the user's opinion of the movie's cast and directing staff. Even if a user sees a movie just once, the rating they would give for it may change over time - if the movie is one the user saw very long ago, they might have a sense of nostalgia about it and thus appreciate it more. Likewise, actors are public figures and often get into scandals, which might affect the user's perception of them, and hence possibly the user's opinion of the movies those actors have starred in. These are just some of the many additional variables that may affect a user's rating. Unfortunately, most of them are nearly impossible to measure objectively and thus to account for in our models. Still, we could try obtaining some of this data by combining our dataset with professional critics' ratings and information about how well the movies did in cinemas. Of course, any model is likely to see an improvement from having more data to train on, so combining our dataset with, for example, the Netflix Prize competition dataset could yield better results. Model Staircase We propose the following model staircase: **Model One:** - User ratings are independent and identically normally distributed with an average of 3.53 and a standard deviation of 1.13 (where 3.53 is the mean rating given across all movies). **Model Two:** - User ratings are independent, but are normally distributed around each user's own average rating. **Model Three:** - User ratings are conditionally dependent on some movie features (average rating, genre, etc.), i.e. $ p(X \mid \textbf{M}) \sim N(\mu_i, \sigma_i) $, where $X$ is the rating given movie features $\textbf{M}$ for user $i$. **Model Four:**- User ratings depend on some linear combination of latent user and movie features (PMF)**Model Five:**- User ratings depend on some non-linear combination of latent user and movie features (PMF extension 1)**Model Six:**- The probability of a user giving a movie a certain rating depends on some combination of latent user and movie features (PMF extension 2) Support functions and variablesfrom tqdm.auto import tqdm big_array = rating_df.values big_array[:, :2] = big_array[:, :2]-1 # Fixes off-by-one error: IDs start at 1, array indices at 0 # Make train, validation, and test splits train_mat = big_array[:80000, :] valid_mat = big_array[80000:90000, :] test_mat = big_array[90000:100000, :] train_inputs, train_targets = train_mat[:, :2], train_mat[:, 2].astype(jnp.float32) val_inputs, val_targets = valid_mat[:, :2], valid_mat[:, 2].astype(jnp.float32) test_inputs, test_targets = test_mat[:, :2], test_mat[:, 2].astype(jnp.float32) def plot_training_curves(train_history, val_history): plt.plot(train_history, label="train") plt.plot(val_history, label="validation") plt.legend() plt.xlabel("Iterations") plt.title("Training curves") def rmse(preds, target): # Note: as written this returns the mean absolute error rather than a true root-mean-squared error return jnp.mean(jnp.sqrt((preds - target)**2))Model 1Model 1 is the most basic model: it assumes that user ratings are independently and identically distributed as a normal distribution with the same mean and variance across all users.mean, global_sd = rating_df["rating"].mean(), rating_df["rating"].std() preds = np.random.normal(mean, global_sd, size=val_inputs.shape[0]) print(f"Model 1 validation RMSE: {rmse(preds, val_targets)}")Model 1 validation RMSE: 1.261194109916687Model 4Model 4 is the vanilla probabilistic matrix factorization solution, where we assume that the ratings users give can be described as a linear combination
of matrices representing user and movie latent features.# Model implementation in JAX (https://github.com/google/jax) # Optimization hyperparameters step_size = 100.0 train_iters = 1500 # Model hyperparameters num_factors = 3 init_scale = 0.1 latent_param_regularization = 0.1 # Init parameters user_latents = np.random.randn(num_users, num_factors) * init_scale movie_latents = np.random.randn(num_items, num_factors) * init_scale params_m4 = (user_latents, movie_latents) # Actual model mean_rating = jnp.mean(train_targets) def pmf4_predict(user_latents, movie_latents, inputs): (user_index, movie_index) = (inputs[:, 0], inputs[:, 1]) return jnp.sum(user_latents[user_index] * movie_latents[movie_index], axis=1) + mean_rating def regularization_loss(user_latents, movie_latents): return latent_param_regularization * (jnp.mean(user_latents**2) + jnp.mean(movie_latents**2)) def prediction_mse(params, inputs, targets): preds = pmf4_predict(*params, inputs) return (preds - targets)**2 def training_loss(params, inputs, targets): return regularization_loss(*params) + jnp.mean(prediction_mse(params, inputs, targets), axis=0) # One training step @jit # Pre-compiles the function to speed up training. def sgd_update(params, i): # Stochastic gradient descent (grads_user, grads_movie) = grad(training_loss)(params, train_inputs, train_targets) (user_latents, movie_latents) = params return (user_latents - step_size * grads_user, # one step of gradient descent movie_latents - step_size * grads_movie) # Main training loop # print('i', 'train loss', 'train RMSE', 'test RMSE') train_rmses = [] val_rmses = [] for i in tqdm(range(train_iters)): params_m4 = sgd_update(params_m4, i) if i % 50 == 0: # Print current progress train_rmse = jnp.mean(jnp.sqrt(prediction_mse(params_m4, train_inputs, train_targets))) val_rmse = jnp.mean(jnp.sqrt(prediction_mse(params_m4, val_inputs, val_targets))) train_rmses.append(train_rmse) val_rmses.append(val_rmse) plot_training_curves(train_rmses, val_rmses)Model 5Model 5 is an extension of model 4, where the final rating predictions are obtained by running the linear combination of the latent matrices through a [custom sigmoid activation function](https://www.desmos.com/calculator/szclaip7ua).def activation(x): return 4 / (1 + jnp.exp(-0.8*x)) # Model implementation in JAX (https://github.com/google/jax) # Optimization hyperparameters step_size = 100.0 train_iters = 1500 # Model hyperparameters num_factors = 3 init_scale = 0.1 latent_param_regularization = 0.1 # Init parameters user_latents = np.random.randn(num_users, num_factors) * init_scale movie_latents = np.random.randn(num_items, num_factors) * init_scale params_m5 = (user_latents, movie_latents) # Actual model mean_rating = jnp.mean(train_targets) def pmf5_predict(user_latents, movie_latents, inputs): (user_index, movie_index) = (inputs[:, 0], inputs[:, 1]) return activation( jnp.sum(user_latents[user_index] * movie_latents[movie_index], axis=1) + mean_rating) + 1 def regularization_loss(user_latents, movie_latents): return latent_param_regularization * (jnp.mean(user_latents**2) + jnp.mean(movie_latents**2)) def prediction_mse(params, inputs, targets): preds = pmf5_predict(*params, inputs) return (preds - targets)**2 def training_loss(params, inputs, targets): return regularization_loss(*params) + jnp.mean(prediction_mse(params, inputs, targets), axis=0) # One training step @jit # Pre-compiles the function to speed up training. 
def sgd_update(params, i): # Stochastic gradient descent (grads_user, grads_movie) = grad(training_loss)(params, train_inputs, train_targets) (user_latents, movie_latents) = params return (user_latents - step_size * grads_user, # one step of gradient descent movie_latents - step_size * grads_movie) train_rmses = [] val_rmses = [] for i in tqdm(range(train_iters)): params_m5 = sgd_update(params_m5, i) if i % 50 == 0: # Print current progress train_rmse = jnp.mean(jnp.sqrt(prediction_mse(params_m5, train_inputs, train_targets))) val_rmse = jnp.mean(jnp.sqrt(prediction_mse(params_m5, val_inputs, val_targets))) train_rmses.append(train_rmse) val_rmses.append(val_rmse) plot_training_curves(train_rmses, val_rmses)Model 6Model 6 is a more complex extension of model 4. We have designed the user latent matrix such that every user has 5 latent vectors. Every such vector corresponds to one rating (1-5) and describes how likely a user is to rate a movie with the corresponding rating. The movie latent vectors describe how much the movies have the features that affect the users' probability of giving specific ratings. Thus, we can use a softmax activation for our final predictions and treat this as a classification problem with 5 classes, i.e. ratings 1-5.def softmax(x, temperature=1): e_x = jnp.exp(temperature*x) return e_x / jnp.sum(e_x, axis=1).reshape(-1, 1) def one_hot_encode(targets): one_hot = np.zeros((targets.shape[0], 5)) one_hot[np.arange(targets.shape[0]), (targets-1).astype(int)] = 1.0 return one_hot # Model implementation in JAX (https://github.com/google/jax) # Optimization hyperparameters step_size = 100.0 train_iters = 1500 # Model hyperparameters num_factors = 2 init_scale = 0.1 latent_param_regularization = 0.1 # Init parameters user_latents = np.random.randn(num_users, num_factors, 5) * init_scale # movie_latents = np.random.randn(num_items, num_factors, 5) * init_scale movie_latents = np.random.randn(num_items, num_factors, 1) * init_scale params_m6 = (user_latents, movie_latents) params_over_time = [] # Actual model mean_rating = jnp.mean(train_targets) def pmf6_predict(user_latents, movie_latents, inputs): (user_index, movie_index) = (inputs[:, 0], inputs[:, 1]) logits = jnp.sum(user_latents[user_index] * movie_latents[movie_index], axis=1) return softmax(logits, temperature=1) def regularization_loss(user_latents, movie_latents): return latent_param_regularization * (jnp.mean(user_latents**2) + jnp.mean(movie_latents**2)) # Cross Entropy for categorical distribution def prediction_ce(params, inputs, targets): preds = pmf6_predict(*params, inputs) return -jnp.sum(targets*jnp.log(preds), axis=1) def training_loss(params, inputs, targets): return regularization_loss(*params) + jnp.mean(prediction_ce(params, inputs, targets), axis=0) # One training step @jit # Pre-compiles the function to speed up training. 
def sgd_update(params, i): # Stochastic gradient descent (grads_user, grads_movie) = grad(training_loss)(params, train_inputs, one_hot_encode(train_targets)) (user_latents, movie_latents) = params return (user_latents - step_size * grads_user, # one step of gradient descent movie_latents - step_size * grads_movie) # Main training loop train_rmses = [] val_rmses = [] for i in tqdm(range(train_iters)): params_m6 = sgd_update(params_m6, i) if i % 50 == 0: # Print current progress params_over_time.append(params_m6) train_rmse = jnp.mean(prediction_ce(params_m6, train_inputs, one_hot_encode(train_targets))) val_rmse = jnp.mean(prediction_ce(params_m6, val_inputs, one_hot_encode(val_targets))) train_rmses.append(train_rmse) val_rmses.append(val_rmse) plot_training_curves(train_rmses, val_rmses) pmf6_predict(*params_m6, train_inputs)[2]Model EvaluationWe choose two ways to evaluate our models: RMSE and classification accuracy, where we treat each rating as a class. For models 1, 4 and 5, we have rounded the predicted ratings to the nearest integer. Predictions that are greater than 5 are rounded down to 5, and likewise predictions below 1 are rounded up to 1.from sklearn.metrics import accuracy_score, confusion_matrix def round_predictions(raw_predictions): new = np.rint(raw_predictions) new[new > 5] = 5 new[new < 1] = 1 return new def plot_confusion(preds, targets, name): plt.imshow(confusion_matrix(preds, targets, normalize="true"), cmap="Blues") plt.clim(0, 1) plt.colorbar() plt.xlabel("Prediction") plt.ylabel("True value") plt.xticks(range(0, 5), labels=range(1, 6)) plt.yticks(range(0, 5), labels=range(1, 6)) plt.title(name)Model 1Model 1 is very unsuccessful and does a poor job capturing the complexity of the task.new_preds = round_predictions(np.random.normal(mean, global_sd, size=test_inputs.shape[0])) print("Accuracy:", accuracy_score(new_preds, test_targets)) print("RMSE:", rmse(new_preds, test_targets)) plot_confusion(new_preds, test_targets, "Model 1 Confusion Matrix")Model 4Model 4 provides much more promising results. We see a sizeable increase in both classification accuracy and RMSE. The confusion matrix shows that the model does an excellent job predicting ratings on both ends of the range (1 and 5), and generally gives close-to-true or precise predictions for ratings inbetween.new_preds = round_predictions(pmf4_predict(*params_m4, test_inputs)) print("Accuracy:", accuracy_score(new_preds, test_targets)) print("RMSE:", rmse(new_preds, test_targets)) plot_confusion(new_preds, test_targets, "Model 4 Confusion Matrix")Model 5Model 5 is still an improvement over model 1, but we can see it performs worse than its simpler counterpart model 4. Besides its worse accuracy and RMSE scores, we also observe that it frequently makes noisy predictions for ratings that in actuality were equal to 2.new_preds = round_predictions(pmf5_predict(*params_m5, test_inputs)) print("Accuracy:", accuracy_score(new_preds, test_targets)) print("RMSE:", rmse(new_preds, test_targets)) plot_confusion(new_preds, test_targets, "Model 5 Confusion Matrix")Model 6: In terms of raw predictions, it is difficult to say if model 6 is an improvement over model 4: model 6 obtains a better classification accuracy, but a slightly worse RMSE score.However, it appears that with the purpose of making recommendations, model 6 may prove to be more useful. 
While the differences are subtle, the confusion matrix shows that model 6 accounts for the bias in ratings by predicting ratings in the 3-5 more frequently and with higher accuracy, even if the proportion of true positives for rating 5 has slightly decreased.new_preds = pmf6_predict(*params_m6, test_inputs).argmax(axis=1) + 1 print("Accuracy:", accuracy_score(new_preds, test_targets)) print("RMSE:", rmse(new_preds, test_targets)) plot_confusion(new_preds, test_targets, "Model 6 Confusion Matrix")Latent Variables and their meaningWe can attempt to interpret the latent variables in our matrices.import ipywidgets as widgets from ipywidgets import interact, interact_manual def plot_params(Time): (user_latents, movie_latents) = params_over_time[Time] for i in range(5): plt.scatter(user_latents[:, 0, i], user_latents[:, 1, i], label=f"Rating {i+1}", alpha=0.4) plt.xlim((-3, 3)) plt.ylim((-3, 3)) plt.legend() plt.title(f"User latent paramaters at iter {Time*50}") slider = widgets.IntSlider(min=0, max=len(params_over_time)-1) _ = interact(plot_params, Time=slider)From the interactive graph above, we can see that the latent vectors for each rating start to seperate as training progresses. Generally, we can see that the latent vectors for 1-rated and 5-rated get further apart as iterations increase. This has the real life interpretation that movies that are rated 1 and rated 5 by users are likely to be different from each other. We can also see that there is some form of clustering occuring. This has the real life interpretation that there is a general consensus on the quality of any given movie.plot_params(len(params_over_time)-1) # Star wars: 49 # Princess bride: 172 # Titanic: 312 # Flintstones: 382 # : 900 # : 372 (user_latents, movie_latents) = params_m6 plt.scatter(user_latents[:, 0, 4], user_latents[:, 1, 4], alpha=0.1, s=30, label="User Rating 5") plt.scatter(movie_latents[49, 0], movie_latents[49, 1]) plt.text(movie_latents[49, 0], movie_latents[49, 1]+0.1, "Star wars") plt.scatter(movie_latents[172, 0], movie_latents[172, 1]) plt.text(movie_latents[172, 0], movie_latents[172, 1]+0.1, "") plt.scatter(movie_latents[312, 0], movie_latents[312, 1]) plt.text(movie_latents[312, 0], movie_latents[312, 1]+0.1, "Titanic") plt.scatter(movie_latents[382, 0], movie_latents[382, 1]) plt.text(movie_latents[382, 0], movie_latents[382, 1]+0.1, "The Flintstones") plt.scatter(movie_latents[900, 0], movie_latents[900, 1]) plt.text(movie_latents[900, 0], movie_latents[900, 1]+0.1, "") plt.scatter(movie_latents[372, 0], movie_latents[372, 1]) plt.text(movie_latents[372, 0], movie_latents[372, 1]+0.1, "") plt.xlim((-3, 3)) plt.ylim((-3, 3)) plt.vlines(0, ymax=3, ymin=-3, alpha=0.4) plt.hlines(0, xmax=3, xmin=-3, alpha=0.4) plt.legend() _ = plt.title("6 Movies and 5-Rated user latents")In the graph above, we have plotted the latent vector for 5-rated movies for users. We have highlighted several movies to demonstrate the meaning of the latent variables. We chose 3 movies that were critically acclaimed: The Princess bride, Titanic and Star wars. We can see that they are located near the latent vectors for 5-rated movies (with a cosine distance metric). We also chose 3 movies that were considered terrible: The Flintstones, Mr. Magoo, and (These movies were [chosen from this list](https://screenrant.com/worst-movies-1990s-according-rotten-tomatoes/)). 
We can see that they are located further from the latent vectors (with a cosine distance metric).plt.scatter(user_latents[:, 0, 0], user_latents[:, 1, 0], alpha=0.1, s=30, label="User Rating 1") plt.scatter(movie_latents[49, 0], movie_latents[49, 1]) plt.text(movie_latents[49, 0], movie_latents[49, 1]+0.1, "Star wars") plt.scatter(movie_latents[172, 0], movie_latents[172, 1]) plt.text(movie_latents[172, 0], movie_latents[172, 1]+0.1, "") plt.scatter(movie_latents[312, 0], movie_latents[312, 1]) plt.text(movie_latents[312, 0], movie_latents[312, 1]+0.1, "Titanic") plt.scatter(movie_latents[382, 0], movie_latents[382, 1]) plt.text(movie_latents[382, 0], movie_latents[382, 1]+0.1, "The Flintstones") plt.scatter(movie_latents[900, 0], movie_latents[900, 1]) plt.text(movie_latents[900, 0], movie_latents[900, 1]+0.1, "") plt.scatter(movie_latents[372, 0], movie_latents[372, 1]) plt.text(movie_latents[372, 0], movie_latents[372, 1]+0.1, "The Flintstones") plt.xlim((-3, 3)) plt.ylim((-3, 3)) plt.vlines(0, ymax=3, ymin=-3, alpha=0.4) plt.hlines(0, xmax=3, xmin=-3, alpha=0.4) plt.legend() _ = plt.title("6 Movies and 1-Rated user latents")CS109B Data Science 2: Advanced Topics in Data Science Homework 5: Autoencoders **Harvard University****Spring 2020****Instructors:** , , #RUN THIS CELL import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles) #RUN THIS CELL import os import pathlib working_dir = pathlib.Path().absolute() os.chdir(working_dir)INSTRUCTIONS- To submit your assignment follow the instructions given in Canvas.- This homework can be submitted in pairs.- Please restart the kernel and run the entire notebook again before you submit.import numpy as np import seaborn as sns from matplotlib import pyplot import matplotlib.pylab as plt %matplotlib inline from sklearn.decomposition import PCA from sklearn.metrics import mean_squared_error import tensorflow as tf from tensorflow.keras import Input from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Reshape from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D from tensorflow.keras.optimizers import Adam, SGDOverview In this homework, we will investigate autoencoders, how they are related to PCA (and in doing so, show that they can be a more powerful extension of this technique), and one possible application of autoencoders for outlier detection. Question 1: Autoencoders and MNIST [50pts total] For this question, we will be using the [MNIST Dataset](https://en.wikipedia.org/wiki/MNIST_database) of handwritten digits, a simple standardized image dataset. The dataset consists of single-channel (black and white) 28x28 images, containing one digit each. We will see if it is feasible to encode (compress, in this case) the images into just 2 dimensions, a substantial compression ratio considering that the original vector has dimension 28x28=784.**1.1** [1pts] Load MNIST using `tf.keras.datasets.mnist.load_data()`, saving the training data as `x_train` and `y_train`, and the test data as `x_test`, `y_test`. Normalize the images to the range [0.,1.] by dividing by 255.**1.2** [1pts] Use `imshow` to show one image of your choice from the train set, specifying `cmap='gray'` to show the image in black and white.**1.3** [2pts] Construct and instance of `sklearn`'s `PCA` class, specifying that it only use the first 2 PCA components. 
Fit to `x_train` (Hint: you will need to use `reshape`), and project `x_train` down to its first 2 PCA components, saving the new array of shape (N,2) to `pca_latent_train`. This is the representation of all the images in `x_train` in a 2D latent space.**1.4** [2pts] Make a scatterplot of `pca_latent_train` with the point color designated by the corresponding class labels. Pick a reasonable color palette with enough of a contrast to clearly distinguish classes. **1.5** [8pts] Linear Autoencoder. Construct an encoder-decoder network with **linear activations** only, and **no biases**. The encoder and decoder should consist of one dense layer each, and the bottleneck dimension should be 2. The encoder and decoder should be their own separate models called `linear_encoder` and `linear_decoder`. Create the full linear autoencoder, call it `lae`, out of the encoder and decoder. Use a mean-squared-error reconstruction loss. Print the `summary()` for both the encoder and decoder networks, as well as the summary for the full linear autoencoder.**1.6** [4pts] Train your linear autoencoder `lae` on the train data, using `x_test` as validation data. Use enough epochs such that the training loss plateaus. Plot the train loss and validation (equivalent to test, in this case) loss as a function of epoch, in the same figure.**1.7** [3pts] Compute the `linear_encoder`'s latent space representation of `x_train`, calling the resulting array `lae_latent_train`. Create two scatterplots, side by side, using `subplots`, showing `pca_latent_train` (from **1.4**) and `lae_latent_train`, with points colored according to class label. Don't forget to title the two figures.**1.8** [3pts] What do you notice about the latent space representations in PCA and the LAE (linear autoencoder)? Does either one do a substantially better job at separating the 10 classes in the 2D latent space? --- *Bonus, but for no additional points: prove a relationship between the latent space representation in PCA and LAE for the same bottleneck dimension.***1.9** [3pts] What do you expect to happen if you added more dense layers (no biases) with only linear activations to your `linear_encoder` and `linear_decoder`? Would you expect a better reconstruction error?**1.10** [8pts] Construct a nonlinear (regular) autoencoder with at least 2 dense layers, with biases, in both the encoder and decoder parts. Call the encoder network `encoder` and the decoder network `decoder`, and the full autoencoder `ae`. Print the summaries for `encoder`, `decoder`, and `ae`.**1.11** [4pts] Train your autoencoder on `x_train`, using `x_test` as validation data. Train it for a reasonable number of epochs, using your best judgement on what that entails. As usual, plot the train loss and validation loss as a function of epoch.**1.12** [3pts] Compute the `encoder`'s latent space representation of `x_train`, calling the resulting array `ae_latent_train`. Plot the scatterplots of `pca_latent_train`, `lae_latent_train`, and `ae_latent_train` in a row using `subplots` so we can see them all simultaneously. What do you notice?**1.13** [6pts] Comparing reconstructed images. You will create arrays containing the reconstructed `x_test` using PCA, the linear autoencoder, and the regular autoencoder. For PCA, be sure to use the `pca` object you created and fit in **1.3** on the *train* data. You will project `x_test` onto its 2D latent space representation, and then convert it back, saving the result as `pca_recons_x_test`. 
For the linear autoencoder and the regular autoencoder, save the reconstructed `x_test` as `lae_recons_x_test` and `ae_recons_x_test` respectively. Now, you will create a 6 row by 4 column collection of subplots. Each row will correspond to an element of the test set (of your choice), with the columns being the PCA reconstruction, the LAE reconstruction, the AE reconstruction, and the original image. Be sure to title the subplots with 'PCA', 'LAE', 'AE', 'Original'. **1.14** [2pts] Finally, using `sklearn.metrics`'s `mean_squared_error`, report the average reconstruction error across the entire test set for PCA, LAE, and AE. Does the ordering agree with what you've seen in the previous questions? Does it support your conclusion in **1.8**? Answers **1.1** [1pts] Load MNIST using `tf.keras.datasets.mnist.load_data()`, saving the training data as `x_train` and `y_train`, and the test data as `x_test`, `y_test`. Normalize the images to the range [0.,1.] by dividing by 255.# your code here**1.2** [1pts] Use `imshow` to show one image of your choice from the train set, specifying `cmap='gray'` to show the image in black and white.# your code here**1.3** [2pts] Construct and instance of `sklearn`'s `PCA` class, specifying that it only use the first 2 PCA components. Fit to `x_train` (Hint: you will need to use `reshape`), and project `x_train` down to its first 2 PCA components, saving the new array of shape (N,2) to `pca_latent_train`. This is the representation of all the images in `x_train` in a 2D latent space.# your code here**1.4** [2pts] Make a scatterplot of `pca_latent_train` with the point color designated by the corresponding class labels. Pick a reasonable color palette with enough of a contrast to clearly distinguish classes.# your code here**1.5** [8pts] Linear Autoencoder. Construct an encoder-decoder network with **linear activations** only, and **no biases**. The encoder and decoder should consist of one dense layer each, and the bottleneck dimension should be 2. The encoder and decoder should be their own separate models called `linear_encoder` and `linear_decoder`. Create the full linear autoencoder, call it `lae`, out of the encoder and decoder. Use a mean-squared-error reconstruction loss. Print the `summary()` for both the encoder and decoder networks, as well as the summary for the full linear autoencoder.# your code here**1.6** [4pts] Train your linear autoencoder `lae` on the train data, using `x_test` as validation data. Use enough epochs such that the training loss plateaus. Plot the train loss and validation (equivalent to test, in this case) loss as a function of epoch, in the same figure.# your code here # your code here**1.7** [3pts] Compute the `linear_encoder`'s latent space representation of `x_train`, calling the resulting array `lae_latent_train`. Create two scatterplots, side by side, using `subplots`, showing `pca_latent_train` (from **1.4**) and `lae_latent_train`, with points colored according to class label. Don't forget to title the two figures.# your code here # your code here**1.8** [3pts] What do you notice about the latent space representations in PCA and the LAE (linear autoencoder)? Does either one do a substantially better job at separating the 10 classes in the 2D latent space? 
--- *Bonus, but for no additional points: prove a relationship between the latent space representation in PCA and LAE for the same bottleneck dimension.* *Your answer here* **1.9** [3pts] What do you expect to happen if you added more dense layers (no biases) with only linear activations to your `linear_encoder` and `linear_decoder`? Would you expect a better reconstruction error? *Your answer here* **1.10** [8pts] Construct a nonlinear (regular) autoencoder with at least 2 dense layers, with biases, in both the encoder and decoder parts. Call the encoder network `encoder` and the decoder network `decoder`, and the full autoencoder `ae`. Print the summaries for `encoder`, `decoder`, and `ae`.# your code here**1.11** [4pts] Train your autoencoder on `x_train`, using `x_test` as validation data. Train it for a reasonable number of epochs, using your best judgement on what that entails. As usual, plot the train loss and validation loss as a function of epoch.# your code here # your code here**1.12** [3pts] Compute the `encoder`'s latent space representation of `x_train`, calling the resulting array `ae_latent_train`. Plot the scatterplots of `pca_latent_train`, `lae_latent_train`, and `ae_latent_train` in a row using `subplots` so we can see them all simultaneously. What do you notice?# your code here # your code here*Your answer here* **1.13** [6pts] Comparing reconstructed images. You will create arrays containing the reconstructed `x_test` using PCA, the linear autoencoder, and the regular autoencoder. For PCA, be sure to use the `pca` object you created and fit in **1.3** on the *train* data. You will project `x_test` onto its 2D latent space representation, and then convert it back, saving the result as `pca_recons_x_test`. For the linear autoencoder and the regular autoencoder, save the reconstructed `x_test` as `lae_recons_x_test` and `ae_recons_x_test` respectively. Now, you will create a 6 row by 4 column collection of subplots. Each row will correspond to an element of the test set (of your choice), with the columns being the PCA reconstruction, the LAE reconstruction, the AE reconstruction, and the original image. Be sure to title the subplots with 'PCA', 'LAE', 'AE', 'Original'.# your code here # your code here**1.14** [2pts] Finally, using `sklearn.metrics`'s `mean_squared_error`, report the average reconstruction error across the entire test set for PCA, LAE, and AE. Does the ordering agree with what you've seen in the previous questions? Does it support your conclusion in **1.8**?# your code here*Your answer here* Question 2: Convolutional Autoencoders and Outlier Detection [50pts total] For this question, we will be using a modified version of a subset of MNIST. We have hidden some images of handwritten letters in the dataset `data/cs109b-mnist-mix.csv` amongst thousands of handwritten digits. The dataset is provided as a csv, where each row is an image, and each column gives the value of a given pixel in a flattened 28 by 28 image. It would be very tedious to have humans flip through every image to find the letters, so instead we will exploit a neat feature of autoencoders, outlier detection. This method turns a disadvantage of autoencoders, namely, their inability to properly reconstruct data very dissimilar to what they were trained on, into an advantage. 
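To make the idea concrete before building anything, here is a minimal sketch of reconstruction-error-based outlier detection in plain NumPy; `model`, `images`, and the quantile threshold are hypothetical placeholders rather than the required solution for this question:

```python
import numpy as np

# Hypothetical setup: `images` has shape (N, 28, 28) and `model` is any trained
# autoencoder whose predict() returns reconstructions of the same shape.
recons = model.predict(images)

# One reconstruction-error score per image (mean squared error over the pixels).
recon_errors = np.mean((images - recons) ** 2, axis=(1, 2))

# Images the autoencoder reconstructs poorly are candidate outliers
# (e.g. letters hidden among digits). The 99th percentile is only a placeholder;
# question 2.6 asks you to pick and justify your own threshold.
threshold = np.quantile(recon_errors, 0.99)
flagged = np.where(recon_errors > threshold)[0]
print(f"{flagged.size} images flagged for manual inspection")
```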
You will also be constructing a convolutional autoencoder, which tends to work a lot better for reconstructing images, all while using substantially fewer parameters.**2.1** [2pts] Load and normalize (by dividing by 255) the modified dataset from `data/cs109b-mnist-mix.csv`, and reshape it to (-1, 28, 28), saving the array as `x_cs109b`. Using `imshow` and `cmap='gray'`, plot one image of your choice from this dataset.**2.2** [15pts] Create a convolutional autoencoder called `cae`. This time you don't need separate references to the encoder and decoder parts since we only intend to use the full autoencoder. You may use a combination of `Conv2D`, `MaxPooling2D`, `Flatten`, `Dense`, `Reshape`, and `UpSampling2D` layers. You may use any number of these layers, and if you are unfamiliar with any of these layers we encourage you to look at their TF Keras documentation. You will have to experiment with an appropriate bottleneck size to complete the rest of question **2**. As always, print the `summary()` of your model.**2.3** [10pts] Train your convolutional autoencoder on `x_train` (from **problem 1**, MNIST), using `x_test` as validation data. Plot the train/validation loss versus epoch. This will adapt the convolutional autoencoder to the type of data we expect, handwritten digits.**2.4** [4pts] Pass `x_test` through your convolutional autoencoder (CAE), calling the resulting reconstruction of the dataset `cae_recons_x_test`. To see how well your CAE is performing, we will visually inspect some of its reconstructions. Make an array of subplots of 6 rows and 2 columns, with the rows being different elements of the test set (your choice) and the columns being the CAE reconstruction and the Original image. How good is the reconstruction? How does it compare to the reconstruction of your dense autoencoder from **1.13**?**2.5** [4pts] Construct an instance of `tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)` and use it to calculate the MSE reconstruction error of every element in `x_test`. Save this array as `mse_x_test`; you want this output to be a 1D array so consider the required shapes of what you feed in to the `MeanSquaredError` object. We will now pass the mystery dataset through the CAE: reconstruct `x_cs109b`, saving the result as `cae_recons_x_cs109b`. Compute the reconstruction errors, saving the result as `mse_x_cs109b`.**2.6** [5pts] Using subplots, plot the histograms of `mse_x_test` and `mse_x_cs109b` side-by-side. For the most part, do they look like they are coming from similar types of data? Pick a reasonable threshold value for reconstruction error based on the histogram for `mse_x_cs109b`; plot this threshold as a vertical line on the histogram. Be sure to explain your choice of threshold. Beyond this threshold, you will examine the images to see if you can find letters. Print how many images lie beyond this threshold.**2.7** [10pts] Use the subset of data determined by your threshold to find the letters in `x_cs109b`, displaying them as images. Show your work! Create a 2-column table indicating the letter (e.g., 'a') and the index where it is located (e.g. '9728'). There are a few letters total. 
To get full credit you need to find most of them (you should not need to examine more than a few tens of images - if so, consider a different threshold in **2.6** or check your CAE performance).You will have been able to find the majority of the letters hidden in the dataset of thousands of images, while only having to manually look at 1/100th of the dataset. This demonstrates how autoencoders could be used as a cheap preprocessing step to draw out the most "interesting" data. Answers **2.1** [2pts] Load and normalize (by dividing by 255) the modified dataset from `data/cs109b-mnist-mix.csv`, and reshape it to (-1, 28, 28), saving the array as `x_cs109b`. Using `imshow` and `cmap='gray'`, plot one image of your choice from this dataset.# your code here # your code here**2.2** [15pts] Create a convolutional autoencoder called `cae`. This time you don't need separate references to the encoder and decoder parts since we only intend to use the full autoencoder. You may use a combination of `Conv2D`, `MaxPooling2D`, `Flatten`, `Dense`, `Reshape`, and `UpSampling2D` layers. You may use any number of these layers, and if you are unfamiliar with any of these layers we encourage you to look at their TF Keras documentation. You will have to experiment with an appropriate bottleneck size to complete the rest of question **2**. As always, print the `summary()` of your model.# your code here**2.3** [10pts] Train your convolutional autoencoder on `x_train` (from **problem 1**, MNIST), using `x_test` as validation data. Plot the train/validation loss versus epoch. This will adapt the convolutional autoencoder to the type of data we expect, handwritten digits.# your code here # your code here**2.4** [4pts] Pass `x_test` through your convolutional autoencoder (CAE), calling the resulting reconstruction of the dataset `cae_recons_x_test`. To see how well your CAE is performing, we will visually inspect some of its reconstructions. Make an array of subplots of 6 rows and 2 columns, with the rows being different elements of the test set (your choice) and the columns being the CAE reconstruction and the Original image. How good is the reconstruction? How does it compare to the reconstruction of your dense autoencoder from **1.13**?# your code here # your code here*Your answer here* **2.5** [4pts] Construct an instance of `tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)` and use it to calculate the MSE reconstruction error of every element in `x_test`. Save this array as `mse_x_test`; you want this output to be a 1D array so consider the required shapes of what you feed in to the `MeanSquaredError` object. We will now pass the mystery dataset through the CAE: reconstruct `x_cs109b`, saving the result as `cae_recons_x_cs109b`. Compute the reconstruction errors, saving the result as `mse_x_cs109b`.# your code here # your code here**2.6** [5pts] Using subplots, plot the histograms of `mse_x_test` and `mse_x_cs109b` side-by-side. For the most part, do they look like they are coming from similar types of data? Pick a reasonable threshold value for reconstruction error based on the histogram for `mse_x_cs109b`; plot this threshold as a vertical line on the histogram. Be sure to explain your choice of threshold. Beyond this threshold, you will examine the images to see if you can find letters. 
Print how many images lie beyond this threshold.# your code here # your code here*Your answer here* **2.7** [10pts] Use the subset of data determined by your threshold to find the letters in `x_cs109b`, displaying them as images. Show your work! Create a 2-column table indicating the letter (e.g., 'a') and the index where it is located (e.g. '9728'). There are a few letters total. To get full credit you need to find most of them (you should not need to examine more than a few tens of images - if so, consider a different threshold in **2.6** or check your CAE performance).You will have been able to find the majority of the letters hidden in the dataset of thousands of images, while only having to manually look at 1/100th of the dataset. This demonstrates how autoencoders could be used as a cheap preprocessing step to draw out the most "interesting" data.# your code here02 - Deployment* [Starlette](https://www.starlette.io/)* Follow the `Render` tutorial [here](https://github.com/render-examples/fastai-v3)* [Link](https://github.com/muellerzr/fastai-Starlette) to a fastai template* **Note**: You do not **need** to deploy on Render to get the code working, we can test locally on our machine! (which we will do today) What will we focus on?* Looking at how to format inputs/outputs for each model type and feeding it in. * Images, Tabular, NLP What code do we change?* `server.py` Images:* Different input types: * URL * File uploadasync def get_bytes(url): async with aiohttp.ClientSession() as session: async with session.get(url) as response: return await response.read()An image upload@app.route('/analyze', methods=['POST']) async def analyze(request): img_data = await request.form() img_bytes = await (img_data['file'].read()) pred = learn.predict(img_bytes)[0] return JSONResponse({ 'results': str(pred) })A URL@app.route('/analyze', methods=['POST']) async def analyze(request): img_bytes = await get_bytes(request.query_params["url"]) pred = learn.predict(img_bytes)[0] return JSONResponse({ 'results' : str(pred) })A zip file (see below on how to upload a `zip` or other fileimport zipfile import csv @app.route('/analyze', methods=['POST']) async def analyze(request): data = await request.form() content = data['content'] zip_ref = zipfile.ZipFile(content, 'r') mkdir('Downloaded_Images') zipref.extractall('Downloaded_Images') zip_ref.close() path = Path('Downloaded_Images') imgs = get_image_files(path) learn = load_learner(path/export_file_name) dl = test_dl(learn.dls, imgs) _, __, preds = learn.get_preds(dl=dl, with_decoded=True) rm -r 'Downloaded_Images' resultsFile = open('results.csv', 'wb') wr = csv.writer(resultsFile) wr.writerows([preds]) return FileResponse('results.csv')Parsing a csv with image urlsimport csv import StringIO @app.route('/analyze', methods=['POST']) async def analyze(request): data = await request.form() content = await (data['file'].read()) s = str(content, 'utf-8') data = StringIO(s) mkdir('Downloaded_Images') download_images('Downloaded_Images', urls=data) path = Path('Downloaded_Images') learn = load_learner(path/export_file_name) imgs = get_image_files(path) dl = test_dl(learn.dls, imgs) _, __, preds = learn.get_preds(dl=dl, with_decoded=True) rm -r 'Downloaded_Images' resultsFile = open('results.csv', 'wb') wr = csv.writer(resultsFile) wr.writerows([preds]) return FileResponse('results.csv')Tabular Tabular is different. Most work will be done by sending large chuncks of data for analysis. 
Let's recreate what we did, but load it into Pandasimport StringIO import csv @app.route('/analyze', methods=['POST']) async def analyze(request): data = await request.form() content = await (data['file'].read()) s = str(content, 'utf-8') data = StringIO(s) df = pd.read_csv(data) learn = load_learner(path/export_file_name) # if we want to do GPU: # learn.model = learn.model.cuda() dl = learn.dls.train_dl.new(df) _, __, y = learn.get_preds(dl=dl, with_decoded=True) df['Predictions'] = y # if we want to store the results path_res = Path('app/static/') df.to_csv(path_res/'results.csv') return FileResponse('results.csv', media_type='csv')We need to adjust the JavaScript to accept a form:`client.js`:function analyze(){ var uploadFiles = el('file-input').files; if (uploadFiles.length < 1) alert('Please select 1 file to analyze!'); el('analyze-button').innerHTML = 'Analyzing...'; var xhr = new XMLHttpRequest(); var loc = window.location xhr.open('POST', `${loc.protocol}//${loc.hostname}:${loc.port}/analyze`, true); xhr.onerror = function() {alert (xhr.responseText);} xhr.onload = function(e) { if (this.readyState === 4) { el("result-label").innerHTML = `Result = Good`; download('results.csv', 'results.csv'); xhr.send(); } el("analyze-button").innerHTML = "Analyze"; }; var fileData = new FormData(); fileData.append("file", uploadFiles[0]); xhr.send(fileData); } }TextTo write a simple function for text based models:@app.route('/analyze', methods=['POST']) async def analyze(request): data = await request.form() content = data['content'] pred = learn.predict(content)[0] return JSONResponse({'result': pred})Tabular Q Learning on Frozen Lake game Import required libraries''' These libraries are related with OpenAI Gym and Frozen Lake enviornment. ''' import gym from gym.envs.registration import register from gym.envs.toy_text.frozen_lake import LEFT, RIGHT, DOWN, UP ''' These libraries are related with algebraic calculations and plotting. ''' %matplotlib inline import seaborn import matplotlib.pyplot as plt import numpy as np import numpy.linalg as LA import time from tabulate import tabulateInitialize the Froze Lake env Code Parameters * Size of the world: It is a grid world with `4 x 4` grid size.* The discount factor, $\gamma$ has been set to 0.9.* The environment is slippery, i.e., the transition kernel is stochasticregister( id='D4x4-FrozenLake-v0', entry_point='gym.envs.toy_text.frozen_lake:FrozenLakeEnv', kwargs={'map_name': '4x4', 'is_slippery': True}) env = gym.make('D4x4-FrozenLake-v0') gamma=0.9 action_names = {LEFT: 'LEFT', RIGHT: 'RIGHT', DOWN: 'DOWN', UP: 'UP'}The Environment* The environment consists of 16 states. * `env.nS`: count of state variable of data type int.* The agent can take 4 actions. * `env.nA`: count of action variable of data type int.* The transition kernel `P` is a dictionary. * `P[state][action]` is a tuple. * `P[state][action] = [probability, nextstate, reward, terminal]`.* For more information check here [link](https://gym.openai.com/envs/FrozenLake-v0/).''' Print the total no. of states and actions in this env. ''' print('Number of Actions', env.nA) print('Number of States ', env.nS) print('P[5,0]', env.P[5][0])Number of Actions 4 Number of States 16 P[5,0] [(1.0, 5, 0, True)]Generating a Heatmap * Pass the optimal value function and policy to the function `fancy_visual` to obtain a heat map.* This function also prints the value function and policy. 
Hence you do not have to print it.def fancy_visual(value_func,policy_int): grid = 4 f, ax = plt.subplots(figsize=(11, 9)) cmap = seaborn.diverging_palette(220, 10, as_cmap=True) reshaped=np.reshape(value_func,(grid,grid)) seaborn.heatmap(reshaped, cmap=cmap, vmax=1.1, square=True, xticklabels=grid+1, yticklabels=grid+1, linewidths=.5, cbar_kws={"shrink": .5}, ax=ax, annot=True, fmt="f") counter=0 for j in range(0, 4): for i in range(0, 4): if policy_int[counter]==1: plt.text(i+0.5, j+0.7, u'\u2193', fontsize=12) elif policy_int[counter]==3: plt.text(i+0.5, j+0.7, u'\u2191', fontsize=12) elif policy_int[counter]==0: plt.text(i+0.5, j+0.7, u'\u2190', fontsize=12) else: plt.text(i+0.5, j+0.7, u'\u2192', fontsize=12) counter=counter+1 plt.title('Heatmap of policy iteration with value function values and directions') print('Value Function',value_func) print('Policy',policy_int) plt.show()Part 1 Value IterationWe do Value Iteration to get the optimal Q-values, which will be used to compare with the Q-values that we get from Tabular Q-Learning Algorithm. Define important functionsFunctions have been written based on the pseudo code provided in the Sutton-Barto Chapter 4 and Section 4.4 Value Iteration. All the equations have been taken from that textbook.* `value_function_to_policy`: Function which computes the optimal policy in greedy manner based on the optimal input value function.* `value_function_to_Qvalues`: Function which computes the optimal Q-values based on the optimal input value function.* `value_iteration`: Function for doing the value iteration. * `print_policy`: Helper function to print the optimal policy followed by the agent. * `run_policy`: Function for running our trained policy on the game see how it performs. Definitions* `value_func_sa`: value function for given state and action taken by following the policy.* `max_value_func_sa`: value function for given state and action taken by following the policy such that it is maximized.* `a`: current action* `max_a`: action corresponding to the maximum value function.* `q_values`: Q-values* `delta`: parameter to measure the convergence. Refer the Sutton-Barto textbook.def value_function_to_policy(env, gamma, value_function): """ Output action numbers for each state in value_function. Parameters ---------- env: gym.core.Environment Environment to compute policy for. Must have nS, nA, and P as attributes. gamma: float Discount factor. Number in range [0, 1) value_function: np.ndarray Value of each state. Returns ------- np.ndarray An array of integers. Each integer is the optimal action to take in that state according to the environment dynamics and the given value function. """ policy = np.zeros(env.nS, dtype='int') for s in range(env.nS): max_value_func_sa = -1 max_a = -1 for a in range(env.nA): value_func_sa = 0 for possible_next_state in env.P[s][a]: prob_action = possible_next_state[0] cur_reward = possible_next_state[2] future_reward = gamma * value_function[possible_next_state[1]] value_func_sa += prob_action * (cur_reward + future_reward) if value_func_sa > max_value_func_sa: max_value_func_sa = value_func_sa max_a = a policy[s] = max_a return policy def value_function_to_Qvalues(env, gamma, value_function): """ Output optimal Q value for each state action pair in env. Parameters ---------- env: gym.core.Environment Environment to compute policy for. Must have nS, nA, and P as attributes. gamma: float Discount factor. Number in range [0, 1) value_function: np.ndarray Value of each state. 
Returns ------- np.ndarray An matirx (2D array) of optimal Q-values. """ q_values = np.zeros((env.nS, env.nA)) for s in range(env.nS): for a in range(env.nA): q_values[s][a] = 0 for possible_next_state in env.P[s][a]: prob_action = possible_next_state[0] cur_reward = possible_next_state[2] future_reward = gamma * value_function[possible_next_state[1]] q_values[s][a] += prob_action * (cur_reward + future_reward) return q_values def value_iteration(env, gamma, max_iterations=int(1e3), tol=1e-10): """ Runs value iteration for a given gamma and environment. Parameters ---------- env: gym.core.Environment The environment to compute value iteration for. Must have nS, nA, and P as attributes. gamma: float Discount factor, must be in range [0, 1) max_iterations: int The maximum number of iterations to run before stopping. tol: float Determines when value function has converged. Returns ------- np.ndarray, iteration The value function and the number of iterations it took to converge. """ value_func_old = np.random.rand(env.nS) value_func_new = np.zeros(env.nS) diff_arr = [] for iteration in range(max_iterations): delta = 0 for s in range(env.nS): max_value_func_sa = -1 for a in range(env.nA): value_func_sa = 0 for possible_next_state in env.P[s][a]: prob_action = possible_next_state[0] cur_reward=possible_next_state[2] future_reward = gamma * value_func_old[possible_next_state[1]] value_func_sa += prob_action * (cur_reward + future_reward) if value_func_sa > max_value_func_sa: max_value_func_sa = value_func_sa diff = np.abs(value_func_old[s] - max_value_func_sa) delta = max(delta, diff) value_func_new[s] = max_value_func_sa if delta <= tol: break sq_diff = LA.norm(value_func_new - value_func_old) diff_arr.append(sq_diff) value_func_old = value_func_new return value_func_new, iteration, diff_arr def print_policy(policy, action_names): """ Print the policy in human-readable format. Parameters ---------- policy: np.ndarray Array of state to action number mappings action_names: dict Mapping of action numbers to characters representing the action. """ str_policy = policy.astype('str') for action_num, action_name in action_names.items(): np.place(str_policy, policy == action_num, action_name) print(str_policy) return str_policy def run_policy(env, gamma, policy): initial_state = env.reset() total_reward = 0 num_steps = 0 current_state = initial_state while True: nextstate, reward, is_terminal, debug_info = env.step(policy[current_state]) total_reward += np.power(gamma, num_steps) * reward num_steps += 1 if is_terminal: break current_state = nextstate return total_reward, num_stepsRun Value Iteration* Start the Value iteration and time it to measure the performance.print("Executing Value Iteration") start_time=time.time() value_function, value_iters, error = value_iteration(env, gamma) print("Total time taken: " + str((time.time()-start_time))) print("Total Value Iteration Steps: "+str(value_iters))Executing Value Iteration Total time taken: 0.08605813980102539 Total Value Iteration Steps: 197Rate of convergence* We convergence performance of the value iteration by measuring following qunatity $\lVert V_{k+1} - V_{k} \rVert_{2}$ and see how it changes with total number of iterations.* Tolerance which is the termination criteria for the code tells us how close should $V_{k+1}$ and $V_{k}$ should be. 
By default the tolerance value has been set to 1e-10 i.e, we want the values to be very tight, and we achieve this tolerance in less than 200 iterations.plt.figure(figsize=(8, 6)) plt.plot(np.arange(value_iters), error) plt.xlabel(r'Number of iterations $\rightarrow$') plt.ylabel(r'$\|V_{k+1} - V_{k}\|_{2} \rightarrow$') plt.title('Convergence of the value iteration with respect to iterations') plt.show()Optimal Value function and Optimal Q-valuesprint('Optimal value function is:') print(value_function) print('\n') q_val = value_function_to_Qvalues(env, gamma, value_function) print('Optimal Q-values are:') print(q_val)Optimal value function is: [6.88909050e-02 6.14145717e-02 7.44097623e-02 5.58073218e-02 9.18545400e-02 4.19797313e-10 1.12208207e-01 8.46112458e-10 1.45436355e-01 2.47496955e-01 2.99617593e-01 2.87889594e-10 4.62412369e-10 3.79935901e-01 6.39020148e-01 1.90230436e-10] Optimal Q-values are: [[6.88909050e-02 6.66480050e-02 6.66480050e-02 5.97589145e-02] [3.90916432e-02 4.29902003e-02 4.07473003e-02 6.14145717e-02] [7.44097623e-02 6.88290301e-02 7.27275873e-02 5.74894968e-02] [3.90651255e-02 3.90651255e-02 3.34843933e-02 5.58073218e-02] [9.18545400e-02 7.11872686e-02 6.42981781e-02 4.82236336e-02] [3.77817582e-10 3.77817582e-10 3.77817582e-10 3.77817582e-10] [1.12208207e-01 8.98852783e-02 1.12208207e-01 2.23229291e-02] [7.61501212e-10 7.61501212e-10 7.61501212e-10 7.61501212e-10] [7.11872686e-02 1.17879993e-01 1.01805449e-01 1.45436355e-01] [1.57611677e-01 2.47496955e-01 2.03866048e-01 1.33516185e-01] [2.99617593e-01 2.65955131e-01 2.25368507e-01 1.07911549e-01] [2.5910063[...]Optimal Policy* Print the optimal policy which tells the agents what action take based on the current position of the game and optimal value function. * We will aslo plot the output in fancy manner to get more better idea.# Print the policy print("Policy:") policy = value_function_to_policy(env, gamma, value_function) policy_str = print_policy(policy, action_names) # Plot the policy fancy_visual(value_function, policy) # Plot ps=[] for elem in policy_str: ps.append(elem[0]) reshaped_policy=np.reshape(ps,(4,4)) # print(tabulate(reshaped_policy,tablefmt='latex')) print('Above policy can also be expressed in a tabular form as follows:') print(tabulate(reshaped_policy))Above policy can also be expressed in a tabular form as follows: - - - - L U L U L L R L U D L L L R D L - - - -Performance measuretotal_cum_reward=0 maxn = 5 start_time=time.time() for n in range(maxn): cum_reward, nsteps = run_policy(env, gamma, policy) total_cum_reward += cum_reward if n%1 == 0: print("Done " + str(n)) print("Time: " + str((time.time()-start_time)/60)) print("No. of steps for the last run: " + str(nsteps)) print("Performance of the agent over " + str(maxn) + " episodes") print("Average Cumulative Reward: " + str((total_cum_reward/maxn)))Done 0 Done 1 Done 2 Done 3 Done 4 Time: 0.0001902341842651367 No. of steps for the last run: 99 Performance of the agent over 5 episodes Average Cumulative Reward: 0.07826099076725651Part 2 Tabular Q-Learning Hyperparameters* In this part I have implemented the Q-learning algorithm. This code has been written based on the pseudo code given the Sutton-Barto textbook in chapter 6 and section 6.5. 
This version of the tabular Q-learning is known as the Off-policy TD Control Q-learning.* Below I have defined some hyperparameters based on the algorithm and these required for learning the optimal Q-values.total_episodes = 150000 # Total episodes learning_rate = 0.8 # Learning rate gamma = 0.95 # Discounting rate # Exploration parameters epsilon = 1.0 # Exploration rate max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.01 # Minimum exploration probability decay_rate = 0.005 # Exponential decay rate for exploration prob # Q-value table qtable = np.zeros((env.nS, env.nA)) # qtable = np.random.rand(env.nS, env.nA) # qtable[15:] = 0 # Book keeping parameters # List of rewards rewards = [] episode_counter = 0 sq_diff_qval = [] # Debug verbose = FalseThe Q-Learning Alogrithm* Here is the main loop for doing the tabular Q-learning. Note that we during learning we decay our learning rate and epsilon greedy parameter after each episode so that satisfy the Robbins–Monro conditions.# For total no. of episodes or until learning is stopped for episode in range(total_episodes): # Reset the environment state = env.reset() is_terminal = False total_rewards = 0 episode_counter += 1 # We break the epsiode and go to next one as soon as we die or reach the goal while not is_terminal: # Choose an action A in the current world state S # First we randomize a number, which help in inducing the # exploration and exploitation effect exp_exp_tradeoff = np.random.uniform(0, 1) # If this number > greater than epsilon # then exploitation (taking the biggest Q value for this state) if exp_exp_tradeoff > epsilon: action = np.argmax(qtable[state,:]) # Else doing a random choice which means exploration else: action = env.action_space.sample() # random action # Take the action A and observe the outcome state S' and reward R new_state, reward, is_terminal, info = env.step(action) # take a step # Update Q(S,A):= Q(S,A) + eta * [R(S,A) + gamma * max{Q(S',A')} - Q(S,A)] # qtable[new_state,:] : all the actions we can take from new state qtable[state, action] = qtable[state, action] + learning_rate * (reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action]) total_rewards += reward # Our new state is state state = new_state # Do some book keeping: store the values of reward and difference between optimal q-value # that we obtained from the dynamic programming and q-values we got from tabular q-learning # Reduce epsilon because we need less and less exploration and # reduce the learning rate epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode) learning_rate = learning_rate * np.exp(-0.00005) rewards.append(total_rewards) sq_diff_qval.append(LA.norm(qtable - q_val)) # Debugging if verbose: if episode_counter%1000 == 0: print('In episode: ', episode_counter, ' and delta: ', LA.norm(qtable-q_val)) print ("Score over time: " + str(sum(rewards)/total_episodes))Score over time: 0.6799Optimal Q-values and Optimal Value functionprint('Optimal Q-values and Optimal value function obtained from Tabular Q-learning algoritm') value_function = np.max(qtable, axis=1) print('Optimal value function is:') print(value_function) print('\n') print('Optimal Q-values are:') print(qtable)Optimal Q-values and Optimal value function obtained from Tabular Q-learning algoritm Optimal value function is: [0.18150054 0.15530499 0.15446939 0.12390759 0.20928477 0. 0.17694596 0. 0.26999698 0.37248143 0.39958396 0. 0. 0.50904335 0.72518004 0. 
] Optimal Q-values are: [[0.18150054 0.17191766 0.17251848 0.16328259] [0.09560385 0.09666919 0.10011551 0.15530499] [0.15446939 0.13183677 0.1331532 0.12670612] [0.05071176 0.03421913 0.04270463 0.12390759] [0.20928477 0.15732286 0.14271295 0.12187214] [0. 0. 0. 0. ] [0.12920402 0.11321215 0.17694596 0.04646206] [0. 0. 0. 0. ] [0.16205737 0.20395685 0.18533047 0.26999698] [0.24233804 0.37248143 0.29521572 0.22029204] [0.39958396 0.36394531 0.28458796 0.1820341 ] [0. 0. 0. 0. ] [0. 0. 0. 0. ] [0.2898651 0.38452165 0.50904335 0.35035077] [0.51117287 0.72518004 0.68191074 0.6240[...]Optimal Policy* Print the optimal policy which tells the agents what action take based on the current position of the game and optimal value function. Note that optimal policy can be computed directly from the Q-values as shown below. These values are computed from the Q-values we got from Tabular Q-learning. * We will aslo plot the output in fancy manner to get more better idea.# Compute the policy directly from the Q-values policy = np.argmax(qtable, axis=1) # Print the policy print("Policy:") policy = value_function_to_policy(env, gamma, value_function) policy_str = print_policy(policy, action_names) # Plot the policy fancy_visual(value_function, policy) # Plot ps=[] for elem in policy_str: ps.append(elem[0]) reshaped_policy=np.reshape(ps,(4,4)) print('Above policy can also be expressed in a tabular form as follows:') print(tabulate(reshaped_policy))Above policy can also be expressed in a tabular form as follows: - - - - L U L U L L L L U D L L L R D L - - - -Convergence* Here we observe how our Q-values obtained from Tabular Q-learning algorithm converges to the optimal Q-values obtained through dynamic programming. We see this trend over mutliple episodes.* We also do a moving window average to remove the noisy zig-zag patterns and get a smooth trend for understanding.plt.figure(figsize=(8, 6)) plt.plot(np.arange(episode_counter), sq_diff_qval) plt.xlabel(r'No. of episodes $\rightarrow$') plt.ylabel(r'$\vert\vert Q_{k} - Q^{*}\vert\vert_{2} \rightarrow$') plt.title('Convergence of the Tabular Q-learning to optimal value over episodes') plt.show() def moving_average(a, n): ''' Function for doing moving window average ''' ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n-1:]/n # Moving avg window has been set to 10000 sq_diff_avg = moving_average(np.array(sq_diff_qval), 10000) plt.figure(figsize=(8, 6)) plt.plot(np.arange(len(sq_diff_avg)), sq_diff_avg) plt.xlabel(r'No. of episodes $\rightarrow$') plt.ylabel(r'$\vert\vert Q_{k} - Q^{*}\vert\vert_{2} \rightarrow$') plt.title('Moving average convergence of the Tabular Q-learning to optimal value over episodes') plt.show()Cumulative Rewards* We see how did the rewards improved over episodes of learning. Note since output of each output will be 0 or 1 so to get a more proper understanding of the improvement we take cummulative sum of the rewards over episodes and then normalize them with total no. of episodes. * We also moving window average over the rewards and cumulative sum of rewards to see the trend.total_cum_reward = np.cumsum(np.array(rewards)/total_episodes) plt.figure(figsize=(8, 6)) plt.plot(np.arange(total_cum_reward.shape[0]), total_cum_reward) plt.xlabel(r'No. 
of episodes $\rightarrow$') plt.ylabel(r'Cumulative sum of the rewards $\rightarrow$') plt.title('Improvement of rewards over episodes') plt.show() # Moving avg window has been set to 10000 avg_reward = moving_average(total_cum_reward, 10000) plt.figure(figsize=(8, 6)) plt.plot(np.arange(len(avg_reward)), avg_reward) plt.xlabel(r'No. of episodes $\rightarrow$') plt.ylabel(r'Cumulative sum of the rewards $\rightarrow$') plt.title('Moving average improvement of rewards over episodes') plt.show() # Note: we are only considering rewards not the cumulative sum # Moving avg window has been set to 1000 avg_reward = moving_average(np.array(rewards), 1000) plt.figure(figsize=(8, 6)) plt.plot(np.arange(len(avg_reward)), avg_reward) plt.xlabel(r'No. of episodes $\rightarrow$') plt.ylabel(r'Rewards $\rightarrow$') plt.title('Moving average improvement of rewards over episodes') plt.show()Performance measuretotal_cum_reward=0 maxn = 5 start_time=time.time() for n in range(maxn): cum_reward, nsteps = run_policy(env, gamma, policy) total_cum_reward += cum_reward if n%1 == 0: print("Done " + str(n)) print("Time: " + str((time.time()-start_time)/60)) print("No. of steps for the last run: " + str(nsteps)) print("Performance of the agent over " + str(maxn) + " episodes") print("Average Cumulative Reward: " + str((total_cum_reward/maxn)))Done 0 Done 1 Done 2 Done 3 Done 4 Time: 0.0001345078150431315 No. of steps for the last run: 13 Performance of the agent over 5 episodes Average Cumulative Reward: 0.3108728782061132Notebook Modules%load_ext literary.module import sys from pathlib import Path from ..core.config import find_project_config, load_project_config from .importer import ModuleImporter def load_ipython_extension(ipython): """Load the import hook and setup the global state for the Literary extension. When IPython invokes this function, the determined package root path will be added to `sys.path`. :param ipython: IPython shell instance """ path = find_project_config(Path.cwd()) config = load_project_config(path) # Install the import hook importer = ModuleImporter(config=config) importer.install_hook() importer.update_namespace(ipython.user_ns)Table of Contentsfrom turtle import * def f(length, depth): if depth == 0: forward(length) else: f(length/3, depth-1) right(60) f(length/3, depth-1) right(120) f(length/3, depth-1) left(60) f(length/3, depth-1) # Python code to draw snowflakes fractal. 
import turtle import random # setup the window with a background color wn = turtle.Screen() wn.bgcolor("cyan") # assign a name to your turtle elsa = turtle.Turtle() elsa.speed(15) # create a list of colours sfcolor = ["white", "blue", "purple", "grey", "magenta"] # create a function to create different size snowflakes def snowflake(size): # move the pen into starting position elsa.penup() elsa.forward(10*size) elsa.left(45) elsa.pendown() elsa.color(random.choice(sfcolor)) # draw branch 8 times to make a snowflake for i in range(8): branch(size) elsa.left(45) # create one branch of the snowflake def branch(size): for i in range(3): for i in range(3): elsa.forward(10.0*size/3) elsa.backward(10.0*size/3) elsa.right(45) elsa.left(90) elsa.backward(10.0*size/3) elsa.left(45) elsa.right(90) elsa.forward(10.0*size) # loop to create 20 different sized snowflakes # with different starting co-ordinates for i in range(20): x = random.randint(-200, 200) y = random.randint(-200, 200) sf_size = random.randint(1, 4) elsa.penup() elsa.goto(x, y) elsa.pendown() snowflake(sf_size) # leave the window open until you click to close wn.exitonclick()**Y**ou **O**nly **L**ook **A**t **C**oefficien**T**s``` ██╗ ██╗ ██████╗ ██╗ █████╗ ██████╗████████╗ ╚██╗ ██╔╝██╔═══██╗██║ ██╔══██╗██╔════╝╚══██╔══╝ ╚████╔╝ ██║ ██║██║ ███████║██║ ██║ ╚██╔╝ ██║ ██║██║ ██╔══██║██║ ██║ ██║ ╚██████╔╝███████╗██║ ██║╚██████╗ ██║ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ```A simple, fully convolutional model for real-time instance segmentation. This is the code for our papers: - [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) - [YOLACT++: Better Real-time Instance Segmentation](https://arxiv.org/abs/1912.06218) YOLACT++ (v1.2) released! ([Changelog](CHANGELOG.md))YOLACT++'s resnet50 model runs at 33.5 fps on a Titan Xp and achieves 34.1 mAP on COCO's `test-dev` (check out our journal paper [here](https://arxiv.org/abs/1912.06218)).[GitHub](https://github.com/dbolya/yolact)> Colab author: [mrm8488](https://twitter.com/mrm8488) Install required packages# Cython needs to be installed before pycocotools !pip install cython !pip install opencv-python pillow pycocotools matplotlibClone the repository!git clone https://github.com/dbolya/yolact.git cd yolact !mkdir weightsDownload the model weights!gdown "https://drive.google.com/uc?id=15id0Qq5eqRbkD-N3ZjDZXdCvRyIaHpFB&export=download" -O "./weights/yolact_plus_base_54_800000.pth" cd external/DCNv2Compile deformable convolutional layers (from DCNv2) to use YOLACT++ version!python setup.py build develop &> /dev/null cd /content/yolact/ !mkdir uploads !mkdir resultsUpload files from local filesystemfrom google.colab import files uploaded = files.upload() pic_names = list(uploaded.keys()) for pic_name in pic_names: print(pic_name) !mv ./$pic_name ./uploads/$pic_nameDetect objects in the uploaded pictures!python eval.py --trained_model=weights/yolact_plus_base_54_800000.pth --score_threshold=0.15 --top_k=15 --images=./uploads:./resultsDisplay resultsfrom IPython.display import display, Image import os for filename in os.listdir("./results/"): print(filename) if filename.endswith(".png"): img = Image("./results/"+filename) display(img)Zip and download the results!zip -r /content/results_images.zip ./results from google.colab import files files.download("/content/results_images.zip")Load Datasetuse_cols = [ 'Pclass', 'Sex', 'Age', 'Fare', 'SibSp', 'Survived' ] data = pd.read_csv('./data/titanic.csv', usecols=use_cols) data.head(3) # Note that we include target variable in the 
X_train # because we need it to supervise our discretization # this is not the standard way of using train-test-split X_train, X_test, y_train, y_test = train_test_split(data, data.Survived, test_size=0.3, random_state=0) X_train.shape, X_test.shapeEqual width binningdivides the scope of possible values into N bins of the same widthfrom sklearn.preprocessing import KBinsDiscretizer enc_equal_width = KBinsDiscretizer(n_bins=3,encode='ordinal',strategy='uniform').fit(X_train[['Fare']]) # equal width for every bins enc_equal_width.bin_edges_ result = enc_equal_width.transform(X_train[['Fare']]) pd.DataFrame(result)[0].value_counts() # add the new discretized variable X_train_copy = X_train.copy(deep=True) X_train_copy['Fare_equal_width'] = enc_equal_width.transform(X_train[['Fare']]) print(X_train_copy.head(10))Survived Pclass Sex Age SibSp Fare Fare_equal_width 857 1 1 male 51.0 0 26.5500 0.0 52 1 1 female 49.0 1 76.7292 0.0 386 0 3 male 1.0 5 46.9000 0.0 124 0 1 male 54.0 0 77.2875 0.0 578 0 3 female NaN 1 14.4583 0.0 549 1 2 male 8.0 1 36.7500 0.0 118 0 1 male 24.0 0 247.5208 1.0 12 0 3 male 20.0 0 8.0500 0.0 157 0 3 male 30.0 0 8.0500 0.0 127 1 3 male 24.0 0 7.1417 0.0Equal frequency binningdivides the scope of possible values of the variable into N bins, where each bin carries the same amount of observationsenc_equal_freq = KBinsDiscretizer(n_bins=3,encode='ordinal',strategy='quantile').fit(X_train[['Fare']]) # check the bin edges enc_equal_freq.bin_edges_ # equal number of case for every bins result = enc_equal_freq.transform(X_train[['Fare']]) pd.DataFrame(result)[0].value_counts() # add the new discretized variable X_train_copy = X_train.copy(deep=True) X_train_copy['Fare_equal_freq'] = enc_equal_freq.transform(X_train[['Fare']]) print(X_train_copy.head(10))Survived Pclass Sex Age SibSp Fare Fare_equal_freq 857 1 1 male 51.0 0 26.5500 2.0 52 1 1 female 49.0 1 76.7292 2.0 386 0 3 male 1.0 5 46.9000 2.0 124 0 1 male 54.0 0 77.2875 2.0 578 0 3 female NaN 1 14.4583 1.0 549 1 2 male 8.0 1 36.7500 2.0 118 0 1 male 24.0 0 247.5208 2.0 12 0 3 male 20.0 0 8.0500 0.0 157 0 3 male 30.0 0 8.0500 0.0 127 1 3 male 24.0 0 7.1417 0.0K-means binningusing k-means to partition values into clustersenc_kmeans = KBinsDiscretizer(n_bins=3,encode='ordinal',strategy='kmeans').fit(X_train[['Fare']]) # check the bin edges enc_kmeans.bin_edges_ result = enc_kmeans.transform(X_train[['Fare']]) pd.DataFrame(result)[0].value_counts() # add the new discretized variable X_train_copy = X_train.copy(deep=True) X_train_copy['Fare_kmeans'] = enc_kmeans.transform(X_train[['Fare']]) print(X_train_copy.head(10))Survived Pclass Sex Age SibSp Fare Fare_kmeans 857 1 1 male 51.0 0 26.5500 0.0 52 1 1 female 49.0 1 76.7292 0.0 386 0 3 male 1.0 5 46.9000 0.0 124 0 1 male 54.0 0 77.2875 0.0 578 0 3 female NaN 1 14.4583 0.0 549 1 2 male 8.0 1 36.7500 0.0 118 0 1 male 24.0 0 247.5208 1.0 12 0 3 male 20.0 0 8.0500 0.0 157 0 3 male 30.0 0 8.0500 0.0 127 1 3 male 24.0 0 7.1417 0.0Discretisation with Decision Treeusing a decision tree to identify the optimal splitting points that would determine the binsenc1 = dc.DiscretizeByDecisionTree(col='Fare',max_depth=2).fit(X=X_train,y=y_train) enc1.tree_model data1 = enc1.transform(data) # see how the new column Fare_tree_discret is distributed # the values are corresponding to the proba of the prediction by the tree print(data1.head(5)) # the unique value of the discretisized column print(data1.Fare_tree_discret.unique()) # see how the bins are cut # because we use a tree with max-depth of 2, 
we have at most 2*2=4 bins generated by the tree col='Fare' bins = pd.concat([data1.groupby([col+'_tree_discret'])[col].min(), data1.groupby([col+'_tree_discret'])[col].max()], axis=1) print(bins) # all values between 0 to 7.5208 in the original variable 'Fare' # are given new value 0.107143 in the new column 'Fare_tree_discret' # and so onFare Fare Fare_tree_discret 0.107143 0.0000 7.5208 0.255319 7.5500 10.5167 0.442308 11.1333 73.5000 0.746269 75.2500 512.3292Discretisation with Decision Tree with optimal depth search# search for the best depth from range 2-7 # we see when depth=2 we get the best roc-auc mean enc2 = dc.DiscretizeByDecisionTree(col='Fare',max_depth=[2,3,4,5,6,7]).fit(X=X_train,y=y_train) # using optimal depth=2 we train the model, same result as last one enc2.tree_model data2 = enc2.transform(data) data2.head(5)Discretisation with ChiMergesupervised hierarchical bottom-up (merge) method that locally exploits the chi-square criterion to decide whether two adjacent intervals are similar enough to be mergedenc3 = dc.ChiMerge(col='Fare',num_of_bins=5).fit(X=X_train,y='Survived') # the bins boundary created by ChiMerge enc3.bins data3 = enc3.transform(data) print(data3.head(5)) # all values are grouped into 5 intervals data3.Fare_chimerge.unique()Shallow Learning: Neural Networks 101A basic assumption of many classifications schemes is linear separability (see earlier notes). This assumption will lead to poor classification performance in cases that aren't separable by some hyperplane. A similar problem arises for decision trees and forests when the features don't align well along the coordinate axes of the feature space. The situation becomes quite clear in [this comparison](http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.htmlsphx-glr-auto-examples-classification-plot-classifier-comparison-py) from the scikit-learn website:![Classifier comparison from scikit-learn](http://scikit-learn.org/stable/_images/sphx_glr_plot_classifier_comparison_001.png)Linear SVM (3rd column) does very well for the linearly separable case (last row), but is really troubled with the Yin-Yang and the inside-outside cases (first and second row). In contrast, trees and forests (6th and 7th columns) present boxy approximations, but generally follow he features quite well. Much smoother approximations can be had from a SVM that uses Radial Basis Functions as kernels (4th column) and neural nets (8th column).The key to both is that they use non-linear transformation of the data, which results in curved decision boundaries. Let's look in detail how neural networks do that. Universal approximatorsIt might be strange to look at networks from the perspective of regression since they are mostly used for classifications. But bear with me.When we want to approximate the mapping $X\rightarrow Y$ as observed with $N$ samples $\lbrace x_i,y_i\rbrace, x_i\in\mathbb{R}^D,y_i\in\mathbb{R}$, we first need to account for the dimensionality difference and then for the unknown behavior of the mapping. The first problem can trivially solved by projecting the samples $x $ onto some basis $w$, the second by allowing any function with scalar arguments. Combined this gives us the regression function $\tilde{y}_i = g(w^\top x_i)$, which already provides quite some flexibility. 
But we can do *a lot* better by using multiple of those regressors and combining them:$$\tilde{y}_i = \sum_k^K g_k(w_k^\top x_i)$$It is clear that with $K\rightarrow\infty$ an enormous class of mappings can be approximated, which is why this form is called a **universal approximator**. It is limited, however, to mixtures of scalar functions, but that is really the only restriction. The optimization would have to determine the $w_k$ *and* the functions $g_k$. The concept is known as [projection pursuit regression](https://en.wikipedia.org/wiki/Projection_pursuit_regression), and we will not into details.However, if we now replace the unknown function $g_k$ with a known and fixed function $\sigma$, we get a set of intermediate variables$$z_k = \sigma(w_k^\top x)\ \text{with}\ w_k\in\mathbb{R}^D\ \ \forall k\in\lbrace1,\dots,K\rbrace$$This is in fact what the simplest neural network, the single-layer perceptron, does at the beginning. *[For experts: we'll ignore the bias term, which is equivalent to adding a constant 1 as extra feature to the samples. That simply shortens the equations at no loss in generality.]*import numpy as np import matplotlib.pyplot as plt %matplotlib inline from drawNetwork import * drawNN([5,3], labels=["$X$","$Z$"])The function $\sigma$ is called **activation function** and there are several typical ones:x = np.linspace(-5,5,101) plt.plot(x,x,label='linear') plt.plot(x,1/(1+np.exp(-x)), label='sigmoid') plt.plot(x,np.tanh(x), label='tanh') plt.plot(x,np.maximum(0,x), label='relu') plt.legend(loc='lower right')It's worth noting that if $\sigma$ is linear, the model is a fancy way of describing a matrix operation, so we're mostly interested in non-linear functions here. There are *some* guidelines which ones to use, but I won't go into those. Output layersSo far, we've built a single layer with some non-linear transformation and independent weights for each node $z_k$. These are supposed to allow some form of approximation, but of what?This will become clearer when we discuss how we'll complete our simplest network. In the universal approximator above, we simply summed over the transformed variables. That's a fine choice, in particular if the goal of the network is to perform a regression. For more flexibility, we'll allow for weights in that summation:$$t =\beta^\top z \ \text{with}\ \beta\in\mathbb{R}^K$$drawNN([5,3,1], labels=["$X$","$Z$","$T$"])Multivariate regression, i.e. the learning of some mapping $\mathbb{R}^D\rightarrow\mathbb{R}^M$, can be trivially realized with multiple output nodes$$t_m =\beta_m^\top z \ \text{with}\ \beta_m\in\mathbb{R}^K\ \forall m \in \lbrace1,\dots,M\rbrace$$drawNN([5,3,2], labels=["$X$","$Z$","$T$"])But what if we want a classifier? Then, these output simply have to go through another non-linear transformation to ensure that they are bound between 0 and 1 and sum up to 1. One choice is the **softmax** function$$p_m = \frac{\exp(t_m)}{\sum_l \exp(t_l)}$$which is the multivariate form of the logit regression we saw earlier in linear classification. The classifier would then choose the class with the maximum $p_m$.Let's look what happened here: we've transformed the original samples $x_i$ in some non-linear fashion, and then run a classifier on the $z_k$ that assumes linear separability. This is quite similar to the kernel trick in SVMs (finding some non-linear mapping to a higher-dimensional space to help with the separability), but with one major difference. 
The approximator part of the network does not require us to select a kernel function beforehand; instead we pick an activation function, but the weights are determined *from the data*!> A perceptron classifier is a conventional classifier attached to a non-linear approximator, whose parameters are determined from the data Leaves the important question: how are the weigths determined? Network trainingWe start by defining a cost function $R=\sum_i R_i$. In the multi-variate regression case, we use the quadratic cost function$$R_i = \sum_m \left(y_{im} - t_m(x_i)\right)^2.$$The key to finding the best network weights is to understand that the target values ($t$ or $p$) are **differentiable** functions of the inputs, so we can search for the weights by gradient descent. For the regression case we can write the network equations as$$t_m(x_i) = \beta_m^\top z(x_i) = \beta_m^\top\left[\sigma(w_1^\top x_i), \dots, \sigma(w_K^\top x_i)\right]\ \forall m \in \lbrace 1,\dots,M\rbrace.$$which gives$$\begin{split}&\frac{\partial R_i}{\partial \beta_{ml}}=-2(y_{im}-t_m(x_i))\,z_l(x_i)\equiv \delta_{mi}z_l(x_i)\\&\frac{\partial R_i}{\partial w_{lj}} = \sum_m^M -2(y_{im}-t_m(x_i))\,\beta_{ml}\, \sigma^\prime(w_l^\top x_i)\, x_{ij} \equiv s_{li} x_{ij}\end{split}$$For regression the derivation follows the same lines with the cross-entropy$$R_i = \sum_m y_{im}\log p_m(x_i)$$as cost function and an extra derivative because of the softmax transformation. As usual, the update equations look like$$\begin{split}&\beta_{ml}^{\text{it}+1} = \beta_{ml}^{\text{it}} - \gamma^\text{it} \sum_i\frac{\partial R_i}{\partial \beta_{ml}}\\&w_{lj}^{\text{it}+1} = w_{lj}^{\text{it}}- \gamma^\text{it} \sum_i\frac{\partial R_i}{\partial w_{lj}}\end{split}$$with a **learning rate** $\gamma>0$. We can now compute these gradients in a two-step process. In the *forward pass*, we fix all weights and compute all $t_m(x_i)$. In the *backward pass*, we compute the current errors $\delta_{mi}$ and propagate them back the the previous layer by $s_{li} = \sigma^\prime(w_l^\top x_i) \sum_m^M\beta_{ml}\delta_{mi}$, which follows directly from the definition of $s_{li}$ above. This scheme, for obvious reasons, it called **back-propagation** and it's the fundamental concept of network training (and the reason why Google's TensorFlow is so good with [automatic differentiation](https://github.com/dfm/tf-tutorial)). 
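To make the two-pass scheme concrete, here is a minimal NumPy sketch of a single gradient step for the one-hidden-layer regression network above (sigmoid activation, quadratic cost, biases ignored as in the text); the array names and toy shapes are illustrative, not taken from the notebook:

```python
import numpy as np

def sigmoid(u):
    return 1.0 / (1.0 + np.exp(-u))

# Toy shapes: N samples, D inputs, K hidden nodes, M outputs (illustrative only)
rng = np.random.default_rng(0)
N, D, K, M = 8, 5, 3, 2
X = rng.normal(size=(N, D))          # inputs  x_i
Y = rng.normal(size=(N, M))          # targets y_i
W = 0.1 * rng.normal(size=(K, D))    # first-layer weights w_k
B = 0.1 * rng.normal(size=(M, K))    # output weights beta_m
gamma_lr = 0.05                      # learning rate gamma

# Forward pass: z = sigma(W x), t = beta^T z
Z = sigmoid(X @ W.T)                 # (N, K)
T = Z @ B.T                          # (N, M)

# Backward pass: delta_mi = -2 (y_im - t_m), then propagate through sigma'
delta = -2.0 * (Y - T)               # (N, M)  dR_i/dt_m
grad_B = delta.T @ Z                 # (M, K)  sum_i delta_mi z_l(x_i)
S = (delta @ B) * Z * (1.0 - Z)      # (N, K)  s_li = sigma'(w_l^T x_i) sum_m beta_ml delta_mi
grad_W = S.T @ X                     # (K, D)  sum_i s_li x_ij

# Gradient-descent updates for beta_ml and w_lj
B -= gamma_lr * grad_B
W -= gamma_lr * grad_W
```

Iterating these two passes over the training set is exactly the update pair for $\beta_{ml}$ and $w_{lj}$ written above.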
Photometric redshifts from a simple networkWe will do a (simplistic) test case of galaxy photometry from 4 different spectral templates, redshifted between 0 and 1, and then observed in 4 different optical filters.# load four template spectra import fitsio path = "data/" templates = [1,3,5,7] seds = {} for k in templates: fp = fitsio.FITS(path+'bpz_%d.sed.fits' % k) seds[k] = fp[1].read() fp.close() plt.semilogy(seds[k]['frequency'], seds[k]['spectral_flux'], label='SED %d' % k) plt.legend(loc='lower left') plt.xlabel('f [THz]') plt.ylabel(r'$f_\nu$') # load the filters bands = ['B','V','R','I'] filters = {} for b in bands: fp = fitsio.FITS(path+'johnson%s.fits' % b) filters[b] = fp[1].read() fp.close() plt.semilogy(filters[b]['frequency'], filters[b]['transmission'], label='Johnson %s' % b) plt.legend(loc='lower left') plt.xlabel('f [THz]') plt.ylabel(r'$T_\nu$') def extrap(x, xp, yp): """np.interp function with linear extrapolation""" y = np.interp(x, xp, yp) y = np.where(xxp[-1], yp[-1]+(x-xp[-1])*(yp[-1]-yp[-2])/(xp[-1]-xp[-2]), y) return y # redshift and apply filters zs = np.linspace(0,1,101) photo = np.empty(len(templates)*len(zs), dtype=[('template','int'),('redshift','float'),('flux','4f')]) i=0 for t in templates: nu_t = seds[t]['frequency'] f_t = seds[t]['spectral_flux'] for z in zs: flux = np.empty(len(bands)) for j in range(len(bands)): b = bands[j] nu_b = filters[b]['frequency'] T_b = filters[b]['transmission'] # shift sed frequency by 1/(1+z) and interpolate at nu_b f_t_z = extrap(-nu_b, -nu_t/(1+z), f_t) # nu decreaseing, need to be increasing for interp/extrap # multiply with filter curve f_t_z_b = f_t_z * T_b # integrate as broadband color flux[j] = f_t_z_b.sum() photo[i] = (t,z,flux) i+=1 for t in templates: mask = photo['template'] == t plt.plot(photo[mask]['redshift'],photo[mask]['flux'][:,0]-photo[mask]['flux'][:,1], label='SED %d' % t) plt.legend(loc='upper right') plt.xlabel('z') plt.ylabel('B-V')Note that the "color" axis is not in magnitudes, but in per-band fluxes, so the typically intuition for what colors mean doesn't apply here. The point is that these templates strongly overlap in some part of the "color"-redshift space. They also overlap in the space of multiple colors:def plotColors(data, scatter=False, legend=True, label=False, ax=None): _templates = np.unique(data['template']) if ax is None: fig = plt.figure() ax = fig.add_subplot(111) for t in _templates: mask = data['template'] == t x = data[mask]['flux'][:,1]-data[mask]['flux'][:,3] # V-I y = data[mask]['flux'][:,0]-data[mask]['flux'][:,1] # B-V if scatter: ax.scatter(x, y, label='SED %d' % t, s=3) else: ax.plot(x, y, label='SED %d' % t) if label: ax.text(x[0], y[0], '$z=0$', size=6, ha='center') ax.text(x[-1], y[-1], '$z=1$', size=6, ha='center') if legend: ax.legend(loc='upper right') ax.set_xlabel('V-I') ax.set_ylabel('B-V') plotColors(photo, label=True) # training samples with some noise def createTrainingData(photo, std=0.02, withhold_template=None): data = photo.copy() data['flux'] *= 1+np.random.normal(scale=std ,size=(len(data),len(bands))) if withhold_template is not None: mask = data['template'] == withhold_template data = data[~mask] return data data = createTrainingData(photo) plotColors(data, scatter=True)We'll now create a perceptron classifier with a single hidden layer that is trained classify the given colors too determine the galaxy template, marginalized over the redshift. 
It's simpler to interpret visually what the network is capable of.from sklearn.neural_network import MLPClassifier, MLPRegressor from sklearn.preprocessing import StandardScaler # perform template type classification, marginalized over redshift def createTemplateTraining(data): training_templates = np.unique(data['template']) N = len(zs)*len(training_templates) X = np.empty((N,2)) Y = np.empty(N, dtype='int') i = 0 for t in training_templates: mask = data['template'] == t X[i:i+mask.sum(),1] = data[mask]['flux'][:,0]-data[mask]['flux'][:,1] X[i:i+mask.sum(),0] = data[mask]['flux'][:,1]-data[mask]['flux'][:,3] Y[i:i+mask.sum()] = t i += mask.sum() return X,Y # perform redshift regression, marginalized over template def createRedshiftTraining(data): training_templates = np.unique(data['template']) N = len(zs)*len(training_templates) X = np.empty((N,2)) Y = np.empty(N) i = 0 for t in training_templates: mask = data['template'] == t X[i:i+mask.sum(),1] = data[mask]['flux'][:,0]-data[mask]['flux'][:,1] X[i:i+mask.sum(),0] = data[mask]['flux'][:,1]-data[mask]['flux'][:,3] Y[i:i+mask.sum()] = data[mask]['redshift'] i += mask.sum() return X,Y # need to define tab10 for matplotlob < 2.1 (where it's not included as colormap...) from matplotlib.colors import ListedColormap _tab10_data = ( (0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4 (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728 (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2 (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22 (0.09019607843137255, 0.7450980392156863, 0.8117647058823529), # 17becf ) cmap_tab10 = ListedColormap(_tab10_data, "tab10") def trainNetwork(data, kind='template', scaler=StandardScaler(), n_nodes=15, alpha=1e-5, plot=True): assert kind in ['template', 'redshift'] if kind == "template": X, Y = createTemplateTraining(data) clf = MLPClassifier(solver='lbfgs', alpha=alpha, hidden_layer_sizes=(n_nodes,), random_state=1) else: X, Y = createRedshiftTraining(data) clf = MLPRegressor(solver='lbfgs', alpha=alpha, hidden_layer_sizes=(n_nodes,), random_state=1) X_ = scaler.fit_transform(X) clf.fit(X_, Y) if plot: h=100 upper = np.max(X, axis=0) * 1.05 lower = np.min(X, axis=0) * 1.05 test_pos = np.meshgrid(np.linspace(lower[0],upper[0],h), np.linspace(lower[1],upper[1],h)) test_pos = np.dstack((test_pos[0].flatten(), test_pos[1].flatten()))[0] test_pos = scaler.transform(test_pos) if kind == "template": pred = np.argmax(clf.predict_proba(test_pos), axis=1) cmap = cmap_tab10 else: pred = clf.predict(test_pos) cmap = 'Spectral_r' pred = pred.reshape(h,h) fig = plt.figure() ax = fig.add_subplot(111) if kind == "template": ax.imshow(pred, cmap=cmap, vmin=0, vmax=10, alpha=0.5, extent=(lower[0], upper[0], lower[1], upper[1]), aspect='auto') if kind == "redshift": c = ax.imshow(pred, cmap=cmap, extent=(lower[0], upper[0], lower[1], upper[1]), aspect='auto') cb = fig.colorbar(c) cb.set_label('redshift') plotColors(data, ax=ax, legend=(kind=="template"), scatter=True, label=(kind=="redshift")) ax.set_xlim(lower[0],upper[0]) ax.set_ylim(lower[1],upper[1]) return clf clf = trainNetwork(data, n_nodes=15)With 
15 nodes in the hidden layer, we can see that it did a pretty good job drawing boundaries such that most regions are populated only by the samples of a given template. Note in particular that many of these boundaries are curved and some are inside of others. That's the advantage of the non-linear transformations here.We should, however, keep in mind that we've trained a network with quite a number of weights on just 400 noisy samples. We expect that it's not as good when we train it on some noise samples and then test it on another independent set:X, Y = createTemplateTraining(photo) scaler = StandardScaler() scaler.fit(X) print ("score with perfect data: %.3f" % clf.score(X, Y)) data_ = createTrainingData(photo) X_, Y_ = createTemplateTraining(data_) X_ = scaler.transform(X_) print ("score with noisy data: %.3f" % clf.score(X_, Y_))And now for the question: how many nodes do we need:for n in [1,5,15,25]: trainNetwork(data, n_nodes=n)As expected, the extent to which the classifier deviates from linear behavior increases with $n_nodes$. It would also increase with the number of hidden layers wedged in between the input and the output layers. As before with the activation function, the number of nodes and hidden layers are configuration parameters for which there are no rigorous guidelines. Experimentation and cross-validation are key. NN photo-z estimationWe finish this intro with a (silly) regression case. Given a set of colors (two in fact), we ask the network to predict the redshift, ignoring that there are multiple solutions because there are four different spectral templates in the data.trainNetwork(data_, kind='redshift', n_nodes=30)Plotting angular distributions for U-238import sandy import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") tape = sandy.get_endf6_file("jeff_33", "xs", 922380) lpc = sandy.Lpc.from_endf6(tape) # legendre polynomial coefficientsAvailable MT numberslpc.data.index.get_level_values("MT").unique()Tabulated energies (linear interpolation is assumed)lpc.data.index.get_level_values("E").unique()Plot for MT=2# keep only coefficients for first 6 polynomials data = lpc.data.stack().rename("VAL").reset_index().query("MT==2 & P<=6") data["P"] = data["P"].astype("category") fig, ax = plt.subplots(figsize=(7, 4), dpi=100) sns.lineplot(ax=ax, data=data, x="E", y="VAL", hue="P") ax.set_xlim([1e1, 2e7]) ax.set_xlabel("neutron energy / $MeV$") ax.set_ylabel("polynomial coefficient") ax.set_xscale("log") fig.tight_layout()Convert polynomial coefficients to tabulated angular distributioncosines = np.linspace(-1, 1, 101) tpd = lpc.filter_by("MT", 2).to_tpd(cosines=cosines) data = tpd.data.stack().rename("VAL").reset_index() fig, ax = plt.subplots(figsize=(7, 4), dpi=100) sns.lineplot(ax=ax, data=data, x="COSINE", y="VAL", hue="E") ax.legend(title="neutron energy / $MeV$", ncol=2) ax.set_xlim([-1, 1]) ax.set_xlabel("scattering cosine") ax.set_ylabel("tabulated angular distribution") ax.set_yscale("log") fig.tight_layout()Plot for MT=51cosines = np.linspace(-1, 1, 101) tpd = lpc.filter_by("MT", 51).to_tpd(cosines=cosines) data = tpd.data.stack().rename("VAL").reset_index() fig, ax = plt.subplots(figsize=(7, 4), dpi=100) sns.lineplot(ax=ax, data=data, x="COSINE", y="VAL", hue="E") ax.legend(title="neutron energy / $MeV$", ncol=2) ax.set_xlim([-1, 1]) ax.set_xlabel("scattering cosine") ax.set_ylabel("tabulated angular distribution") ax.set_yscale("log") fig.tight_layout()Plot for MT=90cosines = np.linspace(-1, 1, 101) tpd = 
lpc.filter_by("MT", 90).to_tpd(cosines=cosines) data = tpd.data.stack().rename("VAL").reset_index() fig, ax = plt.subplots(figsize=(7, 4), dpi=100) sns.lineplot(ax=ax, data=data, x="COSINE", y="VAL", hue="E") ax.legend(title="neutron energy / $MeV$", ncol=2) ax.set_xlim([-1, 1]) ax.set_xlabel("scattering cosine") ax.set_ylabel("tabulated angular distribution") ax.set_yscale("log") fig.tight_layout()Connected Components The code in this notebook is based on the algorithm described in the following paper:* , , : *Parallel algorithms for finding connected components using linear algebra*. J. Parallel Distributed Comput. 144: 14-27 (2020).A prevoius version of the algorithm is here (which might not be reflected in this code):* Zhang, Azad, Hu. *FastSV: A Distributed-Memory Connected Component Algorithm with Fast Convergence* (SIAM PP20) Create and visualize a Matrix# The input matrix A must be symmetric. Self-edges (diagonal entries) are # OK, and are ignored. The values and type of A are ignored; just its # pattern is accessed. row_col = np.array([ [0, 0, 0, 1, 2, 2, 3, 6, 6, 9, 9], [1, 2, 3, 2, 4, 5, 4, 7, 8, 10, 11], ]) rows, cols = row_col data = np.full_like(rows, fill_value=1) A = coo_matrix((data, (rows, cols)), shape=(12, 12)).tolil() A[cols, rows] = A[rows, cols] # symmetrize matrix A = A.tocoo() # Draw A using spring layout which may even reveal the connected components G = nx.convert_matrix.from_scipy_sparse_matrix(A) layout = nx.drawing.layout.spring_layout(G, k=0.6, scale=1, threshold=1e-10) nx.draw_networkx(G, with_labels=True, node_size=500, font_color='w', pos=layout) rows_da = da.from_array(np.concatenate([rows, cols])) cols_da = da.from_array(np.concatenate([cols, rows])) data_da = da.from_array(np.concatenate([data, data])) A = Matrix.from_values(rows_da, cols_da, data_da, nrows=12, ncols=12, chunks=4) # Size of the sparse matrix is 12x12 with 22 non-zero elements of type INT64 A (A.nrows, A.ncols) A.nvals.compute() A._delayed A.compute() # This is an adjacency matrix # Reading along a row shows the out-nodes of a vertex # Reading along a column shows the in-nodes of a vertex # grblas.io.draw could do with a few more tunable options to improve pretty display gio.draw(A.compute()) def fastSV(A, chunksz='auto'): n = A.nrows I = da.arange(n, chunks=chunksz) # The parent of each vertex is initialized to be the vertex itself: f = Vector.from_values(I, I, name='parents', chunks=chunksz) gp = f.dup() # grandparent of each vertex initialized to parent gp_dup = gp.dup() # duplicate grandparents mngp = f.dup() # minimum grandparent of each star-vertex # boolean flag for each vertex mod = Vector.new(dtype=bool, size=n, name='modified?', chunks=chunksz) # flag to terminate FastSV algorithm change = Scalar.from_value(True, dtype=bool, name='changed?') # set checkpoint for task graph f = f.persist() I = I.persist() gp = gp.persist() while change: # Step 1: Hooking phase mngp << semiring.min_second(A @ gp) f(binary.min)[I] << mngp f << op.min(f | mngp) # Step 2: Shortcutting f << op.min(f | gp) # Step 3: Calculate grandparents _, I = f.to_values() gp << f[I] # set checkpoint for task graph f = f.persist() I = I.persist() gp = gp.persist() # Check termination mod << op.ne(gp_dup & gp) change << mod.reduce(binary.lor) gp_dup << gp return f A = A.persist() connected_components = fastSV(A, chunksz=4) connected_components connected_components._delayed connected_components.compute()*connected_components* gives the label of the component to which each vertex belongs.Compare with the graph drawing 
to check result:nx.draw_networkx(G, with_labels=True, node_size=500, font_color='w', pos=layout)Get Item Recommendations from Clustersimport numpy as np import pandas as pd from random_gen import * from get_rec import * def get_rec_item(df_rec, top_k, ic_assignment): """Returns the top K item recommendations for each user in the user list. Items are selected randomly from the top recommended item cluster, exhaustively. Left overs are taken from the next highest ranked item clusters in a cascading fashion. Parameters: df_rec (pandas.DataFrame): Table containing the top N item cluster recommendations for each user in the user list ic_assignment (array-like): List containing the cluster assignment of each item top_n (int): Number of items to recommend Returns: df_rec_item (pandas.DataFrame): Table containing the top K item recommendations for each user in the user list """ # Class stuff #df_rec = self.df_rec # recommendations after running get_rec() #ic_assignment = self.item_assignment # item-cluster assignment # Create recommendation table df_rec_item = pd.DataFrame() df_rec_item['user_id'] = df_rec['user_id'] for i in range(top_k): df_rec_item['rank_'+str(i+1)] = np.zeros(df_rec_item.shape[0]) # Get items for j in range(df_rec_item.shape[0]): item_rec = [] rank = 0 while len(item_rec) < top_k: if rank+1 >= df_rec.shape[1]: item_list = list(set(ic_assignment.index)-set(item_rec)) item_rec = item_rec + list(np.random.choice(item_list, size=top_k-len(item_rec), replace=False)) break item_list = ic_assignment.index[np.where(ic_assignment == df_rec.iloc[j, rank+1])[0]] if top_k-len(item_rec) > len(item_list): item_rec = item_rec + list(item_list) rank += 1 else: item_rec = item_rec + list(np.random.choice(item_list, size=top_k-len(item_rec), replace=False)) df_rec_item.iloc[j, 1:] = item_rec # look-up tables #user_id_lookup = self.user_assignment.index #for j in range(df_rec_item.shape[0]): # df_rec_item.iloc[j, 0] = user_id_lookup[df_rec_item.iloc[j, 0].astype('int32')] return df_rec_itemExamplen_user = 100 n_item = 50 sample_size = 10 n_user_cluster = 5 n_item_cluster = 5 top_n = 3 random_seed = 1 user_id_list = list(range(n_user)) user_list = random_user_list(n_user, sample_size, random_seed) uc_assignment = random_user_cluster(n_user, n_user_cluster, random_seed) utility_matrix_o, utility_matrix = random_utility_matrix(n_user_cluster, n_item_cluster, random_seed) df_rec = get_rec(utility_matrix, utility_matrix_o, user_list, top_n, uc_assignment) df_rec ic_assignment = random_user_cluster(n_item, n_item_cluster, random_seed=2) ic_assignment top_k = 10 df_rec_item = get_rec_item(df_rec, top_k, pd.DataFrame(ic_assignment)) df_rec_itemUnit Testimport unittest class TestGetRecItem(unittest.TestCase): def test_1(self): # Set-up n_user = 100 n_item = 50 sample_size = 10 n_user_cluster = 5 n_item_cluster = 5 random_seed = 1 top_n = 3 top_k = 10 user_id_list = list(range(n_user)) user_list = random_user_list(n_user, sample_size, random_seed) uc_assignment = random_user_cluster(n_user, n_user_cluster, random_seed) utility_matrix_o, utility_matrix = random_utility_matrix(n_user_cluster, n_item_cluster, random_seed) df_rec = get_rec(utility_matrix, utility_matrix_o, user_list, top_n, uc_assignment) ic_assignment = random_user_cluster(n_item, n_item_cluster, random_seed=2) df_rec_item = get_rec_item(df_rec, top_k, pd.DataFrame(ic_assignment)) test_case = np.array([ [80., 7., 19., 26., 34., 38., 40., 43., 44., 45., 15.], [84., 2., 4., 8., 13., 16., 17., 22., 25., 48., 32.], [33., 2., 4., 8., 13., 16., 
17., 22., 25., 48., 21.], [81., 0., 1., 5., 23., 28., 35., 39., 41., 49., 48.], [93., 2., 4., 8., 13., 16., 17., 22., 25., 48., 38.], [17., 0., 1., 5., 23., 28., 35., 39., 41., 49., 2.], [36., 2., 4., 8., 13., 16., 17., 22., 25., 48., 43.], [82., 7., 19., 26., 34., 38., 40., 43., 44., 45., 47.], [69., 21., 46., 29., 24., 32., 10., 14., 30., 11., 12.], [65., 2., 4., 8., 13., 16., 17., 22., 25., 48., 43.] ]) self.assertEqual(df_rec_item.to_numpy().tolist(), test_case.tolist()) def test_2(self): # Set-up n_user = 100 n_item = 50 sample_size = 10 n_user_cluster = 5 n_item_cluster = 5 random_seed = 2 top_n = 3 top_k = 10 user_id_list = list(range(n_user)) user_list = random_user_list(n_user, sample_size, random_seed) uc_assignment = random_user_cluster(n_user, n_user_cluster, random_seed) utility_matrix_o, utility_matrix = random_utility_matrix(n_user_cluster, n_item_cluster, random_seed) df_rec = get_rec(utility_matrix, utility_matrix_o, user_list, top_n, uc_assignment) ic_assignment = random_user_cluster(n_item, n_item_cluster, random_seed=3) df_rec_item = get_rec_item(df_rec, top_k, pd.DataFrame(ic_assignment)) test_case = np.array([ [80., 7., 19., 26., 34., 38., 40., 43., 44., 45., 15.], [84., 2., 4., 8., 13., 16., 17., 22., 25., 48., 32.], [33., 2., 4., 8., 13., 16., 17., 22., 25., 48., 21.], [81., 0., 1., 5., 23., 28., 35., 39., 41., 49., 48.], [93., 2., 4., 8., 13., 16., 17., 22., 25., 48., 38.], [17., 0., 1., 5., 23., 28., 35., 39., 41., 49., 2.], [36., 2., 4., 8., 13., 16., 17., 22., 25., 48., 43.], [82., 7., 19., 26., 34., 38., 40., 43., 44., 45., 47.], [69., 21., 46., 29., 24., 32., 10., 14., 30., 11., 12.], [65., 2., 4., 8., 13., 16., 17., 22., 25., 48., 43.] ]) self.assertEqual(df_rec_item.to_numpy().tolist(), test_case.tolist()) unittest.main(argv=[''], verbosity=2, exit=False)test_1 (__main__.TestGetRecItem) ... ok test_2 (__main__.TestGetRecItem) ... FAIL ====================================================================== FAIL: test_2 (__main__.TestGetRecItem) ---------------------------------------------------------------------- Traceback (most recent call last): File "", line 77, in test_2 self.assertEqual(df_rec_item.to_numpy().tolist(), test_case.tolist()) AssertionError: Lists differ: [[83.0, 21.0, 8.0, 28.0, 0.0, 19.0, 17.0, 12[617 chars]2.0]] != [[80.0, 7.0, 19.0, 26.0, 34.0, 38.0, 40.0, 4[607 chars]3.0]] First differing element 0: [83.0, 21.0, 8.0, 28.0, 0.0, 19.0, 17.0, 12.0, 25.0, 43.0, 20.0] [80.0, 7.0, 19.0, 26.0, 34.0, 38.0, 40.0, 43.0, 44.0, 45.0, 15.0] Diff is 1513 characters long. Set self.maxDiff to None to see it. ---------------------------------------------------------------------- Ran 2 tests in 0.088s FAILED (failures=1)I think ideally, I should've removed the red geyser sample from datas right here, before looking for control sample.from astropy.table import Table control_inds = np.array([]) #we'll save the indices of your control galaxes here dm=0.1 for mass in np.log10(data['nsa_elpetro_mass']): #find the indices of galaxies which satisfy your criteria sel_control = np.where((np.log10(datas['nsa_elpetro_mass_2']) > (mass-0.1)) & (np.log10(datas['nsa_elpetro_mass_2']) < (mass+0.1))) #(you could add NUV-r > 5 reqiurement as well here ) sel_control = sel_control[0] control_inds=np.concatenate([control_inds,sel_control]) res=[] [res.append(x) for x in control_inds if x not in res] #getting rid of redundancies in indices hmm=np.array(res) #turn list into array print('The indices of the control galaxies:', len(res),'. 
I don"t know if the fact that this output is 50 less than 749 is a coincidence or not.') t=Table(datas) #attempting to apply index slicing t.add_index('nsa_elpetro_mass_2') res2=list(map(int, res)) control=t[res2] print(control) control.write('controlll-take-22.fits', overwrite=True) #export this control table that still has some red geysers in it cntrl = 'controlll-take-22.fits' hdul6 = fits.open(cntrl) data6 = hdul6[1].data hdu6 = hdul6[1] hdr6 = hdul6[0].header hdr6 = hdul6[1].header #I'm pretty sure this cell is obsolete last='control-geysers3.fits' #hdul7 = fits.open(last) #hdul7.info() #data7 is the final control group without any of the red geyser sample, even though TOPCAT matching only removed 44 galaxies from controlll-take-3 #hdu7 = hdul7[1] #hdr7 = hdul7[0].header #hdr7 = hdul7[1].header #data7 = hdu7.data #print(len(data7['nsa_elpetro_mass_1'])) #detections mask7=(data6['LOGMHI_2'] > -999) & (data6['Z_2'] < 0.05) mask10=(data6['LOGMHI_2'] < -999) & (data6['LOGHILIM200KMS_2'] > -999) #print(len(data6['nsa_elpetro_mass_2'][mask10])) xdetx=data6['nsa_elpetro_mass_2'][mask7] ydety=(10**(data6['LOGMHI_2'][mask7]))/(xdetx) print('Number of detected control galaxies =',len(xdetx)) #one galaxy (8082-6101) is neither non-detection nor detection #nondetections mask8=(data6['LOGHILIM200KMS_2'] > -999) & (data6['Z_2'] < 0.05) xnondetx=data6['nsa_elpetro_mass_2'][mask8] ynondety=(10**(data6['LOGHILIM200KMS_2'][mask8]))/(xnondetx) print('Number of non-detected control galaxies =',len(xnondetx)) #print('Number of detected control galaxies =',len(ynondety)) import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) #ax = fig.add_axes([0.1, 0.1, 0.6, 0.75]) plt.scatter(xdetx, ydety, label='control detections', color='darkslategrey', marker='x',s=30) plt.scatter(xnondetx, ynondety,label='control non-detections', color='silver', marker='o', s=30) plt.scatter(detx,dety, label='geyser detections', color= 'fuchsia', marker='x', s=50) plt.scatter(nondetx, nondety, label= 'geyser non-detections', color='red', marker='o', s=40) plt.xscale('log') plt.yscale('log') plt.xlabel("log $M_* [M_o]$", fontsize=15) plt.ylabel("log $M_{HI}/M_*$",fontsize=15) plt.legend(loc='lower left', fontsize=15) plt.savefig('all_mass_plot.png', bbox_inches='tight') plt.title("Control Group and Geyser Sample $M_{HI}/M_*$ vs $M_*$", fontsize=15) #combine red geyser table (data) with control sample table (controlll-take-22.fits) file2='all-finall.fits' hdul8 = fits.open(file2) hdul8.info() hdu8 = hdul8[1] hdr08 = hdul8[0].header hdr8 = hdul8[1].header alldata = hdu8.data #make column for detection hip = np.array(alldata['LOGMHI']) sel = (alldata['LOGMHI'] > -999) sel = np.multiply(sel, 1) print(sel) #import sys #np.set_printoptions(threshold=sys.maxsize) #print(sel) #print(alldata['LOGMHI']) #make column for sample bet=alldata['NUV_r'] bet[np.isnan(bet)] = 0 sel2 = bet > 0 sel2 = np.multiply(sel2, 1) #where_are_NaNs = isnan(alldata['NUV_r']) #alldata['NUV_r'][where_are_NaNs] = 0 bet[np.isnan(bet)] = 0 print(sel2) #make new column for MHI/M* take two ratio = np.zeros(len(alldata)) nondetections = alldata['LOGHILIM200KMS'] > -999 detections = (nondetections == False) ratio[nondetections] = (10**alldata['LOGHILIM200KMS'][nondetections])/(alldata['nsa_elpetro_mass'][nondetections]) ratio[detections] = (10**alldata['LOGMHI'][detections])/(alldata['nsa_elpetro_mass'][detections]) print(len(ratio)) #table for Dave M*, MHI/M*, detection (1=True, 0=False), and sample (1=red geyser, 0=control) tebl=Table() tebl['MANGAID'] = 
alldata['MANGAID_1'] tebl['M*'] = alldata['nsa_elpetro_mass'] tebl['MHI/M*'] = ratio tebl['Detection (0=non)'] = sel tebl['Sample (0=red geyser)'] = sel2 tebl.remove_row(195) tebl.show_in_notebook() tebl.write('stattable2.fits', format='fits')Pipelin is the boss# Create a pipeline that standardizes the data then creates a model import os from datetime import datetime import numpy as np import pandas as pd #read data, create listings dataframe path = '../../data/new-york-city-airbnb-open-data/' listings_csv = os.path.join(path,'listings.csv') listings = pd.read_csv(listings_csv) def less_than_50_percent(column): total_row = listings.shape[0] isnull_count = listings[column].isna().sum() if isnull_count/total_row > .5: return True columns = list(listings) remove_columns_0 = [] for column in columns: remove_column_y_n = less_than_50_percent(column) if remove_column_y_n: remove_columns_0.append(column) print(remove_columns_0) from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler, OneHotEncoder numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) numeric_features = listings.select_dtypes(include=['int64', 'float64']).columns categorical_features = listings.select_dtypes(include=['object']).drop(['Loan_Status'], axis=1).columns from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer( transformers=[ # ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) from sklearn.ensemble import RandomForestClassifierrf = Pipeline(steps=[('preprocessor', preprocessor), ('classifier', RandomForestClassifier())])Analyze Ariane Deduced Tranportsimport datetime import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import xarray as xr from scipy import stats import scipy %matplotlib inline SUBDIR_TMPL = '{:%d%b%y}' other_nan = ['********************']/home/sallen/anaconda/envs/py3/lib/python3.5/site-packages/pandas/computation/__init__.py:19: UserWarning: The installed version of numexpr 2.4.4 is not supported in pandas and will be not be used UserWarning)Find the daily averaged squared velocity at a key spotu_vel = xr.open_dataset('https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSn3DuVelocity1hV1') grid = xr.open_dataset('https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSnBathymetry2V1') iY = 341-1; iX = 283-1; iZ = 10 velocitypt1 = u_vel.uVelocity.sel(time ='2014', depth=iZ, gridX=iX, gridY=iY, method='nearest') velocitypt2 = u_vel.uVelocity.sel(time ='2015', depth=iZ, gridX=iX, gridY=iY, method='nearest') velocitypt3 = u_vel.uVelocity.sel(time ='2016', depth=iZ, gridX=iX, gridY=iY, method='nearest') # Yes, do run this. Its gets the data which makes the next cell happy. 
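# Side note (not from the original notebook): pd.rolling_mean and the
# resample(..., dim=..., how=...) call used below come from older pandas/xarray
# releases. With current versions the equivalents would be, e.g.:
#     day_avg_tide_vel = velsquared.resample(time='1D').mean()
#     low_pass_tide = day_avg_tide_pd.rolling(4, center=True).mean()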
fig, ax = plt.subplots(1, 1, figsize=(10, 5)) velocitypt1.plot() velocitypt2.plot() velocitypt3.plot() velocity = xr.concat([velocitypt1, velocitypt2, velocitypt3], dim='time') velsquared = velocity * velocity day_avg_tide_vel = velsquared.resample('1D', dim='time', how='mean') day_avg_tide_pd = day_avg_tide_vel.to_dataframe() day_avg_tide_pd.to_csv('day_avg_tide_pd.csv') day_avg_tide_pd = day_avg_tide_pd.drop('depth', 1) day_avg_tide_pd = day_avg_tide_pd.drop('gridY', 1) day_avg_tide_pd = day_avg_tide_pd.drop('gridX', 1) print (day_avg_tide_pd) #print(day_avg_tide_vel[:]) low_pass_tide = pd.rolling_mean(day_avg_tide_pd, 4, center=True) print (low_pass_tide) day_avg_tide_pd.plot() low_pass_tide.plot()Input Required Ariane Resultsrawstats = ['sn', 's1x', 's1x2'] stat = {} mean = np.zeros((3)) stdev = np.zeros((1)) FRtoBPdir = '/ocean/sallen/allen/research/MEOPAR/Ariane/BackFluxes/StatsFiles/' FRtoBP = pd.DataFrame(data=None, index=None, columns=['time', 'depth-mean', 'depth-std', 'salinity-mean', 'flux'], dtype=None, copy=False) VStoBPdir = '/ocean/sallen/allen/research/MEOPAR/Ariane/BackFluxesSouth/StatsFiles/' VStoBP = pd.DataFrame(data=None, index=None, columns=['time', 'depth-mean', 'depth-std', 'salinity-mean','flux'], dtype=None, copy=False) startdate = datetime.date(2014, 10, 27) + datetime.timedelta(days=15) #startdate = datetime.date(2015, 9, 1) enddate = datetime.date(2016, 3, 13) delt = enddate - startdate for nday in range(delt.days): rundate = startdate + datetime.timedelta(days=nday) for region, database, dire in zip(['FraserRidge', 'VictoriaSill'], [FRtoBP, VStoBP], [FRtoBPdir, VStoBPdir]): for stattype in rawstats: stat[stattype] = pd.read_csv(os.path.join(dire, stattype+'.'+SUBDIR_TMPL.format(rundate).lower()), index_col=0, na_values=other_nan) stat[stattype].index = [x.strip() for x in stat[stattype].index] for i, parameter in enumerate(['depth', 'sal']): themean = stat['s1x'][parameter]/stat['sn']['sn'] mean[i] = getattr(themean, region) for i, parameter in enumerate(['depth']): thestdev = np.sqrt(np.abs((stat['s1x2'][parameter] - stat['s1x'][parameter]**2 / stat['sn']['sn']) / (stat['sn']['sn']-1))) stdev[i] = getattr(thestdev, region) mean[2] = getattr(stat['sn']['flux'], region) database.loc[nday] = [rundate, mean[0], stdev[0], mean[1], mean[2]] FRtoBP =FRtoBP.set_index('time') VStoBP =VStoBP.set_index('time') ax = FRtoBP['flux'].plot() low_pass_tide.plot(ax=ax)Look at Raw Transportsfig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(FRtoBP.index, FRtoBP['flux']) ax.plot(VStoBP.index, (VStoBP['salinity-mean']-30)*20000) ax.plot(low_pass_tide.index, low_pass_tide['uVelocity']*50000.) 
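# The cells below use np.correlate(..., mode='full') and compare argmax(c)
# against c.shape[0]/2 to judge the lag between two series. A tiny
# self-contained sketch (toy arrays, hypothetical names) of that convention:
# for equal-length inputs the zero-lag term sits at index N-1, so
#     lag = np.argmax(c) - (N - 1)
# e.g. _a = np.array([0., 1., 0., 0.]); _b = np.array([0., 0., 1., 0.])
#      _c = np.correlate(_a - _a.mean(), _b - _b.mean(), mode='full')
#      np.argmax(_c) - (len(_a) - 1)   # gives -1; the sign depends on argument order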
c1=np.correlate(FRtoBP['flux']-np.mean(FRtoBP['flux']), VStoBP['salinity-mean']-np.mean(VStoBP['salinity-mean']), mode='full') fig, ax = plt.subplots(1, 1, figsize=(15, 7)) scale1 = np.std(FRtoBP['flux']-np.mean(FRtoBP['flux'])) scale2 = np.std(VStoBP['salinity-mean']-np.mean(VStoBP['salinity-mean'])) scaled_c1 = c1/(scale1*scale2)/c1.shape[0] ax.plot(scaled_c1) print (scaled_c1.max(), scaled_c1.argmax(), c1.shape[0]/2., FRtoBP.index.shape) slope, intercept, r_value, p_value, std_err = stats.linregress(VStoBP['salinity-mean'], FRtoBP['flux']) print (slope, intercept, r_value, p_value, std_err) fig, ax = plt.subplots(1, 1, figsize=(7, 7)) ax.plot(VStoBP['salinity-mean'], FRtoBP['flux'], 'o') ax.plot(VStoBP['salinity-mean'], -604459.313736+20234.2945168*VStoBP['salinity-mean'], 'o') print (r_value**2) step_two = FRtoBP['flux'] - (-604459.313736+20234.2945168*VStoBP['salinity-mean']) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(FRtoBP.index, step_two) ax.plot(low_pass_tide.index, low_pass_tide['uVelocity']*50000.-50000) print(low_pass_tide.index[-90], FRtoBP.index[-1], low_pass_tide.index[48], FRtoBP.index[0]) c1=np.correlate(step_two, low_pass_tide['uVelocity'][48+1:-89+1]-np.mean(low_pass_tide['uVelocity'][48+1:-89+1]), mode='full') fig, ax = plt.subplots(1, 1, figsize=(15, 7)) scale1 = np.std(step_two) scale2 = np.std(low_pass_tide['uVelocity'][48:-89]-np.mean(low_pass_tide['uVelocity'][48:-89])) scaled_c1 = c1/(scale1*scale2)/c1.shape[0] ax.plot(scaled_c1) print (scaled_c1.max(), scaled_c1.argmax(), c1.shape[0]/2. ) ax.plot([487, 487], [-0.20, 0.20]) print (step_two.shape, low_pass_tide['uVelocity'][48+1:-89+1].shape) slope, intercept, r_value, p_value, std_err = stats.linregress(low_pass_tide['uVelocity'][48+1:-89+1], step_two) print (slope, intercept, r_value, p_value, std_err) fig, ax = plt.subplots(1, 1, figsize=(7, 7)) ax.plot(low_pass_tide['uVelocity'][48+1:-89+1], step_two, 'o') ax.plot(low_pass_tide['uVelocity'][48+1:-89+1], intercept+slope*low_pass_tide['uVelocity'][48+1:-89+1], 'o') print (r_value**2) step_three = step_two - (intercept + slope* low_pass_tide['uVelocity'][48:-89].shift(periods=1)) print (step_three.shape, step_two.shape,low_pass_tide['uVelocity'][48+1:-89+1].shape ) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(FRtoBP.index, step_three) X = np.fft.fft(np.array(step_three[1:])) Z = np.fft.fft(np.array(step_two[1:])) Y = np.fft.fft(np.array(FRtoBP['flux'])) omega = np.fft.fftfreq(step_three[1:].shape[0]) omega3 = np.fft.fftfreq(step_two[1:].shape[0]) omega2 = np.fft.fftfreq(FRtoBP['flux'].shape[0]) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(omega2, np.absolute(Y), linewidth=3) ax.plot(omega3, np.absolute(Z), linewidth=2) ax.plot(omega, np.absolute(X), 'r') ax.set_ylim((0, 2e6)) ax.set_xlim((0, 0.2)) ax.plot(1/14.75, 1e6, 'or') ax.plot(1/27.5, 1e6, 'or') ax.plot(0.5/14.75+0.5/27.5, 1e6, 'og') X = np.fft.fft(np.array(VStoBP['flux']/50000.)) Z = np.fft.fft(np.array(VStoBP['salinity-mean'])) Y = np.fft.fft(np.array(FRtoBP['flux']/50000.)) A = np.fft.fft(np.array(FRtoBP['salinity-mean']/5.)) omega = np.fft.fftfreq(VStoBP['flux'].shape[0]) omega3 = np.fft.fftfreq(VStoBP['salinity-mean'].shape[0]) omega2 = np.fft.fftfreq(FRtoBP['flux'].shape[0]) omega4 = np.fft.fftfreq(FRtoBP['salinity-mean'].shape[0]) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(omega2, np.absolute(Y), linewidth=3, label='VStoBP, flux') ax.plot(omega3, np.absolute(Z), linewidth=2, label='VS, salinity') ax.plot(omega, np.absolute(X), label='FRtoBP, flux') 
ax.plot(omega4, np.absolute(A), label='FR, salinity') ax.set_ylim((0, 100)) ax.set_xlim((0, 0.2)) ax.plot(1/14.75, 50, 'or') ax.plot(1/27.5, 50, 'or') ax.plot(0.5/14.75+0.5/27.5, 50, 'og') ax.legend() fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(FRtoBP.index, step_three/20000.+25.) ax.plot(FRtoBP.index, FRtoBP['salinity-mean']) print (fr[0]) c1=np.correlate(step_three[1:], FRtoBP['salinity-mean']-np.mean(FRtoBP['salinity-mean']), mode='full') fig, ax = plt.subplots(1, 1, figsize=(15, 7)) scale1 = np.std(step_three[1:]) scale2 = np.std(FRtoBP['salinity-mean']-np.mean(FRtoBP['salinity-mean'])) scaled_c1 = c1/(scale1*scale2)/c1.shape[0] ax.plot(scaled_c1) print (scaled_c1.min(), scaled_c1.argmin(), c1.shape[0]/2., step_three.index.shape, fr.index.shape) ax.plot([487, 487], [-0.15, 0.14]) slope, intercept, r_value, p_value, std_err = stats.linregress(FRtoBP['salinity-mean'].shift(periods=-1)[:-1], step_three[1:]) print (slope, intercept, r_value, p_value, std_err) fig, ax = plt.subplots(1, 1, figsize=(7, 7)) ax.plot(FRtoBP['salinity-mean'].shift(periods=-1)[:-1], step_three[1:], 'o') ax.plot(FRtoBP['salinity-mean'].shift(periods=-1)[:-1], intercept+slope*FRtoBP['salinity-mean'].shift(periods=-1)[:-1], 'o') step_four = step_three - (intercept + slope* FRtoBP['salinity-mean'].shift(periods=-1)[:-1]) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(FRtoBP.index, FRtoBP['flux'] - np.mean(FRtoBP['flux'])) ax.plot(FRtoBP.index, step_two) ax.plot(FRtoBP.index, step_three) ax.plot(FRtoBP.index, step_four) print (np.std(FRtoBP['flux'] - np.mean(FRtoBP['flux'])), np.std(step_two), np.std(step_three), np.std(step_four)) X = np.fft.fft(np.array(FRtoBP['flux'] - np.mean(FRtoBP['flux']))) Z = np.fft.fft(np.array(step_two)) Y = np.fft.fft(np.array(step_three[1:])) A = np.fft.fft(np.array(step_four[1:-1])) omega = np.fft.fftfreq(FRtoBP['flux'].shape[0]) omega3 = np.fft.fftfreq(step_two.shape[0]) omega2 = np.fft.fftfreq(step_three.shape[0]-1) omega4 = np.fft.fftfreq(step_four.shape[0]-2) fig, ax = plt.subplots(1, 1, figsize=(15, 7)) ax.plot(omega2, np.absolute(Y), linewidth=3, label='step_three') ax.plot(omega3, np.absolute(Z), linewidth=2, label='step_two') ax.plot(omega, np.absolute(X), label='original') ax.plot(omega4, np.absolute(A), label='step_four') ax.set_xlim((0, 0.2)) ax.plot(1/14.75, 50, 'or') ax.plot(1/27.5, 50, 'or') ax.plot(0.5/14.75+0.5/27.5, 50, 'og') ax.legend()Normalize the Transport Idea is to normalize by a velocity = sqrt(g'h) a width and a depth. 
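Written out, the normalization proposed above is a transport divided by an internal velocity scale times a cross-section, with $W$ and $H$ standing in for a representative channel width and depth and $g' = g\,\Delta\rho/\rho_0$ the reduced gravity (these symbols only formalize the sentence above and are not computed in this notebook):
$$Q^{*} = \frac{Q}{\sqrt{g'h}\,W H}, \qquad g' = g\,\frac{\Delta\rho}{\rho_{0}}$$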
For now, we will just use the sqrt(salinity difference) as the only component that actually changesdifference = VStoBP - FRtoBP TransportScale = np.sqrt(difference.loc[:, 'salinity-mean']) fig, ax = plt.subplots(1, 1, figsize=(10, 5)) ax.plot(difference.time, TransportScale) fig, ax = plt.subplots(1, 1, figsize=(10, 5)) ax.plot(FRtoBP.time, FRtoBP.loc[:, 'flux']/TransportScale) ax.plot(VStoBP.time, VStoBP.loc[:, 'flux']/TransportScale) print ('Fraser Ridge to Boundary Pass') print (np.mean(FRtoBP.loc[:, 'flux']/TransportScale)) print (np.std(FRtoBP.loc[:, 'flux']/TransportScale)) print ('Normalized Variance = ', np.std(FRtoBP.loc[:, 'flux']/TransportScale) / np.mean(FRtoBP.loc[:, 'flux']/TransportScale)) print ('Victoria Sill to Boundary Pass') print (np.mean(VStoBP.loc[:, 'flux']/TransportScale)) print (np.std(VStoBP.loc[:, 'flux']/TransportScale)) print ('Normalized Variance = ', np.std(VStoBP.loc[:, 'flux']/TransportScale) / np.mean(VStoBP.loc[:, 'flux']/TransportScale))Estimate a Richardson Number The Richardson number should be Brunt-Vaisala squared divided by the velocity shear squared. Here we will use g'/h as a measure of the stratification and U/h as the velocity shear to give us a Richardson number ofg'h/U^2.And until I do everything nice, we will approximate this as the difference in Salinity divided by the velocity squared.Ri = (VStoBP-FRtoBP)/day_avg_tide_vel fig, ax = plt.subplots(1, 1, figsize=(10, 5)) ax.plot(Ri.time, Ri.loc[:, 'salinity-mean'], 'r') ax.plot(FRtoBP.time, FRtoBP.loc[:, 'flux']/TransportScale/2000.) ax.plot(VStoBP.time, VStoBP.loc[:, 'flux']/TransportScale/2000.) fig, ax = plt.subplots(1, 1, figsize=(10, 5)) ax.plot(Ri.loc[:, 'salinity-mean'], FRtoBP.loc[:, 'flux']/TransportScale, 'o') ax.plot(Ri.loc[:, 'salinity-mean'], VStoBP.loc[:, 'flux']/TransportScale, 'o')Find the maximum lagged correlationfig, ax = plt.subplots(1, 1, figsize=(10,5)) c1 = np.correlate(Ri.loc[:, 'salinity-mean']-np.mean(Ri.loc[:, 'salinity-mean']), FRtoBP.loc[:, 'flux']/TransportScale-np.mean(FRtoBP.loc[:, 'flux']/TransportScale), mode='full') ax.plot(c1,'o-') c2 = np.correlate(Ri.loc[:, 'salinity-mean']-np.mean(Ri.loc[:, 'salinity-mean']), VStoBP.loc[:, 'flux']/TransportScale-np.mean(VStoBP.loc[:, 'flux']/TransportScale), mode='full') ax.plot(c2,'o-') c3 = np.correlate(Ri.loc[:, 'salinity-mean']-np.mean(Ri.loc[:, 'salinity-mean']), Ri.loc[:, 'salinity-mean']-np.mean(Ri.loc[:, 'salinity-mean']), mode='full') #ax.plot(c3*1000,'o-') shiftFR = np.argmax(c1) - np.argmax(c3) shiftVS = np.argmax(c2) - np.argmax(c3) print (shiftFR, shiftVS) Riadv = Ri Riadv['time'] = Riadv['time'] + datetime.timedelta(days=int(shiftVS)) Riback = Ri Riback['time'] = Riadv['time'] + datetime.timedelta(days=int(shiftFR)) fig, ax = plt.subplots(1, 1, figsize=(10, 5)) ax.plot(Riback.loc[:, 'salinity-mean'], FRtoBP.loc[:, 'flux']/TransportScale, 'o') ax.plot(Riadv.loc[:, 'salinity-mean'], VStoBP.loc[:, 'flux']/TransportScale, 'o')Transitions> Describe transitions between states OverviewA transition is a nonrecurrent bi-gram, which means each transition in a sequence represents a change between states (as opposed to staying in the same state). Transitions are an important part of sequence analysis, and the transition matrix generated by the `get_transition_matrix` method is used in many other parts of the library.#export def get_transitions(sequence): "Extracts a list of transitions from a sequence, returning a list of lists containing each transition." 
transitions = [] for position in range(len(sequence) - 1): if sequence[position] != sequence[position + 1]: transitions.append([sequence[position], sequence[position + 1]]) return transitions #export def get_ntransitions(sequence): "Computes the number of transitions in a sequence." return len(get_transitions(sequence)) #export from pysan.core.elements import get_alphabet from pysan.core.ngrams import get_all_ngrams import numpy as np, pandas as pd def get_transition_matrix(sequence, alphabet=None, verbose=False): "Computes a transition matrix for each bigram in a sequence. The resulting matrix can be interpreted by reading along the side first, then across the top, indicating from the element down the side, to the element along the top. For example, to find the number of transitions from element 2 to element 3, find element 2 down the side, then follow that row across until it reaches element 3 across the top." if alphabet == None: alphabet = get_alphabet(sequence) all_ngrams = get_all_ngrams(sequence, 2) transition_matrix = np.zeros((len(alphabet), len(alphabet))) descriptive_matrix = [['-' for x in range(len(alphabet))] for y in range(len(alphabet))] for x, element_row in enumerate(alphabet): for y, element_column in enumerate(alphabet): current_ngram = [element_row, element_column] descriptive_matrix[x][y] = 'n' + str(current_ngram) #print('from', current_ngram[0], 'to', current_ngram[1], ':', all_ngrams.count(current_ngram)) transition_matrix[x, y] = all_ngrams.count(current_ngram) # add column & index labelling in TraMineR style pre_alphabet = [str(a) + '->' for a in alphabet] post_alphabet = ['->' + str(a) for a in alphabet] if verbose: de_df = pd.DataFrame(descriptive_matrix, columns=post_alphabet, index=pre_alphabet) print(de_df) tm_df = pd.DataFrame(transition_matrix, columns=post_alphabet, index=pre_alphabet) return tm_df sequence = [1,1,2,1,2,2,3,1,1,2,2,1,2,2,3,1,1,2] get_transition_matrix(sequence)PlottingAs with other plotting methods in the pysan library, the methods below can each be called on a single sequence and will return a matplotlib pylot object which can be modified as needed.#export import matplotlib.pyplot as plt import matplotlib.cm as cm import copy def plot_transition_matrix(sequence, cmap='summer'): "Computes and plots a transition matrix, returning a colored matrix with elements at position n up the y axis, and elements at position n+1 along the x axis." 
matrix = get_transition_matrix(sequence) results_size = len(matrix.columns) values = np.empty((results_size, results_size), dtype=object) for r, row in enumerate(matrix.values): for e, element in enumerate(row): if element == "-": values[r, e] = 100 continue if element == "": values[r, e] = np.nan continue if "*" in str(element): value = element.replace("*", "") values[r, e] = float(value) else: values[r, e] = element current_cmap = copy.copy(cm.get_cmap(cmap)) current_cmap.set_bad(color="white") plt.figure() # this one-liner sets the x axis to appear at the top of this plot only with plt.rc_context({'xtick.bottom':False, 'xtick.labelbottom':False, 'xtick.top':True, 'xtick.labeltop':True}): ax = plt.gca() ax.xaxis.set_label_position('top') plt.imshow(np.array(values).astype(float), cmap=current_cmap) plt.yticks(range(len(matrix.index)), list(matrix.index)) plt.xticks(range(len(matrix.columns)), list(matrix.columns)) cbar = plt.colorbar() #cbar.set_ticks([-100, -80, -60, -40, -20, 0, 20, 40, 60, 80, 100]) #cbar.set_ticklabels([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1]) plt.grid(False) return plt plt = plot_transition_matrix(sequence) plt.show()NY State Freedom of Information Law Committee on Open Government Advisory Opinions Web Scraping Notebook The Committee on Open Government is responsible for overseeing the Freedom of Information Law in New York State by advising government agencies, the press and the public. Their written advisory opinions are effectively compliance instructions and may serve as precedent in similar cases. According to the Committee Chair,> Since its creation in 1974, more than 24,000 written advisory opinions have been prepared by the Committee at the request of government, the public and the news media. \[. . .\] Opinions prepared since early 1993 that have educational or precedential value are maintained online, identified by means of a series of key phrases in separate indices created in relation to the Freedom of Information Law and the Open Meetings Law. However, that online access is largely unstructured and difficult to use. Opinions are given a single topic keyphrase, often using bureaucratic vocabulary, and grouped together on webpages by the first letter of their keyphrase. These pages, and the site in general, lack descriptive structures like titles, so the only available search feature (a Google plugin) displays results that appear largely identical. To handcraft finding tools would be arduous and require the domain knowledge of the committee members, who do not themselves have experience in information storage and retrieval. As an alternative, text mining and machine learning methods can help to generate the data and structure needed for robust search and discovery features. 
This notebook is the first stage in that process: Data collection and initial cleaning.import pandas as pd import numpy as np from bs4 import BeautifulSoup from requests import get from requests.exceptions import RequestException from contextlib import closing import time import pickle import re #A GET url request function with error logging def simple_get(url): try: with closing(get(url, stream=True)) as resp: if is_good_response(resp): return resp.content else: return None except RequestException as e: log_error('Error during requests to {0} : {1}'.format(url, str(e))) return None def is_good_response(resp): content_type = resp.headers['Content-Type'].lower() return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1) def log_error(e): print(e)Gathering the Opinions---There are two layers we need to gather urls from. The first is the A-Z index, one page per letter. Then we travel to each of those pages and gather all of the links to individual opinions. The site salts the urls of the letters by periodically adding a different random letter before the letter of the index. This may be an effort to prevent web scraping or could be an unintended behavior while serving the content. Either way, the page doesn't have a robots.txt file, so we scrape as politely as we can. This scraping code looks at the index page and gets the URLs of each letter and stores them in a list.---base = 'https://www.dos.ny.gov/coog/foil_listing/' index_page = simple_get(base + 'findex.html') index_soup = BeautifulSoup(index_page, 'html.parser') index = index_soup.find_all('table') index[0].find_all('td') index_urls = [] for i in index[0].find_all('a'): index_urls.append(base + i['href'])Getting the Opinions and Relevant Metadata---We structure the data as a dictionary, with the Advisory Committee keyphrases as keys. We travel to each letter and capture all of the keyphrases and any associated links to opinions. We then travel to any linked opinions and store the opinion text. We keep the html markup because it contains useful information, like paragraphing and typographic elements, that we can mine later. ---tag_dict = {} #iterate through the alphabetical index for url in index_urls: #get the html from the index page index_page_html = simple_get(url) #pass over any dead links if index_page_html is None: pass else: #This html is particularly soupy. 
We need to grab every other 'td', # table_side switches from positive to negative for left and right td's table_side = -1 #parse the page soup = BeautifulSoup(index_page_html, "html.parser") #grabs the second table table = soup.find_all('table')[1] for item in table.find_all('td'): #grabs the text from the leftside td, which is keyword name if table_side == -1: text = item.text #makes a list of the links and the non-linked codes for rulings from the rightside td else: tag_dict[text] = [[a['href'] for a in item.find_all('a')], item.text.replace(',','').split()] #flips the sign on the count to grab from right/left table_side *= -1 #to be polite to the server time.sleep((np.random.randint(100,200))/100)Store Opinions and IDs---Opinion IDs will help us distinguish the published from the unpublished opinions later when we process the PDFs of additional opinions we received from the Committee.---#add a list for each key to hold the opinions for data_list in tag_dict.values(): data_list.append([]) #if there were any urls for the keyword if len(data_list[0]) > 0: #iterate through the urls and get the html of the ruling for url in data_list[0]: ruling = simple_get(url) #handle dead links if ruling is None: data_list[2].append('dead link') #only store the div with the content, the rest is formatting else: soup = BeautifulSoup(ruling, "html.parser") data_list[2].append(soup.find_all('div', id='mainContent')[0].prettify()) #take between 1.0 and 1.5 seconds between calls to be polite to the server #(and avoid any potential ) time.sleep((np.random.randint(100,150)/100))Cleaning---Here we do some basic cleaning to get the plain text from the html and store it. For this process to scale, we would simply convert this to a function and serve it directly to whichever model we were using. After cleaning, we pickle the dictionary and save it to the hard drive. ---#add a new list to hold the plain text opinion for data_list in tag_dict.values(): data_list.append([]) for i in range(len(data_list[2])): #pass over dead links if data_list[2][i] == 'dead link': pass #clean up the text for bag of words (bow) approach else: #grab the text from the div goup = BeautifulSoup(data_list[2][i]).div #Remove the JavaScript goup = goup.get_text().replace('document.write("OML-AO-"+DocName);', '') #Remove the carriage return/newline because for bow it doesn't matter goup = re.sub(r'\s+',' ', goup ) #replace escaped single quotes with double quotes goup = re.sub(r'\'', '"', goup) #add the cleaned text to the new list data_list[3].append(goup) with open('FOILAdvisoryDecisionDataWithText.pickle', 'wb') as f: # Pickle the dictionary using the highest protocol available. pickle.dump(tag_dict, f, pickle.HIGHEST_PROTOCOL)Confirm ----As a precaution, make sure the data saved correctly before closing the notebook.----with open('FOILAdvisoryDecisionDataWithText.pickle', 'rb') as f: # The protocol version used is detected automatically, so we do not # have to specify it. tester = pickle.load(f) len(tester) == len(tag_dict)Accessing Zotero Via the APIWe want to write scripts which allow us to generate formatted bibliographies from our Zotero database which can be found here: [CMSEatMSU Zotero Group](https://www.zotero.org/groups/2465063/cmseatmsu) API overviewI was reading the [Zotero API instructions](https://www.zotero.org/support/dev/web_api/v3/basicssearch_syntax) and learned a lot. 
I created the following video as a quick explanation.from IPython.display import YouTubeVideo YouTubeVideo("fsSTeVz8lfc",width="100%",cc_load_policy=True)Everything starts with the Basic URL. The Basic URL by itself does nothing interesting:https://api.zotero.org We can build off the basic URL by adding in the groups (or user) keyword and their number. For example here is the basic URL with the CMSEatMSU group added. This produces just the group information:https://api.zotero.org/groups/2465063 We can dig a little deeper and see the collections (and their keys) inside the groups as follows:https://api.zotero.org/groups/2465063/collections If we know a collection key (e.g. MPY3BTV3, used below) then we can see the items in the collection:https://api.zotero.org/groups/2465063/collections/MPY3BTV3/items What I want is to generate formatted data; the format=bib parameter is the trick. We can add in a parameter by using the "```?```" character and then our ```parameter=value```. Unfortunately this only works with up to 150 entries:https://api.zotero.org/groups/2465063/collections/MPY3BTV3/items?format=bib I can use multiple parameters by separating them with the "```&```" symbol. This one uses linkwrap set to 1 to include clickable links in the output:https://api.zotero.org/groups/2465063/collections/MPY3BTV3/items?format=bib&linkwrap=1 There are a few special formats like "coins" and "bibtex" which we may also want to investigate:https://api.zotero.org/groups/2465063/collections/MPY3BTV3/items?format=coins Using Pyzotero[PyZotero gitrepo](https://github.com/urschrei/pyzotero)#pip install --user pyzotero # CMSEatMSU info library_id = 2465063 library_type = 'group' api_key = '' from pyzotero import zotero zot = zotero.Zotero(library_id = library_id, library_type = library_type, api_key = api_key) items = zot.top() # we've retrieved the latest five top-level items in our library # we can print each item's item type and ID # for item in items: # print('Item: %s | Key: %s' % (item['data']['itemType'], item['data']['key']))Read all collections (Zotero Folders) and identify publications for each collectionfor folder in zot.collections(): print(folder['data']['name'], folder['data']['key']) for item in zot.collection_items(folder['data']['key']): print('* Item: %s | Key: %s' % (item['data']['itemType'], item['data']['key'])) dir(zotero.Zotero) zotero.mimetypes? 
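The URL pattern described above can also be exercised directly with requests rather than pyzotero; a minimal sketch using the group number and the MPY3BTV3 collection key from the examples above (and keeping in mind the 150-entry limit on format=bib):
import requests
base = 'https://api.zotero.org'
group_id = 2465063
collection_key = 'MPY3BTV3'
# format=bib returns a formatted bibliography; linkwrap=1 makes the links clickable
url = '{}/groups/{}/collections/{}/items'.format(base, group_id, collection_key)
resp = requests.get(url, params={'format': 'bib', 'linkwrap': 1})
print(resp.status_code)
print(resp.text[:500])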
dir(zotero)First, let us load the training and testing data from the adult dataset.import numpy as np X_train = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", usecols=(0, 4, 10, 11, 12), delimiter=", ") y_train = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", usecols=14, dtype=str, delimiter=", ") X_test = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", usecols=(0, 4, 10, 11, 12), delimiter=", ", skiprows=1) y_test = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", usecols=14, dtype=str, delimiter=", ", skiprows=1) y_test = np.array([a[:-1] for a in y_test])Naive Bayes with No PrivacyTo begin, let us first train a regular (non-private) naive Bayes classifier, and test its accuracy.from sklearn.naive_bayes import GaussianNB nonprivate_clf = GaussianNB() nonprivate_clf.fit(X_train, y_train) from sklearn.metrics import accuracy_score print("Non-private test accuracy: %.2f%%" % (accuracy_score(y_test, nonprivate_clf.predict(X_test)) * 100))Non-private test accuracy: 79.64%Differentially Private Naive Bayes ClassificationFirst, install IBM Differential Privacy Library.!pip install diffprivlibCollecting diffprivlib [?25l Downloading https://files.pythonhosted.org/packages/fe/b8/852409057d6acc060f06cac8d0a45b73dfa54ee4fbd1577c9a7d755e9fb6/diffprivlib-0.3.0.tar.gz (70kB)  |████▋ | 10kB 20.3MB/s eta 0:00:01  |█████████▎ | 20kB 19.2MB/s eta 0:00:01  |██████████████ | 30kB 12.4MB/s eta 0:00:01  |██████████████████▋ | 40kB 13.7MB/s eta 0:00:01  |███████████████████████▎ | 51kB 14.9MB/s eta 0:00:01  |████████████████████████████ | 61kB 16.9MB/s eta 0:00:01  |████████████████████████████████| 71kB 5.4MB/s [?25hRequirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from diffprivlib) (1.18.5) Requirement already satisfied: setuptools>=39.0.1 in /usr/local/lib/python3.6/dist-packages (from diffprivlib) (50.3.2) Requirement already satisfied: scikit-learn>=0.22.0 in /usr/local/lib/python3.6/dist-packages (from diffp[...]Using the models.GaussianNB module of diffprivlib, we can train a naive Bayes classifier while satisfying differential privacy. If we don't specify any parameters, the model defaults to epsilon = 1.00.import diffprivlib.models as dp dp_clf = dp.GaussianNB() dp_clf.fit(X_train, y_train) print("Differentially private test accuracy (epsilon=%.2f): %.2f%%" % (dp_clf.epsilon, accuracy_score(y_test, dp_clf.predict(X_test)) * 100))Differentially private test accuracy (epsilon=1.00): 79.98%As we can see from the output accuracies above, the regular (non-private) Naïve Bayes classifier could produce an accuracy of 79.64%, while setting epsilon=1.00, the differentially private Naïve Bayes classifier could achieve an accuracy of 78.59%. If we use a smaller epsilon, it usually leads to better privacy protection while less accuracy. 
For instance, if we set epsilon=0.01:import diffprivlib.models as dp dp_clf = dp.GaussianNB(epsilon=float("0.01")) dp_clf.fit(X_train, y_train) print("Differentially private test accuracy (epsilon=%.2f): %.2f%%" % (dp_clf.epsilon, accuracy_score(y_test, dp_clf.predict(X_test)) * 100))Differentially private test accuracy (epsilon=0.01): 76.91%Basic descriptive statistcsUsing pandas and numpy, we can do basic statistics.import pandas as pd import numpy as np data_frame = pd.read_csv("../../data/short_table.csv", delimiter=',') # show it data_frameYou can _take_ a column out of the DataFrame. In this context it works like a Python dictionary.data_frame["area"]Even though this data structure appears more than just a vector, numpy is capable of applying basic descriptive statistics functions:np.mean(data_frame["area"]) np.min(data_frame["area"]) np.max(data_frame["area"])Individual cells of the DataFrame can be accessed like this:data_frame["area"][0]For loops can also iterate over table columns like this:for area_value in data_frame["area"]: print(area_value)45 23 68Reflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func # create engine to hawaii.sqlite engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect = True) # View all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine)Exploratory Precipitation Analysis# Find the most recent date in the data set. most_recent = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date most_recent # Design a query to retrieve the last 12 months of precipitation data and plot the results. # Starting from the most recent data point in the database. # Calculate the date one year from the last date in data set year_before = dt.date(2017,8,23) - dt.timedelta(days = 365) # Perform a query to retrieve the data and precipitation scores precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date > year_before).\ order_by(Measurement.date).all() precipitation[0] # Save the query results as a Pandas DataFrame and set the index to the date column precipitation_df = pd.DataFrame(precipitation) # Clean up column names precipitation_df = precipitation_df.rename(columns={"prcp": "Precipitation", "date": "Date"}) # Sort the dataframe by date precipitation_df = precipitation_df.sort_values(by = "Date").dropna() precipitation_df # Use Pandas Plotting with Matplotlib to plot the data precipitation_df.plot("Date", "Precipitation", rot = 70, figsize = (11,6), legend = False) plt.title("Precipitation Analysis") plt.xlabel("Date") plt.ylabel("Precipitation (in inches)") plt.savefig("Resources/precipitation_analysis.png", dpi = 75, bbox_inches = "tight") plt.show() # Use Pandas to calcualte the summary statistics for the precipitation data precipitation_df.describe()Exploratory Station Analysis# Design a query to calculate the total number stations in the dataset Stations = session.query(Station).count() print(f"The total number of stations is {Stations}") # Design a query to find the most active stations (i.e. what stations have the most rows?) 
# List the stations and the counts in descending order. active_stations = (session.query(Measurement.station, func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()) # Make the output look pretty for j, i in active_stations: print("The station " + j + " had " + str(i) + " observations" ) # Using the most active station id from the previous query, calculate the lowest, highest, and average temperature. most_active = active_stations[0][0] session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\ filter(Measurement.station == most_active).all() # Using the most active station id # Query the last 12 months of temperature observation data for this station and plot the results as a histogram results = session.query(Measurement.date, Measurement.tobs).\ filter(Measurement.date >= "2016-08-24").\ filter(Measurement.date <= "2017-08-23").\ filter(Measurement.station == most_active).all() # convert to dataframe before plotting results results_df = pd.DataFrame(results) # histogram of plotted results results_df.plot.hist(by = "tobs", bins = 12, figsize = (11,6), legend = False) plt.title("Station Analysis") plt.xlabel("Temperature") plt.ylabel("Frequency") plt.savefig("Resources/station_analysis.png", dpi = 75, bbox_inches = "tight") plt.show()Close session# Close Session session.close()Introduction to Kaggle Titanic CompetitionIn this project, I will be looking at Kaggle Titanic Competition and submitting my piece of code. Kaggle is a site where people create algorithms and compete against machine learning practitioners around the world. Your algorithm wins if it's the most accurate on a particular data set. To begin the problem we have to think logically about the columns and what we're trying to predict. What variables might logically affect the outcome of survival? Reading more about the Titanic disaster might help you with this.We know that women and children were more likely to survive, so Age and Sex are probably good predictors. It's also logical to think that passenger class might affect the outcome, because first class cabins were closer to the deck of the ship. While fare is tied to passenger class and will probably have a strong correlation with it, it may also give us some useful information.Family size (the number of siblings and parents/children) will probably be correlated with survival one way or the other. That's because there would either be more people to help you, or more people to think about trying to save.There may be links between survival and columns like Ticket, Name, and Embarked (because people who boarded at certain ports may have had cabins closer or farther away from the top of the ship), .We call this step acquiring domain knowledge, and it's fairly important to most machine learning tasks. We're looking to engineer the features so that we maximize the information we have about what we're trying to predict. Examining the dataimport pandas as pd titanic = pd.read_csv("C:/Users/Jennifer/Documents/Python/Data/titanic_train.csv") # Print the first five rows of the dataframe print(titanic.head(5)) print(titanic.describe())PassengerId Survived Pclass \ 0 1 0 3 1 2 1 1 2 3 1 3 3 4 1 1 4 5 0 3 Name Sex Age SibSp \ 0 Braund, Mr. male 22.0 1 1 Cumings, Mrs. (... female 38.0 1 2 Heikkinen, female 26.0 0 3 Futrelle, Mrs. () female 35.0 1 4 Allen, Mr. male 35.0 0 Parch Ticket Fare Cabin Embarked 0 0 A/5 21171 7.2500 NaN S 1 0 PC 17599 71.2833 C85 C 2 0 STON/O2. 
3101282 7.9250 NaN S 3 0 113803 53.1000 C123 S 4 0 373450 8.0500 NaN S [...]Cleaning missing dataThere are many ways to clean up missing data. One of the easiest is to fill in all of the missing values with the median of all the values in the column.titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())Handling non-numeric columnsSeveral of our columns are non-numeric, which is a problem when it comes time to make predictions. We can't feed non-numeric columns into a machine learning algorithm and expect it to make sense of them.We either have to exclude the non-numeric columns when we train our algorithm (Name, Sex, Cabin, Embarked, and Ticket), or find a way to convert them to numeric columns.We'll ignore the Ticket, Cabin, and Name columns because we can't extract much information from them. Most of the values in the cabin column are missing (there are only 204 values out of a total of 891 rows), and it probably isn't a particularly informative column anyway. The Ticket and Name columns are unlikely to tell us much without some domain knowledge about what the ticket numbers mean, and about which names correlate with characteristics like large or rich families. Converting the Sex Column to Numeric# Find all of the unique genders # The column appears to contain the values male and female only print(titanic["Sex"].unique()) # Replace all the occurences of male with the number 0 titanic.loc[titanic["Sex"] == "male", "Sex"] = 0 titanic.loc[titanic["Sex"] == "female", "Sex"] = 1['male' 'female']Repeating the same for the embarked column# Find all of the unique values for "Embarked" print(titanic["Embarked"].unique()) titanic["Embarked"] = titanic["Embarked"].fillna("S") titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0 titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1 titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2['S' 'C' 'Q' nan]Making predictions using Scikit Learn# Import the linear regression class from sklearn.linear_model import LinearRegression # Sklearn also has a helper that makes it easy to do cross-validation from sklearn.cross_validation import KFold # The columns we'll use to predict the target predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"] # Initialize our algorithm class alg = LinearRegression() # Generate cross-validation folds for the titanic data set # It returns the row indices corresponding to train and test # We set random_state to ensure we get the same splits every time we run this kf = KFold(titanic.shape[0], n_folds=3, random_state=1) predictions = [] for train, test in kf: # The predictors we're using to train the algorithm # Note how we only take the rows in the train folds train_predictors = (titanic[predictors].iloc[train,:]) # The target we're using to train the algorithm train_target = titanic["Survived"].iloc[train] # Training the algorithm using the predictors and target alg.fit(train_predictors, train_target) # We can now make predictions on the test fold test_predictions = alg.predict(titanic[predictors].iloc[test,:]) predictions.append(test_predictions)c:\users\jennifer\appdata\local\programs\python\python36-32\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20. 
"This module will be removed in 0.20.", DeprecationWarning)Evaluating Prediction Errorsimport numpy as np # The predictions are in three separate NumPy arrays # Concatenate them into a single array, along the axis 0 (the only 1 axis) predictions = np.concatenate(predictions, axis=0) # Map predictions to outcomes (the only possible outcomes are 1 and 0) predictions[predictions > .5] = 1 predictions[predictions <=.5] = 0 accuracy = sum(predictions[predictions == titanic["Survived"]]) / len(predictions)Logistic RegressionWe have our first predictions! With only 78.3% accuracy, though, they aren't very good. Instead, we can use logistic regression to output values between 0 and 1.Logistic regression takes the output of a linear regression and maps it to a probability value between 0 and 1. It does the mapping using the logit function. Passing any value through the logit function will map it to a value between 0 and 1 by "squeezing" the extreme values. This is perfect for us, because we only care about two outcomes.from sklearn.linear_model import LogisticRegression from sklearn import cross_validation # Initialize our algorithm alg = LogisticRegression(random_state=1) # Compute the accuracy score for all the cross-validation folds; this is much simpler than what we did before scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3) # Take the mean of the scores (because we have one for each fold) print(scores.mean())0.787878787879Processing the test data setOur accuracy is decent, but not great. We can still try a few things to make it better, and we'll talk about them in the next mission.For now, we'll focus on performing the exact same steps on the test data that we did on the training data. If we don't perform the exact same operations, then we won't be able to make valid predictions on it.titanic_test = pd.read_csv("C:/Users/Jennifer/Documents/Python/Data/titanic_test.csv") titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median()) titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median()) titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0 titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1 titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S") titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0 titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1 titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2Generating the predictions on the test# Initialize the algorithm class alg = LogisticRegression(random_state=1) # Train the algorithm using all the training data alg.fit(titanic[predictors], titanic["Survived"]) # Make predictions using the test set predictions = alg.predict(titanic_test[predictors]) # Create a new dataframe with only the columns Kaggle wants from the data set submission = pd.DataFrame({ "PassengerId": titanic_test["PassengerId"], "Survived": predictions })Implementing Random Forest ModelIn the last code cell, we made our first submission to Titanic: Machine Learning from Disaster, a machine learning competition on Kaggle.Our submission wasn't very high-scoring, though. There are three main ways we can improve it:Use a better machine learning algorithm.Generate better features.Combine multiple machine learning algorithms.In this mission, I will do all three. First, we'll find a different algorithm to use, instead of logistic regression. 
This time, we'll use the random forests algorithm.from sklearn import cross_validation from sklearn.ensemble import RandomForestClassifier predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"] # Initialize our algorithm with the default paramters # n_estimators is the number of trees we want to make # min_samples_split is the minimum number of rows we need to make a split # min_samples_leaf is the minimum number of samples we can have at the place where a tree branch ends (the bottom points of the tree) alg = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2, min_samples_leaf=1) # Compute the accuracy score for all of the cross validation folds; this is much simpler than what we did before kf = cross_validation.KFold(titanic.shape[0], n_folds=3, random_state=1) scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf) # Take the mean of the scores (because we have one for each fold) print(scores.mean())0.785634118967Tuning Parameters to improve accuracyThe first (and easiest) thing we can do to improve the accuracy of the random forest is to increase the number of trees we're using. Training more trees will take more time, but because we're averaging many predictions we made on different subsets of the data, having more trees will greatly increase accuracy (up to a point).We can also tweak the min_samples_split and min_samples_leaf variables to reduce overfitting. Because of the way a decision tree works, very deep splits in a tree can make it fit to quirks in the data set, rather than true signal.For this reason, increasing min_samples_split and min_samples_leaf can reduce overfitting. This will actually improve our score because we're making predictions on unseen data. A model that's less overfit and can generalize better will actually perform better on unseen data, but worse on seen data.alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2) # Compute the accuracy score for all the cross-validation folds; this is much simpler than what we did before kf = cross_validation.KFold(titanic.shape[0], 3, random_state=1) scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf) # Take the mean of the scores (because we have one for each fold) print(scores.mean())0.81593714927Generating New Features with ApplyWe can also generate new features. Here are some ideas:- The length of the name. This could pertain to how rich the person was, and therefore their position on the Titanic.- The total number of people in a family (SibSp + Parch).# Generating a familysize column titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"] # The .apply method generates a new series titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))Extracting the Passenger Titles with a Regular ExpressionWe can extract the passengers' titles from their names. The titles take the form of Master., Mr., Mrs., etc. 
There are a few very common titles, and a "long tail" of titles that only one or two passengers have.import re # A function to get the title from a name def get_title(name): # Use a regular expression to search for a title # Titles always consist of capital and lowercase letters, and end with a period title_search = re.search(' ([A-Za-z]+)\.', name) # If the title exists, extract and return it if title_search: return title_search.group(1) return "" # Get all of the titles, and print how often each one occurs titles = titanic["Name"].apply(get_title) print(pd.value_counts(titles)) # Map each title to an integer # Some titles are very rare, so they're compressed into the same codes as other titles title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8, "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Sir": 9, "Capt": 7, "Ms": 2} for k,v in title_mapping.items(): titles[titles == k] = v # Verify that we converted everything print(pd.value_counts(titles)) # Add in the title column titanic["Title"] = titlesMr 517 Miss 182 Mrs 125 Master 40 Dr 7 Rev 6 Mlle 2 Col 2 Major 2 Ms 1 Countess 1 Capt 1 Sir 1 Lady 1 Jonkheer 1 Mme 1 Don 1 Name: Name, dtype: int64 1 517 2 183 3 125 4 40 5 7 6 6 7 5 10 3 8 3 9 2 Name: Name, dtype: int64Generating a feature for Family GroupsWe can also generate a feature that indicates which family passengers belong to. Because survival was probably very dependent on your family and the people around you, this has a good chance of being a helpful feature.To create this feature, we'll concatenate each passenger's last name with FamilySize to get a unique family ID. Then we'll be able to assign a code to each person based on their family ID.import operator # A dictionary mapping family name to ID family_id_mapping = {} # A function to get the ID for a particular row def get_family_id(row): # Find the last name by splitting on a comma last_name = row["Name"].split(",")[0] # Create the family ID family_id = "{0}{1}".format(last_name, row["FamilySize"]) # Look up the ID in the mapping if family_id not in family_id_mapping: if len(family_id_mapping) == 0: current_id = 1 else: # Get the maximum ID from the mapping, and add 1 to it if we don't have an ID current_id = (max(family_id_mapping.items(), key=operator.itemgetter(1))[1] + 1) family_id_mapping[family_id] = current_id return family_id_mapping[family_id] # Get the family IDs with the apply method family_ids = titanic.apply(get_family_id, axis=1) # There are a lot of family IDs, so we'll compress all of the families with less than three members into one code family_ids[titanic["FamilySize"] < 3] = -1 # Print the count of each unique ID print(pd.value_counts(family_ids)) titanic["FamilyId"] = family_ids-1 800 14 8 149 7 63 6 50 6 59 6 17 5 384 4 27 4 25 4 162 4 8 4 84 4 340 4 43 3 269 3 58 3 633 2 167 2 280 2 510 2 90 2 83 1 625 1 376 1 449 1 498 1 588 1 dtype: int64Identifing the best features to useFeature engineering is the most important part of any machine learning task, and there are a lot more features we could calculate. However, we also need a way to figure out which features are the best.One way to accomplish this is to use univariate feature selection. This approach essentially involves reviewing a data set column by column to identify the ones that correlate most closely with what we're trying to predict (Survived).As usual, sklearn has a function that will help us with feature selection. 
The SelectKBest function selects the best features from the data. We can specify how many features we want this function to select.import numpy as np import matplotlib.pyplot as plt from sklearn.feature_selection import SelectKBest, f_classif predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "FamilyId", "NameLength"] # Perform feature selection selector = SelectKBest(f_classif, k=5) selector.fit(titanic[predictors], titanic["Survived"]) # Get the raw p-values for each feature, and transform them from p-values into scores scores = -np.log10(selector.pvalues_) # Plot the scores # Do you see how "Pclass", "Sex", "Title", and "Fare" are the best features? plt.bar(range(len(predictors)), scores) plt.xticks(range(len(predictors)), predictors, rotation='vertical') plt.show() # Pick only the four best features predictors = ["Pclass", "Sex", "Fare", "Title"] alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=8, min_samples_leaf=4) # Compute the accuracy score for all the cross-validation folds; this is much simpler than what we did before scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3) # Take the mean of the scores (because we have one for each fold) print(scores.mean())Making predictions with multiple classifiersOne thing we can do to improve the accuracy of our predictions is ensemble different classifiers. Ensembling means generating predictions based on information from a set of classifiers, instead of just one. In practice, this means that we average their predictions.Generally speaking, the more diverse the models we ensemble, the higher our accuracy will be. Diversity means that the models generate their results from different columns, or use very different methods to generate predictions. Ensembling a random forest classifier with a decision tree probably won't work extremely well, because they're very similar. On the other hand, ensembling a linear regression with a random forest can yield very good results.One caveat with ensembling is that the classifiers we use have to be about the same in terms of accuracy. 
Ensembling one classifier that's much less accurate than the other will probably make the final result worse.In this case, we'll ensemble logistic regression we trained on the most linear predictors (the ones that have a linear order, as well as some correlation to Survived) with a gradient-boosted tree we trained on all of the predictors.from sklearn.ensemble import GradientBoostingClassifier import numpy as np # The algorithms we want to ensemble # We're using the more linear predictors for the logistic regression, and everything with the gradient boosting classifier algorithms = [ [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3), ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title", "FamilyId"]], [LogisticRegression(random_state=1), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]] ] # Initialize the cross-validation folds kf = KFold(titanic.shape[0], n_folds=3, random_state=1) predictions = [] for train, test in kf: train_target = titanic["Survived"].iloc[train] full_test_predictions = [] # Make predictions for each algorithm on each fold for alg, predictors in algorithms: # Fit the algorithm on the training data alg.fit(titanic[predictors].iloc[train,:], train_target) # Select and predict on the test fold # We need to use .astype(float) to convert the dataframe to all floats and avoid an sklearn error test_predictions = alg.predict_proba(titanic[predictors].iloc[test,:].astype(float))[:,1] full_test_predictions.append(test_predictions) # Use a simple ensembling scheme—just average the predictions to get the final classification test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2 # Any value over .5 is assumed to be a 1 prediction, and below .5 is a 0 prediction test_predictions[test_predictions <= .5] = 0 test_predictions[test_predictions > .5] = 1 predictions.append(test_predictions) # Put all the predictions together into one array predictions = np.concatenate(predictions, axis=0) # Compute accuracy by comparing to the training data accuracy = sum(predictions[predictions == titanic["Survived"]]) / len(predictions) print(accuracy)0.278338945006Making changes to the test sets# First, we'll add titles to the test set titles = titanic_test["Name"].apply(get_title) # We're adding the Dona title to the mapping, because it's in the test set, but not the training set title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8, "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Sir": 9, "Capt": 7, "Ms": 2, "Dona": 10} for k,v in title_mapping.items(): titles[titles == k] = v titanic_test["Title"] = titles # Check the counts of each unique title print(pd.value_counts(titanic_test["Title"])) # Now we add the family size column titanic_test["FamilySize"] = titanic_test["SibSp"] + titanic_test["Parch"] # Now we can add family IDs # We'll use the same IDs we used earlier print(family_id_mapping) family_ids = titanic_test.apply(get_family_id, axis=1) family_ids[titanic_test["FamilySize"] < 3] = -1 titanic_test["FamilyId"] = family_ids1 240 2 79 3 72 4 21 7 2 6 2 10 1 5 1 Name: Title, dtype: int64 {'Braund1': 1, 'Cumings1': 2, 'Heikkinen0': 3, 'Futrelle1': 4, 'Allen0': 5, 'Moran0': 6, 'McCarthy0': 7, 'Palsson4': 8, 'Johnson2': 9, 'Nasser1': 10, 'Sandstrom2': 11, 'Bonnell0': 12, 'Saundercock0': 13, 'Andersson6': 14, 'Vestrom0': 15, 'Hewlett0': 16, 'Rice5': 17, 'Williams0': 18, 'Vander Planke1': 19, 'Masselmani0': 20, 'Fynney0': 21, 'Beesley0': 22, 
'McGowan0': 23, 'Sloper0': 24, 'Asplund6': 25, 'Emir0': 26, 'Fortune5': 27, "O'Dwyer0": 28, 'Todoroff0': 29, 'Uruchurtu0': 30, 'Spencer1': 31, 'Glynn0': 32, 'Wheadon0': 33, 'Meyer1': 34, 'Holverson1': 35, 'Mamee0': 36, 'Cann0': 37, '': 38, 'Nicola-Yarred1': 39, 'Ahlin1': 40, 'Turpin1': 41, 'Kraeff0': 42, 'Laroche3': 43, 'Devaney0': 44, 'Rogers0': 45, 'Lennon1': 46, "O'Driscoll0": 47, 'Samaan2': 48, 'Arnold-Franchi1': 49, 'Panula5': 50, 'Nosworthy0': 51, 'Harper1': 52, 'Faunthorpe1': 53, 'Ostby1': 54, 'Woolner0': 55, 'R[...]Predicting on the test setpredictors = ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title", "FamilyId"] algorithms = [ [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3), predictors], [LogisticRegression(random_state=1), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]] ] full_predictions = [] for alg, predictors in algorithms: # Fit the algorithm using the full training data. alg.fit(titanic[predictors], titanic["Survived"]) # Predict using the test dataset. We have to convert all the columns to floats to avoid an error predictions = alg.predict_proba(titanic_test[predictors].astype(float))[:,1] full_predictions.append(predictions) # The gradient boosting classifier generates better predictions, so we weight it higher predictions = (full_predictions[0] * 3 + full_predictions[1]) / 4Generate cost datainput_path = '../input_static/level_of_service/'PT ticket pricesThe German rail operator provides a distance-related ticket price list as pdf that can be translated into a functioninput_file = input_path + 'DB_2020_preisliste_fahrplanjahr_2020.pdf' # generate a DataFrame db_prices = tabula.read_pdf(input_file, output_format='dataframe', pages=list(range(5, 41)), pandas_options={ 'names': ['km', 'single_trip_second_class', 'single_trip_first_class', 'return_trip_second_class', 'return_trip_first_class'] }, multiple_tables=False)[0] db_prices.dropna(axis=0, how='any', inplace=True) db_prices.dtypes db_prices for col in list(db_prices.columns): db_prices[col] = db_prices[col].str.replace(',', '.') db_prices = db_prices[[col for col in list(db_prices.columns)]].astype(float) # plot db_prices.set_index('km').plot() # save db_prices.to_csv(input_file.replace('.pdf', '.csv'), index=False) # Use second class single trip prices # Choose a fit for a distance range up to 60 km (longer links are rare) z = np.polyfit(c.loc[0:30, 'km'], c.loc[0:30, 'single_trip_second_class'], deg=1) p = np.poly1d(z) plt.plot(c.loc[0:30, 'km'], p(c.loc[0:30, 'km']), linestyle='--', marker=' ', label='Regression') plt.plot(c.loc[0:30, 'km'], c.loc[0:30, 'single_trip_second_class'], linestyle='--', marker=' ', label='2nd class') plt.legend() rail_short_intercept = np.round(z[1], 3) print('y={}*x+{}'.format(np.round(z[0], 3), rail_short_intercept))MIT costThe German car driver association (ADAC) provides a list of cost for every new car available in Germany. 
They calculate aquisition cost, variable cost, fix cost, and repair cost.input_file = input_path + 'ADAC_2019_autokostenuebersicht_47085.pdf' # generate a DataFrame cars = tabula.read_pdf(input_file, output_format='dataframe', pages=list(range(3, 38)), pandas_options={ 'names': ['model', 'data', 'costs'] }, multiple_tables=False)[0] cars.sample(5) # cleansing cars.reset_index(inplace=True) cars.rename(columns={'model': 'power', 'index': 'model'}, inplace=True) cars.dropna(axis=0, how='all', inplace=True) cars.dropna(axis=0, how='any', thresh=3, inplace=True) cars = cars.loc[cars['model']!='NaN'] cars.dropna(axis=0, subset=['model'], inplace=True) cars = cars.loc[cars['model']!='Marke / Modell:'] cars['costs'] = cars['costs'].str.replace(',', '.') cars['costs'] = cars['costs'].str.replace('*', '') elements = ['fix_cost', 'repair_cost', 'var_cost', 'acquisition_cost', 'eur_per_month', 'ct_per_km'] cars[elements] = cars['costs'].str.split(' ', expand=True) # Save elements.append('model') cars = cars[elements] cars.to_csv(input_file.replace('.pdf', '.csv'), index=False)Take only perceived variable cost for distance-dependency.Assume monthly mileage of 15,000km / 12Additionally, there are fix cost which add up on every trip. Assume 3 trips per day as average.vc_car = cars['var_cost'].astype(int).mean() / (15000000/12) # in EUR/m vc_car fix_car = cars['fix_cost'].astype(int).mean() / 30 / 3 fix_carInference for the parameters of a 1d Gaussian using a non-conjugate priorWe illustrate various inference methods using the example in sec 4.3 ("Gaussian model of height") of [Statistical Rethinking ed 2](https://xcelab.net/rm/statistical-rethinking/). This requires computing $p(\mu,\sigma|D)$ using a Gaussian likelihood but a non-conjugate prior.The numpyro code is from [Du Phan's site](https://fehiepsi.github.io/rethinking-numpyro/04-geocentric-models.html).import numpy as np np.set_printoptions(precision=3) import matplotlib.pyplot as plt import math import os import warnings import pandas as pd #from scipy.interpolate import BSpline #from scipy.stats import gaussian_kde !mkdir figures !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro import jax print("jax version {}".format(jax.__version__)) print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform)) import jax.numpy as jnp from jax import random, vmap rng_key = random.PRNGKey(0) rng_key, rng_key_ = random.split(rng_key) import numpyro import numpyro.distributions as dist from numpyro.distributions import constraints from numpyro.distributions.transforms import AffineTransform from numpyro.diagnostics import hpdi, print_summary from numpyro.infer import Predictive from numpyro.infer import MCMC, NUTS from numpyro.infer import SVI, Trace_ELBO, init_to_value from numpyro.infer.autoguide import AutoLaplaceApproximation import numpyro.optim as optim !pip install arviz import arviz as azRequirement already satisfied: arviz in /usr/local/lib/python3.7/dist-packages (0.11.2) Requirement already satisfied: setuptools>=38.4 in /usr/local/lib/python3.7/dist-packages (from arviz) (56.0.0) Requirement already satisfied: typing-extensions<4,>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from arviz) (3.7.4.3) Requirement already satisfied: pandas>=0.23 in /usr/local/lib/python3.7/dist-packages (from arviz) (1.1.5) Requirement already satisfied: netcdf4 in /usr/local/lib/python3.7/dist-packages (from arviz) (1.5.6) Requirement already satisfied: scipy>=0.19 in /usr/local/lib/python3.7/dist-packages (from arviz) (1.4.1) Requirement 
already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from arviz) (20.9) Requirement already satisfied: matplotlib>=3.0 in /usr/local/lib/python3.7/dist-packages (from arviz) (3.2.2) Requirement already satisfied: xarray>=0.16.1 in /usr/local/lib/python3.7/dist-packages (from arviz) (0.17.0) Requirement already satisfied: numpy[...]DataWe use the "Howell" dataset, which consists of measurements of height, weight, age and sex, of a certain foraging tribe, collected by .#url = 'https://github.com/fehiepsi/rethinking-numpyro/tree/master/data/Howell1.csv?raw=True' url = 'https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/Howell1.csv' Howell1 = pd.read_csv(url, sep=';') d = Howell1 d.info() d.head() # get data for adults d2 = d[d.age >= 18] N = len(d2) ndx = jax.random.permutation(rng_key, N) data = d2.height.values[ndx] N = 20 # take a subset of the 354 samples data = data[:N]Empirical mean and std.print(len(data)) print(np.mean(data)) print(np.std(data))20 154.16326000000004 7.459859122289108ModelWe use the following model for the heights (in cm):$$\begin{align}h_i &\sim N(\mu,\sigma) \\\mu &\sim N(178, 20) \\\sigma &\sim U(0,50)\end{align}$$The prior for $\mu$ has a mean 178cm, since that is the height of , the author of the "Statisical Rethinking" book.The standard deviation is 20, so that 90\% of people lie in the range 138--218.The prior for $\sigma$ has a lower bound of 0 (since it must be positive), and an upper bound of 50, so that the interval $[\mu-\sigma, \mu+\sigma]$ has width 100cm, which seems sufficiently large to capture human heights.Note that this is not a conjugate prior, so we will just approximate the posterior.But since there are just 2 unknowns, this will be easy. Grid posteriormu_prior = dist.Normal(178, 20) sigma_prior = dist.Uniform(0, 50) mu_range = [150, 160] sigma_range = [4, 14] ngrid = 100 plot_square = False mu_list = jnp.linspace(start=mu_range[0], stop=mu_range[1], num=ngrid) sigma_list = jnp.linspace(start=sigma_range[0], stop=sigma_range[1], num=ngrid) mesh = jnp.meshgrid(mu_list, sigma_list) print([mesh[0].shape, mesh[1].shape]) print(mesh[0].reshape(-1).shape) post = {"mu": mesh[0].reshape(-1), "sigma": mesh[1].reshape(-1)} post["LL"] = vmap( lambda mu, sigma: jnp.sum(dist.Normal(mu, sigma).log_prob(data)) )(post["mu"], post["sigma"]) logprob_mu = mu_prior.log_prob(post["mu"]) logprob_sigma = sigma_prior.log_prob(post["sigma"]) post["prob"] = post["LL"] + logprob_mu + logprob_sigma post["prob"] = jnp.exp(post["prob"] - jnp.max(post["prob"])) prob = post["prob"] / jnp.sum(post["prob"]) # normalize over the grid prob2d = prob.reshape(ngrid, ngrid) prob_mu = jnp.sum(prob2d, axis=0) prob_sigma = jnp.sum(prob2d, axis=1) plt.figure() plt.plot(mu_list, prob_mu, label='mu') plt.legend() plt.savefig('figures/gauss_params_1d_post_grid_marginal_mu.pdf', dpi=300) plt.show() plt.figure() plt.plot(sigma_list, prob_sigma, label='sigma') plt.legend() plt.savefig('figures/gauss_params_1d_post_grid_marginal_sigma.pdf', dpi=300) plt.show() plt.contour( post["mu"].reshape(ngrid, ngrid), post["sigma"].reshape(ngrid, ngrid), post["prob"].reshape(ngrid, ngrid), ) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: plt.axis('square') plt.savefig('figures/gauss_params_1d_post_grid_contours.pdf', dpi=300) plt.show() plt.imshow( post["prob"].reshape(ngrid, ngrid), origin="lower", extent=(mu_range[0], mu_range[1], sigma_range[0], sigma_range[1]), aspect="auto", ) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: 
plt.axis('square') plt.savefig('figures/gauss_params_1d_post_grid_heatmap.pdf', dpi=300) plt.show()Posterior samples.nsamples = 5000 #int(1e4) sample_rows = dist.Categorical(probs=prob).sample(random.PRNGKey(0), (nsamples,)) sample_mu = post["mu"][sample_rows] sample_sigma = post["sigma"][sample_rows] samples = {'mu': sample_mu, 'sigma': sample_sigma} print_summary(samples, 0.95, False) plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none") plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') plt.axis('square') plt.show() az.plot_kde(samples['mu'], samples['sigma']); plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: plt.axis('square') plt.savefig('figures/gauss_params_1d_post_grid.pdf', dpi=300) plt.show()mean std median 2.5% 97.5% n_eff r_hat mu 154.39 1.75 154.34 150.91 157.78 4505.97 1.00 sigma 8.18 1.41 8.04 5.72 10.97 5065.49 1.00posterior marginals.print(hpdi(samples['mu'], 0.95)) print(hpdi(samples['sigma'], 0.95)) fig, ax = plt.subplots() az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$') fig, ax = plt.subplots() az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')[150.909 157.778] [ 5.717 10.97 ]Laplace approximationSee [the documentation](http://num.pyro.ai/en/stable/autoguide.htmlautolaplaceapproximation) Optimizationdef model(data): mu = numpyro.sample("mu", mu_prior) sigma = numpyro.sample("sigma", sigma_prior) numpyro.sample("height", dist.Normal(mu, sigma), obs=data) guide = AutoLaplaceApproximation(model) svi = SVI(model, guide, optim.Adam(1), Trace_ELBO(), data=data) svi_result = svi.run(random.PRNGKey(0), 2000) plt.figure() plt.plot(svi_result.losses) start = {"mu": data.mean(), "sigma": data.std()} guide = AutoLaplaceApproximation(model, init_loc_fn=init_to_value(values=start)) svi = SVI(model, guide, optim.Adam(0.1), Trace_ELBO(), data=data) svi_result = svi.run(random.PRNGKey(0), 2000) plt.figure() plt.plot(svi_result.losses)100%|██████████| 2000/2000 [00:01<00:00, 1128.19it/s, init loss: 75.2585, avg. 
loss [1901-2000]: 75.2447]Posterior samples.samples = guide.sample_posterior(random.PRNGKey(1), svi_result.params, (nsamples,)) print_summary(samples, 0.95, False) plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none") plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') plt.show() az.plot_kde(samples['mu'], samples['sigma']); plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: plt.axis('square') plt.savefig('figures/gauss_params_1d_post_laplace.pdf', dpi=300) plt.show() print(hpdi(samples['mu'], 0.95)) print(hpdi(samples['sigma'], 0.95)) fig, ax = plt.subplots() az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$') fig, ax = plt.subplots() az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')[151.06 157.569] [ 5.446 10.217]Extract 2d joint posterior The Gaussian approximation is over transformed parameters.post = guide.get_posterior(svi_result.params) print(post.mean) print(post.covariance_matrix) def logit(p): return jnp.log(p/(1-p)) def sigmoid(a): return 1/(1+jnp.exp(-a)) scale=50; print(logit(7.7/scale)); print(sigmoid(-1.7)*scale) unconstrained_samples = post.sample(rng_key, sample_shape=(nsamples,)) constrained_samples = guide._unpack_and_constrain(unconstrained_samples, svi_result.params) print(unconstrained_samples.shape) print(jnp.mean(unconstrained_samples, axis=0)) print(jnp.mean(constrained_samples['mu'], axis=0)) print(jnp.mean(constrained_samples['sigma'], axis=0))(5000, 2) [154.326 -1.724] 154.32643 7.6484103We can sample from the posterior, which return results in the original parameterization.samples = guide.sample_posterior(random.PRNGKey(1), params, (nsamples,)) x = jnp.stack(list(samples.values()), axis=0) print(x.shape) print('mean of ssamples\n', jnp.mean(x, axis=1)) vcov = jnp.cov(x) print('cov of samples\n', vcov) # variance-covariance matrix # correlation matrix R = vcov / jnp.sqrt(jnp.outer(jnp.diagonal(vcov), jnp.diagonal(vcov))) print('corr of samples\n', R)(2, 5000) mean of ssamples [154.324 7.702] cov of samples [[2.839 0.051] [0.051 1.56 ]] corr of samples [[1. 0.024] [0.024 1. ]]Variational inferenceWe use$q(\mu,\sigma) = N(\mu|m,s) Ga(\sigma|a,b)$def guide(data): data_mean = jnp.mean(data) data_std = jnp.std(data) m = numpyro.param("m", data_mean) s = numpyro.param("s", 10, constraint=constraints.positive) a = numpyro.param("a", data_std, constraint=constraints.positive) b = numpyro.param("b", 1, constraint=constraints.positive) mu = numpyro.sample("mu", dist.Normal(m, s)) sigma = numpyro.sample("sigma", dist.Gamma(a, b)) optimizer = numpyro.optim.Momentum(step_size=0.001, mass=0.1) svi = SVI(model, guide, optimizer, loss=Trace_ELBO()) nsteps = 2000 svi_result = svi.run(rng_key_, nsteps, data=data) print(svi_result.params) print(svi_result.losses.shape) plt.plot(svi_result.losses) plt.title("ELBO") plt.xlabel("step") plt.ylabel("loss");100%|██████████| 2000/2000 [00:03<00:00, 518.24it/s, init loss: 75.1782, avg. 
loss [1901-2000]: 74.7501]Extract Variational parameters.print(svi_result.params) a = np.array(svi_result.params['a']) b = np.array(svi_result.params['b']) m = np.array(svi_result.params['m']) s = np.array(svi_result.params['s']) print('empirical mean', jnp.mean(data)) print('empirical std', jnp.std(data)) print(r'posterior mean and std of $\mu$') post_mean = dist.Normal(m, s) print([post_mean.mean, jnp.sqrt(post_mean.variance)]) print(r'posterior mean and std of unconstrained $\sigma$') post_sigma = dist.Gamma(a,b) print([post_sigma.mean, jnp.sqrt(post_sigma.variance)])empirical mean 154.16325 empirical std 7.459859 posterior mean and std of $\mu$ [array(154.246, dtype=float32), DeviceArray(1.785, dtype=float32)] posterior mean and std of unconstrained $\sigma$ [9.165675, DeviceArray(1.937, dtype=float32)]Posterior samplespredictive = Predictive(guide, params=svi_result.params, num_samples=nsamples) samples = predictive(rng_key, data) print_summary(samples, 0.95, False) plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none") plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') plt.show() az.plot_kde(samples['mu'], samples['sigma']); plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: plt.axis('square') plt.savefig('figures/gauss_params_1d_post_vi.pdf', dpi=300) plt.show() print(hpdi(samples['mu'], 0.95)) print(hpdi(samples['sigma'], 0.95)) fig, ax = plt.subplots() az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$') fig, ax = plt.subplots() az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')[150.846 157.881] [ 5.559 12.877]MCMCconditioned_model = numpyro.handlers.condition(model, {'data': data}) nuts_kernel = NUTS(conditioned_model) mcmc = MCMC(nuts_kernel, num_warmup=100, num_samples=nsamples) mcmc.run(rng_key_, data) mcmc.print_summary() samples = mcmc.get_samples() print_summary(samples, 0.95, False) plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none") plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') plt.show() az.plot_kde(samples['mu'], samples['sigma']); plt.xlim(mu_range[0], mu_range[1]) plt.ylim(sigma_range[0], sigma_range[1]) plt.xlabel(r'$\mu$') plt.ylabel(r'$\sigma$') if plot_square: plt.axis('square') plt.savefig('figures/gauss_params_1d_post_mcmc.pdf', dpi=300) plt.show()mean std median 2.5% 97.5% n_eff r_hat mu 154.34 1.86 154.33 150.70 158.08 3652.68 1.00 sigma 8.24 1.51 8.05 5.67 11.04 2806.00 1.00Since the classes are highly imbalanced we have to perform some sampling methods to balance the data so that we could get the best result from our model.leg_df = df[df.Class == 0] fraud_df = df[df.Class == 1] no_of_samples = round(leg_df.shape[0] * 0.05) no_of_samples from imblearn.over_sampling import RandomOverSampler from sklearn.utils import resample leg_df_2 = resample(leg_df, n_samples=no_of_samples, random_state=15) # leg_df_2.describe() df_sampled = pd.concat([leg_df_2,fraud_df],axis=0) x_sampled = df_sampled.drop('Class', axis=1) y_sampled = df_sampled.Class ros = RandomOverSampler(random_state=42) x,y = ros.fit_resample(x_sampled,y_sampled) y.value_counts() from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,shuffle=True,test_size=0.2) from sklearn.linear_model import LogisticRegression lr=LogisticRegression(max_iter=100,random_state=10) 
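# A common alternative to resampling (not used in this notebook) is class_weight='balanced'
# in LogisticRegression; with classes this imbalanced, the precision/recall/F1 scores computed
# below are more informative than plain accuracy.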
lr.fit(x_train,y_train) y_pred=lr.predict(x_test) lr.score(x_test,y_test) from sklearn.metrics import confusion_matrix,f1_score,recall_score,precision_score sns.heatmap(confusion_matrix(y_test,y_pred),annot=True) from sklearn.metrics import f1_score,recall_score,precision_score def print_scores(y_test,y_pred): print(f'The precision score is {precision_score(y_test,y_pred)}') print(f'The recall score is {recall_score(y_test,y_pred)}') print(f'The f1 score is {f1_score(y_test,y_pred)}') print_scores(y_test,y_pred) from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV xgb=XGBClassifier() params={'eta':[0.1,0.01], 'max_depth':[1,3,4], 'max_leaf_nodes':[10,20,30], 'objective':['binary:logistic']} clf=GridSearchCV(xgb,params) clf.fit(x_train,y_train) clf.best_params_ clf.best_score_ clf1=XGBClassifier(4,0.1,max_leaf_nodes=10,objective='binary:logistic') clf1.fit(x_train,y_train) y_pred1=clf1.predict(x_test) print(f'F1 score {f1_score(y_test,y_pred1)}') print(f'Precision {precision_score(y_test,y_pred1)}') print(f'Recall {recall_score(y_test,y_pred1)}') sns.heatmap(confusion_matrix(y_test,y_pred1),annot=True)Self-Driving Car Engineer Nanodegree Project: **Finding Lane Lines on the Road** ***In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below. Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/!/rubrics/322/view) for this project.---Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**--- **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). 
Once you have a working pipeline, try it out on the video stream below.**--- Your output should look something like this (above) after detecting line segments using the helper functions below Your goal is to connect/average/extrapolate line segments to get output like this **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** Import Packages#importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inlineRead in an Image#reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')Ideas for Lane Detection Pipeline **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**`cv2.inRange()` for color selection `cv2.fillPoly()` for regions selection `cv2.line()` to draw lines on an image given endpoints `cv2.addWeighted()` to coadd / overlay two images`cv2.cvtColor()` to grayscale or change color`cv2.imwrite()` to output images to file `cv2.bitwise_and()` to apply a mask to an image**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** Helper Functions Below are some helper functions to help get you started. They should look familiar from the lesson!import math import datetime as dt def show_lines(lines, msg): """ A simple utility function to show the list of lines where each line is represented 4 element list that encodes x1, y1, x2, y2. 
a line """ print("show_lines: " + msg) for line in lines: print("\tline = " + str(line)) def discard_lines_with_unlikely_slopes(lines, min_slope): """ discard lines with slopes less than min_slope or infinite """ left = [] right = [] for line in lines: x1, y1, x2, y2 = line if (x2 == x1): continue # vertical, no slope for you m = (y2 - y1) / (x2 - x1) if math.fabs(m) < min_slope: #print("reject: m = %f, (%d, %d, %d, %d)" % (m, x1, y1, x2, y2)) continue if m > 0: left.append(line) else: right.append(line) return left, right def line_parms(lines, y_size): """ input: a list of lines(4 elt lists) output: average slope, average y intercep, min_y, max_y """ m_avg = b_avg = 0 y_min = y_size total_len = 0 y_max = 0 for line in lines: x1, y1, x2, y2 = line y_min = min(y_min, y1, y2) y_max = max(y_max, y1, y2) len = np.sqrt(np.square(x2 - x1) + np.square(y2 - y1)) total_len = total_len + len m = (y2 - y1) / (x2 - x1) m_avg = m_avg + m * len b = y1 - m * x1 b_avg = b_avg + b * len m_avg = m_avg / total_len b_avg = b_avg / total_len return m_avg, b_avg, y_min, y_max def avg_line(m, b, y_min, y_max): """ input: return values from line parms output: the 4 points defined by y_min, y_max and the associated x coord """ x1 = (y_min - b) / m x1 = int(x1) y1 = y_min x2 = int((y_max - b) / m) y2 = y_max return [x1, y1, x2, y2] def flip_y_ULO_LLO(lines, y_size): """ It seemed easier to reason about things in this excercise to convert the output of hough lines which uses cv2's upper left origin to the more familiar lower left origin (LLO))convert it back to ULO before calling cv2.line """ ret = [] for ulo_line in lines: x1, y1, x2, y2 = ulo_line llo_line = [x1, y_size - y1, x2, y_size - y2] ret.append(llo_line) return ret def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, [vertices], ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=8): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). 
Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ x_size = image.shape[0] y_size = image.shape[1] # convert from nx1x4 to list of lists of 4 lines = [line[0] for line in lines] lines = flip_y_ULO_LLO(lines, y_size) left_lines, right_lines = discard_lines_with_unlikely_slopes(lines, 0.2) lines = [avg_line(*line_parms(left_lines, y_size)), avg_line(*line_parms(right_lines, y_size))] lines = flip_y_ULO_LLO(lines, y_size) for line in lines: (x1, y1, x2, y2) = line cv2.line(img, (x1, y1), (x2, y2), color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. """ lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! """ return cv2.addWeighted(initial_img, α, img, β, γ)Test ImagesBuild your pipeline to work on the images in the directory "test_images" **You should make sure your pipeline works well on these images before you try the videos.**import os os.listdir("test_images/")Build a Lane Finding Pipeline Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.# TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. 
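# The pipeline assembled below in process_image():
#   grayscale -> Gaussian blur -> Canny edge detection -> region-of-interest mask -> Hough lines -> weighted overlay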
def imsave(img_name, img, cmap='Greys_r', msg=""): """save an image to disk for debug""" outdir = "/home/evt/tmp/sdc_test_out/" outf = img_name + msg + "_" + dt.datetime.now().strftime("%a_%m%d%y_%H%M%S") + ".png" if cmap: mpimg.imsave(outdir + outf, img, cmap=cmap) else: mpimg.imsave(outdir + outf, img) def show_stage_result(img, _cmap, msg="", fname = "", enabled=False): """plot an image in notebook and save to file if enabled""" ysize = image.shape[0] xsize = image.shape[1] if enabled: print("\nshow_stage_result: " + msg + ", " + fname + ": %dx%d" %(xsize, ysize)) plt.figure() plt.imshow(img, cmap=_cmap) plt.show() imsave(fname, img, _cmap, msg) def get_roi_vertices(img): """return an np_array of the vertices of the region of interest""" ysize = image.shape[0] xsize = image.shape[1] left_bottom = [0, ysize] right_bottom = [xsize, ysize] apex_y_mult = 0.6 half_apex_width = int (0.05 * xsize / 2) apex_l = [int(xsize / 2 - half_apex_width), int(ysize * apex_y_mult)] apex_r = [int(xsize / 2 + half_apex_width), int(ysize * apex_y_mult)] return np.array([left_bottom, apex_l, apex_r, right_bottom], 'int32') def overlay_roi(img, vertices): """ overlay the region of interest as a blue polygon on a copy of the input image(for debug) """ tmp = np.copy(image) vertices = vertices.reshape((-1,1,2)) # see https://docs.opencv.org/3.1.0/dc/da5/tutorial_py_drawing_functions.html cv2.polylines(tmp,[vertices],True,(0,255,255)) return tmp def process_image(img, dbg_lvl=0, fname="", note=""): """ apply all of the parts of the image pipeline to an image and return the result """ tmp1 = np.copy(img) #show_stage_result(tmp1, None, msg="_0_copy" + note, fname=fname, enabled = dbg_lvl > 3) tmp1 = grayscale(tmp1) #show_stage_result(tmp1, 'Greys_r', msg="_1_gray" + note, fname=fname, enabled = dbg_lvl > 3) tmp1 = gaussian_blur(tmp1, kernel_size=5) #show_stage_result(tmp1, 'Greys_r', msg="_2_blur_gray" + note, fname=fname, enabled = dbg_lvl > 3) tmp1 = canny(tmp1, low_threshold=50, high_threshold=150) #show_stage_result(tmp1, 'Greys_r', msg="_3_canny edges" + note, fname=fname, enabled = dbg_lvl > 3) roi_vertices = get_roi_vertices(tmp1) #roi_overlay = overlay_roi(tmp1, roi_vertices) #show_stage_result(roi_overlay, 'Greys_r', msg="_4.roi_overlay" + note, fname=fname, enabled = dbg_lvl > 2) tmp1 = region_of_interest(tmp1, roi_vertices) #show_stage_result(tmp1, 'Greys_r', msg="_4_cropped_region" + note, fname=fname, enabled = dbg_lvl > 4) tmp1 = hough_lines(tmp1, rho=6, theta=np.pi/60, threshold=16, min_line_len=30, max_line_gap=160) #show_stage_result(tmp1, 'Greys_r', msg="_5_houghed" + note, fname=fname, enabled = dbg_lvl > 5) composite = weighted_img(tmp1, img) #show_stage_result(composite, 'Greys_r', msg="_6_composite" + note, fname=fname, enabled = dbg_lvl > 0) return composite # for testing def process_1_path(dir, fname, msg, dbg_lvl): print("\n==== %s =====" % fname) img = mpimg.imread(dir+fname) process_image(img, dbg_lvl, fname, msg) # for testing for f in ['solidWhiteRight.jpg', 'solidWhiteCurve.jpg', 'solidYellowLeft.jpg', 'whiteCarLaneSwitch.jpg', 'solidYellowCurve.jpg', 'solidYellowCurve2.jpg']: process_1_path("test_images/", f, "_bumble", 3)Test on VideosYou know what's cooler than drawing lanes over images? Drawing lanes over video!We can test our solution on two provided videos:`solidWhiteRight.mp4``solidYellowLeft.mp4`**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? 
Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.****If you get an error that looks like this:**```NeedDownloadError: Need ffmpeg exe. You can download it by calling: imageio.plugins.ffmpeg.download()```**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**# Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTMLLet's try the one with the solid white lane on the right first ...white_output = 'test_videos_output/solidWhiteRight.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! %time white_clip.write_videofile(white_output, audio=False)Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.HTML(""" """.format(white_output))Improve the draw_lines() function**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".****Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.** Now for the one with the solid yellow lane on the left. This one's more tricky!yellow_output = 'test_videos_output/solidYellowLeft.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds #clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,4) clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4') yellow_clip = clip2.fl_image(process_image) %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" """.format(yellow_output))Writeup and SubmissionIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! 
Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file. Optional ChallengeTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!challenge_output = 'test_videos_output/challenge.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5) clip3 = VideoFileClip('test_videos/challenge.mp4') challenge_clip = clip3.fl_image(process_image) %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" """.format(challenge_output))Read DataUse pandas to read csv files and print headpath_prefix = "./data" warnings.filterwarnings("ignore") # Read CSV files to get questions and tags df_questions = pd.read_csv(path_prefix+"/Questions.csv", encoding="ISO-8859-1") df_tags = pd.read_csv(path_prefix+"/Tags.csv", encoding="ISO-8859-1", dtype={'Tag': str}) df_questions.head(5) df_tags = pd.read_csv(path_prefix+"/Tags.csv",encoding="ISO-8859-1",dtype={'Tag':str}) df_tags.head(5)Process tagsProcess them tags into something nice to querytype(df_tags) type(df_tags['Tag'][0]) # Group tags by id and join them df_tags['Tag'] = df_tags['Tag'].astype(str) grouped_tags = df_tags.groupby("Id")['Tag'].apply(lambda tags: ' '.join(tags)) grouped_tags.head(5) # Reset index for making simpler dataframe grouped_tags = grouped_tags.reset_index() a = df_tags.groupby('Id') b = a['Tag'].apply(lambda tags: ' '.join(tags)) b = b.reset_index() grouped_tags_final = pd.DataFrame({'Id':b['Id'],'Tags':b['Tag']}) grouped_tags_final.head(5) # Drop unnecessary columns df_questions.drop(columns=['OwnerUserId', 'CreationDate', 'ClosedDate'], inplace=True) # Merge questions and tags into one dataframe df = df_questions.merge(grouped_tags_final, on='Id') df.head(5)Visualization for scoresum(df['Score']>5) type(df['Score'][0])Using the sampling to view the distribution of scoreindex = np.random.randint(0,1264216,size = 10000) df['Score'][index] #直方图📊 plt.hist(df['Score'][index],facecolor='blue',edgecolor='black',alpha=0.35) plt.xlabel('range') plt.ylabel('frequency') plt.title('Frequency distribution histogram') #my_x_ticks = np.arange(0, 100, 50) #plt.xticks(my_x_ticks) plt.show() plt.boxplot(df['Score']) plt.ylim(-10, 10) new_df = df[df['Score']>5] new_df.head() new_df.info() result = new_df['Score'].describe() pd.DataFrame(result)there is no missing values or duplocated valuesprint('Duplicate entries:{}'.format(new_df.duplicated().sum())) new_df.drop_duplicates(inplace=True)This is a very good dataset since there are no missing values or duplicated values.new_df.drop(columns=['Id','Score'],inplace=True) new_df.head(10) # Filter out questions with a score lower than 5 new_df = df[df['Score']>5] # Split tags in order to get a list of tags new_df['Tags'] = new_df['Tags'].apply(lambda x: x.split()) all_tags = [item for sublist in new_df['Tags'].values for item in sublist] flat_list = [item for sublist in new_df['Tags'].values for item in sublist] keywords = nltk.FreqDist(flat_list) keywords = 
nltk.FreqDist(keywords) # Get most frequent tags frequencies_words = keywords.most_common(25) tags_features = [word[0] for word in frequencies_words] # Drop unnecessary columns at this point new_df.drop(columns=['Id', 'Score'], inplace=True) print(tags_features) hot_tags = [item[0] for item in frequencies_words[:10]] hot_tags hot_tags_nums = [item[1] for item in frequencies_words[:10]] hot_tags_nums #参数一:y轴 参数二:x轴 plt.barh(range(10),hot_tags_nums,height = 0.7,color = 'steelblue',alpha =0.8) plt.yticks(range(10),hot_tags) plt.xlim(2000,7000) for x, y in enumerate(hot_tags_nums): plt.text(y + 0.2, x - 0.1, '%s' % y) plt.xlabel('frequency') plt.title('The frequency of TOP 10 labels') plt.show() keyword = nltk.FreqDist(keywords) keyword #most_common()函数 # most_common(n)按照降序,返回前n项组成的list; n忽略时返回全部 frequencies_words = keyword.most_common(100) tag_features = [word[0] for word in frequencies_words] frequencies_words[:10] tag_features[:10] fig , ax = plt.subplots(figsize = (15,10)) keyword.plot(100,cumulative = False) #cumulative:whether to count the cumulative frequenceies plt.figure(figsize=(10,10)) plt.bar(hot_tags,hot_tags_nums,color='rgb',tick_label=hot_tags) plt.xlabel('hot tags') plt.ylabel('the number of hot tags') plt.title('Frequency of high-frequency tag statistics') plt.show() def most_common(tags): """Function to check if tag is in most common tag list""" tags_filtered = [] for i in range(0, len(tags)): if tags[i] in tags_features: tags_filtered.append(tags[i]) return tags_filtered # Change Tags column into None for questions that don't have a most common tag new_df['Tags'] = new_df['Tags'].apply(lambda x: most_common(x)) new_df['Tags'] = new_df['Tags'].apply(lambda x: x if len(x)>0 else None) # Drop rows that contain None in Tags column new_df.dropna(subset=['Tags'], inplace=True) new_df.shapePreprocess Data Remove special characters from title and body Remove stop words Remove HTML tags Convert characters to lowercase Lemmatize the words# Filter out HTML new_df['Body_lxml'] = new_df['Body'].apply(lambda x: BeautifulSoup(x, 'lxml').get_text()) token = ToktokTokenizer() lemma = WordNetLemmatizer() stop_words = set(stopwords.words("english")) def strip_list_noempty(mylist): newlist = (item.strip() if hasattr(item, 'strip') else item for item in mylist) return [item for item in newlist if item != ''] def removeStopWords(text): words = token.tokenize(text) filtered = [w for w in words if not w in stop_words] return ' '.join(map(str, filtered)) def removePunctuation(text): punct = '!"$%&\'()*,./:;<=>?@[\\]^_`{|}~' words=token.tokenize(text) punctuation_filtered = [] regex = re.compile('[%s]' % re.escape(punct)) remove_punctuation = str.maketrans(' ', ' ', punct) for w in words: if w in tags_features: punctuation_filtered.append(w) else: punctuation_filtered.append(regex.sub('', w)) filtered_list = strip_list_noempty(punctuation_filtered) return ' '.join(map(str, filtered_list)) def lemmatizeWords(text): words=token.tokenize(text) listLemma=[] for w in words: x=lemma.lemmatize(w, pos="v") listLemma.append(x.lower()) return ' '.join(map(str, listLemma)) # Remove stopwords, punctuation and lemmatize for text in body new_df['Body_punctuation'] = new_df['Body_lxml'].apply(lambda x: removePunctuation(x)) new_df.head(10) new_df['Body_stopword'] = new_df['Body_punctuation'].apply(lambda x: removeStopWords(x)) new_df['Body_lemmatize'] = new_df['Body_stopword'].apply(lambda x: lemmatizeWords(x)) new_df.head(10) new_df['Final_Body'] = new_df['Body_lemmatize'] new_df['Final_Body'].head(5) # Remove 
stopwords, punctuation and lemmatize for title. Also weight title 3 times new_df['Title_str'] = new_df['Title'].apply(lambda x: str(x)) new_df['Title_punctuation'] = new_df['Title_str'].apply(lambda x: removePunctuation(x)) new_df['Title_stopword'] = new_df['Title_punctuation'].apply(lambda x: removeStopWords(x)) new_df['Title_lemmatize'] = new_df['Title_stopword'].apply(lambda x: lemmatizeWords(x)) new_df['Title_weight'] = new_df['Title_lemmatize'].apply(lambda x: ' '.join(x.split()*3)) new_df['Title'] = new_df['Title_weight'] new_df['Title'] #new_df.to_csv("./data/processed_question.csv")-EDAno_topics=20 text = new_df['Final_Body'] from sklearn.feature_extraction.text import TfidfVectorizer vectorizer_train = TfidfVectorizer(analyzer = 'word',min_df=0.0,max_df=1.0,strip_accents=None, encoding='utf-8', preprocessor=None,token_pattern=r"(?u)\S\S+", max_features=1000 ) TF_IDF_matrix = vectorizer_train.fit_transform(text) TF_IDF_matrix.shape weight = TF_IDF_matrix.toarray() weight.shape import sys import numpy numpy.set_printoptions(threshold=sys.maxsize) print(weight[0]) print(TF_IDF_matrix) from sklearn.decomposition import LatentDirichletAllocation lda = LatentDirichletAllocation(n_components=no_topics,max_iter=5,learning_method='online',learning_offset=50,random_state=11).fit(TF_IDF_matrix) def display_topics(model,feature_names,no_stop_words): for topic_idx, topic in enumerate(model.components_): print("------------------------------------------") print("Topic %d:"%(topic_idx)) print(" ".join([feature_names[i] for i in topic.argsort()[:-no_top_words-1:-1]])) print("------------------------------------------") no_top_words =10 display_topics(lda,vectorizer_train.get_feature_names(),no_top_words)------------------------------------------ Topic 0: table select query row database sql column id value mysql ------------------------------------------ ------------------------------------------ Topic 1: use android application app find would like server web know ------------------------------------------ ------------------------------------------ Topic 2: devices eandroidruntime mobile linearlayout red dialog extra target 05 19 ------------------------------------------ ------------------------------------------ Topic 3: file error project run use install build command try -- ------------------------------------------ ------------------------------------------ Topic 4: pdf can explain book socket engine someone please block tell ------------------------------------------ ------------------------------------------ Topic 5: class public object string method new return type use value ------------------------------------------ ------------------------------------------ Topic 6: script ht[...]Classifier implementationfrom sklearn.preprocessing import MultiLabelBinarizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split from scipy.sparse import hstack # Define X, y X1 = new_df['Final_Body'] X2 = new_df['Title'] y = new_df['Tags'] print(len(X1), len(X2), len(y)) # Define multilabel binarizer multilabel_binarizer = MultiLabelBinarizer() y_bin = multilabel_binarizer.fit_transform(y) vectorizer_X1 = TfidfVectorizer(analyzer = 'word', min_df=0.0005, max_df = 1.0, strip_accents = None, encoding = 'utf-8', ngram_range = (1, 3), preprocessor=None, token_pattern=r"(?u)\S\S+", max_features=35000) vectorizer_X2 = TfidfVectorizer(analyzer = 'word', 
min_df=0.0, max_df = 1.0, strip_accents = None, encoding = 'utf-8', ngram_range = (1, 3), preprocessor=None, token_pattern=r"(?u)\S\S+", max_features=35000) X1_tfidf = vectorizer_X1.fit_transform(X1) X2_tfidf = vectorizer_X2.fit_transform(X2) # Stack X1 and X2 into X_tfidf X_tfidf = hstack([X1_tfidf,X2_tfidf]) # Split training and test data X_train, X_test, y_train, y_test = train_test_split(X_tfidf, y_bin, test_size = 0.2, random_state = 0) # Using Label Powerset from sklearn.metrics import accuracy_score from sklearn.svm import LinearSVC from sklearn.metrics import hamming_loss from sklearn.metrics import f1_score from skmultilearn.problem_transform import LabelPowerset from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import jaccard_score from sklearn import model_selection from sklearn.metrics import make_scorer from sklearn.metrics import recall_score from sklearn.metrics import precision_score svc = LinearSVC() sgd = SGDClassifier(n_jobs=-1) def print_score(y_pred, clf): print("Clf: ", clf.__class__.__name__) print("Accuracy score: {}".format(accuracy_score(y_test, y_pred))) print("Recall score: {}".format(recall_score(y_true=y_test, y_pred=y_pred, average='weighted'))) print("Precision score: {}".format(precision_score(y_true=y_test, y_pred=y_pred, average='weighted'))) print("Hamming loss: {}".format(hamming_loss(y_pred, y_test)*100)) print("F1 score: {}".format(f1_score(y_pred, y_test, average='weighted'))) print("---") clf = LabelPowerset(svc) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print_score(y_pred, clf) kfold = KFold(n_splits=5) X_sparse = X_tfidf.tocsr() scores = [] for train_indices, test_indices in kfold.split(X_sparse, y_bin): clf.fit(X_sparse[train_indices], y_bin[train_indices]) print(clf.score(X_sparse[test_indices], y_bin[test_indices])) scores.append(clf.score(X_sparse[test_indices], y_bin[test_indices])) print(sum(scores)/len(scores)) # Using Classifier Chains from sklearn.multioutput import ClassifierChain import numpy as np chains = [ClassifierChain(svc, order='random', random_state=i) for i in range(10)] for chain in chains: chain.fit(X_train, y_train) Y_pred_chains = np.array([chain.predict(X_test) for chain in chains]) Y_pred_ensemble = Y_pred_chains.mean(axis=0) ensemble_accuracy_score = accuracy_score(y_test, Y_pred_ensemble >= .5) ensemble_recall_score = recall_score(y_test, Y_pred_ensemble >= .5, average='weighted') ensemble_precision_score = precision_score(y_test, Y_pred_ensemble >= .5, average='weighted') ensemble_f1_score = f1_score(y_pred, Y_pred_ensemble >= .5, average='weighted') hamm = hamming_loss(Y_pred_ensemble >= .5, y_test)*100 print(ensemble_accuracy_score, ensemble_recall_score, ensemble_precision_score, ensemble_f1_score, hamm) # Using Binary Relevance from skmultilearn.problem_transform import BinaryRelevance from sklearn.naive_bayes import GaussianNB # initialize binary relevance multi-label classifier # with a gaussian naive bayes base classifier classifier = BinaryRelevance(svc) # train classifier.fit(X_train, y_train) # predict predictions = classifier.predict(X_test) print_score(predictions, classifier)Clf: BinaryRelevance Accuracy score: 0.5623807706982068 Recall score: 0.6469784391132706 Precision score: 0.8453964177308024 Hamming loss: 2.315528424265547 F1 score: 0.7474982642210757 ---Text Classification in scikit-learn First, let's get the corpus we will be using, which is included 
in NLTK. You will need NLTK and Scikit-learn (as well as their dependencies, in particular scipy and numpy) to run this code.import nltk nltk.download("reuters") # if necessary from nltk.corpus import reuters[nltk_data] Downloading package reuters to /Users/jason/nltk_data...The NLTK sample of the Reuters Corpus contains 10,788 news documents totaling 1.3 million words. The documents have been classified into 90 topics, and is divided into a training and test sets, a split which we will preserve here. Let's look at the counts of texts the various categories.for category in reuters.categories(): print (category, len(reuters.fileids(category)))acq 2369 alum 58 barley 51 bop 105 carcass 68 castor-oil 2 cocoa 73 coconut 6 coconut-oil 7 coffee 139 copper 65 copra-cake 3 corn 237 cotton 59 cotton-oil 3 cpi 97 cpu 4 crude 578 dfl 3 dlr 175 dmk 14 earn 3964 fuel 23 gas 54 gnp 136 gold 124 grain 582 groundnut 9 groundnut-oil 2 heat 19 hog 22 housing 20 income 16 instal-debt 6 interest 478 ipi 53 iron-steel 54 jet 5 jobs 67 l-cattle 8 lead 29 lei 15 lin-oil 2 livestock 99 lumber 16 meal-feed 49 money-fx 717 money-supply 174 naphtha 6 nat-gas 105 nickel 9 nkr 3 nzdlr 4 oat 14 oilseed 171 orange 27 palladium 3 palm-oil 40 palmkernel 3 pet-chem 32 platinum 12 potato 6 propane 6 rand 3 rape-oil 8 rapeseed 27 reserves 73 retail 25 rice 59 rubber 49 rye 2 ship 286 silver 29 sorghum 34 soy-meal 26 soy-oil 25 soybean 111 strategic-metal 27 sugar 162 sun-meal 2 sun-oil 7 sunseed 16 tea 13 tin 30 trade 485 veg-oil 124 wheat 283 wpi 29 yen 59 zinc 34Many of the documents in the corpus are tagged with multiple labels; in this situation, a straightforward approach is to build a classifier for each label. Let's build a classifier to distinguish the most common topic in the corpus, "acq" (acqusitions). First, here's some code to build the dataset in preparation for classification using scikit-learn.from sklearn.feature_extraction import DictVectorizer def get_BOW(text): BOW = {} for word in text: BOW[word] = BOW.get(word,0) + 1 return BOW def prepare_reuters_data(topic,feature_extractor): training_set = [] training_classifications = [] test_set = [] test_classifications = [] for file_id in reuters.fileids(): feature_dict = feature_extractor(reuters.words(file_id)) if file_id.startswith("train"): training_set.append(feature_dict) if topic in reuters.categories(file_id): training_classifications.append(topic) else: training_classifications.append("not " + topic) else: test_set.append(feature_dict) if topic in reuters.categories(file_id): test_classifications.append(topic) else: test_classifications.append("not " + topic) vectorizer = DictVectorizer() training_data = vectorizer.fit_transform(training_set) test_data = vectorizer.transform(test_set) return training_data,training_classifications,test_data,test_classifications trn_data,trn_classes,test_data,test_classes = prepare_reuters_data("acq",get_BOW)The above code builds a sparse bag of words feature representation (a Python dictionary) for each text in the corpus (which is pre-tokenized), and places it to the appropriate list depending on whether it is testing or training; a corresponding list of correct classifications is created at the same time. 
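As a quick illustration (not part of the original experiment), get_BOW simply turns a token list into a plain {word: count} dictionary, which is what gets appended to the training or test set:

toy_tokens = ["shares", "rose", "after", "the", "merger", "the", "merger"]
print(get_BOW(toy_tokens))
# {'shares': 1, 'rose': 1, 'after': 1, 'the': 2, 'merger': 2}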
The scikit-learn DictVectorizer class converts Python dictionaries into the scipy sparse matrices which Scikit-learn uses; for the training set, use the fit_transform method (which fixes the total number of features in the model), and for the test set, use transform method (which ignores any features in the test set that weren't in the training set). Next, let's prepare some classifiers to test...from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression clfs = [KNeighborsClassifier(),DecisionTreeClassifier(),RandomForestClassifier(), MultinomialNB(),LinearSVC(),LogisticRegression()]To start, we are using default settings for all these classifiers. Let's start by doing 10-fold cross validation on the training set, and looking at the accuracy, recall, precision, and f1-score for each (be patient, this may take a while to complete)...from sklearn import model_selection #from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report def do_multiple_10foldcrossvalidation(clfs,data,classifications): for clf in clfs: predictions = model_selection.cross_val_predict(clf, data,classifications, cv=10) print (clf) print ("accuracy") print (accuracy_score(classifications,predictions)) print (classification_report(classifications,predictions)) do_multiple_10foldcrossvalidation(clfs,trn_data,trn_classes)KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=5, p=2, weights='uniform') accuracy 0.9258591839361565 precision recall f1-score support acq 0.85 0.79 0.82 1650 not acq 0.95 0.96 0.95 6119 avg / total 0.92 0.93 0.92 7769 DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=None, splitter='best') accuracy 0.9347406358604711 precision recall f1-score support acq 0.86 0.83 0.84 1650 not acq 0.96 0.96 0.96 6119 avg / total 0.93 0.93 0.93 7769 [...]In this case, the classifiers are not obviously biased towards a particular task, so accuracy and f-score are nearly the same. The numbers are generally quite high, indicating that it is a fairly easy classification task. In terms of the best classifier, the clear standouts here are the SVM and Logistic Regression classifiers, while kNN is clearly the worst. One reason kNN might be doing poorly is that it is particularly susceptible to a noisy feature space with dimensions that are irrelevant to the task. Let's try to improve performance by removing stopwords and doing lowercasingfrom nltk.corpus import stopwords nltk.download('stopwords') stopwords = stopwords.words('english') def get_BOW_lowered_no_stopwords(text): BOW = {} for word in text: word = word.lower() if word not in stopwords: BOW[word] = BOW.get(word,0) + 1 return BOW trn_data,trn_classes,test_data,test_classes = prepare_reuters_data("acq",get_BOW_lowered_no_stopwords) do_multiple_10foldcrossvalidation(clfs,trn_data,trn_classes)[nltk_data] Downloading package stopwords to /Users/jason/nltk_data... [nltk_data] Unzipping corpora/stopwords.zip. 
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=5, p=2, weights='uniform') accuracy 0.9387308533916849 precision recall f1-score support acq 0.90 0.80 0.85 1650 not acq 0.95 0.98 0.96 6119 avg / total 0.94 0.94 0.94 7769 DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=None, splitter='best') accuracy 0.9420774874501223 precision recall f1-score support acq 0.86 0.86 0.86 [...]That did improve the performance of kNN by about 1% accuracy, but it is still the worst classifier. Gains for other classifiers were more modest, since the scores were already high, and those classifiers are more robust to feature noise.The random forest classifier is doing worse than its reputation would suggest. The default number of decision trees (n_estimators) used in the model is only 10, which is fairly low: lets see if we can find a better number...n_to_test = [10,50,100,150] rfs = [RandomForestClassifier(n_estimators=n) for n in n_to_test] do_multiple_10foldcrossvalidation(rfs,trn_data,trn_classes)RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False, random_state=None, verbose=0, warm_start=False) accuracy 0.9532758398764319 precision recall f1-score support acq 0.88 0.90 0.89 1650 not acq 0.97 0.97 0.97 6119 avg / total 0.95 0.95 0.95 7769 RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=1, [...]Yup, more subclassifiers improved things, though the Random Forest classifier is still slightly inferior to the SVM and Logistic Regression classifiers in this BOW (i.e. large feature set) situation. Both SVM and Logistic Regression classifiers have a C parameter which controls the degree of regularization (lower C means more emphasis on regularization when optimising the model). Let's see if we can improve the performance of the Logistic Regression classifier by changing the C parameter from the default (1.0). For this parameter, a logrithmic scale is appropriate...c_to_test = [0.001,0.01,0.1,1,10,100, 1000] lrcs = [LogisticRegression(C=c) for c in c_to_test] do_multiple_10foldcrossvalidation(lrcs,trn_data,trn_classes)In this case, changing the parameter from the default is not desirable. 
When training with fairly large datasets to solve a straightforward task with a simple classifier, the effect of regularization is often minimal.Under normal circumstances we might do more parameter tuning or feature selection (and we encourage you to play around), but let's just skip to testing the classifiers on the test set and displaying the results using matplotlib....%matplotlib inline import matplotlib.pyplot as plt def test_and_graph(clfs,training_data,training_classifications,test_data,test_classifications): accuracies = [] for clf in clfs: clf.fit(training_data,training_classifications) predictions = clf.predict(test_data) accuracies.append(accuracy_score(test_classifications,predictions)) print (accuracies) p = plt.bar([num + 0.25 for num in range(len(clfs))], accuracies,0.5) plt.ylabel('Accuracy') plt.title('Accuracy classifying acq topic in Reuters, by classifier') plt.ylim([0.9,1]) plt.xticks([num + 0.5 for num in range(len(clfs))], ('kNN', 'DT', 'RF', 'NB', 'SVM', 'LR')) plt.show() test_and_graph(clfs,trn_data,trn_classes,test_data,test_classes)pYPK3pYPK3 is a version of [pYPK1](pYPK1.ipynb) that has a CEN6/ARS yeast oriand a HphMX4 marker instead of the bleomycin marker.from pydna.all import * pYPK1 =read("pYPK1.gb") gb =Genbank("") githubuser="BjornFJohansson" gistid = "c5424b7ebbf553c52053" x=download_text('https://gist.githubusercontent.com/{user}/{gistid}/raw'.format(user=githubuser, gistid=gistid)) pAG32 =read(x) pAG32[pAG32 sequence](https://gist.github.com/BjornFJohansson/c5424b7ebbf553c52053)p150,p149,p325,p324,p678,p666 = parse_primers(''' >150_MX4fwd (25-mer) Primer in the Ashbya gossypi TEF promoter in forward direction. contaminated? AAAATCTTGCTAGGATACAGTTCTC >149_MX4rev (25-mer) Primer in the Ashbya gossypi TEF terminator in the reverse direction. ACAAATGACAAGTTCTTGAAAACAA >325_Hygfwd (27-mer) GATGTAGGAGGGCGTGGATATGTCCTG >324_Hygrev (27-mer) TGCATCATCGAAATTGCCGTCAACCAA >678_pYPK0_hygfwd: (77-mer) ctcacgttaagggattttggtcatgagCACATACGATTTAGGTGACACTATAGAAC >666_pYPK0_hygrev (70-mer) catctttgacagcttatcatcgataagctCGACTCACTATAGGGAGACC''') hygmarker1 =pcr( p678, p324, pAG32 ) hygmarker2 =pcr( p325, p666, pAG32 ) from Bio.Restriction import PvuII vect = pYPK1.linearize(PvuII) a=Assembly([hygmarker1, hygmarker2, vect], limit =251) a candidate = a.assemble_circular()[0] candidate.figure() pYPK3=candidate.synced(pYPK1) pYPK3.stamp() pYPK3.locus = "pYPK3" pYPK3.write("pYPK3.gb")[DOWNLOAD](pYPK3.gb)from pydna.all import * reloaded=read("pYPK3.gb") assert reloaded.cseguid() in reloaded.definitionFurniture Sales Time Series Modelimport pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline #loading the data df= pd.read_excel('Furniture-Sales.xls',sheet_name='Orders',index_col='Row ID') df.shape df.columns df.head() df.dtypesData Preprocessing# Taking the order date and sales in our data as we only need that to predict future sales as per time series model. 
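# (an equivalent, more direct selection would be: data = df[['Order Date', 'Sales']].copy())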
data= df.drop(['Order ID','Ship Date', 'Ship Mode', 'Customer ID', 'Customer Name', 'Segment', 'Country', 'City', 'State', 'Postal Code', 'Region', 'Product ID', 'Category', 'Sub-Category', 'Product Name', 'Quantity', 'Discount', 'Profit'], axis=1) data.head() # Now as we see in data we have more than one sales for a particular date # so we will group the data by date and will do the sum of its sales data = data.groupby('Order Date', sort=True)['Sales'].sum().reset_index() data.set_index('Order Date', inplace=True) # So if you want you can slice it now with day, date or year data['2014-01']Now as we know mainly sales are done with Monthly prediction so we will change the freq of daily to monthly with avg monthly sales and will take first of every month.data = data.resample('MS').mean() data['2017']Seasonality Check# Visulaizing the data data.plot(figsize=(15,5)) plt.show()Now if we will see in graph, Per year sales is less in starting month and max in end month with low sales in few mid month. Which represent the seasonality trend both# We have seasonal decompose class whcih returns all the following plot data from statsmodels.tsa.seasonal import seasonal_decompose decom= seasonal_decompose(data, model= 'additive') # Seasonal decom.seasonal.plot(figsize= (15,5)) # Trend decom.trend.plot(figsize= (15,5))We can see we have very less increasing trend in our data, starting from july 2015#Residuals decom.resid.plot(figsize=(15,5)) #original data plot decom.observed.plot(figsize=(15,5)) # Now as we have Trend we will try to use lag to check trend. diff= data.diff(periods=1) seasonal_decompose(diff.dropna(),model='additive').trend.plot(figsize=(15,5))Now we can see with One lag , we can see increasing trend is gone, which we can use for model.from statsmodels.graphics.tsaplots import plot_acf, plot_pacf plot_acf(data)Acf plot also suggest with no linear trend# now getting training and testing data set train= data.iloc[0:36,:] test= data.iloc[36:48,:] print(train.shape, test.shape) import warnings warnings.filterwarnings('ignore') import itertools as it p=d=q=range(0,2) pdq= list(it.product(p,d,q)) seasonal_pdq=[(x[0], x[1], x[2], 12) for x in list(it.product(p, d, q))] print('Examples of parameter combinations for Seasonal ARIMA...') print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1])) print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2])) print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3])) print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4])) from statsmodels.tsa.api import SARIMAX pair= {} for param in pdq: for param_seasonal in seasonal_pdq: try: mod = SARIMAX(data,order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False) results = mod.fit() pair[results.aic]=(param,param_seasonal) # print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic)) except: continue best_order,best_s_order= pair[min(pair.keys())] print(best_order,best_s_order) final_model= SARIMAX(data,order=best_order,seasonal_order=best_s_order).fit() final_model.summary() final_model.plot_diagnostics(figsize=(16, 8)) plt.show()our model diagnostics suggests that the model residuals are near normally distributed.yp= final_model.predict(start='2017-01-01', end='2017-12-01') plt.figure(figsize=(15,5)) plt.plot(train,'blue',label='Train') plt.plot(test,'red',label='Test') plt.plot(yp,'green',label='Pred') plt.legend(loc='best') plt.show() from sklearn.metrics import mean_squared_error, r2_score print('rmse:{}'.format(np.sqrt(mean_squared_error(test,yp)))) 
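# RMSE is in the same units as the monthly Sales values; R^2 measures variance explained on the 2017 hold-out.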
print('R2 Score:{}'.format(r2_score(test,yp))) plt.figure(figsize=(15,5)) plt.plot(data,'blue',label='Train') plt.plot(final_model.forecast(steps=100),'green') plt.show()So now with R2 of .65 and above we cn consider it as a good model. Which captured trend and seasonality both for future also.from pmdarima.arima import auto_arima final_model= auto_arima(data, start_p=0, start_q=0,d=1, max_p=3,max_q=3, m=12, start_P=0,D=1,start_Q=0 ,seasonal=True, max_P=3,max_Q=3, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True) model= final_model.fit(data) forecast = model.predict(n_periods=len(test)) forecast = pd.DataFrame(forecast,index = test.index, columns=['Prediction']) print('rmse:{}'.format(np.sqrt(mean_squared_error(test['Sales'],forecast['Prediction'])))) print('R2 Score:{}'.format(r2_score(test['Sales'],forecast['Prediction']))) plt.figure(figsize=(15,5)) plt.plot(train['Sales'],'blue',label='Train') plt.plot(test['Sales'],'red',label='Test') plt.plot(forecast['Prediction'],'green',label='Pred') plt.legend(loc='best') plt.show()rmse:662.4781334247258 R2 Score:0.3179815659687161Goal: * ID and pull out the sequences of all 16S genes in bac_genome_n1210 dataset User variablesimport os baseDir = '/home/nick/notebook/SIPSim/dev/bac_genome1147/' genomeDir = '/var/seq_data/ncbi_db/genome/Jan2016/bac_complete_spec-rep1_rn/' rnammerDir = os.path.join(baseDir, 'rnammer')Initimport glob from IPython.display import Image %load_ext rpy2.ipython %%R library(ggplot2) library(dplyr) library(tidyr) library(gridExtra) if not os.path.isdir(rnammerDir): os.makedirs(rnammerDir)rnammer run%%bash -s "$genomeDir" "$rnammerDir" find $1 -name "*.fna" | \ perl -pe 's/.+\/|\.fna//g' | \ xargs -n 1 -I % -P 30 bash -c \ "rnammer -S bac -m ssu -gff $2/%_rrn.gff -f $2/%_rrn.fna -xml $2/%_rrn.xml < $1/%.fna" ## Summarizing the results !cd $rnammerDir; \ egrep -v "^#" *.gff | \ grep "16s_rRNA" | \ perl -pe 's/:/\t/' > ssu_summary.txt %%bash -s "$rnammerDir" cd $1 printf "ssu gene length distribution:\n" cut -d $'\t' -f 7 ssu_summary.txt | NY_misc_perl stats_descriptivessu gene length distribution: 1 min 589.20 1 Q1 1882.60 1 mean 1903.58 1 median 1940.60 1 Q3 1979.00 1 max 2090.20 1 stdev 138.74Compiling 16S sequences! cd $rnammerDir; \ cat *_rrn.fna > bac_genome1147_16S.fna !printf "Number of sequences: " ! cd $rnammerDir; \ grep -c ">" bac_genome1147_16S.fnaNumber of sequences: 4498Protein Folding Objective and PrerequisitesHone your modeling skills with this challenging Protein Folding problem. We’ll show you how to create a binary optimization model of the problem with the Gurobi Python API and then solve it using the Gurobi Optimizer.This model is example 28 from the fifth edition of Model Building in Mathematical Programming by on pages 289-290 and 344-345.This modeling example is at the advanced level, where we assume that you know Python and the Gurobi Python API and that you have advanced knowledge of building mathematical optimization models. Typically, the objective function and/or constraints of these examples are complex or require advanced features of the Gurobi Python API.**Download the Repository** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). **Gurobi License** In order to run this Jupyter Notebook properly, you must have a Gurobi license. 
If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_PROTEIN_FOLDING_COM_EVAL_GitHub&utm_term=Protein_Folding&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-EDU-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_PROTEIN_FOLDING_ACADEMIC_EVAL_GitHub&utm_term=Protein_Folding&utm_content=C_JPM) as an *academic user*. --- Problem DescriptionThe problem described in this Jupyter Notebook is based on a molecular biology problem that is discussed in a paper entitled “Quadratic Binary Programming Models in Computational Biology” by (2008). The problem pertains to a protein, which consists of a chain of amino acids. In this problem, the amino acids come in two forms: hydrophilic (water-loving) and hydrophobic (water-hating). An example of such a chain is given in the following figure with the hydrophobic acids marked in bold.![chain](chain.PNG)Such a chain naturally folds so as to bring as many hydrophobic acids as possible close together. A folding for the chain, in two dimensions, is given in the following figure, with the new matches marked by dashed lines. Our objective is to predict the optimum folding.![folding](folding.PNG)To solve the problem posed here, we must find the optimum folding for a chain of 50 amino acids with hydrophobic acids at positions 2, 4, 5, 6, 11, 12, 17, 20, 21, 25, 27, 28, 30, 31, 33, 37, 44 and 46. --- Model Formulation Sets and Indices$k \in A =\{1,2,...,50\}$: Chain of amino acids.$i,j \in H =\{2,4,5,6,11,12,17,20,21,25,27,28,30,31,33,37,44,46\} \subseteq A$: Subset of amino acids that are hydrophobic. Decision variables$\text{match}_{i,j} \equiv x_{i,j} = 1$, iff hydrophobic acid $i$ is matched with acid $j$, for all hydrophobic acids $i < j$ that are not contiguous in the chain (i.e. $j > i+1$).$\text{fold}_{k} \equiv y_{k} = 1$, iff a fold occurs between the $k$th and $(k+1)$st acids in the chain. ConstraintsFor each pair of hydrophobic acids $i$ and $j$, we can match them if: * they are not contiguous (contiguous acids are already adjacent, so no fold is needed to bring them together), * they have an even number of acids between them in the chain, * and there is exactly one fold between $i$ and $j$, located at their midpoint $k = (i+j-1)/2$. This gives rise to the following constraints.1. $y_{k} + x_{i,j} \leq 1, \; \forall k \in A, (i,j) \in H, \; \text{such that} \; i \leq k < j, \; \text{and} \; k \neq (i+j-1)/2$.2. $x_{i,j} \leq y_{k}, \; \text{where} \; k = (i+j-1)/2 $.Let $\text{H_fold} = \{(i,j) \in H: x_{i,j} \leq y_{k}, \; k = (i+j-1)/2 \}$ be the set of hydrophobic-acid pairs for which there is a fold that enables the matching. 
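A quick worked check of the midpoint condition (a small sketch, not part of the original notebook): for the pair $i=2$, $j=5$ there are two acids (3 and 4) between them, and $k=(2+5-1)/2=3$, so matching 2 with 5 requires a fold between acids 3 and 4. The snippet below simply enumerates, in plain Python, which hydrophobic pairs admit such an integer midpoint at all; it is independent of the Gurobi model built later.
# Illustrative enumeration of matchable pairs and their required fold position (sketch only)
h_phobic = [2, 4, 5, 6, 11, 12, 17, 20, 21, 25, 27, 28, 30, 31, 33, 37, 44, 46]
pairs_with_midpoint = [(i, j, (i + j - 1) // 2)
                       for i in h_phobic for j in h_phobic
                       if j > i + 1 and (j - i - 1) % 2 == 0]  # non-contiguous, even gap
print(pairs_with_midpoint[:5])  # starts with (2, 5, 3): matching 2-5 needs a fold at k = 3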
Objective functionThe objective is to maximize the number of matchings of hydrophobic acids.$$\sum_{i,j \in \text{H_fold}} x_{i,j}$$ --- Python ImplementationWe import the Gurobi Python Module.import gurobipy as gp from gurobipy import GRB # tested with Python 3.7.0 & Gurobi 9.0Input Data# list of aminoacids and hydrophobic acids acids = [*range(1,51)] h_phobic = [2,4,5,6,11,12,17,20,21,25,27,28,30,31,33,37,44,46]Preprocessing# Creating the data structures to generate the model list_ij = [] # Indices of hydrophobic acids that can be matched for i in h_phobic: for j in h_phobic: if j > i + 1: tp = i,j list_ij.append(tp) ij = gp.tuplelist(list_ij) ### list_ik1j = [] list_ik2j = [] for i,j in ij: for k in range(i,j): if (k == (i+j-1)/2 ): tp = i,j,k list_ik2j.append(tp) else: tp = i,j,k list_ik1j.append(tp) # Indices for constraints of type 2 ik2j = gp.tuplelist(list_ik2j) # Indices for constraints of type 1 ik1j = gp.tuplelist(list_ik1j) # Matchings that are enabled by a folding list_ijfold = [] for i,j,k in ik2j: tp = i,j list_ijfold.append(tp) ijfold = gp.tuplelist(list_ijfold)Model DeploymentWe create a model and the decision variables. There are two types of decision variables: the variables that determine which hydrophobic acids to match, and the variables that determine at which amino acid the folding of the protein happens.model = gp.Model('ProteinFolding') # Matching variables match = model.addVars(ij, vtype=GRB.BINARY, name="match") # Folding variables fold = model.addVars(acids, vtype=GRB.BINARY, name="fold")Using license file c:\gurobi\gurobi.licFolding and matching constraints1. $y_{k} + x_{i,j} \leq 1, \; \forall k \in A, (i,j) \in H, \; \text{such that} \; i \leq k < j, \; \text{and} \; k \neq (i+j-1)/2$.2. $x_{i,j} \leq y_{k}, \; \text{where} \; k = (i+j-1)/2 $.# Constraint 1: C1 = model.addConstrs( (fold[k] + match[i,j] <= 1 for i,j,k in ik1j ) , name='C1') # Constraint 2: C2 = model.addConstrs( ( match[i,j] <= fold[k] for i,j,k in ik2j ) , name='C2')Objective functionMaximize the matchings of hydrophobic acids.# Objective function model.setObjective(gp.quicksum(match[i,j] for i,j in ijfold) , GRB.MAXIMIZE ) # Verify model formulation model.write('ProteinFolding.lp') # Run optimization engine model.optimize() # Output report print(f"Optimal number of hydrophobic acids matchings: {model.objVal}") print("_______________________________________") print(f"Optimal matching of hydrophobic acids.") print("_______________________________________") for i,j,k in ik2j: if (match[i,j].x > 0.5): print(f"Hydrophobic acid matching {i,j} with folding at amonacid {k}.")Optimal number of hydrophobic acids matchings: 10.0 _______________________________________ Optimal matching of hydrophobic acids. _______________________________________ Hydrophobic acid matching (2, 5) with folding at amonacid 3. Hydrophobic acid matching (5, 12) with folding at amonacid 8. Hydrophobic acid matching (6, 11) with folding at amonacid 8. Hydrophobic acid matching (12, 17) with folding at amonacid 14. Hydrophobic acid matching (17, 20) with folding at amonacid 18. Hydrophobic acid matching (20, 25) with folding at amonacid 22. Hydrophobic acid matching (25, 28) with folding at amonacid 26. Hydrophobic acid matching (28, 31) with folding at amonacid 29. Hydrophobic acid matching (31, 46) with folding at amonacid 38. 
Hydrophobic acid matching (33, 44) with folding at amonacid 38.Title Area Element Dependencies Matplotlib Backends Bokeh Matplotlib Plotlyimport numpy as np import holoviews as hv hv.extension('matplotlib')``Area`` elements are ``Curve`` elements where the area below the line is filled. Like ``Curve`` elements, ``Area`` elements are used to display the development of quantitative values over an interval or time period. ``Area`` Elements may also be stacked to display multiple data series in a cumulative fashion over the value dimension.The data of an ``Area`` Element should be tabular with one key dimension representing the samples over the interval or the timeseries and one or two value dimensions. A single value dimension will fill the area between the curve and the x-axis, while two value dimensions will fill the area between the curves. See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays. Area under the curveBy default the Area Element draws just the area under the curve, i.e. the region between the curve and the origin.xs = np.linspace(0, np.pi*4, 40) hv.Area((xs, np.sin(xs)))Area between curvesWhen supplied a second value dimension the area is defined as the area between two curves.X = np.linspace(0,3,200) Y = X**2 + 3 Y2 = np.exp(X) + 2 Y3 = np.cos(X) hv.Area((X, Y, Y2), vdims=['y', 'y2']) * hv.Area((X, Y, Y3), vdims=['y', 'y3'])Stacked areas Areas are also useful to visualize multiple variables changing over time, but in order to be able to compare them the areas need to be stacked. To do this, use the ``Area.stack`` classmethod to stack multiple ``Area`` elements in an (Nd)Overlay.In this example we will generate a set of 5 arrays representing percentages and create an ``Overlay`` of them. Then we simply call the ``stack`` method with this overlay to get a stacked area chart.values = np.random.rand(5, 20) percentages = (values/values.sum(axis=0)).T*100 overlay = hv.Overlay([hv.Area(percentages[:, i], vdims=[hv.Dimension('value', unit='%')]) for i in range(5)]) overlay + hv.Area.stack(overlay)Chapter 8 - Dimensionality Reduction Unsupervised LearningIn supervised learning, we have an access to $p$ features measured on $n$ observations, and a response $y$ is given. The goal is then to predict $y$ using the $p$ features.In unsupervised learning, we only have a set of features $X_1, \cdots, X_p$ measured on $n$ observations. We are not interested in prediction because we do not have an associated response variable $y$. Rather, the goal is to discover interesting things about the measurements $X_1, \cdots, X_p$. Can we visualise the data? Can we discover subgroups among the variables or the observations?Unsupervised learning is much more challenging. The analysis tends to be more subjective / biased and there is no simple goal of the analysis. Unsupervised learning is part of exploratory data analysis. Furthermore, in unsupervised learning there is no way to check our work - we don't have tools like cross-validation to measure the performance of our technique. The Curse of DimensionalityMany ML problems involve training on many features, for each training instance - $p$ can be very large. This process is slow and makes it harder to find a good solution. This is called the curse of dimensionality.Consider the MNIST example. The pixels on the image borders are almost always white (feature has low variation) so they can be removed. 
Neighbouring pixels usually have the same colour so they can be averaged to form one feature (features have high correlation). Such steps do not result in much information loss.In theory, one solution to overcome the curse of dimsensionality is to increase the size of the training set. However, in reality, the number of training instances required to reach a given density ($\frac np$) grows exponentially with the number of dimensions.import pandas as pd import numpy as np from sklearn.model_selection import KFold from sklearn.decomposition import PCA import matplotlib.pyplot as plt def load(fname): import pickle mnist = None try: with open(fname, 'rb') as f: mnist = pickle.load(f) return mnist except FileNotFoundError: from sklearn.datasets import fetch_openml mnist = fetch_openml('mnist_784', version=1, cache=True) with open(fname, 'wb') as f: mnist = pickle.dump(mnist, f) return mnistPrincipal Component Analysis (PCA)Consider an ML problem that has a large set of correlated variables (e.g. the neighbouring pixels example in the MNIST dataset). We can summarize this large set of correlated variables with a smaller number of representative variables using principal components.Say we want to visualise $n$ observations with $p$ features $X_1, \cdots, X_p$. We can visualise the data using $n \choose 2$ scatterplots. If $p$ is large then we cannot possibly look at all of them. Also, most of them will likely be uninformative as they contain only a small fraction of the total information / variance in the dataset. A better method is to visualise the $n$ observations when $p$ is large. Particularly, we want to find a low-dimensional representation of the data / reduce the dimensions of the data, capturing as much of the information as possible.PCA allows us to do so. The approach is to pick the hyperplane that preserves the most amount of variance, as it will likely lose the least amount of information compared to other projections. Each of these hyperplanes is a linear combination of the $p$ features.The first principal component of a set of features $X_1, \cdots, X_p$ is the normalised linear combination of the features:$$Z_1 = \phi_{11}X_1 + \phi_{21}X_2 + \cdots + \phi_{p1}X_p$$that has the largest variance. The elements $\phi_{j1} \forall j \in \{1\cdots p\}$ are the loadings of the first principal component and together, they make the principal component loading vector $\phi_1$. Mathematically, the first principal component loading vector has the loadings:$$\phi_1 = \begin{pmatrix}\phi_{11}&\phi_{21}&\cdots&\phi_{p1}\end{pmatrix}^T$$Normalised means that the sum of the loadings $\sum_{j=1}^p\phi_{j1}^2=1$. This constraint is needed as setting these elements to be arbitrarily large would results in an arbitrary large variance. To find the first principal components of a $n\times p$ training set $\mathbf X$, we first center the data to have mean zero. Then, we find the linear combination of the feature values: $$z_{i1} = \phi_{11}x_{i1} + \phi_{21}x_{i2} + \cdots + \phi_{p1}x_{ip}\,\, \forall i \in \{1,\cdots,n\}$$that has the largest sample variance subject to the constraint $\sum_{j=1}^p\phi_{j1}^2=1$. In other words, the first principal component loading vector solves the optimisation problem:$$\underset{\phi_{11}, \cdots, \phi_{p1}}{\text{Maximise }} \left\{\frac 1n \sum_{i=1}^n\begin{pmatrix}\sum_{j=1}^p\phi_{j1}x_{ij}\end{pmatrix}^2\right\} \text{ s. t. 
}$$$$\sum_{j=1}^p \phi_{j1}^2=1$$Since $z_{i1} = \phi_{11}x_{i1} + \phi_{21}x_{i2} + \cdots + \phi_{p1}x_{ip}$ we can simplify the optimisation problem to:$$\underset{\phi_{11}, \cdots, \phi_{p1}}{\text{Maximise }} \left\{\frac 1n \sum_{i=1}^nz_{i1}^2\right\} \text{ s. t. }$$ $$\sum_{j=1}^p \phi_{j1}^2=1$$Furthermore, since we have a zero-ed mean, that means $\frac 1n \sum_{i=1}^nx_{ij}=0$, the mean of $z_{11}, \cdots, z_{n1}$ is zero as well. Hence, the objective we are maximising is just the sample variance of the $n$ values of $z_{i1}$. We refer $z_{11}, \cdots, z_{n1}$ as the scores of the first principal component.Solving the optimisation problem involves eigenvalue decomposition. In particular, there is a standard matrix factorization technique called Singlular Value Decomposition (SVD) that decomposes the training set matrix $\mathbf X$ to the dot product of three matrices:$$\mathbf X = \mathbf U \cdot \Sigma \cdot \mathbf V^T$$ where $\mathbf V^T$ contains all the principal components that we are looking for. Interpretation: The loadings of the first principal component, $\phi_1$ is the direction in feature space along which the data varies the most. If we project the $n$ training samples onto this direction, the projected values are the principal component scores $z_{11}, \cdots, z_{n1}$ themselves and they will lose the least amount of information compared to other projections. PCA identifies the axis that accounts for the largest amount of variance in the training set.In this example, the observations are in 2D. The first principal component loading vector is the green line. $\phi_1 = (\phi_{11}, \phi_{21}) = (0.839, 0.544)$# Ingest mnsit = load('mnist.data.pkl') mnsit_X, mnsit_y = mnsit['data'], mnsit['target'] kf = KFold(n_splits=10, shuffle=True, random_state=0) kf.get_n_splits() t1 = [] for train_index, test_index in kf.split(mnsit_X, mnsit_y): t1 = test_index break X = mnsit_X[t1] y_test = mnsit_y[t1] # For testing # print(pd.Series(y_test).value_counts())The following is an implementation of PCA using the SVD method from `numpy`.# PCA using SVD Algorithm # Center the data X_centered = X - X.mean(axis=0) # SVD algorithm to obtain the loadings u, s, Vt = np.linalg.svd(X_centered) # Obtain the principal components W = Vt.T[:,:196] X_d1 = X_centered.dot(W) # For testing # print(X_test.shape) # print(X_test_centered.shape) # X_test_centered[:2]After the first principal component $Z_1$ of the features are determined, we can find the second principal component $Z_2$. The second principal component is the linear combination $X_1, \cdots, X_p$ that has maximal variance out of all linear combinations that are uncorrelated with $Z_1$. The second principal component scores $z_{12}, \cdots, z_{n2}$ take the form:$$z_{i2} = \phi_{12}x_{i1} + \phi_{22}x_{i2} + \cdots + \phi_{p2}x_{ip}$$where $\phi_{2}$ is the second principal component loading vector, with elements $\phi_{12}, \phi_{22}, \cdots, \phi_{p2}$. Note that this loading vector is constrained such that the direction must be orthogonal (perpendicular) to the direction of $\phi_1$. In 3D space, once we have found $\phi_1$, there is only one possibility for $\phi_2$, which is the blue dashed line.But in a larger dataset with $p>2$ variables, there are multiple candidates for principal components, and they are defined in a similar manner. 
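As a quick numerical illustration of that orthogonality (a small sketch, not in the original text; it assumes the `Vt`, `X_centered` and `X_d1` objects from the NumPy/SVD cell above): the rows of `Vt` returned by the SVD are orthonormal, so the retained loading vectors satisfy $\mathbf W^T \mathbf W = \mathbf I$ and the principal-component scores are exactly the centered data projected onto them.
# Sanity checks on the SVD-based loadings (assumes Vt, X_centered, X_d1 from the cell above)
W_check = Vt.T[:, :196]
print(np.allclose(W_check.T @ W_check, np.eye(196)))   # loading vectors are orthonormal
print(np.allclose(X_centered @ W_check, X_d1))         # scores = centered data times loadings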
To find $\phi_2$, we solve the same maximisation problem, but with the additional constraint that $\phi_2$ is orthogonal to $\phi_1$.Once all the principal components are identified, you can reduce the dimensionality of the dataset by projecting it onto the hyperplane defined by the first $d$ principal components. Selecting this hyperplane ensures that the projection will preserve as much variance as possible. To do so, simply compute the dot product of the training sest matrix $\mathbf X$ by the matrix $\mathbf W_d$.$$\mathbf X_{d\text{-proj}} = \mathbf X \cdot \mathbf W_d$$ The following is the `sklearn` implementation of PCA.pca = PCA(n_components=196, svd_solver='full') X_d2 = pca.fit_transform(X) # For testing # Validate that both numpy and sklearn approaches yield the same results # i_X = 19 # Xv_d1 = X_d1[i_X] # Xv_d2 = X_d2[i_X] # for i,j in zip(Xv_d1, Xv_d2): # print('{:2f}'.format(abs(float(abs(j) - abs(i))))) # For testing # Validate that the loadings for both numpy and sklearn approaches yield the same results # print(pca.components_[100][20:50]) # print(Vt[100][20:50])Fit Kmeans Modelfrom pyspark.ml.clustering import KMeans kmeans = KMeans().setK(2).setSeed(42) model = kmeans.fit(final_data) model.predictionColCalculate Within Set Sum of Squared Errors (WSSSE)# within set sum of squared errors wssse = model.computeCost(final_data) wssseWhere Are the Centroids ?centers = model.clusterCenters() centersWhich Cluster Is Each Sample In ?results = model.transform(final_data) results.show()+--------------------+----------+ | features|prediction| +--------------------+----------+ | (3,[],[])| 1| |(3,[0,1,2],[0.1,0...| 1| |(3,[0,1,2],[0.2,0...| 1| |(3,[0,1,2],[9.0,9...| 0| |(3,[0,1,2],[9.1,9...| 0| |(3,[0,1,2],[9.2,9...| 0| +--------------------+----------+- 知识参考[1] [强化学习之六:Deep Q-Network and Beyond](https://blog.csdn.net/qq_32690999/article/details/79302093) DQN 相对import tensorflow as tf import numpy as np import gym env=gym.make('CartPole-v0') state_shape=env.observation_space.shape; action_shape=env.action_space.shape; input_vec=tf.placeholder(dtype=tf.int32,name='state_input',shape=state_shape) output_vec=tf.placeholder(dtype=tf.int32,name='state_input',shape=action_shape) with tf.Session() as sess: class NN: def __init__(): defAdvent of Code 2018This solution (Jupyter notebook; python3.7) by kannix68, @ 2020-12 (2 years late). \Using anaconda distro, conda v4.9.2. installation on MacOS v10.14.6 "Mojave". Generic AoC codeimport sys import logging import lib.aochelper as aoc from lib.aochelper import map_list as mapl from lib.aochelper import filter_list as filterl print("Python version:", sys.version) print("Version info:", sys.version_info) log = aoc.getLogger(__name__) #log.setLevel(logging.DEBUG) # std: logging.INFO print(f"initial log-level={log.getEffectiveLevel()}") EXEC_RESOURCE_HOGS = False EXEC_DOUBTFUL = FalseProblem domain code Day 1: Chronal Calibrationprint("Day 1 a") test = """ +1, -2, +3, +1 """.strip().split(', ') tests = aoc.map_list(int, test) aoc.assert_msg("test 1", 3 == sum(tests)) ins = aoc.read_file_to_list('./day01/day01.in') print("Day 1 a solution:", sum(aoc.map_list(int, ins))) print("Day 1 b", "TODO here, but already solved 2018") import itertools # A list and list.append(frequency are resource hog tools to keep track of seen entries), # using dict instead. 
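# (Aside, not part of the original solution: a plain built-in set would work just as well
#  here, since it offers the same O(1) membership test as the dict-with-dummy-values used
#  below, e.g.
#      seen = {0}
#      ...
#      if freq in seen: return freq
#      seen.add(freq)
#  Either container avoids the O(n) lookups of a list.)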
def solve01b(l): iter = 0 freq = 0 freqs = {freq:True} for freq_inc in itertools.cycle(l): iter += 1 freq += freq_inc if (len(freqs)%100_000 == 0): log.info(f"iter={iter}, freq={freq} num-frequencies={len(freqs)}") if freq in freqs: log.info(f"frequency {freq} used 2nd time, iteration={iter}, num-frequencies={len(freqs)}") return freq elif iter > 10_000_000: raise Exception("fail") else: freqs[freq] = True solve01b(tests) iins = aoc.map_list(int, ins) log.debug(f"len={len(iins)},elems={iins}") result = solve01b(iins) print("Day 1 b result:", result)Day 2: Inventory Management Systemprint("Day 2", "TODO here, but already solved 2018")Day 3: No Matter How You Slice Itfrom collections import defaultdict class C2d_space_inc: def __init__(self): self.spc = defaultdict(int) self.clms = {} def set_xy(self, x, y): self.spc[tuple([x,y])] += 1 def set_range(self, x, y, rgx, rgy, id=None): for px in range(x, x+rgx): for py in range(y, y+rgy): self.spc[tuple([px,py])] += 1 if id is not None: self.clms[id] = [x, y, rgx, rgy] #log_info(f"create claim {id} => {self.claims[id] }") def get_range(self, x, y, rgx, rgy): outspc = {} for px in range(x, x+rgx): for py in range(y, y+rgy): outspc[tuple([px,py])] = self.spc[tuple([px,py])] return outspc def cols(self): return sorted(set(map(lambda it: it[0], self.spc.keys()))) def rows(self): return sorted(set(map(lambda it: it[1], self.spc.keys()))) def values(self): return self.spc.values() def claims(self): return self.clms def get_pp_repr(self): return "[c2d_space_inc]: " + str(self.spc) def pp(self): print(self.get_pp_repr()) def get_show_repr(self): rows = self.rows() cols = self.cols() rowstr = '' for y in range(0, max(rows)+1): #range(min(rows), max(rows)+1): colstr = '' for x in range(0, max(cols)+1): colstr += str(self.spc[tuple([x,y])]) rowstr += colstr + "\n" return rowstr def show(self): print(self.get_show_repr()) # just some testing: c2d = C2d_space_inc() c2d.set_xy(1,1) c2d.set_xy(2,2) log.debug(c2d.get_pp_repr()) c2d.set_xy(1,1) c2d.set_xy(1,3) log.debug(c2d.get_pp_repr()) log.debug(f"cols: {c2d.cols()}") log.debug(f"rows:, {c2d.rows()}") log.debug("\n"+c2d.get_show_repr()) #c2d.show() def create_space(l): import re c2d = C2d_space_inc() for line in l: rx = re.search('^#(\w+)\s+@\s+(\d+),(\d+):\s*(\d+)x(\d+)$', line) #r'^#([^\s]+) @ (d+),(\d+): ((\d+)),((\d+))$', line) #log.debug(rx.groups()) id, sx, sy, srgx, srgy = rx.groups() x, y, rgs, rgy = aoc.map_list(int, [sx, sy, srgx, srgy]) #c2d.set_range(x, y, rgs, rgy) c2d.set_range(x, y, rgs, rgy, id=id) #c2d.show() return c2d tests = """ #123 @ 3,2: 5x4 """.strip().split("\n") log.info(f"tests {tests}") print("1st test representation:") create_space(tests).show() tests = """ #1 @ 1,3: 4x4 #2 @ 3,1: 4x4 #3 @ 5,5: 2x2 """.strip().split("\n") log.info(f"tests={tests}") print("2nd test representation:") create_space(tests).show() def get_space_overlaps(l): c2d = create_space(l) return len(aoc.filter_list(lambda it: it > 1, c2d.values())) aoc.assert_msg( "4 test cells that overlap", 4 == get_space_overlaps(tests) ) ins = aoc.read_file_to_list('./day03/day03.in') result = get_space_overlaps(ins) print("Day 3 a solution:", result) def get_singular_space(l): c2d = create_space(l) for k, v in c2d.claims().items(): #log.debug([k, v]) rg = c2d.get_range(v[0], v[1], v[2], v[3]) #log.debug(rg) #log.debug(rg.values()) if max(rg.values()) == 1: log.info(f"found id={k}, num of 1-cells:{len(rg.values())}") result = k return result print("Day 3 b tests result: ", get_singular_space(tests)) print("Day 3 b solution:", 
get_singular_space(ins))Day 4# TODODay 5# TODODay 6: Chronal Coordinates#TODODay 7: The Sum of Its PartsDEBUG_FLAG = 0 import networkx as nx tests = """ Step C must be finished before step A can begin. Step C must be finished before step F can begin. Step A must be finished before step B can begin. Step A must be finished before step D can begin. Step B must be finished before step E can begin. Step D must be finished before step E can begin. Step F must be finished before step E can begin. """.strip().split("\n") def create_graph(l): graph = nx.DiGraph() for line in l: linesplit = line.split(' ') srcnd, trgnd = [linesplit[1], linesplit[-3]] if not srcnd in graph.nodes: log.debug(f"g add node {srcnd}") if not trgnd in graph.nodes: log.debug(f"g add node {trgnd}") graph.add_edge(srcnd, trgnd) log.info(f"graph-edges={sorted(list(graph.edges))}") return graph # still using networkx graph li, inspiration from user VikeStep: # [- 2018 Day 7 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/a3wmnl/2018_day_7_solutions/) def solve07a(l): graph = create_graph(l) nodes_lst = list(graph.nodes) log.debug(f"nodes: entry-order {str.join('', nodes_lst)}") nodes_lst = list(nx.topological_sort(graph)) log.debug(f"nodes: topo {str.join('', nodes_lst)}") nodes_lst = list(nx.algorithms.dag.lexicographical_topological_sort(graph)) log.info("nodes: lexico-topo {str.join('', nodes_lst)}") return str.join('', nodes_lst) solve07a(tests) ins = aoc.read_file_to_list('./in/day07.in') solve07a(ins) print("Day 07 b, # TODO")Day 8# TODODay 9# TODODay 10: The Stars Alignimport copy # for deepcopy class CellularWorldBase: """Base class for 2d-grid cellular world. E.g. for cellular automata.""" def __init__(self, world): """World object constructor, world has to be given as a list-of-lists of chars.""" self.world = world self.dim = [len(world[0]), len(world)] log.info(f'[CellularWorld] new dim={self.dim}') self.world = world def repr(self): """Return representation str (can be used for printing).""" l = [] for line in self.world: l.append( str.join('', line) ) return str.join("\n", l) def set_world(self, world): self.world = world self.dim = [len(world[0]), len(world)] def get_neighbors(self, x, y): """Get cell's surrounding neighbors, for cell iteration.""" neighbors = '' #for nx in range(x-1, x+2): # for ny in range(y-1, y+2): # if condition: # neighbors += self.world[ny][nx] return neighbors def iterate_cell(self): neighbors = self.get_neighbors(x, y) #val = self.world[y][x] #if cond1: # next_world[y][x] = '*' #elif cond2: # world2[y][x] = '#' return self.world[y][x] def iterate(self, n=1): for i in range(n): next_world = copy.deepcopy(self.world) for y in range(self.dim[1]): for x in range(self.dim[0]): nextworld[y][x] = self.iterate_cell(x, y) self.set_world(next_world) class CellularWorldDay10(CellularWorldBase): True tests = """ ........#............. ................#..... .........#.#..#....... ...................... #..........#.#.......# ...............#...... ....#................. ..#.#....#............ .......#.............. ......#............... ...#...#.#...#........ ....#..#..#.........#. .......#.............. ...........#..#....... #...........#......... ...#.......#.......... 
""".strip().split("\n") test = mapl(list, tests) tests = """ position=< 9, 1> velocity=< 0, 2> position=< 7, 0> velocity=<-1, 0> position=< 3, -2> velocity=<-1, 1> position=< 6, 10> velocity=<-2, -1> position=< 2, -4> velocity=< 2, 2> position=<-6, 10> velocity=< 2, -2> position=< 1, 8> velocity=< 1, -1> position=< 1, 7> velocity=< 1, 0> position=<-3, 11> velocity=< 1, -2> position=< 7, 6> velocity=<-1, -1> position=<-2, 3> velocity=< 1, 0> position=<-4, 3> velocity=< 2, 0> position=<10, -3> velocity=<-1, 1> position=< 5, 11> velocity=< 1, -2> position=< 4, 7> velocity=< 0, -1> position=< 8, -2> velocity=< 0, 1> position=<15, 0> velocity=<-2, 0> position=< 1, 6> velocity=< 1, 0> position=< 8, 9> velocity=< 0, -1> position=< 3, 3> velocity=<-1, 1> position=< 0, 5> velocity=< 0, -1> position=<-2, 2> velocity=< 2, 0> position=< 5, -2> velocity=< 1, 2> position=< 1, 4> velocity=< 2, 1> position=<-2, 7> velocity=< 2, -2> position=< 3, 6> velocity=<-1, -1> position=< 5, 0> velocity=< 1, 0> position=<-6, 0> velocity=< 2, 0> position=< 5, 9> velocity=< 1, -2> position=<14, 7> velocity=<-2, 0> position=<-3, 6> velocity=< 2, -1> """.strip().split("\n") import re from lib.nbodysystem import Body, NBodySystem def populate_sys(los): nbsys = NBodySystem() for line in los: nums = filterl(lambda it: it != '', re.split(r'[^\-\d]+', line)) nums = mapl(int, nums) p = [nums[0], nums[1], 0] v = [nums[2], nums[3], 0] log.debug(f"nbsys add body; p={p} v={v}") nbsys.add_body(Body(pos=p, vel=v)) return nbsys def get_extent(nbsys): posns = mapl(lambda it: it.pos, nbsys.bodies) x_posns = mapl(lambda it: it[0], posns) y_posns = mapl(lambda it: it[1], posns) x_min = min(x_posns) x_max = max(x_posns) y_min = min(y_posns) y_max = max(y_posns) d = {'x':[x_min, x_max], 'y':[y_min, y_max]} d['x_ofs'] = x_min d['y_ofs'] = y_min d['x_len'] = x_max - x_min + 1 d['y_len'] = y_max - y_min + 1 #log.trace(f"[get_extent] exten={exten}") return d def get_repr(nbsys): exten = get_extent(nbsys) x_len, y_len = [ exten['x_len'], exten['y_len'] ] x_ofs, y_ofs = [ exten['x_ofs'], exten['y_ofs'] ] r = [] for y in range(y_len): row = [] for x in range(x_len): row.append('.') r.append(row) for b in nbsys.bodies: #log.trace(f"represent body={b.pos}") x,y,z = b.pos r[y-y_ofs][x-x_ofs] = '#' #r[x+x_ofs][y+y_ofs] = '#' #r[0-y_ofs][0-x_ofs] = 'O' return str.join("\n", mapl(lambda it: str.join('', it), r)) def show_states(los, max_iters=10): nbsys = populate_sys(los) log.info(f"NBSystem {nbsys}") exten = get_extent(nbsys) log.info(f"NBSystem t0 extent @t=0: {exten}") log.info("@ t = 0 ::") if exten['x_len'] < 999 and exten['y_len'] < 999: log.info(f"\n{get_repr(nbsys)}") else: log.debug(" large-dimensions, no plot") for t in range(1, max_iters+1): nbsys.iterate_step() exten = get_extent(nbsys) crit = exten['x_len'] < 999 and exten['y_len'] < 999 if crit: log.debug(f"NBSys extent @t={t}: {exten}") elif t % 1_000 == 0: log.debug(f"NBSys extent @t={t}: {exten}") if crit: r = get_repr(nbsys) if t == 1 or "#####" in r: log.debug(f"NBSys @t={t} extent: {exten}") log.info(f"@ t = {t} ::") log.info(f"\n{r}") else: if t % 1_000 == 0: log.debug(f"@t={t} large-dimensions, no plot") log.info(f"terminated after {max_iters} steps") show_states(tests) ins = aoc.read_file_to_list('./in/day10.in') show_states(ins, max_iters=99_999)Day 11def calc_power_level(x, y, grid_serial_num): """ Find the fuel cell's rack ID, which is its X coordinate plus 10. Begin with a power level of the rack ID times the Y coordinate. 
Increase the power level by the value of the grid serial number (your puzzle input). Set the power level to itself multiplied by the rack ID. Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0). Subtract 5 from the power level. """ #log.trace(f"[calc_power_level({x}, {y}, {grid_serial_num})] called.") rack_id = x + 10 tmp = rack_id * y #log.trace(f" rack-id = X+10 = {rack_id} ; mul-by-Y = {tmp}") tmp += grid_serial_num #log.trace(f" add-grid_serial_num = {tmp}") tmp *= rack_id #log.trace(f" mul-by-rack_id = {tmp}") ltmp = list(str(tmp)) if len(ltmp) >= 3: pl = int(ltmp[-3]) else: pl = 0 #log.trace(f" hundreads-thereof = {pl}") pl -= 5 log.trace(f"[calc_power_level({x}, {y}, {grid_serial_num})] returns {pl}; rack_id={rack_id}") return pl def populate_grid(xdim=300, ydim=300, grid_serial_num=0): """Pupulate power grid. Grid-coordinates are 1-based here, standard=1..300 in each dimension.""" pgrid = {} for y in range(1, ydim+1): for x in range(1, xdim+1): pgrid[tuple([x,y])] = calc_power_level(x, y, grid_serial_num) return pgrid #log.setLevel(aoc.LOGLEVEL_TRACE) # logging.DEBUG #log.setLevel(logging.DEBUG) log.setLevel(logging.INFO) # power level of the fuel cell at 3,5 in a grid with serial number 8 => 4 assert( 4 == calc_power_level(3, 5, 8) ) # Fuel cell at 122,79, grid serial number 57: power level -5. assert( -5 == calc_power_level(122, 79, 57) ) # Fuel cell at 217,196, grid serial number 39: power level 0. assert( 0 == calc_power_level(217, 196, 39) ) #Fuel cell at 101,153, grid serial number 71: power level 4. assert( 4 == calc_power_level(101, 153, 71) ) def power_3x3_sum_at(x_start, y_start, pgrid): rct = [] for y in range(y_start, y_start+3): row = [] for x in range(x_start, x_start+3): row.append( pgrid[tuple([x,y])] ) rct.append(row) #log.trace(rct) #log.trace(sum(mapl(sum, rct))) log.debug(f"[power_3x3_sum_at({x_start}, {y_start}, pgrid)] returns {sum(mapl(sum, rct))}") return sum(mapl(sum, rct)) def find_opt_3x3_square(pgrid): pmax = -99_999 xmax = -1 ymax = -1 for x in range(1, 300+1-3): for y in range(1, 300+1-3): pn = power_3x3_sum_at(x, y, pgrid) if pn > pmax: pmax = pn xmax = x ymax = y elif pn == pmax: #raise Exception(f"tie at {x}, {y} with {pmax}") True return {'x':xmax, 'y':ymax, 'power':pmax} pgrid18 = populate_grid(xdim=300, ydim=300, grid_serial_num=18) assert( 29 == power_3x3_sum_at(33, 45, pgrid18) ) pgrid42 = populate_grid(xdim=300, ydim=300, grid_serial_num=42) assert( 30 == power_3x3_sum_at(21, 61, pgrid42) ) res = find_opt_3x3_square(pgrid18) log.info(f"pgrid18 opt={res}") res = find_opt_3x3_square(pgrid42) log.info(f"pgrid42 opt={res}") ins = int(aoc.read_file_to_str('./in/day11.in').strip()) pgrid_sol1 = populate_grid(xdim=300, ydim=300, grid_serial_num=ins) res = find_opt_3x3_square(pgrid_sol1) log.info(f"pgrid-{ins} opt-square={res}") import time def power_nxn_sum_at(num_len, x_start, y_start, pgrid): #rct = [] #for y in range(y_start, y_start+num_len): # row = [] # for x in range(x_start, x_start+num_len): # row.append( pgrid[tuple([x,y])] ) # rct.append(row) ##log.debug(f"[power_nxn_sum_at({num_len}, {x_start}, {y_start}, pgrid)] returns {sum(mapl(sum, rct))}") #tups = [] #for y in range(y_start, y_start+num_len): # for x in range(x_start, x_start+num_len): # #row.append( pgrid[tuple([x,y])] ) # tups.append(tuple([x,y])) #return sum(mapl(lambda it: pgrid[it] ,tups)) return sum( [ pgrid[tuple([x,y])] for y in range(y_start, y_start+num_len) for x in range(x_start, x_start+num_len) ] ) def 
find_opt_nxn_square(num_len, pgrid): start_tm = int(time.time()) pmax = -99_999 xmax = -1 ymax = -1 ict = 0 for xs in range(1, 300+1-num_len): for ys in range(1, 300+1-num_len): #pn = power_nxn_sum_at(num_len, xs, ys, pgrid) ict += 1 pn = sum( [ pgrid[tuple([x,y])] for y in range(ys, ys+num_len) for x in range(xs, xs+num_len) ] ) if pn > pmax: pmax = pn xmax = xs ymax = ys ict_str = f"{ict:,}".replace(',','_') fullct = ict*(300-num_len)*(300-num_len) fullct_str = f"{fullct:,}".replace(',','_') res = {'len':num_len, 'x':xmax, 'y':ymax, 'power':pmax, 'tm_needed': int(time.time()-start_tm), 'iters': ict_str, 'fullct':fullct_str} log.trace(f"find_opt_nxn_square({num_len}, pgrid) result={res}") return res def find_opt_any_square(pgrid): start_tm = int(time.time()) found_opts = [] pmax = -99_999 lmax = -1 last_pow = 0 for sqlen in range(1, 301): if sqlen in [1, 2, 3] or sqlen % 10 == 0: tm_needed = int(time.time()-start_tm) log.debug(f"[find_opt_any_square] calculating, at square-len={sqlen} @{tm_needed}s") found_opt = find_opt_nxn_square(sqlen, pgrid) found_opts.append(found_opt) if found_opt['power'] > pmax: pmax = found_opt['power'] lmax = sqlen tm_needed = int(time.time()-start_tm) log.debug(f"new max-power >{found_opt['x']},{found_opt['y']},{sqlen}< power={pmax} square-len={sqlen} @{tm_needed}s {found_opt}") if found_opt['power'] < 0 and last_pow < 0: log.debug(f"[find_opt_any_square] BREAK cycle, 2 consecutiv ngative max-powers @square-len={sqlen}") break last_pow = found_opt['power'] max_power = max(mapl(lambda it: it['power'], found_opts)) max_power_coord = filterl(lambda it: it['power']==max_power, found_opts)[0] tm_needed = int(time.time()-start_tm) max_power_coord['tm_total'] = tm_needed log.debug(f"[find_opt_any_square(pgrid)] res=>{max_power_coord['x']},{max_power_coord['y']},{max_power_coord['len']}< result={max_power_coord} after @{tm_needed}s") return max_power_coord #log.setLevel(aoc.LOGLEVEL_TRACE) # logging.DEBUG #log.setLevel(logging.DEBUG) #log.setLevel(logging.INFO) assert( 29 == find_opt_nxn_square(3, pgrid18)['power'] ) assert( 30 == find_opt_nxn_square(3, pgrid42)['power'] ) #log.setLevel(aoc.LOGLEVEL_TRACE) # logging.DEBUG #log.setLevel(logging.DEBUG) log.setLevel(logging.INFO) if EXEC_RESOURCE_HOGS: log.info(f"Day 11 testing calculating, please wait...") res = find_opt_any_square(pgrid18) log.info(f"Day 11 testing solution, grid-18: >{res['x']},{res['y']},{res['len']}< {res}") if EXEC_RESOURCE_HOGS: log.info(f"Day 11 testing calculating, please wait...") res = find_opt_any_square(pgrid42) log.info(f"Day 11 testing solution, grid-42: >{res['x']},{res['y']},{res['len']}< {res}") if EXEC_RESOURCE_HOGS: log.info(f"Day 11 b calculating, please wait...") res = find_opt_any_square(pgrid_sol1) log.info(f"Day 11 b solution: >{res['x']},{res['y']},{res['len']}< {res}")Day 12# TODODay 13# TODO ### Day 14: Chocolate Charts def find_improved_score(target_num): elf1 = 0 elf2 = 1 recipies = [3, 7] #num_new_recipies = 0 log.info(f"#0: num-recipes={len(recipies)}, recipies={recipies}") for i in range(2*target_num): new_recipy = recipies[elf1] + recipies[elf2] #found_recipies.add(new_recipy) digits = aoc.map_list(int, str(new_recipy)) #num_new_recipies += len(digits) for d in digits: recipies.append(d) elf1 = (elf1+1+recipies[elf1]) % len(recipies) elf2 = (elf2+1+recipies[elf2]) % len(recipies) #log.debug(f"#{i+1}: num-recipes={len(recipies)}") #log.debug(f"{len(digits)}, {digits}, {recipies}, elf1:{elf1} elf2:{elf2}, {len(recipies)}") #found_recipies) if len(recipies) >= target_num + 
10: res = str.join('', aoc.map_list(str, recipies))[target_num:target_num+10] # 0124515891 log.info(f"found: {res}") return res print( find_improved_score(9) ) print( find_improved_score(5) ) print( find_improved_score(18) ) print( find_improved_score(2018) ) ins = 846021 # << insert personal input here print( "Day 14 a solution:", find_improved_score(ins) ) print("Day 14 b") def find_num_recips_before(target_num): target_str = str(target_num) elf1 = 0 elf2 = 1 recipies = [3, 7] #num_new_recipies = 0 log.info(f"#0: num-recipes={len(recipies)}, recipies={recipies}") for i in range(1000*int(target_num)): new_recipy = recipies[elf1] + recipies[elf2] #found_recipies.add(new_recipy) digits = aoc.map_list(int, str(new_recipy)) #num_new_recipies += len(digits) for d in digits: recipies.append(d) elf1 = (elf1+1+recipies[elf1]) % len(recipies) elf2 = (elf2+1+recipies[elf2]) % len(recipies) #log_debug(f"#{i+1}: num-recipes={len(recipies)}") #log_debug(len(digits), digits, recipies, " elf1:", elf1, " elf2:", elf2, found_recipies) if i % 1_000_000 == 0: log.info(f"calculating, iter: {i}") recips_end_str = str.join('', aoc.map_list(str, recipies[-12:])) if target_str in recips_end_str: offset = 0 if not recips_end_str.endswith(target_str): recips_end_str = recips_end_str[0:-1] offset = 1 assert( recips_end_str.endswith(target_str) ) #recips_str = str.join('', lmap(str, recipies)) #res = recips_str.index(target_str) #log.info(f"length={len(recipies)}-{len(target_str)}") #" from {recips_str}") res = len(recipies) - len(target_str) - offset log.info(f"target-num={target_str}, found: idx={res} @iter={i}") #" from {recips_str}") return res raise Exception("not terminated with find-criterium") find_num_recips_before(51589) find_num_recips_before('01245') find_num_recips_before('92510') find_num_recips_before('59414') PERFORM_RESOURCE_HOGS = False if PERFORM_RESOURCE_HOGS: find_num_recips_before(ins)Day 15# TODODay 16test = """ Before: [3, 2, 1, 1] 9 2 1 2 After: [3, 2, 2, 1] """.strip().split("\n") mem_before = eval(test[0].replace('Before: ', '')) cpu_instruct = aoc.map_list(int, test[1].split(' ')) mem_after = eval(test[2].replace('After: ', '')) log.debug([mem_before, cpu_instruct, mem_after]) OPC, A, B, C = [0, 1, 2, 3] # positions def op_addr(instr, regs): """addr (add register) stores into register C the result of adding register A and register B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] + oregs[instr[B]] return oregs def op_addi(instr, regs): """addi (add immediate) stores into register C the result of adding register A and value B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] + instr[B] return oregs def op_mulr(instr, regs): """mulr (multiply register) stores into register C the result of multiplying register A and register B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] * oregs[instr[B]] return oregs def op_muli(instr, regs): """muli (multiply immediate) stores into register C the result of multiplying register A and value B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] * instr[B] return oregs def op_banr(instr, regs): """banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] & oregs[instr[B]] return oregs def op_bani(instr, regs): """bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] & instr[B] return oregs def 
op_borr(instr, regs): """borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] | oregs[instr[B]] return oregs def op_bori(instr, regs): """bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] | instr[B] return oregs def op_setr(instr, regs): """setr (set register) copies the contents of register A into register C. (Input B is ignored.)""" oregs = regs.copy() oregs[instr[C]] = oregs[instr[A]] return oregs def op_seti(instr, regs): """seti (set immediate) stores value A into register C. (Input B is ignored.)""" oregs = regs.copy() oregs[instr[C]] = instr[A] return oregs def op_gtir(instr, regs): """gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.""" oregs = regs.copy() if instr[A] > oregs[instr[B]]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs def op_gtri(instr, regs): """gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0.""" oregs = regs.copy() if oregs[instr[A]] > instr[B]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs def op_gtrr(instr, regs): """gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.""" oregs = regs.copy() if oregs[instr[A]] > oregs[instr[B]]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs def op_eqir(instr, regs): """eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.""" oregs = regs.copy() if instr[A] == oregs[instr[B]]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs def op_eqri(instr, regs): """eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.""" oregs = regs.copy() if oregs[instr[A]] == instr[B]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs def op_eqrr(instr, regs): """eqrr (equal register/register) sets register C to 1 if register A is equal to register B. 
Otherwise, register C is set to 0.""" oregs = regs.copy() if oregs[instr[A]] == oregs[instr[B]]: oregs[instr[C]] = 1 else: oregs[instr[C]] = 0 return oregs opcodes = [op_addr, op_addi, op_mulr, op_muli, op_banr, op_bani, op_borr, op_bori , op_setr, op_seti, op_gtir, op_gtri, op_gtrr, op_eqir, op_eqri, op_eqrr] def pp_opfun(opfun): return str(opfun).replace('= 3: log.debug(["found-test", opscount, mem_before, cpu_instruct, mem_after]) ct += 1 log.info(f"Day 16 solution: found fulfilling samples: {ct}") def pp_opfun(opfun): return str(opfun).replace(' 10: log.warn("BREAK HACK") break test = sample.split("\n") mem_before = eval(test[0].replace('Before: ', '')) cpu_instruct = aoc.map_list(int, test[1].split(' ')) mem_after = eval(test[2].replace('After: ', '')) op_num = cpu_instruct[OPC] res = get_ops_matching(mem_before, cpu_instruct, mem_after) if op_num in ops_dict: ops_dict[op_num] = ops_dict[op_num].intersection(res) else: ops_dict[op_num] = res opcodes = {} ict = 0 found_codes = True while(found_codes and ict < 20): ict += 1 log.debug(f"resolve-iter {ict}") log.debug("ops-dict {ops_dict}") found_codes = False for k, v in ops_dict.items(): if v is not None and len(v) == 1: opcodes[k] = list(v)[0] log.debug(f"found code {k}: {opcodes[k]}") found_codes = True for k in opcodes.keys(): if k in ops_dict: log.debug(f"removing {k} from maybes") del ops_dict[k] for k in ops_dict.keys(): for v in opcodes.values(): if v in ops_dict[k]: log.debug(f" removing elem {v} from maybe:{k}") ops_dict[k].remove(v) log.debug("opcodes!:: {opcodes}") if len(ops_dict.keys()) == 0: log.info(f"resolved all {len(opcodes.keys())} opcodes") break #return ops_dict return opcodes test = """ Before: [3, 2, 1, 1] 9 2 1 2 After: [3, 2, 2, 1] """.strip() find_op_relations([test]) ins1 = ins.split("\n\n\n")[0].strip().split("\n\n") ins2 = ins.split("\n\n\n")[1].strip().split("\n") #log_debug(ins1) op_funs = find_op_relations(ins1) #log_info(ins2) mem_before = [0, 0, 0, 0] memnext = mem_before.copy() for idx, line in enumerate(ins2): instruct = aoc.map_list(int, line.split(' ')) opfun = op_funs[instruct[OPC]] memnext = opfun(instruct, memnext) log.debug(f"#{idx} memnext={memnext} after {pp_opfun(opfun)} instruct={instruct}") res = [idx, memnext] print(f"Day 16 part b solution:, rrun-idx={idx}, mem-regs={memnext}, mem:0={memnext[0]}")Day 17# TODODay 18: Settlers of The North Poletests = """ .#.#...|#. .....#|##| .|..|...#. ..|#.....# #.#|||#|#| ...#.||... .|....|... ||...#|.#| |.||||..|. ...#.|..|. 
""".strip().split("\n") tests2 = [] for t in tests: tests2.append(list(t)) import copy # for deepcopy class CellularWorld: def __init__(self, world): """World object constructor, world has to be given as a list-of-lists of chars.""" self.world = world self.dim = [len(world[0]), len(world)] log.info(f'[CellularWorld] new dim={self.dim}') self.world = world def repr(self): """Return representation str (can be used for printing).""" l = [] for line in self.world: l.append( str.join('', line) ) return str.join("\n", l) def set_world(self, world): self.world = world self.dim = [len(world[0]), len(world)] def get_neighbors8(self, x, y): """Get cell's surrounding 8 neighbors, omitting boundaries.""" log.trace(f"[CellularWorld]:get_neighbors8({x},{y})") dim_x = self.dim[0] dim_y = self.dim[1] neighbors = '' for nx in range(x-1, x+2): for ny in range(y-1, y+2): if (nx >= 0 and nx < dim_x) and (ny >= 0 and ny < dim_y) and not (nx == x and ny == y): #log.info(f" neighb={[nx, ny]}") neighbors += self.world[ny][nx] return neighbors def iterate(self, n=1): for i in range(n): world2 = copy.deepcopy(self.world) for y in range(self.dim[1]): for x in range(self.dim[0]): val = self.world[y][x] neighbors = self.get_neighbors8(x, y) #log.trace(f"[{x},{y}]='{val}' nbs='{neighbors}'") if val == '.' and neighbors.count('|') >= 3: world2[y][x] = '|' elif val == '|' and neighbors.count('#') >= 3: world2[y][x] = '#' elif val == '#': if neighbors.count('#') >= 1 and neighbors.count('|') >= 1: world2[y][x] = '#' else: world2[y][x] = '.' self.set_world(world2) def find_cycle(self, max_iter=1_000): """This may only be called at initial state, before any iterations.""" seen = [world.repr] for i in range(max_iter): if i % 1_000 == 0: log.debug(f"iter# {i}, still running") world.iterate() world_repr = world.repr() if world_repr in seen: start_idx = seen.index(world_repr) log.info(f"found cycle @ iter={i+1}, seen-idx={start_idx}") return([start_idx, i+1]) else: seen.append(world_repr) raise Exception("no world iter cycle found") #t = CellularWorld([[]]) world = CellularWorld(tests2) log.info(f"world created: dim={world.dim}") log.info(f"\n{world.repr()}\n") for i in range(1,11): world.iterate() log.info(f"world iter {i}:") log.info(f"\n{world.repr()}\n") world_repr = world.repr() num_trees = world_repr.count('|') num_lumberyds = world_repr.count('#') log.info(f"test num-trees={num_trees}, num-lumberyds={num_lumberyds}, result={num_trees*num_lumberyds}") ins = aoc.map_list(list, aoc.read_file_to_list('./in/day18.in')) world = CellularWorld(ins) log.info(f"world created: dim={world.dim}") log.debug(f"\n{world.repr()}\n") for i in range(1,11): world.iterate() log.info(f"world iter {i}:") log.debug(f"\n{world.repr()}\n") world_repr = world.repr() num_trees = world_repr.count('|') num_lumberyds = world_repr.count('#') day18a_solution = num_trees*num_lumberyds log.info(f"Day 18 a solution: result={day18a_solution} from num-trees={num_trees}, num-lumberyds={num_lumberyds}") ins = aoc.map_list(list, aoc.read_file_to_list('./in/day18.in')) world = CellularWorld(ins) world_cycle = world.find_cycle() cycle_len = world_cycle[1] - world_cycle[0] log.info(f"cycle-len={cycle_len} from cycle={world_cycle}") world.set_world(ins) scores = {} for i in range(0, world_cycle[1]+3+28): world_repr = world.repr() num_trees = world_repr.count('|') num_lumberyds = world_repr.count('#') score = num_trees * num_lumberyds scores[i] = score world.iterate() log.info("finished scoring") log.debug(scores) def get_cycled_index(idx, cycle_lst): cycle_start, 
cycle_end_bound = cycle_lst cycle_len = cycle_end_bound - cycle_start cycle_end = cycle_end_bound-1 #log.debug(f"[get_cycled_index] cyle is {cycle_lst}, cycle ends-including @{cycle_end}, cycle-len={cycle_len}") if idx <= cycle_end: return idx else: #log.trace(idx-cycle_end_bound) return cycle_start + ((idx-cycle_end_bound) % cycle_len ) aoc.assert_msg( "day 18 b, solution of 1st part ok", day18a_solution == scores[10]) #557 => 557, 558 => 530, 559 => 531 for v in [529, 530, 531, 556, 557, 558, 559, 560 , 556+cycle_len, 557+cycle_len, 558+cycle_len, 559+cycle_len]: log.debug(f"in idx={v} out idx={get_cycled_index(v, world_cycle)}") tgt_iter = 1_000_000_000 # 1000000000 idx = get_cycled_index(tgt_iter, world_cycle) log.info(f"Day 18 b solution: score={scores[idx]} from tgt_iter={tgt_iter:9,.0f} target-index={idx}")Day 19: Go With The Flowimport time from datetime import datetime class CPU: OPC, A, B, C = [0, 1, 2, 3] # positions def __init__(self): self.flow_mode = -1 self.prog = [] self.mem = [0, 0, 0, 0, 0, 0] self.iptr = 0 self.ict = 0 self.state = 'INITED' self.opfuns = { 'addr':self.opc_addr, 'addi':self.opc_addi, 'mulr':self.opc_mulr, 'muli':self.opc_muli, 'banr':self.opc_banr, 'bani':self.opc_bani, 'borr':self.opc_borr, 'bori':self.opc_bori, 'setr':self.opc_setr, 'seti':self.opc_seti, 'gtir':self.opc_gtir, 'gtri':self.opc_gtri, 'gtrr':self.opc_gtrr, 'eqir':self.opc_eqir, 'eqri':self.opc_eqri, 'eqrr':self.opc_eqrr, } def prepare(self, los): idx = 0 if los[idx].startswith("#ip"): log.info(f"program-mode {los[0]}") self.flow_mode = int(los[0].split(" ")[1]) idx += 1 #cpu_instruct = aoc.map_list(int, test[1].split(' ') for i in range(idx, len(los)): cells = los[i].split(" ") self.prog.append([cells[0]] + mapl(int, cells[1:])) log.info(f"CPU prod={self.prog}") def interpret(self, steps=10_000_000_001): start_tm = int(time.time()) log.info(self.mem) self.state = 'RUNNING' tm = f"{int(time.time()-start_tm)}s @" + datetime.now().strftime("%H:%M:%S") log.info(f"cpu prog interpret started ict#{self.ict:,}, in-mem={self.mem}") for i in range(1, steps+1): self.ict += 1 instruct = self.prog[self.iptr] #log.debug(f"ict#{self.ict} iptr={self.iptr} instr={instruct} mem=[self.mem]") if self.flow_mode >= 0: self.mem[self.flow_mode] = self.iptr #op = f"self.op_{instruct[CPU.OPC]}({instruct}, {self.mem})" self.opfuns[instruct[CPU.OPC]](instruct) #log.debug(f"op={op} returns {self.mem}") if self.flow_mode >= 0: self.iptr = self.mem[self.flow_mode] self.iptr += 1 if self.iptr >= len(self.prog): tm = f"{int(time.time()-start_tm)}s @" + datetime.now().strftime("%H:%M:%S") self.state = 'TERMINATED' log.info(f"cpu prog terminated gracefully! {tm}" ) log.info(f" ict#{self.ict:,} curstep#{i:,} iptr={self.iptr} mem={self.mem}") return if self.ict % 10_000_000 == 0: tm = f"{int(time.time()-start_tm)}s @" + datetime.now().strftime("%H:%M:%S") log.info(f"cpu-prog running {int(time.time()-start_tm)}s, ict={self.ict:,} iptr={self.iptr} mem={self.mem}") if self.ict > 100_000_000_000: raise Exception("FAILSAFE") tm = f"{int(time.time()-start_tm)}s @" + datetime.now().strftime("%H:%M:%S") self.state = 'PAUSED' log.info(f"cpu prog interpret PAUSED ict#{self.ict:,}, end of interpret(), curstep#{i:,}! 
{tm}") def opc_addr(self, instr): """addr (add register) stores into register C the result of adding register A and register B.""" self.mem[instr[3]] = self.mem[instr[1]] + self.mem[instr[2]] def opc_addi(self, instr): """addi (add immediate) stores into register C the result of adding register A and value B.""" self.mem[instr[3]] = self.mem[instr[1]] + instr[2] def opc_mulr(self, instr): """mulr (multiply register) stores into register C the result of multiplying register A and register B.""" self.mem[instr[3]] = self.mem[instr[1]] * self.mem[instr[2]] def opc_muli(self, instr): """muli (multiply immediate) stores into register C the result of multiplying register A and value B.""" self.mem[instr[3]] = self.mem[instr[1]] * instr[2] def opc_banr(self, instr): """banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.""" self.mem[instr[3]] = self.mem[instr[1]] & self.mem[instr[2]] def opc_bani(self, instr): """bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.""" self.mem[instr[3]] = self.mem[instr[1]] & instr[2] def opc_borr(self, instr): """borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.""" self.mem[instr[3]] = self.mem[instr[1]] | self.mem[instr[2]] def opc_bori(self, instr): """bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.""" self.mem[instr[3]] = self.mem[instr[1]] | instr[2] def opc_setr(self, instr): """setr (set register) copies the contents of register A into register C. (Input B is ignored.)""" self.mem[instr[3]] = self.mem[instr[1]] def opc_seti(self, instr): """seti (set immediate) stores value A into register C. (Input B is ignored.)""" self.mem[instr[3]] = instr[1] def opc_gtir(self, instr): """gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.""" if instr[1] > self.mem[instr[2]]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 def opc_gtri(self, instr): """gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0.""" if self.mem[instr[1]] > instr[2]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 def opc_gtrr(self, instr): """gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.""" if self.mem[instr[1]] > self.mem[instr[2]]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 def opc_eqir(self, instr): """eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.""" if instr[1] == self.mem[instr[2]]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 def opc_eqri(self, instr): """eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.""" if self.mem[instr[1]] == instr[2]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 def opc_eqrr(self, instr): """eqrr (equal register/register) sets register C to 1 if register A is equal to register B. 
Otherwise, register C is set to 0.""" if self.mem[instr[1]] == self.mem[instr[2]]: self.mem[instr[3]] = 1 else: self.mem[instr[3]] = 0 tests = """ #ip 0 seti 5 0 1 seti 6 0 2 addi 0 1 0 addr 1 2 3 setr 1 0 0 seti 8 0 4 seti 9 0 5 """.strip().split("\n") log.setLevel( aoc.LOGLEVEL_TRACE ) log.debug(f"effective-log-level={log.getEffectiveLevel()}") log.debug(tests) cpu = CPU() cpu.prepare(tests) cpu.interpret() assert( [6, 5, 6, 0, 0, 9] == cpu.mem ) log.setLevel( logging.INFO ) log.info(f"effective-log-level={log.getEffectiveLevel()}") ins = aoc.read_file_to_list('./in/day19.in') log.debug(ins) cpu = CPU() cpu.prepare(ins) cpu.interpret() log.info(f"Day 19 a result-mem={cpu.mem}; cpu-state={cpu.state}") if EXEC_DOUBTFUL: # this currently fails after _hours_ of runtime cpu = CPU() cpu.prepare(ins) cpu.mem[0] = 1 log.info(f" set-init-mem={cpu.mem}") cpu.interpret(steps=1_000_000) log.info(f"Day 19 b result-mem={cpu.mem}, cpu-state={cpu.state}")Scan and find Busca en primer lugar las cantidades de variables, para luego encontrar el tipo de estas a partir de la consulta de un modelo victima. Load best modelimport joblib as jb import numpy as np model=jb.load('rfDefense2021.sav') print(model) datafake=np.array([[0]]) try: model.predict(datafake) except Exception as ex: text = "An exception of type {} occurred. Arguments:\n{}" message = text.format(type(ex).__name__, ex.args) print (message)An exception of type ValueError occurred. Arguments: ('X has 1 features, but DecisionTreeClassifier is expecting 10 features as input.',)ahora se testea el tipo de variablesdatafake=np.array([["0","0","1","0","500",0,0,0,0,0]]) datafake.shape try: print(model.predict(datafake)) except Exception as ex: text = "An exception of type {} occurred. Arguments:\n{}" message = text.format(type(ex).__name__, ex.args) print (message)['benign']El modelo recibe datos numericos solamente aunque estos esten en formato string. 
reconoce el error de tipo cuando:- al menos una variable es string y contiene letras- todas las variables son string y son númerosno lo reconoce cuando:- hay enteros y string que contienen numerosfrom itertools import product def bruteForce(): nMax=14 numericValue=0 categoricalValue='word' bruteTest=np.array([]) valuesPosible=[numericValue,categoricalValue] result='' for x in range(nMax): bruteTest=np.append(bruteTest, categoricalValue) bruteTestMix=np.copy(bruteTest) for y in product(valuesPosible, repeat=x): bruteTestMix=np.asarray(y) print(bruteTestMix.shape) print(bruteTestMix) try: #print(model.predict(bruteTestMix.reshape(1,-1))) result=model.predict(bruteTestMix.reshape(1,-1)) break except Exception as ex: print ('fault') if result!='': return result print(bruteForce())(0,) [] fault (1,) [0] fault (1,) ['word'] fault (2,) [0 0] fault (2,) ['0' 'word'] fault (2,) ['word' '0'] fault (2,) ['word' 'word'] fault (3,) [0 0 0] fault (3,) ['0' '0' 'word'] fault (3,) ['0' 'word' '0'] fault (3,) ['0' 'word' 'word'] fault (3,) ['word' '0' '0'] fault (3,) ['word' '0' 'word'] fault (3,) ['word' 'word' '0'] fault (3,) ['word' 'word' 'word'] fault (4,) [0 0 0 0] fault (4,) ['0' '0' '0' 'word'] fault (4,) ['0' '0' 'word' '0'] fault (4,) ['0' '0' 'word' 'word'] fault (4,) ['0' 'word' '0' '0'] fault (4,) ['0' 'word' '0' 'word'] fault (4,) ['0' 'word' 'word' '0'] fault (4,) ['0' 'word' 'word' 'word'] fault (4,) ['word' '0' '0' '0'] fault (4,) ['word' '0' '0' 'word'] fault (4,) ['word' '0' 'word' '0'] fault (4,) ['word' '0' 'word' 'word'] fault (4,) ['word' 'word' '0' '0'] fault (4,) ['word' 'word' '0' 'word'] fault (4,) ['word' 'word' 'word' '0'] fault (4,) ['word' 'word' 'word' 'word'] fault (5,) [0 0 0 0 0] fault (5,) ['0' '0' '0' '0' 'word'] fault (5,) ['0' '0' '0' [...]Tutorial: Using `fastai.data` low-level APIs> Using `DataSource`, `Pipeline`, `TfmdList`, `TfmOver`, and `Transform`from local.imports import * from local.test import * from local.core import * from local.data.pipeline import * from local.data.source import * from local.data.core import * from local.vision.core import * from local.data.external import * from PIL.ImageFile import ImageFilesetupdef mk_tensor(im): return tensor(array(im))[None] @patch def dihedral(im:ImageFile, idx=0): return im.transpose(idx-1) if idx>=1 else im def rand_dihedral(im): return im.dihedral(random.randint(0,7)) def normalize (o,m,s): return (o-m)/s def denormalize(o,m,s): return (o*s)+m def decode_vocab(o, v): return [v[o_] for o_ in o] source = untar_data(URLs.MNIST_TINY)/'train' items = get_image_files(source) fn = items[0] img = PIL.Image.open(fn) m,s = 0.1,0.3 imgt = mk_tensor(img) lbls = items.mapped(parent_label) itos,stoi = uniqueify(lbls, bidir=True, sort=True)v5vocab = SimpleNamespace(itos=itos,stoi=stoi) def _get_types(func): sig = inspect.signature(func) t_out = sig.return_annotation if sig.return_annotation != inspect._empty else None t_in = [p.annotation if p.annotation != inspect._empty else None for p in sig.parameters.values() if p.default == inspect._empty and p.kind != inspect._VAR_KEYWORD] # if not t_out: t_out = t_in[0] return (len(t_in) > 1,t_out) def _check_same(t1, t2): assert t1 is None or t2 is None or t1 == t2 return t2 if t1 is None else t1 class Transform(metaclass=PrePostInitMeta): def __init__(self, encodes=None): if encodes is not None: self.encodes = encodes def __post_init__(self): is_tuple_in, t1 = _get_types(self.encodes) if hasattr(self, 'encodes') else (False,None) is_tuple_out,t2 = _get_types(self.decodes) if 
hasattr(self, 'decodes') else (False,None) assert is_tuple_in == is_tuple_out self.type = _check_same(t1, t2) self.is_tuple = is_tuple_in def __getattr__(self,k): if k not in ['encodes', 'decodes', 'decode']: def _inner(x, *args, **kwargs): return getattr(self.type, k, noop)(x, *args, **kwargs) return _inner else: raise AttributeError def _apply(self, n, o): if is_listy(self.type) and not self.is_tuple: old_type = self.type res = [] for o_,t in zip(o,old_type): self.type=t res.append(getattr(self, n, noop)(o_)) self.type = old_type return tuple(res) return getattr(self, n, noop)(*L(o)) def __call__(self, o): return self._apply('encodes', o) def decode(self, o): return self._apply('decodes', o) class String(): @staticmethod def show(o, ctx=None, **kwargs): return show_title(str(o), ctx=ctx) String.show("3") class Categorify(Transform): def __init__(self, vocab): self.vocab = vocab def encodes(self, s): return self.vocab.stoi[getattr(s, 'data', s)] def decodes(self, i) -> String: return self.vocab.itos[i] tst = Categorify(vocab) tst.type class TensorImage(): @staticmethod def show(o, ctx=None, **kwargs): return show_image(to_cpu(o), ctx=ctx, **kwargs) def mk_tensor(im) -> TensorImage: return tensor(array(im))[None] def compose_tfms(x, tfms, func_nm='__call__', reverse=False): if reverse: tfms = reversed(tfms) for tfm in tfms: x = getattr(tfm,func_nm,noop)(x) return x class Pipeline(): def __init__(self, *tfms): self.tfms = [t if isinstance(t, Transform) else Transform(t) for t in tfms] def setup(self, t=None): if len(self.tfms) == 0: self.final_t = t else: if self.tfms[0].type is None: self.tfms[0].type = t for t_prec,t in zip(self.tfms[:-1],self.tfms[1:]): if t.type is None: t.type = t_prec.type self.final_t = self.tfms[-1].type def __call__(self, o): return compose_tfms(o, self.tfms) def decode (self, i): return compose_tfms(i, self.tfms, func_nm='decode', reverse=True) def show(self, o, ctx=None, **kwargs): r_tfms = list(reversed(self.tfms)) for i,tfm in enumerate(r_tfms): o = tfm.decode(o) if hasattr(tfm.type, 'show') and (i==len(r_tfms)-1 or r_tfms[i+1].type!=tfm.type): #tfm.type knows how to show AND is the first tfm with this type return tfm.type.show(o, ctx=ctx, **kwargs) class MNIST(): def __init__(self, items, pipes, tuple_pipe): self.items,self.pipes,self.tuple_pipe = items,pipes,tuple_pipe for p in self.pipes: p.setup() self.tuple_pipe.setup(t = [p.final_t for p in self.pipes]) def __getitem__(self, i): return self.tuple_pipe([p(self.items[i]) for p in self.pipes]) def __len__(self): return len(items) def show(self, o, ctx=None, **kwargs): o = self.tuple_pipe.decode(o) for o_,p in zip(o,self.pipes): ctx = p.show(o_, ctx=ctx, **kwargs) return ctx def show_at(self, i, ctx=None, **kwargs): return self.show(self[i], ctx=ctx, **kwargs) pipe_x = Pipeline(PIL.Image.open, rand_dihedral, mk_tensor) pipe_y = Pipeline(parent_label, Categorify(vocab)) ds = MNIST(items, [pipe_x, pipe_y], Pipeline()) ds.show_at(0, cmap="Greys", figsize=(1,1)) ds.tuple_pipe.final_t dl = DataLoader(ds, batch_size=9) @patch def floatify(x:TensorImage): return x.float()/255. 
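# Note: `@patch` attaches `floatify` to the TensorImage type defined above; the function itself simply rescales uint8 pixel values from the 0-255 range to floats in [0, 1], so the Normalize transform defined next operates on floating-point images.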
class Normalize(Transform): def __init__(self, m, s): self.m,self.s = m,s def encodes(self, o)->TensorImage: return (o-self.m)/self.s def decodes(self, o): return (o*self.s)+self.m norm_mnist = Normalize(m,s) imgf = floatify(imgt) imgf.mean(),imgf.std() imgn = norm_mnist(imgf) imgn.mean(),imgn.std() imgf2 = norm_mnist.decode(imgn) imgf2.mean(),imgf2.std() norm_mnist = Normalize(m,s) dl_pipe = Pipeline(to_device, floatify, norm_mnist) class MNIST_DL(): def __init__(self, dl, f): self.dl,self.f = dl,f t = getattr(self.dl.dataset.tuple_pipe, 'final_t', None) if hasattr(self.dl.dataset, 'tuple_pipe') else None self.f.setup(t=t) def __iter__(self): return (self.f(b) for b in self.dl) def __len__(self): return len(self.dl) def show_batch(self, b, max_rows=10, ctxs=None, **kwargs): b = self.f.decode(b) rows = itertools.islice(zip(*L(b)), max_rows) if ctxs is None: ctxs = [None] * len(b[0] if is_iter(b[0]) else b) for o,ctx in zip(rows,ctxs): self.dl.dataset.show(o, ctx=ctx, **kwargs) mdl = MNIST_DL(dl, dl_pipe) b = next(iter(mdl)) b[0].shape,b[1].shape, b[0].mean(), b[0].std() assert b[0].min()<0 mdl.f.final_t fig,axs = plt.subplots(3,3) mdl.show_batch(b, cmap="Greys", figsize=(4,4), ctxs=axs.flat)v6class Transform(PrePostInit): def __init__(self,encode=None,decode=None): if encode: self.encode = encode if decode: self.decode = decode @patch def powx(x:math, a): return math.pow(x,a) @patch def powx(x:torch, a): return torch.pow(x,a) class Add1(Transform): def encode(self,x): return x+1 def decode(self,x): return x-1 def mk_torch(x)->torch: return tensor(x) def mk_int (x)->math : return x class Func(): def __init__(self, nm, *args, **kwargs): self.nm,self.args,self.kwargs = nm,args,kwargs def __repr__(self): return f'sig: {self.nm}({self.args}, {self.kwargs})' def __call__(self,t): f = getattr(t,self.nm) if not (self.args or self.kwargs): return f return partial(f, *self.args, **self.kwargs) class SelfFunc(): def __init__(self, nm, *args, **kwargs): self.nm,self.args,self.kwargs = nm,args,kwargs def __repr__(self): return f'self: {self.nm}({self.args}, {self.kwargs})' def __call__(self, o): return getattr(o,self.nm)(*self.args, **self.kwargs) class _Sig(): def __getattr__(self,k): def _inner(*args, **kwargs): return Func(k, *args, **kwargs) return _inner class _SelfFunc(): def __getattr__(self,k): def _inner(*args, **kwargs): return SelfFunc(k, *args, **kwargs) return _inner Sig = _Sig() Self = _SelfFunc() pipe_funcs = [Add1(), (Sig.powx(a=2), 'sqrt'), mk_torch, (Self.reciprocal(),Self.reciprocal())] pf1 = [mk_int ] + pipe_funcs pf2 = [mk_torch] + pipe_funcs def mk_func(f, t): if isinstance(f,str ): f = Func(f) if isinstance(f,Func): f = f(t) return f def mk_tfm(f,t): if not is_listy(f): f = (f,None) return Transform(mk_func(f[0],t), mk_func(f[1],t)) def compose_tfms(x, tfms, func_nm='encode', reverse=False): if reverse: tfms = reversed(tfms) for tfm in tfms: x = getattr(tfm,func_nm,noop)(x) return x def _get_ret(func): ann = getattr(func,'__annotations__', None) if not ann: return None return ann.get('return') class Pipeline(): def __init__(self, funcs): self.fs = [] self.t = None for f in funcs: if not isinstance(f,Transform): f = mk_tfm(f, self.t) self.fs.append(f) self.t = _get_ret(f.encode) or self.t def __call__(self, o): return compose_tfms(o, self.fs) def decode (self, i): return compose_tfms(i, self.fs, func_nm='decode', reverse=True) p1 = Pipeline(pf1) a1 = p1(1.5); a1 p1.decode(a1) p2 = Pipeline(pf2) a2 = p2(tensor(1.5)); a2 p2.decode(a2)data process5897列: 第一列是数据编号,其余数据Hyb值22284行: 
第一行是表头,其余为数据(5896, 22283)最终的数据即5896个instance,22283个featurewith open('./../Gene_Chip_Data/microarray.original.txt','r') as fin: lines = fin.readlines() lines[0] print lines[0]"ProbeSet ID" "RMA_Signal (Hyb_1)" "RMA_Signal (Hyb_2)" "RMA_Signal (Hyb_3)" "RMA_Signal (Hyb_4)" "RMA_Signal (Hyb_5)" "RMA_Signal (Hyb_6)" "RMA_Signal (Hyb_7)" "RMA_Signal (Hyb_8)" "RMA_Signal (Hyb_9)" "RMA_Signal (Hyb_10)" "RMA_Signal (Hyb_11)" "RMA_Signal (Hyb_12)" "RMA_Signal (Hyb_13)" "RMA_Signal (Hyb_14)" "RMA_Signal (Hyb_15)" "RMA_Signal (Hyb_16)" "RMA_Signal (Hyb_17)" "RMA_Signal (Hyb_18)" "RMA_Signal (Hyb_19)" "RMA_Signal (Hyb_20)" "RMA_Signal (Hyb_21)" "RMA_Signal (Hyb_22)" "RMA_Signal (Hyb_23)" "RMA_Signal (Hyb_24)" "RMA_Signal (Hyb_25)" "RMA_Signal (Hyb_26)" "RMA_Signal (Hyb_27)" "RMA_Signal (Hyb_28)" "RMA_Signal (Hyb_29)" "RMA_Signal (Hyb_30)" "RMA_Signal (Hyb_31)" "RMA_Signal (Hyb_32)" "RMA_Signal (Hyb_33)" "RMA_Signal (Hyb_34)" "RMA_Signal (Hyb_35)" "RMA_Signal (Hyb_36)" "RMA_Signal (Hyb_37)" "RMA_Signal (Hyb_38)" "RMA_Signal (Hyb_39)" "RMA_Signal (Hyb_40)" "RMA_Signal (Hyb_41)" "RMA_Signal (Hyb_42)" "RMA_Signal (Hyb_43)" "RMA_Signal (Hyb_44)" "RMA_Signal (Hyb_45)" "RMA_[...]确认是这5896列是从小到大排列的import numpy as np # 需要一个5896*22283的array # 但是先用22283*5896的 data=np.zeros([22283,5896],dtype='float64') print lines[1][0:10] print lines[2][0:10] print lines[3][0:10] print lines[4][0:10] import re # print lines[1] data_1=lines[1].split(' ')[1:] len(data_1) data_1 data[0]=np.array(data_1) data for i in range(1,22284): data[i-1]=np.array(lines[i].split(' ')[1:]) data.transpose() data_out=data.T print data_out data_out.shape # output the raw data data_out.tofile("./../data/all_raw_data.bin")label dataimport pandas as pd df=pd.read_csv('./../data/label.csv') df colums=df.columns colums for name in colums: print "==================================" uni=df[name].unique() print name,len(uni) print df[name].value_counts() """ ================================== Characteristics [BioSourceType] 13 frozen_sample 794 blood 636 frozen 585 biopsy 395 fresh_sample 252 frozen tissue 233 frozen, biopsy 79 Name: Characteristics [BioSourceType], dtype: int64 ================================== Characteristics [DiseaseState] 194 breast tumor 535 acute myeloid leukemia 293 B-cell lymphoma, dlbcl 166 breast cancer 139 acute lymphoblastic leukemia, chemotherapy response 129 Huntington's disease 113 bone marrow relapse 113 germ cell tumor 101 brain tumor 100 precursor T lymphoblastic leukemia 99 control brain 87 acute lymphoblastic leukemia 81 prostate cancer 71 normal 67 colorectal adenocarcinoma 65 breast tumor, luminal 62 Name: Characteristics [DiseaseState], Length: 194, dtype: int64 ====================================================== Characteristics [Sex] 8 4221 male 1036 female 500 femalemale 58 unknown_sex 38 mixed_sex 24 males 14 hermaphrodite 5 Name: Characteristics [Sex], dtype: int64 """ ndf=df[['Hybridization Name','Material Type','Characteristics [Sex]','Characteristics [DiseaseState]','Characteristics [BioSourceType]']] ndf.columns = ['Hyb', 'MaterialType', 'Sex', 'DiseaseState', 'BioSourceType'] ndf for i in range(5896): print ndf.iloc[i]['Hyb'][4:],i if int(ndf.iloc[i]['Hyb'][4:])!=i+1: print "1" # 是按照顺序来的 """ ================================== Characteristics [BioSourceType] 13 frozen_sample 794 blood 636 frozen 585 biopsy 395 fresh_sample 252 frozen tissue 233 frozen, biopsy 79 Name: Characteristics [BioSourceType], dtype: int64 """ for i in range(5896): # 7分类 label=0 name = ndf.iloc[i]['BioSourceType'] if name == 
'frozen_sample': label= 1 elif name == 'blood': label= 2 elif name == 'frozen': label= 3 elif name == 'biopsy': label= 4 elif name == 'fresh_sample': label= 5 elif name == 'frozen tissue': label= 6 elif name == 'frozen, biopsy': label= 7 else: label=0 ndf.iloc[i]['BioSourceType']=label """ Characteristics [Sex] 8 4221 male 1036 female 500 Name: Characteristics [Sex], dtype: int64 """ for i in range(5896): # 2分类 label=0 name = ndf.iloc[i]['Sex'] if name == 'male': label= 1 elif name == 'female': label= 2 else: label=0 ndf.iloc[i]['Sex']=label """ ================================== Characteristics [DiseaseState] 194 breast tumor 535 acute myeloid leukemia 293 B-cell lymphoma, dlbcl 166 breast cancer 139 acute lymphoblastic leukemia, chemotherapy response 129 Huntington's disease 113 bone marrow relapse 113 germ cell tumor 101 brain tumor 100 precursor T lymphoblastic leukemia 99 control brain 87 acute lymphoblastic leukemia 81 prostate cancer 71 normal 67 colorectal adenocarcinoma 65 breast tumor, luminal 62 Name: Characteristics [DiseaseState], Length: 194, dtype: int64 """ for i in range(5896): # 16分类 label=0 name = ndf.iloc[i]['DiseaseState'] if name == 'breast tumor': label= 1 elif name == 'acute myeloid leukemia': label= 2 elif name == 'B-cell lymphoma, dlbcl': label= 3 elif name == 'breast cancer': label= 4 elif name == 'acute lymphoblastic leukemia, chemotherapy response': label= 5 elif name == "Huntington's disease": label= 6 elif name == 'bone marrow relapse': label= 7 elif name == 'germ cell tumor': label= 8 elif name == 'brain tumor': label= 9 elif name == 'precursor T lymphoblastic leukemia': label= 10 elif name == 'control brain': label= 11 elif name == 'acute lymphoblastic leukemia': label= 12 elif name == 'prostate cancer': label= 13 elif name == 'normal': label= 14 elif name == 'colorectal adenocarcinoma': label= 15 elif name == 'breast tumor, luminal': label= 16 else: label=0 ndf.iloc[i]['DiseaseState']=label """ Material Type 2 organism_part 4754 cell_line 1142 Name: Material Type, dtype: int64 """ for i in range(5896): # 16分类 label=0 name = ndf.iloc[i]['MaterialType'] if name == 'organism_part': label= 1 elif name == 'cell_line': label= 2 else: label=0 ndf.iloc[i]['MaterialType']=label for i in range(5896): Hyb = ndf.iloc[i]['Hyb'] ndf.iloc[i]['Hyb']=int(Hyb[4:]) ndf这个还只是最初步的数据处理,还需要划分训练集ndf.to_csv('./../data/all_label.csv',index=False)fuzzy-searchFuzzy search modules for searching lists of words in low quality OCR and HTR text. Usagefrom fuzzy_search.fuzzy_phrase_searcher import FuzzyPhraseSearcher from fuzzy_search.fuzzy_phrase_model import PhraseModel # highger matching thresholds for higher quality OCR/HTR (higher precision, recall should be good anyway) # lower matching thresholds for lower quality OCR/HTR (higher recall, as that's the main problem) config = { "char_match_threshold": 0.6, "ngram_threshold": 0.5, "levenshtein_threshold": 0.6, "ignorecase": False, "max_length_variance": 3, "ngram_size": 2, "skip_size": 2, } # initialize a new searcher instance with the config fuzzy_searcher = FuzzyPhraseSearcher(config) # create a list of domain phrases domain_phrases = [ # terms for the chair and attendants of a meeting "PRAESIDE", "PRAESENTIBUS", # some weekdays in Latin "Veneris", "Mercurii", # some date phrase where any date in January 1725 should match "den .. 
Januarii 1725" ] phrase_model = PhraseModel(phrases=domain_phrases) # register the keywords with the searcher fuzzy_searcher.index_phrase_model(phrase_model) # take some example texts: meetings of the Dutch States General in January 1725 text1 = "ie Veucris den 5. Januaris 1725. PR&ASIDE, . PRASENTIEBUS, , , Torck , met een extraordinaris Gedeputeerde uyt de Provincie van Gelderlandt. Van Maasdam , vanden Boeizelaar , Raadtpenfionaris van Hoornbeeck , met een extraordinaris Gedeputeerde uyt de Provincie van Hollandt ende Welt-Vrieslandt. Velters, Ockere , Noey; van Hoorn , met een extraordinaris Gedeputeerde uyt de Provincie van Zeelandt. Van Renswoude , van Voor{t. Van Schwartzenbergh, , {elmuden. Van Iddekinge ‚ van Tamminga." text2 = "Mercuri: den 10. Jangarii, 1725. ia PRESIDE, Den Heere an Iddekinge. PRA&SENTIBUS, De Heeren /an Welderen , van Dam, van Wynbergen, Torck, met een extraordinaris Gedeputeerde uyt de Provincie van Gelderland. Van Maasdam , Raadtpenfionaris van Hoorn=beeck. Velters, Ockerfe, Noey. Ta, van Renswoude. , Vegilin, ’ Bentinck, van I(elmaden. Van Tamminga."The `find_matches` method returns match objects:# look for matches in the first example text for match in fuzzy_searcher.find_matches(text1): print(match)Match(phrase: "Veneris", variant: "Veneris",string: "Veucris", offset: 3) Match(phrase: "den .. Januarii 1725", variant: "den .. Januarii 1725",string: "den 5. Januaris 1725.", offset: 11) Match(phrase: "PRAESIDE", variant: "PRAESIDE",string: "PR&ASIDE,", offset: 33) Match(phrase: "PRAESENTIBUS", variant: "PRAESENTIBUS",string: "PRASENTIEBUS,", offset: 63)Printing the matches directly yields the following output:# look for matches in the first example text for match in fuzzy_searcher.find_matches(text1): print(match)Match(phrase: "Veneris", variant: "Veneris",string: "Veucris", offset: 3) Match(phrase: "den .. Januarii 1725", variant: "den .. Januarii 1725",string: "den 5. Januaris 1725.", offset: 11) Match(phrase: "PRAESIDE", variant: "PRAESIDE",string: "PR&ASIDE,", offset: 33) Match(phrase: "PRAESENTIBUS", variant: "PRAESENTIBUS",string: "PRASENTIEBUS,", offset: 63)Alternatively, each match object can generate a JSON representation of the match containing all information:# look for matches in the first example text for match in fuzzy_searcher.find_matches(text1): print(match.json()){'phrase': 'Veneris', 'variant': 'Veneris', 'string': 'Veucris', 'offset': 3, 'match_scores': {'char_match': 0.7142857142857143, 'ngram_match': 0.625, 'levenshtein_similarity': 0.7142857142857143}} {'phrase': 'den .. Januarii 1725', 'variant': 'den .. Januarii 1725', 'string': 'den 5. 
Januaris 1725.', 'offset': 11, 'match_scores': {'char_match': 0.95, 'ngram_match': 0.7619047619047619, 'levenshtein_similarity': 0.8571428571428572}} {'phrase': 'PRAESIDE', 'variant': 'PRAESIDE', 'string': 'PR&ASIDE,', 'offset': 33, 'match_scores': {'char_match': 0.875, 'ngram_match': 0.5555555555555556, 'levenshtein_similarity': 0.6666666666666667}} {'phrase': 'PRAESENTIBUS', 'variant': 'PRAESENTIBUS', 'string': 'PRASENTIEBUS,', 'offset': 63, 'match_scores': {'char_match': 1.0, 'ngram_match': 0.6923076923076923, 'levenshtein_similarity': 0.7692307692307692}}Running the searcher on the second text:# look for matches in the second example text for match in fuzzy_searcher.find_matches(text2): print(match.json()){'phrase': 'Mercurii', 'variant': 'Mercurii', 'string': 'Mercuri:', 'offset': 0, 'match_scores': {'char_match': 0.875, 'ngram_match': 0.7777777777777778, 'levenshtein_similarity': 0.875}} {'phrase': 'den .. Januarii 1725', 'variant': 'den .. Januarii 1725', 'string': 'den 10. Jangarii, 1725.', 'offset': 9, 'match_scores': {'char_match': 0.95, 'ngram_match': 0.7142857142857143, 'levenshtein_similarity': 0.782608695652174}} {'phrase': 'PRAESIDE', 'variant': 'PRAESIDE', 'string': 'PRESIDE,', 'offset': 36, 'match_scores': {'char_match': 0.875, 'ngram_match': 0.6666666666666666, 'levenshtein_similarity': 0.75}} {'phrase': 'PRAESENTIBUS', 'variant': 'PRAESENTIBUS', 'string': 'PRA&SENTIBUS,', 'offset': 69, 'match_scores': {'char_match': 0.9166666666666666, 'ngram_match': 0.7692307692307693, 'levenshtein_similarity': 0.8461538461538461}}Match objects can also generate Web Annotation representations:# look for matches in the second example text text2_with_id = { "text": text2, "id": "urn:republic:3783_0076:page=151:para=4" } matches = fuzzy_searcher.find_matches(text2_with_id) import json print(json.dumps(matches[0].as_web_anno(), indent=2)) %reload_ext autoreload %autoreload 2 from fuzzy_search.fuzzy_phrase_searcher import FuzzyPhraseSearcher # init searcher with default parameter settings fuzzy_searcher = FuzzyPhraseSearcher() # register phrase you want to search fuzzy_searcher.index_phrases(['Makelaars', 'Tabak', 'Koffie']) # A text with OCR mistakes text = 'De Makelaets sullen verkopen twee balen Tobacco en Javaansche Koffy.' # Find all fuzzy matches fuzzy_searcher.find_matches(text) config = { # these thresholds work when there are few OCR errors "char_match_threshold": 0.8, "ngram_threshold": 0.6, "levenshtein_threshold": 0.8, # Is upper/lowercase a meaningful signal? "ignorecase": False, # should matches follow word boundaries? "use_word_boundaries": False, # for phrases that have variant phrasings "include_variants": False, # avoid matching with similar but different phrases "filter_distractors": False, # matching string can be lower/shorter than prhase "max_length_variance": 3, # higher ngram size allows fewer character differences "ngram_size": 3, # fewer skips is much faster but less exhaustive "skip_size": 1, } # init searcher, overriding some defaults fuzzy_searcher = FuzzyPhraseSearcher(config) from fuzzy_search.fuzzy_phrase_searcher import FuzzyPhraseSearcher # init searcher with default parameter settings fuzzy_searcher = FuzzyPhraseSearcher({'include_variants': True}) # register phrases and optional variants phrases = [ {'phrase': 'Makelaars'}, {'phrase': 'Tabak', 'variants': ['Tobacco']}, {'phrase': 'Koffie'} ] fuzzy_searcher.index_phrase_model(phrases) # A text with OCR mistakes text = 'De Makelaets sullen verkopen twee balen Tobacco en Javaansche Koffy.' 
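# Note: 'Tobacco' was registered above as a variant of 'Tabak' and the searcher was created with include_variants=True, so the OCR string 'Tobacco' in the text should be reported under the phrase 'Tabak'.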
# Find all fuzzy matches fuzzy_searcher.find_matches(text) from fuzzy_search.fuzzy_phrase_model import PhraseModel phrase_model = PhraseModel(phrases) # A text with a similar but different phrase text = 'De Metselaers sullen verkopen twee zaken cement.' # Find all fuzzy matches fuzzy_searcher.find_matches(text) # registering a phrase with a distractor phrases = [{'phrase': 'Makelaars', 'distractors': ['Metselaars']},] fuzzy_searcher.index_phrase_model(phrases) # A text with OCR mistakes text = 'De Metselaers sullen verkopen twee zaken cement.' # Find all fuzzy matches fuzzy_searcher.find_matches(text, filter_distractors=True) fuzzy_searcher = FuzzyPhraseSearcher({'include_variants': True, 'filter_distractors': True}) phrases = [ {'phrase': 'Makelaars', 'label': ['person_role', 'auction_broker'], 'distractors': ['Metselaars']}, {'phrase': 'Tabak', 'label': 'auction_good', 'variants': ['Tobacco']}, {'phrase': 'Koffie', 'label': 'auction_good'}, ] fuzzy_searcher.index_phrase_model(phrases) # A text with OCR mistakes text = 'De Makelaets sullen verkopen twee balen Tobacco en Javaansche Koffy. ' + \ 'De Metselaers sullen verkopen twee zaken cement.' # Find all fuzzy matches matches = fuzzy_searcher.find_matches(text) for match in matches: print(f"{match.offset: >4}\t{match.string: <20}\t{match.phrase.phrase_string: <20}", match.label_list) text = "Auction op Prime Tobaccos. The Executors of the late JOHN BENNETT," + \ " Tobacco Merchant,will Sell by AUCTION, at HALL'S Sale Room," + \ " Commercial Buildings, Cork, TUESDAY the 14th October." from fuzzysearch import find_near_matches phrases = [ {'phrase': 'Makelaars'}, {'phrase': 'Tabak', 'variants': ['Tobacco']}, {'phrase': 'Koffie'} ] # A text with OCR mistakes text = 'De Makelaets sullen verkopen twee balen Tobacco en Javaansche Koffy.' for phrase in phrases: matches = find_near_matches(phrase['phrase'], text, max_l_dist=2) print(matches) # Text from Delpher newspaper archive text = """n 't Volck inSpanje en Portugacl ten tijdn van de Slag van Almauza , tc geven! W i|l de Intikeoingcn in deExchequer van dc 600000 Ponden, toegeftaert door middel van Lijfrenten te veikoopcn, door de Alakei&ers by na gecompletecrt zijn (die fy Wc'er mei groot propje vetkoopen) werden al itilcke lotekeinngtn te niet geraaeckt door etnCl»uful,die bjr dc Lijfremeo-Bil, dpwclcke nu ftact te pafleren,gevoegt is }""" print(find_near_matches('Makelaars', text, max_l_dist=5)) fuzzy_searcher = FuzzyPhraseSearcher() fuzzy_searcher.index_phrases(['Makelaars']) matches = fuzzy_searcher.find_matches(text) for match in matches: print(match.json()) from fuzzy_search.fuzzy_phrase_model import PhraseModel from fuzzy_search.fuzzy_template_searcher import FuzzyTemplateSearcher, FuzzyTemplate phrases = [ {'phrase': 'Makelaars', 'label': ['person_role', 'auction_broker'], 'distractors': ['Metselaars']}, {'phrase': 'Tabak', 'label': 'auction_good', 'variants': ['Tobacco']}, {'phrase': 'Koffie', 'label': 'auction_good'}, ] phrase_model = PhraseModel(phrases) template = ['auction_broker', 'auction_good'] fuzzy_template = FuzzyTemplate(phrase_model, template) template_searcher = FuzzyTemplateSearcher(fuzzy_template, {'include_variants': True, 'filter_distractors': True}) # A text with OCR mistakes text = 'De Makelaets sullen verkopen twee balen Tobacco en Javaansche Koffy. ' + \ 'De Metselaers sullen verkopen twee zaken cement.' 
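# Note: the template search below runs in two passes: find_matches collects individual phrase matches, then find_template_matches groups them by label into matches for the template ['auction_broker', 'auction_good'] defined above.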
# Find all fuzzy matches phrase_matches = template_searcher.find_matches(text) template_matches = template_searcher.find_template_matches(phrase_matches) for template_match in template_matches: for element_match in template_match.element_matches: print('Template element:', element_match['label']) for phrase_match in element_match['phrase_matches']: print(f'\t{phrase_match.phrase.phrase_string: <15}{phrase_match.string: <15}{phrase_match.offset: >4}') template = { 'label': 'auction', 'ordered': True, 'type': 'group', 'elements': [ { 'label': 'auction_event', 'ordered': True, 'type': 'group', 'elements': [ {'label': 'auction_broker', 'required': True, 'cardinality': 'single'}, {'label': 'auction_location', 'required': True, 'cardinality': 'single'}, {'label': 'auction_date', 'required': False, 'cardinality': 'single'}, ] }, { 'label': 'auction_event', 'ordered': False, 'type': 'group', 'elements': [ {'label': 'auction_unit', 'required': False, 'cardinality': 'multi'}, {'label': 'auction_good', 'required': True, 'cardinality': 'multi'}, ] } ] }Hello WorldIt is not really established yet, what the quantum analogy of the classical `print("Hello world")` is. I will define here the "Hello world" as the task to build an arbitrary circuit with rotations and an entangling gate on 2 qubits, I will print the circuit and sample from it. Tensorflow Quantum "Hello World"Since Tensorflow Quantum (tfq) does all the quantum part on google's own quantum platform Cirq, the "Hello world" example can be done without using tfq.import tensorflow as tf import tensorflow_quantum as tfq import cirq import sympy # Sympy is for symbolic math import numpy as np from cirq.contrib.svg import SVGCircuit # to print the circuit- Define symbolic variables, which will be the parameters of the rotations- Define qubits q0 and q1- Define a circuit with a Rx rotations on each qubit followed by CNOT- Print the circuit with the Cirquit native `SVGCircuit`x1, x2 = sympy.symbols('x1 x2') # Define symbolic parameters x1, x2 q0, q1 = cirq.GridQubit.rect(1,2) # Define two qubits on grid # Define a circuit tf_circuit = cirq.Circuit( cirq.rx(x1).on(q0), cirq.rx(x2).on(q1), cirq.CNOT(control=q0, target=q1) ) SVGCircuit(tf_circuit) # Print circuit- Or simply print the circuitprint(tf_circuit)(0, 0): ───Rx(x1)───@─── │ (0, 1): ───Rx(x2)───X───How to measure an output?In the cirq simulator we can get the state vector as an output and after that we can calculate any observable that we want. Define the Cirq simulator and the resolverThe resolver sets the symbolic values of the rotation angles to a real value.simulator = cirq.Simulator() resolver = cirq.ParamResolver({x1: 0.5, x2: -0.5}) # Set symbolic parameters to values 0.5 and -0.5Do a measurementMeasurements in Cirq can be by adding `cirq.measure()` and define which qubit we want to measure. The "key" in the measurement can be set arbitrary and is just to find the histogram later. 
Cirq measurements can be done the `.run` command from the `cirq.Simulator()`.resolved_circuit = cirq.resolve_parameters(tf_circuit, resolver) # Resolve sympy values in circuit resolved_circuit.append(cirq.measure(q0, key='some_key')) # Add a measurement in Z direction on qubit 0 results = simulator.run(resolved_circuit, repetitions=100) # Run a simulation of this circuit 100 times print(results.histogram(key='some_key')) # Get measurement statisticsCounter({0: 90, 1: 10})Get measurement by simulating the state vectorInstead of running the measurement 100 times we can also get analytic results from the `.simulate` command from the `cirq.Simulator()`.The measurement direction can here be specified as an arbitrary combination of Pauli operators.output_state_vector = simulator.simulate(tf_circuit, resolver).final_state # simulate state vector z0 = cirq.Z(q0) # Define measurement direction in Z direction qubit_map = {q0: 0, q1: 1} # Define which qubits are involved in measurement z0.expectation_from_wavefunction(output_state_vector, qubit_map).realHere is another example for a measurement in $0.5 Z_0 + X_1$ directionz0x1 = 0.5 * z0 + cirq.X(q1) z0x1.expectation_from_wavefunction(output_state_vector, qubit_map).realPennylane "Hello World"Pennylane is "hardware agnostic", which means we do not have to stick to one quantum environment like Cirq. For every environment Qiskit, Cirq, Forest we basically just have to change one line of code, the `qml.device()`.import pennylane as qml from pennylane import numpy as npDefine a deviceThis is basically the whole magic of pennylane. By defining a device we can access all possible quantum hardware platforms and simulators. For more information see [here](https://pennylane.ai/plugins.html).We will start with the `qiskit.aer` device that can be installed via `pip install pennylane-qiskit`.dev1 = qml.device("qiskit.aer", wires=2) def circuit(params): qml.RX(params[0], wires=0) qml.RX(params[1], wires=1) qml.CNOT(wires=[0,1]) return qml.expval(qml.PauliZ(0))Pennylane allows us to use wrappers, to directly define a `circuit()` for a certain device `dev1`. To do so you can start the above definition of the circuit with `@qml.qnode(dev)`. This will look the following way:```@qml.qnode(dev1)def circuit(params): qml.RX(params[0], wires=0) qml.RY(params[1], wires=0) return qml.expval(qml.PauliZ(0))``` I recommend doing this, if you anyway stick to the same device. 
I will not do it in this tutorial, because I will change the device many times.
To set the device for the circuit we can also do the following and evaluate it directly for the given parameters
q_circuit = qml.QNode(circuit, dev1) params = np.array([0.5, -0.5]) q_circuit(params)
To print the circuit with `qiskit.aer` we can directly do the following:
dev1._circuit.draw()
With the `default.qubit` device the output looks as follows.
dev1 = qml.device("default.qubit", wires=2) q_circuit = qml.QNode(circuit, dev1) params = np.array([0.5, -0.5]) q_circuit(params) print(q_circuit.draw())
0: ──RX(0.5)───╭C──┤ ⟨Z⟩ 1: ──RX(-0.5)──╰X──┤
We can evaluate the circuit like before with the parameters 0.5 and -0.5
params = np.array([0.5, -0.5]) q_circuit(params)
And we can also sample the measurement outputs by changing the return line of the circuit function from `qml.expval()` to `qml.sample()`
def circuit_sample(params): qml.RX(params[0], wires=0) qml.RX(params[1], wires=1) qml.CNOT(wires=[0,1]) return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliZ(1)) dev2 = qml.device("default.qubit", wires=2) q_circuit = qml.QNode(circuit_sample, dev2) params = np.array([0.5, -0.5]) q_circuit(params)
And the wavefunction can also be accessed with the `default.qubit` device
q_circuit(params) print(dev1.state)
[0.93879128+0.j 0. +0.23971277j 0.06120872+0.j 0. -0.23971277j]
Predicting House Prices
Moving to a new area? Want to sell or buy a house? Not sure about the right price? Machine learning can be of help! We are going to try some very simple regression models, assess their quality against some metrics and predict some house prices. For this exercise we are going to use [Apache Spark](https://spark.apache.org/) version 2.0 and its ML library. For a general overview of Apache Spark check this [Wikipedia link](https://en.wikipedia.org/wiki/Apache_Spark). Don't forget to check the [Apache Spark ML documentation](http://spark.apache.org/docs/latest/ml-guide.html) for an overview of its Machine Learning capabilities! We are going to use the House Sales in King County, USA dataset, available from the [Kaggle website](https://www.kaggle.com/harlfoxem/housesalesprediction). You need to sign in and download the csv file to an appropriate location in order to follow along.
import sys import os from pyspark.sql.types import * import pyspark.sql.functions as func from pyspark.ml import Pipeline from pyspark.ml.evaluation import RegressionEvaluator from pyspark.ml.feature import Bucketizer, OneHotEncoder, StringIndexer, VectorAssembler from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression from pyspark.ml.tuning import CrossValidator, ParamGridBuilder import matplotlib.pyplot as plt %matplotlib inline import pyspark from pyspark.sql import SQLContext if ('sc' not in locals() or 'sc' not in globals()): os.environ['PYSPARK_PYTHON'] = '/usr/bin/python2' sc = pyspark.SparkContext('local[*]') sqlContext = SQLContext(sc)
Dataset exploration
Apache Spark can load data from many different datasources, including CSV files.
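As a quick aside, here is a minimal sketch of letting Spark infer the schema instead of declaring it by hand (inference costs an extra pass over the file, and the inferred column types may differ from the hand-written schema used below):
inferred_df = sqlContext.read.csv('kc_house_data.csv', header=True, inferSchema=True)
inferred_df.printSchema()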
For this exercise we are going to specify the CSV file schema, as it will come in handy throughout this sample.
kc_house_schema = StructType([\ StructField('id', LongType(),True),\ StructField('date', DateType(),True), StructField('price', FloatType(),True),\ StructField('bedrooms',IntegerType(),True),\ StructField('bathrooms',FloatType(),True),\ StructField('sqft_living',IntegerType(),True),\ StructField('sqft_lot',IntegerType(),True),\ StructField('floors',DoubleType(),True),\ StructField('waterfront',IntegerType(),True),\ StructField('view',IntegerType(),True),\ StructField('condition',IntegerType(),True),\ StructField('grade',IntegerType(),True),\ StructField('sqft_above',IntegerType(),True),\ StructField('sqft_basement',IntegerType(),True),\ StructField('yr_built',IntegerType(),True),\ StructField('yr_renovated',IntegerType(),True),\ StructField('zipcode',IntegerType(),True),\ StructField('lat',DoubleType(),True),\ StructField('long',DoubleType(),True),\ StructField('sqft_living15',IntegerType(),True),\ StructField('sqft_lot15',IntegerType(),True)\ ]) file_name = 'kc_house_data.csv' kc_house_data = sqlContext.read.csv( os.path.join(file_name), header=True, inferSchema=False, schema=kc_house_schema, dateFormat='yyyyMMdd\'T\'HHmmss') kc_house_data.cache()
**Note:** Don't forget to ```cache``` frequently used DataFrames to improve speed and performance.
kc_house_data.columns
Above is the list of columns available in this dataset. We will ask Apache Spark to briefly describe some of them in order to get a general understanding of their contents. When exploring a dataset it is quite common to investigate the correlation between the different variables and the variable we are trying to predict.
kc_house_data.describe('price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot').show() kc_house_data.describe('floors', 'waterfront', 'view', 'grade', 'sqft_above').show() kc_house_data.describe('sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long').show() kc_house_data.describe('sqft_living15', 'sqft_lot15', 'condition').show() plt.scatter(kc_house_data.select('sqft_living').collect(),kc_house_data.select('price').collect()) plt.xlabel('Sqft. Living') plt.ylabel('Price') plt.show()
/usr/local/lib/python2.7/dist-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if self._edgecolors == str('face'):
**Note:** The dataset is clean and ready to be used. This is not common in real life scenarios! Make sure to prepare your data before training your models.
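The correlation mentioned above can also be checked directly; a minimal sketch using Spark's built-in Pearson correlation between a feature and the target:
kc_house_data.stat.corr('sqft_living', 'price')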
price_stats_map = kc_house_data.describe('price').rdd.collectAsMap() min_price = float(price_stats_map['min']) max_price = float(price_stats_map['max']) num_buckets = 50 bucket_size = (max_price - min_price) / num_buckets splits = [float(min_price + i * bucket_size) for i in range(num_buckets+1)] bucketizer = Bucketizer(splits=splits, inputCol="price", outputCol="bucket") bucketizerData = kc_house_data.selectExpr('cast(price as double) price') bucketedPrice = bucketizer.transform(bucketizerData)\ .groupBy('bucket')\ .count()\ .sort('bucket') buckets = bucketedPrice.select('bucket').rdd.map(lambda x: x.bucket).collect() count = bucketedPrice.select('count').rdd.map(lambda x: x['count']).collect() plt.bar(buckets,count, log=True) plt.xlabel('Bin') plt.ylabel('Count') plt.show()
Build a simple Regression model based on 'sqft_living' only
Let's create our first Regression model to predict the house *price* based on a single feature, the *sqft_living* of the house. First we proceed to prepare the training and test sets; let's not forget to cache them to speed up performance.
train, test = kc_house_data.randomSplit([0.80, 0.20], seed=42) train.cache() test.cache()
Starting from Apache Spark 1.2 it is possible to leverage [ML Pipelines](http://spark.apache.org/docs/latest/ml-pipeline.html), a high-level API that defines a sequence of stages we want our data to go through. Our pipeline will:
* Assemble the feature vector based on some feature list using the [VectorAssembler](http://spark.apache.org/docs/latest/ml-features.htmlvectorassembler)
* Prepare the [LinearRegression](http://spark.apache.org/docs/latest/ml-classification-regression.htmllinear-regression) model
* Assemble the workflow pipeline
* Tune the model via [CrossValidator](http://spark.apache.org/docs/latest/ml-tuning.html)
Once the model has been trained and selected, we will generate some predictions and evaluate the [RMSE](https://en.wikipedia.org/wiki/Root-mean-square_deviation) using a RegressionEvaluator
features_columns = ['sqft_living'] vectorAssembler = VectorAssembler(inputCols=features_columns, outputCol='features') linearRegression = LinearRegression(labelCol='price') regressionEvaluator = RegressionEvaluator(labelCol='price') pipeline = Pipeline(stages=[vectorAssembler, linearRegression]) paramGrid = ParamGridBuilder()\ .addGrid(linearRegression.regParam, [1.0, 0.1, 0.01])\ .build() crossValidator = CrossValidator(estimator=pipeline, estimatorParamMaps=paramGrid, evaluator=regressionEvaluator, numFolds=10) singleFeatureModel = crossValidator.fit(train) singleFeatureModelPredictions = singleFeatureModel.transform(test) singleFeatureModelRMSE = regressionEvaluator.evaluate(singleFeatureModelPredictions) print "Single Feature Model RMSE: %s" % singleFeatureModelRMSE
Single Feature Model RMSE: 260877.960048
Our Single Feature Model has an RMSE of about 260.877$
plt.plot(test.select('sqft_living').collect(),test.select('price').collect(),'o', test.select('sqft_living').collect(),singleFeatureModelPredictions.select('prediction').collect(),'-') plt.xlabel('Sqft. Living') plt.ylabel('Price') plt.show()
A more advanced model
We will now leverage more features to build a more advanced model that hopefully will lead to better results. We will take advantage of the pipeline system to pre-process some of the features; a small standalone illustration of these pre-processing steps follows.
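Before the full pipeline, here is a minimal standalone sketch (with made-up zipcodes) of what the indexing and one-hot encoding steps do to a single categorical column:
toy = sqlContext.createDataFrame([('98039',), ('98103',), ('98039',)], ['zipcode'])
toy_indexed = StringIndexer(inputCol='zipcode', outputCol='zipIndex').fit(toy).transform(toy)
OneHotEncoder(dropLast=False, inputCol='zipIndex', outputCol='zipcodeVector').transform(toy_indexed).show()
StringIndexer maps each distinct value to a numeric index, and OneHotEncoder turns that index into a sparse 0/1 vector; the pipeline below applies the same idea to the real columns.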
In this example the *zipcode*, *yr_built* and *yr_renovated* are [categorical variables](https://en.wikipedia.org/wiki/Categorical_variable) and we are pre-processing them via the One Hot Encoding algorithm.
features_columns = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'grade', 'sqft_above',\ 'sqft_basement', 'yr_builtVector', 'yr_renovatedVector', 'zipcodeVector', 'lat', 'long',\ 'sqft_living15', 'sqft_lot15'] zipcodeStringIndexer = StringIndexer(inputCol="zipcode", outputCol="zipIndex", handleInvalid='skip') zipcodeOneHotEncoder = OneHotEncoder(dropLast=False, inputCol="zipIndex", outputCol="zipcodeVector") yearBuiltStringIndexer = StringIndexer(inputCol="yr_built", outputCol="yr_builtIndex", handleInvalid='skip') yearBuiltOneHotEncoder = OneHotEncoder(dropLast=False, inputCol="yr_builtIndex", outputCol="yr_builtVector") yearRenovatedStringIndexer = StringIndexer(inputCol="yr_renovated", outputCol="yr_renovatedIndex", handleInvalid='skip') yearRenovatedOneHotEncoder = OneHotEncoder(dropLast=False, inputCol="yr_renovatedIndex", outputCol="yr_renovatedVector") vectorAssembler = VectorAssembler(inputCols=features_columns, outputCol='features') linearRegression = GeneralizedLinearRegression(family="gaussian", link="log", maxIter=100, labelCol='price') advancedPipeline = Pipeline(stages=[zipcodeStringIndexer, zipcodeOneHotEncoder,\ yearBuiltStringIndexer, yearBuiltOneHotEncoder,\ yearRenovatedStringIndexer, yearRenovatedOneHotEncoder,\ vectorAssembler, linearRegression]) paramGrid = ParamGridBuilder()\ .addGrid(linearRegression.regParam, [1.0, 0.1, 0.01])\ .build() crossValidator = CrossValidator(estimator=advancedPipeline, estimatorParamMaps=paramGrid, evaluator=regressionEvaluator, numFolds=10) advancedFeatureModel = crossValidator.fit(train) advancedFeatureModelPredictions = advancedFeatureModel.transform(test) advancedFeatureModelRMSE = regressionEvaluator.evaluate(advancedFeatureModelPredictions) print "Advanced Features Model RMSE: %s" % advancedFeatureModelRMSE
Advanced Features Model RMSE: 154857.94152
The AdvancedFeatureModel has an RMSE of about 154.858$
Let's use our model to predict some house prices
kc_house_data.filter('id=5309101200')\ .select('yr_built', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront',\ 'view', 'condition', 'grade', 'zipcode')\ .show() advancedFeatureModel.transform(kc_house_data.filter('id=5309101200')).select('price', 'prediction').collect()
Let's try with another house in the dataset:
kc_house_data.filter('id=7979900210')\ .select('yr_built', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront',\ 'view', 'condition', 'grade', 'zipcode')\ .show() advancedFeatureModel.transform(kc_house_data.filter('id=7979900210')).select('price', 'prediction').collect()
Last but not least, let's try with a house that is not in the dataset. Can you recognize it?
bill_gates_json = '{"id":0, "date":"19980423T000000","price":0.00,"bedrooms":8,"bathrooms":25,"sqft_living":50000,\ "sqft_lot":225000,"floors":4,"zipcode":"98039","condition":10,"grade":10,"waterfront":1,"view":4,"sqft_above":37500,\ "sqft_basement":12500,"yr_built":1994,"yr_renovated":2010,"lat":47.627606,"long":-122.242054,"sqft_living15":5000,\ "sqft_lot15":40000}'
Let's import the house data and adjust it to our dataset schema.
bill_gates = sqlContext.read.json(sc.parallelize([bill_gates_json])) for field in kc_house_schema.fields: bill_gates = bill_gates.withColumn(field.name, bill_gates[field.name].cast(field.dataType).alias(field.name))
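# Note: read.json infers its own schema from the JSON string, so the loop above re-casts every column to the exact types declared in kc_house_schema before the fitted pipeline is applied to this row.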
advancedFeatureModel.transform(bill_gates).select('prediction').show()
+--------------------+
| prediction|
+--------------------+
|2.1420889193652208E9|
+--------------------+
Indices and slicing in 2 dimensions
In the previous lesson we saw how to work with one-dimensional arrays; now we will see how those concepts carry over to the second dimension. It is extremely simple if we picture the array as a **table** with rows and columns.
import numpy as np arr_2d = np.array(([0,5,10], [15,20,25], [30,35,40])) arr_2d
Indices
If we have two dimensions, then we need two indices. The first index refers to the first dimension; we can think of it as the **row**:
# First row arr_2d[0]
Now, to access the second dimension, **or column**, we use a second index. This way we can access individual values:
# First row and first column arr_2d[0][0]
We can also use negative indices to position ourselves very easily at the last row and last column:
arr_2d[-1][-1]
Using this logic we can easily change the first column of the last row:
arr_2d[-1][0] = 99 arr_2d
Slicing
It is also possible to use slicing, although with two dimensions we have to double up the start and end indices, **separated by a comma**. For example, a slice with no indices selects a subarray with all rows and columns:
arr_2d[:,:]
To get a subarray with the first two rows we would do:
arr_2d[:2,:]
Or one with the first column:
arr_2d[:,:1]
With this logic we can also modify elements in bulk. For example, the whole second column:
arr_2d[:,1:2] = 0 arr_2d
Copies
Naturally, two-dimensional arrays are also referenced in memory, so any change made to a subarray will be reflected in the original. **Remember to use the .copy() method to create copies by value and not by reference to memory.**
Fancy index
The last important concept we will see about 2d arrays is the fancy index. This property of arrays lets us work very comfortably with their rows. For example, let's create a 5x10 matrix full of zeros:
arr_2d = np.zeros((5,10)) arr_2d
There we have our matrix of 5 rows and 10 columns. So far we know how to easily access a specific row, for example the third one (3-1):
arr_2d[2] = 10 arr_2d
But is there a way to access several rows at once? There is: the fancy index, which is based on passing the array a list that refers to the rows we want to access. For example, we can modify the first, third and last rows on the fly:
arr_2d[[0,2,-1]] = 99 arr_2d
We can even use it in any order, or repeating indices:
arr_2d[[4,0,1,0,4]]
It really is almost magical, and it all comes down to the idea that the rows behave like sublists. In fact, we could traverse this 2d array with a for loop, and each time we enter the loop we are on a row:
for row in arr_2d: print(row)
[ 99. 99. 99. 99. 99. 99. 99. 99. 99. 99.] [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 99. 99. 99. 99. 99. 99. 99. 99. 99. 99.] [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 99. 99. 99. 99. 99. 99. 99. 99. 99. 99.]
So if we wanted to give each row a value of its own it would not take much; we just need to access it through our magic index, which we can get, for example, with an enumerator:
for i, row in enumerate(arr_2d): arr_2d[i] = i arr_2d
**684.
Redundant Connection**def findRedundantConnection(edges): p = [0]*(len(edges)+1) s = [1]*(len(edges)+1) # path compression during find def find(u): while p[u] != u: p[u] = p[p[u]] u = p[u] return u for u, v in edges: if p[u] == 0: p[u] = u if p[v] == 0: p[v] = v pu, pv = find(u), find(v) if pu == pv: return [u, v] # union by rank, make sure pv is lower rank # link v's parent node to u's, increase size if s[pv] > s[pu]: u, v = v, u p[pv] = pu s[pu] += s[pv] return [] edges = [[1,2], [1,3], [2,3]] findRedundantConnection(edges) for u,v in edges: print (u,v)1 2 1 3 2 3**547. Number of Provinces**""" Method 1: DFS Time complexity: O(n^2) Space complexity: O(n) """ class Solution(object): def findCircleNum(M): def dfs(M, curr, n): for i in range(n): if M[curr][i] == 1: M[curr][i] = M[i][curr] = 0 dfs(M, i, n) n = len(M) ans = 0 for i in range(n): if M[i][i] == 1: ans += 1 dfs(M, i, n) return ans isConnected = [[1,1,0],[1,1,0],[0,0,1]] Solution.findCircleNum(isConnected) """ Method 2: Union Find """ class Solution: def findCircleNum(isConnected): def find(u): while u != p[u]: p[u] = find(p[u]) u = p[u] return u m = len(isConnected) p = [] for i in range(m): p.append(i) for i in range(m): for j in range(i, m): if i != j and isConnected[i][j] == 1 and find(i) != find(j): p[find(i)] = find(j) provinces = [] for n in range(m): if p[n] == n: provinces.append(n) return len(provinces) isConnected = [[1,1,0],[1,1,0],[0,0,1]] Solution.findCircleNum(isConnected)**737. Sentence Similarity II**class UFS: def __init__(self, n): self.p = [0]*(n+1) self.s = [1]*(n+1) def find(self, u): while u != self.p[u]: self.p[u] = self.p[self.p[u]] u = self.p[u] return u def union(self, u, v): pu = self.find(u) pv = self.find(v) if pu == pv: return False if s[pu] > s[pv]: u, v = v, u p[pu] = pv s[pv] += s[pu] return True class Solution: def areSentencesSimilarTwo(words1, words2, pairs): if len(words1) != len(words2): return False index = {} ufs = UFS(2*len(pairs)) for pair in pairs: u = Solution.getIndex(pair[0], index, 'True') v = Solution.getIndex(pair[1], index, 'True') ufs.union(u, v) print(index) for i in range(len(words1)): if words1[i] == words2[i]: continue u, v = getIndex(words1[i], index, 'False'), getIndex(words2[i], index, 'False') print(u, v) if u < 0 or v < 0: return False if ufs.find(u) != ufs.find(v): return False print (index) return True def getIndex(word, index, create): print(word) if word in index: return index[word] else: if create == 'True': num = len(index) index[word] = num return num return -1 words1 = ["great","acting","skills"] words2 = ["fine","painting","talent"] pairs = [["great","fine"],["drama","acting"],["skills","talent"]] Solution.areSentencesSimilarTwo(words1,words2,pairs) def getIndex(word, index, create = 'False'): if word in index: return index[word] else: if not create: return -1 num = len(index) index[word] = num return num index = {} for pair in pairs: u = getIndex(pair[0], index, 'True') v = getIndex(pair[1], index, 'True') indexDigit Recognition ------ Imports needed for the project# Tkinter is Python's de-facto standard GUI (Graphical User Interface) package. 
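# Note: tkinter is not used in the cells shown here; it is presumably needed by the companion drawing program 'digitrecognition.py' mentioned further down, which produces the test image loaded later.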
import tkinter as tk import keras as kr import numpy as np import matplotlib.pyplot as plt import math import sklearn.preprocessing as pre import gzip import PIL from PIL import Image, ImageDraw import os.pathUsing TensorFlow backend.Read in the images for training ---- We can start by Importing the Images that we will use for training the model, then the images we will use to test the model and images used to identify the classes needed to identify them.with gzip.open('data/train-images-idx3-ubyte.gz', 'rb') as f: train_img = f.read() with gzip.open('data/train-labels-idx1-ubyte.gz', 'rb') as f: train_lbl = f.read() with gzip.open('data/t10k-images-idx3-ubyte.gz', 'rb') as f: test_img = f.read() with gzip.open('data/t10k-labels-idx1-ubyte.gz', 'rb') as f: test_lbl = f.read()Create the model for training ---- Here we can create a sequential model, we can build it using layers to identify the dimentions to use. the tools to normalise the data, and which algorithm to use to optimise and train the network.# Start a neural network, building it by layers. # using sequential model model = kr.models.Sequential() # Add a hidden layer with 1000 neurons and an input layer with 784. model.add(kr.layers.Dense(512, input_dim=784, activation="relu", kernel_initializer="normal")) model.add(kr.layers.Dense(10, activation="softmax", kernel_initializer="normal")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])Reshape the mnist data set to match the conditions that we need ---- Now we can reshape the inputs in or to train the network, using a list of images to train and a group of labels to identify the differnt classes.# reshape the images and labels. train_img = ~np.array(list(train_img[16:])).reshape(60000, 1, 784).astype(np.uint8) train_lbl = np.array(list(train_lbl[ 8:])).astype(np.uint8) train_img = train_img/ 255 train_lbl = kr.utils.to_categorical(train_lbl) # reshape the image array inputs = train_img.reshape(60000, 784)Create the encoder ---- Now we can train the Encoder so that we can use it to interpret the expected result.# Binarize labels in a one-vs-all fashion encoder = pre.LabelBinarizer() # Trains the model for a fixed number of epochs (iterations on a dataset). 
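# Note: the fit call below only teaches the LabelBinarizer the set of label classes so they can be one-hot encoded; the epoch-based training mentioned in the previous comment actually happens later, in model.fit().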
encoder.fit(train_lbl) outputs = encoder.transform(train_lbl)Train the Neural Network ---- Train the nueral network by giving it a group of inputs and expected outputs.# Train the model model.fit(inputs, outputs, epochs=15, batch_size=100)Epoch 1/15 60000/60000 [==============================] - 9s 152us/step - loss: 0.5218 - acc: 0.8463 Epoch 2/15 60000/60000 [==============================] - 9s 149us/step - loss: 0.3155 - acc: 0.9063 Epoch 3/15 60000/60000 [==============================] - 9s 148us/step - loss: 0.2498 - acc: 0.9258 1s - loss: Epoch 4/15 60000/60000 [==============================] - 9s 150us/step - loss: 0.2001 - acc: 0.9397 Epoch 5/15 60000/60000 [==============================] - 9s 149us/step - loss: 0.1677 - acc: 0.9501 Epoch 6/15 60000/60000 [==============================] - 9s 148us/step - loss: 0.1475 - acc: 0.9560 Epoch 7/15 60000/60000 [==============================] - 9s 149us/step - loss: 0.1359 - acc: 0.9588 Epoch 8/15 60000/60000 [==============================] - 9s 149us/step - loss: 0.1247 - acc: 0.9625 Epoch 9/15 60000/60000 [==============================] - 9s 148us/step - loss: 0.1139 - acc: 0.9647 0s - loss: 0.1141 - acc: 0 Epoch 10/15 60000/60000 [==========================[...]Saving the model to file ---- Here we can save the model so that it can be read in again.# save the current model kr.models.save_model( model, "model2.h5py", overwrite=True, include_optimizer=True )Loading the model from file ---- If the model exists, we can load it so that the network wont have to be trained each time.# if the model file exists load it if os.path.isfile('data/model2.h5py'): model = kr.models.load_model('data/model2.h5py')Read in an image to test ---- Lets open the image we want to test, feel free to test without images by changing the relative paths.# read in an image and greyscale im = Image.open('data/image.png').convert('L')Here we can show the image that we loaded and see the image using the default image viewer in the OS. It will show a black background with a white digit. This is loading the image saved by the project program "digitrecognition.py", so it can change.im.show()Normalise the data and reshape ---- Now we must normalise the data so that the values are easier to read for the model.# get the data from the image tv = list(im.getdata()) # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black. tv = [(255 - x) * 1.0 / 255.0 for x in tv]We resize the image to an array of 1*784 so that it can be compared to the training data we have trained the network using.img = np.array(list(tv)).reshape(1,784)Finally we can predict the class from the model we trained and get the response expected for the image we have input. Make the prediction ----model.predict_classes(img)Input and Outputimport json as json import pandas as pd import numpy as np import matplotlib.pyplot as plt import random as random %matplotlib inline #mkdir newfolderTxt file - ```with open ()``` methodmkdir data with open('data/test.txt', 'w')as file: for k in range(100): file.write("it is your "+ str(k) + "th turn! 
\n")- ```open()``` and ```close()``` methodtxtfile = open("data/textdata.txt",'w') for i in range (1000): '''check even or odd''' if i%2 ==0: txtfile.write(str(i) + "|It is even \n") '''check above or below 500''' if i<500: txtfile.write(str(i) + "|It is below 500 \n") else: txtfile.write(str(i) + "|It is above 500 \n") else: txtfile.write(str(i) + "|It is odd \n") '''check above or below 500''' if i<500: txtfile.write(str(i) + "|It is below 500 \n") else: txtfile.write(str(i) + "|It is above 500 \n") txtfile.close()- uploading/reading text filewith open('data/textdata.txt','r') as f: i = 0 for line in f: print(line) if i >10: break i = i+10|It is even 0|It is below 500 1|It is odd 1|It is below 500 2|It is even 2|It is below 500 3|It is odd 3|It is below 500 4|It is even 4|It is below 500 5|It is odd 5|It is below 500JSON fileData = [] for k in range(1000): Data.append({"x": np.random.uniform(0,10),\ "y": np.random.uniform(0,20),\ "z" : np.random.uniform(0,10)}) Data[101:105]- Save data to a JSON filemkdir data with open("data/sample_data.json", 'w')as f: json.dump(Data,f)- Upload data from JSON filewith open("data/sample_data.json","r")as f: uploaded_data = json.load(f) uploaded_data[0:2]CSV fileDF = pd.DataFrame(Data) DF.head()- Save data to a CSV fileDF.to_csv("data/sampledata.csv")- Upload data from a CSV fileuploaded_data = pd.read_csv("data/sampledata.csv") uploaded_data.head()Lab 3 - Iteration with For Loops Data 94, Spring 2021 Iteration is a very important tool in Python. So far we have learned about `while` loops, and now we are going to dive into `for` loops. `For` loops are a form of iteration that loop over a Python data structure, such as a list or a string. Below, you can see that `for` loops can work in all sorts of ways!# We can iterate over numbers using the range() function! for i in range(10): print("Charging... battery at:", str(i * 10), '%') print("Battery charged at 100%!")Notice how the value of `i` changes on each loop because of the range() function! **Important:** We do not necessarily have to use `i` as the name of our `for` loop variable. We can use any name we want!# We can iterate over the characters in a string! string = "Data Science is Cool!" for letter in string: # 'letter' instead of 'i' print(letter) # We can even iterate over the words in a string! sentence = string.split(" ") # Split up string, using spaces (" ") to separate words for word in sentence: # 'word' instead of 'i' print(word)`While` loops in Python can do these things as well, but they are a bit more complicated:word_index = 0 # Start at first item while word_index < len(sentence): # While we are iterating over the list print(sentence[word_index]) word_index += 1 # Move onto the next item of the listAs you can see, using the `while` loop is a lot more work and looks a bit more difficult to read. Both work, but often one will be better to use than the other.When choosing between which you should use, generally:- `While` loops work better when you don't necessarily know how many iterations your loop may take - Think 'Persistency' from Question 5 on the Quiz- `For` loops work better when you have something to **iterate over** like a list or a string. - See examples above Let's see an example of a `for` loop at work:In this function we will calculate the maximum value in a list of numbers without using the Python max() function. 
If the list is empty, we return the False boolean.def max_value(lst): if len(lst) == 0: # If we are given an empty list, there is no maximum value, so return False return False max_number = lst[0] # The first number is the biggest we've seen so far... because we haven't seen any other numbers for number in lst[1:]: # We check the rest of the items after the first; we don't need to check the first again if number > max_number: # If the number we are looking at is bigger than our biggest so far... max_number = number # ... then it becomes the new biggest! return max_number # After we finish the 'for' loop, we will have stored the maximum value in max_number lst1 = [13, 96, -24, 53, -109] lst2 = [1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1] lst3 = ["data", "science", "is", "cool!"] # It even works on strings too because strings can be compared using '<' and '>' in Python! print("The maximum value of", lst1, "is:", max_value(lst1)) print("The maximum value of", lst2, "is:", max_value(lst2)) print("The maximum value of", lst3, "is:", max_value(lst3))Let's go ahead and try writing a function with a `for` loop:We want to be able to pairwise multiply two lists together. This means that we want the result of multiplying the first items of each list, the second items of each list, etc.For example, pairwise multiplying `[2, 3, 4]` and `[10, 20, 30]` is `[20, 60, 120]` because 2 * 10 == `20`, 3 * 20 == `60` and 4 * 30 == `120`.We can use a `for` loop to move through each list, multiplying items as we go!def pairwise_multiply(lst1, lst2): output = ... for i in range(len(lst1)): multiplication = ... ... return output grader.check("q1")Now let's work on another example:We will implement a function that calculates the most revenue brought in during one shift at a store. Given the sales that were made, calculate the sales total for the best shift of the day.For example, with `sales = [3, 4, 1.25, 6, "Shift Change", 2, 4, 5.75, "Shift Change", 10, 2, .25, "Shift Change"]`, we will return `14.25` because 3 + 4 + 1.25 + 6 == `14.25`.In other words:`best_shift_sales`(`[3, 4, 1.25, 6, "Shift Change", 2, 4, 5.75, "Shift Change", 10, 2, .25, "Shift Change"]`) == `14.25`def best_shift_sales(sales): current_shift_sales = ... best_shift_sales_total = ... for sale in sales: if sale == "Shift Change": if current_shift_sales > best_shift_sales_total: best_shift_sales_total = ... current_shift_sales = ... else: ... return best_shift_sales_total grader.check("q2")Done! 😇That's it! There's nowhere for you to submit this, as labs are not assignments. However, please ask any questions you have with this notebook in lab or on Ed.If you want some extra practice, you may proceed onto the next section, which contains a practice problem for this week. Extra Practice ProblemsThese problems are here for extra practice. They are not mandatory, and they will not be turned in for any points, but we highly suggest you do them as practice for both homework questions and quiz questions. Extra Question 1We want to modify our `best_shift_sales` function from earlier in the lab so that it calculates the **name of the employee** who was working during the best shift for the store. You are given the shift list in order and the sales that were made. 
There are guaranteed to be an equal number of shifts and employees, so each employee only does one shift.For example, with `employees = ["Alice", "Bob", "Cam"]` and `sales = [3, 4, 1.25, 6, "Shift Change", 2, 4, 5.75, "Shift Change", 10, 2, .25]`, we will return `"Alice"`.In other words:`best_salesperson`(`["Alice", "Bob", "Cam"]`, `[3, 4, 1.25, 6, "Shift Change", 2, 4, 5.75, "Shift Change", 10, 2, .25]`) == `"Alice"`*Hint: You should only need to **modify** your implementation from `best_shift_sales`. We are still finding the best sales, but instead of returning how much money was the most, we want the name of the employee. How can we keep track of not only what the best shift by sales was, but also who was working that shift?*def best_salesperson(employees, sales): current_employee_number = 0 # Start with the first employee current_employee_sales = ... best_sales_total = ... employee_number_of_best_salesperson_so_far = ... ... best_salesperson_name = ... return best_salesperson_name grader.check("eq1")Solution (for after you have tried yourself) def best_salesperson(employees, sales): current_employee_number = 0 Start with the first employee current_employee_sales = 0 We are not yet checking a shift total best_sales_total = 0 We have not yet seen a best shift total employee_number_of_best_salesperson_so_far = 0 There is no best employee sales yet for sale in sales: if sale == "Shift Change": if current_employee_sales > best_sales_total: best_sales_total = current_employee_sales employee_number_of_best_salesperson_so_far = current_employee_number current_employee_number += 1 current_employee_sales = 0 else: current_employee_sales += sale best_salesperson_name = employees[employee_number_of_best_salesperson_so_far] return best_salesperson_name Extra Question 2aLet's write a boolean function that tells us if there are any duplicate values in a list. If we find a duplicate, we should return `True`, but if we search everywhere and we cannot find a duplicate, we should return `False`.We want to implement this function so that it checks all the items from itself to the end of the list for duplicates. There is no need to check anything behind it because past loops already did those duplicate checks. In the list `[1, 2, 3]`, we check if 1 == 2, then if 1 == 3, then if 2 == 3. There is no need to check if 2 == 1 by the time we get to 2, because we had already done that check when we were on 1.Examples:- The list `[1, 2, 3, 4, 5, 1]` has a duplicate, so we should return `True`.- The list `[1, 2, 3, 4, 5, 6]` has no duplicates, so we should return `False`.def duplicate_values1(values): ... grader.check("eq2a")Solution (for after you have tried yourself) def duplicate_values1(values): for i in range(len(values)): if values[i] in values[i + 1:]: return True return False Extra Question 2bThere is actually another way to implement this function using the `count()` method of lists! We can simply loop through the list once and ask for the count of each item. If at any point we encounter a count greater than 1, we have found a duplicate and we can immediately return` True`! If we never see any counts over 1, we return `False` as there are no duplicates.def duplicate_values2(values): ... 
grader.check("eq2b")Solution (for after you have tried yourself) def duplicate_values2(values): for value in values: if values.count(value) > 1: return True return False Meme GalleryHere are some memes about the topics we covered today, feel free to like, comment, and subscribe 😆 ---To double-check your work, the cell below will rerun all of the autograder tests.grader.check_all()SubmissionMake sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!**# Save your notebook first, then run this cell to export your submission. grader.export(pdf=False)Tweedie distribution> A short introduction to the Tweedie distribution- toc: true - badges: true- comments: false- categories: [insurance, distribution, glm, notebook] Introduction The [Tweedie distribution](https://en.wikipedia.org/wiki/Tweedie_distribution) is a family of probability distributions that include- [Normal](https://en.wikipedia.org/wiki/Normal_distribution)- [Gamma](https://en.wikipedia.org/wiki/Gamma_distribution)- [Inverse Gaussian](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)- [Poisson](https://en.wikipedia.org/wiki/Poisson_distribution)- [Compound Poisson-gamma](https://en.wikipedia.org/wiki/Compound_Poisson_distributionCompound_Poisson_Gamma_distribution)The Tweedie distribution is wildly popular in insurance industry as a tool of modelling- claim frequency (count data), - claim severity (non-negative continuous data), and - pure premium (non-negative continous data with a zero mass).In this post, we give a brief introduction to the Tweedie distribution and its properties. Exponential Dispersion Models Before diving into the Tweedie distribution, we need to understand the **exponential dispersion model (EDM)** {% cite jorgensen1987exponential %}, of which the Tweedie is a special case. A probability distribution is an **EDM** if the density/mass function has the following form$$f(y) = c(y, \phi)\exp\left\{\frac{y\theta- a(\theta)}{\phi}\right\},$$where $\theta$ is called the canonical parameter and $\phi$ the dispersion parameter. It can be shown that,$$\mathbb{E}(y):=\mu = \dot{a}(\theta), \quad \mathrm{Var}(y)=\phi\ddot{a}(\theta)= \phi\ddot{a}(\dot{a}^{-1}(\mu))=\phi V(\mu)$$where $\dot{a}$ and $\ddot{a}$ are the first and second derivative of $a$, respectively; $V(\mu)$ is called the variance function. Tweedie Distribution Now we formally introduce the Tweedie distribution. A Tweedie distribution $Tw_p(\mu, \phi)$ is an EDM with $$V(\mu) = \mu^p, $$where $p\in \mathrm{R}$.The Tweedie behaves differently when $p$ takes different values. We consider 5 cases. 
Normal ($p=0$)When $p=0$, the Tweedie distribution becomes a normal distribution,$$Tw_0(\mu, \phi) \to N(\mu, \sigma), $$ where $\mu=\mu, \phi=\sigma^2$.# hide_input import numpy as np import matplotlib.pyplot as plt %config InlineBackend.figure_format = 'retina' %load_ext lab_black from scipy.stats import norm mu = 2 sigma = 1 norm_dist = norm(loc=mu, scale=sigma) x = np.linspace(norm_dist.ppf(0.01), norm_dist.ppf(0.99), 100) plt.plot(x, norm_dist.pdf(x)) plt.title("Normal Distribution Tw_0(mu=2, phi=1)") plt.show()Poisson ($p=1$) When $p=1$, the Tweedie distribution becomes a Poisson distribution,$$Tw_1(\mu, \phi) \to \mathrm{Poisson}(\lambda), $$ where $\mu=\lambda, \phi=1$.from scipy.stats import poisson mu = 2 poisson_dist = poisson(mu=mu) x = np.arange(poisson_dist.ppf(0.01), poisson_dist.ppf(0.99)) plt.plot(x, poisson_dist.pmf(x), marker="o") plt.title("Poisson Distribution Tw_1(mu=2, phi=1)") plt.show()Gamma Distribution ($p=2$) When $p=2$, the Tweedie distribution becomes a Gamma distribution,$$Tw_2(\mu, \phi) \to \mathrm{Gamma}(\alpha, \beta), $$ where $\alpha = 1/\phi, \beta = 1/(\phi\mu)$.from scipy.stats import gamma mu = 2 phi = 1 gamma_dist = gamma(a=1 / phi, scale=mu * phi) x = np.linspace(gamma_dist.ppf(0.01), gamma_dist.ppf(0.99), 100) plt.plot(x, gamma_dist.pdf(x)) plt.title("Gamma Distribution Tw_2(mu=2, phi=1)") plt.show()Inverse Gaussian Distribution ($p=3$) When $p=3$, the Tweedie distribution becomes an Inverse Gaussian distribution,$$Tw_3(\mu, \phi) \to \mathrm{IG}(\mu, \lambda), $$ where $\mu=\mu, \lambda = 1/\phi$.from scipy.stats import invgauss mu = 2 phi = 1 invgauss_dist = invgauss(mu=mu, scale=1 / phi) x = np.linspace(invgauss_dist.ppf(0.01), invgauss_dist.ppf(0.8), 100) plt.plot(x, invgauss_dist.pdf(x)) plt.title(f"Inverse Gaussian Distribution Tw_3(mu={mu}, phi={phi})") plt.show()Compound Poisson-Gamma ($1 < p < 2$) When $1 < p < 2$, the Tweedie distribution becomes a compound Poisson-Gamma distribution, i.e. a Poisson number of Gamma-distributed summands, with$$\lambda = \frac{\mu^{2-p}}{(2-p)\phi}, \quad \alpha = \frac{2-p}{p-1}, \quad \beta = \frac{\mu^{1-p}}{(p-1)\phi}.$$This distribution has a point mass at zero (when the Poisson count is zero) and a continuous density on the positive reals, which is what makes it suitable for pure premium modelling.from scipy.stats import poisson, gamma mu = 5 phi = 3 p = 1.5 n_sim = 2000 lam = mu ** (2 - p) / ((2 - p) * phi) alpha = (2 - p) / (p - 1) beta = mu ** (1 - p) / ((p - 1) * phi) scale = 1 / beta # as scipy.stats scale is 1/beta rvs_poisson = poisson(lam).rvs(n_sim) rvs_gamma = [gamma(a=alpha, scale=scale).rvs(rp).sum() for rp in rvs_poisson] plt.hist(rvs_gamma, bins=40) plt.title(f"Compound Poisson-Gamma Tw_{p}(mu={mu}, phi={phi})") plt.show()These are the symptoms that we are most interested in.symptoms_dataset = pd.read_csv('Example/Inputs/example_dataset_1.csv') symptom_list = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J' ]Get just the symptom columns and transpose the datasymptoms_transposed = symptoms_dataset[symptom_list].transpose()Convert strings to booleans for use when computing Jaccardsymptoms_transposed.replace({ 1: True, 0: False }, inplace = True)Compute a Jaccard distance matrix%%time distance_matrix = sci.spatial.distance.pdist(X = symptoms_transposed, metric = 'jaccard')Wall time: 1.02 msConvert the matrix into squareform (instead of the condensed lower-triangular form, which we don't use directly), label and outputdist_matrix_jaccard = sci.spatial.distance.squareform(distance_matrix) df_jaccard = pd.DataFrame(dist_matrix_jaccard, index = symptom_list, columns = symptom_list) df_jaccard.to_csv("Example/Outputs/DistanceMatrixJaccard.csv") labels_list = list(symptoms_transposed.index.values)Dendrogram plotting Using the linkage function, scipy only lets you use Ward linkage with Euclidean distances. The following function will let you use it with other distance metrics.
This appears to be well justified in practice, and there are some simulation papers where this has been justified using Monte Carlo simulation.Overall, while I do believe that Ward linkage will output an good clustering, I steer away from it in practice due to methodological reasons.def make_dendrogram(data, method, metric): distance_matrix = sci.spatial.distance.pdist(X = data, metric = metric) linked = clust.linkage(y = distance_matrix, method = 'ward', optimal_ordering = True) plt.figure(figsize = (12, 8)) clust.dendrogram(linked, orientation = 'top', labels = labels_list) plt.xticks(rotation = 90) plt.title(f'Heirarchal clustering (linkage = {method}, distance = {metric})') plt.show() make_dendrogram(data = symptoms_transposed, method = 'complete', metric = 'jaccard')In this notebook, we'll be comparing our Transformer XL architecture with the official implementation.Note: this is a non-refactored, dirty notebook that shouldn't be used as a reference for implementation.import torch import torch.nn as nnDownloading the reference code%%bash if [ -d "./transformer-xl" ] then echo "Transformer XL reference repo exists" else echo "Cloning Transformer XL repo" git clone https://github.com/kimiyoung/transformer-xl.git fiTransformer XL reference repo existsWe'll be using the penn treebank dataset to benchmark our model.from pathlib import Path DATASET = "penn" DATA_DIR = Path("../data") / DATASET #scrap import sys from pathlib import Path DATASET = "penn" REF_PATH = Path("./transformer-xl") DATA_DIR = Path("..") / "data" / DATASET sys.path.append(str(REF_PATH / "pytorch")) sys.path.append(str(REF_PATH / "pytorch" / "utils")) TESTING = not IS_KAGGLE_KERNEL # Keep True for now #scrap from mem_transformer import RelPartialLearnableMultiHeadAttn from collections import Counter, OrderedDict #TODO: Clean up import torch class Vocab(object): def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True, delimiter=None, vocab_file=None): self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file def tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() # convert to lower case if self.lower_case: line = line.lower() # empty delimiter '' will evaluate False if self.delimiter == '': symbols = line else: symbols = line.split(self.delimiter) if add_double_eos: # lm1b return [''] + symbols + [''] elif add_eos: return symbols + [''] else: return symbols def count_file(self, path, verbose=False, add_eos=False): if verbose: print('counting file {} ...'.format(path)) assert os.path.exists(path) sents = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): """ sents : a list of sentences, each a list of tokenized symbols """ if verbose: print('counting {} sents ...'.format(len(sents))) for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) self.unk_idx = self.sym2idx[''] def build_vocab(self): if self.vocab_file: 
print('building vocab from {}'.format(self.vocab_file)) self._build_from_file(self.vocab_file) print('final vocab size {}'.format(len(self))) else: print('building vocab with min_freq={}, max_size={}'.format( self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: self.add_special(sym) for sym, cnt in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) print('final vocab size {} from {} unique tokens'.format( len(self), len(self.counter))) def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: print('encoding file {} ...'.format(path)) assert os.path.exists(path) encoded = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def encode_sents(self, sents, ordered=False, verbose=False): if verbose: print('encoding {} sents ...'.format(len(sents))) encoded = [] for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) def add_symbol(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def get_sym(self, idx): assert 0 <= idx < len(self), 'Index {} out of range'.format(idx) return self.idx2sym[idx] def get_idx(self, sym): if sym in self.sym2idx: return self.sym2idx[sym] else: # print('encounter unk {}'.format(sym)) assert '' not in sym assert hasattr(self, 'unk_idx') return self.sym2idx.get(sym, self.unk_idx) def get_symbols(self, indices): return [self.get_sym(idx) for idx in indices] def get_indices(self, symbols): return [self.get_idx(sym) for sym in symbols] def convert_to_tensor(self, symbols): return torch.LongTensor(self.get_indices(symbols)) def convert_to_sent(self, indices, exclude=None): if exclude is None: return ' '.join([self.get_sym(idx) for idx in indices]) else: return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude]) def __len__(self): return len(self.idx2sym) #scrap from vocabulary import Vocab #scrap torch.manual_seed(10) device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")Overview Attention Let's start off simple by imagining some word embeddings of shape `(seq=7, batch_size=3, embedding_dim=32)`seq, batch_size, embedding_dim = 7, 3, 32 word_embs = torch.rand(seq, batch_size, embedding_dim)In the Transformer XL, we also feed the cached outputs of the model for the previous sequence. 
In this case, we would be feeding the word embeddings from the previous sequence as additional input to our model.To make things clearer, let's imagine our previous sequence was of length `prev_seq=6`memory = torch.rand(6, 3, 32) # hidden states from the previousRelative positional embeddings There are two sources of attention: the content and position MHA: The core component Aggregating all the above, we get the following MultiHeadAttention modulefrom typing import * class MultiHeadAttention(nn.Module): def __init__(self, d_input: int, d_inner: int, n_heads: int=4, dropout: float=0.1, dropouta: float=0.): super().__init__() self.d_input = d_input self.d_inner = d_inner self.n_heads = n_heads # this layer applies the linear transformation required # for the keys and values for all heads at once for efficiency self.linear_kv = nn.Linear( d_input, (d_inner * n_heads * 2), # 2 is for keys and values bias=False, # we don't apply bias, making this a simple matrix multiplication ) # for queries (will not be concatenated with memorized states so separate) self.linear_q = nn.Linear( d_input, d_inner * n_heads, bias=False ) # for positional embeddings self.linear_p = nn.Linear( d_input, d_inner * n_heads, bias=False ) self.scale = 1 / (d_inner ** 0.5) # for scaled dot product attention self.dropa = nn.Dropout(dropouta) # we will use this to project back to the input dimension self.lout = nn.Linear(self.d_inner * self.n_heads, self.d_input, bias=False) self.norm = nn.LayerNorm(self.d_input) self.dropo = nn.Dropout(dropout) def _rel_shift(self, x): # TODO: Understand zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:]) x = x_padded[1:].view_as(x) return x def forward(self, input_: torch.FloatTensor, # (cur_seq, b, d_in) pos_embs: torch.FloatTensor, # (cur_seq + prev_seq, d_in) memory: torch.FloatTensor, # (prev_seq, b, d_in) u: torch.FloatTensor, # (H, d) v: torch.FloatTensor, # (H, d) mask: Optional[torch.FloatTensor]=None, ): """ pos_embs: we pass the positional embeddings in separately because we need to handle relative positions input shape: (seq, bs, self.d_input) pos_embs shape: (seq + prev_seq, bs, self.d_input) output shape: (seq, bs, self.d_input) """ cur_seq = input_.shape[0] # sequence length of current segment prev_seq = memory.shape[0] # sequence length of previous segment H, d = self.n_heads, self.d_inner input_with_memory = torch.cat([memory, input_], dim=0) # concatenate recurrent memory # across sequence dimension # we will use the following symbols to represent the shape of the tensors # cs: current sequence length, b: batch, H: number of heads # d: inner dimension, ps: previous sequence length # The key and value are now conditioned on the preceding context k_tfmd, v_tfmd = \ torch.chunk(self.linear_kv(input_with_memory), 2, dim=-1) # (cs + ps, b, H * d) q_tfmd = self.linear_q(input_) # (cs, b, H * d) # apply scaled dot product attention # look at the following dimensions carefully, since this is the key operation # in the Transformer/Transformer XL architecture _, bs, _ = q_tfmd.shape assert bs == k_tfmd.shape[1] # content-based attention term ((a) + (c) in the paper) # this is the standard attention term in the original Transformer, except without positional embeddings # which are handled separately in the Transformer XL (see below) # here, i corresponds to the number of queries = number of current inputs/targets (seq-wise) # j corresponds 
to the number of key/values = number of vectors that we can use to compute the # vector for each query content_attn = torch.einsum("ibhd,jbhd->ijbh", ( (q_tfmd.view(cur_seq, bs, H, d) + # (a) u), # (c): u represents the global (independent of the query) # bias towards certain key/values = words # Note: maybe this could be a per-attention head parameter? k_tfmd.view(cur_seq + prev_seq, bs, H, d) # There is no positional information to be found here )) # (cs, cs + ps, b, H) # position-based attention term ((b) + (d) in the paper) # this attention is solely based on the position of the key/values # (i.e. it does not take the content of the key/values into account) p_tfmd = self.linear_p(pos_embs) # (cs + ps, b, H * d) position_attn = torch.einsum("ibhd,jhd->ijbh", ( (q_tfmd.view(cur_seq, bs, H, d) + # (b) v), # (d): v represents the global (independent of the query) # bias towards certain positions p_tfmd.view(cur_seq + prev_seq, H, d) # Notice there is not content information # regarding keys and values here! )) # (cs, cs + ps, b, H) # ??? position_attn = self._rel_shift(position_attn) # the attention is the sum of content-based and position-based attention attn = content_attn + position_attn if mask is not None and mask.any().item(): attn = attn.masked_fill( mask[...,None], -float('inf')) attn = torch.softmax(attn * self.scale, # rescale to prevent values from exploding dim=1) # normalize across the value sequence dimension attn = self.dropa(attn) attn_weighted_values = (torch.einsum("ijbh,jbhd->ibhd", (attn, # (cs, cs + ps, b, H) v_tfmd.view(cur_seq + prev_seq, bs, H, d), # (cs + ps, b, H, d) )) # (cs, b, H, d) .contiguous() # we need to change the memory layout to make `view` work .view(cur_seq, bs, H * d)) # (cs, b, H * d) # Project back to input dimension and add residual connection output = input_ + self.dropo(self.lout(attn_weighted_values)) output = self.norm(output) return output hoge = torch.arange(4 * 5 * 3 * 1).view(4, 5, 3, 1) x = hoge zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) #scrap torch.manual_seed(10)Let's test it outmha = MultiHeadAttention(32, 17, n_heads=4) inpt = torch.rand(7, 3, 32) pos = torch.rand(13, 32) mem = torch.rand(6, 3, 32) u, v = torch.rand(4, 17), torch.rand(4, 17) x1 = mha(inpt, pos, mem, u, v) x1.shape x1[0] torch.manual_seed(10) #scrap NHEADS = 4 DMODEL = 32 DINNER = 17 #scrap mha = MultiHeadAttention(32, 17, n_heads=4) #scrap mha #scrap inpt = torch.rand(7, 3, 32) pos = torch.rand(13, 32) mem = torch.rand(6, 3, 32) u, v = torch.rand(4, 17), torch.rand(4, 17) x2 = mha(inpt, pos, mem, u, v) #scrap x2[0] #scrap x2.mean() #scrap x2.std() #scrap torch.manual_seed(10) #scrap mha_ref = RelPartialLearnableMultiHeadAttn(NHEADS, DMODEL, DINNER, 0) #scrap mha_ref(inpt, pos, u, v, mems=mem).shape #scrap mha_ref(inpt, pos, u, v, mems=mem).mean() #scrap mha_ref(inpt, pos, u, v, mems=mem).std()Periphereal items#scrap torch.einsum("i,j->ij", torch.arange(4), torch.arange(3)) 1 / (10000 ** (torch.arange(0.0, 32, 2.0) / 32)) class PositionalEmbedding(nn.Module): def __init__(self, d): super().__init__() self.d = d inv_freq = 1 / (10000 ** (torch.arange(0.0, d, 2.0) / d)) self.register_buffer("inv_freq", inv_freq) def forward(self, positions: torch.LongTensor, # (seq, ) ): sinusoid_inp = torch.einsum("i,j->ij", positions.float(), self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) return pos_emb[:,None,:] embedding = PositionalEmbedding(32) 
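# The sinusoidal embedding uses sin/cos pairs at geometrically decreasing frequencies (inv_freq),
# so every position index gets a distinct d-dimensional code. The call below is a quick shape check:
# 10 positions with d=32 should return torch.Size([10, 1, 32]); the singleton middle axis lets the
# embedding broadcast over the batch dimension.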
embedding(torch.arange(10).float()).shape class PositionwiseFF(nn.Module): def __init__(self, d_input, d_inner, dropout): super().__init__() self.d_input = d_input self.d_inner = d_inner self.dropout = dropout self.ff = nn.Sequential( nn.Linear(d_input, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_input), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_input) def forward(self, input_: torch.FloatTensor, # (cur_seq, bs, d_input) ) -> torch.FloatTensor: # (cur_seq, bs, d_input) ff_out = self.ff(input_) output = self.layer_norm(input_ + ff_out) return output from mem_transformer import PositionwiseFF as RefPositionwiseFFBuilding the decoderclass DecoderBlock(nn.Module): def __init__(self, n_heads, d_input, d_head_inner, d_ff_inner, dropout, dropouta=0.): super().__init__() self.mha = MultiHeadAttention(d_input, d_head_inner, n_heads=n_heads, dropout=dropout, dropouta=dropouta) self.ff = PositionwiseFF(d_input, d_ff_inner, dropout) def forward(self, input_: torch.FloatTensor, # (cur_seq, bs, d_input) pos_embs: torch.FloatTensor, # (cur_seq + prev_seq, d_input), # memory: torch.FloatTensor, # (cur_seq ), u: torch.FloatTensor, # (H, d_input), # TODO: is this this level? v: torch.FloatTensor, # (H, d_input), mask=None, mems=None, ): return self.ff(self.mha(input_, pos_embs, mems, u, v, mask=mask))Building the adaptive embeddingsclass StandardWordEmbedding(nn.Module): """ TODO: Implement?? """ def __init__(self, num_embeddings, embedding_dim, div_val=1, sample_softmax=False): super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim self.embedding = nn.Embedding(num_embeddings, embedding_dim) self.scale = embedding_dim ** 0.5 def forward(self, input_: torch.LongTensor): return self.embedding(input_) * self.scale idxs1 = torch.randint(100, (7, 3)) idxs2 = torch.randint(100, (6, 3)) wembs = StandardWordEmbedding(100, 32) wembs(idxs1).shape from mem_transformer import AdaptiveEmbedding as RefAdaptiveEmbedding from mem_transformer import ProjectedAdaptiveLogSoftmax as RefProjectedAdaptiveLogSoftmaxBuilding the entire modelimport torch.nn.functional as FTODO: Handle evaluationfrom mem_transformer import RelPartialLearnableDecoderLayer as RefDecoderLayer from typing import * class TransformerXL(nn.Module): def __init__(self, num_embeddings, n_layers, n_heads, d_model, d_head_inner, d_ff_inner, dropout=0.1, dropouta=0., seq_len: int=0, mem_len: int=0): super().__init__() self.n_layers,self.n_heads,self.d_model,self.d_head_inner,self.d_ff_inner = \ n_layers,n_heads,d_model,d_head_inner,d_ff_inner # Embedding layers self.word_embs = StandardWordEmbedding(num_embeddings, d_model) self.pos_embs = PositionalEmbedding(d_model) # Core transformer self.drop = nn.Dropout(dropout) self.layers = nn.ModuleList([DecoderBlock(n_heads, d_model, d_head_inner=d_head_inner, d_ff_inner=d_ff_inner, dropout=dropout, dropouta=dropouta) for _ in range(n_layers)]) # tie weights self.output_projection = nn.Linear(d_model, num_embeddings) self.output_projection.weight = self.word_embs.embedding.weight self.loss_fn = nn.CrossEntropyLoss() # TODO: Why do we need a special loss? self.seq_len, self.mem_len = seq_len, mem_len # TODO: Is seq_len being used? # TODO: Why is this shared among the layers and heads? 
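# In the Transformer-XL paper, u and v are the learned global biases: u is added to the query for the
# content term ((a) + (c)) and v for the position term ((b) + (d)). Sharing a single pair across all
# layers appears to mirror the reference implementation's default setup.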
# TODO: Better understand meaning of these parameters self.u, self.v = (nn.Parameter(torch.Tensor(self.n_heads, self.d_head_inner)), nn.Parameter(torch.Tensor(self.n_heads, self.d_head_inner))) def init_memory(self, device=torch.device("cpu")) -> torch.FloatTensor: return [torch.empty(0, dtype=torch.float).to(device) for _ in range(self.n_layers+1)] def update_memory(self, previous_memory: List[torch.FloatTensor], hidden_states: List[torch.FloatTensor], ): assert len(hidden_states) == len(previous_memory) mem_len, seq_len = previous_memory[0].size(0), hidden_states[0].size(0) # For the updated memory, we use the most recent `self.mem_len` # states, including the previous memory # In other words, if `seq_len` < `self.mem_len` some of the previous memory # will carry over to the next memory with torch.no_grad(): new_memory = [] end_idx = mem_len + seq_len beg_idx = max(0, end_idx - self.mem_len) # TODO: Make this more efficient for m, h in zip(previous_memory, hidden_states): cat = torch.cat([m, h], dim=0) # (mem_len + seq_len, bs, d) new_memory.append(cat[beg_idx:end_idx].detach()) # (self.mem_len, bs, d) return new_memory def reset_length(self, seq_len, ext_len, mem_len): self.seq_len = seq_len self.mem_len = mem_len def forward(self, idxs: torch.LongTensor, # (cs, bs) target: torch.LongTensor, # (cs, bs) -> TODO: Isn:'t this the same? memory: Optional[List[torch.FloatTensor]]=None, ) -> Dict[str, torch.Tensor]: if memory is None: memory: List[torch.FloatTensor] = self.init_memory(idxs.device) assert len(memory) == len(self.layers) + 1 cur_seq, bs = idxs.size() prev_seq = memory[0].size(0) # Construct attention mask (TODO: Understand) dec_attn_mask = torch.triu( torch.ones((cur_seq, cur_seq + prev_seq)), diagonal=1 + prev_seq, ).byte()[...,None].to(idxs.device) word_embs = self.drop(self.word_embs(idxs)) # TODO: Understand pos_idxs = torch.arange(cur_seq + prev_seq - 1, -1, -1.0, dtype=torch.float).to(word_embs.device) pos_embs = self.drop(self.pos_embs(pos_idxs)) # Main part of forward pass hidden_states = [word_embs] layer_out = word_embs for mem, layer in zip(memory, self.layers): layer_out = layer(layer_out, pos_embs, self.u, self.v, mask=dec_attn_mask, mems=mem) hidden_states.append(layer_out) logits = self.output_projection(self.drop(layer_out)) loss = self.loss_fn(logits.view(-1, logits.size(-1)), target.view(-1)) # Update memory # Ensure the memory is treated as a constant # and we do not back propagate through them new_memory = self.update_memory(memory, hidden_states) return {"loss": loss, "logits": logits, "memory": new_memory} mha_ref(inpt, pos, u, v, mems=mem).mean() transformer = TransformerXL(1000, 4, 3, 32, 17, 71, mem_len=5) idxs = torch.randint(1000, (5, 9)) tgts = torch.randint(1000, (5, 9)) transformer(idxs, tgts)Training Data Loadingfrom torch.utils import data import math class LMDataLoader(data.DataLoader): """ Suppose batch size is 4 and our entire corpus looks like this: 'pytorch is an amazing deep learning framework that makes nlp really easy' To take advantage of segment-level recurrence in the Transformer XL, we want to make sure that the previous batch contains the previous segment at the same position. 
(TODO: Add better explanation) In other words, we want to iterate over this sentence like this Batch 1: pytorch amazing framework nlp Batch 2: is deep that really Batch 3: an learning makes easy Notice that you can reconstruct the original sentence by reading from top to bottom, left to right instead of left to right, top to bottom With a longer bptt (back propagation through time) length of 2 for example, the minibatch would be of shape (batch_size, bptt) and would look like Batch 1: pytorch amazing framework nlp is deep that really Batch 2: an learning makes easy """ def __init__(self, data: torch.LongTensor, batch_size: int, bptt: int, device=torch.device("cpu")): self.batch_size = batch_size self.bptt = bptt self.n_steps = data.size(0) // batch_size # we reshape the data here so that we can index # efficiently into it while training self.data = (data[:self.n_steps * batch_size] # trim off any elements that don't fit cleanly .view(batch_size, self.n_steps) # .transpose(0, 1) # .contiguous().to(device) # put on device as contiguous tensor ) def __iter__(self): for batch_start_idx in range(0, self.data.size(0) - 1, self.bptt): batch_end_idx = min(batch_start_idx + self.bptt, self.data.size(0) - 1) # TODO: What is `self.ext_len` in the original code? batch_data = self.data[batch_start_idx:batch_end_idx] target = self.data[batch_start_idx+1:batch_end_idx+1] # we generate the sequence length as well for loss calculation later yield batch_data, target, batch_end_idx - batch_start_idx def __len__(self): return math.ceil(self.data.size(0) / self.bptt)Testtest_corpus = torch.randint(1000, (1600, )) BS = 16 BPTT = 10 test_corpus[:BPTT] loader = LMDataLoader(test_corpus, BS, BPTT) b1, *_ = next(iter(loader)) b1.shape b1 b1, b2, sl = next(iter(loader)) transformer_xl = TransformerXL(1000, n_layers=4, n_heads=3, d_model=32, d_head_inner=17, d_ff_inner=71)Initializationdef init_weight(weight): nn.init.normal_(weight, 0.0, 0.02) def init_bias(bias): nn.init.constant_(bias, 0.0) # Borrowed from the transformer XL repo def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: init_bias(m.bias) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): init_weight(m.weight) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, 0.02) if hasattr(m, 'bias') and m.bias is not None: init_bias(m.bias) else: if hasattr(m, 'u'): init_weight(m.u) if hasattr(m, 'v'): init_weight(m.v) transformer_xl.apply(weights_init); def assert_nonan_params(m): for nm, param in m.named_parameters(): if torch.isnan(param).any(): raise ValueError(f"{nm} has nan weights") assert_nonan_params(transformer_xl) transformer_xl(b1, b2)Reference#scrap import mem_transformer #scrap def ref_init_weight(weight): nn.init.normal_(weight, 0.0, 0.02) def ref_init_bias(bias): nn.init.constant_(bias, 0.0) def ref_weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: ref_init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: ref_init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, 0.01) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): ref_init_weight(m.weight) elif 
classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: ref_init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: ref_init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, 0.01) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, 0.02) if hasattr(m, 'bias') and m.bias is not None: ref_init_bias(m.bias) elif classname.find('TransformerLM') != -1: if hasattr(m, 'r_emb'): ref_init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): ref_init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): ref_init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): ref_init_bias(m.r_bias) #scrap from mem_transformer import MemTransformerLM #scrap ref_transformer_xl = MemTransformerLM(1000, n_layer=4, n_head=3, d_model=32, d_head=16, d_inner=71, dropout=0.1, dropatt=0.0, ext_len=0, tgt_len=BPTT, mem_len=0, d_embed=None, ) #scrap ref_transformer_xl.apply(ref_weights_init); #scrap ref_transformer_xl(b1, b2) #scrap ref_transformer_xl(b1, b2)[0] #scrap ref_transformer_xl(b1, b2)[0].mean()Actual Training# TODO: Use some better library class Config(dict): def __init__(self, **kwargs): super().__init__(**kwargs) for k, v in kwargs.items(): setattr(self, k, v) def set(self, key, val): self[key] = val setattr(self, key, val) def update(self, dct): for k, v in dct.items(): self.set(k, v) # We will use prime numbers to ensure our implementation # is actually correct config = Config( use_fp16=False, seed=101, debug=False, is_ref=False, warmup_step=0, # Check default params min_lr=0., dropouta=0., clip=0.25, log_interval=200, eval_interval=50, ) if TESTING: config.update(dict( debug=True, lr=0.00025, bs=8, epochs=2, max_step=10000, # shorten for testing n_layers=4, n_heads=3, d_model=32, d_head_inner=17, d_ff_inner=71, dropout=0.1, train_bptt=33, eval_bptt=41, mem_len=41, eval_mem_len=63, )) else: config.update(dict( lr=0.0025, bs=22, epochs=2, max_step=400000, n_layers=12, n_heads=8, d_model=512, d_head_inner=64, d_ff_inner=2048, dropout=0.1, train_bptt=512, eval_bptt=128, mem_len=512, eval_mem_len=2100, )) torch.manual_seed(config.seed)TODO: Implement ourselves?vocab = Vocab(special=[""], lower_case=True) vocab.count_file(DATA_DIR / "train.txt") vocab.count_file(DATA_DIR / "valid.txt") vocab.count_file(DATA_DIR / "test.txt") None vocab.build_vocab() train_dataset = vocab.encode_file(DATA_DIR / "train.txt", ordered=True, add_eos=True) valid_dataset = vocab.encode_file(DATA_DIR / "valid.txt", ordered=True, add_eos=True) test_dataset = vocab.encode_file(DATA_DIR / "test.txt", ordered=True, add_eos=True) train_dataset[:50]Prepare iteratorstrain_iter = LMDataLoader(train_dataset, config.bs, config.train_bptt, device=device) valid_iter = LMDataLoader(valid_dataset, config.bs, config.eval_bptt, device=device) test_iter = LMDataLoader(test_dataset, config.bs, config.eval_bptt, device=device) next(iter(train_iter))Training Loop TODO: With FP16def logging(x): print(x) # temporary! import torch.optim as optim import math import time import os from tqdm import tqdm # TODO: Rewrite to use ignite or some cleaner framework loss_change = [] val_loss_change = [] def train_epoch( epoch: int, model: nn.Module, train_loader: data.DataLoader, val_loader: data.DataLoader, optimizer: optim.Optimizer, scheduler, train_step_start=0., ): # Turn on training mode which enables dropout. 
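# The loop below carries `mems` (the segment-level recurrence state) across batches, clips gradients
# to config.clip, applies a linear learning-rate warmup for the first config.warmup_step steps followed
# by the cosine schedule, runs validation every config.eval_interval steps, and returns the global step
# so the caller can stop once config.max_step is reached.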
model.train() mems = tuple() if config.is_ref else None train_step = train_step_start train_loss = 0 log_start_time = time.time() best_val_loss = float("inf") pbar = tqdm(train_loader, total=min(config.max_step - train_step_start, len(train_loader))) for batch_idx, (data, target, seq_len) in enumerate(pbar): model.zero_grad() if config.is_ref: ret = model(data, target, *mems) loss, mems = ret[0], ret[1:] loss = loss.mean() else: out_dict = model(data, target, memory=mems) loss, mems = out_dict["loss"], out_dict["memory"] if config.use_fp16: optimizer.backward(loss) else: loss.backward() train_loss += loss.item() loss_change.append(loss.item()) if config.use_fp16: optimizer.clip_master_grads(config.clip) else: torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip) optimizer.step() assert_nonan_params(model) # check for nan # step-wise learning rate annealing train_step += 1 # linear warmup stage if train_step < config.warmup_step: curr_lr = config.lr * train_step / config.warmup_step optimizer.param_groups[0]['lr'] = curr_lr else: scheduler.step(train_step) if train_step % config.log_interval == 0: cur_loss = train_loss / config.log_interval elapsed = time.time() - log_start_time log_str = '| epoch {:3d} step {:>8d} | lr {:.3g} ' \ '| loss {:5.2f}'.format( epoch, train_step, optimizer.param_groups[0]['lr'], cur_loss) log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss)) # logging(log_str) pbar.set_description(log_str) train_loss = 0 log_start_time = time.time() if train_step % config.eval_interval == 0: val_loss = evaluate(model, val_loader) val_loss_change.append(val_loss) # TODO: Log appropriately # logging('-' * 100) # log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s ' \ # '| valid loss {:5.2f}'.format( # train_step // config.eval_interval, train_step, # (time.time() - eval_start_time), val_loss) # log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss)) # logging(log_str) # logging('-' * 100) # Save the model if the validation loss is the best we've seen so far. if not best_val_loss or val_loss < best_val_loss: # if not config.debug: # with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f: # torch.save(model, f) # with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f: # torch.save(optimizer.state_dict(), f) best_val_loss = val_loss eval_start_time = time.time() if train_step == config.max_step: return train_step return train_step def evaluate(model: nn.Module, val_loader: data.DataLoader): # Turn on evaluation mode which disables dropout. model.eval() # If the model does not use memory at all, make the ext_len longer. # Otherwise, make the mem_len longer and keep the ext_len the same. model.reset_length(config.eval_bptt, 0, config.eval_mem_len+config.train_bptt-config.eval_bptt) # Evaluation total_len, total_loss = 0, 0. 
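# The model returns a per-token average loss for each segment, so we weight it by seq_len when
# accumulating; dividing by the total token count at the end gives a corpus-level average that is
# comparable across segments of different lengths.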
with torch.no_grad(): mems = tuple() if config.is_ref else None for i, (data, target, seq_len) in enumerate(val_loader): if config.is_ref: ret = model(data, target, *mems) loss, mems = ret[0], ret[1:] loss = loss.mean() else: out_dict = model(data, target, memory=mems) loss, mems = out_dict["loss"], out_dict["memory"] total_loss += seq_len * loss.float().item() total_len += seq_len # Switch back to the training mode model.reset_length(config.train_bptt, 0, config.mem_len) model.train() return total_loss / total_len def train(model, train_loader, valid_loader): optimizer = optim.Adam(model.parameters(), lr=config.lr) total_steps = min(config.max_step, len(train_loader) * config.epochs) scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, total_steps, eta_min=config.min_lr) train_step_start = 0 for epoch in range(config.epochs): if train_step_start >= config.max_step: break train_step_start = train_epoch( epoch, model, train_iter, valid_iter, optimizer, scheduler, train_step_start, ) def evaluate_final(model, val_loader): # Turn on evaluation mode which disables dropout. model.eval() total_len, total_loss = 0, 0. start_time = time.time() model.reset_length(config.eval_bptt, 0, config.eval_mem_len + config.train_bptt - config.eval_bptt) with torch.no_grad(): mems = tuple() if config.is_ref else None for i, (data, target, seq_len) in enumerate(val_loader): if config.is_ref: ret = model(data, target, *mems) loss, mems = ret[0], ret[1:] loss = loss.mean() else: out_dict = model(data, target, memory=mems) loss, mems = out_dict["loss"], out_dict["memory"] total_loss += seq_len * loss.item() total_len += seq_len total_time = time.time() - start_time model.reset_length(config.train_bptt, 0, config.mem_len) loss_val = total_loss / total_len return {"loss": loss_val, "ppl": math.exp(loss_val)} transformer_xl = TransformerXL( num_embeddings=len(vocab), n_layers=config.n_layers, n_heads=config.n_heads, d_model=config.d_model, d_head_inner=config.d_head_inner, d_ff_inner=config.d_ff_inner, dropout=config.dropout, dropouta=config.dropouta, seq_len=config.train_bptt, mem_len=config.mem_len, ) if torch.cuda.is_available(): transformer_xl.cuda() transformer_xl.apply(weights_init); assert_nonan_params(transformer_xl) def prod(x): acc = 1 for v in x: acc *= v return acc def num_params(model: nn.Module): acc = 0 for p in model.parameters(): acc += prod(p.shape) return acc def num_params_per_param(model): d = {} for name, p in model.named_parameters(): d[name] = prod(p.shape) return d num_params(transformer_xl) train( transformer_xl, train_iter, valid_iter, ) evaluate_final(transformer_xl, valid_iter) import matplotlib.pyplot as plt %matplotlib inline loss_change_self = [x for x in loss_change] plt.plot(loss_change_self) val_loss_change_self = [x for x in val_loss_change] plt.plot(val_loss_change_self)Compare with reference#scrap loss_change = [] val_loss_change = [] #scrap config.set("is_ref", True) #scrap transformer_xl_ref = MemTransformerLM( len(vocab), n_layer=config.n_layers, n_head=config.n_heads, d_model=config.d_model, d_head=config.d_head_inner, d_inner=config.d_ff_inner, dropout=config.dropout, dropatt=config.dropouta, ext_len=0, tgt_len=config.train_bptt, mem_len=config.mem_len, pre_lnorm=False, ) if torch.cuda.is_available(): transformer_xl_ref.cuda() transformer_xl_ref.apply(ref_weights_init) optimizer = optim.Adam(transformer_xl_ref.parameters(), lr=config.lr) scheduler = scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, config.max_step, eta_min=config.min_lr) #scrap 
num_params(transformer_xl_ref) #scrap train( transformer_xl_ref, train_iter, valid_iter, ) #scrap evaluate_final(transformer_xl_ref, valid_iter) #scrap loss_change_ref = [x for x in loss_change] #scrap plt.plot(loss_change_self) plt.plot(loss_change_ref) #scrap val_loss_change_ref = [x for x in val_loss_change] #scrap plt.plot(val_loss_change_ref) plt.plot(val_loss_change_self) #scrap transformer_xl_ref #scrap transformer_xlAnother custom sectiondisplay(HTML(df_test_metadata.head().to_html()))Customizing nbconvert Under the hood, nbconvert uses [Jinja templates](http://jinja.pocoo.org/docs/latest/) to specify how the notebooks should be formatted. These templates can be fully customized, allowing you to use nbconvert to create notebooks in different formats with different styles as well. Converting a notebook to an (I)Python script and printing to stdoutOut of the box, nbconvert can be used to convert notebooks to plain Python files. For example, the following command converts the `example.ipynb` notebook to Python and prints out the result:!jupyter nbconvert --to python 'example.ipynb' --stdout[NbConvertApp] Converting notebook example.ipynb to python # coding: utf-8 # # Example notebook # ### Markdown cells # # This is an example notebook that can be converted with `nbconvert` to different formats. This is an example of a markdown cell. # ### LaTeX Equations # # Here is an equation: # # $$ # y = \sin(x) # $$ # ### Code cells # In[1]: print("This is a code cell that produces some output") # ### Inline figures # In[1]: import matplotlib.pyplot as plt import numpy as np plt.ion() x = np.linspace(0, 2 * np.pi, 100) y = np.sin(x) plt.plot(x, y)From the code, you can see that non-code cells are also exported. If you wanted to change that behaviour, you would first look to nbconvert [configuration options page](./config_options.rst) to see if there is an option available that can give you your desired behaviour. In this case, if you wanted to remove code cells from the output, you could use the `TemplateExporter.exclude_markdown` traitlet directly, as below.!jupyter nbconvert --to python 'example.ipynb' --stdout --TemplateExporter.exclude_markdown=True[NbConvertApp] Converting notebook example.ipynb to python # coding: utf-8 # In[1]: print("This is a code cell that produces some output") # In[1]: import matplotlib.pyplot as plt import numpy as np plt.ion() x = np.linspace(0, 2 * np.pi, 100) y = np.sin(x) plt.plot(x, y)Custom Templates As mentioned above, if you want to change this behavior, you can use a custom template. The custom template inherits from the Python template and overwrites the markdown blocks so that they are empty. Below is an example of a custom template, which we write to a file called `simplepython.tpl`. 
This template removes markdown cells from the output, and also changes how the execution count numbers are formatted:%%writefile simplepython.tpl {% extends 'python.tpl'%} ## remove markdown cells {% block markdowncell %} {% endblock markdowncell %} ## change the appearance of execution count {% block in_prompt %} # [{{ cell.execution_count if cell.execution_count else ' ' }}]: {% endblock in_prompt %}Overwriting simplepython.tplUsing this template, we see that the resulting Python code does not contain anything that was previously in a markdown cell, and only displays execution counts (i.e., `[]:` not `In[]:`):!jupyter nbconvert --to python 'example.ipynb' --stdout --template=simplepython.tpl[NbConvertApp] Converting notebook example.ipynb to python # coding: utf-8 # [1]: print("This is a code cell that produces some output") # [1]: import matplotlib.pyplot as plt import numpy as np plt.ion() x = np.linspace(0, 2 * np.pi, 100) y = np.sin(x) plt.plot(x, y)Template structureNbconvert templates consist of a set of nested blocks. When defining a newtemplate, you extend an existing template by overriding some of the blocks.All the templates shipped in nbconvert have the basic structure described here,though some may define additional blocks.from IPython.display import HTML, display with open('template_structure.html') as f: display(HTML(f.read()))A few gotchasJinja blocks use `{% %}` by default which does not play nicely with LaTeX, so those are replaced by `((* *))` in LaTeX templates. Templates using cell tagsThe notebook file format supports attaching arbitrary JSON metadata to each cell. In addition, every cell has a special `tags` metadata field that accepts a list of strings that indicate the cell's tags. To apply these, go to the `View → CellToolbar → Tags` option which will create a Tag editor at the top of every cell. First choose a notebook you want to convert to html, and apply the tags: `"Easy"`, `"Medium"`, or `"Hard"`. With this in place, the notebook can be converted using a custom template.Design your template in the cells provided below.Hint: tags are located at `cell.metadata.tags`, the following Python code collects the value of the tag: ```pythoncell['metadata'].get('tags', [])```Which you can then use inside a Jinja template as in the following:%%writefile mytemplate.tpl {% extends 'full.tpl'%} {% block any_cell %} {% if 'Hard' in cell['metadata'].get('tags', []) %}
<div style="border:thin solid red">
    {{ super() }}
</div>
{% elif 'Medium' in cell['metadata'].get('tags', []) %}
<div style="border:thin solid orange">
    {{ super() }}
</div>
{% elif 'Easy' in cell['metadata'].get('tags', []) %}
<div style="border:thin solid green">
    {{ super() }}
</div>
{% else %} {{ super() }} {% endif %} {% endblock any_cell %}Overwriting mytemplate.tplNow, if we collect the result of using nbconvert with this template, and display the resulting html, we see the following:example = !jupyter nbconvert --to html 'example.ipynb' --template=mytemplate.tpl --stdout example = example[3:] # have to remove the first three lines which are not proper html from IPython.display import HTML, display display(HTML('\n'.join(example)))**Genearte DataSet**import numpy as np from skimage import filters import h5py import os from skimage.draw import circle def map_image_to_intensity_range(image, min_o, max_o, percentiles=0): # If percentile = 0 uses min and max. Percentile >0 makes normalisation more robust to outliers. if image.dtype in [np.uint8, np.uint16, np.uint32]: assert min_o >= 0, 'Input image type is uintXX but you selected a negative min_o: %f' % min_o if image.dtype == np.uint8: assert max_o <= 255, 'Input image type is uint8 but you selected a max_o > 255: %f' % max_o min_i = np.percentile(image, 0 + percentiles) max_i = np.percentile(image, 100 - percentiles) image = (np.divide((image - min_i), max_i - min_i) * (max_o - min_o) + min_o).copy() image[image > max_o] = max_o image[image < min_o] = min_o return image def prepare_data(out_path, effect_size=50., num_samples=100, image_size=100, moving_effect=True, scale_intensities_to_one=True, save_type='hdf5'): # Constants stdbckg = 50. #std deviation of the background stdkernel = 2.5 #std deviation of the Gaussian smoothing kernel block1size = 10 #size of the first block block2size = 10 #size of the 2nd block offset = int((image_size / 3.5) + 0.5) block2offset_ = np.asarray([offset, offset]) block3size = 10 #size of the 3rd block block3offset_ = np.asarray([-offset, -offset]) norm_percentile = 0 numNsamples = num_samples // 2 numP1samples = np.int(num_samples // 4) numP2samples = np.int(num_samples // 4) Features = np.zeros([image_size ** 2, numNsamples + numP1samples + numP2samples]) GT = np.zeros([image_size ** 2, numNsamples + numP1samples + numP2samples]) Labels = np.zeros(numNsamples+numP1samples+numP2samples) half_imsize = np.int(image_size / 2) # Generate images of class 1 with subtype A (box in the centre and upper left) for n in range(numP1samples): I = np.zeros([image_size, image_size]) if moving_effect: block2offset = block2offset_ + np.random.randint(-5, 5, size=2) else: block2offset = block2offset_ rr, cc = circle(half_imsize+40, half_imsize+40, 8, [image_size, image_size]) I[rr,cc] = effect_size GT[:,n] = I.reshape(image_size ** 2) > 0 noise = np.random.normal(scale=stdbckg, size=np.asarray([image_size, image_size])) smnoise = filters.gaussian(noise, stdkernel) smnoise = smnoise / np.std(smnoise) * stdbckg J = I + smnoise if scale_intensities_to_one: J = map_image_to_intensity_range(J, -1, 1, percentiles=norm_percentile) Features[:,n] = J.reshape(image_size ** 2) Labels[n] = 1 # Generate images of class 1 with subtype B (box in the centre and lower right) for n in range(numP2samples): I = np.zeros([image_size, image_size]) if moving_effect: block3offset = block3offset_ + np.random.randint(-5, 5, size=2) else: block3offset = block3offset_ rr, cc = circle(half_imsize-40, half_imsize-40, 8, [image_size, image_size]) I[rr,cc] = effect_size GT[:,n+numP1samples] = I.reshape(image_size ** 2) > 0 noise = np.random.normal(scale=stdbckg, size=np.asarray([image_size, image_size])) smnoise = filters.gaussian(noise, stdkernel) smnoise = smnoise / np.std(smnoise) * stdbckg J = I + smnoise if 
scale_intensities_to_one: J = map_image_to_intensity_range(J, -1, 1, percentiles=norm_percentile) Features[:,n+numP1samples] = J.reshape(image_size ** 2) Labels[n+numP1samples] = 1 # Generate image of class 0 (only noise) for n in range(numNsamples): I = np.zeros([image_size, image_size]) noise = np.random.normal(scale=stdbckg, size=np.asarray([image_size, image_size])) smnoise = filters.gaussian(noise, stdkernel) smnoise = smnoise / np.std(smnoise) * stdbckg J = I + smnoise if scale_intensities_to_one: J = map_image_to_intensity_range(J, -1, 1, percentiles=norm_percentile) Features[:,n+numP1samples+numP2samples] = J.reshape(image_size ** 2) Labels[n+numP1samples+numP2samples] = 0 if save_type == 'text': txt_folder = os.path.dirname(out_path) np.savetxt(os.path.join(txt_folder, 'features_moving.txt'), Features, fmt='%1.4f') np.savetxt(os.path.join(txt_folder, 'labels_moving.txt'), Labels, fmt='%d') np.savetxt(os.path.join(txt_folder, 'gt_features_moving.txt'), GT, fmt='%d') elif save_type == 'pickle': np.savez_compressed(out_path, features=Features, labels=Labels, gt=GT) elif save_type == 'hdf5': with h5py.File(out_path, 'w') as hdf5_file: #Features = (Features - np.min(Features))/(np.max(Features) - np.min(Features)) hdf5_file.create_dataset('features', data=Features, dtype=np.float32) hdf5_file.create_dataset('labels', data=Labels, dtype=np.uint8) hdf5_file.create_dataset('gt', data=GT, dtype=np.uint8) else: raise ValueError('Unknown save_type: %s' % save_type)**Generate And Load DataSet**prepare_data(os.path.join(base_path, 'syntheticDataset.hdf5'), scale_intensities_to_one=True, num_samples=1000, effect_size=500, image_size=imsize)#10000 with h5py.File(os.path.join(base_path, 'syntheticDataset.hdf5'), 'r') as f: features = f['features'].value labels = f['labels'].value gts = f['gt'].value print('DataSet Loaded Successfully') gts = np.reshape(gts, [imsize, imsize, -1]) gts = np.transpose(gts, [2, 0, 1]) features = np.reshape(features, [imsize, imsize, -1]) features = np.transpose(features, [2, 0, 1]) features.min() print(features.shape) print(labels.shape) print(gts.shape) import matplotlib.pyplot as plt index = 100 plt.imshow(features[index], cmap='gray') plt.show() print(labels[index]) plt.imshow(gts[index], cmap='gray') plt.show() import matplotlib.pyplot as plt index = 0 plt.figure() plt.subplot(1,2,1) plt.imshow(features[index], cmap='gray') plt.subplot(1,2,2) print(labels[index]) plt.imshow(gts[index], cmap='gray') plt.show() index = 700 plt.figure() plt.subplot(1,2,1) plt.imshow(features[index], cmap='gray') plt.subplot(1,2,2) print(labels[index]) plt.imshow(gts[index], cmap='gray') plt.show() class_0 = os.path.join(base_path, '0') class_1 = os.path.join(base_path, '1') if not os.path.exists(class_0): os.makedirs(class_0) if not os.path.exists(class_1): os.makedirs(class_1) class_0_indexes = np.where(labels==0) normal = [] infected = [] normal_groundtruth = [] infected_groundtruth = [] for index in class_0_indexes: normal.append(features[index]) normal_groundtruth.append(gts[index]) normal = np.array(normal).reshape(-1, imsize, imsize, 1) normal_groundtruth = np.array(normal_groundtruth).reshape(-1, imsize, imsize, 1) print(normal.shape) print(normal_groundtruth.shape) class_1_indexes = np.where(labels==1) for index in class_1_indexes: infected.append(features[index]) infected_groundtruth.append(gts[index]) infected = np.array(infected).reshape(-1, imsize, imsize, 1) infected_groundtruth = np.array(infected_groundtruth).reshape(-1, imsize, imsize, 1) print(infected.shape) 
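# Compatibility note (a hedged sketch, assuming newer library versions than the ones this
# notebook was written against):
# - h5py 3.x removed the `.value` accessor used above; the full dataset can instead be read
#   with slicing, e.g. features = f['features'][()]
# - `skimage.draw.circle` was replaced by `skimage.draw.disk` in recent scikit-image releases:
#       from skimage.draw import disk
#       rr, cc = disk((half_imsize + 40, half_imsize + 40), 8, shape=(image_size, image_size))
# - `np.int` is deprecated in recent NumPy versions; the built-in `int` works in its place.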
print(infected_groundtruth.shape) normal_norm = [] for i, content in enumerate(normal): img = content.copy() img = ((img - np.min(img))/(np.max(img) - np.min(img)))*1.3 img = (img - np.mean(img))/np.std(img) normal_norm.append(img) print('\rProcessing Non-Healthy Files:', str(i+1) + '/' + str(len(normal)), end='') normal = np.asarray(normal_norm) print('') infected_norm = [] for i, content in enumerate(infected): img = content.copy() img = (img - np.min(img))/(np.max(img) - np.min(img)) img = (img - np.mean(img))/np.std(img) infected_norm.append(img) print('\rProcessing Healthy Files:', str(i+1) + '/' + str(len(infected_norm)), end='') infected = np.asarray(infected_norm) np.save(os.path.join(class_0, 'images'), normal) np.save(os.path.join(class_0, 'masks'), normal_groundtruth) np.save(os.path.join(class_1, 'images'), infected) np.save(os.path.join(class_1, 'masks'), infected_groundtruth) print(infected.max(), normal.max()) print(np.load(os.path.join(class_1, 'images.npy')).max()) print(np.load(os.path.join(class_0, 'images.npy')).max()) import numpy as np class_0 = os.path.join(base_path, '0') class_1 = os.path.join(base_path, '1') print(np.load(os.path.join(class_0, 'images.npy')).max()) print(np.load(os.path.join(class_1, 'images.npy')).max()) print(infected.max(), normal.max()) infected.max() from_images = 100 to_images = 110 images_0 = normal[from_images:to_images] masks_0 = normal_groundtruth[from_images:to_images] images_1 = infected[from_images:to_images] masks_1 = infected_groundtruth[from_images:to_images] def plot_images(images, masks): plt.figure(1, figsize=(20,20)) for i in range(images.shape[0]): plt.subplot(1, 10, i+1).set_title('Image # {}' .format(i+1)) plt.imshow(images[i, :, :, 0], cmap='gray') plt.axis('off') plt.figure(2, figsize=(20,20)) for i in range(masks.shape[0]): plt.subplot(1, 10, i+1).set_title('Mask # {}' .format(i+1)) plt.imshow(masks[i, :, :, 0], cmap='gray') plt.axis('off') plt.show() plt.close() plot_images(images_0, masks_0) plot_images(images_1, masks_1)Ejemplo 13: Series temporales con Keras Paso 1: Gestión de los datos En primer lugar, se cargan las bibliotecas necesarias para la gestión de datosimport numpy as np import matplotlib.pyplot as plt import pandas as pd**1.1-Carga de datos**En este caso, los datos están disponibles como un CSV que se carga desde un directorio.from google.colab import drive drive.mount('/content/drive') dataset = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/NSE-TATAGLOBAL.csv") dataset.head() #print(len(dataset))Se seleccionan las columnas 1 y 2.training_set = dataset.iloc[:, 1:2].values print(len(training_set)) print(training_set)2035 [[234.05] [234.55] [240. ] ... [121.8 ] [120.3 ] [122.1 ]]**1.2-Visualización de los datos*** Se puede comprobar la forma que tienen nuestros datos.plt.figure(figsize = (15, 5)) plt.plot(dataset.iloc[:, 1].values, label = "Open") plt.plot(dataset.iloc[:, 2].values, label = "High") plt.xlabel("Días") plt.ylabel("") plt.title("Valores por días") plt.legend() plt.show()**1.3-Codificar los datos**En este caso los datos son numéricos con lo que sólo requieren procesamiento para escalarlos:* Los datos ya son numéricos.* Se realizar una escacla de los datos con MinMax en la zona 0-1, Una ventaja es que se da estabilidad a los datos pero, un problema es que comprime los datos de entrada entre unos límites empíricos (el máximo y el mínimo de la variable). 
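In other words, each value is mapped as $x' = \frac{x - x_{\min}}{x_{\max} - x_{\min}}$, where $x_{\min}$ and $x_{\max}$ are the empirical extremes of the series, so a single outlier changes the scale applied to every other observation.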
Esto quiere decir que si existe ruido, se va a ampliar.from sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler(feature_range = (0, 1)) training_set_scaled = sc.fit_transform(training_set) training_set_scaled**1.4-Seleccionar los datos**En este caso, los datos se separan por la ventana que queremos controlar (60 días).window_size = 60 X_train = [] #Lista de listas de 60 observaciones y_train = [] #Lista de valores for i in range(window_size, len(training_set)): X_train.append(training_set_scaled[i-60:i, 0]) #Take [0:60,0] [60:120,0] se generan listas de 60 observaciones y_train.append(training_set_scaled[i, 0])#Take [60,0] [120,0] X_train, y_train = np.array(X_train), np.array(y_train) X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) print(len(X_train[0]))Paso 2: Arquitectura e implementación de nuestra red1. La entrada de nuestra red será una capa con la forma de los datos de entrada.2. La función de activación en la capa de salida se establece para que sea un número. 4. La función de pérdida será **mse**.5. La función de optimización **adam**.from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Dropout regressor = Sequential() regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1))) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50)) regressor.add(Dropout(0.2)) regressor.add(Dense(units = 1)) regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')Paso 3: Entrenamientofrom matplotlib import pyplot history = regressor.fit(X_train, y_train, epochs = 10, batch_size = 64) print(history) print(history.history){'loss': [0.0023194083247239454, 0.002245409493039869, 0.0020821630932343536, 0.0022651717262460462, 0.002011301252766972, 0.0018462412571534515, 0.0018464375247506874, 0.0019472022510216207, 0.0020134318585778715, 0.0018142159509507915]}Paso 4: Test y Predicción En este caso, se va a validar con el conjunto de test:* Se pasa como parámetro el modelo entrenado.* Se pasan los valores de entrada y los esperados de salida (X,Y)* Se calculan valores para los datos de entrenamiento y de test.* Se calcula RMSE (error cuadrático medio). Se busca penalizar tanto los valores por defecto como por exceso. El valor preferente es pequeño indicando que que los valores pronosticados están cerca de los valores observados.dataset_test = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/tatatest.csv") real_stock_price = dataset_test.iloc[:, 1:2].values print(real_stock_price)#Se obtienen los valores reales #Se crea un frame con dos columnas: los valores de entrenamiento y de test dataset_total = pd.concat((dataset['Open'], dataset_test['Open']), axis = 0) inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values print(inputs) #Se preparan los datos en la forma adecudad inputs = inputs.reshape(-1,1) #-1-->la forma se infiere inputs = sc.transform(inputs) #se escalan los datos X_test = [] #se generan la lista de lista de observaciones for i in range(60, 76): X_test.append(inputs[i-60:i, 0]) X_test = np.array(X_test) #Se transforma a un array de numpy X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) predicted_stock_price = regressor.predict(X_test)#Se realiza la predicción obteniendo un array de valores. 
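# The raw predictions live in the MinMax-scaled [0, 1] space the network was trained on;
# sc.inverse_transform below maps them back to price units. Equivalently (illustration only,
# reusing the already-fitted scaler sc):
#   manual_prices = predicted_stock_price * (sc.data_max_[0] - sc.data_min_[0]) + sc.data_min_[0]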
print(predicted_stock_price) predicted_stock_price = sc.inverse_transform(predicted_stock_price) #Se realiza la inversa para obtener el valor real (previamente se habían normalizado los valores de entrada) print(predicted_stock_price) import math from sklearn.metrics import mean_squared_error print("Valores reales: "+str(real_stock_price)) print("Valores predicción: "+str(predicted_stock_price)) rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price)) print("Test data score: %.2f RMSE" % rmse) plt.plot(real_stock_price, color = 'black', label = 'TATA Stock Price') plt.plot(predicted_stock_price, color = 'green', label = 'Predicted TATA Stock Price') plt.title('TATA Stock Price Prediction') plt.xlabel('Time') plt.ylabel('TATA Stock Price') plt.legend() plt.show()Simple plot of data from txt file txt file contains text lines and numerical data#Comments: # for single line comment and ''' lines ''' for multilines comment #Shortcut command to run file.ipynb in Jupyter in windows OS: Shift+Enter #open txt file #create emply lists for x and y values #read line by line from txt file #if line starts with any character (here #), continue without doing anything #if line starts with other than #, then slpit the line by delimiter (here is a space) #specify which are which #add those values in respective parameters, if needed #plot interested lists #format plot #save plot import matplotlib.pyplot as plt import random as random import json as json import pandas as pd import numpy as np %matplotlib inline import seaborn as sns sns.set() with open ("testdata1.txt",'r') as f: i = 0 #it works even without use of i X = [] # create blank list for x axis values Y = [] R = [] for line in f: #each lin in file f #print(line) if line.startswith("#"): #if line start with # character, continue without doing any work #print(line) continue else: #if line start with othern than # then slpit the line by a space ' ' lineParts = line.split(' ') #print(lineParts) x = float(lineParts[0]) # 1 intexed value is x, check w/wo float() #print(x) X.append(x) # add values of x in list X #print(X) y = float(lineParts[1]) #print(y) r = y / x #print("resistance = ", r,"Ohm") Y.append(y) '''print(Y)''' # two ways of commenting #print(Y) R.append(r) #print(R) i = i + 1 #print(i,x,y) #print(Y) #print(R) #print(len(R)) avgR = sum(R)/len(R) myMean = np.mean(R) print(myMean) #print("resistance = ", avgR ,"Ohm") #print(X,Y) plt.figure(figsize = [14,10]) #figsize(width, height) in inches #plt.bar(X,Y) # ^ . try withh plt.plot plt.plot(X,Y,color = 'red', marker = 'o') # ^ . 
try withh plt.plot plt.axis([0, 2.0, 0, 5.0]) #axis[xmin,xmax, ymin,ymax] #another way to set axis limit #plt.xlim(xmin,xmax) #plt.ylim(ymin,ymax) plt.title('Practice, NPSC2020', fontsize=36) plt.xlabel('Current (A)', fontsize=34) plt.ylabel('Voltage (V)', fontsize=34) plt.tight_layout() # this is for margin plt.grid(True) # comment this if u do not need grid plt.savefig("testplot.png") # png, jpg, eps, pdf as u like plt.show() with open ("testdata.txt",'r') as f: i = 0 #it works even without use of i X = [] # create blank list for x axis values Y = [] R = [] for line in f: #each lin in file f #print(line) if line.startswith("#"): #if line start with # character, continue without doing any work #print(line) continue else: #if line start with othern than # then slpit the line by a space ' ' lineParts = line.split(' ') #print(lineParts) x = float(lineParts[0]) # 1 intexed value is x, check w/wo float() #print(x) X.append(x) # add values of x in list X #print(X) y = float(lineParts[1]) #print(y) Y.append(y) '''print(Y)''' # two ways of commenting #print(Y) R.append(r) #print(R) i = i + 1 #print(i,x,y) #print(Y) #print(R) #print(len(R)) avgR = sum(R)/len(R) myMean = np.mean(R) print(myMean) #print("resistance = ", avgR ,"Ohm") #print(X,Y) plt.figure(figsize = [14,8]) #figsize(width, height) in inches #plt.bar(X,Y) # ^ . try withh plt.plot plt.plot(X,Y,color = 'red', marker = 'o') # ^ . try withh plt.plot plt.axis([0, 35, 0, 50]) #axis[xmin,xmax, ymin,ymax] #another way to set axis limit #plt.xlim(xmin,xmax) #plt.ylim(ymin,ymax) plt.title('Practice, NPSC2020', fontsize=22) plt.xlabel('Current (A)', fontsize=18) plt.ylabel('Voltage (V)', fontsize=18) plt.tight_layout() # this is for margin plt.grid(True) # comment this if u do not need grid plt.savefig("testplot.png") # png, jpg, eps, pdf as u like plt.show()2.75Week 3 - FunctionsThe main objective for this part of the lecture is to develop an understanding of:* What is a function?* How can functions be used?* Why is using functions a good idea? Build-In Functionshelp(pow) pow(3, 3) id(pow) id(3)Creating our own functionsdef BMI ( weight_kg, height_cm ): height_m = height_cm / 100 bmi = weight_kg / (height_m ** 2) return bmi power=2 BMI (90.7, 182) height_m = 98 height_mAdding documentationdef BMI ( weight_kg, height_cm ): """ (float, float) -> float Returns the BMI calculated by dividing weight in kilograms by the square of the height in meters squared. >>> BMI (90.7, 182) 27.38195870064002 """ height_m = height_cm / 100 bmi = weight_kg / (height_m ** 2) return bmi help(BMI) def doSometing(a): """ (string) -> string This function does something great! >>> doSomething(a) Magic!!! """ return 'Magic!!!'Another functionLet's right a new function that calculates the drip rate for an IV drip as in the following example:Imagine that you have a 1,000 mL IV bag and need to infuse 125 mg of ABC at a rate of 5 mg per hour. Assuming you use entire bag to deliver the dose of ABC, what should the drip rate be (mL per hour) in order to deliver the entire IV bag.def drip_rate ( med_dose, med_per_time, bag_size ): """ (int, int, int) -> float Returns the drip rate using the units of volume from bag_size and the unit of time from the med_per_time parameter. 
>>> drip_rate ( 10, 1, 10 ) 1.0 """ time = med_dose / med_per_time drip_rate = bag_size / time return drip_rate help(drip_rate) drip_rate ( 125, 5, 1000 ) drip_rate ( 10, 1, 10 ) drip_rate ( med_per_time = 5, med_dose = 125, bag_size = 1000 ) l = [[125,5,1000],[10,1,10]] for item in l: print(drip_rate(item[0],item[1],item[2]))40.0 1.0Customizing figuresSometimes, even though relatively customizeable, the `plot` method of `scipp` is not flexible enough for one's needs. In this section, we explore how the figures produced by the `scipp.plot` function can be further modified. Modifying the returned Plot objectThere are two ways of customizing `scipp` figures. The first one is to first create a default figure using the `plot` function, and then modifying its contents.The `plot` commands returns an object which is represented in a notebook as a figure (or multiple figures) using the `_ipython_display_` property.This object can subsequently be modified post-creation.import matplotlib.pyplot as plt import numpy as np import scipp as sc N = 60 M = 5 d = sc.Dataset() d['noise'] = sc.Variable(dims=['x', 'tof'], values=10.0*np.random.rand(M, N)) d.coords['tof'] = sc.Variable(dims=['tof'], values=np.arange(N+1).astype(np.float64), unit=sc.units.us) d.coords['x'] = sc.Variable(dims=['x'], values=np.arange(M).astype(np.float64), unit=sc.units.m) out = sc.plot(d, projection="1d") outThe `out` object is a `Plot` object which is made up of several pieces:- some `widgets` that are used to interact with the displayed figure via buttons and sliders to control slicing of higher dimensions or flipping the axes of the plot- a `view` which contains a `figure` and is the visual interface between the user and the data- in the case of 1D and 3D plots, the `Plot` object also contains a `panel` which provides additional control widgetsEach one of these pieces can individually be displayed in the notebook.For instance, we can display the `widgets` of the 2D image by doingout.widgetsand they are still connected to the figure above.It is also possible to customize figures such as changing the figure title or the axes labels by accessing the underlying matplotlib axes:out.ax.set_title('This is a new title!') out.ax.set_xlabel('My new Xaxis label') outA line color may be modified by accessing the underlying axes and lines using the Matplotlib API(although we do recommend that changing line styles should instead be done by passing arguments to the `plot` command,as shown [here](plotting-1d-data.ipynbCustomizing-linestyles,-markers-and-colors)):out.ax.get_lines()[0].set_color('red') out**Note**If the plot produces more than one figure (in the case of plotting a dataset that contains both 1d and 2d data), the `out` object is a `dict` that contains one key per figure.The keys are either a combination of dimension and unit for 1d figures, or the name of the variable (`noise`) in our case. Placing figures inside existing Matplotlib axesSometimes, the `scipp` default graphs are not flexible enough for advanced figures. One common case is placing figures in subplots, for example. 
To this end, it is also possible to attach `scipp` plots to existing `matplotlib` axes.This is achieved via the `ax` keyword argument (and `cax` for colorbar axes), and is best illustrated via a short demo.We first create 3 subplots:figs, axs = plt.subplots(1, 3, figsize=(12, 3)) figsThen a `Dataset` with some 2D data:N = 100 M = 50 xx = np.arange(N, dtype=np.float64) yy = np.arange(M, dtype=np.float64) x, y = np.meshgrid(xx[:-1], yy) b = N/20.0 c = M/2.0 r = np.sqrt(((x-c)/b)**2 + ((y-c)/b)**2) a = 10.0 * np.sin(r) d1 = sc.Dataset() d1['Signal'] = sc.Variable(dims=['y', 'x'], values=a, unit=sc.units.counts) d1.coords['x'] = sc.Variable(dims=['x'], values=xx, unit=sc.units.m) d1.coords['y'] = sc.Variable(dims=['y'], values=yy, unit=sc.units.m)Next, we attach the 2D image plot to the first subplot, and display the colorbar in the third subplot:out = sc.plot(d1, ax=axs[0], cax=axs[2])This has just returned a `Plot` object, but then we can check that our original figure has been updated:figsWe can add a 1D plot of a slice through the 2D data in the middle panel, and check once again the original figure:out1 = sc.plot(d1['Signal']['x', 1], ax=axs[1]) figsNext we create a second dataset with some more 1D data and add it to the middle panel:d2 = sc.Dataset() N = 100 d2["Sample"] = sc.Variable(dims=['tof'], values=10.0 * np.random.rand(N), variances=np.random.rand(N), unit=sc.units.counts) d2["Background"] = sc.Variable(dims=['tof'], values=2.0 * np.random.rand(N), unit=sc.units.counts) d2.coords['tof'] = sc.Variable(dims=['tof'], values=np.arange(N+1).astype(np.float64), unit=sc.units.us) out2 = sc.plot(d2, ax=axs[1], color=['r', 'g']) figsWe can now for example modify the axes labels:axs[0].set_xlabel('This is my new label!') figsYou can then also access the individual plot objects and change their properties using the Matplotlib API.For example, if we wish to change the line color of the `'Sample'` from green to purple, we can do:axs[1].get_lines()[2].set_color('purple') figsLogistic Regression Sectiontrain = pd.read_csv('train_data.csv') test = pd.read_csv('test_data.csv') X_train = train.drop(['High Income'], axis=1) y_train = train['High Income'] X_test = test.drop(['High Income'], axis=1) y_test = test['High Income']Logistic Regression Coefficientslogreg = LogisticRegression() logreg.fit(X_train, y_train) coef = logreg.coef_ print("Coefficients: " + str(logreg.coef_)) print("Intercept" + str(logreg.intercept_))Coefficients: [[0.25213684 0.52613559]] Intercept[-16.3964274]Get Data Points for Desmos graphstrain['linreg'] = train['Age']*0.25213684 + train['Years of Education']*0.52613559 - 16.3964274 train['logreg'] = 1 / (1 + np.exp(-train['linreg'])) mask_tr = train.applymap(type) != bool d = {True: 1, False: 0} train = train.where(mask_tr, train.replace(d)) def merge(list1, list2): merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))] return merged_list train_Ytuple = list(train['High Income']) train_Xtuple = list(train['linreg']) train_tuples = merge(train_Xtuple, train_Ytuple) test['linreg'] = test['Age']*0.25213684 + test['Years of Education']*0.52613559 - 16.3964274 test['logreg'] = 1 / (1 + np.exp(-test['linreg'])) mask_te = test.applymap(type) != bool test = test.where(mask_te, test.replace(d)) test_Ytuple = list(test['High Income']) test_Xtuple = list(test['linreg']) test_tuples = merge(test_Xtuple, test_Ytuple)Model Performance Sectionadult = pd.read_csv("adult.csv") mask = adult.applymap(type) == bool d = {">50K": 1, "<=50K": 0} adult = adult.where(mask, 
adult.replace(d)) data = adult[['age', 'education.num', 'income']] data['income'] = data["income"].astype(str).astype(int) X = data.drop(['income'], axis=1) y = data['income'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 42) logreg = LogisticRegression() logreg.fit(X_train, y_train) print("Train accuracy:" + str(logreg.score(X_train, y_train))) print("Test accuracy:" + str(logreg.score(X_test, y_test))) logreg = LogisticRegression() cross_val_score(logreg, X_train, y_train, cv=5)Assignment 2 - Semi-gradient TD with a Neural NetworkWelcome to Course 3 Programming Assignment 2. In the previous assignment, you implemented semi-gradient TD with State Aggregation for solving a **policy evaluation task**. In this assignment, you will implement **semi-gradient TD with a simple Neural Network** and use it for the same policy evaluation problem. You will implement an agent to evaluate a fixed policy on the 500-State Randomwalk. As you may remember from the previous assignment, the 500-state Randomwalk includes 500 states. Each episode begins with the agent at the center and terminates when the agent goes far left beyond state 1 or far right beyond state 500. At each time step, the agent selects to move either left or right with equal probability. The environment determines how much the agent moves in the selected direction.**In this assignment, you will:**- Implement the stochastic gradient descent method for state-value prediction.- Implement semi-gradient TD with a neural network as the function approximator and the Adam algorithm.- Compare the performance of semi-gradient TD with a neural network and semi-gradient TD with tile-coding. PackagesWe import the following libraries that are required for this assignment:- [numpy](www.numpy.org) : Fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) : Library for plotting graphs in Python.- [RL-Glue](http://www.jmlr.org/papers/v10/tanner09a.html) : Library for reinforcement learning experiments.- [tqdm](https://tqdm.github.io/) : A package to display progress bars when running experiments.- BaseOptimizer : An abstract class that specifies the optimizer API for Agent.- plot_script : Custom script to plot results.- RandomWalkEnvironment : The Randomwalk environment script from Course 3 Assignment 1.# Do not modify this cell! # Import necessary libraries # DO NOT IMPORT OTHER LIBRARIES - This will break the autograder. import numpy as np import matplotlib.pyplot as plt %matplotlib inline import os, shutil from tqdm import tqdm from rl_glue import RLGlue from environment import BaseEnvironment from agent import BaseAgent from optimizer import BaseOptimizer import plot_script from randomwalk_environment import RandomWalkEnvironmentSection 1: Create semi-gradient TD with a Neural NetworkIn this section, you will implement an Agent that learns with semi-gradient TD with a neural network. You will use a neural network with one hidden layer. The input of the neural network is the one-hot encoding of the state number. We use the one-hot encoding of the state number instead of the state number itself because we do not want to build in the prior knowledge that integer inputs close to each other have similar values. The hidden layer contains 100 rectified linear units (ReLUs), which pass their input through if it is bigger than zero and return 0 otherwise.
ReLU gates are commonly used in neural networks due to their nice properties such as the sparsity of the activation and having non-vanishing gradients. The output of the neural network is the estimated state value. It is a linear function of the hidden units as is commonly the case when estimating the value of a continuous target using neural networks.The neural network looks like this:![](nn_structure.png)For a given input, $s$, value of $s$ is computed by:$$\begin{align} \psi &= sW^{[0]} + b^{[0]} \\x &= \textit{max}(0, \psi) \\v &= xW^{[1]} + b^{[1]}\end{align} $$where $W^{[0]}$, $b^{[0]}$, $W^{[1]}$, $b^{[1]}$ are the parameters of the network and will be learned when training the agent. 1-1: Implement helper methodsBefore implementing the agent, you first implement some helper functions which you will later use in agent's main methods. Implement `get_value()`First, you will implement get_value() method which feeds an input $s$ into the neural network and returns the output of the network $v$ according to the equations above. To implement get_value(), take into account the following notes:- `get_value()` gets the one-hot encoded state number denoted by s as an input. - `get_value()` receives the weights of the neural network as input, denoted by weights and structured as an array of dictionaries. Each dictionary corresponds to weights from one layer of the neural network to the next. Each dictionary includes $W$ and $b$. The shape of the elements in weights are as follows: - weights[0]["W"]: num_states $\times$ num_hidden_units - weights[0]["b"]: 1 $\times$ num_hidden_units - weights[1]["W"]: num_hidden_units $\times$ 1 - weights[1]["b"]: 1 $\times$ 1- The input of the neural network is a sparse vector. To make computation faster, we take advantage of input sparsity. To do so, we provided a helper method `my_matmul()`. 
**Make sure that you use `my_matmul()` for all matrix multiplications except for element-wise multiplications in this notebook.**- The max operator used for computing $x$ is element-wise.def my_matmul(x1, x2): """ Given matrices x1 and x2, return the multiplication of them """ result = np.zeros((x1.shape[0], x2.shape[1])) x1_non_zero_indices = x1.nonzero() if x1.shape[0] == 1 and len(x1_non_zero_indices[1]) == 1: result = x2[x1_non_zero_indices[1], :] elif x1.shape[1] == 1 and len(x1_non_zero_indices[0]) == 1: result[x1_non_zero_indices[0], :] = x2 * x1[x1_non_zero_indices[0], 0] else: result = np.matmul(x1, x2) return result #GRADED FUNCTION: [get_value] def get_value(s, weights): """ Compute value of input s given the weights of a neural network """ ### Compute the ouput of the neural network, v, for input s (3 lines) ### START CODE HERE ### psi = my_matmul(s,weights[0]['W']) + weights[0]['b'] x = (psi>0)*psi v = my_matmul(x,weights[1]['W']) + weights[1]['b'] ### END CODE HERE ### return vRun the following code to test your implementation of the `get_value()` function:## Test Code for get_value() ## # Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 10 num_hidden_layer = 1 s = np.array([[0, 0, 0, 1, 0]]) weights_data = np.load("asserts/get_value_weights.npz") weights = [dict() for i in range(num_hidden_layer+1)] weights[0]["W"] = weights_data["W0"] weights[0]["b"] = weights_data["b0"] weights[1]["W"] = weights_data["W1"] weights[1]["b"] = weights_data["b1"] estimated_value = get_value(s, weights) print ("Estimated value: {}".format(estimated_value)) assert(np.allclose(estimated_value, np.array([[-0.21915705]]))) print ("Passed the assert!")Estimated value: [[-0.21915705]] Passed the assert!**Expected output**: Estimated value: [[-0.21915705]] Implement `get_gradient()`You will also implement `get_gradient()` method which computes the gradient of the value function for a given input, using backpropagation. You will later use this function to update the value function. As you know, we compute the value of a state $s$ according to: $$\begin{align} \psi &= sW^{[0]} + b^{[0]} \\x &= \textit{max}(0, \psi) \\v &= xW^{[1]} + b^{[1]}\end{align} $$To update the weights of the neural network ($W^{[0]}$, $b^{[0]}$, $W^{[1]}$, $b^{[1]}$), we compute the gradient of $v$ with respect to the weights according to:$$\begin{align} \frac{\partial v}{\partial W^{[0]}} &= s^T(W^{[1]T} \odot I_{x>0}) \\\frac{\partial v}{\partial b^{[0]}} &= W^{[1]T} \odot I_{x>0} \\\frac{\partial v}{\partial W^{[1]}} &= x^T \\\frac{\partial v}{\partial b^{[1]}} &= 1\end{align}$$where $\odot$ denotes element-wise matrix multiplication and $I_{x>0}$ is the gradient of the ReLU activation function which is an indicator whose $i$th element is 1 if $x[i]>0$ and 0 otherwise.#GRADED FUNCTION: [get_gradient] def get_gradient(s, weights): """ Given inputs s and weights, return the gradient of v with respect to the weights """ ### Compute the gradient of the value function with respect to W0, b0, W1, b1 for input s (6~8 lines) # grads[0]["W"] = ? # grads[0]["b"] = ? # grads[1]["W"] = ? # grads[1]["b"] = ? 
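# (For reference, with the test setup below, where s has shape (1, 5) and there are 2 hidden
#  units, the returned gradients should have shapes (5, 2), (1, 2), (2, 1) and (1, 1).)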
# Note that grads[0]["W"], grads[0]["b"], grads[1]["W"], and grads[1]["b"] should have the same shape as # weights[0]["W"], weights[0]["b"], weights[1]["W"], and weights[1]["b"] respectively # Note that to compute the gradients, you need to compute the activation of the hidden layer (x) grads = [dict() for i in range(len(weights))] ### START CODE HERE ### psi = my_matmul(s,weights[0]['W']) + weights[0]['b'] x = (psi>0)*1 grads[0]["W"] = my_matmul( s.T, weights[1]['W'].T * x) grads[0]["b"] = weights[1]['W'].T * x grads[1]["W"] = (x*psi).T grads[1]["b"] = np.array([[1.0]]) ### END CODE HERE ### return gradsRun the following code to test your implementation of the `get_gradient()` function:## Test Code for get_gradient() ## # Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 2 num_hidden_layer = 1 s = np.array([[0, 0, 0, 1, 0]]) weights_data = np.load("asserts/get_gradient_weights.npz") weights = [dict() for i in range(num_hidden_layer+1)] weights[0]["W"] = weights_data["W0"] weights[0]["b"] = weights_data["b0"] weights[1]["W"] = weights_data["W1"] weights[1]["b"] = weights_data["b1"] grads = get_gradient(s, weights) grads_answer = np.load("asserts/get_gradient_grads.npz") print("grads[0][\"W\"]\n", grads[0]["W"], "\n") print("grads[0][\"b\"]\n", grads[0]["b"], "\n") print("grads[1][\"W\"]\n", grads[1]["W"], "\n") print("grads[1][\"b\"]\n", grads[1]["b"], "\n") assert(np.allclose(grads[0]["W"], grads_answer["W0"])) assert(np.allclose(grads[0]["b"], grads_answer["b0"])) assert(np.allclose(grads[1]["W"], grads_answer["W1"])) assert(np.allclose(grads[1]["b"], grads_answer["b1"])) print("Passed the asserts!")grads[0]["W"] [[0. 0. ] [0. 0. ] [0. 0. ] [0.76103773 0.12167502] [0. 0. ]] grads[0]["b"] [[0.76103773 0.12167502]] grads[1]["W"] [[0.69198983] [0.82403662]] grads[1]["b"] [[1.]] Passed the asserts!**Expected output**: grads[0]["W"] [[0. 0. ] [0. 0. ] [0. 0. ] [0.76103773 0.12167502] [0. 0. ]] grads[0]["b"] [[0.76103773 0.12167502]] grads[1]["W"] [[0.69198983] [0.82403662]] grads[1]["b"] [[1.]] Implement stochastic gradient descent method for state-value predictionIn this section, you will implement stochastic gradient descent (SGD) method for state_value prediction. Here is the basic SGD update for state-value prediction with TD:$$\mathbf{w_{t+1}} = \mathbf{w_{t}} + \alpha \delta_t \nabla \hat{v}(S_t,\mathbf{w_{t}})$$At each time step, we update the weights in the direction $g_t = \delta_t \nabla \hat{v}(S_t,\mathbf{w_t})$ using a fixed step-size $\alpha$. $\delta_t = R_{t+1} + \gamma \hat{v}(S_{t+1},\mathbf{w_{t}}) - \hat{v}(S_t,\mathbf{w_t})$ is the TD-error. $\nabla \hat{v}(S_t,\mathbf{w_{t}})$ is the gradient of the value function with respect to the weights.The following cell includes the SGD class. You will complete the `update_weight()` method of SGD assuming that the weights and update g are provided.**As you know, in this assignment, we structured the weights as an array of dictionaries. Note that the updates $g_t$, in the case of TD, is $\delta_t \nabla \hat{v}(S_t,\mathbf{w_t})$. As a result, $g_t$ has the same structure as $\nabla \hat{v}(S_t,\mathbf{w_t})$ which is also an array of dictionaries.**#GRADED FUNCTION: [SGD] class SGD(BaseOptimizer): def __init__(self): pass def optimizer_init(self, optimizer_info): """Setup for the optimizer. Set parameters needed to setup the stochastic gradient descent method. 
Assume optimizer_info dict contains: { step_size: float } """ self.step_size = optimizer_info.get("step_size") def update_weights(self, weights, g): """ Given weights and update g, return updated weights """ for i in range(len(weights)): for param in weights[i].keys(): ### update weights (1 line) # weights[i][param] = None ### START CODE HERE ### weights[i][param] += self.step_size*g[i][param] ### END CODE HERE ### return weightsRun the following code to test your implementation of the `update_weights()` function:# Do not modify this cell! ## Test Code for update_weights() ## # Suppose num_states = 5, num_hidden_layer = 1, and num_hidden_units = 2 num_hidden_layer = 1 weights_data = np.load("asserts/update_weights_weights.npz") weights = [dict() for i in range(num_hidden_layer+1)] weights[0]["W"] = weights_data["W0"] weights[0]["b"] = weights_data["b0"] weights[1]["W"] = weights_data["W1"] weights[1]["b"] = weights_data["b1"] g_data = np.load("asserts/update_weights_g.npz") g = [dict() for i in range(num_hidden_layer+1)] g[0]["W"] = g_data["W0"] g[0]["b"] = g_data["b0"] g[1]["W"] = g_data["W1"] g[1]["b"] = g_data["b1"] test_sgd = SGD() optimizer_info = {"step_size": 0.3} test_sgd.optimizer_init(optimizer_info) updated_weights = test_sgd.update_weights(weights, g) # updated weights asserts updated_weights_answer = np.load("asserts/update_weights_updated_weights.npz") print("updated_weights[0][\"W\"]\n", updated_weights[0]["W"], "\n") print("updated_weights[0][\"b\"]\n", updated_weights[0]["b"], "\n") print("updated_weights[1][\"W\"]\n", updated_weights[1]["W"], "\n") print("updated_weights[1][\"b\"]\n", updated_weights[1]["b"], "\n") assert(np.allclose(updated_weights[0]["W"], updated_weights_answer["W0"])) assert(np.allclose(updated_weights[0]["b"], updated_weights_answer["b0"])) assert(np.allclose(updated_weights[1]["W"], updated_weights_answer["W1"])) assert(np.allclose(updated_weights[1]["b"], updated_weights_answer["b1"])) print("Passed the asserts!")updated_weights[0]["W"] [[ 1.17899492 0.53656321] [ 0.58008221 1.47666572] [ 1.01909411 -1.10248056] [ 0.72490408 0.06828853] [-0.20609725 0.69034095]] updated_weights[0]["b"] [[-0.18484533 0.92844539]] updated_weights[1]["W"] [[0.70488257] [0.58150878]] updated_weights[1]["b"] [[0.88467086]] Passed the asserts!**Expected output**: updated_weights[0]["W"] [[ 1.17899492 0.53656321] [ 0.58008221 1.47666572] [ 1.01909411 -1.10248056] [ 0.72490408 0.06828853] [-0.20609725 0.69034095]] updated_weights[0]["b"] [[-0.18484533 0.92844539]] updated_weights[1]["W"] [[0.70488257] [0.58150878]] updated_weights[1]["b"] [[0.88467086]] Adam AlgorithmIn this assignment, instead of using SGD for updating the weights, we use a more advanced algorithm called Adam. The Adam algorithm improves the SGD update with two concepts: adaptive vector step-sizes and momentum. It keeps estimates of the mean and second moment of the updates, denoted by $\mathbf{m}$ and $\mathbf{v}$ respectively:$$\mathbf{m_t} = \beta_m \mathbf{m_{t-1}} + (1 - \beta_m)g_t \\\mathbf{v_t} = \beta_v \mathbf{v_{t-1}} + (1 - \beta_v)g^2_t$$Given that $\mathbf{m}$ and $\mathbf{v}$ are initialized to zero, they are biased toward zero. 
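For example, after the very first update with $\beta_m = 0.9$ we get $\mathbf{m_1} = 0.1\, g_1$, which underestimates the average update by a factor of ten; dividing by $1 - \beta_m^1 = 0.1$ recovers $g_1$ exactly.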
To get unbiased estimates of the mean and second moment, Adam defines $\mathbf{\hat{m}}$ and $\mathbf{\hat{v}}$ as:$$ \mathbf{\hat{m_t}} = \frac{\mathbf{m_t}}{1 - \beta_m^t} \\\mathbf{\hat{v_t}} = \frac{\mathbf{v_t}}{1 - \beta_v^t}$$The weights are then updated as follows:$$ \mathbf{w_t} = \mathbf{w_{t-1}} + \frac{\alpha}{\sqrt{\mathbf{\hat{v_t}}}+\epsilon} \mathbf{\hat{m_t}}$$When implementing the agent you will use the Adam algorithm instead of SGD because it is more efficient. We have already provided you the implementation of the Adam algorithm in the cell below. You will use it when implementing your agent.class Adam(BaseOptimizer): def __init__(self): pass def optimizer_init(self, optimizer_info): """Setup for the optimizer. Set parameters needed to setup the Adam algorithm. Assume optimizer_info dict contains: { num_states: integer, num_hidden_layer: integer, num_hidden_units: integer, step_size: float, self.beta_m: float self.beta_v: float self.epsilon: float } """ self.num_states = optimizer_info.get("num_states") self.num_hidden_layer = optimizer_info.get("num_hidden_layer") self.num_hidden_units = optimizer_info.get("num_hidden_units") # Specify Adam algorithm's hyper parameters self.step_size = optimizer_info.get("step_size") self.beta_m = optimizer_info.get("beta_m") self.beta_v = optimizer_info.get("beta_v") self.epsilon = optimizer_info.get("epsilon") self.layer_size = np.array([self.num_states, self.num_hidden_units, 1]) # Initialize Adam algorithm's m and v self.m = [dict() for i in range(self.num_hidden_layer+1)] self.v = [dict() for i in range(self.num_hidden_layer+1)] for i in range(self.num_hidden_layer+1): # Initialize self.m[i]["W"], self.m[i]["b"], self.v[i]["W"], self.v[i]["b"] to zero self.m[i]["W"] = np.zeros((self.layer_size[i], self.layer_size[i+1])) self.m[i]["b"] = np.zeros((1, self.layer_size[i+1])) self.v[i]["W"] = np.zeros((self.layer_size[i], self.layer_size[i+1])) self.v[i]["b"] = np.zeros((1, self.layer_size[i+1])) # Initialize beta_m_product and beta_v_product to be later used for computing m_hat and v_hat self.beta_m_product = self.beta_m self.beta_v_product = self.beta_v def update_weights(self, weights, g): """ Given weights and update g, return updated weights """ for i in range(len(weights)): for param in weights[i].keys(): ### update self.m and self.v self.m[i][param] = self.beta_m * self.m[i][param] + (1 - self.beta_m) * g[i][param] self.v[i][param] = self.beta_v * self.v[i][param] + (1 - self.beta_v) * (g[i][param] * g[i][param]) ### compute m_hat and v_hat m_hat = self.m[i][param] / (1 - self.beta_m_product) v_hat = self.v[i][param] / (1 - self.beta_v_product) ### update weights weights[i][param] += self.step_size * m_hat / (np.sqrt(v_hat) + self.epsilon) ### update self.beta_m_product and self.beta_v_product self.beta_m_product *= self.beta_m self.beta_v_product *= self.beta_v return weights1-2: Implement Agent MethodsIn this section, you will implement `agent_init()`, `agent_start()`, `agent_step()`, and `agent_end()`.In `agent_init()`, you will: - specify the neural network structure by filling self.layer_size with the size of the input layer, hidden layer, and output layer. - initialize the network's parameters. We show the parameters as an array of dictionaries, self.weights, where each dictionary corresponds to weights from one layer to the next. Each dictionary includes $W$ and $b$. To initialize the parameters, you will use a normal distribution with mean 0 and standard deviation $\sqrt{\frac{2}{\text{ input of each node}}}$. 
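(Concretely, this is $\sqrt{2/\text{num\_states}}$ for the input-to-hidden parameters and $\sqrt{2/\text{num\_hidden\_units}}$ for the hidden-to-output parameters.)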
This initialization heuristic is commonly used when using ReLU gates and helps keep the output of a neuron from getting too big or too small. To initialize the network's parameters, use **self.rand_generator.normal()** which draws random samples from a normal distribution. The parameters of self.rand_generator.normal are mean of the distribution, standard deviation of the distribution, and output shape in the form of tuple of integers.In `agent_start()`, you will: - specify self.last_state and self.last_action. In `agent_step()` and `agent_end()`, you will: - compute the TD error using $v(S_t)$ and $v(S_{t+1})$. To compute the value function for $S_t$ and $S_{t+1}$, you will get their one-hot encoding using `one_hot()` method that we provided below. You feed the one-hot encoded state number to the neural networks using `get_value()` method that you implemented above. Note that `one_hot()` method returns the one-hot encoding of a state as a numpy array of shape (1, num_states). - retrieve the gradients using `get_gradient()` function that you implemented. - use Adam_algorithm that we provided to update the neural network's parameters, self.weights. - use `agent_policy()` method to select actions with. (only in `agent_step()`)def one_hot(state, num_states): """ Given num_state and a state, return the one-hot encoding of the state """ # Create the one-hot encoding of state # one_hot_vector is a numpy array of shape (1, num_states) one_hot_vector = np.zeros((1, num_states)) one_hot_vector[0, int((state - 1))] = 1 return one_hot_vector #GRADED FUNCTION: [Agent] class TDAgent(BaseAgent): def __init__(self): self.name = "td_agent" pass def agent_init(self, agent_info={}): """Setup for the agent called when the experiment first starts. Set parameters needed to setup the semi-gradient TD with a Neural Network. Assume agent_info dict contains: { num_states: integer, num_hidden_layer: integer, num_hidden_units: integer, step_size: float, discount_factor: float, self.beta_m: float self.beta_v: float self.epsilon: float seed: int } """ # Set random seed for weights initialization for each run self.rand_generator = np.random.RandomState(agent_info.get("seed")) # Set random seed for policy for each run self.policy_rand_generator = np.random.RandomState(agent_info.get("seed")) # Set attributes according to agent_info self.num_states = agent_info.get("num_states") self.num_hidden_layer = agent_info.get("num_hidden_layer") self.num_hidden_units = agent_info.get("num_hidden_units") self.discount_factor = agent_info.get("discount_factor") ### Define the neural network's structure (1 line) # Specify self.layer_size which shows the number of nodes in each layer # self.layer_size = np.array([None, None, None]) # Hint: Checkout the NN diagram at the beginning of the notebook ### START CODE HERE ### self.layer_size = np.array([self.num_states, self.num_hidden_units, 1]) ### END CODE HERE ### # Initialize the neural network's parameter (2 lines) self.weights = [dict() for i in range(self.num_hidden_layer+1)] for i in range(self.num_hidden_layer+1): ### Initialize self.weights[i]["W"] and self.weights[i]["b"] using self.rand_generator.normal() # Note that The parameters of self.rand_generator.normal are mean of the distribution, # standard deviation of the distribution, and output shape in the form of tuple of integers. # To specify output shape, use self.layer_size. 
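# (Sanity check, based on the agent_init test further below: with num_states = 5 and
#  num_hidden_units = 2, the initialized shapes should be W0: (5, 2), b0: (1, 2),
#  W1: (2, 1), b1: (1, 1).)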
### START CODE HERE ### self.weights[i]['W'] = self.rand_generator.normal(0,np.sqrt(2/self.layer_size[i]),(self.layer_size[i],self.layer_size[i+1])) self.weights[i]['b'] = self.rand_generator.normal(0,np.sqrt(2/self.layer_size[i]),(1,self.layer_size[i+1])) ### END CODE HERE ### # Specify the optimizer self.optimizer = Adam() optimizer_info = {"num_states": agent_info["num_states"], "num_hidden_layer": agent_info["num_hidden_layer"], "num_hidden_units": agent_info["num_hidden_units"], "step_size": agent_info["step_size"], "beta_m": agent_info["beta_m"], "beta_v": agent_info["beta_v"], "epsilon": agent_info["epsilon"]} self.optimizer.optimizer_init(optimizer_info) self.last_state = None self.last_action = None def agent_policy(self, state): ### Set chosen_action as 0 or 1 with equal probability. chosen_action = self.policy_rand_generator.choice([0,1]) return chosen_action def agent_start(self, state): """The first method called when the experiment starts, called after the environment starts. Args: state (Numpy array): the state from the environment's evn_start function. Returns: The first action the agent takes. """ ### select action given state (using self.agent_policy()), and save current state and action (2 lines) # self.last_state = ? # self.last_action = ? ### START CODE HERE ### self.last_state = state self.last_action = self.agent_policy(state) ### END CODE HERE ### return self.last_action def agent_step(self, reward, state): """A step taken by the agent. Args: reward (float): the reward received for taking the last action taken state (Numpy array): the state from the environment's step based, where the agent ended up after the last step Returns: The action the agent is taking. """ ### Compute TD error (5 lines) # delta = None ### START CODE HERE ### last_state = one_hot(self.last_state, self.num_states) v_last = get_value( last_state, self.weights ) curr_state = one_hot(state, self.num_states) v = get_value( curr_state, self.weights ) delta = self.discount_factor*v - v_last ### END CODE HERE ### ### Retrieve gradients (1 line) # grads = None ### START CODE HERE ### grads = get_gradient(last_state,self.weights) ### END CODE HERE ### ### Compute g (1 line) g = [dict() for i in range(self.num_hidden_layer+1)] for i in range(self.num_hidden_layer+1): for param in self.weights[i].keys(): # g[i][param] = None ### START CODE HERE ### g[i][param] = (reward+delta)*grads[i][param] ### END CODE HERE ### ### update the weights using self.optimizer (1 line) # self.weights = None ### START CODE HERE ### self.weights = self.optimizer.update_weights(self.weights,g) ### END CODE HERE ### ### update self.last_state and self.last_action (2 lines) ### START CODE HERE ### self.last_state = state self.last_action = self.agent_policy(state) ### END CODE HERE ### return self.last_action def agent_end(self, reward): """Run when the agent terminates. Args: reward (float): the reward the agent received for entering the terminal state. 
""" ### compute TD error (3 lines) # delta = None ### START CODE HERE ### last_state = one_hot(self.last_state, self.num_states) v_last = get_value( last_state, self.weights ) delta = -1 * v_last ### END CODE HERE ### ### Retrieve gradients (1 line) # grads = None ### START CODE HERE ### grads = get_gradient(last_state,self.weights) ### END CODE HERE ### ### Compute g (1 line) g = [dict() for i in range(self.num_hidden_layer+1)] for i in range(self.num_hidden_layer+1): for param in self.weights[i].keys(): # g[i][param] = None ### START CODE HERE ### g[i][param] = (reward+delta)*grads[i][param] ### END CODE HERE ### ### update the weights using self.optimizer (1 line) # self.weights = None ### START CODE HERE ### self.weights = self.optimizer.update_weights(self.weights,g) ### END CODE HERE ### def agent_message(self, message): if message == 'get state value': state_value = np.zeros(self.num_states) for state in range(1, self.num_states + 1): s = one_hot(state, self.num_states) state_value[state - 1] = get_value(s, self.weights) return state_valueRun the following code to test your implementation of the `agent_init()` function:## Test Code for agent_init() ## agent_info = {"num_states": 5, "num_hidden_layer": 1, "num_hidden_units": 2, "step_size": 0.25, "discount_factor": 0.9, "beta_m": 0.9, "beta_v": 0.99, "epsilon": 0.0001, "seed": 0 } test_agent = TDAgent() test_agent.agent_init(agent_info) print("layer_size: {}".format(test_agent.layer_size)) assert(np.allclose(test_agent.layer_size, np.array([agent_info["num_states"], agent_info["num_hidden_units"], 1]))) print("weights[0][\"W\"] shape: {}".format(test_agent.weights[0]["W"].shape)) print("weights[0][\"b\"] shape: {}".format(test_agent.weights[0]["b"].shape)) print("weights[1][\"W\"] shape: {}".format(test_agent.weights[1]["W"].shape)) print("weights[1][\"b\"] shape: {}".format(test_agent.weights[1]["b"].shape), "\n") assert(test_agent.weights[0]["W"].shape == (agent_info["num_states"], agent_info["num_hidden_units"])) assert(test_agent.weights[0]["b"].shape == (1, agent_info["num_hidden_units"])) assert(test_agent.weights[1]["W"].shape == (agent_info["num_hidden_units"], 1)) assert(test_agent.weights[1]["b"].shape == (1, 1)) print("weights[0][\"W\"]\n", (test_agent.weights[0]["W"]), "\n") print("weights[0][\"b\"]\n", (test_agent.weights[0]["b"]), "\n") print("weights[1][\"W\"]\n", (test_agent.weights[1]["W"]), "\n") print("weights[1][\"b\"]\n", (test_agent.weights[1]["b"]), "\n") agent_weight_answer = np.load("asserts/agent_init_weights_1.npz") assert(np.allclose(test_agent.weights[0]["W"], agent_weight_answer["W0"])) assert(np.allclose(test_agent.weights[0]["b"], agent_weight_answer["b0"])) assert(np.allclose(test_agent.weights[1]["W"], agent_weight_answer["W1"])) assert(np.allclose(test_agent.weights[1]["b"], agent_weight_answer["b1"])) print("Passed the asserts!")layer_size: [5 2 1] weights[0]["W"] shape: (5, 2) weights[0]["b"] shape: (1, 2) weights[1]["W"] shape: (2, 1) weights[1]["b"] shape: (1, 1) weights[0]["W"] [[ 1.11568467 0.25308164] [ 0.61900825 1.4172653 ] [ 1.18114738 -0.6180848 ] [ 0.60088868 -0.0957267 ] [-0.06528133 0.25968529]] weights[0]["b"] [[0.09110115 0.91976332]] weights[1]["W"] [[0.76103773] [0.12167502]] weights[1]["b"] [[0.44386323]] Passed the asserts!**Expected output**: layer_size: [5 2 1] weights[0]["W"] shape: (5, 2) weights[0]["b"] shape: (1, 2) weights[1]["W"] shape: (2, 1) weights[1]["b"] shape: (1, 1) weights[0]["W"] [[ 1.11568467 0.25308164] [ 0.61900825 1.4172653 ] [ 1.18114738 -0.6180848 ] [ 
0.60088868 -0.0957267 ] [-0.06528133 0.25968529]] weights[0]["b"] [[0.09110115 0.91976332]] weights[1]["W"] [[0.76103773] [0.12167502]] weights[1]["b"] [[0.44386323]] Run the following code to test your implementation of the `agent_start()` function:# Do not modify this cell! ## Test Code for agent_start() ## agent_info = {"num_states": 500, "num_hidden_layer": 1, "num_hidden_units": 100, "step_size": 0.1, "discount_factor": 1.0, "beta_m": 0.9, "beta_v": 0.99, "epsilon": 0.0001, "seed": 10 } # Suppose state = 250 state = 250 test_agent = TDAgent() test_agent.agent_init(agent_info) test_agent.agent_start(state) print("Agent state: {}".format(test_agent.last_state)) print("Agent selected action: {}".format(test_agent.last_action)) assert(test_agent.last_state == 250) assert(test_agent.last_action == 1) print("Passed the asserts!")Agent state: 250 Agent selected action: 1 Passed the asserts!**Expected output**: Agent state: 250 Agent selected action: 1 Run the following code to test your implementation of the `agent_step()` function:# Do not modify this cell! ## Test Code for agent_step() ## agent_info = {"num_states": 5, "num_hidden_layer": 1, "num_hidden_units": 2, "step_size": 0.1, "discount_factor": 1.0, "beta_m": 0.9, "beta_v": 0.99, "epsilon": 0.0001, "seed": 0 } test_agent = TDAgent() test_agent.agent_init(agent_info) # load initial weights agent_initial_weight = np.load("asserts/agent_step_initial_weights.npz") test_agent.weights[0]["W"] = agent_initial_weight["W0"] test_agent.weights[0]["b"] = agent_initial_weight["b0"] test_agent.weights[1]["W"] = agent_initial_weight["W1"] test_agent.weights[1]["b"] = agent_initial_weight["b1"] # load m and v for the optimizer m_data = np.load("asserts/agent_step_initial_m.npz") test_agent.optimizer.m[0]["W"] = m_data["W0"] test_agent.optimizer.m[0]["b"] = m_data["b0"] test_agent.optimizer.m[1]["W"] = m_data["W1"] test_agent.optimizer.m[1]["b"] = m_data["b1"] v_data = np.load("asserts/agent_step_initial_v.npz") test_agent.optimizer.v[0]["W"] = v_data["W0"] test_agent.optimizer.v[0]["b"] = v_data["b0"] test_agent.optimizer.v[1]["W"] = v_data["W1"] test_agent.optimizer.v[1]["b"] = v_data["b1"] # Assume the agent started at State 3 start_state = 3 test_agent.agent_start(start_state) # Assume the reward was 10.0 and the next state observed was State 1 reward = 10.0 next_state = 1 test_agent.agent_step(reward, next_state) # updated weights asserts print("updated_weights[0][\"W\"]\n", test_agent.weights[0]["W"], "\n") print("updated_weights[0][\"b\"]\n", test_agent.weights[0]["b"], "\n") print("updated_weights[1][\"W\"]\n", test_agent.weights[1]["W"], "\n") print("updated_weights[1][\"b\"]\n", test_agent.weights[1]["b"], "\n") agent_updated_weight_answer = np.load("asserts/agent_step_updated_weights.npz") assert(np.allclose(test_agent.weights[0]["W"], agent_updated_weight_answer["W0"])) assert(np.allclose(test_agent.weights[0]["b"], agent_updated_weight_answer["b0"])) assert(np.allclose(test_agent.weights[1]["W"], agent_updated_weight_answer["W1"])) assert(np.allclose(test_agent.weights[1]["b"], agent_updated_weight_answer["b1"])) # last_state and last_action assert print("Agent last state:", test_agent.last_state) print("Agent last action:", test_agent.last_action, "\n") assert(test_agent.last_state == 1) assert(test_agent.last_action == 1) print ("Passed the asserts!")updated_weights[0]["W"] [[ 1.10893459 0.30763738] [ 0.63690565 1.14778865] [ 1.23397791 -0.48152743] [ 0.72792093 -0.15829832] [ 0.15021996 0.39822163]] updated_weights[0]["b"] 
[[0.29798822 0.96254535]] updated_weights[1]["W"] [[0.76628754] [0.11486511]] updated_weights[1]["b"] [[0.58530057]] Agent last state: 1 Agent last action: 1 Passed the asserts!**Expected output**: updated_weights[0]["W"] [[ 1.10893459 0.30763738] [ 0.63690565 1.14778865] [ 1.23397791 -0.48152743] [ 0.72792093 -0.15829832] [ 0.15021996 0.39822163]] updated_weights[0]["b"] [[0.29798822 0.96254535]] updated_weights[1]["W"] [[0.76628754] [0.11486511]] updated_weights[1]["b"] [[0.58530057]] Agent last state: 1 Agent last action: 1 Run the following code to test your implementation of the `agent_end()` function:# Do not modify this cell! ## Test Code for agent_end() ## agent_info = {"num_states": 5, "num_hidden_layer": 1, "num_hidden_units": 2, "step_size": 0.1, "discount_factor": 1.0, "beta_m": 0.9, "beta_v": 0.99, "epsilon": 0.0001, "seed": 0 } test_agent = TDAgent() test_agent.agent_init(agent_info) # load initial weights agent_initial_weight = np.load("asserts/agent_end_initial_weights.npz") test_agent.weights[0]["W"] = agent_initial_weight["W0"] test_agent.weights[0]["b"] = agent_initial_weight["b0"] test_agent.weights[1]["W"] = agent_initial_weight["W1"] test_agent.weights[1]["b"] = agent_initial_weight["b1"] # load m and v for the optimizer m_data = np.load("asserts/agent_step_initial_m.npz") test_agent.optimizer.m[0]["W"] = m_data["W0"] test_agent.optimizer.m[0]["b"] = m_data["b0"] test_agent.optimizer.m[1]["W"] = m_data["W1"] test_agent.optimizer.m[1]["b"] = m_data["b1"] v_data = np.load("asserts/agent_step_initial_v.npz") test_agent.optimizer.v[0]["W"] = v_data["W0"] test_agent.optimizer.v[0]["b"] = v_data["b0"] test_agent.optimizer.v[1]["W"] = v_data["W1"] test_agent.optimizer.v[1]["b"] = v_data["b1"] # Assume the agent started at State 4 start_state = 4 test_agent.agent_start(start_state) # Assume the reward was 10.0 and reached the terminal state reward = 10.0 test_agent.agent_end(reward) # updated weights asserts print("updated_weights[0][\"W\"]\n", test_agent.weights[0]["W"], "\n") print("updated_weights[0][\"b\"]\n", test_agent.weights[0]["b"], "\n") print("updated_weights[1][\"W\"]\n", test_agent.weights[1]["W"], "\n") print("updated_weights[1][\"b\"]\n", test_agent.weights[1]["b"], "\n") agent_updated_weight_answer = np.load("asserts/agent_end_updated_weights.npz") assert(np.allclose(test_agent.weights[0]["W"], agent_updated_weight_answer["W0"])) assert(np.allclose(test_agent.weights[0]["b"], agent_updated_weight_answer["b0"])) assert(np.allclose(test_agent.weights[1]["W"], agent_updated_weight_answer["W1"])) assert(np.allclose(test_agent.weights[1]["b"], agent_updated_weight_answer["b1"])) print ("Passed the asserts!")updated_weights[0]["W"] [[ 1.10893459 0.30763738] [ 0.63690565 1.14778865] [ 1.17531054 -0.51043162] [ 0.75062903 -0.13736817] [ 0.15021996 0.39822163]] updated_weights[0]["b"] [[0.30846523 0.95937346]] updated_weights[1]["W"] [[0.68861703] [0.15986364]] updated_weights[1]["b"] [[0.586074]] Passed the asserts!**Expected output:** updated_weights[0]["W"] [[ 1.10893459 0.30763738] [ 0.63690565 1.14778865] [ 1.17531054 -0.51043162] [ 0.75062903 -0.13736817] [ 0.15021996 0.39822163]] updated_weights[0]["b"] [[0.30846523 0.95937346]] updated_weights[1]["W"] [[0.68861703] [0.15986364]] updated_weights[1]["b"] [[0.586074]] Section 2 - Run ExperimentNow that you implemented the agent, we can run the experiment. Similar to Course 3 Programming Assignment 1, we will plot the learned state value function and the learning curve of the TD agent. 
To plot the learning curve, we use Root Mean Squared Value Error (RMSVE). 2-1: Run Experiment for Semi-gradient TD with a Neural NetworkWe have already provided you the experiment/plot code, so you can go ahead and run the two cells below.Note that running the cell below will take **approximately 12 minutes**.# Do not modify this cell! true_state_val = np.load('data/true_V.npy') state_distribution = np.load('data/state_distribution.npy') def calc_RMSVE(learned_state_val): assert(len(true_state_val) == len(learned_state_val) == len(state_distribution)) MSVE = np.sum(np.multiply(state_distribution, np.square(true_state_val - learned_state_val))) RMSVE = np.sqrt(MSVE) return RMSVE # Define function to run experiment def run_experiment(environment, agent, environment_parameters, agent_parameters, experiment_parameters): rl_glue = RLGlue(environment, agent) # save rmsve at the end of each episode agent_rmsve = np.zeros((experiment_parameters["num_runs"], int(experiment_parameters["num_episodes"]/experiment_parameters["episode_eval_frequency"]) + 1)) # save learned state value at the end of each run agent_state_val = np.zeros((experiment_parameters["num_runs"], environment_parameters["num_states"])) env_info = {"num_states": environment_parameters["num_states"], "start_state": environment_parameters["start_state"], "left_terminal_state": environment_parameters["left_terminal_state"], "right_terminal_state": environment_parameters["right_terminal_state"]} agent_info = {"num_states": environment_parameters["num_states"], "num_hidden_layer": agent_parameters["num_hidden_layer"], "num_hidden_units": agent_parameters["num_hidden_units"], "step_size": agent_parameters["step_size"], "discount_factor": environment_parameters["discount_factor"], "beta_m": agent_parameters["beta_m"], "beta_v": agent_parameters["beta_v"], "epsilon": agent_parameters["epsilon"] } print('Setting - Neural Network with 100 hidden units') os.system('sleep 1') # one agent setting for run in tqdm(range(1, experiment_parameters["num_runs"]+1)): env_info["seed"] = run agent_info["seed"] = run rl_glue.rl_init(agent_info, env_info) # Compute initial RMSVE before training current_V = rl_glue.rl_agent_message("get state value") agent_rmsve[run-1, 0] = calc_RMSVE(current_V) for episode in range(1, experiment_parameters["num_episodes"]+1): # run episode rl_glue.rl_episode(0) # no step limit if episode % experiment_parameters["episode_eval_frequency"] == 0: current_V = rl_glue.rl_agent_message("get state value") agent_rmsve[run-1, int(episode/experiment_parameters["episode_eval_frequency"])] = calc_RMSVE(current_V) elif episode == experiment_parameters["num_episodes"]: # if last episode current_V = rl_glue.rl_agent_message("get state value") agent_state_val[run-1, :] = current_V save_name = "{}".format(rl_glue.agent.name).replace('.','') if not os.path.exists('results'): os.makedirs('results') # save avg. state value np.save("results/V_{}".format(save_name), agent_state_val) # save avg. 
rmsve np.savez("results/RMSVE_{}".format(save_name), rmsve = agent_rmsve, eval_freq = experiment_parameters["episode_eval_frequency"], num_episodes = experiment_parameters["num_episodes"]) # Run Experiment # Experiment parameters experiment_parameters = { "num_runs" : 20, "num_episodes" : 1000, "episode_eval_frequency" : 10 # evaluate every 10 episode } # Environment parameters environment_parameters = { "num_states" : 500, "start_state" : 250, "left_terminal_state" : 0, "right_terminal_state" : 501, "discount_factor" : 1.0 } # Agent parameters agent_parameters = { "num_hidden_layer": 1, "num_hidden_units": 100, "step_size": 0.001, "beta_m": 0.9, "beta_v": 0.999, "epsilon": 0.0001, } current_env = RandomWalkEnvironment current_agent = TDAgent # run experiment run_experiment(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters) # plot result plot_script.plot_result(["td_agent"]) shutil.make_archive('results', 'zip', 'results')Setting - Neural Network with 100 hidden unitsConstruct an equally weighted portfolio at the beginning, no modificationstatistics = pd.DataFrame(index=returns.columns) statistics.index.names = ["Ticker"] statistics["Mean Return (%)"] = returns.mean() * 100 statistics["Standard deviation (%)"] = returns.std() * 100 statistics["Skewness"] = returns.skew() statistics["Kurtosis"] = returns.kurtosis() statistics = statistics.round(4) statistics statistics.to_csv(f"../data/{index}_calculated/statistics.csv")Normal shock waves%matplotlib inline from matplotlib import pyplot as plt import numpy as np # Pint gives us some helpful unit conversion from pint import UnitRegistry ureg = UnitRegistry() Q_ = ureg.Quantity # We will use this to construct quantities (value + unit) # these lines are only for helping improve the display import matplotlib_inline.backend_inline matplotlib_inline.backend_inline.set_matplotlib_formats('pdf', 'png') plt.rcParams['figure.dpi']= 200 plt.rcParams['savefig.dpi'] = 200**Shock waves** are finite pressure disturbances that correspond to a large change in properties over a short distance, on the order of a few free molecular paths of the gas.In contrast, sound waves are infinitesimal disturbances.We can analyze the flow through a shock wave with a control volume, applying our existing conservation equations.:::{figure-md} fig-shockControl volume around a shock.:::{numref}`Figure {number} ` shows a control volume around a shock wave, in a duct with varying area. 
The thickness of the control volume is very small, on the order of the thickness of the shock itself (so $dx \sim 10^{-6}$ m).We will make the following assumptions about the flow:- steady, one-dimensional flow- adiabatic flow process: $\delta q = 0$, and so $ds_e = 0$- no shaft work across the control volume: $\delta w_s = 0$- no potential change: $dz = 0$- constant area around the shock: $A_1 = A_2$We can apply conservation of mass to this control volume:$$\rho_1 A_1 V_1 = \rho_2 A_2 V_2$$$$\rho_1 V_1 = \rho_2 V_2 \;,$$ (eq_mass)conservation of energy:$$\begin{gather*}h_{t1} + q = h_{t2} + w_s \\h_{t1} = h_{t2}\end{gather*}$$$$h_1 + \frac{V_1^2}{2} = h_2 + \frac{V_2^2}{2} \;,$$ (eq_energy)and momentum:$$\begin{gather*}\sum{F_x} = \dot{m} \left( V_{\text{out},x} - V_{\text{in}, x} \right) = \dot{m} \left( V_{2x} - V_{1x} \right) \\p_1 A_1 - p_2 A_2 = ( p_1 - p_2 ) A = \dot{m} (V_2 - V_1) = \rho A V (V_2 - V_1) \\p_1 - p_2 = (\rho V) V_2 - (\rho V) V_1 \\p_1 - p_2 = \rho_2 V_2^2 - \rho_1 V_1^2\end{gather*}$$$$p_1 + \rho_1 V_1^2 = p_2 + \rho_2 V_2^2 \;.$$ (eq_momentum)For an arbitrary fluid, we have three equations: {eq}`eq_mass`, {eq}`eq_energy`, and {eq}`eq_momentum`.In a typical problem, we know the fluid and conditions before the shock, and want to find the conditions after the shock.Thus, our known variables are $\rho_1$, $p_1$, $h_1$, and $V_1$, while our unknown variables are $\rho_2$, $p_2$, $h_2$, and $V_2$.We need a fourth equation to close this system of equations: a property relation for the fluid, otherwise known as an equation of state. Perfect gasesFor ideal/perfect gases, we have the ideal gas equation of state, and can assume constant values for $c_p$, $c_v$, and $\gamma$.We also have a convenient relationship for the speed of sound:$$\begin{gather*}p = \rho R T \\V = M a = M \sqrt{\gamma R T} \;.\end{gather*}$$Incorporating these into Equation {eq}`eq_mass` we can obtain$$\frac{p_1 M_1}{\sqrt{T_1}} = \frac{p_2 M_2}{\sqrt{T_2}} \;.$$ (eq_mass2)With our stagnation relationship for temperature, $ T_t = T \left( 1 + \frac{\gamma-1}{2} M^2 \right) $, Equation {eq}`eq_energy` becomes$$T_1 \left( 1 + \frac{\gamma-1}{2} M_1^2 \right) = T_2 \left( 1 + \frac{\gamma-1}{2} M_2^2 \right) \;,$$ (eq_energy2)which applies both around a normal shock and also at any point in a general flow with no work or heat transfer.Lastly, incorporating our relationships for perfect gases into Equation {eq}`eq_momentum` we obtain$$p_1 \left( 1 + \gamma M_1^2 \right) = p_2 \left( 1 + \gamma M_2^2 \right) \;.$$ (eq_momentum2)Now, we have three equations and three unknowns: $M_2$, $p_2$, and $T_2$. 😎However, it is a bit of a pain to solve this complicated system of equations every time. Fortunately, we can combine all three together and eliminate pressure and temperature completely!$$\left( \frac{1 + \gamma M_2^2}{1 + \gamma M_1^2} \right) \frac{M_1}{M_2} = \left( \frac{1 + \frac{\gamma-1}{2} M_2^2}{1 + \frac{\gamma-1}{2} M_1^2} \right)^{1/2} \;,$$which can actually be solved to find $M_2 = f(\gamma, M_1)$.A trivial solution to this equation is that $M_1 = M_2$, where there is no shock.(What does this mean? Just that the governing equations we set up apply just fine for a flow where there is no shock—good news!)For nontrivial solutions, with some painful algebra we can rearrange this equation into a recognizable form:$$A \left( M_2^2 \right)^2 + B M_2^2 + C = 0 \;,$$where $A, B, C = f(\gamma, M_1)$. This looks like a quadratic equation!
The only physically viable solution to this equation is$$M_2^2 = \frac{M_1^2 + \frac{2}{\gamma-1}}{\frac{2 \gamma}{\gamma-1} M_1^2 - 1} \;.$$ (eq_normal_shock)gamma = 1.4 mach1 = np.linspace(1.0, 4.0, num=50, endpoint=True) mach2 = np.sqrt((mach1**2 + 2/(gamma-1))/(2*gamma*mach1**2/(gamma-1) - 1)) plt.plot(mach1, mach2) plt.xlabel(r'$M_1$') plt.ylabel(r'$M_2$') plt.title('Downstream Mach number versus upstream Mach number for normal shocks') plt.grid(True) plt.tight_layout() plt.show()BackgroundThe directory structure currently looks like this:* `Driving Performance` * `01` * `Log_20200130101256_Unknown Road__0_0_0.csv`The primary directory (`Driving Performance`) contains folders named after participant numbers (e.g. folder `00` contains the files related to participant number 00).Inside each folder, there are CSV files named `Log_<datetime>_Unknown Road__0_0_0` (e.g. `Log_20200130101256_Unknown Road__0_0_0`).There are 21 folders, ranging from 00-20, with 9 log files per folder. RequirementsRename each log file from `Log_<datetime>_Unknown Road__0_0_0` to `<participant number>_<trial sequence element>` (e.g. `01_06`).The trial sequence is this constant set of numbers: [06, 07, 14, 09, 13, 01, 18, 02, 11].The elements of the trial sequence should be matched, in order, to the log files arranged alphabetically. (Arranging the files alphabetically also arranges them by datetime.) ExceptionsFor folders `00` and `11`, the trial sequence element is already indicated in the file name, so, for example, simply rename the log file inside folder `00` from `Log_20200129211416_Unknown Road__0_0_0_6` to `00_06`. Output locationTo preserve the original files, save the parsed files inside a new folder named `parsed_files`. ScriptImport the os, csv, shutil, and re modules.import os # for accessing and creating directories import csv # for accessing and generating csv files import shutil # for copying files from one directory to another import re # for filtering strings using regexDefine the trial sequence.trial_sequence = [6, 7, 14, 9, 13, 1, 18, 2, 11]Define the paths of the source and destination directories and create them if they don't exist yet.src_path = "Driving Performance/" if not os.path.exists(src_path): os.mkdir(src_path) dst_path = "parsed_files/" if not os.path.exists(dst_path): os.mkdir(dst_path)Create dummy folders and CSV files.**NOTE: No need to run this part on actual parsing.**# creates 9 pre-formatted dummy CSV files def createDummyFiles(path, folder): # create a loop from 0-8 for i in range(9): # define the pre-formatted dummy filename if folder in ["00", "11"]: filename = "Log_2020022312345" + str(i) + "_Unknown Road__0_0_0_" + str(trial_sequence[i]) else: filename = "Log_2020022312345" + str(i) + "_Unknown Road__0_0_0" # create the empty CSV file with open(path + filename + ".csv", "w") as dummy: pass # create a loop from 0-20 for the folder names for i in range(21): # define the name and path of the directory to be created folder = "{:02d}".format(i) path = src_path + folder + "/" # create the folder os.makedirs(path) # create dummy files createDummyFiles(path, folder)Copy and rename files.# loop through the folders in Driving Performance for folder in os.listdir(src_path): # loop through the files inside each folder for index, file in enumerate(os.listdir(src_path + folder + "/")): # for folders 00 and 11 if folder in ["00", "11"]: # filter the filename to get the trial_sequence_element trial_sequence_element = re.search(r'Unknown Road__0_0_0_(.*?)\.csv', file).group(1) # define the formatted filename (format: <participant>_<trial>.csv) filename = folder + "_" +
"{:02d}".format(int(trial_sequence_element)) + ".csv" # for the rest of the folders else: # define the formatted filename (format: _.csv) filename = folder + "_" + "{:02d}".format(trial_sequence[index]) + ".csv" # define the paths of the source and destination files src_file = src_path + folder + "/" + file dst_file = dst_path + filename # create renamed copy of source file if not os.path.exists(dst_file): shutil.copy(src_file, dst_file)1.0 Basic Variable Extractionfrom __future__ import (absolute_import, division, print_function, unicode_literals) from wrf import getvar from netCDF4 import Dataset as nc #ncfile = nc("/Users/ladwig/Documents/wrf_files/wrfout_d01_2016-02-25_18_00_00") ncfile = nc("/Users/ladwig/Documents/wrf_files/wrfout_d01_2016-10-07_00_00_00") p = getvar(ncfile, "P") print(p)1.0.1 DataArray attributes: 'dims', 'coords', 'attrs'print("dims: ", p.dims) print("coords: ", p.coords) print("attrs: ", p.attrs) del p1.0.2 Removing implicit 'squeeze' behavior to preserve single sized dimensionsp_nosqueeze = getvar(ncfile, "P", timeidx=0, squeeze=False) print (p_nosqueeze)1.0.3 Single element metadataprint (p_nosqueeze[0,0,100,200]) del p_nosqueeze1.0.4 Disabling/Enabling xarrayfrom wrf import disable_xarray, enable_xarray # Disable xarray completely disable_xarray() p_no_meta = getvar(ncfile, "P") print(type(p_no_meta)) print (p_no_meta) del p_no_meta enable_xarray() # Disable on extraction p_no_meta = getvar(ncfile, "P", meta=False) print("\n") print(type(p_no_meta)) print(p_no_meta) del p_no_meta2.0 Sequences of Input Files 2.0.1 Combining via the 'cat' methodfrom wrf import ALL_TIMES import numpy as np wrflist = [ncfile, ncfile, ncfile] p_cat = getvar(wrflist, "P", timeidx=ALL_TIMES, method="cat") print(p_cat) del p_cat2.0.2 Combining via the 'join' methodp_join = getvar(wrflist, "P", timeidx=ALL_TIMES, method="join") print(p_join)Note how the Time dimension was replaced with the file dimension, due to the 'squeezing' of the Time dimension.To maintain the Time dimension, set squeeze to False.from wrf import ALL_TIMES p_join = getvar(wrflist, "P", timeidx=ALL_TIMES, method="join", squeeze=False) print(p_join) del p_join2.0.3 Dictionary Sequenceswrf_dict = {"label1" : [ncfile, ncfile], "label2" : [ncfile, ncfile]} p_dict = getvar(wrf_dict, "P", timeidx=ALL_TIMES) print(p_dict) del p_dict2.0.4 Generator Sequencesdef gen_seq(): wrfseq = [ncfile, ncfile, ncfile] for wrf in wrfseq: yield wrf p_gen = getvar(gen_seq(), "P", method="join") print(p_gen) del p_gen2.0.5 Custom Iterable Classesclass FileGen(object): def __init__(self, ncfile, count=3): self._total = count self._i = 0 self.ncfile = [ncfile]*count def __iter__(self): return self def next(self): if self._i >= self._total: raise StopIteration else: val = self.ncfile[self._i] self._i += 1 return val # Python 3 def __next__(self): return self.next() obj_gen = FileGen(ncfile, 3) p_obj_gen = getvar(obj_gen, "P", method="join", squeeze=False) print(p_obj_gen) del p_obj_gen import tempfile import glob import shutil import os from netCDF4 import Dataset from wrf import getvar, ll_to_xy, CoordPair, GeoBounds class FileReduce(object): def __init__(self, filenames, geobounds, tempdir=None, delete=True, reuse=True): """An iterable object for cutting out geographic domains. Args: filenames (sequence): A sequence of full paths to the WRF files geobounds (GeoBounds): A GeoBounds object defining the region of interest tempdir (str): The location to store the temporary cropped data files. If None, tempfile.mkdtemp is used. 
delete (bool): Set to True to delete the cropped files when FileReduce is garbage collected. reuse (bool): Set to True when you want to resuse the files that were previously converted. *tempdir* must be set to a specific directory that contains the converted files. """ self._filenames = filenames self._i = 0 self._geobounds = geobounds self._delete = delete self._cache = set() self._own_data = True self._reuse = reuse if tempdir is not None: if not os.path.exists(tempdir): os.makedirs(tempdir) self._tempdir = tempdir if self._reuse: self._cache = set((os.path.join(self._tempdir, name) for name in os.listdir(self._tempdir))) else: self._tempdir = tempfile.mkdtemp() print ("temporary directory is: {}".format(self._tempdir)) self._prev = None self._set_extents() def _set_extents(self): fname = list(self._filenames)[0] with Dataset(fname) as ncfile: lons = [self._geobounds.bottom_left.lon, self._geobounds.top_right.lon] lats = [self._geobounds.bottom_left.lat, self._geobounds.top_right.lat] orig_west_east = len(ncfile.dimensions["west_east"]) orig_south_north = len(ncfile.dimensions["south_north"]) # Note: Not handling the moving nest here # Extra points included around the boundaries to ensure domain is fully included x_y = ll_to_xy(ncfile, lats, lons, meta=False) self._start_x = 0 if x_y[0,0] == 0 else x_y[0,0] - 1 self._end_x = orig_west_east - 1 if x_y[0,1] >= orig_west_east - 1 else x_y[0,1] + 1 self._start_y = 0 if x_y[1,0] == 0 else x_y[1,0] - 1 self._end_y = orig_south_north if x_y[1,1] >= orig_south_north - 1 else x_y[1,1] + 1 self._west_east = self._end_x - self._start_x + 1 self._west_east_stag = self._west_east + 1 self._south_north = self._end_y - self._start_y + 1 self._south_north_stag = self._south_north + 1 def __iter__(self): return self def __copy__(self): cp = type(self).__new__(self.__class__) cp.__dict__.update(self.__dict__) cp._own_data = False cp._delete = False return cp def __del__(self): if self._delete: shutil.rmtree(self._tempdir) def reduce(self, fname): outfilename = os.path.join(self._tempdir, "reduced_" + os.path.basename(fname)) # WRF-Python can iterate over sequences several times during a 'getvar', so a cache is used to if outfilename in self._cache: return Dataset(outfilename) # New dimension sizes dim_d = {"west_east" : self._west_east, "west_east_stag" : self._west_east_stag, "south_north" : self._south_north, "south_north_stag" : self._south_north_stag } # Data slice sizes for the 2D dimensions slice_d = {"west_east" : slice(self._start_x, self._end_x + 1), "west_east_stag" : slice(self._start_x, self._end_x + 2), "south_north" : slice(self._start_y, self._end_y + 1), "south_north_stag" : slice(self._start_y, self._end_y + 2) } with Dataset(fname) as infile, Dataset(outfilename, mode="w") as outfile: print ("reduce getting called!") # Copy the global attributes outfile.setncatts(infile.__dict__) # Copy Dimensions, limiting south_north and west_east to desired domain for name, dimension in infile.dimensions.items(): dimsize = dim_d.get(name, len(dimension)) outfile.createDimension(name, dimsize) # Copy Variables for name, variable in infile.variables.iteritems(): new_slices = tuple((slice_d.get(dimname, slice(None)) for dimname in variable.dimensions)) outvar = outfile.createVariable(name, variable.datatype, variable.dimensions) outvar[:] = variable[new_slices] outvar.setncatts(variable.__dict__) result = Dataset(outfilename) self._cache.add(outfilename) return result def next(self): if self._i >= len(self._filenames): if self._prev is not None: 
self._prev.close() raise StopIteration else: fname = self._filenames[self._i] reduced_file = self.reduce(fname) if self._prev is not None: self._prev.close() self._prev = reduced_file self._i += 1 return reduced_file # Python 3 def __next__(self): return self.next() ll = CoordPair(lat=24.0, lon=-87.) ur = CoordPair(lat=27.0, lon=-84) bounds = GeoBounds(ll, ur) reduced_files = FileReduce(glob.glob("/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/moving_nest/wrfout_d02*"), bounds, tempdir="/Users/ladwig/mytemp", delete=False, reuse=True) slp = getvar(reduced_files, "slp") print(slp) del (reduced_files)3.0 WRF Variable Computational Routineswrf_vars = ["avo", "eth", "cape_2d", "cape_3d", "ctt", "dbz", "mdbz", "geopt", "helicity", "lat", "lon", "omg", "p", "pressure", "pvo", "pw", "rh2", "rh", "slp", "ter", "td2", "td", "tc", "theta", "tk", "tv", "twb", "updraft_helicity", "ua", "va", "wa", "uvmet10", "uvmet", "z", "ctt", "wspd_wdir", "wspd_wdir10", "uvmet_wspd_wdir", "uvmet10_wspd_wdir"] #wrf_vars = ["slp"] vard = {varname: getvar(ncfile, varname, method="cat", squeeze=True) for varname in wrf_vars} for varname in wrf_vars: print(vard[varname]) print ("\n")(Note all of the NaNs in the above routines which produce missing values (e.g. cape_2d). xarray always converts all masked_array missing values to NaN in order to work with pandas. To get back the original missing values in a numpy masked_array, you need to use the 'to_np' method from wrf.)from wrf import to_np masked_ndarray = to_np(vard["slp"]) print(type(masked_ndarray)) del masked_ndarray keys = [x for x in vard.keys()] for key in keys: del vard[key]3.1 Interpolation Routines 3.1.1 Horizontal Level Interpolation# 500 MB Heights from wrf import getvar, interplevel z = getvar(ncfile, "z") p = getvar(ncfile, "pressure") ht_500mb = interplevel(z, p, 500) print(ht_500mb) del ht_500mb, z, p3.1.2 Vertical Cross Section Interpolation# Pressure using pivot and angle from wrf import getvar, vertcross, CoordPair z = getvar(ncfile, "z") p = getvar(ncfile, "pressure") pivot_point = CoordPair((z.shape[-1]-1) // 2, (z.shape[-2] - 1) // 2) angle = 90.0 p_vert = vertcross(p, z, pivot_point=pivot_point, angle=angle, latlon=True) print(p_vert) print ("\n") del p_vert # Pressure using start_point and end_point start_point = CoordPair(0, (z.shape[-2]-1) // 2) end_point = CoordPair(-1, (z.shape[-2]-1) // 2) p_vert = vertcross(p, z, start_point=start_point, end_point=end_point, latlon=True) print(p_vert) del p_vert, p, z # Pressure using pivot and angle from wrf import getvar, vertcross, CoordPair, xy_to_ll z = getvar(ncfile, "z") p = getvar(ncfile, "pressure") lats = getvar(ncfile, "lat") lons = getvar(ncfile, "lon") #print ((lats.shape[-2]-1) / 2) #print ((lats.shape[-1]-1) / 2) #print (to_np(lats[529, 899])) #print (to_np(lons[529, 899])) #print (to_np(lats[529, 0])) #print (to_np(lons[529, 0])) #print (to_np(lats[529, -1])) #print (to_np(lons[529, -1])) pivot_point = CoordPair(lat=38.5, lon=-97.5) angle = 90.0 p_vert = vertcross(p, z, wrfin=ncfile, pivot_point=pivot_point, angle=angle, latlon=True) print (p_vert) print ("\n") start_lat = lats[(lats.shape[-2]-1)//2, 0] end_lat = lats[(lats.shape[-2]-1)//2, -1] start_lon = lons[(lats.shape[-2]-1)//2, 0] end_lon = lons[(lats.shape[-2]-1)//2, -1] print (start_lat) print (end_lat) print (start_lon) print (end_lon) # Pressure using start_point and end_point start_point = CoordPair(lat=start_lat, lon=start_lon) end_point = CoordPair(lat=end_lat, lon=end_lon) p_vert = vertcross(p, z, wrfin=ncfile, 
start_point=start_point, end_point=end_point, latlon=True) print(p_vert) # Pressure using pivot and angle from wrf import getvar, vertcross, CoordPair, xy_to_ll z = getvar(ncfile, "z") p = getvar(ncfile, "pressure") lats = getvar(ncfile, "lat") lons = getvar(ncfile, "lon") #print ((lats.shape[-2]-1) / 2) #print ((lats.shape[-1]-1) / 2) #print (to_np(lats[529, 899])) #print (to_np(lons[529, 899])) #print (to_np(lats[529, 0])) #print (to_np(lons[529, 0])) #print (to_np(lats[529, -1])) #print (to_np(lons[529, -1])) pivot_point = CoordPair(lat=38.5, lon=-97.5) angle = 90.0 p_vert = vertcross(p, z, wrfin=ncfile, pivot_point=pivot_point, angle=angle, latlon=True) print (p_vert) print ("\n") start_lat = lats[(lats.shape[-2]-1)//2, 0] end_lat = lats[(lats.shape[-2]-1)//2, -1] start_lon = lons[(lats.shape[-2]-1)//2, 0] end_lon = lons[(lats.shape[-2]-1)//2, -1] print (start_lat) print (end_lat) print (start_lon) print (end_lon) # Pressure using start_point and end_point start_point = CoordPair(lat=start_lat, lon=start_lon) end_point = CoordPair(lat=end_lat, lon=end_lon) levels = [1000., 2000., 3000.] p_vert = vertcross(p, z, wrfin=ncfile, levels=levels, start_point=start_point, end_point=end_point, latlon=True) print(p_vert)3.1.3 Interpolate 2D Variable to a Line# T2 using pivot and angle from wrf import interpline, getvar, CoordPair t2 = getvar(ncfile, "T2") pivot_point = CoordPair((t2.shape[-1]-1)//2, (t2.shape[-2]-1)//2) angle = 0.0 t2_line = interpline(t2, pivot_point=pivot_point, angle=angle, latlon=True) print(t2_line, "\n") del t2_line # T2 using start_point and end_point start_point = CoordPair((t2.shape[-1]-1)//2, 0) end_point = CoordPair((t2.shape[-1]-1)//2, -1) t2_line = interpline(t2, start_point=start_point, end_point=end_point, latlon=True) print(t2_line, "\n") del t2_line t2 = getvar(ncfile, "T2") lats = getvar(ncfile, "lat") lons = getvar(ncfile, "lon") start_lat = lats[0, (lats.shape[-1]-1)//2] end_lat = lats[-1, (lats.shape[-1]-1)//2] start_lon = lons[0, (lons.shape[-1]-1)//2] end_lon = lons[-1, (lons.shape[-1]-1)//2] start_point = CoordPair(lat=start_lat, lon=start_lon) end_point = CoordPair(lat=end_lat, lon=end_lon) t2_line = interpline(t2, wrfin=ncfile, start_point=start_point, end_point=end_point, latlon=True) print (t2_line) del t2_line, t23.1.4 Vertical Coordinate Interpolationfrom wrf import vinterp, getvar # Interpolate tk to theta levels tk = getvar(ncfile, "tk") interp_levels = [200, 300, 500, 1000] interp_field = vinterp(ncfile, field=tk, vert_coord="theta", interp_levels=interp_levels, extrapolate=True, field_type="tk", log_p=True) print(interp_field) del interp_field # Interpolate tk to theta-e levels interp_levels = [200, 300, 500, 1000] interp_field = vinterp(ncfile, field=tk, vert_coord="eth", interp_levels=interp_levels, extrapolate=True, field_type="tk", log_p=True) print(interp_field) del interp_field # Interpolate tk to geopotential height (MSL) levels interp_levels = [30, 60, 90] interp_field = vinterp(ncfile, field=tk, vert_coord="ght_msl", interp_levels=interp_levels, extrapolate=True, field_type="tk", log_p=True) print(interp_field) del interp_field # Interpolate tk to geopotential height (MSL) levels interp_levels = [30, 60, 90] interp_field = vinterp(ncfile, field=tk, vert_coord="ght_agl", interp_levels=interp_levels, extrapolate=True, field_type="tk", log_p=True) print(interp_field) del interp_field # Interpolate tk to pressure levels interp_levels = [850, 500] interp_field = vinterp(ncfile, field=tk, vert_coord="pressure", interp_levels=interp_levels, 
extrapolate=True, field_type="tk", log_p=True) print(interp_field) del interp_field, tk3.2 Lat/Lon to X/Y Routinesfrom wrf import xy_to_ll, ll_to_xy a = xy_to_ll(ncfile, 400, 200) a1 = ll_to_xy(ncfile, a[0], a[1]) #print(a) #print("\n") #print(a1) #print("\n") a = xy_to_ll(ncfile, [400,105], [200,205]) a1 = ll_to_xy(ncfile, a[0,:], a[1,:]) b = ll_to_xy(ncfile, 45.5, -110.8, as_int=True) # Note: Lists/Dictionaries of files will add a new dimension ('domain') only if the domain is moving c = xy_to_ll([ncfile, ncfile, ncfile], [400,105], [200,205]) d = xy_to_ll({"label1" : [ncfile, ncfile], "label2" : [ncfile, ncfile]}, [400,105], [200,205]) print(a) print("\n") print(a1) print("\n") print(b) print("\n") print(c) print("\n") print(d)4.0 Plotting with Cartopy%matplotlib inline # SLP import matplotlib.pyplot as plt from matplotlib.cm import get_cmap import cartopy.crs as crs from cartopy.feature import NaturalEarthFeature from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords slp = getvar(ncfile, "slp") smooth_slp = smooth2d(slp, 3) lats, lons = latlon_coords(slp) cart_proj = get_cartopy(slp) fig = plt.figure(figsize=(10,10)) ax = plt.axes(projection=cart_proj) states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none', name='admin_1_states_provinces_shp') ax.add_feature(states, linewidth=.5) ax.coastlines('50m', linewidth=0.8) # Can only get this to work if I manually transform the lat/lon points to projected space. xform_coords = cart_proj.transform_points(crs.PlateCarree(), to_np(lons), to_np(lats)) x = xform_coords[:,:,0] y = xform_coords[:,:,1] plt.contour(x, y, to_np(smooth_slp), 10, colors="black") plt.contourf(x, y, to_np(smooth_slp), 10) plt.colorbar(ax=ax, shrink=.47) ax.set_xlim(cartopy_xlim(slp)) ax.set_ylim(cartopy_ylim(slp)) ax.gridlines() # SLP from __future__ import (absolute_import, division, print_function, unicode_literals) from netCDF4 import Dataset import matplotlib.pyplot as plt from matplotlib.cm import get_cmap import cartopy.crs as crs from cartopy.feature import NaturalEarthFeature from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords, geo_bounds ncfile = Dataset("/Users/ladwig/Documents/wrf_files/wrfout_d01_2016-10-07_00_00_00") # Get the sea level pressure slp = getvar(ncfile, "slp") # Smooth the sea level pressure since it tends to be noisey near the mountains smooth_slp = smooth2d(slp, 3) # Get the numpy array from the XLAT and XLONG coordinates lats, lons = latlon_coords(slp, as_np=True) # The cartopy() method returns a cartopy.crs projection object cart_proj = get_cartopy(slp) print (cart_proj) print (get_cartopy(wrfin=ncfile)) bounds = geo_bounds(slp) print (bounds) subset = slp[150:250, 150:250] subset_bounds = geo_bounds(subset) print (subset_bounds) file_bounds = geo_bounds(wrfin=ncfile) print (file_bounds) # Create a figure that's 10x10 fig = plt.figure(figsize=(10,10)) # Get the GeoAxes set to the projection used by WRF ax = plt.axes(projection=cart_proj) # Download and add the states and coastlines states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none', name='admin_1_states_provinces_shp') ax.add_feature(states, linewidth=.5) ax.coastlines('50m', linewidth=0.8) # Make the contour outlines and filled contours for the smoothed sea level pressure. # The transform keyword indicates that the lats and lons arrays are lat/lon coordinates and tells # cartopy to transform the points in to grid space. 
plt.contour(lons, lats, to_np(smooth_slp), 10, colors="black", transform=crs.PlateCarree()) plt.contourf(lons, lats, to_np(smooth_slp), 10, transform=crs.PlateCarree()) # Add a color bar plt.colorbar(ax=ax, shrink=.47) # Set the map limits ax.set_xlim(cartopy_xlim(slp)) ax.set_ylim(cartopy_ylim(slp)) # Add the gridlines ax.gridlines() # 500 MB Heights and Winds import matplotlib.pyplot as plt from matplotlib.cm import get_cmap import cartopy.crs as crs from cartopy.feature import NaturalEarthFeature from wrf import getvar, interplevel, to_np, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords p = getvar(ncfile, "pressure") z = getvar(ncfile, "z", units="dm") ua = getvar(ncfile, "ua", units="kts") va = getvar(ncfile, "va", units="kts") ht_500 = interplevel(z, p, 500) u_500 = interplevel(ua, p, 500) v_500 = interplevel(va, p, 500) lats, lons = latlon_coords(ht_500) cart_proj = get_cartopy(slp) fig = plt.figure(figsize=(20,20)) ax = plt.axes([0.1,0.1,0.8,0.8], projection=cart_proj) states = NaturalEarthFeature(category='cultural', scale='50m', facecolor='none', name='admin_1_states_provinces_shp') ax.add_feature(states, linewidth=0.5) ax.coastlines('50m', linewidth=0.8) # Can only get this to work if I manually transform the lat/lon points to projected space. xform_coords = cart_proj.transform_points(crs.PlateCarree(), to_np(lons), to_np(lats)) x = xform_coords[:,:,0] y = xform_coords[:,:,1] plt.contour(x, y, to_np(ht_500), 20, cmap=get_cmap("plasma")) plt.barbs(x[::50,::50], y[::50,::50], to_np(u_500[::50, ::50]), to_np(v_500[::50, ::50])) plt.colorbar(ax=ax, shrink=.7) ax.set_xlim(cartopy_xlim(slp)) ax.set_ylim(cartopy_ylim(slp)) ax.gridlines() # Cross-section of pressure using xarray's builtin plotting import numpy as np import matplotlib.pyplot as plt from matplotlib.cm import get_cmap from wrf import getvar, vertcross, to_np, CoordPair p = getvar(ncfile, "pressure") z = getvar(ncfile, "z", units="dm") pivot_point = CoordPair(z.shape[-1] // 2, z.shape[-2] // 2) angle = 90.0 p_vert = vertcross(p, z, pivot_point=pivot_point, angle=angle)#, levels=[1000,850,500]) fig = plt.figure(figsize=(20,8)) ax = plt.axes([0.1,0.1,0.8,0.8]) p_vert.plot.contour(ax=ax, levels=[0 + 50*n for n in range(20)], cmap=get_cmap("viridis"))Multi-time Moving Domain Filesimport os from wrf import getvar, ALL_TIMES from netCDF4 import Dataset as nc dir = "/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/moving_nest" ncfilenames = [os.path.join(dir, x) for x in os.listdir(dir) if x.find("_d02_") > 0] ncfiles = [nc(x) for x in ncfilenames] #print (ncfiles[0].variables["XLONG"][0,0,-1], ncfiles[0].variables["XLONG"][-1,0,-1]) #print (ncfiles[1].variables["XLONG"][0,0,-1], ncfiles[1].variables["XLONG"][-1,0,-1]) #print (ncfiles[-1].variables["XLONG"][0,0,-1], ncfiles[-1].variables["XLONG"][-1,0,-1]) p = getvar(ncfiles, "ctt", timeidx=ALL_TIMES) print (p) #print (p.attrs["projection"].shape) print (p.attrs["projection"]) ncfiles[2].variables["XTIME"][:] p = getvar(ncfiles, "P", timeidx=None, method="cat", meta=True, squeeze=True) print (p) print (type(p.coords["Time"])) import datetime import pandas print (type(p.coords["Time"].values.astype(datetime.datetime))) print (repr(datetime.datetime.utcfromtimestamp(p.coords["Time"][0].values.astype(int) * 1E-9))) print (pandas.to_datetime(p.coords["Time"].values)) wrf_vars = ["avo", "eth", "cape_2d", "cape_3d", "ctt", "dbz", "mdbz", "geopt", "helicity", "lat", "lon", "omg", "p", "pressure", "pvo", "pw", "rh2", "rh", "slp", "ter", "td2", "td", "tc", "theta", "tk", "tv", 
"twb", "updraft_helicity", "ua", "va", "wa", "uvmet10", "uvmet", "z", "ctt", "cfrac", "uvmet_wspd_wdir", "uvmet10_wspd_wdir", "wspd_wdir", "wspd_wdir10"] #wrf_vars = ["cape_2d"] vard = {} for varname in wrf_vars: print (varname) vard[varname] = getvar(ncfiles, varname, timeidx=None, method="join", squeeze=False) #vard = {varname: getvar(ncfiles, varname, method="join", squeeze=False) for varname in wrf_vars} for varname in wrf_vars: print(vard[varname]) # NOTE: Warnings below are due to "join" and the fill values used since the last file only contains 1 time step. import os from wrf import getvar from netCDF4 import Dataset as nc dir = "/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/moving_nest" ncfilenames = [os.path.join(dir, x) for x in os.listdir(dir) if x.find("_d02_") > 0] ncfiles = [nc(x) for x in ncfilenames] # Pressure using pivot and angle from wrf import getvar, vertcross, CoordPair timeidx = 0 z = getvar(ncfiles, "z", timeidx, method="join") p = getvar(ncfiles, "pressure", timeidx, method="join") pivot_point = CoordPair(z.shape[-1] / 2, z.shape[-2] / 2) angle = 40.0 p_vert = vertcross(p, z, pivot_point=pivot_point, angle=angle) print(p_vert) print ("\n") del p_vert # Pressure using start_point and end_point start_point = CoordPair(0, z.shape[-2]/2) end_point = CoordPair(-1, z.shape[-2]/2) p_vert = vertcross(p, z, start_point=start_point, end_point=end_point) print(p_vert) del p_vert, p, z import os from wrf import getvar from netCDF4 import Dataset as nc dir = "/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/moving_nest" ncfilenames = [os.path.join(dir, x) for x in os.listdir(dir) if x.find("_d02_") > 0] ncfiles = [nc(x) for x in ncfilenames] timeidx = None # T2 using pivot and angle from wrf import interpline, getvar, to_np, CoordPair t2 = getvar(ncfiles, "T2", timeidx) pivot_point = CoordPair(t2.shape[-2] / 2, t2.shape[-1] / 2) angle = 90.0 t2_line = interpline(t2, pivot_point=pivot_point, angle=angle, latlon=True) print(t2_line) print("\n") del t2_line # T2 using start_point and end_point start_point = CoordPair(t2.shape[-2]/2, 0) end_point = CoordPair(t2.shape[-2]/2, -1) t2_line = interpline(t2, start_point=start_point, end_point=end_point, latlon=True) print(t2_line) print("\n") del t2_line, t2 from wrf import getvar from wrf import xy_to_ll, ll_to_xy a = xy_to_ll(ncfiles, 400, 200) a = xy_to_ll(ncfiles, [400,105], [200,205]) b = ll_to_xy(ncfiles, 45.5, -110.8, as_int=True) # Note: Lists/Dictionaries of files will add a new dimension ('domain') only if the domain is moving c = xy_to_ll(ncfiles, [400,105], [200,205]) d = xy_to_ll({"label1" : ncfiles, "label2" : ncfiles}, [400,105], [200,205]) print(a) print("\n") print(b) print("\n") print(c) print("\n") print(d) from glob import glob from wrf import getvar, ALL_TIMES, geo_bounds, get_cartopy, get_basemap, get_pyngl, cartopy_xlim, cartopy_ylim from netCDF4 import Dataset as nc wrf_filenames = glob("/Users/ladwig/Documents/wrf_files/wrf_vortex_multi/moving_nest/wrfout_d02_*") ncfiles = [nc(x) for x in wrf_filenames] slp = getvar(ncfiles, "slp", timeidx=ALL_TIMES) bounds = geo_bounds(slp) print (bounds) print () cart_proj = get_cartopy(slp) print (cart_proj) print ("\n") xlims = cartopy_xlim(slp) print (xlims) print ("\n") ylims = cartopy_ylim(slp) print (ylims) print ("\n") bm = get_basemap(slp) print (bm) print ("\n") pyngl_res = get_pyngl(slp) print (pyngl_res) print ("\n")Interactive Visualizations with Plotlyhttps://plot.ly/python/getting-started/__Plotly currently cannot display plots inside notebooks opened in 
Jupyter Lab out-of-the-box. Therefore please switch to Jupyter Notebook for this tutorial.__Jupyter Notebook (if running locally): http://localhost:8888/tree!conda install plotly --yes import plotly.offline as plt # plot.ly offline plotting import plotly.graph_objs as go plt.init_notebook_mode(connected=True) # required for plt.iplot # connected = False requires no internet connection for plotting, # but significantly increases notebook size.MotivationFor most plotting purposes, [Matplotlib](plotting.ipynb) (supplemented with Seaborn) is a great choice.The use case for Plot.ly are interactive plots where the user can change parameters, scroll, click on points to get their values, etc. In addition, Plot.ly plots can be used in the Dash web framework.Plot.ly offers both web-based plotting (which requires a plot.ly account) and offline plotting (based on the plot.ly open source library). For most use-cases, offline plotting is preferred, therefore only this option is discussed here. Basic Plottinghttps://plot.ly/python/user-guide/python-api-user-guideimport numpy as np trace1 = go.Scatter(x=[1, 2, 3, 4], y=[4, 3, 2, 1], marker={'color': 'red'}, mode='markers+lines') trace1 x = np.arange(1,4,0.1) trace2 = go.Scatter(x=x, y=4*np.sin(x), marker={'color': 'blue'}, mode='lines') trace2 layout = go.Layout(title="hello world", xaxis={'title': 'x'}, yaxis={'title': 'y'}) layout fig = go.Figure(data=[trace1, trace2], layout=layout) fig plt.plot(fig)The *plot* function creates the plot as html file for opening in Browser.p = plt.iplot(fig)Classical method to create a plot inside a notebook.f = go.FigureWidget(data=[trace1, trace2], layout=layout) f.show()New method (Plotly >= 3.0) to create a plot inside a notebook. Interactive Plotting of Pandas DataFramesimport pandas as pd from ipywidgets import widgets df = pd.read_csv('../example_files/test_data.csv', parse_dates=[1, 2]) df.head() vendor = widgets.Dropdown( options=df.VendorID.unique().tolist(), value=1, description='Vendor ID:' ) passengers = widgets.IntSlider( value=1, min=df.passenger_count.min(), max=df.passenger_count.max(), step=1, description='Passengers:', continuous_update=False, ) vendor passengers plot_df = df[(df.VendorID == vendor.value) & (df.passenger_count == passengers.value)].head(100) g = go.FigureWidget(data=[go.Scatter(x=plot_df.trip_distance, y=plot_df.total_amount, name='Distance vs. Price', mode='markers')], layout=go.Layout(title={'text': 'NYC Taxi Data'}, xaxis={'title': 'trip distance'}, yaxis={'title': 'total amount'})) def response(change): plot_df = df[(df.VendorID == vendor.value) & (df.passenger_count == passengers.value)].head(100) with g.batch_update(): g.data[0].x = plot_df.trip_distance g.data[0].y = plot_df.total_amount vendor.observe(response, names='value') passengers.observe(response, names='value') widgets.VBox([vendor, passengers, g])Tutorial: Optimization Hello, and welcome to our tutorial on optimization. Here, we will explore three of Tequila's built in optimizers. Chiefly, we will cover the gradient descent (GD) optimizer, we will also discuss the Phoenics and GPyOpt bayesian optimizers that can be accessed through Tequila. 1: The GD optimizer.### start at the start: import statements! 
import tequila as tq import numpy as np from tequila.optimizers.optimizer_gd import minimize as gd_minrunning build_ext skipping '/Users/sumneralperin-lea/.pyxbld/temp.macosx-10.9-x86_64-3.6/pyrex/BayesianNetwork/kernel_prob_reshaping.cpp' Cython extension (up-to-date) skipping 'BayesianNetwork.kernel_prob_reshaping' extension (up-to-date) running build_ext skipping '/Users/sumneralperin-lea/.pyxbld/temp.macosx-10.9-x86_64-3.6/pyrex/BayesianNetwork/kernel_evaluations.cpp' Cython extension (up-to-date) skipping 'BayesianNetwork.kernel_evaluations' extension (up-to-date)We start by selecting an objective to optimize. We will begin with a fairly simple, 2-qubit expectationvalue. We will optimize our 2-qubit circuit with the simple, but non trivial hamiltonian $[Y(0)+Qm(0)]\otimes X(1)$, where $Qm=\frac{1}{2} (I + Z)$, the projector onto the 0 state.### optimizing the circuit in terms of pi makes the result of the optimization easier to interpret. a = tq.Variable(name="a")*tq.numpy.pi b = tq.Variable(name="b")*tq.numpy.pi c = tq.Variable(name="c")*tq.numpy.pi d = tq.Variable(name='d')*tq.numpy.pi U = tq.gates.H(target=[0]) U += tq.gates.H(target=1) U += tq.gates.Ry(target=0, angle=a) U += tq.gates.Rz(target=1, angle=b) U += tq.gates.Z(target=1,control=0) U += tq.gates.Rx(target=0, angle=c) U += tq.gates.Rx(target=1,angle=d) U += tq.gates.Z(target=1,control=0) ### once we have a circuit, we pick a hamiltonian to optimize over H=(tq.paulis.Y(0)+tq.paulis.Qm(0))*tq.paulis.X(1) O=tq.ExpectationValue(U=U,H=H) ### we use the .draw function to pretty-print circuits via backend printers. tq.draw(U,backend='qiskit') print(O)┌───┐┌───────────────┐ ┌───────────────┐ q_0: |0>┤ H ├┤ Ry(f((a,))_0) ├─■─┤ Rx(f((c,))_2) ├─■─ ├───┤├───────────────┤ │ ├───────────────┤ │ q_1: |0>┤ H ├┤ Rz(f((b,))_1) ├─■─┤ Rx(f((d,))_3) ├─■─ └───┘└───────────────┘ └───────────────┘ c_0: 0 ═════════════════════════════════════════════ c_1: 0 ═════════════════════════════════════════════ Objective with 1 unique expectation values variables = [a, b, c, d] types = not compiledWe are ready to optimize, now! like all tequila optimizers, the GD optimizer has a minimize function and most of the arguments are the same. However, there is one important difference: the GD optimizer takes a learning rate, lr. This parameter mediates step size in all of the GD optimizer methods; it is a positive float which scales the step in the direction of the gradient. There are several available optimization methods available to the GD optimizer, including basic SGD, SGD with momentum, and more advanced optimization strategies like Adam or RMS-prop.print('the following methods are available for Gradient Descent optimization:\n') print(tq.optimizers.optimizer_gd.OptimizerGD.available_methods())the following methods are available for Gradient Descent optimization: ['adam', 'adagrad', 'adamax', 'nadam', 'sgd', 'momentum', 'nesterov', 'rmsprop', 'rmsprop-nesterov']We will now optimize our chosen expectationvalue, chosing starting angles equivalent to $\frac{1}{4}\pi$ for all four variables, and optimizing via the ['Adam'](https://towardsdatascience.com/_adam-latest-trends-in-deep-learning-optimization-6be9a291375c) method.init={'a':0.25,'b':0.25,'c':0.25,'d':0.25} lr=0.1 ### For even more fun, try using sampling with the samples keyword, ### or pick your favorite backend with the 'backend' keyword! 
adam_result=gd_min(O,lr=lr, method='adam', maxiter=80, initial_values=init, silent=True)The plots below show the trajectory of both the value of the objective and the values of the angles as a function of time.adam_result.history.plot('energies') adam_result.history.plot('angles') print('best energy: ',adam_result.energy) print('optimal angles: ',adam_result.angles)We see that, minus a few hiccups, all the angles converge to optimum values. Exercise: is this truly the best performance possible, or are we stuck in a local minimum? Let's repeat what we did above, but with a few of the other methods! Here's RMSprop:init={'a':0.25,'b':0.25,'c':0.25,'d':0.25} lr=0.01 rms_result=gd_min(O,lr=lr, method='rmsprop', maxiter=80, initial_values=init, silent=True) print('RMSprop optimization results:') rms_result.history.plot('energies') rms_result.history.plot('angles') print('best energy: ',rms_result.energy) print('optimal angles: ',rms_result.angles)RMSprop optimization results:... And here's Momentum:init={'a':0.25,'b':0.25,'c':0.25,'d':0.25} lr=0.1 mom_result=gd_min(O,lr=lr, method='momentum', maxiter=80, initial_values=init, silent=True) print('momentum optimization results:') mom_result.history.plot('energies') mom_result.history.plot('angles') print('best energy: ',mom_result.energy) print('optimal angles: ',mom_result.angles)momentum optimization results:Note that when using the [RMSprop](https://towardsdatascience.com/understanding-rmsprop-faster-neural-network-learning-62e116fcf29a) method, we reduced the learning rate from 0.1 to 0.01. Different methods may be more or less sensitive to choices of initial learning rate. Try going back to the previous examples, and choosing different learning rates, or different initial parameters, to gain a feel for how sensitive different methods are. 1.1: The GD optimizer, with the Quantum Natural Gradient. The Quantum Natural Gradient, or QNG, is a novel method of calculating gradients for quantum systems, inspired by the natural gradient sometimes employed in classical machine learning. The usual gradient we employ is with respect to a Euclidean manifold, but this is not the only geometry -- nor even, the optimal geometry -- of quantum space. The QNG is, in essence, a method of taking gradients with respect to (an approximation to) the Fubini-Study metric. For information on how (and why) the QNG is used, see [Stokes et al.](https://arxiv.org/abs/1909.02108). Using the QNG in Tequila is as simple as passing in the keyword gradient='qng' to optimizers which support it, such as the GD optimizer. We will use it to optimize a more complicated circuit below, and then compare the results to optimizing the same circuit with the regular gradient.### this time, don't scale by pi H = tq.paulis.Y(0)*tq.paulis.X(1)*tq.paulis.Y(2) U = tq.gates.Ry(tq.numpy.pi/2,0) +tq.gates.Ry(tq.numpy.pi/3,1)+tq.gates.Ry(tq.numpy.pi/4,2) U += tq.gates.Rz('a',0)+tq.gates.Rz('b',1) U += tq.gates.CNOT(control=0,target=1)+tq.gates.CNOT(control=1,target=2) U += tq.gates.Ry('c',1) +tq.gates.Rx('d',2) U += tq.gates.CNOT(control=0,target=1)+tq.gates.CNOT(control=1,target=2) E = tq.ExpectationValue(H=H, U=U) print('drawing a more complicated circuit. Hope you like it!') tq.draw(U) ### the keyword stop_count, below, stops optimization if no improvement occurs after 50 epochs.
### let's use a random initial starting point: init={k:np.random.uniform(-2,2) for k in ['a','b','c','d']} lr=0.01 qng_result = tq.minimize(objective=E, gradient='qng', method='sgd', maxiter=200,lr=lr, initial_values=init, silent=True) qng_result.history.plot('energies') qng_result.history.plot('angles') print('best energy with qng: ',qng_result.energy) print('optimal angles with qng: ',qng_result.angles)To gain appreciation for why one might use the QNG, let's optimize the same circuit with the same learning rate and the same method, but without QNG.lr=0.01 sgd_noqng_result = tq.minimize(objective=E, gradient=None, method='sgd', maxiter=200,lr=lr, initial_values=init, silent=True) print('plotting what happens without QNG') sgd_noqng_result.history.plot('energies') sgd_noqng_result.history.plot('angles') print('best energy without qng: ',sgd_noqng_result.energy) print('optimal angles without qng: ',sgd_noqng_result.angles)plotting what happens without QNGThough the starting point was random (and so I, your humble tutorial writer, do not know what your graphs look like), you will most likely see that the QNG run achieved a greater degree of improvement, and that the trajectories followed by angles there was different from that followed by angles in the straight-gd optimization. Feel free to play around with other methods, learning rates, or circuits in the space below!### have fun!2. Bayesian Optimization [Bayesian optimization](https://arxiv.org/abs/1807.02811) is a method of global optimization, often used to tune hyperparameters in classical learning. It has also seen use in the optimization of [quantum circuits](https://arxiv.org/pdf/1812.08862.pdf). Tequila currently supports 2 different bayesian optimization algorithms: [Phoenics](https://github.com/aspuru-guzik-group/phoenics) and [GPyOpt](https://github.com/SheffieldML/GPyOpt), optimizers originally developed for optimizing expensive experimental procedures in chemistry. Click the links to get to the respective github pages, and download the optimizers before continuing this tutorial. 2.1: GPyOpt GPyOpt can be used like any of our other optimizers. Like the GD and SciPy optimizers, it also takes a 'method' keyword. 3 methods are supported: 'lbfgs','DIRECT', and 'CMA'. See the GPyOpt github for more info.from tequila.optimizers.optimizer_gpyopt import minimize as gpy_minwe will use GPyOpt to optimize the same circuits as seen above.### optimizing the circuit in terms of pi makes the result of the optimization easier to interpret. a = tq.Variable(name="a")*tq.numpy.pi b = tq.Variable(name="b")*tq.numpy.pi c = tq.Variable(name="c")*tq.numpy.pi d = tq.Variable(name='d')*tq.numpy.pi U = tq.gates.H(target=[0]) U += tq.gates.H(target=1) U += tq.gates.Ry(target=0, angle=a) U += tq.gates.Rz(target=1, angle=b) U += tq.gates.Z(target=1,control=0) U += tq.gates.Rx(target=0, angle=c) U += tq.gates.Rx(target=1,angle=d) U += tq.gates.Z(target=1,control=0) ### once we have a circuit, we pick a hamiltonian to optimize over H=(tq.paulis.Y(0)+tq.paulis.Qm(0))*tq.paulis.X(1) O=tq.ExpectationValue(U=U,H=H) ### we use the .draw function to pretty-print circuits via backend printers. tq.draw(U,backend='qiskit') print(O) ### let's use the lbfgs method. init={'a':0.25,'b':0.25,'c':0.25,'d':0.25} ### note: no lr is passed here! there are fewer tunable keywords for this optimizer. 
result=gpy_min(O, method='lbfgs', maxiter=80, initial_values=init) print('GPyOpt optimization results:') result.history.plot('energies') result.history.plot('angles') print('best energy: ',result.energy) print('optimal angles: ',result.angles)Optimizer: backend : qulacs backend_options : {} samples : None save_history : True noise : None method : lbfgs Objective : 1 expectationvalues num acquisition: 1, time elapsed: 0.51s num acquisition: 2, time elapsed: 0.96s num acquisition: 3, time elapsed: 1.67s num acquisition: 4, time elapsed: 2.48s num acquisition: 5, time elapsed: 3.10s num acquisition: 6, time elapsed: 4.26s num acquisition: 7, time elapsed: 5.01s num acquisition: 8, time elapsed: 5.88s num acquisition: 9, time elapsed: 6.88s num acquisition: 10, time elapsed: 7.18s num acquisition: 11, time elapsed: 7.69s num acquisition: 12, time elapsed: 8.07s num acquisition: 13, time elapsed: 8.59s num acquisition: 14, time elapsed: 9.35s num acquisition: 15, time elapsed: 10.14s num acquisition: 16, time elapsed: 10.54s num acquisition: 17, time elapsed: 11.08s num acquisition: 18, time elapsed: 11.48s num acquisition[...]Perhaps you are looking at the plots above in horror. But, do take note: bayesian optimization is a global, exploratory optimization method, designed to explore large portions of parameter space while still seeking out optimality. Look at the optimal energy again, and one sees that the best performance of this optimization method matched that of all the gradient descent methods. We will apply gpyopt, next, to the QNG example circuit above, and see how bayesian optimization compares to QNG and SGD.### this time, don't scale by pi H = tq.paulis.Y(0)*tq.paulis.X(1)*tq.paulis.Y(2) U = tq.gates.Ry(tq.numpy.pi/2,0) +tq.gates.Ry(tq.numpy.pi/3,1)+tq.gates.Ry(tq.numpy.pi/4,2) U += tq.gates.Rz('a',0)+tq.gates.Rz('b',1) U += tq.gates.CNOT(control=0,target=1)+tq.gates.CNOT(control=1,target=2) U += tq.gates.Ry('c',1) +tq.gates.Rx('d',2) U += tq.gates.CNOT(control=0,target=1)+tq.gates.CNOT(control=1,target=2) E = tq.ExpectationValue(H=H, U=U) print('Hey, remember me?') tq.draw(U) ### the keyword stop_count, below, stops optimization if no improvement occurs after 50 epochs. ### let's use a random initial starting point: init={k:np.random.uniform(-2,2) for k in ['a','b','c','d']} gpy_result = gpy_min(objective=E,maxiter=25,method='lbfgs', initial_values=init) gpy_result.history.plot('energies') print('best energy: ',gpy_result.energy) print('optimal angles: ',gpy_result.angles)Hey, remember me? 0: ───Ry(0.5π)─────Rz(0.318309886183791*pi*f((a,))_0)───@────────────────────────────────────────────@─────── │ │ 1: ───Ry(0.333π)───Rz(0.318309886183791*pi*f((b,))_1)───X───@───Ry(0.318309886183791*pi*f((c,))_2)───X───@─── │ │ 2: ───Ry(0.25π)─────────────────────────────────────────────X───Rx(0.318309886183791*pi*f((d,))_3)───────X─── Optimizer: backend : qulacs backend_options : {} samples : None save_history : True noise : None method : lbfgs Objective : 1 expectationvalues num acquisition: 1, time elapsed: 0.47s num acquisition: 2, time elapsed: 0.96s num acquisition: 3, time elapsed: 1.57s num acquisition: 4, time elapsed: 2.11s num acquisition: 5, time elaps[...]In a very, very small number of step, GPyOpt is able to match the performance of SGD with the QNG, and discovers the hidden truth: the optimil circuit, here, is one where all angles are zero (modulo 2 $\pi$) There's a few extras you can access if you are well-familiar with GPyOpt. 
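Before looking at those extras, here is a quick, optional sanity check on the zero-angle claim above. It is a hedged sketch that assumes the 3-qubit objective E from the QNG example is still defined in the session; tq.simulate evaluates an objective at fixed variable values, so the printed number can be compared with gpy_result.energy.

# evaluate the objective at all angles equal to zero (a check added here, not part of the original tutorial)
zero_angles = {k: 0.0 for k in ['a', 'b', 'c', 'd']}
print(tq.simulate(E, variables=zero_angles))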
We return a part of result, result.object, which is a native GPyOpt BayesianOptimization object -- the one built and run during your optimization. It has some plotting features you can use.obj=gpy_result.object obj.plot_convergence()If your function has 1 or 2 parameters (but no more) you can also see a plot of its acquisition function! see [here](https://www.blopig.com/blog/2019/10/a-gentle-introduction-to-the-gpyopt-module/) for more info!You can also extract the acquisition function of the model itself, and play with it (it takes ones object, an np array, as input), using:acq=result.object.acquisition.acquisition_function Feel free to play around more with other circuits in the space below! 2.2 Phoenics Finally, we turn to Phoenics. This algorithm, originally developed within the Aspuru-Guzik group (Hey, just like Tequila!), can be accessed in the usual fashion. It's performance on the two-qubit optimization circuit is shown below. Note that the number of datapoints exceeds the provided maxiter; maxiter here controls the number of parameter __batches__ suggested by phoenics. phoenics suggests a number of parameter sets to try out, per batch, that scales with the number of parameters (in a nonlinear fashion), so you may want to set maxiter lower if you are only playing around.from tequila.optimizers.optimizer_phoenics import minimize as p_min ### optimizing the circuit in terms of pi makes the result of the optimization easier to interpret. a = tq.Variable(name="a")*tq.numpy.pi b = tq.Variable(name="b")*tq.numpy.pi c = tq.Variable(name="c")*tq.numpy.pi d = tq.Variable(name='d')*tq.numpy.pi U = tq.gates.H(target=[0]) U += tq.gates.H(target=1) U += tq.gates.Ry(target=0, angle=a) U += tq.gates.Rz(target=1, angle=b) U += tq.gates.Z(target=1,control=0) U += tq.gates.Rx(target=0, angle=c) U += tq.gates.Rx(target=1,angle=d) U += tq.gates.Z(target=1,control=0) H=(tq.paulis.Y(0)+tq.paulis.Qm(0))*tq.paulis.X(1) O=tq.ExpectationValue(U=U,H=H) init={'a':0.25,'b':0.25,'c':0.25,'d':0.25} ### geez! even fewer keywords! ### to see what you can pass down to phoenics, see the tequila documentation for that module. p_result=p_min(O, maxiter=5, initial_values=init, silent=False) print('Phoenics optimization results on 2 qubit circuit:') p_result.history.plot('energies') p_result.history.plot('angles') print('best energy: ',p_result.energy) print('optimal angles: ',p_result.angles)Phoenics config: {'general': {'auto_desc_gen': 'False', 'batches': 5, 'boosted': 'False', 'parallel': 'False'}, 'parameters': [{'name': a, 'periodic': 'True', 'type': 'continuous', 'size': 1, 'low': 0, 'high': 6.283185307179586}, {'name': b, 'periodic': 'True', 'type': 'continuous', 'size': 1, 'low': 0, 'high': 6.283185307179586}, {'name': c, 'periodic': 'True', 'type': 'continuous', 'size': 1, 'low': 0, 'high': 6.283185307179586}, {'name': d, 'periodic': 'True', 'type': 'continuous', 'size': 1, 'low': 0, 'high': 6.283185307179586}], 'objectives': [{'name': 'Energy', 'goal': 'minimize'}]} phoenics has recieved objective: Objective with 1 unique expectation values variables = [a, b, c, d] types = not compiled noise model : None samples : None maxiter : 5 variables : [a, b, c, d] passive var : {} backend options qulacs {} now lets begin [TIME]: 0:00:00.001051 (overall) *********************************************** energy = -0.29609650 , angles= {a: 6.2376809120178[...]we also have returned to you the phoenics object. One interesting object we can extract from this is the acquisition function. 
You can obtain this indirectly, using resut.object.bayesian_network.kernel_contribution. This function takes a numpy array ( a point in your parameter space) and returns 2 numbers, x and y; the acquisition function then has the value x*y. Note: this is often zero.kc=p_result.object.bayesian_network.kernel_contribution random_point=np.random.uniform(0,2*np.pi,4) f,s=kc(random_point) random_ac=f*s print('random point ', random_point, ' has acquisition function value ',random_ac)random point [2.02107126 0.53605093 1.77683872 4.47834312] has acquisition function value 0.0Topic by LDA# Open in pandas the table with the topic LDA df_lda = pd.read_csv(csv_path / "topic_lda.csv") #Make sure to update this data df_lda.head() df_nmf = pd.read_table(csv_path / "topic_classification.txt",sep=',') #Make sure to update this data df_nmf.head() filepath_2 = Path.cwd().parent / "data" / "Approval_Index.xlsx" approval=pd.read_excel(filepath_2, usecols=['DATE','APPROVAL INDEX']) approval.head() approval=approval.rename(columns={'DATE':'date','APPROVAL INDEX':'approval_index'}) approval.head() df_lda['date'] = df_lda['date'].astype('datetime64[ns]') df_lda['year']=df_lda['date'].dt.year df_lda["topic_1_num"] = df_lda["main_topic_1"].str.replace("Topic ", "") merged_lda=pd.merge(df_lda,approval,on='date',how='left') import datetime merged_lda.loc[merged_lda.approval_index.isna(),'date']= merged_lda.date+pd.Timedelta("2 days") merged_lda=pd.merge(merged_lda.drop(['approval_index'],axis=1),approval,on='date',how='left') merged_lda.head() merged_lda.loc[merged_lda.approval_index.isna(),['date','state','country','approval_index']] merged_lda['approval_index'].astype('Int64') cross_topic_lda=pd.DataFrame(merged_lda.groupby(['main_topic_1']).main_topic_2.value_counts().reset_index(name='count')) sns.relplot(x="main_topic_1", y="main_topic_2", size="count",sizes=(70, 700), alpha=.5,height=7, data=cross_topic_lda) most_freq_topic=[22,20,8,1,4,9,15,14,0,3]##.loc[df_nmf['main_topic_1_index']].isin(most_freq_topic) cross_topic_nmf=pd.DataFrame(df_nmf.groupby(['main_topic_1_index']).main_topic_2_index.value_counts().reset_index(name='count')) order_2 = ['1','2','3','4','5','6','7','8','9','10','11','12','13', '14','15','16','17','18','19','20', '21','22','23','24','25','none'] sns.relplot(x="main_topic_1_index", y="main_topic_2_index", size="count",sizes=(50, 500), alpha=.5,height=10, data=cross_topic_nmf,row_order=order_2) n_approval = pd.crosstab(index=merged_lda['main_topic_1'], columns=merged_lda['year']) n_approval sns.set(rc={'figure.figsize':(20,8.27)}) n_approval.T.plot.bar(stacked=True) avg_approval = pd.pivot_table(merged_lda, values='approval_index', columns='year',index=['main_topic_1'],aggfunc=np.mean,margins=True) avg_approval=avg_approval.iloc[: , :-2].transpose() sns.set(rc={'figure.figsize':(20,8.27)}) sns.lineplot(data=avg_approval) state_approval = pd.pivot_table(merged_lda.loc[merged_lda['country']=='USA'], values='approval_index', columns='main_topic_1',index=['state'],aggfunc=np.mean,margins=True) state_approval freq_topic_state= pd.crosstab(index=merged_lda['state'], columns=merged_lda['main_topic_1']) merged_lda.loc[(merged_lda['state']=='no_state')& (merged_lda['country']=='USA')] states = merged_lda.groupby(['state','main_topic_1'])['main_topic_1'].agg({'count'}) mask = states.groupby(level=0).agg('idxmax') df_count = states.loc[mask['count']] df_count = df_count.reset_index() df_count["topic_1_num"] = df_count["main_topic_1"].str.replace("Topic ", "") print("\nOutput\n{}".format(df_count)) 
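The `states`/`mask`/`df_count` cell above packs a useful pattern into a few dense lines: count topic occurrences per state, then keep each state's most frequent topic via `idxmax`. Purely as an illustration, here is the same pattern spelled out on a toy frame (the rows are made up; only the `state` and `main_topic_1` column names mirror the notebook):

```python
import pandas as pd

# Toy stand-in for merged_lda: one row per article with its state and main LDA topic.
toy = pd.DataFrame({
    "state": ["CA", "CA", "CA", "NY", "NY"],
    "main_topic_1": ["Topic 3", "Topic 3", "Topic 7", "Topic 1", "Topic 1"],
})

# Count how often each topic appears within each state...
counts = toy.groupby(["state", "main_topic_1"]).size().rename("count").reset_index()

# ...then keep, for every state, the row holding the largest count (ties keep the first).
top_per_state = counts.loc[counts.groupby("state")["count"].idxmax()]
print(top_per_state)
```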
freq_topic_nmf= pd.crosstab(index=df_nmf['main_topic_1_index'], columns='count').sort_values(by='count') freq_topic_nmf most_freq_topic=[22,20,8,1,4,9,15,14,0,3,7,18]Project No. 20- Generating the Beta(0.3, 5) variable by two methods- Generating the normal variable by the polar methodimport random import numpy as npGenerating the Beta variableWe use the two methods described in Lecture 6.def gen_beta1(a, b): "Using the gamma distribution" x_1 = random.gammavariate(a, 1) x_2 = random.gammavariate(b, 1) return x_1 / (x_1 + x_2) def gen_beta2(a, b): "Using uniform distributions" while True: v = random.uniform(0, 1) ** (1 / a) t = random.uniform(0, 1) ** (1 / (b - 1)) if v + t < 1: return vVerificationWe work out what the mean and standard deviation of the distribution should be, using the formulas from Wikipedia. Afterwards, we generate many samples with the functions above and check whether they respect these properties.num_samples = 100_000 a = 0.3 b = 5 # The formulas are taken from https://en.wikipedia.org/wiki/Beta_distribution mean = a / (a + b) sd = np.sqrt((a * b)/(((a + b)**2)*(a + b + 1))) sample1 = [gen_beta1(a, b) for _ in range(num_samples)] sample2 = [gen_beta2(a, b) for _ in range(num_samples)] print("Theoretical values:") print("Mean:", mean, "-", "Standard deviation:", sd) print() print("Samples generated by the first method") print("Mean:", np.mean(sample1), "-", "Standard deviation:", np.std(sample1)) print("Mean:", np.mean(sample2), "-", "Standard deviation:", np.std(sample2))Theoretical values: Mean: 0.05660377358490566 - Standard deviation: 0.09206604461778616 Samples generated by the first method Mean: 0.05676770538150129 - Standard deviation: 0.09237633079284698 Mean: 0.05609732461631161 - Standard deviation: 0.09092262264587549Generating the normal variableUsing the polar method described in Lecture 6.def gen_norm_polar(): while True: v_1 = random.uniform(-1, 1) v_2 = random.uniform(-1, 1) s = (v_1 ** 2) + (v_2 ** 2) if s < 1: z_1 = v_1 * np.sqrt((-2 * np.log(s)) / s) z_2 = v_2 * np.sqrt((-2 * np.log(s)) / s) return [z_1, z_2]Verificationsample = [x for _ in range(num_samples) for x in gen_norm_polar()] print("Mean:", np.mean(sample), "-", "Standard deviation:", np.std(sample))Mean: 0.00393825190828833 - Standard deviation: 1.0005390943921242Graphbrain parserThis notebook is a simple demonstration of the Graphbrain parser, which transforms a sentence in natural language into a hyperedge.# simple functions to interact with Graphbrain from notebooks from graphbrain.notebook import * # the parser from graphbrain.meaning.parser import Parser # function to visualize parse trees from graphbrain.meaning.nlpvis import print_tree**Note**: When the cell below is run, the parser is initialized. This can take from a few seconds to a minute.# include part-of-speech tags in atoms pos = True # create the parser object parser = Parser(lang='en', pos=pos) # change to whatever you like... text = """ The Turing test, developed in 1950, is a test of machine intelligence.
""" roots_only = True compact = False tree = False parses = parser.parse(text) for parse in parses: if tree: print_tree(parse['spacy_sentence'].root) show(parse['main_edge'], roots_only=roots_only, compact=compact)* tracking/track/VBG (ROOT) +-- < satellites/satellites/NNP (nsubj) {ORG} | +-- < // () {ORG} | +-- > from/from/IN (prep) | +-- > nasa/nasa/NNP (pobj) {ORG} | +-- > and/and/CC (cc) | +-- > agencies/agency/NNS (conj) | +-- < other/other/JJ (amod) +-- < have/have/VBP (aux) +-- < been/be/VBN (aux) +-- > changes/change/NNS (dobj) | +-- < ice/ice/NN (compound) | +-- < sea/sea/NN (compound) +-- > since/since/I[...]FunctionsFunctions are first class citizens in Python! Python is a good fit for functional programming paradigms... More on that another day.# define a function using def functionName(variable1, variable2): def myFirstFunction(): print("Ran the function!") # call the function myFirstFunction() # implement a return statement def mySecondFunction(): return 100 mySecondFunction() # pass in parameters def myThirdFunction(a, b=1): return a/b myThirdFunction(100, 10) # pass in parameters def myFourthFunction(a:int, b:int): return a, b #myFourthFunction(100, 10) myFourthFunction(("abc" + "def"), 10)`*Args` and `**Kwargs`# pass in the parameters to the previous functions using a dict and ** # aka "Kwargs" vals = {"a":1000, "b":10} myThirdFunction(**vals) # Pass in a variable number of arguments using a "*" # aka "*Args" def myFourthFunction(a, *args): print(a) for v in args: print(v) myFourthFunction("100", "1", "2", "3", "4") def MultipleReturn(): a = 1 b = 2 c = 3 return a, b, c MultipleReturn() x, y, z = MultipleReturn() y--- ClassesPython is actually Object Orientated, so it is easy to define classes# Create a class class myClass(): # constructor method def __init__(self, val1=None, val2=None): self.prop1 = val1 self.prop2 = val2 # simple example method def product(self): return self.prop1 * self.prop2 # Create an instance of the class x = myClass(100, 20) # Print the properties and the output of the "product" method print(x.prop1) print(x.prop2) print(x.product()) # Python classes also have things like class variables, inheritance....but this session is meant to be < 1 hour!Getting Transforming and Exploring Dataimport numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_datareader as pdr gld = pdr.get_data_yahoo('GLD', start='2013-01-01') gld = gld.drop('Adj Close', axis=1) gld =round(gld,2) gld.head() with plt.style.context('ggplot'): plt.plot(gld.Close) gld.index = pd.DatetimeIndex(gld.index) monthly = gld.resample('BM').last() monthly.head() plt.plot(monthly['Close']) gld['Change'] = gld['Close'] -gld['Close'].shift() gld.head() gld['LN_change'] = np.log(gld['Close']/ gld['Close'].shift()) gld.head() with plt.style.context('ggplot'): plt.figure(figsize=(10,8)) plt.hist(gld.LN_change[1:],bins=50,edgecolor='black', density='True') gld['Daily Vol'] = gld['LN_change'].rolling(21).std().shift() gld['Exp Change'] = gld['Close'] * gld['Daily Vol'] gld.head(23) gld = gld[22:] gld.head()4. Preparando o Dataset (Texto) Importação das Bibliotecasimport nltk import numpy as np nltk.download('stopwords') nltk.download('punkt') from pprint import pprint stopWordPortugues = nltk.corpus.stopwords.words('portuguese') print(np.transpose(stopWordPortugues)) sample_text = """ O menino gosta de jogar futebol aos finais de semana. 
Ele gosta de jogar com seus amigos Marcos e João, mas não gosta de brincar com a irmã Marcela.""" tokenizacao_sentencas = nltk.sent_tokenize sample_sentence=tokenizacao_sentencas(text=sample_text) pprint(sample_sentence) len(sample_sentence) sample_sentence = 'O menino gosta de jogar futebol aos finais de semana.' tokenizacao_palavras=nltk.word_tokenize sample_words=tokenizacao_palavras(text=sample_sentence) pprint(sample_words) from nltk.stem import PorterStemmer from nltk.stem import RSLPStemmer nltk.download('rslp') ps=PorterStemmer() stemmer = RSLPStemmer() print(ps.stem('jumping')) print(stemmer.stem('amoroso')) print(stemmer.stem('amorosa')) print(stemmer.stem('amados')) from nltk.stem import SnowballStemmer print('Linguagens suportadas %s',SnowballStemmer.languages) ss = SnowballStemmer('portuguese') print(ss.stem('casado')) print(ss.stem('casarão')) print(ss.stem('casa'))cas cas cas** ---------------------**Exemplo Bag of Words ---------------------**** **sentenca="O IGTI oferece especializacao em Deep Learning. Deep Learning e utilizado em diversas aplicacoes. As aplicacoes de deep learning sao estudadas nesta especializacao. O IGTI tambem oferece bootcamp" sentenca=sentenca.lower() print(sentenca) tokenizacao_sentencas=nltk.sent_tokenize sample_sentence = tokenizacao_sentencas(text=sentenca) pprint(sample_sentence) sample_sentence[0] tokenizacao_palavras=nltk.word_tokenize list_words=[] for i in range(len(sample_sentence)): sample_words = tokenizacao_palavras(text=sample_sentence[i]) list_words.extend(sample_words) print(list_words) def tokenizaPalavras(sentenca): sample_words = tokenizacao_palavras(text=sentenca) return sample_words #removendo stopwords e criando o BoW def removeStopWords(list_of_words): my_stop_words=['o','em','as','de','sao','nesta','.','e','a','na','do'] # cria a lista de stopwords list_cleaned=set(list_of_words)-set(my_stop_words) return list_cleaned my_BoW=removeStopWords(list_words) print(my_BoW) def bagofwords(sentence, words): sentence_words = tokenizaPalavras(sentence) # conta a frequência de palavras que estão no vetor do BoW bag = np.zeros(len(words)) for sw in sentence_words: for i,word in enumerate(words): if word == sw: bag[i] += 1 return np.array(bag) sentenca_teste='o igti oferece especializacao em deep learning e o igti oferece bootcamp' print(bagofwords(sentenca_teste,my_BoW))[2. 2. 1. 0. 0. 1. 0. 0. 0. 1. 
1.]SCRATCH WORKimport pandas as pd project_id = 'stackoverflow-qa' sample_count = 2000 df = pd.io.gbq.read_gbq(''' SELECT * FROM `bigquery-public-data.stackoverflow.posts_answers` AS question LIMIT 100 ''', project_id = project_id, dialect= 'standard') df.head() import pandas as pd project_id = 'stackoverflow-qa' sample_count = 2000 df = pd.io.gbq.read_gbq(''' SELECT * FROM `bigquery-public-data.stackoverflow.posts_questions` AS question INNER JOIN `bigquery-public-data.stackoverflow.posts_answers` AS answers LIMIT 100 ''', project_id = project_id, dialect= 'standard') df.head() # SELECT # questions.title, # questions.Id as 'q_id', # questions.body as 'q_body', # questions.score as 'q_score', # (case # when questions.AcceptedAnswerId = answers.Id # then 1 # else 0 end) as 'acc_a', # answers.Id as 'a_id', # answers.body as 'a_body', # answers.score as 'a_score' # FROM Posts answers # INNER JOIN Posts questions ON answers.parentid = questions.id # WHERE questions.title like '%how%' # AND questions.tags like '%java%' # AND questions.tags not like '%javascript%' # AND answers.score > 0 # order by questions.id asc # OFFSET 50000 ROWS # FETCH FIRST 50000 ROWS ONLY;Question 1 -- Convert hex to base64import codecs def hex_to_b64(hexa): b64 = codecs.encode(codecs.decode(hexa, 'hex'), 'base64').decode() return b64 hexa = "" print(hex_to_b64(hexa))SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcGIG11c2hyb29tExplanation: This code makes use of the codecs library available in Python to decode the hex string and then convert it to base64. Question 2 -- Fixed XORdef XOR(a,b): lookup = {"0" : "0000", "1" : "0001", "2" : "0010", "3" : "0011", "4" : "0100", "5" : "0101", "6" : "0110", "7" : "0111", "8" : "1000", "9" : "1001", "a" : "1010", "b" : "1011", "c" : "1100", "d" : "1101", "e" : "1110", "f" : "1111"} lookup2 = {"10":"a","11":"b","12":"c","13":"d","14":"e","15":"f"} a= lookup[a] b= lookup[b] c="" for i in range(4): c=c+str(int(a[i]) ^ int(b[i])) if int(c,2)>=10: return lookup2[str(int(c,2))] else: return (str(int(c,2))) def fixedXOR(x,y): result = "" for i in range(len(x)): result = result + (XOR(x[i],y[i])) return result final = fixedXOR("1c0111001f010100061a024b53535009181c","686974207468652062756c6c277320657965") finalExplanation: In this question we XOR two hex strings. The fixedXOR function takes in two strings and calls XOR on each character of the strings. The XOR function maintains two lookup tables: one to convert a single hex digit to 4-bit binary and another to convert an integer back into a hex digit. We iterate over the 4 bits and XOR them individually. Later we accumulate the result from XOR and print it.
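The nibble lookup table gets the job done, but the same fixed XOR can be written more directly with Python's built-in `bytes` type. A small alternative sketch (the function name `fixed_xor_hex` is mine, not part of the solution above); it should agree with the lookup-table version on the same inputs:

```python
def fixed_xor_hex(hex_a, hex_b):
    """XOR two equal-length hex strings byte by byte and return the result as hex."""
    a = bytes.fromhex(hex_a)
    b = bytes.fromhex(hex_b)
    return bytes(x ^ y for x, y in zip(a, b)).hex()

print(fixed_xor_hex("1c0111001f010100061a024b53535009181c",
                    "686974207468652062756c6c277320657965"))
```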
Question 3 -- Single-byte XOR cipherimport codecs cipher="1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736" freqs = {97: 0.0651738,98: 0.0124248,99: 0.0217339,100: 0.0349835,101: 0.1041442,102: 0.0197881,103: 0.0158610, 104: 0.0492888,105: 0.0558094,106: 0.0009033,107: 0.0050529,108: 0.0331490,109: 0.0202124,110: 0.0564513, 111: 0.0596302,112: 0.0137645,113: 0.0008606,114: 0.0497563,115: 0.0515760,116: 0.0729357,117: 0.0225134, 118: 0.0082903,119: 0.0171272,120: 0.0013692,121: 0.0145984,122: 0.0007836,32: 0.1918182} def xor_str(string1,string2): st1=codecs.decode(string1,'hex') st2=codecs.decode(string2,'hex') x=(bytes(a^b for a,b in zip(st1,st2))).hex() return x def Single_byte_Xor_cipher(cipher): index=0 high=0 for i in range(0,256): if (i<16): xor_byte=('0'+hex(i)[2:])*(int)(len(cipher)/2) else: xor_byte=hex(i)[2:]*(int)(len(cipher)/2) new=(codecs.decode(xor_str(cipher,xor_byte),"hex")) count=0.0 for j in range(len(new)): if new[j] in freqs: count = count + freqs[new[j]] if count>high: high=count index=i return high,index def output(cipher,index): print("The key to decrypt message is '{}'".format((chr(index) ))) print("The decrypted string is : ",end="") print((codecs.decode(xor_str(cipher,hex(index)[2:]*(int)(len(cipher)/2)),"hex")).decode()) high, index = Single_byte_Xor_cipher(cipher) output(cipher,index)The key to decrypt message is 'X' The decrypted string is : Cooking MC's like a pound of baconExplaination: In the above code the single_byte_xor_cipher is trying all the possible 256 ascii values in order to fetch the correct key to decrypt cipher. We add '0' to the ascii value under 16 to maintain length constraint. Then we pass both the guesses single character key and cipher to the xor_str function to get the xor. We decide upon the key based on the frequency table that states the frequency of a character in the english langauge. Based on the highest score obtained we print out the gussed single character key. Question 4 -- Detect single-character XORhigh = 0 for line in open('input.txt'): line = line.strip() count,index = Single_byte_Xor_cipher(str(line)) if high < count: high = count index1 = index line1 = line output(line1, index1)The key to decrypt message is '5' The decrypted string is : Now that the party is jumpingExplaination: Here we make use of the function that we generated in the previous question. Instead of trying a single character on a string we try it on the a file that contains cipher of length 60. Based on the english frequency in previous question we get the above output. Question 5 -- Implement repeating-key XORimport binascii text = b"""Burning 'em, if you ain't quick and nimble I go crazy when I hear a cymbal""" key = "ICE" def repeating_key_XOR(text,key): key = (key)*(int)(len(text)/3) if len(text)%3!=0: for i in range(len(text)%3): key = key+key[i] key=bytes(key,encoding='utf-8') a=text b=key return (bytes(a^b for a,b in zip(a,b)).hex()) print(repeating_key_XOR(text,key))0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282fExplaination : Here we are bringing in the input string in the form of bytes. The function repeating_key_XOR would convert the key equivalent to the length of text. We make the increase in the lenght the same way did it in the question 3. Then we simply XOR the bytes with the help of zip function. 
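Manually extending the key to the length of the text works, but `itertools.cycle` expresses the same repeating-key idea without the length bookkeeping. A sketch under the same inputs (the function and variable names here are mine):

```python
from itertools import cycle

def repeating_key_xor(data: bytes, key: bytes) -> str:
    """XOR every byte of data against the key, repeating the key as often as needed."""
    return bytes(b ^ k for b, k in zip(data, cycle(key))).hex()

plaintext = (b"Burning 'em, if you ain't quick and nimble\n"
             b"I go crazy when I hear a cymbal")
print(repeating_key_xor(plaintext, b"ICE"))
```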
Question 6 -- Break repeating-key XORimport base64 import codecs str1=base64.b64decode("".join(list(open("input6.txt", "r")))) str_ascii=codecs.encode(str1,'hex') def humming_distance(str1,str2): n=(''.join(["{0:08b}".format(x) for x in str1])) m=(''.join(["{0:08b}".format(x) for x in str2])) count=0 for a,b in zip(n,m): if a!=b: count=count+1 return count def optimal_key_len(): for keysize in range(2,40): avg = 0 for keysize1 in range(len(str1)//keysize): a = str1[keysize*keysize1:keysize*keysize1+keysize] b = str1[keysize*keysize1+keysize:(keysize*keysize1+keysize)+keysize] norm_dist = float(humming_distance(a,b)/keysize) avg = avg + norm_dist d = keysize , avg/(len(str1)//keysize) c.append(d) return min(c, key = lambda t: t[1]) humming_distance(b"this is a test",b"wokka wokka!!!") def divide_with_keysize(): chunks=[] print() for keylen in range(len(str1)//29): chunks.append(str1[keylen*29:keylen*29+29]) return chunks chunks = divide_with_keysize() def guess_key(): key='' for i in range(29): new=bytearray() for tup in chunks: new.append(tup[i]) a,b=Single_byte_Xor_cipher(binascii.hexlify(new[:len(new)]).decode()) key+=chr(b) return (key) print (guess_key()) def decrypt(key): final_str=repeating_key_XOR(str1,key) print(binascii.unhexlify(final_str).decode()) decrypt(guess_key())Terminator X: Bring the noise I'm back and I'm ringin' the bell A rockin' on the mike while the fly girls yell In ecstasy in the back of me Well that's my DJ Deshay cuttin' all them Z's Hittin' hard and the girlies goin' crazy Vanilla's on the mike, man I'm not lazy. I'm lettin' my drug kick in It controls my mouth and I begin To just let it flow, let my concepts go My posse's to the side yellin', Go Vanilla Go! Smooth 'cause that's the way I will be And if you don't give a damn, then Why you starin' at me So get off 'cause I control the stage There's no dissin' allowed I'm in my own phase The girlies sa y they love me and that is ok And I can dance better than any kid n' play Stage 2 -- Yea the one ya' wanna listen to It's off my head so let the beat play through So I can funk it up and make it sound good 1-2-3 Yo -- Knock on some wood For good luck, I like my rhymes atrocious Supercalafragilisticexpialidocious I'm an effect and that you can bet I can take [...]Explaination: Here in this quesiton I took the 3rd approach as per mentioned in the instructions. First of all I read the file and find the key length using the average of the normalized humming distance. Then we divide the input string in to chunks of 29 which is the optimal key length that we guessed in the previous step. After we have a list of chunks we take transpose of the whole list so that we have 29 different strings with first character of all the strings. Then we use the single_byte_xor_cipher function the we created in the previous questions to guess the single byte of key. Doing this for 29 times would give us the appropriate string key. late we decode the whole file. 
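The keysize search above hinges on the Hamming (bit-edit) distance, which the notebook computes by comparing binary strings character by character. An equivalent, slightly shorter helper (mine, same semantics as the notebook's `humming_distance`) XORs the bytes and counts set bits:

```python
def hamming_distance(a: bytes, b: bytes) -> int:
    """Count the differing bits between two equal-length byte strings."""
    return sum(bin(x ^ y).count("1") for x, y in zip(a, b))

# The sanity check used in the challenge: these two strings differ in 37 bits.
print(hamming_distance(b"this is a test", b"wokka wokka!!!"))
```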
Question 7 -- AES in ECB modefrom base64 import b64decode from Crypto.Cipher import AES BLOCK_SIZE = 16 # Bytes pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * \ chr(BLOCK_SIZE - len(s) % BLOCK_SIZE) unpad = lambda s: s[:-ord(s[len(s) - 1:])] def change(st): st = st + (BLOCK_SIZE - len(st) % BLOCK_SIZE) * bytes(chr(BLOCK_SIZE - len(st) % BLOCK_SIZE),encoding = "utf-8") return st def decrypt(enc): enc = b64decode(enc) #enc = change(enc) #print (enc) cipher = AES.new(key, AES.MODE_ECB) return unpad(cipher.decrypt(enc)).decode('utf8',"ignore") def get_file(file): st="" for line in open(file): line = line.strip() st = st + line return st key = "YELLOW SUBMARINE" print(decrypt(get_file("input7.txt")))I'm back and I'm ringin' the bell A rockin' on the mike while the fly girls yell In ecstasy in the back of me Well that's my DJ Deshay cuttin' all them Z's Hittin' hard and the girlies goin' crazy Vanilla's on the mike, man I'm not lazy. I'm lettin' my drug kick in It controls my mouth and I begin To just let it flow, let my concepts go My posse's to the side yellin', Go Vanilla Go! Smooth 'cause that's the way I will be And if you don't give a damn, then Why you starin' at me So get off 'cause I control the stage There's no dissin' allowed I'm in my own phase The girlies sa y they love me and that is ok And I can dance better than any kid n' play Stage 2 -- Yea the one ya' wanna listen to It's off my head so let the beat play through So I can funk it up and make it sound good 1-2-3 Yo -- Knock on some wood For good luck, I like my rhymes atrocious Supercalafragilisticexpialidocious I'm an effect and that you can bet I can take a fly girl and make her wet. [...]Explaination: Here in this question we need to implement the AES in ECB mode which means the cipher goes through single step of encryption. Our key YELLOW SUBMARINE is a 16 byte string but we need to pad in the input cipher. We created pad and unpad functions to do the above mentioned work. Then we implement AES using new() functions mentioning ECB as mode. We simply decrypt the file from base64 and convert it into ascii before printing. Question 8 -- Detect AES in ECB modefrom collections import defaultdict max_reps = 0 cipher = None def substring(buffer, block_length=16): count = defaultdict(lambda: -1) for i in range(0, len(buffer), block_length): block = bytes(buffer[i:i + block_length]) count[block] += 1 return sum(count.values()) def detect_aes(max_reps = 0,cipher = None): for ciphertext in list(open("input8.txt", "r")): ciphertext = ciphertext.rstrip() count = substring(bytearray(ciphertext,'utf-8')) if count > max_reps: max_reps = count cipher = ciphertext print("The key with AES is :"+cipher) detect_aes()The key with AES is :d880619740a8a19b7840a8a31c810a3d08649af70dc06f4fd5d2d69c744cd283e2dd052f6b641dbf9d11b0348542bb5708649af70dc06f4fd5d2d69c744cd2839475c9dfdbc1d46597949d9c7e82bf5a08649af70dc06f4fd5d2d69c744cd28397a93eab8d6aecd566489154789a6b0308649af70dc06f4fd5d2d69c744cd283d403180c98c8f6db1f2a3f9c4040deb0ab51b29933f2c123c58386b06fba186aExplaination: In this question we simply make use of dictionary implementation in python to recognize the longest substing in a cipher. By adding the key into the dictionary, it would save the count of times the string is repeated. Here again we mention the block_size as 16 as the total width of a cipher with aes would be in the blocks of length 16. The string from the file is read in the form of bytarray to be saved in the dictionary. 
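The `defaultdict` scan above counts duplicate substrings of the hex text itself; an equivalent way to phrase the ECB test is to decode the hex first and count repeated 16-byte cipher blocks with `collections.Counter`. A sketch (names are mine; it scores one hex-encoded line at a time):

```python
from collections import Counter

def repeated_block_count(ciphertext_hex, block_size=16):
    """Count how many raw cipher blocks occur more than once -- a telltale sign of ECB."""
    data = bytes.fromhex(ciphertext_hex)
    blocks = [data[i:i + block_size] for i in range(0, len(data), block_size)]
    return sum(n - 1 for n in Counter(blocks).values() if n > 1)

# Usage idea: score every candidate line and keep the highest scorer, e.g.
# max(candidate_lines, key=repeated_block_count)
```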
Question 9 -- Implement PKCS7 paddingBLOCK_SIZE = 20 pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * \ chr(BLOCK_SIZE - len(s) % BLOCK_SIZE) pad("YELLOW SUBMARINE")* Concat `first_name` and `last_name`. Provide an alias to the derived result as `full_name`from pyspark.sql.functions import concat, lit # Equivalent logic using select users_df. \ select( 'id', 'first_name', 'last_name', concat('first_name', lit(', '), 'last_name').alias('full_name') ). \ show() # A Column whose name is full_name is added by `withColumn` users_df. \ select('id', 'first_name', 'last_name'). \ withColumn('full_name', concat('first_name', lit(', '), 'last_name')). \ show() help(users_df.withColumn) from pyspark.sql.functions import col users_df. \ select('id', 'first_name', 'last_name'). \ withColumn('fn', col('first_name')). \ show() users_df. \ select('id', 'first_name', 'last_name'). \ withColumn('fn', users_df['first_name']). \ show()* Add another column by name `course_count` where it contain number of courses the user is enrolled for.users_df.select('id', 'courses'). \ show() users_df.select('id', 'courses'). \ dtypes from pyspark.sql.functions import size users_df.select('id', 'courses'). \ withColumn('course_count', size('courses')). \ show()Stage1from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB level_1_classifier = {'logistic_classifier' : LogisticRegression(), 'svm_classifier' : SVC(), 'naive_bayse_gaussian_classifier' : GaussianNB()} level_1_out_X, model_dict = StackingMethod(X, Y, 10, 0, **level_1_classifier) #level_1_out_X = StackingMethod(X, Y, 10, 0, **level_1_classifier) level_1_out_X level_1_out_X['Label'] = Y['Label'] level_1_out_X level_1_out_X = level_1_out_X.drop(['Label'], axis=1) level_1_out_XStage2from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from xgboost import XGBClassifier level_2_classifier = {'rf_classifer' : RandomForestClassifier(), 'gbdt_classifier' : GradientBoostingClassifier(), 'xgb_classifier' : XGBClassifier()} level_2_out_X, model_dict = StackingMethod(level_1_out_X, Y, 10, 0, **level_2_classifier) level_2_out_X level_2_out_X['Label'] = Y['Label'] level_2_out_X level_2_out_X = level_2_out_X.drop(['Label'], axis=1) level_2_out_XStage3from lightgbm import LGBMClassifier # Set the parameters by cross-validation tuned_parameters = [] # Number of random trials NUM_TRIALS = 1 clf = LGBMClassifier() (max_score, best_estimator) = CrossValidationGridSearchNested(level_2_out_X, Y, NUM_TRIALS, 10, clf, tuned_parameters, 'roc_auc') best_parameter = best_estimator.get_params() print(f'\nmax_score = {max_score}\n') print(f'\nbest_estimator = {best_estimator}\n') print(f'\nbest_parameter = {best_parameter}\n') level_2_out_X.head() (n_estimator, n_scoring) = CrossValidationGetModelsScores(level_2_out_X, Y, 10, best_estimator) n_scoring n_estimator Y.head(15) level_2_out_X.head(10) x_1d_test_array = np.array([list(level_2_out_X.iloc[4]), list(level_2_out_X.iloc[5]), list(level_2_out_X.iloc[6])]) original_df_X = pd.DataFrame(data = x_1d_test_array, columns=level_2_out_X.columns) original_df_X x_1d_test_array2 = np.array([list(level_2_out_X.iloc[7]), list(level_2_out_X.iloc[8]), list(level_2_out_X.iloc[9])]) original_df_X2 = pd.DataFrame(data = x_1d_test_array2, columns=level_2_out_X.columns) original_df_X2 a = {(str(x)+'shit'):2 for x in ['123', 456]} a Y.values aa = [[x] for x in range(1, 4, 1)] aa = np.array(aa) aa.ravel() y_pred = 
np.array([original_df_X2['Label_logistic_classifier']]) y_pred from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score y_pred = np.array(original_df_X2['Label_logistic_classifier']) y_pred[0] = 0 y_pred y_true = np.array(Y['Label'][7:10]) #print(f'y_pred = \n{y_pred}') #print(f'y_true = \n{y_true}') accuracy_score(y_true, y_pred) from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score y_pred = original_df_X2['Label_logistic_classifier'] y_pred[0] = 1 y_true = Y['Label'][7:10] print(f'y_pred = \n{y_pred}') print(f'y_true = \n{y_true}') accuracy_score(y_true, y_pred) roc_auc_score(y_true, y_pred) original_df_X.values original_df_X2.values total_list = [] a = [[x] for x in range(10, 31, 10)] b = [[x] for x in range(40, 61, 10)] total_list.append(a) total_list.append(b) X2_featuer = original_df_X2.values for list_test in total_list: #a_arr = np.array(list_test) a_arr = list_test # print(a_arr.shape) X2_featuer = np.concatenate((X2_featuer, a_arr), axis=1) X2_featuer np.concatenate((original_df_X.values, original_df_X2.values), axis=0) #a = [PredictFunctionAggregation([x], n_estimator, voting=1, is_debug=0) for x in original_df_X.values] total = [] a = original_df_X.values[0].tolist() b = original_df_X.values[1].tolist() total.append(a) total.append(b) total = np.array(total) total x_1d_test_array = np.array([list(level_2_out_X.iloc[4]), list(level_2_out_X.iloc[5]), list(level_2_out_X.iloc[6])]) cc = [x for x in x_1d_test_array] cc[0].shape ccc = np.array([cc[0]]) ccc.shape x_1d_test_array = np.array([list(level_2_out_X.iloc[4]), list(level_2_out_X.iloc[5]), list(level_2_out_X.iloc[6])]) a = [PredictFunctionAggregation(np.array([x]), n_estimator, voting=1, is_debug=0) for x in x_1d_test_array] a x_1d_test_array = np.array([list(level_2_out_X.iloc[4])]) final_class = PredictFunctionAggregation(x_1d_test_array, n_estimator, voting=1, is_debug=1) final_class_1_prob = PredictFunctionAggregation(x_1d_test_array, n_estimator, voting=0, is_debug=1) final_class final_class_1_prob 1-final_class_1_probFeature Importanceimport numpy as np import matplotlib.pyplot as plt feature_name = list(level_2_out_X.columns) best_estimator.fit(level_2_out_X.values, Y.values.ravel()) importances = best_estimator.feature_importances_ indices = np.argsort(importances)[::-1] std = np.array([0.5 for x in importances]) #std = np.std([tree.feature_importances_ for tree in best_estimator.], axis=0) # Print the feature ranking print("Feature ranking:") for f in range(level_2_out_X.shape[1]): print("%d. feature %s (%f)" % (f + 1, feature_name[indices[f]], importances[indices[f]])) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(level_2_out_X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(level_2_out_X.shape[1]), indices) plt.xlim([-1, level_2_out_X.shape[1]]) plt.show()Feature ranking: 1. feature FG%_A (217.000000) 2. feature Label_logistic_classifier (100.000000) 3. feature 3P%_A (33.000000) 4. feature Home/Away_A (22.000000) 5. feature FT%_A (9.000000) 6. feature FG%_B (8.000000) 7. feature FGM_A (4.000000) 8. feature FGA_A (4.000000) 9. feature 3P%_B (3.000000) 10. feature 3PA_A (3.000000) 11. feature AST_A (2.000000) 12. feature FT%_B (2.000000) 13. feature TOV_A (1.000000) 14. feature BLK_A (1.000000) 15. feature OREB_A (1.000000) 16. feature DREB_A (1.000000) 17. feature STL_A (0.000000) 18. feature FTA_A (0.000000) 19. feature REB_A (0.000000) 20. 
feature FTM_A (0.000000) 21. feature 3PM_A (0.000000) 22. feature PF_A (0.000000) 23. feature Label_xgb_classifier (0.000000) 24. feature FGM_B (0.000000) 25. feature Label_gbdt_classifier (0.000000) 26. feature Label_rf_classifer (0.000000) 27. feature Label_naive_bayse_gaussian_classifier (0.000000) 28. feature Label_svm_classifier (0.000000) 29. feature PF_B (0.000000) 30. feature TOV_B (0.000000)[...]Post Grad Income Data by State: Investigating whether or not different properties of colleges change based on location of the school & Importing the dataimport pandas as pd import numpy as np import plotly_express as px import matplotlib.pyplot as plt import seaborn as sns import plotly.figure_factory as ff college_income_stats = pd.read_csv('Most-Recent-Cohorts-All-Data-Elements.csv') college_income_stats.head()/optnfs/el7/jupyterhub/envs/Psych81.09/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3044: DtypeWarning: Columns (6,9,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1008,1009,1010,1011,1014,1015,1016,1017,1018,1019,1021,1022,1023,1027,1028,1029,1030,1031,1032,1034,1035,1036,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1065,1066,1067,1068,1069,1070,1071,1073,1074,1075,1076,1078,1079,1080,1081,1082,1083,1084,1086,1087,1088,1089,1091,1092,1093,1094,1095,1096,1097,1099,1100,1101,1102,1104,1105,1106,1107,1108,1109,1110,1112,1113,1114,1115,1118,1119,1121,1122,1123,1125,1127,1128,1131,1132,1134,1135,1136,1138,1140,1141,1144,1145,1146,1147,1148,1149,1150,1151,1152,1153,1154,1157,1158,1159,1160,1161,1162,1163,1164,1165,1166,1167,1170,1171,1172,1173,1174,1175,1177,1178,1179,1180,1183,1184,1185,1186,1187,1188,1190,1192,1196,1199,1200,1201,1209,1212,1213,1214,1222,1223,1224,1225[...]Cleaning the datacollege_income_stats.columns #Chose to only look at income information 10 years after graduation. 
x = ['UNITID','INSTNM','STABBR','ZIP','LATITUDE','LONGITUDE','PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10'] college_income_stats[x] clean_data = college_income_stats[x].set_index(['UNITID','INSTNM']) clean_data2 = clean_data.fillna(0) clean_data2.head()Descriptive statistics#First, we converted the data into numeric data numeric_data = clean_data2[['PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10']] = clean_data2[['PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10']].apply(pd.to_numeric, errors='coerce') numeric_data.head() #Then, we calculated the max, min, mean and median numeric_data.max() numeric_data.min() numeric_data.median() numeric_data.mean()Data Visualizationx1 = clean_data2['STABBR'] final_data = numeric_data.copy() final_data['STATE']= x1.tolist() final_data.head() by_state=final_data.groupby('STATE').aggregate(np.mean) by_state.head(10) #Stacked bar plot of average state demographic composition state = by_state.index White = by_state['PCT_WHITE'] Black = by_state['PCT_BLACK'] Asian = by_state['PCT_ASIAN'] Hispanic = by_state['PCT_HISPANIC'] ind = [x for x, _ in enumerate(state)] stackedbarplot = plt.figure(figsize=(18,8)) plt.bar(ind, White, width=0.5, label='White', color='Red', bottom=Black+Asian+Hispanic) plt.bar(ind, Black, width=0.5, label='Black', color='Blue', bottom=Asian+Hispanic) plt.bar(ind, Asian, width=0.5, label='Asian', color='Yellow', bottom=Hispanic) plt.bar(ind, Hispanic, width=0.5, label='Hispanic', color='Green') plt.xticks(ind, state) plt.ylabel("Demographics") plt.xlabel("State") plt.legend(loc="upper right") plt.title("Institution Demographics by State") plt.show() #Bar plot of average median income by state state = by_state.index median = by_state['MD_EARN_WNE_P10'] state_pos = [i for i, _ in enumerate(state)] barplot = plt.figure(figsize=(18,8)) plt.bar(state_pos, median,width=0.5, color='Purple') plt.xlabel("State") plt.ylabel("Average Median Income") plt.title("Average Median Income by State") plt.xticks(state_pos, state) plt.show() #States with 10 highest average median incomes by_state_sorted = by_state.sort_values(['MD_EARN_WNE_P10'], ascending=False) by_state_sorted.head(10) #Examining the relationship between demographic percentages and median income wpop=by_state['PCT_WHITE'] bpop=by_state['PCT_BLACK'] asianpop=by_state['PCT_ASIAN'] hispanicpop=by_state['PCT_HISPANIC'] x=by_state['MD_EARN_WNE_P10'] plt.figure(figsize=(18,8)) plt.scatter(x, wpop, color='red') plt.scatter(x, bpop, color='blue') plt.scatter(x, asianpop,color='yellow') plt.scatter(x, hispanicpop, color='green') plt.xlabel('Avg Median Income') plt.ylabel('Percent') plt.title('Demographics vs. 
Avg Median Income') plt.legend(loc="upper right") plt.show()Comparing top 10 earning states and top 10 in each demographic categoryby_state_sorted2= by_state.sort_values(['PCT_WHITE'], ascending=False) by_state_sorted2.head(10)States that fall in the top 10 for both median income and percent white population include:MN, VT, IA, NEby_state_sorted3= by_state.sort_values(['PCT_BLACK'], ascending=False) by_state_sorted3.head(10)States that fall in the top 10 for both median income and percent black population include: DCby_state_sorted4= by_state.sort_values(['PCT_ASIAN'], ascending=False) by_state_sorted4.head(10)States that fall in the top 10 for both median income and percent asian population include: NYby_state_sorted5= by_state.sort_values(['PCT_HISPANIC'], ascending=False) by_state_sorted5.head(10)States that fall in the top 10 for both median income and percent asian population include: N/Amap_data = numeric_data.copy() map_data['LATITUDE']=clean_data2['LATITUDE'] map_data['LONGITUDE']=clean_data2['LONGITUDE'] map_data['STATE']=clean_data2['STABBR'] map_data.head()**Important** columns should be genes, rows should be cellsdata = pd.read_csv("data_Nestorowa.tsv.gz", compression="gzip", sep="\t", index_col=0).T data.head() probin = ProfileBin(data) %time probin.fit() probin.criteria.head() %time bindata = probin.binarize() bindata.head()geom_density2d()import pandas as pd from lets_plot import * LetsPlot.setup_html() df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv') ggplot(df, aes('cty', 'hwy')) + geom_density2d(aes(color='..group..'))Linear algebraimport numpy as np np.__version__Matrix and vector products Q1. Predict the results of the following code.x = [1,2] y = [[4, 1], [2, 2]] #print np.dot(x, y) #print np.dot(y, x) #print np.matmul(x, y) #print np.inner(x, y) #print np.inner(y, x) print(np.dot(x, y)) print(np.dot(y, x)) print(np.matmul(x, y)) print(np.inner(x, y)) print(np.inner(y, x)) print(np.matmul(y, x))[8 5] [6 6] [8 5] [6 6] [6 6] [6 6]Q2. Predict the results of the following code.x = [[1, 0], [0, 1]] y = [[4, 1], [2, 2], [1, 1]] #print np.dot(y, x) #print np.matmul(y, x) x = [[1, 0], [0, 1]] y = [[4, 1], [2, 2], [1, 1]] print(np.dot(y, x)) print(np.matmul(y, x))[[4 1] [2 2] [1 1]] [[4 1] [2 2] [1 1]]Q3. Predict the results of the following code.x = np.array([[1, 4], [5, 6]]) y = np.array([[4, 1], [2, 2]]) #print np.vdot(x, y) #print np.vdot(y, x) #print np.dot(x.flatten(), y.flatten()) #print np.inner(x.flatten(), y.flatten()) #print (x*y).sum() x = np.array([[1, 4], [5, 6]]) y = np.array([[4, 1], [2, 2]]) print(np.vdot(x, y)) print(np.vdot(y, x)) print(np.dot(x.flatten(), y.flatten())) print(np.inner(x.flatten(), y.flatten())) print((x*y).sum())30 30 30 30 30Q4. Predict the results of the following code.x = np.array(['a', 'b'], dtype=object) y = np.array([1, 2]) #print np.inner(x, y) #print np.inner(y, x) #print np.outer(x, y) #print np.outer(y, x) x = np.array(['a', 'b'], dtype=object) y = np.array([1, 2]) print(np.inner(x, y)) print(np.inner(y, x)) print(np.outer(x, y)) print(np.outer(y, x))abb abb [['a' 'aa'] ['b' 'bb']] [['a' 'b'] ['aa' 'bb']]Decompositions Q5. Get the lower-trianglular `L` in the Cholesky decomposition of x and verify it.x = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]], dtype=np.int32) x = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]], dtype=np.int32) np.linalg.cholesky(x)Q6. 
Compute the qr factorization of x and verify it.x = np.array([[12, -51, 4], [6, 167, -68], [-4, 24, -41]], dtype=np.float32) x = np.array([[12, -51, 4], [6, 167, -68], [-4, 24, -41]], dtype=np.float32) q,r = np.linalg.qr(x) print('q=',q,'\nr=',r)q= [[-0.85714287 0.3942857 0.33142856] [-0.42857143 -0.9028571 -0.03428571] [ 0.2857143 -0.17142858 0.94285715]] r= [[ -14. -21. 14.] [ 0. -175. 70.] [ 0. 0. -35.]]Q7. Factor x by Singular Value Decomposition and verify it.x = np.array([[1, 0, 0, 0, 2], [0, 0, 3, 0, 0], [0, 0, 0, 0, 0], [0, 2, 0, 0, 0]], dtype=np.float32) x = np.array([[1, 0, 0, 0, 2], [0, 0, 3, 0, 0], [0, 0, 0, 0, 0], [0, 2, 0, 0, 0]], dtype=np.float32) np.linalg.svd(x)Matrix eigenvalues Q8. Compute the eigenvalues and right eigenvectors of x. (Name them eigenvals and eigenvecs, respectively)x = np.diag((1, 2, 3)) x = np.diag((1, 2, 3)) print(np.linalg.eigvals(x)) print(np.linalg.eig(x)[1]) eigenvals = np.linalg.eigvals(x) eigenvecs = np.linalg.eig(x)[1]Q9. Predict the results of the following code.#print np.array_equal(np.dot(x, eigenvecs), eigenvals * eigenvecs) print(np.array_equal(np.dot(x, eigenvecs), eigenvals * eigenvecs))TrueNorms and other numbers Q10. Calculate the Frobenius norm and the condition number of x.x = np.arange(1, 10).reshape((3, 3)) x = np.arange(1, 10).reshape((3, 3)) print(np.linalg.norm(x,ord="fro")) print(np.linalg.cond(x,p="fro"))16.881943016134134 4.561770736605098e+17Q11. Calculate the determinant of x.x = np.arange(1, 5).reshape((2, 2)) x = np.arange(1, 5).reshape((2, 2)) np.linalg.det(x)Q12. Calculate the rank of x.x = np.eye(4) x = np.eye(4) np.linalg.svd(x)[1].sizeQ13. Compute the sign and natural logarithm of the determinant of x.x = np.arange(1, 5).reshape((2, 2)) x = np.arange(1, 5).reshape((2, 2)) sign, logdet = np.linalg.slogdet(x) print(sign,' ',logdet) np.log(-np.linalg.det(x))Q14. Return the sum along the diagonal of x.x = np.eye(4) x = np.eye(4) sum(np.diag(x)) np.trace(x)Solving equations and inverting matrices Q15. 
Compute the inverse of x.x = np.array([[1., 2.], [3., 4.]]) x = np.array([[1., 2.], [3., 4.]]) np.linalg.inv(x)Lab 6: Calculations Part 1: Differential Pressure Measurmentsimport matplotlib.pyplot as plt import numpy as np import pandas as pd import os import math dirPath = os.path.realpath('.') fileName = 'rawData/lab 6 measurements (2).xlsx' filePath = os.path.join(dirPath, fileName) df = pd.read_excel(filePath,sheetname="Part1",header=0) print(df) cols = df.columnsdeltah(cm) error(cm) V (mV) error (mV) deltah(cm).1 error(cm).1 \ 0 0 1 1 1 -75 1 1 -5 1 -17 1 -70 1 2 -10 1 -34 1 -65 1 3 -15 1 -54 1 -60 1 4 -20 1 -67 1 -55 1 5 -25 1 -86 1 -50 1 6 -30 1 -106 1 -45 1 7 -35 1 -122 1 -40 1 8 -40 1 -141 1 -35 1 9 -45 1 -160 1 -30 1 10 -50 1 -177 1 -25 1 11 -55 1 -194 1 -20 1 12 -[...]Plottingplt.figure(1) plt.errorbar(df[cols[0]],df[cols[2]],xerr=2*df[cols[1]],yerr=2*df[cols[3]]) plt.errorbar(df[cols[4]],df[cols[6]],xerr=2*df[cols[5]],yerr=2*df[cols[7]]) plt.title('Voltage vs delta h') plt.xlabel('delta h (cm)') plt.ylabel('Voltage (mV)') plt.legend(['Ramp Down','Ramp Up']) plt.grid() plt.savefig('pressure.png') plt.show()Plot again, but zoom in so that error bars are visibleplt.figure(1) plt.errorbar(df[cols[0]],df[cols[2]],xerr=2*df[cols[1]],yerr=2*df[cols[3]]) plt.errorbar(df[cols[4]],df[cols[6]],xerr=2*df[cols[5]],yerr=2*df[cols[7]]) plt.title('Voltage vs delta h (Hysteresis Effects)') plt.xlabel('delta h (cm)') plt.ylabel('Voltage (mV)') plt.legend(['Ramp Down','Ramp Up']) plt.axis([-10,1,-50,5]) plt.grid() plt.savefig('pressureHysteresis.png') plt.show()Sensitivity Calculationssensitivity = df['V (mV)'][1:]/df['deltah(cm)'][1:] print('sensitivity =',np.average(sensitivity), 'mV/cm') sens_error = np.sqrt((df['error(cm)'][1:]/df['V (mV)'][1:])**2 + (df['error (mV)'][1:]/df['deltah(cm)'][1:])**2)*sensitivity print('sensitivity error =', np.average(sens_error), 'mV/cm')sensitivity = 3.51078806379 mV/cm sensitivity error = 0.159453662338 mV/cmSensitivity with Hysteresis Effects Calculationssensitivity_h = df['V (mV).1'][0:10]/df['deltah(cm).1'][0:10] print(sensitivity_h) print('sensitivity =',np.average(sensitivity_h), 'mV/cm') sens_error_h = np.sqrt((df['error(cm).1'][0:10]/df['V (mV).1'][0:10])**2 + (df['error (mV)'][0:10]/df['deltah(cm).1'][0:10])**2)*sensitivity_h print('sensitivity error =', np.average(sens_error_h), 'mV/cm')0 3.600000 1 3.642857 2 3.676923 3 3.700000 4 3.745455 5 3.780000 6 3.800000 7 3.875000 8 3.885714 9 3.966667 dtype: float64 sensitivity = 3.76726157176 mV/cm sensitivity error = 0.0813336209479 mV/cmEffects of HysteresishysteresisOffset = df[cols[1]][0]-df[cols[6]][len(df[cols[4]])-1] print('Hysteresis Offset =', hysteresisOffset,'mV') hysteresisError = np.sqrt(1**2 + 1**2) print('Hysteresis Error =',hysteresisError, 'mV')Hysteresis Offset = 16 mV Hysteresis Error = 1.41421356237 mVPart 2: Accelerometer Angle MeasurementsMake a new data frame dfangle to hold the data from the accelAngle sheet.dfangle = pd.read_excel(filePath,sheetname="accelAngle",header=0) print(dfangle) colsangle = dfangle.columns plt.figure(2) plt.errorbar(dfangle[colsangle[2]],dfangle[colsangle[0]],xerr=2*dfangle[colsangle[3]],yerr=2*dfangle[colsangle[1]]) plt.grid() plt.title('Voltage vs Angle') plt.ylabel('Angle (degrees)') plt.xlabel('Voltage (mV)') plt.savefig('VoltAngle.png') plt.show() accArray = [] accArrayError = [] for i in range(0,len(dfangle[colsangle[0]])): accArray.append(math.cos(math.radians(np.array(dfangle[colsangle[0]][i])))) for i in range(0,len(dfangle[colsangle[0]])): 
accArrayError.append(math.sin(math.radians(np.array(dfangle[colsangle[0]][i])))) acc = 9.8*np.array(accArray) accError=9.8*math.radians(1)*np.array(accArrayError) plt.figure(3) plt.errorbar(dfangle[colsangle[2]],acc,xerr=2*dfangle[colsangle[3]],yerr= accError) plt.grid() plt.title('Voltage vs Acceleration') plt.ylabel('Acceleration (m/s$^2$)') plt.xlabel('Voltage (mV)') plt.savefig('VoltAcceleration.png') plt.show()Part 2: Static Measurementsdfstatic = pd.read_excel(filePath,sheetname="staticMeasurements",header=0) print(dfstatic) colsstatic = dfstatic.columns gravity = 9.81 # m/s/s kstatic = dfstatic['Force']/dfstatic['disp (m)'] print('kstatic') print(kstatic) kstatic_error = kstatic*np.sqrt((dfstatic['Force Error']/dfstatic['Force'])**2 + (dfstatic['error (cm)']/dfstatic['Displacement (cm)'])**2) print('\n error in kstatic =') print(kstatic_error)kstatic 0 2581.992000 1 1226.795000 2 995.269091 3 906.293077 4 801.362338 dtype: float64 error in kstatic = 0 365.148810 1 48.193059 2 21.326107 3 12.323961 4 7.359075 dtype: float64Part 2: Dynamic Measurementsdfdynamic = pd.read_excel(filePath,sheetname="dynamicMeasurements",header=0) print(dfdynamic) colsdynamic = dfdynamic.columnsfreq (Hz) error period error.1 omega 0 2.17 0.05 440 20 13.634512 1 2.08 0.05 470 20 13.069025 2 2.17 0.05 480 20 13.634512 3 2.22 0.05 440 20 13.948671 4 2.17 0.05 460 20 13.634512__NOTE:__ We also need to define the massMASS = 0.2772Now lets calculate $k_{dynamic}$kdynamic = MASS*dfdynamic['omega']**2 print('kdynamic =') print(kdynamic) # now calculate error num = range(0,5) kdynamic_error = dfdynamic['error']/dfdynamic['freq (Hz)']*kdynamic*2 print('\nkdynamic error =') print(kdynamic_error) plt.figure(4) plt.errorbar(num,kdynamic,yerr=kdynamic_error,fmt='o') plt.title('Kdynamic, multiple calculations') plt.xlabel('Trial Number') plt.ylabel('k dynamic (N/m)') plt.legend(['k dynamic']) plt.axis([-0.1,4.1,0,55]) plt.grid() plt.savefig('kdynamic.png') plt.show() dfWheat = pd.read_excel(filePath,sheetname="Wheatstone",header=0) print(dfWheat) colsWheat = dfWheat.columns R3 = np.linspace(110,130,11) R1 = 120 R2 = 120 R4 = 120 expectedVout = 30*(R2/(R2+R1)-R4/(R3+R4)) print(expectedVout) wheatArray = [] for i in range(0,len(dfWheat[colsWheat[0]])): wheatArray.append(1/(np.array(dfWheat[colsWheat[0]][i])+120)) plt.figure(5) plt.plot() plt.errorbar(wheatArray,dfWheat[colsWheat[1]],xerr=0,yerr=2*dfWheat[colsWheat[2]]) plt.grid() plt.title('Resistance vs Bridge Output Voltage') plt.ylabel('Voltage (V)') plt.xlabel('1/(R$_x$ + 120) (mho)') plt.savefig('Wheatstone.png') plt.show()Neural Networks===============Neural networks can be constructed using the ``torch.nn`` package.A typical training procedure for a neural network is as follows:- Define the neural network that has some learnable parameters (or weights)- Iterate over a dataset of inputs- Process input through the network- Compute the loss (how far is the output from being correct)- Propagate gradients back into the network’s parameters- Update the weights of the network, typically using a simple update rule: ``weight = weight - learning_rate * gradient``import torch import numpy as np import matplotlib.pyplot as plt from torchvision import datasets, transforms # check if CUDA is available train_on_gpu = torch.cuda.is_available() if train_on_gpu: print('CUDA is available. Training on GPU ...') else: print('CUDA not available. Training on CPU ...')CUDA not available. 
Training on CPU ...Load the dataWe are using MINIST Dataset# Define a transform the data transform = transforms.Compose([transforms.ToTensor()]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) # Download and load the test data testset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=True) 10000/32 len(testloader)Define the network------------------Let’s define this network:import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(784, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) x = nn.LogSoftmax(dim=1)(x) return x net = Net() print(net) # use GPU if available if train_on_gpu: device = torch.device('cuda') else: device = torch.device('cpu') # Loss Function criterion = nn.NLLLoss() net.to(device) # optimizer from torch import optim optimizer = optim.SGD(net.parameters(), lr=0.003)Training the Network Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network* Use the network output to calculate the loss* Perform a backward pass through the network with loss.backward() to calculate the gradients* Take a step with the optimizer to update the weightsepochs = 15 steps = 0 train_losses, test_losses = [], [] for e in range(epochs): running_loss = 0 net.train() for images, labels in trainloader: # move tensors to GPU is CUDA is available if train_on_gpu: images, labels = images.cuda(), labels.cuda() # Flatten Image images = images.view(images.shape[0], -1) # Clear the gradeints optimizer.zero_grad() # Forward pass, get our logits log_ps = net(images) # Calculate the loss with the logits and the labels loss = criterion(log_ps, labels) # Calculate the gradients loss.backward() # Update the weights optimizer.step() running_loss += loss.item() else: test_loss = 0 accuracy = 0 # Turn off gradients for validation, saves memory and computations net.eval() with torch.no_grad(): for images, labels in testloader: # move tensors to GPU is CUDA is available if train_on_gpu: images, labels = images.cuda(), labels.cuda() # Flatten Image images = images.view(images.shape[0], -1) log_ps = net(images) test_loss += criterion(log_ps, labels) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)) train_losses.append(running_loss/len(trainloader)) test_losses.append(test_loss/len(testloader)) # print training/test statistics print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)), "Test Loss: {:.3f}.. 
".format(test_loss/len(testloader)), "Test Accuracy: {:.3f}".format(accuracy/len(testloader))) idx = 0 dataiter = iter(testloader) images, labels = dataiter.next() img = images[idx].view(1, 784) # Calculate the class probabilities (softmax) for img ps = torch.exp(net(img)) plt.imshow(images[idx].view(28,28)) torch.argmax(ps)CNNclass CNN(nn.Module): def __init__(self): super(CNN, self).__init__() # 1 input image channel, 6 output channels, 3x3 square convolution # kernel self.conv1 = nn.Conv2d(1, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) # an affine operation: y = Wx + b self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in size: num_features *= s return num_features cnn_net = CNN() print(cnn_net) epochs = 15 steps = 0 train_losses, test_losses = [], [] for e in range(epochs): running_loss = 0 cnn_net.train() for images, labels in trainloader: # move tensors to GPU is CUDA is available if train_on_gpu: images, labels = images.cuda(), labels.cuda() # Flatten Image # images = images.view(images.shape[0], -1) # Clear the gradeints optimizer.zero_grad() # Forward pass, get our logits log_ps = cnn_net(images) # Calculate the loss with the logits and the labels loss = criterion(log_ps, labels) # Calculate the gradients loss.backward() # Update the weights optimizer.step() running_loss += loss.item() else: test_loss = 0 accuracy = 0 # Turn off gradients for validation, saves memory and computations net.eval() with torch.no_grad(): for images, labels in testloader: # move tensors to GPU is CUDA is available if train_on_gpu: images, labels = images.cuda(), labels.cuda() # Flatten Image # images = images.view(images.shape[0], -1) log_ps = cnn_net(images) test_loss += criterion(log_ps, labels) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)) train_losses.append(running_loss/len(trainloader)) test_losses.append(test_loss/len(testloader)) # print training/test statistics print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)), "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)), "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))If no data at hand, try the example from [Aeneas repo](https://github.com/readbeyond/aeneas/tree/devel) or grab one from [LibriVox](https://librivox.org).align("p001.mp3", "p001.xhtml", "en")**Analysis of Variance********![anova](https://i.pinimg.com/originals/47/01/3b/47013becd153df62e9ee817a46e5a2ba.png)#-------Importing from other folder------# import sys sys.path.insert(0, "../resources/") import mstats as ms #-----------Miguel's statistics----------# import scipy.stats as ss import numpy as np1. Análisis de varianza a una víaEl director administrativo de una empresa industrial desea determinar si los tresprogramas de capacitación distintos tienen efectos diferentes en los niveles deproductividad de los empleados. 
Se seleccionan aleatoriamente 14 empleados y seasignan a uno de los tres programas. Al terminar la capacitación, cada empleadoresponde un examen para determinar su competencia. Se colocan cuatro empleadosen el primer programa de capacitación y cinco en cada uno de los otros dosprogramas. Cada uno de estos tres grupos se trata de manera independiente comonuestras separadas.![image.png](attachment:image.png)programa1 = [85, 72, 83, 80] # Resultados de la muestra 1 programa2 = [80, 84, 81, 78, 82] # Resultados de la muestra 2 programa3 = [82, 80, 85, 90, 88] # Resultados de la muestra 3 n_programas = 14 # Número total de observaciones c_programas = 3 # Número total de tratamientos alpha_programas = 0.05 # Nivel de significancia**Ho:** μ1 = μ2 = μ3**Ha:** μ1 ≠ μ2 ≠ μ3 **Encontramos las medias por cada tratamiento y la gran media**mean_programa1 = np.mean(programa1) mean_programa2 = np.mean(programa2) mean_programa3 = np.mean(programa3) bigmean_programas = np.mean(programa1 + programa2 + programa3)**Vamos a encontrar las sumas de los cuadrados** **Suma de cuadrados total**sct_programas = ms.anova.get_sct(programa1, programa2, programa3) sct_programas**Suma de cuadrados de los tratamientos**sctr_programas = ms.anova.get_sctr(programa1, programa2, programa3) sctr_programas**Suma de cuadrados del error**sce_programas = ms.anova.get_sce(programa1, programa2, programa3) sce_programasVamos a encontrar los cuadrados medios **Cuadrado medio total**cmt_programas = ms.anova.get_cmt(programa1, programa2, programa3) cmt_programas**Cuadrado medio del tratamiento**cmtr_programas = ms.anova.get_cmtr(programa1, programa2, programa3) cmtr_programas**Cuadrado medio del error**cme_programas = ms.anova.get_cme(programa1, programa2, programa3) cme_programasVamos a encontrar la razón Ff_programas = ms.anova.get_fratio(programa1, programa2, programa3) f_programas**Valor crítico**df1_programas = c_programas - 1 df2_programas = n_programas - c_programas ms.hypothesis.crit_val_f(df1_programas, df2_programas, alpha_programas)Regla de decisión Debido a que el valor de la razón f es **1.9431643625192017** que es menor al valor crítico encontrado **3.9822979570944836**, cae en la zona de no rechazo. Por lo que el CEO no debería rechazar la hipótesis de que el puntaje de prueba son los mismos en todos los tratamiento. 2. Análisis de varianza a una vía es vicepresidente de mercadeo en First City Bank enAtlanta. Los recientes esfuerzos promocionales para atraer nuevos depositantes incluyenalgunos juegos y premios en cuatro sucursales del banco. Shade está convencido de quediferentes tipos de premios atraerían a diferentes grupos de ingreso. Las personas de unnivel de ingreso prefieren los regalos, mientras que los de otro grupo de ingreso puedensentirse más atraídas por viajes gratuitos a sitios favoritos para pasar vacaciones. Shadedecide utilizar el monto de los depósitos como una media representativa del ingreso. Eldesea determinar si existe una diferencia en el nivel promedio de depósitos entre lascuatro sucursales. Si se halla alguna diferencia, Shade ofrecerá una diversidad depremios promocionales. 
Se desea un alfa del 5%.![image.png](attachment:image.png)sucursal1 = [5.1, 4.9, 5.6, 4.8, 3.8, 5.1, 4.8] # Resultados de la muestra 1 sucursal2 = [1.9, 1.9, 2.1, 2.4, 2.1, 3.1, 2.5] # Resultados de la muestra 2 sucursal3 = [3.6, 4.2, 4.5, 4.8, 3.9, 4.1, 5.1] # Resultados de la muestra 3 sucursal4 = [1.3, 1.5, 0.9, 1.0, 1.9, 1.5, 2.1] # Resultados de la muestra 4 n_sucursales = 7*4 # Número total de observaciones c_sucursales = 4 # Número total de tratamientos alpha_sucursales = 0.05 # Nivel de significancia ms.anova.get_fratio(sucursal1, sucursal2, sucursal3, sucursal4) df1_sucursales = c_sucursales - 1 df2_sucursales = n_sucursales - c_sucursales ms.hypothesis.crit_val_f(df1_sucursales, df2_sucursales, alpha_sucursales)No todas las medias son iguales, debido a que el valor de la razon F *78.09022177419358* es mayor al valor crítico *3.0087865704473615* Diferencia mínima significativa (DMS)ms.anova.get_dms(sucursal1, sucursal2, sucursal3, sucursal4, sign=alpha_sucursales)|x_1 - x_2| = |4.871428571428571 - 2.2857142857142856| = 2.5857142857142854 > 0.5361490207167764 |x_1 - x_3| = |4.871428571428571 - 4.314285714285715| = 0.557142857142856 > 0.5361490207167764 |x_1 - x_4| = |4.871428571428571 - 1.457142857142857| = 3.4142857142857137 > 0.5361490207167764 |x_2 - x_3| = |2.2857142857142856 - 4.314285714285715| = 2.0285714285714294 > 0.5361490207167764 |x_2 - x_4| = |2.2857142857142856 - 1.457142857142857| = 0.8285714285714285 > 0.5361490207167764 |x_3 - x_4| = |4.314285714285715 - 1.457142857142857| = 2.8571428571428577 > 0.5361490207167764Podemos comprobar que según la prueba DMS, todas las poblaciones sugieren medias poblacionales diferentes, debido a que la diferencia entre las medias de cada uno son mayores al valor DMS correspondiente entre esas 2 medias. 3. Análisis de varianza a una víaCada vez más norteamericanos buscan escapar de las presiones urbanas, los pagos deimpuestos en los parques nacionales ha demostrado un incremento marcado dequienes acampan los fines de semana. Outdoor World informó recientemente que elparque Yosemite National Park ubicado en las sierras altas de California contrató unconsultor en economía para estudiar la situación financiera del parque.Parte del esfuerzo realizado por el consultor requería una comparación de los ingresosdel parque provenientes de varias fuentes, incluyendo los pagos por acampar, licenciaspara pescar y para pasear en bote. Aquí aparecen los datos para visitantesseleccionados aleatoriamente. Se determina si existe diferencia en los ingresospromedio que recibe el parque provenientes de estas tres actividades. 
Asuma un alfa de5%.![image.png](attachment:image.png)acampar = [38, 32, 35, 36, 38, 32] # Resultados de la muestra 1 pescar = [30, 25, 31, 35] # Resultados de la muestra 2 pasear_bote = [19, 35, 20, 22, 25] # Resultados de la muestra 3 n_parque = len(acampar + pescar + pasear_bote) # Número total de observaciones c_parque = 3 # Número total de tratamientos alpha_parque = 0.05 # Nivel de significancia ms.anova.get_fratio(acampar, pescar, pasear_bote) df1_parque = c_parque - 1 df2_parque = n_parque - c_parque ms.hypothesis.crit_val_f(df1_parque, df2_parque, alpha_parque)No todas las medias son iguales debido a que el valor de la razón F *7.736749000851729* es mayor que el valor crítico *3.8852938346523933* Diferencia mínima significativa (DMS)ms.anova.get_dms(acampar, pescar, pasear_bote, sign=alpha_parque)|x_1 - x_2| = |35.166666666666664 - 30.25| = 4.916666666666664 <= 5.85814365695289 |x_1 - x_3| = |35.166666666666664 - 24.2| = 10.966666666666665 > 5.4954258663763 |x_2 - x_3| = |30.25 - 24.2| = 6.050000000000001 <= 6.0879614711278505Segun la prueba DMS, solo acampar y pasear en bote tienen una diferencia mínima significativa.Por lo que con un nivel de significancia del *5%* se puede concluir que solo acampar y pasear en bote tienen una diferencia mínima significativa. 4. Análisis de varianza a 2 víasUna empresa de contabilidad grande trata de seleccionar un sistema de computaciónintegrado a la oficina, entre los tres modelos que están actualmente en estudio. Laselección final dependerá de la productividad de los sistemas. Se seleccionanaleatoriamente cinco operadores para manejar cada sistema. Es importante tener encuenta que el nivel de experiencia que tienen los empleados en el manejo decomputadores puede afectar el resultado de la prueba. Por tanto, existe la necesidad dejustificar el impacto de la experiencia al determinar los méritos relativos de los sistemasde computación. Los niveles resultantes de producción medios en unidades por horaaparecen a continuación. El nivel de experiencia más alto indica más años decapacitación.![image.png](attachment:image.png)La empresa puede considerar que los años de experiencia de un operador afectansignificativamente su productividad. Sin embargo, la empresa está interesada en laproductividad de los sistemas de computación y no en la de los empleados. 
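For reference before the calculations that follow, this is the randomized-block (two-way, no replication) decomposition that the `ms.anova` helpers implement, written with the notebook's own acronyms (a summary added here, not part of the original text); $c$ is the number of treatments (systems) and $r$ the number of blocks (operators):

$$SCT = SCTR + SCBL + SCE,\qquad CMTR = \frac{SCTR}{c-1},\quad CMBL = \frac{SCBL}{r-1},\quad CME = \frac{SCE}{(r-1)(c-1)}$$

$$F_{\text{tratamientos}} = \frac{CMTR}{CME},\qquad F_{\text{bloques}} = \frac{CMBL}{CME}$$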
Por tanto sedebe ajustar la productividad de los empleados eliminando el efecto de la variabilidad deloperador para obtener una medida precisa, no contaminada, de la calidad del sistema.sistema1 = [27, 31, 42, 38, 45] # Resultados tratamiento 1 sistema2 = [21, 33, 39, 41, 46] sistema3 = [25, 35, 39, 37, 45] scbl_sistemas = ms.anova.get_scbl(sistema1, sistema2, sistema3) scbl_sistemas sct_sistemas = ms.anova.get_sct(sistema1, sistema2, sistema3) sct_sistemas sctr_sistemas = ms.anova.get_sctr(sistema1, sistema2, sistema3) sctr_sistemas sce_sistemas = ms.anova.get_sce2(sistema1, sistema2, sistema3) sce_sistemas cmt_sistemas = ms.anova.get_cmt(sistema1, sistema2, sistema3) cmt_sistemas cmtr_sistemas = ms.anova.get_cmtr(sistema1, sistema2, sistema3) cmtr_sistemas cme_sistemas = ms.anova.get_cme2(sistema1, sistema2, sistema3) cme_sistemas cmbl_sistemas = ms.anova.get_cmbl(sistema1, sistema2, sistema3) cmbl_sistemas fratio_sistemas = ms.anova.get_fratio2(sistema1, sistema2, sistema3) fratio_sistemas fratio_cmbl_sistemas = ms.anova.get_fratio2_cmbl(sistema1, sistema2, sistema3) fratio_cmbl_sistemas df1_sistemas = len(sistema1) - 1 df2_sistemas = (len(sistema1) - 1)*(3 - 1) ms.hypothesis.crit_val_f(df1_sistemas, df2_sistemas, 0.05)Como el f de los bloques es mayor al valor crítico se concluye que los niveles de experiencia tienen un efecto en las tasas de producción. Ahora la empresa está preparada para probar la hipótesis en la cual estuvo originalmenteinteresada. ¿Existe alguna diferencia en la producción promedio de los sistemas decomputación (tratamientos)?Si el valor de alfa del 5% se mantiene, el valor crítico de F para CMTR con 2 y 8 grados delibertad se obtiene de la tabla y es Fα,(c-1),(r-1)(c-1) = F0.05,2,8 = 4.46 (2 en el numerador porCMTR y 8 en el denominador por CME).La hipótesis a probar es:H0: μ1 = μ2 = μ3HA: no todas las medias de las columnas son igualesDondeμison las medias de las columnas para los tres sistemas de computación.Regla de decisión: “No rechazar la hipótesis nula si F ≤ 4.46. Rechazar la hipótesis nula si F> 4.46”.F = 0.09 < 4.46 por lo que la hipótesis nula no se rechaza y la empresa concluye que losniveles de producción promedio de los tres sistemas de computación no difieren, una vezque se ha hecho la corrección para el factor experiencia. Los empleados de diferentesniveles de experiencia se desempeñan igualmente bien en todas las máquinas. Nointeresa cual sistema de computación compren. 5. Análisis de varianza a 2 víasUna emisión reciente de la revista Fortune describió los esfuerzosrealizados por una importante empresa electrónica para desarrollar un sistema en elcual se les daba a los empleados la oportunidad de evaluar el desempeño de sussupervisores. Se seleccionan aleatoriamente cinco empleados y se les pide evaluar acuatro de sus gerentes en una escala del 10 al 50. 
The results are:![image.png](attachment:image.png)The manager of the electronics company wants to know whether there is a difference between the average ratings of the four managers.

puntajes1 = [31, 29, 13, 28, 14] # Results, treatment 1
puntajes2 = [35, 32, 17, 38, 20] # Results, treatment 2
puntajes3 = [46, 45, 35, 52, 40] # Results, treatment 3
puntajes4 = [38, 36, 20, 39, 20] # Results, treatment 4
r_puntajes = 5 # Number of blocks
c_puntajes = 4 # Number of treatments
n_puntajes = r_puntajes * c_puntajes # Total number of observations
alpha_puntajes = 0.05 # Significance level
df1_sistemas = r_puntajes - 1
df2_sistemas = (r_puntajes - 1) * (c_puntajes - 1)
ms.hypothesis.crit_val_f(df1_sistemas, df2_sistemas, alpha_puntajes)

Loading the necessary libraries and mounting Google Drive, where the datasets are stored. We have a French-to-Fongbe JW300 dataset from which we will extract relevant data points to augment the training data; we will then train a French-Fongbe model on the augmented data. We also have a French-to-Ewe JW300 dataset from which we will extract relevant data points to augment the French-Ewe data and train our model on the augmented data.

# !pip install -U sentence-transformers
# !pip install --upgrade transformers
from google.colab import drive
drive.mount('/content/drive')

Mounted at /content/drive

Loading the libraries needed to extract the relevant records from the collected JW300 data.

import pandas as pd import numpy as np import gc from sentence_transformers import SentenceTransformer from sklearn.metrics.pairwise import cosine_similarity

Augmented Data Extraction

We start with French to Fongbe (Method 1). Here we consider only the French records from the test data and try to extract similar records from the JW300 French-Fongbe dataset. Cosine similarity is used to measure the similarity between each test data point and all French data points in the JW300 data. Only the records with the highest and second-highest similarities are taken into consideration.
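Before the batched implementation that follows, here is a compact sketch of the retrieval step (my own simplification, assuming the `jwdf` and `availdf` frames loaded above fit in memory at once; the notebook instead encodes the JW300 side in 50,000-row batches):

```python
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer('xlm-roberta-large')           # same model the next cell loads
test_emb = model.encode(availdf['French'].tolist(), batch_size=64)
jw_emb = model.encode(jwdf['French'].tolist(), batch_size=128)

sims = cosine_similarity(test_emb, jw_emb)                 # shape: (n_test, n_jw300)
top2 = np.argsort(-sims, axis=1)[:, :2]                    # best and second-best JW300 row per test sentence
train_aug = jwdf.iloc[np.unique(top2[:, 0])]               # most similar rows -> training augmentation
valid_aug = jwdf.iloc[np.unique(top2[:, 1])]               # second most similar rows -> validation split
```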
Model used for sentence similarity - XLM-ROBERTA-LARGEjwdf = pd.read_csv('/content/drive/MyDrive/takwimu_translations/french_fongbe.csv') jwdf['French'] = jwdf['French'].astype(str) jwdf['Fongbe'] = jwdf['Fongbe'].astype(str) jwdf = jwdf.dropna(subset=['French']) jwdf = jwdf.dropna(subset=['Fongbe']) availdf = pd.read_csv('Test.csv') availdf = availdf[availdf['Target_Language']=='Fon'] model = SentenceTransformer('xlm-roberta-large') highest_indices_df = pd.DataFrame(columns=['French']) highest_indices_df['French'] = availdf['French'].values.tolist() highest_similarity_df = pd.DataFrame(columns=['French']) highest_similarity_df['French'] = availdf['French'].values.tolist() batch = 1 rows = availdf['French'].values.tolist() row_enc = model.encode(rows,batch_size=64,show_progress_bar=True) for i in range(0,jwdf.shape[0],50000): print('batch:- ',batch) start = i end = start+50000-1 columns = jwdf['French'].loc[start:end].values.tolist() columns_enc = model.encode(columns,batch_size=128,show_progress_bar=True) try: a = start b = end+1 colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) except: a = start b = a+len(columns) colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) highest_label = simdf.idxmax(axis=1).values highest_match = simdf.max(axis=1).values highest_indices_df[f'batch_{batch}_indices'] = highest_label highest_similarity_df[f'batch_{batch}_similarity'] = highest_match del simdf gc.collect() batch = batch+1 sim = highest_similarity_df.drop(['French'],axis=1) highest_indices_df['second_most_sim'] = sim.T.apply(lambda x: x.nlargest(2).idxmin()) highest_indices_df['first_most_sim'] = sim.T.apply(lambda x: x.nlargest(1).idxmin()) def get_index(text): return text.replace('similarity','indices') highest_indices_df['second_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['second_most_sim'])],axis=1) highest_indices_df['first_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['first_most_sim'])],axis=1) indices = highest_indices_df['first_most_sim'].values.tolist() data = jwdf[jwdf.index.isin(indices)] data.shape indices1 = highest_indices_df['second_most_sim'].values.tolist() data1 = jwdf[jwdf.index.isin(indices1)] data1.shape data.to_csv('french_fongbe_train_xlm_roberta.csv',index=False) data1.to_csv('french_fongbe_valid_xlm_roberta.csv',index=False)Method 2:- Here we perform the same procedure as above and we use Language agnostic BERT Sentence to calculate vectors which will be used to find similarities between the sentences.We will determine the highest and second highest similar French statements and augment the train data with itmodel = SentenceTransformer('LaBSE') highest_indices_df = pd.DataFrame(columns=['French']) highest_indices_df['French'] = availdf['French'].values.tolist() highest_similarity_df = pd.DataFrame(columns=['French']) highest_similarity_df['French'] = availdf['French'].values.tolist() batch = 1 rows = availdf['French'].values.tolist() row_enc = model.encode(rows,batch_size=64,show_progress_bar=True) for i in range(0,jwdf.shape[0],50000): print('batch:- ',batch) start = i end = start+50000-1 columns = jwdf['French'].loc[start:end].values.tolist() columns_enc = model.encode(columns,batch_size=128,show_progress_bar=True) try: a = start b = end+1 colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = 
pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) except: a = start b = a+len(columns) colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) highest_label = simdf.idxmax(axis=1).values highest_match = simdf.max(axis=1).values highest_indices_df[f'batch_{batch}_indices'] = highest_label highest_similarity_df[f'batch_{batch}_similarity'] = highest_match del simdf gc.collect() batch = batch+1 sim = highest_similarity_df.drop(['French'],axis=1) highest_indices_df['second_most_sim'] = sim.T.apply(lambda x: x.nlargest(2).idxmin()) highest_indices_df['first_most_sim'] = sim.T.apply(lambda x: x.nlargest(1).idxmin()) def get_index(text): return text.replace('similarity','indices') highest_indices_df['second_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['second_most_sim'])],axis=1) highest_indices_df['first_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['first_most_sim'])],axis=1) indices = highest_indices_df['first_most_sim'].values.tolist() data = jwdf[jwdf.index.isin(indices)] data.shape indices1 = highest_indices_df['second_most_sim'].values.tolist() data1 = jwdf[jwdf.index.isin(indices1)] data1.shape data.to_csv('french_fongbe_train_labse.csv',index=False) data1.to_csv('french_fongbe_valid_labse.csv',index=False)Extracting statements from French-Ewe JW300We perform the same procedure for EWE as well but since we have a pretrained seq2seq model for french-Ewe, we will use only one method to extract relevant datasets which will be XLM-ROBERTA-LARGE and extract those points which are going to augment the train data.jwdf = pd.read_csv('/content/drive/MyDrive/takwimu_translations/french_ewe.csv') jwdf['French'] = jwdf['French'].astype(str) jwdf['Ewe'] = jwdf['Ewe'].astype(str) jwdf = jwdf.dropna(subset=['French']) jwdf = jwdf.dropna(subset=['Ewe']) availdf = pd.read_csv('Test.csv') availdf = availdf[availdf['Target_Language']=='Ewe'] model = SentenceTransformer('xlm-roberta-large') highest_indices_df = pd.DataFrame(columns=['French']) highest_indices_df['French'] = availdf['French'].values.tolist() highest_similarity_df = pd.DataFrame(columns=['French']) highest_similarity_df['French'] = availdf['French'].values.tolist() batch = 1 rows = availdf['French'].values.tolist() row_enc = model.encode(rows,batch_size=64,show_progress_bar=True) for i in range(0,jwdf.shape[0],50000): print('batch:- ',batch) start = i end = start+50000-1 columns = jwdf['French'].loc[start:end].values.tolist() columns_enc = model.encode(columns,batch_size=128,show_progress_bar=True) try: a = start b = end+1 colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) except: a = start b = a+len(columns) colnames = list(range(a,b)) colnames = [str(i) for i in colnames] simdf = pd.DataFrame(cosine_similarity(row_enc,columns_enc),columns=colnames) highest_label = simdf.idxmax(axis=1).values highest_match = simdf.max(axis=1).values highest_indices_df[f'batch_{batch}_indices'] = highest_label highest_similarity_df[f'batch_{batch}_similarity'] = highest_match del simdf gc.collect() batch = batch+1 sim = highest_similarity_df.drop(['French'],axis=1) highest_indices_df['second_most_sim'] = sim.T.apply(lambda x: x.nlargest(2).idxmin()) highest_indices_df['first_most_sim'] = sim.T.apply(lambda x: x.nlargest(1).idxmin()) def get_index(text): return text.replace('similarity','indices') 
highest_indices_df['second_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['second_most_sim'])],axis=1) highest_indices_df['first_most_sim'] = highest_indices_df.apply(lambda z: z[get_index(z['first_most_sim'])],axis=1) indices = highest_indices_df['first_most_sim'].values.tolist() data = jwdf[jwdf.index.isin(indices)] data.shape indices1 = highest_indices_df['second_most_sim'].values.tolist() data1 = jwdf[jwdf.index.isin(indices1)] data1.shape data.to_csv('french_ewe_train_xlm_roberta.csv',index=False) data1.to_csv('french_ewe_valid_xlm_roberta.csv',index=False)多元线性回归-正规方程 [![LR9.png](https://i.postimg.cc/NMwQDTGT/LR9.png)](https://postimg.cc/LqNd6J16)[![LR10.png](https://i.postimg.cc/jjfsyvTt/LR10.png)](https://postimg.cc/XZVSbfXz)[![LR11.png](https://i.postimg.cc/sD0y8bD3/LR11.png)](https://postimg.cc/YhgJLnWP)- **即是多元线性回归的正规方程解(Normal Equation)**- 问题:时间复杂度高:O(n^3)- 优点:不需要对数据做归一化处理 2. 动手实现import numpy as np import matplotlib.pyplot as plt from sklearn import datasets boston = datasets.load_boston() X = boston.data y = boston.target X[1, :] X = X[y < 50.0] y = y[y < 50.0] X.shape from LR.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, seed=666) from LR.LinearRegression import LinearRegression reg = LinearRegression() reg.fit_normal(X_train, y_train) reg.coef_ reg.intercept_ reg.score(X_test, y_test)3. scikit-learn中的线性回归from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(X_train, y_train) lin_reg.coef_ lin_reg.intercept_ lin_reg.score(X_test, y_test)4. 使用scikit-learn中的kNN Regression 解决回归问题from sklearn.neighbors import KNeighborsRegressor knn_reg = KNeighborsRegressor() knn_reg.fit(X_train, y_train) knn_reg.score(X_test, y_test)- **显然,kNN预测分数低于线性回归模型的分数**- 下面对 kNN 使用网格搜索找到最佳超参数from sklearn.model_selection import GridSearchCV param_grid = [ { 'weights': ['uniform'], 'n_neighbors': [i for i in range(1, 11)] }, { 'weights': ['distance'], 'n_neighbors': [i for i in range(1, 11)], 'p': [i for i in range(1, 6)] } ] knn_reg = KNeighborsRegressor() grid_search = GridSearchCV(knn_reg, param_grid, n_jobs=-1, verbose=1) grid_search.fit(X_train, y_train) grid_search.best_params_ grid_search.best_score_ # 此时是网格搜索自己的评估算法 # 此时使用和线性回归相同的评估算法,可以看出,得分有很大的提高,但仍然不如线性回归模型 grid_search.best_estimator_.score(X_test, y_test)Seqdist> Probability distributions over sequences in pytorch and cupy. Install `pip install seqdist` How to use Comparison against builtin pytorch implementation of the standard CTC loss:sample_inputs = logits, targets, input_lengths, target_lengths = ctc.generate_sample_inputs(T_min=450, T_max=500, N=128, C=20, L_min=80, L_max=100) print('pytorch loss: {:.4f}'.format(ctc.loss_pytorch(*sample_inputs))) print('seqdist loss: {:.4f}'.format(ctc.loss_cupy(*sample_inputs)))pytorch loss: 12.8080 seqdist loss: 12.8080Speed comparison Pytorch:report(benchmark_fwd_bwd(ctc.loss_pytorch, *sample_inputs))fwd: 4.79ms (4.17-5.33ms) bwd: 9.69ms (8.33-10.88ms) tot: 14.47ms (12.67-16.20ms)Seqdist:report(benchmark_fwd_bwd(ctc.loss_cupy, *sample_inputs))fwd: 7.22ms (6.78-7.85ms) bwd: 6.21ms (5.82-8.57ms) tot: 13.43ms (12.63-16.41ms)Alignmentsbetas = [0.1, 1.0, 10.] 
alignments = {'beta={:.1f}'.format(beta): to_np(ctc.soft_alignments(*sample_inputs, beta=beta)) for beta in betas} alignments['viterbi'] = to_np(ctc.viterbi_alignments(*sample_inputs)) fig, axs = plt.subplots(2, 2, figsize=(15, 8)) for (ax, (title, data)) in zip(np.array(axs).flatten(), alignments.items()): ax.imshow(data[:, 0].T, vmax=0.05); ax.set_title(title)PathsThis is an attempt to calculate paths, in order to get distributions of those paths.import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import psycopg2 import sys sys.path.append('../../src/') from utils.database import dbutils conn = dbutils.connect() cursor = conn.cursor() df = pd.read_sql(''' select cust_id, (cust_id - lag(cust_id) over ())=0 as same_cust, date_, extract(days from date_ - lag(date_) over ()) - 1 as datediff, calls, calls_in_florence_city as calls_in_fl_city, calls_near_airport from optourism.foreigners_timeseries_daily; ''', con=conn) # df = pd.read_sql(''' # select cust_id, # (cust_id - lag(cust_id) over ())=0 as same_cust, # date_, # extract(days from date_ - lag(date_) over ()) - 1 as datediff, # calls, # in_florence as calls_in_fl_prov, # in_florence_comune as calls_in_fl_city, # in_florence>0 as now_in_fl_prov, # (case when (cust_id - lag(cust_id) over ())=0 then (lag(in_florence) over ())>0 else Null end) as was_in_fl_prov, # in_florence_comune>0 as now_in_fl_city, # (case when (cust_id - lag(cust_id) over ())=0 then (lag(in_florence_comune) over ())>0 else Null end) as was_in_fl_city # from optourism.foreigners_timeseries_daily; # ''', con=conn) dfi = pd.read_sql(''' select cust_id, (cust_id - lag(cust_id) over ())=0 as same_cust, date_, extract(days from date_ - lag(date_) over ()) - 1 as datediff, calls, calls_in_florence_city as calls_in_fl_city, calls_near_airport from optourism.italians_timeseries_daily; ''', con=conn) df.head(10) # Check # df.iloc[0,] # How to locate an individual element df.iloc[0,1] = False # Replace the 'None' with 'False' df.head(20) # Check df.loc[df['same_cust']==False,'datediff'] = None # Comes out as NaN df.head(10) dfi.loc[dfi['same_cust']==False,'datediff'] = None # Comes out as NaN df['calls_out_fl_city'] = df['calls'] - df['calls_in_fl_city'] df['in_fl_city'] = df['calls_in_fl_city']>0 df['out_fl_city'] = df['calls_out_fl_city']>0 df['was_in_fl_city'] = df['in_fl_city'].shift(1) df['was_out_fl_city'] = df['out_fl_city'].shift(1) df['willbe_in_fl_city'] = df['in_fl_city'].shift(-1) df['willbe_out_fl_city'] = df['out_fl_city'].shift(-1) df.loc[df['same_cust']==False,'was_in_fl_city'] = None df.loc[df['same_cust']==False,'was_out_fl_city'] = None df['trip'] = '' df.head() # df['same_cust'][0] df.loc[(df['same_cust']==True)&(df['datediff']<3)&(df['was_in_fl_city']==False)&(df['in_fl_city']==True),'trip'] = 'start' df.loc[(df['same_cust']==True)&(df['datediff']<3)&(df['was_in_fl_city']==True)&(df['in_fl_city']==True),'trip'] = 'continue' df.loc[(df['same_cust']==True)&(df['datediff']<3)&(df['was_in_fl_city']==True)&(df['in_fl_city']==True)&(df['willbe_in_fl_city']==False),'trip'] = 'end' df.loc[(df['same_cust']==False)&((df['in_fl_city']==True)|(df['calls_near_airport']>0)),'trip'] = 'first' df.loc[(df['same_cust']==True)&(df['same_cust'].shift(-1)==False)&((df['in_fl_city']==True)|(df['calls_near_airport']>0)),'trip'] = 'last' df['on_trip'] = df['trip']!='' df2 = df[['cust_id','same_cust','date_','datediff','calls_in_fl_city','calls_out_fl_city','trip','on_trip']] df2.head() 
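The next cell derives `trip_id` by labelling consecutive runs of `on_trip` days, and the idiom is worth spelling out. A toy illustration (my own example, not from the original notebook):

```python
# (s.shift(1) != s) marks the start of each run of equal values, the cumulative
# sum numbers the runs, and multiplying by the boolean keeps 0 for off-trip days.
# Non-zero labels uniquely identify trips even though they are not consecutive.
import pandas as pd

s = pd.Series([False, True, True, False, True])            # toy on_trip flags
run_id = (s.shift(1) != s).astype(int).cumsum() * s.astype(int)
print(run_id.tolist())                                      # [0, 2, 2, 0, 4]
```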
df2['trip_id'] = (((df2['on_trip'].shift(1) != df2['on_trip']).astype(int).cumsum())*(df2['on_trip']).astype(int)) df2.loc[0:50] dfg = df2[df2['trip_id']!=0][['cust_id','trip_id']].groupby(['cust_id','trip_id']).size().to_frame() dfg.head(20) dfg.groupby('cust_id').std().head() def frequency(dataframe,columnname): out = dataframe[columnname].value_counts().to_frame() out.columns = ['frequency'] out.index.name = columnname out.reset_index(inplace=True) out = out.sort_values(columnname) out['cumulative'] = out['frequency'].cumsum()/out['frequency'].sum() out['ccdf'] = 1 - out['cumulative'] return out fr_trips = frequency(dfg.groupby('cust_id').count(),0) # Distribution of lengths of gaps between trips fr_trlen = frequency(dfg,0) # Distribution of lengths of gaps between trips fr_dtdff = frequency(df[df['datediff']>0],'datediff') # Distribution of lengths of gaps between trips fr_dtdff.head() f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.stem(fr_dtdff['datediff'],fr_dtdff['frequency'], linestyle='steps--') plt.yscale('log') plt.xscale('log') ax.set_title('Length of gaps between trups to Florence by foreign customers') ax.set_ylabel('Number of trips with gap of length x') ax.set_xlabel('Gaps of trip') plt.show() f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.stem(fr_trips[0],fr_trips['frequency'], linestyle='steps--') plt.yscale('log') plt.xscale('log') ax.set_title('Trips to Florence by foreign customers') ax.set_ylabel('Number of customers taking x trips') ax.set_xlabel('Number of trips to Florence') plt.show() f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.stem(fr_trlen[0],fr_trlen['frequency'], linestyle='steps--') plt.yscale('log') plt.xscale('log') ax.set_title('Length of trips to Florence across foreign customers') ax.set_ylabel('Number of trips of length x across all foreign customers') ax.set_xlabel('Length of trip to Florence') plt.show() dfg[dfg.groupby('cust_id').count()==1].head(20) fr_len1trip = frequency(dfg[dfg.groupby('cust_id').count()==1],0) # Distribution of lengths of gaps between trips fr_len1trip.head(20) f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.stem(fr_len1trip[0],fr_len1trip['frequency'], linestyle='steps--') plt.yscale('log') plt.xscale('log') ax.set_title('Length of trip to Florence for foreign customers with 1 trip') ax.set_ylabel('Number of trips of length x') ax.set_xlabel('Length of trip to Florence') plt.show() f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.scatter(x=dfg.groupby('cust_id').count(),y=dfg.groupby('cust_id').mean(),s=.1) plt.yscale('log') plt.xscale('log') ax.set_title('Foreigner trip length vs number of trips') ax.set_xlabel('Number of trips') ax.set_ylabel('Mean trip length in days') plt.show() f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.scatter(x=dfg.groupby('cust_id').count(),y=dfg.groupby('cust_id').sum(),s=.1) plt.yscale('log') plt.xscale('log') ax.set_title('Foreigner trip length vs number of trips') ax.set_xlabel('Number of trips') ax.set_ylabel('Total days in Florence') plt.show() x = dfg.groupby('cust_id').count() + np.random.normal(loc=0,scale=25) y = dfg.groupby('cust_id').sum() + np.random.normal(loc=0,scale=25) f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.scatter(x=x,y=y,s=.1) plt.yscale('log') plt.xscale('log') plt.xlim([.9,40]) plt.ylim([.9,150]) ax.set_title('Foreigner trip length vs number of trips') ax.set_xlabel('Number of trips') ax.set_ylabel('Total days in Florence') plt.show() dfi['calls_out_fl_city'] = dfi['calls'] - dfi['calls_in_fl_city'] dfi['in_fl_city'] = dfi['calls_in_fl_city']>0 dfi['out_fl_city'] 
= dfi['calls_out_fl_city']>0 dfi['was_in_fl_city'] = dfi['in_fl_city'].shift(1) dfi['was_out_fl_city'] = dfi['out_fl_city'].shift(1) dfi['willbe_in_fl_city'] = dfi['in_fl_city'].shift(-1) dfi['willbe_out_fl_city'] = dfi['out_fl_city'].shift(-1) dfi.loc[dfi['same_cust']==False,'was_in_fl_city'] = None dfi.loc[dfi['same_cust']==False,'was_out_fl_city'] = None dfi['trip'] = '' dfi.loc[(dfi['same_cust']==True)&(dfi['datediff']<3)&(dfi['was_in_fl_city']==False)&(dfi['in_fl_city']==True),'trip'] = 'start' dfi.loc[(dfi['same_cust']==True)&(dfi['datediff']<3)&(dfi['was_in_fl_city']==True)&(dfi['in_fl_city']==True),'trip'] = 'continue' dfi.loc[(dfi['same_cust']==True)&(dfi['datediff']<3)&(dfi['was_in_fl_city']==True)&(dfi['in_fl_city']==True)&(dfi['willbe_in_fl_city']==False),'trip'] = 'end' dfi.loc[(dfi['same_cust']==False)&((dfi['in_fl_city']==True)|(dfi['calls_near_airport']>0)),'trip'] = 'first' dfi2 = dfi[['cust_id','same_cust','date_','datediff','calls_in_fl_city','calls_out_fl_city','trip','on_trip']] dfi2['trip_id'] = (((dfi2['on_trip'].shift(1) != dfi2['on_trip']).astype(int).cumsum())*(dfi2['on_trip']).astype(int)) dfig = df2[df2['trip_id']!=0][['cust_id','trip_id']].groupby(['cust_id','trip_id']).size().to_frame() fri_trips = frequency(dfig.groupby('cust_id').count(),0) # Distribution of lengths of gaps between trips fri_trlen = frequency(dfig,0) # Distribution of lengths of gaps between trips fri_dtdff = frequency(dfi[dfi['datediff']>0],'datediff') # Distribution of lengths of gaps between trips f, ax = plt.subplots(figsize=(6,5), dpi=300) ax.stem(fri_dtdff['datediff'],fri_dtdff['frequency'], linestyle='steps--') plt.yscale('log') plt.xscale('log') ax.set_title('Length of gaps between trups to Florence by Italian customers') ax.set_ylabel('Number of trips with gap of length x') ax.set_xlabel('Gaps of trip') plt.show()Calculate Zoltar Truth DataWith JHU Truth.import pandas as pd import pymmwr as pm import datetime import warnings warnings.simplefilter(action='ignore') from zoltpy import util import json def get_epi_data(date): format_str = '%m/%d/%y' # The format dt = datetime.datetime.strptime(date, format_str).date() epi = pm.date_to_epiweek(dt) return epi.year, epi.week, epi.day def get_epi_data_TZ(date): format_str = '%Y-%m-%d' # The format dt = datetime.datetime.strptime(date, format_str).date() epi = pm.date_to_epiweek(dt) epi_week = epi.week epi_day = epi.day if epi_day >=3: # cut off is Tuesday epi_week = epi_week + 1 return epi.year, epi_week, epi.day def get_available_timezeros(project_name): conn = util.authenticate() project = [project for project in conn.projects if project.name == project_name][0] project_timezeros = project.timezeros timezero = [] for timezero_array in project_timezeros: timezero += [timezero_array.timezero_date] return timezero def configure_JHU_data(df, target): # convert matrix to repeating row format df_truth = df.unstack() df_truth = df_truth.reset_index() # get epi data from date df_truth['year'], df_truth['week'], df_truth['day'] = \ zip(*df_truth['level_0'].map(get_epi_data)) # rename columns df_truth = df_truth.rename(columns={0: "value", "level_1": "location_long"}) # Get state IDs df_truth = df_truth.merge(fips_codes, left_on='location_long', right_on='state_name', how='left') df_truth.loc[df_truth["location_long"] == "US", "state_code"] = "US" df_truth["state_code"].replace({"US": 1000}, inplace=True) # so that can be converted to int # convert FIPS code to int df_truth = df_truth.dropna(subset=['state_code']) df_truth["state_code"] = 
df_truth["state_code"].astype(int) # add leading zeros to state code df_truth['state_code'] = df_truth['state_code'].apply(lambda x: '{0:0>2}'.format(x)) # convert 1000 back to US df_truth["state_code"].replace({"1000": "US"}, inplace=True) df_truth.loc[df_truth["location_long"] == "US", "state"] = "nat" # Observed data on the seventh day # or group by week for incident deaths if target == 'Incident Deaths': df_vis = df_truth.groupby(['week', 'location_long'], as_index=False).agg({'level_0': 'last', 'value': 'sum', 'year': 'last', 'day': 'last', 'state_code': 'last', 'state': 'last', 'state_name': 'last' }) else: df_vis = df_truth df_vis['week'] = df_vis['week'] + 1 # shift epiweek on axis # add leading zeros to epi week df_vis['week'] = df_vis['week'].apply(lambda x: '{0:0>2}'.format(x)) # define epiweek df_vis['epiweek'] = df_vis['year'].astype(str) + df_vis['week'] # only output "location", "epiweek", "value" df_vis = df_vis.rename(columns={"state": "location"}) # rename location df_truth_long = df_vis.rename(columns={"week": "epiweek", "state_code": "unit", "level_0": "date"}) # get timezero df_truth_long['date'] = pd.to_datetime(df_truth_long['date']) # initialize df_targets df_targets = pd.DataFrame(columns=list(df_truth_long.columns).append('target')) # use Saturday truth values df_truth_values = df_truth_long[df_truth_long['day'] == 7] # find week-ahead targets for i in range(4): weeks_ahead = i + 1 days_back = 5 + ((weeks_ahead - 1) * 7) # timezero is on Mondays df_calc = df_truth_values # initialize df # find timezero and target df_calc['timezero'] = df_calc['date'] - datetime.timedelta(days=days_back) if target == "Cumulative Deaths": df_calc['target'] = "%i wk ahead cum death" % weeks_ahead else: df_calc['target'] = "%i wk ahead inc death" % weeks_ahead # concatenate truth df_targets = pd.concat([df_targets, df_calc]) # get epi data from Timezero df_targets['timezero'] = df_targets['timezero'].astype(str) df_targets['tz_year'], df_targets['tz_week'], df_targets['tz_day'] = \ zip(*df_targets['timezero'].map(get_epi_data_TZ)) # truth targets by timezero week df_targets = df_targets[["tz_week", "unit", "target", "value"]] # Map all timezeros in Zoltar to Corresponding weeks df_map_wk_to_tz = pd.DataFrame(columns=['timezero']) df_map_wk_to_tz['timezero'] = get_available_timezeros("COVID-19 Forecasts") df_map_wk_to_tz['tz_year'], df_map_wk_to_tz['tz_week'], df_map_wk_to_tz['tz_day'] = \ zip(*df_map_wk_to_tz['timezero'].map(get_epi_data_TZ)) # Merge timezeros with truth values and targets df_final = pd.merge(df_targets, df_map_wk_to_tz, how='right', on=['tz_week']) # select columns df_final = df_final[["timezero", "unit", "target", "value"]] # drop empty rows nan_value = float("NaN") df_final.replace("", nan_value, inplace=True) df_final.dropna(inplace=True) return df_final df = pd.read_csv( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv") fips_codes = pd.read_csv('../../template/state_fips_codes.csv') # aggregate by state and nationally state_agg = df.groupby(['Province_State']).sum() us_nat = df.groupby(['Country_Region']).sum() df_state_nat = state_agg.append(us_nat) # drop unnecessary columns cols = list(range(0, 6)) df_truth = df_state_nat.drop(df_state_nat.columns[cols], axis=1) # calculate incidents from cumulative df_truth_cumulative = df_truth df_truth_incident = df_truth - df_truth.shift(periods=1, axis='columns') # re-format files df_cum_death = 
configure_JHU_data(df_truth_cumulative, "Cumulative Deaths") df_inc_death = configure_JHU_data(df_truth_incident, "Incident Deaths") # concatenate targers zoltar_truth = pd.concat([df_cum_death,df_inc_death]) # write truth to csv file_path = '../../data-truth/zoltar-truth.csv' zoltar_truth.to_csv(file_path, index=False)Functions > A **function** is a named sequence of statements that performs a computation.> When defined, you specify the **name** and the sequence of statements.> Later, you **call the function** by name >> 1. **A Python function that takes a list and returns a new list with unique elements of the first list.**def unique_list(l): x = [] for a in l: if a not in x: x.append(a) return x print(unique_list(input('Enter a list: ')))Enter a list Enter a list ['E', 'n', 't', 'e', 'r', ' ', 'a', 'l', 'i', 's']>> 2. **A Python function that reverses a string.**def string_reverse(str1): rstr1 = '' index = len(str1) while index > 0: rstr1 += str1[ index - 1 ] index = index - 1 return rstr1 print(string_reverse(input('Enter a string: ')))Enter a string eedd ddee>> 3. **A Python function that checks whether a passed string is palindrome or not.**def isPalindrome(string): left_pos = 0 right_pos = len(string) - 1 while right_pos >= left_pos: if not string[left_pos] == string[right_pos]: return False left_pos += 1 right_pos -= 1 return True print(isPalindrome(input('Enter a word: ')))Enter a word delete FalseClass Exercises > 1. **Write a shutting down program:**> a. First, **def** a function, **shut_down**, that takes one argument from the keyboard. > b. Then, if the **shut_down** function receives an argument equal to **"yes"**, it should return "Shutting down".> c. Alternatively, elif argument is equal to **"no"**, then the function should return "Shutdown aborted". > d. Finally, if shut_down gets anything other than those inputs, the function should return "Sorry, such argument not welcome here ". > 2. Create a function **showEmployee()** in such a way that it should accept a given number of employee name and salary as input, stores the data in a dictionary, and then save it into a csv file, if any of the following condition is met. > a. If the salary is greater than **N1,000,000** display employ name and salary in **green color**. > b. If the salary is greater than **N500,000** and less than N1,000,000 dispaly both employee name and salary in **blue color**. > c. If the salary is less than **N500,000** dispaly both employee name and salary in red color.If the salary is missing in the function call display the message **"Warning: No salary structure for employee"** in **red color**. > 3. **Follow the stpes:** > a. First, def a function called **cube** that takes an argument called **number**.> b. Make that function return the cube of that number (i.e. that number multiplied by itself and multiplied by itself once again).> c. Define a second function called **by_three** that takes an argument called number. if that number is divisible by 3, **by_three** should call **cube(number)** and return its result. Otherwise, **by_three should** return False. -Check if it works. > 4. Write a Python function that accepts a string and calculate the number of upper case letters and lower case letters. 
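For exercise 4, one possible approach (an illustrative sketch of mine, not the class answers that follow) counts both cases in a single pass:

```python
def case_counts(s):
    # Count upper- and lower-case letters with str.isupper() / str.islower();
    # characters that are neither (spaces, digits, punctuation) are ignored.
    upper = sum(1 for ch in s if ch.isupper())
    lower = sum(1 for ch in s if ch.islower())
    return upper, lower

print(case_counts("The quick Brown Fox"))   # (3, 13)
```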
Number 1 Answerdef shutdown(a): if a=="Yes": print("Shutting down") elif a=="No": print("Shutdown aborted") else: print("Sorry, such argument not welcome here ") k = input("Are you going to the office:") shutdown(k)Are you going to the office:Maybe Sorry, such argument not welcome hereNumber 3 Answerdef cube(number): print(number**3) number = int(input("Please enter the value for w ")) cube(number) def by_three(number): if number%3==0: return(number**3) else: return("False") #f = int(input("Please enter the value for f ")) by_three(number)Please enter the value for w 5 125Number 4 Answerdef isupper(str1): w = int(input("Please enter your word ")) print(w) w = int(input("Please enter your word ")) name=str(input("enter the string")) count=0 for i in name: if i.isupper(): count=count+1 print("The number of capital letters found in the string is:-",count)enter the stringLEttER The number of capital letters found in the string is:- 4Number 2 Answerimport colorama from colorama import Fore, Style #import pandas import pandas as pd import csv def showEmployee(c): m=0 while m < c: n=input("Enter name: ") s=int(input("Enter salary:")) x={} if (s>1000000): print(Fore.GREEN + "Name:", n) print("Salary ",s) print(Style.RESET_ALL) elif (s>500000)&(s<1000000): print(Fore.BLUE + "Name:", n) print("Salary ",s) print(Style.RESET_ALL) else: print(Fore.RED + "Warning: No salary structure for employee") print(Style.RESET_ALL) field_names = ['No', 'Name', 'Salary'] x=[{'No':m, 'Name':n, 'Salary':s}] with open('Name.csv', 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames = field_names) writer.writeheader() writer.writerows(x) m+=1 c=int(input("Enter number of Employees:")) showEmployee(c)Anomaly Detection Model - FIRST Modellbeschreibung:- Training auf den Jahren 2020 und 2021- Adam als Opzimizer- Komplexerer AE- Mehr als 6 Epochen- Normalisierung der Daten- Modell wie aus der Vorstudie Imports%run -i ./scripts/PythonImports.pySet Configs and load data%run -i ./scripts/TrainPreperations.pyShape of normal data: (105216, 17) Shape of anormal data: (35040, 18)Skalieren der Daten & "Column name zu Zahl"-Mapping speichern & Daten für PyTorch vorbereiten%run -i ./scripts/ScaleAndPrepare.pyTrainingfrom models.SimpleAutoEncoder import SimpleAutoEncoder torch.manual_seed(42) num_inputs = len(df_data.columns) print('Num Inputs: {}'.format(num_inputs)) val_lambda = 0.5 model = SimpleAutoEncoder(num_inputs=num_inputs, val_lambda=val_lambda) critereon = mse_loss optimizer = Adam(model.parameters()) print(model) losses = [] EPOCHS = 32 for epoch in range(EPOCHS): model.train() for batch_idx, (data,target) in enumerate(trn_dataloader): data = torch.autograd.Variable(data) optimizer.zero_grad() pred = model(data) loss = critereon(pred, data) losses.append(loss.cpu().data.item()) # Backpropagation loss.backward() optimizer.step() # Display if batch_idx % 50 == 1: print('\n Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch+1, EPOCHS, batch_idx * len(data), len(trn_dataloader.dataset), 100. 
* batch_idx / len(trn_dataloader), loss.cpu().data.item()), end='') fig = plt.figure(figsize=(16,8)) plt.plot(np.arange(len(losses)),losses) plt.title('Loss (Training) Einfacher AE',fontsize=20) loss_fn = '{}_AE_model_loss.pdf'.format(arrow.now().format('YYYYMMDD')) fn = os.path.join(fig_path, loss_fn) fig.savefig(fn, bbox_inches = 'tight', pad_inches = 0)Save Modellmodel_fn = '{}_firstAE_model.pt'.format(arrow.now().format('YYYYMMDD')) print('Model Name: {}'.format(model_fn)) torch.save(model.state_dict(), os.path.join(model_bib_path, model_fn))Model Name: 20200303_firstAE_model.ptLogistic Regression zum Lernen des Thresholdsfrom sklearn.linear_model import LogisticRegression from utils.evalUtils import calc_cm_metrics from models.SimpleAutoEncoder import SimpleAutoEncoder torch.manual_seed(42) s_labels = df_data_anormal['label'] df_data_anormal.drop('label', axis=1, inplace=True) num_inpus = len(df_data_anormal.columns) val_lambda = 42 * 0.01 fn_now = os.path.join(model_path, 'model_bib' ,'20200303_firstAE_model.pt') model = SimpleAutoEncoder(num_inputs=num_inpus, val_lambda=val_lambda) model.load_state_dict(torch.load(fn_now)) model.eval() data_fn_anormal = os.path.join(data_path, 'anomalous_data_y_2022_reduced.h5') df_data_anormal = pd.read_hdf(data_fn_anormal, key='df') print('Shape of anormal data: {}'.format(df_data_anormal.shape)) scaler_train = MinMaxScaler((-1,1)) scaler_train = scaler_train.fit(df_data) df_data_anormal.shape scaled_anormal = scaler_train.transform(df_data_anormal.to_numpy()) # build tensor from numpy anormal_torch_tensor = torch.from_numpy(scaled_anormal).type(torch.FloatTensor) # build TensorDataset from Tensor anormal_dataset = TensorDataset(anormal_torch_tensor, anormal_torch_tensor) # build DataLoader from TensorDataset anormal_dataloader = torch.utils.data.DataLoader(anormal_dataset,batch_size=128,shuffle=False, num_workers=0) losses_anormal = [] for val in anormal_torch_tensor: loss = model.calc_reconstruction_error(val) losses_anormal.append(loss.item()) s_losses_anormal = pd.Series(losses_anormal) X = s_losses_anormal.to_numpy() X = X.reshape(-1, 1) y = [1 if x > 0 else 0 for x in s_labels] clf = LogisticRegression(random_state=42, fit_intercept=True, solver='liblinear', class_weight={1:2.0}) clf.fit(X, y) predictions = [] for val in X: val = val.reshape(1,-1) pred = clf.predict(val) predictions.append(pred[0]) from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from mlxtend.plotting import plot_confusion_matrix import joblib cm = confusion_matrix(y, predictions) tn, fp, fn, tp = confusion_matrix(y, predictions).ravel() accuracy, precision, specifity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn) print('Accuracy: {}'.format(accuracy)) print('Precision: {}'.format(precision)) print('Specifity: {}'.format(specifity)) print('Sensitivity: {}'.format(sensitivity)) print('F1-Score: {}'.format(f1_score))Accuracy: 99.70034246572497 Precision: 99.99999999958195 Specifity: 93.15299842562096 Sensitivity: 95.79495394435004 F1-Score: 97.85232153794672Save Modelmodel_fn = '{}_LogRegModell.save'.format(arrow.now().format('YYYYMMDD')) filename = os.path.join(model_bib_path, model_fn) joblib.dump(clf, filename)Lab 2: Text Classification=============In this problem set, you will build a system for automatically classifying song lyrics comments by era. 
You will:
- Do some basic text processing, tokenizing your input and converting it into a bag-of-words representation
- Build a machine learning classifier based on the generative model, using Naive Bayes
- Evaluate your classifiers and examine what they have learned
- Build a logistic regression classifier (discriminative model) using scikit-learn

Total Points: 120 points

0. Setup

In order to develop this assignment, you will need [python 3.6](https://www.python.org/downloads/) and the following libraries. Most if not all of these are part of [anaconda](https://www.continuum.io/downloads), so a good starting point would be to install that.
- [jupyter](http://jupyter.readthedocs.org/en/latest/install.html)
- numpy (this will come if you install scipy as above, but if not, install it separately)
- [matplotlib](http://matplotlib.org/users/installing.html)
- [nosetests](https://nose.readthedocs.org/en/latest/)
- [pandas](http://pandas.pydata.org/) Dataframes

Here is some help on installing packages in Python: https://packaging.python.org/installing/. You can use ```pip --user``` to install locally without sudo.

About this assignment
- This is a Jupyter notebook. You can execute cell blocks by pressing control-enter.
- All of your coding will be in the python file ```lab2.py```.
- The file ```tests/tests_visible.py``` contains the Gradescope autograder unit tests that will be available for you to run locally. You should run them as you work on the assignment to see that you're on the right track. You are free to look at their source code, if that helps. You can run the tests by running ```python run_tests.py```, or ```python run_tests.py -j``` for a more detailed description.
- You may want to add more tests, but that is completely optional.
- **To submit this assignment, submit ```lab2.py``` on Gradescope.**

Important instructions for this assignment
- Since each test case takes about 1 minute to run individually and the collective test suite takes about 20-30 minutes to run in its entirety, we recommend that when you implement an individual function you comment out the remaining test case functions in tests/test_visible.py and keep only the corresponding test case and the def Setup(self) (i.e., the first function) uncommented.
- We estimate that your completed code should be able to finish running on all the test cases in about 20-30 minutes. However, if your code takes longer to run, follow the next bullet point.
- The Gradescope autograder has a runtime limit of 40 minutes, so if your code times out with the autograder unable to run all the test cases, there is a fallback. The ECE 365 Gradescope page has two assignments: **NLP Lab 2 Code** and **NLP Lab 2 Screenshot**. You will submit your code to **NLP Lab 2 Code**, which will run the autograder. If your code is unable to finish running on all the test cases before the timeout, you will need to submit a screenshot of the local test case output on the **NLP Lab 2 Screenshot** assignment. First run ```python run_tests.py -j``` in the assignment directory and then take a screenshot of the prompt that shows your final score. An example screenshot is shown below.
- You only need to submit the screenshot if the Gradescope autograder is unable to run your code on all the test cases. Submitting your code to **NLP Lab 2 Code** is a requirement, and you will not receive any credit from your screenshot submission if you have not submitted your code.
![title](screenshot.png)import sys from importlib import reload import lab2 print('My Python version') print('python: {}'.format(sys.version)) import nose import pandas as pd import numpy as np import scipy as sp import matplotlib import matplotlib.pyplot as plt import torch from torch.autograd import Variable from torch import optim %matplotlib inline print('My library versions') print('pandas: {}'.format(pd.__version__)) print('numpy: {}'.format(np.__version__)) print('scipy: {}'.format(sp.__version__)) print('matplotlib: {}'.format(matplotlib.__version__)) print('nose: {}'.format(nose.__version__)) print('torch: {}'.format(torch.__version__))My library versions pandas: 1.2.1 numpy: 1.19.4 scipy: 1.6.0 matplotlib: 3.3.3 nose: 1.3.7 torch: 1.7.1To test whether your libraries are the right version, run:`nosetests tests/test_environment.py`# use ! to run shell commands in notebook ! nosetests tests/test_environment.py. ---------------------------------------------------------------------- Ran 1 test in 0.000s OK1. Preprocessing**Total: 20 points** Read the data into a dataframedf_train = pd.read_csv('lyrics-train.csv')A dataframe is a structured representation of your data. You can preview a dataframe using `head()`df_train.head()Bags of wordsYour first task is to convert the text to a bag-of-words representation. For this data, a lot of the preprocessing is already done: the text is lower-cased, and punctuation is removed. You need only create a `counter` for each instance.- **Deliverable 1.1**: Complete the function `lab2.bag_of_words`. (5 points)- **Test**: `tests/test_visible.py:test_d1_1_bow`# run this block to update the notebook as you change the preproc library reload(lab2); y_tr,x_tr = lab2.read_data('lyrics-train.csv',preprocessor=lab2.bag_of_words) y_dv,x_dv = lab2.read_data('lyrics-dev.csv',preprocessor=lab2.bag_of_words) y_te,x_te = lab2.read_data('lyrics-test-hidden.csv',preprocessor=lab2.bag_of_words)Unseen wordsOne challenge for classification is that words will appear in the test data that do not appear in the training data. Compute the number of words that appear in `lyrics-dev.csv`, but not in `lyrics-train.csv`. To do this, implement the following deliverables:- **Deliverable 1.2**: implement `lab2.compute_oov`, returning a list of words that appear in one list of bags-of-words, but not another. Also implement `lab2.aggregate_counts` (10 points)- **Tests**: `tests/test_visible.py:test_d1_3a_oov` and `tests/test_visible.py:test_d1_2agg`from collections import Counter reload(lab2);To write fast code, you can find bottlenecks using the %%timeit cell magic. (The following line will run for about 5 mins.)%%timeit lab2.aggregate_counts(x_tr) counts_dv = lab2.aggregate_counts(x_dv)You can see the most common items in a counter by calling `counts.most_common()`:counts_dv.most_common(5) counts_tr = lab2.aggregate_counts(x_tr) reload(lab2); len(lab2.compute_oov(counts_dv,counts_tr)) len(lab2.compute_oov(counts_tr,counts_dv)) lab2.oov_rate(counts_dv,counts_tr)30% of the words in the dev set do not appear in the training set. 
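To make the out-of-vocabulary idea concrete, here is a tiny self-contained illustration (my own toy example, not the `lab2` implementation):

```python
# Aggregate per-document Counters into corpus-level counts, then diff the vocabularies:
# words that occur in corpus B but never in corpus A are B's OOV words relative to A.
from collections import Counter

docs_a = [Counter("we are the champions".split()), Counter("we will rock you".split())]
docs_b = [Counter("we are the world".split())]

agg_a = sum(docs_a, Counter())        # total counts over corpus A
agg_b = sum(docs_b, Counter())
oov = set(agg_b) - set(agg_a)         # seen in B, never in A
print(oov)                            # {'world'}
```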
Pruning the vocabularyLet's prune the vocabulary to include only words that appear at least ten times in the training data.- **Deliverable 1.3:** Implement `lab2.prune_vocabulary` (5 points)- **Test**: `tests/test_visible.py:test_d1_4_prune`reload(lab2); x_tr_pruned, vocab = lab2.prune_vocabulary(counts_tr,x_tr,10) x_dv_pruned, _ = lab2.prune_vocabulary(counts_tr,x_dv,10) x_te_pruned, _ = lab2.prune_vocabulary(counts_tr,x_te,10) len(vocab) i = 94 print(len(x_dv[i]),len(x_dv_pruned[i])) print(sum(x_dv[i].values()),sum(x_dv_pruned[i].values()))88 79 187 1762. Linear classificationNow we'll show you how to implement the linear classification rule, $\hat{y} = \text{argmax}_y \theta^{\top} f(x,y)$.You will use these functions in all classifiers in this assignment.**Total: 10 points**reload(lab2);The feature function vector $f(x,y)$ can be viewed as a dict, in which the values are counts, and the keys are tuples $(y,x_j)$, where $y$ is a label and $x_j$ is a base feature. Note that we must also include the offset feature, ```lab2.OFFSET```. Desired output is shown below:fv = lab2.make_feature_vector({'test':1,'case':2},'1980s') print(fv)defaultdict(None, {('1980s', '**OFFSET**'): 1, ('1980s', 'test'): 1, ('1980s', 'case'): 2})Let's compute the entire set of labels.labels = set(y_tr) #figure out all possible labels print(labels){'1990s', 'pre-1980', '2000s', '1980s'}Now we implement the prediction rule, $\hat{y} = \text{argmax}_y \theta^{\top} f(x,y)$.The output should be:- A predicted label- The scores of all labelsYou can test this function using these simple hand-crafted weights.from collections import defaultdict reload(lab2) # weight vectors must be defaultdicts theta_hand = defaultdict(float, {('2000s','money'):0.1, ('2000s','name'):0.2, ('1980s','tonight'):0.1, ('2000s','man'):0.1, ('1990s','fly'):0.1, ('pre-1980',lab2.OFFSET):0.1 }) lab2.predict(x_tr_pruned[0],theta_hand,labels)Now let's see how good these weights are, by evaluating on the dev set.reload(lab2); # this applies your predict function to all the instances in ```x_dv``` y_hat = lab2.predict_all(x_dv_pruned,theta_hand,labels) print(lab2.acc(y_hat,y_dv))0.34222222222222223. Naive BayesYou'll now implement a Naive Bayes classifier in this section.**Total: 45 points**reload(lab2);- **Deliverable 3.1**: (warmup) implement ```get_corpus_counts``` in ```lab2.py```. (5 points)- **Test**: `tests/test_visible.py:test_d3_1_corpus_counts`This function should compute the word counts for a given label.eighties_counts = lab2.get_corpus_counts(x_tr_pruned,y_tr,"1980s"); print(eighties_counts['today']) print(eighties_counts['yesterday'])50 14- **Deliverable 3.2**: Implement ```estimate_pxy``` in ```lab2.py```. (15 points)- **Test**: `tests/test_visible.py:test_d3_2_pxy`This function should compute the *smoothed* multinomial distribution $\log P(x \mid y)$ for a given label $y$.Note that this function takes the vocabulary as an argument. You have to assign a probability even for words that do not appear in documents with label $y$, if they are in the vocabulary.Hint: You can use ```get_corpus_counts``` in this function if you want to, but you don't have to.reload(lab2); log_pxy = lab2.estimate_pxy(x_tr_pruned,y_tr,"1980s",0.1,vocab)Probabilities must sum to one! 
(or very close)sum(np.exp(list(log_pxy.values())))Let's look at the log-probabilities of the words from the hand-tuned weightsprint({word:log_pxy[word] for (_,word),weight in theta_hand.items() if weight>0}) log_pxy_more_smooth = lab2.estimate_pxy(x_tr_pruned,y_tr,"1980s",1000,vocab) print({word:log_pxy_more_smooth[word] for (_,word),weight in theta_hand.items() if weight>0}){'money': -8.443741629859995, 'name': -8.43282250408468, 'tonight': -8.191919045665923, 'man': -8.295226983039361, 'fly': -8.497300695217104, '**OFFSET**': 0}- **Deliverable 3.3**: Now you are ready to implement ```estimate_nb``` in ```lab2.py```. (15 points)- **Test**: `tests/test_visible.py:test_d3_3a_nb`- The goal is that the score given by ```lab2.predict``` is equal to the joint probability $P(x,y)$, as described in the notes. Therefore, make sure your return output can be feed into ```lab2.predict```. - Don't forget the offset feature, whose weights should be set to the prior $\log P(y)$.- The log-probabilities for the offset feature should not be smoothed.- You can call the functions you have defined above, but you don't have to.reload(lab2); theta_nb = lab2.estimate_nb(x_tr_pruned,y_tr,0.1)Let's predict for a single instance.lab2.predict(x_tr_pruned[155],theta_nb,labels) aaa = lab2.predict(x_tr_pruned[155],theta_nb,labels) print(aaa) aaa = lab2.predict(x_tr_pruned[55],theta_nb,labels) print(aaa)('2000s', defaultdict(None, {'1990s': -2125.1966084804503, 'pre-1980': -2136.834842396802, '2000s': -2099.2474010561396, '1980s': -2153.019927798136})) ('1980s', defaultdict(None, {'1990s': -1851.2801316678658, 'pre-1980': -1798.5641514619065, '2000s': -1840.50646909292, '1980s': -1735.3527509275218}))Let's predict for all instances of the development set.y_hat = lab2.predict_all(x_dv_pruned,theta_nb,labels) print(lab2.acc(y_hat,y_dv)) # this block shows how we write and read predictions for evaluation lab2.write_predictions(y_hat,'nb-dev.preds') y_hat_dv = lab2.read_predictions('nb-dev.preds') lab2.acc(y_hat_dv,y_dv) # execute this block to write predictions for the test set y_hat = lab2.predict_all(x_te_pruned,theta_nb,labels) lab2.write_predictions(y_hat,'nb-test.preds')- **Deliverable 3.4**: Write a function in ```lab2.py``` called ```find_best_smoother```, which finds the smoothing value that gives best performance on the dev data. (5 points)- **Test**: `tests/test_visible.py:test_d3_4a_nb_best`Your function should be trying at least the following values in `vals` below.Then, using this smoothing value, run your Naive Bayes classifier on the test set, and output the results.vals = np.logspace(-3,2,11) print(vals) reload(lab2); best_smoother, scores = lab2.find_best_smoother(x_tr_pruned,y_tr,x_dv_pruned,y_dv,vals) plt.semilogx(list(scores.keys()),list(scores.values()),'o-'); plt.xlabel('smoothing') plt.ylabel('dev set accuracy');**Reflect:**- what might explain the dramatic drop in accuracy when the smoothing is increased from $10$ to $30$?- before you check, predict whether the accuracy will continue to significantly drop if you further increase the smoothing to $10000$. **Your Answer Here**: Save the best parameters for later comparison.theta_nb = lab2.estimate_nb(x_tr_pruned,y_tr,best_smoother) y_hat = lab2.predict_all(x_te_pruned,theta_nb,labels) lab2.write_predictions(y_hat,'nb-best-test.preds')4. 
Logistic regressionYou will implement logistic regression in scikit-learn.**Total: 15 points** 4.1 Converting data to numpyNumpy is a package for numerical computing in python.You will need to convert your bag-of-words list of counters to a numpy array. - **Deliverable 4.1**: Implement `lab2.py:make_numpy()` (5 points)- **Test**: `tests/test_visible.py:test_d4_1_numpy`- **Hint**: one approach is to start with `numpy.zeros((height,width))`, and then fill in the cells by iterating through the bag-of-words listX = np.zeros((4,2)) print(X) X[1,1] = -1 X[2,0] = 1.5 print(X) reload(lab2); X_tr = lab2.make_numpy(x_tr_pruned,vocab) X_dv = lab2.make_numpy(x_dv_pruned,vocab) X_te = lab2.make_numpy(x_te_pruned,vocab) label_set = sorted(list(set(y_tr))) print(label_set) Y_tr = np.array([label_set.index(y_i) for y_i in y_tr]) Y_dv = np.array([label_set.index(y_i) for y_i in y_dv]) len(set(Y_tr))4.2 Building a logistic regression model Import the model you want to use and make an instance of the Model.from sklearn.linear_model import LogisticRegression scikit_log_reg = LogisticRegression()Logistic Regression Model training.logisticRegr=scikit_log_reg.fit(X_tr, Y_tr)/usr/local/lib/python3.9/site-packages/sklearn/linear_model/_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression n_iter_i = _check_optimize_result(Get accuracy of training data and dev data. accuracy is defined as:(fraction of correct predictions): correct predictions / total number of data pointstrain_acc = logisticRegr.score(X_tr, Y_tr) dev_acc = logisticRegr.score(X_dv, Y_dv) print(train_acc) print(dev_acc)0.95375 0.48444444444444446**Deliverable 4.2**The noisy progress of the loss and dev set accuracy suggests that something is wrong with our training hyperparameters. Tune the ```LogisticRegression``` parameters until you can get to a dev set accuracy of at least 0.5. You may find a set of tunable parameters here: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html Complete lab2.better_model function(10 points)reload(lab2); scikit_log_reg = lab2.better_model() logisticRegr=scikit_log_reg.fit(X_tr, Y_tr) train_acc = logisticRegr.score(X_tr, Y_tr) dev_acc = logisticRegr.score(X_dv, Y_dv) print(train_acc) print(dev_acc) ### BEGIN HIDDEN TESTS scikit_log_reg = lab2.better_model() logisticRegr=scikit_log_reg.fit(X_tr, Y_tr) dev_acc = logisticRegr.score(X_dv, Y_dv) assert dev_acc >= 0.50 ### END HIDDEN TESTS Y_hat_te = logisticRegr.predict(X_te) np.save('logreg-es-test.preds.npy', np.array(Y_hat_te))5. Feature analysis**Total: 20 points** 5.1 Top Features for Logistic Regression **Deliverable 5.1**: Implement ```get_top_features_LR``` to output the k most indicative features (**highest features weights**) and the k least indicative features (**lowest features weights**) for each label. (10 points)**Hint**: ```scikit_log_reg.coef_``` is the coefficient of the features. 
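As a sketch only (not the reference solution), one way ```get_top_features_LR``` could be structured is shown below. It assumes the feature columns produced by ```make_numpy``` follow ```sorted(vocab)``` and that row $i$ of ```coef_``` corresponds to ```label_set[i]``` (which holds here because ```Y_tr``` was built with ```label_set.index```); adapt the details to your own ```lab2.py```.

```python
import numpy as np

def get_top_features_LR(model, vocab, label_set, label, k=10):
    # Row of coef_ for this label: class i in the fitted model corresponds to label_set[i]
    weights = model.coef_[label_set.index(label)]
    vocab_list = sorted(vocab)        # assumed column ordering used by make_numpy
    order = np.argsort(weights)       # feature indices sorted by ascending weight
    top_k = [vocab_list[i] for i in order[::-1][:k]]   # k highest-weight features
    bottom_k = [vocab_list[i] for i in order[:k]]      # k lowest-weight features
    return top_k, bottom_k
```

Note that the tests compare the returned word lists as unordered sets, so only which k features land at the top and bottom matters, not their order.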
Let's load the vanilla LR model for comparison.scikit_log_reg = LogisticRegression() logisticRegr=scikit_log_reg.fit(X_tr, Y_tr) reload(lab2); print(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'pre-1980',k=10)) print(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1980s',k=10)) print(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1990s',k=10)) print(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'2000s',k=10)) ### BEGIN HIDDEN TESTS scikit_log_reg = LogisticRegression() logisticRegr=scikit_log_reg.fit(X_tr, Y_tr) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'pre-1980',k=10)[0]) == set(['lord', 'boogie', 'very', 'feelin', 'darling', 'dancing', 'till', 'mornin', 'fool', 'percussion']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'pre-1980',k=10)[1]) == set(['step', 'under', 'meant', 'runaway', 'perfect', 'yo', 'open', 'front', 'body', 'hit']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1980s',k=10)[0]) == set(['wall', 'america', 'standing', 'tumble', 'poison', 'shout', 'chance', 'heat', 'cut', 'took']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1980s',k=10)[1]) == set(['floor', 'hes', 'god', 'percussion', 'thinkin', 'finally', 'window', 'mama', 'lord', 'sing']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1990s',k=10)[0]) == set(['hit', 'yo', 'cuz', 'saw', 'dick', 'cradle', 'front', 'push', 'needed', 'rush']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'1990s',k=10)[1]) == set(['dancing', 'second', 'chance', 'born', 'use', 'those', 'pretty', 'meaning', 'today', 'other']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'2000s',k=10)[0]) == set(['wit', 'shut', 'shorty', 'club', 'three', 'jeans', 'side', 'ass', 'full', 'bitch']) assert set(lab2.get_top_features_LR(scikit_log_reg, vocab,label_set,'2000s',k=10)[1]) == set(['lovin', 'rhythm', 'hip', 'lover', 'must', 'honey', 'boogie', 'woman', 'youve', 'fool']) ### END HIDDEN TESTS/usr/local/lib/python3.9/site-packages/sklearn/linear_model/_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression n_iter_i = _check_optimize_result(5.2 Top Features for Naive Bayes **Deliverable 5.2**: Implement ```get_top_features_NB``` to output the k most indicative features (**highest features weights**) and the k least indicative features (**lowest features weights**) for each label. 
(10 points)reload(lab2); print(lab2.get_top_features_NB(theta_nb, label_set,'pre-1980',k=10)) print(lab2.get_top_features_NB(theta_nb, label_set,'1980s',k=10)) print(lab2.get_top_features_NB(theta_nb, label_set,'1990s',k=10)) print(lab2.get_top_features_NB(theta_nb, label_set,'2000s',k=10)) ### BEGIN HIDDEN TESTS theta_nb = lab2.estimate_nb(x_tr_pruned,y_tr,best_smoother) assert set(lab2.get_top_features_NB(theta_nb, label_set,'pre-1980',k=10)[0]) == set(['you', 'the', 'i', 'to', 'and', 'a', 'me', 'my', 'it', 'love']) # assert set(get_top_features_NB(theta_nb, label_set,'pre-1980',k=10)[1]) == set(['master', 'wishful', 'killin', 'benefit', 'zono', 'muzik', 'mewhy', 'overall', 'animal', 'skeet']) assert set(lab2.get_top_features_NB(theta_nb, label_set,'1980s',k=10)[0]) == set(['you', 'the', 'i', 'to', 'me', 'a', 'and', 'it', 'my', 'love']) # assert set(get_top_features_NB(theta_nb, label_set,'1980s',k=10)[1]) == set(['lamborghini', 'yeahthe', 'wishful', 'benefit', 'babei', 'zono', 'overall', 'billion', 'fiend', 'skeet']) assert set(lab2.get_top_features_NB(theta_nb, label_set,'1990s',k=10)[0]) == set(['you', 'i', 'the', 'to', 'me', 'and', 'a', 'it', 'my', 'your']) # assert set(get_top_features_NB(theta_nb, label_set,'1990s',k=10)[1]) == set(['ladada', 'toot', 'spotlights', 'reverse', 'zono', 'muzik', 'overall', 'tho', 'billion', 'skeet']) assert set(lab2.get_top_features_NB(theta_nb, label_set,'2000s',k=10)[0]) == set(['you', 'i', 'the', 'me', 'and', 'to', 'a', 'it', 'my', 'in']) # assert set(get_top_features_NB(theta_nb, label_set,'2000s',k=10)[1]) == set(['eternal', 'shiver', 'stepper', 'escapade', 'jojo', 'tambourine', 'dop', 'wishful', 'total', 'muzik']) ### END HIDDEN TESTS**Reflect:**- Compare the development dataset accuracy of LR and NB, which model do you think is better? - Given those indicative features of LR and NB, which model do you think is better? - You may read https://medium.com/@sangha_deb/naive-bayes-vs-logistic-regression-a319b07a5d4c for more information on a comparison between discriminative and generative models. **Your Answer Here**: 6. Precision, Recall, and F1Besides accuracy, systems in natural language processing are evaluated using precision, recall, and F1. Such measures are essential when evaluating on an unbalanced dataset in terms of classes (labels). **Total: 10 points** Confusion MatrixA confusion matrix is a table that is often used to describe the performance of a classification model (or "classifier") on a set of data for which the true values are known. In this section, we show one python packages (Seaborn) for making confusion matrixes.from sklearn import metrics import seaborn as sns predictions = logisticRegr.predict(X_dv) cm = metrics.confusion_matrix(Y_dv, predictions) plt.figure() ax = sns.heatmap(cm, annot=True, fmt=".1f", linewidths=1, square = True, cmap = 'Blues_r'); ax.set_ylim(0 ,4) plt.ylabel('Actual label'); plt.xlabel('Predicted label'); all_sample_title = 'Accuracy Score: {0:.4f}'.format(dev_acc) plt.title(all_sample_title, size = 15); plt.show();**Reflect**: What do you observe on the above confusion matrix? If you are the leading manager for this team project, which portion of the data would you ask your team to focus on? **Your Answer Here**: Precision, Recall, and F1Write a function below that takes in a predicted labels 'Y_hat' and gold labels 'Y', and returns the precision, recall, and F1 for each label.F1 is the harmonic mean of precision and recall. 
F1 = 2 * (precision * recall) / (precision + recall)(10 points)print(lab2.get_PRF(predictions, Y_dv, label_set, 'pre-1980')) print(lab2.get_PRF(predictions, Y_dv, label_set, '1980s')) print(lab2.get_PRF(predictions, Y_dv, label_set, '1990s')) print(lab2.get_PRF(predictions, Y_dv, label_set, '2000s')) label_set ### BEGIN HIDDEN TESTS scikit_log_reg = LogisticRegression() logisticRegr=scikit_log_reg.fit(X_tr, Y_tr) predictions = logisticRegr.predict(X_dv) a,b,c = lab2.get_PRF(predictions, Y_dv, label_set, 'pre-1980') assert abs(a-0.5078125) < 0.01 assert abs(b-0.5241935483870968) < 0.01 assert abs(c-0.5158730158730158) < 0.01 a,b,c = lab2.get_PRF(predictions, Y_dv, label_set, '1980s') assert abs(a-0.32967032967032966) < 0.01 assert abs(b-0.28846153846153844) < 0.01 assert abs(c-0.30769230769230765) < 0.01 a,b,c = lab2.get_PRF(predictions, Y_dv, label_set, '1990s') assert abs(a-0.391304347826087) < 0.01 assert abs(b-0.37894736842105264) < 0.01 assert abs(c-0.3850267379679144) < 0.01 a,b,c = lab2.get_PRF(predictions, Y_dv, label_set, '2000s') assert abs(a-0.6258992805755396) < 0.01 assert abs(b-0.6850393700787402) < 0.01 assert abs(c-0.6541353383458647) < 0.01 ### END HIDDEN TESTS/usr/local/lib/python3.9/site-packages/sklearn/linear_model/_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression n_iter_i = _check_optimize_result(Example of using the model moduleThis notebook shows how to use the components defined in the model. The model module is meant as a light-weight trade model which allows flexible portfolio construction and valuation. 
The portfolio then handles the position management (long/short), and p&l accounting.import tia.analysis.model as model import pandas as pd from tia.util.fmt import new_dynamic_formatter, DynamicColumnFormatter %matplotlib inline import matplotlib.pyplot as plt try: plt.style.use('fivethirtyeight') plt.rcParams['lines.linewidth'] = 1.4 except: pass # Load microsoft - note it retrieves the dividends too msft = model.load_yahoo_stock('MSFT', start='1/1/2010') msft.pxs.frame.tail() fig, axes = plt.subplots(1, 2, figsize=(12, 3)) msft.pxs.dvds.resample('Q', how='sum').to_period().plot(kind='bar', ax=axes[0], title='msft dividends') msft.pxs.close.plot(ax=axes[1], title='msft close') # # Create a signal for when the 10x20 moving average (buy/sell single contract) # - you can create your own simulation and build trades (using model.Trade or # model.TradeBlotter) from tia.analysis.ta import cross_signal, sma, Signal ma10, ma20 = sma(msft.pxs.close, 10), sma(msft.pxs.close, 20) sig = cross_signal(ma10, ma20) trades = Signal(sig).close_to_close(msft.pxs.close) # show last 10 trades trades[-10:] # Build the portfolio port = model.SingleAssetPortfolio(msft, trades) # show the ltd transaction level pl frame port.pl.ltd_txn_frame.tail() # show the daily pl frame (rolled up by day) port.pl.dly_frame.tail() # show the positions F = new_dynamic_formatter(method='col', pcts=1, parens=0, trunc_dot_zeros=1) F(port.positions.frame.tail(4)) # Look at basic p/l stats PLFMT = new_dynamic_formatter(method='row', precision=3, pcts=1, trunc_dot_zeros=1) tmp = pd.DataFrame({'dly': port.pl.dly_details.summary, 'wkly': port.pl.weekly_details.summary, 'mthly': port.pl.monthly_details.summary, 'yrly': port.pl.annual_details.summary}) PLFMT(tmp) FMT = new_dynamic_formatter(method='row', precision=2, pcts=1, trunc_dot_zeros=1) # Look at basic ret stats (By default return on initial invest of each position) tmp = pd.DataFrame({'dly': port.performance.dly_details.summary, 'wkly': port.performance.weekly_details.summary, 'mthly': port.performance.monthly_details.summary, 'yrly': port.performance.annual_details.summary}) FMT(tmp) # show the position stats port.positions.stats # Easy to get portfolio subsets such as long/short, winner/losers port.long.positions.frame.tail() # Get a specific position pos = port.long.positions[57] # Get txn level detail associated with the position pos.pl.txn_frame.head()Simple Report Provided (can add own splits but Long/Short, Win/Loss included)summary = model.PortfolioSummary() # defaults included are long/short, win/loss but can provide a method to split as like summary.include_long_short().include_win_loss() # a few simple stats methods provided but easy to create own analyze_fct = model.PortfolioSummary.analyze_returns rpt = summary(port, analyze_fct) FMT(rpt.T)Some graphstmp = pd.DataFrame({'Long': port.long.pl.ltd_dly, 'Short': port.short.pl.ltd_dly, 'All': port.pl.ltd_dly}) _ = tmp.plot(title='P&L') # Show Daily Drawdowns port.pl.dly_details.plot_ltd() plt.title('P&L Drawdown') port.performance.dly_details.plot_ret_on_dollar(label='All') port.long.performance.dly_details.plot_ret_on_dollar(label='Long') port.short.performance.dly_details.plot_ret_on_dollar(label='Short') plt.legend(loc='upper left', prop={'size':12}) plt.title('Return on $1') # Show the LTD Returns port.performance.dly_details.plot_ltd() # Show the Monthly Return Histogram port.performance.monthly_details.plot_hist(figsize=(7, 3)) # See the range of returns for the positions fig, ax = plt.subplots(1, 1, figsize=(9, 3)) 
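# pass the Matplotlib axis into the tia plotting helper below so the chart renders on this figure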
port.positions.plot_ret_range(ax=ax) plt.title('Position Return Range')Sets> A summary of "Probability and Statistics in Data Science using Python", offered by UCSD (DSE210x)- toc: true - badges: true- comments: true- author: - categories: [Python, edX, Data_Science, Statistics, Probability]- image: images/venn_example.png Elements, sets, and membership Elements- Foundation, building blocks of sets- Can be anything- Structured- Numbers Sets- Collection of elements- Define : `{specify elements}` Specification- Explicit - Coin = {heads, tails} - Bits = {0, 1} - Die = {1, 2, 3, 4, 5, 6}- Implicit - Digits = {0, 1, $\dots$, 9} - Letters = {a, b, $\dots$, z} - Days = {Monday, $\dots$, Sunday}- Descriptive - {4-letter words} = {love, like, dear, $\dots$} Common Sets| Sets | | Notation || ---- | ----- | ---- || Integers | {$\dots$, -2, -1, 0, 1, 2, $\dots$} | $\mathbb{Z}$ || Naturals | {0, 1, 2, $\dots$ }| $\mathbb{N}$ || Positives | {1, 2, 3, $\dots$} | $\mathbb{P}$ || Rationals | {integer ratio m / n, n $\neq 0$} | $\mathbb{Q}$ || Reals | {all points on the continuous number line} | $\mathbb{R}$ |> Note: $\mathbb{Z}$ comes from the German word `Zahl`, meaning number.By convention, sets are written with upper-case letters (A, B, etc.) and elements with lower-case letters (a, b, etc.). Membership- If element $x$ is in a set $A$, it is a member of, or belongs to, $A$, denoted $x \in A$. $$ 0 \in \{0, 1\} \qquad 1 \in \{0, 1\} \qquad \pi \in \mathbb{R} $$ - Equivalently, $A$ contains $x$, written $A \ni x$. $$ \{0, 1\} \ni 0 \qquad \{0, 1\} \ni 1 \qquad \mathbb{R} \ni \pi $$- If $x$ is not in $A$, then $x$ is not a member of, or does not belong to, $A$, denoted $x \notin A$.- Equivalently, $A$ does not contain $x$, denoted $A \not\owns x$. Doesn't Matter- Order: {0, 1} = {1, 0}- Repetition: {0, 1} = {0, 1, 1, 1}If you want to consider:- Order matters: use ordered tuples ((0, 1) $\neq$ (1, 0))- Repetition matters: use multisets or bags Special Sets- Empty set: contains no elements ($\emptyset$ or {}, $\forall x, x \notin \emptyset$)> Note: $\forall$ means 'for all', or 'every'- Universal set: all possible elements ($\Omega$, $\forall x, x \in \Omega$)- $\Omega$ lets us consider only relevant elements. $\Omega$ can be $\mathbb{Z}$ (the integers) or the prime numbers- $\Omega$ depends on the application (temperature, text, etc.)- There is only one empty set: it is the unique set with no elements. Set Definition in python- Define a set```python{...} or set({...})```- For empty set```pythonset() or set({})```> Note: In Python, `{}` is not an empty set; it is an empty dictionary. Membership in python- $\in \quad \rightarrow$ `in`- $\notin \quad \rightarrow$ `not in` Testing if Empty, Size# Empty set S = set() not S # Set T = {1, 2} not T len(S) len(T)Some simple sets Sets within SetsSpecify a set within a universe, or any other set,$$ \{x \in A \vert \dots \} $$means the elements $x$ in $A$ such that the stated condition holds. 
Sometimes it expresses like this,$$ \{x \in A : \dots \} $$For example,$$ \mathbb{N} = \{x \in \mathbb{Z} \vert x \geq 0 \} $$$$ \mathbb{P} = \{x \in \mathbb{N} \vert x \gt 0 \} $$It usually express the solution to equations,$$ \{ x \in \mathbb{R} \vert x^2 \geq 0\} = \mathbb{R} $$$$ \{ x \in \mathbb{R} : x^2 = 1 \} = \{-1, 1\} $$$$ \{ x \in \mathbb{R} \vert x^2 = 0 \} = \{0\} $$> Note: a single-element set is called **singleton**$$ \{ x \in \mathbb{R} \vert x^2 = -1 \} = \emptyset $$$$ \{ x \in \mathbb{C} \vert x^2 = -1 \} = \{i, -i\} $$ Integer intervals$$ \{m, \dots n\} = \{i \in \mathbb{Z} \vert m \leq i \leq n \} $$It is a set of integers from $m$ to $n$, inclusively.$$ \{3, \dots, 5\} = \{i \in \mathbb{Z} \vert 3 \leq i \leq 5 \} = \{3, 4, 5\} $$ $$ \{3, \dots, 4\} = \{i \in \mathbb{Z} \vert 3 \leq i \leq 4 \} = \{3, 4\} $$$$ \{3, \dots, 3\} = \{i \in \mathbb{Z} \vert 3 \leq i \leq 3 \} = \{3\} $$$$ \{3, \dots, 2\} = \{i \in \mathbb{Z} \vert 3 \leq i \leq 2 \} = \emptyset $$For convention, $[n] = \{1, \dots, n\}$ Real intervals$$[a, b] \qquad \rightarrow \{x \in \mathbb{R} \vert a \leq x \leq b \} $$$$(a, b) \qquad \rightarrow \{x \in \mathbb{R} \vert a \lt x \lt b \} $$$$[a, b) \qquad \rightarrow \{x \in \mathbb{R} \vert a \leq x \lt b \} $$$$(a, b] \qquad \rightarrow \{x \in \mathbb{R} \vert a \lt x \leq b \} $$ DivisibilityIn $m, n \in \mathbb{Z}$, if $n = c \dot m$ for some $c \in \mathbb{Z}$, we say that n is a multiple of m, or $m$ divides $n$, and write $m \vert n$If no such $c$ exists, $m$ does not divide $n$, or $n$ is not a multiple of $m$ denoted $m \not\vert n$.For example,$$\text{There is no } c \in \mathbb{Z} \quad \text{such that} \quad 4 = c \cdot 3 \quad \rightarrow 3 \not\vert 4 $$$$ 0 \not\vert n \quad \text{for any } n \neq 0 $$ Set of Multiples- Integer multiples of $m$$$ m \in \mathbb{Z} \qquad {}_m\mathbb{Z} \overset{\underset{\mathrm{def}}{}}{=} \{i \in \mathbb{Z} : m \vert i \}$$ - Example$$ \begin{aligned} {}_2\mathbb{Z} &= \{\dots, -4, -2, 0, 2, 4, \dots \} \overset{\underset{\mathrm{def}}{}}{=} \mathbb{E} \quad \rightarrow \text{even number} \\ {}_1\mathbb{Z} &= \{\dots , -2, -1, 0, 1, 2, \dots \} = \mathbb{Z} \\ {}_0\mathbb{Z} &= \{0\} \end{aligned} $$- Multiples of $m$ in $\{1..n\}$$$ m \in \mathbb{Z}, n \in \mathbb{P} \qquad {}_m[n] \overset{\underset{\mathrm{def}}{}}{=} \{i \in [n] : m \vert i\}$$ - Example $$ \begin{aligned} {}_3[13] &= \{i \in \{1, \dots, 13\} : 3 \vert i \} = \{3, 6, 9, 12\} \\{}_7[13] &= \{7\} \\ {}_1[13] &= [13] \\ {}_{14}[13] &= {}_0[13] = \emptyset \end{aligned} $$ Intervals, Multiples in python$\{0,\dots, n-1\} \quad \rightarrow$ `range(n)`$\{m, \dots, n-1\} \quad \rightarrow$ `range(m, n)`$\{m, m+d, m+2d, \dots \} \leq n - 1 \quad \rightarrow$ `range(m, n, d)`set(range(3)) set(range(2, 5)) set(range(2, 12, 3))Visualizing Sets Venn Diagram- Developed by - Used to visualize Sets, Regions, Elements, Points Ven Diagram in Python```python!pip install matplotlib_venn```import matplotlib.pyplot as plt import matplotlib_venn as venn S = {1, 2, 3} T = {0, 2, -1, 5} venn.venn2([S, T], set_labels=('S', 'T')); U = { 10, 8, 0, 2, -1} venn.venn3([S, T, U], set_labels=('S', 'T', 'U'));Set Relations Relation Types- Set $A$ and $B$ are equal, denoted $A = B$, if they have exactly the same elements$$\{0, 1\} = \{1, 0\}$$- If $A$ and $B$ are not equal, they are **different**, denoted $A \neq B$$$\{0, 1\} \neq \{1, 2\}$$- **All** elements must be identical: $\{1, 2, 4\} = \{4, 1, 2\}$- **One different** element enough: $\{1, 2, 4\} \neq \{1, 2, 
4, 8\}$ Intersection- Two sets **intersect** if they share at least one common element. Mathematically, this can be expressed as$$ \exists x, \quad x \in A \wedge x \in B $$- Two sets are **disjoint** if they share no elements.$$ \neg \exists x, \quad x \in A \wedge x \in B $$- $\emptyset$ is disjoint from any set- A non-empty $\Omega$ intersects every set- A set intersects itself if and only if it is non-empty.- Several sets - intersect if all share a common element - are mutually disjoint if every two of them are disjoint Subsets- It generalizes $\leq$- If every element in $A$ is also in $B$, then $A$ is a **subset of** $B$, denoted $A \subseteq B$$$ \{0\} \subseteq \{0, 1\} \\ \{0\} \subseteq \{0\}$$- Equivalently, $B$ is a **superset** of, or contains, $A$, denoted $B \supseteq A$$$ \{0, 1\} \supseteq \{0\} $$- If $A$ has an element that's not in $B$, then $A$ is **not a subset** of $B$, denoted $A \not \subseteq B$, or $B \not \supseteq A$$$ \{0, 1\} \not \subseteq \{1, 2\} \\ \{1, 2\} \not \supseteq \{0, 1\} $$- $\mathbb{P} \subseteq \mathbb{N} \subseteq \mathbb{Z} \subseteq \mathbb{Q} \subseteq \mathbb{R}$- $\emptyset \subseteq A \subseteq A \subseteq \Omega$- (transitivity) $A \subseteq B$ and $B \subseteq C \rightarrow A \subseteq C$> Note: $\subseteq$ is called **transitive**- $A \subseteq B$ and $B \subseteq A \rightarrow A = B$ Strict Subsets- It generalizes $\lt$- If $A \subseteq B$ and $A \neq B$, $A$ is a **strict subset** of $B$, denoted $A \subset B$, and $B$ is a **strict superset** of $A$, denoted $B \supset A$.$$ \{0\} \subset \{0, 1\} \\ \{0, 1\} \supset \{0\} $$- If $A$ is not a strict subset of $B$, we write $A \not \subset B$ or $B \not \supset A$ - Reason: either $A \not \subseteq B$ or $A = B$ Belongs to vs. Subset of- $\in$ (Belongs to) - Relation between an element and a set - $x \in A$: element $x$ belongs to, or is contained in, set $A$ - ex) $\{0, 1\}$ has two elements: 0 and 1 $$ \rightarrow 0 \in \{0, 1\} , \{0\} \not \in \{0, 1\} $$ - $\subseteq$ (Subset of) - Relation between two sets - $A \subseteq B$ : $A$ is a subset of set $B$ - ex) $\{0, 1\}$ has two elements: 0 and 1 $$ \{0\} \subseteq \{0, 1\} $$ - 0 is an element of $\{0, 1\}$, but 0 is not a set. ($0 \not \subseteq \{0, 1\}$) Set relations in python EqualityS1 = {0, 1} S2 = set({0, 1}) S3 = {1, 0, 1} T = {0, 2} print(S1 == T) print(S1 == S2) print(S1 == S3)False True TrueInequalityprint(S1 != S2) print(S1 != T)False TrueDisjointS1.isdisjoint(T) S1.isdisjoint({2})Subsets and Supersetszero = {0} zplus = {0, 1} zminus = {0, -1}subsetzminus <= zplus zero.issubset(zminus)Strict subsetzplus < zminus zero < zminusSupersetzplus >= zminus zplus.issuperset(zero)Strict supersetzminus > zminus zplus > zeroTuples and products Tuples and Ordered Pairs- Set - Order and repetition do not matter$$ \{a, b, c\} = \{b, c, a\} $$- Tuple - Both order and repetition matter$$ (a, b, c) \neq (b, c, a) \\ (a, a, a) \neq (a) $$- $n$-tuple - Tuple with $n$ elements$$ (a_1, a_2, \dots, a_n) $$- 2-tuple - Ordered pair$$ (3, 7) $$ Cartesian Products- The Cartesian product of $A$ and $B$ is the set $A \times B$ of ordered pairs $(a, b)$ where $a \in A$ and $b \in B$. 
Mathematically,$$ A \times B = \{(a, b): a \in A, b \in B\} $$- $A \times A = A^2$- $\mathbb{R}^2 = \{(x, y): x, y \in \mathbb{R}\} \quad \rightarrow$ Cartesian Plane- $A, B \subseteq \mathbb{R} \quad \rightarrow A \times B \subseteq \mathbb{R}^2$ - For example,$$ A = [0, 2], B=[1, 4] \\ A \times B = \{(x, y): x \in [0, 2], y \in [1, 4]\} $$ Discrete Sets- Similar, simpler- For example,$$ \begin{aligned} \{a, b\} \times \{1, 2, 3\} &= \{(x, y): x \in \{a, b\}, y \in \{1, 2, 3\}\} \\ &= \{ (a, 1), (a, 2), (a, 3), (b, 1), (b, 2), (b, 3)\} \end{aligned}$$- 1st coordinate: vertical, 2nd coordinate: horizontal Identities- $A \times \emptyset = \emptyset \times A = \emptyset$- $A \times (B \cup C) = A \times B \cup A \times C$- $A \times (B \cap C) = A \times B \cap A \times C$- $A \times (B - C) = A \times B - A \times C$ Cartesian products in PythonUse **product** function in **itertools** libraryfrom itertools import product Faces = set({'J', 'Q', 'K'}) Suits = {'\u2666\uFE0F', '\u2660\uFE0F'} for i in product(Faces, Suits): print(i)('K', '♠️') ('K', '♦️') ('Q', '♠️') ('Q', '♦️') ('J', '♠️') ('J', '♦️')Russell's Paradox Sets in Sets- Sets can be elements- Every set is a subset of itself$$ \{0\} \subseteq \{0\} $$- Can a set belong to (be an element of) itself? $ \rightarrow S \in S$ - Typically, sets do not belong to themselves $\quad \{0\} \not \in \{0\} , \emptyset \not \in \emptyset $ - But some sets do belong to themselves! (infinite recursion) - Some sets $\in$ themselves, others don't ($\{0\}$) Russell's Paradox- Define a set that cannot exist- For example,$$ R = \{\text{sets that don't belong to themselves}\} = \{S: S \not \in S\} $$- If - $R \in R \quad \rightarrow R \not \in R$ (contradiciton) - $R \not \in R \quad \rightarrow R \in R$ (contradiction) - If R existed, then both $R \in R$ and $R \not \in R$ would hold- R defined but cannot exist!!- ex) The set that contains only the empty set $\emptyset$ is not empty Exercise 1De Morgan's first law states the following for any two sets $A$ and $B$$$(A\cup B)^c = A^c\cap B^c$$In the following two exercises we calculate $(A\cup B)^c$ in two different ways. Both functions must take $A$, $B$ and the universal set $U$ as their inputs. Exercise 1.1Write the function **complement_of_union** that first determines $A\cup B$ and then evaluates the complement of this set. Output the tuple: $\begin{pmatrix}A\cup B,\, (A\cup B)^c\end{pmatrix}$. **Code**```pythonA = {1, 2, 3}B = {3, -6, 2, 0}U = {-10, -9, -8, -7, -6, 0, 1, 2, 3, 4}complement_of_union(A, B, U)``` **Output**```({-6, 0, 1, 2, 3}, {-10, -9, -8, -7, 4})```def complement_of_union(A, B, U): # inputs: A, B and U are of type 'set' # output: a tuple of the type (set, set) union = A.union(B) complement_union = U.difference(union) return (union, complement_union) # Check Function A = {1, 2, 3, 4, 5} B = {0, 2, -6, 5, 8, 9} U = A|B|{-3, 7, 10, -4} assert( complement_of_union(A, B, U) == ({-6, 0, 1, 2, 3, 4, 5, 8, 9}, {-4, -3, 7, 10}) )Exercise 1.2Write the function **intersection_of_complements** that first determines $A^c$ and $B^c$ and then evaluates the intersection of their complements. 
Output the tuple: $\begin{pmatrix}A^c, \, A^c\cap B^c\end{pmatrix}$ **Code**```pythonA = {1, 2, 3}B = {3, -6, 2, 0}U = {-10, -9, -8, -7, -6, 0, 1, 2, 3, 4}intersection_of_complements(A, B, U)``` **Output**```({-10, -9, -8, -7, -6, 0, 4}, {-10, -9, -8, -7, 4})```def intersection_of_complements(A, B, U): # inputs: A, B and U are of type 'set' # output: a tuple of the form (set, set) complement_a = U.difference(A) complement_b = U.difference(B) complement_intersect = complement_a.intersection(complement_b) return (complement_a, complement_intersect) # Check Function A = {1, 2, 3, 4, 5} B = {0, 2, -6, 5, 8, 9} U = A|B|{-3, 7, 10, -4} assert( intersection_of_complements(A, B, U) == ({-6, -4, -3, 0, 7, 8, 9, 10}, {-4, -3, 7, 10}) )Exercise 2This problem illustrates a property of cartesian products of unions of two or more sets. For four sets $A$, $B$, $S$ and $T$, the following holds:$$(A\cup B)\times(S\cup T) = (A\times S)\cup(A\times T)\cup(B\times S)\cup(B\times T)$$Write the following functions to determine $(A\cup B)\times(S\cup T)$ in two different ways. Exercies 2.1Write function **product_of_unions** that first determines $(A\cup B)$ and $(S\cup T)$ and then evaluates the cartesian products of these unions. Output the tuple $\begin{pmatrix}(A\cup B),\, (A\cup B)\times(S\cup T)\end{pmatrix}$. **Code**```pythonA = {1, 2}B = {1, 3}S = {-1, 0}T = {0, 10}product_of_unions(A, B, S, T)``` **Output**```({1, 2, 3}, {(1, -1), (1, 0), (1, 10), (2, -1), (2, 0), (2, 10), (3, -1), (3, 0), (3, 10)})```def product_of_unions(A, B, S, T): # inputs: A, B, S and T are sets # output: a tuple of the type (set, set) union_a_b = A.union(B) union_s_t = S.union(T) product_a_b_s_t = set() for i in product(union_a_b, union_s_t): product_a_b_s_t.add(i) return (union_a_b, product_a_b_s_t) # Check Function A = {5} B = {5, 6} S = {-1, 0, 1} T = {1, 2} assert( product_of_unions(A, B, S, T) == \ ({5, 6}, {(5, -1), (5, 0), (5, 1), (5, 2), (6, -1), (6, 0), (6, 1), (6, 2)}) )Exercise 2.2Write a function **union_of_products** that first determines $(A\times S)$ and the other three cartesian products that appear on the right hand side of the identity above, then evaluates the union of these cartesian products. Output the tuple $\begin{pmatrix}(A\times S),\, (A\times S)\cup(A\times T)\cup(B\times S)\cup(B\times T)\end{pmatrix}$. 
**Code**```pythonA = {1, 2}B = {1, 3}S = {-1, 0}T = {0, 10}union_of_products(A, B, S, T)``` **Output**```({(1, -1), (1, 0), (2, -1), (2, 0)}, {(1, -1), (1, 0), (1, 10), (2, -1), (2, 0), (2, 10), (3, -1), (3, 0), (3, 10)})```def union_of_products(A, B, S, T): # inputs: A, B, S and T are sets # output: a tuple of the type (set, set) product_a_s = set(x for x in product(A, S)) product_a_t = set(x for x in product(A, T)) product_b_s = set(x for x in product(B, S)) product_b_t = set(x for x in product(B, T)) union_all = product_a_s.union(product_a_t).union(product_b_s).union(product_b_t) return (product_a_s, union_all) # Check Function A = {5} B = {5, 6} S = {-1, 0, 1} T = {1, 2} assert( union_of_products(A, B, S, T) == \ ({(5, -1), (5, 0), (5, 1)}, \ {(5, -1), (5, 0), (5, 1), (5, 2), (6, -1), (6, 0), (6, 1), (6, 2)}) \ )Calculating the Return of a Stock Portfolio - Quota (Unitization) Method - Python para Investimentos Initial setup!pip -q install yfinance import yfinance as yf import pandas as pd import numpy as np import warnings warnings.filterwarnings('ignore') import matplotlib matplotlib.rcParams['figure.figsize'] = (18,8) matplotlib.style.use('seaborn-darkgrid')Uploading the trades.xlsx filefrom google.colab import files uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) arquivo = pd.read_excel('trades.xlsx') arquivoCreating tables from the file data Creating a table with one column per asset, indexed by datetrade_quant = pd.pivot_table(arquivo, values='quantidade', index=['data'], columns=arquivo['ativo'].str.upper(), aggfunc=np.sum, fill_value=0) trade_quantCreating a table with the buy and sell pricestrade_price = pd.pivot_table(arquivo, values='preço', index=['data'], columns=arquivo['ativo'].str.upper(), fill_value=0) trade_priceDownloading the stock price quotesprices = yf.download(tickers=(trade_quant.columns+'.SA').to_list(), start=trade_quant.index[0], rounding=True)['Adj Close'] prices.columns = prices.columns.str.rstrip('.SA') prices.dropna(how='all', inplace=True) prices trades = trade_quant.reindex(index=prices.index) trades.fillna(value=0, inplace=True) trades aportes = (trades * trade_price).sum(axis=1) aportes posicao = trades.cumsum() posicao carteira = posicao * prices carteira['saldo'] = carteira.sum(axis=1) carteira for i, data in enumerate(aportes.index): if i == 0: carteira.at[data, 'vl_cota'] = 1 carteira.at[data, 'qtd_cotas'] = carteira.loc[data]['saldo'].copy() else: if aportes[data] != 0: carteira.at[data, 'qtd_cotas'] = carteira.iloc[i-1]['qtd_cotas'] + (aportes[data] / carteira.iloc[i-1]['vl_cota']) carteira.at[data, 'vl_cota'] = carteira.iloc[i]['saldo'] / carteira.at[data, 'qtd_cotas'] carteira.at[data, 'retorno'] = (carteira.iloc[i]['vl_cota'] / carteira.iloc[i-1]['vl_cota']) -1 else: carteira.at[data, 'qtd_cotas'] = carteira.iloc[i-1]['qtd_cotas'] carteira.at[data, 'vl_cota'] = carteira.iloc[i]['saldo'] / carteira.at[data, 'qtd_cotas'] carteira.at[data, 'retorno'] = (carteira.iloc[i]['vl_cota'] / carteira.iloc[i-1]['vl_cota']) -1 carteira.head(50) carteira['vl_cota'].plot();This notebook has been designed for the nurse care activity recognition challenge competition with the aim of providing basic knowledge of Human Activity Recognition from accelerometer data. It has been made by . Exploring the dataimport pandas as pd import numpy as np %matplotlib inlineFirst, we have to load the data and label file. 
In this tutorial, we only use the train data of 1 user to reduce the time of the whole process.#Load data df=pd.read_csv('acc_user2.csv') df_label=pd.read_csv('label/label_train.csv')Let's check what information the data contains.df.head(5)We can see that the data file contains 5 columns: subject_id, datetime, and 3 coordinates of the accelerometer data.df_label.head(5)For label file, we have 8 columns: id (label id), user_id, activity_type_id, activity_type (name), target_id (patients), activity2user_id, start and finish timestamp of the activity. Visualization the datadf.plot()Check the bar plot of labelsactivity_map = { 1: "Vital", 2: "Meal / medication", 3: "Oral care", 4: "Excretion", 5: "Bathing / wiping", 6: "Treatment", 7: "Morning gathering / exercises", 8: "Rehabilitation / recreation", 9: "Morning care", 10: "Daytime user response", 11: "Night care", 12: "Nighttime user response", 13: "Family / guest response", 14: "Outing response", 19: "Get up assistance", 20: "Change dressing assistance", 21: "Washing assistance", 27: "Emergency response such as acciden", 15: "Linen exchange", 16: "Cleaning", 23: "Preparation and checking of goods", 24: "Organization of medications", 17: "Handwriting recording", 18: "Delegating / meeting", 22: "Doctor visit correspondence", 25: "Family / doctor contact", 26: "Break", 28: "Special remarks / notes" } df_label["activity_type_e"]=df_label["activity_type_id"].map(activity_map) df_label['activity_type_e'].value_counts().plot.bar()Pre-processing the data The missing value and duplicated value rows are dropped. But we can also use several methods to handle missing value, it depends on your pipelines.All the values are sorted by datetimedf = df.dropna() df = df.sort_values('datetime') df = df.drop_duplicates()In the label file, nan and duplicated value rows are also dropped.df_label = df_label.drop_duplicates('id') df_label = df_label.dropna()Now, we only get the label of user we utilizedf_label_user=df_label[df_label['user_id']==df['subject_id'].sample(1).values[0]]We change all the timestamp data into the same data typedf['datetime']= pd.to_datetime(df['datetime'], format='%Y-%m-%dT%H:%M:%S.%f%z') df_label_user['start'] = pd.to_datetime(df_label_user['start'], format='%Y-%m-%d %H:%M:%S %z') df_label_user['finish'] = pd.to_datetime(df_label_user['finish'], format='%Y-%m-%d %H:%M:%S %z')We can check how the data change after pre-processingdf.head(5) df_label_user.head(5)Segmentation We try to reset the index valuedf_label_user=df_label_user.reset_index() df_label_user=df_label_user.drop(columns='index') df_label_user.head(5)Segment the data by the timestamp given by label file.Every segment windows are extracted by the start and finish time of the activity in label file.seg_list = [] seg_label_list = [] for i in range(len(df_label_user)): seg = df[(df["datetime"] >=df_label_user['start'][i]) & (df["datetime"] <= df_label_user['finish'][i])] seg_label = df_label_user["activity_type_id"][i] if (len(seg)!=0): seg_list.append(seg) seg_label_list.append(seg_label)Features Extraction In this tutorial, we extract 4 main features: STD, Average, Max, Min of 3 coordinates.def get_features(x_data): #Set features list features = [] #Set columns name list DFclist=list(x_data.columns) #Calculate features (STD, Average, Max, Min) for each data columns X Y Z for k in DFclist: # std features.append(x_data[k].std(ddof=0)) # avg features.append(np.average(x_data[k])) # max features.append(np.max(x_data[k])) # min features.append(np.min(x_data[k])) return features 
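# Each segmented window becomes one feature row: [std, mean, max, min] for each of x, y, z (12 values in total).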
features_list = [] for i in seg_list: #Drop 2 columns 'subject_id' and 'datetime', we only use 3 columns x, y, z i = i.drop(columns=['subject_id','datetime']) features_list.append(get_features(i))Training The Random Forest Model is utilized in this tutorialfrom sklearn.ensemble import RandomForestClassifier model_ml = RandomForestClassifier(n_estimators=500,n_jobs=-1)Divide data into train and test file to evaluate the resultsfrom sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(features_list, seg_label_list, test_size=0.3, random_state=42)Train the modelmodel_ml.fit(X_train, y_train)Check the resultsfrom sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix y_predict = model_ml.predict(X_test) print(classification_report(y_test,y_predict)) confusion_matrix(y_test, y_predict)precision recall f1-score support 1 0.00 0.00 0.00 0 4 0.60 0.33 0.43 9 7 1.00 0.50 0.67 2 10 0.42 0.71 0.53 7 13 0.00 0.00 0.00 1 accuracy 0.47 19 macro avg 0.40 0.31 0.32 19 weighted avg 0.54 0.47 0.47 19This notebook is created by . For any feedback or suggestion, please contact me via my email, (). 🐍 Python 🐍 Modular programming in PythonThis notebook will cover the following topics:- Introduction - 1. Functions - 2. Lambda functions - 3. Built-in functions - map function - filter function - enumerate function - zip function - 4. Classes Introduction ***Modular programming*** in Python is a very important tool for modern developers. To create robust systems that last, you need to know how to organize your programs so that they can grow over time. The techniques of modular programming and the specific use of Python modules and packages give us tools that we need to succeed as expertise in the fast-changing programming landscape. 1. Functions A function is a block of code that only runs when it is called. Functions are used to reduce the repetition in code.```Pythondef function_name(inputs): """ a summary Parameters ---------- inputs : type description Returns ------- type description """ block of code return result```# Example 1 def add (a , b): """ add is used to calculate the summation of two variables Parameters ---------- a : Numeric a numeric variable b : Numeric a numeric variable Returns ------- c : Numeric a + b """ c = a + b return cadd(2,3)# Calling a function add(2, 3) # Example 2 def subtract (a , b): """ subtract is used to calculate the subtraction of two variables Parameters ---------- a : Numeric a numeric variable b : Numeric a numeric variable Returns ------- c : Numeric a - b """ c = a - b return c subtract(2, 3) # How to get the documentation of a function print(add.__doc__)add is used to calculate the summation of two variables Parameters ---------- a : Numeric a numeric variable b : Numeric a numeric variable Returns ------- c : Numeric a + b2. Lambda functions We can make a function in one line using `lambda` as```Pythonfunction_name = lambda input1, input2: statement```add_lambda = lambda a, b: a + b add_lambda(2, 3)3. Built in functions Python has a set of built-in functions.You can find a list of them here.# Function for absolute value abs(-3) a = '2' print(type(a), type(float(a))) Let's learn some more useful and complicated built-in functions. 3.1 `map` function The `map()` function applies a specific function for each item in an iterable structure.```Pythonmap(function_name, iterable)```add_lambda = lambda a, b: a + b add_lambda(1, 2) # By entering 2 lists, addition becomes concatenation. 
add_lambda([1, 3], [3, 2]) # To add up each two items, we need to use the map() x = map(add_lambda, [1, 3], [3, 2])The output of `map()` has a type of map! 🤯# Type is another built-in function type(x)So, we use another built-in function like `list()` to get our data in list type.list(x)3.2 `filter` function The `filter()` function returns an iterator where the items are filtered through a function to test if the item is accepted or not.def day_check(day): workday_list = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri'] return day in workday_list my_list = ['Python', 'May', 1, 'Mon', 'Fri', 'Sun', 'Sunday'] x = filter(day_check, my_list) type(x)As is shown, the output of `filter()` has a type of filter. We use another build-in function like `tuple()` to get the data.tuple(x)3.3 `enumerate` function The `enumerate()` function takes a collection and returns it as an enumerate object.x = ['France', 'Japan', 'USA'] y = enumerate(x) y list(y) for i, country in enumerate(x): print(country) def even_items(iterable): return [v for i, v in enumerate(iterable, start=1) if not i % 2] seq = list(range(1, 11)) print(seq) even_items(seq)Reference 3.4 `zip` function Python’s `zip()` function creates an iterator of tuples that will aggregate elements from two or more iterables.a = [1, 2, 5, 1.3] b = ['Nike', ' Adidas', 'Soccer', 'Volleyball'] c = zip(a, b) c list(c)Reference 4. Classes Python is an ***object-oriented programming language*** and almost everything is an object in Python with its own attributes and methods TO create a class we use the keyword `class`class Calculator: intro = 'Hi, this is my calculator! ' def add(self, a, b): return a + b def multiply(self, a, b): return a * b MyClass = Calculator() # Methods MyClass.add(1, 2) # Attribute print (MyClass.intro)Hi, this is my calculator!Convolutional NetworksSo far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead.First you will implement several layer types that are used in convolutional networks. 
You will then use these layers to train a convolutional network on the CIFAR-10 dataset.# As usual, a bit of setup import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.cnn import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient from cs231n.layers import * from cs231n.fast_layers import * from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) import csv import numpy as np csv_file = open('cs231n/datasets/fer2013/fer2013.csv') reader_file = csv.reader(csv_file) def read_faces_csv(reader_file, center=True): """ Function that takes as input file a csv.reader() instance and assumes the following formatting: emotion, pixels (2034 of them), usage (train, test, val) Returns the following numpy arrays: - X_train, y_train (respectively (N, 48, 48), (N,) representing raw grayscale pixels and emotion labels) - X_test, y_test - X_val, y_val """ # Discard header row = next(reader_file) X_train_list, y_train_list = [], [] X_test_list, y_test_list = [], [] X_val_list, y_val_list = [], [] N_train, N_test, N_val = 0, 0, 0 for row in reader_file: y_str, X_row_str, data_type = row y = int(y_str) X_row_strs = X_row_str.split(' ') X_row = [float(x) for x in X_row_strs] if data_type == 'PublicTest': y_test_list.append(y) X_test_list.append(X_row) N_test += 1 elif data_type == 'PrivateTest': y_val_list.append(y) X_val_list.append(X_row) N_val += 1 else: y_train_list.append(y) X_train_list.append(X_row) N_train += 1 X_train = np.asarray(X_train_list).astype('float64').reshape((N_train, 48, 48)) y_train = np.asarray(y_train_list) X_test = np.asarray(X_test_list).astype('float64').reshape((N_test, 48, 48)) y_test = np.asarray(y_test_list) X_val = np.asarray(X_val_list).astype('float64').reshape((N_val, 48, 48)) y_val = np.asarray(y_val_list) # decide to mean-center or not if center: train_mean = X_train.mean(axis = 0) X_train -= train_mean X_test -= train_mean X_val -= train_mean ######### return X_train, y_train, X_test, y_test, X_val, y_val X_train, y_train, X_test, y_test, X_val, y_val = read_faces_csv(reader_file) #(28709, 48, 48) (28709,) (3589, 48, 48) (3589,) N, D, D = X_train.shape N_val = X_val.shape[0] N_test = X_test.shape[0] X_train2 = np.zeros((N,3,D,D)) X_val2 = np.zeros((N_val,3,D,D)) X_test2 = np.zeros((N_test,3,D,D)) #convert graysclae to rgb for i in xrange(N): for j in xrange(3): X_train2[i,j,:,:] = X_train[i,:,:] for i in xrange(N_val): for j in xrange(3): X_val2[i,j,:,:] = X_val[i,:,:] for i in xrange(N_test): for j in xrange(3): X_test2[i,j,:,:] = X_test[i,:,:] num_train = 100 small_data = { 'X_train': X_train2[:num_train], 'y_train': y_train[:num_train], 'X_val': X_val2, 'y_val': y_val, } print X_train.shape, y_train.shape, X_val.shape, y_test.shape model = ThreeLayerConvNet(weight_scale=5e-3) solver = Solver(model, small_data, num_epochs=15, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 5e-4, }, verbose=True, print_every=1) solver.train() data = { 'X_train': X_train2, 'y_train': y_train, 'X_val': X_val2, 'y_val': y_val, 'X_test': 
X_test2, 'y_test': y_test, } model = ThreeLayerConvNet(weight_scale=5e-3) solver = Solver(model, small_data, num_epochs=10, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 5e-4, }, verbose=True, print_every=1) solver.train() y_test_pred = np.argmax(model.loss(data['X_test']), axis=1) y_val_pred = np.argmax(model.loss(data['X_val']), axis=1) print 'Validation set accuracy: ', (y_val_pred == data['y_val']).mean() print 'Test set accuracy: ', (y_test_pred == data['y_test']).mean()Inspectimport pandas as pd import numpy as np #need the encoding set to "ISO-8859-1" so it will read the weird characters properly movies = pd.read_csv('https://raw.githubusercontent.com/afroman32/Unit-2-Build/master/movies.csv', encoding = "ISO-8859-1") print(movies.shape) movies.head() movies['score'].describe()Wrangle# director track record, movies success by director # make sure to train on features that would come before the target # number of unique directors movies['director'].nunique() # create a list of directors directors = list(movies['director']) # remove the duplicates from the list directors = list(dict.fromkeys(directors)) # check length of directors, should be 2759 to match unique directors print(len(directors)) type(directors) # test to see average for directors sub = movies[movies['director'] == movies['director'][3]] sub['gross'].sum()/len(sub['gross']) ave_gross = [] # run through each director and average the movie gross revenue for i in range(0,len(directors)): # create a subset of all directors in index i sub = movies[movies['director'] == directors[i]] # average the gross revenues of each movie average=sub['gross'].sum()/len(sub['gross']) # cast float to int to remove decimal points then add it to ave_gross list ave_gross.append(int(average)) # check if ave_gross has correct number of values print(len(ave_gross)) print(directors[i]) sub movies[movies['director']==directors[-2]] ave_gross[-1] # create a dictionary with directors and their average gross dir_ave = {directors[i]: ave_gross[i] for i in range(len(directors))} len(dir_ave) print(directors[0]) res = list(dir_ave.keys()).index('') res # pass the dictionary to the movies dataframe movies['dir_ave_gross']= movies['director'].map(dir_ave) movies.head() # number of unique writers movies['writer'].nunique() # create a list of writers writers = list(movies['writer']) # remove the duplicates from the list writers = list(dict.fromkeys(writers)) # check length of writers, should be 4199 to match unique writers print(len(writers)) type(writers) # test to see average for random writer sub = movies[movies['writer'] == movies['writer'][3]] sub['gross'].sum()/len(sub['gross']) writer_gross = [] # run through each writers and average the movie gross revenue for i in range(0,len(writers)): # create a subset of all writers in index i sub = movies[movies['writer'] == writers[i]] # average the gross revenues of each movie average=sub['gross'].sum()/len(sub['gross']) # cast float to int to remove decimal points then add it to ave_gross list writer_gross.append(int(average)) # check if writer_gross has correct number of values, expect 4199 len(writer_gross) # create a dictionary with writers and their average gross dir_ave = {writers[i]: writer_gross[i] for i in range(len(writers))} # pass the dictionary to the movies dataframe movies['writer_ave_gross']= movies['writer'].map(dir_ave) movies.head()Train, Val, Test Split (60,20,20)movies.sort_values(by = 'released', inplace = True) movies.head() movies.reset_index(inplace = True, drop = 
True) movies.head() # set end year for training data set train_lim = movies['year'].min()+19 # set beginning year for test data test_lim = movies['year'].max()-4 # movies in years less than training end year train = movies[movies['year']<= train_lim] # all movies in years greater than training end year val_temp = movies[movies['year'] > train_lim] # movies in years less than test year and greater than training end year val = val_temp[val_temp['year'] < test_lim] # movies in years greater than test year test = movies[movies['year']>=test_lim] train.shape, val.shape, test.shapeAssignment 1 Baselineaverage = movies['gross'].mean() print(f'{average:,.0f}') from sklearn.metrics import mean_absolute_error train['gross'].mean() y_train = train['gross'] y_test = test['gross'] base_guess = y_train.mean() # test error y_base_pred = [base_guess] * len(y_test) mae_test = mean_absolute_error(y_test, y_base_pred) print(f'Mean Absolute Error: ${mae_test:,.2f}')Mean Absolute Error: $45,726,662.13Basic Model!pip install category_encoders==2.* # set target target = 'gross' # set features features = ['genre', 'rating', 'year', 'dir_ave_gross', 'writer_ave_gross'] # set X matrices and y vectors X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] y_test = test[target] import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error from sklearn.pipeline import make_pipeline # from math import sqrt pipeline = make_pipeline( ce.OrdinalEncoder(), # SimpleImputer(strategy = 'mean'), StandardScaler(), LinearRegression() ) # Fit on training set pipeline.fit(X_train, y_train) # make predictions y_pred = pipeline.predict(X_test) # test mean absolute error mae = mean_absolute_error(y_test, y_pred) print(f'Test Mean Absolute Error: {mae:,.2f}')Test Mean Absolute Error: 19,734,155.80Input ValidationInput validation code verifies that user supplied data, such as text from the input() function, is formatted appropriately.* try/except* isdigit()* pyinputplus try/exceptwhile True: try: integer = int(input ("Please enter your age (integer): ") ) break except ValueError: print ("Please enter as integer") prompt = "Please enter your age (integer): " prompt2 = "Please re-enter as integer" print (integer) print ('Let us try again') valid = False while not valid: try: integer = int( input(prompt) ) # no need to break valid = True # this is earsier to understand than "while True", as it can avoid using break except ValueError: print (prompt2) print(integer, 'is an integer.')Please enter your age (integer): 1 1 Let us try again Please enter your age (integer): 10 10 is an integer.isdigit()valid = False while not valid: integer = input(prompt) if integer.isdigit(): valid = True else: print (prompt2) print(integer, 'is an integer.')Please enter your age (integer): 4 4 is an integer.Module: pyinputs!pip install PyInputPlus import pyinputplus as pi num1 = pi.inputNum('Please enter your age (decimal point is allowed):') print (num1, 'is a number')Please enter your age (decimal point is allowed):4.5 4.5 is a numberTide Tuning Notebookimport matplotlib.pyplot as plt import numpy as np %matplotlib inline N36_Cin = {'North': {'S2': {'Pha': -9.2307801486176153, 'Amp': 1.038819257728969}, 'M2': {'Pha': 2.6888859885826322, 'Amp': 0.88935590323168601}, 'O1': {'Pha': -2.5, 'Amp': 0.96099403975860098}, 'Q1': {'Pha': -5, 'Amp': 
0.955}, 'N2': {'Pha': -4.6638540853471788, 'Amp': 0.98340380585090903}, 'K1': {'Pha': 4, 'Amp': 1.0204698354704094}}, 'West': {'S2': {'Pha': 1.6977373796987445, 'Amp': 1.042595155967583}, 'M2': {'Pha': -7.2661477680807574, 'Amp': 0.87208325073801229}, 'O1': {'Pha': 2.0, 'Amp': 1.09}, 'Q1': {'Pha': 3.0, 'Amp': 0.99611855906827507}, 'N2': {'Pha': 0.8, 'Amp': 0.98014138690691721}, 'K1': {'Pha': -2.8032934372377518, 'Amp': 0.98438671203095895}}} N36_Bin = {'North': {'Q1': {'Pha': -6, 'Amp': 0.95049054590546012}, 'K1': {'Pha': -4, 'Amp': 1.02}, 'M2': {'Pha': 2.3827828994834683, 'Amp': 0.88558971714596835}, 'N2': {'Pha': -4.6678925773866355, 'Amp': 0.98323714871353618}, 'S2': {'Pha': -8.8871723559264488, 'Amp': 1.0379680543704286}, 'O1': {'Pha': -3, 'Amp': 0.96353680059599056}}, 'West': {'Q1': {'Pha': 0, 'Amp': 0.9943368632471852}, 'K1': {'Pha': -4, 'Amp': 0.98682615564783738}, 'M2': {'Pha': -7.4948755798835851, 'Amp': 0.87901024019047747}, 'N2': {'Pha': -0.2, 'Amp': 0.9797124228733876}, 'S2': {'Pha': 1.6, 'Amp': 1.0447724256431379}, 'O1': {'Pha': 1.9, 'Amp': 1.06}}} TS13in = {'North': {'M2': {'Amp': 0.8845477202697829, 'Pha': 2.3403076176485551}, 'O1': {'Amp': 0.96272350844507903, 'Pha': 12.0}, 'S2': {'Amp': 1.0379680543704286, 'Pha': -9.2}, 'K1': {'Amp': 1.0330857501316655, 'Pha': -19.0}, 'Q1': {'Amp': 0.97916112578543213, 'Pha': 4.0}, 'N2': {'Amp': 0.9832161382284451, 'Pha': -4.6840045875437299}}, 'West': {'M2': {'Amp': 0.88170684906405461, 'Pha': -7.5434415446258862}, 'O1': {'Amp': 1.0309632498092482, 'Pha': 12.2}, 'S2': {'Amp': 1.0446321721304719, 'Pha': 1.73}, 'K1': {'Amp': 1.0243962888030014, 'Pha': -14.26898720916071}, 'Q1': {'Amp': 0.9943368632471852, 'Pha': 9.85}, 'N2': {'Amp': 0.9797124228733876, 'Pha': 1.0}}} TS12in = {'West': {'K1': {'Pha': -14.26898720916071, 'Amp': 1.0243962888030014}, 'S2': {'Pha': 1.73, 'Amp': 1.0446178034256923}, 'Q1': {'Pha': 9.8, 'Amp': 0.9943368632471852}, 'O1': {'Pha': 12.050236466923987, 'Amp': 1.0305322478848729}, 'N2': {'Pha': 1.1, 'Amp': 0.9797124228733876}, 'M2': {'Pha': -7.5808775409993272, 'Amp': 0.883}}, 'North': {'K1': {'Pha': -15.0, 'Amp': 1.0}, 'S2': {'Pha': -9.132874394421526, 'Amp': 1.0379680543704286}, 'Q1': {'Pha': 1.6326019921444512, 'Amp': 0.99137048561193775}, 'O1': {'Pha': 12.399628661111066, 'Amp': 0.96285725026965896}, 'N2': {'Pha': -4.6862481353032051, 'Amp': 0.98321613822844511}, 'M2': {'Pha': 2.3120312919802783, 'Amp': 0.88373899355715846}}} TS11in = {'North': {'K1': {'Pha': -11.0, 'Amp': 1.0}, 'S2': {'Pha': -9.132874394421526, 'Amp': 1.0379680543704286}, 'Q1': {'Pha': 7.632601992144451, 'Amp': 1.023011793904681}, 'M2': {'Pha': 2.2779834668333558, 'Amp': 0.88270427351908831}, 'N2': {'Pha': -4.689607824516905, 'Amp': 0.98318244724240378}, 'O1': {'Pha': 18.399628661111066, 'Amp': 0.96361058463564087}}, 'West': {'K1': {'Pha': -14.26898720916071, 'Amp': 1.0243962888030014}, 'S2': {'Pha': 1.73, 'Amp': 1.0447177932216045}, 'Q1': {'Pha': 9.717984938287248, 'Amp': 0.99433686324718518}, 'M2': {'Pha': -7.62709254474166, 'Amp': 0.884}, 'N2': {'Pha': 1.4777129541142697, 'Amp': 0.9797124228733876}, 'O1': {'Pha': 12.050236466923987, 'Amp': 1.0299456999774774}}} TS10in = {'North': {'Q1': {'Amp': 1.0825116171662457, 'Pha': 12.632601992144451}, 'S2': {'Amp': 1.0379680543704286, 'Pha': -9.332874394421525}, 'N2': {'Amp': 0.98355159834393069, 'Pha': -4.6814662131266642}, 'O1': {'Amp': 0.96565598198952585, 'Pha': 28.399628661111066}, 'K1': {'Amp': 0.8200000000000001, 'Pha': -9.0}, 'M2': {'Amp': 0.88138318239218327, 'Pha': 2.2386464647335269}}, 'West': 
{'Q1': {'Amp': 0.99470837890616992, 'Pha': 9.517984938287249}, 'S2': {'Amp': 1.0446956705700003, 'Pha': 1.7247857894725884}, 'N2': {'Amp': 0.9797124228733876, 'Pha': 0.7610588345320388}, 'O1': {'Amp': 1.0290395717320522, 'Pha': 11.550236466923987}, 'K1': {'Amp': 1.05, 'Pha': -16.26898720916071}, 'M2': {'Amp': 0.88774830007038597, 'Pha': -7.6851903099544563}}} TS9in = {'North': {'O1': {'Amp': 0.9883470888743955, 'Pha': 23.399628661111066}, 'M2': {'Amp': 0.87832866964865763, 'Pha': 2.1477843647649064}, 'N2': {'Amp': 0.98342713711111474, 'Pha': -4.6896078245169051}, 'K1': {'Amp': 0.8200000000000001, 'Pha': -9.0}, 'Q1': {'Amp': 1.1482701669875641, 'Pha': 12.632601992144451}, 'S2': {'Amp': 1.0379680543704286, 'Pha': -8.4439557398943386}}, 'West': {'O1': {'Amp': 1.04, 'Pha': 12.550236466923987}, 'M2': {'Amp': 0.8906867359929187, 'Pha': -7.8166410932266306}, 'N2': {'Amp': 0.9797124228733876, 'Pha': 1.6187676872376835}, 'K1': {'Amp': 1.0243962888030014, 'Pha': -14.26898720916071}, 'Q1': {'Amp': 0.99162436129565101, 'Pha': 10.517984938287249}, 'S2': {'Amp': 1.0445332886627665, 'Pha': 1.7247857894725884}}} TS8in = {'North': {'Q1': {'Pha': 17.63260199214445, 'Amp': 1.2}, 'N2': {'Pha': -4.6764732709324885, 'Amp': 0.98307570791396637}, 'O1': {'Pha': 33.399628661111066, 'Amp': 0.96834708887439547}, 'K1': {'Pha': -7.0, 'Amp': 0.77}, 'M2': {'Pha': 1.9437797162850838, 'Amp': 0.87105935851246963}, 'S2': {'Pha': -8.3328743944215251, 'Amp': 1.0379680543704286}}, 'West': {'Q1': {'Pha': 10.517984938287249, 'Amp': 0.99510234222303384}, 'N2': {'Pha': 1.7610588345320388, 'Amp': 0.9797124228733876}, 'O1': {'Pha': 12.550236466923987, 'Amp': 1.04}, 'K1': {'Pha': -16.26898720916071, 'Amp': 1.05}, 'M2': {'Pha': -8.0785860056219647, 'Amp': 0.89475860223501025}, 'S2': {'Pha': 1.7247857894725884, 'Amp': 1.050516310380342}}} TS7in = {'West': {'S2': {'Amp': 1.0505163103803421, 'Pha': 3.7247857894725884}, 'N2': {'Amp': 0.96971242287338755, 'Pha': 1.7940575230642932}, 'O1': {'Amp': 1.02153509818521, 'Pha': 14.550236466923987}, 'M2': {'Amp': 0.89853811467771461, 'Pha': -8.7976656174843022}, 'K1': {'Amp': 1.0800017009372505, 'Pha': -18.268987209160709}, 'Q1': {'Amp': 0.97358470939406028, 'Pha': 13.517984938287249}}, 'North': {'S2': {'Amp': 1.0479680543704286, 'Pha': -8.5775631728451902}, 'N2': {'Amp': 0.9899441635274836, 'Pha': -4.9011012456689809}, 'O1': {'Amp': 0.98667683685586627, 'Pha': 43.399628661111066}, 'M2': {'Amp': 0.86150074612567795, 'Pha': 0.63888680551324561}, 'K1': {'Amp': 0.75, 'Pha': -5.0}, 'Q1': {'Amp': 1.2518950248746064, 'Pha': 27.632601992144451}}} TS6in = {'North': {'N2': {'Pha': -5.0340458851303485, 'Amp': 0.99093746613836042}, 'Q1': {'Pha': 31.76446905720422, 'Amp': 1.29800213507216}, 'S2': {'Pha': -8.5545198974892447, 'Amp': 1.0490562377934676}, 'O1': {'Pha': 46.267709352580937, 'Amp': 0.99784655545890988}, 'M2': {'Pha': 2.729140847167737, 'Amp': 0.87131594055414263}, 'K1': {'Pha': -5.9289384852688727, 'Amp': 0.74675608275025374}}, 'West': {'N2': {'Pha': 1.8035783055008339, 'Amp': 0.96705193710790061}, 'Q1': {'Pha': 14.139475946283159, 'Amp': 0.96348802266580769}, 'S2': {'Pha': 4.0093996059906685, 'Amp': 1.0506161230881381}, 'O1': {'Pha': 15.072578656156807, 'Amp': 1.0190547114607584}, 'M2': {'Pha': -9.6711874443139667, 'Amp': 0.89769234884245885}, 'K1': {'Pha': -18.747764436328552, 'Amp': 1.0854844970422486}}} TS5in = {'North': {'M2': {'Amp': 0.8503826575559933, 'Pha': 1.4765232701548285}, 'S2': {'Amp': 1.060093886807302, 'Pha': -6.946359019746533}, 'Q1': {'Amp': 1.5368297096448922, 'Pha': 
60.42364948634719}, 'K1': {'Amp': 0.773677834111803, 'Pha': -108.3072633495973}, 'N2': {'Amp': 0.9871188616242617, 'Pha': -4.530096424751415}, 'O1': {'Amp': 0.923875247607606, 'Pha': 90.88987314530584}}, 'West': {'M2': {'Amp': 0.8765426451574038, 'Pha': -6.766842787292333}, 'S2': {'Amp': 1.0035869299692508, 'Pha': 8.728134209383496}, 'Q1': {'Amp': 0.9358987350001928, 'Pha': 28.204796180164475}, 'K1': {'Amp': 1.128562033874093, 'Pha': -27.67836501066415}, 'N2': {'Amp': 0.9293430555315706, 'Pha': 1.9060762160877864}, 'O1': {'Amp': 0.9736846815080314, 'Pha': 27.43518571910129}}} TS4in = {'West': {'Q1': {'Amp': 0.95973008246671854, 'Pha': 11.726283958259742}, 'S2': {'Amp': 1.0560814538710048, 'Pha': 2.7883778835861137}, 'K1': {'Amp': 1.0836454717694404, 'Pha': -17.258576900678481}, 'M2': {'Amp': 0.91298101239070584, 'Pha': -12.883470958465448}, 'N2': {'Amp': 0.97616005994525723, 'Pha': 0.62431409913450864}, 'O1': {'Amp': 1.0224087908142427, 'Pha': 11.993685651677305}}, 'North': {'Q1': {'Amp': 1.2501178259629055, 'Pha': 22.116883975191229}, 'S2': {'Amp': 1.0453077941440578, 'Pha': -9.2572751948132552}, 'K1': {'Amp': 0.74541612168305771, 'Pha': -55.123000171765071}, 'M2': {'Amp': 0.93073892518672774, 'Pha': 5.8729325945818829}, 'N2': {'Amp': 0.99352352620481288, 'Pha': -5.3488189574785938}, 'O1': {'Amp': 0.93735590315618389, 'Pha': 33.986361779272698}}} TS3in = {'West': {'Q1': {'Amp': 1.0210325925510757, 'Pha': 19.713188044831874}, 'S2': {'Amp': 1.0619670281785616, 'Pha': 1.6225000000000023}, 'K1': {'Amp': 1.0418394160583937, 'Pha': -16.731369863013693}, 'M2': {'Amp': 0.89799999999999947, 'Pha': -13.180000000000003}, 'N2': {'Amp': 0.98119624254615978, 'Pha': -0.95666666666667055}, 'O1': {'Amp': 1.0285514282124055, 'Pha': 11.929584148727976}}, 'North': {'Q1': {'Amp': 1.6950033029840668, 'Pha': -17.727035647279536}, 'S2': {'Amp': 0.99589688506981933, 'Pha': -27.76166666666667}, 'K1': {'Amp': 0.76388461538461572, 'Pha': -40.112195121951224}, 'M2': {'Amp': 0.93099999999999972, 'Pha': 5.9250000000000007}, 'N2': {'Amp': 0.96926862611073172, 'Pha': -24.561666666666667}, 'O1': {'Amp': 1.2414115152944531, 'Pha': 35.273504645760752}}} TS2in = {'West': {'M2': {'Amp': 0.898, 'Pha': -13.18}, 'K1': {'Amp': 1.076, 'Pha': -18.0}, 'O1': {'Amp': 0.999, 'Pha': 13.6}, 'S2': {'Amp': 1.037, 'Pha': 0.9}, 'N2': {'Amp': 0.974, 'Pha': 0.53}, 'Q1': {'Amp': 0.988, 'Pha': 12.9} }, 'North' : {'M2': {'Amp': 0.931, 'Pha': 6}, 'K1': {'Amp': 0.769, 'Pha': -24.6}, 'O1': {'Amp': 0.998, 'Pha': 53.}, 'S2': {'Amp': 1.044, 'Pha': -9.5}, 'N2': {'Amp': 0.993, 'Pha': -5.5}, 'Q1': {'Amp': 1.344, 'Pha': 35.3} } } TS1in = {'West': {'M2': {'Amp': 0.909, 'Pha': -13.47}, 'K1': {'Amp': 1.016, 'Pha': -4.77}, 'O1': {'Amp': 1.055, 'Pha': -1.88}, 'S2': {'Amp': 1.075, 'Pha': -4.5}, 'N2': {'Amp': 1.032, 'Pha': -0.27}, 'Q1': {'Amp': 1.047, 'Pha': -3.28} }, 'North': {'M2': {'Amp': 0.953, 'Pha': 5.7}, 'K1': {'Amp': 0.764, 'Pha': 28.4}, 'O1': {'Amp': 0.968, 'Pha': 0.4}, 'S2': {'Amp': 1.020, 'Pha': -11.2}, 'N2': {'Amp': 0.990, 'Pha': -5.3}, 'Q1': {'Amp': 1.071, 'Pha': -0.6} } } def initialize_dict(): TS = {'West': {'M2': {'Amp': 0, 'Pha': 0}, 'K1': {'Amp': 0, 'Pha': 0}, 'O1': {'Amp': 0, 'Pha': 0}, 'S2': {'Amp': 0, 'Pha': 0}, 'N2': {'Amp': 0, 'Pha': 0}, 'Q1': {'Amp': 0, 'Pha': 0} }, 'North': {'M2': {'Amp': 0, 'Pha': 0}, 'K1': {'Amp': 0, 'Pha': 0}, 'O1': {'Amp': 0, 'Pha': 0}, 'S2': {'Amp': 0, 'Pha': 0}, 'N2': {'Amp': 0, 'Pha': 0}, 'Q1': {'Amp': 0, 'Pha': 0} } } return TS TSNin = initialize_dict() TSN2in = initialize_dict() for TS in (TS1in, TS2in, TS3in, 
TS4in, TS5in, TS6in, TS7in, TS8in, TS9in, TS10in, TS11in, TS12in, TS13in, N36_Bin, N36_Cin): for const, symbol in zip(('M2', 'S2', 'N2'), ('o','s','^')): plt.plot(TS['West'][const]['Amp'], TS['West'][const]['Pha'],symbol) for TS in (TS1in, TS2in, TS3in, TS4in, TS5in, TS6in, TS7in, TS8in, TS9in, TS10in, TS11in, TS12in, TS13in, N36_Bin, N36_Cin): for const, symbol in zip(('K1', 'O1', 'Q1'), ('o','s','^')): plt.plot(TS['West'][const]['Amp'], TS['West'][const]['Pha'],symbol) Ts1out = {'West': {'Q1': {'Amp': 1.0002572547617277, 'Pha': 3.0075447504032695}, 'S2': {'Amp': 0.95307125754457123, 'Pha': 5.7198115533444138}, 'K1': {'Amp': 1.0585313055637906, 'Pha': -13.200757777125908}, 'M2': {'Amp': 0.98799240608799199, 'Pha': 0.29234517194957516}, 'N2': {'Amp': 0.93318917930521317, 'Pha': 1.0654603431878371}, 'O1': {'Amp': 1.0029590524663352, 'Pha': 2.3038937822556562}}, 'North': {'Q1': {'Amp': 1.263438995383616, 'Pha': -17.096293558632283}, 'S2': {'Amp': 1.0004497332114852, 'Pha': 1.9782821187744162}, 'K1': {'Amp': 1.0063487360512036, 'Pha': -52.899421715538068}, 'M2': {'Amp': 0.97668547636460912, 'Pha': 0.30302472717710316}, 'N2': {'Amp': 0.97999881464282712, 'Pha': 0.082189463385645922}, 'O1': {'Amp': 1.0365442709803026, 'Pha': -0.42241125887312592}}} Ts2out = {'West': {'Q1': {'Amp': 0.98356590441914449, 'Pha': -1.4334345475063373}, 'S2': {'Amp': 1.0216419632851881, 'Pha': 0.90779104737045202}, 'K1': {'Amp': 0.99266606835333504, 'Pha': 1.3508016934226554}, 'M2': {'Amp': 1.0015851836909175, 'Pha': -0.058748044962015362}, 'N2': {'Amp': 0.99537214954697717, 'Pha': 0.47459245342846879}, 'O1': {'Amp': 1.0336150491261633, 'Pha': -0.48588841857058185}}, 'North': {'Q1': {'Amp': 0.90960982150305669, 'Pha': -28.835321790582896}, 'S2': {'Amp': 1.0051059524147195, 'Pha': 0.76024295755041749}, 'K1': {'Amp': 1.2659586959029192, 'Pha': -11.966162418417269}, 'M2': {'Amp': 1.0001080402583795, 'Pha': -0.12727645118474129}, 'N2': {'Amp': 1.0024693099628155, 'Pha': 0.21684096723782886}, 'O1': {'Amp': 1.1317630850593585, 'Pha': -34.12635889300995}}} #TS3o Ts3out = {'West': {'Q1': {'Amp': 0.95110110499464851, 'Pha': -7.6426784791072464}, 'S2': {'Amp': 1.0202025932595069, 'Pha': 1.0144847613380019}, 'K1': {'Amp': 1.0272219086326759, 'Pha': 0.13292497155501992}, 'M2': {'Amp': 1.0008871425553614, 'Pha': -0.026401458001528565}, 'N2': {'Amp': 1.0045383235506544, 'Pha': 2.8578421947387653}, 'O1': {'Amp': 1.0284224577447008, 'Pha': 0.15177275062089279}}, 'North': {'Q1': {'Amp': 1.0491593153643064, 'Pha': 11.38432262097092}, 'S2': {'Amp': 1.0633942810638697, 'Pha': 22.092814038315055}, 'K1': {'Amp': 1.399915987512546, 'Pha': -14.492809784225329}, 'M2': {'Amp': 0.99934510071026761, 'Pha': -0.1857003075421062}, 'N2': {'Amp': 1.0354092178535133, 'Pha': 22.631314821886065}, 'O1': {'Amp': 1.0853916220804463, 'Pha': -25.230049427371426}}} #TS3 TSRout = {'West': {'Q1': {'Amp': 0.9615240748671523, 'Pha': -8.0186552500333903}, 'S2': {'Amp': 1.0314281994044106, 'Pha': 1.5245866303987459}, 'K1': {'Amp': 1.0423440174621237, 'Pha': -0.42662190481817891}, 'M2': {'Amp': 1.0148414225529525, 'Pha': 0.30699215045163986}, 'N2': {'Amp': 1.0190109589854979, 'Pha': 3.1444151751516358}, 'O1': {'Amp': 1.0403371684399034, 'Pha': -0.3682571354704578}}, 'North': {'Q1': {'Amp': 1.0402483648893954, 'Pha': 9.9466892870592574}, 'S2': {'Amp': 1.0644803022440763, 'Pha': 23.072834208666109}, 'K1': {'Amp': 1.3417137185150854, 'Pha': -11.210779203809977}, 'M2': {'Amp': 0.97345565972896475, 'Pha': 1.1010105205821503}, 'N2': {'Amp': 1.0179979559630259, 'Pha': 
24.008950313318181}, 'O1': {'Amp': 1.1091799512735716, 'Pha': -19.851697664023504}}} #TS4 TS4out = {'West': {'Q1': {'Amp': 1.021710281717962, 'Pha': -1.2818051360898437}, 'S2': {'Amp': 1.0048287114712622, 'Pha': -0.77931142110978868}, 'K1': {'Amp': 1.0077723235798881, 'Pha': 0.050270335765687305}, 'M2': {'Amp': 1.0019637630738421, 'Pha': -0.084921979605779865}, 'N2': {'Amp': 0.99488881155208686, 'Pha': 0.31632214863880626}, 'O1': {'Amp': 1.0160040214143051, 'Pha': 0.28326329135653339}}, 'North': {'Q1': {'Amp': 1.1396837053873892, 'Pha': -22.343216315967823}, 'S2': {'Amp': 1.01031637074638, 'Pha': 0.61309589110754814}, 'K1': {'Amp': 1.4993965264182494, 'Pha': -19.036640927224198}, 'M2': {'Amp': 0.99847670218981277, 'Pha': -0.033309406172094214}, 'N2': {'Amp': 1.0048771725470016, 'Pha': 0.16683054836840938}, 'O1': {'Amp': 1.3692040838013475, 'Pha': -29.098224761850673}}} #TS4 w Hollingsworth TSHout = {'West': {'Q1': {'Amp': 1.0119383657601555, 'Pha': -1.9438847794043141}, 'S2': {'Amp': 0.99559468002265994, 'Pha': -1.4019991460643482}, 'K1': {'Amp': 0.98832300736547773, 'Pha': 0.28881171540387657}, 'M2': {'Amp': 0.98854451467568982, 'Pha': -0.5788869325603514}, 'N2': {'Amp': 0.98697982726560152, 'Pha': -0.052406117338830827}, 'O1': {'Amp': 0.99971651564967901, 'Pha': 0.2006417619422507}}, 'North': {'Q1': {'Amp': 1.1336904122250575, 'Pha': -22.262549937009737}, 'S2': {'Amp': 1.019390426248554, 'Pha': 1.0650283890524292}, 'K1': {'Amp': 1.4689939087715977, 'Pha': -19.71301519371076}, 'M2': {'Amp': 1.0095401002831588, 'Pha': 0.5060034580093884}, 'N2': {'Amp': 1.0141608691246866, 'Pha': 0.66414894517527046}, 'O1': {'Amp': 1.3401176934857579, 'Pha': -29.105986664905771}}} TS4newBout = {'North': {'M2': {'Pha': -4.3964093244270543, 'Amp': 0.91366400882544685}, 'K1': {'Pha': -53.184263177832236, 'Amp': 1.0379140075008491}, 'O1': {'Pha': 3.7192481882009076, 'Amp': 1.0229871678905673}, 'Q1': {'Pha': -14.877497666676277, 'Amp': 1.2759573935003046}, 'S2': {'Pha': -2.0854931493603317, 'Amp': 0.92658797320536079}, 'N2': {'Pha': -3.5776867916998754, 'Amp': 0.90777415180494747}}, 'West': {'M2': {'Pha': 6.1166281711731152, 'Amp': 0.96008858153809173}, 'K1': {'Pha': -10.419788109985671, 'Amp': 1.041449499190275}, 'O1': {'Pha': 5.0217119574383133, 'Amp': 0.99181798223604978}, 'Q1': {'Pha': 6.0587241119190622, 'Amp': 1.0155889522120538}, 'S2': {'Pha': 12.056384496970498, 'Amp': 0.9123655647133796}, 'N2': {'Pha': 7.398390288126393, 'Amp': 0.91404237128654797}}} TS5out = {'North': {'M2': {'Pha': 0.42758785421409584, 'Amp': 1.011625002682464}, 'K1': {'Pha': -29.472163528321573, 'Amp': 1.5805376110939042}, 'O1': {'Pha': -83.573669377494497, 'Amp': 1.9482260093882917}, 'Q1': {'Pha': -61.734484942181325, 'Amp': 1.0241774045384158}, 'S2': {'Pha': -1.4895386482369872, 'Amp': 0.99513722966584894}, 'N2': {'Pha': 0.10773931037238071, 'Amp': 1.0111376898211519}}, 'West': {'M2': {'Pha': -1.5309396078788888, 'Amp': 1.0378637494752101}, 'K1': {'Pha': 12.035886712521583, 'Amp': 0.94974179875911324}, 'O1': {'Pha': -3.7224212917422079, 'Amp': 1.0325092803506841}, 'Q1': {'Pha': -8.3677928244752877, 'Amp': 0.99273824278749245}, 'S2': {'Pha': -6.9463119651399552, 'Amp': 1.1154505816612328}, 'N2': {'Pha': -1.3598703918715493, 'Amp': 1.0890526878512701}}} TS6out = {'West': {'O1': {'Amp': 0.99917259570573957, 'Pha': -0.26278166115388046}, 'N2': {'Amp': 1.0244990697827043, 'Pha': 1.224306456276878}, 'Q1': {'Amp': 1.0176497900134822, 'Pha': -1.4806862994981884}, 'S2': {'Amp': 1.0087992347785939, 'Pha': 0.1336154074221394}, 'M2': {'Amp': 
1.009684365613287, 'Pha': 1.2434341458076474}, 'K1': {'Amp': 0.97410589754902577, 'Pha': 3.0078799229218589}}, 'North': {'O1': {'Amp': 1.1099589622402357, 'Pha': -27.838619895114306}, 'N2': {'Amp': 0.98180311923347008, 'Pha': -0.58195399212740995}, 'Q1': {'Amp': 0.91915208049571451, 'Pha': -25.717717368800777}, 'S2': {'Amp': 0.98467109778763395, 'Pha': -1.4704736261345488}, 'M2': {'Amp': 0.99011883203452811, 'Pha': -1.3444498805239675}, 'K1': {'Amp': 1.1904807255499437, 'Pha': -6.1957349307586469}}} TS7out = {'North': {'Q1': {'Pha': -22.886425235514594, 'Amp': 0.93055076235820455}, 'M2': {'Pha': 1.8789511310803277, 'Amp': 1.0423735647108798}, 'K1': {'Pha': -7.0524623691283921, 'Amp': 1.1619354362342378}, 'N2': {'Pha': 2.2311317939641242, 'Amp': 1.0312508215460159}, 'O1': {'Pha': -25.60358248760835, 'Amp': 1.0881114343042568}, 'S2': {'Pha': 1.6180611113048258, 'Amp': 1.0326339034344933}}, 'West': {'Q1': {'Pha': -0.34469260821630598, 'Amp': 0.98149555239898412}, 'M2': {'Pha': 1.3234744539132743, 'Amp': 0.97796155111043792}, 'K1': {'Pha': 3.8762994934517199, 'Amp': 0.95946938123484593}, 'N2': {'Pha': 0.89717076965282549, 'Amp': 0.98678259304211158}, 'O1': {'Pha': 1.0664646301858483, 'Amp': 0.97995456282236715}, 'S2': {'Pha': -0.27450001446825567, 'Amp': 0.97941497283467949}}} TS8out = {'West': {'K1': {'Amp': 0.97294394809883511, 'Pha': 2.7579367053216881}, 'Q1': {'Amp': 0.96966233066943985, 'Pha': 1.9150525736966344}, 'S2': {'Amp': 0.95472229971458944, 'Pha': 1.0372218360624021}, 'N2': {'Amp': 0.95752188775425451, 'Pha': 0.40262808850771226}, 'M2': {'Amp': 0.95858932366390581, 'Pha': 1.1601428772132749}, 'O1': {'Amp': 0.97183294662054664, 'Pha': 2.007318956120443}}, 'North': {'K1': {'Amp': 1.1547120834449451, 'Pha': -7.6129201656615066}, 'Q1': {'Amp': 0.96681474235607734, 'Pha': -15.124112334094548}, 'S2': {'Amp': 1.0513147965750143, 'Pha': 0.20788982904463182}, 'N2': {'Amp': 1.0526603636580936, 'Pha': 1.0521057205134241}, 'M2': {'Amp': 1.0521929700885222, 'Pha': 1.1130570825965833}, 'O1': {'Amp': 1.0868904745440655, 'Pha': -19.161141807354142}}} TS9out = {'North': {'K1': {'Amp': 1.141806975674275, 'Pha': -6.8066122155455275}, 'O1': {'Amp': 1.0790183141135039, 'Pha': -12.693489398286005}, 'N2': {'Amp': 1.0276135396079655, 'Pha': 0.69195097514989357}, 'S2': {'Amp': 1.0267195882687952, 'Pha': -0.21023995360576464}, 'M2': {'Amp': 1.0273131680911955, 'Pha': 0.6410257630467413}, 'Q1': {'Amp': 0.99349042997534653, 'Pha': -11.450128659908728}}, 'West': {'K1': {'Amp': 1.0005922446358808, 'Pha': 0.30581150821401393}, 'O1': {'Amp': 0.99691301051231362, 'Pha': -0.27067321788786103}, 'N2': {'Amp': 0.97009487763399349, 'Pha': 0.079806302494018655}, 'S2': {'Amp': 0.97355520330980305, 'Pha': 0.52786277421644989}, 'M2': {'Amp': 0.97177309878878693, 'Pha': 0.65317168768398026}, 'Q1': {'Amp': 1.005747559953059, 'Pha': -0.38741079347094853}}} TS10out = {'West': {'Q1': {'Amp': 0.98050924008569673, 'Pha': 2.1640812550494957}, 'N2': {'Amp': 0.98497707891758624, 'Pha': 0.73188917599328818}, 'S2': {'Amp': 0.98386804368385228, 'Pha': 0.49094103412041257}, 'M2': {'Amp': 0.9835814140583149, 'Pha': 0.33283810199147285}, 'K1': {'Amp': 0.98125945152519567, 'Pha': 2.0121855104044792}, 'O1': {'Amp': 0.98879469627003824, 'Pha': 2.423226052588376}}, 'North': {'Q1': {'Amp': 1.0262218515128376, 'Pha': -11.636416402243867}, 'N2': {'Amp': 1.0121869970770876, 'Pha': 0.25638965602854569}, 'S2': {'Amp': 1.0127597355406559, 'Pha': 0.59505848045751009}, 'M2': {'Amp': 1.0133909064729139, 'Pha': 0.33157708793285678}, 'K1': {'Amp': 
1.1311741337398815, 'Pha': -6.2684264923129263}, 'O1': {'Amp': 1.0807892207906737, 'Pha': -15.033773286125751}}} TS11out = {'West': {'N2': {'Amp': 0.9861002244759608, 'Pha': -0.10566829682310153}, 'K1': {'Amp': 0.99899667510872814, 'Pha': -0.036503272822104603}, 'M2': {'Amp': 0.98567279493454218, 'Pha': 0.3057391725200202}, 'Q1': {'Amp': 1.0010194819454052, 'Pha': 0.047263663169253789}, 'S2': {'Amp': 0.98418635692484369, 'Pha': 0.34645353156939507}, 'O1': {'Amp': 1.0045736271018535, 'Pha': -0.035061767485288442}}, 'North': {'N2': {'Amp': 1.0118494949360155, 'Pha': 0.37538197471974399}, 'K1': {'Amp': 1.0011334075203402, 'Pha': -4.4039853987503932}, 'M2': {'Amp': 1.0117183935668721, 'Pha': 0.33110599860091838}, 'Q1': {'Amp': 0.95470825319238906, 'Pha': -7.1123004897670796}, 'S2': {'Amp': 1.0103376571119917, 'Pha': 0.31215195736308488}, 'O1': {'Amp': 0.98265530252292388, 'Pha': -7.7938163393311299}}} TS12out = {'North': {'Q1': {'Pha': -2.8028207797105438, 'Amp': 1.0067196858504108}, 'O1': {'Pha': -3.4538665865347014, 'Amp': 1.0238409720567998}, 'N2': {'Pha': 0.33510031771822923, 'Amp': 1.0091900611944493}, 'M2': {'Pha': 0.31120580423186084, 'Amp': 1.0100886881011024}, 'K1': {'Pha': -4.3189261967383743, 'Amp': 1.0276037991916289}, 'S2': {'Pha': 0.20258067707970895, 'Amp': 1.010827726447741}}, 'West': {'Q1': {'Pha': 0.23364280047611174, 'Amp': 1.0034113035505845}, 'O1': {'Pha': 0.16957879223932082, 'Amp': 1.0042583168341883}, 'N2': {'Pha': 0.17550044986570212, 'Amp': 0.98473045540527915}, 'M2': {'Pha': 0.28090946947771794, 'Amp': 0.98647781863541684}, 'K1': {'Pha': 0.0016394684952055845, 'Amp': 0.99968355103521234}, 'S2': {'Pha': 0.32145241386463219, 'Amp': 0.98671884010077271}}} TS13out = {'West': {'O1': {'Pha': 0.04680136479113628, 'Amp': 1.0028414117632114}, 'N2': {'Pha': 0.28407505636140273, 'Amp': 0.98666343884396468}, 'S2': {'Pha': 0.30991534737121995, 'Amp': 0.98748279768091618}, 'M2': {'Pha': 0.2517691311556558, 'Amp': 0.9878931090830384}, 'K1': {'Pha': 0.014731604514469591, 'Amp': 0.99954343378996247}, 'Q1': {'Pha': 0.0071499783811859174, 'Amp': 1.0018607902885381}}, 'North': {'O1': {'Pha': -3.4702531096732798, 'Amp': 1.0239069789801176}, 'N2': {'Pha': 0.35918217891162385, 'Amp': 1.0087207644351472}, 'S2': {'Pha': 0.27295659468495614, 'Amp': 1.0097792960221685}, 'M2': {'Pha': 0.29874565143435916, 'Amp': 1.0083006574774827}, 'K1': {'Pha': -4.0578237330762477, 'Amp': 1.0270972397817333}, 'Q1': {'Pha': -4.235818999980836, 'Amp': 0.99737542293651305}}} N36_Aout = {'West': {'S2': {'Pha': 0.024968663334318464, 'Amp': 0.99337155681265432}, 'Q1': {'Pha': 3.3569984413439684, 'Amp': 0.95663330904750743}, 'K1': {'Pha': 10.525916755897864, 'Amp': 0.95627620722297868}, 'O1': {'Pha': -0.019006280510751594, 'Amp': 1.0095477235303318}, 'N2': {'Pha': -0.0012874180538793301, 'Amp': 0.99344078977697381}, 'M2': {'Pha': 0.20672969828430446, 'Amp': 0.99007316601581041}}, 'North': {'S2': {'Pha': 0.11378081255770667, 'Amp': 1.0070620543538167}, 'Q1': {'Pha': 0.88814591096328854, 'Amp': 0.95394468894565598}, 'K1': {'Pha': 10.085614670042702, 'Amp': 0.99230295956348635}, 'O1': {'Pha': -2.8850242464157532, 'Amp': 1.022596420254793}, 'N2': {'Pha': 0.42669693943682319, 'Amp': 1.007929667620058}, 'M2': {'Pha': 0.27507562700012045, 'Amp': 1.0073131602562162}}} N36_Bout = {'North': {'N2': {'Pha': 0.228619922684004, 'Amp': 1.0044197820734821}, 'O1': {'Pha': 8.4089357005164231, 'Amp': 0.98525392629581676}, 'S2': {'Pha': -0.46028630594918241, 'Amp': 1.0108745392383118}, 'Q1': {'Pha': 9.0291992812070987, 'Amp': 
0.97501550156148509}, 'K1': {'Pha': 7.8443759889432698, 'Amp': 0.95588820757114035}, 'M2': {'Pha': 0.19904203096535866, 'Amp': 1.0024480929027397}}, 'West': {'N2': {'Pha': 1.0231581697589434, 'Amp': 0.99485890137137734}, 'O1': {'Pha': 0.51036973879503478, 'Amp': 1.0090380271121973}, 'S2': {'Pha': 0.19181980227444306, 'Amp': 0.98490801519221349}, 'Q1': {'Pha': 3.5029764974067277, 'Amp': 0.99309398972124918}, 'K1': {'Pha': 0.45180641855848502, 'Amp': 0.98606684406471978}, 'M2': {'Pha': 0.14578991784822209, 'Amp': 0.99131929120194784}}} N36_Cout = {'North': {'O1': {'Pha': 7.3854138207617837, 'Amp': 0.94085326688379123}, 'Q1': {'Pha': 6.4006241776399975, 'Amp': 0.95897973076356524}, 'K1': {'Pha': 7.1413981914269868, 'Amp': 0.91658971870546968}, 'S2': {'Pha': -0.3595232949933802, 'Amp': 1.0060087070702608}, 'M2': {'Pha': -0.07795186160888079, 'Amp': 0.99633800734625722}, 'N2': {'Pha': -0.063366454852484821, 'Amp': 0.99810122970525916}}, 'West': {'O1': {'Pha': -0.79300780292307138, 'Amp': 0.98074493235008808}, 'Q1': {'Pha': -0.6739014812033588, 'Amp': 0.98943179187765928}, 'K1': {'Pha': -0.72383160181509798, 'Amp': 0.98614869389993487}, 'S2': {'Pha': 0.069163430058537756, 'Amp': 0.99327610629011942}, 'M2': {'Pha': -0.038238175514344983, 'Amp': 0.99773471593815255}, 'N2': {'Pha': -0.01545786753853351, 'Amp': 1.0017159403672811}}} diurnals = ('K1', 'O1', 'Q1') semis = ('M2', 'S2', 'N2') for TS in (TS4newBout, TS5out, TS6out, TS7out, TS8out, TS9out, TS10out, TS11out, TS12out, TS13out, N36_Aout, N36_Bout, N36_Cout): for side in ('West', 'North'): phaerror = 0 amperror = 0 for const in diurnals: phaerror += TS[side][const]['Pha']**2 amperror += (TS[side][const]['Amp'] - 1)**2 if side=='West': print (side, np.sqrt(phaerror/3.), np.sqrt(amperror/3.) ) correction = {'S2': {'Pha' : ('M2', False, True), 'Amp': ('M2', True, False)}, 'N2': {'Pha' : ('M2', False, True), 'Amp': ('M2', True, False)}, 'O1': {'Pha' : ('K1', False, True), 'Amp': ('K1', True, False)}, 'Q1': {'Pha' : ('K1', False, True), 'Amp': ('K1', True, False)},} def correctionf(TS): TSc = initialize_dict() for side in ('West', 'North'): for const in ('M2', 'K1'): for pa in ('Pha', 'Amp'): TSc[side][const][pa] = TS[side][const][pa] for const in ('O1', 'Q1', 'S2', 'N2'): TSc[side][const]['Pha'] = (TS[side][const]['Pha'] - TS[side][correction[const]['Pha'][0]]['Pha']) TSc[side][const]['Amp'] = (TS[side][const]['Amp'] / TS[side][correction[const]['Amp'][0]]['Amp']) return TSc TS3corr = correctionf(Ts3out) TS3Rcorr = correctionf(TSRout) numruns = 7; x = np.zeros(numruns); y = np.zeros_like(x) def make_comparison(freq, side): fig, ax = plt.subplots(1,2,figsize=(15,10)) for ipa, pa in enumerate(('Amp', 'Pha')): for const in freq: for irun, (Tin, Tout) in enumerate(zip( (TS10in, TS11in, TS12in, TS13in, TS13in, N36_Bin, N36_Cin), (TS10out, TS11out, TS12out, TS13out, N36_Aout, N36_Bout, N36_Cout))): x[irun] = Tin[side][const][pa] y[irun] = Tout[side][const][pa] # print const, x[irun] if const in ('O1', 'Q1', 'S2', 'N2'): divisor = (correction[const][pa][1] * Tout[side][correction[const][pa][0]][pa] + (1-correction[const][pa][1])) y[irun] = (Tout[side][const][pa] / divisor - correction[const][pa][2] * Tout[side][correction[const][pa][0]][pa]) print (side, const, pa, y[irun]) ax[ipa].plot(x[:-3],y[:-3],'o', label=const) ax[ipa].plot(x[-3], y[-3], '<', label=const) ax[ipa].plot(x[-2], y[-2], '>', label=const) ax[ipa].plot(x[-1],y[-1],'^', label=const) m, b = np.polyfit(x, y, 1) if pa == 'Pha': TSNin[side][const]['Pha'] = -b/m 
ax[ipa].plot(TSNin[side][const]['Pha'], 0., '*') else: TSNin[side][const]['Amp'] = (1-b)/m ax[ipa].plot(TSNin[side][const]['Amp'], 1., '*') if pa == 'Amp': if side == 'North' and const in diurnals: exes = np.arange(0.7, 1.8, 0.1) else: exes = np.arange(0.88,1.1,0.05) elif side == 'West': if const in diurnals: exes = np.arange(-20, 21, 1) else: exes = np.arange(-15,5,1) else: if const in diurnals: exes = np.arange(-60, 60, 2) else: exes = np.arange(-30, 10, 1) ax[ipa].plot(exes, m*exes+b) ax[ipa].set_xlabel('Forcing') ax[ipa].set_ylabel('Suggested Correction') ax[ipa].grid() ax[ipa].legend() if side == 'North' and pa == 'Amp' and const in diurnals: ax[ipa].set_ylim(0.5,2) ax[ipa].set_xlim(0.7, 1.2) elif side == 'North' and pa == 'Pha' and const in diurnals: ax[ipa].set_xlim(-30,10) ax[ipa].set_ylim(-14,11) elif side == 'West' and pa == 'Pha' and const in semis: ax[ipa].set_xlim(0,5) ax[ipa].set_ylim(-2.5,2.5) elif side== 'North' and pa == 'Pha' and const in semis: ax[ipa].set_xlim(-12,7) ax[ipa].set_ylim(-8,5) print (side, freq) make_comparison(semis,'West') side = 'West'; const = 'N2'; pa = 'Pha' print (side, const, pa) print (TS13in[side][const][pa], N36_Bin[side][const][pa], N36_Cin[side][const][pa], TSNin[side][const][pa]) #print (N36_Aout[side][const][pa], N36_Bout[side][const][pa], N36_Cout[side][const][pa]) make_comparison(diurnals, 'West') side = 'West'; const = 'Q1'; pa = 'Pha' print (side, const, pa) print (TS13in[side][const][pa], N36_Bin[side][const][pa], N36_Cin[side][const][pa], TSNin[side][const][pa]) #print (N36_Aout[side][const][pa], N36_Bout[side][const][pa], N36_Cout[side][const][pa]) make_comparison(semis,'North') side = 'North'; const = 'N2'; pa = 'Pha' print (side, const, pa) print (TS13in[side][const][pa], N36_Bin[side][const][pa], N36_Cin[side][const][pa], TSNin[side][const][pa]) #print (N36_Aout[side][const][pa], N36_Bout[side][const][pa], N36_Cout[side][const][pa]) make_comparison(diurnals,'North') side = 'North'; const = 'Q1'; pa = 'Pha' print (side, const, pa) print (TS13in[side][const][pa], N36_Bin[side][const][pa], N36_Cin[side][const][pa], TSNin[side][const][pa]) #print (N36_Aout[side][const][pa], N36_Bout[side][const][pa], N36_Cout[side][const][pa]) TSNin['West']['S2']['Amp'] = 1.041 TSNin['West']['N2']['Amp'] = 0.981 TSNin['West']['S2']['Pha'] = 1.7 TSNin['West']['K1']['Amp'] = 0.98 TSNin['West']['O1']['Amp'] = 1.08 TSNin['West']['Q1']['Amp'] = 0.997 TSNin['West']['K1']['Pha'] = -3.5 TSNin['West']['O1']['Pha'] = 1.95 TSNin['West']['Q1']['Pha'] = 3.05 TSNin['North']['S2']['Amp'] = 1.039 TSNin['North']['S2']['Pha'] = -9.25 TSNin['North']['K1']['Pha'] = 8 TSNin['North']['O1']['Pha'] = -2 TSNin['North']['Q1']['Pha'] = -5.4 print (TSNin) #for side in ('West', 'North'): # don't correct North because we are missing River and Kelsey Bay TSN2in['North'] = TSNin['North'] side = 'West' print side for const in semis + diurnals: print const, TSNin[side][const] print TS3Rcorr[side][const], 'River' print TS3corr[side][const], 'No River' differ = {key: TS3Rcorr[side][const][key] - TS3corr[side][const][key] for key in ('Pha','Amp')} print differ, 'Difference' TSN2in[side][const] = {key: TSNin[side][const][key] + differ[key] for key in ('Pha', 'Amp')} print TSN2in[side][const], 'Proposal' print TSN2in for side in ('West', 'North'): for const in semis + diurnals: print (side, const) print (TS4newBout[side][const]['Pha'] + TS4in[side][const]['Pha'], TS4in[side][const]['Pha']) correction = {'S2': {'Pha' : ('M2', False, True), 'Amp': ('M2', True, False)}, 'N2': {'Pha' : ('M2', 
False, True), 'Amp': ('M2', True, False)}, 'O1': {'Pha' : ('K1', False, True), 'Amp': ('K1', True, False)}, 'Q1': {'Pha' : ('K1', False, True), 'Amp': ('K1', True, False)},} def correction2(TSprevin, TSnewout): TSc = initialize_dict() for side in ('West', 'North'): for const in ('M2', 'K1'): TSc[side][const]['Pha'] = TSprevin[side][const]['Pha'] + TSnewout[side][const]['Pha'] TSc[side][const]['Amp'] = TSprevin[side][const]['Amp'] * TSnewout[side][const]['Amp'] for const in ('O1', 'Q1', 'S2', 'N2'): TSc[side][const]['Pha'] = (TSprevin[side][const]['Pha'] - TSnewout[side][correction[const]['Pha'][0]]['Pha'] + TSnewout[side][const]['Pha']) TSc[side][const]['Amp'] = (TSprevin[side][const]['Amp'] / TSnewout[side][correction[const]['Amp'][0]]['Amp'] * TSnewout[side][const]['Amp']) return TSc TS5in = correction2(TS4in, TS4newBout) print (TS5in) print (TS4in)Copyright 2021 The TensorFlow Cloud Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.Tuning a wide and deep model using Google CloudIn this example we will use CloudTuner and Google Cloud to Tune a [Wide and Deep Model](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) based on the tunable model introduced in [structured data learning with Wide, Deep, and Cross networks](https://keras.io/examples/structured_data/wide_deep_cross_networks/). In this example we will use the data set from [CAIIS Dogfood Day](https://www.kaggle.com/c/caiis-dogfood-day-2020/overview) View on TensorFlow.org Run in Google Colab View on GitHub Download notebook Run in Kaggleimport datetime import uuid import numpy as np import pandas as pd import tensorflow as tf import os import sys import subprocess from tensorflow.keras import datasets, layers, models from sklearn.model_selection import train_test_split # Install the latest version of tensorflow_cloud and other required packages. if os.environ.get("TF_KERAS_RUNNING_REMOTELY", True): subprocess.run( ['python3', '-m', 'pip', 'install', 'tensorflow-cloud', '-q']) subprocess.run( ['python3', '-m', 'pip', 'install', 'google-cloud-storage', '-q']) subprocess.run( ['python3', '-m', 'pip', 'install', 'fsspec', '-q']) subprocess.run( ['python3', '-m', 'pip', 'install', 'gcsfs', '-q']) import tensorflow_cloud as tfc print(tfc.__version__)Setting project parameters. For more details on Google Cloud Specific parameters please refer to [Google Cloud Project Setup Instructions](https://www.kaggle.com/nitric/google-cloud-project-setup-instructions/).# Set Google Cloud Specific parameters # TODO: Please set GCP_PROJECT_ID to your own Google Cloud project ID. GCP_PROJECT_ID = 'YOUR_PROJECT_ID' #@param {type:"string"} # TODO: Change the Service Account Name to your own Service Account SERVICE_ACCOUNT_NAME = 'YOUR_SERVICE_ACCOUNT_NAME' #@param {type:"string"} SERVICE_ACCOUNT = f'{SERVICE_ACCOUNT_NAME}@{GCP_PROJECT_ID}.iam.gserviceaccount.com' # TODO: set GCS_BUCKET to your own Google Cloud Storage (GCS) bucket. 
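# If the bucket does not exist yet, one common way to create it is with the gsutil CLI,
# for example: gsutil mb -l us-central1 -p YOUR_PROJECT_ID gs://YOUR_GCS_BUCKET_NAME
# (example command only -- treat the location and names as placeholders for your own project;
# the bucket must be readable and writable by the service account configured above).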
GCS_BUCKET = 'YOUR_GCS_BUCKET_NAME' #@param {type:"string"} # DO NOT CHANGE: Currently only the 'us-central1' region is supported. REGION = 'us-central1' # Set Tuning Specific parameters # OPTIONAL: You can change the job name to any string. JOB_NAME = 'wide_and_deep' #@param {type:"string"} # OPTIONAL: Set Number of concurrent tuning jobs that you would like to run. NUM_JOBS = 5 #@param {type:"string"} # TODO: Set the study ID for this run. Study_ID can be any unique string. # Reusing the same Study_ID will cause the Tuner to continue tuning the # Same Study parameters. This can be used to continue on a terminated job, # or load stats from a previous study. STUDY_NUMBER = '00001' #@param {type:"string"} STUDY_ID = f'{GCP_PROJECT_ID}_{JOB_NAME}_{STUDY_NUMBER}' # Setting location were training logs and checkpoints will be stored GCS_BASE_PATH = f'gs://{GCS_BUCKET}/{JOB_NAME}/{STUDY_ID}' TENSORBOARD_LOGS_DIR = os.path.join(GCS_BASE_PATH,"logs")Authenticating the notebook to use your Google Cloud ProjectFor Kaggle Notebooks click on "Add-ons"->"Google Cloud SDK" before running the cell below.# Using tfc.remote() to ensure this code only runs in notebook if not tfc.remote(): # Authentication for Kaggle Notebooks if "kaggle_secrets" in sys.modules: from kaggle_secrets import UserSecretsClient UserSecretsClient().set_gcloud_credentials(project=GCP_PROJECT_ID) # Authentication for Colab Notebooks if "google.colab" in sys.modules: from google.colab import auth auth.authenticate_user() os.environ["GOOGLE_CLOUD_PROJECT"] = GCP_PROJECT_IDLoad the dataRead raw data and split to train and test data sets. For this step you will need to copy the dataset to your GCS bucket so it can be accessed during training. For this example we are using the dataset from https://www.kaggle.com/c/caiis-dogfood-day-2020.To do this you can run the following commands to download and copy the dataset to your GCS bucket, or manually download the dataset vi [Kaggle UI](https://www.kaggle.com/c/caiis-dogfood-day-2020/data) and upload the `train.csv` file to your [GCS bucket vi GCS UI](https://console.cloud.google.com/storage/browser).```python Download the dataset!kaggle competitions download -c caiis-dogfood-day-2020 Copy the training file to your bucket!gsutil cp ./caiis-dogfood-day-2020/train.csv $GCS_BASE_PATH/caiis-dogfood-day-2020/train.csv```train_URL = f'{GCS_BASE_PATH}/caiis-dogfood-day-2020/train.csv' data = pd.read_csv(train_URL) train, test = train_test_split(data, test_size=0.1) # A utility method to create a tf.data dataset from a Pandas Dataframe def df_to_dataset(df, shuffle=True, batch_size=32): df = df.copy() labels = df.pop('target') ds = tf.data.Dataset.from_tensor_slices((dict(df), labels)) if shuffle: ds = ds.shuffle(buffer_size=len(df)) ds = ds.batch(batch_size) return ds sm_batch_size = 1000 # A small batch size is used for demonstration purposes train_ds = df_to_dataset(train, batch_size=sm_batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=sm_batch_size)Preprocess the dataSetting up preprocessing layers for categorical and numerical input data. 
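As a quick illustration of what these layers do, here is a minimal sketch on toy data (assuming the same `tensorflow.keras.layers.experimental.preprocessing` API imported in the next cell): `Normalization` learns per-feature mean and variance via `adapt()`, and `StringLookup` maps raw strings to integer indices that can then be one-hot encoded.

```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

# Numeric side: adapt() computes the mean/variance used to standardize inputs.
norm = preprocessing.Normalization()
norm.adapt(np.array([[1.0], [2.0], [3.0]], dtype='float32'))
print(norm(np.array([[2.0]], dtype='float32')))  # close to 0 after standardization

# Categorical side: StringLookup maps each string to an integer id
# (the exact index offsets depend on the OOV/mask settings of your TF version).
lookup = preprocessing.StringLookup(vocabulary=['low', 'medium', 'high'])
print(lookup(tf.constant([['low'], ['high']])))
```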
For more details on preprocessing layers please refer to [working with preprocessing layers](https://www.tensorflow.org/guide/keras/preprocessing_layers).from tensorflow.keras.layers.experimental import preprocessing def create_model_inputs(): inputs ={} for name, column in data.items(): if name in ('id','target'): continue dtype = column.dtype if dtype == object: dtype = tf.string else: dtype = tf.float32 inputs[name] = tf.keras.Input(shape=(1,), name=name, dtype=dtype) return inputs #Preprocessing the numeric inputs, and running them through a normalization layer. def preprocess_numeric_inputs(inputs): numeric_inputs = {name:input for name,input in inputs.items() if input.dtype==tf.float32} x = layers.Concatenate()(list(numeric_inputs.values())) norm = preprocessing.Normalization() norm.adapt(np.array(data[numeric_inputs.keys()])) numeric_inputs = norm(x) return numeric_inputs # Preprocessing the categorical inputs. def preprocess_categorical_inputs(inputs): categorical_inputs = [] for name, input in inputs.items(): if input.dtype == tf.float32: continue lookup = preprocessing.StringLookup(vocabulary=np.unique(data[name])) one_hot = preprocessing.CategoryEncoding(max_tokens=lookup.vocab_size()) x = lookup(input) x = one_hot(x) categorical_inputs.append(x) return layers.concatenate(categorical_inputs)Define the model architecture and hyperparametersIn this section we define our tuning parameters using [Keras Tuner Hyper Parameters](https://keras-team.github.io/keras-tuner/the-search-space-may-contain-conditional-hyperparameters) and a model-building function. The model-building function takes an argument hp from which you can sample hyperparameters, such as hp.Int('units', min_value=32, max_value=512, step=32) (an integer from a certain range).import kerastuner # Configure the search space HPS = kerastuner.engine.hyperparameters.HyperParameters() HPS.Float('learning_rate', min_value=1e-4, max_value=1e-2, sampling='log') HPS.Int('num_layers', min_value=2, max_value=5) for i in range(5): HPS.Float('dropout_rate_' + str(i), min_value=0.0, max_value=0.3, step=0.1) HPS.Choice('num_units_' + str(i), [32, 64, 128, 256]) from tensorflow.keras import layers from tensorflow.keras.optimizers import Adam def create_wide_and_deep_model(hp): inputs = create_model_inputs() wide = preprocess_categorical_inputs(inputs) wide = layers.BatchNormalization()(wide) deep = preprocess_numeric_inputs(inputs) for i in range(hp.get('num_layers')): deep = layers.Dense(hp.get('num_units_' + str(i)))(deep) deep = layers.BatchNormalization()(deep) deep = layers.ReLU()(deep) deep = layers.Dropout(hp.get('dropout_rate_' + str(i)))(deep) both = layers.concatenate([wide, deep]) outputs = layers.Dense(1, activation='sigmoid')(both) model = tf.keras.Model(inputs=inputs, outputs=outputs) metrics = [ tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall'), 'accuracy', 'mse' ] model.compile( optimizer=Adam(lr=hp.get('learning_rate')), loss='binary_crossentropy', metrics=metrics) return modelConfigure a CloudTunerIn this section we configure the cloud tuner for both remote and local execution. The main difference between the two is the distribution strategy.from tensorflow_cloud import CloudTuner distribution_strategy = None if not tfc.remote(): # Using MirroredStrategy to use a single instance with multiple GPUs # during remote execution while using no strategy for local. 
distribution_strategy = tf.distribute.MirroredStrategy() tuner = CloudTuner( create_wide_and_deep_model, project_id=GCP_PROJECT_ID, project_name=JOB_NAME, region=REGION, objective='accuracy', hyperparameters=HPS, max_trials=100, directory=GCS_BASE_PATH, study_id=STUDY_ID, overwrite=True, distribution_strategy=distribution_strategy) # Configure Tensorboard logs callbacks=[ tf.keras.callbacks.TensorBoard(log_dir=TENSORBOARD_LOGS_DIR)] # Setting to run tuning remotely, you can run tuner locally to validate it works first. if tfc.remote(): tuner.search(train_ds, epochs=20, validation_data=test_ds, callbacks=callbacks) # You can uncomment the code below to run the tuner.search() locally to validate # everything works before submitting the job to Cloud. Stop the job manually # after one epoch. # else: # tuner.search(train_ds, epochs=1, validation_data=test_ds, callbacks=callbacks)Start the remote trainingThis step will prepare your code from this notebook for remote execution and start NUM_JOBS parallel runs remotely to train the model. Once the jobs are submitted you can go to the next step to monitor the jobs progress via Tensorboard.tfc.run_cloudtuner( distribution_strategy='auto', docker_config=tfc.DockerConfig( image_build_bucket=GCS_BUCKET ), chief_config=tfc.MachineConfig( cpu_cores=16, memory=60, ), job_labels={'job': JOB_NAME}, service_account=SERVICE_ACCOUNT, num_jobs=NUM_JOBS )Training ResultsWhile the training is in progress you can use Tensorboard to view the results. Note the results will show only after your training has started. This may take a few minutes.# %load_ext tensorboard # %tensorboard --logdir $TENSORBOARD_LOGS_DIRYou can access the training assets as follows. Note the results will show only after your tuning job has completed at least once trial. 
This may take a few minutes.if not tfc.remote(): tuner.results_summary(1) best_model = tuner.get_best_models(1)[0] best_hyperparameters = tuner.get_best_hyperparameters(1)[0] # References to best trial assets best_trial_id = tuner.oracle.get_best_trials(1)[0].trial_id best_trial_dir = tuner.get_trial_dir(best_trial_id)【問題1】SimpleRNNのフォワードプロパゲーション実装class SimpleRNN(): def __init__(self, n_features, n_nodes, initializer=None, optimizer=None, activator1=None, activator2=None): self.optimizer = optimizer self.activator1 = activator1 self.activator2 = activator2 # self.Wx = initializer.W(n_features, n_nodes) # self.b = initializer.b(n_nodes) # self.Wh = initializer.W(n_nodes, n_nodes) def forward(self, x, h_in): self.Z = np.dot(x, self.Wx) + np.dot(h_in, self.Wh) + self.b self.h_out = self.activator1.forward(self.Z) self.out = self.activator2.forward(self.h_out) return self.out, self.h_out def backward(self, y_true, y_pred, dh_out): dh_out = dh_out + self.activator2.backward(y_pred, y_true) dZ = self.activator1.backward(dh_out) dh_in, dWx, dWh, db = self.optimizer.backward(self, dZ) return dh_in, dWx, dWh, db def update(self, dWx, dWh, db): self.optimizer.update(self, dWx, dWh, db) class SGD: def __init__(self, lr=0.001): self.lr = lr def backward(self, layer, dZ): db = np.sum(dZ, axis=0) dWx = np.dot(layer.x.T, dZ) dx = np.dot(dZ, layer.Wx.T) dWh = np.dot(layer.h.T, dZ) dh = np.dot(dZ, layer.Wh.T) return dh, dWx, dWh, db def update(self, layer, dWx, dWh, db): layer.Wx -= self.lr * dWx layer.Wh -= self.lr * dWh layer.b -= self.lr * db class Softmax(): def __init__(self): pass def forward(self, Z): if Z.ndim == 2: Z = Z.T A = (np.exp(Z) / np.sum(np.exp(Z), axis=0)).T return A A = np.exp(Z) / np.sum(np.exp(Z)) return A def backward(self, y_pred, y_true): return y_pred - y_true class Tanh(): def __init__(self): pass def forward(self, Z): A = np.tanh(Z) return A def backward(self, dA, A): return dA * np.square(1 - A) class ScratchSimpleRNNClassifier(): def __init__(self, layer, epoch=3): self.epoch = epoch self.layer = layer self.loss_train = [] self.loss_valid = [] def train(self, X, y, X_val=None, y_val=None): # X shape: (batch_size, n_sequences, n_features) n_sequences = X.shape[1] self.out_list = [] self.hout_list = [] self.dh_list = [] self.dWx_list = [] self.dWh_list = [] self.db_list = [] for _ in tqdm(range(self.epoch)): # feedforward self.h = np.zeros((batch_size, n_nodes)) h_out = np.zeros((batch_size, n_nodes)) for n in range(n_sequences): out, h_out = self.layer.forward(X[:,n,:], h_out) self.out_list.append(out) self.hout_list.append(h_out) # backward dh = np.zers((batch_size, n_nodes)) for n in range(n_sequences): dh, dWx, dWh, db = layer.backward(self.out_list[-n-1], dh) self.dh_list.append(dh) self.dWx_list.append(dWx) self.dWh_list.append(dWh) self.db_list.append(db) # update weight layer.update(np.sum(dWx), np.sum(dWh), np.sum(db)) # self.loss_train.append(self.crossentropy(mini_y_train, fout)) def crossentropy(self, y, y_pred): loss = -np.mean(np.sum(y*np.log(y_pred), axis=1)) return loss def predict(self, X_test): out = X_test for layer in self.layers: out = layer.forward(out) return out【問題2】小さな配列でのフォワードプロパゲーションの実験softmax = Softmax() tanh = Tanh() sgd = SGD() x = np.array([[[1, 2], [2, 3], [3, 4]]])/100 # (batch_size, n_sequences, n_features) w_x = np.array([[1, 3, 5, 7], [3, 5, 7, 8]])/100 # (n_features, n_nodes) w_h = np.array([[1, 3, 5, 7], [2, 4, 6, 8], [3, 5, 7, 8], [4, 6, 8, 10]])/100 # (n_nodes, n_nodes) batch_size = x.shape[0] # 1 n_sequences = x.shape[1] # 3 n_features = 
x.shape[2] # 2 n_nodes = w_x.shape[1] # 4 h = np.zeros((batch_size, n_nodes)) # (batch_size, n_nodes) b = np.array([1, 1, 1, 1]) # (n_nodes,) rnn = SimpleRNN(2, 4, None, sgd, tanh, softmax) rnn.Wx = w_x rnn.Wh = w_h rnn.b = b rnn.h = h rnn_cls = ScratchSimpleRNNClassifier(rnn, 1) rnn_cls.train(x, None) rnn_cls.hout_list100%|██████████| 1/1 [00:00<00:00, 464.02it/s]Open Machine Learning CourseAuthor: [](https://www.kaggle.com/dremovd) Simple exploratory data analysis for the purchase categorization competition[Kaggle Inclass](https://www.kaggle.com/c/receipt-categorisation) and a separate [link](https://www.kaggle.com/t/73f1a2eb0be9443ba1f8d2f283adc444) for participation. QR codes recently appeared on receipts. Not everyone is familiar with them yet, but the information in that code lets you recover the full contents of a receipt. This makes it possible to track expenses down to each individual item, including purchases made in cash. Because the information is complete, you can analyze changes in your spending patterns and the inflation of your own shopping basket. Product names are not standardized: the same product can have substantially different names in different shops, individual words may be abbreviated, and names may contain typos. New products are constantly added to the shops, which makes simply compiling a catalog of every product with its category unrealistic.The data is published here for the first time, and models trained on it are used in [production](https://play.google.com/store/apps/details?id=com.dremovd.fnschecks&hl=ru). The task to solve is to split all the purchases on a receipt into a small set of human-understandable categories.import os import pandas as pd import numpy as np PATH_TO_DATA = 'data' train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv.gz'), encoding='utf-8') train.fillna('', inplace=True) train.head() import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline plt.rcdefaults() plt.rcParams['font.family'] = 'fantasy' plt.rcParams['font.fantasy'] = 'Arial' categories = train.groupby('category').count()['check_id'] categories = categories.sort_values(ascending = False) plt.figure(figsize = (16, 10)) plt.title(u'Number of items per category') categories.plot(kind='bar'); checks = pd.read_csv(os.path.join(PATH_TO_DATA, 'train_checks.csv.gz')) checks.head() shops = checks.groupby('shop_name').count()['check_id'] shops = shops.sort_values(ascending = False)[:15] plt.figure(figsize = (16, 10)) plt.title(u'Number of checks per shop') shops.plot(kind='bar'); plt.figure(figsize = (16, 10)) plt.title(u'Distribution of check totals') _ = plt.hist(checks['sum'], bins = 20, range = [0, 10000], log=True);Coding ChallengesNow it's time to put all you have learned together to try some unique coding puzzles. The coding challenges presented here range from very easy to challenging. For each one there are a few hints that you can use if you get stuck. Each challenge has a space for you to write the answer and a following cell which can be executed to check your answer against a few test cases.**Before starting the challenges be sure to run the following cell first, which sets up functions for testing your answers.**# This cell brings in a function called assert_equal() which is used to test # the output of your function with the expected output. # After each challenge, the cell following your code will run your function and compare the result to the expected output. 
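# Note on the mechanics: %run executes the script below and, once it finishes, its
# top-level names (including assert_equal) are loaded into this notebook's namespace,
# which is why the test cells after each challenge can call assert_equal() directly.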
%run ../src/challenge_tests.pyChallenge 1 - What day of the year is it?In this challenge you are to write a function called _day_of_year_ which accepts three integer parameters _day_, _month_, _year_ the result of the function should be an integer representing the number of days that have elapsed since January 1st of the year provided until the date. For instance, if the function were asked for the day of the year on Feb 2, 2015 the answer returned should be 33 (31 days in Jan + 2 days in February).Remember to take into account leap year! There are three criteria for leap year:The year can be evenly divided by 4If the year can be evenly divided by 100 it is NOT a leap year, unless:The year is also divisble by 400, in which case it is a leap yeardef day_of_year(day, month, year): ''' A function to determine what day of the year it is. Day of the the year is a number between 1 and 366 where Jan 1 is day 1 and Dec 31 is day 366 (in 2020) Parameters ---------- day : int The day of the month month : int The month of the year year : int The year which the day is being calculated Returns ------- int The day of the year ''' # Replace pass with your code pass # Here's an example to see how your code will be run print(f'Jan 1st is the {day_of_year(1,1,2000)} day of the year')You can test your work by running the next cell.# Run this cell to test your work assert_equal(day_of_year(1,1,2000),1,'Jan 1, 2000') assert_equal(day_of_year(15,2,2015),46,'Feb 15, 2015') assert_equal(day_of_year(30,6,2020),182,'June 30, 2020', 'Did you check for leap year?') assert_equal(day_of_year(1, 1, 2001), 1, "Jan 1, 2001") assert_equal(day_of_year(17, 11, 2020), 322, "Nov 17, 2020") assert_equal(day_of_year(31, 12, 2020), 366, "Dec 31, 2021") assert_equal(day_of_year(14, 8, 2021), 226, "Aug 14, 2021") assert_equal(day_of_year(9, 5, 2022), 129, "May 9, 2022")Challenge 2 - Create me a monogramTraditional monograms are represented by three initials (first name, last name and middle initial). The challenge here is to build a monogram from a name that is supplied. The monogram should use lowercase letters for the first initial and middle initial, while the last name initial is in caps. For example, => d.K.sEye See Deadpeople => e.S.d => m.B.sdef monogram(full_name): ''' Creates a traditional monogram from a supplied full name Parameters ---------- full_name : str The full name (first middle last) of the person for which to build the monogram ''' # Replace pass with your code pass # Put your name here to try out your monogram function my_name = '' print(f'My monogram is {monogram(my_name)}') # Run this cell to test your work assert_equal(monogram(''),'d.S.k') assert_equal(monogram('Eye see deadpeople'),'e.D.s', hint='Did you check the case?') assert_equal(monogram('mers sadees benz II'),'m.B.s',hint='Did the extra suffix throw you off?')Challenge 3 - Are you my mother?In this coding challenge you are to determine the matriarchical family tree given a list of mother/daughter pairs. For this challenge you will need to understand the concept of tuples. A tuple is a sequence of elements much like a list, but unlike a list, tuples are immutable (that is, they cannot be changed). Tuples are represented by the parathenses surrounding a comma separated list of items such as (5,6) or ('mother', 'daughter'). In the first case, the tuple is made up of two integers and the second case the tuple is two strings. 
Accessing items in a tuple is similar to accessing items in other sequences in Python - by using square brackets.```python> pair = ('mother','daughter')> pair[0]'mother'> pair[1]'daughter'```Now on with the challenge. You will be provided a list of tuples, the first name will always be the mother of the second name. Given this list of names, you are to develop the family tree and provide the relationship between the target pair.For instance, if the `source_list` is ```[('Enid','Susan'),('Enid','Diane'),('Susan','Deborah')] ```then the family tree represented is ``` Enid | |--------| Susan Diane | Deborah```and then if the `target_list` is `[('Enid','Deborah')]` then the correct response is `Granddaughter`, as Deborah is the _granddaughter_ of Enid.There will only every be only 3 generations (maximum) with varying number of children for each parent, but each child will only have a single parent (we are only dealing with the females in the tree). Your response should be one of ```pythonMotherDaughterGrandmotherGranddaughterSisterCousinAuntNiece```>**Remember**>Sisters have the same mother.>Cousins have the same grandmother.>A niece's grandmother is the mother of her Aunt.>An Aunt's mother is the grandmother of her niece.**Hint**: You may consider using a [dictionary](https://www.w3schools.com/python/python_dictionaries.asp:~:text=%20Python%20Dictionaries%20%201%20Dictionary.%20A%20dictionary,Items.%20%208%20Removing%20Items.%20%20More%20) data type to solve this one.def relations(family_tree, relationship): ''' Determine the relationship between two people in a given family Parameters ---------- family_tree : list of tuple of str The family tree is defined by tuples of mother/daughter pairs where the first item in the tuple is the mother of the second name in the tuple. relationship: tuple of str The relationship to be determined of the second person in the tuple to the first person in the tuple Returns ------- str : {'Grandmother','Granddaughter','Mother','Daughter','Sister','Cousin','Aunt','Niece'} The relationship of the second person in the `relationship` tuple to the first person in the tuple ''' # Replace pass with your code pass # You can run this to try out the function you wrote. # First we define the family tree. 
# The variable test_family represents a family tree where: # Ingrid is Sally's mother # Ingrid is also Jackie's mother # Sally is Betty's mother # # Therefore, # Sally and Jackie are sisters (same mother) # Ingrid is Betty's grandmother (Sally's mother is Ingrid) # Jackie is Betty's aunt (Betty's mother has the same mother as Jackie) test_family = [('Ingrid','Sally'), ('Ingrid','Jackie'), ('Sally','Betty')] # Let's see how Ingrid and Sally are related relationship_1 = relations(test_family, ('Ingrid','Sally')) print(f'Ingrid is Sally`s {relationship_1}') # Let's see how Ingrid and Betty are related (should be gradmother) relationship_2 = relations(test_family, ('Ingrid','Betty')) print(f'Ingrid is Betty`s {relationship_2}') # Now let's see how Betty is related to Ingrid (should be granddaughter) relationship_3 = relations(test_family, ('Ingrid','Betty')) print(f'Betty is Ingrid`s {relationship_3}') # Run this cell to test your work family_a = [("Enid", "Susan"), ("Susan", "Deborah")] family_b = [('Enid', 'Susan'), ('Susan', 'Deborah'), ('Enid', 'Dianne'), ('Dianne', 'Judy'), ('Dianne', 'Fern')] assert_equal(relations(family_a,('Enid','Susan')),'Daughter') assert_equal(relations(family_b,('Enid','Judy')),'Granddaughter') assert_equal(relations(family_b,('Enid','Deborah')),'Granddaughter') assert_equal(relations(family_b,('Enid','Dianne')),'Daughter') assert_equal(relations(family_b,('Enid','Fern')),'Granddaughter') assert_equal(relations(family_b,('Susan','Enid')),'Mother') assert_equal(relations(family_b,('Susan','Deborah')),'Daughter') assert_equal(relations(family_b,('Susan','Dianne')),'Sister') assert_equal(relations(family_b,('Susan','Judy')),'Niece') assert_equal(relations(family_b,('Susan','Fern')),'Niece') assert_equal(relations(family_b,('Fern','Susan')),'Aunt') assert_equal(relations(family_b,('Fern','Judy')),'Sister')Challenge 4 - Money in the bankFor this challenge you are to dispense bills from an ATM in the least number of bills possible.In this challenge you are writing the code for an ATM which can dispense up to 1500 dollars per transaction with the least number of bills possible. The ATM has bills available in these nominal amounts 10, 20, 50, 100 and 500 and plenty of them so no need to worry about running out! You function should return the number of bills required, if the amount requested cannot be met, then your function should signal an error by returning a -1. **HINT**: Python has an operator for [floor division](https://python-reference.readthedocs.io/en/latest/docs/operators/floor_division.html) which you may find helpful for this example. You may also consider the [divmod() function](https://python-reference.readthedocs.io/en/latest/docs/functions/divmod.html?highlight=divmod())def dispense_cash(amount): ''' Determine the minimum number of ATM bills to meet the requested amount to dispense Parameters ---------- amount : int The amount of money requested from the ATM Returns ------- int The number of bills needed, -1 if it can't be done ''' pass # Start simple print(f'If I ask for $500, I should get back 1 bill. Your answer: {dispense_cash(500)}') # A bit more complex print(f'If I ask for $110, I should get back 2 bills (1x $100, 1x$10). Your answer: {dispense_cash(110)}') # Finally print(f'If I ask for $290, I should get back 4 bills (2 x $100, 1 x $50, 2 x $20). 
Your answer: {dispense_cash(290)}') # Run this cell to test your answer assert_equal(dispense_cash(1120), 4) assert_equal(dispense_cash(492), -1) assert_equal(dispense_cash(440), 6) assert_equal(dispense_cash(370), 5) assert_equal(dispense_cash(80), 3)Challenge 5 - Fruit CalculatorGiven a word problem as a string, complete the calculation and return the result.This one is going to be tricky. You are given a word problem telling with some math in it. For instance, > Panda has 8 apples and loses 2 apples. How many apples?The format will always be a number followed by a fruit, and may contain the words `gains` or `loses`. The question will always end in a question about a fruit (which may or may not be mentioned in the question). Here are a few examples:> Panda has 2 apples, 3 bananas and 1 watermelon. He gains 1 apple. How many apples?> Panda has 2 apples and gains 3 bananas. How many watermelon?> Panda has 2 apples and loses 2 apples but gains 4 bananas. How many bananas?**Hints**Built-in string functions will be helpful. Three in particular: `isdigits()`, `rtrim()`, `split()`def fruit_calculator(question): ''' Given a word problem, answer the question Parameters ---------- question : str A question which has one or more sentences describing the situation and a question. Returns ------- int A number which answers the question ''' # Your code here pass # Start with 3 apples apples = fruit_calculator('Jim has 3 apples. How many apples?') print(apples) # Now add 3 apples apples = fruit_calculator('Jim has 3 apples and gains 2 apples. How many apples?') print(apples) # Now drop a few apples = fruit_calculator('Jim has 3 apples and loses 2 apples. How many apples?') print(apples) # Add in another fruit apples = fruit_calculator('Jim has 3 apples and loses 2 bananas. How many apples?') print(apples) # Run this cell to test your work assert_equal(fruit_calculator('Panda has 8 apples and loses 2 apples. How many apples?'), 6) assert_equal(fruit_calculator('Panda has 8 apples, 2 bananas and gains 3 bananas. How many bananas?'), 5) assert_equal(fruit_calculator('Panda has 8 apples, 2 bananas and gains 3 bananas. How many apples?'), 8) assert_equal(fruit_calculator('Jim has 12 bananas. He loses 2 apples. Then he gains 1 apple. How many bananas?'), 12) assert_equal(fruit_calculator('Jim has 2 bananas and gains 3 bananas. 
How many watermelons?'), 0)# Importando os módulos import tensorflow as tf import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import seaborn as sns #encontrando o dataframe df = pd.read_excel('Folds5x2_pp.xlsx', sheet_name='Sheet1') #mostrando o dataset df.head()- Temperature (T) in the range 1.81°C and 37.11°C,- Ambient Pressure (AP) in the range 992.89-1033.30 milibar,- Relative Humidity (RH) in the range 25.56% to 100.16%- Exhaust Vacuum (V) in teh range 25.36-81.56 cm Hg- Net hourly electrical energy output (PE) 420.26-495.76 MWdf.shape**Análise Exploratória**#encontrando as estatísticas descritivas df.describe() #analisando a distribuição dos dados df.PE.hist() df.PE.hist(bins = 50); df[['AT', 'V', 'AP', 'RH']].hist(); df[['AT', 'V', 'RH']].boxplot();**Engenharia de Características**#verificando a multicolinearidade corr = df.corr() plt.figure(figsize=(10,5)) sns.heatmap(corr, annot=True) plt.show() f,axes=plt.subplots(2,2,figsize=(15,7)) sns.scatterplot(y='PE',x='AT',data=df,ax=axes[0,0]) sns.scatterplot(y='PE',x='AP',data=df,ax=axes[0,1],color='green') sns.scatterplot(y='PE',x='V',data=df,ax=axes[1,0],color='red') sns.scatterplot(y='PE',x='RH',data=df,ax=axes[1,1]) plt.tight_layout() #utilizando outros métodos para identificar multicolinearidade #VIF (Variable Inflation Factors) from statsmodels.stats.outliers_influence import variance_inflation_factor def calc_vif(X): # calculando o VIF vif = pd.DataFrame() vif["variáveis"] = X.columns vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])] return(vif)/usr/local/lib/python3.7/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. 
import pandas.util.testing as tm X = df.iloc[:,:-1] calc_vif(X) # starts at 1 # VIF < 5 -> low multicollinearity # VIF > 10 -> high multicollinearity**Adjusting for multicollinearity**from sklearn.preprocessing import PolynomialFeatures feature_cols = ['AT', 'V', 'AP', 'RH'] poly_cols = ['Coff','AT', 'V', 'AP', 'RH', 'AT2', 'AT*V', 'AT*AP', 'AT*RH', 'V2', 'V*AP', 'V*RH', 'AP2', 'AP*RH', 'RH2'] X = df[feature_cols] # Invoking the polynomial feature transform with degree 2 poly_reg = PolynomialFeatures(degree=2, interaction_only=False) X_poly = poly_reg.fit_transform(X) # Converting the array into a DataFrame X_poly = pd.DataFrame(X_poly, columns=poly_cols, index=df.index) #[a, b] #[1, a, b, a^2, ab, b^2] X_poly.head()**Applying the algorithms without the transformation**# applying normalization to the input data scaler = MinMaxScaler() X_new = scaler.fit_transform(df[['AT', 'V', 'AP', 'RH']]) # applying normalization to the output data target_scaler = MinMaxScaler() Y_new = target_scaler.fit_transform(df['PE'].values.reshape(-1,1)) # splitting the data into training and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_new, Y_new, test_size=0.4, random_state=333)**Applying the Random Forest**from sklearn.ensemble import RandomForestRegressor rnf = RandomForestRegressor(n_estimators=300) rnf.fit(X_train, y_train) from sklearn.metrics import mean_squared_error y_preds = rnf.predict(X_test) print("RMSE : {:.3f} ".format(mean_squared_error(y_test, y_preds, squared=False)))RMSE : 0.047**Applying an MLP**# initial layer and its type from tensorflow import keras from tensorflow.keras import layers # inputs # creating the input shape input_shape = (X_train.shape[1],) print(f'Shape das entradas: {input_shape}') # Creating the model model = keras.Sequential() model.add(layers.Dense(16, input_shape=input_shape, activation='relu')) model.add(layers.Dense(8, activation='relu')) model.add(layers.Dense(1, activation='linear')) # showing the model summary print(model.summary()) # training the model model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_squared_error']) model.fit(X_train, y_train, epochs=10, batch_size=1,
verbose=1, validation_split=0.2) y_pred = model.predict(X_test) np.set_printoptions(precision=2) print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)) from sklearn.metrics import r2_score r2_score(y_test,y_pred) print("RMSE : {:.3f} ".format(mean_squared_error(y_test, y_pred, squared=False)))RMSE : 0.057**Aplicando os algoritmos com a transformação**#aplicando a normalização para os dados scaler = MinMaxScaler() X_new = scaler.fit_transform(X_poly) #aplicando a normalização para os dados de saída target_scaler = MinMaxScaler() Y_new = target_scaler.fit_transform(df['PE'].values.reshape(-1,1)) #dividindo os dados entre treinamento e teste X_train, X_test, y_train, y_test = train_test_split(X_new, Y_new, test_size=0.4, random_state=333) #aplicando a floresta randômica como regressora rnf = RandomForestRegressor(n_estimators=300) rnf.fit(X_train, y_train) y_preds = rnf.predict(X_test) print("RMSE : {:.3f} ".format(mean_squared_error(y_test, y_preds, squared=False))) # criando o formato da entrada para o input_shape = (X_train.shape[1],) print(f'Shape das entradas: {input_shape}') # Criando o modelo model = keras.Sequential() model.add(layers.Dense(16, input_shape=input_shape, activation='relu')) model.add(layers.Dense(8, activation='relu')) model.add(layers.Dense(1, activation='linear')) #mostrando o modelo print(model.summary()) #treina o modelo model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_squared_error']) history=model.fit(X_train, y_train, epochs=30,batch_size=10, verbose=1, validation_split=0.2) history.history.keys() #história do treinamento plt.plot(history.history['mean_squared_error']) plt.plot(history.history['val_mean_squared_error']) plt.title('Erro médio quadrático') plt.ylabel('MSE') plt.xlabel('epoca') plt.legend(['treinamento', 'teste'], loc='upper left') plt.show() # história do treinamento plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Variação da função perda') plt.ylabel('loss') plt.xlabel('epoca') plt.legend(['treinamento', 'teste'], loc='upper left') plt.show() y_pred = model.predict(X_test) np.set_printoptions(precision=2) print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)) r2_score(y_test,y_pred)Kmeans Para visualizar la agrupación de los datos introducidos, se utilizó el algoritmo de Kmeans por lo que es necesario definir tanto el número de ventanas como la constante k. 
Por favor, defínalas a continuación:def kmeans (v, k): print (f'El tamaño definido de la ventana es: {v}') print (f'El número de agrupaciones definido es: {k}') x = series_to_array (df_A, v) df_A_copy = df_A.copy() df_A_values = df_A_copy.drop(df_A_copy.index[0:v],axis=0) kmeans = KMeans(n_clusters=k,random_state=10).fit(x) y_predict = kmeans.predict(x) plt.figure(figsize=(20,5)) plt.scatter(df_A_values.index, df_A_values['AEP_MW'], c=y_predict, cmap='tab20', s=7) plt.title('Consumo energético en el tiempo', fontweight="bold", fontsize=16) plt.xlabel('Tiempo', fontsize=12) plt.ylabel('Energía (MW)', fontsize=12) plt.show() scaler = StandardScaler() AEP_s = scaler.fit_transform(x) AEP_PCA = PCA(n_components = 2) AEP_PCA.fit(AEP_s) AEP_results = AEP_PCA.transform(AEP_s) x_PCA = pd.DataFrame(AEP_results) x_PCA.columns = ['One', 'Two'] plt.figure(figsize=(20,5)) plt.scatter(x_PCA['One'], x_PCA['Two'], c=y_predict, cmap='tab20', s=7) plt.title('Vizualización de agrupación 2D', fontweight="bold", fontsize=16) plt.xlabel('Component 1', fontsize=12) plt.ylabel('Component 2', fontsize=12) plt.show() widgets.interact(kmeans, v=(1, 30, 1), k =(1, 15, 1))Some common 'tricks'When modelling problems in ASP, it turns out that there are some 'tricks' that come in handy a lot of the time. Here we'll run through some of the most common of these tricks.Let's start with setting up a function to print answer sets of a program:import clingo def print_answer_sets(program): control = clingo.Control() control.add("base", [], program) control.ground([("base", [])]) control.configuration.solve.models = 0 for model in control.solve(yield_=True): sorted_model = [str(atom) for atom in model.symbols(shown=True)] sorted_model.sort() print("Answer set: {{{}}}".format(", ".join(sorted_model)))Generating The following tricks are useful for generating the right search space. Generating assignments to binary variablesYou can generate all assignments to a set of binary variables as follows (e.g., truth assignments).print_answer_sets(""" var(1..3). true(X) :- not false(X), var(X). false(X) :- not true(X), var(X). #show true/1. #show false/1. """)Answer set: {false(1), false(2), false(3)} Answer set: {false(1), false(3), true(2)} Answer set: {false(1), false(2), true(3)} Answer set: {false(1), true(2), true(3)} Answer set: {false(2), false(3), true(1)} Answer set: {false(2), true(1), true(3)} Answer set: {false(3), true(1), true(2)} Answer set: {true(1), true(2), true(3)}Or alternatively like this:print_answer_sets(""" var(1..3). 1 { true(X); false(X) } 1 :- var(X). #show true/1. #show false/1. """)Answer set: {false(2), false(3), true(1)} Answer set: {false(3), true(1), true(2)} Answer set: {false(2), true(1), true(3)} Answer set: {true(1), true(2), true(3)} Answer set: {false(1), false(2), true(3)} Answer set: {false(1), false(2), false(3)} Answer set: {false(1), true(2), true(3)} Answer set: {false(1), false(3), true(2)}Generating assignments to $n$-ary variablesGenerating all assignments to variables with domains of more than two elements, you can do like this:print_answer_sets(""" var(1..2). value(1..4). assign(X,V) :- var(X), value(V), not assign(X,V') : value(V'), V' != V. #show assign/2. 
""")Answer set: {assign(1,1), assign(2,3)} Answer set: {assign(1,1), assign(2,2)} Answer set: {assign(1,1), assign(2,1)} Answer set: {assign(1,1), assign(2,4)} Answer set: {assign(1,4), assign(2,1)} Answer set: {assign(1,2), assign(2,1)} Answer set: {assign(1,3), assign(2,1)} Answer set: {assign(1,2), assign(2,3)} Answer set: {assign(1,2), assign(2,2)} Answer set: {assign(1,2), assign(2,4)} Answer set: {assign(1,4), assign(2,2)} Answer set: {assign(1,3), assign(2,2)} Answer set: {assign(1,4), assign(2,4)} Answer set: {assign(1,4), assign(2,3)} Answer set: {assign(1,3), assign(2,4)} Answer set: {assign(1,3), assign(2,3)}Or a bit more compactly/intuitively, like this:print_answer_sets(""" var(1..2). value(1..4). 1 { assign(X,V) : value(V) } 1 :- var(X). #show assign/2. """)Answer set: {assign(1,3), assign(2,4)} Answer set: {assign(1,3), assign(2,1)} Answer set: {assign(1,3), assign(2,2)} Answer set: {assign(1,3), assign(2,3)} Answer set: {assign(1,4), assign(2,4)} Answer set: {assign(1,4), assign(2,2)} Answer set: {assign(1,4), assign(2,3)} Answer set: {assign(1,4), assign(2,1)} Answer set: {assign(1,2), assign(2,3)} Answer set: {assign(1,2), assign(2,1)} Answer set: {assign(1,2), assign(2,4)} Answer set: {assign(1,2), assign(2,2)} Answer set: {assign(1,1), assign(2,3)} Answer set: {assign(1,1), assign(2,4)} Answer set: {assign(1,1), assign(2,2)} Answer set: {assign(1,1), assign(2,1)}Generating one-to-one assignmentsIf you have two sets of equal size, and you want to generate all one-to-one assignments between these sets, you can do that as follows:print_answer_sets(""" side1(1..3). side2(a;b;c). 1 { match(S1,S2) : side1(S1) } 1 :- side2(S2). 1 { match(S1,S2) : side2(S2) } 1 :- side1(S1). #show match/2. """)Answer set: {match(1,c), match(2,b), match(3,a)} Answer set: {match(1,c), match(2,a), match(3,b)} Answer set: {match(1,b), match(2,c), match(3,a)} Answer set: {match(1,b), match(2,a), match(3,c)} Answer set: {match(1,a), match(2,c), match(3,b)} Answer set: {match(1,a), match(2,b), match(3,c)}Generating one-to-many assignmentsIf you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate all assignments where every element in $S_1$ is assigned to exactly one element in $S_2$ (but not vice versa), then this is a way to do that:print_answer_sets(""" side1(1..2). side2(a;b;c). 1 { match(S1,S2) : side2(S2) } 1 :- side1(S1). #show match/2. """)Answer set: {match(1,c), match(2,a)} Answer set: {match(1,c), match(2,b)} Answer set: {match(1,c), match(2,c)} Answer set: {match(1,b), match(2,a)} Answer set: {match(1,b), match(2,c)} Answer set: {match(1,b), match(2,b)} Answer set: {match(1,a), match(2,a)} Answer set: {match(1,a), match(2,b)} Answer set: {match(1,a), match(2,c)}Generating arbitrary assignmentsIf you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate *all possible* (partial) assignments, you can do this:print_answer_sets(""" side1(1..2). side2(a;b). { match(S1,S2) : side1(S1) } :- side2(S2). #show match/2. 
""")Answer set: {} Answer set: {match(1,b)} Answer set: {match(2,a)} Answer set: {match(1,b), match(2,a)} Answer set: {match(2,b)} Answer set: {match(2,a), match(2,b)} Answer set: {match(1,b), match(2,b)} Answer set: {match(1,b), match(2,a), match(2,b)} Answer set: {match(1,a)} Answer set: {match(1,a), match(1,b)} Answer set: {match(1,a), match(2,b)} Answer set: {match(1,a), match(1,b), match(2,b)} Answer set: {match(1,a), match(2,a)} Answer set: {match(1,a), match(2,a), match(2,b)} Answer set: {match(1,a), match(1,b), match(2,a)} Answer set: {match(1,a), match(1,b), match(2,a), match(2,b)}Generating injective assignmentsIf you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate all assignments where every element in $S_1$ is assigned to exactly one element in $S_2$ (but not vice versa) that are *injective* (i.e., no two elements in $S_1$ are assigned to the same element of $S_2$), you can do that as follows:print_answer_sets(""" side1(1..2). side2(a;b;c). { match(S1,S2) : side1(S1) } 1 :- side2(S2). 1 { match(S1,S2) : side2(S2) } 1 :- side1(S1). #show match/2. """)Answer set: {match(1,c), match(2,a)} Answer set: {match(1,c), match(2,b)} Answer set: {match(1,b), match(2,a)} Answer set: {match(1,b), match(2,c)} Answer set: {match(1,a), match(2,b)} Answer set: {match(1,a), match(2,c)}Generating arbitrary subsetsSelecting an arbitrary subset of elements from a given set can be done as follows:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). #show select/1. """)Answer set: {} Answer set: {select(b)} Answer set: {select(c)} Answer set: {select(b), select(c)} Answer set: {select(a)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}Or alternatively like this:print_answer_sets(""" element(a;b;c). { select(E) : element(E) }. #show select/1. """)Answer set: {} Answer set: {select(b)} Answer set: {select(c)} Answer set: {select(b), select(c)} Answer set: {select(a)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}Generating subsets of size $k$If you want to generate all subsets that are of size *exactly* $k$, you can do this:print_answer_sets(""" element(a;b;c). 2 { select(E) : element(E) } 2. #show select/1. """)Answer set: {select(b), select(c)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)}Generating subsets of size $\leq k$If you want to generate all subsets that are of size *at most* $k$, you can do this:print_answer_sets(""" element(a;b;c). { select(E) : element(E) } 2. #show select/1. """)Answer set: {} Answer set: {select(c)} Answer set: {select(b)} Answer set: {select(b), select(c)} Answer set: {select(a)} Answer set: {select(a), select(b)} Answer set: {select(a), select(c)}Generating subsets of size $\geq k$If you want to generate all subsets that are of size *at least* $k$, you can do this:print_answer_sets(""" element(a;b;c). 2 { select(E) : element(E) }. #show select/1. """)Answer set: {select(b), select(c)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b), select(c)} Answer set: {select(a), select(b)}ConstraintsThe following tricks are useful for filtering out incorrect solutions, after you have generated a search space.We will illustrate these with the example case where we generated an arbitrary subset of elements, but the same tricks apply also to the other cases. 
Basic constraintsIf you want to ensure that `something` is **true**, you can add the constraint `:- not something.` (which can be read as: "It is not the case that `something` is not true").For example, if you want to ensure that `a` is selected, you can use `:- not select(a).`.print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- not select(a). #show select/1. """)Answer set: {select(a)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}If you want to ensure that `something` is **false**, you can add the constraint `:- something.` (which can be read as: "It is not the case that `something` is true").For example, if you want to ensure that `a` is **not** selected, you can use `:- select(a).`.print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- select(a). #show select/1. """)Answer set: {} Answer set: {select(c)} Answer set: {select(b)} Answer set: {select(b), select(c)}AND-constraintsIf you want to ensure that both `thing1` and `thing2` are true, you can define a new predicate (e.g., `my_property`), add rules that express that `my_property` is true if both `thing1` and `thing2` are true, and add a constraint that says that `my_property` must be true.For example:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). my_property :- select(a), select(b). :- not my_property. #show select/1. """)Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}This strategy (of defining a new predicate, defining when exactly this predicate is true, and requiring it to be true) works for more complicated cases as well.In this simple example, we could easily have done without the new predicate too, e.g.:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- not select(a). :- not select(b). #show select/1. """)Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}OR-constraintsIf you want to ensure that `thing1` is true **or** `thing2` is true, you can use the strategy of introducing a new predicate, like this:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). my_property :- select(a). my_property :- select(b). :- not my_property. #show select/1. """)Answer set: {select(b)} Answer set: {select(b), select(c)} Answer set: {select(a)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}Or you can add a constraint `:- not thing1, not thing2.` (which can be read as: "It is not the case that both `thing1` is false and `thing2` is false").For example:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- not select(a), not select(b). #show select/1. """)Answer set: {select(b)} Answer set: {select(b), select(c)} Answer set: {select(a)} Answer set: {select(a), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}IMPLIES-constraintsIf you want to express that if `thing1` is true, then also `thing2` must be true, you can do that like this, for example:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- select(a), not select(b). #show select/1. """)Answer set: {} Answer set: {select(c)} Answer set: {select(b)} Answer set: {select(b), select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}Again, also here the strategy of defining a new predicate would work well. 
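As a quick sketch of that strategy for the same IMPLIES-constraint (the predicate name `a_implies_b` is arbitrary; the `print_answer_sets` helper from above is reused):

print_answer_sets("""
    element(a;b;c).
    { select(E) } :- element(E).
    a_implies_b :- select(b).
    a_implies_b :- not select(a).
    :- not a_implies_b.
    #show select/1.
""")

This yields the same answer sets as the single constraint `:- select(a), not select(b).` shown above.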
IF-AND-ONLY-IF-constraintsIf you want to express that two things must either both be true, or both be false, you can do that by using two 'if-then' constraints. For example like this:print_answer_sets(""" element(a;b;c). { select(E) } :- element(E). :- select(a), not select(b). :- not select(a), select(b). #show select/1. """)Answer set: {} Answer set: {select(c)} Answer set: {select(a), select(b)} Answer set: {select(a), select(b), select(c)}Solutionnum_players = 10 b = Board(num_players=num_players) d = Dice() curr_player = 0 b.reset() b.set_current_player(curr_player) while True: steps = d.roll() print('player %d makes %d steps' % (curr_player+1, steps)) b.make_move(steps) if b.at_ladder_start(): p1 = b.get_position() b.move_to_ladder_end() p2 = b.get_position() print('player %d took ladder %d -> %d' % (curr_player+1, p1, p2)) if b.at_snake_head(): p1 = b.get_position() b.move_to_snake_tail() p2 = b.get_position() print('player %d was eaten by a snake %d -> %d' % (curr_player+1, p1, p2)) if b.player_won(): print('player %d WON!!!!!!'% (curr_player+1)) b.print_player_positions() break curr_player += 1 curr_player = curr_player % num_players b.set_current_player(curr_player)player 1 makes 1 steps player 2 makes 1 steps player 3 makes 3 steps player 3 took ladder 3 -> 34 player 4 makes 1 steps player 5 makes 4 steps player 6 makes 3 steps player 6 took ladder 3 -> 34 player 7 makes 1 steps player 8 makes 1 steps player 9 makes 1 steps player 10 makes 6 steps player 1 makes 2 steps player 1 took ladder 3 -> 34 player 2 makes 3 steps player 3 makes 2 steps player 4 makes 6 steps player 5 makes 2 steps player 6 makes 4 steps player 7 makes 1 steps player 8 makes 2 steps player 8 took ladder 3 -> 34 player 9 makes 2 steps player 9 took ladder 3 -> 34 player 10 makes 3 steps player 1 makes 1 steps player 2 makes 5 steps player 3 makes 5 steps player 4 makes 1 steps player 5 makes 4 steps player 5 took ladder 10 -> 24 player 6 makes 3 steps player 7 makes 2 steps player 8 makes 4 steps player 9 makes 1 steps player 10 makes 1 steps player 10 took ladder 10 -> 24 player 1 makes 1 steps player 2 makes 3 steps player 3 makes 5 steps player 4 makes 4 steps player 5 [...]Evaluate a Siamese model: Ungraded Lecture Notebookimport trax.fastmath.numpy as npInspecting the necessary elements In this lecture notebook you will learn how to evaluate a Siamese model using the accuracy metric. Because there are many steps before evaluating a Siamese network (as you will see in this week's assignment) the necessary elements and variables are replicated here using real data from the assignment: - `q1`: vector with dimension `(batch_size X max_length)` containing first questions to compare in the test set. - `q2`: vector with dimension `(batch_size X max_length)` containing second questions to compare in the test set. **Notice that for each pair of vectors within a batch $([q1_1, q1_2, q1_3, ...]$, $[q2_1, q2_2,q2_3, ...])$ $q1_i$ is associated to $q2_k$.** - `y_test`: 1 if $q1_i$ and $q2_k$ are duplicates, 0 otherwise. - `v1`: output vector from the model's prediction associated with the first questions. - `v2`: output vector from the model's prediction associated with the second questions. You can inspect each one of these variables by running the following cells:q1 = np.load('q1.npy') print(f'q1 has shape: {q1.shape} \n\nAnd it looks like this: \n\n {q1}\n\n')q1 has shape: (512, 64) And it looks like this: [[ 32 38 4 ... 1 1 1] [ 30 156 78 ... 1 1 1] [ 32 38 4 ... 1 1 1] ... [ 32 33 4 ... 
1 1 1] [ 30 156 317 ... 1 1 1] [ 30 156 6 ... 1 1 1]]Notice those 1s on the right-hand side? Hope you remember that the value of `1` was used for padding.q2 = np.load('q2.npy') print(f'q2 has shape: {q2.shape} \n\nAnd looks like this: \n\n {q2}\n\n') y_test = np.load('y_test.npy') print(f'y_test has shape: {y_test.shape} \n\nAnd looks like this: \n\n {y_test}\n\n') v1 = np.load('v1.npy') print(f'v1 has shape: {v1.shape} \n\nAnd looks like this: \n\n {v1}\n\n') v2 = np.load('v2.npy') print(f'v2 has shape: {v2.shape} \n\nAnd looks like this: \n\n {v2}\n\n')v1 has shape: (512, 128) And looks like this: [[ 0.01273625 -0.1496373 -0.01982759 ... 0.02205012 -0.00169148 -0.01598107] [-0.05592084 0.05792497 -0.02226785 ... 0.08156938 -0.02570007 -0.00503111] [ 0.05686752 0.0294889 0.04522024 ... 0.03141788 -0.08459651 -0.00968536] ... [ 0.15115018 0.17791134 0.02200656 ... -0.00851707 0.00571415 -0.00431194] [ 0.06995274 0.13110274 0.0202337 ... -0.00902792 -0.01221745 0.00505962] [-0.16043712 -0.11899089 -0.15950686 ... 0.06544471 -0.01208312 -0.01183368]] v2 has shape: (512, 128) And looks like this: [[ 0.07437647 0.02804951 -0.02974014 ... 0.02378932 -0.01696189 -0.01897198] [ 0.03270066 0.15122835 -0.02175895 ... 0.00517202 -0.14617395 0.00204823] [ 0.05635608 0.05454165 0.042222 ... 0.03831453 -0.05387777 -0.01447786] ... [ 0.04727105 -0.06748016 0.04194937 ... 0.07600753 -0.03072828 0.00400715] [ 0.00269269 0.15222628 0.01714724 ... 0.01482705 -0.0197884 0.01389[...]Calculating the accuracyYou will calculate the accuracy by iterating over the test set and checking if the model predicts right or wrong.The first step is to set the accuracy to zero:accuracy = 0You will also need the `batch size` and the `threshold` that determines if two questions are the same or not. **Note :A higher threshold means that only very similar questions will be considered as the same question.**batch_size = 512 # Note: The max it can be is y_test.shape[0] i.e all the samples in test data threshold = 0.7 # You can play around with threshold and then see the change in accuracy.In the assignment you will iterate over multiple batches of data but since this is a simplified version only one batch is provided. **Note: Be careful with the indices when slicing the test data in the assignment!**The process is pretty straightforward: - Iterate over each one of the elements in the batch - Compute the cosine similarity between the predictions - For computing the cosine similarity, the two output vectors should have been normalized using L2 normalization meaning their magnitude will be 1. This has been taken care off by the Siamese network you will build in the assignment. Hence the cosine similarity here is just dot product between two vectors. You can check by implementing the usual cosine similarity formula and check if this holds or not. 
- Determine if this value is greater than the threshold (If it is, consider the two questions as the same and return 1 else 0) - Compare against the actual target and if the prediction matches, add 1 to the accuracy (increment the correct prediction counter) - Divide the accuracy by the number of processed elementsfor j in range(batch_size): # Iterate over each one of the elements in the batch d = np.dot(v1[j],v2[j]) # Compute the cosine similarity between the predictions as l2 normalized, ||v1[j]||==||v2[j]||==1 so only dot product is needed res = d > threshold # Determine if this value is greater than the threshold (if it is consider the two questions as the same) accuracy += (y_test[j] == res) # Compare against the actual target and if the prediction matches, add 1 to the accuracy accuracy = accuracy / batch_size # Divide the accuracy by the number of processed elements print(f'The accuracy of the model is: {accuracy}')The accuracy of the model is: 0.7421875import time from sklearn.metrics import accuracy_scoreThis notebook was inspired from https://github.com/bentrevett/pytorch-sentiment-analysis. Great thanks to the authors! Data setupimport torch from torch.utils.data import TensorDataset from torch.utils.data import DataLoader def get_data(seq_len, inp_dim, device, data_size=25000): data = torch.randint(low=0, high=inp_dim, size=(data_size, seq_len), out=None, device=device) labels = torch.abs(data[:, 0]) train_data = TensorDataset(data[:int(0.7*data_size)], labels[:int(0.7*data_size)]) valid_data = TensorDataset(data[int(0.7*data_size): int(0.85*data_size)], labels[int(0.7*data_size): int(0.85*data_size)]) test_data = TensorDataset(data[int(0.85*data_size): int(data_size)], labels[int(0.85*data_size): int(data_size)]) train_data_loader = DataLoader(train_data, batch_size=64) valid_data_loader = DataLoader(valid_data, batch_size=64) test_data_loader = DataLoader(test_data, batch_size=64) return train_data_loader, valid_data_loader, test_data_loaderModel definitionimport torch.nn as nn class RNN(nn.Module): def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim): super().__init__() self.embedding = nn.Embedding(input_dim, embedding_dim) self.rnn = nn.RNN(embedding_dim, hidden_dim) self.fc = nn.Linear(hidden_dim, output_dim) def forward(self, text): #text = [seq len, batch size] embedded = self.embedding(text) #embedded = [seq len, batch size, emb dim] output, hidden = self.rnn(embedded) #output = [seq len, batch size, hid dim] #hidden = [1, batch size, hid dim] assert torch.equal(output[-1,:,:], hidden.squeeze(0)) return self.fc(hidden.squeeze(0)) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad)Loss functionimport torch.nn.functional as F loss_func = F.cross_entropyOptimizerimport torch.optim as optim def evaluate(model, data_iterator, loss_func): epoch_loss = 0 epoch_acc = 0 model.eval() with torch.no_grad(): for inp, label in data_iterator: predictions = model(inp.t()).squeeze(1) loss = loss_func(predictions, label) acc = accuracy_score(torch.argmax(predictions, dim=1).cpu().detach().numpy(), label.cpu().numpy()) epoch_loss += loss.item() epoch_acc += acc return epoch_acc / len(data_iterator), epoch_loss / len(data_iterator)Trainingdef train_model(model, train_data, valid_data, loss_func, optimizer, epochs=5): for epoch in range(epochs): model.train() epoch_loss = 0 epoch_acc = 0 tic = time.time() for inp, label in train_data: predictions = model(inp.t()).squeeze(1) loss = loss_func(predictions, label) loss.backward() 
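            # The gradients computed by loss.backward() are applied by optimizer.step() and then
            # cleared by optimizer.zero_grad(); zeroing after the step (rather than before the
            # backward pass) still works because it happens before the next call to backward(),
            # so gradients do not accumulate across batches.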
optimizer.step() optimizer.zero_grad() epoch_loss += loss.item() toc = time.time() train_acc, _ = evaluate(model, train_data, loss_func) acc, _ = evaluate(model, valid_data, loss_func) toe = time.time() print(len(train_data)) print('Loss at epoch %d : %f, train acc : %f, valid acc : %f | train time : %d sec, eval time : %d sec' % (epoch, epoch_loss / len(train_data), train_acc, acc, toc-tic, toe - toc)) SEQ_LEN = 10 INPUT_DIM = 50 OUTPUT_DIM = INPUT_DIM EMBEDDING_DIM = 32 HIDDEN_DIM = 256 N_LAYERS = 1 BIDIRECTIONAL = False DROPOUT = 0 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_data_loader, valid_data_loader, test_data_loader = get_data(SEQ_LEN, INPUT_DIM, device=device, data_size=100000) model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM) #, N_LAYERS, BIDIRECTIONAL, DROPOUT) model = model.to(device) optimizer = optim.Adam(model.parameters(), weight_decay=0.00001) print(f'The model has {count_parameters(model):,} trainable parameters') train_model(model, train_data_loader, valid_data_loader, loss_func, optimizer, epochs=15)**File handling in Google Colab** Mounting google drive into Colabfrom google.colab import drive drive.mount('/content/gdrive/',force_remount = True)Mounted at /content/gdrive/List content of directory!ls !ls -ltotal 16 -rw-r--r-- 1 root root 26 Nov 8 13:06 data.txt drwx------ 5 root root 4096 Nov 8 13:03 gdrive drwxr-xr-x 1 root root 4096 Nov 1 13:35 sample_data drwxr-xr-x 2 root root 4096 Nov 8 13:05 sample_folderCreate a directorymkdir sample_folder lsdata.txt gdrive/ sample_data/ sample_folder/Create files!echo "The sample data to check." > data.txt lsdata.txt gdrive/ sample_data/ sample_folder/Downloading files from internet! wget https://github.com/Amit32624/dbpedia_prj/blob/master/med_devs_clean_data.csv ls -ltotal 644 -rw-r--r-- 1 root root 26 Nov 8 13:16 data.txt drwx------ 5 root root 4096 Nov 8 13:03 gdrive/ -rw-r--r-- 1 root root 642378 Nov 8 13:20 med_devs_clean_data.csv drwxr-xr-x 1 root root 4096 Nov 1 13:35 sample_data/ drwxr-xr-x 2 root root 4096 Nov 8 13:05 sample_folder/read filescat data.txt # reading file in python data = open("data.txt","r") data_con = data.read() data_conAccessing google drive form google colab! ls "/content/gdrive/My Drive/Colab Notebooks/"'affineTransformations (1).ipynb' DecisionTrees.ipynb affineTransformations.ipynb linear_Algrebra_1.ipynb 'autoencoder (1).ipynb' LinearRegression_Basics.ipynb autoencoder.ipynb Untitled0.ipynb 'Copy of Welcome to Colaboratory' Untitled1.ipynbCopy files from Google Drive into your Google Cola!pwd cd "/content/gdrive/My Drive/Colab Notebooks/" pwdThe ExerciseIn this exercise you'll implement a neural network to perform segmentation on the Oxford Pets dataset (or another dataset, if you prefer...). The provided notebook is an acceptable solution, but perhaps you can do better by building a model from scratch, starting from an alternative model, or importing a model designed to perform segmentation such as UNet. Step 1: Download and prepare the dataThe Oxford Pets dataset can be found here: [http://www.robots.ox.ac.uk/~vgg/data/pets/](http://www.robots.ox.ac.uk/~vgg/data/pets/)Download the dataset and the ground truth data. Then, read and process the images and their matching trimap masks. You can leverage the code from the Jupyter notebook extensively to do this.During this process you will have to change the way training data is prepared from the trimaps from what is in the notebook. 
Specifically, you should create three output feature maps (one for each: dog, cat and background) instead of two (one for dog, one for cat). The feature maps for the "dog" and "cat" layer will be identical to the ones in the notebook, and all three will still only contain values of 0 or 1, where zero indicates "this pixel is not a member of this feature-map's class" and one indicates "this pixel is a member of this feature-map's class". The new feature-map will contain 1's for pixels that are a part of the background, and 0's for pixels that are NOT a part of the background. Step 2: Build the modelYou can again leverage much of the code in the notebook, but critically to support the changes we made to our training data you will have to update the number of kernels (from 2 to 3) and activation function (from sigmoid to softmax) in the final layer. You'll also need to change the loss function (from binary cross entropy to categorial cross entropy). Step 3: Train and evaluate the modelDid your changes result in any significant improvements on the models performance on this dataset?# All the imports: from collections import namedtuple import csv from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input from tensorflow.keras import backend as K from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout, Flatten, Conv2DTranspose, add as keras_add from tensorflow.keras.models import Model, Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.utils import to_categorical import matplotlib.pyplot as plt import matplotlib.patches as patches import numpy as np import os from PIL import Image, ImageOps # First, we have to download and prepare the data. We're using the Oxford Pet dataset # linked above. You need the "dataset" and the "groundtruth data" listed under Downloads # Modify these to fit wherever you save the data. PATH_TO_IMAGES = './OxfordPets/images/' PATH_TO_CLASS_LIST = './OxfordPets/annotations/list.txt' PATH_TO_TRIMAPS = './OxfordPets/annotations/trimaps' # Change if you wish. TARGET_SIZE = (224, 224) # This should look familiar by now. We removed the bounding box specific stuff. # We will be using this process on BOTH the input images and the trimaps. def resize_image(path_to_image, target_size=None, pad_with_value=0): image = Image.open(path_to_image) width, height = image.size w_pad = 0 h_pad = 0 bonus_h_pad = 0 bonus_w_pad = 0 if width > height: pix_diff = (width - height) h_pad = pix_diff // 2 bonus_h_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side. elif height > width: pix_diff = (height - width) w_pad = pix_diff // 2 bonus_w_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side. # else: image is already square. Both pads stay 0 image = ImageOps.expand(image, (w_pad, h_pad, w_pad+bonus_w_pad, h_pad+bonus_h_pad), pad_with_value) if target_size is not None: # Note, width and height have changed due to the padding resize. # Update our notions so we get the scale factor right width, height = image.size image = image.resize(target_size) width_scale = target_size[0] / width height_scale = target_size[1] / height # This is a change, this function now handles data with either 1 or 3 color # channels. We can detect this from the shape of the np.array so we've broken # this line into 3. image_as_array = np.array(image.getdata()) if len(image_as_array.shape) == 2: # This is required to handle transparency, which some of the JPGs contain. 
# We are essentially removing the alpha channel. if image_as_array.shape[1] == 4: image_as_array = image_as_array[:, 0:3] image_data = image_as_array.reshape(image.size[0], image.size[1], 3) else: image_data = image_as_array.reshape(image.size[0], image.size[1]) # Image data is a 3D array, 3 channels (RGB) of target_size. # RBG values are from 0-255. Later in this notebook we preprocess # those images with the MobileNetV2 preprocess input function. return image_data # Given a reshaped trimap, reduce it such that it contains # 2 values: 0 for "not object of interest" and 1 for "object of interest" # Note that, this transform includes the "unclassified" border zone # as part of the background. If you wanted to explicitly classify # the border seprately from the image you'd have to make this transform # more involved, and not reduce the trimap to jsut values. # Similarly, if you had examples with dogs and cats in the same image # This reduction step would have to account for those values differently def reduce_trimap_values(trimap): return np.where((trimap == 1), 1, 0) # Given the relevant data from a row of the CSV return a reshaped image # as well the reshaped trimap def prepare_sample_from_name(sample_name): path_to_image = os.path.join(PATH_TO_IMAGES, sample_name + '.jpg') path_to_trimap = os.path.join(PATH_TO_TRIMAPS, sample_name + '.png') image_data = resize_image(path_to_image, TARGET_SIZE) # Note that in OUR TRIMAPS 1 is the value for "background" # The final value of 2 is specific to our dataset!! trimap = resize_image(path_to_trimap, TARGET_SIZE, 2) return (image_data, trimap) # Plot just the image, or just the trimap, by leaving them none. # If both are specified the trimap will be printed on top of the image # in a semi-transparent way. I love how easy this one is. def plot_with_trimap_overlay(image=None, trimap=None): if image is not None: plt.imshow(image) if trimap is not None: plt.imshow(trimap, alpha=.5) plt.colorbar() plt.show() # Okay, lets get all our samples processed. After this we'll prepare the # data and labels for our network and perform a validation split. processed_data = [] # Processing all this data takes some time... # Took my laptop roughly 8 minutes with open(PATH_TO_CLASS_LIST) as csv_list_file: csv_reader = csv.reader(csv_list_file, delimiter=' ') for row in csv_reader: if row[0].startswith('#'): continue # Unpack for readability sample_name, class_id, species, breed_id = row # Not every image has a bounding box, some files are missing. # Use a try/except block to ignore such samples try: image, trimap = prepare_sample_from_name(sample_name) except FileNotFoundError: # More images have their trimap than had their bounding box # which is a small surprise. print(f'No annotations for {sample_name}: skipped.') continue # species - 1 so cat = 0 and dog = 1. # Makes things a little easier to process data_tuple = (image, int(species) - 1, trimap) processed_data.append(data_tuple) print(f'Processed {len(processed_data)} samples') # Make it a numpy array processed_data = np.array(processed_data) # This time around, instead of training two outputs on a single network # we're going to have the final output layer produce 3 activation maps # One activation map each for the location of cats, dogs, and background # In our dataset, for the true labels, one of these panes will always # be empty (all 0), because our dataset never has images with a cat and # a dog in the same image. 
# We'll test our network afterwards on a few images with both # dogs and cats, and see what happens... cross your fingers, but don't hold your breath! x_train = [] y_train = [] x_validation = [] y_validation = [] validation_split = 0.2 # Notice that we're applying the preprocess_input function to the images here. # Also note that our labels are shaped (w, h, 3), one pane each for "cat, dog, background" for image, species, trimap in processed_data: processed_image = preprocess_input(image) # 3 panes each with binary values. Note they mutually exclusive, each pixel can be "on" # in exactly ONE of the three following maps. cat_segmentation = reduce_trimap_values(trimap) if species == 0 else np.zeros(trimap.shape) dog_segmentation = reduce_trimap_values(trimap) if species == 1 else np.zeros(trimap.shape) bg_segmentation = np.logical_not(reduce_trimap_values(trimap)) if np.random.random() > validation_split: x_train.append(processed_image) y_train.append([cat_segmentation, dog_segmentation, bg_segmentation]) else: x_validation.append(processed_image) y_validation.append([cat_segmentation, dog_segmentation, bg_segmentation]) x_train = np.array(x_train) y_train = np.array(y_train) x_validation = np.array(x_validation) y_validation = np.array(y_validation) # Because of how we appeneded the cat and dog data to the labels # we need to change the dimensions before prediction so that the # color channels are where the network expects them: y_train = np.rollaxis(y_train, 1, 4) y_validation = np.rollaxis(y_validation, 1, 4)Step 2: Build the model¶You can again leverage much of the code in the notebook, but critically to support the changes we made to our training data you will have to update the number of kernels (from 2 to 3) and activation function (from sigmoid to softmax) in the final layer. You'll also need to change the loss function (from binary cross entropy to categorial cross entropy).# We don't have a TON of data, but we could actually do some data augmentation # Because the label data is in the same format as the input. We'd have to # transform the labels which is not built into Keras, but might be a good exercise # for later... # Lets still use transfer learning # Like before, we grab a pretrained model with include_top=False base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(TARGET_SIZE[0], TARGET_SIZE[1], 3)) # We're going to lop off the last few layers, which have most # likely learned the highest level features specific to imagenet chopped_mobilenet = Model(inputs=[base_model.input], outputs=[base_model.layers[90].output]) # Now, we have to use "Convolution Transpose" sometimes called "Deconvolution" to # re-increase the resolution, since MobileNetV2 significantly reduces the input size # Drawing on inspiration from U-Net, we're also going to add some symmetric skip connections # which should make us feel *very* fancy. 
segmentation_output = Conv2DTranspose(32, kernel_size=(3, 3), strides=(2, 2), padding='same', activation='relu')(chopped_mobilenet.output) segmentation_output = Conv2DTranspose(24, kernel_size=(3, 3), strides=(2, 2), padding='same', activation='relu')( keras_add([segmentation_output, base_model.layers[45].output]) ) segmentation_output = Conv2DTranspose(24, kernel_size=(3, 3), strides=(2, 2), padding='same', activation='relu')( keras_add([segmentation_output, base_model.layers[27].output]) ) # Note: we use 3 frames, one for cat one for dog one for background, and we # and likely improve our result, especially with respect to generalization and finding # BOTH cats and dogs in the same image. Plus, it would allow us to detect the absense # of either cats or dogs more readily. segmentation_output = Conv2DTranspose(3, kernel_size=(3, 3), strides=(2, 2), padding='same', activation='softmax')(segmentation_output) model = Model(inputs=[chopped_mobilenet.input], outputs=[segmentation_output]) model.summary()문제input은 int n을 입력받아 첫번째 row는 (n-1)의 O와 X, 두번째 row는 (n-2)의 O와 XX, 세번째 row는 (n-3)의 O와 XXX,.n번째 row는 n개의 X을 출력하라 풀이def ox(n): for i in range(1, n + 1): print('0' * (n - i) + 'X' * i) ox(7)000000X 00000XX 0000XXX 000XXXX 00XXXXX 0XXXXXX XXXXXXXWebscraping Book FeaturesAuthor: License: MITThis notebook extracts book-specific features, such as ratings, reviews, and book size dimensions and page numberfrom __future__ import print_function, division from bs4 import BeautifulSoup import requests import pandas as pd import numpy as np import collections import reGlobal Variablesmax_range = 250. #set max records per file to be saved incrementally location = 'random' #set library branch set_variable = 7Field-Level Functionsimport ast #Definition for finding information that is next to a text field def find_text(textsoup, field): info = textsoup.find(text=re.compile(field)) if info: return info.findNext().text.strip() else: return 'N/A' #function to extract book description def find_description(textsoup): try: dictionary_string = textsoup.find("script",text=re.compile("@graph")).text #this is a dictionary string book_dict = ast.literal_eval(dictionary_string) #turn it into an actual dict book_details = [] #empty list to hold book details book_sub_dict = book_dict.get("@graph")[0] ratings_dict = book_sub_dict.get("aggregateRating") try: #Avg Rating book_details.append(ratings_dict.get("ratingValue")) except: book_details.append("N/A") try: #num of Ratings book_details.append(ratings_dict.get("ratingCount")) except: book_details.append(0) try: #num of Reviews book_details.append(ratings_dict.get("reviewCount")) except: book_details.append(0) try: #Hardcover/Softcover book_details.append(book_sub_dict.get("bookFormat").get("@id")) except: book_details.append("N/A") try: #Subject areas book_details.append(book_sub_dict.get("about")) except: book_details.append("N/A") try: #URL to book image book_details.append(book_sub_dict.get("image")) except: book_details.append("N/A") try: #Book description if len(book_sub_dict.get("description")[0]) > 1: book_details.append(book_sub_dict.get("description")[0]) else: book_details.append(book_sub_dict.get("description")) #some cases this is needed except: book_details.append("N/A") return book_details except: return 7*['N/A']Book-Level Functiondef get_book_data(url_row): response = requests.get(f"{url_row}?active_tab=bib_info") #take in the URL webpage = response.text soup = BeautifulSoup(webpage, "lxml") this_book_data = [url_row] 
this_book_data.append(find_text(soup,'Characteristic')) #Number of Pages, Book Size this_book_data.append(find_text(soup,'Branch Call Number')) #Library Call Number this_book_data = this_book_data + find_description(soup) #concat two lists return this_book_dataData Cleaning Functions#Extract the page count def get_page_count(row): try: row = row.replace(" unnumbered","") #handle cases where there are unnumbered pages if 'pages' in row: if len(row.split(' page')[0].strip().split(" ")) == 2: return row.split(' page')[0].strip().split(" ")[-1] else: return row.split(' page')[0].strip() else: return 'N/A' except: return 'N/A' #extract the book dimensions def get_book_dims(row): try: if 'cm' in row: if len(row.split(' cm')[0].strip().split(" ")) == 1: return row.split(' cm')[0].strip() else: return row.split(' cm')[0].strip().split(" ")[-1] else: return 'N/A' except: return 'N/A'Create URL Linkdef get_url(row): row = str(row) return f"https://seattle.bibliocommons.com/item/show/{row}030"Load URLS, Divide into DataFrame Chunksurl_df = pd.read_csv(f"01_Data/Random_Sample_{set_variable}.csv",index_col=0) url_df["link"] = url_df["BibNum"].apply(get_url) url_df = url_df[url_df['link'].notna()] #remove lines with no URL url_df.head() #split the URL List into chunks so you can incrementally save total_loops = (len(url_df) // max_range) + 1 url_dframes = np.array_split(url_df, total_loops)Loop Scrapefor i in range(11,len(url_dframes)): #adjust the lower number if the scrape stalled dframe = url_dframes[i] dframe = dframe.reset_index() #reset index so the ISBN below can match dframe["data"] = dframe["link"].apply(get_book_data) book_df = pd.DataFrame(list(dframe["data"])) #turn data into dataframe book_df = book_df.rename({0: 'url', 1: 'page_dim', 2: 'callno', 3:'avg_rating', #rename columns 4:'tot_ratings', 5: 'tot_reviews', 6:'type', 7:'subjects', 8: 'image', 9: 'desc'}, axis=1) #Clean Data. #Remove repetitive part of image URL book_df["page"] = book_df["page_dim"].apply(get_page_count) #Extract Page Number book_df["dim"] = book_df["page_dim"].apply(get_book_dims) #Extract book dimensions book_df["isbn"] = dframe["isbn"] #Keep useful columns book_df = book_df[["isbn","url","page","dim","avg_rating","tot_ratings","tot_reviews", "type","callno","subjects","desc","image"]] book_df.to_csv(f'01_Data/book_data_{location}_{i}.csv')Combine Files Together into Combined Branch CSV#start a dataframe with the first CSV book_data_df = pd.read_csv(f'01_Data/book_data_{location}_0.csv',index_col=0) #loop remaining CSVs for i in range(1,len(url_dframes)): temp_df = pd.read_csv(f'01_Data/book_data_{location}_{i}.csv',index_col=0) book_data_df = pd.concat([book_data_df,temp_df]) book_data_df.to_csv(f'01_Data/book_data_{location}_combined_{set_variable}.csv')SPSS file loaderLoads large SPSS files to BigQuery.__To Do__- Update schema with YAMLimport re from datetime import datetime as dt import pandas as pd from savReaderWriter import SavReader from database_connection import databaseConnection from google.cloud import bigquery def get_db_client(full_table_id): db = databaseConnection() project_id, dataset_id, table_id = _get_table_id_set(full_table_id) client = bigquery.Client( project=project_id, credentials=db.credentials) return client def delete_table(full_table_id): # If the table does not exist, delete_table raises # google.api_core.exceptions.NotFound unless not_found_ok is True. client = get_db_client(full_table_id) client.delete_table(full_table_id, not_found_ok=True) # Make an API request. 
print("Deleted table '{}'.".format(full_table_id)) def _get_table_id_set(full_table_id): project_id = full_table_id.split('.')[0] dataset_id = full_table_id.split('.')[1] table_id = full_table_id.split('.')[2] return (project_id, dataset_id, table_id) def _get_chunck_cutoffs(file_length, interval=50_000): """ Create a list of lists containing cutoff levels to chunk SPSS files. """ i0=0 i1=interval c_list=[] while i1 <= file_length: c = [i0, i1] c_list += [c] i0 = i1 + 1 i1 = i0 + (interval - 1) c_list = c_list + [[i0, file_length]] return c_list def spss_to_csv(filename): with SavReader(filename) as reader: header = [re.sub("(\'|b)", "", str(h)) for h in reader.header] file_length = len(reader) df = pd.DataFrame(columns=header) chunck_cutoffs = _get_chunck_cutoffs(file_length) file_list = [] for i, chunk in enumerate(chunck_cutoffs): lines = [] for line in reader[chunk[0] : chunk[1]]: lines += [line] df_tmp = pd.DataFrame(lines, columns=header) filename_out = re.sub("\.sav", "_{}.csv".format(i), filename) file_list += [filename_out] print("LOADING {} of {}: (lines {} of {}) --> {}".format( i+1, len(chunck_cutoffs), chunk[1], file_length, filename_out)) df_tmp.to_csv(filename_out) del lines, df_tmp return file_list def csv_to_db(source_file, full_table_id, replace=True): """ Load a CSV file to BigQuery using the BigQuery Python API. Example: ``` csv_to_db( file = , full_table_id = ) ``` Attributes: source_file (str): The file path/name of the CSV file. Include the file suffix (i.e. .csv). full_table_id (str): BigQuery table ID. Example: `my-project.dope_dataset.terrific_table` replace (boolean): Whether to replace the table, if it exists. To Do: * Load table schema from YAML """ project_id, dataset_id, table_id = _get_table_id_set(full_table_id) client = get_db_client(full_table_id) dataset_ref = client.dataset(dataset_id) table_ref = dataset_ref.table(table_id) job_config = bigquery.LoadJobConfig() job_config.source_format = bigquery.SourceFormat.CSV job_config.skip_leading_rows = 1 job_config.autodetect = True if replace: delete_table(full_table_id) with open(source_file, "rb") as sf: job = client.load_table_from_file(sf, table_ref, job_config=job_config) job.result() # Waits for table load to complete. print("Loaded {} rows into {}:{}.{}.".format( job.output_rows, project_id, dataset_id, table_id)) def spss_to_db(filename, full_table_id, replace=True): """ Load SPSS files to BigQuery using the BigQuery Python API. This function breaks large SPSS files into smaller CSVs and then loads them into BigQuery. Example: ``` spss_to_db( filename = 'dopeData.sav', full_table_id = 'my-project.dope_data.awesome_table') ``` Attributes: filename (str): The file path/name of the CSV file. Include the file suffix (i.e. .csv). full_table_id (str): BigQuery table ID. Example: `my-project.dope_dataset.terrific_table` replace (boolean): Whether to replace the table, if it exists. """ project_id, dataset_id, table_id = _get_table_id_set(full_table_id) client = get_db_client(full_table_id) print("STEP 1: CONVERT SPSS FILES INTO CSV FILE(S)\n") file_list = list(set(spss_to_csv(filename))) print("\nSTEP 2: LOAD CSV FILE(S) INTO BIGQUERY TABLE\n") if replace: delete_table(full_table_id) for i, file in enumerate(file_list): print("LOADING {} of {}: {} --> {}:{}.{}.".format( i+1, len(file_list), file, project_id, dataset_id, table_id)) csv_to_db(file, full_table_id, replace=False) print("\nSPSS FILE LOAD COMPLETE!\n\n" "Note: you have a number of CSV files in your data folder." 
"You should delete them before merging to a repository." )Run spss_to_db`spss_to_db()` breaks large SPSS files into smaller CSVs and then loads them into BigQuery.# Load a CSV file to BigQuery # csv_file = '/home/jovyan/project-implicit/data/race_iat/Race_IAT.public.2019_0.csv' # full_table_id = 'algomosaic-nyc.project_implicit.race_ait_2019' # csv_to_db(source_file=filename, full_table_id=full_table_id) # Load a SPSS file to BigQuery spss_file = '/home/jovyan/project-implicit/data/race_iat/Race_IAT.public.2019.sav' full_table_id = 'algomosaic-nyc.project_implicit.race_ait_2019' spss_to_db(spss_file, full_table_id) from google.cloud import bigquery # Construct a BigQuery client object. client = bigquery.Client() # TODO(developer): Set table_id to the ID of the table # to add an empty column. full_table_id = "your-project.your_dataset.your_table_name" table = client.get_table(full_table_id) # Make an API request. original_schema = table.schema new_schema = original_schema[:] # Creates a copy of the schema. new_schema.append(bigquery.SchemaField("phone", "STRING")) table.schema = new_schema table = client.update_table(table, ["schema"]) # Make an API request. if len(table.schema) == len(original_schema) + 1 == len(new_schema): print("A new column has been added.") else: print("The column has not been added.")`MeasurementChain` Tutorial OverviewThe goal of the `MeasurementChain` class is to describe as detailed as possible how experimental data is acquired.Acquiring this data is a process that usually involves multiple substeps. For example, during a temperature measurement we might have a sensor that produces a voltage correlating with the current temperature.Since we wanted to know the temperature and not some arbitrary voltage, we need to transform the voltage data into temperature data by utilizing the calibration of our sensor.The `MeasurementChain` is build upon 2 basic constructs: Signals and transformations. We always start our measurement chain with an initial signal, generated by a source sensor.In the example we gave before this was a voltage.To get to the data of interest we might need to perform one or more transformation steps were each one yields a new signal.Possible transformations are for example an AD conversion, signal amplification, filtering, or applying a calibration.The last signal of the measurement chain is usually the one we generate our measurement data from by recording its value at certain points in time.We will now discuss the different methods to construct a `MeasurementChain` and the features it offers. Construction without additional classesThe easiest way to construct a measurement chain is to use the `from_parameters` and the `create_transformation` methods. With these functions, we do not need to bother with as many extra classes as with the other approaches.We start with the `from_parameters` function to create a new measurement chain. We need to provide 5 parameters to it:- The name of the measurement chain- The name of the source that creates the first, unprocessed measurement signal- The error of the source- The type of the source signal (analog or digital)- The unit of the source signalOptionally, one can also provide the associated measurement data, if it was recorded, but we will discuss this in a later section. 
Let's start by creating our first measurement chain:mc_1 = MeasurementChain.from_parameters( name="Temperature measurement chain 1", source_name="Thermocouple 1", source_error=Error(deviation=Q_(0.1, "percent")), output_signal_type="analog", output_signal_unit="V", )As you can see, we have created a `MeasurementChain` with the name "Temperature measurement chain 1". Its source is named "Thermocouple 1" and it produces an analog output signal in volts. The specified measurement error is a fixed value of 0.1%. Next we want to add the first transformation step, the analog-digital conversion of the signal. For this, we use the `create_transformation` method of our newly created measurement chain. Like the `from_parameters` function, it accepts the name of the transformation, its error, the output signal type, and the output unit as parameters. Additionally, we can provide a function that describes how the numerical values and units are transformed.The output signal type, unit and function are all optional parameters. However, providing none of them would not apply any changes to the signal and would emit a warning.Now we add the AD conversion to our measurement chain:mc_1.create_transformation( name="AD conversion", error=None, output_signal_type="digital", output_signal_unit="", )All we needed to specify apart from the name and error was the output signal type as "digital". We also removed the unit by providing an empty string as output unit since the AD conversion just yields a digital number that doesn't necessarily represent a physical quantity. Let's add the final transformation, the calibration, which produces the data we are interested in.mc_1.create_transformation( name="Calibration", error=Error(Q_(0.4, "percent")), func=MathematicalExpression( expression="a*x+b", parameters={"a": Q_(3, "K"), "b": Q_(273.15, "K")} ), )Here we specify a function that describes the transformation of our digital number into an actual temperature value:$$3K \cdot x + 273.15K$$The name of the variable and the parameters can be arbitrarily chosen. The only restriction is that the function has only a single variable, which represents the input signal. In this example our input variable is given by `x`.Since the parameters of our function already contain the unit conversion, we do not need to provide the `output_signal_unit` parameter. However, we could do so to ensure that the function's output signal has the correct dimensionality, for example length, time, or temperature.To do so, we can provide an arbitrary unit of the desired dimension.If we passed `m`, we would expect the output signal to represent a length.In case the function yields inches, millimeters, yards, etc., this would be the correct dimensionality.Square meters, seconds, or volts would raise an exception.Additionally, you can't use the `output_signal_unit` parameter to add a unit conversion if the passed function does not contain one. In fact, if only `output_signal_unit` is provided without a function, like in the AD conversion we added before, `create_transformation` generates a corresponding conversion function internally. Plotting measurement chainsNow that we have created our first measurement chain without any exceptions, we might want to verify that everything is specified correctly. To do so, we could use Python's `print` command to check all variables, but this would be a bit tedious. A more convenient way is to use the plot function of the `MeasurementChain`.mc_1.plot()The plot shows us the initial signal produced by the source on the left. 
To the right of the source signal all transformations and their resulting output signals are shown. Construction from dedicated classesThe first method we demonstrated required you to provide a lot of parameters. While this method is very explicit and doesn't involve many other classes, it is not the best approach if you want to share sources and transformations with other measurement chains or objects. To share information about a source or transformation, two dedicated container classes are available: `SignalSource` and `SignalTransformation`.A `SignalSource` can be generated as follows:source_2 = SignalSource( name="Source", error=Error(Q_(0.1, "percent")), output_signal=Signal(signal_type="analog", units="V"), )The information we provide is similar to before. The only noteworthy thing here is that the signal type and unit are wrapped into a separate `Signal` class. Now we can use this class to create a measurement chain:mc_2 = MeasurementChain(name="Measurement chain 2", source=source_2) mc_2.plot()Next we create a `SignalTransformation`:transformation_3 = SignalTransformation( name="Transformation", error=Error(Q_(1, "percent")), func=MathematicalExpression( expression="a*x+b", parameters={"a": Q_(3, "K/V"), "b": Q_(273.15, "K")} ), type_transformation="AD", )The first three arguments are equivalent to those of the `create_transformation` method. The `type_transformation` parameter expects a string consisting of two letters that can each be either "A" for analog or "D" for digital. The first letter is the expected input signal type and the second letter the output signal type.We can add it to the `MeasurementChain` with the `add_transformation` function.mc_2.add_transformation(transformation_3) mc_2.plot()Construction from equipment classesThe sources and transformations of a measurement chain are often tied to a certain piece of laboratory equipment. The weldx package offers the `MeasurementEquipment` structure to describe your equipment and collect all the operations it performs inside a measurement chain.Since lab equipment usually doesn't change frequently, a good approach would be to define all your instruments once and reuse their definitions when creating a new WelDX file.The `MeasurementChain` supports this by letting you create new instances using `from_equipment` and add transformations with `add_transformation_from_equipment`.Let us create two pieces of equipment and create a new `MeasurementChain` from them:source_eq = MeasurementEquipment("Source 2000", sources=[source_2]) transformation_eq = MeasurementEquipment( "Transformer X3", transformations=[transformation_3] )Now we simply create a measurement chain from them:mc_3 = MeasurementChain.from_equipment("Measurement Chain 3", source_eq) mc_3.plot() mc_3.add_transformation_from_equipment(transformation_eq) mc_3.plot()If we add a piece of equipment to the `MeasurementChain`, it won't just store the corresponding transformation but will also remember the equipment that provides it.We can get the linked equipment using `get_equipment`.To do so, we must provide the name of the transformation or source:mc_3.get_equipment("Source")It might also be the case that a piece of equipment provides multiple sources or transformations.In this case, you need to specify which one should be added to the `MeasurementChain`.The `from_equipment` and `add_transformation_from_equipment` methods provide an extra parameter for this case. 
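As a small illustration of this reuse (an added sketch that uses only the calls and objects already shown above, nothing else from the weldx API), the same equipment definitions can back a second, independent measurement chain:

```python
# Sketch: reuse the equipment objects defined above for another chain.
# Only `from_equipment`, `add_transformation_from_equipment` and `plot` are used,
# exactly as demonstrated for mc_3; the chain name is arbitrary.
mc_4 = MeasurementChain.from_equipment("Measurement Chain 4", source_eq)
mc_4.add_transformation_from_equipment(transformation_eq)
mc_4.plot()
```

In this way a single set of equipment definitions can be shared between any number of chains or WelDX files.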
Accessing informationIn case you want to know the names of the source or the transformations that are part of a `MeasurementChain`, you can use the following two properties:mc_3.source_name mc_3.transformation_namesYou can also get the `SignalSource` and `SignalTransformation` objects using:mc_3.source mc_3.get_transformation("Transformation")Because a measurement chain can contain multiple transformations, you have to specify the name of the desired transformation when using `get_transformation`.If you want to know the type and unit of a signal that is generated by the source or results from a transformation, you can call the `get_signal` method.It returns a special class that contains all relevant information about a signal, including attached measurement data, which will be covered in the next section.mc_3.get_signal("Transformation")One can also get a list of all signals and transformations with:mc_3.signals mc_3.transformationsAttaching dataUntil now we have seen how to create such a `MeasurementChain` and how we can access the information it provides.However, the most important information is currently still missing: the actual data we produced.Attaching data is simply done with `add_signal_data`:mc_1.add_signal_data( TimeSeries(data=Q_([10, 15, 5], "K"), time=TimedeltaIndex(data=[0, 2, 6], unit="s")) )As you can see in the following plot, the data is associated with the output signal of the last transformation:mc_1.plot()But what if we want to attach the raw data of our source too? For this case, `add_signal_data` provides a second parameter to specify the origin of the data.To add some data to the source, we do the following:mc_1.add_signal_data( data=TimeSeries( data=Q_([2, 3, 1], "K"), time=TimedeltaIndex(data=[0, 2, 6], unit="s") ), signal_source="Thermocouple 1", )As can be seen in the next plot, we successfully added the data to the source:mc_1.plot()Also note that all functions that let you create a measurement chain or add a transformation provide an extra parameter to add the corresponding data.Finally, if you want to access the stored data, you can use `get_signal_data`.As for the `add_signal_data` function, you get the data from the last transformation if you don't specify any source or transformation name:mc_1.get_signal_data()We access the source's data by adding its name to the function call:mc_1.get_signal_data("Thermocouple 1")Remember that the returned `TimeSeries` possesses a plot function that lets you create a plot of the time-dependent data:mc_1.get_signal_data().plot()Note that the `Signal` class returned by the `get_signal` function offers a `plot` function too.So you do not need to fetch the data from the Signal in order to plot it:mc_1.get_signal("Calibration").plot()Review session> The goal of this session is to work through a series of theoretical and practical exercises covering the contents of modules 1 and 2, in preparation for the exam.> You are welcome to bring your own questions about the topics covered in these modules, or exercises from previous classes, past assignments and/or quizzes that did not become clear.> The main recommendation for the exam is that you UNDERSTAND every single exercise from the quizzes and the assignments. If all of that is clear, the exam will be a mere formality.___ Assorted quiz-style exercises.Part of the exam consists of exercises similar to those from the quizzes in modules 1 and 2. 
The difference from the quizzes is that, in addition to selecting the answer, you must give a justification for why you chose it.Let's review some exercises similar to those from past quizzes. **Question 1.** Consider the following distribution of returns for assets A, B and C:| Probability | Return A | Return B | Return C || ---------------- | ------------------ | ------------------- | ------------------ || 30% | -0.20 | -0.05 | 0.05 || 40% | 0.05 | 0.10 | 0.03 || 30% | 0.40 | 0.15 | 0.02 |Which of the following statements are correct?A. $E[r_A] = 25.00\%$, $E[r_B] = 20.00\%$, $E[r_C] = 10.00\%$.B. $E[r_A] = 8.00\%$, $E[r_B] = 20.00\%$, $E[r_C] = 3.30\%$.C. $E[r_A] = 25.00\%$, $E[r_B] = 7.00\%$, $E[r_C] = 10.00\%$.D. $E[r_A] = 8.00\%$, $E[r_B] = 7.00\%$, $E[r_C] = 3.30\%$. The correct answer is (4%):# The justification for this question is the set of calculations needed to reach the result (4%)**Question 2.** Consider the following distribution of returns for assets A, B and C:| Probability | Return A | Return B | Return C || ---------------- | ------------------ | ------------------- | ------------------ || 30% | -0.20 | -0.05 | 0.05 || 40% | 0.05 | 0.10 | 0.03 || 30% | 0.40 | 0.15 | 0.02 |Which of the following statements are correct?A. $\sigma_A = 27.33\%$, $\sigma_B = 8.12\%$, $\sigma_C = 1.91\%$.B. $\sigma_A = 23.37\%$, $\sigma_B = 8.12\%$, $\sigma_C = 1.19\%$.C. $\sigma_A = 23.37\%$, $\sigma_B = 12.08\%$, $\sigma_C = 1.91\%$.D. $\sigma_A = 27.33\%$, $\sigma_B = 12.08\%$, $\sigma_C = 1.19\%$. The correct answer is (4%):# The justification for this question is the set of calculations needed to reach the result (4%)**Question 3.** Consider the following distribution of returns for assets A, B and C:| Probability | Return A | Return B | Return C || ---------------- | ------------------ | ------------------- | ------------------ || 30% | -0.20 | -0.05 | 0.05 || 40% | 0.05 | 0.10 | 0.03 || 30% | 0.40 | 0.15 | 0.02 |Which of the following statements are correct?A. $\sigma_{AB} = 0.0174$, $\sigma_{AC} = 0.00264$, $\sigma_{BC} = 0.00096$.B. $\sigma_{AB} = -0.0174$, $\sigma_{AC} = -0.00264$, $\sigma_{BC} = 0.00096$.C. $\sigma_{AB} = 0.0174$, $\sigma_{AC} = -0.00264$, $\sigma_{BC} = -0.00096$.D. $\sigma_{AB} = -0.0174$, $\sigma_{AC} = 0.00264$, $\sigma_{BC} = -0.00096$.# The justification for this question is the set of calculations needed to reach the result (4%)**Question 4.** Consider the following distribution of returns for assets A, B and C:| Probability | Return A | Return B | Return C || ---------------- | ------------------ | ------------------- | ------------------ || 30% | -0.20 | -0.05 | 0.05 || 40% | 0.05 | 0.10 | 0.03 || 30% | 0.40 | 0.15 | 0.02 |What are the expected return and volatility of a portfolio composed of 20% of asset A, 30% of asset B and 50% of asset C?A. $E[r_P] = 5.53\%$, $\sigma_P=6.39\%$.B. $E[r_P] = 5.53\%$, $\sigma_P=7.71\%$.C. $E[r_P] = 3.55\%$, $\sigma_P=7.71\%$.D. $E[r_P] = 5.35\%$, $\sigma_P=6.39\%$. The correct answer is (4%):# The justification for this question is the set of calculations needed to reach the result (4%)Data Formats Practice: CSV and JSON Exercise 1[5 points]Consider the files __Terrazas.csv__ and __Locales.csv__, which contain a census of premises and activities from the Madrid City Council, classified by their type of access (street door or grouped), status (open, closed...) 
as well as an indication of the economic activity carried out and of the hospitality and restaurant terraces registered in that census. Note that the information may contain some inconsistencies.You are asked to write Python functions that perform the following actions:* Using the two files indicated, create a new CSV file named __NoTerrazas.csv__ containing the information of the premises that are not terraces. That is, it will contain the same information as the file __Locales.csv__ but without the premises that are terraces.* Using the file __Locales.csv__, create a new CSV file named __Agrupadas.csv__ which must contain one line per neighborhood with the number of premises whose building street name differs from the street name used to access the premises. For example, the premises with id 10000401 are located in Plaza de Isabel II, but they are accessed from Calle Arenal. The information in the file must appear in the following order: Neighborhood code, Neighborhood name, Number of businesses meeting the condition * Using the file __Terrazas.csv__, create a new CSV file named TopRepeticiones.csv. Consider the number of times the name of a terrace (the "rótulo" field) is repeated. Create a CSV file containing the 20 most frequently repeated names. The information in the file must appear in descending order with the following fields: Terrace name, Number of repetitions To read the files, use code like the following:import csv csvarchivo = open('Locales.csv',encoding="utf8",errors='ignore') entrada = csv.reader(csvarchivo, delimiter=";")Exercise 2[5 points]Consider the file __Agenda.json__, which contains information about free activities, for children and adults, organized by the following municipally owned institutions: the network of Public Libraries, the Biblioteca Histórica, the Biblioteca Musical Víctor Espinós, the Hemeroteca and the Imprenta. The activities of the next 60 days are listed. You are asked to write Python functions that perform the following actions:* The file contains a field named "audience" that indicates the target audience. Sometimes this field appears and sometimes it does not. The file must be processed to produce two text files: the file __Niños.txt__, which will contain the information of the events aimed exclusively at children, and the file __Familias.txt__, which will contain the information of the events aimed at children and families. The files must contain one line per event meeting the stated conditions, showing the following information: Event title (field title), start (field "dtstart"), end (field "dtend"), url (field link) * We want to know which events are closest to a point specified by its longitude and latitude. Create a function that, taking as input a longitude, a latitude and a real number representing a distance in kilometers, returns the events located at a distance less than or equal to the given distance. The result will be printed to the screen, one line per event. 
For each event, the following must be shown: Event title (field title), start (field "dtstart"), end (field "dtend"), url (field link) To compute the distance between two points given their coordinates, the following Python function will be used:import math def haversine(lat1, lon1, lat2, lon2): rad=math.pi/180 dlat=lat2-lat1 dlon=lon2-lon1 R=6372.795477598 a=(math.sin(rad*dlat/2))**2 + math.cos(rad*lat1)*math.cos(rad*lat2)*(math.sin(rad*dlon/2))**2 distancia=2*R*math.asin(math.sqrt(a)) return distanciaTo read the file, use code like the following:import json leer = json.loads(open('Agenda.json',encoding="utf8").read())Fixed-Type Arrays in PythonPython offers several different options for storing data in efficient, fixed-type data buffers.The built-in ``array`` module can be used to create dense arrays of a uniform type:import array L = list(range(10)) A = array.array('i', L) AHere ``'i'`` is a type code indicating the contents are integers.Much more useful, however, is the ``ndarray`` object of the NumPy package.While Python's ``array`` object provides efficient storage of array-based data, NumPy adds to this efficient *operations* on that data.We will explore these operations in later sections; here we'll demonstrate several ways of creating a NumPy array.We'll start with the standard NumPy import, under the alias ``np``:import numpy as npCreating Arrays from Python ListsFirst, we can use ``np.array`` to create arrays from Python lists:# integer array: np.array([1, 4, 2, 5, 3])Remember that unlike Python lists, NumPy is constrained to arrays that all contain the same type.If types do not match, NumPy will upcast if possible (here, integers are up-cast to floating point):np.array([3.14, 4, 2, 3])If we want to explicitly set the data type of the resulting array, we can use the ``dtype`` keyword:np.array([1, 2, 3, 4], dtype='float32')Finally, unlike Python lists, NumPy arrays can explicitly be multi-dimensional; here's one way of initializing a multidimensional array using a list of lists:# nested lists result in multi-dimensional arrays np.array([range(i, i + 3) for i in [2, 4, 6]])The inner lists are treated as rows of the resulting two-dimensional array. 
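As a quick check (an illustrative aside, not part of the original notebook), the ``dtype`` attribute confirms the upcasting and explicit typing described above; note that the default integer type can vary by platform:

```python
import numpy as np

# dtype of a pure-integer list: platform-default integer (e.g. int64 on most 64-bit systems)
print(np.array([1, 4, 2, 5, 3]).dtype)
# mixing a float with integers upcasts everything to float64
print(np.array([3.14, 4, 2, 3]).dtype)
# an explicit request is honored exactly
print(np.array([1, 2, 3, 4], dtype='float32').dtype)
```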
Creating Arrays from ScratchEspecially for larger arrays, it is more efficient to create arrays from scratch using routines built into NumPy.Here are several examples:# Create a length-10 integer array filled with zeros np.zeros(10, dtype=int) # Create a 3x5 floating-point array filled with ones np.ones((3, 5), dtype=float) # Create a 3x5 array filled with 3.14 np.full((3, 5), 3.14) # Create an array filled with a linear sequence # Starting at 0, ending at 20, stepping by 2 # (this is similar to the built-in range() function) np.arange(0, 20, 2) # Create an array of five values evenly spaced between 0 and 1 np.linspace(0, 1, 5) # Create a 3x3 array of uniformly distributed # random values between 0 and 1 np.random.random((3, 3)) # Create a 3x3 array of normally distributed random values # with mean 0 and standard deviation 1 np.random.normal(0, 1, (3, 3)) # Create a 3x3 array of random integers in the interval [0, 10) np.random.randint(0, 10, (3, 3)) # Create a 3x3 identity matrix np.eye(3) # Create an uninitialized array of three integers # The values will be whatever happens to already exist at that memory location np.empty(3)NumPy Standard Data TypesNumPy arrays contain values of a single type, so it is important to have detailed knowledge of those types and their limitations.Because NumPy is built in C, the types will be familiar to users of C, Fortran, and other related languages.The standard NumPy data types are listed in the following table.Note that when constructing an array, they can be specified using a string:```pythonnp.zeros(10, dtype='int16')```Or using the associated NumPy object:```pythonnp.zeros(10, dtype=np.int16)``` | Data type | Description ||---------------|-------------|| ``bool_`` | Boolean (True or False) stored as a byte || ``int_`` | Default integer type (same as C ``long``; normally either ``int64`` or ``int32``)| | ``intc`` | Identical to C ``int`` (normally ``int32`` or ``int64``)| | ``intp`` | Integer used for indexing (same as C ``ssize_t``; normally either ``int32`` or ``int64``)| | ``int8`` | Byte (-128 to 127)| | ``int16`` | Integer (-32768 to 32767)|| ``int32`` | Integer (-2147483648 to 2147483647)|| ``int64`` | Integer (-9223372036854775808 to 9223372036854775807)| | ``uint8`` | Unsigned integer (0 to 255)| | ``uint16`` | Unsigned integer (0 to 65535)| | ``uint32`` | Unsigned integer (0 to 4294967295)| | ``uint64`` | Unsigned integer (0 to 18446744073709551615)| | ``float_`` | Shorthand for ``float64``.| | ``float16`` | Half precision float: sign bit, 5 bits exponent, 10 bits mantissa| | ``float32`` | Single precision float: sign bit, 8 bits exponent, 23 bits mantissa| | ``float64`` | Double precision float: sign bit, 11 bits exponent, 52 bits mantissa| | ``complex_`` | Shorthand for ``complex128``.| | ``complex64`` | Complex number, represented by two 32-bit floats| | ``complex128``| Complex number, represented by two 64-bit floats| More advanced type specification is possible, such as specifying big or little endian numbers; for more information, refer to the [NumPy documentation](http://numpy.org/).NumPy also supports compound data types, which will be covered in [Structured Data: NumPy's Structured Arrays](02.09-Structured-Data-NumPy.ipynb). 
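As an illustrative aside (not part of the original text), the ranges and precisions listed in the table can be queried programmatically, and the string and object forms of a dtype are interchangeable:

```python
import numpy as np

print(np.iinfo(np.int16))    # min = -32768, max = 32767, matching the table
print(np.finfo(np.float32))  # precision and range of single-precision floats
# 'int16' and np.int16 refer to the same dtype
print(np.zeros(10, dtype='int16').dtype == np.zeros(10, dtype=np.int16).dtype)
```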
The Basics of NumPy Arrays Data manipulation in Python is nearly synonymous with NumPy array manipulation: even newer tools like Pandas are built around the NumPy array.This section will present several examples of using NumPy array manipulation to access data and subarrays, and to split, reshape, and join the arrays.While the types of operations shown here may seem a bit dry and pedantic, they comprise the building blocks of many other examples.Get to know them well!We'll cover a few categories of basic array manipulations here:- *Attributes of arrays*: Determining the size, shape, memory consumption, and data types of arrays- *Indexing of arrays*: Getting and setting the value of individual array elements- *Slicing of arrays*: Getting and setting smaller subarrays within a larger array- *Reshaping of arrays*: Changing the shape of a given array- *Joining and splitting of arrays*: Combining multiple arrays into one, and splitting one array into many NumPy Array Attributes First let's discuss some useful array attributes.We'll start by defining three random arrays, a one-dimensional, two-dimensional, and three-dimensional array.We'll use NumPy's random number generator, which we will *seed* with a set value in order to ensure that the same random arrays are generated each time this code is run:import numpy as np np.random.seed(0) # seed for reproducibility x1 = np.random.randint(10, size=6) # One-dimensional array x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional arrayEach array has attributes ``ndim`` (the number of dimensions), ``shape`` (the size of each dimension), and ``size`` (the total size of the array):print("x3 ndim: ", x3.ndim) print("x3 shape:", x3.shape) print("x3 size: ", x3.size)x3 ndim: 3 x3 shape: (3, 4, 5) x3 size: 60Another useful attribute is the ``dtype``, the data type of the array:print("dtype:", x3.dtype)dtype: int64Other attributes include ``itemsize``, which lists the size (in bytes) of each array element, and ``nbytes``, which lists the total size (in bytes) of the array:print("itemsize:", x3.itemsize, "bytes") print("nbytes:", x3.nbytes, "bytes")itemsize: 8 bytes nbytes: 480 bytesIn general, we expect that ``nbytes`` is equal to ``itemsize`` times ``size``. Array Indexing: Accessing Single Elements If you are familiar with Python's standard list indexing, indexing in NumPy will feel quite familiar.In a one-dimensional array, the $i^{th}$ value (counting from zero) can be accessed by specifying the desired index in square brackets, just as with Python lists:x1 x1[0] x1[4]To index from the end of the array, you can use negative indices:x1[-1] x1[-2]In a multi-dimensional array, items can be accessed using a comma-separated tuple of indices:x2 x2[0, 0] x2[2, 0] x2[2, -1]Values can also be modified using any of the above index notation:x2[0, 0] = 12 x2Keep in mind that, unlike Python lists, NumPy arrays have a fixed type.This means, for example, that if you attempt to insert a floating-point value to an integer array, the value will be silently truncated. Don't be caught unaware by this behavior!x1[0] = 3.14159 # this will be truncated! 
x1Array Slicing: Accessing Subarrays Just as we can use square brackets to access individual array elements, we can also use them to access subarrays with the *slice* notation, marked by the colon (``:``) character.The NumPy slicing syntax follows that of the standard Python list; to access a slice of an array ``x``, use this:``` pythonx[start:stop:step]```If any of these are unspecified, they default to the values ``start=0``, ``stop=``*``size of dimension``*, ``step=1``.We'll take a look at accessing sub-arrays in one dimension and in multiple dimensions. One-dimensional subarraysx = np.arange(10) x x[:5] # first five elements x[5:] # elements after index 5 x[4:7] # middle sub-array x[::2] # every other element x[1::2] # every other element, starting at index 1A potentially confusing case is when the ``step`` value is negative.In this case, the defaults for ``start`` and ``stop`` are swapped.This becomes a convenient way to reverse an array:x[::-1] # all elements, reversed x[5::-2] # reversed every other from index 5Multi-dimensional subarraysMulti-dimensional slices work in the same way, with multiple slices separated by commas.For example:x2 x2[:2, :3] # two rows, three columns x2[:3, ::2] # all rows, every other columnFinally, subarray dimensions can even be reversed together:x2[::-1, ::-1]Accessing array rows and columnsOne commonly needed routine is accessing of single rows or columns of an array.This can be done by combining indexing and slicing, using an empty slice marked by a single colon (``:``):print(x2[:, 0]) # first column of x2 print(x2[0, :]) # first row of x2[12 5 2 4]In the case of row access, the empty slice can be omitted for a more compact syntax:print(x2[0]) # equivalent to x2[0, :][12 5 2 4]Subarrays as no-copy viewsOne important–and extremely useful–thing to know about array slices is that they return *views* rather than *copies* of the array data.This is one area in which NumPy array slicing differs from Python list slicing: in lists, slices will be copies.Consider our two-dimensional array from before:print(x2)[[12 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]Let's extract a $2 \times 2$ subarray from this:x2_sub = x2[:2, :2] print(x2_sub)[[12 5] [ 7 6]]Now if we modify this subarray, we'll see that the original array is changed! Observe:x2_sub[0, 0] = 99 print(x2_sub) print(x2)[[99 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]This default behavior is actually quite useful: it means that when we work with large datasets, we can access and process pieces of these datasets without the need to copy the underlying data buffer. Creating copies of arraysDespite the nice features of array views, it is sometimes useful to instead explicitly copy the data within an array or a subarray. This can be most easily done with the ``copy()`` method:x2_sub_copy = x2[:2, :2].copy() print(x2_sub_copy)[[99 5] [ 7 6]]If we now modify this subarray, the original array is not touched:x2_sub_copy[0, 0] = 42 print(x2_sub_copy) print(x2)[[99 5 2 4] [ 7 6 8 8] [ 1 6 7 7]]Reshaping of ArraysAnother useful type of operation is reshaping of arrays.The most flexible way of doing this is with the ``reshape`` method.For example, if you want to put the numbers 1 through 9 in a $3 \times 3$ grid, you can do the following:grid = np.arange(1, 10).reshape((3, 3)) print(grid)[[1 2 3] [4 5 6] [7 8 9]]Note that for this to work, the size of the initial array must match the size of the reshaped array. 
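To make that size requirement concrete (a small illustrative check, not from the original text), asking for a shape whose total size differs from the array's size raises a ``ValueError``:

```python
import numpy as np

try:
    np.arange(1, 10).reshape((3, 4))  # 9 elements cannot fill a 3x4 (= 12) grid
except ValueError as err:
    print(err)
```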
Where possible, the ``reshape`` method will use a no-copy view of the initial array, but with non-contiguous memory buffers this is not always the case.Another common reshaping pattern is the conversion of a one-dimensional array into a two-dimensional row or column matrix.This can be done with the ``reshape`` method, or more easily done by making use of the ``newaxis`` keyword within a slice operation:x = np.array([1, 2, 3]) # row vector via reshape x.reshape((1, 3)) # row vector via newaxis x[np.newaxis, :] # column vector via reshape x.reshape((3, 1)) # column vector via newaxis x[:, np.newaxis]We will see this type of transformation often. Array Concatenation and SplittingAll of the preceding routines worked on single arrays. It's also possible to combine multiple arrays into one, and to conversely split a single array into multiple arrays. We'll take a look at those operations here. Concatenation of arraysConcatenation, or joining of two arrays in NumPy, is primarily accomplished using the routines ``np.concatenate``, ``np.vstack``, and ``np.hstack``.``np.concatenate`` takes a tuple or list of arrays as its first argument, as we can see here:x = np.array([1, 2, 3]) y = np.array([3, 2, 1]) np.concatenate([x, y])You can also concatenate more than two arrays at once:z = [99, 99, 99] print(np.concatenate([x, y, z]))[ 1 2 3 3 2 1 99 99 99]It can also be used for two-dimensional arrays:grid = np.array([[1, 2, 3], [4, 5, 6]]) # concatenate along the first axis np.concatenate([grid, grid]) # concatenate along the second axis (zero-indexed) np.concatenate([grid, grid], axis=1)For working with arrays of mixed dimensions, it can be clearer to use the ``np.vstack`` (vertical stack) and ``np.hstack`` (horizontal stack) functions:x = np.array([1, 2, 3]) grid = np.array([[9, 8, 7], [6, 5, 4]]) # vertically stack the arrays np.vstack([x, grid]) # horizontally stack the arrays y = np.array([[99], [99]]) np.hstack([grid, y])Similary, ``np.dstack`` will stack arrays along the third axis. Splitting of arraysThe opposite of concatenation is splitting, which is implemented by the functions ``np.split``, ``np.hsplit``, and ``np.vsplit``. For each of these, we can pass a list of indices giving the split points:x = [1, 2, 3, 99, 99, 3, 2, 1] x1, x2, x3 = np.split(x, [3, 5]) print(x1, x2, x3)[1 2 3] [99 99] [3 2 1]Notice that _N_ split-points, leads to *N + 1* subarrays.The related functions ``np.hsplit`` and ``np.vsplit`` are similar:grid = np.arange(16).reshape((4, 4)) grid upper, lower = np.vsplit(grid, [2]) print(upper) print(lower) left, right = np.hsplit(grid, [2]) print(left) print(right)[[ 0 1] [ 4 5] [ 8 9] [12 13]] [[ 2 3] [ 6 7] [10 11] [14 15]]Similarly, ``np.dsplit`` will split arrays along the third axis. Computation on NumPy Arrays: Universal Functions Up until now, we have been discussing some of the basic nuts and bolts of NumPy; in the next few sections, we will dive into the reasons that NumPy is so important in the Python data science world.Namely, it provides an easy and flexible interface to optimized computation with arrays of data.Computation on NumPy arrays can be very fast, or it can be very slow.The key to making it fast is to use *vectorized* operations, generally implemented through NumPy's *universal functions* (ufuncs).This section motivates the need for NumPy's ufuncs, which can be used to make repeated calculations on array elements much more efficient.It then introduces many of the most common and useful arithmetic ufuncs available in the NumPy package. 
The Slowness of LoopsPython's default implementation (known as CPython) does some operations very slowly.This is in part due to the dynamic, interpreted nature of the language: the fact that types are flexible, so that sequences of operations cannot be compiled down to efficient machine code as in languages like C and Fortran.Recently there have been various attempts to address this weakness: well-known examples are the [PyPy](http://pypy.org/) project, a just-in-time compiled implementation of Python; the [Cython](http://cython.org) project, which converts Python code to compilable C code; and the [Numba](http://numba.pydata.org/) project, which converts snippets of Python code to fast LLVM bytecode.Each of these has its strengths and weaknesses, but it is safe to say that none of the three approaches has yet surpassed the reach and popularity of the standard CPython engine.The relative sluggishness of Python generally manifests itself in situations where many small operations are being repeated – for instance looping over arrays to operate on each element.For example, imagine we have an array of values and we'd like to compute the reciprocal of each.A straightforward approach might look like this:import numpy as np np.random.seed(0) def compute_reciprocals(values): output = np.empty(len(values)) for i in range(len(values)): output[i] = 1.0 / values[i] return output values = np.random.randint(1, 10, size=5) compute_reciprocals(values)This implementation probably feels fairly natural to someone from, say, a C or Java background.But if we measure the execution time of this code for a large input, we see that this operation is very slow, perhaps surprisingly so!We'll benchmark this with IPython's ``%timeit`` magic:big_array = np.random.randint(1, 100, size=1000000) %timeit compute_reciprocals(big_array)1.56 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)It takes several seconds to compute these million operations and to store the result!When even cell phones have processing speeds measured in Giga-FLOPS (i.e., billions of numerical operations per second), this seems almost absurdly slow.It turns out that the bottleneck here is not the operations themselves, but the type-checking and function dispatches that CPython must do at each cycle of the loop.Each time the reciprocal is computed, Python first examines the object's type and does a dynamic lookup of the correct function to use for that type.If we were working in compiled code instead, this type specification would be known before the code executes and the result could be computed much more efficiently. Introducing UFuncsFor many types of operations, NumPy provides a convenient interface into just this kind of statically typed, compiled routine. This is known as a *vectorized* operation.This can be accomplished by simply performing an operation on the array, which will then be applied to each element.This vectorized approach is designed to push the loop into the compiled layer that underlies NumPy, leading to much faster execution.Compare the results of the following two:print(compute_reciprocals(values)) print(1.0 / values)[0.16666667 1. 0.25 0.25 0.125 ] [0.16666667 1. 0.25 0.25 0.125 ]Looking at the execution time for our big array, we see that it completes orders of magnitude faster than the Python loop:%timeit (1.0 / big_array)958 µs ± 15.6 µs per loop (mean ± std. dev. 
of 7 runs, 1000 loops each)Vectorized operations in NumPy are implemented via *ufuncs*, whose main purpose is to quickly execute repeated operations on values in NumPy arrays.Ufuncs are extremely flexible – before we saw an operation between a scalar and an array, but we can also operate between two arrays:np.arange(5) / np.arange(1, 6)And ufunc operations are not limited to one-dimensional arrays–they can also act on multi-dimensional arrays as well:x = np.arange(9).reshape((3, 3)) 2 ** xComputations using vectorization through ufuncs are nearly always more efficient than their counterpart implemented using Python loops, especially as the arrays grow in size.Any time you see such a loop in a Python script, you should consider whether it can be replaced with a vectorized expression. Exploring NumPy's UFuncsUfuncs exist in two flavors: *unary ufuncs*, which operate on a single input, and *binary ufuncs*, which operate on two inputs.We'll see examples of both these types of functions here. Array arithmeticNumPy's ufuncs feel very natural to use because they make use of Python's native arithmetic operators.The standard addition, subtraction, multiplication, and division can all be used:x = np.arange(4) print("x =", x) print("x + 5 =", x + 5) print("x - 5 =", x - 5) print("x * 2 =", x * 2) print("x / 2 =", x / 2) print("x // 2 =", x // 2) # floor divisionx = [0 1 2 3] x + 5 = [5 6 7 8] x - 5 = [-5 -4 -3 -2] x * 2 = [0 2 4 6] x / 2 = [0. 0.5 1. 1.5] x // 2 = [0 0 1 1]There is also a unary ufunc for negation, and a ``**`` operator for exponentiation, and a ``%`` operator for modulus:print("-x = ", -x) print("x ** 2 = ", x ** 2) print("x % 2 = ", x % 2)-x = [ 0 -1 -2 -3] x ** 2 = [0 1 4 9] x % 2 = [0 1 0 1]In addition, these can be strung together however you wish, and the standard order of operations is respected:-(0.5*x + 1) ** 2Each of these arithmetic operations are simply convenient wrappers around specific functions built into NumPy; for example, the ``+`` operator is a wrapper for the ``add`` function:np.add(x, 2)The following table lists the arithmetic operators implemented in NumPy:| Operator | Equivalent ufunc | Description ||---------------|---------------------|---------------------------------------||``+`` |``np.add`` |Addition (e.g., ``1 + 1 = 2``) ||``-`` |``np.subtract`` |Subtraction (e.g., ``3 - 2 = 1``) ||``-`` |``np.negative`` |Unary negation (e.g., ``-2``) ||``*`` |``np.multiply`` |Multiplication (e.g., ``2 * 3 = 6``) ||``/`` |``np.divide`` |Division (e.g., ``3 / 2 = 1.5``) ||``//`` |``np.floor_divide`` |Floor division (e.g., ``3 // 2 = 1``) ||``**`` |``np.power`` |Exponentiation (e.g., ``2 ** 3 = 8``) ||``%`` |``np.mod`` |Modulus/remainder (e.g., ``9 % 4 = 1``)|Additionally there are Boolean/bitwise operators; we will explore these in [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb). 
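To illustrate the table above (an added check, not part of the original notebook), each operator and its equivalent ufunc produce identical results:

```python
import numpy as np

x = np.arange(4)
print(np.array_equal(np.add(x, 5), x + 5))            # True
print(np.array_equal(np.floor_divide(x, 2), x // 2))  # True
print(np.array_equal(np.mod(x, 2), x % 2))            # True
print(np.array_equal(np.negative(x), -x))             # True
```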
Absolute valueJust as NumPy understands Python's built-in arithmetic operators, it also understands Python's built-in absolute value function:x = np.array([-2, -1, 0, 1, 2]) abs(x)The corresponding NumPy ufunc is ``np.absolute``, which is also available under the alias ``np.abs``:np.absolute(x) np.abs(x)This ufunc can also handle complex data, in which the absolute value returns the magnitude:x = np.array([3 - 4j, 4 - 3j, 2 + 0j, 0 + 1j]) np.abs(x)Trigonometric functionsNumPy provides a large number of useful ufuncs, and some of the most useful for the data scientist are the trigonometric functions.We'll start by defining an array of angles:theta = np.linspace(0, np.pi, 3)Now we can compute some trigonometric functions on these values:print("theta = ", theta) print("sin(theta) = ", np.sin(theta)) print("cos(theta) = ", np.cos(theta)) print("tan(theta) = ", np.tan(theta))theta = [0. 1.57079633 3.14159265] sin(theta) = [0.0000000e+00 1.0000000e+00 1.2246468e-16] cos(theta) = [ 1.000000e+00 6.123234e-17 -1.000000e+00] tan(theta) = [ 0.00000000e+00 1.63312394e+16 -1.22464680e-16]The values are computed to within machine precision, which is why values that should be zero do not always hit exactly zero.Inverse trigonometric functions are also available:x = [-1, 0, 1] print("x = ", x) print("arcsin(x) = ", np.arcsin(x)) print("arccos(x) = ", np.arccos(x)) print("arctan(x) = ", np.arctan(x))x = [-1, 0, 1] arcsin(x) = [-1.57079633 0. 1.57079633] arccos(x) = [3.14159265 1.57079633 0. ] arctan(x) = [-0.78539816 0. 0.78539816]Exponents and logarithmsAnother common type of operation available in a NumPy ufunc are the exponentials:x = [1, 2, 3] print("x =", x) print("e^x =", np.exp(x)) print("2^x =", np.exp2(x)) print("3^x =", np.power(3, x))x = [1, 2, 3] e^x = [ 2.71828183 7.3890561 20.08553692] 2^x = [2. 4. 8.] 3^x = [ 3 9 27]The inverse of the exponentials, the logarithms, are also available.The basic ``np.log`` gives the natural logarithm; if you prefer to compute the base-2 logarithm or the base-10 logarithm, these are available as well:x = [1, 2, 4, 10] print("x =", x) print("ln(x) =", np.log(x)) print("log2(x) =", np.log2(x)) print("log10(x) =", np.log10(x))x = [1, 2, 4, 10] ln(x) = [0. 0.69314718 1.38629436 2.30258509] log2(x) = [0. 1. 2. 3.32192809] log10(x) = [0. 0.30103 0.60205999 1. ]Aggregations: Min, Max, and Everything In Between Often when faced with a large amount of data, a first step is to compute summary statistics for the data in question.Perhaps the most common summary statistics are the mean and standard deviation, which allow you to summarize the "typical" values in a dataset, but other aggregates are useful as well (the sum, product, median, minimum and maximum, quantiles, etc.).NumPy has fast built-in aggregation functions for working on arrays; we'll discuss and demonstrate some of them here. Summing the Values in an ArrayAs a quick example, consider computing the sum of all values in an array.Python itself can do this using the built-in ``sum`` function:import numpy as np L = np.random.random(100) sum(L)The syntax is quite similar to that of NumPy's ``sum`` function, and the result is the same in the simplest case:np.sum(L)However, because it executes the operation in compiled code, NumPy's version of the operation is computed much more quickly:big_array = np.random.rand(1000000) %timeit sum(big_array) %timeit np.sum(big_array)69 ms ± 1.07 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 317 µs ± 6.59 µs per loop (mean ± std. dev. 
of 7 runs, 1000 loops each)Be careful, though: the ``sum`` function and the ``np.sum`` function are not identical, which can sometimes lead to confusion!In particular, their optional arguments have different meanings, and ``np.sum`` is aware of multiple array dimensions, as we will see in the following section. Minimum and MaximumSimilarly, Python has built-in ``min`` and ``max`` functions, used to find the minimum value and maximum value of any given array:min(big_array), max(big_array)NumPy's corresponding functions have similar syntax, and again operate much more quickly:np.min(big_array), np.max(big_array) %timeit min(big_array) %timeit np.min(big_array)46 ms ± 1.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 339 µs ± 5.24 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)For ``min``, ``max``, ``sum``, and several other NumPy aggregates, a shorter syntax is to use methods of the array object itself:print(big_array.min(), big_array.max(), big_array.sum())7.071203171893359e-07 0.9999997207656334 500216.8034810001Whenever possible, make sure that you are using the NumPy version of these aggregates when operating on NumPy arrays! Multi dimensional aggregatesOne common type of aggregation operation is an aggregate along a row or column.Say you have some data stored in a two-dimensional array:M = np.random.random((3, 4)) print(M)[[0.79832448 0.44923861 0.95274259 0.03193135] [0.18441813 0.71417358 0.76371195 0.11957117] [0.37578601 0.11936151 0.37497044 0.22944653]]By default, each NumPy aggregation function will return the aggregate over the entire array:M.sum()Aggregation functions take an additional argument specifying the *axis* along which the aggregate is computed. For example, we can find the minimum value within each column by specifying ``axis=0``:M.min(axis=0)The function returns four values, corresponding to the four columns of numbers.Similarly, we can find the maximum value within each row:M.max(axis=1)The way the axis is specified here can be confusing to users coming from other languages.The ``axis`` keyword specifies the *dimension of the array that will be collapsed*, rather than the dimension that will be returned.So specifying ``axis=0`` means that the first axis will be collapsed: for two-dimensional arrays, this means that values within each column will be aggregated. 
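One way to keep the axis convention straight (an illustrative addition): check the shape of the result, which shows exactly which dimension was collapsed:

```python
import numpy as np

M = np.random.random((3, 4))
print(M.min(axis=0).shape)  # (4,) -- rows collapsed, one minimum per column
print(M.max(axis=1).shape)  # (3,) -- columns collapsed, one maximum per row
```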
Other aggregation functionsNumPy provides many other aggregation functions, but we won't discuss them in detail here.Additionally, most aggregates have a ``NaN``-safe counterpart that computes the result while ignoring missing values, which are marked by the special IEEE floating-point ``NaN`` value (for a fuller discussion of missing data, see [Handling Missing Data](03.04-Missing-Values.ipynb)).Some of these ``NaN``-safe functions were not added until NumPy 1.8, so they will not be available in older NumPy versions.The following table provides a list of useful aggregation functions available in NumPy:|Function Name | NaN-safe Version | Description ||-------------------|---------------------|-----------------------------------------------|| ``np.sum`` | ``np.nansum`` | Compute sum of elements || ``np.prod`` | ``np.nanprod`` | Compute product of elements || ``np.mean`` | ``np.nanmean`` | Compute mean of elements || ``np.std`` | ``np.nanstd`` | Compute standard deviation || ``np.var`` | ``np.nanvar`` | Compute variance || ``np.min`` | ``np.nanmin`` | Find minimum value || ``np.max`` | ``np.nanmax`` | Find maximum value || ``np.argmin`` | ``np.nanargmin`` | Find index of minimum value || ``np.argmax`` | ``np.nanargmax`` | Find index of maximum value || ``np.median`` | ``np.nanmedian`` | Compute median of elements || ``np.percentile`` | ``np.nanpercentile``| Compute rank-based statistics of elements || ``np.any`` | N/A | Evaluate whether any elements are true || ``np.all`` | N/A | Evaluate whether all elements are true |We will see these aggregates often throughout the rest of the book. Comparison Operators as ufuncsWe introduced ufuncs, and focused in particular on arithmetic operators. We saw that using ``+``, ``-``, ``*``, ``/``, and others on arrays leads to element-wise operations.NumPy also implements comparison operators such as ``<`` (less than) and ``>`` (greater than) as element-wise ufuncs.The result of these comparison operators is always an array with a Boolean data type.All six of the standard comparison operations are available:x = np.array([1, 2, 3, 4, 5]) x < 3 # less than x > 3 # greater than x <= 3 # less than or equal x >= 3 # greater than or equal x != 3 # not equal x == 3 # equalIt is also possible to do an element-wise comparison of two arrays, and to include compound expressions:(2 * x) == (x ** 2)As in the case of arithmetic operators, the comparison operators are implemented as ufuncs in NumPy; for example, when you write ``x < 3``, internally NumPy uses ``np.less(x, 3)``. A summary of the comparison operators and their equivalent ufunc is shown here: | Operator | Equivalent ufunc ||---------------|---------------------||``==`` |``np.equal`` ||``!=`` |``np.not_equal`` ||``<`` |``np.less`` ||``<=`` |``np.less_equal`` ||``>`` |``np.greater`` ||``>=`` |``np.greater_equal`` | Using comparison operators to subset an array:x[x > 3]Fast Sorting in NumPy: ``np.sort`` and ``np.argsort``Although Python has built-in ``sort`` and ``sorted`` functions to work with lists, we won't discuss them here because NumPy's ``np.sort`` function turns out to be much more efficient and useful for our purposes.By default ``np.sort`` uses an $\mathcal{O}[N\log N]$ *quicksort* algorithm, though *mergesort* and *heapsort* are also available. 
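If one of the alternative algorithms is needed (for instance a stable sort), it can be selected through ``np.sort``'s ``kind`` keyword; a short illustrative example:

```python
import numpy as np

x = np.array([2, 1, 4, 3, 5])
print(np.sort(x, kind='mergesort'))  # stable sort
print(np.sort(x, kind='heapsort'))
```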
For most applications, the default quicksort is more than sufficient.To return a sorted version of the array without modifying the input, you can use ``np.sort``:x = np.array([2, 1, 4, 3, 5]) np.sort(x)If you prefer to sort the array in-place, you can instead use the ``sort`` method of arrays:x.sort() print(x)[1 2 3 4 5]A related function is ``argsort``, which instead returns the *indices* of the sorted elements:x = np.array([2, 1, 4, 3, 5]) i = np.argsort(x) print(i)[1 0 3 2 4]The first element of this result gives the index of the smallest element, the second value gives the index of the second smallest, and so on.These indices can then be used (via fancy indexing) to construct the sorted array if desired:x[i]Sorting along rows or columns A useful feature of NumPy's sorting algorithms is the ability to sort along specific rows or columns of a multidimensional array using the ``axis`` argument. For example:rand = np.random.RandomState(42) X = rand.randint(0, 10, (4, 6)) print(X) # sort each column of X np.sort(X, axis=0) # sort each row of X np.sort(X, axis=1)Problem description: You are given an integer array prices, where the i-th element is the price of a given stock on day i; the non-negative integer fee represents a transaction fee. You may complete as many transactions as you like, but you must pay the transaction fee for each transaction. You may hold at most 1 share of the stock at a time (i.e., you must sell the stock before buying again). Return the maximum profit you can earn.Example 1: Input: prices = [1, 3, 2, 8, 4, 9], fee = 2 Output: 8 Explanation: The maximum profit can be achieved by: Buying at prices[0] = 1 Selling at prices[3] = 8 Buying at prices[4] = 4 Selling at prices[5] = 9 The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.class Solution: def maxProfit(self, prices, fee: int) -> int: # dynamic programming approach hold = -float('inf')/2 sold = 0 for price in prices: print(hold, sold, price, 'before') # choose to buy or not to buy hold = max(hold, sold - price) # sold is the cash currently in hand; sold - price is what remains after buying # choose to sell or not to sell sold = max(sold, hold + price - fee) print(hold, sold, price, 'after') return sold prices_ = [100, 3, 2, 8, 4, 9] fee_ = 2 solution = Solution() solution.maxProfit(prices_, fee_)-inf 0 100 before -100 0 100 after -100 0 3 before -3 0 3 after -3 0 2 before -2 0 2 after -2 0 8 before -2 4 8 after -2 4 4 before 0 4 4 after 0 4 9 before 0 7 9 afterA characteristic of dynamic programming: every element offers two choices, take it or skip it.class Solution: def maxProfit(self, prices, fee: int) -> int: # dynamic programming approach: on each day you may choose to buy or to sell pass prices_ = [100, 3, 2, 8, 4, 9] fee_ = 2 solution = Solution() solution.maxProfit(prices_, fee_)*mocalum* tutorial 4 Monte-Carlo simulation for single-Doppler configuration A notebook by IntroductionIn this section we will calculate the wind speed uncertainty of a single-Doppler setup by means of Monte-Carlo simulations. We will consider a sector-scanning lidar performing a PPI scan. We will vary the size of the PPI scan and the wind direction. Nevertheless, the range, and thus the elevation angle of the PPI scan, as well as the central azimuth angle, will be kept fixed.We will consider two cases in which the uncertainties are either correlated or uncorrelated from one line of sight (LOS) of the PPI scan to another. The obtained results will be collected and presented as plots showing the dependency of the wind speed uncertainty on the size of the scanned sector, the wind direction and the correlation coefficient. 
If you want to directly explore previously derived results of the Monte-Carlo simulations, first import the required libraries in the cell below and then go to the notebook section [Results of Monte Carlo simulations](monte_carlo_results).import numpy as np import matplotlib.pyplot as plt import xarray as xr import mocalum as mc from mocalum_tutorial import plot_sd_scan_setup, plot_bbox, plot_ffield, average_sonic # setup of fontsize for plots SMALL_SIZE = 12 MEDIUM_SIZE = 14 BIGGER_SIZE = 16 plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title mc_IVAP = mc.Mocalum()Case 1: Uncorrelated uncertaintiesLet's consider a lidar, which we will call 'koshava', located at position (0,0,0) and having the following values for its uncertainty contributors (standard uncertainty):- estimation uncertainty of radial velocity of 0.1 m/s - ranging uncertainty of 5 m- azimuth uncertainty of 0.1 deg- elevation uncertainty of 0.1 degWe will consider that the uncertainty values are not correlated from one LOS to another, thus the correlation coefficient will be equal to 0:lidar_pos = np.array([0,0,0]) unc_cfg = {'unc_az' : 0.1, 'unc_el' : 0.1, 'unc_rng' : 5, 'unc_est' : 0.1, 'corr_coef':0} mc_IVAP.add_lidar('koshava', lidar_pos, unc_cfg)Now we will bundle up the `mocalum` configurations, which will be executed in a `for` loop. For simplicity, we will omit kinematic limits in these configurations. We will generate a `turbulent` flow field. To calculate the uncertainty of the reconstructed wind speed we first need to calculate the difference between the actual and reconstructed wind speed. Afterwards we compute the mean difference (which represents the systematic part of the wind speed uncertainty) and the standard deviation of the difference (which represents the random part of the wind speed uncertainty).If we have generated a `uniform` flow field this is rather straightforward, since the actual wind speed does not change with time. On the other hand, in the case of `turbulent` flow fields the wind speed varies with time, thus we need to extract the wind speed at specific time instances. `mocalum` provides a method called `generate_virtual_sonic` to perform this type of work. This method takes the following parameters: - `meas_pts` : Measurement point positions as an (n,3) shaped numpy array - `time_steps` : Numpy array of time instances at which the sonic is 'measuring' > **mocalum note**: Beware that `meas_pts` must contain points which are within the bounding box of the lidar measurement points. Otherwise the method `generate_virtual_sonic` will return `NaNs`. In this tutorial we will set `meas_pts` to the central point of the PPI scan, while `time_steps` will take the values of `data.probing[lidar_id].time`. The result of this method is an `xarray` dataset `data.sonic_wind` containing the wind vector values at the given point(s) and time instances. Since we are going to average PPI scans prior to the wind speed reconstruction, a function `average_sonic` is provided, which takes the following input parameters: - `ds` : `mocalum` sonic_wind xr dataset - `no_samples` : number of samples in the averaging block and returns the averaged sonic_wind xr dataset. 
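As a quick illustration of how these block sizes work out for the configuration used below (an added sketch; the values simply mirror the next cell): a 30 deg sector scanned with a 1 deg angular step at 1 s per beam takes 30 s per PPI scan, so a 600 s averaging period contains 20 lidar scans and 600 sonic samples.

```python
# Illustrative arithmetic only; the same quantities are computed in the cell below.
avg_period = 600    # s, averaging period
sector_size = 30    # deg
angular_step = 1    # deg per beam
acq_time = 1        # s per beam

scan_duration = sector_size / (angular_step / acq_time)    # 30 s per PPI scan
lidar_scans_per_blck = int(avg_period / scan_duration)     # 20 scans per averaging block
sonic_samples_per_blck = int(avg_period / acq_time)        # 600 sonic samples per block
print(lidar_scans_per_blck, sonic_samples_per_blck)
```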
We will have to be careful to match `no_samples`, such that the averaged sonic_wind dataset has a length matching the length of the reconstructed wind dataset. Before we bundle this all together, let's remind ourselves of the configuration steps:meas_range = 1000 # in m meas_height = 100 # in m sector_size = 30 # in deg wind_from_direction = 45 # in deg avg_period = 600 # in seconds PPI_cfg = { 'no_scans' : 2*3*5*7*9*100, # to be able to average for various combinations of sector size 'range' : 1000, 'meas_height' : meas_height, 'elevation' : np.degrees(np.arcsin(meas_height / meas_range)), # to ensure measurements at 100 m agl 'angular_step' : 1, # degree 'acq_time' : 1, # s 'azimuth_mid' : 90, # central azimuth angle 'sector_size' : sector_size, # degree } # calculating number of scans and sonic samples in averaging block lidar_scans_per_blck = int(avg_period/(sector_size/(PPI_cfg['angular_step'] / PPI_cfg['acq_time']))) sonic_samples_per_blck = int(avg_period/PPI_cfg['acq_time']) atmo_cfg={'wind_speed':10, 'upward_velocity':0, 'wind_from_direction': wind_from_direction, 'reference_height':100, # we set the reference height same as the scanned height 'shear_exponent':0.2} mc_IVAP.generate_PPI_scan('koshava', PPI_cfg) mc_IVAP.generate_uncertainties('koshava') mc_IVAP.generate_flow_field('koshava', atmo_cfg, 'turbulent') mc_IVAP.project_to_los('koshava') # virtual sonic setup/generation time_steps = mc_IVAP.data.probing['koshava'].time.values sonic_pos = np.array([[1000,0,meas_height]]) mc_IVAP.generate_virtual_sonic(sonic_pos, time_steps) # average and reconstruct data mc_IVAP.reconstruct_wind('koshava', 'IVAP', lidar_scans_per_blck) avg_sonic_data = average_sonic(mc_IVAP.data.sonic_wind, sonic_samples_per_blck) # calculate difference: diff_ds = avg_sonic_data - mc_IVAP.data.rc_wind # get standard uncertainty std_diff_ws = diff_ds.ws.std() # standard uncertainty of horizontal wind speed std_diff_wdir = diff_ds.wdir.std() # standard uncertainty of wind direction # get mean difference uncertainty mean_diff_ws = diff_ds.ws.mean() # mean difference (systematic part) for horizontal wind speed mean_diff_wdir = diff_ds.wdir.mean() # mean difference (systematic part) for wind direction counts, bins = np.histogram(mc_IVAP.data.probing['koshava'].unc_az.values) mc_IVAP.data.probing['koshava'].unc_azLet's now set up the loop that will perform the Monte Carlo simulations.We will loop over the sector size, starting at 10 deg and finishing at 100 deg, and run simulations for two wind directions: 0 deg (wind perpendicular to the PPI scan) and 90 deg (wind aligned with the PPI scan).To know how far along we are in the simulations we will import the package `tqdm`, which will display the current progress.The end result of the simulation will be provided as an `xarray` dataset, which will be exported as a [NetCDF](https://en.wikipedia.org/wiki/NetCDF) file for future reuse.from tqdm.notebook import trange, tqdm meas_range = 1000 # in m meas_height = 100 # in m avg_period = 600 # in seconds sim_ssize = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] sim_wdir = [0,90] uncertainty = np.empty((len(sim_ssize), len(sim_wdir),2,2)) for i,sector in enumerate(tqdm(sim_ssize, desc ="Sector size loop")): sector_size = sector # in deg PPI_cfg = { 'no_scans' : 2*3*5*7*9*100, # to be able to average for various combinations of sector size 'range' : 1000, 'meas_height' : meas_height, 'elevation' : np.degrees(np.arcsin(meas_height / meas_range)), # to ensure measurements at 100 m agl 'angular_step' : 1, # degree 'acq_time' : 1, # s 'azimuth_mid' : 90, # central azimuth angle 'sector_size' 
: sector_size, # degree } # calculating number of scans and sonic samples in averaging block lidar_scans_per_blck = int(avg_period/(sector_size/(PPI_cfg['angular_step'] / PPI_cfg['acq_time']))) sonic_samples_per_blck = int(avg_period/PPI_cfg['acq_time']) for j,wind_from_direction in enumerate(tqdm(sim_wdir, desc ="Wind direction loop", leave = False)): atmo_cfg={'wind_speed':10, 'upward_velocity':0, 'wind_from_direction': wind_from_direction, 'reference_height':100, # we set the reference height same as the scanned height 'shear_exponent':0.2} mc_IVAP.generate_PPI_scan('koshava', PPI_cfg) mc_IVAP.generate_uncertainties('koshava') mc_IVAP.generate_flow_field('koshava', atmo_cfg, 'turbulent') mc_IVAP.project_to_los('koshava') # virtual sonic setup/generation time_steps = mc_IVAP.data.probing['koshava'].time.values sonic_pos = np.array([[1000,0,meas_height]]) mc_IVAP.generate_virtual_sonic(sonic_pos, time_steps) # average and reconstruct data mc_IVAP.reconstruct_wind('koshava', 'IVAP', lidar_scans_per_blck) avg_sonic_data = average_sonic(mc_IVAP.data.sonic_wind, sonic_samples_per_blck) # calculate difference: diff_ds = avg_sonic_data - mc_IVAP.data.rc_wind # get random and systematich part of uncertainty uncertainty[i,j,0] = np.array([diff_ds.ws.mean(), diff_ds.ws.std()]) uncertainty[i,j,1] = np.array([diff_ds.wdir.mean(), diff_ds.wdir.std()]) ds_unc_uncorr = xr.Dataset({'unc_ws': (['corr_coef', 'sector_size', 'wind_direction', 'statistics'], np.array([uncertainty[:,:,0]])), 'unc_wdir': (['corr_coef', 'sector_size', 'wind_direction', 'statistics'], np.array([uncertainty[:,:,1]]))}, coords={'corr_coef': [0], 'sector_size': sim_ssize, 'wind_direction': sim_wdir, 'statistics': ['mean', 'std']})Case 2: Correlated uncertaintiesThe only difference comparing to the first case is setting up `corr_coef` to 1.lidar_pos = np.array([0,0,0]) unc_cfg = {'unc_az' : 0.1, 'unc_el' : 0.1, 'unc_rng' : 5, 'unc_est' : 0.1, 'corr_coef':1} mc_IVAP.add_lidar('koshava', lidar_pos, unc_cfg) meas_range = 1000 # in m meas_height = 100 # in m avg_period = 600 # in seconds sim_ssize = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] sim_wdir = [0,90] uncertainty = np.empty((len(sim_ssize), len(sim_wdir),2,2)) for i,sector in enumerate(tqdm(sim_ssize, desc ="Sector size loop")): sector_size = sector # in deg PPI_cfg = { 'no_scans' : 2*3*5*7*9*100, # to be able to average for various combination of sector size 'range' : 1000, 'meas_height' : meas_height, 'elevation' : np.degrees(np.arcsin(meas_height / meas_range)), # to assure measurements at 100 m agl 'angular_step' : 1, # degreee 'acq_time' : 1, # s 'azimuth_mid' : 90, # central azimuth angle 'sector_size' : sector_size, # degree } # calculating number of scans and sonic samples in averaging block lidar_scans_per_blck = int(avg_period/(sector_size/(PPI_cfg['angular_step'] / PPI_cfg['acq_time']))) sonic_samples_per_blck = int(avg_period/PPI_cfg['acq_time']) for j,wind_from_direction in enumerate(tqdm(sim_wdir, desc ="Wind direction loop", leave = False)): atmo_cfg={'wind_speed':10, 'upward_velocity':0, 'wind_from_direction': wind_from_direction, 'reference_height':100, # we set the reference height same as the scanned height 'shear_exponent':0.2} mc_IVAP.generate_PPI_scan('koshava', PPI_cfg) mc_IVAP.generate_uncertainties('koshava') mc_IVAP.generate_flow_field('koshava', atmo_cfg, 'turbulent') mc_IVAP.project_to_los('koshava') # virtual sonic setup/generation time_steps = mc_IVAP.data.probing['koshava'].time.values sonic_pos = np.array([[1000,0,meas_height]]) 
mc_IVAP.generate_virtual_sonic(sonic_pos, time_steps) # average and reconstruct data mc_IVAP.reconstruct_wind('koshava', 'IVAP', lidar_scans_per_blck) avg_sonic_data = average_sonic(mc_IVAP.data.sonic_wind, sonic_samples_per_blck) # calculate difference: diff_ds = avg_sonic_data - mc_IVAP.data.rc_wind # get random and systematich part of uncertainty uncertainty[i,j,0] = np.array([diff_ds.ws.mean(), diff_ds.ws.std()]) uncertainty[i,j,1] = np.array([diff_ds.wdir.mean(), diff_ds.wdir.std()]) ds_unc_corr = xr.Dataset({'unc_ws': (['corr_coef', 'sector_size', 'wind_direction', 'statistics'], np.array([uncertainty[:,:,0]])), 'unc_wdir': (['corr_coef', 'sector_size', 'wind_direction', 'statistics'], np.array([uncertainty[:,:,1]]))}, coords={'corr_coef': [1], 'sector_size': sim_ssize, 'wind_direction': sim_wdir, 'statistics': ['mean', 'std']}) ds_IVAP_unc = ds_unc_corr.merge(ds_unc_uncorr) ds_IVAP_unc.to_netcdf('./assets/IVAP_uncertainty.nc')Results of Monte Carlo simulationsYou can either analyze/plot your own results or you can directly load results from the previous Monte Carlo simulations which are located in the subdirectory `assets`. If you decide to load the results from the previous simulation execture the cell below, otherwise simply skip it and continue to the next cell after it.ds_IVAP_unc = xr.open_dataset('./assets/IVAP_uncertainty.nc') fig, ax = plt.subplots(figsize=(7.5, 7.5)) plt.grid() ds_IVAP_unc.unc_ws.sel(wind_direction=90,statistics='std', corr_coef=0).plot(ax=ax, label='corr coeficient = 0') ds_IVAP_unc.unc_ws.sel(wind_direction=90,statistics='std', corr_coef=1).plot(ax=ax, label='corr coeficient = 1') plt.xlabel('Sector size [deg]') plt.ylabel('Standard deviation of difference [m/s]') plt.title('Wind aligned with the scanned sector (wind dir = 90 deg)') plt.legend(loc="upper left") plt.show() fig.savefig('./assets/std_diff_wind_speed_dir_90.png') fig, ax = plt.subplots(figsize=(7.5, 7.5)) plt.grid() ds_IVAP_unc.unc_ws.sel(wind_direction=0,statistics='std', corr_coef=0).plot(ax=ax, label='corr coeficient = 0') ds_IVAP_unc.unc_ws.sel(wind_direction=0,statistics='std', corr_coef=1).plot(ax=ax, label='corr coeficient = 1') plt.xlabel('Sector size [deg]') plt.ylabel('Standard deviation of difference [m/s]') plt.title('Wind orthogonal to the scanned sector (wind dir = 0 deg)') plt.legend(loc="upper left") plt.show() fig.savefig('./assets/std_diff_wind_speed_dir_0.png') fig, ax = plt.subplots(figsize=(7.5, 7.5)) plt.grid() ds_IVAP_unc.unc_ws.sel(wind_direction=90,statistics='mean', corr_coef=0).plot(ax=ax, label='corr coeficient = 0') ds_IVAP_unc.unc_ws.sel(wind_direction=90,statistics='mean', corr_coef=1).plot(ax=ax, label='corr coeficient = 1') plt.xlabel('Sector size [deg]') plt.ylabel('Mean difference [m/s]') plt.title('Wind aligned with the scanned sector (wind dir = 90 deg)') plt.legend(loc="upper left") plt.show() fig.savefig('./assets/mean_diff_wind_speed_dir_90.png') fig, ax = plt.subplots(figsize=(7.5, 7.5)) plt.grid() ds_IVAP_unc.unc_ws.sel(wind_direction=0,statistics='mean', corr_coef=0).plot(ax=ax, label='corr coeficient = 0') ds_IVAP_unc.unc_ws.sel(wind_direction=0,statistics='mean', corr_coef=1).plot(ax=ax, label='corr coeficient = 1') plt.xlabel('Sector size [deg]') plt.ylabel('Mean difference [m/s]') plt.title('Wind orthogonal to the scanned sector (wind dir = 0 deg)') plt.legend(loc="upper left") plt.show() fig.savefig('./assets/mean_diff_wind_speed_dir_0.png')Keras for Text Classification**Learning Objectives**1. 
Learn how to tokenize and integerize a corpus of text for training in Keras1. Learn how to do one-hot-encodings in Keras1. Learn how to use embedding layers to represent words in Keras1. Learn about the bag-of-word representation for sentences1. Learn how to use DNN/CNN/RNN model to classify text in keras IntroductionIn this notebook, we will implement text models to recognize the probable source (Github, Tech-Crunch, or The New-York Times) of the titles we have in the title dataset we constructed in the previous lab.In a first step, we will load and pre-process the texts and labels so that they are suitable to be fed to a Keras model. For the texts of the titles we will learn how to split them into a list of tokens, and then how to map each token to an integer using the Keras Tokenizer class. What will be fed to our Keras models will be batches of padded list of integers representing the text. For the labels, we will learn how to one-hot-encode each of the 3 classes into a 3 dimensional basis vector.Then we will explore a few possible models to do the title classification. All models will be fed padded list of integers, and all models will start with a Keras Embedding layer that transforms the integer representing the words into dense vectors.The first model will be a simple bag-of-word DNN model that averages up the word vectors and feeds the tensor that results to further dense layers. Doing so means that we forget the word order (and hence that we consider sentences as a “bag-of-words”). In the second and in the third model we will keep the information about the word order using a simple RNN and a simple CNN allowing us to achieve the same performance as with the DNN model but in much fewer epochs.# Ensure the right version of Tensorflow is installed. !pip freeze | grep tensorflow==2.0 || pip install tensorflow==2.0 import os import shutil import pandas as pd import tensorflow as tf from tensorflow.keras.callbacks import TensorBoard, EarlyStopping from tensorflow.keras.layers import ( Embedding, Flatten, GRU, Conv1D, Lambda, Dense, ) from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.utils import to_categorical print(tf.__version__) %matplotlib inlineLet's start by specifying where the information about the trained models will be saved as well as where our dataset is located:LOGDIR = "./text_models" DATA_DIR = "./data"Loading the dataset Our dataset consists of titles of articles along with the label indicating from which source these articles have been taken from (GitHub, Tech-Crunch, or the New-York Times).DATASET_NAME = "titles_full.csv" TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME) COLUMNS = ['title', 'source'] titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS) titles_df.head()Integerize the texts The first thing we need to do is to find how many words we have in our dataset (`VOCAB_SIZE`), how many titles we have (`DATASET_SIZE`), and what the maximum length of the titles we have (`MAX_LEN`) is. 
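To make these three quantities concrete, here is a tiny preview on a couple of made-up titles (a toy sketch only, not the actual dataset; the real titles are handled with the Keras `Tokenizer` in the next cell):

```python
# Toy preview of what VOCAB_SIZE, DATASET_SIZE and MAX_LEN mean,
# using naive whitespace tokenization on two invented titles.
toy_titles = [
    "astronomers detect water on a distant exoplanet",
    "new javascript framework released on github",
]
toy_tokens = [title.split() for title in toy_titles]
toy_vocab = {word for tokens in toy_tokens for word in tokens}

print("DATASET_SIZE:", len(toy_titles))                            # number of titles
print("VOCAB_SIZE:  ", len(toy_vocab))                             # number of distinct words
print("MAX_LEN:     ", max(len(tokens) for tokens in toy_tokens))  # longest title, in tokens
```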
Keras offers the `Tokenizer` class in its `keras.preprocessing.text` module to help us with that:tokenizer = Tokenizer() tokenizer.fit_on_texts(titles_df.title) integerized_titles = tokenizer.texts_to_sequences(titles_df.title) integerized_titles[:3] VOCAB_SIZE = len(tokenizer.index_word) VOCAB_SIZE DATASET_SIZE = tokenizer.document_count DATASET_SIZE MAX_LEN = max(len(sequence) for sequence in integerized_titles) MAX_LENLet's now implement a function `create_sequence` that will * take as input our titles as well as the maximum sentence length and * returns a list of the integers corresponding to our tokens padded to the sentence maximum lengthKeras has the helper functions `pad_sequence` for that on the top of the tokenizer methods.# TODO 1 def create_sequences(texts, max_len=MAX_LEN): sequences = tokenizer.texts_to_sequences(texts) padded_sequences = pad_sequences(sequences, max_len, padding='post') return padded_sequences sequences = create_sequences(titles_df.title[:3]) sequences titles_df.source[:4]We now need to write a function that * takes a title source and* returns the corresponding one-hot encoded vectorKeras `to_categorical` is handy for that.CLASSES = { 'github': 0, 'nytimes': 1, 'techcrunch': 2 } N_CLASSES = len(CLASSES) # TODO 2 def encode_labels(sources): classes = [CLASSES[source] for source in sources] one_hots = to_categorical(classes) return one_hots encode_labels(titles_df.source[:4])Preparing the train/test splits Let's split our data into train and test splits:N_TRAIN = int(DATASET_SIZE * 0.80) titles_train, sources_train = ( titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN]) titles_valid, sources_valid = ( titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:])To be on the safe side, we verify that the train and test splitshave roughly the same number of examples per classes.Since it is the case, accuracy will be a good metric to use to measurethe performance of our models.sources_train.value_counts() sources_valid.value_counts()Using `create_sequence` and `encode_labels`, we can now prepare thetraining and validation data to feed our models.The features will bepadded list of integers and the labels will be one-hot-encoded 3D vectors.X_train, Y_train = create_sequences(titles_train), encode_labels(sources_train) X_valid, Y_valid = create_sequences(titles_valid), encode_labels(sources_valid) X_train[:3] Y_train[:3]Building a DNN model The build_dnn_model function below returns a compiled Keras model that implements a simple embedding layer transforming the word integers into dense vectors, followed by a Dense softmax layer that returns the probabilities for each class.Note that we need to put a custom Keras Lambda layer in between the Embedding layer and the Dense softmax layer to do an average of the word vectors returned by the embedding layer. This is the average that's fed to the dense softmax layer. 
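As a side note (not part of the lab), the averaging that the `Lambda` layer performs in the `build_dnn_model` function of the next cell can equivalently be expressed with Keras' built-in `GlobalAveragePooling1D` layer. A minimal sketch, reusing the imports from the setup cell and assuming `VOCAB_SIZE`, `MAX_LEN`, and `N_CLASSES` are defined as above:

```python
from tensorflow.keras.layers import GlobalAveragePooling1D

def build_dnn_model_gap(embed_dim):
    # Same bag-of-words idea: embed each word, then average the word vectors
    # over the time axis before the softmax classifier.
    model = Sequential([
        Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN]),
        GlobalAveragePooling1D(),
        Dense(N_CLASSES, activation='softmax'),
    ])
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )
    return model
```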
By averaging the word vectors in this way, we create a model that is simple but that loses information about the word order, creating a model that sees sentences as "bag-of-words".def build_dnn_model(embed_dim): model = Sequential([ Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN]), # TODO 3 Lambda(lambda x: tf.reduce_mean(x, axis=1)), # TODO 4 Dense(N_CLASSES, activation='softmax') # TODO 5 ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) return modelBelow we train the model for up to 100 epochs, adding an `EarlyStopping` callback that will stop the training as soon as the validation loss has not improved after a number of epochs specified by `PATIENCE`. Note that we also give the `model.fit` method a TensorBoard callback so that we can later compare all the models using TensorBoard.%%time tf.random.set_seed(33) MODEL_DIR = os.path.join(LOGDIR, 'dnn') shutil.rmtree(MODEL_DIR, ignore_errors=True) BATCH_SIZE = 300 EPOCHS = 100 EMBED_DIM = 10 PATIENCE = 0 dnn_model = build_dnn_model(embed_dim=EMBED_DIM) dnn_history = dnn_model.fit( X_train, Y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_valid, Y_valid), callbacks=[EarlyStopping(patience=PATIENCE), TensorBoard(MODEL_DIR)], ) pd.DataFrame(dnn_history.history)[['loss', 'val_loss']].plot() pd.DataFrame(dnn_history.history)[['accuracy', 'val_accuracy']].plot() dnn_model.summary()Building an RNN model The `build_rnn_model` function below returns a compiled Keras model that implements a simple RNN model with a single `GRU` layer, which now takes into account the word order in the sentence.The first and last layers are the same as for the simple DNN model.Note that we set `mask_zero=True` in the `Embedding` layer so that the padded words (represented by a zero) are ignored by this and the subsequent layers.def build_rnn_model(embed_dim, units): model = Sequential([ Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN], mask_zero=True), # TODO 3 GRU(units), # TODO 5 Dense(N_CLASSES, activation='softmax') ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) return modelLet's train the model with early stopping as above. Observe that we obtain the same type of accuracy as with the DNN model, but in fewer epochs (~3 vs. 
~20 epochs):%%time tf.random.set_seed(33) MODEL_DIR = os.path.join(LOGDIR, 'rnn') shutil.rmtree(MODEL_DIR, ignore_errors=True) EPOCHS = 100 BATCH_SIZE = 300 EMBED_DIM = 10 UNITS = 16 PATIENCE = 0 rnn_model = build_rnn_model(embed_dim=EMBED_DIM, units=UNITS) history = rnn_model.fit( X_train, Y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_valid, Y_valid), callbacks=[EarlyStopping(patience=PATIENCE), TensorBoard(MODEL_DIR)], ) pd.DataFrame(history.history)[['loss', 'val_loss']].plot() pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot() rnn_model.summary()Build a CNN model The `build_cnn_model` function below returns a compiled Keras model that implements a simple CNN model with a single `Conv1D` layer, which now takes into account the word order in the sentence.The first and last layers are the same as for the simple DNN model, but we need to add a `Flatten` layer between the convolution and the softmax layer.Note that we set `mask_zero=True` in the `Embedding` layer so that the padded words (represented by a zero) are ignored by this and the subsequent layers.def build_cnn_model(embed_dim, filters, ksize, strides): model = Sequential([ Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN], mask_zero=True), # TODO 3 Conv1D( # TODO 5 filters=filters, kernel_size=ksize, strides=strides, activation='relu', ), Flatten(), # TODO 5 Dense(N_CLASSES, activation='softmax') ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) return modelLet's train the model. Again we observe that we get the same kind of accuracy as with the DNN model but in many fewer epochs.%%time tf.random.set_seed(33) MODEL_DIR = os.path.join(LOGDIR, 'cnn') shutil.rmtree(MODEL_DIR, ignore_errors=True) EPOCHS = 100 BATCH_SIZE = 300 EMBED_DIM = 5 FILTERS = 200 STRIDES = 2 KSIZE = 3 PATIENCE = 0 cnn_model = build_cnn_model( embed_dim=EMBED_DIM, filters=FILTERS, strides=STRIDES, ksize=KSIZE, ) cnn_history = cnn_model.fit( X_train, Y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_valid, Y_valid), callbacks=[EarlyStopping(patience=PATIENCE), TensorBoard(MODEL_DIR)], ) pd.DataFrame(cnn_history.history)[['loss', 'val_loss']].plot() pd.DataFrame(cnn_history.history)[['accuracy', 'val_accuracy']].plot() cnn_model.summary()Comparing the models At last, let's compare all the models we have trained at once using TensorBoard in order to choose the one that overfits the least for the same performance level.Running the following command will launch TensorBoard on port 6006. This will block the notebook execution, so you'll have to interrupt that cell first before you can run other cells.!tensorboard --logdir $LOGDIR --port 6006Train/Test split# Split the data into a train and test set #df_train, df_test = candy.iloc[:-250], candy.iloc[-250:] df_train, df_test = candy.iloc[:-200], candy.iloc[-200:] # Create an axis fig, ax = plt.subplots() # Plot the train and test sets and show df_train.plot(ax=ax) df_test.plot(ax=ax) #plt.axvline(x=4000, color='k', linestyle='--') ax.legend(["Train", "Test"]); plt.show()Univariate forecasting requires using the same sequence as features and target. 
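As a toy sketch of that idea (invented numbers, not the candy data): each sample's feature is the series value at time t and its target is the value a fixed number of steps later, which is exactly the 0-100 / 100-200 framing used next.

```python
import numpy as np

# Frame a univariate series as supervised learning: an earlier window of the
# same series is the feature, a later window (shifted by `window` steps) is the target.
series = np.arange(30, dtype=float)      # stand-in for a univariate series

window = 10
X_toy = series[:window].reshape(-1, 1)   # features: time points 0-9, one value per sample
y_toy = series[window:2 * window]        # targets:  time points 10-19

print(X_toy.ravel())   # [0. 1. ... 9.]
print(y_toy)           # [10. 11. ... 19.]
```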
Here is one choice where I do not use the test dataframe.**Features**: 0-100 time points**Target** : 100-200 time pointsX_train = df_train[-300:-200].to_numpy().reshape(-1,1) X_test = df_train[-200:-100].to_numpy().reshape(-1,1) y_train = df_train[-200:-100].to_numpy().ravel() y_test = df_train[-100:].to_numpy().ravel() #X_train = df_train[-200:-100].to_numpy().reshape(-1,1) #X_test = df_test[-100:-50].to_numpy().reshape(-1,1) #y_train = df_train[-200:-100].to_numpy().ravel() #y_test = df_test[-100:-50].to_numpy().ravel()Baseline 1. Last valuey_base1 = np.copy(y_test) y_base1[:] = y_test[0] print_scores(y_test, y_base1)R2 score: -0.44501992071288243 MSE score: 228.26728138975366 MAE score: 12.076231907505443 Median AE score: 9.37603926045103 MAPE score: 10.114830034905106 SMAPE score: 5.3567332895786642. Rolling meancandy_rolling_mean = candy.rolling(20).mean() y_base2 = candy_rolling_mean.iloc[-100:].to_numpy() print_scores(y_test, y_base2)R2 score: -0.298747826933458 MSE score: 205.1609333653304 MAE score: 10.642681908653898 Median AE score: 7.633135193490908 MAPE score: 10.620370484183471 SMAPE score: 5.58266486675679Pipelinepipe_lasso = Pipeline([ #('scale', MinMaxScaler(feature_range=(0, 1))), ('scale', StandardScaler()), ('lasso', Lasso()) ]) pipe_rf = Pipeline([ #('scale', MinMaxScaler(feature_range=(0, 1))), ('scale', StandardScaler()), ('rf', RandomForestRegressor(random_state=42)) ]) pipe_xgb = Pipeline([ #('scale', MinMaxScaler(feature_range=(0, 1))), ('scale', StandardScaler()), ('xgb', xgb.XGBRegressor()) ]) params_lasso = {'lasso__alpha': np.logspace(-4,1,10)} params_rf = { #'rf__criterion': ['mae', 'mse'], 'rf__max_depth': [2, 4, 8], #'rf__max_features': ['auto', 'sqrt', None], 'rf__min_samples_leaf': [2, 4, 8], 'rf__n_estimators': [8, 16, 32, 64] } params_xgb = { 'xgb__learning_rate': [0.001,0.01,0.1,0.25], 'xgb__max_depth': [4,8,16], 'xgb__subsample': [0.6,0.8], 'xgb__colsample_bytree': [0.4,0.6,0.8], 'xgb__n_estimators': [32,64,128], 'xgb__seed': [1337] }GridSearchCV LASSOmy_cv = TimeSeriesSplit(n_splits=2).split(X_train) gs_lasso = GridSearchCV(pipe_lasso, param_grid=params_lasso, scoring='neg_mean_absolute_error', cv=my_cv, n_jobs=4) # Fit to the training set gs_lasso.fit(X_train,y_train) # Predict y_pred_lasso = gs_lasso.predict(X_test) # Compute and print the metrics print(f"Tuned Lasso Alpha: {gs_lasso.best_params_}") print(f"Tuned Lasso Score: {gs_lasso.score(X_test, y_test)}") print_scores(y_test, y_pred_lasso)R2 score: -1.6596253138861394 MSE score: 420.13638096881834 MAE score: 17.29391429915223 Median AE score: 16.92541778766853 MAPE score: 14.43984346357555 SMAPE score: 7.970033459916509Random Forestsmy_cv = TimeSeriesSplit(n_splits=2).split(X_train) gs_rf = GridSearchCV(estimator=pipe_rf, param_grid=params_rf, scoring='neg_mean_absolute_error', cv=my_cv, n_jobs=4) # Fit to the training set gs_rf.fit(X_train,y_train) # Predict y_pred_rf = gs_rf.predict(X_test) # Compute and print the metrics print(f"Tuned RF params: {gs_rf.best_params_}") print(f"Tuned RF Score: {gs_rf.score(X_test, y_test)}") print_scores(y_test, y_pred_rf)R2 score: -2.4912056251799735 MSE score: 551.4996751320172 MAE score: 19.740117799280988 Median AE score: 17.706203309761726 MAPE score: 16.456281090024376 SMAPE score: 9.229567931690722XGBoostmy_cv = TimeSeriesSplit(n_splits=2).split(X_train) #xgb.XGBRegressor() gs_xgb = GridSearchCV(estimator=pipe_xgb, param_grid=params_xgb, scoring='neg_mean_absolute_error', cv=my_cv, n_jobs=4) # Fit to the training set gs_xgb.fit(X_train,y_train) # 
Predict y_pred_xgb = gs_xgb.predict(X_test) # Compute and print the metrics print(f"Tuned XGB params: {gs_xgb.best_params_}") print(f"Tuned XGB Score: {gs_xgb.score(X_test, y_test)}") print_scores(y_test, y_pred_xgb)R2 score: -2.7632140988299243 MSE score: 594.4683802031687 MAE score: 20.49399834181531 Median AE score: 18.77203540674435 MAPE score: 17.096887003683783 SMAPE score: 9.6461764647038Plot `y_pred`y_pred_lasso.shape, y_pred_rf.shape, y_pred_xgb.shape #t_test = np.linspace(4000, 4000+tst_sz, tst_sz) plt.plot(y_pred_lasso, 'b:', label='Lasso', alpha=0.5) plt.plot(y_pred_rf, 'g--', label='Random Forest', alpha=0.7) plt.plot(y_pred_xgb, 'k', label='XGBoost') plt.plot(y_test, 'r', label='Actual', alpha=0.3) #plt.plot(y_base1, 'k-.', label='Base1', alpha=0.3) #plt.plot(y_base2, 'k-.', label='Base2', alpha=0.4) plt.legend()Introduction to Probabilistic Programming Probabilistic programming is a method for Bayesian statistical analysis. Bayesian methods, in contrast to Frequentist statistics, do not believe that the data represents the entire truth about the world. Instead, Bayesian methods are model based meaning there is a prior probability which is updated with more information. This is captured in the famous Bayes' Rule.$$P(Y | X) = \frac{P(X | Y) * P (Y)}{P(X)}$$Where * $P(Y | X)$: posterior probability of Y conditioned on the value of X - what we want to figure out, * $(P X | Y)$: likelihood - the observed data* $P(Y)$: prior - probability of Y given no observations, also known as the base rate* $P(X)$: marginal probability of X - total probability of X for all values of YIn the case where both X and Y are Bernoulli variables (can only take on True / False values), then the marginal probability of X is $$P(X) = P(X | Y) * P(Y) + P(X | \neg Y) * P(\neg Y)$$where $\neg$ means "not". I don't tend to get much value from equations, so let's plug in some numbers into an example of determining if a transaction is fraudulent considering only whether or not the amount of the transactions > $500.Here are the relevant pieces:$$P(\text{Fraudulent} | \text{Amount > \$500}) = \frac{P(\text{Amount > \$500} | \text{Fraudulent}) * P(\text{Fraudulent})}{P(\text{Amount > \$500})}$$* $P(\text{Fraudulent} | \text{Amount > \$500})$: unknown posterior- what we want to find out* $P(\text{Amount > \$500} | \text{Fraudulent}) = 0.6$: likelihood based on past observations* $P(\text{Fraudulent}) = 0.01)$: prior probability of a fraudulent transactionThe marginal probability of a transaction greater than \$500 is must take into account both the situation where the transaction is fraudulent and the situation where the transaction is not fraudulent. * $P(\text{Amount} > \$500) = P(\text{Amount > \$500} | \text{Fraudulent}) * P(\text{Fraudulent}) + P(\text{Amount > \$500} | \neg \text{Fraudulent}) * P(\neg \text{Fraudulent})$This means we need $P(\text{Amount > \$500} | \neg \text{Fraudulent})$ which also must be found from the historical data. Let's say this is 0.2 We now have all the pieces and can carry out the calculation. $$P(\text{Fraudulent} | \text{Amount > \$500}) = \frac{0.6 * 0.01}{(0.6 * 0.01) + (0.2 * 0.99)} = 0.0294 = 2.94\%$$ This is the _posterior_ probability of a fraudulent transaction _conditioned_ on the amount of the transaction greater than \$500. The model we created for fraud is extremely simple - there is only one cause for fraud, the amount of the transaction. What happens when we have multiple causes, each of which in turn may have their own cause? 
For this, we turn to Bayesian graphical models which allow us to show the cause and effect relationship between variables as well as the associated probabilities. Bayesian Graphical ModelThe model below is a slightly more complex simulation of fraud. Here, fraud has two causes, if the transaction is greater than \$500, and if the transaction was made online. For now, we are still using only Bernoulli (True / False) variables in our model. A variable that does not have any causes has a prior probability as indicated by the blue tables. Variables that are dependent on other variables have a conditional probability table in orange. The conditional probability table specifies the probability of the dependent variable for all the values of its _parent_ variables. ![](../images/fraud_model.png)Once we have the model, we can figure out the posterior probability of any variable given any combination of the other variables. For example, to find the marginal probability of an online transaction, we need to sum up the probability for each combination of the parent variables.$$P(Online) = P(Online | Location, Gender) * P(Location) * P(Gender) + P(Online| Location, \neg Gender) * P(Location) * P(\neg Gender) + P(Online | \neg Location, Gender) * P(\neg Location) * P(Gender) + P(Online | \neg Location, \neg Gender) * P(\neg Location) * P(\neg Gender)$$Plugging in the numbers we get: $P(Online) = 0.25 * 0.2 * 0.5 + 0.45 * 0.2 * 0.5 + 0.3 * 0.8 * 0.5 + 0.6 * 0.8 * 0.5 = 0.43$That's a lot of work! We can do a similar operation for a fraudulent transaction and compute the marginal probability:$$P(Fraud) = 0.45 * 0.1 * 0.43 + 0.2 * 0.1 * 0.57 + 0.1 * 0.9 * 0.43 + 0.05 * 0.9 * 0.57 = 0.10518$$Here we see that the probability of a fraudulent transaction knowing known of the other variables is 10.5%. We can also set variables to be fixed and then compute the probabilities. If we set the amount to be less than \$500, then the marginal probability of a fraudulent transaction is $$P(Fraud | Amount) = 0.1 * 1.0 * 0.43 + 0.05 * 1.0 * 0.57 = 0.07635$$The probability we compute has dropped because we have additional evidence. Computing these probabilities by hand is not very fun! We could make a computer program to solve the probabilities for us, which works when we have a limited number of discrete (can only take on a limited number of options) variables. So, for this example currently, we can solve this problem analytically - meaning exactly. However, when we have more variables that can take on any values - continuous variables as almost any quantity in real life is! - then we need an approximate method. The most popular approximate method is Markov Chain Monte Carlo, which draws samples from the posterior in order to approximate the posterior. We set up a probability model, and then have an algorithm sample (draw examples) thousands of times from the model according to the probabilities (prior and conditional) that we have defined. The crazy part is, this method works extremely well! The sampled posterior converges on the true posterior given enough samples. Moreover, we can set any variables as observed and find the probability of any other variable. We can set a transaction to be fraudulent, and then find the probability that the location is San Francisco. This is known as the inverse probability problem. That's enough explanation for now. Let's work through implementing a Bayesian graphical model in PyMC3 and then performing inference on the model. 
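Before moving on to PyMC3, here is a quick numerical sanity check of the hand calculations above; it is only a sketch using the probabilities quoted in the prose (the full conditional probability tables live in the figure):

```python
# Single-cause example: P(Fraudulent | Amount > $500) via Bayes' rule
p_amt_given_fraud = 0.6
p_fraud = 0.01
p_amt_given_not_fraud = 0.2
p_amt = p_amt_given_fraud * p_fraud + p_amt_given_not_fraud * (1 - p_fraud)
print(p_amt_given_fraud * p_fraud / p_amt)   # ~0.0294, i.e. 2.94%

# Graphical-model example: marginal P(Online) by summing over Location and Gender
p_location, p_gender = 0.2, 0.5
p_online_given = {(1, 1): 0.25, (1, 0): 0.45, (0, 1): 0.30, (0, 0): 0.60}
p_online = sum(
    p_online_given[(loc, gen)]
    * (p_location if loc else 1 - p_location)
    * (p_gender if gen else 1 - p_gender)
    for loc in (0, 1) for gen in (0, 1)
)
print(p_online)   # 0.43
```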
Bayesian Model in PyMC3PyMC3 is a Python library for performing Bayesian inference on graphical models. It includes a number of MCMC methods for sampling from the posterior including the most efficient, the No-U-Turn Sampler (NUTS). We can translate our above graphical model into code using the following.import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import pymc3 as pm import theano.tensor as tt N_SAMPLES = 1000 N_CHAINS = 3 with pm.Model() as model: # Bernoulli random variables with priors gender = pm.Bernoulli('gender', 0.5) location = pm.Bernoulli('location', 0.2) # Bernoulli deterministic variable with conditional probability online = pm.Bernoulli('online', tt.switch(tt.eq(location, 1), tt.switch(tt.eq(gender, 1), 0.25, 0.45), tt.switch(tt.eq(gender, 1), 0.3, 0.6))) # Bernoulli random variable with prior amount = pm.Bernoulli('amount', 0.1) # Bernoulli deterministic variable with conditional probability fraudulent = pm.Bernoulli('fraudulent', tt.switch(tt.eq(amount, 1), tt.switch(tt.eq(online, 1), 0.45, 0.2), tt.switch(tt.eq(online, 1), 0.1, 0.05))) pm.model_to_graphviz(model) model.vars model.test_point with model: trace = pm.sample() trace['fraudulent'].mean() trace['online'].mean() pm.plot_posterior(trace) with model: x = pm.model_graph pm.summary(trace) with model: pm.model_to_graphviz() with pm.Model() as model: # Bernoulli random variables with priors gender = pm.Bernoulli('gender', 0.5) location = pm.Bernoulli('location', 0.2) # Bernoulli deterministic variable with conditional probability online = pm.Bernoulli('online', tt.switch(tt.eq(location, 1), tt.switch(tt.eq(gender, 1), 0.25, 0.45), tt.switch(tt.eq(gender, 1), 0.3, 0.6))) # Bernoulli random variable with prior amount = pm.Normal('amount', mu = 300, sd = 100) # Bernoulli deterministic variable with conditional probability fraudulent = pm.Bernoulli('fraudulent', tt.switch(tt.eq(amount, 1), tt.switch(tt.eq(online, 1), 0.45, 0.2), tt.switch(tt.eq(online, 1), 0.1, 0.05))) with model: trace = pm.sample() pm.traceplot(trace, varnames=['amount']); import theano values = theano.shared(np.array([100, 150, 200, 250])) with pm.Model() as model: c = pm.Categorical('c', [0.1, 0.2, 0.3, 0.4]) value = values[c] value = pm.Deterministic('value_det', value) trace = pm.sample() trace['value_det'].mean() trace['c'].mean() trace['value_det'] value pm.autocorrplot(trace) trace.varnames model.free_RVs model.deterministics trace.get_sampler_stats('location') trace['fraudulent_det'].mean() model.unobserved_RVs pm.Deterministic( model.deterministics model.vars model.disc_vars def make_model(gender_obs=None, location_obs=None, online_obs=None, amount_obs=None, fraudulent_obs=None): """Make a model for the fraudulent transaction problem.""" with pm.Model() as model: # Bernoulli random variables with priors gender = pm.Bernoulli('gender', 0.5, observed = gender_obs) location = pm.Bernoulli('location', 0.2, observed = location_obs) # Bernoulli deterministic variable with conditional probability online = pm.Bernoulli('online', tt.switch(tt.eq(location, 1), tt.switch(tt.eq(gender, 1), 0.25, 0.45), tt.switch(tt.eq(gender, 1), 0.3, 0.6)), observed = online_obs) # Bernoulli random variable with prior amount = pm.Bernoulli('amount', 0.1, observed = amount_obs) # Bernoulli deterministic variable with conditional probability fraudulent = pm.Bernoulli('fraudulent', tt.switch(tt.eq(amount, 1), tt.switch(tt.eq(online, 1), 0.45, 0.2), tt.switch(tt.eq(online, 1), 0.1, 0.05)), observed = fraudulent_obs) return model def 
sample_model(model): """Sample from a pymc3 model""" with model: trace = pm.sample(draws=N_SAMPLES, chains=N_CHAINS) return trace fraud_model = make_model() fraud_trace = sample_model(fraud_model) fraud_trace['fraudulent'].mean() pm.traceplot(fraud_trace) pm.forestplot(fraud_trace) fraud_trace['gender'].mean() fraud_trace['online'].mean() fraud_model = make_model(amount_obs = 0) fraud_trace = sample_model(fraud_model) fraud_trace['fraudulent'].mean() pm.traceplot(fraud_trace) p_sanfran = fraud_trace['location'] p_sanfran.mean() p_female = fraud_trace['gender'] p_female.mean() p_fraud = fraud_trace['fraudulent'] p_fraud.mean()Bayesian Linear RegressionWe can also use Probabilistic Programming to perform Bayesian Linear RegressionIn ordinary least squares regression, the data generating process is:$$y = \beta * X + \epsilon$$In Bayesian Linear Regression, the data generating process is a distribution:$$ y \sim N(\mu, (\sigma) ^ 2)$$$$\mu = \alpha + \beta * X$$Where $\beta$ and X are matrices representing the weights and features respectively and $\alpha$ is the intercept.from pymc3 import GLM data = pd.read_csv('../data/bike-sharing/day.csv', parse_dates = ['dteday']).set_index('dteday') data['date'] = data.index data.head() plt.figure(figsize = (8, 6)) plt.plot(data['date'], data['cnt'], marker = '.'); data['day'] = (data.index - data.index[0]).days data.head() p = np.polyfit(data['day'], data['cnt'], 8) y = np.polyval(p, data['day']) plt.plot(y) plt.xticks(data.index) import seaborn as sns sns.regplot('date', 'cnt', data, fit_reg = False); formula = ('cnt ~ day + season + mnth + holiday + weekday + workingday + weathersit + atemp + windspeed') from pymc3 import GLM with pm.Model() as bike_model: GLM.from_formula(formula, data) start = pm.find_MAP() trace = pm.sample(tune=1000, njobs=-1, start = start) pm.traceplot(trace) def lm(value, sample): prediction = sample['Intercept'] for var in sample.keys(): prediction += sample[var] * value return prediction trace['season'] * data['season'][0] len(trace['season']) row.index trace.varnames estimates = [] days = [] for i, (day, row) in enumerate(data.iterrows()): estimate = trace['Intercept'] for var in row.index: if var in trace.varnames: estimate += trace[var] * row[var] estimates.append(estimate) days.append(day for _ in range(len(trace[var]))) print(f'{round(100 * (i / len(data)), 2)}% complete.', end = '\r') len(days) len(estimates) est = pd.DataFrame(estimates) est.index = data.index est.head() import matplotlib.pyplot as plt %matplotlib inline fig, ax = plt.subplots(figsize = (10, 10)) data['cnt'].plot(linewidth = 2, linestyle = '--', color = 'k', ax = ax); est.plot(legend = None, ax = ax); data['upper'] = np.percentile(est, 95, axis = 1) data['lower'] = np.percentile(est, 5, axis = 1) data.head() plt.figure(figsize = (10, 10)) plt.fill_between(x = data.index, y1=data['lower'], alpha = 10, y2 = data['upper'],step = 'mid') fig, ax = plt.subplots(figsize = (10, 10)) data['cnt'].plot(ax = ax, label = 'actual') data['lower'].plot(ax = ax, label = 'lower') data['upper'].plot(ax = ax, label = 'upper') plt.legend(); estimate.shape len(data) pm.plots.plot_posterior(trace, varnames = ['cnt']);Scalar results for stage cost l(xm,um) expansiondiff(l,x) diff(l,u) diff(l,y) diff(l,v) diff(l,x,x) diff(l,u,u) diff(l,y,y) diff(l,v,v) diff(l,x,u) diff(l,x,y) diff(l,x,v) diff(l,u,y) diff(l,u,v) diff(l,y,v)Copyright (c) 2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Deep Learning From Basics to Practice by , https://dlbasics.com, http://glassner.com------ Chapter 25: Reinforcement Learning Notebook 2: Playing FlippersThis notebook is provided as a “behind-the-scenes” look at code used to make some of the figures in this chapter. It is still in the hacked-together form used to develop the figures, and is only lightly commented.# Some code here is adapted from # https://www.nervanasys.com/demystifying-deep-reinforcement-learning/ # What's going on # This is for learning with Q-Learning and Sarsa # There are two games: three_in_a_row_score and two_by_two_box_score (no reason not to add more) # Each returns 1 if the condition is exactly met, else 0 # (so we're doing positive reinforcement only, no punishment. To punish return -1 if not a win) # We have the ability to decay the exploration by reducing the chance of picking a random # action, rather than the best. I've found that with these tasks, going random about as # much as possible is best, since we get to search the space broadly in a relatively small # number of runs. # # This file is basically a bunch of tools for learning Flippers and building the # images. For each game illustration, set the learning strategy (Q or Sarsa), the # action-selection policy, the global variables, the number of episodes, the game # number, etc. Then train the model (if needed) and then run the game, saving the # results in a PostScript file. Each game needs the file to be manually configured # in this way and then re-run. # # Unlike most of the other figure-making notebooks, this one needs to be modified # by hand to produce each of the figures that we want. I'm sure with some effort it # can be cleaned up and all the steps parameterized, but this code is reasonable # clean, and it works properly, so I'm going to leave it as-is. # # There are lots of globals in this code, so it's important to run # the cells sequentially. # # The first block of variables control the building of the Q table, including Q or Sarsa # The second block of variables control the layout of the PostScript files showing the game # # TO MAKE ILLUSTRATIONS: # Because Python's native graphics are so lousy, pictures are saved as PostScript files. # Set the algorithm and #runs and the game type # If your configuration doesn't exist, it will learn and save to file. Else it will read the file. # # To find good examples: # Print out the number of states used for every starting board # Try different #runs (1k, 5k, 10k, 50k, etc.) 
and note different #states # Find games that are long with small #runs, and shorter later, showing learning # For 3 in a row, game 343 is nice, with 7 steps at 1k runs, but only 3 at 10k # import numpy as np from numpy import random import math import pickle import matplotlib.pyplot as plt import os # Make a File_Helper for saving and loading files. save_files = True import os, sys, inspect current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir from DLBasics_Utilities import File_Helper file_helper = File_Helper(save_files) # This is the heart of the learning algorithm. We play a game and update the Q table. def run_one_episode_of_learning(episode_number): global board, state_visited, Q reset_all() state0 = get_board_state() score0 = get_board_score() max_steps = 99 step_count = 0 if learning_algorithm == 'Sarsa': action0 = epsilon_greedy_policy(episode_number, state0) while step_count < max_steps: if score0 == 1: for action_number in range(num_actions): win_value = 1 / math.sqrt(max(1, step_count)) Q[state0][action_number] = win_value # give a winning score (nice when printing game charts) return # the episode is over when we have a winning board if learning_algorithm == 'Q': action0 = epsilon_greedy_policy(episode_number, state0) # Take the action (x, y) = action_to_xy(action0) make_game_move(x, y) # rarely, random stuff happens in the world and the board changes on us add_surprise() # Find the new state and new score state1 = get_board_state() score1 = get_board_score() # Get the reward. It's 1 if we've won, else -1 reward = non_winning_reward if score1 == 1: reward = 1 # get next action if learning_algorithm == 'Q': action1 = np.argmax(Q[state1]) else: action1 = epsilon_greedy_policy(episode_number, state1) # Bellman's equation to update the Q score Q[state0][action0] += learning_rate * (reward + (gamma * Q[state1][action1]) - Q[state0][action0]) # The new state becomes the starting state for the next time through state0 = state1 score0 = score1 action0 = action1 step_count += 1 # A little optimization: if we've seen this state before, we're in a loop so quit. #if state_visited[state0]: #return state_visited[state0] = True def epsilon_greedy_policy(episode_number, state): # Choose an action. If under epsilon (which decays), pick randomly, else pick best Q-score threshold = 0 drop_limit = epsilon_stop_percent * num_episodes if episode_number < drop_limit: threshold = epsilon_max * np.exp(-5 * episode_number / drop_limit) # starts at e_max, drops to .006 (call it 0) if random.rand() < threshold: action = random.randint(0, num_actions) else: action = np.argmax(Q[state]) return action # prepare for a new episode def reset_all(): global board, state_visited board_state = random.randint(0, num_states) board_from_state(board_state) state = get_board_state() state_visited = [False for i in range(num_states)] state_visited[state] = True ########## # Utilities for manipuating the board, working with actions, etc. # These are from Q-table-builder, and probably should be in a common # file that both modules import, but for now repeating them is file ########## # return the x and y coordinates for an action number def action_to_xy(action): y = int(action * 1. / board_size) x = int(action - (y * board_size)) return (x, y) # cells are worth 1, 2, 4, 8, 16... 
in reading order from UL def get_board_state(): flat_board = np.ravel(board) score = sum([flat_board[i] * (2 ** i) for i in range(flat_board.size)]) return int(score) # given a state number, recreate the board def board_from_state(state): global board binary_state = state_in_binary(state) flat_board = [int(binary_state[i]) for i in range(len(binary_state))] # print(" board from state: flat_board = {}".format(flat_board)) board = np.reshape(flat_board, [board_size, board_size]) # return a binary string describing the board contents, starting in UL, row 1 L->R, row 2 L->R, etc. def state_in_binary(state): format_string = "{0:0" + str(num_actions) + "b}" binary_version = ''.join(reversed(format_string.format(state))) return binary_version def add_surprise(): global board, num_actions, random_event_probability if random.rand() < random_event_probability: action = random.randint(0, num_actions) (x, y) = action_to_xy(action) make_game_move(x, y) # return 1 if we have exactly 1 row or column of 1's, else 0 def three_in_a_row_score(): global board total_ones = np.sum(np.ravel(board)) if total_ones != board_size: return 0 # early exit, too few or too many 1's column_totals = np.sum(board, 0) if max(column_totals) == board_size: return 1 row_totals = np.sum(board, 1) if max(row_totals) == board_size: return 1 return 0 def three_in_a_row_move(x, y): global board board[y][x] = 1 - board[y][x] def three_in_a_row_name(): return 'three-in-a-row' # return 1 if we have a 2-by-2 box. This could be faster, but this is simple def two_by_two_box_score(): global board total_ones = np.sum(np.ravel(board)) if total_ones != 4: return 0 # early exit, too few or too many 1's for y in range(board_size - 1): for x in range(board_size - 1): if board[y][x] == 1: if (board[y + 1][x] == 1) and (board[y][x + 1] == 1) and (board[y + 1][x + 1] == 1): return 1 return 0 def two_by_two_box_move(x, y): global board board[y][x] = 1 - board[y][x] def two_by_two_box_name(): return 'two-by-two' # ridiculously slow. 
scan every 1 and see if it's a plus center def plus_sign_score(): global board total_ones = np.sum(np.ravel(board)) if total_ones != 5: return 0 # early exit, too few or too many 1's for y in np.arange(1, board_size - 1): for x in np.arange(1, board_size - 1): if board[y][x] == 1: if (board[y - 1][x] == 1) and (board[y + 1][x] == 1) and (board[y][x - 1] == 1) and ( board[y][x + 1] == 1): return 1 return 0 def plus_sign_move(x, y): global board board[y][x] = 1 - board[y][x] if x > 0: board[y][x - 1] = 1 - board[y][x - 1] if x < board_size - 1: board[y][x + 1] = 1 - board[y][x + 1] if y > 0: board[y - 1][x] = 1 - board[y - 1][x] if y < board_size - 1: board[y + 1][x] = 1 - board[y + 1][x] def plus_sign_name(): return 'plus-sign' ############################### # Building the Q table ############################### num_episodes = 3000 # number of games to play to train Q table (10000) board_size = 3 # side length of square board (more than 4 is playing with fire) (3) learning_rate = 0.99 # lerp value from old score to new (0.99) gamma = 0.85 # discount on future scores (0.85) random_event_probability = 0.10 # how likely it is that a random tile will flip on us after a move (0.1) epsilon_max = .8 # how likely we are to pick a random action, decays with episodes epsilon_stop_percent = .5 # how far into this series of runs until epsilon hits 0 num_random_games = 1000 # for the "average game length" printout after the table is built (1000) random_seed = 3 # seeding the RNG non_winning_reward = 0 # set to -1 for "punishment", 0 for "positive reinforcement only" ############################### # Choose the game to play ############################### game_functions = ( (three_in_a_row_score, three_in_a_row_move, three_in_a_row_name), (two_by_two_box_score, two_by_two_box_move, two_by_two_box_name), (plus_sign_score, plus_sign_move, plus_sign_name) ) (get_board_score, make_game_move, game_name) = game_functions[0] # choose the game to play # choose these by hand to make nice examples for figures game_list_3_cross = (163, 343, 313, 495) # good for 3-by-3 cross game_list_4_cross = (33444, 10344, 44122, 18705) # good for 4-by-4 cross game_list = ( 49, 182 ) learning_algorithm = 'Q' # either 'Q' or 'Sarsa' ############################### # Global variables built from the user-defined variables ############################### num_actions = num_states = total_board_cells = Q = state_visited = board = None def start_run(): global Q if not os.path.exists(data_dir): os.makedirs(data_dir) filename = data_dir + "/" + "Qtable-size-" + str(board_size) + "-alg-" + learning_algorithm + "-episodes-" + str(num_episodes) + "-game-" + game_name() + ".pkl" if os.path.exists(filename): read_data_file(filename) else: make_derived_variables() Q = np.zeros([num_states, num_actions], dtype='float') # initialize Q table to all 0 random.seed(random_seed) for episode_number in range(num_episodes): if episode_number % 1000 == 0: print("Starting episode {}".format(episode_number)) run_one_episode_of_learning(episode_number) save_data_file(filename) #print_all_game_lengths() # this helps us choose good games to plot #save_games(save_chart, game_list) #save_games(save_sequence, game_list) #plot_results() # in PyCharm this hangs until the window is closed, so make it last step print_averages() def print_averages(): total_moves = 0 max_moves = 0 for game_number in range(num_states): (action_list, state_list) = run_game(game_number) total_moves += action_list.size max_moves = max(max_moves, action_list.size) average_moves = 
total_moves * 1.0 / num_states flat_Q = np.ravel(Q) average_Q = np.average(flat_Q) min_Q = min(flat_Q) max_Q = max(flat_Q) print("Zlearning_algorithm =A {} Znum_episodes =B {} Zaverage_moves =C {} Zmin_Q =D {} Zaverage_Q =E {} Zmax_Q =F {}". format(learning_algorithm, num_episodes, average_moves, min_Q, average_Q, max_Q)) print("max_moves = {}".format(max_moves)) def save_data_file(filename): master_dict = {} master_dict.update({'num_episodes': num_episodes, 'board_size': board_size, 'learning_rate': learning_rate}) master_dict.update({'gamma': gamma, 'random_event_probability': random_event_probability}) master_dict.update({'epsilon_max': epsilon_max, 'epsilon_stop_percent': epsilon_stop_percent}) master_dict.update({'random_seed': random_seed}) master_dict.update({'Q_table': Q}) pickle.dump(master_dict, open(filename, "wb")) #print("saving Q={}".format(Q)) def read_data_file(filename): global num_episodes, board_size, learning_rate, gamma, random_event_probability global epsilon_max, epsilon_stop_percent, random_seed, Q global num_actions, num_states, total_board_cells, state_visited, board master_dict = pickle.load(open(filename, "rb")) num_episodes = master_dict['num_episodes'] board_size = master_dict['board_size'] learning_rate = master_dict['learning_rate'] gamma = master_dict['gamma'] random_event_probability = master_dict['random_event_probability'] epsilon_max = master_dict['epsilon_max'] epsilon_stop_percent = master_dict['epsilon_stop_percent'] random_seed = master_dict['random_seed'] Q = master_dict['Q_table'] #print_input_variables() make_derived_variables() def print_input_variables(): print("READ IN VARIABLES") print("num_episodes = {}".format(num_episodes)) print("board_size = {}".format(board_size)) print("learning_rate = {}".format(learning_rate)) print("gamma = {}".format(gamma)) print("random_event_probability = {}".format(random_event_probability)) print("epsilon_max = {}".format(epsilon_max)) print("epsilon_stop_percent = {}".format(epsilon_stop_percent)) print("random_seed = {}".format(random_seed)) print("--------") print("num_actions = {}".format(num_actions)) print("num_states = {}".format(num_states)) print("total_board_cells = {}".format(total_board_cells)) print("board = {}".format(board)) print("--------") print("Q.shape = {}".format(Q.shape)) print("Q = {}".format(Q)) def make_derived_variables(): global num_actions, num_states, total_board_cells, Q, state_visited, board num_actions = board_size * board_size num_states = 2 ** num_actions total_board_cells = board_size ** 2 board = np.zeros([board_size, board_size]) state_visited = np.zeros(num_states) ############################### # Plot the contents of the Q table in a scatter plot ############################### def plot_results(): global Q x_coords = [] y_coords = [] for state in range(num_states): for action in range(num_actions): x_coords.append(Q[state][action]) y_coords.append(state) plt.figure(num=None, figsize=(8, 6), dpi=200) plt.scatter(x_coords, y_coords, s=10, color=['black']) file_helper.save_figure('Q-table-scatter') plt.show() # plot game lengths x_coords = [] y_coords = [] total_moves = 0 max_moves = 0 for game_number in range(num_states): (action_list, state_list) = run_game(game_number) x_coords.append(action_list.size) y_coords.append(game_number) total_moves += action_list.size max_moves = max(max_moves, action_list.size) #print("game {} uses {} steps".format(game_number, action_list.size)) average_moves = total_moves * 1.0 / num_states print("average number of moves in {} games: {}, 
longest game = {} moves".format(num_states, average_moves, max_moves)) plt.figure(num=None, figsize=(8, 6), dpi=200) plt.scatter(x_coords, y_coords, s=10, color=['black']) file_helper.save_figure('Q-table-game-lengths') plt.show() ############################### # Estimate the average game length by running a bunch of games ############################### def run_games(): total_moves = 0 for game in range(num_random_games): (action_list, state_list) = run_random_game() num_moves = action_list.size total_moves += num_moves average_moves = total_moves * 1.0 / num_random_games print("average number of moves in {} games: {}".format(num_random_games, average_moves)) def run_random_game(): game_number = random.randint(0, num_states) return run_game(game_number) def run_game(game_number): global board, Q, get_board_score board_from_state(game_number) score = get_board_score() action_list = np.array([], dtype='int32') state = get_board_state() state_list = np.array([state], dtype='int32') visited_state = [False for i in range(num_states)] while (score != 1) and (state_list.size < 99): # and not visited_state[state]: visited_state[state] = True best_action = np.argmax(Q[state]) (x, y) = action_to_xy(best_action) make_game_move(x, y) add_surprise() state = get_board_state() action_list = np.append(action_list, best_action) state_list = np.append(state_list, state) score = get_board_score() return (action_list, state_list) def print_all_game_lengths(): for game_number in range(num_states): (action_list, state_list) = run_game(game_number) print("game number {}, states = {}".format(game_number, state_list.size)) ############################### # Control the layout of the PostScript file that shows a game chart ############################### page_width = 791 # width in PS pixels of drawing area page_height = 791 # height in PS pixels of drawing area page_border = 50 # pstopdf clips thick lines on the edges, so add padding all around gd = .5 # target x gap ratio ge = .5 # target y gap ratio gs = .25 # ratio of score box height to box height board_length = 0 # length in PS pixels of a board drawing that meets all above constraints x_chunk = 0 # width of board plus gap y_chunk = 0 # height of board plus score plus gap score_height = 0 # height of score box cell_size = 0 # size of one cell in game box output_file = None # output PS file max_display_states = 10 # show this many states at most data_dir = "flippers-data" # directory for placing files def save_games(save_game_style, game_list): for game_num in game_list: (action_list, state_list) = run_game(game_num) print("game {} uses {} states".format(game_num, state_list.size)) save_game_style(game_num) def save_chart(game_num): global board_length, x_chunk, y_chunk, score_height, output_file, cell_size padded_game_num = '{:03}'.format(game_num) chart_name = "chart-" + padded_game_num + ".ps" #print("chart_name = <{}>".format(chart_name)) output_file = open(data_dir+"/"+chart_name, "w") output_file.write("%!PS\n") output_file.write("%% chart for game {}\n".format(game_num)) output_file.write("<< /PageSize [{} {}] >> setpagedevice\n\n". 
format(page_width + (2 * page_border), page_height + (2 * page_border))) output_file.write("2 setlinewidth\n") (action_list, state_list) = run_game(game_num) # get the actual run of the game action_list = action_list[:max_display_states] # clip the output after the first few states if needed state_list = state_list[:max_display_states] # if there's too many states they're all tiny and ugly num_steps = state_list.size if num_steps > 1: num_rows = num_actions num_columns = num_steps+1 w = page_width / (num_columns + ((num_columns - 1) * gd)) h = page_height / ((num_rows * (1 + gs + ge)) - ge) b = min(h, w) d = (page_width - ((num_columns) * b)) / (num_columns - 1) x_chunk = b + d s = b * gs e = (page_height - (num_rows * (b + s))) / (num_rows - 1) y_chunk = b + s + e board_length = b else: x_chunk = 100 y_chunk = 100 board_length = 100 score_height = board_length * gs cell_size = board_length * 1.0 / board_size # starting state is special: no score, no variations left = page_border top = page_border + (page_height / 2.) + (board_length / 2.) state = state_list[0] action_number = -1 draw_game_board(left, top, state, action_number, False, False) for step in range(num_steps - 1): draw_column(step, action_list, state_list) # final state is special: no score, no variations left = page_border + (num_steps * x_chunk) top = page_border + (page_height / 2.) + (board_length / 2.) state = state_list[-1] action_number = -1 draw_game_board(left, top, state, action_number, False, False) output_file.write("showpage\n") output_file.close() def save_sequence(game_num): global board_length, x_chunk, y_chunk, score_height, output_file, cell_size padded_game_num = '{:03}'.format(game_num) sequence_name = "sequence-" + padded_game_num + ".ps" #print("sequence_name = <{}>".format(sequence_name)) output_file = open(data_dir+"/"+sequence_name, "w") output_file.write("%!PS\n") output_file.write("%% sequence for game {}\n".format(game_num)) output_file.write("<< /PageSize [{} {}] >> setpagedevice\n\n". format(page_width + (2 * page_border), page_height + (2 * page_border))) output_file.write("2 setlinewidth\n") (action_list, state_list) = run_game(game_num) num_steps = state_list.size num_columns = num_steps + 1 if num_columns > 1: num_columns = num_steps+1 w = page_width / (num_columns + ((num_columns - 1) * gd)) b = w d = (page_width - ((num_columns) * b)) / (num_columns - 1) x_chunk = b + d board_length = b top = page_height + (b/2.) 
else: x_chunk = 100 y_chunk = 100 board_length = 100 score_height = board_length * gs cell_size = board_length * 1.0 / board_size for step in range(num_steps): if step==0: draw_game_board(page_border, top, state_list[0], -1, False, False) if step == num_steps-1: draw_game_board(page_border+((step+1)*x_chunk), top, state_list[-1], -1, False, False) else: draw_game_board(page_border+((step+1)*x_chunk), top, state_list[step], action_list[step], False, True) output_file.write("showpage\n") output_file.close() def draw_column(step, action_list, state_list): state = state_list[step] board_from_state(state) left = page_border + ((step + 1) * x_chunk) for action_number in range(num_actions): top = (page_height + page_border) - (action_number * y_chunk) highlight = action_number == action_list[step] draw_game_board(left, top, state, action_number, highlight, True) def draw_game_board(left, top, state, action_number, highlight, draw_score): global board # draw the thick black highlight if we need it if highlight: output_file.write("15 setlinewidth\n") draw_box(left, top, board_size * cell_size, (board_size * cell_size + score_height), True, (0, 0, 0), True, (0, 0, 0)) output_file.write("2 setlinewidth\n") # load the board, and draw a box for each cell. Normally draw a gray circle in # each cell of value 1. But if that cell is the one we're thinking of changing, # draw a solid circle if it was empty, or an unfilled circle if it was full board_from_state(state) saved_board = np.copy(board) action_x = action_y = -1 if action_number >= 0: (action_x, action_y) = action_to_xy(action_number) make_game_move(action_x, action_y) for y in range(board_size): for x in range(board_size): cx = left + ((x + .5) * cell_size) cy = top - ((y + .5) * cell_size) radius = cell_size * .3 this_action = (y * board_size) + x if (x == action_x) and (y == action_y): draw_box(left + (x * cell_size), top - (y * cell_size), cell_size, cell_size, True, (0, 0, 0), True, (.76, .76, 1)) else: draw_box(left + (x * cell_size), top - (y * cell_size), cell_size, cell_size, True, (0, 0, 0), True, (1, 1, 1)) if board[y][x] == saved_board[y][x]: if board[y][x] == 1: draw_circle(cx, cy, radius, False, (0, 0, 0), True, (.3, .3, .3)) else: if board[y][x] == 0: draw_circle(cx, cy, radius, True, (1, 0, 0), False, (1, 0, 0)) else: draw_circle(cx, cy, radius, False, (1, 0, 0), True, (1, 0, 0)) if draw_score: # draw the score box with a color bar indicating score value draw_box(left, top - board_length, board_length, score_height, True, (0, 0, 0), True, (1, 1, 1)) flat_Q = np.ravel(Q) min_Q_score = min(flat_Q) max_Q_score = max(flat_Q) Q_score = (Q[state][action_number] - min_Q_score) / (max_Q_score - min_Q_score) draw_box(left, top - board_length, board_length * Q_score, score_height, True, (0, 0, 0), True, (1 - Q_score, Q_score, 0)) def draw_box(left, top, width, height, do_stroke, stroke_rgb, do_fill, fill_rgb): output_file.write("newpath\n") output_file.write("{} {} moveto\n".format(left, top)) output_file.write("{} {} lineto\n".format(left + width, top)) output_file.write("{} {} lineto\n".format(left + width, top - height)) output_file.write("{} {} lineto\n".format(left, top - height)) output_file.write("closepath\n") if do_stroke: output_file.write("gsave\n") output_file.write("{} {} {} setrgbcolor\n".format(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2])) output_file.write("stroke\n") output_file.write("grestore\n") if do_fill: output_file.write("{} {} {} setrgbcolor\n".format(fill_rgb[0], fill_rgb[1], fill_rgb[2])) output_file.write("fill\n") 
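# A minimal usage sketch (ours, not part of the original notebook): it shows how the PostScript
# helpers compose into a standalone one-box page. The file name "helper-demo.ps" is illustrative,
# and data_dir is assumed to already exist; call this only after draw_box and draw_circle
# (defined just below) have been defined.
def save_helper_demo():
    global output_file
    output_file = open(data_dir + "/helper-demo.ps", "w")
    output_file.write("%!PS\n")
    output_file.write("<< /PageSize [200 200] >> setpagedevice\n")
    output_file.write("2 setlinewidth\n")
    draw_box(50, 150, 100, 100, True, (0, 0, 0), True, (1, 1, 1))      # outlined white 100x100 box
    draw_circle(100, 100, 30, False, (0, 0, 0), True, (.3, .3, .3))    # filled gray circle at its center
    output_file.write("showpage\n")
    output_file.close()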
def draw_circle(cx, cy, radius, do_stroke, stroke_rgb, do_fill, fill_rgb): output_file.write("newpath\n") output_file.write("{} {} {} 0 360 arc closepath\n".format(cx, cy, radius)) if do_stroke: output_file.write("gsave\n") output_file.write("{} {} {} setrgbcolor\n".format(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2])) output_file.write("stroke\n") output_file.write("grestore\n") if do_fill: output_file.write("{} {} {} setrgbcolor\n".format(fill_rgb[0], fill_rgb[1], fill_rgb[2])) output_file.write("fill\n") start_run()Starting episode 0 Starting episode 1000 Starting episode 2000 Zlearning_algorithm =A Q Znum_episodes =B 3000 Zaverage_moves =C 17.6875 Zmin_Q =D 0.0 Zaverage_Q =E 0.1384620876097466 Zmax_Q =F 1.8449527826599037 max_moves = 98How to Train Your Own Cone Detection NetworksIn this notebook, we will demonstrate - how to train your own YOLOv3-based traffic cone detection network and do inference on a video.**[Accurate Low Latency Visual Perception for Autonomous Racing: Challenges Mechanisms and Practical Solutions](https://github.com/mit-han-lab/once-for-all)** is an accurate low latency visual perception system introduced by , , , and . 1. PreparationLet's first install all the required packages:! sudo apt install unzip print('Installing PyTorch...') ! pip3 install torch print('Installing torchvision...') ! pip3 install torchvision print('Installing numpy...') ! pip3 install numpy # tqdm is a package for displaying a progress bar. print('Installing tqdm (progress bar) ...') ! pip3 install tqdm print('Installing matplotlib...') ! pip3 install matplotlib print('Installing Tensorboard') ! pip3 install tensorboardx print('Installing all the other required packages once for all') ! sudo python3 setup.py install print('Installing video editor') ! sudo apt install ffmpeg -yLet' s clone our repo first...! git clone https://github.com/cv-core/MIT-Driverless-CV-TrainingInfra.git ! mv MIT-Driverless-CV-TrainingInfra/CVC-YOLOv3/* .Before we start training, let's download the Cone Detection dataset and the corresponding label and intial training weights.print("Downloading Training Dataset") ! wget https://storage.googleapis.com/mit-driverless-open-source/YOLO_Dataset.zip ! unzip -q YOLO_Dataset.zip ! mv YOLO_Dataset dataset/ && rm YOLO_Dataset.zip print("Downloading YOLOv3 Sample Weights") ! wget https://storage.googleapis.com/mit-driverless-open-source/yolov3-training/sample-yolov3.weights print("Downloading Training and Validation Label") ! cd dataset/ && wget https://storage.googleapis.com/mit-driverless-open-source/yolov3-training/all.csv && cd .. ! cd dataset/ && wget https://storage.googleapis.com/mit-driverless-open-source/yolov3-training/train_mini_yolo.csv && mv train_mini_yolo.csv train.csv && cd .. ! cd dataset/ && wget https://storage.googleapis.com/mit-driverless-open-source/yolov3-training/validate_mini_yolo.csv && mv validate_mini_yolo.csv validate.csv && cd ..2. 
Using Pretrained YOLOv3 Weights File to Start Training First, import all the packages used in this tutorial:import os import random import tempfile import time import multiprocessing import subprocess import math import shutil import math from datetime import datetime import torch import torch.nn as nn from torch.utils.data import DataLoader from models import Darknet from utils.datasets import ImageLabelDataset from utils.utils import model_info, print_args, Logger, visualize_and_save_to_local,xywh2xyxy from yolo_tutorial_util import run_epoch import validate import warnings import sys ##### section for all random seeds ##### torch.manual_seed(2) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False ######################################## warnings.filterwarnings("ignore") os.environ['CUDA_LAUNCH_BLOCKING'] = "1" cuda = torch.cuda.is_available() device = torch.device('cuda:0' if cuda else 'cpu') num_cpu = multiprocessing.cpu_count() if cuda else 0 if cuda: torch.cuda.synchronize() random.seed(0) torch.manual_seed(0) if cuda: torch.cuda.manual_seed(0) torch.cuda.manual_seed_all(0) torch.backends.cudnn.benchmark = True torch.cuda.empty_cache()Successfully imported all packages and configured random seed to 17! Training Config# Training Related Config batch_size = int(5) optimizer_pick = "Adam" model_cfg = "model_cfg/yolo_baseline.cfg" weights_path = "sample-yolov3.weights" output_path = "automatic" dataset_path = "dataset/YOLO_Dataset/" num_epochs = int(2) # Set them to 2048 during full dataset training num_steps = 8388608 checkpoint_interval = int(1) # How often you want to get evaluation metric during training val_tolerance = int(3) min_epochs = int(3) # Dataloader Related Config data_aug = False # toggle for image augmentation blur = False # Add blur to image salt = False # Add "salt" noise to image noise = False # Add noise to image contrast = False # Add High Contrast to image sharpen = False # Image Sharpen ts = True # Tiling and Scaling augment_affine = False # Affine augment_hsv = False # HSV lr_flip = False # left and right flip ud_flip = False # up and down flip # Training Hyperparameter Related Config momentum = float(0.9) gamma = float(0.95) lr = float(0.001) weight_decay = float(0.0) xy_loss = float(2) wh_loss= float(1.6) no_object_loss = float(25) object_loss = float(0.1) # Debugging/Visualization Related Config debug_mode = False upload_dataset = False vanilla_anchor = False vis_batch = int(0)Initializing Modelinput_arguments = list(locals().items()) print("Initializing model") model = Darknet(config_path=model_cfg,xy_loss=xy_loss,wh_loss=wh_loss,no_object_loss=no_object_loss,object_loss=object_loss,vanilla_anchor=vanilla_anchor)Processing Training Configif output_path == "automatic": current_month = datetime.now().strftime('%B').lower() current_year = str(datetime.now().year) if not os.path.exists(os.path.join('outputs/', current_month + '-' + current_year + '-experiments/' + model_cfg.split('.')[0].split('/')[-1])): os.makedirs(os.path.join('outputs/', current_month + '-' + current_year + '-experiments/' + model_cfg.split('.')[0].split('/')[-1])) output_uri = os.path.join('outputs/', current_month + '-' + current_year + '-experiments/' + model_cfg.split('.')[0].split('/')[-1]) else: output_uri = output_path img_width, img_height = model.img_size() bw = model.get_bw() validate_uri, train_uri = model.get_links() num_validate_images, num_train_images = model.num_images() conf_thresh, nms_thresh, iou_thresh = model.get_threshs() num_classes = 
model.get_num_classes() loss_constant = model.get_loss_constant() conv_activation = model.get_conv_activation() anchors = model.get_anchors() onnx_name = model.get_onnx_name() start_epoch = 0 weights_path = weights_pathData LoadersOne of our main contributions to vanilla YOLOv3 is the custom data loader we implemented:Each set of training images from a specific sensor/lens/perspective combination is uniformly rescaled such that their landmark size distributions matched that of the camera system on the vehicle. Each training image was then padded if too small or split up into multiple images if too large.with tempfile.TemporaryDirectory() as tensorboard_data_dir: print("Initializing data loaders") train_data_loader = torch.utils.data.DataLoader( ImageLabelDataset(train_uri, dataset_path=dataset_path, width=img_width, height=img_height, augment_hsv=augment_hsv, augment_affine=augment_affine, num_images=num_train_images, bw=bw, n_cpu=num_cpu, lr_flip=lr_flip, ud_flip=ud_flip,vis_batch=vis_batch,data_aug=data_aug,blur=blur,salt=salt,noise=noise,contrast=contrast,sharpen=sharpen,ts=ts,debug_mode=debug_mode, upload_dataset=upload_dataset), batch_size=(1 if debug_mode else batch_size), shuffle=(False if debug_mode else True), num_workers=(0 if vis_batch else num_cpu), pin_memory=cuda) print("Num train images: ", len(train_data_loader.dataset)) validate_data_loader = torch.utils.data.DataLoader( ImageLabelDataset(validate_uri, dataset_path=dataset_path, width=img_width, height=img_height, augment_hsv=False, augment_affine=False, num_images=num_validate_images, bw=bw, n_cpu=num_cpu, lr_flip=False, ud_flip=False,vis_batch=vis_batch,data_aug=False,blur=False,salt=False,noise=False,contrast=False,sharpen=False,ts=ts,debug_mode=debug_mode, upload_dataset=upload_dataset), batch_size=(1 if debug_mode else batch_size), shuffle=False, num_workers=(0 if vis_batch else num_cpu), pin_memory=cuda)Initialize Optimizerif optimizer_pick == "Adam": print("Using Adam Optimizer") optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay) elif optimizer_pick == "SGD": print("Using SGD Optimizer") optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, momentum=momentum, weight_decay=weight_decay) else: raise Exception(f"Invalid optimizer name: {optimizer_pick}") print("Loading weights") model.load_weights(weights_path, model.get_start_weight_dim()) # Set scheduler scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)Sending Model to GPUs if we are in GPU mode Let's Dance (Training)if torch.cuda.device_count() > 1: print('Using ', torch.cuda.device_count(), ' GPUs') model = nn.DataParallel(model) model = model.to(device, non_blocking=True) val_loss = 999 # using a high number for validation loss val_loss_counter = 0 step = [0] # wrapping in an array so it is mutable epoch = start_epoch while epoch < num_epochs and step[0] < num_steps: epoch += 1 scheduler.step() model.train() run_epoch(label_prefix="train", data_loader=train_data_loader, epoch=epoch, step=step, model=model, num_epochs=num_epochs, num_steps=num_steps, optimizer=optimizer, device=device) print('Completed epoch: ', epoch) # Update best loss if epoch % checkpoint_interval == 0 or epoch == num_epochs or step[0] >= num_steps: # First, save the weights save_weights_uri = os.path.join(output_uri, "{epoch}.weights".format(epoch=epoch)) model.save_weights(save_weights_uri) with torch.no_grad(): print("Calculating loss on validate data") 
epoch_losses, epoch_time_total, epoch_num_targets = run_epoch( label_prefix="validate", data_loader=validate_data_loader, epoch=epoch, model=model, num_epochs=num_epochs, num_steps=num_steps, optimizer=None, step=step, device=device) avg_epoch_loss = epoch_losses[0] / epoch_num_targets print('Average Validation Loss: {0:10.6f}'.format(avg_epoch_loss)) if avg_epoch_loss > val_loss and epoch > min_epochs: val_loss_counter += 1 print(f"Validation loss did not decrease for {val_loss_counter}" f" consecutive check(s)") else: print("Validation loss decreased. Yay!!") val_loss_counter = 0 val_loss = avg_epoch_loss ##### updating best result for optuna study ##### result = open("logs/result.txt", "w" ) result.write(str(avg_epoch_loss)) result.close() ########################################### validate.validate(dataloader=validate_data_loader, model=model, device=device, step=step[0], bbox_all=False,debug_mode=debug_mode) if val_loss_counter == val_tolerance: print("Validation loss stopped decreasing over the last " + str(val_tolerance) + " checkpoints, creating onnx file") with tempfile.NamedTemporaryFile() as tmpfile: model.save_weights(tmpfile.name) weights_name = tmpfile.name cfg_name = os.path.join(tempfile.gettempdir(), model_cfg.split('/')[-1].split('.')[0] + '.tmp') onnx_gen = subprocess.call(['python3', 'yolo2onnx.py', '--cfg_name', cfg_name, '--weights_name', weights_name]) save_weights_uri = os.path.join(output_uri, onnx_name) os.rename(weights_name, save_weights_uri) try: os.remove(onnx_name) os.remove(cfg_name) except: pass breakOur full dataset accuracy metrics for detecting traffic cones on the racing track:| mAP | Recall | Precision ||----|----|----|| 89.35% | 92.77% | 86.94% | 3. Inference Download target video file for inferencefrom IPython.display import HTML from base64 import b64encode ! wget https://storage.googleapis.com/mit-driverless-open-source/test_yolo_video.mp4 ! ffmpeg -i test_yolo_video.mp4 test.mp4 && rm test_yolo_video.mp4 video_path = 'test.mp4' mp4 = open(video_path,'rb').read() decoded_vid = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(f'')Download pretrained weights for inference! wget https://storage.googleapis.com/mit-driverless-open-source/pretrained_yolo.weightsImport all packages for inferenceimport os from os.path import isfile, join import copy import cv2 from tensorboardX import SummaryWriter from PIL import Image, ImageDraw import torchvision from utils.nms import nms from utils.utils import calculate_padding from yolo_tutorial_util import single_img_detect, detect from tqdm import tqdm warnings.filterwarnings("ignore") detection_tmp_path = "/tmp/detect/"Set up config file for inferencetarget_path = "test.mp4" output_path = "outputs/visualization/" weights_path = "pretrained_yolo.weights" conf_thres = float(0.8) nms_thres = float(0.25) cuda = torch.cuda.is_available() device = torch.device('cuda:0' if cuda else 'cpu') random.seed(0) torch.manual_seed(0) if cuda: torch.cuda.manual_seed(0) torch.cuda.manual_seed_all(0) torch.backends.cudnn.benchmark = True torch.cuda.empty_cache() model = Darknet(config_path=model_cfg,xy_loss=xy_loss,wh_loss=wh_loss,no_object_loss=no_object_loss,object_loss=object_loss,vanilla_anchor=vanilla_anchor) # Load weights model.load_weights(weights_path, model.get_start_weight_dim()) model.to(device, non_blocking=True) detect(target_path, output_path, model, device=device, conf_thres=conf_thres, nms_thres=nms_thres, detection_tmp_path=detection_tmp_path) ! 
cd outputs/visualization/ && ffmpeg -i test.mp4 output.mp4 && rm test.mp4 && cd ../.. video_path = "outputs/visualization/output.mp4" mp4 = open(video_path,'rb').read() decoded_vid = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(f'')![BTS](https://4d17d13o8yuu3wluzr4c2feo-wpengine.netdna-ssl.com/wp-content/uploads/2019/02/btslogo.png) Master in Big Data Solutions Final Project - Smart Tour , & Data preparation Data Sources 1. Number of tourists in Catalunya per month from 2015 to 2019:https://datos.gob.es/es/catalogo/ea0010587-numero-de-turistas-segun-comunidad-autonoma-de-destino-principal-mensual-comunidades-autonomas-movimientos-turisticos-en-fronteras-identificador-api-1082312. Barcelona list of tourist attractions and number of annual visitors from 1994 to 2019: Observatorio de Turismo de Barcelona3. Barcelona list of gastronomic equipment: https://opendata-ajuntament.barcelona.cat/data/es/dataset/equipament-restaurants4. Barcelona tourist attractions geolocation: https://opendata-ajuntament.barcelona.cat/data/es/dataset/punts-informacio-turistica5. Lonely Planet list of Barcelona tourist attractions with features: Web Scraping from Web Sitehttps://www.lonelyplanet.com/spain/barcelona/attractions?page=16. TripAdvisor Restaurants Info for 31 Euro-Cities: https://www.kaggle.com/damienbeneschi/krakow-ta-restaurans-data-raw7. Barcelona neighbourhoods and districs geodata: https://github.com/martgnz/bcn-geodata Package importimport pandas as pd from re import search from math import sqrt, radians from sklearn.neighbors import DistanceMetric import numpy as np import matplotlib.pyplot as plt %matplotlib inlineLoading datasets#od is OpenData, pits is Points of Interest pits_od=pd.read_excel('pits_opendata.xls') #ob is Observsatorium de Turismo de Barcelona, pits is Points of Interest pits_ob=pd.read_excel('barcelona-cultura-y-ocio-1994-2019.xlsx') #TA is TripAdvisor TA_data=pd.read_csv('TA_restaurants_curated.csv') #lp is Lonely Planet pits_lp=pd.read_excel('pits_lp.xlsx', sheet_name='Curado') restaurants_op=pd.read_csv('restaurants_opendata.csv') #cat is Catalunya cat_visitors=pd.read_excel('catalunya_visitors2015-2019.xlsx') #POI_barcleona poi_bcn=pd.read_excel('POI_barcelona.xlsx')Formatting, dropping columns, identifying missing values Catalunya visitors datasetcat_visitors.head(2) cat_visitors.shape cat_visitors.dtypes cat_visitors.columns #Removing space from benining and end of columns names cat_visitors.columns = cat_visitors.columns.str.lstrip() cat_visitors.columns #Dropping unnecessary columns cat_visitors=cat_visitors.drop(['Mes2', 'Tasa de variación anual', 'Acumulado en lo que va de año', 'Tasa de variación acumulada'], axis=1) cat_visitors.head()Points of interest Observatorium of Tourism in Barcelonapits_ob.head(2) pits_ob.dtypes #Dropping unnecessary columns with index number pits_ob=pits_ob.drop('numero', axis=1) #Replace string values with 0 in columns 2015 and 2016 pits_ob[2015].replace(['Cerrado', 'nd'], 0, inplace=True) pits_ob[2016].replace(['Cerrado', 'nd'], 0, inplace=True) #Changing columns 2015 and 2016 to int type pits_ob[2015]=pits_ob[2015].astype(int) pits_ob[2016]=pits_ob[2016].astype(int) #Renaming columns pits_ob.rename(columns={'Visitantes de museos y colecciones (MC)':'name', 'Type':'type'}, inplace=True) #Apply upper case for names pits_ob['name'] = pits_ob['name'].str.upper() pits_ob['type']= pits_ob['type'].str.upper() #Remove white space at the beginning and end of the name pits_ob['name'] = pits_ob['name'].str.lstrip() pits_ob['type']= 
pits_ob['type'].str.lstrip() #Check for duplicate names pits_ob.name.value_counts() #Checking every column has a value pits_ob.count()Points of interest Open Data Barcelonapits_od.dtypes pits_od.shape #Drop unnecessary columns pits_od=pits_od.drop(['originalpost','categories','atencio_eq', 'phonenumber','type','tp','date','author','tags','language','pos', 'num', 'city', 'address', 'code_url', 'related_post', 'attachments', 'vignette', 'moreinfo', 'usergroup', 'post_modified', 'original_modified', 'wt', 'sigla', 'sectionname'], axis=1) pits_od.head(2) #Apply upper case for names pits_od['name'] = pits_od['name'].str.upper() #Remove white space at the beginning and end of the name pits_od['name'] = pits_od['name'].str.lstrip() #Check for duplicates pits_od.id.value_counts() #Drop duplicate values pits_od=pits_od.drop_duplicates('id') pits_od.head(2) #Removing noise strings from exceprt pits_od['excerpt'] = pits_od['excerpt'].str.replace('
\r\n\r\n', '') pits_od['excerpt'] = pits_od['excerpt'].str.replace('', '' ) pits_od['content']=pits_od['content'].str.replace('', '' ) pits_od['content']=pits_od['content'].str.replace('\r\n', '' ) pits_od['content']=pits_od['content'].str.replace('\r\n\r\n', '' ) pits_od['content']=pits_od['content'].str.replace('', '' ) pits_od['content']=pits_od['content'].str.replace('', '' ) pits_od['content']=pits_od['content'].str.replace('\r\n\r\n
', '' ) pits_od['content']=pits_od['content'].str.replace('', '' ) print(pits_od.excerpt[0]) print(" ") print(pits_od.content[0]) pits_od.district.value_counts() pits_od.count()Trip Advisor datasetTA_data.head(2) TA_data.shape TA_data.dtypes TA_data_bcn=TA_data[TA_data['City']=='Barcelona'] TA_data_bcn.shape TA_data_bcn.head(2) TA_data_bcn=TA_data_bcn.drop(['Unnamed: 0', 'URL_TA', 'ID_TA', 'City'], axis=1) TA_data_bcn.replace('NaN', np.nan, inplace=True) TA_data_bcn.columns= TA_data_bcn.columns.str.lower() TA_data_bcn.head(2) TA_data_bcn=TA_data_bcn.reset_index().drop('index', axis=1) TA_data_bcn['price range'].value_counts().sum() TA_data_bcn.count() TA_data_bcn.shape pits_od=pits_od.drop_duplicates() pits_od[pits_od['name']=='El Parc del Fòrum']['title']Convolutional Neural NetworksSean WadeThe main difference between CNNs and standard DNNs is that neurons are arranged in 3 dimensions, **width, height, and depth**. Each layer in a CNN takes in an input 3d tensor and output a 3d tensor.There are 3 main layers in typical CNN:- Convolutional Layer- Pooling Layer- Fully-Connected Layer Convolutional LayerConvolutional kernels are nothing new to image processing. This kernels are small matricies that we use for blurring, sharpening, edge detection, and more.$\text{Identity: } \quad \begin{bmatrix}0 & 0 & 0 \\0 & 1 & 0 \\0 & 0 & 0\end{bmatrix}$$\text{Blur: } \qquad \begin{bmatrix}1/9 & 1/9 & 1/9 \\1/9 & 1/9 & 1/9 \\1/9 & 1/9 & 1/9\end{bmatrix}$$\text{Sobel (edge): } \begin{bmatrix}-1 & 0 & 1 \\-2 & 0 & 2 \\-1 & 0 & 1\end{bmatrix}$By convolving these kernels across the image we can isolate certain characteristics, or **features**. A large number at a location after the convolution indicates a large **activation** or response to the kernel. In the case of the sobel filter a high activation indicates there is an edge present. This site is an awesome visualization of kernels: http://setosa.io/ev/image-kernels/ CNNs use filters like above for identifying features in 2D space. However, the magic with CNNs is that these filters are not predetermined, but rather they are learned. Also these leaned filters are stacked to look for complex, high level features. For example, instead of just detecting edges we can now detect cars and tires. These filters are small spacially, meaning along the width and height, but always extend to the full depth of the input volume. For example 5x5x3 for a 224x224x3 image. Hyperparameters - W - Input volume size - F - Receptive field size - S - Stride - P - Amount of 0 padding used Output size = $\frac{(W-F+2 P)}{S}+1$ Pooling Layer​The function of this layer is to reduce the spacial size of the representation. The benefit of this is three fold: reduce the amount of parameters, lower computation time, and control overfitting. There are variants like max, average, or overlapping.​​It is worth noting that while this has been standard, a few recent publications have abandoned it. It could be falling out of favor. Fully Connected LayerA fully connected layer takes the activations and flattens them to behave like a normal layer in a DNN. Now with all these pieces we can create a whole CNN architecture. 
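To make the output-size formula above concrete, here is a small helper (the name conv_output_size is ours, not a TensorFlow function) that evaluates Output size = (W - F + 2P)/S + 1:
def conv_output_size(W, F, S=1, P=0):
    # spatial output size of a convolutional layer: (W - F + 2P) / S + 1
    return (W - F + 2 * P) // S + 1

print(conv_output_size(28, 5))            # 24: a 5x5 filter with stride 1 and no padding
print(conv_output_size(28, 5, S=1, P=2))  # 28: a padding of 2 preserves the 28x28 input
For the 28x28 MNIST images used below, a 5x5 filter with stride 1 and padding 2 (which is what TensorFlow's padding='SAME' amounts to here) keeps the spatial size at 28, which is why the densely connected layer of the vanilla CNN is sized 28*28*64.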
Learning MNISTfrom __future__ import absolute_import, division, print_function import numpy as np import tensorflow as tf import math import random from matplotlib import pyplot as plt #from utils import load_CIFAR10 %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Load in data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('data/MNIST_data', one_hot=True) plt.imshow(np.reshape(mnist.train.images[42],[28,28])) print(mnist.train.labels[42]) def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial)Standard DNN%%time # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) # Layer 1 W1 = weight_variable([784, 100]) b1 = bias_variable([100]) h1 = tf.nn.relu(tf.matmul(x, W1) + b1) # Layer 2 W2 = weight_variable([100, 10]) b2 = bias_variable([10]) y = tf.matmul(h1, W2) + b2 # Define loss and optimizer cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_true)) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_true, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Start session sess = tf.Session() sess.run(tf.global_variables_initializer()) # Train for i in range(5000): batch_xs, batch_ys = mnist.train.next_batch(100) if i % 100 == 0: _, acc = sess.run([train_step, accuracy], feed_dict={x: batch_xs, y_true: batch_ys}) print("step [%d]: training accuracy %.4f" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch_xs, y_true: batch_ys}) # Test trained model print("--- RESULTS ---") print("Test Accuracy: %.4f" % sess.run(accuracy, feed_dict={x: mnist.test.images[:1000], y_true: mnist.test.labels[:1000]})) sess.close() tf.reset_default_graph()step [0]: training accuracy 0.1400 step [100]: training accuracy 0.8500 step [200]: training accuracy 0.9200 step [300]: training accuracy 0.9300 step [400]: training accuracy 0.9500 step [500]: training accuracy 0.9400 step [600]: training accuracy 0.9200 step [700]: training accuracy 0.9600 step [800]: training accuracy 0.9200 step [900]: training accuracy 0.9300 step [1000]: training accuracy 0.9600 step [1100]: training accuracy 0.9900 step [1200]: training accuracy 0.9600 step [1300]: training accuracy 0.9900 step [1400]: training accuracy 0.9700 step [1500]: training accuracy 0.9800 step [1600]: training accuracy 0.9600 step [1700]: training accuracy 0.9600 step [1800]: training accuracy 0.9600 step [1900]: training accuracy 0.9700 step [2000]: training accuracy 0.9400 step [2100]: training accuracy 1.0000 step [2200]: training accuracy 0.9800 step [2300]: training accuracy 1.0000 step [2400]: training accuracy 0.9700 step [2500]: training accuracy 1.0000 step [2600]: training ac[...]Vanilla CNNThis is the most basic CNN. 
It has 2 convolutional layers and 2 fully connected layers.def conv2d(x, W): # [batch, in_height, in_width, in_channels] # [filter_height, filter_width, in_channels, out_channels] return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') %%time tf.reset_default_graph() # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) # Resize to appropriate input tensor: # [batch, in_height, in_width, in_channels] x_image = tf.reshape(x, [-1,28,28,1]) # First Convolutional Layer W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Second Convolutional Layer W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2) # Densely Connected Layer W_fc1 = weight_variable([28*28*64, 1024]) b_fc1 = bias_variable([1024]) h_conv2_flat = tf.reshape(h_conv2, [-1, 28*28*64]) h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1) # Readout Layer W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2 # Loss cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_true)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_true,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.Session() sess.run(tf.global_variables_initializer()) for i in range(5000): batch = mnist.train.next_batch(50) if i % 100 == 0: _, acc = sess.run([train_step, accuracy], feed_dict={x: batch[0], y_true: batch[1]}) print("step [%d]: training accuracy %.4f" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch[0], y_true: batch[1]}) print("--- RESULTS ---") print("Test Accuracy %.4f" % sess.run(accuracy, feed_dict={ x: mnist.test.images[:1000], y_true: mnist.test.labels[:1000]})) sess.close()step [0]: training accuracy 0.1000 step [100]: training accuracy 0.8400 step [200]: training accuracy 0.9000 step [300]: training accuracy 0.9200 step [400]: training accuracy 0.9600 step [500]: training accuracy 0.9200 step [600]: training accuracy 0.9200 step [700]: training accuracy 0.9600 step [800]: training accuracy 0.9800 step [900]: training accuracy 0.9800 step [1000]: training accuracy 0.9400 step [1100]: training accuracy 0.9600 step [1200]: training accuracy 0.9600 step [1300]: training accuracy 0.9200 step [1400]: training accuracy 0.9600 step [1500]: training accuracy 1.0000 step [1600]: training accuracy 0.9600 step [1700]: training accuracy 1.0000 step [1800]: training accuracy 1.0000 step [1900]: training accuracy 0.9600 step [2000]: training accuracy 1.0000 step [2100]: training accuracy 1.0000 step [2200]: training accuracy 1.0000 step [2300]: training accuracy 1.0000 step [2400]: training accuracy 1.0000 step [2500]: training accuracy 0.9800 step [2600]: training ac[...]CNN + MaxPool + DropoutThis CNN is the same as above plus max pooling and dropout.def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') %%time tf.reset_default_graph() # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) # Resize to appropriate input tensor: # [batch, in_height, in_width, in_channels] x_image = tf.reshape(x, [-1,28,28,1]) # First Convolutional Layer W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = 
bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) # <------------------------- Added Pooling # Second Convolutional Layer W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # <------------------------- Added Pooling # Densely Connected Layer W_fc1 = weight_variable([7*7*64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # <------------ Added Dropout # Readout Layer W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # Loss cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_true)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_true,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) for i in range(5000): batch = mnist.train.next_batch(50) if i % 100 == 0: _, acc = sess.run([train_step, accuracy], feed_dict={x: batch[0], y_true: batch[1], keep_prob: 1.0}) print("step [%d]: training accuracy %g" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch[0], y_true: batch[1], keep_prob: .5}) print("--- RESULTS ---") print("Test Accuracy %g" % accuracy.eval(feed_dict={ x: mnist.test.images[:1000], y_true: mnist.test.labels[:1000], keep_prob: 1.0})) sess.close()step [0]: training accuracy 0.14 step [100]: training accuracy 0.82 step [200]: training accuracy 0.88 step [300]: training accuracy 0.9 step [400]: training accuracy 0.94 step [500]: training accuracy 0.94 step [600]: training accuracy 0.9 step [700]: training accuracy 0.98 step [800]: training accuracy 0.92 step [900]: training accuracy 0.98 step [1000]: training accuracy 1 step [1100]: training accuracy 0.98 step [1200]: training accuracy 0.9 step [1300]: training accuracy 1 step [1400]: training accuracy 0.96 step [1500]: training accuracy 0.96 step [1600]: training accuracy 0.96 step [1700]: training accuracy 0.98 step [1800]: training accuracy 1 step [1900]: training accuracy 1 step [2000]: training accuracy 0.96 step [2100]: training accuracy 1 step [2200]: training accuracy 0.98 step [2300]: training accuracy 0.98 step [2400]: training accuracy 0.96 step [2500]: training accuracy 0.98 step [2600]: training accuracy 1 step [2700]: training accuracy 0.98 step [2800]: training acc[...]Low level API Prerequisites- Understanding the gammapy data workflow, in particular what are DL3 events and instrument response functions (IRF).- Understanding of the data reduction and modeling fitting process as shown in the [analysis with the high level interface tutorial](analysis_1.ipynb) ContextThis notebook is an introduction to gammapy analysis this time using the lower level classes and functionsthe library.This allows to understand what happens during two main gammapy analysis steps, data reduction and modeling/fitting. **Objective: Create a 3D dataset of the Crab using the H.E.S.S. DL3 data release 1 and perform a simple model fitting of the Crab nebula using the lower level gammapy API.** Proposed approachHere, we have to interact with the data archive (with the `~gammapy.data.DataStore`) to retrieve a list of selected observations (`~gammapy.data.Observations`). 
Then, we define the geometry of the `~gammapy.datasets.MapDataset` object we want to produce and the maker object that reduce an observationto a dataset. We can then proceed with data reduction with a loop over all selected observations to produce datasets in the relevant geometry and stack them together (i.e. sum them all).In practice, we have to:- Create a `~gammapy.data.DataStore` poiting to the relevant data - Apply an observation selection to produce a list of observations, a `~gammapy.data.Observations` object.- Define a geometry of the Map we want to produce, with a sky projection and an energy range. - Create a `~gammapy.maps.MapAxis` for the energy - Create a `~gammapy.maps.WcsGeom` for the geometry- Create the necessary makers : - the map dataset maker : `~gammapy.makers.MapDatasetMaker` - the background normalization maker, here a `~gammapy.makers.FoVBackgroundMaker` - and usually the safe range maker : `~gammapy.makers.SafeRangeMaker`- Perform the data reduction loop. And for every observation: - Apply the makers sequentially to produce the current `~gammapy.datasets.MapDataset` - Stack it on the target one.- Define the `~gammapy.modeling.models.SkyModel` to apply to the dataset.- Create a `~gammapy.modeling.Fit` object and run it to fit the model parameters- Apply a `~gammapy.estimators.FluxPointsEstimator` to compute flux points for the spectral part of the fit. SetupFirst, we setup the analysis by performing required imports.%matplotlib inline import matplotlib.pyplot as plt from pathlib import Path from astropy import units as u from astropy.coordinates import SkyCoord from regions import CircleSkyRegion from gammapy.data import DataStore from gammapy.datasets import MapDataset from gammapy.maps import WcsGeom, MapAxis from gammapy.makers import MapDatasetMaker, SafeMaskMaker, FoVBackgroundMaker from gammapy.modeling.models import ( SkyModel, PowerLawSpectralModel, PointSpatialModel, FoVBackgroundModel, ) from gammapy.modeling import Fit from gammapy.estimators import FluxPointsEstimatorDefining the datastore and selecting observationsWe first use the `~gammapy.data.DataStore` object to access the observations we want to analyse. Here the H.E.S.S. DL3 DR1.data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1")We can now define an observation filter to select only the relevant observations. Here we use a cone search which we define with a python dict.We then filter the `ObservationTable` with `~gammapy.data.ObservationTable.select_observations()`.selection = dict( type="sky_circle", frame="icrs", lon="83.633 deg", lat="22.014 deg", radius="5 deg", ) selected_obs_table = data_store.obs_table.select_observations(selection)We can now retrieve the relevant observations by passing their `obs_id` to the`~gammapy.data.DataStore.get_observations()` method.observations = data_store.get_observations(selected_obs_table["OBS_ID"])Preparing reduced datasets geometryNow we define a reference geometry for our analysis, We choose a WCS based geometry with a binsize of 0.02 deg and also define an energy axis:energy_axis = MapAxis.from_energy_bounds(1.0, 10.0, 4, unit="TeV") geom = WcsGeom.create( skydir=(83.633, 22.014), binsz=0.02, width=(2, 2), frame="icrs", proj="CAR", axes=[energy_axis], ) # Reduced IRFs are defined in true energy (i.e. not measured energy). 
energy_axis_true = MapAxis.from_energy_bounds( 0.5, 20, 10, unit="TeV", name="energy_true" )Now we can define the target dataset with this geometry.stacked = MapDataset.create( geom=geom, energy_axis_true=energy_axis_true, name="crab-stacked" )Data reduction Create the maker classes to be usedThe `~gammapy.datasets.MapDatasetMaker` object is initialized as well as the `~gammapy.makers.SafeMaskMaker` that carries here a maximum offset selection.offset_max = 2.5 * u.deg maker = MapDatasetMaker() maker_safe_mask = SafeMaskMaker( methods=["offset-max", "aeff-max"], offset_max=offset_max ) circle = CircleSkyRegion( center=SkyCoord("83.63 deg", "22.14 deg"), radius=0.2 * u.deg ) exclusion_mask = ~geom.region_mask(regions=[circle]) maker_fov = FoVBackgroundMaker(method="fit", exclusion_mask=exclusion_mask)Perform the data reduction loop%%time for obs in observations: # First a cutout of the target map is produced cutout = stacked.cutout( obs.pointing_radec, width=2 * offset_max, name=f"obs-{obs.obs_id}" ) # A MapDataset is filled in this cutout geometry dataset = maker.run(cutout, obs) # The data quality cut is applied dataset = maker_safe_mask.run(dataset, obs) # fit background model dataset = maker_fov.run(dataset) print( f"Background norm obs {obs.obs_id}: {dataset.background_model.spectral_model.norm.value:.2f}" ) # The resulting dataset cutout is stacked onto the final one stacked.stack(dataset) print(stacked)Inspect the reduced datasetstacked.counts.sum_over_axes().smooth(0.05 * u.deg).plot( stretch="sqrt", add_cbar=True );Save dataset to diskIt is common to run the preparation step independent of the likelihood fit, because often the preparation of maps, PSF and energy dispersion is slow if you have a lot of data. We first create a folder:path = Path("analysis_2") path.mkdir(exist_ok=True)And then write the maps and IRFs to disk by calling the dedicated `~gammapy.datasets.MapDataset.write()` method:filename = path / "crab-stacked-dataset.fits.gz" stacked.write(filename, overwrite=True)Define the modelWe first define the model, a `SkyModel`, as the combination of a point source `SpatialModel` with a powerlaw `SpectralModel`:target_position = SkyCoord(ra=83.63308, dec=22.01450, unit="deg") spatial_model = PointSpatialModel( lon_0=target_position.ra, lat_0=target_position.dec, frame="icrs" ) spectral_model = PowerLawSpectralModel( index=2.702, amplitude=4.712e-11 * u.Unit("1 / (cm2 s TeV)"), reference=1 * u.TeV, ) sky_model = SkyModel( spatial_model=spatial_model, spectral_model=spectral_model, name="crab" ) bkg_model = FoVBackgroundModel(dataset_name="crab-stacked")Now we assign this model to our reduced dataset:stacked.models = [sky_model, bkg_model]Fit the modelThe `~gammapy.modeling.Fit` class is orchestrating the fit, connecting the `stats` method of the dataset to the minimizer. By default, it uses `iminuit`.Its constructor takes a list of dataset as argument.%%time fit = Fit(optimize_opts={"print_level": 1}) result = fit.run([stacked])The `FitResult` contains information about the optimization and parameter error calculation.print(result)The fitted parameters are visible from the `~astropy.modeling.models.Models` object.stacked.models.to_parameters_table()Inspecting residualsFor any fit it is useful to inspect the residual images. We have a few options on the dataset object to handle this. 
First we can use `.plot_residuals_spatial()` to plot a residual image, summed over all energies:stacked.plot_residuals_spatial(method="diff/sqrt(model)", vmin=-0.5, vmax=0.5);In addition, we can also specify a region in the map to show the spectral residuals:region = CircleSkyRegion( center=SkyCoord("83.63 deg", "22.14 deg"), radius=0.5 * u.deg ) stacked.plot_residuals( kwargs_spatial=dict(method="diff/sqrt(model)", vmin=-0.5, vmax=0.5), kwargs_spectral=dict(region=region), );We can also directly access the `.residuals()` to get a map, that we can plot interactively:residuals = stacked.residuals(method="diff") residuals.smooth("0.08 deg").plot_interactive( cmap="coolwarm", vmin=-0.2, vmax=0.2, stretch="linear", add_cbar=True );Plot the fitted spectrum Making a butterfly plot The `SpectralModel` component can be used to produce a, so-called, butterfly plot showing the envelope of the model taking into account parameter uncertainties:spec = sky_model.spectral_modelNow we can actually do the plot using the `plot_error` method:energy_bounds = [1, 10] * u.TeV spec.plot(energy_bounds=energy_bounds, energy_power=2) ax = spec.plot_error(energy_bounds=energy_bounds, energy_power=2)Computing flux pointsWe can now compute some flux points using the `~gammapy.estimators.FluxPointsEstimator`. Besides the list of datasets to use, we must provide it the energy intervals on which to compute flux points as well as the model component name.energy_edges = [1, 2, 4, 10] * u.TeV fpe = FluxPointsEstimator(energy_edges=energy_edges, source="crab") %%time flux_points = fpe.run(datasets=[stacked]) ax = spec.plot_error(energy_bounds=energy_bounds, energy_power=2) flux_points.plot(ax=ax, energy_power=2)Boundary value problems--- Poisson boundary value problemConsider the Poisson equation$$-\nabla^2\varphi = -\frac{\partial^2 \varphi}{\partial x^2} - \frac{\partial^2 \varphi}{\partial y^2} = f(\varphi,x,y)$$With Dirichlet boundary conditions\begin{align*}\varphi(0, y) &= 0, \quad 0\le y\le H \\\varphi(L, y) &= 0, \quad 0\le y\le H \\\varphi(x, 0) &= 0, \quad 0\le x\le L \\\varphi(x, H) &= 0, \quad 0\le x\le L\end{align*}**Applications:**- Model for laminar flow of Newtonian fluid through a rectangular duct- Steady state diffusion or heat conduction Solution domain- 2D grid with boundary points in green and interior points in blue- The grid has $N_x$ and $N_y$ points in the $x$ and $y$ direction, respectively- The 2D grid is internally stored in one long array- The point $\phi(x_i, y_j) =\phi_{i,j}$ has the position $n=iN_y + j$ Method of finite differencesThe value on the grid boundaries are given by the Dirichlet boundary conditions $\phi_{i,j} = 0$ for\begin{align*}i=0 & \quad & 0\le j\le N_y-1 \\i=N_x-1v& \quad & 0\le j\le N_y-1 \\0\le i\le N_x-1 & \quad & j=0 \\0\le i\le N_x-1 & \quad & j=N_y-1\end{align*}In the interior we are using the approximation of the 2nd partial derivatives\begin{align*}\left.\frac{d^2\phi}{dx^2}\right|_{x_i, y_j} &= \frac{\phi(x_i+\Delta x, y_j) - 2 \phi(x_i,y_j) + \phi(x_i-\Delta x,y_j)}{(\Delta x)^2} = \frac{\phi(x_{i+1}, y_j) - 2 \phi(x_i,y_j) + \phi(x_{i-1},y_j)}{(\Delta x)^2} \\\left.\frac{d^2\phi}{dy^2}\right|_{x_i, y_j} &= \frac{\phi(x_i, y_j+\Delta y) - 2 \phi(x_i,y_j) + \phi(x_i,y_j-\Delta y)}{(\Delta y)^2} = \frac{\phi(x_i, y_{j+1}) - 2 \phi(x_i,y_j) + \phi(x_i,y_{j-1})}{(\Delta y)^2} \end{align*}So the discretisation is given by\begin{align*}-\frac{\phi(x_{i+1}, y_j) + 2 \phi(x_i,y_j) - \phi(x_{i-1},y_j)}{(\Delta x)^2} - \frac{\phi(x_i, y_{j+1}) + 2 \phi(x_i,y_j) - 
\phi(x_i,y_{j-1})}{(\Delta y)^2} &= f(x_i, y_j)\end{align*}for $1\le i \le N_x-2$ and $1\le j \le N_y-2$With the mapping $n=iN_y + j$ we get\begin{align*}A_{n,n-N_y} \phi_{n-N_y} &+ A_{n,n-1} \phi_{n-1} + A_{n,n} \phi_{n} \\ & + A_{n,n+1} \phi_{n+1} + A_{n,n+N_y} \phi_{n+N_y} = b_n\end{align*}for $1\le i \le N_x-2$ and $1\le j \le N_y-2$The parameters $A$ are given by\begin{align*}A_{n,n-N_y} = A_{n,n+N_y} &= \left[\frac{-1}{(\Delta x)^2}\right] \\A_{n,n-1} = A_{n,n+1} &= \left[\frac{-1}{(\Delta y)^2}\right] \\A_{n,n} &= \left[\frac{2}{(\Delta x)^2} + \frac{2}{(\Delta y)^2}\right] \\b_n &= f(x_i, y_j)\end{align*} Numerical solutionFirst, we define the grid spacing in the $x$ and $y$ directions. Then we use the [`numpy.meshgrid`](https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html) function to generate 2D arrays for the $x$ and $y$ coordinates of the entire 2D grid. By default, `meshgrid` uses Cartesian indexing so that inputs of length $N_x$ and $N_y$ give outputs of shape ($N_y$, $N_x$). Finally, we plot the grid points.import numpy as np # x and y length and number of grid points x_length = 2 Nx = 45 y_length = 1 Ny = 30 # Define the meshgrid x = np.linspace(0, x_length, Nx) dx = x[1] - x[0] y = np.linspace(0, y_length, Ny) dy = y[1] - y[0] X, Y = np.meshgrid(x, y) print("The X and Y arrays are of shape {}".format(np.shape(X))) # Plot the grid points import matplotlib.pyplot as plt plt.scatter(X, Y) plt.xlabel("x axis") plt.ylabel("y axis") plt.title("Plot of the grid points") plt.show()Next we allocate storage for the large array $A$ and the right hand side $b$. We will also define the boundary conditions.# Total size Ntot = Nx * Ny # Allocate space for the arrays A = np.zeros([Ntot, Ntot]) b = np.zeros(Ntot) # Define function for the position def get_position(i, j, Nx, Ny): return i * Ny + j # Define boundary conditions # Left boundary: x = 0 i = 0 for j in range(Ny): n = get_position(i, j, Nx, Ny) A[n, n] = 1 b[n] = 0 # Right boundary: x = x_length i = Nx-1 for j in range(Ny): n = get_position(i, j, Nx, Ny) A[n, n] = 1 b[n] = 0 # Bottom boundary: y = 0 j = 0 for i in range(1, Nx-1): n = get_position(i, j, Nx, Ny) A[n, n] = 1 b[n] = 0 # Top boundary: y = y_length j = Ny-1 for i in range(1, Nx-1): n = get_position(i, j, Nx, Ny) A[n, n] = 1 b[n] = 0Before we can solve the resulting equation system, we need to define the equations for the interior points.# Define the factors factor_x = 1/(dx**2) factor_y = 1/(dy**2) factor_cent = 2 * (factor_x + factor_y) # Loop over the interior points to define A and b for i in range(1,Nx-1): for j in range(1, Ny-1): n = get_position(i,j,Nx,Ny) A[n,n-Ny] = -factor_x A[n,n+Ny] = -factor_x A[n,n-1] = -factor_y A[n,n+1] = -factor_y A[n,n] = factor_cent b[n] = 1We can finally solve the system with the `numpy.linalg.solve` function.# Solve with Gaussian elimination phi = np.linalg.solve(A, b)Plot the resulting solution.# Contour plot of the solution import matplotlib.pyplot as plt Z = np.zeros(np.shape(X)) for i in range(Nx): for j in range(Ny): n = get_position(i, j, Nx, Ny) # We need so swap the x and y indices due to the default meshgrid ordering Z[j, i] = phi[n] h = plt.contourf(X, Y, Z) plt.colorbar(h) plt.xlabel("x axis") plt.ylabel("y axis") plt.show()* Problem 9 - Special Pythagorean tripletA Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a² + b² = c²For example, 3² + 4² = 9 + 16 = 25 = 5².There exists exactly one Pythagorean triplet for which a + b + c = 1000.Find the product abc. 
Pythagorean numbers:* a b c* 3 4 5* 5 12 13* 7 24 25* 9 40 41* 11 60 61* 13 84 85* 15 112 113* 17 144 145* 19 180 181* 21 220 221* 23 264 265* 25 312 313* 27 364 365* 29 420 421* 31 480 481But, if I look for these numbers, I am seeing that is impossible to get a triplet which a + b + c = 1000. So, I must to leave this pattern. Dividing the problem in many parts, I imagine the first one is to check this sum(a + b + c)def check(a, b, c): if a + b + c == 1000: return True else: return False if __name__ == '__main__': assert check(1000, 0, 0) == True assert check(900, 50, 50) == True assert check(333, 333, 334) == True assert check(0,0,0) == False assert check(1000.0, 0.0, 0.0) == True assert check(-100, 80, 9) == FalseNow, I am using the Euclid's Formula to get a pythagorean triplet.* a = (m²) - (n²)* b = 2mn* c = (m²) + (n²)* m > n > 0for i in range(1, 1000): for j in range(1, 1000): if i > j: a = (i ** 2) - (j ** 2) b = 2 * i * j c = (i ** 2) + (j ** 2) if (a + b + c) == 1000: print(a,b,c) else: pass for i in range(1, 1000): for j in range(1, 1000): if i > j: a = (i ** 2) - (j ** 2) b = 2 * i * j c = (i ** 2) + (j ** 2) if (a + b + c) < 100: print(a,b,c) """a = (m²) - (n²) b = 2mn c = (m²) + (n²) m > n > 0""" for i in range(1, 100): for j in range(1,100): a = (i**2) - (j**2) b = 2*i*j c = (i**2) + (j**2) print(a,b,c) #to avoid the negative value, I must to define i > j for i in range(1, 100): for j in range(1,100): if i > j: a = (i**2) - (j**2) b = 2*i*j c = (i**2) + (j**2) print(a,b,c) #and now I am loocking for check the assertion: a + b + c = 1000 for i in range(1, 100): for j in range(1,100): if i > j: a = (i**2) - (j**2) b = 2*i*j c = (i**2) + (j**2) if (a + b + c) == 1000: print(a,b,c) #and print the product for i in range(1, 100): for j in range(1,100): if i > j: a = (i**2) - (j**2) b = 2*i*j c = (i**2) + (j**2) if (a + b + c) == 1000: print(a*b*c)31875000Practical coding assignment 1 Assignment consitsts of 2 Sections: 1. Part I,III, IV this is a simple introduction to numpy. However, it has an integrated theoretical exercise inside, don't miss it! 2. Fun with ~~flags~~ data generating models. This section also has a simple theoretical exercise inside. Section 1 THIS GUIDE IS ADAPTED FROM THE ASSIGNMENT 1 GUIDE OF RL-LEARNING COURSE BY HSE (Higher School Of Economics) Part I: Jupyter notebooks in a nutshell* You are reading this line in a jupyter notebook.* A notebook consists of cells. A cell can contain either code or hypertext. * This cell contains hypertext. The next cell contains code.* You can __run a cell__ with code by selecting it (click) and pressing `Ctrl + Enter` to execute the code and display output(if any).* If you're running this on a device with no keyboard, ~~you are doing it wrong~~ use topbar (esp. 
play/stop/restart buttons) to run code.* Behind the curtains, there's a python interpreter that runs that code and remembers anything you defined.Run these cells to get starteda = 5 print(a * 2)10* `Ctrl + S` to save changes (or use the button that looks like a floppy disk)* Top menu -> Kernel -> Interrupt (or Stop button) if you want it to stop running cell midway.* Top menu -> Kernel -> Restart (or cyclic arrow button) if interrupt doesn't fix the problem (you will lose all variables).* For shortcut junkies like us: Top menu -> Help -> Keyboard Shortcuts* More: [Hacker's guide](http://arogozhnikov.github.io/2016/09/10/jupyter-features.html), [Beginner's guide'](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/), [Datacamp tutorial](https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook)Now __the most important feature__ of jupyter notebooks for this course: * if you're typing something, press `Tab` to see automatic suggestions, use arrow keys + enter to pick one.* if you move your cursor inside some function and press `__Shift + Tab__`, you'll get a help window. `Shift + (Tab , Tab)` will expand it.# run this first import math # place your cursor at the end of the unfinished line below to find a function # that computes arctangent from two parameters (should have 2 in it's name) # once you chose it, press shift + tab + tab(again) to see the docs math.atan(a)Part III: Numpy and vectorized computingAlmost any statistical model requires some computational heavy lifting usually involving linear algebra problems. Unfortunately, raw python is terrible at this because each operation is interpreted at runtime. So instead, we'll use `numpy` - a library that lets you run blazing fast computation with vectors, matrices and other tensors. Again, the god object here is `numpy.ndarray`:import numpy as np a = np.array([1,2,3,4,5]) b = np.array([5,4,3,2,1]) print("a = ",a) print("b = ",b) # math and boolean operations can applied to each element of an array print("a + 1 =", a + 1) print("a * 2 =", a * 2) print("a == 2", a == 2) # ... 
or corresponding elements of two (or more) arrays print("a + b =",a + b) print("a * b =",a * b) # Your turn: compute half-products of a and b elements (halves of products) print(a * b / 2) #my question: or this means this: print(np.prod(a) / 2, np.prod(b) / 2) # compute elementwise quoient between squared a and (b plus 1) np.sqrt(a) / (b + 1)`````````````````````````````` How fast is it, harry?![img](https://img.buzzfeed.com/buzzfeed-static/static/2015-11/6/7/enhanced/webdr10/enhanced-buzz-22847-1446811476-0.jpg)Let's compare computation time for python and numpy* Two arrays of 10^6 elements * first - from 0 to 1 000 000 * second - from 99 to 1 000 099 * Computing: * elemwise sum * elemwise product * square root of first array * sum of all elements in the first array%%time # ^-- this "magic" measures and prints cell computation time # Option I: pure python arr_1 = range(1000000) arr_2 = range(99,1000099) a_sum = [] a_prod = [] sqrt_a1 = [] for i in range(len(arr_1)): a_sum.append(arr_1[i]+arr_2[i]) a_prod.append(arr_1[i]*arr_2[i]) a_sum.append(arr_1[i]**0.5) arr_1_sum = sum(arr_1) %%time # Option II: start from python, convert to numpy arr_1 = range(1000000) arr_2 = range(99,1000099) arr_1, arr_2 = np.array(arr_1) , np.array(arr_2) a_sum = arr_1 + arr_2 a_prod = arr_1 * arr_2 sqrt_a1 = arr_1 ** .5 arr_1_sum = arr_1.sum() %%time # Option III: pure numpy arr_1 = np.arange(1000000) arr_2 = np.arange(99,1000099) a_sum = arr_1 + arr_2 a_prod = arr_1 * arr_2 sqrt_a1 = arr_1 ** .5 arr_1_sum = arr_1.sum()Wall time: 42.9 msThere's also a bunch of pre-implemented operations including logarithms, trigonometry, vector/matrix products and aggregations.a = np.array([1,2,3,4,5]) b = np.array([5,4,3,2,1]) print("numpy.sum(a) = ", np.sum(a)) print("numpy.mean(a) = ", np.mean(a)) print("numpy.min(a) = ", np.min(a)) print("numpy.argmin(b) = ", np.argmin(b)) # index of minimal element print("numpy.dot(a,b) = ", np.dot(a, b)) # dot product. Also used for matrix/tensor multiplication print("numpy.unique(['male','male','female','female','male']) = ", np.unique(['male','male','female','female','male'])) # and tons of other stuff. see http://bit.ly/2u5q430 .numpy.sum(a) = 15 numpy.mean(a) = 3.0 numpy.min(a) = 1 numpy.argmin(b) = 4 numpy.dot(a,b) = 35 numpy.unique(['male','male','female','female','male']) = ['female' 'male']The important part: all this functionality works with Pandas dataframes! The final numpy feature we'll need is indexing: selecting elements from an array. Aside from python indexes and slices (e.g. 
a[1:4]), numpy also allows you to select several elements at once.a = np.array([0, 1, 4, 9, 16, 25]) ix = np.array([1,2,3]) print("a = ", a) print("Select by element index") print("a[[1,2,5]] = ", a[ix]) print("\nSelect by boolean mask") print("a[a > 5] = ", a[a > 5]) # select all elementts in a that are greater than 5 print("(a % 2 == 0) =",a % 2 == 0) # True for even, False for odd print("a[a > 3] =", a[a % 2 == 0]) # select all elements in a that are evena = [ 0 1 4 9 16 25] Select by element index a[[1,2,5]] = [1 4 9] Select by boolean mask a[a > 5] = [ 9 16 25] (a % 2 == 0) = [ True False True False True False] a[a > 3] = [ 0 4 16]Matrix operationsA = np.array([[1,2],[3,4]]) # just a square matrix print("just a square matrix A =") print(A) print("elementwise product of its elements",) B = A**2 # elementwise product of its elements print( B) print("again, elementwise product of its elements") # elementwise product of its elements B1 = A*A # elementwise product of its elements print( B1) print("standard matrix product" ) C = np.dot(A,A) print( C)just a square matrix A = [[1 2] [3 4]] elementwise product of its elements [[ 1 4] [ 9 16]] again, elementwise product of its elements [[ 1 4] [ 9 16]] standard matrix product [[ 7 10] [15 22]]EXERCISE 1, Your turn! (also see problem 7)Use numpy to answer a few questions# the following command generates a random square matrix X = np.random.rand(3,2)You need to * 1) check its shape ("X.shape" command) * 2) calculate matrix C = X'*X, where ' is for transpose. * 3) which shape does it have? * 4) find a list of eigenvalues (call it $\lambda_1$, $\lambda_2$) and eigenvectors (call it v1, v2) of C (Google youself, how to do that!, Hint: np.linalg..... )* 5) Show that a standard eigendecomposition C = V L V' holds, where L = diag_matrix( $\lambda_1$, $\lambda_2$) is a diagonal matrix (see np.diag command), and V is a change of basis matrix, seehttps://ru.wikipedia.org/wiki/%D0%A1%D0%BF%D0%B5%D0%BA%D1%82%D1%80%D0%B0%D0%BB%D1%8C%D0%BD%D0%BE%D0%B5_%D1%80%D0%B0%D0%B7%D0%BB%D0%BE%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_%D0%BC%D0%B0%D1%82%D1%80%D0%B8%D1%86%D1%8Bwhat sign (positive or negative do eigenvalues have?)* 6) Solve corresponding exercise from Problem_set3_HW_updated# You need to fill in the gaps #1. Shape shapeX = X.shape print("shape = ", shapeX) #2. Matrix C C = np.dot(X.transpose(),X) # 3. shapeC = C.shape print("shape = ", shapeC) #4. lmb, V = np.linalg.eig(C) #5. L = np.diag(lmb) C1 = np.dot(np.dot(V, L),V.transpose()) # check more elegant approach multi_dot([A, B, C, D]): C2 = np.linalg.multi_dot([V, L, V.T]) # check that everything worked correctly print("||C1-C||=", np.linalg.norm(C1-C)) # this should be close to zero, not 0 due to numerical errors. print("||C2-C||=", np.linalg.norm(C2-C)) #shape = (3, 2) shape = (2, 2) ||C1-C||= 6.280369834735101e-16 ||C2-C||= 6.280369834735101e-16Creating your own functions. Some functions are realized in libraries, like numpy and math, you've already used themx=2 print("using numpy sin(x)=", np.sin(x))# just sin(x) print("using math sin(x)=" , math.sin(x))# also sin(x) np.sin(x) - math.sin(x) # look they are equal (actually usually realizations by different libs may slightly differ)using numpy sin(x)= 0.9092974268256817 using math sin(x)= 0.9092974268256817Also you can create your own functions. 
For instance, here is a new function that calculates sin(2x)def sin2(x): y = np.sin(2*x) return y # now you can call it like this sin2(1) machine_zero = np.finfo(float).eps # write your own function, that calculates difference d = |np.sin(2*x) - 2*np.sin(x)*np.cos(x)| and # returns true if d is numerical zero , i.e. d < machine_zero x=1 # # Your code here # def func(x): return np.abs(sin2(x) - 2*np.sin(x)*np.cos(x)) < machine_zero func(1)Part IV: plots and matplotlibUsing python to visualize the data is covered by yet another library: `matplotlib`.Just like python itself, matplotlib has an awesome tendency of keeping simple things simple while still allowing you to write complicated stuff with convenience (e.g. super-detailed plots or custom animations).import matplotlib.pyplot as plt %matplotlib inline # ^-- this "magic" tells all future matplotlib plots to be drawn inside notebook and not in a separate window. # line plot plt.plot([0,1,2,3,4,5],[0,1,4,9,16,25]); #scatter-plot plt.scatter([0,1,2,3,4,5],[0,1,4,9,16,25]) plt.show() # show the first plot and begin drawing next one # draw a scatter plot with custom markers and colors plt.scatter([1,1,2,3,4,4.5],[3,2,2,5,15,24], c = ["red","blue","orange","green","cyan","gray"], marker = "x") # without .show(), several plots will be drawn on top of one another plt.plot([0,1,2,3,4,5],[0,1,4,9,16,25],c = "black") # adding more sugar plt.title("Conspiracy theory proven!!!") plt.xlabel("Per capita alcohol consumption") plt.ylabel("Number of Tests (Letuchka) in the Mathematical analysis course");Some more funny correlations: http://bit.ly/1FcNnWF, don't miss them :)# histogram - showing data empirical density plt.hist([0,1,1,1,2,2,3,3,3,3,3,4,4,5,5,5,6,7,7,8,9,10]) plt.show() # same with less bins plt.hist([0,1,1,1,2,2,3,3,3,3,3,4,4,5,5,5,6,7,7,8,9,10], bins = 5);Section 2 In this section we consider data generating models. From very simple generating of random variables to linear models, nonlinear models and beyond. Random variables The best way to "feel" random objects is to look at lots of samples. Let $N$ denote a sample size. 1. Bernoulli distribution (or a simple coin toss)one_toss = np.random.binomial(1, 1/3, [1]) print("result of a toss is", one_toss[0])result of a toss is 0Note it is random, once you rerun the cell above `Shift+Enter`, you may see a different value!N = 20 # let's have a look at the sample smpl = [ np.random.binomial(1, 1/3, [1]) for i in range(N)] # convert it to more comfortable np-array smpl = np.array(smpl) print("sample shape = ", smpl.shape) # oh, its a (20 x 1) matrix! print("sample is", smpl[:,0]) # array with 20 entries print("average number (or realized mean value) is ", np.mean(smpl[:,0])) # array with 20 entries # plt.hist(smpl, bins = 2) # plt.show() # thereis a more clever way to do this!!!! # just use smpl = np.random.binomial(1, 1/3, [N])sample shape = (20, 1) sample is [0 0 1 0 0 1 0 1 0 1 1 0 0 0 0 1 0 0 0 1] average number (or realized mean value) is 0.35Print "heads tails" in Google to have a look at animation. EXERCISE 2, Your turn!Generate 2. Poisson, 3. Standard Cauchy and 4. Standard Normal and 5. Normal distributed (mu = M, Std = D) samples, (Let M be your grade for Mathematical analysis course and D = sqrt(n), where n is the class size). Where do these distributions commonly arise?* A. Plot sample itself (as a set of points in $\mathbb{R}$)* B. Plot their histograms. * C. 
Extraexercise: plot their sample distribution function.Note, once again, both histogram and sample distribution functions are random. It is interesting, what happens, when you increase $N$. Task 2 and 5 are already done for you. Use them as a starting point.N = 1000 #sample size2. Poisson: commanly models processes with a fixed intensity "$\lambda$" like a number of incoming phone calls at the phone station per hour. It takes discrete values x=0,1,2.. with probs $P(x=k) = \frac{exp(-\lambda)\lambda^k}{k!}$ .from scipy.special import gamma # Generating sample lam = 2 smpl = np.random.poisson(lam = lam, size = N ) # For instance, it each number can model an incoming tcpip connections received by some distant server # A. plt.plot(smpl, np.zeros_like(smpl), 'x') # its just one-dimensional plot, note, more common values will look more shady. plt.show() # have a look at smpl: print('first 10 realized values ',smpl[0:10]) # B. plt.hist(smpl, bins = 10) # bins is a number of "boxes" (disjoint intervals of equal "width" covering set of observations) we put observations in. Height of columns = number of observations in a box. plt.show() #C. fig, ax = plt.subplots(figsize=(8, 4)) # plot the cumulative histogram n_bins = smpl.max() # technical parameter n, bins, patches = ax.hist(smpl, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') # Add a line showing the theoretical distribution F. y = np.exp(-lam) * lam ** bins / np.vectorize(math.factorial)(bins) y = y.cumsum() y /= y[-1] ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical') # tidy up the figure ax.grid(True) ax.legend(loc='right') ax.set_title('Cumulative step histograms') ax.set_xlabel('Number of incoming phone calls') ax.set_ylabel('Realized (empirical) probability of occurrence') plt.show() ############################### # D. instead of cumulative distribution one can depict density function. # It is especially inetersting to look at one picture with normalized histogram points = np.array(range(smpl.max() + 1)) points.sort() pdf = np.exp(-lam) * lam ** points / np.vectorize(math.factorial)(points) plt.hist(smpl, smpl.max(), density=True); plt.plot(points, pdf, color='r') plt.show() # 3. Standard Cauchy # TODO smpl = np.random.standard_cauchy(size=N) # For instance, it each number can model an incoming tcpip connections received by some distant server # A. plt.plot(smpl, np.zeros_like(smpl), 'x') # its just one-dimensional plot, note, more common values will look more shady. plt.show() # have a look at smpl: print('first 10 realized values ',smpl[0:10]) # B. plt.hist(smpl, bins = 10) # bins is a number of "boxes" (disjoint intervals of equal "width" covering set of observations) we put observations in. Height of columns = number of observations in a box. plt.show() #C. fig, ax = plt.subplots(figsize=(8, 4)) # plot the cumulative histogram n_bins = 50 # technical parameter n, bins, patches = ax.hist(smpl, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') # Add a line showing the theoretical distribution F. y = 1 / (np.pi * (1 + np.square(bins))) y = y.cumsum() y /= y[-1] ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical') # tidy up the figure ax.grid(True) ax.legend(loc='right') ax.set_title('Cumulative step histograms') ax.set_xlabel('Number of incoming phone calls') ax.set_ylabel('Realized (empirical) probability of occurrence') plt.show() ############################### # D. instead of cumulative distribution one can depict density function. 
# It is especially inetersting to look at one picture with normalized histogram points = np.linspace(-100,100,50) pdf = 1 / (np.pi * (1 + np.square(points))) plt.hist(smpl, 20, density=True); plt.plot(points, pdf, color='r') plt.xlim((-100, 100)) plt.show() # 4. Standard Normal distribution # TODO smpl = np.random.standard_normal(size=N) # For instance, it each number can model an incoming tcpip connections received by some distant server # A. plt.plot(smpl, np.zeros_like(smpl), 'x') # its just one-dimensional plot, note, more common values will look more shady. plt.show() # have a look at smpl: print('first 10 realized values ',smpl[0:10]) # B. plt.hist(smpl, bins = 10) # bins is a number of "boxes" (disjoint intervals of equal "width" covering set of observations) we put observations in. Height of columns = number of observations in a box. plt.show() #C. fig, ax = plt.subplots(figsize=(8, 4)) # plot the cumulative histogram n_bins = 50 # technical parameter n, bins, patches = ax.hist(smpl, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') # Add a line showing the theoretical distribution F. y = ((1 / (np.sqrt(2 * np.pi))) * np.exp(-0.5 * np.square(bins))) y = y.cumsum() y /= y[-1] ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical') # tidy up the figure ax.grid(True) ax.legend(loc='right') ax.set_title('Cumulative step histograms') ax.set_xlabel('Number of incoming phone calls') ax.set_ylabel('Realized (empirical) probability of occurrence') plt.show() ############################### # D. instead of cumulative distribution one can depict density function. # It is especially inetersting to look at one picture with normalized histogram points = np.linspace(-5,5,100) pdf = ((1 / (np.sqrt(2 * np.pi))) * np.exp(-0.5 * np.square(points))) plt.hist(smpl, 30, density=True); plt.plot(points, pdf, color='r') plt.show()4. Normal distribution: commonly arises when a result is obtained as a sum of a big number of different and independent factors. It has 2 parameters: mean value $\mu$ and standard deviation $\sigma>0$ (or variance $\sigma^2$). Common examples are like Annual rainfall (mm), interestingly, class-grades (before rounding) behave quite similarly. It has a density function $$ f(x) = \frac{1}{\sqrt{2 \pi} \sigma)}\exp\big(-\frac{(x - \mu)^2}{ 2 \sigma^2} \big).$$ And a cumulative density function $F(x) = \int\limits_{-\infty}^{x}f(t)dt.$mu = 9 sigma = 29 # Generate sample smpl = np.random.normal(mu, sigma, size=100) # A. plt.plot(smpl, np.zeros_like(smpl), 'x') # its just one-dimensional plot, note, more common values will look more shady. plt.show() # B. plt.hist(smpl, bins = 10) plt.show() # C. fig, ax = plt.subplots(figsize=(8, 4)) # plot the cumulative histogram n, bins, patches = ax.hist(smpl, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') # Add a line showing the theoretical distribution F. y = ((1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) y = y.cumsum() y /= y[-1] ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical') # tidy up the figure ax.grid(True) ax.legend(loc='right') ax.set_title('Cumulative step histograms') ax.set_xlabel('Annual rainfall (mm)') ax.set_ylabel('Likelihood of occurrence') plt.show() ############################### # D. instead of cumulative distribution one can depict density function. 
# It is especially inetersting to look at one picture with normalized histogram points = np.linspace(-100,200,100) pdf = ((1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * (1 / sigma * (points - mu))**2)) plt.hist(smpl, 30, density=True); plt.plot(points, pdf, color='r') plt.show()EXERCISE 3*, standartization, ~~central~~ local limit theoremLet's return to Bernoulli model with $p = 1/3$.Generate n = 30000 different samples consisting of 50000 values each. For each sample calculate its realized (or empirical) mean-value $\bar{X}^{emp}$.You obtain n numbers. Let's consider it as a new __random__ sample n -element sample $\{\bar{X}_i\}_{i=1}^{n}$. Check theoretically that $\mathbb{E}\bar{X} = p$, $\mathbb{Var} \bar{X} = \frac{p(1-p)}{n}.$Do the following transform: $Y_i = \frac{\bar{X}_i - p}{\sqrt{p(1-p)/n}}, i = 1..n$,that subtract mean value and devide by std.deviation. Set ${Y_i} $ is a new random (however, you deal with realized values) sample. Compare it with standard normal diastribution. Plot density function and normalized histogram as above.p = 1/3 n = 30000 Y_0 = np.random.binomial(1, p, (n, 50000)).mean(axis=1) Y_new = (Y_0 - Y_0.mean()) / Y_0.std() Y_new_1 = (Y_0 - p) / np.sqrt(p * (1-p) / n) points = np.linspace(-5,5,100) pdf = ((1 / (np.sqrt(2 * np.pi))) * np.exp(-0.5 * np.square(points))) plt.figure(figsize=(10,3)) plt.subplot(1, 2, 1) plt.hist(Y_new, 30, density=True); plt.plot(points, pdf, color='r') plt.title('Actual Mean/Var') plt.subplot(1, 2, 2) plt.hist(Y_new_1, 30, density=True); plt.plot(points, pdf, color='r') plt.title('Theoretical Mean/Var') plt.show()EXERCISE 4, it's just a Poisson! , also see Problem 8 Random variable $\xi$ has a Poisson distribution with $\lambda$ parameter equal to 2. The new random variable $X$ is as follows: if $\xi=k$ then you throw a coin with probabilities of head/tails $(1/3,2/3)$ (1/3 is for 1, i.e. head) and sum up number of heads. That is the value of $X$. Here you need to generate a 100 element sample of $X$.In theoretical exercise, you'll need to show, that it is Poisson (2/3) distributed!#100 is a small number, I'll take N=100000 def generateOneX(a): global p return np.random.binomial(1, p, (a)).sum() p = 1/3 lam = 2 N = 1000000 smpl = np.random.poisson(lam=lam, size=N) sums = np.vectorize(generateOneX)(smpl) points = np.array(range(8)) pdf = np.exp(-lam * p) * (lam * p) ** points / np.vectorize(math.factorial)(points) plt.hist(sums, 7, density=True); plt.plot(points, pdf, color='r') plt.show() fig, ax = plt.subplots(figsize=(8, 4)) # plot the cumulative histogram n_bins = 7 # technical parameter n, bins, patches = ax.hist(sums, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') # Add a line showing the theoretical distribution F. y = np.exp(-lam * p) * (lam * p) ** bins / np.vectorize(math.factorial)(bins) y = y.cumsum() y /= y[-1] ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical') ax.grid(True) ax.legend(loc='right') ax.set_title('Cumulative step histograms') ax.set_xlabel('Number of X') ax.set_ylabel('Realized (empirical) probability of occurrence') plt.show()EXERCISE 5, conditional expectations, +see 10 points theoretical exercise!, see problem 9 in theoretical partA model of a die is given below. It is just a discrete uniformly distributed random variable $X$ with values 1..6.Google "roll a die" to see an animated model :)X = np.random.randint(1, 6, size = 1) print("result of rolling a die: ", X )result of rolling a die: [5]Consider 2 more random variables $Y$ and $Z$, that depend on $X$.1. 
$Y(X) = X^2$ -- it is just a squared outcome of a die roll.2. random varibale $Z=Z(X)$, that equals 1, if X is odd, and equal 0, otherwise. Calculate theoretically $\mathbb{E}(Y|Z) = ???$. This should be some function $m(Z)$ of $ Z$.In class we had a formula for the case when $m(Z) = \beta_2 Z+ \beta_1 = z'\beta$, where vector $z = (1, Z)$ and $(\beta_1, \beta_2)$ is a numeric vector.We showed that $\beta = \mathbb{E}(z'z)^{-1}\mathbb{E}(Yz)$ (look at the proof!)Show theoretically by matrix multiplication that in this case$\mathbb{E}(Y|Z) = \beta_1 + \beta_2 Z$ where$ \beta_1= \mathbb{E}y- \frac{\mathbb{cov}(Y,Z)}{\mathbb{var}{Z}}\mathbb{E}Z$$ \beta_2= \frac{\mathbb{cov}(Y,Z)}{\mathbb{var}{Z}}.$__Very important!__ $\mathbb{cov}$, $\mathbb{var}$ -- are theoretical covariance and theoretical variance; i.e. $\mathbb{cov}(Y,Z)$, $\mathbb{var}Z, \mathbb{E}Z$ are numbers, not random variables! Define function m(z) below to look at realization$E(Y|Z)$:def m(z): # calculate theoretically beta1 and beta2 beta1 = 56/3 beta2 = -7 return beta2 * z + beta1 # let's sample some more: smpl = np.random.randint(1, 6, size = 10) for x in smpl: z = 1 if x%2 == 1 else 0 # in fact it is a coin print("realized E(Y|Z) =" ,m(z), "; Z=" ,z)realized E(Y|Z) = 18.666666666666668 ; Z= 0 realized E(Y|Z) = 18.666666666666668 ; Z= 0 realized E(Y|Z) = 11.666666666666668 ; Z= 1 realized E(Y|Z) = 11.666666666666668 ; Z= 1 realized E(Y|Z) = 11.666666666666668 ; Z= 1 realized E(Y|Z) = 18.666666666666668 ; Z= 0 realized E(Y|Z) = 18.666666666666668 ; Z= 0 realized E(Y|Z) = 11.666666666666668 ; Z= 1 realized E(Y|Z) = 11.666666666666668 ; Z= 1 realized E(Y|Z) = 11.666666666666668 ; Z= 1Define matrix $Q= \mathbb{E}(z'z)^{-1}$Define vector column $v=\mathbb{E}(Yz)$Check your calculations!Q = np.matrix([[1, .5],[.5, .5]]) # fill 1-s with right values!!! v = np.array([[91/6],[35/6]]) # fill 1-s with right values!!! 
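# Where these numbers come from (assuming a fair six-sided die, as in the exercise text):
# with z = (1, Z), Q = E[z'z] = [[E 1, E Z], [E Z, E Z^2]] = [[1, 1/2], [1/2, 1/2]],
# because Z equals 1 on the three odd faces out of six and Z^2 = Z;
# v = E[Y z] = (E Y, E YZ) = ((1+4+9+16+25+36)/6, (1+9+25)/6) = (91/6, 35/6).
# Inverting Q and multiplying then gives beta = (56/3, -7), matching m(z) above.
# Side note: np.random.randint(1, 6) used above only samples 1..5 (the upper bound is
# exclusive); np.random.randint(1, 7) would model all six faces.
assert np.allclose(np.array(Q.I @ v).flatten(), [56/3, -7])  # quick numeric sanity check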
beta = Q.I @ v print("betas = " + ', '.join(map(str, list(np.array(beta).flatten()))))betas = 18.666666666666664, -7.0CatBoost Importing the libraries!pip install catboost import numpy as np import matplotlib.pyplot as plt import pandas as pdImporting the datasetdataset = pd.read_csv('Data.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values dataset.head()Splitting the dataset into the Training set and Test setfrom sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)Training CatBoost on the Training setfrom catboost import CatBoostClassifier classifier = CatBoostClassifier() classifier.fit(X_train, y_train)Learning rate set to 0.007956 0: learn: 0.6778283 total: 47.7ms remaining: 47.6s 1: learn: 0.6642874 total: 48.9ms remaining: 24.4s 2: learn: 0.6510578 total: 50ms remaining: 16.6s 3: learn: 0.6351685 total: 51.1ms remaining: 12.7s 4: learn: 0.6203906 total: 52.3ms remaining: 10.4s 5: learn: 0.6053561 total: 53.3ms remaining: 8.83s 6: learn: 0.5913363 total: 54.4ms remaining: 7.72s 7: learn: 0.5773888 total: 55.5ms remaining: 6.88s 8: learn: 0.5638394 total: 56.5ms remaining: 6.22s 9: learn: 0.5507421 total: 57.6ms remaining: 5.71s 10: learn: 0.5377201 total: 58.7ms remaining: 5.28s 11: learn: 0.5243873 total: 59.8ms remaining: 4.92s 12: learn: 0.5129034 total: 60.8ms remaining: 4.62s 13: learn: 0.5047204 total: 61.9ms remaining: 4.36s 14: learn: 0.4942404 total: 63ms remaining: 4.13s 15: learn: 0.4836253 total: 64ms remaining: 3.94s 16: learn: 0.4733355 total: 65.1ms remaining: 3.76s 17: learn: 0.4629416 total: 66.1ms remaining: 3.61s 18: learn: 0.4527778 total: 67.2ms remaining: 3.47[...]Making the Confusion Matrixfrom sklearn.metrics import confusion_matrix, accuracy_score y_pred = classifier.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred)[[84 3] [ 0 50]]This turned out higher that the XGBoost model! Applying k-Fold Cross Validationfrom sklearn.model_selection import cross_val_score accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10) print("Accuracy: {:.2f} %".format(accuracies.mean()*100)) print("Standard Deviation: {:.2f} %".format(accuracies.std()*100))Streaming output truncated to the last 5000 lines. 6: learn: 0.6007221 total: 9.92ms remaining: 1.41s 7: learn: 0.5865261 total: 11.1ms remaining: 1.37s 8: learn: 0.5760173 total: 12.3ms remaining: 1.35s 9: learn: 0.5641784 total: 13.2ms remaining: 1.3s 10: learn: 0.5538549 total: 14.3ms remaining: 1.28s 11: learn: 0.5413434 total: 15.4ms remaining: 1.27s 12: learn: 0.5308262 total: 16.5ms remaining: 1.26s 13: learn: 0.5187893 total: 17.6ms remaining: 1.24s 14: learn: 0.5084890 total: 18.7ms remaining: 1.23s 15: learn: 0.4986254 total: 19.8ms remaining: 1.22s 16: learn: 0.4890714 total: 20.9ms remaining: 1.21s 17: learn: 0.4790883 total: 21.9ms remaining: 1.2s 18: learn: 0.4700108 total: 23.1ms remaining: 1.19s 19: learn: 0.4630325 total: 24.2ms remaining: 1.18s 20: learn: 0.4536134 total: 25.3ms remaining: 1.18s 21: learn: 0.4429695 total: 26.4ms remaining: 1.17s 22: learn: 0.4362340 total: 27.6ms remaining: 1.17s 23: learn: 0.4280061 total: 28.7ms remaining: 1.17s 24: l[...]The Std Dev is almost same as XGBoost, however the mean accuracy has also slightly increase just as the test accuracy. 
So, definitely a better model than XGB, in this case.Exercise 2 Part 1 - Lists Use a loop to make a list of the integers from 5 to 10int_list = [] for i in range(5,11): int_list.append(i) int_listUse a list comprehension to make a list of the integers from 20 to 30int_list_2 = [i for i in range(20,31)] int_list_2Print just the 3rd, 4th, and 5th integers in that arrayprint(int_list_2[2:5])[22, 23, 24]Part 2 - Dictionaries and functions Make a dictionary that contains the number of characters for each word in a listE.g. ['hello', 'world!'] -> {'hello': 5, 'world!': 6}word_list = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune'] num_chars = {} for w in word_list: num_chars[w] = len(w) print(num_chars){'Mercury': 7, 'Mars': 4, 'Neptune': 7, 'Saturn': 6, 'Uranus': 6, 'Earth': 5, 'Jupiter': 7, 'Venus': 5}Add poor Pluto back to the listword_list.append('Pluto') word_listNow try to make up the lookup for the number of characters using a comprehension instead of a loopdict_comprehension = {w: len(w) for w in word_list} dict_comprehensionCount the total number of characters in the list using a looptotal = 0 for w in word_list: total += dict_comprehension[w] print(total)52Can you count the total number of characters using just `dict_comprehension` and no loops? _Hint_: Inspect the `dict_comprehension` object - does it contain any methods or properties that might help you?dir(dict_comprehension) sum(dict_comprehension.values())Part 3 - FactorsDesign a function which returns a list containing all of the [factors](https://en.wikipedia.org/wiki/Integer_factorization) of an integerE.g.: 10 -> [1, 2, 5, 10] 50 -> [1, 2, 5, 10, 25, 50]def factors(n): ''' Return a list containing the factors for integer n ''' fac = [] for i in range(1,n+1): if n % i == 0: fac.append(i) return fac factors(10)**Bonus**: Can you do it in one line?def one_line_factors(n): ''' Return a list containing the factors for integer n ''' return [i for i in range(1,n+1) if n % i == 0] one_line_factors(10)Part 4 - Fibonacci SequenceCreate a function which will return the first `n` numbers in the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number) starting at 1 (1, 1, 2, 3, 5, 8, 13, ...)def fib(n): ''' Return the first n digits of the Fibonacci sequence ''' seq = [1,1] while len(seq) < n: seq.append(seq[-1] + seq[-2]) return seq fib(10) assert fib(10)[-1] == 55**Bonus**: Design a function that creates a simple "graph" of the Fibonacci sequencee.g.``````_Hint_: What happens if you multiply a `string`?def fib_graph(n): ''' Draw a graph of the first n digits of the Fibonacci sequence ''' seq = fib(n) for i in seq: print('#' * i) fib_graph(10) def alt_fib_graph(n): ''' Draw a graph of the first n digits of the Fibonacci sequence ''' seq = fib(n) return '\n'.join([''.join(['#' for i in range(x)]) for x in seq]) print(alt_fib_graph(10))# # ## ### ##### ######## ############# ##################### ################################## #######################################################__Super Bonus__: Can you create a Fibonacci [`class`](https://docs.python.org/3/tutorial/classes.html) which calculates the sequence of order `n`, stores it, and can print it as a graph?class Fibonacci(): ''' Creates the Fibonacci sequence of order n, both storing it and making graphs of it ''' def __init__(self, n, graph_it=False): ''' Initialize the sequence by storing n, and then calculating the sequence ''' self.n = n self.calculate() if graph_it: self.graph() def calculate(self): ''' Calculate the Fibonacci 
sequence of order n and store it in self.seq ''' seq = [1,1] while len(seq) < self.n: seq.append(seq[-1] + seq[-2]) self.seq = seq def graph(self): ''' Print a graph of the Fibonacci sequence ''' for i in self.seq: print('#' * i) fib11 = Fibonacci(11) fib11.seq fib11.graph() fib8 = Fibonacci(8, graph_it=True)# # ## ### ##### ######## ############# #####################Функцииfrom datetime import datetime def get_seconds(): """Return current seconds""" return datetime.now().second print( get_seconds() ) # Вызов функции print( get_seconds.__doc__ ) # Получение документационной строки из магического атрибута print( get_seconds.__name__ ) # Получение имени функции из магического атрибута48 Return current seconds get_secondsЧаще всего функция определяется с параметрами. Если эту функцию вызвать без параметров, то выпадет ошибка `TypeError`:def split_tags(tag_string): tag_list = [] for tag in tag_string.split(','): tag_list.append(tag.strip()) # Обрезаем пробелы и заносим элементы в новый список tag_list return tag_list print( split_tags('python, coursera, mooc') ) # Передаем строку['python', 'coursera', 'mooc']Аннотация типовВ функции можно указать тип аргументов, а также тип возвращаемого значения:def add(x: int, y: int) -> int: return x + yОшибки нет, код исполняется, потому что Python это динамический язык и аннотация типов призвана помочь программисту или его IDE отловить какие-то ошибки:print( add(10, 11) ) print( add('still ', 'works') )21 still worksПо ссылке или по значению?def extender(source_list, extend_list): return source_list.extend(extend_list) source_list = [1, 2, 3] extender(source_list, [4, 5, 6]) print(source_list)[1, 2, 3, 4, 5, 6]Неизменяемый объект:def replacer(source_tuple, replace_with): source_tuple = replace_with # Менять значения передаваемых в функцию переменных - дурной тон user_info = ('Guido', '31/01') replacer(user_info, ('Larry', '27/09')) # Объект не изменился, так как tuple() неизменяемый тип print(user_info)('Guido', '31/01')Именованные аргументыdef say(greeting, name): print('{} {}!'.format(greeting, name)) # Можно передать аргументы в функцию в другом порядке, указав имя и значение аргумента say('Hello', 'Kitty') # Hello Kitty! say(name='Kitty', greeting='Hello') # Hello Kitty!Hello Kitty! Hello Kitty!Область видимостиresult = 0 def increment(): # Переменные объявленные вне функции нельзя изменять внутри функции # Существуют модификаторы global или non local, но эти особенности не рекомендуется использовать result += 1 # Если вызвать функцию increment(), то она выпадет в ошибку: # UnboundLocalError: local variable 'result' referenced before assignment # # increment()Аргументы по умолчаниюdef greeting(name='Vasya'): print('Hello, {}'.format(name)) greeting() # Hello, Vasya greeting('Petya') # Hello, PetyaHello, Vasya Hello, PetyaОднако, надо быть осторожным, если в качестве аргумента мы передаем изменяемое значение, например, список:def append_one(iterable=[]): iterable.append(1) return iterable # Ожидаемый результат: print( append_one([5]) ) # [5, 1][5, 1]Однако, что же произойдет, если мы вызовем функцию несколько раз?print( append_one() ) # [1] print( append_one() ) # [1, 1] print( append_one() ) # [1, 1, 1][1] [1, 1] [1, 1, 1]Почему так происходит? Вначале, как мы ожидали вернется список из одной единицы, потому что функция взяла дефолтное значение (пустой список) и вставила значение 1. Если функция вызывается второй раз, то в списке возвращается две единички, хотя мы ожидаем одну. 
Чтобы понять, почему так, можно посмотреть у функции атрибут `__defaults__`:print( append_one.__defaults__ )([1, 1, 1],)Оказывается, там уже содержаться эти самые 3 единички. Почему так происходит? При определении/инициализации функции определяются связи между именем функции и дефолтными значениями, т.о. у каждой функции появляется `tuple()` с дефолтными значениями-переменными и именно для этой переменной каждый раз вызывается метод `append()` в теле функции.Таким образом, дефолтные значения не инициализируются каждый раз при вызове функции.Что же нужно делать в таком случае? Как избежать этого?В объявлении функции нужно указать дефолтное значение как `None`, и если значение аргумента не передано,то создать его с помощью условия внутри функции:def append_one(iterable=None): if iterable is None: iterable = [] # Список создается в локальной области видимости функции, а не в атрибуте __defaults__ iterable.append(1) return iterable # Возвращаем из функции ссылку на созданный объект print( append_one() ) # [1] print( append_one() ) # [1][1] [1]Звездочки в аргументах. Распаковка аргументовФункция принимает разное количество аргументов:def printer(*args): print(type(args)) # for arg in args: print(arg) printer(1, 2, 3, 4, 5) 1 2 3 4 5Точно так же, мы можем разворачивать список в аргументы:name_list = ['John', 'Bill', 'Amy'] printer(*name_list) # Список распаковывается в аргументы John Bill AmyТак же, это работает в случае со словарями:def printer(**kwargs): # Происходит упаковка переданных аргументов в словарь kwargs print(type(kwargs)) # for k, v in kwargs.items(): print('{}: {}'.format(k, v)) printer(a=10, b=11) a: 10 b: 11Возможно разворачивать словари в обратную сторону:payload = { 'user_id': 117, 'feedback': { 'subject': 'Registration fields', 'message': 'There is no country for old man' } } printer(**payload) # Распаковывается в последовательность аргументов user_id и feedback user_id: 117 feedback: {'subject': 'Registration fields', 'message': 'There is no country for old man'}Visualization 1: Matplotlib Basics Exercises%matplotlib inline import matplotlib.pyplot as plt import numpy as np from __future__ import print_function from IPython.html.widgets import interact, interactive, fixed from IPython.html import widgets:0: FutureWarning: IPython widgets are experimental and may change in the future.Scatter plots Learn how to use Matplotlib's `plt.scatter` function to make a 2d scatter plot.* Generate random data using `np.random.randn`.* Style the markers (color, size, shape, alpha) appropriately.* Include an x and y label and title.randx = np.random.randn(500) randy = np.random.randn(500) plt.scatter(randx, randy, color = "g", marker = "x") plt.xlabel("Random X") plt.ylabel("Random Y") plt.title("Random Data!!!!!") plt.box(False) plt.grid(True)Histogram Learn how to use Matplotlib's `plt.hist` function to make a 1d histogram.* Generate randpom data using `np.random.randn`.* Figure out how to set the number of histogram bins and other style options.* Include an x and y label and title.data = np.random.randn(500000) def plothist(bins, numdata): plt.hist(np.random.randn(numdata), bins=bins, color = "k", ec = "w") interact(plothist, bins=widgets.IntSlider(min=1,max=100,step=1,value=10), numdata=\ widgets.IntSlider(min=10,max=10000,step=10,value=10)); plt.xlabel("Random Variable X") plt.ylabel("Counts") plt.title("Distribution of a random variable in abjustable bins.")Table of Contents1  Load libraries2  Load data3  Train model4  Evaluation Load librariesimport numpy as np import 
matplotlib.pyplot as plt import evaluation import _pickle as cPickleLoad datadata_dir = "./data/" + "dataset/" MFCC_LEN_LIM = 700 N_MFCC = 13 FOLDS = 10 def load_data(folds): X = [] y = [] for fold in folds: with open(data_dir + f"data_mfcc_{fold}.pkl", 'rb') as f: data_mfcc = cPickle.load(f) for i in range(len(data_mfcc['sentence_I'])): if data_mfcc['sentence_I'][i].shape[0] < MFCC_LEN_LIM: sentence_I = np.zeros((MFCC_LEN_LIM, N_MFCC), dtype=np.float32) sentence_I[-data_mfcc['sentence_I'][i].shape[0]:] = data_mfcc['sentence_I'][i] else: sentence_I = data_mfcc['sentence_I'][i] if data_mfcc['sentence_II'][i].shape[0] < MFCC_LEN_LIM: sentence_II = np.zeros((MFCC_LEN_LIM, N_MFCC), dtype=np.float32) sentence_II[-data_mfcc['sentence_II'][i].shape[0]:] = data_mfcc['sentence_II'][i] else: sentence_II = data_mfcc['sentence_II'][i] X.append([sentence_I, sentence_II]) y.append(data_mfcc['same_speaker'][i]) return np.array(X), np.array(y) X_train, y_train = load_data([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) X_train.shape np.save(data_dir + "data_mfcc_x.npy", X_train) np.save(data_dir + "data_mfcc_y.npy", y_train) X_train = np.load(data_dir + "data_mfcc_x.npy") y_train = np.load(data_dir + "data_mfcc_y.npy") X_val, y_val = load_data(["val"]) X_val.shapeTrain modelfrom modeling.gru_gru import GRU_GRU model = GRU_GRU(num_features=N_MFCC) model.fit(X_train, y_train, epochs=100, early_stopping_rounds=7, X_val=X_val, y_val=y_val) model.save_weights() model.load_weights() model.fit(X_train, y_train, epochs=100, early_stopping_rounds=100, X_val=X_val, y_val=y_val) model.save_weights()Evaluationmodel.load_weights() X_test, y_test = load_data(["test"]) y_pred = model.predict(X_test) evaluation.plot_confusion_matrix(y_test, y_pred) evaluation.print_classification_report(y_test, y_pred) evaluation.get_f1_score(y_test, y_pred, average='macro')Introduction MotivationThis notebook follows up `model_options.ipynb`.The key difference is that we filter using the category distance metric (see `bin/wp-get-links` for details), rather than relying solely on the regression to pick relevant articles. Thus, we want to decide what an appropriate category distance threshold is.Our hope is that by adding this filter, we can now finalize the regression algorithm selection and configuration. Summary(You may want to read this section last, as it refers to the full analysis below.) Q1. Which algorithm?In `model_options`, we boiled down the selection to three choices:1. Lasso, normalized, positive, auto 𝛼 (LA_NPA)2. Elastic net, normalized, positive, auto 𝛼, auto 𝜌 (EN_NPAA)3. Elastic net, normalized, positive, auto 𝛼, manual 𝜌 = ½ (EN_NPAM)The results below suggest three conclusions:1. LA_NPA vs. EN_NPAA: * EN_NPAA has (probably insignificantly) better RMSE. * Forecasts look almost identical. * EN_NPAA chooses a more reasonable-feeling number of articles. * EN_NPAA is more principled (lasso vs. elastic net).2. EN_NPAM vs. EN_NPAA: * EN_NPAA has better RMSE. * Forecasts look almost identical, except EN_NPAM has some spikes in the 2014–2015 season, which probably accounts for the RMSE difference. * EN_NPAA chooses fewer articles, though EN_NPAM does not feel excessive. * EN_NPAA is more principles (manual 𝜌 vs. auto). On balance, **EN_NPAA seems the best choice**, based on principles and article quantity rather than results, which are nearly the same across the board. Q2. 
What distance threshold?Observations for EN_NPAA at distance threshold 1, 2, 3:* d = 2 is where RMSE reaches its minimum, and it stays more or less the same all the way through d = 8.* d = 2 and 3 have nearly identical-looking predictions.* d = 2 and 3 have very similar articles and coefficient ranking. Of the 10 and 13 articles respectively, 9 are shared and in almost the same order.These suggests that the actual models for d = 2..8 are very similar. Further:* d = 2 or 3 does not have the spikes in 3rd season that d = 1 does. This suggests that the larger number of articles gives a more robust model.* d = 2 or 3 matches the overall shape of the outbreak better than d = 1, though the latter gets the peak intensity more correct in the 1st season.Finally, examining the article counts in `COUNTS` and `COUNTS_CUM`, d = 2 would give very small input sets in some of the sparser cases, while d = 3 seems safer (e.g., "es+Infecciones por clamidias" and "he+שעלת"). On the other hand, Arabic seems to have a shallower category structure, and d = 3 would capture most articles.On balance, **d = 3 seems the better choice**. It performs as well as d = 2, without catching irrelevant articles, and d = 2 seems too few articles in several cases. The Arabic situation is a bit of an unknown, as none of us speak Arabic, but erring on the side of too many articles seems less risky than clearly too few. Q3. What value of 𝜌?In both this notebook and `model_options`, every auto-selected 𝜌 has been 0.9, i.e., mostly lasso. Thus, we will **fix 𝜌 = 0.9** for performance reasons. ConclusionWe select **EN_NPAM with 𝜌 = 0.9**. Preamble Imports%matplotlib inline import collections import gzip import pickle import os import urllib.parse import numpy as np import matplotlib as plt import pandas as pd import sklearn as sk import sklearn.linear_model DATA_PATH = os.environ['WEIRD_AL_YANKOVIC'] plt.rcParams['figure.figsize'] = (12, 4)Load, preprocess, and clean data Load and preprocess the truth spreadsheet.truth = pd.read_excel(DATA_PATH + '/truth.xlsx', index_col=0) TRUTH_FLU = truth.loc[:,'us+influenza'] # pull Series TRUTH_FLU.index = TRUTH_FLU.index.to_period('W-SAT') TRUTH_FLU.head()Load the Wikipedia link data. We convert percent-encoded URLs to Unicode strings for convenience of display.def unquote(url): (lang, url) = url.split('+', 1) url = urllib.parse.unquote(url) url = url.replace('_', ' ') return (lang + '+' + url) raw_graph = pickle.load(gzip.open(DATA_PATH + '/articles/wiki-graph.pkl.gz')) GRAPH = dict() for root in raw_graph.keys(): unroot = unquote(root) GRAPH[unroot] = { unquote(a): d for (a, d) in raw_graph[root].items() }Load all the time series. Most of the 4,299 identified articles were in the data set.Note that in contrast to `model_options`, we do not remove any time series by the fraction that they are zero. The results seem good anyway. 
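For reference, such a zero-fraction filter might look like the sketch below (the 0.5 threshold is an assumption for illustration, not a value used in this analysis); it would act on the TS_ALL frame loaded in the next cell:
zero_frac = (TS_ALL == 0).mean(axis=0)    # fraction of weeks in which each article's series is zero
TS_KEPT = TS_ALL.loc[:, zero_frac < 0.5]  # keep only articles that are mostly non-zero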
This filter also may not apply well to the main experiment, because the training periods are often not long enough to make it meaningful.TS_ALL = pd.read_csv(DATA_PATH + '/tsv/forecasting_W-SAT.norm.tsv', sep='\t', index_col=0, parse_dates=True) TS_ALL.index = TS_ALL.index.to_period('W-SAT') TS_ALL.rename(columns=lambda x: unquote(x[:-5]), inplace=True) len(TS_ALL.columns) (TS_ALL, TRUTH_FLU) = TS_ALL.align(TRUTH_FLU, axis=0, join='inner') TRUTH_FLU.plot() TS_ALL.iloc[:,:5].plot()Summarize distance from root Number of articles by distance from each root.COUNTS = pd.DataFrame(columns=range(1,9), index=sorted(GRAPH.keys())) COUNTS.fillna(0, inplace=True) for (root, leaves) in GRAPH.items(): for (leaf, dist) in leaves.items(): COUNTS[dist][root] += 1 COUNTSNumber of articles of at most the given distance.COUNTS_CUM = COUNTS.cumsum(axis=1) COUNTS_CUMParameter sweep Return the set of articles with maximum category distance from a given root.def articles_dist(root, dist): return { a for (a, d) in GRAPH[root].items() if d <= dist }Return time series for articles with a maximum category distance from the given root.def select_by_distance(root, d): keep_cols = articles_dist(root, d) return TS_ALL.filter(items=keep_cols, axis=1) select_by_distance('en+Influenza', 1).head()Fit function. The core is the same as the `model_options` one, with non-constant training series set and a richer summary.def fit(root, train_week_ct, d, alg, plot=True): ts_all = select_by_distance(root, d) ts_train = ts_all.iloc[:train_week_ct,:] truth_train = TRUTH_FLU.iloc[:train_week_ct] m = alg.fit(ts_train, truth_train) m.input_ct = len(ts_all.columns) pred = m.predict(ts_all) pred_s = pd.Series(pred, index=TRUTH_FLU.index) m.r = TRUTH_FLU.corr(pred_s) m.rmse = ((TRUTH_FLU - pred_s)**2).mean() m.nonzero = np.count_nonzero(m.coef_) if (not hasattr(m, 'l1_ratio_')): m.l1_ratio_ = -1 # this is just a line to show how long the training period is train_period = TRUTH_FLU.iloc[:train_week_ct].copy(True) train_period[:] = 0 if (plot): pd.DataFrame({'truth': TRUTH_FLU, 'prediction': pred, 'training pd': train_period}).plot(ylim=(-1,9)) sumry = pd.DataFrame({'coefs': m.coef_, 'coefs_abs': np.abs(m.coef_)}, index=ts_all.columns) sumry.sort_values(by='coefs_abs', ascending=False, inplace=True) sumry = sumry.loc[:, 'coefs'] for a in ('intercept_', 'alpha_', 'l1_ratio_', 'nonzero', 'rmse', 'r', 'input_ct'): try: sumry = pd.Series([getattr(m, a)], index=[a]).append(sumry) except AttributeError: pass return (m, pred, sumry)Which 𝛼 and 𝜌 to explore? 
Same as `model_options`.ALPHAS = np.logspace(-15, 2, 25) RHOS = np.linspace(0.1, 0.9, 9)Try all distance filters and summarize the result in a table.def fit_summary(root, label, train_week_ct, alg, **kwargs): result = pd.DataFrame(columns=[[label] * 4, ['input_ct', 'rmse', 'rho', 'nonzero']], index=range(1, 9)) preds = dict() for d in range(1, 9): (m, preds[d], sumry) = fit(root, train_week_ct, d, alg(**kwargs), plot=False) result.loc[d,:] = (m.input_ct, m.rmse, m.l1_ratio_, m.nonzero) return (result, preds)Lasso, normalized, positive, auto 𝛼la_npa = fit_summary('en+Influenza', 'la_npa', 104, sk.linear_model.LassoCV, normalize=True, positive=True, alphas=ALPHAS, max_iter=1e5, selection='random', n_jobs=-1) la_npa[0] (m, _, s) = fit('en+Influenza', 104, 1, sk.linear_model.LassoCV(normalize=True, positive=True, alphas=ALPHAS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 2, sk.linear_model.LassoCV(normalize=True, positive=True, alphas=ALPHAS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 3, sk.linear_model.LassoCV(normalize=True, positive=True, alphas=ALPHAS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27)Elastic net, normalized, positive, auto 𝛼, auto 𝜌en_npaa = fit_summary('en+Influenza', 'en_npaa', 104, sk.linear_model.ElasticNetCV, normalize=True, positive=True, alphas=ALPHAS, l1_ratio=RHOS, max_iter=1e5, selection='random', n_jobs=-1) en_npaa[0] (m, _, s) = fit('en+Influenza', 104, 1, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=RHOS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 2, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=RHOS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 3, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=RHOS, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27)Elastic net, normalized, positive, auto 𝛼, manual 𝜌 = ½en_npam = fit_summary('en+Influenza', 'en_npam', 104, sk.linear_model.ElasticNetCV, normalize=True, positive=True, alphas=ALPHAS, l1_ratio=0.5, max_iter=1e5, selection='random', n_jobs=-1) en_npam[0] (m, _, s) = fit('en+Influenza', 104, 1, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=0.5, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 2, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=0.5, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27) (m, _, s) = fit('en+Influenza', 104, 3, sk.linear_model.ElasticNetCV(normalize=True, positive=True, alphas=ALPHAS, l1_ratio=0.5, max_iter=1e5, selection='random', n_jobs=-1)) s.head(27)Summary All the result tables next to one another.pd.concat([la_npa[0], en_npaa[0], en_npam[0]], axis=1)Plot the predictions by distance filter next to one another.def plot(data, ds): for d in ds: D = collections.OrderedDict([('truth', TRUTH_FLU)]) D[d] = data[1][d] pd.DataFrame(D).plot(figsize=(12,3)) plot(la_npa, range(1, 4)) plot(en_npaa, range(1, 4)) plot(en_npam, range(1, 4))MissOh DataLoader# authored by haeyong.kang # date : 2020/06/21AnotherMissOh Visual Structure- json_data['file_name'] : 'AnotherMissOh01.mp4'- json_data['visual_results']- json_data['visual_results'][0].keys() : dict_keys(['start_time', 'end_time', 'vid', 'image_info'])- {'start_time': '00:02:51;16', 'end_time': 
'00:02:54;15', 'vid': 'AnotherMissOh01_001_0078', 'image_info': ...}- json_data['visual_results'][0]['image_info']- [{'frame_id': 'AnotherMissOh01_001_0078_IMAGE_0000004295', 'place': 'none', 'persons': [{'person_id': 'Haeyoung1', 'person_info': {'face_rect': {'min_x': 515, 'min_y': 0, 'max_x': 845, 'max_y': 443}, 'full_rect': {'min_x': 278, 'min_y': 2, 'max_x': 1025, 'max_y': 769}, 'behavior': 'stand up', 'predicate': 'none', 'emotion': 'Neutral', 'face_rect_score': '0.5', 'full_rect_score': '0.9'}, 'related_objects': []}], 'objects': []}, - {'frame_id': 'AnotherMissOh01_001_0078_IMAGE_0000004311', 'place': '', 'persons': [{'person_id':'Haeyoung1','person_info': {'face_rect': {'min_x': 515, 'min_y': 0, 'max_x': 831, 'max_y': 411}, 'full_rect': {'min_x': 270, 'min_y': 0, 'max_x': 1025, 'max_y': 768}, 'behavior': 'stand up', 'predicate': 'none', 'emotion': 'Neutral', 'face_rect_score': '0.5', 'full_rect_score': '0.9'}, 'related_objects': []}],'objects': []},]import sys sys.path.append("../") # go to parent dir import os from torch.utils.data import Dataset, DataLoader import cv2 import pickle import numpy as np import glob from torchvision.transforms import Compose, Resize, ToTensor, Normalize from PIL import Image import json import argparse import matplotlib.pyplot as plt from Yolo_v2_pytorch.src.utils import * from graphviz import Digraph, Graph import networkx as nx from networkx.drawing.nx_pydot import read_dot #from networkx.drawing.nx_agraph import read_dot from networkx.readwrite import json_graph read_dot def is_not_blank(s): return bool(s and s.strip()) MissOh_CLASSES = ['person'] print(MissOh_CLASSES[0]) global colors colors = pickle.load(open("../Yolo_v2_pytorch/src/pallete", "rb")) print(colors[0]) def get_args(): parser = argparse.ArgumentParser( "You Only Look Once:Unified, Real-Time Object Detection") parser.add_argument("--image_size", type=int, default=448, help="The common width and height for all images") parser.add_argument("--batch_size", type=int, default=1, help="The number of images per batch") # Training base Setting parser.add_argument("--momentum", type=float, default=0.9) parser.add_argument("--decay", type=float, default=0.0005) parser.add_argument("--dropout", type=float, default=0.5) parser.add_argument("--num_epoches", type=int, default=100) parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases") parser.add_argument("--object_scale", type=float, default=1.0) parser.add_argument("--noobject_scale", type=float, default=0.5) parser.add_argument("--class_scale", type=float, default=1.0) parser.add_argument("--coord_scale", type=float, default=5.0) parser.add_argument("--reduction", type=int, default=32) parser.add_argument("--es_min_delta", type=float, default=0.0, help="Early stopping's parameter:minimum change loss to qualify as an improvement") parser.add_argument("--es_patience", type=int, default=0, help="Early stopping's parameter:number of epochs with no improvement after which training will be stopped. 
Set to 0 to disable this technique.") parser.add_argument("--pre_trained_model_type", type=str, choices=["model", "params"], default="model") parser.add_argument("--pre_trained_model_path", type=str, default="Yolo_v2_pytorch/trained_models/only_params_trained_yolo_voc") # Pre-training path parser.add_argument("--saved_path", type=str, default="./checkpoint") # saved training path parser.add_argument("--conf_threshold", type=float, default=0.35) parser.add_argument("--nms_threshold", type=float, default=0.5) args = parser.parse_args(args=[]) # for jupyter return args opt = get_args() print(opt) # visualize the images and labels height, width = (768, 1024) width_ratio = 448 / width height_ratio = 448 / height class AnotherMissOh(Dataset): def __init__(self, dataset, img_path, json_path, display_log=True): self.display_log = display_log self.init_clips(img_path) self.load_json(dataset,img_path, json_path) def init_clips(self, img_path): self.cnt_clips = 0 self.img_path = img_path self.img_size = (1024, 768) self.img_scaled_size = (448, 448) tform = [ Resize(self.img_scaled_size), # should match to Yolo_V2 ToTensor(), #Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # should match to Yolo_V2 ] self.transformations = Compose(tform) ''' clips = { 'episode' : [], 'clip' : [], 'start_time' : [], 'end_time' : [], 'vid' : [], 'img_size' : [], 'img_scaled_size' : [], 'image_info' : []} image_info = { 'frame_id': [], 'place' : [], 'persons' : []} persons = { 'person_id': [], 'face_rect' : [], 'full_rect' : [], 'behavior' : [], 'predicate' : [], 'emotion' : [], 'face_rect_score' : [], 'full_rect_score' : []} ''' def load_json(self, dataset, img_path, json_path): self.clips = [] for episode in dataset: img_dir = img_path + 'AnotherMissOh{:02}/'.format(episode) json_dir = json_path + 'AnotherMissOh{:02}_ver3.2.json'.format(episode) if self.display_log: print('imag_dir:{}'.format(img_dir)) print('json_dir:{}'.format(json_dir)) with open(json_dir, encoding='utf-8') as json_file: json_data = json.load(json_file) for i in range(len(json_data['visual_results'])): clip = {} clip['episode'] = [] clip['clip'] = [] clip['start_time'] = [] clip['end_time'] = [] clip['vid'] = [] clip['image_info'] = [] if self.display_log: print("***{}th episode***{}th clips***************************************".format(episode, i)) print("['visual_results'][{}]['start_time']:{}".format(i,json_data['visual_results'][i]['start_time'])) print("['visual_results'][{}]['end_time']:{}".format(i,json_data['visual_results'][i]['end_time'])) print("['visual_results'][{}]['vid']:{}".format(i,json_data['visual_results'][i]['vid'].replace('_', '/'))) print("['visual_results'][{}]['img_size']:{}".format(i,img_size)) print("['visual_results'][{}]['img_scaled_size']:{}".format(i,img_scaled_size)) print("['visual_results'][{}]['episode']:{}".format(i,episode)) clip['episode'].append(episode) clip['clip'].append(i) clip['start_time'].append(json_data['visual_results'][i]['start_time']) clip['end_time'].append(json_data['visual_results'][i]['end_time']) clip['vid'].append(json_data['visual_results'][i]['vid'].replace('_', '/')) for j, info in enumerate(json_data['visual_results'][i]['image_info']): image_info = {} image_info['frame_id'] = [] image_info['place'] = [] image_info['objects'] = {} image_info['persons'] = {} if self.display_log: print("=============={}th frame========================================".format(j)) img_file = img_dir + json_data['visual_results'][i]['vid'].replace('_', '/')[-8:] + '/'+ info['frame_id'][-16:] 
+ '.jpg' image_info['frame_id'].append(img_file) image_info['place'].append(info['place']) image_info['objects']['object_id']=[] image_info['objects']['object_rect']=[] for k, obj in enumerate(info['objects']): image_info['objects']['object_id'].append(obj['object_id']) object_bbox = obj['object_rect'] if (object_bbox['min_y'] == "" or object_bbox['max_y'] == "" or object_bbox['min_x'] == "" or object_bbox['max_x'] == ""): object_rect = [] continue else: object_rect = [object_bbox['min_x'], object_bbox['min_y'], object_bbox['max_x'], object_bbox['max_y']] image_info['objects']['object_rect'].append(object_rect) image_info['persons']['person_id']=[] image_info['persons']['face_rect']=[] image_info['persons']['full_rect']=[] image_info['persons']['behavior']=[] image_info['persons']['predicate']=[] image_info['persons']['emotion']=[] image_info['persons']['face_rect_score']=[] image_info['persons']['full_rect_score']=[] image_info['persons']['related_object_id']=[] image_info['persons']['related_object_rect']=[] for k, person in enumerate(info['persons']): if self.display_log: print("--------------------{}th person-----------------------------".format(k)) image_info['persons']['person_id'].append(person['person_id']) #import pdb; pdb.set_trace() for j, robj in enumerate(person['related_objects']): image_info['persons']['related_object_id'].append(robj['related_object_id']) robj_bbox = robj['related_object_rect'] if (robj_bbox['min_y'] == "" or robj_bbox['max_y'] == "" or robj_bbox['min_x'] == "" or robj_bbox['max_x'] == ""): related_object_rect = [] continue else: related_object_rect = [robj_bbox['min_x'], robj_bbox['min_y'], robj_bbox['max_x'], robj_bbox['max_y']] image_info['persons']['related_object_rect'].append(related_object_rect) face_bbox = person['person_info']['face_rect'] if (face_bbox['min_y'] == "" or face_bbox['max_y'] == "" or face_bbox['min_x'] == "" or face_bbox['max_x'] == ""): face_rect = [] continue else: face_rect = [face_bbox['min_x'], face_bbox['min_y'], face_bbox['max_x'], face_bbox['max_y']] image_info['persons']['face_rect'].append(face_rect) full_bbox = person['person_info']['full_rect'] if (full_bbox['min_y'] == "" or full_bbox['max_y'] == "" or full_bbox['min_x'] == "" or full_bbox['max_x'] == ""): full_rect = [] continue else: full_rect = [full_bbox['min_x'], full_bbox['min_y'], full_bbox['max_x'], full_bbox['max_y']] image_info['persons']['full_rect'].append(full_rect) image_info['persons']['behavior'].append(person['person_info']['behavior']) image_info['persons']['predicate'].append(person['person_info']['predicate']) image_info['persons']['emotion'].append(person['person_info']['emotion']) image_info['persons']['face_rect_score'].append(person['person_info']['face_rect_score']) image_info['persons']['full_rect_score'].append(person['person_info']['full_rect_score']) clip['image_info'].append(image_info) self.clips.append(clip) def __len__(self): return len(self.clips) def __getitem__(self, item): info = self.clips[item]['image_info'] episode = self.clips[item]['episode'] clip = self.clips[item]['clip'] start_time = self.clips[item]['start_time'] end_time = self.clips[item]['start_time'] images = [] for it, frame in enumerate(info): img = Image.open(frame['frame_id'][0]).convert('RGB') img = self.transformations(img) images.append(img) return images, info, episode, clip, start_time, end_time img_path = '../data/AnotherMissOh/AnotherMissOh_images/' json_path = '../data/AnotherMissOh/AnotherMissOh_Visual_ver3.2/' episode = 1 train = [episode] train_set = 
AnotherMissOh(train, img_path, json_path, False) def graph(episode, scene, frm,st,et,info, save_file, debug = False): import string strseq = string.ascii_uppercase # define graph dot = Digraph('G',filename='{}.gv'.format(save_file),engine='fdp') dot.attr('graph', rotate = '0', dpi='600',rankdir='TB', size='10,8') dot.attr('node', height='0.1', fontsize='6') dot.attr('edge', fontsize='6') place = "{}".format(info['place'][0]) sound = "{}".format('sound') if not is_not_blank(place): place = 'none' if not is_not_blank(sound): sound = 'none' num_of_persons = len(info['persons']['person_id']) num_of_objects = len(info['objects']['object_id']) frm_graph = 'episode_{}_scene_{}_frame_{}'.format( episode, scene, frm) #dot.node(frm_graph, style='filled', color='lightgrey') episode_node = "episode_{:02d}".format(episode) scene_node = "scene_{:03d}".format(scene) frame_node = "frame_{:04d}".format(frm) dot.node(episode_node, style='filled', color='lightgrey') dot.node(scene_node, style='filled', color='lightgrey') dot.node(frame_node, style='filled', color='lightgrey') dot.node(place, style='filled', color='lightblue') dot.node(sound, style='filled', color='lightblue') if is_not_blank(episode_node) and is_not_blank(scene_node): dot.edge(episode_node, scene_node) if is_not_blank(scene_node) and is_not_blank(frame_node): dot.edge(scene_node, frame_node) if is_not_blank(frame_node) and is_not_blank(place): dot.edge(frame_node, place) if is_not_blank(frame_node) and is_not_blank(sound): dot.edge(frame_node, sound) for p in range(num_of_objects): try: object_id = info['objects']['object_id'][p] except: object_id = 'none' #continue if is_not_blank(object_id) and object_id is not 'person': dot.node(object_id, style='filled', color='gold') if is_not_blank(frame_node) and is_not_blank(object_id): dot.edge(frame_node, object_id) for p in range(num_of_persons): try: person_id = "{}".format(info['persons']['person_id'][p]) except: person_id = 'none' #continue try: behavior = "{}".format(info['persons']['behavior'][p]) except: person_id = 'none' #continue try: predicate = "{}".format(info['persons']['predicate'][p]) except: person_id = 'none' #continue try: emotion = "{}".format(info['persons']['emotion'][p]) except: person_id = 'none' #continue try: robj_id = "{}".format(info['persons']['related_object_id'][p]) except: robj_id = '' #continue if is_not_blank(person_id): dot.node(person_id) if is_not_blank(behavior): dot.node(behavior, style='filled', color='green') #if is_not_blank(predicate): # dot.node(predicate, style='filled', color='yellow') if is_not_blank(emotion): dot.node(emotion, style='filled', color='blue') if is_not_blank(frame_node) and is_not_blank(person_id): dot.edge(frame_node, person_id) if is_not_blank(person_id) and is_not_blank(behavior): dot.edge(person_id, behavior) if is_not_blank(person_id) and is_not_blank(predicate) and is_not_blank(robj_id): dot.edge(person_id, robj_id, label=predicate, color='red') #dot.edge(predicate, robj_id) if is_not_blank(person_id) and is_not_blank(emotion): dot.edge(person_id, emotion) # show in image dot.format = 'png' dot.render('{}.gv'.format(save_file), view=True) graph = cv2.imread('{}.gv.png'.format(save_file)) graph = cv2.resize(graph, dsize=(0, 0), fx=600.0/graph.shape[0], fy=600.0/graph.shape[0]) if debug: plt.figure(figsize=(8,8)) plt.imshow(graph) plt.show() save_dir = '../results/drama_graph/' if not os.path.exists(save_dir): os.makedirs(save_dir) print(len(train_set)) debug = False for scene in range(len(train_set)): #if scene < 1033: # continue 
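# For each clip: load its resized frames and annotations, render the per-frame scene
# graph with graphviz, then draw the annotated object / face / full-body boxes together
# with the place, emotion and behavior labels on every frame and save it as a PNG.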
images, info, episode, scene, st, et = train_set[scene] scene= scene[0] episode= episode[0] for frm in range(len(images)): image = images[frm].cpu().numpy() #print(image) imageInfo = cv2.cvtColor(np.transpose(image * 255,(1,2,0)), cv2.COLOR_RGB2BGR) frm_name = "episode_{:02d}_scene_{:03d}_frame_{:04d}".format(episode, scene, frm) save_file = save_dir + frm_name print("episode:{}, scene:{}, frame:{} st:{}, et:{}".format( episode, scene, frm, st, et)) place = info[frm]['place'][0] sound = 'sound' num_of_persons = len(info[frm]['persons']['person_id']) num_of_objects = len(info[frm]['objects']['object_id']) graph(episode, scene, frm, st, et ,info[frm], save_file, debug) # read dot graph #dot_graph = nx.nx_pydot.read_dot('{}.gv'.format(save_file)) #s_graph = json.dumps(json_graph.node_link_data(dot_graph)) for p in range(num_of_objects): try: object_id = info[frm]['objects']['object_id'][p] except: object_id = 'none' continue try: object_rect= info[frm]['objects']['object_rect'][p] except: object_rect = 'none' continue print("object_id:{}".format(object_id)) print("object_rect:{}".format(object_rect)) xmin_ = int(max(object_rect[0] * width_ratio, 0)) ymin_ = int(max(object_rect[1] * height_ratio, 0)) xmax_ = int(min((object_rect[2]) * width_ratio, 448)) ymax_ = int(min((object_rect[3]) * height_ratio, 448)) cv2.rectangle(imageInfo, (xmin_, ymin_), (xmax_, ymax_), (0.0, 128.0, 255.0), 2) cv2.putText(imageInfo, object_id, (xmin_, ymin_), cv2.FONT_HERSHEY_PLAIN, 1.5, (255.0, 255.0, 255.0), 1) for p in range(num_of_persons): try: person_id = info[frm]['persons']['person_id'][p] except: person_id = 'none' #continue try: face_rect= info[frm]['persons']['face_rect'][p] except: face_rect = 'none' #continue try: full_rect = info[frm]['persons']['full_rect'][p] except: full_rect = 'none' #continue try: behavior = info[frm]['persons']['behavior'][p] except: behavior = 'none' #continue try: predicate = info[frm]['persons']['predicate'][p] except: predicate = 'none' #continue try: emotion = info[frm]['persons']['emotion'][p] except: emotion = 'none' #continue print("place:{}".format(place)) print("num_of_persons:{}".format(num_of_persons)) print("person_id:{}".format(person_id)) print("face_rect:{}".format(face_rect)) print("full_rect:{}".format(full_rect)) print("behavior:{}".format(behavior)) print("predicate:{}".format(predicate)) print("emotion:{}".format(emotion)) # face rect xmin = int(max(face_rect[0] * width_ratio, 0)) ymin = int(max(face_rect[1] * height_ratio, 0)) xmax = int(min((face_rect[2]) * width_ratio, 448)) ymax = int(min((face_rect[3]) * height_ratio, 448)) cv2.rectangle(imageInfo, (xmin, ymin), (xmax, ymax), colors[0], 2) # full rect xmin = int(max(full_rect[0] * width_ratio, 0)) ymin = int(max(full_rect[1] * height_ratio, 0)) xmax = int(min((full_rect[2]) * width_ratio, 448)) ymax = int(min((full_rect[3]) * height_ratio, 448)) cv2.rectangle(imageInfo, (xmin, ymin), (xmax, ymax), colors[2], 2) cv2.putText(imageInfo, person_id, (xmin, ymin), cv2.FONT_HERSHEY_PLAIN, 1.5, (255.0, 255.0, 255.0), 1) cv2.putText(imageInfo, emotion, (xmin, ymax+20), cv2.FONT_HERSHEY_PLAIN, 1, (0.0, 0.0, 255.0), 1) cv2.putText(imageInfo, behavior, (xmin, ymax+30), cv2.FONT_HERSHEY_PLAIN, 1, (0.0, 255.0, 0.0), 1) cv2.putText(imageInfo, place, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (255.0, 255.0, 255.0), 1) cv2.putText(imageInfo, sound, (10, 40), cv2.FONT_HERSHEY_PLAIN, 1.5, (255.0, 255.0, 255.0), 1) imageInfo = cv2.resize(imageInfo,dsize=(1024, 768)) cv2.imwrite("{}_frame.png".format(save_file), imageInfo) 
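# OpenCV draws in BGR order; convert to RGB so the optional matplotlib preview below shows the expected colors.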
imageInfo = cv2.cvtColor(imageInfo, cv2.COLOR_BGR2RGB) if debug: plt.figure(figsize=(8,8)) plt.imshow(np.uint8(imageInfo)) plt.show() {"directed": true, "multigraph": true, "graph": {"name": "G", "node": {"fontsize": "7", "height": "0.1"}, "edge": {"fontsize": "7"}}, "nodes": [{"color": "lightgrey", "style": "filled", "id": "episode_1"}, {"color": "lightgrey", "style": "filled", "id": "scene_10"}, {"color": "lightgrey", "style": "filled", "id": "frame_0"}, {"color": "lightblue", "style": "filled", "id": "kitchen"}, {"color": "lightblue", "style": "filled", "id": "sound"}, {"id": "Jeongsuk"}, {"color": "green", "style": "filled", "id": "cook"}, {"color": "blue", "style": "filled", "id": "Happiness"}, {"id": "Deogi"}], "links": [{"source": "Jeongsuk", "target": "cook", "key": 0}, {"source": "Jeongsuk", "target": "Happiness", "key": 0}, {"source": "Deogi", "target": "cook", "key": 0}, {"source": "Deogi", "target": "Happiness", "key": 0}] }The plot provides an overview of where we can focus our resources; one could invest more in Uttar Pradesh, since its large population is a clear advantage.plt.figure(figsize=(12,15)) sns.barplot(df['SCHTOT'], df['STATNAME'],alpha=0.8) plt.xticks(rotation='vertical') plt.xlabel('Number of Schools', fontsize=15) plt.ylabel('States in India', fontsize=15) plt.title("Number of schools in states of India", fontsize=16) plt.show()We see a relation: as the population increases, the number of schools also increases.plt.figure(figsize=(25,17)) for i in range(1,len(data)): plt.subplot(4,9,i) plt.title(df['STATNAME'][i]) top = ['Gov','pri'] uttar = data.loc[df['STATNAME'] == df['STATNAME'][i],:] value =[float(uttar['SCHTOTG']/uttar['SCHTOT'])*100,float(uttar['SCHTOTPR']/uttar['SCHTOT'])*100] plt.pie(value, labels=top, autopct='%1.1f%%',startangle=140) plt.axis('equal') plt.show()Ratio of government vs. private schools for every state in India: 1) The number of government schools is high in almost all states of India (except Kerala). 2) About 60% of the schools in Kerala are private, which is unlike any other state (it is an anomaly). 3) Incidentally, Kerala also has the highest literacy rate among the Indian states. 4) In Bihar, which has one of the lowest literacy rates, only 3% of the schools are private. Does this mean that a state's literacy depends on the number of private educational institutions? We can safely assume that there is a relation (as small as it may seem).plt.figure(figsize=(10,12)) sns.barplot(data['OVERALL_LI'], data['STATNAME'],alpha=0.8) plt.xticks(rotation='vertical') plt.xlabel("Literacy rate", fontsize=16) plt.title('Literacy rate with respect to state') plt.show()We can see that almost all states in India have more than a 50% literacy rate.data['good'] = data['ROADTOT'] +data['SPLAYTOT'] + data['SWATTOT'] +data['SELETOT'] data['goodpercent'] = data['good']/data['SCHTOT'] plt.figure(figsize=(10,12)) sns.barplot(data['goodpercent'], data['STATNAME'],alpha=1) plt.xticks(rotation='vertical') plt.xlabel("Schools with basic facilities (fraction of total)", fontsize=16) plt.title('School infrastructure with respect to state') plt.show()Honey Bee Pollen Classification AbstractIn this notebook, we have analysed a CNN model to classify the pollen and non-pollen honey bee images from the dataset.
For this dataset, we have tried various combination to access the accuracy like changing the activatin function for the convolution layer, changing the cost function, change in number of epoch value, trying different gradient estimation, changing network architecture like reducing the layer, try different kernal sizes and tried different network initialization. We have compared the accuracy for all the type of variation in the CNN model. DatasetHoney Bee Pollen dataset has been created from videos captured at the entrance of a bee colony in June 2017 at the Bee facility of the Gurabo Agricultural Experimental Station of the University of Puerto Rico.Dataset contains images for pollen bearing and no pollen bearing honey bees. Images will be read by code and convert into numpy array of size 64*64 .import glob, os, cv2 from skimage import io, transform import pandas as pd import numpy as np import matplotlib.pyplot as plt os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Mount google drive from google.colab import drive drive.mount('/content/drive') # listing the directory items = os.listdir('/content/drive/My Drive/images') images = [] for each_image in items: if each_image.endswith(".jpg"): full_path = "/content/drive/My Drive/images/" + each_image images.append(full_path) # Create image list print(images) """ First, Function will read all image of honey bee. Second, All images will be resize to 300*180 size. Third, All image will be flatten and store into list variable. Labels will be assign as 0 for Non Pollen Bee and 1 for Pollen Bee. Funtion will return array of flatten image and array of labels """ def dataset(file_list,size=(64,64),flattened=False): data = [] for i, file in enumerate(file_list): image = io.imread(file) image = transform.resize(image, size, mode='constant') if flattened: image = image.flatten() data.append(image) labels = [1 if f.split("/")[-1][0] == 'P' else 0 for f in file_list] return np.array(data), np.array(labels) # Call dataset funtion to create feature and target label X,y=dataset(images) # X has the following structure: X[imageid, y,x,channel] print('X: ',X.shape) # data print('y: ',y.shape) # target print('Class 0: ',sum(y==0)) print('Class 1: ',sum(y==1)) print('Total : ',len(y)) print(y) %matplotlib inline # Bellow images is of Pollen and NonPollen Honey Bee fig, axes = plt.subplots(1,2) k=0 plt.sca(axes[0]) plt.imshow(X[k]) plt.title('img {} - class {}'.format(k, y[k])) k=400 plt.sca(axes[1]) plt.imshow(X[k]) plt.title('img {} - class {}'.format(k, y[k])); import tensorflow as tf from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.20, random_state=123) print(X_train.shape,y_train.shape)(571, 64, 64, 3) (571,)Part A CNN ModelBuild a CNN model function which have three convolution and pooling layer with three fully connected layer(one dense, one dropout and one output layer).def create_model(features, labels, mode, params): # Reshape the input image to 64*64 entry_data = tf.reshape(features['X'], [-1, 64, 64, 3]) ### Condition to get the parameter values # Default flags actn_fun = tf.nn.relu error_cng = False optm_flag = False layer_flag = False kernal_init = None ker_size = 5 # For Part B if params.get('A') == 1: actn_fun = params.get('B')[0] elif params.get('A') == 2: error_cng = True elif params.get('A') == 3: optm_flag = True elif params.get('A') == 4: layer_flag = True elif params.get('A') == 5: ker_size = params.get('F')[0] elif params.get('A') == 6: kernal_init = params.get('G')[0] else: 
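# Note (descriptive comment, not original code): params['A'] selects which experiment variant to build:
#   1 = activation function taken from params['B'], 2 = hinge loss, 3 = Adagrad optimizer,
#   4 = reduced architecture (flatten after the second pooling layer), 5 = kernel size from params['F'],
#   6 = kernel initializer from params['G'].
# The else branch below simply restates the defaults: ReLU, cross-entropy, Adam, three conv layers, kernel size 5.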
actn_fun = tf.nn.relu error_cng = False optm_flag = False layer_flag = False kernal_init = None ker_size = 5 #### First layer # First Convolution Layer with 32 filter having kernal size of 5 and activation function convolu1 = tf.layers.conv2d(inputs = entry_data, filters = 32, kernel_size=ker_size, activation = actn_fun,padding = 'same',kernel_initializer=kernal_init) # First Pooling layer of max_pooling with pool_size of 2 and strides of 2 pixel pooling1 = tf.layers.max_pooling2d(inputs = convolu1, pool_size = [2,2], strides = 2) #### Second Layer # Second Convolution Layer with 64 filter having kernal size of 5 and activation function convolu2 = tf.layers.conv2d(inputs = pooling1, filters = 64, kernel_size = ker_size, activation = actn_fun,padding = 'same',kernel_initializer=kernal_init) # Second Pooling layer of max_pooling with pool_size of 2 and strides of 2 pixel pooling2 = tf.layers.max_pooling2d(inputs = convolu2, pool_size = [2,2], strides = 2) #### Third Layer # Third Convolution Layer with 64 filter having kernal size of 5 and activation function convolu3 = tf.layers.conv2d(inputs = pooling2, filters = 128, kernel_size = ker_size, activation = actn_fun,padding = 'same',kernel_initializer=kernal_init) # Third Pooling layer of max_pooling with pool_size of 2 and strides of 2 pixel pooling3 = tf.layers.max_pooling2d(inputs = convolu3, pool_size = [2,2], strides = 2) #### Fully connected Layer # Flat the output of last pooling layer if layer_flag: flattening = tf.reshape(pooling2, [-1, 16 * 16 * 64]) else: flattening = tf.reshape(pooling3, [-1, 8 * 8 * 128]) # First dense layer with 3000 units dense = tf.layers.dense(inputs = flattening, units = 3000, activation = tf.nn.relu) # dropout layer with dropout rate 30% dropout = tf.layers.dropout(inputs = dense, rate = 0.3, training=mode == tf.estimator.ModeKeys.TRAIN) #### Output Layer with 2 units since we are dealing with binary classification exit_l = tf.layers.dense(inputs = dropout, units = 2) # Predicted lables by using argmax function #predicted = tf.argmax(exit_l, axis = 1); predicted = { "classes" : tf.argmax(exit_l, axis = 1), } # For inference mode if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode = mode, predictions = predicted) # Error rate for Train and Eval model mode if error_cng: temp_labels = tf.reshape(labels, [-1, 1]) error = tf.losses.hinge_loss(labels = temp_labels, logits = exit_l[:,1:]) else: error = tf.losses.sparse_softmax_cross_entropy(labels = labels, logits = exit_l) accuracy = tf.metrics.accuracy(labels=labels, predictions=predicted["classes"]) logging_hook = tf.train.LoggingTensorHook({"loss" : error, "accuracy" : accuracy[1]}, every_n_iter=10) # For Train mode, Use optimizer to optimize the weights if mode == tf.estimator.ModeKeys.TRAIN: if optm_flag: optimizer = tf.train.AdagradOptimizer(learning_rate = 0.001) else: optimizer = tf.train.AdamOptimizer(learning_rate = 0.001) training = optimizer.minimize(error, global_step = tf.train.get_global_step()) steps = tf.train.get_global_step() return tf.estimator.EstimatorSpec(mode = mode, loss = error, train_op = training,training_hooks = [logging_hook]) # For Eval Model, measure the accuracy for the test data if mode == tf.estimator.ModeKeys.EVAL: accuracy = tf.metrics.accuracy(labels = labels, predictions = predicted['classes']) eval_metrics_ops = {'accuracy': tf.metrics.accuracy(labels = labels, predictions = predicted['classes'])} logging_hook = tf.train.LoggingTensorHook({"loss" : error, "accuracy" : accuracy[1]}, 
every_n_iter=10) return tf.estimator.EstimatorSpec(mode = mode, loss = error, eval_metric_ops = eval_metrics_ops,evaluation_hooks=[logging_hook]) def model_train_test(classifier,epoch_num): # Train CNN model for train images with batch size of 16 with steps = 100 train_function = tf.estimator.inputs.numpy_input_fn(x = {'X': X_train}, y = y_train,batch_size = 16, num_epochs = None, shuffle = True) classifier.train(input_fn=train_function, steps = 100) # Test CNN model with test dataset with num_epochs from the function test_function = tf.estimator.inputs.numpy_input_fn(x = {'X': X_test}, y = y_test, num_epochs = epoch_num,shuffle = False) pred_value = classifier.evaluate(input_fn=test_function) # Return accuracy return pred_value def bar_plot(list,x_pos,y_label,title): y_pos = np.arange(len(list)) plt.bar(y_pos,x_pos,align='center',alpha=0.5,color='g') plt.xticks(y_pos, list) plt.ylabel(y_label) plt.title(title) plt.show()Part B Different Activation functionBelow funtion will train CNN with three activation function like (1) Rectified linear unit (ReLU)(2) Leaky rectified linear unit (Leaky ReLU) (3) TanH. Also CNN will be trained for test dataset and measure the accuracy for each CNN with activation function. Relu Activation Function:tf.logging.set_verbosity(tf.logging.INFO) Acc_list = [] # Train and test CNN model with Relu Activation function classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 1 ,'B':[tf.nn.relu]}) Accuracy_Relu = model_train_test(classifier,100) #print(Accuracy_Relu) Acc_list.append(Accuracy_Relu.get('accuracy')*100)INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp7jcxo5hi INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmp7jcxo5hi', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling[...]LeakyRelu Activation Function# Train and test CNN model with LeakyRelu Activation function classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 1 ,'B':[tf.nn.leaky_relu]}) Accuracy_LeakyRelu = model_train_test(classifier,100) #print(Accuracy_LeakyRelu) Acc_list.append(Accuracy_LeakyRelu.get('accuracy')*100)INFO:tensorflow:Using default config. 
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpxt9bd3jx INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmpxt9bd3jx', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling[...]Tanh Activation Function# Train and test CNN model with Tanh Activation function classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 1 ,'B':[tf.nn.tanh]}) Accuracy_Tanh = model_train_test(classifier,100) #print(Accuracy_Tanh) Acc_list.append(Accuracy_Tanh.get('accuracy')*100) accuracy_df = pd.DataFrame({'Activation_Function':['ReLU','LeakyReLU','Tanh'],'Accuracy':Acc_list}) accuracy_df.head()From the above table, By using LeakyReLu activation function, we get best accuracy compare to LeakyReLU and Tanh activation function.bar_plot(['ReLU','LeakyRelu','Tanh'],Acc_list,"Model Accuracy","Comparision of Different Activation function in CNN")Part C Changes in Cost function Hindge Cost function:Acc_list = [] # Train and test CNN model with hindge cost function classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 2}) Accuracy_Hindge = model_train_test(classifier,100) #print(Accuracy_Hindge) Acc_list.append(Accuracy_Hindge.get('accuracy')*100) #print(Accuracy_Relu) Acc_list.append(Accuracy_Relu.get('accuracy')*100) accuracy_df = pd.DataFrame({'Cost_Function':['Hindge Loss','Cross-Entroy'],'Accuracy':Acc_list}) accuracy_df.head()From the above table, Hindge Loss function give more accuracy than cross-entropy cost fucnction.bar_plot(['Hindge Loss','Cross-Entroy'],Acc_list,"Model Accuracy","Comparision of Different Cost function in CNN")Part D Variable Epochs Values To check effect of epoch value, I have tried 100,50,10 and 1 epoch value to get the accuracy which is mention below.epoch_list = [50,10,1] Acc_list = [] Accuracy_epoch_list = [] #below loop try all epoch value and train and test the model to get the accuracy for numb in epoch_list: classifier = tf.estimator.Estimator(model_fn = create_model,params={}) Accuracy_epoch_list.append(model_train_test(classifier,numb)) #print(Accuracy_epoch_list) Acc_list = [i.get('accuracy')*100 for i in Accuracy_epoch_list] #print(Accuracy_Relu) Acc_list.append(Accuracy_Relu.get('accuracy')*100) accuracy_df = pd.DataFrame({'Epoch Values':['50','10','1','100'],'Accuracy':Acc_list}) accuracy_df.head()From the above code, CNN with 50 and 10 epoch values gives highest accuracy compacre to other values like 100, and 1.bar_plot(['Epoch_50','Epoch_10','Epoch_1','Epoch_100'],Acc_list,"Model Accuracy","Comparision of Different Epoch Value in CNN")Part E Changes in Gradient estimation ADA Gradient Estimation:I have compared ADA gradient and ADAM gradient to check the accuracy of the CNN network.Acc_list = [] # Train and test CNN model with ADA Gradient estimation classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 3}) Accuracy_AdaGrad = 
model_train_test(classifier,100) #print(Accuracy_AdaGrad) Acc_list.append(Accuracy_AdaGrad.get('accuracy')*100) #print(Accuracy_Relu) Acc_list.append(Accuracy_Relu.get('accuracy')*100) accuracy_df = pd.DataFrame({'Gradient Estimation':['Adagrad','ADAM'],'Accuracy':Acc_list}) accuracy_df.head()From the table, ADAM gradient gives more accuracy than Ada gradient estimation.bar_plot(['Adagrad','ADAM'],Acc_list,"Model Accuracy","Comparision of Different Gradient Estimation in CNN")Part F Changes in Network Architecture In below code, I have tried to edit the layer in the CNN model and also change the kernal size to check the effect in the accuracy after changing the network architecture. CNN with Two LayerAcc_list = [] # Train and test CNN model with two layer classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 4}) Accuracy_TwoLayerCNN = model_train_test(classifier,100) #print(Accuracy_TwoLayerCNN) Acc_list.append(Accuracy_TwoLayerCNN.get('accuracy')*100) #print(Accuracy_Relu) Acc_list.append(Accuracy_Relu.get('accuracy')*100)INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpee1yq5da INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmpee1yq5da', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling[...]CNN with kernal size = 5# Train and test CNN model with kernal size=5 classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 5, 'F':[2]}) Accuracy_KernalSize_Two = model_train_test(classifier,100) #print(Accuracy_KernalSize_Two) Acc_list.append(Accuracy_KernalSize_Two.get('accuracy')*100)INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmphi2jgeb0 INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmphi2jgeb0', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. 
INFO:tensorflow:Done calling[...]CNN with Kernal size= 3# Train and test CNN model with kernal size=3 classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 5, 'F':[3]}) Accuracy_KernalSize_Three = model_train_test(classifier,100) #print(Accuracy_KernalSize_Three) Acc_list.append(Accuracy_KernalSize_Three.get('accuracy')*100) accuracy_df = pd.DataFrame({'Network Architecture Type':['CNN with 2 hidden Layer','CNN with 3 hidden Layer', 'CNN with kernal size 2','CNN with kernal size 3'],'Accuracy':Acc_list}) accuracy_df.head()From the table, CNN with kernal size 2 gave best accuracy compared to other network architecture change.bar_plot(['CNN(2HL)','CNN(3HL)', 'CNN(kernal=2)','CNN(kernal=3)'],Acc_list,"Model Accuracy","Comparision of Different Network Architecture in CNN")Part G Network initialization Xavier Glorot Initializar (Uniform)Acc_list = [] # Train and test CNN model with Xavier glorot(Uniform) initializar in convolution layer classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 6, 'G':[tf.contrib.layers.xavier_initializer(uniform=True)] }) Accuracy_Xav_Init_Uni = model_train_test(classifier,100) #print(Accuracy_Xav_Init_Uni) Acc_list.append(Accuracy_Xav_Init_Uni.get('accuracy')*100)WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons If you depend on functionality not listed there, please file an issue. INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpiay0cdy4 INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmpiay0cdy4', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': Xavier Glorot Initializer (Gaussian)# Train and test CNN model with Xavier glorot(Gaussian) initializar in convolution layer classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 6, 'G':[tf.contrib.layers.xavier_initializer(uniform=False)] }) Accuracy_Xav_Init_Gau = model_train_test(classifier,100) #print(Accuracy_Xav_Init_Gau) Acc_list.append(Accuracy_Xav_Init_Gau.get('accuracy')*100)INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpko5z0r4c INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmpko5z0r4c', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. 
INFO:tensorflow:Done calling[...]Random Gaussain# Train and test CNN model with random gaussain initializar in convolution layer classifier = tf.estimator.Estimator(model_fn = create_model,params={'A': 6, 'G':[tf.initializers.random_normal] }) Accuracy_Rand_Gau = model_train_test(classifier,100) #print(Accuracy_Rand_Gau) Acc_list.append(Accuracy_Rand_Gau.get('accuracy')*100) accuracy_df = pd.DataFrame({'Network Initialization Type':['Xavier Glorot(Uniform)','Xavier Glorot(Gaussian)', 'Random Gaussian'],'Accuracy':Acc_list}) accuracy_df.head()from the above table, Xavier Glorot kernal initializer gave the best accuracy among other initializer like random gaussain.bar_plot(['Xavier Glorot(Uniform)','Xavier Glorot(Gaussian)', 'Random Gaussian'],Acc_list,"Model Accuracy","Comparision of Different Network Initialization in CNN")OAK-D PREPROCESSINGSince the `recording_RGBD.py` scrips saves the data in .h265 and .h254 format, it is necesary to convert them into a more common formal like .mp4. In this scrip performs the following tasks:- Changes the video files from `.h265` and `.h254` format to `.mp4`- Copies the RGB files into a new dataset- Visualizes the background of a recordningimport cv2 import numpy as np import os import matplotlib.pyplot as plt import shutil def create_dir(folder, force=True, verbose=False): ''' Create a directory if it doesn't exist ''' try: os.makedirs(folder) if verbose: print('Directory {} created succesfully.'.format(folder)) except: if force: if verbose: print('{} already exists. Creating a new one'.format(folder)) shutil.rmtree(folder) os.makedirs(folder) else: if verbose: print('{} already exists.'.format(folder)) passWe define the raw dataset path and the path to the dataset containing only the RGB videodataset = '/home/israel/Downloads/OAKD_8S/raw/' color_dataset = '/home/israel/Downloads/OAKD_8S/color' right_dataset = '/home/israel/Downloads/OAKD_8S/right' left_dataset = '/home/israel/Downloads/OAKD_8S/left' subjects = ['rami', 'isra', 'jos', 'carlos', 'jhane', 'guille', 'andrea', 'sus'] # subjects = ['carlos1'] walks = ['bk', 'nm', 'bg', 'cl'] files = ['color_video.h265', 'left_video.h264', 'right_video.h264'] mfiles = ['color_video.mp4', 'left_video.mp4', 'right_video.mp4'] views = sorted(os.listdir(dataset)) for view in views: recordings = os.listdir(os.path.join(dataset, view)) for s, subject in enumerate(subjects): # os.system(f'rm {os.path.join(dataset, view, subject, f'convert.txt')}') color_path = os.path.join(color_dataset, view, f'{str(s).zfill(3)}') create_dir(color_path, force=True) right_path = os.path.join(right_dataset, view, f'{str(s).zfill(3)}') create_dir(right_path, force=True) left_path = os.path.join(left_dataset, view, f'{str(s).zfill(3)}') create_dir(left_path, force=True) for w, walk in enumerate(walks): for rec in recordings: if subject in rec and f'walk:{walk}' in rec: for file in files: print(f'View:{view} Subject: {subject} walk: {walk} vid: {rec}') codec_file = os.path.join(dataset, view, rec, file) mp4_file = codec_file[:-4]+'mp4' cmd = f'ffmpeg -framerate 30 -i {codec_file} -c copy {mp4_file}' os.system(cmd) if 'color' in file: new_path = os.path.join(color_path, f'{walk}.mp4' ) cmd = f'cp {mp4_file} {new_path}' os.system(cmd) if 'right' in file: new_path = os.path.join(right_path, f'{walk}.mp4' ) cmd = f'cp {mp4_file} {new_path}' os.system(cmd) if 'left' in file: new_path = os.path.join(left_path, f'{walk}.mp4' ) cmd = f'cp {mp4_file} {new_path}' os.system(cmd) video_path = 
f'/home/israel/Downloads/OAK/j9/05.08.2021_12.48.12_id:carlos1_walk:bk//color_video.mp4' cap = cv2.VideoCapture(video_path) end = cap.get(cv2.CAP_PROP_FRAME_COUNT) cap.set(cv2.CAP_PROP_POS_FRAMES, end-10) ret, frame = cap.read() frame = cv2.resize(frame, (1280, 720), cv2.INTER_AREA) plt.imshow(frame[:,:,::-1])Exercises obtained from Kaggle: Pandas mini courses Here: https://www.kaggle.com/residentmario/grouping-and-sortingimport pandas as pd reviews = pd.read_csv("winemag-data-130k-v2.csv", index_col=0) reviews.head() reviews_written = reviews.groupby('taster_twitter_handle').taster_twitter_handle.count() reviews_writtenWhat is the best wine I can buy for a given amount of money? Create a Series whose index is wine prices and whose values is the maximum number of points a wine costing that much was given in a review. Sort the values by price, ascending (so that 4.0 dollars is at the top and 3300.0 dollars is at the bottom).best_rating_per_price = reviews.groupby('price')['points'].max().sort_index() best_rating_per_priceWhat are the minimum and maximum prices for each variety of wine? Create a DataFrame whose index is the variety category from the dataset and whose values are the min and max values thereof.price_extremes = reviews.groupby('variety').price.agg([min, max]) price_extremesWhat are the most expensive wine varieties? Create a variable sorted_varieties containing a copy of the dataframe from the previous question where varieties are sorted in descending order based on minimum price, then on maximum price (to break ties).sorted_varieties = price_extremes.sort_values(by=['min','max'], ascending=False) sorted_varietiesCreate a Series whose index is reviewers and whose values is the average review score given out by that reviewer. Hint: you will need the taster_name and points columns.reviewer_mean_ratings = reviews.groupby('taster_name')['points'].mean() reviewer_mean_ratingsWhat combination of countries and varieties are most common? Create a Series whose index is a MultiIndexof {country, variety} pairs. For example, a pinot noir produced in the US should map to {"US", "Pinot Noir"}. 
Sort the values in the Series in descending order based on wine count.country_variety_counts = reviews.groupby(['country','variety']).size().sort_values(ascending=False) country_variety_countsDecision Treedef run_model(model, feats): X = df[cat_feats].values y = df['price_value'].values #model = DecisionTreeRegressor(max_depth=5) # scores = cross_val_score(model, X, y, cv=3, scoring = 'neg_mean_absolute_error') return np.mean(scores), np.std(scores) run_model(DecisionTreeRegressor(max_depth=5), cat_feats)Random Forestmodel = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0) run_model(model, cat_feats)XGBoostxgb_params = { 'max_depth': 5, 'n_estimators': 50, 'learning_rate': 0.1, 'seed' :0 } run_model(xgb.XGBRegressor(**xgb_params), cat_feats) m = xgb.XGBRegressor(max_depth=5, n_estimators=50,learning_rate=0.1,seed=0) m.fit(X, y) #ktore cechy sa istotne wg xgboosta inp= PermutationImportance(m).fit(X, y) eli5.show_weights(inp, feature_names = cat_feats) len(cat_feats) feats=['param_napęd__cat','param_stan__cat','param_rok-produkcji__cat','param_faktura-vat__cat','param_moc__cat','param_skrzynia-biegów__cat','param_marka-pojazdu__cat','feature_kamera-cofania__cat','param_typ__cat','param_pojemność-skokowa__cat','seller_name__cat','param_wersja__cat','param_model-pojazdu__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_kod-silnika__cat','feature_asystent-pasa-ruchu__cat','feature_łopatki-zmiany-biegów__cat','feature_czujniki-parkowania-przednie__cat','feature_światła-led__cat'] #sprawdzamy czy jak zmienimy ilość cech z 151 na 20 (te uznane za ważne) to wynik się zmeni len(feats) run_model(xgb.XGBRegressor(**xgb_params), feats) #analizujemy co jest w kolumnach. Poszukujemy zmienne numeryczne df['param_rok-produkcji'].unique() df['param_rok-produkcji__cat'].unique() #po zastosowani funki cat te lata się dubią, tracimy informację , możemy je zopymalizować troche df['param_rok-produkcji']=df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) feats=['param_napęd__cat','param_stan__cat','param_rok-produkcji','param_faktura-vat__cat','param_moc__cat','param_skrzynia-biegów__cat','param_marka-pojazdu__cat','feature_kamera-cofania__cat','param_typ__cat','param_pojemność-skokowa__cat','seller_name__cat','param_wersja__cat','param_model-pojazdu__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_kod-silnika__cat','feature_asystent-pasa-ruchu__cat','feature_łopatki-zmiany-biegów__cat','feature_czujniki-parkowania-przednie__cat','feature_światła-led__cat'] run_model(xgb.XGBRegressor(**xgb_params), feats) df['param_moc']=df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]) ) feats=['param_napęd__cat','param_stan__cat','param_rok-produkcji','param_faktura-vat__cat','param_moc','param_skrzynia-biegów__cat','param_marka-pojazdu__cat','feature_kamera-cofania__cat','param_typ__cat','param_pojemność-skokowa__cat','seller_name__cat','param_wersja__cat','param_model-pojazdu__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_kod-silnika__cat','feature_asystent-pasa-ruchu__cat','feature_łopatki-zmiany-biegów__cat','feature_czujniki-parkowania-przednie__cat','feature_światła-led__cat'] run_model(xgb.XGBRegressor(**xgb_params), feats) #poniewaz nadpisujmy param_moc w feats zmieniamy z wersji __cat na param_moc df['param_pojemność-skokowa'].unique() df['param_pojemność-skokowa']=df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else 
int(str(x).split('cm')[0].replace(' ','')) ) feats=['param_napęd__cat','param_stan__cat','param_rok-produkcji','param_faktura-vat__cat','param_moc','param_skrzynia-biegów__cat','param_marka-pojazdu__cat','feature_kamera-cofania__cat','param_typ__cat','param_pojemność-skokowa','seller_name__cat','param_wersja__cat','param_model-pojazdu__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_kod-silnika__cat','feature_asystent-pasa-ruchu__cat','feature_łopatki-zmiany-biegów__cat','feature_czujniki-parkowania-przednie__cat','feature_światła-led__cat'] run_model(xgb.XGBRegressor(**xgb_params), feats)**How Bright are Accreting Black Holes in Binary Systems?** ***A Computational Investigation***Leader: **Background**Binary star systems consist of two stars that orbit one another. Depending on the mass of these stars, one or more of them may explode in a supernova and form a black hole late in their lifetime. This is generally the case for stars with initial masses greater than 20 times that of the sun, or 20 solar masses ($M_\odot$). In the majority of these binary systems, black hole formation occurs asynchronously. This implies that the binary undergoes a period time where one object is a black hole and one object is still a star. This stage of evolution is called the x-ray binary phase, and the system is referred to as an **x-ray binary**.Why do we call these systems x-ray binaries? During this evolutionary stage, the black hole accretes matter from its companion star due to its strong gravitational pull. The accretion process heats transferred material through gravitational and frictional forces, which in turn triggers the emission of x-rays. Below is a helpful illustration of this process:![photo1](https://drive.google.com/uc?export=view&id=1ya2mtVhdkbPInwV-kysQbDaqj-DYc--v) Because the matter surrounding these black holes emits brightly in the x-ray band of the electromagnetic spectrum, x-ray observatories (such as the Chandra Observatory) search for this emission to identify binary systems that house black holes. The brightness, or luminosity, that astronomers observe in these binaries is called the **accretion luminosity**, as it is the luminosity that results from the accretion process of stellar material onto the black hole. These observations help astronomers learn about the properties of these systems and predict their possible courses of evolution. For example, some astronomers are interested in finding systems like these that will eventually form colliding black holes, which can be observed with gravitational wave detections.In this project, we will take a closer look at how we can predict the luminosity of these black hole systems using computational methods and data from simulations. Theoretical calculations like these help astronomers target their observations towards certain luminosities so that they can better identify these systems in the sky. These calculations also help astronomers compare their theoretical predictions with observations so that they can better understand if there are flaws in their theoretical or observational methods. **Building a Luminosity Function**First, we need to write a function that calculates and returns the accretion luminosity of a black hole in a binary system. Once we have this function, we can apply it to simulation data in multiple different ways. 
Previous theoretical work tells us that the black hole accretion luminosity is calculated by: $L_{acc} = \eta \dot{M}_{acc}c^2$where $L_{acc}$ is the accretion luminosity, $\eta$ is a scale factor derived from relativistic properties of the black hole, $\dot{M}_{acc}$ is the accretion rate of material onto the black hole, and $c$ is the speed of light. The parameters $\eta$ and $\dot{M}_{acc}$ are calculated by: $\eta = 1 - \sqrt{1 - \bigg(\frac{M_{BH}}{3M_{BH,0}}\bigg)^2}$ $\dot{M}_{acc} = \frac{\dot{M}_{BH}}{1 - \eta}$where $M_{BH}$ is the current black hole mass, $M_{BH,0}$ is the initial black hole mass, and $\dot{M}_{BH}$ is the rate of change of the black hole mass. The parameters provided in our simulation output are $M_{BH}$, $M_{BH,0}$, and $\dot{M}_{BH}$. Using this information and the equations above, define a function in python that takes the appropriate input parameters, calculates $L_{acc}$, $\eta$, and $\dot{M}_{acc}$ in the proper order, and returns the accretion luminosity. Don't worry about defining numeric quantities for constants yet.There are a couple refinements we need to make to our function before we can apply it to our simulation data. The first is that there is an upper limit for $\eta$. Specifically, if $M_{BH}$ exceeds $\sqrt{6}M_{BH,0}$, then $\eta$ is taken to be $0.42$. Copy and paste your old function below, and then add this condition into your calculation for $\eta$.Next, we need to apply a **bolometric correction** to our calculated luminosity. A bolometric correction is a multiplicative factor that accounts for differences between the visible luminosity that we observe and the true brightness of the observed source. Bolometric corrections usually vary depending on the observing apparatus and specific properties of the observed object. However, since we are simulating binaries and not observing them, we take the bolometric correction factor to be constant for all sources. For binaries with accreting black holes, this correction is $0.8$. Copy and paste your old function below, and then multiply your final accretion luminosity by this factor before returning it.Lastly, we need to adjust the units of our calculation so that our final accretion luminosity has the traditional units of x-ray observations. To do this, we need to know the units of our simulation variables. Below is a list of the units for our simulation output data:$M_{BH}$ ---> [$M_\odot$]$M_{BH,0}$ ---> [$M_\odot$]$\dot{M}_{BH}$ ---> [$M_\odot yr^{-1}$]We want our final luminosity to be in **ergs per second**, or $erg$ $s^{-1}$, which is a common unit of measure for luminosities in astronomy. A helpful unit conversion for one $erg$ is:[$erg$] = [$g$ $cm^2$ $s^{-2}$] Unforunately, it looks like we have a lot of converting to do! Alas, if you continue with astronomy or astrophysics, you'll find this is a common practice, as astronomers like to use a plethora of unique and often inconvenient units. Using dimensional analysis, work out any additional multiplicative constants you may need to add to your calculations so that your returned accretion luminosity has the correct units. You can choose the units of $c$ to be whatever is most convenient for your calculations. Copy and paste your old function below, and then write in your unit adjustments. Once you are done, define numeric quantities for your constants at the beginning of your function.Your function is now ready to go! Let's put it to work with some simulation data. 
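For reference, below is a minimal sketch of what such a function might look like once the $\eta$ cap, the bolometric correction, and the unit conversion are all folded in. The function name and the exact constant values used here ($c$ in cm/s, grams per $M_\odot$, seconds per year) are illustrative choices for this sketch, not the only valid ones; your own version may be organised differently.

```python
import numpy as np

def accretion_luminosity(M_bh, M_bh_0, Mdot_bh):
    """Return the bolometrically corrected accretion luminosity in erg/s.

    M_bh    : current black hole mass [Msun]
    M_bh_0  : initial black hole mass [Msun]
    Mdot_bh : rate of change of the black hole mass [Msun/yr]
    """
    c = 2.99e10            # speed of light [cm/s] (assumed constant value)
    Msun = 1.989e33        # grams per solar mass
    sec_per_yr = 3.154e7   # seconds per year
    bolometric_correction = 0.8

    # eta is capped at 0.42 once M_bh exceeds sqrt(6) * M_bh_0
    if M_bh > np.sqrt(6) * M_bh_0:
        eta = 0.42
    else:
        eta = 1.0 - np.sqrt(1.0 - (M_bh / (3.0 * M_bh_0))**2)

    # accretion rate onto the black hole, still in Msun/yr
    Mdot_acc = Mdot_bh / (1.0 - eta)

    # convert Msun/yr -> g/s, then L = eta * Mdot_acc * c^2 comes out in erg/s
    L_acc = eta * (Mdot_acc * Msun / sec_per_yr) * c**2
    return bolometric_correction * L_acc
```

An equivalent calculation appears in the solution cells further below; collecting the conversion factors next to the formula simply makes the dimensional analysis easier to check.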
**Applying Your Luminosity Function to Data: ```for``` Loop Method** First, read-in the simulation data file called ```"cosmic_data_1.csv"```, located in this project folder, and store it as a ```pandas``` dataframe. Make sure to import ```pandas``` and any other packages you might need first!import pandas as pd import numpy as np import matplotlib.pyplot as plt from google.colab import files uploaded = files.upload() df1 = pd.read_csv("cosmic_data_1.csv")Now, open the file and examine its format, including column titles, the number of rows, etc.This file contains the evolutionary data for a single simulated binary system during its x-ray binary phase. Each row represents one timestep of the simulation, which you can see by looking at the ```tphys``` column. ```tphys``` is the time, in mega years (Myr), since the binary began its evolution. As a warm-up to practice working with the data, use ```pandas``` operations to calculate how long this system was in the x-ray binary phase, and print your result.time_xrb = df1['tphys'].iloc[-1] - df1['tphys'].iloc[0] print (time_xrb, "Myr")1.309999999999972 MyrThe parameters we will need to calculate the accretion luminosity are ```mass_2```, which represents the black hole mass, and ```deltam_2```, which represents the rate of change in the black hole mass. One way we can acquire luminosity information is to loop through the dataframe, row by row, and calculate the luminosity at each time step using our function.First, write a ```for``` loop that goes through each row of the data frame and applies your luminosity function at each iteration. Save your calculated luminosities in an array. Print your final array of luminosities to check that they are reasonable. Reasonable luminosities should fall between $10^{30}$ and $10^{45}$ $erg$ $s^{-1}$. *Hint: you will need to use a special iterator for your dataframe. 
Try the pandas function iterrows().*def calc_luminosity_loop(current_BH_mass, initial_BH_mass, mdot_BH): ## phsyical constants c = 2.99e10 ## speed of light in cm/s secyr = 3.154e7 ## seconds per year Myr = 1e6 ## years per Myr Msun = 1.989e33 ## grams per solar mass bolometric_correction = 0.8 if current_BH_mass > np.sqrt(6)*initial_BH_mass: eta = 0.42 else: eta = 1 - np.sqrt(1-(current_BH_mass/(3*initial_BH_mass))**2) acc_rate = mdot_BH/(1-eta) luminosity = bolometric_correction*eta*acc_rate*c**2*Msun/secyr return luminosity acc_lum_loop = [] M_bh_0 = df1['mass_2'].iloc[0] for index, row in df1.iterrows(): M_bh = row['mass_2'] Mdot_bh = row['deltam_2'] luminosity = calc_luminosity_loop(M_bh, M_bh_0, Mdot_bh) acc_lum_loop.append(luminosity) print (acc_lum_loop)[1.1253823492823668e+35, 1.1408901655388753e+35, 1.1566749933938256e+35, 1.1727424293171707e+35, 1.189098170691318e+35, 1.205748015670417e+35, 1.2226978628376603e+35, 1.2399537106417342e+35, 1.257521656592063e+35, 1.2754078961913838e+35, 1.293618721582208e+35, 1.3121605198824428e+35, 1.3310397711835939e+35, 1.350263046183022e+35, 1.369837003420074e+35, 1.3897683860834096e+35, 1.4100640183552084e+35, 1.4307308012550807e+35, 1.4517757079446097e+35, 1.4732057784503908e+35, 1.4950281137611946e+35, 1.5172498692516261e+35, 1.539878247382079e+35, 1.5629204896211678e+35, 1.5863838675340488e+35, 1.610275672976188e+35, 1.6346032073286615e+35, 1.6593737697074791e+35, 1.6845946440752246e+35, 1.7102730851793114e+35, 1.7364163032371813e+35, 1.763031447283878e+35, 1.7901255870933556e+35, 1.81770569358e+35, 1.8457786175819872e+35, 1.874351066923311e+35, 1.9034295816462324e+35, 1.9330205073007557e+35, 1.9631299661726803e+35, 1.9937638263264203e+35, 2.024927668333835e+35, 2.0566267495548505e+35, 2.08886[...]Now that we have the accretion luminosity data for our binary, let's plot it to see how the system's emission changes over time. Using ```matplotlib```, make a time series plot using the ```tphys``` column from the simulation output along with your array of luminosities. Make sure to properly label your plot! *Hint: Use a log scale when plotting your luminosities so that you can better visualize the emission behavior.*fig, ax = plt.subplots(figsize=(8,6)) plt.plot(df1['tphys'], np.log10(acc_lum_loop)) plt.xlabel("Time (Myr)", size=13) plt.ylabel("Accretion Luminosity (log$_{10}$ erg/s)", size=13) plt.title("Accretion Luminosity vs. Time", size=14) plt.show()One question researchers might ask is if a given binary system is observable. For a system to be observed by an x-ray observatory, it must reach a maximum luminosity above an observable threshold. Let's take our observable threshold to be $10^{37} erg$ $s^{-1}$, which is a common threshold used for the Chandra Observatory. Using your calculations, find the maximum luminosity of your binary system. Is it observable?print (np.max(acc_lum_loop), "erg/s")1.0352989720162052e+39 erg/s**Applying Your Luminosity Function to Data: Array Method** It turns out that ```for``` loops are too computationally slow to analyze very large files or many files in one run. Luckily, we can speed-up our code by passing arrays to our luminosity function instead of passing values at each timestep of the data. This will allow us to calculate all luminosities for the binary in one step! However, this method will require some alteration to your luminosity function.Primarily, now that your function takes arrays as parameters instead of values, you can no longer use conditional logic to determine what $\eta$ should be. 
Instead, you should use boolean array logic, which is a standard application of ```numpy``` arrays. For example, let's say I have two arrays of the same size called ```x``` and ```y```. If want to know which values of ```x``` are less than their corresponding values in ```y```, I can write:```x_lower_values = x < y``````x_lower_values``` is an array of the same size as ```x``` and ```y```. It it contains ```True/False``` entries that tell me if the corresponding entry in ```x``` is less than that in ```y```. Another useful tool that might help you rework your luminosity function is ```np.where()```. This function takes a boolean array (such as ```x_lower_values``` in the example above) and returns an array that yields entires with two possible outcomes. The outcomes are supplied by you and depend on if a given boolean array entry is ```True``` or ```False```. Copy and paste your old luminosity function below. Then, using the tools described above and/or any other methods you prefer, alter you luminosity function so that it takes array parameters and then calculates and returns an array of accretion luminosities using array operations.def calc_luminosity_arrays(current_BH_mass, initial_BH_mass, mdot_BH): ## phsyical constants c = 2.99e10 ## speed of light in cm/s secyr = 3.154e7 ## seconds per year Myr = 1e6 ## years per Myr Msun = 1.989e33 ## grams per solar mass bolometric_correction = 0.8 where_lower_masses = current_BH_mass < np.sqrt(6)*initial_BH_mass eta_lower_masses = 1 - np.sqrt(1-(current_BH_mass/(3*initial_BH_mass))**2) eta = np.where(where_lower_masses, eta_lower_masses, 0.42) acc_rate = mdot_BH/(1-eta) luminosity = bolometric_correction*eta*acc_rate*c**2*Msun/secyr ## accretion luminosity in erg/sec return luminosityNow, apply your new and improved function to the data set above. You should now only need one line of code to calculate your array of accretion luminosities. Print your final array of luminosities to check that they are reasonable. *Hint: you may need to create an array for $M_{BH,0}$ that is the same length as your arrays for $M_{BH}$ and $\dot M_{BH}$ in order to pass it to your function.*M_bh_0 = np.ones(len(df1['mass_2']))*df1['mass_2'].iloc[0] acc_lum_array = calc_luminosity_arrays(df1['mass_2'], M_bh_0, df1['deltam_2']) print (acc_lum_array)0 1.125382e+35 1 1.140890e+35 2 1.156675e+35 3 1.172742e+35 4 1.189098e+35 ... 127 1.032864e+39 128 1.029047e+39 129 1.023241e+39 130 1.015235e+39 131 1.004801e+39 Name: deltam_2, Length: 132, dtype: float64To double check that your improved function is working correctly, make another time series plot using ```matplotlib```. This time, plot the accretion luminosities you obtained with the ```for``` loop method as well as those you obtained with the array method. They should appear to be the same line on your plot! Make sure to properly label your plot.fig, ax = plt.subplots(figsize=(8,6)) plt.plot(df1['tphys'], np.log10(acc_lum_loop)) plt.plot(df1['tphys'], np.log10(acc_lum_array)) plt.xlabel("Time (Myr)", size=13) plt.ylabel("Accretion Luminosity (log$_{10}$ erg/s)", size=13) plt.title("Accretion Luminosity vs. Time", size=14) plt.show()One fun way to examine the computational difference between your two luminosity functions is to time how long each of them takes to run. Copy and paste your pieces of code that run your different luminosity functions below. Import the python ```time``` module and use the ```time()``` function to determine the runtime of each code segment. 
Print both runtimes to compare them.import time start = time.time() M_bh_0 = np.ones(len(df1['mass_2']))*df1['mass_2'].iloc[0] acc_lum_array = calc_luminosity_arrays(df1['mass_2'], M_bh_0, df1['deltam_2']) end = time.time() array_time = end-start print ("Arrays: ", array_time, "sec") start = time.time() acc_lum = [] M_bh_0 = df1['mass_2'].iloc[0] for index, row in df1.iterrows(): M_bh = row['mass_2'] Mdot_bh = row['deltam_2'] luminosity = calc_luminosity_loop(M_bh, M_bh_0, Mdot_bh) acc_lum.append(luminosity) end = time.time() loop_time = end-start print ("For Loop: ", loop_time, "sec")Arrays: 0.002843141555786133 sec For Loop: 0.014995574951171875 secEven though the difference in runtimes may seem small, it is actually quite noticeable when applied to many systems. For example, one standard simulation run contains a population of ~50,000 binaries. Calculate how long each method would take to analyze the luminosities of a full population of binaries, and convert the resulting times (in seconds) to more useful units.print ("Arrays: ", 50000*array_time/60, "min") print ("For Loop: ", 50000*loop_time/60, "min")Arrays: 2.3692846298217773 min For Loop: 12.496312459309896 minNow, suppose we run a total of 10 different simulations, and need to analyze the luminosities of all systems in each simulation. How long does each method take to complete these calculations? Were the improvements to your luminosity function worth it?print ("Arrays: ", 10*50000*array_time/60, "min") print ("For Loop: ", 10*50000*loop_time/60/60, "hrs")Arrays: 23.692846298217773 min For Loop: 2.082718743218316 hrs**Bonus: Extended Data File** The file ```cosmic_data_1.csv``` is actually a very small portion of a full simulation output file. Normally, researchers want information about the entire evolution of a binary system so that characteristics of its behavior, such as its merge status, emission properties, etc. are placed in context with one other. This allows correlations to be drawn between evolutionary behaviors and facilitates a more complete understanding of how binary evolution works.Read-in the simulation data file ```cosmic_data_2.csv```, also located in this project folder, and store it as a ```pandas``` dataframe (it may take a couple of minutes to upload). Open the file and examine its format, including column titles, the number of rows, etc.from google.colab import files uploaded = files.upload() import pandas as pd import numpy as np df2 = pd.read_csv("cosmic_data_2.csv")This file is clearly far more massive than the previous one - and it is still a reduced version of the full simulation output! To work with the complete data set, we would need a supercomputer. However, this reduction will be suitable for the remainder of our investigation. The bonus portion of this project will involve identifying the x-ray binary phase of the system ourselves and applying our luminosity function to the appropriate subset of the extended data file. First, we need to identify when the x-ray binary phase begins. We can do this by finding the timestep when the first black hole is formed, which is recorded in the ```kstar_1``` and ```kstar_2``` columns of the data file. If ```kstar_1``` or ```kstar_2``` equals $14$, then that respective object is a black hole. The x-ray binary phase begins when the first black hole's accretion rate becomes greater than zero.Write a piece of code that identifies the timestep or index of the data file at which each object in the binary becomes a black hole. 
Store each index as a variable.BH1_index = np.where(df2['kstar_1'] == 14)[0][0] BH2_index = np.where(df2['kstar_2'] == 14)[0][0]Using conditional logic, compare the indicies or timesteps you found above, and determine which one occurred first. Then, using this information, identify the data columns for $M_{BH}$ and $\dot{M}_{BH}$ that correspond to the first black hole object. Also store the first and second black hole formation indicies as separate variables.if BH2_index > BH1_index: first_BH = BH1_index second_BH = BH2_index BH_obj = "kstar_1" BH_mass = "mass_1" BH_mdot = "deltam_1" else: first_BH = BH2_index second_BH = BH1_index BH_obj = "kstar_2" BH_mass = "mass_2" BH_mdot = "deltam_2"Now, using the appropriate data columns, identify the index when the black hole accretion rate becomes positive. This index signifies the beginning of the x-ray binary phase. Store this index as a separate variable.XRB_start = np.where(df2[BH_mdot] > 0)[0][0]The x-ray binary phase terminates when the black hole accretion rate is no longer positive. Using similar methods as above, identify the index when the x-ray binary phase ends. Store this index as a separate variable. To check that you indicies make sense, print the indicies of black hole formation as well as those identifying the x-ray binary phase.XRB_end = np.where(df2[BH_mdot] > 0)[0][-1] print ("XRB phase begins: ", XRB_start) print ("XRB phase ends: ", XRB_end) print ("First BH forms: ", first_BH) print ("Second BH forms: ", second_BH)XRB phase begins: 12 XRB phase ends: 127 First BH forms: 12 Second BH forms: 128Cut the data you need to calculate the accretion luminosity using the indices you found above so that the data begins and ends with the x-ray binary phase. Store your reduced data in separate arrays.M_bh_0_value = df2[BH_mass][first_BH] Mbh = df2[BH_mass][XRB_start:XRB_end] Mdot_bh = df2[BH_mdot][XRB_start:XRB_end]You should now be able to pass your reduced data arrays directly to your function! Do this to calculate the accretion luminosity of the binary during the x-ray binary phase, and print the resulting array of luminosities. Determine if this system is observable.M_bh_0 = np.ones(len(Mbh))*M_bh_0_value acc_lum_extended = calc_luminosity_arrays(Mbh, M_bh_0, Mdot_bh) print (acc_lum_extended, "\n") print ("Maximum luminosity: ", np.max(acc_lum_extended))12 1.191027e+30 13 1.209642e+30 14 1.227897e+30 15 1.246501e+30 16 1.265461e+30 ... 122 9.023618e+38 123 9.013496e+38 124 8.988866e+38 125 8.948127e+38 126 8.889543e+38 Name: deltam_2, Length: 115, dtype: float64 Maximum luminosity: 9.023618244430517e+38As a last step, compare your new luminosities to those you found above with the ```cosmic_data_1.csv``` data set. Use ```matplotlib``` to plot a time series of the binary emission over time, and label your plot accordingly. *Hint: you will need to apply the same cuts to the ```tphys``` column of ```cosmic_data_2.csv``` that you applied to your luminosity input parameters in order to plot your luminosities.*What are some similarities and differences between the emission of the first and second binary system?time = df2['tphys'][XRB_start:XRB_end] fig, ax = plt.subplots(figsize=(8,6)) plt.plot(df1['tphys'], np.log10(acc_lum_array)) plt.plot(time, np.log10(acc_lum_extended)) plt.xlabel("Time (Myr)", size=13) plt.ylabel("Accretion Luminosity (log$_{10}$ erg/s)", size=13) plt.title("Accretion Luminosity vs. 
Time", size=14) plt.show()Shapelyfrom shapely.geometry import Polygon p1 = Polygon(((1,2), (5,3), (5,7), (1,9), (1,2))) p1 p2 = Polygon(((6,6), (7,6), (10,4), (11,8), (6,6))) p2 from shapely.geometry import Point point = Point(2.0, 2.0) point from shapely.geometry import LineString line = LineString([(0,0), (10,10)]) line from shapely.geometry.polygon import LinearRing ring = LinearRing([(0,0), (3,3), (3,0)]) ring from shapely.geometry import MultiPoint points = MultiPoint([(0,0), (3,3)]) points from shapely.geometry import MultiLineString coords = MultiLineString([((0,0), (1,1)),((-1,0), (1,0))]) coords from shapely.geometry import MultiPolygon polygons = MultiPolygon([p1,p2]) polygons # 면적 구하기 print(p1.area) # 경계 구하기 print(p1.bounds) # 길이 구하기 print(p1.length) # geometry Type 구하기 print(p1.geom_type) import json from shapely.geometry import mapping, shape jData = json.loads('{"type":"Polygon","coordinates":[[[1,1],[1,3],[3,3]]]}') p = shape(jData) p mapping(p) import fiona c = fiona.open(r'data/node.shp') rec = next(iter(c)) print('keys = ', rec.keys()) print('type = ', rec['type']) print('prop = ', rec['properties']) print('id = ', rec['id']) print('geom = ', rec['geometry']) print(len(c)) print(c.driver) print(c.crs) import fiona with fiona.open(r'data/node.shp') as src: print(src[0]) import geopandas as gpd %matplotlib inline df = gpd.read_file(r'data/link.shp') type(df) df.shape df.columns df.loc[0] df['ROAD_NAME'] 서울문산고속도로 = df[df['ROAD_NAME'] == '서울문산고속도로'] 서울문산고속도로 서울문산고속도로.plot(figsize=(7,7)) df.plot(cmap="Set1", figsize=(7,7)) seoul = gpd.read_file(r'data/seoul.shp') seoul.plot() df.crs gpd.sjoin(df, seoul, op='within')Generating new variables# compares variables and generates a new one def compare_human(x): if x["DISAPPROVALS_BL"] == 0 and x["DISAPPROVALS_EN"] == 0: return(0) else: return(1) df["DISAPPROVALS_HUMAN"] = df.apply(compare_human, axis= 1) def compare_exacts(x): if x["DISAPPROVALS_FM"] == 0 and x["DISAPPROVALS_OM"] == 0: return(0) else: return(1) df["DISAPPROVALS_EXACTS"] = df.apply(compare_exacts, axis= 1) # Viewing new data frame df # Converting categorical variable into numeric variable var = ['NOTE_BL_CAT', 'NOTE_EN_CAT', 'NOTE_FM_CAT', 'NOTE_OM_CAT'] count = 0 for i in var: df[count]= df[i].map({"(-1, 2]": 2, "(2, 4]": 4, "(4, 6]": 6, "(6, 8]": 8, "(8, 10]": 10}) count += 1 df.rename(columns={0:'NOTE_BL_CAT_N', 1:'NOTE_EN_CAT_N', 2:'NOTE_FM_CAT_N', 3:'NOTE_OM_CAT_N'}, inplace=True) # checking data type df.dtypes # Arranging dataframe columns a = ['DISAPPROVALS_BL', 'DISAPPROVALS_EN', 'DISAPPROVALS_HUMAN', 'DISAPPROVALS_FM','DISAPPROVALS_OM', 'DISAPPROVALS_EXACTS', 'NOTE_BL', 'NOTE_EN', 'NOTE_FM', 'NOTE_OM', 'NOTE_BL_CAT', 'NOTE_EN_CAT', 'NOTE_FM_CAT', 'NOTE_OM_CAT', 'NOTE_BL_CAT_N', 'NOTE_EN_CAT_N', 'NOTE_FM_CAT_N', 'NOTE_OM_CAT_N', 'ENGLISH', 'H_CLASS_PRES', 'ONLINE_TASKS', 'ABSENCES', 'PROFILE_N'] df = df.reindex(columns= a) df.shape df.isna().sum() # Saving data df.to_csv("Data/data3.csv", index= False) # creating new dataframe and removing from categorical variables df1 = df.drop(['NOTE_BL_CAT', 'NOTE_EN_CAT', 'NOTE_FM_CAT', 'NOTE_OM_CAT'], axis= 1) df1 df1.shape # Checking data type df1.dtypes df1.isna().sum()Applying feature selectionx = df1.iloc[:, 0:18] y = df1.PROFILE_N x.shape y.shape feature_selection = pd.DataFrame(SelectKBest(chi2, k= 10).fit_transform(x, y)) feature_selection["TARGET"]= y feature_selection.head(15) # saving feature selection in csv format feature_selection.to_csv("Data/selection_variables.csv", index= False)**Lab 3** Asking a 
Statistical Question **Problem 1**We are performing an experiment where the temperature is critical to its reliability. For this particular experiment, the target temperature is 12 Kelvin. The thermal control system, when working correctly, gives temperatures distributed around 12 K with a standard deviation of 0.4 K. We would like to throw out pieces of data where the thermal control system is not working correctly, and to be extra cautious also throw out pieces of data that may read differently than their true temperature.%matplotlib inline import numpy as np import matplotlib import matplotlib.pyplot as plt import scipy from scipy import stats plt.rcParams["figure.figsize"] = (20,15)**A)**1. Start by exploring the data you have created with appropriate plots.fig,(ax1,ax2) = plt.subplots(1,2) #Setting the seed np.random.seed(123456) #Generating Data d = stats.norm.rvs(loc = 12., scale = 0.4, size = 100000) ax1.hist(d, np.arange(10,14,0.2)) #Adding Bad Data bad = [10., 10.3, 2.1, 0., 0., 15.6, 22.3, 12.7] d = np.append(d,bad) ax2.hist(d, np.arange(0,22.3,0.2)) ax2.hist(bad, np.arange(0,22.3,0.2)) ax1.set_yscale('log') ax2.set_yscale('log') ax1.title.set_text('Normal Data') ax2.title.set_text('Normal Data with Bad Data Highlighted') plt.show()2. State the statistical question in words. Be very clear, and describe why you have chosen this statistical question.For each temperature measurement T, what is the probability that a measurement that far or further from the mean would arise from the distribution of temperature values, assuming a Gaussian distribution centered at 12 Kelvin with a standard deviation of 0.4 Kelvin? How does this differentiator perform with a statistical threshold of 3 $\sigma$?3. Restate your question in math. Be very clear.$\DeclareMathOperator\erf{erf}$$\DeclareMathOperator\icdf{icdf}$$$P(T) = 2 \int_{|T-12|}^{\infty}\frac{1}{0.4\sqrt{2\pi}}\,e^{-\frac{1}{2}\left(\frac{x}{0.4}\right)^2}\,dx$$In order to represent that value in $\sigma$, the probability value must be plugged into the inverse cdf to find where it would occur on the standard normal.$$\icdf(p) = \sqrt{2}\erf^{-1}(2p-1)$$4. 
Apply your statistical test and construct a truth table#Maintaining the Same Seed np.random.seed(123456) d = stats.norm.rvs(loc = 12., scale = 0.4, size = 100000) bad = np.array([10., 10.3, 2.1, 0., 0., 15.6, 22.3, 12.7]) #Initializing Counts for Truth Table trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 cutoff = 3 # modifying for absolute delta from mean d = abs(d-12) bad = abs(bad-12) P1 = 2*stats.norm.cdf(-d,scale = 0.4) P1 = 1-P1 sigma1 = stats.norm.ppf(P1,loc=0,scale=1) for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) P2 = 2*stats.norm.cdf(-bad,scale = 0.4) P2 = 1-P2 sigma2 = stats.norm.ppf(P2,loc=0,scale=1) for i in sigma2: if i >= cutoff: badT_bad += 1 else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad)True T predicted True: 99890 True T predicted Bad: 110 Sum: 100000 Bad T predicted True 1 Bad T predicted Bad 7 Sum 8Truth Table with Threshold at $3\sigma$:| | True T | Bad T ||-----------------|--------|-------|| Predicted True: | 99,890 | 1 || Predicted Bad: | 110 | 7 | **B)** How does the number of omissions, where you threw out good data, depend on the statistical threshold you chose and is it a predictable quantity?#Maintaining the Same Seed np.random.seed(123456) d = stats.norm.rvs(loc = 12., scale = 0.4, size = 100000) bad = np.array([10., 10.3, 2.1, 0., 0., 15.6, 22.3, 12.7]) #Initializing Counts for Truth Table trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 cutoff = 2 # modifying for absolute delta from mean d = abs(d-12) bad = abs(bad-12) P1 = 2*stats.norm.cdf(-d,scale = 0.4) P1 = 1-P1 sigma1 = stats.norm.ppf(P1,loc=0,scale=1) print('cutoff is 2 sigma') for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) P2 = 2*stats.norm.cdf(-bad,scale = 0.4) P2 = 1-P2 sigma2 = stats.norm.ppf(P2,loc=0,scale=1) for i in sigma2: if i >= cutoff: badT_bad += 1 else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad) cutoff = 2.5 trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 print() print('cutoff is 2.5 sigma') for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) for i in sigma2: if i >= cutoff: badT_bad += 1 else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad) cutoff = 3.5 trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 print() print('cutoff is 3.5 sigma') for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) for i in sigma2: if i >= cutoff: badT_bad += 1 else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad) cutoff = 4 trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 print() print('cutoff is 4 sigma') for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) for i in sigma2: if i >= cutoff: badT_bad += 1 
else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad)cutoff is 2 sigma True T predicted True: 97711 True T predicted Bad: 2289 Sum: 100000 Bad T predicted True 1 Bad T predicted Bad 7 Sum 8 cutoff is 2.5 sigma True T predicted True: 99370 True T predicted Bad: 630 Sum: 100000 Bad T predicted True 1 Bad T predicted Bad 7 Sum 8 cutoff is 3.5 sigma True T predicted True: 99980 True T predicted Bad: 20 Sum: 100000 Bad T predicted True 1 Bad T predicted Bad 7 Sum 8 cutoff is 4 sigma True T predicted True: 100000 True T predicted Bad: 0 Sum: 100000 Bad T predicted True 1 Bad T predicted Bad 7 Sum 8Truth Table with Threshold at $2\sigma$:| | True T | Bad T ||-----------------|--------|-------|| Predicted True: | 97,711 | 1 || Predicted Bad: | 2289 | 7 |Truth Table with Threshold at $2.5\sigma$:| | True T | Bad T ||-----------------|--------|-------|| Predicted True: | 99,370 | 1 || Predicted Bad: | 630 | 7 |Truth Table with Threshold at $3.5\sigma$:| | True T | Bad T ||-----------------|--------|-------|| Predicted True: | 99,980 | 1 || Predicted Bad: | 20 | 7 |Truth Table with Threshold at $4\sigma$:| | True T | Bad T ||-----------------|--------|-------|| Predicted True: | 100,000| 1 || Predicted Bad: | 0| 7 |The trials above show how the truth tables changed as the $\sigma$ threshold changed. As the threshold increased, the number of pieces of good data that needed to be thrown out decreased. At a threshold of $4\sigma$ the number of good data points thrown out was reduced to zero while the performance in classifying bad data pieces did not change.This quantity is predictable, the probability that corresponds to a $2\sigma$ measurement or a $4\sigma$ measurement is known, so multiplying that probability by the 100,000 data points would give the number of data points you would expect to find outside of the threshold. For example there is a 2.28% chance of the standard normal giving a value $2\sigma$ or more greater than the mean. This probability predicts 2,280 of 100,000 events to be misclassified and my algorithm misclassified 2289. For a measurement of $3\sigma$ or more greater than the mean there is a probability of 0.13% giving 130 out of 100,000 measurements misclassified, and my algorithm misclassified 110. **C)** Are there mistakes of commission (bad data getting in)? If yes, are they avoidable and how do they depend on your statistical threshold?There is 1 common mistake that makes it through the threshold in all of the tests done so far. 
This is the value of 12.7.P = 2*stats.norm.cdf(-0.7,scale = 0.4) P = 1-P sigma = stats.norm.ppf(P,loc=0,scale=1) print(sigma)1.404276170241102In order to avoid this mistake, the threshold would need to be set below $1.404\sigma$; this was found by computing the sigma of the measurement of 12.7, which is 0.7 away from the mean.#Maintaining the Same Seed np.random.seed(123456) d = stats.norm.rvs(loc = 12., scale = 0.4, size = 100000) bad = np.array([10., 10.3, 2.1, 0., 0., 15.6, 22.3, 12.7]) #Initializing Counts for Truth Table trueT_good = 0 trueT_bad = 0 badT_good = 0 badT_bad = 0 cutoff = 1.4 # modifying for absolute delta from mean d = abs(d-12) bad = abs(bad-12) P1 = 2*stats.norm.cdf(-d,scale = 0.4) P1 = 1-P1 sigma1 = stats.norm.ppf(P1,loc=0,scale=1) for i in sigma1: if i >= cutoff: trueT_bad += 1 else: trueT_good += 1 print('True T predicted True:', trueT_good, '\nTrue T predicted Bad:', trueT_bad, '\nSum:', trueT_good+trueT_bad) P2 = 2*stats.norm.cdf(-bad,scale = 0.4) P2 = 1-P2 sigma2 = stats.norm.ppf(P2,loc=0,scale=1) for i in sigma2: if i >= cutoff: badT_bad += 1 else: badT_good += 1 print() print('Bad T predicted True', badT_good,'\nBad T predicted Bad', badT_bad,'\nSum', badT_good+badT_bad)True T predicted True: 91979 True T predicted Bad: 8021 Sum: 100000 Bad T predicted True 0 Bad T predicted Bad 8 Sum 8With the threshold set at $1.4\sigma$ there were no pieces of bad data used, but 8021 more pieces of good data were thrown away in order to remove that 1 piece of data. With over 90,000 pieces of good data remaining, this may be a worthy pursuit if the data at a temperature of 12.7 are verified as having been taken at a temperature unfit for performing the experiment. **Problem 2**When looking for asteroids in consecutive images, the stars in the background don't perfectly align due to atmospheric and instrumental effects, even after factoring in the Earth's rotation and the stars' normal movements. Since we are looking at an image, this results in a two-dimensional distribution; for this example we are assuming a 2D Gaussian with an RMS of 1 arcsecond.np.random.seed(123456) a = np.vstack((stats.norm.rvs( scale = 1, size = 100000), stats.norm.rvs( scale = 1, size = 100000))) a.shape fig, ax = plt.subplots(1, 1) h = ax.hist2d(a[0,:],a[1,:],bins=100, density=True); ax.set_aspect('equal', 'box') plt.xlim([-3 , 3]) plt.ylim([-3 , 3]) plt.title("2D Histogram of positional uncertainty", fontsize = 24) plt.ylabel("$\Delta$y arcseconds", fontsize = 18) plt.xlabel("$\Delta$x arcseconds", fontsize = 18) plt.colorbar(h[3], ax=ax) plt.show()When looking for real movement of an asteroid, we want to get a $5\sigma$ measurement of displacement. What is that distance in arcseconds?1. What is the question in words? What radius from the center corresponds to a measurement with a significance of $5\sigma$?2. What is the question in math? The distribution of the magnitude of a vector composed of two independently generated Gaussian distributed random variables with the same parameters is a Rayleigh distribution, as proved in homework 3. The pdf of a Rayleigh distribution is given below:$$p(x) = \frac{x}{\sigma^2}e^{-\frac{x^2}{2\sigma^2}}$$ In this example, sigma is 1 arcsecond, and the tail probability is the one corresponding to $5\sigma$ on the standard normal distribution.$$P_{5\sigma} = \int_{a}^{\infty}x\,e^{-\frac{x^2}{2}}\,dx = -e^{-\frac{x^2}{2}}\bigg|_{a}^{\infty} = 0 - \left(-e^{-\frac{a^2}{2}}\right) = e^{-\frac{a^2}{2}}$$$$-\frac{a^2}{2} = \ln{(P_{5\sigma})}$$$$\sqrt{-2\ln{(P_{5\sigma})}} = a$$3. 
What distance in arcseconds represents a 5 sigma detection of motion?sig = stats.norm.cdf(5) a = stats.rayleigh.ppf(sig) print('a =',np.round(a,3)) print('ln(P of 5 sigma)=',np.round(np.log(1-sig),4))a = 5.489 ln(P of 5 sigma)= -15.065The answer found numerically above was a distance of 5.489 arcseconds. In order to check it against the formula found above, I also printed the $\ln{(P_{5\sigma})}$ value.$$\sqrt{-2\times(-15.065)} = 5.489$$ **Problem 3**Assume that in a moon-sized patch on the sky we normally have a cosmic ray rate of 1 cosmic ray per minute. If we can observe where the moon is for 8 hours per night and we observe for 15 days and see 6800 cosmic rays, what is the significance of our moon shadow detection?1. What is the question in words? What is the probability that the background Poisson distribution with an average of 1 cosmic ray per minute will give 6800 or fewer cosmic rays over a period of 15 days of 8-hour observations?2. Translate the question to math. $$P = \sum_{x = 0}^{6800} \frac{(n\lambda)^x e^{-n\lambda}}{x!}$$ where $\lambda$ is the mean number of cosmic rays per minute (here 1) and $n$ is the number of minutes in the trial period $= 60\times8\times15 = 7200$. Since each minute can be treated as an independent measurement from the same Poisson distribution, the total count can be represented as a sum of 7200 Poisson variables, each with an average of 1 cosmic ray per minute, which is itself Poisson with mean 7200. $$P = e^{-7200}\sum_{x=0}^{6800} \frac{7200^x}{x!}$$3. Convert to sigma.p = stats.poisson.cdf(6800,7200) s = stats.norm.ppf(p) print('sigma:', np.round(s,3))sigma: -4.751todos: send cal, pressure/time, temp/chlorimport pandas as pd pd.DataFrame(mesh_grid_chl).to_csv("chlor.csv") t=pd.DataFrame(num2date(time_grid,'Days since 0001-1-1'),columns=['datetime'],dtype='datetime64')LINEAR REGRESSION WITH TENSORFLOW In this notebook we will overview the implementation of Linear Regression with TensorFlow.Table of Contents Linear Regression Linear Regression with TensorFlow Linear RegressionA linear regression, defined in simple terms, is the approximation of a linear model used to describe the relationship between two or more variables. In a simple linear regression there are two variables: the dependent variable, which can be seen as the "state" or "final goal" that we study and try to predict, and the independent variable, also known as the explanatory variable, which can be seen as the "cause" of the "state". When more than one independent variable is present the process is called multiple linear regression. When multiple dependent variables are predicted the process is known as multivariate linear regression.The equation of a simple linear model is$$Y = a X + b $$where Y is the dependent variable, X is the independent variable, and a and b are the parameters we adjust. a is known as the "slope" or "gradient" and b is the "intercept". 
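As a quick worked example, with a slope of a = 2 and an intercept of b = 1, an input of X = 3 gives Y = 2 × 3 + 1 = 7. 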
You can interpret this equation as Y being a function of X, or Y being dependent on X.If you plot the model, you will see it is a line, and by adjusting the "slope" parameter you will change the angle between the line and the independent variable axis, and the "intercept parameter" will affect where it crosses the dependent variable's axis.Let's first import the required packages:import matplotlib.pyplot as plt import pandas as pd import pylab as pl import numpy as np import tensorflow as tf import matplotlib.patches as mpatches import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6)/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint8 = np.dtype([("qint8", np.int8, 1)]) /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:521: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint16 = np.dtype([("qint16", np.int16, 1)]) /home/jupyterlab/conda/envs/python/lib/python3[...]Let's define the independent variable:X = np.arange(0.0, 5.0, 0.1) X ##You can adjust the slope and intercept to verify the changes in the graph a = 1 b = 0 Y= a * X + b plt.plot(X, Y) plt.ylabel('Dependent Variable') plt.xlabel('Indepdendent Variable') plt.show()OK... but how can we see this concept of linear relations with a more meaningful point of view?Simple linear relations were used to try to describe and quantify many observable physical phenomena, the easiest to understand are speed and distance traveled: $$Distance Traveled = Speed \times Time + Initial Distance$$$$Speed = Acceleration \times Time + Initial Speed$$ They are also used to describe properties of different materials: $$Force = Deformation \times Stiffness$$$$Heat Transfered = Temperature Difference \times Thermal Conductivity$$$$Electrical Tension (Voltage) = Electrical Current \times Resistance$$$$Mass = Volume \times Density$$ When we perform an experiment and gather the data, or if we already have a dataset and we want to perform a linear regression, what we will do is adjust a simple linear model to the dataset, we adjust the "slope" and "intercept" parameters to the data the best way possible, because the closer the model comes to describing each ocurrence, the better it will be at representing them.So how is this "regression" performed? Linear Regression with TensorFlowA simple example of a linear function can help us understand the basic mechanism behind TensorFlow.For the first part we will use a sample dataset, and then we'll use TensorFlow to adjust and get the right parameters. 
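Before moving to the TensorFlow version, here is a minimal sketch (not part of the original notebook) of recovering a slope and intercept from noisy synthetic data with ordinary least squares, using only NumPy:

```python
import numpy as np

# Synthetic data drawn from a known line, y = 3x + 2, plus Gaussian noise
x = np.arange(0.0, 5.0, 0.1)
y = 3.0 * x + 2.0 + np.random.normal(scale=0.5, size=x.size)

# np.polyfit with deg=1 returns the closed-form least-squares [slope, intercept]
a_hat, b_hat = np.polyfit(x, y, deg=1)
print("estimated slope: %.2f, intercept: %.2f" % (a_hat, b_hat))
```

The TensorFlow workflow below arrives at the same kind of estimate iteratively, which is what makes it practical for larger models where no closed-form solution is available.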
We download a dataset that is related to fuel consumption and Carbon dioxide emission of cars.!wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv--2020-08-04 14:24:01-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 172.16.17.32 Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|172.16.17.32|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 72629 (71K) [text/csv] Saving to: ‘FuelConsumption.csv’ FuelConsumption.csv 100%[===================>] 70.93K --.-KB/s in 0.06s 2020-08-04 14:24:01 (1.25 MB/s) - ‘FuelConsumption.csv’ saved [72629/72629]Understanding the DataFuelConsumption.csv:We have downloaded a fuel consumption dataset, FuelConsumption.csv, which contains model-specific fuel consumption ratings and estimated carbon dioxide emissions for new light-duty vehicles for retail sale in Canada. Dataset source- **MODELYEAR** e.g. 2014- **MAKE** e.g. Acura- **MODEL** e.g. ILX- **VEHICLE CLASS** e.g. SUV- **ENGINE SIZE** e.g. 4.7- **CYLINDERS** e.g 6- **TRANSMISSION** e.g. A6- **FUEL CONSUMPTION in CITY(L/100 km)** e.g. 9.9- **FUEL CONSUMPTION in HWY (L/100 km)** e.g. 8.9- **FUEL CONSUMPTION COMB (L/100 km)** e.g. 9.2- **CO2 EMISSIONS (g/km)** e.g. 182 --> low --> 0df = pd.read_csv("FuelConsumption.csv") # take a look at the dataset df.head()Lets say we want to use linear regression to predict Co2Emission of cars based on their engine size. So, lets define X and Y value for the linear regression, that is, train_x and train_y:train_x = np.asanyarray(df[['ENGINESIZE']]) train_y = np.asanyarray(df[['CO2EMISSIONS']])First, we initialize the variables a and b, with any random guess, and then we define the linear function:a = tf.Variable(20.0) b = tf.Variable(30.2) y = a * train_x + bNow, we are going to define a loss function for our regression, so we can train our model to better fit our data. In a linear regression, we minimize the squared error of the difference between the predicted values(obtained from the equation) and the target values (the data that we have). In other words we want to minimize the square of the predicted values minus the target value. So we define the equation to be minimized as loss.To find value of our loss, we use tf.reduce_mean(). This function finds the mean of a multidimensional tensor, and the result can have a different dimension.loss = tf.reduce_mean(tf.square(y - train_y))Then, we define the optimizer method. The gradient Descent optimizer takes in parameter: learning rate, which corresponds to the speed with which the optimizer should learn; there are pros and cons for increasing the learning-rate parameter, with a high learning rate the training model converges quickly, but there is a risk that a high learning rate causes instability and the model will not converge. Please feel free to make changes to learning parameter and check its effect. On the other hand decreasing the learning rate might reduce the convergence speed, but it would increase the chance of converging to a solution. You should note that the solution might not be a global optimal solution as there is a chance that the optimizer will get stuck in a local optimal solution. Please review other material for further information on the optimization. 
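To illustrate the learning-rate trade-off described above, the following sketch (not part of the original notebook; it assumes the `loss` tensor defined above and the TF1 `tf.train` API) runs a short, fixed number of plain gradient-descent steps at a few different learning rates. A rate that is too large can blow up to NaN, while a very small rate converges slowly:

```python
# Hypothetical side experiment: compare a few learning rates on the same loss.
for lr in (0.5, 0.05, 0.005):
    step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    with tf.Session() as s:
        s.run(tf.global_variables_initializer())
        for _ in range(50):              # 50 updates per learning rate
            s.run(step)
        print("lr = %.3f, loss = %s" % (lr, s.run(loss)))
```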
Here we will use a simple gradient descent with a learning rate of 0.05:optimizer = tf.train.GradientDescentOptimizer(0.05)Now we will define the training method of our graph, what method we will use for minimize the loss? We will use the .minimize() which will minimize the error function of our optimizer, resulting in a better model.train = optimizer.minimize(loss)Don't forget to initialize the variables before executing a graph:init = tf.global_variables_initializer() sess = tf.Session() sess.run(init)Now we are ready to start the optimization and run the graph:loss_values = [] train_data = [] for step in range(100): _, loss_val, a_val, b_val = sess.run([train, loss, a, b]) loss_values.append(loss_val) if step % 5 == 0: print(step, loss_val, a_val, b_val) train_data.append([a_val, b_val])0 26992.594 77.07106 46.110275 5 1891.7205 58.84462 47.59573 10 1762.7241 57.65104 53.019833 15 1653.5897 56.36652 58.023922 20 1559.0441 55.172844 62.68204 25 1477.1372 54.061794 67.01765 30 1406.179 53.027664 71.05309 35 1344.7057 52.065136 74.809135 40 1291.4506 51.169243 78.30512 45 1245.3145 50.33538 81.559074 50 1205.3451 49.55925 84.58775 55 1170.7189 48.83685 87.40674 60 1140.7214 48.164467 90.03055 65 1114.734 47.53864 92.472694 70 1092.2203 46.956135 94.74576 75 1072.7163 46.413967 96.86146 80 1055.8193 45.909332 98.83067 85 1041.1812 45.439632 100.66355 90 1028.4996 45.002453 102.36953 95 1017.5135 44.595547 103.95739Lets plot the loss values to see how it has changed during the training:plt.plot(loss_values, 'ro')Lets visualize how the coefficient and intercept of line has changed to fit the data:cr, cg, cb = (1.0, 1.0, 0.0) for f in train_data: cb += 1.0 / len(train_data) cg -= 1.0 / len(train_data) if cb > 1.0: cb = 1.0 if cg < 0.0: cg = 0.0 [a, b] = f f_y = np.vectorize(lambda x: a*x + b)(train_x) line = plt.plot(train_x, f_y) plt.setp(line, color=(cr,cg,cb)) plt.plot(train_x, train_y, 'ro') green_line = mpatches.Patch(color='red', label='Data Points') plt.legend(handles=[green_line]) plt.show()[Curso de Redes Neuronales](https://curso-redes-neuronales-unison.github.io/Temario/) Una red neuronal convolucional simple [****](http://mat.uson.mx/~juliowaissman/), 2 de octubre de 2018.En esta libreta se muestra el ejemplo básico para una red convolucionalaplicada al conjunto de datos [MNIST](http://yann.lecun.com/exdb/mnist/).La estructura de la red que vamos a hacer está inspirada (y es básicamente la misma) que la arquitectura conocisa como [LeNet 5](https://engmrk.com/lenet-5-a-classic-cnn-architecture/), la cual fue una de las primeras redes neuronales convolucionales exitosas.import numpy as np import tensorflow as tf1. Cargar datosPrimero cargamos los archivos que se utilizan para el aprendizaje. Para otro tipo de problemas, es necesario hacer un proceso conocido como *Data Wrangling*, que normalmente se realiza con la ayuda de *Pandas*.from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)Para que un aprendizaje tenga sentido es necesario tener bien separado un conjunto de datos de aprendizaje y otro de prueba (en caso de grandes conjuntos de datos es la opción). Como vemos tanto las imágenes como las etiquetas están separados en archivos de datos y de aprendizaje.El objeto `mnist` es un objeto tensorflow que contiene 3 objetos tipo tensorflow: *test*, *train* y *validation*, los cuales a su vez contienen *ndarrays* de *numpy*. La estructura es la misma para cada conjunto de datos. 
Veamos su estructura:print("Tipo de images: {}".format(type(mnist.train.images))) print("Tipo de epochs_completed: {}".format(type(mnist.train.epochs_completed))) print("Tipo de labels: {}".format(type(mnist.train.labels))) print("Tipo de next_batch: {}".format(type(mnist.train.next_batch))) print("Tipo de num_examples: {}".format(type(mnist.train.num_examples)))Como generar el conjunto de datos para ser utilizado dentro de TensorFlow es objeto de otra libreta. Por el momento concentremonos en como hacer una red neuronal rápido y sin dolor.Sin embargo, vamos a ver unos cuantos datos que nos pueden ser de útilidad para la construcción de la red neuronal.print("Forma del ndarray con las imágenes: {}".format(mnist.train.images.shape)) print("Forma del ndarray con las etiquetas: {}".format(mnist.train.labels.shape)) print("-" * 79) print("Número de imagenes de entrenamiento: {}".format(mnist.train.images.shape[0])) print("Tamaño de las imagenes: {}".format(mnist.train.images.shape[1])) print("Clases diferentes: {}".format(mnist.train.labels.shape[1]))2. Construcción de la red neuronal Para hacer una red neuronal lo más genérica posible y que podamos reutilizar en otros proyectos, vamos a establecer los parámetros base independientemente de la inicialización de la red, independientemente de la forma en que construimos la red. Comencemos por establecer una función genérica que nos forme una red neuronal con dos capas convolucionales y una capa oculta densa posterior. No agrego más comentarios porque, con la experiencia de las libretas anteriores, la construcción de la red neuronal se explica sola.def cnn_mnist(x, pesos, sesgos, dropout): """ Genera una red neuronal de dos capas para usar en TensorFlow Parámetros ---------- pesos: un diccionario con cuatro etiquetas: 'c1', 'c2', w1' y 'wo' en donde cada una es una tf.Variable conteniendo una matriz con los pesos tanto de los filtros convolucionales 'c1' y'c2' como de las capas densas 'w1' y 'wo' sesgos: un diccionario con cuatro etiquetas: 'b1', 'b2, 'b3' y 'bo' en donde cada una es una tf.Variable conteniendo un vector de dimensión [numero_de_neuronas_capa] dropout: Un flotante 0 <= dropout <= 1 con el porcentaje de dropout en la capa densa oculta Devuelve -------- Un ops de tensorflow que calcula la salida de una red neuronal con dos capas convolucionales, una capa oculta, y activaciones RELU. 
""" # Ajusta el vector de entrada de (-1, 784) que son los ejemplos que entran # a la red para reconocimiento y/o aprendizaje, a un tensor de dimensión # [-1, 28, 28, 1] (el último 1 implica que es una imagen BW, o una sola capa) x = tf.reshape(x, shape=[-1, 28, 28, 1]) # Primera capa convolucional con activación ReLU y max_pool de 2X2 capa_1 = tf.nn.conv2d(x, pesos['c1'], strides=[1, 1, 1, 1], padding='SAME') capa_1 = tf.nn.bias_add(capa_1, sesgos['b1']) capa_1 = tf.nn.relu(capa_1) capa_1m = tf.nn.max_pool(capa_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Segunda capa convolucional con activación ReLU capa_2 = tf.nn.conv2d(capa_1m, pesos['c2'], strides=[1, 1, 1, 1], padding='SAME') capa_2 = tf.nn.bias_add(capa_2, sesgos['b2']) capa_2 = tf.nn.relu(capa_2) capa_2m = tf.nn.max_pool(capa_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Desenrollamos las imágenes para poder aplicar capas densas capa_2desen = tf.reshape(capa_2m, [-1, pesos['w1'].get_shape().as_list()[0]]) # Capa oculta densa con activación ReLU capa_3 = tf.matmul(capa_2desen, pesos['w1']) capa_3 = tf.add(capa_3, sesgos['b3']) capa_3 = tf.nn.relu(capa_3) # Se le agrega Dropout por la generalización capa_3 = tf.nn.dropout(capa_3, dropout) # Capa de salida con activación lineal # En Tensorflow la salida es siempre lineal, y luego se especifica # la función de salida a la hora de calcularla como vamos a ver # más adelante capa_salida = tf.matmul(capa_3, pesos['wo']) capa_salida = tf.add(capa_salida, sesgos['bo']) return capa_salidaY ahora necesitamos poder generar los datos de entrada a la red neuronal dealguna manera posible. Afortunadamente sabemos exactamente que necesitaos, asíque vamos a hacer una función que nos genere las variables de peso y sesgo.Por el momento, y muy a la brava, solo vamos a generarlas con números aletorios con una distribución $\mathcal{N}(0, 1)$.def inicializa_pesos(filtro1, filtro2, n1, n2, n3): """ Genera un diccionario con pesos para ser utilizado en la función red_neuronal_dos_capas_ocultas Parámetros ---------- filtro1: (x, y) tupla con el tamaño del primer filtro convolucional filtro2: (x, y) tupla con el tamaño del segundo filtro convolucional n1: Número de imágenes generadas con el primer filtro n2: Número de imagenes generadas con el segundo filtro n3: Número de neuronas en la capa oculta Asumimos que es una red para MNIST, por lo que la entrada son imágenes de 28 x 28 x 1 (BW) y que tenemos 10 valores de salida diferentes Como hacemos maxpool de 2x2, las imagenes despues de la primer capa convolucional seran de 14 x 14, y las imágenes que salgan de la segunda capa convolucional serán de 7 x 7 Devuelve -------- Dos diccionarios, uno con los pesos por capa y otro con los sesgos por capa """ pesos = { 'c1': tf.Variable(tf.random_normal([filtro1[0], filtro1[1], 1, n1])), 'c2': tf.Variable(tf.random_normal([filtro2[0], filtro2[1], n1, n2])), 'w1': tf.Variable(tf.random_normal([7 * 7 * n2, n3])), 'wo': tf.Variable(tf.random_normal([n3, 10])) } sesgos = { 'b1': tf.Variable(tf.random_normal([n1])), 'b2': tf.Variable(tf.random_normal([n2])), 'b3': tf.Variable(tf.random_normal([n3])), 'bo': tf.Variable(tf.random_normal([10])) } return pesos, sesgosAhora necesitamos establecer los parámetros de la topología de la red neuronal. 
Tomemos en cuenta que estos prámetros los podríamos haber establecido desdela primer celda, si el fin es estar variando los parámetros para escoger los que ofrezcan mejor desempeño.num_entradas = 784 # Lo sabemos por la inspección que hicimos a mnist num_salidas = 10 # Ídem # Aqui es donde podemos jugar filtro1 = (5, 5) filtro2 = (5, 5) n1 = 32 n2 = 64 n3 = 1024¡A construir la red! Para esto vamos a necesitar crear las entradascon un placeholder, y crear nuestra topología de red neuronal.Observa que la dimensión de x será [None, num_entradas], lo que significa que la cantidad de renglones es desconocida (o variable).# La entrada a la red neuronal x = tf.placeholder("float", [None, num_entradas]) # Los pesos y los sesgos w, b = inicializa_pesos(filtro1, filtro2, n1, n2, n3) # El valor de dropout que puede variar en el tiempo dp = tf.placeholder(tf.float32) # Crea la red neuronal estimado = cnn_mnist(x, w, b, dp)Parecería que ya está todo listo. Sin ambargo falta algo muy importante: No hemos explicadoni cual es el criterio de error (loss) que vamos a utilizar, ni cual va a ser el método deoptimización (aprendizaje) que hemos decidido aplicar.Primero definamos el costo que queremos minimizar, y ese costo va a estar en función de loestimado con lo real, por lo que necesitamos otra entrada de datos para los datos de salida.Sin ningun lugar a dudas, el costo que mejor describe este problema es el de *softmax*# Creamos la variable de datos de salida conocidos y = tf.placeholder("float", [None, num_salidas]) # Definimos la función de costo costo = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=estimado, labels=y))Y ahora definimos que función de aprendizaje vamos a utilizar. Existen muchas funcionesde aprendizaje en tensorflow, las cuales se pueden consultar en `tf.train.`. Entre lasexistentes podemos ver algunas conocidas del curso como descenso de gradiente simple,momento, rprop, rmsprop entre otras. Casi todas las funciones de optimización (aprendizaje)acaban su nombre con `Optimize`.En este caso vamos a usar un método comocido como el *algoritmo de Adam* el cual se puede consultar [aqui](http://arxiv.org/pdf/1412.6980.pdf). El metodo utiliza dos calculosde momentos diferentes, y por lo visto genera resultados muy interesantes desde el punto de vista práctico.¿Cual es el mejor método? Pues esto es en función de tu problema y de la cantidad de datos que tengas.Lo mejor es practicar con varios métodos para entender sus ventajas y desventajas.En todo caso el método de optimización requiere que se le inicialice con una tasa de aprendizaje.alfa = 0.001 optimizador = tf.train.AdamOptimizer(learning_rate=alfa) paso_entrenamiento = optimizador.minimize(costo)3. Ejecutar la sesión usando mini-batchesAhora, ya que la red neuronal está lista vamos a ejecutar la red utilizando el algoritmo deAdam pero en forma de mini-batches. Con el fin de tener control sobre el problema, vamos a establecer un número máximo de epoch (ciclos de aprendizaje), el tamaño de los mini-batches, y cada cuandos epoch quisieramos ver como está evolucionando la red neuronal.Como entrenar una red neuronal no tiene sentido, si no es porque la queremos usar para reconocer,no tendría sentido entrenarla y luego perderla y tener que reentrenar en cada ocasión. Recuerda que cuandose cierra la sesión se borra todo lo que se tenía en memoria. 
Para esto vamos a usar una ops especial llamada `Saver`, que permite guardar en un archivo la red neuronal y después utilizarla en otra sesión (en otro script, computadora, ....).archivo_modelo = "simple/cnn_simple" saver = tf.train.Saver()Como todo se ejecuta dentro de una sesión, no es posible hacerlo por partes (si usamos el `with` que debería ser la única forma en la que iniciaramos una sesión). Por lo tanto procuraré dejar comentado el código.numero_epochs = 5 tamano_minibatch = 100 display_step = 1 dropout = 0.5 # Muy importante la primera vez que se ejecuta inicializar todas las variables init = tf.global_variables_initializer() # La manera correcta de iniciar una sesión y realizar calculos with tf.Session() as sess: sess.run(init) # Ciclos de entrenamiento for epoch in range(numero_epochs): # Inicializa el costo promedio de todos los minibatches en 0 avg_cost = 0. # Calcula el número de minibatches que se pueden usar total_batch = int(mnist.train.num_examples/tamano_minibatch) # Por cada minibatch for i in range(total_batch): # Utiliza un generador incluido en mnist que obtiene # tamano_minibatch ejemplos selecionados aleatoriamente del total batch_x, batch_y = mnist.train.next_batch(tamano_minibatch) # Ejecuta la ops del paso_entrenamiento para aprender # y la del costo, con el fin de mostrar el aprendizaje _, c = sess.run( [paso_entrenamiento, costo], feed_dict={x: batch_x, y: batch_y, dp: dropout} ) # Calcula el costo del minibatch y lo agrega al costo total avg_cost += c / total_batch # Muestra los resultados if epoch % display_step == 0: print (("Epoch: " + str(epoch)).ljust(20) + ("Costo: " + str(avg_cost))) # Guarda la sesión en el archivo rnn2.cptk saver.save(sess, archivo_modelo, global_step=i) print("Se acabaron los epochs, saliendo de la sesión de tensorflow.")Ahora vamos a revisar que tan bien realizó el aprendizaje cuando se aplica la red adatos queno se usaron para entrenamiento. Para esto vamos a utilizar dos ops extas: una para definir la operaración de datos bien estimados o mal estimados, y otra paracalcular el promedio de datos bien estimados. Para calcular los datos bien estimados vamos a utilizar `tf.cast` que permite ajustar los tiposal tipo tensor.prediction_correcta = tf.equal( tf.argmax(estimado, 1), tf.argmax(y, 1) ) precision = tf.reduce_mean(tf.cast(prediction_correcta, "float"))Ahora si, vamos a abrir una nueva sesión, vamos a restaurar los valores de la sesión anterior,y vamos a ejecutar el grafo con el fin de evaluar la ops precision, pero ahora con eldiccionario de alimentación con los datos de prueba.with tf.Session() as sess: sess.run(init) saver.restore(sess, archivo_modelo) porcentaje_acierto = sess.run( precision, feed_dict={ x: mnist.test.images, y: mnist.test.labels, dp: dropout } ) print("Precisión: {}".format(porcentaje_acierto))Example workflow of process sensitivity analiysis using 1D groundawter flow model This jupyter file applies both variance-based sensitivity analysis (VBSA) and mutil-model difference-based sensitivity analysis(MMDS) to a one-dimensional groundwater flow model revised after Dai et al. (2017). An unconfined aquifer with a length $L=10,000$ m is adjacent to two constant head boundaries and a uniform precipitation infiltrates to the top of the aquifer. The aquifer is under steady state condition and the groundwater discharge per unit width at the location $x_0 = 7,000$ m is our quantity of interest. 
The hydrological model is quite simple and the groundwater discharge per unit width can be solved analytically, so it is computationally affordable for the Monte Carlo simulation. For more details about the model settings, readers are referred to Yang et al. (2020).Prepared by and at Florida State University, 2020. Mail to: . 1. Load basic modulesimport numpy as np import numba as nb2. Define the multiple system model configuration 2.1 Define the process models Two rainfall recharge models ($R_1$ and $R_2$) are used to simulate the recharge process that converts precipitation $[inch \times yr^{-1}]$ to groundwater recharge $[m \times d^{-1}]$, and they are: $$R_1:w=a\times(P-14)^{0.5}\times25.4\times0.001\div365$$ $$R_2:w=b\times(P-15.7)\times25.4\times0.001\div365$$where a and b are scaling parameters which are assumed to follow the normal distribution, N(2.0, 0.4), and the uniform distribution, U(0.2, 0.5), respectively; P is the annual precipitation, which is set to be a constant of 60 $[inch\times yr^{-1}]$.# process model: rechrg_lin @nb.njit def rechrg_lin_func(P, a): """ Compute recharge[m/d] using recharge model R1 by Chaturvedi(1936) """ return a * (P - 14)**0.5 * 25.4 * 0.001 / 365 # process model: rechrg_power @nb.njit def rechrg_power_func(P, b): """ Compute recharge[m/d] using recharge model R2 by (1970) """ return b * (P - 15.7) * 25.4 * 0.001 / 365Hydraulic conductivity (K) $[m\times d^{-1}]$ over the domain is parameterized by two geology process models, i.e., a single layer model (G1) and a double layer model (G2): $$G_1:K=K_1=K_2$$ $$G_2:K=\begin{cases}K_1 & \text{for x < 7000m}\\K_2 & \text{for x >= 7000m}\end{cases}$$ In model $G_1$, the aquifer is assumed to be homogeneous and the hydraulic conductivity follows the lognormal distribution, LN(2.9, 0.5). Model $G_2$ is parametrized into two zones split at the location $x_0=7000$ m. The hydraulic conductivities $K_1$ of zone 1 (x < 7000 m) and $K_2$ of zone 2 (x >= 7000 m) are assumed to follow the lognormal distributions, LN(2.6, 0.3) and LN(3.2, 0.3), respectively.# process model: geol_single @nb.njit def geol_single_func(K): """ Single layer geological model """ return K, K # process model: geol_double @nb.njit def geol_double_func(K1, K2): """ Double layer geological model """ return K1, K2We assume there is no gauge station at the river, so the river stage $h_2$ is unknown. An empirical rating curve function is used to convert the discharge into river stage:$$h_2=0.3\times Q^{0.6}+289$$ where $Q$ denotes the river discharge $[m^3 \times s^{-1}]$, which can be calculated from the snowmelt runoff process at the upstream catchment: $$Q=C_{sn}\times M\times SVC \times A \times \frac{0.001}{86400}$$where $C_{sn}$ denotes the runoff coefficient expressing the losses as a ratio; M is the snow melt rate $[mm \times d^{-1}]$; SVC is the ratio of snow-covered area to the total area; A is the area of the upper catchment $[m^2]$; 0.001/86400 is the conversion factor from $[m\times d^{-1}]$ to $[m^3 \times s^{-1}]$. Fixed values $C_{sn} = 0.8$, $SVC = 0.7$ and $A = 2000$ $km^2$ are used to characterize the upstream catchment conditions. Thus, the only unknown factor is the snowmelt rate, M. Two different snowmelt functions, i.e., the degree-day method and the restricted degree-day radiation balance approach (Hock 2003, Kustas et al. 
1994), are used to evaluate the daily snow melt depth [mm]:$$M_1:M=f_1\times(T_a-T_m)$$$$M_2:M=f_2\times(T_a-T_m)+r\times R_n$$ where $f_1$ and $f_2$ are snowmelt factors $[mm \times$℃$^{-1}\times d^{-1}]$, which are assumed to follow the normal distributions N(3.5, 0.75) and N(2.5, 0.3), respectively. ${T_a}$ [℃] is the average temperature for a given day and $T_m$ [℃] is the temperature threshold at which snow melt occurs, which is typically set to 0. The second equation considers the effect of the surface radiation budget, $R_n$ $[W \times m^{-2}]$, using a conversion factor r for converting energy flux density to snowmelt depth. The factor $r$ $[mm \times d^{-1}\times (W\times m^{-2})^{-1}]$ is assumed to follow the normal distribution, N(0.3, 0.05). $T_a$ and $R_n$ are set to be 7 ℃ and 80 $W \times m^{-2}$, respectively, in this case.# process model: snomlt_degree @nb.njit def snomlt_degree_func(Ta, Tm, Csn, SVC, A, z0, f1): """ Compute river stage h2 [m] using the degree-day method """ M = f1 * (Ta - Tm) Q = Csn * M * SVC * A * 0.001 / 86400 return 0.3 * Q**0.6 + z0 # process model: snomlt_restrcd @nb.njit def snomlt_restrcd_func(Ta, Tm, Csn, Rn, SVC, A, z0, f2, r): """ Compute river stage h2 [m] using the restricted degree-day radiation balance method """ M = f2 * (Ta - Tm) + r * Rn Q = Csn * M * SVC * A * 0.001 / 86400 return 0.3 * Q**0.6 + z02.2 Define the system model function The analytical solution for the groundwater discharge per unit width at a given location x, $q(x)$, can be obtained from:$$q(x)=K_1\frac{h_1^2-h_2^2}{2(x_0-\lambda x_0+\lambda L)}-\frac{1}{2}w\frac{x_0^2-\lambda x_0^2+\lambda L^2}{x_0-\lambda x_0+\lambda L} + wx$$where $\lambda=K_1/K_2$.# system model @nb.njit def dschrg_func(x0, L, h1, w, K1, K2, h2): """ Compute discharge per unit width [m2/d] at x=x0 using the analytical solution """ lamda = K1 / K2 return K1 * (h1**2 - h2**2) / (2 * (x0 - lamda * x0 + lamda * L)) - 1/2 * w * (x0**2 - lamda * x0**2 + lamda * L**2) / (x0 - lamda * x0 + lamda * L) + w * x02.3 Define the model frames# Import sammpy import sammpy as sm # System model class model = sm.model() model.name = 'gwmodel' model.frames = {'names' : ['rechrg', 'geol', 'snomlt'], 'options': [['rechrg_lin', 'rechrg_power'], ['geol_single', 'geol_double'], ['snomlt_degree', 'snomlt_restrcd']], 'weights': [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]} # Constant variables used in the simulations model.env = {'x0' : 7000, 'L' : 10000, 'h1' : 300, 'P' : 60, 'Ta' : 7, 'Tm' : 0, 'Csn' : 0.8, 'SVC' : 0.7, 'A' : 2000 * 1e6, 'z0' : 289, 'Rn' : 80} # Random variables used in the simulation model.pars = {'names': ['a', 'b', 'K', 'K1', 'K2', 'f1', 'f2', 'r'], 'bounds':[[2.0, 0.4], [0.2, 0.5], [2.9, 0.5], [2.6, 0.3], [3.2, 0.3], [3.5, 0.75], [2.5, 0.3], [0.3, 0.05]], 'dists': ['norm', 'unif', 'lognorm', 'lognorm', 'lognorm', 'norm', 'norm', 'norm']} # system model functions model.func = dschrg_func3. Generate outputs with parametric and process model uncertainty 3.1 Generate the parameter realizations The samples are generated using SALib with Saltelli's sampling scheme. Saltelli's scheme extends the Sobol sequence in a way that reduces the error rates in the resulting sensitivity index calculations. 
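For reference, the underlying SALib call looks roughly like the sketch below (illustrative only: the notebook delegates sampling to `model.sample`, and the three-parameter problem definition here is hypothetical, since SALib's plain dictionary format treats the bounds as uniform ranges rather than the distributions defined in `model.pars`):

```python
from SALib.sample import saltelli

# Hypothetical problem definition in SALib's dictionary format
problem = {
    'num_vars': 3,
    'names': ['a', 'b', 'K'],
    'bounds': [[1.2, 2.8], [0.2, 0.5], [5.0, 40.0]],  # illustrative ranges only
}

# Saltelli's scheme returns N * (2 * num_vars + 2) rows when second-order
# indices are requested: here 500 * (2 * 3 + 2) = 4000 parameter sets.
param_values = saltelli.sample(problem, 500, calc_second_order=True)
print(param_values.shape)
```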
For more detials, readers are refered to https://salib.readthedocs.io/en/latest/index.html.N = 500 # Number of sample realizations param_values = model.sample(nobs=N, method='saltelli', seed=933090936)The distribution of the generated parameters can be dispalyed using sammpy.plotting.from sammpy.plotting import hist hist.plot(model, param_values)3.2 Generate the outputs The outputs are generated using considering both parametric and process model uncertainty. The outputs are stored in an array of whihc the size is 2 x N x 2 x N x 2 x N in this case, corresponding to the two recharge process models and their N parameter realizaitons, the two geology process models and their N parameter realizations, and the two snowmelt process models and their N parameter realizations, respectively. The advantage of storing data in this way is we can compute all the varianced-based sensitivy indices including the first-order, total-effect, second-order and higher-order ones of all processes using this array. The disadvantage is storing such a array requirs a large memory size since it is high demensional.# Retrive the costant parameter values Ma = len(model.frames['options'][0]) # Number of alterantive models for recharge process Mb = len(model.frames['options'][1]) # Number of alterantive models for geloogy process Mc = len(model.frames['options'][2]) # Number of alterantive models for snow melt process x0 = model.env['x0'] L = model.env['L'] h1 = model.env['h1'] P = model.env['P'] Ta = model.env['Ta'] Tm = model.env['Tm'] Csn = model.env['Csn'] SVC = model.env['SVC'] A = model.env['A'] z0 = model.env['z0'] Rn = model.env['Rn'] # Calculate model outputs using numba to accelerate @nb.njit(parallel=True, fastmath=True) def cmpt_Dscs(): # Initilize an array Y = np.zeros((Ma, N, Mb, N, Mc, N)) for i in range(Ma): for j in nb.prange(N): if i == 0: a = param_values[j, 0] w = rechrg_lin_func(P, a) else: b = param_values[j, 1] w = rechrg_power_func(P, b) for k in range (Mb): for l in nb.prange(N): if k == 0: K = param_values[l, 2] K1, K2 = geol_single_func(K) else: K1 = param_values[l, 3] K2 = param_values[l, 4] K1, K2 = geol_double_func(K1, K2) for m in range(Mc): for n in nb.prange(N): if m == 0: f1 = param_values[n, 5] h2 = snomlt_degree_func(Ta, Tm, Csn, SVC, A, z0, f1) else: f2 = param_values[n, 6] r = param_values[n, 7] h2 = snomlt_restrcd_func(Ta, Tm, Csn, Rn, SVC, A, z0, f2, r) Y[i, j, k, l, m, n] = dschrg_func(x0, L, h1, w, K1, K2, h2) return Y Y = cmpt_Dscs()4. Process sensitivity analysis The sensitivity of the parameters under individual system model can be performed using many sensitivity analysis tools such as SALib thus it is not shown here. Below, we use the model outputs generated in the last step to evaluate the process sensitivity by using both variance-based method and the difference-based method. 4.1 Sensitivity analysis using variance-based method By decomposing the variance, the first-order process sensitivity index is defined by Dai et al. (2017) as$$PS_K=\frac{V_{M_K}(E_{M_{\sim K}}[\Delta|M_K])}{V(\Delta)}$$The total effect process sensitivty index is defined by Yang et al. (2020) as$$PS_{TK}=\frac{E_{M_{\sim K}}(V_{M_{K}}[\Delta|M_{\sim K}])}{V(\Delta)}$$The sensitivy indices including ones of both first-order and total effect can be computed using the function of vbsa.analysis. 
The function takes the model object and output values as input parameters and returns a dictionary with keys "PSK" and "PSTK", corresponding to the first-order and total-effect process sensitivity indices, respectively. Each entry is an array whose size equals the number of processes. The "print_to_console" argument controls the display of the computing information.from sammpy.analyze import vbsa Ret_vbsa = vbsa.analyze(model, Y, print_to_console=True)Runing VBSA first-order process sensitivy analysis... Runing VBSA total-effect process sensitivy analysis... Process PSK PSTK rechrg 0.1456 0.1490 geol 0.0699 0.2133 snomlt 0.6411 0.7811The first-order process sensitivity indices of the three processes are 14.56%, 6.99%, and 64.11%, and the total-effect indices are 14.90%, 21.33%, and 78.11%, respectively. The results can be visualized using bar plots.from sammpy.plotting import bar bar.plot(model, Ret_vbsa)4.2 Sensitivity analysis using difference-based method The purpose of the difference-based sensitivity analysis is very similar to that of the total-effect process sensitivity analysis, that is, to screen out the non-influential processes to simplify the model. Inspired by the Morris elementary effect, the mean and variance of the absolute output difference caused by the variation (uncertainty) of a process K when the other processes are fixed can be used to measure the sensitivity of this process:$$E(d\Delta|K)=E_{\sim K}E_{P_K}(d\Delta|\sim K)=E_{M_{\sim K}}E_{P_{M_K}}(d\Delta|M_{\sim K})$$ $$V(d\Delta|K)=E_{\sim K}V_{P_K}(d\Delta|\sim K)+ V_{\sim K}E_{P_K}(d\Delta|\sim K)=E_{M_{\sim K}}V_{P_{M_K}}(d\Delta|M_{\sim K}) + V_{M_{\sim K}}E_{P_{M_K}}(d\Delta|M_{\sim K})$$from sammpy.analyze import mmds Ret_mmds = mmds.analyze(model, Y, print_to_console=True)Runing MMDS difference-based process sensitivy analysis... Process mean variance rechrg 0.6197 0.2201 geol 0.6630 0.4252 snomlt 1.3031 1.4685The means of the output difference for the three processes are 0.6197, 0.6630, and 1.3031, and the variances are 0.2201, 0.4252, and 1.4685, respectively. The results can be visualized using a scatter plot.from sammpy.plotting import dotty dotty.plot(model, Ret_mmds)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PennNGG/Quantitative-Neuroscience/blob/master/Concepts/Python/Frequentist%20Versus%20Bayesian%20Approaches.ipynb) Definition Debates between frequentists and Bayesians have carried on for years, touching on issues that are in some cases very [practical](https://www.ejwagenmakers.com/2008/BayesFreqBook.pdf) and in other cases much more [philosophical](http://www.stat.columbia.edu/~gelman/research/published/philosophy.pdf). The goal here is not to dive deeply into all of those debates but rather to introduce you to the basic issues, because they are at the heart of what we can and cannot do with statistics.More specifically, the two camps differ fundamentally on how to interpret randomness, which profoundly affects the kinds of inferences that can be drawn on the basis of noisy data:A **frequentist** thinks of probability only in terms of the frequency of many repeated events that include some element of randomness. **To a frequentist, assigning a probability to a singular event that can either happen or not happen, particularly one that is not directly or yet measured, is nonsensical** ("There is no place in our system for speculations concerning the probability that the sun will rise tomorrow" -- ). 
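To make the idea of probability as long-run frequency concrete, here is a small sketch (not part of the original notebook) that estimates a tail probability purely by counting repeated experiments under a null hypothesis:

```python
import numpy as np

# Simulate many repeated experiments of 100 fair-coin flips and count how
# often a result at least as extreme as 60 heads occurs.
rng = np.random.default_rng(1)
heads = rng.binomial(n=100, p=0.5, size=100_000)
p_value = np.mean(heads >= 60)   # long-run frequency of data this extreme
print("p(>= 60 heads | fair coin) ~= %.4f" % p_value)
```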
As a consequence of these ideas, a frequentist operates on the conditional distribution of the data, assuming that a hypothesis is true. That is, one makes a series of repeated measurements (the data) under fixed conditions, obtaining what is essentially a histogram. Inferences about the nature of the process that generated the data then allow only for this definition of randomness or uncertainty: the obtained variability in the data. Questions of the form "What is the probability that process x generated my data?" are undefined in this framework, because a probability cannot be assigned to an unknown and unseeable process (or "hypothesis"), only to repeated measures. Instead, the best you can do is simply assume that a particular process was the one that generated your data, and then ask "What is the probability that I would have obtained my data, assuming that x was the true process?" This question is the basis for null hypotheses (typically defined in terms of the parameters of the probability distribution that you would expect the data to be drawn from under a particular set of assumptions) and p-values: computing the likelihood p(data | null hypothesis).Benefits of this approach are that frequentist-based statistics are typically relatively easy to compute, they require few assumptions, and they tend to promote good experimental design (because you need to very carefully control the conditions under which the data are collected).Drawbacks include the fact that definitions of probability in this framework are often highly counter-intuitive to how most of us think, resulting in results that can be very difficult to interpret. A good example is the concept of a "confidence interval" in frequentist statistics, which is described nicely [here](https://jfiksel.github.io/2018-01-08-explaining-confidence-intervals/).A **Bayesian** thinks of probability as the degree of belief in a proposition (or hypothesis). In this framework, data represent evidence that can support or oppose such a belief, which is represented as a probability distribution. 
Thus, unlike from the frequentist perspective, **from the Bayesian perspective it is perfectly natural to describe the belief (or probability) that particular values of particular parameters of a particular probability distribution (together encompassing a "hypothesis" about the data) are true**.These ideas are derived directly from the definition of joint probability (see [Independence and Lack Thereof](https://colab.research.google.com/drive/1YFwKKkWUjtV6_Nx2upNpFYHJNeXIeQB6) for a related discussion):$P(A\cap B)=p(A|B)\times p(B) = p(B|A)\times p(A)$where $P(A\cap B)$ is read as "the probability that A and B are true" and P(A | B) is read as "the probability that A is true, given that B is true" or just "the probability of A given B."If we call A the Hypothesis and B the Data, and rearrange, we get Bayes' Rule:$P(Hypothesis|Data)=\frac{P(Data|hypothesis)\times P(Hypothesis)}{P(Data)}$Where *P*(*Hypothesis* | *Data*) is called the posterior probability (or just posterior), *P*(*Data* | *Hypothesis*) is the likelihood, *P*(*Hypothesis*) is the prior, and *P*(*Data*) is the marginal probability of the data.Benefits of the Bayesian approach are that it tends to get at the intuitive concepts that one is addressing (e.g., the probability that a hypothesis is true, given the data), and it does so in a rigorous manner.Drawbacks include questions about [how to identify an appropriate prior](https://stats.stackexchange.com/questions/78606/how-to-choose-prior-in-bayesian-parameter-estimation). Getting Started with Code Matlab code is found in the [NGG Statistics GitHub Repository](https://github.com/PennNGG/Statistics.git) under "Concepts/FrequentistVsBayesian.m".Python code is included below. First run the code cell just below to make sure all of the required Python modules are loaded, then you can run the other cell(s).import scipy.stats as st import numpy as npTutorial and Exercises To use this tutorial, read the commands and execute the code cell-by-cell.The learning objective is to gain insights into thinking about inference from a "Frequentist" versus a "Bayesian" perspective. In brief, because a Frequentist does not consider the probability of an event or state of the world or hypothesis, only their frequency of occurrance, it is not possible to ask questions of the form "what is the probabilty that hypothesis x is true?" Instead, one can only consider questions of the form, "what is the probabilty that I would have obtained my data, given that hypothesis x is true?" In contrast, Bayesians consider the probabilities of such things (often called the strength of belief), but doing so can require making assumptions that can be difficult to prove.Let's start with a simple example, taken from:https://en.wikipedia.org/wiki/Base_rate_fallacyExample_1:_HIV"Imagine running an HIV test on A SAMPLE of 1000 persons ...""The test has a false positive rate of 5% (0.05)..." i.e., the probability that someone who takes the test gets a POSITIVE result despite the fact that the person does NOT have HIV"...and no false negative rate." i.e., The probability that someone who takes the test gets a NEGATIVE result despite the fact that the person DOES have HIV Question 1: If someone gets a positive test, is it "statistically significant"? **Answer**: Statistical significance from the Frequentist perspective is typically measured by comparing p to a threshold value; e.g., p<0.05. 
In this case, "p" is shorthand for "the probabilty of obtaining the data under the Null Hypothesis", so we are checking for: $$p(data\,|\,Null\,Hypothesis) < 0.05$$Here we take the Null Hypothesis as "not infected", and the data are just the single positive test. Therefore, the relvant p-value is simply the false-positive rate: p=0.05, which is typically considered "not significant." However, you can also see that it is not particularly informative.N = 10000 # size of the SAMPLE false_positive_rate = 0.05 false_negative_rate = 0 print(f'The probability of obtaining the data under the Null Hypothesis = {false_positive_rate}')The probability of obtaining the data under the Null Hypothesis = 0.05Question 2: What is the probability that if someone gets a positive test, that person is infected? **Answer**: Here we are asking for a different probability: $$p(infected\,|\,positive\,test) = p(hypothesis\,|\,data)$$ which is the "posterior probability" of the hypothesis, given the data.Let's work our way backwards to figure out what information we need to solve this problem. We can compute the probability that someone with a positive test is infected from a particular population as:$$probability\,infected\,given\,positive\,test = \frac{total(infected\,and\,postive)}{total(positive)}$$It should be obvious that to compute this quantity, we need to know the number of people in the population who are actually infected, in addition to knowing the number of people who had a positive test.# So let's start by defining how many in the population are actually infected. # We'll start by assuming that that *real* rate of infection is 0.5 (i.e., # half the POPULATION is infected), and then do a quick simulation to find # out how many in our SAMPLE of N people are infected. We can do this # simulation by by getting N picks from a binomial distribution, where # each pick determines "isInfected" for a single person according to the # assumed rate of infection: is_infected = st.binom.rvs(1, 0.5, size=N) # Now we can count the number infected num_infected = is_infected.sum() # Now we need to count the number of people who got a positive test in this # example. There is no false negative rate, which implies that everyone who # is infected got a positive test: is_positive = np.copy(is_infected) # But there is a non-zero false-positive rate, which implies that some of the # people who are **not** infected will also have a positive test. We can use # binornd again to generate random picks from a binomial distribtuion according # to the false-positive rate: is_positive[is_infected==0] = st.binom.rvs(1, false_positive_rate, size=N-num_infected) # Now we can compute the probability that someone with a positive test is infected: p_is_infected_given_is_positive = (np.logical_and(is_infected==1, is_positive==1).sum())/is_positive.sum() print(f'Probaility infected given a positive test = {p_is_infected_given_is_positive:.4f}')Probaility infected given a positive test = 0.9495Let's do the same thing, but this time we will try different values for the proportion of the population that is actually infected. What you should notice is that the **PROPORTION INFECTED GIVEN A POSITIVE TEST** depends (a lot!) on the **OVERALL RATE OF INFECTION**. Put another way, to determine the probabilty of a hypothesis, given your data (e.g., proportion infected given a positive test), you have to know the probability that the hypothesis was true without any data.Why is this the case? 
It is a simple consequence of the definition of a conditional probability, formulated as Bayes' Rule. Specifically, the joint probability of two events, call them A and B, is defined as: $$p(A\,and\,B) = p(A) \times p(B\,|\,A)$$ $$p(B\,and\,A) = p(B) \times p(A\,|\,B)$$Now, calling A the Hypothesis and B the Data, then rearranging, we get:$$p(Hypothesis\,|\,Data) = \frac{p(Data\,|\,Hypothesis) \times p(Hypothesis)}{p(Data)}$$So you cannot calculate the probability of the hypothesis, given the data (i.e., the Bayesian posterior), without knowing the probability of the hypothesis independent of any data (i.e., the prior)infected_proportions = np.arange(0.0, 1.1, 0.1) for idx, val in enumerate(infected_proportions): # Simulate # infections in the SAMPLE, given the POPULATION rate is_infected = st.binom.rvs(1, val, size=N) # Count the number infected num_infected = is_infected.sum() # Make array of positive tests, given that falseNegativeRate=0 ... is_positive = np.copy(is_infected) # And falsePositiveRate > 0 is_positive[is_infected==0] = st.binom.rvs(1, false_positive_rate, size=N-num_infected) # The probability that someone with a positive test is infected p_is_infected_given_is_positive = (np.logical_and(is_infected==1, is_positive==1).sum())/is_positive.sum() # We can compute the Bayesian Posterior as: # p(hypothesis | data) = (p(data | hypothesis) * p(hypothesis)) / p(data) # Note that we are using the true rate from the full POPULATION, so these predictions will differ slightly from the probability computed above (pIsInfectedGivenIsPositiveTest) from the SAMPLE p_data_given_hypothesis = 1 - false_negative_rate p_hypothesis = val p_data = is_positive.sum()/is_positive.size p_hypothesis_given_data = (p_data_given_hypothesis * p_hypothesis) / p_data # Compute the theoretial posterior probability: print(f'Infection rate={val:.1f}, proportion infected given a positive test={p_is_infected_given_is_positive:.3f}, Posterior={p_hypothesis_given_data:.3f}')Infection rate=0.0, proportion infected given a positive test=0.000, Posterior=0.000 Infection rate=0.1, proportion infected given a positive test=0.708, Posterior=0.714 Infection rate=0.2, proportion infected given a positive test=0.843, Posterior=0.833 Infection rate=0.3, proportion infected given a positive test=0.906, Posterior=0.881 Infection rate=0.4, proportion infected given a positive test=0.929, Posterior=0.943 Infection rate=0.5, proportion infected given a positive test=0.951, Posterior=0.948 Infection rate=0.6, proportion infected given a positive test=0.969, Posterior=0.963 Infection rate=0.7, proportion infected given a positive test=0.982, Posterior=0.987 Infection rate=0.8, proportion infected given a positive test=0.990, Posterior=0.995 Infection rate=0.9, proportion infected given a positive test=0.995, Posterior=0.996 Infection rate=1.0, proportion infected given a positive test=1.000, Posterior=1.000Plotting Prediction Intervals for Time Series Projects**Author**: **Label**: TIme Series ScopeThe scope of this notebook is to provide instructions on how to plot prediction intervals for time series projects. This script will work for both multi and single time series projects. Requirements- Python version 3.7.3- DataRobot API version 2.19.0. 
Small adjustments might be needed depending on the Python version and DataRobot API version you are using.Full documentation of the Python package can be found here: https://datarobot-public-api-client.readthedocs-hosted.comIt is assumed you already have a DataRobot Time Series Project object. Import Librariesimport datarobot as dr import pandas as pd import numpy as np import matplotlib.pylab as plt import seaborn as sns %matplotlib inlineConnect to DR, Define Project and ModelMake sure you pick a model other than "Recommended for Deployment" as there is no backtesting calculations for that model. You will also need to **load the dataset you used to create the project** in a pandas dataframe called datadr.Client(token = 'YOUR_API_KEY', endpoint = 'YOUR_HOSTNAME') data = pd.read_excel('path/to/file') #The data that was used for the project. change pd.read_ method according to file type project = project = dr.Project.get('YOUR_PROJECT_ID') #Use this with project ID to get your project. model = [m for m in project.get_models()][1] #You can choose whichever model you wish other than the recommended for deploymentDefine function that returns training predictionsBelow function might take a while to finish runningdef get_training_predictions(model): print(f"calculating training predictions for {model.model_type}") try: backtests_pred_job = model.request_training_predictions(data_subset=dr.enums.DATA_SUBSET.ALL_BACKTESTS) backtests_pred_job.wait_for_completion() backtest_predictions = backtests_pred_job.get_result_when_complete().get_all_as_dataframe() except: pass try: holdout_pred_job = model.request_training_predictions(data_subset=dr.enums.DATA_SUBSET.HOLDOUT) holdout_pred_job.wait_for_completion() holdout_predictions = holdout_pred_job.get_result_when_complete().get_all_as_dataframe() except: pass all_predictions = dr.TrainingPredictions.list(model.project_id) my_model_training_predictions = [p for p in all_predictions if p.model_id == model.id] all_prediction_frames = [p.get_all_as_dataframe() for p in my_model_training_predictions] if 'Holdout' in all_prediction_frames[0].partition_id.unique(): holdout_predictions = all_prediction_frames[0] backtest_predictions = all_prediction_frames[1] else: holdout_predictions = all_prediction_frames[1] backtest_predictions = all_prediction_frames[0] return backtest_predictions, holdout_predictions backtest_predictions, holdout_predictions = get_training_predictions(model)calculating training predictions for eXtreme Gradient Boosted Trees Regressor with Early StoppingGet partitioning informationThe script will figure out automatically if this is a single series or a multiseries project.partitioning = dr.DatetimePartitioning.get(project.id) try: series_id_column = partitioning.multiseries_id_columns[0].replace(' (actual)','') is_multiseries = True except: is_multiseries = False print('This seems to be a single Series Project') date_column = partitioning.datetime_partition_column.replace(' (actual)','') target_column = project.target.replace(' (actual)','')Define function that calculates errorsBetween predictions made by DataRobot and actual values (with intervals).def calculate_errors(backtest_preds, holdout_preds): holdout_preds['timestamp'] = pd.to_datetime(holdout_preds['timestamp'], utc=False).dt.tz_localize(None) holdout_preds['forecast_point'] = pd.to_datetime(holdout_preds['forecast_point'], utc=False).dt.tz_localize(None) if is_multiseries == True: holdout_preds.rename(columns={'series_id':series_id_column, 'timestamp':date_column}, inplace=True) 
else: holdout_preds.rename(columns={'timestamp':date_column}, inplace=True) backtest_preds['timestamp'] = pd.to_datetime(backtest_preds['timestamp'], utc=False).dt.tz_localize(None) backtest_preds['forecast_point'] = pd.to_datetime(backtest_preds['forecast_point'], utc=False).dt.tz_localize(None) if is_multiseries == True: backtest_preds.rename(columns={'series_id':series_id_column, 'timestamp':date_column}, inplace=True) else: backtest_preds.rename(columns={'timestamp':date_column}, inplace=True) if is_multiseries == True: holdout_preds_joined = holdout_preds.set_index([date_column, series_id_column]).join(data.set_index([date_column, series_id_column])).reset_index() backtest_preds_joined = backtest_preds.set_index([date_column, series_id_column]).join(data.set_index([date_column, series_id_column])).reset_index() else: holdout_preds_joined = holdout_preds.set_index([date_column]).join(data.set_index([date_column])).reset_index() backtest_preds_joined = backtest_preds.set_index([date_column]).join(data.set_index([date_column])).reset_index() if is_multiseries == True: errors = backtest_preds_joined.groupby(['forecast_distance', series_id_column])[['prediction', target_column]].apply(lambda df: pd.Series(np.abs(df['prediction'] - df[target_column]).std())) errors.columns=['prediction_interval'] holdout_preds_joined = holdout_preds_joined.set_index(['forecast_distance',series_id_column]).join(errors).reset_index() else: errors = backtest_preds_joined.groupby(['forecast_distance'])[['prediction', target_column]].apply(lambda df: pd.Series(np.abs(df['prediction'] - df[target_column]).std())) errors.columns=['prediction_interval'] holdout_preds_joined = holdout_preds_joined.set_index(['forecast_distance']).join(errors).reset_index() holdout_preds_joined['error_high'] = holdout_preds_joined['prediction'] + 2*holdout_preds_joined['prediction_interval'] holdout_preds_joined['error_low'] = holdout_preds_joined['prediction'] - 2*holdout_preds_joined['prediction_interval'] return holdout_preds_joined prediction_data = calculate_errors(backtest_predictions, holdout_predictions)Define function to plot prediction intervalsdef plot_predictions(prediction_data, data, forecast_point, history): if is_multiseries == True: n_cols = data[series_id_column].drop_duplicates().shape[0]//2 fig, axs = plt.subplots(n_cols,2, figsize=(20,30)) plot_counter = 1 for c, df in prediction_data.groupby(series_id_column): ax = plt.subplot(n_cols,2,plot_counter) plot_counter += 1 pred_data_plot = df[df.forecast_point == forecast_point] all_data_plot = data.loc[(data[series_id_column] == c) & (data.Date > pd.to_datetime(forecast_point) - history)] g = sns.lineplot(data=pred_data_plot, x='Date', y='prediction', label='Prediction', color='green', linewidth=3) ax.fill_between(pred_data_plot.Date.values, pred_data_plot.error_low, pred_data_plot.error_high, alpha=0.3, zorder=-10, color='lightgreen') #sns.lineplot(data=df, x='Date', y='target') g = sns.lineplot(data=all_data_plot[all_data_plot.Date <= forecast_point], x='Date', y=target_column, linewidth=3, color='blue', label='Actual') g = sns.lineplot(data=all_data_plot[all_data_plot.Date > forecast_point], x='Date', y=target_column, linewidth=3, color='blue') g.lines[2].set_linestyle("--") ax.axvline(forecast_point, linestyle='--', c='gray') plt.xticks(rotation=45, ha='right') plt.title(c) #plt.show() plt.tight_layout() return fig else: df = prediction_data fig, axs = plt.subplots(figsize=(15,7)) ax = plt.subplot(1,1,1) pred_data_plot = df[df.forecast_point == forecast_point] 
all_data_plot = data.loc[(data.Date > pd.to_datetime(forecast_point) - history)] g = sns.lineplot(data=pred_data_plot, x='Date', y='prediction', label='Prediction', color='green', linewidth=3) ax.fill_between(pred_data_plot.Date.values, pred_data_plot.error_low, pred_data_plot.error_high, alpha=0.3, zorder=-10, color='lightgreen') #sns.lineplot(data=df, x='Date', y='target') g = sns.lineplot(data=all_data_plot[all_data_plot.Date <= forecast_point], x='Date', y=target_column, linewidth=3, color='blue', label='Actual') g = sns.lineplot(data=all_data_plot[all_data_plot.Date > forecast_point], x='Date', y=target_column, linewidth=3, color='blue') g.lines[2].set_linestyle("--") ax.axvline(forecast_point, linestyle='--', c='gray') plt.xticks(rotation=45, ha='right') plt.tight_layout() plot_predictions(prediction_data, data, forecast_point='2014-06-07', history=pd.Timedelta(1, 'M'))3.3 Tensorflow: Text Sequence Modeling In this section, we will use several high-level python libraries to address a simple textual task. Given an incomplete sentece, we want to predict the next token. First, we will start with the dataset. We will use the [datasets] library from [huggingface]. [datasets] is a well organized collection of several datasets. We will use the [bookcorpus] dataset, which is a collection of several books. 3.3.1 Download Bookcorpus[bookcorpus]: https://huggingface.co/datasets/bookcorpus[huggingface]:https://huggingface.co/[datasets]:https://huggingface.co/docs/datasets/import datasets dataset = datasets.load_dataset("bookcorpus", split="train[:5%]") print(f"# samples: {len(dataset)}") dataset[:10] def str2ascii(string): return [ord(char) for char in string.lower() if char.isascii()] tkn2id = {"PAD":256, "SOS":257, "EOS":258} def ascii2str(lst): return "".join([chr(char) if char<256 else str(char-256) for char in lst]) print("str2ascii(\"hello my fried\") ->", str2ascii("hello my fried")) print("ascii2str(str2ascii(\"hello my fried\")) ->", ascii2str(str2ascii("hello my fried"))) import tensorflow as tf def collate(data): batch = tf.convert_to_tensor([([tkn2id["SOS"]] + str2ascii(x["text"])+[tkn2id["EOS"]]+[tkn2id["PAD"]]*64)[:64] for x in data]) return {"inputs" : batch[:,:-1], "targets" : batch[:,+1:]} tfdataset = dataset.to_tf_dataset(columns=["text"], shuffle=False, label_cols=["targets"], batch_size=16, collate_fn=collate) \ .prefetch(10000) \ .shuffle(10000) \ .repeat()3.3.3 Embedding.Feeding integers from $0$ to $vocab\_size$ is not practical. Meaning that it does not work. To overcome this issue, we map token ids to embedding. Each token is associated to its trainable vector of parameters. During training embeddings learn to represent the token. For example, the figure below shows a possible scenario during training. 3.3.4 Recurrent Neural NetworksRecurrent neural networks (RNNs) are a popular neural network architectures to process sequences. They are fed with one embedding. They process the embedding internally and output their state. At the next step, they are fed with another embedding. Again, they process the embedding internally with the inner state modified by the previous embeddings and so on. There are many kinds of RNNs. One popular choice are Long short-term memory (LSTM). LSTM is a fairly complex layer involving many components. 
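As a quick illustration of the shapes involved (a minimal sketch, not part of the original notebook; the layer sizes here are arbitrary), an Embedding layer followed by a single LSTM with return_sequences=True turns a batch of token ids into one hidden-state vector per position:

import tensorflow as tf

# Toy batch of 16 sequences of 63 token ids (63 = 64 - 1, matching the shifted inputs/targets above).
toy_ids = tf.random.uniform((16, 63), maxval=259, dtype=tf.int32)
embed = tf.keras.layers.Embedding(input_dim=259, output_dim=32)  # 256 ASCII codes plus PAD/SOS/EOS
lstm = tf.keras.layers.LSTM(64, return_sequences=True)

states = lstm(embed(toy_ids))
print(states.shape)  # (16, 63, 64): one 64-dimensional state per token position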
Tensorflow already implements LSTM internally.tf.keras.backend.clear_session() class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.embedding = tf.keras.layers.Embedding(input_dim=257, output_dim=512) self.lstm1 = tf.keras.layers.LSTM(512, dropout=0.1, return_sequences=True) self.lstm2 = tf.keras.layers.LSTM(512, dropout=0.1, return_sequences=True) self.lstm3 = tf.keras.layers.LSTM(512, dropout=0.1, return_sequences=True) self.lstm4 = tf.keras.layers.LSTM(512, dropout=0.1, return_sequences=True) self.lstm5 = tf.keras.layers.LSTM(512, dropout=0.1, return_sequences=True) self.lnrm1 = tf.keras.layers.LayerNormalization() self.lnrm2 = tf.keras.layers.LayerNormalization() self.lnrm3 = tf.keras.layers.LayerNormalization() self.lnrm4 = tf.keras.layers.LayerNormalization() self.lnrm5 = tf.keras.layers.LayerNormalization() self.dense = tf.keras.layers.Dense(257, activation='softmax') def call(self, x): x = self.embedding(x) x = self.lnrm1(x + self.lstm1(x)) x = self.lnrm2(x + self.lstm2(x)) x = self.lnrm3(x + self.lstm3(x)) x = self.lnrm4(x + self.lstm4(x)) x = self.lnrm5(x + self.lstm5(x)) x = self.dense(x) return x optim = tf.keras.optimizers.Adam(learning_rate=0.001) loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, axis=-1) model = MyModel() def loss_fn(batchY, batchP): batchP = tf.reshape(batchP[batchY != tkn2id["PAD"]], (-1,257)) batchY = tf.one_hot(batchY[batchY != tkn2id["PAD"]], depth=257) return loss(batchY, batchP) def accuracy(batchY, batchP): return tf.reduce_mean(tf.cast(tf.argmax(batchP,-1)[batchY != tkn2id["PAD"]] == tf.cast(batchY[batchY != tkn2id["PAD"]],tf.int64), tf.float64)) model.compile(loss=loss_fn, optimizer=optim, metrics=[accuracy]) model.fit(tfdataset, steps_per_epoch=10000, epochs=1, callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath="./local.tf", save_format="tf")]) x = [tkn2id["SOS"]] + str2ascii("only because") for i in range(40): x1 = tf.convert_to_tensor(x, dtype=tf.int64) x2 = tf.expand_dims(x1,0) p1 = model.predict(x2) p2 = tf.argmax(p1,-1)[0] x += [p2[-1].numpy()] print(ascii2str(x))1only because and and and and and and and and and andCase study: Bioinformatics - comparing genomes**Objectives*** In this case study, we will compare two DNA sequences: (1) human; vs. (2) bacteria**Dataset features**Escherichia coli strain U 5/41 16S ribosomal RNA gene, partial sequence* Rows: 37* Columns: 2* File format: fasta* Source: https://www.ncbi.nlm.nih.gov/nuccore/M10098.1?report=fasta Data Science: Visualização de Dados com Python Udemy's course:'DNA is a molecule present in all living beings, which is responsible for storing hereditary characteristics. It is composed of nucleotide sequences, which can be of four types: adenine, thymine, cytosine or guanine."Computerly" speaking we can represent them through 4 letters: A, T, C or G.In this case study, we want to assess whether structures with similar functions (we are using ribosomal RNA sequences) from different organisms have differences. For this we will evaluate the number of nucleotide pairs.' 
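Before following the notebook's dictionary-based counting below, here is a compact sketch of what "counting nucleotide pairs" means for a plain sequence string (standard library only; the example sequence is made up):

from collections import Counter

def count_pairs(seq):
    # Count overlapping dinucleotides, e.g. "AGTTTG" -> AG, GT, TT, TT, TG
    return Counter(seq[i:i+2] for i in range(len(seq) - 1))

print(count_pairs("AGTTTG"))  # Counter({'TT': 2, 'AG': 1, 'GT': 1, 'TG': 1})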
![2018-11-18_22-54-37-e5c469c34cfe0984380af3738ed13a95.svg](attachment:2018-11-18_22-54-37-e5c469c34cfe0984380af3738ed13a95.svg) Opening and reading files# Reading the file as a list skiping the first line human = open("human.fasta").readlines()[1:] bacteria = open("bacteria.fasta").readlines()[1:] # Concating the list elements human = ''.join(human) bacteria = ''.join(bacteria) # For breaklines human = human.replace("\n","") bacteria = bacteria.replace("\n","") # Creating and writing a new html file comparison = open("human_bacteria_comparison.html", "w") print(bacteria)AGTTTGATCATGGCTCAGATTGAACGCTGGCGGCAGGCCTAACACATGCAAGTCGAACGGTAACAGGAAGCAGCTTGCTGCTTTGCTGACGAGTGGCGGACGGGTGAGTAATGTCTGGGAAACTGCCTGATGGAGGGGGATAACTACTGGAAACGGTAGCTAATACCGCATAACGTCGCAAGCACAAAGAGGGGGACCTTAGGGCCTCTTGCCATCGGATGTGCCCAGATGGGATTAGCTAGTAGGTGGGGTAACGGCTCACCTAGGCGACGATCCCTAGCTGGTCTGAGAGGATGACCAGCAACACTGGAACTGAGACACGGTCCAGACTCCTACGGGAGGCAGCAGTGGGGAATATTGCACAATGGGCGCAAGCCTGATGCAGCCATGCGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTGAAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGATGTCGACTTGGAGGTTGTGCCCTTGAGGCGTGGCTTCCGGATAACGCGTTAAGTCGACCGCCTGGGGAGTACGGCCGCAAGGTTAAAACTCAAATGAATTGACGGGGGCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGCAACGCGAAGAACCTTACCTGGTCTTGACATCCACGGAAGTTTTCAG[...]Creating a dictionarycount = {} # Creating a Simple Arrangement of 4 elements taken 2 to 2. for i in ['A', 'T', 'C', 'G']: for j in ['A', 'T', 'C', 'G']: count[i+j] = 0 countCounting human and bacteria nucleotide pairs# Distributing human_count = count bacteria_count = count # Counting how many combinations are there in the human file for k in range(len(human)-1): human_count[human[k]+human[k+1]] += 1 print(human_count) # Counting how many combinations are there in the bacterian file for k in range(len(bacteria)-1): bacteria_count[bacteria[k]+bacteria[k+1]] += 1 print(bacteria_count){'AA': 116, 'AT': 95, 'AC': 97, 'AG': 113, 'TA': 85, 'TT': 113, 'TC': 109, 'TG': 111, 'CA': 87, 'CT': 105, 'CC': 184, 'CG': 170, 'GA': 134, 'GT': 105, 'GC': 155, 'GG': 184} {'AA': 221, 'AT': 159, 'AC': 183, 'AG': 224, 'TA': 149, 'TT': 170, 'TC': 169, 'TG': 219, 'CA': 170, 'CT': 180, 'CC': 258, 'CG': 267, 'GA': 247, 'GT': 198, 'GC': 264, 'GG': 331}Printing on a HTML page# HTML part i = 1 for k in count: transparency = human_count[k]/max(human_count.values()) comparison.write("
"+k+"
\n") if i % 4 == 0: comparison.write("
") i += 1
comparison.close()

import matplotlib.pyplot as plt
import numpy as np

''' Example
chess = np.array([[1,0,1,0,1,0,1,0],
                  [0,1,0,1,0,1,0,1],
                  [1,0,1,0,1,0,1,0],
                  [0,1,0,1,0,1,0,1],
                  [1,0,1,0,1,0,1,0],
                  [0,1,0,1,0,1,0,1],
                  [1,0,1,0,1,0,1,0],
                  [0,1,0,1,0,1,0,1]])
'''

human_transparency = np.array([human_count[k]/max(human_count.values()) for k in count]).reshape(4,4)
print(human_transparency)
plt.figure(figsize=(8,8))
plt.imshow(human_transparency, cmap='gray_r') # gray_r reverse scale
plt.axis(False)
plt.show()

[[0.66767372 0.48036254 0.55287009 0.67673716]
 [0.45015106 0.51359517 0.51057402 0.66163142]
 [0.51359517 0.54380665 0.77945619 0.80664653]
 [0.74622356 0.59818731 0.79758308 1.        ]]

A few advanced statistical methods with Python vs. SAS
- A method in a SAS proc --> a function in a Python package
- A function in a Python package -|-> a method in a SAS proc
The first statement is almost always true; the second is almost never true.
- Python concentrates on machine learning and deep learning methods
- In Python, the outputs are always very lean; the focus is on code efficiency rather than on explanations and tables of results

First example: the SAS LOGISTIC procedure
This procedure fits a logistic regression to data; there are two approaches in Python for applying this method.

Approach with scikit-learn
We use the scikit-learn package.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

Approach with statsmodels
We use the statsmodels package to obtain more detailed results.
from statsmodels.api import Logit

Second example: the SAS REG procedure
This is a linear regression; we will use the same two approaches as for the logistic regression.

Approach with scikit-learn
from sklearn.linear_model import LinearRegression
**Exercise:** Build and fit this model yourself.

Approach with statsmodels
from statsmodels.api import OLS, add_constant
**Exercise:** Build and fit the model yourself. Note that the constant must be added manually.

Going further
The two packages used here contain hundreds of methods that let you test models on your data using the same approach. Python's strength here is its capacity for automating model fitting and tuning.
from sklearn.ensemble import RandomForestClassifier

Confusion matrix
Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class).
This kind of normalization can beinteresting in case of class imbalance to have a morevisual interpretation of which class is being misclassified.Here the results are not as good as they could be as ourchoice for the regularization parameter C was not the best.In real life applications this parameter is usually chosenusing `grid_search`.print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.model_selection import train_test_split from sklearn.metrics import ConfusionMatrixDisplay # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target class_names = iris.target_names # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01).fit(X_train, y_train) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix titles_options = [("Confusion matrix, without normalization", None), ("Normalized confusion matrix", 'true')] for title, normalize in titles_options: disp = ConfusionMatrixDisplay.from_estimator( classifier, X_test, y_test, display_labels=class_names, cmap=plt.cm.Blues, normalize=normalize ) disp.ax_.set_title(title) print(title) print(disp.confusion_matrix) plt.show()Heatmap The `HeatMap` mark represents a 2d matrix of values as a color image. It can be used to visualize a 2d function, or a grayscale image for instance.`HeatMap` is very similar to the `GridHeatMap`, but should be preferred for a greater number of points (starting at around 100x100), to avoid overloading the browser. `GridHeatMap` offers more control (interactions, selections), and is better suited for a smaller number of points.import numpy as np from bqplot import (Figure, LinearScale,ColorScale, Color, Axis, HeatMap, ColorAxis) from ipywidgets import LayoutData Input- `x` is a 1d array, corresponding to the abscissas of the points (size N)- `y` is a 1d array, corresponding to the ordinates of the points (size M)- `color` is a 2d array, $\text{color}_{ij}$ is the intensity of the point $(x_i, y_j)$ (size (N, M))Scales must be defined for each attribute:- a `LinearScale`, `LogScale` or `OrdinalScale` for `x` and `y`- a `ColorScale` for `color`x = np.linspace(-5, 5, 200) y = np.linspace(-5, 5, 200) X, Y = np.meshgrid(x, y) color = np.cos(X**2 + Y**2)Plotting a 2-dimensional function This is a visualization of the function $f(x, y) = \text{cos}(x^2+y^2)$x_sc, y_sc, col_sc = LinearScale(), LinearScale(), ColorScale(scheme='RdYlBu') heat = HeatMap(x=x, y=y, color=color, scales={'x': x_sc, 'y': y_sc, 'color': col_sc}) ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') ax_c = ColorAxis(scale=col_sc) fig = Figure(marks=[heat], axes=[ax_x, ax_y, ax_c], title='Cosine', layout=Layout(width='650px', height='650px'), min_aspect_ratio=1, max_aspect_ratio=1, padding_y=0) figDisplaying an imageThe `HeatMap` can be used as is to display a 2d grayscale image, by feeding the matrix of pixel intensities to the `color` attributefrom scipy.misc import ascent Z = ascent() Z = Z[::-1, :] aspect_ratio = Z.shape[1]/Z.shape[0] col_sc = ColorScale(scheme='Greys', reverse=True) scales = {'color': col_sc}; ascent = HeatMap(color=Z, scales=scales) img = Figure(title='Ascent', marks=[ascent], layout=Layout(width='650px', height='650px'), min_aspect_ratio=aspect_ratio, max_aspect_ratio=aspect_ratio, padding_y=0) 
imgSimple fiber coupling analysis using Zemax's POP *Please feel free to [e-mail](mailto:) any corrections, comments and suggestions to the author ([](http://indranilsinharoy.com/))* Last updated: 12/27/2015License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/) **Reference**The source of this material is from a Zemax webinar called [Understanding Single-Mode Fiber Coupling with Rays and Physical Optics](http://www.zemax.com/support/resource-center/webinars/understanding-single-mode-fiber-coupling-with-rays) hosted by Dr. on 1/29/2013. Please note that if there are any errors in concepts discussed here, it is mostly likely due to my lack of understanding and not made by Zemax, or the host of the above webniar. Also, please note that this article is a part of my own notes (as I learn Zemax), and it has not been sponsored by Zemax.import os import numpy as np import matplotlib.pyplot as plt import pyzdde.zdde as pyz %matplotlib inline ln = pyz.createLink()Load the lens file "Fiber Coupling.zmx" that comes with Zemax as an example of POP computation.zmxfile = 'Fiber Coupling.zmx' lensPath = ln.zGetPath()[1] lensFile = os.path.join(lensPath, 'Physical Optics', zmxfile) ln.zLoadFile(lensFile)In this example, we have a lens (see the LDE and the layout plot below) that has almost no aberrations, a Gaussian TEM00 beam that is propagating in free space starting from the ``STO`` surface to the lens, and then the beam is focused by the lens. The focused beam is the coupled into the receiving fiber at the right end.ln.ipzGetLDE()SURFACE DATA SUMMARY: Surf Type Radius Thickness Glass Diameter Conic Comment OBJ STANDARD Infinity Infinity 0 0 STO STANDARD Infinity 5 4 0 2 STANDARD Infinity 10 N15 16 0 3 STANDARD -25 50 16 -2.25 IMA STANDARD Infinity 2 0 FOCUS AT FIBERThere is just one wavelength defined for the system. The wavelength is 1 $\mu m$ as shown belowln.zGetWaveTuple()We can also see the fields setln.zGetFieldTuple()The lens is truely diffraction limited, as shown by the Seidel aberration coefficients.ln.zGetSeidelAberration()Layout plot The layout plot shows where we will place the input Gaussian beam (at the location of STO). The beam, after passing through the lens should be focused into the fiber. The surface numbers for the stop and fiber are shown in parentheses.ln.zPushLens(1) # was pushed to LDE, and made sure that the "Frame suppress" was check on layout plot lay = ln.ipzCaptureWindow('Lay', percent=15, gamma=0.1, retArr=True) fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111) # Render the array pyz.imshow(lay, cropBorderPixels=(2, 5, 60, 90), fig=fig, faxes=ax) ax.text(85, 75, "Lens", fontsize=12) ax.text(20, 75, "STO (#1)", fontsize=12, rotation='vertical') ax.text(510, 80, "Fiber (#4)", fontsize=12, rotation='vertical') col = (0.08,0.08,0.08) ax.annotate("{}".format('Beam origin'), (25, 163), (25, 230), fontsize=12, arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col, relpos=(0.0,0.5))) ax.annotate("{}".format('Beam focus'), (516, 152), (448, 230), fontsize=12, arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col, relpos=(1.0,0.5))) #ax.set_title('Layout plot', fontsize=14) plt.show()We will now set up the POP analysis and see the irradiance and phase distribution of the input beam at surfaces 1 and 4, using the function ``zSetPOPSettings()``. 
Please refer to the function's docstring for details on the function.The input beam is defined as follows:```Beam type : Gaussian waistMode : TEM00Waist : 2 mm Total power : 1 Watt```The beam waist of the input beam is located at the ``STO`` surface (surface to beam = 0).The receiver fiber settings are as follows:```Beam type : Gaussian waistMode : TEM00Waist : 8 microns (0.008 mm)```The modal radius that the receiving fiber supports is an 8 microns beam waist.sfile = ln.zSetPOPSettings(data=0, startSurf=1, endSurf=1, field=1, wave=1, beamType=0, paramN=((1, 2), (2, 2)), tPow=1, sampx=4, sampy=4, widex=40, widey=40)The above function creates .cfg file (if not provided with one) after setting the appropriate parameters for the POP analysis and returns the full name of the file. Note that we want to analyze the effect of the beam that is propagating from surace 1 to surface 4. However, in the above function we set the `endSurf` to 1 in order to retrieve the POP data at the ``STO`` surface. Also, in the above function call, we did not select the "compute fiber coupling integral" option. This is because we are interested in the fiber coupling computation at the surface 4. Irradiance data at ``STO`` surface:irr_sur1, irrGridDat_sur1 = ln.zGetPOP(settingsFile=sfile, displayData=True)The function returns to data structures --- ``irr_sur1`` contains POP analysis data such as peak irradiance, total power, rayleigh, etc, and ``irrGridDat_sur1`` contains the 2D irradiance plot data. We will plot this data later along with the irradiance and phase data of surface 4 (at the coupling).irr_sur1Irradiance data at surface 4: To get the beam parameters at the surface 4, we will need to modify the ``endSurf`` to 4. Also, since we are interested in fiber coupling integral computation at this surface, we will specify the fiber type and other fiber parameters as shown below.ln.zModifyPOPSettings(settingsFile=sfile, endSurf=4, fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008)))The tuple of zeros in the above indicate that all the settings were successfully modified.irr_sur4, irrGridDat_sur4 = ln.zGetPOP(settingsFile=sfile, displayData=True) irr_sur4From the above we see that the **system efficiency**, which is the energy transported by the optical system is 1. The **receiver efficiency** is 0.999955, and the **coupling efficiency** is 0.999955. Phase data at ``STO`` surface: In order to get the phase information we need to use the function ``zSetPOPSettings()`` once again. This is mainly because there is (currently) no way to modify the "Data" parameter externally. So in PyZDDE, we create a new settings file when everytime we switch between "Phase" and "Irradiance" data. Since we have already retrieved the irradiance data, we will re-use the name of the previous settings file.sfile = ln.zSetPOPSettings(data=1, startSurf=1, endSurf=1, field=1, wave=1, beamType=0, paramN=((1, 2), (2, 2)), tPow=1, sampx=4,sampy=4, widex=40, widey=40) pha_sur1, phaGridDat_sur1 = ln.zGetPOP(settingsFile=sfile, displayData=True) pha_sur1Phase data at surface 4:ln.zModifyPOPSettings(settingsFile=sfile, endSurf=4, fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008))) # Change analysis surface to 4 and add fiber comp pha_sur4, phaGridDat_sur4 = ln.zGetPOP(settingsFile=sfile, displayData=True)Plot of the irradiance and phase data at the two surfaces In the following sections we shall use matplotlib to render the raw data which we have grabbed above. Consequently, will will have to write code for generating the plots. 
The advantage is that we have complete control of how we want to analyze and present the data. For quick notes, you can always use ``zCaptureWindow()`` to grab the POP graphic window from Zemax as shown in an example at the end of this article.fig = plt.figure(figsize=(8,8)) # irradiance data ax = fig.add_subplot(2,2,1) ax.set_title('Irradiance at STO', fontsize=14) irrmax = np.max(irrGridDat_sur1) ext = [-irr_sur1.widthX/2, irr_sur1.widthX/2, -irr_sur1.widthY/2, irr_sur1.widthY/2] ax.imshow(irrGridDat_sur1, extent=ext, origin='lower', cmap=plt.cm.coolwarm, vmin=0, vmax=irrmax) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) ax = fig.add_subplot(2,2,2) ax.set_title('Irradiance at fiber', fontsize=14) irrmax = np.max(irrGridDat_sur4) ext = [-irr_sur4.widthX/2, irr_sur4.widthX/2, -irr_sur4.widthY/2, irr_sur4.widthY/2] ax.imshow(irrGridDat_sur4, extent=ext, origin='lower', cmap=plt.cm.coolwarm, vmin=0, vmax=irrmax) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) # phase data ax = fig.add_subplot(2,2,3) ax.set_title('Phase at source', fontsize=14) ext = [-pha_sur1.widthX/2, pha_sur1.widthX/2, -pha_sur1.widthY/2, pha_sur1.widthY/2] ax.imshow(phaGridDat_sur1, extent=ext, origin='lower', vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) ax = fig.add_subplot(2,2,4) ax.set_title('Phase at fiber', fontsize=14) ext = [-pha_sur4.widthX/2, pha_sur4.widthX/2, -pha_sur4.widthY/2, pha_sur4.widthY/2] ax.imshow(phaGridDat_sur4, extent=ext, origin='lower', vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) fig.tight_layout() plt.show()From the above plots we can see that there is no appreciable change in the phase of the beam. 
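As an aside (not from the webinar), the coupling numbers in this example are consistent with the textbook overlap of two co-axial Gaussian modes with flat, matched phase fronts: for waist radii $w_1$ and $w_2$ the power coupling efficiency is $\eta=\left(\frac{2\,w_1 w_2}{w_1^2+w_2^2}\right)^2$. A quick sanity check of that formula:

# Ideal overlap of two matched-phase, co-axial Gaussian modes (an aside, not PyZDDE output).
def gaussian_coupling(w1, w2):
    return (2 * w1 * w2 / (w1**2 + w2**2)) ** 2

print(gaussian_coupling(1.0, 1.0))  # 1.0: matched waists couple perfectly
# For an ideal lens the focused waist scales as 1/(input waist), so shrinking the input beam
# from 2 mm to 1.5 mm enlarges the focal spot by ~4/3 relative to the 8 micron fiber mode:
print(gaussian_coupling(4/3, 1.0))  # ~0.92, close to the reduced coupling efficiency shown below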
In order to get a better picture, we will plot slices of the 2D data.fig = plt.figure(figsize=(8,8)) # irradiance data ax = fig.add_subplot(2,2,1) ax.set_title('Irradiance at STO', fontsize=14) dx = irr_sur1[-2]/256 x = [-irr_sur1[-2]/2 + dx*i for i in range(256)] ax.plot(x, irrGridDat_sur1[128]) ax.set_ylim(top=np.max(np.array(irrGridDat_sur1[128]))) ax.set_xlim(left=-irr_sur1[-2]/2, right=irr_sur1[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) ax = fig.add_subplot(2,2,2) dx = irr_sur4[-2]/256 x = [-irr_sur4[-2]/2 + dx*i for i in range(256)] ax.set_title('Irradiance at fiber', fontsize=14) ax.plot(x, irrGridDat_sur4[128]) ax.set_ylim(top=np.max(np.array(irrGridDat_sur4[128]))) ax.set_xlim(left=-irr_sur4[-2]/2, right=irr_sur4[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) # phase data ax = fig.add_subplot(2,2,3) ax.set_title('Phase at STO', fontsize=14) dx = pha_sur1[-2]/256 x = [-pha_sur1[-2]/2 + dx*i for i in range(256)] ax.plot(x, phaGridDat_sur1[128]) ax.set_ylim(top=np.pi, bottom=-np.pi) ax.set_xlim(left=-pha_sur1[-2]/2, right=pha_sur1[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Phase (radians)', fontsize=13) ax = fig.add_subplot(2,2,4) ax.set_title('Phase at fiber', fontsize=14) dx = pha_sur4[-2]/256 x = [-pha_sur4[-2]/2 + dx*i for i in range(256)] ax.plot(x, phaGridDat_sur4[128]) ax.set_ylim(top=np.pi, bottom=-np.pi) ax.set_xlim(left=-pha_sur4[-2]/2, right=pha_sur4[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Phase (radians)', fontsize=13) fig.tight_layout() plt.show()We can zoom-in into to the irradiance slice plot at the fiber and see that the fiber is "perfectly coupled" as the beam size (radius) defined by $1/e^2$ width at the point of focus is very close to 8 microns (indicated by the red dashed vertical lines), which is the modal radius of the fiber we have defined.fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) dx = irr_sur4[-2]/256 x = [-irr_sur4[-2]/2 + dx*i for i in range(256)] ax.set_title('Irradiance at fiber', fontsize=14) ax.plot(x, irrGridDat_sur4[128]) ymax = np.max(np.array(irrGridDat_sur4[128])) xlim = irr_sur4[-2]/16 ax.set_ylim(top=ymax) ax.set_xlim(left=-xlim, right=xlim) ax.axvline(x=-0.008, ymax=ymax, color='r', ls='--') ax.axvline(x=0.008, ymax=ymax, color='r', ls='--') ax.axhline(y=ymax/np.e**2, color='g', ls='--') ax.text(0.012, 1730, r'$1/e^2$', fontsize=17) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) plt.show()Now, if we change the input beam waist to be 1.5 mm instead of 2 mm, which was the perfect coupling case, we will see that the coupling efficiency will decrease.sfile = ln.zSetPOPSettings(data=0, startSurf=1, endSurf=4, field=1, wave=1, beamType=0, paramN=((1, 2), (1.5, 1.5)), tPow=1, sampx=4, sampy=4, widex=40, widey=40, fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008))) irr_sur4_imperfect, irrGridDat_sur4_imperfect = ln.zGetPOP(settingsFile=sfile, displayData=True) irr_sur4_imperfectWe can see that the coupling efficiency decreased to 0.924587 We can also see that the $1/e^2$ beam size at the point of coupling with the fiber is greater than 8 microns (indicated by the red dashed vertical lines) resulting in a loss of energy.fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) dx = irr_sur4_imperfect[-2]/256 x = [-irr_sur4_imperfect[-2]/2 + dx*i for i in range(256)] ax.set_title('Irradiance at fiber', fontsize=14) ax.plot(x, 
irrGridDat_sur4_imperfect[128]) ymax = np.max(np.array(irrGridDat_sur4_imperfect[128])) xlim = irr_sur4[-2]/14 ax.set_ylim(top=ymax) ax.set_xlim(left=-xlim, right=xlim) ax.axvline(x=-0.008, ymax=ymax, color='r', ls='--') ax.axvline(x=0.008, ymax=ymax, color='r', ls='--') ax.axhline(y=ymax/np.e**2, color='g', ls='--') ax.text(0.015, 930, r'$1/e^2$', fontsize=17) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) plt.show()Effect of moving the beam origin to the left We will now see the effect of changing the origin of the beam behind the stop by 3000 *mm*. In Zemax, under POP analysis, there is an option called "Surface to Beam" that specifies the distance from the starting surface to the beam position in lens units. Unfortunately, we cannot modify this POP parameter through the DDE. One way of achieving the same objective is to create a dummy surface of thickness 3000 *mm* between the ``OBJ`` and ``STO`` and specify this surface as the start position of the beam.ln.zInsertSurface(1) ln.zSetSurfaceData(surfNum=1, code=ln.SDAT_COMMENT, value='dummy') ln.zSetSurfaceData(surfNum=1, code=ln.SDAT_THICK, value=3000) ln.ipzGetLDE() fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111) # Render the array pyz.imshow(lay, cropBorderPixels=(2, 5, 60, 90), fig=fig, faxes=ax) ax.text(85, 75, "Lens", fontsize=12) ax.text(20, 75, "STO (#2)", fontsize=12, rotation='vertical') ax.text(510, 80, "Fiber (#5)", fontsize=12, rotation='vertical') col = (0.08,0.08,0.08) ax.annotate("{}".format('Beam origin (3000 mm left of STO)'), (0, 230), (35, 234), fontsize=12, arrowprops=dict(arrowstyle="-|>", linewidth=0.9, color='r', relpos=(0.0,0.5))) ax.annotate("{}".format('Beam focus'), (516, 152), (448, 230), fontsize=12, arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col, relpos=(1.0,0.5))) #ax.set_title('Layout plot', fontsize=14) plt.show()Intensity at surface ``STO`` Note that the surface numbers have changed. We will create a new settings file by not passing a settings file-name to ``zSetPOPSettings()``. 
(Also, since it will have the same name, the old file will be overwritten.)sfile = ln.zSetPOPSettings(startSurf=1, endSurf=2, field=1, wave=1, beamType=0, paramN=((1, 2), (2, 2)), tPow=1, sampx=4, sampy=4, widex=40, widey=40) new_irr_sur2, new_irrGridDat_sur2 = ln.zGetPOP(settingsFile=sfile, displayData=True)Intensity at Fiberln.zModifyPOPSettings(settingsFile=sfile, endSurf=5, fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008))) new_irr_sur5, new_irrGridDat_sur5 = ln.zGetPOP(settingsFile=sfile, displayData=True)Phase at surface ``STO``sfile = ln.zSetPOPSettings(data= 1, startSurf=1, endSurf=2, field=1, wave=1, beamType=0, paramN=((1, 2), (2, 2)), tPow=1, sampx=4, sampy=4, widex=40, widey=40) new_pha_sur2, new_phaGridDat_sur2 = ln.zGetPOP(settingsFile=sfile, displayData=True)Phase at Fiberln.zModifyPOPSettings(settingsFile=sfile, endSurf=5, fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008))) new_pha_sur5, new_phaGridDat_sur5 = ln.zGetPOP(settingsFile=sfile, displayData=True)Plots of irradiance and phasefig = plt.figure(figsize=(8,8)) # irradiance data ax = fig.add_subplot(2,2,1) ax.set_title('Irradiance at STO', fontsize=14) ext = [-new_irr_sur2[-2]/2, new_irr_sur2[-2]/2, -new_irr_sur2[-1]/2, new_irr_sur2[-1]/2] ax.imshow(new_irrGridDat_sur2, extent=ext, origin='lower', cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) ax = fig.add_subplot(2,2,2) ax.set_title('Irradiance at fiber', fontsize=14) ext = [-new_irr_sur5[-2]/2, new_irr_sur5[-2]/2, -new_irr_sur5[-1]/2, new_irr_sur5[-1]/2] ax.imshow(new_irrGridDat_sur5, extent=ext, origin='lower', cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) # phase data ax = fig.add_subplot(2,2,3) ax.set_title('Phase at STO', fontsize=14) ext = [-new_pha_sur2[-2]/2, new_pha_sur2[-2]/2, -new_pha_sur2[-1]/2, new_pha_sur2[-1]/2] ax.imshow(new_phaGridDat_sur2, extent=ext, origin='lower', vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) ax = fig.add_subplot(2,2,4) ax.set_title('Phase at fiber', fontsize=14) ext = [-new_pha_sur5[-2]/2, new_pha_sur5[-2]/2, -new_pha_sur5[-1]/2, new_pha_sur5[-1]/2] ax.imshow(new_phaGridDat_sur5, extent=ext, origin='lower', vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('y (mm)', fontsize=13) fig.tight_layout() plt.show() fig = plt.figure(figsize=(8,8)) # irradiance data ax = fig.add_subplot(2,2,1) ax.set_title('Irradiance at STO', fontsize=14) dx = irr_sur1[-2]/256 x = [-irr_sur1[-2]/2 + dx*i for i in range(256)] ax.plot(x, irrGridDat_sur1[128],'gray') ax.plot(x, new_irrGridDat_sur2[128], 'r') ax.set_ylim(top=np.max(np.array(irrGridDat_sur1[128]))) ax.set_xlim(left=-irr_sur1[-2]/2, right=irr_sur1[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) ax = fig.add_subplot(2,2,2) dx = irr_sur4[-2]/256 x = [-irr_sur4[-2]/2 + dx*i for i in range(256)] ax.set_title('Irradiance at fiber', fontsize=14) ax.plot(x, irrGridDat_sur4[128],'gray') ax.plot(x, new_irrGridDat_sur5[128], 'r') ax.set_ylim(top=np.max(np.array(irrGridDat_sur4[128]))) ax.set_xlim(left=-irr_sur4[-2]/2, right=irr_sur4[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13) # phase data ax = fig.add_subplot(2,2,3) ax.set_title('Phase at STO', fontsize=14) dx = pha_sur1[-2]/256 x = [-pha_sur1[-2]/2 + dx*i for i in range(256)] ax.plot(x, 
phaGridDat_sur1[128],'gray') ax.plot(x, new_phaGridDat_sur2[128], 'r') ax.set_ylim(top=np.pi, bottom=-np.pi) ax.set_xlim(left=-pha_sur1[-2]/2, right=pha_sur1[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Phase (radians)', fontsize=13) ax = fig.add_subplot(2,2,4) ax.set_title('Phase at fiber', fontsize=14) dx = pha_sur4[-2]/256 x = [-pha_sur4[-2]/2 + dx*i for i in range(256)] ax.plot(x, phaGridDat_sur4[128],'gray') ax.plot(x, new_phaGridDat_sur5[128], 'r') ax.set_ylim(top=np.pi, bottom=-np.pi) ax.set_xlim(left=-pha_sur4[-2]/2, right=pha_sur4[-2]/2) ax.set_xlabel('x (mm)', fontsize=13) ax.set_ylabel('Phase (radians)', fontsize=13) fig.tight_layout() plt.show()In the above plots, the gray lines represent the intensity and phase data slices when the beam originated at the ``STO`` surface (i.e. surface to beam = 0). The red lines represent the intensity and phase data slices when the beam originated 3000 *mm* to the left of the ``STO`` surface. While the intensity plots for the two surfaces in both the cases are very similar, the phase plots show significant differences. Firstly, as the beam propagates from its waist position, 3000 mm behind the ``STO`` surface, to the ``STO`` surface its radius of curvature decreases from infinity to a finite value. Secondly, we can see the **Gouy shift** of phase profile of the Gaussian beam. Coupling efficiencynew_irr_sur5The coupling efficiency reduced from 0.999955 to 0.985784. The coupling efficiency decreases because the fiber's mode which we have defined as having a flat phase profile doesn't match that of the input (to the fiber) beam. Grabbing POP window from Zemax I used matplotlib to create the above plots using the raw data extracted from Zemax. However, it is entirely possible to grab the Zemax plots as shown with the following example.pyz.findZButtonCode('physical optics') # What is the button code? 
sfile = ln.zSetPOPSettings(startSurf=1, endSurf=2, field=1, wave=1, beamType=0, paramN=((1, 2), (2, 2)), tPow=1, sampx=4, sampy=4, widex=40, widey=40) sfile ln.ipzCaptureWindow('Pop', percent=15, gamma=0.9) ln.close()CLASSIFICATION using train_test_split function that comes with scikit learnfrom sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(iris['data'],iris['target'],random_state=0) X_train.shape X_test.shapeKNearestNeighbor (KNN algorithm)from sklearn.neighbors import KNeighborsClassifier knn=KNeighborsClassifier(n_neighbors=1) knn.fit(X_train,y_train)TAKING UNKOWN DATA TO PREDICTimport numpy as np X_new=np.array([[5,2.9,1,0.2]]) X_new.shapeMAKING PREDICTIONSpredictions=knn.predict(X_new) print(predictions) iris['target_names'][predictions]CHECKING ACCURACYknn.score(X_test,y_test)Decision Tree Algorithmimport graphviz from sklearn.tree import export_graphviz import numpy as np from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier from sklearn import tree from sklearn.tree import export_text iris=load_iris() x=iris.data y=iris.target tree_clf=DecisionTreeClassifier() model=tree_clf.fit(x,y) pro=tree_clf.predict_proba([[6,3,4.8,1.8]]) r = export_text(model, feature_names=iris['feature_names']) print(r) print(pro) r=pro print(pro[0]) result=np.where(pro[0]==1) print(result) iris['target_names'][result] tree.plot_tree(tree_clf) from matplotlib import pyplot as plt fig = plt.figure(figsize=(25,20)) _ = tree.plot_tree(tree_clf, feature_names=iris.feature_names, class_names=iris.target_names, filled=True) from sklearn import datasets iris=datasets.load_iris() x=iris.data y=iris.target from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.5) from sklearn import tree classifier=tree.DecisionTreeClassifier() classifier.fit(x_train,y_train) predictions=classifier.predict(x_test) print(predictions) from sklearn.metrics import accuracy_score print(accuracy_score(y_test,predictions)) from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier from sklearn.tree import export_text iris = load_iris() X = iris['data'] y = iris['target'] decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) decision_tree = decision_tree.fit(X, y) r = export_text(decision_tree, feature_names=iris['feature_names']) print(r)|--- petal width (cm) <= 0.80 | |--- class: 0 |--- petal width (cm) > 0.80 | |--- petal width (cm) <= 1.75 | | |--- class: 1 | |--- petal width (cm) > 1.75 | | |--- class: 2RANDOM FORESTfrom sklearn.datasets import load_iris iris=load_iris() from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(iris['data'],iris['target'],random_state=0) #split dataset into training set and testing set X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)#70%training and 30% testing set #import randomforest classifier from sklearn.ensemble import RandomForestClassifier #create a gaussian classifier clf=RandomForestClassifier(n_estimators=100) #train the model using the training set clf.fit(X_train,y_train) y_pred=clf.predict(X_test) #import scikitlearn metric model for accuracy calculation from sklearn import metrics print("Accuracy:",metrics.accuracy_score(y_test,y_pred)) import numpy as np X_new=np.array([[5,2.9,1,0.2]]) X_new.shape predictions=clf.predict(X_new) print(predictions) iris['target_names'][predictions]https://leetcode.com/problems/subarray-sum-equals-k/ Other solutions: 
https://leetcode.com/problems/subarray-sum-equals-k/discuss/503178/Python5-Approaches-easy-to-understand-with-detailed-explanations# Brute force (time limit in leetcode is exceeded) def subarraySum(nums, k: int) -> int: cnt = 0 for i in range(len(nums)): for j in range(i+1,len(nums)+1): if sum(nums[i:j]) == k: cnt += 1 return cnt subarraySum([1,1,1],2)Note: this solution only works for an array (nums) of positive numbers.# Sliding Window class Solution: def subarraySum(self, nums, k: int) -> int: def isneg(x): return abs(x)!=x n = len(nums) l,r = 0,0 res = 0 rs = 0 while l < n: keep_going = False while rs <= k or keep_going: if r >= n: break rs += nums[r] if isneg(nums[r]): keep_going = True r+=1 print("right",rs,nums[l:r]) if rs == k and r-l > 0: res += 1 while r >= n or rs >= k: if l>=n: break rs -= nums[l] l+=1 print("left",rs,nums[l:r]) if rs == k and r-l > 0: res+=1 return resCreating a Kobe Twitter Botby *Since: August 24th, 2020*Using gpt-2-simple, I refined OpenAI's Generative Pre-trained Transformer 2 (GPT) by training the model on Kobe's Tweets. Using TWINT, I scraped all of Kobe's tweets since he first came onto Twitter until his infamous last tweet about .%tensorflow_version 1.x !pip install -q gpt-2-simple import gpt_2_simple as gpt2 from datetime import datetime from google.colab import filesTensorFlow 1.x selected. Building wheel for gpt-2-simple (setup.py) ... [?25l[?25hdone WARNING:tensorflow: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons * https://github.com/tensorflow/io (for I/O related ops) If you depend on functionality not listed there, please file an issue.GPUVerifying Which GPU is in use - Tesla T4 is better than the P100!nvidia-smiTue Aug 25 17:10:36 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 450.57 Driver Version: 418.67 CUDA Version: 10.1 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 | | N/A 36C P8 28W / 149W | 0MiB / 11441MiB | 0% Default | | | | ERR! | +-------------------------------+----------------------+----------------------+ +-------[...]Downloading GPT-2There are three released sizes of GPT-2:* `124M` (default): the "small" model, 500MB on disk.* `355M`: the "medium" model, 1.5GB on disk.* `774M`: the "large" model, cannot currently be finetuned with Colaboratory but can be used to generate text from the pretrained model (see later in Notebook)* `1558M`: the "extra large", true model. Will not work if a K80 GPU is attached to the notebook. 
(like `774M`, it cannot be finetuned).Larger models have more knowledge, but take longer to finetune and longer to generate text.gpt2.download_gpt2(model_name="124M")Fetching checkpoint: 1.05Mit [00:00, 190Mit/s] Fetching encoder.json: 1.05Mit [00:00, 62.5Mit/s] Fetching hparams.json: 1.05Mit [00:00, 433Mit/s] Fetching model.ckpt.data-00000-of-00001: 498Mit [00:06, 74.1Mit/s] Fetching model.ckpt.index: 1.05Mit [00:00, 440Mit/s] Fetching model.ckpt.meta: 1.05Mit [00:00, 113Mit/s] Fetching vocab.bpe: 1.05Mit [00:00, 127Mit/s]Mounting Google DriveThe best way to get input text to-be-trained into the Colaboratory VM, and to get the trained model *out* of Colaboratory, is to route it through Google Drive *first*.Running this cell (which will only work in Colaboratory) will mount your personal Google Drive in the VM, which later cells can use to get data in/out. (it will ask for an auth code; that auth is not saved anywhere)gpt2.mount_gdrive()Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code Enter your authorization code: ·········· Mounted at /content/driveUploading a Text File to be Trained to ColaboratoryIn the Colaboratory Notebook sidebar on the left of the screen, select *Files*. From there you can upload files:![alt text](https://i.imgur.com/TGcZT4h.png)Upload **any smaller text file** (<10 MB) and update the file name in the cell below, then run the cell.file_name = "kobe.txt"If your text file is larger than 10MB, it is recommended to upload that file to Google Drive first, then copy that file from Google Drive to the Colaboratory VM.gpt2.copy_file_from_gdrive(file_name)Finetune GPT-2The next cell will start the actual finetuning of GPT-2. It creates a persistent TensorFlow session which stores the training config, then runs the training for the specified number of `steps`. (to have the finetuning run indefinitely, set `steps = -1`)The model checkpoints will be saved in `/checkpoint/run1` by default. The checkpoints are saved every 500 steps (can be changed) and when the cell is stopped.The training might time out after 4ish hours; make sure you end training and save the results so you don't lose them!**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (Runtime -> Restart Runtime). You will need to rerun imports but not recopy files.Other optional-but-helpful parameters for `gpt2.finetune`:* **`restore_from`**: Set to `fresh` to start training from the base GPT-2, or set to `latest` to restart training from an existing checkpoint.* **`sample_every`**: Number of steps to print example output* **`print_every`**: Number of steps to print training progress.* **`learning_rate`**: Learning rate for the training. (default `1e-4`, can lower to `1e-5` if you have <1MB input data)* **`run_name`**: subfolder within `checkpoint` to save the model. 
This is useful if you want to work with multiple models (will also need to specify `run_name` when loading the model)* **`overwrite`**: Set to `True` if you want to continue finetuning an existing model (w/ `restore_from='latest'`) without creating duplicate copies.sess = gpt2.start_tf_sess() gpt2.finetune(sess, dataset=file_name, model_name='124M', steps=1000, restore_from='latest', run_name='run1', print_every=10, sample_every=200, save_every=500, overwrite=True )WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/gpt_2_simple/src/sample.py:17: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where Loading checkpoint checkpoint/run1/model-706 INFO:tensorflow:Restoring parameters from checkpoint/run1/model-706After the model is trained, you can copy the checkpoint folder to your own Google Drive.If you want to download it to your personal computer, it's strongly recommended you copy it there first, then download from Google Drive. The checkpoint folder is copied as a `.rar` compressed file; you can download it and uncompress it locally.gpt2.copy_checkpoint_to_gdrive(run_name='run1')You're done! Feel free to go to the **Generate Text From The Trained Model** section to generate text based on your retrained model. Load a Trained Model CheckpointRunning the next cell will copy the `.rar` checkpoint file from your Google Drive into the Colaboratory VM.gpt2.copy_checkpoint_from_gdrive(run_name='run1')The next cell will allow you to load the retrained model checkpoint + metadata necessary to generate text.**IMPORTANT NOTE:** If you want to rerun this cell, **restart the VM first** (Runtime -> Restart Runtime). You will need to rerun imports but not recopy files.sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='run1')Loading checkpoint checkpoint/run1/model-848 INFO:tensorflow:Restoring parameters from checkpoint/run1/model-848Generate Text From The Trained ModelAfter you've trained the model or loaded a retrained model from checkpoint, you can now generate text. `generate` generates a single text from the loaded model.gpt2.generate(sess, run_name='run1')@PhilJackson11 I'm thinking of writing a novel about retiring from basketball and founding a company… http://instagram.com/p/l6yTNrUVjd/  @PhilJackson11 I've been thinking about writing a novel for a while now but didnt know I was writing it lol I'm not a bad kid hey are you ready to play yet? Thinking about writing a novelisation of #ThePunies #GameSetTellerforyou #TeamUSA #LakerNation A #ThankYouMLK50 march is about more than just seeing shoes fitted. It's about being represented in the highest level. It's about being a part of the change. Change is coming. Watch Episode 4 here: http://es.pn/2J1XR7H  Lesson 1: Love the hate. #HeroVillain #KOBE11 I love the trash talk. #ambien #mambaout This is why I partner with Lenovo #LegacyandtheQueen #Kobe11 #TMT: http://www.youtube.com/watch?v=t8ZtgNuZDM ” by far the craziest thing I've done! I have no idea how I walked after that #real***t @PhilJackson11 ist really only funny 8to9 years but it will get better. 
I'm thinking 9.5 to 1[...]If you're creating an API based on your model and need to pass the generated text elsewhere, you can do `text = gpt2.generate(sess, return_as_list=True)[0]`You can also pass in a `prefix` to the generate function to force the text to start with a given character sequence and generate text from there (good if you add an indicator when the text starts).You can also generate multiple texts at a time by specifing `nsamples`. Unique to GPT-2, you can pass a `batch_size` to generate multiple samples in parallel, giving a massive speedup (in Colaboratory, set a maximum of 20 for `batch_size`).Other optional-but-helpful parameters for `gpt2.generate` and friends:* **`length`**: Number of tokens to generate (default 1023, the maximum)* **`temperature`**: The higher the temperature, the crazier the text (default 0.7, recommended to keep between 0.7 and 1.0)* **`top_k`**: Limits the generated guesses to the top *k* guesses (default 0 which disables the behavior; if the generated output is super crazy, you may want to set `top_k=40`)* **`top_p`**: Nucleus sampling: limits the generated guesses to a cumulative probability. (gets good results on a dataset with `top_p=0.9`)* **`truncate`**: Truncates the input text until a given sequence, excluding that sequence (e.g. if `truncate=''`, the returned text will include everything before the first ``). It may be useful to combine this with a smaller `length` if the input texts are short.* **`include_prefix`**: If using `truncate` and `include_prefix=False`, the specified `prefix` will not be included in the returned text.gpt2.generate(sess, length=250, temperature=0.7, prefix="@KingJames", nsamples=5, batch_size=5 )@KingJames #countonkobe Watching @TurnerSportsEJ and the crew do this #allstarchallenge is hilarious Not a good case for nba players being the best athletes! HA #QueenMamba ladyvb24 Celebrate the one you love #myvalentine happy valentines day to all #blessings http://instagram.com/p/kbG3a8RNqV/  Happy birthday Mr Russell. Thank you for all of your wisdom and the amount of time you have taken to… http://instagram.com/p/kWHezyRNlF/  On my Nike set with the champ rsherman_25 #differentanimalSamebeast http://instagram.com/p/kQdrA8xNiv/  Major shout to @sagekotsenburg @Jme_Anderson #snowboardGold #usa #SochiStomped Tonight, watch @BillClinton, @AllysonFelix, @TheRealMattKemp & myself discuss why kids in sports is so important. http://es.pn/KidsAndSports  What a game! #seriously ==================== @KingJames #GOAT Catch the short film I Just Metagy feat. Demi Lovato and Dina is dancing! It’s Disney’s masterpiece and one of my favorite videos. https://youtu.be/oVK6YReKeDM ” ( h[...]For bulk generation, you can generate a large amount of text to a file and sort out the samples locally on your computer. The next cell will generate a generated text file with a unique timestamp.You can rerun the cells as many times as you want for even more generated texts!gen_file = 'gpt2_gentext_{:%Y%m%d_%H%M%S}.txt'.format(datetime.utcnow()) gpt2.generate_to_file(sess, destination_path=gen_file, length=500, temperature=0.7, nsamples=100, batch_size=20 ) # may have to run twice to get file to download files.download(gen_file)EtcIf the notebook has errors (e.g. 
GPU Sync Fail), force-kill the Colaboratory virtual machine and restart it with the command below:!kill -9 -1LICENSEMIT LicenseCopyright (c) 2019 Permission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in allcopies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THESOFTWARE.1. Preprocessdef preprocess(df, col): preprocessor = PreProcess() preprocessor.fill_na(df, col) preprocessor.remove_urls(df, col) preprocessor.expand_contractions(df, col) preprocessor.remove_escape_chars(df, col) return dfPostsworld_posts.head() posts_df = world_posts.drop(columns=["subreddit", "url","title", "created"]) posts_df.head() # posts_pre_df = PreProcess.preprocess(posts_df, 'body') # for i in range(len(posts_df.index)): # posts_pre_df.at[i, 'body_string'] = ' '.join([str(elem) for elem in posts_pre_df.at[i, 'body_stem']]) # posts_pre_df.head() preprocess(posts_df, "body")Commentscomments_df = world_comments.copy() comments_df.head() preprocess(comments_df, "comment")2. BERT Sentiment Analysistokenizer = AutoTokenizer.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') model = AutoModelForSequenceClassification.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') def sentiment_score(post): tokens = tokenizer.encode(post, return_tensors='pt') result = model(tokens) return int(torch.argmax(result.logits))+1 posts_df['sentiment'] = posts_df['body'].apply(lambda x: sentiment_score(x[:512])) posts_df.head() # comments_df['sentiment'] = comments_df['comment'].apply(lambda x: sentiment_score(x[:512])) # comments_df.head()3. EDA sentiments of Posts and Commentsposts_df.corr() posts_df.sentiment.value_counts().loc[[1, 2, 3, 4, 5]].plot(kind='bar', xlabel='sentiment', ylabel='frequency') pprint(posts_df.sentiment.value_counts().loc[[1, 2, 3, 4, 5]]) plt.show() temp_id = posts_df.post_id[0] print("Looking at one post with id:",temp_id) posts_df.head(1) median_sentiment_list=[] for i in posts_df.post_id: median_sentiment_list.append(np.median(comments_df[comments_df.post_id==i].sentiment)) posts_df['median_comments_sentiment'] = median_sentiment_list posts_df.head() posts_df posts_df.sentiment.value_counts().loc[[1, 2, 3, 4, 5]].plot(kind='bar', xlabel='sentiment', ylabel='frequency') plt.show() posts_df.median_comments_sentiment.value_counts().loc[[1, 2, 3, 4, 5]].plot(kind='bar', xlabel='median_sentiment', ylabel='frequency') plt.show()4. 
Observing with post Metadatadf = posts_df[['post_id','score','upvote_ratio','num_comments', 'sentiment']] df plt.rcParams["figure.figsize"] = (20,10) plt.scatter(df.score, df.num_comments, c=df.sentiment, cmap="Greys") plt.legend() plt.show() from sklearn.preprocessing import MinMaxScaler df = posts_df[['post_id','score','upvote_ratio','num_comments', 'sentiment']] sc = MinMaxScaler() df = sc.fit_transform(df[['score','upvote_ratio','num_comments']]) df from mpl_toolkits import mplot3d from mpl_toolkits.mplot3d import Axes3D plt.rcParams["figure.figsize"] = (20,15) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # ax.scatter(df['score'], df['upvote_ratio'], df['num_comments'], c=df['sentiment']) ax.scatter(df[:, 0], df[:, 1], df[:, 2], c=posts_df.sentiment) plt.show() #also try with another subreddit# Add Multi-line Comment - CTRL+/ # Online Compiler:- # Google Colab # Repl.it # Local Compiler:- [IDE] # Anaconda Navigator # PyCharm # This is my first program in Python (SLoC) ''' This is a Multi-line Comment Author: Sanket Date: 02/11/2021 ''' print("hello world") # To RUN the Code Snippet - PLAY or CTRL+Enter or CMD+Enter print("hello world") print('hello world') print('''yorker''') print("""youtube""") # Python is a case-sensitive language PRINT("helloworld") print("helloworld") # Built-in Function x = 45 # definition of x -> Declaration + Initialization print(x) NUM = 600 print(num) # List Of Operators in Python # Arithmetic Operators (+,-,/,*,**,//,%) print(3+4) # + Addition Operator - Binary Operator print(3-4) print(3*4) print(13/4) # float division operator print(12/4) # float division operator print(13//4) # integer division operator - 3.25 --> it trims the fractional part and returns integer part --> 3 print(23//3) # returns integer -> 7 print(12**2) # Power Operator -> base ** exponent -> 12 ** 2 -> 144 print(45 % 20) # Modulo Operator - Remainder Value -> 5 print( 2020 % 4 ) # Remainder -> 0 # Relational Operators (<,>,>=,<=,!=,==) return True/False -> Boolean(1/0)(T/F) print(3<4) print(3>4) print(3<=4) print(3>=4) print(23 != 3) # not-equal-to [inequality operator] -> True print(23 == 3) # equal-to [equality operator] -> False # Logical Operators (and, or, not) return True/False -> Boolean(1/0)(T/F) print( 56<4 and 56>78) # and operator returns True if both operands are True print( True and True) print( False and False) print( False and True) print( True and False) print( 56<4 or 56>78) # or operator returns True if atleast one of the operands is True print( 56<4 or 56>78) # False or False -> False print( True or True) print( False or False) print( False or True) print( True or False) # not is a unary operator - inverts the value (True->False) (False->True) print( not True) # not True -> False print( not (56<67)) # not True -> False age = 30 True = 34Logistic regression binary response variables (Y)- 0 or 1 Xs can be numerical or categorical%matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt import numpy as np import pandas as pd import statsmodels.api as sm df = pd.read_csv("trainT.csv") #titanic df.head(5) df.shape df.isnull().sum() #number of nas in a column df = df[["Survived","Pclass","Age","Fare"]] df=df.dropna() #drops nas df.head(7) plt.figure(figsize=(6,4)) fig, ax = plt.subplots() df.Survived.value_counts().plot(kind='barh', color="blue", alpha=.65) ax.set_ylim(-1, len(df.Survived.value_counts())) plt.title("Survival Breakdown (1 = Survived, 0 = Died)") sns.factorplot(x="Pclass", y="Fare", hue="Survived", data=df, 
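# box plots of Fare for each Pclass, split by Survived, to eyeball which features separate survivors from non-survivors (note: requires seaborn imported as sns, which is not in the imports above)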
kind="box") #formula = 'Survived ~ C(Pclass) + C(Sex) + Age + Fare' #c indicates categorical y=df[['Survived']] print(type(y)) x=df[["Pclass","Age","Fare"]] print(type(x)) # Make the model logit =sm.Logit(y, x.astype(float)) #import statsmodels.api as sm # Fit the model result = logit.fit() print result.summary() #log [p/(1-p)] = -.28*Pclass + .0146 * faree -0.01*Age #how a 1 unit increase or decrease in a variable affects the odds of surviving #Number of successes:1 failure # odds print np.exp(result.params) #odds that passengers die increase by a factor of 0.98 for each unit change in age. #prob = odds / (1 + odds) . #probability of finding someone dead on basis of age = 0.98/(1+0.98) from patsy import dmatrices import pandas as pd from sklearn.linear_model import LogisticRegression import statsmodels.discrete.discrete_model as sm df2=pd.read_csv("trainT.csv") df2.head(7) df2 = df2[["Survived","Pclass","Sex","Age","Fare"]] df2.head(6) df2=df2.dropna() df2.head(6) y, X = dmatrices('Survived ~ C(Pclass) + C(Sex) + Age + Fare', df2, return_type = 'dataframe') #c indicates categorical # sklearn output model = LogisticRegression(fit_intercept = False, C = 1e9) mdl = model.fit(X, y) model.coef_ logit = sm.Logit(y, X) logit.fit().params # Fit the model result = logit.fit() print result.summary() # create a results dictionary to hold our regression results for easy analysis later results = {} #http://hamelg.blogspot.co.uk/2015/11/python-for-data-analysis-part-28.html # create a regression friendly dataframe using patsy's dmatrices function y,x = dmatrices(formula, data=df, return_type='dataframe') # instantiate our model model = sm.Logit(y,x) # fit our model to the training data res = model.fit() # save the result for outputing predictions later results['Logit'] = [res, formula] res.summary() # fare is not statistically significant formula = 'Survived ~ C(Pclass) + C(Sex) + Age' results = {} # create a regression friendly dataframe using patsy's dmatrices function y,x = dmatrices(formula, data=df, return_type='dataframe') # instantiate our model model = sm.Logit(y,x) # fit our model to the training data res = model.fit() # save the result for outputing predictions later results['Logit'] = [res, formula] res.summary()Optimization terminated successfully. Current function value: 0.453279 Iterations 6===============================Using Compose with Featuretools===============================In this guide, we will generate labels and features on a mock dataset of transactions using Compose and Featuretools. Then create a machine learning model for predicting one hour in advance whether customers will spend over $1200 within the next hour of transactions.%matplotlib inline import composeml as cp import featuretools as ft import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_reportLoad Data=========To get an idea on how the transactions looks, we preview the data frame.transactions = ft.demo.load_mock_customer( return_single_table=True, random_seed=0, ) transactions[transactions.columns[:7]].head()Generate Labels===============Now with the transactions loaded, we are ready to generate labels for our prediction problem.Create Labeling Function------------------------First, we define the function that will return the total purchase amount given a hour of transactions.def total_spent(df): total = df["amount"].sum() return total.. 
currentmodule:: composemlConstruct Label Maker---------------------With our labeling function, we create the :class:`LabelMaker` for the transactions. The :code:`target_entity` is set to :code:`customer_id` so that the labels are generated for each customer. The :code:`window_size` is set to one hour to process one hour of transactions for a given customer.label_maker = cp.LabelMaker( target_entity='customer_id', time_index='transaction_time', labeling_function=total_spent, window_size='1h', )Create Labels-------------Next, we automatically search and extract the labels by using :meth:`LabelMaker.search`... seealso:: For more details on how the label maker works, see :doc:`/main_concepts`.labels = label_maker.search( transactions.sort_values('transaction_time'), num_examples_per_instance=-1, gap=1, ) labels.head()Elapsed: 00:01 | Remaining: 00:00 | Progress: 100%|██████████| customer_id: 5/5Transform Labels----------------With the generated :class:`LabelTimes`, we will apply specific transforms for our prediction problem.Apply Threshold on Labels~~~~~~~~~~~~~~~~~~~~~~~~~We apply :meth:`LabelTimes.threshold` to make the labels binary for total amounts exceeding $1200.labels = labels.threshold(1200) labels.head()Lead Label Times~~~~~~~~~~~~~~~~We also use :meth:`LabelTimes.apply_lead` to shift the label times 1 hour earlier for predicting in advance.labels = labels.apply_lead('1h') labels.head()Describe Labels---------------After transforming the labels, we could use :meth:`LabelTimes.describe` to print out the distribution with the settings and transforms that were used to make the labels. This is useful as a reference for understanding how the labels were generated from raw data. Also, the label distribution is helpful for determining if we have imbalanced labels.labels.describe()Label Distribution ------------------ True 252 False 248 Total: 500 Settings -------- num_examples_per_instance -1 minimum_data None window_size gap 1 Transforms ---------- 1. threshold - value: 1200 2. apply_lead - value: 1h.. currentmodule:: featuretoolsGenerate Features=================Now with the generated labels, we are ready to generate features for our prediction problem.Create Entity Set-----------------Let's construct an :class:`EntitySet` and load the transactions as an entity by using :meth:`EntitySet.entity_from_dataframe`. Then extract additional entities by using :meth:`EntitySet.normalize_entity`... 
seealso:: For more details on working with entity sets, see :doc:`loading_data/using_entitysets` .es = ft.EntitySet('transactions') es.entity_from_dataframe( 'transactions', transactions, index='transaction_id', time_index='transaction_time', ) es.normalize_entity( base_entity_id='transactions', new_entity_id='sessions', index='session_id', make_time_index='session_start', additional_variables=[ 'device', 'customer_id', 'zip_code', 'session_start', 'join_date', 'date_of_birth', ], ) es.normalize_entity( base_entity_id='sessions', new_entity_id='customers', index='customer_id', make_time_index='join_date', additional_variables=[ 'zip_code', 'join_date', 'date_of_birth', ], ) es.normalize_entity( base_entity_id='transactions', new_entity_id='products', index='product_id', additional_variables=['brand'], make_time_index=False, ) es.add_last_time_indexes()Describe Entity Set-------------------To get information on how the entity set is structured, we could print the entity set and use :meth:`EntitySet.plot` to create a diagram.print(es, end='\n\n') es.plot()Entityset: transactions Entities: transactions [Rows: 500, Columns: 5] sessions [Rows: 35, Columns: 4] customers [Rows: 5, Columns: 4] products [Rows: 5, Columns: 2] Relationships: transactions.session_id -> sessions.session_id sessions.customer_id -> customers.customer_id transactions.product_id -> products.product_idCreate Feature Matrix---------------------Next, we generate features that correspond to the labels created previously by using :func:`dfs`. The :code:`target_entity` is set to :code:`customers` so that features are only calculated for customers. The :code:`cutoff_time` is set to the labels so that features are calculated only using data up to and including the label cutoff times. Notice that the output of Compose integrates easily with Featuretools... seealso:: For more details on calculating features using cutoff times, see :doc:`automated_feature_engineering/handling_time`.feature_matrix, features_defs = ft.dfs( entityset=es, target_entity='customers', cutoff_time=labels, cutoff_time_in_index=True, verbose=True, )Built 77 features Elapsed: 02:32 | Progress: 100%|█████████████████████████████████████████████████████████████████████| Remaining: 00:00Describe Features-----------------To get an idea on how the generated features look, we preview the feature definitions.features_defs[:20]Machine Learning================Now with the generated labels and features, we are ready to create a machine learning model for our prediction problem. Preprocess Features-------------------In the feature matrix, let's extract the labels and fill any missing values with zeros. 
Then, one-hot encode all categorical features by using :func:`encode_features`.y = feature_matrix.pop(labels.name) x = feature_matrix.fillna(0) x, features_enc = ft.encode_features(x, features_defs)Split Labels and Features-------------------------After preprocessing, we split the features and corresponding labels each into training and testing sets.x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=.8, test_size=.2, random_state=0, )Train Model-----------Next, we train a random forest classifer on the training set.clf = RandomForestClassifier(n_estimators=10, random_state=0) clf.fit(x_train, y_train)Test Model----------Lastly, we test the model performance by evaluating predictions on the testing set.y_hat = clf.predict(x_test) print(classification_report(y_test, y_hat))precision recall f1-score support False 0.80 0.86 0.83 50 True 0.85 0.78 0.81 50 accuracy 0.82 100 macro avg 0.82 0.82 0.82 100 weighted avg 0.82 0.82 0.82 100Feature Importances-------------------This plot is based on scores obtained by the model to illustrate which features are considered important for predictions.feature_importances = zip(x_train.columns, clf.feature_importances_) feature_importances = pd.Series(dict(feature_importances)) feature_importances = feature_importances.rename_axis('Features') feature_importances = feature_importances.sort_values() top_features = feature_importances.tail(40) plot = top_features.plot(kind='barh', figsize=(5, 12), color='#054571') plot.set_title('Feature Importances') plot.set_xlabel('Scores');* Python 2 일때 해쉬 메시지 생성 방법if sys.version_info.major == 2: hashMsg = unicode(hashlib.sha256(hashMsg).hexdigest(),'utf-8') hashMsg* Python 3 일때 해쉬 메시지 생성 방법if sys.version_info.major == 3: hashMsg = hashlib.sha256(str(hashMsg).encode('utf-8')).hexdigest() hashMsgThe blood transfusion datasetIn this notebook, we will present the "blood transfusion" dataset. Thisdataset is locally available in the directory `datasets` and it is stored asa comma separated value (CSV) file. We start by loading the entire dataset.import pandas as pd blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")We can have a first look at the at the dataset loaded.blood_transfusion.head()In this dataframe, we can see that the last column correspond to the targetto be predicted called `"Class"`. We will create two variables, `data` and`target` to separate the data from which we could learn a predictive modeland the `target` that should be predicted.data = blood_transfusion.drop(columns="Class") target = blood_transfusion["Class"]Let's have a first look at the `data` variable.data.head()We observe four columns. Each record corresponds to a person that intendedto give blood. The information stored in each column are:* `Recency`: the time in months since the last time a person intended to give blood;* `Frequency`: the number of time a person intended to give blood in the past;* `Monetary`: the amount of blood given in the past (in c.c.);* `Time`: the time in months since the first time a person intended to give blood.Now, let's have a look regarding the type of data that we are dealing inthese columns and if any missing values are present in our dataset.data.info()Our dataset is made of 748 samples. All features are represented with integernumbers and there is no missing values. We can have a look at each featuredistributions._ = data.hist(figsize=(12, 10), bins=30, edgecolor="black", density=True)There is nothing shocking regarding the distributions. 
We only observe a highvalue range for the features `"Recency"`, `"Frequency"`, and `"Monetary"`. Itmeans that we have a few extreme high values for these features.Now, let's have a look at the target that we would like to predict for thistask.target.head() import matplotlib.pyplot as plt target.value_counts(normalize=True).plot.barh() plt.xlabel("Number of samples") _ = plt.title("Class distribution")We see that the target is discrete and contains two categories: whether aperson `"donated"` or `"not donated"` his/her blood. Thus the task to besolved is a classification problem. We should note that the class counts ofthese two classes is different.target.value_counts(normalize=True)Indeed, ~76% of the samples belong to the class `"not donated"`. It is ratherimportant: a classifier that would predict always this `"not donated"` classwould achieve an accuracy of 76% of good classification without using anyinformation from the data itself. This issue is known as class imbalance. Oneshould take care about the statistical performance metric used to evaluate amodel as well as the predictive model chosen itself.Now, let's have a naive analysis to see if there is a link between featuresand the target using a pair plot representation.import seaborn as sns _ = sns.pairplot(blood_transfusion, hue="Class")Autoencoders> Summary: Encoder, Decoder, Latent vector, Variational Autoencoder, VAE, Latent Space What are Autoencoders?Autoencoders are neural networks that learn to efficiently compress and encode data then learn to reconstruct the data back from the reduced encoded representation to a representation that is as close to the original input as possible. Therefore, autoencoders reduce the dimentsionality of the input data i.e. reducing the number of features that describe input data.Since autoencoders encode the input data and reconstruct the original input from encoded representation, they learn the **identity** function in an unspervised manner.![](images/autoen_architecture.png)*Autoencoder architecture. [[Image Source](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)]*An autoencoder consists of two primary components:1. Encoder: Learns to compress (reduce) the input data into an encoded representation.2. Decoder: Learns to reconstruct the original data from the encoded representation to be as close to the original input as possible.3. Bottleneck: The layer that contains the compressed representation of the input data.4. Reconstruction loss: The method to that measures how well the decoder is performing, i.e. measures the difference between the encoded and decoded vectors.The model involves encoded function $g$ parameterized by $\phi$ and a decoder function $f$ parameterized by $\theta$. The bottleneck layer is $\mathbf{z}=g_{\phi}(\mathbf{x})$, and the reconstructed input $\mathbf{x'}=f_{\theta}(g_{\phi}(\mathbf{x}))$.For measuring the reconstruction loss, we can use the cross entropy (when activation function is sigmoid) or basic Mean Squared Error (MSE):$$L_{AE}(\theta,\phi)=\frac{1}{n}\sum_{i=1}^n (\mathbf{x}^{(i)}-f_{\theta}(g_{\phi}(\mathbf{x}^{(i)})))^2$$ Autoencoder ApplicationsAutoencoders have several different applications including:- Dimensionality Reductiions- Image Compression- Image Denoising- Image colorization Image DenoisingImage denoising is the process of removing noise from the image. We can train an autoencoder to remove noise from the images. ![](images/autoen_denoising_architecture.png)*Denoising autoencoder architecture. 
[[Image Source](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)]*We start by adding some noise (usually Gaussian noise) to the input images and then train the autoencoder to map noisy digits images to clean digits images. In order to see a complete example of image denoising, see [here](https://blog.keras.io/building-autoencoders-in-keras.html). Autoencoder Implementation#export import Augmentor import os import numpy as np from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from PIL import Image from tqdm import tqdm #export def get_pixel(image, i, j): """ Returns a pixel at coordinate (`i`, `j`). """ return image.getpixel((i,j)) def change_image_background(orig_dir_path, converted_path): """ Changes the image background from white to black and foreground from black to white, for all the images at folder `orig_dir_path` and place them into folder `converted_path`.""" files = os.listdir(dir_path) num_files = len(files) data = [] counter = 1 for f in tqdm(files, total=num_files): img = Image.open(os.path.join(dir_path,f)) out_img = Image.new('RGB',img.size,color=1) width, height = img.size for w in range(width): for h in range(height): r, g, b = get_pixel(img, w,h) if r > 128 or g > 128 or b > 128: r = g = b = 0 else: r = g = b = 255 out_img.putpixel((w,h),(r,g,b)) file_name = os.path.join(converted_path, str(counter) + '.png') out_img.save(file_name) counter += 1 return data def create_augmentor_pipeline(dir_path): """ Creates a pipeline for generating extra images from images at folder `dir_path`.""" p = Augmentor.Pipeline(dir_path) p.resize(probability=1,width=64,height=64) p.rotate90(probability=0.1) p.rotate(probability=0.2, max_left_rotation=5, max_right_rotation=10) p.skew_left_right(probability=0.1) p.greyscale(probability=1) return p def load_data(dir_path): """ Loads all the images from directory `dir_path`, converts them to matrices and return a list.""" files = os.listdir(dir_path) num_files = len(files) data = [] for f in tqdm(files, total=num_files): img = Image.open(os.path.join(dir_path,f)) img_array = np.array(img) data.append(img_array) return data> Note: The dataset contains several Farsi (Persian) characters written in `Moallah` font. It can replaced with any dataset of your interest.# Change the background to black and foreground to white # Please note that you have to execute this once. If your dataset is already correctly # formatted, then skip this step. dir_path = os.path.join('data','moalla-dataset') converted_path = os.path.join('data','converted') # change_image_background(dir_path, converted_path) p = create_augmentor_pipeline(converted_path) # Generate 10000 images of (64 x 64) according to the pipeline and put them in `data/converted/output` folder num_samples = 10000 p.sample(num_samples) # Load all the images and return a list having array representation of each image dir_path = os.path.join(converted_path,'output') data = load_data(dir_path) # Split the dataset into 80% train and 20% test sets. train_data,test_data,_,_ = train_test_split(data,data,test_size=0.2) train_data = np.array(train_data) test_data = np.array(test_data) # select a random image and display it sample = 1190 img = Image.fromarray(train_data[sample]) plt.imshow(img) # Normalizing train and test data normalized_train_data = train_data.astype('float32')/255.0 normalized_test_data = test_data.astype('float32')/255.0 # Reshaping train and test sets, i.e. 
changing from (64, 64) to (64, 64, 1) normalized_train_data = np.expand_dims(normalized_train_data,axis=-1) normalized_test_data = np.expand_dims(normalized_test_data,axis=-1) print('Normalization and reshaping is done.') print('Input shape = {}'.format(normalized_train_data.shape[1:]))Normalization and reshaping is done. Input shape = (64, 64, 1)Defining the Encoderimport tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Conv2DTranspose, Flatten from tensorflow.keras.layers import Reshape, BatchNormalization from tensorflow.keras.models import Model from tensorflow.keras import backend as K from tensorflow.keras.callbacks import TensorBoard image_width = 64 image_height = 64 n_epochs = 15 batch_size = 128 input_img = Input(shape=(image_width, image_height, 1)) # You can experiment with the encoder layers, i.e. add or change them x = Conv2D(32, (3, 3), activation='relu', strides=2, padding='same')(input_img) x = Conv2D(64, (3, 3), activation='relu', strides=2, padding='same')(x) # We need this shape later in the decoder, so we save it into a variable. encoded_shape = K.int_shape(x) x = Flatten()(x) encoded = Dense(128)(x) # Builing the encoder encoder = Model(input_img,encoded,name='encoder') # at this point the representation is 128-dimensional encoder.summary()Model: "encoder" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_11 (InputLayer) [(None, 64, 64, 1)] 0 _________________________________________________________________ conv2d_10 (Conv2D) (None, 32, 32, 32) 320 _________________________________________________________________ conv2d_11 (Conv2D) (None, 16, 16, 64) 18496 _________________________________________________________________ flatten_5 (Flatten) (None, 16384) 0 _________________________________________________________________ dense_10 (Dense) (None, 128) 2097280 ================================================================= Total params: 2,116,096 Trainable params: 2,116,096 Non-trainable params: 0 _________________________________________________[...]Defining the Decoder# Input shape for decoder encoded_input = Input(shape=(128,)) x = Dense(np.prod(encoded_shape[1:]))(encoded_input) x = Reshape((encoded_shape[1], encoded_shape[2], encoded_shape[3]))(x) x = Conv2DTranspose(64,(3, 3), activation='relu',strides=2, padding='same')(x) x = Conv2DTranspose(32,(3, 3), activation='relu', strides=2, padding='same')(x) x = Conv2DTranspose(1,(3, 3), activation='sigmoid', padding='same')(x) decoder = Model(encoded_input,x,name='decoder') decoder.summary()Model: "decoder" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_12 (InputLayer) [(None, 128)] 0 _________________________________________________________________ dense_11 (Dense) (None, 16384) 2113536 _________________________________________________________________ reshape_5 (Reshape) (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_transpose_15 (Conv2DT (None, 32, 32, 64) 36928 _________________________________________________________________ conv2d_transpose_16 (Conv2DT (None, 64, 64, 32) 18464 _________________________________________________________________ conv2d_transpose_17 (Conv2DT (None, 64, 64, 1) 289 
===========================================================[...]Defining the Autoencoderautoencoder = Model(input_img, decoder(encoder(input_img)),name="autoencoder") autoencoder.summary() # Compile and train the model. Log and visualize using tensorboard autoencoder.compile(optimizer='adam', loss='binary_crossentropy') h = autoencoder.fit(normalized_train_data, normalized_train_data, epochs=n_epochs, batch_size=batch_size, shuffle=True, validation_data=(normalized_test_data, normalized_test_data), callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) #hide # Plot the training history using altair import altair as alt import pandas as pd train_source = pd.DataFrame({'x':np.arange(0,n_epochs), 'y':h.history['loss'], 'orig_label': 15 * ['train_loss']}) val_source = pd.DataFrame({'x':np.arange(0,n_epochs), 'y':h.history['val_loss'], 'val_label': 15 * ['val_loss']}) legends = ['train loss', 'val loss'] train_chart = alt.Chart(train_source).mark_line().encode( alt.X('x', title='Epochs'), alt.Y('y', title='Loss/Accuracy'), color=alt.Color('orig_label:O', legend=alt.Legend(title=None)) ) val_chart = alt.Chart(val_source).mark_line().encode( alt.X('x', title='Epochs'), alt.Y('y', title='Loss/Accuracy'), color=alt.Color('val_label:O', scale=alt.Scale(range=['red']), legend=alt.Legend(title=None)) ) # alt.layer(train_chart, val_chart).resolve_scale(color='independent') # plot the train and validation losses N = np.arange(0, n_epochs) plt.figure() plt.plot(N, h.history['loss'], label='train_loss') plt.plot(N, h.history['val_loss'], label='val_loss') plt.title('Training Loss and Accuracy') plt.xlabel('Epochs') plt.ylabel('Loss/Accuracy') plt.legend(loc='upper right')TensorBoard also provides plenty of useful information including measurements and visualizations during and after training. The snippet below shows the training loss in TensorBoard:![](images/autoen_tensorboard.png)# Make predictions on the test set decoded_imgs = autoencoder.predict(normalized_test_data) def visualize(model, X_test, n_samples): """ Visualizes the original images and the reconstructed ones for `n_samples` examples on the test set `X_test`.""" # Reconstructing the encoded images reconstructed_images = model.predict(X_test) plt.figure(figsize =(20, 4)) for i in range(1, n_samples): # Generating a random to get random results rand_num = np.random.randint(0, 2000) # To display the original image ax = plt.subplot(2, 10, i) plt.imshow(X_test[rand_num].reshape(image_width, image_width)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # To display the reconstructed image ax = plt.subplot(2, 10, i + 10) plt.imshow(reconstructed_images[rand_num].reshape(image_width, image_width)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # Displaying the plot plt.show() # Plots `n_samples` images. Top row is the original images and the lower row is the reconstructed ones. n_samples = 10 visualize(autoencoder,normalized_test_data, n_samples)Variational Autoencoders (VAE) Limitations of Autoencoders for Content GenerationAfter we train an autoencoder, we might think whether we can use the model to create new content. Particularly, we may ask *can we take a point randomly from that latent space and decode it to get a new content?*The answer is "yes", but but quality and relevance of generated data depend on the regularity of the latent space. 
The latent space regularity depends on the *distribution of the initial data*, the *dimension of the latent space* and the *architecture of the encoder*. It is quite difficult to ensure, a priori, that the encoder will organize the latent space in a smart way compatible with the generative process I mentioned. No regularization means overfitting, which leads to meaningless content once decoded for some point. For more information, see [this nice blog](https://towardsdatascience.com/understanding-variational-autoencoders-vaes-f70510919f73). How can we make sure the latent space is regularized enough? We can explicitly introduce regularization during the training process. Therefore, we introduce **Variational Autoencoders**. Variational Autoencoder (VAE)It's an autoencoder whose training is regularized to avoid overfitting and ensure that the latent space has good properties that enable generative process. The idea is instead of mapping the input into a fixed vector, we want to map it into a distribution. In other words, the encoder outputs two vectors of size $n$, a vector of means $\mathbf{\mu}$, and another vector of standard variations $\mathbf{\sigma}$.![](images/autoen_vae1.png)*Difference between autoencoder (deterministic) and variational autoencoder (probabilistic). [[Image Source](https://towardsdatascience.com/understanding-variational-autoencoders-vaes-f70510919f73)]*The encoded distributions are often normal so that the encoder can be trained to return the mean and the covariance matrix that describe these Gaussians. We force the encoder to return the distributions that are close to a standard normal distribution.![](images/autoen_vae-gaussian.png)*Variational autoencoder model with the multivariate Gaussian assumption.[[]Image Source](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)* VAE Loss FunctionThe loss function that we need to minimize for VAE consists of two components: (a) reconstruction term, which is similar to the loss function of regular autoencoders; and (b) regularization term, which regularizes the latent space by making the distributions returned by the encoder close to a standard normal distribution. We use the Kullback-Leibler divergence to quantify the difference between the returned distribution and a standard Gaussian. KL divergence $D_{KL}(X\|Y)$ measures how much information is lost if the distribution $Y$ is used to represent $X$. I am not willing to go deeply into the mathmatical details of VAE, however, all the math details have been nicely described [here](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html) and [here](https://towardsdatascience.com/understanding-variational-autoencoders-vaes-f70510919f73) among other places.$$L_{VAE}=\|\mathbf{x}-\mathbf{x'}\|^2 - D_{KL}[N(\mu_{\mathbf{x}},\sigma_{\mathbf{x}})\|N(0,1)]$$$$D_{KL}[N(\mu_{\mathbf{x}},\sigma_{\mathbf{x}})\|N(0,1)]=\frac{1}{2}\sum_{k} (\sigma_{\mathbf{x}} + \mu_{\mathbf{x}}^2 -1 - \log(\sigma_{\mathbf{x}}))$$where $k$ is the dimension of the Gaussian. In practice, however, it’s better to model $\sigma_{\mathbf{x}}$ rather than $\log(\sigma_{\mathbf{x}})$ as it is more numerically stable to take exponent compared to computing log. 
Hence, our final KL divergence term is:$$D_{KL}[N(\mu_{\mathbf{x}},\sigma_{\mathbf{x}})\|N(0,1)]=\frac{1}{2}\sum_{k} (\exp(\sigma_{\mathbf{x}}) + \mu_{\mathbf{x}}^2 -1 - \sigma_{\mathbf{x}})$$What is important is that the VAE loss function involves generating samples from $\mathbf{z}\sim N(\mathbf{\mu},\mathbf{\sigma})$. Since Sampling is a stochastic process, we cannot backpropagate the gradient while training the model. To make it trainable, a simple trick, called reparametrization trick, is used to make the gradient descent possible despite the random sampling that occurs halfway of the architecture. In this trick, random variable $\mathbf{z}$ is expressed as a deterministic variable $\mathbf{z}=\mathcal{T}_{\phi}(\mathbf{x},\mathbf{\epsilon})$, where $\mathbf{\epsilon}$ is an auxiliary independent random variable, and the transformation function $\mathcal{T}_{\phi}$ parameterized by ϕ converts $\mathbf{\epsilon}$ to $\mathbf{z}$.If $\mathbf{z}$ is a random variable following a Gaussian distribution with mean $\mathbf{\mu}$ and with covariance $\mathbf{\sigma}$ then it can be expressed as:$$\mathbf{z}=\mathbf{\mu}+\mathbf{\sigma}\odot \mathbf{\epsilon}$$where $\odot$ is the element-wise multiplication.![](images/autoen_vae_backprop.png)*Illustration of the reparametrisation trick. [[Image Source](https://towardsdatascience.com/understanding-variational-autoencoders-vaes-f70510919f73)]* VAE Implementationimport tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Conv2DTranspose, Flatten from tensorflow.keras.layers import Reshape, BatchNormalization, Lambda from tensorflow.keras.models import Model from tensorflow.keras import backend as K from tensorflow.keras.losses import binary_crossentropy from tensorflow.keras.callbacks import TensorBoard # reparameterization trick # instead of sampling from Q(z|X), sample epsilon = N(0,I) # z = z_mean + sqrt(var) * epsilon def sample_z(args): """Reparameterization trick by sampling from an isotropic unit Gaussian. 
# Arguments args (tensor): mean and log of variance of Q(z|X) # Returns z (tensor): sampled latent vector """ mu, sigma = args batch = K.shape(mu)[0] dim = K.int_shape(mu)[1] # by default, random_normal has mean = 0 and std = 1.0 eps = K.random_normal(shape=(batch, dim)) return mu + K.exp(sigma / 2) * eps image_width = 64 image_height = 64 latent_dim = 2 no_epochs = 30 batch_size = 128 num_channels = 1 # Defining the encoder inputs = Input(shape=(image_width, image_height, 1), name='encoder_input') x = Conv2D(32, (3, 3), activation='relu', strides=2, padding='same')(inputs) x = BatchNormalization()(x) x = Conv2D(64, (3, 3), activation='relu', strides=2, padding='same')(x) x = BatchNormalization()(x) conv_shape = K.int_shape(x) x = Flatten()(x) x = Dense(16)(x) x = BatchNormalization()(x) mu = Dense(latent_dim, name='latent_mu')(x) sigma = Dense(latent_dim, name='latent_sigma')(x) # use reparameterization trick to push the sampling out as input z = Lambda(sample_z, output_shape=(latent_dim, ), name='z')([mu, sigma]) encoder = Model(inputs, [mu, sigma, z], name='encoder') encoder.summary() # Defining decoder d_i = Input(shape=(latent_dim, ), name='decoder_input') x = Dense(conv_shape[1] * conv_shape[2] * conv_shape[3], activation='relu')(d_i) x = BatchNormalization()(x) x = Reshape((conv_shape[1], conv_shape[2], conv_shape[3]))(x) cx = Conv2DTranspose(16, (3, 3), strides=2, padding='same', activation='relu')(x) cx = BatchNormalization()(cx) cx = Conv2DTranspose(8, (3, 3), strides=2, padding='same', activation='relu')(cx) cx = BatchNormalization()(cx) o = Conv2DTranspose(1, (3, 3), activation='sigmoid', padding='same', name='decoder_output')(cx) # Instantiate decoder decoder = Model(d_i, o, name='decoder') decoder.summary() # Build VAE model outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') vae.summary() def vae_loss(true, pred): # Reconstruction loss reconstruction_loss = binary_crossentropy(K.flatten(true), K.flatten(pred)) * image_width * image_height # KL divergence loss kl_loss = 1 + sigma - K.square(mu) - K.exp(sigma) kl_loss = K.sum(kl_loss, axis=-1) kl_loss *= -0.5 # Total loss = 50% rec + 50% KL divergence loss return K.mean(reconstruction_loss + kl_loss) vae.compile(optimizer='adam', loss=vae_loss, experimental_run_tf_function=False) vae.fit(normalized_train_data, normalized_train_data, epochs=no_epochs, batch_size=batch_size, validation_data=(normalized_test_data, normalized_test_data), callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) # Credit for code: https://www.machinecurve.com/index.php/2019/12/30/how-to-create-a-variational-autoencoder-with-keras/ def viz_decoded(encoder, decoder, data): """ Visualizes the samples from latent space.""" num_samples = 10 figure = np.zeros((image_width * num_samples, image_height * num_samples, num_channels)) grid_x = np.linspace(-8, 8, num_samples) grid_y = np.linspace(-8, 8, num_samples)[::-1] for i, yi in enumerate(grid_y): for j, xi in enumerate(grid_x): # z_sample = np.array([np.random.normal(0, 1, latent_dim)]) z_sample = np.array([[xi, yi]]) x_decoded = decoder.predict(z_sample) digit = x_decoded[0].reshape(image_width, image_height, num_channels) figure[i * image_width: (i + 1) * image_width, j * image_height: (j + 1) * image_height] = digit plt.figure(figsize=(10, 10)) start_range = image_width // 2 end_range = num_samples * image_width + start_range + 1 pixel_range = np.arange(start_range, end_range, image_width) sample_range_x = np.round(grid_x, 1) sample_range_y = np.round(grid_y, 1) 
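# ticks sit at the centre of each decoded tile (pixel_range) and are labelled with the rounded latent z values sampled along each axis (sample_range_x / sample_range_y)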
plt.xticks(pixel_range, sample_range_x) plt.yticks(pixel_range, sample_range_y) plt.xlabel('z - dim 1') plt.ylabel('z - dim 2') # matplotlib.pyplot.imshow() needs a 2D array, or a 3D array with the third dimension being of shape 3 or 4! # So reshape if necessary fig_shape = np.shape(figure) if fig_shape[2] == 1: figure = figure.reshape((fig_shape[0], fig_shape[1])) # Show image plt.imshow(figure) plt.show() # Plot results data = (normalized_test_data, normalized_test_data) # data = (normalized_train_data, normalized_train_data) viz_decoded(encoder, decoder, data)If you notice the plot above, in the upper right corner and around (-8,-2.7), we see the issue with completeness, which yield outputs that do not make sense. Also, some issues with continuity are visible wherever the samples are blurred.**continuity** i.e. two close points in the latent space should not give two completely different contents once decoded, and **completeness** i.e. for a chosen distribution, a point sampled from the latent space should give “meaningful” content once decoded, are two primary properties of a regularized latent space.# Randomly sample one or more charachters and plot them random_chars = [np.random.normal(0, 1, latent_dim) for _ in range(1)] imgs = [] for char in random_chars: char = char.reshape(-1,2) imgs.append(decoder.predict(char)) imgs = [np.reshape(img,(image_width, image_height)) for img in imgs] for img in imgs: plt.figure(figsize=(2,2)) plt.axis('off') plt.imshow(img, cmap='gray')Creating Morph ImagesFor morphing images, the approach is somewhat different than generating new images of characters. There are four main steps:1. Choose two images that you want to morph between2. Put both images into the VAE's encoder and get a latent vector out for each3. Choose several intermediate vectors between the two latent vectors4. 
Take the intermediate vectors and pass them into the VAE's decoder to generate imagesimport moviepy.editor as mpy def load_image(file_path): img = Image.open(file_path) img_array = np.array(img) return img_array def generate_morph_images(image_1, image_2, encoder, decoder, n_steps): image_1 = image_1.astype('float32')/255.0 image_2 = image_2.astype('float32')/255.0 np.expand_dims(image_1,axis=-1) np.expand_dims(image_2,axis=-1) vec1 = encoder.predict(image_1.reshape(-1,image_width,image_height,1))[2] vec2 = encoder.predict(image_2.reshape(-1,image_width,image_height,1))[2] morph_vecs = [ np.add( np.multiply(vec1, (n_steps - 1 - i) / (n_steps - 1)), np.multiply(vec2, i / (n_steps - 1)), ) for i in range(n_steps) ] return[decoder.predict(vec) for vec in morph_vecs] def animate_morph_images(image_1, image_2, encoder, decoder, n_steps, loop=True): morph_images = [ np.multiply(f[0], 255) for f in generate_morph_images(image_1, image_2, encoder, decoder, n_steps) ] if loop: morph_images = ( int(n_steps / 5) * [morph_images[0]] + morph_images + int(n_steps / 5) * [morph_images[-1]] + morph_images[::-1] ) return mpy.ImageSequenceClip(morph_images, fps=50, with_mask=False) def animate_morph_images_from_morph(morph_images, n_steps, loop=True): if loop: morph_images = ( int(n_steps / 5) * [morph_images[0]] + morph_images + int(n_steps / 5) * [morph_images[-1]] + morph_images[::-1] ) return mpy.ImageSequenceClip(morph_images, fps=25, with_mask=False) img1_path = os.path.join('data','converted', 'output', 'converted_original_7.png_daf7cc5a-4c6f-467b-a4e5-faffb245aa68.png') img2_path = os.path.join('data','converted', 'output', 'converted_original_10.png_17dc166c-00b3-4812-a149-d6c740de1d8c.png') img1 = load_image(img1_path) img2 = load_image(img2_path) n_steps = 90 clip = animate_morph_images(img1, img2,encoder,decoder,n_steps) # Uncomment the line below if you want to see the video clip # clip.ipython_display(width=400) # Create a .gif image clip.write_gif('7to10.gif', fps=10)Gesture Dataset Environmentdef load_gesture(name, gesture): with open("../DVS_Gesture_Dataset/DvsGesture/"+name[:-7]+"_labels.csv", "r") as l: for line in l: labels = line.split(",") if labels[0] == str(gesture): start = int(labels[1]) end = int(labels[2]) events = [] with LegacyAedatFile("../DVS_Gesture_Dataset/DvsGesture/"+name[:-1]) as f: for event in f: if event.timestamp >= start and event.timestamp <= end: events.append([event.x/128, event.y/128]) return np.array(events) def load_img_gesture(name, gesture): with open("../DVS_Gesture_Dataset/DvsGesture/"+name[:-7]+"_labels.csv", "r") as l: for line in l: labels = line.split(",") if labels[0] == str(gesture): start = int(labels[1]) end = int(labels[2]) events = [] bin_events = np.zeros((128, 128)) prev_time = start cumu_time = 0 with LegacyAedatFile("../DVS_Gesture_Dataset/DvsGesture/"+name[:-1]) as f: for event in f: if event.timestamp >= start and event.timestamp <= end: bin_events[event.x, event.y] = 1 cumu_time += event.timestamp - prev_time prev_time = event.timestamp if cumu_time >= 100000: # bin of 100ms cumu_time = 0 events = np.hstack([events, resize(bin_events, (32, 32), anti_aliasing=False).flatten()]) bin_events = np.zeros((128, 128)) return trim_gesture(events, 40960) def trim_gesture(gesture, gesture_size): if gesture.shape[0] > gesture_size: gesture = gesture[0:gesture_size] else: temp = np.zeros(gesture_size, 3) temp[:gesture.shape[0]] = gesture gesture = temp return gesture gestures = [] labels = [] count = 1 with 
open("../DVS_Gesture_Dataset/DvsGesture/trials_to_train.txt") as file: for name in file: for label in range(0, 2): try: gestures.append(load_img_gesture(name, label+1)) labels.append(label) print("loaded", name[:-1], count) count += 1 except: print("failed to load", name[:-1]) gestures = np.array(gestures) gestures = gestures[:, :, np.newaxis] temp = np.zeros((len(labels), 1, 1)) temp[:, 0, 0] = np.array(labels) labels = temp np.save("gestures_img_2", gestures) np.save("labels_img_2", labels) gestures = np.load("gestures_img_2.npy") labels = np.load("labels_img_2.npy") plt.figure(figsize=(15, 50)) nb_img = 40 for i in range(nb_img): plt.subplot(20, 6, i+1) plt.imshow(gestures[3].reshape((40, 1024))[i].reshape(32, 32))Legendre Memory Unit Cell Definitionclass LMUCell(nengo.Network): def __init__(self, units, order, theta, input_d, **kwargs): super().__init__(**kwargs) # compute the A and B matrices according to the LMU's mathematical derivation # (see the paper for details) Q = np.arange(order, dtype=np.float64) R = (2 * Q + 1)[:, None] / theta j, i = np.meshgrid(Q, Q) A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R B = (-1.0) ** Q[:, None] * R C = np.ones((1, order)) D = np.zeros((1,)) A, B, _, _, _ = cont2discrete((A, B, C, D), dt=1.0, method="zoh") with self: nengo_dl.configure_settings(trainable=None) # create objects corresponding to the x/u/m/h variables in the above diagram self.x = nengo.Node(size_in=input_d) self.u = nengo.Node(size_in=1) self.m = nengo.Node(size_in=order) self.h = nengo_dl.TensorNode(tf.nn.tanh, shape_in=(units,), pass_time=False) # compute u_t from the above diagram. # note that setting synapse=0 (versus synapse=None) adds a one-timestep # delay, so we can think of any connections with synapse=0 as representing # value_{t-1} nengo.Connection( self.x, self.u, transform=np.ones((1, input_d)), synapse=None ) nengo.Connection(self.h, self.u, transform=np.zeros((1, units)), synapse=0) nengo.Connection(self.m, self.u, transform=np.zeros((1, order)), synapse=0) # compute m_t # in this implementation we'll make A and B non-trainable, but they # could also be optimized in the same way as the other parameters conn_A = nengo.Connection(self.m, self.m, transform=A, synapse=0) self.config[conn_A].trainable = False conn_B = nengo.Connection(self.u, self.m, transform=B, synapse=None) self.config[conn_B].trainable = False # compute h_t nengo.Connection( self.x, self.h, transform=np.zeros((units, input_d)), synapse=None ) nengo.Connection( self.h, self.h, transform=np.zeros((units, units)), synapse=0 ) nengo.Connection( self.m, self.h, transform=nengo_dl.dists.Glorot(distribution="normal"), synapse=None, )Network Definitiontau_slow = 0.01 tau_fast = None discount = 0.95 with nengo.Network() as net: nengo_dl.configure_settings( trainable=None, stateful=False, keep_history=False, ) # input node inp = nengo.Node(np.zeros(gestures.shape[-1])) # LMU cell lmu = LMUCell( units=212, order=256, theta=gestures.shape[1], # number of events per gesture input_d=gestures.shape[-1], # number of dimension per event (3) ) conn = nengo.Connection(inp, lmu.x, synapse=None) net.config[conn].trainable = False # dense linear readout out = nengo.Node(size_in=2) nengo.Connection(lmu.h, out, transform=nengo_dl.dists.Glorot(), synapse=None) # record output. 
note that we set keep_history=False above, so this will # only record the output on the last timestep (which is all we need # on this task) p = nengo.Probe(out) X_train, X_test, y_train, y_test = train_test_split(gestures, labels, test_size=0.33, random_state=42) with nengo_dl.Simulator(net, minibatch_size=8, unroll_simulation=8) as sim: sim.compile( loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.optimizers.Adam(), metrics=["accuracy"], ) sim.load_params("./lmu_params") sim.fit(X_train, y_train, epochs=1) sim.save_params("./lmu_params") print( "Final test accuracy: %.2f%%" % (sim.evaluate(X_test, y_test, verbose=0)["probe_accuracy"] * 100) )Build finished in 0:00:00 Optimization finished in 0:00:00 |# Constructing graph: build stage (1%) | ETA: 0:00:03Marathon Build Loops Integration Testsimport logging import os from commons import JenkinsJob from IPython.display import HTML, display user, password = os.environ['AUTH'].split(":") jobs = ["marathon-sandbox/job/marathon-loop-1.5", "marathon-sandbox/job/marathon-loop-1.6", "marathon-sandbox/job/marathon-loop-1.7", "marathon-sandbox/job/marathon-loop-master", ] for j in jobs: job = JenkinsJob("jenkins.mesosphere.com/service/jenkins/view/Marathon/job", j, user, password) case_table = await job.unique_errors_table('html') error_table = await job.names_table('html') display(HTML('
{}
'.format(j))) display(HTML(error_table)) display(HTML(case_table))System Integration Testsimport os from commons import JenkinsJob from IPython.display import HTML, display user, password = os.environ['AUTH'].split(":") jobs = ["system-integration-tests/job/marathon-si-dcos-open/job/master", "system-integration-tests/job/marathon-si-dcos-permissive/job/master", "system-integration-tests/job/marathon-si-dcos-strict/job/master"] for j in jobs: job = JenkinsJob("jenkins.mesosphere.com/service/jenkins/view/Marathon/job", j, user, password) case_table = await job.unique_errors_table('html') error_table = await job.names_table('html') display(HTML('
{}
'.format(j))) display(HTML(error_table)) display(HTML(case_table)) import ipywidgets as widgets import requests from ddog import build_graph from time import sleep graph = build_graph('master') sleep(5) # See http://andreafalzetti.github.io/blog/2017/04/17/datadog-png-snapshot-not-showing.html widgets.Image(value=requests.get(graph['snapshot_url']).content, format='png')Data modeling with Apache Cassandra Part I. ETL Pipeline for pre-processing files Setupfrom pathlib import Path import pandas as pd import cassandra from helpers import * from queries import * %load_ext autoreload %autoreload 2Create a list of filepaths to process original event data csv filesroot_path = Path.cwd() data_path = root_path.joinpath("event_data") file_path_list = [e for e in data_path.rglob("*.csv")] print(f"Found {len(file_path_list)} CSV files in: {data_path}")Found 30 CSV files in: /home/workspace/event_dataProcess individual files to create a single file that will be used for Apache Casssandra tables Let's take a look at one individual csv file firstfirst_file = file_path_list[0] !head $first_file first_df = pd.read_csv(first_file) first_df.shape first_df.head()Load all csv files into one dataframe and save it as CSVtry: df = pd.read_csv(f"{root_path}/event_datafile_new.csv") print("Loaded file from disk") except FileNotFoundError: columns = ["artist", "firstName", "gender", "itemInSession", "lastName", "length", "level", "location", "sessionId", "song", "userId"] df = load_all_records(file_path_list, columns) finally: print(f"Shape: {df.shape}") df.head() df.dtypesSet correct dtypes for ts (timestamp) and userId (int)# Although userId should be of type int, because of a limitation of pandas < 0.24 (Series of type int can not hold NaNs) we leave it as int # df["userId"] = df["userId"].fillna(0).astype("int64") df["ts"] = df["ts"].astype("datetime64[ms]") df.head() if not root_path.joinpath("event_datafile_new.csv").exists(): df.to_csv(f"{root_path}/event_datafile_new.csv", index=False) else: print("File event_datafile_new.csv does already exist")File event_datafile_new.csv does already existPart II. Data Modelling with Apache CassandraNow we are ready to work with the CSV file titled **event_datafile_new.csv**, located within the W\workspace directory. The event_datafile_new.csv contains the following columns: - artist - firstName of user- gender of user- item number in session- last name of user- length of the song- level (paid or free song)- location of the user- sessionId- song title- userIdThe image below is a screenshot of what the denormalized data should appear like in the **event_datafile_new.csv** after the code above is run: Create Clusterfrom cassandra.cluster import Cluster cluster = Cluster() session = cluster.connect()Create Keyspace# http://cassandra.apache.org/doc/latest/cql/ddl.html#create-keyspace session.execute(cluster_create_keyspace)Set Keyspacesession.set_keyspace("sparkify")Create queries to find answers to the following three questions1. Give me the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 42. Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 1823. 
Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own' Query 1session.execute(q1_drop_table) session.execute(q1_create_table)Insert datatable = "song_playlist_session" cols = ["sessionId", "itemInSession", "artist", "song", "length"] q1_cql = f"INSERT INTO {table} ({cols[0]}, {cols[1]}, {cols[2]}, {cols[3]}, {cols[4]}) VALUES (?, ?, ?, ?, ?)" print(q1_cql) batch_insert(cql=q1_cql, cols=cols, data=df, size=500, session=session)Starting batch insert for 8056 rows in 17 batches of size 500 Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 473 rows of data Inserted 473 rows of data Batch insert finishedPerform sanity checkquery(f"SELECT * FROM {table} LIMIT 5", session, print_result=False)Execute query_Give me the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 4_q1 = query(q1_query, session)[Row(artist='Faithless', song='Music Matters ()', length=495.30731201171875)]Query 2session.execute(q2_drop_table) session.execute(q2_create_table)Insert datatable = "song_playlist_user" cols = ["userId", "sessionId", "itemInSession", "artist", "song", "firstName", "lastName"] q2_cql = f"INSERT INTO {table} ({cols[0]}, {cols[1]}, {cols[2]}, {cols[3]}, {cols[4]}, {cols[5]}, {cols[6]}) VALUES (?, ?, ?, ?, ?, ?, ?)" print(q2_cql) batch_insert(cql=q2_cql, cols=cols, data=df, size=250, session=session)Starting batch insert for 8056 rows in 33 batches of size 250 Inserted 245 rows of data Inserted 245 rows of data Inserted 245 rows of data Inserted 245 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Inserted 244 rows of data Batch insert finishedPerform sanity checkquery(f"SELECT * FROM {table} LIMIT 5", session, print_result=False)Execute query_Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 182_# explicitly select itemInSession to show that sorting works q2 = query(q2_query, session)[Row(artist='', song='Catch You Baby (Steve Pitron & Max Sanna Radio Edit)', firstname='Sylvie', lastname='Cruz', iteminsession=3), Row(artist='', song='Kilometer', firstname='Sylvie', lastname='Cruz', iteminsession=2), Row(artist='Three Drives', song='Greece 2000', firstname='Sylvie', lastname='Cruz', iteminsession=1), Row(artist='Down To The Bone', song="Keep On Keepin' On", firstname='Sylvie', lastname='Cruz', iteminsession=0)]Query 3session.execute(q3_drop_table) 
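# Note (added): q3_create_table itself lives in queries.py and is not shown in this
# notebook. Given the insert columns below and the "every user who listened to song X"
# question, a reasonable shape for it (an assumption, not the project's actual DDL) is:
#
#   CREATE TABLE IF NOT EXISTS song_user_name (
#       song text,
#       userId int,
#       firstName text,
#       lastName text,
#       PRIMARY KEY (song, userId)
#   )
#
# i.e. partition by song so the WHERE song = ... lookup is a single-partition read,
# with userId as a clustering column so each listener appears exactly once per song.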
session.execute(q3_create_table)Insert datatable = "song_user_name" cols = ["song", "userId", "firstName", "lastName"] q3_cql = f"INSERT INTO {table} ({cols[0]}, {cols[1]}, {cols[2]}, {cols[3]}) VALUES (?, ?, ?, ?)" print(q3_cql) batch_insert(cql=q3_cql, cols=cols, data=df, size=500, session=session)Starting batch insert for 8056 rows in 17 batches of size 500 Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 474 rows of data Inserted 473 rows of data Inserted 473 rows of data Batch insert finishedPerform sanity checkquery(f"SELECT * FROM {table} LIMIT 5", session, print_result=False)Execute query_Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'_q3 = query(q3_query, session)[Row(firstname='Jacqueline', lastname='Lynch'), Row(firstname='Tegan', lastname='Levine'), Row(firstname='Sara', lastname='Johnson')]Drop tables before closing out session[session.execute(q) for q in [q1_drop_table, q2_drop_table, q3_drop_table]]Close session and cluster connection¶session.shutdown() cluster.shutdown()02: Probabilistic Regression Model with TFPimport matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split import tensorflow_probability as tfp import tensorflow as tf from tensorflow.keras.layers import Input, Dense, Concatenate from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam %matplotlib inlineMake Dataset Design standard deviation $\sigma$x_1 = np.linspace(1, 8, 100) x_2 = np.repeat(8, 30) x_3 = np.linspace(8, 1, 150) x_4 = np.repeat(1, 60) x_5 = np.linspace(1, 8, 120) x_6 = np.linspace(8, 1, 200) x_7 = np.repeat(1, 50) x = np.concatenate([x_1, x_2, x_3, x_4, x_5, x_6, x_7]) plt.plot(x) plt.xlabel("index") plt.ylabel("$\sigma$") plt.show() x.shapeGenerate samples based on $\sigma$noise = np.random.normal(0, x, size=len(x)) plt.plot(noise) plt.xlabel("indx") plt.ylabel("noise") plt.show() x_sample = np.sort(np.random.uniform(-3, 3, size=len(x))) y_sample = 3.3 * x_sample + 3.0 + noise plt.scatter(x_sample, y_sample) plt.xlabel("x") plt.ylabel("y") plt.show()Split Train / Validation / Test datasetx_train, x_test, y_train, y_test = train_test_split(x_sample, y_sample, test_size=0.2) x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2) order_idx_train = x_train.argsort(axis=0) x_train = x_train[order_idx_train, np.newaxis] y_train = y_train[order_idx_train, np.newaxis] x_train.shape order_idx_val = x_val.argsort(axis=0) x_val = x_val[order_idx_val, np.newaxis] y_val = y_val[order_idx_val, np.newaxis] x_val.shape order_idx_test = x_test.argsort(axis=0) x_test = x_test[order_idx_test, np.newaxis] y_test = y_test[order_idx_test, np.newaxis] x_test.shapeVisualize Train, Validation, Testplt.scatter(x_train, y_train) plt.scatter(x_val, y_val) plt.scatter(x_test, y_test)Fitting and evaluating a linear regression model with constant variancedef NLL(y, distribution): return -distribution.log_prob(y) def my_dist(params): return tfp.distributions.Normal(loc=params, scale=1) inputs = Input(shape=(1,)) params = Dense(1)(inputs) dist = tfp.layers.DistributionLambda(my_dist)(params) model_sd_1 = Model(inputs=inputs, outputs=dist) 
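# Note (added): with a fixed scale, the negative log-likelihood of a Normal is
#   -log p(y) = 0.5 * ((y - mu) / sigma)**2 + log(sigma) + 0.5 * log(2 * pi),
# so for scale=1 minimizing this NLL is equivalent to minimizing mean squared error
# up to an additive constant; only the constant-variance assumption ties it to
# plain least-squares regression.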
model_sd_1.compile(Adam(), loss=NLL) model_sd_1.summary() history = model_sd_1.fit(x_train, y_train, epochs=2000, verbose=0, validation_data=(x_val, y_val))Let's check the training found the correct slope and intercept.model_sd_1.get_weights() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['loss', 'val_loss']) plt.ylabel('NLL') plt.xlabel('Epochs') plt.show() x_pred = np.arange(-3, 3, 0.1) x_pred.shape preds = model_sd_1(x_pred).mean() sigma = 1 # predefined plt.figure(figsize=(14, 5)) plt.subplot(1,2,1) plt.scatter(x_train, y_train) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Train Data ($\sigma=1$)") plt.subplot(1,2,2) plt.scatter(x_val, y_val) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Validation Data ($\sigma=1$)") plt.show()Performance (NLL)model_sd_1.evaluate(x_train, y_train) model_sd_1.evaluate(x_val, y_val)4/4 [==============================] - 0s 2ms/step - loss: 8.5542How about using $\sigma=5$?def NLL(y, distribution): return -distribution.log_prob(y) def my_dist(params): return tfp.distributions.Normal(loc=params, scale=5) inputs = Input(shape=(1,)) params = Dense(1)(inputs) dist = tfp.layers.DistributionLambda(my_dist)(params) model_sd_5 = Model(inputs=inputs, outputs=dist) model_sd_5.compile(Adam(), loss=NLL) history = model_sd_5.fit(x_train, y_train, epochs=2000, verbose=0, validation_data=(x_val, y_val)) model_sd_5.get_weights() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['loss', 'val_loss']) plt.ylabel('NLL') plt.xlabel('Epochs') plt.show() preds = model_sd_5(x_pred).mean() sigma = 5 # predefined plt.figure(figsize=(14, 5)) plt.subplot(1,2,1) plt.scatter(x_train, y_train) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Train Data ($\sigma=5$)") plt.subplot(1,2,2) plt.scatter(x_val, y_val) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Validation Data ($\sigma=5$)") plt.show() model_sd_5.evaluate(x_train, y_train) model_sd_5.evaluate(x_val, y_val)4/4 [==============================] - 0s 1ms/step - loss: 2.8337Fitting and evaluating a linear regression model with a nonconstant variancedef NLL(y, distribution): return -distribution.log_prob(y) def my_dist(params): return tfp.distributions.Normal(loc=params[:,0:1], scale=1e-3 + tf.math.softplus(0.05 * params[:,1:2])) inputs = Input(shape=(1,)) params = Dense(2)(inputs) dist = tfp.layers.DistributionLambda(my_dist)(params) model_monotonic_sd = Model(inputs=inputs, outputs=dist) model_monotonic_sd.compile(Adam(), loss=NLL) model_monotonic_sd.summary() history = model_monotonic_sd.fit(x_train, y_train, epochs=2000, verbose=0, validation_data=(x_val,y_val)) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['loss', 
'val_loss']) plt.ylabel('NLL') plt.xlabel('Epochs') plt.show() print(model_monotonic_sd.evaluate(x_train,y_train, verbose=0)) print(model_monotonic_sd.evaluate(x_val,y_val, verbose=0)) preds = model_monotonic_sd(x_pred).mean() sigma = model_monotonic_sd(x_pred).stddev() plt.figure(figsize=(14, 5)) plt.subplot(1,2,1) plt.scatter(x_train, y_train) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Train Data") plt.subplot(1,2,2) plt.scatter(x_val, y_val) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Validation Data") plt.show()Give more flexibility for standard deviationdef NLL(y, distr): return -distr.log_prob(y) def my_dist(params): return tfp.distributions.Normal(loc=params[:,0:1], scale=1e-3 + tf.math.softplus(0.05 * params[:,1:2])) inputs = Input(shape=(1,)) out1 = Dense(1)(inputs) hidden1 = Dense(30,activation="relu")(inputs) hidden2 = Dense(20,activation="relu")(hidden1) hidden3 = Dense(20,activation="relu")(hidden2) out2 = Dense(1)(hidden3) params = Concatenate()([out1,out2]) dist = tfp.layers.DistributionLambda(my_dist)(params) model_flex_sd = Model(inputs=inputs, outputs=dist) model_flex_sd.compile(Adam(learning_rate=0.01), loss=NLL) model_flex_sd.summary() history = model_flex_sd.fit(x_train, y_train, epochs=2000, verbose=0, validation_data=(x_val,y_val)) preds = model_flex_sd(x_pred).mean() sigma = model_flex_sd(x_pred).stddev() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['loss', 'val_loss']) plt.ylabel('NLL') plt.xlabel('Epochs') plt.show() print(model_flex_sd.evaluate(x_train,y_train, verbose=0)) print(model_flex_sd.evaluate(x_val,y_val, verbose=0)) preds = model_flex_sd(x_pred).mean() sigma = model_flex_sd(x_pred).stddev() plt.figure(figsize=(14, 5)) plt.subplot(1,2,1) plt.scatter(x_train, y_train) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Train Data") plt.subplot(1,2,2) plt.scatter(x_val, y_val) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Validation Data") plt.show()Test the model on test data **Make sure that training and test data shape is a matrix, not a vector.**model_flex_sd.evaluate(x_test, y_test, verbose=0) plt.figure(figsize=(14, 5)) plt.scatter(x_test, y_test) plt.plot(x_pred, preds, color="red", linewidth=4) plt.plot(x_pred, preds + 2*sigma, color="orange", linestyle="--", linewidth=4) plt.plot(x_pred, preds - 2*sigma, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Test Data") plt.show() -np.mean(model_flex_sd(x_test).log_prob(y_test)) NLL_test = np.mean(-np.log(1/(np.sqrt(2*np.pi*np.square(model_flex_sd(x_test).stddev()))))+ ((np.square(y_test-model_flex_sd(x_test).mean())/(2*np.square(model_flex_sd(x_test).stddev()))))) print(NLL_test)2.6366794Test it on outside the observed 
data region (extrapolation)x_extp = np.arange(-10, 10, 0.1) preds_extp = model_flex_sd(x_extp).mean() sigma_extp = model_flex_sd(x_extp).stddev() plt.scatter(x_test, y_test) plt.plot(x_extp, preds_extp, color="red", linewidth=4) plt.plot(x_extp, preds_extp + 2*sigma_extp, color="orange", linestyle="--", linewidth=4) plt.plot(x_extp, preds_extp - 2*sigma_extp, color="orange", linestyle="--", linewidth=4) plt.xlabel("x") plt.ylabel("y") plt.title("Test Data (with extrapolation)")seaborn.jointplot---Seaborn's `jointplot` displays a relationship between 2 variables (bivariate) as well as 1D profiles (univariate) in the margins. This plot is a convenience class that wraps [JointGrid](http://seaborn.pydata.org/generated/seaborn.JointGrid.htmlseaborn.JointGrid).%matplotlib inline import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np plt.rcParams['figure.figsize'] = (20.0, 10.0) plt.rcParams['font.family'] = "serif"The multivariate normal distribution is a nice tool to demonstrate this type of plot as it is sampling from a multidimensional Gaussian and there is natural clustering. I'll set the covariance matrix equal to the identity so that the X and Y variables are uncorrelated -- meaning we will just get a blob# Generate some random multivariate data x, y = np.random.RandomState(8).multivariate_normal([0, 0], [(1, 0), (0, 1)], 1000).T df = pd.DataFrame({"x":x,"y":y})Default plotp = sns.jointplot(data=df,x='x', y='y')Currently, `jointplot` wraps `JointGrid` with the following options for `kind`: - scatter - reg - resid - kde - hex Scatter is the default parametersp = sns.jointplot(data=df,x='x', y='y',kind='scatter')'reg' plots a linear regression line. Here the line is close to flat because we chose our variables to be uncorrelatedp = sns.jointplot(data=df,x='x', y='y',kind='reg')'resid' plots the residual of the data to the regression line -- which is not very useful for this specific example because our regression line is almost flat and thus the residual is almost the same as the data.x2, y2 = np.random.RandomState(9).multivariate_normal([0, 0], [(1, 0), (0, 1)], len(x)).T df2 = pd.DataFrame({"x":x,"y":y2}) p = sns.jointplot(data=df,x='x', y='y',kind='resid')`kde` plots a kernel density estimate in the margins and converts the interior into a shaded countour plotp = sns.jointplot(data=df,x='x', y='y',kind='kde')'hex' bins the data into hexagons with histograms in the margins. At this point you probably see the "pre-cooked" nature of `jointplot`. It provides nice defaults, but if you wanted, for example, a KDE on the margin of this hexplot you will need to use `JointGrid`.p = sns.jointplot(data=df,x='x', y='y',kind='hex')`stat_func` can be used to provide a function for computing a summary statistic from the data. The full x, y data vectors are passed in, so the function must provide one value or a tuple from many. 
As an example, I'll provide `tmin`, which when used in this way will return the smallest value of x that was greater than its corresponding value of y.from scipy.stats import tmin p = sns.jointplot(data=df, x='x', y='y',kind='kde',stat_func=tmin) # tmin is computing roughly the equivalent of the following print(df.loc[df.x>df.y,'x'].min())-1.37265900987Change the colorp = sns.jointplot(data=df, x='x', y='y', kind='kde', color="#99ffff")`ratio` adjusts the relative size of the marginal plots and 2D distributionp = sns.jointplot(data=df, x='x', y='y', kind='kde', ratio=1)Create separation between 2D plot and marginal plots with `space`p = sns.jointplot(data=df, x='x', y='y', kind='kde', space=2)`xlim` and `ylim` can be used to adjust the field of viewp = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-15,15), ylim=(-15,15))Pass additional parameters to the marginal plots with `marginal_kws`. You can pass similar options to `joint_kws` and `annot_kws`p = sns.jointplot(data=df, x='x', y='y', kind='kde', marginal_kws={'lw':5, 'color':'red'})Finalizesns.set(rc={'axes.labelsize':30, 'figure.figsize':(20.0, 10.0), 'xtick.labelsize':25, 'ytick.labelsize':20}) from itertools import chain p = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-3,3), ylim=(-3,3), space=0, stat_func=None, marginal_kws={'lw':3, 'bw':0.2}).set_axis_labels('X','Y') p.ax_marg_x.set_facecolor('#ccffccaa') p.ax_marg_y.set_facecolor('#ccffccaa') for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): l.set_linestyle('--') l.set_color('black') plt.text(-1.7,-2.7, "Joint Plot", fontsize = 55, color='Black', fontstyle='italic') fig, ax = plt.subplots(1,1) sns.set(rc={'axes.labelsize':30, 'figure.figsize':(20.0, 10.0), 'xtick.labelsize':25, 'ytick.labelsize':20}) from itertools import chain p = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-3,3), ylim=(-3,3), space=0, stat_func=None, ax=ax, marginal_kws={'lw':3, 'bw':0.2}).set_axis_labels('X','Y') p.ax_marg_x.set_facecolor('#ccffccaa') p.ax_marg_y.set_facecolor('#ccffccaa') for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): l.set_linestyle('--') l.set_color('black') plt.text(-1.7,-2.7, "Joint Plot", fontsize = 55, color='Black', fontstyle='italic') # p = sns.jointplot(data=df, # x='x', # y='y', # kind='kde', # xlim=(-3,3), # ylim=(-3,3), # space=0, # stat_func=None, # ax=ax[1], # marginal_kws={'lw':3, # 'bw':0.2}).set_axis_labels('X','Y') # p.ax_marg_x.set_facecolor('#ccffccaa') # p.ax_marg_y.set_facecolor('#ccffccaa') # for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): # l.set_linestyle('--') # l.set_color('black') p.savefig('../../figures/jointplot.png')接口:从协议到抽象基类Python中仅实现部分接口通常是可以接收的,例如若一个类实现了\_\_getitem\_\_()方法,则该类的实例是可迭代的对象,而并不一定需要实现\_\_iter\_\_以及\_\_contains\_\_方法。Python的抽象基类则不同于上述灵活的应用,抽象基类有严格规定以及类型检查 Python中的接口和协议“我们把协议定义为非正式的接口,是让Python这种动态类型语言实现多态的方式”对于Python来说,只要在自定义类中实现特定方法或者继承特定属性就能够实现多态所描述的:一个接口,多种实现。接口:“对象公开方法的子集,该子集让对象在系统中扮演特定的角色”。一个类可能实现多个接口,从而让实例扮演多个角色。协议仅是非正式的接口,Python中的协议并不能像正式接口那样施加限制,同时Python允许一个类仅实现部分接口 —— KISS原则 Python对序列的偏爱正常来说,为了实现类实例中元素的迭代,需要实现\_\_iter\_\_方法;而为了支持“in”关键字,则需要实现\_\_contains\_\_方法。但是在仅实现\_\_getitem\_\_方法的情况下,Python也能正常实现元素的迭代和对“in”关键字的支持。并且由于Python本身的特性,这一自定义类也无需继承序列抽象基类abs.Sequence。这些特性正好体现了Python对序列类型(或者说对迭代这一操作)的偏爱,即Python解释器会尝试调用多种方法,以支持迭代操作。 Python神奇的动态性Python的动态性体现在方方面面,Python不仅允许在程序运行过程中为一个对象创建新属性,或者改变一个类的类属性,甚至允许为类的行为“打补丁” —— 
动态实现类的方法以支持某种协议、接口或者应用要求。下述是一个例子下述的TestSeq类实现了\_\_getitem\_\_以及\_\_len\_\_方法,以支持最基础的序列操作。若想实现元素的打乱,即支持random.shuffle,还需要实现\_\_setitem\_\_方法,即支持可变的序列协议。下述例子中\_\_setitem\_\_方法是在程序运行过程中实现的。这一处理方法也被称为猴子补丁(Monkey Patch)。这一例子也反映了协议的动态性,在这个例子种shuffle不关心TestSeq如何实现的\_\_setitem\_\_方法,也不关心传入的参数类型,只要TestSeq实现了shuffle接口要求的方法即可。import random random.seed(1024) class TestSeq: def __init__(self, component): self._component = component def __getitem__(self, position): return self._component[position] def __len__(self): return len(self._component) def set_item(obj, position, value): obj._component[position] = value test_list = [1, 2, 3, 4, 5] test_seq = TestSeq(test_list) print(test_seq[:5]) # 动态注册__setitem__方法 TestSeq.__setitem__ = set_item random.shuffle(test_seq) print(test_seq[:5])[1, 2, 3, 4, 5] [5, 3, 2, 4, 1]“表型”和“支序”鸭子类型实际上是反映了“表型”的观点,即从行为上判断多个实例是不是同一个类的实例。基于这一观点,在实际应用过程中,某一特定对象是哪个类的实例并不是非常重要的问题,更重要的是这个对象能不能实现特定的行为。在为类添加功能的过程中,有时会导致毫不相关的两个类有相似的方法和接口,但是并不能保证分属于不同类的相同方法有相类似的语义。实际上我们总是期望相同方法具有语义上的相似性。例如对于repr和print,我们期望repr输出有助于程序调试的信息(例如当前对象的类名),print输出用户友好的信息(当前对象存储的信息)。假设在实现一个自定义类时巧合对调了这两个方法的具体实现,毫无疑问,repr和print依然能正常使用,即行为上和内置类完全一致,但是语义上不一致 —— 这会导致使用中的困扰。上述问题实际上反映了仅关注“表型”的缺陷。另一方面,我们可以期待继承自同一抽象基类的多个类的同一方法不仅有相同的接口,更是有相似的语义。因此,对类的继承关系进行检查或许是不错的想法。这一思路反映了“支序”的观点。Python中使用isinstance和issubclass执行这一想法,建议仅对检查是否继承自抽象基类时才使用这一语法。其理由是这样更灵活 —— 若当前测试的类没有继承自抽象基类,有多种方法可以让这个类的实例通过类型测试 定义抽象基类的子类继承自抽象基类的类需要实现特定方法。但是Python在加载时并不会检查这个类是否有实现特定方法,取而代之的是在实例化时进行检查。若没有实现特定方法则会抛出TypeError。下面假设上述定义的TestSeq继承自抽象基类collections.MutableSequence,则有如下实现。from collections import MutableSequence import random random.seed(1024) class TestSeq(MutableSequence): def __init__(self, component): self._component = list(component) def __getitem__(self, position): return self._component[position] def __len__(self): return len(self._component) def __setitem__(self, position, value): """ collections.MutableSequence 要求实现的方法 而TestSeq并需要这个类的功能 """ self._component[position] = value def __delitem__(self, position): """ collections.MutableSequence 要求实现的方法 而TestSeq并需要这个类的功能 """ del self._component[position] def insert(self, position, value): """ collections.MutableSequence 要求实现的方法 而TestSeq并需要这个类的功能 """ self._component.insert(position, value) def set_item(obj, position, value): obj._component[position] = value test_list = [1, 2, 3, 4, 5] test_seq = TestSeq(test_list) print(test_seq[:5]) random.shuffle(test_seq) print(test_seq[:5])[1, 2, 3, 4, 5] [5, 3, 2, 4, 1]标准库中的抽象基类 collections模块中的抽象基类Python的collections模块提供了一些抽象基类。本书提供了一个非常不错的UML图,用以表示该模块中各个抽象基类之间的关系。值得注意的是,本书给出的UML图和[文档](https://docs.python.org/3/library/collections.abc.htmlcollections-abstract-base-classes)中的继承关系有出入,但是大体上是一致的。* Iterable、Container和Sized这三个类提供了非常基础的协议。其中,Iterable提供了\_\_iter\_\_方法以支持迭代,Container提供了\_\_contains\_\_方法以支持in运算符,Sized提供了\_\_len\_\_方法以支持len()方法。* CollectionCollection类在本书中并未提及,但是在文档中有提及,并且起到“承上启下”的作用。具体来说,这个抽象基类继承自Iterable、Container和Sized,即整合了上述基础的三个类。下述各类如有需要实现上述三个协议,实际上都是继承自Collection,而不是直接继承自Iterable、Container和Sized。* Sequence、Mapping和Set这三个类是不可变集合。分别实现了三类常用类型,并且这三类均有可变的子类 —— MutableSequence、MutableMapping以及MutableSet。* MappingView视图类主要是创建一个动态随源数据改变而改变的数据结构。MappingView有三个重要的子类,其分别是ItemsView、KeysView以及ValuesView。映射方法.items()、.keys()以及.values()分别返回这三个类的实例。其中,ItemsView和KeysView还从set类继承了大量方法,ValuesView则仅另外继承自Collection。* 
Callable和Hashable这两个抽象基类相当的“孤立”,一方面既不继承自其他类,另一方面也很少被其他类继承。Callable类提供了\_\_call\_\_方法,Hashable类提供了\_\_hash\_\_方法。乍一看,这两个方法很常用,但是在使用中通常不会主动声明继承自这两个类以支持相应的方法。只要实现相应的方法,Python就能够将自定义的类识别为对应抽象基类的子类。例如对于Callable,一个自定义类只要实现了\_\_call\_\_方法,就能够通过isinstance的类型检查。这一特点源于\_\_subclasshook\_\_方法,具体细节可以参考[子类检查](https://hg.python.org/cpython/file/3.4/Lib/abc.py1194),\_\_subclasscheck\_\_方法会调用\_\_subclasshook\_\_方法进行子类检查。* IteratorIterator继承自Iterable,在Iterable的基础上,Iterator添加了\_\_next\_\_方法。* 其他除了本书本章介绍的抽象基类外,从文档中可以得知collections模块中还有一些其他抽象基类。例如用于支持序列反转\_\_reversed\_\_方法的Reversible类,用于支持async/await语法的Awaitable、AsyncIterable、AsyncIterator以及AsyncGenerator等抽象基类。class TestCLS: def __len__(self): return 1 def __call__(self): return self.__len__() from collections.abc import Sized, Callable print("TestCLS类是否是Sized的子类?", isinstance(TestCLS(), Sized)) print("TestCLS类是否是Callable的子类?", isinstance(TestCLS(), Callable))TestCLS类是否是Sized的子类? True TestCLS类是否是Callable的子类? Truenumbers中的抽象基类[numbers](https://docs.python.org/3/library/numbers.html)中的抽象基类均是和数字相关的类。相较于collections中的抽象基类,numbers中的抽象基类层次关系更为明显,因而被称为数字塔(The numeric tower):* Number* Complex* Real* Rational* Integral上述结构层次比较明显,其中唯一有些疑惑的是Real和Integral中间的Rational(对于精度有限的计算机来说,真的有必要区分有理数和无理数吗)。在Real的基础上,Rational类进一步添加了numerator和denominator属性用于构造分式。 自定义抽象基类本书自定义了一个实现如下功能的抽象基类:随机从有限集合中挑选元素并且选出的物体没有重复,直到所有的元素被选完。本书将这一自定义抽象基类命名为Tombola,并且具有两个抽象方法以及两个具体方法:* .load():把元素放入容器* .pick():从容器中随机拿出一个元素并返回该元素* .loaded():如果容器中至少有一个元素,则返回True* .inspect():返回一个有序容器,由容器中的现有元素构成,不会修改容器的内容(对于本方法,在实际应用中,对于同一个容器,希望每次调用这个方法会返回相同的序列)这一抽象基类定义如下。下述定义中,loaded方法依赖于inspect方法,而inspect方法则依赖于抽象方法load和pick。对于这一抽象基类的子类,load和pick方法必然会实现,因此inspect方法必然能够正常工作,否则用户应当检查load方法和pick方法的实现。不同版本的Python使用不同的方法建立自定义的抽象基类。* python 3.4及以后:继承自abc.ABC -> class Tombola(abc.ABC)* python 3.4之前:class Tombola(metaclass=abc.ABCMeta)* python 2:\_\_metaclass\_\_ = abs.ABCMetaimport abc class Tombola(abc.ABC): @abc.abstractmethod def load(self, iterable): """ 从可迭代对象中添加元素 """ @abc.abstractmethod def pick(self): """ 随机选择元素并返回 """ def loaded(self): """ 委托给inspect返回的元组 """ return bool(self.inspect()) def inspect(self): """ 返回一个由当前元素构成的有序元组 """ items = [] while True: try: items.append(self.pick()) except LookupError: break self.load(items) # sorted 保证每次返回的序列相同 return tuple(sorted(items))自定义抽象基类的子类Tombola要求其子类实现load方法以及pick方法。本章实现了Tombola类的两个子类,分别为BingoCage和LotteryBlower。BingoCage实现了load和pick,此外还添加了\_\_init\_\_和\_\_call\_\_。LotteryBlower实现了load和pick,并根据实际情况覆写了loaded和inspect(具体来说,将相关方法的具体实现委托给list)。除了上述两个子类外,本章还定义了一个Tombola的虚拟子类 —— TomboList。TomboList不会从Tombola继承任何方法或属性,但是能够通过issubclass以及isinstance的类型检查。Python不会检查TomboList是否符合Tombola抽象基类的接口,但是实际上要求TomboList符合Tombola的接口,否则在实际应用中会报错。import random class BingoCage(Tombola): def __init__(self, items): self._randomizer = random.SystemRandom() self._items = list() # 将初始加载委托给load self.load(items) def load(self, items): self._items.extend(items) self._randomizer.shuffle(self._items) def pick(self): try: return self._items.pop() except IndexError: raise LookupError("pick from empty BingoCage") def __call__(self): self.pick() class LotteryBlower(Tombola): def __init__(self, iterable): self._balls = list(iterable) def load(self, iterable): self._balls.extend(iterable) def pick(self): try: position = random.randrange(len(self._balls)) except ValueError: raise LookupError("pick from empty LotteryBlower") return self._balls.pop(position) def loaded(self): # 委托给list return bool(self._balls) def inspect(self): # 委托给list return tuple(sorted(self._balls)) @Tombola.register class TombolaList(list): def 
pick(self): if self: position = random.randrange(len(self)) return self.pop(position) else: raise LookupError("pop from empty TomboList") load = list.extend def loaded(self): return bool(self) def inspect(self): return tuple(sorted(self))Importsfrom pyspark.sql import SparkSession import pyspark.sql.functions as F from pathlib import Path from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler from pyspark.ml import Pipeline from pyspark.ml.clustering import KMeans from pyspark.ml.evaluation import ClusteringEvaluator DATA_PATH = Path('data/') !ls {DATA_PATH} spark = (SparkSession .builder .appName('Structured Streaming') .getOrCreate()) sparkStructured Streamingstatic_df = (spark .read .format('csv') .option('inferschema', 'true') .option('header', 'true') .load(str(DATA_PATH / 'retail-data/by-day/*.csv'))) static_schema = static_df.schema static_schema static_df.createOrReplaceTempView('retail_data') static_df.printSchema() static_df.selectExpr('CustomerID', '(Quantity * UnitPrice) as total_cost', 'InvoiceDate').show() (static_df .selectExpr('CustomerID', '(Quantity * UnitPrice) as total_cost', 'InvoiceDate') .groupBy('CustomerID', F.window(F.col('InvoiceDate'), '1 day')) .sum('total_cost') .show(5)) streaming_df = (spark .readStream.format('csv') .schema(static_schema) .option('maxFilesPerTrigger', 1) .option('header', 'true') .load(str(DATA_PATH / 'retail-data/by-day/*.csv'))) streaming_df.isStreaming purchase_by_customer_by_hour = (streaming_df .selectExpr('CustomerID', '(Quantity * UnitPrice) as total_cost', 'InvoiceDate') .groupBy('CustomerID', F.window(F.col('InvoiceDate'), '1 day')) .sum('total_cost')) (purchase_by_customer_by_hour .writeStream .format('memory') .queryName('customer_purchases') .outputMode('complete') .start()) spark.sql("SELECT * FROM customer_purchases ORDER BY 3 DESC").show(5)+----------+--------------------+------------------+ |CustomerID| window| sum(total_cost)| +----------+--------------------+------------------+ | 12415.0|[2011-03-02 18:00...| 16558.14| | 15769.0|[2011-03-16 19:00...| 10065.0| | null|[2011-03-16 19:00...| 7876.000000000018| | 12435.0|[2011-03-16 19:00...|3978.9899999999993| | null|[2011-03-02 18:00...| 3538.750000000001| +----------+--------------------+------------------+ only showing top 5 rowsMachine Learningstatic_df.printSchema() prep_df = (static_df .na.fill(0) .withColumn('day_of_week', F.date_format(F.col('InvoiceDate'), 'EEEE')) .coalesce(5)) prep_df.explain() prep_df.show() train_df = prep_df.where("InvoiceDate < '2011-07-01'") test_df = prep_df.where("InvoiceDate >= '2011-07-01'") train_df.count(), test_df.count() indexer = (StringIndexer() .setInputCol('day_of_week') .setOutputCol('day_of_week_index')) encoder = (OneHotEncoder() .setInputCol('day_of_week_index') .setOutputCol('day_of_week_encoded')) vector_assembler = (VectorAssembler() .setInputCols(['UnitPrice', 'Quantity', 'day_of_week_encoded']) .setOutputCol('features')) tfms_pipeline = (Pipeline() .setStages([indexer, encoder, vector_assembler])) fitted_pipeline = tfms_pipeline.fit(train_df) transformed_train_df = fitted_pipeline.transform(train_df) transformed_test_df = fitted_pipeline.transform(test_df) transformed_train_df.cache() transformed_train_df.show() kmeans = KMeans().setK(20).setSeed(1) kmeans_model = kmeans.fit(transformed_train_df) transformed_train_df.take(1) ClusteringEvaluator().evaluate(kmeans_model.transform(transformed_train_df)) ClusteringEvaluator().evaluate(kmeans_model.transform(transformed_test_df)) 
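# Note (added): ClusteringEvaluator defaults to the silhouette metric with squared
# Euclidean distance, where values near 1 indicate tight, well-separated clusters.
# The same evaluation with the defaults spelled out (assuming Spark >= 2.4):
evaluator = ClusteringEvaluator(metricName='silhouette', distanceMeasure='squaredEuclidean')
evaluator.evaluate(kmeans_model.transform(transformed_test_df))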
kmeans_model.clusterCenters() train_df.groupBy('day_of_week').count().show() kmeans_model.transform(transformed_train_df).show(3)+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+-----------+-----------------+-------------------+--------------------+----------+ |InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|day_of_week|day_of_week_index|day_of_week_encoded| features|prediction| +---------+---------+--------------------+--------+-------------------+---------+----------+--------------+-----------+-----------------+-------------------+--------------------+----------+ | 537226| 22811|SET OF 6 T-LIGHTS...| 6|2010-12-06 08:34:00| 2.95| 15987.0|United Kingdom| Monday| 2.0| (5,[2],[1.0])|(7,[0,1,4],[2.95,...| 0| | 537226| 21713|CITRONELLA CANDLE...| 8|2010-12-06 08:34:00| 2.1| 15987.0|United Kingdom| Monday| 2.0| (5,[2],[1.0])|(7,[0,1,4],[2.1,8...| 0| | 537226| 22927|GREEN GIANT GARDE...| 2[...]Get a prediction trajectory, and make a movie!gaussian_trajs_concat = np.load('../bayes_implicit_solvent/rjmc_experiments/elaborate_tree_rjmc_march31_run_n_compounds=315_n_iter=100000_gaussian_ll_small_proposals.npz')['within_model_trajs'] student_t_trajs_concat = np.load('../bayes_implicit_solvent/rjmc_experiments/elaborate_tree_rjmc_march31_run_n_compounds=315_n_iter=100000_student-t_ll_small_proposals.npz')['within_model_trajs'] len(set([tuple(t) for t in gaussian_trajs_concat])) len(gaussian_trajs_concat) /4000 gaussian_trajs = [gaussian_trajs_concat[26*i:26*(i+1)] for i in range(len(gaussian_trees))] student_t_trajs = [student_t_trajs_concat[26*i:26*(i+1)] for i in range(len(student_t_trees))] gaussian_thetas = [traj[-1] for traj in gaussian_trajs] student_t_thetas = [traj[-1] for traj in student_t_trajs] set([len(t) for t in gaussian_trajs]), set([len(t) for t in student_t_trajs]) plt.plot([len(t) for t in gaussian_trajs_concat]) N_s_continuous = np.array([int(len(t)/2) for t in gaussian_trajs_concat]) len(N_s_continuous) 26 * (len(gaussian_trees)) plt.plot([len(t) for t in gaussian_trajs]) len(N_s_continuous) len(N_s_continuous) okay_inds = [] problem_inds = [] for i in tqdm(range(len(gaussian_trees))): n_continuous = N_s_continuous[i * 26] n_discrete = len(gaussian_trees[i].ordered_nodes) if n_continuous == n_discrete: okay_inds.append(i) else: problem_inds.append(i) len(problem_inds) preds = get_predictions( student_t_thetas[-1], t_typings[tuple(student_t_trees[-1].ordered_nodes)]) def rmse(x, y): return np.sqrt(np.mean((x - y)**2)) rmse(unreduce(preds), expt_means) gaussian_trajs[0][0] gaussian_trees[0] gaussian_prediction_traj = [] for i in tqdm(range(len(gaussian_trees))[::5]): gaussian_prediction_traj.append(get_predictions( gaussian_thetas[i], gaussian_typings[tuple(gaussian_trees[i].ordered_nodes)])) diag = np.linspace(-15,5) plt.scatter(expt_means, gaussian_prediction_traj[-1]) plt.plot(diag, diag, c='grey') unreduced_gaussian_prediction_traj = unreduce(np.array(gaussian_prediction_traj)) diag = np.linspace(-15,5) plt.scatter(expt_means, unreduced_gaussian_prediction_traj[-1]) plt.plot(diag, diag, c='grey') len(set([tuple(p) for p in unreduced_gaussian_prediction_traj])) len(set([tuple(t.get_radii()) for t in gaussian_trees])) np.save('march30_gaussian_ll_rjmc_prediction_traj.npy', unreduced_gaussian_prediction_traj) plt.plot([rmse(p, expt_means) for p in gaussian_prediction_traj]) plt.plot([rmse(p, expt_means) for p in unreduced_gaussian_prediction_traj]) plt.plot([rmse(p, expt_means) for p in 
unreduced_gaussian_prediction_traj[10:]]) rmse(unreduced_gaussian_prediction_traj[-1], expt_means) rmse(gaussian_prediction_traj[-1], expt_means) train_inds = np.array([ 1, 2, 3, 5, 6, 8, 10, 13, 16, 17, 18, 20, 21, 22, 23, 24, 28, 32, 33, 36, 37, 39, 41, 43, 44, 46, 48, 49, 51, 52, 54, 56, 57, 62, 64, 66, 68, 70, 73, 79, 82, 83, 84, 85, 92, 94, 97, 100, 101, 104, 105, 107, 109, 110, 111, 115, 117, 118, 119, 120, 121, 122, 123, 126, 129, 131, 132, 138, 139, 142, 143, 144, 145, 146, 151, 152, 154, 156, 157, 159, 162, 163, 164, 166, 172, 173, 175, 176, 177, 183, 184, 187, 188, 190, 195, 196, 199, 200, 201, 204, 205, 208, 214, 215, 216, 223, 226, 227, 229, 230, 232, 233, 238, 239, 240, 243, 244, 245, 246, 254, 255, 262, 263, 264, 266, 267, 270, 271, 273, 274, 275, 277, 278, 279, 289, 290, 291, 292, 295, 296, 298, 299, 301, 302, 303, 304, 305, 306, 307, 308, 311, 312, 319, 320, 321, 322, 324, 326, 328, 329, 333, 336, 337, 340, 341, 342, 345, 346, 347, 351, 353, 354, 357, 359, 360, 363, 366, 367, 369, 371, 374, 376, 379, 380, 382, 386, 391, 392, 393, 394, 395, 396, 397, 399, 400, 401, 402, 404, 408, 411, 412, 413, 414, 415, 417, 419, 420, 421, 428, 429, 430, 431, 432, 433, 434, 435, 436, 440, 441, 442, 443, 445, 447, 448, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 466, 468, 469, 470, 471, 472, 474, 482, 483, 484, 485, 486, 487, 490, 491, 494, 495, 496, 499, 500, 502, 503, 509, 511, 516, 517, 519, 523, 524, 527, 528, 529, 530, 531, 533, 534, 538, 539, 541, 542, 543, 545, 546, 547, 548, 553, 561, 562, 565, 566, 568, 570, 573, 574, 577, 579, 580, 581, 583, 584, 585, 589, 590, 593, 594, 595, 596, 597, 603, 605, 606, 609, 615, 616, 617, 618, 619, 623, 624, 625, 628]) test_inds = np.array([ 0, 4, 7, 9, 11, 12, 14, 15, 19, 25, 26, 27, 29, 30, 31, 34, 35, 38, 40, 42, 45, 47, 50, 53, 55, 58, 59, 60, 61, 63, 65, 67, 69, 71, 72, 74, 75, 76, 77, 78, 80, 81, 86, 87, 88, 89, 90, 91, 93, 95, 96, 98, 99, 102, 103, 106, 108, 112, 113, 114, 116, 124, 125, 127, 128, 130, 133, 134, 135, 136, 137, 140, 141, 147, 148, 149, 150, 153, 155, 158, 160, 161, 165, 167, 168, 169, 170, 171, 174, 178, 179, 180, 181, 182, 185, 186, 189, 191, 192, 193, 194, 197, 198, 202, 203, 206, 207, 209, 210, 211, 212, 213, 217, 218, 219, 220, 221, 222, 224, 225, 228, 231, 234, 235, 236, 237, 241, 242, 247, 248, 249, 250, 251, 252, 253, 256, 257, 258, 259, 260, 261, 265, 268, 269, 272, 276, 280, 281, 282, 283, 284, 285, 286, 287, 288, 293, 294, 297, 300, 309, 310, 313, 314, 315, 316, 317, 318, 323, 325, 327, 330, 331, 332, 334, 335, 338, 339, 343, 344, 348, 349, 350, 352, 355, 356, 358, 361, 362, 364, 365, 368, 370, 372, 373, 375, 377, 378, 381, 383, 384, 385, 387, 388, 389, 390, 398, 403, 405, 406, 407, 409, 410, 416, 418, 422, 423, 424, 425, 426, 427, 437, 438, 439, 444, 446, 449, 450, 451, 454, 456, 460, 465, 467, 473, 475, 476, 477, 478, 479, 480, 481, 488, 489, 492, 493, 497, 498, 501, 504, 505, 506, 507, 508, 510, 512, 513, 514, 515, 518, 520, 521, 522, 525, 526, 532, 535, 536, 537, 540, 544, 549, 550, 551, 552, 554, 555, 556, 557, 558, 559, 560, 563, 564, 567, 569, 571, 572, 575, 576, 578, 582, 586, 587, 588, 591, 592, 598, 599, 600, 601, 602, 604, 607, 608, 610, 611, 612, 613, 614, 620, 621, 622, 626, 627, 629, 630]) def train_rmse(preds): return rmse(preds[train_inds], expt_means[train_inds]) def test_rmse(preds): return rmse(preds[test_inds], expt_means[test_inds]) gaussian_train_rmse_traj = np.array(list(map(train_rmse, unreduced_gaussian_prediction_traj))) gaussian_test_rmse_traj = np.array(list(map(test_rmse, 
unreduced_gaussian_prediction_traj))) plt.plot(gaussian_train_rmse_traj) plt.plot(gaussian_test_rmse_traj) plt.plot(gaussian_train_rmse_traj[5:]) plt.plot(gaussian_test_rmse_traj[5:]) gaussian_trees[-1].get_radii() len(set(gaussian_test_rmse_traj))Make a movie of the atom-typing scheme!list(zip(discrete_trees[0], np.bincount(np.hstack(list(gaussian_typings.values())[0])))) student_t_trees[-1] def expt_unc_contained(preds, i, desired_coverage=0.95): alpha = 100 * ((1 - desired_coverage) / 2) upper, lower = norm.cdf(np.percentile(preds, q=[100 - alpha, alpha]), loc=expt_means[i], scale=expt_uncs[i]) return upper - lower from scipy.stats import norm from bayes_implicit_solvent.utils import remove_top_right_spines desired_coverages = np.linspace(0,1) actual_coverages = np.vstack([np.array([expt_unc_contained(unreduced_gaussian_prediction_traj[:,i], i, desired_coverage=p) for i in range(len(mols))]) for p in desired_coverages]) ax = plt.subplot(1,1,1) remove_top_right_spines(ax) plt.plot(desired_coverages, desired_coverages, c='grey', linestyle='--') plt.plot(desired_coverages, np.median(actual_coverages, 1), label='gaussian') plt.fill_between(desired_coverages, np.median(actual_coverages, 1), alpha=0.25) np.trapz(np.mean(actual_coverages, 1), desired_coverages) student_t_prediction_traj = [] for i in tqdm(range(len(student_t_trees))[::10]): student_t_prediction_traj.append(get_predictions( student_t_thetas[i], t_typings[tuple(student_t_trees[i].ordered_nodes)])) student_t_prediction_traj = np.array(student_t_prediction_traj) unreduced_student_t_prediction_traj = unreduce(np.array(student_t_prediction_traj)) t_actual_coverages = np.vstack([np.array([expt_unc_contained(unreduced_student_t_prediction_traj[:,i], i, desired_coverage=p) for i in range(len(mols))]) for p in desired_coverages]) ax = plt.subplot(1,1,1) remove_top_right_spines(ax) plt.plot(desired_coverages, desired_coverages, c='grey', linestyle='--') #for a in actual_coverages.T[10:]: # plt.plot(desired_coverages, a, c='lightblue', alpha=0.5) #plt.plot(desired_coverages, np.mean(actual_coverages[:,10:], 1), label='gaussian') #plt.fill_between(desired_coverages, np.mean(actual_coverages[:,10:], 1), alpha=0.25) plt.plot(desired_coverages, np.mean(t_actual_coverages, 1), label='student-t') plt.fill_between(desired_coverages, np.mean(t_actual_coverages, 1), alpha=0.25) plt.legend() plt.xlabel('desired coverage probability') plt.ylabel('actual coverage probability') plt.xlim(0,1) plt.ylim(0,1) ax = plt.subplot(1,1,1) remove_top_right_spines(ax) plt.plot(desired_coverages, desired_coverages, c='grey', linestyle='--') plt.plot(desired_coverages, np.mean(actual_coverages, 1), label='gaussian') plt.fill_between(desired_coverages, np.mean(actual_coverages, 1), alpha=0.25) plt.plot(desired_coverages, np.mean(t_actual_coverages, 1), label='student-t') plt.fill_between(desired_coverages, np.mean(t_actual_coverages, 1), alpha=0.25) plt.legend() plt.xlabel('desired coverage probability') plt.ylabel('actual coverage probability') plt.xlim(0,1) plt.ylim(0,1) np.trapz(np.mean(t_actual_coverages, 1), desired_coverages) gaussian_train_rmse_traj = np.array(list(map(train_rmse, unreduced_gaussian_prediction_traj))) gaussian_test_rmse_traj = np.array(list(map(test_rmse, unreduced_gaussian_prediction_traj))) t_train_rmse_traj = np.array(list(map(train_rmse, unreduced_student_t_prediction_traj))) t_test_rmse_traj = np.array(list(map(test_rmse, unreduced_student_t_prediction_traj))) plt.plot(gaussian_train_rmse_traj) plt.plot(gaussian_test_rmse_traj) 
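# Note (added): the two unlabeled curves above are the train-split and test-split RMSE
# trajectories for the Gaussian-likelihood run; a legend and axis labels make the
# comparison with the student-t run below easier to read.
plt.legend(['train RMSE', 'test RMSE'])
plt.xlabel('MCMC sample (thinned)')
plt.ylabel('RMSE')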
plt.figure() plt.plot(t_train_rmse_traj) plt.plot(t_test_rmse_traj) plt.plot(t_train_rmse_traj[1:]) #plt.plot(t_test_rmse_traj[1:]) t_test_rmse_traj t_test_rmse_traj[-1] train_rmse(np.mean(unreduced_student_t_prediction_traj[10:], 0)) train_rmse(np.mean(unreduced_gaussian_prediction_traj[10:], 0)) test_rmse(np.mean(unreduced_student_t_prediction_traj[10:], 0)) np.min(t_test_rmse_traj) test_rmse(np.mean(unreduced_gaussian_prediction_traj[10:], 0)) # oh, wait, each one of the predictions has an uncertainty I've neglected! @jit def compute_exp_uncertainty(w_F): x = np.exp(w_F - np.max(w_F)) Ex = np.mean(x) dx = np.std(x) / np.sqrt(len(x)) return dx / Ex @jit def predict_solvation_free_energy_jax_w_unc(theta, distance_matrices, charges, element_ind_array): N = int(len(theta) / 2) radii_, scaling_factors_ = theta[:N], theta[N:] radii = radii_[element_ind_array] scaling_factors = scaling_factors_[element_ind_array] @jit def compute_component(distance_matrix): return compute_OBC_energy_vectorized(distance_matrix, radii, scaling_factors, charges) W_F = vmap(compute_component)(distance_matrices) w_F = W_F * kj_mol_to_kT unc = compute_exp_uncertainty(w_F) return one_sided_exp(w_F), unc import jax.numpy as np def get_predictions_and_uncs(theta, typings): ## TypeError: 'FilledConstant' object does not support item assignment #preds = np.zeros(len(typings)) #uncs = np.zeros(len(typings)) #for i in range(len(charges)): # preds[i], uncs[i] = predict_solvation_free_energy_jax_w_unc(theta, distance_matrices[i], charges[i], typings[i]) # OKAY, fine a list comprehension... preds_and_uncs = [predict_solvation_free_energy_jax_w_unc(theta, distance_matrices[i], charges[i], typings[i]) for i in range(len(typings))] preds = np.array([p for (p,u) in preds_and_uncs]) uncs = np.array([u for (p,u) in preds_and_uncs]) return preds, uncs student_t_prediction_traj = [] student_t_unc_traj = [] for i in tqdm(range(len(student_t_trees))[::5]): preds, uncs = get_predictions_and_uncs( student_t_thetas[i], t_typings[tuple(student_t_trees[i].ordered_nodes)]) student_t_prediction_traj.append(preds) student_t_unc_traj.append(uncs) student_t_prediction_traj = np.array(student_t_prediction_traj) student_t_unc_traj = np.array(student_t_unc_traj) student_t_unc_traj.flatten().max() plt.hist(student_t_unc_traj.flatten(), bins=50); preds, uncs = student_t_prediction_traj[:,0], student_t_unc_traj[:,0] x_ = np.linspace(-15,5,1000) plt.hist(preds, density=True); def uncertainty_density(x, preds, uncs): return sum([norm.pdf(x, loc=mu, scale=sigma) for (mu, sigma) in zip(preds, uncs)]) / len(preds) y_ = uncertainty_density(x_, preds, uncs) plt.plot(x_, y_) plt.xlim(-12,-4) plt.hist(preds, density=True); plt.plot(x_, norm.pdf(x_, expt_means[0], expt_uncs[0])) from numpy import trapz trapz(y_, x_) plt.plot(x_, norm.pdf(x_, preds[0], uncs[0])) plt.plot(x_, norm.pdf(x_, expt_means[0], expt_uncs[0])) i = train_inds[10] preds, uncs = student_t_prediction_traj[:,i], student_t_unc_traj[:,i] plt.plot(x_, norm.pdf(x_, expt_means[i], expt_uncs[i])) plt.plot(x_, norm.pdf(x_, preds[0], uncs[0])) plt.xlim(-8,-1) plt.plot(x_, norm.pdf(x_, expt_means[i], expt_uncs[i])) y_ = uncertainty_density(x_, preds, uncs) plt.plot(x_, y_) plt.xlim(-8,-1) diag = np.linspace(-15,5) plt.scatter(expt_means, unreduced_student_t_prediction_traj[-1]) plt.plot(diag, diag, c='grey') diag = np.linspace(-15,5) plt.scatter(expt_means[train_inds], unreduced_student_t_prediction_traj[-1][train_inds]) plt.plot(diag, diag, c='grey') diag = np.linspace(-15,5) 
plt.scatter(expt_means[test_inds], unreduced_student_t_prediction_traj[-1][test_inds]) plt.plot(diag, diag, c='grey') averaged_prediction = np.mean(unreduced_student_t_prediction_traj, 0) median_prediction = np.median(unreduced_student_t_prediction_traj, 0) diag = np.linspace(-15,5) plt.scatter(averaged_prediction[test_inds], expt_means[test_inds]) plt.plot(diag, diag, c='grey') diag = np.linspace(-15,5) plt.scatter(median_prediction[test_inds], expt_means[test_inds]) plt.plot(diag, diag, c='grey') averaged_prediction = np.mean(unreduced_student_t_prediction_traj, 0) median_prediction = np.median(unreduced_student_t_prediction_traj, 0) rmse(median_prediction[test_inds], expt_means[test_inds]), rmse(averaged_prediction[test_inds], expt_means[test_inds]) averaged_prediction = np.mean(unreduced_gaussian_prediction_traj, 0) median_prediction = np.median(unreduced_gaussian_prediction_traj, 0) rmse(median_prediction[test_inds], expt_means[test_inds]), rmse(averaged_prediction[test_inds], expt_means[test_inds]) burn_in = 10 running_median_prediction = np.array([np.median(unreduced_student_t_prediction_traj[burn_in:t], 0) for t in range(burn_in, len(unreduced_student_t_prediction_traj))]) t_train_rmse_traj_med = np.array(list(map(train_rmse, running_median_prediction))) t_test_rmse_traj_med = np.array(list(map(test_rmse, running_median_prediction))) plt.title('instantaneous predictions') plt.plot(t_train_rmse_traj[burn_in:], label='train') plt.plot(t_test_rmse_traj[burn_in:], label='test') plt.legend() plt.title('running median predictions') plt.plot(t_train_rmse_traj_med, label='train') plt.plot(t_test_rmse_traj_med, label='test') plt.legend() Deltas = np.zeros((len(test_inds), len(test_inds))) for i in range(len(test_inds)): for j in range(len(test_inds)): Deltas[i,j] = expt_means[test_inds[i]] - expt_means[test_inds[j]] flat_Deltas = [] for i in range(len(test_inds)): for j in range(i+1, len(test_inds)): flat_Deltas.append(expt_means[test_inds[i]] - expt_means[test_inds[j]]) flat_Deltas = np.array(flat_Deltas) preds = running_median_prediction[-1] predicted_Deltas = np.zeros((len(test_inds), len(test_inds))) for i in range(len(test_inds)): for j in range(len(test_inds)): predicted_Deltas[i,j] = preds[test_inds[i]] - preds[test_inds[j]] flat_predicted_Deltas = [] for i in range(len(test_inds)): for j in range(i+1, len(test_inds)): flat_predicted_Deltas.append(preds[test_inds[i]] - preds[test_inds[j]]) flat_predicted_Deltas = np.array(flat_predicted_Deltas) np.mean(np.sign(flat_Deltas) == np.sign(flat_predicted_Deltas)) disagreements = np.sign(flat_Deltas) != np.sign(flat_predicted_Deltas) np.mean(disagreements) plt.scatter(flat_Deltas[disagreements], flat_predicted_Deltas[disagreements], s=1) plt.hexbin(flat_Deltas[disagreements], flat_predicted_Deltas[disagreements], bins='log', cmap='Blues')Instead of comparing the means here, should be doing a statistical test or something! Need to compare with predictions from original OBC2 model! 
Should move these analyses into a submodule, maybe performance_metrics or similar...np.mean((Deltas - predicted_Deltas)**2) plt.scatter(Deltas.flatten(), predicted_Deltas.flatten(), s=1) plt.hexbin(flat_Deltas, predicted_Deltas, bins='log', cmap='Blues') np.argmax(Deltas - predicted_Deltas) mbondi2_preds, mbondi2_uncs = unreduce(np.load('mbondi2_preds.npy')).T rmse(mbondi2_preds, expt_means) from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi2_model mbondi2_types = mbondi2_model.apply_to_molecule_list(oemols) mbondi2_radii = mbondi2_model.get_radii() mbondi2_scales = mbondi2_model.get_scale_factors() mbondi2_theta = np.hstack([mbondi2_radii, mbondi2_scales]) reduced_mbondi2_preds, reduced_mbondi2_uncs = get_predictions_and_uncs(mbondi2_theta, mbondi2_types) mbondi2_preds = unreduce(reduced_mbondi2_preds) mbondi2_uncs = unreduce(reduced_mbondi2_uncs) rmse(mbondi2_preds, expt_means) OBC2_Deltas = np.zeros((len(test_inds), len(test_inds))) for i in range(len(test_inds)): for j in range(len(test_inds)): OBC2_Deltas[i,j] = mbondi2_preds[test_inds[i]] - mbondi2_preds[test_inds[j]] flat_OBC2_Deltas = [] for i in range(len(test_inds)): for j in range(i+1, len(test_inds)): flat_OBC2_Deltas.append(mbondi2_preds[test_inds[i]] - mbondi2_preds[test_inds[j]]) flat_OBC2_Deltas = np.array(flat_OBC2_Deltas) plt.hexbin(flat_Deltas, flat_OBC2_Deltas, bins='log', cmap='Blues') np.median((flat_Deltas - flat_OBC2_Deltas)**2) np.median((flat_Deltas - flat_predicted_Deltas)**2) disagreements = np.sign(flat_Deltas) != np.sign(flat_OBC2_Deltas) 1 - np.mean(disagreements) plt.hexbin(flat_OBC2_Deltas[disagreements],flat_Deltas[disagreements], bins='log', cmap='Blues') plt.xlabel('OBC2 prediction') plt.ylabel('experimental value') np.max(flat_OBC2_Deltas[disagreements]-flat_Deltas[disagreements]) np.min(flat_OBC2_Deltas[disagreements]-flat_Deltas[disagreements]) plt.hist(flat_OBC2_Deltas[disagreements], bins=50); plt.hist(flat_Deltas[disagreements], bins=50); # for each one of these pairs, can I do a substructure analysis to see how similar they are? # then I could pick some extreme examples on both ends, and some moderate examples: # * a very similar-looking pair with a very big difference in hydration free energy # * a very different-looking pair with a very small difference in hydration free energy # * less extreme examples... 
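The note above suggests doing a proper statistical test rather than just comparing means. One minimal option (an assumption on my part, not part of the original analysis) is a binomial test on how often the predicted and experimental pairwise differences agree in sign; the pairs share molecules and are not independent, so the p-value should only be read as a rough guide.
from scipy.stats import binomtest

n_pairs = len(flat_Deltas)
n_agree = int(np.sum(np.sign(flat_Deltas) == np.sign(flat_predicted_Deltas)))
# Null hypothesis: sign agreement no better than a coin flip.
print(binomtest(n_agree, n_pairs, p=0.5, alternative='greater'))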
from openeye import oechem mcss.SetMCSFunc(oechem.OEMCSMaxAtoms()) def size_of_mcs(m1, m2): atomexpr = oechem.OEExprOpts_DefaultAtoms bondexpr = oechem.OEExprOpts_DefaultBonds mcss = oechem.OEMCSSearch(m1, atomexpr, bondexpr, oechem.OEMCSType_Approximate) mcss.SetMCSFunc(oechem.OEMCSMaxAtoms()) match_sizes = [0] for count, match in enumerate(mcss.Match(m2, True)): m = oechem.OEGraphMol() oechem.OESubsetMol(m, match, True) match_sizes.append(m.NumAtoms()) return max(match_sizes) test_mols = [oemols[i] for i in test_inds] mcs_sizes = np.zeros((len(test_mols), len(test_mols))) for i in tqdm(range(len(test_mols))): for j in range(len(test_mols)): mcs_sizes[i,j] = size_of_mcs(test_mols[i], test_mols[j]) oechem.OEMCSType_Approximate sizes = np.array([m.NumAtoms() for m in test_mols]) pair_min_sizes = np.minimum(np.outer(sizes, np.ones(len(sizes))).T, np.outer(sizes, np.ones(len(sizes)))) pair_max_sizes = np.maximum(np.outer(sizes, np.ones(len(sizes))).T, np.outer(sizes, np.ones(len(sizes)))) plt.imshow(mcs_sizes / pair_max_sizes) plt.colorbar() affinities = mcs_sizes / pair_max_sizes from sklearn.cluster.bicluster import SpectralBiclustering bic = SpectralBiclustering(n_clusters=6) bic.fit(affinities) inds = np.argsort(bic.column_labels_) plt.imshow(affinities[inds][:,inds]) plt.colorbar() plt.scatter(affinities.flatten(), np.abs(Deltas).flatten(), s=1) plt.hexbin(affinities.flatten(), np.abs(Deltas).flatten(),bins='log', cmap='Blues') plt.hexbin(affinities.flatten(), Deltas.flatten(),bins='log', cmap='Blues') plt.xlabel('|MCS(m1,m2)| / max(|m1|, |m2|)') plt.ylabel(r'$\Delta G_{hyd}(m_1) - \Delta G_{hyd}(m_2)$') # find pareto front of MCS size and absolute value of Delta X = np.vstack([affinities.flatten(), np.abs(Deltas.flatten())]).T pareto_front = [0] for i in range(len(X)): pareto_optimal = True for j in pareto_front: if (X[j] > X[i]).all(): pareto_optimal = False break if pareto_optimal: pareto_front.append(i) len(pareto_front) plt.scatter(*X[pareto_front].T) np.max(np.abs(Deltas.flatten())[affinities.flatten() > 0.9]) x_affinities = sorted(list(set(affinities.flatten())))[:-1] y_Deltas = [np.max(np.abs(Deltas.flatten())[affinities.flatten() > thresh]) for thresh in x_affinities] plt.scatter(x_affinities, np.abs(y_Deltas)) # maybe instead of "affinities" say "atoms not in common"... 
atoms_not_in_common = pair_max_sizes - mcs_sizes np.min(atoms_not_in_common), np.max(atoms_not_in_common) x_atoms_not_in_common = sorted(list(set(atoms_not_in_common.flatten())))[1:] y_Deltas = [np.max(np.abs(Deltas.flatten())[atoms_not_in_common.flatten() < thresh]) for thresh in x_atoms_not_in_common] plt.scatter(x_atoms_not_in_common, np.abs(y_Deltas)) plt.xlabel('atoms not in common') y_Deltas = [np.max(np.abs(OBC2_Deltas.flatten())[atoms_not_in_common.flatten() < thresh]) for thresh in x_atoms_not_in_common] plt.scatter(x_atoms_not_in_common, np.abs(y_Deltas)) y_Deltas = [np.max(np.abs(predicted_Deltas.flatten())[atoms_not_in_common.flatten() < thresh]) for thresh in x_atoms_not_in_common] plt.scatter(x_atoms_not_in_common, np.abs(y_Deltas)) x_atoms_not_in_common = sorted(list(set(atoms_not_in_common.flatten())))[1:] y_Deltas = [np.max(np.abs(Deltas.flatten())[atoms_not_in_common.flatten() < thresh]) for thresh in x_atoms_not_in_common] plt.scatter(x_atoms_not_in_common, np.abs(y_Deltas)) plt.xlabel('atoms not in common') plt.xlim(0,6.5) y_Deltas i_s, j_s = np.where(Deltas == 7.18) expt_means[test_inds][i_s] - expt_means[test_inds][j_s] atoms_not_in_common[i_s, j_s] z = np.argmin(atoms_not_in_common[i_s, j_s]) i,j = i_s[z], j_s[z] smiles_i, smiles_j = mols[test_inds[i]].smiles, mols[test_inds[j]].smiles smiles_i, smiles_j print('ClC1') test_inds[i], test_inds[j] from bayes_implicit_solvent.freesolv import smiles_to_cid smiles_to_cid[smiles_i], smiles_to_cid[smiles_j] x_atoms_not_in_common = sorted(list(set(atoms_not_in_common.flatten())))[:-1] y_Deltas = [np.min(np.abs(Deltas.flatten())[atoms_not_in_common.flatten() > thresh]) for thresh in x_atoms_not_in_common] plt.scatter(x_atoms_not_in_common, np.abs(y_Deltas)) plt.xlabel('atoms not in common') x_atoms_not_in_common X[0] np.vstack([affinities.flatten(), np.abs(Deltas.flatten())]).T.shape plt.hexbin(affinities.flatten(), OBC2_Deltas.flatten(),bins='log', cmap='Blues') plt.hexbin(mcs_sizes.flatten(), Deltas.flatten(),bins='log', cmap='Blues') plt.xlabel('|MCS(m1,m2)|') plt.ylabel(r'$\Delta G_{hyd}(m_1) - \Delta G_{hyd}(m_2)$')Character level name classification.In this notebook we are going to learn how to classify names. We will be following [this](https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html) to understand how we can perform this task. First we must download the [dataset](https://download.pytorch.org/tutorial/data.zip) which I've already downloaded and uploaded it on my google drive. Specifically, we’ll train on a few thousand surnames from 18 languages of origin, and predict which language a name is from based on the spelling. 
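If mounting Google Drive is not an option, the archive linked above can also be fetched and unpacked directly; a small sketch (the extraction path is just an illustrative choice):
import io, zipfile, requests

resp = requests.get("https://download.pytorch.org/tutorial/data.zip")
zipfile.ZipFile(io.BytesIO(resp.content)).extractall(".")
# The archive unpacks to a data/names/ folder of per-language .txt files.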
Importsfrom __future__ import unicode_literals import string, unicodedata, os, time, random, math import torch from torch import nn from torch.nn import functional as F from torch.utils.data import Dataset, DataLoader import matplotlib.pyplot as plt from prettytable import PrettyTable import numpy as np from torchtext.legacy import data, datasets import matplotlib.ticker as ticker torch.__version__SeedsSEED = 42 np.random.seed(SEED) torch.manual_seed(SEED) random.seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deteministic = TrueDevicedevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu') deviceMounting the drivefrom google.colab import drive drive.mount('/content/drive') file_path = '/content/drive/My Drive/NLP Data/seq2seq/data/names' os.path.exists(file_path) all_letters = string.ascii_letters + " .,;'" n_letters = len(all_letters) n_lettersWe need a function that turns ``Unicode`` characters to `ASCII` characters. Basically the function will convert text from this kind of domain `Ślusàrski` to `Slusarski`def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters ) print(unicodeToAscii('Ślusàrski'))SlusarskiBuiling the category line dictionary of list of names per language.def readLines(filename): lines = open(filename, encoding="utf-8").read().strip().split('\n') return [unicodeToAscii(line) for line in lines] categories = [] category_names = {} for file in os.listdir(file_path): category = file.split('.')[0] categories.append(category) category_names[category] = readLines(os.path.join(file_path, file)) len(categories) category_names.get("French")[:5]Converting names into tensorsTo represent a single letter we are going to make use of `one_hot_vector` where each letter will be representet with a vector of `[1, n_letters]`. Where the index of the current letter will be the only 1 in the vector and others will be only zeros.def letterToIndex(letter): return all_letters.find(letter) def letterToTensor(letter): return torch.from_numpy( np.eye(n_letters, dtype="float32")[letterToIndex(letter)].reshape(1, -1) ) print(letterToTensor(';')) def lineToTensor(line): tensor = torch.zeros(len(line), 1, n_letters) for i, letter in enumerate(line): tensor[i][0][letterToIndex(letter)] = 1 return tensor print(lineToTensor('Jones').size()) print(lineToTensor('Jones'))torch.Size([5, 1, 57]) tensor([[[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], [[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], [[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], [[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., [...]Model definition.We are going to create our `RNN` layer or module from scratch. 
The achitecture looks as follows:![img](https://i.imgur.com/Z2xbySO.png)class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNN, self).__init__() self.hidden_size = hidden_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) self.i2o = nn.Linear(input_size + hidden_size, output_size) def forward(self, input, hidden): hidden = hidden.to(device) input = input.to(device) combined = torch.cat((input, hidden), 1) hidden = self.i2h(combined) output = self.i2o(combined) return output, hidden def initHidden(self): return torch.rand(size=(1, self.hidden_size)) n_hidden = 128 rnn = RNN(n_letters, n_hidden, len(categories)).to(device)To run a step of this network we need to pass an input (in our case, the Tensor for the current letter) and a previous hidden state (which we initialize as zeros at first). We’ll get back the output (probability of each language) and a next hidden state (which we keep for the next step).input = letterToTensor('A').to(device) hidden = torch.zeros(1, n_hidden).to(device) output, next_hidden = rnn(input, hidden) input = lineToTensor('Albert').to(device) hidden = torch.zeros(1, n_hidden).to(device) output, next_hidden = rnn(input[0], hidden) print(output)tensor([[ 0.0085, 0.0400, 0.0183, 0.0599, 0.0601, 0.0283, -0.1087, 0.0567, -0.0821, -0.0028, -0.0840, -0.0157, -0.0156, 0.0181, -0.0343, 0.0729, -0.0339, -0.0503]], device='cuda:0', grad_fn=)TrainingBefore going into training we should make a few helper functions. The first is to interpret the output of the network, which we know to be a likelihood of each category.def categoryFromOutput(output): top_n, top_i = output.topk(1) category_i = top_i[0].item() return categories[category_i], category_i print(categoryFromOutput(output)) def randomChoice(l): return l[random.randint(0, len(l) - 1)] def randomTrainingExample(): category = randomChoice(categories) line = randomChoice(category_names[category]) category_tensor = torch.tensor([categories.index(category)], dtype=torch.long) line_tensor = lineToTensor(line) return category, line, category_tensor, line_tensor for i in range(10): category, line, category_tensor, line_tensor = randomTrainingExample() print('category =', category, '/ line =', line)category = Czech / line = Till category = Scottish / line = Smith category = French / line = Porcher category = Vietnamese / line = Kim category = Russian / line = Halepsky category = Vietnamese / line = Ngo category = English / line = Howell category = Spanish / line = Oleastro category = Italian / line = Selvaggio category = Scottish / line = ThomsonLosscriterion = nn.NLLLoss().to(device) optimimizer = torch.optim.Adam(rnn.parameters())Training loop* Create input and target tensors* Create a random initial hidden state* Read each letter in and* Keep hidden state for next letter* Compare final output to target* Back-propagate* Return the output and lossdef train(category_tensor, line_tensor, optimimizer, model): model.train() category_tensor = category_tensor.to(device) line_tensor = line_tensor.to(device) hidden = model.initHidden() optimimizer.zero_grad() for i in range(line_tensor.size()[0]): output, hidden = model(line_tensor[i], hidden) loss = criterion(output, category_tensor) optimimizer.step() return output, loss.item() n_iters = 100000 print_every = 5000 plot_every = 1000 current_loss = 0 all_losses = [] def timeSince(since): now = time.time() s = now - since m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) start = time.time() for iter in range(1, n_iters + 1): 
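    # Each iteration below samples a random (language, name) pair, runs train() on that single
    # example, and accumulates the loss so it can be averaged and recorded every plot_every steps.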
category, line, category_tensor, line_tensor = randomTrainingExample() output, loss = train(category_tensor, line_tensor, optimimizer, rnn) current_loss += loss if iter % print_every == 0: guess, guess_i = categoryFromOutput(output) correct = '✓' if guess == category else '✗ (%s)' % category print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct)) # Add current loss avg to list of losses if iter % plot_every == 0: all_losses.append(current_loss / plot_every) current_loss = 05000 5% (0m 6s) -0.0674 Ruhlyadko / Japanese ✗ (Russian) 10000 10% (0m 13s) 0.1001 Zabek / English ✗ (Polish) 15000 15% (0m 20s) 0.1224 Zientek / English ✗ (Polish) 20000 20% (0m 27s) -0.0163 Kolen / French ✗ (Dutch) 25000 25% (0m 34s) 0.1449 Meisner / Irish ✗ (German) 30000 30% (0m 41s) 0.1584 Ritter / Irish ✗ (German) 35000 35% (0m 47s) 0.0666 Toset / Italian ✗ (Spanish) 40000 40% (0m 54s) -0.0369 Qian / Japanese ✗ (Chinese) 45000 45% (1m 1s) 0.1194 Heidrich / Japanese ✗ (German) 50000 50% (1m 8s) 0.0060 Ron / Japanese ✗ (Korean) 55000 55% (1m 14s) -0.0767 Saigo / Japanese ✓ 60000 60% (1m 21s) -0.0409 Jon / Russian ✗ (Korean) 65000 65% (1m 28s) 0.0967 Krauss / English ✗ (German) 70000 70% (1m 35s) 0.0364 An / Italian ✗ (Vietnamese) 75000 75% (1m 41s) 0.0109 Pantelas / English ✗ (Greek) 80000 80% (1m 48s) -0.1993 Roy / French ✓ 85000 85% (1m 55s) -0.1051 Murkami / English ✗ (Japanese) 90000 90% (2m 2s) -0.0311 Youj / English ✗ (Korean) 95000 95% (2m 8s) -0.0140 Karameros / English ✗ ([...]Plotting resultsimport matplotlib.pyplot as plt import matplotlib.ticker as ticker plt.figure() plt.plot(all_losses)Evaluating the resultsTo see how well the network performs on different categories, we will create a confusion matrix, indicating for every actual language (rows) which language the network guesses (columns). 
To calculate the confusion matrix a bunch of samples are run through the network with evaluate(), which is the same as train() minus the backprop.# Keep track of correct guesses in a confusion matrix confusion = torch.zeros(len(categories), len(categories)) n_confusion = 10000 # Just return an output given a line def evaluate(line_tensor): hidden = rnn.initHidden() for i in range(line_tensor.size()[0]): output, hidden = rnn(line_tensor[i], hidden) return output # Go through a bunch of examples and record which are correctly guessed for i in range(n_confusion): category, line, category_tensor, line_tensor = randomTrainingExample() output = evaluate(line_tensor) guess, guess_i = categoryFromOutput(output) category_i = categories.index(category) confusion[category_i][guess_i] += 1 # Normalize by dividing every row by its sum for i in range(len(categories)): confusion[i] = confusion[i] / confusion[i].sum() # Set up plot fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(confusion.numpy()) fig.colorbar(cax) # Set up axes ax.set_xticklabels([''] + categories, rotation=90) ax.set_yticklabels([''] + categories) # Force label at every tick ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) # sphinx_gallerUser inputdef predict(input_line, n_predictions=3): print('\n> %s' % input_line) with torch.no_grad(): output = evaluate(lineToTensor(input_line)) # Get top N categories topv, topi = output.topk(n_predictions, 1, True) predictions = [] for i in range(n_predictions): value = topv[0][i].item() category_index = topi[0][i].item() print('(%.2f) %s' % (value, categories[category_index])) predictions.append([value, categories[category_index]]) predict('Dovesky') predict('Jackson') predict('Satoshi')> Dovesky (0.10) Japanese (0.09) Italian (0.09) French > Jackson (0.13) Italian (0.13) Russian (0.07) Japanese > Satoshi (0.17) English (0.10) Japanese (0.08) SpanishOriginal Linkurl = "https://www.amazon.in/Lenovo-IdeaPad-Celeron-Platinum-81WH007KIN/dp/B09MM4FPMR/ref=sr_1_3?crid=30R34DWTFV1LU&keywords=laptops&qid=1640247787&sprefix=lap%2Caps%2C699&sr=8-3"Splitting the URL into Distinctive partsurl_array = url.split("/")Printing the URL partsprint(url_array)['https:', '', 'www.amazon.in', 'Lenovo-IdeaPad-Celeron-Platinum-81WH007KIN', 'dp', 'B09MM4FPMR', 'ref=sr_1_3?crid=30R34DWTFV1LU&keywords=laptops&qid=1640247787&sprefix=lap%2Caps%2C699&sr=8-3']Creating the URL array from the partsurl_1 = 'https://www.amazon.in/'+url_array[3]+'/product-reviews/'+url_array[5]+'/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=all_reviews'Printing the created URLprint(url_1)https://www.amazon.in/Lenovo-IdeaPad-Celeron-Platinum-81WH007KIN/product-reviews/B09MM4FPMR/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=all_reviewsInserting the page numberx=2Created URLurl_2 = 'https://www.amazon.in/'+url_array[3]+'/product-reviews/'+url_array[5]+'/ref=cm_cr_arp_d_paging_btm_next_'+str(x)+'?ie=UTF8&reviewerType=all_reviews&pageNumber='+str(x) #pr print(url_2)https://www.amazon.in/Lenovo-IdeaPad-Celeron-Platinum-81WH007KIN/product-reviews/B09MM4FPMR/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber=2!curl -O https://raw.githubusercontent.com/franciscadias/data/master/abcnews-date-text.csv import pandas as pd df_data = pd.read_csv('./abcnews-date-text.csv') df_data.head(3) df_data = df_data.head(10000) head_text = df_data[['headline_text']] type(head_text) import nltk nltk.download('punkt') head_text['title_text'] = head_text.apply(lambda row 
: nltk.word_tokenize(row['headline_text']), axis=1) # map() head_text.head(3) from nltk.corpus import stopwords nltk.download('stopwords') stop = stopwords.words('english') stop # titles = [] # for x in head_text['title_text']: # for word in x: # if len(word) > 3: # if word not in stop: # titles.apply(word) # return titles # def stopWord(x): # result = [] # for word in x: # if word not in stop or len(word)>3: # return result # def callStopWord(head_text): # titles=[] # for x in head_text['title_text']: # titles.apply(stopWord(x)) # return titles head_text['title'] = head_text['title_text'].apply(lambda x:[word for word in x if (len(word)>3) if (word not in stop)]) head_text.head(5) head_text['title'][3] tokens = [] for i in range(len(head_text)): tokens.append(' '.join(head_text['title'][i])) tokens[3:5] from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer(max_features=1000) X = tfidf.fit_transform(tokens) X.shape X[4].toarray() from sklearn.decomposition import LatentDirichletAllocation lda_model = LatentDirichletAllocation() lda_top = lda_model.fit_transform(X) lda_model.components_.shape, lda_model.components_ terms = tfidf.get_feature_names() n = 5 for idx, topic in enumerate(lda_model.components_): print([(terms[i], topic[i]) for i in topic.argsort()[:-n-1:-1]])Language Model from scratchHere we create our own Vocab and iterator without using torchtext or any other library.!wget -q https://github.com/pytorch/examples/raw/master/word_language_model/data/wikitext-2/train.txt !wget -q https://github.com/pytorch/examples/raw/master/word_language_model/data/wikitext-2/test.txt !wget -q https://github.com/pytorch/examples/raw/master/word_language_model/data/wikitext-2/valid.txt !ls import os import torch from io import open from pathlib import Path import torch.nn as nn import time import math import osDataclass Dictionary(object): """The class which holds the mapping from word2idx and idx2word. """ def __init__(self): self.word2idx={} self.idx2word=[] def add_word(self,word): if word not in self.word2idx: self.idx2word.append(word) self.word2idx[word]= len(self.idx2word)-1 return self.word2idx[word] def __len__(self): return len(self.idx2word) class Corpus(object): """The class which holds all the three data sets. We maintain on Single vocab for all train, test, val. 
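    Tokenizing all three splits with the same Dictionary instance keeps the word-to-index mapping consistent across train, validation and test.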
""" def __init__(self,path): self.dictionary = Dictionary() self.train = self.tokenize(path/"train.txt") # tokenize the data self.test = self.tokenize(path/"valid.txt") self.valid = self.tokenize(path/"test.txt") return None def tokenize(self,path): with open(path,"r",encoding="utf8") as f: tokens=0 for line in f: words = line.split() + [""] tokens += len(words) for word in words: self.dictionary.add_word(word) #tokenize file content with open(path , "r" , encoding="utf8") as f: ids = torch.LongTensor(tokens) token=0 for line in f: words = line.split() +[""] for word in words: ids[token] = self.dictionary.word2idx[word] token+=1 return ids class RNNModel(nn.Module): def __init__(self,rnn_type, ntoken, ninp, nhid , nlayers , dropout =0.5): super(RNNModel,self).__init__() self.drop = nn.Dropout(dropout) self.encoder = nn.Embedding(ntoken,ninp) self.rnn = getattr(nn,rnn_type)(ninp,nhid,nlayers,dropout=dropout) self.decoder = nn.Linear(nhid,ntoken) self.init_weights() self.rnn_type=rnn_type self.nhid=nhid self.nlayers=nlayers return None def init_weights(self): initrange = 0.1 self.encoder.weight.data.uniform_(-initrange,initrange) self.decoder.bias.data.zero_() self.decoder.weight.data.uniform_(-initrange,initrange) def forward(self,input,hidden): emb = self.drop(self.encoder(input)) output,hidden = self.rnn(emb,hidden) print(output.shape) output = self.drop(output) decoded = self.decoder(output.view(output.shape[0]*output.shape[1] , output.shape[2])) return decoded.view(output.size(0),output.size(1),decoded.size(1)) , hidden def init_hidden(self, bsz): weight = next(self.parameters()) if self.rnn_type == 'LSTM': return (weight.new_zeros(self.nlayers, bsz, self.nhid), weight.new_zeros(self.nlayers, bsz, self.nhid)) else: return weight.new_zeros(self.nlayers, bsz, self.nhid) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") corpus = Corpus(Path("./")) def batchify(data , bsz): nbatch = data.size(0) // bsz data = data.narrow(0,0,nbatch*bsz) data = data.view(bsz,-1).t().contiguous() return data.to(device) eval_batch_size = 10 train_batch_size = 20 train_data = batchify(corpus.train, train_batch_size) val_data = batchify(corpus.valid, eval_batch_size) test_data = batchify(corpus.test, eval_batch_size) train_data.shape ntokens = len(corpus.dictionary) model = RNNModel("LSTM", ntokens, ninp = 300, nhid=200, nlayers=2,dropout=0.2).to(device) criterion = nn.CrossEntropyLoss() bptt=3 def get_batch(source, i): seq_len = min(bptt, len(source) - 1 - i) data = source[i:i+seq_len] target = source[i+1:i+1+seq_len] return data, target data , target = get_batch(train_data,0) data[:,:10] , target # https://discuss.pytorch.org/t/help-clarifying-repackage-hidden-in-word-language-model/226/7 def repackage_hidden(h): """Wraps hidden states in new Tensors, to detach them from their history.""" if isinstance(h, torch.Tensor): return h.detach() else: return tuple(repackage_hidden(v) for v in h) def train(): # Turn on training mode which enables dropout. model.train() total_loss = 0. start_time = time.time() ntokens = len(corpus.dictionary) hidden = model.init_hidden(bsz=20) for batch, i in enumerate(range(0, train_data.size(0) - 1, 35)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. 
hidden = repackage_hidden(hidden) model.zero_grad() output, hidden = model(data, hidden) loss = criterion(output.view(-1, ntokens), targets) loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25) for p in model.parameters(): p.data.add_(-lr, p.grad.data) total_loss += loss.item() if batch % 200 == 0 and batch > 0: cur_loss = total_loss / 200 elapsed = time.time() - start_time print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // 35, lr, elapsed * 1000 / 200, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() def evaluate(data_source): # Turn on evaluation mode which disables dropout. model.eval() total_loss = 0. ntokens = len(corpus.dictionary) hidden = model.init_hidden(eval_batch_size) with torch.no_grad(): for i in range(0, data_source.size(0) - 1, 35): data, targets = get_batch(data_source, i) output, hidden = model(data, hidden) output_flat = output.view(-1, ntokens) total_loss += len(data) * criterion(output_flat, targets).item() hidden = repackage_hidden(hidden) return total_loss / (len(data_source) - 1) lr = 20 best_val_loss = None # At any point you can hit Ctrl + C to break out of training early. try: for epoch in range(1, 40+1): epoch_start_time = time.time() train() val_loss = evaluate(val_data) print('-' * 89) print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) print('-' * 89) # Save the model if the validation loss is the best we've seen so far. if not best_val_loss or val_loss < best_val_loss: with open("madel.pth", 'wb') as f: torch.save(model, f) best_val_loss = val_loss else: # Anneal the learning rate if no improvement has been seen in the validation dataset. lr /= 4.0 except KeyboardInterrupt: print('-' * 89) print('Exiting from training early') # Load the best saved model. with open("madel.pth", 'rb') as f: model = torch.load(f) # after load the rnn params are not a continuous chunk of memory # this makes them a continuous chunk, and will speed up forward pass model.rnn.flatten_parameters() # Run on test data. test_loss = evaluate(test_data) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format( test_loss, math.exp(test_loss))) print('=' * 89)| epoch 1 | 200/ 2983 batches | lr 20.00 | ms/batch 21.31 | loss 7.62 | ppl 2034.50 | epoch 1 | 400/ 2983 batches | lr 20.00 | ms/batch 20.33 | loss 6.83 | ppl 923.59 | epoch 1 | 600/ 2983 batches | lr 20.00 | ms/batch 20.32 | loss 6.47 | ppl 642.53 | epoch 1 | 800/ 2983 batches | lr 20.00 | ms/batch 20.34 | loss 6.27 | ppl 526.87 | epoch 1 | 1000/ 2983 batches | lr 20.00 | ms/batch 20.36 | loss 6.11 | ppl 452.09 | epoch 1 | 1200/ 2983 batches | lr 20.00 | ms/batch 20.32 | loss 6.04 | ppl 418.94 | epoch 1 | 1400/ 2983 batches | lr 20.00 | ms/batch 20.32 | loss 5.92 | ppl 372.74 | epoch 1 | 1600/ 2983 batches | lr 20.00 | ms/batch 20.30 | loss 5.94 | ppl 378.09 | epoch 1 | 1800/ 2983 batches | lr 20.00 | ms/batch 20.30 | loss 5.78 | ppl 325.18 | epoch 1 | 2000/ 2983 batches | lr 20.00 | ms/batch 20.29 | loss 5.75 | ppl 313.59 | epoch 1 | 2200/ 2983 batches | lr 20.00 | ms/batch 20.29 | loss 5.65 | ppl 282.97 | epoch [...]Network> CNN models generated according to abstracted DAGs.#export class NodeOP(nn.Module): "The Operation of inner nodes in the network." 
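    # A NodeOP element-wise sums all of its incoming feature maps and applies a single Unit
    # (e.g. a resnet_bottleneck block, as in the example below) to the aggregated tensor.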
def __init__(self, ni:int, no:int, nh:int, Unit:nn.Module, **kwargs): super(NodeOP, self).__init__() self.unit = Unit(ni, no, nh, **kwargs) def forward(self, *inputs): sum_inputs = sum(inputs) out = self.unit(sum_inputs) return outParameters:- ni : number of input channels- no : number of output channels- nh : number of hidden channels- Unit : the operation at the node- kwargs : arguments into `Unit`> Note: `sum` op may has performance problem, should we use `torch.stack(inputs, dim=0).sum(dim=0)` ?ni, no, nh = 16, 32, 8 Unit = resnet_bottleneck input1 = torch.rand(64, ni, 224, 224) input2 = torch.rand(64, ni, 224, 224) inputs = [input1, input2] m = NodeOP(ni, no, nh, Unit) out = m(*inputs) test_eq(out.shape, torch.Size([64, no, 224, 224])) m cfg_file = 'configs/imagenet/resnet/resnet50.yaml' cfg.merge_from_file(cfg_file) assert_cfg(cfg) cfg.freeze() cfg G = resnet_dag(cfg.GRAPH.NUM_NODES) G.nodes, G.edges #export class NetworkOP(nn.Module): "The operations along a DAG network." def __init__(self, G:nx.DiGraph, ni:int, no:int, Unit:nn.Module, **kwargs): super(NetworkOP, self).__init__() self.G = G self.n = G.graph['n'] # number of nodes self.nodeops = nn.ModuleList() for id in G.nodes(): # for each node if id == 0: # if is the unique input node, do nothing continue elif id == self.n: # if is the unique output node # then, concat its predecessors n_preds = len([*G.predecessors(id)]) self.nodeops += [IdentityMapping(n_preds * ni, no)] else: # if is the inner node self.nodeops += [NodeOP(ni, ni, ni, Unit, **kwargs)] def forward(self, x): results = {} results[-1] = x # input data is the result of the unique input node for id in self.G.nodes(): # for each node if id == -1: # if is the input node, do nothing continue # get the results of all predecessors inputs = [results[pred] for pred in self.G.predecessors(id)] if id == self.n: # if is the output node cat_inputs = torch.cat(inputs, dim=1) # concat results of all predecessors if self.efficient: return cp.checkpoint(self.nodeops[id], cat_inputs) else: return self.nodeops[id](cat_inputs) else: # if is inner nodes if self.efficient: results[id] = cp.checkpoint(self.nodeops[id], *inputs) else: results[id] = self.nodeops[id](*inputs) # 删除前驱结点result中,不再需要的result for pred in self.G.predecessors(id): # 获得节点的所有前驱结点 succs = list(self.G.successors(pred)) # 获得每个前驱结点的所有后继节点 # 如果排名最后的后继节点是当前节点,说明该前驱结点的result不再被后续的节点需要,可以删除 if max(succs) == id: del results[pred]Dynamic range quantizationconverter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] tflite_quant_model = converter.convert() with open('extractor_2_dynrange.tflite', 'wb') as f: f.write(tflite_quant_model)Full integer quantization# def representative_dataset(): # for data in tf.data.Dataset.from_tensor_slices((images)).batch(1).take(100): # yield [tf.dtypes.cast(data, tf.float32)] def representative_dataset(): for _ in range(100): data = np.random.rand(1, 244, 244, 3) yield [data.astype(np.float32)] converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset tflite_quant_model = converter.convert() with open('extractor_2_fullint.tflite', 'wb') as f: f.write(tflite_quant_model)Float16 quantizationconverter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_types = [tf.float16] tflite_quant_model = converter.convert() with open('extractor_2_float16.tflite', 
'wb') as f: f.write(tflite_quant_model)Opening a TFLite modelmodel = keras.models.load_model("extractor_2.h5") model.compile() random_image = np.random.rand(1, 112, 112, 3) features = model.predict(random_image) features[0][:10] interpreter = tf.lite.Interpreter("extractor_2_float16.tflite") interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() isinstance(interpreter, tf.lite.Interpreter) input_data = np.array(random_image, dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) features[0][:10]Matplotlib Exercise 1 Imports%matplotlib inline import matplotlib.pyplot as plt import numpy as npLine plot of sunspot data Download the `.txt` data for the "Yearly mean total sunspot number [1700 - now]" from the [SILSO](http://www.sidc.be/silso/datafiles) website. Upload the file to the same directory as this notebook.import os assert os.path.isfile('yearssn.dat')Use `np.loadtxt` to read the data into a NumPy array called `data`. Then create two new 1d NumPy arrays named `years` and `ssc` that have the sequence of year and sunspot counts.# YOUR CODE HERE data = np.loadtxt("yearssn.dat") year = data[:,0] ssc = data[:,1] print(year) print(ssc) assert len(year)==315 assert year.dtype==np.dtype(float) assert len(ssc)==315 assert ssc.dtype==np.dtype(float)Make a line plot showing the sunspot count as a function of year.* Customize your plot to follow Tufte's principles of visualizations.* Adjust the aspect ratio/size so that the steepest slope in your plot is *approximately* 1.* Customize the box, grid, spines and ticks to match the requirements of this data.# YOUR CODE HERE #http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html fig = plt.figure(figsize=(12,1)) ax = fig.add_subplot(1, 1, 1) ax.set_title("Sunspot Activity") ax.plot(year, ssc) plt.xlabel("Year") plt.ylabel("Sunspot Count") ax.grid(True) ax.spines["right"].set_visible(False) ax.spines["top"].set_visible(False) assert True # leave for gradingDescribe the choices you have made in building this visualization and how they make it effective. The default color and line style worked well for the data, so I didn't change those. I adjusted the aspect ratio of the graph to get a reasonable slope, as per the instructions. I also added labels, and removed the upper and right spines. Now make 4 subplots, one for each century in the data set. This approach works well for this dataset as it allows you to maintain mild slopes while limiting the overall width of the visualization. 
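One way to avoid hard-coding the slice indices when splitting the series by century is sketched below; it assumes the `year` and `ssc` arrays loaded above and that the record starts at a round century (true for this 1700-onwards dataset), grouping years by century before plotting each group on its own axis.

import numpy as np
century = (year // 100).astype(int)   # 17 for 1700-1799, 18 for 1800-1899, ...
for c in np.unique(century):
    mask = century == c
    # plot year[mask] against ssc[mask] on its own subplot, as done explicitly below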
Perform similar customizations as above:* Customize your plot to follow Tufte's principles of visualizations.* Adjust the aspect ratio/size so that the steepest slope in your plot is *approximately* 1.* Customize the box, grid, spines and ticks to match the requirements of this data.print(year[0:100]) print(len(year[0:100])) print(len(ssc[0:100])) # YOUR CODE HERE fig, ax = plt.subplots(4, 1, figsize=(12,12)) plt.sca(ax[0]) plt.plot(year[0:100], ssc[0:100]) plt.ylabel("Sunspots") plt.xlabel("Year") plt.sca(ax[1]) plt.plot(year[100:200], ssc[100:200]) plt.ylabel("Sunspots") plt.xlabel("Year") plt.sca(ax[2]) plt.plot(year[200:300], ssc[200:300]) plt.ylabel("Sunspots") plt.xlabel("Year") plt.sca(ax[3]) plt.plot(year[300:-1], ssc[300:-1]) plt.ylabel("Sunspots") plt.xlabel("Year") plt.xlim([2000, 2100]) assert True # leave for gradingOne Step Forecasting of CO2 Levels with SARIMA Models Pre-processing the Dataimport warnings import itertools import pandas as pd import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt plt.style.use("fivethirtyeight")The data set that we use is the historic atmospheric ${\text{CO}}_2$ levels from continuous air samples at Mauna Loa Observatory, Hawaii, USA, collected between March 1958 to December 2001.data = sm.datasets.co2.load_pandas() co2_data = data.data print(type(co2_data)) print(co2_data.head(10)) co2 1958-03-29 316.1 1958-04-05 317.3 1958-04-12 317.6 1958-04-19 317.5 1958-04-26 316.4 1958-05-03 316.9 1958-05-10 NaN 1958-05-17 317.5 1958-05-24 317.9 1958-05-31 NaNChecking for `NaN` values, we see 59 missing values in the `Series` `co2`:print(f'Number of entries = {len(co2_data)},\nNumber of null entries = {co2_data.isnull().sum()[0]}')Number of entries = 2284, Number of null entries = 59We first resample into monthly data sets and fill in missing values by pulling back future values with `bfill()` (recall that `resample()` returns a `DatetimeIndexResampler` object and `mean()` returns a `Series` object; we need to construct a `DataFrame` from their chained output):co2_data = pd.DataFrame(co2_data['co2'].resample('MS').mean()) # After resampling, we still have 5 missing values: print(f'Number of missing enetries after resampling: {co2_data.isnull().sum()[0]}') # bfill() and verify we no longer have any NaN values co2_data = co2_data.fillna(co2_data.bfill()) print(f'Number of missing enetries after bfill: {co2_data.isnull().sum()[0]}')Number of missing enetries after resampling: 5 Number of missing enetries after bfill: 0We plot the raw data as a preliminary visualisation:fig, ax = plt.subplots(figsize = (14,6)) ax.plot(co2_data['co2'], linewidth = 2, label = 'CO2 Level') ax.set_title('Recored CO2 Levels at Mauna Loa Observatory') ax.set_xlabel('Date') ax.set_ylabel('Atmospheric CO2 Levels (ppm)') ax.legend(loc = 'best')We note an obvious seasonality and upward trend.*** The ARIMA Time Series Model One of the most common methods used in time series forecasting is known as the ARIMA (__A__uto __R__egressive __I__ntegrated __M__oving __A__verage) model. It is a model in three parameters `(p,d,q)` that can be fitted to time series data to allow forecasting or better-understand furture points in the series. The three parameters in an `ARIMA(p,d,q)` model accounts for seasonality, trends and noise in the data:* `p` is the _autoregressive_ part of the model e.g. if the past three days have been warm, the next day is also likely to be warm.* `d` is the _integrated_ part of the model concerned with differencing e.g. 
if the difference in temperatures for the past three days have been small, the next day is likely to be a similar temperature.* `q` is the _moving average_ part of the model that allows us to set the errors of the model as a linear combination of the errors values observed at previous points in time. If the data exhibits obvious seasonal trends, as our dataset exhibits, it may be more appropriate to fit a SARIMA (__S__easonal ARIMA) model. We denote such models as `ARIMA(p,d,q)(P,D,Q)s` where the second set of parameters follow the same definitions as `(p,d,q)` but applied on seasonal scales. The trailing `s` is the periodicity of the time series; 4 for quarterly, 12 for yearly periods etc. In this project, we shall fit a yearly SARIMA model to the Mauna Loa $\text{CO}_2$ data and use it to generate one step ahead forecasts and compare its performance to historic data.SARIMA modles thus have a lot of tunable paramaters. They are thus extremly versatile but it can be very difficult to find the optimal parameter set. Parameter Selection for the SARIMA model Whilst some languages, such as R, provide an automated way of handling parameter selection these have yet to be ported to Python and so we resort to manual fitting of the parameters by a grid search. The only parameter we shall fix is the periodicity of the seasonality $s=12$ since the data exhibits an obvious yearly seasonality.We iteratively explors possible combinations of the parameters, fit a new SARIMA model with the `SARIMAX()` function and assess its quality to pick out the optimum set of parameters within the chosen landscape of parameters.# Seasonal periodicity s = 12 # We allow each of p, d, q to take on any of the values in [0, 1]. p = d = q = range(0, 2) pdq = list(itertools.product(p, d, q)) # Generate all the possible parameter combinations (P,D,Q) for the seasonal component: seasonal_pdq = list((x[0], x[1], x[2], s) for x in pdq) SARIMA_params = itertools.product(pdq, seasonal_pdq) print("Possible combinations of parameters for the SARIMA model:") for i, j in SARIMA_params: print(f'\tSARIMA{i}{j}')Possible combinations of parameters for the SARIMA model: SARIMA(0, 0, 0)(0, 0, 0, 12) SARIMA(0, 0, 0)(0, 0, 1, 12) SARIMA(0, 0, 0)(0, 1, 0, 12) SARIMA(0, 0, 0)(0, 1, 1, 12) SARIMA(0, 0, 0)(1, 0, 0, 12) SARIMA(0, 0, 0)(1, 0, 1, 12) SARIMA(0, 0, 0)(1, 1, 0, 12) SARIMA(0, 0, 0)(1, 1, 1, 12) SARIMA(0, 0, 1)(0, 0, 0, 12) SARIMA(0, 0, 1)(0, 0, 1, 12) SARIMA(0, 0, 1)(0, 1, 0, 12) SARIMA(0, 0, 1)(0, 1, 1, 12) SARIMA(0, 0, 1)(1, 0, 0, 12) SARIMA(0, 0, 1)(1, 0, 1, 12) SARIMA(0, 0, 1)(1, 1, 0, 12) SARIMA(0, 0, 1)(1, 1, 1, 12) SARIMA(0, 1, 0)(0, 0, 0, 12) SARIMA(0, 1, 0)(0, 0, 1, 12) SARIMA(0, 1, 0)(0, 1, 0, 12) SARIMA(0, 1, 0)(0, 1, 1, 12) SARIMA(0, 1, 0)(1, 0, 0, 12) SARIMA(0, 1, 0)(1, 0, 1, 12) SARIMA(0, 1, 0)(1, 1, 0, 12) SARIMA(0, 1, 0)(1, 1, 1, 12) SARIMA(0, 1, 1)(0, 0, 0, 12) SARIMA(0, 1, 1)(0, 0, 1, 12) SARIMA(0, 1, 1)(0, 1, 0, 12) SARIMA(0, 1, 1)(0, 1, 1, 12) SARIMA(0, 1, 1)(1, 0, 0, 12) SARIMA(0, 1, 1)(1, 0, 1, 12) SARIMA(0, 1, 1)(1, 1, 0, 12) SARIMA(0, 1[...]In order to evaluat the goodness of fit, we use the __AIC value__ (Akaike Infromation Criterion) which is reported with ARIMA models fitted with `statsmodels`. The AIC is a measure of how well the model fits the data, whilst taking into account the overall complexity of the model i.e. we prefer models that achieve the same goodness-of-fit whilst using fewer features. 
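For reference, the criterion is defined as

$$\text{AIC} = 2k - 2\ln\hat{L}\,,$$

where $k$ is the number of estimated parameters and $\hat{L}$ is the maximised value of the model's likelihood, so the $2k$ term penalises model complexity.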
In particular, we prefer models that return the _lowest_ AIC value.The following code performs a grid search over all of the possible SARIMA combinations we generated above, fits to the data and evaluates the AIC value of each model. Finally, it returns the parameter combination with the lowest AIC value.# Some parameter combinations are not valid. Ignore warning messages warnings.filterwarnings("ignore") aic = {} for params in pdq: for seasonal_params in seasonal_pdq: try: model = sm.tsa.statespace.SARIMAX(co2_data, order = params, seasonal_order = seasonal_params, enforce_stationaraity = False, enforce_invertibility = False) results = model.fit() aic.update({f'SARIMA{params}{seasonal_params}': results.aic}) print(f'ARIMA{params}{seasonal_params}: AIC = {results.aic}') except: continue print(aic) minval = min(aic.values()) optimum = [(k, v) for k, v in aic.items() if v == minval] # optimum is a list of length one, containing a tuple (aic, model) print(f'\nLowest AIC = {optimum[0][1]} for model {optimum[0][0]}')ARIMA(0, 0, 0)(0, 0, 0, 12): AIC = 7626.943594958338 ARIMA(0, 0, 0)(0, 0, 1, 12): AIC = 6957.288036271688 ARIMA(0, 0, 0)(0, 1, 0, 12): AIC = 1857.624339889968 ARIMA(0, 0, 0)(0, 1, 1, 12): AIC = 1634.6625982272858 ARIMA(0, 0, 0)(1, 0, 0, 12): AIC = 2057.255779233646 ARIMA(0, 0, 0)(1, 0, 1, 12): AIC = 1835.7966867018222 ARIMA(0, 0, 0)(1, 1, 0, 12): AIC = 1398.4561196922323 ARIMA(0, 0, 0)(1, 1, 1, 12): AIC = 1119.2721968234093 ARIMA(0, 0, 1)(0, 0, 0, 12): AIC = 6907.101966591662 ARIMA(0, 0, 1)(0, 0, 1, 12): AIC = 6236.105272306411 ARIMA(0, 0, 1)(0, 1, 0, 12): AIC = 1383.5838661636997 ARIMA(0, 0, 1)(0, 1, 1, 12): AIC = 1276.7784730240842 ARIMA(0, 0, 1)(1, 0, 0, 12): AIC = 1607.2228379626072 ARIMA(0, 0, 1)(1, 1, 0, 12): AIC = 1150.068960108588 ARIMA(0, 0, 1)(1, 1, 1, 12): AIC = 868.5667608534741 ARIMA(0, 1, 0)(0, 0, 0, 12): AIC = 1678.850182235887 ARIMA(0, 1, 0)(0, 0, 1, 12): AIC = 1271.6802658530646 ARIMA(0, 1, 0)(0, 1, 0, 12): AIC = 633.7998386712203 ARIMA(0, 1, 0)(0, 1, 1, 12): AIC = 386[...]In the above, we purposefully suprressed warnings at the beginning because some combinataions may lead to numerical issues that will pollute the output with warning messages. We catch any errors that they raise and ignore that parameter combination. The output of the code above suggest that the optimum combination of parameters in this parameter landscape is `SARIMA(1, 1, 1)(0, 1, 1, 12)`. We thus specify our model with these parameters:model = sm.tsa.statespace.SARIMAX(co2_data, order = (1, 1, 1), seasonal_order = (0, 1, 1, 12), enforce_stationarity = False, enforce_invertibility = False) result = model.fit()The function `sm.tsa.statespace.SARIMAX()` returns a `SARIMAX` object whilst `fit()` returns a `SARIMAXResultsWrapper`:print(type(model)) print(type(result)) The `SARIMAXResultsWrapper.summary()` function returns a signifiant amount of information:print(result.summary())SARIMAX Results ========================================================================================== Dep. Variable: co2 No. 
Observations: 526 Model: SARIMAX(1, 1, 1)x(0, 1, 1, 12) Log Likelihood -143.969 Date: Thu, 17 Sep 2020 AIC 295.937 Time: 11:47:43 BIC 312.788 Sample: 03-01-1958 HQIC 302.550 - 12-01-2001 Covariance Type: opg ============================================================================== coef std err z P>|z| [0.025 0.975] -----------------------[...]We unpack each of these tables in turn.print(result.summary().tables[0])SARIMAX Results ========================================================================================== Dep. Variable: co2 No. Observations: 526 Model: SARIMAX(1, 1, 1)x(0, 1, 1, 12) Log Likelihood -143.969 Date: Thu, 17 Sep 2020 AIC 295.937 Time: 11:47:43 BIC 312.788 Sample: 03-01-1958 HQIC 302.550 - 12-01-2001 Covariance Type: opg ==========================================================================================The left hand column is mostly self-explanatory except the `Covariance Type: opg`. This just denotes that the OPG estimator was used to compute the covariance matrix. The OPG (outer product of gradeints) estimator is a consistent estimate of the covariane matrix $\Sigma$ i.e. it converges to $\Sigma$ in probability.On the right hand column, we see that the SARIMAX fits by a log-likelihood fit. The final three rows are different criteria for model selection. We have already seen AIC and, to it, we add the BIC (Bayesian information criterion) and HQIC (Hannan-Quinn information criterion) as alternative measures of fitness of a model. Whilst all three measures credit goodness-of-fit and penalise complexity of models the BIC, for example, introduces a heavier penalty for overfitting than the AIC. We could have chosen our model based on minimising the BIC instead.print(result.summary().tables[1])============================================================================== coef std err z P>|z| [0.025 0.975] ------------------------------------------------------------------------------ ar.L1 0.3869 0.091 4.270 0.000 0.209 0.565 ma.L1 -0.6782 0.073 -9.347 0.000 -0.820 -0.536 ma.S.L12 -0.8655 0.027 -32.198 0.000 -0.918 -0.813 sigma2 0.1023 0.005 21.950 0.000 0.093 0.111 ==============================================================================The rows are the estimated features:* `ar.L1` and `ma.L1` are the AR and MA variables lagged by 1 time step (since $p = q= 1$ in the model)* `ma.S.L12` is the MA variables lagged by 12 times step (since $Q = 1$ and $s=12$). Note that there is no `ar.S.L12` since $P = 0$ in our model.The `coef` column shows the estimates of the features in the left-most column and `std err` records the standard error of these estimates.`z` is the z-statistic of the feature; a statistical test for which the null hypothesis is an underlyling (approximately) Gaussain distribution. Accordingly, `P>|z|` is the cCDF evaluated at $z$. Using a cut-off of $P < 0.05$ for statistical significance, we see that all four of the lagged $\text{AR}$ and $\text{MA}$ terms impact the forecast at that significance.The final two columns give the range of values of the feature within 2se of the estimate.print(result.summary().tables[2])=================================================================================== Ljung-Box (Q): 35.26 Jarque-Bera (JB): 125.75 Prob(Q): 0.68 Prob(JB): 0.00 Heteroskedasticity (H): 0.59 Skew: 0.30 Prob(H) (two-sided): 0.00 Kurtosis: 5.38 ===================================================================================The Ljung-Box test is a statistical test applied to the _residuals_ of a fitted ARIMA model. 
It tests whether any group of autocorrelations of a time series differs from zero; a test of independence of the residuals. The test statistic is defined as$$Q = n(n+2) \sum_{k=1}^h \frac{\hat{\rho}_k^2}{n - k}\,,$$where $n$ is the sample size, $\hat{\rho}_k$ is the sample autocorrelation at lag $k$ and $h$ is the number of lags being tested. Under the null hypothesis $H_0$ that the residuals are independently distributed, $Q$ asymptotically follows a $\chi^2_h$ distribution.The Jarque-Bera test is a goodness-of-fit test of whether the _residuals_ of the fit have the same skewness and kurtosis as a normal distribution null hypothesis. It is defined as$$\text{JB} = \frac{n}{6} \left( S^2 + \frac{1}{4} {(K - K_0)}^2 \right)\,,$$where $S$ and $K$ are the sample skewness and kurtosis and $K_0 = 3$ is the kurtosis of the Gaussian distribution (consequently, $K-3$ is the excess kurtosis). Asymptotically, the Jarque-Bera statistic asymptotically follows a $\chi_2^2$ distribution under the null hypothesis $H_0$ that the data is normally distributed.The two-sidded heteroskedasticity test reported is a type of Goldfeld-Quandt test in which the sum-of-squares of the sample in the first third of the sample is tested for whether it differs from the sum-of-squares in the last third. A more heuristic method of assessing the model is offered by `plot_diagnostics`:result.plot_diagnostics(figsize = (15, 12)) plt.show()Our primary concer is whether the residuals of the model are uncorrelated and normally distributed. If they are not, it is a good indication that the ARIMA fit can be improved. Here, we see that these assumptions do hold and so the fit is good:* The residuals over time in the top left plot show no obvious trends or seasonality and look like white noise. This is backed up the correlogram in the bottom right that shows no significant serial autocorrelation of the residuals.* In the top right plot, the KDE line closely follows a $N(0,1)$ plot and so the residuals are approximately standard normally distributed;* The Q-Q plot (quantile-quantile plot) in the bottom left compares the distribution of the quantiles of the residuals against those of a theoretical $N(0,1)$ distribution;N.b. KDE = _kernel density estimation;_ a non-parametric estimation of the p.d.f. of an r.v. (here, the residuals).In this context, the Ljung-Box and Jarque-Bera tests can be thought of as a quantitative summary of the top-left and top-right plots, since they respectively measure the randomness of the distribution of the reisduals and its similarity to a normal distribution (in particular, the skewness and kurtosis) respectively. We thus conclude that these parameters give an adequate fit, although it could be improved by increasing the range of the grid search (at the expense of computation time).*** Forecasting and Validating Forecasts We now have a model with which to produce forecasts. The commands that we shall focus on in this section are the `get_prediction()` and `conf_int()` functions. These give the values and confidence intervals for the forecast.pred = result.get_prediction(start=pd.to_datetime('1998-01-01'), dynamic=False) pred_ci = pred.conf_int()We have used `pd.to_datetime` to convert the string into a a `DateTime` format that `pandas` can understand. In particular, we _start_ forecasting at 1st Jan. 1998. 
The `dynamic=False`argument ensures that only one-step predictions are made and that each forecast uses the _full_ history of the time series up to that point.We now plot the forecasts against the actual data to assess how well the forecast worked.fig, ax = plt.subplots(figsize = (13,6)) # Plot all the features ax.plot(co2_data['1990':], linewidth = 2, label = 'Observed') ax.plot(pred.predicted_mean, linewidth = 2, label = 'One-step ahead Forecast', alpha = 0.7) # Shade in the areas between y = pred_ci.iloc[:, 0] and y = pred_ci.iloc[:, 1], plotted # against x = pred_ci.index. Set the color to black = 'k' with an alpha of 0.2. ax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color = 'k', alpha = 0.2) # Add Information ax.set_title('Predicted and observed CO2 levels 1990---2002') ax.set_xlabel('Date') ax.set_ylabel('CO2 Levels') plt.legend() plt.show()Seaborn Source: [https://github.com/d-insight/code-bank.git](https://github.com/d-insight/code-bank.git) License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository. --- IntroductionSeaborn, is a powerful but easy-to-use data visualization tool! Some examples of charts:![tut1_plots_you_make](https://i.imgur.com/54BoIBW.png) Set up the notebookThere are a few lines of code that you'll need to run at the top of every notebook to set up your coding environment. It's not important to understand these lines of code now, and so we won't go into the details just yet. (_Notice that it returns as output: `Setup Complete`._)import pandas as pd pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns print("Setup Complete")Setup CompleteLoad the dataIn this notebook, we'll work with a dataset of historical FIFA rankings and look at six countries: Argentina (ARG), Brazil (BRA), Spain (ESP), France (FRA), Germany (GER), and Italy (ITA). 
The dataset is stored as a CSV file.To load the data into the notebook, we'll use two distinct steps, implemented in the code cell below as follows:- begin by specifying the location (or filepath) where the dataset can be accessed- use the filepath to load the contents of the dataset into a Pandas datarframe# Path of the file to read fifa_filepath = "data/fifa_ranking.csv" # Read the file into a variable fifa_data fifa_data = pd.read_csv(fifa_filepath, parse_dates=True) # Let's look at the file fifa_data.head() # Let's keep only necessary variables countries = ['ARG', 'BRA', 'ESP', 'FRA', 'GER', 'ITA'] cols = ['rank', 'country_full', 'rank_date'] fifa = fifa_data[fifa_data['country_abrv'].isin(countries)][cols] fifaTransform the dataWe need an appropriate format.# set rank date as index fifa.set_index('rank_date', inplace=True) # from long to wide format # Check-out this reference for pivoting: https://chrisalbon.com/python/data_wrangling/pandas_long_to_wide/ fifa_pivot = fifa.pivot(columns='country_full', values='rank') fifa_pivot.to_pickle('fifa_cl')Plot the dataIn one line of code!# Set the width and height of the figure plt.figure(figsize=(16,6)) # Add title plt.title("FIFA Ranking") # Line chart showing how FIFA rankings evolved over time sns.lineplot(data=fifa_pivot)Plot a subset of the datalist(fifa_pivot.columns)In the next code cell, we plot the lines corresponding to the first two columns in the dataset.# Set the width and height of the figure plt.figure(figsize=(14,6)) # Rotate X labels plt.xticks(rotation=80) # Add title plt.title("South America FIFA Rankings") # Add label for horizontal axis plt.xlabel("Date") # Line chart showing Argentina ranking overtime plotARG = sns.lineplot(data=fifa_pivot['Argentina'], label="Argentina") # Line chart showing Brazil ranking overtime plotBRA = sns.lineplot(data=fifa_pivot['Brazil'], label="Brazil") # Make X labels readable for ind, label in enumerate(plotARG.get_xticklabels()): if ind % 10 == 0: # every 10th label is kept label.set_visible(True) else: label.set_visible(False)**- scikit-learn이란** ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARUAAACWCAYAAADwvRwgAAAe2klEQVR4Ae2dXagtR1aAT0AURUZRFBlRgwwBR5DxjzEPJw/HAREDgkFkzENIiL5M9CHoQ9AgeRCjEcH4IJeT8zLCzcMVMhJIIMQYLsh9uQmOXGRmIM4wMmQUNZAzSM79mZav7ln7rl2nflb37u5dvfcq2FR37+6q6upaX6+16qcPOg9eAwuogW9/8LXuztevdbe/eLm7de3F7tZbz3U333imu/nq57qzK492Z5//te6jk6Puo0sPhpj9s5cfufu78mh385Unu5uvP93duvp8SOPOe292d/7zRtednS7g7pdVxINlFddLu+s18O3Tb3YI/K3rLwVoAIyPjg/vwgJgTPE7OQrQAVaAy0GzWStzqGxWf371pjVwdnoXIlefD1rFJNAYACJg5pAZ9nAdKsPqza/aoAbQBhDYoIUMEPjZwXN8GLSmoMVscN/7cqlDZV+e9Jbv8843rgd/RvB9LAEkmTJS/qDB4I/xkKwBh0qyWvzgGDWAIzRoJDhRM0K65ONoWvh/PKzXgENlvT58b9MaODsNvSv0vCwZGH3K7nBZbzQOlfX68L2BNUCvDVrJqlt3BzWTGmjotna/S9c5VAYKkV92twaCifPWc9N3+y4IUgEue+xzcag4HQbVAI5XBp7V3t57+//xYRhrM//+FTP)​ scikit-learn이란 python을 대표하는 머신러닝 라이브러리이다. '사이킷런'이라고 부르기도 한다. scikit-learn은 오픈 소스로 공개되어 있으며, 개인, 비즈니스 관계없이 누구나 무료로 사용가능하다. scikit-learn은 현재도 활용하여 개발이 이루어지고 있으며, 인터넷 상에서 정보를 찾기에도 쉽다. 많은 머신러닝 알고리즘이 구현되어 있는데, 어떤 알고리즘도 같은 방식으로 이용이 가능하다. 또, 샘플 데이터 셋(토이 데이터 셋)이 부속되어 있으므로, 설치하여 바로 기계 학습을 시험해볼 수 있다. 그러므로 초심자가 기계학습을 배우기 시작할 때 적합한 라이브러리라고 말한다. **- scikit-learn의 주요 기능**​ 다음은 scikit-learn의 주요한 기능, 특히 기계학습 모델에 대해 설명한다. 아래의 이미지는 scikit-learn의 알고리즘 치트 시트로 불린다. 
[Figure: the scikit-learn algorithm cheat sheet (embedded base64 image data removed).]
To6H57HsNHIp2HDcfRc8B27adFL49zoe82SXapJjhLfY2gVo3k/Y1RMtvtNGtf3YlMXLGm8rnhw0ICKO7fko3cNMfI2IZqI8NtqRw1+aYzkA8WV7eh6eMI4hl40bXrqxjylWt1qk8vrNdm+fW438WNtavm4m7SKu556EZv0aL0WWsYRa5RyRY37JX6MUZ949IlrvIxZMXjZsM8acXRgk5Zcnse56nffedhNNant6Bp0oNdxjdvoFUMnu5sWXQk9v9vkdE2s2622a6PttbCpiS8sNe/H7qYll7DEJQ881cXuWIBEMUQS4IsXuR6DRxFHcjm6lrhy0Nr0rhU/cokpDvf222/PDxbUL7+DBwxIK2fMSDpVJU/gqskedpzUMYGjv2OSFyaDrs2TzmqSWYrT7/mYiOZJqsZlYyNWc7x2XKjYQgAdCwrqsV3uYwt9zJlbTbCJuz7XqPpzDhu352IsnoP+uka1z3IeJtIhN3k6tFZT/ejJm/1qn3p/zuF8s+v97zGz2Y5iLePlGmF8eRsCp87j2yuOvdeey/k5Lp3Vyf34VZ8uYP/Nhg3ph8uWpVUzZqRzzzgj6bbTnKJ19tlnp8svvzw/jFM3avDGd7UNne/fxTYNfVGLT7++z9hCvt+yvRY26JqSncX2t6RXHXWjnriwNGbG0KaDE/XEGSM5HOG0IXpxsME2HX1ZVO0zz0F/HDc+uhKWtOioUdIpVtIqhr6kI2eTljjoOaIWTgld57a4Gh9jRBtj0qgvNudjO8KPWjixDvHIj/XhlcZNH7Uj0t+GaMTx1qZhLPCjVnHF4DXlgkceEG2b3rnk8To7o/Vc1PZcbsMF1afmnGgzFnhRSzzqoo8uYuTJp6ZsmvNKMe9327nYQue02a7BbuKXxo3GUXrnlvKJr3hsvWrReQ7qlGoTQweib9Kig4fOEW03dE3HAkRCCjlJNoWd01Qoat3vpo+1orapJnH08r1RV0811/M9tPgYMXhw2nLZZX95tkc1scuT1GpyWk/qqokbk9CMzmeyxwSPSWJBlyeFVX4miHnC6Jqqn0ljzbOajI0JKRwwx8O4nJt5Nj7XsX0Zq0kvY9khR1WjqKd+NbkWJ7+IU79Qg9p1XuPUedCDcPCtXj1+r2086tX7tRpzjLNYYOECev4dcpCrC3bkKnBjf/Sb6rbx2Je6va9O0bp5+fK04MIL06ghQ/KdtPr06ZNv66tnjFx77bX59CzduIHvHN8t92XH5jG4/L3BFxKLenznRrukFYcW+U0+fGFJT52oRxc15JGuTes6cqGNtdx3brSd12RHDX4T3+NwIzqnyY4a95s0xMWVXWpwmhCN67GFTe8R+dBH7FVLLfTkBZvqiw+HHO7LbtLC85pu09+G8B3hd6srXmxo2zBq8Ns09MEtIZwmLGmINWmIwyshnLb9VdIphrYNd0ervE2trWabLo67abu9ruejbpOuxCWGtg2pW9K01YSP3rGtHn3Od5t+YVN950fb9U121OA38X0scCO2aemLmrwAKW1kJCqBGokcox5ezEEcPui5qNOkVX+TLmrJS10d+dBFt7rYfOTgwem6yy/PtzBlQl1PNDUpZTFQTQJ3mMBVHCZ8TJJB10ub42EyTM66X7WYEFc2HOrkfjhgYaK6Qx62pxq3j7POXY1TfYzX8+SxeF/TGG08dR7bB3U9chnft6/WVtuJDyoPY6rHGbevyu2amut1yRVieazUZ/8ETs7N9tFnmnp76TOs39+KX29TgVNzrQ9+vR/Cdnjct8XHFPNqe/64ZUv6+bp1+U5aF40dmwYNGJCPivTt2zdNmDAhrV69Ol8r4guR+L3073Dpuxn5fF9B17tNfxs63200qt1U3/mypQFlu85teCU9ddE31Y9ar+s5SjZcR2wfp9vkEY/mMWzHqEcHiqvmGuySFj4aEE0T5iL2/pR0qhdrNtXzfOKgA+M44JfqRq778EH1YTPebjXRZGHY121a1zGmXmrDoZ6j8jTVpIbXRet92Gw/vuuwvQ+7qb7Xkk0ObPRNiB4+fuSX6kcNftQ2+dQCm3gxDh9Uv1rkyY/jdg06jznfbXLDjUi/NL3ovLZrsSOqXtQQi9ySz3hjjhI3xryO2/DYXpC40PluwylpXJcThH/QCpv05AjS+jOCDvScPk5s0HlNWmqKu8MREE8g21vs6+b7oMgjDQMDPU+TxjlNNjVK+POf/zxNnTo1T6J0vrueaq4HCxYnZU0TY5tkMqGNk896MldNhONkFz5xfNAnhkWbiWqVHx2ovLUdJqod2xryUCtrqz62hZhQsRyv6tDn25NjFQ9b+bFrTTU+4owBVNz7sDMyfuVw23MSF5rNduU6xNHZ9tUa62NsQsYTeUVOlaPm+pjIb2PJ+7ny6zoxBz4Y9PU4eN/oB9FV29KxX6qYTtH61VVXpW0LFqQ5F1yQhpx+er0Q0e18Fy1aVJ+eFb+X8XuofrXIw2/6e0AeeBG76dC31fackY8Px+vJdh9uCdGDJa36Sg1NE7rGczTxY9z1bsPzbXTba5V06MG4zU165UIjjDVLOo9108J1jLb8Ul3FSy/GHDXRp47vL7SeV7qodU3Mg9Y1bkc+NcGoj1r0IGORLy58kHwlvmudV7IjN463pIkxcqAFxWO8vg2uhxsRvutdJztq8COv5DvXbbhNdeEK1eAL0XjMbfhN6NxoR436aXDZZ/iOcB29v9vYxaVhux67lAed0LU+3m66mIN6TQiffnzGoHippuI0tz0WddFHBzZp23Rohf5yjdtwvFbXBQiiv2fU3a70EDadSqLTrjSZ0ukmcWLHxLqeuDEZtYma99V2YUJX9xW0caInbimWc3TLXfWjF2K3jaGjr8caHZrCdql/p2uTp9sY4LVgfD/bxlsaJ3pHbLat9tnvhXHDye9F6FdfjMNncRL9vB2hHpyYK4+TmqaB31RD/ewTuI5aiPxu06Z02+rVaenUqfnZInrQYf5OjRiRLrnkkvTTn/4039b67/lvxf1j7/zP5P79cf/+uP8z8Lf7GdB1effee289AZSth8zqdc899zROYHlPo554L6jJpR5joFq/+93virdx7yXP/Zy/3c/X//Z7swerEUcv6isYbOfKFp/mWtloYhwNSJ4Sz2PwHLGdh3333XenWbNm5Ws+Bvbvn498aDLF5IoJWcdklQlcNdnNXMWI28SuQ+eTY+f4L/keh09e6gk9FjQ+6WQ7akTrei0Mgr/DuG0CSu16QhpzKlfMV/nUAakjn3x1jO1l/5Cjitc1Ctqcw8ZAvYwxD3UC1jnEr16McQf0nNV4qMn2gE1a+oVwiEU/cnK/jUF+rE+O3FfVgFPia5ujhn1e2o/k0JHDX69fnzbMmZPOP/PMdHq/fvmoyKBBg9Kll16avvOd7yR97/gOCkt/B/S9pYlDc10vWuna9OTwMVArYqztPnrXtNWF7zlc67Zz3PYc4ns9fOfLdg19vdSCG/VehzzierytrnPRgNRs0veqLeXJA6z2GXna6vp2u9Y1bvuYXctY4Eak3/UeE1+NWOXWfklHfXSuIUY+10vn2siN
PjnQ4AvhOmLD83rEetWKR12QHLl4+H7QF9G16BgnftTIZ+zonYtNHtdHHX1R06SFH+vq9NelS5emoUOHpl/96lc53RVXXJHnOieeeGK+0Y4WCNQnD3WkX7JkSX4GmvT0OzZplUOntE+aNCmddNJJ+bR23TVRWsbpeWR7cz/y2nxydNP7uKWJfM9Tqsc2wHMkV9ShKdVGw1hc63y3XUN915VsxlDSNuVwDTmdy5hBOIwVJN5U2/tlS1cvQFxUInqMwUV0jttxA3dW5xvITuiWQxqtyvUF0wXnWnysmj49n3ZVT8qY+DLhBW1iVk8QmaDSZ37mVD78PIGsJozE6rphIpwngXDJ65yqDz2TxBoLE9p6TGxTAZlk1pNQ3zavb1rGALJtNTL+at9SQ/1u57FXXLajo1816bexxLr1vqvGrv4ODjkYj21LHhPj8n1IrirWkc/49TYXYh37VGNwTuWjj/mJZ0Rr+4Bc1HBe7jNuBydse9axjWji2FxT7Re9LxqzFvK6TmTjnDlp8rhxqc9pp+WFiP5D1KL/+uuv7zgi4t9j/X3o9bvsf0vcLum9v8mOOvFoTRqPR677/rfObddTD5ReduTjg17H7Zi75MMHqV3iEvO6zieHEK6jdK6F54gNT3q3ySeemnyQviYkTxbYP86H0xajHinEJRZ1MR+aiK6TLZ1r4cOLPnG07sv25n7klXxpXUOuErcU21U9dUDGUKpBrLTP0INwQde4DV8oLugcxaMfdU3aqHOebHzqEgPRg/DvvPPO9NnPfjY95CEPSRs2bMjD2bRpU/rWt76VHv7wh6dPf/rT+YYh0rmWvL/+9a/T4Ycfnh7xiEekq666Ko+jxBOfuI9RixadgnvAAQekf/u3f0vnnHNOzuF8dIyZ2uw7/IjS8UJL7V3R7mwOHw/1utX3bcWOGnJ5ftnwPe5ct53TZMOPWOLH2q4R31uT3nOU+IqVtB0LEIoJPSFCj8UiXsB5aCNGfexv8qMOX3zVpbZW/itWrEgDBw5MA/r1S8umTs2nkDDZE2LnCZVPEDXpsv6aW12c++srr0y/Wrs23XXVVdsvYjc+3A6scmsymOM2qat5TATDJC9rtm1Lf9i8OdfTxcFMHDVGTQJ/c9VV6RerV6dfrFmTfrthQ9JT3DU2ceua1fbkCWmsUfkai09maz9sH/uG3Kqv03PYNq8Rt0/a+lVtM9vTwWW8IFx8Q+2Df/qX7Y/8vlTvmfZt/T7y/lof+z6/p/a57PgcxHil5/3iiMi1CxakiWPG5CMiWvQPGTIkTZkyJf3oRz/Kpwn491PfV7WI/n0u/Q3g++08bJC/Ba5HRz3nyiZe0sZY5HsucdteaMXBBks6H3dJ06b1fPCEauTCdq5sf6+2Kzr/RQ836vE7VTvWjduHDkQv35v8Ji1x+GgdS3p08NALPbaz2phH+viiduRGH17U48ex4tMf0fPBpaa4xMRzrvqij84x1iv5kY9f4lLXa8N3bNKW4lEnnzolfqm2eDS0zot5nOtaake++2h1BGP27Nnp+OOPTzrNXE01t27dmvbcc8902GGH7bAAQat80uuHIvSKSa/TuLSYUG6doqUY24JeCF839nnkIx+ZdMt2Hyf9HkNHHvq8BjHXl3QxR0mn2O5o0VPL0eu57dvifGznttnwwcjlPYlxfHRCxWj0t6Fz3ZamVNdj4pMbGyQOdixAmkiQKSKeGvHK7YjBheO+890WVzznoged7zb90upLo8OBWnzoV1mds37P5s2dv0L7xKw0wapiTLKYqP1o8eL0zc9+Nr3pla9MH33nO9PtK1Zsn1C35EC7K8jkceH48el9b3tbWjt5cr1I0KR73eTJ6VMf+EA67MAD05te8Yp01Mc+li4bMya9501vSlfPmPGXyWe1veRrG0vkRF9aYlqMffzd706DvvSlvEgiDrbV6dbnOaKtbdcCSwuzezZtSr/buDG/frtxY/pnemnhp+3XojMvOH0hUi048r5z294/3oO4f/099gVjtu2z7jo9bf33V1+dvnfFFWnq+PH5gvXTTj01fw/nzZuXbrrppnxeMt9VIY1Y9ImD/rfBudhCuELnYzsHnbjExYNLrugTR9emFSfqqQvCiTziQr2ogw5fupLWdWjI4z68EsJzJIfzvT62a7BLWs8ju02vPJHvfknr/Z6/W5wxO6KnTswhv6nBbdI26RSnriP5wCY9/W3Ypm0ar+dr0zsv2ugUpzlHtWN9fPglVA54IDF86bwuvtePtmu9Lnngw8OPKK0WATqSoJdsry+9Xjqd9Wc/+1nmeA1d46HrL3QkQzy0N9xwQ3r605+eFyC/+c1v8nxIP8jy4poR6ZX7rrvuqveT8usU2re97W1p1KhROb/4bEsuEvaZbs/OAgQe26pt0kOfdbqW8jBG8gilEUfbyLagV590GrvGq7jmd7qpEFzVYNtkS6NtUj7tH2lo5I0+8YjwhJ4HP/Kjj76kjfuqpI066vaipXZE6jTloEbUEW/SlcZKDmoKXZ8XIHRSAL8JSQo/ohco5XB91Dq/lKeNj/bmm29OI0aMyKeDzDzvvO2/ztukKU+u5IdJWfwVuJ5ciVdxNdG7aeHC9O599kkvfe5z00+WLPnLEQmvYbnzEQHqWZzJX0aPB1vjmDZsWHrVi1+cFupXBvVv3Zp+tmJFOvg970kHv/e96faVK9Pwr30tHfGhD6XJgwallzznOWnNhRdmbsd2+BiDTd568lmNA/+Xa9akDVrMXX11vZi7Y/Xq9MaXvzx99dOf3h63CXDen5Wfc1g91eLFvmU/+Hhrm/1XTZ412b776k3plg3r0vfWrUo3rF6erl+1NF23csk/yWtpun7l0vTd1cvTz9etTXevX5/3vy9C8v7U/rL9znvZsc+rfQovYq2xhUvk4IurhYieJ6I7zWkhogcb6ojIGWeckRYvXpz/Y9B3m++yvrfe+B53w6jxfG1aeK4n1qaLfU16bVvpb5f01AE9JrtJR+2mmvR3y0Fd8qDrVle8Ji05mtB1cIjhR/Tx5MJhAuB85yruPnXIge8853tet9v0zpPd1CKvrW4pR9S7Ty7pPC7bY/AiJ/LQgOpv07o+F7R/qNWmN3qHibapPmR4+EI0qttU2/luo3WkBkgN1xGD04Tibdu2LfXv3z99+ctfTl/4whfytRi33HJL/f5p4q1r6r74xS+mT33qU+kzn/lMGjduXLr11lvzJPwb3/hGOvbYY/NLzzejti9ANBH//ve/n3T79GOOOSZzdXTjjjvuSCeffHL2dQTkxz/+ca67fPnydNBBB+VTuN761remo48+Ot92nWv6cpHqH22bmi9AFGN/33bbbWnAgAF57AcffHD69re/nbZs2ZL7yaNx65TdI444Ip8y9rnPfS5deOGFSaeXKZcWZuedd1467rjj8r7Sab1jxoxJhxxySBJXt4NfuHBhPoqj7dP/L7oORuPWdnzlK1/Jp5dp0eLN3xfG6zFsNNFXnBjonzHsqBc3auHGPPDIATqvFy01XR915HSE7+j9JTtyo+8ajaE+AlIiMkgQceTKVx8
ITxi1zoOfhYU31PNgw3WkT7X0gZ04cWK+KEoXyP5szZp6wu4TsnrCZJNs+kFNfvMv7dVF60yYhZ/58IfTfz/nOR0LEPp1Sop08n2ip5qKaYImDvxczybk3gdHp1dtnjEj/fqqq3JOxXWE47+f+9x0+vHH55x3rFqVfrJ0aT4dS1ydhsW2CHNtHTmgdpjU01+P3Sau6tPi54uf+ET65dq19dg14d16ySVJR4bEifVK+4L9ID6vjn3CuML+823QkY/V65en96yemF6+anzae+XZ6aX5NS69dKW9VpjdS1wcNKDHSjmc51ziESOHfs+NHfs6/LPTvivOS6uvmJ8XoL9Zvz7de/XVxc9eXgyyX/VZqF68Xx3vXXgfS5z8vtnnA07G6n3Ve3/n+vX5YZ/jRo7MRyP79euX/xNduXJl/pWK767/LSAGNv0NcQ1/D9C0oevE8+a6Ul36XYNNXxvCFcJzm1gJXYsGLPFj7K+pV+5ea8e6+Mqhfcx+Bn3ccMFS3ZIu8qT3vLKbdGip6Vps17pNjajFpx9s0tKPjrrESzr1wXMdMfVLhxYkJzy0ng8t3BKip899r+W2uDR0xPDhg8SdRw5H5zXZzpftOWVTU4hNLteidGBIRgAAIABJREFUI4dz3UarX+/33XffPLHW30I9HPk5z3lO/pGGu1ddfPHF6cUvfnHmLFu2LE/mH//4x6fBgwfn61t1VFmLhAc96EH5tCtqxwWITs/S5F9aTcrXrl2bjwzMnz8/veUtb0kPe9jD0tVXX523fc2aNbneHnvskT7ykY+k4cOHJ/F0PS3bCLL9vgBhP+kIhRYVL33pS/OpWboL6Ste8Yr07ne/Oz/MVjnEPfXUU9NTn/rUpBuY6JT5r3/96+lpT3taXtRo0aAjG1q0aNzPfOYz01FHHZU+//nPpyOPPDIvkrTA0Pbq2W6Pe9zj0vve9768OFG+r33ta+lJT3pS2m+//fIzqxgv4xcyDrD0XqFj/4Lo0bqP7Vq36Wd/NdUVzxs6YUlDvzRoQfJEbSkP+jZtSSc+mpIdNf9Qt+HV4ba5c+fmW4OeNWxYumnp0npSnCdYTKyqiW1ehNgkl0mVUKf0aBJ/Qf/+6exTTkmXjBqVfrhoUZ3vyI9+tGMBokn7j5csSXNHj07nnHZaOr9v37Tk3HOTJoVMyjRZ3zJzZpp9xhlp4oABaf6YMem2FStyTt0W+HsLFqR5Z52VJp1+epo1cmSe1Ct+/dy5uf6lZ5yRfnzFFXkBotiQL3857fnv/54XQ6q7eebMtGn69DRLt0UdNWp77mqSqNpasEwaMCCN+/a3M+cHCxfWk1WNY8G4cem8vn3TeX36pMvPPjtfU6Kx371hQz7VS0dgdLRDY1963nnptpUr86Jk5ogR6aqLL65zaf8p3+Lx49Pob30rjTvllHTFOeckna7FRHXR+PFJOp1Spv06ZejQdOY3v5kWjB2b6+b3xibM7EPl1r7+/aZNae7aRelpq85Ke6waUX6tHL49vnJE2qPDrvilWM41PO0hDXlLtsdWVXXEJx6RXDtwbYzeV/M1DstPXGNfOSI9cdmoNG3mRel78+cnLUD1udV7rX2kfcZ+rD/rTYsGj1ffE7Tse8/nC+tGu8qpxeVdGzbku2aNHzUq9auOiIwePTpdc801HQsR/mi1of9xbOP9I/XtzjbvjvbvdR/uzjbfV9r7cl/vzjbfl+O+r2rv7v7SokMT75kzZ+YJm05B0uRb18zpNKIf/vCH6R3veEd61ateVd9RUD+uvu51r8vzG51apcmcLjh/wAMekP+Osi/iAkTX4H384x/PRzv8dCvpdbcsLUB08br0mvTrzlpagOiItRZDipG7hL4AUb/y6rqQxzzmMfnohfSK6ZqShz70oflIBqd1aaGgRYkWGtLqVu6PfvSj8wJD20u9adOm5YWWuDfeeGP63ve+ly/A19EO5dcp9y95yUvSf/zHf+TFmPJrH2mBowXaggUL6lzk/FvG3f18/S1vm49tD30wFBDykk9zcrTRoqc/avGFcJowcqMfdYxZH0JdfKULX/uedlraNHfuXx40aJMrTWDzhKolpgmcJvRveeUr03GHHpqvb9jnZS9LH3zHO/I1B9J3LEC2bs1HBU447LD06pe8JPO/fsQR+ZQpLWCop9OX3v+2t6X+xx6bRp90Unrtf/93GvqVr+TJ9I2XXZY++d73pm985jPprJNOSvu/6U3p6E98Ik/aV02cmD7w9renpzzhCflULOVbOXFiOvrgg9PjH/3ofG2IFiOXjxuX5o8dmw54y1vSox/1qHTFuefWE9Cl55+f3vKqV6UvHHRQPlVL23PogQemHy1alOv3PeaY9PIXvjCd8oUv5Nfr9947DTj++Lw40uldGtOTH//49Pz//M/0xYMOSiNPPDH9cOHCNPCEE/IYDnvf+/LEV2P76bJl6Uv/8z+5Xp+jj04nf+5z6Q17752P1OiCdU1Yh33ta+llz39+2uflL08nHHpo3pZDDjggPfOpT03DvvrVPCbliu+XfL0/uth+9qoF6akrqwUIE34m6MI8SbfJOxwWHs7NdrXwEM85+OhzbvIHbszJGOocLG6EGluFmadcnjfYmcviaXvfvy8ZkcZPGJsXlzr6pVOxtGjNCxD7rMf9yKIhx6tFOAsOuELF8PkcZyS3IYsc56NXTEdEbl+zJl+TNXrYsKTrQ/QfkC6E1GkCfJf5jvt3n78dIByh/x0i7lrFetXBdb3b5HeM427K4ZqSHeuUxiwd9eJ2wydPqQYxcuALY/M+bK+JHXX4aED48rHhguqjoWtCeELX4aOjFn7kxjzOa7Jdg13i7mrtkq40bo9RX9o2vWt62VeR79sb9eJ6beyocZ9xO6IjBt+RvjZ0Prb4am06OGhA4rui9Rz64UUTdJ2apDtQaaKvRQjXLOiIh37x1+lX1NTCZN26dXmSzv7RL/9xAaIJuq4B0WlbOqVLRwl0xMBP72L83DGLBYjGqCMRWoDoByLNq2hoIrIA0aJDTadradwPfOADk07tUpNGp5zp4ngdWWEhpFOtdFqV+tW0D/baa6+8YNJpYmh1KprynXDCCflojMalfaV9Iq2O4Oy9995pn332qcesuBYuD37wg/OCSL6/2Ie5SPWP+qnpXLfRwXW9267BjtqmHPCbkDqux44ar0kf+oj0g2iF2K6Bpxh2N4Qr7DgCQgEloGE7disAlxyOUUtN0LnY5GvSKq4PtL4A+kJefPbZ9UXnHZMlW3R0TKaqSRYTMN3h6t1vfGOe9N98xRX5ouZvHXlk+q+nPa2+/qFjAbJtW/71WRPorx1+eD7qoUn7e9/85vTO17++XoCcfNRRad/XvS6fIqVf8LUQ0a/+uq7kooED0+te+tJ0zaxZefKtIyiauOuCak3cdF3HU5/0pHTx4ME5n2JahPznU5+ahn71q/VFyJqcTx0yJD30X/4l6SiDtknb89F3vSu98RWvyEcmVE+T/Fe88IW5nn41P/7QQ9PhH/xgPpXnznXr8lEVLTY0UVUtTWxf+rznpY/ocObixdnPv2xfdV
V6xl57pYP237+e+E4bOjQfmdE4dFGyjqCMP/XU9My99sqLJO17XU/yuY9+ND3iYQ/LR2U4fUwX2mtRcsO8edsXT9UE1ye32kYdWZq5Yl566srR1SR+RHrQsqHpYYsGp4frtXjIP+zrgcuH1Ud2Hrd4WBp+1rB8vY+OJOm0O52epgVIXgjYIiIuDPgO5EVCdbQk277PLa7PQn4fLLZDDfqM21Fn69b83fzxihXp0gkT8iKE60N0Pi/nGvNd19+Apu8/HKH+fvByPnrnus3fHY/tjI0ejFrqg7F/V/y/Vi3l8Vw+Rrd3ZozoQNfGet6HLZ0aPujjJAbPMYuD3rXYaGIu/Ihx7FEvfimmODXJCS8i/WCsqTgaOPggccdYP+aJ2hLf88EHvY8xg97ndkmrfq/ttvPd9pxuo+2FW9Iphhb0mGua7JzAPovu65QmLQr0a79Os9LpQlo4MG492PWxj31sPu3K82viDUfYtgDRNSM6wvHkJz85XzfBZJ180veyABF/48aN+QiKrrvQS9ee6FQu9fkCRL4uOP/oRz+aFzEf/OAH84JDi453vetd+WJ1nTbG0Q0d+bjsssvykQotWrQg0wXt0isPY9UCRLcW1ulnGjcv+lmAvP/976812t866qIjPGeeeWYdlxZdRH+vY1+sSX/URF88r4ktnjfytaFrxfOGX9KXxi6tc6OvPuqBsQb1PY/b6ED6qNWxAKFT6M195zTZro22NHEwnify3Xee2/pi6XxIfRn1y+pta9Zsn/Tz66wmRQ12nhx537Zt6fsLFqRHPPzh+Vd+TmXRNRhaVPDLcVyAaDKuSbom75pc37p8efrQvvvmBQKTtuFf/3p6+lOeksaefHK+kF0Tfy7ovvTMM/Ok/bQvfjGfiqUjBVqkMG4dkfmPPffcvgCpxrv2oovyomjE179ej0t8nT7FAkTbpztlaTHR75hj6nw6HUpj1GReGi10NHYtVnRx+zEHH5we8uAH14sn5Xn5C16QFxraPvaD4s99xjPqBYiOTBz18Y/noym6WF+5tf2aGGuxoTuI6Rd68b502GHpcY9+dH2ql3IN/vKX03Oe8Yy0atKkjtrqy7mqIyDaP9OXz017rdACREcDRqQnTeuT3tT/S+kdg7+e3jny5PTu0ael94ztm94ztt8/wKtv2v+sPuldZ5ySHnfZoPpIyWMXDU39h28/Je77l1+efr1u3V8WIFpwa/+HxYB8f/+y7Zzq88XnNu/7Kpds+HW8Wthnfi/1qvfy3i1b0o2LFqWJZ52V9FR1LUR0K0cdyfRzjvkb4N/50t+QGHOdbNd3s9Giw5cu1om5nEsfefBBz+U6+B5DU8ISrxRzbVNtcWjOb7PhO4rvNUp68RUvtRLfY1FLH3H8ElJPfbEp1su40ZGjVKeUC50QLXbMEcfh2mhHbaxNDXTwieODXhtNCWMd9I7oiEWfuCP14UYUVxx43bTq94a+pItc6TzmmpJNHdd4THH9aKpTxjVp1q/+z3rWs+pTsHTNhJ6toSPEtFKdtgXIf/3Xf+XTm17wghfk05Ouu+66HfZXXICoRukIiE4Z0/UV73znO/NLtp4BIr4vQDRW3X1KCw8tGLR9mpvxkq/rTHSK1A9+8IP0gQ98ID3/+c/PNw268sor85EMHSVhAaJ8qqEFyKMe9ajGhQQLkA996EN5d0mj1rQAiZ+ZTLZ/pNer6fNFv0l6/nxQu6QlbxO6VhwafPmy4REn5nG0jvBcF2PUUJwGX/m9hscjX9qOBYgLS0U8RmJH13sxBtmm3x2tPsg69UpPZL5y9uw8wc0TperXWOxeUJOoq6ZMyZNlHYVgEsbkN0/e4ilY27blhYSOSOiCcJ0ONerEE9OrXvSivOBgsqcLtT+83375iME73/CGNKF//+2Lmuq0peMOOSQ94ylPSW999avzEQq/5sQXIBqTXh0LECaBcQEif9y4vHjRUQj2AdsjX4uQ9VOnpiFf+UoadMIJeez7vf71+ZBn5lcT0o4FSFVP2+YLEC00dLrY21/72nxBPPW06HjCYx+bbxOsoxcsQJ6255554cN+1RGhZz3taWnFhAl5G4k75iMgcQGyakR68rS+ab+BX08Hnnlq+tB5g9LHJo9MB009Mx00bfTf/2vqmeljk0elj1wwND3h8qH1dSGPWTQ09R3SLy0+55x8HQgLkLy/qkVF/ryUFh3VQoD3SIiuI8Z7bd8n3g948tHS598dfU6IY8vXAv+X69allTNmpFFDhuRfwnT7bJ2zqzuk6O8Cjb8p/M2JfzOi73zPgb4bogGdX6rl9dyO+jYtXCH13CZWQte6XeIq5uOAr7iaY5Pe4+gdvd9redz50XZeyW7iK17i+xhcK663kjbG4O+MlvpoI8Ya7rvWa0bbNdGmnms8phrUadLCd4xc98nnfGzntdniqz+2Ng19JQ0xOIwx+vAcxVGD24RwXIstjY5k6G6dmojrtCPd6UlHQnRhui4a19+/pzzlKfnuTujI6TXbFiC6le53v/vdPGnXBeg6csEPO+SICxDVKC1A9COvrqnwl8auPL4Aka8L7PWAQ117oTthSauX+HrJ1j6/6KKL0r/+67/mH4651kSnXT3xiU/saQHCNgjjAoR9VlqAiB/fc/ggucWLXPfhl5Acjm1a8WhonE8MdC62kP42hC8ODb5qxrrRd43b5GhC58ruWIC4CKKQuNvEmhC9+mlw48YoTsxrRB16R50HqMOV+vV00pgx6U490ZNfYauJM5MkkIlQnCCxUNAF1TqCoIvJs8bzVJO6eATkyosvTq984QvTsYccklZecEE+tUnXYuiIB3U12dJRAV0Xoud0POnxj0+6RkJHIzQmHZGYc+aZ+cjJnk94Qr5GQ5N19fkCRPkU62kBsm1bvrhcR090QThjYVvla0yv23vv9Kn3vz9fOH7NzJnpyI98JD3ogQ/cvi+rejssQKr90rEAufLKegFyS3UTAO1nLUD+/TGPyadd+QJE+0dHXngvfAGSt5NJcYXa7qYFyFNm9E/7j/x2+siEwengWWPT/1x+QTr8igvT4UsmpyP+zl+fvuKi9KmFF6RDLh2fnnjFyPqoz2MWDk19hvTN75suRO9YgLAo1b6zRQSfAcVyvPpM1/s7cLXPc1/Fk8+r1th3pIPvcX8veT8r1NEQPT9k+jnn5IvUddtI3S5SR0P0t8H/Lvj3v1d7Z/Xwc2H7G6h6Gg9/r2J9dMTRxzj9u4Kl2tSJ+XqtCw+MeUo1PUZ99GDMU/LhRixxPSa+mmIR4WmMPk7sLAj/eB7ZcD0XNkhd+dixJlxHSntsZ2zXUxd9HDdxIS3acKI2+k36yCMfyBgdseFEJKfzZNPgw8MXEhNXvjd8+HCbfLToyCldkxaOo+fRUQDdepdTqnQ0RLfjfd7znpcn7bpeQhegv+Y1r+lYNOiUJU3SNZFXbRYg3N5WY9RF6LoY+9BDD81HWfRDjo40aGI/ffr0jjFrAaJTlHSKFdunU2G1eNAdsDQ+toH9E1ELCZ4DIq40OnKjJ6yrnvO1uOL5HWeddVa+fkV6auvvvZ7sz
hEQtL0cAdEdt5qOgOiCenKV0N8bt+P7G7XOla1+WuTie07XlLTOVT++1yAHeufJLr3Qo8V3LbU8JhsNtvvSuE4c11MH7LoA8SIUokBTsahBB6JncI5whDTywUOvD7q+ePrFdGD//um73ArWJkh5kqVJUBWLEyn5vJik6eLqRz3ykemUz38+P+SNSZZOl2JypQm6nrVxS/UckH7HHpuPBOjUIXE0ydZF6/UCZOvW9N358/O1DZqM/3z16ny9iCb135k7N+laE93OVosUne6lZ3powq5JpcbuCxAmjWsvvPAvp2Ax2YxHQLZtSxunTUsvfvaz80XteeJYTfo0Dr0uPP30tNeTnpTvzqX6OvVLF7g/8AEPqBcg2geveNGL0sf33/8vp0xVk0udMsU1IDqtTKdWvfg5z8mnkrFv9aT2hz/0ofkCd11TwhGQjgXI1q35mhg/AoKe90DjLy5AVo5Ie80akA4Y2zd9YurodPjCSemoNTPSF9Zfmr6wYU764k68jlo7K31m2cU7rWuq8dkVU9PnVk3fqTHEXNoOjeuIpZPTk5ed0dMChM9q3of2Hchx+z7Qz+effV7zbLEAN3+f+O6wyKhQuvp7J05VW1pqMDZH2bpm6PqFC9NZw4fn2/bqPvk6R1i3dezlP0P+RvC3IyL9IH9L8IVtTf0lTdSTB1RO55Rs6tKHH7Wl+pGLTy40IHEQPrXw6QdLergRpSnxyQWik++N/iYU1zXwiOM3ofOwwV7H3ZRb8W45VMtbzFXSiw8PLX43hA+KT9tVLTqNtTRe+mMd+fS16eC43rX0NyFc9O43aYhHbvThlTDWE4cmW3e/0ilXmqDrLlW6i9MBBxyQn4WhxYgWGJrE6/oNnnehRYLu6qQfZ7QI0SlVn/zkJ/O1FprM6/kgOv1JN/XQguANb3hDvvBbRz10hEBHWHSnKP1gq+sr9MBB3d5Wiw1dIyG9jk7oAnldxH7ggQfmC8R1ZyqNh+3kvdZiSBe26zkiyqELxHUzEfGEOpqj06vOP//8tHnz5nxUR4si3dZXOS6//PK8UNJiQzV19y3dYlcLCY19w4YN+W++xvXNb34zL0z0rA/dAYuL9VVL/zfo0Qs6je1lL3tZrq1tVuMIiK6H0Xbo/xC2A+R9caSvG0ojDhht9lUpj9dzfYkbY/CbcvT6nSIveeSjBeGAzmUc9DVp6AfJ0XUBQgEEJFAhXsRA5zbp4cYBoy2hNF5TX9QJEybkXwH0wEFNbOvJUzVpypMem3wxgWIixKQoT4SqCZR+kdfF0Lrt7LLzz8/XL+gOQzqFSZNzTdo14dZ1FbpYWjl0N6un77lnPt1Jk3DdgUpHFXTkIV/LsW1bvq7i20cdVT+rYczJJyfdjUrXnOiULT1bRNdgaBEwdejQ9OynPz3filf5Lx4yJOmoiG4JrLHqpVvh7vXEJ+ZJfd6OKq6LwB/8oAflW/mKp+3RHbY0Xh2dydd/LFuWFzW69e+M4cPTEx/3uLwQ0VO1dc3Iu/bZJ/3LQx5S30ZYeXTa2H5veEM9Jk0sde2IThv7wDvekfeTxq5bCGu7dctf6XTR+9hvfzvHdKtf7XuNSUeRnvj4x+cL+DV+5dMRkKc9+cnbL6Bn4moTWuXbYQFS3Tlqr1mnp/eNH5A+OWtc+syyKekLG+emY7YsSMdec3k6dmvvr88um5LecMLh6Ysb5+6UrlTjmC3z04fPH5L2H/KN3cql7fjipnnpyNUz0pOXnbn9GpBVI1IvR0DyvvXJP4tVFhBg9fmHX2OI58UFGssLn+9W5rnWbDjSYIP6DN26alW+SF0/LOg/XP0npl8F/T9C/obwd8F9bP/7Q8zR/54Q9789itHob0O4juRwXfy7B0c6bPjEXOM2fHiO2ORCBxIXT41cxEH4Qmz6uumc7zZ6ITmI4Tfx4bk2J6n+Ubyb1vnUA7tpqVvK4WMr2a7xPJHLGED6GaPnISYufLCka6qLHkQLUjPqvVZJiw5ET15HzwUPHej8aLfpm7iMeVe0Mad8tTbUAkLPtNDF2XrIni5I14XdHMmQXqdm6UF+uh2vJuRCTaRXrVqVFws6YrL//vunN77xjenDH/5wOvfcc/NEXA/pe9Ob3pSfZn7SSSflW/pq4q3rLzTZV78WIa7XOKTXxeF6afHz5je/OR9F0dEDjTdupyb0I0eOzAsnjUHXheiICqdYadGhWurT6WDvfe978y14ObVWdXSURX16VocWJ7quRD8oa1t1VouOluhvv3JTQ7cr1lEexjNnzpys1Xil0wMWtUhR0756+9vfnveTHuKohUt8j8nj7xk2XBCuI1yhN3HQga6TrVZCeG06r+U22jZ0vtvSeE23S/nQ0ic+GrfpF182rbgAgYTo/wp3tu61116bJyeD+vdPP1q+fPsvrTYxYkIUUZPYxlg1wdUCQrei1XUcum5Dp1N96gMfyM+s6Hv00Xmx8a//7//lif36KVPyNRQ6mqE7OOn2tt888sh8y1sdxRj8pS/lowZaPLzk2c9OXz388Lxo0IReE24dDdARDumP+tjH8mlZB77lLenEI47IOi1G3vaa1+TTwt7wspelif3750XD+9761rxIeMGznpXvqHX19On5+SO625Vuo/e2V786P7ND26sx6hkeenChLo6XVhea67oUvZT3lS96Ud4e3U5Yd/PSBeK6+5dODdP+0q13tTjQqVpacK2eNCnn0MXlih/7yU/ma1p0sb7u+KW7eulIyuc//vF8y11dYK7FjxZwenK67uClW+TptLVts2fnGtofD3voQ/PtivVck/p9skXIDguQ6ta3eQFyzunpk5ecnXTU4ehN89JxWxem465dlI7v4QVPk/3nv/dt6XNrZqbD5p6bjtTRi01z8+lPR627JH1+3SXp0wsnpU9ffkH67Mpp6VMLJqQjrrgoHTL77LzIUJ6j1s5Mh156Tjr66sty/JVHfCyPgRqMR34ppn7imbNtYTp68/z0ubUz057LR1e3GO6+AGFSn49C6LsRPvv6bIiTvxNVX/39MB8e+prD901c4+f3rfJ3iDeMw3nK/+v169PGOXPS8EGD8i9/I0aMSLo9ZbxT1v/V36dudXb271e3fH8P/buzzbuj/XvYN6Ux7s4231fa0nb8X8Xuq23enbq97hstMDRJ192kdOqRfCZvyiFbf+s0z9FEWkdJeAq6jj7oCIh+lNFLHB1J0Us2cd1Zi7+X+vFGek3ehSU9D0HU4kCLIZ0qpsm84nG7dJRB199SS3V1VIVt0KJH14Bo3DqtS/V4fgm5NDYd/dA+uOmmm3IdjU05df2KxqxrZXyblEc/PpNDfO/XNquO+rWAoU/PVolHQHbnfb6vtGz3fYG7s82l8e7wJPQSiZUMHywNwps0NNfDB+mDG5F+MOoUp+kLoQu3Tj3llDRv4sR86gaTojxZsglRPRmyCVLmwglx9elXe50Wpedq6Ba5OnKg51vo1KpN06bli6SXT5iQb3+qu0aJr6MhegCgNJq066iJTsnSqVU6CqK7ZOlZIJkzZky6/tJLcz7V01ET8eaMHp0XI3pooI4S
6JaqOnVLzwJZNmFCvvWuTtnSqVlaACimcWycOjVP/q+fMyeP1bnKr1+VtdDQBekXDRqUj+xo3DpqpD5dB6JF0OxRo/LDDvUgQV1jogcbatyaHOr6Al1oP2XIkKTTv7SNOlqi+nqtu+iivJhSPS00dEvh6cOG5SMiegCj8qhPNXXNjDQap6650SlpqqHrRhTTtt2hu44xOa4mulmv2/DGi9BXDk9agLy/XoBMqxcgebJ/3eJ0vF7Xbsd6ct8RX5Q56nvBgfumQ2aPT/t86TPpsHnnpdccdXDat8+X0j4nHJEOvXR82vuT789HSfbr95X0lhM/n177+UPSW791dNqv35fTpxdOTK/+3CfSuwadmN5xyvHp0Lnnpld++qPb69f1ttfKY7JxbR9j1adFU+5blI7btjAd4wuQfOev4T0fAakXIiwYIlbfgXrBx/6OvOB73tou5aqOzpXy17pQk8/KLStXpilnn53vlKUnqU+aNCn/x6f/TLzx94G/H2D8OyINXNcTb9IRjxp8+tsQbsQ2DX1Rg08/yPaCitNKNromRFvCqPGaXqsXbczVpi9xY0w1Y47IcV9jZ/w+3p3J4fmo73rvpxZIn9fGpg+MGnxqRkTXhq7BbuOrz+uiEXrrlkP9xZbPH/9j+vOf/pD+/Kd7qpdsnUaz/VaqRV0V3OW61eemm76pdjcd2wuPfRjj8tXnrxKHPCU+Wuf0YqNzlE7NkVyRJ199MQ6ffYfvvGjDKaHyRD5+ia8Yranf4yWuYs4p2RoDcXI40ldCtNRxHbGSTjHXRh3+rmpLOurRR42I9IPo8gJEQTVHiCUkufeVYvRTDB9uRPrBqFNcTahVrQ5Pjhg0KH1/yZI8sdWEpV5sVKd21DGbOOUJD4sOxZkcub59Qo6xAAAgAElEQVSK6dd2f66C8mnC7i/FeOnXfWnICS+Po+Ipn145ZuOUT590MQe5mlD6Hfq0TTY+jU2LpZy/2nYm+Xlbq9vyeq56nFV+6cV1DnXhkjNvj+XUWNQHH0TnOXOsGj/5FFPtjgWInYLVtADJRxGuXZyOYwHQgMdVC5TnH7hvevOJn8+LCF1D8vTXvyJ98JzT02uO+mT6n/kT0mu/cEg6eMaYvMjZf8i30jsHfC19auGk9LJDPpgOOOPU9KrPHpTeM+Lk9PZvH5s+MWPMjguQUJ+6LEg0zuOu67IA6eEULP+sax+y/7NdfSeaFgDs85pr70VHrMqzA5+4Pn/YwuozoPcyv8feV30fPL84Ohqyeub/Z+87wOwojq2vDBhMMDlHm2QwGLDJYIKNydFkgwGTc7QNmBwEQjnnnFbSrnJaSStpV7urnCUySAQJIQmUUZbO/526c0Z1e2fuLvZ7fvj9b75vVN3Vdap6umdHVbdT/3inrObNm9vpvfwhIrz0/fDUf0sk778nnudxTAsrStmaYj3eY5hWXvrES6OqY0iT5FVXlXkMef6STBqlrDBJNMR527IT4qTTY4nTLX6IT8JR1ttU2stKj+fJhqhwyidhkvCqs/AeF6al21PhxJONmmCFkQ6PZZkuyeWjwgrnqfRXh5c96RL1OOrSLRvg38OmVdj03XxsXDkb678dj7WLRmLNgoFY82Uh1nzRE2u+KMCaBX2x5qshWLt4DNYvm4KNqz/A5vWLLDgJbXubSofP4TFhXUPZUEcSVjpqipVOr0s8T70+L+vTXj5MC+/lmZacT4snKizz/vJ5pr1cGlb8UI/4oqEuL680ZXkJk0YlE9IkedrV7cvNkPvHlynt66y0IJTxlzCeCiMe5X3a58X31Nfb2/JpL5+WDu0oL5qGI1+XZJRPw8YjIAILWB0NFSufhAsbVraEEU3Ckhc2LIf+uHiLcwQHdO6MdTNmbJuqI2cmcnLkyOQ4OqEjFcjKYRfWqHOamDd9zoGS/tjRko1At+S8ExbrU93zUemL9EufaGLdvT6PJz/So2c1PZGMnkU8UvKMHz27yvzzGM8FVjm6XduFfOVFzZ7qG+HIqxKAuClYaQEIHXsLPqIAQ45+Gj3u6j/givov4KxH77QdtI6+6FzcNaQj/ty/LR6qKMK5T9+LvwzvgqfeHYWrGr+CKxq8aOtOTrntWlzf7h0bEeHIyd0juhrutHtuzo6uRIFHNsCIRmWCYCSukxutqToCkj3hvUZrQNRvaktR915YW7u8+iAf9e+ByenvIdQf5eP3Se9GYE/vVvz+SE+klztlfThqFDq3bGkHGDZq1Ajjxo2z4fbqviH6jqR9ezw+6XsV4qXHU6aru2lHMrLpeSpLopKrKc4/hzDS63WJ56nHip+mI0lWGNK0i2U1wSbp8PrDtHR6u9LhZSVHHtM+T6wwXo/4aXrEF0Z56fJ5pUO7Hqt0kl3hPZWccCwTL3xGj/Ny/wxWuoT1lGX+GZW2+mxYjPXflGL1/I74du6rWDz1USwafwcWlF2Dz0ddhPnFF2B+8fmYX3xeRM/H/BEX4vPRl2Bh+Y34etI9WDrjb1j+fl2sXdjXgpetm9ey86z/ZEv1C+sS1lP5UM7nQ53CkKbZ8Xym812hrPKyK6z4ypOK56lw4nl5YURDWeZDnsdTp7DSL5qGE0Z6mA9lQx2yIYzykgupr7dkhaWsLqZD22HeyzItfGjT56UjxArvZX06X71DrGwQ79PMh5fnhfLCinrZJD0s9zdxHuvxSks+lIsDEBmSYBIVWLJJVDjJKh9SYcnnpfLqcJTjnETOB+cC1Q95GI4cFefQyNnJ50CFZXKoPL9GehLsex1hOklnEs9w1emOyoUnVTq0m5qvoY1U/L/Y7jV6zsjGPxOA2OiHRhNy6LZpUH5k5L7RBTjl9utwY8f6uLrZq7i2VW0LMjgd67qWb+KuIR3AgOLGjvVsp6zfPnMfLqn9V9xf2hun3X+rrQn5w2tP4/cvPY5bezbFNc1ex2n33YIHx/WJpmGV5IzEZG2Tp5uBiZPhepB/ZgqW3g0FBmxDx/Pt7gMQ/T2JUs6nlbf3LNTnZJMwZjNFJpYPdbo8R8x40OjALl1sShZ3yeKPEZxr7L8l/I6E3xLl/ffGYzxfsv675Hn20XL/GeXDSkdoSxhR6vc2hEviefuUk4xoiJW8bCnv5WqCFU7U25YuTyVHSr6/vJxPsx6qi5f3acp7OY9XmvJMe6q0ZGRHeVEDRf9Ih3iSSaOSS6LEJNXb1yMNl4b19ciHFd7L+3QSlrx8ONU7xBKjS/jsc2/A5vVLsf6bMqz4sA4Wjb8NX4y5EvOKz8fHg87EhwNOx/v9TsW7hb/B3N6nYE4v3idjTq+TInoy5vQ+BXP7/BrvFf0GH/Q/DR8NPAOfDDkbn4/6AxaU/RFLpj6ANZ93xcZV72PzxlXYYlO2qjpNYR1VX1KWpd1pzyx8Gs7zJZtEJSc7YV71S8OGOOLFS8JIn+yEVFjy0y5vI8Qrnw/7ffGU15WGVb0lJxpi0/BeTlhP03Cymw/PsiT898FKh6fC+3qG6SS7Xkco7/NeLintZcN0knyVAIQgL6gH8rwkxeJ5OaaT8JKRrTSs5DzloiTulMDt6PhL6IZZs5J/xY8
c8RynxjliOXznQHunLMdZohMUOUKisY4QHzp6zm4iVnojPd6x8/WRPQsynFNGGV9mNkKdQV7PFlPWUc8h3WF9pINU6RAjbFDO+sX6vd6o7lZ/YQOdtCUs5RJHQGq6BoQBSDwK4qY4OYefu1lxBywuRrd7VrFR480abgvNs+XFtlCcaS4252gI0wwWnpg1HI9PHxbjyWe5RjdqMgJi07JY19QAJP8i9Jz3xL0jSX1usmx39QGp0kF/Ca8+Ud6oMBFVmWRDGpYzb3Xxfa73QfXgmqkZMzCuqAiN6ta10RDucMKtH9N2yfLfEKbTrlDO5/Ut81ivy8syLXnP91if9jJpacqzzF+SpS1/iy+ahFFZdVR2KadLaY9Ne14vK12iHuPTXq9kQ0oZPbOXlx7K81JZlI3zwkpecqS6lFaZ10deEjaUFUZU5WnU26aMLi8f2lVeNjxGPOIlJ+p5HiNbSViVhVSyniq9ZfN6bFz9EdZ8WYDFk++z0Y1Ph56DD/oz2Pg1Zvc6CTO6n4hpXX6JKZ2Ow6QOv8DEdsdiQlvex2B8dDM9od2xmNjuF5jc4ThM7Xw8pnc9ATN7/MoCEwYlHw08HfOHn4eFZdfg2zkvY93SUmxevyRaO5IbWLB+uvg8upj2beSfVXwvqzSpZCUnKr5oiPFYySRhhaNMeOXDqUx2PFU6yZ5wspdEvUyaDl9X6ZBdj8+Xlo4kfD67obzXQ1w+rGRFpSusp9ehNDGS83jxSEP7wrIsvEKcz0uXeCFW+VBO8p56WaZZJio5X0/xRIX3lGX+WZnOCUAo4I1IOMmQl5MRGffyPq1yyXuqsiR5lZFypwjuec0DeKYOHpzjeMsJl1MjZ8bnc9KBk+TL5Cjl8ORcBQ6Rl5HTRp6vT07aOVI5fDpd0S2dvjzUmZQ3nq+f0xfqlg3R1Gd27RTWR/ZMN+2qjRKeRXYSaWAjrKvypFUCkIomtitUvkXoGmWIA4BogbdfeyGZLC87GpENVrI7UsVBg2EZvEQjFlE+O4KRHb2gHa07oVzWrkY1pI80V164LMYFSEkjINWtAWEf6F1w7Wvt7/qJfcZ2Ner6UH1ZRT6SEYbl6h/DeN3+70B2kmxE9qUryWZoj2eGzBg6FK0bN7YgpFWrVrbjCneI4bci7Vuib5en+hZ5XBpe3yNhpEf8fDSUlQ5hZJNUaZVJNqQsD+WTsJTTJZ0hDXEeI6yn1eFD2TCfZM/rDOVVHy+TL028MNKVJB/WQ7KeSo/HhzjJeBzT4nss0yE+xPm8sCFGfNLwEk8y3wcrXcLmo15WadHN6xZg9WedsXjKA/hs5O/x0aAzbfRidsHJmNb1BEzpeJwFGxWtjsK4Fj9HWdOfYWzjwzGm4eEoaXAYShocmnOPbngYxjQ6HKWNj0BZs5+jvMWRGN/6aExq/wsLSBjIcPSEoyMMcr4svQrfzn0F65aUYMvmtdYXehbVMaQqz0eFoYy/8mFU5uWZ9jokQ8r+CvssxPq8xyalvWyS3dBWqCPES0col5T3sl4PZb+vXWJ0CZ+mQ3Yl76nqKayo+KFsmJdtyYc0lFdecrSnWzxRyjIdXirPR9Mw0ils+LyyJ5qkR5ikeks+CS+bpNKRuA2vF/yhpbk1HNd+cGvO5ZMnx2s/zDmJnC2fNkdG/Iiak6N05Czl8Fjm+YFDFcrGTp53tpiW3cgRZF68nHo5RzGpPMeeq5c5jKqbnsdRcwqDfI5d1S+gena1o+okmqOD9n2dAl1JGMOn1FvypHHa2WCdEgOQSh5EmLwLFgOBbUFAFBhYgLBtDUY2+Ng29WlbPgoQwgXszL9Xkt1py5VtC2iywYbyMY0Wl1M/R1d4RomCIo3MZG1vG6mxQCYpAKmoZhcs9Yva2ufdO2nvl8ur7dX/Ko+p9OV7t2TL/004G3pnkmxYWQre6qB3jLvVzZ6NL8eNQ7dWrey70KBBAxsh1XaSP7Tv1//VJ/cX6P9rj//F7bF5DdYvLcHiKQ/is5EX4aNBZ9hox8zuv8KUTsfb6AaDh9ImR4BBRUm9gzGq7kEY9c4BGFtvf0xstB+mNt0XM5rvizkt98XsFvtierN9MaXJvqhssD9Gv7M/Rr1zIErqHoSS+odYUFLW7GdgIMNgZHqXX2J2wUk2peuToedgQenVWP5ebWxc8wm2btl2sN4P9R2kk8bvGH9Q4R1uIfvvrjfrU5M6cBRada7piPS/+1n4HGxb3j/UOtakTfgcauv/xP/z4gBEEYmoj2TSGoKykk+SUbQVUsrWBJuE69mzp41+jOjZ05wPOiRVbu+8BOWJDk8g4/VR3mPMAXL6TTbM59HnddckLduiMSayafVz9qrIBWX5yqU7lAnzlEvieX5auWzUhHodSpNWCUBqugg9GvXIjjJkg4/sKEV2pEFBRzZgYYCxLUDxAYwFDO+V4Km5o3Dxm3+NRzcUSMQBRLy4fNsICGV4RgllbunRxM4WieXjkRTZjQKnpClY0c5f+Rah+zYLHX6VqR98XmnRnIAgeNdjmYR3wpfJjtHob0oBJnmUjeUDGzGWfPfei08cd8ka3LUr6r79tp0NxNPTuZc9vzX+SvpO6RsmKoxoEl7fL4/xuoURj3mlhQl1eJnQdhJWPFHZlB7lVS4q+8qLSt5TleWjXt6nkzB6Zl/mMUr7cl9fn5YsKeX9JbyXFy9JNglLuZrgvX1vIw3vbfl0iE3Le4xPJ8n7+lOWMrrCtPAeo7QwomnYrI4t2LjqQ3P2vxhzKT62wOMUm2LFaVOVrY6yoIMjG6PrHYRxDQ7AlKb74qM2e2NRlz2xuvce2Fi0BzYV7Y5NRT+1e3Pfn2JzlCZvY9HuWF+4B5b12BOfd9wLc1vtgwmN9kNp/QMxqt7BGNPoMIxr/nOMb3MMpnaOApG+p+LTYefYupPvFhRiy8bl8d+knt1TPWsSlZzah3mfFoZ8XUoLG9IQv3z5cjtYkLt9cq0b177mw6iM9pgOL5WT0pZu5YWRnNdDHg8v5AnuPFtDMp5Snjq5MQg3CGGdeRK59HhZpsPnlZwBon9CTFreY3w6lNcz8/wVHoLIOvI4B8qFV4j1edU9xPi85CXLvE9TlrzwEk5UGFKlheUZKDwoku8I21znn4RY5oVlOukSRtTLe3xNsNIRUmHJ52UBiH8wGfWCEiYvVCh58n3aY5JwoR7lJSv7nvIPkid00sl4b+TIrNOS4GDHjo13aiI5OTkmw3KHl3Mkx4ZU8p5XJR06TIFe2qCe2JavV5SO7YS6wvol5GNsWn3z6cxXJlvVyHj7cZsFmPjZpTOiafxYD+UiXWrHKgFIyja8PEiQC8Vv6tIAj00djNv7tsatBc3wx3Z1bGeq+8YU4KaujXBzt0YWDHCdxi09m1qaMszfXtTatt+lrruLu+C6Nm/ZuR43dq5vwcNFrz1luqn3lp5NcH37d2x7Xm7FSwxHOf7Uuzlu6toQD5T2xj0ju+E3d92IP/dtbdv18tBEnnB+e1Er3Ni5AR6ZOMB2zrq1V3Nc364Obu/bynRUXYSenXaWLwCp8p6GbV/DPkrTE/Z7jpx0iwa2c2
QTylJ1p+ij/MqpU1FaWGjfB36Q+R/mt99+m/OR13fGU323RP03J0yHOGKEYxkvyeTDSiakIT7Uyby3p3Roy+dDGz6fhJdNXxdhJM+8TwvjqTAhTcL5+iod4pQXXnKeUoaXZEWFUV4yniotGVLihBVVueRJeYlfHY3Eq5DqcCqXrVCBytNoEo6y4gvnn9OnvaxsCxPTLRux/ttJWDrtUcwbfp5Ng+IoBEc8OE2qtOnPMKr+IRhd90BMabof5nfYG9903xPrC3fH5qLdsKVoV2wt2gVbC3cGCn9i99bCnYCcW/ydsbVwF2wp3NWClDW9dseiLnvhgzb7oKLB/hhZ92AbWSlvfqRN8eJ0r7l9TrHF7l+MuQwrP26a3cI3ZUGwntFT3wbxMzt82F6SD9vYY4nxtzArV65E7969cckll2CnnXbCiBEjct4xYbwupX2dlVaZp6qvKMtUV+FIWX7CCSfgnHPOiU9rp6yvg+QmTZqExx57DPvuu6+dUC59+ex6W2Ha49LSSZg0u6wzj3F46aWXcOihh9ozJT1/mi3yJS8b3n4+nC8TljylpcfL5UsvXbrUAo+TTjoJ++23X7wZi8eoruQpLTversfUJO2xSleHk13JxSMgZKhyTIeXeAJ66nGSE97nPUZpYnV7WeKVlyxP1KxduzY6tmiBpRMnZoODFIfEOzjmzKTJpfG945tPRs5TmozjpzlV5oQ7OV93Ot/CicblEaYKPwxEpJtU6aDesY5Ap/EdJpZz7cP65PAj3Tk8ZzvmB7asbs6WPafLE8e7pgHIvaN74vK6z+PC5x/GbYUt8ce2b+PMh/9sAca5T96Nu4Z1xrGXX2hByfnPPmgylD/nibvtAEFifvfS47itsIUdSHhfSU+ces/NFhhcXu8fth3v8ddebAvQz37sLlzR8CVc9OpTdlghAxAeTMiA4srGr+D69nVw0StP2k5Zx1x6vm3Zy9ETBjQ3d2+My955Drf1aYkLnn/Y6nXC9ZfhloKmuOC5h3BvSY/kXbCqWQOS085qf7V50Ecmq7KAqh/8e2p94d+hCCPZVOrl+N7oJl9lEc2pfyQXyvg85ddOn45JAwfi7dq1oUML+R95+C3RN8VTfcf07UqiXp5pYURDjOT1PVNeVDjmw8vzJB/SfHjpCzHME6db5ZIXFb86KvmQyk4+fIjx+Xw4lvkrKZ8P77Fhmjjfrkn5EMO87P0zWI+XniQquywLr1A+rIdseCodIZZ5j0/ChNgtm9dh7dfD8PWE2/HpkHNsutWMbiea8z+u2c8xusGhNmWK06i+7rIXvuu9B7YU7RYFHD8B+uyErb13BHr/OHv32gFbe+0AJNzGl1yfHbGV2MKdLYDhCMmKnntgfvu9UV5/f4yqezDGND4cFS2PwuSOx9u0LK4P+Wzk77Bs7kvYZFOyqv978G1Qk/ZS+4Q0CRvyZItHDvCX7Uwmg2HDhsXvmJdnP+kmP+ny8vnSSVjVhVPfedPplQ7arVevnp1mLjlOCyovL8eRRx6JP/3pT6ZS8mk0za50puHET8Or3FPWmXXkifC//vWvceqpp1r7hTo8RmlilSbNd3k5pj2Wa5kbNmwIns7uL+kMsWFeGPL5I9stt9yCn/70p4kBSIhVnjp82ufFJ1W9Rb1tnlLPHWkXLVoU6/JYpYVl3l85AYg35oWYliLJeIW+jOlQXnnRUN7nJUPqL85z69Wrly02HVlQYKdpm5MTOFKeV5O0flWX82tOlhyrBGqOlxu1+E/P5ziKLpCorh3i9gqcRt/mOTok59o0LI8dTifj9TFNmSoBSMoULJ5YzkDh13deb1vpchTidy8+hocn9ge3zuWow88vPNN2tbrk7b/j6qav4brWb1mgwF2trmryCi6v/w9z/o+59DzbxYrlJ95wOR6q7GvrSo655DybTsUAhCMsHBm58B+P4MHyIpx069W29a4OJOThhI9OGYRfXneJTdu6rO7zdljhFQ1ewNXNXsMjkwbiN3fdYCevn3TL1bbFL7f+vWNgu+QApKKaXbDU5qJRu1o78x327ewCXe/Ux+9Hkg73vgijv4cq/RbJxn0se2E9XJ28rPpeevUMyss+KQ8DnTl0KJo2aJDdLa9zZ3zxxRfx/OV83y59v/TtSfo2eR7T1JekUzpEQ5zywkqOVGXiKS9Z5UUlJ0q+LslURyXvqTChXZ+nvLenvLD5qLflcT6dhg+xPp+GUb29rNLE8ErDkp+Grw5LXBrW2/c2kuohWdUzpEkY8ZKw4n0fu8KIGnbTWtvh6suxl9laD26dy1EPOv1cMM51Gly7saTrnthU+FMLOmyEo0824MgGGtsDBby3A3r+aNvd40dAjx9hq+dZejuT3UqMghUGJdRZlA1GOLLycdu9UVZ/f5TUO8QWuE9seyymdzvBFsJzSha37d24Ypadrq4+4jPp8mnP87KU8TfllBdGVHxS6li9ejW++eYbmz5Dx5h8XSzv0KFDlQCEfN4MUOiAUgexX331FeZyNHjlSqkwffSf6PSSv2nTprhutMW81kNIL/VxxonyXCfBqVeyQxztcSrTiSeeiFGjRpkO8njNmDEDv/jFLywAoSynBrGerAfzuqlftlUv5lnX7777LpajPKfUqv7ESQdp2kU5te/atbkbELCtzjzzzDgAYVvSLqnX7dPSx2eRXGib8qy7+kXPJT1sI+7kylELthOflzyW65KsnpOUbcc20DQrybIef/nLX+IARO1HOeGlz1O2B+uodqY+lnsM+53vAW+tlZEc69yxY0f8/ve/xwcffGDl5Hm87ImnOpPPK6MCCYpK0FOViVaHlRypLs/zaa9Lsp7yZeEuN/Xr1MH7nH7lHRU5VnJyXFnsoDied2yYztElR4tUTpL0RieB08HhCd88EXzDzJn/UTfrrNPW6cjzXIW4PVwbmcPH53ZtG7alyrys54XyPh86kKl1UH+oHt8jALn49adx0atP4pwn/oKrmrxq06BOf+A2cNrTaffdaieaH/Hb03DnoA42wnF9+7oWdHDkgutAbuxU3wIWjqT86parLOi4stHLNirBcz44revIC8/CEzOH2cnnt/VpgZs6N8AFzz5ksif88VILaDiqcU3z13H6A3/Co1MG49jLLjTMxW8+AwYh17R4HZfWec7qw/NF/jK8MzgCwhPYf/v0vRbYVJ2C1dR2/qpuChb7Q20bUuuPhPb1/eTTm2fNxHeTKrB17hxsmDYZm2fPAnlrJozDxhnTsHH6VKwZX2b3plnB4aDOjurhdafxyNczVHm3Ip0mE+jn9txzi4vRijtkvfkmOnXqhE8++ST+D9V/e5TWN0jfHfGZV7o6KtlQh3CyoTypeMIk0SR5zwvtSodkaEN2xPPU48UXTzhRlYd58YVTnrQ6WV9fjycuxPq8x9GOLtn0sr4+oazKZFt2Q3yYT9OTJCee6ihbsu2pZMXzeeFV5qmX8/ww7XXUpB6SScRtXo/vFg7EgtLLbQvc2b1OxuSOx9nuVFznUVr/AHzSbm9wihSnWFngoZGOgu2xtWA7bO3Jm0FGLaBnLWztUQvokaVM+
9vzTdZwP8oGLhaMbI+tvX9soyIo3BmbinazaV4zm+9ri9zHNj7CpoNxSha37WUQsnTqw9i05uMcB8w/M9vPX2pPtrdvc6WTZIURZdDRtWtX/P3vf8dTTz2FZ5991vJ0HHVRXxiAyObEiRNtPewzzzyD5557Di1btsTDDz+M8847z85Goh06o1yTwW8gp0U9/fTT5kfxl2vVg6MVr776qt0cFRg5ciT+9re/4dFHHzU95HEU5pVXXkHbtm3BdQd0hocMGYLrrrsOu+66q021oo4xY8ZY1RWA3HTTTSgrK8MLL7yAhx56yNZccPqT2okOcOPGjc021/fSFtdlUJbPVFFRYYHPgAED8MQTT9jdrFkzCxRYf+nRs9C40nSwOYWN7frkk0/ixRdfxODBgy0goQx/tWcAwlGQadOmWVvS7muvvQZOI1NQIBt0wgsLC61tKMedWCdMmBDbo20GSH369MHzzz9vfcp2ZJsRq/qy/PLLL7dpdewv2qusrLRyX3/ZJY5tyGnFL7/8srXl0KFDLXCgvA9AOEuIIyvUyzpwLaQCDOmjPAMgTkHjc/zjH/8A9fngkLJcc8S2fvzxx+1u3ry5/ZDH+ixevNjWzvzyl7/EEUccYeUcCfv44+zfEGWSbtaXl8ryjoBIKKR6kHx8byRrMvtviAnzXlZpysycOdM6oHnDhlg5ZUrsGMe/fAbOs3duaiTjnO0cbOTA0VlfP3MGhkwZgzoTB+HtCQPxZmV/vFHR7z/mfrOin9V5ZPkILKmsxJopU7BhxgwbUUhyAON2cM5dXl5SHwgrmiRDXkJ5Up3Iq+kICKdQXfz6M2DQcG3LNy0AOfOh221a1nWta9tZHUf+/mxc2+pNXFb3H3hwXKFNhWIw8OjkQXh4fD8bmeBIyJ96NcetvZvj8vov2LoRTtnimo2zn/iLrf34/StP4PoO79j6EE7jerCiLy54/hHcVtQKl7z1d9De5Q1exMMTBtg0LY6UXF7vBQtAuC6E9eMoCEdlOJLy27/ej3tLelrgwnUpVQOQGqwBUZu6QFrtHLdtJGN5n3aBC/uc5cvGDse4tx7Ct6OH4uOeLbCyvARzOryNiQ3/islNnsOiYYUof+dRTGz4DFZVjIkDnxybCTYUWCTWyb0bLJeMUZf3NiTHHbLmjR1rO+bx3JeE2DAAACAASURBVCD+gKH/ePUNE036DpHHy9NQrqZ56aE93SFWMqKyq3xaXaVHcqLCqzwfnhhewkTZ+D+KfDokG1Ji8tmULelOwqssiVLe6/D4JHnP81iPE9/LJqU9huU1waktPNanvR3Jep7seIzsigon6vHCSY+osF42KS18Dt26GeuXluKriptt5EPBBxeAj6p3iK3F+LrrXhYE2LqOPjtlp1fljHRkgw4GFlXu7rUA3SzvXgtbmY+CEi/P4CUeJeEoSq/tzdbWwp9ga9GuWNt7d1sfwp2zuOtWZeuj7QwRBiHzhv/WpmNtXrcwfufVLv55k9oliecxYVry/PX4Zz/7GRhA0DH+61//iqOPPhp0xHWxH5MCEH7HzjjjDNx2222gc87g4OCDDzank9PUFQgwmOAUo7vvvhtFRUV2hMH555+PW2+9Nf4lnU4/nVBO4aGTe8MNN5gDy/UR5557rjmVdJoPOeQQWy/BERY6sbR72WWXYeedd8Z9991nU14Z7PD5FICcdtppZpvByf333291/OMf/2iOOuUYbNGpP/744+157rjjDgs8GHz8/Oc/x4UXXmiBA3/h5zf8yiuvxP7772/TvtSOpLp8mgEZRxkYeHA94J///GfTWVxcbHVUAMLnfOSRR6wNGKhw6hjXuixcuDD+fvF56dhzHQyDgC5dulgQ8Zvf/Abvv/9+/B2iHfYpAwD2KZ38Y445xqYpsW7sT2L/8Ic/YPvtt7eAilOFGUzyUv1JdTOo+N3vfmfPzJEmBqynnHKKBWfEKADZcccdLchkMMv3gc/OdlWfSB/rdfLJJ1uf9ejRw0ZPKMcNBiTDgIn9xGCW7Vi3bl0L1PjecESHbcP39/DDD7c25bvLRf0MWpK+PdKrfhKtEoB4QaX/GSoDIaUuVlCVFJUNyvs084zMGKEx4uQJyHIwYmc4cJY8/59Jy8mR48VRAo50LJ88CXeU98COlc3wY39XNMOP/xPuqM739mmOcT2649PiYiyfMAHrZ8zIOxKS1Ia+jZLKQ56czJAf5qvTy/IqAYidA1J1G14eBMiF5E/OLrZ1GlxYfv6zD1lgwcMGORrB9RgPj++PJ2ZlDxXkaAanX9Hh5ygIF5FzkfjT72bTLCNPcszH96xim6ZFXTx0kHzayWJ4mGFxNpCw+gyzAIhlFlxEOs3mHOrPYp+ckz0MsUoAEuyCVdKhg/XnqkmTbGSO7VTTm33g2z1OK4CJyr8u7ovSN+5DZd3HMaXpc2B++PM347tJlSiv84gFINNavIRPe7fFljmzY53qe6tPFIDKhmhOHYIgJSe4CPAq03sU6mN+QUUFunCb3jfeAH/F4X+4/IiG3x59d0j1HQopy/z3y2Py4aRHWE+lQzKi0qe86isqnKjkRIVXeT5KTNIljGyKik+adnkZpUN8Gpb8JIzHf1+s9JH6KynvZZPSaXjK+jr6tPR4rE8Lm4SpDks9wks2pJKRTZbrCmXDvORCumHZZHxVeSM+Hnymra3g9rc8m4PrLrjWY1XBHub8c40G13bYdKme0RSrnIAjA/Sowd09kHH5rZaOghgbSYlGRbiGhCMuhT+xnbQWdN4L4+rvb2tSuCMXDz98r+jXmF98Lpa9+zq2bPjG2jJ8VubVZqRhP/l8ElY8tS1/Gb/mmmtsyhSxdOzpsHPRueyQHwYgLGOgwICAu0xRhr88n3322bj66qttmg6/b3Tu+ev+xRdfjPnz55tO8unwE0uHkVjeHAGgo7zPPvuYM8zpVhwx4a/aHAngTT2nn366rfcghlNy+Ov93nvvbaMm9NOon5cCEE7DonNNJ5lOLUcjdthhh/gXfz4Lbd11113G545OHLmgPJ3jH/3oRxaEzJkzx/xAPsdZZ51lzjV/hVdb+rZlmlOC+DwMXDQViSM9bA+OOhGnAOSggw6yYIo2OVrANQ0Mqjhqzuek7Lvvvou99trLRjX4HGyP0tJSCzZoQ8/dr18/XHTRRXYeFbFc43HBBRdYwKARFU4JY4DAjQWmT58eT8FivWlLVM/GAIojDRx5Io9twJEljtLwUgDy4x//ON4JizxuWsCF6Q8++GA8msO2OO6443DFFVfgs88+s+fj/4cMNBiwsG60we3sDzzwQAuG+Wzs27feestGuxio8Nk4usa+YEDLIMxPJVO76Rk8tUpH/yQGICzzgO+T/u/Acl4e/wj5ss8pLq667W7wK6k5M6GjkpR3DlaOA+P4dGAsAJkxA0snjMdNpV1s6sv2ZY2xx/D6du9Z3AB7jmiAvUY0/IHeDcA61ipvgkxlU1zb4S0UNmuKmf364etx47Bu+vRtAYh79rhNorZTXlSOpfL5aI5jmKQvdDqdjNlx5VUCkMrsc+WeA1JogQO3yqVTz/M2
OEXqmuZv2NQpni7OXbCubPgS7uMib26BG91MK6+0z3s5O6U8kpeMylXm854neVHJMe95lrfT1Ytt/cpB41pZP2ZsDUhDvNmwNka1b4+Phw+PA0qbZsepgsHNtuPNd1rT73zf5OtDBhxTm71g9/DnbsKi4UUofv5mrKoci7LaD2DpqIGY2eZ1fN6vcxx8xPrceyV7Rh3fZF0/C0s5YchTOocSp9vJUJ7P+QXPCmnd2n5F43/8H374YfxR9t+3/47vl9f/vy39f+217ZfKmvTtv9JeNdGfJvPfYZfb2C6d/hg+HXoW3i08xU4w58jHyLqHWPCxupcPPnaw9R3ZEYptU6oYdDBwsODBBRMxPyq34CSSywYaLhAR3gUw8RQurhXhFC+OuDAAsiBkNyzssqetC+FIyPg2R2NG9xPsVPYvRl+ENQt6/9P+T1r7J/E5n5+OG51CltNZpTPHX7clT6c1KQDhtBj+Uk+nmrILFiyw0QiOdsiJnDx5sjmglKUTKZ0cPeFIC6dPaXoOz1ejk3vsscfGU3EYGKicWI4+KACRLk7tYgDCERfxSBWAcH2ApvbQKeWz7LLLLigoKIjlWd8HHngAu+22m60pkR5OmeXie5ZxahP5bA8636w/gwLJhrRbt2444IADbNRbZZzuxfYiJU8BCEc15NyTzx+7OZrEURth27Rpg913392CFznXS5Yssb5im1AXZfksnGLGtmOeviunW3FUhIEVeWwP/pjOAITtLhtplFPS+CwMjNjPdPRFiVEAwqlwnCkkPawHR2g4SqXgh1PG+BwMBPVOUB9H0rhrGfuNz8dgk8Eag2LpY3DFenC0QzwGvQyw2FfipdGkb1B8EjpBurwCVsbfvswr9GnJ5MPJlnCiwnrKxmbE/1bt2jmHD3qHRc6KOSjOeRVfzorynlZXFgcg48fjprIu5vztObIRLuj0Fi7qVheX9GmMywe0xBWDW+PKIW1w5ZC2P5x7cGtcPqAFLi1sgh3Lso761e3fREGjhphWWIhFZWW2c1DsjAbOm2/PtHYi35f5tG/ntLTJh45o2IfOMa0agDS1PmEAcl2Hd2zR9gPj+kSjHyPsvA4GIk/OikYW5ow0HkcXsqMT22Qo90O8n5wz0gKqh8b3xYFlLaMAhAcRNsQb9WtjRNu2+GDIECytqMDqKVOwdtq0xJvBJke8OKLH4CTud9f+Vfovcuy/KRmC97o0wFdD+2D0q3/BstJizGrzJiY1/jsmN3kW66dNwvtdG2HhkF72PkiPpzlp9rGzq7SXqfL+ufcg8X1y76LXw+dcWF6ONk2aWBDSunXrKjuQhN8gfr/0HfJp8SQvKj5pkjz5aZfHKi0doqGdMB/aFc7bDdOyJeox4n2fOhPjddQU6zGyS5p2eZl/BSs9oY40u+QnYTy+JljpCOm/gqWu8BIvtJOWD/HMJ8lu2bIJaz7vgs+Kz8P7fX9jU5kqWh5pW+xOarxfNPKxi+1sZbtYaWF50qhHEHiAAYWCiagsJ+hI4ilQYZnKZSseDckGIVwXwi1/v+y0F0rqHminrk9sd4yN4Hw06HQsqrwRm6P1IEnPLl5N20rynhJLx5Fz//mrP6fq8JdqTgfiom7J0nFs3759lUXoPI6ATjLXYfCaN2+e/Rp97bXXmqPL95EOI51KTl2SPsrSAWaw8dvf/tam0pCnAIRBhhxTYUSvuuoqC0DolOpKC0D4yz5HP2688cb4e0AMRzX22GMPm4YkHQpAOBLh1xBwhIMBCKeosa10ccThqKOOsmCFPNXPUzrtHM3hr/W6WC5HnDwFIHS06YQLz/URXNfAaWm6OO3pJz/5CS699FLrJ/bVvffea+3L/uKIC/Fs2ylTpthUN07VYvDH4IP6GMzxoq2kAIR9yXUbutnHXPvMH8s43Y4jEuwfTuHSGVe0ycCGI0icQsepUeTxYltyKhmnb/G5eXEjJ47uUB+fQTf7as8997S1Iawn3w9Odbv99ttjGY6aMHhksKK2UgCifjMjKf8IQ6rvZbUBSBKI+sn3VGkvny9t4OCfNHktturUvDm4uJSOhX59J5WjIYdE5Z6vdBpO5ebwRE6MdPsA5Oayrub87TeqMW7s1gh/HtAW943uaWc+PDp1MB6bPtQcX079+SHcrBN3fbp/bC/sXN7M6n5dh7fQp0ljTC8qqhKAqA1zqHcSXWCgNiO1tkrqC/Fq4DiGbZ9Th8gubaUFIAf3fwfXtH3b1l1w/cQjE/vjsWmDfxD98K++C1yQzl2y7i/tjQNLW9goXKaiCfYaWR/1GjyLsW1exwd9W2BhcWcsKemOb8bo7oZvxnTFt2O6YtmYrlhR2hWrKofb+h8GIlWCEN/XLm3tzo0Xpk+1xefrpkzAltmzbPH5dxMrbFE6ZVjOBehMhwEF88YPA4/gHTFckmyo0+Xjd0V1dmWySfpVRQU6t2xp07E4t5VzVv1/SmnfoJAffLriD3Ioxw+tbpb5y+fTcOTrY+2xTAsfYoUJ7Uo+SU+SDvFCeZ+XjKjqqnwoG+YlJ+rrTF7aJXlP89n2ejxGaWFFQ/kwL5ynIZZlupT28knpUF550iT5kOflfTqUS8t7jNKhLJ9zw7JJWFR5Ez4ccBpmdj8RE9oebVOaJjTeD8t77omthbvablS2u1W0wDy7bmPbyEUcZETBRhxkuCDCgpEooNCoSBxgiK+AQ3kFLwxKLK1pWT+qMhIyr8PediZJadMjMKXjcZjb+2R8OuwsLJ/7CrZszP7qrudX/4qqfUIq+ZASJywdVTqwnPZCR5Vz+zllhlOwqgtAaI+OKWX56zqnCnFxOUdO+Ou9vmccZWAAwh9vVRdiWf6rX/3KAhZtA6sAhNvmCq+6CvvPBCDUJzwp17ekBSAMvvRLOm0rAOEv7j4A4XqW6gIQOvgcmeF6B1207y8FIBw10jNTZvTo0RY0MBDgRR4DCQY0XExPJ143AyoGDpyWxSlprCtHVLj4nGtNOOLC6U01CUAYiHLNi246/3Ts2Rb8f4pToFhXBp5cn6HF/D4A0bQ01ptYruHxAQiDF46AsA31DKTsF67zYQDD0SaOhPE5GPxKju8Tn1ejbmyXtAAkbGu1I/n+tgBEDC8UvnySIdWVlPZySlOX1xfiJZdGOWeRL5S231UQYQ6rc0zFj2nkjCgvBzcHJ3xEzZGRExM5RkkByP6jGtuhdXcN6WgnW9PBfGrOCJvP/8y7o+BvW0/geEl5ypPvcfl4XjZM+zynHj0+Y6jtyrRLRfNsANLxbQtAZoQBCJ/XtYNPW7u4MuXl3ClvTmYgl09PvjLqjMsVFOYJQPYdXBe/7fwWLu7bFFeO6IDryrrj+ooCXF/Z63/BXYDrxvXAVaM6Ye/SZtkApLwJ9hlRBwUtrsC3nQ/D2h6HYEPBIdhYcBA2FRyITQX7YVPBPthcsBc2F+xh93e9T8LS4kZYNr4Sa6ZOtfUiGgWxPtTfjBx4HwhEfw/qc/WN8tZfwkfvgMpE4/ck0iVMXO7s5pS5/s+R9XrceycZUdndMncuvigrQ9doTQinBHBOrf8+hd8hlYkmfb/IC3H
MC6MyYUXJrynW60hKi0ca2iXPXz7vccIS73Wk1THEerwv83if9jaE9Tilfd2FT8KGPOIlH6aTZMXz9oTzelQvUmHE81hhRCUTUukIsT6fZEt6hFeeVDZFk2TIE19ywpoC915LjlOvlr//FuYPPxdzep+MyR1+gbKmR2BMvQPB9RVb+u62beSjynqPbQFIPK2KQYILHhg0+OAkzivQ8PIOJ31GoyBEeuJdtGxKVrRLVuHOWNdnd8xqsQ9K6h+CylZHgjtjvd/vN1hYejXWLR6FrVu2TV1iuyS1l28ntaGXE85Tzt+nI8jFytwNi84rRzE4KlFdAEI9HKXgqAkXjPOXev7iz2lQ/PVa/cSghk4zy4nhzYu/nvMXbzqmnCJEvgIQ/rrtnXGW6Vl8ACJdaSMgmoLFAET1Iaa6AISONuV41yQAkay3wTSnKzH4Ck85p7wuBSCcpuSf2Qcg0s+F/dRHJ50jGLwZRPJmmjYZPDG4YhsyQGSf8swUjpooAKG+tBEQ9h2nh/Fmv2jLYfKZZ6DBKXucPqXAkvYZnGkERAEI7SQFIDxLhu8dp5ep/p6yHVhvBjrcIYzrQ/S8emaWq13CAIR8fykv+ZDaGhACwoIwzwb2nRyWMy891clKzgDunySdbBBu08YFpLOHD982+iFHJ6LmYPi0c0bkfIiGTgn5MS/B4cobgAxlAFKEJ2YMswXHNo///dF2zsMz74/G0++NtnMiLJ3DL8Gdg9rjykYv4eI3nrZTtS98/hHcMaCdHTx3ce2/4p6SHnZiNmV4wB1P0qae3LvE5Uvw9PvMb+NxOtHjM4fhofH9UF0Akq99VPZP06T+cn0Ut7/j5dhiv0T9lDYCskNpI+wxqiH2HN0Ee5c2xz7jWmDf8pbYt+J/x83n2busOXYoz27BmylvjD2L66F+/UewoP1R2Nr9R9F2ldn/wOP/uHtksKX7dljS9SS83+tlfDx8ML4eV4ZVkydbAML2ZPtvmjkdy0tH2KjG18X9jHKa1fqpk21a1ZIR/bFp5gzbDWvBoJ5YM7Ec66ZMxPKyEVhdWYolIwfgy4HdTcfi4v5YPb4M300sz27LO3OGrRH5cmA3cPREoyPqY+v/8B3R33PI5zvieBYIBbKmN+DRBmX59/zluHFoHW3Ry/+k+OHWdyn8DqV99ySvT5hwafL6Roby0sPyEOvzHufTHuflVR/ZzZf3ZUxTj3TJVhINcWE+DUN+KKu87DKvy6erwwovbEipyz+f7HrqMTWxLZuhrPSIT+plmQ95woRUsiFN0peETXpmjw0xyqt+JsvRjxXTsXDcNfhowGl2nkZFq6Ns6tXclvtgY9FPo212f7ztPA9NhUoLFsjXHQUXOaMhPuCIAgsbDUnhxwFIjk63wxa37eWaEB5gWLQrVhRk14OMbXIEJrY/FnN6nYRPh52NZXNfweYN2fUCvs3VZmqfkHrZMC0spwZxzj53QKIML67Z4NSXtACEoxuS5ZoFOn90lumg0jkNp05x6jpHFbj7k9aF0A6DA04L4toKYngxAOFOSEkBiJ6B03846sIF4bo4usIgJzylnVOw+Cs6d03SMxOjAISjNryom3XjDll+BIR8BSDhFCyNgHC9g3TIhurKduGULm4lTMdZF9uJoxW8GCTQyU4LQLgTly7q4y5TDLjosPOiLdqlHqY5nY4LwTn6oYCGoxRcI8IAhGt9eNGBZ79zFyz2heosqmcxYQDt2rWz7YSZZxkXj7PefDa2XXUjIBxRYX148Z1g8MLAkIGvbJLqOZhmYMwdrtSvxJLP4Ij2VEeu/2DQzGliLNfl055HnLCUiadgyQCZ1d1SKOqNCRsaEl9U2CS7voJsJEafTerXN6fBnIjIkaCTYU5p5LTKiZGjYeVyVCKnRU5sLOPLIwfX9Did3ycAscXDDBLeY6DAgCAKQLjA2dLZIIEjEzxk7qYuDXBdm9o2jYsH1d3So4mlL6n9N9w5uCNOu+cWOzWbOzhxWpHpYFAT6YvzZnObDQUpVQKQiia4LmUERM5g3K5y4NQWCtRcMKC2Es1pc4dXuTmLamfpi/TLvsm4PpazKd3JAUh2W1pOS8redNLdHR1WaDyfpgzzoWyYD2U8zpcpLVpTOcoLE6Z9XeJ0E2TKm2DP4vp47Z1XUdL0Cazuup/tJOMDD/5Hzm0r57U7GaNavIixnTpg7sCBWDBmDFZMnGhrQRSAMDCY0/5tfN6vC0a+eLstJp/Tvg6mt3wF01u8jAkNnsFnRR0xq21tlL55PyrrPoHFI/phUqO/YXz9p/D18CJMaPA0GJyUvHwnprV4ER/3aoVPClrhox7NMaXJc7Z9L4MV9WXc5+xv977YSJr+Nkl9ucsTk/NOOTnT53SabJTnSMjnZWXxOSEMQvhrlf/2+DS/W8qH3yx9y/Rtk5zyngobUskkYUNePmwoK70hRnmVp+FYrsunQzzLknSEctIV8lWPkHr5mmLSdJAvHUl19TjJGSD6p6Z4j1HaY72dpDQxkvd48mpSb2E8rQlWdQnti5/VsQkrPnjbpinN7XMKJrU/1tZQjK2/v51svrWI6z52jHa74gGCcvyjxeZhABGNdtg3KyVAYZkCDqXjIMPr82tHHD9XNjpnhCMz3B2LJ6j33Q3z2u+NUfUOQnmLn2Nal+NtQfqCsZdi/bLJqf6Qb1+1mW+rbHttW0fm8/wlm1OEuEaCzj8dUfo5HJmgs8qpUfxlmw4jHXCuheAoB/0hBg10og877DDbTpW7LnFbV67/4HapXBxNW3Q6Ob2Lux5x6gyn1/BXce5uxUXcHIWhHJ1YbqnLHZO4ZoCL1LmIWu8aAxwe5MrpYtwal1vNchSFFwMKLkymHdaVDirLOF2HAQCnHxHLIIC2FIBwTYV+1eev7Nz5i4vQuf5CC7gVgHDRudZYUAcDEK6H4BkfdOpVT1HKsM4MphhocftctjfbmDtKde/e3XAcMWJ7M1BiH7C9idUIyPXXX2//J7C9WVfmOfWLC9LZjtTHmTmcOkUcR1RYL454cB0IAzXuIsY1Jtx1jIc2MvhgPblIngENf1yn48828/WnPt1cp8H+5QgL+5bb6jJQ5bbyfE7Wne3HtR0c9VKwqREQ1pl9wz6gDQZWrA9Hxrieh0ESdfE9o01erCs3OeAmAtzKmSNTbBcGswo2KMtphAxUuN6Iz68gS3pMWfSPnsfTnACEcr4wrUGSlIr3ffHCyTZtertsHC6i4rztb7ntWORAxM6K8oETK8empjTWRz2B01IlAKloingKVsIICIMOPwqRzYuXDUq4Tetl7zyHc5++x4IQBiQXv/EMrmjwoh2MxxO3ef7E+X9/0IIObv3KLV29XgtwLBDZNuKhwEO0SgBS2bRmAYhrA98e1p4sU7lPJ/VBdeVJmASe+ofUApCpU9G/fBgOLW+1LXiQ4+5pHABEgYkvC9Ny7slnIGM0Skdb/WZ5rqyKLOUdhjt0Sa/VJQ0b8aVP9nJ0uSCF5eVcA9IAbzd6G4NbNMPczvdiY/dd7T9rTWnQf+wrO++FWa2vwLh2r9nmA/NHjsQ3lZW2UJ2HU7
JNN86Yjnc71kXFO4/ZVruV7zyO9zo3wOhX78I3o4fgk16tMKPVq5jW/CXbfpcByaJhfdDv0YtRWe9JO6RwWvMXsbi4H0peuQOTGv3d+HM7vgPyeXYI9S0ZOTAOKNSvfK+UFtXfbpj3spLR+5gkG8vQht4tvkdz5uDDkhILQmq/+ab9h8YPvP/++O+Z0v6b5dMqz0f1naOMv/JhVCb5JGxY5zDvMT7t65OESZJVPZKwoQ4vG6apu7o7xCjvcbQZ2g3rLRypx+ZLe4xPEyN7ol6Plw3T1WFZnnZ5G2lpj/W6JJ9UX5V5rE+rnHTzhsVYWHY5Phxwqu0cVc6F5/UOxsft9s6delXggg/tUuUDDPEYKGikwpeHAYSTD0dHLO+xCYGIApcsZRASbdHLk9OLdsa6wt1R0XB/jGl0GLggfW4frgU504KttDbzbeTTvr2I1e35dGo5esBfkLlLEZ1lBgHikdL3IaVDy0XDHIGg88tvFH9t5y5W/PWZzidv/irOIIJTdFRnjpQwOOCaAf6qzQXcdOg5lUijH2PHjrXFxsRzzQCDAzqdLGeduUUvnVPa4qgLp/vQ4eRF55lngPA5OIrBH3K4NTCfh1u06hwPBiG8+Is6AyauXaETzlEMTiXj7l+0z/rR0WX96fTzuSnPOrEuvDgKwUCJzvD48eMt0NLz+jamA866UTfrR5s8eI/BCHF0nvm8fC62DfnUw+CBbUvnm9u264cpBhQ8L4S6WMZF2dTBqXO0y4CPm5twahv745ZbbrGAS2s7+Jw6h4pBFwMp6uJIBvueIyu+/nxW5nkYI/uP7c51KXfeeacFPtTFduUBjWxntt8dd9wB9ifrwnL2CfuB2/4yiODFQJTvBNuV9vkc7C9uK0x7vBgosU4MuthGfEe5ZTQDLl9PrgfhjlnUwfeOu2zxkh7LRP/4Z1N/VVkDIrCEKahbPK885DEveVEv47GqnHihHPOM9hiA9O3YEd9Nn77N8ZUjEdEkxyOJJ0fFOyVh2uOYrhKAVOYPQOT856McwXiosq8dgvfrO67H3cO72CF0OijPtoht/DLO++v9VYIOr5dTvHw+G6AoICmxHZ2+1xQsBRbeWQvamu0VO3JJZU5H2LaWj8q9Dt/meTFRAPLdtGkorxiFW8Z2wTkjWuLUIY1x8oAG+FW/ujixb12c8L/85nOe3L8eLixqgA6tm2Jsp06Y1KM9Fna/Apu775ANQqL/xLNBSC1s6PYTLOlyHD7s8SQ+GNgHX44ZY9v2csesjdwVa9YsfFzQEoOfucZGP0jnFbazgwXf79IQU5r+wwIIjoiMffM+2/nKdsR65S6Mef1um4LFERDugsVtealr2LM3Yk6HOpjZ+nVMa/kyyus8anz+HVr/610R5fsUvB/xu+FGN2IZ4Rz18vauSp9sRjYox8MK3x0xAg3r1rVhcU6N0C9wL2+9PwAAIABJREFU/H4lfZPE0/cr/GaqPIl62RDv7fm09CRhVSZ5T5WmjLCiwoV58j1OWFOQ8I/0eOrxHhLqYt7Leh1Ke7xPq9zTJF0eo3QSJsRKNokSH8r7vDCUCy9vOy1NTHVY2vM2pSu0p7zKq6Ne3qeFW7dkBOYPPxvvFv3att0d2+RwjGu4P5Zx4TlHP3jeBqc30cHnYYF2cGC07sMHCQkBhn40yR2xcNh8eI6kqNzTKJ1blh2V2cogyUZBfmLB0yft97FgikHVjB4n4qOBp+GrimuxZUuuY6i2UPt4qrLqKDGc40/nnr/685doOnZ0DsnjL/4c5SCfB9HRKSblr88cuaDDydPJ+es3HW3e5DNY4S/u/htGPXT0+Ys/HUY6zJpGxHrwl2tOmZId/rLPX8z1jtEBJ4/lvCmrxeuUYRBC55X6+Ss9HV8+g+pMx52/jLNNOEJCXSxnPZhnOWV5UzfbgLJ0gqVD28uyvpyCpHr4H4z83wPTdMJZzk2MGFCR8lnF1zNLF0chaJf9wnYln4GCRkaEkz462wzwyFd/c/SBowfsU9aZ05U4kkNbDCg5YkFZYhg88IRzth3TXg9ldDEQZFvRF6Ys9bN/KU976hu2FduVz0g86833iM/B94gYXsTxnSCO7cJ3gv2pUSrKEM8810cyoGFQyH4KR2r4HtEGyxnUsT+Ft4T7hzr1TqmvEs8BoeAP4WYleXonA5BRvXpZIFDFOaVzIefDORZybmNnxDk2XgfLKRvfCbqqBiBNUkdA7MwGN0WKazKMF03FyuZH27kUl9Z51nY1Ovepe3BrQTPbEvaU26/D7158zAKH+8cU4MQbLrfzKphmwMJgw9aVaEqX2cpO8wrXm9BWlRGQijwjIHp216ZqF3Pk2IaujXPaUWWeSp/nKXhJKot4ZjNwFmWL/cX+4A5OX08cj2mjizG8X2/06dYRXdq3RPtWTdG2ZeP/9Xf7Vk3QuW0L9OncDuU9e2J2//54b9AgvFfUBt/0OAtbbD1INO0h+kWQ/wnTIVjffU98W3Ae5veviy9GDsWS8nJbD8ItepeOGYb3Oje09Rvvd2mEFeNKsKK8xAKPD3s0w+rxpVgwpACLR/QH80tLBluwwrUfn/XrhNntauOrYX3wcUErW//xQbfGdkDhirKR+KBbE8xu9xa+HTMsPstHfa3+jWlSsBG9e7FMkE/VJTm9c0433yfuBMb1ZfXr1LGdRjjXWh/jH8J38P/q8MP4/+j/+mEzlr/3Gj4ZfAZmFZxkO19x8fZsW/uxu6392Np7B2wt2A5betTCqva1sLhVBotbZrCyfQabu3EtWgZLW6ffy9tmsKlrNOWqexazrG0G6ztHgYh+VOmeAWW9rm/bZO0Qz28d7S1vlytD+W9a18I3bWrh23Y/wsbu3JqXa0F2wbIee2JcgwPAtSBTOx9nhxN+NuJcbFxZdZ7+f8W7ICeZDqacz9BB83mluXEG5/HzF23ixOc3i7/4c02DpuGonpShrOTFJw3LmPflkgnlJJPEJ8/fabLCJsmGdtN0iJ9G+cxqY9qRXrWFbHu8eKIqY97rE99TyZCSr7zseVnykvheRjr4DHoOX079/laZt5tkQ+WkwoRUMkl2JSsZ2hCvpjQOQFQJ0Zoq8HIeqyiI5f7y8j4trMcxauZx73XeegtTBg3KcVpygo7QwZWjQWfXpem40EnxVHrEN+fGYeTwbuBBhOPHQ9vw5puClR2R2DYK4adkacSCO1Xd1LUhrmj4Iq5s/LKdxE3czV0b2kJ0CybeLcEtBc3wh9eetulaD08cEIx2uAXuOWtCaDtrv0oAUs0ULN9ecujI83y1WewI+vYK+iKWycOX/hwbvu98MBItIua0oe+mTsW348fjs1Gj7AyM2QMGYEbfvv/f3LMYdAwejHkjRmDhmDH4qrQU80eOwEf9mmB1z6NsFGR1592woO3PsK7rTrazjI2G2Lzr7bChx974uvB2fDakIxaMKbG2XDNlMtZOnWKjIRtn8oDKbQvUtb3u5tmzjL951gzbitfyPPBw5gxbuL559kxsnjUTW+fOMcrteplWOXVWeYfc+6H3II3a369753xeGHvv3Hvj32WVGY3s8
n2aNHAg6r/zjs215q9C+uVL3y//vWKa3yx9t0S9jHCivkz4kCdZUZbzCuVkT9SXS97rUDnlhfFpb8fjhRMVlnmlvR2lpUM4L+/TkicvvDxWadlUPglDnsrz0RCrvMeE9pSXDWFEPZZpyfu0ZEMdIdZjpMdjw3QS3vNCeW/fyyWlKbtlw9dYPOlO23p3ercTwHM/Rtc/CF902tvWUaD3TtjaKzv6Ma12LbxwbS08fXkGj/whg8cuzmDKGxkwSLjspAwevySDF6/N4NpTM7jlrAz+cU1W5r4LM/i4wbbRjPfqZnDjGRkMeGobz9aDdM+g9d0Z3HxmBleeksFzV2Xw0nUZPH91Bt0fzmBdpwwWtcjgwd9n9b58XdbOFSdn8PL1GTx7VS386ZxamFFnO3CrYB5QuL5wd0xvvi9GNzw0uxidW/IOPQtrPmsXNx3bRu2W1E7iSSYGuoRkQso+DvuZMv5inr9acy0GD1TlL9dcF0LKqU+chsMpSiGOOkJ7SXlvSxjxQnnVVVRySdRj/XOGWMl52+KRCiucymSTeX+p3GPFkw4v79OSq46GGOarw7DcX0n5fDqEDXGhbT1jqCsJH8oIS6q0l5EOUV+WL606ksYBCAHeiISkyBvxcqqcqORJpUNY5b2M0tLpZTmUxL36OT3i49Gjs6MUKc6wHAzvVDCdE1g4JyfJCfFY4ejQVBkBqWYNiKZEZReKZwOBnLSNYpSA6zp4EN4Ts4ZnR0neKwEPx8uu9chOrcpuozvMdtmy7XXjXbCkt+oULAU5rEeVACTPInQ9c0470NFzzl6Ogxe1p9re8JEs5XR7fanpwEaIjfORXluzwCHOadOwYtIkfFtZiSUVFVhcXo7F48b9/3GXl9s6juUTJtgIBne14kGE80YWY37Rc9jYYzd80+kgDGvwICY3vxpLOx1uIyOa4mC/EvbYBSt6nY4v+r2Cz0YW22jIysmTbYSJU7L47rPt9Q5Ymv2uv8Ogv03O8djfVTAR3t4F9XuKDY9PrIf+pp1N2fPUv5tVdEZ1XDN9Okb37o23a9e2xYEcNucvP/7S90pU3y1R8nUp7alwpOE3U7gk6nE+7e2KT3xSWjxS2RY+yabX47FhOpSTrlBOedlkXlhhPJW8pyHWy/u0xzAtnKiXDdNeXnqIE5Y8XT5NnuTzUWElr3wSRjZFQ4yw4ifpEM/LhmnJeCqb5PHauGImFo67Hu/1/Q2mdj4eZc1+hrL6B9i5HyjaNTuSULA9NnSthZvOqIW/XpHBp40ymFMng+tPz+CVP2bwSaMMTj8yg3EvZ/B+vQzOPjqDm87IYHrtDIqfzeBPZ2cwrfa2EZC6t2ZwzAEZ3H9hBqs7uOlYPTL4qkUGD/wug18enEHZS1k7jW7P4JeHZDDomQw+aZjBpb/KoPTFbJqBzi8OzODjhhnMfLsWbjyjFka9oGlYXIz+U3zUllvyHgyejj6z56/w8aDTsWzO88DW7C5CajffTkyzrcL2kmxIQ6zyNcFTltNgGjRoYGtCuI6Aawi4NoFpzhah35R0yU4+KhxlwivEqb6iobzPh1jmiRNW5SGGeZV5mg8nHV5e6RBHvrchrOcJm496nNKhvGyLSi6JhljmhVNZEo48leejNcXSZj67tJHPZogP7eYEIFQmY16wOiMex7TkvQ5fSdmQrMdLjpRzy7iDQYuGDbF04sR4BMQclyC4kLPhnQvJiXoHO07LAQr0eYerSgBS2RQ8iPDmHo1t+1yeus2D4p6YXWzBAwOIH8rNIId1e7CiCDuX6xyQlIMIFVC44IHtmnTHbeocR/FI1b6ivszSchjlQMq2o8Ja3/p+cnVi33AxMQOS/99vjQpZEDJ8IBb1uQ3Luh6NMa1fx7CWjVHW5mV82PFqrO+2W3aKVjQ1a0uP7bC+5/5Y1OdOfDq0NxaMGY1lEyZkF6jPnGnt6t+B+G/D9Z31UUo+LmO/6d1Q/wf9Gr8baXzhSSkjPeIn4Kzueq9U7rCySbnV06ZhQJcutu0395L3O37o2xR+v/xHljK6/HfQf+t8OtTlscJ7+bS0xylNmiZPvq8385IXXtiwjsqLSj6k+fAeK9seL2xNqHCSZZ5p2RDf5yUjrKhkq6OSD/Wk4Wg7tO+xwoVy5Hteml3pkh5PZVdYlkne8zyG6dDuusUj8OWYSzG3z68xueMvUNrkcFQ23A8bi/YAinbBVi7oLtgOXzSrhWMPzKDrQ9kpVxu7ZlDxcgbjX83g65YZNLw9A/I4reqC4zK467zsyAinS/V6NINPG2YDDY5g3HZ2dnTjlCOyQYqt5YjWdWzokkHtGzM48dAMPmqQwZZuGcxrnMFJh2Vwz/kZLGyeQau/ZLCuc7Yed1+QwfEHR1PBumVQ+EQtzHj7R9lRmz47An13w8Iue2Fs/YNQ0fIo22KYBy0unnI/tm7Kbt2qdlP7+DYL24tlSVcS1usRTtTbFJbz+TmHnwuUuTaA6xK4iJrrF4QTVR2EJU27ZUsY0TR5z0/Cslx8vYfChHnZEpUcaSirMvGFCank8lFiWO4vyVO/bIhHKp7HKC1dXi4JKznhSMXz8krLpmS8fKjDy3q8+MJ6XZ4nTBqVPY9Pk/V82RC+2gDEg8O0HibkK++Nkac8cdVhKc8/Kp7+2LF5c1ssWsWJdQ5PTpmcDDklTi52huTAuLIcHc5hqRKAVDTBHiWNcV6fhrhoUAtcVtIBV5V3xzUTCnDNxN649gd0s05XVXTH5aM7Ycdx2d2ZrmlfG70aN8K08CDCwLE35z5w7Mkzh861sW83ta+oL/u+aa/Dp2uqRwGM5H1e+kjjtN6X6NnS5KUvjUpfYrl0yxlnPmxLJyMdsU7Jepkozb5hEMIDBjka9EVxbywqegiz+rTBlD59MKV3b0zq2RnTu7yAr7qehQ3ddo0XbnJq1uYeO2F5wWn4rP/r+GzEkHhtyHqOBPjREPc3E9cr4e/Jl+k5jOo9C/TE8npGVy6c2iqWVZ9JNg3LcldGfFL/UmbV1KkoaNvWgpBmzZrZgszv+/3SBzb8FiZ9+/Qt1PfRY0I9kpWMp5KVHuW9TL60cJJRXfNRyYrKZqhL5UnUY5SWnGwr76lkZSvMUzatvULZMC87NcF7+8Llo96WT+fD+DKP8Wkvk5SmLPm6wrQw/pmVJua7L3vis5EX2OGDPC+DO0ZNa7Yvtvb7KbYW7pxd0N1zOyxvXwsnHJoBp1N90TQbGGzqts3x/65jNsBgAMEA5C/nZcB1HgwuGJRw7QbTg57O2CjKlDczOGKfDFrdvW19CEdxGcS8dVM2AOGoBnmfN83g5MMzeOqyrJ61naKpW90zuCcKQBiomK0utbCxm05HZwCyqy2mL61/AMY153a8v8QH/U7Fogm3Y/PaeWo2o2wrto3aR20nKmHmdSktmRDr88KElNh8dr2NfFhvSzo91qelR3IhlvwkeeFIJZNGJSs9Pp+G8XzJh9TLpKVDjPJp8uSrDSSbRIWXrPKixDAdXioX9XilhZWM8qLiS555nw5thrhQ3udrgpX9kApLPi8LQHzFlPaCEiaPacnkS3uM
cORVd0uWUx+4pzG3/erfuXPsJHongk5J6ETkOBbOuZQDJMfFcIETEjs5cmYip4qOHRc9LxlfiZvGdkZmXGPUKmuMH49tiB3HNsJOYxtjp7Im0d0UO5X9kG7WqzF2Km2MTFkjZMY1wtVt30BBo4a2Heui0lL7pZujB3xO3ja9adYsOyOCC5PXTZuA9dMnWz7JEVWb5rSxc/bCfpJcPr6VSUdE1dfeHnnim15hQn7ogAZ51SnWTT1Ol0/HMgk24jJXZ9UvLoveL/FlO5H6OggXvbdeH9O82XcbZs7E6ilTsLSiHF+V9MdnI4eC2+7OHzECHw8fjtn9+2J6QUt81PVOrOm2f7xjFv9T5mjI2p6H4qvCe200hOtKOMVt3YwZOUGI7P2QKNvP2tS1GesXtmvMC8uiPMt5UGGnFi0sCOGe9pxnrW+Tvn+i/psmmfD75/lJOOnwckqLSiYJT5m0i7gkTMinDG8uJuT3lzd3ouHi1vAmnzdlKJ+0yFH1De14PtNp1z+Dk76aYL2s6kCex6pNxPeUGOZ1+bJ8acmHlJi0fvL6iGM+vLxMUjoJJz1ePq0Oq+a1t9PPZxecjAltj8HoBodhTst9gH7ZwwdtLUXP7AL0en/K4Kj9M7jpzAx6PMKF31Eg4BaRhwGIdsHijyEMUl6/IYNuD2WwpHV2mhancXHhuU0h7b4tAOGUK07z0ojHWUdnR1sox2+ayfcIAhDtwtWzVnbXLluIvitW994TZQ0OsOllnGbGU9G/qrgZG1e9a80dtldaW/n2VD95nsclpYUJ+4w6JE+qtNfNdNIlbBLO6wltSlc+vGyKeoz0JdmlfIgRVpTlvn7CeCpZUen0Mkx7PUoT4+XCvOS8jHiSlV3lJUs5yYon6mWVlh7KpOGEF/VY8Tz1epSWneqwXk+Y9lilQ5kwL7viZ1QhMUQlKEo+L5V7GuoQRvLKe4zSaVhuV8a9phmAlPftuy0AiZyw2KkInYiwnE4JZdLk0vjUEzl1FoBMn44llZW4saQjMiX1kBnxDjLD3kZmyFv/Wfewt3Bdm9dR2KypLdZmAMJfzLkNK38952L7ddOmYc2UiVg1fiTWjXoBa4sfx6rxw23RN51bOrlhm8ZOnWv/HJ7rg5gftX1OPuwPl4/lnA17D6Q76q/43VDfu76UfKLz72xRzuw53cJ6/San+iTJBjpjrPgeEwUWSe+qnl00lpGeqL4sVxDCfl0xcQKWjR9vU6o4rYpnfywcOxYfDx+GWX17YW7357Ck6ym2da/+s97aoxY29vgpvu31O8wb2BwLRo/G8okT4ylZDEJ/KDeflTf/RvnsunP6Sm0UtHVO37l3xNpwzhxbd9a0fn3bnnf48OE5h15V9/0Kv3n6Fup7Jyo9pOHleUx7jE9LR4hXXuVJGPJ4+2CDi++5AciqVauxYuUqLFu+At8uWx7fy5Ytx/IVK2xbR8rxW83tGBmkKCCRLVHVIaSqYxINZcO8x7DMX6FsmPeyYZqyapcQp3yIYV5leuaQqjwJ6/GUS7MvLGXCS/qFlX3xZcNT6ZCMqMdm9W3C8o+a4ZOhZ2FGj5NQ2foYjKp/KN5vvU92BKTPT2wEZCtPGe9ZyxabM3i45MQMjj4ggzvOzWDmW9mpUAoIkgIQBh8MROY3zo6gTH4jG4w0+XMGB++ZARelK7DQCMg+u2ancV1xSgbXnZrB4L9mg5P4fJEo2PAjIKoDXADCaWRrC/ewnbDKmv4MUzodb+tdFpbfgA3Lp1lTsX3UfmorUt9eklHbhlS4ECN+PryXUTrUE9pLqq+wIU3CihfKhnnJJdFQNswnYcQLZZPykg2pZMM2Ep807fIySod60rDkewxxNcGqPh7LdD6sMKqLsNVRX8ckbFKdvS2lPWU6rCt5/pYt0ipTsLzgP5P2D/V98R7LhVY8sZIByJzi4m2/csuZkNMXUToNsXMXlOXj0xE1bOSchI4pyxSALB5fiQtL2+PHZY2xQ2ljbD+2EbYf2xDbj2mI7X7gN+todR3bCKf2r4/erVtgRlFR9kTsSZMsuLCgo3IwVo1uhLWDbsKm3odjY8GBWDzwWSwcW7LtzAg3HYftE7df0O45bZnQP2G56Ql0JPVdjpzX69PqV+mL+jfU53UpLeplY15kI6y7ZMmPZWk7X50omyLj+dQX65S+kEZ2JUunnAElR+78zRGtNVOm2I5XDETeHzwI03u1xUddb8eabvvZNr36z5nniXze7SrM7leAz0eNsmldDERWTZqE1ZMn/4/ffA4GWWunT7fAucronNpI74Drm7g9VRbJGp/p6O9+bnEx3nn7bQtCuAc7Hey075r/fqXJ/BD4+o+FoxYcxWDwsHrNGixZtgrzv16B2Z8vR8VHyzB87nIUzViBgmkr0W3KCvSYugK9py3HgBnfomTuEkz68GvMmfc15i1YjMVLltrBYZybzr3vGYxQt0ZGkv5T+k9pr//KPvtXnvnfhfXvB9939uW6dWuxeG5jvD/gDEzpeiLKWh6N4rqH4t1W+2BL359ia5+f2G5Sdv4Hnfoe2elSHPngug6u02BwwG159X2pEoBoVKJ7BoWPZ3DWURnbRev5azL4y/kZ7LdbBm/csA2vAOS4gzKofCWD127I4NC9Mxj4dO7Ih+zlC0C28vySMADpvC0AWb98Wurf/X/l+/Gv6vp3vSNhPf8Vu6Guf2f+X6n3/xT239k+oa3/6me2AIRK/RUaZd7/ByJZ8nUp7bHCiPoy4pT3afF4+E379u0tAJlfWhpvnZvj+EXOg3ik5oDJqUihsfOhX0ydnC+jE8I8AxA6bgxAzhzXLvdka39SdXzidXD6dQ4/OpE75unUbE+Z1p2kK4kneU8lJ5otO3ZgfXRs0hATevbEJyNGYPG4MVgxtgPWDH8K6/tfiC299sHWHjuCv4Qv7n4OZhd1wodDh2JRWRlWcjrO9Onm3Ppfn/0v0HLG81FrZ+cgxu3u+oL4NH6S7u8jm4SXvVCP8mnUdLlnkR7ZIE53WBbLuHc3Dj6EU5sENnxwE+pnnn1iC/Q9jQIT9iEDCfbpR8OGYnjvjmjW8W6M6XQk1nTf3qYufNhpP9Rt/QBe79YcDQd2RYtRfdCmtD/alQ1Eu3H/k/cgtBs3CO3LB6FfxXB8Nj57jgkDLZ7pwef2bWNtrjZ07xTbyPeH8nGf8KDCWbMwsqAAb9WujXr16sWL0vXNCqm+X/rmkepWGTG6xEuj0sNyXUp7Wh2eWMpQnwIPjnSsXLUanyxahbKPVqJw5mq0mbgGdcatx+tj1+O1sevxypgN2Xt0RMdswKtj1uO1MetRu3QdGpevRueJyzBk5mJMfH8h5n2+wA7/4g9IHBnxoyKqo38O8dRGnqpM8noGUZWTJuFU7uV9WuXEhmnK8RJfaVHxPVY8yYiS79OSE1XdJWfC0T/iSTakHqsy4X1eadIQ45+Baf9+2EjYN19g5ReVWFL5OOb1PhazOx6BkiZHYsjbh2BG832w2QKQnbPb2fbcDqs61rIdrTiiyvu7Thlwd6rD9s5usaupVmEAEm8R3j27fS632Z1dJ3t
PfTODG07PBiXcDYt6FYAwuOEaEO6qdcHx2XUlPHckHtGN6pETgEQbcOSOgGSnYPEsEO7wlZ2CdSoWVd6CjSvnxn8/aq+wndm2vHxbK602F1ayoQ6Pl6yw0iWMpyoLqdcR2pQt6fFY2fTU45X2mDAd2hbG21MdhA3tiS+s8iFOfE9l3/M8zqdDu8JSxl/UpUt6hfV5pSkrjKh4kiGVPVFf5uV9WjIe49OU9RfleXmclxffy0leerxMvrRw0uXzHmcnoftCn6YgK6hKinqllOfllfq0x4gv+Sxy278qJ+VWcs2bN7cAxHbACpwvBR2hoyHnIYcG2Jwy75i4tGTkyNFh+7qyAmcwAKlsavfOI+tj55ENsAvvUQ2xa8kP82bddh7VEJmKJlbvI/vVQdO338Do9i3xQe/X8G3h1Vjf+xfY3HM3bO2xXfwL1cZuP0Z5m79jbOdOmF5YiE9HjrRtbrkmgL88c6oWnT5Oy+Kvz3kDkur6IKE8dAjVJ1VohK1W3tnwTr70Ce+pT1NO+fi9o07pDeqhgNj0B2XCUF+oM8xL1tuXTJoN6U2iFphwrciMGbZ979dlZWg3pACH9XgJJ7a/Gy93OAOfd90Fj7Y+H/u3fAL7dn8Z+w96BweMaowDS5vhwHEtcGB5y//x+6CKlvhdWQeMHjUEX48bZ+tebIogAxD+Lbt+Cfvb2oX9qb951z9x20Z9s2zyZPTp0MG2uOQhYDqpd9uXK5vy36+0NCVZpkvpNHny9Q0VtjoM5XV7veRpxGPV6jX4cOFKDJyzBi0nrkXt0vV4ecwGvDR6A14cvREvlmyM6QslWd4L4qvMZDfg5dEb8PqYdWhQtgqdJnyLcbO/wCfzPrNTgrkrD0dE/GhITeqvequdQqryfFTtJZrPrtpY1NvzOK/L2xZOssIn5UOcx4by0lOdXenw8j7tbfq0cOQp8OCI2JpVK7B6fjHWTn4eG0quxMYhp2ND0UFY12NHLOq4G4bW/xn6v3kIxjfYBxuLdseWwmwAgp7bYfKbtXDRCdltchls8PDBLg9mcNzBGcxrtG0EgwHI+cdlcOdvM1jWZhv/8yYZ/P6XGSxqmV1IzkXpvNvfm12MPvaFrCwDkNo3ZXDCIdnAhvra3pPBAbtn0Pyu7OJ3BTukHEWxXbB40KGtDamVPbGdJ7dzBCRchN7VLUL/7pMqPo5vX5/27au02lnUy4dpYdJoKO/zIUb2xJcs8/5SuWiIE99jfFrloml4loeXMKLCkiotXEipSzhPQ1xoU3mPSUpLTnZC6jGyKcqytMvjwrTwIVb6QnnlifNYL+91ST6kHuvlfToJI5zseXmfDrHMWwDigUlC5IUPJ8VenjzJen7Ik5x0KO8xX375pf3i+Mbrr2P9zJlVplfJSTBHwzuGcij+Bep1M02HzQKQivKcAOSiFq/i0rZv4qqu9XFdn2a4oV9L3NC/NW4Y0OaHc/dvhT8WNsflvRphh3HZAOTn/eqg8Uu3YewbR2Je012wusOP4p1HNFxNylOz13TdG990OQZfdz8fiwrvwKJBL+Lr4pZYMroAS0sH45vysVg2YbyNjHDh83fTpllb0bnlr8cKTNiGvNmeutV3nqosiXo5n6Ys86K+LCcthzTl3RBelA5snCYmxCsv+5FeYpLuuI6UF0Y2RBOcYsnG+OBZ4zo6nTl1j/i+LYhhf/Bva8XEiWg7ui92Gvw2Mn1exo5yUKpYAAAgAElEQVTd/oZjO92DHTs8iUzX55Dp+woyxXWQGdvANmBQIGuUI3UWkLtRtihAt1E8phn4xnckL1wSPwqUFejH1PiRrsjGL0qao6CwGz4aOtSmlXE6FgNhtb9/5tS0ax+PU7uSLp0wAV1btbIgpGfPnrYoXd+rpO+XyvTt00fayyot6jFJacrxYpkuLycb5Ol7LcqpNHQsV6xag4++Wok+M9dkg4444IgCj9EbwYDjhVHr8MLINXiheCVeGL4CLwxfnqXMj1iNF0atNbk4UKEeC0yywUiL8mUYNX0+Pvxkvv2YxBERjriEgYivf1LaP6fSpEmyIU9yniodyvq2Y5nkLBH8E2KZ9/hAPCebhPW8HGGX8TJhWn1MftrFMl9H6RBW78eaZQv+H3vvAV5HdW2PO4V0XkIaeSmkkZCXSgghkEJCQiCEhN4JvfeSkAA2xbh3W+5FtmV1WZIly1XFtmRJ7r0XwMYGYxv33rT+39p31vW+R3Nlk7z3krzff75vtE/Za5+Zc0dzz767HOxdlYdDlRfjWNFpaMr7QOIHKRfMvTXzgyhpfwYKXvoiKjt+BnsKPoGjo4+n4d004D245PvvwaOXJPbgGPNUwnrR945ESlx+tzDrFffu+PrprfDzbyb27WDg+ca+rfDkZa1wzlcSblW0npCfrlxdbm6Fz5+WiCehxYMbG9Kt64untcLIBxIyaR154tJEJixaUHYObQVmwqp7oRUu+nYrfP2zrVD8RCLtL3IjBYSbJxZ9yNLwbhz5KUyJ0vAuyP4eVpedhy1Mw3t4R7O1zbuda/8ZpMOy3fPpc/L0RFjPG5aFVbvqGlftouEz4/lVFm9ITwarcUNeyVK7xhJ/WBefcJ563rBMvpPBSp7Hx2FDWZ7flyUvpB4f8qseYlQ/GSxliN9TYoXXOHGUGPF5vMrCsB4ecdi0FpBwkLAeJ5xtupCQX+1xFyZZukBiX331VdvRs3vnzjiiXzSDhVfKgsIvvtwiTosI8abU3cIjpd+1k7+ZAhItkK4c3BE35vTGHWMzcf/UAjzUUIJHZozBIzP/Z857q3Jsl/SHphfbGA/UFuJPJYNw/7SCtGM+3FgC45uchQ9MTyz+vj6mC3q1fRqVXS7Fmr6nY0/me44rIO6LxpSQXP6C9V4cyznF0rQezfsYDud/FvuLfoDdJb/HjrJ78M74Ntg6OcOUki211RbozEUtN8bjbuVU3uKsJJxbnVowa76lsHgq3n916q9Z5fCa+byxLUndM5dsS6MoeYUk+dw65UdtohonrLOdCiI/p6HTyvCh6h6JxArl7fCe4pfQavRLaFXaFq3Gd0wkXqjtiVbTeyctgKYYyFUwVumIlA8pISGPtUuBiVFMUvgTynNizKhc3wdfn9wHg0cMtIQKtILweeM9pbtnPWcpc+E/h+hzCfHHli7FumnT0LNLF0sNzt2IuWjTO09U7zD/vgvL4jVw8M4M35u+HodTW7pxidficvP2PahZtQd9Gw/g5SmHEgoErRrR2bpyP54ftwXPlbyK5wqW4vnc+Xh+1Ay0yWpAm6x6vDCqES/kzEabvPloU7QMbUpfQ+uKt00hoQxaSGQtoSWlw5S9yGt8E7MWr8G69estRkRuWYoNibtuXrPuW3Ok+1Q9DhfXJn5R8ujUGKp7Go4nvNrjsGoTL+XpUJlUfH48tUm++D1e/OJNVxfGU/KGOLXp+di3eRkONj6Go6VnoCnvFLMSJN2YouBwukltzfwAil/5EvJe/BLGtfssNo+iAvIxWBxF/vvt+2J+x/eYFaLzTQn3q8l/S2SwkjwqECMfbAX2M2MWU+xuHdgK89q3Qu
/bWoEbEDIORNmzuM/HgLsS/NxHhPuK5D7cCj1uTbQRz00OKf/t/q0w4oGE1YX7jnD3dda735JQYrg3yJoejFN5D5ryop3QGcMSbURY04MbEX4Ti/J+gDUVP7GNCJuOHW2mGGh+OY88PGU5PMP593jxSg7rnp9l1cUTUskQn+qeakxR9unwfL4cjk3+OFy6cdWucTzVOOJRnTSuTdhw/DheL0s40hAbN5bkeVxYlnzxpquH4+ka0o0reX68UIZ4NKao2uOwkiFeT4XzbZ5f8sL+sC6+kIZ8qqcoIAKpk9RfmC+zj4d4fVl8ouIR1TiioSzili5dau5XA3v3Nj/20FVCC4iWFmNJHrfY820qh4sNtZOyj4vIZhaQ+gxcM7wrbh09APdW5+LR2WPxJHc1XzgRv+/6HH7d5nH8sfdLuCKjLZ5eUvnfct5Xk4eLnn8E90/JN3mPzamw+m2lg9PKf3LxJDw2twJ31xXig/V9Ey5YVEA6vIyxfbtg5tA/Y8OIc3Ek+z328pYfLpUPnZaZhBaRqC3B81405Z6CY7kfxJG803Cw8OvYP/pc7Cm5FNvHPozNE3vhrZoK25Gcu5RTIaGFhDuX022LWbdC6wjnmb9es53WE8vMxexc/xfOyBrEe+QzlXzmuPiV8hEshH27Ydxz7PH631BbSO15DsbQQjxFAantjVZTeqBVVVd8vOBlnDH4OXxlWBt8Lbstvp7fAWcWdcKZozvjzOIu/4Szs439leLO+FDNcXfCr07siV6Delk8E4PqGZguBSScB9Z9W9Ityyl6IY/eBeRlPE19SUkyHoQbpYbvLr3jTvbd59+BxMbh1ObfsSczLnFcXNLy8OY7u83q0bH2YNLNypQFWjsq9+G50WvwbNZMPD+sBm2GTMSLwybilRGV6DBqCjrl1KJLXh265tejS349OudNt7b22bVom9OAl/Ln4YXStWgzaSfaUF5SqTmEl2v2o1/tVtTMWWk/KjG2b8+ePRYb0pISonnU/IRU/enmTPMT4vwcal69LJXjcGrTmOnw4oujkp+OxmHUlg7j28Uryj4enseX9Xzs3bICh6dej6aCj1nsn3ddYjKKd3K+ivW552LF4M9jVu/PoLDdV1DQ9ssof+VzWDv0kzhafCqaaEWgNSHvfTia8x7sH/Ee7B2eyGJ1OOt4TAa/V46MSlgmaJ3Qyf056Falum0gmJ34HuI+ImonpbsV+30beXjdVEK4n4jwlOv5WOb3XSL+431A4QdsH5NDoz+BBf0+g5qeXwL3OVlSeDZeG38B9rw2OHb+NMeeam7TPRvq95iwLJ6WaIhRXZh044svjgrbEo3Dsa0ljPrEF8pQf0s0xPh6Szj2tXScLDaUcyKc+tONrf6WqMeSTwfL6T5fySOvxwirdvHFUc8bluP42abrCfl93WPF/64UEAmgUJY9VVk8GkD1kBo4+CMeYhsbG00ByR082BQALZikFCQXXW5RllwscKERtCcXHvqlOE1/Ur6TEauANGTgmhHdzAJx35Q8PDZ3HJ5cUomnl1WZ4vHzJ+42i8i1w7rg7olZuLmgL27I7gUqC08tmYy7J2SZ9eT+KQWmINyY3cuUGSoLN2b3xs35fXFtZlfcNWEErh/R3ZSch2eMwQWP3G5KzU25ffDEggm48C/3G++jc8baGNa+cAL+vLzazqeWVuHxBRNwT0NxUgE5c0wX9O74CsYPHIiG3GzMzR+EVaPuxq7sr+BozvuTisfh7PdjW+ansXvkJ7Bv1MdwKPuDOJrz3uPBfSkKSUJBYeD6sdwP40jeJ3Go4AzsGX0RtpU9hLcn9Mbb1QXYMm083qmvs7SuXiGhy9be+fOxdd4cbJozE2/OmoENMxuxYUbDv/25cVYjGEdA5cunMU4+k17xYNmffF6lPOjZdf0mI1pYqxyLT4MxBWTWLAyhBYQWMlo4anvhqyNexG86/AWX9myDPwxsjyszu+Dqkd1wzcju/5Tz6pHdcdXwrrg0sxM+Naln0grz5fE90CWjK+pGjcKGmhpTcqWAaN40zynzImUkmlvP09I87p03D+Oys9GhfXsMHjwYGzduTPnyfTfvPb7vdLDssb6s96J4RYX3WOJ0cnG5d+8+LNu4G0Nn77dYjTY1CcsHlY/nJ+zAc4Ur8OzQSrQZPA5th01Ex+wp6FE0Axll89B/3BIMnrwCQ6tWY1j1WmROeRWZNa9iaPUaDKlchYETl6FfxSL0Lp2DroUz0D5vBl4uWREpIgeTLlkcs0PNHoxtXImly1firbfeshS+dAnjNcbda7p79vcunnRUvCHVfJ3suOT3R7rx1C5ej1OZVJ9P3PhxWN+mMdJR8YZU/BqTlAqgZUDbvBKHpt6QsHpElg5+D2zLOQOLc69FVX57TCgegsljczF++K0o7fFtjOl2Fsq7fA1jO37RNiM8OPrjaBr9YVvMczd0c2+ihcF9R0g5CH/YStZlfSd1ZVlNyGflqC/ZHtajMZP80T0dHz9h/bCMXVH8R1NxYhPCup6fQ23GVzB35HewouRHWF/5CxzZtTDlf1xzGc6x6urX56y6p+Rl3dMQz359Xumwwoh6vriy+EjZryPkTXft4g9piE9XD3Gsk1f3KRriPY59/vC8cXjxhji2e2xcWTyeennExI0pWeINqfpPREMc6x7DsePGF5+nkiV8HI596Y4T4TzWlyWPbeGYyRgQAUjjGNUuPgqNK3s+lcNB/QVJjnhJyU8XB6bgLc/KSiggbvEl5cOUhZh29WtRobqUixSc8BFlnxYu4n9XCsjyalw9uBN+fM+NuKMi05SLm/MyrH7N0M74yYO34t7qPJx71w24Zkgn3FLUH5d2eAa/efFx/OzJu3Fjbh/85oXH8Ys/34uLWj+K8x++DZd1fQ4Xt30KD9QV4dy7b8DlPdrgF3+5Dzdk9cAv//qgKSBXD+pgbT99/E5cP7wb/ryixs50CkhGp/aoHDoU84uLsWzsWCwqzsfC3PbYMOpiHMj5uH1xHBz1AZR0vA/juj+A6X1vw6LBV2Jt5i/x5ogfYFvWl7GPpvcoY5J9iURfHP5Lpyn3/TiW+xEcyfsE9hV+D7tKrsA7Y5/G5kn98faUcmyeXmd7VNBCMr2xBn+bPhpP1BXgsbp8PFqbj0f+zc9HeR91BaipnWSuafyFnhYdWUL0vOlZ1bOnuijbVRY1bPS86hlPUiktolS4g2edclIUEFrIopiMM7Pb4fIeL+DaoV1wS34/3D5mCO4cm4k7K4b/c86xmXYN147uh89WRW5g9Rk4Y3x3dOzdGdNGjsQb1dWpCkjwI4PmS/MnKsVEdc2T/v+t3Sl5mxoaMHLAAHTo0AElJSVgfIN/f+kd5qn69e4TZXvcOzAO62XElYXh+5MLewZ/L92w24LMGSiedJOqOoDnStfj2cypaD2wAm0zJ6JLfh36jJlrSgUVjazpG5Ez423kz96K/DnvoHDedhTN22FpeAvmbre23Fmbkd3wFkbUrseQqlXoP24xepTMQYfR8/HS2PVoU7nPXLJaR8HqHWt2oWT6KixassziQpiylwvgv8cSo
nvVPMTV1efn15f1vSQqflJ/+LrnUTnEe6zGExWGNMSxraUjxMThvXyNKSp+Up6Mx9m74y3sn/k0jhWeaj888d19OOeDWJtzISbldcLY0jxMmjgBTEE9c+ZMzJ46EFMyz8GUwd9FTb+zMKn7l8Gdw7fnfhJcxKPog2gyK8h7k0qIFIxYqmxUUjgiS7u+Q0yJkPVdioYUCk+ldDg5wopSYUkoLVSM3oOm/PehqfAUYPSHLZXw2mGfRnW3L6B+wJlYmPN9rC4/D5vqr8Kxo/vTflb+M1NZn5PmWXVSnfpMVCcN24Qn9XziDflV97gQa4OcYO0WYvzYcXiNS76WsOF1e1knwrWEDceNk+XH8mVidQonqnZSHqIhPo4/Dqs2yVI9Hd7zaUzf5vFxMsSbDkuMcKKSKSzr/lC/aIiL4/eyhBP9b98HRIL/EcqbKisrMwWkurAQ9L/WwswWBU5J0KLBFhdusaXFRhxNwWhRJqxb6NlY6Vyw0llAIgWEisIdY4eBLlC0evzybw/hvin5+PlT9+D28mH4fffW+NkTd+GGUT3xvesvx+U92+Dil5/CraMHmoJBpeOGkT3wy2ceMCvLr5592PA/e/xOk0vlhUoLFRBaPaisUPbvOv/NLCYtKyBd0a9LB0wZPhzLKyrwRk0N1lVVYXlFOeYXDMWKUfdh+6hvmnLR2O9GlPbphQmD+lnWrGmZ/VGX2R0zM5/HvKEPYvHQm7Bm2EXYNOK/sDvr0+b/G/dFwy+Uptz34lguY0k+hv0FZ2F38W+wrew+bJrQF+srx6Hf5EJ8qqonPjSlp+3c/qG6DHyovu//ibPHxDy8VllpSgjd+ehqJiVEz2M6mvIMR89pyGs8eoajZ9qeX5XVJxotzpspIFG8xjfzO+KqgR1wc34/3DUxy2KcHpw+Gg/VF/9TTo7NOKubJ4/A56bSlTAR9E4FpFM6BUT3GlHOGZULO12bnzub16AvnGt+bquqqtC5Y0d06dIF9fX1tuD/R955J8K29BIPsXKreX3zbgyauT/hcmWuUYfQevJePFuwFM8OmoAXh1SgU/YUs2AMnrzSFAkqHYVzt2P0wt0oXbwPY5bux5ilB1G2jOeh6DyIMUsPoHTJPpQs2oMi7hUyeyuyG940K0m/cUvQvWQ+2pWuRJvJe5LZtOie1al6J0rrlmLZsuWWJYvuWApMD+/jH6m/m/kKx/lnYcPr+J+q8/tVz8i+FaNwtPj0pKXiSM4pWJFzMcYW9Me4inJTPObNm4cVK1bgtddew4bXF2HJ2IsxN+8czBj+HUzr+zVUdv0C1gzlfiCnxlpBkt8HgXLgrRxWVn9SUUi1hCTlRMqGKSnCSAHxlH3q92WL/XhvYgf0osT+H3S/qu95Omp6nYEZQ+l+9UO8Ov4C7FrZMblA/e/6PP6vP19x8/TPuud/ZNy4+/jfavtnXff/9rjNFBCv0fBieGjSfVltpB7j2xPoxF+2x+GJ9XjxFBUVoW3btqm7oLtfIfUrpRYUttjiwkE8gWJh/FqAeB4tNhy/FhySGWsBqe+T1gWLcR+/ePpe0GXqDz1fwK1FA3DhM/dbsPiFf7kPtIhcPbgjruz3Ci7r8qwpIlf1b2cB5g83luJ3nf5mVg8qFhc+8wBuLx9qlK5c595zo1k+Lm3/F/yxz0v4xZ/vA928WKZCcuf4ETZOWgWkvg/OLKMC0hHTsrKwZuJEvFNfby5Rm6dPx+uVlVhaVoxFBRlYn3sz3hr1SzRkD8PcoiIsKSvD0vJyowuLizGnMB8zcrPRkDUI9cO7o37YK5g15EmsyLwCb434PvaM+gwOZn80dZft6Bcv+wUq9704mvthHMr7HPYWfBc1hb/D6WXPo9XEzmhV091cgVpN91mPYsrKphQtSI8HRscELXseBUAbPpJrAdUKcGYAdXQmg6ETi96Udo3veZNtEX9DBlqPHmzWJip7DPpmjEuogIQLYT2HovZsu+fU+KVg6Nd+/2yrLaCSR9pMAYkSLJxV2AnXZnbB7WVDcH9tIejiR1e+x6M4J8Y6heejcyrMSsdkDMa7YIL9DzxQW2TuiI/NG4cHpxeDdVK6LYYy4uoc87EFE/DInLG4vbYA/1k7IPnZnDG+R1oFxO4z+v/W/7LuPayLlzSpoGgu/XslkkcFsnb0aHPF6t69uy3O/DtO7zG1+brekf69p7aW+D2PyqEMudVs2LoHWfMi5cNiMg5ZRqtns2bh+QHleGX4JPQYPdMsHiPq1iN/1laMXrALpUv2o2z5IZSvOILihXtQNG87ShbvtTIVktyZm5E/+x1UrDxq59gVR1C+/DDGLDtofNyoMLtxE4bVvIq+FYvRccwyvDhpRyI2hOl8qw+hQ/VOjK+bh+XLl2Pz5s22XwiVEN0L54qH7lGU/TrZFh7ii8OyT/JDbFxdsjyNw3us540rC6/rS3f9wopfdVJ/xNU9b7oynxHuz7Jn+0YcqfhhUvk4mvM+vJr7C1QUDsCE8eMxY8YM+4zeeOMNy/pGS9+ePbuweUlHrBp7Phbl/wCNQ85CTa8vY2q307G38BNoKv6oZZRKWkFsU8LExoRmgXAKgqwciR+nZJ04Hnso5SHkCxUR1UP5ak+lidgPWj8Y+0HrB4PPXx32KVR1/Tzq+n0Nc7O+g5VjzsXGab/Hoe1z/JQnfwWPm1t+XvrMUkBRRZ9XHJZtHi9eLycdTu0nGlt8J0M1Lnl1eJzG8m3k8/y+7vniyp5X44l6fj9H/hpC3rBOGZ7fy/S8LPs+Xz4ZPPl1eCzLIV5jhfxqj8NLhjBx1OPET6qT/TpU9hiVhY2rC++p+E5EPSatAuKZWD6RUN8vrNpUP1k5vPHc3FyzgMwsK0u6noQLtJS6W4hZ+7us+4VJirUlzgISLTDjYkCeXDzZlIFL2v/FFINLOjxjCshvX3nafsG9akB7i/W4sn87XDO4I+4oH4YHpheZ0sG+B+tG4/ddnwddqu6aMBK/79Ya91Tm4HIqMsUDQbl/7PMyftfpr7h/aj4u6/q8xYTQPesPvV4wS8ojs8padsEq64r+XTua3/xrkydbgDh/lWcGoR0zZuCtadPw6uTJWFVRjFfL+mFNRQFenzwZDPJlpiGeb02dai4vxHOTwqVlZTClpCAfM3MyUT+yHxoyO2BB5n1YM/wyvDXibOzM+gIOZX8kGUPivxiokLyWfRpOz3kCrcraodXkLnhfdXd8sKoHPljTEx+c0gsfCs+pJ9kWhxOWVOWQ7x+ov58B3cm0sRl4cmQv1EUKH93NGAsiBSR8XvUseqqFccjLuucL+6WoJ9uDBXkzBSSygJxV1BnXj+hm7lZUKBhv9NSSRIwT45ziTsYsURHmM/rE/PG4e9IoXDmgPa7o29Ysc/dW5eJ3XZ7Fr557BFcP7GAWlicXTYqVFcrn2LyGOxtG4/N1A50FJL0Ckrxn9y5IzkekXPj/dePX/ERKR0qb5ER9O+fMQdGwYZaaNzs7G9zzIu7QezCk/guBfS0dcVi2+S8JlrmQ37ZrL8qX7kXbKYnUuJZWd9JuJJSPMrQfWYneY+ZgWM1a5DS+
jaL5OzFmyX5TJMauPIKKVcfM0vFK5iQ80z3H+DuOqsELA8vQun8pXhk+2ZSP8WuAcaubMG71MVSsOgoqI7SU0CpSMGcbRk7fgAETl6FL+YqkEpLIuHUIPavexJT62Vi9ejW2bdtmgfKKB9E8hPcc1sXnKXl4hLys+7nymLAch1WbZIQY1cNxhPNUvKLs4+F50pWFiaPpMP6a9Yzs27sH+xb1RFP+h5JWAgaaj8/viXHjKkz54GfD/bj4XDORgTaVPLBtFjbU/tEW6fOyvoPpA76Oqm5fsF3RjxR/HCj+SBQLwoD0hCsW3+9SKOy9H1o5UiwUTgmRwuL4E25Ux60bVpelg/zRj1zh90si65VS756ScBcr+Rh25p+Guh6nY0qvL2Pm0LOwOP8HeHXCBdi+9AUcO7Qt+dn4OQ/nmvOqeWafDl9mW4hj3eN8v2R42hK/sJ5fY4qKx1M/vrDsDw+P8WXhQ37VPa8vC6c28ZOyTYf646hkiDekcZiwLcSoHvLF1UNe1UlDfl2rb/f8vux54sqeNyzH8bPNjx9iVE+HDdvF72k4hsYTZb8OlZMKiGdSJ5lZPpmT+FCGcBo0Tl6IIw/bsrKyTAGZW1GRWGD5hYEWAo5qEeYXHXFtzRYcToawHsfyu7OAVOOJhRPsF2D+mstfgqmUPD5/PJ5aWoknFk20un7ptUXd8mrrZxtjNrjQSpQTiy7DLaScSZHchHwu0hK8E2wRZ+POH2/lhAWk2uQ1C0J3CggtHnuiX+T5q/xB7pA9ezaYuYob1L01tQabaqea6xB3zuYGhFRUGMvAXdG3z5iBrfX1xrtxyhRz5VozYQKWl5djYUkx5hTkYEbOMDSM7I3Zw/+GJZm347XhF2F71hk4kn1KMuCdv3JVZJ6J/8h8PJH+dWInfDa/Lb43sA3OznwZ52S1x7nZHXFubiecm9v5X/fM6YQfZXXAF8u6piggDw3tYjE3K8aNszlLUUCiZ1vPHan9Ch89m9Ye8Og5juuTHD3P5PVt+oWfbc0UEFlAqICM7I47x43Aw41jIEXBkhusqMbTUYzR00x2sKLG6ufefaO5F96U09ssH7958QlcN6IbHplVjlsK+pnV448ZL+Oi5x8FLX1MqHBLYX/8eUVChp7ZpOxoDI5pz/qiSbhrRjE+P31gFKvCGJAWFJBgzni/XgHxyoXajSfAaR41h+Kha+jamhr07dEDnTt3xvTp000B8O/Plt59ej+G1L8nhQ95VPfvXP6yzYXigvW70aXuIBj8zb05mOXq2dwFeH4AlY8q9C6ba65SdJsqXrTXlA1aNMatOmYKBRULulg9/FJ/PN1lJJ7tXYBnexfimR65aDd8MnqMnpFQPNYA5D2uiDSZ8lK+4jBKl+43xWZU/ZsYOHkluoxdjReq9iev6aXqfcisWoEZs+aAv7AzHoQLXH8/usd0VHMTR9NhJD/EkF/HibCelxjxq8wxNI76SH2bxvLU86rsMWzToX7W1a62lijlWeD520tweNKFSesHFYRZufeibEwp6urqzPLx9ttvJ5MFpMTpHNqOHSs64LWJP8PigrMxK/MsTO3zFUzt/jm8mfVJNJWcapYFFJ6SiLOIlJBEYHqMchEpDSkKihQP0RMpKApQlyzRSDExBcjv+1H4QTQVfwQHiz6OxQM+jeruX0D9wK9j3qhE8PnGaX/Egc2VaGo6qilP0nTzy7nV55VkDgrpsGoXnvW4Q3wt0Tgc24TxY/hyS2MK7/lZ9vV048Zh464lDi++E9E4rMY9GSx5wiMOp/sVDTG+Hof3beSNG1ftnjcsa5w4PNt0fSFO9XR49XtKWaE84XWtoiFvSzhikgqIH1DldIOoPx1Nh9NFpsOpnRfNHYcZhL5owgRbPOnLX4uBJOViIVowaEFhCwS/eJOS4flcvxZ7Wph52ZTVXAFJuOmks4DcU5UDLsLuGj/S4jEYz8FFVeKsxtPLVY4o68sTQePkYfD6bWVDbNFFqytaDG0AACAASURBVMYfer2IS9r9BQ/WFycWe8YfLfwirMmO5GpB+PSKGAWkPsNcsGQBkQKimARSBknTIkJlg8qIslX5zQW5cCUfF9JMq8sNCMlnSkljI7ZMn26WlPXV1Vg7cSKWjx2LhSWjMbsgFzOyB2PmiPaYm/k4Vg3/A97J+hoOZJ+K3gN/hw8P+3NSAfl65ou4tMuz+GO/trgmswuuH9UTN+T2tkB9Buv/K57XZ/fCtSO643vF3SIFJLEXxn2DOlrWMbqwbW9sTLWAuIWxFrf+GUyW9Rx7qmda/weqO8sI8cn/H6eMsK25ApLYj8MsIGkUED1r9pxRObBnu9rcBhn7dN3wbpa17bwHbrH4EWZ9e6i+xJRsWv5+3foxUHG5vHtrs4YQr2c2IZP/D4n/k+QYVLZTFJDEdbaogITzpLkJ5iD5/lA/ccFnos9AiormlEp7Q2kpOnXogL59+2LNmjXJL3q9B/le07svfCHrnSceYcSvOvvTYdnOkwv47Tt3Y+DMxEKfwd+tqw/iuaJVeG7AWLQbUYk+ZfOROeV1FMx5x2I46D5Fi0fCktGE8auB8aubTCm585kuuL9NH7QdNgFPdMw05YOWECohVFjGr2lKKiBeEaE8WlIYM0K3LrpkDa5cjU4T1qFN9YEoJuQQOlS9g5KqmVi8eHHSFYtWED8n6e45br40V5q7luZMvBpLGNF046pd+JBKXktUY8RhJT8dPsSoLv4T4alI7Nu3F3uXDsKxwtOSgedv534LFUVDUF1djUWLFll2N58kQPIT134Mh3fMwaaGG7Gq7FwszPkeGgd/w9LXNvb6LLblnoamEgakMy3vKWBWrCanhCQtGFISIiVD7UmXK2/ViHiSVo2wL6yn8CeCzs0aU/B+NEWuV0eKE65XNd3+E9P6fgWzh38LS4vOttiPHcteQdORXVwhaoqTVHPREk0yxywyhWvps/J4Xxa2Jer5fZmYdGP6do/x5Zbw7GvpaOl61ZcOr/6WqMeST0c6zMneL/GeN5QXjsO6xg95w7qwnnosxw3HVt2PE+I1jnhVD6nHqRzyxNXjeNkWx6s2fy3CkzZTQDyjwOloyOvr/oL8gJLledVGyoN9gwYNMgVk2eTJzRQQWwT4xYJfaPjFVlC2RUPEm1xIRHKS9WjxkVxwxCogiUVlnAJy7bCuoLsV4z6oNNw2ZgjOuf0aW1DRlYpxHI/MLDPl4p7KbFBZobWDAbbso588U/ISd2dFJr53w+X26zFdrqiA0OJxz+RR5t5CHGVyf5C7xo/AwzPHgG4u3JdEv0zHZsGKsYBQ8dDil7/ssm5KRrQnB8uH5s/FzroqHFm4AHtnTDe6b1YDtlaPw57GOuxuqMWWyrHYXFWBXTMazFJCBUZWkjenTcP6qiqLO2E8ybyiQszKHY6GrN4W1N560HP40Kjn0WpMW3PB+ubItuAv5sz2xUB+uqTx3hPzxrn7Fzsrs23BzSQD54zvF8UpJOJA7hnQAeMGDDBXtVABkXLgn0Fr0yLYPevkMb5oEa3PLPbZ1v+FeKP/B2FImykgUWxMSwoIlYL
wpLLLz4X74nz3ustwU16GZXCjMs2NMM+57WpcPSgR92QKyIoaXPzyk+auJQtI4pk9bg0xJYTWkXQWkIYTW0A0pzZv0TzY/7abU5s71TVXjvp3gX0umtdoPvcvWIC8IUPMFSsnJ8dcVvQuezfvvnQYymBfuncm27lw5yZ/jWt34yW3ySA3Fnx28ES8PHQ8epXMRubU1yyDFQPIx644rnxQmRi3OuFSRWViZO16dMqZiu5FjehZPBNF83aiZPE+C0wvXrj7uMISWEHMGhK5ZiVcuQ5aQHvOjE3oP3k1Xpm0xcWDHETGhJWond5gMTQ7duywX+f9ffoy5yA8/fxqnkRD3rAeYlX3fOH4qmsMYUSJFQ+pyr5dvKEMz+OvwZc9Nix7vrgyr4Uuenu2rceBqTclMkHltMLh3A9gZu59FnQ+a9aslM8izi3OrvvYIexZn4V1k36B5SU/MssBXbGqu38Rs/p8FnvyPwFE8SC0hIDpbqmEJGNCEnEhVChM4XBuU6xLGUkqHCkKRWRFcW5Z4pOsRD3KdsUxOTazc9meH8x6dSo2jPgkqHzQ9WrGkG9iUX4i89Vb9dfhyJ6V/K9LLiY115xXu/+YZ9HPuXiE81R8/tkI28Sv8XydbXFYyQgxIdbzhWXxinpZ4k03tjBxlFjidEqWqDC+zjIPtZGeDF6yToTVfXh+X/bjtlQOMXHjxuHjcGqL4/dtnk9lUfLp3kQ9lmUdYflksJIlGaJqT/cZqZ/8LPNopoCETL4uoG5KA6kuXlHx20jRH/Wlo2SjvH79+pkCwmwz+tJvtohwyoL6tFgQ1QJE9RSqRYdbUKhfOI7dzAKSJgbkiYWT8IObr7BAcLqs0HeecRo////F5403hY+wNT/bdmNMnpY3td44bbkqf+EgZNM2sY77tZMt0EeIz8PCMUlxQm5nM1MSsWfcO7JjWAuKfUf/c+bKe1+Rn5J5htRnVZ+eeYS/HxiLPP6SAJFwHzT3KrCEJZeFPpYNxS2E/3FrU32KdGOdBSx7jP24d3R8PNZaagsLnk1YiKtQWbB7FgMgN0SwekcuiWUfSKSD1fVqMAfHz0mz+ojlIzn00b8l6NH9eRtw8qv/QokWoKy42V6yMjAxs2LAh+aX2P/0O5cKSC/d563ahcy1dnBJ7ftD6Qdcrxn30q1iIUQ1vWjYrr3yY1YOuVKZAJNyvqFRQ0aAFg8HlLJuLlQWcU7mQtYR8iZOuWywnXbGSAepN5q7FmBLuJzJ0ymvoULnVlA/GpzAr1sCKeRY/wzSvDHj2GbH+p+fu/yX5spLtWdgLTXnvM8vDsZz3Yk3exagoK7DAc34G27dvb7Y/S7p5OrR9NjY13oK1FedjccEPMGv4t0wJYUzFvL6fwc68hDtWYpNC7hFyiu2UTouEd8uSBcPTpCuWlBEpIkklI7KeqJ8xHjpN/vsSlpeCD6DJ0u1+BHS7ouWDQec1vb6EhsFnmvVmefE5eH3ShdixoiOOHtr6v/a/m25e///2Eytp//8c/fvPUbOd0OM+VGkxotJixKs6qdpCKizb0x3C8Au1G39NbNcOr0+dCi4KtIBIWSB4hcGX3WIixGnBYDIdXzq55G+mgESBuqECwoUqNw6kVYNuVrRs3FuVgx/c9Efbq4Opebm3Ancvp5sUswxRmaAl47phXS3VLtPzWiahdn82l6Mf3X6tLdIol24tdO/6fbfnzQXrwboi/PJvD1qgLxUeuijx1+Zz7rw+GS/y91pA/HxozrZWV2BG96cxq9dfUfXCbViTPwAzez2DtyeVmCKyaWIx5g94EW+UjYpVAjWXnM8jkYsXFRJaRhgz0ruqCB+Z2iuRfreuN75V2CkKhB5ue0/QukMLExe13KyRsTF0dfvqhefhT6WDzBXtR3deB+4ZQUWAyhxd1qjgXdLuz/ZLOpWU+6fkm2WKrmvfvvISPNxQYjxcDF81sL0piXQbssDr5dV4dPZYk/UkkwrMG2f8XFw/NmespZLVOE8vqzbFhSlrL6gbEVlAEu56LSog4XMY1TXvKc+qeD2PytH/AHE6k1jxOEqed2sBURC6XLAS8RuKR0rcP+fRlJMoVon/F6ZoLK+2TFpPLklk00rIUvyHs6ZYYHvgihXGgJyEBcQ/w1aO7l1z4t8Nybnm3Ll5TMEFc0c+4Uh3zJ6N/CFD0L59e5SWloJ7XOi9J6p3XPgOVLso+YURZR8PUZW5sNyxaw/GLN6d2POj5rDFfjybPRcvDq5Aj+KZZskomLfDrBEWcC6lQi5UUT1p0XDWELWZYiJ+WToCpcMrICybxYQxISuOoGTRXosHyah6Ay9UM0j+sJ0dJr6FivETLRZky5YtFkgvK0g4T6prnkTj5ku8mifxxlHh/VyTL+4QXryiak+H89fhMb4cN57avHyP8e3i9ZT95KeVzDJaVV6eiP3IaYUDOR9DQ8HTmDhxolmhmPVq7969yX1tKMfLD8vHjh7C/s3VeKv+Rqwee54pIbNHfAt1/b9m7lgNPU/HplGn4UjxqYnsWIwLYQxGFBtiu6abi1QUpxEpEAwaTwaO5zCDVnRKwfD9bItkmKsVU+zy5BhUPJhqt/ij2F/0cawc/GnUdKXb1RmmfMzP/g6WFZ9jAfXblr6Ao/vfiL1fP98q+znWPIn6eRK/b2M53SE+4UKqMeLwworGYeNwkhniwrqwbPeH+EQ5brqxW8J6nGSF/H5clj1fXNnzq19tqqej4tM4Yd3jdL++Tfxs84fnYTkdNsRRRohV3cvwY4Vlzx833xojxKld+Dgahwlx/jpTLCAhIweIu0C2pzviLipsi8N6Hn75dOrUyX5NXDdtWtIakbIY0CLBKR5cBNiptqhuOPFrEcG6W2wkedwihW2U10wBiRY/1wzvhj8VJxa+XNhyoXXXuBG2oSBT6tLNhArB+Q/dZnt/0FpB5YJKBH9lZxAu9wGhNYRuVNzh/E+jB+LSTn8F3bWuH9HdLB+MDaGiQeWFygpdtK7L7GoLbm5eSJcrZsjiviHcoJAyuQjmAo9ZtrgYv6d+ND7Ina5PxgKi+RON5mTX9GqszuuHTRNGm6sVLR8rR/XC0syuWJHVEwdmN2Jt/kBsqSxL/cxiFEh9VkeXLLEgdqb/7Tu1BB+py4D2/vhWYWfbjNHiWxpLbZPGCx69w/ZP4fwy3TCzin3togts3xUGOtO6xP0luBcLeWjRuHtSts0jlQcqh1dkvGz8nNev/+anli6Zc0bFhfN69q1XGf7Kvm1tXwvu18IdwXnSvYp7r1AJ5OdzeY/WuGZwJ9sskkognwHO/U+nvwsFRPMcR4PnUc+kX/wm22Ke9+RzHSObn0GsAtKQgXQuWFIaTPGIMmCZMuKSKKQElXuemNgRKTIh5VyqzayJf6cCwnsM5yDZFvaFdT9nUV8SG70b9DmwnbFTKyor0bVzZ/Ts2dMW1FpI6/2mdx/r/lC/qH/v6oXt+VkmL/u4sNywZSeGztqbXNS3nrgDzw6tTlg/xi22HcuZ8YrB4XKjMmXBKRBJ5cEpJ0nrB5WJpPJBS0nCWpKQkbCgSOHwbaa0RNaUs
qUHbZPDzKnr8HJVYoPC1jWH8fLkncgpqwLjDzZu3JiyAA7nSXOgeQqp5krtmqeQqp+0pbkWXzq8H09lXWMclbx0NA6jthCj8Xy7eD1lP59DZkjbvnUjjo4+PZl9cFfu5zBh9EBMnToVK1eutP0+yBc+t34MjUtq55F92P/mGGyYehlWlf84oYQM/y9TQmp6fMmsDWuHfgr7Cj6OppKPJhQC7phuisj7E1YKKgwWIxJZRlysSFIRkfIR9TWZ0sG4Ep4JpaOJbl5UPCi76ENoGv0RHC0+FVuzT8PC/p9BTffPY0rvL6Nh0DfM8kHlY+3487Fl3iM4smdVMutV3P2yTffMMg/RcL49PsQJE1LJiMP6tnTjqt3zhmXPk248Yvx9suzlSIao7xPWU9/vMb7secLx1Ed+Hqz7Q/3paMjrZXiMH1dlzxvK8VhfFlZtkqF6OhqH82OqHOKJE1ZUYwojGmJV9zLURhoevo9ljad28ntcWBafqCkgGkTM6kxHxa/B0vH5dn+hwnk5aiMmqYB07Ih1tbUpv+aGiwm5RTRrd4sHWzSkW1ykayc+WtA1U0CiGJCrM7vilqL+FgzNxTDT3dLdh7+I0xrBmAXufs7NAfmLOQOo75uSZ4rDo7PLcffEkVY2BYEB6ZOzLXaBO09boHVlTmJjQQaoT0q0cRHNQHPGRPAXePJxIc1f+rnx4d0cd/po+/X+qcj9iAviu2oLjysgY7qif5eOti+Fz4Ll50kLLd92dNECHJgzw9yrdjdMNXpo3mywfHDuLPuc2H94wbyE1crPLctRXbI1v1wE0x2rX+0YfIRKUmRh+lZRqgLCQHRaQOjmRkVBi9QLHr4dF/7lAVM4fvXcw+Zadc4d15mSwHgDKnhn/e6X9llQeaCb0A9uucIUszN/8zP7vNjOfVYol9am+6cV4DtX/84UFionnO8L//qAzTfHp7sRrVjfv/EPphAp8L+5ApLY5PCEFpBofuzXdz9v0XOsORPVXCZp9Lzq/0ALY6tr7kUjhZCymikgkXthqIDQvYzPE0/eIy1EVK4TAeTH4zWoNCQ+F7XJXYvpqSfi3po86+ezSjzT/NJ90JQMZcNymd0kjxaUd50FS/PoqeYgarP51rtCCpzn9/OqdvGrTzKXLQNdscpGjjT30ZEjR9ovzuG7T+89vut0+Helyh7neT2GPPTrX7FhB7rUHYhcmw7huTHr8fzg8eiaPx1Dq9cgf862hPUjynjVJa8WnXOnoVfpHPy1Vz6GVK5G/3GLE+WqVZHbVZPFjNCK0mbgGNvtvHz5IUvD2yGrCv3HL8ZzGaNNqaB1o1tBPZ7vV4wRU1+3vUESCk3CfYuKDIPeixftMWWoU9XWRBxIzWG8ULUXA8c02q7bdAFi9iW5YeleQ6o5aokS4+dN5ZYwmvNwPC8rHV5YjZNORjp8SziN3xI2HZ7t/E5lXMfONRPRlPf+ZPrdjXnnYmx5qblfrVu3zp5XKrS6lxONK76mo3uxd0MBNk79PVaP/QkW55+NOSO+jfoBZ2JKzy9jSrf/xJw+n8GmrNNwlDumF380sWs6LRSFH0woDcxQRQXCzkihiJQSKSeidCFLKB1UOKR0yOKRUDyaij+GA0Ufx+rBn8J0ulx1/wKmZXzVdjqfP+q7oNvVaxN+asrHoZ0Lk8pHuntO3msUwxH3+abD8jPw+HeL9fh02JbGJp5nukP9J6L/CF7XF8o40Zj+un1Zck6EF18c/XuxJ8KpP25Mtqm/JSq+UEY6TPh8kS/uCPEe5zG+LDkhlnWP93wqk8bh2GYuWOpsiTEciHXPb5VoIF6QvyjJFxVvSDUGX5ZdunRBx/btYRYQ/wt63CIg+jVSi693Q5MLtWgRkbIgieQ2U0AYWNyQgQtHdsLFhT1x2bhBuLJmJK6uy8E19Xn/UufV03Nx5ZQs/G7SUJwyPZG69OtjuqBv5w6ozcrCa9E+ID4LFucvnIdmc6r50uch6hZnKTLS9GsRnKKARBamOAWE1iQqCd+99jJT8uj2ZDE1k7Lww9uvMeWBViIqHow9YJk7dH/jkl+Y+xQVFfJz93gups+67CL7pZ1tDKBm+y/+fL8pcN+6/Ne4rMtzZmF6qL7YdrOnskcF5Mklky3An5hfPH0fft+9tS3MmykgkTJ1QgXEz5ssc9GccY7SKh5uXsUjftVTlBT9r0QymykgkXIdKiBUpmlpo+XnouceMWvbNy/7lQWf857pckgFg3EcVp9VbtY3xiIxQJ/ubPdV55oMKtwXPvMArhnS2VzpuIGhbTS4aKJZj8xtcP54UxCpPFNGcwUkkV2spSD08L7tuXZzmvJM+3aWVY/my55l1y6s5ljPOutbZsxAv549zRWLvyz7X5P1ztP7k3W9F09EPa/KWljOWbsNL1raXabePYTnCpfhxSEV6F06FyPr3sDoBbtNAeBeH1QMepfOwbfOPh+PthuEp7tk4Ts/+jkebTcYf+tdiAt+exWyG960zQl/e91daN2/BO1GVuKS6+5B7ozNyChfgG9+/zw8+spA/PH2x0yRebzDUNzxl05gml5ihlRKiTkeF2IB6UsPoGD2O+hT82ZCWaIbVvUB9CxbiAkTJmLFihXw2bA0XyFtaa703RNiVBe2pe8p8ZKS3x+sa4x0sjy/lyH+kEpeiPP1EKO6vw/PrzL5qNBRsdvX+HQy+xXjLebm34dJkyZh/vz5eOutt1KsTx6vsdJR8jYd3Y/9b0/E2zNux9qKC7Cs6BzMz/4eZg45C7UZXwOtITXdPoe5GZ/GW1mfxN6Cj4MB4cyWZTEidM+SQkILhp20ZiTOpogm6lE/LSnmZhUpHSUfxZHRp2Jn3idAq0t9j8+iutsXLNMVlaHZw7+NxXk/wMrSc7Gu8tfYsexlHNm7Gk1NqYH26e7Tt2t+QioefaaqexpifN3z+bLkeV6VycdD/OJVXVT8cVQ8LdE4nMb1z6GXoWs5EdZjVD4RVmOTX7zCiqYb12PFG9K/F6tr8XjK1hGOo7pwnlcY0ThetYmKV9TLE4+nJzMuZXmMysQKr/FEyaND/J6mWEA8owR6ZpU9X1gmjy7oRDK8PJVJ+aXavXt3S2vJ7DLJL/s0C+Pkl79fLLgFnfCiWjRwkcGy8Oz3feo/roDU4yd1Q035YGDxR6b0xken9sbHajNwal1fnDq9H06t/xc7eU11ffHRWsYiJBSQM8d0sZ3Qp2dn4/WqKovBoAJCy8XWqrE4NG8Otk+diCML5lt8x96Z07GhPAebJhRb6t0tleV4oywbu+unYE9jLXbWVmFXXbW5XpGPsSKMCyFu38zp2F0/FYcXzMeGsbkWHyJriea3mQUk+hXeKyDMKHbDqF6mWDB4nAthup/RKvLzJ++2OA1mDrvgkTvM2vTrFx4z9yumhaX14r+uuBh3VAzDL//6AH7xl/tsz4rbSofg21ddYpaq37Z9ClcP7oTLOj8LWlTovsVYnTvGZprLGxWTSzs+YxYQxo3QqsJf5Gkd4V4tTDFLN7jmCshJxIBEz7WeRb/YVZs9u9GiWM+xFsl6bpPPrnuOKUvtcbS5
ApJQrlMVkFJLBkDrEj8H3jstPlTqqABSCaPrH1Mm0xWRaZPpnkbr4M35GRbHw3rCffBvZoliXBSzvl07tLPtik4LHmN66DrHz/S20kE275d2SMx5MwUkekZaVEDcOyDufzxuPpJzG82b51FZPKqHlPFN00tKkgHpXNj5d6HemaT+ved5fLvKwrEuLOM/9uzdi+rl2467X1Xtx/O58/BK5gT0q1iEnMZNlj7Xx35kN7yFH5z/a1NE8mZuwdk/vdjS7o5euBs/vuhy9CqZZdhzfn4JRi/cZRsTXnTln/DK8EkonLcDX/2vsy2t7/Bpr1tw+6+vug23PdUeLw4ea3gqLQxsTwSnJ6wgDGqnBYU7rw+cSgUkigOpPmS7pY8pH2txCO+88465C2k+dN+6Z1H2i0dzFEfF7+ctHU7tGlPyJENU7XFUPKJ+XJY1hseqjRh/vBus5AmvOmXTSrZz6wYcmvTrpPXjaO77MLmoJ6ZMmYJly5Zh69atKfE3wnuq61Sbr3PPDCohh7bNwJZ5j+G1CT+zhf6ivB9YhqyGgd/AtD5fsdgQblo4J+MzeH3Yp/BO9idwcPR/2N4h5qJlCslHEq5ajN8ITovpsLiOjySUl5KPWVrdPfkfx1tZp1mcR33P01HT7fOY0uMM1PX7GmYMOQvctX1p4Q+xuvw8bKi5FLvWZODoASaLaHmzQX+PvG/Vw3lmH4+4uYlrE95jfFnjCCvqcSqrjzQOpzbxi5Jfh2SQV/ySp7rnZVkY8fm6ysKy7g/VSTWm5xVe1I/ny8KLTNhyDQAAIABJREFUTzROlsf5ssYPseTxB/t1sCycH8uXySuMqNqEZbtOj1Wb+IUnjeML23Wdoh4f8rLuT43pqcqeL13Z87LMI443JQidDJ4xboIkxBjdH7WHNG6iNIaDW1FYKiC9e/c2NwbuNGxf8MHiK7mgCNq1OEihJ8MTLQJTcNEiRArI5sYGnF83LKGARAsgW9RHqUu1wKd1JOWMfgVPaUvhSSz6mskyhSHqE3+kRCR5I2tMiuyQJ7i+s8Z2x/CMnphVUIA3ampsV3NTQObPRX3nx0yBqO+SoNM7PYxlI7tjSWYXzOv/AjaOzbH4j4WDX8GCgW2xtmAQFg/tiAWD2uLNcfmY+sq9WF86Ag1dHwd5Nlbk4dXCwVgxqrfVXy8ZbrEimmd+trEKSEMGQgXk3socW+zSnY2B4dxjghaJ20oGm/sbf1nn5o9crNL1jQteUrr43JyXYSmKqUSw/KvnHjFLCst0B2KyALrAUebtZUMtuP2BuiJTKOg6d8fYYZZU4OGZZYanmx3HoQsRFSJmeGK9uQJyYhcsLWClUIR1tXPO1Jekrk187EsqMVyER4q2zXn0v2A8cS5Y0bPjFRDeG7Oy/br1o6Zw8D5ppfjmpRea6xTnjO5rZ//panMZPPeuxGaEdENk9jYqFpzTO8YNBzcq5Of3k/tvtrgZ7rbOhAJ3Vgy3rHBUAH/62J02/5JDl0KO+Y+4YCXnw9+//5/37a4cO+cx82lzHH0WnNt3Zs5E1sCBZgVhgC/dWvh+C999vk3vv5DqHSqsx1AB2b17D8Yu2m67njMDVuvJe/F89ky0H1mJQROXmRsVs1AdV0CakDtzM378q8sxpGo1Shbtwfm/vcoUCsaIXHDJ1eiaX4fOOVNx/sVXJrNgXXbzA3imZ65l0vrG936MrOkbbPd0bmp4/sVX4b7WvfDKiEl4ulsWBkxYmowZUWrepBvWwt0YWrcJL1Zzw0RaQA6jY/lqFJWU2y/xmzdvTqbjtQmL+RPOUVxd8yXq501zGocTn4ZV3dN0OPGEWF9/N2MLR9rSmJLp+YVhH7Ok7Vo3DUfKvpHcd2N7zhdRUZqL+vp620CTlqfQ/Uoy/Ngay7cdH7fJUtge3r3cMkq9UX2xLfiZppfWkFmZ3zK3LCoiCYvI5zG95+mYm/EZrBz0Kbw58jTsyvsEDhd/HE2l/wHEnGw/VvIf5l71Ts5pWJf5SSwZ8CnM7P1Z1Hb/HGrM4nEGavt+FY2Dv2muYFSCuF/Jq+MuwKbGm7F3QyGOHdrWzPKh+9C9xd2r+sQbUvWThnjVQ4yve3xc2fOG5bgxvQzxs80fnqclGR7jy8Lr/kTV7qnHsez7WPZYXxavpyqHMnydPOmOcDyPY9kfqnvKsq5RlG06Pd6X1R9HJcfzs0xeUfGISo7q4vPUwJEc8QnnqfjiKPmITYePw6jNj6Fy0gUrjkkDiQokXlK1karuL86XxSs+A0R/1EdKBYS7CnMndL8PiBatflGgNi3IVP9HqJfFMhUQZmvaMXsW/lpXiMunDsclVUPw64kD8KsJ/fDL8f8m54R++NXEAbi7bBCq83KwasIEbK2vt13PEy5YS9DY7UksG94VE/52HRYObodZPZ+x1Lq0jLw2eijmD3jJ9gKp7fAgZvf+G1bl9sWEv16H2b3/iiML56H6pTvx9sTRmNuvNapevA0LB7XD8pE9MOm5G81CMm/Ai9hROyllIX0yCggXrVyAciGqQOiUDeyiIGi1KYA5QRMBzcx6xaDxK/q+jKsGdbBNHy3GQLEHlg5W2ZiIURyDAqKP73mRIt/GTuxX0UwB0T4gLaThtefNLWqbKdfRgtg/9+Ezas+7Xzg7eVJMUv4nov5mFpBIofUKCOf+ij6JwH26WdFditaeb1x6oe29wqQKDPb/3vWX446K4aagUfGgler2siGWQIEJGKiQMEsZlcefPX6XKX1UAKmA3JDdy9zqaClhml4qfD9/+p7EZ5QuDe8JNiL0c5Ry784y4ueNZWKEa4lan+d1MtnHgPSlkyahW+fO6NOnD1atYoDr8XekL+t96N+BcWW9Lz2WrjXMbFQ8f4e5NDGou/Wk3Wid1YiO2VMwpGoVCuYej/+g+xUVgVH1G/HDn1+CQZOWmwLyk99caZsOUgGhMkFXqswpr+Hsn12MnBlvm9Xi55ddbzEl+bPfwZnf/RFG1K4zWdyY8IrbH8dTnUdYqt2s+o0W62ExIMkgd6bubbIgeG5oOLx+M16u3gteL5WQ9uVrkVs0BrNnzwYzMWk/kPCefT1ujnyb5tVTlT1f+P3m51fjeeqxKvvvOc8blsWfjob8qof8fjzfJ35R9vH7dO+ePdizoCeOFZyazIC1IvdyTBhXjrlz5+KNN96wrG1UaInxh5efruz5rcyFypE9OLi1FlvnPYL1VVREfmJuWQtyv4c5I79tsRjT+38dU6mM9DwDNd2/aFaLqd0+h2ndTkdDj89idu/PYl7GZ7CgX+JkHMnMXp9FXffTMbXb6RZbwtiO6h5fsuByKh0NA880RWfeqO9iUf4PsKLkXLw6/mfYOO1K7FzVHUf2voZjx47/INDs2t26Jt08h3PkZbDP43xZ8+f5w3KIF0Y05Pd18WhMUbWLN6yzXW2epsNLThxOGFHJ8xhfZr/n9eU4LNv84fEnwnocyx6rsTwVv9pUT4cNx/f8vix5Ib/aSf3h654nXTkdlu3CcGyNL+rH8TJUZr/nlSxRz6eyaIilnJQsWBLyz6a8sAEDBpgCsjzaCV0
LCC0KVE9Sv/hyv2yG/Cl1t7CTHOt37azz5AL9wPz5eGNmA5bXTsH8qomYOWEsGirKMH1s6b/FWT92DBrHl2PBpPFYX1NtGwJyP47DixaZksX7XJXdB1Pa3o25/dpgcuubsb50JOb3fxErs/tgwYCXsXJUb1NAaP2Y3/8FrMzujbpOD5tVZPOkEkx6/iasKxlhFpE1ef0x/pmrsWJUL9R3ftQyZVW/eLtZSmyeowVfvALSJ8UC0lwBkVKQuiu3bYyXJtsSYwm40zzdhB5fMD4Klk4ETStzkykdXPBKKYk22TtePz6uxmLcgwVRp8uC1dDyPiA2F+6Z47Oo+dFzmUIDXuOPFsBxuLBNddLmCkjCepeqgIwxywbjZJim+MbsXmbRoAJCNyvuTcM9aM6541rcMKqnBeWzrg01uU8N3eWuG9HNUlJTufjRXdfjxuze5n7FzGZ3TRyJH997k1mpmCGOAeo/vu8mC3a3uQ0tIGaBbHkfED9nuucUa5DmOZpP8ZCq7GWklB1Pkjf4XGyH9MGDzZW0vLzcFtV/77uVL/EQy/ckf7HeuXMniudvt9gPLuZbT96DNtkzLDaDAegFc7ejbNkh29eDe3jQEtJuxGRc8NurLUsWNwm85Pp70br/GJQu2Y/LbrrfgtFpGbn72W6mWDBQ/fanO6Bw3nYwgJ1uWtxdnbElCXmVuOWxl/HnbqPwtz6FGNXwVor7VWLvkMTYpYv3YUTjFrxcvc+uuXXNIbQrX4PswhLLhEUFhAHTXDiH93yy9bj5+t/AnuwY/1t8fEZMSd2+Cfvq7otS27bCkdxT0Jj/BCorJ2PJkiUpmw/+t17b0UM4sm8d9qzLwubZD2Hd5F9hVfl5FgBO5YBuUdzAkDuR1w88E7X9voZpGV+xmA2myp3Sk+eXgjNq7/0V1GZ8FXX9v25ZrWYOO8sUmwXZ38OSwrOxovRcrKk4HxunXYFtS17Ggc3VOHp4h7lc/bfeo/vF+2Tk/r/4bP4j93wyc/p/jecfma9/Fvbv+QySCghfVF4Ab8Ifvs+XiRNWlP06VPbU41UWljiWhwwZYgrI4okTbTHQ7Fdh/eIYfOn7xZhfMCQXCcI52lKfyXNKCC0hVET2zp2L3bNnY9esWdg1c+a/zzl7tsV87Js3z6w6XIDSwmNzsHSpxXNwl/N3asZj8dAOODh3JhjzsTInwzYe3NM4zVLxbp82Ca8XZ1psyIbybKPc/2Px0PZ4syIfG8tzsXdGne2UTsWEmxjSErJoaEcwna/mnDReAUl1wfIKSCLLUkLxoFLBrFZcsNItiqmM6UqVYqEwZSKybKQoJ4mNGpnemDEIdLliauTLox3l+Uu/t6B4i0hi0zxaRKiQJE5eVzMLSOT+dlJB6MFCOGm5iJ4//g/4ebPnm32B8tGMh/36P3FUc7971iwMmVaGD0Vpmls19GmWhtfc27jPTHYvc7e6rzrPlAXONd2rGAdClzfGhrDOvVro/sYUyreNGQxmdrt70ihze2PmNlo5mB2O7muMGWGwOTOLca8bbjZJubSGcLPJWAUkcmtsKQYknAfWfZvNm+bDzaHn0TsknPuQx+qSFX0mbKMFt327dujRoweY4YnvO77f9L7T+4+Uh6jKvp9l4SSDCsiOHTtRtuAdsyQwAJ0uWG1yZscqILRCcHPBAeOXWNaqYdVrLDtWnzFzbaf0suWHkFE23ywj3MGc1o7eY+ZarEj+7K22n8fQqtXoVjDdZDD7FWVScaE1pUdRI4ZPfc0sHWYBcal7beyVRy0eZXjjVrxUw00TuWfJYbQvW4WcgtFJBYQWEP0aH86B6jZhaf6IJ6SavzQwaw4xcfV0+Dhetmlc9Xs823ioz9OWcMJIlsepTCXO4j/eXISDE36Rkn63pqizZR6jdc7Hf2hMUcnSOCFVf0iP44+h6che0C1r7xsF2Dz7Aayr/BXWVvzElISlo8+x1L100ZobKSSzMs8yCwkVE7pRNQxKnCwznoPKBt25aEmhpWNh7vdN6VhW/COsKvsxXh1/ATZM+yO2L3sFBzbX4OiBTWg6djj2/+v4dR53neG9tHSE9xrWiU0nI+SNGz/d2CE2rv4/gdX9+PHC6xZP3Pgep7LHx2EkT/ykwoj6vjgZvt+XPT7EkS9ubI9XWVhhVH83+Jaw/jo1Zjp+jS2+dNg4fIiVDFHKkjzxeio+T8Xv24RhG4+kAsIGDxAjqZhV9gI9TniPDcvpsGonP+VkZmaaAjJ//PjkoiG5GHBf9KYgOGVCv3Dyy18LsxSeYBEimUke9btFhPVF7SY3csviwv3f9gzuR/PA+zm8cJ7d1+EF82zum5YusQB1puHl/R9ZOB/Hliy2gPQjixbg6OKFOLp4kbUfXjDX2hM7oS+xNvYRR3nsb1qyBFiW2J/B5IVpeC3e5UQWkBrLksSgcgaa8xd3ZlP61XMP2eKWCgVjPxjfob1QmMmKC13uHcIyMzfRInL14M5mFfnlMw/Yr/fcT4WL4qeWJPb04IaPZjWZP95iPaioMI3ycUUo4X4Vq4BEv9S3qIC4xa8+B1MY9CxK8Yiee86Z8TnKNmtnm8qR4pHCr76INrOARAv70AJi7m9Lq8wFzhQCc4s6ft/JNra3dC6L+kVDOWnbgxiQKCaqJQVEc6j/35S51ZxGc5QyZ2rzPFE5RWmJ5rqZfIc7uHAhiocPt3dZbm6u/Rqtd53eneF7k+3h4TEqEycLSOXizcmsUq2rDqBNwSJ0HDUFgytTXbASO5cn3KHGrT5mygMVg5ZP8oW8x+ty60rION5uO6NHO6xbHIil4k1sSDi0PtoN3WJADqFDyRLkFRRizpw59mt8nALC+9WpOfJU88K5U1lUc6y6n1+26VC/p8KKit/zxI3JfmFIVfZ44US9TJU9VnzpqDCkxFGJ41zuXDMBR4s+lXS/eiPvPEwcMxIzZ87E66+/bpnHfPyH5KS7Zo7PQ3whFU402X/ssCkDDFTftaYv3p55J96Y8ke8PvnXZq1YWfZjU0q4PwfjRpYU/hBLCs62tL6LC862MtuotDCmY+WYc82iQherdVWXYmPdtdi64Cns3TAah3evxLHDOy3Og+Preq0Qc+26VlHxh9jkvcRYPjxW43gahxUm7NP4IfV8xAovGo4nvMepLIyox4ZlYULqserzWLWFlLgQG+JYbwnn8R4bh6Mc8Ydjp+MPx5YMyWFdWE9V9niPicOJNx3W48mT7pAcUY9TWVhdB+viD6kwvl38kqO654kri599PNIqIGIQ9cC4C/KDkVe4OOp5WfbyiGU9KyvLvrTnjB2bWFj5xVS08NLCKqR+oZGyYKCM6ExRTrQo8/1sC3iTCzzXzjY7uehwCw+12+LE9UmmLY5CfsmNxvbytEDSWH7Rwzbdl9r9+L5NclLaouuw8XgNwXzYNbs5T/IF1697Mqp7cPNjcv1cRH3NLCBcBNent4AoBoSKx3n335LY7XxWOR6bUwFms2LGKv4Cz9S4zKLEPVSYQpbZqvgLPk9u9MgNBPkrPGUwUxZjGGhB4YaD3BeEwdeMY/h9t+dw8ctP2i/7P3
vybpPJXdlpHaEblnfPirWAnMAFy3929rlEz0HY/vfU/bNhZfessN6iAjKiezLA3lLsWjrcEygYLSkff2/fMlqqKi3N752Nxfh83cBkkocWFZBgHu05Dtri5vREfL7fl/XZ+TbO8erqavTq1g2dO3dOxoLoHRi++/Se9e9QlYURJZaLRsaANK58Gy/WJLJKmRWkeDU6jKoxq0Te7K0Ys3S/WT4sM9WaRFYq20zQWSgSFouoL2hP2YQwiiNJWjjcxoUp8kMZ2gtk4W70r0tk7eK1tqk6gM6FM1FUVIR58+ZBQehxFhDes+ZMc8X5CA/NUToq/hArfo2hekiFF5WckC+uLgypcCrH8fs2jw3Lno9l3gPdrxj/sXtu56T7FbNfLcu/1uI/FixYgDfffNPiP8gb3rfqur5wTLWHY4f1VBwtDUdw7Mhu23n8wJZp5qJFN6nNcx/DWw13YMO0a7C+5ndYV3kRXpv4c7w64QK8OuGneH3ShVhX9RtsmPoHbKy7EZtm3octC/6Cnat6YN/GUnA/j6MHt+DYkf3JAHNei46wHF6n7lftwpGeDFZ4jwvLkh1HQ7wfU9cQh/Nt4Xiqex5f5pjhuB7jy+LzeJXJx3LcIZ50VJgQn46f7f5a0o0dh/c49ceNrz5PifV44UTJq8PjfFn4kI914T2/L3useL0cyRCfsGE9xHoZwnjq8RrDU5U9H/Fh3Y/LcooC4gfUBUmwgOKJE6w+z+vleFme15fJQ9l5eXmmgDSOGZNUQPTFTqqyLSD84jha+MYtLJK8wSJMCwf1x8n28uIWcibDyRU/eXX6Niu7604u3ltaIEn+ie4xkOHvx5dTrlmyA6y/LmJDvO7pZGg4Dxq/mQIS/brts2B5FywpIFQ0Lnjk9pRf5RkQzfY7yodZ9iVmVKIrDxWInz1xt8UssP/H99xofI/MKscPb7vaUvFy75Dby4dZZq3zH/oT7qzItI0Gf9/1OUvXS3ehH999g/VzR3Tv5qU4kGYKSHQvJ7KAhHPDeto5bakv+vxCfMrnRrxT/pq7YGXgrMJO5i51+9hheLB+NB6bPy6RZpgB6P+EkxYYXsMd9YX4z7oBUSrsE8eA6L41H6J69vwc+z71q03U8+t/IzlG+L8TKeh758/HmJEjzRWruLi4WSyI3qV6V+r9qbp/b4qXPCxz0bhnzx4sWLsJnabtS2SVoltT+Ua0y65F//FLLIi8dPHe41mwTGE4rmgkLBXcr8MrJokUuom2JstoZdYTUyrIF+1+ngwyT+z3cZyHwe7aAyTBb2l4lx3C6Pm70H3arqTLWJvKPeieU4nS0lIsXLgQW7ZssTmiAsJ7bOnUd4efK82XcOIJqfg8VZnYkN+3kU8H+XQI43nVJirekLI/Dse2sJ1Y8uuQbE+JMQV153bsn/y7pPXjQO5/YEbh06isrLT0u5xvxtxovkMZvh6Op+sgT3iNvs3jhAGoiBzDsaOHcOzIPhw9tB1H9r2Kg9vng0rJvk3jsXfjGOx5oxC71+Vg97o87H2jGPveLMeBzZU4+E4jDu1aiiMH3sTRw7tw7OgBy8BFmbrmcNzjYyfmL+6ahSUN+VXXvYV41Q3o/kiW8OQTr8bzdfF5qrLGFi6Oel7fHzeulyccaXh4PskMr5kY9oU0HdbjJTMOq76QxuHJIxksx/H4NmOO+SOs5w3H1zgh3POlw6fDqj3EhXWOoUPlE40rGR53sljJFtZT9Um+6nHU45opIF6AGCWEdZY9j/riqPDCicbxqk08ZWVlaNu2LaoLCy2jjH3xu8W8/9JPlt3CKqVNC+do4c7FhJ1cMESLOc/vFxu+rEVJyrVEMoi3M90YbtEYjq1rMPnRosXLSxlX/V5esOi0e/H90TXFyonk2T25BZTuxbcn24J7jJObts1dq+Q1V0ASqWtbVECW19geHWffepWldaVrFeMUuP8HM1z99PG7zNJx4TP3m6WD1hJaNy5q8xhuKexvbT9/8h6LS+Du6g/UFuG8+25KKCCTRiXKZUNx7t034k8lA83Ni7ELVEwsLazFftTgz1H2LVpBYl2worS2LSkgKc+YnsfoGU2ZxzSfoz4jkxPh7Blwn63GsGfPjZHOAvKNgk64amhn3FoyCPfU5OHBxlI8PLscj8wZ+085OTav4dYpOfjctH6JPW3qM3DG+O7o1Lszpo0ciTeqqy2+ifeUvM/w/yBNPZy7lDoxmlc3d8kx3DPtPy/1k66uqUGvrl3Rq1cvrFy5ssV3qN6beid6Gr57uWjcu3cv1qx/G4MadkZB3cyEtQsv58xAn7J5li6X+3uULz+cdLUy64Xt05FQNBgXwnS6zHyVcKWS8nBckVA63aTlQwpLZOlI9B9XbMQn9y4GqzMdcN6c7Whbsz8RNF99CC9MeAcZw/JQUVFhAdGMR+A9HThwwOIXGMPgTy6qqXgxviGcD86VvkP8vIVlzbF4w3qcXC9D/BpPcjxPWJZM8UqGaMgfVxdvnAzPz7E4P5zDHZtW41jhqcn9P7bnfhkTS4agtrYWa9euxfbt221+Wwr617gcwx/hmLpH3y6M2oRXXZTYBJ6Un+2RSKE4mqCsNzExgc4EX5Mpi8eVDskjTXd4nnRlYkMZqqfDqD1u3JPB+vkLZUh2OipsiNN9EHd8jlPjXUKZksF2HcKHvKprHPH7+t+DlRzJb0lGyBvWTxara9aYmlPV42g4VlhPJ0NjhfxqjxtLbcLEUfGQhmOrLhx5eHiMyuKNqwvvKflCjLCeekwzBcQzssxDVOWQJ6xrAI/zbSF/XL2qqsosIOVZWRaPoEWWqC0Q+MWvhUGweBZfckHgFh6+z5eTMp1c648WIElZ6o+otbtyUma0YAzlJmW6BWKI8TzJPj9GdN/JPn9/7nrZb+P7/lAO6/4Ur6j6ovtJ8vpriMa0ha/DSckIMZoT0mYKyAlcsBRjwFSw3J/isq7P2qZ2fyoZbClfr+z3Cn717MO4IuNl27uCLlW/79bagqav6PsKruzXztys6F5FV61z777eLCO0lnCXde6g/pMHb7XMTLR+UM71I3vgrgkjYBmbJoxMul35IPVYBeQkLSD2OUXzLOVBcxbOqdr/Uaq5j7OAnD62K84u6ILzxmbgZ1VDceG0Efhl3Uj8anrWP+Xk2BdOG47zqgfjP2r7Ohes7uiYRgH5R+fnXeHdMx+HO7RoEQqHDbOMWFxo8xfn8L2nd6Tes2E95OfLXovMt97ejNFzt+AFc8M6ZPEgL4xebql1h01ZG2XCOmiZsKgQSDkQZRD58/2KcdbZ55ulhO2h2xXbmHK3cO52iwkxHmdNkdzjOFpOEpYS9pWvOAKm4B3cuAMv1CSusXXVQbxUugaDhwwF90tZunSpuQS9s2ULtvPctAnbNm7E9vXrsf2117B97VrsWLMG+3bvtl/3/a/24Xy19GXoecOy5rklvHiEZZ0Hqcex7OviE87TEMt6eHr+sOx5Oaa5X+3di91LhiWDz7n7+at5v0bF2HIL+Gf6Xe6QLoVOMlq6ZvL4Ix1G7SF/iPVj+bLw4g/lqJ+UuHTYOBxlCufl+
LLGFfVyQmy6sYX11I8RylGf+Fnn4akwcWOKT3hPhRONw2ssL0dl4URDvMby/L5NONK4U7yeej6OF46pfmJYDg/1p8Olw0ie8KRxMsTnqa7BY+PK4oujnj/dfQtHXh0e11JZ/CEVJu5e1UfqD9VJhWvpmj1/UgERkJ1xpwb0g8Txsc0fqnuaDud5GhoaTAHJHzIkoYBEX/BauMZ9yVvbCRYCzfCSq8Wfw8ctnmPHdRj2c4xwEWnt7OM4GlM4US30W6DNrj+G92R44u4jvOaUur9GX44Z32SfBA/l81qbKSAn6YLFxT+tHrRMMPsS95dgvMf90wrMpYrtDBh/qL7Egs65g/cjs8osEJ0Zmbir98MzmWJ2uKV7Jfa+KXmGYfYmymOg+j2TRhll4DmVELqDefcrltNaQHgv9X3QkgVEz4z/TJKfoZtH38Zysi4lM+7ZIz6SIX5P01lA3lvXB6fU9sEpdRn4wPS++ED9v8Z5Sn0G3hMFyjNOiDEgnft0QW1Wlm2quWfOHHue/D36efXlJI/mzc919FwnedTn5l3/H+LRjyF+DD/3Kysr7Z2WkZGBjRs3Jl/W/n3Id6fqvqw2/55mmQoILQLbtm1D7dK30HEqU9sm9tZo8/+x9x7QeV3XmajSnJlM3iTrzSSeeS/vPctVbnLcx5PESSaZOHGL425LjhPHsuMqN0mmKFKk2DsJ9goWgAUkiEIAJArRC0EQIAASAAGSYpVIir0XUfxmffu/38X+D+4PUpEca97KXetin7Lb2efi/nvf08rOYkJuIxaW7Y2mYV213alK+lMLxRV8cGSC60R4eOD9DzyI4n23bYoVzwThKeoMOhhAcArVhFVVFqhw616uISnqu2WHGnKERQFIah0IA5hUACJa7rC1vv0iptVdSZ1ZUnUDT1Zcx+zpuSj79rex47HH0DN6NA6OHYvDY8bg8KhRODRiBA499hgO/fjHOPSDH+Dw97+PY0uXWnDCII5t9zbxvyFheZj3uErL5sIVlP1DKHxCXqonnW6VeShcQdUl0YQ6mCAnS7SC4pGafnUW12u+jDsGsiRhAAAgAElEQVQ598UHEDat/ynKy8ttupu2PFYgJ1rxEvQyQ52FQyhdBVUnGvERFI2X+y+hTeITymZel+o8nS8T7nDw5dJStmSovYIq91C6ehjKTKIXPnF1ia+37d1oRS9awZCHyj0M5bLOy/Np0YlGUOWiFcxE6/HJw+e9zr6caV1KexjihnnReujpQ129Hkk0KpMcTx/SejmiIwxpPZ3qQih6Xy46QdV5XKW9XOElQeGzjlccgCQh/7LK2OCuri77sV42dy5ua5vYJGfXOVj2wx85CEOcA9LKiXB86DyYI0wnxDly4iVnQk6GcFkuGcIRFE/LO5mi9bxCnCE6RvRGI16ESjudY/5qn8ORboTSWzCuE77jL56ejrLVBtEO4ZXB3jGfSBb5JAYgjZl3weJIA9eBxLDH7c4Upv3CZ9YpH6ZF52Em3Ei+rUVxumQcAblbABI9g2ab0LbqE/VpCFWvPlPePRfia33l+Mv23E56aV1RahveaMrYfVEQSCefAZTdFkwxH92Gk9owwMpEE8I0fNHPiUYxInrjHZVJXsMc3Gf37BRUucHUeSVvKJuBafOmo3HNGhyvrk6bgmXtlb1kn+i51PNrUDYTFE5EK1yzo/gFkDgxnk+7fri+ezeWz5tnoyCcf/9qvV/pPHIh+v5DR7Gw8TzsNHTb3vYmxmzqw6z8NtsaN6/jfHQeyO04WOAhhV/5/mh8+Xuj8LUfj8fr/+B+C0A4yvGV743CD8YvsfqZm1rtRPX/8Zmv4Y8+9jk8MWudHTr4Dz+bhO+OXYCv/nCMnQ/CYENrPzgSYvn+l2z7Xo5+LGm+iDHV1+MAZGTJSSwYPw07//Ivse8d70D/Aw9g4K1vxcBb3pJ+v/nN2Pfgg9j7ne+gv6oKzz//vJ1/wilGDMLCH8lXy7b/u/LRyNilI414sfCt8QjIjZzfRvGmlbb9bl9fH86cOWPTtIj/v2tb/03v5I/G/2aXf7PLa/0ZsJPQFb346GQ4xYXnIfF5DUenOk+ntOoE9+/fbwHInOnTLQAZ7sc/dDRiR8D9+Mc43smIHI003kF9Jl6+3Bxy75BEPGKnPODpgweTTYfF0w+Tvle8uL2OVybatLYQP9Inrdzr6NqTiedw8j0NZQwNQFKO6AMbJtsC8n/YssJGMX4YncLN09Bfi/eP96a2iv3eziJ8pH5FylGPnPRhR0CcPeNnI3JifR8oLah+ip+hiMbbXri0eZzW8xnZ/kpbG1Y2bMHvNszDb3LEo34OXlc3G7/xy7prZ+F1NbPwuuqZ+I3t7ma+ZhZ+o3aW6UYd31Y6C/OXzEXbhg14vrbWzufh85TUVrOLs6twZL/Y9s5WKhOuYEwT/b8ob892Qj+QD09H37VlCyaOH2/ngnDqiy6990LoHWvhCgqXziNHA06cPInK3Ufj3bB4xsZT285j4saO1GL05hPI775ia0E4msHg4KmFBfgfn/l7rGo4jlELCwcDkPbz+NHkFVhZdwQP/XAMvvHENGzecxVf+9E4fPm7T9mox/q2s/j5nA1YXL4P3xwxAw8/OhaFPddTQYfb2pcjKjxTJGfXRUzmQnmeVcK74irGrqrFooWLUTl+PHo+8hH0338/Bu6/32D/G95gaeZ7H3gA9T/8IeoKC7G7vR1HjhyxUR9t1ys7Cco2spWHqhMkjegEhS+cMK9yQtGoTLgeqi4Jevm+nvTK+7TKJFdQ5YS2NujSeVzZNRYvrf+teATkYO5HUbKl0KZf8VwaBq7hKJKX5dPiL30FWa7Lp5NoPQ+lRSMoOkGPx7Rvr3AIeQnXp1Xm6ZT2eMYg+iMaQeETKi184vhLNB6KRmUen2nPQziCXqbwBJP4iM5DyQ9lid7jKi0aL/9e6D0deSnPtL8kJwl6GtWHtMqrntDTKe3xfNrTZaL1OKIl9OWeVjJVH9JkovV0SntanxZvQeLrVhmhZInW1ynt6SRXdffCI6QJZfq856t0PAJyN8R7ESQeYi6YREtcXsJRWrj8gZk0aRImTZiAF7u70774yykwh8I52CpPc7ZU75wCcyLk9BEq7XEjJyTmJcclwjFHQ/jCDWRQjhyWWKZz/uSsqB3Cj6HjLxzqavXOSYp1TNBNdTHMRC9+skUIXRtjXSgvbI/KJEd8xF86uvohAUj0pfsB7cRUtBTf4tkbu7bg0c4ycEek1+TdudXOFeH2vf+tdrkLQHgS+gSULFiAvYWFONfcbIdAxgdARjZJs2tQZs9KUEb8TOUhryQ8Pke3u7vBQylbGqvwRGUOvl20BH+/IQtfXDMdn1s5GZ/NnpRwZyoXblgf5oWXDP8uexI+v2gs/mH8D/C5rJH41KJn8KnFz+DTS8bhb5dNwN+tmIjPrpyML62ZjkdzslCwarnZ9YXGRlzr6LA2sW2yQVLbVTcsdDyGxfP94GhCudTphZYWLJ871z6u7Ny5M/USjN6DevfpnRjCGNkliEM6LtLmYuL+/c8iK9ri1gKQqht4estx
TM/vwLLtXAty1g4NLO57EZyK9ciTM230g7ta8cyQN7z13TYCwkMHfzJtNcYuL8Onv/6oBR0MIh4ZOQv/8NhkC2J4SOETs9dh9OIt+NJ3n8Jn/+mnFuBoapdNvWLw0XsTmzovY07DpdTaj2h0ZlTxMUxblIt169Zhe2kp2seOxb53vjMOOuJA5P77sfed70TlY49ha14empubbfE0d2/iYnWtXwjt99Lly7jR14eXuJj9wAG89OKLuHXyJK61teHWqVPxb09oZ+Z5Ccrcyifhh2WiCWGIF+oc4vt8SJspT54MKi6d6sON8o/Z1Ks7uffhZs6/Q+O6n6CivNxmGHD7XdqPwQp5ZboyyQnLRR/yCvHCvOiSYIirvOyWRKMy4YZQtCoXvqDKQzyVC4b4YV54SXw8LtPE1SW6EIqPxwvTIQ3zomM6vHzZ3Wg9bhIfTy+ZhEqHNMp7OqaFL5gk15eF9OIhesoJ8TPJFi/RerokPsIn9DQq93KUFh/hhFB8PL5Ph/hJeY8veSpLwvdlwvPQ14dp6ctyXoL3Qm8BiEcUA89UAn1ZSKM8cT2eaD0Uroe+nmm+HLljzDPPPIOrHR2xM+EdAf8Dn+ZwOCcgI76cOYfr+cmxJr14C1qZ6L3z4XA9DunIW7fXSfwlW3AIjuQ5fRWMeFmeTnJV5vX3NJIpqLYLX+WC4mc8qI/XKdJTuIKextIRDWUMCUCi6ThvWT8Rn146CV/atABfr1yDf2rciEdaNuNbOwpeo/dmfLNpE/6xZh0+sH3x4FSlxix8c8E9BiDOLrKr70f1idletk6iYZ36xderPAoAGYDc2L0bZ3e0YKCqEm2Fm1GzNgdbVyzHlqVLULx0cXQvwZZlv/i7eOkSFC5ehOJpU9HwyY+j5s/+FBt/8D2smTwROTOnY33WHOQvmIfCxQtRunwZanNz0FlQYDtgXWhttbawTWYz1249h7H9WCf7yCaRPeNnNbKR2drhex5KG/T89C6Iyiif963ubmzfsAETxo9HTk6ObTfL9yDfeUnvTZb58vCdKVo64dyOl1OTKnYdxPjtV2wqFkcbnqq4hrFFz2JOcRey645gQ/s5242KQci3R83BF//5SRu1WLi1B//fW95l06VGZG3E3379UTtHhNOrvvSdkTaK8S0GID+bhKLemxiztNSCk6yiDnzzyRn4u2/8xAKQ1DSsO7aYnXibuq5gcfMlPL39BkZy8TlHZsovYfyqKixdno1t27bZAYSdTU3Y86Mfof9NbwJHP+x2IyJ73vtetH3xi9ixeDF6Ojtx7NgxC7o4CuKDENmLwcdz3/iGBRwvTJiAG/39ODNnDs5lZ+P0lClDghBva9mZ/RJe4W+V8pLr8UN65oXn6VTmacN0Eq14CJIPA4pr167iUt86vJT3n1KjHzn34UTuO7B10wLU19fbeTQ6/fxeAhDpJznSReXUlWX+8rhJaU/r6ZQWjfAy5YUfQuKHtOIRwiTaEEf8xNPTsE6X6ITn8yoTLqFoPRRNJujpfZr4kiFa5lUmXNUR6kqiVVlILxpCzytMi07lomPe03odhevLPJ3qk8pURyjZwhP0OElp0bFuuMvL8HyS6ENeHn+4NOWL1kMvw9OrPNRbtOLnaZgmnacN8TPRhXxCuT4f4jJvU7BCYUmIvkzKeChBHi9TWrgh9Pg8jGrhwoUWgJxqabEARE6Ed75iZ0HOQ+AEqD6mdfVJToOcEuHLwRCfRNnOyfB4Sg/h4XQwnDAfOSsxfYKDZPol0JFGuoveQ9V5nTKlPd2QtJPt6YfY7279kikAacrC/1E+A/93yXT8v+VzcH/1fLyxbhHe1LD4NX2/sX4R7q9dgP+zdu7gCIitARk+AEmzoWymfne2tn6IAtrY1gn1qiN+zJt4zqlmHUdhbnV12SjI2eZmPFdTg0MVFRgoK0N/aSn2lZTEd39JCX7RN+X1FBWhMycHez/3OfS/8Y3Y++CDaP3MZ1A7ciRqlyxBy9q1aN+40UY99m/dams/qPu19nYLZtNGlqI2e3uEzzLtE9tItg9sGtcH5Wn2TaIN+6qnB4dra8GppfzA0t/fH/+A6wfAvwd9OnxfKk8cTsPiKMj58+exb/9BrG06ijHbr0XrQXjY3zWMLz6IuaV7bVqVgpApa+vwp5/6Kqaub8BPp6/B//WGt2DRtj78ZOoq/NXnv2G7aH3ukcfxyYe/Z4vROSryiYe+awvWn1pQgP/5+X/E5NxaWy/yN1/5Z3BUhAGITbvqvYk8Bh8tl/AM131Y8JGafjU2rwvzFi9HQUGBBR/cDpYncvc0NKDn4YfRzzUgDD4YiERBiI2IvPnN6Hv/+7HniSfQV16OIwcP2joGBl9sP51prWe41tWFo5//PM7On4/jX/86LldW4vTkybh55AhOjRmDKy0tse1l57APZGMPiet/sF8urehF5/OUw3zSJbxQx5CPjX6cex43Kv4mXvvxYu7r0LX2qygpLrDDHrn7FadfMXATPy9TPKWPcFSeBEWvujCv8iQY4koucZNsLR4ez/NQufQWFJ3ywkuiDXHDfEjj88QdTm/W8wqhykRPmHRLlvCV93Rqoy8TPsvCS3KS9BavJHpP52WF5ZLHcl0hjvIhFD6hp1de+oV0yos+iZZlSW0WrWg8VN290EnHEN6LzpIj2eKRiVblvr2e1tML18vwaeGKl/JJdCoTjqCXLd7CZT6egiVEIRF6RF/ucaVcWJ+UD/mJVpA0wuHiuBUrVlgAcrC6OjUCoh9z/dB7Z1uOBusCh0LOg5zv0AEJ84kOiWQmOCCe3mgj3FheRCM90vCFOwyO4Wdok3jFvGWHBD1jfRJsJAdNfHw7KMPnycd4ORlx3uvp6sVDciwf8eUICBdCz68rwG810nFPrQH5lYY5+NXo/rXGLLz69xz8WiPvV5/3r2jhddSWRxZNQumiRegpLk6cgiW7qz8NJtlPz0sAk/DNxiGPiE51lMtNHhiE0IFnP5xracGZpiac/iXcnEZ1oq4Oh0pL0f/Nb8Zfwfe99a3Y8+EPY9c3voFdWVnozs+HBR81NaYnd7+62dk5ZPqV2mn2cW0P8/ZMO1vZ8+xsnNg/w/FzvEJZXIy+dvFijB8/HlVVVeYEJr0vVaZ3IvNJl96bdCZtLciJE9jZ3Y9Ftc9HTv+t1La8FVcwoeQw5pb1YWX9UdtOV+s4uJB87PKtNsVq0ppqm671/WcW2RQrjnQ8Omk58trPY2nVfnx37EKMyy6380V+OGEpfjZ9DcZlV+DRictsbQhHVjhda1PXZSxsvoRxNQo+uDvXTYwqOooZi3Kwbv16+xrP9X784MQv8ocPHcLe/Hz0fvzjFnxaAPqOd2Dfm95kwYgFJfffj33vehf6Pvc59M2di2d7esDdnC5cuGAjShoNYQBycsQInJ42DUc+/nFcra/HqaefxtXmZpx47DGw3ttW9padfV42Jr7usF75sI9Yrks4w0HhhpA0Xl+fVl1q9OMaLg0U4M76/xBPvzqf+weoyptqz1tvb6/Zm88K8cUnlKe8dBWeoMoJ/aW8hx43TN8r7d3khnx
COUl5T+PTIe5wsoUreuUF70ZLOuLqGo7O4wnf098LbchDedESJuksPC9XaU+bKR3qmUQbylXe03o6lQsvSbZwQjqVD0erNguKh2hZnkSvMuF7eqZ1C08wiZ9kCXocT+fLmQ4vlQkvpGW5x0miF61wQ+hpxEtlnpayeactQg+ZvVwFKeheeUg5Dz0tv86sX7/eAhAu3Ax/xOUgDHEMoh9+cyi8g6DyMIgJHIU0fq5O8kwPV868pzG5vp5pn5dOUZnxTaoXXuTse7w0XZx8X+7TabZzstJ4uvKwTZ4+5Ku8oMdNbHfUHtXRdgpAFtQX4rca58VnPNhOTHTk/a5KykeOfVwX5kXjyy0oiHZ0indXSthhyaaAObkxD7dLU4gTy4vopKfhzcF3lk5BxdKl6CspMQefzvKQL/WuL2XHtGdruD5SXQRjOpVn4E083tSFa61udXbi5u7duNHRgeu/hNuCoLY2vFBfj4OPPWbTceR00hnd98AD6PnLv8Sen/wE+zZswLGqKguWfADCtgyxn7ODnj3BJFtloo9xvT1Dm/N/NyyT/Mjeu4qL7d2WnZ1ti6n57tP7VtC/D/We1Mtc71pB0vDLP79+0xE/fOQI6nf1Ykb1mWjRdxSEVF7F+K3PI6usH9m1R8C1HlwXsqH9vJ3Rkd91JTU9i4cG7rlmN9dwFOy9ZmtGuGUvzwxhgMGF7Czf3H0VhXuvg7QsZ/269kuY13QJY6pvpEZhuDUwDx0sOYHJywqwctVqbN++3U7iZvDBEQze/PB0cGAAe7Kz0ffhD1vgUfOFL6Dy61/H3ne/OxWUaJE6d8b64AfR94MfoL+iAkcPHzZ6rmvgaMh1LrIuKMDVHTtsxOPFEycsf275clzYtAm3L12y3ytvb6Z1057+prPOmwEOb6Z9veh8v/n+8n0lXEKPr35WmadnmeiUFh6h+v8SRz+qPhUfPHgn91ewd+3foaQo39bPcKSJI2W0EWmkg/STTM9baS9fZYS6lPZ1TItOssJ6yVa5z3sanxauoGi8LqqTfOUFk2hURhzJS6L3ckQjKFpPL5keJvEIaT2+0qKTPEHRCkq+6Aj9pbyvT6Lx9aKRTEHZSPSCohVeKJ/1ohVuCEWr8pBHKEt4hLqU9nVMi1YwrE+SLRzpfTda8fC6eNok+pDG00pvz0M6EYpWZSGtpxeOoMcVH1+WpKtoBUXHvE+rXjBtBESFrwXIl2Npaan9SG/NzR3i5MtBEIwdg/ALvH745dAzHwYhri7m48okI4ZJPCOnI3bEhRPJi2kjPDkoLM8oUzyki4NDaFTn5MW6qC6CVi7esleUF1/RCkp/5YUXtyOgF761L6mN0sXtxJTTUIK31y3CH9TMxX+tmoPXV8zC75fPfJXvGfj9rTPw+tKpeH3JFPxe2TT8Xtl0/N7W6fj9bTNeZVkzrQ3/pXI2Rq6cY1vFHti2DRdbW9MXoasPXD+ZfYexaWzfkDaJJiqLadgfkqW6iA/l0oH/Zd1cv8Hg7HxLCw5PmICBd7wjbVFyHIy8/e3Y91d/hf3jx+NYQQHONTXZAnSO5FD3JPvFz6x79swmsoEr97hMx/aS3QIoHIO+T8RbMKK70tGBqdxkY9IkO3zvbu/c4V7iouUPAx1Kbk3LBel0NLe37sWs7acwqoqjEKkg4KmqGxhTcQHTtx7GoqpnsbrxOTuokCMWPKmc6zY4isFgg0GG7nhthyvjVKvivlu2vS8Djw0dl7F0x0VMqb1qu3GNpMxI7ujio5iybDNWZK8CtyHes2ePrVlh4MHASSM4HAlhELJ3/nzberf20UeRn5uLbTNmYOeXv4zeP/zD1C5ZWh/ylreg7y/+Ar3Tp+PArl3Gkw62HVZ48SJuc2rWuXPgidlckH777Fm8dONGHHzoR5y2Y1BBXfj7w7UlDGaoH3csu3DxYuq+cMGmL/EjGes4kkBc0pBWgcm9/FCr714ppCza78qlc7jSORMv5f1uPP3qcu5/xpaNS230o7u7GydPnjSdiS8d7+X5yqTjL4s2kz6ZyhWgqV/Zp+xDBux8Xvg/w/N0GATzGeQmB7SVbgbKLGMdcYhLGtLyJh////////+++//////HAVGrpocFWrpUuXoKpiHfaMdHkB5X7chRfH4SnAtIQcP7UDB473Y++RrdhzqBejB3sweuDf0HGoF3sPb8XJYwM4f3KnAS1b0cu3+kSsGj54YD5yOCBDIIKWDQIYyUV4DWx4AfFRZW57vjuXZNWm5f14GrbtAcPdZqniYgAXTg8ZQKHbHK0nW7rr0d66AXXVxSgv9YLluX8K90xh/MmSJfHeEe9RuvPQBYnxFXTV4Vd4LoIjq4mseryHqAzz/pblRPekaKz7L/xcuXnJkUrWTassTCeSuxVZtiU+tx2VhfuaKB9Llufmnl8sWclpHG6/k8m6cmo3XDaRvNuHZNwytRdLXvziUd6VjyU3Eb/kVU/6u8i7crHSakvjU3+isWTCZeIN0zDfRHnKsS78i8Vvq2C5jC6TTsYtm6hhteHy/q7yYVma2rzgqmXYto273/Kfv6cAUBkIlItAWZDSIMpAYAKJ55+fYsHeixevMsBAhZ7L8nKVK1pCqNixfVoNuIztK6/MsHJaN7j0LYEM3a+42eCsWVw7Pc2v3xRRPsnLpXy5rC7b4fjonsU4jFdemW7AhdYOBprTCrJ48QobFzdEZLtUXDhuAhgCJgIT9kuwQ0DEsXLM/ApPhZeKKF2y4uNXGkhJSlpjFhvGnJDvpZemWZD7woWpUVaQ8fM2MRDRPHJuqPjR8sNVrnJ6rmG2v0wqFd0ZLTcwreo4puZtwYyMets7IqFoE1JrdiCzZa9ZIgq3nvO/wAexCfxiXu27A+mrOpXb8qHr3hd4371nw8hNUJ57UZCPQKW4/6IdWv2J+2LQAkKQQh7SaroA+XEHtJgQ8JT0XzDrSHbHIVtCNnnDdizaMIJZ1UcxnQCq6bot7Tu1/jLmVR/B2ootaGruw5Ytew108NpxlTPeW94+It4O65wf3Z8BjXVvRiux3hzr3g7ub5Zr3rkSGu8P9klLAK0hXMSgfEMnMst6kVQ+jHm1JzHLj3ExEGJuTf6SxAxab3sDc9puYEH7NcS3X8HSjZewvPMiVm26gIzN55Ddcx5rey8gb+sF5G+7iIJtl7Cu77JzXLF0Qd9l5G+7jLxtl7B26yVk9V5CWs9lrNh8Bcld15DQ+ToWdtzA3PabiPPd8giMtBCBgdWWm5hRfxEz1+/DrKxOzE8tw7KUtRbrwc0a6Q5JqwdXEeM589zd1a70Lhh/L3O+NZdKu9S9Hpprl1/1Xl34Onr9ejxB35L3ZHgfNDf32FKNtOJSadG7Te/JMGX9RO/LiWRZzp8ryzZ0SE584p2o71j9U9nil8lzfX3Yf8cdtvM5AcjOL38ZVcXF5oZBdwi6O/CjkZQz9R2rT9WJxhqv6mKNVW2G5WKNn1+26TZCn3EuIcsN/Xq6ai2wXIHf+rJPpZdKroKu54zW4ieDJfjRjiI8tL0QDw0U4qH+df9GDu98nt5eiq5dbTh2cKsPQkZM
yY8ACR9oaK4ilgoDG74blh/IHiVDOYIRH5CIikftGCDxeVVnFhMDHdFtRJUL1ITaV7sEkNxH5djBPnOt474p3KOFSxBzo0cup7w2O81W8OJmjly9izvQWzxQQoK5dBGYMJ6AK3bRckarCd2AaO2jhwYtfvyK71r9dG/y/o11P+q+Jg3/VCZZyYtKVn2E5VUuPrXj5l0ZlceSU10sqjZY58pynOGxuvKSc2XctGRFw7LqTzKiLJcMqdITyasdt95twy1nH+GfWy+5WH2Kj/JMuz/JiWci+bBsmF9yoqoP96V2VE9KmbAcy11epa3QAcpqJywvvjAN80tuHABRhxMJaIDqwM2HZZhXR6pz+dWGqHgow4O+mnwBcPm5yspm+/ophThMo5UBD5ywjAqDuQvtOGrxEFRmWMavyVTkdFCxlsLHwG9+4aYFg0oPeegCwzS/wHJ1ItYTkLhKJ+tZx7gLBhCrPfbJJVnp+sV+2D/bUd+k3PzQa+stU25Vx345Diq9KmOevGxLZYx3+frX7zGLCa0xVJB5Ht/+9v148cXXrE9vPqRgeXMTnseJ8uyPYx7dexnlfVd8FxpPqTTwsX4vpma2Ii6jFosLNiJlQx8ymvZYHEfRtnPmHkWXHH5FZ1yAQIJAhwKXaQ2Zv7YFszNr7Ss7g6IJOrgZ3mvLiq28dPslsE3uyP3M7JXIaBmzfl6Yn4EFa1vMCqL21H7tXgVBv23tVgxeN7Cybstps8pwN/DEDTs8a0jTVdv80PYWaX4dS2qPoKy6xyxPnkLsxXvEWtlK8xetqHqKqVumeyPgD66LVyYF2AUh3NzRc8niNeamld3du81FaX1FJ9YUdyBp/QAWVB3E7PpzFuMiIGIxFlrG1w++5yaHs9tvYk77Tcxrv4H57TewsP06Fm+8iiUdV5HQwdXMeFzx6VUkdHK1qmuI76ArFZdY5mpnNw1szG7jamNvWEwNl1c2i4y/FLJZZtrfwoymq5hZdQIzi4YxO7MZi1IKkZyaD8ZBlZY2GdjmMtJ0aeQzweeI80zLG+/B8By686fn3Ztb7x3g1istUOG25aY9+WD+Y+WDtrzr48kH13nr1jEkJnrr/vMrqt5teteR6l2oOr33mI/13hR/uI2J5GO1cSuy6p/y/OpLJevE6tXY96EPYd9tt9kSvJsfeQR1/oZa9L2mr7WriGlM4fNUXvWiscbKOvfn5sNylA+3QT9wxgIwRoB7WFSszzM3HltG11GOBTyoDPMLO92uzp3ciS/25+KPelIjx7/rScW/6/YPN80y5lXmUrdcfGpDeZdfZS6P0i6fm3Zl1J9LJR+mPan48+5VyO8ux56hdpw8vM3cywjABAYIHNz5McAQslyojDKyXFjaBweeZcQDd9augRrfpUsAx9y+AutJwCeri+8K5vdtYxIIUZmAjAFJyfmuYb5Vh9eeK3lpjxRz69rWgq72atRWFaO4MAc5WWm2ElpKSpLtMM9YEwbB8z6iOxeDnhnszMBuxi8wVoGxE3TxY3B4LEtg+N7U/at72s0z7d7Pbtrl17OhsvCzNVGfYb5w327/sdqQfJiGxzmZbLjPd5IN9xVLPlymPGXDP9WJuv2746aceNw2VCYqGVGVk8b6sdzlZdrNS97tX21J1uV30y6f0mpHVG2QhtMujysfLpesS8VDGv65fBrvv6p9QNwBKs3VCuiGtXp1JkZGTkQUECkMAfCQAhetOEjZCBSG6Pp3lncV9UB2vJz6Jw34gn7d+t8lPXlbmgdaWr75ze+aWxetOQQ83P397ru/Oy7GJBiT2haNPS72QQVw/4GraNt5CfGdQdwBA82nlY5i6upazFlTbxvXMRi62YoAACAASURBVL5jbdcRAwmMzaD1glYIgQ438JmAhDEaBACVu9/EtBVl+NZDT+HeHz/rb373T5iX3YDv/PQFLKvoxw+enmob5tGa8ZOX5uBL3/6h9cOVs+577BVbEYtgg8BFIEQ0Akb8FZnYN9uh1YSB7llt+5FSM4SFNQcQR7cguge1vmmrZq2oGkFdfa9ZHtzN76gUHzsWuPd494UsSir37gnOo3cESqt3/VQeyPEaxeJlf7wWBJ9UzmlNo7LO69/c3I+Kyk7kFzdjVWEHEtbvwIIN+zCn9iTizDJyM7ICmAdMvKBvD5x4gey2XLGBBrpJeQd3pw92qPfLfTcvs2b4G0ZOb3od0+uvYFrjNbMicfNIulfNpKWj4jDiCDqyOrAwtRRLU/KwKq0IublVtrQul8CmCyHvW4Jtuii6Vo/oOeb9GmuugrJgrjW3wXPMa+TNu66Ld99LRnPv9RHweDJqb2JK+dHRM8jIWGu+6HyH6X32h0D5z4GWDFpuaNW4dOoUDj/+eGTzwbEPfAANi7jZaou5NrnuV/8azo9jJyjijtWMBUhOSkBDbQnOHN9uy8pSYTarhymszld8fxO/S2eHcepIPz7Tl4P3dC/He3pS8Z7uVI/2LPfT4XLVi9evl6y1E5ZRW75spA+1RX6nHeubMu8gx3Zi9eeWWbvL8V+7UrG8PhvbuqtxcGwzzp3cYe5NBGK0JpjVQeDAzwtkKA5ElgnL+8BOAMSAhO+a5aUDIGLtqE1SAxTq0wcj1p5AhCcbWFbk8kVeL23LCrvAh+WR6+3xeefl8/tuXbSY2FLDp4dw+uh2s5rs7G/F5s5aW1aY8Sbc+4TuXMuXe8Bkie/GlZCwxFZpIjDhClN05eLSznRNZJA0rSWylMiFSwrYv4bn5X+NIYh5+19z8fudi0kBiBAML4p+t3qByC/e30VWvJLng8s1wrn7ZGvr1qivoAIBnuJApcJTFqgAuEd0vRQKKhCBIhLdllenNjw+r021xTpXIRGvVx70L361H83nKf4ah9eeex5BveTUjtpVf4wRyMkpBzczZBD9nDlLQdcuxp7wa3msPrwybz689oM5CcbiKVpm3Rm9iMzua0HAeesbmFq+H1NXV2NudoOtNpXRshcFvadRuuOyDzwCa4eAAPeeoPKf03nYQMtLC9fYrtpVu99AfGEnnp+Xhm899KQBEAIXAouXFmYaqHg1Md/quALTc3NW4Ts/fd5WzqL1I2VDv7leadM7LQtrG+o5e1doHOamtee3Nha6fNGla23nEays341FdUf8AG/PyjO38SJyyzZZwD+tIIxHIBDQPOn6vHsaDQJjt+tdC9Z5QIRuWQQil+wa08JGNzHuvl5f34P161uRW9iItHVtSC7sRnzJDswv34M5NScwq+Ei4rhHijZubH3DVtWSm1QsGnGjkiWFrnetb9gu9dNrL2BR9iDS5pdiTlYfXls3gum52zAruxPz0uqweOV6JKcWmCshF2UoLW0GdzNvb99prlYEzQRTBB60WPJ+k8WD5zvRfIyfbw+cjC8PnknV6VlSPjaNvi7kCcYyvk5t0FJVWlptH0+4jv9kCofq9M5z37d6H+pdOhEVXyzqyqgvUbeOaSrvb549izevXbONB2nZON3dbZsP0vXKNh/8zGdQlZtr/vJa/cp1v9IYwm2rT1HW6xcrHZZnPpas2wbHz9Vx+IWaO3Z
zo7zurlqLFZBCHSikvmIrxde3gDA4+/ihrfj01qwAfHQvx//ZloQ/bkvCf2hLxn9oX+YdG5fhP/whHBpvezL+t00pEUD1XzqWY2FpKjY2F2NseCPOHNtucTECIJozA22yNDjUQIOfj8Uj64ioy6+0QI7xmKXCDXb3AWIEVPggxLVyyIriUwOXzpjUN6lAEM8rXK5xiIqHMgQntJrs39ON4R0bsa23yfY8qakqQtG6bGSkr7AgeMYX0dpGVy4uIcsFD7i3hILf+/r6LL6E1hJ3RS4CE97b4fvbfS54n7vPhHjD1JVR2pWbLO0+S0qLunLq0y1zxycZUZfPTasd8YmShz+Xl2nyS0ZUPJKVnKjqJe9Spif6uXKujDuGsLybD8vHyof7lnwsXvd8dW5heZVLXjJhKjn1F5ZjeVhGbboybjtu/URp8YtGAIg6k6A7IDG7ZeKLRV3+cDrMH+6X9e6PeZrRuRpWdnaBuWPon7yrQATKdKBoeGVSEkSDerXjKRUq9/jctifjU93k/NF9ewqVgIb6vTU6UT8cB0FIb+9e85+nDz1X7nJ3QI+lOIXbY15l3jgV+3ENXTvPY74tm+rt7zG99hx+k9mKWRm14P4bGc1jtpoVV1liQLkt9cpVk/w4DnePidSqHXj4+VmYkrQOXH6XgMSsFvv/Cb9JKY4AkLIdV3Dnd3+EuLQqWxZ29po6fOor3zZeApCvPfAoHvnlAjw2ZbHFjQh8BDR6TwuCH+vHH5NZRRgrMvq2AaaSgYvI7TqGFQ2jmMM9N/zlYbnp37Lynais6gSD+ukGxzicWHPq3RPR11z3yWTUm3vdfxPLu9eFSjqVdVlECDa5VG1//0GzKDAWiRslri9rRUFRE7LWNWFlXjOW5bUhoWATFhX1Y37pCOZW7MOcqiOYXX0cs2pOIa72NOJqzyCu7izi6s55B/M1PE4jrvoE4rhPR/l+zM7tR8rCEjTf9wv0feZO5DzyEuIWZmBBYjaSluciLb0IeXmVBjoYJ8XV3zZv3oW+vgMYHDyB3bvPRVk8XHerYH7d5yN6bsSjeXHnWHVhq6TH67bJdHS7sdsJAEjQdlAmGdY1NW22L/Bc8pVuGeF3Xzgffu+F82F+5t33p8vvpsN8yruyKvvt22/jUnU1LlRW4jI33TpzBodzc7HvYx8z16u93Hzw+99HVWmpfeWl64niP9Se+labpO5BPvGynD+3/p3Sko0lR/98D3wsRGb6SvArtla48hRQKp7+l3BHSZXSSsX74ulBHDuwBZ/akhWxNvxR5zL8bcZM/F1aHD6TNQ+fz12IL+QvxhcK4v9AjsX4Qv4ifG7tAvxJ01IPgHQvx3/euAxx+UvRWJuH3YNtOH1swILRDYBIyTfl33evkpKvuZOFQnnKOGlT8injAIiI4u+DDU/R9y0e2igx0oa/s7tjjbHraPXa9V1WDt8i4lhTBCK8/r0+dB9wnN5YJUcqC4xXZzz+2CPj9oHMpbMjFmdy/KC3z8nQQBs2d9aAMSaFBdlYk7nSYky4bPDixVwyeJGtykVQwvgS7lXBfTq4vC2Xl+VGhLSWEPSH48bcZ0L3/0Rlev70fIiSP5as244r68q5PG6a7anNsKzyLr/SklFevLEoeVx+N+3KM+3+VBeLxmrjneRdGTftyqkvjkNp0Vgyko1FJUcaS1bnKr5wXuWSZxtqR+ObTEbyklFeMqIqvxUqGVLyRwCIhNVZmFEC4gtTyanclVdadWFelqtM/UiGlGtUcyMaWkG2bz8YUfqileVAmZACTWUgWkmI5pG8+Cfjfae2pHhEKzGBRUHlsfrQONw+XD6vbSmm7jmwTOWB8iRZUY0tyEtGbUk2upz8PBgATXef0i2Xoq0fxcOYvroS8es6kdY4am5MBB+0Wmgp1wj40K7YPl1S2IWfvDTX4jq43wfBivGGAAh39r7zOz/CrIwaAw508/rkl78VASB/d+e38cNnpuGL3/y+7fsR6c/2sNAu3R7o8ACO55rlghDKyBpCdzG6ZNGFbGnDYc8Kwi/+bW9ZbEV+UTO48zzjf+giRACg+XWvX/R1megLvuY/oME1Cu7d6LJoXtZxDDyouDNegoHxjDtiDIUHRg7ZLu1caY3AlCCgvKIdxaXNyFtXj+y8GqRnV2LVmgosz6zEsjW1WJpVj8SsBiRkNSIhuxlLspqwZE09lmTWISGjGolpG7B0VQmSUtdhzfR49Hzx77HnAx+0IOW+//FxrH5tDlavKsC6dTWoru6KgI5t2/ZbLBTnj2OkJYngifeYzoPn5J6zO7/Rad23nJPoe9e9Fm5bXjpW+25bwRyrXbWnsQXvDI9X7brj27p1FAkJieYzfurUqXH/kNz3nt6Neu+5eaUno66c0qSTyfCdq/cu+ZhmHMeFmhoc/MY3cHTuXBzo6MDY9OnY94EPmPWD7ldNL7yAuuxs9K9di4NbtuDihQu2fCatD2xHP/Xt9qF+VKYxSkZUsrdK2R6/KvNrMy0f6WmptvGd7WAeUWY9i4cUZPvSbcqxvoZrf4pBHN3fg09tWRMBIP/HxmR8JWkq7k2OwwMZi/CDvCQ8VLgcDxel4uHiP4CjaDl+WJCCf1ibiD9rTI5Ydv5k4zJMz1mC+qq12LWjBaeO9kcBECrwNl+uxSAMQiKKvBR6X3mPIROAB825Y4XSdYq6Jv41M6DjAwi3f5dXY/Xb0XUW4DAAoTGZXKg9qwtAiicXAKAoeeecxUfQRlcu7m/COJNjh/rAXeJ7NtXZssFlpXnIzclA2urltuEiAQlX5UpMTLD4ktWrVtn+NLSYEpQw4J2uhFxIQRZG3uc6Jno23GdIaVI9exPJsVx8rpzKJpObSNYt1zOvdsL5yfoM84bbCMsqL77JqM5PMspTZqK51nhcGaUppzbE5/bvlklG1JV1ZZiOJSd+yb9Tv2pDfGH5cJ8T5dWfKz8Rr1uufiU/DoCIWQzqwKXimYiGZZUnnUhG5S6v0nTDogmTVpDi4kr74uspBIGiECjxVC6kFAT1UgyilREqeQGvl3YVkSAtPikias+jXp8uj8fntc9yt84dg/jGt6O+g7bVhtee6qPP0W1b/OpDch4Nn7faCdplWzzoTjI2dh6rNl0NVjJquoapWRsxJ6ve4i4IIsp2XI2AD8V5yOJgblAEBf5BhT+36yhmrKrAy4uyzQWLZdz8jsHm3/zhE7YfCC0TD/ziVbwwLx11+/4Rv0rIxT0/esbaeXb2Ktz9g8dRuPUs7vvZy7jnx8/aHiDqg8Ai0r+/qZ7qXEpXLQMuXDlrz2/NEsJle9PaDmNOy1XM9F2OZteeQlp+kwV9091p/37PDUvXLqyseuUBkFC9e43EE6bR107XxKOBfJDXtRIQoVWEYIRWMYJHLkrA2IqhoeMWZ0ErDi1kBFO0knBpaLpt1dR02Y7jFRVtKCtrsf1kGBxeUuIdTNN9inXl5a22YlvVmlL0f+8fIiskcZ+IznvvR2FaoYGPzrbtGGjehqHBE5Hd4uVmFbZ26LyDc+R9GTzTnCedazSv5ii4rzWnHl90vdeOeL37XNfHk4
vmjx6P5ESjedUvn5lly1Lt4wk39uI7jv8A9E9A7zzl9b5jufsTn+TdvNIufzg9mZzkSc396s03cZG+69/5DvZ+8IPY8+CD2HPXXd61ve027Pnwh9Fz773ov+8+DP/859jb1mZWEjfwlv277cY6Z7c+PF7lyRNLNjxfHDctH7Q0MV5wTeYqcNUjKoRyp4m43vhf2D3wQWXT++ptX8r9eABaQAIA4sV0/PuOZbhrxSw8mLkYPy5dicfq1+LxlgI80Vb4B3E83lqIXzTl49HqNXhvmx8j0p2KP2lfhqnZi1FbmYOR7c0GQBiE71lAPGVcQM2Lr5BlwFPMWce59Q6BBd/VKQIO/HbMchC4QQkgBEHqHiiJtOVbqoL+g3rFfBhA8vs3gBCxjDhjjwFsIveDP/4AXMgKQqtK0IbGamDD4kp8PrYt644PgGy8TuyJ7jGCE+5psm/3Zuzsb8OmjTVoqC1FaclaZGWuAjdZXMpNFuMXG4jmPiaKK6mvr7fV3PgeIdAmKOEz5y78oOeCz437c5818YiqTnnJuW0oHeZVXlSypJJRmXjUTzgf5gvnw3KSFxU/Kcv0YzqWrFsmXlG1KeryTlQmWVLxkFI2ljzrJvpJXnKiKnep+nPbcutdWabdvGTIr99ksm7dRDJhnnCe/biyEwIQlymcVqPhE9LJqRPx6eTC5ap3qXhcGbbL3TDpU8l/6IODR6K+OusffrTiEFspGK9IxOaLbtNTNIKyyWWkGAX8YUVlvHwwrug6T9kK9x/kvfpoGXcegnZdGY0nKIseq9ceZanQUpHlfhdLO64HAKTxMqale5sL0vrBValoPWCgeQA+AsDh7Zrt5H0gQmsJA9B5MHi8oOcknpyxDF/81vexmmVj/xMz06rw4ONTDOg8/HwcfpNSYjt7/+TFOfjKvQ8bAFleOWBWkFcWZ2P99ssRoOMCjeh0YBVRuSwhHBMtOdmbTmJh6yXboZuuWHRHSslpNKWaK50xXoFf7qOvQQAYNadevQtENP+a59jXgfdR9PWLvs5encrUlqdMsy4ajLxuFhtaHPbuvWSAhBYSrrJGKwlX1OI50S2KS/wylqS3dw96ekZtpS2utqWDZV7dmIEYxpxsWrEWox/9mK2SxDiBXX97B5penYmainZsquzAyPwkDPcfBAP4OYbx+6bEmgMBA9bpPIM50XOmORJ1739dAw/E6NqoLc2Z+la5lxdwiW5XvONpMB6v7tixm0hPzzalmDv7uu9LvSvddx/TsX4uz2RyrrzSkpWcOwbWqZzpCAA5cQKHp0zB3g98AHtvv92o4j+Y3/OhD2H3xz+ObUuWYGTnTtC6wxULuWIW2+PP7Vd9un2pXuerfJhKNlzu5um2wn0c6HfPmI+h7W32JZqKoacM+oqr7zojhdZTJl2l1lsFazwAWQ4CkK+nz8MP85PxWF0Onu5aj+e2VOL5vpp3dTy3pQrP9b4L+a3Vt9wf24+McVs1nu2pwJPtRfjLjlURy44LQIbHAZBAuZYlhHMXPadU0oN5pqLuzS/LPAXe5tlV9H2lXfVS7iVr5b6lIsKjfnlNfeuDxmEARgq/rrGNKRiDx+uMM9JecG8IzHjjdc7LH3t4fF6/AmQ++PLHEWnDPw/Oke5HjoXnRZDHzRYJSg7s6THQzH1MGutKUVyYbQHv3L8kISHedn1XTAn3s+ECC9xxfOfOneYhwmfg6tWrBkjcZ8N9FlmuZzD8XKnclWVa8qIT8bly4jVh513g8oTTrozbr/jU7zuNW+1IzqUTyboySotSPiynMlL3p75UJj6NXfWi4iN1y5SeSE71YTk3L57w2NXmO/VNefGqLVHJMq8+RcUTpmpLsuJ/T3iAYUF1IoFY9bHKxK8O1Y7Lq0G5ZeKTnCj9jD3f3kWoqWk3xY/KxWTKgeo8RU4KQ7SSESgoXr1kwuXR+aAN8asP5aP51bf6CMYtJWdifrev6HYmlgn6IY8Uo/H8Xtsas8vnnY/iP66by1FiCIDMyGzCkqIuCwLnpoJVu940VybP8hACG3LBcqwgruLPDQcpl1C8CU/OSMYPnplmblfcWJC7aU9JKsDTccvBIHSCnfyek/jFbxKNb2XtkO33sXhdB56akYLM1n0IW1xoWVF/EcoxaVxGA1csro6V23sGi9ouY4ZvASEAScqqsx3Rt207YC5EtDRw/ty5c5XlYM7Dcx1c1zCPpyzzGro8SovqXgjnvXvLG1M0GHEBCa1aBJZ0I5OVZN++y3ZOe/dewNie89iz+wz2jJ7D6O4zFuQ+uvssRrkHyu6zBiQIJhg4Pjh4DL2bRtD/5IsY++CH7Gs5YwW2f/VrqFu2Bl2LV2D3Pd/D9spOAzt0vWK/tH7o3pvsXL17caL50Dy49S5wUT3nafxceWVene75WNfDKwvkNW7R8TLeeIqLN0Q2JOTXSr7fJnrn6p0Zfge678fJ3pmSc/nV5kR9urwCIHzXHsnOtrgPAQ9SXlOjt9+Onkcewcb6elvpJxYAUb9u+7HGrvd7LCrZWHKq45xy1SGCD+7nsLOvFVfO74rsyk1F0JRB5yu4vmxbXURB9PioFEYBEK5i1ZOKf9+Rgm9kLsCPilLxeMs6PNu7AT+ry8GPS1fhZ7XZeHrTerwy3IRXRprxynCzR5lW3il7eagJP2/MMzDz4kBtwCN+Ucn49IW+GvyoZCVe2tkQ3X6Yf6QZLw834dHqLHMRe3FHPV4easSL2+vwTHcF3teV5q/o5VlApmXHmwUkDECk/Hvz54IJxV54c+bOo8CElRnQCNyyImU+gHDzZoHSNQpRdxymyPvtRtI+fwSQRCwyHgDhNf1DOK5e2G0LAJw/NYjTx7bb7u87B9qwsWUDNpQVIG9thi0PzEB3Lg28ZMkSJCUttT1uCgoKQCsJXbe4ieKJEyfMSqKPAnpews9SOK/nkPzuT/KkYRnVufzhtHhEY7URlmFe/KKx5FQ3kbwr46Ynkwv3HUuO8uJT3ypTufqIRSUj3jClzGT9uvJuOtxXrDZcfqVduVgyqhc/Kcv0Y3oyOZdXMjEtIGpYHf6+6Dv1yxUk+I9m1apMDA0dc5SXQKF3lQwpLoFyGCgQVBjGKxxSVgI6kYKhcq/taAVUdWGlxC0PxuSOXcpjdFn0OQXnEN1eMAa37WAM3vkq78qqzKWaGyqtVBbpvrOy81JEGZ/RfA0z87qxpGgTstoP2AaD1bvfilqFyhR9d/WpWCDgVsqcNqJcqvyAcvYTBXqi+D3g4QIQpiP8IV6WEwzRmpO75Tzmt183F6wZXIq26hiS0ivATSwJQLRPjDeXnhIbXCvvegR1wTWNPffi9+69gEftuvJhHrcu9v2ha8t23YPX1ztuGiCgRYeg6sjwSRxYXYDD/QdxqLwFh3cew8GaTTiYWYRDG9pwZN8lP/j9Gvbtu2jAoqeiDUP33oe9t3vxAvxavuXe+zFw9z3Yc8dH0Ru/Ej3do7ZXDq1HAm8am0e98Ueff/A8kieoU3lwzkFbwbwFIMOTHS8fPZ96pmP3pT5J3bG6/QU8jY2bDICUl5fbUpy/r3fpu+mH/
zyosHCDtWPbtmHfN78ZgA4HfPTddReq16yxVbAYPHv27FlzB9FKPu+m73cjQ/BBy9LixYuRnJyILZvrQSXO+yLvKcxSSvUF31N6fWXa/5puyq/FGMSygHhL3hKA3J21ED8uWWlWhOe2VuG7y2fjs0//xNyyvrnw13h+a5Udz2wqw/Pbqg2I0OrAgxYTggACEtY/mBmPzz37U6t7ob/G6HNbK63+ud4NeK5nA57qLDULxtOdpQY62M53U2bjxe31Zj2xtrsrPJktlXimu9z4Xh5sxNObyvC95bPxpV8+DgKXX+5qBcvZx19tSvctIMvNBWsiAMK5ovIv64HNnawBNneuhcH7wh8BEiH3K88C4FlUDAD6MR0RcOHPf+RaGEhx23fBThgMyR0sABwXzg5jz8kB7Dy+DTuObsH2I70YOPwHcBzpxeDRLTh+nLvTD4JLQnNfFsYynT2xE9zxffu2FnS2VaGqogD5uRkW6G4bKdrqWwsMmKSlpaGoqMjAeW9vr8WT8EMBA9y5sagb5P5unr2wzDvpbmH+fy35/z/j/peS/Zecu3/uczYAwkbdn3uCsRCNyxtOu7JM/y7yYdlwnv8YV65ciUWLFqO5uRd0cZASEEtZ8JRoKReBUhAoKbGUmYBPivxk7QQ845Ug1bn9hdMeTyAbq693akcKpdu2Vxb73NWeqCvHtMrZhgdArtuX7vyuM5GlWrn86syyUSwu7LKNAIv7LtgGfxbHwRWmbgFYREBAmFdWCSuPdpXy2nXL/gnm3qU2omThj8Pn98HGhGPzV8biRoncEDG9+6Lt5G3Lz7a9hVmle5Cyqsh2ou/rOxhlAXmn+1Bz7Cq/Smu+xXOr1JVTW7ci6/IyHes4umUv9n/zXhzOq8TBV2fiSGUHDsYtxuHs9Tg4dR6OdgyZHK0YDCJnvEPflr3YkrAauz79uYjiypgBHgQjvU++gJa6bnDuuFoX5Xh/uecxfvzBs/G71enel7xo9DPvnXuYN3gHqM/YY/TaDOYz6ENy3d0jmDt3LtauXWuKPd9psX7hdx3fm+4hOfGxjXAZ85IRn/v+dfudSJYAhO5Up44fx/7nn49cR1o/eAx98pOonToVddXVtgEbFwjRKli0oKg/UbfPcFpjdGl4/K6My8e+uATw6tWrbXWh+toS2zzQFOZInIe/cpMp0s6XeF85NlcYv05f48dZQGzvDM8CcnfWIrN4PLmxyKwJ/5CTaCCCVpCvTHnKrCD3py/Ed5LjcM/SGWYluWvGC/h2/FR8ffYr+GlFulk+vjbrJXx9ziv47FM/BsHFvUkz8UD6Inxj/qt4trscd818Ed9OmGaUwObrc36JH+Qm4ReNebjz109bP99dPgd/P/15k3mitdD4v7MsDt/PWmIxKncv+LXVf+mVX4AA51e7W/HKUBOe31blAxDPskMXrMkAiAdCAuVfbkguGNHcRcCED1w8lyMPwAgIumBG82/g0LFEWbmTt3oDJCH3Lx/EeGP0wId2sR883IvHh9bj77fn46sDefhqfx7u7M/9138M5OI7/fmo6au2ZZG5HPSFU4O2OabNr3/vEmhzX5vD+7ZgdKjTwHdj/XqUFOUgI20FUpdzE1TPUkLXLepNfAdVVlZi06ZNYCzJ6dOnzUryxhtvxLQ06FkU1Tsj/Ewy7z6bSrty/1yyalt9iqpclH0rTeryWcb/Ix7xa8zKS86VUTosy7zkRMUbppIVVb/Kk8b6ufVMS079icaSZZkrL1m3jGn9YqXDvOG8ZNVXOB/mf48Y1FmYQXmdmPKSC1PVi4blWM6fqOTF/06Um18xGD0zMxcHDly+ha+h45UCKQfRdGK+sPIRKBxSVCaWvbU+xsu7fbrpW2svABEuf7gdN++mJcPzpILoxYCcQ133Ucxu5yaEb8M2sqs7h/lFW231KW7kx700qLwzBiTi5iRg8G5pCFBU7rqJsp1XvfZDdRP36QIWxxUrJE9AxJgT7kdSMnAFSzpp/XgLBCAzWm5i9trNWLV6nQVrb99+FHRZkhvR+HtC90a0wqu5nZiOvxcm5n13fdzKWI/1juHAAz/Eobh4HHjkCRzOKcehGQtxbPthHIpbjCPrm+3Z4/1BSwbduIaHT6KnvfKiZgAAIABJREFUdQDbXpiCMe6ara/mVF5vuw0Dd92Nmvxq21GevJPtpTLZGIP5eHdzFch786e+RMP175TXsyPq8jPon+8r7qDMHdEn+oXfe/rnIBp+X7rthGWZl5zevy4/0257rjwVewa3chfxA/n5kfgPgg8CyfZHHkF5Xh46Oztt6VBuQshFQuTqof7Upvp1+3PLxBemsdpx26DPO4PO58+fj/zcTBw/1Bf5Wm9uQPqiLsU1BpXia0q1Xz8egCgInRaQ8QDk4z++D3f+5jnckzjDLAwPZC7CA2kL8YlHHjDrxZdffRIPpC/E91bOwzfm/QoEFN9NmWXuV59//lELDP/kIw/i2c3l+Nxzj5o71+eee8SAxL3JM3Hna8/gR8Wp+PyLPzOw8slHvw9aSH5SkY5PP/EQHipYZv194cXHcP/qBSBlnz9Yu9SsLF965fEJAIhn2XknAOKCBFP0OU+0gvhuUAIkLIsAET942/LG67ts8Zr48yxegQsL5Dbrih9L4fbhpyN9yPoiVy2T82IqGOjNXew3jrbjb7dkRvY7sY0kbYNGZ9NH2/jR2dTR3QiSwNMHnxFZbQppdWzHl3XbudU+In35/Uiuezn+U1cqkqoysKm9DHuGN9qmmNybxtuh3o858efbm/9Ri3m6cGrIXLcISrj89MbWDSgrzUVO1mqsXJFisST0IGFwe0pKii0FXFVVBVpIuJw2AQktJHwH8FnTM0iqtPsM6jnW+0TPsHglp7z4w21IzqWSdct+F/mwHPPuT3mXT32KujyUFa/aUZ5U5yiqOvGGqepFJSfKcv5EJS9+l1LGlXNl3PRkbbjtiS8WdfkmSktO9cwrHabjAIiYwycUFnQ7cdNhvlh58YdpLF6WuWPhPzz+M583bz46OvodABIoI+9WkQgUB7UlGih65KGiEUvZ8OrGy3i848uD/qQIBf3wHGL34bYTO30r538rPPyqTz4qmHSZ6ek7jBXt5wIrSNtbiKs5jsTyAYu7KGQg+vANb0ldf/8PgYI6R9mXBYIKf+WuNyyg3Av8vo6q3W9ZGwQZDASvGHrd9ghhvmL4dRRuPYMZKyss5kNL/pKH7UxkUVG5OwYblz8m1vNgAD37L9l5HambueIXd/1+2wDIzOqTWLhsHbiJXnv7DgwPn7LlbglAvOvoXgtexyAfnutwPrinomXe6b7x2vFkxrcZtKX7LOBXXZgGYOnY1n04NHUeDqdkGxA5Wt2JQzMX4XBGIQ6+NhvHuobtHNmmrGSMCenfshcbS5uw/e57IgBEQGTPhz6MqsUrbSlgBrwzFkRB/BrjeBrbQsN+3+nw5lXnGDxb3n0d5IN2gjJvHLFkPR732QzS4/m5SSSXD6e/Nt9d4R/fb/zpPee+6/Q+VJn41IbqRcWntlwqGVHW6efysQ0u+Ulr89GxMez97Ge9
uI/3vx9bvvMdVGRlobm52XZA52o8CnyV0sK23MPtQ2lRly+c1rmIV5R8HB9d2gg+Vq1Msa/A+vou5ViKcUTBjSjObhCzqxB76VsFIC8M1OLBjMX4zFM/wrfip+Kr057Dsz0b8PGf3G9uUn9z/92W/+rUZw0kPLwuBV/59dP42qyXDTBwJS0CEK6mRZkX+qoNPDxSmWmUsSY/WJuIv5/xAmjhIPCgS9YnHnkQz2wu8ywkcS8aYLlv5TwbB4EPrTFffPnnFvvxw7xk0AJCd7BxFhDboX1yFywpt6b4OyAgMsexyrRylM23Z/0w0OG7u1mbLk/kuviKdVTed63y29K11LU2Vzvx+0Hdl8+OmGWgdaQFf9ObYQDk/2pLxvuqEvC+qkT8VW0SbqtLxvsbltlxe8My/EseNo76ZbitLgn/pSXZQM9/7FyO+UUpaK4rwMj2Fpw4vM3ikghAxs2l5tW3jHjzy7n07+2Lo+bKdWCsBwNbm9DSVA4uB8yV4rg/Sby/4hbdGOm2xcUc2trasH37dgtu5/NNly1+YOCzN9GPdXpm9Swz75ZNJhuWD8u6fat9tae8qPoUFR+peNy0ykTdvl3ZcJr86iOWLMv0U73K3PxEacmSSk7pW+1X/OG2wn3+LucclmVe4wn3E86LT21EAEiYUQwulbDLG05rMOJ15ZWWjJtXmjQs6+b5z4f/ALncYmbmWrOCuEoLlQk3rzSVBPdQuUdjy7g8gZIRraR4bcaWn1gmzO8pU+zPbW8i+WDM4XYCYOTKem264x4v556rm+Z8Usnm3hI7dx5HeechLGjzLANUzme23sTcmiNIqRlB9sZD4PK1G4ZvBsvx+sq9QEck6Hs/LNZiUcFGLMhtRfbGg7ba1YrqnYgv7LDleaemrsfCvDasrBu24PP4dR1YWTuIJ6cnI61xN15dWoAVNYPGm1I5YH26wMIDP7R+xLaAEJAY+ODyu2PejugEH6t7rmGubbgo68cNxGV1ICUlx5af7ekZi3IhCq6HO8duOvZ8T3SvhudfeY+fbbnteengert17hiCtPoVjW7P4zs2ehZHa7pwrP8QDq8pwbGRUzjauAVH8itxtLoLxw9es+eMbRw7egOHR05hrLUfg2tK0PerGRj64lc8xfW226KASMsTL9oqYr29nMOzZl3z3LC882J7sQ7yeMdNc72k+yXvS/dgWXDQtSt2W7HON/oaxprDycpUJxrM9cGDV7B0abIpy9xozP3xXaef3n/uu07vQpWJV1QyouJTXvLKU45p96c6yvIgkGBsBb+E0md87zPP2PXb9Xd/h4bERAt03bp1Kw4dOmSuV/InD/ft5tWf+gpTl5d1zKvMHbPquD/CokWLTIFi3Me1C56yJZef4Ou655YT+XIuZdW+3vsyLHPKxwMQ7yu3FwMSWEDoysR4DAIQBqF/depz+FHJCnzykQfMneqTP/u+uUzRIsF4j5+UrjJrBvcNoesUXbQYA0JZWkYeXBMPgpWnOkoMmBDcEIQQgDDo/dOPP2yuVZ/46QMWh0Iryd3zX8XdC17FzxtyvTYyF9sKXXQNu3vBFHxz0Wv44ks/N5etcQDE/8I/mQWE8yIrkSm+mjd9fZeVKQJEBBgC4OHFfuj60ErizHvEauJdJ/ZloMJvL3IdZelwVpiK8JlVxZPntRMAaRn2AUhPKv6yPgn3rF2C7xYm44GKVfhB3Ro81JiDh5vX4uGW3H+5o3ktHmpaix82ZOP7Ven4aMMKWxzgP3Ytx8KSVLQ0rMPIDgGQkAXE5oTz7R92DwfzHLl2KidQuUhLyQhOHRnAvt3dGNjajLbmCpSV5CIjLRUpy5KQsCTe4myTk5OxZs0alJWVmbVz165dZiEhIKEOxmdRz2b4eXbzsZ5lt15t6Hl368Jl4iWP+2M+zBsuk6zk3H7CvOExuzJqR/Iur1umNKl+KlMb7yQrOVG1RRrrfN3ysIz6FHV5mXbzrizL3Z94XeqOxeUNp90+JP+uAYgaIHV/sTpxecP8lFVZmG+iPP/50QoSH5+Arq5BUzQCpUJKh6cEBIpZoBQECh3LPKUhUNIpH80bzrvyqhNVXTQdr5i4/FSUovmj+3frXDm3fHw6OK/xdbfavjeXVPzohsVlWzf17kNGyzHEtb0ZWZJ3ZusNzK87jpT6MWR3HDEQQncs7m7u7oYua4gorR6vJRfhtZRirKgdtD0+mObeIrMz6/DEtCQsyGvF4oKN+MlLs5FQ0o1Z6TV4YnoSpq0osxWvFua14ulZqQZiNozcCFa1skBzx93KdQFjLAiB0d5/NJer6j1vmwWleMd1rAiBj5ltb2JG6RgWLM1BTk657QEi9ytahjg3tz6/uibR19u9/rq+LlVa/QT83vVROWlQ511jybrlKuO9z3SQD+SPMzbj8HUYPXjVo1y16sAVHPetPmyTx1GupFW7GWPPvII9X7oTY3d81GI+ZPkw6gORbXd9A2UlTejsHMbIyElz3SKIYDsCGR6o8ILhuVyvd3grdmnVLrp88aAbFw/lScnDg/csZbniF6+VwAr70di9uQtfj4mfD3eug3Ss6+qVHTp0FampaeaGRTcHvevc957S+ofkvtDJr7z7npWM6ESy7jtU8iqjDAEHA1L5hZPAgwdBBS0gdMkYy8rCno9+FFtefhmNNTXmPz4yMmJ7EVAZof84ZdkG29JY1Ue4T+U1bvGF5ZR3+Zm+ePGibTbID1CVFQW24Zv7RTz8lVzgw1PIfMVYCnTkK3GgGI8DIGYpGB8DwrgKBno/0brOXK9olSAoebKt0OI4WM5Vp7hPCJfC5WpXBBdcwYpL4TIo/cm2Iry0o97qGZjO9riClsn0bjAZxogweJxlDHJ/vLnAKK0iT3eV4smNxRaEzlW4eHBcbJN9UZb1DH5/NwBEAEDKrOUJDvTV3QcGkfmPzGt0rIbkDGA44MLaFbjwwUzkOvnAMCIjsKI+DHgIII0HIIEFZDlub0gxi9CjVWtsPnQ9OE+8Hv+ix456u2ZPb16Pz3VkGQD5T13LEV+2Cu1NRdi9s9W3gHgAJHJNIvMg1zcX2AVAhPxB0L94vWvo3evDOH10Ow6N9Vpwe0tjOQoLsmwjz+SkRNuPhNZbuWzV1taCiwFRB6ObZjh+hM+tnt3ws61nmeX8qf5WqGR/Vzm1LXn1Hasdd9xu2m0jnI7F55bF6ld9k8/lVdthqjZE3fqJ5MWrvpS/FVnyhH8qk/yt9BurDcmLGgBR4xRQxWRUDYtH+bD8RBMsPpcqPdGJuX3xn2RjY6Mh9ZycQguApUIRKAS/qwLxu/L/Pvv6Xcf2z8svRY0KHDe0Gxw8jpaOXUhpOoG4NsaDvOW5ZLW9iTlNF5HYeATpbYeQ33PKdhTncrZ0azIgwj1CZBH5/9h7DzA9kupcWAQbfA32tS+2uWAMGGzABgxeWILBxCVuIi2wCxuATWzULrurNMo5TdRoRqPRBE0OmpxzHs1okiYop1EYZY3y7vr++/7Pe6pPd3XP9420y/Vj/Pz/9zytqq4651R1VU/pfftU2PuaeCxiivvx7MoU8XSQVLw
UlYvFW6oRWbAdPNMjalsvVme14kcPvyAyS1Nq8duF8ViX34XFyVXY2jmBJSk1Es8Nnv9hT/uyynWJx5ghHrkDl7F5+yWsbb2CCJl25Uy9qruGufl7sWh9JuI3Zsri86D3Q9sn+O4pqA/1Xnp5Xl8F0/Re7dr3/niod9GkadkmDMr5741NTQuGrGcwzSOnfDf2Dx3D6JYCDNH7ceePMPovn3YXn8sCZoeA8HyQwqhkmYbV13dYpmEZsmDOKSGB4AGPu/ecx+iuc9g5dhaDo2fQP3wavUOnsH3wJDoHTqKt/xRa+nidRFvfBDr6J9A9cAI9QyewY2gCfUMnMTh8CsPcOnjsjOxYxmmEtM1DELkAXkmJEh/tS203JWimD/j82gaGqGmbqbzdLypLEpSUlCoEhHv22+OYjnkaah7DcOOgLWvHVTecntpUwsEx9Pr1l3Hl6jVcuHgZZy5cwsmzkzh++gIOnziH3eOnMbD3GForW9D0uwiU5xSjsr4FrZ09GBzZjYNHjuHk6TM4f+GCeEu4boRghHZJSKYjI6w3f1rn6UJHVGRpn9NDOF0kMSEWB/Z0Chg2oExBlw3EPMDlAjEFvIFQwfAUAuLM1Q96QEguZg7X4rmxejy3qwHP8xpzwmA8eK9yGqp+UC7UvS2r+WMNbh1Yl2B9pI7BRejO9sLTekAckK9tY7ezEjuTpu1stb16OpQ4uPeWjILoIJHRr/baR5Rz7TBubAgRUhvhpmB1xOBDVdG4N2cDHqpIwWNtBUIMSfRIytg2M4dr5FDJX9Wk45nBStOn7Nf/7Gu03rc72RdbU1wCsqowHg012RgdnEpA9Lld4qftJCHbxhAybxqW0z9K2qR9vT4z9vwk7sj+7bKOpK66AHk5qUjaFId1a1fJlC16H7mwPTU1FTy1nR7RAwcOTOsR5d94uN90f/+aZ+sGbalMuNDWDcbD6Wh6UN6+V5npQlueccpqqHrhxuygrt7fSE/LUPlgqPrhwqC83oeTt9NVliHTg6Ety/gbJiBqPFhgsIBQjWtX6vXqq/z4+DjoJly9ei2am/tDfolWQKAAQcGcggPv3gOCdhr11YY/3QMiTDcy/jQjb9JsG+FljR2VNWAofL28cqcrf6q+2rf1TV2nymo668KvxwRtPFG7t/cgSmqHsL7qKObXXXXXhMhhfbVXsLzmFKJrj2Bz81E5JDCvbxKFQyQiL8vaDS70VhKQ1jaO1NZxcP1IcsMBmYqV0nwY2T1nZHtfTuli+v0zlyKpbi8yu04isWaXLETfVLtHDh1Mbtgv5dCuelckVE+HQz6YTyJEz0vR8DXkDV5Beu9FRLddxhJ3ytUrstUwd/ki+VgYlYOomFTk5dXKV/vBwWMCYgleTR8F+z3Y53a+P8//XtpyofrCy/frsf/tPL+u3d/an3ZonsHW8XtVgvpGXp/DeC2EgOyflLNietvH0J5Zhub5q9Bx70Po+/JXMfKJT2L3hz4sU3m4DqT26VkoK2sHp2HxIEQShOFd57Bj5Cy6hk6juf80KnvOoLD7LDI6ziO5bRIJrZOIb72IuJaLiGm+iKjmS1jffAmREmfaJOJaJhHfcgEJrReQ3HoOmW2nUdhxEpVdx9HYcwxtO45j+8AJDO6cEG8et1EmqSbpIVGwCYlp02Bb8Lm9tmY7+ttH8zw92k1NzRICwi1j7XFT4/aYaY+XdtyW0fHzRvrMpw2SAV7i3bh+HZOXr+LEucs4OHERO49cQOveSZQMX0R2/yWk915C8vZLSOiYREzLWawpP4SVWQNYVjiKVcVjiK7ej+SWYyjsPYnGkVPYse8kdh0+hUPHT+PUmbOyqw530aIXhVM1wnlG7OfR59TQzrPj+/fvF883vR/tLWXOSedBQOvsfOVOCzJf4wUw69d79wu+B2QVSE8lINGy2Hg6AiLkwyIGz+1SUkIQ2yCXLaOEQAHw82P1QmAMGDakhvI2ADb6jtyuerg6jDtlU17iDrFh2a6dIAERYnWDNSDOtB1tO0PizNdzJSAMFQiTqJh7BbeGLBivhkcizBd5M/VN211sW/2jZdlTjERWCYczHUvKdurJvtMpWLYHJEhA6PmYKQTE9A+JJHcu4+5j9FTR00QvycyRWvEmcXtjpvOeWx9zEwB6vOhtYshzXdiX1KEsz4OhvNy3FeBJrsFRMqOkUcJ60aM+t3X2CEgMpicgpn1NvxjioO1lt7/2jUtExBuiZMTpNx+xs/rSSWeb8kwSnt7e21WN6oo8ZGUky9qrdetWY4VzHkliYiJyc3PR0NDg7rBFD6muHeHfsT1u2X/XNxNXXTEyzccLjiHBcUR1NNTy1Kbeq56Gmq56trzGtTzV0VB1NVR5DZmuuhpXWTukPH+apnEtJxjaciorBgKEQMsOpa/yqk+bdlzL0FBtqJytb8dV3g5DngNiC/yhxvlib9tmDvlKS8t2dsQyQMoPCmxwxTjBgZem4M2fNjXfD9rsfBO3gVlQ1thWcEJ5E2e60fMDGa2T2vHq5tnw0oK6Xt08fU8vXJraM6FfnvXhV2ICNII1TsWiJ6Cgsh9R5QexoNY6Ib2eHpHrWFh7EatqTyGu4RiSW44jo+uknBXCqVk8Y4MHDJaOvYqy3f8h06BISNQ74m6r62ybywXo0cV9sk5jCsEINbWKhIPTq5wpVmVCOl5F0eh1WeCeM3AZqdsvIq7jElY0X8X8Bk4nM14PWXRecxlz8/ZgYWQWoqJTkZlZjvr6PvCLPUErp/XoNJ5w7anpoUO2r7+NKWf3u/aH0Z8qG9qu1/eeram6zPPb9/TUruYbWQ9Qaz5D5rEduJCcXgwuKuep6jxJvb62F5WZ5Shfn4S6J55H13fvwOBnP4exj3wEnT/8KYpza1DTMIT6zsOo3j6B/K5z2NQ2ieiWy1jTfBXLm64JKVzYcN3ySll9VG92YuP5LLIWSTcLcDxY7NNFDdextOEqVjRewZqmS4huuoAtLadR0HYc1V3jaOsdR9/AUSFBJNYkI+qNIeG2+9hrz6ltZbdJMM53JT09VwgId5t5o+PpdP8BhLOp3g4SARKC0+cvYc+Ji2jfdxF5g5eR2H0V69uvYWXLdSxuehnznbbjDnck4LNrruLFykn8ruQMni86ieeLT+HF8nOYU3URC+uvYEXzFaxrvYKEjovI3XEWjSMnMbTvBMaPTcj0DE7jomeE5YfzioSrezCdNjgfnQvPNyfFy8Ja/RJuryuwv5QLQNOvw/r1V4iJN2/eD2pDnANiHUQY3AVLPSCe50PJgIaGRAgJsIjCEz0lslicC9B5qjrPBiGY/VVlGrgInWs+FPzyq/3T/eUuyXAJhUV4gvZ577sIfoME5CY8IKYtHYInIN8mDUHiZxEMlyRYQNnpB0MYHDvSFx4glr7QaXHqMXH1vLJdkO32qf/rPben9QjIVA+Ij4CQLI7V464NS2QrZU51+0HicnA7ZZIJrsXh7mLc3pjeE67X4XbHX5/3tMhxVzNO7XpqRznujFmI766ajQdKN8t6HdnxLHqBrBXiWSzimbLIppCS0TohMB4BMbtghSUgSh
ic99gmf6b9TD/ou2/y/e+75Dlkz7z/zPfIh/s34fS52iUZ4SGJPLl9z0ibfAQoLsxAclK8HAK6YsUyrFy5Ug5G5Ja/PBhxaGgIXPt24cKFN3z2yBsZ+4Ljx/+X7v87tdcUAhKKzWjnhXswW0dlGdo/vbfzGaeura9l2KHaCeoeOXJEvCDcZaalZcAB8x64M6DBgC0PUCnQ89INyDLAQuUUSGieAhDme2lqi6Gnb+J+0Kb6BrgFZT19JUimDFNHv46xq+V59fTXxc7X+ho7nk1TlgGofnnPlmffIyGcc88v1yQhZVU92Fg6jKUVE4iovSLeED03g+snFtRdxfK681hffxIJzSeQ0j6BjO1nkNN3XrwYuoMVp2nRM8FtfOml4I5U9FjIxftd/2EIhRILDVXG8W6I7tirMu2LO2NxXUj+4BXk9F9CWs8kEjsvYn3rZSxuvGYB21dNvTnlqugoIpJbsHRdKuLjM5CbW426Oo980AtEcKr9YxMJr4/1HfS3tba/3aYmzQO1/n7QvjGhvyzqaDmqb8vZcfv9Ull/H2vdNfTqapdh3j3N0zZQcqokZHh4Ajt2HEJHxy401PehvLgJhanbULwsGtW/egrF9z+FDQnlWFc0iuWVE1hQewnz60MRDW4CQI8USYYJBRxzXU7gku2SCZxl22RHz9V91Z0qyCl2C+pfxtL6K4hpPIeU5gkUth1DY9dh9PaNYycX0zueEZ2mpUTEtI22q2nHqe3ltSvzuP4kIyNfCEhbW5uPgNjjnj22hRvvbBmNqw0dL5nONHoeCNrp7Th06jI6D1xC/uBlRHVcw6KmV1yyIe1K8ma1sbRl3XXMqb2G2dWXMavyIl6qvIhZVZcwu/qKpM+tYx+YfqENkhdu3BDVehE5PafRsPMERvYfxfETJ3Hu/Hk5WyQUEQn1rME0Pg8XnnMLUc5LH+prcL+6KzhSAOUCLwWyBGwKiDW0vwS7cQOop3pAvJPQwxIQB/DrV24f+HfJAEmJISa/acjCp+67C99dNQuff+J+Aau3r5+H76+dK2D3zthFsu6Di8s/+bM78YuCjQKSjV3PTrAclq9l+PJCERBnatl0U7DYtuLVUBJnhyHakl/aQwJi6hHMBkiF23cuyXA8Um6fWARGymO+R3xM/RydcFOw2qcnINpOP0paJRsJyCYBs5/ADxOX4778eNz68M9BwsjNANgPX37+N0JWvj7vSXx7+QvgDmQkHfSMkJjctnAmvrloJn65LRGff/yXMrWLmw5wQwHTPxY5dPrldXlAnHbUaXFeyLZ32svuJ407BMPtH5I3SXMIINvcp2+3s+k7eReo4/QzD0e8cHpY1pAM7WiQBe15uamyfoRnkHCHrVWrVorXkt4Rjn88uJRbaHM6Jceo4Pilf/t2qGOdjm2qoyHT9aeywXvV1Xw7tGUZt/PsuF2enW7rM11/tgzjQX0tKyiv6aH01YbqhAptPZVnqBfz9adxW0fjqhvqXvXtUOVuFNo6N0VAbIVwxu3Kqjxlg7+gvq2nebaObUPzNeRLTJZNl3xi4tQdsRQoMbzRpUCCoQ0CbT0v3QZltO2BEk/GD/rsdFvetq9xrYvea0gbth07rjJeeDPPoe1iA0tN85dFu6wXwTc9IZxLT6DJr91V1T1I29aF9UVjWFR9FvN4SCF3yLIurhdZXHcRK+rPY13jGWxoPoXNbSeR2nkaGT3nkL3jAjhVq2DgkpwnwrUj9JSQRPAqHnnZf42ae0MySDSugToEWbn9l5DVdxFbeyaxuWsSG9ovYl3LZSxvuoqFzlQrt27O1965FWcxL3MQC6NzsWZ9MpKSclFU1ITm5iH09R0SUKrkg4DUa2fTXl6f6bugodeenk6oPKbp5bW9Z5d2/HomT3U0DFcfrx6eTWPPu/fK8Kd5unYd9Z2gLNvEeEIuy6JwHkzIsz44Xa+xeQQFFb3YmNuBxantmBPbgJlbBvBs/jheLD8vX9oJaNknAmrZJ7XXMLfmCuZWX8TcqguYW3kOcyvOYG75KcwpPYG5Jccwt+gI5hYfxdzS45hbdhLzKk5jHuUoXz2JefRk1V4176OAZQXa5t1Uory4/iqiGs4hrekYKtoOoW37IQwMHpVduoxXxEzPUiKi/ej9/Wnbm/bUfIYkIJmZhUJAWlpafP+56dinoT3W2WMg4zrm2SH1bF3GdarV5avXcOzMZTTvvYzknmtY3mK8fPLMrreI7fGytNHc6kteO5efwtzSCWnbOUWHMadoHLNLjmNO2SnMqTiLuZVs38uYS8LukD7pOxK+hleF2PNvLbHjPGroYdp7BMeOn5A54jwzhEQkHPiw20CfleM8T3VeumQJirdl4OzEkLWwVoGSAVRmyo73tVxALoGVcylgM/cKXj2gPZWIb002AAAgAElEQVSAcArW1EXovjUgOvXJWX/hrcEwhENBrpID2XL36QflLBBuzcsDCD/32H3ugnZOAaIH5IdJq3DrI/fiOytectYrKHjlFC/z5Z42tTymuVOznHUhmhbSA9I+/RQsAfgOKHW9TS5ItQGvtqOG2t7Wl3fHW6EAWEmh1z8WedG+ktCz4X25V6+JrWPOAZk6BetGBMSQNh4EyTNXftOQKUTwa7N/iztiFoK7jT0zUCnbLP88OxZfeelR3LM1UrZBJvmQHceWPC+eK5ISHhr5lZceE68It0DmhgM8dJLnspj3wLwTQkbCEpDosFOw2AaGKOizO++9S7i9fnHb1iGAU/42xMPBPtO/HefvxiErou/kGfJoypS/IXkPVE/7fUxOa9893Iru9kpUluUgZctGREd5u2vFxMTI+T11dXXgzlrcmpxTNjke6N9+qDA4zunYYIf2OKnjJ/NtXb1nmqbbsrYN27bq2SHjKn8zNuwyVTeoZ99TRq9wumpH5UKF+py2Da33zegHbWod36gNW28KAbEL04LeaCXVltqxC9Y8htpAmmbLTadLPS5+4o5YK1asRFNTnwAhA4w8EGcDgiCw8kCEB9yYpukaTmcjCAz9sga4+9P8QGVqnYL5WjevXrSnddNQ04L3WjbT/Xn+clQuXMh68jIk5IoATU7H4snW9fX9yC/uQGLBdqwoOYSFlSQi1xwSotNkzIJ1fu3mFI6lDRexquE8IpvOIq75DBJaT2Nz+2mkdJ5BavdZpG8/j4ye88jsOY+s3gvI2qHXJDJ3TCKjd1JIRvr2C0jpvoDkrgtI6LiA2LZJrG+5iJVNV7Ck8ZpzeKLWwSNGss6DxCNvN+YnVmN5ZDo2bNiKjIwylJe3o719TBbd84u4TT5MO3h9YtrdI3F6r+2o/athMN3I2yCfcX/fmL7z0kwdWKbWw5SvZfhDo6eyatvY8Jdr64XK17qbUO2asg0JuSZT1EhQR0ZPoX3HMWQ3jWNN+RHMKTyMZ3MP4umcQ3g2/yh+V3xavqxzqg/JxpzKc5hDglF8FHPy92JOzjDmZuzAvLR2zEtpQURqC+antWBBWjMWpDZjYWozFqU1Y1F6Kxamt2GBXO2Yv7UD87N6EZG7ExEFezCv+AjmlU1gHvu65hLk672SH
QXj9a9gYf1VrKqfxKbGCZS2HER79wHpf07PotePZMLv/fK3vbar6U/TJkpAeBp6KAJi/6fA8e9mxz4dK1WfoXo9Tk9eQe/hy8jov4qVSjxIwOqdd5+kofqS09bjmJO/G3My+zBH2rkZEanNmJ/KNm7CQl5pzViY3irtOz+9HfMztyMidxgR2w5gXtkJIX3zaq+4ZIQkhMSE3qaVTZexpeMUGgYOY9/Bwzh16pScHRL8AqrPo+O93jPkaevchYdzzgd31Dtf2Z21HoGv5wY0OV/cHbDMNA+4adwCtpJvQNQUAiK7YEXjpteAWF4I+4u3HScB+dyj98o2upzSw8MEuZ3vI03ZQiBIKLhI+su/ewR3Ri/Ev/z8TvGIEMC6dqyF5y7BUSKkoe2ZCU7Bcg7Zu5EHxHgu2Gamfdz21S/rVkgZJXgSV9IhX8yNDbHjgFe7T6SPFERrf1j9N9XzoWDZlEm77LvXT0AaZCH47ZHz8ZWXHpfdsn6eFS1nu5A08IR7nmDPbY3pkeIhjyQdJB/fXx+Bn2VGy7Qsejj+7ekHhZjwTJj7izbhlgd/jB9uWiHk5dc16UIata90OhaJ4ev1gEgfWP0xpc1dcuC0jUNATJ9Y7eYQDUMolNAw34lr+zukR0iIkyb9GPi7cj0q58xUxtPHB+WMHnosef5IWkoiYmMisXLlctlIgmSEU7V4rIJu80syon/79ljA8U3TGdrjnqarvB2qrMpoaNujvKarrt6HC1Vf5TWkPH+qp3J6b4eqo/LB+6CufR/UUbuarvd2SH21oXIa2nI3its6jOvvRnrMt3+8v2kCoso3U0goGS1Y87QhgvcqFwyDcnrP/8iqq6vFPZ+UlIaxsZMCkj2g5IE2GzRpvgfKFIgF5W0C4eVN1VN9hp6clvPGw+ltKWAMb386fc3T0H4GL24/qwGl3hdv7ihEgM4zQjjlprq6B9kFzUjIacPqghEsLjuOiJpL3ldoAUKcY/6yzDPnl25u4xtRdw3z669hkRCTS1jReAkrGy9ideNFrGmaxLrmC1jffEHCtc2TWN18EauaLwnAWd5IonFViAbn/kfIpdNDLNDFsumdIVgqGse8rX1YEF+OFetTEBvHQwaL5IyKxsYB9PTsl0XV/ALOhcQEnvoFXNvAbnsTV0JgwCffAyNr3glPz7xTeu/JeXrsT9VVOdMPpgxN03eN9xo39vz6mmZCT9Yuw8vTd96rD8sOXYaWY94JrnnYf+AihsbOoar3LOJbJmWN0OyqS3ipYhIvlJ3DC6Vn8WLZWbxUehqzio5idsE+zMneiTmpbZi3uRYRSVVYtKUWS1LrsSy9ESszW7A6uw1rcjtk97P1BdsRua0HUUW94Pkv0UU7JB5Z2APmcYe0NXmdWJ3TgZVZbViW0YIl6c1YlNGB+XlDDnA+jnlV5w1JlulczlQvAuf6l7Gs7hKS6o6irHkPurYfwNDO486OXX5viPaDHerfItPYHmlp/jUgwbFPxzKG/IW7D6XHNF5cY3H12nWMn76Eop1XsbLVW9Nhpqe9bEhe2UnMYXtv7cHczXXS1gu31GBpWj2WZzRhVVYr1ko7dyOycLvsQsed6NjWvF+X3421TtuuyGrD0oxWLMrswoKCUUSUjBsy4vOMkIi8jOWNl5HfNY6+kX04Mj7u7pjDeod6Lm0H5nNhK6dfpacmOtvu6pQeBVNKKuzpJB4Y9oFXBcX6JVkBlfOldwoBcYD6dASEYFIWm7uA3+xEZUCmxo3Hgmn0cHzhyQfw04wo2ZGJ03f4pZ1f2OlZ4SJnTvf5xoJnZOrOP//gW/JFneRDLsfTwrUndhmyFsVOc4iI6AQJiJzqfQMPiLYRgaZ+CWeaSzr0y7fj8RAZQ+wM2FVQ63w5d9veuRdg7PWdAcLmXvtMgC77yLnMvZJHv/2QBKRjeg8I24+Lxu8r2Ih78+LlsMf78uLxYHmykD5ujcx+4doQrtP5aWa0EBFuc/yLbYky9eq+/I1y2OO9OXGSR1skLV+a+Rv8OHk1flWdJh4tl3xIHxlv1RQC0h4jJ6GHWwPCNjLeCNMmxhuibcbQee/Fu+HvN9Omxmth+sdrV0PStT2NnrS19LXXX0aPZWg/q6zWge+EEh+jx3N6SAxPHe3H2FCznNCek7UFG+Oj5dwRTtXirlqbNm1y143wkFOuW6NHN9T4oGOknadjhoa2jC3H9OC9ylKXP70PJ2enOyqujq0fqixbl/kqH7SjdbhRqPq2rWAZasNOD1eeLRNKT8sJp686dqg2VUfrfNMERBVto9PFtQDK6E/ltTJ6b4cqq/rBe8oG9XlgVlxcnHhBqqvbZSqIggA/kFNQ7QFDA6oIokyeByRU1uT58z1ZG7RpmSobumzbrgH+Cu78dbFBoq3jla3lmNDIa/1NXYx9Uw8PKHr3+twqFwS3LMufZ+waW/4v3hdkykp//2G0to7IeRl5+XVIym5CZG4PVhSOYWHZccyvPI951Rcxr3IS84tOYnbFBbxUdRGzqi87U3Guu19Szbx/5yRyZzqX+brKL6yWJ0MWJCvRsNMdolNzGfNYbvFRRGQPY+GWZiyLzcfaqBRZ50GPR1FRsyw07+7eJwupSapIrjjdzCYe+vwm1Lbx+sef/3rS/bb0XfDb88v484Jl3azszcrZ74K/LNaVBI1ge/feSbQPnUda50Usa3Sm6NS9jDm8aq9iduUFzCo+hlnZw5id2o65m+sxP6kSi5KrwEMpV2e3C4mIKenHhvKdSKgaw6a6vbIjWnLTIaS0HEZq6xGkth1FWjuvY+7FNOZxF7XkpoOiw53SeGhlXPmQbGRAAM0ylmd3YnHODiwoGENE6THMq7kohNiAdfMOkYisqj2HrfUHUNuyGz29h+QASnpD+Kz6XkzXDySvyclbZQrWjh07poxdwbFP7+1xT8fCUGMf/4MmSL945RpGjl3G1n5uquB5+2SKFL0dxeOYvbUXc5IbEbGpQra6XpnRjHV5ndIuGyqGkVizG0n1+7HFbedxq42PIrVtXNqf+Zvr94s89aJL+oX0rczpxNLcPizYtgcRnA5Xe9X9O+Xf7cKGa0hqPY6mHbux/4A5R4BTsmwSos+q4dmzZ2W7Ty4+585Xvqk7LhDWKTleqF/XPYDmgChn2okLpJx7+Xp7Pswi9Pbpp2ARWAoRIAEJsbUu89z8XQ1yRge/pPOkclnIPlovX9K5JuRunmaeHQsuYCbw5Rkgty1+Ht9bMwfPDlV5u1o5ZMcu2y0jWIc3ugbEBf2mXZV42KHG1TOi9xpquoZ2usa1j0TGBc4emWS6gmHquHGHPJov72E8IDcgIEoKdOqaac/g1Dmd+hY+1B3HRH+0DvflbQCncXGXLJkGZ/WXXeYUAtJxE4vQHRLotpvTDnqv7aPtou1q2tHfl5Knf0fankIMleQpsSDRsAiLemC0Lk6e/l0F+0zutZzzu2QTCU7V6mgtBxeyb0qMk/VdK1YsB88d2bx5M3jmCLcuJ77jwag6TgTxX6gxk2k6hmh+qPFT5UQ48I/qsTy97DSN22q2PcbD1TWUrtZXw+l0
VcYuW+NqO1T5atOWtePT6dp5qqOhnTddXOU1dAmIVkyVKWDHVUHTpgtVNlQY1AuWy3z7Z98HdfWe/wlzTiH/k0pISMbo6ITM0VdgbwDC9EBLwbwCbu8+CLb03m9vevmgrAJ9tXXzYbhygiCIckFZD9R69Zkq4+l58qHrx3yCMF5mhyyzExIXqPf2HkJb26gQkcJtTUjLqUd8Rj3WpTdjWWYP1mxswtbnYrE2sg5zMvfghaIJvFRxQRa5cmoUgaC7ToPkwyIcvnRrnQkJC0HPvKpJM92mdAIRBfsxf2svFiZWY1lcPtbE5SB2QyaSk/ORnV2J0tJWNDQMoLNzDwYGxgVgcvpQOK9HsJ29e69Nb+Z98/S8tjV9ZuzcqO2DZVD+5nS88rx3PVh3QzCDddR3yivHlKnkY2zPBVT3XUBs6xUsaHhZ1gPoGgGZ8lN8DLMzBzB7cwMiNlUJ6ViR0Shf1LnLGYlCUt0+AcAkFzzjJbP7NHJ6ziK377ysEcobmJTNCwoGL6Fg8HLguoT8gUugDM+EydlxDtk9Z2WL5q2dJ5DWdhTJjYdAUhJfOYLo4h1Yk9+NFbk9WJS/E/P5Bb/qAuY561HkPat/BYvrLmND7VGU1I9i+/Z9GBmZkB2zpienpg25MD8xcYsQkJ07d7r/Ib2ecY/joY51qsdQyceVq1dlylXC9mtY0GimWglR50Ly0gnMzujHnKRazN9ULmfp0JNEgpdQPSbbXpNYZHROIIubQ+w4J1tbc+c5WY8l7cy2viT3TM/rZ9ueF/mMrgkhg8mNB5BQswuxpQNYW9CDZfn9WFi8H/OqJ50NA8yHhAUN1xHddBoNPbuwd98+WZDKnbIUXOhzMuQz7t27F2vXrsXKFctl9x0FMgKyBCCbL67ydVdBkQIdyTeAlfL8Qn7xzIicBs0ToRnnxXS9eH9uYhBH9nXgM11JmCEnhk9PQNQzYcCrAak+T4XjISHIZTq9HL+qTpddlQhAqUciwvUCD1Wl4vGObXJI4ePtBSa9MVvyKGPAq7OOwEdsnC/qCnT5lZ3lOetCWM6zQ9VyWOL7WxMwg4vQb3YNiLSrflkPAFjm2cBVvtD7yYPkO32jpEOAr0M2jL7jTXE8LZIv/ejYt7wx+g6YPmdZ+qU99BSsv6+KllPqHyxLlgMgn+otw7ODVXL+B9fa/F+/hmvkMEpO2dLte8OVwal2PAjx8c5t+ELLFtnymQcRrirYgIaarJDngGjbBdtd2kPb3+0T0672Gh7TB05/+trYS/P3k0X6HO+G1z+Bvta/PYZKTrTvnHdFSIrkj+Hi2VGcHO/DvrF2bO+oQlFhhixiX7t2tXg9o6OjZZoWz33TBewcL3Qc1PFCx8ggdtT8mw3D6bO8YJm2TdXTNK2P6mmo+QxVJlxIGdULli3Kzj9qU9P0Pqij6Qz501Djdj7j4fS1HA2DetPdq46W6RKQ6ZT+O+TxVM4NGzbIgvSyskYXjHlgyQPWBFYemPLSp5M1Oh5Qs4G7HVdQpwDRlEO9qcBS9YyMAYZaBw09EKj6HoA0MppuQrueQRveveqoLdX1p3vy/rp5dTIgi3IkIQSihohcBA99Gx09KVvXdnXtQVPTIMrLO5BfUIeUtFKU/+IRjHzs4+j+3NeQ8dAsLF6Vj9kpXZibM4qIbYcQUXwUEaUnEFF2EhHlpw2hoBej4hzmlZ/BPKYzv+QYIoqOIKJwP+bnjmHB1l4s2tKMJZuqsCK+EGtjMxG9IRMJiTkyxSonpwolJc2ore1Ba+uwTLXi1rG7dp12tmENPd1K+1OfXdtGw2C6v79vnhzY9jSuIcvQd8auj8lnGdqfoUPN99vz97kpQ9P0vTD9bJ7Jn8Z+N56PCyjuncSqZt1dzKwDIJmcU3oSszN2YHZiFeZvqpDpPgKCi/sFtCY3HkR62zFkbj+FnN5zyOuflI0Itu28Kue1yAYEzi5pPLXe3SnN2R1Ndj5z46/KWTPcUY07qxWNvCw2uEEBt4Cm7Zyec8joPmkIScN+8bJEF/djdd52LM3rx4LScVnAbrwhZt0SN1FYWzOBotohmWo4NHRc3he/J8RrG21HnoQeG2tOQt+zZ49LJH7fMZX/ORC0k3wMH72Ede26i5jT7jWXMDt/D2Yl1iAisRxL0+rES0GPRVLDfqS1HUPW9tNCNkgupK1lw4drQthMO5vd59z2HXsVJWMvy251PNNHNoBguw5eEjs8uye94xg2NxwQgre+qA/LCoawoPyEM9XN7E7GKVmRjWdQ1z2CPXv2ypa9oUgI17Q0NTUJeduyeSM4lUPApwOu3C+y+lXcTlcA5Hg4zp/eidMT/Th1og+njvfh9LF+nDk+gLMnBnH+5BDOn9yJC6fMxbxDe9pxS+cmh4DYa0Di8HBjlpAIbxteB/xzypN1kQDY90pA7LRwMsH04H0oW2o3KMt7/dLO09qFgMjalhjceA2I9eVbwWWwnRX0BkL9Im8AswdUfeDW6ScfoKV9pwzVNYRTwbSx5fa/ANwwHpD2aPzvmkh8PT8Kt5VvxPcbUnBneybu7s7BD7fn4Yc9/7XXD7bn4u7OHNzenI5/ajLE8M9aorEiLxb1VZlTTkLXNtVQ/h60X5zQbl/GVVZCbVeXFFhtKfoeCfH1ga8MPxk1feQRRdXTsrU+Ggph1HdF36VzY/Ix4OyJIRzc04WOlnIU5KUhIT7GnabFc994+CGnZHIsPXfuXMgPF7/v2Pr/63uL3/+z2+KmDyK02ZCyFztUZnMzFVbZYBhON1h2qHL5NbCrq0v2oV67NhKDg+MuCTFgbSqI9gN/BXc2iPDiCt4UWNg2CcxsW5qnaRp66Z5dY08Bo21HZbw0rYOxp/mqG/r5vDLN8xF82vWx40Z2ql27XK2vm8YD+Y5cxdHx6zjK6UpHruEIT5neN4mDBy4JSOOOSGZr1oPiFanNKMPwpz4tB9Pt/eAHMfiJT2Lz07OwZm0iIjdkITqpGFHJ5YhMrsS6LVVYu6UGa1LqsDqlHqu31GH15mqsSarAmk1lWLepBOsTixC1MQ8xcRnYuDFDdrBKSdkm53fk59cK4aiu7gLXdrS375JF8zzRnQvoeXYFp1oRSAbXeWjbKZnQe9OGbCe7rfz3oUC+ytt5ajMYavsy3ZM35YW+17qojPdeePLeOxAsT+tm0tUWbZj3xZanPV5KPuj5KN1xAYsbvZ2suLsSF5XPzh3F7KQ6zE8sxbL0eqwv6BZwSpC6tf2YeCjy+i+icOiy2e1MiMarcuaLbMGs2y1b58ToeTE3CuUcGNXnNs27eQglt2h+WXZNIyGhpySr5zTS244iqX6fTPtaV9CLpUW7ML/ipDuNiB4FekTWVJ1AXuUAWttGsXPnCZ8nxG4jjXO6Fg9L5U593A+fY1zwF27cY3q4sY/j3bVr1zF2/DLiusxmD1LH+ldkp6pZW3dg9sZSLE6ukjUdnM7GaWnmPJ7zQsZIIEpGufW1OYuH7VW48wq4lkZIyfA15O44L+SE7URvEr1HZXv+j7clth7uKWTvOgqGrkib8sDQlOZDIOFZWzSIxaWHEFG
n23SbdTZRDROo6xjA3r37hIRwzrfujsXn5m5ZycnJQkBam0pl3rmAGge8GEBDUGq+gAsYU4DlrlkgsBnF/vFWrOuYh7Vtc5HavRZV/WnoGi7CwFgldu1txKED7Th+uAcnx3fg2KHt2DvajFvaEjCjNRIz2qLwtqZIfGvzMtybE4uHZT1AmSwUf26kzuxQxVO1/4AvkiXu5sTD8f5OPCDR4gWZnoBYhI+AVdtU2t8AToJa+aJteTM0TYCo9IdjxwHDQiYUgDq2jI4DhtUWy5O+9b6mmz7WdK3DVA9I7c5a/FNHIma0ReOtLVF4R0Mk3tEYhXc2ReOdzTH4s5ZYc7UGQk1nyDw7375XOTtfdYJhUEZ1rZB1eltLtNT3z5qisDQ7CrUVWzHcXyvv47mJIQHobFOXjLGdfO3I9jNtrcRE/0aMjmlfrw2VXHr9wzy5lBg4/eOzK+U6feCUL/3nI0CeTbNmxauXqZtFdOw6W+/T5OkR8YzoNK2NG6KxZvVKLFu2FOvWrUN2djaam5tx8OBB2diCB61y3LDHTB1bdcwNjr+aH05P823MqWkahipP84LlMl1tqQzDUDaCcratcLpqJyir97ZNldXQtsm4yobSDcqqDVtHZVRf71WW9z4PSFAwqKCKWojmB+813Q5Vl2n2T+9t2enitq4dpw69IFu3bpX/7PPzK+RQMQUCGtpgzKTZYMsDbSofOpxexwaPtr5X9lT9cHlMD2ePtk3+VHsKKD273rOpPQ3tOpq4saf5oWyozpHmIRyp3o6jo6cw3rkL47vP4khFOw4XNeJI3yHxiBDc8wBD7ibUv+Mg+uYsxZ6//3uXgLTe9j0kRiaLh4InjhcVNcoWuNu2NaKgoE5OIc/NrUFOTrV78XyOvLwakGAUFDSIfElJKyoqOlFT0yO7crW07JSv1Zw2w7Up/GptSMdZ2dWI02N0ZyOC6VBtyTbw2sHfhpqu/aBtZ+tonknz95O2q21H29Xo2eTDkAF/Pu2pTe2zYKh1NqTB6PtlPJuejFc3te+vC/NJ2Pbuu4i6/gtY3UwQzIXc5gv8nJIJzEppx5yNZVicXO2A4CFsbjyAjM4TMoWHU6iKhq/LoZQlFggmEA5FLngIZTkPnpQT7p2Qac6BlcFDKkPZUFLCc2ZYJr0k3MKZU7eye89ia8dxWd8QUzqEVdsGsaj0sJnWJzs7vQp6QtZVHEZhRY9M2xsZOSnvNtvCazNt81ewe/dpmRrKaUTcAcoe73T8Cjfe6Zipocrxnt4PnmaeyTUfnHbl7Dw1p+IcZqW0YW5CKZak1CKqsFdIQ3r7MdPmQ1dQNPqyeImU4NntROLw5NJNWJZWh42VI3hudZpMkXspOhdrczsxc3WqEDhbh3FpV7dNXxYiQ9KyteMEkur2Irp0J5aWHURE7WVnjdersulEQv0BdPbswOHDh+WwMpIOkis+4/nz5+WD0qpVK+QkZgOMnLUcBDs+kGRNExEAa76I68Lkw+MdWNgwEx9P+wj+IeWD+MzWT+K2nC/jl0U/wNyqJxHXvAQF3Qmo789E+0ABOnu24VONsZjRtE5IyNsaI3HbpqWyVoM7GhHIP91XIWs0uE7jD/3iNJ8ne8tkTcLftcS7U8umJyBsYwc4CgExX9MJNhWoKrjUNAHEzHf6wJVT4OwCXIs0WsDVEBHnazrBrU7dEn3HrgV6XTBr7YJ16mgf6odr8cn2BPxRcyTe2rQeb21ch7c2/Pe4/rJ2HZblRKO2IgM7+2pw7GC3TAvk9ECXSGgfBNrZEBQlKaafdMG4aSvN89pf+sgC/rYN059+8iDvhPu3pyRIyaD+XXpeEtZZiZIbShrt6vvl/V3rM4qe068nj/Zj184W8ENEbnaKu5sW14tw8XppaSkGBwdljOWHjOCYqWOnjr8ce+00xlVHQ5VVOdXRUNM1pJ6tq+kMw+mozI10p9O3y9S4FGg9o96Hs6N6Wh8Nbb1wuirLUGWCeppuyzJ+UwQkqGQbt/O0kFAPY6fZ+nactmw527bGVZ73/Nkh/+Pq7+/H6tVrEBkZg66uMQdUeiDSA2weSPAAmB9k2en++FRQpqBTQYhf3l+WDUYN+PTsTdXz8sIB1ak6pjyV13CqnLGtdbbl7PhUPa+dDi5ei/2/fgKHC+pwKCZFwgNPv4iDi9bg4MpY3xoR7ig1UrMdY9/6PvZ88INCQHZ9+B+Q+3wE0tJKZAcqnrvBKVvbt+8HF4R3d+9BZ+duOfSQ3gu9uOMW0ynLs0gozzMn+voOi/eLZ1AQHHJqFb0cLJskyBwu53k7bOLBdvCeW9tdQ68PjYw/3dOjnD/PtJ9J89o6vAz1p5ah8hpqfYL3Xt+wXC3P34fU8etp/Y28v3zNUx3KsN3Ylj3D52TNB7dclXUH/AJfehKzNtVjXkKJrDngLlUEoekdx2WNgTl80oBgOXDS8VIEQa19T5IQUzqA7O1nIERk72vyZZ47M63JbZcv+7S1pfkQVme3yhoHEpzNDfuxOqcNBOAV+17zERs/GXlVPDDU4TQwLmhPrNmFyOIBLCYJqbvqrEt6BfPrriK2eETOv+EW1CTW/qlYXt/09x8Et+BNSEgQQG2PWXZcx7dQYXBMpJfgytVraNl3CUuanEMX2e5Vk3gptRNzNxZjeXo9Yor7sKXxoEy1yh+8hDGTXNsAACAASURBVKLR60K61INht68Qu32QNnjwdyswN34b4sqGcO9TC7A0pRaPzo+VNn0xMlvIhsprX7i2nL6kV4WeJrYnp2ZxY4CY8mEsKzuMCLctX8XC+ssoqOuVE5O52NRelD42Nibej00JcTh+eIf1pd0BKw74soGKu87jzIgcksbpVZxqRa9G12ARHiz8Cf4k/u2YETcDb457E94W/8f4s43vxLsS/xJ/t/k9+HzGv+JHed/FvJKn8LHK1ZhRvwYzmtfjjxvX48tJS3BHxjrcU56EXzRn4YGOfDzYVYiHuv/wrwc6C3B/Wy7urd+K9zTFydkmPN+EBGTW5pUoK9oiYPfEkV5ZI8N2FCDokgMPhCpAVHKhAFYBbjCdoNOX5hAIX5oC6lCySoIkTwErbfrrpGSTuy01jNbj0+2J+POmKLyzYR3eUbsW76hZgz/9A7/eUbsG765ZhyUlG9Fcn4uRgTocP7QdZycGXQ+Ir93s9nLb0GsXysrfhy1nxe2+URIZyr4hmg6h8fWH0w9hyg5lSwiO9J1VT1d/ar+qDT6H7KZ1bAAjg82oLMvFluSN4HqR5cuXY926tTJFq7W1Ffv378fk5KQPT9rjLeP8hRpvmaZjriPm4kzV0TCor3qarvrBUPODoa2vZdyMrq1Hm9P9gmUG78OVq+lBefveLpfpqqOhLct42ClYwQfivaapMbFu/aPGVU7vGdpploovasuHi9sKKsM0jZP9FhQUyFfH1NRscPpPEEApGGN6aICmACJUvgFuqmdsK2Azepqn5djATdO8OvnLoO7Uenlg0bYdjKtNDbUsDe10W1fz7TAoq/IaGtmXcXDWIhycvxIHHnsWh5ZH4+CyKByKTcF46wj2P/CwM4WHX8
uvYu/oSYwtWoM9H/2YkA+SkPavfxvJ8RkoLGxAU9MQ+vuPyELwPXvOCnHYvfucbPPLXanYlwR7DM3FNF7nRJbrTvbvv4D9+w3RICikh4PrUnR6lU04+Az+52H/+fvX3DNNL09GZbUtgjJsQ2Pfb3NqmcamkQ++Q165qqd9o/f2c4RKM/UzdqePm3raMrY9Ph/vebFN9+67gKyuSedEeeP5mF06gZc2mSlXq7JbZUoTd6bK6T0LEg+CUp3y44JW9WjQu+FcxWOvyMJork3I6j6FObEF+NS/3YYlKTWGROz5fzBzVQrue3ohHl8Yh58+PlcWR5Ps/PNnvoSH56yXtQxPLE7A9+97QqZ9ie29fo+JXQcSEtaNHgJOQeKibAXOS8uPuOsY6G1YUD2JtLwWNDT0C+HlVCvbC6Jt2NS0Q0B0RkaG7OTCcUp/Gtexyx4jw6VRhlMNjp+5hPXtzoGfPNej5go47WpOfDFWZDQJeeCOYTKFaviaTD3j85W73iV6kNgWXpuX73tNDvN8ZF6UeD1WZjTh6WVJ2Fg5ihfXZ2FL00Es2lwpa0BMWxpC5ychpn1ZDsvjlDeSRy5cT20dR1zlLiytPC5kTghrwytYW3MMdU2t2L17t8zr1jNCysvLhbxxYSqnoBCAEGQqyeAXYS4mJyjh+o1zJ4dkTcfJYztw5HAn9h5ows491WjZmYvUjnWYW/Ek7sr+Ft4R/6eYETtDSAiJiH29Ke5NeH/Ce/FM5kP4WNEyzKhaiRmNa/Hm5kj8VfkavLdqPf6uPhYfaI7HB1sT8MHWxDdwUe+N6r6R8ozOB1oS8P6mDXh7q1mAPnURei1sAuKCVwGHHvAXQOt8wWZc5ewv4+KdsvUIOHmvX76p5xBIAZhy73w9V5shp9YZGzYoVTusC98HruGpHqvH33ZuNIvtud6F2w47GwqY0ExBIwlz72VrYufejoueo+/KU865xL7qBULKiI4TunZVN1Cvjmj8j7YY/K4mCR3Nhdi1swHHD/XI++95QPzeBWlXkgqn3bSNpW987e0QCJH1PCHaf0pAjJ72lb9PhDxIP+qUPMfzof3r81qZPjb949RP3gnPpkdsnPdI7ahHxr3Xcjw7rOfEeB92dNegtChLdtLSKVrr160DT1/v6emRXbTUK6Jjr40ZOdaGGnt1fFadUPehdIO2bH21oXpB2eC9rWvHg3JBe7Ys84JXKH2VCerqPUMtR2VDhba8HVdZu2zxgNhCwUJsYTUQlLfvVSaUnubZ8nZc829Gl7L2j/eqx4OrOOWBhxPW1XU5oCk0APMDLL+MB/QIJBUYhpMJBd78sgpK7NDY9XR5HyxL7209O66gcGrajeocun7hyrPrpmUdjFiOI4X12P/gozg4dxkOb87FwYgVOJycj4MvLpC2J+jnrlK7Woax6wf3uN6PkY/+EwqffAFb00pQWdktOwyNjZ0WT4UhD1cF6BoSYcgE0z1i4REMAmIlGjbZsAlHqHbS52AY/l3w+seW98c9maAduz3tPI3b+X6boftHZWw9taV504U3ljXPYuS856JNpml/Do6ewYoms9UudynjTlfyBT6+SM7uiK8clq1bORWHIJQLm0NN+1FiQEBM0ErgTy8GCQZBK4Hv7Nh8fPzWr2DBpjIBzlxMfsuXv4OYkgH5yv6F234goJn6H//sv8tXe3o/Hp4TibjynQK8pRwhOK8ZokPw7UzfUiJCfdaRi9i5cJ27Qm3h1/vKXVhUfcYAZ+7IVv8K1hcMoqi4WbxwJMJ8L/V90/YvLKwSEM0tJQmsg+OWjmE69oUKdVxjHr28XLDdsf8iIpxpYdxpbHbREcyK53S3KvFUkHyw7kUj1wzhc4mHeXaSDSUR2gZsf66DWZpWK4v12fb0HNGDwalUXDPC3bJI0oR0yJQ4x45DHElo6B2x21NI3ch12cmMZDS6cg/my1QsszPWwrpLyKzsFLBw7NgxORmZgIFbcS5evBgNddvcBeJcLK5Eg56N08f6cPRIN3bvb0Lf7nI0D+eicEcCYloX4oXqR/HTbbfj37NuxUdTPoS/SPhzvCXuzR75IAmxiUjsDPzdhvfi6dSHkFuwAf9cuBwzKpcLAZG1IC6AtUClAkwXWJqT011ga4NT6rfxisKMVufivQJcBaoCbB07rn0LRNs2RUdtWCBX9WhbZVRP7o3s/2yMxNyU1agoScVIfy0mjvSK54gkzwP5BrgKUBWA6dwHplUJcHW+rrvgV0gGwa4DfiVU0qJrELx8A4b99wJ6Ha+J2lHQrKCb5bkekGN9qBipwXs7471nb4/Gm9r+cC/7HfiT1mg8XRaPxppsWQNy9GC36wEx7eoAeGd9jO3FcMG+00/aXgy1T6RfXZJn+kDyfOTByItth3CInuU90T5wy7D1rbjIue+FU56QIIfUUjZgV8vSehkbSlqVSDmk5NwYeOjhntE2tDaWIHNrEmKi14Nb+hIHpqWlyWYW4+PjMrZwDNWfjrf2GMs0vacc70P9VE5l1Zamh9NTm9QLpat2wpVp2w/q630oXS1X7aus3muourzXn8ZVJlyo8gxVR9NC6YgHxBYOJRQqTY0GQ5XVh9NQ0xna5am+nT9dXOWDoeqwPH4hrKiokLUgMTHx4Nd0BQThQz/QsuVsoMf0qQAuvK5tx//F3AaXU/XtMu34zdnzSIwtH7Rj39txW8eOT33uV3A4oxTjOw7icFYFDifnYbz/MA5FJePg6ngcqe+TtiIh4Bfi0dRt2POJT7rej86vfgNpq+KRn1+H5uadGBw8JlOlggSCoO5mLtYv1GU/gx2/mWe25cP3X6h3wuvfUO3mt+vJTldGeJ3pyw/qTVef6fJoh/nan/V9Z2Xdh1l/8KoBwRuKsWRLDWJLB4U8kHwQuIrXYwoI5td3AlgDYukdod7MVan47cINiCsbFF2m084XbrtbCAjBM9cnfPjjt6Bw+KqA4e/d+zieWLxRyMknbv0KXozMAqcSLU2tDZAPZx2JelvEA2DKp10Bzvr1nt6Q4WvI7j0HnkGyrnI/5smhhfT2vIolZceQnlUpmxtwkwVuZmATELbV5s3p4o1tb28X8hAct/Se45eOlQw1Hkzn2o/Ji5eQuuOqs+7DeJ5e2tKOiIQSrMlpl2lnXDC+bfjq1HZ3PR6vSVt5pMz0BadqcSG6ma5mvCRCNvaybax7tpWPdDhExCEg9nQ3kjr2P0motGXjQayoPmGm7MkW29cRUz6C+oZG2d2GZ39wOhYXmi5btgQdrWXgvP4TR3qw/0ALBndXoXU4DxX9qcjt2YCY1kV4sfox/LToDtya8Sm8P/m9+Jukd+HPEt6BP9rwVs/DYZONgOeDXpC/iv1LPJ34IDKzYlBenIKPF63EjOqV7joQGySaL+oWESEBUdBvAXzd7lZ0STpa1suUrhlN6yEX72Whu0MiVFdJiIT213IlGZrmhD5y5NRFdYM25d7I/EVjFBZsXYea8q0YG6rHxHivkD0lIAJcBSwq6DPkQIEpQwNuHVIiYNIDloY8GKDokgUbbBIYKxgVkGzAtV2uxhkKMHW+6rug1
QHXhoDslI0ESoeq8J4OrnWJwZtbo/A/y1fjr4tX4m9KV+Nvytbg3eVrzVWxFu/+r7ykHmvEu/ZHTZFCRt/eGo3Ht0Wjuiwdg71VGN/fJZ499YBomyuwl3unH4Jpcu96Rpy2VVmHWLjt6PSd2rOJnxCb4HtA74TTl0qMdL2JbdOukx0XGadP3TKdMvSedt24lue8J0bfebfkmcwULU7X5OnrxQUZ2BAXhRUrlsnZIpwGy0OrubU3zxWxx1h7nFUsyZA/O9S4pqts0Jam6/geDJlvj/Oh9LUMW1ft2vJ2XPNVh/fBn8qE0gslb+ur7nShyqsM7zUeDKcQEBW2K2fH1YBdiB3X/FA6mqfywVDzb0ZXZdWG3jOkPhd8pqSkyH/+BQWVIRekB4GZd6+EQEMbIBpwHw68hgJwRja0La9MY1fvaSd0Gbad0PFQdVC7Gt6MjB8M22X520NtBkOWQUDGL8N7xk5h7P6HzcLzD3wAY//4EZT9+gmkbtmGysouWcMx3Vx61oXtoZcpK1ydwqWbemu7eqHd1tQNpx8unXaDed59sK2D93wW81x+HVM/Ly1U+2q5U21O1TMyTNe8YMh6eGlenbTtGZrpV1xXs7Xzgrv4mSRk1pY2zE8ole1eufaA039IPuhR0Kk/BvA6YJUA1lpAzrMlfjlzKe55dLZ4TqhrvqRDPBL0cqgHhCeef/if/1U8JrR590Mz8auXVgsZoQfkjvufwq1fvwPPrtgiu1/JV3kbMLtA3NSB5dhTknhvA+esnjNySN/C2osu8J9fdR4b0mtQXt4ua4+4zohTDbUN+eFj/foY+Qo3Ojrq/kemYxZDjlcactyyL3tM03GNi7QPnbyAtW3XXQA/p3ISs2ILZb3NhvJhcCE5p7vxHA/2AadBuc+mz+08r5ALXdzvtA/X0vC8D9qQdpF+Ml4NkkG7X2hXNwVQ74dr023T16T/SULoVeL2v3G13BXLO6hySek+pBYVoKajBs2jzchqzcKvV/wav1x1LxaUPIfF9c/h6YoHcW/R3fhW7r/jX9I/hr/d/G68Pf5tHsEIQSpkelXsDLwl7i14e9zb8M64P8V74v4Gfx37v1y9t8S+BR+O/QBmxv8G2RlxqCpLR0tDPu6u3oCP1kbhH+qi8MGa9Xh/9Tr83Ru5qtbifRVr8NGyRfhJ2QO4vewh/GvRTPx94Tz8bdEy/G3pSryvYjXeV7X2jdl/A3Xis7y/Zj0+XR2F6IJ4tDbky85fJHoXTg+LN0GAn+PBcMGoBf5sYuDGHZDo03XAYRBIuve0KeDXCd175wu3o+/VQYGvA06lTuoBMQSkZLDSJSBvb4zE5xMW4vbICNy9camcTv6TtPVyEv3PMqNlY4H/qvCerZH4SepafG/LSvxV5RqPgBQaAjLQE4KA2KBc4xaZMG2pbRRsWytd+1ba3pnWJVOevCleoQiD9hXX4AgpVBLhhqYMIQ9aPyUqTllqV/vUvfcREKuuSj6cd4F1oI4SI33/xI68P3wfxmRL35bGEmRlJGPdOp4tslQ2tuD0rO7ubtDjSs+0bnyh46yOvfZYHYyrrI7Zeq+6DPUXTNN76jKuoaYztH/2vS2vehqqfjhdpquMrWPHbd1gXHXDhbY8Zewf7+1yGPctQg9n9L9jOhdq9vb2Ys2aNVi/PhqdnaPO10k/0J8eyHpAW8HqVBBogzZPXkFlUN6kK8hTeQPsVEcBTLgyjU3aCNrxns3WNUBSy2I4VS9UPbU+JvR0ptpT256M6vBrORd/jzYPYfcnP4W9H/iAkJD+W25F5pJIZGdXobFxENwWlwCO06j0+VlP+znsOgbrEE7O1jF1MnX1yvDazC9rnsXY1bj/+Uw7apqG2hZ2GDrProO/bE/XljFx2rLtmbj3/HaeZ8e2rzY19NsL6nj2KM+LhJKL+xPbJgWMk3wIAUmoFO8HPRc8RJAAVna3cjwfhnz41xyYNPM1nQCV27zO21iMp5ZuwoqtTbIWgzKcEuURkNfkdPR//ORnwS18CZK/fc9vZMoWATGnalGf1y3//h054NAHpB2gLeBZ49ZuWrouQkkIATw9A5yOtKyWXh/jAYmouoD1qXWyfombIJCU2e/v9u27ZQve+Ph4+Y/u9x1H+Z8kpyYNHL6AFS1m/cfchlcwu/Ag5sUXyU5jPKGc3iIShfX5XXjoxVVyuKP7/PQ22Ws/LCKiniiSlmdWJEMXnCtJJIlcl9+FVVmtIEmx+1OmdKmXRNrU8SrRvtO2JHTcdYzT55Kaj2JxHQ8pfAVz6q7j2ZJB3JX2a3xj6zfwlZyv4NMpn8Z7It+Dd0W/C3+d8C78ecI78WZOn7JJRnAKFfNiZ4DrOP447o/xzrh34K82/KVMq/rX+I/jjsRv4jebfobnEx/GF+NuEVtciP6xuA/j+Y0PY2t6lHgCutuK5ctzQVcRNjZnYW1lMhYXxGFeViTmZKzD7Iy1r+uatXUNXkxdjdUpT2Jnxvsxkv3XaMr4EJKSv4Vn42dj5qaleCFlJV5KX4NZW1+f7ddbF5Wfk7kO83OisaYoEWXVmejrrsDhvR2yYJ/bn9KbICBPwL3n/RAvhg00beDLL+0O+BNgKiCRaQ6QFD0LVDp2DJDVL9kG1LoAU4GmY8tLd+TEttmUwOcBUQLSEYM/aYrCl9NW4MfJa3BfwUbwgEAe+vhIcy4ebc37z79acuXsmIcbs/1lteTikaYc/LouA/eUJeFv6iJlyhinYD1RFCPv4lQPiEcoZBG+tLkH+A0oNzIK7o13wiELvvY0AN5dzO/rJ8+jRTtyCfhnuteHbhk6HczpU7efRFeJouknl2y4dffqRg+KeXf0ffA8brRpSI3zfPos9jvqvlOOt+fcLtmGe/xAjxx0yLNF4mIjZXoWsSE/Und0dOD48eMytv6+Y/T/LX0C9zdq6/fRfaNlvlG9sATEZi4av1Ehym60AVTPDkPZUD3mTfcLpWunUVdtMKSbLScnR7wgW7fmyVQsD3Qp8PQAnIJND8wZMBa8VzDHdM3TUPP8oQfiNN2Wt+uk+eFCWy+cjEn3nmt6OT/g9NufWm+1ZcvZcc1nyOciIJNDCVdtMN4PZ/er5tt/iJSkXBQXN8t2pmNjp8RLRcKi7RHOrrHt1Vvlg+lT7702oW29bDnPlvfsRs67p/wbu7R8vy2vzKnvk11HLdOT94jr1Dz/u+vpeGUowWMZ4fQ13YQeAeF2xvEtF+UrvADyhlcxO6FcSENC9S45M8L1fggw9bweOnXHBrB2nFN14iuG8cL6TFn/QfDMbXJv/drt4Haw3MWJMv/2nR/LgmuuT/j8N+9GXPkQuID8n275EhZurpAT1O+8/xl892ePCnkgAFYwLOWpN8D6wh9MZ9k8P4TTmXhQ4sp6Q0D4zPOqJrEmpRbcIpq7sXEdCNcr6TSsiopmLFu2HFyAzt2d7LGKcXvc07xw4x9lZferK1fQse8CljS9LOCd9ZiVvRMLkyoQVbRDpr3xfBMe1pjVfVq8QFyUzuewPRQ2+Qr2B8nL4wvicOcDzxiiQd19xiO0LK1e2pZkUUiHTlvT
NSVK6ITkGB15D5yzQ9QLktoxgeX15+WU9Dk1V/FIUTfet+mzUwlGkHAE7x3vxp/G/Q+8K+4v8f749+IT8R/B1xK+gJ9vugvPJz+MFWkvYUPGEqTlRCI/Nx55OfH4XMKnhNB8OP79WLh5JrIyCfjS0dtZhl07G3FwTxv272qVdRE9naVobcyXOfk8HM5cWaivuvFVV5Up26lWlqSiO/cxXM/4U7yWOQOvZczAyJYvImPzcuRlx8suVNXl6aitpP0b2/39ZDLRUJ2FptocdDRvw2BPJfaNNssCdK6v0ak+Ambli7UHQAVwBr9qB4CrAYoWgCVQVBArBMUiNC6IVPJCWQewWnouoVGAa5WpX9pDEpD2aCEgX8lcjZ9lxeChqjQ81l4InobOc1F4IvpTPaX4VWUaHqpIxW87t0ka02/2erqvfFrZp/sr8MuiTbg3J26KHPN4OOQvmjLwvxvMeh0SkCeL41BXmYGh3mrfFCy3T9x20Hb2SIEQDm076T+HBNrg3Gp36S+R92xoOW44pTynXKevzHthvSe2PcpQ37k07hINrYvzrmm+eQ8c0qLvhD6De+/U2XkfhPhoeVIH512Tgw5HZdH66FCznLgevyEay5cvk4/UJCKcJnvy5EmZwq/jsR3a47Udp0zwZ+sxTnm9NE91bqQfLEvlg6FtT8tgaOtrnLIqo3p2mubZYVD3ZvVsG7YO41MIiF1IsELBexpWeQ21MFuWcf1pvoaqpyHT7Z/eq/x0oa3HOGV5+BfnEi9fvgKVlS0YH7/mgi0/uPJA2I3T/QDyxvKvx/YfouyNnjd0PoEYz9rY1bUXu27/gbv4fNeHPoTsucvkoECe2cGdr0hS7K/Hr7dNbRAdTtcG4aFl9DlMSHlPx59n63syXt9pfabPCyVv0lRfy7Hv/XGtl2dLPRpatgmDcv57Y1PTgiFtmzTa4oYCPOV+c+s5cPG5EpC5KS1YmdmMxJrdcuYEgSxBv3x9d6bqSNyZ8iOg2P4i707ZMdN9CFapzzUJ3Ar2Oz99GI/MjZQ1DtQlsH7ohZV4ckkCnlySKOeJxJYN4ut3349nlm+W6UdbO0/grgefka/5BOZavvtV3wHOttdDZaR+nIq1+//ItCNOw1rawClYPO/kFcyrPIfVm0qQk1Ml20TbUwi5G1taWo6sQ2tsbHQHe3v84ph3o3FPxzHKcf3H5cuX0bz7PBY3mUMf2fazswZkd7DYsiFZLL5t5zUhDqw/SRpJA+MJNbuwYFM5FiaVI7q4T9qWBxGuzeuU3a24w9WWpkNC0l5Yn4U7739a2jAioRhzNxTK1KnZMfniBWGfcME719hQj/LctpeeEW69uyqrRbxhXIvz7Motcr4K6yBtOXwN6d2nsarBEJBZ1VfwROEAPpD4RSEg9GC8KfZNeHPMm/HW2LfibXF/LNOnSDL+14a/wPs3kGR8FF9I+Ffclvhl/CTp+3g0+V7MSXkSsVsXISs7BgX5CSgt2oyqsjQB2y31eQK2u1qL0NSUg39I+gC+lPJZROUuQFlxCprrctC/vQIHdrXixOEeWXMycbRPzmA4uKddAPqekWbsHm7C7p2Nr+saGazHQHcJxst+jtcy3yQE5D8y3oqu1HuwLXejEIEdXWVCdnYNNbwu26+3Lq78cBP4PCRZR/Z1yuLzsxNDvi14FcgKqHNBogF8Bjwq+NXQA7Du7kYKGPWruRIH+ZrtgEPHtpZH0GvHFbh6YFjroF/LjXxIAkIPSHMUvpq9VgjAr2sz8ER3kZzbMnOkFjwl/kebV+F7a+bgjqgF+EXBRjnj5bdd2/B4RwFILij35PYSPN5eiGeHqvDszmqYfBKZUvwoaSUebckTmSe6ivBYa5459LFzGx5rL5Cyfpi0El+a+WspT06md06nnzlcg6f6ynF/aw7e02jW9wgBKSEByQxJQNg2bH9fG/n6x4B27QO3r0K0vfGisB+svlM5hhZhlH5QOadftQy3b6Qeni1j3yFAVp/b8voc9numddY8X/3cZ3XeH/cd89rEqxfr4rwvGspuaSM4tLcLjXXbkJwUL6etL1u2TLZL53jNjYzobabX2R6zg/Eg5rTvg7LBex3bgzqabsvb/08wXWVsXY3beuHiKhsMQ8kHy9byqRuM2/o3o3fTBEQraheg8WBBKmtXUCvKMChvp9m6tr7GtcxwYVCfZdXX18s+0ZyKNTQU+oR0BXp2SFBmgz3NU2Dn3XsATdNMqKDNA4d+oOelU94uj2WEKlvlpoYKGv02TT28NNumHQ/Khbu3dRj37r3yTbq3WHksvQS7b7nV9YD0fu6LSI7fKlNXWlqGMTJiFvDa3g8t37PvPYPm3SjUehg5f1/YdkPH7efx4naZtp6X7i9HgXvoOth27XioZ/Xyp75/dp5fN3QdPZmp75lNuuz+NTqU5xQseqzy2iec6UjOGSD5u7B8a5NMeeJOVrYHxAB+zwsingjra7nxPHj5BKtG5jXxdnBxemzpgCxST2s9Knn0TCRUjwmY5nQvgl/u/kS5jVWjso6BNjK7TwlpYX3scoJf/l1S4ny9l/LpAeHaheGrSOs6g/kNztSn+lcQUXoCa2Oz5EDMoAdkYOAwuAHGypUrceDAARmW7HFO4xyfdDy0xzgdx3ScowwJCL267XvOYUnTddcDMid3VIgATzvnLlVcOC/rbva+hi85BKRw8LJ4NEgUFiSW4vv3/Va8QiQi9HSQ4HHL4p8/OV/IyguRhoDwLBQSuGeWJ4ME7MePvIRfzlwibfvw3Eh88ds/wqzoXNz/3DLc8/gcISz0xNz1wDOYn1CCL333J3jguWVCXoSAcBrWyMvYuv0sVjRwCtbLIAGZmT+CWzZ+H+/e8G58MP6DeN+q9+F9C9+Hz677NG7f9A38NOl2PJJ8L55PeRgL0p7F+owIJGWtRG5eHEq3JcsuTvRgcOeg1oY8KPHs0wAAIABJREFUdLZsQ29nKQZ6KjHcXwMCewLuvSPN6OkvxY/zvouMumi0NOahq7UYO3dUC/mYGN8hW55yW19ePH+B27pybQRPSZ848voubmt79EA3DuwsxYWSLwH0fmTOwOWtf4GGtCek3vS6sF6Uo/zrLeMNyY/vwMnxPnku7ibG81K4ha16PwzIdYAsAZ4LOA3I8wCiDfoYN5cLMJ0v1TaJEHBoA1uXgKhtBYws1wOvYtOthzetR76Uu7tgOYvQdQoWPSBKQHI34Dd1meJxeGao2pxcP9aAby97Ad9dNQsPVqTI1Kw7YxfhG/OfwR2R83FX3GKZNvW9NXPx/XURuCdtPe4vTsJd8UtwR/QC/DR9Pf7l53fixylr8KPkVfjmopn49tLfCdn5QcIyITa/3JaIe3M34Mu/ewTP7WrA82MNeH5Xg5ARkpun+iteJwGxgLeQEa+97PY3cbaTla/gXQmMBd6FbDjkI3T/mvfBtLe/DqZ/DTFiP6lXxX2P3D429RH7WgetnxIfleV0LOYF0613QMpy62zeCfPOOHWgrCuv75eT5nhF9o21o7G+CCnJCVizZhVWLF8uhxvW1dUJEaH3WsdoHbd1XNb
QHq/teFDPvrd1Nf56QrscjTOkjVDlME3TVf5mylMdlVVdO9S8G4W2DuNTCIgasAXtNI0HK6XpGlKf8eBP88OFQXn7XnWCZQfv7XIZP336NNLTzW40OTnFOHBg0vWCKJizgZo/7oE7BZn+fA+YqS2VY0hZI2/bMXGTZ4iG36Ytq/osx0v3y9uA0otrPULXy7Ol9fTC0HnGnj9Py9BQy2JIj8aesdMYm7cce/7xI0JAePZH1b0PIS1lGyoqOuSwwXBbmKotz7b/2ew2sGU0naEX99db0z09f76mm3C6PFOnYF39+v56m36catO24a/fVNnp7fO5VUdDrw7M89v38tSu5htZj5Tw3qwBOYOGrsOY3/Cyuxh6XuVZLMnsRGxJv3zxpseBW+/KAnQlGxIG1iH48nSNiENGfGsUNM+Euu5ASIXaVVs6vcoiMjLtyLlXciHrIVwdjwBpvqxbGHsFeQOXECNrXsyp4/Q8RGQPIyo6Fdu2NaKnZz/27jVrQEikGxt7xesaExMjX9I4jnEsCjVWaZqOdfb4pWmUUQLSu+8MlsvJ88b7NKf4qLP2Zgj0+IQiICQnPEeFngsSqtt+/CvZNeux+bFCHEgOuG0xz1DhdDMSkO/89BE8GhGNJxcniFeDMoyTpNC7NT+xFF+/65dyTsv6gm7c9qOHZArYouRKfPueh+UdYBixsVhIjRIQEsG07nNY0nBFCMjsmquYlbcLz8YvQ8SWCCxPX46HZj2EHz7yQyyOfBEZeVHIK4hHWfEWVJelobYywyUaPCthe3sJdnSWgwt2h/trhWzsHW3Ggd3t8nWf25ieONxrCMT4Duze24TG/hyMDNaJ/O7hRln/cHK81wHiZg0Ev6gTkBOYv9GLwP7UsX4cHSnEy/nvEwJCEnIy/QMo3zpfvnKzzuMHumT9Bc80CVXW+YkhqRtJ0fmJQZG5cHII508MCFFieO54v9xPhrERyq6m8Tn5vD7AKMDNgFcFnd5Xaifd/qqtQE9DBZLOvQsWlXxoSDlXRsGyZ9/oefP+XZCtwNQB4SE9IO2WByQUAdlVj980ZOJrs3+Lf3/xUTxQliyekC8+9SAeLEvGv/z8Lvw8Owa3PHQP7o5fii88+QC+s+Il3LVhCR4oTcb9RZvwmV/dg19Vp8kC9y/N/A1+UZiAB8u34Mdb1gi5uW3xcy4BeX6s3iUf9IRMJSDRmNYDou0kbecQAgfws53kEmCvBIHt6AfxfvCvJM+Ac9uG9rmdph4JJShCWpRwsG5aP6mL9qHTp06evkOmHirD+jrkRN4pTdfQeTbnXRGC4/a/99xSHyUkKqtyTvvIM1hpvOf23of3dqOxrgibkzZg9eqVWLlyhRARekS4Kx/HX47N9qXjuo7TGtoyOr7baYyH09X0oF7wXm2ovIZaTih5TaMsfyprx5mmcppvh0bT/6/mT6enMrbmFAJiG1BBW5FxW0bzQoWqz5D5GoaS1TSVsUNRtBpLZUOFqsc8jbO+PFU3KioKq1atQV1dtzPdxwBUG5Ap8PKAmBIEA+SYr/Iap6zqmTQCOg+0mXwPCE6V9fSNnpbhAUVPx9jVe6+eRkfrYudrfbUeXp5XRy/N1IU6altDI2PXKXzc7H51GbvaRzF2zy/c6VfD//TPyJ6zVKat8BC3gYFx2aJXp1+Zutp2vbbx19HIBNNMXZln91dogK26DM1FHe+5tS7e89t2VU5Dr/09guE9h9rVetGmxrUeJs2rq5evZWiobcJ7L82L2+km3y7Dfp5gXMv06ue3xXwhlnvOoXfHIUQ1nHVOCH8V8+quY37JYawu2CE7RvH8CIJZl4RMWQuiJEN3UvIIgOxa5ZIFZ8tYl0zoVrp6EJ7xUvCrP70YBLp+fYe4BMqXRde06U4P83bEEhvOQXoFQ1eQ1DWJRY3XvG2Ha65iUWwhNm3KRVVVl0wj1MMI+YGDh6DyDIuqqirf2Mex6EbjZ6jxjjpcA8IpWPuOTGBdq7MNL6eD1VzGwpQG8QSlth6FWQPyH9IOOgVrbW4HPvvV77u7kd354DMyNeqB55fLNDa2G3en+sdP3iqHRtJTwnU13/rJb+Q09Ly+C2KPU91sAnLHL5+SRe88pf6bP3pQDm4k2fnevY/hxagc3PfMYjlbRNrTWU9DErSp4xwiSF65CL3mKiJyBhGXuBnbtm1DSUkJ5s2bh9/+9nHkZiWCi8J7O0plihQX5RKwj4lHown7x1rAKVKcRmSIRo9sJUuPBb/s8wBDkgDj0RgWoHHmxIB4GuhxGKfX4XCveDnOn9ophMMHxB1w6wLz13FPO1zQzXqc7NuA1zLfIms/Xst4E/anfwbbsqPFW7NnpEnIEesYruz9nbnY3ZiKM4c6sbd5K04faMeu+mSMVifgUM82HOjIQV/eKhzszsfkyUGPSLyO+uozEnC6INGKu0DUAXYq58mGAcQi73zNdkCgz5YNDBXMWnIKmM1UGj8gdb/uh/WATE9A6Img54OLwb+19He4bcnzoOfiy88/jMfaCvCJn3wP96RHipfjB4krxAPyrSXP44ebVoDrNzj16vO//YWEP0qiB+Q5PD1QiXvS1+Ors36Lr819El956XEhMSQnz43WCQG5WQ/IYG81juzvxJkTehK6B9K1/U3o9z5MaV8hJVb/EIBbREVs+AiC3c5emewLtrnX5x4x4PszxVshfW+IieRrX1v9q89h5+u0qdDP4REdtx7W87nkSN4l7zm8+pk06qqsiY/J3/+xQ71obylDetom8GBDnrK+adMmdHV1YWJiQtaI2GN4cLwO3tuyNmZVfBqU13RbNhhXnVChLRssW+9Vj7L82ToaV9lQ96pvh5QL6qiuHdo6UwhIOEFVsvPDxW1ZjWsY1AlVYZVlSHn9qa7qaKjptlwwzu0r6VJbunQpNmzYhP7+Qy4A9MCWAYWhwJqRUXBogzJDNjwgpzIKEs2936YBhaoTDG0QaOfRhm3HjttyJm6DX69OzJuqZwNrTzYoF7yfWqY+qzf9ajSvBrs/+znX+9H51W8ifW2iLD7v6BiT6VdcJ6ILd7V+ocqy28XOt+Ph6jRdOvWn2vDahMDenz+1/7y6aZ6GXnt6dQiVxzS9vPp4dmnHr2fyVEdDrw9MeX4dpnk2TZ5375XhT/Oegen8ws/T5od2HkdB0wEsrr9i1kVwYXbtVSwqOYSosmFsaT4kYFZIyJg5hFCBqHgYAusv7DRZPyIHAr4qOyeRXHAnLH49565UnH7Fe56YzgXXXN+wufGAfN1nOr0uugaFdnWHJ0NOPKJjPB1+jwplTHmvyraxyd0XsbyJgN+s/Zhbdx1zc0axeu1mZGVVoLl5CDwHRN/jwcEjWLlytew9f/jwYR2K3EGe45ZeOnZp6AoHIswnAeEhhBMTJ5HSfd4lfgTx83OHsDavS7wYuf2T8uzcSvczX/me7JDFbZFv/frt0kYZXRP42l2/kMMeOX3q7l89J9OreIbIrV+7Qzwo3EHs/+XuO8AzO6qz18bYdAgkhJKE7kAIGGJIIICBGIONCyUYF2zjBrjbGLC93u7tWq20altVVr
2uVr333vuu2q600vbe3Qh5/+c9c8+98119kg0kwc+v57mae2fOmfbdb773nXNmhu5T3PXq2h/9HAuid8juVQ/OX48f3f+0uFqRpNCSwh3PSEC+dcvPxPUttmoctz38vOzA9UJ8mRyIWDSmJ6PTmnQWoXVnjQuZnOJ+HitS6rE9MQmcbayrq5Mf/WeffQYNtTsxMdIoFgoCMZKGg5PtmJxowuRkE6amWjA51Yw9Uw0Y3VeLI4e6MHWgGRP7G3H6mAH0CuoZqkWDxOTEoT4Bdryn5cG2AiiQUoAjs7MK2ASEWSBIZ1M1XUGWczL30f09ONnwuJAPWj9+n/JmdCXegp3ZW9HWYNpHNy9aIwxIUgKgIHE32pOWoGzlPRitS0JtxGPYVb4VrQkLMVAQiYaNT2OsNhE7fnMDprrzcfbogOv3bmakHfDvAkwHQLr11nJ8YNICaNofOsMtINTKzwBYB9gxXi4LkDpluSDT0RWA6chqmhIOE3p5BMQLsA0E1Px8SeLoKlfgumC9FgGpwo82rxLrxU1hi3Br4nr8aMsqXHXnD8R9imTjweo0XLvoSfxo00rcW7Jdtu2lqxXXfjxQkYxvPfsQbk1YJ+5a1/z2l3i4JRe3JYfj2/MfxY1hC/HNZx6SMr7yyN2yZkTJR3ALSKSxgORFobw4GT0dJZgcaxaCrO+HAc3advPZKWBnH5p0yyrg9JV8hvruBvSfk4emyXuswN2kmc/blGnKeOPeC7FyvoPm/VLCoX1liIe0Y4ac096TuzE90SEHG9I1K2QtLSJrkJycjPb2dpw6dSrAIqLDNcdp/ul4bo/zs+FVldc89Fnl7Tw0X5X1l6fpGgbT9etoearDUMu24/Rey7b17HuVmy20Zee9noKYka1kZzybvi0vys4/W1fv/Xn8MbqaB0PVc4oKCJjOl2bTpk1CQtLT8zAxcdYHLA04swFpIPD0gzsFZjNBnoJNk1dguh/cvfazlusBUwMkvfjA8rReM+Xt9vjL1TxmCwN1vTJUnvnxIpngLPnYyAnsWhOF0U98UgjIyCc+gYrb70FSbJbMGnd27pWdg+jSE0hAAvvLzp/3Wg8NNf1PCTUPf1/Yz5RROS1Dn73QJiyU99qg9xp6edgy/v700ihv6uDFMS9z6Tugz0ZGyzKhxmkZ+kwdjfPyMeVpfjPT+VlNTJyXrXjrm8exufIAFld5ayNIQlYUTSGyeFhOEc/oPClAnluw6sJygnxZcyFWDRICxxIxDrGacM1HSusRmT3n4mbudMU4uvpwoXl87V5sqRjBhrwucJ0CF1JzJyi6EUXm9cjp3VzfQBDuJx3u2g/L+qGWE7pcsY4kL9n9FxDfdhYrlXzIoXmvYEH+QSyJyMKmTWkoKGiY4UZYWFiDZcteELdPWix0XLLHKt5z3Ptjxj4uiOSe9cePH0fNwAEs0sXwXI9SfBSrMtoRUzwka164JoZ99W/X/gAPL4kS68YjyzbiocXReHJ1PO5+arlsLZzcfBA/e2KZLCJ/4LlQPB2yXU4s55qOb958J2KKB/FcZBb+8xfPYG1qPX760HxxzYrM68b9z4TgW7fcKZYXbjxwwx0PYVlcqZAW7jx2wx2/wvW3/1K2Q+bCdPZr7sBFJLSdxlJ7LU3JCYTFZWHHjh3o6upyt09funQxhvpqBHzRikDCYCwaA2gey0VW30Zk9sWgYCgOYa2LsL51IbomChHZvgybO1fi6NHuoJYAEg1DROheZe4JXpWoKJjW2XUBMS5gU3JgQo+gOM8uqB+W/EhseLr42fKbZe0HUubh5ZS3ojbxFyjaGY+u1iKZ4WbbWCcF+i44Yn4nd4t1oy7ycSEd1et/hZ7sEAyVbMThkRohJEdGalC46FZxwdEZapnVVYsCgakFtpi/C9SUADiyEu+2w+iJvKXv9okV59ZZiIsCZPaLAcWS7pQbQOicPLS/pd6unJIiBcTmWdvGvlH5P42AVMuicxIJbs37WFehrO346iN3i4vVw8078NRgOX5RnY77SxPxaFueWDjocvVAebIsIP9FVapss/urukzcV5wgC9Af6yyQ7X5pWbm/PEm2/P15YbwsYhcCMtsaEK5Zqd+AR3MjUFoQD+7ANr673t2hTN9R+/PQz9Xfv27fsH9dS5jz/jpExf3MnHR59+Xz8oA4ZViufm/eqCHrqP1jvsNKwgLfHXl3rfYaWZXxyDnbze/vxGgLKkpz5FBD7poVFhaGnJwc8arR8V3Bpo1DdYz3j/v6rDr6++B/1t8G/Z3QZ7sM1bHz1Ptgeppml6lxdvh6dClv/9n6vLfrq2l++TktIKr0/1PY19fnnLC7Svy0Z+6K5YG8mUDRS/MDN5U1oScXGB8I5l5f2tx5KdCcPa+59DVNw+D1m7utBmzv52nlUy9iat9FWSPAsz+G+/dj909/ZqwfH/0oBv/5c9jxm4Wy+1V1dS96e83ZH/bi89nbMxMs+9s8u662K3g7Vc+ElFHwbUJTjhfHZ8qqnj57lhJNUx2Wr/cMPX0tU/PTZ80zUFbz8fTVKmLraT1MnkZW47Qetp6WsXfqLKb2v4iJ6fNyjU+dxuT0eblG953E5PQFabMSzPHxM+JCV1q7GxvKD2KRnBLu7BBVcQHLig9jffEebKueEDLB8x+4fS5dsrhgXHfIEoKg7lFj/43Exv14bMVWhGQ04oW4Mlkczd21nlgVi9+GJgugfT46Gyu3V8juV0+tTZBF2Jy15ynqv1iwAaGZLXhwfpgQEiE7svOW47JluWhp2bR4ECDTerJj8EWkdJ1HTPM5vFBDYvWKWBtoaVhQdAyLNxVjfXgcMjPLUF8/gIGBg3LWjbEMnUZ09CaZwafJnlaL/6nxkwM6/ZDPnj2LsYkphNfzHBZuAPCKuL8tyZtA6I5uxNVMgKQvsfGA7Ei2vWFaCJWA//p9ku4Ss7H/FstGXM1e0H2L60dIvhLq9gn5o7WCz1zczsX8tKTEVu+V/ClDa4eedp9QNyVlPvDsOvxmXTLia/fJwZE8JJJ1yB16CSldZ7G27oLbnwurXsELWX2IjU8QC/XIyAgGBwflR37FiuWyU40CHgUYDLsni/Fo5T34Qd51KB1OxG1FN2Nxw5NoHM/GfxbcgPjuEBw/1uuc1KzgQheh7sb5E+agsvP2ugcFaQrEFTgLgFOCobOnJi8BMw7gFiDjgm9DQOj+dWCyHS/m/bNxv0qdh4sp70Rh0kI556G/qwwHJtqFWAl4krJZ30AA2J8fgd3lW9Gw6WnURT2B8fpktCcvBeM7UpfjxFQbylbf61pQFHAq2HJBlkMsXLAqC4A9sqB6Amoti46QA+dZAa83667WIEOW3DxckOfNlms9POBr9af2s1hHzGdGOWNNMWWYZ6ccBcpOyP6bYQGxd8EKugaEi8HNwnCxSAxWiKXjxvUL8GR/qZVWLQvIKUMCYZOIp4erRI75yD3JhcrogvNhS0/lgq0BkW2Dw/FU9jJ0Zv0Kg+XLsasjFxMjdeIqKNa6WTYLcPvWJRyG/BlCSTDuf6/UUuWQOvfzUtJoiAdJOt0T1WpIsvxGu04e7
hP3SlqJ5Huk32V9f+R76fSHfkelP5ScOd9nxwpEK5JncduNU0cHMbarEYX5aXKOCN2ySEQKCwvljKdXXnklKOD+nxr7/3/Nxz0JXZlJsIb6mYwt67/369u69r3q2aFfd7Zn6jBNw9nk/PGU59ZqFRUVWL16NSIjY9DdvQf797/kzgR7QFBBKwGcuWxQqPceoAuUZ7rK2Poar2kmtAGllmWAsKarHvNSXe+ecYHA2ch7gNvUQYGx1yaTh8oFguPAPEzazHqYuk9NnMdk6yj2klSMnQK3JO2rH8TI567C6Ec/KmtAOr76NSSGbkFWVgUaGoYwMHBIXHkCCYjJT9uo5dmh1tluvwHgnq7Rt5+1jSbOk/c+N7uM13fv5fn65OcqKzAvrV9gvoEygWn+vF+vrJHbN30RoU2bkT/QgNiObMR2ZGFB7Wpk91UioTMXi2rXomJXl0u8SEJ0MToXYBdU9GNd6SEsrLgALip+tvQ0flt8DM8XHcfKoilEle9FXP20zMQT/NIty7hP/d7dsUnJAE/yJpHgzDu3f/3VokjQlYfPJCILNuZiflQ2VifXyBa8vwlNwtLYEkNG1iTg+egccB3CI0tjEJnfIxYQdcFSS4eUNfoHKVstHjylO73nPLa2nsPauotmgT2325WDB1/GguKTWLSxGCHrY7F9ex4qK7vkBHSeeE6LH/ukoaEPq1evEUsrT9kNNgbJwPU6xzCOmfa4abthlXZPYXnNRdcVi33/Qt4eRBYNIqF+Styk6LJGF7XAk+g9lzTbPU0sUnRXcxbwq6VIQyPrrKexDm6UeEeHBI79/tgLW0CXrqfXbsdTaxKQ3nkSqd3nENZwHotJmKRPX8HC4mMI25omM4odHR3yYz48PCzr9VavXon9Ex2uFcOAaQOWSC6eq30Ej1Xei759pfhxwfV4tvZhDEyVY0XTb/FQxV2YPtw6wx1FgIWCEZn511lSJRgWWSGQcy4zy65AzdMh2BbgJ8TBBjiGgBCo7R+rxe+z3ucuQD+b8jfISQmXhfS7+6rEQqLrP7Q8N18B48M4uKsCxyaaMd1XhMn2HJw+1I09TekYrozFsb2NOHd8EONNaa4FROrk6Lp5Ou0OTLPq77Ql0DLiADGnfaJr94nVR/40u1y998iL6W/RsespwNFLc/XsftZ71ROwaMCyEhD3JHSbgGRE4/6yJNCqIdvrDla4ZEIXhxtiUQXZHrezUBaJK+EIFsoicsoNlrt5cW0IL5IVCS3yYecR1AWrcQPeVh2KlRl34WLKu3A+4x9wqPBHmGxYhYm+XOzf0wTu1MZ1TGyre2ikTR7sz9G9dz5np8/sz0H7WIiefraO1ePk0UEMH+hEx74WtO5pRPNYPZpGa9E48sa4mkbr0DxWh949TTgw1SnEyLhTmg0VtE1CXPV7L+Te+Z5rf2g/2e+zlabjBvMe6qvFjuwkbAgPBSdJ4uLi5FT1EydOzNi2VzGqP/T/LuizPdYzTv80XfPRZ4Z+HdXTUHX8efl17XxUdi5d6vsvW89/75fls1hAXk8hs1XutXRtPX8FtIKM13xsGVtX71XHH9p6ek8d1bPLOHr0KBITE7FiBc+iyMHo6HFrNtsAMgW3fpDnxQcCPC8+EAiaeL+sAv1AWX9ZwZ5nK8cvSzm/rAdqvfrMlPH0PPnZ60kZAaMT57AnJgljK8Kxq6oHPZ0T6Nye6y4+5+5Xdd//AbZtTsPO1CI0V3ZjcDA4AfG3xXv26u3FefW14/Re26dt8Z5Nm/RZ5YOHplzNYy4ZLy14Xb30mX3KumgZGs4lr1YMylD+9enY5Xrtmt7/In5XtQQ/yb8LKxsi8OvKBbg1/x7k9Fbh8fJn8Uj571A3POCWw7JIHGnp4jbKzc3DyCjqRkj+HvymYB/uyC7CjzNS8WTuPvwu/ygWFx1CSOk0NlZPY3vTIaS3H5eZd4L+PGtdBy0RtJRwK9dNZbuxIbdTXHy4xiA0u1UOJqQrVmR+L1YlV4O7OPEAQrplhe/oAA9B5HN6xwlxQeLMvRIbgnBaXWxrBwE6F5kTHG9rPSsA2bV6iMvVq5A1H/kHsXBzmZCP+PgdKCpqAk8/3737GPbuPef2RXp6rrh45uXlibsUxx3/GOQfu3Tss+X0nqHeMy+6YXE92+nTpzE0Oon4psMBRGlhxXmsLNiD6NJhsTrQOkGrU4G1BkdImEUg2D+GRHhrY8yhhWox4qJ/s7DfJhsi4+o662howWqYlnNAaMGiGxjJUFr3OYQ3XJC6GqvNq1hYfg7Lk+oQl7Bdtkon8eCP+NjYmBCQNatX4eC+Lm/mVsGRAInd2Nq9BnHd67B7fzV+UX47ljf9Bh0T+QhpmY/bi27B3oON1kJbG4Dx3gEgmqcz86lgzAVoAkKMvJkRDdQToqDgj7IOaGE8LTd0HTs4kI4/pL/dWYA+D4eSrkRO+iY0VGdjdLBWtsJV/34DyJ0y3LycWVpfnY2sQ4rYDoIoR8dYLLxnbZeG0hZaDgLAmNc2bb/XZnVNscrzlenm7fSpPkudHICn+UqaQzZMnU3ZamlxdaV+pl8FBDp9TUDJmW77Yh9ylp7nuOT1FONDzRsxz9oF67bkDbJ71e1pEXLex0ON2YY00CLBczmsHap4fge32SVZ0W1zDTkx1g8lEo87LlsPVqVCSQzPEvnhphV4aqAct24PxW9oIRGLSJXIGEtJkG14qyMxryEc761YjryML7jvy3+lXo6L2R/BieKbMdWyHpMDeZgeb5K1Lny/dAtl7QvXaiSfg75LGiq5cz5PlZF314tjHnx/Jw9247e7duJrXYn4944EfLUjHl9tj8dX3iAX68Lrka4M9AxVy7k9xw/2iesU+8O8R2y7Xtbkgf89tb9f8r56/aHfc+bHvjk01Y32phIkxG0CJ0p41hzdsrjmj26yikF1rNdnhnON98HkVN5Os38T/L8Rdhlz3fvz0zy1zhqqXLB6ME31bDnVnU1HZV0C4lfwZ+ovyJa37/1yWpAd2vJ6b6fPda/yDCmnf7x/rTqrPOVo6udLs3r1WpSUNDi7Ynmg0Qamem8DPI0z4G8mqDPAUYkG8/XAnoJKzYOhdz9TTvPyl2XqM1Ne66mhV56R9eqi9Q4eP7u+6hngSyDKNQGjmWUY/do12HX7PeiI2o7Ox58R68fYxz4mRKT0J3ci+9llqH3iGbSml6C//wDGx0+7s8cszyuTddJ6mfJMP3mI8EHtAAAgAElEQVRxKquhv53apzPzMXn49VTfDr08WIfXX7bKmjLsdnl9Z5ej93ad9F5DygSrj1fGa+Wt/RisHa+geLAF38y6DoWDTXimeim+n/sjpPYUIa27CHcVPYjErjyQqJh6OMRz8oJsP8vdzEor2xCSloIbEp7Fezd9Ch/Y/C/4SUImHs/Yi9/mHcXzpWewtPwMQiqPIrrmEGLrDyG59ajMjmf1nJUtYkkIdg69jB3958VCYlyCzOJz3hNI03LCmXYSDlpEGMcD9bjGhO5dJDS0auwcNLP/XLBOKwB1eFI6ATlJR0bPOSR2nMXm5rMIqb+ApTUvy7qKAJer
snOy4HxJ1A6sC4tDUlIeiotb0NIyCp4GTwLG958kvL19BOHhkQgJCcGuXbvcHyF7PNPxyh/ONX7Z4xrvaQXh3vQHDx1CS/84ouqOGTcxEiZuD1x5AStKDiCydEzAP8mY9Oeul6VfSMDUIqLkTEmJGwpB8RMPEhJjPTHkgwdGmkv0HDc65s+zPrgOJbvvPBI7zmFdw0WzZsWxfshmBZndiNyaiLy8fHR3d4v1g+ec8NwU7lhIAnJICIgDmhTEOiB7cLoCu6YrcfRYN8qGk5C/KxYjB2pRtDse9eNZOH2Ci7EdwKGhRTQMuLZIgw1AVN4XCoD25+FYB2xwrQCOW/Ae7ViLP6Rf4VpAdiddIwvQeVYJDwLkAvTj0x04fbAbpw/14NSBTpw53ItT+ztx6mAXjo7VSdrZo/1i7eBOWOdIbva14szRfnG/4q5YR8frRZeWknME49MdOHtsQHbFOjpWjxP7WgWwu32iBCegjQ5Q1Thtqy3r3Ae015HXOIaG5MwEfCozaz2Yv1Ue+5IXASVB8Rlna2RaAOgaxC1UCcJPHOoVUL5vvBnZXfn4YFMM5lnngPAMj8/deiOuXfQ4bly/EHekRcruV1z7cV/Jdtn1ims7eBo613pwQfmt29fjgcoUOXyQBxAy/Zd1Gfj1YAUe68iX7Xa/8ug9uDM9StZ+cC3JnVnR+Jd7fizk5fpVz0g8CQrL4XqQJ/tKxbLyQFmSrDO5e+cW3F2Xhg+RgNSH4wPli9GYciVeSn4r/pBiDq7k2TF/SHszXs76IE4U34j9jUuxry8TU6O1ODjZJu8Q3ZDYJ+wj9hXdDLXvTOj1a2C85YJnubOR0O2ebsMN3cmY12wOSZSwKUKInbmPdO7NKe4kfK4s7ymrunov8XaaJdPs5C1lMN65NC83f0fHyevrDVtQ3pQrhJ7n4vCdOHvcWEHkXZT30yMUQmYZ575n1nuv75/97rtyDgHnBhPHhzC9tx35uSmI2MDDrleJ5bulpUUmUjhW659iT/vZ/l2Y7Z7yqmuH9m+CravxWo6Gqqv52Tq8p56t65efTc+fj5YXLPTL8llcsPyFBRO047QydqgF2nKz3ausP5xN3o5XHcbZf7bMbPd+ee64Qles0NBwAQ90xVLgaAChDdQ8cGcDQpUz4NiTMSDNAEZ/ml/fA5VKWJiPlu3FqZzJT9P9ZXrxKh9YFy8/O960wy7Xy1fzsUPvXmfCz2F37xSGb/yhkI1dV38ZQ1/yDh+kBaTz6n/FwOeuQvuPbkOtcwL66CjPTvAWoWu+Xn389TDto5x3mTZpe1SX/az5qayX5s9Xn6nj9aGRD3w2+TLOjg98tvPw7o2896xlzgy13qZNWo5f3372y3h52uWZflBZT4Zt6RyfEMtHx9gevFAfhvtLHkVOXxU2taWKFWRDSxympi+6VhACb56KPjxyHHVdA3g+byX+JfZfcXn0FZgXNQ+XRF+Kz0b/Jx6Ma8NT2dN4pvg0nq94UXY/Wlx5ESsqTyO0+jgia49iS/0RJLYcRUr7caR3nhIrSE7fOeQMXABP9s4dfEmIBdcjKAEhoeCOWHoxXi7KDL0kRIPrHkg2eI5HevdZJHeeQWzrGcQ0n8X6+vNYUWOAMdciELyraxC3tl2wYxKLtlZhVWgsoqOTZMcrul3R8jE0dETIh7pekYhkZOTL1rupqakBs2D24O4ftzgm6XilPwD67A9VlnL0OT5z5gympqZQ3zWM0OoTWCQLu816lYVVL2NZ2QmsL53E1upJJLUckrUa7A/2IYnabEREzkZxLBse4TCbBMizs22xTTpIZNSyxPxp2aLVY2PzOays9bYvFnLHndKy+hGxNQmZmZngDzZPHuYmIZw95CxiVFSUEBB1wSJY5ey4ggkNdWaTgEpkXKBgZv8VBGu6yBNYCMgws6LmnmBEdWxA5gAOi5y44MUBMArmCGYUXDOOAI47Mp2sfxT/nXaZISAp89Ca+FMU5Majo7kAk6NNApz3tmZjpCYBozXbMVS8ESO1idhdsQ39eRvQuyMUPTnrxP2qLWkJ2pOXYV9XHroyVmGiPQf9BRHYVbYFLXHPY29LpqwTOTBYiqHSzTgwUIo+5pG7HmP1yUJspP4KspwZXam7Eyf97MwAu30qbXP6x/ksVEfbr/1o8nesL/J5OJ+NQ0r0s3D7SkCvR1hMfux3JR6cjTfrO44d6sXB/R3YN9WCPRMNGNlTg6GxSvSNlqJjVwFqetOR27IFyyvW428bowIsIDwM8Or7bsX3Vv1OXLG4zS6tHN9e8ChuDF2Irz15P24KXyyHED7eWYivPnqPnOPxvVXPyM5X3B3ruyt/J4cNkrBct+xp3ByxVLbqvS0pXGS4a9b3QxfItr2/rE3Hlx+4DY+25+Obv/sVvh+6EN9d/jRuT9mAewvj8R8LH8c1v/sVeE7I3bWpLgF5a9kaPLX9CTRuuxlTiZ/GiynvwH+lvMlsYsCDLFPm4fdpb8eZ/K/iSNUDmG4Lw75dhdg/3iDufGoVscmIEBLLesQ+VkLnEhYrnWkkdwMTLfheR6JHJFwiQKLwF76E2Jg6fLkyChmFCehuK5YzdXRTB33XPKJhLBju+CHvp05wOGSZ33UdTxziwmd5v5W06Pfj5LBYW/q6qpCeGo+1a1cLpkxJSRFLLq3VOo7r+K3h6x3vbf3X0tU8Kcc/v67GUU5lVcYfqqxk5PyjTDA9jbN1ND/V12eV5XPAInS/oCr4Qy1E4/3PjLcLUTkNtRzV01DT5wptXftedeYqV2VU7+TJk0hLSxOXibi4JOzefSQI+LRBmt4HA3Ca5p+ltuNVT8PXTiN4tMGoAmgNTfrM/BQc2+DT0zHlzp6vyU/Tg+Xh5WURkN1HMcBDBz/2MVl4TsuHXroOpPvf/h2Fa6JRVdmFrq7JoLtgad4MvTp4fWWnv/Z9sL4JnpdpZ6A8yw9WBzve1FP1zOelOppm5FVG+98827J2e2bqBNPXOM3LH2pbbWIVKKNljk+dQc3uXozvO42GkUGk9hSjZncfCgcahYh07zFbV3O2X61e/cMHkd5aintyf4kPbvkw5kVfgnnR83BJ1CV4R8Q7cdW6b+CXkfn4deownik8gfkVLzoLkF81axeqXsaSqotYXnUOa6tPI6zmhMzob2k4irimY0hoOY6kthNIaT+J1M7TSO86jYzuM8jsPiMkhZaTTOfK6D4rJCOt6wxSOs8gsf207Li0tfkMohvPILzhHNbUXcDymhexpPplbwG3rEd4VYiREI+Cw1iY2I5lEZkIDYtDXFw2duyoBs+t4TvLU+C5DTHJB/uB70139wRCQ8NkFmxoaEgGfnsssu91vNNxSMclhracxvvj+EwrCNezcQzbs3cvilt2YUP1YSyuMmB/QZVpz5KKc1hdfhSRlVOIq98vC8nplkXrhHF9c3Ym46YAahkZ/YPntuZYNdRSIiHTHVc2JR0kgcwvp98Qj9i2swilVckhRbKAnySv/CyWZfZiw9YUZGRkyJa7o6OjsrMXtxjmIvv9+/dj48aNWL16FfaNt8ksrgsgFCQpEBAwbAC
ECzZcgGyBBpVTIGEDDwHGSmAc4OyQHSlXgLkBwx44doCMpHng2QUuzg46PNn8TNVt+O/US10CUpn4CEryEwUs8fwSug2RUDRufRbNsc+haduzqI18TBaYN276DaZ7ClC74VHZcrdq3S9Rue4X2FW6GVWhv0R9zFPoL4jEeGMaaiMeFZJRvPQOdKatQNv2xRgsikFN+CMYq0vCaG2iWFIMUbBBluknaZvdbulj0y9CNqSfrLZqPzsEwpBEJ935nBinJMb+fAyg84iJrSty+vk4rkAT+5vQOLID+X1xSGgLRXjDYiypegpPltyH+wpuxY93XI9r07+Gf0++Gl/c9lncnHoP3t9gZtntk9Cv/vl/CuG4Jz9WLBG3RC4VwkHLxWd/+F2xiHz2x9dDCMhjP8cdGVFyoOC3n38Mn/vpTUIivvbEfUIiPn3TtbKD1r/98k7ZhpdEhVv13r1zKz5/203ignXlDd+S9SE82JDnifD8EJ6y/pO4EHzruYfxnSVPSv73NKQbAtIQjrdUrMN9iauQsHUNCrY/i87kW3Ew5Z9xMeU9+EPqJeBOatzOmVaR/0p/Gy7mfBInSn6AA03LsK83FdOjVTg40Y6j+7tAdyRzDo63zbQQDx6ueagNpw53i7XArJvwDqMUAnKkH33jTbiufbtjxYjApQ0b8PaydXhX6Tq8uywU7y5fj/f8Ba53VqzHpY2OtaQ5El8oCUdsRgxa6nfKlt0kYSRW9rvkvoc6mcB31Hk/5T1V0iETFPZ3wpmcsMYJfX9Vn/3FyZKK0mxZpM7jHnh2SGdnp5zdpGO5Hdrjun1vy/Be/+x4lWeo90zXe7+OPjPUfFRWn4OFtp6tG0yWcSrj19N4v96cBGS2CtqZ2xlqIbPpqaytb98zXXU1VB0NVZ7P/LNDlVFdDTXeluc908fHx8VsRvNZbm6pHIo3F+A2YE1BH8GpgjkDVPVZQV2w0AaWBnx6+c2U99KMrALK1w5VXsPZ8tY623L2/Uw9r91MM2D0nGzL2rOjGiOf+acZJISkZOjTn0HhA48iPTEPVVXdsoB3tlPQ7TK1LhraaYH3M/uKOq+t5/WlJ6t5aeiXCYz39CgXmGbqaOK8vp5dhvomP1tG7zXU+vifAz8bLW9mPwXqaf2NfGD5TGM8L1nvM3kBe/aeQfvgHiyvDMNViVfjLRvfKsSD5ONN0W/CRyI/jh+s/ymeWfsCVq6Pw/ItpViaOYhFRUex0CEhtDrIDk7O9rZ8XlT1kmzpu7T6RbxQfQErq88ipOY01teeQljtKUTUnUJk/UlE15/AxoYTiGk4ieiGU4hqOI2I+tOyM1RY3RmsreXM+wUsq3kJS6tflrUHPPhOZuBl5ygD0KUOrEfpGbF4LNzeIsRjXfh2bN6cLlYPHjTY1LRbdvzi+8rzPtTyoX2SlVUo1o+kpCTQjUjHHH/oH4OYrnG855jkH7c0DxF0xjzKEKwTtHNb3t3DI6hs7sWmqn1C6NgudSWjNWRJ5TmsqTiCqKoDiK0/iKSWI8b1rfeskJHcoRdB9zQ5Y4W7lA3/Xs5aIcHQizuXSby4s/F8lpdBPR4smNlzDsmdZ7G19YxYltjvLJ/EQ+rCz7bkBJanNCJiS6JYPurr68VV7ciRI/IjTasO17jw1OHNmzcLARnf3RzoRuUAUwWsAhxsEKH3llyArM7kzyJnLCoKmh3wrbIucNGZUit0AbOnyxlonllyrvR7+O9Ux5UmZR6KEp9HRXEyeKgizzUR96H9HagMeQB10U+iLXkJ8uf/QBabN235HQ6PVKMu6nH07QxHxdoHUL76PrF8kIBwS96+nWGgBaU++ikhIh0py1Ad9pDslkXrR23k4xgsjhHLCN20ZJ2Fv00umfDaZBMGW8cP6gx4c9rt5uvkY1mF7M9KgZsb5/u8NJ5lnTraj9KhJNxecAs+kfAR/O22v8b7tv4V3r35nXj7prfiio2X47KYN2Fe5DzM2zAP7w1/D36TuQTvrzfuQS4ByYjBl+67FSQdj7TuFPcp3t+w5jlxu7rq9pvxRE8JPnPLdeJeRQvIHQ4B+Y8Fj+Gq22/BHelRQjAebsoBCcij7Xng+R4kFLSs0EpyT0EcPv/Tm/BUfxmuvP6bQkS+/tQDuCtnM25L2YBvzX8EdMniIYc8/JAL4+9pyMCHaiIxr3ED3lK9Hg+mrUN6cjTysrehOHczqnaEoCfzXhxJ+ywupr4Xf9BDLVMNGflD2uV4NfN9OJd3NQ5XP4LptghM9WdheqwehybbDBk5pAdzckerXpzZtQ2nB2JwZLJJ1iEZFy6zixQB9ckj/egda8B17QmuG9XbykPxhZgF+Gb4fFy7cSmu27oC341dhe/Frf4/ulbhu7Er8dX4FXhrTbjrCva5olDEJIajvjpbDialW6MSkIB3zSEZ7vtlva8BclY8ZW3rh3vvyngknZMJA93VSE7cCt2yl5secW2bPY7rvYbBxnz9XdBxX8NgOn59lQ0Wqv5sIXWYFuyP8XZZ9r1fR5/tkPf2FUBA7IQ34r12zJ9St9l0+YPHrXlDQ0PllPTy8mZxCVLAZgCZgj0PXGq8AkUFcNTTNM1DAakdP5s8ZRTw+fX9eQfmp6BxdsA9W5leOV47/bJaloZGx5RJUEo3qrGxk+huHsbAT+4IsHyYNSAfR+WNP0T8hjjk5FShsXGXbF8auAYkeN2DlWn6wq6vB6hV3t8GbafdvyqraTM/K+ZrX947oLLaF56ckfHKMXXTsjT0yrTlTZs8GS3be6+0XZ6MnWbKYt52emBZpoyZcZ6upjEPvdTqMTB8EBmtpbgz9wFDPKLmCfm4LPoy/G30B3FtzA1Yunkd4uJykJJSKNvVZmSUIjGlCBvTahGS3Y/l+ZNYUnIci8rPY2ElrSIkB8YFStyg7J2SNL7KSXdArZlV52na5kRtl0yoK5Xq0cKhaw9INipfwsLyC1hYchoLd05hUVoflm4pw6qwRGzYEIdt27KQmVkuaz3q6wfR07NPrB7mXb3grvnQfunt3YeQkHVYt24dBgYGZuyE8qeMV69Hh2OXkpBjx47JZEpreycSqkexuvI4Fld5O2RJ+x2Ct7zyDNZVHUNUzRFxfdvefBTJbceR2nkKGV20LJ1Fdu85Wb/BU9X14nqOrL7zQjbEna3jDOJbT2NT8xmENZDw0Z1NSZ7T5/xcy8+BWwSvTSjClth4Oe2cbldcbK7kg+1ge9hutoU7y/AHfLC3NtBP25mdFABB0EpA4YBcglUDmjX0AYcZgMGZAbXyMWDDmxk1+Tm+4lKWzto7FgQL0NjARGeQD+xpxIXCf5fZarrOvJpyOfKSl6O6LE1OdD842SF++yyHVgu6XO2uiEVz7HxZ26HuV53pK8USQleq0dok7CrfArpj8eBBumKRYDRtfRZ7mjPkxPQ9TWlCZqZ7i0Dd7uwQdGaskpPU3T7Tugv4V7JlZn5dgKYWI4t8aT97rlrsZ+0Pj3gYq5WXr7EmsW9V1vuctE4q434OtCTRle1AN2r60nFf7q143+b3uJMdnPCQi2NQ5Dy8JeIK3LX5h1ibGY
W/qQskIFwD8pWH7sL1a56VE82/HzIft0QsxfVrnpP1Gl//9QN4uDkXtFbwdHSeZE6XrBvWzsfPsjfi9pQI3Bi6QCwZJDA3hMyXwwd50OAPN67AnVkxkvct0S/gG08/iJ8XxOJLD9yGe4vjhXSQfNyTt01Ix4+3rMLXn7of337+UdGVNSBCQCLwlrpwPJSzAQW5caitzERrQ55c9dVZqCncjI6cpzCWcQNOpH4cL6XSPesy9/1Sy8irGe/Bubwv4Vjl3TjYvBxTvSmYHi7Dgb2NODTZDr53R7vC8VLeZ3Ci/jHsGyrC/j0tOLK/W1yvzIL+PiEg3yEBcdZvvKM8FF+NWij99pOEdXLCO/uGC+//Ly6WdWdGFG5ID8fbaze49frngnWIjA+V/to70ihrYpSAuO+hfM89gq1WDHsscd9jZ6xwibeOEYyX74txCXUnLOzvkrNIvTA/HWHr14mHDd1yOdltu2TNNcbPhlfn0vmfSPu/LnfWgwhZEf6xUf4wWENtJuSXlwycvF6P7uvVt8sMVk8tV/ObrWz6HZeUlGDlypWIiIhGZ+e4A7oUDHrAWMGfB9BUJhh4mw3oefEmP0/XgFbNMzDUMv2hAiE73sQF19c2aGjr8X6u+GBpLIvgdM+eM+jvnUL3snUY/dSVLgkhAen5/BcQv3ANkpLyUVLSio6OvbJVLxevqxuL1sMuY7Z7lfWHrMvMOPaDAmnTJ34ZPgfTNXIz85yp78n48wlsw0w5O31mvnPV10vzlzlXPq8ta+pIOV6GYF6UNQ/tQ+NYUxWNf0v+Oq6IucL98X9r9Nvwhegv4aHNTyNmexJycirlc66t7RWyyW2X6cJUVNSMjOwqxKZXIzK1DiHpHViRM4ylhYewqPQUFpWdla18F1YaQkJXIkNMaC0xbltCOCpfkvUk3PKXl6wt4QnlBLwkGY6ezMBXXMSCsnNYWHIKCwuPYlH2GBYntWHZ5lKsitmB0KhUxGxMQWLiTmRnm3o3NAzKO8qd2kisudOVWj3YH6YPXxZrSHZ2kfzI0J2Tu1P5/+yxifcct3Tssu9n07PHL1ue90pCuCids2wTExNo7+xBbk03YsrHsLzilFiV2HckIdovJHxLqy6I69vq6jMIrTmJiLoTiKk/gc2NJ7C16QRim04grvkE4lpOYlvzKWxuOoWNTafFnW19/Tmsqb0ga2jUzUrXz7hhxUUsLtiPFSmNCI/LQlJyCoqLi2XLyj179gjR4EFeNvlg/3BtC32oV65cgfbmUtcCosDXAAdnxl2Ar0dECA4MwDWgWGc1SQ703oBcQ04YZxMHAb4CMhygrMBb5XxuG5qnABdHj3nIDPLhfhwcrcDFgqvdBejnk9+DvJRVqK3IALfgpYsW3V+oz+10j080C/HgonHGHd/Xgv19xRLHhenH9jTIQvUjY3ViGWHc0fEGHBwqx/7+EiEYPBOEC9a5de+ZI32iSzeuw8PVOMvT1gVc+YiUts9ZdyGEQOQC+yegv2wyKPpKNkzfK8CTvlfQJqFTtn4mko8HDI28IScEkbqzVf9gGSIrluFjW//eHXe41kwJyCWRl+DzUZ/G+m0LPQJibcNLF6ifxK0Tq8bduVvkcMEHK1NkYfkTvcViieABgjxgkIcK0krxQHmSxHMBOrfcva94u5x+TusGZbmWg4vLuVD9id4SyesXNemSB/XvLYqXQwrvK0kQ9y7umvVgRQq++ezDssCd5IdWkLtqUowFxDmI8JGdkSgtTER7Uz6G+ioxMliLXX1V4rbXVLsDtSVxaMldgP7MO7E/7WpcSH0ffp96ubtOhG5aJLx/SL0ML2d9GOfyv4wTFXfiQPMKTHUnYKI3B3tbYnAh50q8mvlXOFp+F/Z0JmNypB5H9nfJdra0IvSM1uM7bQ4BaYrAO8vX4+tbluE/Y9firh2b5XBGtp/rXXjo4v/2xQMh+fn8sGgr3uFYuLjInQQkIm4daiszEEhAzLuoRELGEOs7re+5jgGS7ryX8q7Ld9oi1f7vhBJvJ97LbzeOHuhFQ20BNm2MFGzpd8l6PeO9H7Pq74b+Jmjo/43x/84Ee/aXb+el8nbIe16z/Wn6XKFfd1YLiApqgXNlGizNr28/B5PXOJULFqrMbCF1mBbsbzYdjacbQ0JCApYvX474+BRwb38FcX6AOBPAeaBSdYKHs8nNjLfLtO8D852pp+nBdPxx9rN9r3n4w5ntNgBYQSrJxK5dR9CZXIBdX/26S0B2f+pK5P3sfmzdko7c3BpwVtmcAXLGBXWz5W3XwV9H/zNl/XH+57nys9OC38/V339a2sxyZs9npuz/PAGxPwf9XHnex+jYSVT1dOPxomfxt1s/gEujL5Uf/kuiL8F7o/8a34m5EWtjNyIlvRCFhU1yQB9Puuchfdwtiou2+/oOgHEkI+XlHcjLq0NqRjnikksQs70Y65PqsDq1Dcsz+rB0xx4syT+AxUVHsKjkpCEPJafFVer5kjOYX3wK8/OPYEHONJ7beQjPFhzH/MITeL7oBBYUn8DComNYlH8Qi3InsTh9AEvoWrWtEitjdiAkMhXh0SnYtDkdCQm5oHUmP79ezvWgq1Vn56RsLUx3Ky4u17UeSjy0jxhy56sNG6KwZs1aOcWbPxDB/nSc0VCJhIaM9//ZcarHUH+E9F5JCN2xuJCbi7j7+wdQ09iK1LIOrC+bwgvlhogYMqeL7emGZkgdD5HkafZLqumy9iKW6VVzEctqXoRxZTPpXD/DU9jFxUrXzwjBcdb1VFzA4uIjeCF7AOu3ZWJrfCKysrJQW1uL3t5eIUkkSyRNfvLBPuDaluzsbNljv6YiN4A4uIBff/AJfBVQOEBWZJSYSLohHwooNBSy4gcbKj+DaDjg2rEImDICZ0MVxCgB4ULYQ7vz5BBCzkzzOpH8IeSnhYCz2cMDNXKmA2ebZwAjpx6GUDlky2qzEiWth1u2rz1uWxU4uX2lliMTMj/NU0GUPDv5CWlz+8Yhc1oWQ4dgmDw8ImLq75ThzBJLnbUtbv5OfSQvb4E0dzM6NN2JhsFsLC57Av8Y9wlcEXO5bHKhxEPDd0e+E7+KuRNJCWEIy93ouWDVbsA3U9eB2+/eV5oou12RPJAw8LDBv8R1W3I4bolcJpYEAvmfVSfjg9wFiwSkIQKP5UWjvDgFPe3FslPa/ok22WJ2cqwZIwO14r7HXdTqy5NQm78B7VmPYiLt6ziV+g9iFVGXP33vZL1I2lvxSubf4OKOK3Gi8LuYKroHZ7I+LaTl9+nvwPGC6zDeHIO9Iw1iISE57hquxXda4x1LQwS49uKauBVg/e8v5c5h2Xi0Ix8kVo93Ff2vX491FODhllzcWp2EdzZwkwGzG9asBITvmb5j+p1y1izpd8N+H92xhLKqp2OJvJtWvH4ftAwl2Rov7oODGOytQdy2jTKhwp1XGxoaxHXWP97z2R7n9V7Hew0Z75e181I9hvbvi32veWg+qm/rznVv680lF0iW0EAAACAASURBVCzN1n1dBISZ6F+wDDVOO8guQPU0VNnZwj9XN5j+bGX547kTC32Pe
T5IZmaBkBAFGzb4I6idDdjOJh/oruMBR3+++sx8gpdhA9Tg98HqoPlqaPJXq8CfH+osOXe16m4YQv/Pf4nRj38cI5/4JGpv+hHiwrYiM7NCZsG7u/dhdPSUHGRng7o/p07aLhPa/TKzr7VfvdDua+rOpj9bPMvwp3nP/s/D/8w6sy52vOmLYPl67THyphxbd7Y+8OS1bv7Q1MGUTavWRZnh7909jci67fhKyjdwWcybzYxj9DxcEX0F/iX6X/HUlgWIT9qB/PwG1NT0gQcTknDQZYmklASGF9dOMI5nZ3AL5s7OCVlbUVPTi9LSNuzcWSvuWsnJBbLwe/O2HETFFiAsoRzrEiqwNr4Sq+OrELEmFakPvYCs++Yj944nEfZ8NBZE5mHpxkKs2FyE1VsKEbJ5J0Kj07AhYrvsYLVlSzp4fgddwrKzK4RwlJW1o7a2T84w4eJykiUeoKn1VouH9ofdx/y8eH5QQkKarP0gwKYlVf84tth//rGGz8F+EGy519JXWeZDEsKF6TTxcw0KzzrieEbAX13XgKzSZmws6sfakkm8UH4SiysvYJG6vul6EdfCRDe1l/G8Y2WidYn3ch6KWJeMNcqs73gJCysvYlHFOSwuPowXcoawNqUGkfFZSNieiJ07d6K6ulrIGbfXpcsVLRzsK5t82G1le4qKimQyKG9HinHvUYDszDgKeFBA64BWGzCbdB/4EEBh3KsC9CXvILPyLmBxQIcPgAjoljwdEuTUg2SCFhAeFnd4IBkv537SLBxOmYcDSVciPyMMTbU5GB2qFZcXj4AEATcKaty2em5LZrY2sN4GNHltFPDl1FHJQQCwssCSAjGCMOkfpqkLlrZT2ugQumBxDsHR/pU+0ny0LFtG0rQc02/i/sMT5Pe3o304H6tqfoerkz+HN8dc5o49JB2XRXnPl0Zdiu9GfQPbYtdgZ9ZWbCpNwAcaomWNwJvrN+BTeaG4OjcMXymNwdeqY/GN+nhc05iIbzb9Za9rGrbj63Vx+FLlZry71uzoZAhIlKwT6u0okUXVR/f3iFWCi6u5s9r03lbs2V0nVhGSlOa6HaguTUFNXhi6sn6JsfTv4lDqZ3Eh9a/wasoV+EPqpQFuWuqqRWJiX+ezPo69NUsw2lcoRKRtoArXNsfJ2hSCfRKQb8WvkrUwD1anCfmQbYUHK/DrocoZ11ODFeClaf5nWpU0zpZTeX9IS9Tj3UW4vSEd72w0n++cFhAlBzp+OO+2+V543yWXdPP7Ju+89x3g98Z8dxzire+sknnn/XblnO+q5Ol8fw7u60T+zlSsDw0RawjHN47RHOuC/envg47x/udgOhrnl9U87N8byjJe//ReZecK/Tqa11w6mqa6Gs4gIKwkhWf704xeK6S+yth5adxsocoy3f83m47GU171NNQ8VEbbF+yZP4hdXV1yPgh9uktLGwQ0KUA0wM4DgBpvAxNbRgGuHWd0FPhpXoYA+PObTd/kxzz8+XjEyNblfeCzcash+Kf7E4HW/8QlZ4GMnkJvzxS6VoRj5J/+GT1f+Rqyl4QgOSkfxcWtaGkZkQMIx8fPCDj9U8slODaX2YVots9gZt+bPrfl7b7x5E3fmjS99/c3nzVOQ/1M7TB4ml0Hr1xbbyYpMeXZ+Zl7rw12WmBeWoaWyzDYpWSSC83bBsewuCIEn4z/R9DawR9+/ti/L/pv8J2Y72NVbBTSM0pkvYQu1CaIN4fzmdPBtQzNl1v30l2PZJUWs/7+g7IhQVvbmLhrkRRUVHSKC1dBQaNYzbKyKpCeXiIufEVPPoeRT3xCNjoY+djHkXn3gwhbvxWbNqUiIcGQjMzMMnEDy82tBfMgweEWunV1A0J62tvH0d09JaSDu1qRENPaQaLkJx6m3wL7lW2qqmrDqlVrZP0YXZ/8Y46OPTouBRtzNE51NbR1Vd/+AVE9ezzjPYkIF3PTGkJ3sIMHD4K7TPGcjZraOuSW1WF7Uasc3BhSOIaVJftlu94l5WewqOK8WZNTfh7Pl53D/JKzmF94EvOLTuK5ktNyjsuCsrPiJre47BSWFB/FsvwJrMzpAw8d3JBchK2JGUhNTUN+fr5LPHjmEk+Fp3WGVg+SJJIlu+52u3lP0sKdZNJS4syZBgIkdGbdCQUMa5wHHBQIuEDCnp13QLMLOBQUK5i2wIYCiADwIQTIIRwK1KUeCso916FjB3txuHcrXsn5B7NrUco8TCZdhfyMDaAbjRCQ6S7ZrpeERcC+li9AyHH9UMDji5N6WX1gCJFt0SCIMpf0iZIY7UttswuYDAEiOHNnjZ22aR+YPnEAmdMXKsv6m3v2gdadcd7n5frMO20ybfCIB7d+PXqwGz2jxdjUvBI/2PFdvG/LewIsHpdFvwl/E/U+fCni8278B6Lfj9VbnkVOxiZUlaYioTYNHyRA5VatTRG4rGEDSEQur4/A5Q32FYnLG4JdlLHjVYdxwdKCxdv6/nvNz4RvbojAJc5s/lvqN+CB5BUozN6MzsZcjPaU4vBEC6b7S7F/qBxH9jZjX18JRltysLe/XLZy5rvU31UmLlv1VVmoLo5Dw86V6Mz6FUbTr8eR1M/gfOr78HLq2/BfzgJ2ddOiq5YSEoYXsz6C6fKHMdyRjvquYny7YSvmNZj1Fu+sCMO3Elbjzsxo0NXssa5CWWT/9K5K9zBHPdSRxOS+skSxWPx6F8lJBX5Zl4kHKlOFdFCOlpN7i40l5cGqNDzZXzYjH82PIfPg6fJ3NGV4BKQpYk4XLH3/vPfTvN/ynVAy7LyvSr51C2jz3hsyou++knP9TriE3hlL5Dvgfof47psx4ch0D6rKdyA8bJ1s1btjxw4cPnw44HdDx3WG/vHR/2yPmfp7ofrBZDXNr6fPdvpc+lqWhsH07bzse9XRcAYBUWEVsENNmy3USlOHMsH+ZtPV+GA6Gqcys4WUY9psoeppPfls3/OZM3OlpaXCUsPCItDSMhQwM60gjiFBnwI/De107z4QvKiuphPM6P1rhXOXYwNOk6dfnmXZQJCgizO+4+OnMDb2510ElARzBHYtmeUYvObbqHrsd7LrFbcxrarqkTMU6FdP9xaCvj+1TNaZdffvSDSzvcH7hHJ62Z+H91l4n4mR855f6zOaPV0/k8C8vDJnvk92HTVfT94jrjPTTLupzzRPR9ttdJWEkojyXt+N8T2nxeXq4cKn8YFtHzIzj1GGfHwi+lO4d9NDiErYjh07alBZ2Y3W1jEhlvw8STB0XQ/LtS8tQ4kvPz9uaTs2dlqsDyQkfD/6+vYLKaGVpK1tXIgrNy7g7mkNy8Mw8o+fFhc/7q5WfM8vkBCbKZsblJbSqtGPpqZdaG0dBYkG8+jpmRKry+DgYXlH+f7pO8T6knTYfaB11n71h3TRjI7eJCCZ6xp0caE9Vum9hsHGGx2TNFRZf6jp/tA/fvGZl1pDuMaCW/WSiHDRNy0izc0tKK+uQ25xFVJ3liEupxLRO1qwPrcba3f0YU1aF8KiKhEWVobIVbkI2VCKhamdWJreiRUZ7ViT2Yr1qZWITMzF5sQsxKdkIT0r
/yDmFF0BNNXN8FvZb6sT3R5vxjSe9bXUaEiEOAaUcLE/GfByRLl3CcsEx8sXIUFCRWyjjx/yy9UYrLQPfKkTxaKHclnS5PweXCyuNedFpIqnsuYMkr6p0FrJIDkzPAczIzIBaVLdNWrNjsKQuhRS6RLvQaE0OtYaOkw5tBVrmMvQU9VaSWN6OjoELUq2thQqjbeM3e+dzglc3SGUFJSIqqCtFUKDFyChNVRaKovxHB/I2gzQSaMTMrW/gZR0+rZWCG2EpSQNHFnvzoTtJOoKElGaWEiivMSUJQbj4LsOORnxyJ/baykBdmxKMyJB6NPlxauQUVxMqrLU0UyQOabkgJGmibgYTRqjqcMFKNMM7I7I1WTMSRDRSaOTJUehmnSXVsyR05eGSeLKVIGS9VTtA936gYmHFcPlZ4wNSClG0f2d5lj3yZhEClJIcNKRnNHZQa2+HyEzjUrsK5wDapLU1BXmY6WhlxxBsDo2NuHmuWaW7urcNW6lSYOyLrlcAcifLcpW9SsqOL0Sn6sGJ9TBYtG4AwQSNsLAor7/T/DCxkReK81D6+XJYq61HstOeI1cHJ1Kl7MihDbDdpiPJ++QgDDs8kheLU4Hs+lLhdbELpDf3FtJN5rzcUL6SvAsalmRWNzSk1oL8KAhRyDxucc77XSBDybEor31puyFzMjZRy2ocoWVcVoD/Jy3ip8TOP0cQMRLj9PIEKvmpHnvsv91XtumGNhqMe573r/VVVLz+lFLWjxTAQHzhIQ7D/vM7zz1kvyrJJB7+mswIrQ+Zjl+yHmzvoEhblxIi3jc0OJXFbaSnzy4Zvw8/lApHc33XitkYAc6RM7otkzP0LgIl/5TRGoxq8KwrRP3xHgrc8rnUMwKrn//GkyBvMEIvzd3Hbrb/Dbm67F4oXTUFORBkrMqH61NMBPnhvOg2CGGwVL/GdgzsyPsGj+NJQWJMrvhvYo6cnhmPH5FHGdS7XGGZ+9i5kzPhBHDHyWZS1EwuP9LQn4cn5PHEPXi6n5DY0tG1PvGJ0nro6WuFe0ReMGyHd9X53vXfa3XieBCBWVnG+yXBRdGLZnW/vvfLRap/RKq+Xan567U5tO63Rsnk9Er3SaTkQzHr3S6Hg6jvZhp9qGNEpn591t3efj0bNM+7Lba15p7NQeU8vt9nZe65nqOJradbQV2bx5M8rLy7F69WrQawzByMKFNCCOQlZWkURbZ5BDghHq/Bsw4mVSzS66MrGGqfe2YbmRMnjLFFgoAPACAmWAta1JTR/2OGPLld6egz0G89pGx3TX2+emH3oMI6NO1bTh4VEMDh5BX99B9Pbux+bN+/4uDs61r++QzJ33b2TkCw/4UDBpJEBj14egg2pW3d27UF3dhpSUtQgICJQXaWBgoKiy1NTUCJB170Lrc2Y/j5TAHT1+AjldRv1K9OMrz2BGQgvmxhQhJGcDaHRO70ienXmHMRaDZzFupqEzRI2HACSqvF8Y5tsfegYL4stlN396aDoemfS+tHlk0geYGpQoakYvfTxf7EbI8JJ5fvDZyaK2xR16lgVnteDa2+5H4eDXSG09KACE4EJBj6Scg8OkU3LAeWZtPI64+p1YVKm2IDQ2/hLhaaUoS5mPrrQXsTvlepxO/u/4Ovk/ewAFgcOfk/8T9iX8FLsSL8OepCuwP+UKHEi9HIdSf4VDqZficOrFOJp6IY6n/gQnU76P0yn/A1+m/J84m/JP+Crl/8CfUv4Rf0r5L/g65T/jz8n/G3Yn/gxvhtfj3cQ+TM3Zi2kZg/CJyId/YiUI7mgnw4B66m5YmX+9Jqa8ToIGukCevjxdgFpo7gbxSEYQNzMiB3NWFWFOTJEAkJDsNrw/P0oOGufPjiqQtfx4SbxItXg/qAZHUDNlbiTenRsp90MBj1lfA3wUhBBgUgUvrnYES8opXXKcFVR/hZCiAVTX1IikjXE/CCbs581+t+m7j/XaRuv5bPK53b59O0pLSxEZGem89xaCHrNqq3IxsLkB3LUlI32QNhM7252d4RZsp4epQaOONNRbL+5vadTe11Ulrn57OyvhOTZViapSX5dxk0sbi+G+BqFn8D3u+MruszMWd4HJ0FPdZDzQQYbNwwzZDJPmyXg6x1jVEGVOza653YcwVGSw5HD05VWFxQI7pLEPG6jo7rYNVI7s78aBra3YOdgoa0QQR3Wyvk3VospFBnP/rnZhZgm22vtqcdW3jITOmBxvN2Th2klPSlwluralN6y7Z30gsZLoeUoCBVoxQKiGRfe4npgfdIfruMQ1MTy8wQS9bQyNuND1xByxXfE6eSewIfvxBCi04oJ46DV+yHhueL+FBETW39qNl/toScJ4z3VH3zDQzv10Px/KRB/pFwlW6NI5+OlPfohJLzyBoCW+IIhYtGCaSDVeeO5xPP3kQ8hbGyPg4PrrrhK7JN7z1DUrcPFFPxdmnqDhgymv4Bc//1cHgPSLJO+lF57ANVdfLpJFgtOivNX4xc9/IiBEn70PpryGB++/EwTsMVEBuOrKy8TpAtUR77jtRtx91y1IjAsWFb++rmpMevFJ3HD9rwXEcg3oze3WW24QkFRVmooFcz/DdddeJUCfkhzaL1126UW4+abrsMR/OuKjA/HYI/fjjttvEtsnmYf+trhWLgAn6+xZd+8a87fmaevkuS5bB5uxJnGVeLyixJV2gfr+Yco/Pbe/lVqmqf3+0veYu72e233a9My7abWtO9W22l7PmU70xzo9bLpzJCA6mDYeL9VBWOf+c7fXwTR10+i5m07PbTotc4/Jc63T9NvSaV9K5061H22nY7nPlU7b6znT8/3Z7SbKT0Tvbj/e2Dq+ptqXm3a8c7stP+RE511dXbIzyB+MiSrsj6CgYMTEJGDt2mLU128UFZzh4WPCmCsT+82pl8G12xJI8FxTu07z3jrtQ1MvaDh/H972qm6lAEaBybljeEETgYiCEQISgrC/r4NzPj1GkmWud+y6sIxgi3YvbW2DKCysxqpVqx3D8kUIDQ1Feno62traZAeZu8/6XPHZtA8t5zPGcrbdc+g44tu+MIa5NNItOQafmErDIBdT+rFPJBdUA1LmWI2gFYTwnGpXN971CGatzBMpxLtzwvH69ECJkP7MuzMxLSRN6CfPDMVTb36GgNR6PPDMZDz+6ifCgH+wMBoPv/Qe3vAJxof+MSLpIFN95W/uEIlG2vpDuO72B8Dgh5yHYZYtL1uOOhZtGejJifMOrmJ0bZXsfIXAzFasXbsW1RXFaKtMQG/hVOzNugOn0r6PP6X8VzFC/3PyP6A19g/IiZ+H4tRAVK0NQUP+cqwrWI7GglA05S9Dc34QWvMXYX3uXLTlzER79jR0rv0Q3Vlvoj/zOWzN+AN2pt+JvanXoyHuIbwW3ogpBCBrd+PzuHWyRgRXBHdUg8rtPWPUrpxrEADgAXfGLmNqYKIAEKpeSQDIikFRb6OKFY35aYBPtSoCG0pVVuR3ynl0+YAYtzOlXc7yvA4QoHBsSqb8InLk3tB2hvdTpSCee8s5OZIQuuql2+OIim2YVUWjbwNCFpQdQm5ROTZt
2iRueWnbQSBhP2+at1P3u5PPpdZTcjc8PCze2lauXCl62ksWL0LMqgiUFmWgv7tePPoYT1NUSTK7/GPVkYzNBIGEHCNUTfIeVPWgaggBzf6dHaLGRNUUxhPQfilNsAEHmRhhNpW5VIbI3ol1ysgAKSOqjBRTcyhoGGvYazNOHubKZqZIr/06+vsqOTE76F71H6W358v5c/ecKi9UH6OaFoGbqvEQcHDXmtfNg4zpxoF6/Lp5lcfWziMByYwUGwxxv9tTIfYTZPQplXi1ME7sMAguKFWgZIJ2GwJAJD6SE/yPwMA5F5BgnbPcPux6LdcyO2VeD7tvbaNl7lTqe6vEXe976/Nx3bo4/CcnCCHt+s4XCV0AqOfe6n03ki8FnHqfhDHmPVRVIuvZ8TDNTl+Uwv3jP/5XLJz3udyvqrJUAYtUB/zZT38soID3jBI7SiM+n/q2gPIpb0/CLTdfL1JD3v/OtlJ871/+bwNAjtIGYrNIVhSAsA2f+WuvucIDQAjyOUbgYl+R8BGUR4QtELfRVPkiAKK9B58jedaO9GP5srkeAMLfzfTP3sXdd90q3u3YZve29bj7d7dgyjsvG5qj/bj3ntvwwH13iioX26StWYEf/+j74tzB80zr2iog13MntdeN6+r97Zl7wed+qK8RKcmx4vSC7xSqvn8baa39XtL3E9Px3l/Kv7lTm07zNr27vZ5rW02VhqnmWcc/O9X27pQ0IgGxK5TYLmPeHkQnZKfu9t/m3KafaFx3PzYN6/TP3Y7n9qJovXuciejdtNpOU+1vonS89WJb/k1EY5fb4+uY7pTt7XY2vZ1XOpbp30S0dn/a1k5Zzw/ygQMHRNUhKytLDItNkMNFCApaKio4qanZqKlpkx1yMqxeA3ZbCqGSC029TL1KRQxw8EoovEDAtPXWG7DBeqU1qZ57xzBSFMNca1sFM+6U9Vpm5+0yMyb7//s/vNflvRaCqy1bjsq9ZODAuLhk0FsagwfSYJdqVvSmtmXLFoyOjoo9kf38aZ7Pjh76LOpzSIA7vPcYwpqMdyMGWvPN34OZq8oQlL4OjJCdtuEIcnu/9Bqdi8TBYfyd3Xnu0lMFilIMAg/GkaDKFMEId93nx5dJxG62oxrPlHkRYmTOmBfvzY8ShviZd3xEJYvg486HnxdVLL+VeXh9epCAFTK/ZMIZK4NqQeJaVqUx4m7W7NhzHpwvPThF1O6TWCDiqrX6LBbm9CEzM1PUHAnY2lqb0FKbh/aSZdie/ShOpP8cZ1P/G/YlX4r8jBgUFxdL2+bmZrS2tmL9+vVyME+DdgZvbGxslFg/DfV1qK+tRH11Ceor81BbmoHy/ET4RudicsxGfJA6jE/X7sK08DwDJIq6vapX/V95wN0YYOVcHyU7c6ILJS4K8wRZsgZDf5E8JURFQ3+WPCUprGMblks7B0CIIXv/V0baMvi1OAIgEKKXM7pVHjO2AiD1rjX4Z5GSUBpGb2g0QDfr+pWoZK3OrQHXiWqkVMOidE2fQb7LNG+n+t7T1H7nKQ3fe/SYxcCulIgYz4GLxHg0M321eLHZvW2DMNRkrGmHQcZHD0osRCXJ8TjFXVc9hMmmypLVXiUFTMm0uAGHYbTOVfFQJlOAhoISh8FUhkiAhwIXMk7SzulLQYu1U27qDRPr0XlXZtXDwDoSFFVJcfq15yNz8jBqfXJNXBfxviU2JA7oEglPt0eljGtAMNI5SAASMwaA3Be3CC+kh2NydRreb8vHh5tKJbgrA7z+PR8MKPvhphLjKas+1gIgYRMCEAMoHWDJ++i5R957q7Ydck+1ngBSnwG9/za4PNIvkoT/8X/9M1LXhHmAL/ug6tL33GaAQgAAIABJREFUv/8v8Jk2BbHRgQhZOlukBq9MelrUBR975D68/soz8uxzfvRWRUkDjdA5Ju8taRSAsE/+Lq675koPAKFUjKCFwTVJw98Cnwf2RVuRMQDEef7obUslIJQYPvrwfeCcCPzlOTzSh5dfekqkIvJbOtqPB+67Ay+/+KSZ65E+FOcn4Cc/+ZEE89TfgFknY9vhWW9nzdhGnnd5xrnmZt1JwzF4rZScxsYY19/Ll/8/7L0HdB7ZdSY4e3bmnPHszPEZ73gdxvbx7sx63Wp1K3RudVDO0ZbUsmRlqaVuy5KsZIVmaOYcm5kgQQAECIAgQBIgkUkEZhJgzqGZc25mAHfPd6u++u9/+eoHWmxNW7bqnOJ979X33fvqVeHnvfXSRNmxY4cGH/b3CGn+7vC3yJZ5LH+3KMkJSc9FHjxyacdyWdYb13JsOsRDmfaAAJjLAK7ZClrFNk0j9kZylVku62DtkEv7zBPr8yG7IS7tkt9XSR7tM09+mn1cTzvI9dLrIt/qIsdjWU5JrpW8lksSD4w/UAa7ODFpCg4RlrLETtYYpoVVj4YMGSIjR47RPUYWL26Q9vatsmXLEZ3EjoAEw3hsQEHn10s69lF59ld5Yq0em+Z1KxmY2DKkLS9KM0iBzKQtzpYzTb3ARWcmOIrs2HyECdtn8JOxTd0ZPK9ZnUhn9LJe9p7It/Ujh8OrEDhu3LhfdysvLCyT0aPH6TNl0FFaWqqOHoJRvAc8cr1TfG/4TkHiCzW+VO86ekFGtV5XZ1IDkEWHpN+saGUmfFXH7tvYrA5fwaP9JKIhQToR3AQg+tVeA4G4dwKOazyHITOcCF/zo0BBhxYhva9HxpStlk999Qe6n8f8tad0GBEmTCe8ZC5EhFe9SVmkU/WhDHNBdt3Wes9sPSkDsbkcNpZruiUvLz4kaD8EDtiZGzuk79mzR1diWrtmtaxtKpU9NT+V01Ufld3LX5IN61fLzp07FYdV6+Bc40Qa82swVAg6Dhw4oF/rMdkf+vCfG3oDalvWyz/N3yHfLzkkP1p0Un5afkB++cpCGVXSJrMb92kPBoY2IahIej20fbgxI9oqOpPrJjCI2ie+bp8F01nYzO7zmWcVP0sbVCon07NELHqdEMygdwnPaEzD6Wgzxabb0q/hmkyuXCONjY3aDgiI7ZdFvKO9vZ/23eQ7bXl418+dO6fvPp4hev4wLHXw4MG6clb98nLZsqlJjr+6UR0pOB1wPngymPCS17Nl5BiiTJ0dOomxg5OUqwOZwWScoNgZjZ3+yGGKh1hpUGKc1SQwiJwldaa03sDEus2wHAYzDDBYbzph1tG1GDptwKMN0D4asMXBF/I4cY06kcZX8c49rZkApH2i/H7dWHnP7CHyt/MnyJdr58k3W0vl+TWL5Lvrq37rz++sr9J7+caKEnmweYb8b+wBaZ8o3549RCrLZsrGtdXJxO6ovTJBhzrKxjGOhs/xHYGMhwmp05xxqPlO0dHme7d5Q638yZ/8XzqHIxPY7NKNL//4j/9Q50/MnDpCeC5bnK89CR/7yHvlhef/PlqhKn5P3/bAfXEAEjnlGN6VBCAXdt3VA4Jelj/6o/+mwY69L7xXPgCJeip2SVYAcnKLDt/61te/oPOOcE+4z29+/QuCCfF431CGAOTrX/mcBjbIY6W5v/jzP9UluJO/Af4tZv1N8e/ILCGM60k
wt1POHN8sG9bUysSJY3WBi9mzZ9817Aq/Pfh94W+U/f3xv0HEhCSx5APDI4SHTdq1OKYpQ1xbBpy1RR7LaYOcu+aAWKCtFAieTCwllRLr8cwDHzrIJ455K3PxaNfifTrER1lfucD5w9tgnvdB/ZbLNCRwxJLLcpv3eizG8m3a19XqsHxrh2nLRVlvXHwhxIZpGKaFr4SFhYUyfvx4XdcawciYMeN1yM6CBVVSV7dK1q3bLbt2ndKND9lDEjnGkQMdOduRg51xmKNAIdNzAUc70xuS4RNnHfEMjo53RtLZt7ZtmU+zXtn6ozpH2FCPib0epaEnoytTZu3xui2z6V/nejYHvRxYwWr37rOyfv1e3ZSyoKBM5/lgJTQ4WAgssUxpS0uLOrcYY8/3wr8rfIfS3m3yIBmA7Dh8Xka0xCtgoQek4oD0n71ch/lgKVcsC8uv7Vxy1zvDtpxpBiuZIT0pzu5e0RWvvj90tvx8Yqk66D8dO18DCNghX4d/cWPD2MFWGxrERPNAFB87yqh3XtuprABk4NKjOlxNl+M9dEjOnj2rgTy+sCOIQLCBwGHHxkbZv2mx7N+7S7++Y3Un7HOBuVk84WTjRDk24sMJHHQiOESQ0rhpv/y86rj8ZPFZ+VnNJflZyXadj4EhUOgJ0rbFLvJ7upMAhO0W3UscrMWBhL2mz8AEGmx3toFeJ8/gcvEibuY5adub4W7oUcHSvOhdmtR0Ig7sML/mhoyr7NAeIwR2dh4I31H7bjLtJbEhCSzfa+jH8AlMWJ8xY4b+1mHS+sQJY6Vkfp40N1TqEK3T2LfltFkZiI6Jd+yNU0NHUB1A7/gb50adqZCexEYmCNFAgDbMF261ETuiGXsRT+tBRzbm+jLope7kWmKfTq/RFzuAwOKE88yTZaonxt0VgLRHy4j/x6Zx8j8WjpAHqsbJO+umySMrZsljrXPk8ba5/ypO3MvDTTPlj1e8ovtM6dLs7ZOiAKT87gCEzy4JEOL2oyPs35Uon/Z+RM+GXCyj/Cd/HAcg+j5GPMytwPCohSXT9FkCjx4MvO9HD6yVL33hU/KZT31IeyxgDwsPAI+hT8jj2U6dNETect//K5i7gTLsv3P/W/5Kfvaj72oeQ/P+8L/9gUwcOyAJDqKhedGKXJhD8rUvf1ZX6Yrqu1MmT3g57gFZrj0a3/r6c/K3n/6I7mcDG6jjpz/xQfn0Jz8Y1fvCLvng+5+Wr4UCkMYyrQffSfBxat6/59o2mYAd93f80AapW1Ym48djH6xhuuUBPhjZ3ln+Bln/DWX+4O8P8SHpOTZPvrdDPR6LPK5REmcldYW4uXjJHBBPtMp9mliW0wAly9Mk+V4CzxuhpA7myUG5PYjLJS3ep3PxvK3/ldxctnGN7ZKr/r6+yOfC81qIxzJiIH0dkMcXbThC+CODw4pgBF8KMVQLE9hHjBgl48ZNlKlTZ0pp6WKdyNzZ+ars2XM2Xlmrb70j2Q59tlOdfc067OnpKGiIghoGAwwkeM2XRz0Hkc5sbHZZNs/asIGR5djghnjcY3SftAW9Nh2qj8UAi8AL81T27bsou3ad1hWsqqubZfbsAhk/fpIOr4IjhXk+WJ0DS+jCKcaXXz+x174T/t3Cu8DTvjNIE4sABMHr9lfPyciWa1k9IAPm1Ouu54XtxwUbBiYBCAIA/SKf/YU8mhsSf7Wn0xsHC3Ca6RxHAUPcY6HlcW/JXtF5IAh4sNzrom3XMvM8qAdzEZimDROI6LwQzgPZdUeHFOWtOmMCkNsycPEhDUAwhAoBAgIIDBfC+u/4u0HggGAevRvo6UAeAQbaCe2Pr/r2RBlOLD2LE39/wEIfntnK7SelX+1l+XntVV1+9+fFWwQ9O5MWd0b3ufVa0rZJwBAPZ1uo7X5b25u9Owkmvv8k7wMMe32vaFugpyULr+0ZB27gsy1DuuLr0Upjt6Rs0yWZtiIKQHSvlcabMqpyq/bGIjDgcrz8jeI7599FnydOX1L3j8fi/cUKbqdOYcPNFu0FjjY1HCqYK8JgZG17jRzat0a/hEabpsVfq80X1ciRz3w5VSdHg42ME6+OvgYP/NqdcXYSB0l18gt3PHmcTpMGC7GN2HbUW2Emmce9I9HXZuh39lkn8OGA6UlMFIyoTlOeZYNOWhwEod5ZJ+3pV/ooQLmrB2TVJHXK//fWCfLvWyfKf2ibFJ3tk+Q/tE/+V3JG96TzP9gDsmqSPD9niE74xsaTcPLhTMPRjZ5T9ByjHozo+SS9GYmzHPWsRc56lI6eV/wO6POPnifeNzj7mLj9f/7Bf9UeD8zHQZADvdgD5uF3PqCTy7HHB4KHpZVzZMvGOg1Chg36qfzP//mXsrqlUhcWwBCuP/2TP9LhUJjvdOXcTiktniJ/8Rf/XSeYg1+UP0GDFPScwBZ6KN7z7BM60R2rzB3Y1ar78HSuq9F5Ji9858vy8Y++V/ZsWyFnjnUqZ9CAH+tE9ZX1pVqP/Nlj5ZGH36Y8DMNa2VgmmCz/ysRBugwv5l898fg75bnPfVyXBcb7yB4Q7PeDgCoKOuLeovhvjD0uUVASB+Lxe4+evYN7VuvHCPwWYGQI5kni9xy/G/xdorS/Le5nJ8kSE+LwWgIOJIhJ4wcoSdG9cr3NZAgWLEA5JYD+pHHWhnnyyEXeG/JY5qnLc8mnDOE9l1hKcigt3qeJgSSfEmW5DnIt3qZDXOrsjUscdNg087Bjbdm0x9t60G4uafE+bXneZsguyvBFtrOzU1eVwXrXmHwFBzdaVWuIDu+ZPHmKzJ9foRsgYqnfjo6D2kty8OBlnUdiJ0tHDn16MHEv161DzzSc9mxH3gYD0TXaJDbCRzjqyWAydec18pDnmcHTfrbM2MjoywQpWCYXq3Vd01Wrdu8+Ixs27NedyTGJHJsEomcKPVQIOBAgIuAoKSmRpqYm7emAc8XnnfaseZ3vJfLEUhJDSSwDkN1Hzsr4ttfiAOS2/GrJcRmQ3yyTqjZJQfuxuwMQGzgkgYANDqLN7bCpHuYLJMEJnVztsYgDGRPQIEjBvAWstKS7o+PLO4OdOLCIgoy4VyS5Fgc+cR4YBExw4GeuOi8Dmm4kcxUGVezQIVgbNmzQng20MQMKBBAIHhCQsJcD11GOL2ZoL7YhJdrYn8AhKEFw07r7nAZAv2q4Kb9qvCm/LNmq8z+mVG+NJvdjX5XdXRpwzWs9ohv+YTJ/XvNB+dYvx+ncGOxtgsAMbYnnUbT6pPZAoAcFQQXukz1VCDAwzwNBHHYwx6aDGEKH5Y2xIhl0FK4+qfNz0NbF68+qTgxZK910Ua9h3xasnFWx+Wq0+ti6szrBHdcxzwT6oXdGy6lkaBsmo49YtEXnJKEnFsEX2g1tw4NtZqV/R4nlO+rzubh4jggasQpccXGxYE+lzJDU4TIvf4Y01VXI5g2Ncmgv9jPAErrRkCN16qzzFwcI/KINJ5E9DXAe6bQzAEBeHSE6R5BxzwZ5kQQudjDhSDIoYWARByt0rujAqoydWFunJFBKeJmv6ryWsWuuZdXN1Ce+D94fe0A272mVt63BHJDMRq
p937CVG72GZLxpK4Iaf8LxN87/Xdc9Pitv62nT3k6ua3djX5w3QleKwoRuG4Do+5G0XRxk6Jd6FyTG7xHfCTxnpqN3KHpvome3U7Z11MmPf/hteeyRt8s/vvg1DRR4Dc8Gy0z/zac/LOiJ+N6LX5UXvvP3gqV7oQsbUmIi+sc/8j4ditXvF9+XH3zvG4rH/BHM/8HmlVim9yMffrf8wwtf0RWqnv/m38nffuYjsnxJvi4zjT09sNLWpz75QV0C+PlvfVHWtlVpj0hF6QwdPoXlfLHENQKHv3vuk/LsM4/r6lv7drZqUIFVu772lc/KkEE/1eFXP/rht7RnBhPZC+aMk2efflxXvpo/b1Jkc8VC+cTH3i8//Mdv6CakuGf+3fG9zmo7betoc8EjB9drDyg+QAwdOkTnjWHILX6P+PvB32zmrbS/OTZtMf53i9cs3qeJsdLq8XibB8dirQ6kedi0LSOXMukB8Yr+JeZxI29Gvd4su/d6r/dS798kF44Re0c6Ojp0uBYcXji+CEj4nzVW15o6dZbk5xdLWdlSDUpaWzdLR8chXYnpwIFL6lQjKKHjHjnekcMfOebWKc/0FNDhz3beI8eeDr/lZ/RbfRk7EYf8jCSPkrqtfdtjwethSb1RDwYwkd6oHGkGGxhOhQ0jEXBs2nRAe5gqK+ukoKA02acj6okaoUOrZs6cKZWVlTonAfMHMLSEPxL3+h72xmcAcvD4WZmx+nKya/ivll+Q/gWtMiEeJlTeeSX5Sq8BgA6Lcr0dNljYJ+ogf/uX4+WfRuarw5psHMgv8/zKHkvtJdkn6thitawv/eDlqAdEgwoGGAxIzNd6BkBGMgBBvSe3XZR+TVgFK9oRfVjJal0FC3tWYM8JrtaENkd7INCAI8ueDaQZfPT1uQAHHoKYdXvPyqDmG7rCmO6vsnCPDMmvl6nxBHQMZ0IQ8U8j58pL0yp1J3hsDPjTcfPl7U++X57vN1HyW47oamLfeWmCvDhwinzjZyNlyNw6wXLGs5v2a7Dy+Rd+qft7YP7LsIImXVr3e4OmafuPLVsj2JPli98fqD0v2BwSwQ2CESzbCy6CxdGlq7TdsQgA9mYZkt8geU371TYm/3/9ZyN1yBiCFQQgU1vPZIZgNdyQkeUbpKKiQoeC4qMH27a395DX36jfPv7OYXlNBPMFBQXaC4w5VAj2x44ZJXPnTJfqxSWypn2Z7NvVLhiqhaVpox4SG0zAaY/z6jxmnHh10nktdujpKKmTFDukUSCS0XFXnnr5lTxxZA0nDjDUSWUgomVxbwt1MBCJgyTY0jqZ8qhucbnWO+oFIc46xFEAskW27WuTJ9fmyX9pnyz/R+sk+U8tE+U/tUyQ31v5b+PEveKef7pgvC4ri2WLdeO/03EPiHt2UVvGwUTyDDLBRtY7wGelzzB65voMzu8SLAfd1rxQl6xtaSyTjrU1Wb1e6KHA8CnM+8AeHAgooh4ZOOw7db+cWuxxs6xIl5XesblRdXWsq9FFGlAP2MD1BmD2r9Elq2EL94iFHKAPe8Jgrxzowp44+DsBFxPSV7dWytJFedK5bpkGP+DyxL49wGFlOuzRgyClrbk8WTULK86hZ4V41gvDxbAfDe4dq7Ppe893mH9zcR5thTpirgfmgM0vnK09oPBn8MEVc/Lwe87fmVzyjfoNymXjX8K1rJ3Q8R8W/3NDA4QOW2libRnSuQ5iyaVkObkhPcRAkkdJPCX1UFpuWppYL9PwttxzmLcYpllnSmIpgeNBDiTxtsxiyaEkzvJs2uKYpgxxWUZpsUxD8jolbPJkGfHIc7gWxr/jq2Vtba3k5+cLVogYM2aMrjaDL/TDh4+UsWPHy6RJU2XmzHwdurV8eZvOJdm+/bjOX8BqTdjPIppTciNZwSrs0NtgIkpnAgM69TZosUOi7uZ6GwgIotPqZhBBmdETBRPZ2Lvrw3pFEsEXgg0MpUKwsXnzYWlr2yLV1SukqGihTJ+ep5tHjho1Rp0ezuXAylX4UcSQEThIcILxpRwOa+gdsc/LpvH8/LMNPWO+FyEuHDW8AydOnZGSjecFE9B1snbDDek3f6OMLlsjs1cc1C/j+Oqtk9CTQCM7ANGhPezh2BftCfLCwCm6rC6+4MPRXrzrlgYVVTvgdN9SfejhwARsTGyGIw7nFitfPfTMR/RrO8rAxdyD7CVivf3M5HadhL7ztizYdEnGrLwSLxV7S3sgRucv1WFCeN/RHY+vYmgHtp1tV7YvpcUwzfZFngfSCFowDGvLgdMyeMWNxFH/1eJj8QpYW5IekJIN5+Sx935CJi3epPNfZtbv0T063vXhz8qoBW3aNt/8+Rh5/P2fFkzWxzwZLA5w/8NPy4TKjbrk8Qc//00NYhAYPPnBzwiCjjGlq+R7Q2bokrzv/uQXVTd6MbD3x4ee+7YGIJgM/8QHPiPz2o7J9Npd8vCzH9FNDgfOrtFNIL/yo6Hy9z8cLNh5/YkPfFqGzmuMe0Auy8QWvDNRYPdSwzXdVBHBNNq2twCEbWUl2zSXtHibDnHw3PB+oy5YzhcbvGIuFYakRsHIEBkzeqRMeWWCFBXMkvrahbK1o1knscOZuag7MWd/kYYzpWfiDEW9IUmPRRwIoMciK1hIHFT2nmQCAzqbDAAYIEQ64i+/tKcBA226usTBhNWjabWdCWbYq8Jr0RAt1sfiIsfuwumtcujQWpm8qUp+snKefLdmmnx14Tj5Ysko+cL8EfJc0b/u8wtFI+RLC0bL1xdNkJmLZsjqlkW6b8bJIxvUQadzHA2lcs8keW5xjxiDUpbjfUnejUzbZz2bAAfvYGLXPl+kY32JDvLjoVvsrcl6P7U+UTCr9UnqlNFn35PoncU9RWdyzdbF3KPWJbaBNOvO8igf37+5B7ZNhMsEymo//lvDEC3sar54UZFMmjhWRxXAh6mvr9f5ffx9x+8BfidyHfwdIRZ5+/tvudRFjpWWb8stn2l7HWlrz+oh3kvPD+U9J+kBscZA5GGVoMzmkbY8VpK4NOl12DztUuIa9Vgc0rTHcnJCkphc0vOAxeE53i6ve36IS6yVxPWF7217DvRSn7WRlibf8nLxrX1yiaf0tizHX/M6gOXXX/SS4Gs8xslXV1frcIa8vDwd0sBlf+FQDx2KsZUjdO7CrFnzpKSkUpYsadSv/qtWbZcNG/bKli2HZefO0+qoYwM9BCjcqwOrPqHnAGd20AAn3wcKPgix15kmhvlMkHF3kMKAg8FK1IuBOiG4ePXVa1pX9PZggjgCLfQAYaI4hqjV1q7S3qG8vEKdU4P24IkvL+hRQrCBgA5fhVtbW3VeDr6Kp31Rt8/LPlP7rGy5f6Y27znMWwzs4csQhss0bD0VfalHENJ4S+AoDy9uVad0/tozURAQ7wOSzCNgbwYk5xPEaQQWv3ylQgMQOL2/mFyuO28j8Bgyr0F+PKZQFmw8L5XbrsvQ/AbdSA9lhatO6ApRj73vkzpv4aWplepYA5usukVb7EUx9UDvBwIa6J2z9qIMasbqXgg+bkn/6pMyKa9E/2PCROnQvBrbPn1Js
12tBA//6WEo1+FjJ3R+jQZ2qEftFXk5v0n34sCwKex+Xrn9pnztJ8PlQ59/Xn4wLE+HWE2p2Sbv+5uv6v4eGJr2g2Gz5TPf/LEGI9GSuz3yjnd9QAMQPI9Pf/2ftJ2wbPJfPfhotGQylt7dfUfb9MNfeF7bFW2IDSY/+sUXBD1EGAr27Me+oMOw5rUfk8ff/yl5pXprwn/k3R/TnhM8P/S4IPjBM8QzHd6MzSuj1cX61V2WiQXVOmcJK4BxDgjaAe8Z3220E9uVbcZ8SJLHa33hAEseJcvwviPwxBA8LGGNTV4nTcLcq2jBh2HDhsq4caOlqHC2NNQulM71DbJne5scPbA+GbLFXhI4TYnTqc5Q7JDR8VKHL3Kqshwu4zxmO2BREEAsnTrNJw4hHdkYq3M1mM44i5GTZ/JZwUnGkVNcPN8jqy5xHfFlGQ7e6WMdcmBPu2zeWCttKxdK3bIiqV6cL0sq58jif+Un7rFmSb401BbrfAr0OBw9sCaZI8HnA2cZzyzz/OJnzyF35lnx+dhnnDjbWc8q1hc/f31G6vTjmcc9KonTH7+PWofo2Uf1YToKMCIdZkihqXOmXhksAxLWz95vhI+xcb2Ji4Ik2x6ZdFaQ5Ozb4Io42qQ97fE41im7trXonkCjR0W9m5gDhv9vsToh/p/1f//4HQgd/H3xEnye5BJDPb6c10O2PdbqIA8yxAXWYnye18i19cY1HkgnAYgnWSAJNOQVMk+DxFlJHbSTJi2OaUpwaIN8n0e5PZgn3kvLtzyb9py0vOUwnYZFOW0TayWu48jFJ4Y4y2cZbVCPzRPv9RDbm7T816uDXC9p09YTZcjjSy42Q0RXJpb9xUpb+CPH0r8YZ43eEgQm+E8cQxzYa4JJ1dOnz5b8/BIpKamSioplsnRps9TWtktz8wZdHnjduj3q2O/YcVL27j0v2EgRTj96U3CilwEnghfsFo4ABsEBgxjKKGBA0MCT2NeUB36kC3ov6TAp9F5gyVvY3rLlqA6ZWr16h9attrZNsIRxaekSwVK4mCT+yiszNNhA0BXd43C9b9w/5tVgwj+WRMaX1o0bN2p7wclF74Z9Tmh7treVtu0thmlKywmlgcOBa/5AGe1Aom6YZL1131EZ13pFe0F0b4eG6zKoZL1ORMdcA3xZT5biZQCgwUbUE6Gb1sWrJTFAYQACLoYOve8zX9EhPJiL8Mh7Pq7L0GJTPDja+Fr/xX/sr3uP4Ks8egSwl8hnn/9n3ScEq0Xp8C8zZCuyifkgcQCEesHpxhyFzisyoS1zPwiqBpV2yKz8wiQYxH3bZ8P2shLpXCfblxzm4XhjPgkmSM9afTEK6hDYNVyX/iUduis5dhQv77isQcWMuj26v8l7P/MV+cmYIg0MkMYQK9w3ApDnXvilBoJ6v/t6tLcCw6YQZGAHeey3grkbf/32J3R5X0wYR+9T4eoTeh0bP0LXoLzlgoCkrOOizjFBjwnmliAgQq/LtOU7k8nqT330c7o3y9Sa7bqxIXZth86CdZhbE23wiCCk/7IzMm3OfKmrq9P3Hm0LZ58BCNuQ7eMlr/cmPY/53ni4bg/k8eyxlDmCUXwgqKqq0t809v7it2z48GHaQ4Kdk5dUFUtr82LZvLFRXt23Vifdnj8ZrToEhyhxkugs6pdnOomZr9uRg8qvvZBRUKGOYRy8JI4pgxnK2Mml82kd3gw/CjDgCMJW5BBGZRHG2IvrmOFGwQxx4GOYD+YLYMd5rIq0Z/sKHZ6DuQZbNi7/N3DW6sZ/GL60b8dKnftx+miHTkBH22gb0+nX5xQHHgwM+MyYJyarPNPu/LqPZ5Bx5jPvT1QevzfQlaXXcozOOGhhQKJ6Yx7qn/3843cW9hk8KTajLxuP8lhHUhejU99DBh9RvVPfb95PbC+yH+lCPTFnC22/rbNZqiqKZPKkcTrPA3+zmGSOoeX48BP6PeD/e/wd4G+ClWk8z4UO8qw+4qjH58mzXGJD0vJpx+sAz+JCeiwX6bsCEEsiGGU47DVvyOY9z+atjlCaWG/P5q0tq4Nclvk8y720+qwdy7cYy/fl5Fg9wHgcdLCM2DRpsUxbLm3aetk0sSEuyqxdpsm33BCftsmj9DxyqZcd+m+RAAAgAElEQVQ45O3B67n45GLYChwMOFdYSQiBCTZ1Q2DCr4oITLAUMHoCMNwBvQPRhOthMmLESN3Fe9y4CTJx4iu65Oy0abN1V3fMOykqKpcFCyqlvLxaFi2q1V6VmpoWXUIYG/KtWLFJhzthyFNb22Zpbe3UneBXruyQlSs3yYoVGzWIaGpaL42N66S+frUub1tVVa860VODORl5eQW6PDGCpClTZsrkyVO1NwdzYKIgY6gGVKj/qFGjdM4G7gk9G0VFRboEKDZew1AqrJyEoR7o4bBOrW1jm/bPhe1PabE+TUya9HibB4fPGJJDhY4eOy4l6zCpOBqGpU7lkmMyqnydzKzfrftzVG4zu3WbXgcEHJFTnD0sigEIhlANL2xOAhBMsIZji30wfjhirnzmGz/W4VbYYRvzEhCAPPTMh+V7L0+X5381QSdZa/Chk9+zbWiww6DE9H7MW3dJXm7ODH3qV3tRRuVXS8mCBRocYtghAmvcP9vRthPSLM8lyQHGctC2HP6zvBOrRd2KJsJj5/CqwzK8ZJVMr9ul7YrhVF/50TDtqfjOSxPln0bM0d3j3/vpLycbMH77l+PkE1/+ns7V4GpVH/3SixrYYWd0BGzf6TdRyjsvy4c+9y1tVwypQm8H5nd8/Ev/IP88YYH2Kk1YtEGe/cTfCeRPxhTKWx99RjDvBPV55D0f0/bHUDjYwXwRPJ9ZDftkxPwWwUT58s1X5ZX2S8m78qvG2zJo4TbJnzdPVq5cKYcOHUr+DtAOPNlGbDMvcR3YUHvbcvKoj3iLYZmV5HkJDN4DDIXEkEgEJLgPbFiJ3zEsgx2tIjhERo4YrqtrzZg+WXtJqpcskNWtNbJ7W5scO7QhmktyEjunR46pOmqJUxY7cMxDxo5WNJSFeeO8qUNm8s5ZpIOoTmkgkInsm4AD+tSxzNQlcfLiutA5VFy8VC/mAiAIwc7Y2Dkeu1ljEjZ6Av5NnAfX6pwPzE3A6lBY/YoLGFgHPivtnx2d9OQZZJ5LxIufswYmTDOAjd8N06t21zMnz7xTd70fsW19L3yaPFtuy4Jp1jN+n/QeM+9rVntoEER8HBBDZ1Jv0x5sKw1edsqF09vkxOGNsn7VciktmauBB/wJ/L+M0RmY04eRG/jgwb95+3tg0/7vn3nycklivczFgW3a9zzm0/jk4ToOL1lmcdRly2iHss8BiDVIxbkkDXhJjq0Uy6wkD2U4eI08SpYTZ7HUYctCPOqwOHJ5rS/Scmw6zSbLiQ3J3ux6DvA8kKaNkB6LY5rS4r0O5i2WaUjLTUuH8H3lQqc/aAf/gcPxxldPOF5w7vCfOZxzDOXCZFAsKYsNxOC8Y2gSNgRCz0EoWIn2wBiuvSroWUFAkHYiQEi7limPdHFvDUg4FQiQ0G2LMaOYjI89BTAsA3VcuHChDtfB
ChoY1w7HCkEGvrAgCMO94r75XNgWPm/b16aB91jqoPR4lpNHyfJc0uoCDyd+sPG80FPTueugDG1+Lfpaj0AEvSCVe2VCVafOOUBwgDkZGOKEgIBBAedmZJbIxdK6PckQLAQgI+evlPd+6svqIM9q3CtPfuhvdJf1FwZO1V4OfK1H7wWGFyEAue+dT+qwog9+7lvaE5KxFy/buy9adYvlkKgX6ley6YqMWvla4iCj9+Pl8m0ybVa+Bo0cfoX7tu2H9sGBNqREmm1lsWxni2MZOXg/EKhv23dERq7EcKVoKNhLdVfk5bItOgwLPQ8YXvbTccXyPIKPkfkaiGBX8p+OL5av/mS49oZ8459H6QTxEUUrtW3RvtOWbZcvvPgrQdDyy8kL5bv9J+tcDvRkfPXHw3Qi+eC59bqAAHac//sfDpLxFeu0F+UXE8vkCy++pJPd0TuFoVXDCpvkC//QT3ucsIs9gkr0diBI/OI/DpB/HDxDe5bmrb8kQ1Zcy7RvwzUZNQcfDMr1QwR6S7F6WOhvA23Dg+2V1oa8biW5VtrroXTac6MOy+GzRt3xN44PCvi7xwcW9G5iQjt+I/AhYsQI/KYMkxHDh2tgMn7cGJ3cjp6SlU1Vsmldvezc2iL7d6+SIwfWaXCC4Uz4ao5hXOw1gZOmjpoGF5GDxt6LJEDhMCl12CKnlM5lhgsnkA4eAwzotuW4Hjl6kXNI23AEM1++1SmM6wUc6op6IxDRXeV1N/ktumLRhVP/FuRWDTpw72gDtEXm+UVtzeeRtKN9VnHgqJjY6dYgQB3sqKcB15LrGgTEz4Y9Ieb5JM/U8DWIBY/vkTr8tuch06OR/V7ZdyV+P6weTUd6oncmDobMe4Ty5P6T+7DvMm3EAYe+k3Hd4raJ3svoni+f2yGYe3T04Hqdl1W7rEymTpmof2/4mIn/r/GBYOvWrepr4O+Wh/17DqWJoyQGeaZzSeIogbVp5P1vDvMKNP/kshO6ZqhJMmSvN65OQg8RE619aAzeFI2R6/MoZ5nnsBySOCuZ7o2nZPOP1WdthNKGlpUkFrZpn5LXsggmw+u5pIHflbQ8bxPXch24HuJQJ7khPcTkkuR7mYvDayEOyngdMq3unmvzlh9KEwvd+I8dXxsxFhs9KJhvgh8SjMuGs48J2lhKEz0qmBiPeShLlizR3hU4OehqxURunlhyEydW9MI1nAh0cOJHCoEEhlfU1NRoQAHdGHKBwAhDpRBcYK8NbESHL+NYiQp1xH3goOQ9sMzeZ6jNiLM8lpELXhrX85gHN8ShTtog3uY9F3oQOMLRwhKmpWuO6bK16AF5qfG2YGz/0MpdMmXZDl3aFV/YQ0EIgo9orw8EBlhOt0udaQwNwrKuk6o65OmPPqfDewbMWKqTzKcu2y4jilYIJkijl2XOioMyq2GvLheLFaAw9OtTX/uhfOOfR6sjnPS0mLkfDEAYfCzouCoT269KP06ob7ol/ZZfkDGzSvX9wPuF+2RPFe4/18E2Zbv5dk/jAg8nVueBHD0upWtPSH8MWWpEu96SftUnddUoTDgvXndaKsx+ILinpGeH6Xj/E20DszKYDkMz19g7kvD9NdN2ikU+6UHicLrMzupJ++7pEiwPvKDjioxvjdoXPR+YWzNg0T6ZNmuu/q1i/geCWfz9+OFXaJPQwXLI3s4QH2WWh2fknxNthPi4ZvEhPm3geaK3E79XbW1t+puC3xsEJfiQwTlyHIo6evRIdZzy586QivICqa0plZbmxbJhbZ3s2LJSDu1do0NKzp2EE79VLjrnlk5Z9MU6dugYZBjHMPminQQbETZTHn9d1q/NsQMZO8J0eiMZ82IHEXwGQ+pkus0L4YT/WzzZFnTktY3Mc2EvUtR2sfON56VOeyawUH78HHBNeXTINQCMOfocrJNPXdHzUW78zDRISHTE15PnaQLOOMhI8OSb+7DPP3mXtAcN70l0alvEuiJMpicj4hPr2iG+J7QB3iEE5/g7OLx/nXSsb5C6ZeW6dPaokdGQZ/xt4aPl8uXLBb8z+IDEv0sv+fdMaX8flBT/438X/G+B5XksbXpMyCYx1jb5lORRkmMl+SjjgbTn+Dx1kJMEILxACQDTkDxYRsWQTPOa5ZJny4ijtHyLs1xbTp6XHm/zHhvKW7xNh7C2zrieduAasV5SbxoX5cT0JqmDuBCX9omBDB32em/pvvDT7Ibso8zjfZ42PZ919XibJ5fS6rC2wcEJ5w1fkOHI2L0ZEBxggiucHPRG8ESeJ67jRJcs8PgKjR4Z6IHOUDct60rJe7L1ZN0heZ14SpZ7HvNWWqznW1u0R2l5TKfxaY/6iKcED22NtkH7bdq+X6a0nE0ceHWWay/KyKqdMr1utxStPqEbBmIiMoKMxFnmMCztGRGZv/aUPPfiSzr3ALt+44s6ltb93Hd+IS++PFUDi/4zluh+H1gS9vPf/YUuLzu2fK1+gX/mo89pr8nkJZ3y3Au/kpHFrcm8hMgJ5waGPVoP1KesE0ODrsiA5szcBO3FKWqXWbPz1EHevn27vjO4X9y3bx/bTra9bfvaNPlWsm3Zu4R3dM32V2XMSix1jCFumAtyQwYuPigTqzplzspDuvcJJqP73iU7tI3BBwOHrGtZgQWDwTi4QMBiAxEdypYJcrL0JM8xHuqGnqU93bo62cKt12T6qssysBl7mmDy+W15qfayDJ9TLfOLSwTDEdFb4Fd1s+3F9g1JtBuxlGxLyhCPzwkce5JDSRzy9kCe9qxk2uPJxXW8R/itQVDLoahwkPDxY9asWdpTgvHpGCrCYajoNRk3drTOLZk14xUpyJ8pixYW6Cpcq9tqdCnRvTvb9OvvqSMd2nNy9sRmXSY4ClDi+Sbq0JpgInHo6ADC2eQXZ+N42q/V8Rd0OsLqjKqTHDuLdFwp454YdTrVQbZ64y/stEk9cV6dU+dU027kzMZ15X3FMisw4tf1LEwcNNExjuuaBAGKZT0jJ5/X1K4GZbEOW/e4Pf29Rm2Eumbs6n2wTon96HqWLa1j1LZJj4G1Dx02b58Vny/blfZ43/F9qtOvwQF7Vuj4oz48A8/K2M7cI3Dxvep9sR2pM9YT1wX3FOGj+4ieubmnuH1xj+jlwNLXWOAAq1i1tyyVyoWFkjdrqkwYH60eidENWLUOHxvxwRAfCBF4+L9J5iFx4m+Tf782TRx/C7wkNyQtl78BVuI6ed4mr1m8TZNHybqTB4mD0nOJo93e+DmHYFHZ72TvX8R+10a/a6PfvQP39g7gxwq9IAjSjhw5Kk0b98mYFReSITa6glTdZRm5dI9Mq9urk5rRExItrXs7WjGJQ7JiZxcbAWJo0YIN56Vy+3XFLNx8RecjYEI5hnNhmBGGXWEJXmw8CDy+skM3lqbFClEYkoU8eggYeGS+ykeO8aLtN6Sk46pMbr8S9zLEQ50arsnAsq0yaUa+9oChtws9brhP3G/oR/qNfJfYruhtefXwEalcd0Rebrqe7AmC4Gho1V6ZXL1d9/pAm6BNGdjxPrVnSZc/joa9aW+TGYKWrA6mwUO8IWTcsxE
FiBy2FvVsaDCi1+MgRHuvsMcKcbEdDT66kuBjzjoEHzfiIXq3dYjeyws2ypSZc3VoG4I79GrCQbBB/hvZpv/SdeG+8ZEDgQl6SxDUo2cVk2MbGxt18Q4sBYz9fzCXDEO58GUXQQmGmo4cOVxGjRoh6DnBfiVYInjunGlSumCu1CxZICsaK2XdquWyffMK2bOjTQ7sXi1H9q+T44c2CIKVs8c3J/MTMMwLTl52D0XkQGY51XQ8Ywcz+WptnEV1SOFwq9MdO6GxM6u61Cmmwx19qYcjyt6TxNGPg5bIYY/0Jc5urIN1U1tq0zvS0MtAIralTjsd3dhhju2rE6wOMuoNxzkK0KLAgHWIbUCPYrIdbL0Xvd/YRowhlvVBnfWeaYP2knxcB5QnOtierEPUjqgn9bJNGIzRhkpjI1PPWGd8DXytW6Izbj8+D7ZPco+GH7cJ71UDJH0P4jbis9B7it+BWC/s4v3DexhNIO/UwHr/rlWyYU2tLK0qllkzp+hy2JhfNXzYMB0WPXXqVA060Mtoh3T+S//7/22p37+z0QwqzXxvNxDChcqgh//BUtKOGjPRlLdJPCV5lJYf4pJHafGsK6XlA8+T5SEur1lJW5ZPG1ZafeSTYyWv9SbJAc6maYe20/SQQ0kc+cynSc/zuDQ9loe059m8vwdyKS22t7S1ZdOsp5e96cP1XEdvfMsllmXMQ7KulLxGLCXKcfC6l5ZPjpe5OLxmbVg+r+eSITwcJ/Q24WsuHKZla3fLsKboi72uioVhQ/VXZVjNYZlSt1fy245qcIEgoWpn1Bui8zhMIEKHVocDxZPW/dd2XqOzneHQOY6HA8Vf7YGDHTjpsFu+5ZrMW39Fxra+lrXhIJz7ARW7ZNyMQh16ha/zuC/0inF4UKiN7PPhddteNs3ruaTtBdmxe5/Maj0p/RqjjRER2KFNhy49IJOX7dT5G1jeFvMuEMBltWe84lhWT4br9WBbqkyGbmWGVUXDtZCPAgzi/TPhs0BvDALCsi2vyay1V2SI7mcSL1KAZY2rDsi4mUU65BFfJw8fPnxX7wfbK1cb2TYHHtjQYXWAw5PlnkM9vJ5LkksM85AsC0nW3eJtOsRBgIK/MywViqBtzZo1OjQUw0QxnAvz4zCkC8sDcxEPLhPMoV1DhgzWYAW7PWsvyryZOjm3sqJQAxXsaYJ5KKtaq2Xd6uXSsb5eVw7avS2ek7J/nU7qRcCCyeX4Eo0hYJjsi6EwOOEw6hnPWckEM7FjrI5n5MzTyVdHl1/DnZNKTBTg0HnNOPp6PXbKLTb6ap/BpdpQ5zqjN3LWM7xEp62XBil0+C3WlbFe7J1gPg7eonuKOcm1yOnXa3Ts47ZJyhKssa0Of6Zdo0DJBiwRNqtc64Vy1CFT96y2MrYj+xmbia7QszP3mNQ70RW9C1fORbuQ433Bu4PV4U4d2SQHdq/SHj0s1IBlrctL58mcvGkyftxoGTx4sPYKopcD8y8xjBq9h/hQhGGO+J3G3w//Bvl35f+m+DeIcqYt1qY9l3nymCfHSl7rTVqOTVse7OWyCaw9PNfmiaUEj2mPo82kByQXGGQSqNBXinlvKC1v8bTt7TBvG4k8L0N2bJ1xPdfRGz/Epc4QF2W0n8ZN4/nyEB9l1gY5tMl8Li4xVobaGtf9YTmhtMfbvMdbmyFb4LLcc22e905b5Ni8xYfSIVsso37Ps+XEUrIOnsM8uRbPa2lccMgjlnxKlntJruWTA4kjzS7KPT+mKKcvPFsfcimpn3MWoqVJd8vCtj0yrCmz2pEOx2q4LoNrz8jE2gMye8WrOrRKe0O2XVdnFYHBXY6zGfLDr/d0hvULvTrSmYAjmpMQ73oeO8s28IBTDCcd8z1mrLkiw1ZcywwZw7CghhvSf9F+GTujWAoLi3QBBCyGgPsKTY62z4TtnKu9+KwshmnPR569Sxg+sKpjt4xrPpsEIRiO1a/+NRlSc0SmLN+t817Qa4T7w+aPS3ff0V4ibSc3jIoBRJaMez4ygZwbfsXrGhBGbRwFLAj0osBEA7xdd3SuT9nm12Ta6ivxZorxRpUInJadk5GzFsq8ggL9so/hRxhqhi//7P3g+5WrvdhulJZDHiUxaZK4kI40Dst/Ha59b7xN5qm/N4k2Q48JAmQs3oGeOmyeiAAFDhnmrME5w/LnWCADAQqGpmCFLgQp6EWBI4dhXgxShg4dojtCozcFDt/kSeNl2pSJMnPGKzI3b5oU5M+Q4qLZUl6aL1UVhbozfG1NmTTWV0gLgpeWau1twWT6LZuaZeeWFtm7o00O7lmjX7FPHN4kp4526Hn6aKcOFTtzrFN3pMaQMZznTmzRcf1wSENBDoMdDC3D5O4k6GHwA8kACD06wV4d9PLgS7sJjPQLfMbBzvQ20EnntUzQEA2Jss57jGHAYoKFxLGPv/7DMc8+MwFEpjwODqy+uKcgg0EvSKQry4YJSlAODHGWi3TCiwMHxbPusW3tzWCZqXuk27SBqYv2ZJzNBBh4vqePdWoge3D3ag1wsUrVioZKqawo0s09ERwjSEbP3lDd1Bg9fNGKkli9CvM7scADPg6htxBBB/6ueODvBgcl0739PaXxe+PZ6yEd9jrTuX4HgOFBvJfkA+fxubjgkUsepdUVsqcBCJUTDKBVSCLLPN7niSOPkuW04yVxfZV95Vu7tq423ZtNi/VpcGkjTQ84uGYl02kcq1OJ7p++2KVN2qIK2rQ2vD5iQ5L8NBnisCyNw3KL82livMR98F7IgQSOh+eE8p7DPLG0YfO2zNvyefIoyaUd4q0EljjPY97ibTrE9WXEoxwHdeaSrI/n2jwxVo8toy1ymAcGX2cxRAlLLG/ZtkMqWnfIyMZzscOc+fo9oP6KjKg9LlMaX5X8tmNSvP6slMV7WmCSeuQ8x8EIVriKndvXJXVVrG6dFwF9mOexaOt1HW41Z+1lGdf6WjLfA5sNIkB6qf41GVCxW8ZML1RHDcNetm3bpveD+8L9sS3sfdu2QhoY4pAnVhPxP8SFuLaMvUuYi3Tw0CGpX7dLxiII0X004p3EG2/I4OWnZOLyfTJ7xSEdqob9U5JhboHALqvnwvaGZC2RHAd2yfCqqAckCjoyQ63wXJLAY+ctqdh6XeZvvCKTMKfG7PfxUuNN6bfsrAzNq5G8OXPVKcYiDnCa4UDboW22rdh+tk3ZRraMOFzzB/Fp0uOpizKNZ23ZNPXl4vEabZBDyev2HllGSayXuA4e58NhaBvaGEP6MLcNDhvmnmABDSzggeEq2P0ZC3ZgsQ4M9cI8FAQrWG0QQ76wehCGfXFeig1asAmjruo1IlrVC5N/ORxszBg4jaN07gpW+8IY/YkTx8mUyRNk+rRJ2guTN3uqzJs7Qx3Pkvl5OmysoqxAnVEMtalZukCwmlFDbYU0NyzS4WTopcGk/NYVS6Rt5VJpb6mW1a3Vgrkwa9uXaQC0fnWtDtfZuLZOVxbDBOXODY2yZVOjbN3UJNs6mnVIGib1I0jatbVVdm9v1SFqCJgwv0DPXe2yb1e7YPiPnrtX6QplWKUMQ9n03L
NaDuxZLQeTc40c3LtGFwvAggGH9q7VPWAO71+rQ9+wuhlWajp2cIMuw3z81Y3qkJ88skl7ABCgwUk/c2yzDo87mwRkd/c62QDMrpSWPYTu7iF1DL5y4yIeAjj2UqDHC71fCBLPHN+s+9pgGB+Wuz12cL3gHnHP6MXYs71VtnY0yfrVy2VlY5X2si0omSN5s6bokrgIcDFkEBsCYnU4vEt4t7AwA947vIfo3cPvcWdnpwbY+D20c/H4vkPmOiwOaZ72byyNn8alDsjQwevWRqgsxGUZ8WmSOErgeCDtbds8cVaSH7KnQ7AIDgFsGQ0BT6UhLnGWi7Qtpw7qIdZiWBaS1q7VFcJSZxqHfOJCOlCWdhAPvtXBtOV5PeRCEs8y5i2faWJ6kxZv05ZHO7aMacuxaV73XJsHHjh7kBeS5Hq8zwNHLPX4PDgh28RRAoM089aWTRNHe2nScphOw/pyi2caEjhbxzSe5TDtsWl54q2kXc9hW6Gch02jzHNsnnxyvQQWzjKDEIy9xRfY5W0dMqHxmAxovBY5+ehlwA7YWAGp4YqMrD8pk5uOSV7LMSlcc1onVGMjQMztwMRqBA7YVwIb5mFYj54MLqyMrwGHTQ/BAx/OMCaYF224LDPXXJYxLVdlYNMN3eFcV+vSFbtuSr/l52Xw/NUycfpc7c7HXg6oP+6DwYf/Om/bJy3NdsJ1eyCf6/2w+uBEwoHE0Jt9+/ZJw9rtMqH5VBLYJStKIbBbflSmNh5MAjvMnUEgEhrqhsCBPSDRXBEz5IrD4WxvhwsEEXRgnk3U3lFbYxnj6asvy/CVUc+STjaP27j/0uMyPG+xzJydp/M+4EzACcbEc7w3tn1te7Et+A5SspzStq9PA5OrvYEHJnRQv7fLfIjDslzc3visM3WEpLXDNCSxtMG8l5Zj08DhvUOvH3tWMOwLvVUIGrnqIJZIx2qDWCUQwUtlZaWuHIiJ9Jj8i6WHsTQ5ViBCMIPx+Vg6PdQDA6eTZ7R8Opc/H6a9M+ihYS8NemrSTuzx0JcTw3h6OwcNGpRgrE70EEVBVxR4MfiCA61zceL5OEkQNnqkIBCDs42v+tgED3N0pk2dJNgXBnMZ5uRNlfw507V3qahwlhQX5Ulp8VztZcIqaJhkvaRyvlRXlUjNkhJZtrRUlteU6YpPGDbXWFchTfWLtBfhruBs5VIdUocAbY0J0DAnCCcCNpy4hhPDnoDFMDxM8EaQt6KxSprqKqR+ebnaRmCIDf0QKGKe0fyi2TrnCEHlpEnjNKhAG7GN0X54ZniG0ZylkRpk4J1AjwaXr0dvHVYcRHCMj1mcOI530r/b9v22aWL5TjMPidNjbZ4cL8kN8XnN1o98XrM2WGZ1eTzzVqflhbjEegnbPKmD9bF2bNriiEVZMgSLABpD3gJ5HZKHTdsyi71LR/cd6em6Jt13rkr3ncvSffuidN06L103z0TyFvKXpOv2Fem+c016gO/JnhtAW1Zam0jfZdfU2/KY9nyfJw4S1+xhsW+0XW/L24U92qS09SGfknyPCeUtlmnKEN6WEUeJazwsLpQmDpI8K8nx98u85ds0eZDEsox54lnu88TxupfAo4wH05DgpvGJt/xfh+v10C6kP1kX2qQ9myeG0usgNmQXWPB4kmt1eb7FMAjBl1Y47/hPpKF9o2DJ2MENFwRfwRF88ER+YMNVGdZwQcY3nZTpK4/L3PaTUrT2rJRsOK8TzjHxHJPONSjZdiNyqOFUxyccbGx0iOvAYbO7BR2XpWjDRclbc1Emtl2REStf08CDK0lhSVvU4aX6a9J/8WEZPne57vWBpZexlDPqjfrjPrxzzPu3bcW0bQumiWd7s9y2KctCEjj0DsAZxERtLD1dv3qLTGw8Jv0bryeBnTr76A2pOy9j64/J9ObDMm/VCVmw4ZzuwYEeIPYw6XA39oz0tZcpDvjAxTwTBDVo+4VbrknxpisySwM8284INqM9YfovOSIjZ1fI7Ly5uqcPNv/CvI/QvBq2F9vC59PajTgrkcYBXeClcXHdHq/HNvV7HSxPs8lya9emWQfimLeSNrxt5C3Ppsm3tnza88mx0trG3z2GwOAdZS8LhtXhfcXfEQJNBDAHDx7UIBrDGrEcKnoYsSwxglEMF8OQGsxraW9v17/DFStW6Jdv9M7AMeWy6thXBfNeEPBgA1sML8OS6ei9wd8wAiB8Ncfy6pgfgGAIDm5hYaGe6OHBfBkER3PmzFEHGEESvrZjkj+CJZzTp0/XHiA4yTgx3wBf5XkimMJ8G/QOIajCiV4ifL3HELfQiR4knBj+xhNDi3hiSByHxSEgSwvGbABmgyOm4fgzrQGTOv8ImIbq8DpM3EawBDliBIKC+ByGICE6IxvZAR3tMiBk/VBn3APuCfvKzk4AACAASURBVPeHe0fboB3RzngOeFYIWNHjhsUV8A5gCJXdjBeBb9ph37+0d9S+l16P54fyng+MLUuzG9IV4ubC+foyT07o75g2iLWSPMpcfOoh1kryIJNJ6ATAINIE+TTyoYN8SvCjs0uDjTs3T8ud1w7JzXPtcvXVuXJ5zxi5uHOAnNvyIzm9/ltycs1X5MyG78j5zT+RizsGyOW9Y+W1w4Vy6/wquXPtsHTdPKuBC4MRkcihYl1oFzJjOztwwTUcFmv5vGfyQziLt9fJRZlNI8+Dactj2nNYTg51WIlrlmfTIb7VRS45lCGet4m8xYHr+cSQG7Jtdfg0eZTkQ3pbaVzL8Zg0HWn1pl3Ls2nqz8UnpjcecPawttO41q7lk2sl0va0XJv2nFy2LY9p8sHjmcsueDzIpTMCRwRfsPDVtH3Neimt3yBj6o7KgIarGohg2FPkNEdDoPo13pCBjddkSONlGdt0ViavOCUzW05KXvtpyV99RgrWnpWidedl/voLUrwhOudvuChF6y9KwboLkr/2vOStPi9T2y/I2JYrMnTFNV15SYcrIdiIAx8dbtV4Q/otOyODStbJuJnzZW7+PHVuMOEc9UW9UX8/6Rz3ivukZBtZaduLWNtG5LK9wPUc8lgODIIQDKPBfJQ9e/ZK06pNMrNhnwxquBwFIbhH7W24Jf0ar8vLDZdlTP1JmdZ8VOa2nZCitWeSHiYsAKA9I/GQNwxTQ1CRduI65s+gVwlzTBZufk339SjccEmmr7okIxHg6e7x2c/0pbrLMrB8u4yeUaxzPpYtW6aOJoIPDAXiMAq2AdvJtpFNs53Zdmwftpfl27TF0ZYty8XHNXKsffKtHZ8m1/LJo/Qc5skNyTQuynF4Du2TR2lt2bTlE2ulxTJNSRxtUrLc4pBmOXD47YADincdgT/+/vAFHCfeFZwIcnDib4Eneilx4m8WJ3rVQieG7vDE+8cTgTB6GHliiFpfTi7ljr9JeyLw4onfEpwYaghHG+8+AjHM08FvDT52IBBDzxKccvQuYWEG9ALAUUdvrN3bCr1NCMQYhDH4QuCFoAuOPoIt9D4h0GKAZYfUMaAKSQy5Q9CAQAwc9E4gWEMQB/0I9GAbdUAwgaFR+GiD307UH5u1Yklt3C/aB22Kd
sbzwPPCs8TzxbPms/eS7wXfFSuJxfvi3y1c4xFKo4wcSuqjJJ+Sesglj5I8iwtxiSOPkuXkhCQx4Hger3keynFAvh4e9VAv+czf1QPCC/ckuxF0XJM7116V66eb5NKecXJm4wtybOVH5NCyJ+VA9eOyb8ljsmfxo7Kn6hHZXfmI7F4UyT1Vj8reqkf1OnDAH1/5cTm76ftyed9EuXF2pXRdPyo9Xdelpyf9pfP1Z+P58r7k74XbF/2/Kcxva71/U+3xO73ZgcdvS3vgPxc4EHAU8B8Q/tPFl84VLW1SWLtRxtUekkH1FwRBB3pBNCiIh2ZxaFQ0N+OmDt8a1HhVhjZdluFNl2VE8yUZ2XxJRjRfluHNV2Ro8xUZ1PSaDIAu3SsD+0xkJj3TKVc79Vel/9KjMrh0k4ybvUD/c8V/4PhPHvXDV1rUF/W2PR+/Tru/0X/LaFP8x4264T92fDlcv36DLGzaKKPrj2tPkm1L3Dd6efpjyd6GCzK28bS8gqCu9ZTkrzqlvUzF68/Lgo0XpXTTJSnrvCKYO4Iep4Vbrqos23xFyjovy4JNl6V4w0WZt+6izF5zSaa0XZIxK6/o0sBs8+i5ZebTaM9S/nKZOmO2Oi5oYzha+BoOJxCOCJxN/of6RrdXX5/Zm2W3r/VLw91Lvd8sbtq9/K8of7Pu+V7s/qbaBX9zDPb4N8i/Q9p8s+r9Ztnlff+68l7q/WZxf517TQKQtKgGN4MDyim9oeRl08DjNblzdZdcfTVfznb8SI42f0wOVD+hQcWuiodke9k7ZGvJ22Tz/Aels+gB6Sh8QDoK3iqdPAsf0PLNxQ/K1pK3y/byd8quiodl7+JH5UDN43Ks5ZNybvPP5OrhQrl9dY90d13P+k9HKxn/wzqn1Zv3wfqTa3mem6udcvFwjXa8pA3at9LWkbbJ9/bII4fS4qnDc4nNpYNcq8/zwGeZl+SjnDjaowxxfFmI6zGhOtIubXlpdVg+0x5v85br073xPT6Ut7ZsOoT1ZRbv08SijjxZBpnrII73xzyl51p9xOSS5APD/+Dw1RIOJ76IYfgQhlnUNrVK4bK1MqFmtwypPaW9IlEwEgcOnCsCJxqb71GyFyO+TqdXh1Opwx0N+2E+CjquSb/aizKw6oAMm98i4+cslFlz5ulwjYaGBq0PvkSifqgnv8qj/mntxHLeL2Vv7eWfF3mQfeUyCGFghy+ny1eukbzarTKi7oT01/k28VC3OBBjEIa5OIMbr8jwpksyuvmCjF9xXia3nJOpredkWts5mdF+TmbiXHVBprdfkCntF2RS20UZ33pJRq28KsOaX9Ogwy4HHD2DaJd2HdJWfUKGFK+ScbNLZW5+vn4txZAaDLvBcBx8CcWXbTo+vG9Inmxf5omx7WXTFoe05TNt8TZtucR6afE2bbneLq8Bj7Q/eD2NRw4l+cxD8mR9mSeGHEhes2mWkQ/JtOXaNDmQxLKMeYtnmhgriafEtVyH5aal0/hpeJZbHsrsQQwl6wvJNPG9cXGdHOoD1/KY5vVcknYpyaVO5GnP1tfiyLUyZJN6LJc4cpmntByUMU88JfFWEmvLkLaHzXsc+CEd5Fsuysgnh5LlvfGAsxyb9raoy9u1HJv2eOZZN9oGhyevEUvJci9pz/ItB2ndCZ1ArwAAllkiy6zs7u6SO9ePy2uH58npdV+Xww3v014MBA/bFrxdNiPYmHe/bJz7Ftk45z7ZmHefbJx9n3TOvk+2zrlfduQ/oBL5jbPi68DNfYtsmne/8reXvl12Vzws+5c+Jocb3692rh0rl66bpwX2cx22rmnpNH4a3pb/JrjQaW2E0ml230hurvcjZD9UT68jxOtLnaE77QjZ9WVp3JDtUJ3T7Fs7loc082m2LRdpy/Fc2rcyxKcee83aD/Fpi5KYEM+3V6jOnm/zSPO09myadi0OjiYcTgyRwLAGLBOKYQfr1q2T2qYWKa1ukplV7TK6ep8Mqj0jA+ovS78GTFq/mfRkwMmNJjQzQMGX9nhCe3ItntfRcF1eqrsq/WovyIAlR2VIyVoZPadSXsmbL/kFRTpeHGPKYR/1QH1QLwwP8I4x7wPS3ifL2aaUKOdBTC4usZTkWy75fF6QCEIQKGFoA5x6DHtYtWadLGlok+nVnTK09qT0j4e6RQFcPDyLgVs8JA2BRP/GGzKg6YYMbLpuTuSj8v5NN+OliuNAUNudzwGBx01t7/41p2Vw8VoZO7NYZubl61h8DM/AfA8MN0GvDd6BUBvj/v09M892ZxuFJLFess1Qbg/mPT4tH+KiLIRnfSkt16dDfJR5Lsr8YbnEU4bwlu+54Fkur4PjdfFaLmlt2bTn0Calt+XtWz45lORS0i7zlhtKEx+SITzKvG1ycY2H5Vq8LSfWS4tJS3uOzXuOt2+xPm25nodraYflIU2ul+QT7/MsT5PEW+mxtGnLLd6nLa6vXHBwWG4o7W2RQ+k5sG/rEOKncaHr1+Fam2n8pAfEV7jP+e4unUB+40yDnF73VTm0/EntrdhR9g4NGjblv0U65rxFduS/VfYWPCjHSh6WSxVPys3FT0tX9btFAmd39bv1+sWFT8ixkodkT8HbZHv+W6VDg5G3yOb5Dwj07138iLxa97Sc2fRduXG6QSe0Z+aIZDdxX+6HDGDt8Xq4lod0GjftgfSVT70eb/PAWDvkUFqsTxNjpdflOcxbDtP3woUOy6cdL2krJMn3HJvPxcO10MFySNoI6bE46LH5NLwtp21fhry3yzyltWf1sNzq9Poshlxb5rkhPnnE3iufvSEMRPD1HntboEcEX/AxfhhBQeXipVJQWSdTK1fL+MVbZOTSvTJ06UEZXH1EXq45Li/XnJSBy07JgGWnZCDSNSfk5aVHZdCSQzK4aq8MXbRdRi5YI+MKl8mUOcUyO2+OjonGZFXoh53NmzerXdhHPaxTjHrinnnw/q3kMyLOXmMZ28ticc3naYd48kNYcMmHRGBnh7lxWVVM4l1W1ygF1e0yvmaXDFt+XF6uuyD9EJjdNeSNgV08YTzpeUKAwTPGaNCC4XI3dCdz7VXCcLaFW2XUvOUyZWa+jjnH5GCMWcekYgQetmfJjv3mvdt7RtqevF9iyPES1y2WOmyZ5yBPXIjfG9fzrS6rL2SXZZ7DPG1bHNOUxKZJ4rxMw9Omve65yON6CGt5SIcOj2He6vM8qwtpiyWfkvWzOsjvC9fybJr6vfR1Icfa9Jy0PLmUaTiW0zbxIUmsl7m4wOLwHJ8P2WOZx9q8t41r9kCeGMuzaeIt1173aa+PfC/B81ibB97atPkQ15Z5WzYPXOj0ti2H6RAvVGbxNh3C2jJiKVMDEAIgqcCmUYYbunPtiFzeP1GOtXxceyZ2LnynDq9C4LF57v2yv+jtcrbsMble9ZR0L31WZMmzIoufFal6JnNWPis9lc+KVJoyXF/8rPQseVa6lj4r16qektNlj8q+orep3o78+2VL8YOCYV37qx+X4y2flKuHZknXjRO6ahbryvvgPXgZeijA2IP3arHUwzJvj3yPY546IdMOi/Vpa9fz
qZMcYpmn9DybBwa8EJdlwAPHg+lfh2t1UD/0hE5irbS4tHoDwyOUtjpCaXK9JPb11Nvbfz1ca5+2c0ngrb3XwyfW89Ps2fvwdq0OpC2W+nwZ7VNaHBx8OKEY/w/HHwEAHFTMvcBXfKyGAwcay3tiwnLV4iVSsmipFCyskTnltTKrrFaml9bKtAW1Mq1kmUwvqZGZxUtk9vxFkj+/VOYXl+iXdzjB4EMPgg7ohX7YgT0beNAptvfh6857oCTWtxd5LCc+JIkhBxgeIXyozLYnJt9iIizuEUsIYyJrfUOjlNc0S96SVTJpcYcGdENqjsrA2gvaQ9Kv4bWotwnBiZ5RcBGlr+sqYdgfBbuu96+9KAMQ7FXulmEL1sqYonqZPG+R5OUX6KpDmJSKeR6YiIohbZhwi8m9aT1L9l6Rxv2hXdm2vF/miQ/JNC51UFouy3Jx02yDg4Ncq8uXWZtMWzxtsAx5ltGG5dk0cZbLNHGUKOdBTC5JrJfkhGyzzHOQtzzibBnTIS7LgHk9XOB5UL+VVhdxkORZCazF45ots3ybpj3PJd/aIM+WWRzSoXyoziyzdfRc2gtJYm29ra4Qh2Vp9ewLP2SX+ihpBxJlPHjd1plllMSGJDC2jsx7LvL24HUvbT0s3qbJsdhQmecgTxxkiM/rxFKinOk0HssVaP7RIVhUbA0bTFYyg+2WrhundMWqI43v116PbaVv02FWm/Luk8Ml75Qrle+S24uf0SBCA47Kp6Vn0dMiFU9Lz8KnRMqfkp7ySCKdnAufiq5XPC0CPAKTWM+txU/LpUVPysH575BNc96i80e2l75D9i55VI40f1Au7x0j3TdPZtUZmUy9w04trqcd5LIRmbeyN67FMk19novrPIjNJYn1khzaYZ6SeOT9QUwa13JsGnrI9dLq8vaY9xybJ5/YkLT4UDrEYVkIb8uAQ94fFhNK23qTT0mdIZ4to03yrIR+2rAcmyaf9pi3mFCaOEpgeCBN2yH7Foe055IPydPqId9K4sgF3jrOcFAxLwDDoBAcYEgU9rvAEo3oreCeA9jRmSvCYGgP5m9A4ms7yrFiDIIN4MEDH3qgD3qhnyuxoCcmV+DB+rPu9h5ZBomD0qYtJi1NvOWrwl7+Hn1dkGdvCIZl4R4RiOC+EXRhCBTapra+USqq66VoUa3kldfKlPJmmVC+SkYv3CAjFm2RYYu2ydDKHTK0cqcMXbRNhi3cLCPK1svIkhYZXVgnE/IrZUp+ucwsKJWC+Qt0Ds3SpUu1/RHsYBI/ejzYs4SAyK54Y+vN+7QyrZ18ueUw7TGhPLFeAmvrZrks9xzmc3Gph9iQJIZ2mKe0HJTZgxjPtXmLZzqNx3JK4r3kdWvHl3kO88TlksDieuhAecgu9ZET4hOTS5LvpeVY+0jbfBqPGEqrD+m0A9do4/VyodPbCeV7s806kMt6eB6u8/BY5iHT+J5rOT5NLCSu8fC4UJ7YkAzhUdZbnaGLOGKpi3lrD9fIIY6SeOYpLZ9pXiOHEuU2TbyXxFksdVJ6Tq89ICRC4oDs7r6jE8DPbf6xHFz+Ltm16GHtjcAQqb2FD8rlyie1t6MHQUMlAo44oCh/l0jZu6Sn9MnoXPCkSEl09sRSUMYTuLKYg4Bl0VPaa4JeEfSmXKx4QnYVPCjoDcGk9d2VD8uh2qfkwtaf6dK9GI7Fw95HWgMRS2nvObrvaInLvvKhp692aYu2KS0/lCaOknpC2FBZiIcyYNPuk3rIJZ7Sc3PpsTosnzbSpOcxTzxshuzShsXbNDmU1AdJLtPksdxybJo6iLN8XoMMcex1pmmX+b5IclgH5nvjEg+cP3rjhjjUYbm57pt4K8klDxInAhEGIwgK4EAzIMHXc8wbwBKWcGoxxAiraeELP5axxFKPkMijHNcxHwJ48MBnwAG9NuiATdaJkvVFngfSrDNxVhJnJfkWF9JhOTZtebnS4NjrbE/bw4Q5ImgPBnVYhQpDoxCorVixUuobGqS6ZpksXrJUKqsWy8JFlVK+cJGUL6zQs2JRpVRWVcmSJUt1HwYEfAgEMX8GvUqYQ4NngOeDNkd7Y5Uu29b+3v29Mm/vJZS295vG8basnjSOxYAf0kEu68A8uWk8XOfh0+RSptm1PGsf5b3ZpW7LYzoXN5dN6sxl394zdVleb2nyWVerg+1EmaaLXOoCri8c8mjT5tlmIT3Wjk2n1c+WWxvk2rK+1p06qYP5NEkbxNt8bzbJAY4H07THdqJkucfZcqbZ1sxbSXuQKOeBdMiW5Xq85VpcSA+xVnpOiEebxJJvy8mDZJp4SvBs2ubJoSQO0uJ83uJt2vOpo9dJ6GrN/ANFd64dkgvbfi6H6p6WnRUPSef8B3R+xvHSR+T2kmdElmJI1TNR4BEHHVIaBRY9C56UnpInRIpznz3FmeBEA5LSOBBBTwl6RqqinpWbS56WIyUPqX3MDdm96GE5VPeUXNw5ULpuHEteKNsAvTWMuV1NkkseJcspPQ95Xssl03h94ZML/f5Ama+rzRNvuaF6Wo69bvm96bA8i6UOSuLSbObiQgf5uSRteRni2HpQv+WFOH0tox7iYcvaYzmkP+w1m07T4fnMey7ytg7AEcM0Jcs9h+XWBtOUnmNtkm+l5bHccux9M43AAF/z4URjfgO+oDMowZCt0Dr/KMM1BC5wfoEHD3zogT7opQ1bB9YLMnTY6+RRWg7TVlou0pZHHGzaNOvQGzcXB3bYjggE0CZoIwRk6BlBkIaABMv4+n0IEFRgCBVP5DFHB4ELelPQo4SAA8OrENjYXiW2OdobdfD36+vMvJVI29PqYNt4afG50p7HfC4OrxFrJa/1JsEBJnSg3N6j1+U5Vg+xafy+cKkjJC3/9di1WKsDaW8nre5pOlDeG4c2erPtdRFv+UxbmWaffC/7wgUGByV1kAubr9cu9VmeTVM3bVnJa5R95dEmebkk7QFjD8uhXUpe83jmeb2v0vKQtjxvE9d4MG2l5TJtdZBLOz5PrJfURR5t2jwx5DJP6W1RB6Tl2HSICz2pPSAkeIn9Pa7snyhHGt+re3jA6e/Mv19OlD2iczUwVErQ64HhVei9SHo5npAeBB3z47PocRF39vj8/CekByd4CFpKnhANYKC34qnIzpJn5c7SZ+R46cM6JGvL/Adld9UjcqTp/XL14HTdBNHfwxudDz28N9rG7/RFzsS9tPW9cH9b2/9e7vleuP8S2gs/gPakIw2nloEJgorQSQw4NuDIdV+/7e2V697Yjmw3BnQMSLDkMIISBBEITLBxGoIKe6IM19C7gXkz3FAMAR/0cIgVbLDNc9Xpt/Havbwjbxb3zWznN+ue78Xu79orO/B/M9ujN9v38pzfLG5v9/SbvP5G37MGIFCKAxWntDeRRDLd3XLz7Ao5vuL9OudiC/bzmPdWOVvxmGDlKg0+FmFeBwOPuJcDQUccXPQUPi5S+LjkkrymOPKKHk+CEQzX0mFcsIM5IlXPSFf1s3Kq/FENhlAvbHR4ouWjcvNMYxKV8d70Jk2EynLec3K/5gsaOVb22l4x33KYpq2
+SHK8TOPa+nsO82lcW26xTFNaHO1R4lquw3KZ7o1LncSHJHWk2c7FsdfIt2U+TVuQTAODw0rP83lyaZOSOOpjnnhKlPNgmtiQJA+SaW/D6rM6LIdc2vQ6LA9pci2PGM/1eXKogzzKtPrSLnHUw3xIWhtMk0dJnq8ny62kDpaR4yWvU1revdgltzd7tBuS5Npr0MvgzAYl6CVBIJHrBAYBjO9VYtBh7x22edA+8kxbyXtlGXGWzzJi0qTn9Mazti3W6gnZsjxeJ783LvFWkuOlxfQlbfmvBw8sDnJsmmW5JO16PcjzvaAkxnNs3tpKa2tbxzSutU+d5OXiEEu+zSNtD+Y9xuetXV5jGfO5pLXJdC582rVfh+s5yKfpt8/L88DhYfmWw3KP83ni0ri0FZLkUnodtAVJjE2jjBxIpi2WOliWJi3f2ugrn3qJD0lickna9vwQJwlAcJFED2SjdN86K+c6XpCDNU/IjrJ3Sse8t8jJ8kelu/pZwXwPneuR9Hqg9+JxSXo14sADQUWuU4OPgrsDFOUUPS7dhY/LuZmPyMXZj2pviCAIwZCsxVEQcnTBwzoRfkf5O+Xgsifl3MZvSvedK/rwfYPwfin9fSPPew9xWZaLh2v+sGUhri/zfOY9LpQn1ssQ1pd5js17rM9bLNK4zsNj7R+OxRFPvuchz+djr1meTVuMTVsdtEUJnE2TRw4lccQqyfxDHiV4nstrpFEny9OkxZPjy0Jc2gfW81jmeeRQ0o6V5BBj8yhjueUwzXqQQ0kO8x5v88R4SR0WyzSl5zBPLvJpB7GUlsMyy7W6kPZ45G0ZuZ5H3V6GuNBh+cxbruUR66XlsZ5WMjgJSYuztmya9eE90x7zvG6l1cv6Em8lrnlb5FKfxSNNfbxuZW9cq8vy0tLE47o90vAo9/cT4oYwVqe1ZdMWk8uWt0kdvfGBs1yf9nybpw3KXFz/nCyWfEprg2nffsBaHUwTT9lXu8SnSdpPs8vyEN9yeY9egkecT1MnOcjbI4QP3bfnQQd1e2nrwmu0afXwWpoMcWyZ5XmbxEFam8xbLtNWB/nkEtMXSa6XIS7bmrbTOChP47Pcc8mhJM5KbxfXeFicT/87KiWYeQ/EhO4bp6rl2IoPyZ7KR3S38l1Fb5M7NfHSuljhij0fxdnBx+VZj8jpKQ/LqVcekrNTH5Jrsx+RnoLHMoFIwWPSU/hYUqbpQnPdBC2XZj4sQz7/57Lsn++TbgzNwhAv9oQsfVZuLn1GdhY+qEsB76l6VI41vltunmm46+Wx95t+z5muRGI8j+V3t5fhYjJ8d5f0dN8Rwdl1Ozq7b0tPF8470oPrPZlJ7nyg0Jt2pNlM41pdSFuc1cVy2iWPGN4zcSy3eXKJ9ZIcyhAX1/xBfG/S8qgnF4f2QzyUgUsM9fi85fp0b3zaAI5p6qA9K2Gb9omz0uqxPKbJJc7bZN7iyPXS2mWamFx82iDH5smn9Hosx/KY9nibJxe6/UF7Xob4tEUdnoM8eZTEUgKDI8S1fOLIo7Q82rA8ltGGlUxbHUyTh3yug3gvLd9f60ue/DTb0AEMccxb3Z6LazhCWOqhJM7rYDlxtIe8LSPO84n3klzicd0fKCOOfOYpLcfqyMWlLouHHubTuLAZsmvrQD3EUZfNezw5lOD4k3zLBcYetOW5tpz4EDcXj9fIt5LXWEfkmaa0eKaBw2HxIV3EkGfzxFsJm7nsUo/lME0epcfaPDGWa8tsPcljGXAWyzzLiCMPNnggTRxtW0kcpedaLNPU5zm9ccmjHounLkiUeyzzlMSRR13kEmfzLPMc5qkTHHuCRy7LLYdpXsslaYMSWKa9jZwBiAV3374oF3b0k0PL36WbAHbOu18uVT0pPRh6pcHHUxoMYIJ5T3HU84HeDJw//uifyvvv/335wP2/L594x3+VUc/9hQYjCEIQbEQ9IumSeiBfHf8O+cADvy+1P/tr7WHRuSGY4I5VsjAxvfrdcn7RE7Ix/y3aS4ONES9t/7n0dF1PGkET8T+2cXprVMuzaeV1d0sPTgQTt69L9/UL0n3lhHRfOCjdp7ZJ15E10nVwhXTtrZWu3Uvkzq7F0rWnRrr21knX/ibpOrpeuk/vlO4Lh6T76hnpvnlFuu/cjPY0MX9s3q59Rmn1txym07C2nNiQtDibZn08BxgeFh9KE2cl+SE8y9JsUw9xXpLny30euN6wIVsog64Q15cRS9vMe5zVRwzKeJBvcWlllu91wG4u21Yn09SXxiWOknif/3X50BPi2vvwNmmb0mK9PnK9JI46QpIcXrN5b5MYKy3epsmltByftjxeAy8XFxwcxHvpuczHtITHPHUR5/UxT5yXvJ5L0hYx1ME8bPdmn1iv6/XwqCOXpD5bR6at7d7qTE7IFm34a76c9rwuj/N6cuVz1Zt2KHPp4TVbF9aX1yCpy+LsdZbn4gJPnOUyTRtekpeLSx2UrAd1UQeve2lx/lqIy7rQjudQH3H+ui2nDnI81uc9116nDmAszmKYJjYt3xs/F89zmfc2vQ7iWE4ZKk/TRU5I5uKEbFAHeDjS8rm45FDGqrL08VpI0q6XxFrbWQEIACRZEMpvX9kmp9d+SfYtfUx7F/YXv11ux0OverDMbhlWrYonjWO+BoKPgifkysxH5P7//nsy/PN/Ls2/3fHJDQAAIABJREFUuE+mfe0v5b4//Y9S/OL/0B4PBCG38x+V12Y9IpdnPqK9I93zomCka95jmr8042G9jvy6l98qz/z1f5Gtwx+UrsLH5Xr+Y9KFieoY+oVlepc8I10175YdBQ/I1uJoLsiZtV+UO1e2673x/rzEPeKeebKxKElGntzoGoKOW9J986p0nz8gXQcapatzntxuGSa3an4ot8q/LDeLPik38z8kN+e8V27OelpuzngyOmc+JTdnPSM3894jN+d9RG7O/4zcWvgVuV3/C7mz9hW5s61Mus/ulp5bV6OeEhe1sm6ss31mTLPe2XWOol/yoId4nyaftqy0fFseSns95Fq7IZ4t4z1Y7uvhQ5flWt1Ms55ekktcSHqOzYfwtiyEZZnFpd0vsZTg4IBM41AvOV7yOmVID20AYw9yckmL9+lcPG8LXFtmuWl19vaYBzfEsTqJ9dJi0tKew7zFw36oDsR6SW6Iw2shDsuISZMW59NpHFtODiWu4YBMqzPLyfHS6k9Lew5tEg8btMMySmLTdJAb4nsOdFIf9adJcnmdefJpl9cpWQ+Lt2nickmPZ54c2mDeSmIhUW4P4tLqbrE2TV4uafE+TV5f6g2sPcjtTZJDPvG5bFos+ZC2nHpCkhzimaeOvtj2HHKtvZAe8rxt5ImntLos3qc9LsSnXSupBzLEoV7eG7nksZy4kPScXFxbB6Y9vy82bT3It5LXaYN5Ly3Hpi0upMNjfZ4cSqsPWOR5MG0x4JGbrIJliRas6e5uuXGmSY6v+ICufNVZ+Fad+9FT826RqnhTQQyFiode6YTzeNjU+pffKm/9s9+TnSPepj0dxya9U576q/8ss7/5fwsCjcszHpZlP/n/5Bef+FP51r
v/UAb97Z/J6SkPyZ38x2Tz0AdkyGf/TMsHf/bPBNw53/5/5DMP/4GcfOUh2TzsQZnwlb+UVyc/FM0HMb0gR8sekY7Ct+pwsRMtH5ObZ+rYJkF51z07Z/8uUvcd6bl5RXouvipd++rlzqrxcnvJi3Kz6FNRQKFBxhNyc0bgnP643MA54/HwdXAQqMz9gNyqel7utI+Vrn210nPlpPTcuSkS72+COvNBUvr7uKvecYHF9cYF1h+98T3e5sGFTdqlpE6LZZp1ICaX9Bybz8WzNsAhlmlbZ17zklhKqzMX3+KtTpazjdJ0EEdpdVgO9djr5EDisHUmLsQjjhzmyYGkbUp7LbKW/a/VQZuUIa7F27Tl2LTFUB/rT+nxxHlJPOX/z955wF1WVWc/IoIoYIFQRMCG0qUMzDDDUI01xt7QGI01RhMV8xkxiYmx19ilw9B7FVFQUUGU3qT3Jr0Nw7x9fb9n3/s/93nX7HPuO0y+mHyZ8/vdedZe+/mvte99L+bunHPu1fww1r2KOWDhpcT0xevKHApXY+XRgTdrZph3hrgU6qgFK+XwmDrKsWZniGGzwuGraWa8p/trzzuz8rfxXst9tRrubetLDeed64qd8biL8TlniH2+tmbN58NzztdiZ+FcZ/L+8LrUI9e2ZubxuzInhUeZc7/HzHep+xXLy5G5mfalTvZ7PXpkdc+wGFY+HcP8eR4ezfO1MV7vRw6/nnftucPgdxULg1JPykGMKu++triNx1/ryRxaq8Fcm8Kg8nHAtPWWL/tnysKhXou+qPo3GxCSNZ2cHItFtx0et/x0x7jquG3j0kO2iAdOmBtx6i69Hxrk3g+dieif/dBlVVML5pQzHltv+JT4/Ze2jKu/8uL4/rueE6/Y8mlx/mc3L2c1vvTm9WO3TVcvG5D377pW7LzxanHjN7Yq86968dPj7XPXiO+8c8M49G+eX+4f+eBua8Xfv2yduOhzW8Trt3tmfP0dG8bCA7cf3Auiy8FO2TnuP2FuXHjwZuV3Sm47c5ey/tpzW+qcLot69L6YuPnsGDvzn2Pk8Df2zm6UMxuVzUZtAzLD3GJ8qn3Qy2L0uHfF+KVHxNQDN8XU+OiM/gNY6ueXNl7L+enXSi5/PZa/HsvfA8vfA8vfA8vfA8vfA8vfA8vfA8v2HigbEHZCqF5UjvICT47EozfvHbf8ZF5ceew2cfmhW8YjJ+5YfnCwXPZ0dO/HBfWbHX72Q5dMvWfnP43Nnr1KvGX2GmWjsc7TnlQ2IeMHz47TPrFxbP+8p8ZZn94kRg/cvlymJf/9P5gVfzV/zfjrnf407vnets3N6TorIv/b564Zr932GfHVt20YIwfO7v22iH5JvVyGNb+s6+GT5pWvCL7ymG3KxmnhTQdOu8lbz0vP158zz7s85+Y10As8GVMjC2Pinqtj4rKjYuyne/Uuq9p7h3Imo3eWY06M/LD/0Mahf5ajOQOiuSY/2Kg0mwxYfNNq9+oV/oDdYvTkD8XElSfG5MO3lcu/olkrf7Uld+c8T3/OA/fAT47XwLmck5fXzGN8UnhyjOnjigf1tZKTtvUiLw8s/eDpRx0YFJ8rNWqs52qMcpmHQZ3LMazytSP7GQ/jqIW/TfHVtI2htzPy+uEsfuU8dr9iajibGeZmytb4zHb1znyNhWdtWXnOsJrPR2Y0hnO/x9QYxrYx4rtY+uOjH+osXilxrS8sNeVxxmP3eux94XPO/fQi5176+ZqZr/mZG6awaPbTj/7M43dlzhWOOprrOpaGzbXaWNaQ/VoHOWeJncPnayeHXwqDMudcjvGI8Zix+5knxzj3q7EwKKwUHg51L7G0i/W5NsY9HtNXuXyQw483Kxx+xtJhrDM5hkVzX/d7T+87jPUaxDBd6v3gyMGxXo09zj7nYTPjeXhpGwuvvrXeuYbX99hZ7wXvXsX4s+JHmw0IRgrJQKMpbUBu+l7ccvq80NfbXnHIlrHwxB1j6pRdevddlPs/5paNgO794Gt2tXnYddPV48tv2SDO/ufN4sx/3CTes8ta8dY5z4y7vrtNvHunP41PvHLdsvkYOWD72GOHNeJLb9kgHt5nVrlP5Pi/f2Gz+VDNW/5jq1h79RVjwzVWir/Zfe14aJ/tehsebXyaDchO5QzII9qALNiirPeWn86LR27cL6am9E1TS+7YeO7+nHmB9C1Vkw/eHOMXHRSjJ30wRg58Sf8ejsEmYuSHvbi3mVjysirlm41G2YQYm8buK1y/drORkV+PBa8sZ2Ambzuvd1kWCzb158pzVI4YK39nxtI21vPu99g9HtNXuXyQc79iGCkxXtXATz3GbYrPlXo1JvfE67xiZ1kr6nPwOedjeqLMec+2Os54nNka735i53LMuqT40Vp993ssxjli9cNHjJKHhUF9rXi7FE4eelCDHPmuOngymxnv5zG9XGfC0s+1VkO9vB8x64ahTu5dG+NFqVHz0o+5zGjMnGvmmHM/tTwnn7PE7sVPjtqoGB7kYDT2g3lXWHozN1MOv7TrcB9xV0+vhV/qjMfuJ3aOWIxz5GFQ8jV1Xn558pE5GDTPw1Mrz2cOvxSG2FnniPHlGs55DIc657XaGDjmnSdXU3E8NN91OE8/lLnMk3d1xmNYeXU44zEMyhwMdRgzn/15DCd/ZpWTv42BhUPpjXoNGObymDwMSh4/vRgzj2ZOeQ5iVzhXrwErxeMxORhUefcpnnYJVjY25snRePSW/QdnQA7ZIh4+cV46AzK3/FCgnwE57982iy3XX6VcelW+8WrB7DjqwxvFvBeuGtd85cUx5/lPjQUf6N2MvnDf7WLuRqvGMR/ZKG79j63jRes+OS747ObNjeriT/rYC2OLZ68S//La9WL3zZ8W1399697vjOjSr2YD0rsES+vTjyTqjM2tZ8yPhTcf3JwB8ReIGB085/GYeuyBmLjhZ+U+jJEDdm/OYGgDUO7h8Ps4ypmL3hmQxT9k3s5cMM+ZjXKGhDMl2lRo42KbF86IkLMzI+VMizYh+86P0WPeERPX/rh881bvTEhvg9U8j8qGa4nn2vKGxOeq9whj9eBQjoN5V+fIy+8cY+Zd296bMKgztVg+HZpzJa4xrB1PAdM/NU452Fo/StRYONS9xNLcw8fO5v4+VpwfzuZ+9M2MxuJgNcbrStzGex6vVIfPeUxfeuN1JcZT48nhdSXGg3o9efIhnw78qHN4YH2Mf5jSgxqMxeVe5DyPv8Z7bzE8lG876OFszsHW6mQOljWLrXHkMw/njMe+lszmMV7X7PFx2+slD4diX6OPycvrjLPK+8N7wqDUYewcsfP0qan8rA+2lmtjnZkpx/pzX1+zatUO8m19vWbm21hqwWbOx3hdZ7Ju1RBDD+c99l4eu4c413I//cjBuPq68dXUGeKlYfM6VaPGU5s1MK4pNd1LLKVHjfVcZhi7hzivWfnagd+V9bYx1HGGGBZ1L7EUf5fil4cDP/UZo+Txo9M2IJhdZdSZg8fuPD5uPWOnuOr4beOSBZvHvcfP6X8F70693+E4cm5M6Sb05huw5sThH9ooZj33qXH7t
7Yp92/opvK377BGvG+XteLBH84qN6N/Y48NQ7/t8et/3jQ2WW+V+M1nNov7f7Bt2YDoDIg2JjqTsmj/7eIfX71uOWvy4N6z4oO7r13Ogjym+z/s90CmdA/IqTuX9ekeEN2zcscvXhaL/3DytDcsz1EvzODF0f+AT8bU4gdj4vozY/S0j/W+paqcwehtDrS5KBsQNgZlQ0Guf3aif5aiOZuRz2L4ZsJqNxubvXs3qZczHWxmmpq2SaH3vvNj/Ly9Y+rRe8sN6jy3YcqbQL58tLH+esHhpQbjrM7izQrD34SxK4xyHD6/tGytRq2efPREybnf47yWYb2c9Zg+zpNzXy12n/N48xp9jD8rbFax8DB4GEvJ4WUsJed+GBSPcx7jq9XwHs4QZ1Z5cnhcfS3eD26mrPthqKc570N/cu6DpR7erLCepw6qOflqXji8WZmnho+JM6Ox5lB8Na2xM+FyfRjq0Ss/5/w64HcVmznqofg15mCupl4Pf01h3U9O2nW4z2NqDWPlw+s8cRvPfI0lJ1a+fCiHhzpZawy1arzqUdNZGOW8B15yjJ2FIZe9PnZeeQ48UjxtOZis+Gs8c2IUt6lY5z2u9SOnmu5lTA5fTVkb3trYOc1z4K0p9fBKZ8LCuddrEGvevayBnPuI0TaWGvikymVVD/rAuLq/wP1/5OniqOEMMXM1nhxedEYbEP1/1kfvPyfu/NWrmm/Buu3oWTH5o51j6sSdym9wTB3V34D0f/18/OA5oc3FxuuuEh/+s7XjQ7uvHa/Z5hnxpu2eGfpmLN2g/q+vf3bsuNFq8bGXrRNvm7NGzHnBqnHuZzYL3Tvy7p3WjJdt8bRyiZa+IevyL24Ze8xdI77/rudGHLJDXPDZLWL+xqvFef++Rei3R8oPEva/BWvy1F3ipiO3jYsP3bys9w/nvClGHzy/9YVtXhx9s5Uut/rd98tX4jaXQPXv59BZj+bsQ39DUDYJ/c1Imbc8Z0majYXfF1I2IYPNBBsbnQXp1eGbsjib0s83a+lveqhz0Mt6m5BF9077H8jaG6p5vrwL7H9USfFmyioW3ufgpJ732Fl8zpFzn/PE+JxlTtrFO5PjXIMxzzf7fYy3psP4GpNz3stjfPRg7Or+HONr47Pfx0vLys8B26XuJZa2Mf4c3E8Mhw8lj7qffqg8NY4cbE2pn7WNlY8jM4xh3SvGx3i7lD5ZxagHD69B78wwhnUmx3hrmr15XGPIycv6UOfx1RS2xpGrccp5D7zkGHex8uCDc6VHruGeNr7GiiMPh3rNtn6ZdUZ1qJV5xvLjw0sNxnhd8cD72GNncjyM1XzbMYx1LteB5flpnHPwbSxMTcU6R4w39yWPj961OvK08c7luIvzvoprY/K5N+Nav8zgJY86qxwH820c8+4nljLfxuP1nplrYzNDrS7ea2U/Y9Ysdb/HeFFn2mJ4ZxT/iSaYzLAM5CYWXR/3XvDeuP6U2XHpEVvGVYe9uPzq+NRJO0UcP7/3OxzaCPRvRJ9YMDsu/vyWZcPwlbduED9493PjJ5/cuPyQ4GT5FfTZ5T6QH77rufGFN60fv9hrkzjxYy+MO7+zTbns6qZvbFW+qvezb1gvTvjoC8sZlB99Qvw2ZQOim9hP/YeN46qvbVV+f6TcgH7c/IiTdy6/T3LJwZvFZUe+OK4/dU7cd9EHY3Lkrua58Jym6eRYTN79+xj7yV7RXG6lzUQ+e8FZB9toZM9gMzH9jEhvs1LJVXo03q65vJYDXxbjFx8coa/qtb/dtOdpp+mLqeUfZ7reHzXc2ba4xinX5vf8/wvWe3c9X62jdigPh7JmxjXO+2a/14RVDgZVfXpQw1ly1IBD4Ws1nMFPznvUWM374WNYemucH84S44HLfRnjl4rhUIwnx9TGX1MY+jvj/q78MJY1U8/Hyvn66cPzc4XHgzpP7JzHNYYc6n6PNe/PlV5wUh2os+Tdm+Ps9zHeWs+2fvCa93VTi3zmGeNr68k8faSwxPRtq+Fsjqnfpvi9p+eUp3+uga+mcGjX2uXxI/epjd3vMV7WXOvrfsXeH17axrrfedgal5naGuDb1Bmv5/6l7S22jaGu980xnjbNfh87U1uDez12TnFmGcPgz2N8zGfF75o9tRruzzG8uJmyYnQ4S+yae8Gg8tZ6UqPGO4uvpl2s92yLnS8bkFqTJXKTI/Hw9f8RN/9kfrmv4sKDNo27jp1dLsOaOnF+75fIdRmWbUK4Gb18Ja/dnM7Yb1iP8ovovR8g1K+jL/kL6YOb28t9JofOiThsTm/zcdS80I8h9n4Jfee4/ahZceECXX61Tdz8kx1j0S26AX3JDzglpzfHovtj4vfHxehhrys3mOczGc1moL/paC6t8k1IuZRqsLlo81CLTUrNNy3X34BMy5W++h0RnR1JN7jvt3N5LlOjj5bn3PYm0HPvOvz1Ug0enm/j3UPs61Cu7cDfpTNlc8+uvqrpPXm+XqPWl5pSvKjXcx+90Oyr8fR2Lzx+KY+ajxpwqLzOUU9595RB/x+vn3mfqzFek77ecxjPPCz9yUvp4UqMf1jPWr0a63VK4/Reyrm2dfv66O2sc6wDn9QP8tScKdtWR3meJ+o9vDcx81mdZ30wrs7BoJqrsZ53XnFmvZfH2escNd1PzJzzM2VVo8Z7TnHXgbfW01mPqQeL5hr4suLPmnnN1w7l8aK5Vhdb83qd3NNrKcaLej2x7vfxTNjcmzE96ImSR91Pb+baGOZhUfJoG4+/po+HFaMDtk1r/cg5M2zd9HO2jaGuez1mfpjCZBVH76ya04HCMnY2x3hgsmreGXqT7+Lx1BjmnM8xnjY+r7V6CZZMFHIdffC3ceevXxPXnrR9XHr4lnHZwZvH4pPnx9TJO5ffA5k6Zl75QUDdC1I2B7ofRBsF/SZI2oD0Nie9DUcs6M8v6G8yyhmSFk71VLd889UOEbr065gdI/q//7HwpHlx2YLNy9mP607ZPu781StjfNEN1eczNTkRkw/fUX51XD/6x+agepmVLn3ivo9yX4ZfApVjLtUaXGI1qG2bFL8XpImXvARrwA7qlU0Sm5OG7dUePfItMXnTL2NKl5S1bbz+iPm299dM1vrHYmeytv+OnuWvV8v/46Hl/b/89Vr+ev13/O94Wde0LO/rZe29LPyyrPuPxS7L811Wdlme87L2/p/I/7Ferz9W32X9Gy3LumtsswGpTebFTk48Fg9d+c9x8+lzyw/8XbRgs7jx8G1i7JSdyyYkdBZCv8WhX0XXmRDbiPQ2HIONCBsSVJsQPNM3LDsM8v3NR6mrb71SH20+dAnYyTvFyMnz4/rDtw79UvvVJ2wbN/90x3j4ms+HNhp6LvmYuu/6GP/lF8svjjcf8tNZjd7lVYMP/lXfkMuklmDYOLChST3LvSP93BJnOJbwDjY0TZ995sXYaXvG1EO3LvG8eR3y37Y25j2RXzfyMLXdrjP4yDF2zTXwZnUmx9TIDOPsr43dSyytecm19WXeeXl5MC/FU4L+P57HSy8UDxy+PMZPb8aZF0eOWl3qfnqSg/NeHtf85GCl
MKhyXYezzpMXSyz1w/PE6suDXOZyTXyZw+dKDIPCSnM/H+OX4iX2MYyrYn/QUzlYra/tcJYYTuOuA7/UGc+38e7Jsdeq8e53r+fbOOXdp1g1vE6NJZfZPMaHap7Dvd5PecZ4XZ0jZs1wytcO/Fmda2Opl1nG1MAnzbXwurL27PU61HKuFsPkWu5lnShzsFmZryk1MqOx/Ghm/Tnja6uRWR/DkGPc1lc+1qy47aAe6gy5Nt7niTPf1tfX7YzHM2HpKx3GyuN9YZ0j19abeak4Z4kzK29XXzh8eGt1vH9eQ/b7GM7XXOubGY1r7J9gZLJNaTix6Ma4+9y3xA0/2iGuOHrr8mvjNx2xTYycvFPofpCp4+eXTcHUUfPKb3OUr8fVt2PxI4X9syJ8WxabjmYjwpkSznSg4jnroUu9jp43OPNx8k7lTIw2H/rmq98fs3Xc9OO5cd8F74vxRTc2T7w8V/0fXG1IHr49xk78QIzss2PvW63SWQR9mM+XYpWNQTkTsuSHfvdyeVWzIWg2E0tuZNhsNGdXmvs65LWzLmw8bJ3ek17NpVr77RITlxxWnit/Y9dhf2fN1w7ymedNiDoLQy6zeYyvptlbG4tTPh81b87BkM9j8ijPV2M/mCfHOCs8PtTrZYZxG6saeGoKh9ITrTHkYBjDZGW+S2HkyUeN897Zz7jGKScWPnsZSzMPQw3m8XaxmYGVchD7HLH3xpf74m1TatAPvs3veVhyXoOYObSN0Ty9URhU7DC+ra9qZJaebf3oi3p/GPq54kfpi5JHZ8K6V7EfjPG4+prd57xiZ4iHsV4PBoX150xPOB/D1TT78tgZ+qFtXuWdI4ZjLPWDsc8Td7FwXX1rPJwr/bLCs14YH2cmj/FmzT6N1Y+ezDvnOWK0jWVeyuE5j703XimsaxvnnrYaXWzm8bIOxtK25+x9iZ0bFs+Uqb1equ1HV69l4TPb1Yc5MdPOgGihPkkspcHk5FgsvvOEuPNXr4nrTpldLnW68MBN47pDtypnIMpN6SfsFFP6RiptEo7S2ZDBGRFtRKZ074YenNGoaM/T33ToTIrOqBwxt1dPZ1l0tkWXXZ28cyw6ccfS/4IDN43Lj9q6rOsP57wxFt/9k5icHG3WXv4Q2nzcd12MnfFPMbLPvPpN5nzYbzRvHHpjPuxr89DbQEz3Mc/moNGlOGPStplparFG35iU3JwYPe7d5SxIfgP639n/xv53Vszhseec5f1BDh/qNfC4Og9TU2eIYdEap5z8eDLLWIo36zC+gOkfr+c9WAfqvYhrfs8Re0vPEdODsdRz8MrXetc4eGfhqYEHPo9hayom+zUmV2PoS7+szmqudrT1zbWcpVb2+JjeeOEZuzfHsDBSOGI8ztZy+Knl/rYYb03F5D4+rjHkamxeA140z/uYvqgYzXMQw+Bj7Arj6nxmfexMjr2Hx/DZ72P58cEyRt2vWD4082Lg3Oc16JPVOVjniDOnMSyKt6Y1PufglPcj+2pj+TNHjvXB5bH38hh/l2Z/HmfWe7s3x5nTWCw8fuXz0cZSo8ZQwz3UoScqLzVc8dfUWefpSy6zcPTB50rs3lxHnnxQM3sZe73MMsab1VnN+cE4MxrDoc7l2P1eaxiLF5+PyXkvzfuB37WcAfGEAB8T00DjybGH45Ebvh+3/fyl5WtuLztiq9CH/2sPeXEsPHFeTOqSrBP7Z0OOnde7LEv3apRLs9iMzO1doqXNRf9R7h3xsTYceojTGRVtaI7bsXeW5cSdYvLkneLh4+fG1Qu2DN0Uf/mRW8V1J21f1rXo1gUxOd67EZvnUJ7bw3eUXxAf2X/XcgO3PsgPzmz0Nxb+dbn9D/OcbShqGwjPc1M4mwPm6OFa4mbjMjjbAiMtG5iyseh/Je+0eMCUfmXN9lw01u+DXHJo8/fkb4vqb+p/V16nnMOPtvm8nrw65OUgbuPJw/nYa3gfPMPWnGvCSb0efbLicy7HmfFx9uZ67s1xjfUcfuV0oMTu9dhfM2q4yuseZ4ndT8xcm3pNmKyw7iUn7TrcV4uXhe3qXetFjufR1htfTTMrjx8w+Bi7Zj9j9+SYenil8vihsXx4cw33Zj57fUy9zDPGi682xovikcKhPqe47XBfGzuM7+Kc9VjroXfm87i2drHZl+vVuK6+8NKuA1/uzxgWXx4rjzfHeLNSy7laLnOM8Wb1enizinEfNZQjD6O5fNR4OPd7TA3lag949+UYDi9jKTmYrHjwdbGa8wPWmRy7P8fwuTfjmp8cffDmWvhqCpvVa9U45TLD+P8Vq/oc9OpS9xJLYXyd5KRdBz5nyxkQIApI3QSI6ndBpiYejYU3HxC3/eKVcd0pc3qXYx20WVx8wKZx25GzYvGJO5YNQrMR0VfkHrtj6EZ1bSbKmZGyKdE9I7p5He2f5WDDIb+4/sZDZ1gmT945Hjtxx7j1yG3jwgM2jYsWbB5XHL1NXH/KDnH7L/8iHtXmY2JkyRdrdFGM//or5RfE2QAMvkKXzUf+YL/k5VblA782Lvz+R4kHvtYzH/2zFeW+Dm0m7KzFtF9B56xG85sgvdrD6g7WNVjL2EkfiKnFD+itw5+5qP+N2/7WAPLmg/dCm2a/j9sY8u4l1pwOPDXlecBkrTG1HFyeU316+JznWCOafe71OXqizHmdtv7OuJ8a0lpfcpnxesNYzXN4P+fok+cZ1/o7j69Nu/rXerf58zoym8fZz5qzz9cNkxVPjc05WBjvm714YFDyaBuneQ6Pa3VqNbKPWjnPOrLW/EvD+pqJc4/amL4wuWftuVLHvV6H+S6WfqjXUm4mLD1dZ8LKwyOz5NsUv+Z14PN42NphYVBq1Xh5OJxXDq5LnXW+i2GOHrkG8zNRWK81jKt5yQ1jNV87ZsJlD3XI6+9T+xsxL+UgpzFxF5u5Wq02PrN5DIeyHnp0qXtrMb2kzHtQLSeyAAAgAElEQVSsXK2ve6lBzjWzjL1HjcfntYjx1xRPl9I78zWm2YBoEhAji0TJN97J0Xj01kPjzrPfFDf8aG5ceey2ccmhW8Z5B2wSVy7YMu48ert45IS5McH9IfrRwhPmR2gzokuo9NClWvYol25ps6E53Vyuy7lOnB+TJ+0UEyftFI8cPzfuOHJW/P7gLeL8AzaNSw7bsvza+Q2nzYs//OZtseiOE2Ny/LHmj92seXw0Jq45LUb2323wbVf6oO9nM9ggNBsAzQ8uqypnOJInbwoYo2wK0F6+f3YjbVzwTNeBd3p+sMlgI1SdP/gVMXHDmeUX0v0N0bwuHf8BuD/Hztfimp9c9uf3Fz7XzDDOrPJtB0yXiqUGPnIa0w/FA4M3ryGzmYer1YHNjPLZ72Ov2cbiR33dyg17uJ8YRj29r8e1fqwXdb6NzXXoLc0Mc75OYhTPTHgYFJa+KHmpHz5WLL8zHmcvdZT3R67BHH6p12IMh8LhzUo9fDUOhh4wKCyqGsQo3lyDeSkcypyzHteYYawY1kB9dBjb1hs+K37l/cCX+5HPmllxPLJX47YDL31R8rBoruO+Wn/5nc0xfFtf79fFzoSnlve
EQ5lrWzfz8vuDvLR2+Lzi3I/5tr7k8dX4Wl9yuWcXL68fzsKhrAd1TjF5+dsYPLAacyjuYt1HjFIXPvfHJ/WejOHbFB62zVfLw2atebvW7Wv1uFZHudpBHib3Iw+Lf1i/5hIswAzwh6EBhYvq//iNPxqL7/px3HPee+Km03cql2RdcdRW5azEBQdsGpcduFnceOjW8cBxO8TISfNj/OSdymai3LCuHzHUpuREXbLVe5Tf8tDN7DrTcdJOxb/4pPlx37Fz4oZDt4pLD9ysXO5VznoctXVcc9L2cdNPdo57zn9fjNz7y5gcX9S8oZs1T4zH5B0Xxuix75y++Wg2GoNNRu9DfB73P+z3L4GSp3epVM3H/SCctcDDGY/emEutBmc+8LHpwTfYaPQ2Q9N95R6RvK5y+Vbfp8uwfvOtmBrrbcr4O+u1aXsT8bq5198XxJn3MSzerLl/jZUnH6wt857PHOPsqY29Hxxr9zUq9rF7vQax5rPfx/SgDup593tMD1d4KQ9nyOGjT67hjMc1PrPywKCZ8/6+BnxwKHnv5Rwx/qzez2PqUb/GMYeXXoyZl7bxeFF5dTjbFsO4upeeynmsMT1cianhjMcFbvkHlp7O0ddRz2XWx8TOeqx59aKfx22s8jpg8dXG+LwnMX56M/Z6bTye2no1x+Gx5+gJz5i6eKVeQ7F74eHQGp9Zxl7PuRy7n77DWDE6pPCu5L2Xcn7gr/XMfI3FQx0fE3s/YuZ4jvQnL60d5KUwXgNeLF7qMMYjzTXwwGQdxjqPlxoa04851s5YXsX5cJYaw1ivA++qmEetHzk8XX3llc+VGB7N64YrsP2jvHvpTx04lH5o9nutGmOtm9fFGY9ZgzPEzHUpa0RZjzT3aW5C7yo4bG5yYiwmFt8ZD1/z1bjjV6+LG388P645Ybu44qit45JDtygbhgv23yQu3G+TuPLgLePWI2bFfcfMjgePnxsLT5gXj54wLxad2FONHzx+h7jnmNlxyxHbxpUHbxEX7rdxXHDAJnHhQZuVMyz69i3VV587z3lLuR9l/LE7Ymqq93W7S6z3kT/E2E/3mnbTeXOGws5wlM2HnRFhPNgs2Gag5St0y+YknSUpdZrNjtcYHg+7Cb2+tul1R3/00Zh85M7mjbfE62P/sS6fG/wP1/LXYvlrsfw9sPw9sPw9sPw9sPw9sPw9sPw98J//HigbEO1K8s6EF5tdDEoedU73XYzef248eNUX4s6z3xw3nb5zXHfynPKbIZcftVW5XOrCgzeP8w/cNM7bf+P47b4vKo/z99s4Ltx/kzhvv0Hud/tvXHw606HLrPTtVlcdt2253+Tmn+wad57ztnjoqi/G6EMXhb6ZS+vhYG29NU/GxBXHxMjBr2i+Vrd8a1W5abt/lqF/rwUf5qU8ehuKwVmHmqfk+mcdBvM6SzI4m+F5NiTlXhDuIzFe3maN/bMtxVt+AFFr0wajv0bmGTcefDvEyCGvjok7Lmz+PxT8zfib5teN18+V9wjqc4qplfPu9xgfHEoedUax8jVvSdr/x0w+WJSaqNfxmHmUvhq3HXi7dGlY70lN1uh1mMvqz9n9Oa5xnnO/8n64r2298meOnPOKqYF6LxjUWfxo7udj53LsPAzKWjKjsXP4auqsM+RhNM4HnjbNfh/XmNzf/cQ1TrkuVvMcj4eHldZ4743Xe9Y4Z4hrLHVqfZWDZZ5e1GLMfFb47Gfc5lceFg+MK3Nt6t4ctzHks9/HeKR5nczh19gP5tEa736PYdA2VvO1Ay4rdTLjdTLDGBZvVtXEmzWzeH0dzrhfeR9nhlrO12Lncux+9fJ+muPAx1hKLis13Otx9udxGy9frS/rhqOe9yRmDs0MPfBnhZPS13Pyt9VwXy3OvXxc85PjOWQ/Y3wofsYoflfmXDOPH0+zASGR1QHFeV7j6S/uZEyOPxSjD/wuHrlxn7j7vL+OW858Sdx42vyyGbn6hO3KvSI6i6FvrdI3aF16+Iubx2VHblXymtc9JVcfPyuuO2WHuPHHO8WtZ74k7rngA/HITQfGyAMXxOTYQ9P+I2CtrlOji2L02L+Mkb3nTrv8anAGRDeTTz9jwAZhaXSparDB6TiL0t67d08I/djY4Cffu7SrvwnZZ15MXPfTmErXWPM66W/IUfv7kpv+d57OwEvxu2aWOTiN21jN+RsZxrWL9941hhxrQp0j533gpD6vGLZt3dSBdV+NxY/Sm15SHvCM6QHjYzyuzsNoHi7HsHBScih1UPIoDEqennCMmZfS13PZz9g9sDmHl15Z3c96Uc1xELs6qzhzzFMDJY/CofhQfBoTo2L8oXzbAYPSj/EwtuYXQ5715f5evy2GYQ0oNWscfd1LHbSLg3cvsXRpWPzwYqnfFuOtaa2352oMOffVYnxZ8batm3k4jf1gHqUOih+Fxe/qDDH+mjqrGIZ8jSGHB50p2+bPefpINceBL6v6swa8WTPDeCasauFH6Yfmfj6GQWEYu5dYczrwoJnFhxeeMRzaxuc6+LvUe3hfz7fx2Q+DZk7rZu2Ph/V6zivvPemBP48z63z2aswDn/NLXIJFATcBsqCawjHX+6asxTGx+K4Yue8X8cj134v7Lv77cmbktl/8edxyxsvi5p/sHjedvkvZnNzwo3nlkiqNlb/lzJfFbb94ddz56zfH/Zd+omxmRu77VYwvvismJxY3b0rv62tWrB8cnLj+zN6lV22bjHzJVZuvJa8P/IMP/cu+kWEjIS11dUaDsySVNfTOhrT37Z1l2SHG9aOEE+PN68bf1F8zz/F37FJY58rrbh+G23jYrG3+rr8zPdG2GqwTxc8aapz60hu/856bCe+9nKWH1/De9ES9TvbJk3P4UerglfJw1tdI7DXkhUM9lxln8aNwKF4UnxSP5zwPI1WeA0/mNc45GJRe2ecsXil9XamBtrHwzua+1EBhfA3kaqz3ps9MWXq2cfR1n8esB1+tr/trsTOa91oa08NZcs4Sew1nanFmfFzz05c5/FLlUMXZC+M+54ndV4vx1bTm95wYjWuH+7riXKPL669B7sk6unjmMsuY+ay1vvLoQIkz62P6oJn1Ps6Rh3PFh4dxVmc8zr7aOPvzuMb4euSXJx8z4ZaGzfVyP8bZ52PWXevrvloMqzkOYlf35TpwrsNY6sHg15g496mN4dGlYfHSkzF9WGNtnJklNiBAXQvLDWBQWLTJ60PO6D0x8sBv47G7fhyL7jg2Hr3loHjk+u/Gg1d/rdzL8eitB8djdxwbi+8+PUYfvCCmxu6b9sKqd1t/+klLT9378eNP2KVXfFDvX3rVPxPR+9Dfu6SJMwf+7Vdl3rz+oX9wBqJ/kzn3lJgfHp3Gs6kQx6OfG9TmUq7e+qfl4aWl95LPTf4x3Yg+PlpeF16/JV6v9B9T22vtvF5nDmIpHrT8PSwP44oHzSx5MYrRnBfXxcKVAi21qCmlVvYzpp4zHmfeOVg8znlvxbUjezLfxlFrGI+vprVeXs+ZvA73+XP32HnF1HBWce3RxQ7jM5t71/qRq7HweLLynGE1n4/M5DH+Lp
Y+mW1jVFNz4mosOXysAXUWb+6Nt6Z4M8u4xigHh+JnLM2H5xTDoM4SU4MxKqaLky8fsNIuPnMaO9sV11hyztXWjo9+ecyanSWn2vkgJ3Wfr4O4ix3GZ9bHbSzPIXvl53AWPzn34Xd1n7Oex59r4Vma10zMsIevg94oa6CGe5VjnP2MpcNYeuCFhfM+5OjrLBxa8+ZatZ7kspd6KH3w53EXj5daUg7P+fP0PF5pZmuM55wlprb72nIwaPblGozxo62XYMngTwqARlKKkmOM15VacHhhUWcUwxHDoXDZF1OTMXnzr2L00L8ol17lsxSMfTPQ2yCwSenQfNakP+7VHNwvUur1z2TQr9rD6rlP8RLr8w1HmWfjVD8TAz921hdjcmxw5miJ1yu98P661l5r5xPa/I+N18hxjcl/YzHe22N46uYx+az4XOXRkb15jMdZYnlZn5Q419CYOnkOBs3z9IJH3d8WZzavQ5yz9HYux/LAtbHU6dLMMub5iSVGqUd/H+MvUOXvmhlYlB7Oe9zFt7HUlnbxmvcazhGLJ0ZZn6vmqEVPFI5a+GCog69L8aLUkOZ+uU5mNM6erhrup5bnWEOu6V785PDy2qDkpTDEztb88uU8jNdyX/bnXs5TQ0yNE9vFMy9t43M/ejq7NDzcTNYsbz4y37ZufPDUIt+m+KUwxBrTD/U6+HIN99RiajnntYb1pabz5FB6MJaScy7H+OXFTw5tY5iXDmPl0eGMx108LOuAq60513HG+8Nmf+5V42sMa3K/12IeFiWPwrNWxsyjNd69zoup+T0PC6c5j+HRzGoM8yduYoJJimbF18YyPz45FSPjk/HY+EQsHp/sPyZifKL3Bhafa3gvxRzUlMKgzE3zLn4oxs7+eozst3P/3g/7oF7ONPBVunbmoJxBsA/9fd/gq3Lx9lidWeDyqObsQznzwfzA38z3Nw0al80BWi6zss1L6a1xP9dfG2dRpL3+/Y1SmZ/uHZwpmRNjP//36gak9trpdSTvr7fnFPvhY3z570MepU+uo/kuFr98XoO6aK1GG0sdeovNPB5quDpH/6xtPL5aT+bopbEfGjvnsbNwNcVXU++l2HnF9JO28c5Qz9kaR857EjNHT5S8FK8rsfs9nikvn7gaWxqnf3w9cGjuCQrDmvHVOObwUoOx5lkrCuOKv8bjq/V3v8e5b603fnnz4Tz9XeWvceR9rbk3vWo8PZwnh8K7Moe28ZlhLC4ztXXLlw96ug5j8aoWsTRzynUdztRYatdqMCcOFmWuxilX6zsTVhw8Pbw/OXzen5zUmZn0pQ71YVDy9MDvSl/35tj9HuOjn5SYOfcrVp5DMUzm3IffFVZae7g3x87SP9eAIZ/HrBfFJ+XwWDmN8bf1rTFez3l5/UEP/K7eO8cadx30mMmacy3YLqW3s/i7erqfGtVLsDRJwZnq5ORUjI5Pxr2LRuPKex6Nn9/4YBx1+T3xnd/dHl8/59b45m9ui2/99vb4wfl3xHG/vyfOvuWhuO7+RXH/orFYPD4REy0fYGbaP/sm77kqRo96y5LfFuVnG8qH/8HGpDljoXy6DIqNBmcUGPc2Av1Lo/LZCcbWkx6u02ppY8EmRTzfasV62JAwLveIqP9gszKtXn/jMnbmvzS/BZJfq//q8eN5f/1Xr/H/l37L8lovC/s/9fX73/icl+Vvtfz1mv6hYthruSyv1x+LHfac/rvOL3+9/me8N/+Y758/1ntkWfr+MV+vZelde87lhwg1oUPFUcW+m/GYRcg7OTUVi0Yn4oYHFsfxV94T/3bWTfGXx10VOx1wccza+4LY9ocXxqwfXhjb7n1BzNr7wvLYbu8L46ULLo33nXR1fPasm+OQS++Ki+58pGxGxvpnR+jvqrh2sJ5Gufl8/117l1/1zw4Mzgjky6v6m5D+h3V8+iA/OEsx3eMbCM5SwDGnMbkSa2Ox7/z+r7HPi5EDXhIj++zYX2N/TfvvGov326V/2djcZjPS1Go2Tf1Nj5/5YHNSNlC9syFsZsZ+8e8xNT5SXr7mdbIduf6+Oe+vteZ0ZA/jzLexXgOG9xZjeuGlFr2k7oVnPnNtvHPUw5uV2l2aGY3lRzNLTzyZJw/nfuV8nNm2nnBi4fGiuW/uD4cv98aPei9ybSy13FeL8dW05vecM8r74T7FbWvPnGpktjb2Xh7XvDnnfo+zrzZ2f45rfs/hVy4f7iP290eNoQb+Ls3ePBZLv6xtvbv6MUefmuKhn8Ye1xhyzopxTnN+1MaZZ4y28cyj9EWVbztgpPilxLBorgOPHyWf/T7G4+o8Xs374X5iODQzw/hcx/0e40PVjwc5qQ4U3ud9nZ7HW1N8znrvWk/qwKJwqPJ+MMbv6gx5Zz1mXiquNnY/sXw63E/sdfDXNPvzumsMOdguxZu1xrBmFM9MWLxoZnyMhz4o+ezVWHMoPldq1NgurjkDkkEvnmO82nzc+cho7HvBnfHO466K3Q+6JGbv09tksNlA2YBsu7c2IxfGtj/sbUjk3/nAi+P1R14Rnzzjhjjzhgfi4cXjzRkRnhhKb63Jj2lrHB+NsV99efoHeztjwAahqvlsRR6XsxNpA5M8nCWp1R897l0x9vPPxsiCV8X4ud+NkcNeW85y4NUPJo6e9vEY2Xd+jB7x5maDwnzWrl7yMj929tdiamK0ecmmvV62EfF8Y64E7uNv47kK0qTcR+w1MGouH/hrSo3MMK4xOYdXypzH5FD1zH01B1MCq+Wc4sxmjrFz3pNe9HG/x/Cu3tvrELs3x87Su4tjzXDuhXf1fjAoczy/Ng5f7o2fNXgdGGnul8fUQZ31mP7K5YOc+xU7Q1/Y7K2Nnde8H7VxruG8szl2jnWiuU8XK69z1M0MY+ZRWFR5P/ApR5w1s7kG9bq4NuY/m/W1+nro4+rzxPBSYmc8hsnaxcrL0cW5D7+rs6wTzWxt7Hwt9l4eZy890dyri821uljVcb/3I++9cowHFc9DOQ6PlcOfNfeHz+ocDH0Z557UcDbHsHhrmhmN4RR3Hc76euGdpZar8x5n3hnVdK9i/Ch+70+cWcYzYbt6U4c+NcXjqr70rjHknCEuGxA3KNYkBTF6E+UeGRmPX9/yULzxqN/H9rbpKGc8dKZDZz7KRoMNyeAMCJsSn+9xF8QO+14U7z/5mvjJ9ffHHQ+PlMu6fC2sNeu0NS+6N0aOeGPZgOQP7OVDeX8zUpvrfWjv30vR4uPyrGn8gX8WI0e+OUaOeFOMHLB7/xKuuWWjMXLUHr2Nxr47lXWNnvKR3o8D3viLGDn67b3fKFnwqnJGZPSkD8bICe+J0WP/KibvujxGjnlH2YQs2bP/eyDNJVhpU5TWPn7e3jE1Mda8bPxdu7QxV4IuTnNdh+b9b5rHNZaa2asxD2rWeOW6WGrgcyWmPl7qkZdPh/IcxM7AuWYOnjw9ch3G2U++TVWPmvSQlxjNPAzqTI7dQx3lyJdm/X9gyWmMr8biR53Dn5V6eKXwrjXO18185pWnB56c895Ly8OqZmbz+hizHlg4r5HXqLE/nM2x+2oxfs3lo
+ZXjjXXGGrAupcYlZcaKDl4FIYxffDnMT5pF+sctbLfx/hV1w/6uXemvWFnot6TuI3zteBFxXAM4/HVtI0lX2OUY75NWfvS8nCq23a09VQenjWiXq+Lx4fmNShPj1od/MzlMfk2xe/a5lXe1+KMx/DuzazGbQd8Vuq1ccpnhnEXK08bC0cdesP4GE+b4s3a5vd8ZnzsPmJft3tzLL97M49f+XzU2GmXYAmgYDarae8xFQ8+Nh6HXnpX/MXhl/cvq+pvLppNR2/cO9txQcwqZzvYgKCDsyBlQ/JDXa41mNvlwEtiz9Ovj5/d8EAsGh0vl3r5k8ox6y5rvOeqsgnQ/RO9TQI3i/cvS/JLrWqXZzXzO8SiH8yJh787Ox767ux4rJ/v3Zdhl0Dtv2uM/ebbMX7JoTF+8YIY+/XXyqZi9Li/ivGLFsT4JYfH+GVHxugpfxejJ7w3xn71lRg57HUxceNZMXLsO2PkyLfG+AX7x+iRb4mxX3w2xn72r+UsydTDt5W6I9rcaENBf+4R6T8/LvMabJ4Gl36xSZq44ujyuyi8btKuNxKvJ2+i2pj3BHM+hsuKV1rrz/zj5cTryEqu1rNr3V6HtbVpXrOPxdR6ey38yulw7eLhUGpSg3FW1gOHygfrr03mGeNFyUvp4Tni0qTlHzxLyz9ejmXMhJendsB2aeao1cUwl1mNmZPm18rHzsrrh9cgrrHMZda98mhMLnvzGD+10Ro/U5Ya7vfYe9IHBpXf48x7DXzUgnWGOHtznexjLIXNqr7eGy8sfveRc8UPrzmP6QHjY3wFSP/In3tnto2nV1Z4Wmk+H7W+OVdjlMMn9Yc/j8wyXlbW+3k80+ec+8Mpz3MjZox6P+LMlyL2fuxiVQMen/Pk5MFH38zCwTB2fy12HzHqPZxlLfhq6v622Dl5OBTTo4t1RizeGquc5+mVlRptij/3HtYfP3Wp4xxz6LRLsEh26eKx8Tjy8rvjFYdc1tzT0dtAaEPB2Y6K9jcXg0ux2KT01TYf5cxI/56R1xx+eRx+2V3x4OKxsgnxJ+Wxr3niujOW+OVzPogvoc0mhc1Kf9Pxvdlx+9dnxa8/tUV87vUbxpfeuGHc+fXtqmdVRo/eIyZu/nWM/uwzMXryh2L8t9+NkYNfEeOXHhnj5+8bo0e9NcZ++eUY/90PYvT0f4jxy48uZ0omb/512YyMX3VyjJ7x6RjZf5cYv/DAcmnW6HHvjsm7rwhdsqXLsZZYd/8MB5dYtc/PKd8ENnnjWXr3ljdw1xvCX0d8/joTu68W46OGPBw1f87hzZp9PuY/vswwdm9b7F5iKX56MJaSc78zHuN1XjGeEvT/8Twc6nxm4KgpJnM+dh4GVS33el/6oF7HfW18zU9OfBtX6wcn9d61OHtmwmoteT3OUbPmcw6fK3FtrcrBy8ehPAfcsN4wrs4So7W+9ETlxQfniq+mzrbVqHHKOev9cr7Gu2dp+oqjd+7p41pPcu6rxfhQeTjcn9fNGK9rF+dzzhD7PD08R4y/pniyUs8ZefzIDOMa65xiefHBZYVR3g/3tdVwv8di2xjy7idmDW08LD44V811PfDiYSwl532U87H7PYYdps4QD2M033VoXmv0dXrNYax7c1xj5dGRvbVxjc9sXjfjzD6evjBeq7ZO5dr61lh58Xs99xKzBnzONmdAmMzqTfRNV6dec2/scqBuMJ++4dDZjl6ODQWbEM5qoLo8qx+jdk+Iblz3Oorn739R7HXmDXHF3QtDX+2rg3X6k1RO6x076/PTzhYs8eHcziT05gZnDHSW40cf3SQ+8pJ1Y/vnrhZrrPqkePpTVoynrPTEOPMTm0//6lsuczrgJTH+2+/HxNWnxfhlR8XoWZ8vl1BNXP+zKJdUyafLrw78s+kbkHuvjsm7rojJP1zeu1Rr7x16G5DffDtGDn99TN5xYdnITFu/rd3PfODhUi2f0wZo8p4r9aLxcpXXz/+2vJ6ujdkCzetwXy02pHjzuKu3e3OsXl2sr6/GDuNheE553MXjlbbxWntt/c7Ck6PWTFl454hrSh+05mnLwWRt83seRrl8uK8WZz/j7O16rfHCSslJayzzeDPbxcBKOYiZa/sb48t94dA2nn7w+JdGvQax+K7n7D56o7DwqK8p8z7GV+OYq/mZE9fGskZ4V/iZ6tKwtb7qQ75rze7znrCab3u+NdZzsMN4enl/sTxqPF48eQyD4pNyeI58zrWNqSF1Vv1qPb0OLBw18Azj4VxhpTW+1tP71hhqwmZlXiw8ypzUD/LKEcOg5GHz2FnN1ThYejN2ha3x9ID3sbhhj5qfHH1dvZ739BiP1ltbM/NSDs8p7mKdIZbWauScxhwew7NeNPP4cl5jGNY+7QyIgxgpovHtDy+Ovzr+qnKmQ2cyOONRVN90pcuomo0IGxK0v7HQZoNNRt9bNhz2LVm9S7emc3P3uyg+dOq1ce6tD8dEfxPCi+TrnpoYj9ET3ze49MrOcJQP5+lrbQeXafVu2L7qc1vH7OetGk9c4QnxhCc8IbbeYNX42EufFU9/6opxyHs3aurqAz8f9nUj+eiPPhqjJ7w/xn791Zi49XcxdsanY+KaH8foaR/r3VB+3LvK5mPaGZDbfhdjZ30hxi89IsZ+98NyD0g5A6INyGH9DYjuDWGj06W+MSk+7mOZW34NfmrhXeXl0t9z2uvV8R9gMdr/KDPOPG8m3iv0yH7G7iP29xu+muJ3dVb5tkNz2ZvH8DWt8crxqPX1OvDqmfuKxUtMXTgfe+x9yXsN7+Wxe/GjzEmd8VhzHPh9jDdr9sK40ldsjXevx9SG9zGx/IprR42jP3xmGTNf8zOHl96MfV5xrQaMFI4YP9pVI7Mat/nd6/2JM6f+vgZ8Na2xyvkjcz5HTE/vqzkdqMeZY+ya+2Y+9/JxjSXnPRQ7p3HXgd8Zj2s8OSl8LXafrwFv1mF9qeEcDMoc3prikWaOOTiN/WAeDiWPH80s/qzw7veY+S7N/jyGrfV2b47hXKmhHIfHnnNOsbM1xtnszWN5qeGqeNjD+xCjNdZ7u08xvYnxSompCesKj6fGMeecx8znfuTRzGjMnJTeuY5zOYZz9ZrZzxgPvbJq3o/amBposwEh0aaPjU3EYZfe1f963f43WbHZKJuKXk6bCR69DQn3gKR7PtIZlMKUMyKDe0F694QMxvr63r887sq4+A+PxHj6urZm3YsfLjuPZdoAACAASURBVJc8+RmAXqwP5IN7QJqv2O3fTyGP7vP499duUM52aPOx6pOfGPu+6wVx7qe3jE2f9ZSyASm1fFOjD/5HvS0mrvlR+bE/fd3txC1nx+ix74yxc74Zuryq5K49PcbP/kaM/njPGL/08HIz+sT1Z8TI0XvEyII/j4lbzin3iIyfv3+Mn/PNXu7GX5R7Qkb01bzNV/r2ztbo0qve8+pfOlbm+z9O6F/Vu+9OMX7ud/5LfwNEb8Lm7zGD/3H5/8H7v/E5L8vfbVler2Vhl2XNf0x2WZ7zsrB/zOe8LL2X5Tn/sdhleb7Lyv5vfM7L8poty+v1P7Hv
sqx5Wdllea2XhV3WdS8LvyzrXhZ2Wdb8eNjODYieiA4Vvu7+x+I9J14d2gSwwZCWTYY2Dnb/R2/jwO9/JD988fc2F83N6v2zI5wh6fWxMyc/vKB849Z7T7o6LrtrYfNVvTzxstYHb4nRw99Qv1TKvpa2dwZjTvMVuPowf+G/vDjmbbR6OfOxwhOeEK/Y4ulx/Ze2iTP23CzWe8ZKccaem047G9H84N8+83o3l//yizH2yy/G6PHv6d2DcsDuMXrKh8sGRFp+9+OQV5fLs0YO2C1GT3x/L7f3DjGqb7s6eo+ycSnfprXvjsVXzqDweyZdZ0Bsrjkzo9xBL4/xa09vbkDntZJyeOw5vOx089i9xFJ8rrlGzd/Geh1i54mZqyn98dY0c2LgfE5s1ximjReba3g958m7nxjFg9Z4zbnfY+fa1lzzw0mdo7/Pe1yr1cZnbx7TC56x+zz2dbBu1zzPuK0G/VB8qHgOaklZr+eI3e91mHc298UzE/U68udeedxWs4D230QeO8d6ydV61HLyt6039+vi6SvlINfGMe9rh3XF16bO4+nia35xfngdYmkXSw1U9ZytxfRkro0Z1td7ttWgh2v25rG8Xb3xS3V4bWJ4FF+bwqHinC2N+v/g6VJY9yxNDfjMaOw1ZxLD5Fqw9GLsCqMcdXx+WAyPul99a71znzbWaxG7lzooHulM+srH4WyNx4fi15jYNT93fKi8HmvMmmEZF6P9431mEhvahDPh5PGj2YCwMIq4SZc8nXbt/bHbQZf0Nxx5UzE4S8FlWb0zGgNfvjRr2iaGTUl/A1I2NiU3qItfdbQJ+vSZN5avAvZ1a82Td10WI4e+ZtpGoXcJE5ckDW42L/n+2YwHvzs7vvCGDcv9Hjr7sfqTnxj7/NXz49Hvz4mD3/OCsgE5/19e3Fx2xYd8lMukGKPk0V7efmndNg54puvAOz0/eB7NRqhSa/SYd8bkI3+ovqH5G+c3hfK8D7oUPmuN8b9TW7+uvs7nfj6u9face3PsPsXq2dZX835ktjbOfsY1r3LeX17l8pFZXy9zM2Xxu+Z+PnYf8bA1y8d6YKSsG8WHdyZ9vY7zw9iZcNSQt/Zg3bX1trEwKL6a5p4waI0h18Z63r3EUvcoVj96Mue+Lha/8+7PMX7UOeX8wDNMqeEsMTVrNeBQmKyZxe/5zDCWx/2Kfax5HWgt9j6ZZY5+rsy1qXtzLKbWy9efGca5X60O3qyZra0DBi9jKbk2zd48zpyv3b30Ipc5jXmtqIE3axfLXGY0Zq5Nh/WdSQ3VxlcC+6etb84b0oTysL7s97EAjf3weY+9HtzjZekH7+o9Fee+mfXxTFlff2YYq29bb3jUfaw553ydHmefxjxYi5Sj2YDQCJMMxKMTk7H/RXfGnH399z36Zy8489E/o8FGobeJ0NmL3hmMku972Yw03nLvyGCzoU1McxbFNic9f6+evqb3Fzc+UL4ZiyetNU/efn6MHPLnzeVWvQ/tbD7QwYd35m/+yraxw/NXa+792PLZT4lffnLzePh7s+Nvd1s3Xr750+PGL2/b1O1d/lSrx1f+9noMNgc62yJ/jxlcPkUNlK/bxWdrNb6su3/Z1RJ1le/30Vf+Tk1ONH9L/qYobwRX5maizhG3cfydNK8DhSPnPnkYo/gzr7H7a2NY1GvU2FoOVgovbXv4uvFTAwZPHnsexjX7Gbtmv8aaR93r/fC08c4Rt/G5Fv6sznvf2lq72NyPWpmpjbOXcdsafM3eN8e1XsrBex9iatRYOOZYH+ps9uZx7udj6qNiedADvyv+rG29nVUsLntn0reLZS25l49rfcmxHvd7nH30y+smX2OZ81rD+qoOfnh6wirfdsBkhaV+rpH9jHNv+jLv9ZSjD7GPYbPipSbjGqu5fDgH67ns97H7iHnOGvvhY/cSS53Nfmq532OeL4pfmmvlPjCoszn2nsRw9EFrrHvl05hc9mtMLbxSfwzjqeEMcWaVx18C+++JNcJKyeFFvY77iOHweU9yUvzEeUy/mjrjcX7Obayv0XliX7PXYN61Vsv5aRsQBxVzLBydiE+dccO0S6/YPEw742GXZHFpVqPaiNilWmww4JtNSf+bsQbzXILV36DY/AdOvibufXS0eVPqW570dbgjC145OANi92v0PrSzMeh/sO/fL3HK320SKz2xd+P5SiuuEB/YZe34wze2i1u/Niu2Wv+p8eU3bVh+D6S3YRFrG4Z05oGzH+iAsc1EYto8M/qa3bZaB788Jm79bfP6tP19/W/N3zx7a+Ps1bjma8s9Xh4OVX0dbX08D+Oa+fwfDeNaD+qoBj7vV8vlfrDZm8fUra2j5q3lar3pT/1h6v3belADL6q8x5n3sfuGcZoXCz+Mxcc64X1MnGt5HzxeT34dzHmsnHvxoNnr4xrnOfeqno/lcy/rIOfeAvb/YV01hfU5WM8prnmV1+EKR77Gea7WD7bWN7O13qzBvTlHD9TriOti3Ztj+iyN+hpqXF5Lm7+2btga4zl83r9Wz+c9buPxeC+PaxyM1L21sXvb4lKk/0+ukfv7mN6os+6jr3LkYfptm/8twTtM4b2nYu/RVsPZGt/FsV7XNv9/Rp7Xy9eZ1197zs75Olg3uTaf5zNT6089qbMa4891nOmK4b1W7gHv+bZ+7qlx9GnjYVypCcOaywaESQeIZbx30Vi87sjLe99wxRkPOzNRNg9lY8BmofdVu71vytI3Y/W+HYt7O9h0DDYxdpakv4nRHJuS3mZk4IHf+YCL44Qr72nuBYnJiSg3dh/88v49IJxF0I3bg5u2dXZAZyA4C/HAd7aPN2y7Rrn3o1x+tcoT49D3bVQ2HN/e47mxzYarxm8/veU0Bh7V1/fqRwv1UNzrNzibQS/fULAmMbrUq8f119ZfI3V7fbjJvKfT5prn1J/7wZwYOW3PmBxZWP0fHf3NJyYmmv8YeEPo76658fHxaZy/Rzz29wkxNeTjwVxm6YvOhHUvdZXTkXnma4pfcx7XvOSKsfIP811awUpKTH5danVkVt6Pmo8cNd2fY7xt6n55/IDJf2PyeDOnPJ6atq2bOjD4UPL4av3dk+Nch3VSh3HmfOzeHMtHD7TGKpcP99Xi7PdxzZ9z7ifOntqa5fHDx4rbGK/tvMfuqcV4NeeHe9W/tgb8zhI73xWrBozXa2NYB97MO9e2bjxeg5g5+jBG8dVUnjZOc12Hs2014HMtjf1R42GzOqe4jdVc7WhjqJsZrzOMxZtVNZUbxtMb3sew+fn6GD/9UPewDlfncuy+Wh38+BhLybWpez1u85NnHc7QD8UjxmNqSGsHfhh0GEct97Wxw3p7DY/pUVP35Zh1OCcPR5s/5/G7Zk9tjJ+5P9GCWBRJVwF3LRyN3Q+6pHoGpGwUpn39broh3TYqPe/ga3ibDYh5OPPRu3Srt3nxzUgv7m1odC/IJ396Qzz42Fh5g5cNyLWnlxuve2cUhp+l0NfwXvyZF8cGa6zcbEA2XGPlckP6pf+6Vcx53qrx8Zc9Kx753uzBWZX+GQdtILR5ePA728el/7Z
V/Pijm8apf7dJnP8vW07/5fR8huKHc+KxH8wJbXz0tb8/+fimccT7Xxjn7rVF3Pet7ctGR5sL9fzdP20Rp39sk7j7m/wIYm9zc89/bBdnf2rzwuoHEzmDoo3KH74xK372f7aMy0/9bkxMjDf/8evvPDIyEgsXLoxbb701fvWrX8V1113X/P21IXnkkUfi3HPPjR/96Edx4403lrm294e/mYil/v4ZFsPJN1MWxnWmfI0hx1p5vih5VH76OYtfSgxT82cWBnUWHs2sezPftl7yYjNDPe/jfo/h22poPh/UR9tY+jgPg9bY7GfsjLiZsuJ0SJ3r4p2BzbzGPNxTmtk/eNDc16wlxKcBMevOrObbjmVlvZfH1GV9uT/zaBvrvLwcXRxzeLMy36Uw8vjhDGtGmct+xpp3r8c1Fg7F06b4app75xo1Rjl8vlaPmYfX2A/m0TZ2Jlwb6/08Vk1nPM79nFP8eFlxsOqXezLv/ZTjYL5L8WaFmWnfNl51ajWy38eZyc/bvcRidLBuNLP48MIz1nyNoR486hyeNt693tfzqlHjsx8GbeN8nV6DfBenOXywjJmrrZU5vM6Sk2Y2j+HQzkuwaHr3o6Px0gWXNr/7wcahbAb6l0TxLVjaQHCGYuDjzIidxejfGzL9LAc3rQ++urdXY8BpY8KZFM299Zjfx6V3LSz3guheh3IG5KCXlw/kXAKF8iHdVR/0P/MX68eTn7RCswF57dZrxDWf3zo+vNu68eL1nxrXfGHr5gM+rD7oa1Nw5AdfFK/Z+pmx1QZPLV/V+6ynrxTPWfPJ8Z75a8cVn92qOcsirncWo7fx+Pk/bBbvnLtWvGidVcpji2c/JTZZd5X4yO7rxi1fnRXn7LVF/J+Xrxcbrb1KfHCXtePWr25bboBXjVu/OiveOfdPy43x6zxtpbL+Xv0d4tHvz47Pvm6DeNH6z4jDFhxYznLwd7zjjjviH//xH2P27Nnx/Oc/PzbbbLM49dRTy5vmwQcfjOOPPz5e/epXx3rrrRdrrbVWbLPNNmWDAr9cBx8Wl78Wy1+L5e+B5e+B5e+B5e+B5e+B5e+B5e+Bx/ceaH4JXTuSthfx7oWj8apDL7NLsAYbBDYbvU2BbRTspnTOavQ2J7ZBKZuV/uaEjUy5BGvw9b6DzUbvHpBSq8+pt87MHH/lPTE2MVlutp644ee9Xw/XDdvlpu3eB//yAb05EzG4NOuOr8+KP9vs6c3mQ5dgfeY168d33/68mPWcVePID7ywt/mwS7a0adHX8370z55Vvi1rx41Wj8Pf/8I4+m9eFJuv95RSS1/jqw3Eg9/pnznpn/XQZuazr90gnvnUFWOVlZ4Yu23ytDjwr18QV39+69jrVevF2quvFG/abs1Y+2krlTqrr7JifO/tzytnWvQc7v/29rHny541bcP0rh3XKpsb/Y7JCR/eOLQJ2nrrrePiiy9udrv62+63336x2mqr9da3wgqx++67x7XXXhu33HJL7LXXXvHMZz5z2uug1+Lb3/522ayK5/D3ie9wFfuY9xQcY+cVw6DZn8c1Bpba9KqxzhPDu99j6rqKgSPvDDFzaGbIu59YyvwwzhliWNdaHfyuziiGQ5Xj8Fi5NtbrwGZ1ll6oz8Ep54d7iJ2XNzPw+LM6n1nGzmQ/Y/pkdbYthqGfj52hl+ecyXH2ieeBF6UnCus9ayy8K6wrLPW8j8eZqY3d73H20tPz+GuKjzVmXow8tSOzGjtfY8jBZp0J38bwHDTP4bFyNTbnMkOtGi+v96WWM8TMuTrrPmLUmRxTQ17N5cP98vJQ3tnMUc/5Wpw5eTiyn37k8dUUT1ZqzJTBrzoez4SX3xliWNXkyOukHwzz+GuKB0ZKrLnaQd5ZmMzjpQ5jZxU7zxxMVubh2nrig2fs6ixryH7GUmcVw6DZC4PC4GcsbTs0x8O5Jc6AeBOA+xaNxh7H/n6JS7DYfDRnOjq+vYozHXjLZqTc58EGpHf2A99A+/PlMq3+jeh2n8j2+1wYXzvn1lg0NhFTuq8h34TO5VLN5mNwE7o2Er/+1Oahsw/6sK3HCis8IT64yzrlG7E+9cpnl8uZ9MGfhy67uunL25YzFas9+Ymx9QZPjdM/tmk8+oM5ccOXtin3kvAr6m/ebs2465vbNWcurvzc1vHGWWvEGqs+KbRB2XXjp5UzHQu/N7tsIHSfyWbrPaWsgfW8YK0nx9mf2qLpf9m/bRXa8DC/8pNWKBsUnSHR1wbrjM3KK68UH//4x8ulVrwp7r333nJ2o+FWXjn23HPPOP/88+Nv//ZvY+7cuWXTstJKvY0PvgMPPHDo/0DzPnHNb0TWUXt/ac7flM7CuSp2BhZ13mM4Z8llljw842EqvzwcxJmjH3n8zrexyjvvrMfUrql4atCTfj6GxcsYxTusL/3QNp68K71RzdHXfbWc5umZ+ezPY69NHXLy+kF+mIrBA8/Y1dfqMXwXqzo8Z6+pOB95vjaGyXzNq1zXeqkldd4Zj/G1cdRwxmN4fHlM3hnl8LmWZFp3F5/91MqMetPfPfAoXE2dx+9aY3JOfnJdLL3wSrsO9ylu46mD+nqG1cCbFc57esy6h/WEQWt+etOzpvD0RWv1nIcjB+c9PcYnFQvvHLF7h8X0QOX3mH5eh97kCtD/h1xNa+t2X+7LnHPKtR34UdaJwjKf65DP2sXnmhq73+Nct8biyVxeq49hXJ13r8fuVwwjJVZeh2vmGIspZ0BItEELR8bjn352o21ABpsGbRTKo7/5aM529O/rmDZ2TzrjwcZEusTGpr/5KJd56cwKX+fb17877dp4cHHvPpCJ2y+IkUNe3bv0yc6C6FurdPlSORNS8juUeyz0oX3t1Z/UfKB/0oorxHPWXDneNW+tclaixww4nWX4+pufUzYRumzrEy9/VnPfhi65eulmTy9f5bviCk8ol3DpjIVq3Pa1WWXT8qT+N21p83Lsh15UzmxwM/p5/7RlcwaFDYA2LDpLw9mcsz65ebxwnVWa9a652orxjbc+Nz606zrlrIk2P+uuu24cffTRzf8x0d/1tNNOi3XWWafhnvGMZ8S3vvWteP/73x+77rprnHXWWfGlL30pVl/dNjcrr1w2KPlNyBvM3zf+BvT3ESy5Gut1iOFQeOZda73pg7rf4xqreQ5iafZqzIOa9KvxeFBnndO8H/hdM9vGO0MsVjHqrMfugUWdZa2a48DnypqddQZW6lwtxlvja356Zz9j1xqvHOvGyzrRNs5Z1o1Syz3UYc21vvREYVzhlcNXAvvH/cTOtbGeh6uptWr+jxI5+f25Mfb+eLPi9Z7UQp3BpxyxKwzqbI6da4szw7jNP9O++KiTx/TJKn/2auy5GqNcjSVX4zXHobjtAeteYrTGwuU+MKjm3Vsb481a6wtPTWc0x1FjxcBlLxxa4z2Hr6bu85jeNUY5vPjymPwwHk4qBo48vbwOXjyo8+7PMf6s9K71pEZmNIZTzFGL29gan+vkPrVaYpamr9ekX01rvfy19p7w5Iax+LPWOOXKGRCZuxro8qZDL70r5u53Ucfvc9
hvd9gGgQ2INg3TNipsUPr3dOCT8iibkSXuFdGZkt5lXMW39wXxhiOviNsfHum94e+9OkYOe91go8G3QzX3YAzOZugejr/eca1yNoIP/NogvHvHtcp9Fs1mRWy/zkkf2bjclyG/blb/6Z6blm+80sbk++94Xqyx6oqx8oorlN8NOf+fe9+cpbMgn3zlevHUlZ/YbAC0sdCZj9Jj7963Z+nyKb8ZXj0Ofu9GZQPDfSw6i/LnWz4jVnziE8q69ZXBT3/KivGnqw02UW9605tC93vwR9c3Wuksx4orrtj01+VWL3nJS2KPPfaIa665Jm6//fZ4+9vfHk960qDOFltsEQ899FDz3vA3lWr7f2j0Ioc3v694r+U3PDx+ePyMNU8PZ3IOv6v72+JavzZvzsMqX4vzGuUjh79Ncy9xzpaG9n9IqJM5xpknD4fSg3mUPH1d8bSps/LQixptnOfxZpaxvPTJMXXwUot89lMHdT8xtTJLTWeVcz+xPPjg2uqVAvb3xlfjvRa9UObaemu+64B39TXUWGo60xW31fA+NT5zS9NXLH7q5B5t/d1PjKpGG0d997IG5mbKqgYsMTXa1Pt6LL/6dvXGX9NhfI0h17ZW8vhQ5Tnoixf154E3a82bc5lhjK9L5dW8H4zhfJ0e42urofnsp6b3yzyemuZ61JGXOjXOczDo42GpRw16a32+Ro/dm2Pqud/j7PcxbFbnNVc7lM8+6ng+szUPOVdxGnPk2L2K1ZO+NcZzmfWxfBrXDvq4f4l7QCjAgqT6JfQzrr8/XnJw75uw+OBfNhX9MxnKsWEocdlw9DYKvc1Hb97PdDRxOasx+Pas5r6P/hkT/X4I9Rumv4HR+NWHXR5X37uoPPHJh26P0SPeWH6nQ2cWBpuIwRkQ5fTQjw/6/R86e7Dzi54Wl3+2d9N5b9PR+ypdncVY+P058b6d1m5+rFD3e+hbrHR244B3Pz/mPG+1chbiTbPWjHM+tUX5pitxuuF8k3UHl3k97Skrlns1/LdEdKbkc6/bIJ62ymCToDMsuuRKNXjom7F0X4ru89DlWn+z6zrx5TduGDtuNLi343vf+960N9NNN90U8+bNazYf2tisssoq8brXvS4uueSS4tU3X2288caNZ4UVVohPfvKTzZuJN5W/eXjDeo7Y34DO8v5SLvM+hod1biasc16LHqh8Xg9vVvfAZM2MxvKguafzw1h5M+/jNt57eCwWnvWh8nnsnGI4fMVs/7jf+3i+jaVM9uYxvppmbx47ozk/srdr7JziLi9zmWHMfJfizQrjfxdyaGZ8jKdN8Wo+H8p19a0x1Gjr5/nszeNaf18Pflfqu48c6v4c07ONz34fU19sjc/ePIarsarth4+9L3FWZz12X1df+WoHfBdb45SDbVO43Nv9ua+P4bOKd1+tXmYY423jmZdfsR/MSdt49+cYPrOM5c89qQGbVazz7qceDD7GrnBZ8XgfzymuHeTxtmmNJSem1pda+GoKi7emNU65Lja/hrlGF8saMuNj+NynxiqnwxWfK7VyH8bu9bi5BwQjzdyk4lfe82jsceyVdhmWndFovWyKb7UafPXu4JItNiz9MxqcEbGNjDYX2oz0GPxL6ssPuSzOu/3h8iJNjT0Wo8f8Zdlg9DYfgzMeeXztF7eJbTdctfnQvcqTVojPv36D6T84aD9kqHs/uMlcH+J1f8YX37BBvH7bNcq9IG+ctWa5Gf2GL21bNh/qpzMj//iK9YJLr8TNe8Fqcc0XtmnWqK8C1s3pL9n06dPu/9hy/aeWe0hUh7X/4ZvbxV6venbpfch7X1h+LPHUv9+02bisv/76cfnll/dei/6Ha33TlfLqrYc2F7vttltcffXV5fc+xsbGQpuWJz/5yY1Hl2udffbZ/raYFuf3h48Vdx3Zm8ewtTrZ6+PafwTUkrq3La75nVUP+lCDMazyMJ7DX1N8mWNMD9RrOIvf11BjNE8ehjrOeh+Pne3i5OPhvPeAd9U8PdCZ8O7p4tSLNXjcxed6vl5qZA/1yNcYWHnxoZ6DpaYrftTnFPtRG7dx2et1FHufWo3s97FYZzymrvs9Zh4GJS/1g7xyxF2Kz2sQd3Ga6zqWleV5orlerTceGCmx5ohrrHLwXZpZeTm8R60Gvpq6n3V6zhnl/cBX45hzv8fMw6LkpW2H5mr+mbCqCa8atTptfWHhvR8xrMZ+MF/rx1xm2vhcw8fOEHv9HMPirakY+fDmGjWGHKyr8/ikymd1r8esBb6NxYd6DdiseGoMc5nxMZ423r05XlY292wuwVIjFUdl9MfDI+PxzXNvK5dhcRZC92rw8FyJ7TKs3lz9DAj3e/S0v7ngzEd/A9LNXxC7HXRJnHHDA80bcOxnnxl8uLcbyJsP8v3cJf/64njBWoP7KXRfhr79Smc68PLBXzesn/jhjcu3V/FBXhsWnfX4xMvXizP33Czu+za/1aFLtnrfvnXdF7cpmxMY6R6z14w7v9H77Y6eb4c44oMvLPeV4NOG5T3z1yq/McIa7v3W9vG1Nz+nfN3v5163YTz03e3LDfEv3XzwLV7veMc7ym998EZ57LHH4mtf+1rz7Veqr6/ZPfPMM5v/w3LPPffEzjvv3Gw+5HnFK14Rt912W/N+0O+D8P7gPUKP/KZyXymQ/tF8Znyc7GVIzcw6pzl8bTXc77Gz1EBVS7H7PXYWL/2pgUccD3LugXPVPP1QZ+Fh8hzjWl9YPKqRc97T48y0cTUGr6+ZmDWwXng0980c89lPPvvph+KDR8mj+FHl5eWBz1VePzSnA09bL3x4s9ITnjF1S5P+P54jhmPs6myO8cGj5PFrnA88mSFfY6iBx1mPYbOKh3Wtsd6LuMZ3sc7BZr+P8bNuHyvn3toYv6s/z2Gxc8SZYQ2o5vPhOeedIZ9ZH+NBM+9ej/EPU2c8hsv9yEvbDve08WKpgZ96jGfCwqBi4VDqSTnI5TH5Gus8nBRmmDrjMRw9UfKoMx4z36buzXEbQz77NdYcqtjX6zE+vAXq/0N9+TPDnPs9Zl66tKzqOE/sdbxXjrt6ao7DY8/RB23OgLCQNp2cmorL7l4YrzjksnITOJdZTdscNJsOzlL0z3yUsxpsQFC7Z0RnP/wyq/5ZENXm0iudBen16il55fSL6Kdec1/zwk5ccWxvA9GcvZhT7uHgkiddzqQP9T/7h83i2c8Y/ABhbQOiy7i0Ifntp7eIt81eM7TpYJOg3+DQ5VD6UcHeJoGNS++Sqdu/Niv+7bXrT7v3Q+xHXrJu+Trd3mVVO4R+VHDnF60+7eyHLsX6yps2LPeJaA33fXv7+MZbnhPPX+vJ5Wt4//CN7covp//gL58fz3jK4LKtz33uc83rMDo6Wn5QUN9w5fd27LLL/23vXIAlraornJLEGPABPmJEAkEkESVCoiAgWmhAQUHBIMGkBKKpJBWNQKlRpHjEpIoEE5RghYfyEBggEgyIghgkKUXUKBlgEJGHjOCY4THD5c5LGgAAHx9JREFUa2bo++jZqXW6v77r7jnn73vnImOVc6p61j77rG/v0z2tzvHvv
3uv0Ldi8Xd9ww03zLr5fJNNNim/F6LDi94o999/f5xyyimhKyUwXao3XNf6z+vaQva9EPbn9fXYuK/2DbRP9Wuz8f01v7+LhbxeG4p9qt9T3m9DPeeF9PX9P9XxQva9odin+jXyfhvqOS+kr+//qY4Xsu+FsE/185z1S+j6xyYnEz2JPJ6Y6seHrrlrdBDg6kU5GAyvhvghYVZc+WiV1rnfww8UM/mZHx0cHUDKIWfmnhDxbzh3cVxz54qyXb2A/WU3Ru8zr5t1FUMHhPz45kd3nHXTt27oPu6ArUa/3TE4fLwmrvzADvHGlz2nXHnQ73JwANFh5Mi9X1SuZnCo0RUN/Tr6bX+/cxy5z4vixZs/PfSNWDBS3fiuA4R8un/kHw7epuxDvwGCb4vNfjlO+5OXlI+D6SAjjz7ypXs+9Jsh6vfAqbvFYa/9jVk30Z9wwgnloLBq1aq49NJLy48J6rc/dKig9qGHHlpuLufv9/zzzx+tyaOPX4nVe2H58uXx8Y9/PLbffvvQgcTfoPDK+XBP+fuo/L/C7vc4s8zHvTepgb+l+GraYsiLUZwH61n5zxLqvNfJXG1OTzhX9/vr5Hl43wOx+xSzX8V5eA5uXE8Y1PvSK6t7fQ/0ROE0933UGO8L39JxvPfNNZz1GF+LZX8w8pODban74NHM0N9fr+xpsdmneWu413t6vsUq775avBCWerUarHVpjWvtOT/39WHZS4v13t6PWLyP2lw59+e588Ty5EeuofXWyKzPqTNfFo6+aK7jvWox/hpf8+ccfFb3+V4V+7yLUw33es3MMXfPfFnVcJ7+uQ69sjorBg6t+ck5m+MWPxeWWnilyjFY71K8NW1x4/asWrB4NVfM3PtpzRlYGJ8TO0/MGur96Ku11oDr0syOroB4M2+Si/3wodVx0CW3Dm44H17xGBwYZl+ZmDmccMUDHVztKIeTypUOfcNVqceBBU1XQJzf53M3xzfvfWT03PqP3Be98/df58AxOoAMr4DoHpBdtp25B0T/+N7uBc+ILx25Q/z4E6+KxSfsFCcdvE35tqu3/O4WoW/A2nnr2X5dNdGN6V/70CtC31B14/E7xRmHbRev3vaZsf2vPyOO3X+rWfeNqMcLnvX0+OhbXhz//Ee/Ffvv9NxyM7m+kldXQTgk6Fuu9PGuvzto63jzjpvHK7bcNE5917ajr/zVAeuOsw+PvV/z8hEjdptttomjjjqq3GC+7bbbxmGHHRaHHHJIue+D2vqNkNWrBzft60U79thjZ9XYeuutY9GiRfH1r389Dj744PL7IKeffnq5X6T1RuQ9k98vmjuT10d/acMgr7fmmWPe8pOXTzEjx/ik7BuFdcbr1FjP1Xhf95ieniNu9WRdmnkY9sDcGWJn8WstD/xdmhnmXYyv4Xf19VYsv9YYxC2/52HQcWx+veCksMTeRxwP99V452qxMx7XvMqxZzQzmrdY5xXnMY6jZ8tHvbwOh+bePq+x4mD9+bX6eQ04cnNh5IVDlfPhc2pnFQsPOxeOOs46p1rM8brCZY9zxM4pdhY+7z0zcLCoc9SqsZnHA+979RgfvM+J8VOLfJdmb57D8jw1J3avxzCuMKj8rMN6jrUudT81sp9+KAx+n7dY8jCaM4jxoPSTEtMLzziFk8+Hz2s1vCfr8ONY/PSGc8WTlb7O4oHXXIM8CqO5x+71GnDZD+ucPHkOj19KjB9mdADJkBtLh+EfE9P9OP+m5bHXuYNvxBocFga/zaGrGFyp4AAyo8ODhV/1sEMFV0JKveLJBxp4O8gM+bdceEss/r/BPQrlefQei4nL/3zWN2GVj0jpKsgZg3szdKXi/k/tEkfuveU6H5F65VablR8UfP1vP7scPvQbG987fqfyuyH6SNXznznzVbX6R73u13j5lr8We73sObHbds8KfTRrj5c+Oz57xHZx3ydeHR/ed8vQDwZyAJDqG7c2/dWnhW40P/3dLwn9kOA7X/28WR/Dkk9XWXbf7tlx9p++NFaetuvgUHXWHjH5lQ/H0sXXxn777TurrhjdZK6bzvVDg7rRXOpXQHTg0NUMxjHHHDOrhn6McMcdd4ydd9653Buiwwj3gPA+8fcHOeox71K8WWH8DZtzmfE53i51v8c1xvchrzw+aoznnPe818l5n8N7T/bhPo9hUK21hnPEmSNPX5R8S+mp9TyU8z6tGuThmWdVLR5a88E8Mz7Pe4HHk+fk4VDy+F1Zc3UOr9Y1XJ2pxZllTp0aoxz93e9xi8v5zLT60g/eORhyeFqKzzl5mWfOezvrcWZac2c8bvk9735iX++K5dd6bSjvzzHXyYzXyV7m1FsflhreR3V8joc+zKXkcm/m7h0Xw7h6jxovL3nnPM96roWf9RbDOvpkPWf6sQ/meZ/Mx/WFxwfHvlHvR8xaTamHN2uNIQerOcNj5fC6Olfzey3niDNP3rlWb3nha/5xXGbxU4s5PvaW5/i0zsBbU+fx5xri3Fer46zidQ4gDmFWToO1nz7Wi2Ou/VHs/pn/HVytGB4YOHyUqxPKlQOJvl539lWNmfXZ93UMfPbxqsLNzAeHnZmv66XOgRcviR+tnPkH9dr+dEwvviB6Z+5R/sE++HiU7s+YuVdDX7Grezf0Wx1777B5/MomTysfZdIvlOtwoK/A3Xnrzcoh4qen7DL6Gtx7Tn5VHLXPluVmdP1quvw8dNVCh5P3vu6FcdOJO5cDi3rrBwZ1ONFHvPDq0KIrHFf89Q7lm7IePm1wj8fznzX4lXT5VO/A33tu+SV0ff1u+Vrhs/aMyauOjv6Ku2LNmtXxsY99rHylLgcP3eux++67xwUXXBArVqwIfRTruOOOK99wpUPIZpttFieddFI88cQTo7/Tyy+/PDbdVL/A/rTykG/zzTePI444Im699dZZ937oDcabjPcH6u8TPLxnpJ7z9xO859xLDXLug8WT++S5+3NMDfrkOf6a4q0p9cRpPQ/l8DivHHmYzOPHx9wVtqbyLYT1Pjmu9SPX6ut7ca9iMWjuleewWbOPufrS2xmt+8DfUnl9LbP0wJP7Kp8H3i6FgcerPHGXwrviz3tWPu/bOWJ8rjnGm1U+HrX+WusaNXYue1ZNsa2enqc/e6n1JOcKl1Uerw8zbt/yadR4r5f7waDuzbVqLDm8mScvbQ2tuY+565PJUlc1W315Hq2+sNRyrbFarzF44X1Ob1ify+fezOPNKl+Ng0czx5z1XIM5vpq2WPI1hpw89MCPkpdXuTxg8WWOOZzXgMXjSj24muLHm+vVGOXgunR9WO2DvcyXh9Oe2KMrsfvYv+cKbH/M+QDijaf7/bhl+ePxl1f+MHYZHjT4dfLRIUFXJ3RfiF3lKGvM7V4OebgCIiUeHDC48mEHmtJz5vDyZ1fcHg+tnhg9Le21v/yW6F30jnU+hlX+ET+8H4RDiD46ddz+W5V/7OujVu/Z84Vx3ntfWu61WLXON2LtFg9+ape47K9+p3z06oCdnhv77rhFHLLL8+PEt/9m/Pff7FhuKh993Gt4T4g+zqWPXb31lVvEO37/eeXre3Uv
h+4D0Z50UNE3Y+leD/3QoK6G6Pc9lv7jq8rN5qXeOXvH5LXHR/+B20OHLD1P/YDgiSeeGPvuu2/oBwg//elPx9KlS8vHpbTe6/XioosuisMPPzx0g/oVV1xRvt1qenp69Ho9+uijhTvwwAPjgAMOiKOPPjquvvrqWLly8M1i/gYqr+3wl7QpoJwPzcc9an7lxnGsO0/MWpfiram4/FxrtcR6njks6p4cw+R8a+5+YlT9eDif94FfHo81z16vU/OzDoeS71LvXeM8517fBx6Ufsyd89h9eMl1qdcQ12LxoarJoH6LZd39Xoe+Nd591Mla4zyXa+S5e722fBrkhtMybzHuJUa9Frma4vN+8pGvMZ5bX44e8KjXrsW11wJWWmNquRozVx6fK3Ftf/SXR8PnxFKxPDxPnNlBtZl61MCf1Xmt5Xn2+9x7eeyeWkyPltYYcvRByc9X4dlDF8/fX2acxdNVR2swudZceBivo3guLAw12IfyXTWyDx5uHAuf1flajN/Xar19nRgf6nniLqU3Kq/HmufXnHkx2h9dfWprho7CWr9xbLkJvQaOqtp/6XgxHUJ+8OCq8iOAo0NIOhjMOpQMDx58JIurF7MOJbMOIjpgzL764V4/pBx/3Y/isd6UbznWrnqg/GO995k9h4cQvqEq3YxuP1aoqxAPnbprORSMDhDD+0X4CJc+ukWsr+bVDwjqQDK4QqFv25pZH9UY5uRXff02yOCqzODret2n/IpTdy1fvyu/rtqUmue9Oaa+86+xdvWKWLu2P+t/CHSY0C+W66qG/o4Y/L0qr4MIbz7+HrNPV0v0yD78WeGzZl9tXmOUc29rH5n1ufO12L05xk9flLz7lfOBR5o51rKfOeviulj58oBFa7yYcWyrd+7nc9Ws9WMvtb7sI7O1OvSC8Tk9ahxr+F1Zk7ZY9+cYvouVpzYym2vUGHKwUnHrw8Jl1vfrsXrTF4Ya5PEzZ7/Oag0eH+r+HDvX6gtDPddWT3lqg7zXaMU1Xjn31/ZMjxqvNd9zjacHvNeDpwaqvD9gXX29Frs3x/LX9up1MsMcD3tFyUtbwz1wKGuwea68cvhRfFIftbl7c+wsvchlb22ON6t72TPKWmY0Z22c1lhyYtUr9/Oa9IJB58LKUxvzYXMNzXm09q2eeLw/uS7Nfmqh9ES9Vo0lJ1+NgceHknedL69a3ncuvPwMZ9lHqwbM6AACgLIZn3tOhaem+3HFDx6MPxzelM4N5BwuBocEu2eDr+nlKgg6/KgW34SlQwoxtWbU6p15Y/kY2JnfXRZPTM38P/pln9OTMfX9y6J37ptGH73iI1j+D/5BnA8neT48tAwPBGIGB4iaj6/85SCCZ7forcPLY+vDg4r7emftGRP/dmhMLfl89NesHP0Hhb8XlL9QnxOjtTcDnCv+uahzxJlr9ZUvD+WyP89hMk/f7Pc5LOo1vLcz1EVhpfCswaHk3Zd5reHPmnln6V/jW5zycKh76U+umO0P5/G4tng4SjnTivGi2Zd75XU41+ypzfGzxlxKTkp/lDX8mjNY61K8rtSA814eu088fmK8Wd2HN/fHI5YHOWlruMc59sB6F5+9Pm9xyqu292Q+rqezmYGVtgYMvX2/8PTIdWCzeo1WX2rSlxrO5n5eS2tdD+rnGs7k3r5GL3JeTzn26TXcC+/qHHHm8Ws9D+qj7IF59vscjzMeZy9z5/BLiVl3v8esS+FarDj5GM7kWHP3wrjigXWFRZ1TjNf36nH2w6D+XJ1T3VZPWDzUYI7icyWGQWFQ+Xwor8G6cx7jw5tV6/LX1NnSLP2hdbjck7WEjKZwKHuAk2qgI9Ces7Pj+M6PYHnTVtyb6sf1P34k3nXpbeXjWFy1KPd/DH/bY9a9IBxCRveFDG82t8NIudIxWucjWMOrISn/jktujW//5NHQ75TkPfYfXRYTV31w9sewuOIx6x/7wwOGcqPfDuHQwWHDPIXl4JDzM1c1uBqy7oFnXWYdz1mvjd55b4qp/zkj+g8vjbXTc/sNjvwabJyv+77Y+JpsfE02vgc2vgc2vgc2vgc2vgc2vgc2vgc23Hvgl/w0o78I5uP+Utw31e/Hd5c9Gu/78h3x+nMWD34nxK5qDO7rsPs87Ot7/bDBVQ6ufnD/yGA+c+XDP8Z11NV3xgOrJtY9fOj/VZieiul7vxW98/YZ/TJ5uXqhjzWdMXhU58O1mY9JzRw2RjnzlJzmw7qlZrmvQwcasdbLDjjl3o9ZN8bvHj0dPC58W0xe89GYvv1LsXZy1TrPrfZ34yddj8tf6PCPGkcOBiUPz7ylmcu+Vh3nFGfO56rhc1jU18bF3stj9pl1XD2td41xvLN4yTGXsleUNbyo8hqsZ3UeJmsXw5r3cJ71LnV/jjPn+9VaHp5zVtw41ms5qziz3sc5xZmtzTPDPHt/1n39eeTezNmD7zHHeFEY5tI8yLknx9TJLPPsr83xutZ85OipuQbqPHlnxGXWGerAdCkcHua5L+so/d3vMb4uzX7mNYZ+rOGVKucDjzRz2dvivIbXcX+OM+McazCa+2DdtWvv8O73OLPqBUNf5s7V4uxnTk1n1JcHeffDoHikec/KMTzOLBxKzRrLmmvmcq9cx1mPqcP+MkfemRxnRuuM7KWf8sTu9TizzOGYw7iyNk6d8dg59evqKa+PzPocLyqOOPvoOboC0mUWDEDBvCldgbh75Zr45A33xZvOv3l4NaRyE3q5MX3mMFEOIFz9KPeQsCaduSGdw8jAP/i639efvTguvuX+mO7XT3DlxZ2aiMnrP1muJvhVhtHVCR0UuBqyAJ1XDQ4vo49f6YrIHtE7+40x+aUPxPSdX43+48tj7fTU6C/QX2/i/PdSm+PNKm/tUV6zMTeZq1aN9Vzu53P3KfaemtcG+cz6nPcoPIzP3V+L5YVjnRz1yaOex4vmWjAorPtZa7Fi4PDCo+SzwjoPI9Vo9VU+80Nk9H6Ab/X1PKxrV2/646EXSl7qD54r6n5nct5r1GL8qDz0cL/n3KuYkVkxzmm9NryPx5kXSw33zSWGdZ69zJWnBhzzFs9zd7/H4vB4Dc+5n9i9XTF+VF6NLoY1fLAo6126Puy450zNrr6ssVdX1rKqL73dTz9ymctzfM7Jwzz7Nc993UM9z3kMSw6/K2uu/nyVbw1nPHY+s17PGcXsF82szzOb5+71OPuY05O5lEHsa3OJ4dH5MvjH8exdPjG1Qa2WwsCj1Gxx5Fs863PRWo0ax/PVWh6eq7HKwYvNfurVWHGwcKjXyqyYcgChOGYZvSAguexnrkOIbga/cdlj8ZH/vDv2u+CWeO3Zg6/qHVzd4HBhV0OGhw+ueox8w49ajeZ2SNFN77rScsJ/3RMr10yW9uwxq/bcX/NwTN993eDjWOe9OXqf3St6Zw2+otcPJesT6+Axr8OHH3L0a+3n7h29RQfG5DdOjv5934n+qgejP/y4Fa+3nlNtKO+e/NyZw3od1jLvc7iawre0xpBrMeTdl2M8WbVv9g4jlY+Rmdo8M8zx0sPnnsu98hwOhaUPfld58WWOufs
9rrE5h195DWp2KfvJrM/xeB3P0QvGVYx7mZPLXq1reC+PxTmLnzrMpfjgncXnvTzOLPXI1/rB46FvZumNeq0WSy16OEOOPnizwtQ0e5mzHxjlNVBi/K6w7qVOFye/s9R0Fr7m8xy+2h6o29Lcj1poi/NeHlOvi2ONHjAo6/4cyaF4s7JeY8mJkc8Hc3hXcc7CwfgcH7zP8dUUf02pAVfrW+OUy2yuAYePeWY1zwNvF1vjqOM9qOWKLyueWl/WpK3hHo+9nthaDfd7nFl65xrOeAwPV1P3ezxXFl+NVa41tFZjvU6NZb3Geq7GkqNGS/Gh8jEUe588x+cKL82P8hEszHkxz2ksv9Z8uFc+3aB+z8Nr4sKbl8dffPGHsf+iW2KvcxfHbsPfDhkdLDhooDpo6CNaZT64CiLvrmfdGK87e3EccNGSeP+X74ir7ngoJqcHH9lhP74Hj0f71n/pPXh7TN18cUxe85HoXXzI4MqIDgJn7jH7IFE+OmX3agyvWvDxKg4rfCSr3EwuBq74jdfBQx+vOucPonfh22PiC++JqRs+FdP3fif6vcdirf0XMnsf7dtf6GGMZ5yCysfIDH1yPjM1PrM+l99rMK/1UQ6WPvjzXD681MrzLtZ7EcN7L4/x0a+lzhC3vDnvfmKpfLXnDO/eHOMZp5nzvpnltVKe4TFs5pjDw2bFJ8XrOWI4zX3MhcsMPLWz5n3gl1LLmexn7pzHzrZi/PTzuerTYz58y6s89dQn9/Te7qVeZuFd4fA6S877eOwsXucVM3IM66rYH7A1hct94cUorg08mWVeY8h1seN4sXiok9X7EEvxzZV31nnqtDRzsF19YVQzD+VaLHl61Fj2iTfPM8M8+5i74kW1xlCce3axcNIWSz33Entt+XiQh9W8NvBJM0vO2Ry7R3HtUetLDr/vk5y0Nsjjy/vOtbxGjaWO1FnnPG75PS8/c1jm3oOclHz2M/eaztVYvFnVgwc1cl/vB08PvJqPPoJFoWwm70px5fJwnxqtmZyOHz/8RLlR/d+//0D8y7fvi4989e447Au3xUGXLIm3LrqlfGTrDefeFHudszjeeN5Nsc/nbi5XT9520ZJ492W3xQe/clecfP295TDzzXsfiZ882ouJqZnDh++Z/v4kyZW9as9TvfI1vf1lN8b0bVeUr7edvO7EmLjy/dH7/B9H74IDysehyoFh+GOGHDiK2n0cPp+5ErJH6NureufsXQ4bvcsOj8mrPxST3/hE+Tar6aXfiP6Ku2Nt7/Fy8GB/Lc2vMXP59Tx5rmiuw+sDxzz78hy/8nlkb57X/OSyN8/xSbWWFX9+vsyd9xhOipccc/zk8xwf61nlV45BLBXb4vE7vz5srkNfaX6wF3rSz+d40FwDb62vvOJ4wHqtzOOBdfU1uFbOe7iHmP0yl3rNGu85eJQ67iHn6j1qLDwq1mP4Gut9WjEc2vJ53r3EUve0YnzOEYvhuaG5To3HAyMl9pqK4WHy3Dk8zuEvhYZ/aD33dBYehjVqMa/1dg88Kq7GeB5v1q6erInxmBpeP8c1PxzaYmClGlnJrS/v9Vuvm+/R49yzxmf/XPZLHViU5+41fP85hnO/x9nP3Dli1qTsz3Me0wPVmse1GrlmAYZ/UDt7ch6GvFSDeYvHB+8Ki9ZqeA9nPQ9fU/cRo/hrfVnLmtm8zly+1sDT6juOmwsvjwZeV/pKRzehYwDCpLzHmtcGPComc7oqogOJPja17LFe3PnQ6vjessfia3eviC/c9kBceusD8cXbH4yv3rUidNBYcv/jcd8jT5QfGXx8Yiom7IoH+2Qv9JXSO/fXGtzI35+KtZNryse0dN9Ff+U9Mb30+pi6+aKYvP6fYuLa42Li6g/GxJXvi4n/eG/0Pv+u6C06KHqf2698xe/E+W+N3qK3x8Ql7yxXNSauOjomr/vbmPzWaTG15NKY/sl3o//IfbF29UOxduLx5rdZ1fbKHnmOWXmu+Fo14ORjwMKg1HIvjJS8+8Rm3r21OPu9Hj1afefKUifX1rxVo7ZXcmKc85geeNl7bQ/jOBivQe8W632dVwyDKucPZz12Nsfw7NE5YpjW3vF5DWLqd+25xTtLb3JSH8xdYVBnianhHGtdCoc6Tz/XXAtO6qxiuMy4z3lqZFZ1vAY+Z71m5jMLB+NzeqHOZj+cVGv+fD2mRvZTj3Vncn9ncwwvzRxrmWHujPcfx4mHpScKK20N2JrCZ5Z6man1xcs+qUXtFtPiWnyu4z5idC77xps17zv39X2L9bmzcChr9HPOc/hqiq+m8tNLSkydGqMc685kVp7WmA+f62i+vn197+whK57a3vF6f3JSRi1WjtcIddaZXAcWDnVejNcgdg8cyhr9aopHTOZYy5zyGtL5cNShLjzzda6AsPDzqLwA67O3DcWuz16fLGYhz/nJ2sPGOrP/sb/x9Vj46/GL+L7+RXzOC/nPyoZ6vTZU34W8VmIXsu8NxS70OS+E31DPeSF9F/J8F8puqH1vqL4b8vVayHNeCLs+z3l0AGmdarQhDRVHc6PaKaqY7Y+58vRAuzj2QX9rV0JYrwXjmnnnMtv1OnVxWqNPVnqUTac/2Cd9UfLJXqasofTT3Hln8ZJjjsKh5FE4KbmszuJzjpxzYnyuGJ+z2VPjYJ3z2Gs4T+zeHDub43F89tfmuR/zmjfn8NYUr/bIg5y0a+Dj+TFHM+v18HQpvHPKtRjfB6xr5vCjrMPQ1xWPFE5KDFtTZz1usfIw3E/sfd0rxuf4u5Q+WZ1hn8p578wwdxbGVXHXcN77kZ8PC4POlZU/P2/lWoP6zsGjc2Gd95piNc/DPfTxHAwKz9y9NR4/il9zYhReSgyXFUaKlxzzzNR6Og+nXNegT5e2+C4m963NnWe/UmL6jmO1DkNNsc4Rs96l9EVhqak5/Xy/7oN1rfWkjrP4YJmjzijHHD+K3xWv5xT78Hn2ia/VgHdWOXgYlPw4Tj5nPM69qJX7OuNx9jNnb/QWw4M1vCj5rPRz3hnF5ZfQMeYCMpBzkJyr18BbU2cUw6E1hpyz+D2Hr6bua8U1TrmW3/M/C3YuvVt9n0y29lrrubeGvy7EucZ8WGqgPwtWNamP1vastdqAkTqnmHmNa/WFQWHp7+q9vT8s69SgJ8o6flT5PDwHJxUDh7qXXtTLLPMWW+Poi/oe8Hf1hXNVrIdzPmdd6v3YN5z7ajx+98GicD5XrsXK5wPee8Czd2o553Fma3P3e5y99EK1roE6S75WAz77fZ455gtlazz7p8c4zftkXuPoh+KtaY1XLrPK5eEsfrTmdz6z4pxlXUyuxVqXei+PM0NPNPfK/Z2HQWFR+jJ3thbjr2nNr1zuDas1hrPu9zzerO5pxZnxeWZyf/fm2NnMaa01nFMMmxUef56Tbyl+1+ylp+fdn2P3zZUVo+FsLc69YNDMqL/voca3WNVaH9Z7tvj/Bz2Mcsfg7AXlAAAAAElFTkSuQmCC) scikit-learn을 이용하여 기계학습을 실행할 때, 자신이 하고 싶은 분석(분류/회귀/클러스터링 등)에 대해서 
적당한 모델을 선택하는 것에 대해 도움이 된다. 그리고 scikit-learn으로 간단히 모델을 바꿔 기계학습을 하는 것도 가능하다. **- CSIC 2010 데이터셋을 활용한 웹공격탐지**![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAhkAAACVCAYAAAD19+gBAAAa80lEQVR4Ae2dT4/dOHbF3ycyvCgD/gD9DQyvgq6FN943YKD39sJob73wOjBQXsWAN8bAu6ANuJBFJQY8MCazaBSCJGW7M5nMVGfi6YQBn959IiWSoq6o9/TnV8CDqlQiRR6ee+4RReltDD8gAAIgAAIgAAIgMAICmxHqpEoQAAEQAAEQAAEQMJgMSAACIAACIAACIDAKAnuT8duf/sv84+//kw8YwAE4EOTAv//8F2M/6AQ6CQfgQIwD1ku4P3uT8TeP/sF8892PfMAADsCBIAf+9jeXxn7QCXQSDsCBGAesl3B/MBkkFJIGHMjiACaDxBJLLOyHG8IBTAYJJSuhCGHYIh7CAUwGXBAusIULMQ5gMjAZmAw4oOIAJoPEEkss7IcbwgFMBglGlWCEQGzXKyaYjPWOPXHP2OdyAJOBycBkwAEVBzAZJJrcRMNx6+UKJoMEo0owiMZ6RUPGHpMBB4QLbOFCjANHNxn331zvn265fHMxQsL7aN7KGa4uzf2SpuLJpbmUut9/HKHtEDdGXPYfnxuHNxnEMrw/Pu8Zg35jUMZkdCVb9/+NRN9tMi7MiyvJ5Hnbt89dEBTC5LY3ZR5yjytpbKgLMzcRDvQ3GcQyCcrVZn5fAx8wGSHBzjUPuceFzsE+zMLMOYDJIEmuIUnSx2E8x2SEhD7XPOQeFzoH+zAZM+cAJmOY+JK8wG8NHJiByQgQsVdyV9wuef6lvi/TuL3jkaJXOwL9mHmS8bCgL6szTf1NRiAGesUQsUzMBTiE9kxae+ZvMpomwDUItVWofmseGyGnu07EmGvz4kmE2L0EMlJHpA2ICXhNnQPFTUYzPonlSSePqfOT9k1DQ8ubjGZib/7dEBI3oec+XeKWsdV7Cz0HC1NgcVqjzXvyuibD62fCmGAqEM6FcKCEySCWp5EI9pq2EG7Sn+nwaoYmI2ACzLV5+3z3+OtAk1GL3rW5vKofrzUho4HJwDCsWJSHmwximWQ4nWTIWIwzFrMzGbUJ8KYOqj+uvpjHT5rv2si/j3vfNSjbR1ebIliZmf27NjAZmAxMhpoDxPI4ok6yBNcpcaC8yQi9V8JNxo0ZAVdoOm+XuCbAWEPx0bwNvkPji3m8F/88k+G2w9i63fKtc+xuh7j9CvV7XweknxLpaUsZPg6aySCW1eYM/pbhLzgeBsfZmAw7y7B/u6a3DuPCPH7v3Nbw/mdBTJuM+y2jYs1LE3x/RmNvhjAZCOWKjaTWZBDLTX3hbxL+cjkwfZPx5KN50TIRzVsiP5pvdsddtmYU0ibjG8+8pBds3n/zxVy6MzGYDEwGJiOfA8RyPlYr5hWGY1mGY/omo2EC9gs8s4Oww2TYeqxZ2K7n6Dm4mAxEM5uHPbk1g3p7z2QQy8TLDHiNySmrVdM3GZaUVpw0JmBL6AyTAfERPzjQmwO9TQax3BtjEl7ZhAeeh8ezvMkIPPTh7XJvN3z3o3EXXO7XOvQQ/PvPL7e3Uy4DizPtI6hv33w091trLHKAvjCP31yat1fX3lqQqi/28dYvVd092grBc3DnmLnwRGUyEvFCLMP9uXCfduZzdb4m44k1AJ59Sf7Rx8A0F6YlK7bv6BjlK+rzBxHCg9UxOFDMZBDLzHAkzOcxuM05y2nqPE2GuxYi7QC8/2YZDe/ROq948o+sugkkxHRBHChiMohlYmJBMYE5aZuTMiZjAEk0t0vcMubqi3nx/CJ4S8Q+nuo/meK+/6INxjff+Y+q2rd82pd77V++5fSzf92h87GPoJwvB0qYDGJ5vuNP7DJ2ORyYpcl4/L6eVPC+t8QxAXXnXeOQfkTVe6eG90KuGJmcRaWpL1ILtitWJ/vrsQOLKWNRwmQQy3B8yhynbcP5OUuTcfyrHztz0neWZPhgQXgwnBIHSpgMYhlOT4nTtKU8H2dpMrbvtagnM7J/y1o3wZoM7hEz85TFgRImg1guL+okSjCdEgfmaTJsEmBFelYimBLZaMuyxK+IySCWiWNM/aI5MF+TsSOmfbb+7Xv73ormhEb1Ve1D3pOxrZv3ZCw6ADA+euNTzGQQy8QYRmOxHDi6yUDk9SIPdmB3TA6UNhnH7AvnJpbgwDgcwGTgoBfroBGNcURDcMVkjIuv4MwWnOfMAUwGJgOTAQdUHMBkkPzmnPxo+2H4i8kgwagSDAF6mACdMs6YDDgwZX7StmnwE5OBycBkwAEVBzAZ0xBxkinjMGUORE3G3/39vxoREbaXYPEbMCAOfA780+//aOwHXHxcwAM84EDNAesl3J+N+we/gwAIgAAIgAAIgEApBDAZpZCkHhAAARAAARAAAQ8BTIYHB3+AAAiAAAiAAAiUQmBvMj79/Iv5t0/XfMAADsCBIAf+dP3V2A86gU7CATgQ44D1Eu7P3mR8+N3P5uLDJz5gAAfgQJADIiroBDoJB+BAjAPWS7g/mAwSSjChxAjE/vWKCyZjvWNP3DP2uRzAZGAqMBVwQMUBTAaJJjfRcNx6uYLJIMGoEgyisV7RkLHHZMAB4QJbuBDjACYDk4HJgAMqDmAySCyxxMJ+uCEcwGSMlGCe3t2YzebUPJX6Xz80NzYbs7l7phJ0GTC2BO9UOIDJKMNFtKIMjlOJC9rhjycmQ0xA4S3C4RONwFseHpiMMmOKVpTBEY2ZJo6YjGxzcWbu2JmIxOfOs3qQEY4aC4J/mVhgMmLjilYQ8zFurG8/JgOTwe2bbA6sTyBSyQKTEeMDJiPFG/4X480y95cxGbLeIHaV32sdwrl5evekWr+wq+/GzVPz9HXmALw+M3dubow7q9Ai9esz86Bxjs3mxNz5/sy80iScZ6fGln/gtJGZjMzx0uBNmUkYw34mozvx7mcJbz7Mi8NtrJ84s4sn5sbdhx1aodEXTZkI/9GKSXC3lRPQlNHGpYzJ2AZO4lZCtsmoDMJebDzT4ifxNknOzdPvT/fmJG4yzs2Dm4m25gqcQ8pX31uhcxZ5fvhkMBkRkXVwa48hZeaEyTFNRhVzsTiOaYVGXzRl4jxGK+LYzIn7tDV/HIuajBvfnw9yQ1Vi3pjNTfdqxF5F7MQkaAB8cyEGJWky7p6aB8/OvaulV892T39sNqZvP7bta7QNk5FPQgJ2nlj1MxndfaxiJmYQ3PK7WZHtDKejOXaGUi4gGvFoOebqSD0zmtYXTZkUn7f1NdpWncO5SJGZ4eyLMxcbfk/hz/8Oz48iJkOuKuKJPaNjEliNGYGKFPXsQ/McexGw5uCuvQ1SGZLmcVnkkhmZXsFdCV7TmCAcGWPOrMYgU57F6RExLmoyJP6zYu/MPIhe0IhWNMyK1N9HXzRlknijFcfmLOc/vC5PxmSIUWkm6z0pIgbAJnP3PqyYDpXJEFHJErrdYO
3a1TwfJuPwZN5zJSn0tKsUTiVNRhUvDWOgHMdQXRp90ZRJYotWzNpUJ8dWydU11FnQZAwTiJAweAMgBqAx1egd40yJNpN+87jg3zsRiBqdFpF2V02BNmEySOZBjrU4NF+cipkMie0+5j6BY0hLQvu88ZE2OLGsKePV6bURrYhjM98YoE/dY1fEZFTB2FiEdbPP0xoyxencl/QC1HZEVqenjqnvu/YyGa+ddR2OyHQSKGFKgpjYhayFhLSzbS38uslAnWDUhwOlTEZnMu/FZVmv4T6hotEXTZkEf9AKZjF68TjBpZnVM57JkCdD7OKsTlByDERO0OeaDKnLMUbWFDUWg6YFNyRmNTEwGTUWaRw5bq74FDEZMoNQxHzXizj9iwyNvmjKxLiMVsyV47Q7xun8/UVMRnsgzo19WsO+r2L7tEfn7EA6CKv6xRikTYskd19kmoBIXY7J2JoiO/virFZPmKOu81T/d9paVEyb/eHvNgfBZGxMSpiMrjjK70Md03YBuF9Ooy+aMmHOdfURrQjj5o8hx8wVj5FMhhBiF6iNF1W1wZLjnKTcSvAiIqljcmcypH2y9U1R15oMWRCWuvWBcAi2bNt8XwYmw01GTiLPwOp1fUETjl2NvmjKtNuKVrQxWWo80K/wWI9sMj4ZCbK8mYWUgcgTpK6rhjQRRFjii1hf7e6t2nd5pN4OiskIEy6NP2XmhM9QkyHaEDYGeVyQOjY3T7037vo45lygNPVFU8ZvM1rh4+GPCf9bCx4HMxlpIZGAjif3C7ndkJXcO14r3polqQkvohUyRfK/LoNhyYPJqDFdSzCtrZ/DTEZGzCfitIqx6pXi9vZIyvBffMg4V0tfNGVqzqMVNRZriwv664/96CYjd2ZBgjKU3LeDllid7Q5q7vncMu7vwXbsvg8lb31JBTAmwyeaizG/LwObQSajldT7YSJxmr54qeuU4/voi6bMBVrRWA9TjwFxv04sxjUZIiTBt+w1AE/ehsi4qthd9QwzGfHbJa/s96J0XjHVfcJk1FggLsvEYojJkASeaxI8DomutBZ4JnDW6IumzAd7ixit8MarY0aKYxO8XQB2BUyG/b4A+y2p7lMZdiFl/WVlzQWSIjD+VYUYicR3l2SISpfJ2AuA195P/tMwGefpCgxMxrIDp2v81/D/ISajK04Fv5BWhPbJ8fGtRl80ZfrzHq3oj1l8nKlratgUMRl35J0YoW1gDUUrqMStyRVKsJ7UotCaWF3iJQIlX6TW2gbaqxm0Vh+lbwUMjKY9lKk5AhZlsNCbDJkx7I7pVhzt1zs1Hz8P/N2MNYnBPvqiKSN6lrlt9VHO2Wx/Zn3wuwy/wbEMjgVMhp0FODU3blaLsCRp37CrvZ+5sxvS4N3VQSyA7Dcp3nXrOlHcpkgt/Ny93VPe4bETnHh7pd39tghHP7wI6PnhpTYZkkQ7DX1YK6rYCpiKpnkIaYxGXzRlehgCtGJ+3Eev8sesiMnoB3h1FaO6F9sjcPu1KR+w3HoRjvKY5mLPcYfBXm0ysuMYrYDLh+EyOI+H8+FNxnYxVeJR1WwBGg+UUQgnV2+hq6ul9pl+LXql/egmA61YNH9G0Vk0Z3KcObjJ2K6J6JwmnZmBgNiTIzYCNn4MjW0y0Irxx5A4AeOxOXBwkzF2h6ifoIEDh+HA2CaDcTzMOIIzOI/JAUwGsxDMQsABFQcwGSSnMZMTdS+DX5gMEowqwSAAyxCAIeOIyYADQ/hD2XXwB5OBycBkwAEVBzAZ60gSmAHGeQgHMBkkGFWCGUI6yi5DtDAZyxhH4pFxHJMDmAxMBiYDDqg4gMkgOY2ZnKh7GfzCZJBgVAkGAViGAAwZR0wGHBjCH8qugz+YDEwGJgMOqDiAyVhHksAMMM5DOIDJIMGoEswQ0lF2GaKFyVjGOBKPjOOYHMBkYDIwGXBAxQFMBslpzORE3cvgFyaDBKNKMAjAMgRgyDhiMuDAEP5Qdh38wWRgMjAZcEDFAUzGOpIEZoBxHsIBTAYJRpVghpCOsssQLUzGMsaReGQcx+QAJgOTgcmAAyoOYDJITmMmJ+peBr8wGSQYVYJBAJYhAEPGEZMBB4bwh7Lr4A8mA5OByYADKg5gMtaRJDADjPMQDmAySDCqBDOEdJRdhmhhMpYxjsQj4zgmBzAZmAxMBhxQcQCTQXIaMzlR9zL4hckgwagSDAKwDAEYMo6YDDgwhD+UXQd/MBmYDEwGHFBxAJOxjiSBGWCch3AgajJ++pc/mn/+6Q98wAAOwIEgB37+w1+M/aAT6CQcgAMxDlgv4f5s5I+vf/3V/M9XPmAAB+BAmAO//vp/xn7AJ4wPuIALHPjVWC/h/uxNxpf/+MVcfb7mAwZwAA4EOfDn66/GftAJdBIOwIEYB6yXcH8wGSQUkgYcyOIAJoPEEkss7IcbwgFMBgklK6EIYdgiHsIBTAZcEC6whQsxDmAyMBmYDDig4gAmg8QSSyzshxvCAUwGCUaVYIRAbNcrJpiM9Y49cc/Y53IAk4HJwGTAARUHMBkkmtxEw3Hr5QomgwSjSjCIxnpFQ8YekwEHhAts4UKMA5iMCZmMs9ON2WzumTNp0/kP5mSzMZvTlxgBwYTtZLiAyTheYkErjod9LJmyPzwmmIwJJS2EI0xSgneauGAyjjcuaMXxsEeP+mGPyRjVZLw039qZiMTn2xf1gCEcNRYE8vSxwGSUHCO0gpgvyafp1IXJwGRMZvodkZmOMOSMBSaj5HhhMnI4xzElOXeYusqZjPOX5ttbt52r9tvm5PQHc3ae6sh7c3Z6u1p3sLvaP7l1r6NMqL6e9chah9gMQ2wNhKqPofZem6sX98xmc9s8dPBhJiOC1ahGkHNqhVttMlRx1DPGo5zR1HOoMhEuohVcCEX5HOHMhI4vYjLePXLNRfP2gJ9Ia0GzpqR5rPwdKxMCVFHPNmjlXIFtwGTo+hhqb7Wvqs9Z5Pn52mAy4njVvOGYqWChMRmqOEpeFNw27i3HJDaqehT68llTJs5rtCKOTXK8J5Ro19zOAiZjN823nYF4XzvO85fmoZiIWz+Yd40BrxLqxmxuubMd9ophl/QDZUID5R5fz5p01LMzGSePnPY22uefS9dHvw4/ULbtbvSx6otjPEQUA6YnVTf/87EGj3Hw6G8yNHH03tMRP8bl4saJmWgc6+rR6IumTIqjaMU4/E1hzv/KYV7EZDyMJmsJ7MbMhCRP93HNvThImU33FYqyHrmayr4C+vzS9O7jvj+hwarEtmlyMBkhrNg3VcHTmIzecZSMcZn9G6oVkXqS547olKYMWlFfnCaxQAumqgWpdhUwGemBrxKnbzIkyTeT7L6hcjuj4wpeW4+UyzcZ/fu470soaHb9a54fk5HGOYlpCGf2jSre/U1GenxDWnElSTuiBdmxrKhH6u6jU5oySV6jFaNyOIk9+lEE+6OYjKCYuAMqgtC4ndAkhLaeSgh849Osu8/fne1w+/Z5dwUU6FtVjzP1KzhEBLZPGzk2neDApz8+BzEZdn3DdoG2ExdOPOXHXv96OuuW+HRiWVMmzj20I
o5Nf75S13EwG9lkyD1Yd02GTDOGRaMiQloQqmP09VRC0Fjweeu2+fbRy9bakW5ihvqYGMzEepBgu6zAYjKKOOrusUyMm5PYqKfCqazJSMSRzGy21m/t1mTkxkevejT6oimT4BxaQdwvQHdGNBn14kv/tsC4BsJPAOGgjyZzm9DtAtbsgY31MSYcCSHdP13SMD+YDIQmm48x3o2zv5zJ6I6jdy/ueY+6ywvuTk77XRjk16PRKU2Z2NigFb6Wx3Bi/9RxGslkSHLfGCsCPgjp4KmOlfKphF+qHkvS9+bdix/qR2qd6U+/7S6hpY2hPrrH1b+LufFNV/P/Tp9lOraFYV0m3j6OAZtxOVDGZOTE0Xvz8FSeJGmY8Fv3vHfNpMe8Tz0afdGUCY8RWhHGJT2+lJkiPuVNxnmdrMMLpnaBGHyyREgiwuMk3NbVXKl65Jx2K3V2rNfo7KNbZ/W7LAhL3fqohMXpMyajYVDbuE4xqNbSpsEmIyuORAs25sS7nWkvDGR2oyNet9rRtx7RAiceWxokdcoxmjJtTqMVbUzWElNL7GdRk7EPjuTVRTMwQ4TaBWtyRqFUPf75pQ+x2Qb5v72t4r6tM0UOK4bb6d1kf+QxOhGsa9O1sj51Tv7njyt4lMdjiMnIjSM5LhaPuTHSvx6NvmjK+OOCVvh4ELfzx6OYybCvB7eJtPseqQRi4upDruCTSblUPf4gihiFZmHy+1jXKfXZl441X0jWDCBmMmrcmtjw9/Sw0ZqM/DiSGHeMd+dsQggnTT1Spo9OacrU7UUraiyI9+VgUcRkSHCEEnOILHJ89Ooksarara9UPW6dsXuhcq7cPl5tv59B8/ZSR1DFbLEmg9smreR6fBHSmIx+cSRJ24mJFg7jHSNt7aNTmjJoxfG57OYAfi87HsNNhiYRJm8fiGgkriBEaErVI/VJX5rrRWR/z2T/7tG9jJmdekCZyaixINCnj0Vvk6GIo5jp3/ND6mzGrMT0bquqR6MvmjKfrw1aMX2+7znX4Bb702M32GR0OvfggIiRSHx3SSOhh8/Tvx67uPNh83tWtk+XyCKy9nspwudOA6shHiajPKaacaBM3jj0NRmqOJKkvWm/x6Ze+GkXhdbfQxQ8j6Ie+9SZ+/1L/vem7GYpGzqlK5OHt8tLtKI/Zi5+/H44/AabDLlCkOfWo9tmMO6vQBqPpEXeVdEKKjEvPeupnyAJnHd77vbaCXUfpY2Z21YfpW9N7DLrI5AOF0hrxLqvydDGkZiGlLa4651acbSLl771bMdUYtBqQ/MTe6eOpkzPmG71Uc6JVnBrtSeXxtau45kMC4T9plbv+ffbkdsLuyuKWABl11Mlne0V0C3/ufsT+7TIi/pqyAVeK45uHTm/IxyYghyeTOWYQ5kM29932zVOOTGb1or8ehwu9tSX7fhoyvRIDmiFMz49cJtK7KypHYNNxmHAqh5pdadED3PewxIZ4Tgs3kvk0CH71NdkHKZtaMVhcCZWwTmPA/MwGdv7qRkLQZfmaJkCZepzwpyepMlAK4iZCcfMGo3JLEzG9l5qxnsm1jiA9DnPTYNTeZymaDLQivLjTOyA6RAOzMJkDOkgZQkQODAOB6ZoMhjrccYaXMFVywFMBlNrTK/CARUHMBkkHm3iodx6uIPJIMGoEgwisR6RiI01JgMOxLjBfrghHMBkYDIwGXBAxQFMBolEEglbuBDjACaDBKNKMDFCsX89YoPJWM9YE9eMtZYDmAxMBiYDDqg4gMkg8WgTD+XWwx1MBglGlWAQifWIRGysMRlwIMYN9sMN4QAmA5OByYADKg5gMkgkkkjYwoUYBzAZJBhVgokRiv3rERtMxnrGmrhmrLUcwGRgMjAZcEDFAUwGiUebeCi3Hu5gMkgwqgSDSKxHJGJjjcmAAzFusB9uCAcwGZgMTAYcUHEAk0EikUTCFi7EOIDJIMGoEkyMUOxfj9hgMtYz1sQ1Y63lACYDk4HJgAMqDmAySDzaxEO59XAHk0GCUSUYRGI9IhEba0wGHIhxg/1wQziAycBkYDLggIoDmAwSiSQStnAhxgFMBglGlWBihGL/esQGk7GesSauGWstBzAZmAxMBhxQcQCTQeLRJh7KrYc7mAwSjCrBIBLrEYnYWGMy4ECMG+yHG8IBTAYmA5MBB1QcwGSQSCSRsIULMQ5ETcYv//1XIyLC9itYXIMBceBz4OvX/zX2Ay4+LuABHnCg5oD1Eu7Pxv2D30EABEAABEAABECgFAKYjFJIUg8IgAAIgAAIgICHwP8DIQ+BAlK2qs4AAAAASUVORK5CYII=)![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAikAAACnCAYAAADUtsyfAAAgAElEQVR4Ae2dCZLrIAxEc64cyOfJaXKZHMZTGBoESIC3xJP0r/oVG7NID4zaeJnbzH8kQAIkQAIkQAIkcEECtwvaRJNIgARIgARIgARIYKZI4SAgARIgARIgARK4JAGKlEt2C40iARIgARIgARKgSOEYIAESIAESIAESuCQBipRLdguNIgESIAESIAESoEjhGCABEiABEiABErgkAYqUS3YLjSIBEiABEiABEqBI4RggARIgARIgARK4JAGKlEt2C40iARIgARIgARKgSOEYIAESIAESIAESuCQBipRLdsvvGvV63Ofb7TZPz+와 같은 구조를 가진 데이터셋을 사이킷런 모델중 분류,회귀 모델로 학습하여 접근이 정상 로그인지 비정상 로그인지 확인하는 분류기 성능을 테스트 해보는 것을 목적으로 한다. **- 테스트 목록****1. 분류 모델** from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier **2. 
회귀 모델** from sklearn.linear_model import LogisticRegression **- 코드 및 실행결과와 성능 테스트 결과**# mount from google.colab import drive drive.mount('/content/drive') ROOT = '/content/drive/My Drive/Colab Notebooks/infosec/proj1/' from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier import io import urllib.parse import numpy as np anomaly_train_raw = "anomal_train.txt" anomaly_test_raw = "anomal_test.txt" normal_train_raw = "norm_train.txt" normal_test_raw = "norm_test.txt" def parse(file_in, file_out): f = open(ROOT + file_in, 'r', encoding="utf8") lines = list(map(lambda line: line.strip(), f.readlines())) res = [] for i in range(len(lines)): line = lines[i] words = line.split(' ') url_req = None is_req = False if line.startswith("GET"): is_req = True url_req = words[0] + words[1] elif line.startswith("POST") or line.startswith("PUT"): is_req = True url_req = words[0] + words[1] idx = 1 while not lines[i + idx].startswith("Content-Length"): idx += 1 url_req += '?' + lines[i + idx + 2] if is_req: res.append(url_req) f.close() out = io.open(file_out, 'w', encoding="utf-8") for e in res: out.writelines(urllib.parse.unquote(e).replace('\n','').lower() + '\n') print("Parsing complete.", len(res), "requests earned from", file_in) def load_parsed(file): with open(file, 'r', encoding="utf8") as f: data = f.readlines() ret = [] for i in data: i = i.strip() if i != '': ret.append(i) return ret # 0: normal, 1: anomaly def make_data_set(parsed, label): return { "data": parsed, "target": np.array([label] * len(parsed), dtype=np.uint8), "target_names": np.array(["normal", "anomaly"]) } def combine_data_set(a: dict, b: dict): try: if not np.array_equal(a["target_names"], b["target_names"]): print("Invalid combining!") return False except: print("Invalid combining!!!") return False return { "data": a["data"] + b["data"], "target": np.append(a["target"], b["target"]), "target_names": a["target_names"].copy() } parse(normal_train_raw, "normal_train.txt") parse(normal_test_raw, "normal_test.txt") parse(anomaly_train_raw, "anomaly_train.txt") parse(anomaly_test_raw, "anomaly_test.txt") trainset = make_data_set(load_parsed("normal_train.txt"), 0) trainset = combine_data_set(trainset, make_data_set(load_parsed("anomaly_train.txt"), 1)) testset = make_data_set(load_parsed("normal_test.txt"), 0) testset = combine_data_set(testset, make_data_set(load_parsed("anomaly_test.txt"), 1)) vectorizer = TfidfVectorizer( min_df=0, analyzer="char", sublinear_tf=True, ngram_range=(3, 3) ) X = trainset["data"] y = trainset["target"] X_test = testset["data"] y_test = testset["target"] vectorizer.fit(X) X = vectorizer.transform(X) X_test = vectorizer.transform(X_test) print(X.shape) print(X_test.shape) X_train, X_valid, y_train, y_valid = train_test_split( X, y, test_size=0.3, random_state=31, stratify=y ) # Logistic Regression lr = LogisticRegression( solver='liblinear', multi_class='auto', C=90, random_state=1 ) lr.fit(X_train, y_train) y_pred_lr = lr.predict(X_test) score = accuracy_score(y_pred_lr, y_test) f1 = f1_score(y_pred_lr, y_test) 
print("Logistic Regression 모델의 정확도:", score) print("Logistic Regression 모델의 F1 score:", f1)Logistic Regression 모델의 정확도: 0.987554245476132 Logistic Regression 모델의 F1 score: 0.9848665870171247**-> 속도가 매우 빠르다는 장점을 가지고 있다.****-> F1 score가 약 0.98로 높은 편이다.**# Decision Tree dtree = DecisionTreeClassifier( criterion="entropy", max_depth=150, random_state=29 ) dtree.fit(X_train, y_train) y_pred_dt = dtree.predict(X_test) score = accuracy_score(y_pred_dt, y_test) f1 = f1_score(y_pred_dt, y_test) print("Decision Tree 모델의 정확도 : ", score) print("Decision Tree 모델의 F1 score:", f1) # Ada Boost base_model = DecisionTreeClassifier( max_depth = 1, max_features=0.3, class_weight='balanced', random_state=1 ) ada_model = AdaBoostClassifier( base_estimator = base_model, n_estimators = 1000, learning_rate=1., random_state=1 ) ada_model.fit(X_train, y_train) y_pred_ada = ada_model.predict(X_test) score = accuracy_score(y_pred_ada, y_test) f1 = f1_score(y_pred_ada, y_test) print("ADA-BOOST의 정확도:", score) print("ADA-BOOST의 F1 score:", f1)ADA-BOOST의 정확도: 0.9816588880700893 ADA-BOOST의 F1 score: 0.9776580889686814**-> 속도가 매우 느리다.**# Random Forest rf = RandomForestClassifier( criterion="entropy", n_estimators=180, random_state=2, n_jobs=4 ) rf.fit(X_train, y_train) y_pred_rf = rf.predict(X_test) score = accuracy_score(y_pred_rf, y_test) f1 = f1_score(y_pred_rf, y_test) print("Random Forest 모델의 정확도:", score) print("Random Forest 모델의 F1 score:", f1) # SVM svm = LinearSVC(random_state=1, tol=1e-5, C=1) svm.fit(X_train, y_train) y_pred_svm = svm.predict(X_test) score = accuracy_score(y_pred_svm, y_test) f1 = f1_score(y_pred_svm, y_test) print("SVM 모델의 정확도:", score) print("SVM Forest 모델의 F1 score:", f1) # KNeighbors knn = KNeighborsClassifier( n_neighbors=2, p=2, metric="minkowski" ) knn.fit(X_train, y_train) y_pred_knn = knn.predict(X_test) score = accuracy_score(y_pred_knn, y_test) f1 = f1_score(y_pred_knn, y_test) print("KNN 모델의 정확도:", score) print("KNN 모델의 F1 score:", f1)KNN 모델의 정확도: 0.9583230983378367 KNN 모델의 F1 score: 0.9474227869021795**-> KNN 모델은 가장 낮은 성능을 보여주고 있다.**# MLP mlp = MLPClassifier( hidden_layer_sizes=(20, 5), activation="relu", solver="adam", batch_size=192, learning_rate_init=0.01, max_iter=1, warm_start=True, random_state=7 ) for i in range(1, 101): mlp.fit(X_train, y_train) y_pred_mlp = mlp.predict(X_test) score = accuracy_score(y_pred_mlp, y_test) f1 = f1_score(y_pred_mlp, y_test) print("MLP 모델의 정확도:", score) print("MLP 모델의 F1 score:", f1)/usr/local/lib/python3.7/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:571: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (1) reached and the optimization hasn't converged yet. % self.max_iter, ConvergenceWarning)MIT LicenseCopyright (c) 2020 Permission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in allcopies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THESOFTWARE.input = '''(3 * (4 * 8) * 5 * 7 * 3) + 8 4 * (6 * 4 * 6) + 8 + 4 (5 + 7) * 2 * 4 + 6 + 7 + (2 + 2 + 6 + (9 + 3 + 7 * 3)) (4 * 8 + 2 * (7 + 6 + 7) * 2) + ((4 * 2 + 4) + (6 + 7 * 2 * 3) + 5 * 2) * (4 + (4 + 5 + 4) * 7 * 8 * 4 + (7 * 7 + 5 * 3 * 6)) + 8 (3 + 3 + (6 * 4 * 9 * 3 * 6) + (5 * 8 + 7 * 9) * 8 * 7) + 9 + 3 * 8 * 4 + (7 + 4 * 7) 7 + (6 * (6 * 8 * 2) * (8 + 4 + 7 + 5 + 4 * 6) * 5) + 8 * (2 * (9 + 3 * 2) + 3 * 9 * 9 + 7) * 8 + 3 3 + (2 + 8 * 2 + 3 + 6) * 8 * (3 + 7 + 8 + (2 + 9 * 4 * 6) * 3) * 5 + 7 6 * 2 * 7 * (8 + (9 * 4 * 7 + 7 * 4) * 9) + 9 3 + 4 + (8 * 4) + 3 + ((7 * 9 + 8 + 5 * 5) + 8) + (2 + 7) 9 * 3 + ((8 + 2) * 5 + 2) * 6 ((5 + 9 * 4 + 9 * 4 + 3) + 4) + ((3 * 7 * 5 + 7) + (8 + 6) + (6 + 9 * 4 + 7)) + 9 + 5 * 3 * 5 (6 * 7 + 5) + 9 * 9 + 4 * 7 * (4 + 9 + 9 * 2 * (4 * 8 * 5 * 7)) ((5 + 7 + 7 * 8) + 3 + 4 * (2 + 2 + 9 + 3 * 7 * 9)) + 2 * 9 + 2 * 4 9 * 2 + (3 * 3 + 5 + 7) * (2 * 3 * 9 * 8) * 2 + 3 5 + 5 + (7 + 9 * (9 + 9 * 2) + 4) (8 + (4 * 7 + 2 * 8) * 3 * 8 * 3) * (7 + 4 + 4 + 8 * 5 * 9) * 2 + 4 3 * 3 * ((2 * 3 * 8 + 4 + 3 * 3) + 4 * 7 + 3 * 5) * 9 3 * (8 * 5 + 9) 9 + 8 + (8 + 5 * 2 * (6 + 8) * 4) 6 + 8 + 3 * 5 * 2 7 * 9 + ((8 + 8 * 5 * 9) * 3 * 3 + 7) 5 + 9 + (3 + 6 * 4) (6 + 2 + 7 + 6 * 8) + (3 * 6 + 7 + 2 * 4 * 6) (8 * (4 * 2 * 2 * 5 + 6) * 8 * 8) + (9 * 4 + 9 * 2 + 9 * 5) + 7 * 2 * ((6 + 6 + 7 + 6 * 6) + 7) (6 * 3 * 4 * (5 * 5 * 9 + 5 + 9)) + 2 + (5 + (8 + 9 * 5 * 3) * 2 * 3 * 8) + 7 8 * (6 + 6) 7 + 9 + (8 * (7 + 3 * 8 + 3 * 8 * 5) * 8 * 7) (8 + 6 + 6 + 9 + 3) * ((2 * 6 + 8 + 7 * 2) + 9 * 7 * 7) + 5 ((5 + 8) + (4 + 5 * 3 + 2) + (4 * 4 * 7 + 8 + 6) * 2 * 9) + 7 * 3 * 7 + 9 + 5 4 * 2 * (3 * 7) * 5 + 8 8 + (4 * 7 * 6 + 8 * (2 + 4 + 5 + 2)) + 5 * 7 * 9 * 3 (5 + 6 * 4) + 4 * 9 (3 * 2 * 2) * (5 * (4 + 7 + 7)) * 3 8 + (5 * 4 * 3 + (4 * 5 + 6)) 3 * 5 + 3 + 3 + 6 * 2 (6 + 9 * 4 + (5 * 9 * 6) * 7) + 5 7 * 6 + 7 * (2 + 7) 4 * (2 + 6 * 9 + 7 + 7) + 6 + 8 * (8 * 7 + 7) 5 * 9 + (6 * 4 * 5) + 4 + (4 + 6 + 4 * 4 * 9) + 9 (5 + (4 * 8 + 6) * 7) + 6 + (3 * 6) * (3 * 7 * 3 * 7 * 3 * 9) * (4 + 8 * 5 + 7 * (8 + 2 * 9 + 4 + 8)) + 4 6 + 6 * (3 + (4 * 8 * 2 * 7) * 2 + 9) 2 + 2 + (9 + 8 * 7 + 5) + 7 2 * 8 + ((5 + 7 + 5) * 3 + 9) + 7 3 * (7 + 4 * 7 * 3 * 3 * 8) + 9 + 2 + 4 8 + 4 + 5 * 2 + 3 ((6 * 2 + 2) * 9 + 7) + 9 * 2 * (7 * 6 + 4 * 3 * 3) + (8 + 8 + (6 + 5 * 8 * 6) + 6 + (9 * 9 + 4 + 2 + 6) + 2) 9 + 9 + 6 + 9 * (8 + 4 * 7 + 2 * 9 + 6) (4 + 8) * 6 + 5 (5 * 3 * 2) + 8 7 + 7 + (6 * 9) + (3 + 4 + (9 + 3 * 6) + 5 * (4 * 6 * 6) + 9) + (6 + 8 + 7) * (3 + 3) (2 * 8) + 3 + (4 * (7 * 9 * 9 + 6) * (7 + 2 * 5 * 6) * 3 * 7 + 2) * 7 3 * 4 * 6 * 2 + 9 + (8 + (4 * 9 + 8 * 9) * 6) 3 + 7 + (3 * 6 + 2 * 6 + 6 * 4) * (3 * (7 + 4) * (4 * 9 + 7 * 9) + 5 * 8 + 9) 4 + (7 * 8 * 8 * 7 * 5) + 7 * 6 * 7 (5 + 3 * 6 * 8 * 9 + 4) * 9 + (2 + 4 + 4 * 6 + 4 * 7) + (7 + (9 + 8 + 3)) * 6 + 4 (7 * 8 + 8 * 7 + 5 + 4) * 2 + (6 + 4 + 7) + (8 * 6) 9 * 8 + 8 * 3 9 + (7 + (8 * 5 * 5) * 7 + 6 + 2) * 7 + 7 + (3 + 2 + 5 + (3 + 4) + 9) + 2 9 * 7 + 3 * 9 * 6 * (3 + (2 * 6 + 5 * 3 + 7) * 4 * (8 * 8 * 2)) ((4 * 6 + 9) * 6 + (4 + 5 + 8) + 8) + 9 * 6 * 8 + 3 6 + 4 * (2 * 3 * (7 + 8 * 6) + 8 + 4) (8 * 8 + 4 * 8 + 7) + 2 * 4 * 3 * 7 (3 * (7 * 4 * 6 * 5 + 7 * 7)) * 7 + 6 7 * 3 * 3 * 2 + (2 + (3 + 7 * 8 + 4 * 4) + (4 + 3 + 5) * (5 + 2 * 3 * 3 + 9) + 6 + (4 * 3 + 4 + 6)) 5 * (4 * 7 + 6) * 8 9 + 7 + 7 + 2 * 
2 8 * (9 * 2 * 2 * 3) + (9 + (9 + 3 + 6 + 2 * 7 + 8)) + 6 + 4 + 7 7 * 2 + ((6 * 7 * 8) * 6 * 7) + 6 + (9 * 8 + 7) + (3 * 3 * (9 + 8) + 8 * 6 * 2) (9 * (2 + 5 * 2) + 3 * 7) * (8 * 3 + 2) + 3 + (6 * 9 + 9 * 8 + 8 * 2) + ((2 + 5 + 3 + 8 + 6) + 4 * 6) (8 * (9 + 4) * 4 + 8 * 7 + 3) + 2 * (3 + 2 * 2) * (6 + 8) + (5 * 9 * 7 + 7 * 2) + 4 6 * 8 * 7 + ((6 + 6 + 8 * 4 + 8) * 3) + 4 2 * 8 * (7 + 7) (9 + 8) + 3 + (5 * 7 + 9 + 6 + 6) 2 * 6 + 9 * 6 * 3 3 + (5 + 2 + 7 * 3 * 9) * 8 * 6 * 4 + 3 (3 * 9 + 2 * 7) + 6 + (9 + (9 + 9 + 4 * 4)) + (7 * 7 + 8 + 4 * 4) 7 * 2 + 7 * ((8 + 9 + 7 + 4) * 7 + (7 + 4 + 7 * 3 * 4 + 2) + 8) (9 + 3 * 9) + (4 * 9 + 7 + 2) + 7 + (7 * (3 + 4 + 8 * 9) + 8) + 9 8 + (4 + 3 + 4 + 9 + 7) * 7 + 6 8 * (9 + (5 + 8)) + 8 4 * (7 + 8 * (8 * 5 + 9 + 3)) * 4 2 + 5 + 7 * (7 * 6 + 6 * 5) * (8 * (7 + 3 + 6 + 4 * 8) + (8 + 6) * (4 * 7 + 2)) * 9 4 * 2 + 7 + (2 * 2 + 8 + 2 * (7 + 8 + 9)) * 5 (6 + (9 + 9 * 2 + 4 + 2)) * 6 (2 + 4 * 8 + 3) * 9 + 7 * 6 + 3 2 * 6 * ((8 * 3) * 8) * 4 * 5 4 + 5 * (5 + (8 + 8 * 2) * 8 * 5) + 3 9 * 7 * 7 + 9 * (7 * 8 + (7 * 4 * 4 * 2 + 5 * 3) + 5 + 5 + (3 + 2 * 4 + 8 * 9)) + 9 2 * ((5 * 8 + 8 + 5 + 6 * 3) * 3 * (3 * 6) * 2) + 8 * 9 9 + (6 + (4 + 3) + 5 + 2 + (6 + 4 * 8 + 7 * 2) * (8 * 6 * 8 + 6 * 4 * 5)) + 6 6 + (9 * 9 + 7 + 3) * 7 * (3 * 5 * 5 * 8) * 8 6 * 7 * 4 * 9 * 6 + (7 + 3 + 9) 4 * 3 + 7 8 * 4 + (6 + 8) + 8 9 * 7 + ((7 * 6 * 7 * 6 * 3 * 5) * 7 * 6 + 3 * 2) + ((4 * 5 * 6) * 5 * 7 + (2 * 6 * 2)) * 8 + 3 7 * 6 + 4 * (4 * 6 + 6 * 4 * 8) 7 + 8 + 3 + 3 6 + 8 + (8 * (3 * 2 * 3 + 5 * 8 + 3) + 7) * ((4 * 4 * 7 * 7 * 9 + 5) * 6 + 7) * 2 * (2 + 3) 7 + 7 * 2 * (4 * 3 + (4 * 2)) + 6 + 4 (5 * 6 + 3) * 8 + (2 + 8) (8 + 8 * 3 + 9 + 5 * 9) + 4 * 9 9 + 4 * 8 + 5 + 6 + 4 4 * 4 + 5 + 3 + ((4 + 3) * 7 * (3 + 9)) (6 * 9 + 3 + 4 * 8 * 2) * (6 * 5) * 5 * 9 * 5 + 3 9 * 2 + 3 + ((3 * 5 * 5 + 8) + 7 * 8 * (2 + 3 + 9) * (6 + 2 + 8)) + 3 + 8 (3 + 9 * 6 + 4) + (7 * (9 * 6 * 2 * 4 + 8) * 4 * 4 + 9 * 3) + 7 * 6 * (7 * 6 + 2 * 2 + (5 + 8 * 4 + 3 * 7) + 3) 6 * (6 * 5 * 7 + (3 + 5)) + 9 + (3 * 2 + 6) + 4 8 * 9 + 8 + (5 + 8) * 8 + (2 + 7 + 7 * 8 * 3 * 5) 9 * 8 * 4 * 3 6 * (3 + 5 * 9 + (3 * 5 * 4 + 6 + 6 + 3) * 8) (7 + 8 + 7 + 4 * 5) * 8 * (7 * 8 + 9 * 2) 9 * 4 * (2 + 2 + (8 * 4 + 8 + 9 * 4 + 4) * 6 + 4 + 5) * (3 * 4 * 5) * 6 5 * 6 5 * 3 * 4 + (9 * (2 + 9 * 8 + 8 + 3)) + 4 + 7 8 + 9 + (7 * (6 * 9 * 2 + 8) * 7 * (2 * 3) * 6) + 3 + (8 + 4 + (7 + 2 * 2 + 2 + 3) + 3 + (3 * 3 * 9 + 7 * 5 * 3) * 4) + 2 8 * 8 * (8 + (3 + 7 + 9 + 2) + 4) (2 + 7 + 8 * 8 * 7 * 6) + 2 + 7 * 7 5 + (4 * 9) * 7 7 + 5 + 4 (3 * 3 * (3 * 6 + 9)) * (6 + 6 * 4 + 7 + 6) (6 + 7 + (3 + 8 * 9 * 7) * 8) + 3 * 6 * 9 * 3 + (7 + 3 * (9 * 3 * 5 + 5)) 3 * (7 * 2 + 8 + (2 * 4 + 4 * 3)) * 7 * (4 * 8) (3 * 9 + 6 * 9 * 7) + 7 * 4 * 4 + ((5 * 7 + 8 + 4 * 8 * 8) + 6 * 3 * 8 + 5 + 4) 4 + (2 + 7 * 4 * (5 + 3 * 4 * 3 + 3 + 6)) * 6 3 + (7 * 4 + (5 + 7 + 2 + 8) * (4 * 5) * 6) + ((4 + 5) + (7 * 4) + (4 + 3) * (5 * 6)) + 5 (7 * 9 + (7 * 3) + 5 + (7 + 3 + 3 * 9 + 2 * 6)) + 2 * 6 * 9 + 3 + 4 ((8 + 5 + 3 * 5 * 5) * 4 + (6 * 5 + 8 + 6 + 4 + 5) + 2 * 6 * 6) * (2 + 2 + 3) * 7 + 8 * (7 + 8 + 7 * 6 + 7 * (2 + 5 * 2 + 3)) * 4 3 + 2 + 7 + (9 * 7 * 3 * (9 * 2)) * 2 5 + 6 + 7 * 4 * ((6 + 8 * 2 + 6 + 3 + 3) + (3 + 3 + 8) * 5 * 5 * 9 + 5) 3 + 5 * 2 + (3 + (6 + 5 * 4 + 2) * 2 + 4 + 4 + 2) * 2 3 + 5 4 + (9 * (9 + 7 + 4 * 3 * 3) * 8 + 6 + (7 + 8)) + 3 * 6 * 5 * 6 (2 * 4 * 6 + 6) + (2 + 6 * 3 + 4 * 7 * 6) * 8 * 6 5 + (3 * 9 * 9 + 4 + (4 + 6) + (5 + 9 + 4)) + 9 * 3 * 6 ((6 + 3) * 8 + 7 * 4) + 3 5 + (5 * (5 + 8 + 2 + 5 + 7 * 8) + (5 * 4 * 5 + 9) + (6 * 3) + (7 * 7)) * 5 ((4 + 4 * 
8 + 6 * 8) * (9 + 6 * 9) * (6 * 9)) * 9 + 9 * 4 5 * 6 8 + (5 * 5 + 4 * 5 * 9 * 2) 8 + 5 + (5 + 6 + 4 * 6) 3 * 5 * 6 + 9 + (4 + 2 * 2 * 2 + 4) 8 * 6 + 4 + (5 * (4 * 9) + 2 * (8 + 3 * 2) + 2) * 7 7 * (8 * 6) + 7 * (2 * (4 + 5 * 5 + 6) + (9 + 4 * 4 + 6 * 7) * 3 * 9 * 4) * (3 * (3 * 8 + 8 + 7) + (9 * 2 + 8 + 8 * 2)) * 7 9 + 3 * 9 * (6 + 5 * 6 * 9 + 8) + (9 * 4 * 9 * 5 + 9) + 2 4 + 5 + 7 5 * 5 + ((2 + 5 + 4 + 3 + 4) * 8 + 4 * 3) * 5 * 9 * (8 * 2) (6 * (2 * 2 + 7 + 7)) * 2 + (9 + 2 * 2 + 2 + (7 * 3 * 5 * 9 + 2 + 7)) + 9 + 7 * 2 (9 + 2 + 6) + (6 * 6 * (5 * 6 + 9 * 6 + 8) + 8 + 7 + 2) * 4 * ((2 + 2 * 5 + 7) * 9) 6 + 9 + (8 * 3) + (7 + 6 + 6 * 3 * 2 + 9) * 7 6 + (3 * (4 * 6 + 3 * 4 + 7 + 8)) 3 + (3 + 2) + 9 * (9 + (2 * 3 * 2 * 6 + 9) + 8) + (5 * 7) + (7 + 4 * 9 * 5 + 4) 2 * 5 * (2 + 8 * (4 * 9)) * 8 + (5 + 7 * (5 + 8 * 9 * 9 * 8) * 4 * 8 * 5) 2 * 8 + (2 + (5 + 3) + (2 * 3 + 3 + 6 * 4)) 6 + 3 + 9 + 5 + 5 + ((3 + 3 + 4 * 8) * 4) 5 * ((3 + 6 + 7 * 6 * 8) * 3 + 8 + 7) + 2 + (2 * 6 + (5 + 5) * 9) + 3 9 * ((5 + 6 * 4 + 2) * 5 * 7 + (6 * 9 + 2 + 7)) + 4 + 4 * 2 6 * ((3 * 5 + 6 * 5 + 8 * 3) * 4 * (6 + 7 + 8 + 5 + 3) + 8 + 4 * (6 * 5 + 7 * 9 * 5)) + 5 * 5 * 7 + 4 (7 + 2) + 9 * 8 + 3 * 8 * 7 2 * 5 + 3 + (2 + 9) * 3 (5 * 6 + 9 + 5 + 9 + 3) * (3 + 5 + 6 * 2 + 2) * 5 * 6 * 2 (6 + 8 * 3 * 9 + 2 + 2) + 2 + 2 * 6 * 3 8 + (2 * 4 * (7 * 9) + 4) * 4 * 8 * 4 * 5 3 * (9 + (6 * 4 * 9 * 3 * 5) + 9 + (7 * 3 + 4) * 2 * 8) * 5 + 3 + 3 * (4 * 5) 3 * (2 + 2 + 2 + (9 + 7 + 6 * 3)) + 7 8 * ((3 * 5 * 8 + 9) * (4 * 6 * 6 + 6 + 7) + 6 * 7 + 8 + (3 + 7 * 8 + 6 + 4)) + 4 * 2 * (5 * 2) 3 * 2 * 4 * 7 * 9 * ((2 * 7 + 3 * 4 * 3 * 4) * 9 * 3 * 5) (9 * 5) * ((7 + 4 + 4 + 5 * 4) * 5 + 8 + 8 * 2 * 2) * 2 3 + (4 + 7 * 2 * (9 + 9 * 9 * 5 + 7) * 5) * 5 (3 + (7 * 5 + 6 * 4) * 4 * 3 * 3 + 7) * 9 * 7 (2 + (3 * 3 * 6 + 4) * 6 * 8) + 2 + 9 9 * 4 + 4 + 7 * (6 + 6 * (9 * 2 + 6 + 5) + (3 + 5 * 9)) * 6 ((9 + 6 + 7) * 2 + 2 * (6 + 9) * 8) * 7 * 6 + 6 3 * 5 + (5 * 2 + 8 + 5) + (8 + 9 * 9 + 5) 6 * 2 + (6 * 2 * 6 + (8 * 8 + 7 + 7)) 3 * 6 + 3 * (4 + 9 * 7 + (8 + 6 * 3 + 4) * 7 * 9) 8 + (4 * 9 + 7 + (9 + 9 * 4 + 9)) 8 * ((7 * 6 * 4 + 4) + 3) 6 + 7 * 6 + (6 + 6 + 4 * (2 * 2) + (2 + 7 + 2)) * 3 6 + 8 (6 + 5 + 6 * (3 * 2) * (5 * 7 * 3) * 5) + (6 * 3 + 8) * 6 * 2 + 6 + 6 3 * (7 + (6 + 8 + 9 * 4 + 5 + 4) * 4) * ((9 * 9 * 8 * 5 * 7 + 5) + 7 * 5) * 8 + 8 * 6 (8 * (3 * 5 * 9 + 3 + 4 * 2) * 5 + 6 * 7) * 5 + (6 + (5 + 3 * 3 + 3 * 2) * 4 * 4) 5 * (6 * (7 + 9)) + (9 + 9 + 2) + (2 * 2 + (2 * 9 * 6 + 8 * 2) + 4) * 6 + 2 ((4 + 8) * 4 + 2 + 6 * 7 + (7 * 4 * 8 * 6 + 3)) * 8 * 2 + 5 + 9 5 + (5 * 9 * 4 * 3) * (7 * 3 * 7) * 6 (7 * 5 + 2 + 7) * (2 + 3 + 5 + 6 * (4 + 4 + 9)) + 9 + 2 * 5 ((5 * 9 * 6 + 5 * 9) + 2 + 7 + 9 * (4 + 6 + 8)) + ((8 * 9) + 4 * 3 + 7 * 7 * 2) 9 + 9 + ((7 + 8 + 6 * 3) * 7) ((6 * 2 + 3 + 8 * 5) * 4 * (5 + 7 * 5)) + 4 + 5 + 6 * 2 * (6 + 2) 8 * ((4 * 8 * 3 * 4 + 5) + 3 + 6 + 9 * 9 + 3) + (7 * (8 * 2) * 9) * (8 * 9 + 2 + 7) + (4 * (2 * 8) + 3 + 2 + (7 * 2 * 6)) 2 + 8 * ((2 * 2 + 3 * 6 + 5) + 8) + 6 (2 * (5 + 7 * 3 * 9 * 6) * (4 + 9) * (8 + 2 * 9)) + 9 * 2 2 * 4 * 4 * 9 + 7 4 + 7 + 6 + (6 + (8 + 8 + 7 + 5 * 2) * 7 * 2) + 8 3 + 4 + (4 * (9 + 4 + 5)) + 7 * 8 * 7 9 + 8 * 7 * 4 * ((8 + 2 * 4 + 6 * 9 * 2) + 3 * 3 + (2 * 5 * 5 + 2 + 9 * 9)) * 7 (8 * (5 * 7 + 7 + 2 + 7) + 3 * 9) + 9 * ((6 * 2 + 8 * 8 + 9 + 7) * 2 + (3 * 3) + (7 + 4) + 9 * 5) + (4 * 8 * 4) + 2 + 9 (3 * 5 + (7 + 4 * 8 + 7)) + ((8 + 5 * 6 * 5 + 3) * 2 * (3 * 9 + 4 * 2 + 3)) 5 * 6 * 3 + (8 + 6 * 6 + 6) 6 * (5 * 7) + 2 * (3 + 7 * 5) (9 + 6 + 2 * 7) * 2 * 7 + 6 5 + 3 * ((8 * 5 + 8 + 4) * 7 * 8 * 7 * (2 * 6 * 
4 + 5 * 9)) + 3 + 4 3 + 9 + 6 + (4 + 3) * 2 * 7 ((6 + 7 + 8 * 3) * 6 + 9) + 4 * 9 * 8 9 * 2 + (6 + 9 * 5) * (8 + 5 * 9 + 9 * 7 * 9) * 6 9 + 8 * 5 * (4 + (9 * 4 + 5 * 8 + 5) * 7 * (7 * 3 * 4 * 7) + 2) + 5 2 + 6 * (6 + 5 * 7 * 2 * 2) + 5 3 + (5 + 5) + 8 * (8 * 4) (3 * 2) + 5 + 4 * 6 + 3 * (3 + 8) 8 + (2 * (5 * 7 + 4 + 7) * 4 + 5) + (8 * 9 * (7 + 2) + 5) + (6 + 7 * (6 * 8 * 8 + 9) + 6 * 8) (2 + (8 * 3 + 3 + 5 * 8 * 7) + (7 * 6 + 4) + 8) + (2 + 7 * 9) + 9 * 8 * (4 + 5) 8 + (6 * 7 * 8 + (5 + 3 * 9 * 7) * 2) + (2 + 3 + 5 * 9) * 8 * 6 * 5 8 * 3 * 8 + (5 * (6 * 6) * 3 + 6) + ((8 * 5 + 8 * 4 * 7 + 5) * 6 * 7 + 9 * 6 + 8) 3 + 3 * 4 + (4 * 3 * 2) + 2 + 5 6 + 4 + 3 + (2 + 3 * (2 + 6 + 9 * 8 + 4) * 5 + (2 * 5 + 5 * 6 + 6) + 5) 3 + 2 * 4 + 6 + 4 * (6 + 2 + 9 * 2 + 8 + 4) (6 * 9 + 2 * 7) + 7 (8 + 5 * 2) + 7 * (5 * (8 + 5 * 8 + 6 * 7) + (3 * 9 * 9 + 9 * 6) * 5 * 7 * 9) + 6 * 2 * (9 * 9 + (5 + 7 * 4 * 4 * 9 + 6) + (6 * 3 + 7 + 6 + 4 + 8) + 8 * 5) 6 * 7 * (4 * 9 * (2 * 5 * 3 * 3 * 8 * 9) * 7) + 9 * (2 + 5) + 4 8 * 8 + 7 + (7 + 9 * 7) ((3 * 5 * 3) * (6 * 8 * 5 + 8) * 7 + 6) + 7 * 9 + (5 * 3 + 2 * 5 * 8 + 6) + (5 * 7) 7 + 7 * (2 * 9 + 6) * 4 * 4 + 9 (5 * 8 + (8 * 9 + 3 * 2 * 7 * 9)) * 8 * 8 + 3 + 7 6 + 5 + 8 * 2 * (7 * 6 * (5 * 8 * 4)) + 6 (9 + 4 * 2 * (6 + 2 * 2 * 4 + 7 * 9)) + 8 (5 * 3 * 2) * 2 + 2 * 6 * 8 * 8 4 * (2 * (5 + 6 + 6) + 6 + (7 * 2 * 9 * 9) + 4 + (4 * 9 * 7)) 8 + (8 * 6 + 2) * 3 * 6 * 3 + 7 3 + 8 * ((6 + 6) * 9 * (9 * 4) + 4 * 4 + 5) + (3 * (7 + 3 * 4 * 6 * 4) * 8 + 7 * (6 + 7 + 7 * 5 + 9)) + (6 + 6 * 9 + (6 + 6 * 2 * 4 * 5)) 8 + 4 * 7 * ((9 + 5 + 9 + 7) + 5) + 7 8 * 7 + (8 * 9 + 3) + 7 9 * (9 * 2 * (3 + 3 + 7)) ((9 + 9 * 8) * (4 + 8 + 7 * 6 * 5 + 3) + 5 + 3 * 4 + (7 + 7 * 3 * 6 * 3)) * 6 + 5 * 9 2 + 7 * ((3 * 6) * (6 * 5) + 4 + 9 + (4 + 2 + 3 * 3 * 5)) (9 * (7 + 7) + 5 * 2 + 5) + 6 * 3 * 3 * 8 (7 + (8 * 6 + 7) * 7 + (2 * 7 + 3) * 7 * 9) * 7 + 5 * ((7 * 5 + 2 + 5 * 2) + 6 + (7 + 9 * 3 + 8 * 6)) (9 * 2 + 5 * 3 * 8 + 4) * ((4 + 9 + 8) + 6 * 7 * 3 * 8 + 3) * 6 + 5 + 4 * ((9 + 4 + 6 * 4) + (2 + 7) * 5 * 5 * 3 + 5) ((9 * 6 * 3 * 6) * 3 * 9 + (6 + 7 + 5) * 6) * (7 * (4 * 9 + 7 + 8 + 6)) * 7 * 6 + (2 * 4 + 9 * 5 * 2) 7 * (8 + 5 + 4 + 7) ((5 + 8 * 2 * 8 + 6) * 8) + 8 + 2 * 2 (2 * 5 * 5 + (5 * 5 + 5 * 9 + 2)) * 9 * 2 + (6 * 7 + 5) (9 * 5 * 9) + 4 * 8 + 2 * (5 * 7 * 4 + (8 * 4 + 3 + 8) + 9) 7 + 3 * (5 + (5 * 4 * 4 + 9 + 9) * (6 * 8 + 3 * 8) * 5) 7 * 6 + 7 + 3 (5 + 6) + 5 * (6 + 7) 9 * 2 * 9 + (9 + 6 * 4 + 4 * (5 + 4 + 8 * 2 * 9 * 3)) 7 + (8 + 2 * 6 + 9 * (2 * 6 * 3 + 4 + 2 + 6) + 9) + 3 + (9 * 8 + (9 + 3 * 6 * 5) + 6 * 8 * 2) * (2 * 4 + 5) + ((2 + 7 + 2 * 6) * 3 + 7 * 6) 7 + 9 * ((3 * 2) * (7 + 5 + 8)) + 8 * 5 2 + 3 * 6 + 8 + 2 * (9 * 9 + 8 + (6 + 3 + 4 * 4 + 4 * 6)) 4 * 2 + 6 * ((6 * 9 * 5 * 7 + 8 + 7) * 5 + (4 + 2 + 6) * 9 + 3 * 3) 8 * (2 + 3 * 5) + 7 + 8 * (9 + 2 * 8 + 4 * 9) + 8 (9 * 2 + 6) + 3 * ((5 + 6 * 3) + 4) + 6 6 * 3 * (3 + (4 * 4 + 5 * 7) * 4 + (4 + 5 + 2 + 8 + 4 + 3)) * 9 + 7 8 * 3 * 6 + 6 + 7 + 2 (6 + 4 * (6 * 2 * 9 * 4) * 2) + 9 + 4 * 7 + 8 * 4 5 + (7 * (4 + 5 + 9 * 5) * 2 * 5) (4 * (6 * 3 * 6) + 7 + (4 + 6 + 8 + 3 + 3 + 9)) + 6 + (3 * 6) 3 * 8 * (7 * 8 + 2 + 4 + 5 + 4) * 8 + 9 + 3 3 + 4 + 7 + 6 (6 * (9 * 5 + 2 + 8 + 7 * 6) + 4) * 3 * 2 + 3 8 * 6 + (6 + 2 + 7 + (7 * 5)) * 6 * (8 * 4 * (4 * 5 + 4) + 2) 7 * 5 + 2 (6 + 3) + 2 * (3 * 6 + 5 + 6) * 9 6 * (2 * (8 + 8 * 6 * 5 + 8 + 3) + 2 * 9 + 4 + 4) + 5 + 6 5 + 2 + (4 + 2) * (4 * (8 + 3) * 9) * 2 9 + 2 * 4 * 5 + 6 + (9 * 8 + 2 * (8 * 4) * 7 + 8) 2 * 2 * 2 * 6 + 6 * 8 6 + (4 + 7 * 4 * 5 + 2 + 4) * 2 * 8 (5 * 6 + (9 * 7 + 2 * 2 * 7) * 8 * 3 + 5) + 
5 (9 + (6 * 8 + 9)) + 7 * 7 * ((8 + 7 + 6 * 9 + 4) * 3 + 6 * (4 + 5) * 2) + (6 * 5 + 9 * 2 + 5 + (7 + 5 * 5)) + 9 ((5 + 8 + 7 * 8 * 6) + 9 + 2) + 9 + 6 + (6 + 2 + 4) (8 + 5 + 2 * (8 * 6 * 2 * 3 + 7 * 5) + 8 + 8) * 7 * 7 + 2 6 * ((7 * 8 + 6 * 8) * 2 + 5 + (3 + 8)) * 2 + 9 4 + 3 + 8 * (6 * 7 * 8 * 6 * 3 + 2) * 9 + 6 3 * ((9 + 7 + 4 * 7) + 3 * 3) + 8 2 + ((5 * 8 * 6 + 4 * 5 + 6) + 2 + 3 * 9 * 7) + 6 * 7 3 * 3 * (5 + 8 * 4 + (3 * 3 + 6) + 2) * 6 + ((8 + 3 + 7 + 5 + 7 * 6) * (8 * 9 + 8 + 2) * 8 + 2) * 3 (6 + 3 * 9 * 9 * 4) + 7 * 6 + 3 + 6 + 8 6 * 2 + (7 + 9 + 6 + 4 + 9) (6 + 8 + 8 + (6 + 4 * 2 * 2 * 4) + 7) * 8 * 7 3 * 7 + 9 (6 + 7 * (2 * 3 * 6 * 8 + 6) + 8 + 3) * 4 + (4 * 9 + (5 + 8 * 4 + 6 + 6) * 6 * 5) 9 * ((7 * 2) + 9 * 2 * 5) * (6 * 4 * 3 + (8 + 3 + 7 * 9 + 8) + 4) + (2 + 4 + 4 + 9) 3 + (9 * 6) * 4 * 8 6 * (3 + (4 * 7 * 7 * 4 + 7) * 5 + (9 + 5) * (9 + 9 + 5 + 7 + 2 + 2)) 7 + ((6 + 2 + 3 * 9) + 4 + 5 + 2 + 8) * (8 + 9 + 9 + 9 * 6) + 9 7 * (6 + (7 + 5 * 3 + 4 * 9 + 3) + 2) 6 * (7 * 7 + (4 * 3 + 2 + 7 + 5 + 6) + 4 + 7) + 4 * 6 (2 * 2 * 9 + (9 * 7 + 8)) + 6 + (9 + 7) + 8 + 4 + (7 * 5 + 8 + 8 * (2 + 5 + 7 + 5 * 9)) 2 * (6 * (8 * 8 * 2 * 3 + 8 * 9) + 3) + 3 * 5 + 7 + (7 + 2 * 2 * 4) 6 * (8 * (4 * 3 + 3 * 4) * (7 + 4 + 5 * 5 + 2) * (6 * 4 * 6 + 9 * 9) + 2 * 5) 8 + (4 + 5 + 6 + (4 + 7) + 3 * 8) + 3 + 4 ((9 * 7 + 2 * 6 + 7) * 2 * 8) * 7 + 2 + (6 + (2 * 6) + 7 * (5 * 4 * 6 * 7 + 9) + 8 + 2) * (8 + 2 + 5 * (6 * 8 + 5 * 4) * 7 + 4) (5 + 2 + (8 * 2 + 2 * 9) + (4 * 8) * 8 + 9) * 9 + 9 * 2 * (2 + (3 * 3) * (5 * 2 * 4 * 2) * 3 + 7) + 8 4 * (4 + (8 + 6) + 9 * 8) * 4 * 6 + (5 * 3 * 3 * 5 * (8 * 2) + 8) 8 + 5 + 2 + (9 * 7 * (4 * 5 + 6 * 5 * 6 + 8)) + 8 + 7 (8 + 9 * 4) + 8 * ((5 * 6 + 9 * 2 * 4 + 8) + 6 * 7 * 4 + 4 * 2) + 8 * (5 + (2 * 7 + 6 * 4 * 8) + 2) + 2 (7 + 3 * 8 * 2) * (6 * 5 * (2 * 3 * 5 * 7 + 8 * 8) + 7 + 7 * (2 * 3)) + 9 7 + 4 + (3 + 2 + 4) * 4 * 4 ((3 * 2) + 3) * (2 * 6) + 2 + 6 * 6 + (3 + (2 * 5 * 3 * 2 * 9 + 2) * 3 * 8 + 3) 9 * ((6 + 2) + 3 + (9 + 7 + 9)) * 8 + 8 + 5 * 3 (8 + 4) * 6 + 2 * (6 * 6 + 4 + 9 + 9 + 6) * (2 * 7) * 9 (2 + 9 + 9 * 6 * 2 * 5) * 2 + 2 + 2 * (7 * 3 + 6 * 7 + 5 * 7) 4 + 8 * 7 * (6 * 7 + (9 * 2 * 8 + 8) + 4 + 5 + 2) * 5 * 7 5 + (7 * 6 + (3 * 2 * 7 * 5) + 4 * 6 + 2) * 2 7 * (4 * 6 * 2 * 3 + (6 * 8 * 4) * 6) + 3 + 6 4 + 3 * ((3 + 3 * 5 + 4 * 3 * 2) * (3 * 6 + 7 * 7 + 9) * 7) + (8 + 2 + 9 + 5 * 8 * 8) + (7 + 8 + 9 + 4 + 2 * 4) + 7 6 * (5 + 2 + 6 + (7 + 9 + 4 * 3) + 8) + 6 * 5 * 8 * 7 7 + 9 * (2 * 9) * 8 + (2 * 2 * 8 * 4 * (7 * 3 * 4 * 2 * 9 * 3) * 5) * (9 + 2 + 3 + 3 + 4) 6 * 3 * (9 * 6 + 7 + 9 * 2 + 2) 5 * ((3 + 5 + 4 + 7) + (5 + 7) * (2 * 5 + 5 + 9 + 7)) + (9 + 7) * 8 (7 + 7 * 5 + 2 + 7 + 3) * 9 9 + (3 * 7 * 4) + 8 * 2 + 2 (3 + 7 + 3 * 6 * (2 + 2) * 8) + 3 3 * (9 + (8 + 4) + (3 * 7 * 6 + 9 * 7 + 7) * (8 * 7 * 9 + 6) * 9 * 7) + 4 5 * 2 * 4 + (4 * 5 + 6 + (6 * 8 + 4 + 2) * 5) + (7 + 2) + ((6 + 7) + 5 * 5 * 5 + 5 * 6) 7 * (7 * 3 * (8 * 2 * 8 * 2 + 4) * 8) * (6 + (4 * 7 * 6 * 8 + 2) + 2 + (2 + 6) * 4) + 7 * 6 + 4 8 * 3 + ((5 + 4 + 4 + 2) * (6 * 8 * 6) * (5 * 7 * 2) + 4) + 8 + 5 * 2 5 * 5 * (6 * 2 * 5 + (8 + 9 * 5 + 5) + 5) (4 * 5 * 2) + 5 * 9 + 3 * 9 7 * 8 + ((3 + 9 * 5 + 4) + 9 + (3 + 7 * 8 + 7) + (2 + 9 * 3) * 5) + 6 * 6 (4 * (8 + 4) + (8 + 2 + 9 + 3 + 2 * 9) + 4 * 7 * 6) + 9 7 * (5 + 5 * 2 * 3) + 4 (8 + (6 * 9 * 7 + 4 * 3) * 6 * 9) * 3 * ((2 * 4 * 5 + 2 * 7) + 4 + (8 * 9 * 9 + 9 + 6 + 7) * 3 + 9 + 3) (3 + (7 + 5 + 4 + 9)) * 4 * 5 * 8 + 6 + 6 (6 * 9 * 8) * (2 + 9 + (6 + 2 * 9 * 4) * (3 * 6 + 5 * 6 * 9) * 9 + 8) + 3 * 3 ((4 * 4 + 4) * 9 * 9 * (3 * 5 + 5 * 4 * 5) + 8) + 8 * 7 * 8 
4 + 4 * (2 + 5 * 7 + 2 * 7 + 3) + 5 * 2 6 * (3 * (6 + 8 * 7 + 5 * 3 * 4) * 8 * (4 * 3)) * 7 + 6 + 3 2 + 4 + 5 + 6 + 9 + ((3 * 7 + 4 + 8 + 6) + 5 * 2 + 4 * 6) (8 + 5 * 8 * 7 * 9) + 9 * 5 + 3 5 + 3 * 4 + (6 + 2 * (6 * 2 + 6 + 3) * 2 * 6 + (9 * 9 * 3)) + 2 + 6 3 * 5 * (5 + (2 + 5) * (6 + 8) + 2 + 4 * 5) * 7 4 + 7 + 4 + 9 + 2 * (3 + (7 + 2 + 4 * 9 * 3) * 6 * 7) ((9 * 9) * 5) + ((6 * 3 + 6 * 6 + 2 + 3) + (9 + 7 + 8) + 5 * (3 + 4)) * (8 + 4 + 9) 3 + ((5 * 6 + 2 * 3 + 9) * (6 * 8)) * 7 + 6 * 8 + 8 8 * 5 * 5 * 9 * ((5 * 6) * 6 * (2 * 8 * 9 * 3) + 7 * (9 * 3) * 3) * 9 2 * (5 + 2 + (4 + 6 + 7 + 9 + 7 + 4)) * 5 + 9 * 9 (5 + 6) + 7 + 8 * (4 + 3 * 3 + 5 * (5 * 3 + 8)) 4 * ((6 * 7 + 8 * 3) + 8 * (8 * 2 * 7 * 8 * 2 + 7) * 4 + 9) + 7 + 3 + 9 * 5 7 + (5 * 7 * 4 + (9 + 7 + 2 + 5 * 4 * 2)) + 5 + 2 (9 + 7 * 8) + 7 * 3 * 3 3 * 6 + 9 * 6 + 8 + 5 2 * 3 * 8 + ((6 * 6 + 2) + 2) 5 + 3 + 7 + (2 + (4 + 6 * 7 + 7 + 5 * 7) + 4) 4 * 6 (7 * 6) * 6 * 4 + 2 6 + ((7 + 9 * 3 + 9) * 2 * (6 * 4 * 5) + 3) + 5 + 9 (5 + 3 + (4 + 8 * 6 + 6 + 6) + 4 * 6) * (3 + 6 * (9 + 3 + 6) + 7 + 4 * 3) * 3 * (2 + 5 * 2 + (6 + 5 + 8 * 9 * 8 * 3) + 2 * 6) (6 * 5 * 5 * 6) + 3 * (5 + 8) * (4 * 7 + 8 * 6 + (4 + 9 + 6) + 4) + 4 3 + (2 * 6 * 5 + 9 * 3) + 6 9 * (3 * 4 * 8 * 4 + 4) (9 * (2 + 9 * 6 + 5 * 6 + 2) * (8 * 4 * 6 * 8) * 5) * 7 + (7 + 5 + 3 + 5 + 7 * 9) + (6 * 7) + 8 (4 + 3 * 7) + (4 + 5 * 9 + (8 * 6 * 3) + (4 + 2 * 2) + 3) + 4 + 7 * (9 * 9 + 7 + (8 * 4) * 4 * 6) ((3 * 2 + 2) * 4 + 3 * 2 + 9) * 4 + 7 (7 + 2 + 6 + 7) + 8 + 4 + (3 + 3 + 9) + 8 (7 * 4 * (9 * 2 + 4 + 6)) + 6 * 5 + 9 * 7 7 + 3 + 3 + 8 + (4 * 5 * 9 * 9 * (8 * 4 + 9 + 2 + 4 * 8)) 4 * 3 + 7 * 7 + (4 * 8 * 9) + 2 (4 + 2 + 8 * 9 + 5) * 5 + 8 * (7 * 5 * 5) * 9 * 9 9 * (2 + 5 + (2 * 5 * 5) * 6 * 8 * (8 * 9 + 6 * 2 * 4)) + 5 + 3 4 + 2 + 2 + 3 + (7 * 5 + 3 + 7 + (6 + 6 * 7)) (9 * 3 + 6 + 5) + (3 + 3 + 5 + 9) 3 * ((8 * 6) * 7 * 4) + 9 + 6 + 9 2 + (6 * 7 + 7 + 6 * (7 * 6 * 2 + 4 + 6 + 7)) (4 * 6 * 7) * 6 * (3 * 7) + 2 (3 * 7) * ((9 * 3 * 5) + 9 + 8 + 4 + 4 * 7) + 3 + 8 + 6 + 2 (5 * 5) + (3 * 4 + 8 * (6 + 8 * 5 + 7 * 3)) + 8 * (9 * 2) (3 + (2 * 7 + 9) * 2) * 3 * 7 + 2 + 4 * 9 9 * 9 + 4 * 3 * (8 * 4 * (2 * 9 + 6 + 6 + 2)) 7 * ((5 * 7 + 5 * 4 + 9 + 9) * 4 + 4 + 6 + 2) + 6 * (7 * 5 + 5 + (2 + 9 * 4 + 7) + (8 * 7 + 4 + 5 + 5) + 2) * 7 (8 + (4 * 3 + 9 * 2 + 2 * 7) + 7 * 2 * 5) * 3 5 * 2 * 9 * (5 + 5) + (6 + 3) * (2 + 3 * (5 + 5 + 6) + (9 + 2 + 7) + 5) (9 * 8 + 6 + 9) * 8 * 8 (4 * 4 + 7 + 2) + 7 * 2 + (4 + 3) 2 * 8 + 5 + (2 + (3 * 9 * 8 * 8 + 2 * 5) * 8 * 2 * 2 + 4) + ((3 * 7 + 7 * 5 * 5 + 5) * 8 * 5 * 8 + 6) + 5 (2 * (9 + 3 + 8 * 8) + 6) * 3 * 9 * (7 + 7) * (2 * 2) * 5 ((6 * 3 + 3) * 6 + 5) + 3 + 4''' input = input.replace(' ', '') lines = input.split('\n') from operator import add, mul def eval_part1(line): acc = 0 op = add paren_depth = 0 paren_idx = 0 for i in range(len(line)): c = line[i] if c == '(': if paren_depth == 0: paren_idx = i paren_depth += 1 elif c == ')': paren_depth -= 1 if paren_depth == 0: acc = op(acc, eval_part1(line[paren_idx + 1:i])) elif paren_depth == 0: if c.isdigit(): acc = op(acc, int(c)) elif c == '+': op = add elif c == '*': op = mul return acc def part1(lines): s = 0 for line in lines: s += eval_part1(line) return s part1(lines) # Adds parens around + operations def preprocess_part2(line): chars = list(line) while '+' in chars: idx = chars.index('+') chars[idx] = 'p' if chars[idx - 1].isdigit(): chars.insert(idx - 1, '(') else: paren_depth = 1 i = idx - 2 while paren_depth > 0: if chars[i] == '(': paren_depth -= 1 elif chars[i] == ')': paren_depth += 1 i -= 1 
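# (comment on the step that follows) the backward scan above stops once the matching '(' of the
# left-hand operand has been passed, so i + 1 is the index of that '('; the insert below places the
# opening parenthesis of the new wrapper just before it, and the matching ')' is added later by the
# symmetric forward scan.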
chars.insert(i + 1, '(') if chars[idx + 2].isdigit(): chars.insert(idx + 3, ')') else: paren_depth = 1 i = idx + 3 while paren_depth > 0: if chars[i] == ')': paren_depth -= 1 elif chars[i] == '(': paren_depth += 1 i += 1 chars.insert(i, ')') return ''.join(chars).replace('p', '+') def part2(lines): s = 0 for line in lines: s += eval_part1(preprocess_part2(line)) return s part2(lines)Contour deformation In the context of GW method, contour deformation (CD) technique is used in conjunction with resolution of identity (RI) to reduce the formal scaling of the self-energy calculation. Compared to widely used analytic continuation approach it provides a means to evaluate self-energy directly on the real axis without employing Pade approximants or non-linear least squares fit and potentially offering superior accuracy. Here, we provide a brief outline of the theory behind CD and give an example of the self-energy calculation within CD without invoking RI in order to facilitate comparison with the results prsented above. Detailed discussion of the CD can be found in the following papers:1. ., ., ., & . (2018). Core-Level Binding Energies from GW : An Efficient Full-Frequency Approach within a Localized Basis. Journal of Chemical Theory and Computation, 14(9), 4856–4869. https://doi.org/10.1021/acs.jctc.8b004582. ., ., ., ., ., ., & . (2011). Electronic properties of interfaces and defects from many-body perturbation theory: Recent developments and applications. Physica Status Solidi (B), 248(2), 275–289. https://doi.org/10.1002/pssb.201046094 CD is used to recast the convolution in the GW expression of self-energy as a difference between two integrals, one which can be performed analytically whereas the other can be evaluated numerically on a relatively small grid. This is achieved by closing the inegration contour as shown below [2]:![Integration contour used to evaluate $\Sigma(\omega)$](CD_scheme.jpg)$$\Sigma(r_1,r_2, \omega) = \frac{i}{2\pi} \int_{-\infty}^{+\infty} e^{i\omega^{\prime} \eta} G(r_1, r_2, \omega + \omega^{\prime}) W(r_1, r_2, \omega^{\prime}) d\omega^{\prime}\\ = \frac{i}{2\pi} \oint_{\Gamma} G(r_1, r_2, \omega + z) W(r_1, r_2, z) dz - \frac{1}{2\pi} \int_{-\infty}^{+\infty} G(r_1, r_2, \omega + i\omega^{\prime}) W(r_1, r_2, i\omega^{\prime}) d\omega^{\prime}$$Depending on the $\omega$ value the lower-left and the upper-right loops of the contour can enclose one or several poles of the zero-order Green's function whereas the poles of the screened Coulomb interaction never fall within the contour. This allowes to evaluate the countour integral as a sum of corresponding residues with apropriate signs (note that the upper-right loop is traversed counter-clockwise, while the lower-left loop is traversed clockwise). The imaginary axis contribution is calculated using Gauss-Legendre grid. 
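As an aside on the last point: the imaginary-axis integrand is smooth, which is why a modest Gauss-Legendre grid is sufficient. A minimal, self-contained sketch of how such a grid can be set up (this only illustrates the quadrature mapping, it is not the routine used by the GW module below, and the scale parameter omega0 is an arbitrary choice):

import numpy as np

def gauss_legendre_imag_axis(n, omega0=1.0):
    # Nodes and weights for integrals over (0, inf): map x in (-1, 1) to w' = omega0 * (1 + x) / (1 - x)
    x, w = np.polynomial.legendre.leggauss(n)
    omega = omega0 * (1.0 + x) / (1.0 - x)
    weights = w * 2.0 * omega0 / (1.0 - x)**2   # Jacobian of the mapping
    return omega, weights

# Quick check: the integral of 1 / (1 + w'^2) over (0, inf) equals pi/2
omega, weights = gauss_legendre_imag_axis(200)
print(np.sum(weights / (1.0 + omega**2)))   # ~1.5708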
Importantly, the intgrals over the arches vanish iff the screened Coulomb interaction does not contain the exchange contribution.import psi4 import numpy as np import scipy as sp from matplotlib import pyplot as plt %matplotlib inline from IPython.core.display import display, HTML display(HTML("")) psi4.set_options({'basis' : 'def2-qzvp', 'd_convergence' : 1e-7,'scf_type' : 'out_of_core', 'dft_spherical_points' : 974, 'dft_radial_points' : 150 }) he = psi4.geometry("""He 0.0000 0.0000 0.0000 symmetry c1 units angstrom """) psi4.set_output_file('he_qzvp.out') scf_e, scf_wfn = psi4.energy('PBE', return_wfn=True) print("DFT energy is %16.10f" % scf_e) epsilon = np.asarray(scf_wfn.epsilon_a()) print(epsilon*psi4.constants.hartree2ev)DFT energy is -2.8928225474 [-15.74831893 7.47486413 22.72689859 22.72689859 22.72689859 51.49043714 95.11061037 95.11061037 95.11061037 95.11061037 95.11061037 116.71002742 116.71002742 116.71002742 256.25880273 300.00083801 300.00083801 300.00083801 300.00083801 300.00083801 300.00083801 300.00083801 420.64172788 420.64172788 420.64172788 420.64172788 420.64172788 461.58835865 461.58835865 461.58835865]``` SCF Total Energy (Ha): -2.8928225473 (MOLGW) ```import GW import time start = time.time() # Analytic calculation for reference purposes gw_par = {'no_qp' : 1, 'nv_qp' : 1, 'nomega_sigma' : 501, 'step_sigma' : 0.01, 'gl_npoint' : 200, 'analytic_W' : True, 'low_mem' : True } gw_he_test = GW.GW_DFT(scf_wfn, he, gw_par) gw_he_test.print_summary() end = time.time() print("GW calculation took %10.2f s" % (end - start))Number of basis functions: 30 occ/virt: 1/29 Attempting to create RI basis set for DEF2-QZVP (RIFIT)... Auxiliary basis set has been generated! Number of auxiliary basis functions: 47 Fraction of HF exchange is 0.000 Running in production mode! Shape of the omega_grid_all is (2, 501) Caculating GW self-energy via contour deformation Analytic W has been requested; performing RPA calculation Shape of omega tensor is (30, 30, 29) Calculation of the integral term requires 0.007 Gb Calculation of the residue term requires 0.018 Gb Using low-memory algorithm Finished calculating self-energy Performing one-shot G0W0 SigX - Vxc [-0.34438168 0.13933937] Perfoming graphic solution of the inverse Dyson equation Done! E^lin, eV E^graph, eV Z -23.672423 -23.476796 0.913907 11.007629 11.006144 0.989647 GW calculation took 0.29 s``` GW eigenvalues (eV) E0 SigX-Vxc SigC Z E_qp^lin E_qp^graph 1 -15.748327 -9.371124 0.703643 0.913908 -23.669607 -23.477018 2 7.474860 3.791494 -0.221863 0.989658 11.007575 11.006253```start = time.time() # Analytic calculation for reference purposes gw_par = {'no_qp' : 1, 'nv_qp' : 1, 'nomega_sigma' : 501, 'step_sigma' : 0.01, 'gl_npoint' : 200, 'analytic_W' : True, 'low_mem' : True, 'evgw_iter' : 5 } gw_he_test_ev = GW.GW_DFT(scf_wfn, he, gw_par) gw_he_test_ev.print_summary() end = time.time() print("GW calculation took %10.2f s" % (end - start)) # What if I request more qp-s and converge their energies together? start = time.time() # Analytic calculation for reference purposes gw_par = {'no_qp' : 1, 'nv_qp' : 29, 'nomega_sigma' : 501, 'step_sigma' : 0.01, 'gl_npoint' : 200, 'analytic_W' : True, 'low_mem' : True, 'evgw_iter' : 10 } gw_he_test1_ev = GW.GW_DFT(scf_wfn, he, gw_par) gw_he_test1_ev.print_summary() end = time.time() print("GW calculation took %10.2f s" % (end - start))Number of basis functions: 30 occ/virt: 1/29 Attempting to create RI basis set for DEF2-QZVP (RIFIT)... Auxiliary basis set has been generated! 
Number of auxiliary basis functions: 47 Fraction of HF exchange is 0.000 Running in production mode! Shape of the omega_grid_all is (30, 501) Caculating GW self-energy via contour deformation Analytic W has been requested; performing RPA calculation Shape of omega tensor is (30, 30, 29) Calculation of the integral term requires 0.007 Gb Calculation of the residue term requires 0.018 Gb Using low-memory algorithm Finished calculating self-energy Performing one-shot G0W0 SigX - Vxc [-0.34438168 0.13933937 0.25006238 0.25006238 0.25006238 0.3270321 0.38471001 0.38471001 0.38471001 0.38471001 0.38471001 0.47563179 0.47563179 0.47563179 0.78111999 0.55286242 0.55286242 0.55286242 0.55286242 0.55286242 0.55286242 0.55286242 0.72129897 0.72129897 0.72129897 0.72129897 0.72129897 0.83403239 0.83403239 0.834[...]ProdMX : Rapid Query and Analysis of Protein Functional Domain based on Compressed Sparse Matries GitHub: https://github.com/visanuwan/prodmx Example 2 : Identifying and extracting of toxin genes from *C. difficile* and *C. botulinum* genomes 1. Installation This section covers the basic installation of ProdMX from source code in our GitHub repository.User can initiate the installation process from the pip installer with the path to the downloaded folder of ProdMx source code.%%bash python -m pip install /path/to/prodmx2. Analyses using ProdMX The gene cluster of toxin genes in *C. difficile* and *C. botulinum* 2.1 Exotoxins from *C. difficile* *Clostridioides difficile* or formerly known as *Clostridium difficile* is a group of bacteria that cause severe damage to the colon with diarrhea symptoms. This gram-positive species is one of the most common bacteria found in healthcare-associated infections (HAIs) in the United States. The exotoxin gene cluster in *C. difficile* organize by two toxin genes *TcdA* and *TcdB*. These two exotoxin genes are regulated by the alternative RNA polymerase sigma factor *TcdR*.To demonstrate the potential use case for screening, the ProdMX were employed to the identify the pathogenicity of unknown genome sequences from Clostridiales order.The data used in the demonstration can be found in a `test/example_2` folder in the ProdMX repository.import os prodmx_test_folder = "/path/to/prodmx/test/example_2" os.chdir(prodmx_test_folder)Like the example 1, we need to go through the steps of creating a compress sparse matrix of the HMMER results for all genomes of interest. However, for the protein analysis, we need to construct the matrix of domain architectures since the order of functional domains within a protein can result in different gene function. The code to generate the compressed sparse matrix of domain architectures is as follows.%%bash prodmx-buildArchitecture -i input_clostridiales_id_hmm.tsv -o architecture_matrix_fol -kTo count all virulence factors, the following codes were used to load the non-binary compressed sparse matrix of domain architectures to the python environment.import prodmx import pandas as pd count_matrix = prodmx.loadMatrix(matrix_fol='architecture_matrix_fol')To check for exotoxin genes, we retrieved the domain architectures from [UniProt](https://www.uniprot.org/) for *TcdA* and *TcdB* protein. 
The dictionary between protein name and domain architectures were created as follows.dict_tox = {'tcdB':'PF12918_PF12919_PF11713_PF12920_PF19127_PF19127', 'tcdA':'PF12918_PF12919_PF11713_PF12920_PF19127_PF19127_PF19127_PF19127_PF19127'}Using Pandas DataFame, we can create the data table for the in silico-screening of exotoxin in the unknown genomes by the code following:list_result = [] for genome_id in count_matrix.getRow(): x = count_matrix.sumColumn(list_row=[genome_id], list_col=[dict_tox.get('tcdB'), dict_tox.get('tcdA')])['col_sum'].tolist() list_result.append([genome_id]+x) header=['genome_id', 'tcdB', 'tcdA'] pd.DataFrame(list_result, columns=header)The table of genome and protein ids associating with exotoxins can be retrieved as follows:count_matrix.getProteinId(list_row=count_matrix.getRow(), list_col=[dict_tox.get('tcdB'), dict_tox.get('tcdA')], output='clostridiales_exotoxin_protein_id.txt')2.2 Neurotoxins from *C. botulinum* The botulinum neurotoxins (BoNTs) produced by the strains of *Clostridium botulinum* can cause the disease botulism which is a potentially fatal disease in human. This neurotoxin gene cluster in *C. botulinum* comprise of *ntnh* and *bont* genes with the alternative sigma factor *botR* to regulate the expression. Referring to the steps in the previous example of exotoxins, we can use the same domain architecture matrix to retrieve the potential genomes and protein ids that might associate with botulinum neurotoxins by following code:dict_tox = {'bont': 'PF01742_PF07952_PF07953_PF07951'} count_matrix.getProteinId(list_row=count_matrix.getRow(), list_col=[ dict_tox.get('bont'), output='clostridiales_neurotoxin_protein_id.txt')Exploring grain settling with Python Grain settling is one of the most important problems in sedimentology (and therefore sedimentary geology), as neither sediment transport nor deposition can be understood and modeled without knowing what is the settling velocity of a particle of a certain grain size. Very small grains, when submerged in water, have a mass small enough that they reach a terminal velocity before any turbulence develops. This is true for clay- and silt-sized particles settling in water, and for these grain size classes Stokes' Law can be used to calculate the settling velocity:$$w = \frac{RgD^2}{C_1\nu}$$where $R$ = specific submerged gravity, $g$ = gravitational acceleration, $D$ is the particle diameter, $C_1$ is a constant with a theoretical value of 18, and $\nu$ is the kinematic viscosity.For grain sizes coarser than silt, a category that clearly includes a lot of sediment and rock types of great interest to geologists, things get more complicated. The reason for this is the development of a separation wake behind the falling grain; the appearance of this wake results in turbulence and large pressure differences between the front and back of the particle. For large grains - pebbles, cobbles - this effect is so strong that viscous forces become insignificant and turbulent drag dominates; the settling velocity can be estimated using the empirical equation$$w = \sqrt{\frac{4RgD}{3C_2}}$$The important point is that, for larger grains, the settling velocity increases more slowly, with the square root of the grain size, as opposed to the square of particle diameter, as in Stokes' Law.Sand grains are small enough that viscous forces still play an important role in their subaqueous settling behavior, but large enough that the departure from Stokes' Law is significant and wake turbulence cannot be ignored. 
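Before getting to the code, a quick back-of-the-envelope check of Stokes' Law helps to fix the orders of magnitude. For quartz settling in water ($R \approx 1.65$, $\nu \approx 10^{-6}$ m$^2$/s, the same values used in the code below), a $D = 10\ \mu m$ silt grain settles at about$$w = \frac{RgD^2}{C_1\nu} \approx \frac{1.65 \times 9.81 \times (10^{-5})^2}{18 \times 10^{-6}} \approx 9 \times 10^{-5}\ \mathrm{m/s},$$that is, roughly 0.1 mm/s, or about three hours to fall through a metre of still water.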
There are several empirical - and fairly complicated - equations that try to bridge this gap; here I focus on the simplest one, published in 2004 in the Journal of Sedimentary Research (Ferguson and Church, 2004): $$w = \frac{RgD^2}{C_1\nu+\sqrt{0.75C_2RgD^3}}$$At small values of D, the left term in the denominator is much larger than the one containing the third power of D, and the equation is equivalent of Stokes' Law. At large values of D, the second term dominates and the settling velocity converges to the solution of the turbulent drag equation.But the point of this blog post is not to give a summary of the Ferguson and Church paper; what I am interested in is to write some simple code and plot settling velocity against grain size to better understand these relationships through exploring them graphically. So what follows is a series of Python code snippets, directly followed by the plots that you can generate if you run the code yourself. I have done this using the IPython notebook, a very nice tool that allows and promotes note taking, coding, and plotting within one document. I am not going to get into details of Python programming and the usage of IPython notebook, but you can check them out for example here.First we have to implement the three equations as Python functions:from math import pi import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline %config InlineBackend.figure_format = 'svg' rop = 2650.0 # density of particle in kg/m3 rof = 1000.0 # density of fluid in kg/m3 visc = 1.002*1E-3 #8.9*1E-4 # dynamic viscosity in Pa*s (N*s/m^2) C1 = 18 # constant in Ferguson-Church equation C2 = 1 # constant in Ferguson-Church equation, valid for natural sand grains def v_stokes(rop,rof,d,visc,C1): R = (rop-rof)/rof # submerged specific gravity w = R*9.81*(d**2)/(C1*visc/rof) return w def v_turbulent(rop,rof,d,visc,C2): R = (rop-rof)/rof w = (4*R*9.81*d/(3*C2))**0.5 return w def v_ferg(rop,rof,d,visc,C1,C2): R = (rop-rof)/rof w = (R*9.81*d**2)/(C1*visc/rof+(0.75*C2*R*9.81*d**3)**0.5) return wLet's plot these equations for a range of particle diameters:d = np.arange(0,0.0005,0.000001) ws = v_stokes(rop,rof,d,visc,C1) wt = v_turbulent(rop,rof,d,visc,C2) wf = v_ferg(rop,rof,d,visc,C1,C2) plt.figure(figsize=(8,6)) sns.set_style("white") sns.set_style("ticks") plt.plot(d*1000,ws,label='Stokes',linewidth=3) plt.plot(d*1000,wt,label='Turbulent',linewidth=3) plt.plot(d*1000,wf,label='Ferguson-Church',linewidth=3) plt.plot([0.25, 0.25],[0, 0.15],'k--',linewidth=0.5) plt.plot([0.25/2.0, 0.25/2.0],[0, 0.15],'k--',linewidth=0.5) plt.plot([0.25/4.0, 0.25/4.0],[0, 0.15],'k--',linewidth=0.5) plt.text(0.36, 0.11, 'medium sand', fontsize=10) plt.text(0.16, 0.11, 'fine sand', fontsize=10) plt.text(0.075, 0.11, 'v. fine', fontsize=10) plt.text(0.08, 0.105, 'sand', fontsize=10) plt.text(0.01, 0.11, 'silt and', fontsize=10) plt.text(0.019, 0.105, 'clay', fontsize=10) plt.legend(loc=4, fontsize=10) plt.xlabel('grain diameter (mm)',fontsize=12) plt.ylabel('settling velocity (m/s)',fontsize=12) plt.axis([0,0.5,0,0.15]) D = [0.068, 0.081, 0.096, 0.115, 0.136, 0.273, 0.386, 0.55, 0.77, 1.09, 2.18, 4.36] w = [0.00425, 0.0060, 0.0075, 0.0110, 0.0139, 0.0388, 0.0551, 0.0729, 0.0930, 0.141, 0.209, 0.307] err = [0.00009, 0.0001, 0.0001, 0.0002, 0.0001, 0.0002, 0.0005, 0.0010, 0.0016, 0.002, 0.002, 0.003] plt.plot(D,w,'ko',markersize=8);The black dots are data points from settling experiments performed with natural river sands (Table 2 in Ferguson and Church, 2004). 
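To attach some numbers to the curves, here is a quick spot check that reuses the functions and constants defined above (the two diameters are just convenient reference sizes, 0.1 mm fine sand and a 2 mm granule):

# Spot check of the three equations, reusing v_stokes, v_turbulent, v_ferg and the constants above
for d_test in [0.0001, 0.002]:   # 0.1 mm fine sand and 2 mm granule, in metres
    print('D = %.1f mm: Stokes %.4f, turbulent %.4f, Ferguson-Church %.4f m/s' %
          (d_test * 1000,
           v_stokes(rop, rof, d_test, visc, C1),
           v_turbulent(rop, rof, d_test, visc, C2),
           v_ferg(rop, rof, d_test, visc, C1, C2)))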
It is obvious that the departure from Stokes' Law is already significant for very fine sand and Stokes settling is completely inadequate for describing the settling of medium sand.This plot only captures particle sizes finer than medium sand; let's see what happens as we move to coarser sediment.d = np.arange(0,0.01,0.00001) ws = v_stokes(rop,rof,d,visc,C1) wt = v_turbulent(rop,rof,d,visc,C2) wf = v_ferg(rop,rof,d,visc,C1,C2) plt.figure(figsize=(8,6)) plt.plot(d*1000,ws,label='Stokes',linewidth=3) plt.plot(d*1000,wt,label='Turbulent',linewidth=3) plt.plot(d*1000,wf,label='Ferguson-Church',linewidth=3) plt.plot([0.25, 0.25],[0, 2],'k--',linewidth=0.5) plt.plot([0.5, 0.5],[0, 2],'k--',linewidth=0.5) plt.text(0.33, 1.0, 'medium sand', fontsize=10, rotation='vertical') plt.text(0.09, 1.15, 'fine sand, silt and clay', fontsize=10, rotation='vertical') plt.plot([1, 1],[0, 2],'k--',linewidth=0.5) plt.text(0.7, 1.0, 'coarse sand', fontsize=10, rotation='vertical') plt.plot([2, 2],[0, 2],'k--',linewidth=0.5) plt.text(1.5, 1.0, 'very coarse sand', fontsize=10, rotation='vertical') plt.plot([4, 4],[0, 2],'k--',linewidth=0.5) plt.text(3, 1.0, 'granules', fontsize=10, rotation='vertical') plt.text(4.5, 1.0, 'pebbles', fontsize=10, rotation='vertical') plt.legend(loc=1, fontsize=10) plt.xlabel('grain diameter (mm)',fontsize=12) plt.ylabel('settling velocity (m/s)',fontsize=12) plt.axis([0,5,0,2]);A log-log plot is much better for looking at a wide spectrum of grain sizes.d = np.arange(0,0.01,0.00001) ws = v_stokes(rop,rof,d,visc,C1) wt = v_turbulent(rop,rof,d,visc,C2) wf = v_ferg(rop,rof,d,visc,C1,C2) plt.figure(figsize=(8,6)) plt.loglog(d*1000,ws,label='Stokes',linewidth=3) plt.loglog(d*1000,wt,label='Turbulent',linewidth=3) plt.loglog(d*1000,wf,label='Ferguson-Church',linewidth=3) plt.plot([1.0/64, 1.0/64],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.012, 0.0007, 'fine silt', fontsize=10, rotation='vertical') plt.plot([1.0/32, 1.0/32],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.17/8, 0.0007, 'medium silt', fontsize=10, rotation='vertical') plt.plot([1.0/16, 1.0/16],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.17/4, 0.0007, 'coarse silt', fontsize=10, rotation='vertical') plt.plot([1.0/8, 1.0/8],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.17/2, 0.001, 'very fine sand', fontsize=10, rotation='vertical') plt.plot([0.25, 0.25],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.17, 0.001, 'fine sand', fontsize=10, rotation='vertical') plt.plot([0.5, 0.5],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.33, 0.001, 'medium sand', fontsize=10, rotation='vertical') plt.plot([1, 1],[0.00001, 10],'k--',linewidth=0.5) plt.text(0.7, 0.001, 'coarse sand', fontsize=10, rotation='vertical') plt.plot([2, 2],[0.00001, 10],'k--',linewidth=0.5) plt.text(1.3, 0.001, 'very coarse sand', fontsize=10, rotation='vertical') plt.plot([4, 4],[0.00001, 10],'k--',linewidth=0.5) plt.text(2.7, 0.001, 'granules', fontsize=10, rotation='vertical') plt.text(6, 0.001, 'pebbles', fontsize=10, rotation='vertical') plt.legend(loc=2, fontsize=10) plt.xlabel('grain diameter (mm)', fontsize=12) plt.ylabel('settling velocity (m/s)', fontsize=12) plt.axis([0,10,0,10]) plt.plot(D,w,'o',markerfacecolor=[0.0, 0.0, 0.0], markersize=8);This plot shows how neither Stokes' Law, nor the velocity based on turbulent drag are valid for calculating settling velocities of sand-size grains in water, whereas the Ferguson-Church equation provides a good fit for natural river sand.Grain settling is a special case of the more general problem of flow past a sphere. 
The analysis and plots above are all dimensional, that is, you can quickly check by looking at the plots what is the approximate settling velocity of very fine sand. That is great, but you would have to generate a new plot - and potentially do a new experiment - if you wanted to look at the behavior of particles in some other fluid than water. A more general treatment of the problem involves dimensionless variables; in this case these variables are the Reynolds number and the drag coefficient. The classic diagram for flow past a sphere is a plot of the drag coefficient against the Reynolds number. I will try to reproduce this plot, using settling velocities that come from the three equations above.At terminal settling velocity, the drag force equals the gravitational force acting on the grain:$$F_d = F_g$$We also know that the gravitational force is given by the submerged weight of the grain:$$F_g = (\rho_p-\rho_f)\frac{4}{3}\pi r^3g$$The drag coefficient is essentially a dimensionless version of the drag force:$$C_d = \frac{F_d}{\frac{\rho_fw^2}{2}A} =\frac{F_d}{\rho_fw^2\pi\frac{d^2}{8}}$$At terminal settling velocity, the particle Reynolds number is$$Re = \frac{\rho_fwd}{\mu}$$Using these relationships it is possible to generate the plot of drag coefficient vs. Reynolds number:d = np.arange(0.000001,0.3,0.00001) C2 = 0.4 # this constant is 0.4 for spheres, 1 for natural grains ws = v_stokes(rop,rof,d,visc,C1) wt = v_turbulent(rop,rof,d,visc,C2) wf = v_ferg(rop,rof,d,visc,C1,C2) Fd = (rop-rof)*4/3*pi*((d/2)**3)*9.81 # drag force Cds = Fd/(rof*ws**2*pi*(d**2)/8) # drag coefficient Cdt = Fd/(rof*wt**2*pi*(d**2)/8) Cdf = Fd/(rof*wf**2*pi*(d**2)/8) Res = rof*ws*d/visc # particle Reynolds number Ret = rof*wt*d/visc Ref = rof*wf*d/visc plt.figure(figsize=(8,6)) plt.loglog(Res,Cds,linewidth=3, label='Stokes') plt.loglog(Ret,Cdt,linewidth=3, label='Turbulent') plt.loglog(Ref,Cdf,linewidth=3, label='Ferguson-Church') # data digitized from Southard textbook, figure 2-2: Re_exp = [0.04857,0.10055,0.12383,0.15332,0.25681,0.3343,0.62599,0.77049,0.94788,1.05956, 1.62605,2.13654,2.55138,3.18268,4.46959,4.92143,8.02479,12.28672,14.97393,21.33792, 28.3517,34.55246,57.57204,78.3929,96.88149,159.92596,227.64082,287.31738,375.98547, 516.14355,607.03827,695.8316,861.51953,1147.26099,1194.43213,1513.70166,1939.70557, 2511.91235,2461.13232,3106.32397,3845.99561,4974.59424,6471.96875,8135.45166,8910.81543, 11949.91309,17118.62109,21620.08203,28407.60352,36064.10156,46949.58594,62746.32422, 80926.54688,97655.00781,122041.875,157301.8125,206817.7188,266273,346423.5938,302216.5938, 335862.5313,346202,391121.5938,460256.375,575194.4375,729407.625] Cd_exp = [479.30811,247.18175,199.24072,170.60068,112.62481,80.21341,45.37168,39.89885,34.56996, 28.01445,18.88166,13.80322,12.9089,11.41266,8.35254,7.08445,5.59686,3.92277,3.53845, 2.75253,2.48307,1.99905,1.49187,1.27743,1.1592,0.89056,0.7368,0.75983,0.64756,0.56107, 0.61246,0.5939,0.49308,0.39722,0.48327,0.46639,0.42725,0.37951,0.43157,0.43157,0.40364, 0.3854,0.40577,0.41649,0.46173,0.41013,0.42295,0.43854,0.44086,0.4714,0.45225,0.47362, 0.45682,0.49104,0.46639,0.42725,0.42725,0.40171,0.31214,0.32189,0.20053,0.16249,0.10658, 0.09175,0.09417,0.10601] plt.loglog(Re_exp, Cd_exp, 'o', markerfacecolor = [0.0, 0.0, 0.0], markersize=6) # Reynolds number for golf ball: rof_air = 1.2041 # density of air at 20 degrees C u = 50 # velocity of golf ball (m/s) d = 0.043 # diameter of golf ball (m) visc_air = 1.983e-5 # dynamic viscosity of air at 20 degrees C Re = 
rof_air*u*d/visc_air plt.loglog([Re, Re], [0.4, 2], 'k--') plt.text(3e4,2.5,'$Re$ for golf ball',fontsize=13) plt.legend(loc=1, fontsize=10) plt.axis([1e-2,1e6,1e-2,1e4]) plt.xlabel('particle Reynolds number ($Re$)', fontsize=12) plt.ylabel('drag coefficient ($C_d$)', fontsize=12);Temperature of Cygnus X1 is 31,000 K Assumed a base temperaturesize = 500 #size for generating image has nothing to do with Cygnus X1 temperature = 31000 base_temperature = 6500 c = 299792458. * 10**9 h = 6.62607004 * 10**-16 kB = 1.38064852 * 10**-5 λ, x, y, z = np.loadtxt('lin2012xyz2e_fine_7sf.csv', delimiter=',').T plt.plot(λ, x) plt.plot(λ, y) plt.plot(λ, z) xyz2rgb = np.array([ [0.41847, -0.15866, -0.082835], [-0.091169, 0.25243, 0.015708], [0.00092090, -0.0025498, 0.17860] ]) r, g, b = xyz2rgb @ np.array([x, y, z]) plt.plot(λ, r) plt.plot(λ, g) plt.plot(λ, b) def planck(T, λ): return 2.0 * h * c**2 / λ**5 / (np.exp(h * c / (λ * kB * T)) - 1.0) λ_test = np.linspace(1, 2000, 100) p_test = planck(5000., λ_test) plt.plot(λ_test, p_test) T = np.linspace(1500, 10000, 300) I = np.array([planck(t, λ) for t in T]) R = integrate.simps(I * r, λ) G = integrate.simps(I * g, λ) B = integrate.simps(I * b, λ) img = np.array([R, G, B]).T img = np.array([img / np.max(img, axis=1).reshape(-1,1) for i in range(100)]) img[np.isnan(img)] = 0 img[img < 0] = 0 img[img > 1] = 1 plt.imshow(img) def rel_planck(base_T, base_λ, T, λ): return (planck(T, λ) / planck(base_T, λ)) / (planck(T, base_λ) / planck(base_T, base_λ)) plt.plot(λ, rel_planck(6500., 525., 2900., λ)) I = np.array([rel_planck(6500., 525., t, λ) for t in T]) R = integrate.simps(I * r, λ) G = integrate.simps(I * g, λ) B = integrate.simps(I * b, λ) img = np.array([R, G, B]).T img = np.array([img / np.max(img, axis=1).reshape(-1,1) for i in range(100)]) img[np.isnan(img)] = 0 img[img < 0] = 0 img[img > 1] = 1 plt.imshow(img) coef = h * c / kB def opt_planck(T, λ): return 1.0 / λ**5 / (np.exp(coef / λ / T) - 1.0) def opt_rel_planck(base_T, base_λ, T, λ): return opt_planck(T, λ) / opt_planck(base_T, λ) a3_r = np.linspace(0, 1, size) T = temperature * a3_r**(3/4) a_R = np.linspace(0, 1, size) rate = np.sqrt(1.0 - a_R.reshape(-1,1)) / np.sqrt(1.0 - a3_r / 3.0) rgb = np.array([r, g, b]).T img = np.empty((size, size, 3), dtype=np.float64) for i in range(size): I = opt_planck(T.reshape(-1,1), rate[i].reshape(-1,1) * λ) / opt_planck(base_temperature, λ) img[i,:,:] = integrate.simps(I.reshape(size,-1,1) * rgb.reshape(1,-1,3), λ, axis=1).reshape(-1,3) img = img / np.max(img, axis=2).reshape(size,size,1) img[np.isnan(img)] = 0 img[img < 0] = 0/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide after removing the cwd from sys.path. /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: overflow encountered in exp after removing the cwd from sys.path. /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in true_divide after removing the cwd from sys.path. /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in true_divide import sysRedshift for Cygnus X1plt.imshow(img) Image.fromarray(np.round(img * 255).astype(np.uint8)).save('../Textures/T{}.png'.format(temperature))Exploration of Collider Bias with discrete Bayes Nets using `pomegranate` Collider bias[Collider bias](https://en.wikipedia.org/wiki/Collider_(statistics) ) can be seen as a form of selection bias. 
In a causal network, if we have the structure `A -> Collider <- B`, then "conditioning on the collider opens the path between A and B [which can] introduce bias when estimating the causal association between A and B, potentially introducing associations where there are none." The `pomegranate` packageHere we use the Python package [pomegranate](https://pomegranate.readthedocs.io/en/latest/) (GitHub repo [here](https://github.com/jmschrei/pomegranate)). It allows us to specify a Bayesian Net structure, along with the conditional probability distributions. Amongst other things, we can query the net, conditional upon certain observations.import pomegranate as pgExample 2: Does collider bias make it look like nicotine is protective of COVID![](img/bayes_net_1.png)Here we have a prior probability of 25% of being a smoker. **Independently**, there is a 10% prior probability of having Covid-19. The vector `[0.9, 0.1, 0.9, 0.01]` specifies the probability of being in hospital as a function of smoking and covid status:- smoking, covid. High probability of being in hospital.- smoking, ¬covid. Moderate probability of being in hospital- ¬smoking, covid. High probability of being in hospital- ¬smoking, ¬covid. Low probability of being in hospitalOne of the key points here is that of people who do not have covid, smokers are expected to have a higher probability of being in hospital than non-smokers.smokeD = pg.DiscreteDistribution({'yes': 0.25, 'no': 0.75}) covidD = pg.DiscreteDistribution({'yes': 0.1, 'no': 0.9}) hospitalD = pg.ConditionalProbabilityTable( [['yes', 'yes', 'yes', 0.9], ['yes', 'yes', 'no', 0.1], ['yes', 'no', 'yes', 0.1], ['yes', 'no', 'no', 0.9], ['no', 'yes', 'yes', 0.9], ['no', 'yes', 'no', 0.1], ['no', 'no', 'yes', 0.01], ['no', 'no', 'no', 0.99]], [smokeD, covidD]) smoke = pg.Node(smokeD, name="smokeD") covid = pg.Node(covidD, name="covidD") hospital = pg.Node(hospitalD, name="hospitalD") model = pg.BayesianNetwork("Covid Collider") model.add_states(smoke, covid, hospital) model.add_edge(smoke, hospital) model.add_edge(covid, hospital) model.bake()Sanity checkProbability you have covid depending on if you are a smoker or non-smoker. 
These should be equal as our Bayes Net specifies that smoking and covid are independent.beliefs = model.predict_proba({'smokeD': 'yes'}) model.plot() beliefs = map(str, beliefs) print("\n".join("{}\t{}".format(state.name, belief) for state, belief, in zip(model.states, beliefs) ))smokeD yes covidD { "class" :"Distribution", "dtype" :"str", "name" :"DiscreteDistribution", "parameters" :[ { "yes" :0.10000000000000035, "no" :0.8999999999999996 } ], "frozen" :false } hospitalD { "class" :"Distribution", "dtype" :"str", "name" :"DiscreteDistribution", "parameters" :[ { "no" :0.8199999999999997, "yes" :0.18000000000000022 } ], "frozen" :false }Calculate P(covid|smoking, hospital) and P(covid|¬smoking, hospital)If you condition upon being in hospital (ie only observer people who are in hospital), then it might be that we see different probabilities of having covid based on whether you smoke or not.model.predict_proba({'smokeD': 'yes', 'hospitalD': 'yes'}) model.predict_proba({'smokeD': 'no', 'hospitalD': 'yes'})Physical unitsAll variables in scipp have a physical unit.Variables are used for coordinates, labels, data, as well as attributes, i.e., all of these have a unit.When not specified explicitly the unit of a variable defaults to `scipp.units.dimensionless` (`scipp.units.one` is a shorter alias for this), i.e., the variable is considered dimensionless.`scipp.units` provides a number of pre-defined elementary units as well as operations between units.This can be used to create units that do not have a pre-defined identifier:import scipp as sc length = sc.units.m area = length * length area volume = length * length * length volume speed = length / sc.units.s speedYou can get a list of all predefined units (accessible through `sc.units`) with[unit for unit in dir(sc.units) if not unit.startswith('_')]Units can also be created directly from strings:sc.Unit('m/s') sc.Unit('counts')To convert data between compatible units, such as `m` to `mm`, see [sc.to_unit](../generated/scipp.to_unit.rstscipp.to_unit).sc.to_unit(1 * sc.Unit('m'), 'mm')Supported Units Base UnitsAll SI base units are supported with the following names:Name | Unit---|---'m' | meter's' | second'kg' | kilogram'K' | kelvin'A' | ampere'mol' | mole'cd' | candelasc.Unit('K')In addition, these base units are supported for cases not covered by SI:name | Unit---|---'rad' | radian'count' | single object counting Derived unitsA great number of derived units can also be specified as arguments to `sc.Unit`.Some examples areName | Unit---|---'Hz' | hertz'J' | joule'V' | volt'W' | watt'angstrom' / 'Å' | ångström'eV' | electron volt'L' | liter'min' | minute'D' / 'day' | dayUnits can be modified with SI prefixes, for instanceprint(sc.Unit('mm'), sc.Unit('microsecond'), sc.Unit('micro s'), sc.Unit('MJ'))You can also specify exponents for units or exponentiate them explicitly:print(sc.Unit('m^2'), sc.Unit('m**2'), sc.Unit('m')**2)MAC0459/MAC05865 - Data Science and Engineering Name: USP: 9790930 Decision trees exercises--- **Q1. What is the approximate depth of a Decision Tree trained (without restrictions) on a training set with 10 million instances?** Since most decision tree algorithms consider binary decision trees, we could say the approximate depth is $\lceil \log_{2} 10000000\rceil = 24$ **Q2. Is a node’s Gini impurity generally lower or greater than its parent? Is it generally lower (greater), or always lower (greater)?** Let us show that the Gini impurity is generally lower than its parent. 
The Gini impurity is defined as:$$ i(N) = \sum\limits_{i \neq j} P(W_{i}|N)P(w_{j}|N) = \frac{1}{2} \left[1 - \sum\limits_{j}P^2(W_{j}|N)\right] $$Define:$P(W|N) = \frac{w}{n}$ where $w = \W$ and $n = \N$Define $N_{p}$ and $N_{c}$ as the parent node and the child node respectively, $d_{i} = w_{i} - u_{i}$, $d = \sum\limits_{i} d_{i}$ and $d > 0, n > 1$.$$i(N_{p}) = \frac{1}{2} \left[1 - \sum\limits_{j}\left(\frac{w_{j}}{n}\right)^2\right] = \frac{1}{2} \left[1 - \frac{\sum\limits_{j} w_{j}^2}{n^2}\right]$$$$i(N_{c}) = \frac{1}{2} \left[1 - \sum\limits_{j}\left(\frac{u_{j}}{n - d}\right)^2\right] = \frac{1}{2} \left[1 - \sum\limits_{j}\left(\frac{w_{j} - d_{j}}{n - d}\right)^2\right] = \frac{1}{2} \left[1 - \frac{ \sum\limits_{j}w_{j}^2 - 2\sum\limits_{j}w_{j}d_{j} + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2}\right] = \frac{1}{2} \left[1 - \frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2}\right]$$Now, we need to show that $i(N_{p}) - i(N_{c}) > 0$:$$i(N_{p}) - i(N_{c}) = \frac{1}{2} \left[1 - \frac{\sum\limits_{j} w_{j}^2}{n^2}\right] - \frac{1}{2} \left[1 - \frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2}\right] = \frac{1}{2} \left[1 - \frac{\sum\limits_{j} w_{j}^2}{n^2} - \left(1 - \frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2}\right) \right] = \frac{1}{2} \left[-\frac{\sum\limits_{j} w_{j}^2}{n^2} + \frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2}\right]$$Thus, we must show that $\frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2} - \frac{\sum\limits_{j} w_{j}^2}{n^2} > 0$$$\frac{ \sum\limits_{j}w_{j}^2 - 2nd + \sum\limits_{j}d_{j}^2}{n^2 - 2nd + d^2} - \frac{\sum\limits_{j} w_{j}^2}{n^2} = \left(\frac{1}{(n^2 - 2nd + d^2)n^2}\right)\left(n^2\sum\limits_{j}w_{j}^2 - 2n^3d + n^2\sum\limits_{j}d_{j}^2 - n^2\sum\limits_{j}w_{j}^2 + 2nd\sum\limits_{j}w_{j}^2 - d^2\sum\limits_{j}w_{j}^2\right) =\left(\frac{1}{(n^2 - 2nd + d^2)n^2}\right)\left(n^2\sum\limits_{j}d_{j}^2 + 2nd\sum\limits_{j}w_{j}^2- 2n^3d - d^2\sum\limits_{j}w_{j}^2\right) = \left(\frac{1}{(n^2 - 2nd + d^2)n^2}\right)\left(n^2\sum\limits_{j}d_{j}^2 + 2nd(\sum\limits_{j}w_{j}^2 - n^2) - d^2\sum\limits_{j}w_{j}^2\right)$$ **Q3. If a Decision Tree is overfitting the training set, is it a good idea to try decreasing maxdepth? Why?** Yes, when we decrease the maxdepth we are also reducing the information gain that the last depth would have gained. Hence this new decision tree is less specialized in the training set. **Q4. If a Decision Tree is underfitting the training set, is it a good idea to try scaling the input features? Why?** No, scaling the input features is good when our classifier relies on Eucliadean distance. The decision trees relies on information gain to make the splits and the information gain measurents make use of probabilities, not Eucliadean distance. **Q5. If it takes one hour to train a Decision Tree on a training set containing 10 million instances, roughly how much time will it take to train another Decision Tree on a similar training set containing 100 million instances?** The [Scikit-learn documentation of the decision trees](https://scikit-learn.org/stable/modules/tree.htmlcomplexity) states that the complexity of the training algorithm is $O(n_{features}n_{samples}^{2}\log(n_{samples}))$.The $n_{features}$ is the same for both trainings. 
By a simple cross- multiplication we can make the following estimation:$$\frac{n_{features}(10^7)^2\log(n_{10^7})}{n_{features}(10^8)^2\log(n_{10^7})} = \frac{60}{x} \implies \frac{10^{14} * 24}{10^{16} * 27} \approx \frac{60}{x} \implies x \approx \frac{10^{16} * 27 * 60}{10^{14} * 24} \implies x \approx \frac{100 * 27 * 60}{24}$$$$\implies x \approx \frac{100 * 27 * 60}{24} \implies x \approx \frac{162000}{24} \implies x \approx 6750$$Hence, it will take approximately 6750 minutes or 112 hours and 30 minutes. **Q6.** **Train and fine-tune a Decision Tree for the Cardiovascular Disease dataset. The dataset has very few information on acquisition protocol except that all of the dataset values were collected at the moment of medical examination. There are three types of input features:****–Objective: factual information;****–Examination: results of medical examination;****–Subjective: information given by the patient.****Besides that, features are classified according to the following values:****–1: normal,****–2: above normal,****–3: well above normal****Questions or tasks to be addressed:** **1) Present an EDA analysis of the dataset.** Let us import necessary data science packages, load, check and prepare the dataset if necessary.import pandas as pd from statistics import mean, median import seaborn as sns import numpy as np import matplotlib.pyplot as plt sns.set() # Loading CSV's print("Loading Dataset") cardio = pd.read_csv('http://linux.ime.usp.br/~guilhevieira/cardio_train.csv', error_bad_lines = False) print("Dataset loaded successfully") cardio.info() cardio.head() cardio.tail()Okay, everything is alright. How many records do we have and how many features for each record?cardio.shapeLet us generate some statistics that summarize each numerical feature, like mean, variance, min, median and and max values.* Age (days)pd.DataFrame(cardio['age'].describe())The age values are in days, which is hard for human readability because we are accustomed with years, let us see the statistics in years.age_in_years = pd.DataFrame(cardio['age'].copy()) age_in_years['age'] = age_in_years['age'].apply(lambda x: x/365) age_in_years.describe()* Height (cm)pd.DataFrame(cardio['height'].describe())* Weigth (kg)pd.DataFrame(cardio['weight'].describe())* Systolic blood pressurepd.DataFrame(cardio['ap_hi'].describe())* Diastolic blood pressurepd.DataFrame(cardio['ap_lo'].describe())Alright, there are some interesting things over there. 
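One way to make those "interesting things" concrete is to count records with physiologically implausible blood pressures; the cut-offs below are arbitrary illustrative choices, not part of the original analysis:

# Rough sanity check on the blood pressure columns (cut-off values are arbitrary choices)
suspicious = cardio[(cardio['ap_hi'] <= 0) | (cardio['ap_lo'] <= 0) |
                    (cardio['ap_hi'] > 250) | (cardio['ap_lo'] > 200) |
                    (cardio['ap_hi'] < cardio['ap_lo'])]
print('%d of %d records have implausible blood pressure values' % (len(suspicious), len(cardio)))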
**CONCLUSIONS**Let us move on and visualize how weel distributed the datasets are in relation to each attribute throw histograms for numerical features and bar plots for categorical values.sns.distplot(cardio['age']) sns.distplot(cardio['height']) sns.distplot(cardio['weight']) cardio['gender'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Gender') plt.show() sns.distplot(cardio['ap_hi']) sns.distplot(cardio['ap_lo']) cardio['cholesterol'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Cholesterol') plt.show() cardio['gluc'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Glucose') plt.show() cardio['smoke'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Smoking') plt.show() cardio['alco'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Alcohol intake') plt.show() cardio['active'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Physical activity') plt.show() cardio['cardio'].value_counts().plot(kind="bar") plt.ylabel('Quantity') plt.xlabel('Presence or absence of cardiovascular disease') plt.show()Write some **CONCLUSIONS** **2) Split it into a training set and a test set using ```train_test_split``` method from sklearn.**from sklearn.model_selection import train_test_split df_train, df_test = train_test_split(cardio, test_size=0.2, random_state=145) df_train.shape df_train.tail() df_test.shape df_train.tail()Code to Generate HAIM embeddings from MIMIC-IV-MM dataset Project Info ->Copyright 2020 (Last Update: June 07, 2022) -> Authors: (), (), (), (), (), (), (), (), (), (), (),```**Licensed under the Apache License, Version 2.0**You may not use this file except in compliance with the License. You may obtain a copy of the License athttps://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.``` Requires ``` -> Previously generated pickle files from MIMIC-IV-MM Dataset```from MIMIC_IV_HAIM_API import * import gc # Full Core MIMIC-IV database path core_mimiciv_path = 'data/' df_haim_ids = pd.read_csv(core_mimiciv_path + 'pickle/haim_mimiciv_key_ids.csv') # General function that processes all data embeddings def process_cxr_embeddings_haim_id(haim_id, dt_patient, df_init): # DEMOGRAPHICS EMBEDDINGS EXTRACTION demo_embeddings = get_demographic_embeddings(dt_patient, verbose=0) gc.collect() #Clear memory # Time Series (TSFRESH-like) CHARTEVENT & LABEVENT EMBEDDINGS EXTRACTION aggregated_ts_ce_embeddings = get_ts_embeddings(dt_patient, event_type = 'chart') gc.collect() #Clear memory aggregated_ts_le_embeddings = get_ts_embeddings(dt_patient, event_type = 'lab') gc.collect() #Clear memory aggregated_ts_pe_embeddings = get_ts_embeddings(dt_patient, event_type = 'procedure') gc.collect() #Clear memory # CHEST XRAY VISION EMBEDDINGS EXTRACTION aggregated_densefeature_embeddings, _, aggregated_prediction_embeddings, _, _ = get_chest_xray_embeddings(dt_patient, verbose=0) gc.collect() #Clear memory # NOTES FROM ECGs aggregated_ecg_embeddings = get_notes_biobert_embeddings(patient, note_type = 'ecgnotes') gc.collect() #Clear memory # NOTES FROM ECOCARDIOGRAMs aggregated_echo_embeddings = get_notes_biobert_embeddings(patient, note_type = 'echonotes') gc.collect() #Clear memory # NOTES FROM RADIOLOGY aggregated_rad_embeddings = get_notes_biobert_embeddings(patient, note_type = 'radnotes') gc.collect() #Clear memory # CHEST XRAY VISION SINGLE-IMAGE EMBEDDINGS EXTRACTION print('getting xray') img = df_imcxr[idx] densefeature_embeddings, prediction_embeddings = get_single_chest_xray_embeddings(img) gc.collect() #Clear memory # Create Dataframes filteed by ordered sample number for Fusion df_haim_ids_fusion = pd.DataFrame([haim_id],columns=['haim_id']) df_demographics_embeddings_fusion = pd.DataFrame(demo_embeddings.reshape(1,-1), columns=['de_'+str(i) for i in range(demo_embeddings.shape[0])]) df_ts_ce_embeddings_fusion = pd.DataFrame(aggregated_ts_ce_embeddings.values.reshape(1,-1), columns=['ts_ce_'+str(i) for i in range(aggregated_ts_ce_embeddings.values.shape[0])]) df_ts_le_embeddings_fusion = pd.DataFrame(aggregated_ts_le_embeddings.values.reshape(1,-1), columns=['ts_le_'+str(i) for i in range(aggregated_ts_le_embeddings.values.shape[0])]) df_ts_pe_embeddings_fusion = pd.DataFrame(aggregated_ts_pe_embeddings.values.reshape(1,-1), columns=['ts_pe_'+str(i) for i in range(aggregated_ts_pe_embeddings.values.shape[0])]) df_vision_dense_embeddings_fusion = pd.DataFrame(densefeature_embeddings.reshape(1,-1), columns=['vd_'+str(i) for i in range(densefeature_embeddings.shape[0])]) df_vision_predictions_embeddings_fusion = pd.DataFrame(prediction_embeddings.reshape(1,-1), columns=['vp_'+str(i) for i in range(prediction_embeddings.shape[0])]) df_vision_multi_dense_embeddings_fusion = pd.DataFrame(aggregated_densefeature_embeddings.reshape(1,-1), columns=['vmd_'+str(i) for i in range(aggregated_densefeature_embeddings.shape[0])]) df_vision_multi_predictions_embeddings_fusion = pd.DataFrame(aggregated_prediction_embeddings.reshape(1,-1), columns=['vmp_'+str(i) for i in range(aggregated_prediction_embeddings.shape[0])]) df_ecgnotes_embeddings_fusion = pd.DataFrame(aggregated_ecg_embeddings.reshape(1,-1), columns=['n_ecg_'+str(i) for i in range(aggregated_ecg_embeddings.shape[0])]) 
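# Note on the block above and below: each modality's embedding vector is reshaped to a single row
# and given prefixed column names (de_, ts_ce_, ts_le_, ts_pe_, vd_, vp_, vmd_, vmp_, n_ecg_, n_ech_,
# n_rad_), so that all modalities can be concatenated column-wise into one fusion row per chest x-ray.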
df_echonotes_embeddings_fusion = pd.DataFrame(aggregated_echo_embeddings.reshape(1,-1), columns=['n_ech_'+str(i) for i in range(aggregated_echo_embeddings.shape[0])]) df_radnotes_embeddings_fusion = pd.DataFrame(aggregated_rad_embeddings.reshape(1,-1), columns=['n_rad_'+str(i) for i in range(aggregated_rad_embeddings.shape[0])]) # Vision targets cxr_target_columns = ['split','Atelectasis','Cardiomegaly','Consolidation','Edema','Enlarged Cardiomediastinum','Fracture','Lung Lesion','Lung Opacity','No Finding','Pleural Effusion','Pleural Other','Pneumonia','Pneumothorax','Support Devices', 'PerformedProcedureStepDescription','ViewPosition'] df_vision_targets_fusion = df_stay_cxr.loc[idx:idx][cxr_target_columns].reset_index(drop=True) # Embeddings FUSION df_fusion = df_haim_ids_fusion df_fusion = pd.concat([df_fusion, df_init], axis=1) df_fusion = pd.concat([df_fusion, df_demographics_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_vision_dense_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_vision_predictions_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_vision_multi_dense_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_vision_multi_predictions_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_ts_ce_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_ts_le_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_ts_pe_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_ecgnotes_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_echonotes_embeddings_fusion], axis=1) df_fusion = pd.concat([df_fusion, df_radnotes_embeddings_fusion], axis=1) #Add targets df_fusion = pd.concat([df_fusion, df_vision_targets_fusion], axis=1) gc.collect() #Clear memory return df_fusion # Let's select a single HAIM Patient from pickle files and check if it fits inclusion criteria haim_patient_idx = 4 #Load precomputed file filename = f"{haim_patient_idx:08d}" + '.pkl' folder = f"{haim_patient_idx:05d}"[:2] + "/" patient = load_patient_object(core_mimiciv_path + 'pickle/folder' + folder + filename) # Get information of chest x-rays conducted within this patiewnt stay df_cxr = patient.cxr df_imcxr = patient.imcxr admittime = patient.admissions.admittime.values[0] dischtime = patient.admissions.dischtime.values[0] df_stay_cxr = df_cxr.loc[(df_cxr['charttime'] >= admittime) & (df_cxr['charttime'] <= dischtime)] if not df_stay_cxr.empty: for idx, df_stay_cxr_row in df_stay_cxr.iterrows(): # Get stay anchor times img_charttime = df_stay_cxr_row['charttime'] img_deltacharttime = df_stay_cxr_row['deltacharttime'] # Get time to discharge and discharge location/status img_id = df_stay_cxr_row["dicom_id"] img_length_of_stay = date_diff_hrs(dischtime, img_charttime) discharge_location = patient.core['discharge_location'][0] if discharge_location == "DIED": death_status = 1 else: death_status = 0 # Select allowed timestamp range start_hr = None end_hr = img_deltacharttime # We need to reload it since the original object has been modified patient = load_patient_object(core_mimiciv_path + 'pickle/folder' + folder + filename) dt_patient = get_timebound_patient_icustay(patient, start_hr , end_hr) is_included = True if is_included: df_init = pd.DataFrame([[img_id, img_charttime, img_deltacharttime, discharge_location, img_length_of_stay, death_status]],columns=['img_id', 'img_charttime', 'img_deltacharttime', 'discharge_location', 'img_length_of_stay', 'death_status']) df_fusion = 
process_cxr_embeddings_haim_id(haim_id, dt_patient, df_init) if os.path.isfile(fname): df_fusion.to_csv(fname, mode='a', index=False, header=False) else: df_fusion.to_csv(fname, mode='w', index=False)Presto C++ mp4 file parserIn this notebook we will show how to use the presto mp4 file parser.*Note:* all cells starting with `%%` contains commands executed in the shell/terminal. Building the libraryThe first thing we need to do is to build the library. The library is hosted on github https://github.com/steinwurf/petro. In case we already clone it we remove it first, this is only for the sake of this notebook.%%bash rm -rf petro git clone :steinwurf/petro.gitCloning into 'petro'...Lets see which files we have:%%bash ls petroconfig.py examples NEWS.rst README.rst src test waf wscriptFirst step is to run the configure step (`--bundle-path` refers to where external dependencies are downloaded):%%bash cd petro ./waf configure --bundle-path=~/dev/bundle_dependenciesChecking for program 'git' : /usr/bin/git Resolve dependency waf-tools : /home/mvp/dev/bundle_dependencies/waf-tools-ae40a5/3.1.3 Resolve dependency gtest : /home/mvp/dev/bundle_dependencies/gtest-39e104/3.0.0 Setting top to : /home/mvp/Dropbox/work_code/notebooks/presto_mp4_example/petro Setting out to : /home/mvp/Dropbox/work_code/notebooks/presto_mp4_example/petro/build/linux Using the mkspec: : cxx_default Checking for 'g++' (C++ compiler) : /usr/bin/g++ Checking for library pthread : yes 'configure' finished successfully (0.184s)You can see all the available options by running `./waf --help`. After configuring the next step is to build the library.%%bash cd petro ./waf buildWaf: Entering directory `/home/mvp/Dropbox/work_code/notebooks/presto_mp4_example/petro/build/linux' [ 1/64] Compiling ../../../../../dev/bundle_dependencies/gtest-39e104/3.0.0/gtest/src/gtest-all.cc [ 2/64] Compiling src/petro/box/avcc.cpp [ 3/64] Compiling src/petro/box/bxml.cpp [ 4/64] Compiling src/petro/box/co64.cpp [ 5/64] Compiling src/petro/box/cprt.cpp [ 6/64] Compiling src/petro/box/ctts.cpp [ 7/64] Compiling src/petro/box/elst.cpp [ 8/64] Compiling src/petro/box/free.cpp [ 9/64] Compiling src/petro/box/frma.cpp [10/64] Compiling src/petro/box/ftyp.cpp [11/64] Compiling src/petro/box/hdlr.cpp [12/64] Compiling src/petro/box/hmhd.cpp [13/64] Compiling src/petro/box/iinf.cpp [14/64] Compiling src/petro/box/iloc.cpp [15/64] Compiling src/petro/box/imif.cpp [16/64] Compiling src/petro/box/ipmc.cpp [17/64] Compiling src/petro/box/mdat.cpp [18/64] Compiling src/petro/box/mdhd.cpp [19/64] Compiling src/petro/box/mehd.cpp [20/64] Compiling src/petro/box/mfhd.cpp [21/64] Compiling src[...]Parsing a mp4 fileLets try to parse an mp4 file. First we need to download one:%%bash wget http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4--2015-12-01 14:09:20-- http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4 Resolving download.blender.org (download.blender.org)... 192.168.3.11 Connecting to download.blender.org (download.blender.org)|192.168.3.11|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 64657027 (62M) [video/mp4] Saving to: ‘BigBuckBunny_320x180.mp4.1’ 0K .......... .......... .......... .......... .......... 0% 1.17M 53s 50K .......... .......... .......... .......... .......... 0% 2.31M 40s 100K .......... .......... .......... .......... .......... 0% 77.9M 27s 150K .......... .......... .......... .......... .......... 0% 2.35M 27s 200K .......... .......... .......... 
.......... .......... 0% 71.0M 21s 250K .......... .......... .......... .......... .......... 0% 2.40M 22s 300K .......... .......... .......... .......... .......... 0% 101M 19s 350K .......... .......... .......... .......... .......... 0% 63.2M [...]To see the `mp4` file content we can use the *complete_parser* example:%%bash ./petro/build/linux/examples/complete_parser BigBuckBunny_320x180.mp4ftyp size: 20 major_brand: isom minor_version: 512 compatible_brands: mp41 free size: 8 mdat size: 64312805 data_offset: 36 moov size: 344194 mvhd size: 108 version: 0 creation_time: Thu Jan 1 01:00:00 1970 modification_time: Thu Jan 1 01:00:00 1970 time_scale: 1000 duration: 596459 rate: 1 volume: 1 next_track_id: 3 trak size: 119807 tkhd size: 92 version: 0 track_enabled: 1 track_in_movie: 1 track_in_preview: 1 creation_time: Thu Jan 1 01:00:00 1970 modification_time: Thu Jan 1 01:00:00 1970 track_id: 1 duration: 596459 layer: 0 alternate_group: 0 volume: 0 width: 320 height: 180 mdia size: 119707 mdhd size: 32 version: 0 creation_time: Thu Jan 1 01:00:00 1970 modifi[...]This script is used for writing all the gradient PNGs to files in scratch First, the gradients for all the 'difference' matrices are plotted using dataframes containing information for each task, and the differences calculated between cases/controls, average for all subjects in that task! ls '/scratch/a/arisvoin/lbassman/spins_gradients/sub-CMH0001' import pandas as pd rs_gsr_df = pd.read_csv('/scratch/a/arisvoin/lbassman/spins_gradients/rs_gsr_df') rs_gsr_grad_group = rs_gsr_df.groupby('grad_num') rs_gsr_grad1_group = rs_gsr_grad_group.get_group('grad1') rs_gsr_grad2_group = rs_gsr_grad_group.get_group('grad2') rs_gsr_grad3_group = rs_gsr_grad_group.get_group('grad3') ea_gsr_df = pd.read_csv('/scratch/a/arisvoin/lbassman/spins_gradients/ea_gsr_df') ea_gsr_grad_group = ea_gsr_df.groupby('grad_num') ea_gsr_grad1_group = ea_gsr_grad_group.get_group('grad1') ea_gsr_grad2_group = ea_gsr_grad_group.get_group('grad2') ea_gsr_grad3_group = ea_gsr_grad_group.get_group('grad3') rs_nogsr_df = pd.read_csv('/scratch/a/arisvoin/lbassman/spins_gradients/rs_nogsr_df') rs_nogsr_grad_group = rs_nogsr_df.groupby('grad_num') rs_nogsr_grad1_group = rs_nogsr_grad_group.get_group('grad1') rs_nogsr_grad2_group = rs_nogsr_grad_group.get_group('grad2') rs_nogsr_grad3_group = rs_nogsr_grad_group.get_group('grad3') ea_nogsr_df = pd.read_csv('/scratch/a/arisvoin/lbassman/spins_gradients/ea_nogsr_df') ea_nogsr_grad_group = ea_nogsr_df.groupby('grad_num') ea_nogsr_grad1_group = ea_nogsr_grad_group.get_group('grad1') ea_nogsr_grad2_group = ea_nogsr_grad_group.get_group('grad2') ea_nogsr_grad3_group = ea_nogsr_grad_group.get_group('grad3') write_df_column_to_pscalar_nib(grad1_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad1.pscalar.nii') write_df_column_to_pscalar_nib(grad2_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad2.pscalar.nii') write_df_column_to_pscalar_nib(grad3_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad3.pscalar.nii') write_df_column_to_pscalar_nib(grad1_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_GSR_grad1.pscalar.nii') 
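# The calls below repeat the same pscalar export for the remaining task/GSR combinations
# (ea_GSR, rs_noGSR, ea_noGSR) and gradient numbers; the gradN_group variables are
# presumably assigned from the task-specific grad1/grad2/grad3 groups built above.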
write_df_column_to_pscalar_nib(grad2_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_GSR_grad2.pscalar.nii') write_df_column_to_pscalar_nib(grad3_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_GSR_grad3.pscalar.nii') write_df_column_to_pscalar_nib(grad1_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_noGSR_grad1.pscalar.nii') write_df_column_to_pscalar_nib(grad2_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_noGSR_grad2.pscalar.nii') write_df_column_to_pscalar_nib(grad3_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_noGSR_grad3.pscalar.nii') write_df_column_to_pscalar_nib(grad1_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_noGSR_grad1.pscalar.nii') write_df_column_to_pscalar_nib(grad2_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_noGSR_grad2.pscalar.nii') write_df_column_to_pscalar_nib(grad3_group, pconnGrad, columns = ['statistic'], labelname_column = 'ROI', to_filename = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_ea_noGSR_grad3.pscalar.nii') test_plot = plot_cifti_surf_montage(left_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.L.inflated_MSMAll.32k_fs_LR.surf.gii', right_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.R.inflated_MSMAll.32k_fs_LR.surf.gii', cifti_map = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad1.pscalar.nii', bg_cifti_map = None, bg_cifti_map_index = 0, colormap = "RdYlBu", darkness = 0, to_filename = None) test_plot = plot_cifti_surf_montage(left_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.L.inflated_MSMAll.32k_fs_LR.surf.gii', right_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.R.inflated_MSMAll.32k_fs_LR.surf.gii', cifti_map = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad2.pscalar.nii', bg_cifti_map = None, bg_cifti_map_index = 0, colormap = "RdYlBu", darkness = 0, to_filename = None) test_plot = plot_cifti_surf_montage(left_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.L.inflated_MSMAll.32k_fs_LR.surf.gii', right_surf = '/scratch/a/arisvoin/edickie/SPINS_glasser_tian/tpl-fsLR/S1200.R.inflated_MSMAll.32k_fs_LR.surf.gii', cifti_map = '/scratch/a/arisvoin/lbassman/spins_gradients/difference_rs_GSR_grad3.pscalar.nii', bg_cifti_map = None, bg_cifti_map_index = 0, colormap = "RdYlBu", darkness = 0, to_filename = None)Método de NewtonJ.J---\begin{equation}x_{k+1} = x_{k} - \frac{f(x_{k})}{f'(x_{k})}\end{equation} Ejemplo: $f(x) = x^{2} + 3x +1$import numpy as np from sympy import * from sympy.utilities.lambdify import lambdify import matplotlib.pyplot as plt init_printing(use_unicode=True) #calcular la derivada de f x = symbols('x') funcion = 1 + 3*x + x**2 #escribir la función aqui dfuncion = diff(funcion, x) print(str(dfuncion)) f = lambdify(x, funcion) df = lambdify((x), dfuncion) e = 0.0001 #error maxit = 1000000 #iteraciones 
máximas x0 = 2. #punto inicial def NewtonR(x0, func = f, dfunc = df, error = e, iterations = maxit): it = 0 while (abs(f(x0)) > e) and (it < maxit): it += 1 x0 = x0 - f(x0)/df(x0) return x0 sol = NewtonR(x0) print(sol) X = np.linspace(-4, 2, 100) plt.plot(X, f(X)) plt.plot(sol,f(sol),'ro') plt.show()Stratified samplingIn large dataset a relatively small group of points might be overplotted by the dominant group. In this case **stratified** sampling can help.import numpy as np import pandas as pd from lets_plot import * LetsPlot.setup_html() N = 5000 small_group = 3 large_group = N - small_group np.random.seed(123) data = dict( x = np.random.normal(0, 1, N), y = np.random.normal(0, 1, N), cond = ['A' for _ in range(small_group)] + ['B' for _ in range(large_group)] ) # Data points in group 'A' (small group) are overplotted by the dominant group 'B'. p = ggplot(data, aes('x','y',color='cond')) + \ scale_color_manual(values=["red", "#1C9E77"], breaks=['A', 'B']) p + geom_point(size=5, alpha=.2) # The 'random' sampling loses the group 'A' altogether. p + geom_point(size=5, sampling=sampling_random(50, seed=2)) # Stratified sampling ensures that group 'A' is represented. p + geom_point(size=5, sampling=sampling_random_stratified(50, seed=2))PandasBasically the main use for pandas is to read in a csv file and accessing content from the `dataframe` that pandas createimport pandas as pd pwd pwd pd.read_csv('../datasets/salaries.csv') df = pd.read_csv('../datasets/salaries.csv')df is a pandas dataframedf['Name'] df[['Name', 'Salary']] df['Salary'].max() df.describe() type(df.describe()) df['Salary'] > 60000 df[df['Salary'] > 60000] df.as_matrix()part 4: descriptive statistics and bariclot calculation * create summary statistics of the entire dataset using * compare training, validation, and testing sets for both leak and clot * calculate bariclot score for each patient in the clot training set this will generate * a `results` directory * a `results/descriptive_stats` subdirectory - holds all descriptive data generated by tableone * a `results/bariclot` subdirectory - holds bariclot results for the test cohort. 
we also produce a sanity check for bariclot on 2015 data to make sure it performs as expected preliminariesimport pandas as pd import numpy as np import python_modules.constants as constants import os from tableone import TableOne np.random.seed(seed=1872) # Set ipython's max row display pd.set_option('display.max_row', 100) # Set iPython's max column display pd.set_option('display.max_columns', 50) PATH_IMPORT = 'study_data/' PATH_RESULTS = 'results/' PATH_DESCRIPTIVE = 'results/descriptive_stats/' PATH_BARICLOT = 'results/bariclot/' # make dirs to hold outputs # if folders already exist, this will throw errors os.mkdir(f'{PATH_RESULTS}') os.mkdir(f'{PATH_DESCRIPTIVE}') os.mkdir(f'{PATH_BARICLOT}')import data, specify variables to be included in table 1df_main = pd.read_csv(f'{PATH_IMPORT}/study_data_split.csv', low_memory=False, index_col=0) #labs_type = 'continuous' #lim_intra = False # #table_one_cats = constants.categorical(labs = labs_type, lim_intra = lim_intra) + ['LEAK', 'CLOT'] #table_one_cons = constants.continuous(labs = labs_type, lim_intra = lim_intra) #table_one_include = table_one_cats + table_one_cons table_one_cats = constants.CATEGORICAL_PRE + constants.OUTCOME table_one_cons = constants.CONTINUOUS_PRE + constants.CONTINUOUS_POST table_one_include = table_one_cats + table_one_consdescriptive statistics we split the data into quarters in the last notebook and somewhat confusingly named part of the training set `val_1`. therefore create another row to map the actual training, validation, and testing sets, ultimately creating the final training set by adding `val_1` patients to `train` patients.final_analysis_pop_dict = {'train':0, 'val_1':0, 'val_2':1, 'test': 2} df_main['consolidated_clot_groups'] = df_main.CLOT_SET.map(final_analysis_pop_dict) df_main['consolidated_leak_groups'] = df_main.LEAK_SET.map(final_analysis_pop_dict)build summary tablesmytable = TableOne(df_main, table_one_include, table_one_cats) mytable_clot = TableOne(df_main, table_one_include, table_one_cats, 'consolidated_clot_groups', pval=True) mytable_leak = TableOne(df_main, table_one_include, table_one_cats, 'consolidated_leak_groups', pval=True)save themmytable.to_csv(f'{PATH_DESCRIPTIVE}dataset_summary.csv') mytable_clot.to_csv(f'{PATH_DESCRIPTIVE}clot_set_summary.csv') mytable_leak.to_csv(f'{PATH_DESCRIPTIVE}leak_set_summary.csv')bariclot calculation dicts and functions to parse data into format needed for bariclot calculationd_funcstat = {'Independent':0, 'Partially Dependent':1, 'Totally Dependent':2} d_clothist = {'No':0, 'Yes':1} def d_race(race): if race == 'Black or African American': return 1 else: return 0bariclot calculation * , , , , , , . Predicting venous thromboembolism following laparoscopic bariatric surgery: development of the BariClot tool using the MBSAQIP database. Surg Endosc. 
2018; PMID:30003351; http://dx.doi.org/10.1007/s00464-018-6348-0def calculate_bariclot(row): w = d_funcstat[row.FUNSTATPRESURG] * 3 x = d_clothist[row.HISTORY_DVT] * 9 y = d_race(row.race_PUF) * 3 z = row.OPLENGTH / 60 return np.sum([w,x,y,z]) def calculate_bariclot_groups(row): bc = calculate_bariclot(row) if bc < 1: return 0 elif bc < 7: return 1 elif bc < 10: return 2 else: return 3test population some patients (333 in the total study cohort) have missing data for operative duration; replace these with the mean operative duration.df_main['OPLENGTH'].fillna(df_main['OPLENGTH'].mean(), inplace = True)select down to clot test populationdf_test_clot = df_main[df_main['CLOT_SET'] == 'test'] len(df_test_clot)run the calculatorbariclot_scores = df_test_clot.apply(calculate_bariclot, axis=1) bariclot_score_groups = df_test_clot.apply(calculate_bariclot_groups, axis=1)save targets and values for post-processing in R#dataframe to hold results df_bariclot = pd.DataFrame() #populate dataframe with bariclot scores and target outcomes df_bariclot['scores'] = bariclot_scores df_bariclot['targs'] = df_test_clot['CLOT'] df_bariclot = df_bariclot.reset_index(drop=True) df_bariclot.to_csv(f'{PATH_BARICLOT}bariclot_test.csv')look at auc for 2015 population just as a sanity check (it checks out when we run stats in R)df_2015_clot = df_main[df_main['OPYEAR'] == 2015] bariclot_scores_2015 = df_2015_clot.apply(calculate_bariclot, axis=1) df_bariclot_2015 = pd.DataFrame() df_bariclot_2015['scores'] = bariclot_scores_2015 df_bariclot_2015['targs'] = df_2015_clot['CLOT'] df_bariclot_2015 = df_bariclot_2015.reset_index(drop=True) df_bariclot_2015.to_csv(f'{PATH_BARICLOT}bariclot_2015.csv')Read MD outputimport numpy as np import matplotlib.pyplot as pltUnbiased resultx,y = np.loadtxt('output/unbiased/xvyw1beta5.0gammax1.0gammay1.0_h0.01.txt', usecols = (0,1), unpack = True, skiprows=1) beta=5.0 levels = np.linspace(1, 5, 9) hist = np.histogram2d(x, y, bins=50) prob = hist[0].T/np.sum(hist[0].T) free = -(1/beta)*np.log(prob+1e-10) mids_x, mids_y = 0.5*(hist[1][1:]+hist[1][:-1]), 0.5*(hist[2][1:]+hist[2][:-1]) plt.contourf(mids_x, mids_y, free, levels=levels) plt.xlim(-1.0,1.0) plt.ylim(-1.0,1.0) plt.xlabel('x') plt.ylabel('y') plt.colorbar() plt.show()Biased resultx,y = np.loadtxt('output/biased/xvyw1beta5.0gammax1.0gammay1.0_h0.01.txt', usecols = (0,1), unpack = True, skiprows=1) weights = np.loadtxt('output/biased/reweighting_factor.txt') beta=5.0 levels = np.linspace(1, 5, 9) hist = np.histogram2d(x, y, bins=50, weights = weights) prob = hist[0].T/np.sum(hist[0].T) free = -(1/beta)*np.log(prob+1e-10) mids_x, mids_y = 0.5*(hist[1][1:]+hist[1][:-1]), 0.5*(hist[2][1:]+hist[2][:-1]) plt.contourf(mids_x, mids_y, free, levels=levels) plt.xlim(-1.0,1.0) plt.ylim(-1.0,1.0) plt.xlabel('x') plt.ylabel('y') plt.colorbar() plt.show()환경 준비1. 라이브러리 다운로드2. 네이버 영화평과 긍부정 데이터를 다운로드합니다 (파일보기 + 새로고침 후 확인)* 자체 데이터셋을 사용할 경우 내용과 카테고리가 각각 content와 label 열에 들어가는 파일(아래 예시 참조)로 dataset.xlsx로 저장 후 기존 파일을 덮어쓰기 하면 됩니다. * 엑셀파일의 label과 content의 순서는 상관없으나 label은 0부터 시작하는 숫자로 입력하면 좋습니다. 예를들어 카테고리가 4개면 label을 0, 1, 2, 3으로 표시해주세요.```label content1 영화가 재밌다. 1 이 영화 추천해요. 0 지루한 영화였습니다.... ```!pip3 install -q transformers !git clone https://github.com/kiyoungkim1/ReadyToUseAI from ReadyToUseAI.src.nlp import make_sample_dataset, bert_sequence_classification make_sample_dataset.nsmc(mode='test', text_only=False) # mode: which datasets? 
'train' or 'test'[Training] * 첨부된 샘플의 경우 약 40min 소요 (Tesla T4 GPU)* min_sentence_length보다 긴 문장만 사용합니다.* MAX_LEN은 모델이 인식하는 token의 길이로, 전체길이가 약 MAX_LEN의 2배보다 긴 문장은 뒷부분이 삭제됩니다 (예를들어 MAX_LEN = 128이면, 대략 길이가 256이상인 문장은 뒷부분이 무시됨).* batch_size는 한번에 몇개의 sample을 계산하는지를 나타내며, 제한된 메모리에서 MAX_LEN을 줄이면 batch_size를 키울 수 있고, MAX_LEN를 키우면 batch_size를 줄여야 합니다. * epochs는 데이터셋을 몇번 반복해서 학습할지 여부이며, dataset_split은 전체 데이터 중 몇 %를 검증용 데이터셋으로 사용할지 여부입니다.CLS = bert_sequence_classification.Classification(model_name='kykim/bert-kor-base', min_sentence_length=10, MAX_LEN=128, batch_size=32, use_bert_tokenizer=True) CLS.dataset(data_path='dataset.xlsx') CLS.load_model(mode='train') CLS.train(epochs=3, dataset_split=0.1)[Inference]* sentences에 원하는 문장을 아래 형식과 같이 넣으면 해당하는 카테고리를 반환합니다.* saved_model_path는 학습된 모델이 저장된 '폴더명'입니다.sentences = ['영화 재밌어요', '영화 재미없어요', '그냥 시간떼우기용', '완전 추천작'] saved_model_path='model/saved/3' CLS = bert_sequence_classification.Classification(model_name='kykim/bert-kor-base', min_sentence_length=10, MAX_LEN=128, batch_size=64, use_bert_tokenizer=True) CLS.load_model(mode='inference', saved_model_path=saved_model_path) logit = CLS.inference(sentences=sentences) print(logit) # 네이버 영화평의 경우 0은 부정 카테고리, 1은 긍정 카테고리로 설정되어 있음Condicionales. Declaraciones.Una declaración es un elemento sintáctico que permite la ejecución de un bloque de código delimitado por llaves ```{``` ```}``` dependiendo de un contexto.``` {boque de código}```Cuando una declaración sólo contiene una expresión, no son necesarias las llaves. Expresiones lógicas.Una expresión lógica es una que regresa un avalor ```TRUE```o ```FALSE```. Condicional ```if```.Esta declaración ejecuta un bloque de código si la expresión evaluada es ```TRUE```.```if () {}``` **Ejemplo:**x <- 1 if (x < 2) 'menor que dos' if (x < 2) { y <- x + 2 z <- x ^ 2 } y zLa estructura ```if```... ```else```.```if () {} else {}```x <- 1 if (x > 2) { y <- x + 2 } else { y <- "No" } yNatural Language Processing and Text Mining (NLPTM) Unstructured Data Analysis (UDA)* 02 - Dasar-Dasar Natural Language Processing (NLP)- Bagian ke-01 (C) - 2020 tau-data Indonesia ~ https://tau-data.id Outline Module NLPTM-02/UDA-02:* Tokenisasi* Stemming dan Lemma Instalasi Modul:1. Google Colab ==> Jalankan Cell Berikutnya, stop reading the rest of this cell.2. Local (Windows/Linux)* Coba di terminal jalankan perintah **pip install --upgrade spacy*** **Kalau gagal**, unduh spacy dari sini (Asumsi menggunakan Python 3.6 - Recommended, Baca ADSP-01): - Link: https://www.lfd.uci.edu/~gohlke/pythonlibs/spacy - thinc‑7.4.1‑cp36‑cp36m‑win_amd64.whl - srsly‑2.2.0‑cp36‑cp36m‑win_amd64.whl - preshed‑3.0.2‑cp36‑cp36m‑win_amd64.whl - murmurhash‑1.0.2‑cp36‑cp36m‑win_amd64.whl - cymem‑2.0.3‑cp36‑cp36m‑win_amd64.whl - spacy‑2.3.2‑cp36‑cp36m‑win_amd64.whldi terminal jalankan perintah "**pip install XYZ**" dimana **XYZ** adalah file-file diatas secara terurut. "Setelah" berhasil menginstall spacy, jalankan perintah berikut di terminal/command prompt. - python -m spacy download en - python -m spacy download xx - python -m spacy download en_core_web_smSetelah itu (masih di terminal/command prompt): - python (lalu enter) - import nltk - nltk.download('popular')Semua langkah diatas hanya dilakukan 1x (karena sudah terinstall di komputer). 
Jika tidak suka repot, maka gunakan saja Google Colaboratory.# Jalankan Cell ini "HANYA" jika anda menggunakan Google Colab import nltk !mkdir data !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/slang.txt !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_id.txt !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_en.txt !pip install spacy python-crfsuite unidecode textblob sastrawi !python -m spacy download en !python -m spacy download xx !python -m spacy download en_core_web_sm nltk.download('popular')TokenisasiTokenisasi adalah pemisahan kata, simbol, frase, dan entitas penting lainnya (yang disebut sebagai token) dari sebuah teks untuk kemudian di analisa lebih lanjut. Token dalam NLP sering dimaknai dengan "sebuah kata", walau tokenisasi juga bisa dilakukan ke kalimat, paragraf, atau entitas penting lainnya (misal suatu pola string DNA di Bioinformatika).Mengapa perlu tokenisasi? Langkah penting dalam preprocessing, menghindari kompleksitas mengolah langsung pada string asal. Menghindari masalah (semantic) saat pemrosesan model-model natural language. Suatu tahapan sistematis dalam merubah unstructured (text) data ke bentuk terstruktur yang lebih mudah di olah.[Image Source]: https://www.softwareadvice.com/resources/what-is-text-analytics/ Tokenisasi dengan modul NLTKKelebihan: Well established dengan dukungan bahasa yang beragam Salah satu modul NLP dengan fungsi terlengkap, termasuk WordNet Free dan mendapat banyak dukungan akademis.Kekurangan: "Tidak support" bahasa Indonesia Murni Python: relatif lebih lambathttps://www.nltk.org/import nltk T = "Hello, Mr. Man. He smiled!! This, i.e. that, is it." Word_Tokens = nltk.word_tokenize(T) print(Word_Tokens) # tokenisasi kata # Bandingkan jika menggunakan fungsi split di Python, apakah bedanya? print(T.split()) # Apakah kesimpulan yang bisa kita tarik? # Split() ==> Bukan Tokenisasi!. Sentence_Tokens = nltk.sent_tokenize(T) print(Sentence_Tokens) # Tokenisasi kalimat # Perhatikan hasilnya, ada berapa kalimat yang di deteksi? setuju?['Hello, Mr. Man.', 'He smiled!!', 'This, i.e.', 'that, is it.']Trigger Diskusi Forum: Apakah tanda baca seperti "?" atau "!" akan memisahkan kalimat? Apakah tanda "carriage return"/enter/ganti baris memisahkan kalimat? Apakah ";" memisahkan kalimat? Apakah tanda dash "-" memisahkan kata? Dalam bahasa Indonesia/Inggris?Tips: Perhatikan bentuk struktur data "output" dari tokenisasi NLTK.Catatan: pindah baris di Python string bisa dilakukan dengan menggunakan symbol "\n"Contoh: Tokenisasi dengan modul SpacyKelebihan: Di claim lebih cepat (C-based) License termasuk untuk komersil Dukungan bahasa yang lebih banyak dari NLTK (termasuk bahasa Indonesia*)Kekurangan: Fungsi yang lebih terbatas (dibandingkan NLTK). Karena berbasis compiler, sehingga instalasi cukup menantang.https://spacy.io/# Contoh tokenisasi menggunakan Spacy from spacy.lang.en import English nlp_en = English() T = "Hello, . He smiled!! This, i.e. that, is it." nlp = nlp_en(T) for token in nlp: print(token.text, end =', ') nlp_en.add_pipe(nlp_en.create_pipe('sentencizer')) # New in latest Spacy nlp = nlp_en(T) for kalimat in nlp.sents: print(kalimat) # Hati-hati! ... token bukan string di Spacy, karena C-based, ia bekerja di byte bukan unicode. token = nlp[0] print(token) type(token) #token=='Hello'HelloTrigger Diskusi Forum: Apakah hasil tokenisasi Spacy = NLTK? Mengapa? 
Lakukan latihan seperti yang dilakukan sebelumnya dengan modul NLTK, apakah hasilnya sama dengan Spacy?Catatan: Contoh sederhana ini menekankan perbedaan ilmu linguistik dan computational linguistic. Tokenisasi dengan TextBlobKelebihan: Sederhana & mudah untuk digunakan/pelajari. Textblob objects punya behaviour/properties yang sama dengan string di Python. TextBlob dibangun dari kombinasi modul NLTK dan (Clips) PatternKekurangan: Tidak secepat Spacy dan NLTK Language Model terbatas: English, German, French*Blob : Binary large Object# Tokenizing di TextBlob from textblob import TextBlob T = "Hello, . He smiled!! This, i.e. that, is it." print(TextBlob(T).words) kalimatS = TextBlob(T).sentences print([str(kalimat) for kalimat in kalimatS])['Hello, Mr. Man.', 'He smiled!!', 'This, i.e.', 'that, is it.']Trigger Diskusi Forum: Ada yang berbeda dari hasilnya? Apakah lebih baik seperti ini?Tips: TextBlob biasa digunakan untuk prototyping pada data yang tidak terlalu besar.Catatan: Hati-hati tipe data Blob tidak biasa (objek).# Saat melakukan coding di Python, selalu perhatikan "tipe data" yang dihasilkan oleh modul. A = TextBlob(T).sentences B = TextBlob(T).words print(A[0], type(A[0])) print(B[0], type(B[0])) # Apakah bedanya dengan tipe data str biasa di python? # Di Spacy ini tidak berlaku B[0]=='Hello' # "properties" Blob word print(dir(C)) # "properties" string di Python print(dir(D))['__add__', '__class__', '__contains__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getnewargs__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__mod__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__rmod__', '__rmul__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'capitalize', 'casefold', 'center', 'count', 'encode', 'endswith', 'expandtabs', 'find', 'format', 'format_map', 'index', 'isalnum', 'isalpha', 'isdecimal', 'isdigit', 'isidentifier', 'islower', 'isnumeric', 'isprintable', 'isspace', 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'maketrans', 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']Tokenisasi tidak hanya language dependent, tapi juga environment dependentTokenization sebenarnya tidak sesederhana memisahkan berdasarkan spasi dan removing symbol. Sebagai contoh dalam bahasa Jepang/Cina/Arab suatu kata bisa terdiri dari beberapa karakter.[Image Source]# Contoh Tokenizer untuk twitter from nltk.tokenize import TweetTokenizer Tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True) tweet = "@Kirana_Sutanto I am so happpppppppy" print(Tokenizer.tokenize(tweet)) # Masih salah (i.e. "happpy"), nanti kita akan perbaiki ini dengan "spell check" # catatan: pada permasalahan "Sentiment analysis" kata yang ditulis panjang seperti diatas # bisa mengindikasikan sentiment yang kuat['I', 'am', 'so', 'happpy']Tokenisasi (NLP) Bahasa Indonesia:NLTK belum support Bahasa Indonesia, bahkan module NLP Python yang support bahasa Indonesia secara umum masih sangat langka. Beberapa resources yang dapat digunakan: KirraLabs: Mix of NLP-TextMining resources Sastrawi 1.0.1: untuk "stemming" & stopwords bahasa Indonesia. 
Daftar Kata Dasar Indonesia: Bisa di load sebagai dictionary di Python Wiktionary: ProyekWiki bahasa Indonesia [termasuk Lexicon] WordNet Bahasa Indonesia: Bisa di load sebagai dictionary (atau NLTK*) di Python. Daftar Kata Baku-Tidak Baku: Bisa di load sebagai dictionary di Python. Spacy: Cepat/efisien, MIT License, tapi language model Indonesia masih terbatas. UdPipe: Online request & restricted license (support berbagai bahasa - pemrograman).# Contoh Tokenisasi dalam bahasa Indonesia dengan Spacy from spacy.lang.id import Indonesian nlp_id = Indonesian() # Language Model teks = 'Sore itu, Hamzah melihat kupu-kupu di taman. Ibu membeli oleh-oleh di pasar' tokenS_id = nlp_id(teks) #T = [] #for token in tokenS_id: # T.append(token) print([t for t in tokenS_id]) # Jika menggunakan Language model English: tokenS_en = nlp_en(teks) print([token.text for token in tokenS_en])['Sore', 'itu', ',', 'Hamzah', 'melihat', 'kupu', '-', 'kupu', 'di', 'taman', '.', 'Ibu', 'membeli', 'oleh', '-', 'oleh', 'di', 'pasar']Word Case (Huruf BESAR/kecil): Untuk menganalisa makna (semantic) dari suatu (frase) kata dan mencari informasi dalam proses textmining, seringnya (*) kita tidak membutuhkan informasi huruf besar/kecil dari kata tersebut. Text case normaliation dapat dilakukan pada string secara efisien tanpa melalui tokenisasi (mengapa?). Namun, bergantung pada analisa teks yang akan digunakan pengguna harus berhati-hati dengan urutan proses (pipelining) dalam preprocessing. Mengapa dan apa contohnya?(*) Coba temukan minimal 2 pengecualian dimana  huruf kapital/kecil (case) mempengaruhi makna/pemrosesan teks.# Ignore case (huruf besar/kecil) T = "Hi there!, I am a student. Nice to meet you :)" print(T.lower()) print(T.upper()) # Perintah ini sangat efisien karena hanya merubah satu bit di setiap (awal) bytes dari setiap karakter # Sehingga tetap efisien jika ingin dilakukan sebelum tokenisasihi there!, i am a student. nice to meet you :) HI THERE!, I AM A STUDENT. NICE TO MEET YOU :)Morphological-Linguistic Normalization: Stemming & Lemmatization(Canonical Representation) Stemming dan Lemma Stemmer akan menghasilkan sebuah bentuk kata yang disepakati oleh suatu sistem tanpa mengindahkan konteks kalimat. Syaratnya beberapa kata dengan makna serupa hanya perlu dipetakan secara konsisten ke sebuah kata baku. Banyak digunakan di IR & komputasinya relatif sedikit. Biasanya dilakukan dengan menghilangkan imbuhan (suffix/prefix). lemmatisation akan menghasilkan kata baku (dictionary word) dan bergantung konteks. Lemma & stemming bisa jadi sama-sama menghasilkan suatu akar kata (root word). Misal : Melompat ==> lompat Mengapa melakukan Stemming & Lemmatisasi? Sering digunakan di IR (Information Retrieval) agar ketika seseorang mencari kata tertentu, maka seluruh kata yang terkait juga diikutsertakan. Misal: organize, organizes, and organizing  dan democracy, democratic, and democratization. Di Text Mining Stemming dan Lemmatisasi akan mengurangi dimensi (mengurangi variasi morphologi), yang terkadang akan meningkatkan akurasi. Tapi di IR efeknya malah berkebalikan: meningkatkan recall, tapi menurunkan akurasi [Link]. Contoh: kata operate, operating, operates, operation, operative, operatives, dan operational jika di stem menjadi operate, maka ketika seseorang mencari "operating system", maka entry seperti operational and research dan operative and dentistry akan muncul sebagai entry dengan relevansi yang cukup tinggi. Stemming tidak perlu "benar", hanya perlu konsisten. 
Sehingga memiliki berbagai variansi, (sebagian) contoh di NLTK:# Contoh Stemming di NLTK from nltk.stem.lancaster import LancasterStemmer from nltk.stem.porter import PorterStemmer from nltk.stem.snowball import SnowballStemmer T = 'presumably I would like to MultiPly my provision, saying tHat without crYing' print('Sentence: ',T) StemmerS = [LancasterStemmer, PorterStemmer, SnowballStemmer] Names = ['Lancaster', 'Porter', 'SnowBall'] for stemmer_name,stem in zip(Names,StemmerS): if stemmer_name == 'SnowBall': st = stem('english') else: st = stem() print(stemmer_name,': ',' '.join(st.stem(t) for t in T.split())) # perhatikan, kita tidak melakukan case normalization (lowercase) # Hasil stemming bisa tidak bermakna # Contoh Lemmatizer di NLTK from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() T = "apples and Oranges are similar. boots and hippos aren't, don't you think?" print('Sentence: ', T) print('Lemmatize: ',' '.join(lemmatizer.lemmatize(t) for t in T.split())) # Lemma case sensitive. Dengan kata lain string harus diubah ke dalam bentuk huruf kecil (lower case) # Lemmatizer menggunakan informasi pos. "pos" (part-of-speech) akan dibahas di segmen berikutnya print(lemmatizer.lemmatize("better", pos="a")) # adjective print(lemmatizer.lemmatize("better", pos="v")) # verb # TextBlob Stemming & Lemmatizer from textblob import Word # Stemming print(Word('running').stem()) # menggunakan NLTK Porter stemmer # Lemmatizer print(Word('went').lemmatize('v')) # default Noun, plural akan menjadi singular dari akar katanya # Juga case sensitive # Spacy Lemmatizer English import spacy nlp = spacy.load("en_core_web_sm") E = "I am sure apples and oranges are similar" doc = nlp(E) for token in doc: print(token.text, token.lemma_) # Perhatikan huruf besar/kecilI -PRON- am be sure sure apples apple and and oranges orange are be similar similarSpacy "tidak" (bukan belum) support Stemming:[Image Source]# Lemmatizer dengan Sastrawi from Sastrawi.Stemmer.StemmerFactory import StemmerFactory stemmer = StemmerFactory().create_stemmer() I = "perayaan itu Berbarengan dengan saat kita bepergian ke Makassar" print(stemmer.stem(I)) print(stemmer.stem("Perayaan Bepergian Menyuarakan")) # Ada beberapa hal yang berbeda antara Sastrawi dan modul-modul diatas. 
# Apa sajakah?raya itu bareng dengan saat kita pergi ke makassar raya pergi suarafrom google.colab import files uploaded = files.upload()import necessary modules for data explorationimport numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inlineget the documentdf = pd.read_csv('50_Startups.csv') dflook for empty valuesdf.isnull().sum() # looks good to meTotal r&d expendtiture for the 50 companiesdf['R&D Spend'].sum().round(1)Toatal marketing expenditure for the 50 companiesdf['Marketing Spend'].sum().round(1)Most popular cities for the startupsdf['State'].value_counts()Total combined profit for the 50 companiesdf['Profit'].sum().round(1)highest R%D spenddf['R&D Spend'].max()Lowest R%D spend for 50 startupsdf['R&D Spend'].min()california datacalifornia = df[df['State'] == 'California'] californiaTotal profit for startups in californiacalifornia['Profit'].sum().round(1)which state has the highest profitiable companyhighest_profit = df['Profit'].max() df[df['Profit'] == highest_profit]which state has the highest marketing companyhighest_marketing = df['Marketing Spend'].max() df[df['Marketing Spend'] == highest_marketing]relationship between highest R%D spend and Profithighest_rd = df['R&D Spend'].max() Relation = df[df['R&D Spend'] == highest_rd]['Profit'] print(highest_rd, Relation)165349.2 0 192261.83 Name: Profit, dtype: float64profit visualization chartprofits = df['Profit'] profits.plot.line()Histogram chart visualizationprofits.plot.hist()R%D spendrd_spend = df['R&D Spend'] rd_spend.plot.line()pie chart state plotstates = df['State'].value_counts() states.plot.pie(figsize=(8, 8),autopct='%1.2f',startangle=90, textprops={'fontsize': 14})[Encryption keys - Travis CI](https://docs.travis-ci.com/user/encryption-keys/)from Crypto.PublicKey import RSA import base64 from github_settings import SSH_KEY_PASSWORD my_public_key = RSA.importKey( open('/Users/raymondyee/.ssh/id_rsa.pub', 'r').read()) my_private_key = RSA.importKey(open('/Users/raymondyee/.ssh/id_rsa','r').read(), passphrase=SSH_KEY_PASSWORD) message = "abcdefgh"converting between ssh and pemverify that my `id_rsa.pem` is actually equivalent to my `id_rsa.pub`print (my_public_key.exportKey(format='PEM')) print (open("/Users/raymondyee/.ssh/id_rsa.pem").read())[Python and cryptography with pycrypto | Laurent Luce's Blog](http://www.laurentluce.com/posts/python-and-cryptography-with-pycrypto/) How to match the```rubyBase64.encode64```from [travis.rb/repository.rb at dcc9f20535c811068c4ff9788ae9bd026a116351 · travis-ci/travis.rb](https://github.com/travis-ci/travis.rb/blob/dcc9f20535c811068c4ff9788ae9bd026a116351/lib/travis/client/repository.rbL17) This docs: [Module: Base64 (Ruby 2_2_0)](http://ruby-doc.org/stdlib-2.2.0/libdoc/base64/rdoc/Base64.htmlmethod-i-encode64):> Returns the Base64-encoded version of bin. This method complies with RFC 2045. Line feeds are added to every 60 encoded characters. pycrypto + my own id_rsa padding[Class: OpenSSL::PKey::RSA (Ruby 2_2_4)](http://ruby-doc.org/stdlib-2.2.4/libdoc/openssl/rdoc/OpenSSL/PKey/RSA.htmlmethod-i-private_encrypt):> Encrypt string with the public key. padding defaults to PKCS1_PADDING. 
The encrypted string output can be decrypted using private_decrypt.Also in the doc:> RSA is an asymmetric public key algorithm that has been formalized in RFC 3447.Look for how to do so in Python.possible values for padding (see source: [Ruby MRI/test/openssl/test_pkey_rsa.rb](http://rxr.whitequark.org/mri/source/test/openssl/test_pkey_rsa.rb)):* `OpenSSL::PKey::RSA::NO_PADDING`* `OpenSSL::PKey::RSA::PKCS1_PADDING`Looks like there is no standard library support in Ruby libs for Also: don't know whether `PKCS1_PADDING` means: pycrypto: Module PKCS1_v1_5[Crypto.Cipher.PKCS1_v1_5](https://www.dlitz.net/software/pycrypto/api/2.6/Crypto.Cipher.PKCS1_v1_5-module.html) pycrypto: Module PKCS1_OAEP[Crypto.Cipher.PKCS1_OAEP](https://www.dlitz.net/software/pycrypto/api/2.6/Crypto.Cipher.PKCS1_OAEP-module.html)from Crypto.PublicKey import RSA from Crypto.Cipher import PKCS1_v1_5 from Crypto.Cipher import PKCS1_OAEP from Crypto.Hash import SHA from Crypto import Random import base64 def nopadding_encrypt(message, key): ciphertext = key.encrypt(message, 0)[0] return base64.b64encode(ciphertext) def nopadding_decrypt(ciphertextb64, key): ciphertext = base64.b64decode(ciphertextb64) return key.decrypt(ciphertext) def pkcs1v15_encrypt(message, key): h = SHA.new(message) cipher = PKCS1_v1_5.new(key) ciphertext = cipher.encrypt(message+h.digest()) return base64.b64encode(ciphertext) def pkcs1v15_decrypt (ciphertextb64, key): dsize = SHA.digest_size sentinel = Random.new().read(15+dsize) # Let's assume that average data length is 15 cipher = PKCS1_v1_5.new(key) ciphertext = base64.b64decode(ciphertextb64) message = cipher.decrypt(ciphertext, sentinel) digest = SHA.new(message[:-dsize]).digest() print ("len(message): {} sentinel: {} len(digest):{} dsize: {}".format(len(message), sentinel, len(digest), dsize)) if digest==message[-dsize:]: # Note how we DO NOT look for the sentinel return message[:-dsize] else: raise Exception ('encryption was done incorrectly:{}'.format(message)) def pkcs1oaep_encrypt(message, key): cipher = PKCS1_OAEP.new(key) ciphertext = cipher.encrypt(message) return base64.b64encode(ciphertext) def pkcs1oaep_decrypt(ciphertextb64, key): cipher = PKCS1_OAEP.new(key) ciphertext = base64.b64decode(ciphertextb64) return cipher.decrypt(ciphertext) enc_data = nopadding_encrypt(message, my_public_key) print (enc_data, nopadding_decrypt (enc_data, my_private_key) ) enc_data = pkcs1v15_encrypt(message, my_public_key) print (enc_data, pkcs1v15_decrypt(enc_data, my_private_key )) enc_data = pkcs1oaep_encrypt(message, my_public_key) print (enc_data, pkcs1oaep_decrypt( enc_data, my_private_key )) ### try decrypting output from Ruby with pkcs1v15 ruby_output = """ Upw4QQcNptfvd6t00mVLZaLMd965DqiiNOYmRStkcr1eX/v3ETkTNIqkc8WG ajrTYM20rYw3wfcMIjbCKXBSouTYqrJ4H4Uom3BbOI11Ykmf3Lf20QhB5r9K YwDLol3bKSqbTTNXhPm2ALSjsX5tha4jkc4VooGAA6grMMcTmS9cGgCC0Gm5 oILJzzLb5WEEN2CiUk0JVvSvadYylDyuFou8iP6GVPpOrILDNHHZKb70irXb E846PrDg8x83fL3+OoYAtfup3fR2ZH2qVXvs4JAQqRH9ECQtUkinJ4sukKYU R/pULVPeWI/xgX0cQ3xxXg3V8m4IcqF1nTe8TkZ1RA== """.strip() assert base64.b64decode(ruby_output) == base64.b64decode(ruby_output.replace("\n","")) pkcs1v15_decrypt(ruby_output, my_private_key) pkcs1oaep_decrypt(ruby_output, my_private_key) %%bash echo -n 'abcdefgh' \ | openssl rsautl \ -encrypt \ -pubin -inkey ~/.ssh/id_rsa.pem \ > /Users/raymondyee/Downloads/cipher.txt pkcs1v15_decrypt(base64.b64encode(open("/Users/raymondyee/Downloads/cipher.txt", "rb").read()), my_private_key) %%bash cat /Users/raymondyee/Downloads/test_message.txt \ | base64 
-D \ | openssl rsautl \ -decrypt \ -inkey ~/.ssh/id_rsa pkcs1oaep_decrypt(base64.b64encode(open("/Users/raymondyee/Downloads/cipher.txt", "rb").read()), my_private_key) %%bash # using openssl echo -n 'abcdefgh' | openssl rsautl -encrypt -pubin -inkey /Users/raymondyee/.ssh/id_rsa.pem | base64 openssl_output = """ """.strip() pkcs1v15_decrypt(openssl_output, my_private_key)python cryptography libraryBackground for the library:[The state of crypto in Python [LWN.net]](https://lwn.net/Articles/595790/)[RSA — Cryptography 1.3.dev1 documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/encryption)import base64 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes digest = hashes.Hash(hashes.SHA256(), backend=default_backend()) digest.update(b"abc") digest.update(b"123") digest.finalize() from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa private_key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) private_key from github_settings import SSH_KEY_PASSWORD from cryptography.hazmat.primitives import serialization with open("/Users/raymondyee/.ssh/id_rsa", "rb") as key_file: private_key = serialization.load_pem_private_key( key_file.read(), password=SSH_KEY_PASSWORD, backend=default_backend() ) from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import padding public_key = private_key.public_key() pem = public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ) pem.splitlines() message = b"abcdefgh" #OAEP ciphertext = public_key.encrypt( message, padding.OAEP( mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None ) ) ciphertext message = b"abcdefgh" #PKCS1v15 ciphertext = public_key.encrypt( message, padding.PKCS1v15() ) ciphertext plaintext = private_key.decrypt( ciphertext, padding.PKCS1v15() ) plaintext == message private_key.decrypt( base64.b64decode(openssl_output), padding.PKCS1v15() ) private_key.decrypt( base64.b64decode(ruby_output), padding.PKCS1v15() )Set up deviceos.environ["CUDA_VISIBLE_DEVICES"] = "6" DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") NUM_GPU = torch.cuda.device_count() print("Using {} GPUs".format(torch.cuda.device_count()))Using 1 GPUsConfigurationclass Config(object): def __init__(self, **kwargs): self._homedir = ".." 
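# Every path below (data, snapshots, results) is resolved relative to this
# project root ("..", i.e. one directory above the notebook) via os.path.join.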
# Training Data path self._datapath = os.path.join( self._homedir, kwargs.get("datapath", "hymenoptera_data") ) self._target_classes = ['ants', 'bees'] self._target_class_to_idx = { "ants": 0, "bees": 1 } # Model backbone self._model_backbone = "resnet18" self._pretrain = True # Data Loader configs self._batch_size = kwargs.get("batch_size", 16) self._shuffle = kwargs.get("shuffle", True) self._num_worker = kwargs.get("num_worker", 0) # Optimization params self._num_epochs = kwargs.get("num_epochs", 25) self._learning_rate = kwargs.get("learning_rate", 0.001) self._momentum = kwargs.get("momentum", 0.9) self._lr_scheduler_dict = kwargs.get("lr_scheduler", { "__name__": "step_lr", "step_size": 7, "gamma": 0.1 }) # Output file self._snapshot_folder = os.path.join( self._homedir, kwargs.get("snapshot_folder", "snapshots") ) self._results_folder = os.path.join( self._homedir, kwargs.get("result_folder", "results") )Modelclass FineTuneModel(Config): def __init__(self, **kwargs): super().__init__(**kwargs) def get_model(self, num_labels): if self._model_backbone == "resnet18": model_ft = models.resnet18(pretrained=self._pretrain) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, num_labels) return model_ft def _num_total_params(self, _model): num_params = 0 for p in _model.parameters(): num_params += p.numel() return num_params def _num_trainable_params(self, _model): return sum(p.numel() for p in _model.parameters() if p.requires_grad)Inferenceclass ImageClassification(Config): def __init__(self, weight_path, gpu_number=0, **kwargs): super().__init__(**kwargs) # prepare model self._load_model_weights(weight_path) def _preprocess_data(self, image_path): inference_transforms = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) image_ = Image.open(image_path).convert('RGB') image_tensor = inference_transforms(image_).float().unsqueeze_(0) return image_tensor def _process_output(self, image_tensor): input_ = image_tensor.to(DEVICE) output_ = self.model(input_) raw_output = [ np.round(v, 4) for v in output_.data.cpu().numpy().tolist()[0] ] _, preds = torch.max(output_, 1) pred_index = preds.data.cpu().numpy()[0] pred_class = [ k for k, v in self._target_class_to_idx.items() if v == pred_index ][0] return { "predicted_class": pred_class, "raw_output": raw_output, "predicted_label": pred_index } def _load_model_weights(self, weight_path): print("Preparing model: {} ...".format(self._model_backbone)) self.model = FineTuneModel().get_model(len(self._target_classes)) print("Preparing model: mapping to devices...") self.model = nn.DataParallel(self.model) self.model.to(DEVICE) print("Loading weights: {} ...".format(weight_path)) checkpoint = torch.load(weight_path, map_location=DEVICE) self.model.load_state_dict(checkpoint["state_dict"]) self.model.to(DEVICE) print("Model is ready!") self.model.eval() def predict(self, image_path): image_tensor_ = self._preprocess_data(image_path) output_ = self._process_output(image_tensor_) output_.update({"image_path": image_path}) return output_ import random c = ImageClassification(weight_path="../results/best_resnet18_acc0.9477_checkpoint.pth.tar", gpu_number=[6]) for f in random.choices(glob.glob("../hymenoptera_data/val/*/*"), k=20): print(c.predict(f))Preparing model: resnet18 ... Preparing model: mapping to devices... Loading weights: ../results/best_resnet18_acc0.9477_checkpoint.pth.tar ... Model is ready! 
{'predicted_class': 'bees', 'raw_output': [-1.4259, 1.0546], 'predicted_label': 1, 'image_path': '../hymenoptera_data/val/bees/144098310_a4176fd54d.jpg'} {'predicted_class': 'bees', 'raw_output': [-2.235, 1.5869], 'predicted_label': 1, 'image_path': '../hymenoptera_data/val/bees/2841437312_789699c740.jpg'} {'predicted_class': 'ants', 'raw_output': [1.4502, -1.6709], 'predicted_label': 0, 'image_path': '../hymenoptera_data/val/ants/8124241_36b290d372.jpg'} {'predicted_class': 'ants', 'raw_output': [2.0823, -2.0385], 'predicted_label': 0, 'image_path': '../hymenoptera_data/val/ants/F.pergan.28(f).jpg'} {'predicted_class': 'ants', 'raw_output': [1.1002, -1.448], 'predicted_label': 0, 'image_path': '../hymenoptera_data/val/bees/54736755_c057723f64.jpg'} {'predicted_class': 'ants', 'raw_output': [0.2554, 0.248], 'predicted_label': 0,[...]kernel density estimatesns.kdeplot(df.query('price < 200').price)"true shape" of interval datadf[df['price'] < 200]['price'].value_counts().sort_index().plot.line()percentilesdf1 = df[df.variety.isin(df.variety.value_counts().head(5).index)] sns.boxplot( x='variety', y='points', data=df1 )[![imagenes/pythonista.png](imagenes/pythonista.png)](https://pythonista.io) Introducción a *NetworkX*.[*NetworkX*](https://networkx.github.io/) es una biblioteca especializada en el análisis de redes complejas.!pip install networkx import networkx as nx import matplotlib.pyplot as plt nodos = ['Juan', 'Emilio', 'Sandra', 'María', 'Ruth', 'Solovino'] ejes = [('Juan', 'Emilio'), ('Emilio','María'), ('Emilio', 'Ruth'), ('Ruth', 'Sandra')] grafo = nx.Graph() grafo.add_nodes_from(nodos) grafo.add_edges_from(ejes) plt.figure() nx.draw_networkx(grafo) plt.close()Create HTML plots with python* The purpose of this notebook is to demonstrate how to create a series of plots that can be used as part of a static webpage.* The html figures which you generate will be used in your static webpage* Source: https://plotly.com/python/linear-fits/ Take a look at the data Prior to running the next cell, you may need to install plotly in your environment. You can read about it [here](https://plotly.com/python/getting-started/) but the step is pretty simple: in a new cell, run the following code. ```! 
pip install plotly```# import the python library import plotly.express as px # import a dataset about tips at restaurants df = px.data.tips() df.head(3)Create a simple scatter plot# add changes by removing the # scatter_fig = px.scatter(df, x='total_bill', # y='tip' ) scatter_fig.show() # save your figure scatter_fig.write_html('docs/scatter_fig.html')Add size and color# add changes by removing the # time_fig = px.scatter(df, x="total_bill", y="tip", # color='time', # opacity=0.65, # size='size', # color_discrete_sequence=['green', 'orange'] ) time_fig.show() # save your figure time_fig.write_html('docs/time_fig.html')Create a regression plot# add changes by removing the # reg_fig = px.scatter(df, x='total_bill', y='tip', trendline='ols', # trendline_color_override='red' ) reg_fig.show() # save your figure reg_fig.write_html('docs/reg_fig.html')Try comparing two groups# add changes by removing the # smoker_fig = px.scatter(df, x="total_bill", y="tip", # color="smoker", trendline="ols" ) smoker_fig.show() # save your figure smoker_fig.write_html('docs/smoker_fig.html')Numpy IntroductionAccording to [Wikipedia](https://en.wikipedia.org/wiki/NumPy):>NumPy (pronounced /ˈnʌmpaɪ/ (NUM-py) or sometimes /ˈnʌmpi/ (NUM-pee)) is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays. Let's get startedimport numpy as npDatatypes & AttributesNumpy's main datatype is nd-Array(n dimensional Array)arr1 = np.array([1,2,3]) arr1 type(arr1) arr2 = np.array([[1,2,3],[4.,5.,6.5]]) arr2 arr3 = np.array([[[1,2,3],[4,5,6],[7,8,9]],[[10,11,12],[13,14,15],[16,17,18]]]) arr3![](Images/AnatomyOfNumpyArray.PNG)arr1.shape arr2.shape arr3.shapearr1.ndim , arr2.ndim , arr3.ndim # Number of dimensionsarr1.dtype , arr2.dtype , arr3.dtype # type of dataarr1.size , arr2.size , arr3.size # total number of elements in the arraytype(arr1) , type(arr2) , type(arr3)# create a DataFrame from Numpy array import pandas as pd df = pd.DataFrame(arr2) dfCreating Arrayssample_array = np.array([1,2,3]) sample_arrayOnes()ones = np.ones((2,3)) # "shift + tab" inside the paranthesis to see the functions # description ones ones.dtype , type(ones)Zeros()zeros = np.zeros((2,3)) zeros range_array = np.arange(0,10,2) range_arrayrandom.randint()random_array = np.random.randint(0 , 10 , size = (3,5)) random_array random_array.size , random_array.shaperandom.random()np.random.random((5,3))random.rand()np.random.rand(5,3)random.seed()np.random.seed(seed = 0 ) # any seed keeps the array the same new_random_arr = np.random.randint( 10 , size = (5,3) ) new_random_arr np.random.seed(seed = 0 ) # any seed keeps the array the same new_random_arr = np.random.randint( 10 , size = (5,3) ) new_random_arr new_random_arr = np.random.randint( 10 , size = (5,3) ) new_random_arrThis means that random numbers are not EXACTLY RANDOM. They are psuedo random based on the amount of the seed. 
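As a quick check (a minimal sketch, not part of the original notebook), re-applying the same seed reproduces exactly the same array:

import numpy as np

np.random.seed(42)  # 42 is just an arbitrary example seed
first = np.random.randint(10, size=(5, 3))
np.random.seed(42)  # resetting the seed restarts the generator from the same state
second = np.random.randint(10, size=(5, 3))
print(np.array_equal(first, second))  # True: the two pseudo-random sequences are identical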
Viewing Matrices & Arrays# finding unique elements of an array np.unique(new_random_arr)arr3 arr3[0] # First two elements of each inner array arr3[:2,:2,:2]a4 = np.random.randint(10 , size = (2,3,4,5)) a4![](Images/ndimArraysAlignment.PNG)a4[:,:,:,:1]Manipulating and Compariung Arrays Arithmeticarr1 arr1 + np.ones(3) , arr1 - np.ones(3) arr2 , arr1 arr1*arr2> Manipulating array datas in Numpy is called Brodcasting[Broadcasting-TutorialsPoint](https://www.tutorialspoint.com/numpy/numpy_broadcasting.htm)## Search : how to reshape numpy array ? ## Search : how to make two numpy arrays compatible ? arr2/arr1 # Floor division removes the decimal points arr2//arr1 # All elements will go to the power of two arr2 ** 2 np.square(9) # Numpy has all mathematic opeartors and functions : # Ex : np.exp() , np.log() , np.sqrt() , ...AggregationMeaning : >the formation of a number of things into a cluster.listOfNumbers = [1,2,3,4] sum(listOfNumbers) np.sum(listOfNumbers) # We'd better use Numpy predefined functions and methods on Numpy arrays. # they are a lot faster. # created a 1000-element array. massive_array = np.random.random(1000) # magic function "timeit" returns the amount of time that each line of code # takes to execute thoroughly. %timeit sum(massive_array) # python's sum %timeit np.sum(massive_array) #Numpy's sum ## this might take some time # as we can see, Numpy's sum method is about 50 times faster than Python's sum. np.mean(arr2) # Avg of arr2 np.max(arr2) # max elements of arr2 np.min(arr2) # min elements of arr2 np.var(arr2) ## calculates the VarianceStandard Deviation & Variance Varinace (واریانس)high_var_Arr = np.array([1,100,200,300,4000,5000]) low_var_Arr = np.array([2,6,4,8,10]) np.var(high_var_Arr) , np.var(low_var_Arr)Deviation (انحراف از معیار)np.std(high_var_Arr),np.std(low_var_Arr) import matplotlib.pyplot as plt plt.hist(high_var_Arr) plt.show() plt.hist(low_var_Arr) plt.show()Reshape and Transpose Reshapingarr2 arr2.shape arr3 arr3.shape # we can't multiply these two arrays due to different shape. We must reshape them arr2.reshape(2,3,1).shape # Now we can multiply arr2 & arr3Transposearr2 arr2.T # tranpose operator is an Attribute :\Create CSV by Pythonindices = np.array(["Sat","Sun","Mon","Tue","Wed","Thur","Frid"]) peanut_coulmns = np.array(["Almond Butter" , "peanut Butter" , "Creamy Butter"]) salesAmounts = np.random.randint(20,size=(7,3)) weeklySales = pd.DataFrame(salesAmounts,index = indices , columns = peanut_coulmns ) weeklySales prices = np.array([10,8,12]) penaut_prices = pd.DataFrame(prices.reshape(1,3),index=["Price"],columns = peanut_coulmns) penaut_prices totalSales = prices.dot(salesAmounts) # shapes aren't aligned, let's transpose totalSales = prices.dot(salesAmounts.T) totalSales ## This part is incompleted ... Youa re supposed to add the weekly total sales # to the table as a column.Comaprison Operatorsarr1 arr2 ## Element wise checks wheter each element in left-hand side is greater than each element in Right-hand-side arr1>arr2 arr1 >= arr2 arr1 > 5 , arr1 < 5 arr1 == arr2 # you'll get the Same results with : arr2 == arr1Sorting Arraysrandom_array = np.random.randint(10,size=(3,5)) random_array # sorts each row in increasing form. 
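# np.sort uses axis=-1 by default, so each row of the 2-D array is sorted independently.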
np.sort(random_array) # Sorts the values but the indices will be replaced by the real value np.argsort(random_array) np.argsort(arr1) , np.sort(arr1) , arr1 # returns the index of matimum value np.argmax(arr1) random_array # Axis zero = horizontally np.argmax(random_array , axis = 0) # Axis one = vertically np.argmax(random_array , axis = 1)A Practical Example : Turn Images into arrays See the Image Below : ![](Images/panda.png)# import a library to read an image from matplotlib.image import imread as imr panda_img = imr("Images/panda.png") print(type(panda_img)) panda_img panda_img.size , panda_img.shape , panda_img.ndim # that's pretty big. # Image Properties : 3500 x 2330 # the numbers in this array are the RGB of the picture.Drive Mounterfrom google.colab import drive drive.mount('/content/drive')Mounted at /content/driveData Preprcessingfldr="/content/drive/My Drive/FYP/emotion/CK+48" import os files=os.listdir(fldr) print(files) Exp=['fear', 'contempt', 'happy', 'anger', 'surprise', 'disgust', 'sadness'] import cv2 from google.colab.patches import cv2_imshow i=0 last=[] images=[] labels=[] for fle in files: idx=Exp.index(fle) label=idx total=fldr+'/'+fle files_exp= os.listdir(total) for fle_2 in files_exp: file_main=total+'/'+fle_2 print(file_main+" "+str(label)) image= cv2.imread(file_main) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image= cv2.resize(image,(48,48)) images.append(image) labels.append(label) i+=1 last.append(i) import cv2 from google.colab.patches import cv2_imshow from google.colab.patches import cv2_imshow lastFearcv2_imshow(images[24]) cv2_imshow(images[40])Contemptcv2_imshow(images[139])Happycv2_imshow(images[349]) cv2_imshow(images[300]) import tensorflow as tf from sklearn.model_selection import train_test_split import numpy as np images_f=np.array(images) labels_f=np.array(labels) images_f_2=images_f/255 cv2_imshow(images[300]) images_f_2.shape num_of_classes=7 labels_encoded=tf.keras.utils.to_categorical(labels_f,num_classes=num_of_classes) X_train, X_test, Y_train, Y_test= train_test_split(images_f_2, labels_encoded,test_size=0.25)Modelfrom tensorflow.keras.layers import Dropout from tensorflow.keras.layers import Flatten,BatchNormalization from tensorflow.keras.layers import Dense, MaxPooling2D,Conv2D from tensorflow.keras.layers import Input,Activation,Add from tensorflow.keras.models import Model from tensorflow.keras.regularizers import l2 from tensorflow.keras.optimizers import Adam def Convolution(input_tensor,filters): x = Conv2D(filters=filters,kernel_size=(3, 3),padding = 'same',strides=(1, 1),kernel_regularizer=l2(0.001))(input_tensor) x = Dropout(0.1)(x) x= Activation('relu')(x) return x def model(input_shape): inputs = Input((input_shape)) conv_1= Convolution(inputs,32) maxp_1 = MaxPooling2D(pool_size = (2,2)) (conv_1) conv_2 = Convolution(maxp_1,64) maxp_2 = MaxPooling2D(pool_size = (2, 2)) (conv_2) conv_3 = Convolution(maxp_2,128) maxp_3 = MaxPooling2D(pool_size = (2, 2)) (conv_3) conv_4 = Convolution(maxp_3,256) maxp_4 = MaxPooling2D(pool_size = (2, 2)) (conv_4) flatten= Flatten() (maxp_4) dense_1= Dense(128,activation='relu')(flatten) drop_1=Dropout(0.2)(dense_1) output= Dense(7,activation="sigmoid")(drop_1) model = Model(inputs=[inputs], outputs=[output]) model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics=["accuracy"]) return model Model=model(input_shape = (48,48,3)) Model.summary()Model: "functional_1" _________________________________________________________________ Layer (type) Output Shape Param # 
================================================================= input_1 (InputLayer) [(None, 48, 48, 3)] 0 _________________________________________________________________ conv2d (Conv2D) (None, 48, 48, 32) 896 _________________________________________________________________ dropout (Dropout) (None, 48, 48, 32) 0 _________________________________________________________________ activation (Activation) (None, 48, 48, 32) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 24, 24, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 64) 18496 ______________________________________________________[...]Trainingfrom tensorflow.keras.callbacks import ModelCheckpoint fle_s='Emotion_detection.h5' checkpointer = ModelCheckpoint(fle_s, monitor='loss',verbose=1,save_best_only=True,save_weights_only=False, mode='auto',save_freq='epoch') callback_list=[checkpointer] History=Model.fit(X_train,Y_train,batch_size=32,validation_data=(X_test,Y_test),epochs=1000,callbacks=[callback_list])Epoch 1/1000 23/23 [==============================] - ETA: 0s - loss: 2.0967 - accuracy: 0.2095 Epoch 00001: loss improved from inf to 2.09673, saving model to Emotion_detection.h5 23/23 [==============================] - 1s 29ms/step - loss: 2.0967 - accuracy: 0.2095 - val_loss: 1.9929 - val_accuracy: 0.3699 Epoch 2/1000 18/23 [======================>.......] - ETA: 0s - loss: 1.9795 - accuracy: 0.2361 Epoch 00002: loss improved from 2.09673 to 1.95856, saving model to Emotion_detection.h5 23/23 [==============================] - 0s 13ms/step - loss: 1.9586 - accuracy: 0.2490 - val_loss: 1.8577 - val_accuracy: 0.2886 Epoch 3/1000 19/23 [=======================>......] - ETA: 0s - loss: 1.9301 - accuracy: 0.2599 Epoch 00003: loss improved from 1.95856 to 1.91335, saving model to Emotion_detection.h5 23/23 [==============================] - 0s 13ms/step - loss: 1.9133 - accuracy: 0.2680 - val_loss: 1.8104 - val_accuracy: 0.2886 Epoch 4/1000 19/23 [=======================>......] 
- ETA: [...]Evaluationscore = Model.evaluate(X_train, Y_train) score = Model.evaluate(X_test, Y_test) Pred=Model.predict(X_test) Pred Y_test len(Pred) def test_image(ind,images_f,images_f_2,Model): cv2_imshow(images_f[ind]) image_test=images_f_2[ind] print("Label actual: " + Exp[labels[ind]] ) pred_1=Model.predict(np.array([image_test])) #print(pred_1) pred_class=Exp[int(np.argmax(pred_1))] print("Predicted Label: "+ pred_class) test_image(980,images_f,images_f_2,Model) test_image(36,images_f,images_f_2,Model) test_image(122,images_f,images_f_2,Model) test_image(232,images_f,images_f_2,Model) test_image(647,images_f,images_f_2,Model) test_image(869,images_f,images_f_2,Model) test_image(502,images_f,images_f_2,Model) test_image(800,images_f,images_f_2,Model) import matplotlib.pyplot as plt plt.plot(History.history['loss']) plt.plot(History.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Validation'], loc='upper left') plt.subplots_adjust(top=1.00, bottom=0.0, left=0.0, right=0.95, hspace=0.25, wspace=0.35) plt.plot(History.history['accuracy']) plt.plot(History.history['val_accuracy']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Validation'], loc='upper left') plt.subplots_adjust(top=1.00, bottom=0.0, left=0.0, right=0.95, hspace=0.25, wspace=0.35) from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report i=0 Y_test_l=[] Pred_l=[] while(iUser APIrdm = randint(0,1000) rdm subpath = "myorg{}".format(rdm) subpath_star = "{}/*".format(subpath) identity = IDENTITY identity2 = IDENTITY_BIS identities = [identity, identity2] pp(nxs.acls.fetch(subpath)) pp(nxs.acls.fetch(subpath, rev=2)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.fetch(subpath, rev=2, self=False)) pp(nxs.acls.list(subpath_star)) pp(nxs.acls.list(subpath_star, ancestors=True)) pp(nxs.acls.list(subpath_star, self=False)) pp(nxs.acls.list(subpath_star, ancestors=True, self=False)) pp(nxs.acls.replace(subpath, [["projects/read"]], [identity], rev=0)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.replace(subpath, [["projects/read"], ["resources/read"]], identities, rev=1)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.append(subpath, [["projects/write"]], [identity], rev=2)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.append(subpath, [["projects/create"], ["resources/write"]], identities, rev=3)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.subtract(subpath, [["projects/read"]], [identity], rev=4)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.subtract(subpath, [["projects/create"], ["resources/read"]], identities, rev=5)) pp(nxs.acls.fetch(subpath, self=False)) pp(nxs.acls.delete(subpath, rev=6)) pp(nxs.acls.fetch(subpath, self=False))Developer APIimport json import copy endpoint = "{}/acls".format(DEPLOYMENT) endpoint rdm2 = randint(0, 1000) rdm2 subpath2 = "myorg{}".format(rdm2) path = "{}/{}".format(endpoint, subpath2) path path_star = "{}/*".format(path) path_star acls = { "acl": [ { "permissions": [ "projects/read", ], "identity": IDENTITY }, ] } append_acls = copy.deepcopy(acls) append_acls["@type"] = "Append" append_acls["acl"][0]["permissions"] = ["projects/write"] subtract_acls = copy.deepcopy(acls) subtract_acls["@type"] = "Subtract" subtract_acls["acl"][0]["permissions"] = ["projects/read"] acls_list = { "acl": [ { "permissions": [ "projects/read", ], "identity": IDENTITY }, { "permissions": [ "resources/read", ], "identity": IDENTITY_BIS }, ] 
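# Added note: acls_list mirrors "acls" above but grants permissions to both
# identities (IDENTITY and IDENTITY_BIS); the deepcopy'd variants created below
# add an "@type" field ("Append" / "Subtract") so the same payload shape can be
# reused for the developer-API append_ and subtract_ calls.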
} append_acls_list = copy.deepcopy(acls_list) append_acls_list["@type"] = "Append" append_acls_list["acl"][0]["permissions"] = ["projects/create"] append_acls_list["acl"][1]["permissions"] = ["resources/write"] subtract_acls_list = copy.deepcopy(acls_list) subtract_acls_list["@type"] = "Subtract" subtract_acls_list["acl"][0]["permissions"] = ["projects/create"] subtract_acls_list["acl"][1]["permissions"] = ["resources/read"] pp(nxs.acls.fetch_(path)) pp(nxs.acls.fetch_(path, rev=2)) pp(nxs.acls.fetch_(path, self=False)) pp(nxs.acls.fetch_(path, rev=2, self=False)) pp(nxs.acls.list_(path_star)) pp(nxs.acls.list_(path_star, ancestors=True)) pp(nxs.acls.list_(path_star, self=False)) pp(nxs.acls.list_(path_star, ancestors=True, self=False)) pp(acls) pp(nxs.acls.replace_(path, acls, rev=0)) pp(nxs.acls.fetch_(path, self=False)) pp(acls_list) pp(nxs.acls.replace_(path, acls_list, rev=1)) pp(nxs.acls.fetch_(path, self=False)) pp(append_acls) pp(nxs.acls.append_(path, append_acls, rev=2)) pp(nxs.acls.fetch_(path, self=False)) pp(append_acls_list) pp(nxs.acls.append_(path, append_acls_list, rev=3)) pp(nxs.acls.fetch_(path, self=False)) pp(subtract_acls) pp(nxs.acls.subtract_(path, subtract_acls, rev=4)) pp(nxs.acls.fetch_(path, self=False)) pp(subtract_acls_list) pp(nxs.acls.subtract_(path, subtract_acls_list, rev=5)) pp(nxs.acls.fetch_(path, self=False)) pp(nxs.acls.delete_(path, rev=6)) pp(nxs.acls.fetch_(path, self=False))Datasetimport json import pandas as pd import numpy as np import pymysql from sqlalchemy import create_engine from sklearn import neighbors from sklearn.neighbors import NearestNeighbors pd.set_option('display.max_columns', 500) # Credentials with open("credentials.json") as f: credentials = json.loads(f.read()) host = credentials["host"] user = credentials["db_user"] password = credentials[""] db = credentials["db_name"] engine = create_engine(f"mysql+pymysql://{user}:{password}@{host}:3306/{db}") # Database call for bus data df = pd.read_sql_query('SELECT * FROM trips_2017 WHERE lineid = "46A" AND direction = 2', engine) df.head() # Replace missing actual time departure values with timetable values df.actualtime_dep.fillna(df.plannedtime_dep, inplace=True) df.head() # Remove rows with missing values for actual time arrival as we cannot safely assume these are as per timetable df = df[pd.notnull(df['actualtime_arr'])] df.head() # Create a new column for trip duration df['trip_duration'] = df['actualtime_arr'] - df['actualtime_dep'] df.head() # Create a new column with the hour of the day the trip took place df['actualtime_dep_H'] = round(df['actualtime_dep']/3600) df.head() # Hour of actual time arrival df['actualtime_arr_H'] = round(df['actualtime_arr']/3600) df.head() # Average hour of the day of the journey df['avg_H'] = (df['actualtime_dep_H'] + df['actualtime_arr_H']) / 2 df.head() df['avg_H'] = df['avg_H'].astype(int) df.head() # Creating column solely for the dates to correlate with the dates column on the historical weather data table df['time'] = df['timestamp'] + df['avg_H'] * 3600 df.time # Removing suppressed rows where suppressed=1.0 df = df.query('suppressed != 1.0') df.index = range(len(df)) # Creating columns from timestamp for further processing df['dayofweek'] = df['timestamp'] df['monthofyear'] = df['timestamp'] # Converting the unix time to datetime format df.dayofweek = pd.to_datetime(df['dayofweek'], unit='s') df.monthofyear = pd.to_datetime(df['monthofyear'], unit='s') # Converting datetime to name of weekday, and to name of month (in separate columns) 
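# Added note (version caveat, not in the original): Series.dt.weekday_name used
# below was deprecated in pandas 0.23 and removed in pandas 1.0. On newer pandas,
# the equivalent would be:
# df['dayofweek'] = df['dayofweek'].dt.day_name()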
df['dayofweek'] = df['dayofweek'].dt.weekday_name df['monthofyear'] = df['monthofyear'].dt.month # Creating dummy variables for weekday names and name of month df_dayofweek_dummies = pd.get_dummies(df['dayofweek']) # Removing rows not in the month of March df = df.query('monthofyear == 3') df df.shape df1 = pd.concat([df, df_dayofweek_dummies], axis=1, join_axes=[df.index]) df1 # Pull historical weather data df2 = pd.read_sql_query('SELECT * FROM DarkSky_historical_weather_data WHERE year = 2017', engine) df2.head() d = {'clear-day':'clear','clear-night':'clear','partly-cloudy-day':'partly-cloudy','partly-cloudy-night':'partly-cloudy'} df2 = df2.replace(d) df2.rename(columns={'day_of_week': 'dayofweek', 'month': 'monthofyear'}, inplace=True) df3 = pd.merge(df1, df2, on=['time']) df3.head() df3 = df3[['avg_H', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'temp', 'precip_intensity','trip_duration']] # Trip duration is in seconds, convert to minutes and round to the nearest integer df3['trip_duration'] = round(df3['trip_duration']/60) df3['trip_duration'] = df3['trip_duration'].astype(int) df3['temp'] = round(df3['temp']) df3['temp'] = df3['temp'].astype(int) #df3 = df3[['avg_H', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'temp','trip_duration']] df3.head()PreprocessingYou can see that our dataset has eleven columns. The task is to predict the trip duration (last column) based on the day of the week, the time of the day and the weather conditions (temperature and rain intesity). The next step is to split our dataset into attributes and labels.# Assign data from first four columns to X variable X = df3.iloc[:, 0:10] # Assign data from fifth column to y variable y = df3['trip_duration'] X.head() y.head()KNN Regressionfrom sklearn.neighbors import KNeighborsRegressor knn = KNeighborsRegressor(n_neighbors=2) knn.fit(X, y) # predict for 9 am on a Tuesday with 0.0 rain and 12 degrees print(round(knn.predict([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]])[0]),"minutes") pred = knn.predict(X) predictions = pd.DataFrame(pred) predictions.rename(columns={0:'estimated_time'}, inplace=True ) predictions['estimated_time'] = round(predictions['estimated_time']) predictions['estimated_time'] = predictions['estimated_time'].astype(int) predictions.head() from sklearn import metrics print("Accuracy: ", metrics.accuracy_score(y, predictions)) print("Confusion matrix: \n", metrics.confusion_matrix(y, predictions)) print("Classification report:\n ", metrics.classification_report(y, predictions)) metrics.mean_absolute_error(y,predictions)/predictions.mean() print(metrics.mean_absolute_error(y,predictions))Код для робота (менять не надо):import random import numpy as np import matplotlib.pyplot as plt class Robot(object): def __init__(self, length=20.0): """ Creates robot and initializes location/orientation to 0, 0, 0. """ self.x = 0.0 self.y = 0.0 self.orientation = 0.0 self.length = length self.steering_noise = 0.0 self.distance_noise = 0.0 self.steering_drift = 0.0 def set(self, x, y, orientation): """ Sets a robot coordinate. """ self.x = x self.y = y self.orientation = orientation % (2.0 * np.pi) def set_noise(self, steering_noise, distance_noise): """ Sets the noise parameters. 
""" # makes it possible to change the noise parameters # this is often useful in particle filters self.steering_noise = steering_noise self.distance_noise = distance_noise def set_steering_drift(self, drift): """ Sets the systematical steering drift parameter """ self.steering_drift = drift def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): """ steering = front wheel steering angle, limited by max_steering_angle distance = total distance driven, most be non-negative """ if steering > max_steering_angle: steering = max_steering_angle if steering < -max_steering_angle: steering = -max_steering_angle if distance < 0.0: distance = 0.0 # apply noise steering2 = random.gauss(steering, self.steering_noise) distance2 = random.gauss(distance, self.distance_noise) # apply steering drift steering2 += self.steering_drift # Execute motion turn = np.tan(steering2) * distance2 / self.length if abs(turn) < tolerance: # approximate by straight line motion self.x += distance2 * np.cos(self.orientation) self.y += distance2 * np.sin(self.orientation) self.orientation = (self.orientation + turn) % (2.0 * np.pi) else: # approximate bicycle model for motion radius = distance2 / turn cx = self.x - (np.sin(self.orientation) * radius) cy = self.y + (np.cos(self.orientation) * radius) self.orientation = (self.orientation + turn) % (2.0 * np.pi) self.x = cx + (np.sin(self.orientation) * radius) self.y = cy - (np.cos(self.orientation) * radius) def __repr__(self): return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)Добавьте вычисление параметра steer через PID controller в этой клетке:class PIDController: def __init__(self, tau_p, tau_d, tau_i, dt=0.1): self.tau_p, self.tau_d, self.tau_i = tau_p, tau_d, tau_i self.dt = dt self.error = None self.integral = 0.0 def __call__(self, y, target_y): error = target_y - y d_error = 0.0 if self.error is None else (error - self.error) / self.dt self.error = error self.integral += error * self.dt total = ( self.tau_p * error + self.tau_d * d_error + self.tau_i * self.integral ) return total def run(robot, tau_p, tau_d, tau_i, n=200, speed=1.0): x_trajectory = [] y_trajectory = [] controller = PIDController(tau_p, tau_d, tau_i) for i in range(n): cte = robot.y steer = controller(robot.y, 0.0) robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectoryЗапус и отрисовка траектории, тут нужно подобрать оптимальные параметры PID (сейчас стоят 1, 1, 1).def plot(tau_p, tau_d, tau_i): robot = Robot() robot.set(0, 1, 0) x_trajectory, y_trajectory = run(robot, tau_p, tau_d, tau_i) plt.plot(x_trajectory, y_trajectory, 'g', label='PID controller') plt.plot(x_trajectory, np.zeros(len(x_trajectory)), 'r', label='reference') plt.legend() plt.show() plot(1, 1, 1) plot(1, 0, 1) plot(1, 1, 0) plot(1, 0.5, 0) # Seems like most optimal set of params.Firebase Import Census DataThis is a companion notebook for the new [Data Science Solutions](https://strtupsci.com) book. 
The code is explained in the book.import pandas as pd import numpy as np column_names = [ 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salary'] train_df = pd.read_csv( 'data/aws/census/adult.data', header=None, names=column_names, sep=', ', engine='python') test_df = pd.read_csv( 'data/aws/census/adult.test', header=None, names=column_names, sep=', ', engine='python', skiprows=1) train_df.shape, test_df.shape train_df.head() train_df.to_json( orient='index', path_or_buf='data/firebase/census/census.json') test_df.to_json( orient='index', path_or_buf='data/firebase/census/census_test.json')Pointmass dynamics without noisefig, ax = plt.subplots(figsize=figsize) plot_trajectories(ax, dsr.sel(exp='pointmass'), focal_agent=focal_agent, exp='point mass', focal_color=vcolor.pointmass) fig.savefig(f'trajectories_pointmass.pdf', bbox_inches='tight')Quadcopter dynamics with noisefig, ax = plt.subplots(figsize=figsize) plot_trajectories(ax, dsr.sel(exp='quadcopter'), focal_agent=focal_agent, exp='quadcopter', focal_color=vcolor.quadcopter) fig.savefig(f'trajectories_quadcopter.pdf', bbox_inches='tight')Prepare Datasets for Training Define the correct data type for each column in the datasets *calendar.csv*# Correct data types for "calendar.csv" calendarDTypes = {"event_name_1": "category", "event_name_2": "category", "event_type_1": "category", "event_type_2": "category", "weekday": "category", 'wm_yr_wk': 'int16', "wday": "int16", "month": "int16", "year": "int16", "snap_CA": "float32", 'snap_TX': 'float32', 'snap_WI': 'float32' } # Read csv file calendar = pd.read_csv("../input/m5-forecasting-accuracy/calendar.csv", dtype = calendarDTypes) calendar["date"] = pd.to_datetime(calendar["date"]) # Transform categorical features into integers for col, colDType in calendarDTypes.items(): if colDType == "category": calendar[col] = calendar[col].cat.codes.astype("int16") calendar[col] -= calendar[col].min() calendar.head()*sell_prices.csv*# Correct data types for "sell_prices.csv" priceDTypes = {"store_id": "category", "item_id": "category", "wm_yr_wk": "int16", "sell_price":"float32"} # Read csv file prices = pd.read_csv("../input/m5-forecasting-accuracy/sell_prices.csv", dtype = priceDTypes) # Transform categorical features into integers for col, colDType in priceDTypes.items(): if colDType == "category": prices[col] = prices[col].cat.codes.astype("int16") prices[col] -= prices[col].min() prices.head()*sales_train_validation.csv*firstDay = 250 lastDay = 1913 # Use x sales days (columns) for training numCols = [f"d_{day}" for day in range(firstDay, lastDay+1)] # Define all categorical columns catCols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id'] # Define the correct data types for "sales_train_validation.csv" dtype = {numCol: "float32" for numCol in numCols} dtype.update({catCol: "category" for catCol in catCols if catCol != "id"}) # Read csv file ds = pd.read_csv("../input/m5-forecasting-accuracy/sales_train_validation.csv", usecols = catCols + numCols, dtype = dtype) # Transform categorical features into integers for col in catCols: if col != "id": ds[col] = ds[col].cat.codes.astype("int16") ds[col] -= ds[col].min() ds = pd.melt(ds, id_vars = catCols, value_vars = [col for col in ds.columns if col.startswith("d_")], var_name = "d", value_name = "sales") # Merge "ds" with "calendar" and "prices" dataframe ds = ds.merge(calendar, on = "d", copy 
= False) ds = ds.merge(prices, on = ["store_id", "item_id", "wm_yr_wk"], copy = False) ds.head()Create features Sales featuresdayLags = [7, 28] lagSalesCols = [f"lag_{dayLag}" for dayLag in dayLags] for dayLag, lagSalesCol in zip(dayLags, lagSalesCols): ds[lagSalesCol] = ds[["id","sales"]].groupby("id")["sales"].shift(dayLag) windows = [7, 28] for window in windows: for dayLag, lagSalesCol in zip(dayLags, lagSalesCols): ds[f"rmean_{dayLag}_{window}"] = ds[["id", lagSalesCol]].groupby("id")[lagSalesCol].transform(lambda x: x.rolling(window).mean())Date featuresdateFeatures = {"wday": "weekday", "week": "weekofyear", "month": "month", "quarter": "quarter", "year": "year", "mday": "day"} for featName, featFunc in dateFeatures.items(): if featName in ds.columns: ds[featName] = ds[featName].astype("int16") else: ds[featName] = getattr(ds["date"].dt, featFunc).astype("int16") ds.head() ds.info()Remove unnecessary rows and columns# Remove all rows with NaN value ds.dropna(inplace = True) # Define columns that need to be removed unusedCols = ["id", "date", "sales","d", "wm_yr_wk", "weekday"] trainCols = ds.columns[~ds.columns.isin(unusedCols)] X_train = ds[trainCols] y_train = ds["sales"]Split dataset into train and validation setnp.random.seed(777) # Define categorical features catFeats = ['item_id', 'dept_id','store_id', 'cat_id', 'state_id'] + \ ["event_name_1", "event_name_2", "event_type_1", "event_type_2"] validInds = np.random.choice(X_train.index.values, 2_000_000, replace = False) trainInds = np.setdiff1d(X_train.index.values, validInds) trainData = lgb.Dataset(X_train.loc[trainInds], label = y_train.loc[trainInds], categorical_feature = catFeats, free_raw_data = False) validData = lgb.Dataset(X_train.loc[validInds], label = y_train.loc[validInds], categorical_feature = catFeats, free_raw_data = False) del ds, X_train, y_train, validInds, trainInds ; gc.collect()Modelparams = { "objective" : "poisson", "metric" :"rmse", "force_row_wise" : True, "learning_rate" : 0.075, "sub_row" : 0.75, "bagging_freq" : 1, "lambda_l2" : 0.1, "metric": ["rmse"], 'verbosity': 1, 'num_iterations' : 1200, 'num_leaves': 128, "min_data_in_leaf": 100, } # Train LightGBM model m_lgb = lgb.train(params, trainData, valid_sets = [validData], verbose_eval = 20) # Save the model m_lgb.save_model("model.lgb")Predictions# Last day used for training trLast = 1913 # Maximum lag day maxLags = 57 # Create dataset for predictions def create_ds(): startDay = trLast - maxLags numCols = [f"d_{day}" for day in range(startDay, trLast + 1)] catCols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id'] dtype = {numCol:"float32" for numCol in numCols} dtype.update({catCol: "category" for catCol in catCols if catCol != "id"}) ds = pd.read_csv("../input/m5-forecasting-accuracy/sales_train_validation.csv", usecols = catCols + numCols, dtype = dtype) for col in catCols: if col != "id": ds[col] = ds[col].cat.codes.astype("int16") ds[col] -= ds[col].min() for day in range(trLast + 1, trLast+ 28 +1): ds[f"d_{day}"] = np.nan ds = pd.melt(ds, id_vars = catCols, value_vars = [col for col in ds.columns if col.startswith("d_")], var_name = "d", value_name = "sales") ds = ds.merge(calendar, on = "d", copy = False) ds = ds.merge(prices, on = ["store_id", "item_id", "wm_yr_wk"], copy = False) return ds def create_features(ds): dayLags = [7, 28] lagSalesCols = [f"lag_{dayLag}" for dayLag in dayLags] for dayLag, lagSalesCol in zip(dayLags, lagSalesCols): ds[lagSalesCol] = ds[["id","sales"]].groupby("id")["sales"].shift(dayLag) windows = 
[7, 28] for window in windows: for dayLag, lagSalesCol in zip(dayLags, lagSalesCols): ds[f"rmean_{dayLag}_{window}"] = ds[["id", lagSalesCol]].groupby("id")[lagSalesCol].transform(lambda x: x.rolling(window).mean()) dateFeatures = {"wday": "weekday", "week": "weekofyear", "month": "month", "quarter": "quarter", "year": "year", "mday": "day"} for featName, featFunc in dateFeatures.items(): if featName in ds.columns: ds[featName] = ds[featName].astype("int16") else: ds[featName] = getattr(ds["date"].dt, featFunc).astype("int16") fday = datetime(2016,4, 25) alphas = [1.028, 1.023, 1.018] weights = [1/len(alphas)] * len(alphas) sub = 0. for icount, (alpha, weight) in enumerate(zip(alphas, weights)): te = create_ds() cols = [f"F{i}" for i in range(1,29)] for tdelta in range(0, 28): day = fday + timedelta(days=tdelta) print(tdelta, day) tst = te[(te['date'] >= day - timedelta(days=maxLags)) & (te['date'] <= day)].copy() create_features(tst) tst = tst.loc[tst['date'] == day , trainCols] te.loc[te['date'] == day, "sales"] = alpha * m_lgb.predict(tst) # magic multiplier by kyakovlev te_sub = te.loc[te['date'] >= fday, ["id", "sales"]].copy() te_sub["F"] = [f"F{rank}" for rank in te_sub.groupby("id")["id"].cumcount()+1] te_sub = te_sub.set_index(["id", "F" ]).unstack()["sales"][cols].reset_index() te_sub.fillna(0., inplace = True) te_sub.sort_values("id", inplace = True) te_sub.reset_index(drop=True, inplace = True) te_sub.to_csv(f"submission_{icount}.csv",index=False) if icount == 0 : sub = te_sub sub[cols] *= weight else: sub[cols] += te_sub[cols]*weight print(icount, alpha, weight) sub2 = sub.copy() sub2["id"] = sub2["id"].str.replace("validation$", "evaluation") sub = pd.concat([sub, sub2], axis=0, sort=False) sub.to_csv("submission.csv",index=False)Benchmarking NLP Model with TensorRT NVIDIA Triton Inference Server and Inference Recommender on Amazon SageMaker 💡 Note Use Amazon SageMaker notebook instance to execute this notebook, this will not work on Amazon SageMaker Studio notebooksThis notebook demonstrates the use of Amazon SageMaker Inference recommender to perform custom load testing in order to performance fine tune the NLP BERT Model serving using NVIDIA Triton Serving on SageMaker.[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed service for data science and machine learning workflows. It helps data scientists and developers to prepare, build, train, and deploy high-quality ML models quickly by bringing together a broad set of capabilities purpose-built for ML.Now, [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server/) can be used to serve models for inference in Amazon SageMaker. Thanks to the new NVIDIA Triton container image, you can easily serve ML models and benefit from the performance optimizations, dynamic batching, and multi-framework support provided by NVIDIA Triton. Triton helps maximize the utilization of GPU and CPU, further lowering the cost of inference.SageMaker Inference Recommender is a new capability of SageMaker that reduces the time required to get machine learning (ML) models in production by automating performance benchmarking and load testing models across SageMaker ML instances. You can use Inference Recommender to deploy your model to a real-time inference endpoint that delivers the best performance at the lowest cost.This notebook was tested with the `conda_python3` kernel on an Amazon SageMaker notebook instance of type `ml.g4dn.8xlarge` with 50GB EBS volume. 
💡 Pricing: Refer to the SageMaker pricing page to estimate the cost of executing this SageMaker Studio notebook in us-east-1. The cost of instances that are provisioned by Amazon SageMaker Inference Recommender during the custom load test is based on instance usage; there is no additional fee for SageMaker Inference Recommender. For example, the estimated cost of running a custom load test using ml.g4dn.xlarge (SupportedRealtimeInferenceInstanceTypes) in us-east-1 for 2 hours is < 1.5 USD. Instances provisioned during the load tests are automatically terminated by Inference Recommender after the duration of the job. No explicit clean up is required.

Introduction to NVIDIA Triton Server

[NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server/) was developed specifically to enable scalable, cost-effective, and easy deployment of models in production. NVIDIA Triton Inference Server is open-source inference serving software that simplifies the inference serving process and provides high inference performance.

Some key features of Triton are:

* **Support for multiple frameworks**: Triton can be used to deploy models from all major frameworks. Triton supports TensorFlow GraphDef, TensorFlow SavedModel, ONNX, PyTorch TorchScript, TensorRT, RAPIDS FIL for tree-based models, and OpenVINO model formats.
* **Model pipelines**: A Triton model ensemble represents a pipeline of one or more models or pre/post-processing logic and the connection of input and output tensors between them. A single inference request to an ensemble will trigger the execution of the entire pipeline.
* **Concurrent model execution**: Multiple models (or multiple instances of the same model) can run simultaneously on the same GPU or on multiple GPUs for different model management needs.
* **Dynamic batching**: For models that support batching, Triton has multiple built-in scheduling and batching algorithms that combine individual inference requests together to improve inference throughput. These scheduling and batching decisions are transparent to the client requesting inference.
* **Diverse CPUs and GPUs**: The models can be executed on CPUs or GPUs for maximum flexibility and to support heterogeneous computing requirements.

**Note**: This initial release of NVIDIA Triton on SageMaker will only support a single model. Future releases will have multi-model support. A minimal `config.pbtxt` configuration file is **required** in the model artifacts. This release doesn't support inferring the model config automatically.
Install packagesInstalls the dependencies required to package the model and run inferences using Triton server.!pip install -qU pip awscli boto3 sagemaker transformers==4.9.1 !pip install nvidia-pyindex !pip install tritonclient[http]Imports# general imports import boto3 import json import os import re import copy import time from time import gmtime, strftime import numpy as np import datetime import pprint import pandas as pd # sagemaker import sagemaker from sagemaker import get_execution_role # triton import tritonclient.http as httpclient # transformers from transformers import BertTokenizer # custom CloudWatch from cloudwatch import get_endpoint_metricsSet VariablesWe set SageMaker variables and other variables below, also define the IAM role that will give Amazon SageMaker access to the model artifacts and the NVIDIA Triton ECR image.# sagemaker sess = boto3.Session() sm = sess.client("sagemaker") sagemaker_session = sagemaker.Session(boto_session=sess) role = get_execution_role() region = boto3.Session().region_name bucket = sagemaker.Session().default_bucket() prefix = "sagemaker/trt-triton-inference-recommender" # boto clients sm_client = boto3.client("sagemaker", region_name=region) cw_client = boto3.client("cloudwatch", region) account_id_map = { "us-east-1": "785573368785", "us-east-2": "007439368137", "us-west-1": "710691900526", "us-west-2": "301217895009", "eu-west-1": "802834080501", "eu-west-2": "205493899709", "eu-west-3": "254080097072", "eu-north-1": "601324751636", "eu-south-1": "966458181534", "eu-central-1": "746233611703", "ap-east-1": "110948597952", "ap-south-1": "763008648453", "ap-northeast-1": "941853720454", "ap-northeast-2": "151534178276", "ap-southeast-1": "324986816169", "ap-southeast-2": "355873309152", "cn-northwest-1": "474822919863", "cn-north-1": "472730292857", "sa-east-1": "756306329178", "ca-central-1": "464438896020", "me-south-1": "836785723513", "af-south-1": "774647643957", } if region not in account_id_map.keys(): raise ("UNSUPPORTED REGION") print(f"SageMaker Role: {role}") print(f"Region Name: {region}") # local variables ts = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) sm_model_name = "trt-triton-benchmark-model-" + ts model_package_group_name = "trt-triton-benchmark-model-group-" + ts advanced_job = "trt-triton-benchmark-advanced-job-" + ts print(f"SageMaker Model Name: {sm_model_name}") print(f"SageMaker Mode Package Name: {model_package_group_name}") print(f"SageMaker Advanced Job Name: {advanced_job}")Amazon SageMaker Triton Inference Server Deep Learning Container ImageLet's retrieve Amazon SageMaker NVIDIA Triton Inference server container image based on the account ID you are running this notebook. Set `triton_image_uri` based on the `account_id` and `region` informationbase = "amazonaws.com.cn" if region.startswith("cn-") else "amazonaws.com" triton_image_uri = "{account_id}.dkr.ecr.{region}.{base}/sagemaker-tritonserver:21.08-py3".format( account_id=account_id_map[region], region=region, base=base ) print(f"Triton Inference server DLC image: {triton_image_uri}")NLP Use caseDeploying and scaling NLP models in a production set up can be quite challenging. NLP models are often very large in size, containing millions of model parameters. Optimal model configurations are required to satisfy stringent performance and scalability of production grade NLP applicationsIn this notebook, we will benchmark a NLP use case using SageMaker Triton inference server and recommend performance tuning optimizations for the below NLP profile. 
We will use a large pre-trained transformer based `bert-large-uncased` model which has about 336 million model parameters. The input sentence used for the binary classification model will be padded and truncated to a maximum input sequence length 512 tokens. The inference load test will simulate to achieve 500 TPS (30000 maximum invocations per minute) and model latency of < 0.5 seconds (500 milliseconds) NVIDIA Triton Setup with Amazon SageMaker1. We will use this script [generate_models.sh](./workspace/generate_models.sh) to generate the TensorRT plan to be used with NVIDIA Triton inference server.2. The script load the pre-trained `bert_large_uncased` model and saving it ONNX format can be found in this [onnx_exporter.py](./workspace/onnx_exporter.py)3. Pre-trained model is loaded in torchscript format and model artifacts are saved used in onnx exporter(model.onnx)4. trtexec is a tool to quickly utilize TensorRT without having to develop your own application. The trtexec tool has three main purposes: - benchmarking networks on random or user-provided input data. - generating serialized engines from models. - generating a serialized timing cache from the builder.5. After trtexec execution is complete, model plan file(model_bs16.plan) is generated. This file will be used as model artifact by Triton6. We used the pre-configured `config.pbtxt` file provided with this repo to specify model [configuration](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md) which Triton uses to load the model. 7. We tar the model directory and upload it to s3 to later create a [SageMaker Model](https://sagemaker.readthedocs.io/en/stable/api/inference/model.html).**Note**: Amazon SageMaker expects the model tarball file to have a top level directory with the same name as the model defined in the `config.pbtxt`. Below is the sample model directory structure```bert├── 1│ └── model.plan└── config.pbtxt``` 💡 Caution TensorRT (TRT) is hardware-specific when it comes to compiling models for performance optimization. Therefore, the following cell should be run in as many different GPU-backed instance types as will be used in the load test. A separate load test should be run for each instance family (like g4, p3, etc..), so as to not try to load a model artifact that was compiled for one type of GPU onto another type of GPU.This step is expected to run for ~60 minutes.!docker run --gpus=all --rm -it \ -v `pwd`/workspace:/workspace nvcr.io/nvidia/pytorch:21.08-py3 \ /bin/bash generate_models.shThe script saves the model in this [workspace](./workspace/) directory Model Configuration- Each model in a model repository must include a model configuration that provides required and optional information about the model. Typically, this configuration is provided in a `config.pbtxt` file specified as `ModelConfig protobuf`. - The model configuration name property is optional. If the name of the model is not specified in the configuration it is assumed to be the same as the model repository directory containing the model. If name is specified it must match the name of the model repository directory containing the model- The `max_batch_size` property indicates the maximum batch size that the model supports for the types of batching that can be exploited by Triton. If the model's batch dimension is the first dimension, and all inputs and outputs to the model have this batch dimension, then Triton can use its dynamic batcher or sequence batcher to automatically use batching with the model. 
In this case `max_batch_size` should be set to a value greater-or-equal-to 1 that indicates the maximum batch size that Triton should use with the model- Each model input and output must specify a name, datatype, and shape. The name specified for an input or output tensor must match the name expected by the model. The below is the baseline configuration for PyTorch model!mkdir -p triton-serve-trt/bert/ %%writefile triton-serve-trt/bert/config.pbtxt name: "bert" platform: "tensorrt_plan" max_batch_size: 16 input [ { name: "token_ids" data_type: TYPE_INT32 dims: [512] }, { name: "attn_mask" data_type: TYPE_INT32 dims: [512] } ] output [ { name: "output" data_type: TYPE_FP32 dims: [512, 1024] }, { name: "3194" data_type: TYPE_FP32 dims: [1024] } ]You can find other model configurations that are used in the benchmarking exercise in this [workspace](./workspace) directory.1. config-dg.pbtxt (Dynamic batching enabled)2. config-ig.pbtxt (Multiple instance group)3. config-db-ig.pbtxt (Dynamic batching, Multiple instance group enabled)To execute benchmark with different model configurations, copy above files with `config.pbtxt` name in the model repository and reupload the model tar files. We will copy the model.pt file and create model tar file to be uploaded to S3. The model archive tar file will be used by Triton Inference server!mkdir -p triton-serve-trt/bert/1/ !cp workspace/model_bs16.plan triton-serve-trt/bert/1/model.plan !tar -C triton-serve-trt/ -czf model.tar.gz bert model_uri = sagemaker_session.upload_data(path="model.tar.gz", key_prefix="triton-serve-trt")Create Amazon SageMaker Real Time EndpointWe start off by creating a [sagemaker model](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateModel.html) from the model files we uploaded to s3 in the previous step.In this step we also provide an additional Environment Variable i.e. `SAGEMAKER_TRITON_DEFAULT_MODEL_NAME` which specifies the name of the model to be loaded by Triton. **The value of this key should match the folder name in the model package uploaded to s3**. This variable is optional in case of a single model. In case of ensemble models, this key **has to be** specified for Triton to startup in SageMaker.Additionally, customers can set `SAGEMAKER_TRITON_BUFFER_MANAGER_THREAD_COUNT` and `SAGEMAKER_TRITON_THREAD_COUNT` for optimizing the thread counts.*Note*: The current release of Triton (21.08-py3) on SageMaker doesn't support running instances of different models on the same server, except in case of [ensembles](https://github.com/triton-inference-server/server/blob/main/docs/architecture.mdensemble-models). Only multiple model instances of the same model are supported, which can be specified under the [instance-groups](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.mdinstance-groups) section of the config.pbtxt file. Create payload Create payload in JSON format and upload it on S3. This will be used by Inference Recommender to run the custom load test.def tokenize_text(text): enc = BertTokenizer.from_pretrained("bert-large-uncased") encoded_text = enc(text, padding="max_length", max_length=512, truncation=True) return encoded_text["input_ids"], encoded_text["attention_mask"]If you want to change the payload (Token Length), below are the changes -1. Change the JSON with shape reflecting the right token length below2. Change the tokenize_text method to reflect the token length3. 
Change the config.pbtxt the triton* folder to reflect the input id and attention mask length.text_triton = """ Create payload JSON and upload it on S3. This will be used by Inference Recommender to run the load test. """ input_ids, attention_mask = tokenize_text(text_triton) payload = { "inputs": [ {"name": "token_ids", "shape": [1, 512], "datatype": "INT32", "data": input_ids}, {"name": "attn_mask", "shape": [1, 512], "datatype": "INT32", "data": attention_mask}, ] } print(f"Sample payload to be used with Inference Recommender") print(payload) payload_location = "./sample-payload/" if not os.path.exists(payload_location): os.makedirs(payload_location) print(f"Directory Created {payload_location}") else: print(f"Directory already exists {payload_location}") payload_archive_name = "payload.tar.gz" f = open(payload_location + "request.json", "w") json.dump(payload, f) f.close() !cd ./sample-payload/ && tar czvf ../payload.tar.gz * %%time sample_payload_url = sagemaker.Session().upload_data( payload_archive_name, bucket=bucket, key_prefix=prefix + "/inference" ) model_archive_name = "model.tar.gz" model_url = sagemaker.Session().upload_data( model_archive_name, bucket=bucket, key_prefix=prefix + "/model" ) print(f"Sample Payload location in S3: {sample_payload_url}") print(f"Model archive location: {model_url}")Amazon SageMaker Inference Recommender set upSet the Domain, Task, Framework, version and Model for Inference Recommender Job.ml_domain = "NATURAL_LANGUAGE_PROCESSING" ml_task = "FILL_MASK" ml_framework = "PYTORCH" framework_version = "1.6.0" model = "bert-base-uncased"Create the Triton Container Dictionary object and Model Package group for Inference recommender Jobcontainer = { "Image": triton_image_uri, "ModelDataUrl": model_url, "NearestModelName": model, "Framework": ml_framework, "Environment": {"SAGEMAKER_TRITON_DEFAULT_MODEL_NAME": "bert"}, } model_pacakge_group_response = sm_client.create_model_package_group( ModelPackageGroupName=str(model_package_group_name), ModelPackageGroupDescription="BERT large uncased Model group for Triton Serving", ) print(f"Model Registry package group: {model_pacakge_group_response}")Amazon SageMaker model registry model package with domain, task and Inference container specification information. Specify the list of supported inference instance types in `SupportedRealtimeInferenceInstanceTypes` parameter. Also, define the `ContentType` and MIME type informationmodel_package_version_response = sm_client.create_model_package( ModelPackageGroupName=str(model_package_group_name), ModelPackageDescription="BERT large uncased Model group for Triton Serving", Domain=ml_domain, Task=ml_task, SamplePayloadUrl=sample_payload_url, InferenceSpecification={ "Containers": [container], "SupportedRealtimeInferenceInstanceTypes": [ "ml.g4dn.4xlarge", "ml.g4dn.8xlarge", "ml.g4dn.16xlarge", "ml.g4dn.12xlarge", "ml.g4dn.xlarge", "ml.g4dn.2xlarge", ], "SupportedContentTypes": ["application/octet-stream"], "SupportedResponseMIMETypes": ["application/json"], }, )Amazon SageMaker Inference Recommender Custom Load TestCreate Custom Inference Recommender Job for Triton Container serving BERT Model with 512 Token lengthUse the `create_inference_recommendations_job` to create an Inference Recommender load test and specify below parameters- Specify Advanced for the JobType field and provide:- A job name for your load test (JobName). 
- The Amazon Resource Name (ARN) of an IAM role that enables Inference Recommender to perform tasks on your behalf.
- A traffic pattern for the load test (TrafficPattern)
  - Initial number of users = 2
  - Spawn rate = 3 (creates 3 new users every 3 minutes for a duration of 15 minutes)
- An endpoint configuration dictionary (InputConfig) where you specify an AWS instance type against which to run benchmarks
- StoppingConditions (Inference Recommender adjusts the initial number of instances to satisfy the stopping conditions below)
  - MaxInvocations is set to 30000
  - ModelLatencyThresholds P95 threshold of 500 ms

💡 ResourceLimitExceeded Exception Please make sure you have access to the requested instance types in your AWS account. You may receive a similar exception if the resource limits for a certain instance type are inadequate: "INVALID_INPUT : 1. An error occurred (ResourceLimitExceeded) when calling the CreateEndpoint operation: The account-level service limit 'ml.g4dn.12xlarge for endpoint usage' is 0 Instances, with current utilization of 0 Instances and a request delta of 1 Instances. Please contact AWS support to request an increase for this limit"

advanced_response = sm_client.create_inference_recommendations_job(
    JobName=advanced_job,
    JobDescription="nlp triton Inference Advanced Recommender Job",
    JobType="Advanced",
    RoleArn=role,
    InputConfig={
        "ModelPackageVersionArn": model_package_version_response["ModelPackageArn"],
        "JobDurationInSeconds": 7200,
        "EndpointConfigurations": [{"InstanceType": "ml.g4dn.12xlarge"}],
        "TrafficPattern": {
            "TrafficType": "PHASES",
            "Phases": [
                {
                    "InitialNumberOfUsers": 2,
                    "SpawnRate": 3,
                    "DurationInSeconds": 900,
                },  # simulating 50 users, 2 initial and 3 new users every minute for 16 minutes
            ],  # second phase, we will start with 50 users, steady traffic for 5 minutes
        },
        "ResourceLimit": {"MaxNumberOfTests": 10, "MaxParallelOfTests": 5},
    },
    StoppingConditions={
        "MaxInvocations": 30000,
        "ModelLatencyThresholds": [{"Percentile": "P95", "ValueInMilliseconds": 500}],
    },
)
print(advanced_response)

Let's get the inference recommender job details using the `describe_inference_recommendations_job` boto3 API

%%time
ended = False
while not ended:
    inference_recommender_job = sm_client.describe_inference_recommendations_job(
        JobName=str(advanced_job)
    )
    if inference_recommender_job["Status"] in ["COMPLETED", "STOPPED", "FAILED"]:
        print(f"Inference recommender job status: {inference_recommender_job['Status']} ")
        ended = True
    else:
        print("Inference recommender job in progress")
        time.sleep(300)

if inference_recommender_job["Status"] == "FAILED":
    print("Inference recommender job failed ")
    print("Failed Reason: {}".format(inference_recommender_job["FailedReason"]))
else:
    print("Inference recommender job completed")

Visualize CloudWatch Metrics

Use the `get_endpoint_metrics` helper function to visualize the CloudWatch metrics. This will provide a detailed overview of resource usage during the load test.
Metrics such GPU Memory utilization, Invocations and Model Latency metrics will allow to tweak NVIDIA Triton model configuration to improve application performance.job_name = advanced_response["JobArn"].split("/")[-1] df_cw = get_endpoint_metrics(sm_client, cw_client, region, job_name, include_plots=True) data = [ {**x["EndpointConfiguration"], **x["ModelConfiguration"], **x["Metrics"]} for x in inference_recommender_job["InferenceRecommendations"] ] df = pd.DataFrame(data) df.drop("VariantName", inplace=True, axis=1) pd.set_option("max_colwidth", 400)__Employee Performance Analysis__ __INX Future Inc.__ __Data Processing__# general purpose libraries import os from dotenv import load_dotenv # data loading and wrangling libraries for EDA import pandas as pd import numpy as np # load the dot env file that contains the path to data file for data privacy dotenv_path = os.getcwd()+'\\local.env' load_dotenv(dotenv_path=dotenv_path) import warnings warnings.filterwarnings('ignore') df = pd.read_excel(os.getenv('data')) df.head()Data distribution of Numerical featuresdf.describe() df.info() RangeIndex: 1200 entries, 0 to 1199 Data columns (total 28 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 EmpNumber 1200 non-null object 1 Age 1200 non-null int64 2 Gender 1200 non-null object 3 EducationBackground 1200 non-null object 4 MaritalStatus 1200 non-null object 5 EmpDepartment 1200 non-null object 6 EmpJobRole 1200 non-null object 7 BusinessTravelFrequency 1200 non-null object 8 DistanceFromHome 1200 non-null int64 9 EmpEducationLevel 1200 non-null int64 10 EmpEnvironmentSatisfaction 1200 non-null int64 11 EmpHourlyRate 1200 non-null int64 12 EmpJobInvolvement 1200 non-null int64 13 EmpJobLevel [...]Data Cleaningdf.isna().sum() df.isnull().sum() for i in df: if df[i].dtype in ['object', 'bool']: print(f"________{i}_______") print(df[i].value_counts(), end="\n\n")________EmpNumber_______ E100338 1 E1001201 1 E100708 1 E100279 1 E1002015 1 .. E1001758 1 E1001869 1 E1001840 1 E1001679 1 E1001484 1 Name: EmpNumber, Length: 1200, dtype: int64 ________Gender_______ Male 725 Female 475 Name: Gender, dtype: int64 ________EducationBackground_______ Life Sciences 492 Medical 384 Marketing 137 Technical Degree 100 Other 66 Human Resources 21 Name: EducationBackground, dtype: int64 ________MaritalStatus_______ Married 548 Single 384 Divorced 268 Name: MaritalStatus, dtype: int64 ________EmpDepartment_______ Sales 373 Development 361 Research & Development 343 Human Resources 54 Finance 49 Data Science 20 Name: EmpDepartment, dtype: int64 ________EmpJobRole_______ Sales Executive 270 Developer 236 Manager R&D [...]Data preparation conclusion1. Detect unknown, empty or null values - result : There are no null, empty or unknown values in the dataset2. 
Unique features - result : The EmpNumber is a unique feature as it contains each employees Id and thus it bears no predictive valuedf.drop('EmpNumber', axis=1, inplace=True) df.to_excel('INX_future_inc_employee_cleaned_dataset.xls')Importer de librairies Spark nécessairesfrom pyspark.sql.functions import * from pyspark.sql.types import * from pyspark.sql.window import Window %cd data #%ls from pyspark.context import SparkContext from pyspark.sql.session import SparkSession sc = SparkContext.getOrCreate() spark = SparkSession(sc)importer nos données en indiquant le path et utiliser read_csv Interpretation de la base de donnéesdata = spark.read.csv("/Users/garbamoussa/Desktop/Python_ML/SparkML-master/data/2010-12-01.csv",header = 'True',inferSchema='True') spark.version data.show() data.printSchema() data.select("InvoiceNo").show() data.select(data['InvoiceNo'], data['UnitPrice'] + 1).show()+---------+-----------------+ |InvoiceNo| (UnitPrice + 1)| +---------+-----------------+ | 536365| 3.55| | 536365|4.390000000000001| | 536365| 3.75| | 536365|4.390000000000001| | 536365|4.390000000000001| | 536365| 8.65| | 536365| 5.25| | 536366| 2.85| | 536366| 2.85| | 536367| 2.69| | 536367| 3.1| | 536367| 3.1| | 536367| 4.75| | 536367| 2.65| | 536367| 5.25| | 536367| 5.95| | 536367| 10.95| | 536367| 6.95| | 536367| 6.95| | 536367| 8.95| +---------+-----------------+ only showing top 20 rowsTotal number of transactions in bureau datprint(data.count()) data.filter(data['UnitPrice'] > 5).show() data.groupBy("Country").count().show() data.groupBy("Quantity").count().show() data.groupBy("device_type").count().show() data.createOrReplaceTempView("Dataset") sqlDF = spark.sql("SELECT * FROM Dataset") sqlDF.show() data.show(n=2, truncate=False, vertical=True) #print(data.toPandas()) click = data.filter(col('click')==1).count() no_click = data.filter(col('click')==0).count() print(click/(click+ no_click)*100) #data.filter(col('click') == 5748394).count().show() data.filter(data['click'] == 5748394).show() from pyspark.sql import SparkSession from pyspark.ml import Pipeline from pyspark.sql.functions import mean,col,split, col, regexp_extract, when, lit from pyspark.ml.feature import StringIndexer from pyspark.ml.feature import VectorAssembler from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.ml.feature import QuantileDiscretizer campaign_count = data.count() print(campaign_count) data.describe().show() type(data) data.select("click","buyer_bid","geo_city").show() data.groupBy("click").count().show() data.groupBy("buyer_bid").count().show() gropuBy_output = data.groupBy("click").count() display(gropuBy_output) #data.na.drop(subset=["buyer_id"]) #data.groupBy("click","buyer_id").count().show() ## Créer une fonction permettant d'afficher les colonnes ayant de valeurs manquantes et le nombre de valeurs manquantes def null_value_count(df): null_columns_counts = [] numRows = df.count() for k in df.columns: nullRows = df.where(col(k).isNull()).count() if(nullRows > 0): temp = k,nullRows null_columns_counts.append(temp) return(null_columns_counts) null_columns_count_list = null_value_count(data) spark.createDataFrame(null_columns_count_list, ['Column_With_Null_Value', 'Null_Values_Count']).show() #spark.sql("SELECT * FROM global_temp.dataset").show() data.limit(2).toPandas()Feature 1 : Numer of past loans per custermerdata_100 = data.limit(100) # limiter le nombre à afficher à 100 buyerbid_par_click = data_100.select('click', 'buyer_bid').groupBy('click').count().withColumnRenamed("count", 
"data_buyer_bid_COUNT") data_100 = data_100.join(buyerbid_par_click, ['click'],how ='left') print((data_100.count(), len(data_100.columns))) mean_buyer_bid = data.select(mean('buyer_bid')).collect()[0][0] print(mean_buyer_bid) data.select("creative_freq").show() data.select("buyer_bid").distinct().show() data.select("click").distinct().show() #data.groupby('click').avg('buyer_id').collect() import pandas_profiling import numpy as np from pathlib import Path data1 = data.limit(100).toPandas() profile_data = pandas_profiling.ProfileReport(data1) profile_data feature = VectorAssembler(inputCols=data.columns[1:],outputCol="features") feature_vector= feature.transform(data) (trainingData, testData) = feature_vector.randomSplit([0.8, 0.2],seed = 11) from pyspark.ml.classification import LogisticRegression lr = LogisticRegression(labelCol="click", featuresCol="features") #Training algo lrModel = lr.fit(trainingData) lr_prediction = lrModel.transform(testData) lr_prediction.select("prediction", "click", "features").show() evaluator = MulticlassClassificationEvaluator(labelCol="click", predictionCol="prediction", metricName="accuracy") data.createOrReplaceTempView("data1") from pyspark.sql.functions import max #data.select(max("count")).take(1) #maxSql = spark.sql("""SELECT DEST_buyer_id, sum(count) as buyer_total #FROM data1 #GROUP BY DEST_buyer_id #ORDER BY sum(count) DESC #LIMIT 5""") from pyspark.sql.functions import max data1\ .groupBy("DEST_buyer_id")\ .sum(count)\ .withColumnRenamed("sum(count)", "buyer_total")\ .sort(desc("buyer_total"))\ .limit(5)\ .show() from pyspark.sql.functions import sum df = spark.createDataFrame(data1,["count","buyer_id"]) df.show() from pyspark.sql.functions import sum df = spark.createDataFrame(data1,["count","click"]) df.show() total = data.groupBy().sum() total.show()+------------------+-----------------+---------------+---------------------+------------+-------------------------+-----------------------+----------------+----------------+------------------+-----------------+------------+-------------+----------------+--------------------+----------+ |sum(fold_position)| sum(buyer_bid)|sum(geo_region)|sum(operating_system)|sum(browser)|sum(advertiser_frequency)|sum(advertiser_recency)|sum(campaign_id)|sum(creative_id)|sum(creative_freq)|sum(creative_rec)|sum(geo_dma)|sum(geo_city)|sum(device_type)|sum(geo_postal_code)|sum(click)| +------------------+-----------------+---------------+---------------------+------------+-------------------------+-----------------------+----------------+----------------+------------------+-----------------+------------+-------------+----------------+--------------------+----------+ | 486228|8267747.191345905| 3420869| 29748876| 4271812| 777599| 1299080736|[...]Aliases Although member values are considered unique in enumerations, we can still define multiple member names with the same value. But they do not create different members! They are, in fact, considered aliases of each other, with the first member becoming the "master" member. Let's see a simple example of this first:import enum class NumSides(enum.Enum): Triangle = 3 Rectangle = 4 Square = 4 Rhombus = 4As you can see we have two members with different names (names must **always** be unique), but with the **same** value. However, the `Square` and `Rhombus` members are considered **aliases** of the `Rectangle` member since `Rectangle` is defined first. 
This means that `Rectangle` and `Square` are actually considered the **same** member:

NumSides.Rectangle is NumSides.Square

And of course aliases are equal to each other too:

NumSides.Square is NumSides.Rhombus

Aliases can be referenced just like an ordinary member, and are considered *contained* in the enumeration:

NumSides.Square in NumSides

And when we look up the member by value:

NumSides(4)

we always get the "master" back. The same holds when looking up by key:

NumSides['Square']

When we iterate an enumeration that contains aliases, none of the aliases are returned in the iteration:

list(NumSides)

The only way to get all the members, including aliases, is to use the `__members__` property:

NumSides.__members__

Notice how the aliases are treated. Although the keys in the mapping proxy are different, the objects they point to are all the "master" member.

Example

There are times when the ability to define these aliases can be useful. Let's say you have to deal with statuses that are returned as strings from different systems. These systems may not always define exactly the same strings to mean the same thing (maybe they were developed independently). In a case like this, being able to create aliases could be useful to bring uniformity to our own code.

Let's say that the statuses from system 1 are: `ready, busy, finished_no_error, finished_with_errors`

And for system 2 we have correspondingly: `ready, processing, ran_ok, errored`

And in our own system we might want the statuses: `ready, running, ok, errors`

In other words we have:

```
Us        System 1                System 2
-------------------------------------------
ready     ready                   ready
running   busy                    processing
ok        finished_no_error       ran_ok
errors    finished_with_errors    errored
```

We can then easily achieve this using this class with aliases:

class Status(enum.Enum):
    ready = 'ready'

    running = 'running'
    busy = 'running'
    processing = 'running'

    ok = 'ok'
    finished_no_error = 'ok'
    ran_ok = 'ok'

    errors = 'errors'
    finished_with_errors = 'errors'
    errored = 'errors'

Then when we list our own statuses, we only see our (master) members:

list(Status)

But now we can look up a status from any of the other two systems, and automatically get our "master" member:

Status['busy']
Status['processing']

Note that in our case the actual value of the members does not matter. I used strings, but we could equally well just use numbers:

class Status(enum.Enum):
    ready = 1

    running = 2
    busy = 2
    processing = 2

    ok = 3
    finished_no_error = 3
    ran_ok = 3

    errors = 4
    finished_with_errors = 4
    errored = 4

This will work the same way:

Status.ran_ok

status = 'ran_ok'
status in Status.__members__
Status[status]

Ensuring No Aliases

Sometimes we want to make sure we are creating enumerations that do **not** contain aliases. Of course, we can just be careful and not define aliases, but the `enum` module provides a special decorator that can enforce this:

@enum.unique
class Status(enum.Enum):
    ready = 1
    done_ok = 2
    errors = 3

And if we try to create aliases, the class will fail to be created - we'll get an exception as soon as the class definition is executed:

try:
    @enum.unique
    class Status(enum.Enum):
        ready = 1
        waiting = 1
        done_ok = 2
        errors = 3
except ValueError as ex:
    print(ex)

duplicate values found in <enum 'Status'>: waiting -> ready

Slicing

A common feature of list, tuple, str, and all sequence types in Python is the support of slicing operations, which are more powerful than most people realize.

Example 2-12. Line items from a flat-file invoice

invoice = """
0.....6.................................40........52...55........
1909 Pimoroni PiBrella $17.50 3 $52.50 1489 6mm Tactile Switch x20 $4.95 2 $9.90 1510 Panavise Jr. - PV-201 $28.00 1 $28.00 1601 PiTFT Mini Kit 320x240 $34.95 1 $34.95 """ SKU= slice(0, 6) DESCRIPTION = slice(6, 40) UNIT_PRICE = slice(40, 52) QUANTITY = slice(52, 55) ITEM_TOTAL = slice(55, None) line_items = invoice.split('\n')[2:] for item in line_items: print(item[UNIT_PRICE], item[DESCRIPTION])$17.50 Pimoroni PiBrella $4.95 6mm Tactile Switch x20 $28.00 Panavise Jr. - PV-201 $34.95 PiTFT Mini Kit 320x240Assigning to Slicesl = list(range(10)) l l[2:5] = [20, 30] l del l[5:7] l l = list(range(7)) print(l) l[3::2] = [-1, -6] l l[2:5]=[9]Using + and * with Sequences Example 2-13. A list with three lists of length 3 can represent a tic-tac-toeboardboard = [['_'] * 3 for i in range(3)] board board[1][2] = 'X' boardExample 2-14. A list with three references to the same list is uselesswierd_board = [['_'] * 3] * 3 wierd_board wierd_board[1][2] = 'O' wierd_boardA += Assignment PuzzlerExample 2-15. A riddlet = (1, 2, [30, 40]) t[2] += [50, 60] t tlist.sort versus the sorted Built-Infruits = ['grape', 'raspberry', 'apple', 'banana'] sorted(fruits, key=len, reverse=True) fruits.sort() fruits import bisect bisect.bisect([1,2,3,4,5,6], 8)Next we need to identify unique songs in our set and match them with Musicbrainz ids. We'll then use them to crawl tags for genre identificationimport os import numpy as np import pandas as pd from glob import glob import re import time # Import API importers import musicbrainzngs # NGram from similarity.ngram import NGram twogram = NGram(2) # Set musicbrainz crednetials musicbrainzngs.set_rate_limit(limit_or_interval=1.0, new_requests=1) musicbrainzngs.set_useragent('concerts', '0.0.1', '') musicbrainzngs.set_format(fmt='xml') # Switch Directory to ../data/interim/ current_dir = os.getcwd() destination_dir = '/data/interim' if current_dir[-len(destination_dir):] != destination_dir: os.chdir('..' + destination_dir) else: print('already in correct directory: ',current_dir) # Generate a DataFrame with unique artists that have a musicbrainzid hot100_processed = pd.read_csv('../processed/hot100_processed.csv',sep='\t') hot100_uniq_df = hot100_processed.drop_duplicates(subset=['artist','title'],keep='first').loc[:,['artist','title']] print(len(hot100_uniq_df)) hot100_uniq_df.head() try: mbid_list = pd.read_csv('../interim/hot100_uniq.csv',sep='\t',encoding='utf-8',index_col='artist') except: mbid_list = pd.DataFrame(index=[hot100_uniq_df.title,hot100_uniq_df.artist],columns=['mbid','checked']) mbid_list.loc[:,'checked'] = False mbid_list.head() def save_progress(df,output_path): df_res_index = df.reset_index() df_res_index.to_csv(output_path, sep='\t',index=False,encoding='utf-8') def twogram_distance(row): # only compare string values return twogram.distance(row[0],row[1]) def search_artist_mb(row): artist_name = row[0] track_title = row[1] res_tracks = musicbrainzngs.search_recordings(query=track_title,limit=20) best_match = False best_dist = 0.5 max_dist = 0.5 for track in res_tracks['recording-list']: mb_title = track['title'] mbid_title = track['id'] track_dist = twogram_distance((track_title,mb_title)) for artist_credit in track['artist-credit']: try: mbid_artist = artist_credit['artist']['id'] mb_artist = artist_credit['artist']['name'] artist_dist = twogram_distance((artist_name,mb_artist)) except: # This is not a valid artist dict instead it's a concatenation (e.g. &, feat. etc.) 
artist_dist = 1 cumul_dist = artist_dist + track_dist if cumul_dist == 0.0: return mbid_title elif cumul_dist < best_dist and cumul_dist < max_dist: best_match = mbid_title best_dist = cumul_dist if best_match: return best_match return np.nan # Save Progress Settings output_dir = '../interim/' target = 'hot100_uniq_mbid.csv' # Total Artists total_titles = len(hot100_uniq_df) print("Total Titles to be matched: ",total_titles) for i, row in enumerate(hot100_uniq_df.iterrows()): row_values = row[1] checked = mbid_list.loc[(row_values['title'],row_values['artist']),'checked'] if checked: continue mbid_list.loc[row[1]['title'],'mbid'] = search_artist_mb(row_values) mbid_list.loc[row[1]['title'],'checked'] = True if (i % 10) == 0: save_progress(mbid_list,output_dir+target) print("Progress: {}%".format(round(i/total_titles*100,2))) elif i >= (len(hot100_uniq_df) - 1): save_progress(mbid_list,output_dir+target) print("Success: {} of {}".format(i+1,total_titles)) time.sleep(.2)Total Titles to be matched: 28083 Progress: 0.0% Progress: 0.04% Progress: 0.07% Progress: 0.11% Progress: 0.14% Progress: 0.18% Progress: 0.21% Progress: 0.25% Progress: 0.28% Progress: 0.32% Progress: 0.36% Progress: 0.39% Progress: 0.43% Progress: 0.46% Progress: 0.5% Progress: 0.53% Progress: 0.57% Progress: 0.61% Progress: 0.64% Progress: 0.68% Progress: 0.71% Progress: 0.75% Progress: 0.78% Progress: 0.82% Progress: 0.85% Progress: 0.89% Progress: 0.93% Progress: 0.96% Progress: 1.0% Progress: 1.03% Progress: 1.07% Progress: 1.1% Progress: 1.14% Progress: 1.18% Progress: 1.21% Progress: 1.25% Progress: 1.28% Progress: 1.32% Progress: 1.35% Progress: 1.39% Progress: 1.42% Progress: 1.46% Progress: 1.5% Progress: 1.53% Progress: 1.57% Progress: 1.6% Progress: 1.64% Progress: 1.67% Progress: 1.71% Progress: 1.74% Progress: 1.78% Progress: 1.82% Progress: 1.85% Progress: 1.89% Progress: 1.92% Progress: 1.96% Progress: 1.99% Progress: 2.03% Progress: 2.07% Progress: 2.1% Progress: 2.[...]Classification Taskimport matplotlib.pyplot as plt # plotting library import numpy as np # this module is useful to work with numerical arrays import pandas as pd # this module is useful to work with tabular data import random # this module will be used to select random samples from a collection import os # this module will be used just to create directories in the local filesystem from tqdm import tqdm # this module is useful to plot progress bars import torch import torchvision import torch.nn.functional as F from torchvision import transforms from torch.utils.data import DataLoader from torch import nn from sklearn.model_selection import KFold # this module is useful to split data into training and test sets from torch.utils.data import SubsetRandomSampler import pickle # Check if the GPU is available device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") print(f'Selected device: {device}')Selected device: cudaDatasettrain_dataset = torchvision.datasets.FashionMNIST('classifier_data', train=True, download=True, transform=transforms.ToTensor()) test_dataset = torchvision.datasets.FashionMNIST('classifier_data', train=False, download=True, transform=transforms.ToTensor()) print(f"Train dataset : {train_dataset}") print(f"Test dataset : {test_dataset}") label_names=['t-shirt','trouser','pullover','dress','coat','sandal','shirt', 'sneaker','bag','boot'] sample_index = 0 num_labels = len(label_names) image = train_dataset[sample_index][0] label = label_names[train_dataset[sample_index][1]] print(f'PyTorch tensor 
shape: {image.shape}') print(f'PyTorch tensor type: {image.dtype}')Train dataset : Dataset FashionMNIST Number of datapoints: 60000 Root location: classifier_data Split: Train StandardTransform Transform: ToTensor() Test dataset : Dataset FashionMNIST Number of datapoints: 10000 Root location: classifier_data Split: Test StandardTransform Transform: ToTensor() PyTorch tensor shape: torch.Size([1, 28, 28]) PyTorch tensor type: torch.float32Convolutional Network# #good convolutinal neural network architecture, 88% accuracy with 150 epochs, v1 # class myCNN(nn.Module): # def __init__(self): # super().__init__() # ### Convolutional section # self.conv = nn.Sequential( # # First convolutional layer # nn.Conv2d(in_channels= 1, out_channels=6, kernel_size=5), # nn.ReLU(True), # nn.MaxPool2d(kernel_size=2, stride=2), # # Second convolutional layer # nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5), # nn.ReLU(True), # nn.MaxPool2d(kernel_size=2, stride=2), # ) # ### Flatten layer # self.flatten = nn.Flatten(start_dim=1) # ### Linear section # self.lin = nn.Sequential( # # First linear layer # nn.Linear(in_features=12*4*4, out_features=64), # nn.ReLU(True), # nn.Dropout(p=0.5), # # Second linear # nn.Linear(in_features=64, out_features=32), # nn.ReLU(True), # nn.Dropout(p=0.5), # nn.Linear(in_features=32, out_features=10), # nn.ReLU(True), # # we don't need softmax since we'll use cross entropy loss # ) # def forward(self, x): # # Apply convolutions # x = self.conv(x) # # Flatten # x = self.flatten(x) # # # Apply linear layers # x = self.lin(x) # return x #V2 class myCNN(nn.Module): def __init__(self): super().__init__() ### Convolutional section self.conv = nn.Sequential( # First convolutional layer nn.Conv2d(in_channels= 1, out_channels=16, kernel_size=5), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), # Second convolutional layer nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), ) ### Flatten layer self.flatten = nn.Flatten(start_dim=1) ### Linear section self.lin = nn.Sequential( # First linear layer nn.Linear(in_features=32*4*4, out_features=64), nn.ReLU(True), nn.Dropout(p=0.5), # Second linear nn.Linear(in_features=64, out_features=32), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(in_features=32, out_features=10), nn.ReLU(True), # we don't need softmax since we'll use cross entropy loss ) def forward(self, x): # Apply convolutions x = self.conv(x) # Flatten x = self.flatten(x) # # Apply linear layers x = self.lin(x) return xTraining# Training function def train_epoch(model, dataloader, loss_fn, optimizer, device): # Set the model to training mode model.train() # Initialize the loss running_loss = 0 train_loss = [] # Loop over the training batches for (data, label) in dataloader: # target = onehot encoding of label target = torch.eye(10)[label] # Move the input and target data to the selected device data, target = data.to(device), target.to(device) # Zero the gradients optimizer.zero_grad() # Compute the output output = model(data) assert output.shape == target.shape # Compute the loss loss = loss_fn(output, target) # Compute the gradients loss.backward() # Update the weights optimizer.step() #batch loss loss_batch = loss.detach().cpu().numpy() train_loss.append(loss_batch) # Return the average training loss train_loss = np.mean(train_loss) #print(f"Training loss: {train_loss}") return train_loss #validation function def validate_epoch(model, dataloader, loss_fn, device): # Set the model to evaluation mode 
model.eval() # Initialize the validation loss val_loss = [] # Loop over the validation batches with torch.no_grad(): for (data, label) in dataloader: # target = onehot encoding of label target = torch.eye(10)[label] # Move the input and target data to the selected device data, target = data.to(device), target.to(device) # Compute the output output = model(data) assert output.shape == target.shape # Compute the loss loss = loss_fn(output, target) #batch loss loss_batch = loss.detach().cpu().numpy() val_loss.append(loss_batch) # Return the average validation loss val_loss = np.mean(val_loss) #print(f"Validation loss: {val_loss}") return val_loss #reset weights function def reset_weights(model): for layer in model.modules(): if isinstance(layer, nn.Linear) or isinstance(layer, nn.Conv2d): layer.reset_parameters() # plot and save validation loss def plot_and_save_losses_and_accuracy(comb_val_losses, model_name): fig = plt.figure(figsize=(8,3)) plt.plot(comb_val_losses, label='Validation loss', color='red') plt.ylim(0,2) plt.title(f"{model_name} Validation Loss") plt.xlabel("Epoch") plt.ylabel("Loss") plt.grid() plt.legend() fig.savefig(f"images/{model_name}_val_loss.eps", format="eps", dpi=500, bbox_inches="tight") plt.show() plt.close() #get the percentage of correct predictions on the test set def get_accuracy(net, dataloader, loss_fn, device): correct = 0 total = 0 net.eval() losses = [] with torch.no_grad(): for (data, label) in dataloader: # target = onehot encoding of label target = torch.eye(10)[label] # Move the input and target data to the selected device data, label, target = data.to(device), label.to(device), target.to(device) # Compute the output output = net(data) assert output.shape == target.shape # Compute the loss loss = loss_fn(output, target) losses.append(loss.detach().cpu().numpy()) # Get the index of the max log-probability pred = output.argmax(dim=1, keepdim=True) # Add to the total number of correct predictions correct += pred.eq(label.view_as(pred)).sum().item() # Add to the total number of predictions total += data.shape[0] # Return the accuracy and test loss test_loss = np.mean(losses) return correct/total, test_lossHyperparameters Tuning and Cross-validation TrainingNote: in this case weights are not reset at every new fold, otherwise the training would take too long. 
This makes the validation loss not 100% correct, and we need to take that into account when looking at the plots.torch.manual_seed(42) ## Parameters #batch size batch_sizes = [256, 1024]#[32, 64, 128, 256, 512, 1024] #learning rate learning_rates = [5e-4, 1e-3, 5e-3, 1e-2, 5e-2] #[5e-4, 1e-3, 5e-3, 1e-2] #optimiizer optimizers = [torch.optim.Adam, torch.optim.SGD] #[torch.optim.Adam, torch.optim.SGD] optimizer_names = ["Adam", "SGD"] #epochs## NOTE: the total number of epochs is epochs*k_folds epochs = 20 #k-folds k_folds = 4 #test dataloader test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False) #try big bs losses = np.zeros((len(batch_sizes), len(learning_rates), len(optimizers), epochs*k_folds)) num_tot_combinations = len(batch_sizes) * len(learning_rates) * len(optimizers) comb = 0 for b, batch_size in enumerate(batch_sizes): for l, lr in enumerate(learning_rates): for o, (optim, optim_name) in enumerate(zip(optimizers, optimizer_names)): print(f"__________________{comb+1}/{num_tot_combinations}_______________________") print(f"BS: {batch_size}, optim: {optimizer_names[o]}, lr: {lr:.0e}, epochs: {epochs*k_folds}") comb += 1 #create model model = myCNN().to(device) #create loss function loss_fn = nn.CrossEntropyLoss() #create optimizer optimizer = optim(model.parameters(), lr=lr) if optim == torch.optim.SGD: optimizer = optim(model.parameters(), lr=lr, momentum=0.9) #K-fold kf = KFold(n_splits=k_folds, shuffle=True) #comb_val_losses = np.zeros(epochs) comb_val_losses = [] for fold, (train_ids, val_ids) in enumerate(kf.split(train_dataset)): #reset weights #reset_weights(model) #no reset #create dataloaders train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=SubsetRandomSampler(train_ids)) val_dataloader = DataLoader(train_dataset, batch_size=len(val_ids), #try big bs for val sampler=SubsetRandomSampler(val_ids)) #train model #val_loss_log = [] #not needed anymore for epoch in tqdm(range(epochs)): train_loss = train_epoch(model, train_dataloader, loss_fn, optimizer, device) val_loss = validate_epoch(model, val_dataloader, loss_fn, device) #val_loss_log.append(val_loss) #not needed anymore comb_val_losses.append(val_loss) #comb_val_losses += np.array(val_loss_log) / k_folds #not needed anymore #store losses comb_val_losses = np.array(comb_val_losses) losses[b, l, o, :] = comb_val_losses #complete training on the whole dataset #not needed anymore #reset_weights(model) #no reset # we use the training already done # train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) # for epoch in tqdm(range(epochs)): # _ = train_epoch(model, train_dataloader, loss_fn, optimizer, device) #plot and save losses model_name = f"CNN_{batch_size}_{optim_name}_{lr:.0e}" print(f"model name: {model_name}") accuracy, test_loss = get_accuracy(model, test_dataloader, loss_fn, device) print(f"Test loss: {test_loss:.4f}") print(f"Test accuracy: {(accuracy*100):.4f}") plot_and_save_losses_and_accuracy(comb_val_losses, model_name)__________________1/20_______________________ BS: 256, optim: Adam, lr: 5e-04, epochs: 80Final Training loop#set seed torch.manual_seed(42) # decide to train or not load_good_model = True # if True do not train a new model, if False train a new model # number of epochs num_epochs = 150 #150 # Instantiate the model cnn = myCNN().to(device) # define the loss function loss_fn = nn.CrossEntropyLoss() #define optimizer optimizer = torch.optim.Adam(cnn.parameters(), lr=3e-3) #lr = 1e-3 #dataloadeers train_dataloader = 
DataLoader(train_dataset, batch_size=2048, shuffle=True) test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False) #try big bs # early stopping helper function def early_stopping(latest_val_losses, best_loss, model, cnt, epoch, increment=0.02, patience=5): if epoch < 4: return best_loss, cnt, False curr_loss = np.mean(latest_val_losses[-3:]) if curr_loss < best_loss: best_loss = curr_loss #print(f"best loss: {best_loss:.4f}, curr_loss: {curr_loss:.4f}") #save the model # torch.save(model.state_dict(), f"training/cnn_epoch_{epoch+1}.pth") cnt = 0 elif curr_loss > (1+increment)*best_loss: cnt += 1 # print(f" ADDING 1 best loss: {best_loss:.4f}, curr_loss: {curr_loss:.4f}") if cnt >= patience: print(f"Early stopping at epoch {epoch+1}") return best_loss, cnt, True else: cnt = 0 return best_loss, cnt, False # training loop train_losses = [] test_losses = [] if not load_good_model: #for early stopping early_stopping_cnt = 0 best_loss = np.inf for epoch in tqdm(range(num_epochs)): #print(f"Epoch {epoch+1}/{num_epochs}") # Train the model train_loss = train_epoch(cnn, train_dataloader, loss_fn, optimizer, device) # Validate the model val_loss = validate_epoch(cnn, test_dataloader, loss_fn, device) #print(f"Test loss: {val_loss}") # Append the losses train_losses.append(train_loss) test_losses.append(val_loss) # early stopping best_loss, early_stopping_cnt, early_stopping_flag = early_stopping(test_losses, best_loss, cnn, early_stopping_cnt, epoch) if early_stopping_flag: break #save the model torch.save(cnn.state_dict(), f"training/final_cnn_v2_{num_epochs}.pth") # Save the losses with open(f'training/class_losses{num_epochs}.pkl', 'wb') as f: pickle.dump([train_losses, test_losses], f) else: #load the model cnn.load_state_dict(torch.load(f"training/final_cnn_v2_{num_epochs}.pth")) #load the losses with open(f'training/class_losses{num_epochs}.pkl', 'rb') as f: [train_losses, test_losses] = pickle.load(f)Losses and model accuracy# Plot the training and validation losses fig, ax = plt.subplots(figsize=(12,8)) ax.plot(train_losses, label='Training loss') ax.plot(test_losses, label='Test loss') ax.set_xlabel('Epochs') ax.set_ylabel('Loss') ax.legend() plt.show() accuracy, test_loss = get_accuracy(cnn, test_dataloader, loss_fn, device) print(f"Test loss: {test_loss:.4f}") print(f"Test accuracy: {(accuracy*100):.2f}")Test loss: 0.3222 Test accuracy: 90.22Confusion Matrices# function to get all predictions def get_predictions(model, dataloader, device): # Set the model to evaluation mode model.eval() # Initialize the predictions predictions = [] # Loop over the test batches with torch.no_grad(): for (data, label) in tqdm(dataloader): # Move the input and target data to the selected device data, label = data.to(device), label.to(device) # Compute the output output = model(data) # Get the index of the max log-probability pred = output.argmax(dim=1, keepdim=True) # Add to the predictions predictions.extend(pred.cpu().numpy().tolist()) return predictions #define dataloaders with no shuffle train_dataloader = DataLoader(train_dataset, batch_size=10000, shuffle=False) #get train dataset predictions train_predictions = get_predictions(cnn, train_dataloader, device) # get test dataset predictions test_predictions = get_predictions(cnn, test_dataloader, device) #get the labels train_labels = [] for data, label in tqdm(train_dataloader): #note: to have a reasonable cm, dataloader needs to be without shuffle train_labels.extend(label.numpy()) test_labels = [] for data, label in 
tqdm(test_dataloader): test_labels.extend(label.numpy()) print(f"Train labels: {train_labels[:10]}") print(f"Train predictions: {train_predictions[:10]}") print(f"Test labels: {test_labels[:10]}") print(f"Test predictions: {test_predictions[:10]}") # calculate confusion matrix for train and test set import sklearn.metrics as metrics cm_train = metrics.confusion_matrix(train_labels, train_predictions) cm_test = metrics.confusion_matrix(test_labels, test_predictions) def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True): import itertools accuracy = np.trace(cm) / np.sum(cm).astype('float') misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') fig = plt.figure(figsize=(15, 10)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 1.5 if normalize else cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): if normalize: plt.text(j, i, "{:0.4f}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") else: plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) plt.show() return fig fig1 = plot_confusion_matrix(cm_train, target_names=label_names, title='Confusion matrix - Train set') #save the figure fig1.savefig(f"images/cnn_confusion_matrix_train.png", dpi=500, bbox_inches='tight') fig2 = plot_confusion_matrix(cm_test, target_names=label_names, title='Confusion matrix - Test set') #save the figure fig2.savefig(f"images/cnn_confusion_matrix_test.png", dpi=500, bbox_inches='tight')Apparently, shirts can be confused more easily with t-shirts, coats and pullover. To be fair I looked at some examples of coats and pullover and they are extremely hard to distinguish even as a human. 
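To put numbers on that impression, one option is a per-class report (a minimal sketch, assuming the `test_labels`, `test_predictions` and `label_names` objects built above; scikit-learn's `classification_report` is used here purely for illustration):

```
import numpy as np
from sklearn.metrics import classification_report

# Per-class precision / recall / F1 on the test set; the "shirt" row is expected
# to show the weakest recall, in line with what the confusion matrix suggests.
print(classification_report(np.ravel(test_labels),
                            np.ravel(test_predictions),
                            target_names=label_names))
```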
Network Analysis Weight Histograms# Convolutional layers c1w = cnn.conv[0].weight.data.cpu().numpy() c1b = cnn.conv[0].bias.data.cpu().numpy() c2w = cnn.conv[3].weight.data.cpu().numpy() c2b = cnn.conv[3].bias.data.cpu().numpy() # Linear layers l1w = cnn.lin[0].weight.data.cpu().numpy() l1b = cnn.lin[0].bias.data.cpu().numpy() l2w = cnn.lin[3].weight.data.cpu().numpy() l2b = cnn.lin[3].bias.data.cpu().numpy() l3w = cnn.lin[6].weight.data.cpu().numpy() l3b = cnn.lin[6].bias.data.cpu().numpy() # Weights histogram fig, axs = plt.subplots(5, 1, figsize=(12,12)) axs[0].hist(c1w.flatten(), bins=100, color='r', alpha=0.8, label='Conv1') axs[0].set_title('First convolutional layer weights') axs[1].hist(c2w.flatten(), bins=100, color='r', alpha=0.8, label='Conv2') axs[1].set_title('Second convolutional layer weights') axs[2].hist(l1w.flatten(), bins=100, color='r', alpha=0.8, label='Lin1') axs[2].set_title('First linear layer weights') axs[3].hist(l2w.flatten(), bins=100, color='r', alpha=0.8, label='Lin2') axs[3].set_title('Second linear layer weights') axs[4].hist(l3w.flatten(), bins=100, color='r', alpha=0.8, label='Lin3') axs[4].set_title('Third linear layer weights') [ax.grid() for ax in axs] plt.tight_layout() plt.show() # save figure fig.savefig('images/cnn_weights_histogram.png', dpi=500, bbox_inches='tight')Convolutional filters / Receptive Fieldsprint(cnn) def plot_nchw_data(data, h_num, v_num, title): fig, axs = plt.subplots(h_num, v_num, figsize=(8,3)) shape = data.shape data = data.reshape(shape[0]*shape[1], shape[2], shape[3]) for idx, ax in enumerate(axs.flatten()): ax.set_xticks([]) ax.set_yticks([]) if idx < len(data): ax.imshow(data[idx,:,:], cmap='gray') plt.suptitle(title) #plt.tight_layout(rect=[0, 0, 1, 0.97], h_pad=0, w_pad=0) plt.show() return fig fig1 = plot_nchw_data(c1w, 2, 8, 'Layer 1 convolutional kernels') #save fig fig1.savefig('images/cnn_conv1_kernels.png', dpi=500, bbox_inches='tight') fig2 = plot_nchw_data(c2w, 4, 8, 'Layer 2 convolutional kernels') #save fig fig2.savefig('images/cnn_conv2_kernels.png', dpi=500, bbox_inches='tight') ## Convolutional section activation # Hook function to call during forward pass def hook_fn(module, input, output): intermediate_outputs.append(output) # Attach hook function to all the convolutional layers hook_handles = [] # This list will contains all the handles required to remove the hooks later hook_handles.append(cnn.conv[1].register_forward_hook(hook_fn)) hook_handles.append(cnn.conv[4].register_forward_hook(hook_fn)) test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False) #get first image from the test dataloader image, label = iter(test_dataloader).next() # show the image plt.imshow(image.squeeze(), cmap='gray') image, label = image.to(device), label.to(device) #evaluate output cnn.eval() with torch.no_grad(): intermediate_outputs = [] output = cnn(image) for handle in hook_handles: handle.remove() for layer_num, intermediate_output in enumerate(intermediate_outputs): intermediate_output = intermediate_output.cpu().data.numpy() fig = plot_nchw_data(intermediate_output, 2*(layer_num+1), 8, 'Layer {} activation'.format(layer_num)) #save fig fig.savefig('images/cnn_layer{}_activation.png'.format(layer_num), dpi=500, bbox_inches='tight')Last layer activations## Last layer activations # attach an hook to last linear layer def hook_function(module, input, output): global activation activation = output # find an input for each class for which the cnn is very confident fig, axs = plt.subplots(1, 10, 
figsize=(15,6)) inputs = [] test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False) cnn.eval() for i in range(10): found = False for (image, label) in test_dataloader: if label == i: image, label = image.to(device), label.to(device) output = cnn(image) output = torch.softmax(output, dim=1) pred = output.argmax(dim=1, keepdim=True) confidence = output.detach().cpu().numpy()[0][i] # check if the prediction is correct if pred == label and confidence > 0.95: print(f"{label_names[i]} - confidence: {confidence}") inputs.append(image) # plot image axs[i].imshow(image.squeeze().detach().cpu().numpy(), cmap='gray') found = True break if not found: print(f"{label_names[i]} not found") plt.show() #register hook hook_handle = cnn.lin[3].register_forward_hook(hook_function) #analyze activations cnn = cnn.to(device) cnn.eval() activations = [] with torch.no_grad(): for i in inputs: out = cnn(i) zi = activation.cpu().numpy() activations.append(zi) #remove hook hook_handle.remove() fig, axs = plt.subplots(10, 1, figsize=(12, 20)) for i in range(10): act = activations[i][0] axs[i].stem(act, use_line_collection=True) axs[i].set_title(f"Last layer activations for input {label_names[i]}") axs[i].set_ylim([0, 50]) plt.tight_layout() plt.show() #save fig fig.savefig('images/cnn_last_layer_activations.png', dpi=500, bbox_inches='tight')t-shirt - confidence: 0.9712467789649963 trouser - confidence: 1.0 pullover - confidence: 0.9986371397972107 dress - confidence: 0.9999709129333496 coat - confidence: 0.9995236396789551 sandal - confidence: 0.9999998807907104 shirt - confidence: 0.9599254727363586 sneaker - confidence: 1.0 bag - confidence: 1.0 boot - confidence: 0.9994997978210449Linear Regression Let's implement a simple linear regression using torchimport torchTo start with, we'll create some random data and store it as torch tensors. For now you can think of a torch tensor as equivalent to a numpy array, but we will soon see that tensors have some additional functionality that we can exploit.n = 100 p = 5 x = torch.randn(n, p) # predictors (100 observations, 5 features) y = torch.randn(n, 1) # outcomes (100 observations)The Model We're looking for parameters (coefficients) $\beta$ so that $$\begin{array}{rcl}y_i & \approx & \beta_0 + x_i\beta_{1:p} \\&= &\beta_0 + x_{i1}\beta_1 +x_{i2}\beta_2 \dots x_{ip}\beta_p\end{array}$$ Although you may not have seen it represented this way before, we can also write this model as a picture: This will come in handy later when we get to more complex models, but for now you can think of the linear model in whatever way seems natural to you. The Loss To quantify what we mean by a "good" approximation, we'll use the mean-squared-error loss. So for a given guess $\beta$ we'll give it the "grade" $$L(y,\hat y) = \frac{1}{n}\sum_i (y_i - \hat y_i)^2$$where $\hat y_i = x_i\beta_{1:p} + \beta_0$ and $n$ is the number of observations (rows) in the data. We're looking for the $\beta$ that gives us the best (lowest) grade. This combination of model (linear) and loss (mean-squared-error) is called linear regression. The Optimization Algorithm There are an infinite number of possible values for $\beta$ that we could try, so it would take us forever to try them all and see which gives the best loss. To get around this problem, we need some kind of optimization algorithm that is better than brute-force search. 
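To make the idea of "grading" a guess concrete before introducing that algorithm, here is a minimal sketch that evaluates the mean-squared-error loss for one arbitrary candidate (it assumes the `x`, `y`, `n` and `p` tensors defined above; the name `b` is just an illustrative placeholder, not used elsewhere in this notebook):

```
b = torch.randn(p + 1, 1)              # one arbitrary guess: an intercept plus p coefficients
y_hat = torch.matmul(x, b[1:]) + b[0]  # predictions this guess would make
mse = torch.sum((y - y_hat) ** 2) / n  # its "grade": the mean-squared-error loss
print(mse.item())
```

Brute-force search would amount to repeating this for every conceivable `b`; the point of an optimization algorithm is to avoid that.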
The algorithm we will use here is an extremely useful approach called gradient descent, which you should already be familiar with from our [previous exploration]().To start, we'll initialize the coefficients $\beta$ with random numbers. That's our first guess. We'll update these random numbers using gradient descent to iteratively find better values that make the loss smaller.β = torch.randn(p+1, 1) # 5 coefficients (one per feature) plus one intercept β.requires_grad_() # tell torch that β is going to have to save the gradient of something with respect to itself at some pointAutograd Previously when using gradient descent we would have to analytically derive expressions for the gradient of the loss relative to each model parameter, implement these as functions in code, and call upon them at each gradient descent iteration. With pytorch, however, we can simply compute the current value of the loss and pytorch will automatically calculate all the necessary derivatives for us. Let's have a look.ŷ = torch.matmul(x, β[1:]) + β[0] # ŷ = xβ + β0 L = torch.sum((y-ŷ)**2)/n # L = Σ(yᵢ-ŷᵢ)²/n L L.backward() # compute the gradients of the loss with respect to any tensors that went into the loss with requires_grad=true β.gradwhat we see here is a vector containing all of the derivatives we want. The first element is $\frac{\delta L}{\delta \beta_0}$, the second element is $\frac{\delta L}{\delta \beta_1}$, and so on. Note that this object is part of $\beta$ and not $L$. Let's calculate the gradient manually and make sure it matches up. We have:$$\begin{array}{rcl}\hat y_i &=& x_i\beta + \beta_0 \\L(y,\hat y) &=& \frac{1}{n}\sum_i (y_i - \hat y_i)^2\end{array}$$ So the derivative for $\beta_j$ with $j\ne 0$ is$$\begin{array}{rcl}\frac{\partial L}{\partial \beta_j} &=& \frac{1}{n}\sum_i \frac{\partial L}{\partial y_i} \frac{\partial y_i}{\partial \beta_j}\\&=&\frac{1}{n}\sum_i-2(y_i-\hat y_i)x_{ij}\\&=&-\frac{2}{n}x_j^T(y-\hat y)\end{array}$$and for $\beta_0$ is $$\begin{array}{rcl}\frac{\partial L}{\partial \beta_0}&=&\frac{1}{n}\sum_i-2(y_i-\hat y_i) \\&=&-\frac{2}{n}1^T(y-\hat y)\end{array}$$which means we can calculate the whole gradient as $-\frac{2}{n}[1,x]^T(y-[1,x]\beta)$import numpy as np def beta_grad(β): x_with_1s = torch.Tensor(np.hstack((np.ones((n,1)), x))) return -2*torch.matmul(x_with_1s.transpose(0,1), y-torch.matmul(x_with_1s,β))/n beta_grad(β)which is exactly the same as the result in `β.grad`! How does pytorch do this? It turns out that every pytorch tensor records not only its own value, but also what functions were called to produce it and which tensors went into those functions. That's why we use torch tensors instead of numpy arrays. The `torchvis` package lets us see this:from torchviz import make_dot make_dot(L, {'β':β})To calculate the gradient of $L$ with respect to $\beta$, pytorch takes the gradients of each of these functions in turn (which are simple and hardcoded into pytorch), evaluates them at their current value using the input tensors, and multiplies them together to arrive at the answer you would get via the chain rule. You can learn more about this process [here](https://towardsdatascience.com/pytorch-autograd-understanding-the-heart-of-pytorchs-magic-2686cd94ec95) and elsewhere. Doing Gradient Descent To update the weights, we need to subtract the gradient (times a small learning rate) from the current value of the weights. 
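Written out as an update rule (a sketch, using $\eta$ for the small learning rate, a symbol not defined elsewhere in this notebook):

$$\beta \leftarrow \beta - \eta \, \frac{\partial L}{\partial \beta}$$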
We do this from inside a `no_grad():` "context" so that $\beta$ doesn't store the history of the update (try the update without the `with torch.no_grad():` and see what happens). We also clear out `β.grad`.with torch.no_grad(): β -= 10e-5 * β.grad # β = β - 10e-5 * β.grad β.grad.zero_() βAs you can see, our new value of $\beta$ is slightly different from what we started with because we've taken a single gradient step. **ZEROING GRADIENTS**If we don't zero the gradient, the next time we calculate the gradient of something with respect to $\beta$, the new gradient will be added to whatever was stored there instead of overwriting it. That's just the way torch was made to work. ---**EXERCISE**Investigate for yourself (either by testing code or googling) why the parameter update and gradient zeroing should be performed within a `torch.no_grad()` context. --- Looping Let's repeat what we have so far, but now add a little loop to train our model for 500 gradient descent iterations instead of going slowly through a single iteration:x = torch.randn(n, p) # predictors (100 observations, 5 features) y = torch.randn(n, 1) # outcomes (100 observations) loss_record = [] # to keep track of the loss over the iterations β = torch.randn(p+1,1) # 5 coefficients (one per feature) plus one intercept β.requires_grad_() # tell torch that β is going to have to save the gradient of something with respect to itself at some point for e in range(500): ŷ = torch.matmul(x, β[1:]) + β[0] # ŷ = xβ + β0 (calculate predictions) L = torch.sum((y-ŷ)**2)/n # L = Σ(yᵢ-ŷᵢ)²/n (use predictions to calculate loss) L.backward() # compute gradients (in this case δL/δβ) loss_record.append(L.item()) with torch.no_grad(): # take the gradient descent step β -= 10e-3 * β.grad β.grad.zero_()And we can see how the loss changes over the iterations:import altair as alt import pandas as pd loss_df = pd.DataFrame({'i':range(500), 'loss':loss_record}) alt.Chart(loss_df, height=100).mark_line().encode(x='i', y='loss')We see that the training loss goes down. And here's the value of $\beta$ after 500 iterations:βRemember that our original data is totally random and there is no relationship between the predictors and outcomes. So the "right" answer for what $\beta$ should be in this case is $\beta=0$. As we see above, all the values are near 0, so our algorithm appears to be converging to the right answer. 
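As an optional sanity check (a minimal sketch, assuming the `x`, `y`, `n` tensors and the trained `β` from the loop above), we can compare the gradient-descent estimate against the closed-form least-squares solution $\hat\beta = (X^TX)^{-1}X^Ty$, where $X = [1, x]$ is the design matrix with an intercept column:

```
with torch.no_grad():
    X = torch.cat([torch.ones(n, 1), x], dim=1)        # design matrix [1, x]
    beta_closed_form = torch.matmul(
        torch.inverse(torch.matmul(X.t(), X)),         # (XᵀX)⁻¹
        torch.matmul(X.t(), y))                        # Xᵀy
    print(torch.max(torch.abs(beta_closed_form - β)))  # should be small after enough iterations
```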
To predict for a new observation, all we have to do is multiply by $\beta$ and add the intercept:new_x = torch.randn(10, 5) # 10 new observations torch.matmul(new_x, β[1:]) + β[0] # predictionsNumba implementation of Random Walk (2D) in 8 directions [Adapted from: geeksforgeeks](https://www.geeksforgeeks.org/random-walk-implementation-python/)@jit(nopython=True) def random_walk_2d_n8(nwalks): # Arrays to store x, y coordinates x = np.zeros(nwalks) y = np.zeros(nwalks) # 8 directions n8 = [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)] for n in range(nwalks): idx = np.random.choice(len(n8), 1)[0] x[n] = x[n - 1] + n8[idx][0] y[n] = y[n - 1] + n8[idx][1] return x, y x, y = random_walk_2d_n8(nwalks=10000000) # Show the result fig, ax = plt.subplots() ax.set_title("Random walk (2D) - 8 directions") # Plot the lines ax.plot(x, y, lw= 0.05) # Start point ax.scatter(x[0], y[0], zorder=5, c="k") ax.annotate("Start", (x[0] + 100, y[0]), zorder=5, c="k", weight="bold", path_effects=[pe.withStroke(linewidth=2, foreground="white")]) # End point ax.scatter(x[-1], y[-1], zorder=5, c="k") ax.annotate("End", (x[-1] + 100, y[-1]), zorder=5, c="k", weight="bold", path_effects=[pe.withStroke(linewidth=2, foreground="white")]) ax.grid(linewidth=0.3) plt.tight_layout()Building a Simple Bot with the Rasa Stack![alt text](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTaX3LNhGcAe1HnPZSuWS0oH6af0LJHXcH7If1sQgLCFAT1chNGFg) Please make a copy of this notebook first!If you're interested in building chatbots or voice apps with the [Rasa Stack](https://rasa.com/products/rasa-stack/), you've come to the right place! In this notebook you'll build a complete chatbot with Rasa Stack, without having to install anything on your computer. Using Colaboratory, everything will get installed in a cloud machine for you, so you can focus on creating an interesting bot. We'll build a relatively simple bot, that just asks you about your mood and tries to cheer you up if you're feeling a bit down. The tutorial consists of three parts:* Part 1: We'll start with a basic bot that can handle multi-turn dialogues but not understand natural language like "I'm doing well"* Part 2: We'll add the abilitiy to understand natural language so that the bot can generalise to similar inputs that it hasn't seen before* Part 3: We'll give you further resources so you can build your own. Part 1: Build a basic bot InstallationFirst we'll have to install Rasa Core on the server running this notebook. In this first step, your bot won't understand natural language yet. This happens in Step 2. **Run the cell below, this will take a couple of minutes to complete so this is a good time to scroll through the rest of the notebook.****Make sure to install the correct version. If you want to experiment with the latest master, use**: `!pip install -U git+https://github.com/rasahq/rasa_core.git`!pip install rasa_core; import logging, io, json, warnings logging.basicConfig(level="INFO") warnings.filterwarnings('ignore')Writing StoriesA good place to start is by writing a few stories. These are example conversations that Rasa Core will learn from. The format works like this:A story starts with `` and you can give it a name. lines that start with `*` are messages sent by the user. Although you don't write the *actual* message, but rather the intent (and the entities) that represent what the user *means*. If you don't know about intents and entities, don't worry! We will talk about them more later. 
Lines that start with `-` are *actions* taken by your bot. In this case all of our actions are just messages sent back to the user, like `utter_greet`, but in general an action can do anything, including calling an API and interacting with the outside world.stories_md = """ ## happy path * greet - utter_greet * mood_great - utter_happy * mood_affirm - utter_happy * mood_affirm - utter_goodbye ## sad path 1 * greet - utter_greet * mood_unhappy - utter_cheer_up - utter_did_that_help * mood_affirm - utter_happy ## sad path 2 * greet - utter_greet * mood_unhappy - utter_cheer_up - utter_did_that_help * mood_deny - utter_goodbye ## strange user * mood_affirm - utter_happy * mood_affirm - utter_unclear ## say goodbye * goodbye - utter_goodbye """ %store stories_md > stories.mdDefining a DomainThe domain specifies the universe that your bot lives in. You should list all of the intents and actions that show up in your stories. This is also the place to write templates, which contain the messages your bot can send backdomain_yml = """ intents: - greet - goodbye - mood_affirm - mood_deny - mood_great - mood_unhappy actions: - utter_greet - utter_cheer_up - utter_did_that_help - utter_happy - utter_goodbye - utter_unclear templates: utter_greet: - text: "Hey! How are you?" utter_cheer_up: - text: "Here is something to cheer you up:" image: "https://i.imgur.com/nGF1K8f.jpg" utter_did_that_help: - text: "Did that help you?" utter_unclear: - text: "I am not sure what you are aiming for." utter_happy: - text: "Great carry on!" utter_goodbye: - text: "Bye" """ %store domain_yml > domain.ymlTraining your Dialogue ModelNow comes the fun part! We're going to show Rasa Core the stories we wrote above, and train a model on these examples. In this case, the model is a neural network implemented in Keras which learns to predict which action to take next.from rasa_core.policies.keras_policy import KerasPolicy from rasa_core.agent import Agent agent = Agent('domain.yml', policies=[KerasPolicy()]) training_data = agent.load_data('stories.md') agent.train( training_data, validation_split=0.0, epochs=400 ) agent.persist('models/dialogue')Starting up the botLets start up the bot!from rasa_core.agent import Agent agent = Agent.load('models/dialogue')Talking to the bot (no NLU)In this step, we just trained the dialogue model - so basically the conversational flow. So the bot will only understand **structured** input and **no** natural language yet. Go try it out with typing "/" + one of the intents from your domain before, e.g.:/greet/mood_affirm/mood_denyprint("Your bot is ready to talk! Type your messages here or send 'stop'") while True: a = input() if a == 'stop': break responses = agent.handle_message(a) for response in responses: print(response["text"])Part 2: Add natural language understandingSo far, our bot doesn't understand regular sentences with natural language yet.To do that, we're going to build a language understanding model with Rasa NLU. Installing NLUWe have to install a couple of more things in this notebook here. **Run the cell below** - in the meantime, you can check out the rest of the code.!apt-get -qq install -y graphviz libgraphviz-dev pkg-config && pip install pygraphviz; !pip install rasa_nlu[spacy]; !python -m spacy download en;Language UnderstandingLets create some training data here, grouping user messages by their `intent`s. 
The intent describes what the messages *mean*.nlu_md = """ ## intent:greet - hey - hello - hi - hello there - good morning - good evening - moin - hey there - let's go - hey dude - goodmorning - goodevening - good afternoon ## intent:goodbye - cu - good by - cee you later - good night - good afternoon - bye - goodbye - have a nice day - see you around - bye bye - see you later ## intent:mood_affirm - yes - indeed - of course - that sounds good - correct ## intent:mood_deny - no - never - I don't think so - don't like that - no way - not really ## intent:mood_great - perfect - very good - great - amazing - feeling like a king - wonderful - I am feeling very good - I am great - I am amazing - I am going to save the world - super - extremely good - so so perfect - so good - so perfect ## intent:mood_unhappy - my day was horrible - I am sad - I don't feel very well - I am disappointed - super sad - I'm so sad - sad - very sad - unhappy - bad - very bad - awful - terrible - not so good - not very good - extremly sad - so saad - so sad """ %store nlu_md > nlu.mdTrain the Rasa NLU ModelWe're going to train a model to recognise these intents, so that when we send a message like "hello" to our bot, it will recognise this as a "greet" intent.from rasa_nlu.training_data import load_data from rasa_nlu.config import RasaNLUModelConfig from rasa_nlu.model import Trainer training_data = load_data('nlu.md') pipeline = [{"name": "nlp_spacy"}, {"name": "tokenizer_spacy"}, {"name": "intent_featurizer_spacy"}, {"name": "intent_classifier_sklearn"}] trainer = Trainer(RasaNLUModelConfig({"pipeline": pipeline})) interpreter = trainer.train(training_data) model_directory = trainer.persist('./projects/default/')Starting up the bot (with NLU)Now that we've trained the dialogue **and** language understanding models and saved them, we can start up an `Agent` which will handle conversations for us.from rasa_core.agent import Agent agent = Agent.load('models/dialogue', interpreter=model_directory)Talking to the Bot (with NLU)We can start talking to the bot in natural language.print("Your bot is ready to talk! Type your messages here or send 'stop'") while True: a = input() if a == 'stop': break responses = agent.handle_message(a) for response in responses: print(response["text"])Pro Tip: Visualising the Training DataYou can visualise the stories to get a sense of how the conversations go. This is usually a good way to see if there are any stories which don't make sensefrom rasa_core.agent import Agent from IPython.display import Image agent = Agent.load('models/dialogue') agent.visualize("stories.md", "story_graph.png", max_history=2) Image(filename="story_graph.png")Introduction: Forecast for SEAIRQ model with stochastic parameters In this notebook, we consider the SEAIRQ model. 
We assume that the parameters * $\beta$ (probability of infection on contact),* $\gamma_{E}$ (rate of progression for exposed individual to class A), * $\gamma_{AA}$ (rate of progression from class A to asymptomatic infective class), * $\gamma_{AS}$ (rate of progression from class A to symptomatic infective class), * $\gamma_{I_a}$ (rate of removal for asymptomatic infected individuals), and* $\gamma_{I_s}$ (rate of removal for symptomatic infected individuals) * $ \tau_S$ (quarantining rate for susceptibles)* $ \tau_E$ (quarantining rate for exposed)* $ \tau_A$ (quarantining rate for A)* $ \tau_{I_a}$ (quarantining rate for asymptomatic infectives)* $ \tau_{I_s}$ (quarantining rate for symptomatic infectives)are not known exactly, but rather are characterized by a 11D Gaussian distribution with known mean and covariance matrix. The Gaussian distribution function is trunacted, i.e. set to zero if any parameter is $< 0$.**We now illustrate how uncertainties in the parameters affect the predictions of the SEAIRQ model.**For this we simulate the SEIR model $N_s = 500$ times; for each simulation the above parameters are sampled from a given 11D Gaussian distribution. The resulting 500 trajectories are shown together with their mean, standard deviation, median, and 5 as well as 95 percentiles.We perform this analysis for the deterministic SEAIRQ model. Define model parameters and initialise pyross.forecast.SEAIRQM = 1 # the SEAIRQ model we consider has no age structure Ni = 50000*np.ones(M) # so there is only one age group N = np.sum(Ni) # and the total population is the size of this age group E0 = np.array([0]) A0 = np.array([1]) Ia0 = np.array([0]) # the SEAIRQ model we consider has only one kind of infective Is0 = np.array([20]) # we take these to be symptomatic Q0 = np.array([0]) R0 = np.array([0]) # and assume there are no recovered individuals initially S0 = N-(Ia0+Is0+R0+E0) # The initial susceptibles are obtained from S + E + A + Ia + Is + R = N # there is no contact structure def contactMatrix(t): return np.identity(M) # duration of simulation and output datapoints Tf = 500; Nt=Tf+1 # These parameters we consider exact fsa = 1 # the self-isolation parameter tE = 0.00 # rate E -> Q tA = 0.00 # rate A -> Q tIa = 0.00 # rate Ia -> Q tIs = 0.05 # rate Is -> Q # These are the parameters that we sample stochastically # means alpha = 0.0 # fraction of asymptomatic infectives beta = 0.2 # infection rate gIa = 0.1 # removal rate of asymptomatic infectives gIs = 0.1 # removal rate of symptomatic infectives gE = 0.04 # removal rate of E gA = 0.2 # rate to go from A to Ia # order in covariance matrix: # beta, gE, gAA, gAS, gIa, gIs, tS, tE, tA, tIa, tIs # cov = np.zeros([6,6],dtype=float) cov[0,0] = 0*alpha**2 # cov(alpha, alpha) = Var(alpha) cov[1,1] = 0.1*beta**2 # cov(beta, beta) = Var(beta) cov[2,2] = 0.01*gIa**2 # cov(gIa,gIa) = Var(gIa) cov[3,3] = 0.01*gIs**2 # cov(gIs,gIs) = Var(gIs) cov[4,4] = 0.01*gA**2 # cov(gA, gA) = Var(gA) cov[5,5] = 0.01*gE**2 # cov(gE, gE) = Var(gE) # cov[1,5] = 0.01*beta*gE # cov(beta, gE) cov[5,1] = cov[1,5] # covariance matrix is symmetric # cov[2,3] = cov[2,2] # cov(gIa, gIs) cov[3,2] = cov[2,3] # Define parameters for simulations parameters = {'alpha':alpha, 'beta':beta, 'gE':gE,'gA':gA, 'gIa':gIa, 'gIs':gIs, 'gE':gE, 'fsa':fsa, 'tE':tE,'tA':tA,'tIa':tIa,'tIs':tIs, 'cov':cov } # Initialise pyross forecast module model_forecast = pyross.forecast.SEAIRQ(parameters, M, Ni) # Number of simulations over which we average, use 500 Ns = 10 # Define a function which we 
use below to plot simulation results def plot_trajectories(result, percentile=-1, plot_index = 4, # which time series should be plotted? filename='None'): # set filename for saving figures # plot_index class # 0 susceptibles # 1 exposed # 2 asymptomatic and infectious # 3 asymptomatic infectives # 4 symptomatic infectives # 5 quarantined if plot_index == 0: title='Susceptibles' ylabel = r'$N_S$' elif plot_index == 1: title='Exposed' ylabel = r'$N_{E}$' elif plot_index == 2: title=r'Asymptomatic, infectious (A)' ylabel = r'$N_{A}$' elif plot_index == 3: title='Asymptomatic infectives' ylabel = r'$N_{I,a}$' elif plot_index == 4: title='Symptomatic infectives' ylabel = r'$N_{I,s}$' elif plot_index == 5: title='Quarantined' ylabel = r'$N_{Q}$' else: raise RuntimeError("plot_index should be 0, 1, 2, or 3.") # fontsize=25 # # trajectories = result['X'] t_arr = result['t'] traj_mean = result['X_mean'] traj_std = result['X_std'] # # # Plot trajectories # fig, ax = plt.subplots(1,1,figsize=(7,5)) ax.set_title(title, y=1.05, fontsize=fontsize) for i,e in enumerate(trajectories): ax.plot(t_arr,e[plot_index], alpha=0.15, ) ax.fill_between(t_arr,traj_mean[plot_index] - traj_std[plot_index], traj_mean[plot_index] + traj_std[plot_index], alpha=0.7, color='limegreen', label='Std deviation') ax.plot(t_arr,traj_mean[plot_index] - traj_std[plot_index], alpha=1, label='Std deviation', lw=1.5, ls='--', color='black') ax.plot(t_arr,traj_mean[plot_index] + traj_std[plot_index], alpha=1, #label='Std deviation', lw=1.5, ls='--', color='black') ax.plot(t_arr,traj_mean[plot_index], alpha=1, lw=2, color='black', label='Mean') ax.set_xlim(np.min(t_arr),np.max(t_arr)) ax.set_ylabel(ylabel,fontsize=fontsize) ax.set_xlabel(r'$t$ [days]',fontsize=fontsize) ax.legend(loc='upper right',fontsize=18) plt.show() if filename != 'None': fig.savefig(filename + '_trajs.png', bbox_inches='tight',dpi=100) plt.close() # # # # Plot percentiles # if percentile > 0: percentiles_lower = np.percentile(trajectories[:,plot_index],percentile,axis=0) percentiles_upper = np.percentile(trajectories[:,plot_index],100-percentile,axis=0) percentiles_median = np.percentile(trajectories[:,plot_index],50,axis=0) print("In the following plot, red dashed lines denote {0} and {1} percentiles of the numerical data:".format(percentile, 100-percentile)) fig, ax = plt.subplots(1,1,figsize=(7,5)) ax.set_title(title, y=1.05, fontsize=fontsize) for i,e in enumerate(trajectories): ax.plot(t_arr,e[plot_index], alpha=0.15, ) ax.fill_between(t_arr,percentiles_lower, percentiles_upper, alpha=0.1, color='red', label='Percentiles') ax.plot(t_arr,percentiles_lower, alpha=1, lw=2, label='Percentiles', ls='--', color='red', ) ax.plot(t_arr,percentiles_upper, alpha=1, lw=2, color='red', ls='--', ) ax.plot(t_arr,percentiles_median, alpha=1, lw=2, color='red', label='Median') ax.plot(t_arr,traj_mean[plot_index], alpha=1, lw=2, color='black', label='Mean') ax.set_xlim(np.min(t_arr),np.max(t_arr)) ax.set_ylabel(ylabel,fontsize=fontsize) ax.set_xlabel(r'$t$ [days]',fontsize=fontsize) ax.legend(loc='upper right',fontsize=18) plt.show() if filename != 'None': fig.savefig(filename + '_trajs2.png', bbox_inches='tight',dpi=100) plt.close() # Define a function which we use below to plot parameters used for simulations def plot_sample_parameters(result, filename='None'): # set filename for saving figures # fontsize=25 # # Scatterplot of used parameters # sample_parameters = result['sample_parameters'].T beta = result['beta'] gE = result['gE'] gIa = result['gIa'] gIs = result['gIs'] 
# title = r'Samples for stochastic $\beta$, $\gamma_{E}$' labelx = r'$\beta $' labely = r'$\gamma_{E}$' x_mean = beta y_mean = gE labelx_mean = r'$\langle \beta \rangle$' labely_mean = r'$\langle \gamma_{E} \rangle$' data_index_x = 1 data_index_y = 4 fig, ax = plt.subplots(1,1,figsize=(7,5)) ax.set_title(title,y=1.05,fontsize=fontsize) ax.axvline(x_mean,color='limegreen',ls='--',lw=2,label=labelx_mean) ax.axhline(y_mean,color='dodgerblue',ls='--',lw=2,label=labely_mean) ax.scatter(sample_parameters[data_index_x], sample_parameters[data_index_y] , label='sampled data', color='black',s=10) #, c = truth) ax.set_xlabel(labelx,fontsize=fontsize) ax.set_ylabel(labely,fontsize=fontsize) ax.set_xlim(0,1.05*np.max(sample_parameters[data_index_x])) ax.set_ylim(0,1.05*np.max(sample_parameters[data_index_y])) ax.legend(loc='best',fontsize=15) plt.show() if filename != 'None': fig.savefig(filename + '_samples1.png', bbox_inches='tight',dpi=100) plt.close() #Forecast based on deterministic modelresult = model_forecast.simulate(S0, E0, A0, Ia0, Is0, Q0, contactMatrix, Tf, Nt, verbose=True, Ns=Ns) plot_trajectories(result, plot_index = 2, percentile=5, ) plot_trajectories(result, # filename='forecast_deterministic', percentile=5, ) plot_trajectories(result, plot_index = 5, percentile=5, ) plot_sample_parameters(result)Finished. Time needed for evaluation: 00:00:05genotypescallset = zarr.open_group('new_uniref_merged.zarr/', mode='r') callset gt = allel.GenotypeArray(callset['calldata/GT'][:]) gt chrom = callset['variants/CHROM'] chrom[:]meta datadf_meta = pd.read_csv('1175-VO-KH-STLAURENT-S01-samples.csv', sep=',') df_meta.columns, len(df_meta) #new samples to the meta data - we only need some columms new_meta = pd.DataFrame() new_meta['ROMA_ID'] = list(np.concatenate([callset['samples'][:10], list(callset['samples'][-5:])])) new_meta['Status'] = 'sequenced' new_meta['Species'] = ['An. arabiensis' for _ in range(5)] + ['An. gambiae' for _ in range(5)] + ['An. funestus' for _ in range(5)] new_meta['Species_complex'] = ['An. arabiensis' for _ in range(5)] + ['An. gambiae' for _ in range(5)] + ['An. funestus' for _ in range(5)] new_meta['Sample_type'] = 'Genomic DNA' df_meta = df_meta.append(new_meta, ignore_index=True) df_meta #keep only sequenced df_meta_seq = df_meta[df_meta.Status == 'sequenced'] len(df_meta_seq) #sort the meta data so in the same order as the genotypes df_meta_seq = df_meta_seq.sort_values('ROMA_ID') df_meta_seq.head() all_128_samples = np.asarray(df_meta_seq.ROMA_ID) all_128_samplesremove pf genegt.shape pfbool = chrom[:]!='Pf_M76611' gt_nopf = gt.compress(pfbool) gt_nopf.shapemissingness#first 10 and last 5 are the new samples, we want to keep these. callset['samples'][:] ismiss = gt_nopf.is_missing() persam_ismiss = np.sum(ismiss, axis=0) persam_ismiss.shape #plot persam_rate = persam_ismiss/260581*100 plt.hist(persam_rate); #eyeballing plot - keep samples with <40% missingness - loose 25 samples persam_bool = persam_rate <= 40 np.count_nonzero(persam_bool) #keep track of the samples we have left gud_samples = np.asarray(callset['samples']).compress(persam_bool) len(gud_samples) #looks fine, we keep the new outgroups gud_samples #compress genotype array to keep just gud samples gt_gudsam = gt_nopf.compress(persam_bool, axis=1) gt_gudsam.shape #compress meta data to just keep the info for the gud samples gudsam_bool = np.in1d(all_128_samples, gud_samples) df_meta_gud = df_meta_seq[gudsam_bool] df_meta_gud.shape #3. 
look at per var missingness ismissvar = gt_gudsam.is_missing() pervar_ismiss = np.sum(ismissvar, axis=1) pervar_ismiss.shape pervar_rate = pervar_ismiss/96*100 plt.hist(pervar_rate); #how many var do we have with 0 missingness? full = pervar_rate == 0 np.count_nonzero(full)with no missingness#if we just use the 0 missingness sites what happens? gt_nomiss = gt_gudsam.compress(full) gt_nomiss #palette palette = sns.color_palette('hls', n_colors=12, desat=0.8) sns.palplot(palette); #use names and palette to define plotting colours colors = { 'An. annularis group': palette[0], 'An. hyrcanus group': palette[3], 'An. maculatus group': palette[4], 'An. kochi group': palette[5], 'An. funestus group': palette[6], 'An. barbirostris group': palette[7], 'An. dirus complex': palette[9], '': palette[10], 'An. gambiae': 'r', 'An. arabiensis':'g', 'An. funestus': 'b' } labels = { 'An. annularis group': 'An. annularis group', 'An. hyrcanus group': 'An. hyrcanus group', 'An. maculatus group':'An. maculatus group', 'An. kochi group': 'An. kochi group', 'An. funestus group': 'An. funestus group', 'An. barbirostris group': 'An. barbirostris group', 'An. dirus complex':'An. dirus complex', 'An. gambiae': 'An. gambiae', 'An. arabiensis': 'An. arabiensis', 'An. funestus': 'An. funestus', '':'other' } #fix the df NaNs df_meta_gud = df_meta_gud.fillna(value='') #names so we can add labels etc vbs = np.asarray(df_meta_gud.ROMA_ID) vsc = np.asarray(df_meta_gud.Species_complex) vsp = np.asarray(df_meta_gud.Species) #remove the WGA samples too? isitnotWGA = df_meta_gud.Sample_type == "Genomic DNA" np.count_nonzero(isitnotWGA) gt_nowga = gt_nomiss.compress(isitnotWGA, axis=1) gt_nowga df_meta_nowga = df_meta_gud[isitnotWGA] df_meta_nowga.shape #names so we can add labels etc vbs = np.asarray(df_meta_nowga.ROMA_ID) vsc = np.asarray(df_meta_nowga.Species_complex) vsp = np.asarray(df_meta_nowga.Species) #measure distance and cluster samples d = allel.stats.pairwise_distance(gt_nowga.to_n_alt(), metric='cityblock') z = scipy.cluster.hierarchy.linkage(d, method='average') #plot fig = plt.figure(figsize=(10, 13), ) gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=(2.5, 0.4), wspace=0.22) #dendro ax = fig.add_subplot(gs[0, 0]) sns.despine(ax=ax, left=True, right=True, bottom=False) r = scipy.cluster.hierarchy.dendrogram( z, no_labels=False, count_sort=True, color_threshold=0, labels=vbs, above_threshold_color='k', ax=ax, leaf_font_size=9, orientation='left') ax.set_xlabel('Distance', size=11) #legend handles = [mpl.patches.Patch(color=colors[p], label=labels[p]) for p in list(colors.keys())] ax.legend(handles=handles, loc='upper right', bbox_to_anchor=(0.5, 1.011), ncol=1, fontsize=12) #colour bar ax = fig.add_subplot(gs[0, 1]) sns.despine(ax=ax, left=True, right=True, bottom=True) #get the clustered order of samples sp_com = vsc[r['leaves']] #make list of species complex colours to match the clustered sample order sp_com_colors = [colors[p] for p in sp_com] #plot ax.bar(left=np.full((len(vbs)), 0.5), height=np.full((len(vbs)), 1), bottom=np.asarray(range(0,len(vbs))), color=sp_com_colors, edgecolor='k') ax.set_xlim(0, 1) ax.set_xticks([]) ax.set_ylim(0,len(vbs)) ax.set_xlabel('Sp. 
Group', size=11) ax.set_yticks(np.arange(len(vbs))+0.5) ax.set_yticklabels(vsp[r['leaves']], ha='left', position=(1.1,2), fontsize=10) ax.tick_params(length=0.0) ax.text(-0.4, -1.3, 'ID', fontsize=11) ax.text(1.4, -1.3, 'Sp.', fontsize=11) #fig.savefig('../uniref_nomissing_nocrapsamp_noWGA.jpeg', jpeg_quality=100, bbox_inches='tight'); #use different metrics #these seem to generate the cleanest clusters, using method=average is less good d = allel.stats.pairwise_distance(gt_nowga.to_n_alt(), metric='cityblock') z = scipy.cluster.hierarchy.linkage(d, method='complete') #plot fig = plt.figure(figsize=(10, 13), ) gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=(2.5, 0.4), wspace=0.22) #dendro ax = fig.add_subplot(gs[0, 0]) sns.despine(ax=ax, left=True, right=True, bottom=False) r = scipy.cluster.hierarchy.dendrogram( z, no_labels=False, count_sort=True, color_threshold=0, labels=vbs, above_threshold_color='k', ax=ax, leaf_font_size=9, orientation='left') ax.set_xlabel('Distance', size=11) #legend handles = [mpl.patches.Patch(color=colors[p], label=labels[p]) for p in list(colors.keys())] ax.legend(handles=handles, loc='upper right', bbox_to_anchor=(0.5, 1.011), ncol=1, fontsize=12) #colour bar ax = fig.add_subplot(gs[0, 1]) sns.despine(ax=ax, left=True, right=True, bottom=True) #get the clustered order of samples sp_com = vsc[r['leaves']] #make list of species complex colours to match the clustered sample order sp_com_colors = [colors[p] for p in sp_com] #plot ax.bar(left=np.full((len(vbs)), 0.5), height=np.full((len(vbs)), 1), bottom=np.asarray(range(0,len(vbs))), color=sp_com_colors, edgecolor='k') ax.set_xlim(0, 1) ax.set_xticks([]) ax.set_ylim(0,len(vbs)) ax.set_xlabel('Sp. Group', size=11) ax.set_yticks(np.arange(len(vbs))+0.5) ax.set_yticklabels(vsp[r['leaves']], ha='left', position=(1.1,2), fontsize=10) ax.tick_params(length=0.0) ax.text(-0.4, -1.3, 'ID', fontsize=11) ax.text(1.4, -1.3, 'Sp.', fontsize=11);all variants?#take the all var-gud sample genotype, remove the WGA samples then cluster gt_gudsam gt_al = gt_gudsam.compress(isitnotWGA, axis=1) gt_al #measure distance and cluster samples d = allel.stats.pairwise_distance(gt_al.to_n_alt(), metric='cityblock') z = scipy.cluster.hierarchy.linkage(d, method='complete') #plot fig = plt.figure(figsize=(10, 13), ) gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=(2.5, 0.4), wspace=0.22) #dendro ax = fig.add_subplot(gs[0, 0]) sns.despine(ax=ax, left=True, right=True, bottom=False) r = scipy.cluster.hierarchy.dendrogram( z, no_labels=False, count_sort=True, color_threshold=0, labels=vbs, above_threshold_color='k', ax=ax, leaf_font_size=9, orientation='left') ax.set_xlabel('Distance', size=11) #legend handles = [mpl.patches.Patch(color=colors[p], label=labels[p]) for p in list(colors.keys())] ax.legend(handles=handles, loc='upper right', bbox_to_anchor=(0.5, 1.011), ncol=1, fontsize=12) #colour bar ax = fig.add_subplot(gs[0, 1]) sns.despine(ax=ax, left=True, right=True, bottom=True) #get the clustered order of samples sp_com = vsc[r['leaves']] #make list of species complex colours to match the clustered sample order sp_com_colors = [colors[p] for p in sp_com] #plot ax.bar(left=np.full((len(vbs)), 0.5), height=np.full((len(vbs)), 1), bottom=np.asarray(range(0,len(vbs))), color=sp_com_colors, edgecolor='k') ax.set_xlim(0, 1) ax.set_xticks([]) ax.set_ylim(0,len(vbs)) ax.set_xlabel('Sp. 
Group', size=11) ax.set_yticks(np.arange(len(vbs))+0.5) ax.set_yticklabels(vsp[r['leaves']], ha='left', position=(1.1,2), fontsize=10) ax.tick_params(length=0.0) ax.text(-0.4, -1.3, 'ID', fontsize=11) ax.text(1.4, -1.3, 'Sp.', fontsize=11) fig.savefig('../NEW_uniref_nomissing_nocrapsamp_noWGA_allvar.jpeg', jpeg_quality=100, bbox_inches='tight');Experiment Evaluationevaluation_file = '../data/output/output.csv' plot_output_path = '../plots/' %matplotlib inline import warnings; warnings.simplefilter('ignore') import re import os import datetime import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set(style="whitegrid", rc={'text.usetex' : True, 'text.latex.unicode': True, 'text.latex.preamble': [r'\usepackage{amsmath}']}) font_scale=2.2 sns.set_context("notebook", font_scale=font_scale) plt.rcParams['figure.figsize'] = (10,10) # Set "True" to output tables in tex-style print_latex = False def pretty_plot(title="Plot", f=sns.catplot, plot_size=(12, 5), pretty_order=None, pretty_hue_order=None, **kwargs): if 'hue' in kwargs: kwargs['hue_order'] = order[kwargs['hue']] if pretty_hue_order is None else pretty_hue_order if 'x' in kwargs and f != sns.scatterplot and f != sns.pointplot: kwargs['order'] = order[kwargs['x']] if pretty_order is None else pretty_order fig, ax = plt.subplots() _ = fig.set_size_inches(plot_size) g = f(ax=ax, **kwargs) if type(g) == sns.axisgrid.FacetGrid: plt.close(g.fig) ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) return fig def highlight_max(data, color='gray'): attr = 'background-color: {}'.format(color) if data.ndim == 1: is_max = data == data.max() return [attr if v else '' for v in is_max] else: is_max = data == data.max().max() return pd.DataFrame(np.where(is_max, attr, ''), index=data.index, columns=data.columns) order = { 'qs': ['RandomQss', 'RandomOutlierQss', 'DecisionBoundaryQss', 'ExplorativeMarginQss_NP', 'ExplorativeMarginQss'], 'data_set': ["ALOI", "Annthyroid", "Arrhythmia", "Cardiotocography", "Glass", "HeartDisease", "Hepatitis", "Ionosphere", "KDDCup99", "Lymphography", "PageBlocks", "Parkinson", "PenDigits", "Pima", "Shuttle", "SpamBase", "Stamps", "WBC", "WDBC", "WPBC", "Waveform"], 'qs_opt_method': ['None', 'ParticleSwarmOptimization', 'BlackBoxOptimization'] } pretty_names = { 'AEQ': {'m_average_end_quality': 'MCC', 'c_average_end_quality': 'kappa', 'auc_average_end_quality': 'AUC', 'pauc_average_end_quality': 'PAUC'}, 'AEQ-M': {'m_average_end_quality': 'MCC-AEQ', 'c_average_end_quality': 'kappa-AEQ', 'auc_average_end_quality': 'AUC-AEQ', 'pauc_average_end_quality': 'PAUC-AEQ'}, 'SQ-M': {'m_start_quality': 'MCC-SQ', 'k_start_quality': 'kappa-SQ', 'auc_start_quality': 'AUC-SQ', 'pauc_start_quality': 'PAUC-SQ'}, 'MCC': {'m_start_quality': 'SQ', 'm_end_quality': 'EQ', 'm_average_end_quality': 'AEQ', 'm_ramp_up': 'RU'}, 'qs': { 'RandomQss': '$DQSS_{\\text{rand}}$', 'RandomOutlierQss': '$DQSS_{\\text{rand-o}}$', 'DecisionBoundaryQss': '$IQSS_{\\text{DB}}$', 'ExplorativeMarginQss_NP': '$IQSS_{\\text{DES}}$', 'ExplorativeMarginQss': '$IQSS^*_{\\text{DES}}$', }, } def load_csv_file(f): df = pd.read_csv(f) df.split_strategy = df.split_strategy.apply(str) # merge kde_errors df.exit_code = df.exit_code.apply(lambda x: 'kde_error' if x in ["KDEException", "PyCall.PyError"] else x) df.exit_code = df.exit_code.apply(lambda x: 'missing_label' if x == "MissingLabelTypeException" else x) df.exit_code = df.exit_code.apply(lambda x: 'missing_train_data' if x == "InexactError" else x) df['qs'] = df[['qs', 
'qs_use_penalty']].apply(lambda x: f"{x[0]}_NP" if not x[1] else x[0], axis=1) df['qs_opt_method'] = df['qs_opt_method'].fillna("None") df_data_gen = pd.DataFrame([[int(s) for s in re.findall(r'\d+', x)] for x in df['file_name'].values], index=df.index, columns=["seed", "x", "num_gaussians", "num_gaussians_train"])[["seed", "num_gaussians", "num_gaussians_train"]] df = pd.concat([df, df_data_gen], axis=1) df_success = df.query('exit_code == "success"').reset_index(drop=True) return df, df_success df_full, df_full_success = load_csv_file(evaluation_file) f"Total experiment run time: {str(datetime.timedelta(seconds=np.sum(df_full.total_run_time)))}."Synthetic datatarget_scenario = 'eval_part_1' df_p1, df_p1_success = df_full.query('scenario == @target_scenario'), df_full_success.query('scenario == @target_scenario')Results for DXNEStmp = df_p1_success.query('qs_opt_method in ["None", "BlackBoxOptimization"]').reset_index(drop=True) tmp.qs = tmp.qs.apply(lambda x: pretty_names['qs'][x]) tmp = tmp.rename(columns={'num_dimensions': 'numdim', 'm_end_quality': 'meq'}) f = pretty_plot(title=f'Average end quality', x="numdim", y="meq", hue="qs", data=tmp, pretty_order=sorted(tmp.numdim.unique()), pretty_hue_order=list(pretty_names['qs'].values()), kind="box", plot_size=(16, 4)) f.axes[0].set_xlabel('Num dimensions') f.axes[0].set_ylabel('End quality') f.tight_layout() f.savefig(f'{plot_output_path}/eval_synthetic.pdf') tmp = df_p1_success.query('qs_opt_method in ["None", "BlackBoxOptimization"]').reset_index(drop=True) tmp.qs = tmp.qs.apply(lambda x: pretty_names['qs'][x]) tmp = tmp.rename(columns={'num_gaussians': 'numg', 'm_end_quality': 'meq'}) f = pretty_plot(title=f'Average end quality', x="numg", y="meq", hue="qs", data=tmp, pretty_order=sorted(df_p1_success.num_gaussians.unique()), pretty_hue_order=list(pretty_names['qs'].values()), kind="box", plot_size=(20, 8)) f.axes[0].set_xlabel('Num gaussians') f.axes[0].set_ylabel('End quality') f.tight_layout()Comparison optimization methodsdf_p1_success.groupby('qs_opt_method')[['time_qs_mean', 'time_qs_max', 'total_run_time']].agg(['mean', 'median', 'sum']) tmp = df_p1_success.query('qs in ["RandomOutlierQss", "ExplorativeMarginQss"]').reset_index(drop=True) tmp.qs = tmp.qs.apply(lambda x: pretty_names['qs'][x]) tmp = tmp.rename(columns={'num_dimensions': 'numdim', 'm_end_quality': 'meq', 'qs_opt_method': 'qsoptm'}) f = pretty_plot(title=f'Average end quality', x="numdim", y="meq", hue="qsoptm", data=tmp, pretty_order=sorted(df_p1_success.num_dimensions.unique()), pretty_hue_order=order['qs_opt_method'], kind="box", plot_size=(20, 8)) f.axes[0].set_xlabel('Num dimensions') f.axes[0].set_ylabel('End quality') f.tight_layout() metric = "m_end_quality" res = df_p1_success.query('qs in ["RandomOutlierQss", "ExplorativeMarginQss"]') res = res.groupby(["num_dimensions", "qs_opt_method"]).mean()[[metric]].reset_index().pivot(index='qs_opt_method', columns='num_dimensions')[[metric]] res = res.T.reset_index(level=0, drop=True).T res = res.reset_index().round(decimals=2) res.columns = [''] + [f'd = {x}' for x in res.columns[1:].values] print(res.to_latex(escape=False, na_rep='-', index=False)) if print_latex else res.set_index(res.columns[0]).style.apply(highlight_max, axis=0)Real world datatarget_scenario = 'eval_part_2_qss' df_p2, df_p2_success = df_full.query('scenario == @target_scenario'), df_full_success.query('scenario == @target_scenario')Data set sizedf_p2.groupby(["data_set"])[['num_points']].max()Results for DXNEStmp = 
df_p2_success.query('qs_opt_method in ["None", "BlackBoxOptimization"]').reset_index(drop=True) tmp.qs = tmp.qs.apply(lambda x: pretty_names['qs'][x]) tmp = tmp.rename(columns={'data_set': 'ds', 'm_end_quality': 'meq'}) f = pretty_plot(title=f'End quality', x="ds", y="meq", hue="qs", data=tmp, pretty_order=sorted(df_p2_success.data_set.unique()), pretty_hue_order=list(pretty_names['qs'].values()), kind="box", plot_size=(20, 8)) f.axes[0].set_xlabel('Data set') f.axes[0].set_ylabel('End quality') plt.xticks(rotation=90) f.tight_layout()Start qualitydf_p2_success.query('qs_opt_method in ["None", "BlackBoxOptimization"]').groupby('data_set').median()[['m_start_quality']]End qualitydf_tmp = df_p2_success.query('qs_opt_method in ["None", "BlackBoxOptimization"]') # For other metrics replace this value, e.g., with "m_quality_range" metric = "m_end_quality" res = df_tmp.query('qs_opt_method in ["None", "BlackBoxOptimization"]').groupby(["data_set", "qs"]).median()[[metric]].reset_index().pivot(index='data_set', columns = 'qs')[[metric]] res = res.T.reset_index(level=0, drop=True).T.round(decimals=2) res = res[list(pretty_names['qs'].keys())] if print_latex: res.columns = [pretty_names['qs'][x] for x in res.columns] res = res.reset_index() res.columns = ['Data set'] + res.columns[1:].values.tolist() print(res.to_latex(escape=False, na_rep='-', index=False)) if print_latex else res.set_index('Data set').style.apply(highlight_max, axis=1)Comparison optimization methodstmp = df_p2_success.query('qs in ["RandomOutlierQss", "ExplorativeMarginQss"]').reset_index(drop=True) tmp.qs = tmp.qs.apply(lambda x: pretty_names['qs'][x]) tmp = tmp.rename(columns={'data_set': 'ds', 'm_end_quality': 'meq', 'qs_opt_method': 'qsoptm'}) f = pretty_plot(title=f'End Quality', x="ds", y="meq", hue="qsoptm", data=tmp, pretty_order=sorted(df_p2_success.data_set.unique()), pretty_hue_order=order['qs_opt_method'], kind="box", plot_size=(20, 8)) f.axes[0].set_xlabel('Data set') f.axes[0].set_ylabel('End quality') plt.xticks(rotation=90) f.tight_layout()Elegant Exception Handling Eyal Trabelsi - thanks for coming out, I hope you enjoyed all the amazing talks by now.- today I am going to talk to you about Elegent Exception Handling. About Me 🙈 - Software Engineer at Salesforce 👷 - Big passion for python, data and performance optimisations 🐍🤖 - Online at [medium.com/@Eyaltra](https://medium.com/@Eyaltra) | [@eyaltra](https://twitter.com/eyaltra) 🌐 Restaurant Recommendation 🍔 - Get user info 👨👩 - Retrieve relevant restaurants 🍕🍗🍩🍔 - Pick the best ones 🍗🍔! pip install typeguard rollbar returns tenacity > /dev/null 2>&1 import contextlib import json import logging import pathlib import os from typing import Union import requests from typeguard import typechecked # Naive code snippets def get_relevant_restaurants(user): base_url = "https://en.wikipedia.org/wiki" return requests.get(f"{base_url}/{user}").content def get_user(path): with open(path, 'r') as json_file: return json.load(json_file)["user"] def pick_best_restaurants(restaurants): pass def get_restaurant_recommendation(path): user = get_user(path) candidates = get_relevant_restaurants(user) return pick_best_restaurants(candidates) get_restaurant_recommendation("MY_AMAZING_FILE.json")We Can Proud of Ourselves 💃 - Implemented restaurant recommendation 💪 - Clean code 💄 Why Exception Handling? 
🤨 - Hardware can fail 🌲 - Software often fails 🚪 - [How complex systems fail](https://www.researchgate.net/publication/228797158_How_complex_systems_fail) 🧩 Unexceptable 😡 Lesson 1: We want to build a fault-tolerant system. Types of errors- errors that can be detected at compile time- errors that can be detected at run time- errors that can be inferred- reproducible errors- non-reproducible errors We want our code to be safe 👷 Exception Handling to the Rescue 👨‍🚒 - Detect errors 🕵 - Do something about them 🔒 Naive Approach 👶 - Log all exceptions 📝 - Ignore all exceptions 🙈def get_restaurant_recommendation(path): try: user = get_user(path) candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates) except BaseException: logging.error("VERY UNINFORMATIVE INFORMATION") raise BaseException("VERY UNINFORMATIVE INFORMATION")Are we done?! 🍰 - Code is very clean 🧹 - Code seems safe 🔒 The lurking problems 🐲 - Unintentional exceptions being caught 😧 - KeyboardInterrupt, as we want the user to be able to kill the program.- SyntaxError, as we want our code to be valid. - Exceptions are not distinguishable 😵 - The invoker of this function can't really distinguish between the different types of errors, and so can't recover from certain expected issues.- For example, if we have a flaky internet connection I would like to retry, but if a file is actually missing I don't. - Not safe 🔓 - Generally it’s better for a program to fail fast and crash than to silence the error and continue running. - The bugs that inevitably happen later on will be harder to debug since they are far removed from the original cause. - Just because programmers often ignore error messages doesn’t mean the program should stop emitting them. - Unfortunately very common 😱 Types of exception handling- EAFP (it’s easier to ask for forgiveness than permission) - LBYL (look before you leap)- Each has its own pros and cons (whether thread-safety or readability)- but both are legitimate in Python, as opposed to some other languages. Take 2: Exception Handling 🎬 - Catch relevant exceptions only ☝ - Recover when possible 🔧def get_restaurant_recommendation(path): try: user = get_config(path)["user"] except FileNotFoundError: logging.error("VERY UNINFORMATIVE INFORMATION") raise except json.JSONDecodeError: logging.error("VERY UNINFORMATIVE INFORMATION") raise candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates)- When the file does not exist, or when the file is not valid JSON, we log the error and let FileNotFoundError or JSONDecodeError propagate.- We re-raise the same exact exception that occurred instead of raising a generic one, and allow the invoker to handle each type differently.- Although this code is far from pretty, it is much safer: we added a default patisserie, and the invoker of this function can distinguish between the different types of errors and handle them differently if needed. Lesson 2: Catch relevant exceptions only. Our code is bad 😭 - Dominated by exception handling - Business logic is not clear Lesson 3: Error handling should not obscure business logic - Error handling is important, but we should strive to make our job easier.- As the Zen of Python states, "If the implementation is hard to explain, it's a bad idea." Take 3: Exception Handling 🎬 A Bit of Makeup 💄 - Sharing exception blocks. - Use the else clause.
- Use the dictionary built-in method Thisdef get_restaurant_recommendation(path): try: user = get_config(path)["user"] except FileNotFoundError: logging.error("VERY UNINFORMATIVE INFORMATION") raise except json.JSONDecodeError: logging.error("VERY UNINFORMATIVE INFORMATION") raise candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates)Becomesdef get_restaurant_recommendation(path): try: user = get_user(path) except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY UNINFORMATIVE INFORMATION") raise else: candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates)- First, since we handle both FileNotFoundError and JSONDecodeError in the same manner, they can "share the except block", as an except clause may name multiple exceptions as a parenthesized tuple.- Secondly, we can use the else clause, which runs when the try block executes without raising an exception.- Thirdly, we use the dictionary built-in method get, which allows us to define default values. Lesson 4: Frequent flows probably have a clean existing solution. Suppressing Exceptions 🤫 - There is another common flow for exception handling I want to cover: suppressing exceptions using suppress- Supported from Python >= 3.5def run_unstoppable_animation(): pass**This**try: os.remove('somefile.pyc') except FileNotFoundError: pass try: run_unstoppable_animation() except KeyboardInterrupt: pass**Becomes**from contextlib import suppress with suppress(FileNotFoundError): os.remove('somefile.pyc') from contextlib import suppress with suppress(KeyboardInterrupt): run_unstoppable_animation()Reasons for errors- The obvious one is that something exceptional happened.- As a control-flow mechanism.- Can be triggered due to a bug in our code. Still dominated by exception handling code 😟 Take 3: Exception Handling 🎬 - Separate business logic from exception handling code ✂ - Handle exceptions in another layer 📚 ![](https://vignette.wikia.nocookie.net/memepediadankmemes/images/8/80/Acb.jpg/revision/latest/scale-to-width-down/340?cb=20180822064733) Thisdef get_user(path): with open(path, 'r') as json_file: return json.load(json_file)\ .get("user", "default_user") def get_restaurant_recommendation(path): try: user = get_user(path) except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY UNINFORMATIVE INFORMATION") raise else: candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates)Becomesdef get_user(path): with open(path, 'r') as json_file: try: user = json.load(json_file)\ .get("user", "default_user") except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY UNINFORMATIVE INFORMATION") raise else: return user def get_restaurant_recommendation(path): user = get_user(path) candidates = get_relevant_restaurants(user) pick_best_restaurants(candidates)Lesson 5: Push the exception handling down/up in the abstraction level Are we completely safe now? 👷 Silent Errors 🔇 - Do not crash the code 🤯 - Deliver incorrect results 😠 - Make matters worse 🤬 Contract 📜 - Output/Input types - Output/Input values - Postconditions/Preconditions - Side-effects/Invariants Vanilla Exceptions 🍧 - All the validations are supported ✅ - Happens at runtime ✅ but not at compile time ❌ - Not clean ❌ Why not assertions?
❌ - Raises the wrong exception type 😮 - Can be compiled away 😥 Type Hints 🔍 - Support validating input/output types ✅ - Don't support other validations ❌ - Support both runtime ✅ / compile time ✅ - Clean and elegant ✅ mypy for compile timedef get_user(path: Union[str, pathlib.PurePath]) -> str: with open(path, 'r') as json_file: try: data = json.load(json_file) except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY INFORMATIVE INFORMATION") raise else: return data.get("user", "default_user")typeguard for runtime@typechecked def get_user(path: Union[str, pathlib.PurePath]) -> str: with open(path, 'r') as json_file: try: data = json.load(json_file) except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY INFORMATIVE INFORMATION") raise else: return data.get("user", "default_user")Contract Testing Libraries 📜 - All the validations are supported ✅ - Happens at runtime ✅ but not at compile time ❌ - Clean and elegant ✅ - No mature/maintained option ❌ - [icontract](https://github.com/Parquery/icontract)- not mature 🍼 - [contracts](https://github.com/deadpixi/contracts)- not maintained 🤕 Lesson 6: Protect your code from silent errors. There are still problems lurking 🐉def get_relevant_restaurants(user): base_url = "cool_restaurants.com" resp = requests.get(f"{base_url}/{user}") resp.raise_for_status() return resp.contentUnstable environment 🤪 - Your network might be down 😑 - The server might be down 😣 - The server might be too busy and you will face a timeout 😭def get_relevant_restaurants(user): base_url = "cool_restaurants.com" allowed_retries = 5 for i in range(allowed_retries): try: resp = requests.get(f"{base_url}/{user}") resp.raise_for_status() except requests.ConnectionError: if i == allowed_retries - 1: raise else: return resp.contentThere must be a better way 😇 - Decorators extend our functions' capabilities beyond their core intent 🎊 - Context managers wrap enter and exit logic around a given resource 🌉 - Common use cases are already implemented 💪from functools import wraps def retry(exceptions, allowed_retries=5): def callable(func): @wraps(func) def wrapped(*args, **kwargs): for i in range(allowed_retries): try: res = func(*args, **kwargs) except exceptions: continue else: return res return wrapped return callable @retry(exceptions=requests.ConnectionError) def get_relevant_restaurants(user): base_url = "cool_restaurants.com" resp = requests.get(f"{base_url}/{user}") resp.raise_for_status() return resp.content import tenacity @tenacity.retry(retry=tenacity.retry_if_exception_type(ConnectionError)) def get_relevant_restaurants(user): base_url = "cool_restaurants.com" resp = requests.get(f"{base_url}/{user}") resp.raise_for_status() return resp.contentUseful use cases 🧠- **Decorator**: [ratelimit](https://github.com/tomasbasham/ratelimit), [Retry](https://github.com/jd/tenacity), [logger.catch](https://github.com/Delgan/loguru)- **Context manager**: [Database Connections](), [Transactions](), [Temporary Files]() and [Output Redirections]() - Important note: retries can also be handled in the request itself by [writing an adapter](https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/), but for the example's sake I won't use that here. Lesson 7: Use decorators and context managers Python hooks 🎣 - Python has built-in hooks for various events - [sys.excepthook](https://dev.to/joshuaschlichting/catching-every-single-exception-with-python-40o3) for uncaught exceptions - Doesn't require modifying existing code. sys's excepthook example!
🎣 - Uncaught exceptions print a traceback to STDERR before closing - Unacceptable in a production environment - Graceful exit by notifying an incident systemimport sys import rollbar rollbar.init("Super Secret Token") def rollbar_except_hook(exc_type, exc_value, traceback): rollbar.report_exc_info((exc_type, exc_value, traceback)) sys.__excepthook__(exc_type, exc_value, traceback) sys.excepthook = rollbar_except_hookWARNING:rollbar:Rollbar already initialized. Ignoring re-init.Useful use cases 🧠 - [Format Differently](https://dev.to/joshuaschlichting/catching-every-single-exception-with-python-40o3)- [Redirect To Incident System](https://sweetcode.io/using-rollbar-capturing-logging-python-exceptions/)- [Multi Threading Behaviour](https://www.scrygroup.com/tutorial/2018-02-06/python-excepthook-logging/)- [Search Stackoverflow](https://gist.github.com/WoLpH/88e3222ac57d9c3bff113ff83afddda4/) 😛😛😛 - We can format the exceptions differently, to provide more/less information.- We can redirect exceptions to an incident system like Rollbar or PagerDuty.- Since threading/multiprocessing have their own unhandled-exception machinery, which is a bit customized so that no unhandled exception reaches the top level, we might want to override it, to support KeyboardInterrupt for example.- Search Stackoverflow for the exception that was raised Lesson 8: Python has some useful built-in hooks Exception Components 📑 - Exception message 💬 - Exception type 🍅🍇🍆 - Exception cause 🤯 Exception Types 🍅🍇🍆 - Help distinguish between different exceptions - Help emphasize our intent - Built-in and custom exceptions Built-in Exception Types 🍅🍇🍆 - Dozens of built-in exceptions - Well documented, we can use some Stackoverflow magic - Should use built-in exceptions Custom Exception Types 🍅🍇🍆 - Emphasize our intent - Distinguish between different exceptions. Let's say we have a ValueError and we want to recover differently for TooBig/TooSmall. - Group different exceptions. - Wrap third-party APIs. - When we wrap a third-party API we minimize our dependency on it: for example, upon recovery you shouldn't have to import exceptions from your dependencies, such as requests.exceptions- Also, the users of your library do not need/want to know about the implementation details. Wrapping third party example 👀 - get_restaurant_recommendation can raise requests.ConnectionError - Recovering in get_restaurant_recommendation - Requires importing requests for its exceptions Lesson 9: Pick the right exception types and messages. Exception __cause__ 🤯 - __cause__ indicates the reason for the error - We can override __cause__ to replace the exception raised **Python default behavior** - When a modern Python developer catches an exception and raises a new one to replace it, they can enjoy seeing the complete information for both tracebacks. - This is very helpful for debugging, and is a win for everybody.try: 1/0 except ZeroDivisionError: raise**Replace exception type with both traces**try: 1/0 except ZeroDivisionError as e: raise Exception from e**Replace exception type with only one trace**try: 1/0 except ZeroDivisionError as e: raise Exception from NoneLesson 10: Replace exceptions using __cause__.
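To make the __cause__ chain concrete, here is a minimal runnable sketch (not from the original talk; RecommendationError and parse_ratio are hypothetical names) showing how a caller can still reach the wrapped low-level error through __cause__:

# Hypothetical wrapper type, used only for this illustration
class RecommendationError(Exception):
    pass

def parse_ratio(text):
    try:
        return 1 / int(text)
    except ZeroDivisionError as e:
        # 'raise ... from e' stores the original exception in __cause__
        raise RecommendationError("could not compute ratio") from e

try:
    parse_ratio("0")
except RecommendationError as e:
    # The wrapper hides the implementation detail, yet the chained
    # original error stays reachable for callers that need it
    print(type(e.__cause__))  # <class 'ZeroDivisionError'>

This is the same mechanism that makes wrapping third-party APIs (Lesson 9) practical: the invoker depends only on your exception type, but nothing about the root cause is lost.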
Still not perfect 💯 - Hard to tell what exceptions can be thrown - Hard to tell where exceptions will be handled - No static analysis Functional Exception Handling to the rescue 🚔 - Use success/failure container values - Functions are typed, and safe - Railway oriented programming - [returns library](https://github.com/dry-python/returns#result-container) ![](https://image.slidesharecdn.com/railway-oriented-programming-slideshare-140312155941-phpapp01/95/railway-oriented-programming-75-638.jpg?cb=1427456657)def pick_best_restaurants(user: str, candidates: List[str]) -> List[str]: validate_user(user) best_candidates = heapq.nlargest(5, candidates) update_df(user, best_candidates) send_email() def get_user(path): with open(path, 'r') as json_file: try: user = json.load(json_file)\ .get("user", "default_user") except (FileNotFoundError, json.JSONDecodeError): logging.error("VERY UNINFORMATIVE INFORMATION") raise else: try: send_email(user) except ConnectionError: logging.error("VERY UNINFORMATIVE INFORMATION") raise else: return userLesson 11: For complicated use cases consider the functional way. Lesson 12: Split nested try/except blocks into separate try blocks - Avoid nested exception handling - Split into multiple exception blocks - The Zen of Python states "Flat is better than nested". Not all programs are made equal 👯 - Extremely reliable ✈ ✨ - Highly reliable 🚘 - Reliable 💳 - Dodgy 📱 - Crap 💩 Lesson 13: We want to build fault tolerance to a certain degree Sensitive information 🕵 - Messages will be spread far and wide 🇫🇷🇺🇸🇫🇷 through logging, reporting, and monitoring software. - Personal data privacy 🕵 In a world where regulation around personal data is constantly getting stricter. - Never reveal your weaknesses, bad actors are everywhere 👺 - You can never be too careful 🤓def login(user): raise CommonPasswordException(f"password: {password} is too common")***Your Name Here***import numpy as np import matplotlib.pyplot as plt import scipy.stats as stats %matplotlib inline from sklearn.neighbors import KernelDensityPrelab 9 (Last!)***This exercise builds on the ideas that you encountered in Lab 8, so you should complete Lab 8 before beginning it.*** Let's be explicit about what you did in Lab 8, namely: 1. You ***measured*** the means of two arrays of values (Hubble's distances and recessional velocities) and computed their covariance matrix (which captures both the standard deviations of each quantity and the correlation coefficient of their relationship to one another). 2. You used these computed values to create a statistical ***model*** of the data (a bivariate gaussian with parameters $\mu_d, \mu_v, \sigma_d, \sigma_v, r$ derived from the data). 3. You ***synthesized samples*** from this data model in a Monte Carlo simulation and computed two statistics with real physical meaning, namely: (a) the correlation coefficient r, which reveals the strength of the relationship between recessional velocity and distance, and (b) the slope of the best-fit line (also known as the Hubble constant $H_0$), which describes how a change in one variable affects the other, on average. 4. You explored the variation in this ***synthesized sampling distribution*** by plotting histograms of the best-fit values for r and $H_0$ from all of your random draws of 24 galaxies. In a Bayesian framework, we might say that these histograms are visualizations of the Likelihood, or a way of looking at the strength of the evidence in favor of the experimental results (Hubble's data) under the (bivariate gaussian) model.
We could turn this into a posterior probability by multiplying it by some prior and dividing by a normalization constant (because probabilities always sum to 1). Indeed, this is what you will do below. 5. You then completed a classical hypothesis test. At the end of this prelab, you will reimagine that as a Bayesian hypothesis test (should you choose to do so in Part E). Part AFirst, bring over the code that you used to complete steps 1-4 of Lab 8. Add a comment to the top of each cell describing the big picture of what it contains. Part BBelow these instructions is a cell that makes a "kernel density estimate", which is a way of turning your histogram into a continuous function with a defined value at a range of x values. You can use this smooth function as a Likelihood function in a Bayesian framework. Comment each line in the provided code and play around with the tunable parameters (especially the bandwidth and kernel keywords). Once you feel you understand what the code is doing in general, describe it in the cell provided, including the effects of changing the values of the tunable parameters. Use visualizations to support your arguments. Part C1. Compute the area under the entire KDE by summing the appropriate array. What is it equal to and why? 2. Write a function that will sum under the KDE for an arbitrary range of x values and return that sum. Note the input should be x values, ***not*** indices, so you will need to translate values (e.g. $x_1$=100, $x_2$=300) to their indices (or nearest index, e.g. i=0, j=200) within the discrete x array so that you can sum over the correct range of indices in the prob array. Note that what you're doing here is essentially computing the value of the finite integral $$prob = norm\int_{x_1}^{x_2}L(x)dx$$ where L(x) is the likelihood, prob is the probabilty of obtaining a value between $x_1$ and $x_2$, and norm is a normalization factor, where necessary. Part DNow let's add in some priors.1. First, devise an ***uninformative prior*** over the same range of x values as the KDE and plot it. *Hint: some manipulation may be required to make sure your prior is a plot of prior probabilities. What special property do probabilties have under summing?)* 2. Now, let's develop two different ***informative priors*** according to the following scenarios: a) A similar sample of galaxy distances and recessional velocities compiled by Dr. is uncovered. Let's not assume anything crazy about these data - just that he also measured 24 random galaxies drawn from roughly the same bivariate gaussian distribution as we assumed Hubble's came from (so basically let's assume the Hubble model is a correct representation of the true universe). Of course, in his case, he has drawn a different random realization of 24 points from this distribution. To simulate this, pick one set of distances and recessional velocities from your Monte Carlo simulator that has an $H_0$ value a little different from Hubble's. Use this information to create a prior for $H_0$ assuming Dr. Bubble's data is all that we know about. Use the values of recessional velocity and distance for your chosen random realization as a starting point, and the code from Part A to draw samples from a slightly different bivariate gaussian centered on the Bubble data values. Use it to make a histogram and turn this into a Likelihood, which you will use as a prior. b) Another mysery scientist (you can name her if you like) has completed a comparable study that is MORE informative than Hubble's data. 
It has roughly the same average values for recessional velocities and distances, but the sampling distribution is narrower. (how can you accomplish this? There are several ways!). In this case, let's assume our Hubble data are the prior and these data are the Likelihood. Describe the technique you used to make this prior more informative than the Hubble data and how you know you acheived your goal. *Tips:* * *In most cases, you will want to use the specifications for the informative prior to create a new sampling distribution histogram using your code in A, then convert it to a kde then likelihood as you did in part B* * *If creating a new covariance matrix, remind yourself of what the values in the covariance matrix represent and remember that the matrix is symmetric on either side of the diagonal, so you may have to change two values instead of just one)* * *You may wish to show your histograms for the original (Hubble) and new Monte Carlo models side by side to be sure that you understand the results of your modifications* 3. For each of these priors (1, 2a, and 2b), make a plot showing the prior, likelihood, and posterior (derived from the product of the prior and the likelihood, but with a little additional manipulation - see if you can figure out what is missing on your own. Bayes' theorem may provide some hints) on the same plot. The plot should include a legend and axis labels. You may wish to use fill_beteween as in the example below and an alpha (transparency) value so that you can visualize overlap between the three curves via combined colors (e.g. the overlap region between a red likelihood and a blue posterior is a purple color) 4. In a markdown cell, summarize the "takeaway messages" of the three plots you made in Part 3. What do they reveal about priors, likelihoods, and posteriors? [Optional - Challenge Exercise] Part ECalculate Bayes' factor for the following two hypotheses, implementing one of the informative priors from the example above: 1. The true value of $H_0$ is within 1 standard deviation of the measured value. 2. The true value of $H_0$ is NOT within 1 standard deviation of the measured value.## Part A code placeholder. Make sure to comment each cell with the "big picture" #Part B - sample code to comment #hlist is the array of $H_0$ values returned by your Monte Carlo simulation x_d=np.arange(200,700) kde = KernelDensity(bandwidth=10.0, kernel='gaussian') kde.fit(hlist[:, None]) logprob = kde.score_samples(x_d[:, None]) prob=np.exp(logprob) plt.fill_between(x_d, prob, alpha=0.5) #Part B exploration (copy and paste the cell above and manipulate it to explore what it's doing)Part B explanation. 
Make sure to use plots to support your arguments.#Part C #1 CodePart C 1 Explanation#Part C #2 Function definition - make sure to comment #Part C #2 Function test statements #Part D #1 - Define the uninformative prior #Part D #1 - Plot the uninformative prior #Part D #2 - Define the first informative prior (Edgar Bubble data) #Part D #2 - Define the second informative prior (more informative)Part D 2 - informative prior 2 explanation#Part D #2 (optional but highly suggested) - visualize Hubble, Bubble, and more informative sampling histograms side by side #Part D #3 - Plot of prior, likelihood, and posterior probability for uninformative prior #Part D #3 - Plot of prior, likelihood, and posterior probability for informative prior #1 #Part D #3 - Plot of prior, likelihood, and posterior probability for informative prior #2Reflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func, inspect Base = automap_base() # create engine to hawaii.sqlite engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base.prepare(engine, reflect=True) # View all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine)Exploratory Precipitation Analysis# Find the most recent date in the data set. end_date = session.query(Measurement.date).order_by(Measurement.date.desc())[0][0] print(end_date) # Create the inspector and connect it to the engine inspector = inspect(engine) columns = inspector.get_columns('Measurement') for column in columns: print(column["name"], column["type"]) columns = inspector.get_columns('Station') for column in columns: print(column["name"], column["type"]) # Design a query to retrieve the last 12 months of precipitation data and plot the results. # Starting from the most recent data point in the database. q = session.query(Measurement).order_by(Measurement.date.desc()).first() end_date_obj = dt.datetime.strptime(q.date,'%Y-%m-%d') # Calculate the date one year from the last date in data set. 
start_date_obj = dt.datetime.strptime(str(end_date_obj - dt.timedelta(days=365)), '%Y-%m-%d %H:%M:%S').date() print(start_date_obj) # Perform a query to retrieve the data and precipitation scores sel = [Measurement.date, Measurement.prcp] q = session.query(*sel).filter(Measurement.date >= start_date_obj).all() # Save the query results as a Pandas DataFrame and set the index to the date column filtered_df = pd.DataFrame(q) filtered_df = filtered_df.rename(columns={0:"Measurement Date", 1:"Precipitation"}) filtered_df.sort_values(by=["Measurement Date"],inplace=True) filtered_df=filtered_df.set_index("Measurement Date") filtered_df.head(5) # Use Pandas Plotting with Matplotlib to plot the data dates = filtered_df.index.to_list() precipitations = list(filtered_df["Precipitation"]) plt.subplots(figsize=(14, 9)) plt.bar(dates, precipitations, align='center',alpha=.8, label='Count',width=4) plt.xlabel('Dates',fontsize = 15) plt.ylabel('Inches', fontsize = 15) plt.xticks(fontsize=13, rotation=45) plt.legend() majorticks = np.arange(0,380,40) plt.xticks(majorticks,rotation=90,fontsize=13,) plt.yticks(fontsize=13) plt.show() # Use Pandas to calcualte the summary statistics for the precipitation data filtered_df.describe()Exploratory Station Analysis# Design a query to calculate the total number stations in the dataset station_count = len(session.query(Station.id).all()) print(station_count) q = session.query(Station).all() for x in q: print(x.name) # Design a query to find the most active stations (i.e. what stations have the most rows?) # List the stations and the counts in descending order. records = session.query(Station.id,Station.station,func.count(Station.id)).\ filter(Measurement.station == Station.station).\ group_by(Station.id,Station.name).\ order_by(func.count(Station.id).desc()).\ all() for record in records: print(record) # Most active station id from the previous query, most_active_station = records[0][1] print(most_active_station) # Calculate the lowest, highest, and average temperature. 
q = session.query(func.min(Measurement.tobs).label("lowest_temp"), func.max(Measurement.tobs).label("highest_temp"), func.avg(Measurement.tobs).label("avg_temp")).\ filter(Measurement.station == most_active_station).first() print(f"Data for most active station '{most_active_station}'") print(f"Lowest temperature is {q.lowest_temp}") print(f"Highest temperature is {q.highest_temp}") print(f"Average temperature is {round(q.avg_temp,2)}") # Using the most active station id # Query the last 12 months of temperature observation data for this station and plot the results as a histogram end_date = session.query(Measurement.date).\ filter(Measurement.station == Station.station).\ filter(Measurement.station == most_active_station).\ order_by(Measurement.date.desc()).\ first() end_date_obj = dt.datetime.strptime(end_date.date,'%Y-%m-%d') start_date_obj = dt.datetime.strptime(str(end_date_obj - dt.timedelta(days=365)), '%Y-%m-%d %H:%M:%S').date() records = session.query(Measurement.date,Measurement.tobs).\ filter(Measurement.station == Station.station).\ filter(Measurement.station == most_active_station).\ filter(Measurement.date >= start_date_obj).\ order_by(Measurement.date).\ all() mst_actv_ws_df = pd.DataFrame(records) mst_actv_ws_df = mst_actv_ws_df.rename(columns={0:"Measurement Date", 1:"tobs"}) mst_actv_ws_df=mst_actv_ws_df.set_index("Measurement Date") mst_actv_ws_df.plot(kind = "hist", bins = 12,figsize=(14,9)) plt.xlabel("Temperature") plt.ylabel("Frequency")Close session# Close Session session.close()Mod 1 balancedBinaryTree You are given a binary tree and you need to write a function that can determine if it is height-balanced. A height-balanced tree can be defined as a binary tree in which the left and right subtrees of every node differ in height by a maximum of 1. Example 1: Given the following tree [5,10,25,None,None,12,3]: 5 / \ 10 25 / \ 12 3 return True. Example 2: Given the following tree [5,6,6,7,7,None,None,8,8]: 5 / \ 6 6 / \ 7 7 / \ 8 8 return False. [execution time limit] 4 seconds (py3) [input] tree.integer root [output] boolean# Binary trees are already defined with this interface: # class Tree(object): # def __init__(self, x): # self.value = x # self.left = None # self.right = None def depth(root): if not root: return 0 return max(depth(root.left), depth(root.right)) + 1 def balancedBinaryTree(root): if not root: return True left_depth = depth(root.left) right_depth = depth(root.right) return (abs(left_depth - right_depth) <= 1) and balancedBinaryTree(root.left) and balancedBinaryTree(root.right)minimumDepthBinaryTree You are given a binary tree and you are asked to write a function that finds its minimum depth. The minimum depth can be defined as the number of nodes along the shortest path from the root down to the nearest leaf node. As a reminder, a leaf node is a node with no children. Example: Given the binary tree [5,7,22,None,None,17,9], 5 / \ 7 22 / \ 17 9 your function should return its minimum depth = 2. 
[execution time limit] 4 seconds (py3) [input] tree.integer root [output] integer# Binary trees are already defined with this interface: # class Tree(object): # def __init__(self, x): # self.value = x # self.left = None # self.right = None def minimumDepthBinaryTree(root): if root is None: return 0 if not root.left and not root.right: return 1 if not root.left: #call again on right return minimumDepthBinaryTree(root.right)+1 if not root.right: #call again on left return minimumDepthBinaryTree(root.left)+1 # if none of these are true anymore, exit recursion & return the minimum return min(minimumDepthBinaryTree(root.left), minimumDepthBinaryTree(root.right))+1Mod 2 traverseTree Given a binary tree of integers t, return its node values in the following format: The first element should be the value of the tree root; The next elements should be the values of the nodes at height 1 (i.e. the root children), ordered from the leftmost to the rightmost one; The elements after that should be the values of the nodes at height 2 (i.e. the children of the nodes at height 1) ordered in the same way; Etc. Example For t = { "value": 1, "left": { "value": 2, "left": null, "right": { "value": 3, "left": null, "right": null } }, "right": { "value": 4, "left": { "value": 5, "left": null, "right": null }, "right": null } } the output should be traverseTree(t) = [1, 2, 4, 3, 5]. This t looks like this: 1 / \ 2 4 \ / 3 5def traverseTree(t): if not t: return [] result = [] queue = [] queue.append(t) while len(queue) != 0: node = queue.pop(0) result.append(node.value) if node.left: queue.append(node.left) if node.right: queue.append(node.right) return resultbinaryTreeInOrderTraversal You are given a binary tree. Write a function that returns the binary tree's node values using an in-order traversal. Example: Input: [2,None,3,4] 2 \ 3 / 4 Output: [2,4,3]def helper(root, res): if not root: return helper(root.left, res) res.append(root.value) helper(root.right, res) def binaryTreeInOrderTraversal(root): result = [] helper(root, result) return resulttreePaths Given a binary tree of integers, return all the paths from the tree's root to its leaves as an array of strings. The strings should have the following format: "root->node1->node2->...->noden", representing the path from root to noden, where root is the value stored in the root and node1,node2,...,noden are the values stored in the 1st, 2nd,..., and nth nodes in the path respectively (noden representing the leaf).
Example For t = { "value": 5, "left": { "value": 2, "left": { "value": 10, "left": null, "right": null }, "right": { "value": 4, "left": null, "right": null } }, "right": { "value": -3, "left": null, "right": null } } The given tree looks like this: 5 / \ 2 -3 / \ 10 4# Binary trees are already defined with this interface: # class Tree(object): # def __init__(self, x): # self.value = x # self.left = None # self.right = None def treePaths(t): # list to store path path = [] result = [] getPath(t, path, 0, result) return result def getPath(t, path, pathLen, result): if t is None: return if(len(path) > pathLen): # replace element in list path[pathLen] = t.value else: #add to end of list path.append(t.value) pathLen = pathLen + 1 if t.left is None and t.right is None: addString(path, result, pathLen) else: getPath(t.left, path, pathLen, result) getPath(t.right, path, pathLen, result) def addString(ints,res, pathLen): s = "" for i in range(pathLen): s+=(str(ints[i])+"->") res.append(s[:-2]) return resLecture Recursive Max Depth# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right ### https://leetcode.com/problems/maximum-depth-of-binary-tree/ class Solution: def maxDepth(self, root: TreeNode) -> int: self.maxDepth = 0 self.maxDepthHelper(root, 1) return self.maxDepth def maxDepthHelper(self, root, currDepth): if root.left == None and root.right == None: if currDepth > self.maxDepth: self.maxDepth = currDepth return self.maxDepthHelper(root.left, currDepth+1) self.maxDepthHelper(root.right, currDepth+1)Iterative Max Depth# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right from collections import deque class Solution: def maxDepth(self, root: TreeNode) -> int: if root == None: return 0 stack = deque() stack.append((root, 1)) maxDepthFound = 1 while len(stack) > 0: curr = stack.pop() currNode, currDepth = curr[0], curr[1] if currNode.left == None and currNode.right == None: if currDepth > maxDepthFound: maxDepthFound = currDepth if currNode.left != None: stack.append((currNode.left, currDepth + 1)) if currNode.right != None: stack.append((currNode.right, currDepth + 1)) return maxDepthFoundMod 3 Graph Class codeclass Vertex: def __init__(self, value): self.value = value self.connections = {} def __str__(self): return str(self.value) + ' connections: '+str([x.value for x in self.connections]) def add_connection(self, vert, weight = 0): self.connections[vert] = weight def get_connections(self): return self.connections.keys() def get_value(self): return self.value def get_weight(self, vert): return self.connections[vert] class Graph: def __init__(self): self.vertices = {} self.count = 0 def __contains__(self, vert): return vert in self.vertices def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, value): self.count += 1 new_vert = Vertex(value) self.vertices[value] = new_vert return new_vert def add_edge(self, v1, v2, weight = 0): if v1 not in self.vertices: self.add_vertex(v1) if v2 not in self.vertices: self.add_vertex(v2) self.vertices[v1].add_connection(self.vertices[v2], weight) def get_vertices(self): return self.vertices.keys() if 5 not in [1,3]: print('not') else : print('in')notYou are given a directed acyclic graph (DAG) that contains N nodes. Write a function that can find all the possible paths from node 0 to node N - 1. 
graph[a] is a list of all nodes b for which the edge a -> b exists. Example: Input: graph = [[1, 2],[3],[3],[4],[]] Output: [[0,1,3,4], [0,2,3,4]] Note: The results must be returned in sorted order. You can use any built-in sort method on the results array at the end of your function before returning.from collections import defaultdict, deque # defaultdict was my downfall. import random # lessons learned, more practice # needed with graphs and deque # def append_value(dict_obj, key, value): # # Check if key exist in dict or not # if key in dict_obj: # # Key exist in dict. # # Check if type of value of key is list or not # if not isinstance(dict_obj[key], list): # # If type is not list then make it list # dict_obj[key] = [dict_obj[key]] # # Append the value in list # dict_obj[key].append(value) # else: # # As key is not in dict, # # so, add key-value pair # dict_obj[key] = value # def convert(a): # ''' # converts a (oddly formatted) graph into an adjancency matrix # ''' # adjList = defaultdict(set) # for i in range(len(a)): # for j in a[i]: # adjList[j].add(i) # return adjList # visited = set() # ''' # initializes global var visited # ''' # def dftRecursive(start, graph, result): # visited.add(start) # for path in start: # if path not in visited: # dftRecursive(path, graph, result) # result.append(visited) # def csFindAllPathsFromAToB(graph): # result = [] # aList = convert(graph) # start = aList[0] # dftRecursive(start, aList, result) # return result def csFindAllPathsFromAToB(graph): stack = deque() stack.append((0, [0])) #Starts stack with the starting node and no path res = [] destinationNode = len(graph) - 1 #the index of the last element while len(stack) > 0: curr = stack.pop() #rmoves/assigns most recent node added to curr currNode = curr[0] #assigns 1st element in node (value) currPath = curr[1] #assigns 2nd element in node (path) for neighbor in graph[currNode]: #iterates over list of neighboring nodes newPath = currPath.copy() # makes a copy of the path so additional newPath.append(neighbor) # neighbors can be added for each path # while not changing the path for the other # neighbors... so [0,1,2] can become # [0,1,2,3] or [0,1,2,4] for the next # iteration if neighbor == destinationNode: # when reaching the emd res.append(newPath) #add path constructed to resulting array else: stack.append((neighbor, newPath)) # continue looping by pushing new # path additions and neighbor # value to the stack res.sort() return resMod 4 csFriendCircles There are N students in a baking class together. Some of them are friends, while some are not friends. The students' friendship can be considered transitive. This means that if Ami is a direct friend of Bill, and Bill is a direct friend of Casey, Ami is an indirect friend of Casey. A friend circle is a group of students who are either direct or indirect friends. Given a N*N matrix M representing the friend relationships between students in the class. If M[i][j] = 1, then the ith and jth students are direct friends with each other, otherwise not. You need to write a function that can output the total number of friend circles among all the students. Example 1: Input: [[1,1,0], [1,1,0], [0,0,1]] Output: 2 Explanation: The 0th and 1st students are direct friends, so they are in a friend circle. The 2nd student himself is in a friend circle. So return 2. Input: [[1,1,0], [1,1,0], [0,0,1]] Output: 2 Explanation: The 0th and 1st students are direct friends, so they are in a friend circle. The 2nd student himself is in a friend circle. So return 2. 
Example 2: Input: [[1,1,0], [1,1,1], [0,1,1]] Output: 1 Explanation: The 0th and 1st students are direct friends, the 1st and 2nd students are direct friends, so the 0th and 2nd students are indirect friends. All of them are in the same friend circle, so return 1. working solutiondef adjList(matrix): cur = 0 res = [] adjList = [] for i in range(len(matrix)): res.append([] * len(matrix)) #creates indexes to fill later with values instead of 1's while cur < len(matrix): for i in range(len(matrix[cur])): if matrix[cur][i] == 1: res[cur].append(i) cur += 1 return res def hlp(aList, n, visited, i): for x in aList: if x not in visited: visited.append(x) hlp(aList, n, visited, x) def csFriendCircles(friendships): n = len(friendships) aList = adjList(friendships) visited = [] res = 0 for i in range(n): # the outer loop ensures unconnected nodes get traversed if i not in visited: visited.append(i) hlp(aList, n, visited, i) res += 1 return res #whew.. completedwithout conversion to adjListdef hlp(friendships, visited, i): for x in range(len(friendships[i])): if x not in visited and friendships[i][x] == 1: visited.append(x) hlp(friendships, visited, x) def csFriendCircles(friendships): n = len(friendships) visited = [] res = 0 for i in range(n): if i not in visited: visited.append(i) hlp(friendships, visited, i) res += 1 return res sum([10, 5, 15])Python follows LEGB Rule: Local Enclosing Global Built-in#Local example lambda num:num**2 # Enclosing functions local example name = 'THIS IS A GLOBAL STRING' # global def greet(): name = 'sammy' # enclosed def hello(): name = 'I am a local' # local print('Hello ' +name) hello() greet() # when greet func is called - it assigns name to sammy ( internally ) follows LEGB rule # built in function len('sammy') sum([4,3]) x = 50 # scope is Globally def func(): global x # this tells python to go in global level and grab global 'x' print(f'X is {x}') # local reassignment on a global variable x = 'new value' # this x scope is only in this function print(f'I just locally change global x to be {x}') func() print(x) # global x was changednew valueString FormattingThere are four main ways to do jobs of string formatting:1. `+`2. `% formatting`3. `format` function4. `f-strings` `+`first_name = 'Faris' last_name = 'Shi' sport_name = 'basketball' # it is ok print(first_name + ' ' + last_name) # it is not recommended print(first_name + ' ' + last_name + ' love playing ' + sport_name)`% formatting`Before Python 3.6, we always concatenate string by using `% formatting`.But actually, `% formatting` is not recomended by [Python Doc](https://docs.python.org/3/library/stdtypes.htmlprintf-style-string-formatting) due to:1. it is verbose when a lot need to be concatenated together. can lead to error and makes code unreadable 2. it can not display tuples or dictionaries correctly# it is ok print('%s %s love playing %s'%(first_name, last_name, sport_name)) #it is not recommended, it is not easy to read it. 
career = 'developer' language = 'Python' print('Hello %s %s, you are a %s, your favorite langauge is %s, and also love playing %s'%(first_name, last_name, career, language, sport_name))`format` functionstring.format function is the improvement of `% formatting` that it uses normal function call syntax and can be extensible through implementing `__format__` function to make `object` converting to stringHowever, it also have the same issue(readability) with `% formatting` unless you use dictionary to fix that.## normal way print('Hello {} {} love playing {}'.format(first_name, last_name, sport_name)) ## can specify the order by referecing the index print('Hello {1} {2} love playing {0}'.format(sport_name, first_name, last_name)) #can specifiy the name like keyword arguments print('Hello {first_name} {last_name} love playing {sport_name}'.format(sport_name=sport_name, first_name=first_name, last_name=last_name)) ## and we can operate dictionary person = {'name': 'Faris', 'age': 30} print('Hello, {}. You are {}'.format(person["name"], person["age"])) ## can do it like **kwargs print('Hello, {name}. You are {age}'.format(**person))`f strings`It also uses the `__format__` protocol, and makes string-formatting easier. Just need keep `f` keyword in the beginning.`f-strings` is faster than both `format` function and `% formatting` since it is evaluated at runtime rather than constant values.print(f'Hello {first_name} {last_name} love playing {sport_name}')Load Datafeatures = submission_tools.read_all_features( '/scratch/utrerf/trigger-attack/trojai_submission/scratch/RHPP1CJR0B') features.head() mean_std = features.groupby(['location', 'source_label', 'target_label'], as_index=False).agg({'test_loss':['mean', 'std']}) columns = [] for i in mean_std.columns: result = i[0] if len(i[1]) > 0: result += f'_{i[1]}' columns.append(result) mean_std.columns = columns merged_df = pd.merge(features, mean_std, on=['location', 'source_label', 'target_label'], how='left') merged_df['normalized_test_loss'] = (merged_df['test_loss'] - merged_df['test_loss_mean'])/merged_df['test_loss_std'] merged_df.head()Visualize Dataidx = merged_df.groupby(['model_name'])['normalized_test_loss'].transform(min) == merged_df['normalized_test_loss'] min_features = merged_df[idx] min_features['normalized_test_loss'] = min_features['normalized_test_loss'].astype(float) min_features['task'] = min_features['task'].str.upper() sns.boxplot(data=min_features, x='task', y='normalized_test_loss', hue='poisoned') # plt.ylim(-.05, 1.) plt.legend(loc='upper right', title='Poisoned') plt.ylabel('Trigger Reconstruction Loss') plt.xlabel('Task') plt.savefig('normalization_hurts.jpeg', dpi=2000) idx = merged_df.groupby(['model_name'])['test_loss'].transform(min) == merged_df['test_loss'] min_features = merged_df[idx] min_features['test_loss'] = min_features['test_loss'].astype(float) min_features['task'] = min_features['task'].str.upper() sns.boxplot(data=min_features, x='task', y='test_loss', hue='poisoned') plt.ylim(-.05, 1.) 
plt.legend(loc='upper right', title='Poisoned') plt.ylabel('Trigger Reconstruction Loss') plt.xlabel('Task') plt.savefig('round9_mar8.jpeg', dpi=2000) import numpy as np idx = merged_df.groupby(['model_name'])['test_loss'].transform(min) == merged_df['test_loss'] min_features = merged_df[idx] min_features['test_loss'] = min_features['test_loss'].astype(float) min_features['task'] = min_features['task'].str.upper() min_features['log_loss'] = -np.log(min_features['test_loss']) ixs = min_features['log_loss'] > 12 min_features['log_loss'].loc[ixs] = 12 sns.boxplot(data=min_features, x='task', y='log_loss', hue='poisoned') plt.legend(loc='upper right', title='Poisoned') plt.ylabel('Trigger Reconstruction Loss') plt.xlabel('Task') plt.savefig('log_loss.jpeg', dpi=2000):6: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy min_features['test_loss'] = min_features['test_loss'].astype(float) :7: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy min_features['task'] = min_features['task'].str.upper() :8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http[...]Modelfrom sklearn.preprocessing import OneHotEncoder enc = OneHotEncoder(handle_unknown='ignore', sparse=False) result = enc.fit_transform(min_features[['task']]) X = pd.DataFrame(data=result, columns=list(enc.categories_[0])) X['loss'] = min_features['test_loss'].reset_index(drop=True) y = min_features['poisoned'].reset_index(drop=True) X.head() from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.linear_model import LogisticRegression from sklearn.calibration import CalibratedClassifierCV from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() rsk = RepeatedStratifiedKFold(n_splits=5, n_repeats=20) calibrated_clf = CalibratedClassifierCV(base_estimator=clf, cv=rsk) calibrated_clf.fit(X, y) # from joblib import dump, load # dump(calibrated_clf, 'classifier.joblib') from sklearn.model_selection import train_test_split from sklearn.metrics import log_loss num_iterations = 20 test_size = .15 acc_list, cross_entropy_list = [], [] for i in range(num_iterations): X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=test_size, random_state=i) clf = RandomForestClassifier() rsk = RepeatedStratifiedKFold(n_splits=5, n_repeats=20) calibrated_clf = CalibratedClassifierCV(base_estimator=clf, cv=rsk) calibrated_clf.fit(X_train, y_train) acc_list.append(calibrated_clf.score(X_test, y_test)) cross_entropy_list.append(log_loss(y_test, calibrated_clf.predict_proba(X_test))) fig, axs = plt.subplots(1, 2, figsize=(8, 3)) sns.boxplot(acc_list, ax=axs[0]) axs[0].set_xlabel('CV Accuracy %') sns.boxplot(cross_entropy_list, ax=axs[1]) plt.xlabel('CV Cross-Entropy %') plt.savefig('apr5_results.jpeg', bbox_inches="tight", dpi=2000) from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import log_loss num_iterations = 20 test_size = .15 
task_df = min_features[min_features['task']=='SC'] sc_acc_list, sc_cross_entropy_list = [], [] for i in range(num_iterations): X_train, X_test, y_train, y_test = train_test_split( task_df[['loss']], task_df['poisoned'], test_size=test_size, random_state=i) clf = LogisticRegression().fit(X_train, y_train) sc_acc_list.append(clf.score(X_test, y_test)) sc_cross_entropy_list.append(log_loss(y_test, clf.predict_proba(X_test))) task_df = min_features[min_features['task']=='NER'] ner_acc_list, ner_cross_entropy_list = [], [] for i in range(num_iterations): X_train, X_test, y_train, y_test = train_test_split( task_df[['loss']], task_df['poisoned'], test_size=test_size, random_state=i) clf = LogisticRegression().fit(X_train, y_train) ner_acc_list.append(clf.score(X_test, y_test)) ner_cross_entropy_list.append(log_loss(y_test, clf.predict_proba(X_test))) task_df = min_features[min_features['task']=='QA'] qa_acc_list, qa_cross_entropy_list = [], [] for i in range(num_iterations): X_train, X_test, y_train, y_test = train_test_split( task_df[['loss']], task_df['poisoned'], test_size=test_size, random_state=i) clf = LogisticRegression().fit(X_train, y_train) qa_acc_list.append(clf.score(X_test, y_test)) qa_cross_entropy_list.append(log_loss(y_test, clf.predict_proba(X_test))) all_dfs = [] temp_df = pd.DataFrame([]) temp_df['accuracy'] = qa_acc_list temp_df['task'] = 'QA' all_dfs.append(temp_df) temp_df = pd.DataFrame([]) temp_df['accuracy'] = ner_acc_list temp_df['task'] = 'NER' all_dfs.append(temp_df) temp_df = pd.DataFrame([]) temp_df['accuracy'] = sc_acc_list temp_df['task'] = 'SC' all_dfs.append(temp_df) acc_df = pd.concat(all_dfs) sns.boxplot(data=acc_df, x='accuracy', y='task') plt.xlabel('CV Accuracy %') plt.ylabel('Task') plt.savefig('CV_Accuracy.jpeg', dpi=2000) acc_df x = min_features[(min_features['task']=='QA') & (min_features['poisoned']==True)].sort_values('loss') len(x)DLimport torch x = torch.arange(12, dtype=torch.float32) x x.shape X = x.reshape((3,4)) X torch.zeros((3,4)) torch.ones((3,4)) torch.randn((3,4)) x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x+y,x-y,x*y,x/y,x**y X[1:] A = X.numpy() B = torch.from_numpy(A) type(A), type(B) import os os.makedirs(os.path.join('..', 'data'), exist_ok=True) data_file = os.path.join('..', 'data', 'house_tiny.csv') with open(data_file, 'w') as f: f.write('NumRooms,Alley,Price\n') # Column names f.write('NA,Pave,127500\n') # Each row represents a data example f.write('2,NA,106000\n') f.write('4,NA,178100\n') f.write('NA,NA,140000\n') import pandas as pd data = pd.read_csv(data_file) print(data) inputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2] inputs inputs = pd.get_dummies(inputs, dummy_na=True) inputs X, y = torch.tensor(inputs.values), torch.tensor(outputs.values) X, y inputs.values X = torch.ones((3,4,5)) X.sum(dim=[0,1]) X.numel() X.norm() torch.sqrt(3*(X[1].norm()**2))PACKAGES, OPTIONS, CONSTANTSimport os from os import listdir from os.path import isfile, join import requests from time import sleep import credentials # script python con las credenciales almacenadas en un diccionario import locale import pandas as pd pd.set_option('display.max_columns', None) from datetime import datetime from matplotlib import pyplot as plt import seaborn as snsFUNCTIONSdef etl_hospitalizaciones(): """ Generación de df histórico de ingresos, altas y ocupación de hospitales a partir de extracción de CMBD. 
""" # Hospitalizaciones filename = 'CMBD_6_20181217-135856.xlsx' path = os.path.join('..', 'data', 'external', 'hospitalizaciones', filename) raw_df = pd.read_excel(path) date_cols = ['Fecha de ingreso', 'Fecha de alta'] keep_cols = ['Sexo', 'Edad', 'Diagnóstico Principal', 'Estancia', 'Coste'] for col in date_cols: """ Dates are codified as integer in DDMMYYYY format, so there's no padding zero if day < 10, i.e: 28042013 -> 2013-04-28 7022013 -> 2013-02-07 """ raw_df[col + ' new'] = pd.to_datetime(raw_df[col].map(lambda col: str(col).zfill(8)), format='%d%m%Y', errors = 'coerce') interim_df = raw_df[keep_cols].copy() for col in date_cols: interim_df[col] = raw_df[col + ' new'].copy() # Ingresos y altas by date ingresos_diarios_df = interim_df.groupby(['Fecha de ingreso']).size().reset_index(name = 'Ingresos') ingresos_diarios_df.set_index('Fecha de ingreso', inplace = True) ingresos_diarios_df.index.names = ['Fecha'] altas_diarias_df = interim_df.groupby(['Fecha de alta']).size().reset_index(name = 'Altas') altas_diarias_df.set_index('Fecha de alta', inplace = True) altas_diarias_df.index.names = ['Fecha'] # Join by date, cumsum hospitalizaciones_df = altas_diarias_df.join(ingresos_diarios_df, how= 'outer') hospitalizaciones_df['Ingresos acumulados'] = hospitalizaciones_df['Ingresos'].cumsum() hospitalizaciones_df['Altas acumuladas'] = hospitalizaciones_df['Altas'].cumsum() hospitalizaciones_df['Ocupacion'] = hospitalizaciones_df['Ingresos acumulados'] - hospitalizaciones_df['Altas acumuladas'] # Export to csv hospitalizaciones_df.to_csv(os.path.join('..', 'data', 'processed', 'hospitalizaciones')) return hospitalizaciones_df def etl_calidad_aire(): """ Generación de df con datos de calidad del aire a partir de histórico en conjunto de ficheros csv. """ path = os.path.join('..', 'data', 'external', 'calidad_aire') # Cargar tabla magnitudes, unidades y técnicas de medida. mag_uni_tec_df = pd.read_csv(os.path.join(path,'magnitudes_unidades_tecnicas_medida.csv')) # Listar ficheros de datos de calidad del aire. list_of_files = [f for f in listdir(path) if isfile(join(path, f))] list_of_csv_files = [file for file in list_of_files if file.endswith('csv') and file.startswith('datos')] # Concatenar ficheros de calidad del aire en df. raw_df = pd.concat([pd.read_csv(os.path.join(path, file), delimiter = ';') for file in list_of_csv_files]) #raw_df = pd.read_csv(csv_path, delimiter = ';') raw_df = raw_df.merge(mag_uni_tec_df[['MAGNITUD ID', 'MAGNITUD ABREV']], left_on = 'MAGNITUD', right_on = 'MAGNITUD ID', how= 'left') # Pegar descripción de magnitud. # Trasponer wide to long => añomes to día id_cols = ['PROVINCIA', 'MUNICIPIO', 'ESTACION', 'MAGNITUD ABREV', 'PUNTO_MUESTREO', 'ANO', 'MES'] value_cols = ['D' + str(n).zfill(2) for n in range(1,32,1)] interim_df = pd.melt(raw_df, id_cols, value_cols) interim_df['DIA'] = interim_df['variable'].map(lambda var: var[1:]) interim_df['FECHA'] = pd.to_datetime(interim_df['ANO'].astype(str) + interim_df['MES'].astype(str).str.zfill(2) + interim_df['DIA'], format='%Y%m%d', errors = 'coerce') key = ['FECHA', 'MAGNITUD ABREV'] calidad_aire_t_df = interim_df[key + ['value']].groupby(key).mean() # Trasponer long to wide => fecha, medida 1, medida 2, ..., medida n calidad_aire_df = calidad_aire_t_df.pivot_table(index= 'FECHA', columns= 'MAGNITUD ABREV', values= 'value') return calidad_aire_df def etl_tabaquismo(): """ Generación de df con datos históricos de consumo de tabaco, a partir de agregación de datos de INCLASNS. 
""" # Cargar csv path = os.path.join('..', 'data', 'external', 'tabaquismo') filename = "inclasns_tabaquismo.csv" raw_df = pd.read_csv(os.path.join(path, filename)) # Generar fecha a partir de año raw_df['FECHA'] = pd.to_datetime(raw_df['ANO'].astype(str) + '01' + '01', format='%Y%m%d', errors = 'coerce') # Pivotar wide to long => generar [CCAA, value] drop_cols = ['ANO'] id_cols = ['FECHA', 'SEXO'] value_cols = list(set(raw_df.columns.values).difference(set(drop_cols).union(set(id_cols)))) # Las columnas CCAA son todas las del df menos las id de pivot y las que dropeamos interim_df = pd.melt(raw_df, id_cols, value_cols) interim_df.rename(columns = {"variable": "CCAA", "value": "PCT_tabaquismo"}, # Renombrar por legibilidad de la siguiente trasposición inplace = True) # Trasponer long to wide => generar [PCT_tabaquismo_H, PCT_Tabaquismo_M, PCT_Tabaquismo tabaquismo_df = interim_df.pivot_table(index= ['FECHA','CCAA'], columns= 'SEXO', values= 'PCT_tabaquismo') rename_cols = {col: 'PCT_tabaquismo_' + col.lower() for col in tabaquismo_df.columns.values} tabaquismo_df.rename(columns = rename_cols, inplace = True) # Dividir entre 100 para representar porcentaje. for col in tabaquismo_df.columns.values: tabaquismo_df[col] = tabaquismo_df[col] / 100 tabaquismo_df.reset_index(inplace = True) tabaquismo_df.rename_axis(None, axis=1, inplace = True) tabaquismo_df.set_index(keys = 'FECHA', inplace = True) return tabaquismo_df def etl_polen(): """ Generación de df con concentraciones de distintos tipos de polen a nivel mensual, a partir de calendario polínico de España. """ path = os.path.join('..', 'data', 'external', 'polen') filename = "calendario_polinico_espana.csv" polen_df = pd.read_csv(os.path.join(path, filename)) polen_df.set_index('Mes', inplace = True) return polen_df def etl_climatologia(): """ Generación de df con información de datos climáticos. """ # Parámetros de la request a la API anyo_ini = 2000 anyo_fin = 2021 endpoint = "https://opendata.aemet.es/opendata/api/valores/climatologicos/diarios/datos/" estacion = 3195 querystring = {'api_key': credentials.api_keys['aemet_opendata']} headers = {'cache-control': "no-cache"} # La API solo permite traerse 5 años, de modo que lanzamos peticiones secuenciales, 1 por año. response_all_years = list() # almacenaremos los datos de todos los años en esta lista for anyo in range(anyo_ini, anyo_fin + 1): url = endpoint + "fechaini/{anyo}-01-01T00%3A00%3A00UTC/fechafin/{anyo}-12-31T23%3A59%3A59UTC/estacion/{estacion}".format(anyo = anyo, estacion = estacion) while True: # La API no permite sobrepasar cierto número de peticiones por minuto; si alcanzamos el límite, reintentamos pasados 60 segundos. 
try: first_response = requests.request("GET", url, headers=headers, params=querystring) print(first_response.json()) second_response = requests.request("GET", first_response.json()['datos'], headers=headers, params=querystring) response_all_years.extend(second_response.json()) except: sleep(60) break interim_df = pd.json_normalize(response_all_years) # La parte decimal de las variables numéricas viene separada por una coma, es necesario convertir a float def str_to_float(x): # try: _float = float(x.replace(',','.')) except: _float = float("NaN") return _float num_cols = ['altitud','tmed', 'prec', 'tmin', 'tmax', 'dir', 'velmedia', 'racha', 'presMax', 'presMin'] interim_df[num_cols] = interim_df[num_cols].applymap(str_to_float) # Dropeamos las variables relativas a la estación meteorológica y establecemos la fecha como índice drop_cols = ['indicativo', 'nombre', 'provincia'] climatologia_df = interim_df.drop(drop_cols, axis='columns') climatologia_df.set_index('fecha', inplace = True) return climatologia_dfETL PIPELINEhospitalizaciones_df = etl_hospitalizaciones() calidad_aire_df = etl_calidad_aire() tabaquismo_df = etl_tabaquismo() polen_df = etl_polen() climatologia_df = etl_climatologia(){'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/8bfbad4c', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/9f221341', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/78b8b6a9', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/2a5bb8c5', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/b1baa35b', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'estado': 200, 'datos': 'https://opendata.aemet.es/opendata/sh/52731fa2', 'metadatos': 'https://opendata.aemet.es/opendata/sh/b3aa9d28'} {'descripcion': 'exito', 'es[...]DATA PREP PIPELINEjoin_sources_df = hospitalizaciones_df.join(calidad_aire_df, how= 'left') join_sources_df = join_sources_df.join(tabaquismo_df.loc[tabaquismo_df['CCAA'] == "Comunidad de Madrid"], how= 'left') join_sources_df['Mes'] = join_sources_df.index.month join_sources_df = join_sources_df.join(polen_df, on= 'Mes', how= 'left') join_sources_df = join_sources_df.join(climatologia_df, how= 'left') join_sources_df.describe() join_sources_df.info() DatetimeIndex: 2968 entries, 2007-08-02 to 2015-12-31 Data columns (total 47 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Altas 2921 non-null float64 1 Ingresos 2967 non-null float64 2 Ingresos acumulados 2967 non-null float64 3 Altas acumuladas 2921 non-null float64 4 Ocupacion 2920 non-null float64 5 BEN 2968 non-null float64 6 CH4 2968 non-null float64 7 CO 2968 non-null float64 8 EBE 2968 non-null float64 9 NMHC 2968 non-null float64 10 NO 2968 non-null float64 11 NO2 2968 non-null float64 12 NO[...]EDAfig_dims = (20, 10) fig, ax = plt.subplots(figsize=fig_dims) plot_cols = ['Altas','Ingresos','Ocupacion'] sns.lineplot(data=hospitalizaciones_df[plot_cols], ax = ax); fig_dims = (20, 10) fig, ax = plt.subplots(figsize=fig_dims) sns.lineplot(data=calidad_aire_df, ax = ax); fig_dims = (20, 10) fig, ax = plt.subplots(figsize=fig_dims) filtro = 
tabaquismo_df['CCAA'] == "Comunidad de Madrid" sns.lineplot(data=tabaquismo_df.loc[filtro], ax = ax); fig_dims = (20, 10) fig, ax = plt.subplots(figsize=fig_dims) sns.lineplot(data=climatologia_df, ax = ax);Fairness in Machine Learning with PyTorch Fairness is becoming a hot topic amongst machine learning researchers and practitioners.The field is aware that their models have a large impact on society and that their predictions are not always beneficial.In a [previous blog](https://blog.godatadriven.com/fairness-in-ml), Stijn showed how adversarial networks can be used to make fairer predictions.This blog post focuses on the implementation part of things, so that you as a practitioner are able to build your own fair classifiers.Lets start with a short recap of how adversarial networks can help to battle unfairness.Instead of having only a single classifier that makes predictions $\hat{y}$ with data $X$, we introduce an adversary that tries to predict if the classifier is unfair for the sensitive attributes $Z$.The classifier has to compete with the adversary in a zero-sum game: the classifier has to make good predictions but is being penalized if the adversary detects unfair decisions.The end-result of this game is, hopefully, a fair classifier that is also good at predicting.Instead of using keras and TensorFlow like the previous blog, we show how to use PyTorch to train the fair classifier.I find PyTorch a bit nicer to try out new ideas, and switching frameworks keeps the mind sharp and the FOMO away!Don't forget to read the [previous blog](https://blog.godatadriven.com/fairness-in-ml) so that you know why we're implementing these things.In the next section, we start by loading the datasets with some PyTorch utilities.After that, we will separately define and pretrain the classifier and adversarial.These components are then combined and trained together to give a fair classifier. 
Data Our goal is to predict income levels based on personal attributes, such as age, education and marital status.The problem is that our standard classifier is unfair to black people and women.All other attributes being equal, women will, for instance, have lower income predictions than men - even though gender is not part of the personal attributes.Biases like this can be specific to a dataset or even reflect the real world, but we don't want them to lead to unfair predictions.We will start with our dataset from the previous blog.We have the following pandas DataFrames:- `X_train`, `X_test`: attributes used for prediction - like age and native country- `y_train`, `y_test`: target we want to predict - if someone makes more than 50K- `Z_train`, `Z_test`: sensitive attributes - race and color# HIDE # load ICU data set X, y, Z = load_ICU_data('data/adult.data') n_features = X.shape[1] n_sensitive = Z.shape[1] # split into train/test set (X_train, X_test, y_train, y_test, Z_train, Z_test) = train_test_split(X, y, Z, test_size=0.5, stratify=y, random_state=7) # standardize the data scaler = StandardScaler().fit(X_train) scale_df = lambda df, scaler: pd.DataFrame(scaler.transform(df), columns=df.columns, index=df.index) X_train = X_train.pipe(scale_df, scaler) X_test = X_test.pipe(scale_df, scaler)features X: 30940 samples, 93 attributes targets y: (30940,) samples sensitives Z: 30940 samples, 2 attributesPyTorch has some [tools](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html) to make data loading and sampling easier.Here, we will use the `Dataset` and `DataLoader`.A `Dataset` represents your dataset and returns samples from it.The `DataLoader` takes a `Dataset` and helps you with shuffling and batching your samples.A `Dataset` generally takes and returns PyTorch tensors, not rows from a pandas DataFrame.Let's add some logic to the [`TensorDataset`](https://pytorch.org/docs/master/data.htmltorch.utils.data.TensorDataset) that converts DataFrames into tensors.Subclass the `TensorDataset` so we can initialize a `Dataset` with our pandas DataFrames:class PandasDataSet(TensorDataset): def __init__(self, *dataframes): tensors = (self._df_to_tensor(df) for df in dataframes) super(PandasDataSet, self).__init__(*tensors) def _df_to_tensor(self, df): if isinstance(df, pd.Series): df = df.to_frame('dummy') return torch.from_numpy(df.values).float() train_data = PandasDataSet(X_train, y_train, Z_train) test_data = PandasDataSet(X_test, y_test, Z_test)Create a `DataLoader` that returns shuffled batches of our training set:train_loader = DataLoader(train_data, batch_size=32, shuffle=True, drop_last=True) print('# training samples:', len(train_data)) print('# batches:', len(train_loader))# training samples: 15470 # batches: 483That is all the processing we need!All the data needed for training and predicting are respectively in `train_loader` and `test_data`.We get batches of data when iterating over the `train_loader`, `test_data` will be used to test our predictions. 
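As a quick sanity check, a minimal sketch that pulls one batch from `train_loader` to confirm the tensor shapes line up with the feature counts reported above (the printed shapes assume the batch size of 32, 93 features in `X_train` and 2 sensitive attributes in `Z_train`):

```python
# Peek at a single shuffled batch; shapes assume batch_size=32,
# 93 feature columns in X_train and 2 sensitive columns in Z_train.
x_batch, y_batch, z_batch = next(iter(train_loader))

print(x_batch.shape)  # torch.Size([32, 93])
print(y_batch.shape)  # torch.Size([32, 1])
print(z_batch.shape)  # torch.Size([32, 2])
```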
Income predictions With our datasets in place, we define and pretrain the classifier to make income predictions.This classifier will be good in predicting income level but is likely to be unfair - it is only penalized on performance and not on fairness.The PyTorch's [`nn`](https://pytorch.org/tutorials/beginner/pytorch_with_examples.htmlnn-module) module makes implementing a neural network easy.We get a fully working network class by inheriting from `nn.Module` and implementing the `.forward()` method.Our network consists of three sequential hidden layers with ReLu activation and dropout.The sigmoid layer turns these activations into a probability for the income class.class Classifier(nn.Module): def __init__(self, n_features, n_hidden=32, p_dropout=0.2): super(Classifier, self).__init__() self.network = nn.Sequential( nn.Linear(n_features, n_hidden), nn.ReLU(), nn.Dropout(p_dropout), nn.Linear(n_hidden, n_hidden), nn.ReLU(), nn.Dropout(p_dropout), nn.Linear(n_hidden, n_hidden), nn.ReLU(), nn.Dropout(p_dropout), nn.Linear(n_hidden, 1), ) def forward(self, x): return torch.sigmoid(self.network(x))Initialize the classifier, choose binary cross entropy as the loss function and let Adam optimize the weights of the classifier:clf = Classifier(n_features=n_features) clf_criterion = nn.BCELoss() clf_optimizer = optim.Adam(clf.parameters())Time to pretrain the classifier!For each epoch, we'll iterate over the batches returned by our `DataLoader`.# CHANGE def pretrain_classifier(clf, data_loader, optimizer, criterion): for x, y, _ in data_loader: clf.zero_grad() p_y = clf(x) loss = criterion(p_y, y) loss.backward() optimizer.step() return clf N_CLF_EPOCHS = 2 for epoch in range(N_CLF_EPOCHS): clf = pretrain_classifier(clf, train_loader, clf_optimizer, clf_criterion)The code above does the following for each batch:- Set the gradients relevant to our classifier to zero.- Let the classifier `clf` predict for a batch `x` to give `p_y`.- Compute the loss given the predictions and the real answer.- Backpropagate the loss with a `.backward()` to give the gradients to decrease the errors.- Let the classifier optimizer perform an optimization step with these gradients.The result should be a fairly performant though still unfair classifier.We will check the performance after defining the adversary. 
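If you want a rough number before the adversary enters the picture, a minimal evaluation sketch could look like the following; using `sklearn.metrics.roc_auc_score` here is an assumption on my part, the original checks performance later via `plot_distributions`:

```python
from sklearn.metrics import roc_auc_score

# Sketch: score the pretrained (still unfair) classifier on the held-out set.
clf.eval()  # disable dropout for a deterministic forward pass
with torch.no_grad():
    p_test = clf(test_data.tensors[0]).numpy().ravel()
clf.train()

print(f'pretrained classifier ROC AUC: {roc_auc_score(y_test, p_test):.3f}')
```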
Detecting unfairness With the classifier pretrained, we now define and pretrain the adversary.Similar to the classifier, our adversary consists of three layers.However, the input comes from a single class (the predicted income class) and the output consists of two sensitive classes (sex and race).For our final solution, there will be a trade-off between classifier performance and fairness for our sensitive attributes.We will tweak the adversarial loss to incorporate that trade-off: the lambda parameter weighs the adversarial loss of each class.This parameter is later also used to scale the adversary performance versus the classifier performance.By telling `nn.BCELoss` not to reduce we get the losses for each individual sample and class instead of a single number.Multiplying this with our `lambdas` and taking the average, gives us the weighted adversarial loss, our proxy for unfairness.# CHANGE class Adversary(nn.Module): def __init__(self, n_sensitive, n_hidden=32): super(Adversary, self).__init__() self.network = nn.Sequential( nn.Linear(1, n_hidden), nn.ReLU(), nn.Linear(n_hidden, n_hidden), nn.ReLU(), nn.Linear(n_hidden, n_hidden), nn.ReLU(), nn.Linear(n_hidden, n_sensitive), ) def forward(self, x): return torch.sigmoid(self.network(x)) def pretrain_adversary(adv, clf, data_loader, optimizer, criterion): for x, _, z in data_loader: p_y = clf(x).detach() adv.zero_grad() p_z = adv(p_y) loss = (criterion(p_z, z) * lambdas).mean() loss.backward() optimizer.step() return adv lambdas = torch.Tensor([130, 30]) adv = Adversary(Z_train.shape[1]) adv_criterion = nn.BCELoss(reduce=False) adv_optimizer = optim.Adam(adv.parameters()) N_ADV_EPOCHS = 5 for epoch in range(N_ADV_EPOCHS): pretrain_adversary(adv, clf, train_loader, adv_optimizer, adv_criterion)Training the adversary is pretty similar to how we trained the classifier.Note that we [`.detach()`](https://pytorch.org/docs/master/autograd.htmltorch.Tensor.detach) the predictions of the classifier from the graph.This signals to PyTorch that we don't use the gradients of the classifier operations to optimize the adversary, allowing PyTorch to free up some memory. Are our results similar to those of our earlier blog using keras and TensorFlow?Pretty much!The ROC AUC, accuracy and probability distributions look very similar.# HIDE with torch.no_grad(): pre_clf_test = clf(test_data.tensors[0]) pre_adv_test = adv(pre_clf_test) y_pre_clf = pd.Series(pre_clf_test.data.numpy().ravel(), index=y_test.index) y_pre_adv = pd.DataFrame(pre_adv_test.numpy(), columns=Z.columns) fig = plot_distributions(y_test, Z_test, y_pre_clf, y_pre_adv) fig.savefig('images/torch_biased_training.png')Unfortunately, switching frameworks did not magically make the classifier fairer.We can see this from the probability p%-rule and distributions, but also from the ROC AUC score of the adversary.A score higher than 0.5 indicates that the adversary is able to detect unfairness. Training for fairness Now that we have an unfair classifier and an adversary that is able to pick up on unfairness, we can engage them in the zero-sum game to make the classifier fair.Remember that the fair classifier will be punished according to:$$\min_{\theta_{clf}}\left[Loss_{y}(\theta_{clf})-\lambda Loss_{Z}(\theta_{clf},\theta_{adv})\right].$$The first term represents how good the classifier is in predicting income, the second how good the adversary can reconstruct unfairness. 
The parameter $\lambda$ represents the trade-off between these terms: it weighs the punishment by the adversary versus the prediction performance.The adversary learns on the full data set and the classifier is given only the single batch, giving the adversary a slight edge in learning.The loss function for the classifier is changed to its original loss plus the weighted negative adversarial loss.# HIDE !rm -f output/torch_*.png # CHANGE def train(clf, adv, data_loader, clf_criterion, adv_criterion, clf_optimizer, adv_optimizer, lambdas): # Train adversary for x, y, z in data_loader: p_y = clf(x) adv.zero_grad() p_z = adv(p_y) loss_adv = (adv_criterion(p_z, z) * lambdas).mean() loss_adv.backward() adv_optimizer.step() # Train classifier on single batch for x, y, z in data_loader: pass p_y = clf(x) p_z = adv(p_y) clf.zero_grad() p_z = adv(p_y) loss_adv = (adv_criterion(p_z, z) * lambdas).mean() clf_loss = clf_criterion(p_y, y) - (adv_criterion(adv(p_y), z) * lambdas).mean() clf_loss.backward() clf_optimizer.step() return clf, adv N_EPOCH_COMBINED = 165 for epoch in range(1, N_EPOCH_COMBINED): clf, adv = train(clf, adv, train_loader, clf_criterion, adv_criterion, clf_optimizer, adv_optimizer, lambdas) with torch.no_grad(): clf_pred = clf(test_data.tensors[0]) adv_pred = adv(clf_pred) y_post_clf = pd.Series(clf_pred.numpy().ravel(), index=y_test.index) Z_post_adv = pd.DataFrame(adv_pred.numpy(), columns=Z_test.columns) fig = plot_distributions(y_test, Z_test, y_post_clf, Z_post_adv, epoch) display.clear_output(wait=True) plt.savefig(f'output/torch_{epoch+1:08d}.png', bbox_inches='tight') plt.show(plt.gcf()) last_img = f'output/torch_{epoch+1:08d}.png' # HIDE !convert -loop 0 -delay 0 output/torch_*.png -delay 500 {last_img} images/torch_training.gif#Código 01 def funcao_1(num1, num2): resultado = num1 * num2 if resultado <= 1000: return resultado else: return num1 + num2 numero_1 = 20 numero_2 = 30 #Código 02 def funcao_2(num): numero_anterior = 0 for i in range(num): resultado = numero_anterior + i print('Numero A ', i, "Numero B ", numero_anterior, 'Resultado: ', resultado) numero_anterior = i funcao_2(20) #Código 03 def funcao_3(str): for i in range(0, len(str) - 1, 2): print('Índice[', i, ']', str[i]) #código 04 def funcao_4(lista_numerica): print('Valor passado: ', lista_numerica) a = lista_numerica[0] b = lista_numerica[-1] if(a == b): return True else: return False numeros = [10, 20, 30, 40, 10] #Código 05 class Classe_1: def funcao_da_classe_1(self, string): dicionario = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} valor = 0 for i in range(len(string)): if i > 0 and dicionario[string[i]] > dicionario[string[i - 1]]: valor += dicionario[string[i]] - 2 * dicionario[string[i - 1]] else: valor += dicionario[string[i]] return valor #Código 06 class A: def __init__(self): self.calcI(30) print('i da Classe A ', self.i) def calcI(self, i): self.i = 2 * 1 class B(A): def __int__(self): super().__int__() def calcI(self, i): self.i = 3 * i b = B() #Código 07 class Classe_2(): def __int__(self, l, w): self.a = l self.b = w def retodo_1(self): return self.a * self.b objeto_1 = Classe_2(12, 10)Exercices sur les D\[M|D\]L de SQL Exercice 1 - contraintes sur le club Compléter le script sql `club_a_completer.sql` qui réalise le schéma relationnel du club rappelé ci-après: ![schema_rel_club.png](attachment:ad1b8796-c379-4121-b0e8-82c8fb83c8a3.png) ```sqlCREATE TABLE membres (-- nom col type options, contrainte mbr_id serial PRIMARY KEY, prenom varchar(40) NOT NULL, nom varchar(40) 
NOT NULL, adresse varchar(100) NOT NULL, code_postal char(5) NOT NULL, telephone char(10) NOT NULL, recommande_par integer NOT NULL REFERENCES membres (mbr_id), date_entree date NOT NULL);CREATE TABLE activites ( act_id serial PRIMARY KEY, nom varchar(30) NOT NULL, prix_membre numeric NOT NULL, prix_invite numeric NOT NULL, cout_initial numeric NOT NULL, cout_maintenance_mensuel numeric NOT NULL);-- une erreur...CREATE TABLE reservations ( reserv_id serial PRIMARY KEY, act_id integer NOT NULL REFERENCES activites (act_id), mbr_id integer NOT NULL REFERENCES membres (mbr_id), heure_debut timestamp NOT NULL, nb_demi_heure integer NOT NULL, CHECK (nb_demi_heure >= 1 AND nb_demi_heure <= 6));```*Note*: `serial` est un entier particulier qui est incrémenté automatiquement lors d'une insertion. Exercice 2 - vocabulaire Regrouper les termes *synonymes* parmi: > colonne, entité, champ, domaine, attribut, ligne, champ, schéma, descripteur, base de données, objet, type, *column*, *row*. - colonne, champ, attribut, descripteur, *column*;- entité, ligne, objet, *row*;- domaine, type;- base de données, schéma; Exercice 3 - annuaire sql On rappelle brièvement le résultat de l'exercice «annuaire»:```annuaire(nom , prenom, !num_tel)```Donner un ordre SQL permettant de créer cette table avec un maximum de contrainte d'intégrité. ```sqlCREATE TABLE annuaire ( nom varchar(50) NOT NULL, prenom varchar(50) NOT NULL, num_tel char(10) PRIMARY KEY, UNIQUE (prenom, nom) --discutable, et si une personne a plusieurs téléphones...); ``` Exercice 4 - bulletin sql On rappelle brièvement le résultat de l'exercice «bulletin»:```notes(![etudiant, matiere], note) eleves(nom, prenom, !id)matieres(intitule, !id)``` 1. Donner les ordres SQL permettant de créer ces tables avec un maximum de contrainte d'intégrité. ```sqlCREATE TABLE eleves ( nom varchar(50) NOT NULL, prenom varchar(50) NOT NULL, id integer PRIMARY KEY);CREATE TABLE matieres ( intitule varchar(20) NOT NULL, id integer PRIMARY KEY);CREATE TABLE notes ( etudiant integer REFERENCES eleves (id), matiere integer REFERENCES matieres (id), note decimal(4,2), PRIMARY KEY (etudiant, matiere), CHECK (note >= 0 and note <= 20));``` 2. Donner les ordres SQL permettant de supprimer ces tables une fois qu'elles existent. ```sqlDROP TABLE notes; -- doit être le premier car les autres tables font référence à celle-ciDROP TABLE matieres;DROP TABLE notes;``` Exercice 5 - Trouver l'erreur Pour chacun des exemples donnés ci-dessous, dire quelle instruction provoque une erreur en supposant que la base de données ne contient aucune table avant leur exécution. 1. *first* ```sql DROP TABLE client; CREATE TABLE client (cid INT PRIMARY KEY, nom VARCHAR(100), prenom VARCHAR(100), points_fidelite INT NOT NULL, CHECK (points_fidelite >= 0)); ``` Bien que mal présenté, ce code SQL est «licite». Le seul problème est que `DROP` produit une erreur si la table client n'existe pas déjà. On devrait plutôt écrire:```sqlDROP TABLE IF EXISTS client;``` 2. *second* ```sql CREATE TABLE client (cid INT PRIMARY KEY, nom VARCHAR (100), prenom VARCHAR(100), points_fidelite INT NOT NULL, CHECK (points_fidelite >= 0)); CREATE TABLE commande( cid INT REFERENCES client(cid), pid INT REFERENCES produit(pid), date DATE NOT NULL ); CREATE TABLE produit (pid INT PRIMARY KEY, nom VARCHAR(100), prix decimal(10,2)); ``` Il y a simplement un **problème d'ordre**: la table produit devrait être créée avant la table commande puisque cette dernière y fait référence. 3. 
*third* ```sql CREATE TABLE client ( cid INT PRIMARY KEY, nom VARCHAR(100), prenom VARCHAR(100), points_fidelite INT NOT NULL, CHECK (points_fidelite >= 0) ); CREATE TABLE produit ( pid INT PRIMARY KEY, nom VARCHAR(100), prix NUMERIC(10,2) ); CREATE TABLE commande( cid INT REFERENCES client(cid), nomp VARCHAR(100) REFERENCES produit(nom), date DATE NOT NULL ); ``` `nom` de **client** n'a aucune raison d'être *unique* (pas de contrainte UNIQUE) et donc une **commande** pourrait faire référence - `nomp` - à plusieurs lignes de la table **client**... ce n'est pas permis! 4. *fourth* ```sql CREATE TABLE client ( cid INT PRIMARY KEY, nom VARCHAR(100), prenom VARCHAR(100), points_fidelite INT NOT NULL, CHECK (points_fidelite >= 0) ); CREATE TABLE produit ( pid INT PRIMARY KEY, nom VARCHAR(100), prix DECIMAL(10,2) ); CREATE TABLE commande( cid INT REFERENCES client(cid), pid INT REFERENCES produit(pid), date DATE NOT NULL ); INSERT INTO commande VALUES (0, 0, '2020-03-02); ``` À ce stade, les tables **client** et **produit** sont vides. Il est impossible d'insérer quoi que ce soit dans **commande** puisque les contraintes de clés étrangères seront violées. Exercice 6 - petits joueurs On considère les deux tables suivantes:```sqlCREATE TABLE joueurs (jid INT PRIMARY KEY, nom VARCHAR(100) NOT NULL);CREATE TABLE parties ( j1 INT REFERENCES joueur(jid), j2 INT REFERENCES joueur(jid), score1 INT NOT NULL, score2 INT NOT NULL, CHECK (j1 j2));``` 1. Ces tables stockent des résultats de parties entre des joueurs. Lister toutes les contraintes d'intégrité et pour chacune donner des ordres SQL violant ces contraintes. Si on omet les types:- contrainte de clé primaire: sur jid de joueurs,- contraintes de clé étrangères: chaque partie fait référence à deux joueurs,- non nullité: les champs nom, score1 et score2 doivent être précisés,- contrainte de vérification: une partie ne peut opposé un joueur à lui-même. 2. Modifier les ordres de création de table pour prendre en compte les modifications suivantes: - La table **parties** contient en plus une colonne `jour` non nulle, indiquant la date à laquelle la partie à eu lieu. - Les scores ne peuvent pas être négatifs; - Deux joueurs ne peuvent pas jouer plusieurs fois le même jour. ```sqlCREATE TABLE parties ( j1 INT REFERENCES joueur(jid), j2 INT REFERENCES joueur(jid), score1 INT NOT NULL, score2 INT NOT NULL, jour DATE NOT NULL, CHECK (j1 j2), CHECK (score1 >= 0 and score2 >= 0), UNIQUE (j1, j2, jour));``` Notez qu'en échangeant l'ordre des joueurs, il est possible que deux joueurs s'affrontent deux fois le même jour... (on pourrait exiger `CHECK (j1 < j2)` pour parrer à cela). Exercice 7 - Python (CSV vers SQL) Écrire un programme Python qui lit le fichier CSV *infos.csv* au format suivant:- les champs sont séparés par des `;`,- le fichier contient 4 colonnes **nom, prenom, annee_naissance, taille** représentant le nom, prénom, l'année de naissance et la taille (en cm) de personnes.Votre programme doit écrire dans un fichier *infos.sql* l'ensemble des ordres permettant de:- créer une table adéquate et contenant en plus un identifiant unique (entier) servant de clé primaire et- remplir la table avec les données du fichier CSV.import os csv_path = os.path.realpath('infos.csv') sql_path = os.path.realpath('infos.sql') table_sql = """CREATE TABLE infos ( id serial PRIMARY KEY, nom varchar(100) NOT NULL, prenom varchar(100) NOT NULL, annee_naissance DATE NOT NULL, taille INT NOT NULL ); """ # un gabarit pour l'insertion -> cf. 
str.format() insert_tpl = "INSERT INTO infos (nom, prenom, annee_naissance, taille) VALUES ({valeurs});\n" # collecte des données with open(csv_path, "r") as csv: donnees = [] for ligne in csv: valeurs = ligne.split(";") # l = ", ".join(f"'{valeur.strip()}'" for valeur in valeurs[:-1]) + f", {valeurs[-1].strip()}" donnees.append(l) with open(sql_path, "w") as sql: sql.write(table_sql) for d in donnees: sql.write(insert_tpl.format(valeurs=d))Importsimport os import tempfile import datetime import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import sklearn as skl import joblib import json import pickle import tempfile from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.compose import ColumnTransformer, make_column_transformer from collections import defaultdict import joblib from data_functions import ingest_data, prepare_data %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns SEED = 4321 np.random.seed(SEED) tf.random.set_seed(SEED) # Check GPU physical_devices = tf.config.list_physical_devices('GPU') print('TF version:',tf.__version__ , tf.config.experimental.list_physical_devices(device_type='GPU') ) #memory control tf.config.experimental.set_memory_growth(physical_devices[0], True) #Azure Imports import azureml.core from azureml.core import Workspace from azureml.core.model import Model from azureml.core import Experiment from azureml.core.webservice import Webservice from azureml.core.image import ContainerImage from azureml.core.webservice import AciWebservice from azureml.core.conda_dependencies import CondaDependencies from azureml.core.resource_configuration import ResourceConfiguration from azureml.core.model import InferenceConfig #from azureml.core.webservice import AksWebservice from azureml.core.environment import Environment, DEFAULT_CPU_IMAGE #, DEFAULT_GPU_IMAGE from azureml.core.compute import ComputeTarget #, AksCompute from azureml.core.compute_target import ComputeTargetException #from azureml.core.datastore import Datastore from msrest.exceptions import HttpOperationError from azureml.core.webservice import LocalWebservice print('Azure version:', azureml.core.VERSION)Azure version: 1.19.0Connect to Azurews_name ='Prototype' sub_id ='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' #your subscription ID res_grp ='Deploy' region ='South Central US' subscription_id = os.getenv("SUBSCRIPTION_ID", default= sub_id) resource_group = os.getenv("RESOURCE_GROUP", default= res_grp) workspace_name = os.getenv("WORKSPACE_NAME", default= ws_name) workspace_region = os.getenv("WORKSPACE_REGION", default= region) #connect to workspace or create a new workspace try: ws = Workspace.from_config() print("Workspace succesfully loaded from config") except: print("CANNOT create workspace from config file, creating new workspace") ws = Workspace.create(name = ws_name, subscription_id = sub_id, resource_group=res_grp, create_resource_group=True, location= region) ws.write_config() # print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n') # prints details of workspace #displays compute targets, optional for compute_name in ws.compute_targets: compute = ws.compute_targets[compute_name] print(compute.name, ":", compute.type)Import Datauser_df = pd.read_csv('data/user_df.csv') user_df.head(2) live_df = pd.read_csv('data/live_df.csv') live_df.head(2) test_df = pd.read_csv('data/prediction_df.csv') test_df.head(2) recommender_df = pd.read_csv('data/recommender_df.csv') 
recommender_df.head(2)TF Modeldef create_model(cat_features, num_features, data): ''' Create tf model. input: df and list of categorical features output: tf model ''' models= [] inputs = [] for cat in cat_features: vocab_size = (data[cat].nunique()) + 1 #//2 inpt = tf.keras.layers.Input(shape=(1,),name='input_'+'_'.join(cat.split(' '))) inputs.append(inpt) embed = tf.keras.layers.Embedding(vocab_size,200, trainable=True,embeddings_initializer=tf.random_normal_initializer())(inpt) embed_rehsaped =tf.keras.layers.Reshape(target_shape=(200,))(embed) models.append(embed_rehsaped) num_input = tf.keras.layers.Input(shape=(len(num_features)), name='input_num_features') inputs.append(num_input) models.append(num_input) merge_models= tf.keras.layers.concatenate(models) pre_preds = tf.keras.layers.Dense(1000)(merge_models) pre_preds = tf.keras.layers.BatchNormalization()(pre_preds) pre_preds = tf.keras.layers.Dense(1000)(pre_preds) pre_preds = tf.keras.layers.BatchNormalization()(pre_preds) pred = tf.keras.layers.Dense(1,activation='sigmoid')(pre_preds) model_full = tf.keras.models.Model(inputs= inputs,\ outputs =pred) model_full.compile(loss=tf.keras.losses.binary_crossentropy,\ metrics=['acc'], #accuracy optimizer='adam') return model_full cat_features = ['user_id', 'cat1', 'cat2', 'cat3'] num_features = ['numeric1'] train,labels = ingest_data(user_df) label_dict, scaler_dict = prepare_data(train, cat_features, num_features) train.head()Create TF Modeltf_model = create_model(cat_features, num_features, train) #model.summary() tf.keras.utils.plot_model(tf_model, 'model.png', show_shapes = True)Callbacks#Learning Rate scheduler class LearningRateReducerCb(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): old_lr = self.model.optimizer.lr.read_value() new_lr = old_lr * 1.02 print("\nEpoch: {}. Increasing Learning Rate from {} to {}".format(epoch, old_lr, new_lr)) self.model.optimizer.lr.assign(new_lr) class SaveBestModel(tf.keras.callbacks.Callback): def __init__(self, save_best_metric='val_loss', this_max=False): self.save_best_metric = save_best_metric self.max = this_max if this_max: self.best = float('-inf') else: self.best = float('inf') def on_epoch_end(self, epoch, logs=None): metric_value = logs[self.save_best_metric] if self.max: if metric_value > self.best: self.best = metric_value self.best_model = self.model else: if metric_value < self.best: self.best = metric_value self.best_model = self.model from tensorflow.keras.callbacks import * from tensorflow.keras import backend as K import numpy as np class CyclicLR(Callback): """ from https://github.com/bckenstler/CLR Copyright (c) 2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # About This callback implements a cyclical learning rate policy (CLR). The method cycles the learning rate between two boundaries with some constant frequency, as detailed in this paper (https://arxiv.org/abs/1506.01186). The amplitude of the cycle can be scaled on a per-iteration or per-cycle basis. This class has three built-in policies, as put forth in the paper. "triangular": A basic triangular cycle w/ no amplitude scaling. 
"triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. "exp_range": A cycle that scales initial amplitude by gamma**(cycle iterations) at each cycle iteration. For more detail, please see paper. # Example ```python clr = CyclicLR(base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular') model.fit(X_train, Y_train, callbacks=[clr]) ``` Class also supports custom scaling functions: ```python clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.)) clr = CyclicLR(base_lr=0.001, max_lr=0.006, step_size=2000., scale_fn=clr_fn, scale_mode='cycle') model.fit(X_train, Y_train, callbacks=[clr]) ``` # Arguments base_lr: initial learning rate which is the lower boundary in the cycle. max_lr: upper boundary in the cycle. Functionally, it defines the cycle amplitude (max_lr - base_lr). The lr at any cycle is the sum of base_lr and some scaling of the amplitude; therefore max_lr may not actually be reached depending on scaling function. step_size: number of training iterations per half cycle. Authors suggest setting step_size 2-8 x training iterations in epoch. mode: one of {triangular, triangular2, exp_range}. Default 'triangular'. Values correspond to policies detailed above. If scale_fn is not None, this argument is ignored. gamma: constant in 'exp_range' scaling function: gamma**(cycle iterations) scale_fn: Custom scaling policy defined by a single argument lambda function, where 0 <= scale_fn(x) <= 1 for all x >= 0. mode paramater is ignored scale_mode: {'cycle', 'iterations'}. Defines whether scale_fn is evaluated on cycle number or cycle iterations (training iterations since start of cycle). Default is 'cycle'. """ def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'): super(CyclicLR, self).__init__() self.base_lr = base_lr self.max_lr = max_lr self.step_size = step_size self.mode = mode self.gamma = gamma if scale_fn == None: if self.mode == 'triangular': self.scale_fn = lambda x: 1. self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = lambda x: 1/(2.**(x-1)) self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = lambda x: gamma**(x) self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.clr_iterations = 0. self.trn_iterations = 0. self.history = {} self._reset() def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None): """Resets cycle iterations. Optional boundary/step size adjustment. """ if new_base_lr != None: self.base_lr = new_base_lr if new_max_lr != None: self.max_lr = new_max_lr if new_step_size != None: self.step_size = new_step_size self.clr_iterations = 0. 
def clr(self): cycle = np.floor(1+self.clr_iterations/(2*self.step_size)) x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1) if self.scale_mode == 'cycle': return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(cycle) else: return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(self.clr_iterations) def on_train_begin(self, logs={}): logs = logs or {} if self.clr_iterations == 0: K.set_value(self.model.optimizer.lr, self.base_lr) else: K.set_value(self.model.optimizer.lr, self.clr()) def on_batch_end(self, epoch, logs=None): logs = logs or {} self.trn_iterations += 1 self.clr_iterations += 1 self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr)) self.history.setdefault('iterations', []).append(self.trn_iterations) for k, v in logs.items(): self.history.setdefault(k, []).append(v) K.set_value(self.model.optimizer.lr, self.clr())Fit TF model#dict for training input_dict= { 'input_user_id':train[cat_features[0]], 'input_cat1':train[cat_features[1]], 'input_cat2':train[cat_features[2]], 'input_cat3':train[cat_features[3]], 'input_num_features':train[num_features[0]] } # Training Parameters epochs = 5 batch_size = 1000 val_split = 0.3 save_best_model = SaveBestModel() clr_triangular = CyclicLR ( base_lr=0.001, max_lr=0.01, step_size=2.,mode='triangular') #fit history = tf_model.fit(input_dict, labels*1, validation_split= val_split, epochs= epochs, batch_size= batch_size, callbacks=[clr_triangular, save_best_model], # LearningRateReducerCb() #shuffle = True ) #plot loss history def plot_loss(history): fig = plt.figure(figsize=(16,6)) plt.plot(history.history['loss'], label='loss') plt.plot(history.history['val_loss'], label='val_loss') plt.ylim([0.1, 2]) plt.xlabel('Epoch') plt.ylabel('Error [Score]') plt.legend() plt.grid(True) plt.show() plot_loss(history) #Save Model H5 #tf_model = save_best_model.best_model #saves best model tf.keras.models.save_model( tf_model, 'outputs/tf_model.h5', overwrite=True, include_optimizer=True, save_format='h5', signatures=None, options=None ) print('Model Saved') #Clear TF Session tf.keras.backend.clear_session()Register Models# #Register TF Model in Azure # TFmodel = Model.register( workspace=ws, # model_name='tf_model', # Name of the registered model in your workspace. # model_path='outputs/tf_model.h5', # Local file to upload and register as a model. 
# description='TensorFlow model on synthetic data', # tags={'area': 'prototype', 'type': 'TensorFlow'}) # print('Name:', TFmodel.name) # print('Version:', TFmodel.version)Load Models from Azure# Load TF model TFmodel = Model(ws, name='tf_model') print('Name:', TFmodel.name) print('Version:', TFmodel.version)Name: tf_model Version: 1Score.py%%writefile score.py import joblib import sklearn import numpy as np import os import json import pandas as pd import tensorflow as tf from azureml.core.model import Model from sklearn.preprocessing import LabelEncoder import pandas_validator as pv from sklearn.metrics.pairwise import cosine_similarity from sklearn.compose import ColumnTransformer, make_column_transformer from sklearn.preprocessing import LabelEncoder, StandardScaler class InputValidator(pv.DataFrameValidator): ''' Class to test input data validity ''' row_num = 120 column_num = 6 userID = pv.IntegerColumnValidator('user_id', min_value=0) cat1 = pv.IntegerColumnValidator('cat1', min_value=0, max_value=120) cat2 = pv.IntegerColumnValidator('cat2', min_value=0, max_value=3) cat3 = pv.IntegerColumnValidator('cat3', min_value=0, max_value=40) numeric1 = pv.FloatColumnValidator('numeric1', min_value=0, max_value=10) target = pv.FloatColumnValidator('target', min_value=0, max_value=1) class TestValidator(pv.DataFrameValidator): ''' Class to test test data validity ''' row_num = 200 column_num = 5 userID = pv.IntegerColumnValidator('user_id', min_value=0) cat1 = pv.IntegerColumnValidator('cat1', min_value=0, max_value=120) cat2 = pv.IntegerColumnValidator('cat2', min_value=0, max_value=3) cat3 = pv.IntegerColumnValidator('cat3', min_value=0, max_value=40) numeric1 = pv.FloatColumnValidator('numeric1', min_value=0, max_value=10) numeric1 = pv.FloatColumnValidator('numeric1', min_value=0, max_value=10) def prepare_data(data_df, is_train, label_dict, scaler_dict): ''' Process input data from azure ''' data_df.fillna(0) #encode features cat_features = ['user_id', 'cat1', 'cat2', 'cat3'] num_features = ['numeric1'] #label encoding data_df.replace(label_dict) #scaling for col in num_features: (data_df[col] - scaler_dict[col][0]) / scaler_dict[col][1] #dict for tf df_dict= { 'input_user_id':data_df[cat_features[0]], 'input_cat1':data_df[cat_features[1]], 'input_cat2':data_df[cat_features[2]], 'input_cat3':data_df[cat_features[3]], 'input_num_features':data_df[num_features[0]] } # if true return labels if is_train: # convert target to cat data_df = ewma_to_cat(data_df) labels = data_df['target'] return df_dict, labels else: return df_dict def ewma_to_cat(df): ''' Convert target to discrete based on IQR returns: modified df ''' q1, q2, q3 = np.percentile(df['target'], [25,50,75]) df.loc[df['target'] > q2, 'target'] = 1 df.loc[df['target'] <= q2, 'target'] = 0 #user_df.loc[(user_df["ewma"] >= q1) & (user_df["ewma"] <= q3), 'ewma'] = 1 #group Q2 and Q3 to make trinary df['target'] = df['target'].astype(int) return df def cos_sim(user, user_df): ''' Cosine similarity for users ''' n = 10 user_row = user_df.loc[user_df['user_id'] == user] user_row = user_row.drop(['user_id'], axis = 1) result_df = pd.DataFrame(user_df['user_id']) no_id_df = user_df.drop(['user_id'], axis = 1) cos_sim = cosine_similarity(no_id_df, user_row ) result_df['cos_sim'] = cos_sim top_n = result_df[result_df['user_id'] != user].nlargest(10, 'cos_sim')['user_id'] return top_n.values def init(): ''' Initialize model and globals ''' global question_df global TFmodel #Load TF model tf_model_path = 
os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'tf_model.h5') TFmodel = tf.keras.models.load_model(tf_model_path) def run(data): #, test ''' Run script ''' try: #Input Data input_data = pd.DataFrame(json.loads(data)['data']) test_data = pd.DataFrame(json.loads(data)['test']) rec_data = pd.DataFrame(json.loads(data)['rec']) label_dict = json.loads(data, object_hook=lambda d: {int(k) if k.lstrip('-').isdigit() else k: v for k, v in d.items()})['label_dict'] scaler_dict = json.loads(data)['scaler_dict'] #Top ten similar users user = input_data['user_id'][0] top_ten = cos_sim(user, rec_data) except AssertionError as error: return error #Validate Data input_validator = InputValidator() test_validator = TestValidator() #user data try: assert(input_validator.is_valid(input_data)) except: return ('Assertion error, invalid User data') #question data try: assert(test_validator.is_valid(test_data)) except: return ('Assertion error, invalid Question data') #process input and convert dict and labels input_dict, labels = prepare_data(input_data, True, label_dict, scaler_dict) test_dict = prepare_data(test_data, False, label_dict, scaler_dict) #append TF model with input data TFmodel.fit(input_dict, labels, epochs= 5) result = TFmodel.predict(test_dict) return result.tolist(), top_ten.tolist()Overwriting score.pyDeploy''' Curated Environments List ''' # envs = Environment.list(workspace=ws) # for env in envs: # if env.startswith("AzureML"): # print("Name",env) # print("packages", envs[env].python.conda_dependencies.serialize_to_string()) #Create Custom Environment with pip env = Environment('my_env') env.python.conda_dependencies = CondaDependencies.create(pip_packages=[ 'azureml-defaults', 'inference-schema[numpy-support]', 'joblib', 'numpy', 'scikit-learn=={}'.format(skl.__version__), 'tensorflow =={}'.format(tf.__version__ ), 'h5py==2.10.0', 'pandas_validator' ]) #Deploy Model, run this I removed the output for privacy service_name = 'simple-local' mods = [TFmodel] inference_config = InferenceConfig(entry_script='score.py', environment=env) # Deploy options #aci_config = AciWebservice.deploy_configuration(cpu_cores=2, memory_gb=1) local_config = LocalWebservice.deploy_configuration(port=8890) #Deploy Local, requires Docker running #aks_config = AksWebservice.deploy_configuration(autoscale_enabled=True) #cpu_cores = 4, memory_gb = 8 #aks_config2 = AksWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1) #alternate aks service = Model.deploy(workspace=ws, name=service_name, models= mods, inference_config=inference_config, deployment_config= local_config, #deployment_target= aks_cluster, # For K8S, from initial creation overwrite=True) service.wait_for_deployment(show_output=False) #Get Error Logs if service Fails print(service.get_logs()) #Get URL for local print(service.scoring_uri) #%%timeit #Get Results input_payload = json.dumps({ 'data': live_df.to_dict(), 'test': test_df.to_dict(), 'rec' : recommender_df.to_dict(), 'label_dict' : label_dict, 'scaler_dict' : scaler_dict #'data': test_df.to_dict(), #test input validation #'method': 'predict' # 'predict_proba' else 'predict'. 
}) output = service.run(input_payload) print(output)[[[0.102875255048275], [0.6256417036056519], [0.8166980147361755], [0.06229126453399658], [0.07255028933286667], [0.8700805306434631], [0.05808218941092491], [0.27427226305007935], [0.24265877902507782], [0.1904037445783615], [0.8177449703216553], [0.4425472021102905], [0.3032751679420471], [0.6422600746154785], [0.8337113261222839], [0.644760251045227], [0.9527812600135803], [0.623135507106781], [0.2013721466064453], [0.1313440352678299], [0.5220851302146912], [0.2463076263666153], [0.24713481962680817], [0.7322932481765747], [0.24610702693462372], [0.10759633779525757], [0.4224775731563568], [0.8915165066719055], [0.8992987871170044], [0.20888544619083405], [0.06251001358032227], [0.1293996274471283], [0.17920616269111633], [0.2851390838623047], [0.1732848882675171], [0.10876649618148804], [0.9177520275115967], [0.6685217022895813], [0.5593864917755127], [0.7411669492721558], [0.9748287200927734], [0.18991979956626892], [0.709888756275177], [0.15333294868469238], [0.5570212602615356][...]Delete Workspace#Delete Workspace, cleanup when finished #ws.delete()gbr2.fit(X_train2, y_train2)gbr.fit(X_train, y_train)gbr2.score(X_train2, y_train2)gbr.score(X_train, y_train) import pickle with open("models/2020-Jul-26-19-06-47.pkl", "rb") as data: model = pickle.load(data) model.n_features_Novo método de feat engfeat_eng = Enginerring(data) features = feat_eng.planning(params).\ convert_missing_to_class().\ # missing_class convert_missing_to_number().\ # missing_number_to_inf # imputation convert_binary_to_class().\ # binary_dummies # one-hot encoding convert_number_to_binary().\ # continuous_to_binary # binning combine_classes_to_binary().\ # unify_classes # binning order_class_levels().\ # factor_to_number # keep_original_feature().\ # identity transform_number().\ # scale_adjust create_missing_dummy().\ # insert_dummy_faltante create_custom_classes() # dummy_controlada import os os.chdir("..") import pandas as pd from calysto.engineering import Blueprint bpr = Blueprint(pd.DataFrame()) bpr.create_plan() bpr.impute_missing_as_category(["asdad"]).\ impute_missing_as_inf(["asdad"]).\ impute_missing_as_category(["asdad"]).\ impute_missing_as_number(["asd", "asdasd"], [4]) bpr.plan a = {} bpr. ok convert_missing_to_class().\ # missing_class # imputation # impute_categorical ok convert_missing_to_number().\ # missing_number_to_inf # imputation # impute_ ok convert_binary_to_class().\ # binary_dummies # one-hot encoding ok convert_number_to_binary().\ # continuous_to_binary # binning ok combine_classes_to_binary().\ # unify_classes # binning ok order_class_levels().\ # factor_to_number # ordering ok keep_original_feature().\ # identity # NA ok transform_number().\ # scale_adjust # scaling ok create_missing_dummy().\ # insert_dummy_faltante # binning ok create_custom_classes() # dummy_controlada # binning bpr.execute()Batch Bayesian Optimization with Kriging BeliverThis notebook details the procedure for performing the batched Bayesian optimization using the [Kriging Beliver](http://www.cs.ubc.ca/labs/beta/EARG/stack/2010_CI_Ginsbourger-ParallelKriging.pdf) algorithm. In short, the Kriging Beliver algorithm selects $q$ query points by (1) selecting the best cannidiate according to the acquisition function, (2) appending the predicted mean and variance of the selected point to the training data, (3) refitting the GPR given the augmented data set, and (4) repeating steps 1-3 until $q$ points have been selected for querying. 
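To make those four steps concrete, here is a minimal, hedged sketch of the Kriging Believer loop written against scikit-learn's `GaussianProcessRegressor` and framed as maximization. The `BayesianOptimizer.query` call used below wraps this same idea, so the helper names here (`expected_improvement`, `kriging_believer`) are illustrative, not the package's API:

```python
import numpy as np
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor

def expected_improvement(mu, sigma, y_best, epsilon=0.0):
    # Standard EI acquisition (maximization convention); larger is better.
    sigma = np.maximum(sigma, 1e-12)
    z = (mu - y_best - epsilon) / sigma
    return (mu - y_best - epsilon) * norm.cdf(z) + sigma * norm.pdf(z)

def kriging_believer(gpr, X_obs, y_obs, X_pool, q=3, epsilon=0.0):
    """Select q query points by repeatedly 'believing' the GP's own mean prediction."""
    X_aug, y_aug = np.asarray(X_obs).copy(), np.asarray(y_obs).copy()
    selected = []
    for _ in range(q):
        gpr.fit(X_aug, y_aug)                        # (3) refit on the augmented data
        mu, sigma = gpr.predict(X_pool, return_std=True)
        idx = np.argmax(expected_improvement(mu, sigma, y_aug.max(), epsilon))  # (1) best candidate
        selected.append(X_pool[idx])
        X_aug = np.vstack([X_aug, X_pool[idx]])      # (2) append the chosen point with its
        y_aug = np.append(y_aug, mu[idx])            #     predicted mean as the "belief"
    return np.array(selected)

# usage sketch: kriging_believer(GaussianProcessRegressor(), X, y, x, q=3, epsilon=0.1)
```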
Importsimport numpy as np import matplotlib.pyplot as plt from active_learning.BO import BayesianOptimizer from sklearn.gaussian_process.kernels import ConstantKernel, RBFHelper functionsdef f(x): """The function to predict.""" return -2 * (6*x - 2)**2 * np.sin(12*x - 4) def plot_results(X, y, dy, x, y_pred, sigma, new_p): plt.figure(figsize=(10,5)) plt.plot(x, f(x), 'r:', label=r'$f(x) = -2(6x-2)^2\,\sin(12x-4)$') plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label='Observations') plt.plot(x, y_pred, 'b-', label='Prediction') plt.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') plt.scatter(new_p, f(new_p), color='k', s=50, label='Query points') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.ylim(-40, 40) plt.legend(loc='upper left')Batched Bayesian Optimization Seed the model with N = 4 noisy observationsN = 4 # Number of points to sample X = np.random.sample(N) X = np.atleast_2d(X).T x = np.atleast_2d(np.linspace(0, 1, 1000)).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noiseInstantiate the BayesianOptimizer objectkernel = ConstantKernel() * RBF() BO = BayesianOptimizer(kernel=kernel)Fit the model given the observations. By default a vanilla RBF kernel is used, and the model is fit internally by maximizing the log marginal likelihood w.r.t. the kernel hyperparameters.BO.fit(X, y, std=dy**2)Once the model is fit, we can predict on a pool of possible points `x`.y_pred, sigma = BO.predict(x, return_std=True)We can now call `BO.query` to query our current model state and select `q = 3` points for sampling. These points are selected using the Kriging Believer algorithm by default. Set the `epsilon` value in the EI acquisition to `0.1` to encourage exploration.new_p = BO.query(x, q = 3, epsilon=0.1) new_pWe can now plot the results. The dotted red line is the ground-truth function we're aiming to fit, the solid blue line is the GPR prediction with its associated 95% confidence interval, the red points are the observations used to fit the GPR, and the black points are the points selected to be queried next.
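For reference, the textbook form of the expected-improvement acquisition with an exploration margin $\varepsilon$ (for maximization) is

$$
\mathrm{EI}(x) =
\begin{cases}
\big(\mu(x) - f(x^{+}) - \varepsilon\big)\,\Phi(z) + \sigma(x)\,\phi(z), & \sigma(x) > 0,\\
0, & \sigma(x) = 0,
\end{cases}
\qquad
z = \frac{\mu(x) - f(x^{+}) - \varepsilon}{\sigma(x)},
$$

where $f(x^{+})$ is the best value observed so far and $\Phi$, $\phi$ are the standard normal CDF and PDF. A larger $\varepsilon$ discounts the predicted improvement and pushes the EI maximizer toward points with higher predictive uncertainty; the exact convention implemented inside `BayesianOptimizer` may differ slightly.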
Iteration 1X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p)Notice how a diversity of points throughout the space are selected as a result of this batched selection.noise_new = 0.5 + 1.0 * np.random.random(new_p.shape) BO.teach(new_p, (f(new_p) + noise_new).flatten(), noise_new.flatten()**2) y_pred, sigma = BO.predict(x) new_p = BO.query(x, q = 3, epsilon=0.1)Iteration 2X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p) noise_new = 0.5 + 1.0 * np.random.random(new_p.shape) BO.teach(new_p, (f(new_p) + noise_new).flatten(), noise_new.flatten()**2) y_pred, sigma = BO.predict(x) new_p = BO.query(x, q = 3, epsilon=0.1)Iteration 3X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p) noise_new = 0.5 + 1.0 * np.random.random(new_p.shape) BO.teach(new_p, (f(new_p) + noise_new).flatten(), noise_new.flatten()**2) y_pred, sigma = BO.predict(x) new_p = BO.query(x, q = 3, epsilon=0.1)Iteration 4X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p) noise_new = 0.5 + 1.0 * np.random.random(new_p.shape) BO.teach(new_p, (f(new_p) + noise_new).flatten(), noise_new.flatten()**2) y_pred, sigma = BO.predict(x) new_p = BO.query(x, q = 3, epsilon=0.1)Iteration 5X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p) noise_new = 0.5 + 1.0 * np.random.random(new_p.shape) BO.teach(new_p, (f(new_p) + noise_new).flatten(), noise_new.flatten()**2) y_pred, sigma = BO.predict(x) new_p = BO.query(x, q = 3, epsilon=0.1)Iteration 6X = BO.X y = BO.y dy = BO.sigma plot_results(X, y, dy, x, y_pred, sigma, new_p)Checking `src/data`annotation = load_annotation.get_annotation() complete_ids = load_annotation.get_complete_ids(category='Urination') water_distance = load_water_distance.get_water_distance_raw( user_id=1861) water_distance = load_water_distance.get_water_distance_clean( user_id=1861) radar_raw = load_radar.get_radar_raw(1861) radar_sum = load_radar.get_radar_sum_clean(1861) seat_raw = load_weight_sensor.get_seat_weight_raw(1861) foot_raw = load_weight_sensor.get_foot_weight_raw(1861) seat_clean, foot_clean = load_weight_sensor.get_seat_and_foot_weight_clean(1861) total_clean = load_weight_sensor.get_total_weight_clean(1861)Checking `src/make_dataset.py`from src.make_dataset import RandomForestDataset, RandomForestExtended dataset_config = { 'USER_IDS': [1831, 1863], 'SOURCE_NAMES': ['TotalWeight', 'WaterDistance', 'RadarSum', 'AudioDelay4'], 'FEATURE_NAMES': ['Max', 'Min', 'Mean', 'Median', 'LogVariance', 'LinearTrend'], 'CATEGORY': 'Urination', 'WINDOW_SECONDS': 3, 'HOP_SECONDS': 1 } dataset = RandomForestExtended(dataset_config) features, labels = dataset.get_features_and_labels_from_users() features.shape, labels.shapeQ2.8 Loop DetectionGiven a circular linked list, return the node at the beginning of the loope.g. - A -> B -> C -> D -> E -> C- return CInput: Linked List headReturn: Node nodefrom myLinkedLists import SinglyLinkedList, Node """ Solution1: Assume loop exist. Traverse the list, store visited node in a hashset, if visited, return node space O(n) time O(n) """ def getLoopHead(head): visited = set() curr = head while curr not in visited: visited.add(curr) curr = curr.next return curr """ Solution2: 1. Two pointers, fast and slow, fast visits all even nodes, slow visits one by one, 2. when fast meets slow - slow move one step a time - fast starts from head and move one step a time 3. 
the next time fast and slow meet is the beginning of the loop space O(1) time O(n) """ def getLoopHead2(head): if not head: return None fast = head slow = head # check if loop exist, update first, otherwise fast = slow = head -> break while fast.next: fast = fast.next.next slow = slow.next if fast == slow: break if not fast.next: # did not meet, no loop return None # next meeting point is the start of the loop fast = head while fast != slow: fast = fast.next slow = slow.next # return any of fast or slow return fastProve of solution2:Assumption:1. k is the length from head to start of the loop2. M is the loop size1 -> 2 -> .. -> k -> k+1 -> ... -> k+M-1 -> kTry to find when will fast and slow meet:1. when slow moves k steps, 1. slow is at the start of loop 2. fast moves 2k steps 3. fast is at the k%M position in the loop2. Assume when slow moves k+x steps, fast and slow meet. 1. slow is at (x%M) in the loop 2. fast moves 2(k+x) steps => (k+2x)%M in the loop 3. fast and slow meet means they are at the same pos in the loop: x%M = (k+2x)%M => k+x = CM 4. This means if you move k+x from start of loop, it goes back to the beginning.Currently, slow has moved x steps in side the loop, therefore, if it moves k more steps, it reaches the start of the loop. (k+x = CM)If we let fast start from head of list and move k steps, it reaches the start of loop as well! (see Assumption1)Finally, if fast and slow both move k steps, they will meet at the start of the loop.Return any if they meet! Testingdef getRefs(head): refs = [] while head: refs.append(str(id(head))) head = head.next return ' -> '.join(refs) def getNode(head, idx): node = head for i in range(idx): if not node: return None node = node.next return node def appendNode(head, idx): # given a linked list head and a idx, append node at idx to the end of the linked list node = getNode(head, idx) tail = head while tail.next: tail = tail.next tail.next = node return id(node) def test(test_lists, func): total = len(test_lists) correct = 0 for l, idx in test_lists: sll = SinglyLinkedList(l) print('sll: ', getRefs(sll.head), end='') nodeid = appendNode(sll.head, idx) print('->', nodeid) print('Loop Head: ', nodeid) node = func(sll.head) print('res: ', id(node)) curr = id(node) == nodeid correct += curr print(curr, '\n', '-'*50, sep='') print(f'{correct}/{total}') if correct == total: print('All passed') test_lists = [ ([1,2,3,4,5], 2), ([1,2,3,4], 2), ([1,1,1,1,1,1], 2), ([2,4,6,4,2,1,3,52,100,24], 3), ([2,4,6,4,2,1,3,52,100], 3) ] test(test_lists, getLoopHead2)sll: 140413814686664 -> 140413814689296 -> 140413814687560 -> 140413814686272 -> 140413814689520-> 140413814687560 Loop Head: 140413814687560 res: 140413814687560 True -------------------------------------------------- sll: 140413814688176 -> 140413814689408 -> 140413814688568 -> 140413814687672-> 140413814688568 Loop Head: 140413814688568 res: 140413814688568 True -------------------------------------------------- sll: 140413814686664 -> 140413814686328 -> 140413814686552 -> 140413815334728 -> 140413815333496 -> 140413815335400-> 140413814686552 Loop Head: 140413814686552 res: 140413814686552 True -------------------------------------------------- sll: 140413814688176 -> 140413814689296 -> 140413815334784 -> 140413815334336 -> 140413815334616 -> 140413815334000 -> 140413815334504 -> 140413815336800 -> 140413815336856 -> 140413815336072-> 140413815334336 Loop Head: 140413815334336 res: 140413815334336 True -------------------------------------------------- sll: 14041381468[...]Examples on how to 
use this package:from circuit import CircuitCircuit Creationcirc = Circuit(3,5) circ.X(2) circ.H() circ.barrier()Getting the Current Statecirc.get_state() circ.get_state(output='tensor')Execute Simulationcirc.execute(num_instances=800)Code To OpenQASMprint(circ.compile())OPENQASM 2.0; include "qelib1.inc"; qreg q[3] creg c[5] x q[2]; h q[None]; barrier q;Measurementcirc = Circuit(2, 3) circ.X(1) # measure qubit one and write the result to classical bit 1 circ.measure(0, 1) circ.bits[1]Data Constructs Python EnvironmentsThere are a few different ways that you can interact with python:  CLI = command line interface- Type "python" from your command line- You are now in a python interpreter- This is great for testing out code- Does not persist after you close this session Python IDE's = integrated development environment- Self-contained development environments- A stand-alone program/envirnoment- Save code to work on between sessions- Test and run code- Syntax/error checking- Learning curve for setting up and using- Recommended IDE's include: Spyder, PyCharm, Eclipse Python scripts- You can distribute these as programs that perform a task - Can gather user input- Can be re-run- Standalone, run using exe or command line interface- Usually the final product Jupyter Notebook- Document your code with text as you go along- Test and view output as you develop- Great for analysis, scientific research- View graphs and tables inline- Not great for larger datasets VariablesThink of variables as containers. You create them, you name them, and you determine what value they will store. Naming variablessteven= 17 steven my_var = 5 my_varmy_var is a made up name for your variable. You can call it whatever you want.rando_var = 5 steve = 5 rando_var steveReserved WordsWords used to trigger an action cannot be used as variable names:- and- as- break- del- true- false- int- strbreak = 'test'Statements and Commands StatementA complete instruction, like a sentence in English:print("Go fish!")Go fish!Command (aka Executing a Function)Think of a command as a verb; you are telling python to do something.print() round() sum()- Always lowercase- Always followed by a parenthesis- Called a function, or method ExpressionsExpressions don't have any action to them, but they contain values."Go fish!" 27 + 12 Line ContinuationLine Continuation CharactersTo make things pretty, or to continue a long function on the next line, use "\":def my_long_function(): print("My string \ is really long \ and has to be continued \ on another line")Documenting your codeIt's always good coding practice to make notes that help people use and understand your code. There are two ways to let the python interpreter know that you want to it to ignore something: DocstringsDocstrings are used for longer comments, often at the beginning of a script, to let people know: - how to run your script- dependencies- versions''' Docstrings are enclosed between a series of three single quotes. Anything entered here is a comment and is skipped over by the python interpreter. '''CommentsFor leaving quick notes within your code itself, start a a comment with a hash sign.# this is me designing a silly variable bob = "steve"Running Your First ModelsIn this notebook, we demonstrate the basic use of UCLCHEM's python module by running a simple model and then using the analysis functions to examine the output.import uclchemA Simple CloudUCLCHEM's `cloud()` model is a spherical cloud of isothermal gas. 
We can keep a constant density or have it increase over time following a freefall equation. This model is generally useful whenever you want to model a homogeneous cloud of gas under constant conditions. For example, in the inner parts of a molecular cloud where Av $\gtrsim$ 10 there are very few depth dependent processes. You may wish to model the whole of this UV shielded portion of the cloud with a single `cloud()` model.Due to the large number of parameters in a chemical model and the way fortran and python interaction, we find it is easiest to do parameter input through python dictionaries. In this block, we define param_dict which contains the parameters we wish to modify for this run. Every `uclchem.model` function accepts a dictionary as an optional argument. Every parameter has a default value which is overriden if that parameter is specified in this dictionary. You can find a complete list of modifiable parameters and their default values in [our parameter docs](/docs/parameters).# set a parameter dictionary for phase 1 collapse model out_species = ["SO","CO"] param_dict = { "endAtFinalDensity": False,#stop at finalTime "freefall": False,#don't increase density in freefall "initialDens": 1e4, #starting density "initialTemp": 10.0,#temperature of gas "finalTime": 1.0e6, #final time "rout":0.1, #radius of cloud in pc "baseAv":1.0, #visual extinction at cloud edge. "outputFile": "../examples/test-output/static-full.dat",#full UCLCHEM output "abundSaveFile": "../examples/test-output/startstatic.dat",#save final abundances to file } result = uclchem.model.cloud(param_dict=param_dict,out_species=out_species) print(result)[1, 3.331065659699488e-11, 3.576305642378658e-05]Checking the outputAt the end of the previous cell, we printed `result` which is a list returned by every UCLCHEM model function. The first element is always an integer which will be positive if the code completed and negative otherwise. You can send negative values to `uclchem.utils.check_error()` to get a more detailed error message.The subsequent elements are the final abundances of any species listed in `out_species`, in this case we have the abundance of SO and CO. This is useful when we want to use UCLCHEM as part of something like an MCMC procedure, obtaining abundances for given parameters. However, we also write the final abundances of all species to `abundSaveFile` and the abundances of all species at every time step in `outputFile` so it is not necessary to acquire abundances in this way.The output file is just a simple csv with some header rows, UCLCHEM has a utility function to read that file into a pandas dataframe. Let's load it up and look at it.result_df=uclchem.analysis.read_output_file("../examples/test-output/static-full.dat") result_df.head()We can also test whether the model run went well by checking for element conservation. We do this because integrator errors often show up as a failure to conserve elemental abundances. We can use `check_element_conservation()` to test whether we conserve elements in this run. This function returns a dictionary where each entry gives the change in the total abundance of an element as a percentage of the original abundance. In an ideal case, these values are 0\% indicating the total abundance at the end of the model is exactly the same as the total at the start.Changes of less than 1\% are fine for many cases but if they are too high, you could consider changing the `reltol` and `abstol` parameters that control the integrator accuracy. 
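As a purely hypothetical illustration (the numeric values below are arbitrary, not recommended settings), tightening the integrator would just mean adding `reltol` and `abstol` to the parameter dictionary like any other parameter:

```python
# Hypothetical example: reuse param_dict and out_species from the cells above
# and tighten the integrator tolerances; the values are illustrative only.
tighter_params = dict(param_dict)
tighter_params["reltol"] = 1e-8    # assumed smaller relative tolerance
tighter_params["abstol"] = 1e-16   # assumed smaller absolute tolerance
result = uclchem.model.cloud(param_dict=tighter_params, out_species=out_species)
```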
They are error tolerance so smaller values lead to smaller errors and (usually) longer integration times. The default values were chosen by running a large grid of models and choosing the tolerances with the lowest average run time from those that conserved elements well and rarely failed. Despite this, there are no one-size-fits-all perfect tolerances and you may run into issues with different networks or models.conservation=uclchem.analysis.check_element_conservation(result_df,element_list=["H","N","C","O","S"]) print("Percentage change in total abundances:") print(conservation)Percentage change in total abundances: {'H': '0.001%', 'N': '0.000%', 'C': '0.000%', 'O': '0.000%', 'S': '0.000%'}Plotting ResultsFinally, you will want to plot your results. This can be done with any plotting library but UCLCHEM does provide a few functions to make quick plots. Note the use of $ symbols in the species list below, this gets the total ice abundance of a species. For two phase models, this is just the surface abudance but for three phase it is the sum of surface and bulk.species=["H","H2","$H","$H2","H2O","$H2O","CO","$CO","$CH3OH","CH3OH"] fig,ax=uclchem.analysis.create_abundance_plot(result_df,species,figsize=(10,7)) ax=ax.set(xscale="log",ylim=(1e-15,1),xlim=(1e3,1e6))Mengingat NumPyimport numpy as np np_tinggi = np.array([1.73, 1.68, 1.71, 1.89, 1.79]) np_berat = np.array([65.4, 59.2, 63.6, 88.4, 68.7]) bmi = np_berat / np_tinggi ** 2 bmi bmi > 23 bmi[bmi > 23]Komparasi Numerik2 < 3 2 == 3 2 <= 3 3 <= 3 x = 2 y = 3 x < yKomparasi yang lain'dian' < 'diantemi' 3 < 'dian' 3 < 4.1 bmi bmi > 23 # < # <= # > # >= # == # !=Boolean Operator# and, or, not True and True False and True True and False False and False x = 12 x > 5 and x < 15 True or True False or True True or False False or False y = 5 y < 7 or y > 13 not True not FalseKembali ke NumPybmi bmi > 21 bmi < 22 bmi > 21 and bmi <22 np.logical_and(bmi > 21, bmi < 22) #np.logical_and np.logical_or np.logical_or bmi[np.logical_and(bmi > 21, bmi < 22)]if, elif, else (conditional statement)z = 4 if z % 2 == 0 : print("z adalah genap") z = 4 if z % 2 == 0: print("Mengecek " + str(z)) print("z adalah genap") z = 5 # TIDAK TEREKSEKUSI KARENA FALSE if z % 2 == 0: print("Mengecek " + str(z)) print("z adalah genap") z = 5 if z % 2 == 0 : print("z adalah genap") else : print("z adalah ganjil") z = 3 if z % 2 == 0: print("z habis dibagi 2") elif z % 3 == 0: print("z habis dibagi 3") else : print("z tidak habis dibagi 2 maupun 3") z = 6 if z % 2 == 0: print("z habis dibagi 2") elif z % 3 == 0: print("z habis dibagi 3") else : print("z tidak habis dibagi 2 maupun 3")z habis dibagi 2Filtering pandasimport pandas as pd df = pd.read_csv('negara.csv', index_col=0) df- Tujuan: Mencari Negara dengan dengan luas diatas 8 juta km^2df['area'] # alternatif df.loc[:,"area"] atau df.iloc[:,2] df['area'] > 8 cek_gedene = df['area'] > 8 df[cek_gedene]Menggunakan Boolean Operatorimport numpy as np np.logical_and(df["area"] > 8, df["area"] < 10) df[np.logical_and(df["area"] > 8, df["area"] < 10)]*Data Science Unit 1 Sprint 3 Assignment 1* Apply the t-test to real dataYour assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). 
Be aware - there are missing values!Your goals:1. Load and clean the data (or determine the best method to drop observations when running tests)2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.013. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.014. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)5. Practice 1-sample t-testsNote that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.Stretch goals:1. Refactor your code into functions so it's easy to rerun with arbitrary variables2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)3. Add visualsimport pandas as pd import numpy as np import scipy import seaborn as sns import matplotlib.pyplot as plt from scipy.stats import ttest_1samp from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel !wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data #import with column names and NaN values. df = pd.read_csv('house-votes-84.data', header=None, na_values='?', names=['party','handicapped-infants','water-project', 'budget','physician-fee-freeze', 'el-salvador-aid', 'religious-groups','anti-satellite-ban', 'aid-to-contras','mx-missile','immigration', 'synfuels', 'education', 'right-to-sue','crime','duty-free', 'south-africa']) df.head() #str to int df = df.replace({'y': 1, 'n':0}) #Separate into our 'Samples' dems = df[df['party'] == 'democrat'] reps = df[df['party'] == 'republican'] cols=['handicapped-infants','water-project', 'budget','physician-fee-freeze', 'el-salvador-aid', 'religious-groups','anti-satellite-ban', 'aid-to-contras','mx-missile','immigration', 'synfuels', 'education', 'right-to-sue','crime','duty-free', 'south-africa'] dems.head() reps.head() #looking at output and practice. ttest_ind(reps['budget'], dems['budget'], nan_policy='omit') #Let's do some comprehension to speed things up. results = {column : ttest_ind(reps[column], dems[column], nan_policy='omit') for column in cols} results #could make this even more general as a function. 
def get_test_scores(columns_as_list, DataFrame1, DataFrame2, nan_policy): return {col : ttest_ind(DataFrame1[col], DataFrame2[col], nan_policy=nan_policy) for col in columns_as_list} get_test_scores(cols, reps, dems, 'omit') #Dems support more print(f"Dems support 'budget' more than republicans with a score of {results['budget']}") #Reps Support more print(f"Republicans support 'crime' more than republicans with a score of {results['crime']}") #Pretty equal on support print(f"Dems and republicans seem to both support 'water-project with a score of {results['water-project']}")Dems support 'budget' more than republicans with a score of Ttest_indResult(statistic=-23.21277691701378, pvalue=2.0703402795404463e-77) Republicans support 'crime' more than republicans with a score of Ttest_indResult(statistic=16.342085656197696, pvalue=9.952342705606092e-47) Dems and republicans seem to both support 'water-project with a score of Ttest_indResult(statistic=0.08896538137868286, pvalue=0.9291556823993485)1) Null Hypothesis:In 1-sample t-tests YOU GET TO CHOOSE YOUR NULL HYPOTHESISH0 : 0.0 - There is ZERO republican support for the duty-free bill2) Alternative HypothesisHa : x¯≠0 - There is non-zero support for the budget bill among repulbicans.3) Confidence Level: 95% or .95ttest_1samp(reps['duty-free'], 0, nan_policy='omit')4) t-statistic: 3.95) p-value of .00013809--- Conclusion: Due to a p-value of near-zero, I reject the null hypothesis that republican support is zero and conclude that republican support is non-zero. 1) Null Hypothesis:In 1-sample t-tests YOU GET TO CHOOSE YOUR NULL HYPOTHESISH0 : 0.0 - There is ZERO republican support for the crime bill2) Alternative HypothesisHa : x¯≠0 - There is non-zero support for the budget bill among Democrats.3) Confidence Level: 95% or .95ttest_1samp(dems['crime'],0, nan_policy='omit')4) t-statistic: 11.745) p-value of 9.087409645908879e-26---Conclusion: Due to a p-value of near-zero, I reject the null hypothesis that democrat support is zero and conclude that republican support is non-zero.Prepare master tableimport os import pandas as pd import numpy as np import pickle import lap_v2_py3 as lap_v2 reprocess_new_basis = False #Folder with data: folder = '../Data/' if reprocess_new_basis: #Load in the conversion table conv = pd.read_csv(folder+'ann.csv',usecols=[1,2],index_col=0,squeeze=True) #Throw out ambiguous reads conv = conv[~conv.index.duplicated(keep=False)] #Load in the new basis data LU = pd.read_excel(folder+'InVivo.xlsx',sheetname='LU_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1) FG = pd.read_excel(folder+'InVivo.xlsx',sheetname='LU_POS vs FG',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1) TH = pd.read_excel(folder+'InVivo.xlsx',sheetname='TH_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1) BR = pd.read_excel(folder+'InVivo.xlsx',sheetname='BR_POS vs ECT',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1) ECT = pd.read_excel(folder+'InVivo.xlsx',sheetname='BR_POS vs ECT',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1) newdict = {'E.9 Lung Prim Nkx+':LU,'E.13 Thyroid':TH,'E.8.25 Foregut Endoderm':FG,'E.9 Forebrain':BR,'E.8.25 Ectoderm':ECT} #Reindex using Entrez ID's, add name, and average duplicates for name in newdict.keys(): newdict[name].index=conv newdict[name].dropna(inplace=True) newdict[name].name = name temp = newdict[name].copy() newdict[name] = newdict[name][~newdict[name].index.duplicated()] for item 
in newdict[name].index: newdict[name].loc[item] = temp.loc[item].mean() del temp f = open(folder+'NewBasis.dat','wb') pickle.dump(newdict,f) f.close() else: f = open(folder+'NewBasis.dat','rb') newdict = pickle.load(f) f.close() #%% Load in the basis data basis = pd.read_csv(folder+'Mouse_Basis_Data.txt',delimiter='\t',index_col=0,usecols=[0]+list(range(3,64))) thresh = 1 # Append new basis data basis_new = basis.copy() newdict_log = {} for name in newdict.keys(): newdict_log[name] = np.log2(newdict[name]+1) basis_new = basis_new.join(newdict_log[name][newdict_log[name]>thresh],how='inner') basis_new.dropna(inplace=True) #Load Keri's data keri_index = pd.read_table('../Alex_Analysis/Final_Data/entrez_id.txt',index_col=None,header=None).squeeze().values keri_label = pd.read_table('../Alex_Analysis/Final_Data/keri_cell_lbl.txt',index_col=None,header=None).squeeze() keri = pd.read_table('../Alex_Analysis/Final_Data/keri_ranknorm_data_corr.txt',index_col=None,header=None) keri.index=keri_index keri = keri.rename(columns=keri_label) # Load original project data data = pd.read_excel(folder+'InVitroNew.xlsx',index_col=0,usecols=[0]+list(range(38,50))) #Load new data, reindex using Entrez ID's, and average duplicates #Load in the conversion table conv = pd.read_table(folder+'AnnotationTable_mogene10sttranscriptcluster.txt',index_col=1,usecols=[2,3],squeeze=True) #Throw out ambiguous reads conv = conv[~conv.index.duplicated(keep=False)] data_new = pd.read_excel(folder+'rlog_7-2018.xlsx',index_col=0).reindex(index=conv.keys()) data_new.index=conv data_new.dropna(inplace=True) data_new = data_new[(data_new.T != 0).any()] temp = data_new.copy() data_new = data_new[~data_new.index.duplicated()] #Right now, not averaging duplicates, because it is slow and doesn't matter #for label in data_new.keys(): # for item in data_new.index: # data_new.loc[item,label] = temp[label].loc[item].mean() del temp #Load old test data, reindex using Entrez ID's, and average duplicates conv_probe = pd.read_table(folder+'AnnotationTable_mogene10sttranscriptcluster.txt',index_col=0,usecols=[0,2],squeeze=True) data_old = pd.read_table(folder+'InVitroOld.txt',index_col=0,skipfooter=1,engine='python').reindex(index=conv_probe.keys()) data_old.index = conv_probe data_old_dedup = data_old[~data_old.index.duplicated()].copy() #for item in data_old_dedup.index: # data_old_dedup.loc[item] = data_old.loc[item].mean() # Combine into master table master_table = basis_new.join(data,how='inner') master_table = master_table.join(data_new,how='inner') master_table = master_table.join(data_old_dedup,how='inner') master_table = master_table.join(keri,how='inner') # Rank-norm data N = np.shape(master_table)[0] for item in master_table: master_table[item] = lap_v2.rank_norm(np.asarray(master_table[item]), dist='normal',norm=N) data_keys = data.keys() data_keys = data_keys.append(data_old.keys()) data_keys = data_keys.append(data_new.keys()) data_keys = data_keys.append(keri.keys()) f = open(folder+'master_table.dat','wb') pickle.dump([master_table,data_keys],f) f.close()Make LAP function for pandasimport os os.chdir('/Users/robertmarsland/Dropbox/BU/Laertis-Analysis/Alex_Analysis/Code/') import pandas as pd import numpy as np import pickle import lap_v2 import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('ggplot') %matplotlib inline #Folder with data: folder = '../../Data/' f = open(folder+'master_table.dat','rb') master_table, data_keys = pickle.load(f) f.close() master_table.dropna(inplace=True) #Remove Nkx2-1 
master_table.drop(21869,axis=0,inplace=True) def pd_lap(names, exceptions = None, data = master_table): basis = data.drop(data_keys,1) if exceptions != None: reduced_basis = basis.drop(exceptions,1) else: reduced_basis = basis [a, A, eta] = lap_v2.lap(reduced_basis.values, data[names].values, full_output=True) projections = pd.DataFrame(data = a, index = reduced_basis.keys(), columns = names) return projectionsAverage groups of cell types that are too similardef average_groups(data,name_dict): names_all = [item for sublist in name_dict.values() for item in sublist] reduced_data = data.drop(names_all,1) for name in name_dict: reduced_data = reduced_data.join(pd.DataFrame(data = data[name_dict[name]].mean(1), columns = [name])) return reduced_data namedict = {'epithelial':['medullary thymic epithelial', 'terminal bronchiolar epithelial cells', 'endothelial cell - gonad', 'endothelial cell - blood vessel', 'endothelial cell - lymphatic vessel'], 'monocytes':['monocytes - classical', 'Granulocyte-Monocyte Progenitor (GMP)'], 'adipose':['adipose - brown','adipose - white'], 'killer cells':['natural killer cells','T Cell'], 'intestine':['intestine - small','intestine - large'], 'neuron':['neuron - glutamatergic','nsc'], 'fibroblast':['fibroblast - thymus','fibroblast - skin','fibroblast - cardiac']} table_fixed = average_groups(master_table,namedict)Apply to datathyroid_protocol = pd_lap(['BMP4+FGF2 mCherry+ 1','BMP4+FGF2 mCherry+ 2','BMP4+FGF2 mCherry+ 3','BMP4+FGF2 mCherry- 1','BMP4+FGF2 mCherry- 2','BMP4+FGF2 mCherry- 3'], data = master_table) lung_protocol = pd_lap(['BMP4+Wnt3A mCherry+ 1','BMP4+Wnt3A mCherry+ 2','BMP4+Wnt3A mCherry+ 3','BMP4+Wnt3A mCherry- 1','BMP4+Wnt3A mCherry- 2','BMP4+Wnt3A mCherry- 3'], data = master_table) #old_protocol = pd_lap(['GSM861609','GSM861610','GSM861611','GSM861612','GSM861613','GSM861615'], # data = master_table) new_protocols = pd_lap(['3D_EpCAM_2', '3D_EpCAM_3', '3D_EpCAM_4', '3D_mCherry-EpCAM_2','3D_mCherry-EpCAM_3', '3D_mCherry-EpCAM_4', 'Gelatin_mCherry-EpCAM_2','Gelatin_mCherry-EpCAM_3', 'Gelatin_mCherry-EpCAM_4'], data = master_table) keri_protocol = pd_lap(['D1-1','D1-2','D7-1','D7-2','D7-3','D14-1','D14-2'],data = master_table) thyroid_protocol = pd_lap(['BMP4+FGF2 mCherry+ 1','BMP4+FGF2 mCherry+ 2','BMP4+FGF2 mCherry+ 3','BMP4+FGF2 mCherry- 1','BMP4+FGF2 mCherry- 2','BMP4+FGF2 mCherry- 3'], exceptions = ['Common Lymphoid Progenitor (CLP)','thyroid','lung','HSC','sperm','oocyte','mef'], data = table_fixed) lung_protocol = pd_lap(['BMP4+Wnt3A mCherry+ 1','BMP4+Wnt3A mCherry+ 2','BMP4+Wnt3A mCherry+ 3','BMP4+Wnt3A mCherry- 1','BMP4+Wnt3A mCherry- 2','BMP4+Wnt3A mCherry- 3'], exceptions = ['Common Lymphoid Progenitor (CLP)','thyroid','lung','HSC','sperm','oocyte','mef'], data = table_fixed) #old_protocol = pd_lap(['GSM861609','GSM861610','GSM861611','GSM861612','GSM861613','GSM861615'], # exceptions = ['Common Lymphoid Progenitor (CLP)','thyroid','lung','HSC','sperm','oocyte','mef'], # data = table_fixed) new_protocols = pd_lap(['3D_EpCAM_2', '3D_EpCAM_3', '3D_EpCAM_4', '3D_mCherry-EpCAM_2','3D_mCherry-EpCAM_3', '3D_mCherry-EpCAM_4', 'Gelatin_mCherry-EpCAM_2','Gelatin_mCherry-EpCAM_3', 'Gelatin_mCherry-EpCAM_4'], exceptions = ['Common Lymphoid Progenitor (CLP)','thyroid','lung','HSC','sperm','oocyte','mef'], data = table_fixed) keri_protocol = pd_lap(['D1-1','D1-2','D7-1','D7-2','D7-3','D14-1','D14-2'], exceptions = ['Common Lymphoid Progenitor (CLP)','thyroid','lung','HSC','sperm','oocyte','mef'], data = table_fixed) 
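For clarity, here is a tiny self-contained toy showing what `average_groups` above does: the grouped columns are dropped and replaced by a single column holding their row-wise mean. The column names mirror `namedict`; the numbers are made-up toy values.

```python
# Toy illustration of the column-averaging idea behind average_groups.
import pandas as pd

toy = pd.DataFrame({'adipose - brown': [1.0, 2.0],
                    'adipose - white': [3.0, 4.0],
                    'thyroid': [5.0, 6.0]})
groups = {'adipose': ['adipose - brown', 'adipose - white']}

reduced = toy.drop(columns=[c for cols in groups.values() for c in cols])
for name, cols in groups.items():
    reduced[name] = toy[cols].mean(axis=1)   # row-wise mean of the group

print(reduced)   # columns: 'thyroid' and 'adipose' (mean of brown/white)
```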
fig,axs=plt.subplots(1,4,figsize=(24,32)) new_protocols.plot.barh(ax=axs[0]) thyroid_protocol.plot.barh(ax=axs[1]) lung_protocol.plot.barh(ax=axs[2]) keri_protocol.plot.barh(ax=axs[3]) axs[0].set_xlim((-0.2,0.7)) axs[1].set_xlim((-0.2,0.7)) axs[2].set_xlim((-0.2,0.7)) axs[3].set_xlim((-0.2,0.7)) axs[1].set_yticklabels(()) axs[2].set_yticklabels(()) axs[3].set_yticklabels(()) axs[0].set_title('New Data') axs[1].set_title('Old Thyroid Protocol') axs[2].set_title('Old Lung Protocol') axs[3].set_title('Keri Data') plt.show()Project noise Multiplicative noise Make noise amplitude proportional to square root of mean abundance. Assume rank-norm correctly extracts original abundance, with mean subtracted off. Add the mean back in (wild guess at 5 times standard deviation of expression levels) before taking square root. These plots show the result when constant of proportionality is set to 0.1, so that noise amplitude at high expression level (4 standard deviations out) is equal to 1/5 of the standard deviation.N = np.shape(master_table)[0] noise = np.random.randn(N,3)#*np.sqrt(master_table[['BMP4+FGF2 mCherry+ 1','BMP4+FGF2 mCherry+ 2','BMP4+FGF2 mCherry+ 3']].values+5.) [a, A, eta] = lap_v2.lap(basis_new.values, noise, full_output=True) projections = pd.DataFrame(data = a, index = basis_new.keys(), columns = ['Noise1','Noise2','Noise3']) projections.plot.barh(figsize=(6, 24)) plt.show()Project everything onto everythingitem = basis_new.keys()[0] reduced_basis = basis_new.drop(item,1) [a, A, eta] = lap_v2.lap(reduced_basis.values, master_table[item].values, full_output=True) projections = pd.DataFrame(data = a, index = reduced_basis.keys(), columns = [item]) for item in basis_new.keys()[1:]: reduced_basis = basis_new.drop(item,1) [a, A, eta] = lap_v2.lap(reduced_basis.values, master_table[item].values, full_output=True) projections_new = pd.DataFrame(data = a, index = reduced_basis.keys(), columns = [item]) projections = projections.join(projections_new) projections = projections.drop('oocyte',1) projections[projections.isnull()] = 0 plt.pcolor(projections) plt.colorbar() plt.show() plt.hist(projections) pairs=[[]] select = projections.values > 0.5 for j in range(len(projections.keys())): for k in range(len(projections.keys())): if select[j,k]: print projections.keys()[k] + ' on ' + projections.keys()[j] 'adipose':['adipose - brown','adipose - white'], 'killer cells':['natural killer cells','T Cell'], 'intestine':['intestine - small','intestine - large'], 'neuron':['neuron - glutamatergic','nsc'] projections[['monocytes - classical','fibroblast - skin','neuron - glutamatergic']].plot.barh(figsize=(6, 24)) plt.show()An Introduction to `repro_eval` This notebook introduces the functionalities of `repro_eval`. We provide sample data that has to be downloaded in advance, but it is also possible to upload your runs and evaluate the reproducibilty of your experiments with this notebook. Install `repro_eval` via PyPI!pip install repro_eval==0.1Download the sample data and extract it!wget https://www.dropbox.com/s/ncu49e91mosidei/data.tar.gz !tar -xzvf ./data.tar.gzImportsOnce installed, the Evaluator classes for the evaluation of reproducibility and replicability can be imported. 
In this notebook, we also include other Python packages that are not necessarily required when using `repro_eval` for your experiments.from repro_eval.Evaluator import RpdEvaluator, RplEvaluator from repro_eval.util import arp, arp_scores, print_base_adv, print_simple_line, trim import pytrec_eval import pandas as pd from matplotlib import pyplot as plt import seaborn as sns sns.set() sns.set_style('whitegrid') colors = sns.color_palette()Path definitionYou can modify these paths and adapt them to your experiments. The entire notebook should be usable with your experiments when they comply with the given evaluation scenario. First, we need two kind of runs - a baseline run and an advanced run (that outperforms the baseline run). Second, for the evaluation of replicability, the replicated runs should be derived from another target collection. The dictionaries `runs_rpd` and `runs_rpl` contain runs with different parametrizations, but it should also be possible to include just one version for both the baseline and advanced run.QREL = './data/qrels/core17.txt' QREL_RPL = './data/qrels/core18.txt' ORIG_B = './data/runs/orig/input.WCrobust04' ORIG_A = './data/runs/orig/input.WCrobust0405' RPD_B = './data/runs/rpd/14/irc_task1_WCrobust04_001' RPD_A = './data/runs/rpd/14/irc_task1_WCrobust0405_001' RPL_B = './data/runs/rpl/14/irc_task2_WCrobust04_001' RPL_A = './data/runs/rpl/14/irc_task2_WCrobust0405_001' MEASURE = 'ndcg' runs_rpd = { 'rpd_wcr04_tf_1': {'path': './data/runs/rpd/45/irc_task1_WCrobust04_001'}, 'rpd_wcr0405_tf_1': {'path': './data/runs/rpd/45/irc_task1_WCrobust0405_001'}, 'rpd_wcr04_tf_2': {'path': './data/runs/rpd/46/irc_task1_WCrobust04_001'}, 'rpd_wcr0405_tf_2': {'path': './data/runs/rpd/46/irc_task1_WCrobust0405_001'}, 'rpd_wcr04_tf_3': {'path': './data/runs/rpd/47/irc_task1_WCrobust04_001'}, 'rpd_wcr0405_tf_3': {'path': './data/runs/rpd/47/irc_task1_WCrobust0405_001'}, 'rpd_wcr04_tf_4': {'path': './data/runs/rpd/48/irc_task1_WCrobust04_001'}, 'rpd_wcr0405_tf_4': {'path': './data/runs/rpd/48/irc_task1_WCrobust0405_001'}, 'rpd_wcr04_tf_5': {'path': './data/runs/rpd/49/irc_task1_WCrobust04_001'}, 'rpd_wcr0405_tf_5': {'path': './data/runs/rpd/49/irc_task1_WCrobust0405_001'} } runs_rpl = { 'rpl_wcr04_tf_1': {'path': './data/runs/rpl/45/irc_task2_WCrobust04_001'}, 'rpl_wcr0405_tf_1': {'path': './data/runs/rpl/45/irc_task2_WCrobust0405_001'}, 'rpl_wcr04_tf_2': {'path': './data/runs/rpl/46/irc_task2_WCrobust04_001'}, 'rpl_wcr0405_tf_2': {'path': './data/runs/rpl/46/irc_task2_WCrobust0405_001'}, 'rpl_wcr04_tf_3': {'path': './data/runs/rpl/47/irc_task2_WCrobust04_001'}, 'rpl_wcr0405_tf_3': {'path': './data/runs/rpl/47/irc_task2_WCrobust0405_001'}, 'rpl_wcr04_tf_4': {'path': './data/runs/rpl/48/irc_task2_WCrobust04_001'}, 'rpl_wcr0405_tf_4': {'path': './data/runs/rpl/48/irc_task2_WCrobust0405_001'}, 'rpl_wcr04_tf_5': {'path': './data/runs/rpl/49/irc_task2_WCrobust04_001'}, 'rpl_wcr0405_tf_5': {'path': './data/runs/rpl/49/irc_task2_WCrobust0405_001'} }Define a helping function for plotting the average retrieval performance (ARP) later in the notebook.def average_retrieval_performance(baseline_scores, reproduced_scores: dict, measures: list, xlabel: str, ylabel: str): reproduced_scores_arp = [arp_scores(topic_scores) for idx, topic_scores in reproduced_scores.items()] baseline_scores_arp = arp_scores(baseline_scores) index = list(reproduced_scores.keys()) df_content = {} for measure in measures: df_content[measure] = [scores.get(measure) for scores in reproduced_scores_arp] df = 
pd.DataFrame(df_content, index=index) plt.figure() ax = df.plot.bar(rot=0, figsize=(10, 6)) for num, measure in enumerate(measures): orig_val = baseline_scores_arp.get(measure) ax.hlines(orig_val, -.5, 5.5, linestyles='dashed', color=colors[num]) ax.annotate(' ', (num, orig_val), color=colors[num]) ax.set_ylim(0.0, 1.0) legend_content = [measure + ' (orig)' for measure in measures] + [measure + ' (rpl)' for measure in measures] ax.legend(legend_content, loc='center left', bbox_to_anchor=(1, 0.5)) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) plt.show()Evaluating ReproducibilityThe following code snippet instantiates a reproducibility evaluator `RpdEvaluator` and determines Kendall's tau Union (KTU), the Rank-biased Overlap (RBO), the Root-Mean-Square-Error (RMSE), the Effect Ratio (ER), the Delta Relative Improvement (DRI) and the p-values of the paired t-test. Please be aware, that it takes some time for the RBO to be computed. We've included a progress bar to give you some feedback.rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=RPD_B, run_a_rep_path=RPD_A) rpd_eval.trim() rpd_eval.evaluate() # KTU ktau = rpd_eval.ktau_union() print("Kendall's tau Union (KTU)") print('------------------------------------------------------------------') for topic, value in ktau.get('baseline').items(): print_base_adv(topic, 'KTU', value, ktau.get('advanced').get(topic)) print_base_adv('ARP', 'KTU', arp(ktau.get('baseline')), arp(ktau.get('advanced'))) # RBO rbo = rpd_eval.rbo(print_feedback=True) print("Rank-biased Overlap (RBO)") print('------------------------------------------------------------------') for topic, value in rbo.get('baseline').items(): print_base_adv(topic, 'RBO', value, rbo.get('advanced').get(topic)) print_base_adv('ARP', 'RBO', arp(rbo.get('baseline')), arp(rbo.get('advanced'))) # RMSE rmse = rpd_eval.rmse() print("Root mean square error (RMSE)") print('------------------------------------------------------------------') for measure, value in rmse.get('baseline').items(): print_base_adv(measure, 'RMSE', value, rmse.get('advanced').get(measure)) # ER print("Effect ratio (ER)") print('------------------------------------------------------------------') er = rpd_eval.er() for measure, value in er.items(): print_simple_line(measure, 'ER', value) # DRI print("Delta Relative Improvement (DRI)") print('------------------------------------------------------------------') dri = rpd_eval.dri() for measure, value in dri.items(): print_simple_line(measure, 'DRI', value) # ttest pvals = rpd_eval.ttest() print("Two-tailed paired t-test (p-value)") print('------------------------------------------------------------------') for measure, value in pvals.get('baseline').items(): print_base_adv(measure, 'PVAL', value, pvals.get('advanced').get(measure))Comparing the Average Retrieval Performance (ARP) of different parametrizations The following code snippet determines the ARP scores and compares them via a bar plot.rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None) rpd_eval.trim() rpd_eval.evaluate() for run_name, info in runs_rpd.items(): with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) trim(info['run']) info['scores'] = rpd_eval.evaluate(info['run']) average_retrieval_performance(rpd_eval.run_b_orig_score, { 'tf_1': runs_rpd.get('rpd_wcr04_tf_1').get('scores'), 'tf_2': runs_rpd.get('rpd_wcr04_tf_2').get('scores'), 'tf_3': 
runs_rpd.get('rpd_wcr04_tf_3').get('scores'), 'tf_4': runs_rpd.get('rpd_wcr04_tf_4').get('scores'), 'tf_5': runs_rpd.get('rpd_wcr04_tf_5').get('scores'), }, measures=['P_10', 'ndcg', 'bpref', 'map'], xlabel='Reproduced run (wcr04)', ylabel='Score') average_retrieval_performance(rpd_eval.run_a_orig_score, { 'tf_1': runs_rpd.get('rpd_wcr0405_tf_1').get('scores'), 'tf_2': runs_rpd.get('rpd_wcr0405_tf_2').get('scores'), 'tf_3': runs_rpd.get('rpd_wcr0405_tf_3').get('scores'), 'tf_4': runs_rpd.get('rpd_wcr0405_tf_4').get('scores'), 'tf_5': runs_rpd.get('rpd_wcr0405_tf_5').get('scores'), }, measures=['P_10', 'ndcg', 'bpref', 'map'], xlabel='Reproduced run (wcr0405)', ylabel='Score')Kendall's tau Union (KTU) across different cut-offsThe following code snippet compares the ordering of documents for the reproduced runs across different cut-off ranks.cutoffs = [1000, 100, 50, 20, 10, 5] # BASELINE for run_name, info in zip(list(runs_rpd.keys())[::2], list(runs_rpd.values())[::2]): rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None) rpd_eval.trim() rpd_eval.evaluate() with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) for cutoff in cutoffs: rpd_eval.trim(cutoff) rpd_eval.trim(cutoff, info['run']) info['ktu_' + str(cutoff)] = arp(rpd_eval.ktau_union(info['run'])['baseline']) df_content = {} for run_name, info in zip(list(runs_rpd.keys())[::2], list(runs_rpd.values())[::2]): df_content[run_name] = [info.get('ktu_' + str(cutoff)) for cutoff in cutoffs[::-1]] plt.figure() ax = pd.DataFrame(data=df_content, index=[str(cutoff) for cutoff in cutoffs[::-1]]).plot(style='o-', figsize=(10, 6)) ax.set_xlabel('Cut-off values') ax.set_ylabel(r"Kendall's $\tau$") plt.show() # ADVANCED for run_name, info in zip(list(runs_rpd.keys())[1::2], list(runs_rpd.values())[1::2]): rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None) rpd_eval.trim() rpd_eval.evaluate() with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) for cutoff in cutoffs: rpd_eval.trim(cutoff) rpd_eval.trim(cutoff, info['run']) # scores = rpl_eval.evaluate(info['run']) info['ktu_' + str(cutoff)] = arp(rpd_eval.ktau_union(info['run'])['baseline']) df_content = {} for run_name, info in zip(list(runs_rpd.keys())[1::2], list(runs_rpd.values())[1::2]): df_content[run_name] = [info.get('ktu_' + str(cutoff)) for cutoff in cutoffs[::-1]] plt.figure() ax = pd.DataFrame(data=df_content, index=[str(cutoff) for cutoff in cutoffs[::-1]]).plot(style='o-', figsize=(10, 6)) ax.set_xlabel('Cut-off values') ax.set_ylabel(r"Kendall's $\tau$") plt.show()Root-Mean-Square-Error (RMSE) across different cut-offsThe following code snippet compares the reproduced runs at the level of effectiveness by determining the RMSE across different cut-off ranks.rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None) rpd_eval.trim() rpd_eval.evaluate() for run_name, info in runs_rpd.items(): with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) trim(info['run']) info['scores'] = rpd_eval.evaluate(info['run']) info['rmse'] = rpd_eval.rmse(run_b_score=info['scores']) baseline_runs = ['rpd_wcr04_tf_1', 'rpd_wcr04_tf_2', 'rpd_wcr04_tf_3', 'rpd_wcr04_tf_4', 'rpd_wcr04_tf_5'] advanced_runs = ['rpd_wcr0405_tf_1', 'rpd_wcr0405_tf_2', 'rpd_wcr0405_tf_3', 
'rpd_wcr0405_tf_4', 'rpd_wcr0405_tf_5'] cutoffs = ['5', '10', '15', '20', '30', '100', '200', '500', '1000'] df_content = {} for run_name in baseline_runs: df_content[run_name] = [runs_rpd[run_name]['rmse']['baseline']['ndcg_cut_' + co] for co in cutoffs] df = pd.DataFrame(df_content, index=cutoffs) plt.figure() ax = df.plot.line(style='o-', figsize=(10, 6)) ax.set_xlabel('Cut-off values') ax.set_ylabel('RMSE') plt.show() df_content = {} for run_name in advanced_runs: df_content[run_name] = [runs_rpd[run_name]['rmse']['baseline']['ndcg_cut_' + co] for co in cutoffs] df = pd.DataFrame(df_content, index=cutoffs) plt.figure() ax = df.plot.line(style='o-', figsize=(10, 6)) ax.set_xlabel('Cut-off values') ax.set_ylabel('RMSE') plt.show()Exploring the space of reproducibility at the level of overall effectsThe following code snippet plots the Delta Relative Improvement (DRI) against the Effect Ratio (ER). Having runs with different parametrizations at hand, we can compare them in the cartesian plane. As a rule of thumb, we can say the closer a point to (ER 1, DRI 0), the better the reproduction.rpd_eval = RpdEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None) rpd_eval.trim() rpd_eval.evaluate() for run_name, info in runs_rpd.items(): with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) trim(info['run']) info['scores'] = rpd_eval.evaluate(info['run']) dri_er = { 'wcr_tf_1': { 'er': rpd_eval.er(runs_rpd['rpd_wcr04_tf_1']['scores'], runs_rpd['rpd_wcr0405_tf_1']['scores']), 'dri': rpd_eval.dri(runs_rpd['rpd_wcr04_tf_1']['scores'], runs_rpd['rpd_wcr0405_tf_1']['scores']) }, 'wcr_tf_2': { 'er': rpd_eval.er(runs_rpd['rpd_wcr04_tf_2']['scores'], runs_rpd['rpd_wcr0405_tf_2']['scores']), 'dri': rpd_eval.dri(runs_rpd['rpd_wcr04_tf_2']['scores'], runs_rpd['rpd_wcr0405_tf_2']['scores']) }, 'wcr_tf_3': { 'er': rpd_eval.er(runs_rpd['rpd_wcr04_tf_3']['scores'], runs_rpd['rpd_wcr0405_tf_3']['scores']), 'dri': rpd_eval.dri(runs_rpd['rpd_wcr04_tf_3']['scores'], runs_rpd['rpd_wcr0405_tf_3']['scores']) }, 'wcr_tf_4': { 'er': rpd_eval.er(runs_rpd['rpd_wcr04_tf_4']['scores'], runs_rpd['rpd_wcr0405_tf_4']['scores']), 'dri': rpd_eval.dri(runs_rpd['rpd_wcr04_tf_4']['scores'], runs_rpd['rpd_wcr0405_tf_4']['scores']) }, 'wcr_tf_5': { 'er': rpd_eval.er(runs_rpd['rpd_wcr04_tf_5']['scores'], runs_rpd['rpd_wcr0405_tf_5']['scores']), 'dri': rpd_eval.dri(runs_rpd['rpd_wcr04_tf_5']['scores'], runs_rpd['rpd_wcr0405_tf_5']['scores']) }, } measures = ['P_10', 'map', 'ndcg'] marker_color = [('o', 'b'), ('^', 'g'), ('v', 'r')] fig, ax1 = plt.subplots(figsize=(10, 10)) ax1.set_xlabel('Effect Ratio (ER)') ax1.set_ylabel(u'Delta Relative Improvement (ΔRI)') for measure, mk in zip(measures, marker_color): ax1.plot([dri_er[r]['er'][measure] for r in dri_er.keys()], [dri_er[r]['dri'][measure] for r in dri_er.keys()], marker=mk[0], color=mk[1], linestyle='None', label=measure) ax1.tick_params(axis='y', labelcolor='k') fig.tight_layout() plt.axhline(0, color='grey') plt.axvline(1, color='grey') plt.legend() plt.title('Reproducibility') for m in measures: for r in dri_er.keys(): plt.text(x = dri_er[r]['er'][m], y = dri_er[r]['dri'][m], s = r) plt.show()Evaluating ReplicabilityThe following code snippet instantiates a replicability evaluator `RplEvaluator` and determines the Effect Ratio (ER), the Delta Relative Improvement (DRI) and the p-values of the unpaired t-test.rpl_eval = RplEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, 
run_a_orig_path=ORIG_A, run_b_rep_path=RPL_B, run_a_rep_path=RPL_A, qrel_rpl_path=QREL_RPL) rpl_eval.trim() rpl_eval.evaluate() # ER print("Effect ratio (ER)") print('------------------------------------------------------------------') er = rpl_eval.er() for measure, value in er.items(): print_simple_line(measure, 'ER', value) # DRI print("Delta Relative Improvement (DRI)") print('------------------------------------------------------------------') dri = rpl_eval.dri() for measure, value in dri.items(): print_simple_line(measure, 'DRI', value) # ttest pvals = rpl_eval.ttest() print("Two-tailed unpaired t-test (p-value)") print('------------------------------------------------------------------') for measure, value in pvals.get('baseline').items(): print_base_adv(measure, 'PVAL', value, pvals.get('advanced').get(measure))Exploring the space of replicability at the level of overall effectsThe following code snippet plots the Delta Relative Improvement (DRI) against the Effect Ratio (ER). Having runs with different parametrizations at hand, we can compare them in the cartesian plane. As a rule of thumb, we can say the closer a point to (ER 1, DRI 0), the better the replication.rpl_eval = RplEvaluator(qrel_orig_path=QREL, run_b_orig_path=ORIG_B, run_a_orig_path=ORIG_A, run_b_rep_path=None, run_a_rep_path=None, qrel_rpl_path=QREL_RPL) rpl_eval.trim() rpl_eval.evaluate() for run_name, info in runs_rpl.items(): with open(info.get('path')) as run_file: info['run'] = pytrec_eval.parse_run(run_file) trim(info['run']) info['scores'] = rpl_eval.evaluate(info['run']) dri_er = { 'wcr_tf_1': { 'er': rpl_eval.er(runs_rpl['rpl_wcr04_tf_1']['scores'], runs_rpl['rpl_wcr0405_tf_1']['scores']), 'dri': rpl_eval.dri(runs_rpl['rpl_wcr04_tf_1']['scores'], runs_rpl['rpl_wcr0405_tf_1']['scores']) }, 'wcr_tf_2': { 'er': rpl_eval.er(runs_rpl['rpl_wcr04_tf_2']['scores'], runs_rpl['rpl_wcr0405_tf_2']['scores']), 'dri': rpl_eval.dri(runs_rpl['rpl_wcr04_tf_2']['scores'], runs_rpl['rpl_wcr0405_tf_2']['scores']) }, 'wcr_tf_3': { 'er': rpl_eval.er(runs_rpl['rpl_wcr04_tf_3']['scores'], runs_rpl['rpl_wcr0405_tf_3']['scores']), 'dri': rpl_eval.dri(runs_rpl['rpl_wcr04_tf_3']['scores'], runs_rpl['rpl_wcr0405_tf_3']['scores']) }, 'wcr_tf_4': { 'er': rpl_eval.er(runs_rpl['rpl_wcr04_tf_4']['scores'], runs_rpl['rpl_wcr0405_tf_4']['scores']), 'dri': rpl_eval.dri(runs_rpl['rpl_wcr04_tf_4']['scores'], runs_rpl['rpl_wcr0405_tf_4']['scores']) }, 'wcr_tf_5': { 'er': rpl_eval.er(runs_rpl['rpl_wcr04_tf_5']['scores'], runs_rpl['rpl_wcr0405_tf_5']['scores']), 'dri': rpl_eval.dri(runs_rpl['rpl_wcr04_tf_5']['scores'], runs_rpl['rpl_wcr0405_tf_5']['scores']) }, } measures = ['P_10', 'map', 'ndcg'] marker_color = [('o', 'b'), ('^', 'g'), ('v', 'r')] fig, ax1 = plt.subplots(figsize=(10, 10)) ax1.set_xlabel('Effect Ratio (ER)') ax1.set_ylabel(u'Delta Relative Improvement (ΔRI)') for measure, mk in zip(measures, marker_color): ax1.plot([dri_er[r]['er'][measure] for r in dri_er.keys()], [dri_er[r]['dri'][measure] for r in dri_er.keys()], marker=mk[0], color=mk[1], linestyle='None', label=measure) ax1.tick_params(axis='y', labelcolor='k') fig.tight_layout() plt.axhline(0, color='grey') plt.axvline(1, color='grey') plt.legend() plt.title('Replicability') for m in measures: for r in dri_er.keys(): plt.text(x = dri_er[r]['er'][m], y = dri_er[r]['dri'][m], s = r) plt.show()IntroductionNotebook to analyze the **False Negatives** Results for each model in the context of traceability between **test cases** and **bug reports**. 
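Before loading the data, a quick note on terminology: a false negative here is an oracle trace link (test case, bug report) that a model failed to retrieve. The notebook relies on `aux_functions.get_false_negatives` for this; the snippet below is only a hypothetical toy sketch of the underlying set difference, not that helper's implementation, and the IDs are invented.

```python
# Hypothetical toy example of a false-negative set (IDs invented).
oracle_links = {('TC_1', 'BR_10'), ('TC_2', 'BR_11'), ('TC_3', 'BR_12')}
retrieved_links = {('TC_1', 'BR_10'), ('TC_3', 'BR_12')}

false_negatives = oracle_links - retrieved_links   # links the model missed
print(false_negatives)   # {('TC_2', 'BR_11')}
```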
Load Libraries and Datasetsfrom mod_finder_util import mod_finder_util mod_finder_util.add_modules_origin_search_path() import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib_venn import venn3 from modules.models_runner import tc_br_strat_runner from modules.utils import firefox_dataset_p2 as fd from modules.utils import aux_functions from modules.utils import tokenizers as tok import warnings; warnings.simplefilter('ignore')Run All Models Volunteers Only Strategyvol_strat_runner = tc_br_strat_runner.TC_BR_Vol_Strat_Runner() vol_strat_runner.execute() lsi_model_4 = vol_strat_runner.get_lsi_model() lda_model_4 = vol_strat_runner.get_lda_model() bm25_model_4 = vol_strat_runner.get_bm25_model() w2v_model_4 = vol_strat_runner.get_word2vec_model() oracle = vol_strat_runner.get_oracle() evaluator_4 = vol_strat_runner.get_evaluator() evals_df_4 = vol_strat_runner.get_evals_df()OracleVolunteers.shape: (195, 91) TestCases.shape: (195, 12) SelectedBugReports.shape: (91, 18) Running LSI Model ------ Running LDA Model ----- Running BM25 Model ----- Running W2V Model ------ Evaluating LSI Model ----- Evaluating LDA Model ----- Evaluating BM25 Model ----- Evaluating WORDVECTOR Model -----Load Datasetsbugreports = fd.Datasets.read_selected_bugreports_df() testcases = fd.Datasets.read_testcases_df()SelectedBugReports.shape: (91, 18) TestCases.shape: (195, 12)Tokenizationtokenizer = tok.PorterStemmerBased_Tokenizer() bugreports['tokens'] = bugreports.apply(lambda row : tokenizer.__call__(row['br_desc']), axis=1) testcases['tokens'] = testcases.apply(lambda row : tokenizer.__call__(row['tc_desc']), axis=1)Resultsevals_df_4.head()Grouping Results by Model Min Recallgroup = evals_df_4.groupby('model').perc_recall.min() print(group) print() bm25_min_recall = group[group.index == 'bm25'].values[0] lsi_min_recall = group[group.index == 'lsi'].values[0] lda_min_recall = group[group.index == 'lda'].values[0] wv_min_recall = group[group.index == 'wordvector'].values[0] bm25_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='bm25', perc_recall=bm25_min_recall)) lsi_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lsi', perc_recall=lsi_min_recall)) lda_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lda', perc_recall=lda_min_recall)) wv_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='wordvector', perc_recall=wv_min_recall)) venn3([bm25_fn_set, lsi_fn_set, lda_fn_set], ['BM25','LSI','LDA']) plt.title('Comparison False Negatives by Model (BM25, LSI, LDA) - Min Recall') plt.show() venn3([bm25_fn_set, wv_fn_set, lda_fn_set], ['BM25','WV','LDA']) plt.title('Comparison False Negatives by Model (BM25, WV, LDA) - Min Recall') plt.show() venn3([lsi_fn_set, wv_fn_set, lda_fn_set], ['LSI','WV','LDA']) plt.title('Comparison False Negatives by Model (LSI, WV, LDA) - Min Recall') plt.show() venn3([lsi_fn_set, wv_fn_set, bm25_fn_set], ['LSI','WV','BM25']) plt.title('Comparison False Negatives by Model (LSI, WV, BM25) - Min Recall') plt.show()model bm25 14.20 lda 2.59 lsi 1.88 wordvector 3.61 Name: perc_recall, dtype: float64Exclusive Casesprint("BM25 Exclusive FN:") bm25_exc_set = bm25_fn_set - lsi_fn_set - lda_fn_set - wv_fn_set display(bm25_exc_set) print("\n\nLSI Exclusive FN:") lsi_exc_set = lsi_fn_set - bm25_fn_set - 
lda_fn_set - wv_fn_set display(lsi_exc_set) print("\n\nLDA Exclusive FN:") lda_exc_set = lda_fn_set - lsi_fn_set - bm25_fn_set - wv_fn_set display(lda_exc_set) print("\n\nWV Exclusive FN:") wv_exc_set = wv_fn_set - lda_fn_set - lsi_fn_set - bm25_fn_set display(wv_exc_set)BM25 Exclusive FN:Max Recallgroup = evals_df_4.groupby('model').perc_recall.max() print(group) print() bm25_max_recall = group[group.index == 'bm25'].values[0] lsi_max_recall = group[group.index == 'lsi'].values[0] lda_max_recall = group[group.index == 'lda'].values[0] wv_max_recall = group[group.index == 'wordvector'].values[0] bm25_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='bm25', perc_recall=bm25_max_recall)) lsi_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lsi', perc_recall=lsi_max_recall)) lda_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lda', perc_recall=lda_max_recall)) wv_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='wordvector', perc_recall=wv_max_recall)) venn3([bm25_fn_set, lsi_fn_set, lda_fn_set], ['BM25','LSI','LDA']) plt.title('Comparison False Negatives by Model (BM25, LSI, LDA) - Max Recall') plt.show() venn3([bm25_fn_set, wv_fn_set, lda_fn_set], ['BM25','WV','LDA']) plt.title('Comparison False Negatives by Model (Bm25, WV, LDA) - Max Recall') plt.show() venn3([lsi_fn_set, wv_fn_set, lda_fn_set], ['LSI','WV','LDA']) plt.title('Comparison False Negatives by Model (LSI, WV, LDA) - Max Recall') plt.show() venn3([lsi_fn_set, wv_fn_set, bm25_fn_set], ['LSI','WV','BM25']) plt.title('Comparison False Negatives by Model (LSI, WV, BM25) - Max Recall') plt.show()model bm25 32.54 lda 33.25 lsi 46.99 wordvector 12.62 Name: perc_recall, dtype: float64Exclusive Casesprint("BM25 Exclusive FN:") bm25_exc_set = bm25_fn_set - lsi_fn_set - lda_fn_set - wv_fn_set #display(bm25_exc_set) print("len(bm25_exc_set): {}".format(len(bm25_exc_set))) print("\n\nLSI Exclusive FN:") lsi_exc_set = lsi_fn_set - bm25_fn_set - lda_fn_set - wv_fn_set #display(lsi_exc_set) print("len(lsi_exc_set): {}".format(len(lsi_exc_set))) print("\n\nLDA Exclusive FN:") lda_exc_set = lda_fn_set - lsi_fn_set - bm25_fn_set - wv_fn_set #display(lda_exc_set) print("len(lda_exc_set): {}".format(len(lda_exc_set))) print("\n\nWV Exclusive FN:") wv_exc_set = wv_fn_set - lda_fn_set - lsi_fn_set - bm25_fn_set #display(wv_exc_set) print("len(wv_exc_set): {}".format(len(wv_exc_set)))BM25 Exclusive FN: len(bm25_exc_set): 6 LSI Exclusive FN: len(lsi_exc_set): 6 LDA Exclusive FN: len(lda_exc_set): 47 WV Exclusive FN: len(wv_exc_set): 200Word Cloudsaux_functions.create_wordcloud_tc_br(wv_exc_set, bugreports=bugreports, testcases=testcases, wc_tc_title="FN - Test Cases - Word Vector", wc_br_title="FN - Bug Reports - Word Vector") aux_functions.create_wordcloud_tc_br(bm25_exc_set, bugreports=bugreports, testcases=testcases, wc_tc_title="FN - Test Cases - BM25", wc_br_title="FN - Bug Reports - BM25") aux_functions.create_wordcloud_tc_br(lsi_exc_set, bugreports=bugreports, testcases=testcases, wc_tc_title="FN - Test Cases - LSI", wc_br_title="FN - Bug Reports - LSI") aux_functions.create_wordcloud_tc_br(lda_exc_set, bugreports=bugreports, testcases=testcases, wc_tc_title="FN - Test Cases - LDA", wc_br_title="FN - Bug Reports - LDA") print("LSI FN Amount: {}".format(len(lsi_fn_set))) 
print("LDA FN Amount: {}".format(len(lda_fn_set))) print("BM25 FN Amount: {}".format(len(bm25_fn_set))) print("WV FN Amount: {}".format(len(wv_fn_set)))LSI FN Amount: 521 LDA FN Amount: 705 BM25 FN Amount: 694 WV FN Amount: 974Detailing Features for Exclusive Setsaux_functions.detail_features_tc_br(bm25_exc_set, bugreports=bugreports, testcases=testcases) aux_functions.detail_features_tc_br(lda_exc_set, bugreports=bugreports, testcases=testcases) aux_functions.detail_features_tc_br(lsi_exc_set, bugreports=bugreports, testcases=testcases) aux_functions.detail_features_tc_br(wv_exc_set, bugreports=bugreports, testcases=testcases)Min Precisiongroup = evals_df_4.groupby('model').perc_precision.min() print(group) print() bm25_min_prec = group[group.index == 'bm25'].values[0] lsi_min_prec = group[group.index == 'lsi'].values[0] lda_min_prec = group[group.index == 'lda'].values[0] wv_min_prec = group[group.index == 'wordvector'].values[0] bm25_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='bm25', perc_precision=bm25_min_prec)) lsi_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lsi', perc_precision=lsi_min_prec)) lda_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lda', perc_precision=lda_min_prec)) wv_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='wordvector', perc_precision=wv_min_prec)) venn3([bm25_fn_set, lsi_fn_set, lda_fn_set], ['BM25','LSI','LDA']) plt.title('Comparison False Negatives by Model (BM25, LSI, LDA) - Min Precision') plt.show() venn3([bm25_fn_set, wv_fn_set, lda_fn_set], ['BM25','WV','LDA']) plt.title('Comparison False Negatives by Model (BM25, WV, LDA) - Min Precision') plt.show() venn3([lsi_fn_set, wv_fn_set, lda_fn_set], ['LSI','WV','LDA']) plt.title('Comparison False Negatives by Model (LSI, WV, LDA) - Min Precision') plt.show() venn3([lsi_fn_set, wv_fn_set, bm25_fn_set], ['LSI','WV','BM25']) plt.title('Comparison False Negatives by Model (LSI, WV, BM25) - Min Precision') plt.show()model bm25 14.04 lda 6.94 lsi 9.07 wordvector 6.21 Name: perc_precision, dtype: float64Exclusive Casesprint("BM25 Exclusive FN:") bm25_exc_set = bm25_fn_set - lsi_fn_set - lda_fn_set - wv_fn_set display(bm25_exc_set) print("\n\nLSI Exclusive FN:") lsi_exc_set = lsi_fn_set - bm25_fn_set - lda_fn_set - wv_fn_set display(lsi_exc_set) print("\n\nLDA Exclusive FN:") lda_exc_set = lda_fn_set - lsi_fn_set - bm25_fn_set - wv_fn_set display(lda_exc_set) print("\n\nWV Exclusive FN:") wv_exc_set = wv_fn_set - lda_fn_set - lsi_fn_set - bm25_fn_set display(wv_exc_set) print("LSI FN Amount: {}".format(len(lsi_fn_set))) print("LDA FN Amount: {}".format(len(lda_fn_set))) print("BM25 FN Amount: {}".format(len(bm25_fn_set))) print("WV FN Amount: {}".format(len(wv_fn_set)))LSI FN Amount: 1179 LDA FN Amount: 1158 BM25 FN Amount: 694 WV FN Amount: 1092Max Precisiongroup = evals_df_4.groupby('model').perc_precision.max() print(group) print() bm25_max_prec = group[group.index == 'bm25'].values[0] lsi_max_prec = group[group.index == 'lsi'].values[0] lda_max_prec = group[group.index == 'lda'].values[0] wv_max_prec = group[group.index == 'wordvector'].values[0] bm25_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='bm25', perc_precision=bm25_max_prec)) lsi_fn_set = 
aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lsi', perc_precision=lsi_max_prec)) lda_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='lda', perc_precision=lda_max_prec)) wv_fn_set = aux_functions.get_false_negatives(oracle, aux_functions.get_trace_links_df(evaluations_df=evals_df_4, model='wordvector', perc_precision=wv_max_prec)) venn3([bm25_fn_set, lsi_fn_set, lda_fn_set], ['BM25','LSI','LDA']) plt.title('Comparison False Negatives by Model (BM25, LSI, LDA) - Max Precision') plt.show() venn3([bm25_fn_set, wv_fn_set, lda_fn_set], ['BM25','WV','LDA']) plt.title('Comparison False Negatives by Model (BM25, WV, LDA) - Max Precision') plt.show() venn3([lsi_fn_set, wv_fn_set, lda_fn_set], ['LSI','WV','LDA']) plt.title('Comparison False Negatives by Model (LSI,WV,LDA) - Max Precision') plt.show() venn3([lsi_fn_set, wv_fn_set, bm25_fn_set], ['LSI','WV','BM25']) plt.title('Comparison False Negatives by Model (LSI, WV, BM25) - Max Precision') plt.show()model bm25 21.21 lda 20.03 lsi 33.80 wordvector 6.85 Name: perc_precision, dtype: float64Exclusive Casesprint("BM25 Exclusive FN:") bm25_exc_set = bm25_fn_set - lsi_fn_set - lda_fn_set - wv_fn_set display(bm25_exc_set) print("\n\nLSI Exclusive FN:") lsi_exc_set = lsi_fn_set - bm25_fn_set - lda_fn_set - wv_fn_set display(lsi_exc_set) print("\n\nLDA Exclusive FN:") lda_exc_set = lda_fn_set - lsi_fn_set - bm25_fn_set - wv_fn_set display(lda_exc_set) print("\n\nWV Exclusive FN:") wv_exc_set = wv_fn_set - lda_fn_set - lsi_fn_set - bm25_fn_set display(wv_exc_set) print("LSI FN Amount: {}".format(len(lsi_fn_set))) print("LDA FN Amount: {}".format(len(lda_fn_set))) print("BM25 FN Amount: {}".format(len(bm25_fn_set))) print("WV FN Amount: {}".format(len(wv_fn_set)))LSI FN Amount: 905 LDA FN Amount: 1027 BM25 FN Amount: 1012 WV FN Amount: 1145Plotting ExamplesYourNameHereDateHere Defining Mathematical FunctionsThe following code shows features of numpy for rapidly implementing discrete approximations of mathematical functions.def gaussian(x): """gaussian(x) Normalized Gaussian function, also known as a normal distribution, or a bell curve. """ return (1/np.sqrt(2*np.pi))*np.exp(-x**2/2) def lorentzian(x): """lorentzian(x) Normalized Lorentzian function, also known as a Cauchy distribution, or a resonance linewidth. """ return (1/np.pi)/(1 + x**2) def sigmoid(x): """sigmoid(x) Sigmoid function, used to simulate neuron activation in neural networks. """ return 1/(1 + np.exp(-x)) def sinc(x): """sinc(x) Sinc function, appears in single-slit diffraction, and is Fourier transform of a tophat function. """ # Avoids division by zero by defaulting to the value 1 return np.divide(np.sin(x), x, out=np.ones_like(x), where=(x!=0)) def raisedcosine(x): """raisedcosine(x) Raised cosine distribution, has compact support but similar to bell curve. """ # Efficient conditional evaluation across array return np.where(np.abs(x) < np.pi, (1 + np.cos(x))/(2*np.pi), 0)Note that the python function definitions are type agnostic: the parameter `x` could be any type, and the code will run provided that the expressions inside the function are compatible with that type. This type-agnosticism of python is often called "duck typing", since if it walks like a duck, and quacks like a duck, python will think it is a duck. 
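To make the duck-typing point concrete, here is a small illustrative check (not part of the original assignment): the `gaussian` definition above happily accepts a float or a NumPy array, because both support the arithmetic it uses, while a plain Python list fails because lists do not implement `**`.

```python
import numpy as np

def gaussian(x):
    """Same definition as above: works for any x that supports the arithmetic used."""
    return (1/np.sqrt(2*np.pi))*np.exp(-x**2/2)

print(gaussian(0.0))                          # float in -> float out
print(gaussian(np.array([-1.0, 0.0, 1.0])))   # array in -> elementwise array out

try:
    gaussian([-1.0, 0.0, 1.0])                # a plain list does not support **
except TypeError as err:
    print("list input fails:", err)
```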
For the functions above that rely only on arithmetic operations, broadcasting and vectorization ensure that the operations work correctly whether `x` is a float or an array of floats. This is one of the main reasons that broadcasting and vectorization require no special syntax in numpy.[f(1.24) for f in [gaussian, lorentzian, sigmoid]] # evaluation on a float [f(np.linspace(-1,1,3)) for f in [gaussian, lorentzian, sigmoid]] # evaluation on an array The remaining two functions (`sinc` and `raisedcosine`) use features specific to numpy arrays, so they automatically convert a float input into an array output.[f(1.24) for f in [sinc, raisedcosine]] # numpy specialized evaluation on a float Make sure you understand the concepts of broadcasting and vectorization for arrays. Explain those concepts in your own words below, and use code examples to illustrate your descriptions.# Broadcasting explanation and example # Broadcasting is the process by which numpy computes operations between arrays of different shapes. x = np.array([1,2,3]) y = np.ones(shape=[3,3]) y = x+y print(y) # Vectorization explanation and example # Vectorization is the process by which a function is evaluated directly on a whole array rather than by looping through its elements one at a time. vectorized_gaussian = np.vectorize(gaussian) z = vectorized_gaussian(y) print(z)[[2. 3. 4.] [2. 3. 4.] [2. 3. 4.]] [[0.05399097 0.00443185 0.00013383] [0.05399097 0.00443185 0.00013383] [0.05399097 0.00443185 0.00013383]]Plotting Functions Lazy Plotting with Dataframes For quick plots, it is convenient to package all of the function range data together with their common domain data. Pandas dataframes are ideally suited for this kind of data organization, and they also provide a simple plot interface for quickly generating plots.def make_plots(a=-10,b=10,n=1000): # Generate n domain points equally spaced to cover the interval [a,b] x = np.linspace(a,b,n) # Collect function range points corresponding to domain points fs = pd.DataFrame({"x":x ,"gaussian":gaussian(x) ,"lorentzian":lorentzian(x) ,"sigmoid":sigmoid(x) ,"sinc":sinc(x) ,"raisedcosine":raisedcosine(x) }) # Plot all range points with respect to the same domain points x fs.plot(x='x') make_plots() make_plots(a=-100,b=100) make_plots(n=10)Plotting with Matplotlib Use `matplotlib.pyplot` to generate a plot comparing the Gaussian function, Lorentzian function, and raised cosine function in more detail. Plot the Gaussian in blue, the Lorentzian in red, and the raised cosine in green. Make the Gaussian solid, the Lorentzian dashed, and the raised cosine dotted. Create a legend that labels each curve. Resize the tickmarks to be double the default size. Label the x axis as "x" and double the default font size. Create a plot title of "Distribution comparison".
Annotate the plot with a point on each curve at $x=\pi$, with an arrow pointing to one of the points with a label of $\pi$. After you handle the 1d case, create separate 2d plots of each function by interpreting the "x" value of each function as the radius away from the origin in a 2d plane (so that each 2d function is symmetric under rotations around the origin in the plane).The following resources may be helpful: - https://www.labri.fr/perso/nrougier/teaching/matplotlib/ - http://matplotlib.org/gallery.htmldef make_plots(a=-10,b=10,n=1000): # Generate n domain points equally spaced to cover the interval [a,b] x = np.linspace(a,b,n) # Collect function range points corresponding to domain points fs = pd.DataFrame({"x":x ,"gaussian":gaussian(x) ,"lorentzian":lorentzian(x) ,"sigmoid":sigmoid(x) ,"sinc":sinc(x) ,"raisedcosine":raisedcosine(x) }) # Plot the requested curves with the requested colors and line styles plt.plot('x','gaussian',color='blue',data=fs,label='gaussian') plt.plot('x','lorentzian',color='red',linestyle='dashed',data=fs,label='lorentzian') plt.plot('x','raisedcosine',color='green',linestyle='dotted',data=fs,label='raisedcosine') plt.legend(loc='upper left') plt.tick_params(labelsize='large') plt.xlabel('x') plt.title('Distribution comparison') plt.show() make_plots()Plotting with Seaborn Seaborn is a statistical plotting extension to Matplotlib that uses nicer visual defaults for common data-processing tasks. Consider the following code fragment, which draws a set of random points from a normalized Gaussian distribution and plots a histogram of the data. The blue curve is the estimated continuous distribution function that is consistent with the sampled data. The red curve is the Gaussian function defined above. Show, by increasing the parameter `n`, that collecting more samples improves the estimate so that it converges to the actual Gaussian being used to generate the data. How many samples are required before the distribution convincingly converges?The following documentation may be helpful: - https://seaborn.pydata.org/tutorial/distributions.htmldef gaussian_sample(n): # Generate n random normally-distributed floats data = np.random.randn(n) # Create domain of points for plotting x = np.linspace(-4,4,100) # Plot plt.figure(1) plt.title("Gaussian sampling") plt.xlabel("x") plt.plot(x,gaussian(x),label="gaussian",color="red") # Use seaborn to generate histogram and estimate distribution sb.distplot(data,label="data",color="blue",kde_kws={"label":"estimate"}) # Label curves plt.legend() gaussian_sample(10)/ext/anaconda5/lib/python3.6/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumvalRepeat the above analysis for the Lorentzian distribution (see `numpy.random.standard_cauchy` for sampling Lorentzian-distributed random numbers). How many samples do you need to show convergence to the distribution? Speculate why there is a difference between the Gaussian and Lorentzian cases. (Hint: This problem is tricky, so think about it carefully and describe ways to get around the issues you find.)# Code goes here Opening a Web Page with Python Now that we have a basic understanding of web page structure and HTML, we can use Python to scrape some basic information from this page. The first step is to open the page with Python and print out its HTML source code.
Note: because the page contains Chinese characters, after calling read() we have to convert the raw text with decode() so the Chinese is displayed correctly. The print() output looks like the text shown below, which confirms that we can successfully read all of the information on this page. We have not yet summarized or made use of that information, though; to extract particular pieces of it, making good use of the tag names turns out to be essential.from urllib.request import urlopen # if the page contains Chinese, apply decode() html = urlopen("https://mofanpy.com/static/scraping/basic-structure.html").read().decode('utf-8') print(html)
(printed HTML source, shown here with the markup stripped: the page title "Scraping tutorial 1 | 莫烦Python", the heading "爬虫测试1", and the paragraph "这是一个在 莫烦Python 爬虫教程 中的简单测试.")
Matching page content So here we use Python's regular expressions (RegEx) to do the work of matching text and filtering out information. I have a good tutorial on regular expressions; for beginner-level page matching, regex alone is perfectly sufficient, while for more advanced or fiddly matching I still recommend BeautifulSoup. Don't worry, I know you want to take the easy route, and I will teach Beautiful Soup right after this. For now, let's use regex for a few simple examples so you get familiar with the routine. If we want to find this page's title in code, we can write it like this: pick the tag name to use, `<title>`, and match it with a regex.import re res = re.findall(r"<title>(.+?)</title>", html) print("\nPage title is: ", res[0])Page title is: Scraping tutorial 1 | 莫烦Python If we want to find the paragraph `<p>` in the middle of the page, we use the method below. Because that paragraph also contains tabs and newlines inside the HTML, we pass flags=re.DOTALL so the match is insensitive to those tabs and newlines.res = re.findall(r"<p>(.*?)</p>
", html, flags=re.DOTALL) # re.DOTALL if multi line print("\nPage paragraph is: ", res[0])Page paragraph is: 这是一个在 莫烦Python 爬虫教程 中的简单测试.最后一个练习是找一找所有的链接, 这个比较有用, 有时候你想找到网页里的链接, 然后下载一些内容到电脑里, 就靠这样的途径了.res = re.findall(r'href="(.*?)"', html) print("\nAll links: ", res)All links: ['{{ static_url }}/js/description/tab_icon.png', '/', '/tutorials/data-manipulation/scraping/']The Data Scientist’s Toolbox Tutorial Quite Practical and Far from any Theoretical Concepts last update: 11/13/2018 ---------------------------------------------------------------------Fork, Run and Follow this kernel on GitHub:> [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some UPVOTES would be very much appreciated** ----------- **Notebook Content**1. [Introduction](1) 1. [Import](2) 1. [Version](3) 1. [setup](4)1. [Python](5) 1. [Python Syntax compared to other programming languages](6) 1. [Basics](2) 1. [Functions](3) 1. [Types and Sequences](4) 1. [More on Strings](5) 1. [Reading and Writing CSV files](6) 1. [Dates and Times](7) 1. [Objects and map()](8) 1. [Lambda and List Comprehensions](9) 1. [OOP](10)1. [Numpy](12) 1. [Creating Arrays](13) 1. [Combining Arrays](14) 1. [Operations](15) 1. [Math Functions](16) 1. [Indexing / Slicing](17) 1. [Copying Data](18) 1. [Iterating Over Arrays](19) 1. [The Series Data Structure](20) 1. [Querying a Series](21)1. [Pandas](22) 1. [The DataFrame Data Structure](22) 1. [Dataframe Indexing and Loading](23) 1. [Missing values](24) 1. [Merging Dataframes](25) 1. [Making Code Pandorable](26) 1. [Group by](27) 1. [Scales](28) 1. [Pivot Tables](29) 1. [Date Functionality](30) 1. [Distributions in Pandas](31) 1. [Hypothesis Testing](32)1. [Matplotlib](33) 1. [Scatterplots](34) 1. [Line Plots](35) 1. [Bar Charts](36) 1. [Histograms](37) 1. [Box and Whisker Plots](38) 1. [Heatmaps](39) 1. [Animations](40) 1. [Interactivity](41) 1. [DataFrame.plot](42)1. [seaborn](43) 1. [5-1 Seaborn Vs Matplotlib](43) 1. [5-2 10 Useful Python Data Visualization Libraries](43)1. [SKlearn](44) 1. [Introduction](45) 1. [Algorithms](46) 1. [Framework](47) 1. [Applications](48) 1. [Data](49) 1. [Supervised Learning: Classification](50) 1. [Separate training and testing sets](51) 1. [linear, binary classifier](52) 1. [Prediction](53) 1. [Back to the original three-class problem](54) 1. [Evaluating the classifier](55) 1. [Using the four flower attributes](56) 1. [Unsupervised Learning: Clustering](57) 1. [Supervised Learning: Regression](58)1. [Plotly](59) 1. [New to Plotly?](59)1. [CheatSheet](60)1. [Conclusion](61)1. 
[References](62) 1-Introduction In this kernel, we have a comprehensive tutorials for Five packages in python after that you can read my other kernels about machine learning [Go to top](top) 1-1 Importfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt import plotly.graph_objs as go import scipy.stats as stats import plotly.plotly as py import seaborn as sns import pandas as pd import numpy as np import matplotlib import warnings import sklearn import scipy import json import sys import csv import os !pip install plotlyRequirement already satisfied: plotly in /home/vinc3/anaconda3/lib/python3.6/site-packages (3.4.1) Requirement already satisfied: requests in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (2.18.4) Requirement already satisfied: nbformat>=4.2 in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (4.4.0) Requirement already satisfied: six in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (1.11.0) Requirement already satisfied: decorator>=4.0.6 in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (4.3.0) Requirement already satisfied: pytz in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (2018.4) Requirement already satisfied: retrying>=1.3.3 in /home/vinc3/anaconda3/lib/python3.6/site-packages (from plotly) (1.3.3) Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /home/vinc3/anaconda3/lib/python3.6/site-packages (from requests->plotly) (3.0.4) Requirement already satisfied: idna<2.7,>=2.5 in /home/vinc[...]1-2 Versionprint('matplotlib: {}'.format(matplotlib.__version__)) print('sklearn: {}'.format(sklearn.__version__)) print('scipy: {}'.format(scipy.__version__)) print('seaborn: {}'.format(sns.__version__)) print('pandas: {}'.format(pd.__version__)) print('numpy: {}'.format(np.__version__)) print('Python: {}'.format(sys.version))matplotlib: 2.2.2 sklearn: 0.19.1 scipy: 1.1.0 seaborn: 0.8.1 pandas: 0.23.0 numpy: 1.14.3 Python: 3.6.5 |Anaconda, Inc.| (default, Apr 29 2018, 16:14:56) [GCC 7.2.0]1-3 Setupwarnings.filterwarnings('ignore') sns.set(color_codes=True) plt.style.available %matplotlib inline %precision 22-Python**Python** is a modern, robust, high level programming language. It is very easy to pick up even if you are completely new to programming.It is used for:1. web development (server-side),1. software development,1. mathematics,1. system scripting. 2-1 Python Syntax compared to other programming languages1. Python was designed to for readability, and has some similarities to the English language with influence from mathematics.1. Python uses new lines to complete a command, as opposed to other programming languages which often use semicolons or parentheses.1. Python relies on indentation, using whitespace, to define scope; such as the scope of loops, functions and classes. Other programming languages often use curly-brackets for this purpose. [Go to top](top) 2-2 Python: Basicsimport thisThe Zen of Python, by Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. 
In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those!2-2-1 VariablesA name that is used to denote something or a value is called a variable. In python, variables can be declared and values can be assigned to it as follows,x = 2 y = 5 xy = 'Hey'2-2-2 Operators | Symbol | Task Performed ||----|---|| + | Addition || - | Subtraction || / | division || % | mod || * | multiplication || // | floor division || ** | to the power of | Relational Operators| Symbol | Task Performed ||----|---|| == | True, if it is equal || != | True, if not equal to || < | less than || > | greater than || <= | less than or equal to || >= | greater than or equal to | Bitwise Operators| Symbol | Task Performed ||----|---|| & | Logical And || l | Logical OR || ^ | XOR || ~ | Negate || >> | Right shift || << | Left shift | [Go to top](top) 2-3 Python : Functions `add_numbers` is a function that takes two numbers and adds them together.def add_numbers(x, y): return x + y add_numbers(1, 2)`add_numbers` updated to take an optional 3rd parameter. Using `print` allows printing of multiple expressions within a single cell.def add_numbers(x,y,z=None): if (z==None): return x+y else: return x+y+z print(add_numbers(1, 2)) print(add_numbers(1, 2, 3))3 6`add_numbers` updated to take an optional flag parameter.def add_numbers(x, y, z=None, flag=False): if (flag): print('Flag is true!') if (z==None): return x + y else: return x + y + z print(add_numbers(1, 2, flag=True))Flag is true! 3Assign function `add_numbers` to variable `a`.def add_numbers(x,y): return x+y a = add_numbers a(1,2)2-4 Python : Types and Sequences Use `type` to return the object's type.type('This is a string') type(None) type(1) type(1.0) type(add_numbers)Tuples are an immutable data structure (cannot be altered).x = (1, 'a', 2, 'b') type(x)Lists are a mutable data structure.x = [1, 'a', 2, 'b'] type(x)Use `append` to append an object to a list.x.append(3.3) print(x)[1, 'a', 2, 'b', 3.3]This is an example of how to loop through each item in the list.for item in x: print(item)1 a 2 b 3.3Or using the indexing operator:i=0 while( i != len(x) ): print(x[i]) i = i + 11 a 2 b 3.3Use `+` to concatenate lists.[1,2] + [3,4]Use `*` to repeat lists.[1]*3Use the `in` operator to check if something is inside a list.1 in [1, 2, 3]Now let's look at strings. Use bracket notation to slice a string. 
[Go to top](top)x = 'This is a string' print(x[0]) #first character print(x[0:1]) #first character, but we have explicitly set the end character print(x[0:2]) #first two charactersT T ThThis will return the last element of the string.x[-1]This will return the slice starting from the 4th element from the end and stopping before the 2nd element from the end.x[-4:-2]This is a slice from the beginning of the string and stopping before the 3rd element.x[:3]And this is a slice starting from the 3rd element of the string and going all the way to the end.x[3:] firstname = 'MJ' lastname = 'Bahmani' print(firstname + ' ' + lastname) print(firstname*3) print('mj' in firstname) MJMJMJ False`split` returns a list of all the words in a string, or a list split on a specific character.firstname = 'Mr Dr Mj Bahmani'.split(' ')[0] # [0] selects the first element of the list lastname = 'Mr Dr Mj Bahmani'.split(' ')[-1] # [-1] selects the last element of the list print(firstname) print(lastname) 'MJ' + str(2)Dictionaries associate keys with values.x = {'': '', 'irmatlab': ''} x[''] # Retrieve a value by using the indexing operator x[''] = None x['']Iterate over all of the keys:for name in x: print(x[name])None Iterate over all of the values:for email in x.values(): print(email)None Iterate over all of the items in the list:for name, email in x.items(): print(name) print(email) None irmatlab You can unpack a sequence into different variables:x = ('MJ', 'Bahmani', '') fname, lname, email = x fname lname2-5 Python: More on Strings [Go to top](top)print('MJ' + str(2))MJ2Python has a built in method for convenient string formatting.sales_record = { 'price': 3.24, 'num_items': 4, 'person': 'MJ'} sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}' print(sales_statement.format(sales_record['person'], sales_record['num_items'], sales_record['price'], sales_record['num_items']*sales_record['price']))MJ bought 4 item(s) at a price of 3.24 each for a total of 12.962-6 Python:Reading and Writing CSV files Let's import our datafile train.csv [Go to top](top)with open('train.csv') as csvfile: train = list(csv.DictReader(csvfile)) train[:1] # The first three dictionaries in our list.`csv.Dictreader` has read in each row of our csv file as a dictionary. `len` shows that our list is comprised of 234 dictionaries.len(train)`keys` gives us the column names of our csv.train[0].keys()How to do some math action on the data setsum(float(d['Fare']) for d in train) / len(train)Use `set` to return the unique values for the type of Sex in our dataset have.Sex = set(d['Sex'] for d in train) Sex2-7 Python: Dates and Timesimport datetime as dt import time as tm`time` returns the current time in seconds since the Epoch. 
(January 1st, 1970) [Go to top](top)tm.time()Convert the timestamp to datetime.dtnow = dt.datetime.fromtimestamp(tm.time()) dtnowHandy datetime attributes:dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime`timedelta` is a duration expressing the difference between two dates.delta = dt.timedelta(days = 100) # create a timedelta of 100 days delta`date.today` returns the current local date.today = dt.date.today() today - delta # the date 100 days ago today > today-delta # compare dates2-8 Python: Objects and map() An example of a class in python:class Person: department = 'School of Information' #a class variable def set_name(self, new_name): #a method self.name = new_name def set_location(self, new_location): self.location = new_location person = Person() person.set_name('') person.set_location('MI, Berlin, Germany') print('{} live in {} and works in the department {}'.format(person.name, person.location, person.department)) live in MI, Berlin, Germany and works in the department School of InformationHere's an example of mapping the `min` function between two lists.store1 = [10.00, 11.00, 12.34, 2.34] store2 = [9.00, 11.10, 12.34, 2.01] cheapest = map(min, store1, store2) cheapestNow let's iterate through the map object to see the values.for item in cheapest: print(item)9.0 11.0 12.34 2.012-9-Python : Lambda and List Comprehensions Here's an example of lambda that takes in three parameters and adds the first two.my_function = lambda a, b, c : a + b my_function(1, 2, 3)Let's iterate from 0 to 9 and return the even numbers. [Go to top](top)my_list = [] for number in range(0, 9): if number % 2 == 0: my_list.append(number) my_listNow the same thing but with list comprehension.my_list = [number for number in range(0,10) if number % 2 == 0] my_list2-10 OOP1. **Class** − A user-defined prototype for an object that defines a set of attributes that characterize any object of the class. The attributes are data members (class variables and instance variables) and methods, accessed via dot notation.1. **Class variable** − A variable that is shared by all instances of a class. Class variables are defined within a class but outside any of the class's methods. Class variables are not used as frequently as instance variables are.1. **Data member** − A class variable or instance variable that holds data associated with a class and its objects.1. **Function overloading** − The assignment of more than one behavior to a particular function. The operation performed varies by the types of objects or arguments involved.1. **Instance variable** − A variable that is defined inside a method and belongs only to the current instance of a class.1. **Inheritance** − The transfer of the characteristics of a class to other classes that are derived from it.1. **Instance** − An individual object of a certain class. An object obj that belongs to a class Circle, for example, is an instance of the class Circle.1. **Instantiation** − The creation of an instance of a class.1. **Method** − A special kind of function that is defined in a class definition.1. **Object** − A unique instance of a data structure that's defined by its class. An object comprises both data members (class variables and instance variables) and methods.1. 
**Operator overloading** − The assignment of more than one function to a particular operator.[4] [Go to top](top)class FirstClass: test = 'test' def __init__(self,name,symbol): self.name = name self.symbol = symbol eg3 = FirstClass('Three',3) print (eg3.test, eg3.name) class FirstClass: def __init__(self,name,symbol): self.name = name self.symbol = symbol def square(self): return self.symbol * self.symbol def cube(self): return self.symbol * self.symbol * self.symbol def multiply(self, x): return self.symbol * x eg4 = FirstClass('Five',5) print (eg4.square()) print (eg4.cube()) eg4.multiply(2) FirstClass.multiply(eg4,2)2-10-1 InheritanceThere might be cases where a new class would have all the previous characteristics of an already defined class. So the new class can "inherit" the previous class and add it's own methods to it. This is called as inheritance.Consider class SoftwareEngineer which has a method salary.class SoftwareEngineer: def __init__(self,name,age): self.name = name self.age = age def salary(self, value): self.money = value print (self.name,"earns",self.money) a = SoftwareEngineer('Kartik',26) a.salary(40000) dir(SoftwareEngineer) class Artist: def __init__(self,name,age): self.name = name self.age = age def money(self,value): self.money = value print (self.name,"earns",self.money) def artform(self, job): self.job = job print (self.name,"is a", self.job) b = Artist('Nitin',20) b.money(50000) b.artform('Musician') dir(Artist)2-11 Python JSON# some JSON: x = '{ "name":"John", "age":30, "city":"New York"}' # parse x: y = json.loads(x) # the result is a Python dictionary: print(y["age"])302-11-1 Convert from Python to JSON# a Python object (dict): x = { "name": "John", "age": 30, "city": "New York" } # convert into JSON: y = json.dumps(x) # the result is a JSON string: print(y){"name": "John", "age": 30, "city": "New York"}You can convert Python objects of the following types, into JSON strings:* dict* list* tuple* string* int* float* True* False* None [Go to top](top)print(json.dumps({"name": "John", "age": 30})) print(json.dumps(["apple", "bananas"])) print(json.dumps(("apple", "bananas"))) print(json.dumps("hello")) print(json.dumps(42)) print(json.dumps(31.76)) print(json.dumps(True)) print(json.dumps(False)) print(json.dumps(None)){"name": "John", "age": 30} ["apple", "bananas"] ["apple", "bananas"] "hello" 42 31.76 true false nullConvert a Python object containing all the legal data types:x = { "name": "John", "age": 30, "married": True, "divorced": False, "children": ("Ann","Billy"), "pets": None, "cars": [ {"model": "BMW 230", "mpg": 27.5}, {"model": "Ford Edge", "mpg": 24.1} ] } print(json.dumps(x)){"name": "John", "age": 30, "married": true, "divorced": false, "children": ["Ann", "Billy"], "pets": null, "cars": [{"model": "BMW 230", "mpg": 27.5}, {"model": "Ford Edge", "mpg": 24.1}]}2-12 Python PIP 2-12-1 What is a Package?A package contains all the files you need for a module.Modules are Python code libraries you can include in your project. 
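As a small, self-contained illustration (the package name `mytools` and its contents are made up for this sketch, not part of the kernel), a package is just a directory of module files plus an `__init__.py`, which Python can then import with dotted names:

```python
import pathlib, sys, importlib

# Build a tiny throwaway package on disk (hypothetical name and contents).
pkg = pathlib.Path("mytools")
pkg.mkdir(exist_ok=True)
(pkg / "__init__.py").write_text("")                    # marks the directory as a package
(pkg / "stats.py").write_text(
    "def mean(values):\n"
    "    return sum(values) / len(values)\n")           # a module inside the package

sys.path.insert(0, ".")                                 # make the current directory importable
stats = importlib.import_module("mytools.stats")        # import a module from the package
print(stats.mean([1, 2, 3]))                            # 2.0
```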
[Go to top](top) 2-12-2 Install PIPIf you do not have PIP installed, you can download and install it from this page: https://pypi.org/project/pip/ 2-13 Python Try ExceptThe **try** block lets you test a block of code for errors.The **except** block lets you handle the error.The **finally** block lets you execute code, regardless of the result of the try- and except blocks.try: print(x) except NameError: print("Variable x is not defined") except: print("Something else went wrong") try: print(x) except: print("Something went wrong") finally: print("The 'try except' is finished"){'name': 'John', 'age': 30, 'married': True, 'divorced': False, 'children': ('Ann', 'Billy'), 'pets': None, 'cars': [{'model': 'BMW 230', 'mpg': 27.5}, {'model': 'Ford Edge', 'mpg': 24.1}]} The 'try except' is finished2-14 Python IteratorsAn iterator is an object that contains a countable number of values.An iterator is an object that can be iterated upon, meaning that you can traverse through all the values.Technically, in Python, an iterator is an object which implements the iterator protocol, which consist of the methods __iter__() and __next__(). [Go to top](top) Return a iterator from a tuple, and print each value:mytuple = ("apple", "banana", "cherry") myit = iter(mytuple) print(next(myit)) print(next(myit)) print(next(myit))apple banana cherry2- 14-1 Looping Through an Iteratormytuple = ("apple", "banana", "cherry") for x in mytuple: print(x)apple banana cherry2- 15 DictionaryA **dictionary** is a collection which is **unordered, changeable and indexed**. In Python dictionaries are written with curly brackets, and they have **keys and values**.thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } print(thisdict){'brand': 'Ford', 'model': 'Mustang', 'year': 1964}2-16 TuplesA **tuple** is a collection which is **ordered and unchangeable**. In Python tuples are written with round brackets.thistuple = ("apple", "banana", "cherry") print(thistuple)('apple', 'banana', 'cherry')2-19 SetA set is a collection which is unordered and unindexed. In Python sets are written with curly brackets. [Go to top](top)thisset = {"apple", "banana", "cherry"} print(thisset) thisset = {"apple", "banana", "cherry"} for x in thisset: print(x)apple banana cherry2-17-1 Add ItemsTo add one item to a set use the add() method.To add more than one item to a set use the update() method. [Go to top](top)thisset = {"apple", "banana", "cherry"} thisset.add("orange") print(thisset){'orange', 'apple', 'banana', 'cherry'}Python Packages* Numpy* Pandas* Matplotlib* Seaborn* Sklearn* plotly 3- Numerical Python (NumPy)import numpy as np3-1 NumPy :Creating Arrays Create a list and convert it to a numpy arraymylist = [1, 2, 3] x = np.array(mylist) xOr just pass in a list directlyy = np.array([4, 5, 6]) yPass in a list of lists to create a multidimensional array.m = np.array([[7, 8, 9], [10, 11, 12]]) mUse the shape method to find the dimensions of the array. 
(rows, columns).m.shape`arange` returns evenly spaced values within a given interval.n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30 n`reshape` returns an array with the same data with a new shape.n = n.reshape(3, 5) # reshape array to be 3x5 n`linspace` returns evenly spaced numbers over a specified interval.o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4 o`resize` changes the shape and size of array in-place.o.resize(3, 3) o`ones` returns a new array of given shape and type, filled with ones.np.ones((3, 2))`zeros` returns a new array of given shape and type, filled with zeros.np.zeros((2, 3))`eye` returns a 2-D array with ones on the diagonal and zeros elsewhere.np.eye(3)`diag` extracts a diagonal or constructs a diagonal array.np.diag(y)Create an array using repeating list (or see `np.tile`)np.array([1, 2, 3] * 3)Repeat elements of an array using `repeat`.np.repeat([1, 2, 3], 3)3-2 Numpy:Combining Arrays [Go to top](top)p = np.ones([2, 3], int) pUse `vstack` to stack arrays in sequence vertically (row wise).np.vstack([p, 2*p])Use `hstack` to stack arrays in sequence horizontally (column wise).np.hstack([p, 2*p])3-3 Numpy:Operations [Go to top](top) Use `+`, `-`, `*`, `/` and `**` to perform element wise addition, subtraction, multiplication, division and power.print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9] print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3] print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18] print(x / y) # elementwise divison [1 2 3] / [4 5 6] = [0.25 0.4 0.5] print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9][1 4 9]**Dot Product:** $ \begin{bmatrix}x_1 \ x_2 \ x_3\end{bmatrix}\cdot\begin{bmatrix}y_1 \\ y_2 \\ y_3\end{bmatrix}= x_1 y_1 + x_2 y_2 + x_3 y_3$x.dot(y) # dot product 1*4 + 2*5 + 3*6 z = np.array([y, y**2]) print(len(z)) # number of rows of array2Let's look at transposing arrays. Transposing permutes the dimensions of the array.z = np.array([y, y**2]) zThe shape of array `z` is `(2,3)` before transposing.z.shapeUse `.T` to get the transpose.z.TThe number of rows has swapped with the number of columns.z.T.shapeUse `.dtype` to see the data type of the elements in the array.z.dtypeUse `.astype` to cast to a specific type.z = z.astype('f') z.dtype3-4 Numpy: Math Functions [Go to top](top) Numpy has many built in math functions that can be performed on arrays.a = np.array([-4, -2, 1, 3, 5]) a.sum() a.max() a.min() a.mean() a.std()`argmax` and `argmin` return the index of the maximum and minimum values in the array.a.argmax() a.argmin()3-5 Numpy:Indexing / Slicing [Go to top](top)s = np.arange(13)**2 sUse bracket notation to get the value at a specific index. Remember that indexing starts at 0.s[0], s[4], s[-1]Use `:` to indicate a range. `array[start:stop]`Leaving `start` or `stop` empty will default to the beginning/end of the array.s[1:5]Use negatives to count from the back.s[-4:]A second `:` can be used to indicate step-size. 
`array[start:stop:stepsize]`Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.s[-5::-2]Let's look at a multidimensional array.r = np.arange(36) r.resize((6, 6)) rUse bracket notation to slice: `array[row, column]`.r[2, 2]And use : to select a range of rows or columns.r[3, 3:6]Here we are selecting all the rows up to (and not including) row 2, and all the columns up to (and not including) the last column.r[:2, :-1]This is a slice of the last row, and only every other element.r[-1, ::2]We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30. (Also see `np.where`)r[r > 30]Here we are assigning all values in the array that are greater than 30 to the value of 30. [Go to top](top)r[r > 30] = 30 r3-6 Numpy :Copying Data Be careful with copying and modifying arrays in NumPy!`r2` is a slice of `r`r2 = r[:3,:3] r2Set this slice's values to zero ([:] selects the entire array)r2[:] = 0 r2`r` has also been changed!rTo avoid this, use `r.copy` to create a copy that will not affect the original arrayr_copy = r.copy() r_copyNow when r_copy is modified, r will not be changed.r_copy[:] = 10 print(r_copy, '\n') print(r)[[10 10 10 10 10 10] [10 10 10 10 10 10] [10 10 10 10 10 10] [10 10 10 10 10 10] [10 10 10 10 10 10] [10 10 10 10 10 10]] [[ 0 0 0 3 4 5] [ 0 0 0 9 10 11] [ 0 0 0 15 16 17] [18 19 20 21 22 23] [24 25 26 27 28 29] [30 30 30 30 30 30]]3-7 Numpy: Iterating Over Arrays Let's create a new 4 by 3 array of random numbers 0-9.test = np.random.randint(0, 10, (4,3)) testIterate by row:for row in test: print(row)[9 1 2] [6 5 8] [4 7 1] [9 6 7]Iterate by index:for i in range(len(test)): print(test[i])[9 1 2] [6 5 8] [4 7 1] [9 6 7]Iterate by row and index:for i, row in enumerate(test): print('row', i, 'is', row)row 0 is [9 1 2] row 1 is [6 5 8] row 2 is [4 7 1] row 3 is [9 6 7]Use `zip` to iterate over multiple iterables.test2 = test**2 test2 for i, j in zip(test, test2): print(i,'+',j,'=',i+j)[9 1 2] + [81 1 4] = [90 2 6] [6 5 8] + [36 25 64] = [42 30 72] [4 7 1] + [16 49 1] = [20 56 2] [9 6 7] + [81 36 49] = [90 42 56]3-8 Numpy: The Series Data StructureOne-dimensional ndarray with axis labels (including time series)animals = ['Tiger', 'Bear', 'Moose'] pd.Series(animals) numbers = [1, 2, 3] pd.Series(numbers) animals = ['Tiger', 'Bear', None] pd.Series(animals) numbers = [1, 2, None] pd.Series(numbers) import numpy as np np.nan == None np.nan == np.nan np.isnan(np.nan) sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.index s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada']) s sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey']) s3-9 Numpy: Querying a Seriessports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.iloc[3] s.loc['Golf'] s[3] s['Golf'] sports = {99: 'Bhutan', 100: 'Scotland', 101: 'Japan', 102: 'South Korea'} s = pd.Series(sports) s = pd.Series([100.00, 120.00, 101.00, 3.00]) s total = 0 for item in s: total+=item print(total) total = np.sum(s) print(total) #this creates a big series of random numbers s = pd.Series(np.random.randint(0,1000,10000)) s.head() len(s) %%timeit -n 100 summary = 0 for item in s: summary+=item %%timeit -n 100 summary = np.sum(s) s+=2 #adds two to each item in s using broadcasting 
s.head() for label, value in s.iteritems(): s.set_value(label, value+2) s.head() %%timeit -n 10 s = pd.Series(np.random.randint(0,1000,100)) for label, value in s.iteritems(): s.loc[label]= value+2 %%timeit -n 10 s = pd.Series(np.random.randint(0,1000,100)) s+=2 s = pd.Series([1, 2, 3]) s.loc['Animal'] = 'Bears' s original_sports = pd.Series({'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'}) cricket_loving_countries = pd.Series(['Australia', 'Barbados', 'Pakistan', 'England'], index=['Cricket', 'Cricket', 'Cricket', 'Cricket']) all_countries = original_sports.append(cricket_loving_countries) original_sports cricket_loving_countries all_countries all_countries.loc['Cricket']4- Pandas:The DataFrame Data Structure You'll hone your pandas skills by learning how to organize, reshape, and aggregate multiple data sets to answer your specific questions. **Pandas**:Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure.Pandas is capable of many tasks including:Reading/writing many different data formatsSelecting subsets of dataCalculating across rows and down columnsFinding and filling missing dataApplying operations to independent groups within the dataReshaping data into different formsCombing multiple datasets togetherAdvanced time-series functionalityVisualization through matplotlib and seabornAlthough pandas is very capable, it does not provide functionality for the entire data science pipeline. Pandas is typically the intermediate tool used for data exploration and cleaning squashed between data capturing and storage, and data modeling and predicting. [Go to top](top)purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() df.loc['Store 2'] type(df.loc['Store 2']) df.loc['Store 1'] df.loc['Store 1', 'Cost'] df.T df.T.loc['Cost'] df['Cost'] df.loc['Store 1']['Cost'] df.loc[:,['Name', 'Cost']] df.drop('Store 1') df copy_df = df.copy() copy_df = copy_df.drop('Store 1') copy_df copy_df.drop del copy_df['Name'] copy_df df['Location'] = None df costs = df['Cost'] costs costs+=2 costs df4-1 Pandas:Dataframe Indexing and LoadingAs a Data Scientist, you'll often find that the data you need is not in a single file. It may be spread across a number of text files, spreadsheets, or databases. You want to be able to import the data of interest as a collection of DataFrames and figure out how to combine them to answer your central questions. 
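Before `train.csv` is loaded below, here is a tiny self-contained sketch (with made-up data standing in for separate files, spreadsheets, or database tables) of the idea just described: read each source into its own DataFrame, then combine them with `pd.concat` for row-wise stacking or `pd.merge` for key-based joins.

```python
import pandas as pd

# Made-up tables standing in for separate files, spreadsheets, or databases.
orders_2017 = pd.DataFrame({'order_id': [1, 2], 'customer': ['Ann', 'Bob'], 'total': [20.0, 35.5]})
orders_2018 = pd.DataFrame({'order_id': [3, 4], 'customer': ['Ann', 'Cid'], 'total': [12.0, 8.0]})
customers = pd.DataFrame({'customer': ['Ann', 'Bob', 'Cid'], 'country': ['US', 'DE', 'FR']})

orders = pd.concat([orders_2017, orders_2018], ignore_index=True)   # stack rows from two sources
enriched = pd.merge(orders, customers, how='left', on='customer')   # join on a shared key column
print(enriched)
```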
[Go to top](top)df = pd.read_csv('train.csv') df.head() df.columns # Querying a DataFrame df['Survived'] > 0 only_Survived = df.where(df['Survived'] > 0) only_Survived.head() only_Survived['Survived'].count() df['Survived'].count() only_Survived = only_Survived.dropna() only_Survived.head() only_Survived = df[df['Survived'] > 0] only_Survived.head() len(df[(df['Survived'] > 0) | (df['Survived'] > 0)]) df[(df['Survived'] > 0) & (df['Survived'] == 0)] # Indexing Dataframes df.head() df['PassengerId'] = df.index df = df.set_index('Survived') df.head() df = df.reset_index() df.head() df = pd.read_csv('train.csv') df.head() df['Age'].unique() df=df[df['Age'] == 50] df.head()4-2 Pandas:Missing valuesdf = pd.read_csv('train.csv') df df.fillna df = df.set_index('PassengerId') df = df.sort_index() df df = df.reset_index() df = df.set_index(['PassengerId', 'Survived']) df df = df.fillna(method='ffill') df.head()4-3 Pandas :Merging Dataframespandas provides various facilities for easily combining together Series, DataFrame, and Panel objects with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.df = pd.DataFrame([{'Name': 'MJ', 'Item Purchased': 'Sponge', 'Cost': 22.50}, {'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}, {'Name': 'Filip', 'Item Purchased': 'Spoon', 'Cost': 5.00}], index=['Store 1', 'Store 1', 'Store 2']) df df['Date'] = ['December 1', 'January 1', 'mid-May'] df df['Delivered'] = True df df['Feedback'] = ['Positive', None, 'Negative'] df adf = df.reset_index() adf['Date'] = pd.Series({0: 'December 1', 2: 'mid-May'}) adf staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR'}, {'Name': 'Sally', 'Role': 'Course liasion'}, {'Name': 'James', 'Role': 'Grader'}]) staff_df = staff_df.set_index('Name') student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business'}, {'Name': 'Mike', 'School': 'Law'}, {'Name': 'Sally', 'School': 'Engineering'}]) student_df = student_df.set_index('Name') print(staff_df.head()) print() print(student_df.head()) pd.merge(staff_df, student_df, how='outer', left_index=True, right_index=True) pd.merge(staff_df, student_df, how='inner', left_index=True, right_index=True) pd.merge(staff_df, student_df, how='left', left_index=True, right_index=True) pd.merge(staff_df, student_df, how='right', left_index=True, right_index=True) staff_df = staff_df.reset_index() student_df = student_df.reset_index() pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name') staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR', 'Location': 'State Street'}, {'Name': 'Sally', 'Role': 'Course liasion', 'Location': 'Washington Avenue'}, {'Name': 'James', 'Role': 'Grader', 'Location': 'Washington Avenue'}]) student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business', 'Location': '1024 Billiard Avenue'}, {'Name': 'Mike', 'School': 'Law', 'Location': 'Fraternity House #22'}, {'Name': 'Sally', 'School': 'Engineering', 'Location': '512 Wilson Crescent'}]) pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name') staff_df = pd.DataFrame([{'First Name': 'Kelly', 'Last Name': 'Desjardins', 'Role': 'Director of HR'}, {'First Name': 'Sally', 'Last Name': 'Brooks', 'Role': 'Course liasion'}, {'First Name': 'James', 'Last Name': 'Wilde', 'Role': 'Grader'}]) student_df = pd.DataFrame([{'First Name': 'James', 'Last Name': 'Hammond', 'School': 'Business'}, {'First Name': 'Mike', 'Last Name': 'Smith', 'School': 'Law'}, {'First Name': 'Sally', 'Last 
Name': 'Brooks', 'School': 'Engineering'}]) staff_df student_df pd.merge(staff_df, student_df, how='inner', left_on=['First Name','Last Name'], right_on=['First Name','Last Name'])4-4 Idiomatic Pandas: Making Code Pandorabledf = pd.read_csv('../input/train.csv') df df = df[df['Age']==50] df.set_index(['PassengerId','Survived'], inplace=True) df.rename(columns={'Pclass': 'pclass'})4-5 Pandas :Group bydf = pd.read_csv('../input/train.csv') df = df[df['Age']==50] df df.head()4-6 Pandas:Scalesdf = pd.DataFrame(['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D'], index=['excellent', 'excellent', 'excellent', 'good', 'good', 'good', 'ok', 'ok', 'ok', 'poor', 'poor']) df.rename(columns={0: 'Grades'}, inplace=True) df df['Grades'].astype('category').head() grades = df['Grades'].astype('category', categories=['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'], ordered=True) grades.head() grades > 'C'4-7 Pandas:Date Functionality [Go to top](top) 4-7-1 Timestamppd.Timestamp('9/1/2016 10:05AM')4-7-2 Periodpd.Period('1/2016') pd.Period('3/5/2016')4-7-3 DatetimeIndext1 = pd.Series(list('abc'), [pd.Timestamp('2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')]) t1 type(t1.index)4-7-4 PeriodIndext2 = pd.Series(list('def'), [pd.Period('2016-09'), pd.Period('2016-10'), pd.Period('2016-11')]) t2 type(t2.index)4-8 Pandas: Converting to Datetimed1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16'] ts3 = pd.DataFrame(np.random.randint(10, 100, (4,2)), index=d1, columns=list('ab')) ts3 ts3.index = pd.to_datetime(ts3.index) ts3 pd.to_datetime('4.7.12', dayfirst=True) pd.Timestamp('9/3/2016')-pd.Timestamp('9/1/2016')4-8-1 Timedeltaspd.Timestamp('9/3/2016')-pd.Timestamp('9/1/2016') pd.Timestamp('9/2/2016 8:10AM') + pd.Timedelta('12D 3H')4-8-2 Working with Dates in a Dataframedates = pd.date_range('10-01-2016', periods=9, freq='2W-SUN') dates df.index.ravel4-9 Distributions in Pandas [Go to top](top)np.random.binomial(1, 0.5) np.random.binomial(1000, 0.5)/1000 chance_of_tornado = 0.01/100 np.random.binomial(100000, chance_of_tornado) chance_of_tornado = 0.01 tornado_events = np.random.binomial(1, chance_of_tornado, 1000000) two_days_in_a_row = 0 for j in range(1,len(tornado_events)-1): if tornado_events[j]==1 and tornado_events[j-1]==1: two_days_in_a_row+=1 print('{} tornadoes back to back in {} years'.format(two_days_in_a_row, 1000000/365)) np.random.uniform(0, 1) np.random.normal(0.75) distribution = np.random.normal(0.75,size=1000) np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution)) np.std(distribution) stats.kurtosis(distribution) stats.skew(distribution) chi_squared_df2 = np.random.chisquare(2, size=10000) stats.skew(chi_squared_df2) chi_squared_df5 = np.random.chisquare(5, size=10000) stats.skew(chi_squared_df5) output = plt.hist([chi_squared_df2,chi_squared_df5], bins=50, histtype='step', label=['2 degrees of freedom','5 degrees of freedom']) plt.legend(loc='upper right')5- MatplotlibThis Matplotlib tutorial takes you through the basics Python data visualization: the anatomy of a plot, pyplot and pylab, and much more [Go to top](top) You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment.# because the default is the line style '-', # nothing will be shown if we only pass in one point (3,2) plt.plot(3, 2) # we can pass in '.' 
to plt.plot to indicate that we want # the point (3,2) to be indicated with a marker '.' plt.plot(3, 2, '.')Let's see how to make a plot without using the scripting layer.# First let's set the backend without using mpl.use() from the scripting layer from matplotlib.backends.backend_agg import FigureCanvasAgg from matplotlib.figure import Figure # create a new figure fig = Figure() # associate fig with the backend canvas = FigureCanvasAgg(fig) # add a subplot to the fig ax = fig.add_subplot(111) # plot the point (3,2) ax.plot(3, 2, '.') # save the figure to test.png # you can see this figure in your Jupyter workspace afterwards by going to # https://hub.coursera-notebooks.org/ canvas.print_png('test.png')We can use html cell magic to display the image.%%html # create a new figure plt.figure() # plot the point (3,2) using the circle marker plt.plot(3, 2, 'o') # get the current axes ax = plt.gca() # Set axis properties [xmin, xmax, ymin, ymax] ax.axis([0,6,0,10]) # create a new figure plt.figure() # plot the point (1.5, 1.5) using the circle marker plt.plot(1.5, 1.5, 'o') # plot the point (2, 2) using the circle marker plt.plot(2, 2, 'o') # plot the point (2.5, 2.5) using the circle marker plt.plot(2.5, 2.5, 'o') # get current axes ax = plt.gca() # get all the child objects the axes contains ax.get_children() plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3) plt.scatter([0.3, 3.8, 1.2, 2.5], [11, 25, 9, 26], color='darkgreen', marker='^') plt.xlim(0.5, 4.5) plt.show()5-1 Scatterplotsx = np.array([1,2,3,4,5,6,7,8]) y = x plt.figure() plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D x = np.array([1,2,3,4,5,6,7,8]) y = x # create a list of colors for each point to have # ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red'] colors = ['green']*(len(x)-1) colors.append('red') plt.figure() # plot the point with size 100 and chosen colors plt.scatter(x, y, s=100, c=colors) # convert the two lists into a list of pairwise tuples zip_generator = zip([1,2,3,4,5], [6,7,8,9,10]) print(list(zip_generator)) # the above prints: # [(1, 6), (2, 7), (3, 8), (4, 9), (5, 10)] zip_generator = zip([1,2,3,4,5], [6,7,8,9,10]) # The single star * unpacks a collection into positional arguments print(*zip_generator) # the above prints: # (1, 6) (2, 7) (3, 8) (4, 9) (5, 10) # use zip to convert 5 tuples with 2 elements each to 2 tuples with 5 elements each print(list(zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10)))) # the above prints: # [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)] zip_generator = zip([1,2,3,4,5], [6,7,8,9,10]) # let's turn the data back into 2 lists x, y = zip(*zip_generator) # This is like calling zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10)) print(x) print(y) # the above prints: # (1, 2, 3, 4, 5) # (6, 7, 8, 9, 10) plt.figure() # plot a data series 'Tall students' in red using the first two elements of x and y plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students') # plot a second data series 'Short students' in blue using the last three elements of x and y plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students') # add a label to the x axis plt.xlabel('The number of times the child kicked a ball') # add a label to the y axis plt.ylabel('The grade of the student') # add a title plt.title('Relationship between ball kicking and grades') # add a legend (uses the labels from plt.scatter) plt.legend() # add the legend to loc=4 (the lower right hand corner), also gets rid of the frame and adds a title 
plt.legend(loc=4, frameon=False, title='Legend') # get children from current axes (the legend is the second to last item in this list) plt.gca().get_children() # get the legend from the current axes legend = plt.gca().get_children()[-2] x = np.random.randint(low=1, high=11, size=50) y = x + np.random.randint(1, 5, size=x.size) data = np.column_stack((x, y)) fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8, 4)) ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b') ax1.set_title('Scatter: $x$ versus $y$') ax1.set_xlabel('$x$') ax1.set_ylabel('$y$') ax2.hist(data, bins=np.arange(data.min(), data.max()), label=('x', 'y')) ax2.legend(loc=(0.65, 0.8)) ax2.set_title('Frequencies of $x$ and $y$') ax2.yaxis.tick_right()5-2 Line Plotslinear_data = np.array([1,2,3,4,5,6,7,8]) exponential_data = linear_data**2 plt.figure() # plot the linear data and the exponential data plt.plot(linear_data, '-o', exponential_data, '-o') # plot another series with a dashed red line plt.plot([22,44,55], '--r')5-3 Bar Chartsplt.figure() xvals = range(len(linear_data)) plt.bar(xvals, linear_data, width = 0.3) new_xvals = [] # plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted for item in xvals: new_xvals.append(item+0.3) plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red') from random import randint linear_err = [randint(0,15) for x in range(len(linear_data))] # This will plot a new set of bars with errorbars using the list of random error values plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err) # stacked bar charts are also possible plt.figure() xvals = range(len(linear_data)) plt.bar(xvals, linear_data, width = 0.3, color='b') plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r') # or use barh for horizontal bar charts plt.figure() xvals = range(len(linear_data)) plt.barh(xvals, linear_data, height = 0.3, color='b') plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r') # Initialize the plot fig = plt.figure(figsize=(20,10)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122) # or replace the three lines of code above by the following line: #fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10)) # Plot the data ax1.bar([1,2,3],[3,4,5]) ax2.barh([0.5,1,2.5],[0,1,2]) # Show the plot plt.show() plt.figure() # subplot with 1 row, 2 columns, and current axis is 1st subplot axes plt.subplot(1, 2, 1) linear_data = np.array([1,2,3,4,5,6,7,8]) plt.plot(linear_data, '-o') exponential_data = linear_data**2 # subplot with 1 row, 2 columns, and current axis is 2nd subplot axes plt.subplot(1, 2, 2) plt.plot(exponential_data, '-o') # plot exponential data on 1st subplot axes plt.subplot(1, 2, 1) plt.plot(exponential_data, '-x') plt.figure() ax1 = plt.subplot(1, 2, 1) plt.plot(linear_data, '-o') # pass sharey=ax1 to ensure the two subplots share the same y axis ax2 = plt.subplot(1, 2, 2, sharey=ax1) plt.plot(exponential_data, '-x') plt.figure() # the right hand side is equivalent shorthand syntax plt.subplot(1,2,1) == plt.subplot(121) # create a 3x3 grid of subplots fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True) # plot the linear_data on the 5th subplot axes ax5.plot(linear_data, '-') # set inside tick labels to visible for ax in plt.gcf().get_axes(): for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_visible(True) plt.show() # necessary on some systems to update the plot plt.gcf().canvas.draw() plt.show()5-4 Histograms# create 2x2 grid of axis 
subplots fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True) axs = [ax1,ax2,ax3,ax4] # draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms for n in range(0,len(axs)): sample_size = 10**(n+1) sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size) axs[n].hist(sample) axs[n].set_title('n={}'.format(sample_size)) # repeat with number of bins set to 100 fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True) axs = [ax1,ax2,ax3,ax4] for n in range(0,len(axs)): sample_size = 10**(n+1) sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size) axs[n].hist(sample, bins=100) axs[n].set_title('n={}'.format(sample_size)) plt.figure() Y = np.random.normal(loc=0.0, scale=1.0, size=10000) X = np.random.random(size=10000) plt.scatter(X,Y) # use gridspec to partition the figure into subplots import matplotlib.gridspec as gridspec plt.figure() gspec = gridspec.GridSpec(3, 3) top_histogram = plt.subplot(gspec[0, 1:]) side_histogram = plt.subplot(gspec[1:, 0]) lower_right = plt.subplot(gspec[1:, 1:]) Y = np.random.normal(loc=0.0, scale=1.0, size=10000) X = np.random.random(size=10000) lower_right.scatter(X, Y) top_histogram.hist(X, bins=100) s = side_histogram.hist(Y, bins=100, orientation='horizontal') # clear the histograms and plot normed histograms top_histogram.clear() top_histogram.hist(X, bins=100, normed=True) side_histogram.clear() side_histogram.hist(Y, bins=100, orientation='horizontal', normed=True) # flip the side histogram's x axis side_histogram.invert_xaxis() # change axes limits for ax in [top_histogram, lower_right]: ax.set_xlim(0, 1) for ax in [side_histogram, lower_right]: ax.set_ylim(-5, 5)5-5 Box and Whisker Plotsnormal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000) random_sample = np.random.random(size=10000) gamma_sample = np.random.gamma(2, size=10000) df = pd.DataFrame({'normal': normal_sample, 'random': random_sample, 'gamma': gamma_sample}) df.describe() plt.figure() # create a boxplot of the normal data, assign the output to a variable to supress output _ = plt.boxplot(df['normal'], whis='range') # clear the current figure plt.clf() # plot boxplots for all three of df's columns _ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range') plt.figure() _ = plt.hist(df['gamma'], bins=100) import mpl_toolkits.axes_grid1.inset_locator as mpl_il plt.figure() plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range') # overlay axis on top of another ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2) ax2.hist(df['gamma'], bins=100) ax2.margins(x=0.5) # switch the y axis ticks for ax2 to the right side ax2.yaxis.tick_right() # if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers plt.figure() _ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )5-6 Heatmapsplt.figure() Y = np.random.normal(loc=0.0, scale=1.0, size=10000) X = np.random.random(size=10000) _ = plt.hist2d(X, Y, bins=25) plt.figure() _ = plt.hist2d(X, Y, bins=100)5-7 Animationsimport matplotlib.animation as animation n = 100 x = np.random.randn(n) # create the function that will do the plotting, where curr is the current frame def update(curr): # check if animation is at the last frame, and if so, stop the animation a if curr == n: a.event_source.stop() plt.cla() bins = np.arange(-4, 4, 0.5) plt.hist(x[:curr], bins=bins) plt.axis([-4,4,0,30]) plt.gca().set_title('Sampling the Normal Distribution') 
plt.gca().set_ylabel('Frequency') plt.gca().set_xlabel('Value') plt.annotate('n = {}'.format(curr), [3,27]) fig = plt.figure() a = animation.FuncAnimation(fig, update, interval=100)5-8 Interactivityplt.figure() data = np.random.rand(10) plt.plot(data) def onclick(event): plt.cla() plt.plot(data) plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata)) # tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected plt.gcf().canvas.mpl_connect('button_press_event', onclick) from random import shuffle origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico'] shuffle(origins) df = pd.DataFrame({'height': np.random.rand(10), 'weight': np.random.rand(10), 'origin': origins}) df plt.figure() # picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away plt.scatter(df['height'], df['weight'], picker=5) plt.gca().set_ylabel('Weight') plt.gca().set_xlabel('Height') def onpick(event): origin = df.iloc[event.ind[0]]['origin'] plt.gca().set_title('Selected item came from {}'.format(origin)) # tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected plt.gcf().canvas.mpl_connect('pick_event', onpick) # use the 'seaborn-colorblind' style plt.style.use('seaborn-colorblind')5-9 DataFrame.plotnp.random.seed(123) df = pd.DataFrame({'A': np.random.randn(365).cumsum(0), 'B': np.random.randn(365).cumsum(0) + 20, 'C': np.random.randn(365).cumsum(0) - 20}, index=pd.date_range('1/1/2017', periods=365)) df.head() df.plot('A','B', kind = 'scatter');You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.`kind` :- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B' df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis') ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis') ax.set_aspect('equal') df.plot.box(); df.plot.hist(alpha=0.7);[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.df.plot.kde();6- SeabornAs you have just read, **Seaborn** is complimentary to Matplotlib and it specifically targets statistical data visualization. But it goes even further than that: Seaborn extends Matplotlib and that’s why it can address the two biggest frustrations of working with Matplotlib. Or, as says in the “introduction to Seaborn”: “If matplotlib “tries to make easy things easy and hard things possible”, seaborn tries to make a well-defined set of hard things easy too.”One of these hard things or frustrations had to do with the default Matplotlib parameters. Seaborn works with different parameters, which undoubtedly speaks to those users that don’t use the default looks of the Matplotlib plotsSeaborn is a library for making statistical graphics in Python. 
It is built on top of matplotlib and closely integrated with pandas data structures. Here is some of the functionality that seaborn offers:
* A dataset-oriented API for examining relationships between multiple variables
* Specialized support for using categorical variables to show observations or aggregate statistics
* Options for visualizing univariate or bivariate distributions and for comparing them between subsets of data
* Automatic estimation and plotting of linear regression models for different kinds of dependent variables
* Convenient views onto the overall structure of complex datasets
* High-level abstractions for structuring multi-plot grids that let you easily build complex visualizations
* Concise control over matplotlib figure styling with several built-in themes
* Tools for choosing color palettes that faithfully reveal patterns in your data
Seaborn aims to make visualization a central part of exploring and understanding data. Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots. Here’s an example of what this means (see the short sketch just below, after the feature list). 6-1 Seaborn Vs Matplotlib
To summarize: if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well-defined set of hard things easy too. Seaborn helps resolve two major problems faced when working with Matplotlib:
* Default Matplotlib parameters
* Working with data frames
As Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already halfway through Seaborn.
Important Features of Seaborn
Seaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, not a replacement. However, Seaborn comes with some very important features:
* Built-in themes for styling Matplotlib graphics
* Visualizing univariate and bivariate data
* Fitting and visualizing linear regression models
* Plotting statistical time series data
* Working well with NumPy and Pandas data structures
In most cases, you will still use Matplotlib for simple plotting.
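As a concrete illustration of that dataset-oriented API, here is a minimal sketch (an added example, assuming seaborn's bundled `tips` example dataset, which `sns.load_dataset` fetches on first use):
import seaborn as sns
import matplotlib.pyplot as plt
tips = sns.load_dataset('tips')  # a small example DataFrame shipped with seaborn
# one call maps columns to x, y and color, draws the legend and fits a regression per group
sns.lmplot(x='total_bill', y='tip', hue='smoker', data=tips)
plt.show()
Doing the same in plain Matplotlib would require grouping the DataFrame, choosing colors, fitting the regressions and building the legend by hand.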
The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.def sinplot(flip = 1): x = np.linspace(0, 14, 100) for i in range(1, 5): plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip) sinplot() plt.show() def sinplot(flip = 1): x = np.linspace(0, 14, 100) for i in range(1, 5): plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip) sns.set() sinplot() plt.show() np.random.seed(1234) v1 = pd.Series(np.random.normal(0,10,1000), name='v1') v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2') plt.figure() plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1'); plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2'); plt.legend(); plt.figure() # we can pass keyword arguments for each individual component of the plot sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'}); sns.jointplot(v1, v2, alpha=0.4); grid = sns.jointplot(v1, v2, alpha=0.4); grid.ax_joint.set_aspect('equal') sns.jointplot(v1, v2, kind='hex'); # set the seaborn style for all the following plots sns.set_style('white') sns.jointplot(v1, v2, kind='kde', space=0); train = pd.read_csv('../input/train.csv') train.head()6-2 10 Useful Python Data Visualization Libraries I am giving an overview of 10 interdisciplinary Python data visualization libraries, from the well-known to the obscure.* 1- matplotlibmatplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.* 2- SeabornSeaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.* 3- ggplotggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.* 4- BokehLike ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.* 5- pygalLike Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.* 6- PlotlyYou might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendograms, and 3D charts.* 7- geoplotlibgeoplotlib is a toolbox for creating maps and plotting geographical data. 
You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.* 8- GleamGleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.* 9- missingnoDealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.* 10- LeatherLeather’s creator, , puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic—but that’s the intention.At the end, nice cheatsheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheatsheet, also as a PDF. hashtagdata hashtagvisualization hashtagdatascienceLink: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python![cheatsheet ][1][Reference][2] [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/ 7- SKlearn- The __open source__ Python ecosystem provides __a standalone, versatile and powerful scientific working environment__, including: [NumPy](http://numpy.org), [SciPy](http://scipy.org), [IPython](http://ipython.org), [Matplotlib](http://matplotlib.org), [Pandas](http://pandas.pydata.org/), _and many others..._ 7-1 Introduction- Scikit-Learn builds upon NumPy and SciPy and __complements__ this scientific environment with machine learning algorithms;- By design, Scikit-Learn is __non-intrusive__, easy to use and easy to combine with other libraries;- Core algorithms are implemented in low-level languages. 7-2 Algorithms __Supervised learning:__* Linear models (Ridge, Lasso, Elastic Net, ...)* Support Vector Machines* Tree-based methods (Random Forests, Bagging, GBRT, ...)* Nearest neighbors * Neural networks (basics)* Gaussian Processes* Feature selection __Unsupervised learning:__* Clustering (KMeans, Ward, ...)* Matrix decomposition (PCA, ICA, ...)* Density estimation* Outlier detection __Model selection and evaluation:__* Cross-validation* Grid-search* Lots of metrics_... and many more!_ (See our [Reference](http://scikit-learn.org/dev/modules/classes.html)) 7-3 FrameworkData comes as a finite learning set ${\cal L} = (X, y)$ where* Input samples are given as an array $X$ of shape `n_samples` $\times$ `n_features`, taking their values in ${\cal X}$;* Output values are given as an array $y$, taking _symbolic_ values in ${\cal Y}$. 
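For concreteness, a minimal sketch of this data convention (toy values, not a real dataset):
import numpy as np
X = np.array([[5.1, 3.5],
              [4.9, 3.0],
              [6.3, 3.3]])  # shape (n_samples=3, n_features=2)
y = np.array([0, 0, 1])     # one symbolic class label per sample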
The goal of supervised classification is to build an estimator $\varphi: {\cal X} \mapsto {\cal Y}$ minimizing$$Err(\varphi) = \mathbb{E}_{X,Y}\{ \ell(Y, \varphi(X)) \}$$where $\ell$ is a loss function, e.g., the zero-one loss for classification $\ell_{01}(Y,\hat{Y}) = 1(Y \neq \hat{Y})$. 7-4 Applications
- Classifying signal from background events;
- Diagnosing disease from symptoms;
- Recognising cats in pictures;
- Identifying body parts with Kinect cameras;
- ...
7-5 Data
- Input data = NumPy arrays or SciPy sparse matrices;
- Algorithms are expressed using high-level operations defined on matrices or vectors (similar to MATLAB);
- Leverage efficient low-level implementations;
- Keep code short and readable.
from sklearn import datasets iris = datasets.load_iris() X_iris = iris.data y_iris = iris.targetThe dataset includes 150 instances, with 4 attributes each. For each instance, we will also have a target class (in our case, the species). This class is a special attribute which we will aim to predict for new, previously unseen instances, given the remaining (known) attributes.print (X_iris.shape, y_iris.shape) print ('Feature names:{0}'.format(iris.feature_names)) print ('Target classes:{0}'.format(iris.target_names)) print ('First instance features:{0}'.format(X_iris[0]))Let us display each instance in a 2d scatter plot, using first the sepal measures, and then the petal measures.plt.figure('sepal') colormarkers = [ ['red','s'], ['greenyellow','o'], ['blue','x']] for i in range(len(colormarkers)): px = X_iris[:, 0][y_iris == i] py = X_iris[:, 1][y_iris == i] plt.scatter(px, py, c=colormarkers[i][0], marker=colormarkers[i][1]) plt.title('Iris Dataset: Sepal width vs sepal length') plt.legend(iris.target_names) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.figure('petal') for i in range(len(colormarkers)): px = X_iris[:, 2][y_iris == i] py = X_iris[:, 3][y_iris == i] plt.scatter(px, py, c=colormarkers[i][0], marker=colormarkers[i][1]) plt.title('Iris Dataset: petal width vs petal length') plt.legend(iris.target_names) plt.xlabel('Petal length') plt.ylabel('Petal width') plt.show()7-6 Supervised Learning: Classification The Iris dataset was introduced to the statistics world in 1936, where it was used to develop a _linear discriminant model_: a linear combination of the attributes that separates one species from the rest, that is, a straight line similar to the one we suggested in the previous section.Our first task will be to predict the species of an Iris flower given its four sepal and petal measures. For the moment, we will start using only two attributes, its sepal width and length. We will do this to ease visualization, but later we will use all four attributes, and see if performance improves. This is an instance of a **classification problem**, where we want to assign a label taken from a discrete set to an item according to its features.The typical classification process roughly involves the following steps:
- select your attributes,
- build a model based on available data, and
- evaluate your model’s performance on previously unseen data.
To do this, before building our model we should separate training and testing data. Training data will be used to build the model, and testing data will be used to evaluate its performance.
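Before walking through the Iris example step by step, here is a minimal sketch of that three-step loop using scikit-learn's estimator API (LogisticRegression is used here only as a stand-in for illustration; the sections below build the model with SGDClassifier instead):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
X_all, y_all = load_iris(return_X_y=True)
# build the model on one part of the data...
X_tr, X_te, y_tr, y_te = train_test_split(X_all, y_all, test_size=0.25, random_state=0)
model = LogisticRegression(max_iter=200).fit(X_tr, y_tr)
# ...and evaluate it on data the model has never seen
print(accuracy_score(y_te, model.predict(X_te)))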
7-7 Separate training and testing sets Our first step will be to separate the dataset into two separate sets, using 75% of the instances for training our classifier, and the remaining 25% for evaluating it (and, in this case, taking only two features, sepal width and length). We will also perform _feature scaling_: for each feature, calculate the average, subtract the mean value from the feature value, and divide the result by the standard deviation. After scaling, each feature will have a zero average, with a standard deviation of one. This standardization of values (which does not change their distribution, as you could verify by plotting the X values before and after scaling) is a common requirement of machine learning methods, to prevent features with large values from weighing too heavily on the final results.from sklearn.model_selection import train_test_split from sklearn import preprocessing # Create dataset with only the first two attributes X, y = X_iris[:, [0,1]], y_iris # Test set will be the 25% taken randomly X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33) # Standardize the features scaler = preprocessing.StandardScaler().fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test)Check that, after scaling, the mean is 0 and the standard deviation is 1 (this should be exact in the training set, but only approximate in the testing set, because we used the training set mean and standard deviation):print ('Training set mean:{:.2f} and standard deviation:{:.2f}'.format(np.average(X_train),np.std(X_train))) print ('Testing set mean:{:.2f} and standard deviation:{:.2f}'.format(np.average(X_test),np.std(X_test)))Display the training data, after scaling.colormarkers = [ ['red','s'], ['greenyellow','o'], ['blue','x']] plt.figure('Training Data') for i in range(len(colormarkers)): xs = X_train[:, 0][y_train == i] ys = X_train[:, 1][y_train == i] plt.scatter(xs, ys, c=colormarkers[i][0], marker=colormarkers[i][1]) plt.title('Training instances, after scaling') plt.legend(iris.target_names) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.show()7-8 A linear, binary classifier To start, let's transform the problem into a binary classification task: we will only want to distinguish setosa flowers from the rest (it seems easy, according to the plot). To do this, we will just collapse all non-setosa targets into the same class (later we will come back to the original three-class problem).import copy y_train_setosa = copy.copy(y_train) # Every 1 and 2 in the training set will become just 1 y_train_setosa[y_train_setosa > 0]=1 y_test_setosa = copy.copy(y_test) y_test_setosa[y_test_setosa > 0]=1 print ('New training target classes:\n{0}'.format(y_train_setosa))Our first classifier will be a linear one. Linear classification models have been studied very well for many years, and there are a lot of different methods with actually very different approaches for building the separating hyperplane. We will use the `SGDClassifier` from scikit-learn to implement a linear model, including regularization. The classifier (actually, a family of classifiers, as we will see) receives its name from using Stochastic Gradient Descent, a very effective numerical procedure to find the local minimum of a function. Gradient Descent was introduced in 1847 as a method to solve a system of linear equations.
The idea is based on the observation that a multivariable function decreases fastest in the direction of its negative gradient (you can think of the gradient as a generalization of the derivative to several dimensions). If we want to find its minimum (at least a local one) we can move in the direction of its negative gradient. This is exactly what gradient descent does. Every classifier in scikit-learn is created the same way: calling its constructor with the classifier's configurable hyperparameters to create an instance of the classifier. In this case, we will use `linear_model.SGDClassifier`, telling scikit-learn to use a _log_ loss function.from sklearn import linear_model clf = linear_model.SGDClassifier(loss='log', random_state=42) print (clf)Note that the classifier includes several parameters. Usually, scikit-learn specifies default values for every parameter. But be aware that it is not always a good idea to keep them at their default values. Later (or in future notebooks, I do not know yet), we will talk about _model selection_, the process of selecting the best parameters.Now, we just call the `fit` method to train the classifier (i.e., build a model we will later use), based on the available training data. In our case, the training setosa set.clf.fit(X_train, y_train_setosa)How does our model look? Well, since we are building a linear classifier, our model is a... line. We can show its coefficients:print (clf.coef_,clf.intercept_)... and we can draw the decision boundary using pyplot:x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5 y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5 xs = np.arange(x_min, x_max, 0.5) fig,axes = plt.subplots() axes.set_aspect('equal') axes.set_title('Setosa classification') axes.set_xlabel('Sepal length') axes.set_ylabel('Sepal width') axes.set_xlim(x_min, x_max) axes.set_ylim(y_min, y_max) plt.sca(axes) plt.scatter(X_train[:, 0][y_train == 0], X_train[:, 1][y_train == 0], c='red', marker='s') plt.scatter(X_train[:, 0][y_train == 1], X_train[:, 1][y_train == 1], c='black', marker='x') ys = (-clf.intercept_[0]- xs * clf.coef_[0, 0]) / clf.coef_[0, 1] plt.plot(xs, ys) plt.show()The blue line is our decision boundary. Every time $30.97 \times sepal\_length - 17.82 \times sepal\_width - 17.34$ is greater than zero we will have an iris setosa (class 0). 7-9 Prediction Now, the really useful part: when we have a new flower, we just have to get its sepal length and width and call the `predict` method of the classifier on the new instance. _This works the same way no matter which classifier we are using or which method we used to build it_print ('If the flower has sepal length 4.7 and sepal width 3.1, it is a {}'.format( iris.target_names[clf.predict(scaler.transform([[4.7, 3.1]]))]))Note that we first scaled the new instance, then applied the `predict` method, and used the result to look it up in the iris target names array. 7-10 Back to the original three-class problem Now, let's do the training using the three original classes. Using scikit-learn this is simple: we do exactly the same procedure, using the original three target classes:clf2 = linear_model.SGDClassifier(loss='log', random_state=33) clf2.fit(X_train, y_train) print (len(clf2.coef_))We now have _three_ decision curves... scikit-learn has simply converted the problem into three one-versus-all binary classifiers.
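A quick way to see the one-versus-all mechanism at work (a small check added here, reusing `clf2` and the scaled `X_train` from above): the predicted class is simply the binary classifier with the largest score.
scores = clf2.decision_function(X_train[:3])  # one score per class for the first three training instances
print(scores)
print(scores.argmax(axis=1))      # index of the winning one-versus-all classifier
print(clf2.predict(X_train[:3]))  # agrees with the argmax, since the class labels are 0, 1 and 2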
Note that Class 0 is linearly separable, while Class 1 and Class 2 are not.x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5 y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5 xs = np.arange(x_min,x_max,0.5) fig, axes = plt.subplots(1,3) fig.set_size_inches(10,6) for i in [0,1,2]: axes[i].set_aspect('equal') axes[i].set_title('Class '+ iris.target_names[i] + ' versus the rest') axes[i].set_xlabel('Sepal length') axes[i].set_ylabel('Sepal width') axes[i].set_xlim(x_min, x_max) axes[i].set_ylim(y_min, y_max) plt.sca(axes[i]) ys=(-clf2.intercept_[i]-xs*clf2.coef_[i,0])/clf2.coef_[i,1] plt.plot(xs,ys) for j in [0,1,2]: px = X_train[:, 0][y_train == j] py = X_train[:, 1][y_train == j] color = colormarkers[j][0] if j==i else 'black' marker = 'o' if j==i else 'x' plt.scatter(px, py, c=color, marker=marker) plt.show()Let us evaluate the previous instance to find the three-class prediction. Scikit-learn tries the three classifiers.scaler.transform([[4.7, 3.1]]) print(clf2.decision_function(scaler.transform([[4.7, 3.1]]))) clf2.predict(scaler.transform([[4.7, 3.1]]))The `decision_function` method tells us the classifier scores (in our case, the left side of the decision boundary inequality). In our example, the first classifier says the flower is a setosa (we have a score greater than zero), and it is not a versicolor nor a virginica. Easy. What if we had two positive values? In that case, the winning class is the one with the greatest score, i.e., the one whose point is furthest away from the decision line. 7-11 Evaluating the classifier The performance of an estimator is a measure of its effectiveness. The most obvious performance measure is called _accuracy_: given a classifier and a set of instances, it simply measures the proportion of instances correctly classified by the classifier. We can, for example, use the instances in the training set and calculate the accuracy of our classifier when predicting their target classes. Scikit-learn includes a `metrics` module that implements this (and many other) performance metrics.from sklearn import metrics y_train_pred = clf2.predict(X_train) print ('Accuracy on the training set:{:.2f}'.format(metrics.accuracy_score(y_train, y_train_pred)))This means that our classifier correctly predicts 83\% of the instances in the training set. But this is actually a bad idea. The problem with evaluating on the training set is that you have built your model using this data, and it is possible that your model fits this data very well but performs poorly on previously unseen data (which is its ultimate purpose). This phenomenon is called overfitting, and you will see it again and again throughout this book. If you measure on your training data, you will never detect overfitting. So, _never ever_ measure on your training data. Remember we separated a portion of the data as a testing set? Now it is time to use it: since it was not used for training, we expect it to give us an idea of how well our classifier performs on previously unseen data.y_pred = clf2.predict(X_test) print ('Accuracy on the testing set:{:.2f}'.format(metrics.accuracy_score(y_test, y_pred)))Generally, accuracy on the testing set is lower than the accuracy on the training set, since the model is actually modeling the training set, not the testing set.One of the problems with accuracy is that it does not reflect well how our model performs on each different target class.
For example, we know that our classifier works very well identifying setosa species, but will probably fail when separating the other two species. If we could measure this, we could get hints for improving performance, changing the method or the features. A very useful tool when facing multi-class problems is the confusion matrix. This matrix includes, in row _i_ and column _j_, the number of instances of class _i_ that were predicted to be in class _j_. A good classifier will accumulate the values on the confusion matrix diagonal, where correctly classified instances belong. Having the original and predicted classes, we can easily print the confusion matrix:print (metrics.confusion_matrix(y_test, y_pred))To read the confusion matrix, just remember the definition: the “8” on row 2, column 3, means that eight instances of class 1 were predicted to be in class 2. Our classifier is never wrong in our evaluation set when it classifies class zero (setosa) flowers. However, when it faces classes one and two (versicolor and virginica), it confuses them. The confusion matrix gives us useful information about what kind of errors the classifier is making. Accuracy on the test set is a good performance measure when the number of instances of each class is similar, i.e., when we have a uniform distribution of classes. However, consider that 99 percent of your instances belong to just one class (you have a skewed class distribution): a classifier that always predicts this majority class will have an excellent performance in terms of accuracy, despite the fact that it is an extremely naive method (and that it will surely fail in the “difficult” 1% of cases).Within scikit-learn, there are several evaluation functions; we will show three popular ones: precision, recall, and F1-score (or F-measure).print (metrics.classification_report(y_test, y_pred, target_names=iris.target_names))- Precision computes the proportion of instances predicted as positive that were correctly evaluated (it measures how right our classifier is when it says that an instance is positive).- Recall counts the proportion of positive instances that were correctly evaluated (measuring how right our classifier is when faced with a positive instance).- F1-score is the harmonic mean of precision and recall, and tries to combine both in a single number. 7-12 Using the four flower attributes To end this classification section, we will repeat the whole process, this time using the four original attributes, and check whether performance improves.# Test set will be the 25% taken randomly X_train4, X_test4, y_train4, y_test4 = train_test_split(X_iris, y_iris, test_size=0.25, random_state=33) # Standardize the features scaler = preprocessing.StandardScaler().fit(X_train4) X_train4 = scaler.transform(X_train4) X_test4 = scaler.transform(X_test4) # Build the classifier clf3 = linear_model.SGDClassifier(loss='log', random_state=33) clf3.fit(X_train4, y_train4) # Evaluate the classifier on the evaluation set y_pred4 = clf3.predict(X_test4) print (metrics.classification_report(y_test4, y_pred4, target_names=iris.target_names))7-13 Unsupervised Learning: Clustering Sometimes it is possible to take an unlabeled training set and try to find hidden structure or patterns in the data: there is no given target class to predict or to evaluate the resulting model. We call this class of machine learning tasks _unsupervised learning_.
For instance, _clustering_ methods try to group instances into subsets (called clusters): an instance should be similar to the others in the same subset and different from those belonging to another subset.In this section, we will perform clustering of the Iris data set, to see if we can group instances using their petal and sepal width and length. The training set is the same one we used for our last example on supervised classification. K-means is probably the most popular clustering algorithm, because it is very simple and easy to implement, and it has shown good performance on different tasks. It belongs to the class of partition algorithms that simultaneously partition data points into distinct groups, called clusters. We will apply k-means to the training data, using only sepal dimensions, building 3 clusters (note that we could have selected a different number of clusters to group the data into).from sklearn import cluster clf_sepal = cluster.KMeans(init='k-means++', n_clusters=3, random_state=33) clf_sepal.fit(X_train4[:,0:2])We can show the label assigned to each instance (note that this label is a cluster name; it has nothing to do with our original target classes... actually, when you are doing clustering you have no target class!).print (clf_sepal.labels_)Using NumPy's indexing capabilities, we can display the actual target classes for each cluster, just to compare the built clusters with our flower type classes...print (y_train4[clf_sepal.labels_==0]) print (y_train4[clf_sepal.labels_==1]) print (y_train4[clf_sepal.labels_==2])As usual, it is a good idea to display our instances and the clusters they belong to, to get a first approximation of how well our algorithm is behaving on our data:colormarkers = [ ['red','s'], ['greenyellow','o'], ['blue','x']] step = .01 margin = .1 sl_min, sl_max = X_train4[:, 0].min()-margin, X_train4[:, 0].max() + margin sw_min, sw_max = X_train4[:, 1].min()-margin, X_train4[:, 1].max() + margin sl, sw = np.meshgrid( np.arange(sl_min, sl_max, step), np.arange(sw_min, sw_max, step) ) Zs = clf_sepal.predict(np.c_[sl.ravel(), sw.ravel()]).reshape(sl.shape) centroids_s = clf_sepal.cluster_centers_Display the data points and the calculated regionsplt.figure(1) plt.clf() plt.imshow(Zs, interpolation='nearest', extent=(sl.min(), sl.max(), sw.min(), sw.max()), cmap= plt.cm.Pastel1, aspect='auto', origin='lower') for j in [0,1,2]: px = X_train4[:, 0][y_train == j] py = X_train4[:, 1][y_train == j] plt.scatter(px, py, c=colormarkers[j][0], marker= colormarkers[j][1]) plt.scatter(centroids_s[:, 0], centroids_s[:, 1],marker='*',linewidths=3, color='black', zorder=10) plt.title('K-means clustering on the Iris dataset using Sepal dimensions\nCentroids are marked with stars') plt.xlim(sl_min, sl_max) plt.ylim(sw_min, sw_max) plt.xlabel("Sepal length") plt.ylabel("Sepal width") plt.show()Repeat the experiment, using petal dimensionsclf_petal = cluster.KMeans(init='k-means++', n_clusters=3, random_state=33) clf_petal.fit(X_train4[:,2:4]) print (y_train4[clf_petal.labels_==0]) print (y_train4[clf_petal.labels_==1]) print (y_train4[clf_petal.labels_==2])Plot the clusterscolormarkers = [ ['red','s'], ['greenyellow','o'], ['blue','x']] step = .01 margin = .1 sl_min, sl_max = X_train4[:, 2].min()-margin, X_train4[:, 2].max() + margin sw_min, sw_max = X_train4[:, 3].min()-margin, X_train4[:, 3].max() + margin sl, sw = np.meshgrid( np.arange(sl_min, sl_max, step), np.arange(sw_min, sw_max, step), ) Zs = clf_petal.predict(np.c_[sl.ravel(), sw.ravel()]).reshape(sl.shape)
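# Zs holds one predicted cluster id per mesh-grid point, reshaped back to the grid shape so that
# imshow below can paint the cluster regions; cluster_centers_ gives the centroid coordinates drawn as stars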
centroids_s = clf_petal.cluster_centers_ plt.figure(1) plt.clf() plt.imshow(Zs, interpolation='nearest', extent=(sl.min(), sl.max(), sw.min(), sw.max()), cmap= plt.cm.Pastel1, aspect='auto', origin='lower') for j in [0,1,2]: px = X_train4[:, 2][y_train4 == j] py = X_train4[:, 3][y_train4 == j] plt.scatter(px, py, c=colormarkers[j][0], marker= colormarkers[j][1]) plt.scatter(centroids_s[:, 0], centroids_s[:, 1],marker='*',linewidths=3, color='black', zorder=10) plt.title('K-means clustering on the Iris dataset using Petal dimensions\nCentroids are marked with stars') plt.xlim(sl_min, sl_max) plt.ylim(sw_min, sw_max) plt.xlabel("Petal length") plt.ylabel("Petal width") plt.show()Now, calculate the clusters using all four attributesclf = cluster.KMeans(init='k-means++', n_clusters=3, random_state=33) clf.fit(X_train4) print (y_train[clf.labels_==0]) print (y_train[clf.labels_==1]) print (y_train[clf.labels_==2])Measure precision & recall on the testing set, using all attributes, and using only petal measuresy_pred=clf.predict(X_test4) print (metrics.classification_report(y_test, y_pred, target_names=['setosa','versicolor','virginica'])) y_pred_petal=clf_petal.predict(X_test4[:,2:4]) print (metrics.classification_report(y_test, y_pred_petal, target_names=['setosa','versicolor','virginica']))Wait, every performance measure is better using just two attributes. Is it possible that fewer features give better results? Although at first glance this seems contradictory, we will see in future notebooks that selecting the right subset of features, a process called feature selection, can actually improve the performance of our algorithms. 7-14 Supervised Learning: Regression In every example we have seen so far, the output we aimed at predicting belonged to a discrete set. For classification, the set was the target classes, while for the clustering algorithm the set included the different calculated clusters. What if we want to predict a value extracted from the real line? In this case, we are trying to solve a regression problem. To show how regression works in scikit-learn, we will apply it to a (very) simple and well-known problem: trying to predict the price of a house given some of its attributes. As the dataset, we will use the Boston house-prices dataset (find the dataset description and attributes [here](https://github.com/mjbahmani)).from sklearn.datasets import load_boston boston = load_boston() print ('Boston dataset shape:{}'.format(boston.data.shape)) print (boston.feature_names)Create training and testing sets, and scale values, as usual. Create a method for training and evaluating a model. This time, to evaluate our model we will use a different approach: instead of separating out a testing set, we will use _cross-validation_. Cross-validation usually involves the following steps:
1. Partition the dataset into k different subsets.
2. Create k different models by training on k-1 subsets and testing on the remaining one.
3. Measure the performance of each of the k models and use the average value as your performance value.
8- PlotlyHow to use **Plotly** offline inside IPython notebooks. 8-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools.
Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.# example for plotly import plotly.offline as py import plotly.graph_objs as go py.init_notebook_mode(connected=True) from plotly import tools import plotly.figure_factory as ff iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. Y = iris.target x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 trace = go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers', marker=dict(color=np.random.randn(150), size=10, colorscale='Viridis', showscale=False)) layout = go.Layout(title='Training Points', xaxis=dict(title='Sepal length', showgrid=False), yaxis=dict(title='Sepal width', showgrid=False), ) fig = go.Figure(data=[trace], layout=layout) py.iplot(fig)8-2 Plotly Offline from Command LineYou can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.import plotly.graph_objs as go plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])8-3 Generating Offline Graphs within Jupyter NotebookYou can also plot your graphs offline inside a Jupyter Notebook Environment. First you need to initiate the Plotly Notebook mode as below:init_notebook_mode(connected=True)Run at the start of every ipython notebook to use plotly.offline. This injects the plotly.js source files into the notebook.iplot([{"x": [1, 2, 3], "y": [3, 1, 6]}]) import plotly.graph_objs as go import numpy as np x = np.random.randn(2000) y = np.random.randn(2000) iplot([go.Histogram2dContour(x=x, y=y, contours=dict(coloring='heatmap')), go.Scatter(x=x, y=y, mode='markers', marker=dict(color='white', size=3, opacity=0.3))], show_link=False)8-4 Plotting Offline with Cufflinksimport cufflinks as cf iplot(cf.datagen.lines().iplot(asFigure=True, kind='scatter',xTitle='Dates',yTitle='Returns',title='Returns'))---title: "Violin Plot"author: "Charles"date: 2020-08-12description: "-"type: technical_notedraft: false---import matplotlib.pyplot as plt import numpy as np import numpy as np np.random.seed(10) collectn_1 = np.random.normal(100, 10, 200) collectn_2 = np.random.normal(80, 30, 200) collectn_3 = np.random.normal(90, 20, 200) collectn_4 = np.random.normal(70, 25, 200) data_to_plot = [collectn_1, collectn_2, collectn_3, collectn_4] fig, ax = plt.subplots(1, 1) bp = ax.violinplot(data_to_plot) plt.show()Implementing Advantage-Actor Critic (A2C) In this notebook you will implement Advantage Actor Critic algorithm that trains on a batch of Atari 2600 environments running in parallel. Firstly, we will use environment wrappers implemented in file `atari_wrappers.py`. These wrappers preprocess observations (resize, grayscal, take max between frames, skip frames and stack them together) and rewards. Some of the wrappers help to reset the environment and pass `done` flag equal to `True` when agent dies.File `env_batch.py` includes implementation of `ParallelEnvBatch` class that allows to run multiple environments in parallel. To create an environment we can use `nature_dqn_env` function. 
Note that if you are using PyTorch and not using `tensorboardX` you will need to implement a wrapper that will log **raw** total rewards that the *unwrapped* environment returns and redefine the implemention of `nature_dqn_env` function here.import numpy as np from atari_wrappers import nature_dqn_env, NumpySummaries import matplotlib.pyplot as plt env = nature_dqn_env("SpaceInvadersNoFrameskip-v4", nenvs=8, summaries='Numpy') obs = env.reset() assert obs.shape == (8, 84, 84, 4) assert obs.dtype == np.uint8Next, we will need to implement a model that predicts logits and values. It is suggested that you use the same model as in [Nature DQN paper](https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf) with a modification that instead of having a single output layer, it will have two output layers taking as input the output of the last hidden layer. **Note** that this model is different from the model you used in homework where you implemented DQN. You can use your favorite deep learning framework here. We suggest that you use orthogonal initialization with parameter $\sqrt{2}$ for kernels and initialize biases with zeros.# import tensorflow as torch # import torch as tf import torch import torch.nn as nn import torch.nn.functional as F # def ortho_weights(shape, scale=1.): """ PyTorch port of ortho_init from baselines.a2c.utils """ shape = tuple(shape) if len(shape) == 2: flat_shape = shape[1], shape[0] elif len(shape) == 4: flat_shape = (np.prod(shape[1:]), shape[0]) else: raise NotImplementedError a = np.random.normal(0., 1., flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v q = q.transpose().copy().reshape(shape) if len(shape) == 2: return torch.from_numpy((scale * q).astype(np.float32)) if len(shape) == 4: return torch.from_numpy((scale * q[:, :shape[1], :shape[2]]).astype(np.float32)) def atari_initializer(module): """ Parameter initializer for Atari models Initializes Linear, Conv2d, and LSTM weights. """ classname = module.__class__.__name__ if classname == 'Linear': module.weight.data = ortho_weights(module.weight.data.size(), scale=np.sqrt(2.)) module.bias.data.zero_() elif classname == 'Conv2d': module.weight.data = ortho_weights(module.weight.data.size(), scale=np.sqrt(2.)) module.bias.data.zero_() elif classname == 'LSTM': for name, param in module.named_parameters(): if 'weight_ih' in name: param.data = ortho_weights(param.data.size(), scale=1.) if 'weight_hh' in name: param.data = ortho_weights(param.data.size(), scale=1.) if 'bias' in name: param.data.zero_() class Network(nn.Module): def __init__(self, shape_in, action_shape): super(Network, self).__init__() self.conv1 = nn.Conv2d(shape_in, 32, kernel_size=8, stride=4) self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.dense1 = nn.Linear(64*7*7, 512) self.dense2 = nn.Linear(512, action_shape) self.dense3 = nn.Linear(512, 1) self.apply(atari_initializer) self.dense2.weight.data = ortho_weights(self.dense2.weight.size(), scale=.01) self.dense3.weight.data = ortho_weights(self.dense3.weight.size()) def forward(self, inputs): x = F.relu(self.conv1(inputs)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = x.view(x.shape[0], -1) x = F.relu(self.dense1(x)) logits = self.dense2(x) values = self.dense3(x) return logits, valuesYou will also need to define and use a policy that wraps the model. 
While the model computes logits for all actions, the policy will sample actions and also compute their log probabilities. `policy.act` should return a dictionary of all the arrays that are needed to interact with an environment and train the model. Note that actions must be an `np.ndarray` while the othertensors need to have the type determined by your deep learning framework.device = torch.device('cuda') class Policy: def __init__(self, model, device): self.model = model self.device = device def act(self, inputs): # # Should return a dict containing keys ['actions', 'logits', 'log_probs', 'values']. inputs = torch.tensor(inputs, dtype=torch.float32, device=device) inputs = torch.transpose(inputs/255., 3, 1) logits, values = self.model(inputs) dist = torch.distributions.Categorical(logits=logits) actions = dist.sample().view(-1, 1)[:, 0] actions = actions.cpu().detach().numpy() probs = F.softmax(logits, -1) logprobs = F.log_softmax(logits, -1) logp_actions = torch.sum(logprobs * F.one_hot(torch.tensor(actions, device=device), n_actions), axis=-1) return {'actions': actions, 'logits': logits, 'values': values, 'probs': probs, 'log_probs': logprobs, 'log_probs_actions':logp_actions}Next will pass the environment and policy to a runner that collects partial trajectories from the environment. The class that does is is already implemented for you.#from runners import EnvRunner from collections import defaultdict import numpy as np class EnvRunner: """ Reinforcement learning runner in an environment with given policy """ def __init__(self, env, policy, nsteps, transforms=None, step_var=None): self.env = env self.policy = policy self.nsteps = nsteps self.transforms = transforms or [] self.step_var = step_var if step_var is not None else 0 self.state = {"latest_observation": self.env.reset()} @property def nenvs(self): """ Returns number of batched envs or `None` if env is not batched """ return getattr(self.env.unwrapped, "nenvs", None) def reset(self): """ Resets env and runner states. """ self.state["latest_observation"] = self.env.reset() def get_next(self): """ Runs the agent in the environment. """ trajectory = defaultdict(list, {"actions": []}) observations = [] rewards = [] resets = [] self.state["env_steps"] = self.nsteps for i in range(self.nsteps): observations.append(self.state["latest_observation"]) act = self.policy.act(self.state["latest_observation"]) if "actions" not in act: raise ValueError("result of policy.act must contain 'actions' " f"but has keys {list(act.keys())}") for key, val in act.items(): trajectory[key].append(val) obs, rew, done, _ = self.env.step(trajectory["actions"][-1]) self.state["latest_observation"] = obs rewards.append(rew) resets.append(done) self.step_var += self.nenvs or 1 # Only reset if the env is not batched. Batched envs should auto-reset. if not self.nenvs and np.all(done): self.state["env_steps"] = i + 1 self.state["latest_observation"] = self.env.reset() trajectory.update(observations=observations, rewards=rewards, resets=resets) trajectory["state"] = self.state for transform in self.transforms: transform(trajectory) return trajectoryThis runner interacts with the environment for a given number of steps and returns a dictionary containingkeys * 'observations' * 'rewards' * 'resets'* 'actions'* all other keys that you defined in `Policy`under each of these keys there is a python `list` of interactions with the environment of specified length $T$ — the size of partial trajectory. 
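To make that structure concrete, here is a small illustrative check (it assumes the `policy` instance and the batched `env` created around these cells; without any transforms each entry is still batched over the 8 parallel environments, so the exact shapes below are an assumption about the wrappers):
probe_runner = EnvRunner(env, policy, nsteps=5)  # no transforms, so nothing is merged or post-processed
probe = probe_runner.get_next()
print(len(probe['rewards']))               # T = 5 entries, one per environment step
print(np.shape(probe['rewards'][0]))       # expected (8,): one reward per parallel environment
print(np.shape(probe['observations'][0]))  # expected (8, 84, 84, 4): stacked frames for each env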
To train the part of the model that predicts state values you will need to compute the value targets. Any callable could be passed to `EnvRunner` to be applied to each partial trajectory after it is collected. Thus, we can implement and use `ComputeValueTargets` callable. The formula for the value targets is simple:$$\hat v(s_t) = \left( \sum_{t'=0}^{T - 1 - t} \gamma^{t'}r_{t+t'} \right) + \gamma^T \hat{v}(s_{t+T}),$$In implementation, however, do not forget to use `trajectory['resets']` flags to check if you need to add the value targets at the next step when computing value targets for the current step. You can access `trajectory['state']['latest_observation']`to get last observations in partial trajectory — $s_{t+T}$.class ComputeValueTargets: def __init__(self, policy, gamma=0.99): self.policy = policy self.gamma = gamma def __call__(self, trajectory): # This method should modify trajectory inplace by adding # an item with key 'value_targets' to it. # value_target = policy.act(trajectory['state']['latest_observation'])['values'][:, 0] env_steps = trajectory['state']['env_steps'] rewards = torch.tensor(trajectory['rewards'], dtype=torch.float32, device=device) dones = torch.tensor(trajectory['resets'], dtype=torch.float, device=device) is_not_done = 1 - dones trajectory['value_targets'] = [0] * env_steps for i in range(env_steps): j = env_steps - i - 1 value_target = rewards[j] + value_target * self.gamma * is_not_done[j] trajectory['value_targets'][j] = value_targetAfter computing value targets we will transform lists of interactions into tensorswith the first dimension `batch_size` which is equal to `T * nenvs`, i.e. you essentially needto flatten the first two dimensions.class MergeTimeBatch: """ Merges first two axes typically representing time and env batch. """ def __call__(self, trajectory): # Modify trajectory inplace. # trajectory['actions'] = np.array(trajectory['actions']).flatten() trajectory['logits'] = torch.stack(trajectory['logits']).flatten(0, 1) trajectory['probs'] = torch.stack(trajectory['probs']).flatten(0, 1) trajectory['log_probs'] = torch.stack(trajectory['log_probs']).flatten(0, 1) trajectory['log_probs_actions'] = torch.cat(trajectory['log_probs_actions']) trajectory['values'] = torch.cat(trajectory['values'])[:, 0] trajectory['value_targets'] = torch.cat(trajectory['value_targets']) trajectory['observations'] = np.concatenate(trajectory['observations']) trajectory['rewards'] = np.concatenate(trajectory['rewards']) trajectory['resets'] = np.array(trajectory['resets']).flatten() n_actions = env.action_space.n model = Network(shape_in=4, action_shape=env.action_space.n).to(device) # policy = Policy(model, device=device) runner = EnvRunner( env, policy, nsteps=5, transforms=[ ComputeValueTargets(policy), MergeTimeBatch(), ]) # runner.get_next()['actions'].shape # runner.get_next()['logits'].shape print(runner.get_next()['log_probs'].shape) # runner.get_next()['values'].shape print(runner.get_next()['value_targets'].shape)torch.Size([40, 6]) torch.Size([40])Now is the time to implement the advantage actor critic algorithm itself. You can look into your lecture,[Mnih et al. 
2016](https://arxiv.org/abs/1602.01783) paper, and [lecture](https://www.youtube.com/watch?v=Tol_jw5hWnI&list=PLkFD6_40KJIxJMR-j5A1mkxK26gh_qg37&index=20) by .class A2C: def __init__(self, policy, optimizer, value_loss_coef=0.25, entropy_coef=0.01, max_grad_norm=0.5): self.policy = policy self.optimizer = optimizer self.value_loss_coef = value_loss_coef self.entropy_coef = entropy_coef self.max_grad_norm = max_grad_norm def policy_loss(self, trajectory): # You will need to compute advantages here. # A = (trajectory['value_targets'] - trajectory['values']).detach() self.advantages_np = A.mean().cpu().numpy() self.policy_loss_np = -torch.mean(A*trajectory['log_probs_actions']).detach().mean().cpu().numpy() return -torch.mean(A*trajectory['log_probs_actions']) def value_loss(self, trajectory): # self.values_np = trajectory['values'].detach().mean().cpu().numpy() self.value_targets_np = trajectory['value_targets'].detach().mean().cpu().numpy() value_loss = torch.mean((trajectory['values'] - trajectory['value_targets'].detach())**2) self.value_loss_np = value_loss.detach().mean().cpu().numpy() return value_loss def loss(self, trajectory): # entropy = - torch.mean(torch.sum(trajectory['probs']*trajectory['log_probs'], 1)) self.entropy_np = entropy.detach().cpu().numpy() loss = self.policy_loss(trajectory) +\ self.value_loss_coef*self.value_loss(trajectory) - self.entropy_coef*entropy self.a2c_loss_np = loss.detach().cpu().numpy() return loss def step(self, trajectory): # self.optimizer.zero_grad() self.loss(trajectory).backward() torch.nn.utils.clip_grad_norm_(policy.model.parameters(), self.max_grad_norm) self.optimizer.step() self.total_norm = 0 for p in policy.model.parameters(): param_norm = p.grad.data.norm(2) self.total_norm += param_norm.item() ** 2 self.total_norm = self.total_norm ** (1. / 2)Now you can train your model. With reasonable hyperparameters training on a single GTX1080 for 10 million steps across all batched environments (which translates to about 5 hours of wall clock time)it should be possible to achieve *average raw reward over last 100 episodes* (the average is taken over 100 last episodes in each environment in the batch) of about 600. You should plot this quantity with respect to `runner.step_var` — the number of interactions with all environments. It is highly encouraged to also provide plots of the following quantities (these are useful for debugging as well):* [Coefficient of Determination](https://en.wikipedia.org/wiki/Coefficient_of_determination) between value targets and value predictions* Entropy of the policy $\pi$* Value loss* Policy loss* Value targets* Value predictions* Gradient norm* Advantages* A2C lossFor optimization we suggest you use RMSProp with learning rate starting from 7e-4 and linearly decayed to 0, smoothing constant (alpha in PyTorch and decay in TensorFlow) equal to 0.99 and epsilon equal to 1e-5.def evaluate(env, agent, n_games=1): """Plays an a game from start till done, returns per-game rewards """ agent.train(False) game_rewards = [] done_counter = 0 for _ in range(n_games): state = env.reset() total_reward = 0 while True: state = torch.tensor(state, dtype=torch.float32, device=device) / 255. 
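# (added note) dividing by 255 above scales the uint8 pixels to [0, 1]; the transpose below moves the
# channel axis in front of the spatial axes to match the Conv2d layout (N, C, H, W); with square
# 84x84 frames the accompanying height/width swap is harmless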
state = torch.transpose(state, 3, 1) logits, _ = agent(state) logits = logits[0] dist = torch.distributions.Categorical(logits=logits) action = dist.sample().view(-1, 1) action = action.cpu().detach().numpy() state, reward, done, info = env.step([action]*8) total_reward += reward[0] if done[0]: done_counter += 1 if done_counter>3: break game_rewards.append(total_reward) agent.train(True) return game_rewards # evaluate(model, n_games=3) optimizer = torch.optim.RMSprop(model.parameters(), lr=7e-4, alpha=0.99, eps=1e-5) from IPython.display import clear_output from tqdm import trange def plot_tools(legend, position, data_y): plt.subplot(2,4,position) plt.plot(data_y, label=legend) plt.title(legend); plt.grid(); plt.legend() plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0)) a2c = A2C(policy, optimizer) # # num_steps = [] rewards = [] entropies = [] value_losses = [] policy_losses = [] values =[] value_targets = [] grad_norms = [] advantages = [] a2c_losses = [] DECAY = 10_000_000 mean_rewards1 = [] rewards1 = np.zeros(8, dtype=float) dones = np.zeros(8, dtype=float) runner.reset() for i in trange(0,int(10**7), 40): trajectory = runner.get_next() a2c.step(trajectory) for batch_rewards, batch_dones in zip(trajectory['rewards'], trajectory['resets']): rewards1 += batch_rewards dones += batch_dones if np.sum(dones)>=100: clear_output(True) entropies.append(a2c.entropy_np) value_losses.append(a2c.values_np) policy_losses.append(a2c.policy_loss_np) values.append(a2c.values_np) value_targets.append(a2c.value_targets_np) grad_norms.append(a2c.total_norm) advantages.append(a2c.advantages_np) a2c_losses.append(a2c.a2c_loss_np) # rewards.append(np.mean(evaluate(env, model, n_games=10))) num_steps.append(runner.step_var) plt.figure(figsize=[20,10]) plt.subplot(2,4,1) mean_rewards1.append(np.sum(rewards1) / np.sum(dones)) plt.plot(mean_rewards1, label='mean rewards') plt.title("Rewards"); plt.grid(); plt.legend() plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0)) plot_tools('Entropy', 2, entropies) plt.subplot(2,4,3) plt.plot(values, label='Values') plt.plot(value_targets, label='Value_targets') plt.title("Coefficient of Determination"); plt.grid(); plt.legend() plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0)) plot_tools('Value loss', 4, value_losses) plot_tools('Policy loss', 5, policy_losses) plot_tools('A2C loss', 6, a2c_losses) plot_tools('Grad_norm_L2', 7, grad_norms) plot_tools('Advantages', 8, advantages) plt.show() rewards1 = np.zeros(8, dtype=float) dones = np.zeros(8, dtype=float)Original Paperhttps://doi.org/10.1016/j.bbrc.2018.04.095import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline metadata = pd.read_csv('../data/wu_chen_2018/metadata_cleaned_chen_2018.txt', sep='\t', index_col=0) metadataPCoApcoa = pd.read_csv('../data/wu_chen_2018/SI_1085_wUF_pcoa.txt', sep='\t', usecols=range(4), index_col=0) pcoa.head() named_pcoa = pd.merge(pcoa, metadata[metadata['feeding_pattern'] == 'ad libitum'], left_index=True, right_index=True) named_pcoa.head() named_pcoa.niche.unique() niche_pcoa = named_pcoa[named_pcoa['niche'] != 'epithelium'] lumen_pcoa = named_pcoa[named_pcoa['niche'] == 'lumen'] mucosal_pcoa = named_pcoa[named_pcoa['niche'] == 'mucosal'] ax = sns.stripplot(x="time", y="Axis1_25.2", hue='organ', dodge=True, edgecolor='k', linewidth=1, size=9, data=lumen_pcoa, #data=lumen_pcoa[lumen_pcoa["group"] == 'LD'], order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.legend(bbox_to_anchor=(1.05, 1), 
loc=2, borderaxespad=0.) plt.ylabel('Axis1 25.2%') plt.title('Colon wUF PCoA Axis1') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' #plt.savefig('../figures/chen_colon_wUF_pcoa_Axis1.svg', dpi=300, bbox_inches="tight") ax = sns.stripplot(x="time", y="Axis1_25.2", hue='niche', palette=['lightblue', 'darkgreen'], edgecolor='k', linewidth=1, size=9, data=niche_pcoa[niche_pcoa['organ'] == 'colon'], order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.ylabel('Axis1 25.2%') plt.title('Colon wUF PCoA Axis1') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_colon_wUF_pcoa_Axis1.svg', dpi=300, bbox_inches="tight") niche_pcoa[niche_pcoa['organ'] == 'colon'] niche_pcoa[niche_pcoa['organ'] == 'colon'].time.value_counts() ax = sns.stripplot(x="time", y="Axis1_25.2", hue='group', palette=['darkgreen', 'lightblue'], edgecolor='k', linewidth=1, size=9, data=mucosal_pcoa, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.ylabel('Axis1 25.2%') plt.title('Mucosal wUF PCoA Axis1') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_mucosal_wUF_pcoa_Axis1.svg', dpi=300, bbox_inches="tight") ax = sns.stripplot(x="time", y="Axis1_25.2", hue='group', palette=['lightblue', 'darkgreen'], edgecolor='k', linewidth=1, size=9, data=lumen_pcoa, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.ylabel('Axis1 25.2%') plt.title('Lumen wUF PCoA Axis1') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_lumen_wUF_pcoa_Axis1.svg', dpi=300, bbox_inches="tight") ax = sns.scatterplot(x="Axis2_16_2", y="Axis1_25.2", hue='group', palette=['lightblue', 'darkgreen'], edgecolor='k', linewidth=1, data=niche_pcoa) plt.title('Chen SI wUF PCoA Axis1&2') plt.ylabel('Axis1 25.2%') plt.xlabel('Axis2 16.2%') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_SI_wUF_pcoa_groups.svg', dpi=300, bbox_inches="tight") ax = sns.scatterplot(x="Axis2_16_2", y="Axis1_25.2", hue='niche', palette=['lightblue', 'darkgreen'], edgecolor='k', linewidth=1, data=niche_pcoa[niche_pcoa['organ'] == 'jejunum']) plt.title('Jejunum wUF PCoA Axis1&2') plt.ylabel('Axis1 25.2%') plt.xlabel('Axis2 16.2%') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_jejunum_wUF_pcoa_groups.svg', dpi=300, bbox_inches="tight") ax = sns.scatterplot(x="Axis2_16_2", y="Axis1_25.2", hue='group', palette=['lightblue', 'darkgreen'], edgecolor='k', linewidth=1, data=lumen_pcoa) plt.title('Chen Lumen wUF PCoA Axis1&2') plt.ylabel('Axis1 25.2%') plt.xlabel('Axis2 16.2%') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_lumen_wUF_pcoa_groups.svg', dpi=300, bbox_inches="tight") ax = sns.scatterplot(x="Axis2_16_2", y="Axis1_25.2", hue='group', palette=['darkgreen', 'lightblue'], edgecolor='k', linewidth=1, 
data=mucosal_pcoa) plt.title('Chen Mucosal wUF PCoA Axis1&2') plt.ylabel('Axis1 25.2%') plt.xlabel('Axis2 16.2%') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_mucosal_wUF_pcoa_groups.svg', dpi=300, bbox_inches="tight")Boxplots All SI TogetherSI_wUF_dm = pd.read_csv('../data/wu_chen_2018/SI_wUF_DM_1085.tsv', sep='\t', index_col=0) SI_wUF_dm.head() named_SI_wUF_dm = pd.merge(metadata[['combined4']], SI_wUF_dm, left_index=True, right_index=True) named_SI_wUF_dm.head() named2_SI_wUF_dm = named_SI_wUF_dm.set_index('combined4') Named_SI_wUF_dm = named_SI_wUF_dm[['combined4']].copy() Named_SI_wUF_dm.head() named3_SI_wUF_dm = pd.merge(named2_SI_wUF_dm.T, Named_SI_wUF_dm, left_index=True, right_index=True) named4_SI_wUF_dm = named3_SI_wUF_dm.set_index('combined4') named4_SI_wUF_dm LD = named4_SI_wUF_dm.filter(regex='LD') LD2 = LD.T.filter(regex='LD') LD2.head() LD_lum_dm = LD2.filter(regex='lumen') LD_lum_muc_dm = LD_lum_dm.T.filter(regex='mucosal') LD_lum_muc_dm.shape LD_lum_muc_dm2 = LD_lum_muc_dm.copy() LD_lum_muc_dm3 = LD_lum_muc_dm2.stack().reset_index() LD_lum_muc_dm3.columns = ['Lumen','Mucosal','Value'] LD_lum_muc_dm3 LD_lum_muc_dm3['timepoints'] = LD_lum_muc_dm3['Lumen'].str.split('_').str[-1] + "_" + LD_lum_muc_dm3['Mucosal'].str.split('_').str[-1] LD_lum_muc_dm3.timepoints.unique() zt_tps = ['ZT2_ZT2', 'ZT6_ZT6', 'ZT10_ZT10', 'ZT14_ZT14', 'ZT18_ZT18', 'ZT22_ZT22'] LD_lum_muc_dm4 = LD_lum_muc_dm3.loc[LD_lum_muc_dm3['timepoints'].isin(zt_tps)] LD_lum_muc_dm4 LD_lum_muc_dm5 = LD_lum_muc_dm4.copy() LD_lum_muc_dm5['ZT'] = LD_lum_muc_dm5['timepoints'].str.split('_').str[-1] LD_lum_muc_dm5 LD_lum_muc_dm5.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LD_lum_muc_dm5, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LD_lum_muc_dm5, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.ylabel('All SI LD wUF BCD-Lum v Muc') ax.axhline(0.6123719478224652, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_allSI_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight") LD_all = named4_SI_wUF_dm.filter(regex='LD') LD_all2 = LD_all.T.filter(regex='DD') LD_all2.head() LD_all3 = LD_all2.filter(regex='lumen') LD_all_lum = LD_all3.T.filter(regex='lumen') LD_all_lum.head() LD_all_lum2 = LD_all_lum.copy() LD_all_lum3 = LD_all_lum2.stack().reset_index() LD_all_lum3.columns = ['LD','DD','Value'] LD_all_lum3.head() LD_all_lum3['timepoints'] = LD_all_lum3['LD'].str.split('_').str[-1] + "_" + LD_all_lum3['DD'].str.split('_').str[-1] LD_all_lum3.timepoints.unique() LD_all_lum3['organs'] = LD_all_lum3['LD'].str.split('_').str[1] + "_" + LD_all_lum3['DD'].str.split('_').str[1] LD_all_lum3.organs.unique() LD_all_lum4 = LD_all_lum3.loc[LD_all_lum3['timepoints'].isin(zt_tps)] LD_all_lum4 LD_all_lum5 = LD_all_lum4.copy() LD_all_lum5['ZT'] = LD_all_lum5['timepoints'].str.split('_').str[-1] LD_all_lum5 LDvDD_lum_ileum = LD_all_lum5[LD_all_lum5['organs'] == 'ileum_ileum'].copy() LDvDD_lum_jejunum = LD_all_lum5[LD_all_lum5['organs'] == 'jejunum_jejunum'].copy() LDvDD_lum_colon = LD_all_lum5[LD_all_lum5['organs'] == 'colon_colon'].copy() LDvDD_lum_ileum.shape from statannot import add_stat_annotation LDvDD_lum_ileum.Value.median() ax = sns.boxplot(x="ZT", y="Value", 
palette="BuGn", data=LDvDD_lum_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LDvDD_lum_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Ileum Luminal Contents wUF', fontsize=14) plt.ylabel('BCD-LDvDD') ax.axhline(0.5343440198723557, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_ileum_lum_LDvDD_wUF_boxplot.svg', dpi=300, bbox_inches="tight") boxpairs = [('ZT22', 'ZT6'), ('ZT2', 'ZT22'), ('ZT22', 'ZT14'), ('ZT22', 'ZT18'), ('ZT10', 'ZT22')] ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LDvDD_lum_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LDvDD_lum_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Ileum Luminal Contents wUF', fontsize=14) plt.ylabel('BCD-LDvDD') ax.axhline(0.5343440198723557, color='k', ls='--') ax, test_results = add_stat_annotation(ax, data=LDvDD_lum_ileum, x="ZT", y="Value", order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22'], box_pairs = boxpairs, test='Mann-Whitney', text_format='star', loc='outside', verbose=2) plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_ileum_lum_LDvDD_wUF_boxplot2.svg', dpi=300, bbox_inches="tight") LDvDD_lum_jejunum.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LDvDD_lum_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LDvDD_lum_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Ileum Luminal Contents wUF', fontsize=14) plt.ylabel('BCD-LDvDD') ax.axhline(0.4940588498583712, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_jejunum_lum_LDvDD_wUF_boxplot.svg', dpi=300, bbox_inches="tight") boxpairs = [('ZT10', 'ZT6'), ('ZT2', 'ZT10'), ('ZT10', 'ZT14'), ('ZT10', 'ZT18'), ('ZT10', 'ZT22')] ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LDvDD_lum_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LDvDD_lum_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Ileum Luminal Contents wUF', fontsize=14) plt.ylabel('BCD-LDvDD') ax.axhline(0.5343440198723557, color='k', ls='--') ax, test_results = add_stat_annotation(ax, data=LDvDD_lum_jejunum, x="ZT", y="Value", order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22'], box_pairs = boxpairs, test='Mann-Whitney', text_format='star', loc='outside', verbose=2) plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_jejunum_lum_LDvDD_wUF_boxplot2.svg', dpi=300, bbox_inches="tight") LDvDD_lum_colon.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LDvDD_lum_colon, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LDvDD_lum_colon, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) 
plt.title('Ileum Luminal Contents wUF', fontsize=14) plt.ylabel('BCD-LDvDD') ax.axhline(0.5694627506274029, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_colon_lum_LDvDD_wUF_boxplot.svg', dpi=300, bbox_inches="tight")Separate by Organnamed_SI_wUF_dm = pd.merge(metadata[['combined4']], SI_wUF_dm, left_index=True, right_index=True) named_SI_wUF_dm.head() named2_SI_wUF_dm = named_SI_wUF_dm.set_index('combined4') Named_SI_wUF_dm = named_SI_wUF_dm[['combined4']].copy() Named_SI_wUF_dm.head() named3_SI_wUF_dm = pd.merge(named2_SI_wUF_dm.T, Named_SI_wUF_dm, left_index=True, right_index=True) named4_SI_wUF_dm = named3_SI_wUF_dm.set_index('combined4') named4_SI_wUF_dm LD = named4_SI_wUF_dm.filter(regex='LD') LD2 = LD.T.filter(regex='LD') LD2.head() LD_lum_dm = LD2.filter(regex='lumen') LD_lum_muc_dm = LD_lum_dm.T.filter(regex='mucosal') LD_lum_muc_dm.shape LD_lum_muc_dm2 = LD_lum_muc_dm.copy() LD_lum_muc_dm3 = LD_lum_muc_dm2.stack().reset_index() LD_lum_muc_dm3.columns = ['Lumen','Mucosal','Value'] LD_lum_muc_dm3 LD_lum_muc_dm3['organ'] = LD_lum_muc_dm3['Lumen'].str.split('_').str[1] + "_" + LD_lum_muc_dm3['Mucosal'].str.split('_').str[1] LD_lum_muc_dm3.organ.unique() LD_lum_muc_dm3['timepoints'] = LD_lum_muc_dm3['Lumen'].str.split('_').str[-1] + "_" + LD_lum_muc_dm3['Mucosal'].str.split('_').str[-1] LD_lum_muc_dm3.timepoints.unique() LD_lum_muc_dm4 = LD_lum_muc_dm3.loc[LD_lum_muc_dm3['timepoints'].isin(zt_tps)] LD_lum_muc_dm4 LD_lum_muc_dm5 = LD_lum_muc_dm4.copy() LD_lum_muc_dm5['ZT'] = LD_lum_muc_dm5['timepoints'].str.split('_').str[-1] LD_lum_muc_dm5 LD_LM_ileum = LD_lum_muc_dm5[LD_lum_muc_dm5['organ'] == 'ileum_ileum'].copy() LD_LM_jejunum = LD_lum_muc_dm5[LD_lum_muc_dm5['organ'] == 'jejunum_jejunum'].copy() LD_LM_colon = LD_lum_muc_dm5[LD_lum_muc_dm5['organ'] == 'colon_colon'].copy()IleumLD_LM_ileum.head() LD_LM_ileum.Value.median() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LD_LM_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LD_LM_ileum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Ileum LD wUF', fontsize=14) plt.ylabel('BCD-Lum v Muc') ax.axhline(0.4499902753393743, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_ileum_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight")JejunumLD_LM_jejunum.head() LD_LM_jejunum.Value.median() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LD_LM_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=LD_LM_jejunum, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Jejunum LD wUF', fontsize=14) plt.ylabel('BCD-Lum v Muc') ax.axhline(0.5061979110581645, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_jejunum_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight")ColonLD_LM_colon.head() LD_LM_colon.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=LD_LM_colon, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, 
data=LD_LM_colon, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Colon LD wUF') plt.ylabel('BCD-Lum v Muc') ax.axhline(0.5099110235276807, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' plt.savefig('../figures/chen_colon_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight")Colon v Jejunum (LD only)lumen_dm = named4_SI_wUF_dm.filter(regex='lumen') lumen_dm2 = LD.T.filter(regex='lumen') lumen_dm2.head() colon_lum_dm = lumen_dm2.filter(regex='colon') colon_jejunum_lum_dm2 = colon_lum_dm.T.filter(regex='jejunum') colon_jejunum_lum_dm2.shape colon_jejunum_lum_dms = colon_jejunum_lum_dm2.copy() colon_jejunum_lum_dms2 = colon_jejunum_lum_dms.stack().reset_index() colon_jejunum_lum_dms2.columns = ['colon','jejunum','Value'] colon_jejunum_lum_dms2 colon_jejunum_lum_dms2['timepoints'] = colon_jejunum_lum_dms2['colon'].str.split('_').str[-1] + "_" + colon_jejunum_lum_dms2['jejunum'].str.split('_').str[-1] colon_jejunum_lum_dms2.timepoints.unique() colon_jejunum_lum_dms3 = colon_jejunum_lum_dms2.loc[colon_jejunum_lum_dms2['timepoints'].isin(zt_tps)] colon_jejunum_lum_dms3 colon_jejunum_lum_dms4 = colon_jejunum_lum_dms3.copy() colon_jejunum_lum_dms4['ZT'] = colon_jejunum_lum_dms4['timepoints'].str.split('_').str[-1] colon_jejunum_lum_dms4 colon_jejunum_lum_dms4.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=colon_jejunum_lum_dms4, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=colon_jejunum_lum_dms4, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Luminal Contents', fontsize=14) plt.ylabel('wUF BCD Jejunum-v-Colon') ax.axhline(0.801909399398105, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' #plt.savefig('../figures/chen_jejunum_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight")Colon v Jejunum (LD only)LD_lum_dm = LD2.filter(regex='lumen') LD_lum_dm2 = LD_lum_dm.T.filter(regex='lumen') LD_lum_dm2.shape colon_LD_lum_dm = LD_lum_dm2.filter(regex='colon') colon_jejunum_LD_lum_dm2 = colon_LD_lum_dm.T.filter(regex='jejunum') colon_jejunum_LD_lum_dm2.shape colon_jejunum_LD_lum_dms = colon_jejunum_LD_lum_dm2.copy() colon_jejunum_LD_lum_dms2 = colon_jejunum_LD_lum_dms.stack().reset_index() colon_jejunum_LD_lum_dms2.columns = ['colon','jejunum','Value'] colon_jejunum_LD_lum_dms2 colon_jejunum_LD_lum_dms2['timepoints'] = colon_jejunum_LD_lum_dms2['colon'].str.split('_').str[-1] + "_" + colon_jejunum_LD_lum_dms2['jejunum'].str.split('_').str[-1] colon_jejunum_LD_lum_dms2.timepoints.unique() colon_jejunum_LD_lum_dms3 = colon_jejunum_LD_lum_dms2.loc[colon_jejunum_LD_lum_dms2['timepoints'].isin(zt_tps)] colon_jejunum_LD_lum_dms3 colon_jejunum_LD_lum_dms4 = colon_jejunum_LD_lum_dms3.copy() colon_jejunum_LD_lum_dms4['ZT'] = colon_jejunum_LD_lum_dms4['timepoints'].str.split('_').str[-1] colon_jejunum_LD_lum_dms4 colon_jejunum_LD_lum_dms4.Value.mean() ax = sns.boxplot(x="ZT", y="Value", palette="BuGn", data=colon_jejunum_LD_lum_dms4, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) ax = sns.stripplot(x="ZT", y="Value", palette="BuGn", edgecolor='k', linewidth=1, size=9, data=colon_jejunum_LD_lum_dms4, order=['ZT2', 'ZT6', 'ZT10', 'ZT14', 'ZT18', 'ZT22']) plt.title('Luminal Contents LD', fontsize=14) plt.ylabel('wUF BCD Jejunum-v-Colon') 
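# the dashed horizontal line below provides a fixed reference level for comparing wUF distances across ZT timepoints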
ax.axhline(0.801909399398105, color='k', ls='--') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['svg.fonttype'] = 'none' #plt.savefig('../figures/chen_jejunum_LD_lummuc_wUF_boxplot.svg', dpi=300, bbox_inches="tight")L15 - Model evaluation 2 (holdout)---- Instructor: ()- Course website: https://www.dalcimar.com/disciplinas/aprendizado-de-maquina- Bibliography: based on lectures of Dr. import numpy as np import matplotlib.pyplot as pltPessimistic Bias in Holdout The following experiments further illustrate the issue of the pessimistic bias of the generalization performance estimates using learning curves. Here, the same test set is used while a model is fit on training sets with different sizes.from mlxtend.data import mnist_data from sklearn.model_selection import train_test_split X2, y2 = mnist_data() X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y2, test_size=0.3, random_state=12, stratify=y2) print('Number of train examples:', y_train2.shape[0]) print('Number of test examples:', y_test2.shape[0]) print('Labels:', y_train2) from sklearn.linear_model import LogisticRegression clf_2 = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=0.000001, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=12, solver='lbfgs', max_iter=1000, multi_class='multinomial', verbose=0, warm_start=False, n_jobs=1) pred_train, pred_test = [], [] intervals = np.arange(500, X_train2.shape[0] + 1, 200) for i in intervals: clf_2.fit(X_train2[:i], y_train2[:i]) p_train = clf_2.score(X_train2[:i], y_train2[:i]) p_test = clf_2.score(X_test2, y_test2) pred_train.append(p_train) pred_test.append(p_test) with plt.style.context(('fivethirtyeight')): plt.plot(intervals, pred_train, marker='o', label='Train') plt.plot(intervals, pred_test, marker='s', label='Test') plt.legend(loc='best', numpoints=1) plt.xlim([430, X_train2.shape[0] + X_test2.shape[0]]) plt.axvspan(X_train2.shape[0], X_train2.shape[0] + X_test2.shape[0], alpha=0.2, color='steelblue') plt.ylim([0.85, 1.0]) plt.xlabel('Training Set Size') plt.ylabel('Accuracy') plt.tight_layout() plt.title("Linear Regression") #plt.savefig('figures/model-eval-mnist_0.svg')As it can be seen, the model is likely benefitting from more training data.from sklearn.ensemble import RandomForestClassifier clf_2 = RandomForestClassifier(n_estimators=100, random_state=123) pred_train, pred_test = [], [] intervals = np.arange(500, X_train2.shape[0] + 1, 200) for i in intervals: clf_2.fit(X_train2[:i], y_train2[:i]) p_train = clf_2.score(X_train2[:i], y_train2[:i]) p_test = clf_2.score(X_test2, y_test2) pred_train.append(p_train) pred_test.append(p_test) with plt.style.context(('fivethirtyeight')): plt.plot(intervals, pred_train, marker='o', label='Train') plt.plot(intervals, pred_test, marker='s', label='Test') plt.legend(loc='best', numpoints=1) plt.xlim([430, X_train2.shape[0] + X_test2.shape[0]]) plt.axvspan(X_train2.shape[0], X_train2.shape[0] + X_test2.shape[0], alpha=0.2, color='steelblue') plt.ylim([0.85, 1.0]) plt.xlabel('Training Set Size') plt.ylabel('Accuracy') plt.tight_layout() plt.title("Random Forest") #plt.savefig('figures/model-eval-mnist_0.svg') from sklearn.neighbors import KNeighborsClassifier clf_2 = KNeighborsClassifier(n_neighbors=3) pred_train, pred_test = [], [] intervals = np.arange(500, X_train2.shape[0] + 1, 200) for i in intervals: clf_2.fit(X_train2[:i], y_train2[:i]) p_train = clf_2.score(X_train2[:i], y_train2[:i]) p_test = clf_2.score(X_test2, y_test2) 
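# record train and test accuracy at this training-set size to build the learning curve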
pred_train.append(p_train) pred_test.append(p_test) with plt.style.context(('fivethirtyeight')): plt.plot(intervals, pred_train, marker='o', label='Train') plt.plot(intervals, pred_test, marker='s', label='Test') plt.legend(loc='best', numpoints=1) plt.xlim([430, X_train2.shape[0] + X_test2.shape[0]]) plt.axvspan(X_train2.shape[0], X_train2.shape[0] + X_test2.shape[0], alpha=0.2, color='steelblue') plt.ylim([0.85, 1.0]) plt.xlabel('Training Set Size') plt.ylabel('Accuracy') plt.tight_layout() plt.title("KNN") #plt.savefig('figures/model-eval-mnist_0.svg')List available deep learning NER modelsmalaya.entity.available_deep_model()Describe supported entitiesmalaya.describe_entities() string = 'KUALA LUMPUR: Sempena sambutan Aidilfitri minggu depan, Perdana Menteri Tun Dr dan Menter menitipkan pesanan khas kepada orang ramai yang mahu pulang ke kampung halaman masing-masing. Dalam video pendek terbitan Jabatan Keselamatan Jalan Raya (JKJR) itu, Dr Mahathir menasihati mereka supaya berhenti berehat dan tidur sebentar sekiranya mengantuk ketika memandu.'Load CRF modelcrf = malaya.entity.crf() crf.predict(string)Print important features from CRF modelcrf.print_features(10)Top-10 positive: 15.295689 person word:pengarah 12.352726 location word:dibuat-buat 11.206675 organization word:pas 10.718764 person word:solana 10.579257 person word:anggodo 10.205311 location word:kenya 10.178896 time word:jumat 10.138113 person word:terpantas 9.938075 OTHER word:sudah 9.896239 location word:pakistan Top-10 negative: -6.265843 OTHER word:memintanya -6.318719 OTHER prefix-3:pah -6.365330 time next_word-suffix-3:nin -6.443976 person is_numeric -6.508225 event suffix-1:u -6.535034 OTHER prefix-3:wir -7.109250 OTHER prefix-3:di- -7.176552 OTHER word:ramadan -7.470627 organization suffix-3:ari -7.846867 time next_word-prefix-1:nPrint important transitions from CRF Modelcrf.print_transitions(10)Top-10 likely transitions: quantity -> quantity 4.731903 location -> location 4.547566 organization -> organization 4.322757 OTHER -> OTHER 4.267569 event -> event 3.796581 law -> law 3.234600 person -> person 3.178005 time -> time 2.716374 location -> OTHER 0.057188 OTHER -> location -0.033477 Top-10 unlikely transitions: event -> person -4.618084 event -> quantity -4.649371 time -> law -4.748618 organization -> event -4.763703 event -> location -4.995439 organization -> law -5.343159 person -> law -6.000496 time -> quantity -6.551308 organization -> time -6.602369 quantity -> time -8.047114Load deep learning modelsfor i in malaya.entity.available_deep_model(): print('Testing %s model'%(i)) model = malaya.entity.deep_model(i) print(model.predict(string)) print()Testing concat model [('kuala', 'location'), ('lumpur', 'location'), ('sempena', 'OTHER'), ('sambutan', 'event'), ('aidilfitri', 'event'), ('minggu', 'time'), ('depan', 'time'), ('perdana', 'person'), ('menteri', 'person'), ('tun', 'person'), ('dr', 'person'), ('mahathir', 'person'), ('mohamad', 'person'), ('dan', 'OTHER'), ('menteri', 'organization'), ('pengangkutan', 'organization'), ('anthony', 'person'), ('loke', 'person'), ('siew', 'person'), ('fook', 'person'), ('menitipkan', 'OTHER'), ('pesanan', 'OTHER'), ('khas', 'OTHER'), ('kepada', 'OTHER'), ('orang', 'OTHER'), ('ramai', 'OTHER'), ('yang', 'OTHER'), ('mahu', 'OTHER'), ('pulang', 'OTHER'), ('ke', 'OTHER'), ('kampung', 'OTHER'), ('halaman', 'location'), ('masing-masing', 'OTHER'), ('dalam', 'OTHER'), ('video', 'OTHER'), ('pendek', 'OTHER'), ('terbitan', 'OTHER'), ('jabatan', 'organization'), ('keselamatan', 
'organization'), ('jalan', 'organization'), ('raya', 'organization'), ('jkjr', 'location'), ('itu', 'OTHER'), ('dr', 'per[...]Voting stack modelentity_network = malaya.entity.deep_model('entity-network') bahdanau = malaya.entity.deep_model('bahdanau') luong = malaya.entity.deep_model('luong') malaya.stack.voting_stack([entity_network, bahdanau, crf], string)* 2016 has an additional id column called FID, must be dropped* 2014 has an additional id column called FID, must be dropped. Also has AFFGEOID but is not needed because provides same info as FIPS. TRACTCE is part of FIPS, so is not needed.data['2010']['data']['E_P_POV']\ .replace(-999,np.nan)\ .dropna()\ .sort_values() data['2018']['data']['EP_POV']\ .replace(-999,np.nan)\ .dropna()\ .sort_values() (data['2010']['data']['E_P_PCI']-data['2010']['data']['E_PCI']).sum() data['2010']['data']['E_PCI']iTOL Client Usage Examplefrom iTOL_client import ITOL_session login = login pwd = Login to yout iTOL account and get the information about all your workspaces, projects and trees:my_itol = ITOL_session(login=login, password=pwd)Surfing through your iTOL account Here is your workspaces:# workspaces itol_data = my_itol.data itol_data.tableLet's look into Default projectitol_data['Default'].tableAlso, you can call the project by iditol_data['76963'].tableMoreover, you can get any parameter of the workspace (for projects and trees you can run a similar command):itol_data['Default'].name, itol_data['Default'].id, itol_data['Default'].descDive deeper into the project:itol_data['Default']['Marinimicrobia project'].tableTouch trees with duplicated namesitol_data['Default']['Marinimicrobia project']['tree_renamed'] itol_data['Default']['Marinimicrobia project']['Python tree'].datasetsDelete workspaces, projects or trees by their IDsmy_itol.data.table my_itol.delete('workspace', '77443') my_itol.data.tableReload iTOL datamy_itol.get_data()Constrained Local Models - BasicsThe aim of this notebook is to showcase how one can build and fit CLMs to images using ``menpofit``.Note that this notebook assumes that the user has previously gone through the AAMs Basics notebook and he/she is already familiar with the basics of `Menpo`'s Deformable Model Fitting framework explained in there. 1. Loading data%matplotlib inline from pathlib import Path path_to_lfpw = Path('/vol/atlas/databases/lfpw') import menpo.io as mio training_images = [] # load landmarked images for i in mio.import_images(path_to_lfpw / 'trainset', verbose=True): # crop image i = i.crop_to_landmarks_proportion(0.1) # convert it to grayscale if needed if i.n_channels == 3: i = i.as_greyscale(mode='luminosity') # append it to the list training_images.append(i) from menpowidgets import visualize_images visualize_images(training_images)2. Build a CLM with default parameters Building a CLM using `Menpo` can be done using a single line of code.from menpofit.clm import CLM clm = CLM( training_images, verbose=True, group='PTS', diagonal=200 ) print(clm) clm.view_clm_widget()3. Fit the previous CLM In `Menpo`, CLMs can be fitted to images by creating `Fitter` objects around them. One of the most popular algorithms for fitting CLMs is the `Regularized Landmark Mean-Shift` algorithm. In order to fit our CLM using this algorithm using `Menpo`, the user needs to define a `GradientDescentCLMFitter` object. 
This can be done again using a single line of code!!!from menpofit.clm import GradientDescentCLMFitter fitter = GradientDescentCLMFitter(clm, n_shape=[6, 12])Fitting a `GradientDescentCLMFitter` to an image is as simple as calling its `fit` method. Let's try it by fitting some images of the LFPW database test set!!!import menpo.io as mio # load test images test_images = [] for i in mio.import_images(path_to_lfpw / 'testset', max_images=5, verbose=True): # crop image i = i.crop_to_landmarks_proportion(0.5) # convert it to grayscale if needed if i.n_channels == 3: i = i.as_greyscale(mode='luminosity') # append it to the list test_images.append(i)Found 5 assets, index the returned LazyList to import.Note that for the purpose of this simple fitting demonstration we will just fit the first 5 images of the LFPW test set.from menpofit.fitter import noisy_shape_from_bounding_box fitting_results = [] for i in test_images: gt_s = i.landmarks['PTS'].lms # generate perturbed landmarks s = noisy_shape_from_bounding_box(gt_s, gt_s.bounding_box()) # fit image fr = fitter.fit_from_shape(i, s, gt_shape=gt_s) fitting_results.append(fr) # print fitting error print(fr) from menpowidgets import visualize_fitting_result visualize_fitting_result(fitting_results)Plot MI decay of Markov, Hierarchical, and Hybrid models1. load models2. fit best fit model3. plot decayfrom glob import glob import pandas as pd from parallelspaper.config.paths import DATA_DIR, FIGURE_DIR import parallelspaper.model_fitting as mf from datetime import datetime from tqdm.autonotebook import tqdm import numpy as np from parallelspaper.utils import save_figLoad models# find the latest instance of a Markov model MI_DFs_markov = glob(str(DATA_DIR / 'MI_DF/models/markov_*.pickle')) loc_table = pd.DataFrame([[datetime.strptime('_'.join(loc.split('/')[-1][:-7].split('_')[1:]),"%Y-%m-%d_%H-%M-%S"), loc] for loc in MI_DFs_markov], columns=['dt', 'locat']).sort_values(by='dt') markov_MI_DF = pd.read_pickle(loc_table.locat.values[-1]) # find the latest instance of a Markov model MI_DFs_hierarchical = glob(str(DATA_DIR / 'MI_DF/models/hierarchical_*.pickle')) loc_table = pd.DataFrame([[datetime.strptime('_'.join(loc.split('/')[-1][:-7].split('_')[1:]),"%Y-%m-%d_%H-%M-%S"), loc] for loc in MI_DFs_hierarchical], columns=['dt', 'locat']).sort_values(by='dt') hierarchical_MI_DF = pd.read_pickle(loc_table.locat.values[-1]) # find the latest instance of a Markov model MI_DFs_hybrid = glob(str(DATA_DIR / 'MI_DF/models/hybrid_*.pickle')) loc_table = pd.DataFrame([[datetime.strptime('_'.join(loc.split('/')[-1][:-7].split('_')[1:]),"%Y-%m-%d_%H-%M-%S"), loc] for loc in MI_DFs_hybrid], columns=['dt', 'locat']).sort_values(by='dt') hybrid_MI_DF = pd.read_pickle(loc_table.locat.values[-1]) # concatenate models MI_DF = pd.concat([markov_MI_DF, hierarchical_MI_DF, hybrid_MI_DF]).reset_index()Fit modelsfor idx, row in tqdm(MI_DF.iterrows(), total=len(MI_DF)): print(row['name']) # get signal sig = np.array(row.MI-row.MI_shuff) distances = row.distances # fit models results_power, results_exp, results_pow_exp, best_fit_model = mf.fit_models(distances, sig) results_concat = results_pow_exp # add results to MI_DF MI_DF.loc[idx,'exp_results_params'] = [{i:results_exp.params[i].value for i in dict(results_exp.params).keys()}] MI_DF.loc[idx,'pow_results_params'] = [{i:results_power.params[i].value for i in dict(results_power.params).keys()}] MI_DF.loc[idx,'concat_results_params'] = [{i:results_concat.params[i].value for i in dict(results_concat.params).keys()}] 
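# keep the full fit-result objects as well, so the fitted decay curves can be re-evaluated when plotting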
MI_DF.loc[idx,'exp_results'] = results_exp MI_DF.loc[idx,'pow_results'] = results_power MI_DF.loc[idx,'concat_results'] = results_concat # get model fit results from predictions and signal R2_exp, R2_concat, R2_power, AICc_exp, \ AICc_pow, AICc_concat = mf.fit_results(sig, distances, results_exp, results_power, results_pow_exp, logscaled=True) # add AIC to MI_DF MI_DF.loc[idx,'AICc_exp'] = AICc_exp MI_DF.loc[idx,'AICc_concat'] = AICc_concat MI_DF.loc[idx,'AICc_power'] = AICc_pow # determine best fit model MI_DF.loc[idx,'bestfitmodel'] = bestfitmodel = ['exp', 'concat', 'power'][np.argmin([AICc_exp, AICc_concat, AICc_pow])] MI_DFplot fit modelsimport matplotlib.pyplot as plt %matplotlib inline import seaborn as sns col_dict = { "hierarchical": sns.color_palette('Reds', 5)[2:], "markov": sns.color_palette('Greens', 5)[2:], "hybrid": sns.color_palette('Blues', 5)[2:], } ncols = 3 nrows = 1 zoom = 5 d = 100 # plot data fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize = (ncols*6,nrows*5)) for axi, (idx, row) in tqdm(enumerate(MI_DF.iterrows()), total=len(MI_DF)): distances = np.linspace(1,d,1000) if 'hierarchical' in row['name']: color = col_dict['hierarchical'][1] ax = axs[0] y_model = mf.get_y(mf.powerlaw_decay, row.pow_results, distances) ax.plot(distances, y_model, alpha = 0.5, lw=10, color=color) elif 'markov' in row['name']: ax = axs[1] if row['name'] == 'Okada_markov': color = col_dict['markov'][0] elif row['name'] == 'Bird2_markov': color = col_dict['markov'][1] elif row['name'] == 'Bird1_markov': color = col_dict['markov'][2] y_model = mf.get_y(mf.exp_decay, row.exp_results, distances) ax.plot(distances, y_model, alpha = 0.5, lw=10, color=color) elif 'hybrid' in row['name']: ax = axs[2] color = col_dict['hybrid'][1] y_model = mf.get_y(mf.pow_exp_decay, row.concat_results, distances) ax.plot(distances, y_model, alpha = 0.5, lw=10, color=color) # plot real data sig = np.array(row.MI-row.MI_shuff) distances = row.distances ax.scatter(distances, sig, alpha = 1, s=80, color=color) # labels, styling for axi, ax in enumerate(axs): ax.tick_params(axis='both', labelsize=18, pad=15) ax.set_xlabel('Distance between elements', labelpad=5, fontsize=18) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(3) ax.spines[axis].set_color('k') ax.grid(False) ax.tick_params(which='both', direction='in') ax.tick_params(which='major', length=10, width =3) ax.tick_params(which='minor', length=5, width =2) ax.set_xscale( "log" , basex=10) ax.set_yscale( "log" , basey=10) ax.set_xticks([1,10,100]) ax.set_xticklabels([1,10,100]) if axi==0: ax.set_ylabel('Mutual Information (bits)', labelpad=5, fontsize=18) #else: # ax.set_yticklabels([]) ax.set_xlim([1,100]) ax.set_ylim([1e-4,10]) #ax.legend() plt.tight_layout() save_fig(FIGURE_DIR/'modelfig')TTUR example code* The objective ( obj ) is $f(x,y) = (1+x^2) \cdot (100-y^2)$ where the derivative with respect to x is $ f_x = 2x\cdot(100-y^2)$ and with respect to y is $ f_y = -2y\cdot(1+x^2)$.* The objective fulfills assumption A4 from the TTUR-paper. * It has a sattle point at $(x,y)^T = (0,0)^T$. The gradient at the sattle point is the zero vector, the function value at the sattle point is $f(0,0) = 100$.* The norm $(x^2 + y^2)^{\frac{1}{2}}$ measures the distance of the parameter vector to the sattle point. 
We set a base learning rate (base_lr) and obtain the x-learning rate ( lr_x ) and the y-learning rate ( lr_y ) by scaling the base learning rate.* On top of the gradient we add Gaussian noise with standard deviation sigma in order to simulate a stochastic gradient.* We plot the objective (should converge to 100), the norm over time (should converge to 0) and the x-y phase diagram (should converge to (0,0)). * The first line shows one-time scale learning, which usually diverges and has large fluctuations. * The second line shows one-time scale with smaller learning rate, which usually converges, but slower than the TTUR in the next line (see norm). A small learning rate leads to convergence in this simple example but doesn't guarantee convergence in the general case. * The third line shows TTUR with the x-update slowed down, which usually converges fast. * The fourth line shows TTUR with the y-update slowed down, which is still more stable than the one-time scale update.''' ====================== 3D surface (color map) ====================== Demonstrates plotting a 3D surface colored with the coolwarm color map. The surface is made opaque by using antialiased=False. Also demonstrates using the LinearLocator and custom formatting for the z axis tick labels. https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html#d-plots-in-3d ''' from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import numpy as np fig = plt.figure(figsize=(15,6)) ax = fig.gca(projection='3d') # Make data. x_lim = [-3.0, 3.0] y_lim = [i * 10.0 for i in x_lim] x = np.arange(x_lim[0], x_lim[1], 0.25) y = np.arange(y_lim[0], y_lim[1], 0.25 * 10.0) X, Y = np.meshgrid(x, y) Z = (1 + X**2) * (100 - Y**2) # Plot the surface. cmap=[cm.plasma, cm.viridis] surf = ax.plot_surface(X, Y, Z, cmap=cmap[1], linewidth=0, antialiased=False) # Lines # Saddle point at 0,0,100 for f = (1 + X**2) * (100 - Y**2) x_zeros = np.zeros(len(x)) y_zeros = np.zeros(len(y)) zx_saddle = np.ones(len(x)) * 100 zy_saddle = np.ones(len(y)) * 100 ax.plot(x, y_zeros, zx_saddle, color="red") ax.plot(x_zeros, y, zy_saddle, color="red") # Customize the z axis. #ax.set_zlim(-5000.01, 500.01) #ax.zaxis.set_major_locator(LinearLocator(10)) ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) # Add a color bar which maps values to colors. 
#fig.colorbar(surf, shrink=0.5, aspect=5) fig = plt.gcf() plt.show() fig.savefig("toyfunc3d.pdf", bbox_inches='tight') import numpy as np def run(lr_x, lr_y, n_iter, sigma): x = 0.5 #.509124898 y = -0.4 #-.402918798624 lrx_hist = [] lry_hist = [] x_hist = [x] y_hist = [y] obj_list = [] norm_list = [] for i in range(n_iter): x -= lr_x * (2*x*(100-y*y) + np.random.normal(scale=sigma)) y += lr_y * (-2*y*(1+x*x) + np.random.normal(scale=sigma)) norm = np.sqrt(x ** 2 + y ** 2) obj = (1+x*x)*(100-y*y) obj_list.append(obj) norm_list.append(norm) x_hist.append(x) y_hist.append(y) print (x, y, obj, norm) return x_hist, y_hist, obj_list, norm_list n_iter = 5000 sigma = 1 base_lr = 0.01 res_otur_1 = run(base_lr, base_lr, n_iter, sigma) res_otur_2 = run(base_lr/10, base_lr/10, n_iter, sigma) res_ttur_1 = run(base_lr / 100, base_lr, n_iter, sigma) res_ttur_2 = run(base_lr, base_lr / 100, n_iter, sigma) res = [res_otur_1, res_otur_2, res_ttur_1, res_ttur_2] import matplotlib.pyplot as plt fig, ax = plt.subplots(4, 3, figsize=(15, 6)) for i, r in enumerate(res): ax[i, 0].plot(r[2]) ax[i, 1].plot(r[3]) #ax[i, 2].plot(r[0], r[1], '->', alpha=0.1) ax[i, 2].scatter(r[0], r[1], s=5.0, alpha=1.0, c=np.arange(0, len(r[0]), 1), cmap=cm.viridis) ax[0, 0].set_title("objective") ax[0, 1].set_title("norms") ax[0, 2].set_title("x vs y") ax[0, 0].set_ylabel("orig fast") ax[1, 0].set_ylabel("orig slow") ax[2, 0].set_ylabel("TTUR x") ax[3, 0].set_ylabel("TTUR y") fig.savefig("toyfunc3d_ttur.pdf", bbox_inches='tight') fig plt.close('all')Pandas InstructionsThis assignment will be done completely inside this Jupyter notebook with answers placed in the cell provided.All python imports that are needed shown.Follow all the instructions in this notebook to complete these tasks. Make sure the CSV data files is in the same folder as this notebook - alumni.csv#!pip install matplotlib # Imports needed to complete this exam import pandas as pd import matplotlib.pyplot as pltIn C:\Users\user\Anaconda3\lib\site-packages\matplotlib\mpl-data\stylelib\_classic_test.mplstyle: The text.latex.preview rcparam was deprecated in Matplotlib 3.3 and will be removed two minor releases later. In C:\Users\user\Anaconda3\lib\site-packages\matplotlib\mpl-data\stylelib\_classic_test.mplstyle: The mathtext.fallback_to_cm rcparam was deprecated in Matplotlib 3.3 and will be removed two minor releases later. In C:\Users\user\Anaconda3\lib\site-packages\matplotlib\mpl-data\stylelib\_classic_test.mplstyle: Support for setting the 'mathtext.fallback_to_cm' rcParam is deprecated since 3.3 and will be removed two minor releases later; use 'mathtext.fallback : 'cm' instead. In C:\Users\user\Anaconda3\lib\site-packages\matplotlib\mpl-data\stylelib\_classic_test.mplstyle: The validate_bool_maybe_none function was deprecated in Matplotlib 3.3 and will be removed two minor releases later. 
In C:\Users\user\Anaconda3\lib\site-packages\matplotlib\mpl-data\stylelib\_classic_test.mplstyle[...]Question 1 : Import CSV file (1 Mark)Write code to load the alumni csv dataset into a Pandas DataFrame called 'alumni'.#loading the dataset alumni = pd.read_csv('alumni.csv')Question 2 : Understand the data set (5 Marks)Use the following pandas commands to understand the data set: a) head, b) tail, c) dtypes, d) info, e) describe#a) (1) # checking the first 5 rows alumni.head() #b) (1) # The last 5 columns alumni.tail() #c) (1)# checking the data types alumni.dtypes #d) (1) # general info on the data types alumni.info() RangeIndex: 88 entries, 0 to 87 Data columns (total 8 columns): Year Graduated 88 non-null int64 Gender 88 non-null object Marital Status 88 non-null object Diploma Type 88 non-null object Defaulted 88 non-null object Salary 88 non-null int64 Fee 88 non-null int64 Savings ($) 88 non-null object dtypes: int64(3), object(5) memory usage: 5.6+ KBThe above shows that there are no null values in the dataset#e) (1) alumni.describe()Question 3 : Cleaning the data set - part A (3 Marks)a) Use clean_currency method below to strip out commas and dollar signs from Savings ($) column and put into a new column called 'Savings'.def clean_currency(curr): return float(curr.replace(",", "").replace("$", "")) clean_currency("$66,000") #using the clean_currency method using the .apply method alumni['Savings'] = alumni['Savings ($)'].apply(clean_currency) alumni.head()b) Uncomment 'alumni.dtypes.Savings' to check that the type change has occurred#b) (1) alumni.dtypes.SavingsQuestion 4 : Cleaning the data set - part B (5 Marks)a) Run the 'alumni["Gender"].value_counts()' to see the incorrect 'M' fields that need to be converted to 'Male'# a) (1) alumni['Gender'].value_counts()b) Now use a '.str.replace' on the 'Gender' column to covert the incorrect 'M' fields. Hint: We must use ^...$ to restrict the pattern to match the whole string.#method 2 alumni['Gender'].str.replace('^[M]$','Male') # b) (1) import re def gender_clean(var): return re.sub('^M$','Male',var) alumni['Gender'].apply(gender_clean)c) That didn't the set alumni["Gender"] column however. 
You will need to update the column when using the replace command 'alumni["Gender"]=', show how this is done below# c) (1) alumni["Gender"]=alumni['Gender'].apply(gender_clean)d) You can set it directly by using the df.loc command, show how this can be done by using the 'df.loc[row_indexer,col_indexer] = value' command to convert the 'M' to 'Male'# d) (1) alumni.loc[alumni['Gender'] == 'M'] = 'Male'e) Now run the 'value_counts' for Gender again to see the correct columns - 'Male' and 'Female'# e) (1) alumni['Gender'].value_counts()Question 5 : Working with the data set (4)a) get the median, b) mean and c) standard deviation for the 'Salary' columnalumni.head(2) # a)(1) print('The median is:',round(alumni['Salary'].median(),2)) # b)(1) print('The mean is:',round(alumni['Salary'].mean(),2)) # c)(1) print('The mean is:',round(alumni['Salary'].std(),2))The mean is: 21234.13d) identify which alumni paid more than $15000 in fees, using the 'Fee' column# d) (1) alumni[alumni['Fee'] > 15000]Question 6 : Visualise the data set (4 Marks)a) Using the 'Diploma Type' column, plot a bar chart and show its value counts.#a) (1) alumni['Diploma Type'].value_counts().plot.bar();b) Now create a box plot comparison between 'Savings' and 'Salary' columns#b) (1) import seaborn as sns savings_salary = ["Savings", "Salary"] sns.catplot(data=alumni[savings_salary],kind = 'box');c) Generate a histogram with the 'Salary' column and use 12 bins.#c) (1) sns.distplot(alumni['Salary'],bins = 12,kde = False)d) Generate a scatter plot comparing 'Salary' and 'Savings' columns.#d) (1) sns.scatterplot(x = 'Salary',y = 'Savings', data = alumni);Question 7 : Contingency Table (2 Marks)Using both the 'Martial Status' and 'Defaulted' create a contingency table. Hint: crosstab# Q7 (2) category = pd.crosstab(alumni['Marital Status'],alumni['Defaulted']) category #stacked bar plot to show propotion of approved and unapproved loans. 
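# normalize each marital-status row of the contingency table to proportions so the stacked bars sum to 1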
category.div(category.sum(1).astype(float),axis = 0).plot(kind = 'bar',stacked=True,figsize = (4,4));resize imagesimage_list = os.listdir('../oxfordflower102/jpg') for image_name in image_list: image = cv2.imread('../oxfordflower102/jpg/' + image_name) if image.shape[0] > image.shape[1]: image = cv2.resize(image, (225, int((image.shape[1] * 225.0)/image.shape[0]))) else: image = cv2.resize(image, (int((image.shape[0] * 225.0)/image.shape[1]), 225)) bool_var = cv2.imwrite('../oxfordflower102/jpg_resized/' + image_name, image) print(bool_var)True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True True [...]TreefortBnB Prices Data Visualization NotebookLink to data and description: https://priceonomics.com/the-priceonomics-data-puzzle-treefortbnb/import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # set plotting parameters %matplotlib inline plt.rcParams['figure.figsize'] = (10, 30) plt.rcParams['xtick.labelsize'] = 16 plt.rcParams['ytick.labelsize'] = 16 plt.rcParams['axes.labelsize'] = 20 plt.rcParams['legend.fontsize'] = 20 # plt.rcParams['text.usetex'] = True plt.rcParams.update({'axes.titlesize': '35'}) plt.style.use('fivethirtyeight') # load dataset df = pd.read_csv('treefortbnb_data.csv') # clean up col names df.columns = ['id', 'city', 'state', 'price', 'num_reviews'] df.head() df.shape # check for duplicates df[df.duplicated(subset=['city', 'state', 'price', 'num_reviews'])]Because there are so many duplicate combinations of city, state, price, and reviews, I will take the unique id column at face value and assume there are not duplicates in the dataset.# fix uppercase issue (e.g. miami vs. 
Miami) df['city'] = df['city'].apply(lambda x: x.lower()) df['city'] = df['city'].apply(lambda x: x.lower()) # create city_state column def clean_row(row): return row['city'].title() + ', ' + row['state'] df['city_state'] = df.apply(clean_row, axis=1) # get median list price and number of listings by city # get prices for top 100 cities based on number of listings df_sorted = df.groupby('city_state')['price'].agg(['median', 'count']).reset_index().sort_values(by=['count', 'median'], ascending=False)[:100].sort_values(by=['median'], ascending=False) # create dataframe for plotting df_sorted.columns = ['city', 'median_list_price', 'count'] df_sorted['median_list_price'] = df_sorted['median_list_price'].apply(lambda x: int(x)) # add column to dataframe for easy labeling def tup(row): return (row['median_list_price'], row['count']) df_sorted['tup'] = df_sorted.apply(tup, axis=1) # initialize the matplotlib figure f, ax = plt.subplots(figsize=(10, 30)) # plot the mediam prices sns.set_color_codes('pastel') sns.barplot(x='median_list_price', y='city', data=df_sorted[:100], label='Meidan List Price', color='b') # add a legend and remove tick labels ax.legend(ncol=2, loc='center right', frameon=False) ax.set(xlim=(0, 700), ylabel='', xlabel='') ax.get_xaxis().set_visible(False) sns.despine(left=True, bottom=True) # add text labels for ix, val in enumerate(df_sorted['tup'][:100]): plt.text(val[0] + 4.5, ix + .25, '$'+str(val[0]), fontsize=13) if len(str(val[1])) > 3: plt.text(val[0] - 72, ix + .25, 'n = '+str(val[1]), fontsize=12.5, color='#384a85') elif val[0] < 100 and len(str(val[1])) == 3: plt.text(val[0] - 61.7, ix + .25, 'n = '+str(val[1]), fontsize=12.5, color='#384a85') elif val[0] < 100 and len(str(val[1])) < 3: plt.text(val[0] - 58, ix + .25, 'n = '+str(val[1]), fontsize=12.5, color='#384a85') else: plt.text(val[0] - 61, ix + .25, 'n = '+str(val[1]), fontsize=12.5, color='#384a85') # add title and save output plt.title('Highest Median TreeBnB Nightly List Prices') plt.figtext(0.425, 0.07, 'Note: only considering cities in the top 100 for listing count\nSource: Pricenomics Data Puzzle', fontsize=11) plt.savefig('treebnb.png', dpi=300, bbox_inches = 'tight')PC lab 3: Intro to NLP via word2vec and character-level language modeling 1 Word2Vec 1.1 Introduction Word2vec is a famous technique for obtaining dense representations of words that carry some notion of their semantic meaning. The usual way of representing a word as part of a vocabulary is by one-hot encoding. For example, embedding the word "Royal" in a vocabulary of 10.000 possible words would result in a 10.000 dimensional vector filled with zeros with only one nonzero element in the index corresponding to the word "Royal". **One-hot encoding has some adverse effects on learning:** (1) it is a sparse representation. When we embed a 10.000 dimensionality vector to a lower dimensionality (e.g. 512) with a matrix multiplication using learned weights $W$, only one row of $W$ will learn weights pertaining to a specific word (because the input will be zeros at the indices for other words). This is visualized in the figure below. (2) Secondly, one-hot encodings carry no semantic meaning at all for words that are similar. All words are orthogonal. The representation for "run" and "runs" has no similarity and the meaning of **every** word (and in essence the meaning of the whole language) has to be learned from scratch for every NLP task using one-hot encoding. 
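To make this concrete, here is a minimal NumPy sketch (the three-word toy vocabulary and the random weight matrix are illustrative assumptions, not part of the lab): one-hot vectors of different words always have cosine similarity 0, and multiplying a one-hot vector with a weight matrix $W$ simply selects a single row of $W$, so only that row receives updates for the corresponding word.

```python
import numpy as np

# toy vocabulary, purely for illustration
vocab = ["run", "runs", "cat"]
one_hot = np.eye(len(vocab))            # each word = an indicator vector

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

# "run" and "runs" are orthogonal under one-hot encoding
print(cosine(one_hot[0], one_hot[1]))     # 0.0

# embedding with a weight matrix W: the product with a one-hot vector
# just picks out one row of W, so only that row is trained for this word
rng = np.random.default_rng(0)
W = rng.normal(size=(len(vocab), 4))      # dense 4-dimensional embeddings
print(np.allclose(one_hot[0] @ W, W[0]))  # True
```

Because every word's row starts out independent of every other word's row, similarity between related words is not available for free and the meaning of each word still has to be learned from scratch.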
In many supervised problems with limited data, this is usually not possible.One way around these facts is to try to **pre-train** a word representation that is (1) dense and (2) carries similarity between words. Of course, to do this, we need (1) a **large training dataset** of natural language and (2) a model **objective**.Word2vec is considered a breakthrough in deep learning/NLP because it offered a ridiculously simple solution: predicting the neighboring words from an input word in a sentence. For example, in the sentence: "I like dogs but not cats", we would (at random) sample as input word "dogs" and at random sample an output word "like", and then try to predict the word "like" from the word "dogs".Architecturally it looks like the image below: we one-hot encode both words, send the input word through a hidden layer which is then the embedding for that word, and try to predict the output word as a multi-class classification of all possible words:The rationale of the technique is that the meaning of a word can be derived from the contexts in which it is used. Similar words will be used in similar contexts (E.g. the words "cats" and "dogs" are used in similar contexts such as "i like dogs" and "i like cats" or "i have a dog" and "i have a cat".) The ingenius of this technique is that it can easily be scaled up because we don't need a labeled dataset: any possible text can be used, and there's a practically infinite amount of available text on the internet.The first layer of the resulting model is our embedding layer: it embeds every possible word in a lower-dimensional subspace in which word similarities are preserved. When people start a NLP problem, it is most useful to use this representation as opposed to a one-hot encoding, because we don't need to learn the meaning of every word from scratch anymore.We will give a primer on how to use word embeddings in this PC lab using the `gensim` library. 1.2 Gensim library In this PC lab, we're going to use the pre-trained word2vec model published by google. It embeds every word in a 300-dimensional subspace.Downloading the model: the following code only works on google colab.It is quite a large file, Google Colab download speeds are excellent because it does not have to download to the own system.If you're running this locally, use:```import gensim.downloader as apiw2v = api.load("word2vec-google-news-300") ```Keep in mind that downloading this model may take a **very** long time.Also, keep in mind that loading in this model takes up **a lot** of RAM memory.Last note: if you download gensim locally, there is a good chance you will be using gensim 4.0, while the google colab environment is still on gensim 3.6. A lot of functionality has changed place inbetween these version. Keep in mind that this notebook has been written with gensim 3.6 in mind.!gdown https://drive.google.com/uc?id=0B7XkCwpI5KDYNlNUTTlSS21pQmM !gunzip GoogleNews-vectors-negative300.bin.gz from gensim.models import KeyedVectors w2v = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)Downloading... From: https://drive.google.com/uc?id=0B7XkCwpI5KDYNlNUTTlSS21pQmM To: /content/GoogleNews-vectors-negative300.bin.gz 100% 1.65G/1.65G [00:13<00:00, 119MB/s] gzip: GoogleNews-vectors-negative300.bin already exists; do you wish to overwrite (y or n)? 
n not overwrittenThe model can be called like a dictionary:vector = w2v['dogs'] print(vector) print(vector.shape)[-0.02050781 -0.01245117 -0.12792969 0.21777344 -0.140625 0.02612305 0.04443359 0.03710938 -0.14160156 0.03466797 0.23925781 -0.50390625 0.02636719 -0.03442383 -0.02990723 0.12988281 -0.16699219 0.20703125 -0.28710938 0.05493164 0.16992188 -0.15917969 0.09472656 -0.22070312 -0.1953125 0.11474609 -0.24511719 0.23535156 0.10009766 -0.0703125 -0.21972656 0.00309753 -0.01483154 -0.12402344 -0.012146 0.04907227 -0.24511719 -0.00245667 0.05761719 0.27148438 0.07958984 -0.15917969 -0.0234375 -0.0111084 -0.06396484 -0.15722656 0.24804688 0.0279541 -0.08642578 0.09619141 0.08935547 0.05322266 0.26757812 0.06542969 0.14453125 -0.0612793 0.24414062 -0.16113281 0.10644531 0.03662109 0.12255859 0.12402344 0.26953125 0.07177734 0.05151367 -0.10644531 0.11083984 0.03149414 -0.25585938 0.08203125 0.2734375 0.05957031 -0.17773438 0.13671875 -0.03833008 -0.12597656 0.10742188 -0.16601562 0.09619141 -0.16601562 -0.06494141 -0.14355469 0.2[...]Some words may not occur in the vocabulary:w2v["modelling"]"modelling" is the british english spelling of the word, whereas this model has (apparently) exclusively been trained on american english text, always using "modeling". The model has an easy-to-use method for trying to find which words are most similar to other words: (using cosine similarity)w2v.most_similar("python")Note: case-sensitivity matters and the two different versions of the word may occur and different contexts. Hence, it may change the embedding (and meaning) of the words:w2v.most_similar("Python") w2v.most_similar("mango")Word2vec is famous for encoding words in a vector space wherein it is possible to do arithmetic.For example: if we substract "man" from "prince" and add "woman", we get "princess":$E_{prince} - E_{man} = x - E_{woman}$$x \simeq E_{princess}$In other words: a prince is to a man as a princess is to a woman.w2v.most_similar( positive = ["prince", "woman"], negative = ["man"], topn = 3)Some other examples:w2v.most_similar( positive = ["leg", "shoulder"], negative = ["arm"], topn = 3) w2v.most_similar( positive = ["Paris", "Spain"], negative = ["France"], topn = 3)However, some analogies are far from fun. Word2vec has been known to carry gender bias in its word embeddings. For example, if we take the embedding for "sewing", substract "woman", and add "man", we get:w2v.most_similar( positive = ["man", "sewing"], negative = ["woman"], topn = 3)More worrying, if we take the embedding for "computer_programmer" (a high-paying job), substract "man" and add "woman", we get:w2v.most_similar( positive = ["woman", "computer_programmer"], negative = ["man"], topn = 3)Lastly, if men are stereotypically conservative, women are:w2v.most_similar( positive = ["woman", "conservatism"], negative = ["man"], topn = 3)It's important to keep in mind that the bias here can be interpreted in a multitude of ways. A good video discussing the sources of bias and ways around it (applied on a google translate case) is this one: https://www.youtube.com/watch?v=J7CrtblmMnU. 1.3 Data analysis on word embeddings.As mentioned previously, we can use these word embeddings as pre-trained vectors in various NLP applications. Here, we will explore them for data visualization. EXERCISE: Randomly sample some words (e.g. 1000) from the word2vec corpus. Reduce their dimensionality using t-SNE to 2 dimensions. Cluster the embeddings in this low-dimensionality subspace. Visualize the results. Inspect some clusters. 
Do they make sense?Use the code below to help you get started:import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.manifold import TSNE from sklearn.cluster import DBSCAN # your algorithm here vocab = w2v.index2entity # all words for which the model has embeddings. words = np.random.choice(vocab, replace = False, size = (1000, )) # sample words here embeddings = np.array([w2v[word] for word in words]) # extract embeddings here data_tsne = TSNE().fit_transform(embeddings) clustering = DBSCAN().fit(data_tsne) # cluster here plt.figure(figsize = (10, 10)) sns.scatterplot(x = data_tsne[:, 0], y = data_tsne[:, 1], alpha = 0.5, hue = clustering.labels_, palette="deep", legend = False) # some cluster labels of words for w, c in zip(words[:10], clustering.labels_[:10]): print(w, c) words[clustering.labels_ == 0]Finally, it is useful to think about how we can use word embeddings in learning.Let's say we want to classify words as being "fruits". To do so, it is of course necessary to have a list of fruits and embed them using the word2vec model. There are multiple ways to create a "fruit classifier" from the resulting embeddings:One way is adding some non-fruit words as a negative set, embedding both the fruits and the non-fruits and using any standard machine learning algorithm to train a model.Another way is to keep in mind that it is perfectly possible to do arithmetic on these vectors. Therefore, it is perfectly reasonable to average the embeddings of fruits into one vector that is then the "meta-fruit" vector. Using this single vector, it is also possible to classify words as being fruits. 2 RNNs for language modeling 2.1 Introduction Recurrent networks strictly operate on 1-D sequences. They can be used for a variety of tasks, pictured below:Examples of the settings in the picture:- one to one: vanilla MLPs that map a fixed size 1-D vector to a 1-D vector for classification or regression- one to many: Image captioning, given an input embedding (obtained with a CNN), a textual caption of variable length is generated.- many to one: (1) Sentence classification such as sentiment analysis or (2) image generation from text: in both cases variable input texts are given as input and a fixed dimensional output is generated.- many to many: (1) machine translation of a variable-length sentence to another variable-length sentence or (2) transcription of a variable-length .mp3 audio to a variable length text.- many to many (1to1 correspondence): (1) Video classification: one label for a variable number of frames in the video (the video frame embedding can be obtained with a CNN and then input into a RNN), (2) autoregressive language modeling: trying to predict the next word in the sentence, for generative purposes or (3) word classification: classify every word as belonging to a category.Note that these settings are not exclusive to recurrent neural networks. In fact, any network type that works on variable input sequences can be used towards these ends. Most famously of which are of course, Transformers, which have all but replaced RNNs in NLP and many other fields. An explanation and implementation of transformers is out of the scope of this course. It suffices to know that RNNs process input sequence sequentially through memory cells, whereas transformers do it in parallel through an $n \times n$ attention matrix. 
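As a small, hedged illustration of two of these settings: the PyTorch `nn.GRU` module (introduced properly in Section 2.4) returns both one output per time step (many to many) and a final hidden state that summarizes the whole sequence (many to one), and it accepts sequences of any length without modification:

```python
import torch
import torch.nn as nn

gru = nn.GRU(input_size=8, hidden_size=16, batch_first=True)

x = torch.randn(1, 100, 8)        # one sequence of length 100, 8 features per step
out, h_n = gru(x)                 # out: (1, 100, 16) -> "many to many": one output per step
                                  # h_n: (1, 1, 16)   -> "many to one": a single summary state

x_long = torch.randn(1, 1000, 8)  # the same module handles length 1000 unchanged
out_long, _ = gru(x_long)         # out_long: (1, 1000, 16)
```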
Other than RNNs and Transformers, convolutional networks can also be used on variable-length inputs: a 1D kernel can equally well convolve over a sequence of length $100$ as one of length $1000$. It is only because the linear layers at the end (for classification) require a specific number of input nodes that typical CNNs become applicable to only one specific input size. Recurrent neural networks are often one layer deep, as opposed to modern convnets reaching hundreds of layers. However, it is unfair to compare both types of architectures in this way. If an RNN takes in an input sequence of length 100, by the time the information in the first element reaches the last, it will have gone through a hundred hidden memory cells, each with its activation functions and (shared-weight) linear layers. Hence, you could say that this element has gone through a neural network 100 layers deep. In this sense, the depth of RNNs scales dynamically with input sequence length. 2.2 Character level autoregressive language modelingAutoregressive language modeling is the task of trying to predict the next word or character in a sentence given which words or characters came before it: $P(x_i | x_{i-1}, x_{i-2}, ..., x_1)$.In this PC-lab we will explore language modeling on the character level as opposed to the word level. A few considerations in this regard: the biggest advantage is that there are fewer classes ($N_{words} \gg N_{chars}$), resulting in a classification task that is easier to optimize and that generally needs less data. The disadvantage is that the model will not learn to predict whole words and has to compose them from scratch, character by character, often resulting in gibberish words. Architecturally, autoregressive language modeling of characters using a vanilla RNN looks like this:The model will embed input tokens to a hidden layer which acts as a memory bank. The memory bank of every input will consist of a combination of the information at that time point and the information coming in from the memory cell at the previous time point. The specific way this information is brought together depends on the specific construction of the RNN. We refer you to the theory lectures for details. The most popular constructions are the LSTM and the GRU memory cells. For every timestep, the model outputs a vector of $n$ dimensions, with $n$ the number of possible characters. We compute the cross entropy for every character on these vectors as a loss function and backpropagate.Code-wise, it is important to know that for a given sentence, we have an input $x$ consisting of the characters in that sentence, and an output $y$, consisting of the same characters, but **shifted one time-step to the left**. **Because of the directionality of the RNN, for every time-step, it will predict the next character given only the preceding ones.**import torch import torch.nn as nn import numpy as npTo train a character LM, we'll use the content of the book "Anna Karenina" by Leo Tolstoy:!wget https://raw.githubusercontent.com/cdemutiis/LSTM_RNN_Text_generation/master/anna.txt with open('./anna.txt', 'r') as f: text = f.read()--2022-04-22 12:10:49-- https://raw.githubusercontent.com/cdemutiis/LSTM_RNN_Text_generation/master/anna.txt Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 192.168.127.12, 172.16.17.32, 192.168.127.12, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|192.168.127.12|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 2025486 (1.9M) [text/plain] Saving to: ‘anna.txt’ anna.txt 100%[===================>] 1.93M --.-KB/s in 0.1s 2022-04-22 12:10:50 (13.3 MB/s) - ‘anna.txt’ saved [2025486/2025486]Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.text[:100] import re text = re.sub('\n', ' ', text) text[:100] text = re.sub(' +', ' ', text) text[:100] text = text.lower() text[:100]Because PyTorch can not interpret string characters directly, we convert our characters to integers, using a dictionary as a vocabulary:unique_chars = set(text) char2int = {v : k for k, v in enumerate(unique_chars)} int2char = {v : k for k, v in char2int.items()} torch.tensor([char2int[l] for l in list(text[:100])])For training, we can't put the whole book in to our model as one sample. Just like the reason for doing batches in other networks is that: it is more computationally efficient, and it allows us to have training steps on different parts of data with some stochasticity to it, allowing us to jump out of local minima.For RNN, another reason is that our "actual" neural network depth is essentially decided by our input length, so if we send in a sample containing a million input tokens (chars), we also backpropagate through a million layers, and our computers will surely crash. In addition, it is not reasonable to assume a character a million chars away still influences the prediction of what's the next character in the sequence. So, the problem of batching our text becomes one of weighing two factors: how long of a sequence can our model handle, and how much context (in number of preceding characters) do our models need for prediction?Here we will take 50 as a default:def generate_batches(text, seqlen = 50): batches = [] for i in np.arange(0, len(text) - seqlen, seqlen): batches.append(text[i:i+seqlen]) return torch.stack(batches) data = generate_batches(torch.tensor([char2int[l] for l in list(text)])) data.shape data[0]2.3 Encoding text We need to encode our integer 'classes' of characters to a vector for input into the RNN. The most straightforward way of doing so is via one-hot encoding.one_hot = nn.functional.one_hot(data, num_classes = len(unique_chars)) print(one_hot[0,:5]) one_hot.shapetensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])An alternative is the `nn.Embedding` layer, which uses a **learnable look up table**. It is learnable because the numbers in the look-up table act as weights and PyTorch is able to compute the gradient of those numbers w.r.t. 
the loss.Information-wise, both representations contain the same information: which input token is present at which position. With `nn.Embedding`, the advantage is that the information is already distributed over all input features and makes up a dense representation, which will aid feature learning (all input weights of the first layer will be used optimally), whereas this is not the case for a one-hot encoded vocabulary. In addition, `nn.Embedding` allows us to choose an arbitrary dimensionality for our input tokens.t = nn.Embedding(num_embeddings = len(unique_chars), embedding_dim = 8) embedded = t(data) print(embedded[0,:5]) embedded.shapetensor([[ 0.1192, -0.2315, -0.5006, 0.1289, 0.5616, 1.1376, -0.8923, -0.4254], [-0.5633, -1.4780, 0.7272, -3.4017, -1.0024, 0.2810, 0.6311, -0.3528], [-1.9426, 0.4265, 0.0304, 0.2706, 0.1811, -0.0743, 2.2381, -0.0954], [ 0.0126, 0.5426, 0.6950, 0.0784, -0.2414, -0.6622, 0.2045, 1.2554], [ 0.5589, -0.8246, -1.4034, -0.8905, 2.2930, 0.5590, -0.7760, -0.1234]], grad_fn=)2.4 GRUs in PyTorch [Documentation for the GRU](https://pytorch.org/docs/stable/generated/torch.nn.GRU.html)Extra note: the weird shape expectations (such as not expecting batches to come first by default) are a consequence of optimizations that PyTorch has implemented so that RNNs run efficiently on data with variable input sequence lengths (such as sentences). For this PC lab, we have batched our sequences so that they have a constant sequence length, so we can add the argument `batch_first = True`.gru = nn.GRU(input_size = 64, hidden_size = 512, batch_first = True) x = torch.randn(2, 50, 64) output, h_n = gru(x) output.shape, h_n.shapeExplanation of the outputs: `h_n` is the hidden representation of the last hidden memory cell. It can be seen as a summarized representation of the content of the whole input (if one wants, for example, a single output for a whole sequence, as in sentence classification). `output` returns the output representation of the RNN for every input token. (Look back at the picture in the introduction of this part of the PC lab (Section 2.1) for more intuition as to when to use which outputs of the RNN.) For character-level language modeling, we should have an output for every input, namely: the prediction of the next character. For this purpose, if we have $n$ possible characters (classes), we should also have $n$ output nodes at every character position, and we can see that this is not the case for our GRU model as it is now.EXERCISE: Implement an autoregressive GRU by completing the code below. The model should contain an embedding layer (or one-hot encoding operation), a recurrent layer, and a layer that takes the outputs of the GRU at each timestep and manipulates their dimensions so that the output dimensionality of each token is equal to the number of expected classes.class CharGenerator(nn.Module): def __init__(self, vocab_len, hidden_dim): super().__init__() self.embedding = nn.Embedding(num_embeddings = vocab_len, embedding_dim = hidden_dim) self.gru = nn.GRU(input_size = hidden_dim, hidden_size = hidden_dim, batch_first = True) self.output_head = nn.Linear(hidden_dim, vocab_len) def forward(self, x): x = self.embedding(x) output, _ = self.gru(x) return self.output_head(output)How do we create x and y? Similarly to autoencoders, our input is also our output in this case. 
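Before constructing the targets, a quick sanity check (a sketch added here, assuming the `data` tensor and `unique_chars` set defined earlier) that the completed `CharGenerator` returns one vector of logits per input character:

```python
# hypothetical quick check, not part of the original exercise
check_model = CharGenerator(vocab_len=len(unique_chars), hidden_dim=16)
with torch.no_grad():
    logits = check_model(data[:2])   # two sequences of 50 character indices
print(logits.shape)                  # expected: torch.Size([2, 50, len(unique_chars)])
```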
Only one thing needs to change: we need to shift the data one time-step (character) backward for the output, so that the first input character corresponds to trying to predict the second output character (see vis). We will split up every batch so that we have two sequences: one from $0..49$ for the input, and one from $1..50$ for the output.X = data[:, :-1] y = data[:, 1:]Data splitting:X_train = X[:int(data.shape[0] * 0.80)] X_test = X[int(data.shape[0] * 0.80):] y_train = y[:int(data.shape[0] * 0.80)] y_test = y[int(data.shape[0] * 0.80):] train_dataset = torch.utils.data.TensorDataset(X_train, y_train) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size = 8, shuffle = True) test_dataset = torch.utils.data.TensorDataset(X_test, y_test) test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size = 8, shuffle = True)Note that we are just taking the first 80% of the book as training and the last 20% as validation set. Normally, we shuffle our data so that we are not biased. In this case, however, we can make a case for doing it our way: because the data here is from a book, and the subjects and settings throughout the book will (gradually) change, splitting it this way corresponds to evaluating if our model generalizes to other scenes in the same book. Shuffled splitting would probably give us a more optimistic view of model performances. Even more stringently, we could also have a test set consisting of an entirely new book, to see if our model generalizes to new styles of writing..batch = next(iter(train_dataloader)) X_batch, y_batch = batch X_batch.shape, y_batch.shapeEXERCISE: Implement the training loop for the character generator using the same principles from last PC labs. It is highly recommended to use GPU resources for training RNNs as backpropagating through time is computationally quite heavy.N_EPOCHS = 20 model = CharGenerator(len(unique_chars), 16).to('cuda') loss_function = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.0005) # SGD = stochastic gradient descent for i in range(1, N_EPOCHS + 1): all_losses = [] model.train() for batch in train_dataloader: optimizer.zero_grad() X_batch, y_batch = batch y_hat_batch = model(X_batch.to('cuda')) loss = loss_function(y_hat_batch.permute(0, 2, 1), y_batch.to('cuda')) # Compute loss loss.backward() # Calculate gradients optimizer.step() # Update weights using defined optimizer all_losses.append(loss.item()) train_loss = np.mean(all_losses) predictions = [] true_labels = [] losses = [] with torch.no_grad(): model.eval() for batch in test_dataloader: X_batch, y_batch = batch y_hat_batch = model(X_batch.to('cuda')) loss = loss_function(y_hat_batch.permute(0, 2, 1), y_batch.to('cuda')) losses.append(loss.item()) predictions.append(y_hat_batch.permute(0, 2, 1)) true_labels.append(y_batch) predictions = torch.cat(predictions) true_labels = torch.cat(true_labels) accuracy = (true_labels.to('cuda') == predictions.argmax(-2)).sum().item() / true_labels.numel() print(i, '\t', train_loss, np.mean(losses), accuracy)1 2.644999518262987 2.431447026703066 0.28061030863958486 2 2.3986496777586352 2.406419427377992 0.2825310769469078 3 2.38391315198204 2.399622132491294 0.2814674256799494 4 2.378811850476621 2.396985585094101 0.2809691618582917 5 2.376224288691939 2.395197243492813 0.28153713098142485 6 2.374590817023735 2.3943100462548053 0.2824665350010972 7 2.3734480013221297 2.394009555434795 0.28258271050355627 8 2.3725451240351374 2.3935092565866283 0.2822367656740115 9 
2.3718606732689578 2.3932510179263153 0.28148549742477635 10 2.3712959070010275 2.392924167991769 0.28000361434896537 11 2.3708874601489103 2.393365951833156 0.2805586750829364 12 2.370555774549759 2.3932953982550886 0.2824975151350863 13 2.3702874477380687 2.3931461491888526 0.2825956188927184 14 2.370033024233744 2.3928574664768725 0.2810130503814429 15 2.3698339148047842 2.3935199140658634 0.2825956188927184 16 2.369635844634751 2.393503726914872 0.28000361434896537 17 2.3694512966521746 2.393[...]How should you evaluate this model **quantitatively**? Given previous text, multiple possible predictions for the next words are equally good. For example, if the input is "I like " and the real text is "I like dogs", without more context, is a generation of "I like cats" less correct? Because of this, is accuracy truly fair? The answer is generally no, in academia, we judge language models simply on their loss, or more specifically a score derived from the loss: the perplexity. An explanation of perplexity is out of the scope of this course.In the following, we will evaluate our model **qualitatively** by generating new text.Let's start from a prompt and gradually build up the code for how we can use our model towards generative ends:prompt = "a nice day for " input = torch.tensor([char2int[letter] for letter in prompt]) input # our model expects an input with 2-dimensions: B x N # with B the number of samples (here = 1) # and N the variable number of input characters output = model(input.reshape(1, -1).to('cuda')) output.shape # The output is of shape B x N x C # in the first dimension, we can just take the first element # in the second dimension, we only need the last element # since that element is the prediction for the next character in the sequence. output[0, -1] # to get the predicted class, we can take the class for which the logit is highest: output[0, -1].argmax() # .item() to get to integer out of the tensor: output[0, -1].argmax().item() # using our vocabulary dict: int2char[output[0, -1].argmax().item()] def generate_next_letter(prompt, model): input = torch.tensor([char2int[letter] for letter in prompt]) output = model(input.reshape(1, -1).to('cuda')) return int2char[output[0, -1].argmax().item()] prompt = "a nice day for " for _ in range(20): prompt += generate_next_letter(prompt, model) print(prompt)a nice day for t a nice day for th a nice day for the a nice day for the a nice day for the t a nice day for the th a nice day for the the a nice day for the the a nice day for the the t a nice day for the the th a nice day for the the the a nice day for the the the a nice day for the the the t a nice day for the the the th a nice day for the the the the a nice day for the the the the a nice day for the the the the t a nice day for the the the the th a nice day for the the the the the a nice day for the the the the theWhen using this code, you will see that the completions are very repetitive and it often generates in loops. This does not mean the model have failed, as this is known issue even in large language models. This just means our generation procedure has failed: we have always greedily sampled just the most likely next character. It's better to sample from the output distribution of possible next characters. 
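A common refinement of this idea, sketched here for completeness even though the cells below implement plain sampling, is to rescale the logits by a temperature before the softmax: temperatures below 1 make generation closer to greedy, temperatures above 1 make it more random. The sketch assumes the trained `model` and the `char2int`/`int2char` dictionaries defined above:

```python
# hedged sketch, not part of the original lab
def sample_next_letter(prompt, model, temperature=0.8):
    input = torch.tensor([char2int[letter] for letter in prompt])
    logits = model(input.reshape(1, -1).to('cuda'))[0, -1]
    probabilities = torch.softmax(logits / temperature, dim=0)
    idx = torch.multinomial(probabilities, num_samples=1).item()
    return int2char[idx]
# plain sampling (temperature = 1) is what the cells below build up step by step
```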
This will promote more diverse text and will often lead to better results.Below you will see how we can sample from a probability distribution instead of taking the top prediction:prompt = "a nice day for " input = torch.tensor([char2int[letter] for letter in prompt]) output = model(input.reshape(1, -1).to('cuda')) output[0, -1] # softmax to get a probability distribution: torch.softmax(output[0, -1], axis = 0) probabilities = torch.softmax(output[0, -1], axis = 0) probabilities_numpy = probabilities.detach().cpu().numpy() np.random.choice(np.arange(len(probabilities_numpy)), p = probabilities_numpy) def generate_next_letter(prompt, model): input = torch.tensor([char2int[letter] for letter in prompt]) output = model(input.reshape(1, -1).to('cuda')) probabilities = torch.softmax(output[0, -1], axis = 0) probabilities_numpy = probabilities.detach().cpu().numpy() sampled_output = np.random.choice(np.arange(len(probabilities_numpy)), p = probabilities_numpy) return int2char[sampled_output] prompt = "a nice day for " for _ in range(20): prompt += generate_next_letter(prompt, model) print(prompt)a nice day for t a nice day for th a nice day for the a nice day for the a nice day for the " a nice day for the "h a nice day for the "he a nice day for the "hea a nice day for the "heas a nice day for the "heas a nice day for the "heas h a nice day for the "heas h a nice day for the "heas h h a nice day for the "heas h he a nice day for the "heas h hed a nice day for the "heas h hed a nice day for the "heas h hed h a nice day for the "heas h hed hi a nice day for the "heas h hed hin a nice day for the "heas h hed hindLess than impressive, right? To play around with a huge (transformer) word-level language model that was trained on a huge corpus of text: try out [HuggingFace "Write With Transformer"](https://transformer.huggingface.co/doc/distil-gpt2)Upload Image of Aadhar# For uploading image from local system #from google.colab import files #uploaded = files.upload() #for fn in uploaded.keys(): # print('User uploaded file "{name}" with length {length} bytes'.format( # name=fn, length=len(uploaded[fn])))start from here if not upload file.#all installing and importing here import numpy as np !pip install pytesseract ! apt install tesseract-ocr ! apt install libtesseract-dev ! pip install Pillow ! pip install pytesseract import pytesseract from PIL import ImageEnhance, ImageFilter, Image import cv2 from google.colab.patches import cv2_imshow from pytesseract import Output import re #getting a random aadhar image !wget -O 'image.jpg' 'http://www.fakingnews.com/wp-content/uploads/2017/04/lead-smith-adhar-copy-584x403.jpg' #showing image using opencv img=cv2.imread('image.jpg') cv2_imshow(img) #conversion to text with custom parameters. custom_config = r'-l eng --oem 3 --psm 3' txt=pytesseract.image_to_string(img, config=custom_config) lst=txt.split('\n') for i in lst: if i!='': print(i)Notebook demonstrating creating a new account that is registered with the system. This will generate a QR code that the user must add to an authenticatorfrom Acquire.Client import UserCreate a user account with a username that you want...user = User("USERNAME")Now ask the system to register a user with this name, with a password that you will supply...(uri, qrcode) = user.register("PASSWORD")Now view the QR code so that it can be loaded into an authenticator, e.g. 
google-authenticatorqrcodeYou can also get the URI directly if you need to enter this manually...print(uri)SVM Loan Approver There are a number of classification algorithms that can be used to determine loan elgibility. Some algorithms run better than others. Build a loan approver using the SVM algorithm and compare the accuracy and performance of the SVM model with the Logistic Regression model.from path import Path import numpy as np import pandas as pd # Read in the data # Note: The following data has been normalized between 0 and 1 data = Path('../Resources/loans.csv') df = pd.read_csv(data) df.head()Separate the Features (X) from the Target (y)# Segment the features from the target y = df["status"] X = df.drop(columns="status")Split our data into training and testing# Use the train_test_split function to create training and testing subsets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y) X_train.shapeCreate a SVM Model# Instantiate a linear SVM model from sklearn.svm import SVC model = SVC(kernel='linear')Fit (train) or model using the training data# Fit the data model.fit(X_train, y_train)Score the model using the test data Make predictions# Make predictions using the test data y_pred = model.predict(X_test) results = pd.DataFrame({ "Prediction": y_pred, "Actual": y_test }).reset_index(drop=True) results.head() from sklearn.metrics import accuracy_score accuracy_score(y_test, y_pred)Generate Confusion Matrixfrom sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_pred)Generate Classification Reportfrom sklearn.metrics import classification_report print(classification_report(y_test, y_pred))precision recall f1-score support approve 0.58 0.58 0.58 12 deny 0.62 0.62 0.62 13 accuracy 0.60 25 macro avg 0.60 0.60 0.60 25 weighted avg 0.60 0.60 0.60 25WeatherPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.!pip install citipy # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time import scipy.stats as st # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180)Generate Cities List# List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities)Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it'sbeing processed (with the city number and city name).url = "http://api.openweathermap.org/data/2.5/weather?" 
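# The next lines assemble the OpenWeatherMap request URL (base endpoint + API key + units, with the city name appended per request); `record` and `set` only count cities in batches of 50 for the progress log (note that `set` shadows the Python built-in of the same name).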
units = "imperial" query_url = f"{url}appid={weather_api_key}&units={units}&q=" record = 0 set = 1 city_name = [] lat = [] long = [] max_temp = [] humidity = [] clouds = [] wind_speed = [] country = [] date = [] print("Beginning Data Retrieval") print("-----------------------------") for city in cities: #Count of cities/sets of 50 if record == 50: record = 0 set += 1 record += 1 #Try validating each city try: #Pull weather info with successful attempts weather_response = requests.get(query_url+city).json() city_name.append(weather_response["name"]) lat.append(weather_response["coord"]["lat"]) long.append(weather_response["coord"]["lon"]) max_temp.append(weather_response["main"]["temp_max"]) humidity.append(weather_response["main"]["humidity"]) clouds.append(weather_response["clouds"]["all"]) wind_speed.append(weather_response["wind"]["speed"]) country.append(weather_response["sys"]["country"]) date.append(weather_response["dt"]) print(f"Processing Record {record} of Set {set} | {city}") except: #Print out for each city that can not be validated print("City not found. Skipping...") continueBeginning Data Retrieval ----------------------------- City not found. Skipping... Processing Record 2 of Set 1 | hilo Processing Record 3 of Set 1 | ushuaia Processing Record 4 of Set 1 | grindavik City not found. Skipping... Processing Record 6 of Set 1 | tuatapere City not found. Skipping... Processing Record 8 of Set 1 | asuncion nochixtlan City not found. Skipping... Processing Record 10 of Set 1 | rikitea Processing Record 11 of Set 1 | Processing Record 12 of Set 1 | atuona Processing Record 13 of Set 1 | tamandare Processing Record 14 of Set 1 | healdsburg Processing Record 15 of Set 1 | ilulissat Processing Record 16 of Set 1 | longyearbyen Processing Record 17 of Set 1 | iqaluit Processing Record 18 of Set 1 | mount gambier Processing Record 19 of Set 1 | punta arenas Processing Record 20 of Set 1 | kuantan Processing Record 21 of Set 1 | saskylakh Processing Record 22 of Set 1 | yellowknife Processing Record 23 of Set 1 | busselton Processing Record 24 of Set 1 | [...]Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame#Put data into dictionary wx_dict = {'City':city_name, 'Lat':lat, 'Lng':long, 'Max Temp':max_temp, 'Humidity':humidity, 'Cloudiness':clouds, 'Wind Speed':wind_speed, 'Country':country, 'Date':date} #Put dictionary into DataFrame wx_summary = pd.DataFrame(wx_dict) #Print DataFrame into a csv wx_summary.to_csv(output_data_file) #Print DataFrame here wx_summary #Perform quick analysis of data in DataFrame wx_analysis = pd.DataFrame(wx_summary.describe()) #Print quick analysis wx_analysisInspect the data and remove the cities where the humidity > 100%.----Skip this step if there are no cities that have humidity > 100%.# Get the indices of cities that have humidity over 100%. # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot#Outline Scatter Plot data plt.scatter(wx_summary["Lat"],wx_summary["Max Temp"]) #Label graph plt.xlabel("Latitude") plt.ylabel("Max Temperature (F)") plt.title("City Latitude vs. Max Temperature") plt.grid(True) #Save Scatter Plot plt.savefig("output_data/Latitude vs. 
Temperature Plot.png") #Print Scatter Plot plt.show() #The plot shows that temperatures are warmer the closer they are to the equator and colder at further latitudesLatitude vs. Humidity Plot#Outline Scatter Plot data plt.scatter(wx_summary["Lat"],wx_summary["Humidity"]) #Label graph plt.xlabel("Latitude") plt.ylabel("Humidity (%)") plt.title("City Latitude vs. Humidity") plt.grid(True) #Save Scatter Plot plt.savefig("output_data/Latitude vs. Humidity Plot.png") #Print Scatter Plot plt.show() #The plot appears to show that cities at latitudes further from the equator have a higher percentage of humidity #and almost no low percentages of humidity in the same areasLatitude vs. Cloudiness Plot#Outline Scatter Plot data plt.scatter(wx_summary["Lat"],wx_summary["Cloudiness"]) #Label graph plt.xlabel("Latitude") plt.ylabel("Cloudiness (%)") plt.title("City Latitude vs. Cloudiness") plt.grid(True) #Save Scatter Plot plt.savefig("output_data/Latitude vs. Cloudiness Plot.png") #Print Scatter Plot plt.show() #The plot appears to show that there is some higher percentages of cloudiness further from the equatorLatitude vs. Wind Speed Plot#Outline Scatter Plot data plt.scatter(wx_summary["Lat"],wx_summary["Wind Speed"]) #Label graph plt.xlabel("Latitude") plt.ylabel("Wind Speed (mph)") plt.title("City Latitude vs. Wind Speed") plt.grid(True) #Save Scatter Plot plt.savefig("output_data/Latitude vs. Wind Speed Plot.png") #Print Scatter Plot plt.show() #The plot appears to show there is very little correlation between windspeed and latitudeLinear Regression#Outline data in northern and southern hemisphere northern_hem = wx_summary.loc[wx_summary["Lat"] > 0] southern_hem = wx_summary.loc[wx_summary["Lat"] < 0]Northern Hemisphere - Max Temp vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(northern_hem["Lat"],northern_hem["Max Temp"]) regress_values = northern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline graph plt.scatter(northern_hem["Lat"],northern_hem["Max Temp"]) plt.plot(northern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(5,-15),fontsize=15,color="red") #Label and detail graph plt.xlabel("Latitude") plt.ylabel("Max Temp (F)") plt.title("Northern Hemisphere - City Latitude vs. Max Temp") #Save Scatter Plot plt.savefig("output_data/Northern Hemisphere - Max Temp vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #The plot shows there is a direct correlation between high max temps and lower latitudes as well as low max temps # and higher latitudesSouthern Hemisphere - Max Temp vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(southern_hem["Lat"],southern_hem["Max Temp"]) regress_values = southern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline graph plt.scatter(southern_hem["Lat"],southern_hem["Max Temp"]) plt.plot(southern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(-25,40),fontsize=15,color="red") #Label and detail graph plt.xlabel("Latitude") plt.ylabel("Max Temp (F)") plt.title("Southern Hemisphere - City Latitude vs. Max Temp") #Save Scatter Plot plt.savefig("output_data/Southern Hemisphere - Max Temp vs. 
Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #The plot shows there is a correlation between medium to high max temps and latitudes closer to the equator #as well as low max temps and further away from the equatorNorthern Hemisphere - Humidity (%) vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(northern_hem["Lat"],northern_hem["Humidity"]) regress_values = northern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(northern_hem["Lat"],northern_hem["Humidity"]) plt.plot(northern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(40,20),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Humidity (%)") plt.title("Northern Hemisphere - City Latitude vs. Humidity") #Save Scatter Plot plt.savefig("output_data/Northern Hemisphere - Humidity (%) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show that humidity is becomes reliably higher at latitudes further from the equator and lower #humidities are essentially non-existentSouthern Hemisphere - Humidity (%) vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(southern_hem["Lat"],southern_hem["Humidity"]) regress_values = southern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(southern_hem["Lat"],southern_hem["Humidity"]) plt.plot(southern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(-50,15),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Humidity (%)") plt.title("Southern Hemisphere - City Latitude vs. Humidity") #Save Scatter Plot plt.savefig("output_data/Southern Hemisphere - Humidity (%) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show there is a very very slight correlation between the rise in humidity as the latitudes #approach the equatorNorthern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(northern_hem["Lat"],northern_hem["Cloudiness"]) regress_values = northern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(northern_hem["Lat"],northern_hem["Cloudiness"]) plt.plot(northern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(45,25),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Cloudiness (%)") plt.title("Northern Hemisphere - City Latitude vs. Cloudiness") #Save Scatter Plot plt.savefig("output_data/Northern Hemisphere - Cloudiness (%) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show there is a correlation between low percentages of cloudiness at lower latitudes and higher #percentages of cloudiness at higher latitudesSouthern Hemisphere - Cloudiness (%) vs. 
Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(southern_hem["Lat"],southern_hem["Cloudiness"]) regress_values = southern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(southern_hem["Lat"],southern_hem["Cloudiness"]) plt.plot(southern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(-50,60),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Cloudiness (%)") plt.title("Southern Hemisphere - City Latitude vs. Cloudiness") #Save Scatter Plot plt.savefig("output_data/Southern Hemisphere - Cloudiness (%) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show there is a very slight correlation between the rise in cloudiness as the latitudes #approach the equatorNorthern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(northern_hem["Lat"],northern_hem["Wind Speed"]) regress_values = northern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(northern_hem["Lat"],northern_hem["Wind Speed"]) plt.plot(northern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(10,30),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Wind Speed (mph)") plt.title("Northern Hemisphere - City Latitude vs. Wind Speed") #Save Scatter Plot plt.savefig("output_data/Northern Hemisphere - Wind Speed (mph) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show that wind speeds increases slightly from the equator to higher latitudes but is overall rather lowSouthern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression#Create equation (slope, intercept, rvalue, pvalue, stderr) = st.linregress(southern_hem["Lat"],southern_hem["Wind Speed"]) regress_values = southern_hem["Lat"] * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #Outline and detail graph plt.scatter(southern_hem["Lat"],southern_hem["Wind Speed"]) plt.plot(southern_hem["Lat"],regress_values,"r-") plt.annotate(line_eq,(-25,25),fontsize=15,color="red") #Label graph plt.xlabel("Latitude") plt.ylabel("Wind Speed (mph)") plt.title("Southern Hemisphere - City Latitude vs. Wind Speed") #Save Scatter Plot plt.savefig("output_data/Southern Hemisphere - Wind Speed (mph) vs. Latitude.png") #Print Scatter Plot plt.show() #Print R-Value print(f"The r-value is: {rvalue**2}") #This plot appears to show that wind speed reduces as it approaches the equator from lower latitudesPython for Psychologists - Session 9 session8 recap & plotting Python offers multiple "plotting" libraries, each of them with different features.Today we want to cover two (probably the most common) libraries- matplotlib- seaborn A plot usually contains two main components, i.e., a figure and axes. Image the figure as a page on which you can draw whatever you like. Following that, a figure can contain multiple independent plots, a legend, color bar etc. The axes is the area where we plot our data and any labels are associated with. Each axes has a x and y -axis ![fig](fig.png) matplotlib. 
- We can use basic matplotlib commands to easily create plots.import matplotlib.pyplot as plt %matplotlib inline`%matplotlib inline`or `plt.show()` will show your plot instantly. The latter is particularly used outside Jupyter notebooksimport numpy as np x = np.linspace(0,10,20) # generates 20 numbers between 0 and 10 y = x**2 # x squaredNow that we got our first plot, let´s give it a name and label the x and y axis Now imagine you need more than one plot on your page. We can easily do this with `plt.subplot()`#nrows #ncolumns #plot_number- we could also plot by creating Figure objects in matplotlib Let´s create an empty figure object with `.figure()`, i.e. an object-oriented approach. By setting `figsize=(a,b)` one could increase or decrease one's "canvas"fig =Let´s add a blank set of axes using ``fig.add_axes([location_where_axes_should_be_located])``ax1 = #left #bottom # width #height figRemember that a figure can contain more than just one plot. Let´s try to insert a second set of axes on our canvas. This will help us to understand the input `.add_axes([])` takesax2 = figLet´s plot our x and y arrays on our new blank axes and add x and y labels as well as a plot name. However, here we need to use e.g. `.set_xlabel` instead of just `.xlabel`figAs for the first approach, we could also create multiple plots in the object-oriented approach using `.subplots(nrows=, ncols=)` and **not** `.subplot()` as we did before! As we can see, we did create some overlap between our plots; no worries, we can use `plt.tight_layout()` to solve this issue. Very conveniently, `plt.subplots()` will automatically add axes based on the rows and columns input and you don´t have to specify it as we had to using `plt.figure()`Now we could try to plot our x & y arrays on specific subplots. We could do this by indexing ax! In some way, your subplot behaves like a single cell in your dataframe, i.e. we could index it easily by choosing [row/column]# changes color and linestyle # changes the linewidth #changes lower and upper bound of x axis figseabornseaborn is based on matplotlib, but usually works with fewer lines of code and therefore provides an easy-to-handle visualization interface.For further information, see https://seaborn.pydata.org/import pandas as pd iris = pd.read_csv("iris.csv", sep=",") iris.head()Let´s try to create a scatter plot for sepal.length & sepal.width- matplotlib- seaborn# create a figure and axis # scatter the sepal_length against the sepal_width # set a title and labels import seaborn as snsWe could also group our scatterplot by variety using the ``hue`` argument, i.e. different groups will be colored in different colors. We could easily plot a line chart using `sns.lineplot()`. The only arguments that we need are the four numeric columns in our case. We could also use ``sns.boxplot(x=,y=,data=)`` or ``sns.barplot(x=,y=,data=)`` to plot some characteristic of our three categories. The standard solution comes with a 95% confidence interval around your point estimate. A nice way to get a first idea about your data (from a plotting perspective) is `sns.pairplot()` or `sns.heatmap()` As we can see, the output does not look that fine; here we can combine matplotlib and seaborn to customize our plot! We could also break our data up across multiple subplots (i.e. *faceting*) and combine it into one single figure. First we can create a multiplot grid (i.e. ``sns.FacetGrid``) which takes our column variety into account and hence creates three empty grids. 
Afterwards we can use the ``.map()`` function, that calls the specified function for each object of an iterable (i.e., the empty grids in our case)#plot a univariat distribution of observationsControlling figure aestheticsdef sinplot(flip=1): x = np.linspace(0, 14, 100) for i in range(1, 7): plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip) sinplot()```sns.set_style()``` changes the figure theme, go check it out by using "darkgrid" or "whitegrid" or "white" or "ticks" or "dark" as an argumentsns.set_style("ticks") sinplot()We could also remove the top and right axis spine (only white or ticks thema can benefit from it) by using `sns.despine()`sinplot() sns.despine()We could also scale our plots for different context by using `sns.set_context()`. Go and try it for "paper", "notebook", "talk", and "poster".sns.set_context("paper") sinplot() sns.despine()to switch back to the default seaborn settings, simply use `sns.set()`sns.set()Create a list of Trove's digitised periodicalsEveryone know's about Trove's newspapers, but there is also a growing collection of digitised periodicals available in the 'Magazines & newsletters' category. They're not easy to find, however, which is why I created the [Trove Titles](https://trove-titles.herokuapp.com/) web app.This notebook uses the Trove API to harvest metadata relating to digitised periodicals – or more accurately, periodicals that are freely available online in a digital form. This includes some born digital publications that are available to view in formats like PDF and MOBI, but excludes some digital journals that have access restrictions.The search strategy to find digitised (and digital) periodicals takes advantage of the fact that Trove's digitised resources (excluding the newspapers) all have an identifier that includes the string `nla.obj`. So we start by searching in the journals zone for records that include `nla.obj` and have the `format` 'Periodical'. By specifying 'Periodical' we exclude individual articles from digitised journals.Then it's just a matter of looping through all the results and checking to see if a record includes a `fulltext` link to a digital copy. If it does it gets saved.You can see the results in [this CSV file](digital-journals.csv). Obviously you could extract additional metadata from each record if you wanted to.The default fields are:* `title` – the title of the periodical* `contributor` – information about creator or publisher* `issued` – publication date, or date range* `format` – the type of publication, all entries should include 'Periodical', but may include other types such as 'Government publication'* `trove_id` – the 'nla.obj' part of the fulltext_url, a unique identifier for the digital periodical* `trove_url` – url of the periodical's metadata record in Trove* `fulltext_url` – the url of the landing page of the digital version of the periodical* `fulltext_url_type` – the type of digital periodical, one of 'digitised', 'edeposit', or 'other'I've used this list to [harvest all the OCRd text from digitised periodicals](Download-text-for-all-digitised-journals.ipynb).# Let's import the libraries we need. 
import requests import pandas as pd from bs4 import BeautifulSoup import time import json import os import re from tqdm.auto import tqdm from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from slugify import slugify from IPython.display import display, HTML from datetime import datetime s = requests.Session() retries = Retry(total=5, backoff_factor=1, status_forcelist=[ 502, 503, 504 ]) s.mount('https://', HTTPAdapter(max_retries=retries)) s.mount('http://', HTTPAdapter(max_retries=retries))Add your Trove API keyYou can get a Trove API key by [following these instructions](https://help.nla.gov.au/trove/building-with-trove/api).# Add your Trove API key between the quotes api_key = 'YOUR API KEY'Define some functions to do the workdef get_total_results(params): ''' Get the total number of results for a search. ''' these_params = params.copy() these_params['n'] = 0 response = s.get('https://api.trove.nla.gov.au/v2/result', params=these_params) data = response.json() return int(data['response']['zone'][0]['records']['total']) def get_fulltext_urls(links): ''' Loop through the identifiers to find a link to the digital version of the journal. ''' urls = [] for link in links: if link['linktype'] == 'fulltext' and 'nla.obj' in link['value']: url = link['value'] if 'digitised' in link['linktext'].lower(): url_type = 'digitised' elif 'edeposit' in link['linktext'].lower(): url_type = 'edeposit' else: url_type = 'other' urls.append({'url': url, 'url_type': url_type}) return urls def listify(value): if not isinstance(value, list): value = [value] return value def format_list(record, field): value = record.get(field, []) value = listify(value) return ' | '.join(value) def get_titles(): ''' Harvest metadata about digitised journals. With a little adaptation, this basic pattern could be used to harvest other types of works from Trove. 
''' url = 'http://api.trove.nla.gov.au/v2/result' titles = [] params = { # We can 'NOT' the format facet in the query 'q': '"nla.obj-" NOT format:Article', #'q': '"nla.obj-" NOT format:Article', 'zone': 'article', 'l-format': ['Periodical'], # Journals only # 'l-format': 'Government publication', # Journals only 'include': 'links', 'bulkHarvest': 'true', # Needed to maintain a consistent order across requests 'key': api_key, 'n': 100, 'encoding': 'json' } start = '*' total = get_total_results(params) with tqdm(total=total) as pbar: while start: params['s'] = start response = s.get(url, params=params) data = response.json() # If there's a startNext value then we get it to request the next page of results try: start = data['response']['zone'][0]['records']['nextStart'] except KeyError: start = None for work in data['response']['zone'][0]['records']['work']: # Check to see if there's a link to a digital version try: fulltext_urls = get_fulltext_urls(work['identifier']) except (KeyError, TypeError): pass else: for fulltext_url in fulltext_urls: trove_id = re.search(r'(nla\.obj\-\d+)', fulltext_url['url']).group(1) # Get basic metadata # You could add more work data here # Check the Trove API docs for work record structure title = { 'title': work['title'], 'contributor': format_list(work, 'contributor'), 'issued': work.get('issued', ''), 'format': format_list(work, 'type'), 'fulltext_url': fulltext_url['url'], 'trove_url': work['troveUrl'], 'trove_id': trove_id, 'fulltext_url_type': fulltext_url['url_type'] } titles.append(title) time.sleep(0.2) pbar.update(100) return titlesRun the harvesttitles = get_titles() df = pd.DataFrame(titles) df.head() # How many journals are there? df.shapeFor some reason there are a number of duplicates in the list, where multiple Trove work records point to the same digitised journal. We an display the duplicates like this.# SHow all the rows pd.set_option('display.max_rows', None) # Show dupes df.loc[df.duplicated(subset=['trove_id'], keep=False)].sort_values(by=['trove_id', 'fulltext_url_type']) df.sort_values(by=['trove_id', 'fulltext_url_type']).drop_duplicates(subset='trove_id', keep='last').shape # Save as CSV and display a download link csv_file = f'digital-journals-{datetime.now().strftime("%Y%m%d")}.csv' #csv_file = f'government-publications-periodicals-{datetime.now().strftime("%Y%m%d")}.csv' df.to_csv(csv_file, index=False) display(HTML(f'{csv_file}'))Today, we will scrape Producthunt with SeleniumSelenium is a tool initially created to automate tests on websites. It is therefore very useful when information is accessible by clicking on links. A button for example is an element on which it is very difficult to obtain the link. Beautifull soup then becomes limited.In this case, use Selenium Load librariesimport bs4 import requests from bs4 import BeautifulSoup import numpy as np import pandas as pd import json import re import lxml.html import time import random from random import randint import logging import collections from time import gmtime, strftime import re from tabulate import tabulate import os date=strftime("%Y-%m-%d")Install Selenium according to this manualhttps://selenium-python.readthedocs.io/installation.htmldownloading-python-bindings-for-selenium/binnb: Linux: put your geckodriver (the downloaded extension) in the equivalent path at home to /home/YOURNAME/.local/bin We will simulate a search on the official Phython websiteimport selenium # The selenium.webdriver module provides all the implementations of WebDriver. 
WebDriver implementations # Currently supported are Firefox, Chrome, IE and Remote. The Keys class provides keys in # the keyboard such as RETURN, F1, ALT etc. from selenium import webdriver from selenium.webdriver.common.keys import Keys # Then, the instance of Firefox WebDriver is created. driver = webdriver.Firefox() # The driver.get method will lead to a page given by the URL. WebDriver will wait until the page is fully completed # loaded (i.e. the "onload" event has been triggered) before returning the control to your script. # It should be noted that if your page uses a lot of AJAX when loading, WebDriver may not know # when it was fully charged: driver.get("http://www.python.org") # The following line is a statement confirming that the title contains the word "Python" assert "Python" in driver.title # WebDriver offers several methods to search for items using one of the methods # find_element_by_by_ * . For example, the input text element can be located by its name attribute by # using the find_element_by_name method elem = driver.find_element_by_name("q") # Then we send keys. This is similar to entering keys using your keyboard. # Special keys can be sent using the imported selenium.webdriver.common.keys Keys class. # For security reasons, we will delete any pre-filled text in the input field # (for example, "Search") so that it does not affect our search results: elem.clear() elem.send_keys("pycon") elem.send_keys(Keys.RETURN) #After submitting the page, you should get the result if there is one. To ensure that certain results # are found, make an assertion: assert "No results found." not in driver.page_source driver.close()Open the source code of this page (HTML) and check that the search area (field) is called "q".input id="id-search-field" name="q" type="search" role="textbox" class="search-field placeholder" placeholder="Search" value="" tabindex="1" Getting a phone number from an ad in the right placefrom selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException from selenium.webdriver.common.by import By url='https://www.leboncoin.fr/sports_hobbies/1536839557.htm/' driver = webdriver.Firefox() driver.implicitly_wait(30) driver.get(url) python_button = driver.find_elements_by_xpath('//div[@data-reactid="269"]')[0] python_button.click() # And then it's like Beautiful soup soup=BeautifulSoup(driver.page_source) driver.close() # And then it's like Beautiful soup for elem in soup.find_all('a',attrs={"data-qa-id" :"adview_number_phone_contact" }): print(elem.text)Starting from an ad in the right place, collect all the information available to define the product being sold. Use selenium for the telephone number API (Application Program Interface) A set of tools and methods that allow different applications to interact with each other. In the case of a web service, we can retrieve data dynamically. By using an API correctly, we can thus obtain in real time, the modifications made on a "parent" site.For example, we will retrieve online news, for example from the "L'équipe" website. 
Follow the instructions at https://newsapi.org/s/lequipe-api to retrieve an "API key" connection key Your API key is: 73bbb95f8ecb49b499113a46481b4af1It is frequent that a key does not work after a while 5mins 30 mins 1 d ...So don't jump up if you get an error message back.import requests key='' url ='https://newsapi.org/v2/top-headlines?sources=lequipe&apiKey='+key response = requests.get(url) # Here the response format is a json file, it is used as a dictionary print (response.json()) dic=response.json() print(dic.keys()) for elem in list(dic.keys()): print('##############################################') print("clé: ",elem,"// values: ", dic[elem]) # And now we have lists in dictionaries it's json but it's always the same thing # We will discover the information of the article key for elem in enumerate(dic['articles']): print('###############################################') print(elem) # So if we keep going, it gives us another dictionary! for elem in dic['articles'][0].keys(): print(' Key : ',elem,'Values : ',dic['articles'][0][elem])The following script consists of a Python version of Stanford Course 'Machine Learning' taught on the Coursera PlatformNote: All exercise data and structure are credited to Stanford University **Caveat:** Contrary to the modularity presented in Octave scripts and as I'm using Jupyter Notebooks for educational purposes we will implement the functions on the same notebook where we will call them Exercise 1 - Generate E-Mail Features# Import numpy libraries to deal with matrixes and vectors import numpy as np # Import pandas do read data files import pandas as pd # Import matplotlib to plot data import matplotlib.pyplot as plt # Import regular expressions library import re # Import string helper library import string #Import NLTK Tokenizer from nltk.tokenize import word_tokenize # Import and load Porter Stemmer from nltk.stem.porter import * stemmer = PorterStemmer() # Import math import math # Import scipy optimization function from scipy import optimize, io from scipy.ndimage import rotate # Import Support Vector Machine from sklearn.svm import LinearSVC, SVC # Matplotlib notebook property %matplotlib inlineOne of the many problems that you can solve with machine learning is the classification of spam e-mails.We will use an SVM to train this classifier. As usual, let's look at the data first:# Read e-mail contents file_contents = open("emailSample1.txt", "r") file_contents = (file_contents.read()) print(file_contents)> Anyone knows how much it costs to host a web portal ? > Well, it depends on how many visitors you're expecting. This can be anywhere from less than 10 bucks a month to a couple of $100. You should checkout http://www.rackspace.com/ or perhaps Amazon EC2 if youre running something big.. To unsubscribe yourself from this mailing list, send an email to: How to process this text into something readable for the SVM? We need to turn those words into integers of some form - let's start by reading a vocabulary list (this vocab was pre-filtered with only the most common words) after pre-processing and doing some common Natural Language Processing tasks such as:- keeping only alphanumeric characters;- flagging emails or urlsdef getVocabList(): ''' Generates vocabulary list. 
Maps string to integer (sti) Args: None Returns: vocab_dict(dict): Vocabulary_list ''' vocab_dict = {} with open("vocab.txt", "r") as vocab: for line in vocab: vocab_dict[int((line.split('\t'))[0]),1] = line.split('\t')[1].replace('\n','') return vocab_dict def processEmail( email_contents: str ) -> list: ''' Preprocesses e-mail and returns word indices according to vocabulary. Args: email_contents(str): Content of the e-mail Return: word_indices(list): List of word indexes. ''' vocabList = getVocabList() word_indices = [] #Lowercase all e-mail contents email_contents = email_contents.lower() #Replace \n tags email_contents = email_contents.replace('\n',' ') #Regex pattern substitutions email_contents = re.sub('<[^<>]+>', ' ', email_contents) #Replace numbers email_contents = re.sub('[0-9]+', 'number', email_contents) #Handle URL's email_contents = re.sub('(http|https)://[^\s]*', 'httpaddr', email_contents) #Handle e-mail addresses email_contents = re.sub('[^\s]+@[^\s]+', 'emailaddr', email_contents) #Handle $ sign email_contents = re.sub('[$]+', 'dollar', email_contents) email_contents = word_tokenize(email_contents) process_email_contents = [] for el in email_contents: # Remove punctuation element = (el.translate(str.maketrans('', '', string.punctuation))) # Retain only alphanumeric element = re.sub(r'\W+', '', element) if len(element)>=1: process_email_contents.append(stemmer.stem(element)) # Loop through each element and find corresponding vocab integer value for el in process_email_contents: try: word_indices.append([k for k,v in vocabList.items() if v == el][0][0]) except: pass return word_indices # Generate Word indices for the process e-mail word_indices = processEmail(file_contents) def emailFeatures( word_indices: list ) ->np.array: ''' Returns vectorized version of the e-mail using word indexes. Each array element is mapped to an array consisting of 0's and 1's where 1's are the presence of the word at index n in the e-mail. Args word_indices(list): List of word indexes Returns: vectorized_features(np.array): Word vector. ''' vocabList = getVocabList() vectorized_features = np.zeros(len(vocabList)) for i in range(0,len(vocabList)): if i in word_indices: vectorized_features[i] = 1 return vectorized_features features = emailFeatures(word_indices) print('Length of feature vector is {}'.format(len(features))) print('Length of non-zero elements is {}'.format(features.sum()))Length of feature vector is 1899 Length of non-zero elements is 45.0Exercise 2 - Load Pre-Computed Features and Train SVM# Use scipy Io to load matrix object with exercise data spam_file = io.loadmat('spamTrain.mat') X = np.array(spam_file['X']) y = np.array(spam_file['y'])We have pre-loaded the matrixes for all the spam e-mails using the vocab list above.This matrix object was given by Andrew on his class so we don't need to compute anything. **As in the first part of exercise 6, we are going to train a Linear SVM and assess the results.**def svmTrain( X: np.array, y: np.array, C: float, max_iter:int ) -> SVC: ''' Trains a Support Vector Machine Classifier using sklearn library. Args: X(np.array): Array of original features. y(np.array): Array of target values. C(float): Penalty of the Support Vector Machine max_iter(int): Number of iterations Returns: svm_classifier(sklearn.base.ClassifierMixin): trained classifier. 
''' svm_classifier = SVC(C=C, kernel='linear', probability=True) svm_classifier.fit(X,y.reshape(len(y),)) return svm_classifier # Train Model with a 0.1 penalty C = 0.1 model = svmTrain(X,y,C,100) # Predict if spam/not spam based on model - we'll use the sklearn predict method p = model.predict(X) print('Model accuracy is {}'.format((p.reshape(len(p),1)==y).sum()/len(y)*100))Model accuracy is 99.825**Accuracy is really high on the training set.Let's check the performance on the test set:**# Use scipy Io to load matrix object with exercise test data spam_file = io.loadmat('spamTest.mat') X_test = np.array(spam_file['Xtest']) y_test = np.array(spam_file['ytest']) # Predict if spam/not spam based on model - we'll use the sklearn predict method p_test = model.predict(X_test) print('Model accuracy is {}'.format((p_test.reshape(len(p_test),1)==y_test).sum()/len(y_test)*100))Model accuracy is 98.9Model accuracy on the test set is also really good.**Let's take a look at the weight of the features on the algorithm and extract the influence of those features on the target variable.****Let's look at the top predictors for spam - this is, the words that weigh more on the classification of spam/not spam:**vocabList = getVocabList() # Rely on the coefficients of the model to obtain the variable influence weights = model.coef_[0] weights = dict(np.hstack((np.arange(1,1900).reshape(1899,1),weights.reshape(1899,1)))) # Sort Weights in Dictionary weights = sorted(weights.items(), key=lambda kv: kv[1], reverse=True) # Printing the top predictors of spam top_15 = {} for i in weights[:15]: print({v for k,v in vocabList.items() if k[0] == i[0]}){'our'} {'click'} {'remov'} {'guarante'} {'visit'} {'basenumb'} {'dollar'} {'will'} {'price'} {'pleas'} {'most'} {'nbsp'} {'lo'} {'ga'} {'hour'}Loading Cora dataset into Neo4j database Run the latest release of this notebook: This notebook demonstrates how to load Cora dataset into Neo4j graph database.# install StellarGraph if running on Google Colab import sys if 'google.colab' in sys.modules: %pip install -q stellargraph[demos]==1.3.0b # verify that we're using the correct version of StellarGraph for this notebook import stellargraph as sg try: sg.utils.validate_notebook_version("1.3.0b") except AttributeError: raise ValueError( f"This notebook requires StellarGraph version 1.3.0b, but a different version {sg.__version__} is installed. Please see ." ) from None import pandas as pd import os from stellargraph import datasets from IPython.display import display, HTMLLoad Cora dataset (See [the "Loading from Pandas" demo](../../basics/loading-pandas.ipynb) for details on how data can be loaded.)dataset = datasets.Cora() display(HTML(dataset.description)) dataset.download() edge_list = pd.read_csv( os.path.join(dataset.data_directory, "cora.cites"), sep="\t", header=None, names=["target", "source"], ) edge_list["label"] = "cites" display(edge_list.head(5)) feature_names = ["w_{}".format(ii) for ii in range(1433)] column_names = feature_names + ["subject"] node_list = pd.read_csv( os.path.join(dataset.data_directory, "cora.content"), sep="\t", header=None, names=column_names, )Preprocess data# gather all features into lists under 'features' column. node_list["features"] = node_list[feature_names].values.tolist() node_list = node_list.drop(columns=feature_names) node_list["id"] = node_list.index node_list.head(5)Ingest data into Neo4j databaseWe define the graph schema as below:- Each vertex represents a paper + subject (String): the class where each subject belongs to. 
There are seven classes in total. + features (List[int]): 1D-vector represents the presence of each words in the dictionary. + ID (int): id of each paper. (**Note**: this ID attribute is different from the Neo4j id, i.e., the id of each node or relationship which Neo4j automatically assigns with). - Each *directed* edge represents a citation. Each edge points to the paper being cited.As the Cora dataset is small, we could use Cypher queries and execute the transactions via a Python-supported driver.For bigger dataset, this loading job might take very long, so it is more convenient to use ```neo4j-admin import ``` tool, [tutorial here](https://neo4j.com/docs/operations-manual/current/tutorial/import-tool/).import time import py2neo default_host = os.environ.get("STELLARGRAPH_NEO4J_HOST") # Create the Neo4j Graph database object; the arguments can be edited to specify location and authentication graph = py2neo.Graph(host=default_host, port=None, user=None, password=)Delete the existing edges and relationships in the current database.empty_db_query = """ MATCH(n) DETACH DELETE(n) """ tx = graph.begin(autocommit=True) tx.evaluate(empty_db_query)Delete any existing constraints or indexes in the current database.constraints = graph.run("CALL db.constraints").data() for constraint in constraints: graph.run(f"DROP CONSTRAINT {constraint['name']}") indexes = graph.run("CALL db.indexes").data() for index in indexes: graph.run(f"DROP INDEX {index['name']}")Load all nodes to the graph database.loading_node_query = """ UNWIND $node_list as node CREATE( e: paper { ID: toInteger(node.id), subject: node.subject, features: node.features }) """ # For efficient loading, we will load batch of nodes into Neo4j. batch_len = 500 for batch_start in range(0, len(node_list), batch_len): batch_end = batch_start + batch_len # turn node dataframe into a list of records records = node_list.iloc[batch_start:batch_end].to_dict("records") tx = graph.begin(autocommit=True) tx.evaluate(loading_node_query, parameters={"node_list": records})Load all edges to the graph database.loading_edge_query = """ UNWIND $edge_list as edge MATCH(source: paper {ID: toInteger(edge.source)}) MATCH(target: paper {ID: toInteger(edge.target)}) MERGE (source)-[r:cites]->(target) """ batch_len = 500 for batch_start in range(0, len(edge_list), batch_len): batch_end = batch_start + batch_len # turn edge dataframe into a list of records records = edge_list.iloc[batch_start:batch_end].to_dict("records") tx = graph.begin(autocommit=True) tx.evaluate(loading_edge_query, parameters={"edge_list": records})Ensure node IDs are unique. 
Creating this constraint also automatically creates an index which will improve performance of querying nodes by ID.node_id_constraint = """ CREATE CONSTRAINT ON (n:paper) ASSERT n.ID IS UNIQUE """ tx = graph.begin(autocommit=True) tx.evaluate(node_id_constraint)CV TRAINhyper_params = { 'task': 'train', 'boosting_type': 'gbdt', 'objective': 'regression', 'metric': ['l1','l2'], 'learning_rate': 0.005, 'feature_fraction': 0.9, 'bagging_fraction': 0.7, 'bagging_freq': 10, 'verbose': 0, "max_depth": 8, "num_leaves": 128, "max_bin": 512, "num_iterations": 15000 } # CROSS VALIDATION name='lgbm'+version score_df = pd.DataFrame() oof = list() predictions = list() oof_x, oof_y = np.zeros(data.shape[0]), np.zeros(data.shape[0]) preds_x, preds_y = 0, 0 scores_per_fold=[] all_preds=[] all_targets=[] np.random.seed(42) sh=np.random.permutation(unpaths) k=5 val_size=int(len(sh)/k) for f in range(k): print('fold:', f) if f<(k-1): val_paths=sh[val_size*f:val_size*(f+1)] else: val_paths=sh[val_size*f:] train_paths=[x for x in unpaths if x not in val_paths] val_paths=np.array(val_paths) train_paths=np.array(train_paths) X_train = data.loc[data.path.isin(train_paths), TRAIN_FEATS ].reset_index(drop=True) y_trainXY = data.loc[data.path.isin(train_paths), ['x', 'y'] ].reset_index(drop=True).values X_val = data.loc[data.path.isin(val_paths), TRAIN_FEATS ].reset_index(drop=True) y_valXY = data.loc[data.path.isin(val_paths), ['x', 'y'] ].reset_index(drop=True).values val_predsXY=[] for i in range(2): y_train = y_trainXY[:,i] y_val = y_valXY[:,i] gbm = lgb.LGBMRegressor(**hyper_params) model = gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], eval_metric='l1', verbose=100, early_stopping_rounds=200) joblib.dump(model, weights_path+'fold_'+str(f)+['x','y'][i]+'_v2') val_predsXY.append(gbm.predict(X_val, num_iteration=gbm.best_iteration_) ) val_predsXY=np.stack(val_predsXY).T print(euclidean_distance(val_predsXY[:,0], val_predsXY[:,1], y_valXY[:,0], y_valXY[:,1])) all_preds.append(val_predsXY) all_targets.append(y_valXY) all_predsV=np.vstack(all_preds) all_targetsV=np.vstack(all_targets) print('mean validation error, 5-fold', euclidean_distance(all_targetsV[:,0], all_targetsV[:,1], all_predsV[:,0], all_predsV[:,1])) pd.DataFrame(all_predsV,columns=['x', 'y']).to_csv('/gdrive/My Drive/Colab Notebooks/lgbm_oof.csv.gz', index=False, compression='gzip') pd.DataFrame(all_targetsV,columns=['x', 'y']).to_csv('/gdrive/My Drive/Colab Notebooks/all_targetsV.csv.gz', index=False, compression='gzip')EVALUATION - PLOTSimport matplotlib.pyplot as plt plt.plot(all_predsV[:,0],all_predsV[:,1],'.') plt.show()**5 FOLD CV PREDICTIONS OVER TARGET GRID** With blue dots are the true checkpoints Orange dots for predictionsplt.figure(figsize=(10,10)) plt.plot(all_targetsV[:,0],all_targetsV[:,1],'.') plt.plot(all_predsV[:,0],all_predsV[:,1],'.') plt.show() print('mean validation error, 5-fold', euclidean_distance(all_targetsV[:,0], all_targetsV[:,1], all_predsV[:,0], all_predsV[:,1]))Full trainhyper_params["num_iterations"]= 2000 name=version+'KNeighbors' train_paths=unpaths[:] train_paths=np.array(train_paths) X_train = data.loc[data.path.isin(train_paths), TRAIN_FEATS ].reset_index(drop=True) y_trainXY = data.loc[data.path.isin(train_paths), ['x', 'y'] ].reset_index(drop=True).values train_predsXY=[] all_train_targets=[] for i in range(2): y_train = y_trainXY[:,i] gbm = lgb.LGBMRegressor(**hyper_params) model = gbm.fit(X_train, y_train, eval_set=[(X_train, y_train)], eval_metric='l1', verbose=100 ) 
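# No early stopping in this full-train run: training uses the fixed num_iterations set above (2000)
# Persist the fully trained regressor for this coordinate ('x' or 'y') so it can be reloaded for inference later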
joblib.dump(model, weights_path+'FullTrain_'+['x','y'][i]) train_predsXY.append(gbm.predict(X_train, num_iteration=gbm.best_iteration_) ) train_predsXY=np.stack(train_predsXY).T print('train error',euclidean_distance(train_predsXY[:,0], train_predsXY[:,1], y_trainXY[:,0], y_trainXY[:,1])) print('Save LightGBM model fully trained') joblib.dump(model,'/gdrive/My Drive/Colab Notebooks/weights/lgbm_model')Random Erasing Data Augmentation From the abstract of the [paper](https://arxiv.org/abs/1708.04896):```In training, Random Erasing randomly selects a rectangle region in an image and erases its pixels with random values. In this process, training images with various levels of occlusion are generated, which reduces the risk of over-fitting and makes the model robust to occlusion. Random Erasing is parameter learning free, easy to implement, and can be integrated with most of the CNN-based recognition models.``` As seen from image above, this `RandomErase` data augmentation, randomly selects a region from the input image, erases the existing image in that region and fills the region with random values. Training models with `RandomErase` using `timm`'s training script To train a model using the `RandomErase` data augmentation using `timm`'s training script, simply add the `--reprob` flag with a probability value. ```pythonpython train.py ../imagenette2-320 --reprob 0.4```Running the above command applies the `RandomErase` data augmentation to the input images with a probability of `0.4`. Using `RandomErase` data augmentation in custom training scripts Section `1.1` provides an example of using `RandomErase` data augmentation to train a nueral net using `timm`'s training script. But often you might want to simply just use `RandomErase` augmentation using your own custom training loop. This section explains how one could achieve that. The `RandomErase` data augmentation inside `timm` is implemented inside `RandomErasing` class. All we do in the code below, is first create an input image tensor, and visualize it. > NOTE: This variant of RandomErasing is intended to be applied to either a batch or single image tensor after it has been normalized by dataset mean and std. This is different from `RandAugment` where the class expects a `PIL.Image` as input.from PIL import Image from timm.data.random_erasing import RandomErasing from torchvision import transforms from matplotlib import pyplot as plt img = Image.open("../../imagenette2-320/train/n01440764/ILSVRC2012_val_00000293.JPEG") x = transforms.ToTensor()(img) plt.imshow(x.permute(1, 2, 0))Great, as we can see it is the same image of a "tench" as shown pretty much everywhere inside this documentation. Let's now apply the `RandomErasing` augmentation and visualize the results.random_erase = RandomErasing(probability=1, mode='pixel', device='cpu') plt.imshow(random_erase(x).permute(1, 2, 0))Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).Intro To Data Science With Linear Regression What Is Linear Regression? In this excercise, you'll utilize the Linear Regression model from Scikit-Learn to predict housing prices in Boston.Linear regression is the fundamental building block of data science and analytics. If you ever venture into data science, this will most likely be the first model you're taught.Linear regression models are very simple, interpretable, and somewhat flexible. The goal is to predict a continuous output variable (e.g. MPG, prices, etc.) 
from a set of predictor variables, known as features.Within industry, you'll almost always try the linear regression before moving to advanced models, such as GBM, random forests, or neural networks. Getting Started & Preprocessing First, import the necessary libraries to run the notebook. Press `Shift + Enter` to run the cell below.from sklearn.datasets import load_boston from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error import pandas as pd import numpy as np from matplotlib import pyplot as plt from matplotlib import cm as cm import seaborn as sns from matplotlib.colors import ListedColormapLoad the boston dataset. This is a dataset that's installed within Scikit-Learn.The goal with this exercise: predict the housing price, using other columns (features) in the dataset.Load the Boston housing data with the line below.`boston = load_boston()`boston = load_boston()Next, separate the data into the features and target using the following code:`y = boston.target``boston = pd.DataFrame(boston.data)`y = boston.target boston = pd.DataFrame(boston.data)Print the boston dataset using the following code. The `head` method prints out the first 5 lines of your data.`boston.head()`boston.head()The columns don't have any labels! This happens with some datasets. Assuming you have a data dictionary, you can label the columns. For the time being, add this line into the cell below, and call the `head` method on the DataFrame again.Refer to the `data_dictionary.pdf` document to see what each column name refers to.`boston.columns = ['crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age', 'dis', 'rad', 'tax', 'ptratio', 'black', 'lstat']``boston.head()`boston.columns = ['crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age', 'dis', 'rad', 'tax', 'ptratio', 'black', 'lstat'] boston.head()Now that the data is labeled, we have a better sense of what each column means.To reiterate, we'll be predicting the housing prices using all of these columns (features). Plotting Correlations Now that the data is in the right format, we can plot a correlation matrix. This shows us what features are correlated with each other.For reference, -1 is uncorrelated, and 1 is highly correlated. Run the function below to look at the numbers.`boston.corr()`boston.corr()We have the numbers from the correlation matrix, but it's not as easy to view or interpret as a plot.To see correlations plotted by color, run the `correlation_matrix_plot` function below.Examine the correlations in the lower triangle, then answer the questions below.def correlation_matrix_plot(n_top_features, df): feats = n_top_features corr = df[list(feats)].corr() mask = np.zeros_like(corr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True labels = corr.where(np.triu(np.ones(corr.shape)).astype(np.bool)) labels = labels.round(2) labels = labels.replace(np.nan,' ', regex=True) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(9,9)) # Generate a custom diverging colormap cmap = cm.get_cmap('jet', 30) # Draw the heatmap with the mask and correct aspect ratio ax = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, vmin=-1, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) plt.tight_layout() plt.show() correlation_matrix_plot(boston.columns,boston)Correlation Matrix Questions Looking at the plot above: 1. What features are highly correlated?> From looking at the correlations, `rad` and `tax` are shown to be heavily correlated. 
2. Which features are highly uncorrelated?> `lstat` is highly uncorrelated with `rm` and `dis`. Additionally, `dis` is highly uncorrelated with `indus`, `nox`, and `age`. Building the Linear Regression Model Now that the data is in the right format, we can begin to build the linear regression model. First, we're going to split the data. In data science, your data is split into two datasets.The first dataset is the *training* set. Building a model is referred to as "training", hence the moniker of a "training" data set. The second dataset is the *test* set. This is used to make predictions, and evaluate if our model is performing well.To split the data into training and test data sets, type the following line.`X_train, X_test, y_train, y_test = train_test_split(boston, y, test_size=0.20, random_state=42)`X_train, X_test, y_train, y_test = train_test_split(boston, y, test_size=0.20, random_state=42)With the data being split, we'll now create the LinearRegression module. Write the line in the cell below:`model = LinearRegression()`model = LinearRegression()You're now ready to train the model. Write and run the following line:`model.fit(X_train, y_train)`model.fit(X_train, y_train)Predict and Score Model Now that the model is trained, we can predict new values using the test set. Write the following code to predict the housing prices.`predictions = model.predict(X_test)`predictions = model.predict(X_test)Next, we'll look at the coefficients for our model. Coefficients describe the mathematical relationship between each independent feature(s) and the target variable. The sign of a regression coefficient tells you whether there is a positive or negative correlation between each independent variable and the dependent variable. A positive coefficient indicates that as the value of the independent variable increases, the mean of the dependent variable also tends to increase. A negative coefficient suggests that as the independent variable increases, the dependent variable tends to decrease.The coefficient value signifies how much the mean of the dependent variable changes given a one-unit shift in the independent variable while holding other variables in the model constant. This property of holding the other variables constant is crucial because it allows you to assess the effect of each variable in isolation from the others.`coefficients = pd.DataFrame(model.coef_, boston.columns).sort_values(by=0, ascending=False)``print(coefficients)`coefficients = pd.DataFrame(model.coef_, boston.columns).sort_values(by=0, ascending=False) print(coefficients)0 rm 4.432488 chas 2.786767 rad 0.262114 indus 0.040731 zn 0.030081 black 0.012452 age -0.006240 tax -0.010639 crim -0.112463 lstat -0.509349 ptratio -0.916399 dis -1.448485 nox -17.240635Finally, to gain an understanding of how our model is performing, we'll score the model against three metrics: R squared, mean squared error, and mean absolute error. 
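For reference, mean squared error is $\frac{1}{n}\sum_i (y_i - \hat{y}_i)^2$, mean absolute error is $\frac{1}{n}\sum_i |y_i - \hat{y}_i|$, and R squared is $1 - \sum_i (y_i - \hat{y}_i)^2 / \sum_i (y_i - \bar{y})^2$, the fraction of the target's variance explained by the model. Lower is better for the two error metrics, while an R squared closer to 1 is better.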
Write the following lines of code to get your output.`print("R Squared Score: ", r2_score(y_test, predictions))``print("Mean Squared Error: ", mean_squared_error(y_test, predictions))``print("Mean Absolute Error: ", mean_absolute_error(y_test, predictions))`print("R Squared Score: ", r2_score(y_test, predictions)) print("Mean Squared Error: ", mean_squared_error(y_test, predictions)) print("Mean Absolute Error: ", mean_absolute_error(y_test, predictions))R Squared Score: 0.668482575397 Mean Squared Error: 24.3114269297 Mean Absolute Error: 3.19150897227Elastic network (harmonic springs) in OpenMMThis network shows how to add harmonic restraints between different atoms in OpenMM. In this case, we'll tie backbone atoms to the "$i+2$" backbone atom (i.e., next-nearest neighbor; nearest backbone atom not already bonded to this one).import openmm_scaled_md as scaled import simtk.openmm as mm from simtk.openmm import app from simtk import unit import mdtraj as md pdb_file = '../resources/AD_initial_frame.pdb' pdb = app.PDBFile(pdb_file) forcefield = app.ForceField('amber96.xml', 'tip3p.xml') pdb_system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0*unit.nanometers, constraints=app.HBonds, rigidWater=True, ewaldErrorTolerance=0.0005)The first two cells are identical to the normal setup.The next cells add restraints on the absolute positions. We'll use [MDTraj's atom selection language](http://mdtraj.org/latest/atom_selection.html) to select atoms; this *does* assume that MDTraj does not change the atom ordering scheme when creating an `mdtraj.Topology` from an `openmm.app.Topology`. We'll also assume that we want to restrain to the distances as given in the PDB.topology = md.Topology.from_openmm(pdb.topology) pos_restrained_atoms = topology.select("backbone") atom_pairs = [(pos_restrained_atoms[i], pos_restrained_atoms[i+2]) for i in range(len(pos_restrained_atoms)-2)] # calculate the distances based on the PDB file traj = md.load(pdb_file) default_distances = md.compute_distances(traj, atom_pairs=atom_pairs)[0] # first frame in traj elastic_network = mm.HarmonicBondForce() length_unit = unit.nanometer energy_unit = unit.kilojoule_per_mole for ((atom_a, atom_b), r_0) in zip(atom_pairs, default_distances): elastic_network.addBond(atom_a, atom_b, length=float(r_0)*length_unit, k=5.0*energy_unit/length_unit**2) pdb_system.addForce(elastic_network)Running with an elastic networkNow we'll create a trajectory that uses this system for dynamics. 
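For reference, each bond added to the `HarmonicBondForce` contributes an energy $E = \frac{1}{2} k (r - r_0)^2$, where $r_0$ is the pair distance measured from the PDB frame and $k$ is the spring constant (5.0 kJ/mol/nm² in the cell above), so the network gently pulls the backbone toward its initial geometry rather than rigidly constraining it.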
Getting the `simulation` object and using it to run MD is the same as is normally done.# this is equivalent to the standard BAOAB integrator, since force_scaling is 1 integrator = scaled.integrators.BAOABIntegrator( temperature=300.0*unit.kelvin, collision_rate=1.0/unit.picosecond, timestep=2.0*unit.femtosecond, force_scaling=1.0 ) sim = app.Simulation(pdb.topology, pdb_system, integrator) sim.context.setPositions(pdb.positions) sim.reporters.append(app.PDBReporter('elastic_network.pdb', 10)) sim.step(1000)Visualizing with NGLViewIf you also have [NGLView](http://nglviewer.org/nglview/latest/) installed, you can visualize the trajectory in-notebook.import nglview as nv traj = md.load("./elastic_network.pdb") view = nv.show_mdtraj(traj) view.add_ball_and_stick("ACE ALA NME") view.add_point("water and .O") viewPandas SeriesSeries :- one-dimensional array of indexed data#Create a series import pandas as pd series_1 = pd.Series([1,2,3,4]) # Left column is index and Right column is Data series_1 series_1.index series_1.values series_1[1] series_1[1:] series_1[:2] # Here Index Assign naming to particulr data series_2 = pd.Series([1,2,3,4], index=['a','b','c','d']) series_2 series_2[1] series_2['b'] #Use Python Dictionary as series Series. # A dictionary is a structure which maps arbitrary keys to a set of arbitrary values, and a series is a structure which # maps typed keys to a set of typed values. #Create a Python Dictionary python_dic = {'Nandan':'Big Data','Kundan':'Manager','Chandan':'Businees Man'} # Assign Dictionary to Series people = pd.Series(python_dic) # Check the Series people #Check type of data object type(people) people['Nandan'] people[2]EKE preprocessing This notebook shows how to generate the EKE_timeseries.nc file# Import libraries import xarray as xr import cmocean as cm import cartopy.crs as ccrs import pylab as plt import numpy as np # Inline plotting %matplotlib inline from dask.distributed import Client from utils import area,ccrs_land,add_patches import datetime c = Client() c files = '/g/data/ua8/CMEMS_SeaLevel/v2019/*/*.nc' # files='path2AVISO+/*.nc' dataset_AVISO = xr.open_mfdataset(files,parallel=True) dataset_AVISOCompute EKE:\begin{equation}EKE = \frac{1}{2} \left(u'^2+v'^2\right)\end{equation}The provided $u'$ and $v'$ from AVISO is an anomaly from 1993-2012. Changing this period does not affect the main result of the study.EKE = 1/2 * (dataset_AVISO.ugosa**2 + dataset_AVISO.vgosa**2)Rechunk datasetEKE_rechunk = EKE.chunk({"latitude": 100, "longitude": 100,'time':357})Coarsen dataset to a 1 degree grid.EKE_coarsen = EKE_rechunk.coarsen({'latitude':4,'longitude':4}).mean()Roll over 12 months:EKE_rolled = EKE_coarsen.rolling(time=365,center=True).mean().compute() # This computation requires ~ 50 GB of RAM, to decrease the usage of RAM, # reduce the size of the chunks. EKE_rolled = EKE_rolled.to_dataset(name="EKE").rename({'latitude':'lat','longitude':'lon'}) EKE_rolledAdd metadataEKE_rolled.attrs['title'] = "Eddy Kinetic Energy" EKE_rolled.attrs['Description'] = """Eddy Kinetic Energy computed from AVISO+ altimetry products.""" EKE_rolled.attrs['Publication'] = "Dataset created for Martínez-Moreno, J. et. al. 
2020: \n 'Global changes in oceanic mesoscale currents over the satellite altimetry record'" EKE_rolled.attrs['Author'] = "" EKE_rolled.attrs['Contact'] = "" EKE_rolled.attrs['Created date'] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S") counter = 0 units = [r"$m^2s^{-2}$"] names = ["EKE"] long_names = ["Eddy Kinetic Energy"] EKE_rolled['EKE'].attrs['units'] = units[counter] EKE_rolled['EKE'].attrs['name'] = names[counter] EKE_rolled['EKE'].attrs['long_name'] = long_names[counter] EKE_rolled['EKE'].attrs['missing_value'] = np.nan EKE_rolled['EKE'].attrs['valid_min'] = np.nanmin(EKE_rolled['EKE']) EKE_rolled['EKE'].attrs['valid_max'] = np.nanmax(EKE_rolled['EKE']) EKE_rolled['EKE'].attrs['valid_range'] = [np.nanmin(EKE_rolled['EKE']),np.nanmax(EKE_rolled['EKE'])]Store netCDFcomp = dict(zlib=True, complevel=5) encoding = {var: comp for var in EKE_rolled.data_vars} EKE_rolled.to_netcdf('../datasets/EKE_timeseries.nc', encoding=encoding)**Team F5- IPinsights algorithm to detect anomalies from web server logs** Introduction The Amazon sage maker IP insights model captures correlation between Online resources (User Id and or hostnames) and IPv4 addresses by learning vector representations of them using statistical modelling and neural networks. * The Amazon Web Services SDK for Python (Boto3) allows you to establish, configure, and administer AWS services including Amazon Elastic Compute Cloud (Amazon EC2) and Amazon Simple Storage Service (Amazon S3) * The SDK includes both an object-oriented API and low-level access to Amazon Web Services.* The botocore package is the basis for boto3 and the AWS CLI, and it was built with the goal of increasing the use of Python in AWS.#Importing Sagemaker related libraries import boto3 import botocore import os import sagemaker #Importing sagemaker SDK storage = sagemaker.Session().default_bucket() #Our AWS account accesses this default storage S3 bucket main_path = "AWSsagemaker/ipinsights-Team4" #Location in the bucket to store our output and Input execution_role = sagemaker.get_execution_role() #IAM role that grants permissions loc = boto3.Session().region_name #Referring to the region name the session is in # check if the specified S3 bucket is present try: boto3.Session().client("s3").head_bucket(Bucket=storage) except botocore.exceptions.ParamValidationError as e: print("Check the parameter given in S3 bucket" ) except botocore.exceptions.ClientError as e: ec = e.response['Error']['Code'] if ec == "404": print(f"Bucket {storage} doesn't exist") elif ec == "403": print(f"Forbidden to access the bucket, {storage}") else: raise else: print(f"Please find the training output and input stored in the following location: s3://{storage}/{main_path}")Please find the training output and input stored in the following location: s3://sagemaker-(aws region)-(AWS Account ID)/AWSsagemaker/ipinsights-Team4Amazon S3 bucket instance has been created to store the training data and any other object belonging to the trained model.from os import path storage_tools = f"jumpstart-cache-prod-{loc}" #The data generating module is contained in this bucket. path_tools = "1p-algorithms-assets/ip-insights" # The data generating module's prefix s3 = boto3.client("s3") data_gen_file = "generate_data.py" # We can give our own synthetic data generation script here. 
In this case, #we make use of the readily available script to generate simulated apache web server log data ip2asn_file = "ip2asn-v4-u32.tsv.gz" #IP to ASN database file commonly used in similar IP address related #machine learning #If data generation file is not available, it will download into the given location if not path.exists(data_gen_file): print(f"data genfile path doesnt exist") s3.download_file(storage_tools, f"{path_tools}/{data_gen_file}", data_gen_file) if not path.exists(ip2asn_file): print(f"ip2asn file path doesnt exist") s3.download_file( storage_tools, f"{path_tools}/{ip2asn_file}", ip2asn_file)A dataset that mimics system traffic was created using a python script that generates random Apache web logs.#generating data from generate_data import generate_dataset USER_COUNT = 1000 data_file = "simulated_ipinsights_webserver_traffic.log" generate_dataset(USER_COUNT, data_file)Loaded ASN List: 827696 ASNs. Starting User Activity SimulationWe can see the IP addresses each user has associated with by looking at the sample log output below:# Visualize few log lines !head $data_file192.168.3.11 - user_0 [08/Nov/2018:20:58:23 +0000] "GET /login_success HTTP/1.1" 200 476 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/555.33 (KHTML, like Gecko) Chrome/1.1.1111.100 Safari/555.355" 172.16.58.3 - user_0 [05/Nov/2018:21:19:49 +0000] "GET /login_success HTTP/1.1" 200 476 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/555.33 (KHTML, like Gecko) Chrome/1.1.1111.100 Safari/555.355" 172.16.31.10 - user_0 [08/Nov/2018:07:34:07 +0000] "GET /login_success HTTP/1.1" 200 476 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/555.33 (KHTML, like Gecko) Chrome/1.1.1111.100 Safari/555.355" 172.16.31.10 - user_0 [06/Nov/2018:02:59:27 +0000] "GET /login_success HTTP/1.1" 200 476 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/555.33 (KHTML, like Gecko) Chrome/1.1.1111.100 Safari/555.355" 192.168.3.11 - user_0 [13/Nov/2018:21:41:15 +0000] "GET /login_success HTTP/1.1" 200 476 "-" "Mozilla/5.0 (Macintosh; Intel Mac[...]Now the data generated needs to be transformed in to a way that is feasible to feed it to ip insights.import pandas as pd #reading logs in to the dataframe and adding column names to it data = pd.read_csv( data_file, na_values="-", sep=" ", header=None, names=[ "user_ip_address", #IP address of the user who had accessed the server "rcf_id", "user_id", #ID of the user who had accessed the server "time_stamp", "timezone", "type_of_request", "response_status_code", "object_size", #object size sent to client "referer", "user_agent", ], ) data.head() #size of the dataset data.shape data[["time_stamp"]]To make it easier to sort and analyse the information, we transform the log timestamp strings to Python datetimes.data["time_stamp"] = pd.to_datetime(data["time_stamp"], format="[%d/%b/%Y:%H:%M:%S") #Make sure they're all in the same time zone. count_timezones = len(data["timezone"].unique()) count_timezones from datetime import datetime import pytz def transform_timezone(row): timez = row[1] timez_offset = int(timez[:3]) * 60 # Hour offset timez_offset += int(timez[3:5]) # Minutes offset return row[0].replace(timezinfo=pytz.FixedOffset(timez_offset)) if count_timezones > 1: data["time_stamp"] = data[["time_stamp", "timezone"]].apply(transform_timezone, axis=1)The purpose of this design is to train the IP insights algorithm to predict the anomalous login events by analysing the user login history. 
1)To do that, first need to filter the GET requests where success code is 200 to select the successful user logins.data = data[(data["type_of_request"].str.startswith("GET /login_success")) & (data["response_status_code"] == 200)] data = data[["user_id", "user_ip_address", "time_stamp"]] data["time_stamp"].describe(datetime_is_numeric=True)11/04-11/10 marks the end of first 7 days of simulated logs and we will use it as train set. From 11/11 to 11/13 we use it for test set 2) Take the CSV file created and split the dataset into two adjacent sets where one for training and the other set for testing purpose.time_window = ( datetime(2018, 11, 11, timezinfo=pytz.FixedOffset(0)) if count_timezones > 1 else datetime(2018, 11, 11) )**Train-Test split**train_data = data[data["time_stamp"] <= time_window] test_data = data[data["time_stamp"] > time_window]3)Take the training dataset and shuffle it to improve performance of the model then upload the training dataset to S3 bucket storage.# Shuffling train data to improve performance of the model train_data = train_data.sample(frac=1) train_data.head()Store Data on S3# Uploading train dataset as headerless CSV in S3 since IPInsights takes in a headerless CSV as input train_data1 = train_data.to_csv(index=False, header=False, columns=["user_id", "user_ip_address"])Configure the SageMaker IP Insights algorithm parameters and define the computational environment in the model. Then start training the model on training dataset stored in S3 bucket where successful completion of the training would store the output of IP insights model in the specified location.# Upload train data to S3 key training_data1_file = "train.csv" key = os.path.join(main_path, "train", training_data1_file) s3_train_data = f"s3://{storage}/{key}" print(f"Train Data {training_data1_file} uploading to this path: {s3_train_data}") boto3.resource("s3").Bucket(storage).Object(key).put(Body=train_data1) #SageMaker IP Insights Input Channels Configuration input_data = { "train": sagemaker.session.s3_input( s3_train_data, distribution="FullyReplicated", content_type="text/csv" ) } # create a headerless CSV file from the test dataset test_data1 = test_data.to_csv(index=True, header=False, columns=["user_id", "user_ip_address"]) # Using S3 to store test data test_data1_file = "test.csv" key = os.path.join(main_path, "test", test_data1_file) s3_test_data = f"s3://{storage}/{key}" print(f"Test data {test_data1_file} uploading to: {s3_test_data}") boto3.resource("s3").Bucket(storage).Object(key).put(Body=test_data1)Test data test.csv uploading to: s3://sagemaker-(aws region)-(AWS Account ID)/AWSsagemaker/ipinsights-Team4/test/test.csvTrainingfrom sagemaker.amazon.amazon_estimator import get_image_uri uri_image = get_image_uri(boto3.Session().region_name, "ipinsights") #container image used for the purpose of training # Configuration of training job to set up the estimator ip_insights = sagemaker.estimator.Estimator( uri_image, execution_role, instance_count=1, instance_type="ml.p3.2xlarge", output_path=f"s3://{storage}/{main_path}/output", sagemaker_session=sagemaker.Session(), ) # Configure hyperparameters of IPInsights algorithm ip_insights.set_hyperparameters( num_entity_vectors="20000", random_negative_sampling_rate="5", vector_dim="128", mini_batch_size="1000", epochs="5", learning_rate="0.01", ) # Starting the training job ip_insights.fit(input_data)2021-09-29 01:50:16 Starting - Starting the training job... 
2021-09-29 01:50:38 Starting - Launching requested ML instancesProfilerReport-1632880215: InProgress ...... 2021-09-29 01:51:40 Starting - Preparing the instances for training......... 2021-09-29 01:53:07 Downloading - Downloading input data 2021-09-29 01:53:07 Training - Downloading the training image...... 2021-09-29 01:54:09 Training - Training image download completed. Training in progress..Docker entrypoint called with argument(s): train Running default environment configuration script [09/29/2021 01:54:13 INFO 140101656037184] Reading default configuration from /opt/amazon/lib/python3.7/site-packages/algorithm/resources/default-input.json: {'batch_metrics_publish_interval': '1000', 'epochs': '10', 'learning_rate': '0.001', 'mini_batch_size': '5000', 'num_entity_vectors': '100000', 'num_ip_encoder_layers': '1', 'random_negative_sampling_rate': '1', 'shuffled_negative_sampling_rate': '1', 'vector_dim[...]The trained model would be deployed to an endpoint to start making predictions on data. This model predicts outputs as a dot product of the learned IP address and the user ID where this output gives the compatibility between the IP address and the user ID. The higher the output value is the more IP address matches with the user ID. But in order to consider a user login as anomalous, a threshold has to be defined. If the dot product is above threshold value, the user login is a suspicious login attempt to the website. The action of defining a good threshold depends on the dataset, the problem specification, or the requirement of the system user. **Deploying to Endpoint**- For performing an inference on the data, we have to deploy the model to an end point which refers to predicting the dot product score/compatibility score if user id and ip address are given- Here we will create an inference endpoint using deploy() function- This inference endpoint will be used to integrate with other services in order to accomplish our goalpredictor = ip_insights.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge") print(f"Endpoint name: {predictor.endpoint}")The endpoint attribute has been renamed in sagemaker>=2. 
See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.Data Serialization/Deserialization * **Passing csv data to inference endpoint and configuring the inference endpoint using csv_serializer and json_deserializer** *from sagemaker.predictor import csv_serializer, json_deserializer predictor.serializer = csv_serializer predictor.deserializer = json_deserializer- Passing a matrix of inference data to the predictor to predictthe dot product which outlines the amount of compatibility between the online resource and IP Address- A higher dot product value shows the user is more likely to use the identified IP Address.inference_data = [(data[0], data[1]) for data in train_data[:5].values] predictor.predict( inference_data, initial_args={"ContentType": "text/csv", "Accept": "application/json"} ) inference_data = [(data[0], data[1]) for data in test_data[:5].values] predictor.predict( inference_data, initial_args={"ContentType": "text/csv", "Accept": "application/json"} ) inference_data = [(data[0], data[1]) for data in test_data.values] test_scores = predictor.predict( inference_data, initial_args={"ContentType": "text/csv", "Accept": "application/json"} ) test_scores_df = pd.DataFrame(test_scores) # Uploading test score data as headerless CSV in S3 since IPInsights takes in a headerless CSV as input test_scores1 = test_scores_df.to_csv(index=False,header=False) # Upload test data to S3 key test_scores1_file = "test_scores.csv" key = os.path.join(main_path, "test_data_scores", test_scores1_file) s3_test_score_data = f"s3://{storage}/{key}" print(f"Test score data {test_scores1_file} uploading to: {s3_test_score_data}") boto3.resource("s3").Bucket(storage).Object(key).put(Body=test_scores1)Test score data test_scores.csv uploading to: s3://sagemaker-(aws region)-(AWS Account ID)/AWSsagemaker/ipinsights-Team4/test_data_scores/test_scores.csvOnly the dot product between the learnt IP address and the user ID will be reported by the predictor. The dot product summarises the IP address and online resource compatibility. The higher the value, the more probable the IP address will be utilised by the user, according to the algorithm.Because we can specify a threshold for what we consider an abnormal score, this compatibility score is adequate for most applications.predictor.predict( inference_data, initial_args={"ContentType": "text/csv", "Accept": "application/json; verbose=True"}, )The csv_serializer has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. The json_deserializer has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.Compute Anomaly Scores - Since the dot product's range is unbounded, we will have to set a threshold to flag an event as anomolous- In order to detect and compute anomaly score if the dot_product is above the threshold we can flag it as anomolous behavior. In order to do this we will compare the score distributions between known normal and malicious traffic to select a threshold.- However, to select an appropriate threshold, first take the test dataset created before and consider it as the ‘Normal’ traffic. - Then to stimulate an attacker login using a user ID belonging to that website but with a different IP address, take few user IDs from the test dataset, couple it with randomly generated IP addresses and inject them to the test dataset as the ‘Malicious’ traffic. 
- Then plot the distribution of normal traffic and malicious traffic using the dot product scores generated by the model and select a threshold value that separates the normal traffic and malicious traffic.## Construct 'Normal' Traffic Dataset test_data.head()- Here we will simulate some malicious traffic which replicates a scenario of cyber attack- To simulate a hacker logging into an account with no knowledge of their IP history, we pick a random set of accountsfrom test data and generate random IP addresses. This will make these events malicious## Injection of Malicious Traffic import numpy as np from generate_data import draw_ip def score_ip_insights(predictor, data): def get_score(result): """Return the negative to the dot product of the predictions from the model.""" return [-prediction["dot_product"] for prediction in result["predictions"]] data = data[["user_id", "user_ip_address"]] result = predictor.predict(data.values) return get_score(result) def create_test_case(train_data, test_data, num_samples, attack_freq): # Get as many accounts as you can. Only users who have been seen in training can be predicted by the IP Insights model. # As a result, remove any accounts that have never been viewed from the test sample, as their findings will be meaningless. valid_accounts = set(train_data["user_id"]) valid_test_df = test_data[test_data["user_id"].isin(valid_accounts)] clean_traffic = valid_test_df.sample(num_samples, replace=False) clean_traffic = clean_traffic[["user_id", "user_ip_address"]] clean_traffic["label"] = 0 # Generating anomalous traffic num_malicious_traffic = int(num_samples * attack_freq) malicious_traffic_accounts = np.random.choice( list(valid_accounts), size=num_malicious_traffic, replace=True ) malicious_traffic_ips = [draw_ip() for i in range(num_malicious_traffic)] malicious_traffic = pd.DataFrame({"user_id": malicious_traffic_accounts, "user_ip_address": malicious_traffic_ips}) malicious_traffic["label"] = 1 # All traffic labels are in the format: 0 for legitimate traffic; 1 for malicious traffic. all_traffic = clean_traffic.append(malicious_traffic) return all_traffic NUMBER_OF_SAMPLES = 1000 test_case = create_test_case(train_data, test_data, num_samples=NUMBER_OF_SAMPLES, attack_freq=1) test_case.head() test_case scores_test_case = score_ip_insights(predictor, test_case) scores_test_caseThe csv_serializer has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. The json_deserializer has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.Plot Distribution Let's plot the distribution of scores. This distribution will show us where, we should establish a reasonable threshold.%matplotlib inline import matplotlib.pyplot as plt a, b = np.histogram(scores_test_case[:NUMBER_OF_SAMPLES], bins=100, density=True) plt.plot(b[1:], a) a, b = np.histogram(scores_test_case[NUMBER_OF_SAMPLES:], bins=100, density=True) plt.plot(b[1:], a) plt.legend(["Normal_IP", "Random_IP"]) plt.xlabel("Score from IP Insights") plt.ylabel("Frequency") plt.figure()Threshold value setupWe will select threshold as 0 to catch more true positives based on business impact. 
We can change it to whatever we want.If the system is more responsive to false positive decisions, the threshold can be set to a value such as 10.0 which detects the login attempt as high suspicious.threshold = 0.0 flag_cases = test_case[np.array(scores_test_case) > threshold] num_flag_cases = len(flag_cases) true_positives = len(flag_cases[flag_cases["label"] == 1]) false_positives = len(flag_cases[flag_cases["label"] == 0]) all_positives = len(test_case.loc[test_case["label"] == 1]) print(f"Threshold that was set: {threshold}") print(f"Number of cases flagged : {num_flag_cases}") print(f"Flagged True positive cases : {true_positives}") print(f"True Positive Rate: {true_positives / float(num_flag_cases)}") print(f"Recall: {true_positives / float(all_positives)}") print(f"Precision: {true_positives / float(num_flag_cases)}")Threshold that was set: 0.0 Number of cases flagged : 1019 Flagged True positive cases : 984 True Positive Rate: 0.9656526005888125 Recall: 0.984 Precision: 0.9656526005888125**Amazon Sagemaker Automatic Model Tuning** - To automate finding the process of best hyperparameters, we can use Amazon Sagemaker Automatic Model Tuning.- If we get to use the same test dataset for best model selection, we might end up being biased.- The test data is split as validation dataset and test dataset where model selection is done using validation dataset.#Splitting test set and validation set validation_split = ( datetime(2018, 11, 13, tzinfo=pytz.FixedOffset(0)) if count_timezones > 1 else datetime(2018, 11, 13) ) validation_df = test_data[test_data["time_stamp"] < validation_split] test_data = test_data[test_data["time_stamp"] >= validation_split] valid_final_data = validation_df.to_csv(index=False, header=False, columns=["user_id", "user_ip_address"]) # Data should be uploaded to an S3 key. valdata_csvfile = "valid.csv" key = os.path.join(main_path, "validation", valdata_csvfile) boto3.resource("s3").Bucket(storage).Object(key).put(Body=valid_final_data) s3_valid_final_data = f"s3://{storage}/{key}" print(f"Uploaded the final validation data in S3 to: {s3_valid_final_data}") # Configuring validation input channels for IP Insights input_data = {"train": s3_train_data, "validation": s3_valid_final_data} from sagemaker.tuner import HyperparameterTuner, IntegerParameter # Configuring autotuner HyperparameterTuner ipins_autotuner = HyperparameterTuner( estimator=ip_insights, # previously-configured Estimator object objective_metric_name="validation:discriminator_auc", hyperparameter_ranges={"vector_dim": IntegerParameter(64, 1024)}, max_jobs=4, max_parallel_jobs=2, ) # Begin the hyperparameter tweaking process. ipins_autotuner.fit(input_data, include_cls_metadata=False) ipins_autotuner.wait() # This code makes the auto tuner to Wait for all jobs to complete # Visualize the outcomes of training ipins_autotuner.analytics().dataframe() # Deploying best model selected by autotuner best_autotuned_predictor = ipins_autotuner.deploy( initial_instance_count=1, instance_type="ml.m4.xlarge", serializer=csv_serializer, deserializer=json_deserializer, ) # prediction based on the SageMaker endpoint best_autotuned_predictor.predict( inference_data, initial_args={"ContentType": "text/csv", "Accept": "application/json"} )The csv_serializer has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. The json_deserializer has been renamed in sagemaker>=2. 
See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.From the training task, we ought to have the top performing model.Just the same as we did with the inference endpoint above, we now can set thresholds and predict outcomes.We'll be allowed to use this better model to set a threshold and make predictions based on an input. **BATCH TRANSFORM** - Batch transforms removes noise and bias that comes with any inference or training from our data- Inferences from large datasets like log files are obtained from Batch transform - We wil be taking the existing training job and evaluate it on the validation data in S3 bucketbatch_transf = ip_insights.transformer(instance_count=1, instance_type="ml.m5.xlarge") batch_transf.transform(s3_valid_final_data, content_type="text/csv", split_type="Line") # Wait for the Transform Job to complete batch_transf.wait() print(f"O/P for batch transform is in this path {batch_transf.output_path}")O/P for batch transform is in this path s3://sagemaker-(aws region)-(AWS Account ID)/ipinsights-2021-09-29-02-27-55-552Data Import and Preprocessingtrain = pd.read_csv("/kaggle/input/Kannada-MNIST/train.csv") train.head() test = pd.read_csv("/kaggle/input/Kannada-MNIST/Dig-MNIST.csv") test.head() y_train = train.iloc[:,0].to_numpy() # converts label column to numpy array x_train = train.iloc[:,1:].to_numpy() # converts data feature columns to numpy array y_test = test.iloc[:,0].to_numpy() x_test = test.iloc[:,1:].to_numpy() x_train.shape import matplotlib.pyplot as plt one_image = x_train[50] plt.imshow(one_image.reshape(28,28), cmap= "gray") #re-map 1d array to 2d array to draw image y_train[50] x_train = x_train/255 x_train - x_train.astype("float32") x_test = x_test/255 x_test - x_test.astype("float32")**Baseline Model**import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # create baseline model baseline = Sequential([ Dense(10, input_shape = (784, ), activation = "softmax") ]) # compile model baseline.compile(loss = "sparse_categorical_crossentropy", # loss function = sparse categorical crossentropy optimizer = "adam", metrics = ["accuracy"]) # train model history = baseline.fit(x_train, y_train, epochs = 10, validation_split = 0.1) # 10% data for validation # check overfitting by plotting loss curvers train_accuracy = history.history["accuracy"] val_accuracy = history.history["val_accuracy"] train_loss = history.history["loss"] val_loss = history.history["val_loss"] epochs = range(len(train_accuracy)) plt.plot(epochs, train_accuracy, '-b') plt.plot(epochs, val_accuracy, '-g') plt.title("Accuracy Plot") plt.figure() plt.plot(epochs, train_loss, '-b') plt.plot(epochs, val_loss, '-g') plt.title("Loss Plot") plt.figure()Convolutional Neural Network (CNN)# reshape data into 2 dimensional x_train_2d = x_train.reshape(60000, 28, 28, 1) # reshape(# samples, height of sample, width of sample, channel dimension) x_test_2d = x_test.reshape(-1, 28, 28, 1) # -1 means infer dimension from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten from tensorflow.keras.callbacks import EarlyStopping es = EarlyStopping(patience=3) # stops training when validation loss stops improving (over 3 epochs) cnn = Sequential([ Conv2D(filters = 32, kernel_size = (3,3), input_shape = (28,28,1), padding="same", activation = "relu"), # hyperparameters - can be tuned! 
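# padding='same' keeps the 28x28 spatial size after each Conv2D; each MaxPooling2D below (default 2x2 pool) halves it, so the feature maps shrink 28x28 -> 14x14 -> 7x7 before Flatten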
MaxPooling2D(), Conv2D(filters = 64, kernel_size = (3,3), padding="same", activation = "relu"), # add another convolutional layer - another hyperparameter MaxPooling2D(), Flatten(), # flatten object from 2d to 1d Dense(256, activation = "relu"), Dense(10, activation = "softmax") ]) cnn.compile(loss="sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"]) history = cnn.fit(x_train_2d,y_train,epochs = 20, validation_split = 0.1)# ,callbacks = [es]) cnn.evaluate(x_test_2d,y_test) # submit to kaggle competition sample_submit = pd.read_csv("/kaggle/input/Kannada-MNIST/sample_submission.csv") real_test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv") sample_submit.head() real_test.head() real_test = real_test.iloc[:, 1:].to_numpy() real_test = real_test/255 real_test = real_test.astype("float32") real_test = real_test.reshape(-1, 28,28,1) predictions = cnn.predict(real_test) predictions[:3]Reading the images into a numpy arrayimport glob negative_image = glob.glob('/content/Negative/*') positive_image = glob.glob('/content/Positive/*') negative_image[0] from IPython.display import Image from keras.preprocessing.image import load_img import warnings from keras.preprocessing.image import img_to_array import numpy as np import pandas as pd import numpy as np import cv2 import matplotlib.pyplot as plt from google.colab.patches import cv2_imshow image = load_img(positive_image[0],color_mode = 'grayscale', target_size=(50,50)) print(type(image)) print(image.format) print(image.mode) print(image.size) display(image) from keras.preprocessing.image import load_img import warnings from keras.preprocessing.image import img_to_array import numpy as np import pandas as pd class MissingData(Exception): def __init__(self, message): self.message = message super().__init__(self.message) def images_to_dataframe(image_path_list): if image_path_list is None or len(image_path_list) == 0: raise MissingData('\nException: \nMissing Image List:\nEmpty list \ is passed or no list passed.') else: parsed_image = [] for i in image_path_list: image = load_img(i,color_mode = 'grayscale', target_size=(50,50)) image_array = img_to_array(image) / 255. onedarr = np.ravel(image_array) parsed_image.append(onedarr) if len(parsed_image)>0: df = pd.DataFrame(parsed_image) else: raise Exception('Exception: \nParsed Image List is Empty.') return df from keras.preprocessing.image import load_img import warnings from keras.preprocessing.image import img_to_array import numpy as np import pandas as pd class MissingData(Exception): def __init__(self, message): self.message = message super().__init__(self.message) def images_to_dataframe1(image_path_list): if image_path_list is None or len(image_path_list) == 0: raise MissingData('\nException: \nMissing Image List:\nEmpty list \ is passed or no list passed.') else: parsed_image = [] for i in image_path_list: img0 = cv2.imread(i) # converting to gray scale gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # remove noise #img = cv2.GaussianBlur(gray,(3,3),0) #laplacian = cv2.Laplacian(gray,cv2.CV_64F) #sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=5) sobely = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=5) # image_gaus = cv2.GaussianBlur(image,(3,3),0) img1 = cv2.resize(sobely, (50,50), interpolation = cv2.INTER_AREA) img2 = img1/255. 
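# the resized, normalized 50x50 Sobel-y edge map is flattened next into a 2500-element
# row vector, so each image contributes one row to the resulting dataframe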
onedarr = np.ravel(img2) parsed_image.append(onedarr) if len(parsed_image)>0: df = pd.DataFrame(parsed_image) else: raise Exception('Exception: \nParsed Image List is Empty.') return df negative_df2 = images_to_dataframe1(negative_image) negative_df2 positive_df2 = images_to_dataframe1(positive_image) positive_df2 negative_df2['target'] = [0]*20000 positive_df2['target'] = [1]*20000 data2 = pd.concat([negative_df2,positive_df2]) data2.to_csv('sobely.csv') !mv '/content/sobely.csv' '/content/drive/My Drive' data2.info() #images_to_dataframe(negative_image[1:100]).to_csv('negative.csv',mode='a') negative_df = images_to_dataframe(negative_image) negative_df['target'] = [0]*20000 positive_df = images_to_dataframe(positive_image) positive_df['target'] = [1]*20000 data = pd.concat([negative_df,positive_df]) data.to_csv('data.csv') !mv '/content/data.csv' '/content/drive/My Drive' data.info() ksize = 5 #Use size that makes sense to the image and fetaure size. Large may not be good. #On the synthetic image it is clear how ksize affects imgae (try 5 and 50) sigma = 0.1 #Large sigma on small features will fully miss the features. theta = 3*np.pi/4 #/4 shows horizontal 3/4 shows other horizontal. Try other contributions lamda = 1*np.pi /4 #1/4 works best for angled. gamma=0.2 #Value of 1 defines spherical. Calue close to 0 has high aspect ratio #Value of 1, spherical may not be ideal as it picks up features from other regions. phi = 0 #Phase offset. I leave it to 0. kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, phi, ktype=cv2.CV_32F) img = cv2.imread(positive_image[0]) #img = cv2.imread('BSE_Image.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) fimg = cv2.filter2D(img, cv2.CV_8UC3, kernel) kernel_resized = cv2.resize(kernel, (400, 400)) # Resize image cv2_imshow(kernel_resized) cv2_imshow(img) cv2_imshow(fimg) import cv2 import numpy as np from matplotlib import pyplot as plt # loading image #img0 = cv2.imread('SanFrancisco.jpg',) img0 = cv2.imread(positive_image[0]) # converting to gray scale gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # remove noise img = cv2.GaussianBlur(gray,(3,3),0) # convolute with proper kernels laplacian = cv2.Laplacian(img,cv2.CV_64F) sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) # x sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) # y canny_edges = cv2.Canny(img,100,200) # plt.subplot(3,2,1),plt.imshow(img,cmap = 'gray') # plt.title('Original'), plt.xticks([]), plt.yticks([]) # plt.subplot(3,2,2),plt.imshow(laplacian,cmap = 'gray') # plt.title('Laplacian'), plt.xticks([]), plt.yticks([]) # plt.subplot(3,2,3),plt.imshow(sobelx,cmap = 'gray') # plt.title('Sobel X'), plt.xticks([]), plt.yticks([]) # plt.subplot(3,2,4),plt.imshow(sobely,cmap = 'gray') # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([]) # plt.subplot(3,2,5),plt.imshow(canny_edges,cmap = 'gray') # plt.title('Canny Edge'), plt.xticks([]), plt.yticks([]) plt.imshow(canny_edges,cmap="gray") plt.show()The goal is to build a network to classify Reuters newswires into 46 mutually exclusive topics.Reuters dataset is a set of short newswires and their topics, publishedby Reuters in 1986. It’s a simple, widely used toy dataset for text classification. 
Thereare 46 different topics; some topics are more represented than others, but each topichas at least 10 examples in the training set.Reuters dataset comes packaged as part of Keras.# Load Dataset from keras.datasets import reuters from keras.utils.np_utils import to_categorical from keras import models from keras import layers import numpy as np import pandas as pdThe argument`num_words=10000` restricts the data to the10,000 most frequently occurring words found in the data.# Separate Training and Testing data (train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000) print("Training Data : ",len(train_data)) print( "Testing Data : ", len(test_data))Training Data : 8982 Testing Data : 2246Decoding data back to English wordsword_index = reuters.get_word_index() reversed_word_index = dict([(value, key) for (key, value) in word_index.items()]) decoded_newswire = ' '.join([reversed_word_index.get(i-3,'?') for i in train_data[0]]) train_labels[19]Vectorizing the Training datadef vectorize_sequences(sequences, dimension = 10000): results = np.zeros((len(sequences), dimension)) for i, sequence in enumerate(sequences): results[i, sequence] = 1. return results x_train = vectorize_sequences(train_data) x_test = vectorize_sequences(test_data)Vectorizing the Labelsone_hot_train_labels = to_categorical(train_labels) one_hot_test_labels = to_categorical(test_labels) # Designing our network model = models.Sequential() model.add(layers.Dense(64, activation='relu', input_shape=(10000,))) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(46, activation='softmax')) # Compiling the model model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) # Separating data for Validation x_val = x_train[:1000] partial_x_train = x_train[1000:] y_val = one_hot_train_labels[:1000] partial_y_train = one_hot_train_labels[1000:] # Training our network history = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))Epoch 1/20 16/16 [==============================] - 2s 57ms/step - loss: 3.0821 - accuracy: 0.4068 - val_loss: 1.6841 - val_accuracy: 0.6140 Epoch 2/20 16/16 [==============================] - 1s 40ms/step - loss: 1.4820 - accuracy: 0.6804 - val_loss: 1.2716 - val_accuracy: 0.7270 Epoch 3/20 16/16 [==============================] - 1s 40ms/step - loss: 1.0649 - accuracy: 0.7779 - val_loss: 1.1175 - val_accuracy: 0.7590 Epoch 4/20 16/16 [==============================] - 1s 41ms/step - loss: 0.8429 - accuracy: 0.8190 - val_loss: 1.0457 - val_accuracy: 0.7670 Epoch 5/20 16/16 [==============================] - 1s 40ms/step - loss: 0.6753 - accuracy: 0.8559 - val_loss: 0.9677 - val_accuracy: 0.8040 Epoch 6/20 16/16 [==============================] - 1s 39ms/step - loss: 0.5293 - accuracy: 0.8939 - val_loss: 0.9277 - val_accuracy: 0.8120 Epoch 7/20 16/16 [==============================] - 1s 40ms/step - loss: 0.4313 - accuracy: 0.9105 - val_loss: 0.9014 - val_accuracy: 0.8150 Epoch 8/20 16[...]Plotting the training and validation lossimport matplotlib.pyplot as plt loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show()Plotting the training and validation accuracyplt.clf() acc = history.history['accuracy'] val_acc = 
history.history['val_accuracy'] plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show()The network begins to overfit after nine epochs. Let’s train a new network fromscratch for nine epochs and then evaluate it on the test set.model = models.Sequential() model.add(layers.Dense(64, activation='relu', input_shape=(10000,))) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(46, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(partial_x_train, partial_y_train, epochs=9, batch_size=512, validation_data=(x_val, y_val)) results = model.evaluate(x_test, one_hot_test_labels)Epoch 1/9 16/16 [==============================] - 1s 50ms/step - loss: 3.0711 - accuracy: 0.4274 - val_loss: 1.7200 - val_accuracy: 0.6370 Epoch 2/9 16/16 [==============================] - 1s 41ms/step - loss: 1.4693 - accuracy: 0.6938 - val_loss: 1.3193 - val_accuracy: 0.7210 Epoch 3/9 16/16 [==============================] - 1s 40ms/step - loss: 1.0723 - accuracy: 0.7734 - val_loss: 1.1526 - val_accuracy: 0.7520 Epoch 4/9 16/16 [==============================] - 1s 39ms/step - loss: 0.8426 - accuracy: 0.8245 - val_loss: 1.0421 - val_accuracy: 0.7900 Epoch 5/9 16/16 [==============================] - 1s 40ms/step - loss: 0.6627 - accuracy: 0.8616 - val_loss: 0.9773 - val_accuracy: 0.8020 Epoch 6/9 16/16 [==============================] - 1s 41ms/step - loss: 0.5263 - accuracy: 0.8937 - val_loss: 0.9353 - val_accuracy: 0.8180 Epoch 7/9 16/16 [==============================] - 1s 40ms/step - loss: 0.4329 - accuracy: 0.9112 - val_loss: 0.9277 - val_accuracy: 0.8070 Epoch 8/9 16/16 [===[...]Playing with the hidden layersmodel = models.Sequential() model.add(layers.Dense(128, activation='relu', input_shape=(10000,))) model.add(layers.Dense(128, activation='relu')) model.add(layers.Dense(128, activation='relu')) model.add(layers.Dense(46, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(partial_x_train, partial_y_train, epochs=9, batch_size=512, validation_data=(x_val, y_val)) results = model.evaluate(x_test, one_hot_test_labels)Epoch 1/9 16/16 [==============================] - 3s 116ms/step - loss: 2.7897 - accuracy: 0.3731 - val_loss: 1.4117 - val_accuracy: 0.6960 Epoch 2/9 16/16 [==============================] - 2s 106ms/step - loss: 1.2396 - accuracy: 0.7166 - val_loss: 1.1330 - val_accuracy: 0.7400 Epoch 3/9 16/16 [==============================] - 2s 101ms/step - loss: 0.8636 - accuracy: 0.8037 - val_loss: 1.0195 - val_accuracy: 0.7800 Epoch 4/9 16/16 [==============================] - 2s 113ms/step - loss: 0.5991 - accuracy: 0.8677 - val_loss: 1.1781 - val_accuracy: 0.7390 Epoch 5/9 16/16 [==============================] - 2s 110ms/step - loss: 0.5061 - accuracy: 0.8853 - val_loss: 1.0225 - val_accuracy: 0.7840 Epoch 6/9 16/16 [==============================] - 2s 112ms/step - loss: 0.3313 - accuracy: 0.9282 - val_loss: 0.9561 - val_accuracy: 0.8230 Epoch 7/9 16/16 [==============================] - 2s 120ms/step - loss: 0.2369 - accuracy: 0.9437 - val_loss: 1.0429 - val_accuracy: 0.7930 Epoch 8/9 16/[...]Topic Modelling with MLlibAuthor: Date: 2016/04/10 In this notebook we will explore the utilitis for Topic Modelling available on MLlib.%matplotlib inline import matplotlib.pyplot as plt import pylab 
# Required imports from wikitools import wiki from wikitools import category # import nltk import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from test_helper import Test import collections from pyspark.mllib.clustering import LDA, LDAModel from pyspark.mllib.linalg import Vectors # import gensim # import numpy as np # import lda # import lda.datasets1. Corpus acquisition.In this notebook we will explore some tools for text processing and analysis and two topic modeling algorithms available from Python toolboxes.To do so, we will explore and analyze collections of Wikipedia articles from a given category, using `wikitools`, that makes easy the capture of content from wikimedia sites.(*As a side note, there are many other available text collections to test topic modelling algorithm. In particular, the NLTK library has many examples, that can explore them using the `nltk.download()` tool*. import nltk nltk.download()*for instance, you can take the gutemberg dataset* Mycorpus = nltk.corpus.gutenberg text_name = Mycorpus.fileids()[0] raw = Mycorpus.raw(text_name) Words = Mycorpus.words(text_name)*Also, tools like Gensim or Sci-kit learn include text databases to work with*).In order to use Wikipedia data, we will select a single category of articles:site = wiki.Wiki("https://en.wikipedia.org/w/api.php") # Select a category with a reasonable number of articles (>100) cat = "Economics" # cat = "Pseudoscience" print catEconomicsYou can try with any other categories. Take into account that the behavior of topic modelling algorithms may depend on the amount of documents available for the analysis. Select a category with at least 100 articles. You can browse the wikipedia category tree here, https://en.wikipedia.org/wiki/Category:Contents, for instance.We start downloading the text collection.# Loading category data. This may take a while print "Loading category data. This may take a while..." cat_data = category.Category(site, cat) corpus_titles = [] corpus_text = [] for n, page in enumerate(cat_data.getAllMembersGen()): print "\r Loading article {0}".format(n + 1), corpus_titles.append(page.title) corpus_text.append(page.getWikiText()) n_art = len(corpus_titles) print "\nLoaded " + str(n_art) + " articles from category " + catLoading category data. This may take a while... Loading article 316 Loaded 316 articles from category EconomicsNow, we have stored the whole text collection in two lists:* `corpus_titles`, which contains the titles of the selected articles* `corpus_text`, with the text content of the selected wikipedia articlesYou can browse the content of the wikipedia articles to get some intuition about the kind of documents that will be processed.# n = 5 # print corpus_titles[n] # print corpus_text[n]Now, we will load the text collection into an RDDcorpusRDD = sc.parallelize(corpus_text, 4) print "\nRDD created with {0} elements".format(corpusRDD.count()) Test.assertTrue(corpusRDD.count() >= 100, "Your corpus_tokens has less than 100 articles. Consider using a larger dataset")1 test passed.2. Corpus ProcessingTopic modelling algorithms process vectorized data. In order to apply them, we need to transform the raw text input data into a vector representation. To do so, we will remove irrelevant information from the text data and preserve as much relevant information as possible to capture the semantic content in the document collection.Thus, we will proceed with the following steps:1. Tokenization2. Homogeneization3. 
Cleaning4. VectorizationThe first three steps are independent for each document, so they can be parallelized. 2.1. Tokenization, Homogeneization and Cleaning.For the first steps, we will use some of the powerfull methods available from the [Natural Language Toolkit](http://www.nltk.org). In order to use the `word_tokenize` method from nltk, you might need to get the appropriate libraries using `nltk.download()`. You must select option "d) Download", and identifier "punkt"# You can comment this if the package is already available. # Select option "d) Download", and identifier "punkt" # nltk.download()Also, we need to load a list of english stopwords. Select now identifier "stopwords"# You can comment this if the package is already available. # Select option "d) Download", and identifier "stopwords" # nltk.download()You can check the stopword list. This is a standard python list of strings. We could modify it by removing words or adding new ones if required.stopwords_en = stopwords.words('english') print "The stopword list contains {0} elements: ".format(len(stopwords_en)) print stopwords_enThe stopword list contains 153 elements: [u'i', u'me', u'my', u'myself', u'we', u'our', u'ours', u'ourselves', u'you', u'your', u'yours', u'yourself', u'yourselves', u'he', u'him', u'his', u'himself', u'she', u'her', u'hers', u'herself', u'it', u'its', u'itself', u'they', u'them', u'their', u'theirs', u'themselves', u'what', u'which', u'who', u'whom', u'this', u'that', u'these', u'those', u'am', u'is', u'are', u'was', u'were', u'be', u'been', u'being', u'have', u'has', u'had', u'having', u'do', u'does', u'did', u'doing', u'a', u'an', u'the', u'and', u'but', u'if', u'or', u'because', u'as', u'until', u'while', u'of', u'at', u'by', u'for', u'with', u'about', u'against', u'between', u'into', u'through', u'during', u'before', u'after', u'above', u'below', u'to', u'from', u'up', u'down', u'in', u'out', u'on', u'off', u'over', u'under', u'again', u'further', u'then', u'once', u'here', u'there', u'when', u'where', u'why', u'how', u'all', u'any', u'both', u'each', u'few', u'more', u'most', u'[...]**Task**: Create a method `getTokenList` with two inputs: a document (string) and a stopword list, and completes the first three steps of the corpus processing, as follows:1. Tokenization: convert string to `utf-8` and transform the string into a list of tokens, using `word_tokenize` from `nltk.tokenize`.2. Homogeneization: transform capital letters to lowercase and remove non alphanumeric tokens.3. 
Cleaning: remove stopwordsReturn the result of cleaning (a list of tokens).def getTokenList(doc, stopwords_en): # scode: tokens = # Tokenize docs tokens = word_tokenize(doc.decode('utf-8')) # scode: tokens = # Remove non-alphanumeric tokens and normalize to lowercase tokens = [t.lower() for t in tokens if t.isalnum()] # scode: tokens = # Remove stopwords tokens = [t for t in tokens if t not in stopwords_en] return tokens Test.assertEquals(getTokenList('The rain in spain stays mainly in the plane', stopwords_en), [u'rain', u'spain', u'stays', u'mainly', u'plane'], 'getTokenList does not return the expected results')1 test passed.**Task**: Apply `getTokenList` to all documents in the corpus and save the result in a `corpus_tokensRDD`# scode: corpus_tokensRDD = corpus_tokensRDD = (corpusRDD .map(lambda x: getTokenList(x, stopwords_en)) .cache()) # print "\n Let's check tokens after cleaning:" print corpus_tokensRDD.take(1)[0][0:30] Test.assertEquals(corpus_tokensRDD.count(), n_art, "The number of documents in the original set does not correspond to the size of corpus_tokensRDD") Test.assertTrue(all([c==c.lower() for c in corpus_tokensRDD.take(1)[0]]), 'Capital letters have not been removed') Test.assertTrue(all([c.isalnum() for c in corpus_tokensRDD.take(1)[0]]), 'Non alphanumeric characters have not been removed') Test.assertTrue(len([c for c in corpus_tokensRDD.take(1)[0] if c in stopwords_en])==0, 'Stopwords have not been removed')1 test passed. 1 test passed. 1 test passed. 1 test passed.2.2. Stemming / LemmatizationNow we will apply stemming and lemmatization to `corpus_tokensRDD`. We will test our topic models over the resulting RDDs, to test their differences. **Task**: Apply stemming to all documents `corpus_tokensRDD` and save the result in a new RDD, `corpus_stemmedRDD`.# Select stemmer. stemmer = nltk.stem.SnowballStemmer('english') # scode: corpus_stemRDD = corpus_stemRDD = corpus_tokensRDD.map(lambda x: [stemmer.stem(token) for token in x]) print "\nLet's check the first tokens from document 0 after stemming:" print corpus_stemRDD.take(1)[0][0:30] Test.assertTrue((len([c for c in corpus_stemRDD.take(1)[0] if c!=stemmer.stem(c)]) < 0.1*len(corpus_stemRDD.take(1)[0])), 'It seems that stemming has not been applied properly')1 test passed.Alternatively, we can apply lemmatization. For english texts, we can use the lemmatizer from NLTK, which is based on [WordNet](http://wordnet.princeton.edu). If you have not used wordnet before, you will likely need to download it from nltk# You can comment this if the package is already available. 
# Select option "d) Download", and identifier "wordnet" # nltk.download()**Task**: Lemmatize all documents `corpus_tokensRDD` using the .lemmatize() method, from the WordNetLemmatizer object created in the first line and save the result in a new RDD, `corpus_lemRDD`.wnl = WordNetLemmatizer() # scode: corpus_lemmatRDD = corpus_lemmatRDD = (corpus_tokensRDD .map(lambda x: [wnl.lemmatize(token) for token in x])) print "\nLet's check the first tokens from document 0 after stemming:" print corpus_lemmatRDD.take(1)[0][0:30]Let's check the first tokens from document 0 after stemming: [u'social', u'disambiguation', u'theory', u'journal', u'economics', u'economics', u'sidebar', u'economics', u'social', u'science', u'describes', u'factor', u'determine', u'production', u'economics', u'distribution', u'economics', u'consumption', u'economics', u'good', u'service', u'term', u'come', u'ancient', u'greek', u'wikt', u'wikt', u'house', u'wikt', u'custom']One of the advantages of the lemmatizer method is that the result of lemmatization is still a true word, which is more advisable for the presentation of text processing results and lemmatization.However, without using contextual information, lemmatize() does not remove grammatical differences. This is the reason why "is" or "are" are preserved and not replaced by infinitive "be".As an alternative, we can apply `.lemmatize(word, pos)`, where 'pos' is a string code specifying the part-of-speech (pos), i.e. the grammatical role of the words in its sentence. For instance, you can check the difference between `wnl.lemmatize('is')` and `wnl.lemmatize('is, pos='v')`. 2.4. VectorizationUp to this point, we have transformed the raw text collection of articles in a list of articles, where each article is a collection of the word roots that are most relevant for semantic analysis. Now, we need to convert these data (a list of token lists) into a numerical representation (a list of vectors, or a matrix). 2.4.1. Word CountAs a first step, we compute the word count for every document in the corpus. **Task**: Compute a new RDD from `corpus_stemRDD` where each element is a list of tuples related to a document. The key of each tuple is a token, and its value the number of occurrences of this token in the document. To do so, you can use method `Counter` from `collections`.# corpus_wcRDD = corpus_wcRDD = (corpus_stemRDD .map(collections.Counter) .map(lambda x: [(t, x[t]) for t in x])) print corpus_wcRDD.take(1)[0][0:30] Test.assertTrue(corpus_wcRDD.count() == n_art, 'List corpus_clean does not contain the expected number of articles') Test.assertTrue(corpus_wcRDD.flatMap(lambda x: x).map(lambda x: x[1]).sum()== corpus_stemRDD.map(len).sum(), 'The total token count in the output RDD is not consistent with the total number of input tokens')1 test passed. 1 test passed.At this point, we have got a representation of documents as list of tuples `(token, word_count)` in `corpus_wcRDD`. From this RDD, we can compute a dictionary containing all tokens in the corpus as keys, and their respective number of occurrences as values. 
**Task**: Using `corpus_wcRDD` compute a new RDD of `(key, value)` pairs, where keys are the tokens in the whole corpus and their respective values are the total number of occurences in the corpus.# scode: wcRDD = < FILL IN > wcRDD = (corpus_wcRDD .flatMap(lambda x: x) .reduceByKey(lambda x, y: x + y)) print wcRDD.take(30)[(u'entropi', 1), (u'four', 57), (u'catch', 2), (u'roadblock', 1), (u'fredric', 1), (u'theodor', 5), (u'kerala', 4), (u'crete', 1), (u'freshwat', 1), (u'cyprus', 1), (u'fudg', 1), (u'introd', 2), (u'categori', 1136), (u'stationeri', 1), (u'160', 4), (u'tobia', 1), (u'charnley', 1), (u'groundwork', 2), (u'340', 1), (u'rickman', 1), (u'pamela', 1), (u'lautenbachsch', 1), (u'lora', 1), (u'merchant', 34), (u'20antiqu', 1), (u'everi', 83), (u'scientia', 1), (u'hough', 1), (u'distortionari', 1), (u'escudero', 3)]**Task**: Take all tuples in `wcRDD` in decreasing order of the number of token counts in variable `TD` and compute two lists: 1. `token_count`: a list of token counts, in decreasing order.2. `D`: A list of tokens, in the same order.# Token Dictionary: n_tokens = wcRDD.count() # scode: TD = wcRDD. TD = wcRDD.takeOrdered(n_tokens, lambda x: -x[1]) # scode: D = # Extract tokens from TD D = map(lambda x: x[0], TD) # scode: token_count = # Extract token counts from TD token_count = map(lambda x: x[1], TD) # ALTERNATIVELY: TD_RDD = wcRDD.sortBy(lambda x: -x[1]) D_RDD = TD_RDD.map(lambda x: x[0]) token_countRDD = TD_RDD.map(lambda x: x[1]) print TDWe can visualize the token distribution using `D` and `token_count`, for the most frequent terms# SORTED TOKEN FREQUENCIES (II): # plt.rcdefaults() # Example data n_bins = 25 y_pos = range(n_bins-1, -1, -1) hot_tokens = D[0:n_bins] z = [float(t)/n_art for t in token_count[0:n_bins]] plt.barh(y_pos, z, align='center', alpha=0.4) plt.yticks(y_pos, hot_tokens) plt.xlabel('Average number of occurrences per article') plt.title('Token distribution') plt.show()3. Latent Dirichlet AllocationIn order to apply the LDA algorithm, we need to represent the input documents in the format required by MLlib. More specifically. The input data should be an RDD where each element is a tuple (doc_id, vector)where `doc_id` is an integer document identifier, and `vector` can be a sparse or dense vector from class `Vectors`. We will use sparse vectors, which are more adequate for large vocabularies. To compute the sparse vectors, we must first transform the lists of tuples `(token, value)` in `wcRDD` into a lists of `(token_id, value)`, pairs, thus replacing each token by a numerical identifier.We will proceed in two steps:1. Compute an inverse dictionary, `invD`, transforming tokens into numbers.2. Apply the inverse dictionary to compute a new RDD from `wcRDD` replacing each token by its `token_id`. [** Task**: complete the two steps outlined above.# INDICE INVERTIDO: EJEMPLO: # D = ['token1', 'token2', 'token3', 'token4'] # D[1] = 'token2' # invD = {'token1': 0, 'token2': 1, 'token3': 2, 'token4': 3} # invD['token2'] = 1 # Compute inverse dictionary # scode: invD = invD = dict(zip(D, xrange(n_tokens))) ### ALTERNATIVELY: # invD_RDD = D_RDD.zipWithIndex() ### Tuples (token, index) # Compute RDD replacing tokens by token_ids # scode: corpus_sparseRDD = corpus_sparseRDD = corpus_wcRDD.map(lambda x: [(invD[t[0]], t[1]) for t in x]) # Convert list of tuplas into Vectors.sparse object. 
corpus_sparseRDD = corpus_sparseRDD.map(lambda x: Vectors.sparse(n_tokens, x))The only remaining step consists of adding an identifier to each document of the corpus.**Task**: Apply method `zipWithIndex` to `corpus_sparseRDD` in order to add consecutive integer identifiers to all documents in the corpus.corpus4lda = corpus_sparseRDD.zipWithIndex().map(lambda x: [x[1], x[0]]).cache()That's all. We can now call the LDA algorithm.**Task**: Train an LDA model with 3 topics and the corpus obtained in `corpus4lda`. Check the [LDA documentation](http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda) to find the appropriate command.print "Training LDA: this might take a while..." # scode: ldaModel = LDA. ldaModel = LDA.train(corpus4lda, k=3)Training LDA: this might take a while...The whole topics matrix can be retrieved using the `.topicsMatrix()` method.# Output topics. Each is a distribution over words (matching word count vectors) print("Learned topics (as distributions over vocab of " + str(ldaModel.vocabSize()) + " words):") topics = ldaModel.topicsMatrix()Learned topics (as distributions over vocab of 14905 words):Alternatively, we can use the `.describeTopics` method, which returns the most relevant terms for each topic and is more useful for a graphical plot.**Task**: Represent the 25 most relevant terms for each topic using bar plots.n_bins = 25 # Example data y_pos = range(n_bins-1, -1, -1) pylab.rcParams['figure.figsize'] = 16, 8 # Set figure size for i in range(3): topic = ldaModel.describeTopics(maxTermsPerTopic=n_bins)[i] tokens = [D[n] for n in topic[0]] weights = topic[1] plt.subplot(1, 3, i+1) plt.barh(y_pos, weights, align='center', alpha=0.4) plt.yticks(y_pos, tokens) plt.xlabel('Average number of occurrences per article') plt.title('Token distribution')**Exercise**: Explore the influence of the `topicConcentration` parameter. Show in barplots the most relevant tokens for each topic for large values of this parameter. Unfortunately, we cannot capture the document distributions over topics in the current version of pySpark MLlib (1.6). 4. Latent Semantic IndexingLSI is not directly available in MLlib. There are methods to compute the SVD decomposition of a matrix, which is the core transformation for LSI, but unfortunately the SVD decomposition is exposed only in Java and Scala, not in Python. The following code, taken from Stack Overflow, can be used to compute the SVD.from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper from pyspark.mllib.linalg.distributed import RowMatrix class SVD(JavaModelWrapper): """Wrapper around the SVD scala case class""" @property def U(self): """ Returns a RowMatrix whose columns are the left singular vectors of the SVD if computeU was set to be True.""" u = self.call("U") if u is not None: return RowMatrix(u) @property def s(self): """Returns a DenseVector with singular values in descending order.""" return self.call("s") @property def V(self): """ Returns a DenseMatrix whose columns are the right singular vectors of the SVD.""" return self.call("V") def computeSVD(row_matrix, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V' where * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. 
* U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) :param k: number of singular values to keep. We might return less than k if there are numerically zero singular values. :param computeU: Whether of not to compute U. If set to be True, then U is computed by A * V * sigma^-1 :param rCond: the reciprocal condition number. All singular values smaller than rCond * sigma(0) are treated as zero, where sigma(0) is the largest singular value. :returns: SVD object """ java_model = row_matrix._java_matrix_wrapper.call("computeSVD", int(k), computeU, float(rCond)) return SVD(java_model) from pyspark.ml.feature import * from pyspark.mllib.linalg import Vectors data = [(Vectors.dense([0.0, 1.0, 0.0, 7.0, 0.0]),), (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),), (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)] df = sqlContext.createDataFrame(data,["features"]) pca_extracted = PCA(k=2, inputCol="features", outputCol="pca_features") model = pca_extracted.fit(df) features = model.transform(df) # this create a DataFrame with the regular features and pca_features # We can now extract the pca_features to prepare our RowMatrix. pca_features = features.select("pca_features").rdd.map(lambda row : row[0]) mat = RowMatrix(pca_features) # Once the RowMatrix is ready we can compute our Singular Value Decomposition svd = computeSVD(mat,2,True) print svd.s # DenseVector([9.491, 4.6253]) print svd.U.rows.collect() # [DenseVector([0.1129, -0.909]), DenseVector([0.463, 0.4055]), DenseVector([0.8792, -0.0968])] print svd.V # DenseMatrix(2, 2, [-0.8025, -0.5967, -0.5967, 0.8025], 0)[9.49101642128,4.62528901289] [DenseVector([0.1129, -0.909]), DenseVector([0.463, 0.4055]), DenseVector([0.8792, -0.0968])] DenseMatrix([[-0.80247931, -0.59667995], [-0.59667995, 0.80247931]])Data Cleaning with Pandas====================== Overview Questions What does 'clean data' mean? How can I drop unnecessary data from my dataframe? How can I change column or row names in a dataframe? How can I cast columns to the correct data type? Objectives: Use pandas to drop unnecessary data from our dataframe. Learn how to rename pandas columns. Use pandas string methods to correct characters. Learn how to cast columns to the correct data type. Keypoints: Data cleaning prepares data for analysis. Pandas has built-in methods for handling data cleaning, particular missing data. In this section, we'll read in the data we extracted in the last lesson. You may have noticed in the last session that the data in these dataframes didn't look great. There were columns that appeared to have no values. Once we start working with the data, we are going to see some additional problems.import os import pandas as pd fpath = os.path.join("data", "potts_table1.csv") fpath2 = os.path.join("data", "potts_table2.csv") table1 = pd.read_csv(fpath) table2 = pd.read_csv(fpath2) table1.head()Dropping unneccessary dataIn some cases, we might have data in our dataframe that we don't need. We will want to discard or "drop" this data from the dataframe. For the dataframe we just loaded, for example, we can see that the data in columns 0, 1, 4, 12 appear to not have any values.Check your understanding What pandas method can you use to see how many non-null values you have in each column? ```{admonition} Solution:class: dropdown```pythontable1.info()``` There are two methods you might use to drop data from a dataframe. 
These are `drop`, and `dropna`. Drop is used when you have specific rows or columns you want to remove from the dataframe, while `dropna` is used when you want to drop columns or rows which contain `NaN` or "not a number" values. This occurs when there are no values in a data cell.In the output of `info` above, we can see that there are two columns which contain 0 non-null values. This means that all of the values in these columns are `NaN`. We can safely discard these columns. We'll use the `dropna` function to get rid of them.help(table1.dropna)Help on method dropna in module pandas.core.frame: dropna(axis=0, how='any', thresh=None, subset=None, inplace=False) method of pandas.core.frame.DataFrame instance Remove missing values. See the :ref:`User Guide ` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. versionchanged:: 1.0.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, dro[...]Before saving the dataframe, we'll look at and and discuss output from this function. By default, the function `dropna` will work on `axis 0` or the rows of the dataframe, and will drop any row which contains a `NaN`. You will see this results in a dataframe with no data.table1.dropna()Notice that `dropna` returns a dataframe and does not overwrite the original.table1.info() RangeIndex: 37 entries, 0 to 36 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 37 non-null int64 1 Unnamed: 0.1 1 non-null object 2 Compound 37 non-null object 3 log P 37 non-null object 4 Unnamed: 1 0 non-null float64 5 II 37 non-null float64 6 Hy 37 non-null float64 7 H, 37 non-null float64 8 MV 37 non-null object 9 R, 37 non-null float64 10 log Kou 37 non-null object 11 log Kyex 31 non-null object 12 Unnamed: 2 0 non-null float64 13 log Kpep 25 non-null object dtypes: float64(6), int64(1), object(7) memory usage: 4.2+ KBWe can switch to dropping columns which have `NaN` values by adding the argument `axis=1`.table1.dropna(axis=1).info() RangeIndex: 37 entries, 0 to 36 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 37 non-null int64 1 Compound 37 non-null object 2 log P 37 non-null object 3 II 37 non-null float64 4 Hy 37 non-null float64 5 H, 37 non-null float64 6 MV 37 non-null object 7 R, 37 non-null float64 8 log Kou 37 non-null object dtypes: float64(4), int64(1), object(4) memory usage: 2.7+ KBThis is closer to what we want. However, you'll notice that this has dropped some columns which have data. By default, pandas will drop a column which contains **any** `NaN` values. This may not be what we want in many cases because some values may simply be missing rather than incorrect.We can add an additional argument, `how=all`, to drop only columns whose values are **all** `NaN`. By default, this function argument is `how=any`. 
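As a quick illustration of the difference between the two settings on made-up data (not the Potts tables):

```python
import numpy as np
import pandas as pd

toy = pd.DataFrame({"a": [1, 2, 3],
                    "b": [np.nan, 5, 6],             # partially missing
                    "c": [np.nan, np.nan, np.nan]})  # entirely missing

print(toy.dropna(axis=1).columns.tolist())             # ['a'] -- how='any' drops both 'b' and 'c'
print(toy.dropna(axis=1, how="all").columns.tolist())  # ['a', 'b'] -- only the all-NaN column is dropped
```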
Once we are sure we would like to keep this as our dataframe, we can add `inplace=True` to the function call to overwrite the dataframe.table1.dropna(axis=1, how="all")The output above looks like something to keep, so we will add `inplace=True` to overwrite the original dataframe.table1.dropna(axis=1, how="all", inplace=True) table1.info() RangeIndex: 37 entries, 0 to 36 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 37 non-null int64 1 Unnamed: 0.1 1 non-null object 2 Compound 37 non-null object 3 log P 37 non-null object 4 II 37 non-null float64 5 Hy 37 non-null float64 6 H, 37 non-null float64 7 MV 37 non-null object 8 R, 37 non-null float64 9 log Kou 37 non-null object 10 log Kyex 31 non-null object 11 log Kpep 25 non-null object dtypes: float64(4), int64(1), object(7) memory usage: 3.6+ KBWe can drop the final two columns using the `drop` function. You can use this when you have specific rows or columns you would like to discard. Again, we use `axis=1` to drop columns, then we pass the column name.table1.drop(axis=1, columns=["Unnamed: 0.1", "Unnamed: 0"], inplace=True)Changing column namesOur column names are still incorrect. You will likely want to change them to make the table more legible.table1.info() RangeIndex: 37 entries, 0 to 36 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Compound 37 non-null object 1 log P 37 non-null object 2 II 37 non-null float64 3 Hy 37 non-null float64 4 H, 37 non-null float64 5 MV 37 non-null object 6 R, 37 non-null float64 7 log Kou 37 non-null object 8 log Kyex 31 non-null object 9 log Kpep 25 non-null object dtypes: float64(4), object(6) memory usage: 3.0+ KBWe might now want to clean up the column names and make sure they are descriptive. You can see the column names using `table1.columns`. You can either rename the columns by setting `table1.columns` to a list of the appropriate length, or you can use `table1.rename`. In the `.rename` method, you put the argument `columns` and set it equal to a dictionary (curly brackets) where you use the syntax```python"current_column_name": "new_column_name"```table1.columns table1.rename(inplace=True, columns={ "II": "pi", "Hy": "Hd", "H,": "Ha", "R,": "R_2", "log Kou": "log K_oct", "log Kyex": "log K_hex", "log Kpep": "log K_hep" }) table1.head()Fixing Data Types When examining `.info` , you'll notice that a lot of our columns which should be numbers are still 'objects' or strings. We would like `log P`, for example to be numeric. Typically if a column appears that it should be numeric, but pandas does not automatically cast it as such, it is because there are some non-numeric characters in the column which pandas could not decide what to do with. 
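A minimal, made-up example of how a single stray character in an otherwise numeric column makes the cast fail:

```python
import pandas as pd

# toy column; the last entry contains a unit string that pandas cannot parse
col = pd.Series(["1.23", "0.45", "6.85 kg"])

try:
    pd.to_numeric(col)
except ValueError as err:
    print(err)  # pandas reports the value it could not parse and its position
```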
We will need to examine these, decide what to do with them, then cast the column as numeric.There are a few ways to do this, but we'll use the pandas function `to_numeric`.table1.info() RangeIndex: 37 entries, 0 to 36 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Compound 37 non-null object 1 log P 37 non-null object 2 pi 37 non-null float64 3 Hd 37 non-null float64 4 Ha 37 non-null float64 5 MV 37 non-null object 6 R_2 37 non-null float64 7 log K_oct 37 non-null object 8 log K_hex 31 non-null object 9 log K_hep 25 non-null object dtypes: float64(4), object(6) memory usage: 3.0+ KBUsing the `to_numeric` function without any additional inputs will fail on this data set.pd.to_numeric(table1["log P"])Scrolling to the bottom of this message and reading the error, you will see it is having a problem reading the value `"— 6.85"`. It may not seem obvious what this problem is at first. When we run into a problem like this we have a few options. You could choose to handle the errors differently. Pandas lets you choose what it should do when it is unable to cast a value. By default, it will fail (which is what we see above). For example, you could also set errors to be ignored (which would result in the column being unchanged; there would just be no error raised) or to "coerce" the values. Choosing "coerce" means that anything that can't be cast as numeric will be set to `NaN`.Let's see what happens when we set errors to coerce.pd.to_numeric(table1["log P"], errors="coerce")This unfortunately results in no numeric characters being recognized.We have to do a little bit more processing to the values for this to work. If you examine the columns, you may notice that the negative sign is a little off. It is `—` when it should be `-`. This is very slight, and might be hard to see, but it is important to change for this data set.We will want to replace all `—` with `-`. We could accomplish this using the string method `replace`. Strings in Python have a number of methods. The `replace` method allows us to replace a substring within a string.test_string = "Hello world." test_string.replace(".", "!")The split command is another string method you are probably familiar with:test_string.split()Pandas string methods If we want to use these on a column in a pandas dataframe, you might think to use `apply`, which we learned about in the last session. However, you will notice that the `replace` method acts on the string itself and doesn't fit into `apply`.Luckily, when pandas columns are strings, we can use string methods on the whole column by adding `.str.function`. For example, to replace the minus signstable1["log P"].str.replace("—", "-") table1["log P"] = table1["log P"].str.replace("—", "-") # We still need to get rid of spaces table1["log P"] = table1["log P"].str.replace(" ", "") table1["log P"] = pd.to_numeric(table1["log P"], errors="coerce") table1.info() RangeIndex: 37 entries, 0 to 36 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Compound 37 non-null object 1 log P 34 non-null float64 2 pi 37 non-null float64 3 Hd 37 non-null float64 4 Ha 37 non-null float64 5 MV 37 non-null object 6 R_2 37 non-null float64 7 log K_oct 37 non-null object 8 log K_hex 31 non-null object 9 log K_hep 25 non-null object dtypes: float64(5), object(5) memory usage: 3.0+ KBWe actually need to change this character on all of our columns. However `str` methods only work on pandas series. 
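A toy illustration of that limitation (hypothetical column names, not our table):

```python
import pandas as pd

toy = pd.DataFrame({"a": ["1. 5", "2 .0"], "b": ["3.1", "4 . 2"]})

print(toy["a"].str.replace(" ", "", regex=False))  # works: a single column is a Series
# toy.str.replace(" ", "")  # AttributeError: a DataFrame has no .str accessor
```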
If we want to replace a string across all of our DataFrame, we will use the `.replace` method. In order for it to recognize substrings, set the option `regex=True`. We will discuss `regex` more in the next session, but this is all you need to know about regex for the moment.table1.replace("—", "-", regex=True, inplace=True) table1.replace(" ", "", regex=True, inplace=True)Changing the data type of multiple columns To change the data type of multiple columns, we will want to use the `pd.to_numeric` function on all of those columns. There are several ways you might choose to do this. For example, you might just choose to call the function for each column.We can also accomplish this by using the `apply` operator which we learned about in the last session. The `apply` operator should be used whenever you want to apply a function to a row or column. In this case, we want to apply the `pd.to_numeric` function to each column.Because we want to apply to the columns, we add the argument `axis=1`.table1.apply(pd.to_numeric, axis=1)When we try this code, we immediately see an error. We do not want to try to convert the first column to a number. We can use the `iloc` function to exclude the first column:table1.iloc[:, 1:].apply(pd.to_numeric, axis=1)An error again! This time, we see failure because a string was incorrectly read from the pdf and could not be converted to a number. You could choose to handle this differently, but for this workshop we are just going to discard values like these. If we were using `to_numeric` on a pandas series, we would use the option `errors="coerce"`. You may not see immediately how to use this with the `apply` function, but fortunately, pandas allows us to pass additional arguments with `apply`:table1.iloc[:, 1:] = table1.iloc[:, 1:].apply(pd.to_numeric, axis=1, errors="coerce") table1.info() table1.to_csv("data/potts_table1_clean.csv", index=False)!pip install PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials import multiprocessing from google.colab import drive auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) from google.colab import drive drive.mount('/content/drive') cd drive/My Drive/ !git clone https://github.com/xinntao/ESRGAN cd ESRGAN !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MJFgqXJrMkPdKtiuy7C6xfsU1QIbXEb-' -O models/RRDB_ESRGAN_x4.pth !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1mSJ6Z40weL-dnPvi390xDd3uZBCFMeqr' -O models/RRDB_PSNR_x4.pth from os import path from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu' !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision import torch print(torch.__version__) print(torch.cuda.is_available()) !python test.py models/RRDB_ESRGAN_x4.pth !python test.py models/RRDB_PSNR_x4.pthModel path models/RRDB_PSNR_x4.pth. Testing... 1 mukti (61) /usr/local/lib/python3.6/dist-packages/torch/nn/modules/upsampling.py:122: UserWarning: nn.Upsampling is deprecated. Use nn.functional.interpolate instead. warnings.warn("nn.Upsampling is deprecated. 
Use nn.functional.interpolate instead.")Test SKNW for Cahn-Hillard dataset.#os.chdir(r'/Users/devyanijivani/git/pygraspi/notebooks/data') dest = "/Users/devyanijivani/git/pygraspi/notebooks/junctions" myFiles = glob.glob('*.txt') myFiles.sort() for i, file in enumerate(myFiles): morph = np.array(pandas.read_csv(file, delimiter=' ', header=None)).swapaxes(0, 1) skel, distance = medial_axis(morph, return_distance=True) graph = sknw.build_sknw(skel) for (s,e) in graph.edges(): ps = graph[s][e]['pts'] plt.plot(ps[:,1], ps[:,0], 'green', zorder=-1) # draw node by o nodes = graph.nodes() ps = np.array([nodes[i]['o'] for i in nodes], dtype = int) plt.scatter(ps[:,1], ps[:,0], s = 1, c ='r') # title and show plt.title('Build Graph') plt.gca().set_aspect('equal') print(os.path.splitext(file)[0]) file_loc = os.path.join(dest, os.path.splitext(file)[0]+'.png') #print(file_loc) #plt.savefig(file_loc,dpi=1200) plt.close() pwd def skeletonize(morph): skel, distance = medial_axis(morph, return_distance=True) return skel, distance morph = np.array([[1,1,1],\ [1,1,1],\ [1,1,1]]) skel = skeletonize(morph)[0] skel def getEndJunction(graph): l = [graph.degree[n] for n in graph.nodes()] return np.array([l.count(1), l.count(3)]) graph = sknw.build_sknw(skel) getEndJunction(graph) def getBranchLen(graph): b_l = [graph.edges[e]['weight'] for e in graph.edges()] return np.array([len(b_l), round(sum(b_l)/len(b_l), 2)]) getBranchLen(graph)Part 1 Let's compute the power level.def hundred_digit(n): nstr = str(n) if len(nstr) > 2: return int(nstr[-3]) else: raise NotImplementedError('Problem here.') def compute_power_level(x, y, serial_number): rack_id = x + 10 power_level = rack_id * y power_level = power_level + serial_number power_level *= rack_id power_level = hundred_digit(power_level) power_level -= 5 return power_levelUnit tests:serial_number = 8 (x, y) = 3, 5 compute_power_level(x, y, serial_number) compute_power_level(122, 79, 57) compute_power_level(217,196, 39) compute_power_level(101,153,71)Let's also see if we get the two examples on matrices right:import numpy as np mat = np.empty((5, 5)) for c in range(32, 37): for r in range(44, 49): x, y = c, r power_level = compute_power_level(x, y, 18) mat[r-44, c-32] = power_level mat mat = np.empty((5, 5)) for c in range(20, 25): for r in range(60, 65): x, y = c, r power_level = compute_power_level(x, y, 42) mat[r-60, c-20] = power_level matLet's build the 300 by 300 grid.mat = np.empty((300, 300)) for c in range(300): for r in range(300): x, y = c+1, r+1 mat[r, c] = compute_power_level(x, y, 5791) matLet's find the largest 3x3 region.def compute_max_3x3(mat): rmax, cmax, max_total = None, None, 0 for c in range(300-3): for r in range(300-3): region = mat[r:r+3, c:c+3] if np.sum(region) > max_total: max_total = np.sum(region) rmax, cmax = r, c return cmax+1, rmax+1 compute_max_3x3(mat)Part 2 We have to rewrite the sliding window code, for which we can also use Numba.from numba import njit @njit def compute_max_any_size(mat, size): rmax, cmax, max_total = -2, -2, 0 for c in np.arange(300-size): for r in np.arange(300-size): region = mat[r:r+size, c:c+size] if np.sum(region) > max_total: max_total = np.sum(region) rmax, cmax = r, c return cmax+1, rmax+1, size, max_total compute_max_any_size(mat, 3) from tqdm import tqdm_notebook scores = [] for size in tqdm_notebook(range(1, 301)): scores.append(compute_max_any_size(mat, size)) max(scores, key=lambda items: items[3])Using pythran%load_ext pythran.magic %%pythran import numpy as np #pythran export 
compute_max_any_size_pythran(float[:, :], int) def compute_max_any_size_pythran(mat, size): rmax, cmax, max_total = -2, -2, 0 for c in np.arange(300-size): for r in np.arange(300-size): region = mat[r:r+size, c:c+size] if np.sum(region) > max_total: max_total = np.sum(region) rmax, cmax = r, c return cmax+1, rmax+1, size, max_total compute_max_any_size_pythran(mat, 4) scores = [] for size in tqdm_notebook(range(1, 301)): scores.append(compute_max_any_size_pythran(mat, size))1) Create a csv of labels and show how it can be returned by the CSV Readerfrom niftynet.utilities.download import download download('mr_ct_regression_model_zoo_data') labels_location = 'ct.csv' files = [file for file in os.listdir('/home/tom/niftynet/data/mr_ct_regression/CT_zero_mean') if file.endswith('.nii.gz')] pd.DataFrame(data=[(file.replace('.nii.gz', ''), file.replace('.nii.gz', '')) for file in files]).to_csv('label.csv', index=None, header=['subject_id', 'label']) pd.read_csv('label.csv') #### Testing the CSV Reader on labels # Make sure we accept 'Label', 'label', 'LABEL' task_param = TaskParam(['image']) image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}} #csv_data_file is a csv with data csv_data_param = {'label': {'csv_data_file': 'label.csv', 'to_ohe': True}} grouping_param = {'image': (['CT'])} image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param) image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files) preprocessing = Preprocessing(net_param, action_param, task_param) normalisation_layers = preprocessing.prepare_normalisation_layers() augmentation_layers = preprocessing.prepare_augmentation_layers() image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers) csv_reader = CSVReader(('label',)).initialise(csv_data_param, {'label': (['label'])}, file_list=image_sets_partitioner.all_files) print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['label'])) window_sizes = {'image': (100, 100, 1), 'label': (1, 1, 1)} sampler = ResizeSampler(reader=image_reader, csv_reader=csv_reader, window_sizes=window_sizes, num_threads=2, smaller_final_batch_mode='drop', batch_size=2, queue_length=2) sample = next(sampler()) print(sample['image'].shape) print(sample['label'].shape)INFO:niftynet: Number of subjects 15, input section names: ['subject_id', 'CT'] -- using all subjects (without data partitioning). INFO:niftynet: Image reader: loading 15 subjects from sections ['CT'] as input [image] WARNING:niftynet: This method will read your entire csv into memory One sample from the csv_reader: [0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] INFO:niftynet: reading size of preprocessed images WARNING:niftynet: sampler queue_length should be larger than batch_size, defaulting to batch_size * 5.0 (10). 
(1, 100, 100, 1, 1, 1) (1, 15, 1, 1, 1, 1)2) Create a csv of features and show how it can be returned by the CSV Readerfrom niftynet.utilities.download import download download('mr_ct_regression_model_zoo_data') labels_location = 'ct.csv' files = [file.replace('.nii.gz', '') for file in os.listdir('/home/tom/niftynet/data/mr_ct_regression/CT_zero_mean') if file.endswith('.nii.gz')] pd.DataFrame(data=[tuple([file] + list(np.random.randn(10))) for file in files]).to_csv('features.csv', index=None, header=['subject_id'] + [str(x) for x in range(10)]) pd.read_csv('features.csv') task_param = TaskParam(['image']) image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}} csv_data_param = {'features': {'csv_data_file': 'features.csv', 'to_ohe': False}} grouping_param = {'image': (['CT'])} image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param) image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files) preprocessing = Preprocessing(net_param, action_param, task_param) normalisation_layers = preprocessing.prepare_normalisation_layers() augmentation_layers = preprocessing.prepare_augmentation_layers() image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers) csv_reader = CSVReader(('features',)).initialise(csv_data_param, {'features': ['features']}, file_list=image_sets_partitioner.all_files) print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['features'])) window_sizes = {'image': (100, 100, 1), 'features': (1, 1, 1)} sampler = ResizeSampler(reader=image_reader, csv_reader=csv_reader, window_sizes=window_sizes, num_threads=2, smaller_final_batch_mode='drop', batch_size=2, queue_length=2) sample = next(sampler()) print(sample['image'].shape) print(sample['features'].shape) print(sample.keys())INFO:niftynet: Number of subjects 15, input section names: ['subject_id', 'CT'] -- using all subjects (without data partitioning). INFO:niftynet: Image reader: loading 15 subjects from sections ['CT'] as input [image] WARNING:niftynet: This method will read your entire csv into memory One sample from the csv_reader: [ 0.23841972 -0.96603888 -0.07318273 1.59560139 -0.09326917 -1.04853203 -0.65709902 -1.17890471 0.7956195 -0.97414747] INFO:niftynet: reading size of preprocessed images WARNING:niftynet: sampler queue_length should be larger than batch_size, defaulting to batch_size * 5.0 (10). 
(1, 100, 100, 1, 1, 1) (1, 10, 1, 1, 1, 1) dict_keys(['image_location', 'image', 'features', 'features_location'])Testing the CSV Reader on labels AND Features# Make sure we accept 'Label', 'label', 'LABEL' task_param = TaskParam(['image']) image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}} csv_data_param = {'label': {'csv_data_file': 'label.csv', 'to_ohe': True}, 'features': {'csv_data_file': 'features.csv', 'to_ohe': False}} grouping_param = {'image': (['CT'])} image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param) image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files) preprocessing = Preprocessing(net_param, action_param, task_param) normalisation_layers = preprocessing.prepare_normalisation_layers() augmentation_layers = preprocessing.prepare_augmentation_layers() image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers) csv_reader = CSVReader(('label', 'features')).initialise(csv_data_param, {'label': (['label']), 'features': (['features'])}, file_list=image_sets_partitioner.all_files) print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['label'])) window_sizes = {'image': (100, 100, 1), 'label': (1, 1, 1)} sampler = ResizeSampler(reader=image_reader, csv_reader=csv_reader, window_sizes=window_sizes, num_threads=2, smaller_final_batch_mode='drop', batch_size=2, queue_length=2) sample = next(sampler()) print(sample['image'].shape) print(sample['label'].shape) print(sample['features'].shape) modalities = ['t1ce.', 't1.', 'flair.', 't2.'] def get_modality(string): return modalities[[True if mod in string else False for mod in modalities].index(True)][:-1] files = [(file.replace('.nii.gz', ''), get_modality(file)) \ for file in os.listdir('/home/tom/data/BRATS_18_SPLITS/train') if 'seg' not in file] pd.DataFrame(data=files, columns=['subject_id', 'label']).to_csv('/home/tom/phd/NiftyNet-Generator-PR/NiftyNet/modality_labels.csv', index=None)L2 Regularized Logistic Regression Model This demo file provides an example of using my L2 Regularized Logistic Regression model built with numpy to classify tumors in the Wisconsin Breast Cancer Dataset as benign or cancerous. 
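For reference, the objective that the gradient-descent training below is assumed to minimize is the standard L2-penalized logistic loss (the exact scaling used inside `l2_regularized_logistic_regression` may differ):

$$F(\beta) = \frac{1}{n}\sum_{i=1}^{n}\log\left(1 + \exp(-y_i x_i^{\top}\beta)\right) + \lambda\,\lVert\beta\rVert_2^2,$$

with labels $y_i \in \{-1, 1\}$, which is why the binary targets are transformed to [-1, 1] before training.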
This demo also includes how to implement my from-scratch K fold cross-validation method to find the optimal lambda penalty value for the regression model.- **Dataset:** https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html- **Author:** ()- **Credits:** University of Washington DATA 558 with Zaid Harchaoui and Corinne Jones Import Standard Scikit-Learn Functionalityfrom sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import precision_recall_fscore_support as scoreImport Datasetfrom sklearn.datasets import load_breast_cancerImport Methods for Modeling Training and Analysisfrom l2_regularized_logistic_regression import *Load Data and Separate Features and Targetsdata = load_breast_cancer() X = data.data y = data.target features = data.target_namesTrain Test SplitX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)Scale Input Data and Transform Binary Targets to [-1, 1]X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_train = transform_target(y_train) y_test = transform_target(y_test)Train L2 Regularized Logistic Regressionbeta_vals = l2_log_reg(X_train, y_train, lambda_penalty=1.0, eps=0.001, v=0)Visualize Value of Object from Each Iteration of Gradient Descentplot_objective_values(X_train, y_train, beta_vals, lambda_penalty=1.0)Predict with Each Coefficient Vector on Train and Testtrain_pred_vals = predict_all(beta_vals, X_train) test_pred_vals = predict_all(beta_vals, X_test)Visualize Misclassification Error at Each Step of Gradient Descentplot_errors(train_pred_vals, test_pred_vals, y_train, y_test, labels=['Train Set', 'Test Set'])Use K Fold Cross Validation to Find the Optimal Lambda Valuelambdas = [0.001, 0.01, 0.1, 1, 10, 100, 1000] mean_test_errors = k_fold_crossval(X_train, y_train, lambdas, k=5, eps=0.001) best_lambda = lambdas[np.argmin(mean_test_errors)] print('Best lambda: {}'.format(best_lambda))Best lambda: 0.001Retrain L2 Regularized Logistic Regression with Optimal Lambdabeta_vals = l2_log_reg(X_train, y_train, lambda_penalty=best_lambda, eps=0.001, v=0)Predict with Each Coefficient Vector on Train and Testtrain_pred_vals = predict_all(beta_vals, X_train) test_pred_vals = predict_all(beta_vals, X_test)Visualize Misclassification Error at Each Step of Gradient Descentplot_errors(train_pred_vals, test_pred_vals, y_train, y_test, labels=['Train Set', 'Test Set'])Calculate Precision and Recall with the Final Beta from Trainingtrain_precision, train_recall, train_fscore, train_support = score(y_train, train_pred_vals[-1]) val_precision, val_recall, val_fscore, val_support = score(y_test, test_pred_vals[-1]) pr_df = pd.DataFrame({'Class': list(range(0, 2)), 'TrainPrecision': train_precision, 'TrainRecall': train_recall, 'ValidationPrecision': val_precision, 'ValidationRecall': val_recall}) pr_dfFractional Differencing BackgroundFractional Differencing is a signal processing technique that is used to remove the non-stationarity from the time series while maintaining as much memory as possible. It is widely used in FSI to prepare training data for machine learning algorithms. In this [open-source project](https://github.com/ritchieng/fractional_differencing_gpu/blob/master/notebooks/gpu_fractional_differencing.ipynb) done by Ensemble Capital, fractional differencing computation is accelerated via `cudf.appy_chunk` method in the GPU. 
It achieves hundreds of times acceleration compared with CPU implementation in their [report](https://www.researchgate.net/publication/335159299_GFD_GPU_Fractional_Differencing_for_Rapid_Large-scale_Stationarizing_of_Time_Series_Data_while_Minimizing_Memory_Loss). Using `apply_rows` and `apply_chunks` method from the cudf library is the easiest way of customizing GPU computations as covered in this [blog](https://medium.com/rapids-ai/user-defined-functions-in-rapids-cudf-2d7c3fc2728d). However, it is not the most efficient way.In this notebook, we are going to show how to use Numba to do fractional differencing computation efficiently. As gQuant wrap the fractional differencing function in the computation node, we are going to show it is easy for data scientists to compute fractional differencing signals and use them to generate alpha signals. Environment Preparationimport sys; sys.path.insert(0, '..') import warnings import gquant from gquant.cuindicator import get_weights_floored, fractional_diff import ipywidgets as widgets import os import time import numpy as np from numba import cuda import cudf import inspect from numba import njit from numba import prange warnings.simplefilter("ignore")Copy the fractional differencing code from the [open-source project](https://github.com/ritchieng/fractional_differencing_gpu/blob/master/notebooks/gpu_fractional_differencing.ipynb). We will use this as our benchmark referencedef moving_dot_product_kernel(in_data, out, window_size, weights): # Set the first window_size-1 rows in each chunk to np.nan due # insufficient history for i in range(cuda.threadIdx.x, window_size - 1, cuda.blockDim.x): out[i] = np.nan # Compute dot product of preceding window_size rows for i in range(cuda.threadIdx.x + window_size - 1, in_data.size, cuda.blockDim.x): rolling_dot_product = 0.0 k = 0 for j in range(i - window_size + 1, i + 1): rolling_dot_product += in_data[j] * weights[k][0] k += 1 out[i] = rolling_dot_product def frac_diff_gpu(df, d, floor=1e-3): r"""Fractionally difference time series via GPU. Args: df (pd.DataFrame): dataframe of raw time series values. d (float): differencing value from 0 to 1 where > 1 has no FD. floor (float): minimum value of weights, ignoring anything smaller. """ # Bring dataframe to GPU, reset index for GPU dot product kernel # gdf_raw = cudf.from_pandas(df).reset_index(drop=True) gdf_raw = df gdf_raw.columns = ['in_data'] # Get weights window weights = get_weights_floored(d=d, num_k=len(gdf_raw), floor=floor) weights_window_size = len(weights) # Reverse weights and as contiguous weights = np.ascontiguousarray(weights[::-1]) # Bring weights to GPU gdf_weights = cudf.DataFrame() gdf_weights[gdf_raw.columns[0]] = weights.reshape(-1) # Length of data data_length = len(gdf_raw) # T4: max of 518 threads per block. 
# V100: max 1024 threads per block threads_per_block = 518 # Chunk size split # This has to be improved, but as a v0.1, it's sufficient to show speed-up # Up to easily 100 million data points trunk_size = data_length # Get fractionally differenced time series through GPU function gdf_raw_fd = gdf_raw.apply_chunks(moving_dot_product_kernel, incols=['in_data'], outcols=dict(out=np.float64), kwargs=dict(window_size=weights_window_size, weights=weights), chunks=list(range(0, data_length, trunk_size)) + [data_length], tpb=threads_per_block) # Bring to CPU for normal manipulation # df_raw_fd = gdf_raw_fd.to_pandas().dropna().iloc[:-1, 1] return gdf_raw_fd, weightsFollowing is the gQuant's fractional differencing implementation via Numba libraryprint(inspect.getsource(fractional_diff))def fractional_diff(input_arr, d=0.5, floor=1e-3, min_periods=None, thread_tile=2, number_of_threads=512): """ The fractional difference computation method. Arguments: ------- input_arr: numba.cuda.DeviceNDArray or cudf.Series the input array to compute the fractional difference d: float the differencing value. range from 0 to 1 floor: float minimum value for the weights for computational efficiency. min_periods: int default the lengths of the weights. Need at least min_periods of non-na elements to get fractional difference value thread_tile: int each thread will be responsible for `thread_tile` number of elements in window computation number_of_threads: int number of threads in a block for CUDA computation Returns ------- (numba.cuda.DeviceNDArray, np.array) the computed fractional difference array and the weight array tuple [...]It launches the Numba kernel, which defined as:@cuda.jit(device=True) def conv_window(shared, history_len, out_arr, window_size, arr_len, offset, offset2, min_size): """ This function is to do convolution for one thread Arguments: ------ shared: numba.cuda.DeviceNDArray 3 chunks of data are stored in the shared memory the first [0, window_size) elements is the chunk of data that is necessary to compute the first convolution element. then [window_size, window_size + thread_tile * blockDim) elements are the inputs allocated for this block of threads the last [window_size + thread_tile, window_size + thread_tile + window_size) is to store the kernel values history_len: int total number of historical elements available for this chunk of data out_arr: numba.cuda.DeviceNDArray output gpu_array of size of `thread_tile` window_size: int the number of elements in the kernel arr_len: int the chunk array length, same as `thread_tile` offset: int indicate the starting index of the chunk array in the shared for this thread. offset: int indicate the starting position of the weights/kernel array min_size: int the minimum number of non-na elements """ for i in range(arr_len): if i + history_len < window_size-1: out_arr[i] = np.nan else: s = 0.0 average_size = 0 for j in range(0, window_size): if not (cmath.isnan( shared[offset + i - j])): s += (shared[offset + i - j] * shared[offset2 + window_size - 1 - j]) average_size += 1 if average_size >= min_size: out_arr[i] = s else: out_arr[i] = np.nan @cuda.jit def kernel(in_arr, weight_arr, out_arr, window, arr_len, thread_tile, min_size): """ This kernel is to do 1D convlution on `in_arr` array with `weight_arr` as kernel. The results is saved on `out_arr`. 
Arguments: ------ in_arr: numba.cuda.DeviceNDArray input gpu array weight_arr: numba.cuda.DeviceNDArray convolution kernel gpu array out_arr: numba.cuda.DeviceNDArray output gpu_array window: int the number of elements in the weight_arr arr_len: int the input/output array length thread_tile: int each thread is responsible for `thread_tile` number of elements min_size: int the minimum number of non-na elements """ shared = cuda.shared.array(shape=0, dtype=numba.float64) block_size = cuda.blockDim.x # total number of threads tx = cuda.threadIdx.x # Block id in a 1D grid bid = cuda.blockIdx.x starting_id = bid * block_size * thread_tile # copy the thread_tile * number_of_thread_per_block into the shared for j in range(thread_tile): offset = tx + j * block_size if (starting_id + offset) < arr_len: shared[offset + window - 1] = in_arr[ starting_id + offset] cuda.syncthreads() # copy the window - 1 into the shared for j in range(0, window - 1, block_size): if (((tx + j) < window - 1) and ( starting_id - window + 1 + tx + j >= 0)): shared[tx + j] = \ in_arr[starting_id - window + 1 + tx + j] cuda.syncthreads() # copy the weights into the shared for j in range(0, window, block_size): element_id = tx + j if (((tx + j) < window) and (element_id < window)): shared[thread_tile * block_size + window - 1 + tx + j] = weight_arr[tx + j] cuda.syncthreads() # slice the shared memory for each threads start_shared = tx * thread_tile his_len = min(window - 1, starting_id + tx * thread_tile) # slice the global memory for each threads start = starting_id + tx * thread_tile end = min(starting_id + (tx + 1) * thread_tile, arr_len) sub_outarr = out_arr[start:end] sub_len = end - start conv_window(shared, his_len, sub_outarr, window, sub_len, window - 1 + start_shared, thread_tile * block_size + window - 1, min_size)Fractional differencing is essentially doing 1D convolution computation with the kernel values set to be the weights computed from get_weights_floored. Check the original notebook for the details of the meanings of the weights. To make convolution computation faster, we divide the long input array into small chunks and send to different thread blocks. All the array chunks and the weights are loaded into the GPU shared memory for fast IO. The device function conv_window is doing the convolution computation for one thread.To make a fair comparsion with CPU implementation, we implemented an efficient CPU version of the fractional differencing calculation. 
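For reference, the weights returned by `get_weights_floored` follow the standard fractional-differencing recursion (textbook form; see the original notebook for the exact implementation):

$$w_0 = 1, \qquad w_k = -\,w_{k-1}\,\frac{d - k + 1}{k},$$

truncated once $|w_k|$ drops below the `floor` threshold. Returning to the CPU version just mentioned: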
It is accelerated by numba.njit that take advantage of multiple cores of the CPU and fastmath compiler optimization.@njit(fastmath=True, parallel=True) def moving_dot_product_cpu(in_data, out, window_size, weights): # Set the first window_size-1 rows in each chunk to np.nan due # insufficient history for i in prange(0, window_size - 1): out[i] = np.nan # Compute dot product of preceding window_size rows for i in prange(window_size - 1, len(in_data)): rolling_dot_product = 0.0 k = 0 for j in range(i - window_size + 1, i + 1): rolling_dot_product += in_data[j] * weights[k] k += 1 out[i] = rolling_dot_product def cpu_fractional_diff(input_arr, d=0.5, floor=1e-3): # compute the weights for the fractional difference weights = get_weights_floored(d=d, num_k=len(input_arr), floor=floor)[::-1, 0] weights_out = np.ascontiguousarray(weights) weights = weights_out weights_window_size = len(weights) window = len(weights) out = np.zeros_like(input_arr) moving_dot_product_cpu(input_arr, out, weights_window_size, weights) return outFractional differencing is essentially doing 1D convolution computation with the kernel values set to be the weights computed from `get_weights_floored`. Check the original [notebook](https://github.com/ritchieng/fractional_differencing_gpu/blob/master/notebooks/gpu_fractional_differencing.ipynb) for the details of the meanings of the weights. To make convolution computation faster, we divide the long input array into small chunks and send to different thread blocks. All the array chunks and the weights are loaded into the GPU shared memory for fast IO. The device function `conv_window` is doing the convolution computation for one thread.We can compare the performance of gQuant GPU implementation vs the original one and CPU implementation:for i in range(5, 9): df_raw = cudf.DataFrame() ran_array = np.random.rand(10**int(i)) df_raw['in'] = ran_array df_raw2 = cudf.DataFrame() df_raw2['in'] = ran_array # Start timer start = time.time() df_raw_fd_from_gpu, weights = frac_diff_gpu(df_raw, d=0.5, floor=5e-5) # End timer end = time.time() duration = end - start start = time.time() gquant_gpu, weights = fractional_diff(df_raw2['in'], d=0.5, floor=5e-5) cuda.synchronize() end = time.time() optimized_duration = end - start #(df_raw_fd_from_gpu.values) start = time.time() cpu_result = cpu_fractional_diff(ran_array, d=0.5, floor=5e-5) end = time.time() cpu_duration = end - start err = np.abs(df_raw_fd_from_gpu['out'].to_array()[weights.size-1:] - np.array(gquant_gpu)[weights.size-1:]).max() err = max(np.abs(df_raw_fd_from_gpu['out'].to_array()[weights.size-1:] - cpu_result[weights.size-1:]).max(), err) print('array size %d, Ensemble: time %.3f s, gQuant GPU Time %.3f s, gQuant CPU Time %.3f, speed up %.2f, speed up vs CPU %.2f, error %.4f ' % (10**int(i), duration, optimized_duration, cpu_duration, duration / optimized_duration, cpu_duration/optimized_duration, err))array size 100000, Ensemble: time 0.404 s, gQuant GPU Time 0.483 s, gQuant CPU Time 0.742, speed up 0.84, speed up vs CPU 1.54, error 0.0000 array size 1000000, Ensemble: time 0.085 s, gQuant GPU Time 0.007 s, gQuant CPU Time 0.042, speed up 12.07, speed up vs CPU 5.98, error 0.0000 array size 10000000, Ensemble: time 0.774 s, gQuant GPU Time 0.010 s, gQuant CPU Time 0.287, speed up 78.79, speed up vs CPU 29.26, error 0.0000 array size 100000000, Ensemble: time 6.987 s, gQuant GPU Time 0.052 s, gQuant CPU Time 2.533, speed up 133.71, speed up vs CPU 48.47, error 0.0000For the array of length 100m, gQuant can achieve 100x 
speedup compared with Ensemble Capital's GPU implementation and a 30x speedup compared with the multi-core CPU version.
Use the fractional differencing signal to trade stocks
We will use the same [XGBoost example](https://github.com/rapidsai/gQuant/blob/master/notebooks/06_xgboost_trade.ipynb) to run a backtest with fractional differencing signals. The workflow includes the following steps:
1. Load the 5000 end-of-day stocks CSV data into the dataframe.
2. Add a rate-of-return feature to the dataframe.
3. Clean up the data by removing low-volume stocks and stocks with extreme rates of return.
4. Compute the features based on different fractional differencing signals of the closing prices of the stocks.
5. Split the data into training and testing sets and build an XGBoost model on the training data. From the XGBoost model, compute the trading signals for all the data points.
6. Run backtesting and compute the returns from this strategy for each of the days and stock symbols.
7. Run a simple portfolio optimization by averaging the stocks together for each of the trading days.
8. Compute the Sharpe ratio and cumulative return results for both the training and testing datasets.
The whole workflow can be organized into a computation graph, which is fully described in a yaml file. Each node has a unique id, a node type, configuration parameters, and the ids of its input nodes. gQuant takes this yaml file, wires it into a graph, and visualizes it.
%reset -s -f import sys import os sys.path.append('..') import gquant from gquant.dataframe_flow import TaskGraph import ipywidgets as widgets import warnings warnings.simplefilter("ignore") task_graph = TaskGraph.load_taskgraph('../task_example/xgboost_trade.yaml') task_graph.draw(show='ipynb')
Define some constants for the data filters. If you are using a GPU with 32 GB of memory, you can safely set min_volume to 5.0.
min_volume = 400.0 min_rate = -10.0 max_rate = 10.0
The features used by the XGBoost algorithm are prepared in the `node_technical_indicator` node, where the `cuIndicator` module is used to compute the technical indicators on the GPU for all the stock symbols. `node_xgboost_strategy` is the node that computes the trading signals from the stock technical indicators. Each gQuant node is implemented by overriding the "columns_setup" and "process" methods of the Node base class. Please refer to the [customize nodes notebook](https://github.com/rapidsai/gQuant/blob/master/notebooks/05_customize_nodes.ipynb) for details. 
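As a rough illustration of that pattern, a custom node might look something like the sketch below. The import path, class name, and attribute conventions here are assumptions for illustration, not the exact gQuant API; the customize-nodes notebook linked above is the authoritative reference.

# Hypothetical sketch of a custom gQuant node -- names and conventions are assumed.
from gquant.dataframe_flow import Node  # assumed import path

class VolumeZScoreNode(Node):  # hypothetical node

    def columns_setup(self):
        # Declare the input columns this node requires and the columns it adds
        # (the attribute names below are an assumption about the base-class contract).
        self.required = {'volume': 'float64'}
        self.addition = {'volume_zscore': 'float64'}

    def process(self, inputs):
        # `inputs` is assumed to be the list of upstream dataframes.
        df = inputs[0]
        df['volume_zscore'] = (df['volume'] - df['volume'].mean()) / df['volume'].std()
        return df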
Define the function to visualize the backtest results# define the function to format the plots def plot_figures(o): # format the figures figure_width = '1200px' figure_height = '400px' sharpe_number = o[0] cum_return_train = o[1] cum_return_train.layout.height = figure_height cum_return_train.layout.width = figure_width cum_return_train.title = 'Training P & L %.3f' % (sharpe_number) sharpe_number = o[2] cum_return_test = o[3] cum_return_test.layout.height = figure_height cum_return_test.layout.width = figure_width cum_return_test.title = 'Testing P & L %.3f' % (sharpe_number) return widgets.VBox([cum_return_train, cum_return_test])In this example, we are going to add 5 fractional differencing signals from the closing pricesindicator_conf = { "indicators": [ {"function": "port_fractional_diff", "columns": ["close"], "args": [0.5] }, {"function": "port_fractional_diff", "columns": ["close"], "args": [0.3] }, {"function": "port_fractional_diff", "columns": ["close"], "args": [0.1] }, {"function": "port_fractional_diff", "columns": ["close"], "args": [0.7] }, {"function": "port_fractional_diff", "columns": ["close"], "args": [0.9] }, {"function": "port_shift", "columns": ["returns"], "args": [-1] } ], "remove_na": True }Run the backtestaction = "load" if os.path.isfile('./.cache/node_csvdata.hdf5') else "save" outlist = ['node_sharpe_training','node_cumlativeReturn_training', 'node_sharpe_testing', 'node_cumlativeReturn_testing'] replace_spec={'node_filterValue': {"conf": [{"column": "volume_mean", "min": min_volume}, {"column": "returns_max", "max": max_rate}, {"column": "returns_min", "min": min_rate}]}, 'node_csvdata': {action: True}} replace_spec['node_technical_indicator'] = {"conf": indicator_conf} #replace_spec['node_sort2'] = {"load": cached_sort} o_gpu = task_graph.run( outputs=outlist, replace=replace_spec) plot_figures(o_gpu)We get Sharpe Ratio of `1.01` just from the fractional differencing signals of the closing prices To visualize the computed fractional differencing signals, we can add a branch to the graph for plotting. In the example below, we select the asset with id `22123` and plot 4 fractional differencing signals with different `d` values. Check the updated graph belowasset_filter = { 'id': 'node_filter_asset', 'type': "AssetFilterNode", 'conf': { "asset": 22123 }, "inputs": ["node_technical_indicator"] } node_lines ={ "id": "node_lineplot", "type": "LinePlotNode", "conf": { "points": 300, "lines": [ {"column": "FR_DI_0.1", "label": "d 0.1", "color": "blue"}, {"column": "FR_DI_0.3", "label": "d 0.3", "color": "orange"}, {"column": "FR_DI_0.5", "label": "d 0.5", "color": "green"}, {"column": "FR_DI_0.7", "label": "d 0.7", "color": "black"} ], "title": "Signals"}, "inputs": ["node_filter_asset"]} task_graph.extend([asset_filter, node_lines]) task_graph.draw(show='ipynb')We can run the sub-graph just for plotting the signals.action = "load" if os.path.isfile('./.cache/node_csvdata.hdf5') else "save" outlist = ['node_lineplot'] replace_spec={'node_filterValue': {"conf": [{"column": "volume_mean", "min": min_volume}, {"column": "returns_max", "max": max_rate}, {"column": "returns_min", "min": min_rate}]}, 'node_csvdata': {action: True}} replace_spec['node_technical_indicator'] = {"conf": indicator_conf} #replace_spec['node_sort2'] = {"load": cached_sort} (lineplot, ) = task_graph.run(outputs=outlist, replace=replace_spec) # display plot in jupyter lineplotДомашнее задание 3. Бонус. Object detection. Вы можете получить за это задание до 5 баллов. 
В этом задании потребуется обучить детектор фруктов на изображении. Датасет можно скачать [отсюда](https://yadi.sk/d/UPwQB7OZrB48qQ).import pandas as pd import numpy as np import torch from torch import nn from torch.nn import functional as F from torch.utils.data import Dataset, DataLoader # we will need this library to process the labeling ! pip install xmltodict import xmltodict, jsonДатасет мы за вас написали.import pandas as pd import numpy as np import torch from torch import nn from torch.nn import functional as F from torch.utils.data import Dataset, DataLoader import xmltodict import json import glob import cv2 import os import torchvision import matplotlib.pyplot as plt from sklearn.metrics import auc # add any imports you need class2tag = {"apple": 1, "orange": 2, "banana": 3} class FruitDataset(Dataset): def __init__(self, data_dir, transform=None): self.images = [] self.annotations = [] self.transform = transform for annotation in glob.glob(data_dir + "/*xml"): image_fname = os.path.splitext(annotation)[0] + ".jpg" self.images.append(cv2.cvtColor(cv2.imread(image_fname), cv2.COLOR_BGR2RGB)) with open(annotation) as f: annotation_dict = xmltodict.parse(f.read()) bboxes = [] labels = [] objects = annotation_dict["annotation"]["object"] if not isinstance(objects, list): objects = [objects] for obj in objects: bndbox = obj["bndbox"] bbox = [bndbox["xmin"], bndbox["ymin"], bndbox["xmax"], bndbox["ymax"]] bbox = list(map(int, bbox)) bboxes.append(torch.tensor(bbox)) labels.append(class2tag[obj["name"]]) self.annotations.append( {"boxes": torch.stack(bboxes).float(), "labels": torch.tensor(labels)} ) def __getitem__(self, i): if self.transform: # the following code is correct if you use albumentations # if you use torchvision transforms you have to modify it =) res = self.transform( image=self.images[i], bboxes=self.annotations[i]["boxes"], labels=self.annotations[i]["labels"], ) return res["image"], { "boxes": torch.tensor(res["bboxes"]), "labels": torch.tensor(res["labels"]), } else: return self.images[i], self.annotations[i] def __len__(self): return len(self.images)Выпишем кое-какую техническую работу, которая уже была на семинаре.def intersection_over_union(dt_bbox, gt_bbox): """ Intersection over Union between two bboxes :param dt_bbox: list or numpy array of size (4,) [x0, y0, x1, y1] :param gt_bbox: list or numpy array of size (4,) [x0, y0, x1, y1] :return : intersection over union """ ## TODO YOUR CODE intersection_bbox = np.array( [ max(dt_bbox[0], gt_bbox[0]), max(dt_bbox[1], gt_bbox[1]), min(dt_bbox[2], gt_bbox[2]), min(dt_bbox[3], gt_bbox[3]), ] ) intersection_area = max(intersection_bbox[2] - intersection_bbox[0], 0) * max( intersection_bbox[3] - intersection_bbox[1], 0 ) area_dt = (dt_bbox[2] - dt_bbox[0]) * (dt_bbox[3] - dt_bbox[1]) area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1]) union_area = area_dt + area_gt - intersection_area iou = intersection_area / union_area return iou def evaluate_sample(target_pred, target_true, iou_threshold=0.5): gt_bboxes = target_true["boxes"].numpy() gt_labels = target_true["labels"].numpy() dt_bboxes = target_pred["boxes"].numpy() dt_labels = target_pred["labels"].numpy() dt_scores = target_pred["scores"].numpy() results = [] for detection_id in range(len(dt_labels)): dt_bbox = dt_bboxes[detection_id, :] dt_label = dt_labels[detection_id] dt_score = dt_scores[detection_id] detection_result_dict = {"score": dt_score} max_IoU = 0 max_gt_id = -1 for gt_id in range(len(gt_labels)): gt_bbox = gt_bboxes[gt_id, :] gt_label = 
gt_labels[gt_id] if gt_label != dt_label: continue if intersection_over_union(dt_bbox, gt_bbox) > max_IoU: max_IoU = intersection_over_union(dt_bbox, gt_bbox) max_gt_id = gt_id if max_gt_id >= 0 and max_IoU >= iou_threshold: detection_result_dict["TP"] = 1 gt_labels = np.delete(gt_labels, max_gt_id, axis=0) gt_bboxes = np.delete(gt_bboxes, max_gt_id, axis=0) else: detection_result_dict["TP"] = 0 results.append(detection_result_dict) return results def evaluate(model, test_loader, device): results = [] model.eval() nbr_boxes = 0 with torch.no_grad(): for batch, (images, targets_true) in enumerate(test_loader): images = list(image.to(device).float() for image in images) targets_pred = model(images) targets_true = [ {k: v.cpu().float() for k, v in t.items()} for t in targets_true ] targets_pred = [ {k: v.cpu().float() for k, v in t.items()} for t in targets_pred ] for i in range(len(targets_true)): target_true = targets_true[i] target_pred = targets_pred[i] nbr_boxes += target_true["labels"].shape[0] results.extend(evaluate_sample(target_pred, target_true)) results = sorted(results, key=lambda k: k["score"], reverse=True) acc_TP = np.zeros(len(results)) acc_FP = np.zeros(len(results)) recall = np.zeros(len(results)) precision = np.zeros(len(results)) if results[0]["TP"] == 1: acc_TP[0] = 1 else: acc_FP[0] = 1 for i in range(1, len(results)): acc_TP[i] = results[i]["TP"] + acc_TP[i - 1] acc_FP[i] = (1 - results[i]["TP"]) + acc_FP[i - 1] precision[i] = acc_TP[i] / (acc_TP[i] + acc_FP[i]) recall[i] = acc_TP[i] / nbr_boxes return auc(recall, precision)Вам мы оставляем творческую часть =)__Задание__. Обучите модель для object detection на __обучающем__ датасете и добейтесь PR-AUC не менее __0.94__ на __тестовом__. - Создайте модель и оптимайзер - Напишите функцию обучения модели - Используйте аугментации Использовать аугментации для обучения __обязательно__. Они дадут 1 балл из 5. Пользуйтесь модулем torchvision.transforms или библиотекой albumentations (о которой говорилось ранее). Последняя библиотека особенно удобна, поскольку умеет сама вычислять новые координаты bounding box'ов после трансформаций картинки. Советуем обратить внимание на следующий [гайд](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/). Обратите внимание, что код, написанный в датасете выше, верен только если вы используете albumentations. 
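As an illustration of the albumentations route, a training transform with bounding-box support could look roughly like this (the particular augmentations are only examples; the essential part is `bbox_params`, which makes the boxes follow the image transforms and matches the [xmin, ymin, xmax, ymax] format built in `FruitDataset` above):

import albumentations as A
from albumentations.pytorch import ToTensorV2

# Example augmentation pipeline -- adjust to your data; 'pascal_voc' corresponds to
# [xmin, ymin, xmax, ymax] pixel coordinates, which is what FruitDataset produces.
train_transform = A.Compose(
    [
        A.HorizontalFlip(p=0.5),
        A.RandomBrightnessContrast(p=0.2),
        ToTensorV2(),
    ],
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['labels']),
)

# Validation transform: no random augmentation, same bounding-box handling.
val_transform = A.Compose(
    [ToTensorV2()],
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['labels']),
)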
Если вы выбрали путь torchvision.transforms, вам потребуется метод `__getitem__` изменить (что-то типа `return self.transform(self.images[i])`; однако в таком случае вычислять новые координаты bounding box'ов после трансформаций вам придётся вручную =))Оставшиеся 4 балла вычисляются по простой формуле: __min(4, 4 * (Ваш auc - 0.5) / 0.94)__.def train_one_epoch(model, train_dataloader, optimizer, device): # YOUR CODE # TRAIN YOUR MODEL ON THE train_dataloader pass def train(model, train_dataloader, val_dataloader, optimizer, device, n_epochs=10): for epoch in range(n_epochs): model.eval() a = evaluate(model, val_dataloader, device=device) print("AUC ON TEST: {.4f}".format(a)) model.train() train_one_epoch(model, dataloader, optimizer, device=device) train_transform = # YOUR CODE FOR AUGMENTATIONS val_transform = # YOUR CODE FOR VALIDATION AUGMENTATIONS # HINT: TRAIN TRANSFORM OBVIOUSLY SHOULD BE HARDER THAN THOSE FOR VALIDATION train_dataset = FruitDataset("./train_zip/train", transform=train_transform) val_dataset = FruitDataset("./train_zip/test", transform=val_transform) model = # YOUR CODE, CREATE MODEL FOR OBJECT DETECTION # HINT: USE MATERIALS FROM THE SEMINAR # YOU CAN USE torchvision.models AND torchvision.models.detection # READ OFFICIAL DOCS FOR MORE INFO optimizer = # SELECT YOUR OPTIMIZER train_dataloader = # CREATE YOUR DATALOADER, SELECT APPROPRIATE batch_size val_dataloader = # CREATE VALIDATION DATALOADER n_epochs = # SELECT APPROPRIZTE NUMBER OF EPOCHS device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") train(model, train_dataloader, val_dataloader, optimizer, device, n_epochs)__Выведите итоговое качество модели__.auc = evaluate(model, val_dataloader, criterion) print("Оценка за это задание составит {} баллов".format(min(4, 4 * (auc - 0.5) / 0.94)))Нарисуйте предсказанные bounding box'ы для любых двух картинок из __тестового__ датасета.image, labels = next(iter(train_dataset)) pred = model(image.unsqueeze(0).to(device))[0] from PIL import ImageDraw image = torchvision.transform.ToPILImage()(image) draw = ImageDraw.Draw(image) for box in labels['boxes']: draw.rectangle([(box[0], box[1]), (box[2], box[3])]) for box in pred['boxes']: draw.rectangle([(box[0], box[1]), (box[2], box[3])], outline='red') imageБонус (0 баллов).__Задание 1__. Скиньте ниже смешную картинку, желательно про машинное обучение. На картинке не должно быть никаких упоминаний лектора, семинаристов и ассистентов этого курса. Если картинка будет смешной, проверяющему(-ей) будет приятно :3__Задание 2__. Расскажите, как вам курс в целом? Что понравилось, что не понравилось, что можно улучшить? Мы примем во внимание любой фидбек.Cartões de Pagamento do Governo FederalCampos do Arquivo:- "CÓDIGO ÓRGÃO SUPERIOR";- "NOME ÓRGÃO SUPERIOR";- "CÓDIGO ÓRGÃO";- "NOME ÓRGÃO";- "CÓDIGO UNIDADE GESTORA";- "NOME UNIDADE GESTORA";- "ANO EXTRATO";- "MÊS EXTRATO";- "CPF PORTADOR";- "NOME PORTADOR";- "CNPJ OU CPF FAVORECIDO";- "NOME FAVORECIDO";- "TRANSAÇÃO";- "DATA TRANSAÇÃO";- "VALOR TRANSAÇÃO"Podemos ter várias modelagens para descrever os relacionamentos no CPGF, em especial as transações. Vamos considerar que teremos as entidades abaixo já definidas:- Órgão Superior- Órgão- Unidade Gestora- Portador- FavorecidoAgora podemos explorar alguns cenários com ênfase nas transações. Caso 1: Transação como vérticeNesse caso, as transações podem ser colocadas como vértices, com atributos identificadores de saque ou compra. 
Compras sempre ligam portadores com favorecidos e saques são relações apenas entre portador e a transação. A data da transação e o valor também são adicionadas como atributos.Observer que, nessa situação, uma instância de transação terá no máximo duas arestas. Se o grafo pode ter atributos em arestas também, isso poderia ser substituído por uma aresta entre favorecido e portador, mas seria necessário um vértice de saque para identificar que esse foi o tipo de transação.Para esse cenário, as entidades seriam:- Órgão Superior- Órgão- Unidade Gestora- Portador- Favorecido- Transação Caso 2: Transação descrita pelos valores de domínioComo temos dois tipos de transação para CPGF, pode-se criar dois tipos distintos de vértices descrevendo as transações: saque e compra.Dessa forma, um saque se ligaria a um vértice do tipo saque, com informações sobre esse. E as compras seriam parecidas com as transações anteriores, mas da forma portador -> compra -> favorecido. Uma possibilidade adicional nesse cenário é que, se o grafo suporta atributos nas arestas, pode-se colocar apenas uma entidade de saque e as informações do saque específico ficarem nas arestas. Isso permite localizar rapidamente todos os saques realizados além de aumentar a normalização.Para esse cenário, as entidades seriam:- Órgão Superior- Órgão- Unidade Gestora- Portador- Favorecido- Saque- Compra Caso 3: Transações com saque como uma instância e compras como vérticesConsiderando o CPGF, não temos outras entidades que se liguem a transação a não ser favorecido e portador. Dessa forma, o vérice intermediário poderia ser substituído por um vínculo de compra com os dados da compra. Assim, teríamos portador -> comprou -> favorecido com os dados da operação na aresta. A operação de saque precisa de uma entidade para ser representada, e como comentado no caso anterior, os dados da operação podem ser colocados na aresta também. Assim, pode-se ter apenas um vértice de Saque consolidando todas as arestas de eventos de saque. Caso 4: Enfatizando datasNos cenários anteriores as datas estavam sempre inseridas como atributos de algum vértice ou aresta. Entretanto, identificar compras ou saques na mesma data (assim como lugares) pode ser interessante para auditoria ou detecção de fraudes. Em um grafo, podemos enfatizar relacionamentos utilizando os vértices. Criando uma entidade de Data, podemos interligar as transações por meio dessa entidade e identificar quais operações estão conectadas pelas mesmas instâncias.Nesse caso, entretanto, os vértices de compra já não poderiam ser mantidos como arestas (a menos que fosse um hipergrafo), pois o relacionamento possui mais de duas entidades e o vértice de compra permite fazer a ligação entre portador, favorecido e data. As operações de saque poderiam ser colocadas novamente como um vértice de saque que liga com as datas, mas nesse caso deve-se ter uma instância de saque para cada operação para identificar as diferentes datas e operações. Se tivermos a hora da operação, podemos criar o saque como uma aresta entre portador e data, colocando as informações da operação na aresta, isso também pode ser obtido somando os valores de saque do mesmo dia e o número de operação como atributos, mas não saberíamos o valor individual de cada operação. Por fim, para esse cenário vamos considerar que transação será uma entidade, e ligaremos ela a cada favorecido, portador e data. 
Isso simplifica algumas consultas pois permite agrupar, por datas, todas as transações realizadas.Para esse cenário, as entidades seriam:- Órgão Superior- Órgão- Unidade Gestora- Portador- Favorecido- Transações- DataEsse último cenário será a base para a construção do banco para exploração do CPGF.import pandas as pd import hashlib import networkx as nx #Fonte: Portal da Transparência: http://www.portaldatransparencia.gov.br/ cartoes = pd.read_csv("201801_CPGF_utf8.csv", sep=";") cartoes.head() #Extrair entidades G = nx.Graph() entidades = {"orgao_superior": set(), "orgao": set(), "unidade_gestora": set(), "portador": set(), "favorecido": set(), "data": set()} relacionamentos = {"superior_de-orgao_superior-orgao": set(), "possui_ug-orgao-unidade_gestora": set(), "tem_portador-unidade_gestora-portador": set(), "realizou_tx-portador-tx": set(), "transacao_em-tx-data": set(), "favoreceu-tx-favorecido": set()} transacoes = [] count = -1 for index, row in cartoes.iterrows(): count += 1 id_orgao_sup = hashlib.sha256(str.encode(str(row['CÓDIGO ÓRGÃO SUPERIOR']) + row['NOME ÓRGÃO SUPERIOR'])).hexdigest() orgao_sup = str(id_orgao_sup) + ":" + str(row['CÓDIGO ÓRGÃO SUPERIOR']) + ":" + row['NOME ÓRGÃO SUPERIOR'] entidades["orgao_superior"] = entidades["orgao_superior"] | set([orgao_sup]) id_orgao = hashlib.sha256(str.encode(str(row['CÓDIGO ÓRGÃO']) + row['NOME ÓRGÃO'])).hexdigest() orgao = str(id_orgao) + ":" + str(row['CÓDIGO ÓRGÃO']) + ":" + row['NOME ÓRGÃO'] entidades["orgao"] = entidades["orgao"] | set([orgao]) id_un_gestora = hashlib.sha256(str.encode(str(row['CÓDIGO UNIDADE GESTORA']) + row['NOME UNIDADE GESTORA'])).hexdigest() un_gestora = str(id_un_gestora) + ":" + str(row['CÓDIGO UNIDADE GESTORA']) + ":" + row['NOME UNIDADE GESTORA'] entidades["unidade_gestora"] = entidades["unidade_gestora"] | set([un_gestora]) id_portador = hashlib.sha256(str.encode(str(row['CPF PORTADOR']) + row['NOME PORTADOR'])).hexdigest() portador = str(id_portador) + ":" + str(row['CPF PORTADOR']) + ":" + row['NOME PORTADOR'] entidades["portador"] = entidades["portador"] | set([portador]) data_tx = str(row['DATA TRANSAÇÃO']).replace("/", "") data = data_tx[4:] + data_tx[2:4] + data_tx[0:2] + ":" + data_tx + ":" + str(row['DATA TRANSAÇÃO']) entidades["data"] = entidades["data"] | set([data]) id_transacao = "tx" + str(count) transacao = {"id_transacao": id_transacao, "tipo": row["TRANSAÇÃO"], "valor": row["VALOR TRANSAÇÃO"]} transacoes.append(transacao) relacionamentos["superior_de-orgao_superior-orgao"] = relacionamentos["superior_de-orgao_superior-orgao"] | set([(orgao_sup.split(":")[0], orgao.split(":")[0])]) relacionamentos["possui_ug-orgao-unidade_gestora"] = relacionamentos["possui_ug-orgao-unidade_gestora"] | set([(orgao.split(":")[0], un_gestora.split(":")[0])]) relacionamentos["tem_portador-unidade_gestora-portador"] = relacionamentos["tem_portador-unidade_gestora-portador"] | set([(un_gestora.split(":")[0], portador.split(":")[0])]) relacionamentos["transacao_em-tx-data"] = relacionamentos["transacao_em-tx-data"] | set([(id_transacao, data.split(":")[0])]) relacionamentos["realizou_tx-portador-tx"] = relacionamentos["realizou_tx-portador-tx"] | set([(portador.split(":")[0], id_transacao)]) if row['CNPJ OU CPF FAVORECIDO'] > 0 and "COMPRA" in row["TRANSAÇÃO"]: favorecido = str(row['CNPJ OU CPF FAVORECIDO']) + ":" + str(row['CNPJ OU CPF FAVORECIDO']).zfill(14) + ":" + row['NOME FAVORECIDO'] entidades["favorecido"] = entidades["favorecido"] | set([favorecido]) relacionamentos["favoreceu-tx-favorecido"] = 
relacionamentos["favoreceu-tx-favorecido"] | set([(id_transacao, favorecido.split(":")[0])]) for key, entidade in entidades.items(): with open("ent_" + key + ".csv", "w") as file: print(key) file.write(key + "Id:ID(" + key.capitalize() + "),id_origem,desc,:LABEL\n") for row in entidade: fields = row.split(":") file.write(fields[0].replace(",","").replace(";","") + "," + fields[1].replace(",","").replace(";","") + "," + fields[2].replace(",","").replace(";","") + "," + key.capitalize() + "\n") G.add_node(fields[0].replace(",","").replace(";",""), id_origem=fields[1].replace(",","").replace(";",""), desc=fields[2].replace(",","").replace(";",""), grupo=key.capitalize()) with open("ent_transacoes.csv", "w") as file: file.write("transacaoId:ID(Tx),tipo,valor,:LABEL\n") for tx in transacoes: file.write(tx["id_transacao"] + "," + tx["tipo"] + "," + str(tx["valor"]).replace(",",".") + "," + "Tx\n") G.add_node(tx["id_transacao"], tipo=tx["tipo"], valor=float(str(tx["valor"]).replace(",",".")), grupo="Tx") for key, rel in relacionamentos.items(): key_fields = key.split("-") with open("rel_" + key_fields[0] + ".csv", "w") as file: file.write(":START_ID(" + key_fields[1].capitalize() + "),:END_ID(" + key_fields[2].capitalize() + ")\n") #file.write(":START_ID(" + key_fields[1].capitalize() + "-ID),:END_ID(" + key_fields[2].capitalize() + "-ID),:TYPE\n") for row in rel: #file.write(row[0] + "," + row[1] + "," + key_fields[0].upper() + "\n") file.write(row[0] + "," + row[1] + "\n") G.add_edge(row[0], row[1]) print(G.order()) print(G.size()) print(G.nodes[""]) #clc = nx.closeness_centrality(G)Table of Contents1  Qtensor simulation times1.1  Get cost estimation1.2  Simulate the circuit1.3  Compare estimation with actual time1.3.1  Explore costs for different edges1.3.2  Determine dependence of estimaiton vs time1.4  Analyze simulator FLOP/s1.4.1  Plot time vs estimated FLOP1.4.2  Estimate FLOP/s1.4.3  Compare with matrix multiplication2  CLI3  Nbdev stuff Compare qtensor with statevector simulations#export import sys import numpy as np import matplotlib.pyplot as plt import qtensor as qt from cartesian_explorer import Explorer #export import matplotlib as mpl from cycler import cycler mpl.rcParams['axes.prop_cycle'] = cycler(color=['#db503d', '#02C6E0']) #export ex = Explorer()Qtensor simulation times Get cost estimationN = 1000 p = 4 edge_idx = 7 gamma, beta = [.1]*p, [.3]*p graph = qt.toolbox.random_graph(nodes=N, degree=3) comp = qt.QtreeQAOAComposer(graph, gamma=gamma, beta=beta) comp.energy_expectation_lightcone(list(graph.edges())[edge_idx]) tn = qt.optimisation.TensorNet.QtreeTensorNet.from_qtree_gates(comp.circuit) opt = qt.optimisation.Optimizer.DefaultOptimizer() peo, _ = opt.optimize(tn) costs, mems = tn.simulation_cost(peo) plt.plot(costs, label='flops') plt.plot(mems, label='memory') plt.yscale('log') plt.legend() plt.grid() plt.title('Simulation cost per step') print(f'Total FLOPS={sum(costs)/1e9} G, Memory={max(mems)/1e9} G')Total FLOPS=0.001157652 G, Memory=0.000589824 GSimulate the circuitbackend = qt.PerfNumpyBackend(print=False) sim = qt.QtreeSimulator(bucket_backend=backend) sim.simulate(comp.circuit)Compare estimation with actual timeprofile_results = backend._profile_results step_times = [x[1] for x in profile_results.values()] ignored_vars = tn.bra_vars+tn.ket_vars plt.plot(step_times[len(ignored_vars):], label='Time') plt.yscale('log') plt.grid() plt.twinx() plt.plot(costs, label='Theoretical FLOPS', color='blue') plt.yscale('log') plt.title('Time in simulation for each step') 
lastN = 200 axins = plt.gca().inset_axes([0.1, 0.5, 0.47, 0.47]) axins.plot(step_times[-lastN:] ) axins.set_yscale('log') axins.grid() plt.twinx(axins) plt.plot(costs[-lastN:], color='blue') plt.yscale('log') print(f'Total time={sum(step_times)}')Explore costs for different edges#export @ex.provider def graph(n, d, seed): return qt.toolbox.random_graph(nodes=n, degree=d, seed=seed) @ex.provider def circuit(graph, edge_idx, p, composer_type='cone'): gamma, beta = [.1]*p, [.3]*p if composer_type=='cylinder': comp = qt.OldQtreeQAOAComposer(graph, gamma=gamma, beta=beta) if composer_type=='cone': comp = qt.QtreeQAOAComposer(graph, gamma=gamma, beta=beta) comp.energy_expectation_lightcone(list(graph.edges())[edge_idx]) return tuple(comp.circuit) @ex.provider def tn(circuit): return qt.optimisation.TensorNet.QtreeTensorNet.from_qtree_gates(circuit) @ex.provider def peo(tn): opt = qt.optimisation.Optimizer.DefaultOptimizer() peo, _ = opt.optimize(tn) return tuple(peo) @ex.provider def sim_costs(tn, peo): opt = qt.optimisation.Optimizer.DefaultOptimizer() peo, _ = opt.optimize(tn) costs, mems = tn.simulation_cost(peo) return costs, mems @ex.provider def sum_flops(sim_costs): flops, mems = sim_costs return sum(flops) #export @ex.provider def step_flops(sim_costs): flops, mems = sim_costs return tuple(flops) @ex.provider def max_mem(sim_costs): flops, mems = sim_costs return max(mems) #export SEED=107 fig = ex.plot_variables2d(('sum_flops', 'max_mem'), n=[N], p=[3], d=[3,4], edge_idx=range(30), seed=[SEED] ) for ax in fig.axes: ax.set_yscale('log') ax.grid()Determine dependence of estimaiton vs time Determine easy edges from previous graphs.#export EDGE_IDX_FOR_SEED = { 107: [2, 3, 10, 15] } EDGE_IDX_FOR_SEED_JLSE = { 107: [2, 4, 8, 14, 15, 21] } edge_indices = EDGE_IDX_FOR_SEED[SEED] ds = [3, 4] p = 3 #export @ex.provider def sim_profile(circuit, tn): backend = qt.PerfNumpyBackend(print=False) sim = qt.QtreeSimulator(bucket_backend=backend) sim.simulate(circuit) data = backend._profile_results return tuple(tuple([tuple(x[0]), x[1]]) for x in data.values()) @ex.provider def step_sim_time(sim_profile, tn): ignored_vars = tn.bra_vars+tn.ket_vars times = [x[1] for x in sim_profile] return tuple(times[len(ignored_vars):]) f = ex.draw_dependency_graph(figsize=(7,6), node_size=20) estimators = ex.map_variable('step_flops', d=ds, edge_idx=edge_indices, n=[N], p=[p]) times = ex.map_variable('step_sim_time', d=ds, edge_idx=edge_indices, n=[N], p=[p]) est_flat = np.concatenate(estimators.flatten()) times_flat = np.concatenate(times.flatten()) filt = times_flat<1e-1 plt.scatter(est_flat[filt], times_flat[filt]) plt.grid() plt.xlabel('estimated FLOP') plt.ylabel('Runtime')Analyze simulator FLOP/s Plot time vs estimated FLOP#export def plot_with_filter(est_flat, times_flat): filt = (est_flat>1e4) #& (times_flat>1e-4) est_flat_filtered = est_flat[filt] times_flat_filtered = times_flat[filt] # Fit times log_fit_coef = np.polyfit(np.log(est_flat_filtered), np.log(times_flat_filtered), 1) fit_coef = np.polyfit(est_flat_filtered, times_flat_filtered, 1) print('Lin fit:', fit_coef) print('Log fit:', log_fit_coef) fit_fn = np.poly1d(log_fit_coef) # Plot scatter with filtered data plt.scatter(est_flat_filtered, times_flat_filtered) xfit = 10**np.linspace(4, 7, 100) plt.plot(xfit, np.exp(fit_fn(np.log(xfit))), color='blue') plt.loglog() plt.xlabel('estimated FLOP') plt.ylabel('Runtime') return log_fit_coef, fit_coef log_fit_coef, fit_coef = plot_with_filter(est_flat, times_flat)Let's estimate our factual FLOPS on a 
laptop from worst-case on linear plot Estimate FLOP/sFLOP = 1e6/.03 print(f'Factual FLOPS on a laptop {FLOP:e}')Or use logarithmic fitFLOP_logfit = np.exp(-log_fit_coef[1]) print(f'Factual FLOPS on a laptop, from log fit {FLOP_logfit:e}') N = 500 matmul_flop = N**2*(N-1) x, y = np.random.randn(2, N, N) %timeit np.matmul(x,y)Compare with matrix multiplicationFLOPS_matmul = matmul_flop/4.65e-3 print(f'FLOPS on this laptop for matrix mul: {FLOPS_matmul:e}') print(f'Simulator inefficiency: {FLOPS_matmul/FLOP_logfit}') print(f'Simulator optimality: {FLOP_logfit/FLOPS_matmul}') #export import timeit def get_log_flops_vs_matmul(log_fit_coef): FLOPS_logfit = np.exp(-log_fit_coef[1]) N = 300 matmul_flop = N**2*(N-1) x, y = np.random.randn(2, N, N) number = 100 matmul_time = timeit.timeit(lambda: np.matmul(x,y) , number=number)/number FLOPS_matmul = matmul_flop/matmul_time return FLOPS_logfit, FLOPS_matmulCLIUse click to run some scripts from this notebook# export from qtensor_specs import cli, click @cli.command() @click.argument('filename') def time_vs_flops_plot(filename): """ Plots times and estimated FLOP for each step of several QAOA energy computation contractions. Currently using - random regular graphs with degree=3,4 - p = 3 - N = 1000 """ edge_indices = EDGE_IDX_FOR_SEED[SEED] ds = [3, 4] p = 3 N = 1000 estimators = ex.map_variable('step_flops', d=ds, edge_idx=edge_indices, n=[N], p=[p], seed=[SEED]) maxmems = ex.map_variable('max_mem', d=ds, edge_idx=edge_indices, n=[N], p=[p], seed=[SEED]) if np.max(maxmems)>1e10: print('memory estimations:', maxmems) raise Exception('Will get too large tetsors!!') times = ex.map_variable('step_sim_time', d=ds, edge_idx=edge_indices, n=[N], p=[p], seed=[SEED]) est_flat = np.concatenate(estimators.flatten()) times_flat = np.concatenate(times.flatten()) log_fit_coef, fit_coef = plot_with_filter(est_flat, times_flat) plt.savefig(filename) fit, matmul = get_log_flops_vs_matmul(log_fit_coef) print('===Results===') print(f'Simulator fitted flops: {fit/1e9:.5} G') print(f'Matmul flops: {matmul/1e9:.5} G') print(f'Simulator optimality: {fit/matmul}')Nbdev stuff#hide #default_exp time_vs_flop from nbdev.export import * notebook2script(fname='Time_vs_FLOP.ipynb')Converted Time_vs_FLOP.ipynb.Cluster AnalysisIs a subset of unsupervised learning methods Introduction to Unsupervised Learning* Unsupervised Learning is a type of Machine learning to draw inferences from unlabelled datasets.* Model tries to find relationship between data.* Most common unsupervised learning method is clustering which is used for exploratory data analysis to find hidden patterns or grouping in data Clustering* A learning technique to group a set of objects in such a way that objects of same group are more similar to each other than from objects of other group.* Applications of clustering are as follows - Automatically organizing the data - Labeling data - Understanding hidden structure of data - News Cloustering for grouping similar news together - Customer Segmentation - Suggest social groupsimport numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sklearn.datasets import make_blobs X,y = make_blobs(n_features=2, n_samples=1000, centers=3, cluster_std=1, random_state=3) plt.scatter(X[:,0], X[:,1], s=5, alpha=.5)Distance or Similarity Function* Data belonging to same cluster are similar & data belonging to different cluster are different. * We need mechanisms to measure similarity & differences between data. 
* This can be achieved using any of the below techniques.from sklearn.metrics.pairwise import euclidean_distances,cosine_distances,manhattan_distances X = [[0, 1], [1, 1]] # X = np.random.normal(0,1,(100,2)) euclidean_distances(X, X) euclidean_distances(X, [[0,0]]) cosine_distances(X,X) manhattan_distances(X,X)4. Clustering as an Optimization Problem* Maximize inter-cluster distances* Minimize intra-cluster distances Partitioning Method KMeans* Minimizing creteria : within-cluster-sum-of-squares. KMeans Algorithm1. Initialize k centroids.2. Assign each data to the nearest centroid, these step will create clusters.3. Recalculate centroid - which is mean of all data belonging to same cluster.4. Repeat steps 2 & 3, till there is no data to reassign a different centroid.![](https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/K-means_convergence.gif/617px-K-means_convergence.gif)from sklearn.datasets import make_blobs, make_moons X,y = make_blobs(n_features=2, n_samples=1000, cluster_std=.5) plt.scatter(X[:,0], X[:,1],s=10) from sklearn.cluster import KMeans, MeanShift kmeans = KMeans(n_clusters=3) kmeans.fit(X) plt.scatter(X[:,0], X[:,1],s=10, c=kmeans.predict(X)) X, y = make_moons(n_samples=1000, noise=.09) plt.scatter(X[:,0], X[:,1],s=10) kmeans = KMeans(n_clusters=2) kmeans.fit(X) plt.scatter(X[:,0], X[:,1],s=10, c=kmeans.predict(X)) kmeans = KMeans(n_clusters=4) centers = [[1, 1], [-.75, -1], [1, -1], [-3, 2]] X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6) plt.scatter(X[:,0], X[:,1],s=10) kmeans = KMeans(n_clusters=4)Mean shiftms = MeanShift() kmeans.fit(X) ms.fit(X) plt.scatter(X[:,0], X[:,1],s=10, c=ms.predict(X)) plt.scatter(X[:,0], X[:,1],s=10, c=kmeans.predict(X))More complex dataX, y = make_moons(n_samples=1000, noise=.05) plt.scatter(X[:,0], X[:,1],s=10)Agglomerative Clusteringfrom sklearn.cluster import AgglomerativeClustering agc = AgglomerativeClustering(linkage='single') agc.fit(X) plt.scatter(X[:,0], X[:,1],s=10,c=agc.labels_)DBSCANcenters = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0) plt.scatter(X[:,0], X[:,1],s=10) from sklearn.cluster import DBSCAN from sklearn.preprocessing import StandardScaler X = StandardScaler().fit_transform(X) db = DBSCAN(eps=0.3, min_samples=10).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ plt.scatter(X[:,0], X[:,1],s=10,c=labels)E2E ML on GCP: MLOps stage 5 : deployment: get started with configuring autoscaling for deployment View on GitHub Run in Colab Open in Vertex AI Workbench OverviewThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 5 : deployment: get started with autoscaling for deployment. DatasetThis tutorial uses a pre-trained image classification model from TensorFlow Hub, which is trained on ImageNet dataset.Learn more about [ResNet V2 pretained model](https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5). 
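Before diving into the steps, here is a quick sketch of how the pretrained classifier can be pulled from that TensorFlow Hub URL and wrapped in a Keras model (this mirrors the download step listed in the objective below; the 224x224x3 input size is an assumption based on what this classification model expects):

import tensorflow as tf
import tensorflow_hub as hub

# Wrap the pretrained ResNet V2 101 ImageNet classifier from TF Hub in a Keras model.
tfhub_model = tf.keras.Sequential(
    [hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5")]
)
tfhub_model.build([None, 224, 224, 3])
tfhub_model.summary()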
ObjectiveIn this tutorial, you learn how to use fine-tune control auto-scaling configuration when deploying a `Model` resource to an `Endpoint` resource.This tutorial uses the following Google Cloud ML services:- `Vertex ML Prediction`The steps performed include:- Download a pretrained image classification model from TensorFlow Hub.- Upload the pretrained model as a `Model` resource.- Create an `Endpoint` resource.- Deploy `Model` resource for no-scaling (single node).- Deploy `Model` resource for manual scaling.- Deploy `Model` resource for auto-scaling.- Fine-tune scaling thresholds for CPU utilization.- Fine-tune scaling thresholds for GPU utilization.- Deploy mix of CPU and GPU model instances with auto-scaling to an `Endpoint` resource. CostsThis tutorial uses billable components of Google Cloud:- Vertex AI- Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. InstallationsInstall the packages required for executing this notebook.import os # The Vertex AI Workbench Notebook product has specific requirements IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME") IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists( "/opt/deeplearning/metadata/env_version" ) # Vertex AI Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_WORKBENCH_NOTEBOOK: USER_FLAG = "--user" # Install the packages ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG -q ! pip3 install --upgrade google-cloud-storage $USER_FLAG -q ! pip3 install tensorflow-hub $USER_FLAG -qRestart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True)Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, Compute Engine and Cloud Storage APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage_component).1. If you are running this notebook locally, you need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! 
gcloud config set project $PROJECT_IDRegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).REGION = "[your-region]" # @param {type: "string"} if REGION == "[your-region]": REGION = "us-central1"TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")Authenticate your Google Cloud account**If you are using Vertex AI Workbench Notebooks**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.1. **Click Create service account**.2. In the **Service account name** field, enter a name, and click **Create**.3. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.4. Click Create. A JSON file that contains your key downloads to your local environment.5. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.# If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Vertex AI Workbench, then don't execute this code IS_COLAB = "google.colab" in sys.modules if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv( "DL_ANACONDA_HOME" ): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS ''Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. 
Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} BUCKET_URI = f"gs://{BUCKET_NAME}" if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP BUCKET_URI = "gs://" + BUCKET_NAME**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.! gsutil mb -l $REGION $BUCKET_URIFinally, validate access to your Cloud Storage bucket by examining its contents:! gsutil ls -al $BUCKET_URISet up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constantsimport google.cloud.aiplatform as aiplatform import tensorflow as tf import tensorflow_hub as hubInitialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket.aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)Set hardware acceleratorsYou can set hardware accelerators for training and prediction.Set the variables `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)Otherwise specify `(None, None)` to use a container image to run on a CPU.Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locationsaccelerators).Learn more about [GPU compatibility by Machine Type](https://cloud.google.com/vertex-ai/docs/training/configure-computegpu-compatibility-table).if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)Set pre-built containersSet the pre-built Docker container image for prediction.For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2.5".replace(".", "-") GPU_VERSION = "tf2-gpu.{}".format(TF) CPU_VERSION = "tf2-cpu.{}".format(TF) DEPLOY_IMAGE_GPU = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], GPU_VERSION ) DEPLOY_IMAGE_CPU = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], CPU_VERSION ) print("Deployment:", DEPLOY_IMAGE_GPU, DEPLOY_IMAGE_CPU, DEPLOY_GPU, DEPLOY_NGPU)Set machine typeNext, set the machine type to use for prediction.- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. 
- `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", DEPLOY_COMPUTE)Get pretrained model from TensorFlow HubFor demonstration purposes, this tutorial uses a pretrained model from TensorFlow Hub (TFHub), which is then uploaded to a `Vertex AI Model` resource. Once you have a `Vertex AI Model` resource, the model can be deployed to a `Vertex AI Endpoint` resource. Download the pretrained modelFirst, you download the pretrained model from TensorFlow Hub. The model gets downloaded as a TF.Keras layer. To finalize the model, in this example, you create a `Sequential()` model with the downloaded TFHub model as a layer, and specify the input shape to the model.tfhub_model = tf.keras.Sequential( [hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5")] ) tfhub_model.build([None, 224, 224, 3]) tfhub_model.summary()Save the model artifactsAt this point, the model is in memory. Next, you save the model artifacts to a Cloud Storage location.MODEL_DIR = BUCKET_URI + "/model" tfhub_model.save(MODEL_DIR)Upload the TensorFlow Hub model to a `Vertex AI Model` resourceFinally, you upload the model artifacts from the TFHub model and serving function into a `Vertex AI Model` resource.*Note:* When you upload the model artifacts to a `Vertex AI Model` resource, you specify the corresponding deployment container image. In this example, you are using a CPU only deployment container.model = aiplatform.Model.upload( display_name="example_" + TIMESTAMP, artifact_uri=MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE_CPU, ) print(model)Creating an `Endpoint` resourceYou create an `Endpoint` resource using the `Endpoint.create()` method. At a minimum, you specify the display name for the endpoint. Optionally, you can specify the project and location (region); otherwise the settings are inherited by the values you set when you initialized the Vertex AI SDK with the `init()` method.In this example, the following parameters are specified:- `display_name`: A human readable name for the `Endpoint` resource.- `project`: Your project ID.- `location`: Your region.- `labels`: (optional) User defined metadata for the `Endpoint` in the form of key/value pairs.This method returns an `Endpoint` object.Learn more about [Vertex AI Endpoints](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api).endpoint = aiplatform.Endpoint.create( display_name="example_" + TIMESTAMP, project=PROJECT_ID, location=REGION, labels={"your_key": "your_value"}, ) print(endpoint)Deploying `Model` resources to an `Endpoint` resource.You can deploy one of more `Vertex AI Model` resource instances to the same endpoint. Each `Vertex AI Model` resource that is deployed will have its own deployment container for the serving binary. *Note:* For this example, you specified the deployment container for the TFHub model in the previous step of uploading the model artifacts to a `Vertex AI Model` resource. 
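Since several models are deployed to this one `Endpoint` resource over the course of the tutorial, a small helper like the sketch below (hypothetical, built only on the `endpoint.gca_resource.deployed_models` property that the tutorial itself uses) can be handy for listing what is currently deployed before and after each `deploy()`/`undeploy()` call.

```python
def show_deployed_models(endpoint):
    """Print one line per model currently deployed to the given Endpoint."""
    deployed = endpoint.gca_resource.deployed_models
    print(f"{len(deployed)} model(s) deployed to {endpoint.resource_name}")
    for dm in deployed:
        # Each entry is a DeployedModel with its own id, display name, and
        # dedicated (machine) resources.
        print(" -", dm.id, dm.display_name, dm.dedicated_resources.machine_spec.machine_type)

# Before the first deployment below, this reports zero deployed models.
show_deployed_models(endpoint)
```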
ScalingA `Vertex AI Endpoint` resource supports three types of scaling:- No Scaling: The serving binary is deployed to a single VM instance.- Manual Scaling: The serving binary is deployed to a fixed number of multiple VM instances.- Auto Scaling: The number of VM instances that the serving binary is deployed to varies depending on load. No ScalingIn the next example, you deploy the `Vertex AI Model` resource to a `Vertex AI Endpoint` resource without any scaling -- i.e., a single VM (node) instance. In other words, when the model is deployed, a single VM instance is provisioned and stays provisioned until the model is undeployed.In this example, you deploy the model with a minimal set of parameters, as follows:- `model`: The `Model` resource to deploy.- `machine_type`: The machine type for each VM instance.- `deployed_model_display_name`: The human readable name for the deployed model instance.For no-scaling, the single VM instance is provisioned during the deployment of the model. Due to the requirements to provision the resource, this may take up to a few minutes.response = endpoint.deploy( model=model, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, )Display scaling configurationOnce your model is deployed, you can query the `Endpoint` resource to retrieve the scaling configuration for your deployed model with the property `endpoint.gca_resource.deployed_models`.Since an `Endpoint` resource may have multiple deployed models, the `deployed_models` property returns a list, with one entry per deployed model. In this example, there is a single deployed model and you retrieve the scaling configuration as the first entry in the list: `deployed_models[0]`. You then display the property `dedicated_resources`, which will return the machine type and min/max number of nodes to scale. For no-scaling, the min/max nodes will both be set to one.*Note:* The deployed model identifier refers to the deployed instance of the model and not the model resource identifier.print(endpoint.gca_resource.deployed_models[0].dedicated_resources) deployed_model_id = endpoint.gca_resource.deployed_models[0].idUndeploy the modelWhen you are done doing predictions, you undeploy the model from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.endpoint.undeploy(deployed_model_id)Manual scalingIn the next example, you deploy the `Vertex AI Model` resource to a `Vertex AI Endpoint` resource for manual scaling -- a fixed number (greater than 1) of VM instances. In other words, when the model is deployed, the fixed number of VM instances is provisioned and stays provisioned until the model is undeployed.In this example, you deploy the model with a minimal set of parameters, as follows:- `model`: The `Model` resource to deploy.- `machine_type`: The machine type for each VM instance.- `deployed_model_display_name`: The human readable name for the deployed model instance.- `min_replica_count`: The minimum number of VM instances (nodes) to provision.- `max_replica_count`: The maximum number of VM instances (nodes) to provision.For manual scaling, the fixed number of VM instances is provisioned during the deployment of the model. 
*Note:* For manual scaling, the minimum and maximum number of nodes are set to the same value.MIN_NODES = MAX_NODES = 2 response = endpoint.deploy( model=model, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES )Display scaling configurationIn this example, there is a single deployed model and you retrieve the scaling configuration as the first entry in the list: `deployed_models[0]`. You then display the property `dedicated_resources`, which will return the machine type and min/max number of nodes to scale. For manual scaling, the min/max nodes will be set to the same value, greater than one.print(endpoint.gca_resource.deployed_models[0].dedicated_resources) deployed_model_id = endpoint.gca_resource.deployed_models[0].idUndeploy the modelWhen you are done doing predictions, you undeploy the model from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.endpoint.undeploy(deployed_model_id)Auto scalingIn the next example, you deploy the `Vertex AI Model` resource to a `Vertex AI Endpoint` resource for auto scaling -- a variable number (greater than 1) of VM instances. In other words, when the model is deployed, the minimum number of VM instances is provisioned. As the load varies, the number of provisioned instances may dynamically increase up to the maximum number of VM instances, and deprovision back down to the minimum number of VM instances. The number of provisioned VM instances will never be less than the minimum or more than the maximum.In this example, you deploy the model with a minimal set of parameters, as follows:- `model`: The `Model` resource to deploy.- `machine_type`: The machine type for each VM instance.- `deployed_model_display_name`: The human readable name for the deployed model instance.- `min_replica_count`: The minimum number of VM instances (nodes) to provision.- `max_replica_count`: The maximum number of VM instances (nodes) to provision.For auto-scaling, the minimum number of VM instances is provisioned during the deployment of the model. *Note:* For auto scaling, the minimum number of nodes must be set to a value greater than zero. In other words, there will always be at least one VM instance provisioned.MIN_NODES = 1 MAX_NODES = 2 response = endpoint.deploy( model=model, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES )Display scaling configurationIn this example, there is a single deployed model and you retrieve the scaling configuration as the first entry in the list: `deployed_models[0]`. You then display the property `dedicated_resources`, which will return the machine type and min/max number of nodes to scale. For auto scaling, the max nodes will be set to a value greater than the min.print(endpoint.gca_resource.deployed_models[0].dedicated_resources) deployed_model_id = endpoint.gca_resource.deployed_models[0].idUndeploy the modelWhen you are done doing predictions, you undeploy the model from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.endpoint.undeploy(deployed_model_id)Setting scaling thresholdsAn `Endpoint` resource supports auto-scaling based on two metrics: CPU utilization and GPU duty cycle. Both metrics are measured by taking the average utilization of each deployed model. 
Once the utilization metric exceeds the threshold for a certain amount of time, the number of VM instances (nodes) adjusts up or down accordingly. CPU thresholdsIn the previous examples, the VM instances were deployed with CPUs only -- i.e., no hardware accelerators. By default (in auto-scaling), the CPU utilization metric is set to 60%. When deploying the model, specify the parameter `autoscaling_target_cpu_utilization` to set a non-default value.MIN_NODES = 1 MAX_NODES = 4 response = endpoint.deploy( model=model, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES, autoscaling_target_cpu_utilization=50 )Display scaling configurationIn this example, there is a single deployed model and you retrieve the scaling configuration as the first entry in the list: `deployed_models[0]`. You then display the property `dedicated_resources`, which will return the machine type and min/max number of nodes to scale, and the target value for the CPU utilization: `autoscaling_metric_specs`.print(endpoint.gca_resource.deployed_models[0].dedicated_resources) deployed_model_id = endpoint.gca_resource.deployed_models[0].idUndeploy the modelWhen you are done doing predictions, you undeploy the model from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.endpoint.undeploy(deployed_model_id)Upload TensorFlow Hub model for GPU deployment imageNext, you upload a second instance of your TensorFlow Hub model as a `Model` resource -- but where the corresponding serving container supports GPUs.model_gpu = aiplatform.Model.upload( display_name="example_" + TIMESTAMP, artifact_uri=MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE_GPU, ) print(model_gpu)GPU thresholdsIn this example, the deployment VM instances are configured to use hardware accelerators -- i.e., GPUs -- by specifying the following parameters:- `accelerator_type`: The type of hardware (e.g., GPU) accelerator.- `accelerator_count`: The number of hardware accelerators per provisioned VM instance.The type and number of GPUs supported is specific to machine type and region.Learn more about [GPU types and number per machine type](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute).Learn more about [GPU types available per region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).By default (in auto-scaling), the GPU duty cycle metric is set to 60%. When deploying the model, specify the parameter `autoscaling_target_accelerator_duty_cycle` to set a non-default value.When serving, if either the CPU utilization or GPU duty cycle exceeds or falls below its threshold for a certain amount of time, then auto-scaling is triggered.MIN_NODES = 1 MAX_NODES = 2 response = endpoint.deploy( model=model_gpu, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, accelerator_type=DEPLOY_GPU.name, accelerator_count=DEPLOY_NGPU, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES, autoscaling_target_accelerator_duty_cycle=50 )Display scaling configurationIn this example, there is a single deployed model and you retrieve the scaling configuration as the first entry in the list: `deployed_models[0]`. 
You then display the property `dedicated_resources`, which will return the machine type and min/max number of nodes to scale, and the target value for the GPU duty cycle: `autoscaling_metric_specs`.print(endpoint.gca_resource.deployed_models[0].dedicated_resources) deployed_model_id = endpoint.gca_resource.deployed_models[0].idDeploy multiple models to `Endpoint` resourceNext, you deploy two models to the same `Endpoint` resource and split the predictio request traffic between them. One model will use GPUs, with 80% of the traffic and the other the CPU with 20% of the traffic.You already have the GPU version of the model deployed to the `Endpoint` resource. In this example, you add a second model instance -- the CPU version -- to the same `Endpoint` resource, and specify the traffic split between the models. In this example, the `traffic_split` parameter is specified as follows:- `"0": 20`: The model being deployed (default ID is 0) will receive 20% of the traffic.- `deployed_model_id: 80`: The existing deployed model (specified by its deployed model ID) will receive 80% of the traffic.response = endpoint.deploy( model=model, deployed_model_display_name="example_" + TIMESTAMP, machine_type=DEPLOY_COMPUTE, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES, autoscaling_target_cpu_utilization=50, traffic_split={"0": 20, deployed_model_id: 80 } )Display scaling configurationIn this example, there are two deployed models, the CPU and GPU versions.print(endpoint.gca_resource.deployed_models)Undeploy the modelsWhen you are done doing predictions, you undeploy all the models from the `Endpoint` resouce. This deprovisions all compute resources and ends billing for the deployed model.endpoint.undeploy_all()Delete the model instancesThe method 'delete()' will delete the model.model.delete() model_gpu.delete()Delete the endpointThe method 'delete()' will delete the endpoint.endpoint.delete()Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial.# Set this to true only if you'd like to delete your bucket delete_bucket = True if delete_bucket or os.getenv("IS_TESTING"): ! gsutil rm -r $BUCKET_URIGroup Analysis - FOOOFed EEG Analysis: TaskExamine the results of parameterizing spectra for task EEG data, and predicting behavioural data. 
Throughout these analyses, we compare the `YNG` and `OLD` subject groups.This notebook does the analyses and creates the plots for Figure 6.%matplotlib inline %config InlineBackend.figure_format='retina' import warnings from copy import deepcopy from os.path import join as pjoin import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.stats import spearmanr, ttest_1samp, normaltest from statsmodels.stats.anova import anova_lm from statsmodels.stats.diagnostic import compare_cox, compare_j from fooof.plts import plot_spectra from fooof.sim.gen import gen_aperiodic, gen_power_spectrum # Import custom code for this analysis import sys sys.path.append('../code') from plts import * from utils import * from data_mgmt import * from analysis import *Settings# Import general settings from settings import YNG_INDS, OLD_INDS from settings import RESULTS_PATH # Set folder for FOOOF results folder = 'FOOOF' # Model settings - which group to run group = 'ALL' # 'ALL', 'YNG', 'OLD' print('Number of YNG subjects: ', len(YNG_INDS)) print('Number of OLD subjects: ', len(OLD_INDS)) # Data settings srate = 512 tmin, tmax = -0.85, 1.1 times = np.arange(tmin, tmax, 1/srate) seg_times = [(-0.85, -0.35), (0.1, 0.6), (0.5, 1.0)] n_subjs = 31 dict_structure = {'YNG' : {}, 'OLD' : {}, 'ALL' : {}} load_side = 'Contra' # Set the time segments indices to compute the difference measures with i1, i2 = 2, 0 # Wether to save out plots or not SAVE_FIG = FalseCheck dropped trials# Load dropped trials & components dropped_trials = np.load(pjoin(RESULTS_PATH, 'Group', 'dropped_trials.npy')) dropped_components = np.load(pjoin(RESULTS_PATH, 'Group', 'dropped_components.npy')) # Check dropped trials for each subject print('SubNum \t\t # Dropped Trials \t # Dropped Components') for ind, trials, components in zip(range(n_subjs), dropped_trials, dropped_components): temp_trials = trials[trials < 999.] temp_comps = components[components < 999.] #print(ind, '\t\t', len(temp_trials), '\t\t\t', len(temp_comps))SubNum # Dropped Trials # Dropped ComponentsGroup FOOOFing - Trial Averaged DataData objects are 3d arrays, with the shape `[n_loads, n_subjs, n_times]`. 
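To make the `[n_loads, n_subjs, n_times]` layout concrete, here is a minimal NumPy sketch (with made-up values and illustrative subject indices, not the real data or `YNG_INDS`) showing the indexing-and-averaging pattern used later in this notebook, e.g. `all_offsets[:, inds, :].mean(0).mean(0)`.

```python
import numpy as np

# Dummy stand-in with the same layout as the task data: [n_loads, n_subjs, n_times]
n_loads, n_subjs, n_times = 3, 31, 3
data = np.random.randn(n_loads, n_subjs, n_times)

# Select a subject group along axis 1 (illustrative indices only), then average
# first over loads and then over subjects, leaving one value per time segment --
# the same pattern used for the group-level spectrum reconstructions below.
group_inds = [0, 1, 2, 3, 4]
group_profile = data[:, group_inds, :].mean(0).mean(0)
print(group_profile.shape)  # (3,) -> one value per time segment
```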
Load Data# Load behavioural data behav_data = pd.read_csv(pjoin(RESULTS_PATH, 'Behav', 'neural_aging_data_behaviour.csv')) # Convert data types behav_data['Age'] = behav_data['Age'].astype('str') behav_data['Load'] = behav_data['Load'].astype('str') # Calculate average behaviour across loads avg_behav = behav_data.groupby('SubjID').mean() # Load and extract FOOOF data all_offsets, all_exps = load_fooof_task_ap(RESULTS_PATH, load_side, folder) all_alphas_cf = load_fooof_task_pe(RESULTS_PATH, load_side, 0, folder) all_alphas_pw = load_fooof_task_pe(RESULTS_PATH, load_side, 1, folder) all_alphas_bw = load_fooof_task_pe(RESULTS_PATH, load_side, 2, folder) all_r2s, all_errs = load_fooof_task_md(RESULTS_PATH, load_side, folder) # Load canonical alpha analysis canalph_group = np.load(pjoin(RESULTS_PATH, 'Group', 'canonical_group.npy')) # Load individualized frequency canonical alpha analysis canalph_icf_group = np.load(pjoin(RESULTS_PATH, 'Group', 'canonical_icf_group.npy')) # Average across analytic alpha measures to get canonical alpha measure seg_masks = [] for seg in seg_times: seg_masks.append(np.logical_and(times >= seg[0], times <= seg[1])) canalpha = np.zeros_like(all_alphas_pw) canal_icf = np.zeros_like(all_alphas_pw) for subi, (canalph_data, canalph_icf_data) in enumerate(zip(canalph_group, canalph_icf_group)): for lodi in range(3): for segi, mask in enumerate(seg_masks): canalpha[lodi, subi, segi] = np.mean(canalph_data[lodi, mask]) canal_icf[lodi, subi, segi] = np.mean(canalph_icf_data[lodi, mask])Data Checks# Check number of missing FOOOFed alphas print('Number of missing FOOOFed alphas: \t', np.sum(np.isnan(all_alphas_pw))) # Check if there are any NaN values nans = np.isnan(all_alphas_pw) print('Total number of NaN values:\t\t', np.sum(nans))Total number of NaN values: 0Data Management# Make a data dictionary - each with shape [n_conds, n_times] data_dict = deepcopy(dict_structure) diff_data_dict = deepcopy(dict_structure) behav_dict = deepcopy(dict_structure) # Collect the data and labels into lists for checking through labels = ['offset', 'exponent', 'alpha_cf', 'alpha_pw', 'alpha_bw', 'canalpha', 'canal_icf'] datas = [all_offsets, all_exps, all_alphas_cf, all_alphas_pw, all_alphas_bw, canalpha, canal_icf] # Set up data & diff_data dicts for label, data in zip(labels, datas): data_dict['YNG'][label], data_dict['OLD'][label] = reshape_data(data) data_dict['ALL'][label] = np.concatenate([data_dict['YNG'][label], data_dict['OLD'][label]]) diff_data_dict['YNG'][label] = calc_diff(data_dict['YNG'][label], i1, i2) diff_data_dict['OLD'][label] = calc_diff(data_dict['OLD'][label], i1, i2) diff_data_dict['ALL'][label] = np.concatenate([diff_data_dict['YNG'][label], diff_data_dict['OLD'][label]]) # Set up the behavioural data dict for label in ["d'", "Load", 'CDA']: behav_dict['ALL'][label] = behav_data[label].values behav_dict['YNG'][label] = behav_data[behav_data['Age'] == '1'][label].values behav_dict['OLD'][label] = behav_data[behav_data['Age'] == '2'][label].valuesData CheckingCheck the FOOOF fit measures, checking whether there is a systematic difference between the groups that could explain the differences.# Check if the fit metrics vary by group for label, data in zip(['Error', 'R^2'], [all_errs, all_r2s]): print(label) m1, m2 = np.nanmean(data[:, YNG_INDS, :]), np.nanmean(data[:, OLD_INDS, :]) print('\tMeans: \t\t {:1.4f} \t {:1.4f} \t diff: {:1.4f}'.format(m1, m2, np.abs(m1-m2))) print_stat('\tt-test', *ttest_ind(data[:, YNG_INDS, :].flatten(), data[:, OLD_INDS, :].flatten())) # 
Check the correlation between FOOOF fit error & R^2 print_stat('Err-R^2 Corr', *spearmanr(all_errs.flatten(), all_r2s.flatten())) # Check if the fit metrics correlate with measured FOOOF parameters print('\t\t Error \t R^2') for label, data in zip(labels, datas): print_stat(label, spearmanr(all_errs.flatten(), data.flatten())[0], spearmanr(all_r2s.flatten(), data.flatten())[0])Error R^2 offset: 0.0828 0.6035 exponent: -0.1176 0.6869 alpha_cf: 0.1621 -0.0953 alpha_pw: 0.7591 0.1712 alpha_bw: 0.2681 0.2580 canalpha: 0.2465 -0.0506 canal_icf: 0.2475 -0.0955Descriptive Data Checks# Print out mean values, per group print('Average Values per measure, per group: \n') print('\t\t YNG \t\t OLD') for label in labels: print_stat(label, np.nanmean(diff_data_dict['YNG'][label]), np.nanmean(diff_data_dict['OLD'][label])) # Print out standard deviations, per group print('Standard deviation per measure, per group: \n') print('\t\t YNG \t\t OLD') for label in labels: print_stat(label, np.nanstd(diff_data_dict['YNG'][label]), np.nanstd(diff_data_dict['OLD'][label])) # Check for normality, per measure, per group print('Normal test per measure, per group: \n') print('YNG\t\t stat \t p-val') for label in labels: print_stat(label, *normaltest(diff_data_dict['YNG'][label])) print('\nOLD\t\t stat \t p-val') for label in labels: print_stat(label, *normaltest(diff_data_dict['OLD'][label])) # Print out tests for group differences print('T-Tests for Differences Between Groups (YNG vs OLD), per Metric: \n') print('\t\t t-val \t p-val') for label in labels: print_stat(label, *nan_ttest(diff_data_dict['YNG'][label], diff_data_dict['OLD'][label])) # Check correlations of physiology to behaviour, per group print('Correlation (r-vals) of each metric with behaviour, per Group: \n') print('\t\t YNG \t\t OLD') for label in labels: print_stat(label, nan_corr(diff_data_dict['YNG'][label], behav_dict['YNG']["d'"])[0], nan_corr(diff_data_dict['OLD'][label], behav_dict['OLD']["d'"])[0])Correlation (r-vals) of each metric with behaviour, per Group: YNG OLD offset: 0.1435 0.0341 exponent: 0.2729 -0.1506 alpha_cf: 0.0938 -0.1061 alpha_pw: -0.1167 0.3955 alpha_bw: -0.1312 0.1600 canalpha: -0.2216 0.2717 canal_icf: -0.2401 0.3341Fit Models to Predict BehaviourPredict behaviour output from evoked responses of alpha and aperiodic parameters.# Set up model definitions models_defs = { 'base_model' : 'behav ~ load', 'cda_model' : 'behav ~ load + cda', 'canal_model' : 'behav ~ load + al_pw_dif', 'canicf_model' : 'behav ~ load + al_pw_dif', 'f_al_model' : 'behav ~ load + al_pw_dif', 'f_al_p_model' : 'behav ~ load + al_cf_dif + al_pw_dif + al_bw_dif', 'f_ap_model' : 'behav ~ load + off_dif + exp_dif', 'f_ap_p_model' : 'behav ~ load + off_dif + exp_dif + al_pw_dif', } # Set up model data models_data = { 'base_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load']}, 'cda_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'cda' : behav_dict[group]['CDA']}, 'canal_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'al_pw_dif' : diff_data_dict[group]['canalpha']}, 'canicf_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'al_pw_dif' : diff_data_dict[group]['canal_icf']}, 'f_al_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'al_pw_dif' : diff_data_dict[group]['alpha_pw']}, 'f_al_p_model' : lambda group : {'behav' : 
behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'al_cf_dif' : diff_data_dict[group]['alpha_cf'], 'al_pw_dif' : diff_data_dict[group]['alpha_pw'], 'al_bw_dif' : diff_data_dict[group]['alpha_bw']}, 'f_ap_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'off_dif' : diff_data_dict[group]['offset'], 'exp_dif' : diff_data_dict[group]['exponent']}, 'f_ap_p_model' : lambda group : {'behav' : behav_dict[group]["d'"], 'load' : behav_dict[group]['Load'], 'al_pw_dif' : diff_data_dict[group]['alpha_pw'], 'off_dif' : diff_data_dict[group]['offset'], 'exp_dif' : diff_data_dict[group]['exponent']}, } # Fit models, across groups models_results = deepcopy(dict_structure) for group in models_results.keys(): for model_name, model_def in models_defs.items(): models_results[group][model_name] = run_model(model_def, models_data[model_name](group), print_model=False) # Check the model performances - per model def per group print('\t\t\t R^2 Adj \t p-val') for group_label, group_models in models_results.items(): print(group_label) for model_label, model_results in group_models.items(): print_stat('\t' + model_label, model_results.rsquared_adj, model_results.f_pvalue) # Check a particular model group = 'OLD' model = 'f_ap_model' print(models_results[group][model].summary())OLS Regression Results ============================================================================== Dep. Variable: behav R-squared: 0.355 Model: OLS Adj. R-squared: 0.285 Method: Least Squares F-statistic: 5.095 Date: Sat, 17 Oct 2020 Prob (F-statistic): 0.00226 Time: 01:57:12 Log-Likelihood: -50.625 No. Observations: 42 AIC: 111.3 Df Residuals: 37 BIC: 119.9 Df Model: 4 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]Model ComparisonsExplicitly test for differences between different model fits. Comparing Nested Models`statsmodels` offers three tests for nested models: f test, lagrange multiplier, likelihood ratio. Note that these three can be called from a results object, as `compare_x_test` with `f`, `lm` and `lr` as `x`. F-test can also be run with `anova_lm`. Comparing Non-Nested ModelsStatmodels offers two tests for non-nested model: cox test & j testThey are better described in the R implementations:- cox_test: http://math.furman.edu/~dcs/courses/math47/R/library/lmtest/html/coxtest.html- j_test: http://math.furman.edu/~dcs/courses/math47/R/library/lmtest/html/jtest.html# Compare nested models: alpha models vs base models for group in ['OLD', 'YNG']: print('\nGroup: ', group, '\n') with warnings.catch_warnings(): warnings.simplefilter("ignore") print('Canonical alpha vs. Base Model') print(anova_lm(models_results[group]['base_model'], models_results[group]['canal_model'])) print('\n') print('FOOOF alpha vs. Base Model') print(anova_lm(models_results[group]['base_model'], models_results[group]['f_al_model'])) print('\n') print('FOOOF aperiodic vs. Base Model') print(anova_lm(models_results[group]['base_model'], models_results[group]['f_ap_model'])) # Compare different alpha models print('Canonical alpha vs. FOOOFed Alpha') for group in ['OLD', 'YNG']: print('Group: ', group) print_stat('\tAlpha-Model Compare', *compare_cox(models_results[group]['canal_model'], models_results[group]['f_al_model'])) # Compare if the + models are any better for group in ['OLD', 'YNG']: print('\nGroup: ', group, '\n') with warnings.catch_warnings(): warnings.simplefilter("ignore") print('FOOOF Alpha + vs. 
FOOOF Alpha') print(anova_lm(models_results[group]['f_al_model'], models_results[group]['f_al_p_model'])) print('\n') print('FOOOF AP + vs. FOOOF AP') print(anova_lm(models_results[group]['f_ap_model'], models_results[group]['f_ap_p_model']))Group: OLD FOOOF Alpha + vs. FOOOF Alpha df_resid ssr df_diff ss_diff F Pr(>F) 0 38.0 26.429804 0.0 NaN NaN NaN 1 36.0 26.202530 2.0 0.227274 0.156127 0.856027 FOOOF AP + vs. FOOOF AP df_resid ssr df_diff ss_diff F Pr(>F) 0 37.0 27.399601 0.0 NaN NaN NaN 1 36.0 24.018710 1.0 3.38089 5.067385 0.03058 Group: YNG FOOOF Alpha + vs. FOOOF Alpha df_resid ssr df_diff ss_diff F Pr(>F) 0 47.0 34.338756 0.0 NaN NaN NaN 1 45.0 32.655867 2.0 1.682889 1.159516 0.322831 FOOOF AP + vs. FOOOF AP df_resid ssr df_diff ss_diff F Pr(>F) 0 46.0 28.706282 0.0 NaN NaN NaN 1 45.0 28.637508 1.0 0.068773 0.108068 0.743879ConclusionsIn general, we can see that predicting behaviour from parameterized spectral features appears to be promising.Key points:- In the cases in which alpha is predictive the parameterized alpha tends to do better- There is also some predictive power of the aperiodic components Check Which Parameters ChangeNext we will check which parameters show a significant task related difference.In the cell below, per group, per measure, the average difference of the measure (from during the trial vs baseline) is printed, as well as a 1 sample t-test, which tests if this number is significantly different from 0 (no change).for age in ['YNG', 'OLD']: print(age) for label in ['offset', 'exponent', 'alpha_cf', 'alpha_pw', 'alpha_bw']: temp = diff_data_dict[age][label] print('\n', label) print('\t avg diff: \t {:+1.3f}'.format(np.mean(temp))) print('\t effect size: \t {:+1.3f}'.format(cohens_d_1samp(temp))) print_stat('\t t-test', *ttest_1samp(temp, 0))YNG offset avg diff: -0.057 effect size: -0.533 t-test: -3.8094 0.0004 exponent avg diff: -0.035 effect size: -0.353 t-test: -2.5212 0.0149 alpha_cf avg diff: +0.029 effect size: +0.061 t-test: 0.4381 0.6632 alpha_pw avg diff: +0.099 effect size: +0.750 t-test: 5.3537 0.0000 alpha_bw avg diff: -0.081 effect size: -0.232 t-test: -1.6569 0.1038 OLD offset avg diff: -0.051 effect size: -0.444 t-test: -2.8806 0.0063 exponent avg diff: -0.007 effect size: -0.059 t-test: -0.3799 0.7060 alpha_cf avg diff: +0.300 effect size: +0.350 t-test: 2.2651 0.0289 alpha_pw avg diff: -0.002 effect size: -0.019 t-test: -0.1250 0.9011 alpha_bw avg diff: +0.082 effect size: +0.131 t-test: 0.8492 0.4007Plot the difference of FOOOFs Now we recreate power spectra that visualize these changes, per group.To do so, we will use the values shown above to reconstruct how spectra change from baseline to task engaged.# Generate spectra that show the task related change in spectral activity for group, inds in zip(['YNG', 'OLD'], [YNG_INDS, OLD_INDS]): time_offs = all_offsets[:, inds, :].mean(0).mean(0) time_exps = all_exps[:, inds, :].mean(0).mean(0) time_al_cf = all_alphas_cf[:, inds, :].mean(0).mean(0) time_al_pw = all_alphas_pw[:, inds, :].mean(0).mean(0) time_al_bw = all_alphas_bw[:, inds, :].mean(0).mean(0) fs, base_spectrum = gen_power_spectrum([3, 30], [time_offs[0], time_exps[0]], [time_al_cf[0], time_al_pw[0], time_al_bw[0]/2], nlv=0) fs, task_spectrum = gen_power_spectrum([3, 30], [time_offs[2], time_exps[2]], [time_al_cf[2], time_al_pw[2], time_al_bw[2]/2], nlv=0) plot_spectra(fs, [base_spectrum, task_spectrum], True, True, labels=['Baseline', 'Delay'], figsize=(7, 6), lw=3.5) plt.title(group, {'fontsize' : 16, 'fontweight' : 'bold'}) if 
SAVE_FIG: plt.tight_layout() plt.savefig('../figures/' + 'EEG-TaskModels-' + group + '.pdf')Correlations Between FeaturesCheck the correlation structure within and between FOOOF and canonical measures.# Settings group = 'ALL' cur_data = data_dict #cur_data = diff_data_dict # Calculate and collect correlations between features corrs = np.zeros(shape=[len(datas), len(datas)]) print('Correlations between features \t\t r-val \t\t p-val') for i1, label1 in enumerate(labels): for i2, label2 in enumerate(labels): corr = nan_corr(cur_data[group][label1].flatten(), cur_data[group][label2].flatten()) if i1 != i2: corrs[i1, i2] = corr[0] else: corrs[i1, i2] = 0 if i1 < i2: print(' {:10s} & {} : \t\t{:+1.3f} \t\t{:1.4f}'.format(label1, label2, *corr)) # Plot the correlation matrix between features plt.imshow(corrs, cmap='bwr', vmin=-1, vmax=1) plt.gca().set_xticklabels([''] + labels, rotation=45); plt.gca().set_yticklabels([''] + labels, rotation=45); plt.colorbar(); if SAVE_FIG: plt.tight_layout() plt.savefig('../figures/EEG-ParamCorrs.pdf')Explore Relation Between Canonical Alpha and Spectral ParametersIn this section, we will explore how the canonical alpha measure relates to FOOOF measures.The idea of the FOOOF model is that the measured power at a particular frequency, say 10 Hz or alpha, reflects a combination of aperiodic power and any band-specific oscillations. If so, the total power at a given frequency, as is traditionally computed, should be predictable by a combination of FOOOF features.To check this, we will further explore the relationship between canonically measured alpha power, and FOOOF features for the aperiodic component (offset and exponent) and the FOOOF measure of alpha peak-specific power.# Calculate correlation between canonical and FOOOF alpha print_stat('C-Alpha vs F-Alpha ', *nan_corr(np.array(all_alphas_pw.flatten()), np.array(canalpha.flatten()))) print_stat('C-ICF-Alpha vs F-Alpha ', *nan_corr(np.array(all_alphas_pw.flatten()), np.array(canal_icf.flatten()))) # Generate the power @ alpha frequency given the aperiodic component, from the FOOOF fits ap_alpha = [] for cf, off, exp in zip(all_alphas_cf.flatten(), all_offsets.flatten(), all_exps.flatten()): ap_alpha.append(gen_aperiodic(np.array([10]), [off, exp])[0]) ap_alpha = np.array(ap_alpha) # Calculate the total power at 10 Hz (or about) from the combination of aperiodic & alpha peak foo_total = ap_alpha + all_alphas_pw.flatten() # Calculate correlation between canonical alpha and aperiodic component @ 10 Hz print_stat('C-Alpha vs 10Hz-AP', *nan_corr(np.array(ap_alpha), np.array(canalpha.flatten()))) # Calculate correlation between the canonical alpha and the FOOOF model total @ 10 Hz print_stat('C-Alpha vs Total-FOOOF', *nan_corr(np.array(foo_total.flatten()), np.array(canalpha.flatten()))) print_stat('C-ICF-Alpha vs Total-FOOOF', *nan_corr(np.array(foo_total.flatten()), np.array(canal_icf.flatten())))C-Alpha vs Total-FOOOF: 0.2628 0.0000 C-ICF-Alpha vs Total-FOOOF: 0.2545 0.0000To put it all together, let's see to what extent we can predict the canonically measured alpha from FOOOF features.# Predict canonical alpha # Note: if this is updated to predict ICF instead, results are approximately the same data = pd.DataFrame() group = 'ALL' data['exp'] = diff_data_dict[group]['exponent'] data['off'] = diff_data_dict[group]['offset'] data['fal'] = diff_data_dict[group]['alpha_pw'] data['canal'] = diff_data_dict[group]['canalpha'] mod = run_model("canal ~ exp + off + fal", data, True) # Check the predictions from 
individual components m1 = run_model("canal ~ exp", data, False) m2 = run_model("canal ~ off", data, False) m3 = run_model("canal ~ fal", data, False) print('Canal - EXP: \t{:1.4f}'.format(m1.rsquared_adj)) print('Canal - OFF: \t{:1.4f}'.format(m2.rsquared_adj)) print('Canal - FAL: \t{:1.4f}'.format(m3.rsquared_adj))Canal - EXP: 0.1709 Canal - OFF: 0.0290 Canal - FAL: 0.8006Brightfieldpower_meter_measurement = 40e-6 # W power_meter_area = 3.14 * (9.5e-3 / 2) ** 2 / (mag ** 2) bf_irradiance = power_meter_measurement / power_meter_area print('BF Irradiance: %g W/ m^2' % bf_irradiance) bf_illuminance = bf_irradiance * lux_to_irradiance print('BF Illuminance: %g lux' % bf_illuminance) power_meter_areaFluourescence (Indirect)power_meter_measurement = 30e-9 # W power_meter_area = (9.5e-3 / 2) ** 2 * 3.14 / (mag ** 2) fl_irradiance = power_meter_measurement / power_meter_area / fill_area_fraction print('FL Irradiance: %g W/ m^2' % fl_irradiance) fl_illuminance = fl_irradiance * lux_to_irradiance print('FL Illuminance: %g lux' % fl_illuminance)Compare Source Powerpower_meter_area = (9.5 / 2) ** 2 * 3.14 / (mag ** 2) mW = 0.7 * power_meter_area mW #https://en.wikipedia.org/wiki/Lux lux_to_w_m2 = 1.464 / 1e4 w_cm2_to_lux = 1 / lux_to_w_m2 quantum_yield = 0.79 illumination_dict_mw_cm2 = {'LED': 0.7, 'Halogen Lamp': 1, 'Metal Halide': 57.5, 'xenon': 52.8, 'mercury': 32.8} illumination_dict_lux = {} for illum in illumination_dict_mw_cm2: illumination_dict_lux[illum] = illumination_dict_mw_cm2[illum] * w_cm2_to_lux for key in illumination_dict_lux: print(key) print(illumination_dict_lux[key] * quantum_yield) print(np.log10(illumination_dict_lux[key] * quantum_yield)) illumination_dict_lux illumination_dict_lumens = {'LED Array': 0.0159, 'Halogen Lamp': 2800} fov = 16.6e-3 * 14e-3 / (10 ** 2) for key in illumination_dict_lumens: illumination_dict_lux[key] = illumination_dict_lumens[key] / fov illumination_dict_luxCNN with Original Datatest_it.reset() preds = model_CNN_original.predict(test_it, steps = 27) y_predict = np.argmax(preds,axis=1) cr_CNN_original = pd.DataFrame(classification_report(test_it.classes, y_predict, target_names=test_it.class_indices, output_dict=True)).transpose() cr_CNN_original y_test_array = np.array(test_it.labels) y_pred = np.array(y_predict) fig, ax = plt.subplots(figsize=(15, 15)) cmp = ConfusionMatrixDisplay( confusion_matrix(y_test_array, y_pred), display_labels=class_names,) cmp.plot(ax=ax, cmap = 'Blues') plt.title ('Confustion Matrix for CNN model with Original Data') plt.xticks(rotation=90) plt.show();CNN with Augmented Datatest_it.reset() preds = model_CNN_augmented.predict(test_it, steps = 27) y_predict = np.argmax(preds,axis=1) cr_CNN_aug = pd.DataFrame(classification_report(test_it.classes, y_predict, target_names=test_it.class_indices, output_dict=True)).transpose() cr_CNN_aug y_test_array = np.array(test_it.labels) y_pred = np.array(y_predict) fig, ax = plt.subplots(figsize=(15, 15)) cmp = ConfusionMatrixDisplay( confusion_matrix(y_test_array, y_pred), display_labels=class_names,) cmp.plot(ax=ax, cmap = 'Blues') plt.title ('Confustion Matrix for CNN model with Augmented Data') plt.xticks(rotation=90) plt.show();ResNet with Original Datatest_it.reset() preds = model_ResNet_original.predict(test_it, steps = 27) y_predict = np.argmax(preds,axis=1) cr_ResNet_original = pd.DataFrame(classification_report(test_it.classes, y_predict, target_names=test_it.class_indices, output_dict=True)).transpose() cr_ResNet_original y_test_array = np.array(test_it.labels) y_pred = 
np.array(y_predict) fig, ax = plt.subplots(figsize=(15, 15)) cmp = ConfusionMatrixDisplay( confusion_matrix(y_test_array, y_pred), display_labels=class_names,) cmp.plot(ax=ax, cmap = 'Blues') plt.title ('Confustion Matrix for ResNet model with Original Data') plt.xticks(rotation=90) plt.show();ResNet with Augmented Datatest_it.reset() preds = model_ResNet_Augmented.predict(test_it, steps = 27) y_predict = np.argmax(preds,axis=1) cr_ResNet_aug = pd.DataFrame(classification_report(test_it.classes, y_predict, target_names=test_it.class_indices, output_dict=True)).transpose() cr_ResNet_aug y_test_array = np.array(test_it.labels) y_pred = np.array(y_predict) fig, ax = plt.subplots(figsize=(15, 15)) cmp = ConfusionMatrixDisplay( confusion_matrix(y_test_array, y_pred), display_labels=class_names,) cmp.plot(ax=ax, cmap = 'Blues') plt.title ('Confustion Matrix for ResNet model with Augmented Data') plt.xticks(rotation=90) plt.show();Precision Comparisioncr_Vgg16_aug = pd.read_csv('VGG16_Augmented_90.csv', index_col = 0) cr_Vgg16_original = pd.read_csv('VGG16_original_86.csv', index_col = 0) plt.subplots(figsize=(15, 8)) plt.xticks(rotation=90) X_axis = np.arange (len(cr_CNN_original.index)) plt.plot(X_axis - 0.25, cr_CNN_original.iloc[:,0], label = "CNN with Original Data", color = 'mediumblue') plt.plot(X_axis, cr_ResNet_original.iloc[:,0], label = "ResNet with Original Data", color = 'darkslategray') plt.plot(X_axis + 0.25, cr_Vgg16_original.iloc[:,0], label = "ResNet with Original Data", color = 'steelblue') plt.bar(X_axis - 0.25, cr_CNN_aug.iloc[:,0], 0.25, label = "CNN with Augmented Data", color = 'aqua') plt.bar(X_axis, cr_ResNet_aug.iloc[:,0], 0.25, label = "ResNet with Augmented Data", color = 'lightseagreen') plt.bar(X_axis + 0.25, cr_Vgg16_aug.iloc[:,0], 0.25, label = "Vgg16 with Augmented Data", color = 'skyblue') plt.xticks(X_axis, cr_CNN_original.index) plt.title("Precision Comparisons") plt.legend(loc = 'lower left') plt.show() plt.subplots(figsize=(15, 8)) plt.xticks(rotation=90) X_axis = np.arange (len(cr_CNN_original.index)) plt.plot(X_axis - 0.25, cr_CNN_original.iloc[:,1], label = "CNN with Original Data", color = 'mediumblue') plt.plot(X_axis, cr_ResNet_original.iloc[:,1], label = "ResNet with Original Data", color = 'darkslategray') plt.plot(X_axis + 0.25, cr_Vgg16_original.iloc[:,1], label = "ResNet with Original Data", color = 'steelblue') plt.bar(X_axis - 0.25, cr_CNN_aug.iloc[:,1], 0.25, label = "CNN with Augmented Data", color = 'aqua') plt.bar(X_axis, cr_ResNet_aug.iloc[:,1], 0.25, label = "ResNet with Augmented Data", color = 'lightseagreen') plt.bar(X_axis + 0.25, cr_Vgg16_aug.iloc[:,1], 0.25, label = "Vgg16 with Augmented Data", color = 'skyblue') plt.xticks(X_axis, cr_CNN_original.index) plt.title("Recall Comparisons") plt.legend(loc = 'lower left') plt.show() plt.subplots(figsize=(15, 8)) plt.xticks(rotation=90) X_axis = np.arange (len(cr_CNN_original.index)) plt.plot(X_axis - 0.25, cr_CNN_original.iloc[:,2], label = "CNN with Original Data", color = 'mediumblue') plt.plot(X_axis, cr_ResNet_original.iloc[:,2], label = "ResNet with Original Data", color = 'darkslategray') plt.plot(X_axis + 0.25, cr_Vgg16_original.iloc[:,2], label = "ResNet with Original Data", color = 'steelblue') plt.bar(X_axis - 0.25, cr_CNN_aug.iloc[:,2], 0.25, label = "CNN with Augmented Data", color = 'aqua') plt.bar(X_axis, cr_ResNet_aug.iloc[:,2], 0.25, label = "ResNet with Augmented Data", color = 'lightseagreen') plt.bar(X_axis + 0.25, cr_Vgg16_aug.iloc[:,2], 0.25, label = "Vgg16 with 
Augmented Data", color = 'skyblue') plt.xticks(X_axis, cr_CNN_original.index) plt.title("F1-Score Comparisons") plt.legend(loc = 'lower left') plt.show()`@cache`: guardar el valor de la función para esos argumentos * Hacer que esa la cache de la función se guarde en un archivo `.json` * ¿Alguien puede conseguir lo mismo con solo 1 línea de código?import time def memoize(function): memo = {} def wrapper(*args, **kwargs): serialized = (args, tuple(list(kwargs.items()))) if serialized in memo: return memo[serialized] # = valor retornado else: rv = function(*args, **kwargs) memo[serialized] = rv return rv return wrapper @memoize def suma2(a,b,c=None): return a + b + c suma2(1,2,c=3) d = {} d[(1,2,("as", 2))] = 1 d["asd"] = 1 d["asda"] =2 tuple(list(d.items())) list(d.items()) @memoize def fibonacci(n): # time.sleep(2) if n < 2: return n return fibonacci(n - 1) + fibonacci(n - 2) def fibonacci_nocache(n): # time.sleep(2) if n < 2: return n return fibonacci(n - 1) + fibonacci(n - 2) %%timeit fibonacci(200) %%timeit fibonacci_nocache(200) # cache import functools def cache(func): """Keep a cache of previous function calls""" @functools.wraps(func) def wrapper_cache(*args, **kwargs): cache_key = args + tuple(kwargs.items()) if cache_key not in wrapper_cache.cache: wrapper_cache.cache[cache_key] = func(*args, **kwargs) return wrapper_cache.cache[cache_key] wrapper_cache.cache = dict() return wrapper_cache**Otros cursos**import time import functools import json from collections import defaultdict import os class Cache: def __init__(self, func): functools.update_wrapper(self, func) self.func = func self.cache = ( self.open_json() if os.path.isfile("cache.json") else defaultdict(str) ) self._time_start = time.time() def __call__(self, *args, **kwargs): rutas = str( [argument for argument in args] + [ (key, kwargs[key]) for key in sorted(kwargs.keys(), key=lambda x: x.lower()) ] ) result = self.cache[rutas] if not result: result = self.func(*args, **kwargs) self.cache[rutas] = result if (time.time() - self._time_start) >= 5: self.save_json() self._time_start = time.time() return result def open_json(self): with open("cache.json", "r") as json_file: cache = json.load(json_file) return defaultdict(str, cache) def save_json(self): with open("cache.json", "w") as json_file: json.dump(self.cache, json_file) time.sleep(1) import time def memoize(function): memo = {} def wrapper(*args, **kwargs): serialized = (args, tuple(list(kwargs.items()))) if serialized in memo: return memo[serialized] # = valor retornado else: rv = function(*args, **kwargs) memo[serialized] = rv return rv return wrapper @memoize def suma2(a,b,c=None): return a + b + c suma2(1,2,c=3) d = {} d[(1,2,("as", 2))] = 1 d["asd"] = 1 d["asda"] =2 tuple(list(d.items())) list(d.items()) @memoize def fibonacci(n): # time.sleep(2) if n < 2: return n return fibonacci(n - 1) + fibonacci(n - 2) def fibonacci_nocache(n): # time.sleep(2) if n < 2: return n return fibonacci(n - 1) + fibonacci(n - 2) %%timeit fibonacci(200) %%timeit fibonacci_nocache(200) # cache import functools def cache(func): """Keep a cache of previous function calls""" @functools.wraps(func) def wrapper_cache(*args, **kwargs): cache_key = args + tuple(kwargs.items()) if cache_key not in wrapper_cache.cache: wrapper_cache.cache[cache_key] = func(*args, **kwargs) return wrapper_cache.cache[cache_key] wrapper_cache.cache = dict() return wrapper_cache**Otros cursos**import time import functools import json from collections import defaultdict import os class Cache: def __init__(self, 
func): functools.update_wrapper(self, func) self.func = func self.cache = ( self.open_json() if os.path.isfile("cache.json") else defaultdict(str) ) self._time_start = time.time() def __call__(self, *args, **kwargs): rutas = str( [argument for argument in args] + [ (key, kwargs[key]) for key in sorted(kwargs.keys(), key=lambda x: x.lower()) ] ) result = self.cache[rutas] if not result: result = self.func(*args, **kwargs) self.cache[rutas] = result if (time.time() - self._time_start) >= 5: self.save_json() self._time_start = time.time() return result def open_json(self): with open("cache.json", "r") as json_file: cache = json.load(json_file) return defaultdict(str, cache) def save_json(self): with open("cache.json", "w") as json_file: json.dump(self.cache, json_file) time.sleep(1) from functools import lru_cacheCommand execution actions * **Difficulty level**: easy* **Time need to lean**: 10 minutes or less* **Key points**: * Action `run` uses `bash` under linux and `batch` under windows * Other shell scripting actions calls respective shells Action `run` `run` is the most frequently used action in sos. In most cases, it is similar to action `bash` and uses `bash` to execute specified script. Under the hood, this action is quite different from `bash` because the run action does not have a default interpreter and would behave differently under different situations. Executing a list of commands In the simplest case when one or more commands are specified, action `run` would assume it is a batch script under windows, and a bash script otherwise.run: echo "A"echo "A" AIt is different from an `bash` action in that* It will **print the commands that re executed before execute*** It will **exit with error if any of the commands exits with non-zero code**For example, whereas a `bash` action would print an error message but continue as followsbash: echoo "This is wrong" echo "This is correct"/var/folders/ys/gnzk0qbx5wbdgm531v82xxljv5yqy8/T/tmp1nykq5tz.sh: line 1: echoo: command not found This is correctThe `run` action would exit with error%env --expect-error run: echoo "This is wrong" echo "This is correct"echoo "This is wrong" /var/folders/ys/gnzk0qbx5wbdgm531v82xxljv5yqy8/T/tmpiycbcidi.sh: line 1: echoo: command not foundIn another word,```run: command1 command2 command3```is equivalent to```bash: command1 && command2 && command3```under Linux/MacOS systems. Using shebang-specified interpreter If the script starts with a shebang line, this action would execute the script directly. This allows you to execute any script in any language. For example, the following script executes a python script using action `run`run: #!/usr/bin/env python print('This is python')This is pythonand the following example runs a complete sos script using command `sos-runner`run: #!/usr/bin/env sos-runner [10] print(f"This is {step_name}") [20] print(f"This is {step_name}")INFO: Running 10: !/usr/bin/env sos-runner This is 10 INFO: 10 is completed. INFO: Running 20: This is 20 INFO: 20 is completed. INFO: Workflow default (ID=55b3a265b673438c) is executed successfully with 2 completed steps.Note that action `run`would not analyze shebang line of a script if it is executed in a docker container (with option `docker-image`) and would always assumed to be `bash`. Action `bash`Action `bash(script)` accepts a shell script and execute it using `bash`.bash: for (( counter=10; counter>0; counter-- )) do echo -n "$counter " done printf "\n"10 9 8 7 6 5 4 3 2 1Action `sh` Execute script with a `sh` interpreter. 
On most systems it is just an alias to `bash`.n = 10 sh: expand=True if [ {n} -lt 10 ]; then echo "It is a one digit number" else echo "It is a two digit number" fiIt is a two digit numberAction `csh` Action `csh` executes specified script with a `csh` interpretercsh: set name1 = abc set name = cde set text = "File $name""1 is not $name1" echo $textFile cde1 is not abcAction `tcsh` Action `tcsh` executes specified script with a `tcsh` interpretertcsh: set testing = 10 if ($testing == 10) echo "it worked" echo "This is the end of the test"it worked This is the end of the testAction `zsh` Action `zsh` executes script with a `zsh` interpreterzsh: wordlist="one two three" wordarray=( $wordlist ) for word in $wordarray; do echo "->$word<-" done->one two three<-To_DateTimeimport pandas as pd dates = ['2020-01-05', 'Jan 5, 2020', '01/05/2020', '2020.01.05', '2020/01/05','20200105'] pd.to_datetime(dates) # Converting various format to a unique date format dt = ['2020-01-05 2:30:00 PM', 'Jan 5, 2020 14:30:00', '01/05/2020', '2020.01.05', '2020/01/05','20200105'] pd.to_datetime(dt) # Possible deal with timeBangladeshi style dates with day firstpd.to_datetime('3-2-2020') # 2nd March pd.to_datetime('3-2-2020',dayfirst=True) # 3rd February which is correctCustom date time formatpd.to_datetime('2020$01$05', format='%Y$%m$%d') pd.to_datetime('2020#01#05', format='%Y#%m#%d')Handling invalid datespd.to_datetime(['2017-01-05', 'Jan 6, 2017', 'abc'], errors='ignore') # it doesnot convert into dates if error exist pd.to_datetime(['2017-01-05', 'Jan 6, 2017', 'abc'], errors='coerce') # Coerce exclude errors, remaining TimestampEpoch Epoch or Unix time means number of seconds that have passed since Jan 1, 1970 00:00:00 UTC timecurrent_epoch=1593020453 pd.to_datetime(current_epoch, unit='s') # unit seconds pd.to_datetime(current_epoch*1000, unit='ms') t = pd.to_datetime([current_epoch], unit='s') t常用的 DataFrame 操作* merge / transform* subset* groupby [作業目標]- 練習填入對應的欄位資料或公式, 完成題目的要求 [作業重點]- 填入適當的輸入資料, 讓後面的程式顯示題目要求的結果 (Hint: 填入對應區間或欄位即可, In[4]~In[6], Out[4]~In[6])- 填入z轉換的計算方式, 完成轉換後的數值 (Hint: 參照標準化公式, In[7])# Import 需要的套件 import os import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # 設定 data_path # dir_data = './data/' # f_app = os.path.join(dir_data, 'application_train.csv') # print('Path of read in data: %s' % (f_app)) app_train = pd.read_csv('application_train.csv') app_train.head()作業1. 請將 app_train 中的 CNT_CHILDREN 依照下列規則分為四組,並將其結果在原本的 dataframe 命名為 CNT_CHILDREN_GROUP * 0 個小孩 * 有 1 - 2 個小孩 * 有 3 - 5 個小孩 * 有超過 5 個小孩2. 請根據 CNT_CHILDREN_GROUP 以及 TARGET,列出各組的平均 AMT_INCOME_TOTAL,並繪製 baxplot3. 
For each CNT_CHILDREN_GROUP and TARGET, compute the [Z-transformed](https://en.wikipedia.org/wiki/Standard_score) scores of AMT_INCOME_TOTAL#1 """ Your code here """ cut_rule = [-np.inf, 0, 2, 5, app_train['CNT_CHILDREN'].max()] app_train['CNT_CHILDREN_GROUP'] = pd.cut(app_train['CNT_CHILDREN'].values, cut_rule, include_lowest=True) app_train['CNT_CHILDREN_GROUP'].value_counts() #2-1 """ Your code here """ grp = ['CNT_CHILDREN_GROUP', 'TARGET'] grouped_df = app_train.groupby(grp)['AMT_INCOME_TOTAL'] grouped_df.mean() #2-2 """ Your code here """ plt_column = 'AMT_INCOME_TOTAL' plt_by = ['CNT_CHILDREN_GROUP', 'TARGET'] app_train.boxplot(column=plt_column, by = plt_by, showfliers = False, figsize=(12,12)) plt.suptitle('') plt.show() #3 """ Your code here """ app_train['AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET'] = grouped_df.apply(lambda x:(x-np.mean(x))/np.std(x) ) app_train[['AMT_INCOME_TOTAL','AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET']].head()Copyright 2020, and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code. Descriptive statistics: Problem solvingLet's practice some of the skills we learned in the last session on descriptive statistics. Import librariesWe can calculate measures of central tendency and spread using `pandas dataframes`.The first thing you need to do is import `pandas` below.[Use the Reference if you've forgotten any of these steps](Reference.html).import pandas as pd Load dataWe're going to work with data from before, on flowers.Load the CSV file `"datasets/flower-data-2020.csv"` into a dataframe.dataframe = pd.read_csv('datasets/flower-data-2020.csv') Flower mean/median/modeIn the next three cells, use the dataframe to calculate the mean, median, and mode.dataframe.mean() dataframe.median() dataframe.mode() **QUESTION:**How are the mean, median, and mode different here?Write your answer in the cell below. **ANSWER: (click here to edit)***In this case, the mean, median, and mode are almost the same* **QUESTION:**Why do the mean and the median have values for `Size`, but the mode has values for `PetalColor`, `PetalShape`, and `Size`?Write your answer in the cell below. **ANSWER: (click here to edit)***`Size` is numeric (actually ordinal), so it is the only variable Python will calculate the mean and median for. It actually is incorrect to calculate a mean because the data is ordinal, but Python isn't smart enough to know that, because we didn't tell it our data was ordinal (one way to do so is sketched below). This is a good example of being careful with computers - they will do exactly what we tell them to do even if it is incorrect.
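A minimal sketch of that "tell pandas the data is ordinal" idea, using made-up size labels (the actual values in `datasets/flower-data-2020.csv` may differ) and assuming a reasonably recent pandas version:

```python
import pandas as pd
from pandas.api.types import CategoricalDtype

# Hypothetical ordinal labels -- the real Size column may use different values.
size_type = CategoricalDtype(categories=["small", "medium", "large"], ordered=True)
sizes = pd.Series(["small", "large", "medium", "large"], dtype=size_type)

print(sizes.mode())   # the mode is still well defined for an ordered categorical
# sizes.mean()        # recent pandas raises a TypeError here, because a
#                     # categorical column has no mean
```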
It is up to us to make sure we understand the reasoning behind what we are doing so we can recognize when the computer gives us garbage answers.* Outlier effectsLet's look at some different data: a student's quiz grades across a course.Suppose a student received the following grades:- 0- 85- 82- 91- 82In the next cell, make a new dataframe (with a new variable) using this list of numbers (in a double list).grades = pd.DataFrame([0, 85, 82, 91, 82]) Now `print` the mean, median, and mode of this grades dataframe in the next cell.print(grades.median()) print(grades.mean()) print(grades.mode()) 0 82.0 dtype: float64 0 68.0 dtype: float64 0 0 82In the cell below, copy the blocks where you created the grades dataframe but remove the `0` from the list.grades = pd.DataFrame([85, 82, 91, 82]) Now `print` the mean, median, and mode of this grades dataframe (without `0`) in the next cell.print(grades.median()) print(grades.mean()) print(grades.mode()) 0 83.5 dtype: float64 0 85.0 dtype: float64 0 0 82**QUESTION:**- How did the mean, median, and mode change after you dropped the zero?- What does dropping the zero mean to the student who took this class?- Which measures are most affected/least affected by outliers?Write your answer in the cell below. **ANSWER: (click here to edit)***The median changed very slightly and the mode did not change at all. The mean changed dramatically from 68 to 85. This means the student would go from a D to a B in the class. This is an example of how an outlier can have a strong effect on the mean, and how the other measures of central tendency are resistant to outliers.* Dataframe samplingLet's switch back to the flower dataframe.In the cell below, get the five number summary of the dataframe (i.e. `describe`).dataframe.describe() SamplingNow let's take samples from the flower dataframe, get their five number summaries, and compare those to each other and to the entire dataframe.In the cell below, take a sample of `10` from the flowers dataframe, store it in a variable, and display the sample.sample = dataframe.sample(10) sample In the cell below, repeat what you did above (take a sample of `10` and store it in a variable) but add below that a block doing a five number summary of the sample.sample = dataframe.sample(10) sample.describe() Data preprocessing and Feature Engineering with TFTIn this lab, we use [TensorFlow Transform](https://www.tensorflow.org/tfx/guide/tft) (TFT) to perform the following:1. **Implement** transformation logic in **preprocess_fn**.2. **Implement** a Beam pipeline: 1. **Analyze** and **transform** training data. 2. **Transform** evaluation data.3.
**Run** pipeline to produce the transformed **data** and transform **artifacts**.#!pip install -q apache-beam[gcp]==2.16 pyarrow==0.14.0 tfx-bsl==0.15.1 tfx==0.15 import os import apache_beam as beam import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_transform as tft import tensorflow_transform.beam as tft_beam from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import dataset_schema from tensorflow_transform.tf_metadata import schema_utils print('TF version: {}'.format(tf.__version__)) print('TFDV version: {}'.format(tfdv.__version__)) print('TFT version: {}'.format(tft.__version__)) print('Apache Beam version: {}'.format(beam.__version__)) WORKSPACE = 'workspace' # you can set to a GCS location DATA_DIR = os.path.join(WORKSPACE, 'raw_data') TRAIN_DATA_FILE = os.path.join(DATA_DIR,'train.csv') EVAL_DATA_FILE = os.path.join(DATA_DIR,'eval.csv') RAW_SCHEMA_LOCATION = os.path.join(WORKSPACE, 'raw_schema.pbtxt')1. Implement transformation logicWe make use of the raw schema to perform metadata-driven feature handling, as follows:1. Scale numeric features with z-score2. Integerise categorical featuresAther transformations can be performed, including bucketization, polynomial expantion, clipping, or custom formulas.HEADER = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'gender', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income_bracket'] TARGET_FEATURE_NAME = 'income_bracket' WEIGHT_COLUMN_NAME = 'fnlwgt' def make_preprocessing_fn(raw_schema): def preprocessing_fn(input_features): processed_features = {} for feature in raw_schema.feature: feature_name = feature.name # Pass the target and weight features as is. if feature_name in [TARGET_FEATURE_NAME, WEIGHT_COLUMN_NAME]: processed_features[feature_name] = input_features[feature_name] continue if feature.type == 1: # Extract vocabulary and integerize categorical features. processed_features[feature_name + "_integerized"] = tft.compute_and_apply_vocabulary( input_features[feature_name], vocab_filename=feature_name) else: # normalize numeric features. processed_features[feature_name + "_scaled"] = tft.scale_to_z_score( input_features[feature_name]) # Bucketize age using quantiles. quantiles = tft.quantiles(input_features["age"], num_buckets=5, epsilon=0.01) processed_features["age_bucketized"] = tft.apply_buckets( input_features["age"], bucket_boundaries=quantiles) # Feature creation education_to_age_ratio = input_features["age"] / input_features["education_num"] capital_indicator = input_features['capital_gain'] > input_features['capital_loss'] processed_features['education_to_age_ratio'] = tf.cast(education_to_age_ratio, tf.float32) processed_features['capital_indicator'] =tf.cast(capital_indicator, tf.int64) return processed_features return preprocessing_fn2. Implement a Beam pipelinedef run_pipeline(args): pipeline_options = beam.pipeline.PipelineOptions(flags=[], **args) raw_schema_location = args['raw_schema_location'] raw_train_data_location = args['raw_train_data_location'] raw_eval_data_location = args['raw_eval_data_location'] transformed_train_data_location = args['transformed_train_data_location'] transformed_eval_data_location = args['transformed_eval_data_location'] transform_artefact_location = args['transform_artefact_location'] temporary_dir = args['temporary_dir'] runner = args['runner'] # Load TFDV schema and create tft schema from it. 
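# Note: the TFDV schema loaded below lists each raw feature's name and type;
# make_preprocessing_fn above uses it to choose vocabulary integerization for
# categorical features and z-score scaling for numeric ones.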
source_raw_schema = tfdv.load_schema_text(raw_schema_location) raw_feature_spec = schema_utils.schema_as_feature_spec(source_raw_schema).feature_spec # Since the raw_feature_spec doesn't include the weight column, we need to add it. raw_feature_spec[WEIGHT_COLUMN_NAME] = tf.FixedLenFeature( shape=[1], dtype=tf.int64, default_value=None) raw_metadata = dataset_metadata.DatasetMetadata( dataset_schema.from_feature_spec(raw_feature_spec)) with beam.Pipeline(runner, options=pipeline_options) as pipeline: with tft_beam.Context(temporary_dir): converter = tft.coders.CsvCoder(column_names=HEADER, schema=raw_metadata.schema) ###### analyze & transform trainining data ############################### # Read raw training csv data. step = 'Train' raw_train_data = ( pipeline | '{} - Read Raw Data'.format(step) >> beam.io.textio.ReadFromText(raw_train_data_location) | '{} - Remove Empty Rows'.format(step) >> beam.Filter(lambda line: line) | '{} - Decode CSV Data'.format(step) >> beam.Map(converter.decode) ) # Create a train dataset from the data and schema. raw_train_dataset = (raw_train_data, raw_metadata) # Analyze and transform raw_train_dataset to produced transformed_train_dataset and transform_fn. transformed_train_dataset, transform_fn = ( raw_train_dataset | '{} - Analyze & Transform'.format(step) >> tft_beam.AnalyzeAndTransformDataset( make_preprocessing_fn(source_raw_schema)) ) # Get data and schema separately from the transformed_train_dataset. transformed_train_data, transformed_metadata = transformed_train_dataset # write transformed train data to sink. _ = ( transformed_train_data | '{} - Write Transformed Data'.format(step) >> beam.io.tfrecordio.WriteToTFRecord( file_path_prefix=transformed_train_data_location, file_name_suffix=".tfrecords", coder=tft.coders.ExampleProtoCoder(transformed_metadata.schema)) ) ###### transform evaluation data ######################################### # Read raw training csv data. step = 'Eval' raw_eval_data = ( pipeline | '{} - Read Raw Data'.format(step) >> beam.io.textio.ReadFromText(raw_eval_data_location) | '{} - Remove Empty Rows'.format(step) >> beam.Filter(lambda line: line) | '{} - Decode CSV Data'.format(step) >> beam.Map(converter.decode) ) # Create a eval dataset from the data and schema. raw_eval_dataset = (raw_eval_data, raw_metadata) # Transform eval data based on produced transform_fn. transformed_eval_dataset = ( (raw_eval_dataset, transform_fn) | '{} - Transform'.format(step) >> tft_beam.TransformDataset() ) # Get data from the transformed_eval_dataset. transformed_eval_data, _ = transformed_eval_dataset # Write transformed eval data to sink. _ = ( transformed_eval_data | '{} - Write Transformed Data'.format(step) >> beam.io.tfrecordio.WriteToTFRecord( file_path_prefix=transformed_eval_data_location, file_name_suffix=".tfrecords", coder=tft.coders.ExampleProtoCoder(transformed_metadata.schema)) ) ###### write transformation metadata ####################################################### # Write transform_fn. _ = ( transform_fn | 'Write Transform Artefacts' >> tft_beam.WriteTransformFn( transform_artefact_location) )3. 
Run Data Transformation PipelineTRANSFORM_ARTEFACTS_DIR = os.path.join(WORKSPACE,'transform_artifacts') TRANSFORMED_DATA_DIR = os.path.join(WORKSPACE,'transformed_data') TEMP_DIR = os.path.join(WORKSPACE, 'tmp') runner = 'DirectRunner' args = { 'runner': runner, 'raw_schema_location': RAW_SCHEMA_LOCATION, 'raw_train_data_location': TRAIN_DATA_FILE, 'raw_eval_data_location': EVAL_DATA_FILE, 'transformed_train_data_location': os.path.join(TRANSFORMED_DATA_DIR, "train"), 'transformed_eval_data_location': os.path.join(TRANSFORMED_DATA_DIR, "eval"), 'transform_artefact_location': TRANSFORM_ARTEFACTS_DIR, 'temporary_dir': TEMP_DIR } from tensorflow.io import gfile if gfile.exists(TEMP_DIR): print("Removing {} contents...".format(TEMP_DIR)) gfile.rmtree(TRANSFORMED_DATA_DIR) if gfile.exists(TRANSFORMED_DATA_DIR): print("Removing {} contents...".format(TRANSFORMED_DATA_DIR)) gfile.rmtree(TRANSFORMED_DATA_DIR) if gfile.exists(TRANSFORM_ARTEFACTS_DIR): print("Removing {} contents...".format(TRANSFORM_ARTEFACTS_DIR)) gfile.rmtree(TRANSFORM_ARTEFACTS_DIR) tf.logging.set_verbosity(tf.logging.ERROR) print("Running TF Transform pipeline...") print("") %time run_pipeline(args) print("") print("Pipeline is done.")Check TFT outputs!ls {TRANSFORM_ARTEFACTS_DIR}/* tft_output = tft.TFTransformOutput(TRANSFORM_ARTEFACTS_DIR) transform_feature_spec = tft_output.transformed_feature_spec() transform_feature_spec tf.enable_eager_execution() def _parse_example(example): # Parse the input `tf.Example` proto using the dictionary above. return tf.io.parse_single_example(example, transform_feature_spec) dataset = tf.data.TFRecordDataset(TRANSFORMED_DATA_DIR + "/train-00000-of-00001.tfrecords") for record in dataset.take(3).map(_parse_example): print(record) print("")Linear Regression%matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (20.0,10.0) #reading data data = pd.read_csv('Desktop\headbrain.csv') print(data.shape) data.head() #collecting X and Y X = data['Head Size(cm^3)'].values Y = data['Brain Weight(grams)'].values #mean of X,Y mean_x = np.mean(X) mean_y = np.mean(Y) n = len(X) # calculating b1 and b2 , y = b1*x + b2 num = 0 den = 0 for i in range(n): num += (X[i]-mean_x) * (Y[i]-mean_y) den += (X[i]-mean_x) ** 2 b1 = num/den b2 = mean_y - (b1*mean_x) print(b1,b2) #ploting the values max_x = np.max(X) + 100 min_x = np.min(X) - 100 x = np.linspace(max_x,min_x,1000) y = b2 + b1*x #ploting line plt.plot(x,y,color='red',label='Regression Line') #ploting scatter points plt.scatter(X,Y,c='blue', label='Scatter Plot') plt.xlabel('Head size in cm3') plt.ylabel('Brain Weight in grams') plt.legend() plt.show()Linear Regression using Scikit Learnfrom sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error X = X.reshape((n,1)) #creating model reg = LinearRegression() #fitting training data reg = reg.fit(X,Y) #prediction Y_pred = reg.predict(X) #calculating R2 score r2_score = reg.score(X,Y) print(r2_score)0.639311719957Kalman Filter DemoThe Kalman filter seeks to estimate a system given a state space model with linear-discrete process and observation models$x_{t+1} = \mathbf{F}x_t + \mathbf{U}_fu^{(f)}_t + \mathbf{\xi}_t$$y_t = \mathbf{H}x_t + \mathbf{U}_hu^{(h)}_t + \mathbf{\nu}_t$with Gaussian noise with zero mean and known covariance, $\xi_t \sim \mathcal{N}(\mathbf{0},\mathbf{Q}_t)$ and $\nu_t \sim \mathcal{N}(\mathbf{0}, \mathbf{R}_t)$ 1-D ExampleFor example, basic accelerated velocity model in 1 dimension$x_t = 
[x, \dot{x}, \ddot{x}]^\text{T}$ and $y_t = [x]$The respective process and observation functions are thus setup as $\mathbf{F} = \begin{bmatrix} 1 & 1 & 0 \\ 0 & 1 & 1 \\ 0 & 0 & 1\end{bmatrix} \qquad \mathbf{H} = \begin{bmatrix} 1 & 0 & 0 \end{bmatrix}$The input control is not used, so $\mathbf{U}_f = \mathbf{U}_h = \mathbf{0}$# Process model F = np.array([[1, 1, 0], [0, 1, 1], [0, 0, 1]]) # H H = np.array([[1, 0, 0]])An example system can be setup, with the initial position at $x=0$, and the velocity and acceleration parameters set to $0.1$ and $0.05$ respectively. Noise for the process and observation is incorporated using setups for covariances for the noise terms.# Process noise covariance Q = np.diag(np.array([0., 0.01, 0.001])) # Observation noise covariance R = np.array([[400]]) # Initial state and estimation error covariance (assume Q) x0 = np.array([[0., 0.1, 0.05]]).T P0 = np.diag(np.array([10, 1, 0.1])) #Q.copy() # Data dimensions n_x = 3 n_y = 1Model the systems# Number of observations n_t = 100 if _reseed: seed = np.random.randint(0,np.iinfo(np.int32).max) print(seed) else: seed = 1892303842 np.random.seed(seed) # Model systems with and without noisy _, y_noiseless = model_noiseless(x0, f=F, h=H, n=n_t) x_true, y_true, y_noisy = model_noisy(x0, F, Q, None, H, R, n=n_t) plt.figure(figsize=(14, 8)) plt.plot(y_noiseless.T, 'k--') plt.plot(y_true.T, 'k-') plt.plot(y_noisy.T,'b.') plt.legend(labels=['Noiseless state', 'True state', 'Noisy measurements']) kf = KalmanFilter(x0, P0, F, Q, H, R, _verbose=False) x_estimate = np.zeros_like(x_true) P_estimate = np.zeros((n_x, n_x, n_t)) y_estimate = np.zeros_like(y_true) x_estimate[:,0] = x0.ravel() y_estimate[:,0] = (H @ x0).ravel() P_estimate[:,:,0] = P0 for i_ in range(1, n_t): kf.predict() kf.update(y_noisy[:,i_]) x_estimate[:,i_] = kf.state['expected'].ravel() P_estimate[:,:,i_] = kf.state['err_cov'] y_estimate[:,i_] = (H @ x_estimate[:,i_].reshape(n_x,1)).ravel() plt.figure(figsize=(14, 8)) plt.plot(y_true.T, 'k-') plt.plot(y_noisy.T, 'b.') plot_estimate(np.arange(n_t), y_estimate, P_estimate[0,0,:]) plt.xlabel('$t$') plt.ylabel('$x$') plt.legend(labels=['true state','noisy measurements','filtered estimate','estimate confidence (0.95)']) lbl = ['$\dot{x}$','$\ddot{x}$'] for i in range(2): plt.figure(figsize=(14,4)) plt.plot(x_true[i+1,:].ravel(),'k-') plot_estimate(np.arange(n_t), x_estimate[i+1,:], P_estimate[i+1,i+1,:]) plt.xlabel('$t$') plt.ylabel(lbl[i]) plt.legend(labels=['true state','filtered estimate','estimate confidence (0.95)']) rts = KalmanSmoother.from_filter(kf) states = rts.smooth() x_smoothed = np.zeros_like(x_true) P_smoothed = np.zeros((n_x, n_x, n_t)) y_smoothed = np.zeros_like(y_true) x_smoothed[:,0] = x0.ravel() y_smoothed[:,0] = (H @ x0).ravel() P_smoothed[:,:,0] = P0 for i_ in range(1, n_t): x_smoothed[:,i_] = states[i_]['expected'].ravel() P_smoothed[:,:,i_] = states[i_]['err_cov'] y_smoothed[:,i_] = (H @ x_smoothed[:,i_].reshape(n_x,1)).ravel() plt.figure(figsize=(14, 8)) plt.plot(y_true.T, 'k-') plt.plot(y_estimate.T, 'r--', lw=3) plot_estimate(np.arange(n_t), y_smoothed, P_smoothed[0,0,:], c='b') plt.xlabel('$t$') plt.ylabel('$x$') plt.legend(labels=['true state','filtered estimate','smoothed estimate','smoother confidence (0.95)']) lbl = ['$\dot{x}$','$\ddot{x}$'] for i in range(2): plt.figure(figsize=(14,4)) plt.plot(x_true[i+1,:].ravel(),'k-') plt.plot(x_estimate[i+1,:].ravel(), 'r--', lw=3) plot_estimate(np.arange(n_t), x_smoothed[i+1,:], P_smoothed[i+1,i+1,:], c='b') plt.xlabel('$t$') 
plt.ylabel(lbl[i]) plt.legend(labels=['true state','filtered estimate','smoothed estimate','smoother confidence (0.95)']) pr = KalmanFilter(x0, P0, F, Q, H, R, _verbose=False) n_s = int(np.fix(0.8*n_t)) x_predict = np.zeros((n_x, n_t-n_s)) P_predict = np.zeros((n_x, n_x, n_t-n_s)) y_predict = np.zeros((n_y, n_t-n_s)) for i_ in range(1, n_s):#%n_t): pr.predict() pr.update(y_noisy[:,i_]) for i_ in range(0,n_t-n_s): pr.predict() x_predict[:,i_] = pr.state['expected'].ravel() P_predict[:,:,i_] = pr.state['err_cov'] y_predict[:,i_] = (H @ x_predict[:,i_].reshape(n_x,1)).ravel() plt.figure(figsize=(14, 8)) plt.plot(y_true.T, 'k-') plt.plot(y_noisy[:,0:n_s+1].T, 'b.') plot_estimate(np.arange(n_s+1), y_estimate[:,0:n_s+1], P_estimate[0,0,0:n_s+1]) plot_estimate(np.arange(n_s,n_t), y_predict, P_predict[0,0,:], c='g') plt.xlabel('$t$') plt.ylabel('$x$') plt.legend(labels=['true state', 'noisy measurements', 'filtered estimate','prediction','estimation confidence (0.95)','prediction confidence (0.95)'])Outline The package **soundscape_IR** is an open-source Python toolbox that utilizes non-negative matrix factorization (NMF) in audio source separation. This is a quick start guide for the application of **soundscape_IR** in acoustic analysis. For more information, please visit our [Github](https://github.com/elainesun442/soundscape_IR).This guide contains four sections:1. Audio visualization2. Model training3. Deployment and spectrogram reconstruction4. Presence detection Installation To install soundscape_IR, clone the repository and install the required packages# Clone soundscape_IR from GitHub @schonkopf !git clone https://github.com/schonkopf/soundscape_IR.git # Install required packages %cd soundscape_IR %pip install -r requirements.txt1. Audio visualization **soundscape_IR** provides a function ```audio_visualization``` to transform an audio into a spectrogram on the hertz or mel scale. It also enables the use of Welch’s averaging method and spectrogram prewhitening in noise reduction. This example uses a short audio clip of sika deer calls and insect calls to demonstrate the ecoacoustic application of source separation.from soundscape_IR.soundscape_viewer import audio_visualization # Define spectrogram parameters sound_train = audio_visualization(filename='case1_train.wav', path='./data/wav/', offset_read=0, duration_read=15, FFT_size=512, time_resolution=0.1, prewhiten_percent=10, f_range=[0,8000])2. Model training After preparing the training spectrgram, we can train a model with ```source_separation```. NMF learns a set of basis functions to reconstruct the training spectrogram. In **soundscape_IR**, we can apply PC-NMF to separate the basis functions into two groups according to their source-specific periodicity. In this example, one group of basis funcitons is associated with deer call (mainly 3.5 kHz). Save the model for further applications.from soundscape_IR.soundscape_viewer import source_separation # Define model parameters model=source_separation(feature_length=30, basis_num=10) # Feature learning model.learn_feature(input_data=sound_train.data, f=sound_train.f, method='PCNMF') # Plot the basis functions of two sound source model.plot_nmf(plot_type='W', source=1) model.plot_nmf(plot_type='W', source=2) # Save the model model.save_model(filename='./data/model/deer_model.mat')Running periodicity-coded NMF Feature learning...3. 
Deployment and spectrogram reconstruction Generate another spectrogram for testing the source separation model.# Prepare a spectrogram sound_predict=audio_visualization(filename='case1_predict.wav', path='./data/wav/', offset_read=30, duration_read=15, FFT_size=512, time_resolution=0.1, prewhiten_percent=10, f_range=[0,8000])Load the saved model and perform source separation. After the prediction procedure, plot the reconstructed spectrograms to evaluate the separation of deer calls and noise.# Deploy the model model=source_separation() model.load_model(filename='./data/model/deer_model.mat') model.prediction(input_data=sound_predict.data, f=sound_predict.f) # View individual reconstructed spectrogram model.plot_nmf(plot_type = 'separation', source = 1) model.plot_nmf(plot_type = 'separation', source = 2)Model parameters check Minima and maxima frequancy bin: 0.0 Hz and 7924.21875 Hz Frequancy resolution: 86.1328125 Hz Feature length: 30 Number of basis: 10 Number of source: 24. Presence detection With the reconstructed spectrogram, we can use the function ```spectrogram_detection``` to detect the presence of target signals (e.g., deer calls). This function will generate a txt file contains the beginning time, ending time, minimum frequency, and maximum frequency of each detected call. Explore the detection result in [Raven software](https://ravensoundsoftware.com/).from soundscape_IR.soundscape_viewer import spectrogram_detection # Choose the source for signal detection source_num = 2 # Define the detection parameters sp=spectrogram_detection(model.separation[source_num-1], model.f, threshold=5.5, smooth=1, minimum_interval=0.5, filename='deer_detection.txt', path='./data/txt/')Successifully save to ./data/txt/deer_detection.txtGeron's Chapter 15: Modified for multivariate series (2 in, 2 out)n_steps != None anymore for cases also **TODO**: Still need a clear distinction between different classes of models that take in multivariate multi-step input Code borrows heavily from 's 2nd edition: https://github.com/ageron/handson-ml2/blob/master/15_processing_sequences_using_rnns_and_cnns.ipynb Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0-preview.# Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # TensorFlow ≥2.0-preview is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) tf.random.set_seed(42) # To plot pretty figures %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "rnn" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." 
+ fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution)Generate the Datasetdef generate_time_series(batch_size, n_steps): freq1, freq2, offsets1, offsets2 = np.random.rand(4, batch_size, 1) time = np.linspace(0, 1, n_steps) series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10)) # wave 1 series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20)) # + wave 2 series += 0.1 * (np.random.rand(batch_size, n_steps) - 0.5) # + noise return series[..., np.newaxis].astype(np.float32)TWO problems1. Random seed same leads to the same time series. Changed it.2. For arrays with dimensions > 2, hstack always adds arrays along the 2nd axis (or [1]). Had to use:```pythonnp.stack((array1, array2), axis=-1)```to make sure that the arrays were being stacked along the last axis.n_steps = 50 ## SERIES 1 np.random.seed(42) series = generate_time_series(10000, n_steps + 1) ## SERIES 2 np.random.seed(11) series2 = generate_time_series(10000, n_steps + 1) ## Train, validation, test split np.random.seed(42) X_train, y_train = np.stack((series[:7000, :n_steps, -1], series2[:7000, :n_steps, -1]),axis=-1), np.hstack((series[:7000, -1], series2[:7000, -1])) X_valid, y_valid = np.stack((series[7000:9000, :n_steps,-1], series2[7000:9000, :n_steps, -1]),axis=-1), np.hstack((series[7000:9000, -1], series2[7000:9000, -1])) X_test, y_test = np.stack((series[9000:, :n_steps, -1], series2[9000:, :n_steps, -1]),axis=-1) , np.hstack((series[9000:, -1], series2[9000:, -1])) print(X_train.shape, y_train.shape) print(X_valid.shape, y_valid.shape) print(X_test.shape, y_test.shape) def plot_series(series, y=None, y_pred=None, x_label="$t$", y_label="$x(t)$"): plt.plot(series, ".-") if y is not None: plt.plot(n_steps, y, "bx", markersize=10) if y_pred is not None: plt.plot(n_steps, y_pred, "ro") plt.grid(True) if x_label: plt.xlabel(x_label, fontsize=16) if y_label: plt.ylabel(y_label, fontsize=16, rotation=0) plt.hlines(0, 0, 100, linewidth=1) plt.axis([0, n_steps + 1, -1, 1]) fig, axes = plt.subplots(nrows=1, ncols=3, sharey=True, figsize=(12, 4)) for col in range(3): plt.sca(axes[col]) plot_series(X_valid[col, :, 0], y_valid[col, 0], y_label=("$x(t)$" if col==0 else None)) plot_series(X_valid[col, :, 1], y_valid[col, 1], y_label=("$x(t)$" if col==0 else None)) save_fig("time_series_plot") plt.show()Saving figure time_series_plotComputing Some Baselines Naive predictions (just predict the last observed value):y_pred = X_valid[:, -1, 0] y_pred2 = X_valid[:, -1, 1] print(y_pred.shape, y_pred2.shape) print(f"Series 1: {np.mean(keras.losses.mean_squared_error(y_valid[:,0], y_pred))}") print(f"Series 2: {np.mean(keras.losses.mean_squared_error(y_valid[:,1], y_pred2))}") plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0]) plot_series(X_valid[0, :, 1], y_valid[0, 1], y_pred2[0]) plt.show()Dense layer with one neuron = LINEAR regressionNow adapted for **multivariate regression** Linear predictions:np.random.seed(42) tf.random.set_seed(42) # Multivariate shapes n_input_features = 2 n_output_features = 2 model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[50, n_input_features]), keras.layers.Dense(n_output_features) ]) model.compile(loss="mse", optimizer="adam") history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) model.evaluate(X_valid, y_valid)2000/2000 [==============================] - 0s 18us/sample - loss: 0.0043Model summary and plot with shapesmodel.summary() # !pip install pydot --- otherwise 
won't work keras.utils.plot_model(model, 'dense_50.png', show_shapes=True) def plot_learning_curves(loss, val_loss): plt.plot(np.arange(len(loss)) + 0.5, loss, "b.-", label="Training loss") plt.plot(np.arange(len(val_loss)) + 1, val_loss, "r.-", label="Validation loss") plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True)) plt.axis([1, 20, 0, 0.05]) plt.legend(fontsize=14) plt.xlabel("Epochs") plt.ylabel("Loss") plt.grid(True) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() y_pred = model.predict(X_valid) plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0]) plot_series(X_valid[0, :, 1], y_valid[0, 1], y_pred[0, 1]) plt.show()Using a Simple RNNnp.random.seed(42) tf.random.set_seed(42) # Multivariate shapes n_input_features = 2 n_output_features = 2 model = keras.models.Sequential([ keras.layers.SimpleRNN(n_output_features, input_shape=[None, n_input_features]) ]) optimizer = keras.optimizers.Adam(lr=0.005) model.compile(loss="mse", optimizer=optimizer) history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) model.evaluate(X_valid, y_valid) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() y_pred = model.predict(X_valid) plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0]) plot_series(X_valid[0, :, 1], y_valid[0, 1], y_pred[0, 1]) plt.show() # !pip install pydot --- otherwise won't work keras.utils.plot_model(model, 'rnn_1.png', show_shapes=True)Deep RNNsnp.random.seed(42) tf.random.set_seed(42) # Multivariate shapes n_input_features = 2 n_output_features = 2 model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, n_input_features]), keras.layers.SimpleRNN(20), keras.layers.Dense(n_output_features) ]) model.compile(loss="mse", optimizer="adam") history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) model.evaluate(X_valid, y_valid) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() y_pred = model.predict(X_valid) plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0]) plot_series(X_valid[0, :, 1], y_valid[0, 1], y_pred[0, 1]) plt.show() # !pip install pydot --- otherwise won't work keras.utils.plot_model(model, 'rnn_2_dense_end.png', show_shapes=True)MULTI-STEP forecasting Method 1: 1. Take models from before.2. 
Run a loop over 10 points to get prediction for each pointnp.random.seed(43) # not 42, as it would give the first series in the train set series = generate_time_series(1, n_steps + 10) np.random.seed(12) series2 = generate_time_series(1, n_steps + 10) X_new, Y_new = np.stack((series[:, :n_steps, -1], series2[:, :n_steps, -1]),axis=-1) , np.stack((series[:, n_steps:, -1], series2[:, n_steps:, -1]), axis=-1) X = X_new for step_ahead in range(10): y_pred_one = model.predict(X[:, step_ahead:, :])[:, np.newaxis, :] print(y_pred_one.shape) X = np.concatenate([X, y_pred_one], axis=1) Y_pred = X[:, n_steps:] Y_pred.shape def plot_multiple_forecasts(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 0]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :, 0], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 0], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts(X_new, Y_new, Y_pred) save_fig("forecast_ahead_plot") plt.show() def plot_multiple_forecasts2(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 1]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :, 1], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 1], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts2(X_new, Y_new, Y_pred) save_fig("forecast_ahead_plot2") plt.show()Saving figure forecast_ahead_plot2Method 21. Predict 10 steps at ONCE.2. Have to train a NEW model now, cannot use previous one.This took the longest time because had to use `np.stack` instead of np.hstack for y_train, y_valid, y_test also since there are 3 axis dimensions there!!Also, forgot to change the neuron dimension inside Dense to n_output_features from 1. Now let's create an RNN that predicts all 10 next values at once:n_steps = 50 ## SERIES 1 np.random.seed(42) series = generate_time_series(10000, n_steps + 10) ## SERIES 2 np.random.seed(11) series2 = generate_time_series(10000, n_steps + 10) ## Train, validation, test split # X's remain the same... notice the change in y-indices!! np.random.seed(42) X_train, y_train = np.stack((series[:7000, :n_steps, -1], series2[:7000, :n_steps, -1]),axis=-1), np.stack((series[:7000, -10:, -1], series2[:7000, -10:, -1]), axis=-1) X_valid, y_valid = np.stack((series[7000:9000, :n_steps,-1], series2[7000:9000, :n_steps, -1]),axis=-1), np.stack((series[7000:9000, -10:, -1],series2[7000:9000, -10:, -1]), axis=-1) X_test, y_test = np.stack((series[9000:, :n_steps, -1], series2[9000:, :n_steps, -1]),axis=-1) , np.stack((series[9000:, -10:, -1], series2[9000:, -10:, -1]), axis=-1) print(X_train.shape, y_train.shape) print(X_valid.shape, y_valid.shape) print(X_test.shape, y_test.shape)(7000, 50, 2) (7000, 10, 2) (2000, 50, 2) (2000, 10, 2) (1000, 50, 2) (1000, 10, 2)This was giving a whole lot of issuesGeron's earlier version had different layers that are commented out now.His new version was difficult to fit with dimensions. 
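As a side note on the stacking issue mentioned above, here is a small sketch (with toy arrays, not the notebook's actual series) of why `np.stack(..., axis=-1)` is needed instead of `np.hstack` when building the multivariate targets:

```python
import numpy as np

a = np.zeros((7000, 10))   # toy stand-in for the 10-step targets of series 1
b = np.ones((7000, 10))    # toy stand-in for the 10-step targets of series 2

print(np.hstack((a, b)).shape)          # (7000, 20)    -- glued along axis 1, not what we want
print(np.stack((a, b), axis=-1).shape)  # (7000, 10, 2) -- adds a feature axis, matching the model
```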
Finally had to introduce n_steps in input_shape in the first layers.np.random.seed(42) tf.random.set_seed(42) n_output = y_valid.shape[1] * y_valid.shape[2] y_train = y_train.reshape((y_train.shape[0], n_output)) y_valid = y_valid.reshape((y_valid.shape[0], n_output)) print(y_train.shape, y_valid.shape) print(X_train.shape) model = keras.models.Sequential([ keras.layers.SimpleRNN(32, recurrent_dropout=0.2, return_sequences=True, input_shape=(n_steps, n_input_features)), # shouldn't None = n_steps????? That's what I understand from Brownlee and Chollet's book keras.layers.SimpleRNN(32, recurrent_dropout=0.2, dropout=0.2),#, return_sequences=True), #keras.layers.TimeDistributed(keras.layers.Dense(n_output_features)), #keras.layers.Lambda(lambda y_pred: y_pred[:, -10:, :]) # added a column for feature axes #keras.layers.Dense(32), #keras.layers.BatchNormalization(), keras.layers.Dense(n_output) ]) model.compile(loss="mse", optimizer="adam") history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) np.random.seed(43) # not 42, as it would give the first series in the train set series = generate_time_series(1, 50 + 10) np.random.seed(12) series2 = generate_time_series(1, 50 + 10) X_new, Y_new = np.stack((series[:, :50, -1], series2[:, :50, -1]),axis=-1), np.stack((series[:, -10:, -1], series2[:, -10:, -1]), axis=-1) X = X_new Y_pred = model.predict(X_new)#[:, :] Y_pred.shape def plot_multiple_forecasts(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 0]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :10], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 0], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts(X_new, Y_new, Y_pred) plt.show() def plot_multiple_forecasts2(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 1]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, 10:], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 1], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts2(X_new, Y_new, Y_pred) plt.show()New metricsdef last_10_time_steps_mse(Y_true, Y_pred): return keras.metrics.mean_squared_error(Y_true[:, -10:], Y_pred[:, -10:]) def last_5_time_steps_mse(Y_true, Y_pred): return keras.metrics.mean_squared_error(Y_true[:, -5:], Y_pred[:, -5:])Method 3Goal: Forecast 10:59 based on 0:49.Previously, 50:59 was being predicted by 0:49. Now let's create an RNN that predicts the input sequence, shifted 10 steps into the future. That is, instead of just forecasting time steps 50 to 59 based on time steps 0 to 49, it will forecast time steps 10 to 59 based on time steps 0 to 49 (the time steps 10 to 49 are in the input, but the model is causal so at any time step it cannot see the future inputs):n_steps = 50 ## SERIES 1 np.random.seed(42) series = generate_time_series(10000, n_steps + 10) ## SERIES 2 np.random.seed(11) series2 = generate_time_series(10000, n_steps + 10) ## Train, validation, test split # X's remain the same... notice the change in y-indices!! 
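# In this Method 3 setup the targets cover time steps 10..59 while the inputs
# cover steps 0..49: at every input step t the model learns to predict step
# t + 10, so only its final 10 outputs are forecasts beyond the input window.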
np.random.seed(42) X_train, y_train = np.stack((series[:7000, :n_steps, -1], series2[:7000, :n_steps, -1]),axis=-1), np.stack((series[:7000, 10:, -1], series2[:7000, 10:, -1]), axis=-1) X_valid, y_valid = np.stack((series[7000:9000, :n_steps,-1], series2[7000:9000, :n_steps, -1]),axis=-1), np.stack((series[7000:9000, 10:, -1],series2[7000:9000, 10:, -1]), axis=-1) X_test, y_test = np.stack((series[9000:, :n_steps, -1], series2[9000:, :n_steps, -1]),axis=-1) , np.stack((series[9000:, 10:, -1], series2[9000:, 10:, -1]), axis=-1) #np.random.seed(42) #n_steps = 50 #series = generate_time_series(10000, n_steps + 10) #X_train, Y_train = series[:7000, :n_steps], series[:7000, 10:] #X_valid, Y_valid = series[7000:9000, :n_steps], series[7000:9000, 10:] #X_test, Y_test = series[9000:, :n_steps], series[9000:, 10:] X_train.shape, y_train.shape np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, n_input_features]), keras.layers.SimpleRNN(20, recurrent_dropout=0.2, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(n_output_features)) ]) model.compile(loss="mse", optimizer="adam", metrics=[last_10_time_steps_mse]) history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) np.random.seed(43) # not 42, as it would give the first series in the train set series = generate_time_series(1, 50 + 10) np.random.seed(12) series2 = generate_time_series(1, 50 + 10) X_new, Y_new = np.stack((series[:, :50, -1], series2[:, :50, -1]),axis=-1), np.stack((series[:, 50:, -1], series2[:, 50:, -1]), axis=-1) X = X_new Y_pred = model.predict(X_new)[:, -10:, :] def plot_multiple_forecasts(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 0]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :, 0], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 0], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts(X_new, Y_new, Y_pred) save_fig("forecast_ahead_plot") plt.show() def plot_multiple_forecasts2(X, Y, Y_pred): n_steps = X.shape[1] ahead = Y.shape[1] plot_series(X[0, :, 1]) plt.plot(np.arange(n_steps, n_steps + ahead), Y_pred[0, :, 1], "ro-", label="Actual") plt.plot(np.arange(n_steps, n_steps + ahead), Y[0, :, 1], "bx-", label="Forecast", markersize=10) plt.axis([0, n_steps + ahead, -1, 1]) plt.legend(fontsize=14) plot_multiple_forecasts2(X_new, Y_new, Y_pred) save_fig("forecast_ahead_plot2") plt.show()Saving figure forecast_ahead_plot2LSTMsnp.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.LSTM(20, return_sequences=True, input_shape=[None, n_input_features]), keras.layers.LSTM(20, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(n_output_features)) ]) model.compile(loss="mse", optimizer="adam", metrics=[last_10_time_steps_mse]) history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) model.evaluate(X_valid, y_valid) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() np.random.seed(43) # not 42, as it would give the first series in the train set series = generate_time_series(1, 50 + 10) np.random.seed(12) series2 = generate_time_series(1, 50 + 10) X_new, Y_new = np.stack((series[:, :50, -1], series2[:, :50, -1]),axis=-1), np.stack((series[:, 50:, -1], series2[:, 50:, -1]), axis=-1) X = X_new Y_pred = model.predict(X_new)[:, -10:, :] 
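# model.predict returns an output at every one of the 50 input steps (the
# Dense layer is TimeDistributed); slicing [:, -10:, :] keeps only the last 10
# outputs, i.e. the predictions for steps 50-59 that lie beyond the inputs.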
plot_multiple_forecasts(X_new, Y_new, Y_pred) plt.show() plot_multiple_forecasts2(X_new, Y_new, Y_pred) plt.show()Using One-Dimensional Convolutional Layers to Process Sequences ```1D conv layer with kernel size 4, stride 2, VALID padding: |-----2----| |-----5---... |----23-----| |-----1----| |-----4-----| ... |-----22----| |-----0----| |-----3----| |---...-21---|X: 0 1 2 3 4 5 6 7 8 9 10 11 12 ... 43 44 45 46 47 48 49Y: 10 11 12 13 14 15 16 17 18 19 20 21 22 ... 53 54 55 56 57 58 59Output:X: 0 1 2 3 4 5 ... 19 20 21 22 23Y: 13 15 17 19 21 23 ... 51 53 55 57 59```np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="VALID", input_shape=[None, n_input_features]), keras.layers.LSTM(20, return_sequences=True), keras.layers.LSTM(20, return_sequences=True), keras.layers.TimeDistributed(keras.layers.Dense(n_output_features)) ]) model.compile(loss="mse", optimizer="adam", metrics=[last_5_time_steps_mse]) history = model.fit(X_train, y_train[:, 3::2], epochs=20, validation_data=(X_valid, y_valid[:, 3::2])) model.evaluate(X_valid, y_valid[:, 3::2]) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() model.summary() keras.utils.plot_model(model, 'cnn.png', show_shapes=True)WaveNet ```C2 /\ /\ /\ /\ /\ /\ /\ /\ /\ /\ /\ /\.../\ /\ /\ /\ /\ /\ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \C1 /\ /\ /\ /\ /\ /\ /\ /\ /\ /\ /\ /\ /.../\ /\ /\ /\ /\ /\ /\X: 0 1 2 3 4 5 6 7 8 9 10 11 12 ... 43 44 45 46 47 48 49Y: 10 11 12 13 14 15 16 17 18 19 20 21 22 ... 53 54 55 56 57 58 59Output:X: 0 1 2 3 4 5 ... 19 20 21 22 23Y: 13 15 17 19 21 23 ... 51 53 55 57 59``` Conv1D output has filters as last dimension so it has to be changed to n_output_featuresnp.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential() model.add(keras.layers.InputLayer(input_shape=[None, n_input_features])) for rate in (1, 2, 4, 8) * 2: #model.add(keras.layers.Lambda( # lambda inputs: keras.backend.temporal_padding(inputs, (rate, 0)))) model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding="causal", activation="relu", dilation_rate=rate)) model.add(keras.layers.Conv1D(filters=n_output_features, kernel_size=1)) model.compile(loss="mse", optimizer="adam", metrics=[last_10_time_steps_mse]) history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) model.evaluate(X_valid, y_valid) plot_learning_curves(history.history["loss"], history.history["val_loss"]) plt.show() model.summary() keras.utils.plot_model(model, 'cnn_wavenetsimple_causal.png', show_shapes=True)Inductive bias of neural networksNeural networks have an inductive bias for learning smooth functions. For a class of learning problems like probability density estimation, I'd argue that it is the wrong bias.For example, in case of a binary classification problem trained on 1-hot targets using softmax, outputs should represent probability of each class. For inputs that fall outside the training set, the network should produce close to 0 for both outputs. 
This is rarely the case due to the way neural networks learn smooth function.In this notebook, I explore the differences between what a neural network classifier learns vs what it should "ideally" learn.import torch import torch.nn.functional as F import seaborn as sns; sns.set() import matplotlib.pyplot as plt from scipy.ndimage.filters import gaussian_filter from scipy import stats from scipy.stats import norm class NN(torch.nn.Module): def __init__(self, input_size, hidden_size, output_size): super(NN, self).__init__() self.linear1 = torch.nn.Linear(input_size, hidden_size) self.linear2 = torch.nn.Linear(hidden_size, hidden_size) self.linear3 = torch.nn.Linear(hidden_size, output_size) def forward(self, x): x = torch.tanh(self.linear1(x)) x = torch.tanh(self.linear2(x)) x = torch.tanh(self.linear3(x)) return x def train_model(x, y): # Construct our model by instantiating the class defined above. model = NN(input_size=x.shape[-1], hidden_size=x.shape[-1]*4, output_size=y.shape[-1]) loss_fn = torch.nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=1e-2) for t in range(5000): # Forward pass: Compute predicted y by passing x to the model y_pred = model(x) #print("y_pred", y_pred) # Compute and print loss loss = loss_fn(y_pred, y) #print(t, loss.item()) # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step() #if t % 100 == 0: # print("Loss: ", loss.item()) print("Loss: ", loss.item()) return model def plot(x, xlabel, y, ylabel, title, xtrain, ytrain): x = x.squeeze().numpy() y = y.squeeze().numpy() plt.plot(x, y) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.ylim(-1.1,1.1) xtrain = xtrain.numpy() ytrain = ytrain.numpy() plt.plot(xtrain, ytrain, marker='x', markersize=5, color="red", linestyle = 'None') for i,j in zip(xtrain,ytrain): plt.annotate(f"({i[0]:.2f},{j[0]:.2f})",xy=(i,j)) plt.show() def get_ideal_function(x, y, test_x): test_x = test_x.squeeze() output = torch.zeros(test_x.shape).float() for i in range(x.shape[0]): x_i = x[i] y_i = y[i] var = 0.1 gaussian = torch.tensor(norm.pdf(test_x.numpy(), x_i, var)).float() gaussian = y_i * gaussian / gaussian.max() output += gaussian return output def run_experiment(title, x, y): print(title) print("============") input_size, hidden_size, output_size = x.shape[-1], 4, y.shape[-1] print("x", x) print("y", y) model = train_model(x, y) test_x = torch.linspace(0, 1, 100).unsqueeze(dim=1) plot(x=test_x, xlabel="X", y=model(test_x).detach(), ylabel="Y", title="Neural network", xtrain=x, ytrain=y) plot(x=test_x, xlabel="X", y=get_ideal_function(x, y, test_x), ylabel="Y", title="Ideal", xtrain=x, ytrain=y) x = torch.tensor([[0.5]]) y = torch.tensor([[1.0]]) run_experiment("Experiment 1", x, y) x = torch.tensor([[0.3], [0.7]]) y = torch.tensor([[1.0], [-1.0]]) run_experiment("Experiment 2", x, y) x = torch.tensor([[0.3], [0.7], [0.65]]) y = torch.tensor([[1.0], [1.0], [-1.0]]) run_experiment("Experiment 3", x, y)Experiment 1 ============ x tensor([[0.5000]]) y tensor([[1.]]) Loss: 0.0004523028910625726Compute CDFGiven a set a x values, with x just a float for now -- Find the bucket the values belong to- Sum up how many values in each bucket and put that together in a 1D tensor- Use the counts tensor to build a CDFimport math resolution_10_power = 3 resolution = 10 ** resolution_10_power resolution sample_counts = torch.zeros((resolution,)) x = torch.tensor([0.5, 0.22, 0.56, 0.54]) y = torch.tensor([1, 1, 1, -1]) x_bucket = (x * resolution).long() indices, 
counts = x_bucket.unique(return_counts=True) for i in range(indices.shape[-1]): sample_counts[indices[i]] += counts[i] cumsum = sample_counts.cumsum(0) cdf_table = (cumsum * resolution / cumsum[-1]).double() def cdf(x_query, cdf_table, resolution): #print("x_query", x_query) query_index = int(x_query * resolution) if query_index == 0 or cdf_table[query_index] == cdf_table[query_index - 1]: result = cdf_table[query_index] else: #print("query_index", query_index) a = cdf_table[query_index - 1] #print("a", a) b = cdf_table[query_index] #print("b", b) fraction = x_query * resolution - query_index #print("fraction", fraction) result = a + fraction * (b - a) #print("result", result) return result / resolution test_x = torch.linspace(0, 0.99, resolution).double() test_x_tilda = torch.tensor([cdf(x, cdf_table, resolution) for x in test_x]) test_x_tilda plt.plot(test_x.numpy(), test_x_tilda.numpy()) plt.title("CDF points") plt.show() points = torch.cat((test_x.unsqueeze(dim=1), test_x_tilda.unsqueeze(dim=1)), 1) def rotate(points, degrees): radians = degrees * math.pi / 180.0 rotation = torch.tensor([[math.cos(radians), -math.sin(radians)],[math.sin(radians), math.cos(radians)]]).double() return points @ rotation points_inverse = rotate(points, 45) # rotate 45deg points_inverse[:, 1] *= -1 # reflect around X axis points_inverse = rotate(points_inverse, -45) # rotate back plt.plot(points_inverse[:, 0].numpy(), points_inverse[:, 1].numpy()) plt.title("CDF Inverse points") plt.show() cdf_inverse_table = points_inverse[:, 0] * resolution def get_ideal_function(x, y, test_x): test_x = test_x.squeeze() output = torch.zeros(test_x.shape).float() for i in range(x.shape[0]): x_i = x[i] y_i = y[i] var = 0.04 x_i_tilda = cdf(x_i.numpy(), cdf_inverse_table, resolution) gaussian = torch.tensor(norm.pdf(test_x.numpy(), x_i_tilda, var)).float() gaussian = y_i * gaussian / gaussian.max() output += gaussian return output plot(x=test_x, xlabel="X axis not to scale", y=get_ideal_function(x, y, test_x), ylabel="Y", title="Ideal", xtrain=torch.tensor([cdf(x_i.numpy(), cdf_inverse_table, resolution) for x_i in x]).unsqueeze(1), ytrain=y.unsqueeze(1)) torch.tensor([[0.5]]).numpy().shapeFIBONACCI SERIES USING DECORATOR¶def fibo(f): fibonacci = {} def helper(x): if x not in fibonacci: fibonacci[x] = f(x) return fibonacci[x] return helper @fibo def fib(n): if n == 0: return 0 elif n == 1: return 1 else: return fib(n-1) + fib(n-2) fib = fibo(fib) print(fib(8))21exception handling in filesfile = open("letsupgrade.txt","w") file.write("Hey Guys I Hope its very good") file.close() file = open("letsupgrade.txt","r") fileData = file.read() print(fileData) file.close() file = open("letsupgrade.txt","r") file.write("Hey") file.close() try: file= open('letsupgrade.txt', 'w') file.write("hey") print(file.read()) file.close() except IOError: print("not writable")not writableThe Decorator Paradigm for Responsive Plotting Functions [![DOI](https://zenodo.org/badge/214871831.svg)](https://zenodo.org/badge/latestdoi/214871831)r"""markdown TITLE : The Decorator Paradigm for Responsive Plots AUTHOR : PROJECT : 2019-10-22 dotAstronomy Plotting Workshop """; __author__ = '' __version__ = "Oct 22, 2019"Decorators are an easy and powerful way to modify and extend functions. If you are unfamiliar with decorators, check out the *making_decorators* notebook under `/references/making_decorators.ipynb`In this notebook I will outline why plotting decorators are the best way to augment plotting functions. 
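For readers who have not seen decorators before, here is a minimal, self-contained sketch (not taken from the workshop materials) of the wrap-and-return pattern that the plotting decorators below build on:

```python
import functools

def shout(func):
    """A tiny decorator: call `func`, then upper-case whatever it returns."""
    @functools.wraps(func)          # keep the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper

@shout
def greet(name):
    return f"hello, {name}"

print(greet("dotAstronomy"))        # HELLO, DOTASTRONOMY
```

The plotting decorators that follow use exactly this pattern, with Matplotlib bookkeeping (axes, labels, titles, saving) inside the wrapper.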
This technique will allow you to rapidly build complex plotting functions, incorporate those functions into classes and other objects, and still make the plots responsive.Let's get started. - - - - - - Prepare Importsimport functools import numpy as np from astropy import units as u from matplotlib import pyplot as plt from astropy.visualization import astropy_mpl_style, quantity_support quantity_support() plt.style.use(astropy_mpl_style)Functions Defining a base class for decorators. This just makes decorator creation much simpler. For details, see the *making_decorators* notebook under `/references/making_decorators.ipynb`class DecoratorClassBase(): """ """ @staticmethod def _doc_func(docstring): return docstring def __new__(cls, func=None, **kwargs): """ this is a quick and dirty method for class-based decorator creation it is generically better to do this with a classmethod like @classmethod as_decorator(cls, func=None, ...): all the same code as here """ # make instance self = super().__new__(cls) # wrapper control: if func is not None: # this will return a wrapped function # pass all arguments and kwargs to init # since __init__ is will not be called self.__init__(func, **kwargs) return self(func) else: # this will return a function wrapper # for when using as a @decorator # __init__ will be automatically called after this return self # /def def __init__(self, func=None, **kwargs): """ these are stored to be used inside of __call__ they are not normally passed to the wrapped_function """ # store all values passed to __init__ for k, v in kwargs.items(): setattr(self, k, v) # /def def edit_docstring(self, wrapper): """blank call """ # docstring if wrapper.__doc__ is not None: wrapper.__doc__ = self._doc_func(wrapper.__doc__) # storing extra info wrapper._doc_func = self._doc_func return wrapper # /def # /classData Creating some data to plot.x = np.linspace(0, 2*np.pi, num=100) * u.s y = np.cos(x * u.rad / u.s) * u.m- - - - - - Scripting Plots The simplest way to plot with Matplotlib is to script plots.This can be done with either the Matlab script paradigm, or with an object-oriented paradigm.Here, for clarity and quality we use the object-oriented approach.fig, ax = plt.subplots() ax.plot(x, y) ax.set_xlabel(f'time [{ax.get_xlabel()}]') ax.set_ylabel(f'distance [{ax.get_ylabel()}]') ax.set_title('This is a Title')This technique is fine for prototyping a plot: every step is spelled out and each component may be adjusted.However, this script will product one, and only one, plot. If we want to make many plots than scripts must be turned into functions... Plotting Functions Now we are making simple plotting functions. All we have done is embed the above script inside a function called `plot_distance`.Provided an *x* and *y*, `plot_distance` will produce a plot of *x* vs *y*, titled 'This is a Title', with axis labels 'time [s]' and 'distance [m]', respectively.# defining function def plot_distance(x, y): fig, ax = plt.subplots() ax.plot(x, y) ax.set_xlabel(f'time [{ax.get_xlabel()}]') ax.set_ylabel(f'distance [{ax.get_ylabel()}]') ax.set_title('This is a Title') return fig, axCalling the function,plot_distance(x, y)We can call the function again, with different inputs. In comparison to the script, this is rather responsive.plot_distance(x, y/y*y.unit)However, this function will only ever make this plot. The axis labels and title are fixed. Moreover, the function as written assumes that x and y have units attached. 
What if x, y don't have units attached?plot_distance(x.value, y.value)The axis labels look pretty silly.In principle we can address the labels and title by adding a bunch of options to the plotting function and giving it a few `if` statements to handle the different scenarios.If fact, I'm going to do one better and even provide the option of specifying the Axes onto which to plot.def plot_distance_custom(x, y, ax=None, has_units=True, title=''): if ax is None: fig, ax = plt.subplots() ax.plot(x, y) if has_units: ax.set_xlabel(f'time [{ax.get_xlabel()}]') ax.set_ylabel(f'distance [{ax.get_ylabel()}]') else: ax.set_xlabel(f'time') ax.set_ylabel(f'distance') ax.set_title(title) return fig, axTesting this outplot_distance_custom(x.value, y.value, ax=None, has_units=False, title='Customized Plot')Great, now this is a responsive function!But what if we want to define a new function, `plot_distance_squared`? We have to go through this rigamarole all over again.```pythondef plot_distance_squared_custom(x, y, ax=None, has_units=True, title=''): if ax is None: fig, ax = plt.subplots() ax.plot(x, y**2) if has_units: ax.set_xlabel(f'time [{ax.get_xlabel()}]') ax.set_ylabel(f'distance [{ax.get_ylabel()}]') else: ax.set_xlabel(f'time') ax.set_ylabel(f'distance') ax.set_title(title) return fig, ax```There has to be a way to write the function `plot_distance_squared` without the hassle of rewriting all the plotting options.What if we could make something that took care of all the plotting options and we could apply that to the functions we want to write? Wouldn't that be nice?This magical beast (can) exist and it is called a **decorator**. Let There Be Decorators Here we will define a simple decorator that takes care of the axes, the units, the title, and saving the figure.Essentially, all the annoying `if` statements and axes shenanigans are controlled by this decorator. We can even add the relevant documentation for our added features to whatever function we are decorating. 
This is set in the `_addded doc`._added_doc = """ Decorator ------- ax: Axes (default None) will make new axes if None has_units: bool whether the arguments have units title: str the title return_ax: bool whether to return the axes savefig: str, list save locations can save to many locations if list of strs """ class FigureDecorator(DecoratorClassBase): """this is a new decorator all we need to do is inherit from DecoratorClassBase, define a _doc_func and make our wrapper function """ @staticmethod def _doc_func(docstring): return docstring + _added_doc def __call__(self, wrapped_function): """construct a function wrapper """ @functools.wraps(wrapped_function) def wrapper(*func_args, ax=None, xlabel=None, ylabel=None, has_units=True, title=None, return_ax=False, savefig=None, **func_kwargs): if ax is None: # setting new axes fig, ax = plt.subplots() else: plt.sca(ax) # calling function return_ = wrapped_function(*func_args, **func_kwargs) if has_units: ax.set_xlabel(f'{self.xlabel if xlabel is None else xlabel} [{ax.get_xlabel()}]') ax.set_ylabel(f'{self.ylabel if ylabel is None else ylabel} [{ax.get_ylabel()}]') else: ax.set_xlabel(f'{self.xlabel if xlabel is None else xlabel}') ax.set_ylabel(f'{self.ylabel if ylabel is None else ylabel}') if title is None and hasattr(self, 'title'): ax.set_title(self.title) else: ax.set_title(title) if savefig is not None: if isinstance(savefig, str): plt.savefig(savefig) else: for sf in savefig: plt.savefig(sf) if return_ax: return return_, ax return return_ # /def return self.edit_docstring(wrapper) # /defDefining the `plot_distance` and `plot_distance_squared` functions, this time applying the decorators.@FigureDecorator(xlabel='time', ylabel='distance', title='default title') def plot_distance_dec(x, y): plt.plot(x, y) @FigureDecorator(xlabel='time', ylabel='distance^2', title='another default title') def plot_distance_squared_dec(x, y): plt.plot(x, y * y)Now these functions have all the power of `FigureDecorator`. They will accept as key-word arguments: `ax`, `xlabel`, `ylabel`, `has_units`, `title`, `return_ax`, `savefig`. To see this in action,plot_distance_dec(x, y, title='This is a new title', xlabel=' $time$', ylabel='Distance', has_units=False)It works! 
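The decorator also rewrites docstrings, as promised above. The two functions just defined have no docstrings of their own, so `edit_docstring` leaves them untouched; a quick hypothetical example (not part of the original walkthrough) with a documented function shows the `_added_doc` block being appended:

```python
# Hypothetical check of the docstring feature: because the wrapped function has a
# docstring, edit_docstring appends _added_doc to it.
@FigureDecorator(xlabel='time', ylabel='distance', title='documented plot')
def plot_distance_documented(x, y):
    """Plot distance against time."""
    plt.plot(x, y)

print(plot_distance_documented.__doc__)  # original docstring followed by the 'Decorator' section
```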
We can now override the properties of the plot quite easily.To see a cooler example in action, the `ax` argument means that plotting functions can be applied to subplots.fig, axs = plt.subplots(1, 2, figsize=(12, 5)) plot_distance_dec(x, y, ax=axs[0], title='test') plot_distance_squared_dec(x, y, ax=axs[1]) plot_distance_squared_dec(x, y-.5* y.unit, ax=axs[1], has_units=False, title='plotting 2 things')The functions could also have been built via the function call syntax -- `new_func = decorator(old_func, **options)`.This allows existing and undecorated functions to be turned into our new, reponsive plotting functions.def simple_plot_distance(x, y): """An unresponsive function""" plt.plot(x, y) plt.xlabel(f'time [{plt.gca().get_xlabel()}]') plt.ylabel(f'distance [{plt.gca().get_ylabel()}]') plt.title('This is a Title') return plot_distance_comparison = FigureDecorator(simple_plot_distance, xlabel='time', ylabel='distance', title='default title') fig, axs = plt.subplots(1, 2, figsize=(12, 5)) plot_distance_dec(x, y, ax=axs[0]) plot_distance_comparison(x, y, ax=axs[1])This decorator also adds the nice feature of allowing multiple save locations.plot_distance_dec(x, y, title='overridden title', savefig=('../figures/decorator_plot_save1', '../figures/decorator_plot_save2')) plt.close();Mac Users# https://splinter.readthedocs.io/en/latest/drivers/chrome.html !which chromedriver executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False)Windows Users# executable_path = {'executable_path': 'chromedriver.exe'} # browser = Browser('chrome', **executable_path, headless=False) url = 'http://quotes.toscrape.com/' browser.visit(url) for x in range(1, 6): html = browser.html soup = BeautifulSoup(html, 'html.parser') quotes = soup.find_all('span', class_='text') for quote in quotes: print('page:', x, '-------------') print(quote.text) browser.click_link_by_partial_text('Next')page: 1 ------------- “The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.” page: 1 ------------- “It is our choices, Harry, that show what we truly are, far more than our abilities.” page: 1 ------------- “There are only two ways to live your life. One is as though nothing is a miracle. The other is as though everything is a miracle.” page: 1 ------------- “The person, be it gentleman or lady, who has not pleasure in a good novel, must be intolerably stupid.” page: 1 ------------- “Imperfection is beauty, madness is genius and it's better to be absolutely ridiculous than absolutely boring.” page: 1 ------------- “Try not to become a man of success. Rather become a man of value.” page: 1 ------------- “It is better to be hated for what you are than to be loved for what you are not.” page: 1 ------------- “I have not failed. 
I've just found 10,000 ways that won't work.” page: 1 ------------- “A woman is like a tea bag; you ne[...]MHW Systems#imports from importlib import reload from pkg_resources import resource_filename import numpy as np import h5py import os from matplotlib import pyplot as plt from mhw_analysis.systems import build %matplotlib notebookLoad (this cube was built by MWH_3D)#cube = np.load('tst_cube.npy') cube = np.load('tst_cube_pacific.npy') cube.shapeExplorech = 80 plt.clf() plt.imshow(cube[ch, :, :]) plt.show()Build one Convert cube to boolcube = cube.astype(bool)Run itreload(build) mask, parent = build.make_labels(cube) np.max(mask) for ii in range(1,np.max(mask)): print(ii,np.sum(mask==ii)) cube.size---- Fussing with h5py Load numpy arraytst_mask = np.load(os.path.join(resource_filename('mhw_analysis', 'systems'), 'tst.npz'))['mask'] tst_mask.shape tst_mask.dtypeh5pyf.close() f = h5py.File(os.path.join(resource_filename('mhw_analysis', 'systems'), 'tst_mask.hdf'), mode='w') dset = f.create_dataset("mask", tst_mask.shape, dtype='int32', chunks=True, compression='gzip', data=tst_mask) dset.chunks f.close()Readf2 = h5py.File(os.path.join(resource_filename('mhw_analysis', 'systems'), 'tst_mask.hdf'), mode='r') f2['mask'].shape f2['mask'].chunks %time tmp = f2['mask'][100:200,100:200, 100:205] %time tmp = f2['mask'][:, :, 100:205] tmp.nbytes / 1e6 tmp.shape tmpSOCdf = df.dropna(subset=["處理器型號"]) soc_list = df['處理器型號'].unique() soc_score = pd.read_csv('soc_score.csv').set_index('Unnamed: 0').T.to_dict('list') for s in soc_list: s_split = str(s).split() if s_split[-1] == 'Bionic': s = 'Apple '+s.split()[0] elif 'MediaTek' in s_split and 'Dimensity' in s_split: s = s.split()[-2] + ' '+s.split()[-1] elif s == 'MT6771' or s == 'MT6762V': s = 'Helio P60' elif s == 'MT6762D': s = 'Helio P22' if s not in soc_score: if s[:1] == 'T': s = 'Unisoc '+s if s not in soc_score: print(s) else: print(s) def soc2score(x): s = x['處理器型號'] s_split = str(s).split() if s_split[-1] == 'Bionic': s = 'Apple '+s.split()[0] elif 'MediaTek' in s_split and 'Dimensity' in s_split: s = s.split()[-2] + ' '+s.split()[-1] elif s == 'MT6771' or s == 'MT6762V': s = 'Helio P60' elif s == 'MT6762D': s = 'Helio P22' if s not in soc_score: if s[:1] == 'T': s = 'Unisoc '+s x['soc_s'] = soc_score[s][0] x['soc_m'] = soc_score[s][1] return x df = df.apply(soc2score, axis=1) df.columns df = df.drop(['處理器型號', '處理器品牌', '處理器核心數'], axis=1) df = df.drop(['處理器時脈','圖形處理器'], axis=1) df.columnsPricedf['Price'] = df['Price'].apply(lambda x: int(x.replace('$', '').replace(',','')))ROMdf = df.drop(['作業系統與版本'], axis=1) df = df.dropna(subset=["ROM儲存空間"]) def handle_rom(x): if 'TB' in x: return 1024*(int(x.replace('TB', ''))) else: return int(x.replace('GB', '')) df['ROM儲存空間'] = df['ROM儲存空間'].apply(handle_rom)Screen# df[df['主螢幕尺寸'].str.contains('inch') != True] df['主螢幕尺寸'] = df['主螢幕尺寸'].apply(lambda x: float(str(x).replace('inch', ''))) df = df.dropna(subset=["主螢幕解析度"]) pattern_resolution = '[\d]+x[\d]+' def handle_pix(x): reso = re.search(pattern_resolution, x['主螢幕解析度']).group().split('x') x['resolution_x'] = reso[0] x['resolution_y'] = reso[1] return x df = df.apply(handle_pix, axis=1) df = df.drop(['主螢幕解析度'], axis=1) df['主螢幕像素密度'] = df['主螢幕像素密度'].apply(lambda x: int(x.replace('ppi', ''))) df = df.drop(['主螢幕像素密度'], axis=1) df = df.drop(['主螢幕最大亮度', '主螢幕觸控'], axis=1) df['OLED'] = df['主螢幕材質'].str.contains('OLED') == True df = df.drop(['主螢幕材質'], axis=1) df['90Hz'] = df['主螢幕更新率'].str.contains('90') == True df['120Hz'] = df['主螢幕更新率'].str.contains('120') == 
True df['144Hz'] = df['主螢幕更新率'].str.contains('144') == True df = df.drop(['主螢幕更新率'], axis=1) df = df.drop(['主螢幕觸控採樣率'], axis=1)Comm.df = df.drop(['4G FDD LTE', '4G TDD LTE', '3G頻率', '2G頻率', '4G LTE', 'VoLTE', 'SIM Card類型', 'Wi-Fi'], axis=1) df['Wifi5G'] = df['IEEE 802.11傳輸速度'].str.contains('5GH') df['Wifi6'] = df['IEEE 802.11傳輸速度'].str.contains('6') df = df.drop(['IEEE 802.11傳輸速度'], axis=1) df['NFC'] = df['NFC'] == 'Yes' df = df.drop(['藍牙', '衛星定位'], axis=1) df['5G連網'] = (df['5G連網'] == 'Yes') df = df.drop(['5G NR頻率', '5G NR頻段', '5G連網', 'SIM1'], axis=1) df = df.drop(['SIM卡槽數', 'SIM卡槽設計', 'SIM2', '5G組網方式'], axis=1) df = df.drop(['CA支援頻段組合', 'LTE Cat.', 'CA載波聚合','SIM卡槽或記憶卡擇一'], axis=1) df = df.drop(['雙卡雙通'], axis=1) df['藍牙版本'].value_counts() df['bt5.0'] = (df['藍牙版本'] == 5.0) df['bt5.1'] = (df['藍牙版本'] == 5.1) df['bt5.2'] = (df['藍牙版本'] == 5.2) df['bt4.2'] = (df['藍牙版本'] == 4.2) df = df.drop(['藍牙版本'], axis=1) df.columns[20:]RAMdf[df['RAM記憶體'].isnull()]['Name'] name2ram = {'mto_m68_plus': 0.5, 'apple_iphone_xr': 3, 'apple_iphone_se_2020': 3, 'apple_iphone_11': 4, 'apple_iphone_11_pro': 4, 'apple_iphone_12': 4, 'apple_iphone_12_mini': 4, 'apple_iphone_12_pro': 6, 'apple_iphone_12_pro_max': 6, 'apple_iphone_13': 4, 'apple_iphone_13_mini': 4, 'apple_iphone_13_pro': 6, 'apple_iphone_13_pro_max': 6 } def handle_ram(x): ram = str(x['RAM記憶體']) if 'B' not in ram: return name2ram[x['Name']] else: if 'GB' in ram: return float(ram.replace('GB', '')) elif 'MB' in ram: return float(ram.replace('MB', ''))/1024 df['RAM記憶體'] = df.apply(handle_ram, axis=1)Other about memorydf['UFS'] = df['儲存空間格式'].str.contains('UFS') == True df['記憶卡'] = ~df['記憶卡'].isnull() df = df.drop(['最大擴充儲存空間','記憶體格式','儲存空間格式'], axis=1)Cameradf.columns df = df.drop(['主相機感光元件', '主相機光圈F', '主相機等效焦距', '主相機LED補光燈', '主相機自動對焦', '第二主相機感光元件', '第二主相機光圈F', '第二主相機等效焦距', '第三主相機感光元件', '第三主相機光圈F','第三主相機等效焦距', '第四主相機感光元件', '第四主相機光圈F','第四主相機等效焦距', '前相機感光元件', '前相機光圈F', '前相機LED補光燈','前相機自動對焦', '第二前相機感光元件', '第二前相機光圈F' ], axis=1) df = df.drop(['前相機等效焦距', '第三前相機光圈F', '第三前相機感光元件'], axis=1) df.columns df['主相機光學防手震'] = df['主相機光學防手震'] == 'Yes' df['主相機UHD 4K錄影'] = df['主相機UHD 4K錄影'] == 'Yes' df['前相機UHD 4K錄影'] = df['前相機UHD 4K錄影'] == 'Yes' df['主相機8K錄影'] = df['主相機8K錄影'] == 'Yes' def to_MP(x): x = str(x) if 'n' in x: return 0 elif '萬畫素' in x: return float(x.replace('萬畫素', ''))/100 elif '億畫素' in x: return float(x.replace('億畫素', ''))*100 else: return float(x)/100 df['主相機畫素'] = df['主相機畫素'].apply(to_MP) df['第二主相機畫素'] = df['第二主相機畫素'].apply(to_MP) df['第三主相機畫素'] = df['第三主相機畫素'].apply(to_MP) df['第四主相機畫素'] = df['第四主相機畫素'].apply(to_MP) df['前相機畫素'] = df['前相機畫素'].apply(to_MP) df['第二前相機畫素'] = df['第二前相機畫素'].apply(to_MP) def calculate_cam_num(x): if x['第四主相機畫素'] != 0: return 4 elif x['第三主相機畫素'] != 0: return 3 elif x['第二主相機畫素'] != 0: return 2 elif x['主相機畫素'] != 0: return 1 else: return 0 df['cam_back_num'] = df.apply(calculate_cam_num, axis=1) df = df.drop(['錄影格式'], axis=1) df['RAW檔拍攝'] = df['RAW檔拍攝'] == 'Yes'Batterydict_charging = { 'asus_zenfone_6_zs630kl': 18, 'asus_zenfone_7_pro_zs671ks': 30, 'asus_zenfone_7_zs670ks': 30, 'blackshark_2': 27, 'cat_s61': 18, 'htc_desire_20plus': 18, 'htc_desire_20_pro': 18, 'huawei_mate_20_x_5g': 40, 'lg_v60_thinq': 25, 'lg_velvet': 27, 'lg_wing': 25, 'motorola_one_hyper': 45, 'oppo_a91': 20, 'sony_xperia_1_ii': 18, 'sugar_t50': 15 } def handle_charging(x): charging = str(x['快充功率']) if 'n' in charging and str(x['快充技術']) != 'nan': return dict_charging[x['Name']] elif 'n' in charging and x['Brand'] == 'apple': return 18 elif 'n' in charging: return 10 else: 
return float(charging.replace('W', '')) df['快充功率'] = df.apply(handle_charging, axis=1) df['無線充電'] = df['無線充電'] == 'Yes' df['可替換電池'] = df['可替換電池'] == 'Yes' df['無線反向充電'] = df['無線反向充電'] == 'Yes' df = df.drop(['最大通話時間','快充技術'], axis=1) df = df.drop(['最大待機時間'], axis=1) df[df['電池容量'].isnull()]['Name'] dict_battery = { 'apple_iphone_11': 3110, 'apple_iphone_11_pro': 3046, 'apple_iphone_12': 2815, 'apple_iphone_12_mini': 2227, 'apple_iphone_12_pro': 2815, 'apple_iphone_12_pro_max': 3687, 'apple_iphone_13': 3095, 'apple_iphone_13_mini' : 2406, 'apple_iphone_13_pro': 3095, 'apple_iphone_13_pro_max': 4352, 'apple_iphone_se_2020': 1821, 'apple_iphone_xr': 2658 } def handle_battery(x): b = str(x['電池容量']) if 'mAh' in b: return int(b.replace('mAh', '')) elif 'nan' in b: return dict_battery[x['Name']] else: return int(b) df['電池容量'] = df.apply(handle_battery, axis=1)外觀df['glass'] = df['機身材質'].str.contains('玻璃') == True df['plastic'] = df['機身材質'].str.contains('塑料') == True df = df.drop(['機身材質'], axis=1) df['摺疊螢幕'] = (df['裝置分類'].str.contains('摺疊螢幕') | df['裝置分類'].str.contains('疊螢幕')) df = df.drop(['裝置分類'], axis=1) def handle_whl(x): x['機身長度'] = float(str(x['機身長度']).replace('mm', '')) x['機身寬度'] = float(str(x['機身寬度']).replace('mm', '')) x['機身厚度'] = float(str(x['機身厚度']).replace('mm', '')) return x df = df.apply(handle_whl, axis=1) df['副螢幕觸控'] = ((~df['副螢幕觸控'].isnull()) & df['副螢幕觸控'].str.contains('Yes')) def handle_weight(x): if x['Name'] == 'koobee_s16': return 157 else: return float(str(x['機身重量']).replace('g', '')) df['機身重量'] = df.apply(handle_weight, axis=1) df['機身顏色'] = df['機身顏色'].apply(lambda x: len(x.split(', ')))Sensordf.columns df = df.drop(['陀螺儀', '加速度感應器', '指紋辨識器'], axis=1)Otherdf = df.drop(['喇叭', '紅外線','影片播放格式', '音樂播放器', '圖片支援格式', '實用工具', '時間顯示', '計步器', '內建觸控筆', '吊飾孔'], axis=1) df['防水防塵等級'] = ~df['防水防塵等級'].isnull() df['USB3'] = ((~df['傳輸埠'].isnull()) & df['傳輸埠'].str.contains('3')) df = df.drop(['傳輸埠'], axis=1) df['3.5mm耳機孔'] = ((~df['3.5mm耳機孔'].isnull()) & df['3.5mm耳機孔'].str.contains('Yes')) df = df.drop(['麥克風'], axis=1) df = df.drop(['副螢幕尺寸', '高度氣壓感測器', '副螢幕材質'], axis=1) df = df.drop(['主螢幕佔比', '主螢幕耐用性'], axis=1) df.to_csv('sogi_df.csv', index=False) df = pd.read_csv('sogi_df.csv') df df.drop(['Name','副螢幕解析度'], axis=1).to_csv('sogi_mobile.csv', index=False) set(df.columns)-set(['Price', 'ROM儲存空間', '主螢幕尺寸', '主相機畫素', '主相機光學防手震', '主相機UHD 4K錄影', '第二主相機畫素', '前相機畫素', '前相機UHD 4K錄影', 'NFC', '快充功率', '無線充電', 'Brand', '防水防塵等級','機身長度','機身重量', '機身寬度', '機身厚度','副螢幕觸控', '第三主相機畫素','RAM記憶體', '電池容量', '主相機8K錄影', '記憶卡', 'RAW檔拍攝', '可替換電池','USB3','3.5mm耳機孔', '無線反向充電', '第四主相機畫素', '第二前相機畫素', 'soc_s', 'soc_m', 'resolution_x', 'resolution_y', 'OLED', '90Hz', '120Hz', '144Hz', 'Wifi5G', 'Wifi6', 'bt5.0', 'bt5.1', 'bt5.2', 'bt4.2', 'UFS','摺疊螢幕', 'cam_back_num', 'glass', 'plastic'])Random ForestRandom Forest is an ensemble tree-based predictive algorithm. In this case we will be using it for regression - we want to predict a continuous number, predicted points, for each player each game. It works by training many separate decision trees, each using a subset of the training data, and outputs the average prediction across all trees.Applying it to a time series problem, where metrics from recent time periods can be predicitve, requires us to add in window features (e.g. points scored last gameweek). 
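To make the idea of a window feature concrete before using the project's helpers, here is a toy pandas sketch of a one-gameweek lag per player; this is only an illustration, not the actual `player_lag_features` implementation.

```python
# Toy illustration of a lag/window feature: each player's points in their previous gameweek.
# The real features come from player_lag_features / team_lag_features in 00_fpl_features.
import pandas as pd

toy = pd.DataFrame({
    'player': ['A', 'A', 'A', 'B', 'B'],
    'gw': [1, 2, 3, 1, 2],
    'total_points': [2, 6, 1, 9, 3],
})
toy = toy.sort_values(['player', 'gw'])
toy['total_points_last_1'] = toy.groupby('player')['total_points'].shift(1)
print(toy)  # first gameweek per player has no history, hence NaN
```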
These are created using the player_lag_features function from 00_fpl_features.# add a bunch of player lag features lag_train_df, team_lag_vars = team_lag_features(train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10]) lag_train_df, player_lag_vars = player_lag_features(lag_train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10])Similar to the simple model, we'll set the validation period to be gameweeks 20-25 of the 2019/20 season - the model will be trained on all data prior to that period. This time however, we'll be using some additional features: the season, gameweek, player position, home/away, and both teams, as well as all the lagging features we created above.# set validaton point/length and categorical/continuous variables valid_season = '2021' valid_gw = 20 valid_len = 6 cat_vars = ['season', 'position', 'was_home', 'team', 'opponent_team'] cont_vars = ['gw']#, 'minutes'] dep_var = ['total_points']Some of the features have an order (2019/20 season is after 2019 season) whereas others do not (position). We can set this in the data where appropriate using an ordered category (e.g. 1617 < 1718 < 1819 < 1920 < 2021).# we want to set gw and season as ordered categorical variables # need lists with ordered categories ordered_gws = list(range(1,39)) ordered_seasons = ['1617', '1718', '1819', '1920', '2021'] # set as categories with correct order lag_train_df['gw'] = lag_train_df['gw'].astype('category') lag_train_df['season'] = lag_train_df['season'].astype('category') lag_train_df['gw'].cat.set_categories(ordered_gws, ordered=True, inplace=True) lag_train_df['season'].cat.set_categories(ordered_seasons, ordered=True, inplace=True) lag_train_df['season']And now we can go ahead and create our training and validation sets using the function we defined in the last notebook.# create dataset with adjusted post-validation lag numbers train_valid_df, train_idx, valid_idx = create_lag_train(lag_train_df, cat_vars, cont_vars, player_lag_vars, team_lag_vars, dep_var, valid_season, valid_gw, valid_len)The way we calculate our lag features means that there will be null values in our dataset. This will cause an error when using random forest in scikit learn, so we will set them all to zero for now (although note that this may not be the best fill strategy).lag_train_df[~np.isfinite(lag_train_df['total_points_pg_last_1'])] # imp = SimpleImputer(missing_values=np.nan, strategy='mean') # need to think about imputing NaN instead of setting to zero # imp.fit(X_train[team_lag_vars + player_lag_vars]) train_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0)The random forest regressor will only take numbers as inputs, so we need to transform our caterogical features into a format that the random forest regressor object will be able to use, numbers instead of strings in one or more columns.# split out dependent variable X, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy() # since position is categorical, it should be a string X['position'] = X['position'].apply(str) # need to transform season enc = LabelEncoder() X['season'] = enc.fit_transform(X['season']) X_dict = X.to_dict("records") # Create the DictVectorizer object: dv dv = DictVectorizer(sparse=False, separator='_') # Apply dv on df: df_encoded X_encoded = dv.fit_transform(X_dict) X_df = pd.DataFrame(X_encoded, columns=dv.feature_names_)For example, season is now represented by a number (0 -> 2016/17, 1 -> 2017/18, etc.) 
in a single column, and position is represented by a 1 or 0 in multiple columns.X_df[['season', 'position_1', 'position_2', 'position_3', 'position_4']] X_df.columnsLet's now split out our training (everything prior to the validation gameweek) and validation (6 gameweeks from the validation gameweek, only rows with >0 minutes)# split out training and validation sets X_train = X_df.loc[train_idx] y_train = y.loc[train_idx] X_test = X_df.loc[valid_idx] # we only want look at rows with >0 minutes (i.e. the player played) # test_mask = (X_test['minutes'] > 0) # X_test = X_test[test_mask] # y_test = y.loc[valid_idx][test_mask] y_test = y.loc[valid_idx] # X_train = X_train.drop('minutes', axis=1) # X_test = X_test.drop('minutes', axis=1)We can now create the RandomForestRegessor with set parameters, train using the training data, and look at the error on the validation set.# def rf(xs, y, n_estimators=40, max_samples=50_000, # max_features=0.5, min_samples_leaf=5, **kwargs): # return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators, # max_samples=max_samples, max_features=max_features, # min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y) def rf(xs, y, max_depth=7, **kwargs): return RandomForestRegressor(n_jobs=-1, max_depth=max_depth, oob_score=True).fit(xs, y) # fit training data m = rf(X_train, y_train.values.ravel()) # predict validation set and output metrics preds = m.predict(X_test) print("RMSE: %f" % (r_mse(preds, y_test.values.ravel()))) print("MAE: %f" % mae(preds, y_test.values.ravel()))RMSE: 2.147401 MAE: 1.217610Right away this looks like it's a significant improvement on the simple model, good to see. Let's go ahead and use the same approach with validation across the whole of the 2019/20 season.def rf_season(df, valid_season='2021'): # empty list for scores scores = [] valid_len = 6 for valid_gw in range(1,40-valid_len): # create dataset with adjusted post-validation lag numbers train_valid_df, train_idx, valid_idx = create_lag_train(df, cat_vars, cont_vars, player_lag_vars, team_lag_vars, dep_var, valid_season, valid_gw, valid_len) train_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0) # split out dependent variable X, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy() # since position is categorical, it should be a string X['position'] = X['position'].apply(str) # need to transform season enc = LabelEncoder() X['season'] = enc.fit_transform(X['season']) X_dict = X.to_dict("records") # Create the DictVectorizer object: dv dv = DictVectorizer(sparse=False, separator='_') # Apply dv on df: df_encoded X_encoded = dv.fit_transform(X_dict) X_df = pd.DataFrame(X_encoded, columns=dv.feature_names_) # split out training and validation sets X_train = X_df.loc[train_idx] y_train = y.loc[train_idx] X_test = X_df.loc[valid_idx] # we only want look at rows with >0 minutes (i.e. 
the player played) # test_mask = (X_test['minutes'] > 0) # X_test = X_test[test_mask] # y_test = y.loc[valid_idx][test_mask] y_test = y.loc[valid_idx] m = rf(X_train, y_train.values.ravel()) preds, targs = m.predict(X_test), y_test.values.ravel() gw_mae = mae(preds, targs) print("GW%d MAE: %f" % (valid_gw, gw_mae)) scores.append(gw_mae) return scores scores = rf_season(lag_train_df) plt.plot(scores) plt.ylabel('GW MAE') plt.xlabel('GW') plt.text(15, 1.55, 'Season Avg MAE: %.2f' % np.mean(scores), bbox={'facecolor':'white', 'alpha':1, 'pad':5}) plt.show()Looking across the whole season we see about a 10% improvement versus the simple model. Also interesting is that the performance again improves as the season progresses - this makes sense, more data about each of teams and players (particularly new ones) means improved ability to predict the next 6 gameweeks.Let's add these validation scores to our comparison dataset.model_validation_scores = pd.read_csv(path/'charts/model_validation_scores.csv', index_col=0) model_validation_scores['random_forest'] = scores model_validation_scores.to_csv(path/'charts/model_validation_scores.csv')A feature of the random forest algorithm is that we can see how often features are being used in trees. This will give us an indication of how important each feature is i.e. is it predictive of todal points scored. Simple models are usually better, so this also gives us a way of seeing if there are any features that are not particularly useful, and can therefore be removed.def rf_feat_importance(m, df): return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_} ).sort_values('imp', ascending=False) fi = rf_feat_importance(m, X_train) fi[:32] def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False).invert_yaxis() plot_fi(fi[:30]);At the moment this algorithm is given minutes played in the gameweek so it's unsurprising that this is by far the most important feature - the more minutes a player plays, the more opportunity to score points. But strictly speaking we don't actually have this information prior to a gameweek (in practice it is estimated using previous minutes and injury status), so we can ignore it for now.Below that the top features are:1. minutes_last_1 - number of minutes in the last fixture for the player2. minutes_last_2 - number of minutes in the last two fixtures for the player3. total_points_pg_last_all - the player's average points per game in all of history (since start of 2016/17 season)4. total_points_team_pg_last_all_opponent - the opposition's average points per game in all of history5. minutes_last_3 - number of minutes in the last three fixtures for the player6. total_points_team_pg_last_all - the player's team's average points per game in all of history7. total_points_pg_last_10 - the player's average points per game in the last 10 fixtures8. total_points_pg_last_1 - the player's average points per game in the last fixtureThis is interesting. It seems to be saying that the amount of minutes a player has played recently and their underlying ability to score points in all of history, along with their team's and opponent team's points scoring in all of history, is most important.Recent performance (i.e. 'form') is also important, but to a lesser extent.It also shows that the lag features are far more useful than the categorical features such as team, opponent and position. Again not too surprising since information on these categories are already captured in the lag features.Let's test this... 
we can remove anything with a feature importance of less than 0.005 and see how the model performs on the original 2019/20 week 20 validation point (going from 94 features to just 32).to_keep = fi[fi.imp>0.005].cols len(to_keep) len(X_train.columns) X_train_imp = X_train[to_keep] X_test_imp = X_test[to_keep] m = rf(X_train_imp, y_train.values.ravel()) mae(m.predict(X_test_imp), y_test.values.ravel()) # mae(m.predict(X_train_imp), y_train.values.ravel())Very similar albeit slightly higher error (less than 1% worse performance) than previously, and still a long way ahead of the simple model.Continuing our thinking about improving/simplifying the model features, we can also look to see if there are any similar features - quite often we will find that some features are so similar that some of them may be redundant.The following function determines the similarity between columns in a dataset and visualises it using a dendrogram.def cluster_columns(df, figsize=(10,6), font_size=12): corr = np.round(sp.stats.spearmanr(df).correlation, 4) corr_condensed = hc.distance.squareform(1-corr) z = hc.linkage(corr_condensed, method='average') fig = plt.figure(figsize=figsize) hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size) plt.show() cluster_columns(X_train_imp)We can see that our lagging features are somewhat similar - absolutely expected since, for example, minutes_last_5 is equal to minutes_last_4 + minutes 5 games ago. They are still different enough to be of value separately, but it does make me wonder whether separating out each historic game in some way (up to a point) would be valuable. A final useful tool we can use is partial dependency plots. These try to look at the impact of single features on the dependent variable (points scored).fig,ax = plt.subplots(figsize=(12, 3)) plot_partial_dependence(m, X_test_imp, ['total_points_pg_last_all', 'total_points_team_pg_last_all_opponent', 'total_points_pg_last_1'], grid_resolution=20, ax=ax);Appendix: summary of performance using Apache Spark (Apache Parquet and FITS)Author: ** [@JulienPeloton](https://github.com/JulienPeloton)** Last Run: **2018-11-22** See also: [issue/249](https://github.com/LSSTDESC/DC2-production/issues/249)This notebook summarises the performance of data manipulations of the DC2 object catalogs with Apache Spark.We focus on Parquet and FITS format.The notebook is intended to be executed with the `desc-pyspark` kernel with **32 threads** (one full Cori Haswell node). See [LSSTDESC/desc-spark](https://github.com/LSSTDESC/desc-sparkworking-at-nersc-jupyterlab) for more information.from pyspark.sql import SparkSession # Initialise our Spark session spark = SparkSession.builder.getOrCreate()DatasetWe focus on "One Tract" (OT) and "All Tract" (AT) catalogs.import os base_dir = '/global/projecta/projectdirs/lsst/global/in2p3/Run1.1/summary' print("Data will be read from: \n", base_dir) # Path to data parq_4850_OT = os.path.join(base_dir, 'dpdd_object_tract_4850_hive.parquet') fits_4850_OT = os.path.join(base_dir, 'dpdd_object_tract_4850*.fits') parq_hive_AT = os.path.join(base_dir, 'dpdd_object.parquet') parq_simp_AT = os.path.join(base_dir, 'dpdd_object_simple.parquet') fits_simp_AT = os.path.join(base_dir, 'dpdd_object_tract_*.fits')ResultsHere is a summary of the results. 
The configuration for this run was:- "One Tract" (OT) and "All Tract" (AT) catalogs- 1 full Cori compute node (32 cores).Numbers should be read as order of magnitude (it will slightly vary from run to run, depending also on the load of the machine as JupyterLab is a shared resource).Details can be found below.No cache:| Data set | Rows (size GB) | Load time | statistics* ||---------------------|-----------------|-------------|-----------------|| Parquet (OT) | 719,228 (0.43) | 192 ms ± 66.5 ms | 189 ms ± 84 ms || FITS (OT) | 719,228 (0.57) | 3.74 s ± 107 ms | 3.67 s ± 123 ms || Parquet (AT, Hive) | 6,892,380 (4.5) | 726 ms ± 117 ms | 990 ms ± 521 ms || Parquet (AT, Simple)| 6,892,380 (3.6) | 210 ms ± 28.2 ms | 459 ms ± 46.5 ms || FITS (AT) | 6,892,380 (5.4) | 25.7 s ± 308 ms | 24.4 s ± 1.24 s |_*statistics_ means computing: number of elements, mean, stddev, min, max.With cache (overhead of < 1 second to add to put data in cache):| Data set | Rows (size GB) | Load time | statistics ||---------------------|-----------------|-------------|-----------------|| Parquet (OT) | 719,228 (0.43) | 393 ms ± 86.2 ms | 111 ms ± 45.4 ms || FITS (OT) | 719,228 (0.57) | 312 ms ± 59.2 ms | 99.5 ms ± 49.4 ms || Parquet (AT, Hive) | 6,892,380 (4.5) | 215 ms ± 102 ms | 351 ms ± 150 ms| Parquet (AT, Simple)| 6,892,380 (3.6) | 181 ms ± 74.1 ms | 391 ms ± 52.9 ms| FITS (AT) | 6,892,380 (5.4) | 2.78 s ± 1.37 s | 3.05 s ± 600 msRemarks:- Results using Parquet are much faster than FITS. Reasons can be for e.g. columnar vs row-based or better implementation of the Spark Parquet connector. I believe this is related to what is seen with Dask between Parquet and HDF5.- Note however that the size on disk of the datasets varies: 4.5 GB for Parquet Hive, 3.6 GB for Parquet simple, and 5.4 GB for FITS.- For FITS, the number of input files matter. It is always better to have small number of large files rather than many small files.- Once data in cache, everything is super fast.- Note that given the small volume of data, most of the results below the seconds are basically dominated by Spark noise and not actual computation (which is why sometimes a simple count can be slower than computing full statistics). Details of the benchmarksfrom pyspark.sql import DataFrame from pyspark.sql.functions import col import time def readfits(path: str, hdu: int=1) -> DataFrame: """ Wrapper around Spark reader for FITS Parameters ---------- path : str Path to the data. Can be a file, a folder, or a glob pattern. hdu : int, optional HDU number to read. Default is 1. Returns ---------- df : DataFrame Spark DataFrame with the HDU data. """ return spark.read.format("fits").option("hdu", hdu).load(path) def readparq(path: str) -> DataFrame: """ Wrapper around Spark reader for Parquet Parameters ---------- path : str Path to the data. Can be a file, a folder, or a glob pattern. Returns ---------- df : DataFrame Spark DataFrame with the HDU data. """ return spark.read.format("parquet").load(path) def simple_count(df: DataFrame, cache=False, txt: str="") -> int: """ Return the number of rows in the DataFrame Parameters ---------- df : DataFrame Spark DataFrame cache : bool, optional If True, put the Data in cache prior to the computation. Data will be unpersisted afterwards. Default is False. txt: str, optional Additional text to be printed. 
Returns ---------- out : int Number of rows """ if cache: start = time.time() df = df.cache() print("Cache took {:.1f} sec".format(time.time() - start)) res = df.count() print("{} has length:".format(txt), res) # Time it! %timeit df.count() return res def stat_diff_col( df: DataFrame, colname_1: str, colname_2: str, cache=False, txt: str="") -> DataFrame: """ Return some statistics about the difference of two DataFrame Columns. Statistics include: count, mean, stddev, min, max. Parameters ---------- df : DataFrame Spark DataFrame colname_1 : str Name of the first Column colname_2 : str Name of the second Column cache : bool, optional If True, put the Data in cache prior to the computation. Data will be unpersisted afterwards. Default is False. txt: str, optional Additional text to be printed. Returns ---------- out : DataFrame DataFrame containing statistics about the Columns difference. """ if cache: df = df.cache() print("{} has length:".format(txt), df.count()) # Time it! %timeit res = df.select(col(colname_1) - col(colname_2)).describe().collect() return df.select(col(colname_1) - col(colname_2)).describe() def stat_one_col(df: DataFrame, colname: str, cache=False, txt: str="") -> DataFrame: """ Return some statistics about one DataFrame Column. Statistics include: count, mean, stddev, min, max. Parameters ---------- df : DataFrame Spark DataFrame colname : str Name of the Column for which we want the statistics cache : bool, optional If True, put the Data in cache prior to the computation. Data will be unpersisted afterwards. Default is False. txt: str, optional Additional text to be printed. Returns ---------- out : DataFrame DataFrame containing statistics about the Column. """ if cache: df = df.cache() print("{} has length:".format(txt), df.count()) # Time it! 
%timeit res = df.select(colname).describe().collect() return df.select(colname).describe() # Accessing catalogs cache = False df = readparq(parq_4850_OT) c = simple_count(df, cache, "OT (Parquet)") df = readfits(fits_4850_OT) c = simple_count(df, cache, "OT (FITS)") df = readparq(parq_hive_AT) c = simple_count(df, cache, "AT (P-Hive)") df = readparq(parq_simp_AT) c = simple_count(df, cache, "AT (P-simple)") df = readfits(fits_simp_AT) c = simple_count(df, cache, "AT (FITS)") # Statistics: count, mean, stddev, min, max c1 = "mag_g" c2 = "mag_r" cache = False df = readparq(parq_4850_OT) c = stat_diff_col(df, c1, c2, cache, "OT (Parquet)") df = readfits(fits_4850_OT) c = stat_diff_col(df, c1, c2, cache, "OT (FITS)") df = readparq(parq_hive_AT) c = stat_diff_col(df, c1, c2, cache, "AT (P-Hive)") df = readparq(parq_simp_AT) c = stat_diff_col(df, c1, c2, cache, "AT (P-simple)") df = readfits(fits_simp_AT) c = stat_diff_col(df, c1, c2, cache, "AT (FITS)") # Accessing catalogs cache = True df = readparq(parq_4850_OT) c = simple_count(df, cache, "OT (Parquet)") df.unpersist() df = readfits(fits_4850_OT) c = simple_count(df, cache, "OT (FITS)") df.unpersist() df = readparq(parq_hive_AT) c = simple_count(df, cache, "AT (P-Hive)") df.unpersist() df = readparq(parq_simp_AT) c = simple_count(df, cache, "AT (P-simple)") df.unpersist() df = readfits(fits_simp_AT) c = simple_count(df, cache, "AT (FITS)") df.unpersist(); # Statistics: count, mean, stddev, min, max c1 = "mag_g" c2 = "mag_r" cache = True df = readparq(parq_4850_OT) c = stat_diff_col(df, c1, c2, cache, "OT (Parquet)") df.unpersist() df = readfits(fits_4850_OT) c = stat_diff_col(df, c1, c2, cache, "OT (FITS)") df.unpersist() df = readparq(parq_hive_AT) c = stat_diff_col(df, c1, c2, cache, "AT (P-Hive)") df.unpersist() df = readparq(parq_simp_AT) c = stat_diff_col(df, c1, c2, cache, "AT (P-simple)") df.unpersist() df = readfits(fits_simp_AT) c = stat_diff_col(df, c1, c2, cache, "AT (FITS)") df.unpersist();Micro TVM with TFLite Models**Author**: ` `_This tutorial is an introduction to working with MicroTVM and a TFLitemodel with Relay. SetupTo get started, TFLite package needs to be installed as prerequisite.install tflite.. code-block:: bash pip install tflite=2.1.0 --useror you could generate TFLite package yourself. The steps are the following: Get the flatc compiler. Please refer to https://github.com/google/flatbuffers for details and make sure it is properly installed... code-block:: bash flatc --versionGet the TFLite schema... code-block:: bash wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbsGenerate TFLite package... code-block:: bash flatc --python schema.fbsAdd the current folder (which contains generated tflite module) to PYTHONPATH... code-block:: bash export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)To validate that the TFLite package was installed successfully, ``python -c "import tflite"``CMSIS needs to be downloaded and the CMSIS_ST_PATH environment variable setupThis tutorial only supports the STM32F7xx series of boards.Download from : https://www.st.com/en/embedded-software/stm32cubef7.htmlAfter you've expanded the zip file.. code-block:: bash export CMSIS_ST_PATH=/path/to/STM32Cube_FW_F7_V1.16.0/Drivers/CMSIS Recreating your own Pre-Trained TFLite modelThe tutorial downloads a pretrained TFLite model. 
When working with microcontrollersyou need to be mindful these are highly resource constrained devices as such standardmodels like MobileNet may not fit into their modest memory.For this tutorial, we'll make use of one of the TF Micro example models.If you wish to replicate the training steps see:https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train .. note:: If you accidentally download the example pretrained model from: wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip this will fail due to an unimplemented opcode (114)import os import numpy as np import tvm import tvm.micro as micro from tvm.contrib.download import download_testdata from tvm.contrib import graph_runtime, utils from tvm import relayLoad and prepare the Pre-Trained ModelLoad the pretrained TFLite model from a file in your currentdirectory into a buffermodel_url = "https://people.linaro.org/~tom.gall/sine_model.tflite" model_file = "sine_model.tflite" model_path = download_testdata(model_url, model_file, module="data") tflite_model_buf = open(model_path, "rb").read()Using the buffer, transform into a tflite model python objecttry: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)Print out the version of the modelversion = tflite_model.Version() print("Model Version: " + str(version))Parse the python model object to convert it into a relay moduleand weights.It is important to note that the input tensor name must match whatis contained in the model.If you are unsure what that might be, this can be discovered by usingthe visualize.py script within the Tensorflow project.See : How do I inspect a .tflite file? ``_input_tensor = "dense_4_input" input_shape = (1,) input_dtype = "float32" mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype} )Now we create a build config for relay. turning off two optionsand then calling relay.build which will result in a C sourcefile... code-block:: pythonTARGET = tvm.target.target.micro("host") with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["FuseOps"] ): graph, c_mod, c_params = relay.build(mod, target=TARGET, params=params)Running on simulated deviceFirst, compile a static microTVM runtime for the targeted device. In this case, the host simulateddevice is used.workspace = tvm.micro.Workspace() compiler = tvm.micro.DefaultCompiler(target=TARGET) opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, "host")) micro_binary = tvm.micro.build_static_runtime( # the x86 compiler *expects* you to give the exact same dictionary for both # lib_opts and bin_opts. so the library compiler is mutating lib_opts and # the binary compiler is expecting those mutations to be in bin_opts. # TODO(weberlo) fix this very bizarre behavior workspace, compiler, c_mod, lib_opts=opts["bin_opts"], bin_opts=opts["bin_opts"], # Use the microTVM memory manager. If, in your main.cc, you change TVMPlatformMemoryAllocate and # TVMPlatformMemoryFree to use e.g. malloc() and free(), you can omit this extra library. extra_libs=[os.path.join(tvm.micro.build.CRT_ROOT_DIR, "memory")], )Next, establish a session with the simulated device and run thecomputation. 
The `with session` line would typically flash an attachedmicrocontroller, but in this tutorial, it simply launches a subprocessto stand in for an attached microcontroller... code-block:: pythonflasher = compiler.flasher() with tvm.micro.Session(binary=micro_binary, flasher=flasher) as session: graph_mod = tvm.micro.create_local_graph_runtime( graph, session.get_system_lib(), session.context ) # Set the model parameters using the lowered parameters produced by `relay.build`. graph_mod.set_input(**c_params) # The model consumes a single float32 value and returns a predicted sine value. To pass the # input value we construct a tvm.nd.array object with a single contrived number as input. For # this model values of 0 to 2Pi are acceptable. graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32"))) graph_mod.run() tvm_output = graph_mod.get_output(0).asnumpy() print("result is: " + str(tvm_output))This notebook demonstrates how to add metadata and units consistency to xarray fields loaded from grid using cfgrib (https://github.com/ecmwf/cfgrib)This is useful for dimensional analysis and physical type correctness of computations in post-processing of NWP simulationshttps://arxiv.org/pdf/1807.07643.pdf First we need to setup the installation of cfgrib for loading grib files into xarray datasets!pip install -q condacolab import condacolab condacolab.install() !conda install -c conda-forge cfgrib !python -m cfgrib selfcheckDownload a sample grib file and small dataset classes that add pint units to xarray arraysfrom getpass import getpass secret = getpass('Enter the secret value: ') import subprocess !rm -rf grib_files subprocess.run(['git', 'clone', 'https://'+secret+'@github.com/cosunae/grib_files.git'], check=True) !rm -rf postproc_pt1 subprocess.run(['git', 'clone', 'https://'+secret+'@github.com/cosunae/postproc_pt1'], check=True) !pip install postproc_pt1/ !python -m cfgrib selfcheck from dataset import open_datasets dss = open_datasets('grib_files/cosmo-eu/lfff00000000_2014010400.gb2', engine='cfgrib', backend_kwargs={'filter_by_keys': {'typeOfLevel': 'generalVerticalLayer'}})Since there u and v are staggered fields (i.e. have different lon,lat coordinates), not all fields can be inserted into the same hypercube. Therefore 3 different datasets are generated.mass_ds = dss[0] u_ds = dss[1] v_ds = dss[2]Inspect the datasets. Click "show data repr" of latitude/longitude coordinates to see the different coordinates system between mass_ds (mass point dataset) and the u_ds (u point dataset).mass_dsUnpack the individual fieldst = mass_ds['t'] q = mass_ds['q'] QI = mass_ds['QI'] pres = mass_ds['pres'] u = u_ds['u'] v = v_ds['v'] import xarray as xr xr.set_options(keep_attrs = True) # Following are compatible since dimensionless QQ = q+QI*0.01 print(QQ.units) # exception trigger due to incompatibility of dimensions try: f = t+q except RuntimeError as e: print("Testing error (as it should be)",e) # compute T at half levels, and check it preserves metadata and units def half_levels(f): cf = f.cumsum(dim='generalVerticalLayer') return (cf[2:] - cf[:-2])/float(2) t_half = half_levels(t) # Add t0 t0 = t.isel(generalVerticalLayer=0) t_half = t_half+t0 # consistency check try: f = t_half + q except RuntimeError as e: print("Testing error (as it should be)",e) # U & V are compatible. They both are defined in the same indexing x,y although have different lon,lat coordinates (u+v).isel(generalVerticalLayer=0).plot()Day 1: Of Numerical Integration and PythonWelcome to Day 1! 
Today, we start with our discussion of what Numerical Integration is. What is Numerical Integration?From the point of view of a theoretician, the ideal form of the solution to a differential equation given the initial conditions, i.e. an initial value problem (IVP), would be a formula for the solution function. But sometimes obtaining a formulaic solution is not always easy, and in many cases is absolutely impossible. So, what do we do when faced with a differential equation that we cannot solve? If you are only looking for long term behavior of a solution you can always sketch a direction field. This can be done without too much difficulty for some fairly complex differential equations that we can’t solve to get exact solutions. But, what if we need to determine how a specific solution behaves, including some values that the solution will take? In that case, we have to rely on numerical methods for solving the IVP such as euler's method or the Runge-Kutta Methods. Euler's Method for Numerical IntegrationWe use Euler's Method to generate a numerical solution to an initial value problem of the form:$$\frac{dx}{dt} = f(x, t)$$$$x(t_o) = x_o$$Firstly, we decide the interval over which we desire to find the solution, starting at the initial condition. We break this interval into small subdivisions of a fixed length $\epsilon$. Then, using the initial condition as our starting point, we generate the rest of the solution by using the iterative formulas:$$t_{n+1} = t_n + \epsilon$$$$x_{n+1} = x_n + \epsilon f(x_n, t_n)$$to find the coordinates of the points in our numerical solution. We end this process once we have reached the end of the desired interval.The best way to understand how it works is from the following diagram: Euler's Method in PythonLet $\frac{dx}{dt}=f(x,t)$, we want to find $x(t)$ over $t\in[0,2)$, given that $x(0)=1$ and $f(x,t) = 5x$. 
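To make the iteration concrete, take $\epsilon = 0.01$ (the timestep used in the code below): the first update gives $t_1 = 0.01$ and $x_1 = x_0 + \epsilon f(x_0, t_0) = 1 + 0.01 \cdot 5 \cdot 1 = 1.05$, and repeating this update step by step across $[0, 2)$ produces the numerical solution.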
The exact solution of this equation would be $x(t) = e^{5t}$.import numpy as np import matplotlib.pyplot as plt %matplotlib inline def f(x,t): # define the function f(x,t) return 5*x epsilon = 0.01 # define timestep t = np.arange(0,2,epsilon) # define an array for t x = np.zeros(t.shape) # define an array for x x[0]= 1 # set initial condition for i in range(1,t.shape[0]): x[i] = epsilon*f(x[i-1],t[i-1])+x[i-1] # Euler Integration Step ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.plot(t[::5],x[::5],".",label="Eulers Solution") plt.plot(t,np.exp(5*t),label="Exact Solution") plt.xlabel("t") plt.ylabel("x") plt.legend() plt.show()Euler and VectorsEuler's Method also applies to vectors and can solve simultaneous differential equations.The Initial Value problem now becomes:$$\frac{d\vec{X}}{dt} = \vec{f}(\vec{X}, t)$$$$\vec{X}(t_o) = \vec{X_o}$$where $\vec{X}=[X_1,X_2...]$ and $\vec{f}(\vec{X}, t)=[f_1(\vec{X}, t),f_2(\vec{X}, t)...]$.The Euler's Method becomes:$$t_{n+1} = t_n + \epsilon$$$$\vec{X_{n+1}} = \vec{X_n} + \epsilon \vec{f}(\vec{X_n}, t_n)$$Let $\frac{d\vec{X}}{dt}=f(\vec{X},t)$, we want to find $\vec{X}(t)$ over $t\in[0,2)$, given that $\vec{X}(t)=[x,y]$, $\vec{X}(0)=[1,0]$ and $f(\vec{X},t) = [x-y,y-x]$.def f(X,t): # define the function f(x,t) x,y = X return np.array([x-y,y-x]) epsilon = 0.01 # define timestep t = np.arange(0,2,epsilon) # define an array for t X = np.zeros((2,t.shape[0])) # define an array for x X[:,0]= [1,0] # set initial condition for i in range(1,t.shape[0]): X[:,i] = epsilon*f(X[:,i-1],t[i-1])+X[:,i-1] # Euler Integration Step plt.plot(t[::5],X[0,::5],".",label="Eulers Solution for x") plt.plot(t[::5],X[1,::5],".",label="Eulers Solution for y") plt.xlabel("t") plt.ylabel("x") plt.legend() plt.show()A Generalized function for Euler IntegrationNow, we create a generalized function that takes in 3 inputs ie. the function $\vec{f}(\vec{y},t)$ when $\frac{d\vec{y}}{dt}=f(\vec{y},t)$, the time array, and initial vector $\vec{y_0}$. Algorithm- Get the required inputs: function $\vec{f}(\vec{y},t)$, initial condition vector $\vec{y_0}$ and time series $t$. Entering a time series $t$ allows for greater control over $\epsilon$ as it can now vary for each timestep. The only difference in the Euler's Method is now : $\epsilon\rightarrow\epsilon(t_n)$.- Check if the input is of the correct datatype ie. 
floating point decimal.- Create a zero matrix to hold the output.- For each timestep, perform the euler method updation with variable $\epsilon$ and store it in the output matrix.- Return the output timeseries matrix.def check_type(y,t): # Ensure Input is Correct return y.dtype == np.floating and t.dtype == np.floating class _Integrator(): def integrate(self,func,y0,t): time_delta_grid = t[1:] - t[:-1] y = np.zeros((y0.shape[0],t.shape[0])) y[:,0] = y0 for i in range(time_delta_grid.shape[0]): y[:,i+1]= time_delta_grid[i]*func(y[:,i],t[i])+y[:,i] return y def odeint_euler(func,y0,t): y0 = np.array(y0) t = np.array(t) if check_type(y0,t): return _Integrator().integrate(func,y0,t) else: print("error encountered") solution = odeint_euler(f,[1.,0.],t) plt.plot(t[::5],solution[0,::5],".",label="Eulers Solution for x") plt.plot(t[::5],solution[1,::5],".",label="Eulers Solution for y") plt.xlabel("t") plt.ylabel("X") plt.legend() plt.show()Runge-Kutta Methods for Numerical IntegrationThe formula for the Euler method is $x_{n+1}=x_n + \epsilon f(x_n,t_n)$ which takes a solution from $t_n$ to $t_{n+1}=t_n+\epsilon$. One might notice there is an inherent assymetry in the formula. It advances the solution through an interval $\epsilon$, but uses the derivative information at only the start of the interval. This results in an error in the order of $O(\epsilon^2)$. But, what if we take a trial step and evaluate the derivative at the midpoint of the update interval to evaluate the value of $y_{n+1}$? Take the equations:$$k_1=\epsilon f(x_n,t_n)$$$$k_2=\epsilon f(x_n+\frac{k_1}{2},t_n+\frac{\epsilon}{2})$$ $$y_{n+1}=y_n+k_2+O(\epsilon^3)$$The symmetrization removes the O($\epsilon^2$) error term and now the method is second order and called the second order Runge-Kutta method or the midpoint method. You can look at this method graphically as follows:But we do not have to stop here. By further rewriting the equation, we can cancel higher order error terms and reach the most commonly used fourth-order Runge-Kutta Methods or RK4 method, which is described below:$$k_1=f(x_n,t_n)$$$$k_2=f(x_n+\epsilon\frac{k_1}{2},t_n+\frac{\epsilon}{2})$$$$k_3=f(x_n+\epsilon\frac{k_2}{2},t_n+\frac{\epsilon}{2})$$$$k_4=f(x_n+\epsilon k_3,t_n+\epsilon)$$$$y_{n+1}=y_n+\frac{\epsilon}{6}(k_1+2 k_2+2 k_3+k_4)+O(\epsilon^5)$$Note that this numerical method is again easily converted to a vector algorithm by simply replacing $x_i$ by the vector $\vec{X_i}$. This method is what we will use to simulate our networks. Generalized RK4 Method in PythonJust like we had created a function for Euler Integration in Python, we create a generalized function for RK4 that takes in 3 inputs ie. the function $f(\vec{y},t)$ when $\frac{d\vec{y}}{dt}=f(\vec{y},t)$, the time array, and initial vector $\vec{y_0}$. We then perform the exact same integration that we had done with Euler's Method. 
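For reference, a single RK4 step in code looks like the following sketch (written here with scalar-style names for clarity; in the generalized integrator $\epsilon$ becomes the per-step `time_delta_grid[i]`, and note that $k_4$ is evaluated at $t_n + \epsilon$, matching the formulas above):

```python
# Sketch of one RK4 step (eps = t[i+1] - t[i]); k4 uses the end of the step, t_i + eps.
def rk4_step(func, y_i, t_i, eps):
    k1 = func(y_i, t_i)
    k2 = func(y_i + eps * k1 / 2, t_i + eps / 2)
    k3 = func(y_i + eps * k2 / 2, t_i + eps / 2)
    k4 = func(y_i + eps * k3, t_i + eps)
    return y_i + (eps / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
```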
Everything remains the same except we replace the Euler's method updation rule with the RK4 update rule.def check_type(y,t): # Ensure Input is Correct return y.dtype == np.floating and t.dtype == np.floating class _Integrator(): def integrate(self,func,y0,t): time_delta_grid = t[1:] - t[:-1] y = np.zeros((y0.shape[0],t.shape[0])) y[:,0] = y0 for i in range(time_delta_grid.shape[0]): k1 = func(y[:,i], t[i]) # RK4 Integration Steps half_step = t[i] + time_delta_grid[i] / 2 k2 = func(y[:,i] + time_delta_grid[i] * k1 / 2, half_step) k3 = func(y[:,i] + time_delta_grid[i] * k2 / 2, half_step) k4 = func(y[:,i] + time_delta_grid[i] * k3, t + time_delta_grid[i]) y[:,i+1]= (k1 + 2 * k2 + 2 * k3 + k4) * (time_delta_grid[i] / 6) + y[:,i] return y def odeint_rk4(func,y0,t): y0 = np.array(y0) t = np.array(t) if check_type(y0,t): return _Integrator().integrate(func,y0,t) else: print("error encountered") solution = odeint_rk4(f,[1.,0.],t) plt.plot(t[::5],solution[0,::5],".",label="RK4 Solution for x") plt.plot(t[::5],solution[1,::5],".",label="RK4 Solution for y") plt.xlabel("t") plt.ylabel("X") plt.legend() plt.show()Mount Google Drivefrom google.colab import drive drive.mount('/content/drive') !ls drive/My\ DriveGo to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code Enter your authorization code: ·········· Mounted at /content/drive 'Colab Notebooks' data webChange Directoryimport os !ls drive print(os.getcwd()) os.chdir("drive/My Drive/data/fcn_pytorch") # to project dir print(os.getcwd()) !ls'My Drive' /content /content/drive/My Drive/data/fcn_pytorch checkpoint_fcn8s.tar fcn8s.onnx fcn8s.pt logImportimport os import re import time import random import copy from PIL import Image import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from torchvision import transforms from torchvision.transforms import functional as TF import torch.nn.functional as F import torch.optim as optimSetting Directorylog_dir = "log" os.makedirs(log_dir, exist_ok=True) train_data_dir = "../datasets/voc_semseg/train" val_data_dir = "../datasets/voc_semseg/val" train_img_dir = os.path.join(train_data_dir, 'img') train_gt_dir = os.path.join(train_data_dir, 'gt') val_img_dir = os.path.join(val_data_dir, 'img') val_gt_dir = os.path.join(val_data_dir, 'gt')Utility Functionsdef key_sort_by_num(x): re_list = re.findall(r"[0-9]+", x) re_list = list(map(int, re_list)) return re_list def list_from_dir(dir, target_ext=None): img_list = [] fnames = os.listdir(dir) fnames = sorted(fnames, key=key_sort_by_num) for fname in fnames: if target_ext is None: path = os.path.join(dir, fname) img_list.append(path) else: _, ext = os.path.splitext(fname) if ext.lower() in target_ext: path = os.path.join(dir, fname) img_list.append(path) return img_list #train_data_paths = list_from_dir(train_img_dir, ('.jpg', '.png')) #train_gt_paths = list_from_dir(train_gt_dir, ('.jpg', '.png')) #val_data_paths = list_from_dir(val_img_dir, ('.jpg', '.png')) #val_gt_paths = list_from_dir(val_gt_dir, ('.jpg', '.png')) #print(len(train_data_paths)) #print(len(train_gt_paths)) 
#print(len(val_data_paths)) #print(len(val_gt_paths)) def make_cmap(): """ Make PASCAL VOC color map """ ctbl = ((0x80, 0, 0), (0, 0x80, 0), (0, 0, 0x80), (0x40, 0, 0), (0, 0x40, 0), (0, 0, 0x40), (0x20, 0, 0), (0, 0x20, 0)) n = 256 lookup = np.zeros((n, 3)).astype(np.int32) for i in range(0, n): r, g, b = 0, 0, 0 for j in range(0, 7): bit = (i >> j) & 1 if bit: r |= ctbl[j][0] g |= ctbl[j][1] b |= ctbl[j][2] lookup[i, 0], lookup[i, 1], lookup[i, 2] = r, g, b return lookup[0:21] def pil_to_tensor(pil_img): """ Convert from PIL to tensor # Arguments pil_img: pil img, 3 dimention(H x W x C) """ img = np.array(pil_img, copy=False) img = img.transpose((2, 0, 1)) # H x W x C -> C x H x W img = img.astype(np.float32) # uint8 -> float32 img = torch.from_numpy(img) # ndarray -> tensor img.div_(255) return img def tensor_to_pil(tesnor_img): """ Convert from Tensor to PIL # Arguments tesnor_img: tensor, 3 dimention(C x H x W ) """ img = tesnor_img.mul(255).numpy() # tensor -> ndarray img = np.array(img, dtype=np.uint8) # float32 -> uint8 img = img.transpose(1,2,0) # C x H x W -> H x W x C return Image.fromarray(img) def tensor_to_ndarray(tesnor_img): """ Convert from Tensor to ndarray # Arguments tesnor_img: tensor, 3 dimention(C x H x W ) """ img = tesnor_img.mul(255).numpy() # tensor -> ndarray img = np.array(img, dtype=np.uint8) # float32 -> uint8 img = img.transpose(1,2,0) # C x H x W -> H x W x C return img def pil_to_one_hot_array(label_array, classes, size): """ Convert from PIL to one-hot-array # Arguments label_array: ndarray, 3 dimention(H x W x C) classes: int, num of class size: int, image size """ x = np.zeros((classes, size, size)) # C x H x W for i in range(size): for j in range(size): x[label_array[i][j], i, j] = 1 return x def pred_to_pil(pred): """ Convert from one-hot-array to PIL # Arguments pred: tensor, 3 dimention(C x H x W) """ cmap = make_cmap() pred_ = pred.numpy() # tensor -> ndarray channel, _, _ = pred_.shape pred_ = pred_.transpose(1, 2, 0) # C x H x W -> H x W x C pred_ = np.argmax(pred_, axis=2) # H x W x C -> H x W row, col = pred_.shape dst = np.ones((row, col, 3)) for i in range(channel): dst[pred_ == i] = cmap[i] dst = dst.astype(np.uint8) return Image.fromarray(dst) # ndarray -> PIL class ResizeAndPadding: def __init__(self, size, interpolation=Image.BILINEAR, pad=True): self.size = size self.interpolation = interpolation self.pad = pad def __call__(self, img): """Resize and Padding # Arguments img: pil img, 3 dimention(H x W x C) """ size_wh = img.size ratio = float(self.size)/max(size_wh) new_size_wh = tuple([int(x * ratio) for x in size_wh]) img = img.resize(new_size_wh, resample=self.interpolation) if self.pad: mode = img.mode new_img = Image.new(mode, (self.size, self.size)) new_img.paste(img, ((self.size-new_size_wh[0])//2, (self.size-new_size_wh[1])//2)) else: new_img = img return new_img class RandomCropForSemseg: def __init__(self, size): self.target_size = size def __call__(self, img, target=None): """Random crop img and label # Arguments img: pil img, 3 dimention(H x W x C) target: label img, 3 dimention(H x W x C) """ i, j, h, w = transforms.RandomCrop.get_params(img, output_size=(self.target_size,self.target_size)) img = TF.crop(img, i, j, h, w) if target is not None: target = TF.crop(target, i, j, h, w) return img, target class RandomFlipForSemseg: def __init__(self): pass def __call__(self, img, target=None): """Random flip img and label # Arguments img: pil img, 3 dimention(H x W x C) target: label img, 3 dimention(H x W x C) """ if 
random.random() < 0.5: img = TF.hflip(img) if target is not None: target = TF.hflip(target) return img, target def show_imgs(imgs, size, figsize=[6.4, 4.8], converter=False): """Show images # Arguments imgs: 1-D array, 4 dimention(batch x H x W x C) row: Int, row for plt.subplot col: Int, column for plt.subplot """ row, col = size if len(imgs) != (row * col): raise ValueError("Invalid imgs len:{} col:{} row:{}".format(len(imgs), row, col)) plt.figure(figsize=figsize) plt.tight_layout() for i, img in enumerate(imgs): if converter: img = converter(img) plot_num = i+1 plt.subplot(row, col, plot_num) plt.axis('off') plt.imshow(img) plt.show()Create Dataset and DataLoaderclass SemSegDataset(Dataset): def __init__(self, classes, input_size, img_dir, target_dir=None, train=False): self.classes = classes self.input_size = input_size self.img_paths, self.target_paths = self._get_paths(img_dir, target_dir) self.train = train #------------------------------------------- # transforms #------------------------------------------- self.trans_resize_and_pad = ResizeAndPadding(self.input_size) self.train_trans_crop = RandomCropForSemseg(self.input_size) self.train_trans_flip = RandomFlipForSemseg() self.eval_trns = transforms.Compose([ transforms.CenterCrop(self.input_size) ]) #------------------------------------------- # check data num #------------------------------------------- img_num = len(self.img_paths) if target_dir: target_num = len(self.target_paths) assert img_num == target_num, "img/gt not equal, img:{} gt:{}".format(img_num, target_num) self.data_num = img_num def __len__(self): return self.data_num def _get_paths(self, img_dir, target_dir): img_paths = list_from_dir(img_dir, ('.jpg', '.png')) if target_dir: target_paths = list_from_dir(target_dir, ('.jpg', '.png')) else: target_paths = None return img_paths, target_paths def _transform(self, img, target, train): #------------------------------------------- # Padding and Resize if needed #------------------------------------------- if min(img.size) < self.input_size: img = self.trans_resize_and_pad(img) if target: target = self.trans_resize_and_pad(target) if train: #------------------------------------------- # Random Crop #------------------------------------------- img, target = self.train_trans_crop(img, target) #------------------------------------------- # Random Flip #------------------------------------------- img, target = self.train_trans_flip(img, target) else: #------------------------------------------- # Evaluation #------------------------------------------- img = self.eval_trns(img) if target: target = self.eval_trns(target) img = pil_to_tensor(img) # 0-255 -> 0-1 if target: target = np.asarray(target, dtype=np.int32) target[target == 255] = 0 # 境界部分をbackgroundクラスにする target = pil_to_one_hot_array(target, self.classes, self.input_size) target = torch.from_numpy(target).long() # ラベルをtensor型に変換 return img, target def __getitem__(self, idx): assert idx < self.data_num,"invalid idx {}, max idx is {}".format(idx, self.data_num-1) img = Image.open(self.img_paths[idx]) if self.target_paths: target = Image.open(self.target_paths[idx]) else: target = None x, y = self._transform(img, target, self.train) return x, y train_dataset = SemSegDataset(21, 224, train_img_dir, train_gt_dir, train=True) val_dataset = SemSegDataset(21, 224, val_img_dir, val_gt_dir, train=False) x,y = train_dataset[0] tensor_to_pil(x) pred_to_pil(y) train_dataloader = DataLoader(train_dataset, batch_size=24, shuffle=True, num_workers=4) val_dataloader = 
DataLoader(val_dataset, batch_size=24, shuffle=False, num_workers=4) for d in train_dataloader: print(d[0].shape) print(d[1].shape) break show_imgs(d[0], [3, 8], figsize=(15,5), converter=tensor_to_ndarray) show_imgs(d[1], [3, 8], figsize=(15,5), converter=pred_to_pil)torch.Size([24, 3, 224, 224]) torch.Size([24, 21, 224, 224])Define Model# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py # https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/models/fcn32s.py def get_upsampling_weight(in_channels, out_channels, kernel_size): """ Make a 2D bilinear kernel suitable for upsampling """ factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * \ (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float() from torchvision.models.resnet import ResNet, BasicBlock class EncorderResNet(ResNet): def __init__(self, layers=[2, 2, 2, 2]): super().__init__(BasicBlock, layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x1 = x # 1/2 x = self.maxpool(x) x = self.layer1(x) x2 = x # 1/4 x = self.layer2(x) x3 = x # 1/8 x = self.layer3(x) x4 = x # 1/16 x = self.layer4(x) x5 = x # 1/32 return x1, x2, x3, x4, x5 class Conv(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.down_conv = nn.Sequential( nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True), nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True), ) def forward(self, x): x = self.down_conv(x) return x class Downsampling(nn.Module): def __init__(self): super().__init__() self.down = nn.MaxPool2d(kernel_size=2, stride=2) def forward(self, x): x = self.down(x) return x class Upsampling(nn.Module): def __init__(self, in_ch, out_ch, batchnorm=False, bilinear=False): super().__init__() self.bilinear = bilinear self.batchnorm = batchnorm self.up = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=2, stride=2) self.batchnorm = nn.BatchNorm2d(out_ch) def forward(self, x): if self.bilinear: x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True) else: x = self.up(x) if self.batchnorm: x = self.batchnorm(x) return x class ResUNet(nn.Module): def __init__(self, num_classes, encorder): super().__init__() self.encorder = encorder self.up1 = Upsampling(512, 256, batchnorm=True) self.conv6 = Conv(512, 256) self.up2 = Upsampling(256, 128, batchnorm=True) self.conv7 = Conv(256, 128) self.up3 = Upsampling(128, 64, batchnorm=True) self.conv8 = Conv(128, 64) self.up4 = Upsampling(64, 32, batchnorm=True) self.conv9 = Conv(64, 32) self.conv_skip = nn.Conv2d(64, 32, kernel_size=1, stride=1, padding=0) self.conv10 = nn.Conv2d(32, num_classes, kernel_size=1, stride=1, padding=0) self.up_final = Upsampling(num_classes, num_classes) init_w = get_upsampling_weight(num_classes, num_classes, self.up_final.up.kernel_size[0]) self.up_final.up.weight.data.copy_(init_w) def forward(self, x): x1, x2, x3, x4, x5 = self.encorder(x) x6 = torch.cat([x4, self.up1(x5)], dim=1) # 1/32 -> 1/16 x6 = self.conv6(x6) x7 = torch.cat([x3, self.up2(x6)], dim=1) # 1/16 -> 1/8 x7 = self.conv7(x7) x8 = torch.cat([x2, self.up3(x7)], dim=1) # 1/8 -> 1/4 x8 = self.conv8(x8) x1 = self.conv_skip(x1) # ch=64 -> 
ch=32 x9 = torch.cat([x1, self.up4(x8)], dim=1) # 1/4 -> 1/2 x9 = self.conv9(x9) x10 = self.conv10(x9) x11 = self.up_final(x10) # 1/2 -> 1/1 return x11 def build_resunet(num_classes): encorder = build_encorder_resnet18() model = ResUNet(21,encorder) return model class UNet(nn.Module): def __init__(self, num_classes): super().__init__() self.conv1 = Conv(3, 64) self.conv2 = Conv(64, 128) self.conv3 = Conv(128, 256) self.conv4 = Conv(256, 512) self.down = Downsampling() self.conv5 = Conv(512, 1024) self.up1 = Upsampling(1024, 512) self.conv6 = Conv(1024, 512) self.up2 = Upsampling(512, 256) self.conv7 = Conv(512, 256) self.up3 = Upsampling(256, 128) self.conv8 = Conv(256, 128) self.up4 = Upsampling(128, 64) self.conv9 = Conv(128, 64) self.conv10 = nn.Conv2d(64, num_classes, kernel_size=1, stride=1, padding=0) def forward(self, x): x1 = self.conv1(x) x2 = self.conv2(self.down(x1)) x3 = self.conv3(self.down(x2)) x4 = self.conv4(self.down(x3)) x5 = self.conv5 (self.down(x4)) x6 = torch.cat([x4, self.up1(x5)], dim=1) x6 = self.conv6(x6) x7 = torch.cat([x3, self.up2(x6)], dim=1) x7 = self.conv7(x7) x8 = torch.cat([x2, self.up3(x7)], dim=1) x8 = self.conv8(x8) x9 = torch.cat([x1, self.up4(x8)], dim=1) x9 = self.conv9(x9) x10 = self.conv10(x9) return x10 from torchvision import models def build_encorder_resnet18(): encorder = EncorderResNet(layers=[2, 2, 2, 2]) encorder.load_state_dict(models.resnet18(pretrained=True).state_dict()) return encorderCreate Model# model = UNet(num_classes=21) model = build_resunet(21) from torchsummary import summary summary(model, input_size=(3, 224, 224), device='cpu')---------------------------------------------------------------- Layer (type) Output Shape Param # ================================================================ Conv2d-1 [-1, 64, 112, 112] 9,408 BatchNorm2d-2 [-1, 64, 112, 112] 128 ReLU-3 [-1, 64, 112, 112] 0 MaxPool2d-4 [-1, 64, 56, 56] 0 Conv2d-5 [-1, 64, 56, 56] 36,864 BatchNorm2d-6 [-1, 64, 56, 56] 128 ReLU-7 [-1, 64, 56, 56] 0 Conv2d-8 [-1, 64, 56, 56] 36,864 BatchNorm2d-9 [-1, 64, 56, 56] 128 ReLU-10 [-1, 64, 56, 56] 0 BasicBlock-11 [-1, 64, 56, 56] 0 Conv2d-12 [-1, 64, 56, 56] 36,864 BatchNorm2d-13 [-1, 64[...]Define Trainer# pip install tqdm pip install fastprogress # from tqdm import tqdm_notebook as tqdm from fastprogress import master_bar, progress_bar import copy import time def calc_acc(preds, labels): correct = (preds == labels).sum() total = (labels == labels).sum() correct = correct.to(torch.float32) total = total.to(torch.float32) return (correct / total) class Trainer: def __init__(self, model, device, optimizer, criterion, train_loader, val_loader=None, scheduler=None, history=None, prev_epochs=0): self.print_state = True self.done_epochs = prev_epochs # 0- self.model = model self.device = device self.optimizer = optimizer self.criterion = criterion self.scheduler = scheduler self.best_val_acc = 0.0 self.best_model_wts = copy.deepcopy(model.state_dict()) if history: self.history = history else: self.history = {'epoch':[], 'train_acc':[], 'train_loss':[], 'val_acc':[], 'val_loss':[]} self.train_loader = train_loader self.train_data_num = len(self.train_loader.dataset) self.train_sptes_per_epoch = len(self.train_loader) self.val_loader = val_loader if self.val_loader: self.val_data_num = len(self.val_loader.dataset) self.val_sptes_per_epoch = len(self.val_loader) else: self.val_data_num = 0 self.val_sptes_per_epoch = 0 self.model.to(self.device) def set_print_state(self, state=True): self.print_state = state def train_loop(self, 
epochs): total_epochs = self.done_epochs + epochs #------------------------------------------------------------------------ # pre print #------------------------------------------------------------------------ if self.print_state: print("Device : ", self.device) print("Train on {} samples, validate on {} samples".format(self.train_data_num, self.val_data_num)) for i in range(self.done_epochs): done_history_idx = i if self.val_loader: print("Epoch:{}/{} train_acc:{:.4f}% train_loss:{:.4f} val_acc:{:.4f}% val_loss:{:.4f}".format( i+1, total_epochs, self.history['train_acc'][done_history_idx], self.history['train_loss'][done_history_idx], self.history['val_acc'][done_history_idx], self.history['val_loss'][done_history_idx])) else: print("Epoch:{}/{} train_acc:{:.4f}% train_loss:{:.4f}".format( i+1, total_epochs, self.history['val_acc'][done_history_idx], self.history['val_loss'][done_history_idx])) #------------------------------------------------------------------------ # training loop #------------------------------------------------------------------------ for _ in range(epochs): self.history['epoch'].append(self.done_epochs+1) # 1- start_time = time.time() train_score = self._train_one_epoch() end_time = time.time() self.history['train_acc'].append(train_score['acc']) self.history['train_loss'].append(train_score['loss']) if self.val_loader: val_score = self._val_one_epoch() self.history['val_acc'].append(val_score['acc']) self.history['val_loss'].append(val_score['loss']) if self.best_val_acc < val_score['acc']: self.best_val_acc = val_score['acc'] self.best_model_wts = copy.deepcopy(self.model.state_dict()) if self.scheduler: self.scheduler.step(val_score['loss']) else: if self.scheduler: self.scheduler.step(train_score['loss']) self.done_epochs += 1 #------------------------------------------------------------------------ # post print #------------------------------------------------------------------------ if self.print_state: elapsed_time = end_time-start_time done_history_idx = self.done_epochs-1 if self.val_loader: print("Epoch:{}/{} train_acc:{:.4f}% train_loss:{:.4f} val_acc:{:.4f}% val_loss:{:.4f} time:{:.3f}".format( self.done_epochs, total_epochs, self.history['train_acc'][done_history_idx], self.history['train_loss'][done_history_idx], self.history['val_acc'][done_history_idx], self.history['val_loss'][done_history_idx], elapsed_time)) else: print("Epoch:{}/{} train_acc:{:.4f}% train_loss:{:.4f} time:{:.3f}".format( self.done_epochs, total_epochs, self.history['val_acc'][done_history_idx], self.history['val_loss'][done_history_idx], elapsed_time)) def _one_step(self, data, labels, train=True): if train: self.optimizer.zero_grad() outputs = self.model(data) # (batchsize x C x H x W) -> (bachsize x H x W) labels = torch.argmax(labels, dim=1, keepdim=False) preds = torch.argmax(outputs, dim=1, keepdim=False) loss = self.criterion(outputs, labels) if train: loss.backward() self.optimizer.step() correct = calc_acc(preds, labels) return loss.item(), correct.item() def _train_one_epoch(self): self.model.train() running_loss = 0 running_correct = 0 #for batch_idx, (data, labels) in enumerate(self.train_loader): #for batch_idx, (data, labels) in tqdm(enumerate(self.train_loader)): for batch_idx, (data, labels) in enumerate(progress_bar(self.train_loader)): # naka data, labels = data.to(self.device), labels.to(self.device) loss, correct = self._one_step(data, labels, train=True) running_loss += loss running_correct += correct train_loss = running_loss / self.train_sptes_per_epoch 
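# average the accumulated per-batch pixel accuracy the same way (over the number of batches) to get the epoch accuracy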
train_acc = running_correct / self.train_sptes_per_epoch return {'loss':train_loss, 'acc':train_acc} def _val_one_epoch(self): self.model.eval() running_loss = 0 running_correct = 0 with torch.no_grad(): for data, labels in self.val_loader: data, labels = data.to(self.device), labels.to(self.device) loss, correct = self._one_step(data, labels, train=False) running_loss += loss running_correct += correct val_loss = running_loss / self.val_sptes_per_epoch val_acc = running_correct / self.val_sptes_per_epoch return {'loss':val_loss, 'acc':val_acc} def save_best_model(self, path): torch.save(self.best_model_wts, path) def save_checkpoint(self, path): ckpt = { 'model_satate_dict':self.model.state_dict(), 'optimizer_state_dict':self.optimizer.state_dict(), 'history':self.history } torch.save(ckpt, path)Start Trainingcriterion = nn.CrossEntropyLoss() # nn.LogSoftmax() + nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) from torch.optim import lr_scheduler exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.1, patience=6, verbose=True) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) model = model.to(device) trainer = Trainer(model, device, optimizer, criterion, train_dataloader, val_dataloader,scheduler=exp_lr_scheduler) trainer.train_loop(60) trainer.save_checkpoint('checkpoint_fcn8s.tar') trainer.save_best_model('fcn8s.pt')Plot acc and loss curvedef plot_history(history): epochs = len(history['train_acc'],) xaxis = range(epochs) plt.figure() plt.plot(xaxis, history['train_acc'], 'r-', label='train_acc') plt.plot(xaxis, history['val_acc'], 'b-', label='val_acc') plt.legend() plt.xlabel('epoch') plt.ylabel('acc') plt.figure() plt.plot(xaxis, history['train_loss'], 'r-', label='train_loss') plt.plot(xaxis, history['val_loss'], 'b-', label='val_loss') plt.legend() plt.xlabel('epoch') plt.ylabel('loss')ResUNetplot_history(trainer.history)Evaluatedevice = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.load_state_dict(torch.load('fcn8s.pt', map_location=device)) classnames=[ "background", "aeroplane", "bicycle", "bird", "boad", "bottle", "bus", "car", "cat", "chair", "cow", "dining table", "dog", "horse", "motor_bike", "person", "potted_plant", "sheep", "sofa", "train", "tv"] def evaluate(model, device, criterion, test_loader): model.eval() runnning_loss = 0 running_correct = 0 with torch.no_grad(): for data in progress_bar(test_loader): imgs, labels = data imgs, labels = imgs.to(device), labels.to(device) outputs = model(imgs) labels = torch.argmax(labels, dim=1, keepdim=False) pred = torch.argmax(outputs, dim=1, keepdim=False) runnning_loss += criterion(outputs, labels).item() running_correct += calc_acc(pred, labels) test_acc = running_correct / len(test_loader) test_loss = runnning_loss / len(test_loader) return {'loss':test_loss, 'acc':test_acc} test_loader = val_dataloader device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device:",device) model = model.to(device) test_score = evaluate(model, device, criterion, test_loader) print('acc : {:.4f}'.format(test_score['acc'])) print('loss : {:.4f}'.format(test_score['loss']) )acc : 0.7820 loss : 0.7718Predictdef predict(model, device, imgs): model.eval() if len(imgs.size()) != 4: imgs = imgs.unsqueeze(0) imgs = imgs.to(device) with torch.no_grad(): output = model(imgs) return output device = 'cpu' img, label = val_dataset[11] model.to(device) out = predict(model, device, img) label_pil = pred_to_pil(label) out_pil = 
pred_to_pil(out[0]) input_pil = tensor_to_pil(img) show_imgs([input_pil, label_pil, out_pil], [1, 3], figsize=(15,5))Save as ONNX# pip install torchvision==0.2.1 import torch dummy_input = torch.randn(1, 3, 224, 224,) torch_out = torch.onnx.export(model, dummy_input, "fcn8s.onnx", verbose=True)Modeimport pandas as pd import numpy as np df = pd.read_csv('C:/Users/Swapnil/Desktop/Python Programs/InternShala - Data Science Python/Module - 3 - Statistics/Module 3 - Datasheets/mode.csv') df df.head() mode_sub = df['Subject'].mode() mode_sub tot_favSub = df[df['Subject']=='Chemistry'] tot_favSub tot_favSub.head() tot_favSub.sum()Created a tablecities = pd.DataFrame({'City':['Aurangabad', 'Jalna', 'Beed', 'Pune', 'Mumbai'], 'Rainfall':[98, 67, 98, 98, 55], 'Pop' : [10, 15, 20, 20, 20]}) cities md_rain = cities['Rainfall'].mode() md_rain cities['Rainfall'] md_pop = cities['Pop'].mode() md_pop cities.loc[len(cities.index)] = ["Aurangabad", 20, 16] cities mod_city = cities['City'].mode() mod_cityMistake here while adding multiple rows at oncecities = cities.append({'City':['Georgeo', 'Marina', 'Wassepur'], 'Rainfall': [98, 55, 55], 'Pop': [16, 15, 16]}, ignore_index = True) citiesDelete rows by index numbercity = cities.drop([cities.index[7], cities.index[8]]) city city = city.append({'City':'Georgeo', 'Rainfall':98, 'Pop': 16}, ignore_index = True) city city['City'].mode()Accessing cities with same frequency of rainfall First we have to check what is mode / frequent number of Rainfall category or column i.e 98city['Rainfall'].mode() city[city['Rainfall']== 98]So in above cities are having same mode / frequency of rainfall.city.drop([city.index[7]])Resolving peopledf_people = df[df['GENDER'].isin(['M', 'F'])].copy() df_people_small = df_people.head(600) f = Filter(df_people, "res_WIKIDATA_IDs") f.add_property_filter("P31", 'Q5') # human f.add_label_filter("PREFERRED_NAME", threshold=90, include_aliases=True) f.view_filters() f.process_dataframe() df_new = f.get_dataframe()Edot0=2B^2 Om0^2 R^6/3/c^3tau=3*I*c^3/c/B^2/R^6/Om0^2(1*u.G).to('g/(C*s)') BB = coords.Gal (1.4*u.M_sun).to('g') RR = 1e6 II = 1e45 # BB = 0.17e12 # Mass = 2.7837738e33 omega = np.sqrt((2*finalb.tc*gp.yr_to_sec*finalb.l_f)/II) BB = np.sqrt((3*gp.c_speed**3*II)/(4*RR**6*finalb.tc*gp.yr_to_sec*omega**2)) # omega = np.sqrt((3*gp.c_speed**3*II)/(2*RR**6*BB**2*(final.t0*gp.yr_to_sec))) p0 = 2*np.pi/omega pdot = p0/(2*finalb.tc*gp.yr_to_sec) final['p0'] = p0 final['pdot'] = pdot fauch_p = np.loadtxt('../Computed_Population_20200410/faucher_p.txt').T fauch_b = np.loadtxt('../Computed_Population_20200410/fauch_b.txt').T plot_loghist(p0, 19, 'Periods Distribution of pulsars', 'Period [s]') # plt.xlim(1e-3, 3) plt.semilogx(fauch_p[0], fauch_p[1], drawstyle='steps-mid', lw=3, label='From Faucher-Giguere') #plot_loghist(distr_p0, 15, '', '') # plt.xlim(1e-3, 3) plt.legend # plt.savefig('PeriodDistr_comparison.png',dpi=100, bbox_inches='tight') plot_loghist(pdot, 20, 'Period Derivatives Distribution of pulsars', '$\dotP$ [s s$^{-1}$]') plt.savefig('PeriodDerivDistr.png',dpi=100, bbox_inches='tight') plot_loghist(BB, 12, 'Surface Magnetic Field Distribution', '$B$ [G]') plt.semilogx(fauch_b[0], fauch_b[1], drawstyle='steps-mid', lw=3, label='From Faucher-Giguere') plt.savefig('SurfaceMagField_comparison.png',dpi=100, bbox_inches='tight') len(fauch_b[0]) xxx = np.linspace(0.01, 20) yyy = xxx/(200e3*gp.yr_to_sec) yyy2 = xxx/(200e2*gp.yr_to_sec) yyy1 = xxx/(200e1*gp.yr_to_sec) yyy3 = xxx/(200e4*gp.yr_to_sec) plt.loglog() plt.scatter(final.p0, 
final.pdot, c=final.age, s=10) plt.plot(xxx,yyy, '--k', alpha=0.5, zorder=-10, lw=1) plt.plot(xxx,yyy2, '--k', alpha=0.5, zorder=-10, lw=1) plt.plot(xxx,yyy1, '--k', alpha=0.5, zorder=-10, lw=1) plt.plot(xxx,yyy3, '--k', alpha=0.5, zorder=-10, lw=1) plt.text(0.02, 6e-13, '1 kyr', rotation=20, alpha=0.5,) plt.text(0.02, 5e-14, '10 kyr', rotation=20, alpha=0.5,) plt.text(0.02, 5e-15, '100 kyr', rotation=20, alpha=0.5,) plt.text(0.02, 4.5e-16, '1 Myr', rotation=20, alpha=0.5,) plt.grid() plt.xlabel(r'Period [$s$]') plt.ylabel(r'Period Derivative [$s\;\;s^{-1}$]') plt.title('$P - \dotP$ diagram') plt.xlim(0.01, 20) plt.ylim(1e-17, 2e-11) plt.tight_layout() plt.savefig('PPdiagram.png',dpi=100, bbox_inches='tight') plot_loghist(BB, 20, '', '') (1*u.G).to('g/(C*s)') final.t0Cointegration Test Engle-Granger Methodinfl = test_data['inflation'] mkt = test_data.loc[infl.index]['MktCap'] i = 0 while True: if adfuller(infl.values)[1] < 0.05: print('Order of the Inflation series is:', i) break else: infl = infl.diff().dropna() i += 1 j = 0 while True: if adfuller(mkt.values)[1] < 0.05: print('Order of the MktCap series is:', j) break else: mkt = mkt.diff().dropna() j += 1 infl = test_data['inflation'] mkt = test_data.loc[infl.index]['MktCap'] model = sm.OLS(infl.values, mkt.values) results = model.fit() residuals = infl.values - results.predict() adfuller_pvalue = adfuller(residuals)[1] print('Engle-Granger pvalue for cointegration:', adfuller_pvalue) residuals = np.diff(residuals,1) adfuller_pvalue = adfuller(residuals)[1] print('Engle-Granger pvalue for cointegration order one:', adfuller_pvalue) infl = infl.diff().dropna() mkt = mkt.diff().dropna() mkt = sm.add_constant(mkt) model = sm.OLS(infl, mkt) results = model.fit() hypotheses = 'MktCap = 0' print('Parameters') print(results.params) print('------------\n') print('t-statistic') print(results.tvalues) print('------------\n') print('Degrees of Freedom:', mkt.shape[0]) print('------------\n') residuals = infl.values - results.predict() print('Normality test result:', kstest(residuals, 'norm')[1]) adfuller_pvalue = adfuller(residuals)[1] print('p-value for unit root:', adfuller_pvalue) infl = test_data['inflation'] mkt = test_data.loc[infl.index]['MktCap'] infl = infl.diff().dropna() mkt = mkt.diff().dropna() plt.figure(figsize=(15,9)) plt.scatter(mkt.values, infl.values) plt.plot(np.linspace(start = min(mkt.values), stop = max(mkt.values)), 0.925 + 0.120 * np.linspace(start = min(mkt.values), stop = max(mkt.values))) plt.xlabel(r'$\Delta$MktCap') plt.ylabel('Inflation') plt.suptitle('Market Cap first difference regressed in Inflation') plt.show()Granger Causalityfrom io import StringIO import sys class Capturing(list): def __enter__(self): self._stdout = sys.stdout sys.stdout = self._stringio = StringIO() return self def __exit__(self, *args): self.extend(self._stringio.getvalue().splitlines()) del self._stringio # free up some memory sys.stdout = self._stdout a = np.log(granger_test_data['MktCap']) - np.log(granger_test_data['MktCap'].shift(1)) # a = (granger_test_data['MktCap']) - (granger_test_data['MktCap'].shift(1)) a.dropna(inplace = True) a = pd.DataFrame(a) a.columns = ['MktCap'] adfuller(a.values)[1] granger_test_data.shape data = np.log(granger_test_data) - (np.log(granger_test_data)).shift(1) data.dropna(inplace = True) data = a.join(granger_test_data['inflation']) # data = data.diff() # data.dropna(inplace = True) for col in data.columns: print(col, adfuller(data[col].values)[1]) with Capturing() as output: 
grangercausalitytests(data[['inflation','MktCap']], 60) # print(output) f_pvalue = [] chi2_pvalue = [] likelihood_pvalue = [] for line in output: if line[0:16] == 'ssr based F test': f_pvalue.append(float(line[40:46])) elif line[0:19] == 'ssr based chi2 test': chi2_pvalue.append(float(line[40:46])) elif line[0:21] == 'likelihood ratio test': likelihood_pvalue.append(float(line[40:46])) x = [i + 1 for i in range(len(f_pvalue))] plt.figure(figsize=(15,9)) plt.plot(x,f_pvalue, label = 'F Statistic') plt.plot(x,chi2_pvalue, label = r'$\chi^2$ Statistic') plt.plot(x,likelihood_pvalue, label = 'Likelihood Statistic') plt.axhline(0.05, color = 'gray', linestyle='--') plt.axhline(0.1, color = 'gray', linestyle='--') plt.legend() plt.xlabel('Time lag in months') plt.ylabel('p-Value') plt.suptitle('p-Value of Granger Causality Test MktCap on Inflation for 3 statistics') plt.show() df_granger = pd.DataFrame({'Lag':x,'F-statistic p-value':f_pvalue, 'Chi2 statistic p-value':chi2_pvalue}) df_granger = df_granger.set_index('Lag') display(df_granger) grangercausalitytests(data[['inflation','MktCap']], 15) data_inv = data[['MktCap', 'inflation']] gc_res_inv = grangercausalitytests(data_inv, 48) gc_res_inv model = VAR(data[['inflation', 'MktCap']]) results = model.fit(36) results.summary() data = test_data_log_diff[['inflation', 'MktCap']] gc_test = grangercausalitytests(data,12) gc_testGranger Causality number of lags (no zero) 1 ssr based F test: F=1.0137 , p=0.3157 , df_denom=143, df_num=1 ssr based chi2 test: chi2=1.0350 , p=0.3090 , df=1 likelihood ratio test: chi2=1.0313 , p=0.3098 , df=1 parameter F test: F=1.0137 , p=0.3157 , df_denom=143, df_num=1 Granger Causality number of lags (no zero) 2 ssr based F test: F=0.2931 , p=0.7464 , df_denom=140, df_num=2 ssr based chi2 test: chi2=0.6070 , p=0.7382 , df=2 likelihood ratio test: chi2=0.6058 , p=0.7387 , df=2 parameter F test: F=0.2931 , p=0.7464 , df_denom=140, df_num=2 Granger Causality number of lags (no zero) 3 ssr based F test: F=0.1082 , p=0.9552 , df_denom=137, df_num=3 ssr based chi2 test: chi2=0.3412 , p=0.9521 , df=3 likelihood ratio test: chi2=0.3408 , p=0.9522 , df=3 parameter F test: F=0.1082 , p=0.9552 , df_denom=137, df_num=3 Granger Causality number of lags (no zero) 4 ssr based F test: F=0.1616 , p=0.9574 [...]VAR ANALYSIS VAR: inflation, MktCapdef total_squared_error(results, test_data, columns, step): observed_inflation = test_data[['inflation']] error = [] for i in range(observed_inflation.shape[0] - step - 1): initial_data = (test_data[columns].iloc[i:(i+step),:]).values forecast_results = results.forecast(initial_data, 1)[0,0] observed_inflation_i = observed_inflation.iloc[i + step + 1].values error.append(observed_inflation_i-forecast_results) error_norm = np.linalg.norm(error) return error_norm def predict(results, test_data, columns, step): observed_inflation = test_data[['inflation']] predictions = [] for i in range(observed_inflation.shape[0] - step - 1): initial_data = (test_data[columns].iloc[i:(i+step),:]).values forecast_results = results.forecast(initial_data, 1)[0,0] predictions.append(forecast_results) pred_df = pd.DataFrame(predictions) pred_df.index = test_data.index[:observed_inflation.shape[0] - step - 1] pred_df.columns = ['prediction'] return pred_df model = VAR(test_data_log_diff[['inflation', 'MktCap']]) results = model.fit(4) results.summary() observed_inflation = test_data_log_diff[['inflation']] step = 4 error = total_squared_error(results,test_data_log_diff, ['inflation', 'MktCap'], step) print('Total 
Error:', error) forecast = predict(results, test_data_log_diff, ['inflation', 'MktCap'], step) plt.plot(forecast, label = 'forecast') plt.plot(observed_inflation, label = 'observed') plt.legend() plt.show() residuals = (observed_inflation.values[:observed_inflation.shape[0] - step - 1] - forecast.values) print('Normality test result:', kstest(residuals, 'norm')[1])Total Error: 0.07333334403685315VAR: inflation, M2, MktCapmodel = VAR(test_data_log_diff[['inflation','M2', 'MktCap']]) results = model.fit(4) results.summary() observed_inflation = test_data_log_diff[['inflation']] step = 4 error = total_squared_error(results,test_data_log_diff, ['inflation','M2', 'MktCap'], step) print('Total Error:', error) forecast = predict(results, test_data_log_diff,['inflation','M2', 'MktCap'], step) plt.plot(forecast, label = 'forecast') plt.plot(observed_inflation, label = 'observed') plt.legend() plt.show() residuals = (observed_inflation.values[:observed_inflation.shape[0] - step - 1] - forecast.values) print('Normality test result:', kstest(residuals, 'norm')[1])Total Error: 0.07311603053838961VAR: inflation, M2, GDP, MktCapmodel = VAR(test_data_log_diff[['inflation','M2','GDP', 'MktCap']]) results = model.fit(4) results.summary() observed_inflation = test_data_log_diff[['inflation']] step = 4 error = total_squared_error(results,test_data_log_diff, ['inflation','M2','GDP', 'MktCap'], step) print('Total Error:', error) forecast = predict(results, test_data_log_diff,['inflation','M2','GDP', 'MktCap'], step) plt.plot(forecast, label = 'forecast') plt.plot(observed_inflation, label = 'observed') plt.legend() plt.show() residuals = (observed_inflation.values[:observed_inflation.shape[0] - step - 1] - forecast.values) print('Normality test result:', kstest(residuals, 'norm')[1])Total Error: 0.07566126739477709VAR: inlfation, M2, GDP, MV2, MktCapmodel = VAR(test_data_log_diff[['inflation','M2','GDP', 'M2V', 'MktCap']]) results = model.fit(4) results.summary() irf = results.irf(12) irf.plot(impulse = 'MktCap',response = 'inflation') observed_inflation = test_data_log_diff[['inflation']] step = 4 error = total_squared_error(results,test_data_log_diff, ['inflation','M2','GDP', 'M2V', 'MktCap'], step) print('Total Error:', error) forecast = predict(results, test_data_log_diff,['inflation','M2','GDP', 'M2V', 'MktCap'], step) plt.plot(forecast, label = 'forecast') plt.plot(observed_inflation, label = 'observed') plt.legend() plt.ylabel('Log difference of CPI') plt.show() residuals = (observed_inflation.values[:observed_inflation.shape[0] - step - 1] - forecast.values) print('Normality test result:', kstest(residuals, 'norm')[1]) model = VAR(test_data_log_diff[['inflation','M2','GDP', 'M2V']]) results = model.fit(4) results.summary() observed_inflation = test_data_log_diff[['inflation']] step = 4 error = total_squared_error(results,test_data_log_diff, ['inflation','M2','GDP', 'M2V'], step) print('Total Error:', error) forecast = predict(results, test_data_log_diff,['inflation','M2','GDP', 'M2V'], step) plt.plot(forecast, label = 'forecast') plt.plot(observed_inflation, label = 'observed') plt.legend() plt.ylabel('Log difference of CPI') plt.show() residuals = (observed_inflation.values[:observed_inflation.shape[0] - step - 1] - forecast.values) print('Normality test result:', kstest(residuals, 'norm')[1]) data_shifted = test_data_log_diff[['M2', 'GDP', 'M2V']].shift(-1) data_reg = data_shifted.join(test_data_log_diff['inflation'], how = 'inner').dropna() model = ols("inflation ~ M2 + M2V + GDP", data_reg).fit() 
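# note: the regressors in this last fit are the one-period-ahead (shift(-1)) log-differences of M2, M2V and GDP, with current inflation as the dependent variable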
model.summary()MinMax ProblemThis is a variation of shortest path.Given an acyclic graph **G** with weighted edges, the shortest path problem is to find the path from source s to target t whose weighted length is minimal.Each path is a sequence of edges, and its length is the sum of the weightsof the edges in the path.The maximal link in any path is the maximum of the weights of the edges in the path.The MINMAX problem is to find the path from s to t whose maximal link is minimal. * find $f(1,0)$$f(k,i) = \min \limits_j \left \{ \max \left \{ b(i,j), f(k+1, j) \right \} \right \}$Base conditions: $f(N-1, i) = b(i, N)$ The implementation considers that:* the source node has index 0 and * destination node has index N-1 When is this problem in real life?Imagine that the graph represents the possible routes of a truck leaving a warehouse to a store. Each edge represents the toll the driver will pay. The values are not cumulative, that is, the driver only pays the difference up to the maximum value of the route. This means that he pays the value of the maximal edge of the path. The problem is to find out what is the lowest possible amount that the driver will pay.import numpy as np import networkx as nx import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [19, 9] %matplotlib inline import warnings warnings.simplefilter(action='ignore', category=FutureWarning)to draw the graphsdef create_graph(B): G=nx.DiGraph() for i in range(B.shape[0]): for j in range(B.shape[1]): if not np.isinf(B[i, j]): G.add_edge(i, j, weight=B[i, j]) return G def draw_graph(B): plt.figure(1, figsize=(16, 9)) G=create_graph(B) pos=nx.spring_layout(G) # random_layout(G) ax = plt.gca() _ = nx.draw_networkx(G, pos, ax=ax, font_size=20, node_size=500, node_color="yellow") labels = nx.get_edge_attributes(G, 'weight') _ = nx.draw_networkx_edge_labels(G, pos, ax=ax, edge_labels=labels, font_size=20) plt.show()Minimum Maximum implementationdef minimum_maximum(G, verbose=False): N = B.shape[0] # auxiliar f = np.zeros((N - 1, N)) # base case for i in range(N): f[N - 2, i] = B[i,N - 1] max_k = N - 3 for k in range(max_k, -1, -1): for i in range(N): _m = [max(B[i, j], f[k + 1, j]) for j in range(N)] f[k, i] = np.min(_m) # check if processed the line with the source if not np.isinf(f[k, 0]) and np.all(f[k, 1:]): break if verbose: # final matrix print(f[k:, :]) return f[k, 0]Note in the loop the trick we use to recognize the number of steps (k) of the problem. Since previously finding the k value is an NP-complete problem, it is extremely costly to find this value a priori. However, we take advantage of two characteristics of this problem. The graph is acyclic, and from this in the worst situation for a graph of N vertices, we will have N-1 edges.In addition to this, as we place the origin as the vertex of index 0 and the destination as vertex of index N-1, we know that the solution was found when the line F [k] contains only one value in the position F [k, 0] and the other values are equal to infinity.This is because the origin has no incoming edge of a vertex (or set of vertices) that are part of the steps we evaluated to reach the destination (see the last example of this notebook). If there was a vertex with an incoming edge at the origin and that was also part of the following steps, we would have a cycle, which by definition of the problem cannot occur. 
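The implementation above returns only the value of the optimal maximal link. As a small complementary sketch (same conventions assumed: a cost matrix with np.inf for missing edges, source at index 0, target at index N-1), the variant below also stores the argmin successor chosen at each vertex, so the route that attains the min-max value can be read back afterwards.

import numpy as np

def minimum_maximum_with_path(B):
    N = B.shape[0]
    f = np.full(N, np.inf)            # f[i] = best achievable maximal link from i to the target
    nxt = np.full(N, -1, dtype=int)   # successor chosen at i, used to rebuild the path
    f[N - 1] = 0.0                    # already at the target: nothing left to pay
    # relax repeatedly; on an acyclic graph the values stabilise after at most N-1 passes,
    # which plays the same role as the k-loop in minimum_maximum above
    for _ in range(N - 1):
        changed = False
        for i in range(N - 1):
            for j in range(N):
                if not np.isinf(B[i, j]):
                    cand = max(B[i, j], f[j])
                    if cand < f[i]:
                        f[i], nxt[i] = cand, j
                        changed = True
        if not changed:
            break
    # follow the stored choices from the source to the target
    path, i = [0], 0
    while i != N - 1 and nxt[i] != -1:
        i = nxt[i]
        path.append(i)
    return f[0], path

For the first example graph below this reports a maximal link of 8 along the route 0 -> 2 -> 4 -> 7 -> 9, matching the value computed by minimum_maximum.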
ExamplesB = np.array([[np.inf, 10, 7, 6, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, ], [np.inf, np.inf, np.inf, np.inf, 9, np.inf, np.inf, np.inf, np.inf, np.inf, ], [np.inf, np.inf, np.inf, np.inf, 7, np.inf, np.inf, np.inf, np.inf, np.inf, ], [np.inf, np.inf, np.inf, np.inf, 11, 7, np.inf, np.inf, np.inf, np.inf, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 8, 7, 10, np.inf, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 8, 6, 7, np.inf, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 13, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 8, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 9, ], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, ], ]) draw_graph(B) minimum_maximum(B) B = np.array([[np.inf, 10, 7, 6, np.inf, np.inf, np.inf], [np.inf, np.inf, np.inf, np.inf, 9, np.inf, np.inf], [np.inf, np.inf, np.inf, np.inf, 7, np.inf, np.inf], [np.inf, np.inf, np.inf, np.inf, 11, 7, np.inf], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 7], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, 6], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf], ]) draw_graph(B) minimum_maximum(B) B = np.array([[np.inf, 10, 7, 6, np.inf], [np.inf, np.inf, np.inf, np.inf, 9], [np.inf, np.inf, np.inf, np.inf, 7], [np.inf, np.inf, np.inf, np.inf, 11], [np.inf, np.inf, np.inf, np.inf, np.inf], ]) draw_graph(B) # print matrix F to see the results minimum_maximum(B, verbose=True) # add a node (4) that points to the origin B = np.array([[np.inf, 10, 7, 6, np.inf, np.inf], [np.inf, np.inf, np.inf, np.inf, np.inf, 9], [np.inf, np.inf, np.inf, np.inf, np.inf, 7], [np.inf, np.inf, np.inf, np.inf, np.inf, 11], [1, np.inf, np.inf, np.inf, np.inf, np.inf], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf], ]) draw_graph(B) # print matrix F to see the results # notice that because no 4 does not take part in the calculation of the steps from origin to destination # its presence does not affect the calculations minimum_maximum(B, verbose=True) # also notice that there is no way to have an incoming edge at (4) from the other nodes that does not create a cycleExample 3LSSVM on small data sets: Variation of resulting models.The synthetic data used for this purpose is the 1D Sinc function.#Some imports import matplotlib.pyplot as plt import numpy as np import random import math import scipy.stats as st from sklearn.metrics import mean_squared_error, r2_score from LSSVMRegression import LSSVMRegression #Generating the synthetic data Nmodels=100 Ndata=25 Nrem=5 x=np.array([np.random.uniform(low=-5,high=5,size=Ndata)]).T y=np.sinc(x).ravel() #create the associated targets, needs to be a 1D array #create Nmodels (identical) rbf-models to train on different datasets models=list() datasets_x=list() datasets_y=list() for i in range(Nmodels): clfrbf=LSSVMRegression( gamma=1000001, #set the gamma-hyper parameter equal to 1 kernel='poly', #use the linear kernel sigma=1.0, c=1, d=10, ) #index=list(range(i,i+Nrem)) index=random.sample(range(Ndata),Nrem) seti_x=np.array([np.delete(x,index)]).T #print(seti_x.shape," ",seti_x[0:5,:].ravel()) seti_y=np.delete(y,index) clfrbf.fit(seti_x,seti_y) # train our model instance, aka solve the set of linear equations datasets_x.append(seti_x) datasets_y.append(seti_y) models.append(clfrbf) def mean_confidence_interval(data, confidence=0.95): a = 1.0 * np.array(data) n = len(a) m, se = np.mean(a), st.sem(a) h = se * st.t.ppf((1 + confidence) / 2., n-1) 
cf=(1.0-confidence)*0.5 qm = np.quantile(a,cf,interpolation='linear') qp = np.quantile(a,1.0-cf,interpolation='linear') return m, m-h, m+h, qm, qp #generate a dens mesh xmin=-8 xmax=8 Npts=2001 xPred=np.linspace((xmin,),(xmax,),Npts) yExact=np.sinc(xPred).ravel() yAvg=np.zeros(Npts) CIlow=np.zeros(Npts) CIhigh=np.zeros(Npts) Qlow=np.zeros(Npts) Qhigh=np.zeros(Npts) # and predict all_yPred=list() yPred2D=np.zeros((Nmodels,Npts)) cnt=-1 for clfrbf in models: cnt+=1 yPred=clfrbf.predict(xPred) all_yPred.append(yPred) yPred2D[cnt]=yPred # The mean squared error (MAE) and The coefficient of determination R²: 1 is perfect prediction #print('MAE: %.3f R²: %.3f' % (mean_squared_error(yExact, yPred), r2_score(yExact, yPred))) for i in range(Npts): yAvg[i], CIlow[i], CIhigh[i], Qlow[i], Qhigh[i]= mean_confidence_interval(yPred2D[:,i],confidence=0.9) #print(yAvg[i], CIlow[i], CIhigh[i]," ",yPred2D[1:5,i]) #print("TYPE:" , type(xPred)," shape:",xPred.shape) # Plot outputs plt.figure(figsize=(12,8)) for yPred in all_yPred: plt.plot(xPred, yPred, color='red' ,linewidth=1, zorder=-1, alpha=0.25) plt.fill_between(xPred.ravel(), CIlow, CIhigh, color='blue', zorder=0, alpha=.5) plt.fill_between(xPred.ravel(), Qlow, Qhigh, color='green', zorder=0, alpha=.25) plt.plot(xPred, yAvg, color='blue',linewidth=3, zorder=0) plt.plot(xPred, yExact, color='black',linewidth=2, zorder=0) plt.scatter(x, y, color='black', zorder=1) plt.axis([xmin,xmax,-0.75,1.5]) step=(xmax-xmin)/11.0 Xlst=list() for a in np.arange(math.floor(xmin),math.ceil(xmax)+1,1.0): Xlst.append(a) plt.xticks(Xlst,rotation=45,fontsize=18) #plt.xticks([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8]) plt.yticks([-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=18) plt.xlabel("feature x",fontsize=22,fontweight="bold") plt.ylabel("target y",fontsize=22,fontweight="bold") plt.show()Problem: Sourcing Fine-Grained Mobility Data from Large Datasets OverviewFinding fine-grained, agent-specific mobility data is very difficult to do. It is extremely important to see how an individual acts, as we can see from various clusters of the COVID-19 pandemic that we have experienced in New Zealand. It would be extremely useful to analyze the mobility of individual people, finding their clusters of movement. A more in-depth overview of the problem, with supporting literature, can be found in the accompanying report. The DataMobility data will be obtained from the GeoLife dataset. This data is available for free [here](https://www.microsoft.com/en-us/download/details.aspx?id=52367). It was collected by Microsoft Asia and consists of GPS trajectories from 182 users over a period of three years. You can download it and load it through the cells that follow. This system depends on data in a PostGIS server - don't worry, this will set that up for you, you will just need a PgAdmin server running with default 'postgres' credentials. The Proposed SolutionA user-friendly analysis system for looking at individual users of the dataset and finding patterns. This system should be approachable, web-based and capable of displaying clusters of mobility. It will also deliver key statistics on the data so that the user can validate the results they are seeing. To this end, it will be open-source. Structure**This Notebook consists of two parts**. The first series of cells go through the project requirement checklist, running each step individually with some maps along the way. They show how the data can be uploaded to PgAdmin etc. 
The final cell is a 'dashboard' that does all of the analysis of the previous steps, all contained. Code in this final cell is not very well commented as I have done this thoroughly in the cells that preceed it. There are also bugs that exist in the following cells that I have fixed for the final version, so please judge program functionality through the dashboard. To be clear, **sequential cells outline basic functionality and have known bugs that have been fixed for the dashboard, so please run the final cell to assess program functionality** Thanks :)# import modules # used for: import psycopg2 # sql implementation from sqlalchemy import create_engine # connection to postgis import pandas as pd # tabular data handling import geopandas as gpd # tabular data handling with geometry (spatial) import folium # map creation (build on leaflet) import geoalchemy2 # support for spatial sql import matplotlib.pyplot as plt # plotting tool import numpy as np # used for getting statistics and complex numerical analyses from ipyleaflet import * # ipython leaflet extension from ipywidgets import * # ipython widgets (buttons, etc.) import glob # used for getting path names (for finding the geolife folder) from IPython.display import display # display function for inline plots etc. import pandas as pd # dataframes import datetime # getting datetime from delimited data import os # use local functions and getting system info import psutil # getting RAM of computer for performance recommendations import seaborn as sns # matplotlib, but fancy import random # random number generation from sklearn.cluster import DBSCAN # clustering algorithm from sklearn.neighbors import NearestNeighbors # neigbourhoods used for clustering from kneed import KneeLocator # locating knee point of clustering algorithm # define constants SHOW_EXCEPTIONS = True # bool for debugging exceptions DATA_EXISTS = True # bool for not reloading data TB_RAW = 'geolife_raw_data' # constant table name TB_CLEAN = 'geolife_ref' # "" TB_BEIJING = 'beijing_raw' # "" ENGINE = create_engine('postgresql://postgres:postgres@localhost/postgres') # postgres engine CON = ENGINE.connect() # engine connection CRS = '4326' # coordinate reference system BEIJING = [39.9, 116.41] # central beijing coords, for map centres B1 = 115.779419, 39.626846 # bbox limits for beijing extent B2 = 116.952209, 40.357010 # "" # method for running certain queries and handling exceptions def runsql(query): try: ENGINE.execute(query) except Exception as exep: if SHOW_EXCEPTIONS: print(exep) # The following code was modified by code developed by HERE Technologies # The original can be found: https://heremaps.github.io/pptk/tutorials/viewer/geolife.html # My edits: # -made it a class, added getters and setters (to meet assignment guidelines) # -added customisation by user (eg. 
custom file locations without constants) # -general cleanup of code, added nicer progress indicator, commented class GeoLifeHandler: def __init__(self): # apply numeric values for transport modes to save space self.mode_names = ['walk', 'bike', 'bus', 'car', 'subway','train', 'airplane', 'boat', 'run', 'motorcycle', 'taxi'] self.mode_ids = {s : i + 1 for i, s in enumerate(self.mode_names)} # create progress bar self.progress_bar = widgets.FloatProgress( value=0, min=0, max=10.0, description='Loading:', bar_style='info', style={'bar_color': '#0000FF'}, orientation='horizontal') # read plt file def read_plt(self, plt_file): points = pd.read_csv(plt_file, skiprows=6, header=None, parse_dates=[[5, 6]], infer_datetime_format=True) points.rename(inplace=True, columns={'5_6': 'time', 0: 'lat', 1: 'lon', 3: 'alt'}) # rename cols points.drop(inplace=True, columns=[2, 4]) # remove unused columns return points # read labels (where applicable) def read_labels(self, labels_file): labels = pd.read_csv(labels_file, skiprows=1, header=None, parse_dates=[[0, 1], [2, 3]], infer_datetime_format=True, delim_whitespace=True) labels.columns = ['start_time', 'end_time', 'label'] labels['label'] = [self.mode_ids[i] for i in labels['label']] # enumerate return labels # apply travel mode labels to points (enumerated to save bits) def apply_labels(self, points, labels): indices = labels['start_time'].searchsorted(points['time'], side='right') - 1 no_label = (indices < 0) | (points['time'].values >= labels['end_time'].iloc[indices].values) points['label'] = labels['label'].iloc[indices].values points['label'][no_label] = 0 # read individual user (folder is preassigned to each user) def read_user(self, user_folder): labels = None plt_files = glob.glob(os.path.join(user_folder, 'Trajectory', '*.plt')) df = pd.concat([self.read_plt(f) for f in plt_files]) labels_file = os.path.join(user_folder, 'labels.txt') if os.path.exists(labels_file): labels = self.read_labels(labels_file) self.apply_labels(df, labels) else: df['label'] = 0 return df # interate through all users (defined by folders in Data folder) def read_all_users(self): subfolders = os.listdir(self.folder) dfs = [] self.progress_bar.max = len(subfolders) display(self.progress_bar) for i, sf in enumerate(subfolders): #print('processing user {} of {}'.format(i + 1, len(subfolders))) self.progress_bar.value = i + 1 df = self.read_user((os.path.join(self.folder,sf))) df['user'] = int(sf) dfs.append(df) print('Load Complete') self.geolife_df = pd.concat(dfs) # set folder location def set_location(self, file): self.folder = file # get dataframe def get_df(self): return self.geolife_df # function for submitting file location # starts the reading of all users, saving to geodataframe def bt_submitevent(b): if os.path.exists(file_options.value): b.disabled = True handler.set_location(file_options.value) handler.read_all_users() else: print('Data file not found. Please try again...') # submit button for selecting the option, calling the above function when selected bt_submit = widgets.Button(description='Select') bt_submit.on_click(bt_submitevent) # list of file options file_options = widgets.Dropdown( options=[(os.path.join(os.getcwd(), 'geolife_trajectories\Data')), # default zip file location (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop\Data')), # desktop folder (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Downloads\Data'))], # downloads folder disabled=False, layout={'width': '70%'}) # display these widgets, with title display(VBox([HTML('

GeoLife File Location
'), HBox([file_options, bt_submit])])) # create instance of handler class handler = GeoLifeHandler() # try and get the dataframe information # good test if the dataframe exists. if it doesn't, assume it is already available on server (see constants) try: geolife_raw = handler.get_df() display(geolife_raw.count()) display(geolife_raw.head(10)) geolife_raw = geolife_raw.head(10) except: print('Data either not yet extracted, or already available on server') # class for uploading to postgis and setting data limitations based on memory available class PostGISUploader: def __init__(self): # use constants for table names etc. self.table_rawdata = TB_RAW self.table_cleandata = TB_CLEAN self.crs = CRS # buttons for setting limitations on heatmap self.buttons = [ widgets.Button(description='100,000'), widgets.Button(description='1,000,000'), widgets.Button(description='10,000,000'), widgets.Button(description='No Limitation') ] # assign the set_limit function to all buttons (will change output based on value) for button in self.buttons: button.on_click(self.set_limit) self.limit = 'LIMIT 100' # default limit # get ram of computer self.pc_ram = round(psutil.virtual_memory().total / 1073741824, 1) # upload data to postgis. THIS TAKES A VERY LONG TIME (mine took ~14 hours) def upload(self, geolife_df): # create 'raw data' table. this will include all attributes of the dataset runsql('DROP TABLE IF EXISTS {}'.format(self.table_rawdata)) runsql(('CREATE TABLE {}' '(gid serial PRIMARY KEY, time timestamp, lat float, lon float, alt float, label int, user_id int)' .format(self.table_rawdata))) # data must be inserted row by row. using 'to_postgis' will crash as it uses too much memory for row in geolife_df.iterrows(): cur_row = row[1] runsql(("INSERT INTO {} (time, lat, lon, alt, label, user_id) VALUES ('{}', {}, {}, {}, {}, {})" .format(self.table_rawdata, cur_row['time'], cur_row['lat'], cur_row['lon'], cur_row['alt'], cur_row['label'], cur_row['user']))) # create 'clean' table. will treat the previous table as a backup # this table has a geometry ref instead of lat/lon runsql('DROP TABLE IF EXISTS {}'.format(self.table_cleandata)) runsql(('CREATE TABLE {} AS SELECT gid, time, st_setsrid(st_makepoint(lon, lat), {}) ' 'as geom, label, user_id FROM {}'.format(self.table_cleandata, self.crs, self.table_rawdata))) # disable buttons, get limitation based on the button value (with some string formatting) def set_limit(self, b): for button in self.buttons: button.disabled = True self.limit = b.description.replace(',', '') if self.limit == 'No Limitation': self.limit = '' else: self.limit = 'LIMIT ' + self.limit # get a message based on the amount of RAM your computer has def get_limitoptions(self): if self.pc_ram < 4: message = 'This is quite low (no offense). Recommended: 100,000 row limitation' elif 4 < self.pc_ram < 8: message = 'This is acceptable. Recommended: 1,000,000 row limitation' elif 8 < self.pc_ram < 16: message = 'This is good, but still limited. Recommended: 10,000,000 row limitation' else: message = 'You can handle anything. Try going with the whole dataset!' return message # return buttons def get_buttons(self): return self.buttons # return limit suffix def get_limit(self): return self.limit # return pc ram def get_ram(self): return self.pc_ram # create object instance. this is needed even if data is uploaded already to get limitation and ram etc. 
uploader = PostGISUploader() # start the data upload process if the data has not already been uploaded (constant = false) if not DATA_EXISTS: uploader.upload(geolife_raw) # class for dealing with folium maps class FoliumCreator: # set default values def __init__(self, zoom=10, size='600px', center=BEIJING, scale_pos='topleft'): self.zoom = zoom self.size = size self.center = center # using American english spelling as that's what leaflet uses, not bc I like it :( self.layout = Layout(height=self.size) self.scale_pos = scale_pos # set map layout def set_layout(self, layout): self.layout = layout # set zoom level def set_zoom(self, level): self.zoom = level # return map object def get_map(self): m = Map(center=self.center, zoom=self.zoom, layout=self.layout, basemap=basemaps.Stamen.Terrain) self.set_control(m, ScaleControl(position=self.scale_pos)) return m # set control (eg. scale bar) def set_control(self, instance, control): instance.add_control(control) # set new layer def set_layer(self, instance, layer): instance.add_layer(layer)Showing our Area of Interest A map of Beijing without any data. Basemaps show the topography and cityscape. We have chosen Beijing as most of the GeoLife data has telemetry from this city.''' MAP 1: Area of Interest -no context, just area -zoomed into centre based on constants -uses two basemaps to show urban and environment ''' # create mapcontroller instance, return a map object mapcontroller = FoliumCreator() map1 = mapcontroller.get_map() # add basemap layers right_layer = basemap_to_tiles(basemaps.OpenTopoMap) # OpenTopMap left_layer = basemap_to_tiles(basemaps.Stamen.Terrain) # Stamen Terrain (my fav) # add splitmap controller to map split_control = SplitMapControl(left_layer=left_layer, right_layer=right_layer) mapcontroller.set_control(map1, split_control) # show map display(map1)Mapping the Raw DataNow that we have uploaded all of our data to a server we can safely manipulate it. Again, we will focus on Beijing, China as this is where the majority of the data originate from. We will create a table that grabs all points that are within our defined 'bounding box'. Then, we can produce a heatmap of these points, coloured by density of points. This will show where most people moved around in Beijing on the dataset (or at least, where their phones reported location from the most). Note that despite limiting the extent, the data is still over 24 million rows, so this may take some time to process!# print information and recommendation based on ram print(('This dataset is over 24 million rows. It requires high amounts of memory to process in its entirety. ' 'Detected {} gigabytes, not all of which will be dedicated to the interpreter. {}. Of course, this is a free' ' world, do what you want - but you have been warned.' .format(uploader.get_ram(), uploader.get_limitoptions()))) # add buttons from uploader object, display buttons buttons = uploader.get_buttons() display(VBox([HTML('

Please select a data row limitation
'), HBox([buttons[0], buttons[1], buttons[2], buttons[3]])])) ''' MAP 2: Movement Heatmap -focus on Beijing, create table of points within that extent -saves table in cache, which will use a fair bit of storage on your device (depending on limitation) ''' # create beijing table based on constant if not DATA_EXISTS: # drop if exists, just in case runsql('DROP TABLE IF EXISTS {}'.format(TB_BEIJING)) print('Existing table dropped (if it existed). Creating Beijing table. This can take several minutes...') # selects beijing area from a makeenvelope function (clip to extent essentially) runsql('CREATE TABLE {} AS SELECT * FROM {} WHERE geom && st_makeenvelope ({}, {}, {}, {}, {})' .format(TB_BEIJING, TB_CLEAN, B1[0], B1[1], B2[0], B2[1], CRS)) print('Table created.') # giving info like this as it can take a while, so good to show something is happening # get beijing table print('Querying server. This can take a while...') query = ('SELECT * FROM {} {}'.format(TB_BEIJING, uploader.get_limit())) # get geodataframe for heatmap (all beijing) gdf = gpd.GeoDataFrame.from_postgis(query, CON) print(gdf.count()) # get map, changing zoom mapcontroller.set_zoom(10) map2 = mapcontroller.get_map() # convert geometry from geodataframe into a list so that it can be used for ipyleaflet heatmap print('GeoDataFrame created. Converting values to produce heatmap...') geom_list = [(x, y) for x, y in zip(gdf['geom'].y, gdf['geom'].x)] heatmap = Heatmap(locations=geom_list, radius=8) # add heatmap to map object, display map map2.add_layer(heatmap) display(map2)Querying server. This can take a while... gid 19567865 time 19567865 geom 19567865 label 19567865 user_id 19567865 dtype: int64 GeoDataFrame created. Converting values to produce heatmap...Finding UsersWe can see that, even when restricting to our city of interest, the dataset is far too big to handle. Let's restrict it further and focus in on a single user to have a look at the fine-grained patterns. Initially, I wrote an algorithm to find trips based on the label of transport changing, which worked by:- iterating through every user, grabbing all of their points from the server- checking if they have labels assigned to their travels - calculating their unique 'trips', assigned by the amount of times the label assigned to these trips changes, eg. walking for 10 minutes followed by a taxi ride will be defined as two trips- getting a user who has more than a defined minimum trip count, then using binary search to find the user who has the lowest amount of trips above this minimumHowever, this method has several flaws. As mentioned, not all users have labels assigned for their travels. Another is that two modes of transport may be considered part of a single trip. Instead, we can focus on user-level data, rather than trip-level data. We can focus on clusters of points rather than alternating transport methods, thus removing the reliance on the existence of labels (the algorithm I wrote found that over 70% of users have to labels). Overall, we can see that user-level data will be more effective than trip-level data as a way to handle the data. First, let's select a user to focus on. 
We will write the analysis in a way that it will work on any user that exists in the dataset.# class for each individual user # instead of creating a new class instance each time a new user is chosen, this function can update based on user id # handles all the displays for interacting with users, as well as giving information to analysis tools class UserObject: # initialize with some defaults def __init__(self, dashboard_mode=False): self.id = -1 self.id_count = 181 self.dashboard_mode = dashboard_mode # depends on whether used in final version or inline # submit user button self.bt_submit_user = widgets.Button(description='Select') self.bt_submit_user.on_click(self.submit_user) # randomize user button self.bt_random_user = widgets.Button(description='Randomize') self.bt_random_user.on_click(self.randomize_user) # get random id for the first user self.first_user_id = self.get_random_id() # dropdown for all users self.user_options = widgets.Dropdown( value=self.first_user_id, options=self.get_all_ids(), disabled=False, layout={'width': '30%'}) # observed function for change of dropdown self.user_options.observe(self.user_change, 'value') # outputs self.dp_userinfo = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '360px'}) self.clusterbox = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '200px', 'min_width': '200px'}) # get user info for first user info self.get_user_info(self.first_user_id) # disables submit button for dashboard mode if self.dashboard_mode: self.bt_submit_user.disabled = True # display all widgets def display(self): display(HBox([HTML('User: '), self.user_options, self.bt_random_user, self.bt_submit_user])) display(HBox([self.dp_userinfo, widgets.Output(layout={'min_width': '75px'}), self.clusterbox])) # submit user (non-dashboard). disables buttons, sets id based on selected value def submit_user(self, b): # 'b' acts as the button event/container b.disabled = True self.bt_random_user.disabled = True self.user_options.disabled = True self.set_id(self.user_options.value) print('User {} selected!'.format(self.get_id())) # get random user def randomize_user(self, b): self.user_options.value = self.get_random_id() # change user to value in dropdown (observing dropdown widget) def user_change(self, value): self.get_user_info(value.new) if self.dashboard_mode: self.set_id = value.new # set id def set_id(self, new_id): self.id = new_id # return id def get_id(self): return self.id # set what to display in 'clusterbox' (print output on the right to show clustering info) def set_clusterbox(self, content): with self.clusterbox: print(content) # get all possible id values def get_all_ids(self): return [i for i in range(0, self.id_count)] # get random id (random number generation) def get_random_id(self): return random.randint(0, self.id_count) # get user info (through db queries) def get_user_info(self, test_id): # show loading process self.bt_random_user.description = 'Loading...' 
self.bt_random_user.disabled = True # get info info = CON.execute('select count(*), count(distinct(label)), min(time), max(time) from {} where user_id = {}' .format(TB_BEIJING, test_id)).first() # clear any existing info self.dp_userinfo.clear_output() # print formatted summary based on query with self.dp_userinfo: print('User {} Summary\n--------------------------------------------'.format(test_id)) print(('Data points: {}\nUnique Travel Methods: {}\nData Begin: {}' '\nData End: {}'.format(info[0], info[1], info[2], info[3]))) # show loading process self.bt_random_user.description = 'Randomize' self.bt_random_user.disabled = False # create user object, display widgets user = UserObject() user.display()The code below looks at individual user movement and filters these movements to make this fine-grained data easier to handle. We can look at their mobility in a less processing-intensive way by using the function ST_REMOVEREPEATEDPOINTS which does exactly what the name suggests. Repeated points are defined by a tolerance level (essentially a buffer around each point):# get user information. query is based on collection of geometry (merge) gdf_raw = gpd.GeoDataFrame.from_postgis((('SELECT st_collect(geom) as all_geom, st_npoints(st_collect(geom)) ' 'as point_count from {} where user_id = {}') .format(TB_BEIJING, user.get_id())), CON, geom_col='all_geom') # give up if returns as none, giving some error info gdf_raw_n = str(gdf_raw['point_count'][0]) if gdf_raw_n == 'None': print("No points found. Please ensure you have clicked 'Select' above") else: print('Raw point count: ' + gdf_raw_n) # get simplified user information. removes repeated points as a simplification of the total user's mobility gdf_simple = gpd.GeoDataFrame.from_postgis((('SELECT st_removerepeatedpoints(st_collect(geom)) as simplified_geom, ' 'st_npoints(st_removerepeatedpoints(st_collect(geom), 0.001)) as point_count from {} where user_id = {}') .format(TB_BEIJING, user.get_id())), CON, geom_col='simplified_geom') gdf_simple_n = str(gdf_simple['point_count'][0]) print('Simplified point count: ' + gdf_simple_n) # plot output fig, axes = plt.subplots(1, 2, figsize=(15, 10)) # raw mobility trajectories axes[0].set_title('Original Trip Data for User {} (n={})'.format(user.get_id(), gdf_raw_n)) gdf_raw.plot(ax=axes[0]) axes[0].set_xlabel('Longitude') axes[0].set_ylabel('Latitude') # simplified mobility trajectories axes[1].set_title('Simplified Trip Data for User {} (n={})'.format(user.get_id(), gdf_simple_n)) gdf_simple.plot(ax=axes[1]) axes[1].set_xlabel('Longitude') axes[1].set_ylabel('Latitude') # show plots plt.show()Raw point count: 81184 Simplified point count: 2963Validation of SimplificationThese two plots look very similar, but we yet can't be sure. We can confirm by testing that the simplified analysis falls entirely within the area of the raw dataset. We will also plot the two datasets together to make sure the movement trends are the same. This takes a bit of time as we need to perform the simplification, split the geometery into latitude and longitude so we can plot it and perform our ST_CONTAINS.Note that the plots will show some distortion as they are not assigned a coordinate reference system. 
For the purposes of this analysis, this does not matter as we are only concerned about their relative (as opposed to absolute) displacement.# get raw data, split as x and y for plotting gdf_raw = gpd.GeoDataFrame.from_postgis((('SELECT st_x((st_dumppoints(st_collect(geom))).geom) as lat, ' 'st_y((st_dumppoints(st_collect(geom))).geom) as lon, (st_dumppoints(st_collect(geom))).geom as geom ' 'FROM {} WHERE user_id = {}'.format(TB_BEIJING, user.get_id()))), CON) # get simplified data, also split into x and y gdf_simple = gpd.GeoDataFrame.from_postgis((('SELECT ' 'st_x((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lat, ' 'st_y((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lon, ' '(st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom as geom ' 'FROM {} WHERE user_id = {}'.format(TB_BEIJING, user.get_id()))), CON) # tests if the simplified data is completely contained by the raw data contains = CON.execute(('SELECT st_contains(st_collect(geom), st_removerepeatedpoints(st_collect(geom), 0.001)) ' 'FROM {} where user_id = {}'.format(TB_BEIJING, user.get_id()))).first()[0] # display some information. gives validation based on what the above query returns print(('\nNote that size of points is offset for aesthetics and has no meaning.' 'The ST_CONTAINS was run on the raw data. Also note that the difference in n between this and the previous ' 'plot is due to rounding and is inconsequential to our analysis\n')) if contains: print('Raw data completely contains simplified data.') else: print('Raw data does not contain simplified data.') # create new plot fig = plt.figure(figsize=(10, 8)) ax = fig.add_subplot(111) fig.figsize = (10, 10) # plot raw and simplified datasets atop one another ax.scatter(gdf_raw['lat'], gdf_raw['lon'], s=50, c='black', marker="o", label='Raw (n={})' .format(gdf_raw.count()[0])) # larger dots to show difference ax.scatter(gdf_simple['lat'], gdf_simple['lon'], s=2, c='orange', marker="o", label='Simplified (n={})' .format(gdf_simple.count()[0])) ax.set_ylabel('Latitude') ax.set_xlabel('Longitude') plt.legend(loc='lower left'); # show plot plt.show()Note that size of points is offset for aesthetics and has no meaning.The ST_CONTAINS was run on the raw data. Also note that the difference in n between this and the previous plot is due to rounding and is inconsequential to our analysis Raw data completely contains simplified data.Pattern of User MovementsWe now have a set of telemetry from an individual user, compressed and validated to be more manageable. Let's do something with this data! Density-Based Spatial Clustering of Applications with Noise (DBSCAN)DBSCAN will help us find clusters in the data. These clusters will unveil information about the mobility of the user, showing patterns of movement that are clustered together which we can build inferences from. The sklearn package has all the tools we need to run a DBSCAN analysis, starting with calculating the distance of nearest-neighbour points. We can use these distances to find the 'knee point' on the 'elbow diagram' - the point where the distances of neighbours greatly increases (almost) asymptotically. 
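As a self-contained illustration of that 'knee point' idea (separate from the full plotting cell that follows), the hedged sketch below runs the same NearestNeighbors and KneeLocator steps on a small synthetic point set rather than on the GeoLife telemetry; the two Gaussian blobs and the scattered noise points are purely illustrative.

```python
# Stripped-down illustration of the knee-point idea on synthetic points
# (the real analysis below applies the same steps to the user's telemetry).
import numpy as np
from sklearn.neighbors import NearestNeighbors
from kneed import KneeLocator

rng = np.random.default_rng(0)
pts = np.vstack([
    rng.normal(0.0, 0.05, (100, 2)),   # dense cluster
    rng.normal(1.0, 0.05, (100, 2)),   # second dense cluster
    rng.uniform(-1.0, 2.0, (10, 2)),   # scattered noise
])

k = 20
dists, _ = NearestNeighbors(n_neighbors=k + 1).fit(pts).kneighbors(pts)
kth_dists = np.sort(dists[:, k])  # distance to the k-th neighbour, sorted

knee = KneeLocator(np.arange(len(kth_dists)), kth_dists,
                   S=1, curve='convex', direction='increasing').knee
if knee is not None:
    print('knee index:', knee, '-> candidate eps:', kth_dists[knee])
else:
    print('no knee detected for this synthetic sample')
```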
This 'knee point' serves as the epsilon value in the DBSCAN analysis, where we can then classify points into their respective clusters.# set sample parameter, get list of geometries, create figures for output samples = 20 geom_list = [(x, y) for x, y in zip(gdf_simple['lat'], gdf_simple['lon'])] fig, axes = plt.subplots(1, 2, figsize=(12, 5)) # get nearest neigbours nn = NearestNeighbors(n_neighbors=samples + 1) neighbors = nn.fit(geom_list) distances, indices = neighbors.kneighbors(geom_list) distances = np.sort(distances[:,samples], axis=0) # plot nearest neighbours axes[0].set_title('Nearest Neighbours Plot on User {} Mobility'.format(user.get_id())) axes[0].plot(distances) axes[0].set_xlabel('Points') axes[0].set_ylabel('Distances') # calculate knee point i = np.arange(len(distances)) knee = KneeLocator(i, distances, S=1, curve='convex', direction='increasing', interp_method='polynomial') # plot nearest neighbours with knee point axes[1].set_title('Knee Point of Nearest Neighbours for User {} Mobility'.format(user.get_id())) axes[1].plot(distances) axes[1].axvline(x=knee.knee,color='gray',linestyle='--') axes[1].text((axes[1].get_xlim()[1] * 0.5), (axes[1].get_ylim()[1] * 0.5), 'knee={}'.format(knee.knee)) axes[1].set_xlabel('Points') axes[1].set_ylabel('Distances') # create dbscan instance db = DBSCAN(eps=distances[knee.knee], min_samples=samples).fit(geom_list) # get labels (clustered) labels = db.labels_ # plot figure as scatterplot (seaborn) fig = plt.figure(figsize=(8, 8)) sns_scatter = sns.scatterplot(x=gdf_simple['lat'], y=gdf_simple['lon'], marker="o", hue=["{cluster}".format(cluster = 'Cluster ' + str(cur_label) if cur_label != -1 else 'Noise') for cur_label in labels]) sns_scatter.set(xlabel='Latitude', ylabel='Longitude', title='Clusters in User {} Mobility using DBSCAN\n(where -1 is noise)'.format(user.get_id())) # show plots plt.show()Analyzing Mobility Patterns/ClustersOf course, the simplification will greatly impact the reliability of the clustering. However, it is not viable to load the entire dataset for every user in-situ for clustering as they are far too large. This is a solution that compromises processing time and quality of output. We now have clustered mobility patterns for a specific user. What information can we derive from this? First we will need to consolidate everything about the specific user into one table:# create dataframe for manipulation, add clustering labels gdf_user = gdf_simple gdf_user['label'] = labels #gdf_user['time'] = 'datetime' # count with noise total_count = gdf_user.count()[0] # get locations of noise noise_index = gdf_user[gdf_user['label'] == -1].index # get counts, drop noise gdf_user_clean = gdf_user gdf_user_clean.drop(noise_index, inplace=True) clean_count = gdf_user_clean.count()[0] noise_count = total_count - clean_count # print info of noise, clusters, counts before and after dropping noise print('Out of {} points, {} were detected as noise ({}%). With noise removed, {} points remain with {} cluster(s)' .format(total_count, noise_count, round(noise_count / total_count * 100, 2), clean_count, len(gdf_user['label'].unique())))Out of 2963 points, 11 were detected as noise (0.37%). With noise removed, 2952 points remain with 9 cluster(s)Visualizing Mobility PatternsWe now have a clean, simplifed dataset. 
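Before mapping the trajectories, a quick tabular summary is one way to answer the "what information can we derive" question above. The sketch below is a hedged example using the gdf_user_clean frame built in the previous cell; it assumes the 'lat', 'lon' and 'label' columns produced there.

```python
# Hedged sketch: per-cluster point counts and centroid coordinates, using the
# 'lat', 'lon' and 'label' columns of gdf_user_clean from the cell above.
cluster_summary = (gdf_user_clean
                   .groupby('label')
                   .agg(points=('lat', 'size'),
                        centroid_lat=('lat', 'mean'),
                        centroid_lon=('lon', 'mean')))
print(cluster_summary)
```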
GPS trajectories will be plotted along paths so that we can see exactly where the user moves within clusters.# Antpath plot # here we can see the user's mozement along a moving path colours = ['red', 'green', 'blue', 'orange', 'purple', 'pink', 'brown', 'black', 'cyan', 'gold', 'white', 'crimson', 'olive'] mapcontroller.set_zoom(9) map2 = mapcontroller.get_map() by_cluster = gdf_user_clean.groupby('label') by_cluster = dict(list(by_cluster)) for cluster in by_cluster.keys(): current_gdf = by_cluster[cluster] geom_list = [(x, y) for x, y in zip(current_gdf['lon'], current_gdf['lat'])] ant_path = AntPath(locations=geom_list, dash_array=[1, 10], delay=3000, color=colours[cluster], pulse_color='black') mapcontroller.set_layer(map2, ant_path) display(map2)User InterfaceWe have now outlined the methods required to get fine-grained mobility analysis on this GeoLife dataset. We will finish by combining these methods into a single interface so that those less familiar with GISytems can access this data.This following cell, aside from the collation of data and the requirement of third-party packages, should function independently from the rest of the Notebook. This means it will expect an existing dataset uploaded to PostGIS, which, if you have run the previous cells, should be satisfied.''' GeoLife Mobility Dashboard -allows the selection of any user, displaying their DBSCAN clusters and trajectories -based on code above, with a lot of class modification (copied so that it can be run independently)) -for in-depth commenting, see the cells above -can be run independently (along with imports) ''' # import modules # used for: import psycopg2 # sql implementation from sqlalchemy import create_engine # connection to postgis import pandas as pd # tabular data handling import geopandas as gpd # tabular data handling with geometry (spatial) import folium # map creation (build on leaflet) import geoalchemy2 # support for spatial sql import matplotlib.pyplot as plt # plotting tool import numpy as np # used for getting statistics and complex numerical analyses from ipyleaflet import * # ipython leaflet extension from ipywidgets import * # ipython widgets (buttons, etc.) import glob # used for getting path names (for finding the geolife folder) from IPython.display import display # display function for inline plots etc. 
import pandas as pd # dataframes import datetime # getting datetime from delimited data import os # use local functions and getting system info import psutil # getting RAM of computer for performance recommendations import seaborn as sns # matplotlib, but fancy import random # random number generation from sklearn.cluster import DBSCAN # clustering algorithm from sklearn.neighbors import NearestNeighbors # neigbourhoods used for clustering from kneed import KneeLocator # locating knee point of clustering algorithm DATA_EXISTS = True SHOW_EXCEPTIONS = True TB_RAW = 'geolife_raw_data' TB_CLEAN = 'geolife_ref' ENGINE = create_engine('postgresql://postgres:postgres@localhost/postgres') CON = ENGINE.connect() CRS = '4326' BEIJING = [39.9, 116.41] TB_BEIJING = 'beijing_raw' B1 = 115.779419, 39.626846 B2 = 116.952209, 40.357010 COLOURS = ['green', 'blue', 'orange', 'pink', 'yellow', 'purple', 'red', 'brown', 'gray', 'aqua', 'peru', 'gold', 'crimson', 'olive', 'white'] def runsql(query): try: ENGINE.execute(query) except Exception as exep: if SHOW_EXCEPTIONS: print(exep) class GeoLifeHandler: def __init__(self): # apply numeric values for transport modes to save space self.mode_names = ['walk', 'bike', 'bus', 'car', 'subway','train', 'airplane', 'boat', 'run', 'motorcycle', 'taxi'] self.mode_ids = {s : i + 1 for i, s in enumerate(self.mode_names)} self.progress_bar = widgets.FloatProgress( value=0, min=0, max=10.0, description='Loading:', bar_style='info', style={'bar_color': '#0000FF'}, orientation='horizontal') self.folder = 'undefined' self.geolife_df = [] self.limit = '' # read plt file def read_plt(self, plt_file): points = pd.read_csv(plt_file, skiprows=6, header=None, parse_dates=[[5, 6]], infer_datetime_format=True) points.rename(inplace=True, columns={'5_6': 'time', 0: 'lat', 1: 'lon', 3: 'alt'}) # rename cols points.drop(inplace=True, columns=[2, 4]) # remove unused columns return points # read labels (where applicable) def read_labels(self, labels_file): labels = pd.read_csv(labels_file, skiprows=1, header=None, parse_dates=[[0, 1], [2, 3]], infer_datetime_format=True, delim_whitespace=True) labels.columns = ['start_time', 'end_time', 'label'] labels['label'] = [self.mode_ids[i] for i in labels['label']] # enumerate return labels # apply travel mode labels to points (enumerated to save bits) def apply_labels(self, points, labels): indices = labels['start_time'].searchsorted(points['time'], side='right') - 1 no_label = (indices < 0) | (points['time'].values >= labels['end_time'].iloc[indices].values) points['label'] = labels['label'].iloc[indices].values points['label'][no_label] = 0 # read individual user (folder is preassigned to each user) def read_user(self, user_folder): labels = None plt_files = glob.glob(os.path.join(user_folder, 'Trajectory', '*.plt')) df = pd.concat([self.read_plt(f) for f in plt_files]) labels_file = os.path.join(user_folder, 'labels.txt') if os.path.exists(labels_file): labels = self.read_labels(labels_file) self.apply_labels(df, labels) else: df['label'] = 0 return df # interate through all users (defined by folders in Data folder) def read_all_users(self): subfolders = os.listdir(self.folder) dfs = [] self.progress_bar.max = len(subfolders) display(self.progress_bar) for i, sf in enumerate(subfolders): #print('processing user {} of {}'.format(i + 1, len(subfolders))) self.progress_bar.value = i + 1 df = self.read_user((os.path.join(self.folder,sf))) df['user'] = int(sf) dfs.append(df) print('Load Complete') self.geolife_df = pd.concat(dfs) def 
set_location(self, file): self.folder = file def get_df(self): return self.geolife_df class FoliumCreator: def __init__(self, zoom=10, size='600px', center=BEIJING, scale_pos='topleft'): self.zoom = zoom self.size = size self.center = center # using American english spelling as that's what leaflet uses, not bc I like it :( self.layout = Layout(height=self.size) self.scale_pos = scale_pos def set_layout(self, layout): self.layout = layout def set_zoom(self, level): self.zoom = level def get_map(self): m = Map(center=self.center, zoom=self.zoom, layout=self.layout, basemap=basemaps.Stamen.Terrain) self.set_control(m, ScaleControl(position=self.scale_pos)) return m def set_control(self, instance, control): instance.add_control(control) def set_layer(self, instance, layer): instance.add_layer(layer) class UserObject: def __init__(self, scanner, panel_plots, folium_map, mapcontrol_object, dashboard_mode=False): self.id = -1 self.trips = {} self.id_count = 182 self.dashboard_mode = dashboard_mode self.scan_object = scanner self.panel_plots = panel_plots self.folium_map = folium_map self.mapper = mapcontrol_object self.bt_submit_user = widgets.Button(description='Select') self.bt_submit_user.on_click(self.submit_user) self.bt_random_user = widgets.Button(description='Randomize') self.bt_random_user.on_click(self.randomize_user) self.first_user_id = self.get_random_id() self.user_options = widgets.Dropdown( value=self.first_user_id, options=self.get_all_ids(), disabled=False, layout={'width': '30%'}) self.user_options.observe(self.user_change, 'value') self.dp_userinfo = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '360px'}) self.clusterbox = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '400px', 'min_width': '400px'}) self.get_user_info(self.first_user_id) self.scan_object.reset(self.gdf, self, self.panel_plots) self.scan_object.show_plot() if self.dashboard_mode: self.bt_submit_user.disabled = True def display(self): display(HBox([HTML('User: '), self.user_options, self.bt_random_user, self.bt_submit_user])) display(HBox([self.dp_userinfo, widgets.Output(layout={'min_width': '75px'}), self.clusterbox])) def submit_user(self, b): b.disabled = True self.bt_random_user.disabled = True self.user_options.disabled = True self.set_id(self.user_options.value) print('User {} selected!'.format(self.get_id())) def randomize_user(self, b): self.user_options.value = self.get_random_id() def user_change(self, value): self.get_user_info(value.new) if self.dashboard_mode: try: self.set_id(value.new) self.scan_object.reset(self.gdf, self, self.panel_plots) self.clean_gdf() self.get_antline() self.scan_object.show_plot() except Exception as e: print('ERROR during processing. Exception:') print(e) def set_id(self, new_id): self.id = new_id def get_id(self): return self.id # currently unused def set_clusterbox(self, content): with self.clusterbox: print(content) def get_all_ids(self): return [i for i in range(0, self.id_count)] def get_random_id(self): return random.randint(0, self.id_count - 1) def get_user_info(self, test_id): self.bt_random_user.description = 'Loading...' 
self.bt_random_user.disabled = True info = CON.execute('select count(*), count(distinct(label)), min(time), max(time) from {} where user_id = {}' .format(TB_BEIJING, test_id)).first() gdf_simplified = gpd.GeoDataFrame.from_postgis((('SELECT ' 'st_x((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lat, ' 'st_y((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lon, ' '(st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom as geom ' 'FROM {} WHERE user_id = {}'.format(TB_BEIJING, test_id))), CON) self.gdf = gdf_simplified self.dp_userinfo.clear_output() with self.dp_userinfo: print('User {} Summary\n--------------------------------------------'.format(test_id)) print(('Data points: {}\nUnique Travel Methods: {}\nData Begin: {}' '\nData End: {}'.format(info[0], info[1], info[2], info[3]))) self.bt_random_user.description = 'Randomize' self.bt_random_user.disabled = False def clean_gdf(self): self.gdf['label'] = self.scan_object.get_labels() self.gdf['time'] = 'datetime' total_count = self.gdf.count()[0] noise_index = self.gdf[self.gdf['label'] == -1].index self.cleaned_gdf = self.gdf self.cleaned_gdf.drop(noise_index, inplace=True) clean_count = self.cleaned_gdf.count()[0] noise_count = total_count - clean_count self.clusterbox.clear_output() with self.clusterbox: print(('Out of {} points, {} were detected as noise ({}%). With noise removed, {} points remain ' 'with {} cluster(s)\n\nNote that cluster colours between plots are not constant') .format(total_count, noise_count, round(noise_count / total_count * 100, 2), clean_count, len(self.gdf['label'].unique()))) def get_antline(self): self.mapper.set_zoom(9) self.folium_map.clear_layers() self.folium_map.add_layer(basemaps.Stamen.Terrain) by_cluster = self.cleaned_gdf.groupby('label') by_cluster = dict(list(by_cluster)) for cluster in by_cluster.keys(): current_gdf = by_cluster[cluster] current_geom_list = [(x, y) for x, y in zip(current_gdf['lon'], current_gdf['lat'])] ant_path = AntPath(locations=current_geom_list, dash_array=[1, 10], delay=3000, color=COLOURS[cluster], pulse_color='black') self.mapper.set_layer(self.folium_map, ant_path) class DBSCANner: def __init__(self): self.backup_knee = 1 def reset(self, gdf, user, sidebar, samples=20): self.samples = samples self.gdf = gdf self.geom_list = [(x, y) for x, y in zip(self.gdf['lat'], self.gdf['lon'])] self.panel = sidebar self.user = user self.panel.clear_output() with self.panel: self.fig, self.axes = plt.subplots(2, 1, figsize=(4, 8)) self.distances = self.get_distances() self.knee = self.get_knee() self.labels = self.get_DBSCAN() self.gdf['cluster'] = self.labels self.plot_knee() self.plot_clusters() def get_distances(self): # get nearest neigbours # Americanized spelling as this is the name of the function, not because I like it try: if (len(self.geom_list) < self.samples + 1): self.samples = int(round(len(self.geom_list) / 2, 0)) nn = NearestNeighbors(n_neighbors=self.samples + 1) neighbors = nn.fit(self.geom_list) distances, indices = neighbors.kneighbors(self.geom_list) distances = np.sort(distances[:, self.samples], axis=0) return distances except Exception as e: print('\nERROR during processing. 
Exception:') print(e) def get_knee(self): try: # calculate knee point i = np.arange(len(self.distances)) knee = KneeLocator(i, self.distances, S=1, curve='convex', direction='increasing', interp_method='polynomial') # make sure knee returns as number if (isinstance(knee.knee, int)): knee_val = self.backup_knee else: knee_val = knee.knee # save knee point return knee_val # lots of errors/exceptions occur on 1 or 2 users with very low n. handling a single one and returning # is not feasible. return knee point of 1 instead, better than crashing except: return 1 def get_DBSCAN(self): try: # calculate DBSCAN clusters (classify) dbscan = DBSCAN(eps=self.distances[self.knee], min_samples=self.samples).fit(self.geom_list) # get labels of clusters, return them as list labels = dbscan.labels_ return labels except Exception as e: print('\nERROR during processing. Exception:') print(e) def plot_knee(self): with self.panel: try: self.axes[0].set_title('Knee Point of Nearest Neighbours for User {} Mobility'.format(self.user.get_id())) self.axes[0].plot(self.distances) self.axes[0].axvline(x=self.knee, color='gray', linestyle='--') #self.axes[0].text((self.knee, (0.5 * self.axes[0].get_ylim()[0])), # 'knee={}'.format(self.knee)) self.axes[0].set_xlabel('Points') self.axes[0].set_ylabel('Distances') except Exception as e: print('ERROR during processing. Exception:') print(e) def plot_clusters(self): with self.panel: '''scatter_plot = sns.scatterplot(x=self.gdf['lat'], y=self.gdf['lon'], marker="o", ax=self.axes[1], hue=["{cluster}".format(cluster = 'Cluster ' + str(cur_label) if cur_label != -1 else 'Noise') for cur_label in self.gdf['labels']])''' scatter_plot = sns.scatterplot(x=self.gdf['lat'], y=self.gdf['lon'], marker="o", ax=self.axes[1], palette='Set2', hue=self.gdf['cluster'], legend='full') scatter_plot.set(xlabel='Latitude', ylabel='Longitude', title='Clusters in User {} Mobility using DBSCAN\n(where -1 is noise)' .format(self.user.get_id())) def get_labels(self): return self.labels def show_plot(self): with self.panel: plt.tight_layout() plt.show() if not DATA_EXISTS: bt_submit = widgets.Button(description='Select') file_options = widgets.Dropdown( options=[(os.path.join(os.getcwd(), 'geolife_trajectories\Data')), (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop\Data')), (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Downloads\Data'))], disabled=False, layout={'width': '70%'}) display(VBox([HTML('
GeoLife File Location
'), HBox([file_options, bt_submit])])) def bt_submitevent(b): if os.path.exists(file_options.value): b.disabled = True handler.set_location(file_options.value) handler.read_all_users() else: print('Data file not found. Please try again...') handler = GeoLifeHandler() bt_submit.on_click(bt_submitevent) try: geolife_raw = handler.get_df() display(geolife_raw.count()) display(geolife_raw.head(10)) geolife_raw = geolife_raw.head(10) except: print('Data either not yet extracted, or already available on server') uploader = PostGISUploader() uploader.upload(geolife_raw) mapcontroller = FoliumCreator() map_main = mapcontroller.get_map() panel_plots = widgets.Output() panel_top = HBox([map_main, panel_plots]) display(panel_top) scanner = DBSCANner() user = UserObject(scanner, panel_plots, map_main, mapcontroller, dashboard_mode=True) #scanner.reset(gdf_simple, user, panel_plots) user.display() # TO DO # -get first user working # -fix issues # -get time column working # -sync colours of cluster plot and mapSentiment analysis avec Textblob-FR Documentation: https://textblob.readthedocs.io/en/dev/ Importsimport sys from textblob import Blobber from textblob_fr import PatternTagger, PatternAnalyzerFonctiontb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer()) def get_sentiment(input_text): blob = tb(input_text) polarity, subjectivity = blob.sentiment polarity_perc = f"{100*abs(polarity):.0f}" subjectivity_perc = f"{100*subjectivity:.0f}" if polarity > 0: polarity_str = f"{polarity_perc}% positive" elif polarity < 0: polarity_str = f"{polarity_perc}% negative" else: polarity_str = "neutral" if subjectivity > 0: subjectivity_str = f"{subjectivity}% subjective" else: subjectivity_str = "perfectly objective" print(f"This text is {polarity_str} and {subjectivity_str}.")Analyser le sentiment d'une phraseget_sentiment("Ce conseil municipal est vraiment super intéressant.") get_sentiment("Cette phrase est négative et je ne suis pas content !")Creating long dataframes from *puffy* tablesThis tutorial will show the basic features of using the `puffbird.puffy_to_long` methodimport numpy as np import pandas as pd import puffbird as pbA weirdly complex table First, we will create a *puffy* dataframe as an example:df = pd.DataFrame({ # a and c have the same data repeated three times # b is just a bunch of numpy arrays of the same shapes # d is also just a bunch of numpy arrays of different shapes # e contains various pandas DataFrames with the same column structures # and the same index format. 
# f contains various pandas DataFrames with different structures # g contains mixed data types # missing data is also included 'a': [ 'aa', 'bb', 'cc', 'dd', 'aa', 'bb', 'cc', 'dd', 'aa', 'bb', 'cc', 'dd' ], 'b': [ np.random.random((10, 5)), np.nan, np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)), np.random.random((10, 5)) ], 'c': [ {'dicta':[1,2,3], 'dictb':3, 'dictc':{'key1':1, 'key2':2}}, {'dicta':[52,3], 'dictb':[3,4], 'dictc':{'key4':1, 'key2':2}}, {'dicta':[12,67], 'dictb':(4,5), 'dictc':{'key3':1, 'key2':77}}, {'dicta':[1,23], 'dictb':3, 'dictc':{'key1':55, 'key2':33}}, {'dicta':123, 'dictb':'words', 'dictc':{'key1':4, 'key2':2}}, {'dicta':[1,2,3], 'dictb':3, 'dictc':{'key1':1, 'key2':2}}, {'dicta':[52,3], 'dictb':[3,4], 'dictc':{'key4':1, 'key2':2}}, {'dicta':[12,67], 'dictb':(4,5), 'dictc':{'key3':1, 'key2':77}}, {'dicta':[1,23], 'dictb':3, 'dictc':{'key1':55, 'key2':33}}, {'dicta':123, 'dictb':'words', 'dictc':{'key1':4, 'key2':2}}, {'dicta':[1,2,3], 'dictb':3, 'dictc':{'key1':1, 'key2':2}}, {'dicta':[52,3], 'dictb':[3,4], 'dictc':{'key4':1, 'key2':2}}, ], 'd': [ np.random.random((16, 5)), np.random.random((18, 5)), np.random.random((19, 5)), np.random.random((11, 5)), np.random.random((12, 5)), np.random.random((14, 5)), np.random.random((17, 5)), np.random.random((110, 5)), None, np.random.random((2, 5)), np.random.random((4, 5)), np.random.random((7, 5)) ], 'e': [ pd.DataFrame( {'c1':[1,2,3], 'c2':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[1,2,3,4], 'c2':[1,2,3,4]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), np.nan, pd.DataFrame( {'c1':[1,2,3], 'c2':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[1,2,3,4], 'c2':[1,2,3,4]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), np.nan, pd.DataFrame( {'c1':[1,2,3], 'c2':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[1,2,3,4], 'c2':[1,2,3,4], 'c3':[1,2,3,4]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), np.nan, ], 'f': [ pd.DataFrame( {'f1':[1,2,3], 'hh2':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c'], ['f', 'f', 'f']], names=['f', 'b', 'e'] ) ), pd.DataFrame( {'hh1':[1,2,3,4], 'qq2':[1,2,3,4]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['a', 'b'] ) ), pd.DataFrame( {'q1':[3,4,3], 'qq2':[3,5,3], 'c3':[1,2,3], 'c4':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c'], ['t', 't', 't']], names=['y', 'll', 'tt'] ) ), np.nan, pd.DataFrame( {'qq1':[1,2,3], 'rr2':[1,2,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), 
pd.DataFrame( [[1,2,3,4], [1,2,3,4]], columns=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['rr', 'b'] ), index=pd.MultiIndex.from_arrays( [[(1,2), (2,3)], ['a', 'b']], names=['a', 'b'] ) ), pd.DataFrame( {'cpp1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'rr'] ) ), np.nan, pd.DataFrame( {'sr1':[1,2,3,4], 'c2':[1,2,3,4]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']], names=['a', 'b'] ) ), pd.DataFrame( {'cpp1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['a', 'b'] ) ), pd.DataFrame( {'c1':[3,4,3], 'c2':[3,5,3]}, index=pd.MultiIndex.from_arrays( [['a', 'b', 'c'], ['a', 'b', 'c']], names=['mm', 'b'] ) ), np.nan, ], 'g': [ 'a', 'b', {'ff':'gg'}, {'a', 'b', 'c'}, ('r',), pd.Series({'a':'b'}), 'a', 'b', 1, 2, 3, 4 ] }) dfSo this dataframe is quite daunting, I like to call it a *puffy* table. *Exploding* the data out that are in *puffy* tablesNow with `puffy_to_long` you can easily unravel this dataframe. Since this dataframe is weirdly constructed `puffy_to_long` may take a while:long_df = pb.puffy_to_long(df) long_df.head()Now we have a dataframe with only hashable elements. `puffy_to_long` iteratively exploded all cells and treated each column individually. For example, if a cell contains a numpy array that is two-dimensional then the column will be exploded twice and two new columns will be added that are called *\[COLUMN_NAME\]_level0* and *\[COLUMN_NAME\]_level1*. For our column `b`, we get two new columns called `b_level0` and `b_level1`. These levels will contain the index corresponding to the data point in the long-format of column `b`. Let's try `puffy_to_long` again but just on column `b`. *Exploding* numpy.array-containing columnslong_df = pb.puffy_to_long(df, 'b') long_df.head()`index_level0` is the previous index from our dataframe. If this were a `pandas.MultiIndex`, we would have multiple columns instead of just one that corresponds to the old index of the dataframe. Now, we could take this normal dataframe objects and perform various operations that we would normally want to perform, e.g.:long_df.groupby('b_level0')['b'].mean()Let's say we want to *explode* both column `b` and column `d` that both contain numpy arrays. Imagine that we want to align the axis 1 of the data in `b` and `d`, and call this axes `aligned_axis`, we can do this with `puffy_to_long` keyword arguments:long_df = pb.puffy_to_long(df, 'b', 'd', aligned_axis={'b':1, 'd':1}) long_df.head()So now, axis 1 of `b` and `d` column data are aligned and those indices are defined in the `aligned_axis` column. Since both columns contain missing cells, some `index_level0` values have missing `b` and `b_level0` columns or missing `d_level0` and `d` columns. You can view these easily using standard pandas functionality:long_df.loc[long_df['b_level0'].isnull()].head() long_df.loc[long_df['d_level0'].isnull()].head()*Exploding* pandas.DataFrame-containing columns Let's take a look at how `pandas.DataFrame` objects are handled within `puffy_to_long` by taking a look at column `e`:long_df = pb.puffy_to_long(df, 'e') long_df.head()`pandas.DataFrame` objects are handled within one *explosion* iteration, unless the cell within that dataframe are non-hashable. This is why all new columns contain `level0`. The first two new columns `e_level0_a` and `e_level0_b` correspond to the `pandas.MultiIndex` index defined in all dataframes within this column. 
`e_level0_2` corresponds to all the columns names of the dataframe. `e` only contains the data within each cell of each dataframe. Let's say we don't want to unravel our columns in this way but instead just concatenate them all together. We can use the `expand_cols` argument for this, which expects a list of column names that contain only `pandas.DataFrame` or `pandas.Series` objects:long_df = pb.puffy_to_long(df, 'e', expand_cols=['e']) long_df.head()Here we preserved the columns of each dataframe in each cell and simply concatenated the dataframes together with the `index_level0` information preserved. What if we use this method while also *exploding* column `a`, since this has the same column name as the column in the dataframes within column `e`:long_df = pb.puffy_to_long(df, 'a', 'e', expand_cols=['e']) long_df.head()Since the column `a` already existed, the column `a` within each dataframe within column `e` was renamed to `a_e`. Of course, similarly, this is handled with column `b`:long_df = pb.puffy_to_long(df, 'b', 'e', expand_cols=['e']) long_df.head()Less structured dataframe-containing columns will result in more complex long-format dataframes:long_df = pb.puffy_to_long(df, 'f') long_df.head() long_df = pb.puffy_to_long(df, 'f', expand_cols=['f']) long_df.head()*Exploding* dictionaries The column `c` contains dictionaries with various data types that are in them. The `puffy_to_long` algorithm iteratively *explodes* all objects within the dictionaries:long_df = pb.puffy_to_long(df, 'c') long_df.head()import urllib.request BCurl='https://github.com/marongkang/datasets/raw/main/BCHAIN-MKPRU.csv' Gurl='https://github.com/marongkang/datasets/raw/main/LBMA-GOLD.csv' BCrps=urllib.request.urlopen(BCurl) Grps=urllib.request.urlopen(Gurl) BChtml=BCrps.read() Ghtml=Grps.read() f1=open('BCHAIN-MKPRU.csv','wb') f1.write(BChtml) f2=open('LBMA-GOLD.csv','wb') f2.write(Ghtml) import pandas as pd import matplotlib.pyplot as plt from matplotlib.pyplot import MultipleLocator import numpy as np BCdf=pd.read_csv('BCHAIN-MKPRU.csv') Gdf=pd.read_csv('LBMA-GOLD.csv') BCdf['Date']=pd.to_datetime(BCdf['Date']) Gdf['Date']=pd.to_datetime(Gdf['Date']) Gdf['Return']=0数据预处理**缺失值分析** **KNN**import sklearn from sklearn.impute import KNNImputer plt.plot(Gdf['Date'],Gdf['USD (PM)']) #效果不佳 ''' imputer = KNNImputer(n_neighbors=30, weights='distance' ) imputed = imputer.fit_transform(Gdf[['Return','USD (PM)']]) df_imputed = pd.DataFrame(imputed, columns=Gdf[['Return','USD (PM)']].columns) Gdf[['Return','USD (PM)']]=df_imputed ''' plt.plot(Gdf['Date'],Gdf['USD (PM)'])**dropna**Gdf=Gdf.dropna() Gdf=Gdf.reset_index()计算关键指标BCret=[0] for i in range(1,len(BCdf)): BCret.append((BCdf['Value'][i]-BCdf['Value'][i-1])/BCdf['Value'][i-1]) Gret=[0] for i in range(1,len(Gdf)): Gret.append((Gdf['USD (PM)'][i]-Gdf['USD (PM)'][i-1])/Gdf['USD (PM)'][i-1]) BCdf['Return']=BCret Gdf['Return']=Gret|index|Value|Return|Var||---|---|---|---||count|1826\.0|1826\.0|1826\.0||mean|12206\.068281468402|0\.0032333670412730536|0\.0016046231673695472||std|14043\.891626844317|0\.041475857776247516|0\.0004943415076093683||min|594\.08|-0\.39140443386063384|0\.0||25%|3994\.9825|-0\.012508367874880424|0\.0016626569540600347||50%|7924\.46|0\.001438775113487313|0\.001724851684370721||75%|11084\.73|0\.0190618988914242|0\.0018092466918252334||max|63554\.44|0\.21866893459705275|0\.002272227021372883|#计算收益等级 Glabel=[] for i in range(len(Gdf)): if Gdf['Return'][i]>= 0.004494: Glabel.append(0) elif Gdf['Return'][i]>=0.000203: Glabel.append(1) elif 
Gdf['Return'][i]>=-0.004267: Glabel.append(2) else: Glabel.append(3) Gdf['Label']=Glabel #计算收益等级 BClabel=[] for i in range(len(BCdf)): if BCdf['Return'][i]>= 0.019062: BClabel.append(0) elif BCdf['Return'][i]>=0.001439: BClabel.append(1) elif BCdf['Return'][i]>=-0.012508: BClabel.append(2) else: BClabel.append(3) BCdf['Label']=BClabel**指数加权移动平均(Exponentially Weighted Average)**#args beta=0.9 def EW_avg(data): res=[0]*len(data) for i in range(1,len(data)): res[i]=beta*res[i-1]+(1-beta)*data[i] res[i]=res[i]/(1-beta**(np.exp(i))) return res BCdf['Avg_ret']=EW_avg(BCdf['Return']) plt.figure(dpi=200) plt.plot(BCdf['Date'],BCdf['Return'],label='Return') plt.plot(BCdf['Date'],BCdf['Avg_ret'],label='EW_avg') plt.show() Gdf['Avg_ret']=EW_avg(Gdf['Return']) plt.figure(dpi=200) plt.plot(Gdf['Date'],Gdf['Return'],label='Return') plt.plot(Gdf['Date'],Gdf['Avg_ret'],label='EW_avg') plt.show()/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: overflow encountered in exp """合理性解释y=np.random.rand(30) plt.figure(dpi=200) plt.plot(range(30),y,label='Return') plt.plot(range(30),EW_avg(y),label='EW_avg') plt.show()Torchimport torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data.dataset import random_split import torch.utils.data as data from sklearn.preprocessing import MinMaxScalerLSTM#ML Parameters lr = 1E-4 epochs = 100 batch_size = 20 scaler = MinMaxScaler() device = 'cuda' if torch.cuda.is_available() else 'cpu'**构建数据集**class StockDataset(data.Dataset): def __init__(self,input): self.df=input self.orig_dataset = self.df[['Value','Return']].to_numpy() self.TLabel=self.df[['Label']].to_numpy() self.normalized_dataset = np.copy(self.orig_dataset) #self.normalized_dataset = self.normalized_dataset.reshape(-1, 1) self.normalized_dataset = scaler.fit_transform(self.normalized_dataset) #self.normalized_dataset = self.normalized_dataset.reshape(-1) self.sample_len = 20 def __len__(self): if len(self.orig_dataset) > self.sample_len: return len(self.orig_dataset) - self.sample_len else: return 0 def __getitem__(self, idx): target = self.normalized_dataset[idx+self.sample_len,0] i = self.normalized_dataset[idx:(idx+self.sample_len),0] #i = i.reshape((-1, 1)) i = torch.from_numpy(i) i=i.reshape((-1,1)) target = torch.Tensor([target]) i=i.double() target=target.double() return i, target # Load dataset dataset = StockDataset(BCdf[['Value','Return','Label']]) # Split training and validation set train_len = int(0.7*len(dataset)) valid_len = len(dataset) - train_len TrainData, ValidationData = random_split(dataset,[train_len, valid_len]) # Load into Iterator (each time get one batch) train_loader = data.DataLoader(TrainData, batch_size=batch_size, shuffle=True, num_workers=2) test_loader = data.DataLoader(ValidationData, batch_size=batch_size, shuffle=True, num_workers=2) # Print statistics print("Total: ", len(dataset)) print("Training Set: ", len(TrainData)) print("Validation Set: ", len(ValidationData)) dataset.normalized_dataset for i,j in train_loader: #print(i,j) continue class TempLSTM(nn.Module): def __init__(self): # Required in PyTorch Model super(TempLSTM, self).__init__() # Parameters self.feature_dim = 1 self.hidden_dim = 500 self.num_layers = 3 self.output_dim = 1 # Neural Networks self.lstm = nn.LSTM(self.feature_dim, self.hidden_dim, self.num_layers, dropout=0.1, batch_first=True) if torch.cuda.is_available(): self.lstm.cuda() self.fc = nn.Linear(self.hidden_dim , self.output_dim) def forward(self, i): h0 = 
torch.randn([self.num_layers, i.shape[0], self.hidden_dim], dtype=torch.double, device=device) #.requires_grad_() c0 = torch.randn([self.num_layers, i.shape[0], self.hidden_dim], dtype=torch.double, device=device) #.requires_grad_() # Forward propagate LSTM out, _ = self.lstm.forward(i, (h0.detach(), c0.detach())) # output shape (batch, sequence, hidden_dim) Lout = self.fc(out[:, -1, :]) return Lout # Define model model = TempLSTM() model = model.double() #model = Model(1) print(model) # Load into GPU if necessary if torch.cuda.is_available(): model = model.cuda() # Define loss function criterion = nn.MSELoss() # Define optimization strategy optimizer = torch.optim.Adam(model.parameters(), lr=lr) def train(model, iterator, optimizer, criterion, device): model.train() # Enter Train Mode train_loss =0 for _, (ii, targets) in enumerate(iterator): # move to GPU if necessary if torch.cuda.is_available(): ii, targets = ii.cuda(), targets.cuda() # generate prediction #print(ii) output = model(ii) #print(output) # calculate loss loss = criterion(output, targets) # compute gradients and update weights optimizer.zero_grad() loss.backward() optimizer.step() # record training losses train_loss+=loss.item() # print completed result #print('train_loss: %f' % (train_loss)) return train_loss def test(model, iterator, criterion, device): model.eval() # Enter Evaluation Mode test_loss =0 with torch.no_grad(): for _, (ii, targets) in enumerate(iterator): # move to GPU if necessary ii, targets = ii.to(device), targets.to(device) # generate prediction output = model(ii) #print(output) # calculate loss loss = criterion(output,targets) # record training losses test_loss+=loss.item() # print completed result #print('test_loss: %s' % (test_loss)) return test_loss def predict(model,data): if torch.cuda.is_available(): data=data.cuda() model.eval() # Enter Evaluation Mode with torch.no_grad(): pred = model(data) return pred !pip install git+https://github.com/d2l-ai/d2l-zh@release from d2l import torch as d2l from IPython import display % matplotlib inline class Animator: def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None, xscale='linear', yscale='linear', fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1, figsize=(3.5, 2.5)): if legend is None: legend=[] d2l.use_svg_display() self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize) if nrows * ncols == 1: self.axes = [self.axes, ] self.config_axes = lambda: d2l.set_axes( self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend) self.X, self.Y, self.fmts = None, None, fmts def add(self, x, y): # 向图表中添加多个数据点 if not hasattr(y, "__len__"): y = [y] n = len(y) if not hasattr(x, "__len__"): x = [x] * n if not self.X: self.X = [[] for _ in range(n)] if not self.Y: self.Y = [[] for _ in range(n)] for i, (a, b) in enumerate(zip(x, y)): if a is not None and b is not None: self.X[i].append(a) self.Y[i].append(b) self.axes[0].cla() for x, y, fmt in zip(self.X, self.Y, self.fmts): self.axes[0].plot(x, y, fmt) self.config_axes() display.display(self.fig) display.clear_output(wait=True) if torch.cuda.is_available(): model=model.cuda() #animator animator=Animator(xlabel='epoch', xlim=[1, epochs], legend=['train loss','test_loss']) for epoch in range(epochs): train_loss=train(model, train_loader, optimizer, criterion, device) test_loss=test(model, test_loader, criterion, device) animator.add(epoch+1,(train_loss,test_loss)) for _, (ii, targets) in enumerate(test_loader): pred=predict(model,device,ii) print(pred,targets) 
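One small inconsistency in the loop above: it calls predict(model, device, ii), but predict as defined earlier takes only the model and the input batch (and moves the batch to the GPU itself), so the three-argument call would raise a TypeError. A call consistent with that definition would look like this:

```python
# Prediction loop matching the predict(model, data) signature defined above;
# predict() already moves the batch to the GPU when one is available.
for _, (ii, targets) in enumerate(test_loader):
    pred = predict(model, ii)
    print(pred, targets)
```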
#torch.save(model,'/content/drive/MyDrive/Models/LSMT.pt') model=torch.load('/content/drive/MyDrive/Models/LSMT.pt',map_location='cpu')交易模拟 **股价预测**Gdf.count() scaler.inverse_transform([[predict(model,torch.Tensor([scaler.fit_transform(BCdf[['Value']])[0:20]]).double()).item()]])[0][0] pred=[] for i in range(len(BCdf)-20): pred.append(scaler.inverse_transform([[predict(model,torch.Tensor([scaler.fit_transform(BCdf[['Value']])[i:i+20]]).double()).item()]])[0][0]) plt.figure(dpi=200) plt.plot(BCdf['Date'],BCdf['Value'],label='bitcoin_price') plt.plot(BCdf['Date'][20:],pred,label='predict') plt.legend(loc = 'upper left') plt.show() BCdf['Pred']=0 BCdf['Pred'][20:]=pred Gscaler=MinMaxScaler() Gscaler.fit_transform(Gdf[['USD (PM)']]) Gpred=[] for i in range(len(Gdf)-20): Gpred.append(Gscaler.inverse_transform([[predict(model,torch.Tensor([Gscaler.fit_transform(Gdf[['USD (PM)']])[i:i+20]]).double()).item()]])[0][0]) plt.figure(dpi=200) plt.plot(Gdf['Date'],Gdf['USD (PM)'],label='gold_price') plt.plot(Gdf['Date'][20:],Gpred,label='predict') plt.legend(loc = 'upper left') plt.show() Gdf['Pred']=0 Gdf['Pred'][20:]=Gpred Pred_ret=[] for i in range(20,len(BCdf)): Pred_ret.append((BCdf['Pred'][i]-BCdf['Value'][i])/BCdf['Value'][i]) BCdf['pred_ret']=0 BCdf['pred_ret'][20:]=Pred_ret Pred_ret=[] for i in range(20,len(Gdf)): Pred_ret.append((Gdf['Pred'][i]-Gdf['USD (PM)'][i])/Gdf['USD (PM)'][i]) Gdf['pred_ret']=0 Gdf['pred_ret'][20:]=Pred_ret plt.figure(dpi=150) plt.plot(BCdf['Date'],BCdf['pred_ret']) plt.figure(dpi=150) plt.plot(Gdf['Date'],Gdf['pred_ret']) plt.plot(Gdf['Date'],Gdf['Return'])**计算均方差和协方差**xy=[] j=0 for i in range(len(BCdf)): xy.append(BCdf['Return'][i]*Gdf['Return'][j]) if BCdf['Date'][i]>=Gdf['Date'][j]: j=j+1 BCdf['xy']=xy BCdf['xy_avg']=EW_avg(BCdf['xy']) cov=[] j=0 for i in range(len(BCdf)): cov.append(BCdf['xy_avg'][i]-BCdf['Avg_ret'][i]*Gdf['Avg_ret'][j]) if BCdf['Date'][i]>=Gdf['Date'][j]: j=j+1 BCdf['cov']=cov def squareX_EX(df): return (df['Return']-df['Avg_ret'])**2Exponentially weighted averages$X_t=\beta X_{t-1}+(1-\beta)R_t$ $V_t=\beta V_{t-1}+(1-\beta)(R_t-X_t)^2$where:* X_t|第t天的指数加权平均收益率* R_t|第t天的实际收益率* V_t|第t天的偏差值BCdf['squareX_EX']=squareX_EX(BCdf) BCdf['EW_var']=EW_avg(BCdf['squareX_EX']) Gdf['squareX_EX']=squareX_EX(Gdf) Gdf['EW_var']=EW_avg(Gdf['squareX_EX']) BCdf #args epochs=len(BCdf)**组合投资策略** $R_t=\frac{T_t}{T_{t-1}}-1$def portfolio(i,j,W,beta=0.3,BCFrate=0.02,GFrate=0.01): pR=[] pV=[] w_0=np.array(W[1:])*100 mR,mV,alc=-100,-100,W[1] ''' 投资方案为20%—80% 变化幅度为5% ''' for w in range(20,80,1): c=abs(w-w_0[0])*BCFrate+(1-w-w_0[1])*GFrate portfolioR=w*BCdf['Avg_ret'][i]+(100-w)*Gdf['Avg_ret'][j]-c portfolioV=w*BCdf['EW_var'][i]+(100-w)*Gdf['EW_var'][j]+w*(100-w)*BCdf['cov'][i] if portfolioV < beta: if mR$\mu=E(R)=\sum w_i(X_i-S_i)$ $\sigma^2=Var(R)=\sum\sum w_iw_jcov(X_i,X_j)$where:* $\mu|组合收益率$* $\sigma^2|组合风险率$* $w_i|第i项的投资额$* $X_i|第i项的收益率$* $S_i|第i项的交易费率$pR,pV,eP=portfolio(900,1000,[0,0.5,0.5]) plt.figure(dpi=100) plt.scatter(pV,pR,label='allocation_line') plt.scatter(eP[1],eP[0],label='efficient_point') plt.xlabel('Risk') plt.ylabel('Expected_return') plt.legend() eP BCdf.describe() def simulate(epoch,tradeGap,beta=0.3,BCFrate=0.02,GFrate=0.01): j=0 state=[[1000,0,0]] transactionFee=[] W=[] w=[1,0,0] for i in range(1,epoch): #print(Gdf['Date'][j],BCdf['Date'][i]) #计算总资产 total=state[i-1][0]+state[i-1][1]*(1+BCdf['Return'][i])+state[i-1][2]*(1+Gdf['Return'][j]) #分配策略 if i>20 and i%tradeGap==0: pR,pV,eP=portfolio(i,j,w,beta) #配比(和为1) if eP[2]!=0: w=[0,eP[2],1-eP[2]] else: 
w=[1,0,0] cash=w[0]*total bc=w[1]*total gold=w[2]*total #交易费 bcFee=abs(bc-state[i-1][1])*BCFrate goldFee=abs(gold-state[i-1][2])*GFrate transactionFee.append(bcFee+goldFee) #结余资产 total=total-bcFee-goldFee bc=total*w[1] gold=w[2]*total cash=total-bc-gold state.append([cash,bc,gold]) W.append(w) #保持日期同步 if BCdf['Date'][i]>=Gdf['Date'][j]: j=j+1 return transactionFee,W,state transactionFee,W,state=simulate(epochs,29,0.49,0.04,0.02)200%|5749.531074105381100%|4271.741445766016 50%|2619.119064059438 | |transactionCost|Value||---|---|---||200%|5749.53|11648.55||100%|4271.74|20320.91||50%|2619.12|26812.17||10%|618.24|33452.79|sum(transactionFee) state=np.array(state) len(state) asset=pd.DataFrame() asset['Date']=BCdf['Date'] asset['Property']=state.sum(axis=1) profit=[0] for i in range(len(asset)-1): profit.append((asset['Property'][i+1]-asset['Property'][i])/asset['Property'][i]) asset['profit_rate']=profit asset=asset.set_index('Date') def maximum_drawdown(asset): return (asset['Property'][asset['Property'].idxmax()]-asset['Property'][asset['Property'][asset['Property'].idxmax():].idxmin()])/asset['Property'][asset['Property'].idxmax()] def std(asset): return asset['profit_rate'].std() #def sharp(asset): # (asset['Property'][len(asset)-1]-asset['Property'][0])/asset['Property'][0]/asset['Property'] df2017=pd.DataFrame(asset[['Property','profit_rate']]['2017']) df2018=pd.DataFrame(asset[['Property','profit_rate']]['2018']) df2019=pd.DataFrame(asset[['Property','profit_rate']]['2019']) df2020=pd.DataFrame(asset[['Property','profit_rate']]['2020']) df2021=pd.DataFrame(asset[['Property','profit_rate']]['2021']) df2017 x=[] x.append(maximum_drawdown(df2017)) x.append(maximum_drawdown(df2018)) x.append(maximum_drawdown(df2019)) x.append(maximum_drawdown(df2020)) x.append(maximum_drawdown(df2021)) x|Date|maximum_drawdown||---|---||2017|23.9%||2018|50.7%||2019|29.6%||2020|0%||2021|33.3%plt.plot(range(2017,2022,1),x) year_std=[] year_std.append(std(df2017)) year_std.append(std(df2018)) year_std.append(std(df2019)) year_std.append(std(df2020)) year_std.append(std(df2021)) year_std plt.plot(range(2017,2022,1),year_std)|Date|std||---|---||2017|3.46%||2018|1.47%||2019|1.95%||2020|2.29%||2021|2.76%colors = ['#ff9999','#9999ff','#cc1234'] plt.figure(dpi=150,figsize=(10, 3)) plt.stackplot(BCdf['Date'], state[:,0],state[:,1],state[:,2], # 可变参数,接受多个y labels = ['cash','bitcoin','gold'], # 定义各区块面积的含义 #colors = colors # 设置各区块的填充色 ) ax=plt.gca() #ax.xaxis.set_major_locator(MultipleLocator(90)) #ax.xaxis.set_tick_params(rotation=90) plt.xlabel('Date') plt.ylabel('Total Value') plt.legend(loc = 'upper left')$\frac{收益}{本金}^{-\frac{持有天数}{365}}$res=pd.DataFrame() res['Date']=BCdf['Date'] res['Property']=state.sum(axis=1) profit=[0] for i in range(len(res)-1): profit.append((res['Property'][i+1]-res['Property'][i])/res['Property'][i]) res['profit_rate']=profit res['profit_rate'].std() res['Date'][1000]-res['Date'][1] pred_acc=[] for i in range(20,len(Gdf)-1): pred_acc.append(Gdf['pred_ret'][i]*Gdf['Return'][i+1]) pred_acc=np.array(pred_acc) len(pred_acc[pred_acc>=0])/len(pred_acc) def test(epochs,gap,beta,BCFrate=0.02,GFrate=0.01): transactionFee,W,state=simulate(epochs,gap,beta,BCFrate,GFrate) state=np.array(state) asset=pd.DataFrame() asset['Date']=BCdf['Date'] asset['Property']=state.sum(axis=1) profit=[0] for i in range(len(asset)-1): profit.append((asset['Property'][i+1]-asset['Property'][i])/asset['Property'][i]) asset['profit_rate']=profit return 
asset['profit_rate'].std(),sum(transactionFee),state.sum(axis=1)[len(state)-1],(asset['Property'][asset['Property'].idxmax()]-asset['Property'][asset['Property'][asset['Property'].idxmax():].idxmin()])/asset['Property'][asset['Property'].idxmax()] cycle_test_inf=[] for i in range(1,35,2): profit_rate_std,transactionFee,property_sum,maximum_drawdown=test(len(BCdf),i,0.49) cycle_test_inf.append([i,profit_rate_std,transactionFee,property_sum,maximum_drawdown]) cycle_test_inf=np.array(cycle_test_inf) #plt.plot(range(1,35,2),test_inf[:,3],label='Value') fig = plt.figure(dpi=100) ax1 = fig.add_subplot() ax1.plot(range(1,35,2),cycle_test_inf[:,3],label='Value') ax1.set_xlabel('Trading Cycle(days)') ax1.set_ylabel('Total Value') ax1.legend() #ax2 = ax1.twinx() #ax2.plot(range(1,35,2),cycle_test_inf[:,4],label='Maximum Drawdowm',c='r') #x2.set_ylabel('Maximum Drawdowm') #fig.legend(loc='upper left') #ax2.legend(loc='upper left') test_inf=[] for i in range(1,100,1): profit_rate_std,transactionFee,property_sum,maximum_drawdown=test(len(BCdf),29,i/100) test_inf.append([i,profit_rate_std,transactionFee,property_sum,maximum_drawdown]) test_inf=np.array(test_inf) plt.figure(dpi=100) plt.plot(range(1,100,1),test_inf[:,3],label='Value') plt.xlabel('Risk threshold(%)') plt.ylabel('Total Value') plt.legend(loc = 'upper left') fig = plt.figure(dpi=100) ax1 = fig.add_subplot() ax1.plot(range(1,100,1),test_inf[:,3],label='Value') ax1.set_xlabel('Risk threshold(%)') ax1.set_ylabel('Total Value') ax2 = ax1.twinx() ax2.plot(range(1,100,1),test_inf[:,4],label='Maximum Drawdowm',c='r') ax2.set_ylabel('Maximum Drawdowm') ax1.legend() ax2.legend() profit_rate_std,transactionFee,property_sum,maximum_drawdown=test(len(BCdf),29,0.49) property_sum$w_t=第t天的投资配置比例|∈\mathbb{R}^{3X1}$ $T_t=第t天的价格$ $R_t=第t天的收益率$ $\mu =投资组合收益$ $σ^2 =投资组合的风险$ 在传统的组合投资策略中,一般使用平均收益和均方差来分布衡量一支股票的收益和风险.在本题中,我们通过投资组合来对每一个周期的的投资进行决策,而平均收益和均方差并不能有效的判断该股票在一段时期内的情况,所以,我们选择使用指数加权法进行计算 公式 其中$\beta$表示权重下降的速率,该公式计算结果类似于最近的$\frac{1}{1-\beta}组数据的均值和均方差$Example 1: Basics Begin by importing AutoMPC.import autompc as ampc import numpy as npLoading AutoMPC... Finished loading AutoMPCSystemsLet's begin by showing how to define a System. In AutoMPC, a System defines the variables of control and observation for a particular robot. Here we define `simple_sys` which has to observation variables (x and y) and one control variable (u). Optionally, the system can also include the time step at which is data is sampled for the system. Here we define the time step as 0.05 s.simple_sys = ampc.System(["x", "y"], ["u"], dt=0.05)Given a system, we can access its properties as followsprint("Observation Dimension: ", simple_sys.obs_dim) print("Observation Variables: ", simple_sys.observations) print("Control Dimension: ", simple_sys.ctrl_dim) print("Control Variables: ", simple_sys.controls)Observation Dimension: 2 Observation Variables: ['x', 'y'] Control Dimension: 1 Control Variables: ['u']TrajectoriesThe Trajectory class stores a sequence of controls and observations. Trajectories are defined with respect to a particular system.Here we define a zero trajectory for `simple_sys` with 10 time steps.traj = ampc.zeros(simple_sys, 10)There are a couple different ways to set trajectory values. 
We demonstrate a few below:traj[0, "x"] = 1.0 # Set x to 1 at timestep 0 traj[1, "u"] = 2.0 # Set u to 2 at timestep 1 traj[2].obs[:] = np.array([3.0, 4.0]) # Set the observation (x and y) to [3,4] at timestep 2 traj[3].ctrl[:] = np.array([5.0]) # Set the control (u) to [5] at timestep 3Similarly, there are a number of reading trajectory values.print("Value of y at timestep 2: ", traj[2, "y"]) print("Observation at timestep 0: ", traj[0].obs) print("Control at timestep 1: ", traj[1].ctrl)Value of y at timestep 2: 4.0 Observation at timestep 0: [1. 0.] Control at timestep 1: [2.]We can also access the entire set of observations and controls for a trajectory as numpy arrays:print("Observations") print("------------") print(traj.obs) print("") print("Controls") print("--------") print(traj.ctrls)Observations ------------ [[1. 0.] [0. 0.] [3. 4.] [0. 0.] [0. 0.] [0. 0.] [0. 0.] [0. 0.] [0. 0.] [0. 0.]] Controls -------- [[0.] [2.] [0.] [5.] [0.] [0.] [0.] [0.] [0.] [0.]]Exercise 1.01 Loading the dataset and creating the target Tujuan dari latihan ini adalah untuk:- Dapatkan dataset csv dari repositori pembelajaran mesin UCI- Muat kumpulan data ke dalam memori sebagai kerangka data panda- Identifikasi target yang tepat untuk diprediksi, dan buat kolom numerik biner dari target Download the datasetKunjungi https://archive.ics.uci.edu/ml/datasets/Online+Shoppers+Purchasing+Intention+Dataset dan download file dengan nama `online_shoppers_intention.csv`. Buatlah folder dengan nama `data` and letahkkan file tadi di dalam folder ini.Data terletak di file `online_shoppers_intention.csv` dan informasi tentang data tersebut disediakan secara online di situs web UCI di sini: https://archive.ics.uci.edu/ml/datasets/Online+Shoppers+Purchasing+Intention+Dataset.import pandas as pd data = pd.read_csv('../data/online_shoppers_intention.csv') data.head(n=20)Looks good, let's see how many rows and columns we havedata.shape print(f'There are {data.shape[0]} rows and {data.shape[1]} columns')There are 12330 rows and 18 columnsLooks like we have a given output variable which we can use as our taret, it is the 1st column, "Class". This seems like a intuitive, and most importantly useful, target to predict for, whether ther. If we were working for a bank this would be a great variable to predict!Sepertinya kita memiliki variabel output tertentu yang dapat kita gunakan sebagai target kita, yaitu kolom 1, "Class". Ini adalah target intuitif, dan yang akan diprediksi. Jika kita bekerja do bank, Class ini akan menjadi variabel yang bagus untuk diprediksi!feats = data.drop('Revenue', axis=1) target = data['Revenue'] print(f'Features table has {feats.shape[0]} rows and {feats.shape[1]} columns') print(f'Target table has {target.shape[0]} rows')Features table has 12330 rows and 17 columns Target table has 12330 rowsLooks good, let's save these as csvs for later.target.head()Save the data out to csv filesfeats.to_csv('../data/OSI_feats.csv', index=False) target.to_csv('../data/OSI_target.csv', header='Revenue', index=False)Connect to a databaseIn order to get data from a database with pandas, you first need to be able to connect to one. In this exercise, you'll practice creating a database engine to manage connections to a database, data.db. To do this, you'll use sqlalchemy's create_engine() function.create_engine() needs a string URL to the database. 
For SQLite databases, that string consists of "sqlite:///", then the database file name.# Import sqlalchemy's create_engine() function from sqlalchemy import create_engine # Create the database engine engine = create_engine('sqlite:///' + '../datasets/data.db') # View the tables in the database print(engine.table_names())['boro_census', 'hpd311calls', 'weather']Load entire tablesIn the last exercise, you saw that data.db has two tables. weather has historical weather data for New York City. hpd311calls is a subset of call records made to the city's 311 help line about housing issues.In this exercise, you'll use the read_sql() function in pandas to load both tables. read_sql() accepts a string of either a SQL query to run, or a table to load. It also needs a way to connect to the database, like the engine in the provided code.# Load libraries import pandas as pd from sqlalchemy import create_engine # Create the database engine engine = create_engine('sqlite:///' + '../datasets/data.db') # Load hpd311calls without any SQL hpd_calls = pd.read_sql('hpd311calls', engine) # View the first few rows of data print(hpd_calls.head()) # Create the database engine engine = create_engine('sqlite:///' + '../datasets/data.db') # Create a SQL query to load the entire weather table query = """ SELECT * FROM weather; """ # Load weather with the SQL query weather = pd.read_sql(query, engine) # View the first few rows of data print(weather.head())station name latitude longitude elevation \ 0 USW00094728 NY CITY CENTRAL PARK, NY US 40.77898 -73.96925 42.7 1 USW00094728 NY CITY CENTRAL PARK, NY US 40.77898 -73.96925 42.7 2 USW00094728 NY CITY CENTRAL PARK, NY US 40.77898 -73.96925 42.7 3 USW00094728 NY CITY CENTRAL PARK, NY US 40.77898 -73.96925 42.7 4 USW00094728 NY CITY CENTRAL PARK, NY US 40.77898 -73.96925 42.7 date month awnd prcp snow tavg tmax tmin 0 12/01/2017 December 5.37 0.00 0.0 52 42 1 12/02/2017 December 3.13 0.00 0.0 48 39 2 12/03/2017 December 2.01 0.00 0.0 48 42 3 12/04/2017 December 3.58 0.00 0.0 51 40 4 12/05/2017 December 6.71 0.75 0.0 61 50Selecting columns with SQLDatasets can contain columns that are not required for an analysis, like the weather table in data.db does. Some, such as elevation, are redundant, since all observations occurred at the same place, while others contain variables we are not interested in. After making a database engine, you'll write a query to SELECT only the date and temperature columns, and pass both to read_sql() to make a data frame of high and low temperature readings.pandas has been loaded as pd, and create_engine() has been imported from sqlalchemy.Note: The SQL checker is quite picky about column positions and expects fields to be selected in the specified order.# Create database engine for data.db engine = create_engine('sqlite:///' + '../datasets/data.db') # Write query to get date, tmax, and tmin from weather query = """ SELECT date, tmax, tmin FROM weather; """ # Make a data frame by passing query and engine to read_sql() temperatures = pd.read_sql(query, engine) # View the resulting data frame (temperatures)Selecting rowsSQL WHERE clauses return records whose values meet the given criteria. Passing such a query to read_sql() results in a data frame loaded with only records we are interested in, so there is less filtering to do later on.The hpd311calls table in data.db has data on calls about various housing issues, from maintenance problems to information requests. 
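As a quick illustration of that point (a sketch reusing the engine from the previous exercises; the 'BRONX' filter value is purely illustrative), pushing the condition into the query returns the same records as filtering after loading, while transferring less data:
# Filter inside the database with a WHERE clause
bronx_sql = pd.read_sql("SELECT * FROM hpd311calls WHERE borough = 'BRONX';", engine)
# Equivalent result, but the whole table is loaded first and filtered in pandas
bronx_all = pd.read_sql('hpd311calls', engine)
bronx_pd = bronx_all[bronx_all['borough'] == 'BRONX']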
In this exercise, you'll use SQL to focus on calls about safety.pandas has been loaded as pd, and a database engine, engine, has been created for data.db.import matplotlib.pyplot as plt # Create query to get hpd311calls records about safety query = """ SELECT * FROM hpd311calls WHERE complaint_type = 'SAFETY'; """ # Query the database and assign result to safety_calls safety_calls = pd.read_sql(query, engine) # Graph the number of safety calls by borough call_counts = safety_calls.groupby('borough').unique_key.count() call_counts.plot.barh() plt.show()Filtering on multiple conditionsSo far, you've selectively imported records that met a single condition, but it's also common to filter datasets on multiple criteria. In this exercise, you'll do just that.The weather table contains daily high and low temperatures and precipitation amounts for New York City. Let's focus on inclement weather, where there was either an inch or more of snow or the high was at or below freezing (32° Fahrenheit). To do this, you'll need to build a query that uses the OR operator to look at values in both columns.pandas is loaded as pd, and a database engine, engine, has been created.# Create query for records with max temps <= 32 or snow >= 1 query = """ SELECT * FROM weather WHERE tmax <= 32 OR snow >= 1; """ # Query database and assign result to wintry_days wintry_days = pd.read_sql(query, engine) # View summary stats about the temperatures wintry_days.describe()Getting distinct valuesSometimes an analysis doesn't need every record, but rather unique values in one or more columns. Duplicate values can be removed after loading data into a data frame, but it can also be done at import with SQL's DISTINCT keyword.Since hpd311calls contains data about housing issues, we would expect most records to have a borough listed. Let's test this assumption by querying unique complaint_type/borough combinations.pandas has been imported as pd, and the database engine has been created as engine.Note: The SQL checker is quite picky about column positions and expects fields to be selected in the specified order.# Create query for unique combinations of borough and complaint_type query = """ SELECT DISTINCT borough, complaint_type FROM hpd311calls; """ # Load results of query to a data frame issues_and_boros = pd.read_sql(query, engine) # Check assumption about issues and boroughs print(issues_and_boros)borough complaint_type 0 BRONX HEAT/HOT WATER 1 MANHATTAN PLUMBING 2 MANHATTAN HEAT/HOT WATER 3 BROOKLYN HEAT/HOT WATER 4 QUEENS HEAT/HOT WATER .. ... ... 60 MANHATTAN OUTSIDE BUILDING 61 MANHATTAN ELEVATOR 62 BROOKLYN OUTSIDE BUILDING 63 STATEN ISLAND SAFETY 64 STATEN ISLAND OUTSIDE BUILDING [65 rows x 2 columns]Counting in groupsIn previous exercises, you pulled data from tables, then summarized the resulting data frames in pandas to create graphs. By using COUNT and GROUP BY in a SQL query, we can pull those summary figures from the database directly.The hpd311calls table has a column, complaint_type, that categorizes call records by issue, such as heating or plumbing. 
In order to graph call volumes by issue, you'll write a SQL query that COUNTs records by complaint type.pandas has been imported as pd, and the database engine for data.db has been created as engine.# Create query to get call counts by complaint_type query = """ SELECT complaint_type, COUNT(*) FROM hpd311calls GROUP BY complaint_type; """ # Create data frame of call counts by issue calls_by_issue = pd.read_sql(query, engine) # Graph the number of calls for each housing issue calls_by_issue.plot.barh(x="complaint_type") plt.show()Working with aggregate functionsIf a table contains data with higher granularity than is needed for an analysis, it can make sense to summarize the data with SQL aggregate functions before importing it. For example, if you have data of flood event counts by month but precipitation data by day, you may decide to SUM precipitation by month.The weather table contains daily readings for four months. In this exercise, you'll practice summarizing weather by month with the MAX, MIN, and SUM functions.pandas has been loaded as pd, and a database engine, engine, has been created.# Create a query to get month and max tmax by month query = """ SELECT month, MAX(tmax) FROM weather GROUP BY month;""" # Get data frame of monthly weather stats weather_by_month = pd.read_sql(query, engine) # View weather stats by month (weather_by_month) # Create a query to get month, max tmax, and min tmin by month query = """ SELECT month, MAX(tmax), MIN(tmin) FROM weather GROUP BY month; """ # Get data frame of monthly weather stats weather_by_month = pd.read_sql(query, engine) # View weather stats by month (weather_by_month) # Create query to get temperature and precipitation by month query = """ SELECT month, MAX(tmax), MIN(tmin), SUM(prcp) FROM weather GROUP BY month; """ # Get data frame of monthly weather stats weather_by_month = pd.read_sql(query, engine) # View weather stats by month (weather_by_month)Joining tablesTables in relational databases usually have key columns of unique record identifiers. This lets us build pipelines that combine tables using SQL's JOIN operation, instead of having to combine data after importing it.The records in hpd311calls often concern issues, like leaks or heating problems, that are exacerbated by weather conditions. In this exercise, you'll join weather data to call records along their common date columns to get everything in one data frame. You can assume these columns have the same data type.pandas is loaded as pd, and the database engine, engine, has been created.Note: The SQL checker is picky about join table order -- it expects specific tables on the left and the right.# Query to join weather to call records by date columns query = """ SELECT * FROM hpd311calls JOIN weather ON hpd311calls.created_date = weather.date; """ # Create data frame of joined tables calls_with_weather = pd.read_sql(query, engine) # View the data frame to make sure all columns were joined (calls_with_weather.head())Joining and filteringJust as you might not always want all the data in a single table, you might not want all columns and rows that result from a JOIN. In this exercise, you'll use SQL to refine a data import.Weather exacerbates some housing problems more than others. Your task is to focus on water leak reports in hpd311calls and assemble a dataset that includes the day's precipitation levels from weather to see if there is any relationship between the two. 
The provided SQL gets all columns in hpd311calls, but you'll need to modify it to get the necessary weather column and filter rows with a WHERE clause.pandas is loaded as pd, and the database engine, engine, has been created.# Query to get hpd311calls and precipitation values query = """ SELECT hpd311calls.*, weather.prcp FROM hpd311calls JOIN weather ON hpd311calls.created_date = weather.date;""" # Load query results into the leak_calls data frame leak_calls = pd.read_sql(query, engine) # View the data frame (leak_calls.head()) # Query to get water leak calls and daily precipitation query = """ SELECT hpd311calls.*, weather.prcp FROM hpd311calls JOIN weather ON hpd311calls.created_date = weather.date WHERE hpd311calls.complaint_type = 'WATER LEAK';""" # Load query results into the leak_calls data frame leak_calls = pd.read_sql(query, engine) # View the data frame print(leak_calls.head())Joining, filtering, and aggregatingIn this exercise, you'll use what you've learned to assemble a dataset to investigate how the number of heating complaints to New York City's 311 line varies with temperature.In addition to the hpd311calls table, data.db has a weather table with daily high and low temperature readings for NYC. We want to get each day's count of heat/hot water calls with temperatures joined in. This can be done in one query, which we'll build in parts.In part one, we'll get just the data we want from hpd311calls. Then, in part two, we'll modify the query to join in weather data.pandas has been imported as pd, and the database engine has been created as engine.# Query to get heat/hot water call counts by created_date query = """ SELECT hpd311calls.created_date, COUNT(*) FROM hpd311calls WHERE hpd311calls.complaint_type == 'HEAT/HOT WATE' GROUP BY hpd311calls.created_date; """ # Query database and save results as df df = pd.read_sql(query, engine) # View first 5 records print(df.head()) # Modify query to join tmax and tmin from weather by date query = """ SELECT hpd311calls.created_date, COUNT(*), weather.tmax, weather.tmin FROM hpd311calls JOIN weather ON hpd311calls.created_date = weather.date WHERE hpd311calls.complaint_type = 'HEAT/HOT WATER' GROUP BY hpd311calls.created_date; """ # Query database and save results as df df = pd.read_sql(query, engine) # View first 5 records (df.head())Python for Bioinformatics-----------------------------![title](https://s3.amazonaws.com/py4bio/tapabiosmall.png)This Jupyter notebook is intented to be used alongside the book [Python for Bioinformatics](http://py3.us/) Chapter 23: DNA Mutations with Restrictions------------- **Note:** The code in this chapter needs Biopython 1.69. It was not included in the first print of the book because this Biopython version was not available. The full source code in the [GitHub repository](https://github.com/Serulab/Py4Bio). This version is adapted to work in Jupyter Notebook.!pip install biopython '''Given a DNA sequence of a polypeptide, this program generates alternative DNA sequences that code for the same polypeptide but that can be sorted out by DNA restriction. 
Requires Biopython >= 1.69 Author: ()''' from Bio import Seq from Bio.Alphabet import IUPAC from Bio import Restriction from Bio.Data import CodonTable from jinja2 import Template TABLE_ID = 1 ALLOWED_MUTATIONS = 2 mutation_tpl = """Original Sequence: {{dna_input}} Peptide: {{ori_pep}} Restriction map for original sequence: {{ori_map}} ========================= {% for item in bakpeps_out %} Original sequence enzymes: {{oname}} {{item.graph}} Proposed sequence enzymes: {{item.pames}} Enzimes only in original sequence: {{item.o}} Enzimes only in proposed sequence: {{item.p}} ========================= {% endfor %}""" def backtrans(ori_pep, table_id=1): """ Function to make backtranslation (from peptide to DNA) This function needs the peptide sequence and the code of translation table. Code number is the same as posted in: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi """ def recurs(order, pos): for letter in bt[order[pos]]: if pos == len(order) - 1: yield letter continue for prox in recurs(order, pos+1): yield (letter + prox) def combine(order): ordened = set() for frase in recurs(order, 0): ordened.add(frase) return ordened t = CodonTable.generic_by_id[table_id] bt = dict() for a1 in "ATCG" : for a2 in "ATCG" : for a3 in "ATCG" : codon = a1 + a2 + a3 try: amino = t.forward_table[codon] except KeyError: assert codon in t.stop_codons continue try: bt[amino].append(codon) except KeyError: bt[amino] = [codon] return list(combine(ori_pep)) def seqcomp(s1, s2): """ Compares 2 sequences and returns a value with how many differents elements they have. """ p = len(s1) for x,y in zip(s1, s2): # Walk through 2 sequences. if x==y: p -= 1 return p input_dna = input('Enter DNA sequennce: ').upper() dna = Seq.Seq(input_dna, IUPAC.unambiguous_dna) # Translate DNA sequence. ori_pep = dna.translate() # Get all backtranslations. bakpeps = backtrans(ori_pep, TABLE_ID) # Make a restriction analysis for the orignal sequence. analysis = Restriction.Analysis(Restriction.CommOnly, dna) analysis.print_as('map') ori_map = analysis.format_output() # Store the enzymes that cut in the original sequence. enz = list(analysis.with_sites().keys()) # Get a string out of the enzyme list, for printing. oname = str(enz)[1:-1] enz = set(enz) bakpeps_out = [] for bakpep in bakpeps: tmp_d = {} if bakpep not in input_dna: # Make a restriction analysis for each sequence. analysis = Restriction.Analysis(Restriction.CommOnly, Seq.Seq(bakpep, IUPAC.unambiguous_dna)) # Store the enzymes that cut in this sequence. enz_tmp = list(analysis.with_sites().keys()) enz_tmp = set(enz_tmp) # Get the number of mutations in backpep sequence. y = seqcomp(input_dna, bakpep) if enz_tmp != enz and enz and y <= ALLOWED_MUTATIONS: analysis.print_as('map') tmp_d['pames'] = str(enz_tmp)[1:-1] tmp_d['graph'] = analysis.format_output() tmp_d['ori_seq'] = str(list(enz.difference( enz_tmp)))[1:-1] tmp_d['proposed_seq'] = str(list( enz_tmp.difference(enz)))[1:-1] bakpeps_out.append(tmp_d) template = Template(mutation_tpl) render = template.render(dna_input=input_dna, ori_pep=ori_pep, ori_map=ori_map, oname=oname, bakpeps_out=bakpeps_out) print(render)Enter DNA sequennce: atacgctgtcatgca Original Sequence: ATACGCTGTCATGCA Peptide: IRCHA Restriction map for original sequence: 4 AbaSI | | 10 FatI | | | |11 CviAII | || | ||12 FaiI | ||| | ||| 14 Hin1II NlaIII FaeI HpyCH4V Hsp92II | ||| | ATACGCTGTCATGCA ||||||||||||||| TATGCGACAGTACGT 1 15 Enzymes which do not cut the sequence. 
FspEI AccII AciI AfaI AluBI AluI AoxI AspLEI BfaI BfuCI Bsh1236I BshFI BsiSI BsnI Bsp143I BspACI BspANI BspFNI BssMI BstFNI BstHHI BstKTI BstMBI BstUI BsuRI CfoI Csp6I CviJI CviKI_1 CviQI DpnI DpnII F[...]Mass spectrometry dataThe objective of this exercise is to read in raw peptide MSMS spectrum information and output a dataframe.The .msp file can be downloaded [here](https://chemdata.nist.gov/download/peptide_library/libraries/cptaclib/2015/cptac2_mouse_hcd_selected.msp.tar.gz).The information in this ASCII based text file is organized spectrum by spectrum.The first line per spectrum provides formatted like this: Name: sequence/charge_nmods_collisionenergyfollowed by a comment section which can be disregarded and the actual spectrum data which is tab-separated: m/z intensity additional_infoSpectra are separated by an empty line.Code a function that returns two DataFrames or arrays containing the processed and filtered data. The first one should contain the spectrum information (n_spectra, n_m/z_features) and the second one the sequences per row (n_spectra).Here are some general guidelines:* The m/z values need to be binned to integer values (mathematically rounded), otherwise the dataframe size would get out of hand. This will allow for multiple values mapped to a single bin (e.g. if there are peaks at 145.1 and 145.2). Here, only the maximum of those peaks should be kept in the final dataframe.* Rows that are all-zero should be dropped.Your function should allow for selecting a range on the x-axis (m/z-range). All peaks outside this range can be disregarded. Furthermore, only spectra within a set collision energy range and a maximum sequence length should be contained in the output dataframe.The faster your function runs, the better. I will time them all in the end. Made with &10084;&65039;  by Michelle and meimport numpy as np import pandas as pd import timeit import plotly.express as px from pathlib import Path pd.set_option("max_columns",3000) def msp_to_df( input_file, max_seq_len=30, min_ce=36, max_ce=40, mz_min=135, mz_max=1400, ): """ Function to read spectrum data from .msp file and convert to dataframe. 
Args: input_file (str): path to .msp file max_seq_len (int): maximum acceptable sequence length min_ce (int): minimum collision energy of spectra to be included in df max_ce (int): maximum collision energy of spectra to be included in df mz_min (int): lower boundary for m/z to be included in df mz_max (int): lower boundary for m/z to be included in df Returns: df (pd.DataFrame or np.array): spectrum information within defined parameters [n_spectra, n_features] seqs (pd.DataFrame or np.array): sequences """ df = pd.DataFrame(columns=range(mz_min,mz_max)) seqs = [] with open(input_file, "r") as file: continue_to_next_name = False index_counter = -1 for line in file: if "Name" in line: continue_to_next_name = False split = line.split(" ")[1].split("/") name = split[0] ce = split[1].rsplit("_")[-1].replace("eV","") ce = float(ce) if not min_ce < ce < max_ce: continue_to_next_name = True else: seqs.append(name) index_counter += 1 df.loc[index_counter] = np.zeros(df.shape[1]) if continue_to_next_name == True: continue if any(substring in line for substring in ["MW","Comment","Num peaks","Name"]): continue if line == "\n": continue else: split2 = line.split("\t") mz = round(float(split2[0])) intensity = float(split2[1]) if mz_min < mz < mz_max: if intensity > df.at[index_counter,mz]: df.at[index_counter,mz] = intensity df = df.loc[~(df==0).all(axis=1)] return df, seqs df,seqs = msp_to_df("../../data/cptac2_mouse_hcd_selected.msp") def create_plots(df,seqs, directory): Path(directory).mkdir(parents=True, exist_ok=True) df = df.copy() df["Name"] = seqs df = pd.melt(df, id_vars=["Name"], var_name="mz", value_name="Intensity") for name in set(df["Name"]): subdf = df[df["Name"]==name].copy() fig = px.line(subdf,x="mz",y="Intensity",title=name) fig.write_image(f"{directory}/"+name+".png") #create_plots(df,seqs,"plots")Simple Deep Neural Network#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.This notebook demonstrates a simple `Deep Neural Network` (DNN) fitting to a very small data set of numbers that follow a pattern. It's meant to demonstrate how to setup a simple DNN using Tensorflow.#!pip install tensorflow==2.5.0; import tensorflow as tf import numpy as np from tensorflow import kerasThe `x` values below are inputs to the network. The `y` values can be considered outputs. The DNN attempts to learn the relationship or policy present between the two sets of numbers (for example, the equation of a line `y = mx + b` could be considered a policy).In `equation_model` below, the dataset models `y = mx + b` where `m = 2` and `b = 1`.def slope_of_a_line_model(): """ Trains a very simple DNN on data that represents y=mx+b, where m=2 and b=1. Input: None Output: The trained y=mx+b DNN model. 
""" # Define a custom callback so that we can stop training at 99% accuracy class myCallbacks(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=()): if logs.get('loss')<=0.01: print("Reached <= 0.1 loss, ending training!") self.model.stop_training = True callbacks = myCallbacks() # The input dataset xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) ys = np.array([3.0, 5.0, 7.0, 9.0, 11.0, 13.0], dtype=float) # An extremely simple DNN model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(xs, ys, epochs=1000, callbacks=[callbacks]) return model # Save the model for use model = slope_of_a_line_model() input_values = (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0) correct_outputs = (3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0) predictions = [] for index, input_value in enumerate(input_values): new_prediction = model.predict([input_value]) predictions.append(new_prediction[0]) print("Input: {}\nOutput: {}\nCorrect Output: {}\n".format(value, new_prediction[0][0], correct_outputs[index])) from matplotlib import pyplot as plt %matplotlib inline x = input_values y = predictions fig = plt.figure() ax=fig.add_axes([0,0,1,1]) ax.plot(x,y) ax.set_title("DNN Predicted Y-Value for given X-Value") ax.set_xlabel('x-coordinates') ax.set_ylabel('y-coordinates')Task 2: Reflicting on merge types Task 2.1 - TableThe first part of this question is to fill out a table in words. For each merge type, write a sentence or two to explain each merge type. Table To Fill In:| Merge Type | Explanation || ---------- | ----------- || Left join | Your sentence here || Right join | Your sentence here || Outer join | Your sentence here || Inner join | Your sentence here || Cross join | Your sentence here | Task 2.2 - VisualizationThe second part of this question is to find (with citation) or create a diagram to explain the merge types visually.Find (or create/sketch) a visualization/graphic/picture that makes sense to you on the different types of join, and explain it (in words) so you can crystallize your understanding of the merge types and how they work practically.**Note: Please clearly cite and reference any graphic with a link and some text that you choose to use from external sources.**# Your Solution Here (Switch this to a markdown cell, and make sure you include an image using `![](img.png)`)EMBEDDING_DIM = 100the embedding layer requires the sepcificiation of the vocabulary size. 
the size of the real valuie vector spaceand max length of input documentEmbedding(vocab_size, EMBEDDING_DIM, input_length=max_length)# from keras.models import Sequential from keras.layers import Dense,Embedding, LSTM, GRU from keras.layers.embeddings import Embedding model = Sequential() EMBEDDING_DIM = 100 model.add(Embedding(vocab_size,EMBEDDING_DIM, input_length=max_length)) model.add(GRU(units=32, dropout=.2, recurrent_dropout=.2)) model.add(Dense(1,activation='sigmoid')) #try using different optimizer and different optimizer configs model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # model.fit(X_train_pad, y_train_em) importChapter 5 ディープラーニングの理論* 5.5 行列と行列積 リスト 5.14 NumPyを使った行列の積import numpy as np a = np.array([[0, 1, 2], [2, 1, 0]]) b = np.array([[1, 2], [1, 2], [1, 2]]) print(np.dot(a, b))[[3 6] [3 6]]models/# layers.py from torch import nn class CBR(nn.Module): def __init__(self, ch0, ch1, bn=True, sample='down', activation=nn.ReLU(True), dropout=False): super().__init__() self.bn = bn self.activation = activation self.dropout = dropout if sample=='down': self.c = nn.Conv2d(ch0, ch1, 4, 2, 1) else: self.c = nn.ConvTranspose2d(ch0, ch1, 4, 2, 1) if bn: self.batchnorm = nn.BatchNorm2d(ch1, affine=True) if dropout: self.Dropout = nn.Dropout() def forward(self, x): h = self.c(x) if self.bn: h = self.batchnorm(h) if self.dropout: h = self.Dropout(h) if not self.activation is None: h = self.activation(h) return h class UpSamplePixelShuffle(nn.Module): def __init__(self, in_ch, out_ch, up_scale=2, activation=nn.ReLU(True)): super().__init__() self.activation = activation self.c = nn.Conv2d(in_channels=in_ch, out_channels=out_ch*up_scale*up_scale, kernel_size=3, stride=1, padding=1, bias=False) self.ps = nn.PixelShuffle(up_scale) def forward(self, x): h = self.c(x) h = self.ps(h) if not self.activation is None: h = self.activation(h) return h # models_utils.py def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm2d') != -1 or classname.find('InstanceNorm2d') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) def print_network(net): num_params = 0 for param in net.parameters(): num_params += param.numel() print(net) print('Total number of parameters: %d' % num_params)models/dis# dis.py import torch import torch.nn as nn from torch.autograd import Variable import numpy as np from collections import OrderedDict class _Discriminator(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.in_ch = in_ch self.c0_0 = CBR(in_ch, 32, bn=False, sample='down', activation=nn.LeakyReLU(0.2, True), dropout=False) self.c0_1 = CBR(out_ch, 32, bn=False, sample='down', activation=nn.LeakyReLU(0.2, True), dropout=False) self.c1 = CBR(64, 128, bn=True, sample='down', activation=nn.LeakyReLU(0.2, True), dropout=False) self.c2 = CBR(128, 256, bn=True, sample='down', activation=nn.LeakyReLU(0.2, True), dropout=False) self.c3 = CBR(256, 512, bn=True, sample='down', activation=nn.LeakyReLU(0.2, True), dropout=False) self.c4 = nn.Conv2d(512, 1, 3, 1, 1) def forward(self, x): x_0 = x[:, :self.in_ch] x_1 = x[:, self.in_ch:] h = torch.cat((self.c0_0(x_0), self.c0_1(x_1)), 1) h = self.c1(h) h = self.c2(h) h = self.c3(h) h = self.c4(h) return h class Discriminator(nn.Module): def __init__(self, in_ch, out_ch, gpu_ids): super().__init__() self.gpu_ids = gpu_ids self.dis = nn.Sequential(OrderedDict([('dis', _Discriminator(in_ch, out_ch))])) self.dis.apply(weights_init) 
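# Note: weights_init, defined in models_utils.py above, re-initializes the Conv and
# BatchNorm parameters of the wrapped _Discriminator; the one-entry Sequential/OrderedDict
# wrapper simply names the submodule 'dis', which becomes the key prefix in saved state_dicts.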
def forward(self, x): if self.gpu_ids: return nn.parallel.data_parallel(self.dis, x, self.gpu_ids) else: return self.dis(x)models/gen# SPANet import torch from torch import nn import torch.nn.functional as F from collections import OrderedDict # import common ###### Layer def conv1x1(in_channels, out_channels, stride = 1): return nn.Conv2d(in_channels,out_channels,kernel_size = 1, stride =stride, padding=0,bias=False) def conv3x3(in_channels, out_channels, stride = 1): return nn.Conv2d(in_channels,out_channels,kernel_size = 3, stride =stride, padding=1,bias=False) class Bottleneck(nn.Module): def __init__(self,in_channels,out_channels,): super(Bottleneck,self).__init__() m = OrderedDict() m['conv1'] = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) m['relu1'] = nn.ReLU(True) m['conv2'] = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=2, bias=False,dilation=2) m['relu2'] = nn.ReLU(True) m['conv3'] = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False) self.group1 = nn.Sequential(m) self.relu= nn.Sequential(nn.ReLU(True)) def forward(self, x): out = self.group1(x) return out class irnn_layer(nn.Module): def __init__(self,in_channels): super(irnn_layer,self).__init__() self.left_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0) self.right_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0) self.up_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0) self.down_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0) def forward(self,x): _,_,H,W = x.shape top_left = x.clone() top_right = x.clone() top_up = x.clone() top_down = x.clone() top_left[:,:,:,1:] = F.relu(self.left_weight(x)[:,:,:,:W-1]+x[:,:,:,1:],inplace=False) top_right[:,:,:,:-1] = F.relu(self.right_weight(x)[:,:,:,1:]+x[:,:,:,:W-1],inplace=False) top_up[:,:,1:,:] = F.relu(self.up_weight(x)[:,:,:H-1,:]+x[:,:,1:,:],inplace=False) top_down[:,:,:-1,:] = F.relu(self.down_weight(x)[:,:,1:,:]+x[:,:,:H-1,:],inplace=False) return (top_up,top_right,top_down,top_left) class Attention(nn.Module): def __init__(self,in_channels): super(Attention,self).__init__() self.out_channels = int(in_channels/2) self.conv1 = nn.Conv2d(in_channels,self.out_channels,kernel_size=3,padding=1,stride=1) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(self.out_channels,self.out_channels,kernel_size=3,padding=1,stride=1) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(self.out_channels,4,kernel_size=1,padding=0,stride=1) self.sigmod = nn.Sigmoid() def forward(self,x): out = self.conv1(x) out = self.relu1(out) out = self.conv2(out) out = self.relu2(out) out = self.conv3(out) out = self.sigmod(out) return out class SAM(nn.Module): def __init__(self,in_channels,out_channels,attention=1): super(SAM,self).__init__() self.out_channels = out_channels self.irnn1 = irnn_layer(self.out_channels) self.irnn2 = irnn_layer(self.out_channels) self.conv_in = conv3x3(in_channels,self.out_channels) self.relu1 = nn.ReLU(True) self.conv1 = nn.Conv2d(self.out_channels,self.out_channels,kernel_size=1,stride=1,padding=0) self.conv2 = nn.Conv2d(self.out_channels*4,self.out_channels,kernel_size=1,stride=1,padding=0) self.conv3 = nn.Conv2d(self.out_channels*4,self.out_channels,kernel_size=1,stride=1,padding=0) self.relu2 = nn.ReLU(True) self.attention = attention if self.attention: self.attention_layer = Attention(in_channels) self.conv_out = conv1x1(self.out_channels,1) 
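# conv_out collapses the recurrent features to a single channel; together with the
# sigmoid defined just below, it produces the spatial attention mask that forward() returns.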
self.sigmod = nn.Sigmoid() def forward(self,x): if self.attention: weight = self.attention_layer(x) out = self.conv1(x) top_up,top_right,top_down,top_left = self.irnn1(out) # direction attention if self.attention: top_up.mul(weight[:,0:1,:,:]) top_right.mul(weight[:,1:2,:,:]) top_down.mul(weight[:,2:3,:,:]) top_left.mul(weight[:,3:4,:,:]) out = torch.cat([top_up,top_right,top_down,top_left],dim=1) out = self.conv2(out) top_up,top_right,top_down,top_left = self.irnn2(out) # direction attention if self.attention: top_up.mul(weight[:,0:1,:,:]) top_right.mul(weight[:,1:2,:,:]) top_down.mul(weight[:,2:3,:,:]) top_left.mul(weight[:,3:4,:,:]) out = torch.cat([top_up,top_right,top_down,top_left],dim=1) out = self.conv3(out) out = self.relu2(out) mask = self.sigmod(self.conv_out(out)) return mask ###### Network class SPANet(nn.Module): def __init__(self): super(SPANet,self).__init__() self.conv_in = nn.Sequential( conv3x3(3,32), nn.ReLU(True) ) self.SAM1 = SAM(32,32,1) self.res_block1 = Bottleneck(32,32) self.res_block2 = Bottleneck(32,32) self.res_block3 = Bottleneck(32,32) self.res_block4 = Bottleneck(32,32) self.res_block5 = Bottleneck(32,32) self.res_block6 = Bottleneck(32,32) self.res_block7 = Bottleneck(32,32) self.res_block8 = Bottleneck(32,32) self.res_block9 = Bottleneck(32,32) self.res_block10 = Bottleneck(32,32) self.res_block11 = Bottleneck(32,32) self.res_block12 = Bottleneck(32,32) self.res_block13 = Bottleneck(32,32) self.res_block14 = Bottleneck(32,32) self.res_block15 = Bottleneck(32,32) self.res_block16 = Bottleneck(32,32) self.res_block17 = Bottleneck(32,32) self.conv_out = nn.Sequential( conv3x3(32,3) ) def forward(self, x): out = self.conv_in(x) out = F.relu(self.res_block1(out) + out) out = F.relu(self.res_block2(out) + out) out = F.relu(self.res_block3(out) + out) Attention1 = self.SAM1(out) out = F.relu(self.res_block4(out) * Attention1 + out) out = F.relu(self.res_block5(out) * Attention1 + out) out = F.relu(self.res_block6(out) * Attention1 + out) Attention2 = self.SAM1(out) out = F.relu(self.res_block7(out) * Attention2 + out) out = F.relu(self.res_block8(out) * Attention2 + out) out = F.relu(self.res_block9(out) * Attention2 + out) Attention3 = self.SAM1(out) out = F.relu(self.res_block10(out) * Attention3 + out) out = F.relu(self.res_block11(out) * Attention3 + out) out = F.relu(self.res_block12(out) * Attention3 + out) Attention4 = self.SAM1(out) out = F.relu(self.res_block13(out) * Attention4 + out) out = F.relu(self.res_block14(out) * Attention4 + out) out = F.relu(self.res_block15(out) * Attention4 + out) out = F.relu(self.res_block16(out) + out) out = F.relu(self.res_block17(out) + out) out = self.conv_out(out) return Attention4 , out class Generator(nn.Module): def __init__(self, gpu_ids): super().__init__() self.gpu_ids = gpu_ids self.gen = nn.Sequential(OrderedDict([('gen', SPANet())])) self.gen.apply(weights_init) def forward(self, x): if self.gpu_ids: return nn.parallel.data_parallel(self.gen, x, self.gpu_ids) else: return self.gen(x)Main directory (without subdirectories)# data_manager.py import glob import cv2 import random import numpy as np import pickle import os from torch.utils import data class TrainDataset(data.Dataset): def __init__(self, config): super().__init__() self.config = config train_list_file = os.path.join(config.datasets_dir, config.train_list) # 如果数据集尚未分割,则进行训练集和测试集的分割 if not os.path.exists(train_list_file) or os.path.getsize(train_list_file) == 0: files = os.listdir(os.path.join(config.datasets_dir, 'ground_truth')) 
random.shuffle(files) n_train = int(config.train_size * len(files)) train_list = files[:n_train] test_list = files[n_train:] np.savetxt(os.path.join(config.datasets_dir, config.train_list), np.array(train_list), fmt='%s') np.savetxt(os.path.join(config.datasets_dir, config.test_list), np.array(test_list), fmt='%s') self.imlist = np.loadtxt(train_list_file, str) def __getitem__(self, index): t = cv2.imread(os.path.join(self.config.datasets_dir, 'ground_truth', str(self.imlist[index])), 1).astype(np.float32) x = cv2.imread(os.path.join(self.config.datasets_dir, 'cloudy_image', str(self.imlist[index])), 1).astype(np.float32) M = np.clip((t-x).sum(axis=2), 0, 1).astype(np.float32) x = x / 255 t = t / 255 x = x.transpose(2, 0, 1) t = t.transpose(2, 0, 1) return x, t, M def __len__(self): return len(self.imlist) class TestDataset(data.Dataset): def __init__(self, test_dir, in_ch, out_ch): super().__init__() self.test_dir = test_dir self.in_ch = in_ch self.out_ch = out_ch self.test_files = os.listdir(os.path.join(test_dir, 'cloudy_image')) def __getitem__(self, index): filename = os.path.basename(self.test_files[index]) x = cv2.imread(os.path.join(self.test_dir, 'cloudy_image', filename), 1).astype(np.float32) x = x / 255 x = x.transpose(2, 0, 1) return x, filename def __len__(self): return len(self.test_files) # utils.py import os import cv2 import random import numpy as np import torch from torch.backends import cudnn def gpu_manage(config): if config.cuda: os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, config.gpu_ids)) config.gpu_ids = list(range(len(config.gpu_ids))) # print(os.environ['CUDA_VISIBLE_DEVICES']) if config.manualSeed is None: config.manualSeed = random.randint(1, 10000) print('Random Seed: ', config.manualSeed) random.seed(config.manualSeed) torch.manual_seed(config.manualSeed) if config.cuda: torch.cuda.manual_seed_all(config.manualSeed) cudnn.benchmark = True if torch.cuda.is_available() and not config.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") def save_image(out_dir, x, num, epoch, filename=None): test_dir = os.path.join(out_dir, 'epoch_{0:04d}'.format(epoch)) if filename is not None: test_path = os.path.join(test_dir, filename) else: test_path = os.path.join(test_dir, 'test_{0:04d}.png'.format(num)) if not os.path.exists(test_dir): os.makedirs(test_dir) cv2.imwrite(test_path, x) def checkpoint(config, epoch, gen, dis): model_dir = os.path.join(config.out_dir, 'models') if not os.path.exists(model_dir): os.makedirs(model_dir) net_gen_model_out_path = os.path.join(model_dir, 'gen_model_epoch_{}.pth'.format(epoch)) net_dis_model_out_path = os.path.join(model_dir, 'dis_model_epoch_{}.pth'.format(epoch)) torch.save(gen.state_dict(), net_gen_model_out_path) torch.save(dis.state_dict(), net_dis_model_out_path) print("Checkpoint saved to {}".format(model_dir)) def make_manager(): if not os.path.exists('.job'): os.makedirs('.job') with open('.job/job.txt', 'w', encoding='UTF-8') as f: f.write('0') def job_increment(): with open('.job/job.txt', 'r', encoding='UTF-8') as f: n_job = f.read() n_job = int(n_job) with open('.job/job.txt', 'w', encoding='UTF-8') as f: f.write(str(n_job + 1)) return n_job def heatmap(img): if len(img.shape) == 3: b,h,w = img.shape heat = np.zeros((b,3,h,w)).astype('uint8') for i in range(b): heat[i,:,:,:] = np.transpose(cv2.applyColorMap(img[i,:,:],cv2.COLORMAP_JET),(2,0,1)) else: b,c,h,w = img.shape heat = np.zeros((b,3,h,w)).astype('uint8') for i in range(b): heat[i,:,:,:] = 
np.transpose(cv2.applyColorMap(img[i,0,:,:],cv2.COLORMAP_JET),(2,0,1)) return heat def save_attention_as_heatmap(filename, att): att_heat = heatmap(att) cv2.imwrite(filename, att_heat) print(filename, 'saved') # eval.py import numpy as np from skimage.metrics import structural_similarity as SSIM from torch.autograd import Variable def test(config, test_data_loader, gen, criterionMSE, epoch): avg_mse = 0 avg_psnr = 0 avg_ssim = 0 for i, batch in enumerate(test_data_loader): x, t = Variable(batch[0]), Variable(batch[1]) if config.cuda: x = x.cuda(0) t = t.cuda(0) att, out = gen(x) if epoch % config.snapshot_interval == 0: h = 1 w = 3 c = 3 width = config.width height = config.height allim = np.zeros((h, w, c, width, height)) x_ = x.cpu().numpy()[0] t_ = t.cpu().numpy()[0] out_ = out.cpu().numpy()[0] in_rgb = x_[:3] t_rgb = t_[:3] out_rgb = np.clip(out_[:3], 0, 1) allim[0, 0, :] = in_rgb * 255 allim[0, 1, :] = out_rgb * 255 allim[0, 2, :] = t_rgb * 255 allim = allim.transpose(0, 3, 1, 4, 2) allim = allim.reshape((h*height, w*width, c)) save_image(config.out_dir, allim, i, epoch) mse = criterionMSE(out, t) psnr = 10 * np.log10(1 / mse.item()) img1 = np.tensordot(out.cpu().numpy()[0, :3].transpose(1, 2, 0), [0.298912, 0.586611, 0.114478], axes=1) img2 = np.tensordot(t.cpu().numpy()[0, :3].transpose(1, 2, 0), [0.298912, 0.586611, 0.114478], axes=1) ssim = SSIM(img1, img2) avg_mse += mse.item() avg_psnr += psnr avg_ssim += ssim avg_mse = avg_mse / len(test_data_loader) avg_psnr = avg_psnr / len(test_data_loader) avg_ssim = avg_ssim / len(test_data_loader) print("===> Avg. MSE: {:.4f}".format(avg_mse)) print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr)) print("===> Avg. SSIM: {:.4f} dB".format(avg_ssim)) log_test = {} log_test['epoch'] = epoch log_test['mse'] = avg_mse log_test['psnr'] = avg_psnr log_test['ssim'] = avg_ssim return log_test # log_report import json import os import numpy as np import matplotlib as mpl mpl.use('Agg') from matplotlib import pyplot as plt class LogReport(): def __init__(self, log_dir, log_name='log'): self.log_dir = log_dir self.log_name = log_name self.log_ = [] def __call__(self, log): self.log_.append(log) with open(os.path.join(self.log_dir, self.log_name), 'w', encoding='UTF-8') as f: json.dump(self.log_, f, indent=4) def save_lossgraph(self): epoch = [] gen_loss = [] dis_loss = [] for l in self.log_: epoch.append(l['epoch']) gen_loss.append(l['gen/loss']) dis_loss.append(l['dis/loss']) epoch = np.asarray(epoch) gen_loss = np.asarray(gen_loss) dis_loss = np.asarray(dis_loss) plt.plot(epoch, gen_loss) plt.xlabel('epoch') plt.ylabel('loss_gen') plt.savefig(os.path.join(self.log_dir, 'lossgraph_gen.pdf')) plt.close() plt.plot(epoch, dis_loss) plt.xlabel('epoch') plt.ylabel('loss_dis') plt.savefig(os.path.join(self.log_dir, 'lossgraph_dis.pdf')) plt.close() class TestReport(): def __init__(self, log_dir, log_name='log_test'): self.log_dir = log_dir self.log_name = log_name self.log_ = [] def __call__(self, log): self.log_.append(log) with open(os.path.join(self.log_dir, self.log_name), 'w', encoding='UTF-8') as f: json.dump(self.log_, f, indent=4) def save_lossgraph(self): epoch = [] mse = [] psnr = [] for l in self.log_: epoch.append(l['epoch']) mse.append(l['mse']) psnr.append(l['psnr']) epoch = np.asarray(epoch) mse = np.asarray(mse) psnr = np.asarray(psnr) plt.plot(epoch, mse) plt.xlabel('epoch') plt.ylabel('mse') plt.savefig(os.path.join(self.log_dir, 'graph_mse.pdf')) plt.close() plt.plot(epoch, psnr) plt.xlabel('epoch') plt.ylabel('psnr') 
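# Write the PSNR curve next to the MSE curve in the log directory, then close the figure.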
plt.savefig(os.path.join(self.log_dir, 'graph_psnr.pdf')) plt.close() import os import random import shutil import yaml from attrdict import AttrMap import time import torch from torch import nn from torch.backends import cudnn from torch import optim from torch.utils.data import DataLoader from torch.autograd import Variable from torch.nn import functional as F def train(config): gpu_manage(config) ### DATASET LOAD ### print('===> Loading datasets') dataset = TrainDataset(config) print('dataset:', len(dataset)) train_size = int((1 - config.validation_size) * len(dataset)) validation_size = len(dataset) - train_size train_dataset, validation_dataset = torch.utils.data.random_split(dataset, [train_size, validation_size]) print('train dataset:', len(train_dataset)) print('validation dataset:', len(validation_dataset)) training_data_loader = DataLoader(dataset=train_dataset, num_workers=config.threads, batch_size=config.batchsize, shuffle=True) validation_data_loader = DataLoader(dataset=validation_dataset, num_workers=config.threads, batch_size=config.validation_batchsize, shuffle=False) ### MODELS LOAD ### print('===> Loading models') gen = Generator(gpu_ids=config.gpu_ids) if config.gen_init is not None: param = torch.load(config.gen_init) gen.load_state_dict(param) print('load {} as pretrained model'.format(config.gen_init)) dis = Discriminator(in_ch=config.in_ch, out_ch=config.out_ch, gpu_ids=config.gpu_ids) if config.dis_init is not None: param = torch.load(config.dis_init) dis.load_state_dict(param) print('load {} as pretrained model'.format(config.dis_init)) # setup optimizer opt_gen = optim.Adam(gen.parameters(), lr=config.lr, betas=(config.beta1, 0.999), weight_decay=0.00001) opt_dis = optim.Adam(dis.parameters(), lr=config.lr, betas=(config.beta1, 0.999), weight_decay=0.00001) real_a = torch.FloatTensor(config.batchsize, config.in_ch, config.width, config.height) real_b = torch.FloatTensor(config.batchsize, config.out_ch, config.width, config.height) M = torch.FloatTensor(config.batchsize, config.width, config.height) criterionL1 = nn.L1Loss() criterionMSE = nn.MSELoss() criterionSoftplus = nn.Softplus() if config.cuda: gen = gen.cuda() dis = dis.cuda() criterionL1 = criterionL1.cuda() criterionMSE = criterionMSE.cuda() criterionSoftplus = criterionSoftplus.cuda() real_a = real_a.cuda() real_b = real_b.cuda() M = M.cuda() real_a = Variable(real_a) real_b = Variable(real_b) logreport = LogReport(log_dir=config.out_dir) validationreport = TestReport(log_dir=config.out_dir) print('===> begin') start_time=time.time() # main for epoch in range(1, config.epoch + 1): epoch_start_time = time.time() for iteration, batch in enumerate(training_data_loader, 1): real_a_cpu, real_b_cpu, M_cpu = batch[0], batch[1], batch[2] real_a.data.resize_(real_a_cpu.size()).copy_(real_a_cpu) real_b.data.resize_(real_b_cpu.size()).copy_(real_b_cpu) M.data.resize_(M_cpu.size()).copy_(M_cpu) att, fake_b = gen.forward(real_a) ################ ### Update D ### ################ opt_dis.zero_grad() # train with fake fake_ab = torch.cat((real_a, fake_b), 1) pred_fake = dis.forward(fake_ab.detach()) batchsize, _, w, h = pred_fake.size() loss_d_fake = torch.sum(criterionSoftplus(pred_fake)) / batchsize / w / h # train with real real_ab = torch.cat((real_a, real_b), 1) pred_real = dis.forward(real_ab) loss_d_real = torch.sum(criterionSoftplus(-pred_real)) / batchsize / w / h # Combined loss loss_d = loss_d_fake + loss_d_real loss_d.backward() if epoch % config.minimax == 0: opt_dis.step() ################ ### Update G 
### ################ opt_gen.zero_grad() # First, G(A) should fake the discriminator fake_ab = torch.cat((real_a, fake_b), 1) pred_fake = dis.forward(fake_ab) loss_g_gan = torch.sum(criterionSoftplus(-pred_fake)) / batchsize / w / h # Second, G(A) = B loss_g_l1 = criterionL1(fake_b, real_b) * config.lamb loss_g_att = criterionMSE(att[:,0,:,:], M) loss_g = loss_g_gan + loss_g_l1 + loss_g_att loss_g.backward() opt_gen.step() # log if iteration % 10 == 0: print("===> Epoch[{}]({}/{}): loss_d_fake: {:.4f} loss_d_real: {:.4f} loss_g_gan: {:.4f} loss_g_l1: {:.4f}".format( epoch, iteration, len(training_data_loader), loss_d_fake.item(), loss_d_real.item(), loss_g_gan.item(), loss_g_l1.item())) log = {} log['epoch'] = epoch log['iteration'] = len(training_data_loader) * (epoch-1) + iteration log['gen/loss'] = loss_g.item() log['dis/loss'] = loss_d.item() logreport(log) print('epoch', epoch, 'finished, use time', time.time() - epoch_start_time) with torch.no_grad(): log_validation = test(config, validation_data_loader, gen, criterionMSE, epoch) validationreport(log_validation) print('validation finished') if epoch % config.snapshot_interval == 0: checkpoint(config, epoch, gen, dis) logreport.save_lossgraph() validationreport.save_lossgraph() print('training time:', time.time() - start_time) def train_spanet(): config = AttrMap(config_dict) make_manager() n_job = job_increment() config.out_dir = os.path.join(config.out_dir, '{:06}'.format(n_job)) os.makedirs(config.out_dir) print('Job number: {:04d}'.format(n_job)) # 保存本次训练时的配置 shutil.copyfile('config.yml', os.path.join(config.out_dir, 'config.yml')) train(config)Trainingconfig_dict = { # folder path of dataset, which includes 2 subfolders: cloudy_image/ and ground_truth/ 'datasets_dir': './data/RICE_DATASET/RICE1/', 'train_list': 'train_list.txt', 'test_list': 'test_list.txt', 'out_dir': 'results', # edit if there is a GPU 'cuda': False, 'gpu_ids': None, # gpu_ids: [0] 'train_size': 0.8, 'validation_size': 0.2, 'batchsize': 1, 'validation_batchsize': 1, 'epoch': 200, 'n_data': 300, 'width': 512, 'height': 512, 'threads': 4, 'lr': 0.0004, 'beta1': 0.5, 'lamb': 100, 'minimax': 1, 'gen_init': None, 'dis_init': None, 'in_ch': 3, 'out_ch': 3, 'manualSeed': 0, 'snapshot_interval': 50 } train_spanet()Job number: 0028 Random Seed: 0 ===> Loading datasets dataset: 400 train dataset: 320 validation dataset: 80 ===> Loading models ===> beginSetup enviroment Installs packages and gets data so that it is easy to spin up notebooks at places like colab or sagemaker# tiledb deps !pip install tiledbCollecting tiledb Downloading tiledb-0.10.4-cp37-cp37m-manylinux2010_x86_64.whl (17.5 MB)  |████████████████████████████████| 17.5 MB 13 kB/s [?25hRequirement already satisfied: numpy>=1.16.5 in /usr/local/lib/python3.7/dist-packages (from tiledb) (1.19.5) Installing collected packages: tiledb Successfully installed tiledb-0.10.4Set user options# Setup how much synthetic data is generated import numpy as np n_seconds = 3600 # One hour sampling_rate = 10 # in Hz A.K.A. 
SPS sample_tolerence = np.timedelta64(1, 'ms') # allow this much jitter without creating a data section start_dt = np.datetime64('2000-01-01') attempted_time_dim_array = "attempted_time_dim_array" time_attr_array = "time_attr_array" sparse_array_time_dim_array = "sparse_array_time_dim_array"Create synthetic data for testingimport numpy as np # Create and write data def create_tone(freq, time, amplitude=1.0, phase = 0.0): return amplitude*np.sin(2*np.pi*freq*time + phase) t = np.arange(0, n_seconds, 1.0 / sampling_rate) def create_tone_int16(freq, time, amplitude=1.0, phase = 0.0): return np.int16(32768*amplitude*np.sin(2*np.pi*freq*time + phase)) data = { 't' : start_dt + t * np.timedelta64(1000, 'ms'), # TODO do I need to us nanosecs here? 'x' : create_tone_int16(freq=1.01, time=t, amplitude=0.1), } print(f"For {n_seconds} esconds @ {sampling_rate} SPS created: {data['x'].shape[0]} samples")For 3600 esconds @ 10 SPS created: 36000 samplesNaive approch for dimensionimport tiledb # Always remove and recreate local arrays if tiledb.array_exists(attempted_time_dim_array): tiledb.remove(attempted_time_dim_array) print(f"Removed existing {attempted_time_dim_array}") # Create the dimensions d1 = tiledb.Dim(name="time", domain=(start_dt, start_dt + np.timedelta64(n_seconds, 's')), tile = n_seconds*sampling_rate, dtype=np.datetime64('', 'ms').dtype) # defines resolution I think # Create a domain using the dimension dom1 = tiledb.Domain(d1) # Create an attribute t = tiledb.Attr(name="t", dtype=np.datetime64('', 'ms').dtype) # Have to specify resolution x = tiledb.Attr(name="x", dtype=np.int16) # datetime in tileDB ref: https://tiledb-inc-tiledb.readthedocs-hosted.com/en/1.6.3/tutorials/datetimes.html # Create the array schema, setting `sparse=False` to indicate a dense array schema1 = tiledb.ArraySchema(domain=dom1, sparse=False, attrs=[t, x]) tiledb.Array.create(attempted_time_dim_array, schema1) # Write data to tiles try: write_info_1 = {} with tiledb.open(attempted_time_dim_array, 'w') as A: A[:] = data write_info_1 = A.last_write_info print(f"attempted_time_dim_array write information: {write_info_1}") except tiledb.TileDBError as e: print(f"Got expected error: {e}") !du -h {attempted_time_dim_array}4.0K attempted_time_dim_array/__meta 8.0K attempted_time_dim_array/__schema 16K attempted_time_dim_arrayUse time attributeimport tiledb # Always remove and recreate local arrays if tiledb.array_exists(time_attr_array): tiledb.remove(time_attr_array) print(f"Removed existing {time_attr_array}") # Create the dimensions d1 = tiledb.Dim(name="sample_num", domain=(0, data['t'].shape[0]-1), # minus one probably a zero based thing dtype=np.uint64) # Create a domain using the dimension dom1 = tiledb.Domain(d1) # Create an attribute t = tiledb.Attr(name="t", dtype=np.datetime64('', 'ns').dtype) # Have to specify resolution x = tiledb.Attr(name="x", dtype=np.int16) # Create the array schema, setting `sparse=False` to indicate a dense array schema1 = tiledb.ArraySchema(domain=dom1, sparse=False, attrs=[t, x]) tiledb.Array.create(time_attr_array, schema1) # Write data to tiles write_info_1 = {} with tiledb.open(time_attr_array, 'w') as A: A[:] = data write_info_1 = A.last_write_info print(f"time_attr_array write information: {write_info_1}") !du -h {time_attr_array}4.0K time_attr_array/__meta 8.0K time_attr_array/__schema 364K time_attr_array/__1636412533243_1636412533243_0b48cd0635594a2489cd7bc9579298ed_10 380K time_attr_arraySpare array with time dimension Chat from tileDB Slack: 12:38 PMHello again team. 
I am trying to understand how to use datetime dimensions properly for a dense array``` Trying to create an array to hold 1 second of 100Hz datansecs = 1data = np.arange(100)start_dt = np.datetime64('2000-01-01')d1 = tiledb.Dim(name="time", domain=(start_dt, start_dt + np.timedelta64(n_seconds, 's')), dtype=np.datetime64(10, 'ms').dtype) how do I specify 10ms period?dom1 = tiledb.Domain(d1)x = tiledb.Attr(name="x", dtype=np.int16)schema1 = tiledb.ArraySchema(domain=dom1, sparse=False, attrs=[x])tiledb.Array.create("time_example", schema1)This seems fine but it expecting data for every ms rather than every 10mswith tiledb.open("time_example", 'w') as A: A[:] = data Raises: TileDBError: [TileDB::Writer] Error: Buffer sizes check failed; Invalid number of cells given for attribute 'x' (100 != 1001)(edited)white_check_markeyesraised_hands``` 12:46 PMjoined tiledb-embedded.dirk 1:04 PMMatt: TileDB maps the NumPy datetime resolutions, cf https://numpy.org/doc/stable/reference/arrays.datetime.html which has secs, millisecs, microsecs, ... nanosesc, ... but not every power of ten in between. You can however use a sparse array and have datetimes (at ms resolution) and then write a record every 10 ms. 7:47 PMHi @, quick follow-up on that — Dirk is right, there’s no support for multiples of integral units, but sparse works well. (you can get interval spacing over a datetime range with some fiddling in numpy, as below, but it would be easier with pandas, b/c pandas supports frequencies). Here’s a quick modification of your example to use sparse:```import tiledb, numpy as np\ Trying to create an array to hold 1 second of 100Hz datansecs = 1data = np.arange(100)start_dt = np.datetime64('2000-01-01')end_dt = start_dt + np.timedelta64(nsecs, 's')coords = np.linspace(start_dt.astype('M8[ms]').astype('int64'), end_dt.astype('M8[ms]').astype('int64'), 100).astype('M8[ms]')d1 = tiledb.Dim(name="time", domain=(start_dt, end_dt), dtype=np.datetime64(1, 'ms').dtype) how do I specify 10ms period?dom1 = tiledb.Domain(d1)x = tiledb.Attr(name="x", dtype=np.int16)schema1 = tiledb.ArraySchema(domain=dom1, sparse=True, attrs=[x]) sparse -> Truetiledb.Array.create("time_example", schema1)with tiledb.open("time_example", 'w') as A: A[coords.tolist()] = data``` Codeimport tiledb # Always remove and recreate local arrays if tiledb.array_exists(sparse_array_time_dim_array): tiledb.remove(sparse_array_time_dim_array) print(f"Removed existing {sparse_array_time_dim_array}") # Create the dimensions d1 = tiledb.Dim(name="time", domain=(start_dt, start_dt + np.timedelta64(n_seconds, 's')), tile = n_seconds*sampling_rate, dtype=np.datetime64('', 'ms').dtype) # defines resolution I think # Create a domain using the dimension dom1 = tiledb.Domain(d1) # Create an attribute t = tiledb.Attr(name="t", dtype=np.datetime64('', 'ms').dtype) # Have to specify resolution x = tiledb.Attr(name="x", dtype=np.int16) # datetime in tileDB ref: https://tiledb-inc-tiledb.readthedocs-hosted.com/en/1.6.3/tutorials/datetimes.html schema1 = tiledb.ArraySchema(domain=dom1, sparse=True, attrs=[t, x]) tiledb.Array.create(sparse_array_time_dim_array, schema1) # Write data to tiles end_dt = start_dt + np.timedelta64(n_seconds, 's') coords = np.linspace(start_dt.astype('M8[ms]').astype('int64'), end_dt.astype('M8[ms]').astype('int64'), n_seconds*sampling_rate).astype('M8[ms]') write_info_1 = {} with tiledb.open(sparse_array_time_dim_array, 'w') as A: A[coords] = data write_info_1 = A.last_write_info print(f"time_attr_array write information: {write_info_1}") 
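# Inspect the on-disk size of the written data (note that the du command below still
# points at time_attr_array from the earlier cell rather than sparse_array_time_dim_array).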
!du -h {time_attr_array}Minimally Sufficient Pandas - All sources referenced from Ted Petrou Github repositoryimport pandas as pd df = pd.read_csv('https://raw.githubusercontent.com/lalitgarg12/python-exercises/master/sample_data.csv',index_col=0) df**Selection with the brackets**Placing a column name in the brackets appended to a DataFrame selects a single column of a DataFrame as a Series.df['state']**Selection with dot notation**Alternatively, you may select a single column with dot notation. Simply, place the name of the column after the dot operator. The output is the exact same as above.df.state**Issues with the dot notation**There are three issues with using dot notation. It doesn’t work in the following situations:- When there are spaces in the column name- When the column name is the same as a DataFrame method- When the column name is a variabledf.favorite foodYou can only use the brackets to select columns with spaces.df['favorite food']**The column name is the same as a DataFrame method**When a column name and a DataFrame method collide, Pandas will always reference the method and not the column name. For instance, the column namecount is a method and will be referenced when using dot notation. This actually doesn’t produce an error as Python allows you to reference methods without calling them. Let’s reference this method now.df.countRegardless, it’s clear that using dot notation did not select a single column of the DataFrame as a Series. Again, you must use the brackets when selecting a column with the same name as a DataFrame method.df['count']**Guidance: Use the brackets for selecting a column of data**The dot notation provides no additional functionality over the brackets and does not work in all situations. Therefore, I never use it. Its single advantage is three fewer keystrokes. **Performance comparison iloc vs iat vs NumPy**Let’s compare the perfomance of selecting a single cell with iloc, iat and a NumPy array. Here we create a NumPy array with 100k rows and 5 columns containing random data. We then create a DataFrame out of it and make the selections.import numpy as np a = np.random.rand(10 ** 5, 5) df1 = pd.DataFrame(a) row = 50000 col = 3 %timeit df1.iloc[row, col] %timeit df1.iat[row, col] %timeit a[row, col]The slowest run took 70.95 times longer than the fastest. This could mean that an intermediate result is being cached. 10000000 loops, best of 3: 176 ns per loop**Guidance:** Use **NumPy** arrays if your application relies on performance for selecting a single cell of data and **not at or iat**. **Method Duplication**There are multiple methods in Pandas that do the exact same thing. Whenever two methods share the same exact underlying functionality, we say that they are aliases of each other. Having duplication in a library is completely unnecessary, pollutes the namespace and forces analysts to remember one more bit of information about a library.This next section covers several instances of duplication along with other instances of methods that are very similar to one another. 
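Before the duplication examples, one concrete illustration of the performance guidance above. A small sketch (the `df1` here is regenerated with random values, so timings and numbers will differ) of pulling out the underlying NumPy array once and indexing it directly when many single-cell lookups are needed:

```
import numpy as np
import pandas as pd

# Same shape of data as the timing example above.
a = np.random.rand(10 ** 5, 5)
df1 = pd.DataFrame(a)

# Extract the underlying NumPy array once, then index it directly;
# this avoids the per-call overhead of .iloc/.iat on repeated lookups.
values = df1.to_numpy()
row, col = 50_000, 3
cell = values[row, col]
```

For a one-off lookup the difference rarely matters; it pays off when millions of single-cell reads happen inside a tight loop.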
**read_csv vs read_table duplication**college = pd.read_csv('https://raw.githubusercontent.com/lalitgarg12/python-exercises/master/college.csv') college.head() college2 = pd.read_table('https://raw.githubusercontent.com/lalitgarg12/python-exercises/master/college.csv', delimiter=',') college.equals(college2)**Guidance: Only use read_csv to read in delimitted text files** **isna vs isnull and notna vs notnull**The isna and isnull methods both determine whether each value in the DataFrame is missing or not. The result will always be a DataFrame (or Series) of all boolean values.college_isna = college.isna() college_isnull = college.isnull() college_isna.equals(college_isnull)**Guidance: Use only isna and notna****Contagem de itens numa coleção usando o objeto *Counter* do módulo *collections*.**from collections import Counter dic = {'a':1,'b':3,'c':2,'d':3} lista = ['casa', 'melancia', 'inerente', 'pessoa', 'casa'] texto = ''.join(lista) #Converte uma lista numa string única print(texto)casamelanciainerentepessoacasa--- **Contando todos os itens***(o resultado é um dicionário ordenado pelos itens mais recorrentes ou de maior valor)*print(Counter(dic)) print(Counter(lista)) print(Counter(texto))Counter({'b': 3, 'd': 3, 'c': 2, 'a': 1}) Counter({'casa': 2, 'melancia': 1, 'inerente': 1, 'pessoa': 1}) Counter({'a': 7, 'e': 5, 's': 4, 'c': 3, 'n': 3, 'i': 2, 'm': 1, 'l': 1, 'r': 1, 't': 1, 'p': 1, 'o': 1})--- **Contando os 3 itens mais recorrentes***(o resultado é uma lista ordenada de tuplas onde cada uma contém o item e sua quantidade)*print(Counter(dic).most_common(3)) print(Counter(lista).most_common(3)) print(Counter(texto).most_common(3))[('b', 3), ('d', 3), ('c', 2)] [('casa', 2), ('melancia', 1), ('inerente', 1)] [('a', 7), ('e', 5), ('s', 4)]--- **Filtrando o segundo item mais recorrente**print(Counter(dic).most_common(2)[1][0]) print(Counter(lista).most_common(2)[1][0]) print(Counter(texto).most_common(2)[1][0]) #[1][0] pega o primeiro elemento(índice 0) da segunda tupla(índice 1)d melancia eEfficient Frontier Part 2%load_ext autoreload %autoreload 2 %matplotlib inline import edhec_risk_kit as erk ind = erk.get_ind_returns() er = erk.annualize_rets(ind['1996':'2000'], 12) cov = ind['1996':'2000'].cov() def portfolio_return(weights, returns): ''' Weights -> Returns ''' return weights.T @ returns def portfolio_vol(weights, covmat): ''' Weights -> Vol ''' return (weights.T @ covmat @ weights) ** 0.5 l = ['Food', 'Beer', 'Smoke', 'Coal'] er[l] cov.loc[l, l] import numpy as np weights = np.repeat(1/4, 4) weights erk.portfolio_return(weights, er[l]) erk.portfolio_vol(weights, cov.loc[l, l])2 Asset Frontierl = ['Games', 'Fin'] n_points = 20 weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)] weights import pandas as pd rets = [erk.portfolio_return(w, er[l]) for w in weights] vols = [erk.portfolio_vol(w, cov.loc[l, l]) for w in weights] ef = pd.DataFrame({'R': rets, 'Vol': vols}) ef.plot.scatter(x='Vol', y='R') l = ['Fin', 'Beer'] erk.plot_ef2(25, er[l], cov.loc[l, l], style='.')1. 读入数据并了解自变量含义dataRaw <- read.csv('simudata.csv') head(dataRaw)所有自变量的意义如下:age 年龄cardnum 银行卡数creded 借贷比率billnum 交易笔数meanpay 所有行为均值maxpay 所有行为最大值xiaofeiF 消费类消费频率jinkaF 金卡类消费频率youxiM 游戏类消费金额平均值debitM 借记类消费金额平均值debitF 借记类消费频率gongjiaoR 公缴类最近一次消费gongjiaoF 公缴类消费频率gongjiaoM 公缴类金额平均值zhongxingR 中型银行最近一次消费zhongxingF 中型银行消费频率zhongxingM 中型银行金额平均值sidaR 四大行最近一次消费sidaF 四大行消费频率sidaM 四大行金额平均值zhuanzhangR 转账类最近一次消费zhuanzhangF 转账类消费频率zhuanzhangM 转账类金额平均值xindaiR 信贷类最近一次消费xindaiF 信贷类消费频率xindaiS 信贷类消费波动性 2. 
多种机器学习模型进行建模set.seed(1234) SELECT <- sample(0:nrow(dataRaw), size=nrow(dataRaw)) dataTrain <- dataRaw[SELECT[1:(nrow(dataRaw)*0.7)],] dataTest <- dataRaw[SELECT[-(1:(nrow(dataRaw)*0.7))],] # dataTrain$black <- as.factor(dataTrain$black)- Logistic RegressionlogModel <- glm(data=dataTrain, formula=black~., family='binomial') logPred <- predict(object=logModel, newdata=dataTest, type='response') logROC <- roc(dataTest$black, logPred, plot.roc=T) ggplot(mapping=aes(x=1-logROC$specificities, y=logROC$sensitivities)) + geom_line( color='red') + geom_line( color='blue', aes(x=logROC$specificities,y=logROC$specificities)) + labs( x='FPR', y='TPR', title=paste('AUC:',logROC$auc))Setting levels: control = 0, case = 1 Setting direction: controls < cases- Decision Treelibrary(rpart) treeModel <- rpart(data=dataTrain, formula=black~., method='class') treePred <- predict(object=treeModel, newdata=dataTest)[,1] treeROC <- roc(dataTest$black, treePred) ggplot(mapping=aes(x=1-treeROC$specificities, y=treeROC$sensitivities)) + geom_line( color='red') + geom_line( color='blue', aes(x=treeROC$specificities,y=treeROC$specificities)) + labs( x='FPR', y='TPR', title=paste('AUC:',treeROC$auc))Setting levels: control = 0, case = 1 Setting direction: controls > cases- Boosting模型library(gbm) boostModel <- gbm(data=dataTrain, formula=black~.) boostPred <- predict(object=boostModel, newdata=dataTest) boostROC <- roc(as.numeric(dataTest$black), as.numeric(boostPred)) ggplot(mapping=aes(x=1-boostROC$specificities, y=boostROC$sensitivities)) + geom_line( color='red') + geom_line( color='blue', aes(x=boostROC$specificities,y=boostROC$specificities)) + labs( x='FPR', y='TPR', title=paste('AUC:',boostROC$auc))Distribution not specified, assuming bernoulli ...- Random Forestlibrary(randomForest) forestModel <- randomForest(black~., data=dataTrain, importance=T) forestPred <- predict(object=forestModel, newdata=dataTest) forestROC <- roc(dataTest$black, as.numeric(forestPred)) ggplot(mapping=aes(x=1-forestROC$specificities, y=forestROC$sensitivities)) + geom_line( color='red') + geom_line( color='blue', aes(x=forestROC$specificities,y=forestROC$specificities)) + labs( x='FPR', y='TPR', title=paste('AUC:', forestROC$auc))Warning message in randomForest.default(m, y, ...): "The response has five or fewer unique values. Are you sure you want to do regression?"Setting levels: control = 0, case = 1 Setting direction: controls < cases- SVMlibrary(e1071) svmModel <- svm(black~., data=dataTrain, importance=T) svmPred <- predict(object=svmModel, newdata=dataTest) svmROC <- roc(dataTest$black, svmPred) ggplot(mapping=aes(x=1-svmROC$specificities, y=svmROC$sensitivities)) + geom_line( color='red') + geom_line( color='blue', aes(x=svmROC$specificities,y=svmROC$specificities)) + labs( x='FPR', y='TPR', title=paste('AUC:', svmROC$auc))Warning message: "package 'e1071' was built under R version 3.6.3"Setting levels: control = 0, case = 1 Setting direction: controls < cases3. 
选择一个最优的模型ROC <- data.frame(FPR=c(1-logROC$specificities, 1-boostROC$specificities, 1-treeROC$specificities, 1-forestROC$specificities, 1-svmROC$specificities), TPR=c(logROC$sensitivities, boostROC$sensitivities, treeROC$sensitivities, forestROC$sensitivities, svmROC$sensitivities), method=c(rep('Logistic Regression', length(logROC$sensitivities)), rep('Boosting', length(boostROC$sensitivities)), rep('Decision Tree', length(treeROC$sensitivities)), rep('Random Forest', length(forestROC$sensitivities)), rep('SVM', length(svmROC$sensitivities)))) ggplot(ROC, aes(x=FPR, y=TPR, group=method, color=method)) + geom_line() + geom_abline( color='black', intercept=0, slope=1) + labs(title="ROC") AUC <- data.frame(method=c('Logistic Regression', 'Boosting', 'Decision Tree', 'Random Forest', 'SVM'), auc=c(logROC$auc, boostROC$auc, treeROC$auc, forestROC$auc, svmROC$auc)) ggplot(AUC) + geom_bar(aes(x=method, y=auc, color=method, fill=method), stat='identity')宿題2import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from collections import defaultdict data = np.loadtxt('data/digit_test0.csv', delimiter=',') data.shape data[0] img = data[0].reshape(16,16) plt.imshow(img, cmap='gray') plt.show() digit = np.array([[1]] * 200) digit.shape np.array(data, digit) np.array([data, [[0]]*200]) test = np.array([1,2,2,3,2,4,5,4]) from collections import Counter c = Counter(test) c.most_common(1)[0][0]kNN http://blog.amedama.jp/entry/2017/03/18/140238import numpy as np from collections import Counter class kNN(object): def __init__(self, k=1): self._train_data = None self._target_data = None self._k = k def fit(self, train_data, target_data): self._train_data = train_data self._target_data = target_data def predict(self, x): distances = np.array([np.linalg.norm(p - x) for p in self._train_data]) nearest_indices = distances.argsort()[:self._k] nearest_labels = self._target_data[nearest_indices] c = Counter(nearest_labels) return c.most_common(1)[0][0] def load_train_data(): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_feature = np.vstack([train_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) train_label = np.hstack([train_label, temp_label]) return train_feature, train_label def load_test_data(): for i in range(10): if i==0: test_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_label = np.array([i]*test_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_feature = np.vstack([test_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) test_label = np.hstack([test_label, temp_label]) return test_feature, test_label train_feature, train_label = load_train_data() train_feature.shape train_label.shape train_label[1900] test_feature, test_label = load_test_data() test_label.shape model = kNN() model.fit(train_feature, train_label) model._train_data.shape from sklearn.metrics import accuracy_score predicted_labels = [] for feature in test_feature: predicted_label = model.predict(feature) predicted_labels.append(predicted_label) accuracy_score(test_label, predicted_labels) len(predicted_labels) accuracy_score(test_label, predicted_labels) def calc_accuracy(train_feature, train_label, test_feature, test_label): model = kNN() model.fit(train_feature, train_label) predicted_labels = [] 
for feature in test_feature: predicted_label = model.predict(feature) predicted_labels.append(predicted_label) return accuracy_score(test_label, predicted_labels) calc_accuracy(train_feature, train_label, test_feature, test_label)高速化import numpy as np from collections import Counter def load_train_data(): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_feature = np.vstack([train_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) train_label = np.hstack([train_label, temp_label]) return train_feature, train_label def load_test_data(): for i in range(10): if i==0: test_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_label = np.array([i]*test_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_feature = np.vstack([test_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) test_label = np.hstack([test_label, temp_label]) return test_feature, test_label train_feature, train_label = load_train_data() test_feature, test_label = load_test_data() import numpy as np from collections import Counter class kNN(object): def __init__(self, k=1): self._train_data = None self._target_data = None self._k = k def fit(self, train_data, target_data): self._train_data = train_data self._target_data = target_data def predict(self, x): distances = np.array([np.linalg.norm(p - x) for p in self._train_data]) nearest_indices = distances.argsort()[:self._k] nearest_labels = self._target_data[nearest_indices] c = Counter(nearest_labels) return c.most_common(1)[0][0] predicted_labels test_feature.shape predicted_labelここまでのまとめimport numpy as np from collections import Counter from sklearn.metrics import accuracy_score class kNN(object): def __init__(self, k=1): self._train_data = None self._target_data = None self._k = k def fit(self, train_data, target_data): self._train_data = train_data self._target_data = target_data def predict(self, x): distances = np.array([np.linalg.norm(p - x) for p in self._train_data]) nearest_indices = distances.argsort()[:self._k] nearest_labels = self._target_data[nearest_indices] c = Counter(nearest_labels) return c.most_common(1)[0][0] def load_train_data(): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_feature = np.vstack([train_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) train_label = np.hstack([train_label, temp_label]) return train_feature, train_label def load_test_data(): for i in range(10): if i==0: test_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_label = np.array([i]*test_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_feature = np.vstack([test_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) test_label = np.hstack([test_label, temp_label]) return test_feature, test_label def calc_accuracy(train_feature, train_label, test_feature, test_label, k=1): model = kNN(k) model.fit(train_feature, train_label) predicted_labels = [] for feature in test_feature: predicted_label = model.predict(feature) 
predicted_labels.append(predicted_label) return accuracy_score(test_label, predicted_labels) train_feature, train_label = load_train_data() test_feature, test_label = load_test_data() calc_accuracy(train_feature, train_label, test_feature, test_label, k=1) calc_accuracy(train_feature, train_label, test_feature, test_label, k=5)交差検証n_split = 5 def load_train_data_cv(n_split): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) group_feature = np.split(train_feature, n_split) group_label = np.split(train_label, n_split) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') temp_group_feature = np.split(temp_feature, n_split) temp_label = np.array([i]*temp_feature.shape[0]) temp_group_label = np.split(temp_label, n_split) for m in range(n_split): group_feature[m] = np.vstack([group_feature[m], temp_group_feature[m]]) group_label[m] = np.hstack([group_label[m], temp_group_label[m]]) return group_feature, group_label group_feature, group_label = load_train_data_cv(5) len(group_feature) group_feature[0].shape group_label[0].shape group_label[0][999] group_feature.pop(2) temp = np.vstack(group_feature) temp.shape`pop`はよくなさそうtemp = group_feature.copy() temp.pop(2) temp1 = np.vstack(temp) print(temp1.shape) print(len(group_feature)) def cross_validation(n_split=5, params=[1,2,3,4,5,10,20]): n_params = len(params) score_list = np.zeros(n_params) group_feature, group_label = load_train_data_cv(n_split) for j in range(n_params): for i in range(n_split): temp_group_feature = group_feature.copy() temp_test_feature = temp_group_feature.pop(i) temp_train_feature = np.vstack(temp_group_feature) temp_group_label = group_label.copy() temp_test_label = temp_group_label.pop(i) temp_train_label = np.hstack(temp_group_label) score_list[j] += calc_accuracy(temp_train_feature, temp_train_label, temp_test_feature, temp_test_label, k=params[j]) opt_param = params[np.argmax(score_list)] print(score_list) return opt_param cross_validation(n_split=5, params=[1,3,5]) test = np.array([1,2,3,4,5]) np.split(test, 5) test = [1,2,3,4] test.pop(2) test test = [4.838, 4.837, 4.825] for i in test: print(i/5)0.9676 0.9673999999999999 0.9650000000000001まとめimport numpy as np from collections import Counter from sklearn.metrics import accuracy_score class kNN(object): def __init__(self, k=1): self._train_data = None self._target_data = None self._k = k def fit(self, train_data, target_data): self._train_data = train_data self._target_data = target_data def predict(self, x): distances = np.array([np.linalg.norm(p - x) for p in self._train_data]) nearest_indices = distances.argsort()[:self._k] nearest_labels = self._target_data[nearest_indices] c = Counter(nearest_labels) return c.most_common(1)[0][0] def load_train_data(): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_feature = np.vstack([train_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) train_label = np.hstack([train_label, temp_label]) return train_feature, train_label def load_test_data(): for i in range(10): if i==0: test_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') test_label = np.array([i]*test_feature.shape[0]) else: temp_feature = np.loadtxt('data/digit_test{}.csv'.format(i), delimiter=',') 
test_feature = np.vstack([test_feature, temp_feature]) temp_label = np.array([i]*temp_feature.shape[0]) test_label = np.hstack([test_label, temp_label]) return test_feature, test_label def calc_accuracy(train_feature, train_label, test_feature, test_label, k=1): model = kNN(k) model.fit(train_feature, train_label) predicted_labels = [] for feature in test_feature: predicted_label = model.predict(feature) predicted_labels.append(predicted_label) return accuracy_score(test_label, predicted_labels) def load_train_data_cv(n_split=5): for i in range(10): if i==0: train_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') train_label = np.array([i]*train_feature.shape[0]) group_feature = np.split(train_feature, n_split) group_label = np.split(train_label, n_split) else: temp_feature = np.loadtxt('data/digit_train{}.csv'.format(i), delimiter=',') temp_group_feature = np.split(temp_feature, n_split) temp_label = np.array([i]*temp_feature.shape[0]) temp_group_label = np.split(temp_label, n_split) for m in range(n_split): group_feature[m] = np.vstack([group_feature[m], temp_group_feature[m]]) group_label[m] = np.hstack([group_label[m], temp_group_label[m]]) return group_feature, group_label def cross_validation(n_split=5, params=[1,2,3,4,5,10,20]): n_params = len(params) score_list = np.zeros(n_params) group_feature, group_label = load_train_data_cv(n_split) for j in range(n_params): for i in range(n_split): temp_group_feature = group_feature.copy() temp_test_feature = temp_group_feature.pop(i) temp_train_feature = np.vstack(temp_group_feature) temp_group_label = group_label.copy() temp_test_label = temp_group_label.pop(i) temp_train_label = np.hstack(temp_group_label) score_list[j] += calc_accuracy(temp_train_feature, temp_train_label, temp_test_feature, temp_test_label, k=params[j])/n_split opt_param = params[np.argmax(score_list)] print(score_list) return opt_param def main(): k_opt = cross_validation(n_split=5, params=[1,2,3,4,5,10,20]) train_feature, train_label = load_train_data() test_feature, test_label = load_test_data() score = calc_accuracy(train_feature, train_label, test_feature, test_label, k=k_opt) print(score)Datamining Assignment 1 - Exercise 5In General: We want to see if conspiracy theories are on the rise during the covid era, if their rise is connected to the number of cases/victims and each countrys' GDP.import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt from scipy import stats from pytrends.request import TrendReq from pycountry_convert import country_alpha3_to_country_alpha2 as pcLoad owid covid data and keep only the global data as first we will examine the rise of conspiracies on a global level.dfCovid = pd.read_csv('owid-covid-data.csv') dfCopy = dfCovid dfCovid = dfCovid.loc[dfCovid['location'] == "World"] dfCovid['death_rate'] = dfCovid['total_deaths']/dfCovid['total_cases'] dfCovid = dfCovid[['date','total_cases','new_cases','total_deaths','new_deaths','total_cases_per_million', 'total_deaths_per_million','death_rate']] dfCovidFirst we will examine some well known conspiracy theories, see their popularity and examine if it has risen during the covid period with data extracted from Google Trends.Please note that these people may not be involved in any way, but are just mentioned as they are well known in the conspiracy-theory cycles. 
We make len(kw_list) different queries because we don't want Google Trends to normalize the data similarly to the largest search volume, as we only want to see the increase/decrease of the popularity of each keyword.# Create a Google Trend Object # Data is normalized to the search with the largest search volume totalTrend = TrendReq(hl='en-US', tz=360) # Declare a var to store the search term #### build the playload #freemasons kw_list = [["soros"],["new world order"],["5G dangers"],["population control"],["microchip vaccine"]] _cat = 0 _geo = '' _gprop = '' # Build payload request to get data from Google trends _timeframe = '2015-11-27 2020-11-27' totalTrends = [] for i in kw_list: totalTrend = TrendReq(hl='en-US', tz=360) totalTrend.build_payload(i, cat=_cat, timeframe=_timeframe, geo=_geo, gprop=_gprop) # Get interest over time # Capture Monthly Data for use in Normalization against Weekly totalTrends.append(totalTrend.interest_over_time()) f, axes = plt.subplots(1, len(kw_list)) for i in range(len(kw_list)): totalTrends[i].plot(title='Google Trends Monthly Data Points', figsize=(30,10),ax = axes[i]) f.show()On average the pandemic lockdowns started around 20 March 2020 (Q1 2020 in the graphs).Source: https://en.wikipedia.org/wiki/COVID-19_pandemic_lockdowns Let's say that covid period lasts 1 year. We want to find the yearly mean popularity of each conspirancy theory and check if it has risen during the pandemic.#date is end of month monthlyTrends = [] for trends in totalTrends: monthlyTrends.append(trends.groupby(pd.Grouper(freq='1M')).mean())Prepare data and calculate the mean yearly (12 months) values of each Google Trends query.trendsYearly = [] for k in range(len(monthlyTrends)): monthlyTrends[k].iloc[1] monthlyValues = [] for i in range(len(monthlyTrends[k])): monthlyValues.append(np.array(list(monthlyTrends[k].iloc[i]))) yearlyAvg = [] for i in range(5): year = np.full((1, 2), 0.0) for j in range(12): year += monthlyValues[i*12+j] year = year/12 yearlyAvg.append(year) monthlyValues = [] for i in range(5): monthlyValues.append(list(yearlyAvg[i][0])[0]) trendsYearly.append(pd.DataFrame(monthlyValues,columns=[monthlyTrends[k].columns[0]], index=['2015-2016','2016-2017','2017-2018','2018-2019','2019-2020'])) #plot the yearly means f, axes = plt.subplots(1, len(kw_list)) f.set_figheight(5) f.set_figwidth(30) for i in range(len(trendsYearly)): sns.scatterplot(x=list(trendsYearly[i].index),y=trendsYearly[i].columns[0], data = trendsYearly[i],ax = axes[i])Using the yearly mean values we can see that there is a rise in popularity of during the years 2019-2020.From the monthly plots we can see (not that clearly, will change later if time allows it) that the maximum rise is during the beginning of the 1st global lockdown. At least visually we can assume that there is indeed a rise of popularity of conspiracy theories during the pandemic.All conspiracy theories existed before the beginning of the Covid-19 pandemic. 
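As an aside, the month-by-month bookkeeping above that builds `trendsYearly` can be expressed more compactly with pandas resampling. A rough sketch on synthetic weekly data (the real frames come from the live pytrends queries, so this only illustrates the pattern, not the notebook's exact values):

```
import numpy as np
import pandas as pd

# Synthetic weekly "interest over time" values for one keyword across the
# same five-year window, averaged into ~12-month buckets with resample()
# instead of the manual month/loop bookkeeping.
idx = pd.date_range('2015-11-29', '2020-11-22', freq='W')
trend = pd.DataFrame({'soros': np.random.randint(0, 100, size=len(idx))}, index=idx)

yearly_mean = trend.resample('365D').mean()  # buckets anchored at the first week
print(yearly_mean)
```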
Remove extra info and make both dataframes contain the same date.dfCovidNewCases = pd.DataFrame(list(dfCovid['new_cases']),columns=['new_cases'], index=dfCovid['date']) dfCovidNewCases.index = pd.to_datetime(dfCovidNewCases.index) dfCovidNewCases = dfCovidNewCases.groupby(pd.Grouper(freq='1W')).mean() dfCovidNewCases.head() for i in range(len(totalTrends)): mask = (totalTrends[i].index >= '2020-01-05') totalTrends[i] = totalTrends[i].loc[mask] del totalTrends[i]['isPartial'] dfCovidNewCases.drop(dfCovidNewCases.tail(1).index,inplace=True) # drop last n rowsCalculate correlation and p-valuefor i in range(len(totalTrends)): tut = stats.pearsonr(dfCovidNewCases['new_cases'], totalTrends[i][totalTrends[i].columns[0]]) print(totalTrends[i].columns[0]) print(" " + str(tut)) print("-"*60)soros (0.0769887794621878, 0.6070000681198973) ------------------------------------------------------------ new world order (-0.18704570757780425, 0.20805326872306343) ------------------------------------------------------------ 5G dangers (-0.20770867700460552, 0.16122390578381413) ------------------------------------------------------------ population control (-0.41024491264114415, 0.004183054443088195) ------------------------------------------------------------ microchip vaccine (0.08246611035785484, 0.581581940659679) ------------------------------------------------------------As we can see, while we know that there is a rise of popularity during the pandemic, it isn't really correlated with the rate of weekly covid cases. Let's examine the correlation up to the middle of 1st lockdown (average date), 2020-03-30, and see if anything changes.for i in range(len(totalTrends)): mask = (totalTrends[i].index <= '2020-03-30') totalTrends[i] = totalTrends[i].loc[mask] mask = (dfCovidNewCases.index <= '2020-03-30') dfCovidNewCases = dfCovidNewCases.loc[mask] for i in range(len(totalTrends)): tut = stats.pearsonr(dfCovidNewCases['new_cases'], totalTrends[i][totalTrends[i].columns[0]]) print(totalTrends[i].columns[0]) print(" " + str(tut)) print("-"*60)soros (0.5358415042740347, 0.059099270215444186) ------------------------------------------------------------ new world order (0.7170649405516993, 0.0058039617752067806) ------------------------------------------------------------ 5G dangers (0.9283298701290048, 4.691560724114713e-06) ------------------------------------------------------------ population control (0.44181332469163104, 0.13065462754590604) ------------------------------------------------------------ microchip vaccine (0.75743806749597, 0.00271061747015502) ------------------------------------------------------------Up until the first lockdown (popularity rise spike) we can see that 3/5 are of statistical importance, while all (5/5) of our queries are, some more some less, correlated. We believe that this happens because people were stressed due to the first-seen (then) pandemic,self-isolation,war-like situation and sought a way to distract themselves and escape from reality. We also believe that this spike only happens one time because as time passed, people got used of the overall situation and only the true conspirancy theorists remained. Now, we will try to get the mean popularity during the pandemic in some countries and see how it correlates with the country GDP. 
Convert to alpha2 iso codes for use in pytrends.dfCopy = dfCopy[['date','iso_code','location','new_cases','gdp_per_capita']] iso_codes_alpha2 = pd.unique(dfCopy['iso_code']) #iso_codes_alpha3 iso_codes_alpha2 = iso_codes_alpha2[iso_codes_alpha2!='OWID_WRL'] iso_codes_alpha2 = iso_codes_alpha2[iso_codes_alpha2!='OWID_KOS'] iso_codes_alpha2 = iso_codes_alpha2[:-1] for i in range(len(iso_codes_alpha2)): iso_codes_alpha2[i] = pc(iso_codes_alpha2[i])Pulling the trends for each country is SLOW! We will choose 9 bigger countries as some smaller countries simply don't have the search volume (and time!) required to display data. This can be modified to run for every country.iso_chosen_countries = ['NG','IN','ID','MY','TR','BR','JP','FR','DE'] names_chosen_countries = ['Nigeria','India','Indonesia','Malaysia','Turkey','Brazil','Japan','France','Germany'] countryTrends = [] for iso in iso_chosen_countries: # Create a Google Trend Object # Data is normalized to the search with the largest search volume totalTrend = TrendReq(hl='en-US', tz=360) # Declare a var to store the search term kw_list = [["soros"],["new world order"],["5G dangers"],["population control"],["microchip vaccine"]] _cat = 0 _geo = iso _gprop = '' # Build payload request to get data from Google trends _timeframe = '2019-11-27 2020-11-27' totalTrends = [] for i in kw_list: totalTrend = TrendReq(hl='en-US', tz=360) totalTrend.build_payload(i, cat=_cat, timeframe=_timeframe, geo=_geo, gprop=_gprop) # Get interest over time # Capture Monthly Data for use in Normalization against Weekly totalTrends.append(totalTrend.interest_over_time()) countryTrends.append(totalTrends) for i in range(len(countryTrends)): for j in range(len(countryTrends[i])): countryTrends[i][j] = countryTrends[i][j].mean() gdp_chosen_countries = [] for name in names_chosen_countries: gdppc = list(dfCopy.loc[dfCopy['location'] == name]['gdp_per_capita'])[0] gdp_chosen_countries.append(gdppc) for i in range(len(countryTrends)): tempTrend = [] for j in range(len(countryTrends[i])): t = list(countryTrends[i][j]) if(len(t)>1): tempTrend.append(t[0]) else: tempTrend.append(np.nan) countryTrends[i] = tempTrendNot all countries have data for every search term.conspiracyAllCountries = [[] for j in range(len(kw_list))] for ct in countryTrends: for i in range(len(ct)): conspiracyAllCountries[i].append(ct[i]) for i in range(len(conspiracyAllCountries)): mask1 = np.ma.array(conspiracyAllCountries[i], mask=np.isnan(conspiracyAllCountries[i])) # Use a mask to mark the NaNs mask2 = np.ma.array(gdp_chosen_countries, mask=np.isnan(conspiracyAllCountries[i])) # Use a mask to mark the NaNs mask1 = mask1[~mask1.mask] mask2 = mask2[~mask2.mask] if(len(mask1) > 2): tut = stats.pearsonr(mask1, mask2) print(kw_list[i]) print(" " + str(tut)) print("-"*60)['soros'] (0.17006834886499078, 0.6617819245722316) ------------------------------------------------------------ ['new world order'] (0.24729315622540465, 0.5211906819188104) ------------------------------------------------------------ ['5G dangers'] (0.3885048925177519, 0.5180820109250023) ------------------------------------------------------------ ['population control'] (-0.2712985921486275, 0.48010152314225546) ------------------------------------------------------------Estimating overhead expenses in VASP calculationsThis tutorial describes the issue with overhead expenses in VASP calculations%rm -rf sandbox/overhead %mkdir -p sandbox/overhead %env VASP_COMMANDBase example of ASE + VASP relaxationCreate a normal, single point 
`ase.calculators.vasp.Vasp` calculatorfrom ase.build import molecule from ase.optimize import BFGS from ase.calculators.vasp import Vasp atoms_base = molecule("CH4", pbc=True, vacuum=5) atoms_base.rattle(0.1) vasp_params = dict(xc="pbe", kpts=1, encut=350, nsw=1, ibrion=-1) calc = Vasp(directory="sandbox/overhead", **vasp_params)Following codes are the standard way to run the relaxation by BFGS optimizer%%capture out atoms = atoms_base.copy() # Avoid overwrite atoms.calc = calc dyn = BFGS(atoms) dyn.run(fmax=0.1) print(out.stdout)Step Time Energy fmax BFGS: 0 02:56:48 -22.827328 12.2022 BFGS: 1 02:56:59 -23.573351 4.3421 BFGS: 2 02:57:09 -23.814358 2.3427 BFGS: 3 02:57:20 -23.907204 1.5573 BFGS: 4 02:57:29 -23.941756 0.6833 BFGS: 5 02:57:38 -23.971885 0.4278 BFGS: 6 02:57:48 -23.991150 0.3614 BFGS: 7 02:57:56 -23.989364 0.2442 BFGS: 8 02:58:05 -23.990127 0.0904As can be seen each step takes about 10 sec. How much time are used for each ionic loop and overhead?Check the ionic loop time and real wall-time of last iteration%%bash grep "LOOP+" sandbox/overhead/OUTCAR grep "Total CPU time" sandbox/overhead/OUTCAR grep "Elapsed time" sandbox/overhead/OUTCARLOOP+: cpu time 2.3971: real time 2.4488 Total CPU time used (sec): 4.953 Elapsed time (sec): 7.924Examine overhead time in different modesNow let's test the ionic loop time and overall walltime, with modified procedure to the relaxationFollowing functions parses the ionic LOOP+ and CPU time and system time profilesimport re import numpy as np def parse_outcar_time(outcar_file): """Parse the outcar file for LOOP+, total CPU time and wall time""" pattern1 = r"LOOP\+\:\s+cpu\s+time\s+([\d\.]+)\:\s+real\s+time\s+([\d\.]+)$" pattern2 = r"Total\s+CPU\s+time\s+used\s+\(sec\)\:\s+([\d\.]+)$" pattern3 = r"Elapsed\s+time\s+\(sec\)\:\s+([\d\.]+)$" outcar_str = open(outcar_file, "r").read() time_loops = [float(m[1]) for m in re.findall(pattern1, outcar_str, re.MULTILINE)] time_cpu = [float(m) for m in re.findall(pattern2, outcar_str, re.MULTILINE)] time_elaps = [float(m) for m in re.findall(pattern3, outcar_str, re.MULTILINE)] return time_loops, time_cpu, time_elaps def ase_vasp_time_profile(atoms, vasp_params, fmax=0.1): """Run ase+vasp relaxation and get time profile for each single step""" calc = Vasp(directory="sandbox/overhead", **vasp_params) atoms.calc = calc dyn = BFGS(atoms) time_loops = [] time_cpu = [] time_elaps = [] while True: # Force run 1 step calculation f = np.abs(atoms.get_forces()).max() print(f) time_profile = parse_outcar_time("sandbox/overhead/OUTCAR") time_loops += time_profile[0] time_cpu += time_profile[1] time_elaps += time_profile[2] if f < fmax: break dyn.step() return time_loops, time_cpu, time_elaps**Case 1**: single point vasp calculator use random trial wavefunction and do not write WAVECAR%rm -rf sandbox/overhead/* %%capture out # CASE 1 # remove wavefunction writing new_vasp_params = vasp_params.copy() new_vasp_params.update(lwave=False, istart=0) time_prof_1 = ase_vasp_time_profile(atoms_base.copy(), new_vasp_params, fmax=0.1)libibverbs: Warning: couldn't open config directory '/usr/etc/libibverbs.d'. libibverbs: Warning: no userspace device-specific driver found for /sys/class/infiniband_verbs/uverbs0 -------------------------------------------------------------------------- Sorry! You were supposed to get help about: dlopen failed But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-common-cuda.txt: No such file or directory. Sorry! 
-------------------------------------------------------------------------- -------------------------------------------------------------------------- Sorry! You were supposed to get help about: ini file:file not found But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-btl-openib.txt: No such file or directory. Sorry! -------------------------------------------------------------------------- ---------------------------------------------[...]**Case 2**: each single point vasp calculator reads old wavefunction and writes WAVECAR%rm -rf sandbox/overhead/* %%capture out # CASE 2 # force adding the lwave=1 tag to save wavecar new_vasp_params = vasp_params.copy() new_vasp_params.update(lwave=True, istart=1) time_prof_2 = ase_vasp_time_profile(atoms_base.copy(), new_vasp_params, fmax=0.1)libibverbs: Warning: couldn't open config directory '/usr/etc/libibverbs.d'. libibverbs: Warning: no userspace device-specific driver found for /sys/class/infiniband_verbs/uverbs0 -------------------------------------------------------------------------- Sorry! You were supposed to get help about: dlopen failed But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-common-cuda.txt: No such file or directory. Sorry! -------------------------------------------------------------------------- -------------------------------------------------------------------------- Sorry! You were supposed to get help about: ini file:file not found But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-btl-openib.txt: No such file or directory. Sorry! -------------------------------------------------------------------------- ---------------------------------------------[...]**Case 3**: use internal vasp optimizer (CG)%rm -rf sandbox/overhead/* %%capture out # CASE 3 new_vasp_params = vasp_params.copy() new_vasp_params.update(ibrion=2, nsw=10, ediffg=-0.1) atoms = atoms_base.copy() atoms.calc = Vasp(directory="sandbox/overhead", **new_vasp_params) atoms.get_potential_energy() time_prof_3 = parse_outcar_time("sandbox/overhead/OUTCAR")libibverbs: Warning: couldn't open config directory '/usr/etc/libibverbs.d'. libibverbs: Warning: no userspace device-specific driver found for /sys/class/infiniband_verbs/uverbs0 -------------------------------------------------------------------------- Sorry! You were supposed to get help about: dlopen failed But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-common-cuda.txt: No such file or directory. Sorry! -------------------------------------------------------------------------- -------------------------------------------------------------------------- Sorry! You were supposed to get help about: ini file:file not found But I couldn't open the help file: /proj/nv/libraries/Linux_x86_64/openmpi/2020/196907-rel/share/openmpi/help-mpi-btl-openib.txt: No such file or directory. Sorry! 
-------------------------------------------------------------------------- ---------------------------------------------[...]Let's now plot the time usage of different modes%matplotlib inline import matplotlib.pyplot as plt import numpy as np plt.cla() plt.figure(figsize=(18, 6)) # Case 1 plt.subplot(131) x = range(len(time_prof_1[0]) + 1) y1 = np.array(time_prof_1[0]) y2 = np.array(time_prof_1[1]) y3 = np.array(time_prof_1[2]) plt.stairs(y1, x, fill=True, zorder=-1) plt.stairs(y2, x, fill=True, zorder=-2) plt.stairs(y3, x, fill=True, zorder=-3) plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.ylabel("Time (s)") plt.title("Case 1: no WAVECAR, ASE+VASP") # Case 2 plt.subplot(132) x = range(len(time_prof_2[0]) + 1) y1 = np.array(time_prof_2[0]) y2 = np.array(time_prof_2[1]) y3 = np.array(time_prof_2[2]) plt.stairs(y1, x, fill=True, zorder=-1) plt.stairs(y2, x, fill=True, zorder=-2) plt.stairs(y3, x, fill=True, zorder=-3) plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.title("Case 2: r/w WAVECAR, ASE+VASP") # Case 3 plt.subplot(133) x = range(len(time_prof_3[0]) + 1) y1 = np.array(time_prof_3[0]) y2 = y1.copy() y2[-1] += time_prof_3[1][0] - np.sum(y1) y3 = y1.copy() y3[-1] += time_prof_3[2][0] - np.sum(y1) plt.stairs(y1, x, fill=True, zorder=-1, label="Ionic loop") plt.stairs(y2, x, fill=True, zorder=-2, label="CPU overhead") plt.stairs(y3, x, fill=True, zorder=-3, label="System turnaround") plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.title("Case 3: VASP Internal Routines") plt.legend() plt.tight_layout() plt.savefig("ex00-time-profile.png", dpi=600)Testing performance of VaspInteractiveRunning the previous codes using VaspInteractive and check overhead%%capture out # CASE 4 -- VaspInteractive from vasp_interactive import VaspInteractive new_vasp_params = vasp_params.copy() new_vasp_params.update(nsw=10) atoms = atoms_base.copy() calc = VaspInteractive(directory="sandbox/overhead/vpi", **new_vasp_params) with calc: atoms.calc = calc dyn = BFGS(atoms) dyn.run(fmax=0.1) time_prof_4 = parse_outcar_time("sandbox/overhead/vpi/OUTCAR")What are inside the INCAR file?%cat sandbox/overhead/vpi/INCARINCAR created by Atomic Simulation Environment ENCUT = 350.000000 POTIM = 0.000000 GGA = PE IBRION = -1 IWAVPR = 11 NSW = 10 INTERACTIVE = .TRUE.Plot against other cases, a bit messy codeimport matplotlib.pyplot as plt import numpy as np plt.cla() plt.figure(figsize=(18, 4)) # Case 1 plt.subplot(141) x = range(len(time_prof_1[0]) + 1) y1 = np.array(time_prof_1[0]) y2 = np.array(time_prof_1[1]) y3 = np.array(time_prof_1[2]) plt.stairs(y1, x, fill=True, zorder=-1) plt.stairs(y2, x, fill=True, zorder=-2) plt.stairs(y3, x, fill=True, zorder=-3) plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.ylabel("Time (s)") plt.title("Case 1: no WAVECAR, ASE+VASP") # Case 2 plt.subplot(142) x = range(len(time_prof_2[0]) + 1) y1 = np.array(time_prof_2[0]) y2 = np.array(time_prof_2[1]) y3 = np.array(time_prof_2[2]) plt.stairs(y1, x, fill=True, zorder=-1) plt.stairs(y2, x, fill=True, zorder=-2) plt.stairs(y3, x, fill=True, zorder=-3) plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.title("Case 2: r/w WAVECAR, ASE+VASP") # Case 3 plt.subplot(143) x = range(len(time_prof_3[0]) + 1) y1 = np.array(time_prof_3[0]) y2 = y1.copy() y2[-1] += time_prof_3[1][0] - np.sum(y1) y3 = y1.copy() y3[-1] += time_prof_3[2][0] - np.sum(y1) plt.stairs(y1, x, fill=True, zorder=-1, label="Ionic loop") plt.stairs(y2, x, fill=True, zorder=-2, label="CPU overhead") plt.stairs(y3, x, fill=True, zorder=-3, label="System turnaround") plt.ylim([0, 12]) 
plt.xlabel("Ionic steps") plt.title("Case 3: VASP Internal Routines") # plt.legend() # Case 4 plt.subplot(144) x = range(len(time_prof_4[0]) + 1) y1 = np.array(time_prof_4[0]) y2 = y1.copy() y2[-1] += time_prof_4[1][0] - np.sum(y1) y3 = y1.copy() y3[-1] += time_prof_4[2][0] - np.sum(y1) plt.stairs(y1, x, fill=True, zorder=-1, label="Ionic loop") plt.stairs(y2, x, fill=True, zorder=-2, label="CPU overhead") plt.stairs(y3, x, fill=True, zorder=-3, label="System turnaround") plt.ylim([0, 12]) plt.xlabel("Ionic steps") plt.title("Case 4: VASP Interactive") plt.legend() plt.tight_layout() plt.savefig("ex00-time-profile-with-vpi.png", dpi=600)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1iQH3RvK76elgCiJrnKAnNftyk41pJPFy) Прогноз цены биткойнa на момент закрытия дневных торгов 1. Постановка задачи Об исследовании:- **Объектами исследования:** - является Значения биткойнов в долларах США с 01.01.2017 года по 15.11.2019 года.- **Предметом исследования:** - является набор данных "Bitcoin Historical USD Price".- **Целью работы:** - является работа с временным рядом и построение модели нейронной сети для его прогноза. Сферы применения и актуальность работы- Финансовый сектор- Показания датчиков- Объемы продаж/производства- Телеметрия it-систем- и т.д. В работе будут рассмотрены следующие задачи - Исследование данных; - Построение временного ряд; - Построение графика Автокорреляции; - Создание набора данных для обучения; - Создание нейронной сети; - Разделение данных; - Масштабирование данных; - Обучение и проверка модели; - Прогнозирование на тестовом наборе данных; - Оценка качества модели на тестовой выборке; - Построение графика прогнозa. Целевые метрики- $MSE_{(mean\ squared\ error)}$ - $MSE(y,\hat{y})=\frac{1}{n_{samples}}\sum_{i=0}^{n_{samples}-1}(y_{i}-\hat{y}_{i})^2$- $R^2_{(coefficient\ of\ determination)}$ - $R^2(y, \hat{y}) = 1 - \frac{\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}{\sum_{i=1}^{n} (y_i - \bar{y})^2},\ where\ \bar{y} = \frac{1}{n} \sum_{i=1}^{n} y_i\ and\ \sum_{i=1}^{n} (y_i - \hat{y}_i)^2 = \sum_{i=1}^{n} \epsilon_i^2$ 2. Анализ Подключаем библиотекиimport os import zipfile import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings # Убираем отображения сообщений "Предупреждений" warnings.simplefilter('ignore') %matplotlib inline %config InlineBackend.figure_format = 'retina' # Добавим более четкое отображение графиковЗагружаем набор данныхdef download_file(): pwd = !pwd !wget 'https://www.dropbox.com/s/h1ey1w0i4zw4tr0/bitcoin-usd-stock-prices.zip' # Скачиваем файл with zipfile.ZipFile(pwd[0] + '/bitcoin-usd-stock-prices.zip') as extract_zip: extract_zip.extractall(pwd[0]) extract_zip.close() # Разархивируем zip архив find = !find * -iname '*.zip' -print -or -iname '*.csv' -print -or -iname 'sample_data' -prune print(pwd) # Выводим текущую директорию print(find) # Выводим файлы ['*.zip', '*.csv'] в текущей директории download_file()--2020-03-08 09:30:32-- https://www.dropbox.com/s/h1ey1w0i4zw4tr0/bitcoin-usd-stock-prices.zip Resolving www.dropbox.com (www.dropbox.com)... 192.168.3.11, fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:5201 Connecting to www.dropbox.com (www.dropbox.com)|192.168.3.11|:443... connected. HTTP request sent, awaiting response... 
301 Moved Permanently Location: /s/raw/h1ey1w0i4zw4tr0/bitcoin-usd-stock-prices.zip [following] --2020-03-08 09:30:32-- https://www.dropbox.com/s/raw/h1ey1w0i4zw4tr0/bitcoin-usd-stock-prices.zip Reusing existing connection to www.dropbox.com:443. HTTP request sent, awaiting response... 302 Found Location: https://uc9b4abf210d96867a89eaf9040f.dl.dropboxusercontent.com/cd/0/inline/Azh_nSw3_Bm70w8hJvLv03SYOWzzos4uZQWhWJplaVmnIpjhcluKKG1qGpJ3qScQ-0F6IPYcj5aBzpprz3JQjAV7KHqWOQzgtgtXK-teVhomOWVHl8kGVQyT7dbl85u4lus/file# [following] --2020-03-08 09:30:32-- https://uc9b4abf210d96867a89eaf9040f.dl.dropboxusercontent.com/cd/0/inline/Azh_nSw3_Bm70w8hJvLv03SYOWzzos4uZQWhWJplaVmnIpjhcluKKG1qGpJ3qSc[...]О наборе данныхЗначения биткойнов в долларах США с 1 января 2017 года по 15 ноября 2019 года, загруженные с сайта [Yahoo Finance](https://finance.yahoo.com/) с однодневным разрешением. Посмотрим на данныеdf = pd.read_csv('bitcoin.csv') # Подгружаем набор данных df.head() # По необходимости можно воспользоваться qgrid для более красивого вывода # Документация qgrid https://github.com/quantopian/qgrid # import qgrid # df_qgrid = qgrid.show_grid(df) # df_qgridПо набору данных 1. столбец datetime ***Date***; 2. начальное значение торгового дня ***Open***; 3. конечное значение в любое время, которое трейдеры должны назвать днем ***Close***; 4. самое высокое ***Hight*** и самое низкое значения ***Low*** за день; 5. ***Adj Close*** скорректированная рыночная стоимость закрытия; 6. ***Volume*** полный объем.df_close = pd.DataFrame(df['Close']) df_close.index = pd.to_datetime(df['Date'])Лучше иметь индекс DataFrame в формате _datetime_, это упростит анализ в дальнейшем.df_close.index df_close.head()Описательная статистика target значения ‘Close’df_close.describe()- В наборе данных приблизительно 1050 дней наблюдений _(count = 1049)_. - Максимальная стоимость биткойна составляло около 19 тыс. Долларов США.- Минимальное падение курса в диапазоне семиста Долларов США. Построение временного рядаimport plotly.graph_objects as go fig = go.Figure() fig.add_trace( go.Scatter( x=df['Date'], y=df['Close'], line_color='#1874cd' ) ) fig.update_layout( title="Bitcoin Closing Price", xaxis_title="Time", yaxis_title="USD ($)", xaxis_rangeslider_visible=True, font=dict( family="Courier New, monospace", size=18, color="#7f7f7f" ) ) fig.show()_В данной работе основной темой будет работы с временным рядом при помощи Нейронной сети _(Neural Network)_ , мы попробуем сделать наше предсказание с её помощью._Нейронные сети популярны в задачах прогнозирования из-за их способности делать прогнозы на нелинейных сложных данных. Обычные статистические методы плохо работают с сильно нелинейными данными. Многомерная линейная регрессия также показала хорошие результаты на сложных нелинейных данных (слово «линейной» может сбить с толку, это относится к линейной комбинации входных переменных, однако сами входные переменные могут быть выражены как более высокие степенные полиномы в функции, которую мы пытаемся смоделировать)._ _Существует два основных способа реализации модели.__Некоторые реализуют, рассматривая исторические значения тех самых рядов, которые они пытаются предсказать как входные регрессоры _(переменные)_. Этот подход предполагает, что будущие ценности могут быть выражены как функция прошлых ценностей. 
Необходимо определить две вещи - какие прошлые значения использовать _(как далеко назад нам нужно взглянуть в прошлое)_ и параметры самой функции._ Построение график автокорреляцииfrom statsmodels.tsa import stattools acf_djia, confint_djia, qstat_djia, pvalues_djia = stattools.acf( df_close, unbiased=True, nlags=50, qstat=True, fft=True, alpha = 0.05 ) fig = go.Figure() fig.add_trace( go.Scatter( y=pd.Series(acf_djia), line_color='#ee2c2c' ) ) fig.update_layout( title='Autocorrelation of Bitcoin Closing Price', xaxis_title="Lag", yaxis_title="Value", xaxis_rangeslider_visible=True, font=dict( family="Courier New, monospace", size=18, color="#7f7f7f" ) ) fig.show()Чтобы увидеть, какие переменные _прошлого_ можно использовать в качестве входных данных _(input)_ в нашей модели, мы проверяем автокорреляцию временных рядов. (Это способ измерить силу связи между любыми двумя переменными. Когда значение корреляции приближается к 1, это указывает на высокую положительную связь. Корреляция около 0 указывает на то, что связь почти отсутствует, а значения около -1 указывают на сильную отрицательную корреляцию). _В автокорреляции мы берем значение временного ряда в текущий момент как одну переменную и одно из значений из момента времени в прошлом как другую переменную, и находим корреляцию между ними.__Используем stattools.acf _(acf для функции автокорреляции)_ , чтобы построить график автокорреляции с 50 прошлыми моментами времени _лагами_ ._ _(«statsmodels» отличный инструмент для проведения широкого спектра статистического анализа. Эта функция возвращает четыре выхода, из которых первый, которому присвоил имя 'acf_djia', - это список, содержащий значения автокорреляции указанного числа лагов ('nlags'))_ . Выбор большого количества входных атрибутов, особенно если они не имеют достаточно высокой корреляции с целевым атрибутом, может на самом деле навредить нам. Выберем все лаги _(прошлые значения)_ , которые имеют как минимум корреляцию 0,9 с текущей стоимостью _(видно, что это число 15 это означает, что мы **будем использовать цену закрытия последних 15 дней, чтобы предсказать цену закрытия любого конкретного дня**)_._(Таким образом, если у цены есть закрытия какой-либо конкретной акции до сегодняшнего дня, вы можете использовать эту модель для прогнозирования цены закрытия завтра. Нужна только сегодняшняя цена закрытия и цена закрытия последних четырнадцати дней, чтобы предсказать цену закрытия завтрашнего дня. Нам все еще нужны все исторические данные, которые мы должны обучить, и протестировать модель)_. По предварительному анализу:- target значение ‘Close’- колонки Adj Close и Volume исключены- по графику временного ряда, основной пик цены происходит в Июле- по графику автокорреляции, наиболее коррелированные значения в диапазоне 15 lags Дальнейшие шаги:- Создать новый набор данных используя последние 15 значений в качестве входных данных- Построить модель- Разделить данные на train, val, test- Масштабировать данные для уменьшения дисперсии- Обучить и протестировать модель 3. Методика решения Создание набора данных для обучения - Прямо сейчас у нас есть только один столбец - фактическая цена закрытия Биткойн _(кроме индекса datetime)_.- Для обучения нашей нейронной сети мы используем последние 15 значений в качестве входных данных со значением в любой соответствующий момент времени. 
(Это означает, что теперь у нас будет еще 15 столбцов, в которых в каждой строке будет указана цена закрытия в любой день и соответствующие цены за последние пятнадцать дней.)_(Ниже функция для создания этих столбцов)_.def create_regressor_attributes(df, attribute, list_of_prev_t_instants) : """ Индекс должен иметь тип datetime Создаем объекты с предыдущими временными значениями """ list_of_prev_t_instants.sort() start = list_of_prev_t_instants[-1] end = len(df) df['datetime'] = df.index df.reset_index(drop=True) df_copy = df[start:end] df_copy.reset_index(inplace=True, drop=True) for attribute in attribute: foobar = pd.DataFrame() for prev_t in list_of_prev_t_instants: new_col = pd.DataFrame(df[attribute].iloc[(start - prev_t) : (end - prev_t)]) new_col.reset_index(drop=True, inplace=True) new_col.rename(columns={attribute : '{}_(t-{})'.format(attribute, prev_t)}, inplace=True) foobar = pd.concat([foobar, new_col], sort=False, axis=1) df_copy = pd.concat([df_copy, foobar], sort=False, axis=1) df_copy.set_index(['datetime'], drop=True, inplace=True) return df_copy list_of_attributes = ['Close'] list_of_prev_t_instants = [] for i in range(1,16): list_of_prev_t_instants.append(i) list_of_prev_t_instantsЗдесь у нас есть только один временной ряд, который является ценой закрытия каждого дня. Используя функцию _'create_regressor_attributes'_ мы можем создать атрибуты регрессора для нескольких столбцов в одном кадре данных._Кроме того, мы можем указать, какие прошлые значения использовать точно в форме списка. Это полезно в тех случаях, когда прошлые регрессоры могут отличаться от предыдущих пятнадцати значений. В нашем случае это так, потому что линия автокорреляции была почти прямой линией (отрицательной)._df_new = create_regressor_attributes(df_close, list_of_attributes, list_of_prev_t_instants) df_new.head() df_new.shapeЭтот новый набор данных имеет исходный временной ряд **Close** и другие 15 столбцов, которые являются прошлыми значениями, взятыми в качестве входов регрессора._(Кроме того, он начинается с 16 января 2017 года, но первоначальный временной ряд начался с 1 января 2017 года. Это сделано, чтобы избежать значений NaN, которые будут появляться в новых добавленных столбцах)_ . Создание нейронной сети Подключаем библиотеки_Используем [TensorFlow](https://www.tensorflow.org/) [Keras](https://keras.io/)_from tensorflow.keras.layers import Input, Dense, Dropout from tensorflow.keras.optimizers import SGD from tensorflow.keras.models import Model from tensorflow.keras.models import load_model from tensorflow.keras.callbacks import ModelCheckpointЧто будем строить - Тренируем простой Многослойный [Персептрон](https://ru.wikipedia.org/wiki/%D0%9F%D0%B5%D1%80%D1%86%D0%B5%D0%BF%D1%82%D1%80%D0%BE%D0%BD), который имеет входной слой с 15 узлами _(учитывая каждый из 15 прошлых входов регрессора)_.- У него будет 2 скрытых слоя _(это то, что делает модель «Глубокой»)_.- 60 узлов в каждом. _(Почему 60? 
Параметр подобран путем разных комбинаций)_.input_layer = Input(shape=(15), dtype='float32') # Входной слой (15 узлов формат данных float) dense1 = Dense(60, activation='linear')(input_layer) # Первый скрытый слой (получает на вход "Входной слой") dense2 = Dense(60, activation='linear')(dense1) # Второй скрытый слой (получает на вход "Первый скрытый слой") dropout_layer = Dropout(0.2)(dense2) # Используем случайное исключение нейронов, для более стабильной модели (получает на вход "Второй скрытый слой") output_layer = Dense(1, activation='linear')(dropout_layer) # Конечный слойWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version. Instructions for updating: If using Keras pass *_constraint arguments to layers.Ознакомиться с основными функциями:- [Input](https://keras.io/layers/core/input)- [Dense](https://keras.io/layers/core/dense)- [activation](https://keras.io/activations/)- [Dropout](https://keras.io/layers/core/dropout)- [loss](https://keras.io/losses/)- [optimizer](https://keras.io/optimizers/) Модельmodel = Model(inputs=input_layer, outputs=output_layer) # В модели указываем "Входной" и "Выходной" слой # Указываем метрику модели = MSE и Оптимизации = adam # (MSE) Функция вычисляет среднюю квадратическую ошибку , риск метрики , соответствующую ожидаемое значение квадрата (квадратичной) ошибки или потерю. model.compile(loss='mean_squared_error', optimizer='adam') # Указываем метрику модели = MSE и Оптимизации = adam model.summary() # Смотрим на модельModel: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 15)] 0 _________________________________________________________________ dense (Dense) (None, 60) 960 _________________________________________________________________ dense_1 (Dense) (None, 60) 3660 _________________________________________________________________ dropout (Dropout) (None, 60) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 61 ================================================================= Total params: 4,681 Trainable params: 4,681 Non-trainable params: 0 ___________________________________________________________[...]https://towardsdatascience.com/adam-latest-trends-in-deep-learning-optimization-6be9a291375cАдам можно рассматривать как комбинацию RMSprop и Stochastic Gradient Descent с импульсом. Он использует квадратные градиенты для масштабирования скорости обучения, как RMSprop, и использует преимущество импульса, используя скользящее среднее значение градиента, а не сам градиент, как SGD с импульсом.Адам - ​​это метод адаптивной скорости обучения, то есть он рассчитывает индивидуальные скорости обучения для различных параметров. Его название происходит от адаптивной оценки моментов , и причина, по которой он называется, заключается в том, что Адам использует оценки первого и второго моментов градиента, чтобы адаптировать скорость обучения для каждого веса нейронной сети. Теперь, что такое момент? 
Now, what is a moment? The n-th moment of a random variable is defined as the expected value of that variable raised to the power n.# We can draw a picture of our model, or load the .gif prepared in advance # brew install graphviz OR pip install graphviz # pip install pyparsing # pip install pydot # pip install pydot-ng from IPython.display import HTML, Image from tensorflow.keras.utils import plot_model !wget 'https://www.dropbox.com/s/cpwkl1nj3l6s5ps/mod_gif.gif' def viz_model(gif=True): if gif == True: with open('mod_gif.gif','rb') as f: display(Image(data=f.read(), format='png')) if gif == False: return plot_model(model) viz_model(gif=True)**How the model works**The input layer only has nodes; its job is simply to take the data and pass it on to the hidden layer.A dense layer has links (edges) with weights attached to them, and it also has an activation function.A linear activation function is a straight line and is proportional to the input (that is, to the weighted sum at that neuron).This choice of activation function gives a whole spectrum of values rather than just a binary answer. _(Several neurons can be connected together and, if more than one neuron is activated, the decision is made by applying a max (or softmax) operation.)_The data from each input node is multiplied by the weights of the links through which the input nodes are connected to the first hidden layer. Each node of the hidden layer then combines all of its inputs (one from each input node) and passes the result through the activation function.The same process is repeated for the second hidden layer.A Dropout layer is added before the output layer. It randomly drops a certain percentage (20% in our case) of the links connected to the output node. This is good practice and generally helps to curb overfitting.We simply define each layer and the previous layer it is connected to, and then package them all together with the Model function.The 15 new data columns (df_new) will be fed through this network one row at a time. The network will produce an answer, which is compared with the value in the first column. This first prediction is based on weights that are initialized randomly. The prediction error is then used to adjust the weights so that the next prediction is better.This happens for all _(1034)_ rows. We can also adjust the weights after passing several samples (rows), in what we call a "batch". When all the rows have passed through in this way, we say that one "epoch" is over.But how many epochs should we run? Until we feel that our neural network has captured the patterns in the data and can make predictions. How do we make sure of that?What we do is hold back a few data samples as a validation set. We do not use them for training (i.e. for updating the weights). We run the validation set only after each epoch and check the error. We also track the training error, but it is not as important as the validation error, since the training error is what drives the weight updates. So, in a sense, the training data is what the network soaks up, while the validation data is what it does not soak up. As we keep monitoring the errors, we stop further training only when the validation error gets close to the training error.There are many ways to compute the error. Here we used 'mean_squared_error' together with the 'adam' optimizer _(a form of stochastic gradient descent)_, which is responsible for updating the weights.
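Keras can automate this "stop when the validation error stops improving" rule with the EarlyStopping callback; a minimal sketch, where the patience value is an assumption rather than something tuned in this notebook:

from tensorflow.keras.callbacks import EarlyStopping

# Stop once val_loss has not improved for 5 consecutive epochs and restore the best weights seen so far
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

# Usage sketch: pass callbacks=[early_stop] to the model.fit(...) call shown below.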
Splitting and scaling the data Splitting the data We set aside the last 5% of the samples _(rows)_ for later testing. The remaining 95% is split again: roughly 5% of it is used as the validation set and the rest as the training set. All of this splitting happens along the rows.We also have to split along the columns. The first column holds the actual values _(the target)_; it is not fed into the neural network. The remaining 15 columns _(the input regressors)_ are.So the data is split both by rows and by columns. The slices of data that contain only the input regressor columns usually get an "X" in the names we give them, while the slices _(or series)_ that contain only the actual values _(also called target values, since they are the "target" the neural network tries to predict accurately during training)_ get a "y" in their names.Since this is time-series data, splitting the dataset randomly would not be a sensible idea, so we simply split it in order.test_set_size = 0.05 valid_set_size= 0.05 df_copy = df_new.reset_index(drop=True) df_test = df_copy.iloc[ int(np.floor(len(df_copy)*(1-test_set_size))) : ] df_train_plus_valid = df_copy.iloc[ : int(np.floor(len(df_copy)*(1-test_set_size))) ] df_train = df_train_plus_valid.iloc[ : int(np.floor(len(df_train_plus_valid)*(1-valid_set_size))) ] df_valid = df_train_plus_valid.iloc[ int(np.floor(len(df_train_plus_valid)*(1-valid_set_size))) : ] X_train, y_train = df_train.iloc[:, 1:], df_train.iloc[:, 0] X_valid, y_valid = df_valid.iloc[:, 1:], df_valid.iloc[:, 0] X_test, y_test = df_test.iloc[:, 1:], df_test.iloc[:, 0] print('Shape of training inputs, training target:', X_train.shape, y_train.shape) print('Shape of validation inputs, validation target:', X_valid.shape, y_valid.shape) print('Shape of test inputs, test target:', X_test.shape, y_test.shape)Shape of training inputs, training target: (932, 15) (932,) Shape of validation inputs, validation target: (50, 15) (50,) Shape of test inputs, test target: (52, 15) (52,)Scaling the data We normalize the data to the range 0.01 to 0.99 before feeding it into the neural network._The [MinMaxScaler()](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) function accepts dataframes or series (it can also take arrays), but always returns n-dimensional arrays._from sklearn.preprocessing import MinMaxScaler Target_scaler = MinMaxScaler(feature_range=(0.01, 0.99)) Feature_scaler = MinMaxScaler(feature_range=(0.01, 0.99)) X_train_scaled = Feature_scaler.fit_transform(np.array(X_train)) X_valid_scaled = Feature_scaler.fit_transform(np.array(X_valid)) X_test_scaled = Feature_scaler.fit_transform(np.array(X_test)) y_train_scaled = Target_scaler.fit_transform(np.array(y_train).reshape(-1,1)) y_valid_scaled = Target_scaler.fit_transform(np.array(y_valid).reshape(-1,1)) y_test_scaled = Target_scaler.fit_transform(np.array(y_test).reshape(-1,1))- The data is preprocessed- The neural network is ready for training
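Note that the cell above calls fit_transform on the validation and test splits as well, so each split is scaled by its own minimum and maximum. A common alternative, sketched here with the same variable names, is to fit the scalers on the training split only and reuse them for the other splits (the numbers reported below would then change slightly):

from sklearn.preprocessing import MinMaxScaler
import numpy as np

Feature_scaler = MinMaxScaler(feature_range=(0.01, 0.99))
Target_scaler = MinMaxScaler(feature_range=(0.01, 0.99))

# Fit on the training split only, then apply the same transform to validation and test
X_train_scaled = Feature_scaler.fit_transform(np.array(X_train))
X_valid_scaled = Feature_scaler.transform(np.array(X_valid))
X_test_scaled = Feature_scaler.transform(np.array(X_test))
y_train_scaled = Target_scaler.fit_transform(np.array(y_train).reshape(-1, 1))
y_valid_scaled = Target_scaler.transform(np.array(y_valid).reshape(-1, 1))
y_test_scaled = Target_scaler.transform(np.array(y_test).reshape(-1, 1))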
Training and validating the model model.fit( x=X_train_scaled, y=y_train_scaled, batch_size=5, # Number of samples per gradient update; if batch_size is not specified, it defaults to 32. epochs=30, verbose=1, validation_data=(X_valid_scaled, y_valid_scaled), shuffle=True)Train on 932 samples, validate on 50 samples Epoch 1/30 932/932 [==============================] - 1s 2ms/sample - loss: 0.0155 - val_loss: 0.0390 Epoch 2/30 932/932 [==============================] - 1s 601us/sample - loss: 0.0039 - val_loss: 0.0389 Epoch 3/30 932/932 [==============================] - 1s 639us/sample - loss: 0.0034 - val_loss: 0.0472 Epoch 4/30 932/932 [==============================] - 0s 531us/sample - loss: 0.0026 - val_loss: 0.0399 Epoch 5/30 932/932 [==============================] - 1s 584us/sample - loss: 0.0023 - val_loss: 0.0403 Epoch 6/30 932/932 [==============================] - 1s 578us/sample - loss: 0.0023 - val_loss: 0.0234 Epoch 7/30 932/932 [==============================] - 1s 583us/sample - loss: 0.0020 - val_loss: 0.0549 Epoch 8/30 932/932 [==============================] - 1s 567us/sample - loss: 0.0017 - val_loss: 0.0228 Epoch 9/30 932/932 [==============================] - 1s 569us/sample - loss: 0.0019 - val_loss: 0.0262 Epoch 10/30 932/932 [[...]If the validation loss had remained noticeably larger than the training loss at the end of training but had kept decreasing over the last few epochs, that would be a sign that we should run the model for more epochs, because it still needed more training.If the validation loss had remained noticeably larger than the training loss at the end of training and had also become more or less static, that would be a sign of overfitting: the model performs too well on the training data, it has captured it perfectly, noise included, but it would perform poorly on data it has not seen. In that case it is recommended to increase the value in the dropout layer, reduce the number of epochs, increase the batch size, reduce the number of hidden layers, or reduce the number of nodes in the hidden layers.**Our validation loss did not change much, especially compared with the training loss. We can attribute this to the small number of training examples we have, relative to the model we used.**
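The imports at the start of this section bring in ModelCheckpoint but never use it; a minimal sketch of how it could keep the weights from the epoch with the lowest validation loss (the file name here is arbitrary):

from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model

# Save only the best weights (lowest val_loss) seen during training
checkpoint = ModelCheckpoint('best_close_model.h5', monitor='val_loss', save_best_only=True, verbose=1)

# Usage sketch: add callbacks=[checkpoint] to the model.fit(...) call above, then reload the best model with
# best_model = load_model('best_close_model.h5')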
4. Results Forecasting on the test set y_pred = model.predict(X_test_scaled)Remember that all of our inputs and targets were scaled into the (0, 1) range, so the predictions also lie in that range, and we need to scale them back.y_pred_rescaled = Target_scaler.inverse_transform(y_pred) # Transform the data back into the original space, i.e. return the input X_original whose transform would be X.Assessing the quality of the model on the test set One way to measure the accuracy of our model on the test data is to compare the error of its predictions against the true values. _This could be the mean squared error, the mean error, and so on._([r2_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html) computes the coefficient of determination, which compares the squared prediction error with the variance of the true values; the best possible score is 1, and the closer r^2 is to one, the better your model's predictions.)from sklearn.metrics import r2_score y_test_rescaled = Target_scaler.inverse_transform(y_test_scaled) score = r2_score(y_test_rescaled, y_pred_rescaled) print('R-squared score for the test set:', round(score,4))R-squared score for the test set: 0.7222Plotting the forecast y_actual = pd.DataFrame(y_test_rescaled, columns=['Actual Close Price']) y_hat = pd.DataFrame(y_pred_rescaled, columns=['Predicted Close Price']) fig = go.Figure() fig.add_trace( go.Scatter( y=y_actual['Actual Close Price'], line_color='#ee2c2c', name='Actual', mode='lines+markers' ) ) fig.add_trace( go.Scatter( y=y_hat['Predicted Close Price'], line_color='#1874cd', name='Predicted', mode='lines+markers' ) ) fig.update_layout( title='Bitcoin Stock Closing Prices', xaxis_title='Test Set Day no.', yaxis_title='USD ($)', xaxis_rangeslider_visible=False, font=dict( family="Courier New, monospace", size=18, color="#7f7f7f" ) ) fig.show()5. Conclusion Findings- Forecasting a time series depends on internal and external factors within the time window, which pushes time-series models fairly close to the category of "black box" models.- Regarding the forecast for this dataset: **with a one-day forecasting horizon, the forecast looks plausible** for a task that involves time and risk factors. Where to go next?- LSTMs and CNNs can give better results than a plain NN on the same data. Literature and useful links:- [Анализ временных рядов с помощью Python](https://habr.com/ru/company/ods/blog/327242/)- [Сравнение моделей временных рядов](https://basegroup.ru/community/bank/compare-model)- [Holt-Winters Forecasting for Dummies (or Developers)](https://grisha.org/blog/2016/01/29/triple-exponential-smoothing-forecasting/)- [Open Machine Learning Course. Topic 9. Part 1. Time series analysis in Python](https://medium.com/open-machine-learning-course/open-machine-learning-course-topic-9-time-series-analysis-in-python-a270cb05e0b3)- [Open Machine Learning Course. Topic 9. Part 2.
Predicting the future with Facebook Prophet](https://medium.com/open-machine-learning-course/open-machine-learning-course-topic-9-part-3-predicting-the-future-with-facebook-prophet-3f3af145cdc)- [Notes on regression and time series analysis](https://people.duke.edu/~rnau/411home.htm)- [The Time Series They Are a-Changing: Why all good models eventually fail](https://towardsdatascience.com/the-time-series-they-are-a-changing-why-all-good-models-eventually-fail-24a96a5f48d3)- [Two key challenges for time series analysis](https://medium.com/wwblog/two-key-challenges-for-time-series-analysis-e18f6eaa098f)- [Introduction to Interactive Time Series Visualizations with Plotly in Python](https://towardsdatascience.com/introduction-to-interactive-time-series-visualizations-with-plotly-in-python-d3219eb7a7af)- [Being Bayesian and thinking deep: time-series prediction with uncertainty](https://medium.com/@dan_90739/being-bayesian-and-thinking-deep-time-series-prediction-with-uncertainty-25ff581b056c)- [Facing the ARIMA Model against Neural Networks](https://towardsdatascience.com/facing-the-arima-model-against-neural-networks-745ba5a933ca)- [Stationarity test for time series](https://medium.com/swlh/stochasticity-test-for-time-series-5312f1325700)- [Time Series Decomposition and StatsModels Parameters](https://medium.com/@amitrani/time-series-decomposition-and-statsmodels-parameters-69e54d035453)- [Получение котировок акций при помощи Python](https://habr.com/ru/post/487644/)- [TensorFlow Time Series Forecasting](https://www.tensorflow.org/tutorials/structured_data/time_series)- [Descriptive statistics in Time Series Modelling](https://towardsdatascience.com/descriptive-statistics-in-time-series-modelling-db6ec569c0b8)- [Keras documentation](https://keras.io/)- [Методы оптимизации нейронных сетей](https://habr.com/ru/post/318970/)- [Функции активации нейросети](https://neurohive.io/ru/osnovy-data-science/activation-functions/)passReflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func, inspect engine = create_engine("sqlite:///data/hawaii.sqlite") inspector = inspect(engine) inspector.get_table_names() # reflect an existing database into a new model using automap_base() Base = automap_base() # reflect the tables with Base.prepare(), passing in the engine and reflect=True Base.prepare(engine, reflect=True) # We can view all of the classes that automap found with Base.classes Base.classes.keys() # Save references to each table measurement = Base.classes.measurement station = Base.classes.station # Create our Session() and bind it to the engine session = Session(engine)Exploratory Climate Analysis Design a query to retrieve the last 12 months of precipitation data# Design a query to retrieve the last 12 months of precipitation data and plot the results # Calculate the date 1 year ago from the last data point in the database. 
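# (Sketch, not part of the original notebook.) The latest date can also be fetched with an aggregate
# instead of order_by(...).first(), assuming the same `measurement` mapping and `func` import:
#   latest = session.query(func.max(measurement.date)).scalar()
# Either way the date comes back as a '%Y-%m-%d' string, which is then parsed with strptime below.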
last_measurement_data_point = session.query(measurement.date).order_by(measurement.date.desc()).first() latest_date = last_measurement_data_point [0] latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d') latest_date = latest_date.date() date_year_ago = latest_date - relativedelta(years=1) # Use session.query() to retrieve the date and prcp columns, .filter() by the date you calculated above, and selecting .all() results last_year_data = session.query(measurement.date, measurement.prcp).filter(measurement.date >= date_year_ago).all() # Save the query results as a Pandas DataFrame() and set the index to the date column # Save the query results as a Pandas DataFrame and set the index to the date column climate_df = pd.DataFrame(last_year_data, columns = ['date', 'prcp']) #drop nulls climate_df = climate_df.dropna(how="any") #set index climate_df = climate_df.set_index("date") # Sort the dataframe by date climate_df = climate_df.sort_values(by=['date']) climate_df.head(100) # Use Pandas Plotting with Matplotlib to plot the data ax= climate_df.plot.bar(figsize=(16,12), width=20, color='blue') ax.set_xlabel("Date", fontsize=16) ax.set_ylabel("Precipitation", fontsize=16) ax.set_xticklabels([]) ax.set_title(f"Amount of Precipitation (in) from {date_year_ago} to {latest_date}") plt.legend(['Precipitation'], fontsize = 28) ax.get_legend().set_bbox_to_anchor((0.6, 1)) plt.savefig("Images/CLIMATE.png") plt.show() # Use Pandas to calculate the summary statistics for the precipitation data stats = climate_df["prcp"].describe() stats_df = pd.DataFrame(stats) stats_df.rename(columns = {"prcp": "Precipitation"})Design a query to show how many stations are available in this dataset# What are the most active stations? (i.e. what stations have the most rows)? # List the stations and the counts in descending order. Hint: session.query() the station column in the Station table session.query(measurement.station, func.count(measurement.date)).group_by(measurement.station).\ order_by(func.count(measurement.date).desc()).all() # Using the station id from the previous query, calculate the lowest temperature recorded, # highest temperature recorded, and average temperature of the most active station most_active_station=session.query(measurement.station).group_by(measurement.station).\ order_by(func.count(measurement.date).desc()).first() most_active_station_id = most_active_station[0] most_active_station_id # Choose the station with the highest number of temperature observations. 
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram temp_observation = session.query(measurement.date).\ order_by(measurement.date.desc()).\ filter(measurement.station == most_active_station_id).first() latest_date = temp_observation[0] latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d') latest_date = latest_date.date() date_year_ago = latest_date - relativedelta(years=1) last_year_data = session.query(measurement.date, measurement.tobs).\ filter(measurement.station == most_active_station_id).\ filter(measurement.date >= date_year_ago).all() last_year_data_df = pd.DataFrame(last_year_data, columns=['date', 'tobs']) last_year_data_df #Histogram df = pd.DataFrame(last_year_data_df, columns=['tobs']) df.plot.hist(bins=12) plt.title(f"Amount of Precipitation") plt.tight_layout() plt.xlabel('Temperature Observations (tobs)') plt.savefig("Images/Frequency.png") plt.show()Bonus Challenge Assignment# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return (session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)) .filter(measurement.date >= start_date) .filter(measurement.date <= end_date) .all()) # function usage example print(calc_temps('2012-02-28', '2012-03-05')) # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. trip_start_date = dt.date(2017, 5, 10) trip_end_date = dt.date(2017, 5, 16) print(calc_temps(trip_start_date, trip_end_date)) temps_list = calc_temps(trip_start_date, trip_end_date) # Plot the results from your previous query as a bar chart. # Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) df = pd.DataFrame(temps_list, columns=["t_min", "t_avg", "t_max"]) # plot the average temp plt.figure(figsize=(6,6)) ax=df["t_avg"].plot.bar(yerr=(df["t_max"]-df["t_min"]), color="red") plt.title("Trip Average Temp") plt.ylabel("Temp(F)") ax.axes.get_xaxis().set_visible(False) # Save the plot plt.savefig("Images/avg_temp.png") plt.show() # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation sel = [measurement.station, station.name, station.latitude, station.longitude, station.elevation, func.sum(measurement.prcp)] total_rainfall_per_weather_station = session.query(*sel).\ filter(measurement.station == station.station).\ filter(measurement.date >= trip_start_date).\ filter(measurement.date <= trip_end_date).\ group_by(measurement.station).\ order_by(func.sum(measurement.prcp).desc()).all() rainfall_df = pd.DataFrame(total_rainfall_per_weather_station, columns=["Station", "Name", "Latitude", "Longitude", "Elevation", "Sum/Total Precipitation"]) rainfall_df # Create a query that will calculate the daily normals # (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. 
Args: date (str): A date string in the format '%m-%d' Returns: A list of tuples containing the daily normals, tmin, tavg, and tmax """ return (session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)) .filter(func.strftime("%m-%d", measurement.date) == date) .all()) daily_normals("01-01") # calculate the daily normals for your trip # push each tuple of calculations into a list called `normals` # Set the start and end date of the trip start = dt.datetime.strftime(trip_start_date, "%m-%d") end = dt.datetime.strftime(trip_end_date, "%m-%d") # Use the start and end date to create a range of dates start = dt.datetime.strptime(start, "%m-%d") end = dt.datetime.strptime(end, "%m-%d") #use the start and end to create range date_array = (start + dt.timedelta(days=x) for x in range(0, ((end-start).days)+1)) # Stip off the year and save a list of %m-%d strings date_list = [] for date_object in date_array: string_date = date_object.strftime("%m-%d") date_list.append(string_date) # Loop through the list of %m-%d strings and calculate the normals for each date daily_normals_list = [] for date in date_list: daily_normals_list.append(daily_normals(date)) min_temp_list = [] avg_temp_list = [] max_temp_list = [] print(daily_normals_list) for daily_normals_item in daily_normals_list: for min_temp, avg_temp, max_temp in daily_normals_item: min_temp_list.append(min_temp) avg_temp_list.append(avg_temp) max_temp_list.append(max_temp) # Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index daily_normals_df = pd.DataFrame({ "trip_dates": date_list, "tmin": min_temp_list, "tavg": avg_temp_list, "tmax": max_temp_list }) daily_normals_df = daily_normals_df.set_index("trip_dates") daily_normals_df # Plot the daily normals as an area plot with `stacked=False` ax = daily_normals_df.plot.area(stacked=False) trip_start = trip_start_date.replace(year = trip_start_date.year + 1) trip_end = trip_end_date.replace(year = trip_end_date.year + 1) ax.set_title(f"Daily Normals from {trip_start} to {trip_end}") plt.xticks(rotation=45) ax.set_xlabel("Date") ax.set_ylabel("Temperature (F)") plt.savefig("Images/daily_normals.png") plt.show()决策树>本章实现了决策树 ID3 算法,进行了一个实例实战:* 推荐隐形眼镜类型。>本 Notebook 采用 sklearn 进行实例演示,并结合 pydotplus 可视化决策树。**决策树算法*** 优点:计算复杂度不高、输出结果易于理解、对中间值的缺失不敏感、可以处理不相关特征数据。* 缺点:容易过拟合。* 适用范围:数据型和标称型。df = pd.read_table('data/ch3/lenses.txt', header=None, names=['age', 'prescript', 'astigmatic', 'tearRate', 'lense']) df.head() # 由于 sklearn 并不支持 categorical data 决策树,将 categorical data 转换为 numerical data le = preprocessing.LabelEncoder() x = df[['age', 'prescript', 'astigmatic', 'tearRate']].apply(lambda x: le.fit_transform(x)) y = le.fit_transform(df['lense']) y # 构建决策树 clf = tree.DecisionTreeClassifier(criterion='entropy') # 训练 clf.fit(x, y) # 决策树可视化 # 可视化的决策树为 numerical data ,若要显示为 categorical data,可修改 dot 文件 # tree.export_graphviz(clf, out_file=output_file) 可保持 dot 文件 name = x.columns label = ["hard","no lenses", "soft"] dot_data = tree.export_graphviz(clf, out_file=None,feature_names=name, class_names=label,filled=True) graph = pydotplus.graph_from_dot_data(dot_data) #graph.write_png('tree.png') Image(graph.create_png())SIT742: Modern Data Science **(Week 03: Data Wrangling)**---- Materials in this module include resources collected from various open-source online repositories.- You are free to use, change and distribute this package.- If you found any issue/bug for this document, please submit an issue at 
[tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues)Prepared by **SIT742 Teaching Team**--- Session 3C - Parsing XML Files Table of Content* Part 1. Loading and Exploring an XML file* Part 2. Extracting XML data into DataFrame* Part 3. Summary---[XML](https://www.w3.org/XML/), Extensible Markup Language, is a markup language much like HTML.It is a simple and flexible data format that defines a set of rules for encoding documents in a way that is both human and machine readable. As a self-descriptive markup language, XML plays an important role in many information systems. It stores data in plain text format, which provides a platform-independent way of storing, transporting, and sharing data. In this chapter we are going to learn how to parse and extract data from XML files with Python.First and foremost, you will need to have some basic understanding about XML.There are a lot of good introductory materials freely available online. We suggest the following two sections of Chapter 12 in "**Dive Into Python 3**":* [12.2 A 5-Minute Crash Course in XML](http://www.diveintopython3.net/xml.htmlxml-intro) 📖* [12.3 The Structure Of An Atom Feed](http://www.diveintopython3.net/xml.htmlxml-structure) 📖If you are quite familiar with XML, you can skip the above materials and jump directly into the parsing sections.XML files are not as easy as the CSV or JSON files to preview and understand.The data we are going to parse is the XML version for the "Melbourne bike share" dataset downloaded from[data.gov.au](https://data.melbourne.vic.gov.au/Transport-Movement/Melbourne-bike-share/tdvh-n9dv).Let's first open the file in your favorite editor to preview it. Note that it is always necessary to inspect the file before we parse it, as the inspection can give an idea of what the format of the file is, what information it stores, etc. If you scroll through the opened file, you will find that the data has been encompassed in XML syntax, using things called tags. The following figure shows a snippet of the data.![XML](https://github.com/tulip-lab/sit742/raw/master/Jupyter/image/xml_example.png "XML File")After inspecting the file, you should find that data values can be stored in two places in an XML file, which are:* in between two tags, for example, ```html Harbour Town - Docklands Dve - Docklands ``` where the value is "Harbour Town - Docklands Dve - Docklands" for the tag.* as an attribute of a tag, for example ```html ``` where the value of latitude is -37.814022 and longitude is 144.939521. The attributes in XML store rich information about a specific tag.Comparing XML with JSON, you will find that the XML tags and attributes hold data in a similar way to the JSON keys. The advantage of XML is that each tag in XML can hold more than one attribute, andmore values can be stored in one node. See the "coordinate" tag above.Now, how can we extract data stored either in between tags or as attributes?The goal is to parse the XML file, extract relevant information, and store the information in Pandas DataFrame that looks like![XML](https://github.com/tulip-lab/sit742/raw/master/Jupyter/image/parsed_xml.png "XML File")In the following sections, we will demonstrate the process of loading and exploring a XML file, extractingdata from the XML file and storing the data in Pandas DataFrame.* * * 1. 
Loading and Exploring an XML filePython can parse XML files in many ways.You can find several Python libraries for parsing XML from [" XML Processing Modules"](https://docs.python.org/2/library/xml.html).Here we will show you how to use the following Python librariesto parse our XML file.* ElementTree* lxml* beautifulsoupThere are a couple of good materials worth reading* The office ElementTree [API](https://docs.python.org/2/library/xml.etree.elementtree.htmlmodule-xml.etree.ElementTree) documentation, which provides not only the API reference but also a short tutorial on using ElementTree. 📖* [Parsing XML](http://www.diveintopython3.net/xml.htmlxml-parse), Section 12.4 in Chapter 12 of "**Dive into Python**" does a good job on elaborating the process of parsing an example XML file with ElementsTree. 📖If you are a visual learner, we suggest the following YouTube video* [Parsing XML files in Python](https://www.youtube.com/watch?v=c2qlCZhkwtE)We strongly suggest that you read these materials, although we are going to reproduce some of their contentalong with our own XML file.Let's start with ElementTree. There are several ways to import the data, which depends on how the data is stored.Here we will read the file from disk.!pip install wget import wget link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/Melbourne_bike_share.xml' DataSet = wget.download(link_to_data) !ls import xml.etree.ElementTree as etree tree = etree.parse("Melbourne_bike_share.xml")In the ElementTree API, an element object is designed to store data in a hierarchical structure according to the XML tag structure.Each element has a number of properties associated with it, for example, a tag, a text string,a set of attributes and a set of child elements.The parse() function is one of the entry points of the ElementTree library.It parses the entire XML document at once into an ElementTree object that contains a hierarchy of Element objects. see ["How ElementTree represents XML"](http://infohost.nmt.edu/tcc/help/pubs/pylxml/web/etree-view.html). 📖The first element in every XML document is called the root element,and an XML document can only have one root.However, the returning ElementTree object is not the root element. Instead, it represents the entire document.To get a reference to the root element, call getroot() method.root = tree.getroot() root.tagAs expected, the root element is the response element. See the original XML file.You can also check the number of children of the root element by typing```python len(root)```It will give you one. To get the only child, one can use the getchildren() method.But it will result in a warning messagethat looks like ```python /Users/land/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:2: DeprecationWarning: This method will be removed in future versions. Use 'list(elem)' or iteration over elem instead. from ipykernel import kernelapp as app.```This is because the method has already been deprecated in Python 2.7.Indeed, an element acts like a list in the ElementTree API.The items of the list are the element’s children.for child in root: print (child)The root list only contains its direct children elements. The children elements of each entry in the list are not included. Each element can also have its own set of attributes. The attrib property of an element is a mutable Python dictionary. Does the root have attributes? Let's check it out.root.attribIt returns a empty dictionary. 
So far, the element tree seems to be empty.Now you need to either examine the original xml to discover the structure,or further traverse the element hierarchy by iteratively printing out all the elements and data contained therein .The root element has only one child.It can be accessed by index, for example:```python root[0]```A FOR loop can be used to print out all the children of root[0].print ("the total number of rows: ", len(root[0])) for child in root[0]: print (child)The tag of each child is the same, called 'row', which stores information about one bike station.Let's keep on retrieving the children of these rows. Instead of doing that for all the rows, we retrieve the children of root[0][0] and that should correspond to the first record.for child in root[0][0]: print (child)Fortunately, the tags of the retrieved child elements correspond to the column names in the DataFrame.Thus, all the tags storing the data we want have been found. To confirm it you can inspect the original XML file or simply look at the figure shown in Section 1. Another way of exploring the element hierarchy is to use the iteration function of ElementTree, `iter()`.The iterator loops over all elements in the tree, in section order.Each element is represented as a Python tuple, where the first entry is a tag,the second is the text, and the last is a dictionary of attributes.for elem in tree.iter(): print (elem.tag, elem.text, elem.attrib)Besides ElementTree, there are other Python libraries that can be used to parse XML files.Here we show two of them, which are **`lxml`** and **`BeautifulSoup`**. 1.1 The lxml package[**`lxml`**](http://lxml.de) is an open source third-party library that builds on top of two C libraries libxml2 and libxslt.It is mostly compatible but superior to the well-known ElementTree API.To study **`lxml`** in detail, you should refer to:* [the lxml.etree tutorial](http://lxml.de/tutorial.html), a tutorial on XML processing with lxml.etree.* and [Going Further With lxml](http://www.diveintopython3.net/xml.htmlxml-lxml), Section 12.6 in Chapter 12 of "**Dive into Python 3**". 📖 Here we are going to briefly show you how to extract the text content of an element treeusing **XPath**.**XPath** allows you to extract the separate text chunks into a list:from lxml import etree ltree = etree.parse("Melbourne_bike_share.xml") for el in ltree.xpath('descendant-or-self::text()'): print (el)In the xpath() function,the descendant-or-self:: is an axis selector that limits the search to the context node, its children, their children, and so on out to the leaves of the tree. The text() function selects only text nodes, discarding any elements, comments, and other non-textual content. The return value is a list of strings.Read [XPath processing](http://infohost.nmt.edu/tcc/help/pubs/pylxml/web/xpath.html) 📖 for a short introductionto `xpath` and [W3C's website on Xpath](http://www.w3.org/TR/xpath/) for a detailed introduction to XPath.Note that lxml is significantly faster than the built-in ElementTree library on parsing large xml documents.If your XML files are very large, you should consider using lxml. 1.2 The Beautiful Soup Pacakge[Beautiful Soup](http://www.crummy.com/software/BeautifulSoup/) is an another Python library for pulling data out of HTML and XML files. It provides Pythonic idioms for iterating, searching, and modifying the parsed tree.We begin by reading in our XML file and creating a Beautiful Soup object with the BeautifulSoup function. 
In regard to the assessment, we suggest the use of beautiful soup.from bs4 import BeautifulSoup btree = BeautifulSoup(open("Melbourne_bike_share.xml"),"lxml-xml")There are two different ways of passing an XML document into the BeautifulSoup constructor.One is to pass in a string, another is to parse an open filehandle. the above example follows the second approach.The second argument is the parser to be used to parse the document.Beautiful Soup presents the same interface to a number of different parsers, but each parser is different. Different parsers will create different parsed trees from the same document.print(btree.prettify())The soup object contains all of the XML content in the original document.The XML tags contained in the angled brackets provide structural information (and sometimes formatting).If you were to take a moment to print out the parsed tree, you would find Beautiful Soup did a good job.It provides a structural representation of the original XML document. Now it is easy for you to eyeball the document and the tags or attributes containing the data we want. We will stop here and leave the extraction of the data with Beautiful Soup as a simple exercise for you.The documentation of how to use Beautiful Soup can be found [here](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).* * * 2. Extracting XML data into DataFrameSo far we have loaded XML into an element tree and have also found all the tags that contain the data we want. We have worked with our XML file in a top-down fashion, starting with the root element, then getting its child elements, and so on. We have also gained a brief idea of **lxml** and **beautiful soup**.This section will show you how to extract the data from all the tags and put it into Pandas DataFrame, a commonand standard storage structure we used in the previous chapter. This structure will also be used in the following chapters. Before we walk through the extracting process, please read: * [Searching For Nodes Within An XML Document](http://www.diveintopython3.net/xml.htmlxml-find) Section 12.5 in Chapter 12 of "**Dive into Python 3**". 📖 Let's first just look at one tag, i.e., '*featurename*'.Since we don't know where it is, the code should loop over all the elements in the tree.To produce a simple list of the featurenames, the logic could be simplified using `findall()` to look for all the elements with tag name '*featurename*'.Both the ElementTree and the Element classes implement `findall(match)` function.The one implemented by the ElementTree class finds all the matched subelements starting from root.The other implemented by the Element finds those sub-elements starting from a given Element in the tree.All the matched elements returned by the function are stored in a list.The `match` argument should take values on either tag names or paths to specific tags.Try ```python tree.findall('featurename')```and ```python tree.findall('row/featurename')```What did you get?The '*featurename*' tag is not the child or grandchild of the root element.In order to get all the '*featurename*', we should first figure out the path from the root to the '*featurename*' tag.By looking at the original file or basing on what we learnt from the previous section, we know the path is```html row/row/featurename```Thus,elements = tree.findall('row/row/featurename') elementsThe above list should contain 50 Elements corresponding to '*featurename*'.As you may notice, the items returned by findall() are Element objects, each representing a node in theXML parse tree. 
What we want is the data stored in those objects.To pull out the data, we can access the element properpties: tag, attrib and text.featurename = [elem.text for elem in elements] featurenameYou might wonder whether there is another way to extract the text stored in the '*featurename*' tag.It might be possible that the structure of an XML file is quite complex (more complex that our example XML file) and it is not easy to figure out the path. There are other ways to search for descendant elements, i.e., children, grandchildrens, and any element at any nesting level. Using the same function, findall(), we can construct an XPath argument to look for all'*featurename*' elements.tree.findall('.//featurename')It is very similar to the previous example, except for the two forward slashes at the beginning of the query.The two forward slashes are short for /descendant-or-self::node()/. Here .//featurename selects any 'featurename' element in the XML document. Similarly, we can extract the text with Element.text.Remember that to visit the elements in the XML document in order, you can use iter() to create an iterator that iterates over all the ElementTree instances in a tree.We have shown you how to explore the element hierarchy with this iteration fucntion.Here you are going to learn how to find specifc elements.[ElementTree's API](https://docs.python.org/2/library/xml.etree.elementtree.htmlxml.etree.ElementTree.Element.findall)shows that iter() function can take an argument tag.If the tag is specified, the iterator loops over all elements in the tree and returns a list of elements having the specified tag.featurename = [] for elem in tree.iter(tag = 'featurename'): featurename.append(elem.text) featurenameThe code pulls out data from all elements with a tag equal to '*featurename*', and stores the text in a list.Similarly, you can retrieve data from elements having the following tags: 'id', 'terminalname', 'nbbikes','nbemptydoc', and 'uploaddate' as follows. Note that we only print out the first 10 records of the retrieved data.id = [] for elem in tree.iter(tag='id'): id.append(elem.text) id[:10] terminalname = [] for elem in tree.iter(tag='terminalname'): terminalname.append(elem.text) terminalname[:10] nbbikes = [] for elem in tree.iter(tag='nbbikes'): nbbikes.append(elem.text) nbbikes[:10] nbemptydoc = [] for elem in tree.iter(tag='nbemptydoc'): nbemptydoc.append(elem.text) nbemptydoc[:10] uploaddate = [] for elem in tree.iter(tag='uploaddate'): uploaddate.append(elem.text) uploaddate[:10]As mentioned in the introduction section, latitudes and longitudesare stored as attributes in 'coordinates' elements. Extracting them needs to access specific attributes that correspondsto latitude and longitude.Recall that attributes are dictionaries. To extract a specific attribute value, you can use the square brackets along with the attribute name as the key to obtainits value.Let's first extract all the latitudes and longitudes and store them in two lists,"lat" and "lon" respectively.lat = [] lon = [] for elem in tree.iter(tag='coordinates'): lat.append(elem.attrib['latitude']) lon.append(elem.attrib['longitude']) print (lat[0:10]) print (lon[0:10])The last step is to store the extracted data into Pandas DataFrame.There are multiple ways of constructing a DataFrame object. 
Here you are going to generate a DataFrame by passing a Python dictionary to DataFrame's constructorand setting the index to IDs.import pandas as pd dataDict = {} dataDict['Featurename'] = featurename dataDict['TerminalName'] = terminalname dataDict['NBBikes'] = nbbikes dataDict['NBEmptydoc'] = nbemptydoc dataDict['UploadDate'] = uploaddate dataDict['lat'] = lat dataDict['lon'] = lon df = pd.DataFrame(dataDict, index = id) df.index.name = 'ID' df.head()0.0. IMPORTSimport math import numpy as np import pandas as pd import inflection import seaborn as sns from scipy import stats as ss from matplotlib import pyplot as plt from IPython.display import Image from IPython.core.display import HTML0.1. Helper Functionsdef cramer_v( x, y ): cm = pd.crosstab( x, y ).to_numpy() n = cm.sum() r, k = cm.shape chi2 = ss.chi2_contingency( cm )[0] chi2corr = max( 0, chi2 - (k-1)*(r-1)/(n-1) ) kcorr = k - (k-1)**2/(n-1) rcorr = r - (r-1)**2/(n-1) return np.sqrt( (chi2corr/n) / ( min( kcorr-1, rcorr-1 ) ) ) def jupyter_settings(): %matplotlib inline %pylab inline plt.style.use( 'bmh' ) plt.rcParams['figure.figsize'] = [25, 12] plt.rcParams['font.size'] = 24 display( HTML( '') ) pd.options.display.max_columns = None pd.options.display.max_rows = None pd.set_option( 'display.expand_frame_repr', False ) sns.set() jupyter_settings()0.2. Loading datadf_sales_raw = pd.read_csv( '../input/datasetsprod/Data/train.csv', low_memory=False ) df_store_raw = pd.read_csv( '../input/datasetsprod/Data/store.csv', low_memory=False ) # merge df_raw = pd.merge( df_sales_raw, df_store_raw, how='left', on='Store' )1.0. PASSO 01 - DESCRICAO DOS DADOSdf1 = df_raw.copy()1.1. Rename Columnscols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'] snakecase = lambda x: inflection.underscore( x ) cols_new = list( map( snakecase, cols_old ) ) # rename df1.columns = cols_new1.2. Data Dimensionsprint( 'Number of Rows: {}'.format( df1.shape[0] ) ) print( 'Number of Cols: {}'.format( df1.shape[1] ) )1.3. Data Typesdf1['date'] = pd.to_datetime( df1['date'] ) df1.dtypes1.4. Check NAdf1.isna().sum()1.5. 
Fillout NAdf1.sample() #competition_distance df1['competition_distance'] = df1['competition_distance'].apply( lambda x: 200000.0 if math.isnan( x ) else x ) #competition_open_since_month df1['competition_open_since_month'] = df1.apply( lambda x: x['date'].month if math.isnan( x['competition_open_since_month'] ) else x['competition_open_since_month'], axis=1 ) #competition_open_since_year df1['competition_open_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan( x['competition_open_since_year'] ) else x['competition_open_since_year'], axis=1 ) #promo2_since_week df1['promo2_since_week'] = df1.apply( lambda x: x['date'].week if math.isnan( x['promo2_since_week'] ) else x['promo2_since_week'], axis=1 ) #promo2_since_year df1['promo2_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan( x['promo2_since_year'] ) else x['promo2_since_year'], axis=1 ) #promo_interval month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'} df1['promo_interval'].fillna(0, inplace=True ) df1['month_map'] = df1['date'].dt.month.map( month_map ) df1['is_promo'] = df1[['promo_interval', 'month_map']].apply( lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split( ',' ) else 0, axis=1 ) df1.isna().sum()1.6. Change Data Types# competiton df1['competition_open_since_month'] = df1['competition_open_since_month'].astype( int ) df1['competition_open_since_year'] = df1['competition_open_since_year'].astype( int ) # promo2 df1['promo2_since_week'] = df1['promo2_since_week'].astype( int ) df1['promo2_since_year'] = df1['promo2_since_year'].astype( int )1.7. Descriptive Statisticsnum_attributes = df1.select_dtypes( include=['int64', 'float64'] ) cat_attributes = df1.select_dtypes( exclude=['int64', 'float64', 'datetime64[ns]'] )1.7.1. Numerical Atributes# Central Tendency - mean, meadina ct1 = pd.DataFrame( num_attributes.apply( np.mean ) ).T ct2 = pd.DataFrame( num_attributes.apply( np.median ) ).T # dispersion - std, min, max, range, skew, kurtosis d1 = pd.DataFrame( num_attributes.apply( np.std ) ).T d2 = pd.DataFrame( num_attributes.apply( min ) ).T d3 = pd.DataFrame( num_attributes.apply( max ) ).T d4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() ) ).T d5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() ) ).T d6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T # concatenar m = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index() m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis'] m sns.distplot( df1['competition_distance'], kde=False )1.7.2. Categorical Atributescat_attributes.apply( lambda x: x.unique().shape[0] ) aux = df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)] plt.subplot( 1, 3, 1 ) sns.boxplot( x='state_holiday', y='sales', data=aux ) plt.subplot( 1, 3, 2 ) sns.boxplot( x='store_type', y='sales', data=aux ) plt.subplot( 1, 3, 3 ) sns.boxplot( x='assortment', y='sales', data=aux )2.0. PASSO 02 - FEATURE ENGINEERINGdf2 = df1.copy()2.1. Mapa Mental de HipotesesImage( 'img/MindMapHypothesis.png' )2.2. Criacao das Hipoteses 2.2.1. 
Hipoteses Loja **1.** Lojas com número maior de funcionários deveriam vender mais.**2.** Lojas com maior capacidade de estoque deveriam vender mais.**3.** Lojas com maior porte deveriam vender mais.**4.** Lojas com maior sortimentos deveriam vender mais.**5.** Lojas com competidores mais próximos deveriam vender menos.**6.** Lojas com competidores à mais tempo deveriam vendem mais. 2.2.2. Hipoteses Produto **1.** Lojas que investem mais em Marketing deveriam vender mais.**2.** Lojas com maior exposição de produto deveriam vender mais.**3.** Lojas com produtos com preço menor deveriam vender mais.**5.** Lojas com promoções mais agressivas ( descontos maiores ), deveriam vender mais.**6.** Lojas com promoções ativas por mais tempo deveriam vender mais.**7.** Lojas com mais dias de promoção deveriam vender mais.**8.** Lojas com mais promoções consecutivas deveriam vender mais. 2.2.3. Hipoteses Tempo **1.** Lojas abertas durante o feriado de Natal deveriam vender mais.**2.** Lojas deveriam vender mais ao longo dos anos.**3.** Lojas deveriam vender mais no segundo semestre do ano.**4.** Lojas deveriam vender mais depois do dia 10 de cada mês.**5.** Lojas deveriam vender menos aos finais de semana.**6.** Lojas deveriam vender menos durante os feriados escolares. 2.3. Lista Final de Hipóteses **1.** Lojas com maior sortimentos deveriam vender mais.**2.** Lojas com competidores mais próximos deveriam vender menos.**3.** Lojas com competidores à mais tempo deveriam vendem mais. **4.** Lojas com promoções ativas por mais tempo deveriam vender mais.**5.** Lojas com mais dias de promoção deveriam vender mais.**7.** Lojas com mais promoções consecutivas deveriam vender mais. **8.** Lojas abertas durante o feriado de Natal deveriam vender mais.**9.** Lojas deveriam vender mais ao longo dos anos.**10.** Lojas deveriam vender mais no segundo semestre do ano.**11.** Lojas deveriam vender mais depois do dia 10 de cada mês.**12.** Lojas deveriam vender menos aos finais de semana.**13.** Lojas deveriam vender menos durante os feriados escolares. 2.4. Feature Engineering# year df2['year'] = df2['date'].dt.year # month df2['month'] = df2['date'].dt.month # day df2['day'] = df2['date'].dt.day # week of year df2['week_of_year'] = df2['date'].dt.weekofyear # year week df2['year_week'] = df2['date'].dt.strftime( '%Y-%W' ) # competition since df2['competition_since'] = df2.apply( lambda x: datetime.datetime( year=x['competition_open_since_year'], month=x['competition_open_since_month'],day=1 ), axis=1 ) df2['competition_time_month'] = ( ( df2['date'] - df2['competition_since'] )/30 ).apply( lambda x: x.days ).astype( int ) # promo since df2['promo_since'] = df2['promo2_since_year'].astype( str ) + '-' + df2['promo2_since_week'].astype( str ) df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime( x + '-1', '%Y-%W-%w' ) - datetime.timedelta( days=7 ) ) df2['promo_time_week'] = ( ( df2['date'] - df2['promo_since'] )/7 ).apply( lambda x: x.days ).astype( int ) # assortment df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' ) # state holiday df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day' )3.0. PASSO 03 - FILTRAGEM DE VARIÁVEISdf3 = df2.copy()3.1. Filtragem das Linhasdf3 = df3[(df3['open'] != 0) & (df3['sales'] > 0)]3.2. 
Selecao das Colunascols_drop = ['customers', 'open', 'promo_interval', 'month_map'] df3 = df3.drop( cols_drop, axis=1 )4.0. PASSO 04 - ANALISE EXPLORATORIA DOS DADOSdf4 = df3.copy()4.1. Analise Univariada 4.1.1. Response Variablesns.distplot( df4['sales'], kde=False )4.1.2. Numerical Variablenum_attributes.hist( bins=25 );4.1.3. Categorical Variable# state_holiday plt.subplot( 3, 2, 1 ) a = df4[df4['state_holiday'] != 'regular_day'] sns.countplot( a['state_holiday'] ) plt.subplot( 3, 2, 2 ) sns.kdeplot( df4[df4['state_holiday'] == 'public_holiday']['sales'], label='public_holiday', shade=True ) sns.kdeplot( df4[df4['state_holiday'] == 'easter_holiday']['sales'], label='easter_holiday', shade=True ) sns.kdeplot( df4[df4['state_holiday'] == 'christmas']['sales'], label='christmas', shade=True ) # store_type plt.subplot( 3, 2, 3 ) sns.countplot( df4['store_type'] ) plt.subplot( 3, 2, 4 ) sns.kdeplot( df4[df4['store_type'] == 'a']['sales'], label='a', shade=True ) sns.kdeplot( df4[df4['store_type'] == 'b']['sales'], label='b', shade=True ) sns.kdeplot( df4[df4['store_type'] == 'c']['sales'], label='c', shade=True ) sns.kdeplot( df4[df4['store_type'] == 'd']['sales'], label='d', shade=True ) # assortment plt.subplot( 3, 2, 5 ) sns.countplot( df4['assortment'] ) plt.subplot( 3, 2, 6 ) sns.kdeplot( df4[df4['assortment'] == 'extended']['sales'], label='extended', shade=True ) sns.kdeplot( df4[df4['assortment'] == 'basic']['sales'], label='basic', shade=True ) sns.kdeplot( df4[df4['assortment'] == 'extra']['sales'], label='extra', shade=True )4.2. Analise Bivariada **H1.** Lojas com maior sortimentos deveriam vender mais.**FALSA** Lojas com MAIOR SORTIMENTO vendem MENOS.aux1 = df4[['assortment', 'sales']].groupby( 'assortment' ).sum().reset_index() sns.barplot( x='assortment', y='sales', data=aux1 ); aux2 = df4[['year_week', 'assortment', 'sales']].groupby( ['year_week','assortment'] ).sum().reset_index() aux2.pivot( index='year_week', columns='assortment', values='sales' ).plot() aux3 = aux2[aux2['assortment'] == 'extra'] aux3.pivot( index='year_week', columns='assortment', values='sales' ).plot()**H2.** Lojas com competidores mais próximos deveriam vender menos.**FALSA** Lojas com COMPETIDORES MAIS PROXIMOS vendem MAIS.aux1 = df4[['competition_distance', 'sales']].groupby( 'competition_distance' ).sum().reset_index() plt.subplot( 1, 3, 1 ) sns.scatterplot( x ='competition_distance', y='sales', data=aux1 ); plt.subplot( 1, 3, 2 ) bins = list( np.arange( 0, 20000, 1000) ) aux1['competition_distance_binned'] = pd.cut( aux1['competition_distance'], bins=bins ) aux2 = aux1[['competition_distance_binned', 'sales']].groupby( 'competition_distance_binned' ).sum().reset_index() sns.barplot( x='competition_distance_binned', y='sales', data=aux2 ); plt.xticks( rotation=90 ); plt.subplot( 1, 3, 3 ) x = sns.heatmap( aux1.corr( method='pearson' ), annot=True ); bottom, top = x.get_ylim() x.set_ylim( bottom+0.5, top-0.5 );**H3.** Lojas com competidores à mais tempo deveriam vendem mais.**FALSE** Lojas com COMPETIDORES À MAIS TEMPO vendem MENOS.plt.subplot( 1, 3, 1 ) aux1 = df4[['competition_time_month', 'sales']].groupby( 'competition_time_month' ).sum().reset_index() aux2 = aux1[( aux1['competition_time_month'] < 120 ) & ( aux1['competition_time_month'] != 0 )] sns.barplot( x='competition_time_month', y='sales', data=aux2 ); plt.xticks( rotation=90 ); plt.subplot( 1, 3, 2 ) sns.regplot( x='competition_time_month', y='sales', data=aux2 ); plt.subplot( 1, 3, 3 ) x = sns.heatmap( aux1.corr( method='pearson'), 
annot=True ); bottom, top = x.get_ylim() x.set_ylim( bottom+0.5, top-0.5);**H4.** Lojas com promoções ativas por mais tempo deveriam vender mais.**FALSA** Lojas com promocoes ativas por mais tempo vendem menos, depois de um certo periodo de promocaoaux1 = df4[['promo_time_week', 'sales']].groupby( 'promo_time_week').sum().reset_index() grid = GridSpec( 2, 3 ) plt.subplot( grid[0,0] ) aux2 = aux1[aux1['promo_time_week'] > 0] # promo extendido sns.barplot( x='promo_time_week', y='sales', data=aux2 ); plt.xticks( rotation=90 ); plt.subplot( grid[0,1] ) sns.regplot( x='promo_time_week', y='sales', data=aux2 ); plt.subplot( grid[1,0] ) aux3 = aux1[aux1['promo_time_week'] < 0] # promo regular sns.barplot( x='promo_time_week', y='sales', data=aux3 ); plt.xticks( rotation=90 ); plt.subplot( grid[1,1] ) sns.regplot( x='promo_time_week', y='sales', data=aux3 ); plt.subplot( grid[:,2] ) sns.heatmap( aux1.corr( method='pearson' ), annot=True );**H5.** Lojas com mais dias de promoção deveriam vender mais. **H7.** Lojas com mais promoções consecutivas deveriam vender mais.**FALSA** Lojas com mais promocoes consecutivas vendem menosdf4[['promo', 'promo2', 'sales']].groupby( ['promo', 'promo2'] ).sum().reset_index() aux1 = df4[( df4['promo'] == 1 ) & ( df4['promo2'] == 1 )][['year_week', 'sales']].groupby( 'year_week' ).sum().reset_index() ax = aux1.plot() aux2 = df4[( df4['promo'] == 1 ) & ( df4['promo2'] == 0 )][['year_week', 'sales']].groupby( 'year_week' ).sum().reset_index() aux2.plot( ax=ax ) ax.legend( labels=['Tradicional & Extendida', 'Extendida']);**H8.** Lojas abertas durante o feriado de Natal deveriam vender mais.**FALSA** Lojas abertas durante o feriado do Natal vendem menos.aux = df4[df4['state_holiday'] != 'regular_day'] plt.subplot( 1, 2, 1 ) aux1 = aux[['state_holiday', 'sales']].groupby( 'state_holiday' ).sum().reset_index() sns.barplot( x='state_holiday', y='sales', data=aux1 ); plt.subplot( 1, 2, 2 ) aux2 = aux[['year', 'state_holiday', 'sales']].groupby( ['year', 'state_holiday'] ).sum().reset_index() sns.barplot( x='year', y='sales', hue='state_holiday', data=aux2 );**H9.** Lojas deveriam vender mais ao longo dos anos.**FALSA** Lojas vendem menos ao longo dos anosaux1 = df4[['year', 'sales']].groupby( 'year' ).sum().reset_index() plt.subplot( 1, 3, 1 ) sns.barplot( x='year', y='sales', data=aux1 ); plt.subplot( 1, 3, 2 ) sns.regplot( x='year', y='sales', data=aux1 ); plt.subplot( 1, 3, 3 ) sns.heatmap( aux1.corr( method='pearson' ), annot=True );**H10.** Lojas deveriam vender mais no segundo semestre do ano.**FALSA** Lojas vendem menos no segundo semestre do anoaux1 = df4[['month', 'sales']].groupby( 'month' ).sum().reset_index() plt.subplot( 1, 3, 1 ) sns.barplot( x='month', y='sales', data=aux1 ); plt.subplot( 1, 3, 2 ) sns.regplot( x='month', y='sales', data=aux1 ); plt.subplot( 1, 3, 3 ) sns.heatmap( aux1.corr( method='pearson' ), annot=True );**H11.** Lojas deveriam vender mais depois do dia 10 de cada mês.**VERDADEIRA** Lojas vendem mais depois do dia 10 de cada mes.aux1 = df4[['day', 'sales']].groupby( 'day' ).sum().reset_index() plt.subplot( 2, 2, 1 ) sns.barplot( x='day', y='sales', data=aux1 ); plt.subplot( 2, 2, 2 ) sns.regplot( x='day', y='sales', data=aux1 ); plt.subplot( 2, 2, 3 ) sns.heatmap( aux1.corr( method='pearson' ), annot=True ); aux1['before_after'] = aux1['day'].apply( lambda x: 'before_10_days' if x <= 10 else 'after_10_days' ) aux2 =aux1[['before_after', 'sales']].groupby( 'before_after' ).sum().reset_index() plt.subplot( 2, 2, 4 ) sns.barplot( 
x='before_after', y='sales', data=aux2 );**H12.** Stores should sell less on weekends.**TRUE** Stores sell less on weekends.aux1 = df4[['day_of_week', 'sales']].groupby( 'day_of_week' ).sum().reset_index() plt.subplot( 1, 3, 1 ) sns.barplot( x='day_of_week', y='sales', data=aux1 ); plt.subplot( 1, 3, 2 ) sns.regplot( x='day_of_week', y='sales', data=aux1 ); plt.subplot( 1, 3, 3 ) sns.heatmap( aux1.corr( method='pearson' ), annot=True );**H13.** Stores should sell less during school holidays.**TRUE** Stores sell less during school holidays, except in July and August.aux1 = df4[['school_holiday', 'sales']].groupby( 'school_holiday' ).sum().reset_index() plt.subplot( 2, 1, 1 ) sns.barplot( x='school_holiday', y='sales', data=aux1 ); aux2 = df4[['month', 'school_holiday', 'sales']].groupby( ['month','school_holiday'] ).sum().reset_index() plt.subplot( 2, 1, 2 ) sns.barplot( x='month', y='sales', hue='school_holiday', data=aux2 );4.2.1. Summary of Hypothesesfrom tabulate import tabulate tab =[['Hypothesis', 'Conclusion', 'Relevance'], ['H1', 'False', 'Low'], ['H2', 'False', 'Medium'], ['H3', 'False', 'Medium'], ['H4', 'False', 'Low'], ['H5', '-', '-'], ['H7', 'False', 'Low'], ['H8', 'False', 'Medium'], ['H9', 'False', 'High'], ['H10', 'False', 'High'], ['H11', 'True', 'High'], ['H12', 'True', 'High'], ['H13', 'True', 'Low'], ] print( tabulate( tab, headers='firstrow' ) )4.3. Multivariate Analysis 4.3.1. Numerical Attributescorrelation = num_attributes.corr( method='pearson' ) sns.heatmap( correlation, annot=True );4.3.2. Categorical Attributes# only categorical data a = df4.select_dtypes( include='object' ) # Calculate Cramer's V for each pair of categorical attributes (cramer_v is a helper defined earlier in the notebook) a1 = cramer_v( a['state_holiday'], a['state_holiday'] ) a2 = cramer_v( a['state_holiday'], a['store_type'] ) a3 = cramer_v( a['state_holiday'], a['assortment'] ) a4 = cramer_v( a['store_type'], a['state_holiday'] ) a5 = cramer_v( a['store_type'], a['store_type'] ) a6 = cramer_v( a['store_type'], a['assortment'] ) a7 = cramer_v( a['assortment'], a['state_holiday'] ) a8 = cramer_v( a['assortment'], a['store_type'] ) a9 = cramer_v( a['assortment'], a['assortment'] ) # Final dataset d = pd.DataFrame( {'state_holiday': [a1, a2, a3], 'store_type': [a4, a5, a6], 'assortment': [a7, a8, a9] } ) d = d.set_index( d.columns ) sns.heatmap( d, annot=True )**Sources**https://towardsai.net/p/data-mining/text-mining-in-python-steps-and-examples-78b3f8fd913b https://www.datacamp.com/community/tutorials/text-analytics-beginners-nltk# Importing necessary libraries import pandas as pd import numpy as np import nltk import os import nltk.corpus nltk.download('punkt') # needed for tokenization nltk.download('stopwords') nltk.download('averaged_perceptron_tagger') # needed for part of speech tagging nltk.download('maxent_ne_chunker') # for named entity recognition nltk.download('words') # for NER[nltk_data] Downloading package punkt to /root/nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package stopwords to /root/nltk_data... [nltk_data] Package stopwords is already up-to-date! [nltk_data] Downloading package averaged_perceptron_tagger to [nltk_data] /root/nltk_data... [nltk_data] Package averaged_perceptron_tagger is already up-to- [nltk_data] date! [nltk_data] Downloading package maxent_ne_chunker to [nltk_data] /root/nltk_data... [nltk_data] Package maxent_ne_chunker is already up-to-date! [nltk_data] Downloading package words to /root/nltk_data...
[nltk_data] Package words is already up-to-date!**Word tokenizing**# importing word_tokenize from nltk from nltk.tokenize import word_tokenize text = "In Brazil they drive on the right-hand side of the road. Brazil has a large coastline on the eastern side of South America" # Passing the string text into word tokenize for breaking the sentences token = word_tokenize(text) token**Sentence Tokenizing**from nltk.tokenize import sent_tokenize text="""Hello , how are you doing today? The weather is great, and city is awesome. The sky is pinkish-blue. You shouldn't eat cardboard""" tokenized_text=sent_tokenize(text) print(tokenized_text)['Hello , how are you doing today?', 'The weather is great, and city is awesome.', 'The sky is pinkish-blue.', "You shouldn't eat cardboard"]**Frequencies**# finding the frequency distinct in the tokens # Importing FreqDist library from nltk and passing token into FreqDist from nltk.probability import FreqDist fdist = FreqDist(token) fdist # To find the frequency of top 10 words fdist1 = fdist.most_common(10) fdist1 # Frequency Distribution Plot import matplotlib.pyplot as plt fdist.plot(30,cumulative=False) plt.show()**Stemming**# Importing Porterstemmer from nltk library # Checking for the word ‘giving’ from nltk.stem import PorterStemmer pst = PorterStemmer() pst.stem("waiting") # Checking for the list of words stm = ["waited", "waiting", "waits"] for word in stm : print(word+ ":" +pst.stem(word)) # Importing LancasterStemmer from nltk, more agressive than porter from nltk.stem import LancasterStemmer lst = LancasterStemmer() stm = ["giving", "given", "gave"] for word in stm : print(word+ ":" +lst.stem(word))giving:giv given:giv gave:gav**Lemmatization**: In simpler terms, it is the process of converting a word to its base form. The difference between stemming and lemmatization is, lemmatization considers the context and converts the word to its meaningful base form, whereas stemming just removes the last few characters, often leading to incorrect meanings and spelling errors.# Importing Lemmatizer library from nltk from nltk.stem import WordNetLemmatizer # nltk.download('wordnet') lemmatizer = WordNetLemmatizer() print("rocks :", lemmatizer.lemmatize("rocks")) print("corpora :", lemmatizer.lemmatize("corpora"))rocks : rock corpora : corpus**Stop Words**# importing stopwors from nltk library from nltk import word_tokenize from nltk.corpus import stopwords stop_words = set(stopwords.words('english')) text = " was born on February 5, 1985, in Funchal, Madeira, Portugal." tokenized_sent = word_tokenize(text.lower()) print(tokenized_sent) filtered_sent = [x for x in tokenized_sent if x not in stop_words] print(filtered_sent) # another way to do it filtered_sent=[] for w in tokenized_sent: if w not in stop_words: filtered_sent.append(w) print("Tokenized Sentence:",tokenized_sent) print("Filterd Sentence:",filtered_sent)Tokenized Sentence: ['cristiano', 'ronaldo', 'was', 'born', 'on', 'february', '5', ',', '1985', ',', 'in', 'funchal', ',', 'madeira', ',', 'portugal', '.'] Filterd Sentence: ['cristiano', 'ronaldo', 'born', 'february', '5', ',', '1985', ',', 'funchal', ',', 'madeira', ',', 'portugal', '.']**Part-of-speech tagging** is used to assign parts of speech to each word of a given text (such as nouns, verbs, pronouns, adverbs, conjunction, adjectives, interjection) based on its definition and its context. 
There are many tools available for POS taggers, and some of the widely used taggers are NLTK, Spacy, TextBlob, Standford CoreNLP, etc.text = "vote to choose a particular man or a group (party) to represent them in parliament" #Tokenize the text tex = word_tokenize(text) for token in tex: print(nltk.pos_tag([token]))[('vote', 'NN')] [('to', 'TO')] [('choose', 'NN')] [('a', 'DT')] [('particular', 'JJ')] [('man', 'NN')] [('or', 'CC')] [('a', 'DT')] [('group', 'NN')] [('(', '(')] [('party', 'NN')] [(')', ')')] [('to', 'TO')] [('represent', 'NN')] [('them', 'PRP')] [('in', 'IN')] [('parliament', 'NN')]**Named entity recognition**It is the process of detecting the named entities such as the person name, the location name, the company name, the quantities, and the monetary value.text = "Google's CEO Sundar Pichai introduced the new Pixel at Minnesota Roi Centre Event" #importing chunk library from nltk from nltk import ne_chunk# tokenize and POS Tagging before doing chunk token = word_tokenize(text) tags = nltk.pos_tag(token) chunk = ne_chunk(tags) print(chunk)(S (GPE Google/NNP) 's/POS (ORGANIZATION CEO/NNP Sundar/NNP Pichai/NNP) introduced/VBD the/DT new/JJ Pixel/NNP at/IN (ORGANIZATION Minnesota/NNP Roi/NNP Centre/NNP) Event/NNP)**Chunking**Chunking means picking up individual pieces of information and grouping them into bigger pieces. In the context of NLP and text mining, chunking means a grouping of words or tokens into chunks.text = "We saw the yellow dog" token = word_tokenize(text) tags = nltk.pos_tag(token) reg = "NP: {
<DT>?<JJ>*<NN>}" a = nltk.RegexpParser(reg) result = a.parse(tags) print(result)(S We/PRP saw/VBD (NP the/DT yellow/JJ dog/NN))Import librariesfrom scraper import * import pandas as pd import isthmuslib as isli from tqdm.auto import tqdm import pathlib import numpy as npSetup paramscache_path: str = str(pathlib.Path.cwd() / '..' / 'data' / '2021_12_29_data.pickle') style: isli.Style = isli.Style(watermark_text=" Isthmus - github.com/mitchellpkt/firo-forensics", watermark_fontsize=12, grid=False)Initialize - load from cache or scrapeif cache_path: df: pd.DataFrame = pd.read_pickle(cache_path).drop(columns='height') else: df: pd.DataFrame = pd.DataFrame({'height': []}) # start from an empty frame before scraping for height in tqdm(range(437410, 438059)): df = df.append(get_metadata(height), ignore_index=True) df.to_pickle(cache_path) df.head() # See the result df.describe()Visualizationsmarker_line = lambda: isli.plt.axvline(437916, color='b', linestyle=':') title: str = f'Firo on-chain data \n{isli.human_time(min(df.timestamp_unix), formatter="%Y-%m-%d %H:%M", include_timezone=False)} - {isli.human_time(max(df.timestamp_unix), formatter="%Y-%m-%d %H:%M")}\n heights: {min(df.block_height):.0f} - {max(df.block_height):.0f}' isli.scatter(df, 'block_height', 'timestamp_unix', xlabel='block_height', ylabel='timestamp', title=f"{title}\n", style=style, legend_strings=[]); marker_line(); isli.hist((diffs := [(x[0] - x[1]) / 60 for x in zip(df.timestamp_unix[1:], df.timestamp_unix)]), title=title, bins=40, xlabel='block discovery time (min)', style=style.override({'watermark_placement': (-0.05, 0.95)})); isli.scatter(df.timestamp_unix[1:], diffs, ylabel='block discovery time (min)', xlabel='timestamp', title=title, style=style.override({'watermark_placement': (-0.05, 0.025)}), log_axes='y', );From node records Import and process the node recordsthreshold = 500_000 df_node_raw: pd.DataFrame = pd.read_csv( pathlib.Path.cwd() / '..' / 'data_local_only' / 'debug-logs' / 'debug01_blocks.csv') df_node: pd.DataFrame = df_node_raw.loc[df_node_raw.loc[:, 'height'] < threshold, :] df_node.sort_values(by='height', ascending=True, inplace=True) df_node.drop_duplicates(inplace=True) df_node.reset_index(inplace=True, drop=True) df_node['timestamp'] = [isli.machine_time(x) for x in df_node.timestamp_human] df_node/home/mglrnd/Projects/GitHub/firo-forensics/env/lib/python3.8/site-packages/pandas/util/_decorators.py:311: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy return func(*args, **kwargs) /tmp/ipykernel_75997/3413099976.py:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy df_node['timestamp'] = [isli.machine_time(x) for x in df_node.timestamp_human]Peek at data qualityThese next few cells are boring, we just identify a duplicate record and remove itisli.basis_quality_plots(df_node.height) marker_line() df_node[df_node.height == 437844] df_node.drop(index=[df_node[df_node.height == 437844].index[0]], inplace=True) df_node.reset_index(inplace=True, drop=True) df_node isli.basis_quality_plots(df_node.height);Review the data from the nodeisli.scatter(df_node, 'height', 'timestamp', legend_strings='', title='Logs from node 1'); marker_line();Join the node data to the chain metadatadf['height'] = df.block_height.astype(int) dfj: pd.DataFrame = pd.DataFrame() for h in df.height: if h in list(df_node.height): try: dfj = dfj.append({ 'height': h, 'timestamp_node': int(df_node[df_node['height'] == h].timestamp) - 7 * 60 * 60, # time zone adjustment 'timestamp_block': int(df[df['block_height'] == h].timestamp_unix)}, ignore_index=True) except Exception as e: print(f"Exception: {e}") dfj['diff'] = dfj['timestamp_node'] - dfj['timestamp_block'] dfjCompare the timestampsisli.scatter(dfj, 'timestamp_node', 'timestamp_block', title=( t := f'Comparing block header timestamp to node records\n{isli.human_time(min(dfj.timestamp_node), formatter="%Y-%m-%d %H:%M", include_timezone=False)} - {isli.human_time(max(dfj.timestamp_node), formatter="%Y-%m-%d %H:%M")}\n')); isli.plt.axvline(x=isli.machine_time('2021-12-30T00:10:00Z'), color='b', linestyle=':'); isli.hist(dfj['diff'], xlabel='(node receipt timestamp) - (block timestamp)', title=t, bins=100);Part 2: Checkouts, Branching, & MergingThis section deals with navigating repository history, creating & mergingbranches, and understanding conflicts The Hangar WorkflowThe hangar workflow is intended to mimic common ``git`` workflows in which smallincremental changes are made and committed on dedicated ``topic`` branches.After the ``topic`` has been adequatly set, ``topic`` branch is ``merged`` intoa separate branch (commonly referred to as ``master``, though it need not be theactual branch named ``"master"``), where well vetted and more permanent changesare kept. Create Branch -> Checkout Branch -> Make Changes -> Commit Making the Initial CommitLet's initialize a new repository and see how branching works in Hangarfrom hangar import Repository import numpy as np repo = Repository(path='foo/pth') repo_pth = repo.init(user_name='Test User', user_email='')When a repository is first initialized, it has no history, no commits.repo.log() # -> returns NoneThough the repository is essentially empty at this point in time, there is onething which is present: A branch with the name: ``"master"``.repo.list_branches()This ``"master"`` is the branch we make our first commit on; until we do, therepository is in a semi-unstable state; with no history or contents, most of thefunctionality of a repository (to store, retrieve, and work with versions ofdata across time) just isn't possible. A significant potion of otherwisestandard operations will generally flat out refuse to to execute (ie. read-onlycheckouts, log, push, etc.) until the first commit is made.One of the only options available at this point in time is to create awrite-enabled checkout on the ``"master"`` branch and begin to add data so wecan make a commit. 
let’s do that now:co = repo.checkout(write=True)As expected, there are no arraysets or metadata samples recorded in the checkout.print(f'number of metadata keys: {len(co.metadata)}') print(f'number of arraysets: {len(co.arraysets)}')number of metadata keys: 0 number of arraysets: 0Let’s add a dummy array just to put something in the repository history tocommit. We'll then close the checkout so we can explore some useful tools whichdepend on having at least on historical record (commit) in the repo.dummy = np.arange(10, dtype=np.uint16) aset = co.arraysets.init_arrayset(name='dummy_arrayset', prototype=dummy) aset['0'] = dummy initialCommitHash = co.commit('first commit with a single sample added to a dummy arrayset') co.close()If we check the history now, we can see our first commit hash, and that it is labeled with the branch name `"master"`repo.log()* 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 (master) : first commit with a single sample added to a dummy arraysetSo now our repository contains:- A commit: a fully independent description of the entire repository state as it existed at some point in time. A commit is identified by a `commit_hash`- A branch: a label pointing to a particular `commit` / `commit_hash`Once committed, it is not possible to remove, modify, or otherwise tamper withthe contents of a commit in any way. It is a permanent record, which Hangar hasno method to change once written to disk.In addition, as a ``commit_hash`` is not only calculated from the ``commit``\ ’scontents, but from the ``commit_hash`` of its parents (more on this to follow),knowing a single top-level ``commit_hash`` allows us to verify the integrity ofthe entire repository history. This fundamental behavior holds even in cases ofdisk-corruption or malicious use. Working with Checkouts & BranchesAs mentioned in the first tutorial, we work with the data in a repository thougha ``checkout``. There are two types of checkouts (each of which have differentuses and abilities):**Checking out a branch/commit for reading:** is the process of retrievingrecords describing repository state at some point in time, and setting up accessto the referenced data.- Any number of read checkout processes can operate on a repository (on any number of commits) at the same time.**Checking out a branch for writing:** is the process of setting up a (mutable)``staging area`` to temporarily gather record references / data before allchanges have been made and staging area contents are ``committed`` in a newpermanent record of history (a ``commit``)- Only one write-enabled checkout can ever be operating in a repository at a time- When initially creating the checkout, the ``staging area`` is not actually “empty”. Instead, it has the full contents of the last ``commit`` referenced by a branch’s ``HEAD``. These records can be removed/mutated/added to in any way to form the next ``commit``. The new ``commit`` retains a permanent reference identifying the previous ``HEAD`` ``commit`` was used as it’s base ``staging area``- On commit, the branch which was checked out has it’s ``HEAD`` pointer value updated to the new ``commit``\ ’s ``commit_hash``. A write-enabled checkout starting from the same branch will now use that ``commit``\ ’s record content as the base for it’s ``staging area``. Creating a branchA branch is an individual series of changes/commits which diverge from the mainhistory of the repository at some point in time. All changes made along a branchare completely isolated from those on other branches. 
After some point in time,changes made in a disparate branches can be unified through an automatic``merge`` process (described in detail later in this tutorial). In general, the``Hangar`` branching model is semantically identical ``Git``; Hangar branchesalso have the same lightweight and performant properties which make working with``Git`` branches so appealing.In hangar, branch must always have a ``name`` and a ``base_commit``. However, Ifno ``base_commit`` is specified, the current writer branch ``HEAD`` ``commit``is used as the ``base_commit`` hash for the branch automatically.branch_1 = repo.create_branch(name='testbranch') branch_1viewing the log, we see that a new branch named: `testbranch` is pointing to our initial commitprint(f'branch names: {repo.list_branches()} \n') repo.log()branch names: ['master', 'testbranch'] * 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 (master) (testbranch) : first commit with a single sample added to a dummy arraysetIf instead, we do actually specify the base commit (with a different branchname) we see we do actually get a third branch. pointing to the same commit as``"master"`` and ``"testbranch"``branch_2 = repo.create_branch(name='new', base_commit=initialCommitHash) branch_2 repo.log()* 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 (master) (new) (testbranch) : first commit with a single sample added to a dummy arraysetMaking changes on a branchLet’s make some changes on the ``"new"`` branch to see how things work. We cansee that the data we added previously is still here (``dummy`` arrayset containingone sample labeled ``0``)co = repo.checkout(write=True, branch='new') co.arraysets co.arraysets['dummy_arrayset'] co.arraysets['dummy_arrayset']['0']Let's add another sample to the `dummy_arrayset` called `1`arr = np.arange(10, dtype=np.uint16) # let's increment values so that `0` and `1` aren't set to the same thing arr += 1 co.arraysets['dummy_arrayset']['1'] = arrWe can see that in this checkout, there are indeed, two samples in the `dummy_arrayset`len(co.arraysets['dummy_arrayset'])That's all, let's commit this and be done with this branchco.commit('commit on `new` branch adding a sample to dummy_arrayset') co.close()How do changes appear when made on a branch?If we look at the log, we see that the branch we were on (`new`) is a commit ahead of `master` and `testbranch`repo.log()* 186f1ccae28ad8f58bcae95dd8c1115a3b0de9dd (new) : commit on `new` branch adding a sample to dummy_arrayset * 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 (master) (testbranch) : first commit with a single sample added to a dummy arraysetThe meaning is exactly what one would intuit. we made some changes, they werereflected on the ``new`` branch, but the ``master`` and ``testbranch`` brancheswere not impacted at all, nor were any of the commits! Merging (Part 1) Fast-Forward MergesSay we like the changes we made on the ``new`` branch so much that we want themto be included into our ``master`` branch! How do we make this happen for thisscenario??Well, the history between the ``HEAD`` of the ``"new"`` and the ``HEAD`` of the``"master"`` branch is perfectly linear. In fact, when we began making changeson ``"new"``, our staging area was *identical* to what the ``"master"`` ``HEAD``commit references are right now!If you’ll remember that a branch is just a pointer which assigns some ``name``to a ``commit_hash``, it becomes apparent that a merge in this case reallydoesn’t involve any work at all. 
With a linear history between ``"master"`` and``"new"``, any ``commits`` exsting along the path between the ``HEAD`` of``"new"`` and ``"master"`` are the only changes which are introduced, and we canbe sure that this is the only view of the data records which can exist!What this means in practice is that for this type of merge, we can just updatethe ``HEAD`` of ``"master"`` to point to the ``"HEAD"`` of ``"new"``, and themerge is complete.This situation is referred to as a **Fast Forward (FF) Merge**. A FF merge issafe to perform any time a linear history lies between the ``"HEAD"`` of some``topic`` and ``base`` branch, regardless of how many commits or changes whichwere introduced.For other situations, a more complicated **Three Way Merge** is required. Thismerge method will be explained a bit more later in this tutorialco = repo.checkout(write=True, branch='master')Performing the MergeIn practice, you’ll never need to know the details of the merge theory explainedabove (or even remember it exists). Hangar automatically figures out which mergealgorithms should be used and then performed whatever calculations are needed tocompute the results.As a user, merging in Hangar is a one-liner!co.merge(message='message for commit (not used for FF merge)', dev_branch='new')Let's check the log!repo.log() co.branch_name co.commit_hash co.arraysets['dummy_arrayset']As you can see, everything is as it should be!co.close()Making a changes to introduce diverged historiesLet’s now go back to our ``"testbranch"`` branch and make some changes there sowe can see what happens when changes don’t follow a linear history.co = repo.checkout(write=True, branch='testbranch') co.arraysets co.arraysets['dummy_arrayset']We will start by mutating sample `0` in `dummy_arrayset` to a different valuedummy_aset = co.arraysets['dummy_arrayset'] old_arr = dummy_aset['0'] new_arr = old_arr + 50 new_arr dummy_aset['0'] = new_arrlet’s make a commit here, then add some metadata and make a new commit (all onthe ``testbranch`` branch)co.commit('mutated sample `0` of `dummy_arrayset` to new value') repo.log() co.metadata['hello'] = 'world' co.commit('added hellow world metadata') co.close()Looking at our history how, we see that none of the original branches referenceour first commit anymorerepo.log()* 836ba8ff1fe552fb65944e2340b2a2ef2b2b62d4 (testbranch) : added hellow world metadata * 2fe5c53a899ba6accbe8c19debd9a489e3baeaed : mutated sample `0` of `dummy_arrayset` to new value * : first commit with a single sample added to a dummy arraysetWe can check the history of the ``"master"`` branch by specifying it asan argument to the ``log()`` methodrepo.log('master')* 1 (master) (new) : commit on `new` branch adding a sample to dummy_arrayset * : first commit with a single sample added to a dummy arraysetMerging (Part 2) Three Way MergeIf we now want to merge the changes on `"testbranch"` into `"master"`, we can't just follow a simple linear history; **the branches have diverged**. 
For this case, Hangar implements a **Three Way Merge** algorithm which does the following:- Find the most recent common ancestor `commit` present in both the `"testbranch"` and `"master"` branches- Compute what changed between the common ancestor and each branch's `HEAD` commit- Check if any of the changes conflict with eachother (more on this in a later tutorial)- If no conflicts are present, compute the results of the merge between the two sets of changes- Create a new `commit` containing the merge results reference both branch `HEAD`s as parents of the new `commit`, and update the `base` branch `HEAD` to that new `commit`'s `commit_hash`co = repo.checkout(write=True, branch='master')Once again, as a user, the details are completely irrelevant, and the operationoccurs from the same one-liner call we used before for the FF Merge.co.merge(message='merge of testbranch into master', dev_branch='testbranch')If we now look at the log, we see that this has a much different look thenbefore. The three way merge results in a history which references changes madein both diverged branches, and unifies them in a single ``commit``repo.log()* fd4a07ada0f138870924fc4ffee47839b77f1fbe (master) : merge of testbranch into master |\ | * 836ba8ff1fe552fb65944e2340b2a2ef2b2b62d4 (testbranch) : added hellow world metadata | * 2fe5c53a899ba6accbe8c19debd9a489e3baeaed : mutated sample `0` of `dummy_arrayset` to new value * | 186f1ccae28ad8f58bcae95dd8c1115a3b0de9dd (new) : commit on `new` branch adding a sample to dummy_arrayset |/ * 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 : first commit with a single sample added to a dummy arraysetManually inspecting the merge result to verify it matches our expectations``dummy_arrayset`` should contain two arrays, key ``1`` was set in the previouscommit originally made in ``"new"`` and merged into ``"master"``. Key ``0`` wasmutated in ``"testbranch"`` and unchanged in ``"master"``, so the update from``"testbranch"`` is kept.There should be one metadata sample with they key ``"hello"`` and the value``"world"``co.arraysets co.arraysets['dummy_arrayset'] co.arraysets['dummy_arrayset']['0'] co.arraysets['dummy_arrayset']['1'] co.metadata co.metadata['hello']**The Merge was a success!**co.close()ConflictsNow that we've seen merging in action, the next step is to talk about conflicts. How Are Conflicts Detected?Any merge conflicts can be identified and addressed ahead of running a ``merge``command by using the built in ``diff`` tools. When diffing commits, Hangar willprovide a list of conflicts which it identifies. In general these fall into 4categories:1. **Additions** in both branches which created new keys (samples / arraysets / metadata) with non-compatible values. For samples & metadata, the hash of the data is compared, for arraysets, the schema specification is checked for compatibility in a method custom to the internal workings of Hangar.2. **Removal** in ``Master Commit/Branch`` **& Mutation** in ``Dev Commit / Branch``. Applies for samples, arraysets, and metadata identically.3. **Mutation** in ``Dev Commit/Branch`` **& Removal** in ``Master Commit / Branch``. Applies for samples, arraysets, and metadata identically.4. **Mutations** on keys both branches to non-compatible values. For samples & metadata, the hash of the data is compared, for arraysets, the schema specification is checked for compatibility in a method custom to the internal workings of Hangar. 
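To make the three-way merge logic and the four conflict categories above concrete, here is a minimal, purely illustrative sketch of per-key merge resolution. It is not Hangar's implementation or API; `merge_key` and `MISSING` are hypothetical names used only for this example.

```python
# Illustrative only -- NOT Hangar's internal implementation.
# Given the value hash of one key in the common ancestor, the master HEAD,
# and the dev HEAD, decide whether the key merges cleanly or conflicts.
MISSING = object()   # hypothetical sentinel meaning "key absent in that commit"

def merge_key(ancestor, master, dev):
    if master == dev:            # both sides agree (including identical additions/removals)
        return ("ok", master)
    if master == ancestor:       # only dev touched the key -> take dev's value
        return ("ok", dev)
    if dev == ancestor:          # only master touched the key -> keep master's value
        return ("ok", master)
    # both sides changed the key and disagree: one of the four conflict categories above
    return ("conflict", (master, dev))

print(merge_key(MISSING, "hash_a", "hash_b"))   # added on both branches with different values -> conflict
print(merge_key("hash_0", "hash_0", "hash_1"))  # mutated only on dev -> ('ok', 'hash_1')
```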
Let's make a merge conflictTo force a conflict, we are going to checkout the ``"new"`` branch and set themetadata key ``"hello"`` to the value ``"foo conflict... BOO!"``. If we then tryto merge this into the ``"testbranch"`` branch (which set ``"hello"`` to a valueof ``"world"``) we see how hangar will identify the conflict and halt withoutmaking any changes.Automated conflict resolution will be introduced in a future version of Hangar,for now it is up to the user to manually resolve conflicts by making anynecessary changes in each branch before reattempting a merge operation.co = repo.checkout(write=True, branch='new') co.metadata['hello'] = 'foo conflict... BOO!' co.commit ('commit on new branch to hello metadata key so we can demonstrate a conflict') repo.log()* 0a0c4dbcfe63ce10fd2a87a98b785ce03099b09e (new) : commit on new branch to hello metadata key so we can demonstrate a conflict * 186f1ccae28ad8f58bcae95dd8c1115a3b0de9dd : commit on `new` branch adding a sample to dummy_arrayset * 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 : first commit with a single sample added to a dummy arrayset**When we attempt the merge, an exception is thrown telling us there is a conflict!**co.merge(message='this merge should not happen', dev_branch='testbranch')HANGAR VALUE ERROR:: Merge ABORTED with conflict: {'aset': ConflictRecords(t1=(), t21=(), t22=(), t3=(), conflict=False), 'meta': ConflictRecords(t1=(MetadataRecordKey(meta_name='hello'),), t21=(), t22=(), t3=(), conflict=True), 'sample': {'dummy_arrayset': ConflictRecords(t1=(), t21=(), t22=(), t3=(), conflict=False)}, 'conflict_found': True}Checking for ConflictsAlternatively, use the diff methods on a checkout to test for conflicts before attempting a merge.merge_results, conflicts_found = co.diff.branch('testbranch') conflicts_found conflicts_found['meta']The type codes for a `ConflictRecords` `namedtuple` such as the one we saw: ConflictRecords(t1=('hello',), t21=(), t22=(), t3=(), conflict=True) are as follow:- ``t1``: Addition of key in master AND dev with different values.- ``t21``: Removed key in master, mutated value in dev.- ``t22``: Removed key in dev, mutated value in master.- ``t3``: Mutated key in both master AND dev to different values.- ``conflict``: Bool indicating if any type of conflict is present. To resolve, remove the conflictdel co.metadata['hello'] co.metadata['resolved'] = 'conflict by removing hello key' co.commit('commit which removes conflicting metadata key') co.merge(message='this merge succeeds as it no longer has a conflict', dev_branch='testbranch')We can verify that history looks as we would expect via the log!repo.log()* b3b097d069f351e5b4688f1ebf30ae1a5aa94f4a (new) : this merge succeeds as it no longer has a conflict |\ * | 9af80ed5df5d893b5e918f1a060cce4c46d9ddec : commit which removes conflicting metadata key * | 0a0c4dbcfe63ce10fd2a87a98b785ce03099b09e : commit on new branch to hello metadata key so we can demonstrate a conflict | * 836ba8ff1fe552fb65944e2340b2a2ef2b2b62d4 (testbranch) : added hellow world metadata | * 2fe5c53a899ba6accbe8c19debd9a489e3baeaed : mutated sample `0` of `dummy_arrayset` to new value * | 186f1ccae28ad8f58bcae95dd8c1115a3b0de9dd : commit on `new` branch adding a sample to dummy_arrayset |/ * 0fd892b7ce9d9d0150c68bb5483876d58c28cbf1 : first commit with a single sample added to a dummy arraysetNote* Instructions have been included for each segment. 
You do not have to follow them exactly, but they are included to help you think through the steps.# Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data = pd.read_csv(file_to_load) purchase_data.head()Player Countplayers = len(purchase_data["SN"].value_counts()) total_players= {"Total Players": [players]} total_players_df = pd.DataFrame(total_players) total_players_df* Display the total number of players Purchasing Analysis (Total) * Run basic calculations to obtain number of unique items, average price, etc.* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame# Number of Unique Items unique_items = len(purchase_data["Item ID"].value_counts()) # Average Price avg_price = (purchase_data["Price"].mean()) # Number of Purchases purchases = (purchase_data["Price"].count()) # Total Revenue revenue = (purchase_data["Price"].sum()) # Make DataFrame purchase_totals = {"Number of Unique Items":[unique_items], "Average Price":[avg_price], "Number of Purchases":[purchases], "Total Revenue":[revenue]} purchase_totals_df = pd.DataFrame(purchase_totals) # Format purchase_totals_df["Average Price"] = purchase_totals_df["Average Price"].map("${:.2f}".format) purchase_totals_df["Total Revenue"] = purchase_totals_df["Total Revenue"].map("${:.2f}".format) purchase_totals_dfGender Demographics * Percentage and Count of Male Players* Percentage and Count of Female Players* Percentage and Count of Other / Non-Disclosedgender_dem = purchase_data.groupby(["Gender"]) gender_dem.count() # total counts by gender male = len(purchase_data.loc[purchase_data["Gender"]=="Male", :]) female =len(purchase_data.loc[purchase_data["Gender"]=="Female", :]) other_non= len(purchase_data.loc[purchase_data["Gender"]=="Other / Non-Disclosed", :]) # percents male_percent = (male/(male + female + other_non))*100 female_percent =(female/(male + female + other_non))*100 percent_other = (other_non/(male + female + other_non))*100 # Make DataFrame gender_dem_df = pd.DataFrame([[male, male_percent],[female, female_percent], [other_non, percent_other]], index=[ "Male", "Female","Other / Non-Disclosed"], columns=["Total Count","Percentage of Players"]) # Format gender_dem_df["Percentage of Players"]= gender_dem_df["Percentage of Players"].map("{:.2f}%".format) gender_dem_dfPurchasing Analysis (Gender) * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame# Grouby gender_dem.count() # Average Purchase Price female_avg_price = purchase_data.loc[purchase_data["Gender"]=="Female", "Price"].mean() male_avg_price = purchase_data.loc[purchase_data["Gender"]=="Male", "Price"].mean() non_avg_price = purchase_data.loc[purchase_data["Gender"]=="Other / Non-Disclosed", "Price"].mean() # Total Purchase Value female_tot_val = purchase_data.loc[purchase_data["Gender"]=="Female", "Price"].sum() male_tot_val = purchase_data.loc[purchase_data["Gender"]=="Male", "Price"].sum() non_tot_val = purchase_data.loc[purchase_data["Gender"]=="Other / Non-Disclosed", "Price"].sum() # Average Purchase Total female_avg_tot_per = female_tot_val / female male_avg_tot_per = male_tot_val / male non_avg_tot_per = non_tot_val / other_non # Create DataFrame per_gender_data_df = pd.DataFrame( [[female, female_avg_price, female_tot_val, female_avg_tot_per], [male, male_avg_price, male_tot_val, male_avg_tot_per], [other_non, non_avg_price, non_tot_val, non_avg_tot_per]], index=["Female","Male", "Other / Non-Disclosed"], columns=["Purchase Count", "Average Purchase Price", "Total Purchase Value", "Avg Purchase Total Per Person"]) # Format dataframe columns per_gender_data_df["Average Purchase Price"] = per_gender_data_df["Average Purchase Price"].map("${:.2f}".format) per_gender_data_df["Total Purchase Value"] = per_gender_data_df["Total Purchase Value"].map("${:.2f}".format) per_gender_data_df["Avg Purchase Total Per Person"] = per_gender_data_df["Avg Purchase Total Per Person"].map("${:.2f}".format) per_gender_data_dfAge Demographics * Establish bins for ages* Categorize the existing players using the age bins. 
Hint: use pd.cut()* Calculate the numbers and percentages by age group* Create a summary data frame to hold the results* Optional: round the percentage column to two decimal points* Display Age Demographics Table# make bins for ages age_bins = [0, 9.90, 14.90, 19.90, 24.90, 29.90, 34.90, 39.90, 99999] group_names = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"] purchase_data["Age"] = pd.cut(purchase_data["Age"], age_bins, labels=group_names) # Columns age_data = purchase_data.groupby(["Age"]) age_data.count() # Totals Counts by Groups under_10= len(purchase_data.loc[purchase_data["Age"]=="<10", :]) a10_14= len(purchase_data.loc[purchase_data["Age"]=="10-14", :]) a15_19= len(purchase_data.loc[purchase_data["Age"]=="15-19", :]) a20_24 = len(purchase_data.loc[purchase_data["Age"]=="20-24", :]) a25_29 = len(purchase_data.loc[purchase_data["Age"]=="25-29", :]) a30_34= len(purchase_data.loc[purchase_data["Age"]=="30-34", :]) a35_39 = len(purchase_data.loc[purchase_data["Age"]=="35-39", :]) over_40 = len(purchase_data.loc[purchase_data["Age"]=="40+", :]) # variable for total total_age = under_10+ a10_14+ a15_19+ a20_24+ a25_29+ a30_34+ a35_39+ over_40 # percentages by groups p_under10 = (under_10/total_age)*100 p10_14 = (a10_14/total_age)*100 p15_19 = (a15_19/total_age)*100 p20_24 = (a20_24/total_age)*100 p25_29 = (a25_29/total_age)*100 p30_34= (a30_34/total_age)*100 p35_39 = (a35_39/total_age)*100 p_over40 = (over_40/total_age)*100 # create DataFrame age_data_df = pd.DataFrame( [[under_10, p_under10],[a10_14,p10_14], [a15_19,p15_19], [a20_24, p20_24], [a25_29, p25_29],[a30_34, p30_34],[a35_39, p35_39], [over_40, p_over40]], index=["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39","40+"], columns=["Total Count", "Percentage of Players"]) # format age_data_df["Percentage of Players"]= age_data_df["Percentage of Players"].map("{:.2f}%".format) age_data_dfPurchasing Analysis (Age) * Bin the purchase_data data frame by age* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table below* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frameage_data.count() # average purchase price u10_avg_price = purchase_data.loc[purchase_data["Age"]=="<10", "Price"].mean() age1014_avg_price = purchase_data.loc[purchase_data["Age"]=="10-14", "Price"].mean() age1519_avg_price = purchase_data.loc[purchase_data["Age"]=="15-19", "Price"].mean() age2024_avg_price = purchase_data.loc[purchase_data["Age"]=="20-24", "Price"].mean() age2529_avg_price = purchase_data.loc[purchase_data["Age"]=="25-29", "Price"].mean() age3034_avg_price = purchase_data.loc[purchase_data["Age"]=="30-34", "Price"].mean() age3539_avg_price = purchase_data.loc[purchase_data["Age"]=="35-39", "Price"].mean() age40_avg_price = purchase_data.loc[purchase_data["Age"]=="40+", "Price"].mean() # Total Purchase Values of each group u10_total = purchase_data.loc[purchase_data["Age"]=="<10", "Price"].sum() age1014_total = purchase_data.loc[purchase_data["Age"]=="10-14", "Price"].sum() age1519_total = purchase_data.loc[purchase_data["Age"]=="15-19", "Price"].sum() age2024_total = purchase_data.loc[purchase_data["Age"]=="20-24", "Price"].sum() age2529_total = purchase_data.loc[purchase_data["Age"]=="25-29", "Price"].sum() age3034_total = purchase_data.loc[purchase_data["Age"]=="30-34", "Price"].sum() age3539_total = purchase_data.loc[purchase_data["Age"]=="35-39", "Price"].sum() age40_total = purchase_data.loc[purchase_data["Age"]=="40+", "Price"].sum() # Average Total Purchase per Person u10_avg_tot = u10_total/ under_10 a1014_avg_tot = age1014_total/a10_14 a1519_avg_tot = age1519_total/a15_19 a2024_avg_tot = age2024_total/a20_24 a2529_avg_tot = age2529_total/a25_29 a3034_avg_tot = age3034_total/a30_34 a3539_avg_tot = age3539_total/a35_39 a40_avg_tot = age40_total/over_40 # Make DataFrame (one row per age bin, matching the index below) per_age_data_df = pd.DataFrame( [[under_10, u10_avg_price, u10_total, u10_avg_tot], [a10_14, age1014_avg_price, age1014_total, a1014_avg_tot], [a15_19, age1519_avg_price, age1519_total, a1519_avg_tot], [a20_24, age2024_avg_price, age2024_total, a2024_avg_tot], [a25_29, age2529_avg_price, age2529_total, a2529_avg_tot], [a30_34, age3034_avg_price, age3034_total, a3034_avg_tot], [a35_39, age3539_avg_price, age3539_total, a3539_avg_tot], [over_40, age40_avg_price, age40_total, a40_avg_tot]], index=["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"], columns= ["Purchase Count", "Average Purchase Price", "Total Purchase Value", "Avg Total Purchase per Person"]) # Format per_age_data_df["Average Purchase Price"] = per_age_data_df["Average Purchase Price"].map("${:.2f}".format) per_age_data_df["Total Purchase Value"] = per_age_data_df["Total Purchase Value"].map("${:.2f}".format) per_age_data_df["Avg Total Purchase per Person"] = per_age_data_df["Avg Total Purchase per Person"].map("${:.2f}".format) per_age_data_dfTop Spenders * Run basic calculations to obtain the results in the table below* Create a summary data frame to hold the results* Sort the total purchase value column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data frametotal_purchase = purchase_data.groupby("SN")["Price"].sum() purchase_count = purchase_data.groupby("SN")["Price"].count() purchase_avg = purchase_data.groupby("SN")["Price"].mean() # build the summary frame and sort by total purchase value (descending) before formatting the currency columns as strings spender_df = pd.DataFrame({"Purchase Count": purchase_count, "Average Purchase Price": purchase_avg, "Total Purchase Value": total_purchase}) spender_df.sort_values("Total Purchase Value", ascending=False, inplace=True) # format spender_df["Average Purchase Price"] = spender_df["Average
Purchase Price"].map("${:.2f}".format) spender_df["Total Purchase Value"] = spender_df["Total Purchase Value"].map("${:.2f}".format) spender_df.head()Most Popular Items * Retrieve the Item ID, Item Name, and Item Price columns* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value* Create a summary data frame to hold the results* Sort the purchase count column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data framepop_tot = purchase_data.groupby("Item Name")["Price"].sum() pop_count = purchase_data.groupby("Item Name")["Price"].count() # mean pop_price = purchase_data.groupby("Item Name")["Price"].mean() # make DataFrame pop_df = pd.DataFrame({"Purchase Count": pop_count, "Avg Item Price": pop_price, "Total Purchase Value": pop_tot}) # sort by count pop_df.sort_values("Purchase Count", ascending = False, inplace=True) # format pop_df["Avg Item Price"] = pop_df["Avg Item Price"].map("${:.2f}".format) pop_df["Total Purchase Value"] = pop_df["Total Purchase Value"].map("${:.2f}".format) pop_df.head()Most Profitable Items * Sort the above table by total purchase value in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the data frame# make new DataFrame prof_df = pd.DataFrame({"Purchase Count": pop_count, "Avg Item Price": pop_price, "Total Purchase Value": pop_tot}) # sort by total purchase value prof_df.sort_values("Total Purchase Value", ascending = False, inplace=True) # format prof_df["Avg Item Price"] = prof_df["Avg Item Price"].map("${:.2f}".format) prof_df["Total Purchase Value"] = prof_df["Total Purchase Value"].map("${:.2f}".format) prof_df.head()Grover's Algorithm In this section, we introduce Grover's algorithm and how it can be used to solve unstructured search problems. We then implement the quantum algorithm using Qiskit, and run on a simulator and device. Contents1. [Introduction](introduction)2. [Example: 2 Qubits](2qubits) 2.1 [Simulation](2qubits-simulation) 2.2 [Device](2qubits-device) 3. [Example: 3 Qubits](3qubits) 3.1 [Simulation](3qubits-simulation) 3.2 [Device](3qubits-device) 4. [Problems](problems)5. [References](references) 1. Introduction You have likely heard that one of the many advantages a quantum computer has over a classical computer is its superior speed searching databases. Grover's algorithm demonstrates this capability. This algorithm can speed up an unstructured search problem quadratically, but its uses extend beyond that; it can serve as a general trick or subroutine to obtain quadratic run time improvements for a variety of other algorithms. This is called the amplitude amplification trick. Unstructured SearchSuppose you are given a large list of $N$ items. Among these items there is one item with a unique property that we wish to locate; we will call this one the winner $w$. Think of each item in the list as a box of a particular color. Say all items in the list are gray except the winner $w$, which is pink.![image1](images/grover_search.png)To find the pink box -- the *marked item* -- using classical computation, one would have to check on average $N/2$ of these boxes, and in the worst case, all $N$ of them. On a quantum computer, however, we can find the marked item in roughly $\sqrt{N}$ steps with Grover's amplitude amplification trick. A quadratic speedup is indeed a substantial time-saver for finding marked items in long lists. 
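As a rough, back-of-the-envelope illustration of that quadratic speedup (this snippet is not part of the original notebook's code), the standard estimate of about $(\pi/4)\sqrt{N}$ Grover iterations can be compared with the $\sim N/2$ checks expected classically:

```python
# Compare expected classical checks with the ~ (pi/4) * sqrt(N) Grover iterations
# for a single marked item in a list of N = 2**n entries.
import math

for n in (10, 20, 30):
    N = 2 ** n
    classical_avg = N / 2                                    # average classical checks
    grover_iters = math.floor(math.pi / 4 * math.sqrt(N))    # standard iteration estimate
    print(f"N = 2^{n}: classical ~ {classical_avg:,.0f} checks, Grover ~ {grover_iters:,} iterations")
```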
Additionally, the algorithm does not use the list's internal structure, which makes it *generic;* this is why it immediately provides a quadratic quantum speed-up for many classical problems. OracleHow will the list items be provided to the quantum computer? A common way to encode such a list is in terms of a function $f$ which returns $f(x) = 0$ for all unmarked items $x$ and $f(w) = 1$ for the winner. To use a quantum computer for this problem, we must provide the items in superposition to this function, so we encode the function into a unitary matrix called an *oracle*. First we choose a binary encoding of the items $x, w \in \{0,1\}^n$ so that $N = 2^n$; now we can represent it in terms of qubits on a quantum computer. Then we define the oracle matrix $U_f$ to act on any of the simple, standard basis states $| x \rangle$ by $U_f | x \rangle = (-1)^{f(x)} | x \rangle.$We see that if $x$ is an unmarked item, the oracle does nothing to the state. However, when we apply the oracle to the basis state $| w \rangle$, it maps $U_f | w \rangle = -| w \rangle$. Geometrically, this unitary matrix corresponds to a reflection about the origin for the marked item in an $N = 2^n$ dimensional vector space. Amplitude AmplificationSo how does the algorithm work? Before looking at the list of items, we have no idea where the marked item is. Therefore, any guess of its location is as good as any other, which can be expressed in terms of auniform superposition: $|s \rangle = \frac{1}{\sqrt{N}} \sum_{x = 0}^{N -1} | x\rangle.$If at this point we were to measure in the standard basis $\{ | x \rangle \}$, this superposition would collapse, according to the fifth quantum law, to any one of the basis states with the same probability of $\frac{1}{N} = \frac{1}{2^n}$. Our chances of guessing the right value $w$ is therefore $1$ in $2^n$, as could be expected. Hence, on average we would need to try about $N = 2^n$ times to guess the correct item.Enter the procedure called amplitude amplification, which is how a quantum computer significantly enhances this probability. This procedure stretches out (amplifies) the amplitude of the marked item, which shrinks the other items' amplitude, so that measuring the final state will return the right item with near-certainty. This algorithm has a nice geometrical interpretation in terms of two reflections, which generate a rotation in a two-dimensional plane. The only two special states we need to consider are the winner $| w \rangle$ and the uniform superposition $| s \rangle$. These two vectors span a two-dimensional plane in the vector space $\mathbb{C}^N.$ They are not quite perpendicular because $| w \rangle$ occurs in the superposition with amplitude $N^{-1/2}$ as well.We can, however, introduce an additional state $|s'\rangle$ that is in the span of these two vectors, which is perpendicular to $| w \rangle$ and is obtained from $|s \rangle$ by removing $| w \rangle$ andrescaling. **Step 1**: The amplitude amplification procedure starts out in the uniform superposition $| s \rangle$, which is easily constructed from $| s \rangle = H^{\otimes n} | 0 \rangle^n$.![image2](images/grover_step1.png)The left graphic corresponds to the two-dimensional plane spanned by perpendicular vectors $|w\rangle$ and $|s'\rangle$ which allows to express the initial state as $|s\rangle = \sin \theta | w \rangle + \cos \theta | s' \rangle,$ where $\theta = \arcsin \langle s | w \rangle = \arcsin \frac{1}{\sqrt{N}}$. 
The right graphic is a bar graph of the amplitudes of the state $| s \rangle$ for the case $N = 2^2 = 4$. The average amplitude is indicated by a dashed line.**Step 2**: We apply the oracle reflection $U_f$ to the state $|s\rangle$.![image3](images/grover_step2.png)Geometrically this corresponds to a reflection of the state $|s\rangle$ about $|s'\rangle$. This transformation means that the amplitude in front of the $|w\rangle$ state becomes negative, which in turn means that the average amplitude has been lowered.**Step 3**: We now apply an additional reflection $U_s$ about the state $|s\rangle$: $U_s = 2|s\rangle\langle s| - \mathbb{1}$. This transformation maps the state to $U_s U_f| s \rangle$ and completes the transformation. ![image4](images/grover_step3.png)Two reflections always correspond to a rotation. The transformation $U_s U_f$ rotates the initial state $|s\rangle$ closer towards the winner $|w\rangle$. The action of the reflection $U_s$ in the amplitude bar diagram can be understood as a reflection about the average amplitude. Since the average amplitude has been lowered by the first reflection, this transformation boosts the negative amplitude of $|w\rangle$ to roughly three times its original value, while it decreases the other amplitudes. We then go to **step 2** to repeat the application. This procedure will be repeated several times to zero in on the winner. After $t$ steps we will be in the state $|\psi_t\rangle$ where: $| \psi_t \rangle = (U_s U_f)^t | s \rangle.$How many times do we need to apply the rotation? It turns out that roughly $\sqrt{N}$ rotations suffice. This becomes clear when looking at the amplitudes of the state $| \psi \rangle$. We can see that the amplitude of $| w \rangle$ grows linearly with the number of applications $\sim t N^{-1/2}$. However, since we are dealing with amplitudes and not probabilities, the vector space's dimension enters as a square root. Therefore it is the amplitude, and not just the probability, that is being amplified in this procedure.In the case that there are multiple solutions, $M$, it can be shown that roughly $\sqrt{(N/M)}$ rotations will suffice.![image5](images/grover_algorithm.png) 2. Example: 2 Qubits Let's first have a look at the case of Grover's algorithm for $N=4$ which is realized with 2 qubits. In this particular case, contrary to inuition, only one rotation is required which will rotate the initial state $|s\rangle$ to the winner $|w\rangle$ which can easily be shown [3]: Following the above introduction, in the case $N=4$ we have $$\theta = \arcsin \frac{1}{2} = \frac{\pi}{6}.$$ After $t$ steps, we have $$(U_s U_f)^t \lvert s \rangle = \sin \theta_t \lvert w \rangle + \cos \theta_t \lvert s' \rangle ,$$where $$\theta_t = (2t+1)\theta.$$ In order to obtain $\lvert w \rangle$ we need $\theta_t = \frac{\pi}{2}$, which with $\theta=\frac{\pi}{6}$ inserted above results to $t=1$. This implies that after $t=1$ rotation the searched element is found. Now let us look into the possible oracles. We have $N=4$ possible elements, i.e. $\lvert 00 \rangle, \lvert 01 \rangle, \lvert 10 \rangle, \lvert 11 \rangle$ and hence require in total $4$ oracles. Oracle for $\lvert w \rangle = \lvert 11 \rangle$Let us start with the case $\lvert w \rangle = \lvert 11 \rangle$. 
The oracle $U_f$ in this case acts as follows: $$U_f \lvert s \rangle = U_f\frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle - \lvert 11 \rangle \right).$$In order to realize the sign flip for $\lvert 11 \rangle$ we simply need to apply a controlled Z gate to the initial state. This leads to the following circuit:![image6](images/grover_circuit_2qbuits_oracle_11.png) Oracle for $\lvert w \rangle = \lvert 00 \rangle$In the case of $\lvert w \rangle = \lvert 00 \rangle$ the oracle $U_f$ acts as follows: $$U_f \lvert s \rangle = U_f\frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( -\lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right).$$In order to realize the sign flip for $\lvert 00 \rangle$ we need to apply an "inverted" controlled Z gate to the initial state leading to the following circuit:![image7](images/grover_circuit_2qbuits_oracle_00.png) Oracles for $\lvert w \rangle = \lvert 01 \rangle$ and $\lvert w \rangle = \lvert 10 \rangle$Following the above logic one can straight forwardly construct the oracles for $\lvert w \rangle = \lvert 01 \rangle$ (left circuit) and $\lvert w \rangle = \lvert 10 \rangle$ (right circuit):![image8](images/grover_circuit_2qbuits_oracle_01_10.png) Reflection $U_s$In order to complete the circuit we need to implement the additional reflection $U_s = 2|s\rangle\langle s| - \mathbb{1}$ which acts as follows $$U_s \frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( \lvert 00 \rangle - \lvert 01 \rangle - \lvert 10 \rangle - \lvert 11 \rangle \right),$$i.e. the signs of each state are flipped except for $\lvert 00 \rangle$. As can easily be verified, one way of implementing $U_s$ is the following circuit:![image9](images/grover_circuit_2qbuits_reflection.png) Full Circuit for $\lvert w \rangle = \lvert 00 \rangle$Since in the particular case of $N=4$ only one rotation is required we can combine the above components to build the full circuit for Grover's algorithm for the case $\lvert w \rangle = \lvert 00 \rangle$:![image10](images/grover_circuit_2qubits_full_00.png)The other three circuits can be constructed in the same way and will not be depicted here. 2.1 Qiskit ImplementationWe now implement Grover's algorithm for the above case of 2 qubits for $\lvert w \rangle = \lvert 00 \rangle$.#initialization import matplotlib.pyplot as plt import numpy as np %matplotlib inline %config InlineBackend.figure_format = 'svg' # Makes the images look nice # importing Qiskit from qiskit import IBMQ, Aer from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute # import basic plot tools from qiskit.visualization import plot_histogramWe start by preparing a quantum circuit with two qubits:n = 2 grover_circuit = QuantumCircuit(n)Then we simply need to write out the commands for the circuit depicted above. 
First, Initialize the state $|s\rangle$:for qubit in range(n): grover_circuit.h(qubit) grover_circuit.draw('mpl')Apply the Oracle for $|w\rangle = |00\rangle$:for qubit in range(n): grover_circuit.x(qubit) grover_circuit.cz(0, 1) for qubit in range(n): grover_circuit.x(qubit) grover_circuit.draw('mpl')Apply a Hadamard operation to both qubits:for qubit in range(n): grover_circuit.h(qubit) grover_circuit.draw('mpl')Apply the reflection $U_s$:for qubit in range(n): grover_circuit.z(qubit) grover_circuit.cz(0, 1) grover_circuit.draw('mpl')Apply the final Hadamard to both qubits:for qubit in range(n): grover_circuit.h(qubit) grover_circuit.draw('mpl')And we can see we have assembled the circuit correctly: 2.1.1 Experiment with Simulators Let's run the circuit in simulation. First, we can verify that we have the correct statevector:backend_sim = Aer.get_backend('statevector_simulator') job_sim = execute(grover_circuit, backend_sim) statevec = job_sim.result().get_statevector() from qiskit_textbook.tools import vector2latex vector2latex(statevec, pretext="|\\psi\\rangle =")Now let us measure the state and create the corresponding histogram experiments:grover_circuit.measure_all() backend = Aer.get_backend('qasm_simulator') shots = 1024 results = execute(grover_circuit, backend=backend, shots=shots).result() answer = results.get_counts() plot_histogram(answer)We confirm that in 100% of the cases the element $|00\rangle$ is found. 2.1.2 Experiment with Real Devices We can run the circuit on the real device as below.# Load IBM Q account and get the least busy backend device provider = IBMQ.load_account() device = least_busy(provider.backends(simulator=False)) print("Running on current least busy device: ", device) # Run our circuit on the least busy backend. Monitor the execution of the job in the queue from qiskit.tools.monitor import job_monitor job = execute(grover_circuit, backend=device, shots=1024, max_credits=10) job_monitor(job, interval = 2) # Get the results from the computation results = job.result() answer = results.get_counts(grover_circuit) plot_histogram(answer)We confirm that in the majority of the cases the element $|00\rangle$ is found. The other results are due to errors in the quantum computation. 3. Example: 3 Qubits We now go through the example of Grover's algorithm for 3 qubits with two marked states $\lvert101\rangle$ and $\lvert110\rangle$, following the implementation found in Reference [2]. 
The quantum circuit to solve the problem using a phase oracle is:![image11](images/grover_circuit_3qubits.png) Apply Hadamard gates to $3$ qubits initialised to $\lvert000\rangle$ to create a uniform superposition: $$\lvert \psi_1 \rangle = \frac{1}{\sqrt{8}} \left( \lvert000\rangle + \lvert001\rangle + \lvert010\rangle + \lvert011\rangle + \lvert100\rangle + \lvert101\rangle + \lvert110\rangle + \lvert111\rangle \right) $$ Mark states $\lvert101\rangle$ and $\lvert110\rangle$ using a phase oracle: $$\lvert \psi_2 \rangle = \frac{1}{\sqrt{8}} \left( \lvert000\rangle + \lvert001\rangle + \lvert010\rangle + \lvert011\rangle + \lvert100\rangle - \lvert101\rangle - \lvert110\rangle + \lvert111\rangle \right) $$ Perform the reflection around the average amplitude: Apply Hadamard gates to the qubits $$\lvert \psi_{3a} \rangle = \frac{1}{2} \left( \lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ Apply X gates to the qubits $$\lvert \psi_{3b} \rangle = \frac{1}{2} \left( -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle +\lvert111\rangle \right) $$ Apply a doubly controlled Z gate between qubits 1, 2 (controls) and qubit 3 (target) $$\lvert \psi_{3c} \rangle = \frac{1}{2} \left( -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ Apply X gates to the qubits $$\lvert \psi_{3d} \rangle = \frac{1}{2} \left( -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ Apply Hadamard gates to the qubits $$\lvert \psi_{3e} \rangle = \frac{1}{\sqrt{2}} \left( -\lvert101\rangle -\lvert110\rangle \right) $$ Measure the $3$ qubits to retrieve states $\lvert101\rangle$ and $\lvert110\rangle$. Note that since there are 2 solutions and 8 possibilities, we will only need to run one iteration (steps 2 & 3). 3.1 Qiskit Implementation We now implement Grover's algorithm for the above [example](3qubits) with $3$ qubits, searching for the two marked states $\lvert101\rangle$ and $\lvert110\rangle$. **Note:** Remember that Qiskit orders its qubits the opposite way round to this resource, so the circuit drawn will appear flipped about the horizontal.We create a phase oracle that will mark states $\lvert101\rangle$ and $\lvert110\rangle$ as the results (step 2).def phase_oracle(circuit): circuit.cz(0, 2) circuit.cz(1, 2)Next we set up the circuit for inversion about the average (step 3), also known as the diffusion operator:def diffuser(circuit): """Apply inversion about the average step of Grover's algorithm.""" qubits = circuit.qubits nqubits = len(qubits) for q in range(nqubits): circuit.h(q) circuit.x(q) # Do controlled-Z circuit.h(2) circuit.ccx(0,1,2) circuit.h(2) for q in range(nqubits): circuit.x(q) circuit.h(q)Now we put the pieces together, with the creation of a uniform superposition at the start of the circuit and a measurement at the end.
Note that since there are 2 solutions and 8 possibilities, we will only need to run one iteration.n = 3 barriers = True grover_circuit = QuantumCircuit(n) for qubit in range(n): grover_circuit.h(qubit) if barriers: grover_circuit.barrier() phase_oracle(grover_circuit) if barriers: grover_circuit.barrier() diffuser(grover_circuit) grover_circuit.measure_all() grover_circuit.draw(output="mpl")3.1.1 Experiment with Simulators We can run the above circuit on the simulator.backend = Aer.get_backend('qasm_simulator') shots = 1024 results = execute(grover_circuit, backend=backend, shots=shots).result() answer = results.get_counts() plot_histogram(answer)As we can see, the algorithm discovers our marked states $\lvert101\rangle$ and $\lvert110\rangle$. 3.1.2 Experiment with Real Devices We can run the circuit on a real device as below.backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and not x.configuration().simulator and x.status().operational==True)) print("least busy backend: ", backend) # Run our circuit on the least busy backend. Monitor the execution of the job in the queue from qiskit.tools.monitor import job_monitor shots = 1024 job = execute(grover_circuit, backend=backend, shots=shots, optimization_level=3) job_monitor(job, interval = 2) # Get the results from the computation results = job.result() answer = results.get_counts(grover_circuit) plot_histogram(answer)As we can (hopefully) see, the algorithm discovers our marked states $\lvert101\rangle$ and $\lvert110\rangle$. The other results are due to errors in the quantum computation. 4. Problems 1. The above [example](example) and [implementation](implementation) of Grover's algorithm find the two marked $3$-qubit states $\lvert101\rangle$ and $\lvert110\rangle$. Modify the implementation to find one marked $2$-qubit state $\lvert01\rangle$. Are the results what you expect? Explain.2. The above [example](example) and [implementation](implementation) of Grover's algorithm find the two marked $3$-qubit states $\lvert101\rangle$ and $\lvert110\rangle$. Modify the implementation to find one marked $4$-qubit state $\lvert0101\rangle$. Are the results what you expect? Explain. 5. References 1. L. K. Grover (1996), "A fast quantum mechanical algorithm for database search", Proceedings of the 28th Annual ACM Symposium on the Theory of Computing (STOC 1996), [doi:10.1145/237814.237866](http://doi.acm.org/10.1145/237814.237866), [arXiv:quant-ph/9605043](https://arxiv.org/abs/quant-ph/9605043) 2. C. Figgatt, D. Maslov, K. A. Landsman, N. M. Linke, S. Debnath & C. Monroe (2017), "Complete 3-Qubit Grover search on a programmable quantum computer", Nature Communications, Vol 8, Art 1918, [doi:10.1038/s41467-017-01904-7](https://doi.org/10.1038/s41467-017-01904-7), [arXiv:1703.10535](https://arxiv.org/abs/1703.10535) 3. M. A. Nielsen & I. L. Chuang, "Quantum Computation and Quantum Information", Cambridge: Cambridge University Press, 2000.import qiskit qiskit.__qiskit_version__Dimensionality reduction The curse of dimensionality Fitting and overfitting get worse with the ''curse of dimensionality'' (Bellman 1961). Think about a hypersphere. Its volume is given by$$ V_D(r) = \frac{2r^D\pi^{D/2}}{D\ \Gamma(D/2)}$$where $\Gamma(z)$ is the complete gamma function, $D$ is the dimension, and $r$ the radius of the sphere.If you populated a hypercube of side $2r$ uniformly with data, how much of it would be enclosed by the inscribed hypersphere? As $D$ increases, the fractional volume enclosed by the hypersphere goes to 0! For example: the SDSS comprises a sample of 357 million sources. - each source has 448 measured attributes- selecting just 30 (e.g., magnitude, size..)
and normalizing the data range $-1$ to $1$probability of having one of the 357 million sources reside within a unit hypersphere 1 in 1.4$\times 10^5$. Principal Component AnalysisPoints are correlated along a particular direction which doesn't align with the initial choice of axes. * we should rotate our axes to align with this correlation. * rotation preserves the relative ordering of dataChoose rotation to maximize the ability to discriminate between the data points* first axis, or principal component, is direction of maximal variance* second principal component is orthogonal to the first component and maximizes the residual variance* ... In the following example a distribution of points drawn from a bivariate Gaussian and centered on theorigin of $x$ and $y$. PCA defines a rotation such that the new axes ($x’$ and $y’$) are aligned along the directions of maximal variance (the principal components) with zero covariance. This is equivalent to minimizing the square of the perpendicular distances between the points and the principal components.import numpy as np from matplotlib import pyplot as plt from matplotlib.patches import Ellipse from matplotlib import ticker np.random.seed(42) r = 0.9 sigma1 = 0.25 sigma2 = 0.08 rotation = np.pi / 6 s = np.sin(rotation) c = np.cos(rotation) X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T R = np.array([[c, -s], [s, c]]) X = np.dot(R, X) #------------------------------------------------------------ # Plot the diagram fig = plt.figure(figsize=(5, 5), facecolor='w') ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False) # draw axes ax.annotate(r'$x$', (-r, 0), (r, 0), ha='center', va='center', arrowprops=dict(arrowstyle='<->', color='k', lw=1)) ax.annotate(r'$y$', (0, -r), (0, r), ha='center', va='center', arrowprops=dict(arrowstyle='<->', color='k', lw=1)) # draw rotated axes ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s), ha='center', va='center', arrowprops=dict(color='k', arrowstyle='<->', lw=1)) ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c), ha='center', va='center', arrowprops=dict(color='k', arrowstyle='<->', lw=1)) # scatter points ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2) # draw lines vnorm = np.array([s, -c]) for v in (X.T): d = np.dot(v, vnorm) v1 = v - d * vnorm ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k') # draw ellipses for sigma in (1, 2, 3): ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2, rotation * 180. / np.pi, ec='k', fc='gray', alpha=0.2, zorder=1)) ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) plt.show()Derivation of principal component analysesSet of data $X$: $N$ observations by $K$ measurementsCenter data by subtracting the mean The covariance is$$ C_X=\frac{1}{N-1}X^TX,$$$N-1$ as the sample covariance matrix.We want a projection, $R$, aligned with the directions of maximal variance ($Y= X R$) with covariance $$C_{Y} = R^T X^T X R = R^T C_X R$$Derive principal component by maximizing its variance (using Lagrange multipliers and constraint)$$\phi(r_1,\lambda_1) = r_1^TC_X r_1 - \lambda_1(r_1^Tr_1-1)$$derivative of $\phi(r_1,\lambda)$ with respect to $r_1$ set to 0$$C_Xr_1 - \lambda_1 r_1 = 0$$$\lambda_1$ is the root of the equation $\det(C_X -\lambda_1 {\bf I})=0$ and the largest eigenvalue$$\lambda_1 = r_1^T C_X r_1$$Other principal components derived byapplying additional constraint that components are uncorrelated (e.g., $r^T_2 C_X r_1 = 0$). 
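To make the derivation above concrete, here is a small illustrative sketch (not from the original notes; the toy data and mixing matrix are invented) that obtains the principal components as the eigenvectors of the sample covariance matrix $C_X$ and checks them against `PCA` from scikit-learn:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
A = np.array([[2.0, 0.0], [1.2, 0.3]])        # mixing matrix producing correlated 2-D data
X = rng.normal(size=(500, 2)) @ A
Xc = X - X.mean(axis=0)                       # center the data

C_X = Xc.T @ Xc / (Xc.shape[0] - 1)           # sample covariance matrix
evals, evecs = np.linalg.eigh(C_X)            # eigendecomposition (ascending order)
order = np.argsort(evals)[::-1]               # sort by decreasing variance
evals, evecs = evals[order], evecs[:, order]

pca = PCA(n_components=2).fit(X)
print(np.allclose(evals, pca.explained_variance_))            # same variances
print(np.allclose(np.abs(evecs.T), np.abs(pca.components_)))  # same axes, up to sign
```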
Singular value decomposition (SVD) A common approach is eigenvalue decomposition of the covariance or correlation matrix, or singular value decomposition (SVD) of the data matrix$$U \Sigma V^T = \frac{1}{\sqrt{N - 1}} X$$The columns of $U$ are the _left-singular vectors_ and the columns of $V$ are the _right-singular vectors_. The columns of $U$ and $V$ form orthonormal bases ($U^TU = V^TV = I$). The covariance matrix is$$ C_X = \left[\frac{1}{\sqrt{N - 1}}X\right]^T \left[\frac{1}{\sqrt{N - 1}}X\right]\nonumber\\ = V \Sigma U^T U \Sigma V^T\nonumber\\ = V \Sigma^2 V^T.$$The right singular vectors $V$ are the principal components, so the principal components can be obtained directly from the SVD of $X$ without ever forming $C_X$. Application of PCA In the following example we draw 100 data points in 3 dimensions and project them into a 10-dimensional space with the projection matrix $R$. To compute the PCA components (4 in our case) we use `PCA` from `scikit-learn`.from sklearn.decomposition import PCA X = np.random.normal(size=(100, 3)) R = np.random.random((3, 10)) X = np.dot(X, R) pca = PCA(n_components=4) pca.fit(X) comp = pca.transform(X) mean = pca.mean_ components = pca.components_ var = pca.explained_variance_For our astronomy use case, we are using the SDSS spectroscopic dataset. The SDSS spectra come from galaxies at a range of redshifts, and have sections of unreliable or missing data due to sky absorption, cosmic rays, bad detector pixels, or other effects. AstroML provides a set of spectra which have been moved to rest frame, corrected for masking using an iterative PCA reconstruction technique, and resampled to 1000 common wavelength bins. The spectra can be downloaded using `fetch_sdss_corrected_spectra()`. In the following example we plot 15 of these spectra:import numpy as np from matplotlib import pyplot as plt from astroML.datasets import sdss_corrected_spectra #---------------------------------------------------------------------- # Use pre-computed PCA to reconstruct spectra data = sdss_corrected_spectra.fetch_sdss_corrected_spectra() spectra_raw = data['spectra'] spectra_corr = sdss_corrected_spectra.reconstruct_spectra(data) wavelengths = sdss_corrected_spectra.compute_wavelengths(data) #------------------------------------------------------------ # select random spectra np.random.seed(5) nrows = 5 ncols = 3 ind = np.random.randint(spectra_corr.shape[0], size=nrows * ncols) spec_sample_raw = spectra_raw[ind] spec_sample_corr = spectra_corr[ind]We show the SDSS downloaded spectra in black, and our corrected spectra in blue.fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, bottom=0.1, top=0.95, hspace=0.05) for i in range(ncols): for j in range(nrows): ax = fig.add_subplot(nrows, ncols, ncols * j + 1 + i) ax.plot(wavelengths, spec_sample_raw[ncols * j + i], '-k', lw=1) ax.plot(wavelengths, spec_sample_corr[ncols * j + i], '-k', lw=1, c='blue') ax.set_xlim(3100, 7999) ax.yaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_major_locator(plt.MultipleLocator(1000)) if j < nrows - 1: ax.xaxis.set_major_formatter(plt.NullFormatter()) else: plt.xlabel(r'wavelength $(\AA)$') ylim = ax.get_ylim() dy = 0.05 * (ylim[1] - ylim[0]) ax.set_ylim(ylim[0] - dy, ylim[1] + dy) plt.show()PCA Reconstruction of a spectrum Reconstruction of a spectrum, ${x}(k)$, from the eigenvectors, ${e}_i(k)$: $$ {x}_i(k) = {\mu}(k) + \sum_j^R \theta_{ij} {e}_j(k),$$Truncating this expansion (i.e., keeping only the first $r < R$ eigenvectors) gives a compressed approximation of the spectrum, as illustrated below.#------------------------------------------------------------ # Compute PCA components # Eigenvalues can be computed using PCA as in the commented code below: #from
sklearn.decomposition import PCA #pca = PCA() #pca.fit(spectra_corr) #evals = pca.explained_variance_ratio_ #evals_cs = evals.cumsum() # because the spectra have been reconstructed from masked values, this # is not exactly correct in this case: we'll use the values computed # in the file compute_sdss_pca.py evals = data['evals'] ** 2 evals_cs = evals.cumsum() evals_cs /= evals_cs[-1] evecs = data['evecs'] spec_mean = spectra_corr.mean(0) #------------------------------------------------------------ # Find the coefficients of a particular spectrum spec = spectra_corr[1] coeff = np.dot(evecs, spec - spec_mean) #------------------------------------------------------------ # Plot the sequence of reconstructions fig = plt.figure(figsize=(5, 5)) fig.subplots_adjust(hspace=0, top=0.95, bottom=0.1, left=0.12, right=0.93) for i, n in enumerate([0, 4, 8, 20]): ax = fig.add_subplot(411 + i) ax.plot(wavelengths, spec, '-', c='gray') ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k') if i < 3: ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_ylim(-2, 21) ax.set_ylabel('flux') if n == 0: text = "mean" elif n == 1: text = "mean + 1 component\n" text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1] else: text = "mean + %i components\n" % n text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1] ax.text(0.02, 0.93, text, ha='left', va='top', transform=ax.transAxes) fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$') plt.show()EigenvaluesThe eigenvalues for the PCA decomposition of the SDSS spectra described in the previous section. The top panelshows the decrease in eigenvalue as a function of the number of eigenvectors, with a break in the distributionat ten eigenvectors. The lower panel shows the cumulative sum of eigenvalues normalized to unity. 94% of the variance in the SDSS spectra can be captured using the first ten eigenvectors.fig = plt.figure(figsize=(10, 7.5)) fig.subplots_adjust(hspace=0.05, bottom=0.12) ax = fig.add_subplot(211, xscale='log', yscale='log') ax.grid() ax.plot(evals, c='k') ax.set_ylabel('Normalized Eigenvalues') ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_ylim(5E-4, 100) ax = fig.add_subplot(212, xscale='log') ax.grid() ax.semilogx(evals_cs, color='k') ax.set_xlabel('Eigenvalue Number') ax.set_ylabel('Cumulative Eigenvalues') ax.set_ylim(0.65, 1.00) plt.show()PCA with missing data Observed spectrum, $x^o$ is the true spectrum, ${x}$ plus a wavelength-dependent weight, ${w}$. Weight is zero where data are missing and $1/{\sigma}^2$ for restMinimizing the quadratic deviation between ${x}^o$ truncated reconstruction, $\sum_i \theta_i {e}_i$and solving for $\theta_i$ gives$$ \sum_k \theta_i {w}(k) {e}_i(k) {e}_j(k) = \sum_k {w}(k) {x}^o(k) {e}_j(k),$$If $M_{ij} = \sum_k {w}(k) {e}_i(k) {e}_j(k)$ and $F_i = \sum_k {w}(k) {x}^o(k) {e}_i(k)$ then $$ \theta_i = \sum_j M_{ij}^{-1} F_{j},$$- $F_j$ are coefficients derived from gappy data- $M_{ij}^{-1}$ shows how correlated eigenvectors are over the missing regions.An estimate of the uncertainty on thereconstruction coefficients is given by$$%Cov(\theta_i,\theta_j) = \frac{1}{N}M_{ij}^{-1}{\rm Cov}(\theta_i,\theta_j) = M_{ij}^{-1}.$$Accuracy of this reconstruction will depend on the distribution ofthe gaps within the data vector. The principal component vectors defined for the SDSS spectra can be used to interpolate across or reconstructmissing data. 
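To illustrate the coefficient solve described above, here is a short sketch (not from the original notebook) of the gappy-PCA fit $\theta = M^{-1}F$. The names `spec_obs`, `mask`, `evecs` and `spec_mean` are assumed to hold an observed spectrum, a boolean mask of missing pixels, the eigenspectra (one per row) and the mean spectrum, e.g. as in the SDSS example; the mean is subtracted before fitting so that the result plugs into the reconstruction formula $x(k) = \mu(k) + \sum_i \theta_i e_i(k)$:

```python
import numpy as np

def gappy_pca_coeffs(spec_obs, mask, evecs, spec_mean):
    """Fit PCA coefficients to a spectrum with missing (masked) pixels."""
    w = (~mask).astype(float)              # w(k) = 0 where data are missing, 1 otherwise
    resid = spec_obs - spec_mean           # work with the mean-subtracted spectrum
    M = evecs @ (w[None, :] * evecs).T     # M_ij = sum_k w(k) e_i(k) e_j(k)
    F = evecs @ (w * resid)                # F_i  = sum_k w(k) x^o(k) e_i(k)
    return np.linalg.solve(M, F)           # theta = M^{-1} F

# reconstruction from the first n eigenspectra (hypothetical usage):
# theta = gappy_pca_coeffs(spec_obs, mask, evecs[:n], spec_mean)
# recon = spec_mean + theta @ evecs[:n]
```

In a real fit one would use $1/\sigma^2$ as the weight for the unmasked pixels instead of 1, as stated above; the structure of the solve is the same.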
Examples of three masked spectral regions are shown comparing the reconstruction of the input spectrum (black line) using the mean and the first ten eigenspectra (blue line) The gray bands represent the masked region of the spectrum.evecs = data['evecs'] mu = data['mu'] norms = data['norms'] mask = data['mask'] #------------------------------------------------------------ # plot the results i_plot = ((wavelengths > 5750) & (wavelengths < 6350)) wavelength = wavelengths[i_plot] specnums = [20, 8, 9] subplots = [311, 312, 313] fig = plt.figure(figsize=(10, 12.5)) fig.subplots_adjust(left=0.09, bottom=0.08, hspace=0, right=0.92, top=0.95) for subplot, i in zip(subplots, specnums): ax = fig.add_subplot(subplot) # compute eigen-coefficients spectra_i_centered = spectra_corr[i] / norms[i] - mu coeffs = np.dot(spectra_i_centered, evecs.T) # blank out masked regions spectra_i = spectra_corr[i] mask_i = mask[i] spectra_i[mask_i] = np.nan # plot the raw masked spectrum ax.plot(wavelength, spectra_i[i_plot], '-', color='k', label='True spectrum', lw=1.5) # plot two levels of reconstruction for nev in [10]: if nev == 0: label = 'mean' else: label = 'reconstruction\n(nev=%i)' % nev spectra_i_recons = norms[i] * (mu + np.dot(coeffs[:nev], evecs[:nev])) ax.plot(wavelength, spectra_i_recons[i_plot], label=label, color='grey') # plot shaded background in masked region ylim = ax.get_ylim() mask_shade = ylim[0] + mask[i][i_plot].astype(float) * ylim[1] plt.fill(np.concatenate([wavelength[:1], wavelength, wavelength[-1:]]), np.concatenate([[ylim[0]], mask_shade, [ylim[0]]]), lw=0, fc='k', alpha=0.2) ax.set_xlim(wavelength[0], wavelength[-1]) ax.set_ylim(ylim) ax.yaxis.set_major_formatter(ticker.NullFormatter()) if subplot == 311: ax.legend(loc=1) ax.set_xlabel('$\lambda\ (\AA)$') ax.set_ylabel('normalized flux') plt.show()Comparing PCA, NMF and ICA Nonnegative Matrix Factorization Eigenvectors are defined relative to mean data vector. Principal components that can be positive ornegative but for many physical systems we know data are a linear sum of positive components (e.g. galaxy spectrum is a linear sum of stellar componentsNonnegative matrix factorization (NMF) applies positivity constraintfrom sklearn.decomposition import NMF X = np.random.random((100, 3)) # 100 points in 3 dims, all positive nmf = NMF(n_components=3) # setting n_components is optional nmf.fit(X) proj = nmf.transform(X) # project to 3 dimensions comp = nmf.components_ # 3 x 10 array of components err = nmf.reconstruction_err_ # how well 3 components captures data/Users/bsipocz/.pyenv/versions/3.9.1/lib/python3.9/site-packages/sklearn/decomposition/_nmf.py:312: FutureWarning: The 'init' value, when 'init=None' and n_components is less than n_samples and n_features, will be changed from 'nndsvd' to 'nndsvda' in 1.1 (renaming of 0.26). warnings.warn(("The 'init' value, when 'init=None' and "Independent component analysisfrom sklearn.decomposition import FastICA X = np.random.normal(size=(100, 2)) # 100 pts in 2 dims R = np.random.random((2, 5)) # mixing matrix X = np.dot(X, R) # X is now 2D data in 5D space ica = FastICA(2) # fit two components sources = ica.fit_transform(X) proj = ica.transform(X) # 100 x 2 projection of data comp = ica.components_ # the 2 x 5 matrix of indep. componentsComparing PCA, NMF and ICAA comparison of the decomposition of SDSS spectra using PCA (left panel), ICA (middle panel) and NMF (right panel). The rank of the component increases from top to bottom. 
For the ICA and PCA the first component is the mean spectrum (NMF does not require mean subtraction). All of these techniques isolate a common set of spectral features (identifying features associated with the continuum and line emission). The ordering of the spectral components is technique dependent.from sklearn.decomposition import NMF, FastICA, PCA data = sdss_corrected_spectra.fetch_sdss_corrected_spectra() spectra = sdss_corrected_spectra.reconstruct_spectra(data) wavelengths = sdss_corrected_spectra.compute_wavelengths(data) #---------------------------------------------------------------------- # Compute PCA, ICA, and NMF components def compute_PCA_ICA_NMF(n_components=5): spec_mean = spectra.mean(0) # PCA: use randomized PCA for speed pca = PCA(n_components - 1, random_state=0, svd_solver='randomized') pca.fit(spectra) pca_comp = np.vstack([spec_mean, pca.components_]) # ICA treats sequential observations as related. Because of this, we need # to fit with the transpose of the spectra ica = FastICA(n_components - 1, random_state=0) ica.fit(spectra.T) ica_comp = np.vstack([spec_mean, ica.transform(spectra.T).T]) # NMF requires all elements of the input to be greater than zero spectra[spectra < 0] = 0 nmf = NMF(n_components, random_state=0) nmf.fit(spectra) nmf_comp = nmf.components_ return pca_comp, ica_comp, nmf_comp n_components = 5 decompositions = compute_PCA_ICA_NMF(n_components) #---------------------------------------------------------------------- # Plot the results fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, bottom=0.1, top=0.95, hspace=0.05) titles = ['PCA components', 'ICA components', 'NMF components'] for i, comp in enumerate(decompositions): for j in range(n_components): ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i) ax.yaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_major_locator(plt.MultipleLocator(1000)) if j < n_components - 1: ax.xaxis.set_major_formatter(plt.NullFormatter()) else: ax.xaxis.set_major_locator( plt.FixedLocator(list(range(3000, 7999, 1000)))) ax.set_xlabel(r'wavelength ${\rm (\AA)}$') ax.plot(wavelengths, comp[j], '-k', lw=1) # plot zero line xlim = [3000, 8000] ax.plot(xlim, [0, 0], '-', c='gray', lw=1) if j == 0: ax.set_title(titles[i]) if titles[i].startswith('PCA') or titles[i].startswith('ICA'): if j == 0: label = 'mean' else: label = 'component %i' % j else: label = 'component %i' % (j + 1) ax.text(0.03, 0.94, label, transform=ax.transAxes, ha='left', va='top') for l in ax.get_xticklines() + ax.get_yticklines(): l.set_markersize(2) # adjust y limits ylim = plt.ylim() dy = 0.05 * (ylim[1] - ylim[0]) ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy) ax.set_xlim(xlim) plt.show()/Users/bsipocz/.pyenv/versions/3.9.1/lib/python3.9/site-packages/sklearn/decomposition/_nmf.py:312: FutureWarning: The 'init' value, when 'init=None' and n_components is less than n_samples and n_features, will be changed from 'nndsvd' to 'nndsvda' in 1.1 (renaming of 0.26). warnings.warn(("The 'init' value, when 'init=None' and " /Users/bsipocz/.pyenv/versions/3.9.1/lib/python3.9/site-packages/sklearn/decomposition/_nmf.py:1090: ConvergenceWarning: Maximum number of iterations 200 reached. Increase it to improve convergence. warnings.warn("Maximum number of iterations %d reached. 
Increase it to"Saturday, April 10, 2021 Programmers - n numbers spaced x apart (Python) Problem: https://programmers.co.kr/learn/courses/30/lessons/12954 Blog: https://somjang.tistory.com/entry/Programmers-x%EB%A7%8C%ED%81%BC-%EA%B0%84%EA%B2%A9%EC%9D%B4-%EC%9E%88%EB%8A%94-n%EA%B0%9C%EC%9D%98-%EC%88%AB%EC%9E%90-Python First attempt:def solution(x, n): minus = 1 if x < 0: minus = -1 answer = [x * minus for x in range(x * minus, x * minus * n + 1, x * minus)] return answerSecond attempt:def solution(x, n): minus = 1 if x < 0: minus = -1 answer = [x + (x*i) for i in range(n)] return answerCreating a Simple ApplicationThe Streams Python API allows a user to create a streams application using only Python. The Python API allows for the definition of data sources, transformations, and sinks by performing operations on `Stream` and `Topology` objects.First, it is necessary to import the `Topology` class from the `streamsx.topology` package. This will be used to begin creating your application. In addition, the `context` module must be imported, allowing your application to be submitted.Lastly, import the classes of the operators you wish to use. Here we use `Counter`, a source operator which counts from 0 to infinity, and `negative_one`, an operator which returns the negative of every number it is given.from streamsx.topology.topology import Topology from streamsx.topology import context from my_module import Counter, negative_oneThe Topology ObjectA Topology object is a container for the structure of your application. Give it a unique name, otherwise it may overwrite other compiled streams applications.top = Topology("myTop")Define and Create Your Data SourceBy creating an instance of the `Counter` operator, we can use it as a data source by invoking the `source` method on your `Topology` object and passing it the `my_counter` object. The output of a source is a `Stream`, which represents the flow of data in your application. A stream consists of a potentially infinite sequence of Python objects upon which you can perform subsequent operations (such as multiplying by negative one).# Define source my_counter = Counter() stream_1 = top.source(my_counter)Performing Operations On Your Data StreamA user might want to perform a number of operations on a stream of live data; for example, extracting sentiment from tweets, keeping track of a GPS device, or monitoring live traffic data. All of these operations take data items of one type and modify them or produce data of another type. With the Python API, this can be achieved by calling `map` on a Stream and passing it an operator.As mentioned before, `negative_one` is a callable class which takes a number and returns its negative.# multiply by negative one neg_one = negative_one() stream_2 = stream_1.map(neg_one) # Print stream stream_2.print()SubmissionSubmit the application to be run locally in a single process. The output will be sent to standard output and printed to the screen.out = context.submit("STANDALONE", top.graph)Generate ground truth labelsdef importScoreInfo(scoreDir): d = {} for csvfile in glob.glob("{}/p*.scoreinfo.csv".format(scoreDir)): pieceStr = os.path.basename(csvfile).split('.')[0] # e.g.
'p7' d[pieceStr] = {} with open(csvfile, 'r') as f: next(f) # skip header for line in f: parts = line.rstrip().split(',') linenum = int(parts[0]) startmeasure = int(parts[1]) endmeasure = int(parts[2]) d[pieceStr][linenum] = (startmeasure, endmeasure) return d scoreDir = 'data/score_info' scoreInfo = importScoreInfo(scoreDir) def importMidiInfo(midiInfoDir, midiDir): d = {} for csvfile in glob.glob("{}/p*_midinfo.csv".format(midiInfoDir)): pieceStr = os.path.basename(csvfile).split('_')[0] # e.g. 'p7' d[pieceStr] = {} with open(csvfile, 'r') as f: for line in f: parts = line.rstrip().split(',') measure = int(parts[0]) time = float(parts[1]) d[pieceStr][measure] = time # add an additional entry to indicate the total duration midfile = "{}/{}.mid".format(midiDir, pieceStr) mid = pretty_midi.PrettyMIDI(midfile) totalDur = mid.get_piano_roll().shape[1] * .01 # default fs = 100 d[pieceStr][measure+1] = totalDur return d midiInfoDir = 'data/midi_info' midiDir = 'data/midi' midiInfo = importMidiInfo(midiInfoDir, midiDir) def getQueryGroundTruth(infile, multMatchFile, scoreInfo, midiInfo): # infers ground truth timestamps for each query d = {} with open(infile, 'r') as fin: next(fin) # skip header for line in fin: # get start, end lines parts = line.rstrip().split(',') # e.g. 'p1_q1,0,3' queryStr = parts[0] startLine = int(parts[1]) endLine = int(parts[2]) # infer start, end measure pieceStr = queryStr.split('_')[0] #print("%s,%s,%s" % (queryStr, startLine,endLine)) startMeasure = scoreInfo[pieceStr][startLine][0] endMeasure = scoreInfo[pieceStr][endLine][1] # infer start, end time #print("%s,%s,%s" % (queryStr, startMeasure, endMeasure)) startTime = midiInfo[pieceStr][startMeasure] endTime = midiInfo[pieceStr][endMeasure+1] # ends on downbeat of next measure d[queryStr] = [(startTime, endTime, startMeasure, endMeasure, startLine, endLine)] addMultipleMatches(d, multMatchFile, scoreInfo, midiInfo) return d def addMultipleMatches(d, multMatchFile, scoreInfo, midiInfo): # some queries match more than 1 segment of the score, these are indicated in multMatchFile with open(multMatchFile, 'r') as f: for line in f: # parse line parts = line.rstrip().split(',') # e.g. 
'p31_q8,L3m6,L5m1' queryStr = parts[0] pieceStr = queryStr.split('_')[0] startStr = parts[1] endStr = parts[2] # infer start, end measure startLine = int(startStr.split('m')[0][1:]) endLine = int(endStr.split('m')[0][1:]) startOffset = int(startStr.split('m')[1]) endOffset = int(endStr.split('m')[1]) startMeasure = scoreInfo[pieceStr][startLine][0] + startOffset - 1 endMeasure = scoreInfo[pieceStr][endLine][0] + endOffset - 1 # infer start, end time startTime = midiInfo[pieceStr][startMeasure] endTime = midiInfo[pieceStr][endMeasure+1] # ends on downbeat of next measure tup = (startTime, endTime, startMeasure, endMeasure, startStr, endStr) # startStr more informative than startLine d[queryStr].append(tup) return d def saveQueryInfoToFile(d, outfile): with open(outfile, 'w') as f: for query in sorted(d): for (tstart, tend, mstart, mend, lstart, lend) in d[query]: f.write('{},{:.2f},{:.2f},{},{},{},{}\n'.format(query, tstart, tend, mstart, mend, lstart, lend)) queryInfoFile = 'data/query_info/query_info.csv' # to read multMatchesFile = 'data/query_info/query.multmatches' # to read queryGTFile = 'data/query_info/query.gt' # to write queryInfo = getQueryGroundTruth(queryInfoFile, multMatchesFile, scoreInfo, midiInfo) saveQueryInfoToFile(queryInfo, queryGTFile)Evaluate system performancedef readGroundTruthLabels(gtfile): d = {} with open(gtfile, 'r') as f: for line in f: parts = line.rstrip().split(',') # e.g. 'p1_q1,1.55,32.59' queryStr = parts[0] tstart = float(parts[1]) tend = float(parts[2]) if queryStr in d: d[queryStr].append((tstart, tend)) else: d[queryStr] = [(tstart, tend)] return d def readHypothesisFiles(hypdir): l = [] for hypfile in sorted(glob.glob("{}/*.hyp".format(hypdir))): with open(hypfile, 'r') as f: line = next(f) parts = line.rstrip().split(',') query = parts[0] # e.g. 
p1_q1 tstart = float(parts[1]) tend = float(parts[2]) rank = int(parts[3]) l.append((query, tstart, tend,rank)) return l def calcPrecisionRecall(hypdir, gtfile): d = readGroundTruthLabels(gtfile) hyps = readHypothesisFiles(hypdir) hypinfo = [] overlapTotal, hypTotal, refTotal = (0,0,0) MRR = 0 for (queryid, hypstart, hypend, rank) in hyps: MRR=MRR+1/(rank) refSegments = d[queryid] idxMax = 0 overlapMax = 0 for i, refSegment in enumerate(refSegments): # find ref segment with most overlap overlap = calcOverlap((hypstart, hypend), refSegment) if overlap > overlapMax: idxMax = i overlapMax = overlap hyplen = hypend - hypstart reflen = refSegments[idxMax][1] - refSegments[idxMax][0] if rank!=1: overlapMax = 0 overlapTotal += overlapMax hypTotal += hyplen refTotal += reflen hypinfo.append((queryid, overlapMax, refSegments[idxMax][0], refSegments[idxMax][1], idxMax)) # keep for error analysis P = overlapTotal / hypTotal R = overlapTotal / refTotal F = 2 * P * R / (P + R) return MRR/len(hyps), F, P, R, hypinfo def calcOverlap(seg1, seg2): overlap_lb = max(seg1[0], seg2[0]) overlap_ub = min(seg1[1], seg2[1]) overlap = np.clip(overlap_ub - overlap_lb, 0, None) return overlap hypdir = 'experiments/train/hyp' MRR, F, P, R, hypinfo = calcPrecisionRecall(hypdir, queryGTFile) MRR, F, P, R, len(hypinfo)Investigate Errorsdef printDebuggingInfo(hypdir, gtfile, scoreInfo, midiInfo, queryInfo, hypInfo): d = readGroundTruthLabels(gtfile) hyps = readHypothesisFiles(hypdir) for i, (query, hyp_tstart, hyp_tend) in enumerate(hyps): # hyp and ref info (sec) piece = query.split('_')[0] _, overlap, ref_tstart, ref_tend, bestIdx = hypInfo[i] # hyp and ref info (measures) interp_m = list(midiInfo[piece].keys()) interp_t = [midiInfo[piece][m] for m in interp_m] hyp_mstart, hyp_mend, ref_mstart, ref_mend = np.interp([hyp_tstart, hyp_tend, ref_tstart, ref_tend], interp_t, interp_m) moverlap = calcOverlap((hyp_mstart, hyp_mend),(ref_mstart, ref_mend)) # hyp and ref info (line # + measure offset) hyp_lstart, hyp_lstartoff = getLineNumberMeasureOffset(hyp_mstart, scoreInfo[piece]) hyp_lend, hyp_lendoff = getLineNumberMeasureOffset(hyp_mend, scoreInfo[piece]) ref_lstart = queryInfo[query][bestIdx][4] ref_lend = queryInfo[query][bestIdx][5] # compare in sec print("{}: hyp ({:.1f} s,{:.1f} s), ref ({:.1f} s,{:.1f} s), overlap {:.1f} of {:.1f} s".format(query, hyp_tstart, hyp_tend, ref_tstart, ref_tend, overlap, ref_tend - ref_tstart)) # compare in measure numbers #print("\thyp ({:.1f} m, {:.1f} m), ref ({:.1f} m, {:.1f} m), overlap {:.1f} m".format(hyp_mstart, hyp_mend, ref_mstart, ref_mend, moverlap)) # compare in line + measure offset print("\thyp (ln {} m{:.1f}, ln {} m{:.1f}), ref (ln {}, ln {})".format(hyp_lstart, hyp_lstartoff, hyp_lend, hyp_lendoff, ref_lstart, ref_lend)) return def getLineNumberMeasureOffset(measureNum, d): line = -1 moffset = -1 for key in d: lb, ub = d[key] # line start, end measure if measureNum >= lb and measureNum < ub + 1: line = key moffset = measureNum - lb + 1 break return line, moffset printDebuggingInfo(hypdir, queryGTFile, scoreInfo, midiInfo, queryInfo, hypinfo)Measure Runtimedef showRuntimeStats(indir): durs = [] cnt = 0 for hypfile in glob.glob('{}/*.hyp'.format(indir)): cnt += 1 with open(hypfile, 'r') as f: line = next(f) parts = line.split(',') dur = float(parts[4]) durs.append(dur) durs = np.array(durs) avgDur = np.mean(durs) minDur = np.min(durs) maxDur = np.max(durs) stdDur = np.std(durs) print('{} files'.format(cnt)) print('Avg Duration: {:.2f} sec'.format(avgDur)) print('Std 
Duration: {:.2f} sec'.format(stdDur)) print('Min Duration: {:.2f} sec'.format(minDur)) print('Max Duration: {:.2f} sec'.format(maxDur)) plt.hist(durs, bins=np.arange(0,2,.1)) plt.xlabel('Runtime (sec)') plt.ylabel('Count') showRuntimeStats(hypdir)2000 files Avg Duration: 8.98 sec Std Duration: 2.73 sec Min Duration: 0.60 sec Max Duration: 22.14 sec**Level 3**import matplotlib.pyplot as plt from pylab import rand, ylim, xlim from random import randint import numpy as np # random.randint(a, b) # Return a random integer N such that a <= N <= b. kBase = 1.8 def randomMinMaxPref(min, max): aa = randint(int(kBase**min), int(kBase**(max+1)-1)) bb = int(log(aa, kBase)) return bb def randomMinMax(min, max): if min >= max : return min return randint(min, max) def onGenerate(): # z = randint(3,6) z = randomMinMaxPref(1,6) x = randomMinMax(0, z-1) # return [x, z-x, z] return [[x, z-x, z], [z-x, x, z]][randint(0, 1)] X = [] Y = [] Z = [] for i in range(100): c = 0 while (c==0): a, b, c = onGenerate() X.append(a) Y.append(b) Z.append(c) plt.scatter(X,Y, color='r', alpha=0.1) plt.xlabel("X") plt.ylabel("Y") # ylim([-1,10]) # xlim([-1,10]) plt.show() plt.hist(Z, bins=24) plt.xlabel("Z") plt.show() from math import log import matplotlib.pyplot as plt x = [] xx = [] for i in range(100): xxi = randint(1, 2**10-1) xx.append(xxi) x.append(int(log(xxi, 2))) plt.scatter(xx,x, color="b", alpha=0.2) plt.xlabel("xx") plt.ylabel("x") plt.show() import matplotlib.pyplot as plt x = [] xx = [] for i in range(-10, 10): xx.append(i) x.append(-(100*i**2 + -5*i + 5000) ) plt.scatter(xx,x, color="g", alpha=0.2) plt.xlabel("xx") plt.ylabel("x") plt.show() import matplotlib.pyplot as plt from pylab import rand, ylim, xlim from random import randint import numpy as np # random.randint(a, b) # Return a random integer N such that a <= N <= b. 
def randomMinMax(min, max): if min >= max : return min return randint(min, max) def onGenerate(): z = randomMinMax(3,6) x = randomMinMax(0, z-1) # return [x, z-x, z] return [[x, z-x, z], [z-x, x, z]][randint(0, 1)] X = [] Y = [] Z = [] for i in range(100): a, b, c = onGenerate() X.append(a) Y.append(b) Z.append(c) plt.scatter(X,Y, color='r', alpha=0.1) plt.xlabel("X") plt.ylabel("Y") # ylim([-1,10]) # xlim([-1,10]) plt.show() # plt.scatter(Z, np.zeros_like(Z), color="b", alpha=0.01) plt.hist(Z, ) plt.xlabel("Z") plt.show()Example 1: estimate a pollution model with a gaussian process# create an environment to observe env = PollutionModelEnvironment("water", 100, 100) env.evolve_speed = 1 env.p_pollution = 0.1 for t in range(120): env.proceed() plt.imshow(env.value, vmin=0, vmax=1.0) # create an observation model #im = ScalarFieldInformationModel_stored_observation("sample", env.width, env.height, \ # estimation_type="disk-fixed", # estimation_radius=10) im = ScalarFieldInformationModel_stored_observation("sample", env.width, env.height, \ estimation_type="gaussian-process" ) # generate a series random observations for i in range(20): x = random.randint(0, env.width-1) y = random.randint(0, env.height-1) value = env.value[x,y] obs = {"x": x, "y": y, "value": value} im.add_observation(obs) im.proceed(1) #plt.imshow(im.value, vmin=0, vmax=1.0) plt.imshow(im.uncertainty, vmin=0, vmax=1.0) plt.imshow(im.uncertainty, vmin=0, vmax=1.0) kernel = RationalQuadratic(length_scale = [2.0, 2.0], length_scale_bounds = [1, 100], \ alpha=0.1) +\ WhiteKernel(noise_level=0.2) #kernel = RationalQuadratic(length_scale = [2.0, 2.0], length_scale_bounds = [1, 100], alpha=1000) +\ # WhiteKernel(noise_level=0.2) #kernel = RBF(length_scale = [2.0, 2.0], length_scale_bounds = [1, 100]) +\ # WhiteKernel(noise_level=0.5) im2 = ScalarFieldInformationModel_stored_observation("sample", env.width, env.height, \ estimation_type="gaussian-process", gp_kernel = kernel ) im2.observations.extend(im.observations) im2.proceed(1.0) plt.imshow(im2.value, vmin=0, vmax=1.0)Question 1 : How is presidential attention distributed accross policy spheres?# Pie chart, where the slices will be ordered and plotted counter-clockwise: columns = ['president_Bērnu, ģimenes un sabiedrības integrācijas lietu', 'president_Veselības', 'president_Premjers', 'president_Zemkopības', 'president_Bērnu un ģimenes lietu', 'president_Satiksmes', 'president_Iekšlietu', 'president_Tieslietu', 'president_Vides', 'president_Ārlietu', 'president_Izglītības un zinātnes', 'president_Aizsardzības', 'president_Finanšu', 'president_Vides aizsardzības un reģionālās attīstības', 'president_Reģionālās attīstības un pašvaldību lietu', 'president_Kultūras', 'president_Ekonomikas', 'president_Labklājības',] labels =[i.replace("president_", "") for i in columns] sizes = [round(df[i].mean()*1000) for i in columns] data_dict = {labels[index]:i for index, i in enumerate(sizes)} fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
plt.show() sorted(data_dict.items(), key = lambda x: x[1], reverse=True)Q2: How the attention pattern differs over time?columns = ['president_Premjers', 'president_Ārlietu', 'president_Aizsardzības', 'president_Iekšlietu', 'president_Finanšu',] agg_dict = {i : "mean" for i in columns} df2 = df.groupby(["year"]).agg(agg_dict) print(df2.shape) df2.head() df2.tail() df2.plot.line()Does presidential attention mirror media attention?ministries = ['Premjers' , 'Ārlietu', 'Aizsardzības', 'Iekšlietu', 'Finanšu', 'Zemkopības', 'Tieslietu', 'Izglītības un zinātnes', 'Labklājības', 'Veselības', 'Satiksmes', 'Kultūras', 'Ekonomikas', 'Vides aizsardzības un reģionālās attīstības', 'Reģionālās attīstības un pašvaldību lietu', 'Vides', 'Bērnu, ģimenes un sabiedrības integrācijas lietu', 'Bērnu un ģimenes lietu', ] ministries2 = ["president_"+i for i in ministries] ministries_full = ministries + ministries2 agg_dict = {i:"mean" for i in ministries_full} agg_dict["president"] = "last" dfg = df.groupby(["year", "month"]).agg(agg_dict) dfg["grybauskaite"] = dfg.apply(lambda x:int(x["president"]==""), axis=1) print(dfg.shape) dfg.head() for m in ministries: print(m) print(pearsonr(dfg[m], dfg["president_"+m])) print()Premjers (0.09620211684287103, 0.11545566751360399) Ārlietu (0.39016962929661075, 3.2614662607344576e-11) Aizsardzības (0.44444792852088855, 1.884025027606993e-14) Iekšlietu (0.3287528412720247, 3.369630890849667e-08) Finanšu (0.3285243205526353, 3.448490118915178e-08) Zemkopības (0.5483881790398626, 1.6056103191951939e-22) Tieslietu (0.22198040031950486, 0.00024285152824671914) Izglītības un zinātnes (0.08283079539171213, 0.17556786439771987) Labklājības (0.34590076831217353, 5.621195205333222e-09) Veselības (0.14664173952983234, 0.016087299633658323) Satiksmes (0.19291089708427686, 0.0014770527453926215) Kultūras (0.49170399234287593, 8.807705453853898e-18) Ekonomikas (0.34506512037319237, 6.149437881942686e-09) Vides aizsardzības un reģionālās attīstības (0.3591850381063476, 1.3006671759491291e-09) Reģionālās attīstības un pašvaldību lietu (0.5607103275780431, 1.1268648588954723e-23) Vides (0.5051017022120141, 8.002526127297527e-19) Bērnu, ģimenes un sabiedrības inte[...]Histogramflights_pd.distance.plot.hist(by="distance", bins=30, color="#474747")Histogram 2Dx = flights_pd["sched_dep_time"] y = flights_pd["sched_arr_time"] counts, xedges, yedges = np.histogram2d(x, y, bins=100) counts = np.ma.masked_where(counts == 0, counts) plt.pcolormesh(xedges, yedges, counts.T, cmap='Blues_r') cb = plt.colorbar()Bar plotflights_pd.origin.value_counts().sort_values().plot.bar()Dog Breed Classification using PyTorch - Part 2 Transfer Learning with VGG16 model IntroductionIn the previous post: [Part-1](https://pareshppp.github.io/blogs/dog-breed-classification-scratch/), we had classified the images of dog breeds using a model that we created from scratch. Using that model we predicted the dog breeds with an accuracy of around 10%. With 133 dog breeds (target classes), random selection would have given us an accuracy of less than 1%. Compared to that our simple model performed reasonably well.But ~10% accuracy is still very low. We can use a more complex model for our problem but the more complex a model is, the more time and computing power it takes to train it. 
To get a high enough accuracy in our problem it would take days to train a sufficiently complex model on any personal computer.Instead, we are going to use a method called Transfer Learning to hasten the model training process.At a fundamental level, all images share the same basic features - Edges, Curves, Gradients, Patterns, etc. As such, we do not need to train the model to recognize these features every time. Since these features are stored in a model as weight parameters, we can re-use a pre-trained model to skip the time needed to train these weights. We only need to train the weights for the final classification layer based on our particular problem. This process is known as Transfer Learning.In this post we are going to use a large but simple model called VGG-16.Let's get started. Import Librariesimport numpy as np from glob import glob import os import torch import torch.nn as nn import torch.optim as optim import torchvision from torchvision import datasets, transforms from torch.utils.data import DataLoader from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = TrueCheck DatasetsThe first step is to load-in the Images and check the total size of our dataset.> The Dog Images Dataset can be downloaded from here: [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.# load filenames for dog images dog_files = np.array(glob(os.path.join('dogImages','*','*','*'))) # print number of images in dataset print('There are %d total dog images.' % len(dog_files))There are 8351 total dog images.Check CUDA AvailabilityCheck if GPU is available.# check if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: print('Using GPU.')Using GPU.Define ParametersDefine the parameters needed in data loader and model creation.# parameters n_epochs = 5 num_classes = 133 num_workers = 0 batch_size = 10 learning_rate = 0.01Data Loaders for the Dog DatasetIn the next step we will do the following:1. Define Transformations that will be applied to the images using `torchvision.transforms`. Transformations are also known as Augmentation. This is a pre-processing step and it helps the model to generalize to new data much better.2. Load the image data using `torchvision.datasets.ImageFolder` and apply the transformations.3. Create Dataloaders using `torch.utils.data.DataLoader`. 
> **Note:**- We have created dictionaries for all three steps that are divided into train, validation and test sets.- The Image Resize shape and mean & standard-deviation values for Normalization module were chosen so as to replicate the VGG16 model.## TODO: Specify data loaders trans = { 'train': transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]), 'valid': transforms.Compose([ transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) } data_transfer = { 'train': datasets.ImageFolder(os.path.join('dogImages','train'), transform=trans['train']), 'valid': datasets.ImageFolder(os.path.join('dogImages','valid'), transform=trans['valid']), 'test': datasets.ImageFolder(os.path.join('dogImages','test'), transform=trans['test']) } loaders_transfer = { 'train': DataLoader(data_transfer['train'], batch_size=batch_size, num_workers=num_workers, shuffle=True), 'valid': DataLoader(data_transfer['valid'], batch_size=batch_size, num_workers=num_workers, shuffle=True), 'test': DataLoader(data_transfer['test'], batch_size=batch_size, num_workers=num_workers, shuffle=True) } print(f"Size of Train DataLoader: {len(loaders_transfer['train'].dataset)}") print(f"Size of Validation DataLoader: {len(loaders_transfer['valid'].dataset)}") print(f"Size of Test DataLoader: {len(loaders_transfer['test'].dataset)}")Size of Train DataLoader: 6680 Size of Validation DataLoader: 835 Size of Test DataLoader: 836Model ArchitectureNext, we will initialize the vgg16 **pre-trained** model using the `torchvision.models.vgg16` module. We will keep the whole model unchanged except the last classifier layer, where we change the number of output nodes to number of classes.# specify model architecture model_transfer = torchvision.models.vgg16(pretrained=True) # modify last layer of classifier model_transfer.classifier[6] = nn.Linear(4096, num_classes) print(model_transfer)VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d[...]Freeze Feature GradientsWe need to freeze the gradients for the feature part of the model as we do not want to re-train the weigths for those layers. 
We will only train the weights for the classifier section of the model.# freeze gradients for model features for param in model_transfer.features.parameters(): param.require_grad = FalseSpecify Loss Function and OptimizerWe have chosen `CrossEntropyLoss` as our loss function and `Stochastic Gradient Descent` as our optimizer.> **Note:**Here we are only optimizing the weights for classifier part of the model. We will not change the weights for the features part of the model.## select loss function criterion_transfer = nn.CrossEntropyLoss() ## select optimizer optimizer_transfer = optim.SGD(params=model_transfer.classifier.parameters(), lr=learning_rate)Train and Validate the ModelWe define a function for Training and Validation. It calculates a running train & validation loss and saves the model whenever the validation loss decreases.def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += loss.item() * data.size(0) if batch_idx % 200 == 0: print(f"Training Batch: {batch_idx}+/{len(loaders['train'])}") ###################### # validate the model # ###################### for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += loss.item() * data.size(0) if batch_idx % 200 == 0: print(f"Validation Batch: {batch_idx}+/{len(loaders['valid'])}") train_loss = train_loss / len(loaders['train'].dataset) valid_loss = valid_loss / len(loaders['valid'].dataset) # print training/validation statistics print(f'Epoch: {epoch} \tTraining Loss: {train_loss} \tValidation Loss: {valid_loss}') # save the model if validation loss has decreased if valid_loss <= valid_loss_min: print(f'Validation loss decreased from {valid_loss_min} to {valid_loss}.\nSaving Model...') torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return modelFinally, we train the model.# train the model if use_cuda: model_transfer = model_transfer.cuda() model_transfer = train(n_epochs, loaders_transfer, model_transfer, \ optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')Training Batch: 0+/668 Training Batch: 200+/668 Training Batch: 400+/668 Training Batch: 600+/668 Validation Batch: 0+/84 Epoch: 1 Training Loss: 2.233159229605498 Validation Loss: 1.1463432044326187 Validation loss decreased from inf to 1.1463432044326187. Saving Model... Training Batch: 0+/668 Training Batch: 200+/668 Training Batch: 400+/668 Training Batch: 600+/668 Validation Batch: 0+/84 Epoch: 2 Training Loss: 1.570702178994874 Validation Loss: 0.9507174207243377 Validation loss decreased from 1.1463432044326187 to 0.9507174207243377. Saving Model... 
Training Batch: 0+/668 Training Batch: 200+/668 Training Batch: 400+/668 Training Batch: 600+/668 Validation Batch: 0+/84 Epoch: 3 Training Loss: 1.4183635966863462 Validation Loss: 0.9120735898167788 Validation loss decreased from 0.9507174207243377 to 0.9120735898167788. Saving Model... Training Batch: 0+/668 Training Batch: 200+/668 Training Batch: 400+/668 Training Batch: 600+/668 Validation Batch: 0+/84 Epoch: 4 Training [...]Loading in the saved model.# load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt'))Test the ModelWe compare the predicted outputs with target to get the number of correct predictions and then calculate the pecentage accuracy.def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss += loss.item() * data.size(0) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) test_loss = test_loss / len(loaders['test'].dataset) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)Test Loss: 0.928593 Test Accuracy: 73% (612/836)LOGimport glob import os import yaml from fddc.log.cin_log import build_cinrecord from fddc.log.annexa_log import build_annexarecord from fddc.log.log import build_logConfig# Config file with open('config/cin_to_annexa.yaml') as FILE: config = yaml.load(FILE, Loader=yaml.FullLoader) # Output file output_file = 'log.csv'Define if the log includes CIN Census and / or Annex A. At least one must be True.# CIN include_cincensus = True # CIN data cin_folder = '.../clean_cin' # Change filepath if needed cin_files = glob.glob(os.path.join(cin_folder, "*.xml")) # Annex A include_annexa = False # Consolidated Annex A annexa_file = 'annex-a-cleaned.xlsx' # Change filepath if neededRun programme (takes a few minutes depending on quantity of data)# 1/ Create CIN log cin = build_cinrecord(cin_files, include_cincensus) # 2/ Create Annex A log annexa = build_annexarecord(annexa_file, include_annexa) # 3/ Create master log log = build_log(annexa, cin, output_file, config) log.head()Observations and Insights# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st import numpy as np # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset mouse_df = pd.merge(mouse_metadata, study_results, on='Mouse ID') # Display the data table for preview mouse_df.head() # Checking the number of mice. mouse_df["Mouse ID"].nunique() # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. 
mouse_df[mouse_df.duplicated(subset=["Mouse ID", "Timepoint"], keep=False)] # Optional: Get all the data for the duplicate mouse ID. duplicated = mouse_df[mouse_df["Mouse ID"] == "g989"] # Create a clean DataFrame by dropping the duplicate mouse by its ID. mouse_full = mouse_df.drop(duplicated.index) # Checking the number of mice in the clean DataFrame. mouse_full["Mouse ID"].nunique() import seaborn as snsSummary Statisticsmouse_full.head() # Generate a summary statistics table of mean, median, variance, standard deviation, #and SEM of the tumor volume for each regimen # Use groupby and summary statistical methods to calculate the following properties of each drug regimen: # mean, median, variance, standard deviation, and SEM of the tumor volume. # Assemble the resulting series into a single summary dataframe. drug_group = mouse_full.groupby(["Drug Regimen"]) drug_mean = drug_group.mean()["Tumor Volume (mm3)"] drug_median = drug_group.median()["Tumor Volume (mm3)"] drug_var = drug_group.var()["Tumor Volume (mm3)"] drug_std = drug_group.std()["Tumor Volume (mm3)"] drug_sem = drug_group.sem()["Tumor Volume (mm3)"] drug_summary = pd.DataFrame({"Mean":drug_mean, "Median":drug_median, "Variance":drug_var, "Standard Dev": drug_std, "SEM":drug_sem}) drug_summary # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Using the aggregation method, produce the same summary statistics in a single line mouse_full.groupby(["Drug Regimen"]).agg({"Tumor Volume (mm3)":["mean", "median", "var", "std", "sem"]})Bar and Pie Charts# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas. drug_group.agg({"Mouse ID":"count"}).sort_values("Mouse ID", ascending=False).plot(kind='bar', title="Number of Measurements by Drug Regimen") plt.ylabel("Number of Measurements") plt.xlabel("Drug Regimen"); # Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas. sns.countplot(data=mouse_full, x="Drug Regimen", order= mouse_full["Drug Regimen"].value_counts().index) plt.title("Number of Measurements by Drug Regimen") plt.ylabel("Number of Measurements") plt.xticks(rotation=90) plt.show() # Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot. drug_rbymouse = drug_group.count()["Mouse ID"] drug_rbymouse = drug_rbymouse.reset_index() drug_rbymouse = drug_rbymouse.sort_values("Mouse ID", ascending=False) # Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot. 
plt.bar(drug_rbymouse["Drug Regimen"], drug_rbymouse["Mouse ID"]) plt.xticks(rotation=90) plt.title("Number of Measurements by Drug Regimen") plt.ylabel("Number of Measurements") plt.xlabel("Drug Regimen") plt.show() # Generate a pie plot showing the distribution of female versus male mice using pandas mouse_full["Sex"].value_counts().plot(kind='pie', autopct="%1.1f%%", startangle=140) plt.title("Female vs Male Distribution") plt.show() # Generate a pie plot showing the distribution of female versus male mice using pyplot gender_group = mouse_full.groupby(["Sex"]).count()["Mouse ID"] gender_group = gender_group.reset_index() gender_group plt.pie(gender_group["Mouse ID"], labels= gender_group["Sex"], autopct= "%1.1f%%", colors=["#FF7F0E", "#2077B4"]) plt.title("Female vs Male Distribution") plt.show()Quartiles, Outliers and Boxplotsmouse_full # Calculate the final tumor volume of each mouse across four of the treatment regimens Sort DF: # Capomulin, Ramicane, Infubinol, and Ceftamin drug_list = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"] drugs_dataframe = mouse_full[mouse_full["Drug Regimen"].isin(drug_list)] drugs_dataframe # Start by getting the last (greatest) timepoint for each mouse max_time = drugs_dataframe.groupby(["Mouse ID"]).max()["Timepoint"] max_time = max_time.reset_index() max_time # Merge this group df with the original dataframe to get the tumor volume at the Mouse ID & last timepoint drugs_df = pd.merge(drugs_dataframe, max_time, on=["Mouse ID", "Timepoint"]) drugs_df # Put treatments into a list for for loop (and later for plot labels) drug_list = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"] # Create empty list to fill with tumor vol data (for plotting) tumor_vol = [] # Calculate the IQR and quantitatively determine if there are any potential outliers. for drug in drug_list: # Locate the rows which contain mice on each drug and get the tumor volumes drug_data = drugs_df.loc[drugs_df["Drug Regimen"] == drug, "Tumor Volume (mm3)"] tumor_vol.append(drug_data) # add subset quartiles = drug_data.quantile([0.25, 0.5, 0.75]) lower_q = quartiles[0.25] upper_q = quartiles[0.75] iqr = upper_q - lower_q # Determine outliers using upper and lower bounds lower_bound = lower_q - (1.5*iqr) upper_bound = upper_q + (1.5*iqr) outliers = drug_data[(drug_data > upper_bound) | (drug_data < lower_bound)] print(outliers) # Generate a box plot of the final tumor volume of each mouse across four regimens of interest flierprops = dict(marker="o", markerfacecolor='blue', markersize=12, linestyle='none') fig, ax1 = plt.subplots() ax1.boxplot(tumor_vol, labels= drug_list, flierprops=flierprops) ax1.set_title("Final Tumor Volume of Treatment Regimens") ax1.set_xlabel("Drug Regimen") ax1.set_ylabel("Tumor Volume (mm3)") plt.show() fig, ax1 = plt.subplots() sns.boxplot(y=tumor_vol, x=drug_list) ax1.set_title("Final Tumor Volume of Treatment Regimens") ax1.set_xlabel("Drug Regimen") ax1.set_ylabel("Tumor Volume (mm3)") plt.show()Line and Scatter Plotscapo_df = drugs_dataframe[drugs_dataframe["Drug Regimen"] == "Capomulin"] m_s2185 = capo_df[capo_df["Mouse ID"] == "s185"] mouse_info = m_s2185.groupby(["Timepoint"]).mean()["Tumor Volume (mm3)"] mouse_info = mouse_info.reset_index() mouse_info # Generate a line plot of tumor volume vs. 
time point for a mouse treated with Capomulin plt.plot(mouse_info["Timepoint"], mouse_info["Tumor Volume (mm3)"], color='red') plt.title('Capomulin Results: Mouse s185') plt.xlabel('Timepoint') plt.ylabel('Tumor Volume (mm3)') plt.show() # Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen w_capo_group = capo_df.groupby(["Weight (g)"]).mean()["Tumor Volume (mm3)"] w_capo_df = w_capo_group.reset_index() w_capo_df x_values = w_capo_df["Weight (g)"] y_values = w_capo_df["Tumor Volume (mm3)"] plt.scatter(x_values, y_values) plt.xlabel("Weight (g)") plt.title("Weight and Average Tumor Volume Relationship") plt.ylabel("Tumor Volume (mm3)") plt.show() sns.set_style("whitegrid") sns.scatterplot(x=x_values, y=y_values, ) plt.title("Weight and Average Tumor Volume Relationship");Correlation and Regression# Calculate the correlation coefficient and linear regression model # for mouse weight and average tumor volume for the Capomulin regimen from scipy.stats import linregress slope, intercept, rvalue, pvalue, std_err = linregress(x_values, y_values) regress_values = slope * x_values + intercept line_eq = f"y = {round(slope)}x + {round(intercept,2)}" plt.scatter(x_values, y_values) plt.plot(x_values, regress_values, color='red', ls ='--') plt.annotate(line_eq,(20, 37),fontsize=14) plt.xlabel("Weight (g)") plt.title("Weight and Average Tumor Volume Relationship") plt.ylabel("Tumor Volume (mm3)") plt.show() print(f"The correlation between both factors is: {round(rvalue,2)}") print(f"The r2 of the model is: {(rvalue**2)}") plt.scatter(x_values, y_values) plt.xlabel("Weight (g)") plt.title("Weight and Average Tumor Volume Relationship") plt.ylabel("Tumor Volume (mm3)") plt.show()Práctica de Formatos de datos: XML La __EMT__ dispone de un portal de datos abiertos que se puede encontrar en la página http://opendata.emtmadrid.es/Home. El objetivo del mismo es ofrecer diferentes tipos de datos sobre la actividad de los autobuses de la EMT. El acceso a los datos se realiza a través de una API de servicios web. Para poder utilizar la API de servicios, en primre lugar hay que registrarse, lo cual puede hacerse en la página http://opendata.emtmadrid.es/Formulario. El registro devuelve como resultado en un mensaje electrónico, dos valores:* Id de cliente: Identificador de cliente* Pass Key: PasswordA continuación, hay que elegir el servicio que se quiere utilizar. Hay 4 servicios definidos: BUS, GEO, MEDIA, INFOPARKING y BICIMAD. Cada servicio tiene asociado un conjunto de métodos que al invocarlos, devuelven un resultado. Por ejemplo, el servicio BUS dispone del servicio __"GetRouteLines"__ que obtiene el itinerario de una línea (o varias líneas separadas por el carácter pipe(|)), con los vértices para construir las rectas del recorrido y las coordenadas UTM de los ejes viales y los códigos de parada. Si se quiere invocar desde Python, se puede hacer usando el siguiente código:import requests datos = { 'idClient':'Identificador de cliente', 'PassKey':'Password', 'SelectDate': '27/10/2017', 'Lines':'27' } url = 'https://servicios.emtmadrid.es:8443/bus/servicebus.asmx/GetRouteLines' response = requests.post(url, data=datos) print(response.text)En esta práctica se va a trabajar con varios métodos:* Método __'GetStreet'__ del servicio GEO. Este método obtiene una lista de emplazamientos EMT coincidentes con una localización. 
Cada emplazamiento está compuesto por una lista de paradas EMT situadas dentro de un radio predefinido con todos sus atributos, así como las líneas EMT que pasan por cada parada de la lista. En el documento __Servicios_EMT.pdf__ adjunto a esta práctica, se explica con detalle los parámetros de entrada y el resultado que devuelve este método. El método se puede invocar indicando al menos el nombre de la calle y el número:import requests datos = { 'idClient':'Identificador de cliente', 'PassKey':'Password', 'description': 'Profesor ', 'streetNumber':'9', 'Radius':'', 'Stops':'', 'statistics':'', 'cultureInfo':'' } url = 'https://servicios.emtmadrid.es:8443/geo/servicegeo.asmx/GetStreet' response = requests.post(url, data=datos) print(response.text)El método devuelve un documento XML. Entre los elementos del documento, se encuentran las coordenadas X e Y del lugar buscado. En la búsqueda del ejemplo, la calle búscada tiene como coordenadas UTM X e Y:-3.73379031085432 40.4530280706993* Método __'GetStreetRoute'__ del servicio MEDIA. Este método obtiene hasta tres rutas óptimas para ir de un sitio a otro a pie y en autobús. En el documento __Servicios_EMT.pdf__ adjunto a esta práctica, se explica con detalle los parámetros de entrada y el resultado que devuelve este método. El método se puede invocar indicando los siguientes parámetros * idClient: Código de cliente autorizado para la operación y suministrado por EMT. * passKey: Código de clave asociado al cliente. * coordinateXFrom: Campo para identificar la posición "x" del origen de la ruta a consultar. * coordinateYFrom: Campo para identificar la posición "y" del origen de la ruta a consultar. * coordinateXTo: Campo para identificar la posición "x" del destino de la ruta a consultar. * coordinateYTo: Campo para identificar la posición "y" del destino de la ruta a consultar. * criteriaSelection: Campo que indica el criterio de la búsqueda. Los valores son: * 11 - Mínimo tiempo de trayecto * 13 - Mínimos trasbordos * 14 - Mínimo recorrido a pieimport requests datos = { 'idClient':'Identificador de cliente', 'PassKey':'Password', 'statistics':'', 'cultureInfo':'', 'coordinateXFrom': '-3,63271713087776', 'coordinateYFrom':'40,4543980802915', 'originName':'', 'coordinateXTo': '-3.7013019', 'coordinateYTo':'40.4176416', 'destinationName':'', 'criteriaSelection':'13', 'day':'', 'month':'', 'year':'', 'hour':'', 'minute':'', 'GenerarAudio':'' } url = 'https://servicios.emtmadrid.es:8443/servicemedia/servicemedia.asmx/GetStreetRoute' response = requests.post(url, data=datos) print(response.text)El método devuelve un documento XML que contiene información sobre las rutas óptimas (se explica con detalle en el documento __Servicios_EMT.pdf__). En particular muestra la siguiente información:* El elemento __DescriptionRouteData__ donde aparece la siguiente información: * DescriptionDate: Fecha de la ruta. * DescriptionInitTime: Descripción de hora de inicio de la ruta. * DescriptionEstimateTimeArrival: Descripción de la hora estimada de llegada. * Transfers: Trasbordos. * LongJourney: Duración del viaje.* El elemento __ListSectionRoute__ aparece una lista de subelementos __Section__.Cada uno de ellos, describe parcialmente una parte de la ruta. 
En un caso ideal, aparecerá un subelemento de __Section__ de tipo __WalkingLeg__ que describe el inicio de la ruta andando, a continuación un conjunto de subelementos de __Section__ de tipo __BusLeg__ que describen cada uno de ellos la ruta en una línea de autobus que forma parte de la ruta buscada, y por último otro subelemento de __Section__ de tipo __WalkingLeg__ que describe el final de la ruta andando.* El elemento __POI__ que muestra información sobre puntos de interés que se encuentran a lo largo de la ruta. Se pide:* [1 punto] Crear una función que solicite al usuario dos calles de Madrid, que actúen de origen y destino de una ruta* [2 puntos]Crear una función que dada una calle de Madrid, devuelva las coordenadas X e Y de la calle.* [4 puntos]Crear una función que dadas las coordenadas X e Y de un origen y un destino dentro de Madrid, obtenga la ruta más óptima para ir del origen al destino usando autobuses de la EMT. La función deberá imprimir por pantalla, la siguiente información: * Fecha de la ruta. * Hora de inicio de la ruta. * Hora estimada de llegada * Número de trasbordos. * Duración del viaje. * Descripción textual de la ruta. Usando el ejemplo anterior debería mostrar: * Fecha de la ruta: 26/10/2017 * Hora de inicio de la ruta: 03:55 * Hora estimada de llegada: 05:28 * Número de trasbordos: 1 * Duración del viaje: 93 * Descripción textual de la ruta: * Caminar 42' hasta parada 3125 - EMILIO VARGAS-ARTURO SORIA, linea N4 * 28' en autobús (línea N4) hasta parada 449 - SERRANO-ORTEGA Y GASSET * Desde parada 449 caminar 5' hasta parada 61 - Castellana-Ministerio Interior, linea N25 * 15' en autobús (línea N25) hasta parada 3691 - SOL-SEVILLA * Desde parada 3691 caminar 3'* [1 punto]Crear una función que solicite al usuarios los nombres de 2 calles de Madrid y muestre la información referente a la ruta más óptima para llegar de una calle a otra usando autobuses de la EMT.* [2 puntos]Crear una función que solicite al usuarios los nombres de 2 calles de Madrid y muestre un listado de todos los puntos de interés que hay dentro de la ruta más óptima para llegar de una calle a otra usando autobuses de la EMT. A continuación, se le pedirá al usuario que introduzca el nombre de uno de los puntos de interés mostrado, y como resultado se mostrará por pantalla los siguientes datos del punto de interés: * Nombre del punto de interés. * Dirección del punto de interés. * Teléfono del punto de interés.# [1 punto] Crear una función que solicite al usuario dos calles de Madrid, que actúen de origen y destino de una ruta def pideRuta(): ruta = tuple() # Variable a devolver origen = input("Introduzca la calle origen de la ruta: ") while(origen == ""): print("El origen no puede estar vaciío. Vuelva a introducir el origen: ") origen = input("Introduzca la calle origen de la ruta: ") numOrigen = input("Introduzca el número del origen: ") # No comprobamos que sea distinto de "" porque puede que sea una plaza sin número destino = input("Introduzca la calle destino de la ruta: ") while(destino == ""): print("El destino no puede estar vacío. Vuelva a introducir el destino: ") destino = input("Introduzca la calle destino de la ruta: ") numDestino = input("Introduzca el número del destino: ") # No comprobamos que sea distinto de "" porque puede que sea una plaza sin número ruta = ((origen, numOrigen), (destino, numDestino)) return ruta # [2 puntos]Crear una función que dada una calle de Madrid, devuelva las coordenadas X e Y de la calle. 
import requests from xml.etree import ElementTree def devuelveCoordenadas(calle, numero): datos = { 'idClient':'', 'passKey':'', 'description': str(calle), 'streetNumber': str(numero), 'Radius':'', 'Stops':'', 'statistics':'', 'cultureInfo':'' } url = 'https://servicios.emtmadrid.es:8443/geo/servicegeo.asmx/GetStreet' response = requests.post(url, data=datos) # Datos en XML # Si la calle no se encuentra, se lanza excepción y se detiene la ejecución with open("coord.xml","w") as f: f.write(str(response.text)) f.close() f = open("coord.xml","rt") parseado = ElementTree.parse(f) raiz = parseado.getroot() # Conocemos la estructura del XML por lo que podemos acceder de forma indexada elemX = raiz[0][7].text elemY = raiz[0][8].text coord = (elemX, elemY) return coord # [4 puntos]Crear una función que dadas las coordenadas X e Y de un origen y un destino dentro de Madrid, # obtenga la ruta más óptima para ir del origen al destino usando autobuses de la EMT. (Más info arriba) import requests from xml.etree import ElementTree def calculaRutaOptima(coordenadasOrigen, coordenadasDestino): # Ambas son tuplas con X Y datos = { 'idClient':'', 'PassKey':'', 'statistics':'', 'cultureInfo':'', 'coordinateXFrom': str(coordenadasOrigen[0]), 'coordinateYFrom': str(coordenadasOrigen[1]), 'originName':'', 'coordinateXTo': str(coordenadasDestino[0]), 'coordinateYTo': str(coordenadasDestino[1]), 'destinationName':'', 'criteriaSelection':'13', 'day':'', 'month':'', 'year':'', 'hour':'', 'minute':'', 'GenerarAudio':'' } url = 'https://servicios.emtmadrid.es:8443/servicemedia/servicemedia.asmx/GetStreetRoute' response = requests.post(url, data=datos) with open("rutaOptima.xml","w") as f: f.write(str(response.text)) f.close() f = open("rutaOptima.xml","rt") arbol = ElementTree.parse(f) root = arbol.getroot() for nodo in root.iter("DescriptionRouteData"): fecha_de_la_ruta = nodo.find("DescriptionDate").text hora_de_inicio_de_la_ruta = nodo.find("DescriptionInitTime").text hora_estimada_de_llegada = nodo.find("DescriptionEstimateTimeArrival").text numero_de_trasbordos = nodo.find("Transfers").text duracion_del_viaje = nodo.find ("LongJourney").text print("\nFecha del viaje: " + fecha_de_la_ruta) print("Hora de inicio: " + hora_de_inicio_de_la_ruta) print("Hora estimada de llegada: " + hora_estimada_de_llegada) print("Número de transbordos: " + numero_de_trasbordos) print("Duración del viaje: " + duracion_del_viaje) print("\nDescripción textual de la ruta:") # Lo recibido en el XML de la EMT muestra un "Desde caminar" que queda incompleto al principio y un "caminar hasta" también # incompleto, al final, lo cual difiere de la presentación sugerida en el guión, así que truncamos la información # recibida para que tenga una buena presentación description = "" for nodo in root.iter("RouteDescription"): description = description + "\n" + nodo.text tam = len(description) description = description[8:tam - 7] # Truncamos la info para que tenga una buena presentación print(description) print("\n") f.close() # [1 punto]Crear una función que solicite al usuarios los nombres de 2 calles de Madrid y muestre la # información referente a la ruta más óptima para llegar de una calle a otra usando autobuses de la EMT. 
def infoRutaOptima (): calles = pideRuta() # calles es tupla origen-destino coordenadasOrigen = devuelveCoordenadas(calles[0][0], calles[0][1]) coordenadasDestino = devuelveCoordenadas(calles[1][0], calles[1][1]) calculaRutaOptima(coordenadasOrigen, coordenadasDestino) # [2 puntos]Crear una función que solicite al usuarios los nombres de 2 calles de Madrid y muestre un # listado de todos los puntos de interés que hay dentro de la ruta más óptima para llegar de una calle a # otra usando autobuses de la EMT. A continuación, se le pedirá al usuario que introduzca el nombre de uno # de los puntos de interés mostrado, y como resultado se mostrará por pantalla los siguientes datos del punto # de interés: # Nombre del punto de interés. # Dirección del punto de interés. # Teléfono del punto de interés. from xml.etree import ElementTree def puntosInteres(): calles = pideRuta() # calles es tupla origen-destino # Obtener ruta óptima coordenadasOrigen = devuelveCoordenadas(calles[0][0], calles[0][1]) coordenadasDestino = devuelveCoordenadas(calles[1][0], calles[1][1]) calculaRutaOptima(coordenadasOrigen, coordenadasDestino) # Ahora usamos rutaOptima.xml, que sea ha creado en calculRutaOptima f = open("rutaOptima.xml","rt") arbol = ElementTree.parse(f) root = arbol.getroot() print("\nPuntos de interés en la ruta seleccionada:\n") for nodo in root.iter("POI"): elem = nodo.find("nombre").text elem = elem.strip() if elem != "" and elem != '\n': # Para evitar elementos vacíos y saltos de línea print(elem) encontrado = False while not encontrado: punto = input("\nIntroduzca el punto de interés deseado: ") punto = punto.upper() for nodo in root.iter("POI"): if str(nodo.find("nombre").text) == str(punto): encontrado = True print("\n") print(nodo.find("nombre").text) print(nodo.find("direccion").text) print(nodo.find("telefono").text) if not encontrado: print("Introduzca un punto de interés válido") f.close() infoRutaOptima() puntosInteres()Examen de medio cursoNombre: clase: Martes N4 1.1 El usuario dará un número al azar y el código calculará la suma de todos los números desde el 1 hasta el número dado por el usuario.Ejemplo: el usuario dió el número 4, el código deberá de dar como resultado 10.# Se pide el numero y se declaran las variables a utilizar num = input("Ingrese su numero: ") i=1 sum = 0 # Se abre un ciclo while para que se ejecute mientras que i es menor que el numero dado while i <= int(num) : sum = sum + i i = i + 1 # Se imprime la suma print("La suma de los numeros da: ", sum)Ingrese su numero: 4 La suma de los numeros da: 101.2 Dados el inicio y final de un rango de números, guardar ese rango de números en una lista. Después, imprimir los números que son pares en la lista por medio de uno de los ciclos que vimos en clase. Inicio = 6, final = 31# Se piden los valores del rango y se declara una lista vacia min = int(input("Ingrese el rango minimo de su lista: ")) max = int(input("Ingrese el rango maximo de su lista: ")) Lista = [] # Se realiza un ciclo para adjuntar cada valor dentro del rango a la lista while (min <= max): Lista.append(min) min = min + 1 # Se imprimen los numeros pares de la lista for x in Lista: if (x%2 == 0): print(x)Ingrese el rango minimo de su lista: 6 Ingrese el rango maximo de su lista: 31 6 8 10 12 14 16 18 20 22 24 26 28 30Analysis datasetThis notebook is created to analyze the dataset I created in the notebook living_population. Here is where I get all the data with errors hopefully. 
18 October 2019# Import some packages import numpy as np import scipy from scipy import stats import matplotlib.pyplot as plt import copy from scipy.optimize import curve_fit from scipy.special import factorial import os from astropy.coordinates import SkyCoord from astropy.coordinates import ICRS, Galactic, GeocentricTrueEcliptic from astropy.coordinates import Angle, Latitude, Longitude import astropy.units as u import astropy.constants as const %matplotlib inline # This one makes LaTeX in the plots from matplotlib import rc fsize = 20 rc('font',**{'family':'serif','serif':['Times'],'size' : fsize}) rc('text', usetex=True) import sys sys.path.insert(0, '../') #from GetColumnMESA import GetColumnMESA #from extrapolate import extrapolate G = const.G.to('AU3/(M_sun d2)').value pc_SI = u.pc.to(u.m) AU_SI = u.AU.to(u.m) c_SI = const.c.value c_AUday = c_SI*u.m.to(u.AU)/(u.s.to(u.day)) h_SI = const.h.value kB_SI = const.k_B.value b_wien_SI = const.b_wien.value RSun_SI = u.R_sun.to(u.m) G_cgs = const.G.to('cm^3 g^-1 s^-2').value # This I believe is an estimate for rotation rates of stripped stars as # expected from first Fuller+19 and then Spruit dynamo Omega = 10 ** (-5.2) # radians/second Omega = Omega / (2.0 * np.pi) # s^-1 print (Omega) Rstar = 0.8 * u.R_sun.to(u.km) # Radius of star in km vrot = Rstar * Omega print (vrot) Omega_new = 1.0 / (200.0 * 24.0 * 3600.0) Omega_TS = 1.0 / (2.0 * 24.0 * 3600.0) print (Omega_new, Omega_TS) vrot_TS = Rstar * (Omega_TS * 100.0) print (vrot_TS) a = np.logspace(2,3.5,100)*u.R_sun.to(u.AU) m1 = 4. m2 = 1.4 P = np.sqrt(4.*(np.pi**2.)*(a**3.)/(G*(m1+m2))) print (P) P = 0.1 # days a = (P*G*(m1+m2)/(4.*(np.pi**2.)))**(1./3.) Jorb = np.sqrt(G*a/(m1+m2))*m1*m2 print (Jorb) Pdot_GW_div_P = (-96./5.)*((G**3.)/(c_AUday**5.))*(m1*m2*(m1+m2)/(a**4.)) print (Pdot_GW_div_P)[ 49.8478503 52.5259247 55.34787857 58.32144184 61.45475971 64.756415 68.23545163 71.90139941 75.76430013 79.83473509 84.12385406 88.64340582 93.40577038 98.42399285 103.71181918 109.28373384 115.15499946 121.34169867 127.86077812 134.73009494 141.96846562 149.59571756 157.63274341 166.10155825 175.02535994 184.42859263 194.33701374 204.77776451 215.77944432 227.37218908 239.58775374 252.45959928 266.02298437 280.31506195 295.37498101 311.24399381 327.9655689 345.58551014 364.15208225 383.71614294 404.33128226 426.05396938 448.94370727 473.0631957 498.47850299 525.25924697 553.4787857 583.21441837 614.5475971 647.56414999 682.3545163 719.01399408 757.64300131 798.3473509 841.23854056 886.43405822 934.05770383 984.2399285 1037.11819182 1092.8373384 1151.54999461 1213.41698668 1278.60778121 1347.30094939 1419.68465616 1495.957175[...]There is a file with the total number of systemsloc_run = '/data002/ygoetberg/scripts/GW/10yr_notides_long/' filename = loc_run+'pop_full.txt' #filename = '/data002/ygoetberg/scripts/GW/standard_run/pop_full.txt' #filename = '/data002/ygoetberg/scripts/GW/4yr_run/pop_full.txt' data = np.loadtxt(filename,skiprows=1) N_WD = data[:,1] N_NS = data[:,2] N_BH = data[:,3] ww = 15 hh = 5 fig, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(ww,hh)) N = [N_WD, N_NS, N_BH] n = 0 for ax in (ax1,ax2,ax3): ax.hist(N[n],normed=True) m, s = stats.norm.fit(N[n]) xt = ax.get_xticks() xmin, xmax = min(xt), max(xt) lnspc = np.linspace(xmin, xmax, 100) pdf_g = stats.norm.pdf(lnspc, m, s) ax.plot(lnspc, pdf_g,'r-') print m, s n=n+1/data002/ygoetberg/anaconda3/envs/venv/lib/python2.7/site-packages/matplotlib/axes/_axes.py:6571: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by 
the 'density' kwarg. warnings.warn("The 'normed' kwarg is deprecated, and has been " /data002/ygoetberg/anaconda3/envs/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py:1662: RuntimeWarning: divide by zero encountered in true_divide x = np.asarray((x - loc)/scale, dtype=dtyp) /data002/ygoetberg/anaconda3/envs/venv/lib/python2.7/site-packages/matplotlib/font_manager.py:1331: UserWarning: findfont: Font family [u'serif'] not found. Falling back to DejaVu Sans (prop.get_family(), self.defaultFamily[fontext]))Get the datafilename = loc_run+'pop_individuals.txt' data = np.genfromtxt(filename,dtype=str) s_nbr = data[:,0].astype(np.float) mstrip = data[:,1].astype(np.float) mco = data[:,2].astype(np.float) Porb = data[:,3].astype(np.float) dist = data[:,4].astype(np.float) h_c = data[:,5].astype(np.float) f_GW = data[:,6].astype(np.float) SNR = data[:,7].astype(np.float) MT = data[:,8] fdot = data[:,9].astype(np.float) np.sum(SNR>7.)/100. filename = loc_run+'pop_SNR4.txt' data = np.genfromtxt(filename,dtype=str) s_nbr=data[:,0].astype(np.float) mstrip = data[:,1].astype(np.float) mco = data[:,2].astype(np.float) dist = data[:,3].astype(np.float) h_c = data[:,4].astype(np.float) f_GW = data[:,5].astype(np.float) SNR = data[:,6].astype(np.float) MT = data[:,7] x = data[:,8].astype(np.float) y = data[:,9].astype(np.float) # Calculate the direction of the sources x_sun = 8. y_sun = 0. xtmp = x-x_sun ytmp = copy.copy(y) dtmp = np.sqrt(xtmp**2. + ytmp**2.) #theta = np.arctan(ytmp/xtmp) theta = np.arcsin(ytmp/dtmp) ind = (xtmp<0)*(ytmp>0) theta[ind]= theta[ind]+np.pi/2. ind = (xtmp<0)*(ytmp<0) theta[ind] = theta[ind]-np.pi/2. # This is the same angle, with 180 towards the Galactic center and in degrees Gal_l = 180.+theta*360./(2.*np.pi) Gal_l[Gal_l > 180.] = Gal_l[Gal_l > 180.]-360.Propertiesclr_WD = np.array([49,143,181])/255. clr_NS = np.array([0,80,134])/255. 
ind_WD = (SNR > 4.)*(mco<=0.6) ind_NS = (SNR > 4.)*(mco==1.4) ww = 9 hh = 9 fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(ww,hh)) fig.tight_layout() # Mass distribution bins = np.linspace(0,8,17) ax1.hist(mstrip[ind_WD],facecolor=clr_WD,bins=bins,label='WD') ax1.hist(mstrip[ind_NS],facecolor=clr_NS,bins=bins,label='NS') ax1.set_yscale('log') #ax1.set_yticks([1,10,100,1000]) #ax1.set_yticklabels(['$10^{-3}$','$10^{-2}$','$10^{-1}$','$10^{0}$']) ax1.set_ylim([1,3e3]) ax1.set_xlabel('$M_{\\mathrm{strip}}$ [$M_{\\odot}$]') ax1.set_ylabel('Number') legend = ax1.legend(loc=0,fontsize=0.8*fsize,edgecolor='none') legend.set_title('Companion type',prop={'size':0.9*fsize}) ax1.set_xlim([0,8]) ax1.set_xticks([0,2,4,6,8]) # Gravitational wave frequency bins = np.arange(0.05,0.6,0.05) ax2.hist(f_GW[ind_WD]*1e3,facecolor=clr_WD,bins=bins) ax2.hist(f_GW[ind_NS]*1e3,facecolor=clr_NS,edgecolor=clr_NS,histtype='stepfilled',bins=bins,lw=2) ax2.hist(f_GW[ind_WD]*1e3,facecolor='none',edgecolor=clr_WD,histtype='stepfilled',bins=bins,lw=2) ax2.set_xlabel('$f_{\\mathrm{GW}}$ [mHz]') ax2.set_yscale('log') ax2.set_xticks([0.1,0.2,0.3,0.4,0.5]) ax2.set_ylim([1,3e3]) # Distance bins=np.linspace(0,3,13) ax3.hist(np.log10(dist[ind_WD]),facecolor=clr_WD,bins=bins) ax3.hist(np.log10(dist[ind_NS]),facecolor=clr_NS,edgecolor=clr_NS,histtype='stepfilled',lw=2,bins=bins) ax3.hist(np.log10(dist[ind_WD]),facecolor='none',edgecolor=clr_WD,histtype='stepfilled',bins=bins,lw=2) ax3.set_yscale('log') ax3.set_ylim([1,3e3]) ax3.set_xlim([0,3]) ax3.set_xticks([0,1,2,3]) ax3.set_xticklabels([1,10,100,1000]) ax3.set_xlabel('Distance [pc]') ax3.set_ylabel('Number') # Location in the Galactic disk bins=np.linspace(-180,180,15) ax4.hist(Gal_l[ind_WD],facecolor=clr_WD,bins=bins) ax4.hist(Gal_l[ind_NS],facecolor=clr_NS,edgecolor=clr_NS,histtype='stepfilled',lw=2,bins=bins) ax4.hist(Gal_l[ind_WD],facecolor='none',edgecolor=clr_WD,histtype='stepfilled',bins=bins,lw=2) ax4.set_yscale('log') ax4.set_ylim([1,3e3]) ax4.set_xlim([-180,180]) ax4.set_xticks([-180,-90,0,90,180]) ax4.set_xticklabels([180,270,0,90,180]) ax4.set_xlabel('Galactic longitude [$^{\\circ}$]') ax4.text() for ax in (ax1,ax2,ax3,ax4): ax.tick_params(direction="in", which='both') ax.tick_params('both', length=6, width=1,which='major') ax.tick_params('both', length=3, width=1,which='minor') ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') #fig.savefig('prop.png',format='png',dpi=300,bbox_inches='tight',pad_inches=0.1)Mass distributionww = 5 hh = 4 fig, ax = plt.subplots(1,1,figsize=(ww,hh)) clr_WD = np.array([49,143,181])/255. clr_NS = np.array([0,80,134])/255. 
ind_WD = (SNR > 4.)*(mco<=0.6) ind_NS = (SNR > 4.)*(mco==1.4) bins = np.linspace(0,8,17) ax.hist(mstrip[ind_WD],facecolor=clr_WD,bins=bins,label='WD') ax.hist(mstrip[ind_NS],facecolor=clr_NS,bins=bins,label='NS') #ax.hist(mstrip) ax.set_yscale('log') #ax.set_yticklabels([]) ax.set_xlabel('$M_{\\mathrm{strip}}$ [$M_{\\odot}$]') ax.set_ylabel('Number, SNR $>4$') legend = ax.legend(loc=0,fontsize=0.7*fsize,edgecolor='none') legend.set_title('Companion type',prop={'size':0.8*fsize}) ax.tick_params(direction="in", which='both') ax.tick_params('both', length=6, width=1,which='major') ax.tick_params('both', length=3, width=1,which='minor') ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.set_xlim([0,8]) fig.savefig(loc_run+'M_distr.pdf',format='pdf',bbox_inches='tight',pad_inches=0.1)Distanceww = 5 hh = 4 fig, ax = plt.subplots(1,1,figsize=(ww,hh)) clr_WD = np.array([49,143,181])/255. clr_NS = np.array([0,80,134])/255. ind_MT = MT == 'True' ax.plot(np.sort(dist),np.arange(len(dist))) #ax.plot(np.sort(dist),np.linspace(0,1,len(dist))) #ax.plot(np.sort(dist[ind_NS]),np.linspace(0,1,len(dist[ind_NS]))) ax.plot(np.sort(dist[ind_NS]),np.arange(len(dist[ind_NS]))) ax.plot(np.sort(dist[ind_MT]),np.arange(len(dist[ind_MT]))) ax.tick_params(direction="in", which='both') ax.tick_params('both', length=6, width=1,which='major') ax.tick_params('both', length=3, width=1,which='minor') ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.set_xlim([0,1500]) #ax.set_ylim([0,1]) ax.set_xlabel('Distance [pc]') #ax.set_ylabel('') ax.set_yscale('log') n = 1 nbr_sys = [] nbr_sim = [] for i in range(1,len(s_nbr)): if s_nbr[i] == s_nbr[i-1]: n = n+1 else: nbr_sys.append(n) nbr_sim.append(s_nbr[i-1]) n = 0 nbr_sys.append(n) nbr_sim.append(s_nbr[-1])Count number of systemsnbr_runs = 100 SNR_lim = 4. nbr_strip_BH = [None]*nbr_runs nbr_strip_NS = [None]*nbr_runs nbr_strip_WD = [None]*nbr_runs for i in range(1,nbr_runs+1): ind = (s_nbr == i)*(SNR > SNR_lim) nbr_strip_BH[i-1] = np.sum(mco[ind] == 5.) nbr_strip_NS[i-1] = np.sum(mco[ind] == 1.4) nbr_strip_WD[i-1] = np.sum(mco[ind] <= 0.6) np.sum(np.array(nbr_strip_NS) == 0), np.sum(np.array(nbr_strip_NS) > 0) i = 66 ind = (s_nbr==(i+1))*(SNR > 4.) 
mstrip[ind], mco[ind], MT[ind], SNR[ind] nbr_strip_NS[81] # poisson function, parameter lamb is the fit parameter def poisson(k, lamb): return (lamb**k/factorial(k)) * np.exp(-lamb) ww = 15 hh = 5 fig, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(ww,hh)) #ax1.hist(nbr_strip_WD) #entries, bin_edges, patches = ax1.hist(nbr_strip_WD, bins=20, range=[14.5,55.5], normed=True) # calculate binmiddles #bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1]) # fit with curve_fit #parameters, cov_matrix = curve_fit(poisson, bin_middles, entries) # plot poisson-deviation with fitted parameter #x_plot = np.linspace(14, 55, 100) #ax1.plot(x_plot, poisson(x_plot, *parameters), 'r-', lw=2) m, s = stats.norm.fit(nbr_strip_WD) print m, s ax1.hist(nbr_strip_WD,normed=True) xt = ax1.get_xticks() xmin, xmax = min(xt), max(xt) lnspc = np.linspace(xmin, xmax, 100) pdf_g = stats.norm.pdf(lnspc, m, s) ax1.plot(lnspc, pdf_g,'r-') #ax2.hist(nbr_strip_NS) entries, bin_edges, patches = ax2.hist(nbr_strip_NS, bins=10, range=[-0.5, 10], normed=True) bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1]) parameters, cov_matrix = curve_fit(poisson, bin_middles, entries) x_plot = np.linspace(-1, 10, 100) ax2.plot(x_plot, poisson(x_plot, *parameters), 'r-', lw=2) print parameters, np.sqrt(parameters) entries, bin_edges, patches = ax3.hist(np.array(nbr_strip_WD), bins=15, range=[-0.5, 15], normed=True) bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1]) parameters, cov_matrix = curve_fit(poisson, bin_middles, entries) x_plot = np.linspace(-1, 15, 100) ax3.plot(x_plot, poisson(x_plot, *parameters), 'r-', lw=2) print parameters, np.sqrt(parameters) import numpy as np import matplotlib.pylab as plt # create some normal random noisy data ser = 50*np.random.rand() * np.random.normal(10, 10, 100) + 20 # plot normed histogram plt.hist(ser, normed=True) # find minimum and maximum of xticks, so we know # where we should compute theoretical distribution xt = plt.xticks()[0] xmin, xmax = min(xt), max(xt) lnspc = np.linspace(xmin, xmax, len(ser)) # lets try the normal distribution first m, s = stats.norm.fit(ser) # get mean and standard deviation pdf_g = stats.norm.pdf(lnspc, m, s) # now get theoretical values in our interval plt.plot(lnspc, pdf_g, label="Norm") # plot it #np.min(nbr_strip_WD), np.max(nbr_strip_WD) np.min(nbr_strip_NS), np.max(nbr_strip_NS) #np.min(nbr_strip_BH), np.max(nbr_strip_BH)Frequency rangefmin_strip_BH = np.zeros(int(s_nbr[-1])) fmax_strip_BH = np.zeros(int(s_nbr[-1])) fmin_strip_NS = np.zeros(int(s_nbr[-1])) fmax_strip_NS = np.zeros(int(s_nbr[-1])) fmin_strip_WD = np.zeros(int(s_nbr[-1])) fmax_strip_WD = np.zeros(int(s_nbr[-1])) for i in range(int(s_nbr[-1])): ind = (s_nbr == (i+1))*(SNR > 2.)*(mco==5.) 
if np.sum(ind)>0: fmin_strip_BH[i] = np.min(f_GW[ind]) fmax_strip_BH[i] = np.max(f_GW[ind]) ind = (s_nbr == (i+1))*(SNR > 2.)*(mco==1.4) if np.sum(ind)>0: fmin_strip_NS[i] = np.min(f_GW[ind]) fmax_strip_NS[i] = np.max(f_GW[ind]) ind = (s_nbr == (i+1))*(SNR > 2.)*(mco==0.6) if np.sum(ind)>0: fmin_strip_WD[i] = np.min(f_GW[ind]) fmax_strip_WD[i] = np.max(f_GW[ind]) np.max(fmax_strip_WD[fmax_strip_WD>0]) print np.max(fmax_strip_NS[fmax_strip_NS>0]) print np.max(fmax_strip_BH[fmax_strip_BH>0])0.0002208425864697041 0.0002000456288608138Distance to the sourcestmp = np.sort(dist[(mco<=0.6)*(SNR>2.)]) plt.plot(tmp,np.arange(len(tmp))) plt.show() tmp = np.sort(dist[(mco==1.4)*(SNR>2.)]) plt.plot(tmp,np.arange(len(tmp))) plt.show()Mass transferring systemsMT = MT == 'True' SNR_lim = 2 #np.sum(MT)/np.float_(len(MT)) # Check how large fraction of the WDs with SNR > 2 that are mass transferring ind = (mco==0.6)*(SNR>SNR_lim) print np.sum(MT[ind])/np.float_(np.sum(ind)) ind = (mco==1.4)*(SNR>SNR_lim) print np.sum(MT[ind])/np.float_(np.sum(ind)) ind = (mco==5)*(SNR>SNR_lim) print np.sum(MT[ind])/np.float_(np.sum(ind)) ind = (mco>0.6)*(SNR>SNR_lim) print np.sum(MT[ind])/np.float_(np.sum(ind))0.8627156789197299 0.1276595744680851 nan 0.1276595744680851Orbital period X-ray emission For systems that are not currently transferring mass, I follow what Katie suggested.# THE BELOW IS COPIED FROM KATIE BREIVIK -- BIG THANKS! G_SI = 6.67384e-11 c_SI = 2.99792458e8 rsun_in_au = 215.0954 Msun_SI = 1.9891e30 Rsun_SI = 6.955e8 sec_in_year = 3.15569e7 def calculate_Mdot_edd(M_BH): """ Note: this assumes kappa=0.4 cm^2/g Args: M_BH = bh mass in solar masses Returns: Mdot_edd in Msun/yr """ #mp_by_sigmat = 25.1578947368 #L_edd = 4*np.pi*G*M_BH*Msun*c*mp_by_sigmat X = 0.2 # hydrogen mass fraction in the wind kappa_he = 0.02*(1+X) # m^2/kg L_edd = 4.*np.pi*c_SI*G_SI*M_BH*Msun_SI/kappa_he Mdot_edd = L_edd/c_SI**2 return Mdot_edd/Msun_SI*sec_in_year # This is epsilon def calculate_ADAF_efficiency(Mdot, Mdot_edd, delta=0.01): """ Calculate the mass-luminosity efficiency based on the ADAF model of Xie & Yuan (2012), using the piecewise fitting function using values provided their Table 1 for their Equation 11. 
Args: Mdot : mass transfer rate onto a BH [msun/year] Mdot_edd : Eddington mass transfer rate (same units as Mdot) delta : (optional) fraction of ionization energy acting on electrons Returns: epsilon : mass-light conversion efficiency """ M_ratio = Mdot/Mdot_edd if not delta in [0.5, 0.1, 0.01, 0.001]: print("You must provide an acceptable value for delta.") sys.exit(-1) if delta == 0.5: conds = [M_ratio < 2.9e-5, (M_ratio >= 2.9e-5) & (M_ratio < 3.3e-3), (M_ratio >= 3.3e-3) & (M_ratio < 5.3e-3), M_ratio >= 5.3e-3] func_epsilon = [lambda M_ratio: 1.58*(100*M_ratio)**0.65, lambda M_ratio: 0.055*(100*M_ratio)**0.076, lambda M_ratio: 0.17*(100*M_ratio)**1.12, lambda M_ratio: 0.1] if delta == 0.1: conds = [M_ratio < 9.4e-5, (M_ratio >= 9.4e-5) & (M_ratio < 5.0e-3), (M_ratio >= 5.0e-3) & (M_ratio < 6.6e-3), M_ratio >= 6.6e-3] func_epsilon = [lambda M_ratio: 0.12*(100*M_ratio)**0.59, lambda M_ratio: 0.026*(100*M_ratio)**0.27, lambda M_ratio: 0.50*(100*M_ratio)**4.53, lambda M_ratio: 0.1] if delta == 1.0e-2: conds = [M_ratio < 1.6e-5, (M_ratio >= 1.6e-5) & (M_ratio < 5.3e-3), (M_ratio >= 5.3e-3) & (M_ratio < 7.1e-3), M_ratio >= 7.1e-3] func_epsilon = [lambda M_ratio: 0.069*(100*M_ratio)**0.69, lambda M_ratio: 0.027*(100*M_ratio)**0.54, lambda M_ratio: 0.42*(100*M_ratio)**4.85, lambda M_ratio: 0.1] if delta == 1.0e-3: conds = [M_ratio < 7.6e-5, (M_ratio >= 7.6e-5) & (M_ratio < 4.5e-3), (M_ratio >= 4.5e-3) & (M_ratio < 7.1e-3), M_ratio >= 7.1e-3] func_epsilon = [lambda M_ratio: 0.065*(100*M_ratio)**0.71, lambda M_ratio: 0.020*(100*M_ratio)**0.47, lambda M_ratio: 0.26*(100*M_ratio)**3.67, lambda M_ratio: 0.1] return np.piecewise(M_ratio, conds, func_epsilon) def M_acc_fac(m_co, m_donor, r_donor, a, wind_profile, alpha=1.5): # Msun, Rsun units v_esc = (2*G*(m_donor)*Msun_SI/(r_donor*Rsun_SI))**0.5 v_infty = 1.5*v_esc if wind_profile: beta = 1. v_wind = v_infty*((1.-r_donor/a)**beta) else: v_wind = copy.copy(v_infty) v_orb = (G_SI*(m_co+m_donor)*Msun_SI/(a*Rsun_SI))**0.5 v_rel_squared = (v_wind**2+v_orb**2) v_rel = v_rel_squared**0.5 M_acc_fac = alpha*(G*m_co*Msun_SI)**2/(2*v_rel**3*v_wind*(a*Rsun_SI)**2) return M_acc_fac def L_X_ray(m_co, m_donor, r_donor, a, m_dot, r_acc, MT, wind_profile, epsilon=1.0, alpha=1.5): # m_co, m_donor [msun], a [rsun], m_dot [msun/yr] # Same as BSE # Eq. 36 M_acc_factor = M_acc_fac(m_co, m_donor, r_donor, a, wind_profile, alpha=1.5) # M_dot_acc = m_dot*Msun_SI/sec_in_year*M_acc_factor # Eq. 83 #ind = MT==True M_dot_acc[MT] = m_dot[MT]*Msun_SI/sec_in_year L_x = epsilon*G_SI*m_co*Msun_SI*M_dot_acc/(r_acc) # L_x_erg = L_x*1e7 L_x_sun = L_x/3.8270e33 return L_x_erg, L_x_sun # I need first to calculate the separation between the two stars in the systems # Porb [days], G [AU, MSun, days], mco & mstrip [Msun] a = ((Porb**2)*G*(mco+mstrip)/(4.*(np.pi**2.)))**(1./3.) # AU a = a*u.AU.to(u.R_sun) # Rsun plt.semilogy(mstrip,a,'.') plt.xlabel('Mstrip [Msun]') plt.ylabel('Separation [Rsun]') # Then I need an estimate for the wind mass loss rates # This is what was assumed in the grid in Gotberg+18 mstrip_grid = np.array([0.35,0.38,0.44,0.51,0.58,0.66,0.74,0.85,0.97,1.11,1.27,1.43,1.64,1.88,2.17,2.49,2.87,3.32,3.85,4.45,5.12,5.88,6.72]) # MSun mdot_grid = 10**np.array([-12.,-12.,-12.,-12.,-12.,-12.,-12.,-11.2,-10.6,-10.,-9.5,-8.4,-8.1,-7.8,-7.5,-7.3,-7.1,-6.8,-6.6,-6.4,-6.2,-6.,-5.8]) # But the mass loss rates are probably about a factor of 10 too high, so I update! mdot_grid = mdot_grid/10. print 'I have divided the Mdotwind by 10 to be more realistic.' 
plt.plot(np.log10(mstrip_grid),np.log10(mdot_grid),'.',ms=15) # I think it is best to interpolate in log-log space mdot_wind = 10**np.interp(np.log10(mstrip), np.log10(mstrip_grid),np.log10(mdot_grid)) plt.plot(np.log10(mstrip),np.log10(mdot_wind),'.') # Now, I need to know at what distance the accretion should occur r_acc = np.zeros(len(mco)) # The black holes are assumed to accrete at 3 Schwarzschild radius distance G_SI = const.G.value c_SI = const.c.value r_schwarz = 2.*G_SI*mco[mco==5.]*u.M_sun.to(u.kg)/(c_SI**2.) # m r_acc[mco == 5.] = 3.*r_schwarz # The neutron stars are assumed to have a radius of 10 km and accrete directly on the surface r_acc[mco == 1.4] = 1e4 # m # The white dwarfs are assumed to have a radius of 0.005 Rsun r_acc[mco <= 0.6] = 0.005*u.R_sun.to(u.m) # m # I need also the radius of the stripped star rstrip_grid = np.array([0.16,0.17,0.19,0.21,0.23,0.25,0.26,0.29,0.32,0.36,0.40,0.42,0.46,0.50,0.55,0.59,0.65,0.70,0.76,0.80,0.84,0.87,0.88]) # Rsun plt.loglog(mstrip_grid,rstrip_grid,'.') rstrip = 10**np.interp(np.log10(mstrip),np.log10(mstrip_grid),np.log10(rstrip_grid)) plt.loglog(mstrip,rstrip,'.') # Some paramters for the functions alpha=1.5 # From hydrodynamic simulations of Bondi-Hoyle, between 1 and 2, but weigthed by the 2 in the formula delta=0.5 # For ADAF - only use if significantly below Eddington (we set < 10% Mdot_Edd) # I implemented the wind_profile switch - it is assuming a standard beta-law for the wind profile, with beta=1 wind_profile = True # I am now going to use a function to get the M_acc_factor M_acc_factor = M_acc_fac(mco,mstrip,rstrip,a,wind_profile,alpha) print M_acc_factor plt.hist(np.log10(M_acc_factor)) plt.hist(np.log10(M_acc_factor[mco == 1.4])) plt.yscale('log') plt.xlabel('log Maccfactor') print np.min(M_acc_factor[mco == 1.4]), np.max(M_acc_factor[mco == 1.4]) """ # This is what I calculate following the accretion physics book vwind = 1.5* ((2.*G_SI*mstrip*Msun_SI/(rstrip*Rsun_SI))**0.5) # Going to use 1.5 vesc v_NS = (G_SI*(mco+mstrip)*Msun_SI/(a*Rsun_SI))**0.5 vrel = ((v_NS**2.) + (vwind**2.))**0.5 M_acc_factor_book = ((G_SI*mco*Msun_SI/(a*Rsun_SI))**2.)/((vrel**3.)*vwind) print np.min(M_acc_factor_book[mco == 1.4]), np.max(M_acc_factor_book[mco == 1.4]) plt.hist(np.log10(M_acc_factor_book[mco==1.4])) """ # From that, we can get the mass accretion rate M_dot_acc = mdot_wind*M_acc_factor # But, we want to account for systems that are mass transferring # I will assume that these funnel all wind mass loss to the compact object M_dot_acc[MT=='True'] = mdot_wind[MT=='True'] # Need to calculate Eddington accretion rate M_dot_Edd = calculate_Mdot_edd(mco) ind_Edd = M_dot_acc > 0.1*M_dot_Edd ind_ADAF = M_dot_acc <= 0.1*M_dot_Edd epsilon = calculate_ADAF_efficiency(M_dot_acc, calculate_Mdot_edd(mco), delta) epsilon[ind_Edd] = 1. # Fragos+08 for NS surface accretion epsilon = 1. 
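# epsilon is the mass-to-light conversion efficiency consumed by L_X_ray below:
# the piecewise ADAF fit is applied to systems accreting below 0.1*M_dot_Edd,
# systems above that threshold are set to epsilon = 1, and the final scalar
# assignment "epsilon = 1." then overrides the array so that unit efficiency
# (direct surface accretion, cf. the Fragos+08 note) is used for all systems
# in the call that follows.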
# Calculate the X-ray luminosity LX = L_X_ray(mco, mstrip, rstrip, a, mdot_wind, r_acc, MT=='True', wind_profile, epsilon, alpha) plt.plot(mstrip,np.log10(M_dot_acc),'.') # Here is the X-ray luminosities LXray_cgs = LX[0] # erg/s d = dist*u.pc.to(u.cm) FXray_cgs = LXray_cgs/(4.*np.pi*(d**2.)) ind_WD = (mco <= 0.6) print np.log10(np.min(LXray_cgs[ind_WD])), np.log10(np.max(LXray_cgs[ind_WD])) print np.log10(np.min(FXray_cgs[ind_WD])), np.log10(np.max(FXray_cgs[ind_WD])) print '' ind_NS = (mco == 1.4) print np.log10(np.min(LXray_cgs[ind_NS])), np.log10(np.max(LXray_cgs[ind_NS])) print np.log10(np.min(FXray_cgs[ind_NS])), np.log10(np.max(FXray_cgs[ind_NS])) print '' hh = 5 ww = 6 fig, ax = plt.subplots(1,1, figsize=(ww,hh)) clr_WD = np.array([49,143,181])/255. clr_NS = np.array([0,80,134])/255. ax.plot(mstrip[ind_WD],np.log10(LXray_cgs[ind_WD]),'.',color=clr_WD,label='WD') ax.plot(mstrip[ind_WD*(MT=='True')],np.log10(LXray_cgs[ind_WD*(MT=='True')]),'o', markerfacecolor='none',markeredgecolor=0.5*clr_WD) ax.plot(mstrip[ind_NS],np.log10(LXray_cgs[ind_NS]),'.',color=clr_NS,label='NS') ax.plot(mstrip[ind_NS*(MT=='True')],np.log10(LXray_cgs[ind_NS*(MT=='True')]),'o', markerfacecolor='none',markeredgecolor=0.5*clr_NS) #ax.plot(mstrip[ind_NS*ind_Edd],np.log10(LXray_cgs[ind_NS*ind_Edd]),'o', # markerfacecolor='none',markeredgecolor='r') plt.ylim([30,40]) ax.legend(loc=0,fontsize=0.7*fsize) ax.set_xlabel('$M_{\\mathrm{strip}}$ [$M_{\\odot}$]') ax.set_ylabel('$\\log_{10} L_X$ [erg/s]') ind = (mco <= 0.6) ind = (mco > 0.6) plt.hist(np.log10(LXray_cgs[ind]))What to do about all those white dwarf systems that are undergoing mass transfer? I will follow Nelemans+04# I will start by assuming that the mass transfer rate is very low, like 1e-9 Msun/yr Mdot_acc = copy.copy(mdot_wind) #1e-9 Mdot_acc_cgs = Mdot_acc*u.M_sun.to(u.g)/(u.year.to(u.s)) # Calculte the accretion luminosity M_CO_cgs = mco*u.M_sun.to(u.g) R_CO_cgs = r_acc*u.m.to(u.cm) R_CO_Rsun = R_CO_cgs*u.cm.to(u.R_sun) q = mco/mstrip rL = 0.49*(q**(2./3.))/(0.69*(q**(2./3.)) + np.log(1.+(q**(1./3.)))) #Porb = .1 # day a = ((Porb**2.)*(G*(mstrip+mco)/(4.*(np.pi**2.))))**(1./3.) # AU RL_CO = (rL*a)*u.AU.to(u.R_sun) RL_CO_cgs = RL_CO*u.R_sun.to(u.cm) Lacc_cgs = 0.5*G_cgs*M_CO_cgs*Mdot_acc_cgs*((1./R_CO_cgs) - (1./RL_CO_cgs)) # erg/s Lacc_Lsun = Lacc_cgs*(u.erg/u.s).to(u.L_sun) # Calculate the temperature of the assumed blackbody Tsun = 5778. # K f = 0.001 Tacc = Tsun*(((1./f)*(R_CO_Rsun**(-2.))*Lacc_Lsun)**(1./4.)) print np.log10(Tacc) E_eV = np.logspace(0,6,1000) # in eV E_SI = E_eV*u.eV.to(u.J) # in J lambda_SI = h_SI*c_SI/E_SI # in m # Get the luminosity between 0.1 and 2.4 keV Elim_low = 0.1*1e3 # eV Elim_high = 2.4*1e3 # eV ind_X = (E_eV > Elim_low)*(E_eV < Elim_high) LX_cgs_MT = np.zeros(len(Tacc)) for i in range(len(Tacc)): # Get the blackbody intensity Blambda_X = (2.*h_SI*c_SI/(lambda_SI**5.))/(np.exp(h_SI*c_SI/(lambda_SI*kB_SI*Tacc[i])) - 1.) # SI units d = dist[i]*u.pc.to(u.R_sun) #1e3*u.pc.to(u.R_sun) Flambda_X = np.pi*Blambda_X*((R_CO_Rsun[i]/d)**2.) 
# J s^-1 m^-2 m^-1 Llambda_X = 4.*np.pi*((d*u.R_sun.to(u.m))**2.)*Flambda_X # J s^-1 m^-1 Ltot = np.trapz(Llambda_X[ind_X][::-1],lambda_SI[ind_X][::-1]) LX_cgs_MT[i] = Ltot*u.J.to(u.erg) #print 'X-ray luminosity:', Ltot*u.J.to(u.erg), 'erg/s' if np.mod(i,10000) == 0: print i ind = (mco == 0.6)*(MT == 'True') plt.hist(LX_cgs_MT[ind]) ind = (mco > 1.)*(MT == 'True') plt.hist(LX_cgs_MT[ind]) epsilon = 0.1 X = 0.2 kappa = 0.02*(1+X) # m^2/kg k = (4.*np.pi*const.G.value / (epsilon*kappa*const.c.value)) # s^-1 k = k/u.s.to(u.year) M_CO = 5 Mdot_Edd = k*M_CO print 'Eddington accretion rate for 5 MSun BH:',k*5,', and for 1.4 Msun NS:',k*1.4Eddington accretion rate for 5 MSun BH: 1.83926073238e-07 , and for 1.4 Msun NS: 5.14993005065e-08Calculate the mass transfer rate due to GWs and tides# This is from the solar metallicity grid of Götberg+18 # Initial masses of the donor stars [Msun] minit_grid = np.array([2.0,2.21,2.44,2.7,2.99,3.3,3.65,4.04,4.46,4.93,5.45,6.03,6.66,7.37,8.15,9.0,9.96,11.01,12.17,13.45,14.87,16.44,18.17]) # Masses of the stripped stars [Msun] mstrip_grid = np.array([0.35,0.38,0.44,0.51,0.58,0.66,0.74,0.85,0.97,1.11,1.27,1.43,1.64,1.88,2.17,2.49,2.87,3.32,3.85,4.45,5.12,5.88,6.72]) # MSun # Radii of the stripped stars [Rsun] rstrip_grid = np.array([0.16,0.17,0.19,0.21,0.23,0.25,0.26,0.29,0.32,0.36,0.40,0.42,0.46,0.50,0.55,0.59,0.65,0.70,0.76,0.80,0.84,0.87,0.88]) # Rsun # The duration of the stripped phases (in Myr) lifetime_stripped_grid = np.array([671.0,414.1,94.6,143.6,76.5,62.9,41.6,28.0,20.0,16.5,10.6,8.0,6.4,4.4,3.4,2.7,2.2,1.7,1.4,1.2,1.0,0.9,0.8]) # Myr # This index says which models reached central helium exhaustion ind_finishedHe = np.array([True]*len(lifetime_stripped_grid)) ind_finishedHe[minit_grid==2.44] = False ind_finishedHe[minit_grid==2.99] = False # Let's assume that the accretor is a neutron star M_CO = 1.4 # Calculate the minimum possible period (stripped star just fills its Roche lobe) qinv = mstrip_grid/M_CO # inverse mass ratio rL = 0.49*(qinv**(2./3.))/(0.69*(qinv**(2./3.))+np.log(1.+(qinv**(1./3.)))) a = rstrip_grid*u.R_sun.to(u.AU)/rL # in AU # minimum orbital period in days Pmin = np.sqrt((a**3.)*4.*(np.pi**2.)/(G*(mstrip_grid+M_CO))) # This is going to be P1 P1 = copy.copy(Pmin) # And the radius of the stripped star is going to be RL1 RL1 = copy.copy(rstrip_grid) # Get the moment of interia of stripped stars from evolutionary models which provide the structure # Get all the model names loc_MESA = '/data001/ygoetberg/taurus/binaries/grids_180117/small_grids/grid_014/' mods = [name for name in os.listdir(loc_MESA) if (name[0]=='M' and name[1]=='1' and name[-1]!='z')] # Sort them with mass nbr_mods = len(mods) mgrid = np.zeros(nbr_mods) for i in range(nbr_mods): mgrid[i] = np.float_(mods[i].split('M1_')[1].split('q')[0]) ind_sort = np.argsort(mgrid) mgrid = mgrid[ind_sort] mods = np.array(mods)[ind_sort] # Locate the profile that corresponds to XHec = 0.5 col = ['logRho','logR','he4','mass'] Igrid = np.zeros(nbr_mods) Igrid_09 = np.zeros(nbr_mods) Igrid_crude = np.zeros(nbr_mods) mass_interior = [None]*nbr_mods logR = [None]*nbr_mods for i in range(nbr_mods): data = np.loadtxt(loc_MESA+mods[i]+'/LOGS1/profiles.index',skiprows=1) ind_05 = data[:,1] == 105 ind_09 = data[:,1] == 109 if np.sum(ind_05) > 1.: ind_05 = ind_05[-1] if np.sum(ind_09) > 1.: ind_09 = ind_09[-1] if np.sum(ind_05) == 1.: tmp = int(data[ind_05,2]) prof_name = 'profile'+str(tmp)+'.data' # Read the density profile and calculate the moment of inertia data2 = 
GetColumnMESA(loc_MESA+mods[i]+'/LOGS1/'+prof_name,col,2) # Calculate the moment of inertia of the model logRho = data2[col.index('logRho')] logR[i] = data2[col.index('logR')] he4 = data2[col.index('he4')] mass_interior[i] = data2[col.index('mass')] rho = (10**logRho[::-1])*u.g.to(u.M_sun)/((u.cm.to(u.AU))**3.) r = (10**logR[i][::-1])*u.R_sun.to(u.AU) Igrid[i] = (8.*np.pi/3.)*np.trapz(rho*(r**4.),r) # Msun AU^2 Igrid_crude[i] = 0.2*mstrip_grid[i]*((rstrip_grid[i]*u.R_sun.to(u.AU))**2.) print mods[i], prof_name, he4[-1], Igrid[i], Igrid_crude[i], Igrid_crude[i]/Igrid[i] else: mass_interior[i] = np.zeros(2) logR[i] = np.zeros(2) """ if np.sum(ind_09) == 1.: tmp = int(data[ind_09,2]) prof_name = 'profile'+str(tmp)+'.data' # Read the density profile and calculate the moment of inertia data2 = GetColumnMESA(loc_MESA+mods[i]+'/LOGS1/'+prof_name,col,2) # Calculate the moment of inertia of the model logRho = data2[col.index('logRho')] logR = data2[col.index('logR')] he4 = data2[col.index('he4')] rho = (10**logRho[::-1])*u.g.to(u.M_sun)/((u.cm.to(u.AU))**3.) r = (10**logR[::-1])*u.R_sun.to(u.AU) Igrid_09[i] = (8.*np.pi/3.)*np.trapz(rho*(r**4.),r) # Msun AU^2 print mods[i], prof_name, he4[-1], Igrid_09[i] """ # This shows the difference between the different times during evolution plt.loglog(mstrip_grid,Igrid,'.b') # XHec = 0.5 (midway) #plt.loglog(mstrip_grid,Igrid_09,'.m') # XHec = 0.9 (early) #plt.loglog(mstrip_grid,Igrid_crude,'.g') # Marsh+04 plt.show() # This shows that it is ok to put a polynomial fit plt.loglog(mstrip_grid,Igrid,'.b') plt.loglog(mstrip_grid,Igrid,'-b') ind_I = Igrid != 0. coeff_I = np.polyfit(np.log10(mstrip_grid[ind_I]),np.log10(Igrid[ind_I]),2) mm = np.linspace(0.3,8,100) fit = 10**np.polyval(coeff_I,np.log10(mm)) plt.loglog(mm,fit,'-r') plt.show() for i in range(nbr_mods): if len(logR[i])>2: plt.plot(10**logR[i],mass_interior[i]) delta_t = 10**np.interp(np.log10(minit_grid),np.log10(minit_grid[ind_finishedHe]), np.log10(lifetime_stripped_grid[ind_finishedHe])) # Myr print delta_t delta_t = 0.2*delta_t*u.Myr.to(u.day) P1, P2, RL1 # Calculate the new period when accounting for GWs and tides Istrip = 10**np.polyval(coeff_I,np.log10(mstrip_grid)) # [Msun AU^2] #I2 = 0.2*M_CO*((0.005*u.R_sun.to(u.AU))**2.) I2 = 0. # Removing the tides on neutron stars and black holes # Orbital frequency Omega = 1./P1 # day^-1 # Stellar angular momentum Jstar = (Istrip + I2)*Omega # Orbital angular momentum a = ((P1**2.)*G*(mstrip_grid+M_CO)/(4.*(np.pi**2.)))**(1./3.) # AU Jorb = np.sqrt(G*a/(mstrip_grid+M_CO))*mstrip_grid*M_CO M = mstrip_grid+M_CO # Total mass of the system # Constants C1 = (-96./5.)*((G**3.)/(c_AUday**5.))*(mstrip_grid*M_CO*M)/((G*M/(4.*(np.pi**2.)))**(4./3.)) C2 = 1. + (1./(1.-3.*Jstar/Jorb)) C = C1*C2 D = (3./8.)*(P1**(8./3.)) # Get the new orbital periods P2 = ((2.**(9./8.))/(3.**(3./8.)))*((C*delta_t + D)**(3./8.))/home/ygoetberg/anaconda3/envs/venv/lib/python2.7/site-packages/ipykernel_launcher.py:27: RuntimeWarning: invalid value encountered in power$\dfrac{P^2}{a^3} = \dfrac{4\pi^2}{G(M_1+M_2)}$# Now, we go back to Roche-lobe radius a2 = ((P2**2.)*G*(mstrip_grid+M_CO)/(4.*(np.pi**2.)))**(1./3.) 
# AU # Assuming for this that no mass is lost or transferred qinv = mstrip_grid/M_CO # inverse mass ratio rL = 0.49*(qinv**(2./3.))/(0.69*(qinv**(2./3.))+np.log(1.+(qinv**(1./3.)))) RL2 = a2*rL*u.AU.to(u.R_sun) print zip(RL1,RL2) ind_tmp = np.isnan(RL2) RL2[ind_tmp] = np.interp(RL1[ind_tmp],RL1[ind_tmp==False],RL2[ind_tmp==False]) print RL2 # Calculate the mass transfer rate #rho_MSunRsun = rho_tmp*u.g.to(u.M_sun)/((u.cm.to(u.R_sun))**3.) #delta_M = (4.*np.pi*rho_MSunRsun/3.)*((RL1**3.)-(RL2**3.)) mstrip_grid_1 = np.zeros(nbr_mods) mstrip_grid_2 = np.zeros(nbr_mods) for i in range(nbr_mods): if (len(logR[i])>2): mstrip_grid_1[i] = np.max(mass_interior[i][10**logR[i] < RL1[i]]) if (RL2[i] != 0.): mstrip_grid_2[i] = np.max(mass_interior[i][10**logR[i] 0. mstrip_grid_1 = np.interp(mstrip_grid, mstrip_grid[ind_tmp],mstrip_grid_1[ind_tmp]) ind_tmp = mstrip_grid_2 > 0. mstrip_grid_2 = np.interp(mstrip_grid, mstrip_grid[ind_tmp],mstrip_grid_2[ind_tmp]) mstrip_grid delta_M = mstrip_grid_1-mstrip_grid_2 Mdot_GWtides = delta_M/(delta_t*u.day.to(u.year)) plt.plot(mstrip_grid,np.log10(Mdot_GWtides),'.') plt.xlabel('Mstrip [Msun]') plt.ylabel('log Mdot [Msun/yr]')/home/ygoetberg/anaconda3/envs/venv/lib/python2.7/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in log10 after removing the cwd from sys.path.Vectorized sequence alignment diagramsfrom genetracks import Figure, Track, Alignment, Multitrack, Label f = Figure() t1 = Track(50, 300, direction='f', label="Another sequence", regions=[(50, 100, 'lightblue')]) t2 = Track(110, 410, direction='r', label="Sequence 1", regions=[(150, 200, 'salmon')]) f.add(t1) f.add(t2) f.show()Linear tracks MultitracksIn this example we pack multiple tracks onto the same row and join them to illustrate gaps. 
The child tracks inherit the parent's translations.f = Figure() for i in range(0,3): o = i * 30 e = i * 25 f.add(Multitrack([ Track(o, 150 + o, direction='f', label='Read {}, forward'.format(i + 1)), Track(300 + e, 450 + e, direction='r', label='Read {}, reverse'.format(i + 1))], join=True)) f.show()DirectionalityTracks can be rendered with arrows indicating strand direction.figure = Figure() figure.add(Multitrack([ Track(0, 150, direction='f', label='Forward read'), Track(200, 350, direction='r', label='Reverse read')], join=True)) figure.add(Track(50, 250, direction='fr', label='Read-through', color='salmon', regions=[(75, 225, 'lightgrey')])) figure.add(Track(50, 300, label='Reads overlap')) figure.show()The `Alignment` class allows us to illustrate the relationship between regions of two different tracks: AlignmentRegions beetween two tracts can be illustrated with an `Alignment`.f = Figure() f.add(Alignment(Track(50, 310, direction='r', regions=[(110, 300, 'lightblue')]), Track(100, 360, direction='f', regions=[(110, 300, 'salmon')]), [(110, 300), (300, 110)])) f.show()Label ObjectsLabel offsets and text size can be adjusted to make informative tracks.f = Figure() def draw_hiv_genes(f): third = [ (2085, 5096, "pol", "orange"), (5559, 5850, "vpr", "turquoise"), (5970, 6045, "rev", 'yellowgreen'), (6225, 8795, "env", 'salmon'), ] second = [ (5831, 6045, "tat", "plum"), (6062, 6310, "vpu", "red"), (8379, 8653, "rev", 'yellowgreen'), (9086, 9719, "3' LTR", 'darkgrey'), ] first = [ (0, 634, "5' LTR", "darkgrey"), (790, 2292, "gag", "lightblue"), (5041, 5619, "vif", 'steelblue'), (8379, 8469, "tat", 'plum'), (8797, 9417, "nef", 'mediumaquamarine'), ] for reading_frame in [first, second, third]: f.add(Multitrack([Track(l, r, label=Label(0, text, offset=1), color=color) for l, r, text, color in reading_frame]), gap=0) draw_hiv_genes(f) f.show(w=900)Circular genomesThere is experimental support for circular genomes.from genetracks import Plasmid, Region p = Plasmid(360, regions=[Region(100, 101, color='orange'), Region(110, 280, color='salmon'), Region(230, 275, color='firebrick'), Region(320, 20, color='lightblue'), Region(20, 50, color='slateblue')]) p.show()Batch processing with Argo WorfklowsIn this notebook we will dive into how you can run batch processing with Argo Workflows and Seldon Core.Dependencies:* Seldon core installed as per the docs with an ingress* Minio running in your cluster to use as local (s3) object storage* Argo Workfklows installed in cluster (and argo CLI for commands) Setup Install Seldon CoreUse the notebook to [set-up Seldon Core with Ambassador or Istio Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).Note: If running with KIND you need to make sure do follow [these steps](https://github.com/argoproj/argo/issues/2376issuecomment-595593237) as workaround to the `/.../docker.sock` known issue. Set up Minio in your clusterUse the notebook to [set-up Minio in your cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html). 
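For reference, the batch worker configured further below sends each line of the input file to the `sklearn` SeldonDeployment as an `ndarray` payload. A minimal sketch of what a single such prediction request looks like is shown here, assuming the deployment created by the workflow is already running and that the cluster ingress (Ambassador or Istio) has been port-forwarded to localhost:8003 — the address, port and the illustrative feature values are assumptions, not part of the original notebook.

import json

import requests

# Hypothetical smoke test against the "sklearn" SeldonDeployment in the
# "default" namespace (created later by the Argo workflow). The ingress
# address is an assumption -- adjust to however Ambassador/Istio is exposed
# in your cluster.
INGRESS = "http://localhost:8003"
URL = f"{INGRESS}/seldon/default/sklearn/api/v1.0/predictions"

# One line of the batch input file is a nested list such as
# [[0.1, 0.2, 0.3, 0.4]], which maps directly onto Seldon's ndarray payload
# type used by the batch worker (batchWorker.payloadType=ndarray).
payload = {"data": {"ndarray": [[0.1, 0.2, 0.3, 0.4]]}}

response = requests.post(URL, json=payload)
print(json.dumps(response.json(), indent=2))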
Create rclone configurationIn this example, our workflow stages responsible for pulling / pushing data to in-cluster MinIO S3 storage will use `rclone` CLI.In order to configure the CLI we will create a following secret:%%writefile rclone-config.yaml apiVersion: v1 kind: Secret metadata: name: rclone-config-secret type: Opaque stringData: rclone.conf: | [cluster-minio] type = s3 provider = minio env_auth = false access_key_id = minioadmin secret_access_key = minioadmin endpoint = http://minio.minio-system.svc.cluster.local:9000 !kubectl apply -n default -f rclone-config.yamlsecret/rclone-config-secret createdInstall Argo WorkflowsYou can follow the instructions from the official [Argo Workflows Documentation](https://github.com/argoproj/argoquickstart).You also need to make sure that argo has permissions to create seldon deployments - for this you can create a role:%%writefile role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: workflow rules: - apiGroups: - "" resources: - pods verbs: - "*" - apiGroups: - "apps" resources: - deployments verbs: - "*" - apiGroups: - "" resources: - pods/log verbs: - "*" - apiGroups: - machinelearning.seldon.io resources: - "*" verbs: - "*" !!kubectl apply -n default -f role.yamlA service account:!kubectl create -n default serviceaccount workflowserviceaccount/workflow createdAnd a binding!kubectl create rolebinding workflow -n default --role=workflow --serviceaccount=default:workflowrolebinding.rbac.authorization.k8s.io/workflow createdCreate some input for our modelWe will create a file that will contain the inputs that will be sent to our modelmkdir -p assets/ import os import random random.seed(0) with open("assets/input-data.txt", "w") as f: for _ in range(10000): data = [random.random() for _ in range(4)] data = "[[" + ", ".join(str(x) for x in data) + "]]\n" f.write(data)Check the contents of the file!wc -l assets/input-data.txt !head assets/input-data.txt10000 assets/input-data.txt [[0.8444218515250481, 0.7579544029403025, 0.420571580830845, 0.25891675029296335]] [[0.5112747213686085, 0.4049341374504143, 0.7837985890347726, 0.30331272607892745]] [[0.4765969541523558, 0.5833820394550312, 0.9081128851953352, 0.5046868558173903]] [[0.28183784439970383, 0.7558042041572239, 0.6183689966753316, 0.25050634136244054]] [[0.9097462559682401, 0.9827854760376531, 0.8102172359965896, 0.9021659504395827]] [[0.3101475693193326, 0.7298317482601286, 0.8988382879679935, 0.6839839319154413]] [[0.47214271545271336, 0.1007012080683658, 0.4341718354537837, 0.6108869734438016]] [[0.9130110532378982, 0.9666063677707588, 0.47700977655271704, 0.8653099277716401]] [[0.2604923103919594, 0.8050278270130223, 0.5486993038355893, 0.014041700164018955]] [[0.7197046864039541, 0.39882354222426875, 0.824844977148233, 0.6681532012318508]]Upload the file to our minio!mc mb minio-seldon/data !mc cp assets/input-data.txt minio-seldon/data/Bucket created successfully `minio-seldon/data`. 
...-data.txt: 820.96 KiB / 820.96 KiB ┃▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓┃ 71.44 MiB/s 0sCreate Argo WorkflowIn order to create our argo workflow we have made it simple so you can leverage the power of the helm charts.Before we dive into the contents of the full helm chart, let's first give it a try with some of the settings.We will run a batch job that will set up a Seldon Deployment with 10 replicas and 100 batch client workers to send requests.!helm template seldon-batch-workflow helm-charts/seldon-batch-workflow/ \ --set workflow.name=seldon-batch-process \ --set seldonDeployment.name=sklearn \ --set seldonDeployment.replicas=10 \ --set seldonDeployment.serverWorkers=1 \ --set seldonDeployment.serverThreads=10 \ --set batchWorker.workers=100 \ --set batchWorker.payloadType=ndarray \ --set batchWorker.dataType=data \ | argo submit --serviceaccount workflow - !argo list -n default !argo get -n default seldon-batch-process !argo -n default logs seldon-batch-processseldon-batch-process-3626514072: time="2021-01-15T11:44:57.620Z" level=info msg="Starting Workflow Executor" version=v2.12.3 seldon-batch-process-3626514072: time="2021-01-15T11:44:57.622Z" level=info msg="Creating a K8sAPI executor" seldon-batch-process-3626514072: time="2021-01-15T11:44:57.622Z" level=info msg="Executor (version: v2.12.3, build_date: 2021-01-05T00:54:54Z) initialized (pod: default/seldon-batch-process-3626514072) with template:\n{\"name\":\"create-seldon-resource-template\",\"arguments\":{},\"inputs\":{},\"outputs\":{},\"metadata\":{\"annotations\":{\"sidecar.istio.io/inject\":\"false\"}},\"resource\":{\"action\":\"create\",\"manifest\":\"apiVersion: machinelearning.seldon.io/v1\\nkind: SeldonDeployment\\nmetadata:\\n name: \\\"sklearn\\\"\\n namespace: default\\n ownerReferences:\\n - apiVersion: argoproj.io/v1alpha1\\n blockOwnerDeletion: true\\n kind: Workflow\\n name: \\\"seldon-batch-process\\\"\\n uid: \\\"511f64a2-0699-42[...]Check output in object storeWe can now visualise the output that we obtained in the object store.First we can check that the file is present:import json wf_arr = !argo get -n default seldon-batch-process -o json wf = json.loads("".join(wf_arr)) WF_UID = wf["metadata"]["uid"] print(f"Workflow UID is {WF_UID}") !mc ls minio-seldon/data/output-data-"$WF_UID".txt[2021-01-15 11:46:42 GMT]  3.4MiB output-data-511f64a2-0699-42eb-897a-c0a57b24072c.txt Now we can output the contents of the file created using the `mc head` command.!mc cp minio-seldon/data/output-data-"$WF_UID".txt assets/output-data.txt !head assets/output-data.txt !argo delete -n default seldon-batch-processWorkflow 'seldon-batch-process' deletedHello world! Welcome to Python FundamentalsIn this module, we are going to establish or review our skills in Python programming. 
In this notebook we are going to cover:* Variables and Data Types* Operations* Input and Output Operations* Logic Control* Iterables* Functions Variable and Data Typesx = 3 a,b = 0, -3 x type(x) y = 2.0 type(y) x = float(x) type(x) s,t,u = "8", '4', 'four' type(u) s_int = int(s) s_intOperation Arithmeticm,a,r,k = 4.0, -1.0, 5.0, -15 # Addition S = m+a S # Subtraction D = r-k D # Multiplication P = m*k P # Division Q = r/m Q ### Floor Division Fq = m//a Fq ### Exponentiation t = m**a t ### Modulo mod = m%a modAssignment OperationsL, U, N, A = 0, 31, 9, 9 L += r L U -= m U N *= 1 N A **= 4 AComparatorsres_1, res_2, res_3 = 3, 4.0, "9" true_val = 2.0 ## Equality res_1 == true_val ## Non-equality res_2 != true_val ## Inequality t1 = res_1 > res_2 t2 = res_1 < res_2/2 t3 = res_1 >= res_2/2 t4 = res_1 <= res_2 t2Logicalres_1 == true_val res_1 is true_val res_1 is not true_val r, i = True, True conj = r and i conj r, i = True, False disj = r or i disj r, i = True, False nand = not (r and i) nand r, i = True, False xor = (not r and i) or (r and not i) xorI/Oprint ("Hello Pilipens") cnt = 2 string = "Hello Pilipens" print(string, ", Current run count is:", cnt) cnt += 2 print(f"{string} Current count is: {cnt}") sem_grade = 1.0 name = "" print("Hello {}, your semestral grade is: {}".format(name, sem_grade)) w_pg, w_mg, w_fg = 0.3, 0.3, 0.4 print("The weights of your semestral grades are:\ \t\n{:.2%} for Prelims\ \n\t{:.2%} for Midterms, and\ \n\t{:.2%} for Finals.".format(w_pg, w_mg, w_fg)) x = input("input your name: ") x name = input(": ") pg = input("Enter prelim grade: ") mg = input("Enter midterm grade: ") fg = input("Enter finals grade: ") sem_grade = 85 print("Hello {}, your semestral grade is: {}".format(name, sem_grade))Kimi no nawa: Rio Boy Enter prelim grade: 85 Enter midterm grade: 84 Enter finals grade: 89 Hello Rio Boy, your semestral grade is: 85Looping Statements While## while loops c, o = 1, 25 while(c<=o): print(f"{c}\t|\t{o}") c+=11 | 25 2 | 25 3 | 25 4 | 25 5 | 25 6 | 25 7 | 25 8 | 25 9 | 25 10 | 25 11 | 25 12 | 25 13 | 25 14 | 25 15 | 25 16 | 25 17 | 25 18 | 25 19 | 25 20 | 25 21 | 25 22 | 25 23 | 25 24 | 25 25 | 25For# for(int c=1; c<25; c++){ # print(c) #} c=1 for c in range(25): print(c) playlist = ["Broke", "Mornin"] print('Now Playing:\n') for song in playlist: print(song)Now Playing: Broke MorninFlow Control Condition Statementsnumeral1, numeral2 = 9, 15 if(numeral1 == numeral2): print("Yey") elif(numeral1>numeral2): print("Hiho") else: print("Awit") print("")Awit Functions# void DeleteUser(int userid){ # delete(userid); #} def delete_user (userid): print("Successfully deleted user: {}".format(userid)) def delete_all_users (): print("Successfully deleted all users") userid = 202013869 delete_user(202013869) delete_all_users() def add(addend1, addend2): print("I hate Math addend1 and addend2") return addend1 + addend2 def power_of_base2(exponent): return 2**exponent addend1 = 6 addend2 = 21 exponent = 7 #add(addend1, addend2) power_of_base2(exponent) add(addend1, addend2)I hate Math addend1 and addend2Activity Calculatorname = input(": ") pg = input("Enter prelim grade: ") mg = input("Enter midterm grade: ") fg = input("Enter finals grade: ") sem_grade = 90 print("Hello {}, your semestral grade is: {}".format(name, sem_grade)) name = input("Enter your name: ") cn = input("Enter your course name: ") pg = input("Enter prelim grade: ") mg = input("Enter midterm grade: ") fg = input("Enter finals grade: ") pg = int(pg) mg = int(mg) fg = int(fg) sem_grade = (pg*.3) + (mg*.3) + 
(fg*.4) if (sem_grade > 89): print("Hello {}, your semestral grade in {} is: {} 😭".format(name, cn, sem_grade)) elif(sem_grade == 85): print("Hello {}, your semestral grade in {} is: {} 😀".format(name, cn, sem_grade)) else: print("Hello {}, your semestral grade in {} is: {} 🥰".format(name, cn, sem_grade))Enter your name: Enter your course name: CHE Enter prelim grade: 89 Enter midterm grade: 85 Enter finals grade: 87 Hello , your semestral grade in CHE is: 87.0 🥰Introduction to Data Science Lecture 22: Neural Networks I*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/*In this lecture, we'll discuss Neural Networks, which can be used for both Classification and Regression. In particular, we'll discuss * perceptrons and multi-layer perceptrons (MLP)* neural networks with scikit-learn* how to train a neural network* intro to TensorFlowRecommended Reading:* [Hands-On Machine Learning with Scikit-Learn & TensorFlow](http://proquest.safaribooksonline.com/book/programming/9781491962282) (2017), Ch. 9,10. See also the [associated github page](https://github.com/ageron/handson-ml). * Welch Labs, [Neural Networks demystified](https://github.com/stephencwelch/Neural-Networks-Demystified)* [Deep Learning](http://www.deeplearningbook.org/)import numpy as np from sklearn import datasets from sklearn.datasets import load_iris, make_moons, load_breast_cancer, fetch_mldata, fetch_california_housing from sklearn.linear_model import Perceptron from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.neural_network import MLPClassifier, MLPRegressor from sklearn.metrics import classification_report, confusion_matrix from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from mpl_toolkits.mplot3d import Axes3D %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) plt.style.use('ggplot')Neural Networks and deep learning[Artificial Neural Networks](https://en.wikipedia.org/wiki/Artificial_neural_network) were originally motivated by the brain, which is composed of a network of neurons. Each neuron receives a (chemical) signal from other neurons, does a small computation and then decides if and how to release more chemicals. This composition of small calculations can perform complicated tasks! Similarly, an artificial neural network is a network composed of neurons, which we simply think of as computational units. Large scale neural networks are at the core of [deep learning](https://en.wikipedia.org/wiki/Deep_learning), which has gained much publicity for performing very impressive machine learning tasks in the past few years, such as, * classifying billions of images (*e.g.*, Google Images)+ speech recognition (*e.g.*, Amazon's Alexa or Apple’s Siri)+ video recommendation (*e.g.*, YouTube), + beating the world champion at the game of Go (DeepMind’s AlphaGo).Neural Networks can generally be used for supervised learning tasks, such as classification and regression. PerceptronsThe simplest neural network is called the [perceptron](https://en.wikipedia.org/wiki/Perceptron). The *perceptron* is a binary classifier. It maps a real input $x \in \mathbb R^m$ to a binary output$$f(x) = \begin{cases}1 & \textrm{if } \ w\cdot x + b > 0 \\0 & \textrm{otherwise}\end{cases}.$$Here, $w \in \mathbb R^m$ is a vector of weights and $b$ is a scalar called the *bias*. A minimal numpy sketch of this decision rule is shown below. 
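The following is a minimal numpy sketch of the perceptron decision rule above; the weight vector and bias are arbitrary illustrative values, not trained parameters.

```
import numpy as np

def perceptron_predict(x, w, b):
    # Binary perceptron output: 1 if w.x + b > 0, otherwise 0
    return int(np.dot(w, x) + b > 0)

w = np.array([0.5, -0.2])   # illustrative weights (not trained)
b = 0.1                     # illustrative bias
print(perceptron_predict(np.array([1.0, 2.0]), w, b))  # 0.5 - 0.4 + 0.1 = 0.2 > 0, so output is 1
```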
(This is very similar to the binary classifier we saw when looking at support vector machines.) It is customary to represent this function by the following diagram. Just like for previous classification methods, we first *train* the network on data, which is to say that we find good choices of $w$ and $b$ for our application. Then we use the neural network to classify new data points. Of course, a single perceptron is only a linear discriminator (similar to logistic regression and linear SVM). But things become much more interesting when you start composing many neurons, that is, considering networks with more *layers*. The way in which we put together the neurons is referred to as the **network architecture**. There are many ways to do this. Here is a peek at the [neural-network-zoo](http://www.asimovinstitute.org/neural-network-zoo/): There are many different network architectures. The most important thing about all of these neural networks is that there is an *input layer*, typically drawn on the left hand side, and an *output layer*, typically drawn on the right hand side. The middle layers are sometimes called *hidden layers*. In all of these neural network designs, each layer has its own weight vectors and biases that need to be trained. Consequently, *training* a neural network is a much harder job than we have seen for previous methods. It also requires more data. Neural Networks in PracticeIn the past, we have used scikit-learn for classification. Scikit-learn also has a Neural Network library [here](http://scikit-learn.org/stable/modules/neural_networks_supervised.html). However, this implementation does not scale to large-scale applications (no GPU support or deep learning architectures). There are many other packages that have more complete implementations of neural networks. Here is a partial list with short statements taken from the packages.* [TensorFlow](https://github.com/tensorflow/tensorflow): TensorFlow™ is an open source C++ software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them. The flexible architecture allows you to deploy computation to one or more CPUs or GPUs in a desktop, server, or mobile device with a single API. + [Torch](http://torch.ch/): Torch is a scientific computing framework with wide support for machine learning algorithms that puts GPUs first.+ [CNTK](https://github.com/Microsoft/cntk): Cognitive Toolkit (CNTK) is an open source deep-learning toolkit developed by Microsoft. + [Theano](https://github.com/Theano/Theano): Theano is a Python library that allows you to define, optimize, and evaluate mathematical expressions involving multi-dimensional arrays efficiently. It can use GPUs and perform efficient symbolic differentiation.+ [keras](https://github.com/keras-team/keras): Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano.+ [MXNet](https://github.com/dmlc/mxnet): Lightweight, Portable, Flexible Distributed/Mobile Deep Learning with Dynamic, Mutation-aware Dataflow Dep Scheduler; for Python, R, Julia, Scala, Go, Javascript and more+ [Caffe](http://caffe.berkeleyvision.org/): Caffe is a deep learning framework made with expression, speed, and modularity in mind. 
It is developed by Berkeley AI Research (BAIR) and by community contributors.+ [Lasagne](https://github.com/Lasagne/Lasagne): Lightweight library to build and train neural networks in Theano. + [prettytensor](https://github.com/google/prettytensor/): Pretty Tensor provides a high level builder API for TensorFlow. It provides thin wrappers on Tensors so that you can easily build multi-layer neural networks.+ [Deeplearning4j](https://deeplearning4j.org/): Open-Source, Distributed, Deep Learning Library for the JVM+ [H2O](https://github.com/h2oai): Fast Scalable Machine Learning For Smarter ApplicationsWe'll start with the scikit-learn implementation, since this environment is familiar and then use TensorFlow later. Neural networks with scikit-learnscikit-learn has a few different neural network functions:1. [perceptron](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html)+ [multi-layer perceptron (MLP) classifier](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html)+ [multi-layer perceptron (MLP) regressor](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html)The scikit-learn user guide for supervised learning using neural networks is [here](http://scikit-learn.org/stable/modules/neural_networks_supervised.html). Let's first test the `MLPClassifier` on the [two moons dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html).# X contains two features # y contains labels X,y = make_moons(n_samples=1000,random_state=1,noise=0.2) X = StandardScaler().fit_transform(X) # Plot the data, color by class plt.scatter(X[y == 1, 0], X[y == 1, 1], color="DarkBlue", marker="s",label="class 1") plt.scatter(X[y == 0, 0], X[y == 0, 1], color="DarkRed", marker="o",label="class 2") plt.legend(scatterpoints=1) plt.title('Two Moons Dataset') plt.xlabel('feature 1') plt.ylabel('feature 2') plt.show() model = MLPClassifier(hidden_layer_sizes=(3,3,3), max_iter=1000, alpha=1e-4, solver='adam', verbose=10, random_state=1, learning_rate_init=.1) model.fit(X, y) # Plot the data, color by class plt.scatter(X[y == 1, 0], X[y == 1, 1], color="DarkBlue", marker="s",label="class 1") plt.scatter(X[y == 0, 0], X[y == 0, 1], color="DarkRed", marker="o",label="class 2") plt.legend(scatterpoints=1) # Plot the predictions made by NN x_min, x_max = X[:,0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),np.linspace(y_min, y_max, 200)) zz = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.contourf(xx, yy, zz, cmap=ListedColormap(['DarkRed', 'DarkBlue']), alpha=.2) plt.contour(xx, yy, zz, colors="black", alpha=1, linewidths=0.2) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.title('Classification of Two Moons using MLPClassifier') plt.xlabel('feature 1') plt.ylabel('feature 2') plt.show() print('After ', model.n_iter_, ' iterations, the loss is ', model.loss_) print('model coef shapes') [print(coef.shape) for coef in model.coefs_] print('model coefs') [print(coef) for coef in model.coefs_] print('model intercepts') [print(coef) for coef in model.intercepts_] print(model.get_params())After 26 iterations, the loss is 0.10547183623600889 model coef shapes (2, 3) (3, 3) (3, 3) (3, 1) model coefs [[-1.45538319 1.95266486 -2.19153839] [ 0.48119992 -0.52172742 -1.14917163]] [[ 0.84881629 -1.21287704 1.98892248] [-0.31298184 1.607918 -0.55635986] [ 1.04945602 1.3496739 -0.47041188]] [[-0.80397104 
-1.7798498 2.82996346] [ 0.28897265 1.58600398 -0.9980886 ] [-2.12063679 0.16940877 1.1990305 ]] [[ 0.29733504] [ 1.20505347] [-1.5105632 ]] model intercepts [-0.64310462 -1.12094634 0.94005427] [-2.86559139 -0.17624245 1.47203736] [-1.33906156 0.06421013 1.42286222] [-0.06217724] {'alpha': 0.0001, 'shuffle': True, 'warm_start': False, 'tol': 0.0001, 'max_iter': 1000, 'momentum': 0.9, 'nesterovs_momentum': True, 'beta_1': 0.9, 'learning_rate_init': 0.1, 'verbose': 10, 'early_stopping': False, 'power_t': 0.5, 'epsilon': 1e-08, 'beta_2': 0.999, 'validation_fraction': 0.1, 'hidden_layer_sizes': (3, 3, 3), 'activation': 'relu', 'solver': 'adam', 'learning_ra[...]There are a lot more function parameters for [`MLPClassifier`](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html) than for other scikit-learn classification methods. You'll find that tweaking them also makes a very big difference in the output. Here are some of the important parameters: Network architecture parameters+ **hidden_layer_sizes**: tuple, length = n_layers - 2, default (100,). The ith element represents the number of neurons in the ith hidden layer.+ **activation**: {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}, default ‘relu’. Activation function for the hidden layer. - ‘identity’, no-op activation, useful to implement linear bottleneck, returns f(x) = x - ‘logistic’, the logistic sigmoid function, returns f(x) = 1 / (1 + exp(-x)). - ‘tanh’, the hyperbolic tan function, returns f(x) = tanh(x). - ‘relu’, the rectified linear unit function, returns f(x) = max(0, x) Optimization related parameters+ **solver**: {‘lbfgs’, ‘sgd’, ‘adam’}, default ‘adam’. The solver for weight optimization. - ‘lbfgs’ is an optimizer in the family of quasi-Newton methods. - ‘sgd’ refers to stochastic gradient descent. - ‘adam’ refers to a stochastic gradient-based optimizer proposed by Kingma, Diederik, and + **alpha**: float, optional, default 0.0001.L2 penalty (regularization term) parameter.+ **max_iter**: int, optional, default 200. Maximum number of iterations. The solver iterates until convergence (determined by ‘tol’) or this number of iterations. For stochastic solvers (‘sgd’, ‘adam’), note that this determines the number of epochs (how many times each data point will be used), not the number of gradient steps.+ **random_state**: int, RandomState instance or None, optional, default None. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random.+ **tol**: float, optional, default 1e-4. Tolerance for the optimization. When the loss or score is not improving by at least tol for two consecutive iterations, unless learning_rate is set to ‘adaptive’, convergence is considered to be reached and training stops.+ **verbose**: bool, optional, default False. Whether to print progress messages to stdout. **Exercise**: By chainging the hidden_layer_sizes, activation function, and the random_state, see if you can find a better classification of the above two moons dataset. Multi-layer PerceptronsHere, we take a closer look at Multi-layer Perceptrons; this material is taken from the [scikit-learn user guide](http://scikit-learn.org/stable/modules/neural_networks_supervised.html)Given a set of features $X = \{x_1, x_2, ..., x_n\}$ and a target $y$, a **Multi-layer Perceptron** can learn a non-linear function for either classification or regression. 
Here is an example with one hidden layer. Each neuron applies an affine transformation $$x \mapsto w\cdot x + b$$and then a non-linear *activation function* $g\colon \mathbb R \to \mathbb R$. The composition looks like:$$x \mapsto g(w\cdot x + b).$$The output layer receives the values from the last hidden layer and transforms them into output values.In the previous example, we saw the trained coefficients by printing `model.coefs_` and `model.intercepts_`. There are several choices of **activation function**: hyperbolic tangent, logistic, and rectified linear unit (ReLU). We used the default activation function, ReLU.# see . 10 def logit(z): return 1 / (1 + np.exp(-z)) def relu(z): return np.maximum(0, z) def derivative(f, z, eps=0.000001): return (f(z + eps) - f(z - eps))/(2 * eps) z = np.linspace(-5, 5, 200) plt.figure(figsize=(11,4)) plt.subplot(121) plt.plot(z, np.sign(z), "r-", linewidth=2, label="Step") plt.plot(z, logit(z), "g--", linewidth=2, label="Logit") plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh") plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU") plt.grid(True) plt.legend(loc="center right", fontsize=14) plt.title("Activation functions", fontsize=14) plt.axis([-5, 5, -1.2, 1.2]) plt.subplot(122) plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step") plt.plot(0, 0, "ro", markersize=5) plt.plot(0, 0, "rx", markersize=10) plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit") plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh") plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU") plt.grid(True) #plt.legend(loc="center right", fontsize=14) plt.title("Derivatives", fontsize=14) plt.axis([-5, 5, -0.2, 1.2]) plt.show()Training a neural networkWe'll follow the scikit-learn [user guide](http://scikit-learn.org/stable/modules/neural_networks_supervised.html) to see how the multi-layer perceptron (MLP) neural network is trained. The MLP uses a loss function of the form $$Loss(\hat{y},y,W) = \frac{1}{2} \sum_{i=1}^n f(\hat{y}_i(W),y_i) + \frac{\alpha}{2} \|W\|_2^2$$Here, + $y_i$ is the label for the $i$-th example, + $\hat{y}_i(W)$ is the predicted label for the $i$-th example, + $f$ is a function that measures the error, typically the $L^2$ difference for regression or cross-entropy for classification, and + $\alpha$ is a regularization parameter. Starting from initial random weights, the loss function is minimized by repeatedly updating these weights. The details of this depend on the chosen method, either a quasi-Newton method `lbfgs`, stochastic gradient descent `sgd`, or `adam`. In the **gradient descent method**, the gradient $\nabla_{W} Loss$ of the loss with respect to the weights is computed. The weights are then changed in the negative gradient direction using a step-length or learning-rate $\varepsilon>0$: $$W \leftarrow W - \varepsilon \nabla_W {Loss}.$$The algorithm stops when it reaches a preset maximum number of iterations, `max_iter`, or when the improvement in loss is below a preset small number, `tol`.The gradient with respect to $W$ is computed using the chain rule from calculus. In principle the idea is simple, but in practice it is a complicated job. Data analysts have figured out a clever way to organize this calculation. This is sometimes called *back propagation*. A complete description of `lbfgs`, `sgd`, and `adam` is beyond the scope of the course. I'll just say that they are clever modifications to the gradient descent method. 
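To make the update rule concrete, here is a toy numpy sketch of plain gradient descent on a simple quadratic loss; it illustrates the formula above and is not scikit-learn's internal implementation.

```
import numpy as np

# Toy loss: Loss(W) = 0.5 * ||W - W_target||^2, whose gradient is (W - W_target).
W_target = np.array([1.0, -2.0])
W = np.zeros(2)      # initial weights
eps = 0.1            # learning rate (step length)

for step in range(100):
    grad = W - W_target       # gradient of the loss with respect to W
    W = W - eps * grad        # gradient descent update: W <- W - eps * grad
print(W)                      # converges toward W_target
```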
Let's see a comparison of optimization methods, taken from [this page](http://scikit-learn.org/stable/auto_examples/neural_networks/plot_mlp_training_curves.html).# different learning rate schedules and momentum parameters params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, 'learning_rate_init': 0.2}, {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, 'learning_rate_init': 0.2}, {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, {'solver': 'adam', 'learning_rate_init': 0.01}] labels = ["constant learning-rate", "constant with momentum", "constant with Nesterov's momentum", "inv-scaling learning-rate", "inv-scaling with momentum", "inv-scaling with Nesterov's momentum", "adam"] plot_args = [{'c': 'red', 'linestyle': '-'}, {'c': 'green', 'linestyle': '-'}, {'c': 'blue', 'linestyle': '-'}, {'c': 'red', 'linestyle': '--'}, {'c': 'green', 'linestyle': '--'}, {'c': 'blue', 'linestyle': '--'}, {'c': 'black', 'linestyle': '-'}] def plot_on_dataset(X, y, ax, name): # for each dataset, plot learning for each learning strategy print("\nlearning on dataset %s" % name) ax.set_title(name) X = MinMaxScaler().fit_transform(X) mlps = [] if name == "digits": # digits is larger but converges fairly quickly max_iter = 15 else: max_iter = 400 for label, param in zip(labels, params): print("training: %s" % label) mlp = MLPClassifier(verbose=0, random_state=0, max_iter=max_iter, **param) mlp.fit(X, y) mlps.append(mlp) print("Training set score: %f" % mlp.score(X, y)) print("Training set loss: %f" % mlp.loss_) for mlp, label, args in zip(mlps, labels, plot_args): ax.plot(mlp.loss_curve_, label=label, **args) fig, axes = plt.subplots(2, 2, figsize=(15, 10)) # load / generate some toy datasets iris = datasets.load_iris() digits = datasets.load_digits() data_sets = [(iris.data, iris.target), (digits.data, digits.target), datasets.make_circles(noise=0.2, factor=0.5, random_state=1), datasets.make_moons(noise=0.3, random_state=0)] for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', 'circles', 'moons']): plot_on_dataset(*data, ax=ax, name=name) fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") plt.show()learning on dataset iris training: constant learning-rateSome advise on optimization methods according to [this page](http://scikit-learn.org/stable/modules/neural_networks_supervised.html): * Empirically, we observed that L-BFGS converges faster and with better solutions on small datasets. For relatively large datasets, however, Adam is very robust. It usually converges quickly and gives pretty good performance. SGD with momentum or nesterov’s momentum, on the other hand, can perform better than those two algorithms if learning rate is correctly tuned. 
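As a hedged illustration of this advice (not from the original lecture), the solver-related parameters can be set directly on `MLPClassifier`; here `X` and `y` are assumed to be the two-moons data generated earlier in this notebook.

```
from sklearn.neural_network import MLPClassifier

# SGD with Nesterov momentum and a hand-tuned learning rate, per the advice above.
sgd_model = MLPClassifier(hidden_layer_sizes=(3, 3, 3), solver='sgd',
                          learning_rate_init=0.05, momentum=0.9,
                          nesterovs_momentum=True, max_iter=1000, random_state=1)
sgd_model.fit(X, y)           # X, y: the two-moons data from earlier cells
print(sgd_model.loss_)        # final training loss, for comparison with 'adam'
```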
Example: breast cancer datacancer = load_breast_cancer() print(cancer.keys()) # 569 data points with 30 features cancer['data'].shape # full description: print(cancer['DESCR']) X = cancer['data'] y = cancer['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2) scaler = StandardScaler() scaler.fit(X_train) # Fit only to the training data # Apply scaling to data X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) mlp = MLPClassifier(hidden_layer_sizes=(30,30,30),random_state=1) mlp.fit(X_train,y_train) y_pred = mlp.predict(X_test) print(confusion_matrix(y_test,y_pred))[[36 4] [ 1 73]]Example: MNIST datasetLet's train a multi layer perceptron on the MNIST dataset.mnist = fetch_mldata("MNIST original") # rescale the data, use the traditional train/test split X, y = mnist.data / 255., mnist.target X_train, X_test = X[:60000], X[60000:] y_train, y_test = y[:60000], y[60000:] mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4, solver='adam', verbose=10, tol=1e-4, random_state=1) mlp.fit(X_train, y_train) print("Training set score: %f" % mlp.score(X_train, y_train)) print("Test set score: %f" % mlp.score(X_test, y_test)) y_pred = mlp.predict(X_test) print(confusion_matrix(y_test,y_pred))Iteration 1, loss = 0.40451061 Iteration 2, loss = 0.15583057 Iteration 3, loss = 0.11098385 Iteration 4, loss = 0.08411502 Iteration 5, loss = 0.06846576 Iteration 6, loss = 0.05652346 Iteration 7, loss = 0.04633846 Iteration 8, loss = 0.03762209 Iteration 9, loss = 0.03365538 Iteration 10, loss = 0.02884553 Iteration 11, loss = 0.02224694 Iteration 12, loss = 0.01873422 Iteration 13, loss = 0.01647119 Iteration 14, loss = 0.01460615 Iteration 15, loss = 0.01141324 Iteration 16, loss = 0.01314417 Iteration 17, loss = 0.01043379 Iteration 18, loss = 0.00753701 Iteration 19, loss = 0.00898900 Iteration 20, loss = 0.00699743 Iteration 21, loss = 0.00740891 Iteration 22, loss = 0.00942422 Iteration 23, loss = 0.00850921 Training loss did not improve more than tol=0.000100 for two consecutive epochs. Stopping. Training set score: 0.997483 Test set score: 0.976600 [[ 971 0 0 1 1 2 2 1 2 0] [ 0 1124 1 4 0 1 2 1 2 0] [ 3 1 1004 11 [...]**Exercise**: By adjusting the parameters in the MLPClassifier, imporve the test set score. **Note**: [This webpage](http://scikit-learn.org/stable/auto_examples/neural_networks/plot_mnist_filters.html)tries to interpret the MLP classification weights learned for the MNIST dataset. Example: Regression with Neural Networks in scikit-learnLet's use a multi-layer perceptron for regression. 
This can be done with the scikit-learn [`MLPRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html) function.housing = fetch_california_housing() print(housing.keys()) # 20640 data points with 8 features housing['data'].shape # full description: print(housing['DESCR']) X = housing['data'] y = housing['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2) # scale data scaler = StandardScaler() scaler.fit(X_train) # Fit only to the training data # Apply scaling to data X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # Linear Regression with Scikit-Learn lin_reg = LinearRegression() print(lin_reg.get_params()) lin_reg.fit(X_train, y_train) print(lin_reg.intercept_) print(lin_reg.coef_) print(lin_reg.score(X_test,y_test)) # score = 1 is good # MLP regression with Scikit-Learn mlp_reg = MLPRegressor(hidden_layer_sizes=(8,8),verbose=0,random_state=2,solver='adam') print(mlp_reg.get_params()) mlp_reg.fit(X_train, y_train) print(mlp_reg.score(X_test,y_test)) # score = 1 is good{'alpha': 0.0001, 'shuffle': True, 'warm_start': False, 'tol': 0.0001, 'max_iter': 200, 'momentum': 0.9, 'nesterovs_momentum': True, 'beta_1': 0.9, 'learning_rate_init': 0.001, 'verbose': 0, 'early_stopping': False, 'power_t': 0.5, 'epsilon': 1e-08, 'beta_2': 0.999, 'validation_fraction': 0.1, 'hidden_layer_sizes': (8, 8), 'activation': 'relu', 'solver': 'adam', 'learning_rate': 'constant', 'batch_size': 'auto', 'random_state': 2} 0.7400491068801395Neural Networks demystifiedThere is a good sequence of 7 videos called **Neural Networks demystified** from Welch Labs that builds and trains a neural network *from scratch* in python. * Part 1: [Data + Architecture](https://www.youtube.com/watch?v=bxe2T-V8XRs)+ Part 2: [Forward Propagation](https://www.youtube.com/watch?v=UJwK6jAStmg)+ Part 3: [Gradient Descent](https://www.youtube.com/watch?v=5u0jaA3qAGk)+ Part 4: [Backpropagation](https://www.youtube.com/watch?v=GlcnxUlrtek)+ Part 5: [Numerical Gradient Checking](https://www.youtube.com/watch?v=pHMzNW8Agq4&t=22s)+ Part 6: [Training](https://www.youtube.com/watch?v=9KM9Td6RVgQ)+ Part 7: [Overfitting, Testing, and Regularization](https://www.youtube.com/watch?v=S4ZUwgesjS8)If you're interested in learning more about how Neural Networks are trained, I would recommend watching these videos. The accompanying code is [on github](https://github.com/stephencwelch/Neural-Networks-Demystified) and can be obtained via ```git clone https://github.com/stephencwelch/Neural-Networks-Demystified.git``` Neural Networks with TensorFlow"[TensorFlow™](https://www.tensorflow.org/) is an open source software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them. The flexible architecture allows you to deploy computation to one or more CPUs or GPUs in a desktop, server, or mobile device with a single API. TensorFlow was originally developed by researchers and engineers working on the Google Brain Team within Google's Machine Intelligence research organization for the purposes of conducting machine learning and deep neural networks research, but the system is general enough to be applicable in a wide variety of other domains as well." 
Installing TensorFlowInstructions for installing TensorFlow are available at [the tensorflow install page](https://www.tensorflow.org/versions/r1.0/install/).It is recommended that you use the command: ```sudo pip3 install --upgrade tensorflow```I used the command: ```sudo conda install -c conda-forge tensorflow```import tensorflow as tf print(tf.__version__)/opt/anaconda3/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5 return f(*args, **kwds) /opt/anaconda3/lib/python3.5/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_convertersCreating and running a graph with TensorFlow# build a computational graph x = tf.Variable(3, name="x") y = tf.Variable(4, name="y") f = x*x*y + y + 2 # evaluate the graph with tf.Session() as sess: x.initializer.run() y.initializer.run() print(f.eval())42Linear regression using TensorFlowimport numpy as np from sklearn.datasets import fetch_california_housing from sklearn.linear_model import LinearRegression housing = fetch_california_housing() m, n = housing.data.shape housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data] X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") XT = tf.transpose(X) theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y) with tf.Session() as sess: theta_tf = theta.eval() print(theta_tf, '\n') # compare with pure NumPy X = housing_data_plus_bias y = housing.target.reshape(-1, 1) theta_np = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y) print(theta_np, '\n') # Compare with Scikit-Learn lin_reg = LinearRegression() lin_reg.fit(housing.data, housing.target.reshape(-1, 1)) print(np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T])[[-3.7465141e+01] [ 4.3573415e-01] [ 9.3382923e-03] [-1.0662201e-01] [ 6.4410698e-01] [-4.2513184e-06] [-3.7732250e-03] [-4.2664889e-01] [-4.4051403e-01]] [[-3.69419202e+01] [ 4.36693293e-01] [ 9.43577803e-03] [-1.07322041e-01] [ 6.45065694e-01] [-3.97638942e-06] [-3.78654265e-03] [-4.21314378e-01] [-4.34513755e-01]] [[-3.69419202e+01] [ 4.36693293e-01] [ 9.43577803e-03] [-1.07322041e-01] [ 6.45065694e-01] [-3.97638942e-06] [-3.78654265e-03] [-4.21314378e-01] [-4.34513755e-01]]Clustering in two dimensionsimport numpy as np import matplotlib.pyplot as plt %matplotlib widget from functions import gen_domains2D, plot_domains2D, confidence_ellipseThis is an introduction to some of the considerations needed for data clustering in higher dimensions and an example of using Gaussian mixture models to predict the best cluster for unknown data. 
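Before working with the simulated images, here is a minimal, self-contained sketch of the Gaussian-mixture fit/predict pattern used later in this notebook, on synthetic 2D data; this cell is illustrative and not part of the original analysis.

```
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
# Two well-separated synthetic 2D clusters
train = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(5, 1, (100, 2))])
gmm = GaussianMixture(n_components=2, random_state=0).fit(train)

# Assign previously unseen points to the learned clusters
new_points = np.array([[0.2, -0.1], [5.3, 4.8]])
print(gmm.predict(new_points))         # hard cluster assignments
print(gmm.predict_proba(new_points))   # per-cluster membership probabilities
```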
Normal distribution in higher dimensions* In one dimension a normal distribution is described by $\exp(-\frac{1}{2}\frac{(x-\mu)^2}{\sigma^2})$, where $\mu$ is the mean, $\sigma$ is the standard deviation and $\sigma^2$ is the variance* In higher dimensions this becomes $\exp(-\frac{1}{2}(X-M)^T \Sigma^{-1} (X-M))$, where $X$ and $M$ are $n$-dimensional vectors and $\Sigma$ is the $n \times n$ covariance matrix* The diagonal elements of $\Sigma$ contain the variances of each dimension and the off-diagonal elements are the covariance between two dimensions* In 2D: $$\Sigma = \begin{pmatrix} \sigma_x^2 & \mathrm{cov}(x, y) \\ \mathrm{cov}(x, y) & \sigma_y^2 \end{pmatrix} = \begin{pmatrix} \sigma_x^2 & \rho\sigma_x\sigma_y \\ \rho\sigma_x\sigma_y & \sigma_y^2 \end{pmatrix}$$ where $\rho$ is the Pearson correlation coefficient* This is demonstrated by the widget belowfrom covariance_matrix import covariance_matrix fig, sliders = covariance_matrix()The advantages of higher dimensions* The example below shows a sample of three materials with two measurements (which could be _e.g._ surface potential and tapping phase)* Comparing the scatter plot to the histograms shows the advantage of higher dimensions: * Clusters that are overlapping in 1D may be distinct in 2D or higher * Distances in 1D are given by $x-x_0$ but in 2D are given by $\sqrt{(x-x_0)^2 + (y-y_0)^2}$ (and so on for higher dimensions)images, materials = gen_domains2D( populations=(1, 3, 1), means=((-50, 101), (0, 100), (50, 99)), stds=((10, 1/5), (10, 1/5), (10, 1/5)), pearsons=(0, 0.4, 0) ); plot_domains2D(images, materials);Normalisation* Clustering looks at distances between points to judge their similarity* But often, different observations have different units and variances so it's not fair to equate them* As an example, if I plot the scatter data from above, but use an equal scaling, the $A$ parameter completely dominates all the point-to-point distancesfig, ax = plt.subplots(figsize=(12, 3)) ax.scatter(*images.reshape(2, -1), alpha=0.05, s=1) ax.set( xlabel='$A$', ylabel='$B$', aspect='equal' ) for side in ('top', 'right'): ax.spines[side].set_visible(False) fig.tight_layout()* We don't want to assume that either parameter is more important so we use Mahalanobis normalisation* Each dimension is normalised to its mean, $\mu$, and standard deviation, $\sigma$, as $\frac{data - \mu(data)}{\sigma(data)}$* This means every dimension has unit variance and zero mean* It also means the data is made dimensionless, which removes complications of different units* The scatter plot of Mahalanobis normalised data below shows that neither $A$ nor $B$ is given unfair precedencemahalanobis = ( (images - images.mean(axis=(1, 2))[:, np.newaxis, np.newaxis]) / images.std(axis=(1, 2))[:, np.newaxis, np.newaxis] ) fig, ax = plt.subplots(figsize=(12, 12)) ax.scatter(*mahalanobis.reshape(2, -1), s=1) ax.set( xlabel=r'$\frac{A-{\mu}(A)}{{\sigma}(A)}$', ylabel=r'$\frac{B-{\mu}(B)}{{\sigma}(B)}$', aspect='equal' ) ax.grid() for side in ('top', 'right'): ax.spines[side].set_visible(False) fig.tight_layout()Using clustering to classify new data* One of the main advantages of data clustering is that it can be used to automatically identify new data as belonging to a particular cluster* To show this I'm splitting the image above into two halves, top and bottom* I'll train the clustering on the top, and test it on the bottomimages_top = images[:, :64] materials_top = [mat[:64] for mat in materials] images_bottom = images[:, 64:] materials_bottom = [mat[64:] for 
mat in materials]Training phase* I start by normalising the data from the top half of the image* This is displayed belownorm_top = ( (images_top - images_top.mean(axis=(1, 2))[:, np.newaxis, np.newaxis]) / images_top.std(axis=(1, 2))[:, np.newaxis, np.newaxis] ) plot_domains2D(norm_top, materials_top);* It has three materials and so should be fit best by three clusters* I train a three-component Gaussian mixture model (GMM) on the data then calculate the probability each point belongs to a particular clusterfrom sklearn.mixture import GaussianMixture gmm = GaussianMixture(n_components=3).fit(norm_top.reshape(2, -1).T) labels_top = gmm.predict(norm_top.reshape(2, -1).T) probs_top = gmm.predict_proba(norm_top.reshape(2, -1).T)* The scatter plot below shows the three identified clusters (ellipses show $1\sigma$ away from the mean position)* The probability for each point being in the cluster is shown by the imagesfig = plt.figure(figsize=(12, 9)) gs = plt.GridSpec(figure=fig, nrows=3, ncols=2, width_ratios=(2, 1)) scatter_ax = fig.add_subplot(gs[:, 0]) cluster_axes = [fig.add_subplot(gs[i, -1]) for i in range(3)] for i in range(3): c = f'C{i+1}' scatter_ax.scatter(*norm_top.reshape(2, -1)[:, labels_top==i], c=c, s=2) confidence_ellipse(scatter_ax, gmm.means_[i], gmm.covariances_[i], fc=c, ls='--', lw=2, alpha=0.2) cluster_axes[i].imshow(probs_top[:, i].reshape(images_top[0].shape)) cluster_axes[i].set_title(f'Cluster {i+1}', c=c) cluster_axes[i].set_axis_off() for side in ('top', 'right'): scatter_ax.spines[side].set_visible(False) fig.tight_layout()New data* We now want to use the clusters found above from the top of the image, to assess the materials in the bottom of the image* To do this we need to apply the same normalisation as we did for the training data: * For the training data we used Mahalanobis normalisation $\frac{training - \mu(training)}{\sigma(training)}$ * For fairness we can't simply Mahalanobis normalise the testing data (it may have a different $\mu$ and $\sigma$) * We need to do $\frac{new data - \mu(training)}{\sigma(training)}$* The appropriately normalised new data from the bottom half of the image is shown herenorm_bottom = ( (images_bottom - images_top.mean(axis=(1, 2))[:, np.newaxis, np.newaxis]) / images_top.std(axis=(1, 2))[:, np.newaxis, np.newaxis] ) plot_domains2D(norm_bottom, materials_bottom);* We still have the means and covariances of our Gaussian clusters found from the training data* We can use these to predict the likelihood that a point from the new data set belongs to each clusterprobs_bottom = gmm.predict_proba(norm_bottom.reshape(2, -1).T) fig = plt.figure(figsize=(12, 9)) gs = plt.GridSpec(figure=fig, nrows=3, ncols=2, width_ratios=(2, 1)) scatter_ax = fig.add_subplot(gs[:, 0]) cluster_axes = [fig.add_subplot(gs[i, -1]) for i in range(3)] scatter_ax.scatter(*norm_bottom.reshape(2, -1), c='C0', s=2) for i in range(3): c = f'C{i+1}' confidence_ellipse(scatter_ax, gmm.means_[i], gmm.covariances_[i], fc=c, ls='--', lw=2, alpha=0.2) cluster_axes[i].imshow(probs_bottom[:, i].reshape(images_bottom[0].shape)) cluster_axes[i].set_title(f'Cluster {i+1}', c=c) cluster_axes[i].set_axis_off() for side in ('top', 'right'): scatter_ax.spines[side].set_visible(False) fig.tight_layout()ML Dataset statsI want to perform some verification on the ml dataset I generated.%load_ext autoreload %autoreload 2 import dask import dask.distributed import logging import pathlib import xarray as xr import torch import torch.distributions import seaborn as sns from crims2s.dask 
import create_dask_cluster from crims2s.util import add_biweekly_dim #ML_DATASET_DIR = '***BASEDIR***/mlready/2021-08-08-test/' ML_DATASET_DIR = '***BASEDIR***/mlready/2021-08-28-test' ML_DATASET_DIR = '***BASEDIR***/mlready/2021-09-07-test-set' ML_DATASET_DIR = '***BASEDIR***/mlready/2021-09-07-cube-train/' ML_DATASET_DIR = '***BASEDIR***/mlready/2021-09-08-w-features/' ML_DATASET_DIR = '***HOME***fast_scratch/s2s/mlready/2021-09-14-lean-dry-mask' ML_DATASET_DIR = '***HOME***fast_scratch/s2s/mlready/2021-09-23-eccc-test' ML_DATASET_DIR = '***HOME***fast_scratch/s2s/mlready/2021-09-26-ncep'Load datasetdef preprocess_one_example(dataset): return dataset.expand_dims('forecast_time') dataset_files = sorted([x for x in pathlib.Path(ML_DATASET_DIR).iterdir() if '.nc' in x.name]) dataset_files = [f for f in dataset_files if f.stem.endswith('0116')] dataset_files[:10] features = xr.open_mfdataset(dataset_files[:3], group='/features', concat_dim='forecast_time', preprocess=preprocess_one_example) features features.mean(dim=['realization', 'lead_time', 'forecast_time', 'latitude', 'longitude']).compute() model = xr.open_mfdataset(dataset_files[:3], group='/model', concat_dim='forecast_time', preprocess=preprocess_one_example) eccc = xr.open_mfdataset(dataset_files[:3], group='/eccc_parameters', concat_dim='forecast_time', preprocess=preprocess_one_example) eccc.t2m_mu.isel(biweekly_forecast=1).plot() ncep = xr.open_mfdataset(dataset_files[:3], group='/ncep_parameters', concat_dim='forecast_time', preprocess=preprocess_one_example) ncep.t2m_mu.isel(biweekly_forecast=1, forecast_time=0).plot() ncep.t2m_mu obs = xr.open_mfdataset(dataset_files[:3], group='/obs', concat_dim='forecast_time', preprocess=preprocess_one_example) parameters = xr.open_mfdataset(dataset_files[:3], group='/model_parameters', concat_dim='forecast_time', preprocess=preprocess_one_example) parameters.t2m_mu.isel(biweekly_forecast=1, forecast_time=0).plot() (ncep - parameters).tp_mu.isel(biweekly_forecast=0, forecast_time=0).plot() dry_mask = preprocess_one_example(xr.open_dataset(dataset_files[0], group='/dry_mask')) dry_mask.tp.astype(float).isel(lead_time=1).plot() eccc.isel(biweekly_forecast=1).tp_cube_root_mu.plot() dry_mask = xr.open_mfdataset(dataset_files[:3], group='/dry_mask', concat_dim='forecast_time', preprocess=preprocess_one_example) parameters model parameters obs.isnull().sum().compute() features obs len(obs.lead_time) 46 / 7 some_computed = sample.isnull().sum(dim=['latitude', 'longitude', 'realization']) some_computed.sum(dim=['variable', 'forecast_time']).compute().x.plot() some_computed.isel(lead_time=[0, 1]).sum(dim='variable').compute() sample.sel(variable='sst').isel(lead_time=slice(1, None)).sum(dim=['forecast_time', 'lead_time', 'realization']).compute().x.plot()Check ytarget = xr.open_mfdataset(dataset_files[:10], group='/terciles', concat_dim='forecast_time', preprocess=preprocess_one_example) target = target.compute() target target.isnull().isel(forecast_time=0).sum(dim=['category', 'lead_time']).t2m.plot()Check obsobs = xr.open_mfdataset(dataset_files[:10], group='/obs', concat_dim='forecast_time', preprocess=preprocess_one_example) obs.isnull().sum(dim=['forecast_time', 'lead_time']).t2m.plot() model_params = xr.open_mfdataset(dataset_files, group='/model_parameters', preprocess=preprocess_one_example) model_paramsCheck parameters fit For T2Mmodel_biweekly = add_biweekly_dim(model, weeks_12=False) model_biweekly reworked_t2m = model_biweekly.isel(lead_time=slice(1, 
None)).t2m.squeeze().transpose('realization', 'lead_time', 'forecast_time', ...) reworked_t2m t2m_data = torch.tensor(reworked_t2m.data.compute()) t2m_mu = torch.tensor(parameters.t2m_mu.squeeze().data.compute()) t2m_sigma = torch.tensor(parameters.t2m_sigma.squeeze().data.compute()) t2m_mu.max() t2m_mu.shape t2m_data.shape distribution = torch.distributions.Normal(t2m_mu, t2m_sigma) -distribution.log_prob(t2m_data).mean()For TPtp_data = torch.tensor(model_biweekly.isel(lead_time=-1).tp.squeeze().transpose('realization', 'forecast_time', ...).data.compute()) tp_data.shape parameters tp_mu = torch.tensor(parameters.tp_cube_root_mu.squeeze().data.compute()) tp_sigma = torch.tensor(parameters.tp_cube_root_sigma.squeeze().data.compute()) tp_sigma.min() distribution = torch.distributions.Normal(tp_mu, tp_sigma + 1e-9) -distribution.log_prob(tp_data ** (1. / 3.)).mean() tp_alpha = torch.tensor(parameters.tp_alpha.squeeze().data.compute()) tp_beta = torch.tensor(parameters.tp_beta.squeeze().data.compute()) distribution = torch.distributions.Gamma(tp_alpha, tp_beta) -distribution.log_prob(tp_data + 1e-9).mean() tp_data.min() tp_mu.min() sns.histplot(data=tp_data.flatten())NCEPsample = preprocess_one_example(xr.open_dataset(dataset_files[1], group='/ncep_parameters')) sampleIdentifying High Risk Areas from NYC Traffic Conditions*Data Cleaning -*# libraries # pliers import pandas as pd import numpy as np import missingno as msno # GIS import geopandas as gpd from shapely.geometry import Point, Polygon # viz import matplotlib.pyplot as pltData Ingestiondf = pd.read_csv('../data/crash_features_2019.csv') print(df.shape) df.head()(193813, 30)Create Day of Week Column# Checking date type type(df['DATE'][1]) # creates datatime for date column df['DATE'] = pd.to_datetime(df['DATE']) # creates a columns df['dayofweek'] = df['DATE'].dt.dayofweek # convert dayofweek to char df['dayofweek'] = df['dayofweek'].map ({0:'Mon',1:'Tues',2:'Wed',3:'Thurs',4:'Fri', 5:'Sat',6:'Sun' }) df.iloc[:,-1]Highways / Expressways / Parkways Locating the highways, and creating a boolian column .# randomly looking through street names to discover highway names df.iloc[:,8:11].sample(10) # mask for highways highways = "EXPRESSWAY|PKWY|PARKWAY|EXPY|FDR|BQE|EXPWY|HIGHWAY|HENRY HUDSON" # number of rows where the crash was located on the a 'highway ' df[df['ON STREET NAME'].str.contains(highways, na =False)]['ON STREET NAME'].count() # a list of the the unique highway names np.unique(df[df['ON STREET NAME'].str.contains(highways, na =False)]['ON STREET NAME'])Create a Boolian Column, True crash was on a highway#create a Boolian Column, True crash was on a highway df['highway'] = df['ON STREET NAME'].str.contains(highways, na =False) df['highway'].samplea lookdf.head() # convert to np.... 
c = 0 for i, row in df.iterrows(): # check which rows are missing zip code and borough if (pd.isnull(row['ZIP CODE']) or pd.isnull(row['BOROUGH'])):# and (row['LOCATION']): # create Point for geo-positioning geo_point = Point(row['LONGITUDE'],row['LATITUDE']) # iterates through and pulls zipcode polys to check for ii, zipcode in zipcodes.iterrows(): # checks polys and updates df rows if zipcode['geometry'].contains(geo_point): row['ZIP CODE'] = zipcode['postalCode'] row['BOROUGH'] = zipcode['borough'] df.at[i,'ZIP CODE'] = zipcode['postalCode'] df.at[i,'BOROUGH'] = zipcode['borough'].upper() c += 1 break print(f'Number of rows updated {c}')Errors and Exceptions Homework - Problem 1Handle the exception thrown by the code below by using try and except blocks.try: for i in ['a','b','c']: print(i**2) except: print ('Error, operation not supported.')Error, operation not supported.Problem 2Handle the exception thrown by the code below by using **try** and **except** blocks. Then use a **finally** block to print 'All Done.'x = 5 y = 0 try: z = x/y except: print('Error, division by zero.') finally: print('Operations complete.')Error, division by zero. Operations complete.Problem 3Write a function that asks for an integer and prints the square of it. Use a while loop with a try, except, else block to account for incorrect inputs.def ask(): while True: try: var = int(input('Input an integer: ')) except: print('An error occurred. Please try again.') continue else: print('Thank you. Your number when squared is:',var**2) break ask()Input an integer: 5 Thank you. Your number when squared is: 25Reset the state, we track sectors being scanned and the resource found#!rover RoverBrain.Perceive = () => { if (currentSector == null){ return; } if (currentSector.CapturedFrame == null) { Task.Delay(500).Wait(); currentSector.CapturedFrame = RoverBody.Camera.GetFrame().Focus(); } }; #!rover RoverBody.Reset(); #!rover RoverBrain.ClearState = () => { foreach (var sector in scannedSectors) { sector.Reset(); } currentSector = null; RoverBody.Reset(); };Let's capture the current sector frame#!rover RoverBrain.Act = () => { if(currentSector != null){ RoverBody.TiltController.GoToSector(currentSector); } };Define the planning logic:1. Terminate when all sectors have been visited and an image was acquired2. Classify the current sector if needed3. Blink lights if a classification has been acquired#!rover RoverBrain.Plan = () => { if (AllSectorScanned(scannedSectors)){ RoverBody.AllLightsOff(); return PlanningResult.NoPlan; } if (currentSector != null) { currentSector.Marked = true; } currentSector = scannedSectors.FirstOrDefault(s => s.Marked == false); RoverBody.AllLightsOff(); return currentSector != null ? PlanningResult.NewPlan : PlanningResult.NoPlan; }; #!rover RoverBrain.Plan = () => { if (AllSectorScanned(scannedSectors)){ RoverBody.AllLightsOff(); return PlanningResult.NoPlan; } if (currentSector != null) { currentSector.Marked = true; } if (AnalyseSectorForEnergyCards(currentSector)) { RoverBody.BlinkAllLights(); }else { RoverBody.AllLightsOff(); } currentSector = scannedSectors.FirstOrDefault(s => s.Marked == false); RoverBody.AllLightsOff(); return currentSector != null ? 
PlanningResult.NewPlan : PlanningResult.NoPlan; }; #!rover RoverBrain.Plan = () => { if (AllSectorScanned(scannedSectors) || EnoughResourcesFound(scannedSectors, 3)){ RoverBody.AllLightsOff(); return PlanningResult.NoPlan; } if (currentSector != null) { currentSector.Marked = true; } if (AnalyseSectorForResource(currentSector)) { RoverBody.BlinkAllLights(); }else { RoverBody.AllLightsOff(); } currentSector = scannedSectors.FirstOrDefault(s => s.Marked == false); RoverBody.AllLightsOff(); return currentSector != null ? PlanningResult.NewPlan : PlanningResult.NoPlan; };Let's move the camera to the sector we planned to scan#!rover RoverBrain.ClearState();Let's explore the sectors#!rover scannedSectors .Where(s => s.ClassificationResults != null ) .GroupBy(s => s.ClassificationResults.Prediction.Label) .Select(g => new { Label = g.Key, Count = g.Count()}) #!rover scannedSectors.Count(s => s.Marked == true) #!rover scannedSectors.Where(s => s.Marked == true) #!rover scannedSectors #!rover currentSector #!rover testSector #!rover AllSectorScanned(scannedSectors) #!rover currentSector = scannedSectors.FirstOrDefault(s => s.Marked == false);Outro Transition ModelsIn this notebook, we will train two models, which, when taken together, will allow us to identify start and end of transition points in the outro of an input song. We will use a fairly similar approach to the one described in the [Introduction Models notebook](2.%20Introduction%20Transition%20Models.ipynb). However, the training task is more difficult in the outro, as we do not have the start of the song as a direct anchor point, and the end of a song cannot provide reliable beat/downbeat information in the same way that the start of the song can. We will therefore need to invert the process taken for our Introduction models. In constructing that process, we first determined the timestamp of the first downbeat of the first phrase, then trained a timing model which was agnostic to specific phrase locations. We could then use the first downbeat timestamp to pick out where the transition points should occur in every subsequent phrase by taking 32 beat jumps. For the outro models we are going to need to use the timing model first, in order to determine the period of the song where the outro actually begins and therefore where the transition should begin. We will then apply a separate Start Bar Finder similar to the one trained for the Introduction models to narrow down that period and pinpoint the specific bar where the transition should begin. For songs which our Introduction models predicted that the first downbeat is on the first beat, we will calculate the BPM of the song and use it to build a downbeat grid which can take advantage of the uniform tempo structure of EDM to identify phrase points in the outro.import pickle import numpy as np import pandas as pd import librosa import random import time from tensorflow.keras.layers import Input, Dense, Lambda, Concatenate, \ Embedding,ReLU,Flatten,Dropout,BatchNormalization,Activation,Dot from tensorflow.keras.models import Model, load_model from tensorflow.keras.layers import Conv1D,MaxPooling1D,LSTM,Bidirectional from tensorflow.keras.callbacks import EarlyStopping import tensorflow.keras.backend as K import tensorflow as tf tf.compat.v1.logging.set_verbosity(40) from sklearn.metrics import confusion_matrix from scipy import statsData importsWe begin by importing our labelled data and chromagram/spectrogram audio data. 
We have stored these as pickled dictionaries of dataframes and numpy arrays respectively. We also define the same helper functions as for the Introduction models, and perform the same processing to slice the spectrograms, and truncate/pad the slices to a uniform size.with open('grams_full.pkl','rb') as f: full_grams = pickle.load(f) with open('labels_dict','rb') as f: labels_dict = pickle.load(f) def get_ohs(df): """Given a labelled bar/beats input, appends columns with binary indicators at each beat, with 1 at the appropriate transition points and 0 otherwise. Args: df: Bar/beats dataframe with 'Start' and 'End' transition labels in intro and outro Returns: df_copy: Copy of the dataframe with four columns of binary labels appended: Incoming Start, Incoming End, Outgoing Start, Outgoing End """ df_copy = df.copy(deep=True) df_copy['Incoming Start'] = df_copy['Intro Label'].apply( lambda x: int('Start' in str(x))) df_copy['Incoming End'] = df_copy['Intro Label'].apply( lambda x: int('End' in str(x))) df_copy['Outgoing Start'] = df_copy['Outro Label'].apply( lambda x: int('Start' in str(x))) df_copy['Outgoing End'] = df_copy['Outro Label'].apply( lambda x: int('End' in str(x))) df_copy = df_copy.drop(['Intro Label','Outro Label'],axis=1) return df_copy def get_slices(gram,frames): """Utility function for slicing a spectrogram/chromagram according to frames. Args: gram: Spectrogram or chromagram numpy array frames: indices at which to slice array Returns: List of array slices """ return [gram[frames[i]:frames[i+1]] for i in range(len(frames)-1)] def truncate_pad(outputs,length= 175): """Truncates or pads gram slices to be of input length Args: outputs: length two list containing chromagram and spectrogram inputs, i.e. list of four-beat slices length: axis 0 length of output of each slice Returns: length two list of truncated/padded chromagrams and spectrograms """ chromagram,spectrogram = outputs size = spectrogram.shape[0] #We convert the spectrogram power values to db and divide by -80 #so that all values are between 0 and 1 spectrogram = librosa.power_to_db(spectrogram.T, ref=np.max).T/-80.0 if size>=length: return [x[:length] for x in [chromagram,spectrogram]] else: zeros_x = length-size zeros_chromagram = np.zeros((zeros_x,12)) zeros_spectrogram = np.zeros((zeros_x,128)) return [np.concatenate([chromagram,zeros_chromagram],axis = 0).astype(np.float32), np.concatenate([spectrogram,zeros_spectrogram],axis = 0).astype(np.float32)] slice_length = 175 gram_slices_tp = {} gram_slice_times = {} for song in [x for x in labels_dict if x in full_grams]: grams = full_grams[song] full_gram_shape = grams[0].shape[0] tags = labels_dict[song] tags['Frame'] = librosa.time_to_frames(tags.values[:,0],sr=22050,hop_length=256) if tags.shape[0]%4==0: indices = [i*4 for i in range(tags.shape[0]//4)] else: indices = [i*4 for i in range(1+tags.shape[0]//4)] frames = tags.values[indices,-1].astype(np.int32).tolist() if full_gram_shape not in frames: frames.append(full_gram_shape) times = tags.values[indices,0].tolist() gram_slice_times[song] = times chromagrams,spectrograms = [get_slices(gram,frames) for gram in grams] #We check to make sure there are no empty slices, and add zeros at the start and end non_zero_inds = [x for x in range(len(spectrograms)) if spectrograms[x].shape[0]>0] chromagrams = [chromagrams[i] for i in non_zero_inds] chromagrams = [np.zeros((slice_length,12))]+chromagrams+[np.zeros((slice_length,12))] spectrograms = [spectrograms[i] for i in non_zero_inds] spectrograms = 
[np.zeros((slice_length,128))]+spectrograms+[np.zeros((slice_length,128))] #We now perform the truncation/padding gram_slices_tp[song] = list(zip(*[truncate_pad( x) for x in zip(*[chromagrams,spectrograms])]))Outro Transition TimingWe will first train a model similar to the Introduction transition timing model trained in the [Introduction Models notebook](2.%20Introduction%20Transition%20Models.ipynb). As before, it will consist of 1D convolution on four-beat chunks which are then input into a Bidirectional LSTM. The input will be the last 256 beats of the song, and the training labels are binary labels on each bar of whether the transition should start or end on that bar. Data PreparationWe need to extract the binary start/end labels from the labelled beats for each song.tm_seq_len = 64 ohs_dict = {} timing_model_labels = {} for song in gram_slices_tp: tags = labels_dict[song] ohs = get_ohs(tags) ohs_dict[song] = ohs indices = [i*4 for i in range(tags.shape[0]//4)] ohs_slices = [ohs.values[indices[i]:indices[i+1],-2:] for i in range(len(indices)-1)] ohs_slices += [ohs.values[indices[-1]:,-2:]] ohs_slices = ohs_slices[-1*tm_seq_len:] slice_labels = [np.max(slce,axis = 0) for slce in ohs_slices if slce.shape[0]!=0] slice_labels.append(np.array([0,0])) while len(slice_labels) < tm_seq_len + 1: slice_labels = [np.array([0,0])] + slice_labels timing_model_labels[song] = slice_labels def get_timing_model_inputs(song): """Takes a song as input and returns stacked and concatenated array slices representing the last 256 beats of the song. """ chromagrams,spectrograms = gram_slices_tp[song] chromagrams_inp = np.stack(chromagrams[-(tm_seq_len+1):]) spectrograms_inp = np.stack(spectrograms[-(tm_seq_len+1):]) if chromagrams_inp.shape[0] < tm_seq_len + 1: padding_needed = tm_seq_len + 1 - chromagrams_inp.shape[0] zeros_pad_chromagram = np.zeros((padding_needed,slice_length,12)) chromagrams_inp = np.concatenate([zeros_pad_chromagram,chromagrams_inp], axis = 0) zeros_pad_spectrogram = np.zeros((padding_needed,slice_length,128)) spectrograms_inp = np.concatenate([zeros_pad_spectrogram,spectrograms_inp], axis = 0) return np.concatenate([chromagrams_inp,spectrograms_inp],axis = -1).astype(np.float32)As explained in the [Introduction Models notebook](2.%20Introduction%20Transition%20Models.ipynb), we implement an approach to sample weighting which allows the model to focus on the positive labels. In the Introduction Models, we only needed to take into consideration the previous label when determining the weight, but for the outro we need to take into account the previous and the subsequent labels, as some outros end more than 32 beats before the end of the song. We also want to place more weight on the first positive label and surrounding predictions, as this is what will be used to determine the overall transition timing.def get_single_weight(i,sums,other_weight=0.01): """Determines training weights for transition timing model. All bars with positive labels are set at 1, along with the other bars which are multiples of eight bars (i.e. a phrase) away and within 32 bars (or four phrases). Args: i: Index in sliced label input sums: List of sum of labels at each index. 
other_weight: Scaling weight for less important inputs Returns: Scaled weight (either 1 or other_weight) """ factor = other_weight/(1-other_weight) if i > len(sums)-9: return (int( sums[i]!=0 or sums[i-8]!=0 or sums[i-16]!=0)+factor)/(1+factor) elif i > len(sums) - 17: return (int( sums[i]!=0 or sums[i+8]!=0 or sums[i-8]!=0 or sums[i-16]!=0)+factor)/(1+factor) elif i > len(sums) - 25: return (int( sums[i]!=0 or sums[i+8]!=0 or sums[i+16]!=0 or sums[i-8]!=0)+factor)/(1+factor) elif i > len(sums) - 33: return (int( sums[i]!=0 or sums[i+8]!=0 or sums[i+16]!=0 or sums[i+24]!=0)+factor)/(1+factor) else: return (int( sums[i]!=0 or sums[i+8]!=0 or sums[i+16]!=0 or sums[i+24]!=0 or sums[i+32]!=0)+factor)/( 1+factor) def get_weights(song): """Wrapper function for get_single_weight function to apply to full label input for a song. Multiplies the first positive example weight and the preceding weight by 1.5 """ labels = timing_model_labels[song] sums = [np.sum(label) for label in labels] weights = [get_single_weight(i,sums) for i in range(len(sums))] pos_weight_indices = [i for i in range(len(weights)) if weights[i] > 0.5 and sums[i]!=0] first_pos_weight_ind = pos_weight_indices[0] weights[first_pos_weight_ind] *= 1.5 if first_pos_weight_ind >= 8: weights[first_pos_weight_ind-8] *= 1.5 return weightsWe load the same validation and test sets as were used for the intro models.with open('sc_vad_set.pkl','rb') as f: vad_set = pickle.load(f) with open('sc_test_set.pkl','rb') as f: test_set = pickle.load(f) train_set = [x for x in gram_slices_tp if x not in vad_set and x not in test_set] tm_train_input = np.stack( [get_timing_model_inputs(song) for song in train_set]).astype('float32') tm_train_target = np.stack( [timing_model_labels[song] for song in train_set]).astype('float32') tm_train_weights = np.stack( [get_weights(song) for song in train_set]).astype('float32') tm_vad_input = np.stack( [get_timing_model_inputs(song) for song in vad_set]).astype('float32') tm_vad_target = np.stack( [timing_model_labels[song] for song in vad_set]).astype('float32') tm_vad_weights = np.stack( [get_weights(song) for song in vad_set]).astype('float32') tm_test_input = np.stack( [get_timing_model_inputs(song) for song in test_set]).astype('float32') tm_test_target = np.stack( [timing_model_labels[song] for song in test_set]).astype('float32') tm_test_weights = np.stack( [get_weights(song) for song in test_set]).astype('float32')Model Definitiontm_gram_in = Input((slice_length,140),name = 'tm_analysis_in') tm_conv_bar_c = Conv1D(filters = 16,kernel_size = 11,activation = 'relu',strides = 3) tm_pool_bar_c = MaxPooling1D(pool_size = 2,strides = 2) tm_bar_out_c = BatchNormalization()(tm_pool_bar_c(tm_conv_bar_c(tm_gram_in))) tm_conv_bar_2_c = Conv1D(filters = 8,kernel_size = 2,activation = 'relu',strides = 2) tm_pool_bar_2_c = MaxPooling1D(pool_size = 1,strides =1) tm_bar_out_2_c = BatchNormalization()(tm_pool_bar_2_c(tm_conv_bar_2_c(tm_bar_out_c))) tm_bar_out_c_flat = Flatten()(tm_bar_out_2_c) tm_gram_model = Model(tm_gram_in,tm_bar_out_c_flat) tm_gram_input = Input((tm_seq_len+1,175,140)) tm_gram_flat = Lambda(lambda x: K.reshape(x,(-1,175,140)))(tm_gram_input) tm_conv = tm_gram_model(tm_gram_flat) tm_conv_seq = Lambda(lambda x: K.reshape(x,(-1,tm_seq_len+1,tm_conv.shape[-1])))(tm_conv) tm_conv_dense = Dense(48,activation='tanh')(Dropout(rate=0.4)(tm_conv_seq)) tm_conv_dense_2 = Dense(32,activation='tanh')(Dropout(rate=0.4)(tm_conv_dense)) tm_lstm_out = Bidirectional(LSTM( 48,return_sequences=True,recurrent_dropout = 
0.45,dropout=0.45))(tm_conv_dense_2) tm_dense_1 = Dense(16,activation='tanh')(Dropout(rate=0.4)(tm_lstm_out)) tm_out = Dense(2,activation='sigmoid')(Dropout(rate=0.3)(tm_dense_1)) tm_final_model = Model(tm_gram_input,tm_out)Model TrainingWe train the model using the same approach as in the Introduction case, with early stopping based on the validation loss.tm_adam_opt = tf.keras.optimizers.Adam(lr = 2e-4) tm_final_model.compile(optimizer = tm_adam_opt, loss = 'binary_crossentropy', weighted_metrics = ['accuracy'],sample_weight_mode='temporal') tm_final_model.fit(tm_train_input,tm_train_target,batch_size = 16, sample_weight=tm_train_weights,epochs = 100, validation_data = (tm_vad_input,tm_vad_target,tm_vad_weights), verbose = 0) tm_es = EarlyStopping(restore_best_weights=True,monitor='val_loss',patience=20) tm_final_model.fit(tm_train_input,tm_train_target,batch_size = 16, sample_weight=tm_train_weights,epochs = 200, validation_data = (tm_vad_input,tm_vad_target,tm_vad_weights), callbacks = [tm_es],verbose = 0) tm_train_pred = tm_final_model.predict(tm_train_input) print('Training performance:') tm_final_model.evaluate(tm_train_input,tm_train_target,sample_weight=tm_train_weights) print('Validation performance:') tm_final_model.evaluate(tm_vad_input,tm_vad_target,sample_weight=tm_vad_weights) print('Test performance:') tm_final_model.evaluate(tm_test_input,tm_test_target,sample_weight=tm_test_weights)Test performance: 4/4 [==============================] - 0s 55ms/step - loss: 0.0363 - accuracy: 0.8492We can examine an example song to see what our predictions look like.example_df = pd.DataFrame(np.stack(timing_model_labels[' - Lose My Mind']), columns = ['Transition Start Label','Transition End Label']) example_pred = np.round( tm_final_model.predict( tm_train_input[[train_set.index(' - Lose My Mind')]]),2)[0] example_df = pd.concat([example_df, pd.DataFrame(example_pred, columns = ['Start Probability','End Probability'])],axis=1) example_df.iloc[-34:]Outro Start Bar FinderNow that we have a model which can determine rough timings for the transition, we need to narrow those timings down. As discussed above, we do not have the advantage of the start of the song providing a fixed reference point from which to identify the starting bar. Instead, we will have to use the output of the timing model to give us a window in which we can narrow down the correct bar. We will want the model to be predicting around the point where the Start Probability exceeds 0.5 for the first time; in the example directly above, this is at index 38, so we might want the model to be predicting the starting bar over bars 36 to 44, with the correct bar being bar 40.We will train a model which is similar in architecture to the Intro Start Bar Finder for this purpose. However, the training regime will be different, as we want to provide the model with a more diverse range of starting points around the first label. We will therefore train the model on randomly sampled batches of songs and starting points which are within eight bars of the first Start label. We will also need to give the model more context; in the introduction model, only the 'right' context is available since to the 'left' is the start of the song. In this case however it is likely that both left and right context will be relevant. Data PreparationRather than building a fixed training set, we are going to define a method for sampling individual training batches. 
To make this easier, we first extract from our sliced grams a fixed-length sequence for each song. The total sequence length of the model input will be 32, so we construct sequences of length 40 with the correct starting label exactly in the middle at index 20. We can then easily generate a training example of length 32 with the correct label being at any index between 16 and 24.gram_slices_sbf = {} for song in gram_slices_tp: slice_labels = timing_model_labels[song] pos_labels = [i for i in range(len(slice_labels)) if np.sum(slice_labels[i])>0] first_pos_label = pos_labels[0] slice_tuples = list(zip(*gram_slices_tp[song])) slices_concat = [np.concatenate( tup,axis=-1).astype('float32') for tup in slice_tuples][-65:] while len(slices_concat) < 65: slices_concat.append(np.zeros((slice_length,140))) if first_pos_label >= len(slice_labels) - 20: slices = slices_concat[first_pos_label-20:] slices += [np.zeros((slice_length,140)) for _ in range( first_pos_label-(len(slice_labels)-20))] gram_slices_sbf[song] = slices elif first_pos_label < 20: slices = slices_concat[:first_pos_label+20] slices = [np.zeros((slice_length,140)) for _ in range(20 - first_pos_label)] + slices gram_slices_sbf[song] = slices else: gram_slices_sbf[song] = slices_concat[first_pos_label-20:first_pos_label+20] del tm_train_input import gc gc.collect() sbf_fixed_data_train_input = np.stack([np.stack( gram_slices_sbf[song]) for song in train_set]).astype('float32') sbf_fixed_data_vad_input = np.stack([np.stack( gram_slices_sbf[song]) for song in vad_set]).astype('float32') sbf_fixed_data_test_input = np.stack([np.stack( gram_slices_sbf[song]) for song in test_set]).astype('float32') def generate_sbf_training_data(batch_inds): """Generates a training batch for input into the outro Start Bar Finder model by randomly sampling how much the correct target label should be shifted""" batch_size = len(batch_inds) fixed_batch_input = sbf_fixed_data_train_input[batch_inds] batch_targets = random.choices(range(8),k=batch_size) batch_input = np.stack( [fixed_batch_input[i][8-batch_targets[i]:40-batch_targets[i]] \ for i in range(batch_size)]) return tf.constant(batch_input,dtype = tf.float64),\ tf.constant(batch_targets,dtype = tf.int64)We also build a fixed validation set for evaluation during training.sbf_vad_targets = random.choices(range(8),k=len(vad_set)) sbf_vad_input = np.stack( [sbf_fixed_data_vad_input[i][8-sbf_vad_targets[i]:40-sbf_vad_targets[i]]\ for i in range(len(vad_set))]) sbf_test_targets = random.choices(range(8),k=len(test_set)) sbf_test_input = np.stack( [sbf_fixed_data_test_input[i][8-sbf_test_targets[i]:40-sbf_test_targets[i]]\ for i in range(len(test_set))])Model Definitionsbf_seq_len = 32 sbf_gram_in = Input((slice_length,140),name = 'sbf_analysis_in') sbf_conv_bar_c = Conv1D(filters = 16,kernel_size = 11,activation = 'relu',strides = 3) sbf_pool_bar_c = MaxPooling1D(pool_size = 2,strides = 2) sbf_bar_out_c = BatchNormalization()(sbf_pool_bar_c(sbf_conv_bar_c(sbf_gram_in))) sbf_conv_bar_2_c = Conv1D(filters = 8,kernel_size = 2,activation = 'relu',strides = 2) sbf_pool_bar_2_c = MaxPooling1D(pool_size = 1,strides =1) sbf_bar_out_2_c = BatchNormalization()(sbf_pool_bar_2_c(sbf_conv_bar_2_c(sbf_bar_out_c))) sbf_bar_out_c_flat = Flatten()(sbf_bar_out_2_c) sbf_gram_model = Model(sbf_gram_in,sbf_bar_out_c_flat) sbf_gram_input = Input((sbf_seq_len,slice_length,140)) sbf_gram_flat = Lambda(lambda x: K.reshape(x,(-1,slice_length,140)))(sbf_gram_input) sbf_conv = sbf_gram_model(sbf_gram_flat) sbf_conv_seq = Lambda(lambda 
x: K.reshape(x,(-1,sbf_seq_len,sbf_conv.shape[-1])))(sbf_conv) sbf_conv_dense = Dense(64,activation='tanh')(Dropout(rate=0.6)(sbf_conv_seq)) sbf_conv_dense_2 = Dense(32,activation='tanh')(Dropout(rate=0.6)(sbf_conv_dense)) sbf_lstm_out = Bidirectional( LSTM(32,return_sequences=True,recurrent_dropout = 0.5,dropout=0.5))(sbf_conv_dense_2) sbf_dense_1 = Dense(32,activation='tanh')(Dropout(rate=0.45)(sbf_lstm_out)) sbf_zeros = Lambda(lambda x: K.zeros_like(x)[:,:8])(sbf_dense_1) sbf_dense_1_left = Lambda(lambda x: x[:,:-8])(sbf_dense_1) sbf_dense_1_right = Lambda(lambda x: x[:,8:])(sbf_dense_1) sbf_left_attention = Concatenate(axis=1)([sbf_zeros,sbf_dense_1_right]) sbf_right_attention = Concatenate(axis=1)([sbf_dense_1_left,sbf_zeros]) sbf_dense_1_attention = Concatenate(axis=-1)([sbf_left_attention,sbf_lstm_out,sbf_right_attention]) sbf_dense_1_attention_trimmed = Lambda(lambda x: x[:,12:-12])(sbf_dense_1_attention) sbf_dense_2 = Dense(24,activation='tanh')(Dropout(rate=0.5)(sbf_dense_1_attention_trimmed)) sbf_dense_3 = Dense(8,activation='tanh')(Dropout(rate=0.4)(sbf_dense_2)) sbf_out = Dense(1)(Dropout(rate=0.3)(sbf_dense_3)) sbf_out_soft = Activation('softmax')(Lambda(lambda x: x[:,:,0])(sbf_out)) sbf_final_model = Model(sbf_gram_input,sbf_out_soft)Model TrainingWe now build a custom training loop to implement our batch sampling method. We will also need to implement a custom Early Stopping callback to find optimal performance on the validation set.@tf.function def train_step(model,inputs,targets): """tf.function for applying gradient updates to the model. Args: model: Keras model to update inputs: Model inputs used to calculate losses for gradient descent Returns: List of model's losses""" with tf.GradientTape() as tape: pred = model(inputs, training=True) loss_value = tf.keras.losses.SparseCategoricalCrossentropy()( targets,pred) grads = tape.gradient(loss_value, model.trainable_variables) adam_opt.apply_gradients(zip(grads, model.trainable_variables)) acc_metric.update_state(targets,pred) return loss_value sbf_final_model = load_model('outro_start_bar_finder_v3') adam_opt = tf.keras.optimizers.Adam(lr = 5e-4) sbf_final_model.compile(optimizer = adam_opt, loss = None) def run_sbf_epoch(batch_inds_lst,batch_size): """Runs a single training epoch for the Outro Start Bar Finder model, keeping track of training loss and accuracy. Args: batch_inds_lst: Shuffled list of training set indices to be split into batches. batch_size: Batch size to use for training. 
Returns: Training/validation accuracy and loss """ batches = [batch_inds_lst[i*batch_size:(i+1)*batch_size] for i in range(num_batches)] epoch_losses = [] for batch in batches: batch_input,batch_target = generate_sbf_training_data(batch) loss_value = train_step(sbf_final_model,batch_input,batch_target) epoch_losses.append(loss_value.numpy()) train_acc = acc_metric.result().numpy() acc_metric.reset_states() training_info.append((np.mean(epoch_losses),train_acc)) vad_pred = sbf_final_model(sbf_vad_input) vad_loss = tf.keras.losses.SparseCategoricalCrossentropy()( sbf_vad_targets,vad_pred) acc_metric.update_state(sbf_vad_targets,vad_pred) vad_acc = acc_metric.result().numpy() acc_metric.reset_states() vad_info.append((vad_loss.numpy(),vad_acc)) return training_info[-1],vad_info[-1] num_epochs = 600 patience = 75 patience_counter = 0 batch_size = 32 num_training_examples = len(train_set) num_batches = num_training_examples//batch_size acc_metric = tf.keras.metrics.SparseCategoricalAccuracy() training_info = [] vad_info = [] best_vad_acc = 0 best_vad_loss = 10000 for epoch in range(num_epochs): start = time.time() batch_inds_lst = list(range(num_training_examples)) batch_inds_lst = random.sample(batch_inds_lst,num_training_examples) train_tup,vad_tup = run_sbf_epoch(batch_inds_lst,batch_size) if epoch == 300: weights_300 = sbf_final_model.get_weights() if epoch>=300: vad_loss = vad_tup[0] if vad_loss <= best_vad_loss: best_weights = sbf_final_model.get_weights() best_epoch = epoch+1 best_train_info = training_info[-1] patience_counter = 0 best_vad_loss = vad_loss else: patience_counter += 1 if patience_counter > patience: break print(epoch+1,np.round(time.time()-start,3),training_info[-1],vad_info[-1]) sbf_final_model.set_weights(best_weights)We can examine the performance on our fixed validation and test set data:sbf_final_model.compile(optimizer = adam_opt, loss = 'sparse_categorical_crossentropy', metrics = 'sparse_categorical_accuracy') sbf_final_model.evaluate(sbf_vad_input,np.stack(sbf_vad_targets)) sbf_final_model.evaluate(sbf_test_input,np.stack(sbf_test_targets))4/4 [==============================] - 0s 26ms/step - loss: 1.1707 - sparse_categorical_accuracy: 0.5100Full Outro ModelWe can now combine the two models trained above into an end-to-end process which, given beat and downbeat timestamps alongside chromagram and spectrogram data, can label transition points in the outro of a song. We will construct this process and evaluate overall performance on the test set. First, the transition timing model is used to find the approximate starting point of the transition. Then, if the song was predicted by the Introduction models to have its first downbeat on the first beat of the song, we will calculate the BPM and use it to construct a phrase grid which can be used to identify exact phrase points in the outro. For the remaining songs, the starting point is narrowed down to a single bar by the Start Bar Finder. The madmom downbeat prediction is then used to determine the exact starting beat, and subsequent transition points are identified by 32 beat jumps along with the output of the timing model.tm_test_pred = tm_final_model.predict(tm_test_input)We first identify the point in the transition timing model output where the start prediction goes above a certain threshold value. This will indicate approximately where the transition should begin, for use with the Start Bar Finder. 
We also identify the distance from this point to the correct starting point of the transition.first_pred_inds = {} first_pred_diffs = [] for j,song in enumerate(test_set): tm_start_pred = tm_test_pred[j][:,0] tm_labels = timing_model_labels[song] #Since some songs might have low probability predictions, #we need to account for these cases when finding the start point try: first_pred = [i for i in range(65) if tm_start_pred[i] >= 0.65][0] except: try: first_pred = [i for i in range(65) if tm_start_pred[i] >= 0.55][0] except: try: first_pred = [i for i in range(65) if tm_start_pred[i] >= 0.45][0] except: try: first_pred = [i for i in range(65) if tm_start_pred[i] >= 0.3][0] except: first_pred = [i for i in range(65) if tm_start_pred[i] >= 0.15][0] first_pred_inds[song] = first_pred pos_labels = [i for i in range(len(tm_labels)) if np.sum(tm_labels[i])>0] next_label_diff = [i-first_pred for i in pos_labels if i>=first_pred][0] first_pred_diffs.append(next_label_diff)We then build the input arrays for the Start Bar Finder using these points, and calculate the output.gram_slices_sbf_pred = {} for song in test_set: slice_labels = timing_model_labels[song] first_pos_label = first_pred_inds[song] slice_tuples = list(zip(*gram_slices_tp[song])) slices_concat = [np.concatenate( tup,axis=-1).astype('float32') for tup in slice_tuples][-65:] while len(slices_concat) < 65: slices_concat.append(np.zeros((slice_length,140))) if first_pos_label >= len(slice_labels) - 20: slices = slices_concat[first_pos_label-20:] slices += [np.zeros((slice_length,140)) for _ in range( first_pos_label-(len(slice_labels)-20))] gram_slices_sbf_pred[song] = slices[8:] elif first_pos_label < 20: slices = slices_concat[:first_pos_label+20] slices = [np.zeros((slice_length,140)) for _ in range( 20 - first_pos_label)] + slices gram_slices_sbf_pred[song] = slices[8:] else: gram_slices_sbf_pred[song] = slices_concat[first_pos_label-20:first_pos_label+20][8:] sbf_test_pred_input = np.stack([np.stack( gram_slices_sbf_pred[song]) for song in test_set]).astype('float32') sbf_test_pred = sbf_final_model.predict(sbf_test_pred_input) sbf_test_pred_ind = np.argmax(sbf_test_pred,axis=-1)Next we begin the process of generating labels for each song. We first import the timestamp of the first downbeat prediction by the Introduction models for each song in our test set.with open('first_downbeat_predictions.pkl','rb') as f: first_downbeat_pred = pickle.load(f) def get_first_label_timestamp(song): """Utility function which calculates the timestamp of the first label in the outro.""" ohs = ohs_dict[song].iloc[:,[0,-2,-1]] return ohs[(ohs['Outgoing Start']+ohs['Outgoing End'])>=1].values[0,0]Now we need to calculate the BPM of each song to construct our phrase grids. We do this by calculating the BPM across the song in 32 beat slices and taking the mode, in order to account for small irregularities in beat structure in the madmom beat prediction.def get_bpm(song): """Calculates the BPM of a song by taking the mode across the 32 beat slices""" def get_bpm_slce(song,slce): """Calculates the BPM of a single 32 beat slice of a song using linear regression. 
Args: when_beats: beat timestamps slce: slice index Returns: BPM rounded to one decimal place """ when_beats = labels_dict[song].iloc[:,0].apply(float).values[32*slce:32*(slce+1)] m_res = stats.linregress(np.arange(len(when_beats)),when_beats) beat_step = m_res.slope return np.round(60/beat_step,decimals = 1) num_slce = labels_dict[song].shape[0]//32 slce_bpms = [get_bpm_slce(song,i) for i in range(num_slce)] mode = stats.mode(slce_bpms)[0][0] if slce_bpms.count(mode) == slce_bpms.count(np.round(mode)): return np.round(mode) else: return modeWe can then build our phrase grids by incrementing in 32-beat jumps, multiplying by the beat length in seconds to determine phrase timestamps. We see that 81 of our 100 songs have been predicted by the Introduction models as having their first downbeat on the first beat. Furthermore, 54 of these 81 songs have their first label in the outro exactly on 32-beat phrase based on the calculated phrase grid.downbeat_grids = {} phrase_grids = {} bpms = {} good_beat = [] first_beat_songs = [] for i,song in enumerate(list(test_set)): tags = labels_dict[song] bpm = get_bpm(song) beat_len = 60/bpm bpms[song] = bpm first_beat = tags.values[0,0] last_beat = tags.values[-1,0] starting_time = first_downbeat_pred[song] phrase_grid = [starting_time + 32*i*beat_len for i in range(-500,500)] phrase_grid = [x for x in phrase_grid if x>=first_beat and x<=last_beat] downbeat_grid = [starting_time + 4*i*beat_len for i in range(-500,500)] downbeat_grid = [x for x in downbeat_grid if x>=first_beat and x<=last_beat] if starting_time == first_beat: first_beat_songs.append(song) first_outro_label_timestamp = get_first_label_timestamp(song) if min([abs(x-first_outro_label_timestamp) for x in phrase_grid])Number of songs with first downbeat on first beat in Intro: 81 Number of first beat songs with exact phrase match on outro label: 54We are now ready to extract the final labels and evaluate overall performance.def get_labels(prob_pair,threshold = 0.4): """Generates labels based on the transition timing model output at a single timestep. Args: prob_pair: tuple containing the (start prob,end prob) predicted by the model threshold: Threshold probability at which a label will be generated Returns: Label of either Start, End, Start/End, or nan """ start_prob,end_prob = prob_pair if start_prob > threshold: if end_prob > threshold: return 'Start/End' else: return 'Start' elif end_prob > threshold: return 'End' else: return np.nan def get_nearest_slice_time_inds(downbeats,slice_times): """Determines the index of the nearest slice to a list of downbeat/phrase times. Args: downbeats: List of downbeat/phrase timestamps. slice_times: List of model input slice timestamps Returns: List of indices of same length of downbeats, containing index of nearest slice to each downbeat """ nearest_slice_time_inds = [] for downbeat in downbeats: nearest_slice_time_ind = np.argmin([abs(downbeat-x) for x in slice_times]) nearest_slice_time_inds.append(nearest_slice_time_ind) return nearest_slice_time_indsWe extract the timing of transition points and generate the relevant labels for each song in the test set. 
As mentioned above, for songs where the Introduction models predict that the first downbeat is on the first beat, we use the phrase grid to determine phrase timings in the outro; otherwise, we use the Start Bar Finder.test_song_pred_info = {} for i,song in enumerate(test_set): slice_times = gram_slice_times[song] timing_model_probs = tm_test_pred[i] first_pred_ind = first_pred_inds[song] tags = labels_dict[song] if len(slice_times)>=64: outro_slice_times = slice_times[-64:] else: outro_slice_times = slice_times first_outro_slice_time = outro_slice_times[0] if song in first_beat_songs: phrases = [x for x in phrase_grids[song] if x-first_outro_slice_time > -1] first_nearest_slice_time_ind = get_nearest_slice_time_inds(phrases,outro_slice_times)[0] first_nearest_slice_time = outro_slice_times[first_nearest_slice_time_ind] try: nearest_phrase_time = [x for x in phrases if x>outro_slice_times[first_pred_ind-1]][0] except: nearest_phrase_time = [x for x in phrases if x>outro_slice_times[first_pred_ind]][0] phrase_bar_inds = [first_nearest_slice_time_ind+8*j for j in range(-8,8)] phrase_bar_inds = [x for x in phrase_bar_inds if x < 64 and x>=0] nearest_beat_ind = np.argmin([abs(nearest_phrase_time-x) for x in tags.values[:,0]]) phrase_beat_inds = [nearest_beat_ind+j*32 for j in range(-8,8)] phrase_beat_inds = [x for x in phrase_beat_inds if x=0] phrase_beat_inds = [start_bar_downbeat_index + j*32 for j in range(-8,8)] phrase_beat_inds = [x for x in phrase_beat_inds if xWe can take a look at the resulting labels for an example song, and join on the original labels to compare.song = "Redlight - Sports Mode" test_song_pred_info[song] tags_label = labels_dict[song].loc[:,['Beat Timestamp','Outro Label']] tags_label = tags_label[tags_label['Outro Label'].apply(lambda x: x in ['Start'])] test_song_pred_info[song].merge(labels_dict[song].loc[:,['Beat Timestamp','Outro Label']].dropna(), on = 'Beat Timestamp',how='outer').sort_values('Beat Timestamp')As we did in the [Introduction Models notebook](2.%20Introduction%20Transition%20Models.ipynb), we can now evaluate performance across the full test set, looking at songs where the labelling is exactly correct along with the downbeat and number of labels. 
Since the starting point in this case isn't fixed, to evaluate the downbeat we check if there is any overlap between the timestamps predicted and the ones which were manually labelled.downbeat_right = [] downbeat_diffs = [] length_right = [] length_diffs = [] exact = [] first_downbeats = {} for song in test_set: tags_label = labels_dict[song].loc[:,['Beat Timestamp','Outro Label']] tags_label = tags_label.dropna().reset_index(drop = True) pred_df = test_song_pred_info[song].dropna().reset_index(drop = True) pred_df.columns = ['Beat Timestamp','Outro Label'] first_downbeats[song] = pred_df.values[0,0] if pred_df.shape[0]>0: if len(set(tags_label.values[:,0]).intersection(set(pred_df.values[:,0])))>0: downbeat_right.append(song) else: downbeat_diffs.append(tags_label.values[0,0] - pred_df.values[0,0]) if tags_label.shape[0] == pred_df.shape[0]: length_right.append(song) if tags_label.equals(pred_df): exact.append(song) #We will relax the exact check slightly by allowing 'Start/End' to be #equal to 'Start' or 'End' elif tags_label.replace('Start/End','Start').equals(pred_df.replace('Start/End','Start')): exact.append(song) elif tags_label.replace('Start/End','End').equals(pred_df.replace('Start/End','End')): exact.append(song) else: length_diffs.append((song,tags_label.shape[0] - pred_df.shape[0])) print('Number of songs with downbeat prediction correct:',len(downbeat_right)) print('Number of songs with same number of transition points:',len(length_right)) print('Number of songs which are exactly correct:', len(exact))Number of songs with downbeat prediction correct: 63 Number of songs with same number of transition points: 66 Number of songs which are exactly correct: 42Similarly to the Introduction models, the downbeat prediction is the key part of the prediction, as if it is incorrect then the timing of the transitions will be either off-downbeat or off-phrase. However, if this is correct, then there is some subjectivity to the timing of the transition. A manual review of the 21 songs which had the correct downbeat but were not an exact match found that 15 of them had appropriate transitions, with the inference of some missing labels based on simple rules (as explained in the [Introduction Models notebook](2.%20Introduction%20Transition%20Models.ipynb). Two examples of these are below.ex_song_1 = 'Motez - Roll Out (Benson Remix)' print('Predicted Label:') print(test_song_pred_info[ex_song_1].dropna().reset_index(drop=True)) print('\nManual Label:') print(labels_dict[ex_song_1].loc[:,['Beat Timestamp','Outro Label']]\ .dropna().reset_index(drop=True)) ex_song_2 = 'Green Velvet & Mauro Venti - Share Now' print('Predicted Label:') print(test_song_pred_info[ex_song_2].dropna().reset_index(drop=True)) print('\nManual Label:') print(labels_dict[ex_song_2].loc[:,['Beat Timestamp','Outro Label']]\ .dropna().reset_index(drop=True))Predicted Label: Beat Timestamp Predicted Outro Label 0 377.96 End 1 393.08 End 2 408.20 End 3 423.34 End Manual Label: Beat Timestamp Outro Label 0 362.85 Start 1 377.96 End 2 393.08 End 3 408.20 End 4 423.34 EndLesson 7 Matrix Factorization for Recommendations Questions still opened How to know that our recommendation is a good one? 
Training and Testing Data For Recommendations**prior** to implementing our recommendation $\rightarrow$ look for a metric of interest and track it over time (metric x time) In Udacity text:"In the last lesson, you were making recommendations by providing a list of popular items, or a list of items that the user hadn't observed but that someone with similar tastes had observed. However, understanding if these recommendations are good in practice means that you have to deploy these recommendations to users and see how it impacts your metrics (sales, higher engagement, clicks, conversions, etc.).You may not want your recommendations to go live to understand how well they work. In these cases, you will want to split your data into training and testing portions. In these cases, you can train your recommendation engine on a subset of the data, then you can test how well your recommendation engine performs on a test set of data before deploying your model to the world.However, the cases you saw in the last lesson, where just a list of recommendations was provided, don't actually lend themselves very well to training and testing methods of evaluation. In the next upcoming pages, you will be introduced to matrix factorization, which actually does work quite well for these situations." Why use SVD? Singular Value DecompositionSimon Funk $\rightarrow$ FunkSVD In Udacity text:"Singular Value Decomposition - If we let $A$ be our user-item matrix, we can write the decomposition of that matrix in the following way:"$A = U \Sigma V^T$This is a **matrix factorization** that requires **no** missing values (entries may be positive or negative)$U$ $\rightarrow$ User ($n$) $\times$ Latent Factor ($k$) $\leftarrow$ how sensitive each user is to the latent factors$V^T$ $\rightarrow$ Latent Factor ($k$) $\times$ Movie ($m$) $\leftarrow$ how much each movie expresses the latent factors$\Sigma$ is a $k \times k$ diagonal matrix with values ordered from largest to smallest $\leftarrow$ how much weight each latent factor carries in predicting the rating (some are dropped when we restrict the size of $k$) In Udacity text:"In the next part of this lesson, you will first get exposure to Singular Value Decomposition, or SVD. We will soon see why this technique falls short for many recommendation problems. However, understanding traditional SVD approaches to matrix factorization is useful as a start to a number of matrix factorization techniques that are possible in practice.In order to implement SVD for many recommendation engines, we will need to use a slightly modified approach known as FunkSVD. This approach proved to work incredibly well during the Netflix competition, and therefore, it is one of the most popular recommendation approaches in use today." Latent FactorsA latent factor is **not** a feature observed directly in your data (e.g. that a movie is about robot-love, or kinky-sadism) $\rightarrow$ it captures a hidden association between users and movies In Udacity text:"When performing SVD, we create a matrix of users by items (or customers by movies in our specific example), with user ratings for each item scattered throughout the matrix. An example is shown in the image below.![latent factors](graphs\latent.png)You can see that this matrix doesn't have any specific information about the users or items. Rather, it just holds the ratings that each user gave to each item. Using SVD on this matrix, we can find latent features related to the movies and customers. This is amazing because the dataset doesn't contain any information about the customers or movies!"
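To make the dimension bookkeeping above concrete, here is a small self-contained sketch (using a toy random matrix rather than the course data, so the numbers are illustrative only) showing that numpy's SVD returns exactly the $U$, $\Sigma$ and $V^T$ pieces described, and how keeping only the $k$ largest latent factors gives a low-rank approximation.
import numpy as np

# Toy "user-item" matrix with n users, m movies and no missing values
np.random.seed(0)
n_users, n_movies = 6, 4
A = np.random.randint(1, 11, size=(n_users, n_movies)).astype(float)

# Full SVD: A = U @ Sigma @ Vt
U, s, Vt = np.linalg.svd(A, full_matrices=False)
assert np.allclose(U @ np.diag(s) @ Vt, A)  # exact reconstruction with all factors kept

# Keep only the k largest latent factors: U is n x k, Sigma is k x k, Vt is k x m
k = 2
U_k, Sigma_k, Vt_k = U[:, :k], np.diag(s[:k]), Vt[:k, :]
print(U_k.shape, Sigma_k.shape, Vt_k.shape)   # (6, 2) (2, 2) (2, 4)
print(np.round(U_k @ Sigma_k @ Vt_k, 2))      # low-rank approximation of the ratings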
--- How to use Machine Learning to make recommendations?split the data into Train (**fit**) x Test (**evaluate**) Validating Your Recommendationsold data $\rightarrow$ fitnew data $\rightarrow$ evaluate**SVD** techniques can provide a **rating** for every user x item pair (you predicted an **8** and it was an **8**)Metrics like **MSE** (Mean Squared Error) or **MAE** (Mean Absolute Error) Online Testing In Udacity text:"For online methods of testing a recommender's performance, many of the methods you saw in the previous lesson work very well - you can deploy your recommendations and just watch your metrics carefully. It is common in practice to set up online recommendations to have an "old" version of recommended items, which is compared to a new page that uses a new recommendation strategy.All ideas associated with A/B testing that you learned in the earlier lessons are critical to watching your metrics in online learning, and ultimately, choosing a recommendation strategy that works best for your products and customers." Offline Testing In Udacity text:"In many cases, a company might not let you simply deploy your recommendations out into the real world any time you feel like it. Testing out your recommendations in a training-testing environment prior to deploying them is called offline testing.The recommendation methods you built in the previous lesson actually don't work very well for offline testing. In offline testing, it is ideal to not just obtain a list of recommendations for each individual, because we ultimately don't know if a user doesn't use an item because they don't like it, or because they just haven't used it yet (but would like it). Rather, it would be great if we have an idea of how much each user would like each item using a predicted rating. Then we can compare this predicted rating to the actual rating any individual gives to an item in the future.In the previous video, you saw an example of a user to whom we gave a list of movies that they still hadn't seen. Therefore, we couldn't tell how well we were doing with our recommendations. Techniques related to matrix factorization lend themselves nicely to solving this problem." User Groups In Udacity text:"The final (possible) method of validating your recommendations is by having user groups give feedback on items you would recommend for them. Obtaining good user groups that are representative of your customers can be a challenge on its own. This is especially true when you have a lot of products and a very large consumer base." --- How to make recommendations to individuals that are new to the platform?the **Cold Start Problem** --- Intro to SVD First Notebook - L10 - Singular Value Decomposition Singular Value DecompositionSo far in this lesson, you have gained some exposure to Singular Value Decomposition. In this notebook, you will get some hands on practice with this technique.Let's get started by reading in our libraries and setting up the data we will be using throughout this notebook`1.` Run the cell below to create the **user_movie_subset** dataframe. This will be the dataframe you will be using for the first part of this notebook.**Note: Unstacking the matrix here could take ~10 mins to run.**from google.colab import drive drive.mount('/content/gdrive') #from pathlib import Path #patmy = Path('/content/gdrive/MyDrive/GoogleColab/DataScientistStudies3') %ls /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3 !cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/svd_tests.py .
!cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/udacourse3.py . #reloader for our functions library from importlib import reload import udacourse3 udacourse3 = reload(udacourse3) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pickle import svd_tests as t from time import time from scipy import sparse import udacourse3/usr/local/lib/python3.7/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. import pandas.util.testing as tmYou need to increase the maximum page file size (virtual memory, under Advanced Options on Windows) to at least **16 GB** and restart your computer before running this!%ls /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/data !cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/data/movies_clean.csv . !cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/data/reviews_clean.csv . # Read in the datasets movie = udacourse3.fn_read_data('movies_clean.csv', remove_noisy_cols=True) review = udacourse3.fn_read_data('reviews_clean.csv', remove_noisy_cols=True)--- For specific notebook# Create user-by-item matrix begin=time() user_item = review[['user_id', 'movie_id', 'rating']] user_by_movie = user_item.groupby(['user_id', 'movie_id'])['rating'].max().unstack() user_movie_subset = user_by_movie[[73486, 75314, 68646, 99685]].dropna(axis=0) end=time() print('time spent:', end-begin) user_movie_subset.head(2)time spent: -10.658522605895996`2.` Now that you have the **user_movie_subset** matrix, use this matrix to correctly match each key to the correct value in the dictionary below. Use the cells below the dictionary as necessary.# match each letter to the best statement in the dictionary below - each will be used at most once a = 20 b = 68646 c = 'The Godfather' d = 'Goodfellas' e = 265 f = 30685 g = 4 sol_1_dict = { 'the number of users in the user_movie_subset': a, 'the number of movies in the user_movie_subset': g, 'the user_id with the highest average ratings given': e, 'the movie_id with the highest average ratings received': b, 'the name of the movie that received the highest average rating': c } #test dictionary here t.test1(sol_1_dict) # Cell for work # user with the highest average rating print(user_movie_subset.mean(axis=1)) # movie with highest average rating print(user_movie_subset.mean(axis=0)) # list of movie names for movie_id in [73486, 75314, 68646, 99685]: print(movie[movie['movie_id'] == movie_id]['movie']) # users by movies user_movie_subset.shapeuser_id 265 10.00 1023 8.25 1683 8.00 6571 9.25 11639 8.25 13006 6.50 14076 9.00 14725 8.00 23548 8.25 24760 7.50 28713 8.75 30685 9.50 34110 9.25 34430 6.50 35150 9.50 43294 9.50 46849 8.25 50556 7.25 51382 7.75 51410 8.00 dtype: float64 movie_id 73486 8.60 75314 7.35 68646 9.00 99685 8.50 dtype: float64 4187 One Flew Over the Cuckoo's Nest (1975) Name: movie, dtype: object 4361 Taxi Driver (1976) Name: movie, dtype: object 3706 The Godfather (1972) Name: movie, dtype: object 6917 Goodfellas (1990) Name: movie, dtype: objectNow that you have a little more context about the matrix we will be performing Singular Value Decomposition on, we're going to do just that. To get started, let's remind ourselves about the dimensions of each of the matrices we are going to get back.
Essentially, we are going to split the **user_movie_subset** matrix into three matrices:$$ U \Sigma V^T $$`3.` Given what you learned about in the previous parts of this lesson, provide the dimensions for each of the matrices specified above using the dictionary below.# match each letter in the dictionary below - a letter may appear more than once. a = 'a number that you can choose as the number of latent features to keep' b = 'the number of users' c = 'the number of movies' d = 'the sum of the number of users and movies' e = 'the product of the number of users and movies' sol_2_dict = { 'the number of rows in the U matrix': b, 'the number of columns in the U matrix': a, 'the number of rows in the V transpose matrix': a, 'the number of columns in the V transpose matrix': c } #test dictionary here t.test2(sol_2_dict)That's right! We will now put this to use, so you can see how the dot product of these matrices come together to create our user item matrix. The number of latent features will control the sigma matrix as well, and this will a square matrix that will at most be the minimum of the number of users and number of movies (in our case the minimum is the 4 movies).Now let's verify the above dimensions by performing SVD on our user-movie matrix.`4.` Below you can find the code used to perform SVD in numpy. You can see more about this functionality in the [documentation here](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html). What do you notice about the shapes of your matrices? If you try to take the dot product of the three objects you get back, can you directly do this to get back the user-movie matrix?u, s, vt = np.linalg.svd(user_movie_subset) s.shape, u.shape, vt.shape # Run this cell for our thoughts on the questions posted above t.question4thoughts()Looking at the dimensions of the three returned objects, we can see the following: 1. The u matrix is a square matrix with the number of rows and columns equaling the number of users. 2. The v transpose matrix is also a square matrix with the number of rows and columns equaling the number of items. 3. The sigma matrix is actually returned as just an array with 4 values, but should be a diagonal matrix. Numpy has a diag method to help with this. In order to set up the matrices in a way that they can be multiplied together, we have a few steps to perform: 1. Turn sigma into a square matrix with the number of latent features we would like to keep. 2. Change the columns of u and the rows of v transpose to match this number of dimensions. If we would like to exactly re-create the user-movie matrix, we could choose to keep all of the latent features.`5.` Use the thoughts from the above question to create u, s, and vt with four latent features. When you have all three matrices created correctly, run the test below to show that the dot product of the three matrices creates the original user-movie matrix. The matrices should have the following dimensions:$$ U_{n x k} $$$$\Sigma_{k x k} $$$$V^T_{k x m} $$where:1. n is the number of users2. k is the number of latent features to keep (4 for this case)3. 
m is the number of movies# Change the dimensions of u, s, and vt as necessary to use four latent features # update the shape of u and store in u_new u_new = u[:, :len(s)] # update the shape of s and store in s_new s_new = np.zeros((len(s), len(s))) s_new[:len(s), :len(s)] = np.diag(s) # Because we are using 4 latent features and there are only 4 movies, # vt and vt_new are the same vt_new = vt # Check your matrices against the solution assert u_new.shape == (20, 4),\ "Oops! The shape of the u matrix doesn't look right. It should be 20 by 4." assert s_new.shape == (4, 4),\ "Oops! The shape of the sigma matrix doesn't look right. It should be 4 x 4." assert vt_new.shape == (4, 4),\ "Oops! The shape of the v transpose matrix doesn't look right. It should be 4 x 4." assert np.allclose(np.dot(np.dot(u_new, s_new), vt_new), user_movie_subset),\ "Oops! Something went wrong with the dot product. Your result didn't reproduce the original movie_user matrix." print("That's right! The dimensions of u should be 20 x 4, and both v transpose and sigma should be 4 x 4") print("The dot product of the three matrices now equals the original user-movie matrix!")That's right! The dimensions of u should be 20 x 4, and both v transpose and sigma should be 4 x 4 The dot product of the three matrices now equals the original user-movie matrix!It turns out that the sigma matrix can actually tell us how much of the original variability in the user-movie matrix is captured by each latent feature. The total amount of variability to be explained is the sum of the squared diagonal elements. The amount of variability explained by the first component is the square of the first value in the diagonal. The amount of variability explained by the second component is the square of the second value in the diagonal. `6.` Using the above information, can you determine the amount of variability in the original user-movie matrix that can be explained by only using the first two components? Use the cell below for your work, and then test your answer against the solution with the following cell.total_var = np.sum(s**2) var_exp_comp1_and_comp2 = s[0]**2 + s[1]**2 perc_exp = round(var_exp_comp1_and_comp2/total_var*100, 2) print("The total variance in the original matrix is {}.".format(total_var)) print("The percentage of variability captured by the first two components is {}%.".format(perc_exp)) assert np.round(perc_exp, 2) == 98.55,\ "Oops! That doesn't look quite right. You should have total variability as the sum of all the squared elements in the sigma matrix. Then just the sum of the squared first two elements is the amount explained by the first two latent features. Try again." print("Yup! That all looks good!")Yup! That all looks good!`7.` Similar to the previous question, change the shapes of your u, sigma, and v transpose matrices. However, this time consider only using the first 2 components to reproduce the user-movie matrix instead of all 4. After you have your matrices set up, check your matrices against the solution by running the tests. The matrices should have the following dimensions:$$ U_{n x k} $$$$\Sigma_{k x k} $$$$V^T_{k x m} $$where:1. n is the number of users2. k is the number of latent features to keep (2 for this case)3.
m is the number of movies# Change the dimensions of u, s, and vt as necessary to use two latent features # update the shape of u and store in u_2 k = 2 u_2 = u[:, :k] # update the shape of s and store in s_2 s_2 = np.zeros((k, k)) s_2[:k, :k] = np.diag(s[:k]) # Because we are using 2 latent features, we need to update vt this time vt_2 = vt[:k, :] # Check that your matrices are the correct shapes assert u_2.shape == (20, 2),\ "Oops! The shape of the u matrix doesn't look right. It should be 20 by 2." assert s_2.shape == (2, 2),\ "Oops! The shape of the sigma matrix doesn't look right. It should be 2 x 2." assert vt_2.shape == (2, 4),\ "Oops! The shape of the v transpose matrix doesn't look right. It should be 2 x 4." print("That's right! The dimensions of u should be 20 x 2, sigma should be 2 x 2, and v transpose should be 2 x 4") print("The question is now that we don't have all of the latent features, how well can we really re-create") print("the original user-movie matrix?")That's right! The dimensions of u should be 20 x 2, sigma should be 2 x 2, and v transpose should be 2 x 4 The question is now that we don't have all of the latent features, how well can we really re-create the original user-movie matrix?`8.` When using all 4 latent features, we saw that we could exactly reproduce the user-movie matrix. Now that we only have 2 latent features, we might measure how well we are able to reproduce the original matrix by looking at the sum of squared errors from each rating produced by taking the dot product as compared to the actual rating. Find the sum of squared error based on only the two latent features, and use the following cell to test against the solution.# Compute the dot product pred_ratings = np.dot(np.dot(u_2, s_2), vt_2) # Compute the squared error for each predicted vs. actual rating sum_square_errs = np.sum(np.sum((user_movie_subset - pred_ratings)**2)) # Check against the solution assert np.round(sum_square_errs, 2) == 85.34, "Oops! That doesn't look quite right. You should return a single number for the whole matrix." print("That looks right! Nice job!")That looks right! Nice job!At this point, you may be thinking... why would we want to choose a k that doesn't just give us back the full user-movie matrix with all the original ratings? This is a good question. One reason might be computational - sure, you may want to reduce the dimensionality of the data you are keeping, but really this isn't the main reason we would want to reduce k to less than the minimum of the number of movies or users.Let's take a step back for a second. In this example we just went through, your matrix was very clean. That is, for every user-movie combination, we had a rating. **There were no missing values.** But what we know from the previous lesson is that the user-movie matrix is full of missing values. A matrix similar to the one we just performed SVD on:The real world:Therefore, if we keep all k latent features it is likely that latent features with smaller values in the sigma matrix will explain variability that is probably due to noise and not signal. Furthermore, if we use these "noisy" latent features to assist in reconstructing the original user-movie matrix it will potentially (and likely) lead to worse ratings than if we only have latent features associated with signal. `9.` Let's try introducing just a little of the real world into this example by performing SVD on a matrix with missing values.
Below I have added a new user to our matrix who hasn't rated all four of our movies. Try performing SVD on the new matrix. What happens? A message will appear as: `LinAlgError: SVD did not converge`# This line adds one nan value as the very first entry in our matrix user_movie_subset.iloc[0, 0] = np.nan # Try svd with this new matrix u, s, vt = np.linalg.svd(user_movie_subset)**Even with just one nan value we cannot perform SVD! This is going to be a huge problem, because our real dataset has nan values everywhere! This is where FunkSVD comes in to help.** --- Intro to SVD Second Notebook - L15 - Implementing FunkSVD Udacity notes:"In this notebook we will take a look at writing our own function that performs FunkSVD, which will follow the steps you saw in the previous video. If you find that you aren't ready to tackle this task on your own, feel free to skip to the following video where you can watch as I walk through the steps."To test our algorithm, we will run it on the subset of the data you worked with earlier. Run the cell below to get started.begin=time() #Read in the datasets movie = udacourse3.fn_read_data('movies_clean.csv', remove_noisy_cols=True) review = udacourse3.fn_read_data('reviews_clean.csv', remove_noisy_cols=True) # Create user-by-item matrix user_items = review[['user_id', 'movie_id', 'rating', 'timestamp']] user_by_movie = user_items.groupby(['user_id', 'movie_id'])['rating'].max().unstack() # Create data subset user_movie_subset = user_by_movie[[73486, 75314, 68646, 99685]].dropna(axis=0) rating_mat = np.matrix(user_movie_subset) end=time() print('time spent:', end-begin) rating_mattime spent: 7.243285894393921`1.` You will use the **user_movie_subset** matrix to show that your FunkSVD algorithm will converge. In the below cell, use the comments and document string to assist you as you complete writing your own function to complete FunkSVD. You may also want to try to complete the funtion on your own without the assistance of comments. You may feel free to remove and add to the function in any way that gets you a working solution! **Notice:** There isn't a sigma matrix in this version of matrix factorization.function `fn_FunkSVD` created! `2.` Try out your function on the **user_movie_subset** dataset. First try 4 latent features, a learning rate of 0.005, and 10 iterations. 
When you take the dot product of the resulting U and V matrices, how does the resulting **user_movie** matrix compare to the original subset of the data?user_mat, movie_mat = udacourse3.fn_FunkSVD( rating_mat=rating_mat, latent_feature=4, learning_rate=0.005, num_iter=10, verbose=True) print(np.dot(user_mat, movie_mat)) print(rating_mat)[[10.0479878 8.87246196 10.51732358 10.24397654] [ 8.68183717 7.20811603 8.87570913 9.08982966] [ 7.77219217 7.05385187 8.37242259 7.6440144 ] [ 9.7356348 8.16457729 9.81244213 9.75149872] [ 8.76186393 7.3328852 8.87044476 8.56329042] [ 6.79822539 6.04766005 7.28043482 6.80179909] [ 9.42381555 8.02439959 9.57946788 9.17939085] [ 8.30759651 7.24556641 8.68488894 8.0376918 ] [ 8.47763219 7.76453366 9.09194749 8.14287574] [ 7.77321148 6.75411772 8.03316793 7.61884992] [ 9.02212402 8.1016081 9.42371448 8.496327 ] [ 9.81172459 8.52194624 10.06310113 9.35853524] [ 9.01085556 8.34387918 9.89038147 9.09683842] [ 6.59833477 6.10551032 7.13312072 6.31222798] [ 9.7161677 8.59577647 10.16193844 9.75859022] [ 9.621073 8.12340597 10.01267351 10.2129507 ] [ 8.10032893 7.11842416 8.61735771 8.53973922] [ 7.25331981 5.84897872 7.23119329 7.41332491] [ 8.30740382 7.24432175 8.76667183 8.51245611] [ 8.07650583 7.17981551 8.57[...]**The predicted ratings from the dot product are already starting to look a lot like the original data values even after only 10 iterations. You can see some extreme low values that are not captured well yet. The 5 in the second to last row in the first column is predicted as an 8, and the 4 in the second row and second column is predicted to be a 7. Clearly the model is not done learning, but things are looking good.** `3.` Let's try out the function again on the **user_movie_subset** dataset. This time we will again use 4 latent features and a learning rate of 0.005. However, let's bump up the number of iterations to 250. When you take the dot product of the resulting U and V matrices, how does the resulting **user_movie** matrix compare to the original subset of the data? What do you notice about your error at the end of the 250 iterations?user_mat, movie_mat = udacourse3.fn_FunkSVD( rating_mat=rating_mat, latent_feature=4, learning_rate=0.005, num_iter=250, verbose=True) print(np.dot(user_mat, movie_mat)) print(rating_mat)[[10.00000029 10.00000014 10.00000008 9.9999997 ] [ 9.9999991 3.99999936 8.99999968 10.00000112] [ 7.99999976 8.99999987 9.99999995 5.00000026] [ 8.99999863 7.99999916 9.99999959 10.0000016 ] [10.00000102 5.00000079 9.0000004 8.99999871] [ 5.99999828 3.99999876 9.99999942 6.00000216] [ 8.9999994 7.99999978 9.99999991 9.0000006 ] [10.00000115 5.00000084 9.00000043 7.9999986 ] [ 7.00000147 8.00000107 10.00000054 7.99999818] [ 9.00000162 5.00000101 9.0000005 6.99999811] [ 9.00000205 8.00000146 10.00000069 7.99999743] [ 9.00000041 10.00000036 10.00000012 8.99999937] [10.0000002 8.99999998 9.99999996 7.99999983] [ 5.00000148 8.00000094 5.00000044 7.99999823] [ 9.99999903 7.99999933 9.99999961 10.00000111] [ 8.99999784 8.9999984 9.9999992 10.00000268] [ 8.99999862 7.99999905 7.99999953 8.00000167] [ 9.99999997 7.99999998 1. 10.00000004] [ 5.00000082 6.00000056 10.00000029 9.99999902] [ 7.99999985 6.99999986 9.99[...]**In this case, we were able to completely reconstruct the item-movie matrix to obtain an essentially 0 mean squared error. I obtained 0 MSE on iteration 165.** The last time we placed an **np.nan** value into this matrix the entire svd algorithm in python broke. 
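Since the body of `fn_FunkSVD` lives in `udacourse3` and only its output is shown here, the following is a minimal sketch of what a FunkSVD-style update loop generally looks like: plain stochastic gradient descent over only the observed entries, with no sigma matrix and no regularisation. This is an illustration under those assumptions, not the actual course implementation. The key property is that entries which are np.nan are simply never visited by the updates.
import numpy as np

def funk_svd_sketch(ratings, latent_features=4, learning_rate=0.005, iters=10):
    """Illustrative FunkSVD-style factorization. `ratings` is a 2D numpy array
    with np.nan marking missing entries; returns user and item factor matrices."""
    n_users, n_items = ratings.shape
    user_mat = np.random.rand(n_users, latent_features)   # users x latent factors
    item_mat = np.random.rand(latent_features, n_items)   # latent factors x items
    # only the observed (non-nan) user-item pairs take part in training
    observed = [(i, j) for i in range(n_users) for j in range(n_items)
                if not np.isnan(ratings[i, j])]
    for it in range(iters):
        sse = 0.0
        for i, j in observed:
            # error of the current prediction for this user-item pair
            err = ratings[i, j] - user_mat[i, :] @ item_mat[:, j]
            sse += err ** 2
            # gradient steps for both factor vectors, computed before updating either
            user_step = learning_rate * 2 * err * item_mat[:, j]
            item_step = learning_rate * 2 * err * user_mat[i, :]
            user_mat[i, :] += user_step
            item_mat[:, j] += item_step
        print(it + 1, round(sse / len(observed), 4))  # mean squared error this iteration
    return user_mat, item_mat
With hyperparameters like the ones used above (4 latent features, a 0.005 learning rate), a loop of this kind shows the same qualitative behaviour: the per-iteration MSE falls steadily, and every user-movie pair, including the missing ones, still receives a prediction via the dot product of the two factor matrices.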
Let's see if that is still the case using your FunkSVD function. In the below cell, I have placed a nan into the first cell of your numpy array. `4.` Use 4 latent features, a learning rate of 0.005, and 250 iterations. Are you able to run your SVD without it breaking (something that was not true about the python built in)? Do you get a prediction for the nan value? What is your prediction for the missing value? Use the cells below to answer these questions.rating_mat[0, 0] = np.nan rating_mat # run SVD on the matrix with the missing value user_mat, movie_mat = udacourse3.fn_FunkSVD( rating_mat=rating_mat, latent_feature=4, learning_rate=0.005, num_iter=250, verbose=True) preds = np.dot(user_mat, movie_mat) print("The predicted value for the missing rating is {}:".format(preds[0,0])) print() print("The actual value for the missing rating is {}:".format(rating_mat[0,0])) print() assert np.isnan(preds[0,0]) == False print("That's right! You just predicted a rating for a user-movie pair that was never rated!") print("But if you look in the original matrix, this was actually a value of 10. Not bad!")The predicted value for the missing rating is 10.5865223103412: The actual value for the missing rating is nan: That's right! You just predicted a rating for a user-movie pair that was never rated! But if you look in the original matrix, this was actually a value of 10. Not bad!Now let's extend this to a more realistic example. Unfortunately, running this function on your entire user-movie matrix is still not something you likely want to do on your local machine. However, we can see how well this example extends to 1000 users. In the above portion, you were using a very small subset of data with no missing values.`5.` Given the size of this matrix, this will take quite a bit of time. Consider the following hyperparameters: 4 latent features, 0.005 learning rate, and 20 iterations. Grab a snack, take a walk, and this should be done running in a bit.#https://stackoverflow.com/questions/27779677/how-to-format-elapsed-time-from-seconds-to-hours-minutes-seconds-and-milliseco #begin=time() #a = 0 #for i in range (0,2000000000): # a =+ 1 #spend=time()-begin #hour, remain = divmod(spend, 3600) #minute, second = divmod(remain, 60) #print(spend) #print("{:0>2}:{:0>2}:{:05.2f}".format(int(hour),int(minute),second)) #begin=time() # Setting up a matrix of the first 1000 users with movie ratings first_1000_user = np.matrix(user_by_movie.head(1000)) # perform funkSVD on the matrix of the top 1000 users user_mat, movie_mat = udacourse3.fn_FunkSVD( rating_mat=first_1000_user, latent_feature=4, learning_rate=0.005, num_iter=20, verbose=True) #spend_time=time()-begin #hour, remain = divmod(spend, 3600) #minute, second = divmod(remain, 60) #print("{:0>2}:{:0>2}:{:05.2f}".format(int(hour),int(minute),second))###function FunkSDV started number of users: 1000 number of movies: 31245 number of valid ratings: 10852 Optimizaiton Statistics Iteration Mean Squared Error 1 23.6762 2 10.8570 3 7.4245 4 5.7222 5 4.6490 6 3.8982 7 3.3401 8 2.9080 9 2.5636 10 2.2832 11 2.0513 12 1.8574 13 1.6938 14 1.5548 15 1.4361 16 1.3343 17 1.2466 18 1.1709 19 1.1051 20 1.0477 elapsed time: 735.7977s (00:12:15.7977s)`6.` Now that you have a set of predictions for each user-movie pair. Let's answer a few questions about your results. 
Provide the correct values to each of the variables below, and check your solutions using the tests below.# How many actual ratings exist in first_1000_users num_rating = np.count_nonzero(~np.isnan(first_1000_user)) print("The number of actual ratings in the first_1000_users is {}.".format(num_rating)) print() # How many ratings did we make for user-movie pairs that didn't have ratings rating_for_missing = first_1000_user.shape[0]*first_1000_user.shape[1] - num_rating print("The number of ratings made for user-movie pairs that didn't have ratings is {}".format(rating_for_missing)) # Test your results against the solution assert num_rating == 10852, "Oops! The number of actual ratings doesn't quite look right." assert rating_for_missing == 31234148,\ "Oops! The number of movie-user pairs that you made ratings for that didn't actually have ratings doesn't look right." # Make sure you made predictions on all the missing user-movie pairs pred = np.dot(user_mat, movie_mat) assert np.isnan(pred).sum() == 0 print("Nice job! Looks like you have predictions made for all the missing user-movie pairs!") print("But I still have one question... How good are they?")Nice job! Looks like you have predictions made for all the missing user-movie pairs! But I still have one question... How good are they?--- Metrics on prediction Third Notebook - L18 - How Are We Doing? Udacity notes:"In the last notebook, you created a working version of SVD for situations even when there are tons of missing values. This is awesome! The question now is how well does this solution work?In this notebook, we are going to simulate exactly what we would do in the real world to tune our recommender."Run the cell below to read in the data and get started.#Read in the datasets movie = udacourse3.fn_read_data('movies_clean.csv', remove_noisy_cols=True) review = udacourse3.fn_read_data('reviews_clean.csv', remove_noisy_cols=True)1. Using the **reviews** dataframe, perform the following tasks to create a training and validation set of data we can use to test the performance of your SVD algorithm using **off-line** validation techniques. * Order the reviews dataframe from earliest to most recent * Pull the first 10000 reviews from the dataset * Make the first 8000/10000 reviews the training data * Make the last 2000/10000 the test data * Return the training and test datasets function `fn_create_train_test` created!# Use our function to create training and test datasets train_df, val_df = udacourse3.fn_create_train_test( review=review, order_by='date', train_size=8000, test_size=2000, verbose=True) # Make sure the dataframes we are using are the right shape assert train_df.shape[0] == 8000,\ "The number of rows doesn't look right in the training dataset." assert val_df.shape[0] == 2000,\ "The number of rows doesn't look right in the validation dataset" assert str(train_df.tail(1)['date']).split()[1] == '2013-03-15',\ "The last date in the training dataset doesn't look like what we expected." assert str(val_df.tail(1)['date']).split()[1] == '2013-03-18',\ "The last date in the validation dataset doesn't look like what we expected." print("Nice job! Looks like you have written a function that provides training and validation dataframes") print("for you to use in the next steps")Nice job! Looks like you have written a function that provides training and validation dataframes for you to use in the next stepsIn the real world, we might have all of the data up to this final date in the training data. 
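For reference, a date-ordered split like the one `fn_create_train_test` performs can be sketched as follows. This is an illustrative version under the assumptions listed in the task above, not the exact body of the course function.

```python
def create_train_test_sketch(review, order_by='date', train_size=8000, test_size=2000):
    """Return (train_df, val_df) split by time, oldest reviews first.

    Sketch: order the reviews chronologically, keep the first
    train_size + test_size rows, and use the earliest rows for training.
    """
    ordered = review.sort_values(order_by)
    subset = ordered.head(train_size + test_size)
    train_df = subset.head(train_size)
    val_df = subset.tail(test_size)
    return train_df, val_df

# train_df, val_df = create_train_test_sketch(review)  # shapes match the asserts above
```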
Then we want to see how well we are doing for each of the new ratings, which show up in the test data.Below is a working example of the function created in the previous example you can use (or you can replace with your own).`2.` Fit the function to the training data with the following hyperparameters: 15 latent features, a learning rate of 0.005, and 250 iterations. This will take some time to run, so you may choose fewer latent features, a higher learning rate, or fewer iteratios if you want to speed up the process. **Note:** Again, this might be a good time to take a phone call, go for a walk, or just take a little break.`fn_funk_SVD` already exist!# Create user-by-item matrix - nothing to do here train_user_item = train_df[['user_id', 'movie_id', 'rating', 'timestamp']] train_data_df = train_user_item.groupby(['user_id', 'movie_id'])['rating'].max().unstack() train_data_np = np.array(train_data_df) train_data_df.head(2) # Fit FunkSVD with the specified hyper parameters to the training data user_mat, movie_mat = udacourse3.fn_FunkSVD( rating_mat=train_data_np, latent_feature=15, learning_rate=0.005, num_iter=250, verbose=True)###function FunkSDV started number of users: 3278 number of movies: 2679 number of valid ratings: 8000 Optimizaiton Statistics Iteration Mean Squared Error 1 10.5448 2 5.8821 3 4.1158 4 3.0849 5 2.4057 6 1.9249 7 1.5685 8 1.2957 9 1.0819 10 0.9114 11 0.7735 12 0.6609 13 0.5680 14 0.4908 15 0.4262 16 0.3718 17 0.3256 18 0.2862 19 0.2524 20 0.2232 21 0.1980 22 0.1761 23 0.1569 24 0.1402 25 0.1255 26 0.1125 27 0.1011 28 0.0910 29 0.0821 30 0.0741 31 0.0670 32 0.0607 33 0.0551 34 0.0500 35 0.0455 36 0.0414 37 0.0378 38 0.0345 39 0.0315 40 0.0288 41 0.0264 42 0.0242 43 0.0222 44 0.0203 45 0.0187 46 0.0172 47 0.0158 48 0.0146 49 0.0135 50 [...]Now that you have created the **user_mat** and **movie_mat**, we can use this to make predictions for how users would rate movies, by just computing the dot product of the row associated with a user and the column associated with the movie.`3.` Use the comments in the function below to complete the **predict_rating** function.function `predict_rating` created!# Test your function with the first user-movie in the user-movie matrix (notice this is a nan) pred_val = udacourse3.fn_predict_rating( df_train=train_data_df, user_matrix=user_mat, movie_matrix=movie_mat, user_id=8, movie_id=2844, verbose=True) pred_val###function movies watched started elapsed time: 0.0002sIt is great that you now have a way to make predictions. However it might be nice to get a little phrase back about the user, movie, and rating.`4.` Use the comments in the function below to complete the **predict_rating** function. **Note:** The movie name doesn't come back in a great format, so you can see in the solution I messed around with it a bit just to make it a little nicer.function `fn_print_prediction_summary` created!# Test your function the the results of the previous function udacourse3.fn_print_prediction_summary( df_movie=movie, user_id=8, movie_id=2844, prediction=pred_val, verbose=True)###function movies watched started For user 8 we predict a 8.25 rating for the movie Fantômas - À l'ombre de la guillotine (1913).Now that we have the ability to make predictions, let's see how well our predictions do on the test ratings we already have. 
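As a reminder of what that prediction step does under the hood, it is only an index lookup plus a dot product. Here is a minimal sketch, assuming the `train_data_df`, `user_mat`, and `movie_mat` objects created above; it is not the course implementation.

```python
import numpy as np

def predict_rating_sketch(df_train, user_matrix, movie_matrix, user_id, movie_id):
    """Predict one rating from the factor matrices.

    df_train's index holds user ids and its columns hold movie ids, in the same
    order as the rows of user_matrix and the columns of movie_matrix.
    """
    user_row = np.where(df_train.index == user_id)[0][0]      # raises IndexError for an unseen user
    movie_col = np.where(df_train.columns == movie_id)[0][0]  # raises IndexError for an unseen movie
    return float(np.dot(user_matrix[user_row, :], movie_matrix[:, movie_col]))

# Example with the same pair as above:
# predict_rating_sketch(train_data_df, user_mat, movie_mat, 8, 2844)
```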
This will give an indication of how well have captured the latent features, and our ability to use the latent features to make predictions in the future!`5.` For each of the user-movie rating in the **val_df** dataset, compare the actual rating given to the prediction you would make. How do your predictions do? Do you run into any problems? If yes, what is the problem? Use the document strings and comments below to assist as you work through these questions.function `fn_validation_comparison` created!!cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/udacourse3.py . #reloader for our functions library from importlib import reload import udacourse3 udacourse3 = reload(udacourse3) # Perform the predicted vs. actual for the first 6 rows. How does it look? udacourse3.fn_validation_comparison( df_train=train_data_df, user_matrix=user_mat, movie_matrix=movie_mat, val_df=val_df, num_pred=6, verbose=True) # Perform the predicted vs. actual for the first 7 rows. What happened? udacourse3.fn_validation_comparison( df_train=train_data_df, user_matrix=user_mat, movie_matrix=movie_mat, val_df=val_df, num_pred=7, verbose=True)###function validation comparison started Our rating actual: 8 → for user 49056 on movie 1598822 predited: 7 Our rating actual: 9 → for user 49056 on movie 289879 predited: 9 Our rating actual: 9 → for user 49056 on movie 1563738 predited: 7 Our rating actual: 4 → for user 49056 on movie 1458175 predited: 7 Our rating actual: 8 → for user 28599 on movie 103639 predited: 8 Our rating actual: 4 → for user 50593 on movie 1560985 predited: 4 cannot predict for this movie system halted**The 7th movie is a movie that has no ratings. Therefore, we are not able to make a prediction for this user-movie pair.** --- Users with no Data Forth Notebook - L20 - Cold Start Problem in Udacity notes"In the previous notebook, you learned about the **Cold Start Problem** first hand. In cases where you are introduced to a new user or new movie, collaborative flitering is not helpful as a technique to make predictions.Instead, you will need to use one of the techniques from the previous lesson like content based recommendations for new items or rank based recommendations for new users."As a final step to completing out our recommendation system, we will build in these edge cases. Run the cell below to get started. Matrix Factorization - Collaborative Filtering Where PossibleNotice the following information is available by running the below cell:`1.` **reviews** - a dataframe of reviews`2.` **movies** - a dataframe of movies`3.` **create_train_test** - a function for creating the training and validation datasets`4.` **predict_rating** - a function that takes a user and movie and gives a prediction using FunkSVD`5.` **train_df** and **val_df** - the training and test datasets used in the previous notebook`6.` **user_mat** and **movie_mat** - the u and v matrices from FunkSVD`7.` **train_data_df** - a user-movie matrix with ratings where available. FunkSVD was performed on this matrix#Read in the datasets movie = udacourse3.fn_read_data('data/movies_clean.csv', remove_noisy_cols=True) review = udacourse3.fn_read_data('data/reviews_clean.csv', remove_noisy_cols=True) #copy picke files !cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/user_matrix . !cp /content/gdrive/MyDrive/GoogleColab/DataScientistStudies3/movie_matrix . 
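Before rebuilding the matrices below, note that one simple way to avoid the hard stop seen in the validation run above is to check, before predicting, whether the user and the movie actually appear in the training matrix. A minimal sketch, reusing the `train_data_df` naming from these notebooks:

```python
def can_predict(df_train, user_id, movie_id):
    """Return True only if FunkSVD has latent factors for both ids.

    Sketch of a guard against the cold start problem: brand-new users or movies
    are simply not present in the training user-by-movie matrix.
    """
    return (user_id in df_train.index) and (movie_id in df_train.columns)

# Possible use around a prediction call:
# if can_predict(train_data_df, user_id, movie_id):
#     pred = udacourse3.fn_predict_rating(df_train=train_data_df, user_matrix=user_mat,
#                                         movie_matrix=movie_mat, user_id=user_id,
#                                         movie_id=movie_id, verbose=False)
# else:
#     pred = None  # fall back to rank-based or content-based recommendations instead
```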
# Use our function to create training and test datasets train_df, val_df = udacourse3.fn_create_train_test( review=review, order_by='date', train_size=8000, test_size=2000, verbose=True) # Create user-by-item matrix - this will keep track of order of users and movies in u and v train_user_item = train_df[['user_id', 'movie_id', 'rating', 'timestamp']] train_data_df = train_user_item.groupby(['user_id', 'movie_id'])['rating'].max().unstack() train_data_np = np.array(train_data_df) # Read in user and movie matrices user_file = open("user_matrix", 'rb') user_mat = pickle.load(user_file) user_file.close() movie_file = open("movie_matrix", 'rb') movie_mat = pickle.load(movie_file) movie_file.close()###function create_train_test started elapsed time: 1.3890sValidating PredictionsUnfortunately, you weren't able to make predictions on every user-movie combination in the test set, as some of these users or movies were new. However, you can validate your predictions for the user-movie pairs that do exist in the user_mat and movie_mat matrices. `1.` Complete the function below to see how far off we were on average across all of the predicted ratings.#########1#########2#########3#########4#########5#########6#########7#########8 def fn_validation_comparison(df_train, user_matrix, movie_matrix, val_df, num_pred=0, relatory=True, verbose=False): val_user = np.array(val_df['user_id']) val_movie = np.array(val_df['movie_id']) val_rating = np.array(val_df['rating']) if relatory: #relatory only if num_pred < 1: if verbose: print('no predictions asked') return False else: for idx in range(num_pred): #call for our subfunction #if verbose: # print('*calling predict rating subfunction') pred = udacourse3.fn_predict_rating( df_train=df_train, user_matrix=user_matrix, movie_matrix=movie_matrix, user_id=val_user[idx], movie_id=val_movie[idx], verbose=False) if not pred: if verbose: print('system halted') return False print('Our rating actual: {} → for user {} on movie {}'\ .format(val_rating[idx], val_user[idx], val_movie[idx])) print(' predited: {}'.format(round(pred))) return True else: #full mode sse = 0 num_rated = 0 ls_pred, ls_act = [], [] actual_v_pred = np.zeros((10,10)) for idx in range(len(val_user)): try: pred = udacourse3.fn_predict_rating( df_train=df_train, user_matrix=user_matrix, movie_matrix=movie_matrix, user_id=val_user[idx], movie_id=val_movie[idx], verbose=False) sse += (val_rating[idx] - pred)**2 num_rated += 1 ls_pred.append(pred) ls_act.append(val_rating[idx]) actual_v_pred[11 - int(val_rating[idx] - 1), int(round(pred) - 1)] += 1 except: continue rmse = np.sqrt(sse / num_rated) perc_rated = num_rated / len(val_user) output = rmse, perc_rated, actual_v_pred, ls_pred, ls_act end = time() if verbose: print('elapsed time: {}s'.format(end-begin)) return output # How well did we do? rmse, perc_rated, actual_v_pred, preds, acts = fn_validation_comparison( df_train=train_data_df, user_matrix=user_mat, movie_matrix=movie_mat, val_df=val_df, num_pred=0, relatory=False, verbose=True) print(rmse, perc_rated) sns.heatmap(actual_v_pred); plt.xticks(np.arange(10), np.arange(1,11)); plt.yticks(np.arange(10), np.arange(1,11)); plt.xlabel("Predicted Values"); plt.ylabel("Actual Values"); plt.title("Actual vs. Predicted Values"); plt.figure(figsize=(8,8)) plt.hist(acts, normed=True, alpha=.5, label='actual'); plt.hist(preds, normed=True, alpha=.5, label='predicted'); plt.legend(loc=2, prop={'size': 15}); plt.xlabel('Rating'); plt.title('Predicted vs. 
Actual Rating');`2.` We didn't do so bad on making those predictions! But, how many user-movie pairs were we unable to make predictions for? Use the cell below to answer this question.# From the above, this can be calculated as follows: print("Number not rated {}".format(int(len(val_df['rating'])*(1-perc_rated)))) print("Number rated {}.".format(int(len(val_df['rating'])*perc_rated)))Content Based For New MoviesIf all of the above went well, you will notice we still have work to do! We need to bring in a few things we picked up from the last lesson to use for those new users and movies. Below is the code used to make the content based recommendations, which found movies that were similar to one another. This was from **5_Content_Based_Recommendations** in the previous lesson.The below function **find_similar_movies** will provide similar movies to any movie based only on content. Run the cell below to gain access to the content based similarity functions.# Subset so movie_content is only using the dummy variables for each genre and the 3 century based year dummy columns movie_content = np.array(movies.iloc[:,4:]) # Take the dot product to obtain a movie x movie matrix of similarities dot_prod_movies = movie_content.dot(np.transpose(movie_content)) #########1#########2#########3#########4#########5#########6#########7#########8 def fn_find_similar_movie(movie_id, verbose=False): '''This function... Source: Udacity Data Science Course - Lesson 7 - Matrix Factorization for Recommendations - Forth Notebook - Class 20 - Cold Start Problem w/ FunkSVD Inputs: - movie_id (mandatory) - a movie_id - verbose (optional) - if you want some verbosity in your function - (Boolean, default=False) Output: - similar_movie - an array of the most similar movies by title ''' if verbose: print('###function movies watched started') begin = time() # find the row of each movie id movie_idx = np.where(movies['movie_id'] == movie_id)[0][0] # find the most similar movie indices - to start I said they need to be the same for all content similar_idxs = np.where(dot_prod_movies[movie_idx] == np.max(dot_prod_movies[movie_idx]))[0] # pull the movie titles based on the indices similar_movies = np.array(movies.iloc[similar_idxs, ]['movie']) if verbose: print('elapsed time: {}s'.format(end-begin)) return similar_movies #########1#########2#########3#########4#########5#########6#########7#########8 def fn_get_movie_name(movie_id, verbose=False): '''This function... Source: Udacity Data Science Course - Lesson 7 - Matrix Factorization for Recommendations - Forth Notebook - Class 20 - Cold Start Problem w/ FunkSVD Inputs: - movie_id (mandatory) - a list of movie ids - verbose (optional) - if you want some verbosity in your function - (Boolean, default=False) Output: - movies - a list of movie names associated with the movie_ids ''' if verbose: print('###function get movies names started') begin = time() movie_lst = list(movies[movies['movie_id'].isin(movie_ids)]['movie']) if verbose: print('elapsed time: {}s'.format(end-begin)) return movie_lstRank Based For New UsersFrom the above two code cells, we have a way to make recommendations for movie-user pairs that have ratings in any part of our user-movie matrix. We also have a way to make ratings for movies that have never received a rating using movie similarities.In this last part here, we need a way to make recommendations to new users. For this, our functions from **2_Most_Popular_Recommendations** in Lesson 1 will come in handy. 
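Before that, here is how the content-based fallback above might be wired in for a movie that never appears in the training matrix. The movie id is purely hypothetical, and the call assumes the helpers and dataframes defined above run as intended.

```python
# Hypothetical example of the fallback branch for a brand-new movie; replace
# new_movie_id with a real movie_id from the movies dataframe when running.
new_movie_id = 7654321

if new_movie_id not in train_data_df.columns:
    # No ratings for this movie, so FunkSVD has no column for it:
    # fall back to pure content similarity instead.
    similar_titles = fn_find_similar_movie(new_movie_id)
    print(similar_titles[:5])
```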
Run the cell below to have these functions available.Run the cell below to gain access to the rank based functions.#########1#########2#########3#########4#########5#########6#########7#########8 def fn_create_ranked_df(movie, review, verbose=False): '''This function... Source: Udacity Data Science Course - Lesson 7 - Matrix Factorization for Recommendations - Forth Notebook - Class 20 - Cold Start Problem w/ FunkSVD Inputs: - movie (mandatory) - the movies dataframe - review (mandatory) - the reviews dataframe - verbose (optional) - if you want some verbosity in your function - (Boolean, default=False) Output: ranked_movies - a dataframe with movies that are sorted by highest avg rating, more reviews, then time, and must have more than 4 ratings ''' if verbose: print('###function movies watched started') begin = time() # Pull the average ratings and number of ratings for each movie movie_ratings = reviews.groupby('movie_id')['rating'] avg_ratings = movie_ratings.mean() num_ratings = movie_ratings.count() last_rating = pd.DataFrame(reviews.groupby('movie_id').max()['date']) last_rating.columns = ['last_rating'] # Add Dates rating_count_df = pd.DataFrame({'avg_rating': avg_ratings, 'num_ratings': num_ratings}) rating_count_df = rating_count_df.join(last_rating) # merge with the movies dataset movie_recs = movies.set_index('movie_id').join(rating_count_df) # sort by top avg rating and number of ratings ranked_movies = movie_recs.sort_values(['avg_rating', 'num_ratings', 'last_rating'], ascending=False) # for edge cases - subset the movie list to those with only 5 or more reviews ranked_movies = ranked_movies[ranked_movies['num_ratings'] > 4] if verbose: print('elapsed time: {}s'.format(end-begin)) return ranked_movies #########1#########2#########3#########4#########5#########6#########7#########8 def fn_popular_recommendation(user_id, num_top, ranked_movie, verbose=False): '''This function... Source: Udacity Data Science Course - Lesson 7 - Matrix Factorization for Recommendations - Forth Notebook - Class 20 - Cold Start Problem w/ FunkSVD Inputs: - user_id (mandatory) - the user_id (str) of the individual you are making recommendations for - num_top (mandatory) - an integer of the number recommendations you want back - ranked_movie (mandatory) - a pandas dataframe of the already ranked movies based on avg rating, count, and time - verbose (optional) - if you want some verbosity in your function - (Boolean, default=False) Output: - top_movie - a list of the n top recommended movies by movie title in order best to worst ''' if verbose: print('###function popular recommendations started') begin = time() top_movie = list(ranked_movie['movie'][:n_top]) if verbose: print('elapsed time: {}s'.format(end-begin)) return top_movieNow For Your TaskThe above cells set up everything we need to use to make predictions. Your task is to write a function, which uses the above information as necessary to provide recommendations for every user in the **val_df** dataframe. There isn't one right way to do this, but using a blend between the three could be your best bet. 
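For the rank-based piece of such a blend, the core logic fits in a few lines of pandas. The sketch below is independent of the helper functions above (it ignores the tie-break on most recent rating date for brevity) and assumes the `movie` and `review` dataframes loaded earlier in this notebook.

```python
def top_rated_titles(movie_df, review_df, num_top=5, min_ratings=5):
    """Rank movies by average rating, then by number of ratings, and return the top titles.

    Sketch of the rank-based fallback for brand-new users, mirroring the logic of
    fn_create_ranked_df / fn_popular_recommendation above.
    """
    stats = review_df.groupby('movie_id')['rating'].agg(['mean', 'count'])
    stats = stats[stats['count'] >= min_ratings]              # edge cases: require enough reviews
    ranked = stats.sort_values(['mean', 'count'], ascending=False)
    # reindex keeps only ids that also exist in the movies dataframe
    titles = movie_df.set_index('movie_id')['movie'].reindex(ranked.index).dropna()
    return list(titles[:num_top])

# new_user_recs = top_rated_titles(movie, review, num_top=10)
```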
You can see the blended approach I used in the video on the next page, but feel free to be creative with your solution!`3.` Use the function below along with the docstrings to assist with completing the task for this notebook.#########1#########2#########3#########4#########5#########6#########7#########8 def fn_make_recommendations(_id, _id_type='movie', train_data=train_data_df, train_df=train_df, movies=movies, rec_num=5, user_mat=user_mat, movie_mat=movie_mat, verbose=False): '''This function makes recommendations for a user id or a movie id, blending FunkSVD predictions with rank based and content based recommendations. Source: Udacity Data Science Course - Lesson 7 - Matrix Factorization for Recommendations - Fourth Notebook - Class 20 - Cold Start Problem w/ FunkSVD Inputs: - _id (mandatory) - either a user or movie id (int) - _id_type (mandatory) - "movie" or "user" (str) - train_data (mandatory) - dataframe of data as user-movie matrix - train_df (mandatory) - dataframe of training data reviews - movies (mandatory) - movies df - rec_num (optional) - number of recommendations to return (int) - user_mat (optional) - the U matrix of matrix factorization - movie_mat (optional) - the V matrix of matrix factorization - verbose (optional) - if you want some verbosity in your function - (Boolean, default=False) Output: - recs - (array) a list or numpy array of recommended movies like the given movie, or recs for a user_id given ''' if verbose: print('###function make recommendations started') begin = time() # if the user is available from the matrix factorization data, # I will use this and rank movies based on the predicted values # For use with user indexing val_user = train_data.index ranked_movie = fn_create_ranked_df(movie, train_df, verbose=verbose) rec_id, rec_name = None, None if _id_type == 'user': if _id in train_data.index: # Get the index of which row the user is in for use in U matrix idx = np.where(val_user == _id)[0][0] # take the dot product of that row and the V matrix pred = np.dot(user_mat[idx,:], movie_mat) # pull the top movies according to the prediction indices = pred.argsort()[-rec_num:][::-1] #indices rec_id = train_data.columns[indices] rec_name = fn_get_movie_name(rec_id, verbose=verbose) else: # if we don't have this user, give just top ratings back rec_name = fn_popular_recommendation(_id, rec_num, ranked_movie, verbose=verbose) # Find similar movies if it is a movie that is passed else: rec_id = fn_find_similar_movie(_id, verbose=verbose) rec_name = fn_get_movie_name(rec_id, verbose=verbose) output = rec_id, rec_name end = time() if verbose: print('elapsed time: {}s'.format(end-begin)) return output fn_make_recommendations(_id=48, _id_type='user', verbose=True) len(set(val_df['user_id'])) # Make recommendations user_rec_dict_with_top = dict() for user_id in set(val_df['user_id']): user_rec_dict_with_top[user_id] = fn_make_recommendations(_id=user_id, _id_type='user')[1] cnter = 0 for user, rec in user_rec_dict_with_top.items(): if cnter < 12: print("For user {}, our recommendations are: \n {}".format(user, rec)) cnter+=1 else: breakEncoding Categorical Features# person_home_ownership pers_h_own_label=LabelEncoder() pers_h_own_label.fit(df_raw["person_home_ownership"]) df_raw["person_home_ownership_enc"]=pers_h_own_label.transform(df_raw["person_home_ownership"]); pers_h_own_label.classes_ pers_h_own_label.transform(pers_h_own_label.classes_) # loan_intent loan_intent_label=LabelEncoder() loan_intent_label.fit(df_raw["loan_intent"]) df_raw["loan_intent_enc"]=loan_intent_label.transform(df_raw["loan_intent"]); loan_intent_label.classes_ loan_intent_label.transform(loan_intent_label.classes_) # loan_grade loan_grade_label=LabelEncoder()
loan_grade_label.fit(df_raw["loan_grade"]) df_raw["loan_grade_enc"]=loan_grade_label.transform(df_raw["loan_grade"]); loan_grade_label.classes_ loan_grade_label.transform(loan_grade_label.classes_)We're not going to use any categorical features other than these. Dealing with NaN valuespd.DataFrame(df_raw.isna().value_counts())person_emp_length has 827 + 68 NaN values. loan_int_rate has 3048 + 68 NaN values.plt.hist(df_raw["loan_int_rate"].value_counts(), bins=30); df_raw.dropna(inplace=False).shapeWe're dropping the rows with NaN values. That discards almost 4 thousand observations; proper imputation is left as a future improvement.df_raw=df_raw.dropna() df_raw.to_csv(r"..\Data Set\Data_No_NaN.csv",index=False)Outputting the final dataY=df_raw["loan_status"] X=df_raw[["person_age","person_income","person_home_ownership_enc", "person_emp_length",'loan_intent_enc', 'loan_grade_enc','loan_amnt','loan_int_rate','loan_percent_income']] X.info() X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.2, random_state=123 # just for reproducibility; remove later. ) X_train.to_csv(r"..\Data Set\X_train.csv",index=False) X_test.to_csv(r"..\Data Set\X_test.csv",index=False) Y_train.to_csv(r"..\Data Set\Y_train.csv",index=False) Y_test.to_csv(r"..\Data Set\Y_test.csv",index=False)Defensive ProgrammingSo far we have focused on the basic tools of writing a program: variables, lists, loops, conditionals, and functions.We haven't looked very much at whether a program is getting the right answer (and whether it continues to get the right answer as we change it).**Defensive programming** is the practice of expecting your code to have mistakes, and guarding against them.To do this, we will write some code that *checks its own operation*.This is generally good practice that speeds up software development and helps ensure that your code is doing what you intend. AssertionsA common way to make sure that a program is running correctly is to use an *assertion*.* The `Firefox` browser is full of *assertions*: 10-20% of its code is there to check that the other 80-90% is working correctly!With an *assertion*, we `assert` that some condition is true, as an indicator that the program is running correctly.
That is to say that, if the condition is not met, we would consider the program to be running *incorrectly* (it is not a *guarantee* of correct operation).In `Python`, the syntax for this is:```pythonassert , "Some text describing the problem"```as in the code below.numbers = [1.5, 2.3, 0.7, -0.001, 4.4] total = 0.0 for n in numbers: assert n > 0.0, 'Data should only contain positive values' total += n print('total is:', total)Categories of AssertionGenerally-speaking, *assertions* are one of three types:* **preconditions**: something must be true at the *start* of a function (so it can work correctly)* **postconditions**: something must be true at the *end* of a function (to establish that the function worked)* **invariants**: something that is always true (at some point in the code)The code below has both *preconditions* and *postconditions*def normalize_rectangle(rect): '''Normalizes a rectangle so that it is at the origin and 1.0 units long on its longest axis.''' assert len(rect) == 4, 'Rectangles must contain 4 coordinates' x0, y0, x1, y1 = rect assert x0 < x1, 'Invalid X coordinates' assert y0 < y1, 'Invalid Y coordinates' dx = x1 - x0 dy = y1 - y0 if dx > dy: scaled = float(dx) / dy upper_x, upper_y = 1.0, scaled else: scaled = float(dx) / dy upper_x, upper_y = scaled, 1.0 assert 0 < upper_x <= 1.0, 'Calculated upper X coordinate invalid' assert 0 < upper_y <= 1.0, 'Calculated upper Y coordinate invalid' return (0, 0, upper_x, upper_y)The function takes $xy$ co-ordinates for opposite corners of a rectangle, and *normalises* it, so that it is positioned at the origin, and the longest axis has length `1.0`. The first three assertions are *preconditions* that catch invalid inputs.```pythonassert len(rect) == 4, 'Rectangles must contain 4 coordinates'assert x0 < x1, 'Invalid X coordinates'assert y0 < y1, 'Invalid Y coordinates'``` We can test this by supplying input that we know to be 'bad': three co-ordinates, instead of four.print(normalize_rectangle( (0.0, 1.0, 2.0) )) # missing the fourth coordinateThe last two are *postconditions* that tell us whether the calculations were correct.```pythonassert 0 < upper_x <= 1.0, 'Calculated upper X coordinate invalid'assert 0 < upper_y <= 1.0, 'Calculated upper Y coordinate invalid'``` We *think* our code is correct, so we are not expecting any errors.print(normalize_rectangle( (0.0, 0.0, 5.0, 1.0) ))We see an *unexpected* error. This tells us that our calculations are wrong.Some inspection of the code should tell us that we need to swap over ```pythonif dx > dy: scaled = float(dx) / dy```to```pythonif dx > dy: scaled = float(dy) / dx```def normalize_rectangle(rect): '''Normalizes a rectangle so that it is at the origin and 1.0 units long on its longest axis.''' assert len(rect) == 4, 'Rectangles must contain 4 coordinates' x0, y0, x1, y1 = rect assert x0 < x1, 'Invalid X coordinates' assert y0 < y1, 'Invalid Y coordinates' dx = x1 - x0 dy = y1 - y0 if dx > dy: scaled = float(dy) / dx upper_x, upper_y = 1.0, scaled else: scaled = float(dx) / dy upper_x, upper_y = scaled, 1.0 assert 0 < upper_x <= 1.0, 'Calculated upper X coordinate invalid' assert 0 < upper_y <= 1.0, 'Calculated upper Y coordinate invalid' return (0, 0, upper_x, upper_y) print(normalize_rectangle( (0.0, 0.0, 5.0, 1.0) ))(0, 0, 1.0, 0.2)Snail and wellA snail falls at the bottom of a 125 cm well. Each day the snail rises 30 cm. But at night, while sleeping, slides 20 cm because the walls are wet. 
How many days does it take to escape from the well?TIP: http://puzzles.nigelcoldwell.co.uk/sixtytwo.htm Solution# Assign problem data to variables with representative names # well height, daily advance, night retreat, accumulated distance well_height_cm = 125 daily_advance_cm = 30 night_retreat_cm = 20 accumulated_distance_cm = 0 # Assign 0 to the variable that represents the solution days = 0 # Write the code that solves the problem # While the total distance traveled is less than the well height, keep looping while (accumulated_distance_cm < well_height_cm): #(1 day cycle) days += 1 # each loop adds a day to the counter (1 day cycle) accumulated_distance_cm += daily_advance_cm # add 30 cm to the total distance traveled if accumulated_distance_cm > well_height_cm: # if the total distance traveled is greater than the well height, end the loop break # end loop via break else: # otherwise continue to the night cycle #(1 night cycle) accumulated_distance_cm -= night_retreat_cm # Print the result with print('Days =', days) print('Days =', days) # The total days elapsed is 11 because on the 10th night the snail drops back to 100 cm; #the next morning the snail moves 30 cm for a total of 130 cm, which is 5 cm above the well height, #so the snail finally escapes the well!Days = 11Goals1. Treatment of variables2. Use of loop **while**3. Use of conditional **if-else**4. Print in console BonusThe distance traveled by the snail is now defined by a list.```advance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]```How long does it take to climb out of the well?What is its maximum displacement in one day? And its minimum?What is its average speed during the day?What is the standard deviation of its displacement during the day?import statistics # Assign problem data to variables with representative names # well height, daily advance, night retreat, accumulated distance advance_cm, well_height_cm, night_retreat_cm, accumulated_distance_cm= [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55], 125, 20, 0 # Assign 0 to the variable that represents the solution days = 0 # Total days elapsed # Write the code that solves the problem for comp_day in advance_cm: # treats each number in the list as one day's advance #(1 full day) days += 1 # add a day to the counter accumulated_distance_cm += comp_day # add the distance traveled this day to the total distance moved if accumulated_distance_cm > well_height_cm: # if the snail is above the well height, break out of the for loop break # exit loop via break else: # if not above the well height, the snail sleeps, so subtract 20 cm from the total height #(1 full night) accumulated_distance_cm -= night_retreat_cm # night time draw back # Print the result with print('Days =', days) print('Days =', days) # What is its maximum displacement in a day? And its minimum? max_disp, mini_disp = max(advance_cm), min(advance_cm) # the Python version of what was taught in the Google Sheets lesson print("Maximum Displacement = {0} \nMinimum Displacement = {1}".format(max_disp, mini_disp)) # What is its average progress? avrg_prog = sum(advance_cm)/len(advance_cm) # add all distances traveled and divide by the number of entries in the list print("Average Progress = {0}".format(avrg_prog)) # What is the standard deviation of your displacement during the day?
print("Standard Deviation = {0}".format(statistics.pstdev(advance_cm))) # Using imported statistics library from python to get the standard deviation.Days = 6 Maximum Displacment = 77 Minimum Displacement = 12 Average Progress = 38.09090909090909 Standard Deviation = 17.159437082600803只出现一次的数字* Slug: single-number* Date: 2018-06-25* Category: LeetCode* Tags: 数组, 算法* Author: timking* Summary: LeetCode - 探索 - 初级算法 > [原文链接](https://leetcode-cn.com/problems/single-number/description/) 给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现两次。找出那个只出现了一次的元素。**说明:**你的算法应该具有线性时间复杂度。 你可以不使用额外空间来实现吗?**示例 1:**```输入: [2,2,1]输出: 1```**示例 2:**```输入: [4,1,2,1,2]输出: 4``` 解答一开始没有想到什么好办法,题目限制死了不使用额外空间。而一开始的解答明显并不符合题意。class Solution: def singleNumber(self, nums): """ :type nums: List[int] :rtype: int """ sum_a = sum(nums) sum_b = sum(set(nums)) return sum_b * 2 - sum_a后来经过同事提示之后,发现,题目重点是**整数**, **出现两次**。 所以可以通过数字的二进制运算来做这道题目。将相同的两个数字异或,其值为0。所以将所有值异或一遍,就能得到最后只出现一次的元素,并且也满足题意。class Solution: def singleNumber(self, nums): """ :type nums: List[int] :rtype: int """ first = nums[0] for i in range(1, len(nums)): first ^= nums[i] return firstPodziałdf_blitz = df_sample[df_sample['Event_enc'] == 0] df_bullet = df_sample[df_sample['Event_enc'] == 1] df_classical = df_sample[df_sample['Event_enc'] == 2] df_correspondence = df_sample[df_sample['Event_enc'] == 3] df_blitz_features = df_blitz.drop('Result', axis = 1) df_bullet_features = df_bullet.drop('Result', axis = 1) df_classical_features = df_classical.drop('Result', axis = 1) df_correspondence_features = df_correspondence.drop('Result', axis = 1)DT BASICdecision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_train, y_train) classifier_accuracy = decision_tree_classifier.score(X_test, y_test) y_pred = decision_tree_classifier.predict(X_test) print(decision_tree_classifier.score(X_test, y_test)*100) print(classification_report(y_test, y_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))41.85826996028425 precision recall f1-score support 1 0.40370 0.45298 0.42692 18716 2 0.41823 0.41092 0.41454 18651 3 0.43756 0.39192 0.41348 18782 accuracy 0.41858 56149 macro avg 0.41983 0.41861 0.41831 56149 weighted avg 0.41985 0.41858 0.41831 56149BLITZX_blitz = df_blitz_features.values y_blitz = df_blitz['Result'].values X_blitz_train, X_blitz_test, y_blitz_train, y_blitz_test = train_test_split(X_blitz, y_blitz, test_size=0.1, random_state = 42) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_blitz_train, y_blitz_train) classifier_accuracy = decision_tree_classifier.score(X_blitz_test, y_blitz_test) y_blitz_pred = decision_tree_classifier.predict(X_blitz_test) print(decision_tree_classifier.score(X_blitz_test, y_blitz_test)*100) print(classification_report(y_blitz_test, y_blitz_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))41.0097192224622 precision recall f1-score support 1 0.38457 0.43765 0.40940 8340 2 0.40835 0.39698 0.40258 8406 3 0.44114 0.39708 0.41795 9182 accuracy 0.41010 25928 macro avg 0.41135 0.41057 0.40998 25928 weighted avg 0.41231 0.41010 0.41022 25928BULLETX_bullet = df_bullet_features.values y_bullet = df_bullet['Result'].values X_bullet_train, X_bullet_test, y_bullet_train, y_bullet_test = train_test_split(X_bullet, y_bullet, test_size=0.1, random_state = 42) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_bullet_train, y_bullet_train) classifier_accuracy = decision_tree_classifier.score(X_bullet_test, 
y_bullet_test) y_bullet_pred = decision_tree_classifier.predict(X_bullet_test) print(decision_tree_classifier.score(X_bullet_test, y_bullet_test)*100) print(classification_report(y_bullet_test, y_bullet_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))46.87477208081103 precision recall f1-score support 1 0.47053 0.52376 0.49572 5197 2 0.49413 0.47366 0.48368 5240 3 0.42129 0.37355 0.39599 3274 accuracy 0.46875 13711 macro avg 0.46198 0.45699 0.45846 13711 weighted avg 0.46779 0.46875 0.46730 13711CLASSICALX_classical = df_classical_features.values y_classical = df_classical['Result'].values X_classical_train, X_classical_test, y_classical_train, y_classical_test = train_test_split(X_classical, y_classical, test_size=0.1, random_state = 42) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_classical_train, y_classical_train) classifier_accuracy = decision_tree_classifier.score(X_classical_test, y_classical_test) y_classical_pred = decision_tree_classifier.predict(X_classical_test) print(decision_tree_classifier.score(X_classical_test, y_classical_test)*100) print(classification_report(y_classical_test, y_classical_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))39.911417322834644 precision recall f1-score support 1 0.35791 0.41791 0.38559 4879 2 0.38179 0.37891 0.38034 5025 3 0.45675 0.40066 0.42687 6352 accuracy 0.39911 16256 macro avg 0.39882 0.39916 0.39760 16256 weighted avg 0.40391 0.39911 0.40010 16256CORRESPONDENCEX_correspondence = df_correspondence_features.values y_correspondence = df_correspondence['Result'].values X_correspondence_train, X_correspondence_test, y_correspondence_train, y_correspondence_test = train_test_split(X_correspondence, y_correspondence, test_size=0.1, random_state = 42) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_correspondence_train, y_correspondence_train) classifier_accuracy = decision_tree_classifier.score(X_correspondence_test, y_correspondence_test) y_correspondence_pred = decision_tree_classifier.predict(X_correspondence_test) print(decision_tree_classifier.score(X_correspondence_test, y_correspondence_test)*100) print(classification_report(y_correspondence_test, y_correspondence_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))45.703125 precision recall f1-score support 1 0.28125 0.32143 0.30000 56 2 0.39706 0.40909 0.40299 66 3 0.58065 0.53731 0.55814 134 accuracy 0.45703 256 macro avg 0.41965 0.42261 0.42037 256 weighted avg 0.46782 0.45703 0.46167 256DT - STROJONYdecision_tree_classifier = DecisionTreeClassifier(max_depth=6, max_features=5) decision_tree_classifier.fit(X_train, y_train) classifier_accuracy = decision_tree_classifier.score(X_test, y_test) y_pred = decision_tree_classifier.predict(X_test) print(decision_tree_classifier.score(X_test, y_test)*100) print(classification_report(y_test, y_pred,digits = 5)) list(zip(X_train, decision_tree_classifier.feature_importances_*100))51.44348073874868 precision recall f1-score support 1 0.59594 0.35793 0.44724 18716 2 0.58504 0.41000 0.48213 18651 3 0.45667 0.77409 0.57445 18782 accuracy 0.51443 56149 macro avg 0.54588 0.51401 0.50127 56149 weighted avg 0.54573 0.51443 0.50138 56149RFfrom sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as pltBASICrf_classifier = RandomForestClassifier() 
rf_classifier.fit(X_train, y_train) classifier_accuracy = rf_classifier.score(X_test, y_test) y_pred = rf_classifier.predict(X_test) print(rf_classifier.score(X_test, y_test)*100) print(classification_report(y_test, y_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))44.36410265543464 precision recall f1-score support 1 0.44111 0.41943 0.43000 18716 2 0.44788 0.42711 0.43725 18651 3 0.44216 0.48419 0.46222 18782 accuracy 0.44364 56149 macro avg 0.44372 0.44357 0.44316 56149 weighted avg 0.44371 0.44364 0.44318 56149BLITZrf_classifier = RandomForestClassifier() rf_classifier.fit(X_blitz_train, y_blitz_train) classifier_accuracy = rf_classifier.score(X_blitz_test, y_blitz_test) y_blitz_pred = rf_classifier.predict(X_blitz_test) print(rf_classifier.score(X_blitz_test, y_blitz_test)*100) print(classification_report(y_blitz_test, y_blitz_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))43.9370564640543 precision recall f1-score support 1 0.42055 0.39317 0.40640 8340 2 0.44000 0.41744 0.42842 8406 3 0.45333 0.50142 0.47616 9182 accuracy 0.43937 25928 macro avg 0.43796 0.43734 0.43699 25928 weighted avg 0.43846 0.43937 0.43824 25928BULLETrf_classifier = RandomForestClassifier() rf_classifier.fit(X_bullet_train, y_bullet_train) classifier_accuracy = rf_classifier.score(X_bullet_test, y_bullet_test) y_bullet_pred = rf_classifier.predict(X_bullet_test) print(rf_classifier.score(X_bullet_test, y_bullet_test)*100) print(classification_report(y_bullet_test, y_bullet_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))49.20866457588797 precision recall f1-score support 1 0.50261 0.50087 0.50173 5197 2 0.52212 0.50897 0.51546 5240 3 0.43137 0.45113 0.44103 3274 accuracy 0.49209 13711 macro avg 0.48537 0.48699 0.48607 13711 weighted avg 0.49305 0.49209 0.49248 13711CLASSICALrf_classifier = RandomForestClassifier() rf_classifier.fit(X_classical_train, y_classical_train) classifier_accuracy = rf_classifier.score(X_classical_test, y_classical_test) y_classical_pred = rf_classifier.predict(X_classical_test) print(rf_classifier.score(X_classical_test, y_classical_test)*100) print(classification_report(y_classical_test, y_classical_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))42.384350393700785 precision recall f1-score support 1 0.39405 0.38307 0.38848 4879 2 0.40556 0.37174 0.38791 5025 3 0.45649 0.49638 0.47560 6352 accuracy 0.42384 16256 macro avg 0.41870 0.41706 0.41733 16256 weighted avg 0.42201 0.42384 0.42235 16256CORRESPONDENCErf_classifier = RandomForestClassifier() rf_classifier.fit(X_correspondence_train, y_correspondence_train) classifier_accuracy = rf_classifier.score(X_correspondence_test, y_correspondence_test) y_correspondence_pred = rf_classifier.predict(X_correspondence_test) print(rf_classifier.score(X_correspondence_test, y_correspondence_test)*100) print(classification_report(y_correspondence_test, y_correspondence_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))48.828125 precision recall f1-score support 1 0.33929 0.33929 0.33929 56 2 0.43103 0.37879 0.40323 66 3 0.57042 0.60448 0.58696 134 accuracy 0.48828 256 macro avg 0.44691 0.44085 0.44316 256 weighted avg 0.48393 0.48828 0.48541 256RF - STROJONYrf_classifier = RandomForestClassifier(n_estimators=100, max_depth=6, min_samples_leaf=8, min_samples_split=15, n_jobs=-1, bootstrap=True) rf_classifier.fit(X_train, y_train) classifier_accuracy = rf_classifier.score(X_test, y_test) y_pred = 
rf_classifier.predict(X_test) print(rf_classifier.score(X_test, y_test)*100) print(classification_report(y_test, y_pred,digits = 5)) list(zip(X_train, rf_classifier.feature_importances_*100))51.4114231776167 precision recall f1-score support 1 0.60550 0.35510 0.44766 18716 2 0.61613 0.35430 0.44989 18651 3 0.45323 0.83127 0.58662 18782 accuracy 0.51411 56149 macro avg 0.55829 0.51356 0.49473 56149 weighted avg 0.55810 0.51411 0.49489 56149D2df2 = pd.read_csv('../csv/d2_final.csv') df2.head() df2.drop(['Unnamed: 0','BlackElo','WhiteElo'], axis = 1, inplace = True) df2.rename(columns={'Event': 'Event_enc', 'Termination': 'Termination_enc'}, inplace = True) df2.head() print(df2.shape) feature_names = ['Event_enc', 'ECO_enc', 'Termination_enc', 'TimeControl_enc','EloDiff'] X2 = df2[feature_names].values y2 = df2['Result'].values print(X2.shape) print(y2.shape) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_train, y_train) y_pred = decision_tree_classifier.predict(X2) # print(decision_tree_classifier.score(y2, y_test)) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, decision_tree_classifier.feature_importances_*100)) decision_tree_classifier = DecisionTreeClassifier(criterion='entropy', max_depth=6, max_features=5) decision_tree_classifier.fit(X_train, y_train) y_pred = decision_tree_classifier.predict(X2) # print(decision_tree_classifier.score(y2, y_test)) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, decision_tree_classifier.feature_importances_*100)) rf_classifier = RandomForestClassifier() rf_classifier.fit(X_train, y_train) y_pred = rf_classifier.predict(X2) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, rf_classifier.feature_importances_*100)) rf_classifier = RandomForestClassifier(n_estimators=100, max_depth=6, min_samples_leaf=8, min_samples_split=15, n_jobs=-1, bootstrap=True) rf_classifier.fit(X_train, y_train) y_pred = rf_classifier.predict(X2) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, rf_classifier.feature_importances_*100))25.780712058001537 precision recall f1-score support 1 0.64985 0.18599 0.28920 47170 2 0.62187 0.19338 0.29502 42549 3 0.11745 0.93604 0.20870 9037 accuracy 0.25781 98756 macro avg 0.46306 0.43847 0.26431 98756 weighted avg 0.58908 0.25781 0.28434 98756DS2df2_sample = pd.read_csv('../csv/df2_sample.csv') df2_sample.head() df2_sample.drop(['BlackElo', 'WhiteElo'], axis = 1, inplace = True) df2_sample.rename(columns={'Event': 'Event_enc', 'Termination': 'Termination_enc'}, inplace = True) df2_sample.head() print(df2_sample.shape) feature_names = ['Event_enc', 'ECO_enc', 'Termination_enc', 'TimeControl_enc','EloDiff'] X2 = df2_sample[feature_names].values y2 = df2_sample['Result'].values print(X2.shape) print(y2.shape) decision_tree_classifier = DecisionTreeClassifier() decision_tree_classifier.fit(X_train, y_train) y_pred = decision_tree_classifier.predict(X2) # print(decision_tree_classifier.score(y2, y_test)) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, decision_tree_classifier.feature_importances_*100)) decision_tree_classifier = DecisionTreeClassifier(criterion='entropy', max_depth=6, max_features=5) decision_tree_classifier.fit(X_train, y_train) y_pred = decision_tree_classifier.predict(X2) # print(decision_tree_classifier.score(y2, y_test)) 
print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, decision_tree_classifier.feature_importances_*100)) rf_classifier = RandomForestClassifier() rf_classifier.fit(X_train, y_train) y_pred = rf_classifier.predict(X2) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, rf_classifier.feature_importances_*100)) rf_classifier = RandomForestClassifier(n_estimators=100, max_depth=6, min_samples_leaf=8, min_samples_split=15, n_jobs=-1, bootstrap=True) rf_classifier.fit(X_train, y_train) y_pred = rf_classifier.predict(X2) print(accuracy_score(y2, y_pred)*100) print(classification_report(y2, y_pred,digits = 5)) list(zip(X2, rf_classifier.feature_importances_*100))43.690752830954224 precision recall f1-score support 1 0.56973 0.18037 0.27400 9037 2 0.58755 0.19531 0.29317 9037 3 0.39772 0.93504 0.55807 9037 accuracy 0.43691 27111 macro avg 0.51833 0.43691 0.37508 27111 weighted avg 0.51833 0.43691 0.37508 27111**Analytic Antipodal Grasps**import numpy as np from manipulation import running_as_notebook from pydrake.all import( Variable, sin, cos, Evaluate, Jacobian, atan, MathematicalProgram, Solve, eq ) import matplotlib.pyplot as plt, mpld3 if running_as_notebook: mpld3.enable_notebook()Introduction to Symbolic Differentiation For this assignment, you will need [symbolic differentiation](https://en.wikipedia.org/wiki/Computer_algebra), supported by Drake's symbolic library. We will demonstrate how to use it with a simple function: $$T=\cos^2(x) + y^5$$and it's Jacobian (first-order derivative), $$J = \begin{pmatrix} \frac{\partial T}{\partial x} & \frac{\partial T}{\partial y} \end{pmatrix}=\begin{pmatrix} -2\cos(x)\sin(x) & 5y^4 \end{pmatrix}$$as well as the Hessian (second-order derivative), $$H = \begin{pmatrix} \frac{\partial^2 T}{\partial x^2} & \frac{\partial^2 T}{\partial x \partial y} \\ \frac{\partial^2 T}{\partial y \partial x} & \frac{\partial^2 T}{\partial y^2} \end{pmatrix}=\begin{pmatrix} 2 \sin^2(x) - 2\cos^2(x) & 0 \\ 0 & 20y^3 \end{pmatrix}$$Below are some snippets of how to define symbolic variables, differentiate expressions, and evaluate them using numerical values.# 1. Symbolic variables are defined x = Variable('x') y = Variable('y') # 2. Expressions can be written by composing operations on Variables. T = cos(x) ** 2.0 + y ** 5.0 print(T) # 3. Use Evaluate to query the numerical value of the expression given the variable values. # Use function for multi-dimensional quantities print(Evaluate(np.array([T]), {x: 3.0, y:5.0})) # Use method for scalar quantities print(T.Evaluate({x: 3.0, y:5.0})) # 4. Differentiate a quantity using Jacobian, or Differentiate. J = np.array([T.Differentiate(x), T.Differentiate(y)]) print(J) # Use method for scalar quantities J = T.Jacobian([x, y]) print(J) print(Evaluate(J, {x: 3.0, y:5.0})) # Use function for taking Jacobian of multi-dimensional quantities. H = Jacobian(J, [x, y]) print(H) print(Evaluate(H, {x: 3.0, y: 5.0}))Are the symbolic values of the Jacobian and Hessian what you expect? The Cycloidal GearNow we enter the main part of the problem. After graduating from MIT, you decide to work at a company producing cutting-edge [hypercycloidal gears](https://youtu.be/MBWkibie_5I?t=74). You are in charge of designing a robotic pick-and-place system for these parts. In order to reliably grasp the gears, you decide to use your knowledge of antipodal points. 
The mechanical design department gave you a pretty ugly parametric equation for what the shape looks like, which we won't even bother writing in latex! Instead, we provided it via the function `shape`. Given a angle in polar coordinates (parameter $t$), it returns $p(t)=[x(t),y(t)]$, a position in 2D. The below cell implements the function and shows you what the gear part looks like.def shape(t): x = (10*cos(t))-(1.5*cos(t+atan(sin(-9*t)/((4/3)-cos(-9*t)))))-(0.75*cos(10*t)) y = (-10*sin(t))+(1.5*sin(t+atan(sin(-9*t)/((4/3)-cos(-9*t)))))+(0.75*sin(10*t)) return np.array([x, y]) def plot_gear(): theta = np.linspace(0, 2*np.pi, 500) gear_shape = [] for i in range(500): gear_shape.append(Evaluate(shape(theta[i])).squeeze()) gear_shape = np.array(gear_shape) plt.axis("equal") plt.plot(gear_shape[:,0], gear_shape[:,1], 'k-') plot_gear()Grasp Energy FunctionHow can we analytically find a pair of antipodal points given the parametric equation of a shape? We make the following claim: **Claim**: Let $p(t_1)$ and $p(t_2)$ be a pair of antipodal points given in parametric space. Then $t_1$ and $t_2$ are critical points of the following energy function:$$E=\frac{1}{2}\kappa\|p(t_1)-p(t_2)\|^2$$that is, they satisfy $\frac{\partial E}{\partial \mathbf{t}}=[0, 0]$ where $\mathbf{t}=[t_1,t_2]$. For the subsequent problems, you may assume $\kappa=2$. **Problem 5.1.a** [2pts]: Prove the claim. \\**Problem 5.1.b** [2pts]: Prove that the converse may not necessarily hold. HINT: The derivative of $p(t)$ respect to $t$ gives the tangent 'velocity' vector: $v(t)=p'(t)$Write down your answer in a paper / pdf file, and submit to the Gradescope written submission section! Implementation**Problem 5.1.c** [4pts]Using this knowledge, we will write a Mathematical Program to find the antipodal points. Since we are looking for $t_1$ and $t_2$ such that the Jacobians is a zero vector, we are solving a root finding problem. Problems of this nature can still be transcribed as an instance of a Mathematical program; it simply doesn't have a cost. We will write down our problem as follows: $$\begin{aligned} \text{find} \quad & \mathbf{t} \\ \text{s.t.} \quad & \frac{\partial E}{\partial \mathbf{t}}(\mathbf{t}) = \mathbf{0} \\ \quad & 0 \leq \mathbf{t} \leq 2\pi \\ \quad & t_1 - t_2 \geq \varepsilon \end{aligned}$$The first constraint makes sure that they are critical points of the energy function, while the last two makes sure the points are not overlapping. You will write the following outer loop to check for the validity of solutions.1. Pick a random guess for $\mathbf{t}$ using [SetInitialGuess](https://drake.mit.edu/pydrake/pydrake.solvers.mathematicalprogram.html?highlight=setinitialguesspydrake.solvers.mathematicalprogram.MathematicalProgram.SetInitialGuess) by uniform sampling over $[0, 2\pi]$ (use `np.random.rand(2)`). 2. Using `MathematicalProgram`, solve the above problem. Remember there is no cost in this problem, so we simply only add the constraints. 3. If the solution is not valid (i.e. problem doesn't return success), repeat 1-2 with random guesses until a valid solution is found. 4. If a valid solution $\mathbf{t}^*$ is found, return the Eigenvalues of the Hessian of $E$ at $\mathbf{t}^*$. (Use `np.linalg.eigvals`)def find_antipodal_pts(shape): """ Finds antipodal points given the parametric function that describes the shape of the object. Args: - shape: function from parametric space t to position R2. 
Returns: - result: 2-dim np array that contains antipodal grasp locations parametrized by [t1, t2] - H_eig: 2-dim np array that contains eigenvalues of the Hessian. """ eps = 1e-3 # do not modify, but use it for epsilon variable above. ## Fill your code here result = np.array([0., 0.]) # modify here H_eig = np.array([0., 0.]) # modify here return result, H_eig
You can run the cell below to check the correctness of your implementation. As the constraint is nonlinear, it might take some time to compute. (Typically, the solve time will still be less than 2-3 seconds.)
def plot_antipodal_pts(pts, shape): antipodal_pts = [] for i in range(2): val = Evaluate(shape(pts[i])).squeeze() antipodal_pts.append(val) antipodal_pts = np.array(antipodal_pts) plt.scatter(antipodal_pts[:,0], antipodal_pts[:,1], color='red') plot_gear() result, H_eig = find_antipodal_pts(shape) plot_antipodal_pts(result, shape) print(H_eig)
Hessian Analysis
Why did we implement the Hessian? You may remember that the Hessian is used for the second-derivative test. For a function $f(x)$ with a critical point $x^*$, this critical point is: - A local minimum if the Hessian is positive-definite (i.e. all positive eigenvalues) - A local maximum if the Hessian is negative-definite (i.e. all negative eigenvalues) - A saddle point if the Hessian has mixed positive / negative eigenvalues. **Problem 5.1.d** [2pts] Describe what grasps the local minima, maxima, and saddle points correspond to in terms of the geometry of the object. In a very simple sentence, explain why you might prefer one configuration over another. HINT: The cell below will visualize each of the cases.
if (running_as_notebook): plt.subplot(1,3,1) plot_gear() plt.title("Local Minima") np.random.seed(45) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig > 0).all()): break plot_antipodal_pts(result, shape) plt.subplot(1,3,2) plot_gear() plt.title("Local Maxima") np.random.seed(4) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig < 0).all()): break plot_antipodal_pts(result, shape) plt.subplot(1,3,3) plot_gear() plt.title("Saddle Point") np.random.seed(13) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig[0] > 0) and (H_eig[1] < 0)): break plot_antipodal_pts(result, shape)
How will this notebook be Graded? If you are enrolled in the class, this notebook will be graded using [Gradescope](www.gradescope.com). You should have gotten the enrollment code in our announcement on Piazza. For submission of this assignment, you must do two things. - Download and submit the notebook `analytic_antipodal_grasps.ipynb` to Gradescope's notebook submission section, along with your notebook for the other problems. - Write down your answers to 5.1.a, 5.1.b, and 5.1.d in a separate pdf file and submit it to Gradescope's written submission section. We will evaluate the local functions in the notebook to see if the functions behave as we expect. For this exercise, the rubric is as follows: - [2 pts] 5.1.a is answered correctly. - [2 pts] 5.1.b is answered correctly.
- [4 pts] `find_antipodal_points` must be implemented correctly.- [2 pts] 5.1.d is answered correctly.from manipulation.exercises.clutter.test_analytic_grasp import TestAnalyticGrasp from manipulation.exercises.grader import Grader Grader.grade_output([TestAnalyticGrasp], [locals()], 'results.json') Grader.print_test_results('results.json')Playground for ASFPy module developmentimport os import csv import random import operator from copy import deepcopy from asfpy import asfpy from pathlib import Path FOLDER = "asfp2020" EDITORS_FILE = "processed-editors-list-draft.csv" APPLICANTS_FILE = "processed-applicants-list-draft.csv" MATCHINGS_FILE = "matchings-list-draft.csv" UNMATCHED_FILE = "unmatched-list-draft.csv" MANIFEST_FILE = "applicant-id-manifest-draft.csv" path = Path("__file__").parent.absolute() editors_filename = path.parent.joinpath(FOLDER).joinpath(EDITORS_FILE) editors = asfpy.read_preprocessed_editors_list_csv(editors_filename) applicants_filename = path.parent.joinpath(FOLDER).joinpath(APPLICANTS_FILE) applicants = asfpy.read_preprocessed_applicants_list_csv(applicants_filename) matchings_filename = path.parent.joinpath(FOLDER).joinpath(MATCHINGS_FILE) unmatched_filename = path.parent.joinpath(FOLDER).joinpath(UNMATCHED_FILE) applicant_id_manifest_filename = path.parent.joinpath(FOLDER).joinpath(MANIFEST_FILE) _editors = deepcopy(editors) _applicants = deepcopy(asfpy.randomize(applicants)) p_applicants = asfpy.prioritize(_applicants) allocation_output = asfpy.allocate(p_applicants, _editors)Allocation Output HandlingThe output from running the allocation method includes a list of matchings, a list of unmatched applicants by identifier strings, and a list of editors _after_ decrementing capacities. Handle saving matchingsdyads = asfpy.format_matchings(allocation_output["matchings"], applicants, editors) unmatched_applicants = asfpy.compile_unmatched(allocation_output["unmatched"], p_applicants) asfpy.write_list_to_csv(dyads, matchings_filename) asfpy.write_list_to_csv(asfpy.format_unmatched(unmatched_applicants), unmatched_filename) asfpy.write_list_to_csv(asfpy.format_applicant_id_manifest(p_applicants), applicant_id_manifest_filename) print("Number of matchings: ", len(allocation_output["matchings"])) print("Number of unmatched: ", len(allocation_output["unmatched"])) print("Capacity left by editors: ", asfpy.capacity(allocation_output["editors"])) asfpy.capacity(asfpy.editors_by_categories(allocation_output["editors"], {'Developmental Psychology'}))New editors by capacity and unmatched applicantsThis pattern should be used to re-run the allocation for a given set of editors and applicants. In particular, this would be re-run _if_ editors increase capacity. 
The applicants remain in the same priority ordering from the first allocation by using `p_applicants`.
new_capacity_editors = allocation_output["editors"] unmatched_ids = allocation_output["unmatched"] ## Use p_applicants to keep rank order unmatched_applicants = asfpy.compile_unmatched(unmatched_ids, p_applicants) asfpy.allocate(unmatched_applicants, new_capacity_editors)["matchings"] asfpy.format_unmatched(unmatched_applicants)
Hypothesis: logistic regression will be the best model on this data.
train_df = pd.read_csv(Path('Resources/2019loans.csv')) test_df = pd.read_csv(Path('Resources/2020Q1loans.csv')) train_df.head() # Drop redundant columns and the target column to create the X training data X_train = train_df.drop(['Unnamed: 0', 'index', 'loan_status'], axis=1) X_train # One-hot encode (get_dummies) the entire X training dataframe X_dummies_train = pd.get_dummies(X_train) print(X_dummies_train.columns) X_dummies_train # loan status is the target y_train = train_df['loan_status'] y_train y_train_label = LabelEncoder().fit_transform(train_df['loan_status']) # Drop redundant columns and the target column to create the X testing data X_test = test_df.drop(['Unnamed: 0', 'index', 'loan_status'], axis=1) X_test # One-hot encode (get_dummies) the entire X testing dataframe X_dummies_test = pd.get_dummies(X_test) print(X_dummies_test.columns) X_dummies_test # add missing dummy variables to testing set for col in X_dummies_train.columns: if col not in X_dummies_test.columns: X_dummies_test[col] = 0 # loan status is the target y_test = test_df['loan_status'] y_test # Do I need to convert the categorical data to numeric? y_test_label = LabelEncoder().fit_transform(test_df['loan_status']) # Train the Logistic Regression model on the unscaled (one-hot encoded) data and print the model score from sklearn.linear_model import LogisticRegression classifier = LogisticRegression() classifier.fit(X_dummies_train, y_train_label) print(f"Training Data Score: {classifier.score(X_dummies_train, y_train_label)}") print(f"Testing Data Score: {classifier.score(X_dummies_test, y_test_label)}") # Train a Random Forest Classifier model and print the model score from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(random_state=1).fit(X_dummies_train, y_train) print(f'Training Score: {clf.score(X_dummies_train, y_train)}') print(f'Testing Score: {clf.score(X_dummies_test, y_test)}') # Scale the data and rerun the models from sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit(X_dummies_train) X_train_scaled = scaler.transform(X_dummies_train) X_test_scaled = scaler.transform(X_dummies_test) # Train the Logistic Regression model on the scaled data and print the model score clf = LogisticRegression().fit(X_train_scaled, y_train) print(f'Training Score: {clf.score(X_train_scaled, y_train)}') print(f'Testing Score: {clf.score(X_test_scaled, y_test)}') predictions = classifier.predict(X_dummies_test) pd.DataFrame({"Prediction": predictions, "Actual": y_test_label}) # Train a Random Forest Classifier model on the scaled data and print the model score clf = RandomForestClassifier(random_state=1).fit(X_train_scaled, y_train) print(f'Training Score: {clf.score(X_train_scaled, y_train)}') print(f'Testing Score: {clf.score(X_test_scaled, y_test)}') # How do I assess which model performed better?
Do I need to use a confusion matrix and assess multiple elements?Training Score: 1.0 Testing Score: 0.6548277328796257Connect to Google Drive%%capture import google.colab.drive google.colab.drive.mount('/content/gdrive', force_remount=True)Install Spark and dependenciesimport os os.environ['HADOOP_VERSION'] = '2.7' os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-8-openjdk-amd64' os.environ['SPARK_HOME'] = '/opt/spark' os.environ['SPARK_VERSION'] = '2.4.3' %%capture !wget -qN https://archive.apache.org/dist/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop$HADOOP_VERSION.tgz !tar -xzf spark-$SPARK_VERSION-bin-hadoop$HADOOP_VERSION.tgz -C /opt !rm spark-$SPARK_VERSION-bin-hadoop$HADOOP_VERSION.tgz !rm -rf /opt/spark !ln -s /opt/spark-$SPARK_VERSION-bin-hadoop$HADOOP_VERSION /opt/spark !pip install -q findsparkCreate SparkSessionimport findspark findspark.init() from pyspark.sql import SparkSession spark = SparkSession.builder.master('local[*]').getOrCreate()Read filesimport json import pyspark.sql.functions as F import pyspark.sql.types as T DATA_PATH = '/content/gdrive/My Drive/dataset/adressa/one_week' with open(os.path.join(DATA_PATH, 'schema', 'news_features.json')) as file: news_features_schema = T.StructType.fromJson(json.load(file)) df_news_features = spark.read.json(os.path.join(DATA_PATH, 'news_features'), schema=news_features_schema)Check resultsdf_news_features.show(truncate=False)+----------------------------------------+-----------+-----------------------+---------------------+ |newsId |publishtime|categoryList |categoryVector | +----------------------------------------+-----------+-----------------------+---------------------+ |ed39fa29deca0717bc6ff43a099076a6e542a050|1233225890 |[] |(30,[2],[1.0]) | |daf0bcd2b4ddab61ce760835fa8a42c01775ef35|1407826736 |[pluss, nyheter] |(30,[0,1],[1.0,1.0]) | |7c146f6c7357bc08cb019c5f59fd3e13b2690682|1415446217 |[pluss, okonomi] |(30,[1,7],[1.0,1.0]) | |65d83b9b75b5322281f2970bd3707127df673ee0|1420835007 |[pluss, nyheter] |(30,[0,1],[1.0,1.0]) | |e07df239f1934efebedf70a4985f9ef66a9fe307|1421269371 |[pluss, nyheter] |(30,[0,1],[1.0,1.0]) | |814c6262b09fc2865a84e9fbd0eb9873304643f0|1421408924 |[] |(30,[2],[1.0]) | |8efad3dcccf799278064c20832ee3766cab2f23d|1422177445 |[nyheter, trondheim] |(30,[0,3],[1.[...]Number of itemsdf_news_features.count() df_news_features.select(F.column('newsId')).distinct().count()Disk usage!du -sh /content/gdrive/My\ Drive/dataset/adressa/one_week/news_features195K /content/gdrive/My Drive/dataset/adressa/one_week/news_features`category` vocabularyfrom pyspark.ml.feature import CountVectorizerModel category_count_vectorizer = CountVectorizerModel.load(os.path.join(DATA_PATH, 'model', 'category_count_vectorizer')) print('index\tvalue') for i, x in enumerate(category_count_vectorizer.vocabulary): print(f'{i}\t{x}')index value 0 nyheter 1 pluss 2 3 trondheim 4 100sport 5 sortrondelag 6 nordtrondelag 7 okonomi 8 vintersport 9 fotball 10 meninger 11 innenriks 12 kultur 13 magasin 14 utenriks 15 sjakk 16 sprek 17 ordetfritt 18 moreromsdal 19 andreidretter 20 idrettspolitikk 21 migration catalog 22 ballsport 23 mesterskap 24 tema 25 arets tronder 26 forbruker 27 kuriosa 28 hjem 29 politikkThis is an easy to use single function implementation of CCDF plots, using `matplotlib.pylab` as a dependency. This would take as input a graph and output a CCDF plot. Users are able to control every other aspect of it just as if it were a `matplotlib.pylab` plot object.gType can be loglog, semilogx, semilogy, or linear. 
The default is loglog base 10. basex and/or basey can be specified to alter the base.import matplotlib.pylab as plt import networkx as nx import numpy as np import netwulf import random import copy import sys import os import random as rnd #G = nx.Graph([(0,1),(0,2),(0,5),(0,6),(0,7),(0,8),(2,3),(2,5),(4,5),(3,5)]) def CCDF(G, gType='loglog', fmt = '', xlabel = 'Degree, k', ylabel = 'Pr(K>=k)', title = None, **kwargs): degs = nx.degree(G) kmax = 0 karr = [] for _,k in degs: karr.append(k) if(k > kmax): kmax = k c, b = np.histogram(karr, bins=[i for i in range(kmax+2)], density=True) a = np.cumsum(c) a = np.insert(a,0,0) if(gType == 'loglog'): plt.loglog(b[1:-1], 1-a[1:-1], fmt, **kwargs) elif(gType == 'semilogx'): plt.semilogx(b[1:-1], 1-a[1:-1], fmt, **kwargs) elif(gType == 'semilogy'): plt.semilogy(b[1:-1], 1-a[1:-1], fmt, **kwargs) elif(gType == 'linear'): plt.plot(b[1:-1], 1-a[1:-1], fmt, **kwargs) else: raise Exception('gType was specified incorrectly. Please specify loglog, semilogx, semilogy, or linear.') plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.show() return # ############# # # Test code # # ############# # G = nx.Graph([(0,1),(0,2),(0,5),(0,6),(0,7),(0,8) # ,(2,3),(2,5),(4,5),(3,5)]) # nx.draw(G) # plt.show() # CCDF(G, 'semilogx', basex = 2, fmt = '-or') # CCDF(G, 'linear', fmt = '--gs') # CCDF(G, ylabel = 'y axis', xlabel = 'x axis', # title = 'Title', basex = 5, basey = 7)An easy to use single function implementation of other distribution plots. It would be similar to the \texttt{seaborn.distplot} function, but it would use \texttt{matplotlib.pyplot} as a base. It would be fully customizable by the user. It would also come with the option of fitting a curve to the distribution, and choosing whether it is a histogram, scatterplot, or other form of plot.# ############# # # Test code # # ############# # # problem 1a # fname = 'karate.gml' # print(f'reading the network file : {fname}\n') # ##### do not modify above here ##### # # your code goes here # G = nx.read_gml(fname, label = None) # CCDF(G, 'linear')A function that allows users to easily input their own motif functions and search the network for them (for instance, the user could input a string, such as \texttt{"i, j, k, i->j, j->k, i->k, i != k"} for feed-forward loops). This function would allow for more complex motifs to be checked, and for users to specify return type to be the number of motifs, or a list of every node i for which there exists the inputted motif, or an array containing how many times that motif occurs per node (for instance, if the motif occurred for nodes 2, 3, and 5, and twice for node 6, the outputted array would be [0, 1, 1, 0, 1, 2]). 
There would also be an option to return a 2-D array with the first column being the values of all nodes that have the motif and the second being the previously mentioned output, excluding arrays that don't have the motif.A function that returns the number of feed-forward loop motifs, and the number of feedback loop motifs, with customization options similar to the function above.#fname = 'karate.gml' #G = nx.read_gml(fname, label = None) def fbl_count(G): count = 0 tempG = G for i in tempG.edges: for j in tempG.edges(nbunch = i[1]): for k in tempG.edges(nbunch = j[1]): if(k[1] == i[0]): count = count + 1 return (int)(count / 3) def ffl_count(G): count = 0 tempG = G for i in tempG.edges: for j in tempG.edges(nbunch = i[1]): for k in tempG.edges(nbunch = i[0]): if(j[1] == k[1]): count = count + 1 return count # ## This function ignores subgraph isomorphisms? (maybe output this thingy to a new graph only # ## consisting of the things and then do stuff) # #motif = nx.DiGraph([(0,1),(1,2),(2,0)]) # motif = nx.DiGraph([(0,1),(1,2),(0,2)]) # fname = 'metabolism_afulgidus.gml' # print(f'reading the network file : {fname}\n') # ##### do not modify above here ##### # G = nx.read_gml(fname, label = None) # #G = nx.DiGraph([(0, 1),(0,2),(0,3),(1,2),(1,3),(2,3)]) # nx.draw_networkx(motif,arrows=True,with_labels=True,node_size=600,node_color='r',width=2) # plt.show() # nx.draw_networkx(G,arrows=True,with_labels=True,node_size=600,node_color='r',width=2) # plt.show() def nodes_connected(G, u, v): ## This is so annoying. like seriously why isn't this implemented already guys. smh. return v in G.neighbors(u) #print(nodes_connected(motif, 0, 1)) #print(nodes_connected(motif, 1, 0)) #SPECIFY TYPE eg type='list', type='count', etc. def motif_count_3d(G, motif, rtype = 'list'): # step 1: get a matrix from the motif thing. connection_list = [] big_list = [] mo_matrix = nx.to_numpy_matrix(motif) #return mo_matrix big_sum = np.sum(mo_matrix) mo_matrix = np.array(mo_matrix) #len(mo_matrix) = 3 small_list = [] count=0 for i in G.nodes(): for j in G.nodes(): for k in G.nodes(): if(i != j and j != k and i != k): #check interconnected counts if(np.sum(nx.to_numpy_matrix(G.subgraph([i, j, k]))) >= big_sum): #big_list.append([i, j, k]) i2 = [i, j, k] yeet = True for j2, k2 in enumerate(mo_matrix): #print(j) for l2, m2 in enumerate(k2): if(m2 != 0): if not nodes_connected(G, i2[j2], i2[l2]): yeet = False #print(yeet) if yeet == True: #print("worked") small_list.append(i2) # if(count % 1000 == 0): # print(count) sys.stdout.write('\r') sys.stdout.write(str("Number so far: " + str(count))) sys.stdout.flush() count+=1 if(rtype == 'list'): return small_list else: return(len(small_list)) # big_list = motif_count_3d(G, motif) # print(big_list) # print(len(big_list)) #[[0. 1. 1.] # [0. 0. 1.] # [0. 0. 0.]] for FFLs. # [[0. 1. 0.] # [0. 0. 1.] # [1. 0. 0.]] for FBLs. 
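Before reusing `ffl_count`, `fbl_count`, or `motif_count_3d` on a real network, a quick sanity check on minimal hand-built graphs is cheap. The two three-node graphs below are illustrative toys, assuming `networkx` is imported as `nx` as above and the counting functions defined above are in scope.
# Sanity check on the smallest possible motifs (toy graphs, illustrative only).
import networkx as nx

ffl_toy = nx.DiGraph([(0, 1), (1, 2), (0, 2)])   # exactly one feed-forward loop
fbl_toy = nx.DiGraph([(0, 1), (1, 2), (2, 0)])   # exactly one feedback loop (3-cycle)

print(ffl_count(ffl_toy))   # expected 1
print(fbl_count(fbl_toy))   # expected 1 (found 3 times, divided by 3 inside the function)
print(ffl_count(fbl_toy))   # expected 0: a pure cycle contains no feed-forward loop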
import os #print(ffl_count(G)) #print(fbl_count(G)) #import copy #import random as rnd def drawGz(G,z, colors=None, nsize = 600, flabel = True, orderthresh = 50, orderthreshsize = 100, width = 2, **kwargs): if(colors == None): colors = ['#d61111','#11c6d6','#d67711','#11d646','#1b11d6','#d611cc'] # map node labels to colors (for the visualization) node_colors = [] for i in G.nodes(): node_colors.append(colors[int(z[i])]) if G.order() > orderthresh: nsize = orderthreshsize nx.draw_networkx(G,with_labels=flabel,node_size=nsize,width=width,node_color=node_colors, **kwargs) # draw it pretty limits=plt.axis('off') # turn off axes plt.show() return def DCSBM(G, reps = 20, groups = 2, maxphase = 30, output = 'all'): #################################### # Begin function definition block # ################################### def random_z(n,c): random.seed() z = np.zeros([n,1], dtype=int) for i in range(0, n): z[i] = np.floor(random.random() * c) return z def tabulate_ek(G,z,c): ers = np.zeros([c,c]) kpr = np.zeros([c,1]) for i in np.unique(z): for j in np.unique(z): c = 0 for k, l in enumerate(z): if(l == i): for m in G.edges(k): if(z[m[1]] == j): c = c + 1 ers[i][j] = c kpr = np.sum(ers, axis=1) return ers,kpr def dcsbm_LogL(ers,kpr): c = ers.shape[1] logL = 0 for r in range(c): for s in range(c): if(ers[r,s] < 1 or kpr[r] < 1 or kpr[s] < 1): temp = 0 else: temp = ers[r,s] * np.log(ers[r,s] / (kpr[r]*kpr[s])) logL = logL + temp return logL def makeAMove(G,z,c,f): bestL = -np.inf bestMove = [-1 -1] for i in G.nodes(): #print(z) if f[i] == 0: s = int(z[i]) for r in range(c): z2 = copy.deepcopy(z) z2[i] = r y = tabulate_ek(G,z2,c) x = dcsbm_LogL(y[0], y[1]) if bestL < x: bestL = x bestMove = [i, r] return bestL,bestMove def clear(): os.system( 'cls' ) os.system('clear') def drawGz(G,z): colors = ['#d61111','#11c6d6','#d67711','#11d646','#1b11d6','#d611cc'] node_colors = [] for i in G.nodes(): node_colors.append(colors[int(z[i])]) nsize = 600 flabel = True if G.order() > 50: nsize = 100 flabel = False nx.draw_networkx(G,with_labels=flabel,node_size=nsize,width=2,node_color=node_colors) # draw it pretty limits=plt.axis('off') # turn off axes plt.show() return ################################## # End function definition block # ################################# countt = 0 LL = [] Lstarz = [] zstarz = [] while(reps != 0): c = groups n = G.order() T = maxphase Lt = np.zeros([1,(n+1)]) zt = np.zeros([n,(n+1)], dtype=int) flag_converged = 0 z = random_z(n,c) ers,kpr = tabulate_ek(G,z,c) Lt[0,0] = dcsbm_LogL(ers,kpr) zt[:,0] = z[:,0] LL.append(Lt[0,0]) pc = 0 while not flag_converged: f = np.zeros([n,1],dtype=int) if(pc == 0): Lstar = -np.inf zstar = zt[:,0] LL = [] Lstar1 = copy.deepcopy(Lstar) for j in range(n): b = np.transpose(zt)[j] h = makeAMove(G, b, c, f) f[h[1][0]] = 1 LL.append(h[0]) zstar[h[1][0]] = h[1][1] cc = np.transpose(zt) cc[j+1] = zstar zt = np.transpose(cc) Lstar = max(LL) if(Lstar <= Lstar1): flag_converged = 1 pc = pc + 1 T = T - 1 if(T == 0): break Lstarz.append(Lstar) zstarz.append(zstar) val = Lstar countt += 1 reps -= 1 clear() sys.stdout.write('\r') sys.stdout.write(str("Number of reps completed: " + str(countt) + '\r')) sys.stdout.flush() print() Lstar = max(Lstarz) zstar = zstarz[Lstarz.index(max(Lstarz))] if(output == 'all' or output == 'L'): print("Optimal L: " + str(Lstar)) if(output == 'all' or output == 'graph'): drawGz(G,zstar) return zstar # do number of sims and depth of each sim as parameters. 
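For reference, the quantity that `dcsbm_LogL` maximizes is the degree-corrected SBM log-likelihood, L = sum over group pairs (r, s) of e_rs * log(e_rs / (k_r * k_s)), where e_rs counts edge ends between groups r and s and k_r is the total degree of group r. The sketch below recomputes that quantity for a fixed two-group partition of a toy graph directly from the adjacency matrix, without calling the helpers above; the graph and partition are made up for illustration.
# Standalone sketch of the DCSBM log-likelihood for a fixed partition.
# Convention matches tabulate_ek above: within-group edges contribute twice to e_rr.
import numpy as np
import networkx as nx

G_toy = nx.Graph([(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3), (0, 3)])  # two triangles + a bridge
z_toy = np.array([0, 0, 0, 1, 1, 1])                                        # fixed 2-group labels

A = nx.to_numpy_array(G_toy, nodelist=sorted(G_toy.nodes()))
Z = np.eye(2)[z_toy]          # one-hot membership matrix, shape (n, 2)
ers = Z.T @ A @ Z             # edge-end counts between groups
kpr = ers.sum(axis=1)         # total degree of each group

logL = sum(ers[r, s] * np.log(ers[r, s] / (kpr[r] * kpr[s]))
           for r in range(2) for s in range(2) if ers[r, s] > 0)
print(logL)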
# fname1 = 'karate.gml' # Go = nx.read_gml('./' + fname1, label='id') # G = nx.convert_node_labels_to_integers(Go) # map node names to integers (0:n-1) [because indexing] # DCSBM(G, groups = 3) def mean_degree(G, out = True): c = 0 for i in G.nodes(): try: if(out == True): c = c + G.out_degree(i) else: c = c + G.degree(i) except: c = c + G.degree(i) return c / G.number_of_nodes() G = nx.DiGraph([(0,1),(1,2),(2,0)]) nx.draw(G) print(mean_degree(G, out = True)) def Plague(G = None, Gtype = 'erdos', erdosval = 0.15, node_num = 50, mode = 'Game', ethics = 'Good', difficulty = 'Brutal', starttype = 'random', starters = None, numstarters = 4, vaccines = 'on', quarantines = 'on', antivax = 0, vaxcost = 100, startermoney = 500, allowance = 200, quarantinecost = 300, beta = 0.6, gamma = 0.3, curechance = 0, icost = 50, dcost = 50, ccost = 300, zcost = 400, betainc = 0.02, gammadec = 0.02, campchance = 0.1): turnNum = 0 money = startermoney gameOver = False if((mode != 'Game' and mode != 'Simulation') or (Gtype != 'erdos') or (difficulty != 'Baby' and difficulty != 'Custom' and difficulty != 'Normal' and difficulty != 'Brutal' and difficulty != 'Mega Brutal' and difficulty != 'Impossible') or (starttype != 'random' and starttype != 'choice' and starttype != 'high degree') or (vaccines != 'on' and vaccines != 'off') or (quarantines != 'on' and quarantines != 'off')): raise Exception("Something is wrong with your inputs. ") if(starters != None): numstarters = len(starters) if(difficulty == 'Baby'): if(ethics == 'Good'): startermoney = 600 allowance = 300 vaxcost = 100 quarantinecost = 300 beta = 0.5 gamma = 0.5 if(ethics == 'Evil'): G = nx.erdos_renyi_graph(30, 0.25) startermoney = 600 allowance = 300 if(difficulty == 'Normal'): if(ethics == 'Good'): startermoney = 500 allowance = 250 beta = 0.5 gamma = 0.25 if(ethics == 'Evil'): G = nx.erdos_renyi_graph(40, 0.15) startermoney = 500 allowance = 150 if(difficulty == 'Brutal'): if(ethics == 'Good'): startermoney = 500 allowance = 150 beta = 0.6 gamma = 0.25 if(ethics == 'Evil'): G = nx.erdos_renyi_graph(40, 0.05) startermoney = 500 allowance = 100 if(G == None): G = nx.erdos_renyi_graph(node_num, erdosval) if(difficulty == 'Mega Brutal'): if(ethics == 'Good'): G = nx.erdos_renyi_graph(70, 0.08) startermoney = 500 allowance = 100 vaxcost = 100 quarantinecost = 300 beta = 0.75 gamma = 0.1 antivax = 0.2 numstarters = 4 starttype = 'random' campchance = 0.1 # 4 random starters if(ethics == 'Evil'): G = nx.erdos_renyi_graph(70, 0.05) startermoney = 400 allowance = 100 icost = 50 dcost = 50 ccost = 300 zcost = 400 betainc = 0.02 gammadec = 0.02 numstarters = 3 starttype = 'random' if(difficulty == 'Impossible'): if(ethics == 'Good'): G = nx.erdos_renyi_graph(70, 0.15) startermoney = 200 allowance = 50 vaxcost = 100 quarantinecost = 300 beta = 1 gamma = 0.1 antivax = 0.4 numstarters = 6 starttype = 'high degree' campchance = 0.1 if(ethics == 'Evil'): G = nx.erdos_renyi_graph(70, 0.02) startermoney = 200 allowance = 50 icost = 50 dcost = 50 ccost = 300 zcost = 400 betainc = 0.02 gammadec = 0.02 numstarters = 1 starttype = 'random' if(mode == 'Simulation'): vaccines = 'off' quarantines = 'off' ethics = 'Good' # numstarters z = np.zeros(G.number_of_nodes()) # Tested all start types. Works. 
if(starttype == 'random'): #initialize z randomly a = random.sample(range(0, G.number_of_nodes()), numstarters) for i in a: z[i] = 1 #print(z) elif(starttype == 'high degree'): count = copy.deepcopy(numstarters) finals = [] eye = [] jay = [] for i in G.nodes(): eye.append(i) jay.append(G.degree[i]) #print(jay) while(count != 0): loc = jay.index(max(jay)) finals.append(loc) #print(loc[0]) jay[loc] = -1 count -= 1 #print(count) #print(jay) for i in finals: #print(i) z[i] = 1 #print(z) #print(jay) elif(starttype == 'choice'): for i in starters: z[i] = 1 #print(z) def Menu(): if(ethics == 'Good'): print('Turn number - ' + str(turnNum)) print('Money - ' + str(money)) print('\nCommands: ') print('N - Visualize netwulf representation') if(quarantines == 'on'): print('Q - Quarantine a node') if(vaccines == 'on'): print('V - Vaccinate a node') print('P - Progress to the next turn') print('H - View the help menu') print('E - Exit the game') elif(ethics == 'Evil'): print('Turn number - ' + str(turnNum)) print('Money - ' + str(money)) print('\nCommands: ') print('N - Visualize netwulf representation') print('I - Make your disease more infectious') print('D - Make your disease less deadly') print('C - Create an anti-vaxx marketing campaign') print('Z - Infect a node') print('P - Progress to the next turn') print('H - View the help menu') print('E - Exit the game') else: raise Exception("ethics is not correctly specified.") inp = input() return(inp) while(gameOver == False): #start of a turn #print("are you looping") turnNum += 1 money += allowance turnOver = False while(turnOver == False): inp = Menu() if(inp == 'N'): netwulf.visualize(zToG(G, z)) elif(inp == 'P'): turnOver = True elif(inp == 'E'): raise Exception("You are a quitter.") elif(inp == 'H'): print("A few tips:") print("In the netwulf interface, press 'post to python' to exit. Otherwise it might glitch.") print("Green nodes are susceptible, yellow nodes are infected, and red nodes are dead.") print("Blue nodes are immune, black nodes are quarantined, and pink nodes are anti-vaxx.") print("Quarantined nodes update at the end of each turn.") print("More information can be found on the github page: https://github.com/thekadenh/betternx/") if(ethics == 'Good'): if(inp == 'Q'): yn = input(str("Do you want to quarantine a node for $" + str(quarantinecost) + "? (y/n)")) if(yn == 'y'): if(money >= quarantinecost): money -= quarantinecost q = input("Which node do you want to quarantine? ") q = int(q) zq = z[q] if(zq == 2): print(str(q) + " is literally dead. You're quarantining a dead person. Nice one.") if(zq == 3): print(str(q) + " can't get the virus. No idea why you're quarantining it but you do you.") if(zq == 4): print("I mean... technically you CAN quarantine " + str(q) + " twice... It's not against the rules or anything...") if(zq == 5): print("Excellent choice") z[q] = 4 else: z[q] = 4 else: print("Sorry, you're too poor") elif(inp == 'V'): yn = input(str("Do you want to vaccinate a node for $" + str(vaxcost) + "? (y/n)")) if(yn == 'y'): if(money >= vaxcost): money -= vaxcost q = input("Which node do you want to vaccinate? ") q = int(q) zq = z[q] if(zq == 1): print(str(q) + " is already infected, dummy. Vaccination doesn't do anything at this point.") elif(zq == 2): print(str(q) + "... They're dead.") elif(zq == 3): print("You make... questionable... 
decisions.") elif(zq == 4): print("They're quarantined.") elif(zq == 5): print("They refuse, citing the 100% true nonrefutable fact that vaccinations cause autism.") else: z[q] = 3 else: print("Sorry, you're too poor") elif(ethics == 'Evil'): if(inp == 'I'): yn = input(str("Do you want to increase beta by " + str(betainc) + " for $" + str(icost) + "? (y/n)")) if(yn == 'y'): if(money >= icost): beta = beta + betainc money -= icost else: print("Sorry, you're too poor") elif(inp == 'D'): yn = input(str("Do you want to decrease gamma by " + str(gammadec) + " for $" + str(dcost) + "? (y/n)")) if(yn == 'y'): if(money >= dcost): gamma = gamma - gammadec money -= dcost else: print("Sorry, you're too poor") elif(inp == 'C'): yn = input(str("Do you want to fund an anti-vax campaign for $" + str(ccost) + "? (y/n)")) if(yn == 'y'): if(money >= ccost): money -= ccost for i in G.nodes(): if(z[i] == 0): if(random.random() < campchance): z[i] == 5 else: print("Sorry, you're too poor") elif(inp == 'Z'): yn = input(str("Do you want to infect ANY node for $" + str(zcost) + "? (y/n)")) if(yn == 'y'): if(money >= zcost): money -= zcost nod = input(str("Which node would you like to infect?")) nod = int(nod) znod = z[nod] if(znod == 2): print("You brought back node " + str(nod) + " from the dead. The zombie apocalypse is imminent!") z[nod] = 1 if(znod == 3): print("You used voodoo magic to anti-vaccinate node " + str(nod) + ".") z[nod] = 1 if(znod == 4): print("You successfully broke node " + str(nod) + " out of the quarantine!") z[nod] = 1 if(znod == 5): print("I mean... node " + str(nod) + " basically wanted to get infected anyway") z[nod] = 1 else: z[nod] = 1 else: print("Sorry, you're too poor") # at the end of the turn cycle through the quarantined ones and separate the edges. # bunch=[(1,2),(2,3)] # G.remove_edges_from(ebunch) if(turnOver == True): for i, j in enumerate(z): if(j == 4): for k in G.nodes(): G.remove_edges_from([(i, k)]) #Then, run a cycle of the thingy z2 = copy.deepcopy(z) for i, j in enumerate(z): if(j == 1): # it's infected, time to spread! for k in G.edges(i): if(z[k[1]] == 0 or z[k[1]] == 5): if(random.random() < beta): z2[k[1]] = 1 if(random.random() < gamma): z2[i] = 2 z = z2 if not 1 in z: gameOver = True #drawGz(G, z) sus = 0 dead = 0 recovered = 0 qud = 0 antvd = 0 for i in z: if(i == 0): sus+=1 if(i == 2): dead+=1 if(i == 3): recovered+=1 if(i == 4): qud+=1 if(i == 5): antvd+=1 print("Game over! Thanks for playing.") print("I'm not going to tell you how well you did. 
That's for you to decide.") print("However, these stats may offer some insight.\n") print("# Susceptible: " + str(sus)) print("# Dead: " + str(dead)) print("# Vaccinated or recovered: " + str(recovered)) print("# Quarantined: " + str(qud)) print("# Alive Anti-vaxxers: " + str(antvd)) #Plague(node_num=10)
Modern Control
Topics
* State-space representation of a system
* State transition matrix
* Stability
* Controllability
* Observability
* Feedback
* Optimal regulator
* Servo systems
* Observers
State-Space Representation of a System. Let the state, input, and output vectors be $x(t) \in R^n, u(t) \in R^m, y(t) \in R^r$, respectively. With constant matrices $A \in R^{n\times n}, B \in R^{n\times m}, C \in R^{r\times n}$, a linear system is expressed as $$\begin{aligned}\dot{x} &= Ax(t) + Bu(t) \\ y &= Cx(t)\end{aligned}$$
import numpy as np class LinearSystem: def __init__(self, A, B, C): self.A = A self.B = B self.C = C def dot(self, x, u): return self.A*x + self.B*u def output(self, x): return self.C*x
State Transition Matrix. The matrix exponential $e^{At}$ is called the state transition matrix. It is defined as $$e^{At} = I + At + \frac{1}{2!}A^2t^2 + \cdots + \frac{1}{k!}A^kt^k + \cdots$$ The main way to obtain the state transition matrix in closed form is via the inverse Laplace transform.
def StateTransitionMat(A, t, k=20): # Approximation method (truncated Taylor series) E = np.identity(A.shape[0]) # identity term a = np.identity(A.shape[0]) for i in range(1,k+1): a = (a @ A)*t/float(i) E += a return E A = np.array([[1,0],[0,1]]) print(StateTransitionMat(A, 0.01, 1)) print(StateTransitionMat(A, 0.01, 10)) print(StateTransitionMat(A, 0.01, 100)) print(StateTransitionMat(A, 0.01, 1000)) print(StateTransitionMat(A, 0.1, 1000)) print(StateTransitionMat(A, 1.0, 1000)) print(StateTransitionMat(A, 10., 10)) print(StateTransitionMat(A, 10., 100)) print(StateTransitionMat(A, 10., 1000))
[[ 1.01 0. ] [ 0. 1.01]] [[ 1.01005017 0. ] [ 0. 1.01005017]] [[ 1.01005017 0. ] [ 0. 1.01005017]] [[ 1.01005017 0. ] [ 0. 1.01005017]] [[ 1.10517092 0. ] [ 0. 1.10517092]] [[ 2.71828183 0. ] [ 0. 2.71828183]] [[ 12842.30511464 0. ] [ 0. 12842.30511464]] [[ 22026.46579481 0. ] [ 0. 22026.46579481]] [[ 22026.46579481 0. ] [ 0. 22026.46579481]]
Stability. The eigenvalues $\lambda \in C$ and eigenvectors $v \in R^n$ of a square matrix $A \in R^{n\times n}$ satisfy $$Av_i = \lambda_i v_i, \quad i = 1, 2, \cdots, n$$ The system is stable when the real parts of all eigenvalues $\lambda_i$ are negative.
# Compute eigenvalues and eigenvectors import numpy.linalg as LA A = np.array([[1, 2], [3, 4]]) values, vectors = LA.eig(A) print(values) print(vectors) # Computing the eigenvalues of A by hand, e1 = (5 + np.sqrt(33))/2. e2 = (5 - np.sqrt(33))/2. v1 = np.array([1, (3+np.sqrt(33))/4.]) v1 /= LA.norm(v1) v2 = np.array([1, (3-np.sqrt(33))/4.]) v2 /= LA.norm(v2) print (e1) print (e2) print (v1) print (v2) # numpy.linalg returns eigenvectors as columns
[-0.37228132 5.37228132] [[-0.82456484 -0.41597356] [ 0.56576746 -0.90937671]] 5.37228132327 -0.372281323269 [ 0.41597356 0.90937671] [ 0.82456484 -0.56576746]
Documentation de l'API :https://github.com/GeneralMills/pytrends Les tendances sur les recherches Codes associés aux termes de recherche :- : '/m/047drb0' (Ancien Premier ministre français)- : '/m/0fqmlm' (Ancien Ministre de l’Écologie, du Développement durable et de l’Énergie)- : '/m/0551vp' (Homme politique)- : '/m/02y2cb' (Homme politique)- : '/m/02qg4z' (Président de la République française).... 
a completer avec tous les candidats, et aussi faire une liste de partis politiquesfrom trendsAPI import TrendReq # API non officielle TrendReq('mdp', 'user', custom_useragent=None).suggestions("")Fonction qui sauvegarde les requetes via l'API en JSONdef trends_to_json(query='candidats', periode='7d', geo='FR'): from trendsAPI import TrendReq # API non officielle import json status = '' # Formats possibles pour la date : now 1-H, now 4-H, now 1-d, now 7-d, today 1-m, today 3-m, today 12-m, # all (depuis 2004), {vide} 5 dernieres annees periodes = {'1h': 'now 1-H', '4h': 'now 4-H', '1d': 'now 1-d', '7d': 'now 7-d', '1m': 'today 1-m','3m': 'today 3-m', '12m': 'today 12-m', 'all': 'all'} # Les termes de recherche (5 au maximum separes par des virgules) # On associe a un type de recherche la liste des parametres correspondants queries = {'candidats': '/m/047drb0, /m/0fqmlm', 'partis': 'a completer'} # se referer a la table de correspondance ci-dessus if (query not in queries) or (periode not in periodes): return 'Erreur de parametre' try: # Connection to Google (use a valid Google account name for increased query limits) pytrend = TrendReq('user', 'mdp', custom_useragent=None) # Possibilite de periode personnalise : specifier deux dates (ex : 2015-01-01 2015-12-31) # geographie : FR (toute France), FR-A ou B ou C... (region de France par ordre alphabetique) # categorie politique : cat = 396 payload = {'q': queries[query], 'geo': geo, 'date': periodes[periode]} # On fait tourner l'API qui renvoie un dictionnaire format JSON data = pytrend.trend(payload, return_type='json') # print(data) # Sauvegarde en JSON out_file = open(query + '_' + periode + '.json', 'w') json.dump(data, out_file, indent=4) out_file.close() status = 'sauvegarde dans : ' + query + '_' + periode + '.json' except: status = 'Erreur' return statusOn lance la fonctiontrends_to_json(query='candidats', periode='1h')Liste de sujets liés aux élections# toprelated trend_payload = {'q': 'elections', 'geo': 'FR', 'date': 'now 24-H'} toprelated = pytrend.related(trend_payload, related_type='top') print(toprelated) # comment lire cela? 
et l'afficher proprement dans une liste?Autres analysesfrom pytrends.request import TrendReq google_username = "" google_password = "" path = "" # connect to Google pytrend = TrendReq(google_username, google_password, custom_useragent='My Pytrends Script') trend_payload = {'q': 'Pizza, Italian, Spaghetti, Breadsticks, Sausage', 'cat': '0-71', 'date': 'today 1-m'} # trend df = pytrend.trend(trend_payload, return_type='dataframe') print(df) # toprelated trend_payload = {'q': 'elections', 'date': 'today 1-m'} toprelated = pytrend.related(trend_payload, related_type='top') print(toprelated) risingrelated = pytrend.related(trend_payload, related_type='rising') print(risingrelated) # top30in30 top30in30 = pytrend.top30in30() print(top30in30) country_payload = {'geo': 'US'} # hottrends hottrends = pytrend.hottrends(country_payload) print(hottrends) # hottrendsdetail # returns XML data hottrendsdetail = pytrend.hottrendsdetail(country_payload) print(hottrendsdetail) payload = {'date': '201601', 'geo': 'US'} # alltopcharts topcharts = pytrend.topcharts(payload) print(topcharts) keyword = 'pizza' # suggestions suggestions = pytrend.suggestions(keyword) print(suggestions)I Sectionsec_1 = df[df['section']=='I.'] sec_1.reset_index(inplace=True, drop=True) sec_1_1 = sec_1.iloc[::,:2:].T sec_1_1.drop([0,1], axis=1, inplace=True) sec_1_1.columns = sec_1_1.loc[1] sec_1_1.drop(index=1, inplace=True) sec_1_1.reset_index(inplace=True, drop=True) sec_1_2 =sec_1.iloc[2::,5:7:].T sec_1_2.columns = sec_1_2.loc[6] sec_1_2.drop(index=6, inplace=True) sec_1_2.reset_index(inplace=True, drop=True) sec_1_final = pd.concat([sec_1_1,sec_1_2], axis=1) sec_1_final.head()II Sectionsec_2 = df[df['section']=='II.'].copy(deep=True) sec_2.reset_index(inplace=True, drop=True) sec_2.shape sec_2.columns = sec_2.loc[2][:6].tolist() + sec_2.loc[3][6:].tolist() sec_2 = sec_2.iloc[5::,:-2:] col2=[u'Enliste e indique la relaci\xf3n entre el declarante con el c\xf3nyuge, concubina o concubinario y todo dependiente econ\xf3mico', u'Nacionalidad', u'\xbfHa desempe\xf1ado un cargo de gobierno \nen los \xfaltimos 5 a\xf1os?', u'Dependencia o entidad p\xfablica', u'Cargo, puesto o funci\xf3n', u'Periodo'] sec_2 = sec_2[col2] sec_2 = check_empty(sec_2) sec_2.reset_index(inplace=True, drop=True) sec_2.head()III Sectionsec_3 = df[df['section']=='III.'].copy(deep=True) sec_3.reset_index(inplace=True, drop=True) sec_3.shape subsets = ['1.','1.1'] def sub_sections(x): if (type(x)!=float)&(type(x)!=int): for i in subsets: #print x, i if x.split(' ')[0]==i: return i return np.nan sec_3['section'] = sec_3[1].apply(lambda x: sub_sections(x)) x='drop' x_l=[] for i in sec_3['section'].tolist(): if type(i)==str: x=i x_l.append(x) sec_3['section']=x_l sec_3 = sec_3[sec_3['section']!='drop'] sec_3_1 = sec_3[sec_3['section']=='1.'] sec_3_1.reset_index(inplace=True, drop=True) sec_3_1 = sec_3_1.iloc[2::,::8].T.copy(deep=True) sec_3_1.columns = sec_3_1.loc[1] sec_3_1.drop(1, axis=0, inplace=True) col_3_1=[u'Remuneraci\xf3n neta anual del declarante por cargos p\xfablicos:', # u'Nota 1: Incluye sueldos, honorarios, compensaciones, bonos y otras prestaciones.', # u'Otros ingresos del declarante:', u'\u2013 Por actividades industrial, empresarial o comercial', # u'Nota 2: Se refiere a ingresos por actividades industriales, empresariales o comerciales en M\xe9xico y en el extranjero.', u'\u2013 Por actividad financiera', # u'Nota 3: Se refiere a ingresos por rendimiento de valores o contratos bancarios, plusval\xedas de participaci\xf3n accionaria e ingresos por 
pr\xe9stamos. ', u'\u2013 Por servicios profesionales', # u'Nota 4: Los ingresos por servicios profesionales pueden incluir la participaci\xf3n en consejos, consultor\xedas o asesor\xedas de forma permanente u ocasional, en M\xe9xico y en el extranjero.', u'\u2013 Por otras actividades', # u'Nota 5: Se refiere a ingresos por arrendamientos, regal\xedas, sorteos, concursos, donaciones, entre otros.', u'Ingreso neto anual total del declarante:'] sec_3_1 = sec_3_1[col_3_1] sec_3_1 = check_empty(sec_3_1) sec_3_1.reset_index(inplace=True, drop=True) sec_3_1.head() sec_3_1_1 = sec_3[sec_3['section']=='1.1'] sec_3_1_1.reset_index(inplace=True, drop=True) sec_3_1_1 = sec_3_1_1.iloc[::,:-3:8].T.copy(deep=True) sec_3_1_1.columns = sec_3_1_1.loc[1] col3_1_1=[#u'1.1 Ingreso anual neto del del C\xd3NYUGE, CONCUBINA O CONCUBINARIO Y/O DEPENDIENTES ECON\xd3MICOS entre el 1\xb0 de enero y el 31 de diciembre del a\xf1o inmediato anterior', #u'El ingreso neto anual del c\xf3nyuge, concubina o concubinario y dependientes econ\xf3micos se refiere a los ingresos netos \u2013despu\xe9s de impuestos\u2013 que tuvieron en el \xfaltimo a\xf1o fiscal concluido por actividades en cargos p\xfablicos, actividad industrial o comercial, actividad financiera, servicios profesionales \u2013incluyendo participaciones en consejos, consultor\xedas o asesor\xedas\u2013 as\xed como cualquier otro ingreso por actividades diversas. No es necesario desagregar por tipo de ingreso. Las cifras expresadas en esta secci\xf3n se declara en moneda nacional (MXN).', u'Ingreso anual neto del c\xf3nyuge, concubina o concubinario:', u'Ingreso anual neto de otros dependientes econ\xf3micos:', u'Ingreso neto anual total del c\xf3nyuge, concubina o concubinario y/o dependientes econ\xf3micos:'] sec_3_1_1 = sec_3_1_1[col3_1_1] sec_3_1_1.drop(1, axis=0, inplace=True) sec_3_1_1 = check_empty(sec_3_1_1) sec_3_1_1.reset_index(inplace=True, drop=True) sec_3_1_1.head()IV Sectionsec_4 = df[df['section']=='IV.'].copy(deep=True) sec_4.reset_index(inplace=True, drop=True) sec_4.shape subsets = ['1.', '1.1','2.', '2.1','3.', '3.1','4.','5.'] sec_4['section'] = sec_4[1].apply(lambda x: sub_sections(x)) print(sec_4['section']) x='drop' x_l=[] for i in sec_4['section'].tolist(): if type(i)==str: x=i x_l.append(x) sec_4['section']=x_l sec_4 = sec_4[sec_4['section']!='drop'] sec_4.reset_index(inplace=True, drop=True) sec_4_1 = sec_4[sec_4['section']=='1.'] sec_4_1.reset_index(inplace=True, drop=True) sec_4_1.columns = sec_4_1.loc[2].tolist()[:1] + sec_4_1.loc[3].tolist()[1:3] + sec_4_1.loc[2].tolist()[3:] sec_4_1 = sec_4_1.iloc[4:-1:,:-2:] sec_4_1 = check_empty(sec_4_1) sec_4_1.reset_index(inplace=True, drop=True) sec_4_1.head() sec_4_1_1 = sec_4[sec_4['section']=='1.1'] sec_4_1_1.reset_index(inplace=True, drop=True) sec_4_1_1.columns = sec_4_1_1.loc[2] sec_4_1_1 = sec_4_1_1.iloc[4:-1:,:-5:] col4_1_1=[u'Tipo de bien declarado', u'Pa\xeds donde se encuentra el bien inmueble', u'Entidad federativa y municipio o delegaci\xf3n donde se encuentra el bien inmueble', u'Forma de operaci\xf3n en la que se adquiri\xf3 el bien inmueble', u'A\xf1o en que se realiz\xf3 la adquisici\xf3n del bien inmueble', u'Titular del bien inmueble declarado'] sec_4_1_1 = sec_4_1_1[col4_1_1] sec_4_1_1 = check_empty(sec_4_1_1) sec_4_1_1.reset_index(inplace=True, drop=True) sec_4_1_1.head() sec_4_2 = sec_4[sec_4['section']=='2.'] sec_4_2.reset_index(inplace=True, drop=True) sec_4_2.columns = sec_4_2.loc[2] sec_4_2 = sec_4_2.iloc[4:-1:,:-2:] col4_2=[u'Tipo de veh\xedculo', u'Marca', 
u'Modelo', u'Pa\xeds donde est\xe1 registrado el veh\xedculo', u'Forma de operaci\xf3n en la que se adquiri\xf3 el veh\xedculo', u'A\xf1o en que se realiz\xf3 la adquisici\xf3n del veh\xedculo', u'Valor del veh\xedculo', u'Moneda', u'Titular del veh\xedculo'] sec_4_2 = sec_4_2[col4_2] sec_4_2 = check_empty(sec_4_2) sec_4_2.reset_index(inplace=True, drop=True) sec_4_2.head() sec_4_2_1 = sec_4[sec_4['section']=='2.1'] sec_4_2_1.reset_index(inplace=True, drop=True) sec_4_2_1.columns = sec_4_2_1.loc[2] sec_4_2_1 = sec_4_2_1.iloc[3::,:-6:] col4_2_1=[u'Tipo de veh\xedculo', u'Pa\xeds donde est\xe1 registrado el veh\xedculo', u'Forma de operaci\xf3n en la que se adquiri\xf3 el veh\xedculo', u'A\xf1o en que se realiz\xf3 la adquisici\xf3n del veh\xedculo', u'Titular del veh\xedculo'] sec_4_2_1 = sec_4_2_1[col4_2_1] sec_4_2_1 = check_empty(sec_4_2_1) sec_4_2_1.reset_index(inplace=True, drop=True) sec_4_2_1.head() sec_4_3 = sec_4[sec_4['section']=='3.'] sec_4_3.reset_index(inplace=True, drop=True) sec_4_3.columns = sec_4_3.loc[2] sec_4_3 = sec_4_3.iloc[4:-1:,:-4:] col4_3=[u'Tipo de bien mueble', u'Forma de operaci\xf3n en la que se adquiri\xf3 el bien mueble', u'A\xf1o en que se realiz\xf3 la adquisici\xf3n del bien mueble', u'Valor estimado del bien mueble', u'Moneda', u'Titular del bien mueble'] sec_4_3 = sec_4_3[col4_3] sec_4_3 = check_empty(sec_4_3) sec_4_3.reset_index(inplace=True, drop=True) sec_4_3.head() sec_4_3_1 = sec_4[sec_4['section']=='3.1'] sec_4_3_1.reset_index(inplace=True, drop=True) sec_4_3_1.columns = sec_4_3_1.loc[2] sec_4_3_1 = sec_4_3_1.iloc[4:-1:,:-7:] col4_3_1=[u'Tipo de bien mueble', u'Forma de operaci\xf3n en la que se adquiri\xf3 el bien mueble', u'A\xf1o en que se realiz\xf3 la adquisici\xf3n del bien mueble', u'Titular del bien mueble'] sec_4_3_1 = sec_4_3_1[col4_3_1] sec_4_3_1 = check_empty(sec_4_3_1) sec_4_3_1.reset_index(inplace=True, drop=True) sec_4_3_1.head() sec_4_4 = sec_4[sec_4['section']=='4.'] sec_4_4.reset_index(inplace=True, drop=True) sec_4_4.columns = sec_4_4.loc[2].tolist()[:5] + sec_4_4.loc[3].tolist()[5:8] + sec_4_4.loc[2].tolist()[8:] sec_4_4 = sec_4_4.iloc[4:-1:,:-2:] col4_4=[u'Tipo de inversi\xf3n', u'Pa\xeds donde est\xe1 constituida la entidad donde se encuentra la inversi\xf3n', u'Nombre o raz\xf3n social de la entidad', u'Menor o igual a $100,000.00', u'Entre $100,000.01 y $500,000.00', u'Mayor o igual a $500,000.01', u'Moneda de la inversi\xf3n', u'Titularidad de la inversi\xf3n'] sec_4_4 = sec_4_4[col4_4] sec_4_4 = check_empty(sec_4_4) sec_4_4.reset_index(inplace=True, drop=True) sec_4_4.head()V Sectionsec_5 = df[df['section']=='V.'] sec_5.reset_index(inplace=True, drop=True) sec_5.columns = sec_5.loc[2] sec_5 = sec_5.loc[3::,::].copy(deep=True) sec_5.reset_index(inplace=True, drop=True) sec_5 = sec_5.iloc[:-2:,:-6:] def all_5_nan(row): row = row[1:] if len(row.unique())==1: return 'drop' else: return 'keep' sec_5['tag'] = sec_5.apply(lambda row: all_5_nan(row), axis=1) sec_5 = sec_5[sec_5['tag'] =='keep'].copy(deep=True) sec_5.drop('tag', axis=1,inplace=True) col4_5=[u'No. 
de nota', u'Secci\xf3n a la que hace referencia', u'Subsecci\xf3n a la que hace referencia', u'Descripci\xf3n de la nota aclaratoria'] sec_5 = sec_5[col4_5] sec_5 = check_empty(sec_5) sec_5.reset_index(inplace=True, drop=True) sec_5.head() table_list = {'sec_1':sec_1_final, 'sec_2':sec_2, #'sec_2_1': sec_2_1_1,'sec_2_2':sec_2_2, 'sec_2_2_1':sec_2_2_1,'sec_2_3':sec_2_3,'sec_2_3_1':sec_2_3_1,'sec_2_4':sec_2_4,'sec_2_5':sec_2_5, 'sec_3_1':sec_3_1, 'sec_3_1_1':sec_3_1_1,# 'sec_3_3':sec_3_3, 'sec_4_1':sec_4_1, 'sec_4_1_1':sec_4_1_1, 'sec_4_2':sec_4_2, 'sec_4_2_1':sec_4_2_1, 'sec_4_3':sec_4_3, 'sec_4_3_1':sec_4_3_1, 'sec_4_4':sec_4_4, #'sec_4_5':sec_4_5, 'sec_5':sec_5} for key in table_list.keys(): print(key) table_list[key].to_csv('Output/Patrimonio/'+key+'pat.csv', index=0,encoding='utf-8' ) the_json = {sec_1_final['Nombre:'].values[0] + '_' + str(df[10][0]).split(' ')[0]: {'sec_1':sec_1_final.to_json(), 'sec_2':sec_2.to_json(), 'sec_3':{ 'sec_3_1':sec_3_1.to_json(), 'sec_3_1_1':sec_3_1_1.to_json(), }, 'sec_4':{ 'sec_4_1':sec_4_1.to_json(), 'sec_4_1_1':sec_4_1_1.to_json(), 'sec_4_2':sec_4_2.to_json(), 'sec_4_2_1':sec_4_2_1.to_json(), 'sec_4_3':sec_4_3.to_json(), 'sec_4_3_1':sec_4_3_1.to_json(), 'sec_4_4':sec_4_4.to_json() }, 'sec_5':sec_5.to_json(), } } with open('Output/patrimonio_'+sec_1_final['Nombre:'].values[0].replace(' ','') + '_' + str(df[10][0]).split(' ')[0] + '.json', 'w') as fp: json.dump(the_json, fp)MASK FACE# Mathematical Function to Return Perpendicular Distance of a Point from a Line def get_distance_from_point_to_line(point, line_point1, line_point2): (p,q) = point (x1,y1) = line_point1 (x2,y2) = line_point2 A = (y2-y1) B = (x1-x2) distance = np.abs( A*p + B*q - A*x1 - B*y1 ) / np.sqrt( A**2 + B**2 ) return int(distance) # Mathematical Function to Return Rotated Co-ordinates of a Point around a Reference Point (Origin) def get_point_rotation(point,origin,angle): (p,q) = point (x,y) = origin rotated_p = x + np.cos(angle)*(p-x) - np.sin(angle)*(q-y) rotated_q = y + np.sin(angle)*(p-x) + np.cos(angle)*(q-y) return [int(rotated_p),int(rotated_q)] # Function to Wear Mask to the Human Faces found in a given Input Image def Mask_Faces (image_path,mask_image_path): masked_faces_image = Image.open(image_path) image = face_recognition.load_image_file(image_path) mask_img = Image.open(mask_image_path) # Get Face Landmark Co-ordinates face_landmarks = face_recognition.face_landmarks(image) # Return if no Faces found or Required Landmarks not found if len(face_landmarks)==0: # print("No Faces found in "+image_path.split('/')[-1]+" !") return None for face in face_landmarks: if 'nose_bridge' not in face or 'chin' not in face : continue # Nose Point (Top of Mask) nose_bridge = face['nose_bridge'] nose_point = ((np.array(nose_bridge[0])+np.array(nose_bridge[1]))/2).astype(np.uint64) chin = face['chin'] chin_len = len(chin) # Chin Points (Bottom, Left and Right of Mask) chin_bottom_point = np.array(chin[chin_len // 2]) chin_left_point = np.array(chin[chin_len // 8]) chin_right_point = np.array(chin[chin_len * 7 // 8]) # Dimensions for the Mask width = mask_img.width height = mask_img.height width_ratio = 1.15 new_mask_height = int(np.linalg.norm(nose_point - chin_bottom_point)) # Prepare Left Half of the Mask with appropriate Size mask_left_img = mask_img.crop((0, 0, width // 2, height)) mask_left_width = get_distance_from_point_to_line(chin_left_point, nose_point, chin_bottom_point) mask_left_width = int(mask_left_width * width_ratio) mask_left_img = mask_left_img.resize((mask_left_width, 
new_mask_height)) # Prepare Right Half of the Mask with appropriate Size mask_right_img = mask_img.crop((width // 2, 0, width, height)) mask_right_width = get_distance_from_point_to_line(chin_right_point, nose_point, chin_bottom_point) mask_right_width = int(mask_right_width * width_ratio) mask_right_img = mask_right_img.resize((mask_right_width, new_mask_height)) # Join the 2 Halves to Produce the New Mask Image with the Correct Size new_mask_size = (mask_left_img.width + mask_right_img.width, new_mask_height) new_mask_img = Image.new('RGBA', new_mask_size) new_mask_img.paste(mask_left_img, (0, 0), mask_left_img) new_mask_img.paste(mask_right_img, (mask_left_img.width, 0), mask_right_img) # Calculate Angle of Rotation (Tilted Face) and Rotate the Mask angle_radian = np.arctan2(chin_bottom_point[1] - nose_point[1], chin_bottom_point[0] - nose_point[0]) rotation_angle_radian = (np.pi/2) - angle_radian rotation_angle_degree = (rotation_angle_radian*180)/np.pi rotation_center = (mask_left_width,new_mask_height//2) rotated_mask_img = new_mask_img.rotate(rotation_angle_degree,expand=True,center = rotation_center) # Calcualate Co-ordinates for Pasting the Mask on the Input Image center_x = (nose_point[0] + chin_bottom_point[0]) // 2 center_y = (nose_point[1] + chin_bottom_point[1]) // 2 mask_corner_points = [[center_x - mask_left_width, center_y - (new_mask_height//2)], [center_x + mask_right_width, center_y - (new_mask_height//2)], [center_x + mask_right_width, center_y + (new_mask_height//2)], [center_x - mask_left_width, center_y + (new_mask_height//2)]] # Make Sure Image Dimentions doesn't exceed 99999 rotated_mask_topleft_corner = np.array([99999,99999]) for point in mask_corner_points: rotated_mask_topleft_corner = np.minimum(rotated_mask_topleft_corner, get_point_rotation(point,(center_x,center_y),-rotation_angle_radian)) # Paste the Mask on Image and Return it masked_faces_image.paste(rotated_mask_img, (rotated_mask_topleft_corner[0],rotated_mask_topleft_corner[1]), rotated_mask_img) return masked_faces_image # Try on a single Image FILE_PATH = "drive/My Drive/Social_Distancing_with_AI/Training_Data/without_mask/155.jpg" MASK_IMAGE_PATH = "drive/My Drive/Social_Distancing_with_AI/Mask_Images/default_mask.png" masked_face_image = Mask_Faces(FILE_PATH,MASK_IMAGE_PATH) plt.figure(figsize=(10,10)) plt.subplot(1,2,1) plt.title("Original") plt.imshow(Image.open(FILE_PATH)) plt.subplot(1,2,2) plt.title("Masked") plt.imshow(masked_face_image) plt.show() # Function to Mask Faces present in all Images within a directory def Generate_Masked_Images (images_path,save_path,mask_image_path): print("Augmenting Images, Please Wait !") # Loop through all the Files for file_name in tqdm.notebook.tqdm(os.listdir(images_path)): # Mask and Save in the Specified save_path try: masked_face_image = Mask_Faces(os.path.join(images_path,file_name),mask_image_path) masked_face_image.save(os.path.join(save_path,file_name.split('.')[0]+"_masked."+ file_name.split('.')[-1])) except: continue print("Done !") # Call the Function for a Directory INPUT_DIR = "drive/My Drive/Social_Distancing_with_AI/Training_Data/without_mask" SAVE_DIR = "drive/My Drive/Social_Distancing_with_AI/Training_Data/augmented" MASK_IMAGE_PATH = "drive/My Drive/Social_Distancing_with_AI/Mask_Images/default_mask.png" Generate_Masked_Images (INPUT_DIR,SAVE_DIR,MASK_IMAGE_PATH)BLURRING EFFECTS# Define Blurring Kernel Size Ranges, a Random Size would be chosen in the Specified Ranges # Greater the Size, Higher is the Blurring Effect (Adjustments can 
be made according to the needs) motion_blur_kernel_range = (6,10) average_blur_kernel_range = (3,9) gaussian_blur_kernel_range = (3,10) # Set Blurring Kernels to Use and their associated Probabilities Blurring_Kernels = ["none","motion","gaussian","average"] Probs = [0.1,0.4,0.25,0.25] # Add Motion Blur to an Image in a Random Direction def motion_blur(img): # Choose a Random Kernel Size kernel_size = np.random.randint(motion_blur_kernel_range[0],motion_blur_kernel_range[1]) kernel = np.zeros((kernel_size, kernel_size)) # Random Selection of Direction of Motion Blur types = ["vertical","horizontal","main_diagonal","anti_diagonal"] choice = np.random.choice(types) if choice=="vertical": kernel[:,int((kernel_size-1)/2)] = np.ones(kernel_size)/kernel_size elif choice=="horizontal": kernel[int((kernel_size-1)/2),:] = np.ones(kernel_size)/kernel_size elif choice=="main_diagonal": for i in range(kernel_size): kernel[i][i] = 1/kernel_size elif choice=="anti_diagonal": for i in range(kernel_size): kernel[i][kernel_size-i-1] = 1/kernel_size # Convolve and Return the Blurred Image return cv2.filter2D(img,-1,kernel) # Add a Random Blur Effect to an Image with a Random Kernel Size (in the Specified Ranges) def get_blurred_picture(img_path): # Randomly choose a Blurring Technique choice = np.random.choice(Blurring_Kernels,p=Probs) # Load Image img = cv2.imread(img_path) if choice=="none": random_blurred_img = img elif choice=="motion": random_blurred_img = motion_blur(img) elif choice=="gaussian": kernel_size = np.random.randint(gaussian_blur_kernel_range[0],gaussian_blur_kernel_range[1]) if kernel_size%2==0: kernel_size-=1 random_blurred_img = cv2.GaussianBlur(img,(kernel_size,kernel_size),0) elif choice=="average": kernel_size = np.random.randint(average_blur_kernel_range[0],average_blur_kernel_range[1]) random_blurred_img = cv2.blur(img,(kernel_size,kernel_size)) # Return Blurred Image return random_blurred_img # Try on a single Image FILE_PATH = "drive/My Drive/Social_Distancing_with_AI/Training_Data/without_mask/95.jpg" blurred_image = get_blurred_picture(FILE_PATH) plt.figure(figsize=(10,10)) plt.subplot(1,2,1) plt.title("Original") plt.imshow(Image.open(FILE_PATH)) plt.subplot(1,2,2) plt.title("Blurred") plt.imshow(blurred_image[:,:,::-1]) plt.show() # Function to Randomly Blur all Images within a directory def Blur_Images (images_path,save_path): print("Blurring Images, Please Wait !") # Loop through all the Files for file_name in tqdm.notebook.tqdm(os.listdir(images_path)): # Mask and Save in the Specified save_path try : blurred_image = get_blurred_picture(os.path.join(images_path,file_name)) cv2.imwrite(os.path.join(save_path,file_name.split('.')[0]+"_blurred."+\ file_name.split('.')[-1]),blurred_image) except: continue print("Done !") # Call the Function for a Directory INPUT_DIR = "drive/My Drive/Social_Distancing_with_AI/Training_Data/without_mask" SAVE_DIR = "drive/My Drive/Social_Distancing_with_AI/Training_Data/augmented" Blur_Images (INPUT_DIR,SAVE_DIR)Read the given nutrition dataset into a Pandas DataFrame objectNote we are reading only the first 64 rows with `nrows=64` argument because we just want to read all the nutrients informtion and not the maximum/minimum bounds in the dataset. We will enter those bounds in the optimization problem separately.df = pd.read_excel("Data/diet - medium.xls",nrows=17)Show the datasetdfCreate the `PuLP` problem variable. 
Since it is a cost minimization problem, we need to use `LpMinimize`# Create the 'prob' variable to contain the problem data prob = LpProblem("Simple Diet Problem",LpMinimize)/home/andreas/anaconda3/envs/py38/lib/python3.8/site-packages/pulp/pulp.py:1190: UserWarning: Spaces are not permitted in the name. Converted to '_' warnings.warn("Spaces are not permitted in the name. Converted to '_'")Create a list of food items from the dataset# Creates a list of the Ingredients food_items = list(df['Foods']) print("So, the food items to consdier, are\n"+"-"*100) for f in food_items: print(f,end=', ')So, the food items to consdier, are ---------------------------------------------------------------------------------------------------- Frozen Broccoli, Frozen Corn, Raw Lettuce Iceberg, Baked Potatoes, Tofu, Roasted Chicken, Spaghetti W/ Sauce, Raw Apple, Banana, Wheat Bread, White Bread, Oatmeal Cookies, Apple Pie, Scrambled Eggs, Turkey Bologna, Beef Frankfurter, Chocolate Chip Cookies,Create a dictinary of costs for all food itemscosts = dict(zip(food_items,df['Price/Serving'])) costsCreate a dictionary of calories for all food itemscalories = dict(zip(food_items,df['Calories']))Create a dictionary of cholesterol for all food itemscholesterol = dict(zip(food_items,df['Cholesterol (mg)']))Create a dictionary of total fat for all food itemsfat = dict(zip(food_items,df['Total_Fat (g)']))Create a dictionary of sodium for all food itemssodium = dict(zip(food_items,df['Sodium (mg)']))Create a dictionary of carbohydrates for all food itemscarbs = dict(zip(food_items,df['Carbohydrates (g)']))Create a dictionary of dietary fiber for all food itemsfiber = dict(zip(food_items,df['Dietary_Fiber (g)']))Create a dictionary of protein for all food itemsprotein = dict(zip(food_items,df['Protein (g)']))Create a dictionary of vitamin A for all food itemsvit_A = dict(zip(food_items,df['Vit_A (IU)']))Create a dictionary of vitamin C for all food itemsvit_C = dict(zip(food_items,df['Vit_C (IU)']))Create a dictionary of calcium for all food itemscalcium = dict(zip(food_items,df['Calcium (mg)']))Create a dictionary of iron for all food itemsiron = dict(zip(food_items,df['Iron (mg)']))Create a dictionary of food items with lower bound# A dictionary called 'food_vars' is created to contain the referenced Variables food_vars = LpVariable.dicts("Food",food_items,0,cat='Continuous') food_varsAdding the objective function to the problem# The objective function is added to 'prob' first prob += lpSum([costs[i]*food_vars[i] for i in food_items]), "Total Cost of the balanced diet"Adding the calorie constraints to the problemprob += lpSum([calories[f] * food_vars[f] for f in food_items]) >= 800.0, "CalorieMinimum" prob += lpSum([calories[f] * food_vars[f] for f in food_items]) <= 1300.0, "CalorieMaximum"Adding other nutrient constraints to the problem one by one... 
Cholesterolprob += lpSum([cholesterol[f] * food_vars[f] for f in food_items]) >= 30.0, "CholesterolMinimum"prob += lpSum([cholesterol[f] * food_vars[f] for f in food_items]) <= 240.0, "CholesterolMaximum" Fatprob += lpSum([fat[f] * food_vars[f] for f in food_items]) >= 40.0, "FatMinimum"prob += lpSum([fat[f] * food_vars[f] for f in food_items]) <= 100.0, "FatMaximum" Sodiumprob += lpSum([sodium[f] * food_vars[f] for f in food_items]) >= 500.0, "SodiumMinimum"prob += lpSum([sodium[f] * food_vars[f] for f in food_items]) <= 2000.0, "SodiumMaximum" Carbsprob += lpSum([carbs[f] * food_vars[f] for f in food_items]) >= 130.0, "CarbsMinimum"prob += lpSum([carbs[f] * food_vars[f] for f in food_items]) <= 450.0, "CarbsMaximum" Fiberprob += lpSum([fiber[f] * food_vars[f] for f in food_items]) >= 125.0, "FiberMinimum"prob += lpSum([fiber[f] * food_vars[f] for f in food_items]) <= 250.0, "FiberMaximum" Proteinprob += lpSum([protein[f] * food_vars[f] for f in food_items]) >= 60.0, "ProteinMinimum"prob += lpSum([protein[f] * food_vars[f] for f in food_items]) <= 100.0, "ProteinMaximum" Vitamin Aprob += lpSum([vit_A[f] * food_vars[f] for f in food_items]) >= 1000.0, "VitaminAMinimum"prob += lpSum([vit_A[f] * food_vars[f] for f in food_items]) <= 10000.0, "VitaminAMaximum" Vitamin Cprob += lpSum([vit_C[f] * food_vars[f] for f in food_items]) >= 400.0, "VitaminCMinimum"prob += lpSum([vit_C[f] * food_vars[f] for f in food_items]) <= 5000.0, "VitaminCMaximum" Calciumprob += lpSum([calcium[f] * food_vars[f] for f in food_items]) >= 300.0, "CalciumMinimum"prob += lpSum([calcium[f] * food_vars[f] for f in food_items]) <= 1500.0, "CalciumMaximum" Ironprob += lpSum([iron[f] * food_vars[f] for f in food_items]) >= 10.0, "IronMinimum"prob += lpSum([iron[f] * food_vars[f] for f in food_items]) <= 40.0, "IronMaximum"# Fat prob += lpSum([fat[f] * food_vars[f] for f in food_items]) >= 20.0, "FatMinimum" prob += lpSum([fat[f] * food_vars[f] for f in food_items]) <= 50.0, "FatMaximum" # Carbs prob += lpSum([carbs[f] * food_vars[f] for f in food_items]) >= 130.0, "CarbsMinimum" prob += lpSum([carbs[f] * food_vars[f] for f in food_items]) <= 200.0, "CarbsMaximum" # Fiber prob += lpSum([fiber[f] * food_vars[f] for f in food_items]) >= 60.0, "FiberMinimum" prob += lpSum([fiber[f] * food_vars[f] for f in food_items]) <= 125.0, "FiberMaximum" # Protein prob += lpSum([protein[f] * food_vars[f] for f in food_items]) >= 100.0, "ProteinMinimum" prob += lpSum([protein[f] * food_vars[f] for f in food_items]) <= 150.0, "ProteinMaximum"Writing problem data to a `.lp` file# The problem data is written to an .lp file prob.writeLP("SimpleDietProblem.lp")Run the solver# The problem is solved using PuLP's choice of Solver prob.solve()Print the problem solution status `'optimal'`, `'infeasible'`, `'unbounded'` etc...# The status of the solution is printed to the screen print("Status:", LpStatus[prob.status])Status: OptimalScan through the problem variables and print out only if the variable quanity is positive i.e. 
it is included in the optimal solutionprint("Therefore, the optimal (least cost) balanced diet consists of\n"+"-"*110) for v in prob.variables(): if v.varValue>0: print(v.name, "=", v.varValue)Therefore, the optimal (least cost) balanced diet consists of -------------------------------------------------------------------------------------------------------------- Food_Frozen_Broccoli = 6.9242113 Food_Scrambled_Eggs = 6.060891 Food__Baked_Potatoes = 1.0806324Print the optimal diet costprint("The total cost of this balanced diet is: ${}".format(round(value(prob.objective),2)))The total cost of this balanced diet is: $5.52NumPy Tutorial__By __# pip install numpy import numpy as npThe Basicsa = [1, 2, 3] print(a) print(type(a)) a = np.array(a) print(a) print(type(a)) # We can also create arrays of different dimensions in numpy b = np.array([[1, 2, 3], [4, 5, 6]]) print(b) print(type(b)) # we can see the dimension of any numpy array using the .shape command print(b.shape) a = np.array([1, 2, 3]) print(a.shape)(3,)Creating arrays using `Numpy.random` packagenp.random.seed(10) r = np.random.randint(100, size=(4, 10)) print(r) r2 = np.random.rand(3, 4) print(r2)[[0.44524562 0.04426012 0.24749552 0.46513333] [0.88291734 0.1809206 0.35617448 0.3049914 ] [0.72609875 0.26811569 0.34519581 0.35372182]]Accessing Specific Elementsr = np.random.randint(10, size=(3, 4)) print(r) # accessing first row print(r[0, :]) # accessing last row print(r[-1, :]) # accessing specific row row = 2 print(r[row - 1, ]) # accessing the first column print(r[:, 0]) # accessing the first column and reshaping it into a column vector c1 = r[:, 0] print(c1) c1 = c1.reshape((3, 1)) print(c1) # accessing the last column c2 = r[:, -1] print(c2) # reshaping the last column into column vector c2 = c2.reshape((3, 1)) print(c2) # retreiving specific rows np.random.seed(10) r = np.random.randint(low=0, high=100, size=(5, 6)) print(r) print(r[[0, 1], :]) # printing selected rows print(r[[0, 3], ]) # changing order of rows print(r[[3, 0, 4], ]) # getting specific columns print(r[:, [4, 2]]) # selecting a specific element (row, column) print(r[0, 0]) row, column = 2, 3 print(r[row, column])88Getting elements using start, stop, step size (getting crazy 🙃)print(r) print(r[::2, :]) print(r[::-1, ]) print(r[::-1, ::-1]) print(r[1:4:2, 0:5:3]) # 1, 3 | 0, 3[[29 0] [72 51]]Changing the Value of the matricesprint(r) #changing the value of a single element r[0, 0] = 100 print(r) r[0, :] # changing the value of an entire row r[0, :] = np.random.randint(low=-100, high=0, size=(1, 6)) print(r) r[:, 2] # changing the value of an entire column r[:, 2] = np.random.randint(low=-200, high=-100, size=(5,)) print(r) # changing the value of some particular elements r[1, [0, 2]] = 0 print(r) k = r.copy() k # broadcasting k[0, :] = 100 k k[:, 0] = -100 k k[1:4:2, [0, 4]] = -50 k k[0:3:2, [3, 4]]Different Initialization Methods# Creating the zeros Matrix z = np.zeros((3, 3)) print(z) z = np.zeros((3, 4)) print(z) # creating matrix with data type defined as int z = np.zeros((3, 4), dtype=np.int) print(z) print(z.dtype) # creating all 1's matrix o = np.ones((2, 4)) print(o) o = np.ones((2, 4), dtype=np.int) print(o) # creating a matrix with some arbitrary number f = np.full(shape=(3, 4), fill_value=-98) print(f) print(f.dtype) f = np.full(shape=(1, 2), fill_value=2.0) print(f) print(f.dtype) # The Identity Matrix np.identity(3) np.identity(5, dtype=np.int) np.eye(3)Creating copy of Arraysa = np.array([1, 2, 3]) b = a b[0] = 100 print(a) a = np.array([1, 2, 3]) b 
= a.copy() b[0] = 100 print(a)[1 2 3]Broadcasting in NumPy Arraysa = [1, 2, 3] print(type(a)) print(a + 1) a = np.array([1, 2, 3]) print(a + 1) np.random.randint(low=0, high=100, size=(4, 5)) - 50Basic Mathematicall Operations# Multilication with scalar np.random.seed(10) A = np.random.randint(0, 100, size=(3, 4)) print(A) A * 0 4 * A # Division with scalar A / 2 A // 3 A / np.array([1, 2, 3, 4]) B = np.random.randint(1, 5, size=(3, 4)) B A A / B A / np.random.randint(1, 5, size=(3, 4))Matrix Operationstype(A) A = np.random.randint(1, 20, size=(2, 3)) print(A) B = np.random.randint(1, 20, size=(2, 3)) print(B) A + B A + 2 * B A - BElement wise Operationsprint(A) # element wise power operator A ** 2 np.sin(A) np.cos(A) np.tan(A) np.exp(A)Linear Algebra# Taking the matrix dot products between 2 matrices A = np.array([[1, 2]]) B = np.array([[5, 10, 0], [-9, 0, 3]]) print(A) print(B) A.shape B.shape R = A.dot(B) print(R) R.shapeImportant$dim A$ = ($n_1$, $m_1$)$dim B$ = ($n_2$, $m_2$)if $m_1 = n_2$$$(n_1, m_2)$$print(A.shape) print(B.shape) print(R.shape) # Determinant of a Square Matrix A = np.random.randint(-100, 100, size=(4, 4)) print(A) print(np.linalg.det(A)) I = np.identity(4, dtype=np.int) print(I) print(np.linalg.det(I)) # Find the Trace of a Square Matrix print(A) print(np.trace(A)) print(np.trace(I)) # calculating the inverse of a matrix np.linalg.inv(I) A_inv = np.linalg.inv(A) print(A_inv) K = np.arange(9).reshape(3, 3) K np.linalg.det(K) np.linalg.inv(K) print(A.dot(A_inv)) print(A_inv.dot(A)) J = A.dot(A_inv) print(J) mask = np.array(J > 10 ** -10, dtype=np.int) mask mask2 = np.array([ [1, 1, 1, 1], [0, 0, 0, 0], [0.5, 0.5, 0.5, 0.5], [0, 0, 0, 0] ]) mask2 A * mask2 mask * J # thresholding mask_3 = np.array(A > 10, dtype=np.int) mask_3 A * mask_3 # finding the transpose of a matrix B = np.arange(6).reshape(2, 3) print(B) print(B.shape) print(B.T) print(B.T.shape) J = np.array([[1, 2,3, 4, 5]]) J J.shape J.TStatisticsprint(A) # finding minimum in the matrix np.min(A) # finding minimum in every column vector np.min(A, axis=0) # finding the mimimum in every row vector np.min(A, axis=1) # finding maximum in entire matrix print(np.max(A)) # finding maximum in every column vector np.max(A, axis=0) # finding the maximum in every row vector np.max(A, axis=1) # sum of every element in the matrix np.sum(A) # sum of elements in every column vector np.sum(A, axis=0) # sum of elements in every row vector np.sum(A, axis=1) A.sum(axis=1)Boolean Operations on ArraysA = np.arange(35).reshape(5, 7) print(A) # equality check A == 4 # comparison operator A < 10 A >= 23 # converting a boolean array to integer array (A < 20).astype(np.int)Loading an HCA matrix into Bioconductor This vignette illustrates requesting an expression matrix from the HCA matrix service and loading it into a Bioconductor R object.First, install and import some dependencies:install.packages("downloader") install.packages("BiocManager") library("downloader") library("BiocManager") BiocManager::install("LoomExperiment") library(LoomExperiment) library("httr")The downloaded binary packages are in /var/folders/nl/dgln31tj7l35g879d6f_tjtc0000gn/T//RtmpDOn1ah/downloaded_packages The downloaded binary packages are in /var/folders/nl/dgln31tj7l35g879d6f_tjtc0000gn/T//RtmpDOn1ah/downloaded_packagesNow, we're going to make some requests to describe what fields and values we can filter on when we're selecting our matrix.r <- GET("https://matrix.data.humancellatlas.org/v1/filters") content(r)That's the list of metadata fields we can 
filter on when requesting the matrix. We can describe any of them with further API calls:r <- GET("https://matrix.data.humancellatlas.org/v1/filters/project.project_core.project_short_name") print(content(r)) r <- GET("https://matrix.data.humancellatlas.org/v1/filters/genes_detected") print(content(r))$field_description [1] "Count of genes with a non-zero count." $field_name [1] "genes_detected" $field_type [1] "numeric" $maximum [1] 13108 $minimum [1] 358For categorical data, we see the number of cells associated with each category. For numeric, we see the range of value. If we want to request a matrix based on these metadata values, we can add them to the filter in the body of a POST request to the matrix service:payload = list( filter = list( op = "and", value = list( list(op = "=", value = "Single cell transcriptome analysis of human pancreas", field = "project.project_core.project_short_name"), list(op = ">=", value = 300, field = "genes_detected") )), format = "loom" ) r <- POST("https://matrix.data.humancellatlas.org/v1/matrix", body = payload, encode = "json") response <- content(r) print(response)$eta [1] "" $matrix_url [1] "" $message [1] "Job started." $request_id [1] "ca657a20-5896-4583-80fc-16c4f02657da" $status [1] "In Progress"That call responds right away and tells us that the matrix is being prepared. We can use the request_id to wait until the matrix is done.request_id <- response["request_id"] status <- response["status"] message(status) while (status != "Complete") { url = paste("https://matrix.data.humancellatlas.org/v1/matrix/", request_id, sep = "") r <- GET(url) response <- content(r) status = response["status"] message(status) Sys.sleep(15) } print(response)In Progress In Progress In Progress In Progress In Progress In Progress In Progress In Progress In Progress In Progress In Progress CompleteNow, that the matrix is ready, we can download it. The file we download is a loom-formatted matrix. Loom is the default matrix format, but others can be specified in the matrix request (csv, mtx).matrix_file_url = unlist(response["matrix_url"]) download.file(url=matrix_file_url, destfile='matrix.loom', method='curl')HCA Matrix Service Loom OutputThe Loom format is documented more fully, along with code samples, [here](https://linnarssonlab.org/loompy/index.html).Per Loom [conventions](https://linnarssonlab.org/loompy/conventions/index.html), columns in the loom-formatted expression matrix represent cells, and rows represent genes. The column and row attributes follow Loom conventions where applicable as well: `CellID` uniquely identifies a cell, `Gene` is a gene name, and `Accession` is an ensembl gene id.Descriptions of the remaining metadata fields are available at the [HCA Data Browser](https://prod.data.humancellatlas.org/metadata).And finally, we can `import` the loom file into a `Bioconductor::SingleCellLoomExperiment` object for further analysis in R.scle <- import("./matrix.loom", type="SingleCellLoomExperiment") scleThe `SingleCellLoomExperiment` also adheres to Loom [conventions](https://linnarssonlab.org/loompy/conventions/index.html) representing features as rows and samples as columns. Expression data is available via the `assays()` method, specifying a named assay.assays(scle)$matrixRow and column attribute data are available through `rowData()` and `colData()` methods respectively.rowData(scle)Numpy laboratoryimport numpy as np import matplotlib.pyplot as plt %matplotlib inline1. Numpy basic exercisesUse vectorization and avoid for loops in all exercises. 
Implement each exercise as a single function and write test cases for the function. 1.1 Implement standardization for 2D arrays.\begin{equation*}X_{std} = \frac{X - \mu}{\sigma},\end{equation*}where $\mu$ is the mean and $\rho$ is the standard deviation of the array elements.def standardize(X): return (X - X.mean(axis=0)) / X.std(axis=0) x_std = standardize(np.array([[1, 2], [2, 3], [1, 2]])) x_std1.2 Implement normalization for 2D arrays.\begin{equation*}X_{norm} = \frac{X - X_{min}}{X_{max} - X_{min}}\end{equation*}def normalize(X): xmin = X.min(axis=0) xmax = X.max(axis=0) return (X - xmin) / (xmax - xmin) X = np.arange(6).reshape(2, 3) X[1, 2] = -5 print(X) normalize(X)[[ 0 1 2] [ 3 4 -5]]1.3 Implement the softmax function.$$x_i \mapsto \frac{\exp(x_i)}{\sum_{j=1}^n \exp(x_j)}$$def softmax(X): # compute softmax along the last dimension expX = np.exp(X) return expX / expX.sum(axis=-1)[:, None] X = np.arange(12).reshape(3, -1) softmax(X)2. VectorizationRewrite the following examples into vectorized solutions (no for loops and list comprehensions). 2.1 Row-wise Euclidean normWrite a function which has one parameter, a 2D array and it returns a vector of row-wise Euclidean norms of the input. Use `numpy` operations and vectorization, avoid `for` loops. The solution below is a _bad_ solution.def rowwise_norm(X): def my_dot(x, y): result = 0.0 for i in range(len(x)): result += x[i] * y[i] return result return np.array([np.sqrt(my_dot(x, x)) for x in X]) X = np.arange(5)[:, None]*np.ones((5, 3)); print(X) print(rowwise_norm(X)) print(rowwise_norm([[1], [-1], [1], [-1]]))[[0. 0. 0.] [1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.]] [0. 1.73205081 3.46410162 5.19615242 6.92820323] [1. 1. 1. 1.]Solutiondef rowwise_norm(X): X = np.array(X) return np.sqrt((X * X).sum(axis=1)) X = np.arange(5)[:, None] * np.ones((5, 3)); print(X) print(rowwise_norm(X)) print(rowwise_norm([[1], [-1], [1], [-1]]))[[0. 0. 0.] [1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.]] [0. 1.73205081 3.46410162 5.19615242 6.92820323] [1. 1. 1. 1.]2.2 ChessboardWrite a function which has one parameter, a positive integer $n$, and returns an $n\times n$ array of $\pm1$ values like a chessboard: $M_{i,j} = (-1)^{i+j}$.def chessboard(n): return np.array([[(-1)**(i + j) for j in range(n)] for i in range(n)]) chessboard(5)Solutiondef chessboard(n): return (-1) ** np.arange(n * n).reshape(n, n) chessboard(3)3. Broadcast quizDo the following operations work and if so, what is the shape of the resulting array?* Try to figure it out before evaluating the cell.np.ones(3) + np.ones((3,3)) np.ones(3) + np.ones((4, 3)) # np.ones(3) + np.ones((3, 4)) # fails np.ones(3)[:, None] + np.ones((3, 4)) np.ones((1, 2, 3)) + np.ones((1, 3))[:, None, :] np.ones((1, 2, 3)) + np.ones((1, 3))Can you broadcast these arrays such that they can be added? What will be the shape of the result?# np.ones((4,20,3)) + np.ones((5, 3)) # fails4. Numpy advanced exercises 4.1 BlockmatrixWrite a function named __`blockmatrix`__ that produces the following (block) matrix:$$\left(\begin{array}{ccc|ccc} 1 & & 0& 0 & \cdots & 0 \\ & \ddots & & \vdots & & \vdots \\ 0& & 1 & 0 & \cdots & 0 \\\hline 0 & \cdots & 0 & 1 & \cdots & 1 \\ \vdots & & \vdots & \vdots & & \vdots \\ 0 & \cdots & 0 & 1 & \cdots & 1\end{array}\right)$$The function should have 2 positive integer parameters, the size of the first square block and the size of the last square block. 
The other two blocks should have the appropriate size (may be rectangle).The first block is an indentity matrix, the last is a constant $1$ matrix.Return the resulted matrix.Use matrix initializers: `ones`, `zeros`, `eye` and concatenation.def blockmatrix(n, m): return np.concatenate( ( np.concatenate((np.eye(n), np.zeros((n, m))), axis=1), # upper block np.concatenate((np.zeros((m, n)), np.eye(m)), axis=1), # lower block ), axis=0 ) blockmatrix(2, 5)4.2 Blockmatrix from arbitrary square matricesWrite a blockmatrix function that takes any number of square matrices and returns a blockmatrix with these matrices in the diagonal.def any_blockmatrix(*matrices): sum_width = sum(m.shape[1] for m in matrices) before = 0 X = np.concatenate((matrices[0], np.zeros((matrices[0].shape[0], sum_width-matrices[0].shape[1]))), axis=1) before += matrices[0].shape[1] for m in matrices[1:]: after = sum_width - before - m.shape[1] height = m.shape[0] X = np.concatenate(( X, np.concatenate((np.zeros((height, before)), m, np.zeros((height, after))), axis=1) )) before += m.shape[1] return X A = np.arange(9).reshape(3, 3) B = np.diag((2, 3)) any_blockmatrix(A, B, A, B)4.3 DerivativeWrite a function which numerically derivates a $\mathbb{R}\mapsto\mathbb{R}$ function. Use the forward finite difference.The input is a 1D array of function values, and optionally a 1D vector of abscissa values. If not provided then the abscissa values are unit steps.The result is a 1D array with the length of one less than the input array.Use `numpy` operations instead of `for` loop in contrast to the solution below.def derivate(f, x=None): if x is None: x = np.arange(len(f)) return np.array([(f[i+1] - f[i]) / (x[i+1] - x[i]) for i in range(len(x) - 1)]) derivate(np.arange(10)**2) x = np.arange(10) plt.plot(x, x**2) plt.plot(x[:-1], derivate(x**2, x))Solutiondef derivate(f, x=None): if x is None: x = np.arange(len(f)) return np.diff(f) / np.diff(x) x = np.arange(10) plt.plot(x, x**2) plt.plot(x[:-1], derivate(x**2, x))4.4 Birthday problemIn probability theory, the birthday problem or birthday paradox concerns the probability that, in a set of n randomly chosen people, some pair of them will have the same birthday. By the pigeonhole principle, the probability reaches 100% when the number of people reaches 367 (since there are only 366 possible birthdays, including February 29). However, 99.9% probability is reached with just 70 people, and 50% probability with 23 people. These conclusions are based on the assumption that each day of the year (excluding February 29) is equally probable for a birthday. -- [Wikipedia](https://en.wikipedia.org/wiki/Birthday_problem)Write a function that simulates this problem for variable $n$. Your function should take $n$ and an experiment count as its parameter and sample experiment count times and return the ratio of "birthday collisions" (how many times there were at least two birthdays on the same day).Run it for different $n$ values with at least 1000 experiment count and plot the results. You can add a grid and check for the 50% probability with: fig, ax = plt.subplots() ax.plot(x) ax.grid()def simulate_birthday_problem(n, iter_no=1000): """Simulate the birthday problem for a group of N people Generates N birthdays with uniform probability and returns the proportion of iterations with birthday collisions. 
""" collision = 0 for _ in range(iter_no): birthdays = np.random.randint(1, 366, size=n) if np.unique(birthdays).shape[0] < n: # collision collision += 1 return collision / iter_no max_n = 50 fig, ax = plt.subplots(figsize=(16, 8)) ax.set_yticks(np.arange(0, 1.0, .1)) ax.grid(True) # number of simulations to run iter_nums = [20, 100, 1000] for iter_no in iter_nums: x = [] for n in range(2, max_n): x.append(simulate_birthday_problem(n, iter_no=iter_no)) ax.plot(x) ax.legend(iter_nums) # notice that the plot becomes less noisy as we run more simulations4.5 Horner's methodImplement the [Horner's method](https://en.wikipedia.org/wiki/Horner%27s_methodDescription_of_the_algorithm) for evaluating polynomials. The first input is a 1D array of numbers, the coefficients, from the constant coefficient to the highest order coefficent. The second input is the variable $x$ to subsitute. The function should work for all type of variables: numbers, arrays; the output should be the same type array as the input, containing the elementwise polynomial values.def horner(C, x): y = np.zeros_like(x) for c in C[::-1]: y = y*x + c return y C = [2, 0, 1] # 2 + 0*x + 1*x^2 print(horner(C, 3)) # 2 + 3^2 print(horner(C, [3, 3]))11 [11 11]Assignment 6: NeuroevolutionIn our last assignment, we explored the week's major idea (representation and genetic encoding) in a more realistic application in the Traveling Salesman Problem. This week we'll follow that trend and explore the ideas around mutation rates and measuring diversity in the setting of evoliving artificial neural networks (Neuroevolution). While neuroevolution really shines outside of standard machine learning benchmarks, for the sake of simplicity, we'll use one of the most basic benchmarks for neural networks, classification of handwritten digits in MNIST.# imports import numpy as np import copy import matplotlib.pyplot as plt plt.style.use('seaborn') import scikits.bootstrap as bootstrap import warnings warnings.filterwarnings('ignore') # Danger, ! (not a scalable hack, and may surpress other helpful warning other than for ill-conditioned bootstrapped CI distributions) import scipy.stats # for finding statistical significance import timeI've taken the liberty to preprocess MNIST for you by deskewing (standard preprocessing step to straigthen tilted images) and downscaling the images from `28x28` to `14x14` to try and keep out genome size down (at the cost of losing some resolution/information in the images), split out the labels (turning them into one-hot encodings), and separating the train and test sets. If you aren't familiar with machine learning practices like this, don't worry about it -- just load the datasets below.*Note:* This dataset contains 60,000 training examples, and 10,000 testing examples. 
This is likely far overkill for what we need, so if your machine is struggling with the size of the dataset, feel free to use only a small portion of the training examples/labels provided (doing so didn't effect runtime much on my laptop, but your mileage may vary)train_x = np.loadtxt("train_x.csv", delimiter=',') test_x = np.loadtxt("test_x.csv", delimiter=',') train_y = np.loadtxt("train_y.csv", delimiter=',') test_y = np.loadtxt("test_y.csv", delimiter=',')Let's take a look at the images!# This is what the image looks like num_images = 6 fig, axs = plt.subplots(1, num_images, figsize=(3*num_images, 3), sharey=True) for i in range(num_images): axs[i].imshow(train_x[i].reshape(14,14)) # we will keep the images flat to easily feed them into the neural network, but we need them back in a square shape to visualize axs[i].grid(False) axs[i].axis('off') axs[i].set_title("Label:"+str(np.argmax(train_y[i]))) # the argmax takes out one-hot encoding and turns it into a readable labelQ1: ImplementationOur individual solutions this week will be (again keeping things overly simplistic) single-layer neural networks. These networks are defined by a single weight matrix with input dimenion of the size of the flattened image (`14*14=196`) and output dimension of the size of the number of possible classes (`10`). Feel free to implement the genome as the weight matrix, or simply as a flattened float vector of size `1960`.class Individual: def __init__(self, fitness_function, genome_length): self.fitness_function = fitness_function self.genome = np.random.uniform(0,1,genome_length) def eval_fitness(self): self.fitness = self.fitness_function(self.genome)There are two main ways to measure the performance of a neural network, loss and accuracy. For the sake of intuition, let's use accuracy here, but I'm providing the implementaition of loss just in case you want to play around with it as well (thought returning the negative of the loss, as the smaller magnitudes are better so this allows us to continue going "uphill" if we do ever choose to optimize for loss). As we haven't covered neural networks, I'm also providing the implementation of a single layer neural network (desite its apparent simplicity compared to mult-layer networks) in the fitness function below.def accuracy(output, y): return np.sum(np.isclose(np.argmax(output,axis=1),np.argmax(y,axis=1)))/y.shape[0] def loss (output, y): return -np.sum(np.square(output-y))/y.shape[0] def neural_network_fitness(weights,x=train_x,y=train_y): weight_matrix = weights.reshape((14*14,10)) output = x.dot(weight_matrix) return accuracy(output,y)Q1b: Real-valued mutationIn class, we've only alluded indrectly to mutating vectors of floats as genomes (like neural network weights). Let's play around with the implmentations of these. For simplicity, we'll ignore crossover for now. Rather than flipping a given number of bits, let's try adding a small random value to each gene's value by adding `(np.random.rand(genome_length)*2-1)*mutation_size` to the genome. This takes a uniform distribution, normalizes it to be between -1 and 1, then scales it by some `mutation_size` scaling factor that you can pass into your `evolutionary_algorithm` function. Q1c: Diversity TrackingIn addition to keeping track of the best genome, and fitness at each generation, let's also record the diversity of the population at each generation. 
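One vectorized way to compute the diversity metric described next is sketched below; this is not required by the assignment, just a compact equivalent of the per-gene bookkeeping, assuming `population` is a list of `Individual` objects with numpy `genome` arrays as defined above:

```python
import numpy as np

def population_diversity(population):
    # Stack genomes into a (pop_size, genome_length) matrix, take the
    # standard deviation of each gene across the population (axis=0),
    # and average the per-gene deviations into a single number.
    genomes = np.stack([ind.genome for ind in population])
    return genomes.std(axis=0).mean()
```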
The metric we talked about most in class was measuring genotypic diversity with the average standard deviation of the distribution across the population of the values for each gene.def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, genome_length=10, num_elements_to_mutate=1, mutation_size=0.1, crossover=True, tournament_size=4, num_tournament_winners=2): """ Evolutinary Algorithm (copied from the basic hillclimber in our last assignment) parameters: fitness_funciton: (callable function) that return the fitness of a genome given the genome as an input parameter (e.g. as defined in Landscape) total_generations: (int) number of total iterations for stopping condition num_parents: (int) the number of parents we downselect to at each generation (mu) num_childre: (int) the number of children (note: parents not included in this count) that we baloon to each generation (lambda) genome_length: (int) length of the genome to be evoloved num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation) mutation_size: (float) scaling parameter of the magnitidue of mutations for floating point vectors crossover: (bool) whether to perform crossover when generating children tournament_size: (int) number of individuals competing in each tournament num_tournament_winners: (int) number of individuals selected as future parents from each tournament (must be less than tournament_size) returns: fitness_over_time: (numpy array) track record of the top fitness value at each generation solutions_over_time: (numpy array) track record of the top genome value at each generation diversity_over_time: (numpy array) track record of the population genetic diversity at each generation """ # initialize record keeping solution = None # best genome so far solution_fitness = -99999 # fitness of best genome so far solution_generation = 0 # time (generations) when solution was found fitness_over_time = np.zeros(total_generations) solutions_over_time = np.zeros((total_generations,genome_length),dtype=int) diversity_over_time = np.zeros(total_generations) # the initialization proceedure population = [] # keep population of individuals in a list for i in range(num_parents): # only create parents for initialization (the mu in mu+lambda) population.append(Individual(fitness_function,genome_length)) # generate new random individuals as parents # get population fitness for i in range(len(population)): population[i].eval_fitness() # evaluate the fitness of each parent for generation_num in range(total_generations): # repeat # print(generation_num) # the modification procedure new_children = [] # keep children separate for now (lambda in mu+lambda) while len(new_children) < num_children: # inheretance [parent1, parent2] = np.random.choice(population, size=2) # pick 2 random parents child1 = copy.deepcopy(parent1) # initialize children as perfect copies of their parents child2 = copy.deepcopy(parent2) # # crossover if crossover: for child, this_parent, other_parent in [[child1, parent1, parent2],[child2, parent2, parent1]]: child.genome = -1*np.ones(len(child.genome)) child.genome[0] = this_parent.genome[0] next_index = np.where(other_parent.genome == this_parent.genome[0]) while next_index != 0: child.genome[next_index] = this_parent.genome[next_index] next_index = np.where(other_parent.genome == child.genome[next_index])[0] child.genome[np.where(child.genome == -1)] = other_parent.genome[np.where(child.genome == -1)] child.genome = child.genome.astype(int) 
# mutation for this_child in [child1,child2]: for _ in range(num_elements_to_mutate): # [index_to_swap1, index_to_swap2] = np.random.randint(0,genome_length,2) # while index_to_swap1 == index_to_swap2: [index_to_swap1, index_to_swap2] = np.random.randint(0,genome_length,2) # orig_gene_1 = this_child.genome[index_to_swap1] # this_child.genome = np.delete(this_child.genome,index_to_swap1) # this_child.genome = np.insert(this_child.genome,index_to_swap2,orig_gene_1) this_child.genome = this_child.genome + (np.random.rand(genome_length)*2-1)*mutation_size new_children.extend((child1,child2)) # add children to the new_children list # the assessement procedure for i in range(len(new_children)): new_children[i].eval_fitness() # assign fitness to each child # selection procedure population += new_children # combine parents with new children (the + in mu+lambda) population = sorted(population, key=lambda individual: individual.fitness, reverse = True) # sort the full population by each individual's fitness (from highers to lowest) # tournament selection new_population = [] new_population.append(population[0]) while len(new_population) < num_parents: tournament = np.random.choice(population, size = tournament_size) tournament = sorted(tournament, key=lambda individual: individual.fitness, reverse = True) new_population.extend(tournament[:num_tournament_winners]) population = new_population # record keeping if population[0].fitness > solution_fitness: # if the new parent is the best found so far solution = population[0].genome # update best solution records solution_fitness = population[0].fitness solution_generation = generation_num fitness_over_time[generation_num] = solution_fitness # record the fitness of the current best over evolutionary time solutions_over_time[generation_num,:] = solution all_gene_std = [] for x in range(genome_length): this_gene_values=[] for y in range(len(population)): this_gene_values.append(population[y].genome[x]) all_gene_std.append(np.std(this_gene_values)) diversity_over_time[generation_num] = np.mean(all_gene_std) return fitness_over_time, solutions_over_time, diversity_over_timeQ2: ExperimentationDue to the high dimensionality of this problem, the runs are a bit slower than before, so let's keep the scale small on this with just `50` generations and `5` repitions. Hopefully this keeps things managable from a runtime persepctive (runs in a little over 30 seconds for each repition, or a little under 3 minutes for all 5, on my machine). Let's use a mutation size of `1.0`, the same `50` parents and `50` children settings from last week, and a tournament size of `20`, choosing `10` winners. 
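Before launching long runs, it is worth double-checking the mutation step above: the uniform perturbation `(np.random.rand(genome_length)*2-1)*mutation_size` already touches every gene, and it is applied once per iteration of the `num_elements_to_mutate` loop, so the effective step size grows with that parameter. Below is a sketch of an alternative that perturbs only a randomly chosen subset of genes (an illustration, not the assignment's required implementation):

```python
import numpy as np

def mutate_subset(genome, num_elements_to_mutate, mutation_size):
    """Add uniform noise in [-mutation_size, mutation_size] to a random
    subset of genes, leaving the remaining genes untouched."""
    mutated = genome.copy()
    idx = np.random.choice(len(genome), size=num_elements_to_mutate, replace=False)
    mutated[idx] += (np.random.rand(num_elements_to_mutate) * 2 - 1) * mutation_size
    return mutated
```

With `num_elements_to_mutate` equal to the genome length, this reduces to a single full-genome perturbation per child.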
*Hint:* If this still takes to long to run on your machine (especially while debugging/exploring code), feel free to run smaller test runs first by reducing the number of generations for the runs, plotting without bootstrapping, etc.experiment_results = {} solutions_results = {} diversity_results = {} num_runs = 5 total_generations = 50 genome_length = 14*14*10 num_elements_to_mutate = genome_length mutation_size = 1.0 num_parents = 50 num_children = 50 tournament_size = 20 num_tournament_winners = 10 fitness_function = neural_network_fitness crossover = False for run_name in ["mutation_only"]: experiment_results[run_name] = np.zeros((num_runs, total_generations)) solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length)) diversity_results[run_name] = np.zeros((num_runs, total_generations)) for run_num in range(num_runs): start_time = time.time() fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=fitness_function, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, num_elements_to_mutate=num_elements_to_mutate, crossover=crossover, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners) experiment_results[run_name][run_num] = fitness_over_time solutions_results[run_name][run_num] = solutions_over_time diversity_results[run_name][run_num] = diversity_over_time print(run_name, run_num, time.time()-start_time,fitness_over_time[-1])mutation_only 0 105.99584913253784 0.5202166666666667 mutation_only 1 106.23305344581604 0.47358333333333336 mutation_only 2 105.68558168411255 0.4451333333333333 mutation_only 3 104.95845532417297 0.44398333333333334 mutation_only 4 105.58599615097046 0.46776666666666666Q2b: VisualizationLike last time, please plot the bootstrapped fitness values over time.def plot_mean_and_bootstrapped_ci_over_time(input_data = None, name = "change me", x_label = "change me", y_label="change me", y_limit = None, plot_bootstrap = True): """ parameters: input_data: (numpy array of shape (max_k, num_repitions)) solution metric to plot name: (string) name for legend x_label: (string) x axis label y_label: (string) y axis label returns: None """ print(input_data.shape) generations = input_data.shape[0] CIs = [] mean_values = [] for i in range(generations): mean_values.append(np.mean(input_data[i])) CIs.append(bootstrap.ci(input_data[i], statfunction=np.mean)) mean_values=np.array(mean_values) print(CIs) high = [] low = [] for i in range(len(CIs)): low.append(CIs[i][0]) high.append(CIs[i][1]) low = np.array(low) high = np.array(high) fig, ax = plt.subplots() y = range(0, generations) ax.plot(y, mean_values, label=name) ax.fill_between(y, high, low, color='b', alpha=.2) ax.set_xlabel(x_label) ax.set_ylabel(y_label) ax.legend() if (name) and len(name)>0: ax.set_title(name) # plot fitness over time plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['mutation_only'])), name='mutation_only', y_label="Fitness", x_label='Generations')(50, 5) [array([0.20702667, 0.23659333]), array([0.22110333, 0.24901333]), array([0.22425333, 0.25309667]), array([0.23076667, 0.25441667]), array([0.23747 , 0.25450333]), array([0.24807, 0.27686]), array([0.25638, 0.29037]), array([0.26177, 0.31738]), array([0.27003667, 0.3186 ]), array([0.26952333, 0.32225667]), array([0.28655667, 0.33385667]), array([0.28937667, 0.33417667]), array([0.28890667, 0.33417667]), array([0.29157333, 0.35129333]), 
array([0.29364333, 0.35972667]), array([0.29423 , 0.36148667]), array([0.30726667, 0.37057667]), array([0.30726667, 0.37057667]), array([0.31211667, 0.37300333]), array([0.31042333, 0.38298333]), array([0.32543667, 0.39209 ]), array([0.33347333, 0.40271333]), array([0.35085333, 0.40417333]), array([0.35179333, 0.40588333]), array([0.35218333, 0.40778667]), array([0.35375333, 0.41314333]), array([0.35638 , 0.41749333]), array([0.36030667, 0.42815333]), array([0.37832333, 0.42946333]), array([0.38381333, 0.43859 ]), array([0.38381333, [...]Q3: Visualizing DiversityPlease also plot the diveristy of our population over evolutionary time.# plot diversity over time plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(diversity_results['mutation_only'])), name='mutation_only', y_label="Fitness", x_label='Generations')(50, 5) [array([1.63889107, 1.80565008]), array([2.10462457, 2.38680837]), array([2.35397269, 2.79787932]), array([2.72355195, 3.15845761]), array([2.80790824, 3.25355958]), array([3.11277682, 3.50803828]), array([3.31854842, 3.74404904]), array([3.48889336, 4.00433845]), array([3.5345177 , 4.14225393]), array([3.66442273, 4.37107582]), array([3.76515597, 4.32150118]), array([3.70483771, 4.29565305]), array([3.58243146, 4.2119078 ]), array([3.50795931, 4.14844868]), array([3.70375615, 4.17007702]), array([3.69805414, 3.98740259]), array([3.69169745, 4.09174402]), array([3.53641527, 4.21167258]), array([3.58689107, 4.42603252]), array([3.72710066, 4.40495652]), array([3.73198775, 4.28176557]), array([3.73955574, 4.19489802]), array([3.48770185, 3.96529226]), array([3.28492374, 3.81528462]), array([2.90132356, 3.84347349]), array([2.99439515, 4.03757992]), array([3.29311493, 4.08548733]), array([2.81596995, 4.21512307]), array([2.91691355, 4.23345761]), array([3.05616068, 4.34644256]), a[...]Q3b: AnalysisWhat do you notice about the diveristy over time? Is this what you expected to tradeoff exploration and exploitation -- and how it related to fitness? **insert text here** Q4: Generalization to Test DatasetsWhenever doing classification, it's good to make sure that your algorithm isn't overfitting to the training data. Based on your intuition about diversity and overfitting, what do you expect this relationship to look like? **insert text here** Q5: Evaluating Test AccuracySince we already have test data loaded in above, let's evaluate your already trained algorithms (using your saved best-solution-so-far genomes at each generation) to see how test fitness tracks with the training fitness. 
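The key observation (which the hint below also points at) is that the same fitness function can simply be pointed at the test split; evaluating one saved genome might look like the following sketch, reusing `neural_network_fitness`, `solutions_results`, `test_x`, and `test_y` from the cells above:

```python
# Evaluate a single saved best-so-far genome on the held-out test set.
best_genome = solutions_results["mutation_only"][0][-1]  # run 0, final generation
test_acc = neural_network_fitness(best_genome, x=test_x, y=test_y)
print("test accuracy:", test_acc)
```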
Please implement a script which calcualtes the test accuracy of the solutions over time below.*Hin:* Look for where the training set is used during fitness evaluation during training for ideas of what functions/syntax to usetest_accuracy_results = {} def calc_test_accuracy_over_time(name = None): test_accuracy_results = np.zeros((num_runs, total_generations)) # loop through each run for run_num in range(num_runs): # loop through each solution over time for generation in range(total_generations): # calculate the fitness on the test set for this solution and add this to test_accuracy_results test_accuracy_results[run_num][generation] = neural_network_fitness(weights=solutions_results['mutation_only'][run_num][generation],x=test_x, y=test_y) return test_accuracy_results test_accuracy_results['mutation_only'] = calc_test_accuracy_over_time() # print(test_accuracy_results['mutation_only'])[[0.2164 0.2269 0.2016 0.2115 0.2551 0.2551 0.299 0.3227 0.3227 0.3283 0.3451 0.3309 0.3309 0.3751 0.3751 0.3649 0.3884 0.3884 0.3909 0.4025 0.422 0.4179 0.4179 0.4179 0.4179 0.4396 0.4396 0.455 0.455 0.4733 0.4733 0.4733 0.4733 0.4733 0.4733 0.4733 0.4746 0.4746 0.4746 0.4746 0.4959 0.4959 0.4959 0.4959 0.497 0.5032 0.515 0.515 0.515 0.5289] [0.098 0.098 0.098 0.098 0.098 0.2629 0.2684 0.3402 0.3527 0.3527 0.319 0.319 0.319 0.3686 0.377 0.377 0.377 0.377 0.3761 0.3761 0.3875 0.4114 0.4114 0.4114 0.4114 0.4114 0.4219 0.4291 0.4291 0.4291 0.4291 0.4351 0.4351 0.4423 0.46 0.46 0.46 0.46 0.46 0.46 0.4692 0.4718 0.4718 0.4718 0.4718 0.4718 0.4666 0.4666 0.4543 0.4728] [0.2062 0.2373 0.2373 0.2373 0.2569 0.3179 0.3179 0.3179 0.3179 0.2889 0.2889 0.3002 0.3002 0.3188 0.3188 0.3186 0.3186 0.3201 0.3345 0.3648 0.3648 0.3553 0.3553 0.3553 0.3858 0.3891 0.3891 0.3891 0.3699 0.4055 0.4055 0.4055 0.4055 0.4085 0.4085 0.4085 0.4085 0.4108 0.4099[...]Q5b: Running and VisualizationRun and plot the test accuracy over time of the runs you performed above (to reduce clutter, feel free to just do this for the tournaments of size `20`).plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(test_accuracy_results['mutation_only'])), name='mutation_only', y_label="Fitness", x_label='Generations')(50, 5) [array([0.11964, 0.22448]), array([0.12378, 0.25358]), array([0.11872, 0.24346]), array([0.13124, 0.25882]), array([0.13124, 0.2706 ]), array([0.26144, 0.3069 ]), array([0.26916, 0.30422]), array([0.26924, 0.32778]), array([0.26386, 0.33278]), array([0.26514, 0.33018]), array([0.28604, 0.33136]), array([0.28734, 0.32376]), array([0.28734, 0.32594]), array([0.29324, 0.36124]), array([0.29412, 0.36498]), array([0.29408, 0.36048]), array([0.31376, 0.37794]), array([0.31514, 0.37794]), array([0.31802, 0.3779 ]), array([0.31362, 0.38968]), array([0.33052, 0.40334]), array([0.32796, 0.40518]), array([0.35006, 0.40652]), array([0.35006, 0.40652]), array([0.34832, 0.40894]), array([0.3561 , 0.41896]), array([0.3664 , 0.42242]), array([0.3664 , 0.43186]), array([0.37344, 0.43468]), array([0.38358, 0.45084]), array([0.38358, 0.45084]), array([0.39038, 0.4521 ]), array([0.39038, 0.4521 ]), array([0.39612, 0.45414]), array([0.39868, 0.4589 ]), array([0.40216, 0.45768]), array([0.40406, 0.4[...]Q5c: AnalysisWhat did you find for a relationship between genetic diversity and overfitting to the training set? Was this what you expected? **insert text here** Q6: Modifying Muation RatesNext well modify the mutation rate for our algorithm. 
Based on the results you see above, and how you expect mutation rate to modify the genetic diveristy of a population, how might you think that increasing or decreasing the mutation rate might effect the different tournament size runs above? **insert text here** Q7: ExperimentationLet's find out! Let's do a mini grid search on the `mutation_size` and `num_tournament_winners`. To keep the number of runs down, let's just look at the exteme values of `num_tournament_winners` we had above (`1` and `10`), and run these for a `mutation_size` of `0.5` and `2.0` (in addition to the value of `1.0` we had before). *Hint:* This is a good time to double check that your `mutation_size` parameter you implemented above is working correctly (i.e. your results for how it should effect diversity below make sense)*Note:* This may take some time to run (if each condition is a couple minutes). Please try debugging code with smaller runs and make sure that if there are errors in along the way, what you've run already is saved and logged (so you don't have to rerun all 10 or 15 mins if you find a bug at the end of your script). And just use this time to go grab a coffee (or do some reading in your lovely evolutionary computation textbooks)!num_runs = 5 total_generations = 50 genome_length = 14*14*10 num_elements_to_mutate = genome_length mutation_size = ... num_parents = 50 num_children = 50 tournament_size = ... num_tournament_winners = ... ...Q8: VisualizePlease plot the results of these experiments (both fitness over time, and diveristy)...Q8b: AnalysisWhat patterns do you see? Did you expect this given the implications of each independently? Does the level of diversity match your intuition about how well search should perform? Does this tell you anything about the role/interaction of variation (e.g. mutation rate) and selection (e.g. tournament parameters)? **insert text here** Q9: Dynamic Mutation RateWe talked in class about many way to have dynamic or adaptive mutation rates. Let's experiment with the simplest form of this, a mutation rate that changes linearly over generational time, from some provided starting value to some provided ending value. Please modify your evolutionary algorithm code below to enable this.def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, genome_length=10, num_elements_to_mutate=1, mutation_size_start=1.0, mutation_size_end = 0.1, crossover=True, tournament_size=4, num_tournament_winners=2): """ Evolutinary Algorithm (copied from the basic hillclimber in our last assignment) parameters: fitness_funciton: (callable function) that return the fitness of a genome given the genome as an input parameter (e.g. 
as defined in Landscape) total_generations: (int) number of total iterations for stopping condition num_parents: (int) the number of parents we downselect to at each generation (mu) num_childre: (int) the number of children (note: parents not included in this count) that we baloon to each generation (lambda) genome_length: (int) length of the genome to be evoloved num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation) mutation_size_start: (float) scaling parameter of the magnitidue of mutations for floating point vectors at the beginning of search mutation_size_end: (float) scaling parameter of the magnitidue of mutations for floating point vectors at the end of search (note: if same as mutation_size_start, mutation rate is static, otherwise mutation rate is linearly interpolated between the two) crossover: (bool) whether to perform crossover when generating children tournament_size: (int) number of individuals competing in each tournament num_tournament_winners: (int) number of individuals selected as future parents from each tournament (must be less than tournament_size) returns: fitness_over_time: (numpy array) track record of the top fitness value at each generation solutions_over_time: (numpy array) track record of the top genome value at each generation diversity_over_time: (numpy array) track record of the population genetic diversity at each generation """ ... return fitness_over_time, solutions_over_time, diversity_over_timeQ9b: ExperimentationPlease peform a set of runs which decrease the mutation rate from `1.0` to `0.1` linearly over the 50 generations of search for a tournament of size `20` with `1` winner selected.num_runs = 5 total_generations = 50 genome_length = 14*14*10 proportion_elements_to_mutate = 1.0 mutation_size_start = 1.0 mutation_size_end = 0.1 num_parents = 50 num_children = 50 tournament_size = 20 num_tournament_winners = 1 ...Q10: VisualizePlease plot (fitness and diversity of) the dynamic mutation rate against fixed mutation rates of `1.0` and `0.5` for the same tournament parameters....Personnel Scheduling Library> This library provides data structures and code that are useful for personnel scheduling We will have more documentation later. Install To install on your computer, the easiest way may be to use a distribution such as Anaconda or Miniconda. I would recommend creating an own conda environment (see https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) for this project, you can set up the environment by calling `conda env create -f environment.yml` from the root directory of the clone of the git repository. After having done this, you can start Jupyter Lab from your original `base` environment, and choose the `personnel_scheduling` kernel that has all needed packages installed. General Project StructureThe project uses [nbdev](https://nbdev.fast.ai/) an environment where we can develop in notebooks, and code can be automatically exported in python files. This way, we can use multiple notebooks. (nbdev has a lot more functionality, but this is most important for us here.). Please find the documentation for nbdev at https://nbdev.fast.ai/Currently, two basic notebooks (00_datastructures and 00_reporting) and two notebooks for shift scheduling problem instances (The Demassey instances, and the Dahmen instances), providing implementations of the rulesets and functionality for reading instances. 
Workflow for development and experimentationWhen you have changed something in one of the basic notebooks, make sure that you save the notebook and execute the function `notebook2script()` (which you already find in the last cells of each notebook and at the beginning of the experimentation notebook).After that, to make sure that everything is clean, it is recommended to shutdown and restart the current kernel. Workflow for collaboration / gitThe repository is hosted on git, and you should regularly commit your changes to a git and push them to github. Please only develop in your own branches, you can later create pull requests that are merged in the master branch. See https://ericmjl.github.io/essays-on-data-science/workflow/gitflow/ for a good description of the workflow I intend to use. Before committing Before commiting, please do the following steps:- save and close all notebooks- make sure that all notebooks work- call the follown cell which: - syncs the notebooks with the python-files - cleans the notebooks and removes all outputs.- after running this cell, please just close this index-notebook without saving.#hide from nbdev.clean import * from nbdev.export import * notebook2script() nbdev_clean_nbs()Marketa Analytics - E-Commerce data ContentThis is a transactional data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail.The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers. This dataframe contains 8 variables that correspond to:InvoiceNo: Invoice number. Nominal, a 6-digit integral number uniquely assigned to each transaction. If this code starts with letter 'c', it indicates a cancellation.StockCode: Product (item) code. Nominal, a 5-digit integral number uniquely assigned to each distinct product.Description: Product (item) name. Nominal.Quantity: The quantities of each product (item) per transaction. Numeric.InvoiceDate: Invice Date and time. Numeric, the day and time when each transaction was generated.UnitPrice: Unit price. Numeric, Product price per unit in sterling.CustomerID: Customer number. Nominal, a 5-digit integral number uniquely assigned to each customer.Country: Country name. Nominal, the name of the country where each customer resides. Importing and Data Cleaning#importing all important package.. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") #load data into pandas dataframe.. df = pd.read_csv('C:/Users/harini/Desktop/NEUCourses/ADM/Assignment 1/data.csv',encoding = "ISO-8859-1") df.head() #information of dataset.. df.info() df.Country.value_counts().head(5) df = df[df.Country == 'United Kingdom'] df.Quantity.describe() #Quantity can not be negative so remove negative values.. df = df[df['Quantity']>0] df.Quantity.describe() df = df[df['UnitPrice']>0] df.UnitPrice.describe() #checking null values in all columns in dataset null_values = pd.DataFrame(df.isnull().sum(),columns=['count_value']) ax = sns.barplot(null_values.count_value,null_values.index)Removing Missing Valuedf.dropna(subset=['CustomerID'],how='all',inplace=True) df.isnull().sum() #last date available in our dataset df['InvoiceDate'].max() #use latest date in our data as current date.. 
import datetime as dt now = dt.date(2011,12,9) df['date'] = pd.DatetimeIndex(df.InvoiceDate).date df['month'] = pd.DatetimeIndex(df.InvoiceDate).month_name(locale = 'English') df['year'] = pd.DatetimeIndex(df.InvoiceDate).year df.head()RFM Analysis#group by customer by last date they purchased... recency_df = df.groupby(['CustomerID'],as_index=False)['date'].max() recency_df.columns = ['CustomerID','LastPurchaseDate'] recency_df.head() #calculate how often he is purchasing with reference to latest date in days.. recency_df['Recency'] = recency_df.LastPurchaseDate.apply(lambda x : (now - x).days) recency_df.drop(columns=['LastPurchaseDate'],inplace=True) #check frequency of customer means how many transaction has been done.. frequency_df = df.copy() frequency_df.drop_duplicates(subset=['CustomerID','InvoiceNo'], keep="first", inplace=True) frequency_df = frequency_df.groupby('CustomerID',as_index=False)['InvoiceNo'].count() frequency_df.columns = ['CustomerID','Frequency'] frequency_df.head() #calculate how much a customer spend in the each transaction... df['Total_cost'] = df['UnitPrice'] * df['Quantity'] df.columns #check summed up spend of a customer with respect to latest date.. monetary_df=df.groupby(['CustomerID'],as_index=False)['Total_cost'].sum() monetary_df.columns = ['CustomerID','Monetary'] monetary_df.head() #Combine all together all dataframe in so we have recency, frequency and monetary values together.. #combine first recency and frequency.. rf = recency_df.merge(frequency_df,left_on='CustomerID',right_on='CustomerID') #combibe rf frame with monetary values.. rfm = rf.merge(monetary_df,left_on='CustomerID',right_on='CustomerID') #rfm.set_index('CustomerID',inplace=True) rfm.head() #checking correctness of output.. df[df.CustomerID == 12346.0] (now - dt.date(2011,1,18)).days == 325 #bring all the quartile value in a single dataframe rfm_segmentation = rfm.copy()Customer Segmentation using RFM Analysisfrom sklearn.cluster import KMeans # get right number of cluster for K-means so we neeed to loop from 1 to 20 number of cluster and check score. #Elbow method is used to represnt that. Nc = range(1, 20) kmeans = [KMeans(n_clusters=i) for i in Nc] score = [kmeans[i].fit(rfm_segmentation).score(rfm_segmentation) for i in range(len(kmeans))] plt.plot(Nc,score) plt.xlabel('Number of Clusters') plt.ylabel('Score') plt.title('Elbow Curve') plt.show() #fitting data in Kmeans theorem. kmeans = KMeans(n_clusters=3, random_state=0).fit(rfm_segmentation) # this creates a new column called cluster which has cluster number for each row respectively. rfm_segmentation['cluster'] = kmeans.labels_ #check our hypothesis rfm_segmentation[rfm_segmentation.cluster == 0].head(10) rfm_segmentation[rfm_segmentation.cluster == 1].head(5) rfm_segmentation[rfm_segmentation.cluster == 2].head(10) rfm_segmentation.head() rfm_segmentation = rfm_segmentation.reset_index(drop = 1) rfm_segmentation['Status'] = 0 for i in range(0,len(rfm_segmentation)): if(rfm_segmentation['cluster'][i] == 0): rfm_segmentation['Status'][i] = "Silver" elif(rfm_segmentation['cluster'][i] == 1): rfm_segmentation['Status'][i]= "Gold" else: rfm_segmentation['Status'][i] ="Platinum" rfm_segmentation.head(10) ''' cluster 0 have high recency rate which is bad. cluster 1 and cluster 2 having low so they are in race of platinum and gold customer. ''' sns.boxplot(rfm_segmentation.cluster,rfm_segmentation.Recency) ''' cluster 0 have low frequency rate which is bad. 
cluster 1 and cluster 2 having high so they are in race of platinum and gold customer. ''' sns.boxplot(rfm_segmentation.cluster,rfm_segmentation.Frequency) ''' cluster 0 have low Monetary rate which is bad. cluster 1 have highest Montary (money spend) platinum where as cluster 2 have medium level(Gold) and cluster 0 is silver customer. ''' sns.boxplot(rfm_segmentation.cluster,rfm_segmentation.Monetary) seg_data = df.merge(rfm_segmentation,left_on='CustomerID',right_on='CustomerID') seg_data.columns seg_data.head()Creating Discountsgrouped = seg_data[['Description','UnitPrice']].groupby(['Description']) seg_data['discount'] = 0 for i in range(0,len(seg_data)): if(i%50000 == 0): print(i) des = seg_data['Description'][i] if seg_data['UnitPrice'][i] < grouped.get_group(des).max()[1]: seg_data['discount'][i] = 1 platinum = seg_data[seg_data['Status'] == 'Platinum'] gold = seg_data[seg_data['Status'] == 'Gold'] silver = seg_data[seg_data['Status'] == 'Silver'] print(platinum.shape,gold.shape,silver.shape) seg_data.head(10)Product Segmentationimport pickle import nltk, warnings from nltk.tokenize import sent_tokenize, word_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from string import digits, punctuation from scipy.stats import chi2_contingency from sklearn.preprocessing import LabelEncoder, StandardScaler, Normalizer from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score from sklearn import preprocessing, model_selection, metrics, feature_selection from sklearn.model_selection import GridSearchCV, learning_curve from sklearn.svm import SVC from sklearn.metrics import confusion_matrix from sklearn import neighbors, linear_model, svm, tree, ensemble from sklearn.decomposition import PCA, TruncatedSVD from sklearn.manifold import TSNE from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from wordcloud import WordCloud, STOPWORDS X = seg_data1["Description"].unique() stemmer = nltk.stem.porter.PorterStemmer() stopword = nltk.corpus.stopwords.words('english') def stem_and_filter(doc): tokens = [stemmer.stem(w) for w in analyzer(doc)] return [token for token in tokens if token.isalpha()] analyzer = TfidfVectorizer().build_analyzer() CV = TfidfVectorizer(lowercase=True, stop_words="english", analyzer=stem_and_filter, min_df=0.00, max_df=0.3) # we remove words if it appears in more than 30 % of the corpus (not found stopwords like Box, Christmas and so on) TF_IDF_matrix = CV.fit_transform(X) print("TF_IDF_matrix :", TF_IDF_matrix.shape, "of", TF_IDF_matrix.dtype) svd = TruncatedSVD(n_components = 100) normalizer = Normalizer(copy=False) TF_IDF_embedded = svd.fit_transform(TF_IDF_matrix) TF_IDF_embedded = normalizer.fit_transform(TF_IDF_embedded) print("TF_IDF_embedded :", TF_IDF_embedded.shape, "of", TF_IDF_embedded.dtype) score_tfidf = [] x = list(range(5, 155, 10)) for n_clusters in x: kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=10) kmeans.fit(TF_IDF_embedded) clusters = kmeans.predict(TF_IDF_embedded) silhouette_avg = silhouette_score(TF_IDF_embedded, clusters) rep = np.histogram(clusters, bins = n_clusters-1)[0] score_tfidf.append(silhouette_avg) plt.figure(figsize=(20,16)) plt.subplot(2, 1, 1) plt.plot(x, score_tfidf, label="TF-IDF matrix") plt.title("Evolution of the Silhouette Score") plt.legend()The highest value for the silhouette score is when there are 135 clusters. 
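For reference, that choice can also be read off programmatically from the lists built in the loop above; a small sketch, assuming `x` still holds the candidate cluster counts and `score_tfidf` the matching silhouette scores:

```python
import numpy as np

# Pick the candidate cluster count with the highest silhouette score.
best_n_clusters = x[int(np.argmax(score_tfidf))]
print("best number of clusters:", best_n_clusters)
```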
Product Segmentation

import pickle
import nltk, warnings
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from string import digits, punctuation
from scipy.stats import chi2_contingency
from sklearn.preprocessing import LabelEncoder, StandardScaler, Normalizer
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn import preprocessing, model_selection, metrics, feature_selection
from sklearn.model_selection import GridSearchCV, learning_curve
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn import neighbors, linear_model, svm, tree, ensemble
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from wordcloud import WordCloud, STOPWORDS

X = seg_data1["Description"].unique()
stemmer = nltk.stem.porter.PorterStemmer()
stopword = nltk.corpus.stopwords.words('english')

def stem_and_filter(doc):
    tokens = [stemmer.stem(w) for w in analyzer(doc)]
    return [token for token in tokens if token.isalpha()]

analyzer = TfidfVectorizer().build_analyzer()
# we remove a word if it appears in more than 30% of the corpus (this catches frequent terms like BOX or CHRISTMAS that are not in the stopword list)
CV = TfidfVectorizer(lowercase=True, stop_words="english", analyzer=stem_and_filter, min_df=0.00, max_df=0.3)
TF_IDF_matrix = CV.fit_transform(X)
print("TF_IDF_matrix :", TF_IDF_matrix.shape, "of", TF_IDF_matrix.dtype)

svd = TruncatedSVD(n_components = 100)
normalizer = Normalizer(copy=False)
TF_IDF_embedded = svd.fit_transform(TF_IDF_matrix)
TF_IDF_embedded = normalizer.fit_transform(TF_IDF_embedded)
print("TF_IDF_embedded :", TF_IDF_embedded.shape, "of", TF_IDF_embedded.dtype)

score_tfidf = []
x = list(range(5, 155, 10))
for n_clusters in x:
    kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=10)
    kmeans.fit(TF_IDF_embedded)
    clusters = kmeans.predict(TF_IDF_embedded)
    silhouette_avg = silhouette_score(TF_IDF_embedded, clusters)
    rep = np.histogram(clusters, bins = n_clusters-1)[0]
    score_tfidf.append(silhouette_avg)

plt.figure(figsize=(20,16))
plt.subplot(2, 1, 1)
plt.plot(x, score_tfidf, label="TF-IDF matrix")
plt.title("Evolution of the Silhouette Score")
plt.legend()

The silhouette score is highest at 135 clusters, so we'll choose this value.

n_clusters = 135
kmeans = KMeans(init='k-means++', n_clusters = n_clusters, n_init=30, random_state=0)
proj = kmeans.fit_transform(TF_IDF_embedded)
clusters = kmeans.predict(TF_IDF_embedded)
plt.figure(figsize=(10,10))
plt.scatter(proj[:,0], proj[:,1], c=clusters)
plt.title("ACP with 135 clusters", fontsize="20")

X = list(X)
clusters = list(clusters)
prod_seg = pd.DataFrame(list(zip(X, clusters)), columns =['Description', 'prod_cluster'])
prod_seg.to_csv('C:/Users/harini/Desktop/NEUCourses/ADM/Assignment 1/prod_seg.csv')

Y = [0,1,2,3,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
     41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,
     78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,
     111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134]
cluster_name = ['STATIONERY',' PURSE','ZINC','SETS','VINTAGE','BOXES',' METAL',' BEADS','CUSHION',' ROSE',' BAG',
                ' HEART',' EGG',' FRAME',' RETROSPOT',' DECORATION',' GARDEN',' HOME',' CAKE',' NECKLACE',' ART',
                ' EASTER',' CANDLE',' LOLA',' LIST',' SILVER',' DOOR',' CARD',' PACK',' RIBBON',' PENCIL',' POLKADOT',
                ' BRACELET',' PAPER',' FLOWER',' HOLDER',' WRAP',' TILE',' TRAY',' MINI',' BUTTERFLY',' STAND',' JAR',
                ' BLACK',' DRAWER',' HEART',' TEA',' HOUSEHOLD',' FELT',' LUGGAGE',' DOLLY',' WITH',' T LIGHT',' PAISLEY',
                ' VINTAGE',' HANGING',' BIRD',' STAR',' IN',' BOX',' IVORY',' KEY RING',' STICKER',' CERAMIC, STRAWBERRY',
                ' EARRING',' BAG',' CHOCOLATE MOULD',' HOOK, PANTRY',' KIT',' WHITE',' BOWL',' WOODEN',' HEART',' NOTEBOOK',
                ' SET',' MUG',' MIRROR',' BLUE',' BIN, BASKET, TUMBLER',' POT',' HOLDER',' MAGIC',' SKULL',' SPACEBOY',
                ' CLOCK',' SET',' GARLAND',' BIRD',' SET',' BELL',' SMALL',' CHRISTMAS',' DESIGN',' GLASS',' SPOT',' BOX',
                ' FELTCRAFT',' BRACELET',' VINTAGE BAG',' TIN',' TOY',' SWEETHEART',' ASSORTED',' TRADITIONAL',' ZINC',
                ' PARTY',' CHRISTMAS',' BOWL',' WOOD',' GREEN',' PAPER',' LIGHT',' LARGE',' MUG',' PURPLE',' ENAMEL',
                ' JUMBO BAG',' RED',' DOORMAT',' LOVE',' CREAM',' PINK',' GOLD',' APRON',' POLYESTER',' BAG',' ACRYLIC',
                ' CHARM',' SET',' INCENSE',' NAPKIN',' FLOWER',' BOTTLE']
prod_seg_name = pd.DataFrame(list(zip(Y, cluster_name)), columns =['prod_cluster', 'Cluster'])
prod_seg_name.head()
seg_data.columns

merge_1 = seg_data1.merge(prod_seg,left_on='Description',right_on='Description')
merge_2 = merge_1.merge(prod_seg_name, left_on = 'prod_cluster',right_on='prod_cluster')
merge_2.columns
final_data_nonuk = merge_2[['InvoiceNo', 'StockCode', 'Description', 'Quantity', 'InvoiceDate', 'UnitPrice',
                            'CustomerID', 'Country', 'date', 'Total_cost','Status', 'discount','Cluster']]
final_data_nonuk['month'] = pd.DatetimeIndex(final_data_nonuk.InvoiceDate).month_name(locale = 'English')
final_data_nonuk['year'] = pd.DatetimeIndex(final_data_nonuk.InvoiceDate).year
final_data_nonuk.to_csv('C:/Users/harini/Desktop/NEUCourses/ADM/Assignment 1/final_data_nonuk.csv')
platinum = final_data_nonuk[final_data_nonuk['Status'] == 'Platinum']
gold = final_data_nonuk[final_data_nonuk['Status'] == 'Gold']
silver = final_data_nonuk[final_data_nonuk['Status'] == 'Silver']
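Hand-typing 135 cluster labels is error-prone; the strongest TF-IDF terms of each cluster can suggest names automatically. The sketch below is my addition (not from the original notebook); it assumes the CV, svd and kmeans objects fitted above, and get_feature_names_out() requires scikit-learn >= 1.0 (older releases expose get_feature_names() instead). The names top_terms and auto_names are illustrative.

terms = CV.get_feature_names_out()
# Map cluster centres from the 100-dimensional SVD space back to (approximate) TF-IDF space.
centers = svd.inverse_transform(kmeans.cluster_centers_)
top_terms = {c: [terms[i] for i in np.argsort(centers[c])[::-1][:3]] for c in range(n_clusters)}
auto_names = pd.DataFrame({'prod_cluster': list(top_terms.keys()),
                           'Cluster': [' '.join(ts).upper() for ts in top_terms.values()]})
auto_names.head()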
Parameter Management

Once we have chosen an architecture and set our hyperparameters, we proceed to the training loop, where our goal is to find parameter values that minimize our loss function. After training, we will need these parameters in order to make future predictions. Additionally, we will sometimes wish to extract the parameters either to reuse them in some other context, to save our model to disk so that it may be executed in other software, or for examination in the hope of gaining scientific understanding. Most of the time, we will be able to ignore the nitty-gritty details of how parameters are declared and manipulated, relying on deep learning frameworks to do the heavy lifting. However, when we move away from stacked architectures with standard layers, we will sometimes need to get into the weeds of declaring and manipulating parameters. In this section, we cover the following:

* Accessing parameters for debugging, diagnostics, and visualizations.
* Parameter initialization.
* Sharing parameters across different model components.

We start by focusing on an MLP with one hidden layer.

import tensorflow as tf

net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4, activation=tf.nn.relu),
    tf.keras.layers.Dense(1),
])
X = tf.random.uniform((2, 4))
net(X)

Parameter Access

Let us start with how to access parameters from the models that you already know. When a model is defined via the `Sequential` class, we can first access any layer by indexing into the model as though it were a list. Each layer's parameters are conveniently located in its `weights` attribute. We can inspect the parameters of the second fully-connected layer as follows.

print(net.layers[2].weights)

The output tells us a few important things. First, this fully-connected layer contains two parameters, corresponding to that layer's weights and biases, respectively. Both are stored as single precision floats (float32). Note that the names of the parameters allow us to uniquely identify each layer's parameters, even in a network containing hundreds of layers.

Targeted Parameters

Note that each parameter is represented as an instance of the parameter class. To do anything useful with the parameters, we first need to access the underlying numerical values. There are several ways to do this. Some are simpler while others are more general. The following code extracts the bias from the second neural network layer, which returns a parameter class instance, and further accesses that parameter's value.

print(type(net.layers[2].weights[1]))
print(net.layers[2].weights[1])
print(tf.convert_to_tensor(net.layers[2].weights[1]))

tf.Tensor([0.], shape=(1,), dtype=float32)

All Parameters at Once

When we need to perform operations on all parameters, accessing them one-by-one can grow tedious. The situation can grow especially unwieldy when we work with more complex blocks (e.g., nested blocks), since we would need to recurse through the entire tree to extract each sub-block's parameters. Below we demonstrate accessing the parameters of the first fully-connected layer vs. accessing all layers.

print(net.layers[1].weights)
print(net.get_weights())

[array([[ 0.7081526 , -0.01244313, 0.55799276, -0.6492791 ], [ 0.22770828, -0.770811 , -0.54992485, -0.34173292], [-0.5914638 , 0.4005019 , -0.13743204, 0.28752023], [-0.7954682 , 0.18603677, 0.1703785 , -0.03160411]], dtype=float32), array([0., 0., 0., 0.], dtype=float32), array([[-0.70263696], [-0.22071278], [ 0.70914674], [ 0.6964009 ]], dtype=float32), array([0.], dtype=float32)]

This provides us with another way of accessing the parameters of the network as follows.

net.get_weights()[1]
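As a small illustration of operating on all parameters at once (my own sketch, not part of the original text), we can reduce over every trainable variable of the network, here to compute a global L2 norm:

# Global L2 norm across all trainable parameters of `net`.
global_norm = tf.sqrt(sum(tf.reduce_sum(tf.square(w)) for w in net.trainable_variables))
print(global_norm)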
Collecting Parameters from Nested Blocks

Let us see how the parameter naming conventions work if we nest multiple blocks inside each other. For that we first define a function that produces blocks (a block factory, so to speak) and then combine these inside yet larger blocks.

def block1(name):
    return tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)

def block2():
    net = tf.keras.Sequential()
    for i in range(4):
        # Nested here
        net.add(block1(name=f'block-{i}'))
    return net

rgnet = tf.keras.Sequential()
rgnet.add(block2())
rgnet.add(tf.keras.layers.Dense(1))
rgnet(X)

Now that we have designed the network, let us see how it is organized.

print(rgnet.summary())

Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
sequential_2 (Sequential)    (2, 4)                    80
_________________________________________________________________
dense_6 (Dense)              (2, 1)                    5
=================================================================
Total params: 85
Trainable params: 85
Non-trainable params: 0
_________________________________________________________________
None

Since the layers are hierarchically nested, we can also access them as though indexing through nested lists. For instance, we can access the first major block, within it the second sub-block, and within that the bias of the first layer, as follows.

rgnet.layers[0].layers[1].layers[1].weights[1]

Parameter Initialization

Now that we know how to access the parameters, let us look at how to initialize them properly. We discussed the need for proper initialization in :numref:`sec_numerical_stability`. The deep learning framework provides default random initializations to its layers. However, we often want to initialize our weights according to various other protocols. The framework provides the most commonly used protocols, and also allows one to create a custom initializer. By default, Keras initializes weight matrices uniformly by drawing from a range that is computed according to the input and output dimension, and the bias parameters are all set to zero. TensorFlow provides a variety of initialization methods both in the root module and the `keras.initializers` module.
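To make the default concrete, here is a small worked check of my own (assuming Keras' default glorot_uniform initializer): the weights are drawn from U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)).

import math
fan_in, fan_out = 4, 4  # the 4-unit hidden layer above receives 4 input features
limit = math.sqrt(6 / (fan_in + fan_out))
print(limit)  # ~0.866; consistent with the sampled weights printed earlier, which all lie within this range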
Built-in Initialization

Let us begin by calling on built-in initializers. The code below initializes all weight parameters as Gaussian random variables with standard deviation 0.01, while bias parameters are cleared to zero.

net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(
        4, activation=tf.nn.relu,
        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01),
        bias_initializer=tf.zeros_initializer()),
    tf.keras.layers.Dense(1)])
net(X)
net.weights[0], net.weights[1]

We can also initialize all the parameters to a given constant value (say, 1).

net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(
        4, activation=tf.nn.relu,
        kernel_initializer=tf.keras.initializers.Constant(1),
        bias_initializer=tf.zeros_initializer()),
    tf.keras.layers.Dense(1),
])
net(X)
net.weights[0], net.weights[1]

We can also apply different initializers for certain blocks. For example, below we initialize the first layer with the Xavier initializer and initialize the second layer to a constant value of 1.

net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(
        4, activation=tf.nn.relu,
        kernel_initializer=tf.keras.initializers.GlorotUniform()),
    tf.keras.layers.Dense(
        1, kernel_initializer=tf.keras.initializers.Constant(1)),
])
net(X)
print(net.layers[1].weights[0])
print(net.layers[2].weights[0])

Custom Initialization

Sometimes, the initialization methods we need are not provided by the deep learning framework. In the example below, we define an initializer for any weight parameter $w$ using the following strange distribution:

$$\begin{aligned} w \sim \begin{cases} U(5, 10) & \text{ with probability } \frac{1}{4} \\ 0 & \text{ with probability } \frac{1}{2} \\ U(-10, -5) & \text{ with probability } \frac{1}{4} \end{cases}\end{aligned}$$

Here we define a subclass of `Initializer` and implement the `__call__` function that returns a desired tensor given the shape and data type.

class MyInit(tf.keras.initializers.Initializer):
    def __call__(self, shape, dtype=None):
        data = tf.random.uniform(shape, -10, 10, dtype=dtype)
        factor = (tf.abs(data) >= 5)
        factor = tf.cast(factor, tf.float32)
        return data * factor

net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(
        4, activation=tf.nn.relu,
        kernel_initializer=MyInit()),
    tf.keras.layers.Dense(1),
])
net(X)
print(net.layers[1].weights[0])

Note that we always have the option of setting parameters directly.

net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)
net.layers[1].weights[0][0, 0].assign(42)
net.layers[1].weights[0]
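Since the introduction also mentioned saving parameters to disk, here is a minimal sketch of my own (not part of the original section) using the standard Keras checkpoint API; the file name is an arbitrary placeholder.

# Save the current parameters and restore them into a freshly built network of the same architecture.
net.save_weights('mlp_weights.ckpt')  # hypothetical file name
clone = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4, activation=tf.nn.relu),
    tf.keras.layers.Dense(1),
])
clone(X)  # build the variables before loading
clone.load_weights('mlp_weights.ckpt')
print(all((a == b).all() for a, b in zip(net.get_weights(), clone.get_weights())))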
Tied Parameters

Often, we want to share parameters across multiple layers. Let us see how to do this elegantly. In the following we allocate a dense layer and then use its parameters specifically to set those of another layer.

# tf.keras behaves a bit differently here: it removes the duplicate layer automatically.
shared = tf.keras.layers.Dense(4, activation=tf.nn.relu)
net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    shared,
    shared,
    tf.keras.layers.Dense(1),
])
net(X)
# Check whether the parameters are different
print(len(net.layers) == 3)

True

LSTM code in Tensorflow 2.0

Predicting a random sequence with Keras

from random import randint
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
import matplotlib.pyplot as plt

def generate_sequence(length, n_features):
    """Generate one sequence of the given length whose values are random integers below n_features."""
    return [randint(0, n_features - 1) for _ in range(length)]

def one_hot_encode(sequence, n_features):
    encoding = list()
    for value in sequence:
        vector = [0 for _ in range(n_features)]
        vector[value] = 1
        encoding.append(vector)
    return np.array(encoding)

def one_hot_decode(encoded_seq):
    return [np.argmax(vector) for vector in encoded_seq]

% sequence = generate_sequence(5, 10)
[4, 6, 1, 7, 3]
% encoded = one_hot_encode(sequence, 10)
[[0 0 0 0 1 0 0 0 0 0] [0 0 0 0 0 0 1 0 0 0] [0 1 0 0 0 0 0 0 0 0] [0 0 0 0 0 0 0 1 0 0] [0 0 0 1 0 0 0 0 0 0]]
% decoded = one_hot_decode(encoded)
[4, 6, 1, 7, 3]

def generate_example(length, n_features, out_index):
    # Generate just one training sample (one sequence)
    sequence = generate_sequence(length, n_features)
    encoded = one_hot_encode(sequence, n_features)
    X = encoded.reshape((1, length, n_features))
    y = encoded[out_index].reshape(1, n_features)
    return X, y

% X, y = generate_example(5, 10, 2)
% print(X.shape, y.shape)
(1, 5, 10) (1, 10)
% print(X)
[[[0 0 0 1 0 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 1] [0 0 0 0 0 1 0 0 0 0] [0 0 1 0 0 0 0 0 0 0] [0 0 0 0 0 0 0 0 1 0]]]
% print(y)
[[0 0 0 0 0 1 0 0 0 0]]

length = 5
out_index = 2  # which element of the input the echo sequence prediction should return
n_features = 10
hidden_size = 25

model = Sequential()
model.add(LSTM(25, input_shape=(length, n_features)))
model.add(Dense(n_features, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()

losses = []
for i in range(10000):
    X, y = generate_example(length, n_features, out_index)
    history = model.fit(X, y, epochs=1, verbose=0)
    losses.append(history.history['loss'][0])
plt.plot(losses)
plt.show()

# evaluate model
correct = 0
for i in range(100):
    X, y = generate_example(length, n_features, out_index)
    yhat = model.predict(X)
    if one_hot_decode(yhat) == one_hot_decode(y):
        correct += 1
print('Accuracy: %f' % ((correct / 100) * 100.0))

# predict on new data
X, y = generate_example(length, n_features, out_index)
yhat = model.predict(X)
print('Sequence: %s' % [one_hot_decode(x) for x in X])
print('Expected: %s' % one_hot_decode(y))
print('Predicted: %s' % one_hot_decode(yhat))

Sequence: [[2, 1, 7, 6, 5]]
Expected: [7]
Predicted: [7]

*If running in Colab, run this first to install ACN-Portal.*

import subprocess
import sys
if 'google.colab' in str(get_ipython()):
    print('Running on CoLab')
    subprocess.check_call([sys.executable, "-m", "pip", "install", "acnportal"])
    subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/caltech-netlab/adacharge"])

import os
import subprocess
if not os.path.exists("data/jpl_weekday_40.pkl"):
    if not os.path.exists("data"):
        subprocess.run(["mkdir", "data"])
    subprocess.run(["wget", "-P", "./data", "https://ev.caltech.edu/assets/data/gmm/jpl_weekday_40.pkl"])

Comparing Infrastructure Designs using ACN-Sim

Last updated: 4/16/2020

In this case study, we demonstrate how ACN-Data and ACN-Sim can be used to evaluate infrastructure configurations and
algorithms. We consider the case of a site host who expects to charge approximately 100 EVs per day with a demand pattern similar to that of JPL.The site host has several options, including* 102 Uncontrolled Level-1 EVSEs with a 200 kW Transformer* 30 Uncontrolled Level-2 EVSEs with a 200 kW Transformer* 102 Uncontrolled Level-2 EVSEs with a 670 kW Transformer* 102 Smart Level-2 EVSEs running LLF with a 200 kW TransformerWe evaluate the scenarios on the number of times drivers would have to swap parking places to allow other drivers to charge, the percentage of total demand met, and the operating costs (calculated using ACN-Sim's integration with utility tariffs). This demonstrates the significant benefits of developing smart EV charging systems in terms of reducing both capital costs (transformer capacity) and operating costs.import acnportal from copy import deepcopy import warnings import pytz import numpy as np import pandas as pd import pickle from datetime import datetime from acnportal import acnsim from acnportal import algorithms from acnportal.signals.tariffs.tou_tariff import TimeOfUseTariff from acnportal.acnsim.events import GaussianMixtureEvents from acnportal.contrib.acnsim import StochasticNetwork import adachargeCharging Network DesignsTo define our charging network options, we will use two functions which generate a StochasticNetwork object. The StochasticNetwork assigns users to spaces dynamically based on available spaces. In this example, we will assume each driver has equal preference for all spots.If all spaces are taken, drivers join a queue which is drained as drivers finish charging and move their vehicle (the early departure option specifies that drivers move their vehicle when it is done charging rather than their normal departure time). We record each time that the user leave and is replaced with someone from the queue as a swap. Swaps are undesirable as they waste time and are frustrating for users. Despite this, swapping is a common practice in many charging facilities where the number of users exceeds the number of EVSEs.def level_1_network(transformer_cap=200, evse_per_phase=34): """ Configurable charging network for level-1 EVSEs connected line to ground at 120 V. Args: transformer_cap (float): Capacity of the transformer feeding the network [kW] evse_per_phase (int): Number of EVSEs on each phase. Total number of EVSEs will be 3 * evse_per_phase. Returns: ChargingNetwork: Configured ChargingNetwork. """ network = StochasticNetwork(early_departure=True) voltage = 120 # Define the sets of EVSEs in the Caltech ACN. 
A_ids = ['A-{0}'.format(i) for i in range(evse_per_phase)] B_ids = ['B-{0}'.format(i) for i in range(evse_per_phase)] C_ids = ['C-{0}'.format(i) for i in range(evse_per_phase)] # Add Caltech EVSEs for evse_id in A_ids: network.register_evse(acnsim.FiniteRatesEVSE(evse_id, [0, 16]), voltage, 0) for evse_id in B_ids: network.register_evse(acnsim.FiniteRatesEVSE(evse_id, [0, 16]), voltage, 120) for evse_id in C_ids: network.register_evse(acnsim.FiniteRatesEVSE(evse_id, [0, 16]), voltage, -120) # Add Caltech Constraint Set I3a = acnsim.Current(A_ids) I3b = acnsim.Current(B_ids) I3c = acnsim.Current(C_ids) # Define intermediate currents I2a = (1 / 4) * (I3a - I3c) I2b = (1 / 4) * (I3b - I3a) I2c = (1 / 4) * (I3c - I3b) # Build constraint set primary_side_constr = transformer_cap * 1000 / 3 / 277 secondary_side_constr = transformer_cap * 1000 / 3 / 120 network.add_constraint(I3a, secondary_side_constr, name='Secondary A') network.add_constraint(I3b, secondary_side_constr, name='Secondary B') network.add_constraint(I3c, secondary_side_constr, name='Secondary C') network.add_constraint(I2a, primary_side_constr, name='Primary A') network.add_constraint(I2b, primary_side_constr, name='Primary B') network.add_constraint(I2c, primary_side_constr, name='Primary C') return network def level_2_network(transformer_cap=200, evse_per_phase=34): """ Configurable charging network for level-2 EVSEs connected line to line at 208 V. Args: transformer_cap (float): Capacity of the transformer feeding the network [kW] evse_per_phase (int): Number of EVSEs on each phase. Total number of EVSEs will be 3 * evse_per_phase. Returns: ChargingNetwork: Configured ChargingNetwork. """ network = StochasticNetwork(early_departure=True) voltage = 208 evse_type = 'AeroVironment' # Define the sets of EVSEs in the Caltech ACN. AB_ids = ['AB-{0}'.format(i) for i in range(evse_per_phase)] BC_ids = ['BC-{0}'.format(i) for i in range(evse_per_phase)] CA_ids = ['CA-{0}'.format(i) for i in range(evse_per_phase)] # Add Caltech EVSEs for evse_id in AB_ids: network.register_evse(acnsim.get_evse_by_type(evse_id, evse_type), voltage, 30) for evse_id in BC_ids: network.register_evse(acnsim.get_evse_by_type(evse_id, evse_type), voltage, -90) for evse_id in CA_ids: network.register_evse(acnsim.get_evse_by_type(evse_id, evse_type), voltage, 150) # Add Caltech Constraint Set AB = acnsim.Current(AB_ids) BC = acnsim.Current(BC_ids) CA = acnsim.Current(CA_ids) # Define intermediate currents I3a = AB - CA I3b = BC - AB I3c = CA - BC I2a = (1 / 4) * (I3a - I3c) I2b = (1 / 4) * (I3b - I3a) I2c = (1 / 4) * (I3c - I3b) # Build constraint set primary_side_constr = transformer_cap * 1000 / 3 / 277 secondary_side_constr = transformer_cap * 1000 / 3 / 120 network.add_constraint(I3a, secondary_side_constr, name='Secondary A') network.add_constraint(I3b, secondary_side_constr, name='Secondary B') network.add_constraint(I3c, secondary_side_constr, name='Secondary C') network.add_constraint(I2a, primary_side_constr, name='Primary A') network.add_constraint(I2b, primary_side_constr, name='Primary B') network.add_constraint(I2c, primary_side_constr, name='Primary C') return networkExperimentsIn these experiments we will run a simulation for each system configuration can compare the results on key metrics.# How long each time discrete time interval in the simulation should be. PERIOD = 5 # minutes # Voltage of the network. VOLTAGE = 208 # volts # Default maximum charging rate for each EV battery. 
DEFAULT_BATTERY_POWER = 6.6 # kW**Network Options**We consider five infrastrucuture configurations.# Network of 102 Level-1 EVSEs with a 200 kW Transformer level_1 = level_1_network(transformer_cap=200, evse_per_phase=34) # Network of 30 Level-2 EVSEs with a 200 kW Transformer level_2_200kW_30 = level_2_network(transformer_cap=200, evse_per_phase=10) # Network of 102 Level-2 EVSEs with a 200 kW Transformer level_2_200kW_102 = level_2_network(transformer_cap=200, evse_per_phase=34) # Network of 102 Level-2 EVSEs with a 680 kW Transformer level_2_680kW_102 = level_2_network(transformer_cap=680, evse_per_phase=34) # Network of 201 Level-2 EVSEs with a 200 kW Transformer level_2_200kW_201 = level_2_network(transformer_cap=200, evse_per_phase=67)**Events**We assume that our site will have a usage profile similar to JPL, so we use a Gaussian Mixture Model trained on data from weekdays at JPL to generate events for this experiment. We assume that the site will be closed on weekends, so no charging will occur.def get_synth_events(sessions_per_day): gmm = pickle.load(open('./data/jpl_weekday_40.pkl', 'rb')) # Generate a list of the number of sessions to draw for each day. # This generates 30 days of charging demands. num_evs = [0]*2 + [sessions_per_day]*5 + [0]*2 + [sessions_per_day]*5 + [0]*2 + \ [sessions_per_day]*5 + [0]*2 + [sessions_per_day]*5 + [0]*2 # Note that because we are drawing from a distribution, some sessions will be # invalid, we ignore these sessions and remove the corresponding plugin events. gen = GaussianMixtureEvents(pretrained_model=gmm, duration_min=0.08334) synth_events = gen.generate_events(num_evs, PERIOD, VOLTAGE, DEFAULT_BATTERY_POWER) return synth_events # Events with 100 EVs per weekday sessions_100 = get_synth_events(100) # Events with 200 EVs per weekday sessions_200 = get_synth_events(200) def run_experiment(network, algorithm, events): """ Run simulation for the events defined previously and the specified network / algorithm / events. """ # Timezone of the ACN we are using. timezone = pytz.timezone('America/Los_Angeles') # Start and End times are used when collecting data. 
start = timezone.localize(datetime(2019, 6, 1)) end = timezone.localize(datetime(2019, 7, 1)) sch = deepcopy(algorithm) cn = deepcopy(network) signals = {'tariff': TimeOfUseTariff('sce_tou_ev_4_march_2019')} sim = acnsim.Simulator(cn, sch, events, start, period=PERIOD, verbose=False, signals=signals) with warnings.catch_warnings(): warnings.simplefilter("ignore") sim.run() r = {'proportion_of_energy_delivered': acnsim.proportion_of_energy_delivered(sim), 'energy_delivered': sum(ev.energy_delivered for ev in sim.ev_history.values()), 'num_swaps': cn.swaps, 'num_never_charged': cn.never_charged, 'energy_cost': acnsim.energy_cost(sim), 'demand_charge': acnsim.demand_charge(sim) } r['total_cost'] = r['energy_cost'] + r['demand_charge'] r['$/kWh'] = r['total_cost'] / r['energy_delivered'] return r**Define Algorithms**We consider Uncontrolled, Least-Laxity First and Model Predictive Control (cost-minimization).uncontrolled = algorithms.UncontrolledCharging() llf = algorithms.SortedSchedulingAlgo(algorithms.least_laxity_first) def days_remaining_scale_demand_charge(rates, infrastructure, interface, baseline_peak=0, **kwargs): day_index = interface.current_time // ((60 / interface.period) * 24) days_in_month = 30 day_index = min(day_index, days_in_month - 1) scale = 1 / (days_in_month - day_index) dc = adacharge.demand_charge(rates, infrastructure, interface, baseline_peak, **kwargs) return scale * dc cost_min_obj = [adacharge.ObjectiveComponent(adacharge.total_energy, 1000), adacharge.ObjectiveComponent(adacharge.tou_energy_cost), adacharge.ObjectiveComponent(days_remaining_scale_demand_charge), adacharge.ObjectiveComponent(adacharge.quick_charge, 1e-6), adacharge.ObjectiveComponent(adacharge.equal_share, 1e-12) ] cost_min = adacharge.AdaptiveSchedulingAlgorithm(cost_min_obj, solver="ECOS", quantize=True, reallocate=True, peak_limit=1000, max_recompute=1)**Run Experiments (100 EVs)**level1_unctrl_100 = run_experiment(level_1, uncontrolled, deepcopy(sessions_100)) level2_200kW_untrl_100 = run_experiment(level_2_200kW_30, uncontrolled, deepcopy(sessions_100)) level2_670kW_unctrl_100 = run_experiment(level_2_670kW_102, uncontrolled, deepcopy(sessions_100)) level2_200kW_llf_100 = run_experiment(level_2_200kW_102, llf, deepcopy(sessions_100)) level2_200kW_cost_min_100 = run_experiment(level_2_200kW_102, cost_min, deepcopy(sessions_100))**Run Experiments (200 EVs)**level1_unctrl_200 = run_experiment(level_1, uncontrolled, deepcopy(sessions_200)) level2_200kW_untrl_200 = run_experiment(level_2_200kW_30, uncontrolled, deepcopy(sessions_200)) level2_680kW_unctrl_200 = run_experiment(level_2_680kW_102, uncontrolled, deepcopy(sessions_200)) level2_200kW_llf_200 = run_experiment(level_2_200kW_102, llf, deepcopy(sessions_200)) level2_200kW_cost_min_200 = run_experiment(level_2_200kW_102, cost_min, deepcopy(sessions_200)) level2_200kW_cost_min_201_200 = run_experiment(level_2_200kW_201, cost_min, deepcopy(sessions_200))Analyze Resultsevs_100 = pd.DataFrame({ 'Level 1: Unctrl: 200 kW : 102 EVSEs': level1_unctrl_100, 'Level 2: Unctrl: 200 kW : 30 EVSEs': level2_200kW_untrl_100, 'Level 2: Unctrl: 670 kW : 102 EVSEs': level2_670kW_unctrl_100, 'Level 2: LLF: 200 kW : 102 EVSEs': level2_200kW_llf_100, 'Level 2: Min Cost: 200 kW : 102 EVSEs': level2_200kW_cost_min_100 }) evs_100.to_csv("results/100_EV_comparison.csv") pd.read_csv("results/100_EV_comparison.csv", index_col=0) evs_200 = pd.DataFrame({ 'Level 1: Unctrl: 200 kW : 102 EVSEs': level1_unctrl_200, 'Level 2: Unctrl: 200 kW : 30 EVSEs': level2_200kW_untrl_200, 
'Level 2: Unctrl: 680 kW : 102 EVSEs': level2_680kW_unctrl_200,
    'Level 2: LLF: 200 kW : 102 EVSEs': level2_200kW_llf_200,
    'Level 2: Min Cost: 200 kW : 102 EVSEs': level2_200kW_cost_min_200,
    'Level 2: Min Cost: 200 kW : 201 EVSEs': level2_200kW_cost_min_201_200
})
evs_200.to_csv("results/200_EV_comparison.csv")
pd.read_csv("results/200_EV_comparison.csv", index_col=0)

Assignment No - 2

Question! Write a Python function to determine whether a given number is prime, and unit test it using the PyLint and unittest libraries.

Answer!

def test_prime(n):
    if (n==1):
        return False
    elif (n==2):
        return True
    else:
        for x in range(2,n):
            if(n % x==0):
                return False
        return True

print(test_prime(2))

def test_prime(n):
    if (n==1):
        return False
    elif (n==2):
        return True
    else:
        for x in range(2,n):
            if(n % x==0):
                return False
        return True

print(test_prime(4))

False

Thank You!

Assignment No - 1

Question! Write a small generator program that returns the Armstrong numbers between 1 and 1000 as a generator object.

Answer!

for num in range(1,1000):
    temp = num
    sum = 0
    while temp > 0:
        digit = temp % 10
        sum = sum + digit**3
        temp = temp // 10
    if sum == num:
        print(num)

1 153 370 371 407

Subbundles Part 4: Adjacencies

**Subbundle** - a subgroup of streamlines with a set of common properties

from utils import get_tractogram_filename
import os.path as op
from AFQ import api
import AFQ.data as afd
import numpy as np
import pandas as pd
from dipy.io.streamline import load_tractogram
import dipy.tracking.streamline as dts
import matplotlib.pyplot as plt

/Users/bloomdt/anaconda/envs/subbundles/lib/python3.8/site-packages/dask/dataframe/utils.py:13: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. import pandas.util.testing as tm

Streamlines (from Part 2)

Needed to calculate the MDF adjacency.

myafq = api.AFQ(
    bids_path=op.join(afd.afq_home, 'stanford_hardi'),
    dmriprep='vistasoft'
)
row = myafq.data_frame.iloc[0]
bundle_name = 'SLF_L'
tg_fname = get_tractogram_filename(myafq, bundle_name)
tractogram = load_tractogram(tg_fname, 'same')
streamlines = tractogram.streamlines
affine = tractogram.affine

Streamline Profiles (from Part 3)

fa_values = np.load('streamline_profile_fa.npy')
md_values = np.load('streamline_profile_md.npy')

Distance/Adjacency Matrix of Streamline Correlations

**TODO: Not sure that these matrices really are [adjacencies](https://mathworld.wolfram.com/AdjacencyMatrix.html)**
- **Question: should we perhaps simply call them correlation matrices?**

An $N \times N$ matrix of correlations of streamline tract profiles, where $N$ is the number of streamline profiles
- **single matrix and single metric**
- **single matrix and multiple metrics (weighted linear combination)**
  - manually stacked
  - learn hyperparameters
- *multiple matrices (tensor) and multiple metrics* (optional)

Streamline Correlations

NOTE: There are multiple correlation metrics; we use Pearson's r.
- **Question: are streamline profiles considered measurement, ordinal, or categorical data?**
  - For now, stick with Pearson's r.

NOTE: There are multiple Python implementations for computing correlation.
- Haven't [compared or contrasted the benefits](https://realpython.com/numpy-scipy-pandas-correlation-python/) of `NumPy`, `SciPy`, or `Pandas`

Candidate [Correlation Coefficients](https://en.wikipedia.org/wiki/Correlation_coefficient#Types):
- Pearson's r
- [Rank correlation](https://en.wikipedia.org/wiki/Rank_correlation) coefficients
  - Spearman's $\rho$
  - Kendall's $\tau$
  - ...
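If we wanted to try the rank-based alternatives listed above, pandas exposes them through the same `.corr()` call. A minimal sketch of my own (not in the original notebook), applied to the FA profiles loaded above; variable names are illustrative:

# Rank-based correlation matrices as alternatives to Pearson's r.
fa_spearman = pd.DataFrame(fa_values.T).corr(method='spearman')
fa_kendall = pd.DataFrame(fa_values.T).corr(method='kendall')  # can be noticeably slower for 465 profiles
print(fa_spearman.shape, fa_kendall.shape)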
FAfa_df = pd.DataFrame(fa_values.T) fa_corr = fa_df.corr() print(fa_corr.shape) print(fa_corr.head()) np.save('adjacency_fa.npy', fa_corr.to_numpy())(465, 465) 0 1 2 3 4 5 6 \ 0 1.000000 0.882085 0.890257 0.558226 0.633334 0.776883 0.516523 1 0.882085 1.000000 0.892869 0.691708 0.613339 0.720867 0.445386 2 0.890257 0.892869 1.000000 0.692492 0.738387 0.804009 0.646199 3 0.558226 0.691708 0.692492 1.000000 0.675228 0.649760 0.573325 4 0.633334 0.613339 0.738387 0.675228 1.000000 0.797512 0.865986 7 8 9 ... 455 456 457 458 \ 0 0.685918 0.604393 0.704893 ... 0.304867 0.318514 0.317555 0.253821 1 0.642329 0.555229 0.638696 ... 0.298098 0.338879 0.283364 0.288640 2 0.767272 0.708093 0.794917 ... 0.371554 0.405669 0.355182 0.372981 3 0.658389 0.592143 0.579051 ... 0.176189 0.307546 0.267071 0.280450 4 0.919683 0.784439 0.792001 ... 0.389692 0.363524 0.234314 0.270914 459 460 461 462 463 [...]MDmd_df = pd.DataFrame(md_values.T) md_corr = md_df.corr() print(md_corr.shape) print(md_corr.head()) np.save('adjacency_md.npy', md_corr.to_numpy())(465, 465) 0 1 2 3 4 5 6 \ 0 1.000000 0.793244 0.818938 0.559873 0.386459 0.493746 0.342482 1 0.793244 1.000000 0.930559 0.630848 0.545399 0.644990 0.471161 2 0.818938 0.930559 1.000000 0.634464 0.552601 0.677292 0.505378 3 0.559873 0.630848 0.634464 1.000000 0.650833 0.663157 0.448061 4 0.386459 0.545399 0.552601 0.650833 1.000000 0.867259 0.723318 7 8 9 ... 455 456 457 458 \ 0 0.348693 0.418816 0.473305 ... 0.656315 0.614037 0.831250 0.621922 1 0.547331 0.626289 0.712350 ... 0.663411 0.587650 0.729432 0.591102 2 0.550235 0.635176 0.715263 ... 0.689260 0.637978 0.737541 0.639177 3 0.606575 0.511968 0.566453 ... 0.519041 0.553736 0.425878 0.492955 4 0.931270 0.662321 0.669176 ... 0.715378 0.754388 0.398269 0.725093 459 460 461 462 463 [...]$\alpha$ FA + $\beta$ MD Weighted linear combinationalpha, beta = [0.5, 0.5] weighted = 0.5 * fa_corr + 0.5 * md_corr print(weighted.shape) print(weighted.head()) np.save('adjacency_wt.npy', weighted.to_numpy())(465, 465) 0 1 2 3 4 5 6 \ 0 1.000000 0.837664 0.854597 0.559049 0.509897 0.635314 0.429502 1 0.837664 1.000000 0.911714 0.661278 0.579369 0.682929 0.458273 2 0.854597 0.911714 1.000000 0.663478 0.645494 0.740650 0.575789 3 0.559049 0.661278 0.663478 1.000000 0.663031 0.656459 0.510693 4 0.509897 0.579369 0.645494 0.663031 1.000000 0.832385 0.794652 7 8 9 ... 455 456 457 458 \ 0 0.517305 0.511605 0.589099 ... 0.480591 0.466276 0.574403 0.437871 1 0.594830 0.590759 0.675523 ... 0.480754 0.463265 0.506398 0.439871 2 0.658753 0.671634 0.755090 ... 0.530407 0.521823 0.546362 0.506079 3 0.632482 0.552056 0.572752 ... 0.347615 0.430641 0.346475 0.386703 4 0.925477 0.723380 0.730588 ... 0.552535 0.558956 0.316291 0.498003 459 460 461 462 463 [...]**TODO: Coefficient of determination**In Schurr 2019 paper, main differentiating factors between SLFII and the other SLF sections are offsets in the mean FA between streamlines. 
Consider use a distance function that takes into account the offset, and not only co-variation, like:https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.htmlfrom sklearn.metrics import r2_score fgarray = np.array(dts.set_number_of_points(streamlines, 100)) r2 = pd.DataFrame([[r2_score(a,b) for a in fgarray] for b in fgarray]) print(r2.shape) print(r2.head()) np.save('adjacency_r2.npy', r2.to_numpy())(465, 465) 0 1 2 3 4 5 6 \ 0 1.000000 0.965553 0.955606 0.755725 0.172403 0.080605 0.202922 1 0.958335 1.000000 0.973569 0.883134 0.032060 -0.194007 0.079324 2 0.959581 0.976100 1.000000 0.896647 0.300170 0.134881 0.360598 3 0.768498 0.901135 0.890487 1.000000 -0.114340 -0.526090 -0.000353 4 0.474810 0.483688 0.621132 0.526388 1.000000 0.948086 0.978525 7 8 9 ... 455 456 457 458 \ 0 0.141238 0.747652 0.799847 ... 0.073243 0.513796 -0.998765 0.494018 1 -0.073921 0.843453 0.861517 ... -0.010603 0.473189 -1.474571 0.446953 2 0.247577 0.903656 0.930615 ... -0.089947 0.446519 -1.151416 0.414131 3 -0.281006 0.916792 0.884548 ... -0.427102 0.235958 -2.168506 0.183226 4 0.985146 0.748262 0.778650 ... -0.640166 0.027477 -0.413377 -0.018836 459 460 461 462 463 [...]**TODO** Distance MetricsGiven: - NxN streamlines Assuming:- same subject- same streamlinesAnything varies streamlines would violate assumptions:- comparing subjects- using different tractometry, segmentation, or metricsWant:- a distance metric that is similar to correlation - bounded between 0 and 1 - 0 signifies streamlines are infinitely far apart - 1 signifies same streamlineConsider: - MDF between every pair- Threshold distance $\theta$ - If distance is greater than threshold then streamlines are considered infinitely far apart (not part of the same subbundle) [Bundle Adjacency (BA)](https://www.nature.com/articles/s41598-020-74054-4)> Use bundle adjacency (BA) to calculate the shape similarity between the same type of bundles **across subjects and groups** ... BA uses a minimum direct flip (MDF) distance to get the distance between two streamlines> 𝐵𝐴(𝐵1,𝐵2)=0.5(𝑐𝑜𝑣𝑒𝑟𝑎𝑔𝑒(𝐵1,𝐵2)+𝑐𝑜𝑣𝑒𝑟𝑎𝑔𝑒(𝐵2,𝐵1)) MDFhttps://dipy.org/documentation/1.2.0./reference/dipy.segment/bundles-distances-mdffgarray = np.array(dts.set_number_of_points(streamlines, 100)) mdf = pd.DataFrame(dts.bundles_distances_mdf(fgarray, fgarray)) print(mdf.shape) print(mdf.head()) np.save('adjacency_mdf.npy', mdf.to_numpy())(465, 465) 0 1 2 3 4 5 6 \ 0 0.000000 2.556787 3.050165 7.892504 9.566525 9.116579 9.715490 1 2.556787 0.000000 1.846113 5.549762 9.376993 9.667726 9.184445 2 3.050165 1.846113 0.000000 5.331358 8.321434 8.769215 7.917379 3 7.892504 5.549762 5.331358 0.000000 9.645145 11.535656 8.619782 4 9.566525 9.376993 8.321434 9.645145 0.000000 2.547179 1.788271 7 8 9 ... 455 456 457 \ 0 9.153671 6.666740 5.791087 ... 12.268264 11.370953 13.095141 1 9.099032 5.035235 4.616476 ... 11.839233 10.927974 15.045596 2 7.956161 4.014346 3.047695 ... 11.788706 10.834809 14.521595 3 9.945946 4.020088 4.664461 ... 13.915105 13.095520 19.092976 4 1.372938 6.632364 6.410689 ... 
17.450544 17.250725 13.569562 458 459 460 461 462 463 464 0 10.822467[...]**TODO** Compare MatriciesOnce get streamline correlation matricies:- Begin with "eye-ball comparison" between FA and MD matrices- Then consider difference of adjacency matrices- **Question: What is the 'correct' way compare these matricies?** - Check how much information is shared (mutual information: $I(X,Y)$ or alternatively sum of conditional information -- only present in one of the matricies: $H(X,Y)-I(X,Y)=H(Y|X)+H(X|Y)$)**NOTE: we are assuming that streamlines are same (from same individual and same same tractography)** "Eye-ball" FAplt.imshow(fa_corr, cmap='hot', interpolation='nearest') plt.show() plt.imshow(r2, cmap='hot', interpolation='nearest') plt.show()MDplt.imshow(md_corr, cmap='hot', interpolation='nearest') plt.show()Absolute Differencedel_corr = np.abs(fa_corr-md_corr) plt.imshow(del_corr, cmap='hot', interpolation='nearest') plt.show()$\alpha$ FA + $\beta$ MDplt.imshow(weighted, cmap='hot', interpolation='nearest') plt.show()MDFplt.imshow(mdf, cmap='hot', interpolation='nearest') plt.show()Pareto distribution$X\sim\text{Pareto}(\alpha, \gamma)$ with pdf$$f(x) = \frac{\gamma^\alpha}{x^{\alpha+1}},\text{ }x>\gamma$$class loss_model: def __init__(self,name, parm_names): self.name = name self.parm_names = parm_names self.d= len(parm_names) def set_logp(self, X): if self.name == "Pareto": def logp(parms): α, γ = parms if np.all(parms > 0) and np.all(X >= γ) : return(len(X)*(np.log(α) + α * np.log(γ)) - (α + 1) * np.sum(np.log(X))) else: return(-np.inf) self.logp = nb.jit(nopython = True)(logp) def set_logd(self, parms): if self.name == "Pareto": def logd(x): α, γ = parms[:,0], parms[:,1] res = np.zeros(len(α)) s = np.logical_and(np.logical_and(α > 0, γ > 0), γ <= x) res[np.where(s)] = np.log(α[s]) + α[s] * np.log(γ[s]) - (α[s] + 1) * np.log(x) res[np.where(np.invert(s))] = -np.inf return(res) self.logd = logd def set_cdf(self): if self.name == "Pareto": def cdf(parms, x): α, γ = parms return(1 - (γ / x)**α) self.cdf = nb.jit(nopython = True)(cdf) def set_pdf(self): if self.name == "Pareto": def pdf(parms, x): α, γ = parms return(α * γ**α / x**(α + 1)) self.pdf = nb.jit(nopython = True)(pdf) def set_ppf(self): if self.name == "Pareto": def ppf(parms, y): α, γ = parms return( γ * ((1-y)**(-1 / α))) self.ppf = ppf def sample(self, parms, n): if self.name == "Pareto": α, γ = parms return( γ * st.pareto(α).rvs(n)) pareto_dist = loss_model("Pareto", ["α", "γ"]) print(pareto_dist.name, pareto_dist.parm_names, pareto_dist.d) parms = np.array([1/2, 1/2]) α, γ = parms x, y = 2, 0.5 pareto_dist.set_cdf(), pareto_dist.set_pdf(), pareto_dist.set_ppf() pareto_dist.cdf(parms, x) - st.pareto(α).cdf(x / γ),\ pareto_dist.ppf(parms, y)- γ * st.pareto(α).ppf(y),\ pareto_dist.pdf(parms, x)- st.pareto(α).pdf(x / γ) / γ,\ pareto_dist.sample(parms, 10) X = pareto_dist.sample(parms, 10) pareto_dist.set_logp(X) print(pareto_dist.logp(parms) - np.sum(np.log(st.pareto(α).pdf(X / γ) / γ))) α_prior, γ_prior= bsm.prior_model('gamma','α', 1, 1), bsm.prior_model('gamma','σ', 1, 1) prior_gamma_model = bsm.independent_priors([α_prior, γ_prior]) particle_cloud = prior_gamma_model.sample(20) pareto_dist.set_logd(particle_cloud.values) α_vec, γ_vec = particle_cloud.values[:,0], particle_cloud.values[:,1] print(np.array([np.log(st.pareto(α_vec[i]).pdf(X[0] / γ_vec[i]) / γ_vec[i]) for i in range(len(γ_vec))] - pareto_dist.logd(X[0]))) parms_true = np.array([2, 1]) f = loss_model("Pareto", ["α", "γ"]) # X = f.sample(parms_true, 100) danish = 
pd.read_csv("Data/danish.csv").x X = danish.values plt.hist(X,bins=25) sns.despine() α_prior, γ_prior = bsm.prior_model('gamma','α', 1, 1), bsm.prior_model('fixed','γ', min(X), 1) prior_single_model = bsm.independent_priors([α_prior, γ_prior]) popSize, ρ, c, n_step_max, err, paralell, n_proc, verbose = 2000, 1/2, 0.99, 25, 1e-6, False, 4, True %time trace, log_marg, DIC, WAIC = bsm.smc_likelihood_annealing(X, f, popSize, prior_single_model, ρ, c,n_step_max, err, paralell, 4, verbose) print(log_marg, DIC, WAIC) bsm.posterior_plots(f, trace) bsm.trace_plots(f, trace) f.set_ppf() bsm.qq_plot(X, f, trace.mean().values)-5679.288240674828 11348.282949922615 11351.293745253335's Safe Driving Prediction (AutoML Local Compute)Now let's use Azure Automated ML! Hint: use shift + enter to run the code cells below. Once the cell turns from [*] to [], you can be sure the cell has run. Import Needed PackagesImport the packages needed for this solution notebook. The most widely used package for machine learning is [scikit-learn](https://scikit-learn.org/stable/), [pandas](https://pandas.pydata.org/docs/getting_started/index.htmlgetting-started), and [numpy](https://numpy.org/). These packages have various features, as well as a lot of clustering, regression and classification algorithms that make it a good choice for data mining and data analysis.import os import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import joblib from sklearn import metricsGet Azure ML Workspace to usefrom azureml.core import Workspace, Dataset # Get Workspace defined in by default config.json file ws = Workspace.from_config()Load data from file into regular Pandas DataFrameDATA_DIR = "../../data/" data_df = pd.read_csv(os.path.join(DATA_DIR, 'porto_seguro_safe_driver_prediction_train.csv')) print(data_df.shape) # print(data_df.describe()) data_df.head(5)Split Data into Train and Validatation SetsPartitioning data into training, validation, and holdout sets allows you to develop highly accurate models that are relevant to data that you collect in the future, not just the data the model was trained on. In machine learning, features are the measurable property of the object you’re trying to analyze. Typically, features are the columns of the data that you are training your model with minus the label. 
In machine learning, a label (categorical) or target (regression) is the output you get from your model after training it.# Split in train/validation datasets (Validation=20%, Train=80%) # Only split in test/train train_df, validation_df = train_test_split(data_df, test_size=0.2, random_state=0) train_df.describe()Train with Azure AutoML automatically searching for the 'best model' (Best algorithms and best hyper-parameters) List and select primary metric to drive the AutoML classification problemfrom azureml.train import automl # Get a list of valid metrics for your given task automl.utilities.get_primary_metrics('classification') # List of possible primary metrics is here: # https://docs.microsoft.com/en-us/azure/machine-learning/how-to-configure-auto-train#primary-metricDefine AutoML Experiment settingsimport logging # You can provide additional settings as a **kwargs parameter for the AutoMLConfig object # automl_settings = { # "whitelist_models": ['LightGBM'] # } from azureml.train.automl import AutoMLConfig automl_config = AutoMLConfig(task='classification', primary_metric='AUC_weighted', training_data=train_df, validation_data=validation_df, label_column_name="target", enable_early_stopping= True, blacklist_models=['LogisticRegression', 'ExtremeRandomTrees', 'RandomForest'], iterations=10, # experiment_exit_score= 0.63, featurization= 'auto', debug_log='automated_ml_errors.log', verbosity= logging.INFO, enable_onnx_compatible_models=False # **automl_settings ) # Explanation of Settings: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-configure-auto-train#configure-your-experiment-settings # AutoMLConfig info on: # https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfigRun Experiment with multiple child runs under the coversfrom azureml.core import Experiment experiment_name = "SDK_local_porto_seguro_driver_pred" print(experiment_name) experiment = Experiment(workspace=ws, name=experiment_name) run = experiment.submit(automl_config, show_output=True)Explore results with Widget# Explore the results of automatic training with a Jupyter widget: https://docs.microsoft.com/en-us/python/api/azureml-widgets/azureml.widgets?view=azure-ml-py from azureml.widgets import RunDetails RunDetails(run).show()Retrieve the 'Best' Scikit-Learn Modelbest_run, fitted_model = run.get_output() print(best_run) print('--------') print(fitted_model)Make Predictions Prep Validation Data: Extract X values (feature columns) from dataset and convert to NumPi array for predictingimport pandas as pd #Remove Label/y column if 'target' in validation_df.columns: y_validation_df = validation_df.pop('target') x_validation_df = validation_df x_validation_df.describe() y_validation_df.describe()Make predictions in bulk# Try the best model making predictions with the test dataset y_predictions = fitted_model.predict(x_validation_df) print('10 predictions: ') print(y_predictions[:10])Evaluate ModelEvaluating performance is an essential task in machine learning. In this case, because this is a classification problem, the data scientist elected to use an AUC - ROC Curve. When we need to check or visualize the performance of the multi - class classification problem, we use AUC (Area Under The Curve) ROC (Receiver Operating Characteristics) curve. 
It is one of the most important evaluation metrics for checking any classification model’s performance.Markdown Monster icon Calculate the Accuracy with Validation or Test Datasetfrom sklearn.metrics import accuracy_score print('Accuracy with Scikit-Learn model:') print(accuracy_score(y_validation_df, y_predictions)) fpr, tpr, thresholds = metrics.roc_curve(y_validation_df, y_predictions) print('AUC (Area Under the Curve) with Scikit-Learn model:') metrics.auc(fpr, tpr)See files associated with the 'Best run'print(best_run.get_file_names()) # best_run.download_file('azureml-logs/70_driver_log.txt') # To check errorsDownload Scikit-Learn model pickle file from the run# Download the model .pkl file to local (Using the 'run' object) best_run.download_file('outputs/model.pkl')Load model in memory from .pkl file# Load the model into memory import joblib fitted_model = joblib.load('model.pkl') print(fitted_model)Tensorflow Core Tutorial 따라하기 (KH Practice)-. Tutorial 코딩을 재현 + 이해한 내용 주석이 추가되어있습니다 (keyword. kh)-. Tutorial Link. https://www.tensorflow.org/tutorials/structured_data/time_series?hl=ko 이 튜토리얼에서는 TensorFlow를 사용한 시계열 예측을 소개합니다. Convolutional/Recurrent Neural Network(CNN 및 RNN)를 포함하여 몇 가지 다른 스타일의 모델을 빌드합니다.이 내용은 각각 하위 항목이 있는 두 부분으로 나누어 생각합니다.- 단일 타임스텝 예측: - 단일 특성 - 모든 특성- 다중 스텝 예측: - 싱글샷: 모두 한 번에 예측합니다. - 자가 회귀: 한 번에 하나의 예측을 수행하고 결과를 모델로 피드백합니다. Settingimport os import datetime import IPython import IPython.display import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf from tensorflow.keras.utils import plot_model # kh) keras 모델 시각화 mpl.rcParams['figure.figsize'] = (8, 6) mpl.rcParams['axes.grid'] = FalseLoad Dataset 날씨 데이터세트이 튜토리얼은 막스 플랑크 생물 지구화학 연구소에서 기록한 날씨 시계열 데이터세트를 사용합니다.이 데이터세트에는 온도, 대기압 및 습도와 같은 14가지 특성이 있습니다. 이러한 데이터는 2003년부터 시작해 10분 간격으로 수집되었습니다. 효율성을 위해 2009년과 2016년 사이에 수집된 데이터만 사용하겠습니다. 이 데이터세트 부분은 François Chollet이 자신이 저술한 책 [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python)을 위해 준비했습니다.zip_path = tf.keras.utils.get_file( origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip', fname='jena_climate_2009_2016.csv.zip', extract=True) csv_path, _ = os.path.splitext(zip_path)이 튜토리얼은 **시간별 예측**만 다루므로 10분 간격부터 1시간까지 데이터를 서브 샘플링하는 것으로 시작합니다.df = pd.read_csv(csv_path) display(df.head()) # slice [start:stop:step], starting from index 5 take every 6th record. df = df[5::6] date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S') display(df.head()) display(date_time.head()) # 시간에 따른 변수 Trend Plotting plot_cols = ['T (degC)', 'p (mbar)', 'rho (g/m**3)'] plot_features = df[plot_cols] plot_features.index = date_time _ = plot_features.plot(subplots = True) print('df[plot_cols][:480] - 처음 480개의 데이터만 plotting') print('KH- subplots = True/False 차이 확인하기') plot_features = df[plot_cols][:480] plot_features.index = date_time[:480] _ = plot_features.plot(subplots = False) _ = plot_features.plot(subplots = True)df[plot_cols][:480] - 처음 480개의 데이터만 plotting KH- subplots = True/False 차이 확인하기EDA & 전처리 검사 및 정리하기*다음으로* 데이터세트의 통계를 살펴봅니다.df.describe().transpose()이상치 보정 풍속한 가지 주목할 점은 풍속의 `min` 값, `wv (m/s)` 및 `max. wv (m/s)` 열입니다. 이 `-9999`는 문제가 있는 것으로 보입니다. 별도의 풍향 열이 있으므로 속도는 `>=0`여야 합니다. 값을 0으로 대체합니다.wv = df['wv (m/s)'] bad_wv = wv == -9999.0 # kh) wv가 -9999.0 이면 true; 조건 값의 소속 여부를 Boolean 값으로 반환 print(bad_wv) # kh) 조건 값의 소속 여부를 Boolean 값으로 반환 wv[bad_wv] = 0.0 # kh) bad_wv = True 인 행 값을 -> 0.0 으로 대체 max_wv = df['max. 
wv (m/s)'] bad_max_wv = max_wv == -9999.0 max_wv[bad_max_wv] = 0.0 # The above inplacer edits are reflected in the DataFrame print(df['wv (m/s)'].min())5 False 11 False 17 False 23 False 29 False ... 420521 False 420527 False 420533 False 420539 False 420545 False Name: wv (m/s), Length: 70091, dtype: bool 0.0데이터 변환 - 변수화 특성 엔지니어링모델을 본격적으로 빌드하기 전에 데이터를 이해하고 모델에 적절한 형식의 데이터를 전달하는 것이 중요합니다. 바람데이터의 마지막 열인 `wd (deg)`는 도 단위로 바람의 방향을 나타냅니다. 각도가 있으면 모델 입력으로 좋지 않으므로 360°와 0°는 서로 가까워야 하며 부드럽게 휘어져야 합니다. 바람이 불지 않으면 방향은 중요하지 않습니다.현재, 바람 데이터의 분포는 다음과 같습니다.plt.hist2d(df['wd (deg)'], df['wv (m/s)'], bins=(50, 50), vmax=400) plt.colorbar() plt.xlabel('Wind Direction [deg]') plt.ylabel('Wind Velocity [m/s]')그러나 풍향과 속도 열을 바람 **벡터**로 변환하면 모델이 해석하기가 더 쉽습니다.wv = df.pop('wv (m/s)') max_wv = df.pop('max. wv (m/s)') # Convert to radians. wd_rad = df.pop('wd (deg)')*np.pi / 180 # Calculate the wind x and y components. df['Wx'] = wv*np.cos(wd_rad) df['Wy'] = wv*np.sin(wd_rad) # Calculate the max wind x and y components. df['max Wx'] = max_wv*np.cos(wd_rad) df['max Wy'] = max_wv*np.sin(wd_rad)바람 벡터의 분포는 모델이 올바르게 해석하기에 훨씬 더 간단합니다.plt.hist2d(df['Wx'], df['Wy'], bins=(50, 50), vmax=400) plt.colorbar() plt.xlabel('Wind X [m/s]') plt.ylabel('Wind Y [m/s]') ax = plt.gca() ax.axis('tight')시간마찬가지로 `Date Time` 열은 매우 유용하지만 이 문자열 형식으로는 유용하지 않습니다. 우선 초로 변환합니다.print(datetime.datetime.timestamp) # kh check print(date_time) # kh check timestamp_s = date_time.map(datetime.datetime.timestamp) print(timestamp_s) 5 2009-01-01 01:00:00 11 2009-01-01 02:00:00 17 2009-01-01 03:00:00 23 2009-01-01 04:00:00 29 2009-01-01 05:00:00 ... 420521 2016-12-31 19:10:00 420527 2016-12-31 20:10:00 420533 2016-12-31 21:10:00 420539 2016-12-31 22:10:00 420545 2016-12-31 23:10:00 Name: Date Time, Length: 70091, dtype: datetime64[ns] 5 1.230772e+09 11 1.230775e+09 17 1.230779e+09 23 1.230782e+09 29 1.230786e+09 ... 420521 1.483211e+09 420527 1.483215e+09 420533 1.483219e+09 420539 1.483222e+09 420545 1.483226e+09 Name: Date Time, Length: 70091, dtype: float64시간 -> 하루 및 연 단위의 주기성 변수로 변환 풍향과 유사하게 초 단위의 시간은 유용한 모델 입력이 아닙니다. 날씨 데이터이므로 하루 및 연 단위의 주기성이 명확합니다. 주기성을 처리할 수 있는 방법에는 여러 가지가 있습니다.사용 가능한 신호로 변환하는 간단한 방법은 `sin` 및 `cos`를 사용하여 시간을 명확한 "하루 중 시간" 및 "연중 시간" 신호로 변환하는 것입니다.day = 24*60*60 # kh) sec -> day: 24hr * 60min * 60sec year = (365.2425)*day # kh) 통상적으로 사용되는 윤년을 고려한 avg days in year = 365.2425 df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day)) df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day)) df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year)) df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year)) plt.plot(np.array(df['Day sin'])[:25]) plt.plot(np.array(df['Day cos'])[:25]) plt.xlabel('Time [h]') plt.title('Time of day signal')그러면 모델이 가장 중요한 빈도 특성에 액세스할 수 있습니다. 이 경우 어떤 빈도가 중요한지 미리 알고 있었습니다.모르는 경우 `fft`를 사용하여 중요한 빈도를 결정할 수 있습니다. 시간에 따른 온도의 `tf.signal.rfft`를 보면 여기서 가정한 내용이 확인됩니다. 
`1/year` 및 `1/day` 근처에서 빈도 피크가 확실하다는 것을 알 수 있습니다.fft = tf.signal.rfft(df['T (degC)']) f_per_dataset = np.arange(0, len(fft)) print("kh_check--------") print(f"original df['T (degC)'] \n {df['T (degC)'].head()}") print(f"fft \n { fft[0:5] }") print(f"f_per_dataset {f_per_dataset}") print("------------") n_samples_h = len(df['T (degC)']) hours_per_year = 24*365.2524 years_per_dataset = n_samples_h/(hours_per_year) f_per_year = f_per_dataset/years_per_dataset plt.step(f_per_year, np.abs(fft)) plt.xscale('log') plt.ylim(0, 400000) plt.xlim([0.1, max(plt.xlim())]) plt.xticks([1, 365.2524], labels=['1/Year', '1/day']) # kh) matplotlib.pyplot.xticks(ticks=None, labels=None, **kwargs); Get or set the current tick locations and labels of the x-axis. _ = plt.xlabel('Frequency (log scale')kh_check-------- original df['T (degC)'] 5 -8.05 11 -8.88 17 -8.81 23 -9.05 29 -9.63 Name: T (degC), dtype: float64 fft [662393.7 +0.j -4666.83 +37410.137j -2705.8481+12410.741j 12935.66 -1294.4241j -16647.074 +6837.21j ] f_per_dataset [ 0 1 2 ... 35043 35044 35045] ------------데이터 분할 (train/val/test)*훈련*, 검증 및 테스트 세트에 `(70%, 20%, 10%)` 분할을 사용합니다. 분할하기 전에 데이터가 임의로 셔플되지 **않습니다**. 이것은 두 가지 이유 때문입니다.1. 데이터를 연속된 샘플의 창으로 자르는 것이 여전히 가능합니다.2. 모델을 훈련한 후 수집된 데이터를 바탕으로 평가하므로 검증/테스트 결과가 보다 현실적입니다.column_indices = {name: i for i, name in enumerate(df.columns)} print("column_indices", column_indices) # kh check n = len(df) train_df = df[0:int(n*0.7)] val_df = df[int(n*0.7):int(n*0.9)] test_df = df[int(n*0.9):] num_features = df.shape[1] print("kh_check -------") print("length of total dataset", n) print("length of train_df, val_df, test_df", len(train_df), len(val_df), len(test_df))column_indices {'p (mbar)': 0, 'T (degC)': 1, 'Tpot (K)': 2, 'Tdew (degC)': 3, 'rh (%)': 4, 'VPmax (mbar)': 5, 'VPact (mbar)': 6, 'VPdef (mbar)': 7, 'sh (g/kg)': 8, 'H2OC (mmol/mol)': 9, 'rho (g/m**3)': 10, 'Wx': 11, 'Wy': 12, 'max Wx': 13, 'max Wy': 14, 'Day sin': 15, 'Day cos': 16, 'Year sin': 17, 'Year cos': 18} kh_check ------- length of total dataset 70091 length of train_df, val_df, test_df 49063 14018 7010데이터 정규화신경망을 훈련하기 전에 특성의 크기를 정하는 것이 중요합니다. 정규화는 이 크기 조정을 수행하는 일반적인 방법입니다. 평균을 빼고 각 특성의 표준 편차로 나눕니다.모델이 검증 및 테스트 세트의 값에 액세스할 수 없도록 훈련 데이터를 사용해서만 평균 및 표준 편차를 계산해야 합니다.----또한 모델이 훈련할 때 훈련 세트의 미래 값에 액세스할 수 없어야 하고 이 정규화가 이동 평균을 사용하여 수행되어야 한다고 말할 수도 있습니다. 이 내용은 본 튜토리얼의 중점 사항이 아니며, 검증 및 테스트 세트가 있기 때문에 (다소) 정직한 메트릭을 얻을 수 있습니다. 따라서 단순화를 위해 이 튜토리얼에서는 단순 평균을 사용합니다.train_mean = train_df.mean() train_std = train_df.std() train_df = (train_df - train_mean) / train_std val_df = (val_df - train_mean) / train_std test_df = (test_df - train_mean) / train_std이제 특성의 분포를 살펴봅니다. 일부 특성은 꼬리가 길지만 `-9999` 풍속 값과 같은 명백한 오류는 없습니다.df_std = (df - train_mean) / train_std df_std = df_std.melt(var_name = 'Column', value_name = 'Normalized') #kh ) pandas.melt: Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. display(df_std.head()) # kh check plt.figure(figsize=(12, 6)) ax = sns.violinplot(x='Column', y='Normalized', data = df_std) _ = ax.set_xticklabels(df.keys(), rotation= 90) print("kh Check - df.keys()", df.keys())데이터 창 작업이 튜토리얼의 모델은 데이터의 연속된 샘플 창을 기반으로 일련의 예측을 수행합니다.입력 창의 주요 특성은 다음과 같습니다.- 입력 및 레이블 창의 너비(타임스텝 수)- 각 사이의 시간 오프셋- 입력, 레이블 또는 둘 모두로 사용되는 특성이 튜토리얼은 다양한 모델(선형, DNN, CNN 및 RNN 모델 포함)을 빌드하고 다음 두 가지 목적으로 이 모델을 사용합니다.- *단일 출력* 및 *다중 출력* 예측- *단일 타임스텝* 및 *다중 타임스텝* 예측이 섹션에서는 모든 모델에 재사용할 수 있도록 데이터 창 작업을 구현하는 부분에 중점을 둡니다.----작업 및 모델 유형에 따라 다양한 데이터 창을 생성할 수 있습니다. 
다음은 몇 가지 예입니다.(kh - 예시그림은 원본 링크 참고; https://www.tensorflow.org/tutorials/structured_data/time_series?hl=ko)1. 예를 들어, 24시간의 기록이 주어졌을 때 앞으로 24시간의 미래를 단일 예측하려면 다음과 같은 창을 정의할 수 있습니다.![One prediction 24h into the future.](images/raw_window_24h.png)(kh. * Input width = 24 (24시간의 기록이 주어졌을 때* offset = 24 (앞으로 24시간의 미래를 ) , label width = 1 ( '단'일 예측하려면) (offset의 마지막 index 사용)* total width = 48)1. 6시간의 기록이 주어졌을 때 앞으로 1시간의 미래를 예측하는 모델에는 다음과 같은 창이 필요합니다.![One prediction 1h into the future.](images/raw_window_1h.png)(kh. * Input width = 6 (6시간의 기록이 주어졌을 때* offset = 1 (앞으로 1시간의 미래를 ) , label width = 1 ( '단'일 예측하려면) (offset의 마지막 index 사용)* total width = 7)----이 섹션의 나머지 부분에서는 `WindowGenerator` 클래스를 정의합니다. 이 클래스는 다음을 수행할 수 있습니다.1. 위의 다이어그램과 같이 인덱스와 오프셋을 처리합니다.2. 특성 창을 `(features, labels)` 쌍으로 분할합니다.3. 결과 창의 내용을 플롯합니다.4. `tf.data.Dataset`를 사용하여 훈련, 평가 및 테스트 데이터로부터 이러한 창을 여러 배치로 효율적으로 생성합니다. 1. 인덱스 및 오프셋우선 `WindowGenerator` 클래스를 만듭니다. `__init__` 메서드에는 입력 및 레이블 인덱스에 필요한 모든 논리가 포함됩니다.또한 train, eval 및 test 데이터 프레임을 입력으로 사용합니다. 이러한 데이터 프레임은 나중에 창의 `tf.data.Dataset`로 변환됩니다.""" kh) [Python] 클래스 내장모듈 __repr__ (self) __repr__: representation (표현하다) __repr__ : 사용자가 객체 자체를 이해할 수 있게 표현해주는 메서드이다 추가사항 : __str__이 서로 다른 자료형 간 인터페이스를 제공하기 위한 목적으로 존재한다면, __repr__는 해당 객체를 인간이 이해할 수 있는 표현으로 나타내기 위한 용도이다. 출처: https://it-neicebee.tistory.com/104 [IT's Portfolio] """ class WindowGenerator(): def __init__(self, input_width, label_width, shift, train_df = train_df, val_df = val_df, test_df=test_df, label_columns=None): # Store the raw data. self.train_df = train_df self.val_df = val_df self.test_df = test_df # Work out the label column indices. self.label_columns = label_columns if label_columns is not None: self.label_columns_indices = {name: i for i, name in enumerate(label_columns)} self.column_indices = {name: i for i, name in enumerate(train_df.columns)} # Work out the window parameters self.input_width = input_width self.label_width = label_width self.shift = shift self.total_window_size = input_width + shift self.input_slice = slice(0, input_width) # kh) slice(start, stop[, step]); 잘라내기 원하는 index 들을 정의하는 클래스이다. Ref Link. https://technote.kr/255 self.input_indices = np.arange(self.total_window_size)[self.input_slice] self.label_start = self.total_window_size - self.label_width self.labels_slice = slice(self.label_start, None) self.label_indices = np.arange(self.total_window_size)[self.labels_slice] def __repr__(self): return '\n'.join([ f'Total window size: {self.total_window_size}', f'Input indices: {self.input_indices}', f'Label indices: {self.label_indices}', f'Label column name(s): {self.label_columns}'])이 섹션의 시작 부분에서 다이어그램에 나타낸 두 개의 창을 만드는 코드는 다음과 같습니다.""" (kh - 예시그림은 원본 링크 참고; https://www.tensorflow.org/tutorials/structured_data/time_series?hl=ko) 1. 예를 들어, 24시간의 기록이 주어졌을 때 앞으로 24시간의 미래를 단일 예측하려면 다음과 같은 창을 정의할 수 있습니다. ![One prediction 24h into the future.](images/raw_window_24h.png) (kh. * Input width = 24 (24시간의 기록이 주어졌을 때 * offset = 24 (앞으로 24시간의 미래를 ) , label width = 1 ( '단'일 예측하려면) (offset의 마지막 index 사용) * total width = 48 ) """ w1 = WindowGenerator(input_width=24, label_width=1, shift=24, label_columns=['T (degC)']) w1 """ 2. 6시간의 기록이 주어졌을 때 앞으로 1시간의 미래를 예측하는 모델에는 다음과 같은 창이 필요합니다. ![One prediction 1h into the future.](images/raw_window_1h.png) (kh. * Input width = 6 (6시간의 기록이 주어졌을 때 * offset = 1 (앞으로 1시간의 미래를 ) , label width = 1 ( '단'일 예측하려면) (offset의 마지막 index 사용) * total width = 7 ) """ w2 = WindowGenerator(input_width=6, label_width=1, shift=1, label_columns=['T (degC)']) w22. 
분할연속적인 입력 목록이 주어지면 `split_window` 메서드는 이 목록을 입력 창과 레이블 창으로 변환합니다.위의 예제 `w2`는 다음과 같이 분할됩니다.![The initial window is all consecuitive samples, this splits it into an (inputs, labels) pairs](https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/structured_data/images/split_window.png?raw=true)이 다이어그램에는 데이터의 `features` 축이 나와 있지 않지만 이 `split_window` 함수는 단일 출력과 다중 출력 예에서 모두 사용될 수 있도록 `label_columns`를 처리합니다.def split_window(self, features): """ kh) Parameters ___ -. features # 입력 목록이 주어지면 Return --- -. inputs, labels # 입력 창과 레이블 창으로 변환 """ inputs = features[:, self.input_slice, :] labels = features[:, self.labels_slice, :] if self.label_columns is not None: labels = tf.stack( [labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1) # Slicing doesn't preserve static shape information, so set the shapes # manually, This way the 'tf.data.Datasets' are easier to inspect. inputs.set_shape([None, self.input_width, None]) labels.set_shape([None, self.label_width, None]) return inputs, labels #kh) WindowGenerator 클래스에 split_window 내장함수 추가 WindowGenerator.split_window = split_window ## 다음을 사용해 보세요 (WindowGenerator.split_window 사용 예시) # Stack three slices, the lenth of the total window: example_window = tf.stack([np.array(train_df[:w2.total_window_size]), np.array(train_df[100:100+w2.total_window_size]), np.array(train_df[200:200+w2.total_window_size])]) example_inputs, example_labels = w2.split_window(example_window) print('All shapes are: (batch, time, features)') print(f'Window shape: {example_window.shape}') print(f'Inputs shape: {example_inputs.shape}') print(f'labels shape: {example_labels.shape}')All shapes are: (batch, time, features) Window shape: (3, 7, 19) Inputs shape: (3, 6, 19) labels shape: (3, 1, 1)일반적으로 TensorFlow의 데이터는 가장 바깥 쪽 인덱스가 여러 예제("배치" 차원)에 걸쳐 있는 배열로 구성됩니다. 중간 인덱스는 "시간" 또는 "공간"(너비, 높이) 차원입니다. 가장 안쪽 인덱스는 특성입니다.(kh* 가장 바깥 쪽 인덱스 shape[0]: 여러 예제("배치" 차원)에 걸쳐 있는 배열로 구성; batch size* 중간 인덱스 shape[1]: "시간" 또는 "공간"(너비, 높이) 차원; time length* 가장 안쪽 인덱스 shape[2]: 특성; of column variables)위의 코드는 두 배치의 7-타임스텝 창을 사용하며 각 타임스텝에는 19개의 특성이 있습니다. 그러면 이것을 한 배치의 6-타임스텝과 19개의 특성 입력 및 1-타임스텝 1-특성 레이블로 분할합니다. 레이블에는 하나의 특성만 있는데, `WindowGenerator`가 `label_columns=['T (degC)']`로 초기화되었기 때문입니다. 우선 이 튜토리얼에서는 단일 출력 레이블을 예측하는 모델을 빌드합니다. 3. 플롯하기다음은 분할 창을 간단하게 시각화할 수 있는 플롯 메서드입니다.w2.example = example_inputs, example_labels print(w2.example) #kh check def plot(self, model=None, plot_col='T (degC)', max_subplots=3): inputs, labels = self.example plt.figure(figsize=(12, 8)) plot_col_index = self.column_indices[plot_col] max_n = min(max_subplots, len(inputs)) for n in range(max_n): plt.subplot(3, 1, n+1) plt.ylabel(f'{plot_col} [normed]') plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10) if self.label_columns: label_col_index = self.label_columns_indices.get(plot_col, None) else: label_col_index = plot_col_index if label_col_index is None: continue plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='#2ca02c', s=64) if model is not None: predictions = model(inputs) plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='#ff7f0e', s=64) if n == 0: plt.legend() plt.xlabel('Time [h]') # -- end of def plot() # kh added WindowGenerator.plot = plot이 플롯은 항목이 참조하는 시간을 기준으로 입력, 레이블 및 (나중에) 예측값을 정렬합니다.w2.plot()다른 열을 플롯할 수 있지만 예제 창 `w2` 구성에는 `T (degC)` 열에 대한 레이블만 있습니다.w2.plot(plot_col='p (mbar)')4. 
`tf.data.Dataset` 만들기마지막으로, 이 `make_dataset` 메서드는 시계열 `DataFrame`을 가져와 `preprocessing.timeseries_dataset_from_array` 함수를 이용해 `(input_window, label_window)` 쌍의 `tf.data.Dataset`로 변환합니다.def make_dataset(self, data): data = np.array(data, dtype=np.float32) ds = tf.keras.preprocessing.timeseries_dataset_from_array( data=data, targets=None, sequence_length=self.total_window_size, sequence_stride=1, shuffle=True, batch_size=32, ) ds = ds.map(self.split_window) return ds WindowGenerator.make_dataset = make_dataset`WindowGenerator` 객체는 훈련, 검증 및 테스트 데이터를 보유합니다. 위의 `make_dataset` 메서드를 사용하여 `tf.data.Datasets`로 여기에 액세스하기 위한 특성을 추가합니다. 또한 간편한 액세스와 플롯을 위한 표준 예제 배치를 추가합니다.""" kh) @property property 함수나 @property 데코레이터를 사용했을 때 가장 큰 이점은 외부에 티 내지 않고 내부적으로 클래스의 필드 접근 방법을 바꿀 수 있다는 것 Ref LINK. https://www.daleseo.com/python-property/ ==> kh) property(), @property: getter, setter 없이 클래스 필드에 접근 가능 하게 함 """ @property def train(self): return self.make_dataset(self.train_df) @property def val(self): return self.make_dataset(self.val_df) @property def test(self): return self.make_dataset(self.test_df) @property def example(self): """Get and cache an example batch of 'inputs, labels' for plotting.""" result = getattr(self, '_example_', None) if result is None: # No example batch was found, so get one from the '.train' dataset result = next(iter(self.train)) # ANd cache it for next time self._example = result return result WindowGenerator.train = train WindowGenerator.val = val WindowGenerator.test = test WindowGenerator.example = example이제 `WindowGenerator` 객체가 `tf.data.Dataset` 객체에 대한 액세스 권한을 부여하므로 데이터를 쉽게 반복할 수 있습니다.`Dataset.element_spec` 속성은 데이터세트 요소의 구조, `dtypes` 및 형상을 알려줍니다.# Each element is an (inputs, label) pair w2.train.element_spec`Dataset`를 반복하면 구체적인 배치가 생성됩니다.for example_inputs, example_labels in w2.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}') print("\nkh check----") print("w2.train.take(1)", w2.train.take(1))Inputs shape (batch, time, features): (32, 6, 19) Labels shape (batch, time, features): (32, 1, 1) kh check---- w2.train.take(1) 단일 스텝 모델이러한 종류의 데이터를 기반으로 빌드할 수 있는 가장 간단한 모델은 현재 조건에만 기초하여 미래로 1 타임스텝(1시간) 진행된 단일 특성 값을 예측하는 모델입니다.따라서 1시간 미래의 `T (degC)` 값을 예측하는 모델을 빌드하는 것으로 시작하겠습니다.![Predict the next time step](images/narrow_window.png)kh) Image LINK. https://www.tensorflow.org/tutorials/structured_data/images/narrow_window.png?hl=ko----다음과 같은 단일 스텝 `(input, label)` 쌍을 생성하도록 `WindowGenerator` 객체를 구성합니다.single_step_window = WindowGenerator( input_width=1, label_width=1, shift=1, label_columns=['T (degC)']) single_step_window`window` 객체는 훈련, 검증 및 테스트 세트로부터 `tf.data.Datasets`를 생성하므로 데이터 배치를 쉽게 반복할 수 있습니다.for example_inputs, example_labels in single_step_window.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}')Inputs shape (batch, time, features): (32, 1, 19) Labels shape (batch, time, features): (32, 1, 1)기준 (kh* BaseLine 모델 빌드* 제공되는 tf.keras.Model 을 기준 모델로 사용)훈련 가능한 모델을 빌드하기 전에 나중에 더 복잡한 모델과 비교하기 위한 포인트로 성능 기준을 갖는 것이 좋습니다.첫 번째 작업은 모든 특성의 현재 값을 고려하여 1시간 미래의 온도를 예측하는 것입니다. 현재 값에는 현재 온도가 포함됩니다.따라서 예측으로 현재 온도를 반환하여 "변화 없음"을 예측하는 모델로 시작하겠습니다. 온도는 천천히 변하기 때문에 이것은 합리적인 기준입니다. 물론, 더 미래로 들어가면 이 기준의 예측 효과는 떨어질 것입니다.![Send the input to the output](images/baseline.png)kh IMAGE LINK. 
https://www.tensorflow.org/tutorials/structured_data/images/baseline.png?hl=koclass Baseline(tf.keras.Model): def __init__(self, label_index=None): super().__init__() self.label_index = label_index def call(self, inputs): if self.label_index is None: return inputs result = inputs[:, :, self.label_index] return result[:, :, tf.newaxis]이 모델을 인스턴스화하고 평가합니다.baseline = Baseline(label_index=column_indices['T (degC)']) baseline.compile(loss=tf.losses.MeanSquaredError(), metrics=tf.metrics.MeanAbsoluteError()) val_performance = {} performance = {} val_performance['Baseline'] = baseline.evaluate(single_step_window.val) performance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0) print("kh check---") print("baseline", baseline) # kh-check print("val_performance, performance", val_performance, performance) #kh-check439/439 [==============================] - 4s 9ms/step - loss: 0.0128 - mean_absolute_error: 0.0785 kh check--- baseline <__main__.Baseline object at 0x7f02017c0b50> val_performance, performance {'Baseline': [0.012845640070736408, 0.07846628874540329]} {'Baseline': [0.014162620529532433, 0.08516015112400055]}몇 가지 성능 메트릭을 출력했지만 모델이 얼마나 잘 동작하는지에 대한 느낌은 주지 않습니다.`WindowGenerator`에는 플롯 메서드가 있지만 단일 샘플만으로는 플롯이 그다지 흥미롭지 않습니다. 따라서 한 번에 24시간 범위의 연속 입력과 레이블을 생성하는 더 넓은 `WindowGenerator`를 만듭니다.`wide_window`는 모델이 동작하는 방식을 변화시키지 않습니다. 이 모델은 단일 입력 타임스텝을 기반으로 1시간 미래를 예측합니다. 여기서 `time` 축은 `batch` 축과 같은 역할을 합니다. 각 예측은 타임스텝 사이의 상호 작용 없이 독립적으로 이루어집니다.wide_window = WindowGenerator( input_width=24, label_width=24, shift=1, label_columns=['T (degC)']) wide_window이 확장된 창은 어떠한 코드 변경 없이 동일한 `baseline` 모델에 직접 전달할 수 있습니다. 이는 입력과 레이블이 동일한 수의 타임스텝을 가지며 기준이 입력을 출력으로 전달하기 때문에 가능합니다.![One prediction 1h into the future, ever hour.](images/last_window.png)print('Input shape:', single_step_window.example[0].shape) print('Output shape:', baseline(single_step_window.example[0]).shape)Input shape: (32, 1, 19) Output shape: (32, 1, 1)기준 모델의 예측값을 플롯하면 1시간씩 오른쪽으로 이동한 단순한 레이블임을 알 수 있습니다.wide_window.plot(baseline)위의 세 가지 예제 플롯에서 단일 스텝 모델은 24시간 동안 실행됩니다. 이에 관해 몇 가지 설명이 필요합니다.- 파란색 "입력" 라인은 각 타임스텝의 입력 온도를 보여줍니다. 이 모델은 모든 특성을 수신하며 이 플롯은 온도만 표시합니다.- 녹색 "레이블" 점은 목표 예측값을 나타냅니다. 이러한 점은 입력 시간이 아니라 예측 시간에 표시됩니다. 레이블의 범위가 입력에 상대적으로 한 스텝 이동하는 이유가 여기에 있습니다.- 주황색 "예측" 십자는 각 출력 타임스텝에 대한 모델의 예측입니다. 모델이 완벽하게 예측하는 경우 예측값은 "레이블" 바로 위에 놓여집니다. 선형 모델이 작업에 적용할 수 있는 가장 간단한 **훈련 가능한** 모델은 입력과 출력 사이에 선형 변환을 삽입하는 것입니다. 이 경우 타임스텝의 출력은 해당 스텝에만 의존합니다.![A single step prediction](images/narrow_window.png)`activation` 세트가 없는 `layers.Dense`는 선형 모델입니다. 레이어는 데이터의 마지막 축을 `(batch, time, inputs)`에서 `(batch, time, units)`로만 변환하며, `batch` 및 `time` 축의 모든 항목에 독립적으로 적용됩니다.linear = tf.keras.Sequential([ tf.keras.layers.Dense(units=1) ]) print('Input shape:', single_step_window.example[0].shape) print('Output shape:', linear(single_step_window.example[0]).shape)Input shape: (32, 1, 19) Output shape: (32, 1, 1)Training 함수 만들기이 튜토리얼은 많은 모델을 훈련하므로 훈련 절차를 하나의 함수 패키지로 만듭니다.MAX_EPOCHS = 5 def compile_and_fit(model, window, patience=2): early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min') model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(), metrics=[tf.metrics.MeanAbsoluteError()]) history = model.fit(window.train, epochs=MAX_EPOCHS, validation_data=window.val, callbacks=[early_stopping]) return history ## 모델을 훈련하고 성능을 평가합니다. 
history = compile_and_fit(linear, single_step_window) val_performance['Linear'] = linear.evaluate(single_step_window.val) performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0) print("kh check---") print("Linear", linear) # kh-check print("val_performance", val_performance) #kh-check print("performance", performance) #kh-checkEpoch 1/20 1534/1534 [==============================] - 16s 10ms/step - loss: 0.4326 - mean_absolute_error: 0.3925 - val_loss: 0.0405 - val_mean_absolute_error: 0.1513 Epoch 2/20 1534/1534 [==============================] - 15s 9ms/step - loss: 0.0218 - mean_absolute_error: 0.1115 - val_loss: 0.0116 - val_mean_absolute_error: 0.0802 Epoch 3/20 1534/1534 [==============================] - 15s 10ms/step - loss: 0.0101 - mean_absolute_error: 0.0746 - val_loss: 0.0087 - val_mean_absolute_error: 0.0683 Epoch 4/20 1534/1534 [==============================] - 16s 10ms/step - loss: 0.0091 - mean_absolute_error: 0.0698 - val_loss: 0.0086 - val_mean_absolute_error: 0.0679 Epoch 5/20 1534/1534 [==============================] - 15s 9ms/step - loss: 0.0091 - mean_absolute_error: 0.0698 - val_loss: 0.0086 - val_mean_absolute_error: 0.0673 Epoch 6/20 1534/1534 [==============================] - 19s 12ms/step - loss: 0.0091 - mean_absolute_error: 0.0699 - val_loss: 0.0085 - val_mean_absolute_error: 0[...]`baseline` 모델과 마찬가지로 선형 모델은 넓은 범위의 배치에서 호출할 수 있습니다. 이러한 방식으로 모델은 연속적인 타임스텝에 대해 일련의 독립적인 예측을 수행합니다. `time` 축은 다른 `batch` 축처럼 작동합니다. 각 타임스텝에서 예측 사이에 상호 작용은 없습니다.![A single step prediction](images/wide_window.png)print('Input shape:', wide_window.example[0].shape) print('Output shape:', baseline(wide_window.example[0]).shape)Input shape: (32, 24, 19) Output shape: (32, 24, 1)다음은 `wide_widow`에 대한 예제 예측값을 플롯한 내용입니다. 많은 경우 예측이 단순히 입력 온도를 반환하는 것보다는 분명히 더 낮지만 몇 가지 경우에는 더 나쁘다는 사실에 주목하세요.wide_window.plot(linear)Linear 모델 해석 - Linear 모델의 가중치 가져오기 선형 모델의 한 가지 장점은 해석하기가 상대적으로 간단하다는 것입니다. 레이어의 가중치를 가져와 각 입력에 할당된 가중치를 볼 수 있습니다.""" kh) tensorflow 모델 가중치 조회하기 {tf_model}.layers[{n}].kernel.numpy() 를 통해 n번째 layer의 kernel (weight matrix) 에 접근가능 """ plt.bar(x = range(len(train_df.columns)), height=linear.layers[0].kernel[:,0].numpy()) axis=plt.gca() axis.set_xticks(range(len(train_df.columns))) _ = axis.set_xticklabels(train_df.columns, rotation=90) print("kh check---") print(linear.layers[0].kernel.numpy()) print(linear.layers[0].kernel[:,0].numpy())kh check--- [[ 1.7976382e-01] [ 3.2778519e-01] [ 4.4595923e-02] [-7.9864554e-02] [ 3.8219649e-02] [ 1.4331529e-01] [-6.2604576e-02] [-6.4536095e-02] [ 1.5413145e-02] [ 5.1153690e-04] [-6.7973697e-01] [ 4.8075421e-03] [-1.8081620e-03] [-1.8796587e-02] [ 1.8348318e-02] [ 3.6073193e-02] [-5.2411981e-02] [ 4.2632245e-03] [-1.8240711e-02]] [ 1.7976382e-01 3.2778519e-01 4.4595923e-02 -7.9864554e-02 3.8219649e-02 1.4331529e-01 -6.2604576e-02 -6.4536095e-02 1.5413145e-02 5.1153690e-04 -6.7973697e-01 4.8075421e-03 -1.8081620e-03 -1.8796587e-02 1.8348318e-02 3.6073193e-02 -5.2411981e-02 4.2632245e-03 -1.8240711e-02]때로 모델은 입력 `T (degC)`에 가장 많은 가중치를 두지 않습니다. 이것은 무작위 초기화의 위험 중 하나입니다. 
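To make the weight plot above easier to read, the learned kernel can be paired with the column names and sorted by absolute magnitude. This is a small convenience sketch that is not part of the original notebook; it only assumes the `linear` model and `train_df` defined above and that the model has already been fit.

import pandas as pd

# Pair each input column with its learned weight and sort by |weight|.
weights = pd.Series(linear.layers[0].kernel[:, 0].numpy(), index=train_df.columns)
print(weights.reindex(weights.abs().sort_values(ascending=False).index))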
밀집 (kh- Deep 하게 쌓기)실제로 여러 타임스텝에서 동작하는 모델을 적용하기 전에 더 깊고 강력한 단일 입력 스텝 모델의 성능을 확인하는 것이 좋습니다.다음 모델은 입력과 출력 사이에 몇 개의 `Dense` 레이어를 쌓는다는 점을 제외하면 `linear` 모델과 유사합니다.dense = tf.keras.Sequential([ tf.keras.layers.Dense(units=64, activation='relu'), tf.keras.layers.Dense(units=64, activation='relu'), tf.keras.layers.Dense(units=1) ]) history = compile_and_fit(dense, single_step_window) val_performance['Dense'] = dense.evaluate(single_step_window.val) performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0) # kh) 모델 시각화 하기 plot_model(dense, show_shapes=True)다중 스텝 밀집단일 타임스텝 모델에는 입력의 현재 값에 대한 컨텍스트가 없습니다. 시간에 따라 입력 특성이 어떻게 변하는지 볼 수 없습니다. 이 문제를 해결하려면 모델이 예측을 수행할 때 여러 타임스텝에 액세스해야 합니다.![Three time steps are used for each prediction.](images/conv_window.png) `baseline` , `linear` 및 `dense` 모델은 각 타임스텝을 독립적으로 처리했습니다. 여기서 모델은 단일 출력을 생성하기 위해 여러 타임스텝을 입력으로 사용합니다.3시간의 입력과 1시간의 레이블 배치를 생성하는 `WindowGenerator`를 만듭니다.`Window`의 `shift` 매개변수는 두 창의 끝에 상대적입니다.CONV_WIDTH = 3 conv_window = WindowGenerator( input_width=CONV_WIDTH, label_width=1, shift=1, label_columns=['T (degC)']) conv_window conv_window.plot() plt.title("Given 3h as input, predict 1h into the future.")`layers.Flatten`을 모델의 첫 번째 레이어로 추가하여 다중 입력 스텝 창에서 `dense` 모델을 훈련할 수 있습니다.multi_step_dense = tf.keras.Sequential([ # Shape: (time, features) => (time*features) tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=32, activation='relu'), tf.keras.layers.Dense(units=32, activation='relu'), tf.keras.layers.Dense(units=1), # Add back the time dimension. # Shape: (outputs) => (1, outputs) tf.keras.layers.Reshape([1, -1]), ]) print('Input shape:', conv_window.example[0].shape) print('Output shape:', multi_step_dense(conv_window.example[0]).shape) history = compile_and_fit(multi_step_dense, conv_window) IPython.display.clear_output() # kh) display 되는 결과 중 마지막 쪽 결과만 display 되도록 설정 val_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val) performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0) plot_model(multi_step_dense, show_shapes=True) # kh) 모델 시각화 하기 conv_window.plot(multi_step_dense)이 접근법의 주된 단점은 결과적인 모델이 정확히 이 형상의 입력 창에서만 실행될 수 있다는 것입니다.print('Input shape:', wide_window.example[0].shape) try: print('Output shape:', multi_step_dense(wide_window.example[0]).shape) except Exception as e: print(f'\n{type(e).__name__}:{e}')Input shape: (32, 24, 19) ValueError:Exception encountered when calling layer "sequential_4" (type Sequential). Input 0 of layer "dense_10" is incompatible with the layer: expected axis -1 of input shape to have value 57, but received input with shape (32, 456) Call arguments received: • inputs=tf.Tensor(shape=(32, 24, 19), dtype=float32) • training=None • mask=None다음 섹션의 컨볼루셔널 모델은 이 문제를 해결합니다. 컨볼루션 신경망컨볼루션 레이어(`layers.Conv1D`)도 각 예측에 대한 입력으로 여러 타임스텝을 사용합니다.다음은 컨볼루션으로 다시 작성한 `multi_step_dense`와 **동일한** 모델입니다.다음 변경 사항에 주목하세요.- `layers.Flatten`과 첫 번째 `layers.Dense`는 `layers.Conv1D`로 대체됩니다.- 컨볼루션이 출력에서 시간 축을 유지하므로 `layers.Reshape`는 이 더 이상 필요하지 않습니다.""" multi_step_dense = tf.keras.Sequential([ # Shape: (time, features) => (time*features) tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=32, activation='relu'), tf.keras.layers.Dense(units=32, activation='relu'), tf.keras.layers.Dense(units=1), # Add back the time dimension. 
# Shape: (outputs) => (1, outputs) tf.keras.layers.Reshape([1, -1]), ]) """ conv_model = tf.keras.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=(CONV_WIDTH,), activation='relu'), tf.keras.layers.Dense(units=32, activation='relu'), tf.keras.layers.Dense(units=1) ])예제 배치에서 실행하여 모델이 예상된 형상으로 출력을 생성하는지 확인합니다.print("Conv model on `conv_window`") print('Input shape:', conv_window.example[0].shape) print('Output shape:', conv_model(conv_window.example[0]).shape) #kh) multi_step_dense vs conv_model display(plot_model(multi_step_dense, show_shapes=True)) # kh) 모델 시각화 하기 display(plot_model(conv_model, show_shapes=True)) # kh) 모델 시각화 하기`conv_window`에서 훈련하고 평가하면 `multi_step_dense` 모델과 유사한 성능을 제공해야 합니다.history = compile_and_fit(conv_model, conv_window) IPython.display.clear_output() val_performance['Conv'] = conv_model.evaluate(conv_window.val) performance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)438/438 [==============================] - 5s 10ms/step - loss: 0.0065 - mean_absolute_error: 0.0571이 `conv_model`과 `multi_step_dense` 모델의 차이점은 `conv_model`은 모든 길이의 입력에서 실행될 수 있다는 것입니다. 컨볼루셔널 레이어는 입력의 슬라이딩 윈도우에 적용됩니다.![Executing a convolutional model on a sequence](images/wide_conv_window.png)더 넓은 입력에서 실행하면 더 넓은 출력이 생성됩니다.print("Wide window") print('Input shape:', wide_window.example[0].shape) print('Labels shape:', wide_window.example[1].shape) print('Output shape:', conv_model(wide_window.example[0]).shape) # kh) 모델의 output shapeWide window Input shape: (32, 24, 19) Labels shape: (32, 24, 1) Output shape: (32, 22, 1)출력은 입력보다 짧습니다. 훈련 또는 플롯 작업을 수행하려면 레이블과 예상의 길이가 동일해야 합니다. 따라서 레이블과 예측 길이가 일치하도록 몇 개의 추가 입력 타임스텝으로 넓은 창을 생성하는 `WindowGenerator`를 빌드합니다.#kh) Labels shape = Output shape 가 되도록 window 생성 LABEL_WIDTH = 24 INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1) wide_conv_window = WindowGenerator( input_width=INPUT_WIDTH, label_width=LABEL_WIDTH, shift=1, label_columns=['T (degC)']) wide_conv_window print("Wide conv window") print('Input shape:', wide_conv_window.example[0].shape) print('Labels shape:', wide_conv_window.example[1].shape) print('Output shape:', conv_model(wide_conv_window.example[0]).shape) # kh) wide_window vs wide_conv_window display(wide_window) display(wide_conv_window)이제 더 넓은 창에 모델의 예측값을 플롯할 수 있습니다. 첫 번째 예측 전 3개의 입력 타임스텝에 주목하세요. 여기서 모든 예측은 이전 3개의 타임스텝에 기초합니다.wide_conv_window.plot(conv_model)순환 신경망Recurrent Neural Network(RNN)는 시계열 데이터에 적합한 신경망 유형입니다. RNN은 시계열을 단계별로 처리하여 타임스텝 사이에서 내부 상태를 유지합니다.자세한 내용은 [텍스트 생성 튜토리얼](https://www.tensorflow.org/tutorials/text/text_generation) 또는 [RNN 가이드](https://www.tensorflow.org/guide/keras/rnn)를 읽어보세요.이 튜토리얼에서는 [Long Short Term Memory](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM)(LSTM)이라는 RNN 레이어를 사용합니다. 모든 keras RNN 레이어에 대한 중요한 생성자 인수는 `return_sequences` 인수입니다. 이 설정은 다음 두 가지 방법 중 하나로 레이어를 구성할 수 있습니다.1. 기본값인 `False`인 경우 레이어는 최종 타임스텝의 출력만 반환하여 단일 예측을 수행하기 전에 모델이 내부 상태를 준비할 시간을 줍니다.![An lstm warming up and making a single prediction](images/lstm_1_window.png)1. `True`이면 레이어가 각 입력에 대한 출력을 반환합니다. 다음과 같은 경우에 유용합니다.- RNN 레이어 쌓기- 여러 타임스텝에서 동시에 모델 훈련![An lstm making a prediction after every timestep](images/lstm_many_window.png)(kh* Input 의 time_length = 24 인 경우,* return_sequence=False: input (t=1, ..,, 24) /output ( t=24)* return_sequence=True: input (t=1, ..,, 24) /output ( t=1, ..., 24). 
시점마다 대응되는 output 반환)lstm_model = tf.keras.models.Sequential([ # Shape [batch, time, features] => [batch, time, lstm_units] tf.keras.layers.LSTM(32, return_sequences=True), # Shape => [batch, time, features] tf.keras.layers.Dense(units=1) ]) plot_model(lstm_model, show_shapes=True) # kh) 모델 시각화 하기Network Intrusion Detectionfrom google.colab import drive drive.mount('/content/drive') import keras from keras.callbacks import ModelCheckpoint from keras.callbacks import EarlyStopping from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten from keras.optimizers import Adam import pandas as pd import io import requests import numpy as np from sklearn import metrics import os import json import csv import sklearn.feature_extraction.text as tfidf from sklearn.model_selection import train_test_split from sklearn import datasets, linear_model, preprocessing, utils from sklearn.metrics import mean_squared_error, r2_score from scipy.stats import zscore import pickle from sklearn.preprocessing import LabelEncoder from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import MultinomialNB from sklearn import svm from sklearn.metrics import f1_score import collections from keras import optimizers from keras.layers import Conv1D, Conv2D, MaxPooling2D import matplotlib.pyplot as plt import shutil %matplotlib inline from sklearn.metrics import roc_curve, auc import tensorflow as tf from sklearn.metrics import confusion_matrix, classification_report# Plot a confusion matrix. # cm is the confusion matrix, names are the names of the classes. def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(names)) plt.xticks(tick_marks, names, rotation=45) plt.yticks(tick_marks, names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Plot an ROC. pred - the predictions, y - the expected output.

def plot_roc(pred,y): fpr, tpr, thresholds = roc_curve(y, pred) roc_auc = auc(fpr, tpr) plt.figure() plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic (ROC)') plt.legend(loc="lower right") plt.show()Data Preprocessing Step 1: Read network intrusion data csv filedf = pd.read_csv("/content/drive/My Drive/Colab Notebooks/Data_set_KDD_cup/kddcup99_csv.csv") df.head()Step 2 Adding Column Headersdf.columns = [ 'duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted', 'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds', 'is_host_login', 'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate', 'outcome' ] df.head()Step 3 Encoding good connections as “0” and bad connections as “1”df['outcome'][:1] df.groupby('outcome').count() df.groupby('outcome').head() def classify_connections(outcome): if(outcome =="normal"): outcome = 0 else: outcome = 1 return outcome df["outcome"] = df["outcome"].apply(classify_connections) df["outcome"][9020] df.count()[1]Step 4 Drop duplicatesdf = df.drop_duplicates() df.count()[0]Step 5 Drop the rows where at least one element is missingdf = df.dropna() df.count()[0] # shows that there is no row with atleast one element missingData Preprocessing Completeddf.values[:5] df["wrong_fragment"].values[:1000]df.columns = [ 'duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted', 'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds', 'is_host_login', 'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate', 'outcome' ]df.groupby('outcome').count() df.groupby('outcome').head() def Z_score(column): df[column] = zscore(df[column]) mylist = ['duration','src_bytes','dst_bytes','hot','num_failed_logins','num_compromised','num_root','num_file_creations','num_access_files','count','srv_count', 'dst_host_count', 'dst_host_srv_count'] for i in mylist: Z_score(i) df.head() df # Encode text values to dummy variables(i.e. 
[1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = "{}-{}".format(name, x) df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) mylist = ["protocol_type","service","flag","logged_in","root_shell","is_guest_login"] for i in mylist: print(i) encode_text_dummy(df,i) df df.drop(['num_outbound_cmds', 'is_host_login'], axis=1)LOGISTIC REGRESSIONoutput_df = df["outcome"] output_array = output_df.values print(output_array[:1]) df = df.drop(["outcome"], axis=1) input_df = df input_array = input_df.values x_train,x_test,y_train,y_test = train_test_split(input_array,output_array, test_size=0.2,random_state=42) print(x_train.shape) logreg = linear_model.LogisticRegression() print(type(y_train)) print(y_train) print(utils.multiclass.type_of_target(y_train)) logreg.fit(x_train, y_train) y_pred_logistic = logreg.predict(x_test) # Explained f1 score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % f1_score(y_test, y_pred_logistic,average='micro')) score = metrics.accuracy_score(y_test, y_pred_logistic) print("Accuracy score: {}".format(score)) score = metrics.precision_score(y_test,y_pred_logistic, average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test,y_pred_logistic, average= "weighted") print("Recall score: {}".format(score)) names = (np.unique(output_array))Confusion Matrix# Compute confusion matrix cm = confusion_matrix(y_test, y_pred_logistic) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test, y_pred_logistic))[[17511 93] [ 123 11390]] Plotting confusion matrixROC Curveplot_roc(y_pred_logistic,y_test)Nearest Neighborneigh = KNeighborsClassifier(n_neighbors=23) neigh.fit(x_train, y_train) y_pred_neigh = neigh.predict(x_test) # Explained variance score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % f1_score(y_test, y_pred_neigh)) score = metrics.accuracy_score(y_test, y_pred_neigh) print("Accuracy score: {}".format(score)) score = metrics.precision_score(y_test,y_pred_neigh, average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test,y_pred_neigh, average= "weighted") print("Recall score: {}".format(score)) # Compute confusion matrix cm = confusion_matrix(y_test,y_pred_neigh) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test, y_pred_neigh)) plot_roc(y_pred_neigh,y_test)MULTINOMIAL NAIVE BAYESfrom sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() input_array_min_max = scaler.fit_transform(input_array) x_train,x_test,y_train,y_test = train_test_split(input_array_min_max,output_array, test_size=0.2,random_state=42) clf = MultinomialNB() clf.fit(x_train, y_train) clf_predict = clf.predict(x_test) # Explained variance score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % f1_score(y_test, clf_predict)) score = metrics.accuracy_score(y_test, clf_predict) print("Accuracy score: {}".format(score)) score = metrics.precision_score(y_test,clf_predict, average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test,clf_predict, average= "weighted") print("Recall score: {}".format(score)) # Compute confusion matrix cm = confusion_matrix(y_test, clf_predict) print(cm) print('Plotting confusion matrix') plt.figure() 
plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test, clf_predict)) plot_roc(clf_predict,y_test)SUPPORT VECTOR MACHINESVM_classifier = svm.SVC() SVM_classifier.fit(x_train, y_train) y_pred_SVM = SVM_classifier.predict(x_test) # Explained variance score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % f1_score(y_test, y_pred_SVM, average='micro')) score = metrics.accuracy_score(y_test, y_pred_SVM) print("Accuracy score: {}".format(score)) score = metrics.precision_score(y_test,y_pred_SVM, average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test,y_pred_SVM, average= "weighted") print("Recall score: {}".format(score)) # Compute confusion matrix cm = confusion_matrix(y_test, y_pred_SVM) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test, y_pred_SVM)) plot_roc(y_pred_SVM,y_test)Gaussian Naive Bayesfrom sklearn.naive_bayes import GaussianNB GaussianNB_clf = GaussianNB() GaussianNB_clf.fit(x_train, y_train) print(x_train.shape) y_test1 = y_test.reshape((y_test.shape[0], 1)) GaussianNB_pred = GaussianNB_clf.predict(x_test) # Explained variance score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % f1_score(y_test1, GaussianNB_pred, average='micro')) score = metrics.accuracy_score(y_test1, GaussianNB_pred) print("Accuracy score: {}".format(score)) score = metrics.precision_score(y_test1,GaussianNB_pred, average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test1,GaussianNB_pred, average= "weighted") print("Recall score: {}".format(score)) # Compute confusion matrix cm = confusion_matrix(y_test1, GaussianNB_pred) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test1, GaussianNB_pred)) plot_roc(GaussianNB_pred,y_test1) print(x_train.shape) print(y_train.shape) num_classes = 2 #Convert class vectors to one hot format y_train = keras.utils.to_categorical(y_train,num_classes) print(y_train[:1]) y_test = keras.utils.to_categorical(y_test,num_classes) print(x_train.shape) print(y_train.shape)(116465, 121) (116465, 2)Tensorflow Classificationfor i in range(5): model_class = Sequential() model_class.add(Dense(50, input_dim=x_train.shape[1], activation='relu')) # Hidden 1 model_class.add(Dense(25, activation='relu')) # Hidden 2 model_class.add(Dense(y_train.shape[1], activation='softmax')) # Output #model_class.compile(optimizer = 'adam' , loss = 'categorical_crossentropy', metrics = ['accuracy']) model_class.compile(optimizer = 'adam' , loss = 'categorical_crossentropy', metrics = ['accuracy']) monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=2, mode='auto') checkpointer = ModelCheckpoint(filepath="best_weights_class.hdf5", verbose=0, save_best_only=True) # save best model model_class.fit(x_train, y_train,validation_data=(x_test,y_test),callbacks=[monitor,checkpointer],verbose=2,epochs=10) model_class.load_weights('best_weights_class.hdf5') # load weights from best model Tf_class_pred = model_class.predict(x_test) pred1 = np.argmax(Tf_class_pred ,axis=1) # raw probabilities to chosen class (highest probability) print(pred1) y_true= np.argmax(y_test,axis=1) score = metrics.accuracy_score(y_true, pred1) print("Accuracy score: {}".format(score)) # Explained variance score: 1 is perfect prediction print('Evaluating with F1 score metric: %.2f' % 
f1_score(y_true,pred1 , average='micro')) pre_score = metrics.precision_score(y_true, pred1, average= "weighted") print("Precision score: {}".format(pre_score)) score = metrics.recall_score(y_true,pred1, average= "weighted") print("Recall score: {}".format(score)) # Compute confusion matrix cm = confusion_matrix(y_true,pred1) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_true, pred1)) plot_roc(y_true,pred1)CNNprint(x_train.shape) print(y_train.shape) x_train2 = x_train.reshape((116465,1,121,1)) print(x_train2[:2]) print(x_train2.shape) # define a CNN cnn = Sequential() cnn.add(Conv2D(64, kernel_size=(1, 10), strides=(1, 1),padding='valid', activation='relu', input_shape=(1,121,1))) # the above code is equivalent to # model.add(Conv1D(64, kernel_size=3, strides=1, activation='relu', input_shape=(128, 1))) cnn.add(MaxPooling2D(pool_size=(1,2))) cnn.add(Flatten()) cnn.add(Dense(30, activation="relu")) cnn.summary() cnn.add(Dense(num_classes, activation="softmax")) cnn.summary() # define optimizer and objective, compile cnn cnn.compile(loss="categorical_crossentropy", optimizer="adam" , metrics = ['accuracy']) monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=2, mode='auto') checkpointer = ModelCheckpoint(filepath="best_weights_cnn.hdf5", verbose=0, save_best_only=True) # save best model cnn.fit(x_train2, y_train,validation_split=0.2,callbacks=[monitor,checkpointer],verbose=2,epochs=10) cnn.load_weights('best_weights_cnn.hdf5') # load weights from best model x_test = x_test.reshape((-1,1,121,1)) print(x_test.shape) pred = cnn.predict(x_test) #pred = np.argmax(pred,axis=1) print("Shape: {}".format(pred.shape)) print(pred) print(y_test.shape) pred=pred.reshape((-1,2)) print(pred.shape) score = metrics.accuracy_score(y_test, pred.round()) print('Accuracy: {}'.format(score)) f1 = metrics.f1_score(y_test, pred.round(), average='weighted') print('Averaged F1: {}'.format(f1)) print(metrics.classification_report(y_test, pred.round())) score = metrics.precision_score(y_test,pred.round(), average= "weighted") print("Precision score: {}".format(score)) score = metrics.recall_score(y_test,pred.round(), average= "weighted") print("Recall score: {}".format(score)) plot_roc(pred.round().argmax(axis=1),y_test.argmax(axis=1)) # Compute confusion matrix cm = confusion_matrix(y_test.argmax(axis=1), pred.round().argmax(axis=1)) print(cm) print('Plotting confusion matrix') plt.figure() plot_confusion_matrix(cm, names) plt.show() print(classification_report(y_test.argmax(axis=1), pred.round().argmax(axis=1)))[[17583 21] [ 40 11473]] Plotting confusion matrixIntermediate Strings Dilimleme başlangıç ve son indeksini vererek bu aralıktaki ögelere erişim sağlayabilmemize dilimleme denir.s = '' print(s[0:4]) print(s[6:7]) print(s[6:20])Pythonverilen son indeks tahmin edilmez. sadece 5 e kadar git ama dahil etme denir. Slicing Strings dilimlemenin ilk veya son indeks numarasını yazmazsak, ilk indeksini yazmadıysan değerin ilk indeksini, son indeksini yazmadıysak son indeksini baz alır.s = 'Month Python' print(s[:2]) print(s[8:])Mo thonEğer hiçbir indeks numarası yazmazsak tüm değeri alır.print(s[:])Month PythonString Concatenation "+" operatörü karakter dizilerinde birleştirme işlemi yapar.Fakat bu birleştirmede her iki değerin ortasına boşluk koymaz. 
Bu yüzden ya bir değerin yanına boşluk koyulacak yada ortalarına boşluk içeren bir karakter dizisi koyulacak.a = 'Hello' b = a + 'There' print(b) c = a + ' ' + 'There' print(c)Hello ThereUsing "in" as a logical Operator in operatörü for döngüsünde kullanıldığı gibi aradığımız bir ögenin bir değerin içinde bulunup bulunmadığını kontrol etmek içinde kullanabaliriz.Örneğin a ögesi laptop dizisinde var mı?bu sorunun cevabı sadece evet veya hayır olduğu için in operatörü de sadece boolean değer döndürür: True veya False.fruit = 'banana' print('a' in fruit) print('m' in fruit) print('nan' in fruit) if 'a' in fruit: print('Found it!')True False True Found it!String Library String veri tipine özel Python'da yerleşik olarka bulunan bazı fonksiyonlar vardır.Bunları karakter dizisin üzerine kullanarak karakter dizimizin üzerinde değişiklikler yapabiliriz.fakat bu işlemler çoğunlukla kalıcı değildir, eğer kalıcı olmasını istiyorsak yeni bir değişkene atamamız lazım.greet = 'Hello Bob' zap = greet.lower() # tüm harfleri küçük harfe çevirir print(zap) # karakter dizimize bir şey olmadı print(greet) print('Hi There'.lower()) stuff = 'Hello world' print(type(stuff))Bir değere uyguyalabileceğimiz tüm methodları öğrenmek için dir() fonksiyonundan yararlanırız.dir(stuff)Searching a String find() methodu ile ögelerin hangi indexe sahip olduğunu bulabiliriz.eğer olmayan bir ögeye aratmaya çalışırsak -1 değeri döndürülür.fruit = 'banana' pos = fruit.find('na') print(pos) aa = fruit.find('z') print(aa)-1Making everything Upper Case harfleri küçültmek için lower() methodunu, harfleri büyütmek için upper() methodunu kullanırız.name = '' nn = name.lower() print(nn) name = '' mm = name.upper() print(mm)JACK MASearch and Replace replace() methodu karakter dizisindeki ögeleri değiştirebilmemizi yarayan bir yapıdır.greet = 'Hello Bob' nstr = greet.replace('Bob','Jane') print(nstr) nstr = greet.replace('o','x') print(nstr)Hellx Bxbreplace() methodunun syntax yapısıişlemyapılacaköge.replace('değiştirelecek öge','yeni öge')Stripping Whitespace Karakter dizimizin sağındaki ve solundaki boşlukları kaldırmak için strip() modülünü kullanırız.rstrip() methodu sağ kısımda boşluk varsa, lstrip() methodu soldaki boşluk kısmını veya strip() methodu sağ ve solunda boşlukları siler.greet = ' Hello Bob ' print(greet.lstrip()) print(greet) print(greet.rstrip()) print(greet.strip())Hello Bob Hello Bob Hello Bob Hello BobPrefixes startswith() methodu bir karakter dizisinin başlama ile başlamadığını gösterir.line = 'Please have a nice day' print(line.startswith('Please')) print(line.startswith('p'))True FalseParsing and Extracting fing() methodunu kullanarak aradığımız ögenin karakter dizinin kaçıncı sırada olduğunu bulabiliriz.data = 'From Sat Jan 5 09:14:16 2008' atpos = data.find('@') print(atpos) sppos = data.find(' ',atpos) print(sppos)21 31find() methodu iki parametre alır. Bunlardan biri aramak istediğimiz öge diğeri ise başlayacağımız indeks veya yer.find(arananOge,baslanacakYer)host = data[atpos+1:sppos] print(host)uct.ac.zaEğer bir öge bulunmazsa değer olarak -1 döndürülür. 
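Because `find()` returns -1 instead of raising an error, it is worth checking the result before slicing; otherwise an index of -1 will silently select the wrong part of the string. A minimal sketch, not from the original notebook and using a made-up example address:

data = 'From marquard@example.com Sat Jan 5 09:14:16 2008'  # hypothetical example line
atpos = data.find('@')
if atpos == -1:
    print('No @ found')
else:
    sppos = data.find(' ', atpos)   # first space after the @
    host = data[atpos + 1:sppos]
    print(host)                     # example.com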
Summary - String type- Read/Convert- ındexing strings []- Slicing strings [2:4]- Looping through strings with for and while- Concatenating strings wtih '+'- String operations- String library- String Comparisons- Searching in strings- Replacing text- Stripping white space Exercise Use find and string slicing to extract the portion of the string after the colon character and then use the float function to convert the extracted string into a floating point number.str1 = 'X-DSPAM-Confidence: 0.8475' m = str1.find(':') mn = str1[m+1:] value = float(mn) print(value) print(value+45.8)0.8475 46.647499999999994Transformers# https://zablo.net/blog/post/pandas-dataframe-in-scikit-learn-feature-union from scipy import sparse from sklearn.externals.joblib import Parallel, delayed from sklearn.pipeline import FeatureUnion, _fit_transform_one, _transform_one class PandasFeatureUnion(FeatureUnion): def fit_transform(self, X, y=None, **fit_params): self._validate_transformers() result = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, weight, X, y, **fit_params) for name, trans, weight in self._iter() ) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = self.merge_dataframes_by_column(Xs) return Xs def merge_dataframes_by_column(self, Xs): return pd.concat(Xs, axis="columns", copy=False) def transform(self, X): Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, weight, X) for name, trans, weight in self._iter() ) if not Xs: # All transformers are None return np.zeros((X.shape[0], 0)) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = self.merge_dataframes_by_column(Xs) return Xs class NoFitMixin(): def fit(self, X, y=None): return self class MICEImputer(BaseEstimator, TransformerMixin, NoFitMixin): def transform(self, X, *args, **kwargs): """ Fill a 1-D array missing values with MICE """ assert isinstance(X, pd.Series) X = X.copy() # Should avoid error of already full for repeat execution has_null = X.isnull().any() # TODO: Used to avoid error of no null values from MICE if has_null: mice = MICE(verbose=False, *args, **kwargs) imputed = mice.complete(X.values.reshape(-1, 1)) X.loc[:] = imputed.reshape(X.loc[:].shape) return pd.DataFrame(X) else: return X class ColumnSelector(BaseEstimator, TransformerMixin, NoFitMixin): def __init__(self, columns, one_col=True): self.columns = columns self.one_col = one_col def transform(self, X, y=None): if self.one_col: return X[self.columns].iloc[:, 0] else: return X[self.columns] class ColumnDummifier(BaseEstimator, TransformerMixin, NoFitMixin): def transform(self, X, y=None): return pd.get_dummies(X, sparse=True, drop_first=True) class LabelEncoderWNaN(TransformerMixin, BaseEstimator): """ Applies the sklearn.LabelEncoder while keeping missing values """ def fit(self, X, y=None): self.le_ = LabelEncoder() self.le_.fit(X.loc[X.notnull()]) return self def transform(self, X, y=None): X = X.copy(deep=True) # Do not apply tranform to the actual DF X.loc[X.notnull()] = self.le_.transform(X.loc[X.notnull()]) return X.astype("float")Exploretrain_x = train.drop(columns=["survived"]) train_y = train.survived benchmark_pipeline = Pipeline([( "prep", PandasFeatureUnion([ ("age", make_pipeline(ColumnSelector(["age"]), MICEImputer())), ("sex_dummy", make_pipeline(ColumnSelector(["sex"]), ColumnDummifier())), ( "embarked", 
make_pipeline(ColumnSelector(["embarked"]), LabelEncoderWNaN(), MICEImputer(), ColumnDummifier()) ), ("fare", make_pipeline(ColumnSelector(["fare"]), MICEImputer())), ("rest", make_pipeline(ColumnSelector(["parch", "sibsp", "pclass"], one_col=False))) ]) )]) train.columnsBenchmark modellr_pipeline = make_pipeline(benchmark_pipeline, LogisticRegression()) lr_pipeline.fit(train, train_y) accuracy_score(train_y, lr_pipeline.predict(train_x)) validate = pd.read_excel("/home/iyed/Downloads/titanic3.xls") accuracy_score(validate.survived, lr_pipeline.predict(validate))The Hair Diffraction ExperimentDiffraction is the bending of waves around an object or aperture similar to the wavelength . When it happens it is as though the obstacle or slit the wave has to pass though emit waves in all directions. If you follow how this waves superpose ahead of the obstacle, what you will see is a pattern of light and shadows showing the constructive and destructive interference of the waves. These interferences are closely related to the shape of the object diffracting the waves.import numpy as np import matplotlib.pyplot as plt import math from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets # Set default font size for plots: font = {'size' : 20} plt.rc('font',**font) def sinc(x): if (x != 0): return np.sin(np.pi * x) / (np. pi * x) else: return 1 sinc = np.vectorize(sinc) ''' slitWidth = float(input("slitWidth: ")) #2500 # 2.5mm wavelength = float(input("wavelength: ")) #0.650 # 650nm distance = float(eval(input("distance: "))) #10.3 * 10**6 # 10.3m ''' slitWidth = 60 # 2.5mm wavelength = 0.650 # 650nm distance = 1 * 10**6 # 10.3m #result_mm = float(input("result: ")) #result_mm = 10.5 #Visualizing a single diffractor amplitude = 5 # the power of the laser (mW) as marked on the laser point. The accuracy of this value is trivial. x = np.arange(-50000, 50000, 10) # numerical bounds and resolution of the graph (micron) F = sinc(slitWidth * x / wavelength / distance) intensity = amplitude / (wavelength * distance) * (slitWidth * F)**2 plt.figure(1, (12, 6)) plt.plot(x, intensity, linewidth=1, color='r') # red indicates the intensity #result = result_mm * 1000 plt.axvline(x = 0,color='k', label="Center of beam: "+str(0.0)+"mm") #plt.axvline(x = result, color='b', label="Obtained result: "+str(result_mm)+"mm") # blue indicated your result indicated earlier #plt.xlim((0, 50000)) plt.ylim((0, 0.03)) plt.xlabel(r'Position ($\mu$m)') plt.ylabel(r'Intensity (mW)') plt.legend(loc='best') plt.title("Single-slit Diffraction (intensity / distance)") plt.show()The Diffraction ModelIn order to understand the variables involved in the diffraction please answer the following questions. Questions:1) Change the distance to the background wall. Describe what happens2) What is the effect of changing the width of the hair?import numpy as np import matplotlib.pyplot as plt import math from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets # Set default font size for plots: font = {'size' : 20} plt.rc('font',**font) def hair_diffrac(slitWidth,wavelength,distance): ''' slitWidth = float(input("slitWidth: ")) #2500 # 2.5mm wavelength = float(input("wavelength: ")) #0.650 # 650nm distance = float(eval(input("distance: "))) #10.3 * 10**6 # 10.3m ''' def sinc(x): if (x != 0): return np.sin(np.pi * x) / (np. 
pi * x) else: return 1 sinc = np.vectorize(sinc) #slitWidth = 60 # 2.5mm #wavelength = 0.650 # 650nm #distance = 1 * 10**6 # 10.3m #result_mm = float(input("result: ")) #result_mm = 10.5 #Visualizing a single diffractor wavelength=wavelength*1e-3 #nm distance=distance*1e4 #cm amplitude = 5 # the power of the laser (mW) as marked on the laser point. The accuracy of this value is trivial. x = np.arange(-30000, 30000, 10) # numerical bounds and resolution of the graph (mm) F = sinc(slitWidth * x / wavelength / distance) intensity = amplitude / (wavelength * distance) * (slitWidth * F)**2 plt.figure(1, (12, 6)) plt.plot(x*1e-3, intensity, linewidth=1, color='r') # red indicates the intensity #result = result_mm * 1000 plt.axvline(x = 0,color='k', label="Center of beam: "+str(0.0)+"mm") #plt.axvline(x = result*1e-3, color='b', label="Obtained result: "+str(result_mm)+"mm") # blue indicated your result indicated earlier #plt.xlim((0, 50000)) plt.ylim((0, 0.006)) plt.xlabel(r'Position ($mm$))') plt.ylabel(r'Intensity (mW)') plt.legend(loc='best') plt.title("Single-slit Diffraction (intensity / distance)") plt.show() return interactive(hair_diffrac, slitWidth = (20,80,20),wavelength=(630,670,10),distance=(1,120,20),continuous_update=False)**Python Numbers, Type Conversion and Mathematics** - In this article, you'll learn about - the different numbers used in Python, - how to convert from one data type to the other, and - the mathematical operations supported in Python. **1. Number Data Type in Python** - Python supports integers, floating-point numbers and complex numbers. They are defined as `int`, `float`, and `complex` classes in Python. - Integers and floating points are separated by the presence or absence of a decimal point. For instance, 5 is an integer whereas 5.0 is a floating-point number. - Complex numbers are written in the form, `x + yj`, where `x` is the real part and `y` is the imaginary part. - We can use the `type()` function to know which class a variable or a value belongs to and `isinstance()` function to check if it belongs to a particular class. - Let's look at an example:a = 5 print(type(a)) print(type(5.0)) c = 5 + 3j print(c + 3) print(isinstance(c, complex)) (8+3j) True- While integers can be of any length, a floating-point number is accurate only up to 15 decimal places (the 16th place is inaccurate). - The numbers we deal with every day are of the decimal (base 10) number system. But computer programmers (generally embedded programmers) need to work with binary (base 2), hexadecimal (base 16) and octal (base 8) number systems. - In Python, we can represent these numbers by appropriately placing a prefix before that number. The following table lists these prefixes. **Number System** - **Prefix** - Binary - '0b' or '0B' - Octal - '0o' or '0O' - Hexadecimal - '0x' or '0X' Here are some examples:# Output: 107 print(0b1101011) # Output: 253 (251 + 2) print(0xFB + 0b10) # Output: 13 print(0o15)107 253 13**2. Type Conversion** - We can convert one type of number into another. This is also known as **coercion**. - Operations like addition, subtraction coerce integer to float implicitly (automatically), if one of the operands is float.1 + 2.0- We can see above that 1 (integer) is coerced into 1.0 (float) for addition and the result is also a floating point number. - We can also use built-in functions like `int()`, `float()` and `complex()` to convert between types explicitly. 
These functions can even convert from [strings](https://www.programiz.com/python-programming/string).int(2.3) int(-2.8) float(5) complex('3+5j')- When converting from float to integer, the number gets truncated (decimal parts are removed). **3. Python Decimal** - Python built-in class float performs some calculations that might amaze us. - We all know that the sum of 1.1 and 2.2 is 3.3, but Python seems to disagree.(1.1 + 2.2) == 3.3- What is going on? - It turns out that floating-point numbers are implemented in computer hardware as binary fractions as the computer only understands binary (0 and 1). Due to this reason, most of the decimal fractions we know, cannot be accurately stored in our computer. - Let's take an example. We cannot represent the fraction 1/3 as a decimal number. This will give 0.33333333... which is infinitely long, and we can only approximate it. - It turns out that the decimal fraction 0.1 will result in an infinitely long binary fraction of 0.000110011001100110011... and our computer only stores a finite number of it. - This will only approximate 0.1 but never be equal. Hence, it is the limitation of our computer hardware and not an error in Python.1.1 + 2.2- To overcome this issue, we can use the decimal module that comes with Python. While floating-point numbers have precision up to 15 decimal places, the decimal module has user-settable precision. - Let's see the difference:import decimal print(0.1) print(decimal.Decimal(0.1))0.1 0.1000000000000000055511151231257827021181583404541015625- This module is used when we want to carry out decimal calculations as we learned in school. - It also preserves significance. We know 25.50 kg is more accurate than 25.5 kg as it has two significant decimal places compared to one.from decimal import Decimal as D print(D('1.1') + D('2.2')) print(D('1.2') * D('2.50'))3.3 3.000- Notice the trailing zeroes in the above example. - We might ask, why not implement `Decimal` every time, instead of `float`? - **The main reason is efficiency. Floating point operations are carried out much faster than Decimal operations**. **4. When to use Decimal instead of float** We generally use Decimal in the following cases. - When we are making financial applications that need exact decimal representation. - When we want to control the level of precision required. - When we want to implement the notion of significant decimal places. **5. Python Fractions** - Python provides operations involving fractional numbers through its `fractions` module. - A fraction has a numerator and a denominator, both of which are integers. This module has support for rational number arithmetic. - We can create Fraction objects in various ways. Let's have a look at them.import fractions print(fractions.Fraction(1.5)) print(fractions.Fraction(5)) print(fractions.Fraction(1,3))3/2 5 1/3- While creating `Fraction` from `float`, we might get some unusual results. This is due to the imperfect binary floating point number representation as discussed in the previous section. - Fortunately, `Fraction` allows us to instantiate with string as well. This is the preferred option when using decimal numbers.import fractions # As float # Output: 2476979795053773/2251799813685248 print(fractions.Fraction(1.1)) # As string # Output: 11/10 print(fractions.Fraction('1.1'))2476979795053773/2251799813685248 11/10- This data type supports all basic operations. 
Here are a few examples.from fractions import Fraction as F print(F(1, 3) + F(1, 3)) print(1 / F(5, 6)) print(F(-3, 10) > 0) print(F(-3, 10) < 0)2/3 6/5 False True**6. Python Mathematics** - Python offers modules like `math` and `random` to carry out different mathematics like trigonometry, logarithms, probability and statistics, etc.import math print(math.pi) print(math.cos(math.pi)) print(math.exp(10)) print(math.log10(1000)) print(math.sinh(1)) print(math.factorial(6))3.141592653589793 -1.0 22026.465794806718 3.0 1.1752011936438014 720- Here is the full list of functions and attributes available in the [Python math module](https://www.programiz.com/python-programming/modules/math).import random print(random.randrange(10, 20)) x = ['a', 'b', 'c', 'd', 'e'] # Get random choice print(random.choice(x)) # Shuffle x random.shuffle(x) # Print the shuffled x print(x) # Print random element print(random.random())17 e ['e', 'd', 'b', 'c', 'a'] 0.017498222791133333Toxic Comment Classifier---For the full report please refer to [Toxic Comment Classifier Report](insert link here). The goal of this classifier is to be able to identify whether a comment is toxic, severely toxic, obscene, a threat, an insult, and/or identity hate. Housekeeping In the section below we import the Python modules we'll be using throughout the project, as well as some Jupyter-notebook-specific configuration.# Import libraries necessary for this project import numpy as np import pandas as pd import matplotlib.pyplot as plt from functools import reduce import seaborn as sns from io import StringIO from wordcloud import WordCloud import re from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split import pickle import subprocess sns.set(style="white") # Pretty display for notebooks %matplotlib inline #load the Toxic Comment data data = pd.read_csv('./data/train.csv')Exploratory Data AnalysisOur first step is to understand the data set we're working with. In this section we want to perform the following analysis of the dataset: - Dimensions of the data set - Data Inspection- Completeness of data set - Identify the distribution of labels- Frequencies of words used for each label Dimensions of the data setprint("The data set has {} rows with {} columns".format(*data.shape))Data Inspectiondata.dtypesAs we can see, our DataFrame is made up of 8 columns. The latter 6 are our target labels. They contain only binary values: 0 for false and 1 for true. The `comment_text` column contains the comment as a string.data.head(n=10)As we can see in the table above, comments contain mixed-case characters as well as escaped characters such as `\n`. Completeness of data setprint("Does the data contain missing data? : ") print(pd.isnull(data).any()) print("Longest comments length: {}".format(max(data["comment_text"].apply(len)))) print("Shortest comments length: {}".format(min(data["comment_text"].apply(len)))) print("Mean comments length: {}".format(np.average(data["comment_text"].apply(len)))) print("") data[(data["comment_text"].apply(len)) == 6]Does the data contain missing data?
: id False comment_text False toxic False severe_toxic False obscene False threat False insult False identity_hate False dtype: bool Longest comments length: 5000 Shortest comments length: 6 Mean comments length: 394.0732213246768Identify the distribution of labelsrowsums = data.iloc[:,2:].sum(axis=1) temp_data = data.copy() temp_data['clean'] = (rowsums == 0) labels = temp_data.columns[2:] def calculate_label_proportions(label): proportion = temp_data[temp_data[label] == 1].shape[0] / temp_data.shape[0] return "Percentage of {} comments: {:.2%}".format(label, proportion) label_distribution = reduce(lambda res, cur: "\n".join([res, calculate_label_proportions(cur)]), labels, "") print(label_distribution)Percentage of toxic comments: 9.58% Percentage of severe_toxic comments: 1.00% Percentage of obscene comments: 5.29% Percentage of threat comments: 0.30% Percentage of insult comments: 4.94% Percentage of identity_hate comments: 0.88% Percentage of clean comments: 89.83%We can see in the code block above that our data set is very unbalanced, with none of the labels coming near 10% of the data set, let alone 50%. When we train our network we want to take steps to ensure our models give a true picture of their "accuracy". Therefore, we can initially rule out the use of the arithmetic mean (a.k.a. accuracy) as our evaluation method. Furthermore, given that we're trying to build a classifier that can output multiple classifications, the ROC AUC evaluation metric would be a good choice.x = temp_data[labels].sum() plt.figure(figsize=(12,4)) ax= sns.barplot(x.index, x.values, alpha=0.8) plt.title("Number of Occurrences Per Class") plt.ylabel('Number of Occurrences', fontsize=12) plt.xlabel('Comment Type ', fontsize=12) xWe see that `toxic` is the most common of the toxicity comment types. From looking at the head of the comments we see that a comment can be classed as one or more of the toxicity comment types. Therefore, we should look to identify any correlations one comment type has with the others.# TODO: Confusion matrixIt is also worth exploring the frequency of certain words that appear in each of the toxicity columns as well as the clean column.
To visualise the word frequency we'll use word cloud diagrams for each category.def concat_strings(label): string_io = StringIO() comments = temp_data[temp_data[label] == 1].comment_text comments.apply(lambda x: string_io.write(str(x))) text = string_io.getvalue() string_io.close() return text fig = plt.figure(figsize=(20,30)) for i, label in enumerate(labels): text = concat_strings(label) wordcloud = WordCloud().generate(text) wordcloud = WordCloud(max_words=len(text), relative_scaling=.5).generate(text) fig.add_subplot(7, 1, i + 1) plt.imshow(wordcloud) plt.xlabel("{} comments".format(label)) plt.show() # str.isupper(temp_data["comment_text"].iloc[6]) # TODO violin graph comment length sns.set(color_codes=True) # Load the example tips dataset temp_data['comment_word_count'] = temp_data['comment_text'].apply(lambda x: len(str.split(x))) temp_data['comment_sentance_count'] = temp_data['comment_text'].apply(lambda x: len(re.split('[\.|!|\n]', x))) fig = plt.figure(figsize=(30,60)) fig.add_subplot(6, 2, 1) sns.violinplot(x='toxic', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 2) sns.violinplot(x='toxic', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 3) sns.violinplot(x='severe_toxic', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 4) sns.violinplot(x='severe_toxic', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 5) sns.violinplot(x='obscene', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 6) sns.violinplot(x='obscene', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 7) sns.violinplot(x='insult', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 8) sns.violinplot(x='insult', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 9) sns.violinplot(x='identity_hate', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 10) sns.violinplot(x='identity_hate', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 11) sns.violinplot(x='clean', y='comment_word_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) fig.add_subplot(6, 2, 12) sns.violinplot(x='clean', y='comment_sentance_count', data=temp_data, split=True, inner="quart", pallette={0:'b', 1:'r'}) # TODO violin graph comment frequency of uppercase words temp_data.describe() # temp_data[] temp_data[temp_data.columns[-2:]].describe()In the above the table we see that the longest comment has approximately 1411 words.fig = plt.figure(figsize=(30,30)) def x_score(z, m, o): # z = (x - m) / o return (z * o) + m word_count_mean = temp_data['comment_word_count'].mean() word_count_std = temp_data['comment_word_count'].std() word_count_outlier_score = x_score(2.0, word_count_mean, word_count_std) print(word_count_outlier_score) temp_data_1 = temp_data[(temp_data['comment_word_count'] < word_count_outlier_score)] print("Data count with outliers {}".format(len(temp_data))) print("Data without word_count outliers".format(len(temp_data_1))) sentance_count_mean = 
temp_data['comment_sentance_count'].mean() sentance_count_std = temp_data['comment_sentance_count'].std() sentance_count_outlier_score = x_score(2.0, sentance_count_mean, sentance_count_std) print(sentance_count_outlier_score) temp_data_2 = temp_data[(temp_data['comment_sentance_count'] < sentance_count_outlier_score)] print("Data without sentance_count outliers {}".format(len(temp_data_2))) fig.add_subplot(2, 2, 1) sns.violinplot(x='clean', y='comment_word_count', data=temp_data_1, split=True, inner="quart", palette={0:'b', 1:'r'}) fig.add_subplot(2, 2, 2) sns.violinplot(x='clean', y='comment_sentance_count', data=temp_data_2, split=True, inner="quart", palette={0:'b', 1:'r'})265.73493153046707 Data count with outliers 159571 Data without word_count outliers 65.69847377402671 Data without sentance_count outliersData Preprocessing Now that we've performed our data analysis we'll begin preprocessing the data. We want to apply several methods: the first is the bag-of-words technique (word frequency); then we want to use Word2Vec and GloVe. Below we will define several functions that take in the raw comments and preprocess them. Preprocessing StepsThe first preprocessing step we'll take is to clean the text before processing it into vectors.data.loc[50, 'comment_text'][:500]As we can see above, some of the comments contain escaped characters, punctuation, and other non-alphanumeric characters. Our initial preprocessing step is to remove all non-alphabetical characters. By removing the escaped characters we'll make the text cleaner. However, by removing some of the punctuation we could lose some semantic information, particularly if a user is quoting another user's comment or a Wikipedia article that contains unsavory text.def text_preprocessor(text: str) -> list: regex = r"(\n|\t|\d|[^\s\w])+" partially_processed_text = re.sub(regex, ' ', text.lower(), 0, re.MULTILINE) return re.sub(r" +",' ', partially_processed_text).strip().split() text_preprocessor(data['comment_text'][0])As we can see in the output above, we've removed punctuation and escaped characters. We also converted the text to lower case.X = data['comment_text'] y = data[data.columns[2:]] processed_data_set = train_test_split(X, y, test_size=0.33, random_state=42) X_train_raw, X_test_raw, y_train, y_test = processed_data_set # import os # processed_data_set_filename = "./pickles/data-set.p" # os.makedirs(os.path.dirname(processed_data_set_filename), exist_ok=True) # pickle.dump(processed_data_set, open(processed_data_set_filename, 'wb'))Bag of WordsNow that we've cleaned up our text, we're going to begin converting it into numerical data. Our initial method for doing this will be the **bag of words** model. This involves creating a vocabulary of unique tokens, then building a feature vector for each comment containing the counts of how often each token occurs within it.vec = TfidfVectorizer(ngram_range=(1,2), tokenizer=text_preprocessor, min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1 ) X_train = vec.fit_transform(X_train_raw) X_test = vec.transform(X_test_raw) X_train.shape[0] / 2Naive BenchmarkingOur initial experiment is to confirm that we can classify comments better than chance, i.e. that our models make better predictions than merely guessing the toxicity of a comment. A chance-level baseline is sketched below.
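This sketch is not from the original notebook: it assumes the `X_train`/`X_test`/`y_train`/`y_test` split defined above and uses sklearn's `DummyClassifier` purely to make the "better than guessing" floor explicit.
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.metrics import roc_auc_score

# One dummy model per label, each predicting the class prior regardless of the input.
baselines = [DummyClassifier(strategy="prior").fit(X_train, y_train[c]) for c in y_train.columns]
y_baseline = np.column_stack([clf.predict_proba(X_test)[:, 1] for clf in baselines])
print(roc_auc_score(y_test, y_baseline))  # constant scores give ROC AUC = 0.5, the floor any real model should beat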
We therefore decided to first use Multinomial Naive Bayes.from sklearn.naive_bayes import MultinomialNB from sklearn.base import clone def create_classifiers(classifier, X, y, comment_type): # Clone so each label gets its own independently fitted model clfs = clone(classifier) clfs.fit(X, y[comment_type]) return clfs clfs = MultinomialNB(alpha=.01) classifiers = [create_classifiers(clfs, X_train, y_train, comment_type) for comment_type in y_train.columns] def predict_proba(clfs, x): return [clf.predict_proba(x)[0][1] for clf in clfs] def predict(clfs, x): return [clf.predict(x)[0] for clf in clfs] print(repr(data[(data['threat'] == 1)].iloc[0].comment_text)) print() print(repr(data[(data['threat'] == 1)].iloc[2].comment_text)) threat_sub = data[(data['threat'] == 1)].iloc[0].comment_text #"I’m going to teach you to speak English with this fucking knife!" threat_not_sub = data[(data['threat'] == 1)].iloc[2].comment_text # "Just die!!" vec_threat_sub, vec_threat_not_sub = vec.transform([threat_sub, threat_not_sub]) print(predict_proba(classifiers, vec_threat_sub)) print(predict_proba(classifiers, vec_threat_not_sub)) from sklearn.metrics import roc_auc_score y_pred = [predict_proba(classifiers, x) for x in X_test] roc_auc_score(y_test, y_pred)As we can see above, we achieved a roc_auc_score of `~0.94`. NB-SVM Benchmarking From reading [this paper](https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf) we may be able to build a more robust classifier before delving into the use of CNNs and RNNs for sentiment analysis. Unfortunately this model is not available out of the box in sklearn, so we'll have to create our own implementation. The paper also notes that using Logistic Regression instead of an SVM yields similar results. Therefore, mainly due to the SVM's performance issues, we'll use sklearn's Logistic Regression model instead.from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.utils.validation import check_X_y, check_is_fitted from sklearn.linear_model import LogisticRegression from scipy import sparse import numpy as np class NbSvmClassifier(BaseEstimator, ClassifierMixin): def __init__(self, C=1.0, dual=False, n_jobs=1): self.C = C self.dual = dual self.n_jobs = n_jobs def predict(self, x): # Verify that model has been fit check_is_fitted(self, ['_r', '_clf']) return self._clf.predict(x.multiply(self._r)) def predict_proba(self, x): # Verify that model has been fit check_is_fitted(self, ['_r', '_clf']) return self._clf.predict_proba(x.multiply(self._r)) def fit(self, x, y): y = y.values x, y = check_X_y(x, y, accept_sparse=True) self._r = sparse.csr_matrix(np.log(self._pr(x, 1, y) / self._pr(x, 0, y))) x_nb = x.multiply(self._r) self._clf = LogisticRegression(C=self.C, dual=self.dual, n_jobs=self.n_jobs).fit(x_nb, y) return self def _pr(self, x, y_i, y): p = x[y == y_i].sum(0) return (p + 1) / ((y == y_i).sum() + 1) classifiers_svm = [create_classifiers(NbSvmClassifier(1.0), X_train, y_train, comment_type) for comment_type in y_train.columns] print(predict_proba(classifiers_svm, vec_threat_sub)) print(predict_proba(classifiers_svm, vec_threat_not_sub)) y_pred = [predict_proba(classifiers_svm, x) for x in X_test] roc_auc_score(y_test, y_pred)Problem 3.5_Consider the SCM $X\gets Y^2+N_X,Y\gets N_Y$ and $N_X,N_Y$ unit normal.
Generate 200 iid samples._Like all DAG SCMs, just generate in topological order of the graph.y = np.random.randn(200) x = y ** 2 + np.random.randn(200) sns.scatterplot(x, y) plt.xlabel('X') plt.ylabel('Y') plt.show()import praw reddit = praw.Reddit(user_agent='risi', client_id='********', client_secret="********", username='risi1001', password='********') subreddit = reddit.subreddit('corona') hot_corona = subreddit.hot(limit = 10) for submission in hot_corona: print(submission.title)Just a reminder that this subreddit is for the City of Corona, CA and related topics No one is out in Spain and these pigeons are looking for food. Anyone who sees them should throw out food for them [Serious] How can I stay safe in an airport If anyone needs supplies... Toilet paper, food, dairy, medical... I work at Target and we are allowing employees to shop 1 hour before open. Skyline trail packed Why is the farmers market still going on? Two things happening. First case confirmed in the city of Corona This actually applies to this group 😂 I guess CNUSD will be staying closed until May? Sounds like we will have to start taking online classesMore String Methodsprint("Mountains".upper()) print("Mountains".lower()) answer = "YES" if answer.lower() == "yes": print("User said yes") print(" yes ".strip()) print(" yes ".lstrip()) print(" yes ".rstrip()) print("The number of times e occurs in this string in 4".count("e")) print("Forest".endswith("rest")) print("Forest".isnumeric()) print("12345".isnumeric()) print(int("12345")) print(" ".join(["This", "is", "a", "phrasee", "joined", "by", "spaces"])) print("...".join(["This", "is", "a", "phrase", "joinded", "by", "triple", "dots"])) print("This is another example".split())['This', 'is', 'another', 'example']Formatting Stringsname = "Manny" number = len(name) * 3 "Hello {}, your lucky number is {}".format(name, number) "Your lucky number is {number}, {name}".format(name=name, number=len(name)*3) price = 7.5 with_tax = price * 1.09 print(price, with_tax) print("Base price: ${:.2f} with Tax: ${:.2f}".format(price, with_tax)) def to_celsius(x): return (x-32) * 5/9 for x in range(0, 101, 10): print("{:>3} F | {:>6.2f} C".format(x, to_celsius(x))) print("'{:d}'".format(10)) print("'{:.2f}'".format(0.5)) print("'{:.2s}'".format('Python')) print("'{:<6s}'".format('Py')) print("'{:>6s}'".format('Py')) print("'{:^6s}'".format('Py'))'10' '0.50' 'Py' 'Py ' ' Py' ' Py 'Official documentation for [the format string syntax](https://docs.python.org/3/library/string.htmlformatstrings)Check out the official documentation for [all available expressions](https://docs.python.org/3/library/string.htmlformat-specification-mini-language).name = "Mircah" print(f"Hello {name}") item = "Purple Cup" amount = 5 price = amount * 3.25 print(f"Item: {item} - Amount: {amount} - Price: {price:.2f}")Hello Mircah Item: Purple Cup - Amount: 5 - Price: 16.25GLADs Intersecting AOIs#Import Global Metadata etc %run '0.Importable_Globals.ipynb' from datetime import date from datetime import timedelta import datetimeStandard Rank ApproachAllows users to rank regions withing an iso, or adm1, by the amount of GLAD alerts that have occured in a given time frame.e.g. 
https://production-api.globalforestwatch.org/query/b6c75127-85c1-42fa-81a4-088d313797bc?sql=SELECT%20*%20FROM%20data%20%20WHERE%20polyname%20=%20%27wdpa%27%20LIMIT%2010See: https://basecamp.com/3063126/projects/10727890/todos/344440348and: https://basecamp.com/3063126/projects/10727890/todos/344440348comment_605072039polynames: - 'gadm28'- 'ifl_2013'- 'wdpa'- 'kba'- 'aze'- TCLs (tiger conservation landscapes) not sure...Rank by:- area of GLADs within region (ha)- area of GLADs as a persentage of total extentadm0 = 'BRA' adm1 = None polyname = 'gadm28' threshold = 30 #hard code extent_year = 2010 #hard coded since all glads data is post-2010 window = 12 #allow 4,12,26,52 sortBy = 'area_perc' tags = ["forest_change", "Biodiversity"] #replaces current tree cover loss widget in summary selectable_polynames = ['gadm28', 'kba', 'ifl_2013', 'wdpa', 'aze', 'tcl'] areaId_to_name = None if adm1: tmp = get_admin2_json(iso=adm0, adm1=adm1) areaId_to_name ={} for row in tmp: areaId_to_name[row.get('adm2')] = row.get('name') if iso and not adm1: tmp = get_admin1_json(iso=adm0) areaId_to_name={} for row in tmp: areaId_to_name[row.get('adm1')] = row.get('name') def glads_queries_by_poly(iso, adm1=None, polyname='gadm28'): #POLYNAME = gadm28, wdpa... # Return count for now (in case we change our mind). But do not include on prod url = f'https://production-api.globalforestwatch.org/query/b6c75127-85c1-42fa-81a4-088d313797bc?sql=' if adm1: print(f'Requesting adm2 alerts') sql = f""" SELECT iso, adm1, adm2, alert_count as count, alert_date as date, area_ha, polyname FROM data WHERE iso = '{iso}' AND adm1 = {adm1} AND polyname = '{polyname}'""" elif iso: print(f'Requesting adm1 alerts') sql = f""" SELECT iso, adm1, adm2, alert_count as count, alert_date as date, area_ha, polyname FROM data WHERE iso = '{iso}' AND polyname = '{polyname}'""" r = requests.get(url+sql) print(r.url) return r.json().get('data',None)#.get('attributes', None).get('value', None) # Returns json object containing admin-codes, total area and extent (both in ha) # If adm1 is not specified, it returns the total values for each adm1 region # Else, returns the adm2 values within that adm1 region # You may also specify a polyname (intersecting area) e.g. 
'extent and % of plantations only' # By default polyname is 'gadm28' (all forest extent) def multiregion_extent_queries(adm0, adm1=None, year='area_extent_2000', p_name='gadm28', threshold=30): if adm0 and not adm1: print('Request for adm1 areas') sql = (f"SELECT adm1 as region, sum({year}) as extent, sum(area_gadm28) as total " f"FROM {ds} " f"WHERE iso = '{adm0}' " f"AND thresh = {threshold} " f"AND polyname = '{p_name}' " f"GROUP BY adm1 " f"ORDER BY adm1") elif adm0 and adm1: print('Request for adm2 areas') sql = (f"SELECT adm2 as region, {year} as extent, area_gadm28 as total " f"FROM {ds} " f"WHERE iso = '{adm0}' " f"AND thresh = {threshold} " f"AND polyname = '{p_name}' " f"AND adm1 = '{adm1}' ") return sql def sumByAdm(data, adm1=None, window=4, sortBy='area_ha'): """Filters away dates outside of the past {window} weeks and then groups by adm""" if adm1: admin = 'adm2' else: admin = 'adm1' today = date.today() start = today.strftime('%Y/%m/%d') end = (today - timedelta(weeks=window-1)).strftime('%Y/%m/%d') tmp = [] for d in data: if d.get(admin) not in tmp: tmp.append(d.get(admin)) adm_list = [] for adm in tmp: adm_filter = list(filter(lambda x: (x.get(admin) == adm and x.get('date') >= end), data)) total_area = total_count = 0 ext = list(filter(lambda x: x.get('region') == adm, extent_data))[0].get('extent') for a in adm_filter: total_count += a.get('count') total_area += a.get('area_ha') adm_list.append({'adm': areaId_to_name[adm], 'area_ha': total_area, 'area_perc': 100 * total_area / ext # 'count': total_count, # 'count_per_ha': 100 * total_count / ext }) ranked_list = sorted(adm_list, key=lambda k: k[sortBy], reverse=True) return ranked_list # Example sql and returned data url = f"https://production-api.globalforestwatch.org/v1/query/{ds}" sql = multiregion_extent_queries(adm0, adm1, extent_year_dict[extent_year], polyname, threshold) properties = {"sql": sql} r = requests.get(url, params = properties) print(r.url) print(f'Status: {r.status_code}') extent_data = r.json()['data'] glad_data = glads_queries_by_poly(iso=adm0, adm1=adm1, polyname=polyname) sorted(glad_data, key=lambda k: k['date'], reverse=True)[0:5] #Append ranking number using a list comprehension ranked_data = [{'adm': x.get('adm'), 'area_ha': x.get('area_ha'), 'area_perc': x.get('area_perc'), # 'count': x.get('count'), # 'count_per_ha': x.get('count_per_ha'), 'rank': i+1} for i,x in enumerate(sumByAdm(data=glad_data, adm1=adm1, window=window, sortBy=sortBy))] ranked_data[0:5]Dynamic Sentencephrase_dict = { 'gadm28': 'in', 'wdpa': 'in protected areas within', 'aze': 'in Alliance for Zero Extinction sites within', 'ifl_2013': 'in intact forests within', 'tcl': 'in tiger conservation landscapes within', 'kba': 'in key biodiversity areas within' } if window == 4: timeframe = 'month' elif window == 12: timeframe = '3 months' elif window == 26: timeframe = '6 months' elif window == 52: timeframe = 'year' print(f"In the last {timeframe} ",end="") print(f"{ranked_data[0].get('area_ha')} ha of GLAD alerts were detected {phrase_dict[polyname]} ",end="") print(f"{ranked_data[0].get('adm')}, ",end="") print(f"equivalent to a {ranked_data[0].get('area_perc')}% loss ",end="") print(f"relative to 2010 tree cover extent.",end="")In the last 3 months 74583.33857296083 ha of GLAD alerts were detected in Maranhão, equivalent to a 0.46339521267453937% loss relative to 2010 tree cover extent.Experimental - DO NOT IMPLEMENT!Could be interesting to plot counts vs time for the last X weeks for the top 5 admin regions experiencing the 
largest number of glad alerts.def glads_queries_by_poly(iso, adm1=None, polyname='gadm28'): #POLYNAME = gadm28, wdpa... url = f'https://production-api.globalforestwatch.org/query/b6c75127-85c1-42fa-81a4-088d313797bc?sql=' if adm1: print(f'Requesting adm2 alerts') sql = f""" SELECT iso, adm1, adm2, alert_count as count, alert_date as date, area_ha, polyname FROM data WHERE iso = '{iso}' AND adm1 = {adm1} AND polyname = '{polyname}'""" elif iso: print(f'Requesting adm1 alerts') sql = f""" SELECT iso, adm1, adm2, alert_count as count, alert_date as date, area_ha, polyname FROM data WHERE iso = '{iso}' AND polyname = '{polyname}'""" r = requests.get(url+sql) print(r.url) return r.json().get('data',None)#.get('attributes', None).get('value', None) def cleanData(test_data): for d in test_data: year = int(d.get('date')[0:4]) week = int(d.get('date')[5:7]) day = int(d.get('date')[8:10]) date = datetime.date(year,week,day) d['j_year'] = date.isocalendar()[0] d['j_week'] = date.isocalendar()[1] return test_data adm0 = 'BRA' adm1 = None polyname = 'gadm28' tags = ["forest_change", "Biodiversity"] #replaces current tree cover loss widget in summary selectable_polynames = ['gadm28', 'kba', 'ifl_2013', 'wdpa', 'aze', 'tcl'] def createData(test_data, adm1=None): window = 58 today = date.today() end = (today - timedelta(weeks=window)).strftime('%Y/%m/%d') if adm1: print('adm2') admin = 'adm2' else: print('adm1') admin = 'adm1' tmp = [] for d in test_data: if d.get(admin) not in tmp: tmp.append(d.get(admin)) adm_list = [] for adm in tmp: adm_filter = list(filter(lambda x: x.get(admin) == adm, test_data)) adm_filter = cleanData(list(filter(lambda x: x.get('date') >= end, adm_filter))) tmp = [] for w in range(0,window): tmp_week = (today - timedelta(weeks=w)) year = tmp_week.isocalendar()[0] week = tmp_week.isocalendar()[1] tmp_filter = list(filter(lambda x: (x.get('j_week') == week and x.get('j_year') == year), adm_filter)) ta = tc = 0 if (len(tmp_filter) != 0): for t in tmp_filter: ta += t.get('area_ha') tc += t.get('count') tmp.append({'area_ha': ta, 'count': tc, 'j_week': week, 'j_year': year}) adm_filter = sorted(tmp, key=lambda k: (k['j_year'], k['j_week']), reverse=True) adm_list.append({'adm': adm, 'data': adm_filter}) rank_data = [] for d in adm_list: tc = 0 ta = 0 for el in d.get('data'): ta += el.get('area_ha') tc += el.get('count') rank_data.append({'adm': d.get('adm'),'data': d.get('data'), 'total_count': tc, 'area_ha': ta}) return sorted(rank_data, key=lambda k: k['area_ha'], reverse=True) def convolution(data): """Takes a list of means data and applies a runnung-mean over it with a window of 12 weeks to return a smoothed set of values. Incrementally run through the list of means and calculates the mean of 12 values at a time.""" window = 6 N = len(data) smoothed = [] for i in range(0, int(N-window)): tmp = np.mean(data[i:i+window]) smoothed.append(tmp) return smoothed def getGladViz(test_data, metric='count'): """Displays the top 5 adm regions and plots their metric ('counts' or 'area_ha') against the number of weeks. Each of the adm regions counts are stacked on top of eachother to show a cumulative metric (in shades of red). The total number of counts for teh region is also displayed in grey as a dotted line. 
Finally, to avoid noise, the counts are smoothed with a window of 6 weeks.""" x_val = [] y1 = [] y2 = [] y3 = [] y4 = [] y5 = [] yt = [] for i in range(0, 58): tmp = 0 for el in test_data: tmp += el.get('data')[i].get(metric) yt.append(tmp) y5.append(test_data[4].get('data')[i].get(metric)) y4.append(test_data[3].get('data')[i].get(metric)+y5[i]) y3.append(test_data[2].get('data')[i].get(metric)+y4[i]) y2.append(test_data[1].get('data')[i].get(metric)+y3[i]) y1.append(test_data[0].get('data')[i].get(metric)+y2[i]) x_val.append(-i) y1 = convolution(y1) y2 = convolution(y2) y3 = convolution(y3) y4 = convolution(y4) y5 = convolution(y5) yt = convolution(yt) x_val = x_val[3:-3] plt.figure(figsize=(20,10)) plt.plot(x_val, yt, 'r--', color='#aaaaaa', linewidth=2.5) plt.plot(x_val, y1, 'r-', color='#5c0909', linewidth=1.5) # week in question=RED plt.plot(x_val, y2, 'r-', color='#a70000', linewidth=1.5) # mean=BLACK plt.plot(x_val, y3, 'r-',color='#ff0000', linewidth=1.5) # mean=BLACK plt.plot(x_val, y4, 'r-',color='#ff5252', linewidth=1.5) # mean=BLACK plt.plot(x_val, y5, 'r-',color='#ff7b7b', linewidth=1.5) # mean=BLACK plt.fill_between(x_val, y1, y2, color='#5c0909', alpha=0.35) plt.fill_between(x_val, y2, y3, color='#a70000', alpha=0.35) plt.fill_between(x_val, y3, y4, color='#ff0000', alpha=0.35) plt.fill_between(x_val, y4, y5, color='#ff5252', alpha=0.35) plt.fill_between(x_val, y5, 0, color='#ff7b7b', alpha=0.35) ax = plt.gca() ax.set_xlabel('weeks') ax.set_ylabel(f"Thousands of {metric}s") plt.show() adm0 = 'MYS' adm1 = None polyname = 'gadm28' getGladViz(createData(glads_queries_by_poly(iso=adm0, adm1=adm1, polyname=polyname), adm1)) adm0 = 'IDN' adm1 = None polyname = 'gadm28' getGladViz(createData(glads_queries_by_poly(iso=adm0, adm1=adm1, polyname=polyname), adm1)) adm0 = 'BRA' adm1 = None polyname = 'wdpa' getGladViz(createData(glads_queries_by_poly(iso=adm0, adm1=adm1, polyname=polyname), adm1)) def getGladVizB(test_data, metric='count'): x_val = [] y1 = [] y2 = [] y3 = [] y4 = [] y5 = [] yt = [] for i in range(0, 58): tmp = 0 for el in test_data: tmp += el.get('data')[i].get(metric) yt.append(tmp) y5.append(test_data[4].get('data')[i].get(metric)) y4.append(test_data[3].get('data')[i].get(metric)+y5[i]) y3.append(test_data[2].get('data')[i].get(metric)+y4[i]) y2.append(test_data[1].get('data')[i].get(metric)+y3[i]) y1.append(test_data[0].get('data')[i].get(metric)+y2[i]) x_val.append(-i) y1 = convolution(y1) y2 = convolution(y2) y3 = convolution(y3) y4 = convolution(y4) y5 = convolution(y5) yt = convolution(yt) x_val = x_val[3:-3] plt.figure(figsize=(20,10)) plt.plot(x_val, yt, 'r--', color='#aaaaaa', linewidth=2.5) plt.bar(x_val, y1, color='#5c0909', bottom=0) # week in question=RED plt.bar(x_val, y2, color='#a70000', bottom=0) # mean=BLACK plt.bar(x_val, y3, color='#ff0000', bottom=0) # mean=BLACK plt.bar(x_val, y4, color='#ff5252', bottom=0) # mean=BLACK plt.bar(x_val, y5, color='#ff7b7b', bottom=0) # mean=BLACK ax = plt.gca() ax.set_xlabel('weeks') ax.set_ylabel(f"Thousands of {metric}s") plt.show()**CNN Model For Early Detection of Sepsis** **Mounting Drive to the Content Folder**from google.colab import drive drive.mount('/content/drive')Mounted at /content/drive**Import Required Libraries**#Importing Libraries import numpy as np import pandas as pd**Load the dataset "Patient.csv" from drive**dataset=pd.read_csv('/content/drive/My Drive/EPS DATA/patient.csv')The dataset consists of clinical data of 40336 patients.For each patient hourly clinical data is given.The dataset is 
made each and patient Clinical data separated by Patient IDIt consists of 42 ColoumnsWe need to predict whther the patient is diagnosed with sepsis i.e 'Sepsis Label'Sepsis Label is given as 0 for not diagnosed with 1. 0 for not diagnosed with Sepsis2. 1 for diagnosed with Sepsis#View The dataset dataset.head(5) dataset.shape dataset.size**Preparing Patient wise data**#Separating Patient ID,Sepsis Label form all the data features=dataset.drop(['SepsisLabel','Patient_Id'],axis=1) features.head(5) features.shape features.size #Finding the percentage of missing data from each Column missing=features.isnull().sum()*100/1552210 missing cols=list(missing[missing<90].index) #Dropping all the columns from features data which are 90% missing features=dataset[cols] features.head(5) features.shape features.size #Dropping HospAdmTime and ICULOS features=features.drop(['HospAdmTime','ICULOS'],axis=1) #Filling the data with mean features=features.fillna(features.mean()) features.head(5) #Converting the DataFrame to array featurearr=features.iloc[:,:].values label=dataset[['SepsisLabel',"Patient_Id"]] label.shape featurearr.shape yarr=label.iloc[:].values patientid=dataset['Patient_Id'].values.tolist() #Appending Hourly data of each patient to single row c=0 ip=[] yp=[] seq=[] index=0 for i in range(len(patientid)): if i==0: seq+=featurearr[index].tolist() index+=1 continue if patientid[i]!=patientid[i-1]: ip.append(seq) yp+=[yarr[i-1,0]] seq=[] if yarr[i,0]==0: seq+=featurearr[index].tolist() elif yarr[i,0]==1 and patientid[i-1]!=patientid[i]: seq+=featurearr[index].tolist() index+=1 ip.append(seq) yp+=[yarr[i-1,0]] m=0 for i in yp: if i==1: m+=1 print('Number of Patients Diagnosed With Sepsis is ',m) max=0 for i in ip: if max**Splitting Data Using for training and testing**#Spliiting data set in ratio 6:4 for training and testing from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.4,random_state=0) print('Training Set:') print('Features',x_train.shape,'\nLabel',y_train.shape) print('Testing Set:') print('Features',x_test.shape,'\nLabel',y_test.shape)Training Set: Features (24201, 4032) Label (24201, 2) Testing Set: Features (16135, 4032) Label (16135, 2)**Preparing the model**from keras.models import Sequential from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten from keras.layers import LSTM from keras.layers.embeddings import Embedding embedding_vecor_length = 32 model1 = Sequential() model1.add(Embedding(1000, embedding_vecor_length, input_length=4032)) model1.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')) model1.add(MaxPooling1D(pool_size=2)) model1.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')) model1.add(MaxPooling1D(pool_size=2)) model1.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')) model1.add(MaxPooling1D(pool_size=2)) model1.add(Flatten()) model1.add(Dense(128, activation='relu')) model1.add(Dense(2, activation='softmax')) model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model1.summary())Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, 4032, 32) 32000 _________________________________________________________________ conv1d (Conv1D) (None, 4032, 64) 6208 _________________________________________________________________ max_pooling1d 
(MaxPooling1D) (None, 2016, 64) 0 _________________________________________________________________ conv1d_1 (Conv1D) (None, 2016, 64) 12352 _________________________________________________________________ max_pooling1d_1 (MaxPooling1 (None, 1008, 64) 0 _________________________________________________________________ conv1d_2 (Conv1D) (None, 1008, 64) 12352 ________________________________________________________[...]**Training the model with train data**#training the model model1.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, batch_size=128)Epoch 1/10 190/190 [==============================] - 341s 2s/step - loss: 0.2138 - accuracy: 0.9212 - val_loss: 0.1360 - val_accuracy: 0.9641 Epoch 2/10 190/190 [==============================] - 338s 2s/step - loss: 0.1319 - accuracy: 0.9650 - val_loss: 0.1322 - val_accuracy: 0.9639 Epoch 3/10 190/190 [==============================] - 337s 2s/step - loss: 0.1346 - accuracy: 0.9628 - val_loss: 0.1492 - val_accuracy: 0.9634 Epoch 4/10 190/190 [==============================] - 337s 2s/step - loss: 0.1256 - accuracy: 0.9636 - val_loss: 0.1782 - val_accuracy: 0.9505 Epoch 5/10 190/190 [==============================] - 338s 2s/step - loss: 0.1199 - accuracy: 0.9656 - val_loss: 0.1364 - val_accuracy: 0.9642 Epoch 6/10 190/190 [==============================] - 338s 2s/step - loss: 0.1065 - accuracy: 0.9682 - val_loss: 0.1440 - val_accuracy: 0.9642 Epoch 7/10 190/190 [==============================] - 339s 2s/step - loss: 0.0981 - accuracy: 0.9701 - val_loss: 0.1508 - val_accuracy: 0.9623[...]**Predict the test data using trained model**y_pred=model1.predict(x_test) y_pred y_pred.shape y_pred1=np.round(y_pred) ypred=[] for i in y_pred1: if i[0]==0: ypred.append(1) else: ypred.append(0) ytest=[] for i in y_test: if i[0]==0: ytest.append(1) else: ytest.append(0)**Accuracy Score**from sklearn.metrics import accuracy_score accuracy_score(ytest,ypred)**Confusion Matrix**from sklearn.metrics import confusion_matrix cm=confusion_matrix(ytest,ypred) cm**HeatMap**import seaborn as sns sns.heatmap(cm,annot=True)Understanding Jupyter Notebook Interface- Print "Hello World"- How to run code- Navigating around Jupyter notebook- Playing a Random Guessing Gameimport random magic_num = random.randint(1,101) print('\t\t\t\t\t\t\t Guessing Game\t\t') print('\t\t\t\t\t\t\tBy ') while True: user_input = int(input('Enter a number in 1 to 100 inclusive: ')) if user_input > magic_num: print('Too High! Chose smaller number.') continue elif user_input < magic_num: print('Too Low! Chose Larger number.') continue else: print('Congratulations! You have guessed the number {} correctly!'.format(magic_num)) breakGuessing Game By Enter a number in 1 to 100 inclusive: 56 Too Low! Chose Larger number. Enter a number in 1 to 100 inclusive: 78 Too Low! Chose Larger number. Enter a number in 1 to 100 inclusive: 82 Too High! Chose smaller number. Enter a number in 1 to 100 inclusive: 92 Too High! Chose smaller number. Enter a number in 1 to 100 inclusive: 80 Congratulations! You have guessed the number 80 correctly!What if I wannt to quit game in the middle, when I get exhausted? Can I do that? Yes! Definietly you can. With slight modification in code you can make the code work as you want it to be. Can YOU Modify this code?prnt("djhf")Dogs vs. 
Cats Prediction Problem* https://www.tensorflow.org/tutorials/images/cnn* https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-to-classify-photos-of-dogs-and-cats/from tensorflow.keras.preprocessing.image import load_img, img_to_array from tensorflow.keras.models import load_modelLoad image to be predicted# Load and prepare the image def load_image(filename): # Load the image img = load_img(filename, target_size=(32, 32)) # Convert to array img = img_to_array(img) # Reshape into a single sample with 3 channels img = img.reshape(1, 32, 32, 3) # Center pixel data img = img.astype('float32') img = img - [123.68, 116.779, 103.939] return img # Load the image # https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2019/03/sample_image.jpg img = load_image('sample_image.jpeg') # Load model model = load_model('final_model.h5') # Predict the class result = model.predict(img) # 0 - Cat , 1 - Dog # Wrong prediction result[0]Exercise 5# Import libraries import cv2 import numpy as np import matplotlib.pyplot as plt/home/viniciuscampos/miniconda3/envs/dip/lib/python3.7/site-packages/matplotlib/font_manager.py:232: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. 'Matplotlib is building the font cache using fc-list. 'Loading imageoriginal = cv2.imread('images/oranges.jpg') original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB) hsv = cv2.cvtColor(original, cv2.COLOR_RGB2HSV) plt.figure(figsize=(15,10)) ax1 = plt.subplot(121) ax1.imshow(original) ax2 = plt.subplot(122) ax2.imshow(hsv, cmap='hsv') ax1.axis('off') ax2.axis('off') ax1.text(0.5,-0.1, "original", ha="center", transform=ax1.transAxes) ax2.text(0.5,-0.1, "hsv", ha="center", transform=ax2.transAxes) plt.show()Filtering the orangesoranges = cv2.inRange(hsv, (0, 154, 130), (35, 255, 255)) oranges = cv2.bitwise_and(original, original, mask=oranges) plt.figure(figsize=(10, 5)) plt.imshow(oranges) plt.axis('off') plt.show()Filtering the leavesleaves = cv2.inRange(hsv, (15, 110, 0), (70, 255, 85)) leaves = cv2.bitwise_and(original, original, mask=leaves) plt.figure(figsize=(10, 5)) plt.imshow(leaves) plt.axis('off') plt.show()Filtering the doordoor = cv2.inRange(hsv, (0, 40, 30), (25, 105, 160)) door = cv2.bitwise_and(original, original, mask=door) plt.figure(figsize=(10, 5)) plt.imshow(door) plt.axis('off') plt.show()Filtering the backgroundbackground = cv2.inRange(hsv, (0, 0, 180), (115, 70, 255)) background = cv2.bitwise_and(original, original, mask=background) plt.figure(figsize=(10, 5)) plt.imshow(background) plt.axis('off') plt.show()Model-1.1 > Covid-19 Indian Death Predictor using Normal Equation in linear regression with regularization.import numpy as np import matplotlib.pyplot as plt import pandas as pd days = np.arange(316) train_days = np.arange(300) test_days = np.arange(16) for i in range(0,316): days[i] = i+1 if(i<300): train_days[i] = i+1 else: test_days[i-300] = i+1 data = pd.read_csv('Covid19_India_Shyam.csv', sep=',',header=None) shyam = data.values sample_data = np.arange(316) for i in range(1,316): sample_data[i]= shyam[i][1] #print(sample_data) #print(len(days), len(sample_data)) plt.title("Normal equation with regularization: Sample Data plot") plt.xlabel("Days") plt.ylabel("No of deaths per day") plt.scatter(days, sample_data, color='black') plt.show()New sectionimport numpy as np #for inverse matrix from numpy.linalg import pinv import matplotlib.pyplot as plt x = days y = sample_data # form matrix X and Y and coefficent of lembda X = 
np.array([np.ones(len(x)),x, np.power(x, 2),np.power(x, 3) ,np.power(x, 4),np.power(x, 5),np.power(x, 6) ]).T Y = (y[:, np.newaxis]) one = np.identity(X.shape[1]) one[0,0] = 0 lamda = 2 # apply normal equation theta_final = pinv(X.T.dot(X) + lamda*one).dot(X.T).dot(Y) print("Showing the theta (or the parameters) for the hypothesis:\n"+ str(theta_final)) print("Ploting the graph:") y_pred = theta_final.T.dot(X.T) model_1_output = y_pred plt.title("Normal Equation with Regularization") plt.xlabel("Days") plt.ylabel("No of Deaths per Day") plt.scatter(x, y, color='black') plt.plot(x, y_pred[0,:], color='red', linewidth=5) plt.show() # Prediction for january month # generating fuction of death is y= Hw(X) = [1 + X + X^2 + X^3 + X^4 + X^5 + X^6]*theta_final.T import numpy as np jan_days = np.arange(31) draft = np.arange(31) res = np.arange(31) death = np.arange(31) total_death = 0 for i in range(0,31): jan_days[i] = 336+i draft = np.array([np.ones(len(jan_days)),jan_days, np.power(jan_days, 2),np.power(jan_days, 3) ,np.power(jan_days, 4),np.power(jan_days, 5),np.power(jan_days, 6)]).T res = np.dot(draft,theta_final) for i in range(0,30): if(res[i]-abs(res[i])>=0.50): death[i] = res[i][0]+1 else: death[i] = res[i][0]-1 total_death = total_death + death[i] print("The expected number of deaths due to COVID-19 for the month of January, 2021 using Normal equation with regularization : " + str(total_death)) plt.title("Normal equation with regularization: January Month Death Plotting") plt.xlabel("Days (In January Month)") plt.ylabel("No of deaths per day (In January Month)") plt.scatter(jan_days, death, color='black') plt.show()Model-1.2 Using iterative method with regularization# Data making data = [] import numpy as np data_e = np.arange(316*2).reshape(316,2) for i in range(0,316): data_e[i][0] = days[i] data_e[i][1] = sample_data[i] data =data_e.tolist() #data = [(1, 1), (2, 2), (5, 5.5), (6, 8), (9, 10)] import random import matplotlib.pyplot as plt import math import numpy as np import functools from matplotlib.animation import FuncAnimation """ Dummy Data for Linear Regression """ #data = [(1, 1), (2, 2), (5, 5.5), (6, 8), (9, 10)] """ Plot the line using theta_values """ print(type(data)) def plot_line(formula, x_range, order_of_regression, label=None): x = np.array(x_range).tolist() y = [formula(update_features(x_i, order_of_regression, 9)) for x_i in x] l, = plt.plot(x, y, label=label, color = 'darkorange') return l """ Hypothesis Function """ def h(x, theta): return np.matmul(theta.T, x)[0][0] """ Update features by order of the regression """ def update_features(x, order_of_regression, m = 9): features = [2] for i in range(order_of_regression): features.append(math.pow(x, i+1)/ math.pow(m, i+1)) return np.atleast_2d(features).T """ Simultaneous Update """ def update_theta(data, alpha, theta, order_of_regression): temp = [] for i in range(order_of_regression+1): temp.append(theta[i] - alpha * j_prime_theta(data, theta, order_of_regression, i)) theta = np.array(temp) return theta def reg_j_prime_theta(data, theta, l, order_of_regression, i): result = 0 m = len(data) for x, y in data : x = update_features(x, order_of_regression) result += (h(x, theta) - y) * x[i] result += l*theta[i] return (1/m) * result def reg_j(data, theta, l, order_of_regression): cost = 0 m = len(data) for x, y in data: x = update_features(x, order_of_regression) cost += math.pow(h(x, theta) - y, 2) reg = 0 for j in theta: reg += math.pow(j, 2) reg = reg * l return (1/(2*m)) * (cost + reg) def reg_update_theta(data, 
alpha, theta, l, order_of_regression): temp = [] for i in range(order_of_regression+1): temp.append(theta[i] - alpha * reg_j_prime_theta(data, theta, l, order_of_regression, i)) theta = np.array(temp) return theta def reg_gradient_descent(data, alpha, l, tolerance, theta=[], order_of_regression =316): if len(theta) == 0: theta = np.atleast_2d(np.random.random(order_of_regression+1) * 100).T prev_j = 100000 curr_j = reg_j(data, theta, l, order_of_regression) print(curr_j) cost_history = [] theta_history = [] counter = 0 while(abs(curr_j - prev_j) > tolerance): try: cost_history.append(curr_j) theta_history.append(theta) theta = reg_update_theta(data, alpha, theta, l, order_of_regression) prev_j = curr_j curr_j = reg_j(data, theta, l, order_of_regression) if counter % 100 == 0: print(curr_j) counter += 1 except: break print("Stopped with Error at %.5f" % prev_j) return theta, cost_history, theta_history reg_theta, reg_cost_history, reg_theta_history = reg_gradient_descent(data, 0.0001, 0.01 , 0.001, order_of_regression = 1) #print(reg_theta) #print(reg_cost_history) #print(reg_theta_history) f = functools.partial(h, theta = reg_theta_history[-1]) plt.scatter([i[0] for i in data], [i[1] for i in data]) reg = plot_line(f, (np.array(range(10,91))/10).tolist(), order_of_regression=1, label='regularized') #f = functools.partial(h, theta=theta_history[-1]) #unreg = plot_line(f, (np.array(range(10,91))/10).tolist(), order_of_regression=5, label='not regularized') #plt.legend([reg, unreg], ['Regularized', 'Not Regularized']) plt.title('Regularization with GDA') plt.show() print(data) f = functools.partial(h, theta = reg_theta_history[-1]) plt.scatter([i[0] for i in data], [i[1] for i in data]) reg = plot_line(f, (np.array(range(10,91))/10).tolist(), order_of_regression=50, label='regularized') #f = functools.partial(h, theta=theta_history[-1]) #unreg = plot_line(f, (np.array(range(10,91))/10).tolist(), order_of_regression=5, label='not regularized') #plt.legend([reg, unreg], ['Regularized', 'Not Regularized']) plt.title('Effect of Regularization') plt.show() print(data)Model-1.3 >Using locally weighted Linear Regressiondef LWR(x, y, cp, tau): gamma = np.exp(np.sum((x-cp)**2, axis = 1)/(-2*tau*tau)) w = (x.T) * gamma #print(w) Jw = np.linalg.pinv(w@x)@w@y #print(Jw) return (np.dot(cp, Jw)) def Locally_weighted_Reg (x, y, tau): #weight = [] y_pred = [] x = np.c_[np.ones(len(x)), x] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, random_state = 3) for cp in x_test: predict = [LWR (x_train, y_train, cp, tau)] #print(predict) y_pred.append(predict) # Plotting graphs plt.title('Locally Weighted Regression') plt.xlabel('Days') plt.ylabel('Deaths') plt.scatter(x[:,1],y) plt.scatter(x_test[:,1], y_pred, color = 'darkorange') plt.show() return (y_pred,y_test) tau = 0.08 (model_3_output, model_3_actual) = Locally_weighted_Reg (days, sample_data, tau) """##prediction jan_days_list = [] jan_days_list = jan_days.tolist() Jan_pred = [] for i in jan_days_list: pred = [LWR(x, y, i, tau)] Jan_pred.append(pred) plt.scatter(Jandays, Jan_pred) # x, y are actual data points... There is no need to use splitted data """ ################# error calculation ##################### def rmse(predictions, targets): differences = predictions - targets #the DIFFERENCEs. 
differences_squared = differences ** 2 #the SQUAREs of ^ mean_of_differences_squared = differences_squared.mean() #the MEAN of ^ rmse_val = np.sqrt(mean_of_differences_squared) #ROOT of ^ return rmse_val rmse(sample_data,model_1_output) rmse(model_3_output, model_3_actual) #rmse(model_2_output, model_2_actual)draft"""def get_WeightMatrix_for_LOWES(query_point, Training_examples, Bandwidth): # M is the No of training examples M = Training_examples.shape[0] # Initialising W with identity matrix W = np.mat(np.eye(M)) # calculating weights for query points for i in range(M): xi = Training_examples[i] denominator = (-2 * Bandwidth * Bandwidth) W[i] = np.exp(np.dot((xi-query_point), (xi-query_point).T)/denominator) return W # function to make predictions def predict(training_examples, Y, query_x, Bandwidth): M = training_examples.shape[0] all_ones = np.ones((M, 1)) X_ = np.hstack((training_examples, all_ones)) qx = np.mat([query_x, 1]) W = get_WeightMatrix_for_LOWES(qx, X_, Bandwidth) # calculating parameter theta theta = np.linalg.pinv(X_.T*(W * X_))*(X_.T*(W * Y)) # calculating predictions pred = np.dot(qx, theta) return theta, pred Bandwidth = 0.1 X_test = np.linspace(-2, 2, 20) Y_test = [] for query in X_test: theta, pred = predict(X, Y, query, Bandwidth) Y_test.append(pred[0][0]) horizontal_axis = np.array(X) vertical_axis = np.array(Y) plt.title("Tau / Bandwidth Param %.2f"% Bandwidth) plt.scatter(horizontal_axis, vertical_axis) Y_test = np.array(Y_test) plt.scatter(X_test, Y_test, color ='red') plt.show() """About This notebook will cover a basic model with a basic EDA about the competition. Imports# Imports import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import tree from sklearn import metrics # Visualization # Ploty Imports from itertools import combinations import plotly.graph_objs as go from plotly.offline import init_notebook_mode, iplot # Figures inline and set visualization style %matplotlib inline sns.set() #Different type of visualization # import the necessary modelling algorithms # Regression from sklearn.linear_model import LinearRegression,Ridge,Lasso,RidgeCV from sklearn.ensemble import RandomForestRegressor,BaggingRegressor,GradientBoostingRegressor,AdaBoostRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import ExtraTreesRegressor import xgboost as xgb from xgboost.sklearn import XGBRegressor # Model selection from sklearn.model_selection import train_test_split,cross_validate from sklearn.model_selection import KFold from sklearn.model_selection import GridSearchCV # Preprocessing from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,PolynomialFeatures # Evaluation metrics from sklearn.metrics import mean_squared_log_error,mean_squared_error, r2_score,mean_absolute_error # for regression from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score # for classification # Show multiple statements at once from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all"Load Data# Train Data train = pd.read_csv('input/train.csv') # Test Data test = pd.read_csv('input/test.csv') sub = pd.read_csv('input/sample_submission.csv') structures = pd.read_csv('input/structures.csv')EDA# Lets take a look at the main csv files train.head() sub.head() structures.head() print(f'{train.shape[0]} rows in the train data.') print(f'{test.shape[0]} rows in the test data.') 
print(f"{train['molecule_name'].nunique()} different molecules in the train data.") print(f"There are {test['molecule_name'].nunique()} different molecules in the test data.") print(f"There are {structures['atom'].nunique()} unique atoms.") print(f"There are {train['type'].nunique()} unique types.") # We take a first look at the dataset train.info() print ('#################################################') print ('#################################################') test.info() sub.info() sub['scalar_coupling_constant'].unique()Lets take a look at a nicer visualization.# Plotly notebook mode init_notebook_mode(connected=True) ''' This function will plot a mollecule ''' def plot_molecule(molecule_name, structures_df): """Creates a 3D plot of the molecule""" atomic_radii = dict(C=0.68, F=0.64, H=0.23, N=0.68, O=0.68) cpk_colors = dict(C='black', F='green', H='white', N='blue', O='red') molecule = structures[structures.molecule_name == molecule_name] coordinates = molecule[['x', 'y', 'z']].values x_coordinates = coordinates[:, 0] y_coordinates = coordinates[:, 1] z_coordinates = coordinates[:, 2] elements = molecule.atom.tolist() radii = [atomic_radii[element] for element in elements] def get_bonds(): """Generates a set of bonds from atomic cartesian coordinates""" ids = np.arange(coordinates.shape[0]) bonds = set() coordinates_compare, radii_compare, ids_compare = coordinates, radii, ids for i in range(len(ids)): coordinates_compare = np.roll(coordinates_compare, -1, axis=0) radii_compare = np.roll(radii_compare, -1, axis=0) ids_compare = np.roll(ids_compare, -1, axis=0) distances = np.linalg.norm(coordinates - coordinates_compare, axis=1) bond_distances = (radii + radii_compare) * 1.3 mask = np.logical_and(distances > 0.1, distances < bond_distances) bonds.update(map(frozenset, zip(ids[mask], ids_compare[mask]))) return bonds def atom_trace(): """Creates an atom trace for the plot""" colors = [cpk_colors[element] for element in elements] markers = dict(color=colors, line=dict(color='lightgray', width=2), size=7, symbol='circle', opacity=0.8) trace = go.Scatter3d(x=x_coordinates, y=y_coordinates, z=z_coordinates, mode='markers', marker=markers, text=elements) return trace def bond_trace(): """"Creates a bond trace for the plot""" trace = go.Scatter3d(x=[], y=[], z=[], hoverinfo='none', mode='lines', marker=dict(color='grey', size=7, opacity=1)) for i, j in bonds: trace['x'] += (x_coordinates[i], x_coordinates[j], None) trace['y'] += (y_coordinates[i], y_coordinates[j], None) trace['z'] += (z_coordinates[i], z_coordinates[j], None) return trace bonds = get_bonds() atoms = zip(range(len(elements)), x_coordinates, y_coordinates, z_coordinates) annotations = [dict(text=num, x=x, y=y, z=z, showarrow=False, yshift=15) for num, x, y, z in atoms] data = [atom_trace(), bond_trace()] axis_params = dict(showgrid=False, showticklabels=False, zeroline=False, titlefont=dict(color='white')) layout = go.Layout(scene=dict(xaxis=axis_params, yaxis=axis_params, zaxis=axis_params, annotations=annotations), margin=dict(r=0, l=0, b=0, t=0), showlegend=False) fig = go.Figure(data=data, layout=layout) iplot(fig) # Plots plot_molecule('dsgdb9nsd_133885', structures) plot_molecule('dsgdb9nsd_105227', structures) plot_molecule('dsgdb9nsd_099964', structures)Duplicate Check# We check if we have duplicates train.duplicated().any() test.duplicated().any()Label Encoding Before modeling it would be a good idea to do a Label Encoding to variables which are still as objects in order to improve the model's 
performance.categoricals = train.select_dtypes(include='object').columns categoricals = test.select_dtypes(include='object').columns for c in categoricals: lbl = LabelEncoder() lbl.fit(list(train[c].values)) train[c] = lbl.transform(list(train[c].values)) for c in categoricals: lbl = LabelEncoder() lbl.fit(list(test[c].values)) test[c] = lbl.transform(list(test[c].values)) # Check it has been done properly train.dtypes test.dtypesMetricdef metric(df, preds): df["prediction"] = preds maes = [] for t in df.type.unique(): y_true = df[df.type==t].scalar_coupling_constant.values y_pred = df[df.type==t].prediction.values mae = np.log(metrics.mean_absolute_error(y_true, y_pred)) maes.append(mae) return np.mean(maes)Modeling Setting K-Folds# Setting a 5-fold stratified cross-validation (note: shuffle=True) skf = KFold(n_splits=5, shuffle=True, random_state=8) params = {'booster' : 'gbtree', #'nthread' : 5, 'objective' : 'reg:linear', 'eval_metric' : 'mae', 'max_depth' : 8, 'eta' : 0.3, 'subsample' : 0.7, 'colsample_bytree' : 0.7 } # We define de label y = train['scalar_coupling_constant'] dtrain = xgb.DMatrix(train, label = y) res = xgb.cv(params, dtrain, num_boost_round = 4000, folds=skf, seed=2019, early_stopping_rounds = 10, verbose_eval=True) best_round=[i for i, e in enumerate(res['test-rmse-mean']) if e == min(res['test-rmse-mean'])][0] best_round res.iloc[best_round]SSEBopfrom pynhd import NLDI import hydrodata as hd import matplotlib.pyplot as pltThe daily actual evapotranspiration can be retrieved from [SEEBop](https://earlywarning.usgs.gov/ssebop/modis/daily) database. Note that since this service does not offer a web service and data are available as raster files on the server, so this function is not as fast as other functions and download speed might be the bottleneck.You can get the actual ET for location using ``ssebopeta_byloc`` and for a region using ``ssebopeta_bygeom``. 
Let's get a watershed geometry using NLDI and then get the actual ET.geometry = NLDI().getfeature_byid("nwissite", "USGS-01031500", basin=True).geometry[0] dates = ("2005-10-01", "2005-10-05") coords = (geometry.centroid.x, geometry.centroid.y) eta_p = hd.ssebopeta_byloc(coords, dates=dates) eta_g = hd.ssebopeta_bygeom(geometry, dates=dates) eta_g.isel(time=4).plot(size=8);Principal Component Analysis (PCA) on Iris Datasetimport numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix, accuracy_score dataset = pd.read_csv('C:\\Users\\black\\Desktop\\ml_py\\datasets\\iris.csv', header="infer") dataset.head() X = dataset.drop('species', 1) Y = dataset['species'] X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) pca = PCA() X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test) explained_variance = pca.explained_variance_ratio_ explained_variance for n in range(1, len(X.columns) + 1): X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) pca = PCA(n_components=n) X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test) classifier = RandomForestClassifier(max_depth=2, random_state=0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("Confusuion Matrix with {0} Principle Components: ".format(n)) print(cm) print("Accuracy Score with {0} Principle Components: ".format(n), end="") print(accuracy_score(Y_test, Y_pred), end="\n\n")Confusuion Matrix with 1 Principle Components: [[11 0 0] [ 0 12 1] [ 0 2 4]] Accuracy Score with 1 Principle Components: 0.9 Confusuion Matrix with 2 Principle Components: [[11 0 0] [ 0 8 5] [ 0 1 5]] Accuracy Score with 2 Principle Components: 0.8 Confusuion Matrix with 3 Principle Components: [[11 0 0] [ 0 5 8] [ 0 1 5]] Accuracy Score with 3 Principle Components: 0.7 Confusuion Matrix with 4 Principle Components: [[11 0 0] [ 0 11 2] [ 0 1 5]] Accuracy Score with 4 Principle Components: 0.9Exploratory data analysisWhenever you collect a new dataset, a good first step is to explore it. This means different things for different kinds of datasets, but if it's a timeseries then there are common techniques that will likely prove useful. This tutorial covers the basics of exploratory data analysis with timeseries. It focuses on raster plots and PSTHs recorded from spiking neurons, though the principles extend to most timeseries data. Data and TaskThe dataset was recorded from M1 neurons in a monkey performing a center-out reaching task. The monkey had to reach towards different targets, each at a different angle. On each trial, the target angle and onset was recorded.import numpy as np from scipy import io as si from matplotlib import pyplot as plt import h5py %matplotlib inlineLoad the dataFirst we'll load the data - an important first step in any analysis is determining the structure of the data. This is particularly important if you've never analyzed the data before. Let's take a look at our data.data = si.loadmat('../data/StevensonV4.mat') type(data)It looks like our data is a dictionary, which means it has **keys** and **values**. 
Let's take a look!for key in data.keys(): print(key)__header__ __version__ __globals__ timeBase spikes time handVel handPos target startBins targets targetsAngles startBinned targetNumbersThese are all fields contained within data, corresponding to the data contained within it. The fields beginning with `__` correspond to the fact that this dataset was saved with matlab. These are automatically inserted and we can probably ignore them.Let's take a look at our spiking data# Load in the spiking data spikes = data['spikes'] spikes.shapeJudging from the size of each axis, it looks like we have an array of shape (n_neurons, n_times). We can determine the time step for each bin by looking at the `timeBase` variable. We can use this to create the time of our datatime_step = data['timeBase'].squeeze() times = np.arange(spikes.shape[-1]) * time_step print(time_step) print(times[:10])0.001 [ 0. 0.001 0.002 0.003 0.004 0.005 0.006 0.007 0.008 0.009]A good first step with raster plots is to calculate summary statistics of the activity. First we'll take a look at the mean activity across time (averaging across neurons), then we'll look at the distribution of spikes across neurons (aggregating across time).# Calculate the mean across neurons and plot it for a quick viz mean_spikes = np.mean(spikes, 0) fig, ax = plt.subplots() ax.plot(mean_spikes, color='g') total_spikes = np.sum(spikes, -1) fig, ax = plt.subplots() _ = ax.hist(total_spikes, color='g')Now let's pull out the activity of a single neuron and see what it looks like:neuron_ix = 192 # Which neuron are we looking at? neuron = spikes[neuron_ix] ixs_spikes = np.where(neuron == 1)[0] fig, ax = plt.subplots() ax.vlines(times[ixs_spikes[:100]], 0, 1)These vertical lines represent the spikes of a single neuron. We can visualize all of the neurons at once (but more on that later). Binning by eventsIt's generally true that the neural activity we record can be split up in to distinct "events" that occur at a moment in time. Let's slice up our data based on the event times in `startBins` and see how this looks. We've also got information about event types in the field `targetNumbers`.# Only process what constitutes valid trials - identify malformed ones start_bins = data['startBins'][0] target_numbers = data['targetNumbers'][:, 0] print(start_bins.shape) # We'll only keep the trials that occur before a pre-specified time end_ix = 676790 mask_keep = start_bins < end_ix start_bins = start_bins[mask_keep] target_numbers = target_numbers[mask_keep] print(start_bins.shape)(155,)At this point, we'd generally need to clean up the data. This might mean throwing away neurons that had a bad signal, or events where there was clearly noise being recorded. In this case, the data is relatively clean already (thanks to Konrad for this).We'll create a dictionary that lets us map each condition type onto times corresponding to events of that condition.n_conditions = len(np.unique(target_numbers)) print('Number of conditions: {}'.format(n_conditions)) condition_dict = {} for ii in range(1, n_conditions + 1): condition_dict[ii] = np.where(target_numbers == ii)[0]Number of conditions: 8Visualizing event-related activityNow that we know when each event occurs, let's visualize the activity of our focus neuron. To do this, we'll need to pull a *window* of time around each event. Then we can see the activity during that window.# We infer the sfreq from the time step sfreq = 1. 
/ time_step # Define how we'll take a window around each event tmin, tmax = -.5, 10 ixmin = int(tmin * sfreq) ixmax = int(tmax * sfreq) # Now loop through conditions cond_data = {} for cond in range(1, n_conditions + 1): # For each condition, we'll take a window of time around each onset indices = condition_dict[cond] this_onsets = start_bins[indices] # Loop through each event for this event epochs = [] for onset in this_onsets: if (onset + ixmax) > spikes.shape[-1]: # If the window extends beyond the data, skip it continue epochs.append(spikes[:, onset + ixmin : onset + ixmax]) epochs = np.array(epochs) cond_data[cond] = epochs # Now create time (in seconds) around each window time_epochs = np.linspace(tmin, tmax, num=epochs.shape[-1]) # Now, we can plot the spiking activity (rasters) in response to each condition n_row = 3 n_col = int(np.ceil(n_conditions / float(n_row))) fig, axs = plt.subplots(n_row, n_col, sharex=True, sharey=True, figsize=(5 * n_col, 5 * n_row)) for ax, (cond, i_data) in zip(axs.ravel(), cond_data.items()): this_epoch = i_data[:, neuron_ix, :] for ii, i_ep in enumerate(this_epoch): mask_spikes = i_ep == 1 ixs_spikes = np.where(mask_spikes)[0] times_spikes = time_epochs[ixs_spikes] if len(times_spikes) > 0: ax.vlines(times_spikes, ii, ii + 1, color='k') ax.set_title('Condition {}'.format(cond)) plt.autoscale(tight=True)Visualizing with a Peri-Stimulus Time Histogram (PSTH)It is helpful to summarize the spiking activity across repetitions of one condition. For this, we create the peri-stimulus time histogram (PSTH). This shows us the general pattern of spiking activity in response to a stimulus.# We'll use this to smooth in time, which is important when using spikes from scipy.ndimage.filters import gaussian_filter1dTo create teh PSTH we'll need to smooth the spikes in time. This effectively converts the spikes from bins to a continuously-varying spike rate. 
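To make this concrete before touching the recorded data, here is a minimal standalone sketch of the idea (the toy_* names are illustrative and not part of this dataset): smoothing a binary spike train with a Gaussian kernel gives spike counts per bin, and multiplying by the sampling rate expresses the result in spikes per second.
# Minimal standalone sketch, assuming 1 ms bins (i.e. a 1 kHz sampling rate) as above
import numpy as np
from scipy.ndimage import gaussian_filter1d
toy_sfreq = 1000.0                                            # 1 kHz sampling, 1 ms bins
toy_spikes = np.zeros(2000)                                   # 2 seconds of empty bins
toy_spikes[np.random.choice(2000, size=40, replace=False)] = 1  # 40 spikes at random bins
toy_rate_hz = gaussian_filter1d(toy_spikes, sigma=10) * toy_sfreq  # smoothed firing rate in Hz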
We'll smooth using a gaussian distribution...if we want a smoother spike rate, we should increase the standard deviation of the gaussian.# Smooth the spiking activity, then take every "Nth" sample to reduce size gaussian_sd = 10 n_decimate = 5 binned_dict = {} for i_cond, i_data in cond_data.items(): i_data = gaussian_filter1d(i_data.astype(float), gaussian_sd, axis=-1) # We'll take every Nth sample to speed up plotting i_data = i_data[..., ::n_decimate] binned_dict[i_cond] = i_data # Compare this plot with the raster images above n_row = 3 n_col = int(np.ceil(n_conditions / float(n_row))) fig, axs = plt.subplots(n_row, n_col, sharex=True, sharey=True, figsize=(5 * n_col, 5 * n_row)) for ax, (i_cond, i_data) in zip(axs.ravel(), binned_dict.items()): ax.plot(time_epochs[::n_decimate], i_data.mean(0)[192], 'k') ax.set_title('Condition: {}'.format(i_cond)) ax.axvline(0, color='r', ls='--') plt.autoscale(tight=True)#Importando o Pandas import pandas as pd #Importando o Dataframe a ser usado google cloud df = pd.read_csv("/content/drive/MyDrive/DADOS/Dados mini_projeto/marketing_campaign.csv.csv", sep=",") df.head(1) #Fazendo uma Copia do Dataframe df_copy = df.copy() df1 = df #Analisando os Tipos existentes no dataframe df1.dtypes #Renomeando as Colunas para o Português df1.rename(columns={'ID': 'ID_Clientes', 'Year_Birth': 'Ano_Nacimento' , 'Education' : 'Educacao', 'Marital_Status': 'Estado_Civil' , 'Income' : 'Renda_Anual', 'Kidhome': 'Qts_Criancas' , 'Teenhome' : 'Qts_Adolecentes', 'Dt_Customer': 'Data_Cadastro' , 'Recency' : 'Dias_desde_ultima_compra', 'MntWines': 'Qt_Vinho_Comprado' , 'MntFruits' : 'Qt_Fruta_Comprada', 'MntMeatProducts': 'Qt_Carne_comprada' , 'MntFishProducts' : 'Qt_Produto_pesca_comprada', 'MntSweetProducts': 'Qt_Doce_comprado' , 'MntGoldProds' : 'Qt_Produto_premiun_comprado', 'NumDealsPurchases': 'Qt_Compra_desconto' , 'NumWebPurchases' : 'Qt_Compra_site', 'NumCatalogPurchases' : 'Qt_Compra_Catálogo', 'NumStorePurchases': 'Qt_Compra_Loja' , 'NumWebVisitsMonth' : 'Qt_Visita_site_mês', 'AcceptedCmp3' : 'Aceitou_campanha_3', 'AcceptedCmp4': 'Aceitou_campanha_4' , 'AcceptedCmp5' : 'Aceitou_campanha_5', 'AcceptedCmp1' : 'Aceitou_campanha_1', 'AcceptedCmp2': 'Aceitou_campanha_2' , 'Complain' : 'Reclamação', 'Z_CostContact' : 'Custo_Contato_Cliente', 'Z_Revenue' : 'Receita_Campanha', 'Response' : 'Resposta'}, inplace=True) df1.head(1) #Descobrir se a Coluna ID_Clientes é única df1["ID_Clientes"].is_unique #Achar Colunas com Na df1.isna().sum() #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Ano_Nacimento"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Educacao"]) #Alterando as linhas de dados da coluna Educação para o Português - BR df1["Educacao"].replace("2n Cycle", "Ensino Médio", inplace=True) df1["Educacao"].replace("Graduation", "Ensino Superior", inplace=True) df1["Educacao"].replace("Master", "Mestrado", inplace=True) df1["Educacao"].replace("PhD", "Doutorado", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Estado_Civil"]) #Alterando as linhas de dados para o Português - BR e mudando valores inconsistêntes para NA df1["Estado_Civil"].replace("Single", "Solteiro(a)", inplace=True) df1["Estado_Civil"].replace("Together", "União Estável", inplace=True) df1["Estado_Civil"].replace("Married", "Casado(a)", inplace=True) df1["Estado_Civil"].replace("Divorced", "Divorciado(a)", inplace=True) df1["Estado_Civil"].replace("Widow", "Viúvo(a)", inplace=True) 
df1["Estado_Civil"].replace("Alone", "Solteiro(a)", inplace=True) df1["Estado_Civil"].replace("Divorced", "Divorciado(a)", inplace=True) df1.replace(["Absurd", "YOLO"], pd.NA, inplace = True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Renda_Anual"]) #Utilizo o Sorted com for quando a coluna tem muitas linhas que não se repetem, assim consigo analisar item por item para achar alguma inconsistência x = sorted(pd.unique(df1["Renda_Anual"])) for i in range(len(x)): if i % 20 == 0: print() print(x[i], end=', ') #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qts_Criancas"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qts_Adolecentes"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Data_Cadastro"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Dias_desde_ultima_compra"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Vinho_Comprado"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Fruta_Comprada"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Carne_comprada"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Produto_pesca_comprada"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Doce_comprado"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Produto_premiun_comprado"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Compra_desconto"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Compra_site"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Compra_Catálogo"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Compra_Loja"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Qt_Visita_site_mês"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Aceitou_campanha_3"]) #Alteração para string, para facilitar a leitura df1["Aceitou_campanha_3"].replace(0, "Não", inplace=True) df1["Aceitou_campanha_3"].replace(1, "Sim", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Aceitou_campanha_4"]) #Alteração para string, para facilitar a leitura df1["Aceitou_campanha_4"].replace(0, "Não", inplace=True) df1["Aceitou_campanha_4"].replace(1, "Sim", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Aceitou_campanha_5"]) #Alteração para string, para facilitar a leitura df1["Aceitou_campanha_5"].replace(0, "Não", inplace=True) df1["Aceitou_campanha_5"].replace(1, "Sim", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Aceitou_campanha_1"]) #Alteração para string, para facilitar a leitura df1["Aceitou_campanha_1"].replace(0, "Não", inplace=True) df1["Aceitou_campanha_1"].replace(1, "Sim", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Aceitou_campanha_2"]) #Alteração para string, para facilitar a leitura df1["Aceitou_campanha_2"].replace(0, "Não", inplace=True) df1["Aceitou_campanha_2"].replace(1, "Sim", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Reclamação"]) #Alteração para string, para facilitar a leitura 
df1["Reclamação"].replace(0, "Sem reclamação", inplace=True) df1["Reclamação"].replace(1, "Com reclamação", inplace=True) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Custo_Contato_Cliente"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Receita_Campanha"]) #Utilizando o unique para buscar inconsistências em cada Coluna pd.unique(df1["Resposta"]) #Alteração para string, para facilitar a leitura df1["Resposta"].replace(0, "Não", inplace=True) df1["Resposta"].replace(1, "Sim", inplace=True) # Retirada da coluna df1.drop(['Receita_Campanha', 'Resposta', 'Custo_Contato_Cliente'], axis = 1, inplace= True) #Visualizando o dataframe df1.head(1) #Salvando meu Dataframe df1.to_csv('marketing.csv', index=False) df1.dtypes**PYSPARK**#Instalar Pyspark !pip install pyspark #Importanto Bibliotecas para utilização do Pyspark from pyspark.sql import SparkSession import pyspark.sql.functions as F from pyspark.sql.window import Window from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType spark = (SparkSession.builder. master('local'). appName('dataframe_withcolumn'). config('spark.ui.port', '4050'). getOrCreate()) df2 = (spark.read. format("csv"). option("header", "true"). option("inferschema", "true"). option("delimiter", ","). load('/content/drive/MyDrive/DADOS/Dados mini_projeto/marketing.csv')) df2.printSchema() #Renomeando novas colunas df2 = df2.withColumnRenamed('ID_Clientes', 'ID_Cliente').withColumnRenamed('Qt_Compra_desconto', 'Qtd_Compra_desconto').withColumnRenamed('Qt_Compra_site', 'Qtd_Compra_site').withColumnRenamed('Qt_Compra_Catálogo', 'Qtd_Compra_Catálogo').withColumnRenamed('Qt_Compra_Loja', 'Qtd_Compra_Loja').withColumnRenamed('Qt_Visita_site_mês', 'Qtd_Visita_site_mes').withColumnRenamed('Reclamação', 'Reclamacoes') df2.show(5) #Criando novas colunas df2 = df2.withColumn("Filhos", F.col("Qts_Criancas") + F.col("Qts_Adolecentes")) df2 = df2.withColumn("Valor das Compras", F.col("Qt_Vinho_Comprado") + F.col("Qt_Fruta_Comprada") + F.col("Qt_Carne_comprada") + F.col("Qt_Produto_pesca_comprada") + F.col("Qt_Doce_comprado") + F.col("Qt_Produto_premiun_comprado")) df2.show(1) #Filtrando os gastos de uma família com filhos ou sem filhos filtro = F.col("Estado_Civil") == "Casado(a)" df2.select(F.col("Estado_Civil"), F.col("Filhos"), F.col("Valor das Compras")).filter(filtro).show(10) #Filtrando os gastos de pais solteiros com filhos df2.select(F.col("Estado_Civil"), F.col("Filhos"), F.col("Qt_Doce_comprado"))\ .filter(F.col("Filhos") > 0)\ .filter(F.col("Estado_Civil") == "Solteiro(a)")\ .filter(F.col("Qt_Doce_comprado") < 10)\ .show(100) #Ordenando a Tabela de acordo com o Valor das compras com o cadastro do cliente df2.orderBy("valor das Compras","Data_Cadastro").show(truncate=False) #Média de Gastos dde clientes ordenado por estado civil df2.groupby(F.col("Estado_Civil")).mean("Renda_Anual").show() #Particionando Estado Civil por ordem de Educação usando rank w0 = Window.partitionBy(F.col("Estado_Civil")).orderBy("Educacao") df2.withColumn("rank", F.rank().over(w0)).show() #Ordenando Ano de Nascimento usando row_number w0 = Window.orderBy("Ano_Nacimento") df2.withColumn("row_number", F.row_number().over(w0)).show() #Mostrar como está o dataframe depois de todas as alterações df2.show() #Salvando o arquivo no drive #Marketing_pyspark1 = df2.write.format("csv").option("header", "true").option("inferschema", "true").option("delimiter", 
",").save("/content/drive/MyDrive/Marketing_pyspark1.csv")**SparkSQL**#Importando um dataframe já limpo do Pyspark e Pandas df3 = spark.read.format("csv") \ .option("inferSchema", "true") \ .option("header", "true") \ .option("sep", ",") \ .load("/content/drive/MyDrive/Marketing_pyspark1.csv/Marketing_pyspark1.csv") \ .createOrReplaceTempView("Marketing") #Visualizando o dataframe com a função Select do sql. spark.sql("SELECT * FROM Marketing").show(1) #Retora qual tipo de tabela posso melhor trabalhar, ajudando assim a fazer as filtragens spark.sql("DESCRIBE Marketing ").show(30) #Verificando a quantidade de Filhos por Estado Civil utilizando a função "count", cotando todos os filhos listados em cada estado civil. spark.sql("SELECT Estado_Civil, COUNT(Filhos) AS Filhos FROM Marketing GROUP BY Estado_Civil").show(10) #Verificando quais os clientes que aceitaram Campanha 5, utilizando a função "where" onde identificamos cada cliente que disse sim. spark.sql("SELECT * FROM Marketing WHERE Aceitou_campanha_5 == 'Sim'").show(10) #Continuando com a função "Where" lozalizamos quais os clientes que compraram vinho e quantas vezes eles foram até a loja física. spark.sql("SELECT * FROM Marketing WHERE Qt_Vinho_Comprado > 0 AND Qtd_Compra_Loja > 0").show() #Com a função "Where" e "Like", encontramos todos os nossos clientes Solteiros, assim podemos saber quanto e como compram. spark.sql("SELECT * FROM Marketing WHERE Estado_Civil LIKE 'S%'").show() # Sabemos a data de cadastro dos clientes sem repetições, a empresa pode enviar cartões ou descontros especiais de aniversário fidelidade, utilo a função "Distinct" para realizar essa filtragem. spark.sql("SELECT DISTINCT Data_Cadastro FROM Marketing").show() #Utilizamo a fução "Count" "Where" "GroupBy" para para saber qual o melhor consumo de um cliente, selecionamos os divorciados com filhos e podemos fazer comparações com outros clientes. spark.sql("SELECT Estado_Civil, COUNT(Filhos) AS QTS_FILHOS FROM Marketing WHERE Estado_Civil == 'Divorciado(a)' GROUP BY Estado_Civil").show() #Utilizando a Educaçao como parametro, verifico a Soma da renda, a Renda mínima, a Máxima e a média de cada cliente pela sua educação, dessa forma pode-se melhor ofertar para cada tipo de cliente. spark.sql("SELECT Educacao, SUM(Renda_Anual) AS SOMA_RENDA, MIN(Renda_Anual) AS MIN_RENDA, MAX(Renda_Anual) AS MAX_RENDA, ROUND(MEAN(Renda_Anual), 2) AS MEDIA_RENDA FROM Marketing GROUP BY Educacao ORDER BY Educacao").show() #Utilizando a função "Count" "Where" "GoupBy"para termos um parametro de quantos clientes aceitaram a campanha 4 e compraram pelo site. spark.sql("SELECT Aceitou_campanha_4, COUNT(Qtd_Compra_site) AS QTD_COMPRA_SITE FROM Marketing WHERE Aceitou_campanha_4 == 'Sim' GROUP BY Aceitou_campanha_4").show() #Utilizando o "Where" descobrimos quantas visitas no site geraram vendas com descontos. spark.sql("SELECT Qtd_Compra_desconto FROM Marketing WHERE Qtd_Visita_site_mes > 0").show() Reinforcement Learning OpenAI gym''' The make() function creates an environment, in this case a CartPole environment. This is a 2D simulation in which a cart can be accelerated left or right in order to balance a pole placed on top of it. After the environment is created, we must initialize it using the reset() method. This returns the first observation. Observations depend on the type of environment. 
For the CartPole environment, each observation is a 1D NumPy array containing four floats: these floats represent the cart’s horizontal position (0.0 = center), its velocity, the angle of the pole (0.0 = vertical), and its angular velocity. Finally, the render() method displays the environment ''' import gym env = gym.make("CartPole-v0") print('The environment: ', env ) obs = env.reset() print('the first observation: ', obs) env.render() # Ask the environment what actions are possible print('What actions are possible: ', env.action_space) ''' Discrete(2) means that the possible actions are integers 0 and 1, which represent accelerating left (0) or right (1). Other environments may have more discrete actions, or other kinds of actions (e.g.,continuous). Since the pole is leaning toward the right, let’s accelerate the cart toward the right: ''' action = 1 # accelerate right obs, reward, done, info = env.step(action) print('The new observation: ', obs) print( 'Thereward: ', reward) print('This value will be True when the episode is over: ', done) print('This dictionary may provide extra debug information in other environments: ', info) ''' The step() method executes the given action and returns four values: - obs: This is the new observation. The cart is now moving toward the right (obs[1]>0). The pole is still tilted toward the right (obs[2]>0 ), but its angular velocity is now negative (obs[3]<0 ), so it will likely be tilted toward the left after the next step. - reward:In this environment, you get a reward of 1.0 at every step, no matter what you do, so the goal is to keep running as long as possible. - done: This value will be True when the episode is over. This will happen when the pole tilts too much. After that, the environment must be reset before it can be used again. - info: This dictionary may provide extra debug information in other environments. This data should not be used for training (it would be cheating). ''' ''' Let’s hardcode a simple policy that accelerates left when the pole is leaning toward the left and accelerates right when the pole is leaning toward the right. We will run this policy to see the average rewards it gets over 500 episodes ''' def basic_policy(obs): angle = obs[2] return 0 if angle < 0 else 1 totals = [] for episode in range(500): episode_rewards = 0 obs = env.reset() for step in range(1000): # 1000 steps max, we don't want to run forever action = basic_policy(obs) obs, reward, done, info = env.step(action) episode_rewards += reward if done: break totals.append(episode_rewards) # Let’s look at the result import numpy as np print('The result: ', np.mean(totals), np.std(totals), np.min(totals), np.max(totals)) ''' Even with 500 tries, this policy never managed to keep the pole upright for more than 68 consecutive steps. Not great. '''Neural Network Policies''' Let’s create a neural network policy. Just like the policy we hardcoded earlier, this neural network will take an observation as input, and it will output the action to be executed. More precisely, it will estimate a probability for each action, and then we will select an action randomly according to the estimated probabilities. In the case of the CartPole environment, there are just two possible actions (left or right), so we only need one output neuron. It will output the probability p of action 0 (left), and of course the probability of action 1 (right) will be 1 – p. For example, if it outputs 0.7, then we will pick action 0 with 70% probability, and action 1 with 30% probability. 
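In plain NumPy, that sampling step would look something like the following (a minimal illustration only; p_left is a hypothetical network output, and the actual TensorFlow version using tf.multinomial() appears below):
import numpy as np
p_left = 0.7  # hypothetical probability of action 0 (left) produced by the policy network
action = 0 if np.random.rand() < p_left else 1  # picks 0 with 70% probability, 1 with 30%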
You may wonder why we are picking a random action based on the probability given by the neural network, rather than just picking the action with the highest score. This approach lets the agent find the right balance between exploring new actions and exploiting the actions that are known to work well. Here’s an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn’t increase that probability up to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Also note that in this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment’s full state. If there were some hidden state, then you may need to consider past actions and observations as well. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is when the observations are noisy; in that case, you generally want to use the past few observations to estimate the most likely current state. The CartPole problem is thus as simple as can be; the observations are noise-free and they contain the environment’s full state. ''' # The code to build this neural network policy using TensorFlow ''' import tensorflow as tf from tensorflow.contrib.layers import fully_connected # 1. Specify the neural network architecture n_inputs = 4 # == env.observation_space.shape[0] n_hidden = 4 # it's a simple task, we don't need more hidden neurons n_outputs = 1 # only outputs the probability of accelerating left initializer = tf.contrib.layers.variance_scaling_initializer() # 2. Build the neural network X = tf.placeholder(tf.float32, shape=[None, n_inputs]) hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer) logits = fully_connected(hidden, n_outputs, activation_fn=None, weights_initializer=initializer) outputs = tf.nn.sigmoid(logits) # 3. Select a random action based on the estimated probabilities p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) init = tf.global_variables_initializer() ''' ''' Let’s go through this code: 1. After the imports, we define the neural network architecture. The number of inputs is the size of the observation space (which in the case of the CartPole is four), we just have four hidden units and no need for more, and we have just one output probability (the probability of going left). 2. Next we build the neural network. In this example, it’s a vanilla Multi-Layer Perceptron, with a single output. Note that the output layer uses the logistic (sigmoid) activation function in order to output a probability from 0.0 to 1.0. If there were more than two possible actions, there would be one output neuron per action, and you would use the softmax activation function instead. 3. Lastly, we call the multinomial() function to pick a random action. This function independentlysamples one (or more) integers, given the log probability of each integer. 
For example, if you call it with the array [np.log(0.5), np.log(0.2), np.log(0.3)] and with num_samples=5 , then it will output five integers, each of which will have a 50% probability of being 0, 20% of being 1, and 30% of being 2. In our case we just need one integer representing the action to take. Since the outputs tensor only contains the probability of going left, we must first concatenate 1-outputs to it to have a tensor containing the probability of both left and right actions. Note that if there were more than two possible actions, the neural network would have to output one probability per action so you would not need the concatenation step. '''Evaluating Actions: The Credit Assignment Problem''' If we knew what the best action was at each step, we could train the neural network as usual, by minimizing the cross entropy between the estimated probability and the target probability. It would just be regular supervised learning. However, in Reinforcement Learning the only guidance the agent gets is through rewards, and rewards are typically sparse and delayed. For example, if the agent manages to balance the pole for 100 steps, how can it know which of the 100 actions it took were good, and which of them were bad? All it knows is that the pole fell after the last action, but surely this last action is not entirely responsible. This is called the credit assignment problem: when the agent gets a reward, it is hard for it to know which actions should get credited (or blamed) for it. Think of a dog that gets rewarded hours after it behaved well; will it understand what it is rewarded for? To tackle this problem, a common strategy is to evaluate an action based on the sum of all the rewards that come after it, usually applying a discount rate r at each step. For example, if an agent decides to go right three times in a row and gets +10 reward after the first step, 0 after the second step, and finally –50 after the third step, then assuming we use a discount rate r = 0.8, the first action will have a total score of 10 + r × 0 + r 2 × (–50) = –22. If the discount rate is close to 0, then future rewards won’t count for much compared to immediate rewards. Conversely, if the discount rate is close to 1, then rewards far into the future will count almost as much as immediate rewards. Typical discount rates are 0.95 or 0.99. With a discount rate of 0.95, rewards 13 steps into the future count roughly for half as much as immediate rewards (since 0.95 13 ≈ 0.5), while with a discount rate of 0.99, rewards 69 steps into the future count for half as much as immediate rewards. In the CartPole environment, actions have fairly short-term effects, so choosing a discount rate of 0.95 seems reasonable. Of course, a good action may be followed by several bad actions that cause the pole to fall quickly, resulting in the good action getting a low score (similarly, a good actor may sometimes star in a terrible movie). However, if we play the game enough times, on average good actions will get a better score than bad ones. So, to get fairly reliable action scores, we must run many episodes and normalize all the action scores (by subtracting the mean and dividing by the standard deviation). After that, we can reasonably assume that actions with a negative score were bad while actions with a positive score were good. '''Policy Gradients''' PG algorithms optimize the parameters of a policy by following the gradients toward higher rewards. 
One popular class of PG algorithms, called REINFORCE algorithms, was introduced back in 1992 by . Here is one common variant: 1. First, let the neural network policy play the game several times and at each step compute the gradients that would make the chosen action even more likely, but don’t apply these gradients yet. 2. Once you have run several episodes, compute each action’s score (using the method described in the previous paragraph). 3. If an action’s score is positive, it means that the action was good and you want to apply the gradients computed earlier to make the action even more likely to be chosen in the future. However, if the score is negative, it means the action was bad and you want to apply the opposite gradients to make this action slightly less likely in the future. The solution is simply to multiply each gradient vector by the corresponding action’s score. 4. Finally, compute the mean of all the resulting gradient vectors, and use it to perform a Gradient Descent step. Let’s implement this algorithm using TensorFlow. We will train the neural network policy we built earlier so that it learns to balance the pole on the cart. Let’s start by completing the construction phase we coded earlier to add the target probability, the cost function, and the training operation. Since we are acting as though the chosen action is the best possible action, the target probability must be 1.0 if the chosen action is action 0 (left) and 0.0 if it is action 1 (right) ''' # y = 1. - tf.to_float(action) ''' Now that we have a target probability, we can define the cost function (cross entropy) and compute the gradients ''' # learning_rate = 0.01 # cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits( # labels=y, logits=logits) # optimizer = tf.train.AdamOptimizer(learning_rate) # grads_and_vars = optimizer.compute_gradients(cross_entropy) ''' Note that we are calling the optimizer’s compute_gradients() method instead of the minimize() method. This is because we want to tweak the gradients before we apply them. The compute_gradients() method returns a list of gradient vector/variable pairs (one pair per trainable variable). Let’s put all the gradients in a list, to make it more convenient to obtain their values ''' # gradients = [grad for grad, variable in grads_and_vars] ''' Okay, now comes the tricky part. During the execution phase, the algorithm will run the policy and at each step it will evaluate these gradient tensors and store their values. After a number of episodes it will tweak these gradients as explained earlier (i.e., multiply them by the action scores and normalize them) and compute the mean of the tweaked gradients. Next, it will need to feed the resulting gradients back to the optimizer so that it can perform an optimization step. This means we need one placeholder per gradient vector. Moreover, we must create the operation that will apply the updated gradients. For this we will call the optimizer’s apply_gradients() function, which takes a list of gradient vector/variable pairs. 
Instead of giving it the original gradient vectors, we will give it a list containing the updated gradients (i.e., the ones fed through the gradient placeholders) ''' # gradient_placeholders = [] # grads_and_vars_feed = [] # for grad, variable in grads_and_vars: # gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape()) # gradient_placeholders.append(gradient_placeholder) # grads_and_vars_feed.append((gradient_placeholder, variable)) #training_op = optimizer.apply_gradients(grads_and_vars_feed) # Let’s step back and take a look at the full construction phase import tensorflow as tf from tensorflow.contrib.layers import fully_connected n_inputs = 4 n_hidden = 4 n_outputs = 1 initializer = tf.contrib.layers.variance_scaling_initializer() learning_rate = 0.01 X = tf.placeholder(tf.float32, shape=[None, n_inputs]) hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer) logits = fully_connected(hidden, n_outputs, activation_fn=None, weights_initializer=initializer) outputs = tf.nn.sigmoid(logits) p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) y = 1. - tf.to_float(action) cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits( labels=y, logits=logits) optimizer = tf.train.AdamOptimizer(learning_rate) grads_and_vars = optimizer.compute_gradients(cross_entropy) gradients = [grad for grad, variable in grads_and_vars] gradient_placeholders = [] grads_and_vars_feed = [] for grad, variable in grads_and_vars: gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape()) gradient_placeholders.append(gradient_placeholder) grads_and_vars_feed.append((gradient_placeholder, variable)) training_op = optimizer.apply_gradients(grads_and_vars_feed) init = tf.global_variables_initializer() saver = tf.train.Saver() ''' On to the execution phase! We will need a couple of functions to compute the total discounted rewards, given the raw rewards, and to normalize the results across multiple episodes ''' def discount_rewards(rewards, discount_rate): discounted_rewards = np.empty(len(rewards)) cumulative_rewards = 0 for step in reversed(range(len(rewards))): cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate discounted_rewards[step] = cumulative_rewards return discounted_rewards def discount_and_normalize_rewards(all_rewards, discount_rate): all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards] flat_rewards = np.concatenate(all_discounted_rewards) reward_mean = flat_rewards.mean() reward_std = flat_rewards.std() return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards] # Let’s check that this works print('The discount_rewards: ', discount_rewards([10, 0, -50], discount_rate=0.8)) print('The discount_and_normalize_rewards: \n', discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)) ''' The call to discount_rewards() returns exactly what we expect. You can verify that the function discount_and_normalize_rewards() does indeed return the normalized scores for each action in both episodes. 
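As a quick check of the first call: working backward through [10, 0, -50] with a discount rate of 0.8, the last step keeps its raw reward of -50, the middle step gets 0 + 0.8 × (-50) = -40, and the first step gets 10 + 0.8 × (-40) = -22, so discount_rewards() should return approximately [-22., -40., -50.], matching the worked example from the credit assignment discussion above.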
Notice that the first episode was much worse than the second, so its normalized scores are all negative; all actions from the first episode would be considered bad, and conversely all actions from the second episode would be considered good ''' # Train the policy env = gym.make("CartPole-v0") n_iterations = 250 # number of training iterations n_max_steps = 1000 # max steps per episode n_games_per_update = 10 # train the policy every 10 episodes save_iterations = 10 # save the model every 10 training iterations discount_rate = 0.95 with tf.Session() as sess: init.run() for iteration in range(n_iterations): print("\rIteration: {}".format(iteration), end="") all_rewards = [] # all sequences of raw rewards for each episode all_gradients = [] # gradients saved at each step of each episode for game in range(n_games_per_update): current_rewards = [] # all raw rewards from the current episode current_gradients = [] # all gradients from the current episode obs = env.reset() for step in range(n_max_steps): action_val, gradients_val = sess.run( [action, gradients], feed_dict={X: obs.reshape(1, n_inputs)}) # one obs obs, reward, done, info = env.step(action_val[0][0]) current_rewards.append(reward) current_gradients.append(gradients_val) if done: break all_rewards.append(current_rewards) all_gradients.append(current_gradients) # At this point we have run the policy for 10 episodes, and we are # ready for a policy update using the algorithm described earlier. all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate) feed_dict = {} for var_index, grad_placeholder in enumerate(gradient_placeholders): # multiply the gradients by the action scores, and compute the mean mean_gradients = np.mean( [reward * all_gradients[game_index][step][var_index] for game_index, rewards in enumerate(all_rewards) for step, reward in enumerate(rewards)], axis=0) feed_dict[grad_placeholder] = mean_gradients sess.run(training_op, feed_dict=feed_dict) if iteration % save_iterations == 0: saver.save(sess, "./my_policy_net_pg.ckpt") ''' Each training iteration starts by running the policy for 10 episodes (with maximum 1,000 steps per episode, to avoid running forever). At each step, we also compute the gradients, pretending that the chosen action was the best. After these 10 episodes have been run, we compute the action scores using the discount_and_ normalize_rewards() function; we go through each trainable variable, across all episodes and all steps, to multiply each gradient vector by its corresponding action score; and we compute the mean of the resulting gradients. Finally, we run the training operation, feeding it these mean gradients (one per trainable variable). We also save the model every 10 training operations. And we’re done! This code will train the neural network policy, and it will successfully learn to balance the pole on the cart. Note that there are actually two ways the agent can lose the game: either the pole can tilt too much, or the cart can go completely off the screen. With 250 training iterations, the policy learns to balance the pole quite well, but it is not yet good enough at avoiding going off the screen. A few hundred more training iterations will fix that. Despite its relative simplicity, this algorithm is quite powerful. You can use it to tackle much harder problems than balancing a pole on a cart. In fact, AlphaGo was based on a similar PG algorithm '''Learning to Play Ms. Pac-Man Using Deep Q-Learning# Create a Ms. 
Pac-Man environment import gym env = gym.make("MsPacman-v0") obs = env.reset() print('The observation: ', obs.shape) # [height, width, channels] print('The environment space: ', env.action_space) ''' As you can see, there are nine discrete actions available, which correspond to the nine possible positions of the joystick (left, right, up, down, center, upper left, and so on), and the observations are simply screenshots of the Atari screen, represented as 3D NumPy arrays. These images are a bit large, so we will create a small preprocessing function that will crop the image and shrink it down to 88 × 80 pixels, convert it to grayscale, and improve the contrast of Ms. Pac-Man. This will reduce the amount of computations required by the DQN, and speed up training ''' mspacman_color = np.array([210, 164, 74]).mean() def preprocess_observation(obs): img = obs[1:176:2, ::2] # crop and downsize img = img.mean(axis=2) # to greyscale img[img==mspacman_color] = 0 # improve contrast img = (img - 128) / 128 - 1 # normalize from -1. to 1. return img.reshape(88, 80, 1) ''' Next, let’s create the DQN. It could just take a state-action pair (s,a) as input, and output an estimate of the corresponding Q-Value Q(s,a), but since the actions are discrete it is more convenient to use a neural network that takes only a state s as input and outputs one Q-Value estimate per action. The DQN will be composed of three convolutional layers, followed by two fully connected layers, including the output layer. As we will see, the training algorithm we will use requires two DQNs with the same architecture (but different parameters): one will be used to drive Ms. Pac-Man during training (the actor), and the other will watch the actor and learn from its trials and errors (the critic). At regular intervals we will copy the critic to the actor. Since we need two identical DQNs, we will create a q_network() function to build them ''' from tensorflow.contrib.layers import convolution2d, fully_connected input_height = 88 input_width = 80 input_channels = 1 conv_n_maps = [32, 64, 64] conv_kernel_sizes = [(8,8), (4,4), (3,3)] conv_strides = [4, 2, 1] conv_paddings = ["SAME"]*3 conv_activation = [tf.nn.relu]*3 n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each n_hidden = 512 hidden_activation = tf.nn.relu n_outputs = env.action_space.n # 9 discrete actions are available initializer = tf.contrib.layers.variance_scaling_initializer() def q_network(X_state, scope): prev_layer = X_state conv_layers = [] with tf.variable_scope(scope) as scope: for n_maps, kernel_size, stride, padding, activation in zip( conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation): prev_layer = convolution2d( prev_layer, num_outputs=n_maps, kernel_size=kernel_size, stride=stride, padding=padding, activation_fn=activation, weights_initializer=initializer) conv_layers.append(prev_layer) last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in]) hidden = fully_connected( last_conv_layer_flat, n_hidden, activation_fn=hidden_activation, weights_initializer=initializer) outputs = fully_connected( hidden, n_outputs, activation_fn=None, weights_initializer=initializer) trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name) trainable_vars_by_name = {var.name[len(scope.name):]: var for var in trainable_vars} return outputs, trainable_vars_by_name ''' The first part of this code defines the hyperparameters of the DQN architecture. 
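As a quick sanity check on the n_hidden_in value above (an inference from the listed strides and the 'SAME' padding, not something stated explicitly here): each convolution divides the spatial size by its stride, so the 88 × 80 input becomes 22 × 20 after the stride-4 layer, 11 × 10 after the stride-2 layer, and remains 11 × 10 after the stride-1 layer; with 64 maps in the last convolutional layer that gives 64 × 11 × 10 = 7040 flattened inputs to the fully connected hidden layer.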
Then the q_network() function creates the DQN, taking the environment’s state X_state as input, and the name of the variable scope. Note that we will just use one observation to represent the environment’s state since there’s almost no hidden state (except for blinking objects and the ghosts’ directions). The trainable_vars_by_name dictionary gathers all the trainable variables of this DQN. It will be useful in a minute when we create operations to copy the critic DQN to the actor DQN. The keys of the dictionary are the names of the variables, stripping the part of the prefix that just corresponds to the scope’s name. It looks like this ''' print(trainable_vars_by_name) # Create the input placeholder, the two DQNs, and the operation to copy the critic DQN to the actor DQN X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels]) actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") copy_ops = [actor_var.assign(critic_vars[var_name]) for var_name, actor_var in actor_vars.items()] copy_critic_to_actor = tf.group(*copy_ops) ''' Let’s step back for a second: we now have two DQNs that are both capable of taking an environment state (i.e., a preprocessed observation) as input and outputting an estimated Q-Value for each possible action in that state. Plus we have an operation called copy_critic_to_actor to copy all the trainable variables of the critic DQN to the actor DQN. We use TensorFlow’s tf.group() function to group all the assignment operations into a single convenient operation. The actor DQN can be used to play Ms. Pac-Man (initially very badly). As discussed earlier, you want it to explore the game thoroughly enough, so you generally want to combine it with an ε-greedy policy or another exploration strategy. But what about the critic DQN? How will it learn to play the game? The short answer is that it will try to make its Q-Value predictions match the Q-Values estimated by the actor through its experience of the game. Specifically, we will let the actor play for a while, storing all its experiences in a replay memory. Each memory will be a 5-tuple (state, action, next state, reward, continue), where the “continue” item will be equal to 0.0 when the game is over, or 1.0 otherwise. Next, at regular intervals we will sample a batch of memories from the replay memory, and we will estimate the Q-Values from these memories. Finally, we will train the critic DQN to predict these Q-Values using regular supervised learning techniques. Once every few training iterations, we will copy the critic DQN to the actor DQN. NOTE: The replay memory is optional, but highly recommended. Without it, you would train the critic DQN using consecutive experiences that may be very correlated. This would introduce a lot of bias and slow down the training algorithm’s convergence. By using a replay memory, we ensure that the memories fed to the training algorithm can be fairly uncorrelated. Let’s add the critic DQN’s training operations. First, we need to be able to compute its predicted Q-Values for each state-action in the memory batch. Since the DQN outputs one Q-Value for every possible action, we need to keep only the Q-Value that corresponds to the action that was actually chosen in this memory. 
For this, we will convert the action to a one-hot vector (recall that this is a vector full of 0s except for a 1 at the i th index), and multiply it by the Q-Values: this will zero out all Q-Values except for the one corresponding to the memorized action. Then just sum over the first axis to obtain only the desired Q-Value prediction for each memory ''' X_action = tf.placeholder(tf.int32, shape=[None]) q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs), axis=1, keep_dims=True) ''' Next let’s add the training operations, assuming the target Q-Values will be fed through a placeholder. We also create a nontrainable variable called global_step. The optimizer’s minimize() operation will take care of incrementing it. Plus we create the usual init operation and a Saver. ''' y = tf.placeholder(tf.float32, shape=[None, 1]) cost = tf.reduce_mean(tf.square(y - q_value)) global_step = tf.Variable(0, trainable=False, name='global_step') optimizer = tf.train.AdamOptimizer(learning_rate) training_op = optimizer.minimize(cost, global_step=global_step) init = tf.global_variables_initializer() saver = tf.train.Saver() ''' That’s it for the construction phase. Before we look at the execution phase, we will need a couple of tools. First, let’s start by implementing the replay memory. We will use a deque list since it is very efficient at pushing items to the queue and popping them out from the end of the list when the maximum memory size is reached. We will also write a small function to randomly sample a batch of experiences from the replay memory ''' from collections import deque replay_memory_size = 10000 replay_memory = deque([], maxlen=replay_memory_size) def sample_memories(batch_size): indices = rnd.permutation(len(replay_memory))[:batch_size] cols = [[], [], [], [], []] # state, action, reward, next_state, continue for idx in indices: memory = replay_memory[idx] for col, value in zip(cols, memory): col.append(value) cols = [np.array(col) for col in cols] return (cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)) ''' Next, we will need the actor to explore the game. We will use the ε-greedy policy, and gradually decrease ε from 1.0 to 0.05, in 50,000 training steps ''' eps_min = 0.05 eps_max = 1.0 eps_decay_steps = 50000 def epsilon_greedy(q_values, step): epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps) if rnd.rand() < epsilon: return rnd.randint(n_outputs) # random action else: return np.argmax(q_values) # optimal action ''' That’s it! We have all we need to start training. The execution phase does not contain anything too complex, but it is a bit long, so take a deep breath. Ready? Let’s go! 
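(For reference, the ε schedule defined above starts at ε = 1.0, falls to roughly 0.525 after 25,000 training steps, and sits at its 0.05 floor from step 50,000 onward, so early training is almost entirely random exploration.)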
First, let’s initialize a few variables ''' n_steps = 100000 # total number of training steps training_start = 1000 # start training after 1,000 game iterations training_interval = 3 # run a training step every 3 game iterations save_steps = 50 # save the model every 50 training steps copy_steps = 25 # copy the critic to the actor every 25 training steps discount_rate = 0.95 skip_start = 90 # skip the start of every game (it's just waiting time) batch_size = 50 iteration = 0 # game iterations checkpoint_path = "./my_dqn.ckpt" done = True # env needs to be reset # Next, let’s open the session and run the main training loop with tf.Session() as sess: if os.path.isfile(checkpoint_path): saver.restore(sess, checkpoint_path) else: init.run() while True: step = global_step.eval() if step >= n_steps: break iteration += 1 if done: # game over, start again obs = env.reset() for skip in range(skip_start): # skip the start of each game obs, reward, done, info = env.step(0) state = preprocess_observation(obs) # Actor evaluates what to do q_values = actor_q_values.eval(feed_dict={X_state: [state]}) action = epsilon_greedy(q_values, step) # Actor plays obs, reward, done, info = env.step(action) next_state = preprocess_observation(obs) # Let's memorize what just happened replay_memory.append((state, action, reward, next_state, 1.0 - done)) state = next_state if iteration < training_start or iteration % training_interval != 0: continue # Critic learns X_state_val, X_action_val, rewards, X_next_state_val, continues = ( sample_memories(batch_size)) next_q_values = actor_q_values.eval( feed_dict={X_state: X_next_state_val}) max_next_q_values = np.max(next_q_values, axis=1, keepdims=True) y_val = rewards + continues * discount_rate * max_next_q_values training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val}) # Regularly copy critic to actor if step % copy_steps == 0: copy_critic_to_actor.run() # And save regularly if step % save_steps == 0: saver.save(sess, checkpoint_path) ''' We start by restoring the models if a checkpoint file exists, or else we just initialize the variables normally. Then the main loop starts, where iteration counts the total number of game steps we have gone through since the program started, and step counts the total number of training steps since training started (if a checkpoint is restored, the global step is restored as well). Then the code resets the game (and skips the first boring game steps, where nothing happens). Next, the actor evaluates what to do, and plays the game, and its experience is memorized in replay memory. Then, at regular intervals (after a warmup period), the critic goes through a training step. It samples a batch of memories and asks the actor to estimate the Q-Values of all actions for the next state, and it applies Equation 16-7 to compute the target Q-Value y_val. The only tricky part here is that we must multiply the next state’s Q-Values by the continues vector to zero out the Q-Values corresponding to memories where the game was over. Next we run a training operation to improve the critic’s ability to predict Q-Values. Finally, at regular intervals we copy the critic to the actor, and we save the model. TIP: Unfortunately, training is very slow: if you use your laptop for training, it will take days before Ms. Pac-Man gets any good, and if you look at the learning curve, measuring the average rewards per episode, you will notice that it is extremely noisy. 
At some points there may be no apparent progress for a very long time until suddenly the agent learns to survive a reasonable amount of time. As mentioned earlier, one solution is to inject as much prior knowledge as possible into the model (e.g., through preprocessing, rewards, and so on), and you can also try to bootstrap the model by first training it to imitate a basic strategy. In any case, RL still requires quite a lot of patience and tweaking, but the end result is very exciting. '''Creating a data set for splitting into Train, Validation and TestX = pd.DataFrame(np.random.randint(100, size = (100, 2)), columns = ['x', 'y']) X.index = [pd.Timestamp('20200312010000') + pd.Timedelta(minutes = i * 2) for i in range(X.shape[0])] X.shapeCreating labels for the data sety = pd.Series(np.random.randint(2, size = X.shape[0]), index = X.index) ySplitting data setHere the data set will be split into 3 sections popularly known as train, validation and test. Test is optional though. The idea is that, in temporal order, the train data comes first; after that there may be a gap whose size is defined by the parameter 'dist', then comes the validation data, and after the same amount of gap comes the test data.
Initialization parameters for SplitDataset class
data : pandas dataframe or series or numpy array of dimension samples X other dimensions with rank 2 or higher
label : pandas dataframe or series or numpy array with dimension samples X classes with rank 2
index : numpy array, we can explicitly insert indices of the data set and labels
test_ratio : float, ratio of test data
np_seed : int, numpy seed used for random shuffle
same_ratio : bool, should only be True if the labels are classification labels and we want to keep the test and validation ratio the same for all classes (it doesn't have any use in sequence split)
make_onehot : bool, if True, the labels will be converted into one-hot encoded format
# defining object as splitter splitter = msd.SplitDataset(X, y, test_ratio = .1)
Parameters for sequence_split function
seq_len : int, length of each sequence
val_ratio : float, validation data set ratio, default is .15
data_stride : int, the shift between two adjacent examples when preparing the data set, default is 1
label_shift : temporal difference between the label and the corresponding data's last time step, default is 1
split_method : {'train_val', 'multiple_train_val'}, default is 'multiple_train_val'
sec : int or pandas dataframe with columns named 'start' and 'stop', number of sections for multiple_train_val splitting (train_val splitting is just multiple_train_val with sec = 1), default is 1
dist : int, number of data points left out between two adjacent sections, default is 0
# applying sequence split data = splitter.sequence_split(seq_len = 10, val_ratio = .2, data_stride = 2, label_shift = 3, split_method = 'train_val', sec = 1, dist = 1)Checking output of split functiondata.keys() data['train'].keys() data['validation']['index'] # Accumulating data and label in order to understand the splitting easily X['label'] = y X.reset_index().values for see in sees: print('\n############## %s #############\n'%see) for i in range(data[see]['data'].shape[0]): print(data[see]['data'][i], data[see]['label'][i], data[see]['index'][i])############## train ############# [[12 97] [28 88] [99 36] [28 52] [58 63] [56 85] [ 5 92] [87 18] [70 46] [30 8]] [1] 2020-03-12 01:20:00 [[99 36] [28 52] [58 63] [56 85] [ 5 92] [87 18] [70 46] [30 8] [36 85] [54 64]] [0] 2020-03-12
01:24:00 [[58 63] [56 85] [ 5 92] [87 18] [70 46] [30 8] [36 85] [54 64] [26 81] [83 7]] [0] 2020-03-12 01:28:00 [[ 5 92] [87 18] [70 46] [30 8] [36 85] [54 64] [26 81] [83 7] [ 5 13] [53 65]] [0] 2020-03-12 01:32:00 [[70 46] [30 8] [36 85] [54 64] [26 81] [83 7] [ 5 13] [53 65] [72 50] [ 3 71]] [0] 2020-03-12 01:36:00 [[36 85] [54 64] [26 81] [83 7] [ 5 13] [53 65] [72 50] [ 3 71] [70 19] [12 67]] [1] 2020-03-12 01:40:00 [[26 81] [83 7] [ 5 13] [53 65] [72 50] [ 3 71] [70 19] [12 67] [90 75] [46 51]] [1] 2020-03-12 01:44:00 [[ 5 13] [53 65] [72 50] [ 3 71] [70 19] [12 67] [90 75] [46 51] [40 98] [39 6]] [1] 2020-03-12 01:48:00 [[72 50] [ 3 71] [70 19] [12 67] [90 75[...]Analyze the Effects of a Hard Wall in the z-Directionimport numpy as np import matplotlib.pyplot as plt from graphenetools import gt import re,glob,os from scipy.signal import argrelextrema from scipy.optimize import brentq from graphenetools import gt import multiprocessing import sys,importlib from dgutils import colors as colortools from collections import defaultdict import pickle from numpy import pi as π # Notebook display options %matplotlib inline %config InlineBackend.figure_format = 'svg' # plot style plot_style = {'notebook':'../include/notebook.mplstyle','aps':'../include/aps.mplstyle'} plt.style.reload_library() plt.style.use(plot_style['aps']) figsize = plt.rcParams['figure.figsize'] plt.rcParams['text.latex.preamble'] = f'\input{{{os.getcwd()}/../include/texheader}}' colors = plt.rcParams['axes.prop_cycle'].by_key()['color']Do you want to process the full data set?Default is False. The full data set can be found here: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4553524.svg)](https://doi.org/10.5281/zenodo.4553524)A minimal set of reduced (averaged and processed) data files is included with the repository `../data/QMC.tar.bz2`. We can extract if it hasn't already happened.reduce_data = False if not os.path.isdir('../data/QMC/'): ! 
tar xjf ../data/QMC.tar.bz2Some helper and analysis functions Note on using pimchelp`pimchelp` is a python library that can be installed via `pip`, instructors are on the [github repository](https://github.com/DelMaestroGroup/pimcscripts).import layerutils from layerutils import lab,vals,texformat,get_base_dir from pimcscripts import pimchelp # get the graphene lattice information Am, An, b1, b2, gm, gn = gt.get_graphene_vectors(0.0)Setting up and Extracting Information from the QMC Datanum_sites = [24,48,96,192] filling = [0,1/3,1] sim_params = {'T':0.0,'canonical':True,'τ':0.00313, 'β':1.002} Lz = np.arange(4.5,5.51,0.05) pimcid = defaultdict(dict) par_map = defaultdict(dict) base_dir = defaultdict(dict) L,n,N,τ = defaultdict(dict),defaultdict(dict),defaultdict(dict),defaultdict(dict) N_ads = defaultdict(dict) simulations,pimcids = {},{} pigs_pimcids,pimc_pimcids = defaultdict(list),defaultdict(list) for cnum in num_sites: Nkey = lab(N=cnum) cbase_dir = get_base_dir(cnum,T=sim_params['T']) log_names = pimchelp.get_file_list_from_params(**sim_params,base_dir=cbase_dir) # We go through each file and automatically populate the simulation map for log in log_names: par_ = pimchelp.get_parameter_map(cbase_dir + log) cN = par_['Initial Number Particles'] if cN == 1: cf = 0 else: cf = cN/cnum sim = lab(T=sim_params['T'],n=cf,Lz=par_['Container Length'],N=cnum) base_dir[Nkey][sim] = cbase_dir # sort the pimcids into two possible groups pimcid[Nkey][sim] = par_['PIMCID'] if sim_params['T'] > 0: pimc_pimcids[Nkey].append(par_['PIMCID']) else: pigs_pimcids[Nkey].append(par_['PIMCID']) par_map[Nkey][sim] = par_ # We add some short-hand variables for ease of referencing L[Nkey][sim] = par_map[Nkey][sim]['Container Dimensions'] n[Nkey][sim] = par_map[Nkey][sim]['Initial Density'] N[Nkey][sim] = par_map[Nkey][sim]['Initial Number Particles'] τ[Nkey][sim] = par_map[Nkey][sim]['Specified Imaginary Time Step'] N_ads[Nkey][sim] = [int(round(0.5*L[Nkey][sim][0]/An[0])),int(round(L[Nkey][sim][1]/An[1]))] simulations[Nkey] = list(pimcid[Nkey].keys()) pimcids[Nkey] = list(pimcid[Nkey].values())Reduce Estimator Files for the Linear Density We process as many jobs as possible in the same directoryif reduce_data: for cnum in num_sites[:]: print(f'=== N = {cnum} ===\n') reduce_command = f"parallel python {script_dir}/reduce-one.py -r T -i {{}} -s 0.25 --canonical {get_base_dir(cnum,T=sim_params['T'])} ::: {' '.join(pimcids[lab(N=cnum)])}" stream = os.popen(reduce_command) output = stream.read() print(output) estimator = defaultdict(dict) linear_density = defaultdict(dict) z_lin,ρ_lin,Δρ_lin = defaultdict(dict),defaultdict(dict),defaultdict(dict) for cnum in num_sites: cNkey = lab(N=cnum) for sim in simulations[cNkey]: num_particles = N[cNkey][sim] reduce_params = {'canonical':True,'reduce':'T', 'pimcid':pimcid[cNkey][sim],'base_dir':base_dir[cNkey][sim]} estimator[cNkey][sim] = pimchelp.PIMCResults(pimchelp.get_reduce_name(**reduce_params,estimator='estimator')) linear_density[cNkey][sim] = pimchelp.PIMCResults(pimchelp.get_reduce_name(**reduce_params, estimator='lineardensity')) reduce_par = linear_density[cNkey][sim].params[0] z_lin[cNkey][sim] = linear_density[cNkey][sim].x(reduce_par)+L[cNkey][sim][2]/2 ρ_lin[cNkey][sim] = linear_density[cNkey][sim].y(reduce_par)*L[cNkey][sim][0]*L[cNkey][sim][1]/num_particles Δρ_lin[cNkey][sim] = linear_density[cNkey][sim].Δy(reduce_par)*L[cNkey][sim][0]*L[cNkey][sim][1]/num_particlesSave/Load this data to diskimport pickle save_data = False if save_data: with 
open('../data/rholinear_vs_z.pickle','wb') as handle: pickle.dump([z_lin,ρ_lin,Δρ_lin], handle, protocol=pickle.HIGHEST_PROTOCOL) else: with open('../data/rholinear_vs_z.pickle','rb') as handle: z_lin,ρ_lin,Δρ_lin = pickle.load(handle)Check for missing valuesWe only have partial coverage of parameters for $96$ and $192$ adsorption sites.for j,cnum in enumerate(num_sites): cNkey = lab(N=cnum) for i,cf in enumerate(filling): for k,cLz in enumerate(Lz): sim = lab(T=sim_params['T'],n=cf,Lz=cLz,N=cnum) if cnum in num_sites: try: if True in np.isnan(ρ_lin[cNkey][sim]): #print(f'*{pimcid[cNkey][sim]}*', end=' ') print('NaN: ',sim) except: print('Missing: ', sim) from mpl_toolkits.axes_grid1.inset_locator import inset_axes import matplotlib.ticker as ticker import matplotlib as mpl from fractions import Fraction from scipy import interpolate colors = colortools.get_linear_colors('Spectral',len(Lz),reverse=True) ncols = 1 #len(simulations) cnum_sites = [48] cfilling = [1,1/3] _figsize=(1.125*len(cnum_sites)*figsize[0],len(cfilling)*figsize[1]) fig,ax = plt.subplots(nrows=len(cfilling),ncols=len(cnum_sites),sharex=True, sharey=True, figsize=_figsize,squeeze=False,constrained_layout=False) fig.subplots_adjust(wspace=0.05,hspace=0.05) for j,cnum in enumerate(cnum_sites): cNkey = lab(N=cnum) for i,cf in enumerate(cfilling): ax[i,j].set_ylim(0,2.59) if cf == 0: df = Fraction(1/cnum).limit_denominator() else: df = Fraction(cf).limit_denominator() for k,cLz in enumerate(Lz): sim = lab(T=sim_params['T'],n=cf,Lz=cLz,N=cnum) if sim in ρ_lin[cNkey]: x,y = z_lin[cNkey][sim],ρ_lin[cNkey][sim] f = interpolate.interp1d(x,y,kind='cubic') _x = np.linspace(x[0],x[-1],10000) #ax[i,j].plot(_x,f(_x), color=colors[k],lw=1) ax[i,j].fill_between(x,y-Δρ_lin[cNkey][sim],y+Δρ_lin[cNkey][sim], color=colors[k], alpha=0.25, zorder=-100) if np.abs(cLz-5.05) < 0.01 and cf==1: lw = 4 else: lw = 1 ax[i,j].plot(z_lin[cNkey][sim],ρ_lin[cNkey][sim], color=colors[k],lw=lw) else: print(sim) ax[i,j].annotate(f'$N_\graphene = {cnum},\ f = {df}$', xy=(0.98,0.95),xytext=(0.98, 0.95), xycoords='axes fraction', ha='right', va='top') # Now we plot the bulk data sim = lab(T=sim_params['T'],n=1/3,Lz=10.0,N=cnum) if sim in ρ_lin[cNkey]: ax[0,j].plot(z_lin[cNkey][sim],ρ_lin[cNkey][sim], color='k', ls='--', lw=1, zorder=-10, label=r'$f=1/3$' + '\n' + r'$L_z = 10\, \mathrm{\AA}$') ax[1,j].plot(z_lin[cNkey][sim],ρ_lin[cNkey][sim], color='k', ls='--', lw=1, zorder=10, label=r'$f=1/3$' + '\n' + r'$L_z = 10\, \mathrm{\AA}$') loc = 'upper left' axins1 = inset_axes(ax[0,0], width="100%", # width = 50% of parent_bbox width height="5%", # height : 5% loc=loc, bbox_to_anchor=(0.0, 1.15, 1.0, 1.0), bbox_transform=ax[1,-1].transAxes, borderpad=0) sm = plt.cm.ScalarMappable(cmap=mpl.cm.Spectral_r, norm=plt.Normalize(vmin=Lz[0], vmax=Lz[-1])) sm._A = [] cb = fig.colorbar(sm, cax=axins1, orientation='horizontal') cb.set_label(r"$\alabel{L_z}{\angstrom}$",labelpad=5) cb.ax.xaxis.set_ticks_position('top') cb.ax.xaxis.set_label_position('top') cb.ax.xaxis.set_tick_params(pad=1) ax[0,0].annotate('(a)', xy=(0.01,0.98),ha='left', va='top', xycoords='axes fraction') ax[1,0].annotate('(b)', xy=(0.01,0.98),ha='left', va='top', xycoords='axes fraction') #ax[-1,0].axis('off') ax[0,0].set_xlim(1.5,4.1) #for j in range(1,len(num_sites)): # ax[0,j].set_yticklabels([]) #for j in range(2,len(num_sites)): # ax[1,j].set_yticklabels([]) for j in range(len(cfilling)): ax[j,0].set_ylabel(r'$\alabel{\rho(z)/N}{\angstrom^{-1}}$') ax[0,0].legend(loc=(0.5,0.45),handlelength=1.5) 
ax[1,0].legend(loc=(0.5,0.45),handlelength=1.5) for j in range(len(cnum_sites)): ax[-1,j].set_xlabel(r'$\alabel{z}{\angstrom}$') plt.savefig('../plots/rho_z_Lz.pdf') plt.savefig('../plots/rho_z_Lz.svg')Investigate higher derivatives of unit filling dataThis is purely exploratory and did not end up in the final manuscript.# from mpl_toolkits.axes_grid1.inset_locator import inset_axes # import matplotlib.ticker as ticker # import matplotlib as mpl # from fractions import Fraction # Lz = np.arange(4.5,5.51,0.05) # colors = colortools.get_linear_colors('Spectral',len(Lz),reverse=True) # figsize = plt.rcParams['figure.figsize'] # fig,ax = plt.subplots(nrows=1,ncols=len(simulations),sharex=True, sharey=True, # figsize=(len(num_sites)/2*figsize[0],len(filling)*figsize[1]),squeeze=False) # fig.subplots_adjust(wspace=0.05,hspace=0.05) # for j,cnum in enumerate(num_sites[::1]): # cNkey = Nkey(cnum) # cf = 1.0 # df = Fraction(cf).limit_denominator() # for k,cLz in enumerate(Lz): # sim = lab(n=cf,Lz=cLz,N=cnum) # if sim in ρ_lin[cNkey]: # ax[0,j].plot(z_lin[cNkey][sim],np.gradient(np.gradient(ρ_lin[cNkey][sim])), color=colors[k]) # ax[0,j].annotate(f'$N = {cnum},\ f = {df}$', xy=(0.95,0.95),xytext=(0.95, 0.95), # xycoords='axes fraction', ha='right', va='top', fontsize=12) # #ax[0,0].set_xlim(1.5,4.5) # ax[0,0].set_ylabel(r'$\rho/N\; [\mathrm{\AA}^{-1}]$') # ax[-1,0].set_ylabel(r'$\rho/N\; [\mathrm{\AA}^{-1}]$') # #for j in range(len(num_sites)): # # ax[1,j].set_xlabel(r'$z\; [\mathrm{\AA}]$') # #plt.savefig('./plot|s/rho_z_first_layer.pdf',dpi=300) # def crossings_nonzero_all(data): # pos = data > 0 # npos = ~pos # return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0] # for j,cnum in enumerate(num_sites[::1]): # cNkey = Nkey(cnum) # for k,cLz in enumerate(Lz): # sim = lab(n=1,Lz=cLz,N=cnum) # if sim in ρ_lin[cNkey]: # zero_crossings = crossings_nonzero_all(np.gradient(np.gradient(ρ_lin[cNkey][sim]))) # print(sim,z_lin[cNkey][sim][zero_crossings])Investigate $\chi^2$ to determine which box-height is the most natural choiceWe compare with a *bulk* box with $L_z = 10$ Å.from scipy import integrate,interpolate χ2 = defaultdict(list) Lz_min = {} idx_min = {} for j,cnum in enumerate(num_sites[:-2]): cNkey = lab(N=cnum) for k,cLz in enumerate(Lz): sim_neq1 = lab(T=sim_params['T'],n=1,Lz=cLz,N=cnum) sim_neq1o3 = lab(T=sim_params['T'],n=1/3,Lz=10.0,N=cnum) if ((sim_neq1 in ρ_lin[cNkey]) and (sim_neq1o3 in ρ_lin[cNkey])): # normalize both values ρ_neq1 = ρ_lin[cNkey][sim_neq1]/integrate.simps(ρ_lin[cNkey][sim_neq1],z_lin[cNkey][sim_neq1]) ρ_neq1o3 = ρ_lin[cNkey][sim_neq1o3]/integrate.simps(ρ_lin[cNkey][sim_neq1o3],z_lin[cNkey][sim_neq1o3]) f = interpolate.interp1d(z_lin[cNkey][sim_neq1o3], ρ_neq1o3) norm = (z_lin[cNkey][sim_neq1][1]-z_lin[cNkey][sim_neq1][0])**2 χ2[cNkey].append(np.sum((ρ_neq1-f(z_lin[cNkey][sim_neq1]))**2)) else: χ2[cNkey].append(0) χ2[cNkey] = np.array(χ2[cNkey]) Lz_min[cNkey] = Lz[np.nanargmin(χ2[cNkey])] idx_min[cNkey] = np.where(np.abs(Lz-Lz_min[cNkey])<0.01)[0][0] print(idx_min[cNkey],Lz_min[cNkey])11 5.049999999999998 11 5.049999999999998An alternate approach where we shift the minima so that the peak location coincidesWe did not use this alternative approach.alternate = False if alternate: from scipy import integrate,interpolate χ2 = defaultdict(list) Lz_min = {} idx_min = {} with plt.style.context('notebook'): for j,cnum in enumerate(num_sites[:-1]): cNkey = lab(N=cnum) for k,cLz in enumerate(Lz): sim_neq1 = lab(T=sim_params['T'],n=1,Lz=cLz,N=cnum) sim_neq1o3 = 
lab(T=sim_params['T'],n=1/3,Lz=10.0,N=cnum) if ((sim_neq1 in ρ_lin[cNkey]) and (sim_neq1o3 in ρ_lin[cNkey])): # normalize both values ρ_neq1 = ρ_lin[cNkey][sim_neq1]/integrate.simps(ρ_lin[cNkey][sim_neq1],z_lin[cNkey][sim_neq1]) ρ_neq1o3 = ρ_lin[cNkey][sim_neq1o3]/integrate.simps(ρ_lin[cNkey][sim_neq1o3],z_lin[cNkey][sim_neq1o3]) zshift_neq1o3 = z_lin[cNkey][sim_neq1o3][np.argmax(ρ_lin[cNkey][sim_neq1o3])] zshift_neq1 = z_lin[cNkey][sim_neq1][np.argmax(ρ_lin[cNkey][sim_neq1])] zshift = zshift_neq1 - zshift_neq1o3 #print(zshift_neq1o3,zshift_neq1,zshift) f = interpolate.interp1d(z_lin[cNkey][sim_neq1o3]-zshift_neq1o3, ρ_neq1o3) f1 = interpolate.interp1d(z_lin[cNkey][sim_neq1]-zshift_neq1, ρ_neq1) min_z = np.max([z_lin[cNkey][sim_neq1o3][0]-zshift_neq1o3, z_lin[cNkey][sim_neq1][0]-zshift_neq1]) max_z = np.min([z_lin[cNkey][sim_neq1o3][-1]-zshift_neq1o3, z_lin[cNkey][sim_neq1][-1]-zshift_neq1]) z_interp = np.linspace(min_z,max_z,1000) χ2[cNkey].append(np.sum((f1(z_interp)-f(z_interp))**2)) else: χ2[cNkey].append(0) χ2[cNkey] = np.array(χ2[cNkey]) Lz_min[cNkey] = Lz[np.nanargmin(χ2[cNkey])] idx_min[cNkey] = np.where(np.abs(Lz-Lz_min[cNkey])<0.01)[0][0] print(idx_min[cNkey],Lz_min[cNkey]) fig,ax = plt.subplots(figsize=(1.1*figsize[0],1.05*figsize[1])) for i,cnum in enumerate(num_sites[:-2]): cNkey = lab(N=cnum) ax.plot(Lz,χ2[cNkey], marker='o', mfc='None', label=f'$N_{{\graphene}} = {cnum}$', lw=0.75,ms=5,mew=0.75, ls='--', color=colortools.get_alpha_hex(colors[i],0.5), mec=colors[i]) #ax.axvline(x=Lz_min[lab(N=24)],color='gray', ls=':', zorder=-10, alpha=0.5,lw=0.5) axins = inset_axes(ax, width="80%", height="80%", bbox_to_anchor=(0.35, .24, .45, .78), bbox_transform=ax.transAxes) cNkey = lab(N=48) sim_neq1 = lab(T=sim_params['T'],n=1,Lz=5.05,N=48) sim_neq1o3 = lab(T=sim_params['T'],n=1/3,Lz=10.0,N=48) label_neq1o3 = r'$f=1/3$' + '\n' + r'$L_z = 10\, \mathrm{\AA}$' label_neq1 = r'$f=1$' + '\n' + r'$L_z = 5.05\, \mathrm{\AA}$' axins.plot(z_lin[cNkey][sim_neq1o3],ρ_lin[cNkey][sim_neq1o3], lw=1, color='k', ls='--', label=label_neq1o3) axins.plot(z_lin[cNkey][sim_neq1],ρ_lin[cNkey][sim_neq1], lw=2, color=colors[11], label=label_neq1) axins.set_xlim(1.5,4.25) axins.set_ylim(0.0,1.49) axins.set_ylabel(r'$\alabel{\rho(z)/N}{\angstrom^{-1}}$') axins.set_xlabel(r'$\alabel{z}{\angstrom}$', labelpad=-5) axins.legend(loc=(0.6,0.325), handlelength=1, facecolor='white', framealpha=1, frameon=True, edgecolor='None', fontsize=7) ax.annotate("", xy=(5.05, 1.5), xycoords='data',zorder=-100, xytext=(4.9, 9.6), textcoords='data', arrowprops=dict(arrowstyle="-", connectionstyle="arc3", color='gray', ls=':', alpha=0.5,lw=0.5), ) ax.annotate("", xy=(5.05, 1.5), xycoords='data',zorder=-100, xytext=(5.31, 9.6), textcoords='data', arrowprops=dict(arrowstyle="-", connectionstyle="arc3", color='gray', ls=':', alpha=0.5,lw=0.5), ) ax.set_xlabel(r'$\alabel{L_z}{\angstrom}$') ax.set_ylabel(r'$\alabel{\chi^2}{\angstrom^{-2}}$') ax.legend(loc='lower left') plt.savefig('../plots/chi2_vs_Lz.pdf') plt.savefig('../plots/chi2_vs_Lz.svg')Determine the values of Lz for which we have only 1 layerfrom scipy.signal import argrelextrema Lz_max = defaultdict(list) # Figure out when we develop the for j,cnum in enumerate(num_sites[:-2]): cNkey = lab(N=cnum) for k,cLz in enumerate(Lz): sim_neq1 = lab(T=sim_params['T'],n=1,Lz=cLz,N=cnum) if (sim_neq1 in ρ_lin[cNkey]): idxx_min = argrelextrema(ρ_lin[cNkey][sim_neq1], np.less)[0] if idxx_min.size > 0 and z_lin[cNkey][sim_neq1][idxx_min[0]] > 2: Lz_max[cNkey].append(cLz) Lz_max[cNkey] = 
np.sort(np.array(Lz_max[cNkey]))Consider $V^\prime$ for $L_z = 10$ ÅVp_bulk = {} ΔVp_bulk = {} for cnum in num_sites: cNkey = lab(N=cnum) sim_neq1o3 = lab(T=sim_params['T'],n=1/3,Lz=10,N=cnum) if sim_neq1o3 in estimator[cNkey]: Vp_bulk[cNkey] = estimator[cNkey][sim_neq1o3].data['V_int']/(cnum*3/3) ΔVp_bulk[cNkey] = estimator[cNkey][sim_neq1o3].data['ΔV_int']/(cnum*3/3) print(f"V' = {Vp_bulk[cNkey]:.5f} ± {ΔVp_bulk[cNkey]:.5f}")V' = -2.58497 ± 0.00354 V' = -2.71606 ± 0.00161 V' = -2.73716 ± 0.00430 V' = -2.74716 ± 0.00517Investigate the Effects of Finite Size Scalingfrom scipy.optimize import curve_fit def lin(x,Vp0,m): return Vp0 + m*x**1 fig,ax = plt.subplots(figsize=(1.1*figsize[0],1.05*figsize[1])) invN,Vp_fit,ΔVp_fit = [],[],[] for cnum in num_sites[::-1]: cNkey = lab(N=cnum) sim_neq1o3 = lab(T=sim_params['T'],n=1/3,Lz=10,N=cnum) if cnum==192: label='QMC' else: label = '' if sim_neq1o3 in estimator[cNkey]: ax.errorbar(1.0/cnum,Vp_bulk[cNkey],yerr=ΔVp_bulk[cNkey], marker='o', mfc='None', lw=0, mec=colors[0],ms=5,mew=0.75, ecolor=colors[0],elinewidth=0.5, label=label, ls='None') invN.append(1.0/cnum) Vp_fit.append(Vp_bulk[cNkey]) ΔVp_fit.append(ΔVp_bulk[cNkey]) popt,pcov = curve_fit(lin,invN[:-1],Vp_fit[:-1],sigma=ΔVp_fit[:-1],p0=[-2.9,6]) invN_fit = np.linspace(0,1/40,1000) perr = [int(np.floor(x / (10**np.floor(np.log10(x))))) for x in np.sqrt(np.diag(pcov))] pacc = [int(-np.floor(np.log10(x))) for x in np.sqrt(np.diag(pcov))] label = f'fit: ${popt[0]:.{pacc[0]}f}({perr[0]}) + {popt[1]:.{pacc[1]}f}({perr[1]})/N_{{\graphene}}$' ax.plot(invN_fit, lin(invN_fit,*popt), lw=0.5, zorder=-1, color=colors[0], label=label) ax.annotate(r'$L_z= \SI{10}{{\angstrom}}$', xy=(0.98,0.02), ha='right', va='bottom', xycoords='axes fraction') print(popt) print(np.sqrt(np.diag(pcov))) handles, labels = ax.get_legend_handles_labels() ax.legend(handles[::-1], labels[::-1],loc='upper left', handlelength=1) ax.set_xlim(0,1/23) ax.set_xticks([1/192,1/96,1/48,1/24]) ax.set_xticklabels(['1/192','1/96','1/48','1/24']) ax.set_xlabel(r'$1/N_{\graphene}$') ax.set_ylabel(r'$\alabel{V^\prime_{\rm QMC}}{\kelvin}$') plt.savefig('../plots/Vprime_QMC_fss.pdf') plt.savefig('../plots/Vprime_QMC_fss.svg')[-2.75780731 2.00342378] [0.00036638 0.01916735]Extract $V$ and $V^\prime$ from the interactionsV_BH = defaultdict(list) ΔV_BH = defaultdict(list) K,ΔK = defaultdict(list),defaultdict(list) E,ΔE = defaultdict(list),defaultdict(list) V_ext,ΔV_ext = defaultdict(list),defaultdict(list) V_int,ΔV_int = defaultdict(list),defaultdict(list) Vp_BH,ΔVp_BH = defaultdict(list),defaultdict(list) for cnum in num_sites: cNkey = lab(N=cnum) for i,cLz in enumerate(Lz): sim_neq1 = lab(T=sim_params['T'],n=1,Lz=cLz,N=cnum) sim_neq1o3 = lab(T=sim_params['T'],n=1/3,Lz=cLz,N=cnum) sim_neq0 = lab(T=sim_params['T'],n=0,Lz=cLz,N=cnum) # Compute NN interaction V if sim_neq1 in estimator[cNkey]: V_BH[cNkey].append(estimator[cNkey][sim_neq1].data['V_int']/(cnum*3)) Δ2 = (estimator[cNkey][sim_neq1].data['ΔV_int']/(cnum*3))**2 else: print(sim_neq1) V_BH[cNkey].append(np.nan) Δ2 = np.nan if sim_neq1o3 in estimator[cNkey]: V_BH[cNkey][-1] -= estimator[cNkey][sim_neq1o3].data['V_int']/(cnum) Δ2 += (estimator[cNkey][sim_neq1o3].data['ΔV_int']/cnum)**2 # For computing NNN interaction V' if sim_neq1o3 in estimator[cNkey]: Vp_BH[cNkey].append(estimator[cNkey][sim_neq1o3].data['V_int']/(cnum*3)) ΔVp_BH[cNkey].append(estimator[cNkey][sim_neq1o3].data['ΔV_int']/(cnum*3)) else: print(sim_neq1o3) Vp_BH[cNkey].append(np.nan) ΔVp_BH[cNkey].append(np.nan) 
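# Δ2 accumulated the squared statistical errors of the f=1 and f=1/3 estimators above,
# so ΔV_BH below is the combined uncertainty, added in quadrature.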
ΔV_BH[cNkey].append(np.sqrt(Δ2)) # now compute quantities for all fillings for fill in filling: sim = lab(T=sim_params['T'],n=fill,Lz=cLz,N=cnum) cNfkey = lab(N=cnum,n=fill) if sim in estimator[cNkey]: E[cNfkey].append(estimator[cNkey][sim].data['E']) ΔE[cNfkey].append(estimator[cNkey][sim].data['ΔE']) K[cNfkey].append(estimator[cNkey][sim].data['K']) ΔK[cNfkey].append(estimator[cNkey][sim].data['ΔK']) V_ext[cNfkey].append(estimator[cNkey][sim].data['V_ext']) ΔV_ext[cNfkey].append(estimator[cNkey][sim].data['ΔV_ext']) V_int[cNfkey].append(estimator[cNkey][sim].data['V_int']) ΔV_int[cNfkey].append(estimator[cNkey][sim].data['ΔV_int']) V_BH[cNkey] = np.array(V_BH[cNkey]) ΔV_BH[cNkey] = np.array(ΔV_BH[cNkey]) Vp_BH[cNkey] = np.array(Vp_BH[cNkey]) ΔVp_BH[cNkey] = np.array(ΔVp_BH[cNkey]) for fill in filling: cNfkey = lab(N=cnum,n=fill) K[cNfkey] = np.array(K[cNfkey]) ΔK[cNfkey] = np.array(ΔK[cNfkey]) E[cNfkey] = np.array(E[cNfkey]) ΔE[cNfkey] = np.array(ΔE[cNfkey]) V_ext[cNfkey] = np.array(V_ext[cNfkey]) ΔV_ext[cNfkey] = np.array(ΔV_ext[cNfkey]) V_int[cNfkey] = np.array(V_ext[cNfkey]) ΔV_int[cNfkey] = np.array(ΔV_ext[cNfkey]) V_BH[lab(N=48)] fig,ax = plt.subplots(figsize=(1.1*figsize[0],1.05*figsize[1])) z_1layer = np.zeros_like(Lz, dtype=bool) for i,cnum in enumerate(num_sites[:-2]): cNkey = lab(N=cnum) z_1layer[np.where(Lz < np.min(Lz_max[lab(N=24)]))[0]] = True ax.errorbar(Lz[z_1layer],V_BH[cNkey][z_1layer],ΔV_BH[cNkey][z_1layer], marker='o', mfc='None', label=f'$N_\graphene = {cnum}$', lw=0.5, mec=colors[i], color=colortools.get_alpha_hex(colors[i],0.5),ms=5,mew=0.75,ls='--', elinewidth=0.5,ecolor=colors[i]) ax.errorbar(Lz[~z_1layer],V_BH[cNkey][~z_1layer], ΔV_BH[cNkey][~z_1layer], marker='o', mfc='None', lw=0.5, mec=colors[i],alpha=0.5, color=colors[i],ms=5,mew=0.75,ls='--') cNkey = lab(N=48) VBg = V_BH[cNkey][idx_min[cNkey]] # we estimate uncertainty by considering the minimal finite size effects ΔVBg = np.abs(V_BH[lab(N=48)][11]-V_BH[lab(N=24)][11]) perr = int(np.floor(ΔVBg / (10**np.floor(np.log10(ΔVBg))))) pacc = int(-np.floor(np.log10(ΔVBg))) ax.axvline(x=Lz_min[cNkey],color='gray', ls=':', zorder=-10, alpha=0.5,lw=0.5) ax.axhline(y=VBg,color='gray', ls=':', zorder=-10, alpha=0.5,lw=0.5) ax.annotate(f'$V_{{\\rm QMC}} = {VBg:.{pacc}f}({perr})\ \mathrm{{K}}$', xy=(Lz_min[cNkey]-0.02,VBg-1), xycoords='data', va='top', ha='right') ax.set_xlabel(r'$\alabel{L_z}{\angstrom}$') ax.set_ylabel(r'$\alabel{V_{\rm QMC}}{\kelvin}$') ax.legend() #plt.xlim(4.8,5.2) plt.savefig('../plots/VBH_vs_Lz.pdf') plt.savefig('../plots/VBH_vs_Lz.svg') print(V_BH[lab(N=24)][11]) print(V_BH[lab(N=48)][11]) print(V_BH[lab(N=48)][0]) print(V_BH[lab(N=48)][14]) print(V_BH[lab(N=24)][10]) print(V_BH[lab(N=24)][11]) print(V_BH[lab(N=24)][12])55.668225174999996 54.50495827361111 53.143706375Investigate the variation due to FSEprint((V_BH[lab(N=24)][11]-V_BH[lab(N=48)][11])/V_BH[lab(N=48)][11]*100)0.3049197080606991FSS effects seem to be on the order of <1%. 
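The one-off comparison above can be generalized. Below is a minimal sketch, using a hypothetical helper that is not part of the original notebook, which reports the relative finite-size deviation of $V_{\rm QMC}$ between two lattice sizes at any index of the `Lz` grid; the hard-coded index 11 corresponds to $L_z \approx 5.05$ Å, the box height selected by the $\chi^2$ analysis.

```python
# Hypothetical helper (not in the original analysis): relative finite-size
# deviation, in percent, of the nearest-neighbour interaction between two
# lattice sizes at a given index of the Lz grid. Assumes V_BH and lab()
# are defined as above.
def relative_fse(V_BH, N_small=24, N_large=48, idx=11):
    v_small = V_BH[lab(N=N_small)][idx]
    v_large = V_BH[lab(N=N_large)][idx]
    return (v_small - v_large) / v_large * 100

# relative_fse(V_BH) reproduces the ~0.3% figure printed above.
```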
The Next NN interactions V'fig,ax = plt.subplots(figsize=(1.1*figsize[0],1.05*figsize[1])) z_1layer = np.zeros_like(Lz, dtype=bool) for i,cnum in enumerate(num_sites[:]): cNkey = lab(N=cnum) z_1layer[np.where(Lz < np.min(Lz_max[lab(N=24)]))[0]] = True ax.errorbar(Lz[z_1layer],Vp_BH[cNkey][z_1layer],ΔVp_BH[cNkey][z_1layer], marker='o', mfc='None', label=f'$N_A = {cnum}$', lw=1, mec=colors[i], color=colors[i],ms=5,mew=0.75,ls=':') ax.errorbar(Lz[~z_1layer],Vp_BH[cNkey][~z_1layer], ΔVp_BH[cNkey][~z_1layer], marker='o', mfc='None', lw=1, mec=colors[i],alpha=0.5, color=colors[i],ms=5,mew=0.75,ls=':') ax.axvline(x=Lz_min[lab(N=24)],color='gray', ls=':', zorder=-10, alpha=0.5,lw=0.5) #VBg = V_BH[Nkey(24)][np.where(np.abs(Lz-Lz_min[cNkey])<1E-6)[0][0]] #VBg = V_BH[lab(N=24)][11] #ΔVBg = ΔV_BH[lab(N=24)][11] #ax.annotate(f'$V_{{BH}} = {VBg:.2f} \pm {ΔVBg:.2f}\ \mathrm{{K}}$', xy=(0.025,0.05),xytext=(0.025, 0.05), # xycoords='axes fraction') ax.set_xlabel(r'$L_z\;\; / \;\; \mathrm{\AA}$') ax.set_ylabel(r'$V^\prime_{BH}\;\;/\;\; \mathrm{K}$') ax.legend() #plt.xlim(4.8,5.2) #plt.savefig('../plots/VBH_vs_Lz.pdf',dpi=300) #i = 0 #for i,cz in enumerate(Lz): # print(cz,V_BH[cNkey][i],ΔV_BH[cNkey][i])/Users/agdelma/opt/miniconda3/lib/python3.9/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan. xys = np.asarray(xys) /Users/agdelma/opt/miniconda3/lib/python3.9/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan. xys = np.asarray(xys)There seems to be finite size effects here on the order of 5%print(Vp_BH[lab(N=24)][11]) print(Vp_BH[lab(N=48)][11]) print(Vp_BH[lab(N=96)][11]) print((Vp_BH[lab(N=96)][11]-Vp_BH[lab(N=48)][11])/Vp_BH[lab(N=96)][11]*100)-0.8646507208333333 -0.9075692777777777 -0.9174413506944445 1.0760440336806576Finding Contours Import resources and display imageimport numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the image image = cv2.imread('images/thumbs_up_down.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) plt.imshow(image)Produce a binary image for finding contours# Convert to grayscale gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) # Create a binary thresholded image retval, binary = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY_INV) #retval, binary = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY) plt.imshow(binary, cmap='gray')Find and draw the contours# Find contours from thresholded, binary image contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Draw all contours on a copy of the original image contours_image = np.copy(image) contours_image = cv2.drawContours(contours_image, contours, -1, (0,255,0), 3) plt.imshow(contours_image) print(contours[1]) #print(contours.lenght)[[[172 60]] [[169 63]] [[169 65]] [[168 66]] [[168 67]] [[167 68]] [[167 69]] [[166 70]] [[166 71]] [[165 72]] [[165 73]] [[164 74]] [[164 75]] [[163 76]] [[163 77]] [[162 78]] [[162 79]] [[160 81]] [[160 82]] [[159 83]] [[159 84]] [[157 86]] [[157 87]] [[156 88]] [[156 89]] [[153 92]] [[153 93]] [[143 103]] [[142 103]] [[134 111]] [[134 112]] [[132 114]] [[132 115]] [[128 119]] [[128 120]] [[115 133]] [[115 134]] [[112 137]] [[112 138]] [[111 139]] [[111 140]] [[110 141]] [[108 141]] [[107 142]] [[105 142]] [[104 143]] [[ 96 143]] [[ 95 142]] [[ 78 142]] [[ 77 141]] [[ 76 141]] [[ 75 142]] [[ 74 141]] [[ 69 141]] [[ 68 140]] [[ 64 140]] [[ 63 139]] [[ 57 139]] [[ 56 138]] [[ 51 138]] [[ 50 137]] [[ 43 
137]] [[ 42 136]] [[ 38 136]] [[ 37 135]] [[ 30 135]] [[ 29 134]] [[ 25 134]] [[ 24 133]] [[ 19 133]] [[ 18 132]] [[ 13[...]Contour FeaturesEvery contour has a number of features that you can calculate, including the area of the contour, it's orientation (the direction that most of the contour is pointing in), it's perimeter, and many other properties outlined in [OpenCV documentation, here](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.html).In the next cell, you'll be asked to identify the orientations of both the left and right hand contours. The orientation should give you an idea of which hand has its thumb up and which one has its thumb down! OrientationThe orientation of an object is the angle at which an object is directed. To find the angle of a contour, you should first find an ellipse that fits the contour and then extract the `angle` from that shape. ```python Fit an ellipse to a contour and extract the angle from that ellipse(x,y), (MA,ma), angle = cv2.fitEllipse(selected_contour)```**Orientation values**These orientation values are in degrees measured from the x-axis. A value of zero means a flat line, and a value of 90 means that a contour is pointing straight up!So, the orientation angles that you calculated for each contour should be able to tell us something about the general position of the hand. The hand with it's thumb up, should have a higher (closer to 90 degrees) orientation than the hand with it's thumb down. TODO: Find the orientation of each contour## TODO: Complete this function so that ## it returns the orientations of a list of contours ## The list should be in the same order as the contours ## i.e. the first angle should be the orientation of the first contour def orientations(contours): """ Orientation :param contours: a list of contours :return: angles, the orientations of the contours """ # Create an empty list to store the angles in # Tip: Use angles.append(value) to add values to this list angles = [] for selected_contour in contours: (x,y), (MA,ma), angle = cv2.fitEllipse(selected_contour) angles.append(angle) return angles # ---------------------------------------------------------- # # Print out the orientation values angles = orientations(contours) print('Angles of each contour (in degrees): ' + str(angles))Angles of each contour (in degrees): [61.08085632324219, 82.78831481933594]Bounding RectangleIn the next cell, you'll be asked to find the bounding rectangle around the *left* hand contour, which has its thumb up, then use that bounding rectangle to crop the image and better focus on that one hand!```python Find the bounding rectangle of a selected contourx,y,w,h = cv2.boundingRect(selected_contour) Draw the bounding rectangle as a purple boxbox_image = cv2.rectangle(contours_image, (x,y), (x+w,y+h), (200,0,200),2)```And to crop the image, select the correct width and height of the image to include.```python Crop using the dimensions of the bounding rectangle (x, y, w, h)cropped_image = image[y: y + h, x: x + w] ``` TODO: Crop the image around a contour## TODO: Complete this function so that ## it returns a new, cropped version of the original image def left_hand_crop(image, selected_contour): """ Left hand crop :param image: the original image :param selectec_contour: the contour that will be used for cropping :return: cropped_image, the cropped image around the left hand """ ## TODO: Detect the bounding rectangle of the left hand contour x,y,w,h = 
cv2.boundingRect(selected_contour) ## TODO: Crop the image using the dimensions of the bounding rectangle # Make a copy of the image to crop cropped_image = np.copy(image) cropped_image = cropped_image[y: y + h, x: x + w] return cropped_image ## TODO: Select the left hand contour from the list ## Replace this value selected_contour = contours[1] # ---------------------------------------------------------- # # If you've selected a contour if(selected_contour is not None): # Call the crop function with that contour passed in as a parameter cropped_image = left_hand_crop(image, selected_contour) plt.imshow(cropped_image)Huffman EncodingJust a little example of huffman coding1. Compute the character frequencies2. Build a Huffman Tree using a priority queue3. Encode the message using the generate tree4. Decode the compressed msg using the treefrom heapq import heapify, heappush, heappop from collections import defaultdict global codes codes = dict() class HuffNode(object): def __lt__(self, other): return self.weight < other.weight def __init__(self, weight, ltr=None): self.weight = weight self.ltr = ltr self.left = None self.right = None def calculate_frequencies(string): freqs = defaultdict(int) for c in string: freqs[c] += 1 return freqs def build_tree(frequencies): heap = [] for ltr, weight in frequencies.items(): node = HuffNode(weight, ltr) heappush(heap, node) while len(heap) > 1: n1 = heappop(heap) n2 = heappop(heap) weight = n1.weight + n2.weight root = HuffNode(weight) root.left = n1 root.right = n2 heappush(heap, root) tree = heappop(heap) return tree def generate_codes(node, code=''): if node.ltr: #leaf codes[node.ltr] = (node.weight, code) else: code += '0' generate_codes(node.left, code) code = code[:-1] code += '1' generate_codes(node.right, code) code = code[:-1] def print_coding_table(code_dict): print("Symbol\tWeight\tCode") for symbol, code in sorted(code_dict.items(), key=lambda item: item[1][0], reverse=True): print(symbol, *code, sep='\t') def encode(code_dict, msg): return ''.join([code_dict[ltr][1] for ltr in msg]) def decode(tree, compressed): current_node = tree uncompressed = '' for bit in compressed: if bit == '0': current_node = current_node.left elif bit == '1': current_node = current_node.right if current_node.ltr: uncompressed += current_node.ltr current_node = tree return uncompressed test_string = 'this is an example for huffman encoding' freqs = calculate_frequencies(test_string) tree = build_tree(freqs) generate_codes(tree) print_coding_table(codes) compressed = encode(codes, test_string) print("\nCompressed:\n", compressed, sep='') decompressed = decode(tree, compressed) print("\nDecompressed:\n", decompressed, sep='')Symbol Weight Code 6 110 n 4 000 a 3 1011 f 3 1010 i 3 1111 e 3 1110 m 2 0110 o 2 10011 s 2 0011 h 2 0010 t 1 10010 l 1 01001 r 1 10000 d 1 01111 g 1 01110 x 1 01010 u 1 01011 p 1 10001 c 1 01000 Compressed: 1001000101111001111011110011110101100011011100101010110110100010100111101101010100111000011000100101110101010011010110001101110000010001001101111111100001110 Decompressed: this is an example for huffman encodingDay 2: Password PhilosophyYour flight departs in a few days from the coastal airport; the easiest way down to the coast from here is via toboggan.The shopkeeper at the North Pole Toboggan Rental Shop is having a bad day. "Something's wrong with our computers; we can't log in!" 
You ask if you can take a look.Their password database seems to be a little corrupted: some of the passwords wouldn't have been allowed by the Official Toboggan Corporate Policy that was in effect when they were chosen.To try to debug the problem, they have created a list (your puzzle input) of passwords (according to the corrupted database) and the corporate policy when that password was set.For example, suppose you have the following list:```1-3 a: abcde1-3 b: cdefg2-9 c: ccccccccc```Each line gives the password policy and then the password. The password policy indicates the lowest and highest number of times a given letter must appear for the password to be valid. For example, 1-3 a means that the password must contain a at least 1 time and at most 3 times.In the above example, 2 passwords are valid. The middle password, , is not; it contains no instances of b, but needs at least 1. The first and third passwords are valid: they contain one a or nine c, both within the limits of their respective policies.How many passwords are valid according to their policies?with open("input.txt","r") as f: input_data = f.read().split("\n") def valid_pwd(reqpwd): temp, pwd = reqpwd.split(":") temp, letter = temp.split(" ") lower, upper = temp.split("-") return int(lower)<=sum([x == letter for x in pwd])<=int(upper) def count_valid(input_data): return sum([valid_pwd(reqpwd) for reqpwd in input_data]) count_valid(input_data)Part TwoWhile it appears you validated the passwords correctly, they don't seem to be what the Official Toboggan Corporate Authentication System is expecting.The shopkeeper suddenly realizes that he just accidentally explained the password policy rules from his old job at the sled rental place down the street! The Official Toboggan Corporate Policy actually works a little differently.Each policy actually describes two positions in the password, where 1 means the first character, 2 means the second character, and so on. (Be careful; Toboggan Corporate Policies have no concept of "index zero"!) Exactly one of these positions must contain the given letter. 
Other occurrences of the letter are irrelevant for the purposes of policy enforcement.Given the same example list from above:* 1-3 a: abcde is valid: position 1 contains a and position 3 does not.* 1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.* 2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.How many passwords are valid according to the new interpretation of the policies?def valid_pwd_2(reqpwd): temp, pwd = reqpwd.split(":") temp, letter = temp.split(" ") lower, upper = temp.split("-") return (int(pwd[int(lower)] == letter) + int(pwd[int(upper)] == letter)) == 1 def count_valid_2(input_data): return sum([valid_pwd_2(reqpwd) for reqpwd in input_data]) count_valid_2(input_data)For convenience, we'll further process this dataset by shortening the column names and adding a "Total" column:data.columns = ['East', 'West'] data['Total'] = data.eval('West + East')Now let's take a look at the summary statistics for this data:data.dropna().describe()Visualizing the dataWe can gain some insight into the dataset by visualizing it.Let's start by plotting the raw data:%matplotlib inline import seaborn seaborn.set() data.plot() plt.ylabel('Hourly Bicycle Count');The ~25,000 hourly samples are far too dense for us to make much sense of.We can gain more insight by resampling the data to a coarser grid.Let's resample by week:weekly = data.resample('W').sum() weekly.head() weekly.plot(style=['.-', '--', '-']) plt.ylabel('Weekly bicycle count');This shows us some interesting seasonal trends: as you might expect, people bicycle more in the summer than in the winter, and even within a particular season the bicycle use varies from week to week (likely dependent on weather; see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb) where we explore this further).Another way that comes in handy for aggregating the data is to use a rolling mean, utilizing the ``pd.rolling_mean()`` function.Here we'll do a 30 day rolling mean of our data, making sure to center the window:daily = data.resample('D').sum() daily.head() daily.plot(style=['.-', '--', '-']) plt.ylabel('Weekly bicycle count'); daily.rolling(120, center=True).std().plot(style=[':', '--', '-']) # Mena + Std plt.ylabel('mean hourly count'); daily.rolling(120, center=True).mean().plot(style=[':', '--', '-']) # Mena + Std plt.ylabel('mean hourly count');The jaggedness of the result is due to the hard cutoff of the window.We can get a smoother version of a rolling mean using a window function–for example, a Gaussian window.The following code specifies both the width of the window (we chose 50 days) and the width of the Gaussian within the window (we chose 10 days):daily.rolling(50, center=True,win_type='gaussian').sum(std=10).plot(style=[':', '--', '-']); daily.rolling(150, center=True,win_type='gaussian').sum(std=50).plot(style=[':', '--', '-']);Digging into the dataWhile these smoothed data views are useful to get an idea of the general trend in the data, they hide much of the interesting structure.For example, we might want to look at the average traffic as a function of the time of day.We can do this using the GroupBy functionality discussed in [Aggregation and Grouping](03.08-Aggregation-and-Grouping.ipynb):by_time = data.groupby(data.index.time).sum() by_time.head() plt.figure(figsize=(16,9)) hourly_ticks = 4 * 60 * 60 * np.arange(6) by_time.plot(xticks=hourly_ticks, style=[':', '--', '-']);The hourly traffic is a strongly bimodal distribution, with peaks around 8:00 in the morning and 5:00 in the 
evening.This is likely evidence of a strong component of commuter traffic crossing the bridge.This is further evidenced by the differences between the western sidewalk (generally used going toward downtown Seattle), which peaks more strongly in the morning, and the eastern sidewalk (generally used going away from downtown Seattle), which peaks more strongly in the evening.We also might be curious about how things change based on the day of the week. Again, we can do this with a simple groupby:by_weekday = data.groupby(data.index.dayofweek).sum() by_weekday.head() by_weekday.index = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'] by_weekday.plot(style=[':', '--', '-']);This shows a strong distinction between weekday and weekend totals, with around twice as many average riders crossing the bridge on Monday through Friday than on Saturday and Sunday.With this in mind, let's do a compound GroupBy and look at the hourly trend on weekdays versus weekends.We'll start by grouping by both a flag marking the weekend, and the time of day:weekend = np.where(data.index.weekday < 5, 'Weekday', 'Weekend') weekend[0:50] by_time = data.groupby([weekend, data.index.time]).mean() by_time.head() by_time.tail()Now we'll use some of the Matplotlib tools described in [Multiple Subplots](04.08-Multiple-Subplots.ipynb) to plot two panels side by side:fig, ax = plt.subplots(1, 2, figsize=(16, 8)) by_time.loc['Weekday'].plot(ax=ax[0], title='Weekdays', xticks=hourly_ticks, style=[':', '--', '-']) by_time.loc['Weekend'].plot(ax=ax[1], title='Weekends', xticks=hourly_ticks, style=[':', '--', '-']);**函数** 1 默认参数函数的参数中如果有默认参数,那么函数在定义的时候将被计算而不是等到函数被调用的时候bigx = 10 def double_times(x = bigx): return x * 2 bigx = 1000 double_times()在可变的集合类型中(list和dictionary)中,如果默认参数为该类型,那么所有的操作调用该函数的操作将会发生变化def foo(values, x=[]): for value in values: x.append(value) return x foo([0,1,2]) foo([4,5]) def foo_fix(values, x=[]): if len(x) != 0: x = [] for value in values: x.append(value) return x foo_fix([0,1,2]) foo_fix([4,5])2 global 参数x = 5 def set_x(y): x = y print 'inner x is {}'.format(x) set_x(10) print 'global x is {}'.format(x)inner x is 10 global x is 5x = 5 表明为global变量,但是在set_x函数内部中,出现了x,但是其为局部变量,因此全局变量x并没有发生改变。def set_global_x(y): global x x = y print 'global x is {}'.format(x) set_global_x(10) print 'global x now is {}'.format(x)global x is 10 global x now is 10通过添加global关键字,使得global变量x发生了改变。 3 Exercise Fibonacci sequence$F_{n+1}=F_{n}+F_{n-1}$ 其中 $F_{0}=0,F_{1}=1,F_{2}=1,F_{3}=2 \cdots$ + 递归版本 算法时间时间复杂度高达 $T(n)=n^2$def fib_recursive(n): if n == 0 or n == 1: return n else: return fib_recursive(n-1) + fib_recursive(n-2) fib_recursive(10)+ 迭代版本 算法时间复杂度为$T(n)=n$def fib_iterator(n): g = 0 h = 1 i = 0 while i < n: h = g + h g = h - g i += 1 return g fib_iterator(10)+ 迭代器版本 使用 yield 关键字可以实现迭代器def fib_iter(n): g = 0 h = 1 i = 0 while i < n: h = g + h g = h -g i += 1 yield g for value in fib_iter(10): print value,1 1 2 3 5 8 13 21 34 55+ 矩阵求解法 $$\begin{bmatrix}F_{n+1}\\F_{n}\end{bmatrix}=\begin{bmatrix}1&1\\1&0\end{bmatrix}\begin{bmatrix}F_{n}\\F_{n-1}\end{bmatrix}$$ 令$u_{n+1}=Au_{n}$ 其中 $u_{n+1}=\begin{bmatrix}F_{n+1}\\F_{n}\end{bmatrix}$通过矩阵的迭代求解$u_{n+1}=A^{n}u_{0}$,其中 $u_{0}=\begin{bmatrix}1 \\0 \end{bmatrix}$,对于$A^n$ 可以通过 $(A^{n/2})^{2}$ 方式求解,使得算法时间复杂度达到 $log(n)$import numpy as np a = np.array([[1,1],[1,0]]) def pow_n(n): if n == 1: return a elif n % 2 == 0: half = pow_n(n/2) return half.dot(half) else: half = pow_n((n-1)/2) return a.dot(half).dot(half) def fib_pow(n): a_n = pow_n(n) u_0 = np.array([1,0]) return a_n.dot(u_0)[1] 
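# pow_n() builds A**n by repeated squaring, so fib_pow(n) needs only O(log n) matrix products;
# fib_pow(10) below evaluates to 55, matching the iterative versions above.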
fib_pow(10)Quick Sortdef quick_sort(array): if len(array) < 2: return array else: pivot = array[0] left = [item for item in array[1:] if item < pivot] right = [item for item in array[1:] if item >= pivot] return quick_sort(left)+[pivot]+quick_sort(right) quick_sort([10,11,3,21,9,22])Clustering of Airbnb Lists Source of data: http://insideairbnb.com/get-the-data.html Load data into Pandasimport pandas as pd import numpy as np import matplotlib.pyplot as plt % matplotlib inline listings = pd.read_csv("data/Airbnb_listings_20171002.csv") listings.head(5) features = ['neighbourhood','room_type','price','minimum_nights','number_of_reviews','reviews_per_month','availability_365','last_review'] lists = listings[features] lists.info() lists.loc['last_review_datetime'] = pd.to_datetime(lists['last_review']) lists.info() Index: 8934 entries, 0 to last_review_datetime Data columns (total 8 columns): neighbourhood 8933 non-null object room_type 8933 non-null object price 8933 non-null object minimum_nights 8933 non-null object number_of_reviews 8933 non-null object reviews_per_month 7123 non-null float64 availability_365 8933 non-null object last_review 7123 non-null object dtypes: float64(1), object(7) memory usage: 628.2+ KBWhen "number_of_reviews" is zero, transform the "reviews_per_month" from null to zero.lists['reviews_per_month'] = np.where(lists['number_of_reviews'] == 0, 0, lists['reviews_per_month']) lists.head(10)Use apply function convert availability_365 from the number of days available to the percentage of the whole year that is availablelists['availability_365_perc'] = lists['availability_365'].apply(lambda x: np.round(x/365, 2)) lists.head(5)Data visualizationimport seaborn as sns sns.set_style("whitegrid") pricing = listings["price"].values roomType = listings['room_type'].values # sns.distplot(pricing) # sns.countplot(x="price", data=listings, palette="Greens_d") #PYTEST_VALIDATE_IGNORE_OUTPUT sns.barplot(x="room_type", y="price", data=listings); # this is how it works to prevent py.test --nbval mark here as an error: https://github.com/computationalmodelling/nbval/issues/6 #ax = sns.barplot(x="neighbourhood", y="availability_365_perc", data=lists) lists['neighbourhood'] = lists['neighbourhood'].astype('str') # listings.groupby('room_type').size().plot(kind = "bar") #lists.plot.hist(column = "price", bins = 10)One-hot encodinglists[['price','minimum_nights','number_of_reviews','availability_365_perc']] = lists[['price','minimum_nights','number_of_reviews','availability_365_perc']].apply(pd.to_numeric,errors='coerce') drop_columns = ['number_of_reviews','availability_365','last_review'] clean_lists = lists.drop(drop_columns, axis = 1) clean_lists.columns final_lists = pd.get_dummies(clean_lists) final_lists = final_lists.dropna()Define target variablefinal_lists['affordability'] = (final_lists['price'] > 150) target = final_lists['affordability'].values target[:2] target.mean() list_features_columns = list(final_lists.columns.values) list_features_columns.remove('affordability') list_features_columns.remove('price') list_features = final_lists[list_features_columns]Train-test splitfrom sklearn.cross_validation import train_test_split list_features_train, list_features_test, target_train, target_test = train_test_split(list_features, target, test_size = 0.3, random_state = 32 )Elbow method for optimal k for clusteringdistortions = [] for i in range(1, 11): km = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0) km.fit(list_features_train) 
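# km.inertia_ below is the within-cluster sum of squared distances to the closest centroid;
# collecting it for each k and looking for the "elbow" in the curve suggests a cluster count.
# Note: this cell assumes KMeans was already imported from sklearn.cluster
# (the explicit import appears in a later cell).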
distortions.append(km.inertia_) plt.plot(range(1,11), distortions, marker='o') plt.xlabel('Number of clusters') plt.ylabel('Distortion') plt.show()silhouette plot to test k = 3from matplotlib import cm from sklearn.metrics import silhouette_samples kmeans = KMeans(n_clusters=3, init='k-means++',n_init=10,max_iter=300, tol=1e-04, random_state=0) kmeans.fit(list_features_train) labels = kmeans.labels_ cluster_labels = np.unique(labels) n_clusters = cluster_labels.shape[0] silhouette_vals = silhouette_samples(list_features_train, labels, metric='euclidean') y_ax_lower, y_ax_upper = 0, 0 yticks = [] for i, c in enumerate(cluster_labels): c_silhouette_vals = silhouette_vals[labels == c] c_silhouette_vals.sort() y_ax_upper += len(c_silhouette_vals) color = cm.jet(float(i) / n_clusters) plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0, edgecolor='none', color=color) yticks.append((y_ax_lower + y_ax_upper) / 2.) y_ax_lower += len(c_silhouette_vals) silhouette_avg = np.mean(silhouette_vals) plt.axvline(silhouette_avg, color="red", linestyle="--") plt.yticks(yticks, cluster_labels + 1) plt.ylabel('Cluster') plt.xlabel('Silhouette coefficient') plt.show()silhouette plot to test k = 4kmeans = KMeans(n_clusters=4, init='k-means++',n_init=10,max_iter=300, tol=1e-04, random_state=0) kmeans.fit(list_features_train) labels = kmeans.labels_ cluster_labels = np.unique(labels) n_clusters = cluster_labels.shape[0] silhouette_vals = silhouette_samples(list_features_train, labels, metric='euclidean') y_ax_lower, y_ax_upper = 0, 0 yticks = [] for i, c in enumerate(cluster_labels): c_silhouette_vals = silhouette_vals[labels == c] c_silhouette_vals.sort() y_ax_upper += len(c_silhouette_vals) color = cm.jet(float(i) / n_clusters) plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0, edgecolor='none', color=color) yticks.append((y_ax_lower + y_ax_upper) / 2.) y_ax_lower += len(c_silhouette_vals) silhouette_avg = np.mean(silhouette_vals) plt.axvline(silhouette_avg, color="red", linestyle="--") plt.yticks(yticks, cluster_labels + 1) plt.ylabel('Cluster') plt.xlabel('Silhouette coefficient') plt.show()Cluster lists with KMeans, k = 3from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=3, random_state=0) kmeans.fit(list_features_train) kmeans.predict(list_features) kmeans.cluster_centers_.shape print ("cluster centers:") print (kmeans.cluster_centers_) labels = kmeans.labels_ list_features_train["cluster"] = labels list(list_features_train) g = sns.FacetGrid(list_features_train, col="cluster") g = g.map(plt.scatter, "reviews_per_month", "minimum_nights", marker=".")load the GAN, encoder, and part extractorgtype = 'stylegan' domain = 'car' nets = networks.define_nets(gtype, domain) compositer = compositions.get_compositer(domain)(nets)visualize the reconstruction, and masked reconstructionwith torch.no_grad(): im = nets.seed2image(1,seed=10) show.a(['Input Image', renormalize.as_image(im[0]).resize((256, 256), Image.ANTIALIAS)]) rec = nets.invert(im, mask=None) show.a(['Reconstructed', renormalize.as_image(rec[0]).resize((256, 256), Image.ANTIALIAS)]) show.flush() hints, mask = masking.mask_upsample(im, mask_cent=0.5 if gtype == 'proggan' else 0.) rec_mask = nets.invert(hints, mask=mask) show.a(['Input Masked', renormalize.as_image(hints[0]).resize((256, 256), Image.ANTIALIAS)]) show.a(['Rec. 
Masked', renormalize.as_image(rec_mask[0]).resize((256, 256), Image.ANTIALIAS)]) show.flush()randomly sample image parts, and compose themrng = np.random.RandomState(0) indices = rng.choice(compositer.total_samples, len(compositer.ordered_labels)) with torch.no_grad(): composite_data = compositer(indices) for im, m in zip(composite_data.parts_image, composite_data.parts_mask): show.a([renormalize.as_image(im).resize((128, 128), Image.ANTIALIAS)], cols=6) a, b, c = imutil.draw_masked_image(im[None], m[None], size=128) show.a([c], cols=6) show.flush() show.a(['Network Input', renormalize.as_image(composite_data.composite_image[0]).resize((256, 256), Image.ANTIALIAS)]) show.a(['Network Output', renormalize.as_image(composite_data.inverted_RGBM[0]).resize((256, 256), Image.ANTIALIAS)]) show.flush()VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import gmaps import os # Import API key from api_keys import g_keyStore Part I results into DataFrame* Load the csv exported in Part I to a DataFramedf = pd.read_csv('../output_data/RawData.csv') print(f'There are now {len(df)} cities in the dataframe') df.head()There are now 552 cities in the dataframeHumidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.# Note: NaN values were handled in export of csv data already but may as well do it again: df = df.dropna() # Store latitude and longitude in locations as float locations = df[["Latitude", "Longitude"]].astype(float) # Convert to float humidity = df["Humidity"].astype(float) print(f'There are {len(locations)} locations') print(f'There are {len(humidity)} humidity rows') assert (len(locations) == len(humidity)), "Locations and humidity do not have the same number of rows!" # Value verification - any rows outside of lat/long coordinates? print(f'Min Lat: {df["Latitude"].min()}') print(f'Max Lat: {df["Latitude"].max()}') print(f'Min Lng: {df["Longitude"].min()}') print(f'Max Lng: {df["Longitude"].max()}') # Configure gmaps gmaps.configure(api_key=g_key) # Plot Heatmap fig = gmaps.figure() heatmap_layer = gmaps.heatmap_layer(locations) # For max intensity in the heat map, try setting it to the highest humidity found in the data set. 
# Must be float intensity = df["Humidity"].max().astype(float) # Create heat layer heat_layer = gmaps.heatmap_layer( locations, weights=humidity, dissipating=False, max_intensity=intensity ) # Add layer fig.add_layer(heat_layer) # Display figure fig heat_layer = gmaps.heatmap_layer( locations, weights=humidity, dissipating=False, max_intensity=intensity, point_radius=1 )Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.import gmaps.datasets df = gmaps.datasets.load_dataset_as_df('earthquakes') # dataframe with columns ('latitude', 'longitude', 'magnitude') fig = gmaps.figure() heatmap_layer = gmaps.heatmap_layer( df[['latitude', 'longitude']], weights=df['magnitude'], max_intensity=99, point_radius=3.0 ) fig.add_layer(heatmap_layer) figHotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.# NOTE: Do not change any of the code in this cell # Using the template add the hotel marks to the heatmap info_box_template = """
<dl>
<dt>Name</dt>
<dd>{Hotel Name}</dd>
<dt>City</dt>
<dd>{City}</dd>
<dt>Country</dt>
<dd>{Country}</dd>
</dl>
""" # Store the DataFrame Row # NOTE: be sure to update with your DataFrame name hotel_info = [info_box_template.format(**row) for index, row in narrowed_city_df.iterrows()] locations = hotel_df[["Lat", "Lng"]] # Add marker layer ontop of heat map # Display MapKEN3140: Lab 4 (Part 2) Writing and executing basic SPARQL queries on a local RDF file Authors:+ [](https://www.maastrichtuniversity.nl/vincent.emonet): [](mailto:)+ [](https://www.maastrichtuniversity.nl/kody.moodley): [](mailto:) Affiliation: [Institute of Data Science](https://www.maastrichtuniversity.nl/research/institute-data-science) License:[CC-BY 4.0](https://creativecommons.org/licenses/by/4.0) Date:2021-09-06In addition to running SPARQL queries on a remote SPARQL endpoint on the Web, you can also execute SPARQL queries on RDF documents that you store locally on a file.We will use the Java RDF library [Apache Jena](https://jena.apache.org/) to demonstrate this. Follow these instructions to install the library:1. Copy the `apache-jena-3.16.0` folder from Lab 3 to the same directory as this notebook2. Run the next cell (it will import all jars in the `apache-jena-3.16.0/lib` folder)%jars apache-jena-*/lib/*.jar import org.apache.jena.riot.RDFDataMgr; import org.apache.jena.rdf.model.*; import org.apache.jena.util.PrintUtil; import org.apache.jena.vocabulary.RDF; import org.apache.jena.query.Query; import org.apache.jena.query.QueryExecution; import org.apache.jena.query.QueryExecutionFactory; import org.apache.jena.query.QueryFactory; import org.apache.jena.query.QuerySolution; import org.apache.jena.query.ResultSetFormatter; import org.apache.jena.query.ResultSet; import java.io.File; import org.apache.commons.io.FileUtils; import java.nio.charset.StandardCharsets;Setup: load the RDF file in an Apache Jena modelLoads the RDF file, and creates a `runSparqlQuery()` function to run SPARQL queries in a single call from a string or a file.Model model = RDFDataMgr.loadModel("family.ttl"); static void runSparqlQuery(String queryString, Model model) throws java.io.IOException { if (queryString.endsWith(".rq")) { queryString = FileUtils.readFileToString(new File(queryString), StandardCharsets.UTF_8); } System.out.println(queryString); Query query = QueryFactory.create(queryString); QueryExecution qexec = QueryExecutionFactory.create(query, model); ResultSetFormatter.out(qexec.execSelect(), model); }Write down a SPARQL query in a Java stringJava does not support multiline strings, which is why we need to use "+" to compile strings in a readable way here below.\We create a basic SPARQL select query to list all entities and their types in the graph (for those entities that actually specify one).String queryString = "PREFIX schema: \n" + "SELECT * WHERE{ \n" + "?entity a ?type .\n" + "}";Now we execute this query on our model and display the resultsrunSparqlQuery(queryString, model)PREFIX schema: SELECT * WHERE{ ?entity a ?type . } ---------------------------------- | entity | type | ================================== | family:Miranda | schema:Person | | family:Pierre | schema:Person | ----------------------------------Import a SPARQL query from a fileYou can also import a SPARQL query to execute from a file. This might be useful if you have a list of predefined SPARQL queries and you don't want to rewrite them each time. We have provided an example SPARQL query in a file **with the extension `.rq`**runSparqlQuery("query.rq", model)PREFIX schema: PREFIX family: SELECT * WHERE { # ?entity a ?type . 
# Line 1 ?entity a schema:Person ; # Line 2 schema:parent ?parent . # Line 3 } ------------------- | entity | parent | =================== -------------------Task 1: Comment out Line 1 ``query.rq`` and uncomment Lines 2-3. What inferences do you expect to receive for this query? After writing down what you think the results would be, execute the previous cell again to run the query again. Is this the result that you expected? The reason you get this unexpected answer is that Apache Jena "Model" class does not enable inference when running SPARQL queries. Task 2: By default, Apache Jena does not apply inference with SPARQL queries. In order to use inference, you need to explicitly indicate this by creating an instance of "InfModel" rather than the basic "Model".InfModel rdfsModel = ModelFactory.createRDFSModel(RDFDataMgr.loadModel("family.ttl"));Now, repeat Task 1 by commenting out the Line 1 in ``query.rq`` and uncomment Lines 2-3. Then, run the next cell and observe the results.runSparqlQuery("query.rq", rdfsModel)PREFIX schema: PREFIX family: SELECT * WHERE { # ?entity a ?type . # Line 1 ?entity a schema:Person ; # Line 2 schema:parent ?parent . # Line 3 } ------------------------------------ | entity | parent | ==================================== | family:Miranda | family:Mathilde | | family:Miranda | family:Pierre | ------------------------------------Введение в Bokeh Базовый пример создания самого обычного графика [2] и его отображения в ноутбуке:# bokeh basics from bokeh.plotting import figure from bokeh.io import show, output_notebook # Create a blank figure with labels p = figure(plot_width = 600, plot_height = 600, title = 'Example Glyphs', x_axis_label = 'X', y_axis_label = 'Y') # Example data squares_x = [1, 3, 4, 5, 8] squares_y = [8, 7, 3, 1, 10] circles_x = [9, 12, 4, 3, 15] circles_y = [8, 4, 11, 6, 10] # Add squares glyph p.square(squares_x, squares_y, size = 12, color = 'navy', alpha = 0.6) # Add circle glyph p.circle(circles_x, circles_y, size = 12, color = 'red') # Set to output the plot in the notebook output_notebook() # Show the plot show(p)Если импортить show из io, то отображение будет в ноутбукеimport pandas as pd from bokeh.models import ColumnDataSource, LabelSet, HoverTool from bokeh.plotting import figure, output_file from bokeh.io import output_notebook, show from bokeh.sampledata.periodic_table import elements elements = elements.copy() elements = elements[elements["atomic number"] <= 82] elements = elements[~pd.isnull(elements["melting point"])] mass = [float(x.strip("[]")) for x in elements["atomic mass"]] elements["atomic mass"] = mass palette = ["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"] melting_points = elements["melting point"] low = min(melting_points) high = max(melting_points) melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10 elements['melting_colors'] = [palette[i] for i in melting_point_inds] TITLE = "Density vs Atomic Weight of Elements (colored by melting point)" TOOLS = "pan,wheel_zoom,box_zoom,reset,save" p = figure(tools=TOOLS, toolbar_location="above", logo="grey", plot_width=1200, title=TITLE) p.background_fill_color = "#dddddd" p.xaxis.axis_label = "atomic weight (amu)" p.yaxis.axis_label = "density (g/cm^3)" p.grid.grid_line_color = "white" ### Используем hover для добавления информации при нажатии hover = HoverTool() hover.tooltips = [ ("name", "@name"), ("symbol:", "@symbol"), ("density", 
"@density"), ("atomic weight", "@{atomic mass}"), ("melting point", "@{melting point}") ] p.tools.append(hover) ### source = ColumnDataSource(elements) p.circle("atomic mass", "density", size=12, source=source, color='melting_colors', line_color="black", fill_alpha=0.8) labels = LabelSet(x="atomic mass", y="density", text="symbol", y_offset=8, text_font_size="8pt", text_color="#555555", source=source, text_align='center') p.add_layout(labels) # Save plot as html #output_file("elements.html", title="elements.py example") output_notebook() show(p)Goal to test binary classifier for delays, binning for target variable and give ranges for answersimport pandas as pd pd.set_option('display.max_columns', None) import numpy as np import random import matplotlib.pyplot as plt import seaborn as sns sns.set(font_scale=1.4) import copy import pickle df = pd.read_csv("data/flights_cleaned_no_outlier_iqr_with_delays.csv") df.head() working = [ # ran with what the test data can do 'fl_date', # get month and bin # 'op_unique_carrier', # most extensive name list # 'origin', # need 'origin' to merge weather but already merged! ;) # 'dest_airport_id', # not sure about this one 'crs_dep_time', # bin times # 'dep_time', # only using in TRAIN, to learn how other columns affect this # 'crs_arr_time', # 'arr_time', # only using in TRAIN, to learn how other columns affect this 'weather_type', # add weight values #'passengers', # not sure about this one 'arr_delay' # so we can make a target column... ] df_ = df.filter(items = working) df_ # Time weight: 0-500 = 1, 501-1000 = 8, 1001-1500 = 10, 1501-2000 = 8, 2001 > = 5 df_.crs_dep_time = df_.crs_dep_time // 100 crs_dep_time_remap = { 0: 0.10, 1: 0.10, 2: 0.10, 3: 0.10, 4: 0.10, 5: 0.10, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 0.50, 22: 0.50, 23: 0.50 } df_["dep_time_hour_weight"] = df_.crs_dep_time.map(crs_dep_time_remap) df_["month"] = [ i [5:7] for i in df_.fl_date ] df_ = df_.drop(labels="fl_date", axis=1) df_ = df_.drop(labels="crs_dep_time", axis=1) month_remap = { '10': 0.5, '11': 0.75, '12': 1, '01': 0.75 } df_["month_weight"] = df_.month.map(month_remap) df_ = pd.get_dummies(df_, columns=['weather_type'], drop_first=True) df_ = df_.drop(labels="month", axis=1) df_.fillna(0,inplace=True) # df_['binned']=pd.cut(x=df_['arr_delay'], bins=[-45,-30,-15,0,15,30,45],labels = [0,1,2,3,4,5]) tcriteria = [df_['arr_delay'].between(-10000, 0), df_['arr_delay'].between(0, 10000)] values = [0,30] df_['arr_delay'] = np.select(tcriteria, values, 0) df_ X = df_.drop('arr_delay',axis=1) # Features y = df_.arr_delay # Target variable # X=(X-X.min())/(X.max()-X.min()) # y.replace([np.inf, -np.inf], np.nan, inplace=True) # y.dropna() # X.replace([np.inf, -np.inf], np.nan, inplace=True) # X.dropna() y y.head(50) # importing train_test_split from sklearn from sklearn.model_selection import train_test_split # splitting the data x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # importing module from sklearn.linear_model import LinearRegression # creating an object of LinearRegression class LR = LinearRegression() # fitting the training data LR.fit(x_train,y_train) y_prediction = LR.predict(x_test) y_prediction # importing r2_score module from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error # predicting the accuracy score score=r2_score(y_test,y_prediction) print( 'r2 score is',score) print('mean_sqrd_error is 
==',mean_squared_error(y_test,y_prediction)) print('root_mean_squared error of is ==',np.sqrt(mean_squared_error(y_test,y_prediction)))r2 score is 0.006841228530647836 mean_sqrd_error is == 137.77564943879722 root_mean_squared error of is == 11.737787246274198Auto-tuning a Convolutional Network for Mobile GPU**Author**: ` `_, ` `_Auto-tuning for a specific device is critical for getting the bestperformance. This is a tutorial about how to tune a whole convolutionalnetwork.The operator implementation for Mobile GPU in TVM is written in template form.The template has many tunable knobs (tile factor, vectorization, unrolling, etc).We will tune all convolution, depthwise convolution and dense operatorsin the neural network. After tuning, we produce a log file which storesthe best knob values for all required operators. When the TVM compiler compilesthese operators, it will query this log file to get the best knob values.We also released pre-tuned parameters for some arm devices. You can go to`Mobile GPU Benchmark `_to see the results.Note that this tutorial will not run on Windows or recent versions of macOS. Toget it to run, you will need to wrap the body of this tutorial in a :code:`if__name__ == "__main__":` block. Install dependenciesTo use the autotvm package in tvm, we need to install some extra dependencies.(change "3" to "2" if you use python2):.. code-block:: bash pip3 install --user psutil xgboost tornado cloudpickleTo make TVM run faster during tuning, it is recommended to use cythonas FFI of tvm. In the root directory of tvm, execute(change "3" to "2" if you use python2):.. code-block:: bash pip3 install --user cython sudo make cython3Now return to python code. Import packages.import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.contrib.utils import tempdir import tvm.contrib.graph_runtime as runtimeDefine networkFirst we need to define the network in relay frontend API.We can load some pre-defined network from :code:`relay.testing`.We can also load models from MXNet, ONNX and TensorFlow.def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, 
output_shapeStart RPC TrackerTVM uses RPC session to communicate with ARM boards.During tuning, the tuner will send the generated code to the board andmeasure the speed of code on the board.To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.The RPC Tracker is a centralized controller node. We can register all devices tothe tracker. For example, if we have 10 phones, we can register all of themto the tracker, and run 10 measurements in parallel, accelerating the tuning process.To start an RPC tracker, run this command on the host machine. The tracker isrequired during the whole tuning process, so we need to open a new terminal forthis command:.. code-block:: bash python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190The expected output is.. code-block:: bash INFO:RPCTracker:bind to 0.0.0.0:9190 Register devices to RPC TrackerNow we can register our devices to the tracker. The first step is tobuild the TVM runtime for the ARM devices.* For Linux: Follow this section `build-tvm-runtime-on-device` to build the TVM runtime on the device. Then register the device to tracker by .. code-block:: bash python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399 (replace :code:`[HOST_IP]` with the IP address of your host machine)* For Android: Follow this `readme page `_ to install TVM RPC APK on the android device. Make sure you can pass the android RPC test. Then you have already registered your device. During tuning, you have to go to developer option and enable "Keep screen awake during changing" and charge your phone to make it stable.After registering devices, we can confirm it by querying rpc_tracker.. code-block:: bash python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,the output can be.. code-block:: bash Queue Status ---------------------------------- key total free pending ---------------------------------- mate10pro 2 2 0 rk3399 2 2 0 rpi3b 11 11 0 ----------------------------------You can register multiple devices to the tracker to accelerate the measurement in tuning. Set Tuning OptionsBefore tuning, we should apply some configurations. Here I use an RK3399 boardas example. In your setting, you should modify the target and device_key accordingly.set :code:`use_android` to True if you use android phone.#### DEVICE CONFIG #### target = tvm.target.Target("opencl -device=mali") # Replace "aarch64-linux-gnu" with the correct target of your board. # This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device. target_host = "llvm -mtriple=aarch64-linux-gnu" # Also replace this with the device key in your tracker device_key = "rk3399" # Set this to True if you use android phone use_android = False #### TUNING OPTION #### network = "resnet-18" log_file = "%s.%s.log" % (device_key, network) dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 1000, "early_stopping": 450, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"), runner=autotvm.RPCRunner( device_key, host="0.0.0.0", port=9190, number=10, timeout=5, ), ), }NoteHow to set tuning options In general, the default values provided here work well. If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, which makes the tuning run longer. If your device runs very slow or your conv2d operators have many GFLOPs, considering to set timeout larger. 
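As a rough illustration of the knobs mentioned in the note, here is a hedged variant of the tuning options above for a slow board. The dict name `tuning_option_slow_device` and the specific numbers are placeholders for illustration, not recommendations from the tutorial; only the option structure mirrors the code above.

# Illustrative only: more trials, a later early-stopping point, and a longer
# per-measurement timeout than the defaults defined above.
tuning_option_slow_device = {
    "log_filename": log_file,
    "tuner": "xgb",
    "n_trial": 2000,
    "early_stopping": 800,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
        runner=autotvm.RPCRunner(
            device_key,
            host="0.0.0.0",
            port=9190,
            number=10,
            timeout=20,  # seconds per measurement; raise this if kernels run slowly
        ),
    ),
}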
Begin TuningNow we can extract tuning tasks from the network and begin tuning.Here, we provide a simple utility function to tune a list of tasks.This function is just an initial implementation which tunes them in sequential order.We will introduce a more sophisticated tuning scheduler in the future.# You can skip the implementation of this function for this tutorial. def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb" or tuner == "xgb-rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # do tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file)Finally, we launch tuning jobs and evaluate the end-to-end performance.def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, target_host=target_host, params=params, ops=(relay.op.get("nn.conv2d"),), ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build( mod, target=target, params=params, target_host=target_host ) # export library tmp = tempdir() if use_android: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # upload module to device print("Upload...") remote = autotvm.measure.request_remote(device_key, "0.0.0.0", 9190, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) # upload parameters to device ctx = remote.context(str(target), 0) module = runtime.GraphModule(rlib["default"](ctx)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=30) prof_res = np.array(ftimer().results) * 1000 # convert to millisecond print( "Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), np.std(prof_res)) ) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option)*Based on a blog post by [](http://jakevdp.github.io/blog/2018/09/13/waiting-time-paradox/)* IntroductionBit of a lighter post after the MCMC series. 
Came across this article on the waiting time paradox, and I thought it would be a good chance to dosomething light-hearted amidst this COVID outbreak.We all know that gnawing irritation we feel when, somehow, things are not as rosy as advertised. Bus schedules say that buses come every 10 minutes on average, but why does it always feel like you just missed the bus? Schools say that their average class size is around 20, but how do you always end up in a sardine-packed class? Why do your friends on Facebook always have more friends than you do? -->Lies -->Welcome to the inspection paradox, or as the cited post puts it, the "why is my bus always late" paradox. What is the inspection paradox?The inspection paradox occurs when the probability of observing a quantity is related to the quantity being observed. This definition is pretty convoluted on it's own, so let's talk about an intuitive example - class size.Imagine a university cohort of 100 students with 3 different classes in each semester. Class 1 has 70 students, class 2 has 20, and class 3 has 10. From the university's perspective, the average class size is a straightforward $\frac{100}{3} = 33$. What happens if the students are surveyed? Now the story is completely different! By definition, the class size of 70 is experienced by 70 students, which biases the sample average! Instead of $\frac{100}{3} = 33$, we now have a weighted average of class size $(0.7 \cdot 70) + (0.2 \cdot 20) + (0.1 \cdot 10) = 54$ instead.In other words, the probability of an individual observing a class size of 70 is directly related to the probability of him/her being in that class! Seeing this in practiceLet's see this for ourselves using a very relatable example: bus arrival timings. First, let's simulate the arrival time of 100000 bus arrivals with 10 minutes as the average waiting time between buses. The timeline below shows the first 10 simulated bus arrivals assuming the arrivals are random.import numpy as np import matplotlib.pyplot as plt simulated_bus_count = 100000 time_between_buses = 10 random = np.random.RandomState(1) bus_arrival_times = simulated_bus_count * time_between_buses * np.sort(random.random(simulated_bus_count)) fig, ax = plt.subplots() plt.plot(bus_arrival_times[:10], [0]*10, '-o', color = 'blue') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['left'].set_visible(False) tmp = 1 for i in range(10): ax.annotate(round(bus_arrival_times[i], 1), (bus_arrival_times[i], 0.01*tmp)) tmp *= -1 plt.yticks([]) plt.title(f'Time of Bus Arrivals ({time_between_buses} minute average)') plt.show()Great! 
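Before checking the simulation, it helps to write down what the inspection paradox predicts here. A passenger arriving at a uniformly random time is more likely to land inside a long gap between buses, so the gap they experience follows the length-biased distribution of the inter-arrival time $T$, and on average they wait half of that biased gap:

$$E[\text{wait}] = \frac{E[T^2]}{2\,E[T]}$$

For completely random (Poisson) arrivals the gaps are exponentially distributed with mean 10 minutes, so $E[T^2] = 2 \cdot 10^2$ and the expected wait is $\frac{2 \cdot 10^2}{2 \cdot 10} = 10$ minutes, not the naive 5 minutes.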
Let's just make sure that, on average, the time between arrivals is 10 minutes.np.diff(bus_arrival_times).mean()Next, we simulate the arrival of passengers randomly along this range.random = np.random.RandomState(123) num_passengers = 100000 # Get passenger arrival time passenger_arrival_times = simulated_bus_count * time_between_buses * np.sort(random.random(num_passengers)) fig, ax = plt.subplots() plt.plot(bus_arrival_times[:20], [0]*20, '-o', color = 'blue') plt.plot(passenger_arrival_times[:10], [0]*10, '-o', color = 'red') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['left'].set_visible(False) tmp = 1 for i in range(0, 20, 3): ax.annotate(round(bus_arrival_times[i], 1), (bus_arrival_times[i], 0.01*tmp)) ax.annotate(round(passenger_arrival_times[i], 1), (bus_arrival_times[i], 0.01*-tmp)) plt.yticks([]) plt.title(f'Time of Bus and Passenger Arrivals') plt.show()We overlay our randomly generated passengers on our bus timeline. Using the np.searchsorted() function, we input the array of bus and passenger arrivals, and the function returns an `index` of bus_arrival_times for which inserting each passenger_arrival_times[i] preserves the ordering of the array. Since side = 'right', it returns the largest possible value of the index, or the nearest possible bus arrival for each passenger. Using this, we compute the average waiting time for each passenger.i = np.searchsorted(bus_arrival_times, passenger_arrival_times, side='right') average_waiting_time = (bus_arrival_times[i] - passenger_arrival_times).mean() print(average_waiting_time)10.061306085620576This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. 
Conclusions Key concepts in review Various approaches to AI What makes deep learning special within the field of machine learning How to think about deep learning Key enabling technologies The universal machine-learning workflow Key network architectures Densely-connected networksfrom tensorflow import keras from tensorflow.keras import layers inputs = keras.Input(shape=(num_input_features,)) x = layers.Dense(32, activation="relu")(inputs) x = layers.Dense(32, activation="relu")(x) outputs = layers.Dense(1, activation="sigmoid")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="binary_crossentropy") inputs = keras.Input(shape=(num_input_features,)) x = layers.Dense(32, activation="relu")(inputs) x = layers.Dense(32, activation="relu")(x) outputs = layers.Dense(num_classes, activation="softmax")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="categorical_crossentropy") inputs = keras.Input(shape=(num_input_features,)) x = layers.Dense(32, activation="relu")(inputs) x = layers.Dense(32, activation="relu")(x) outputs = layers.Dense(num_classes, activation="sigmoid")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="binary_crossentropy") inputs = keras.Input(shape=(num_input_features,)) x = layers.Dense(32, activation="relu")(inputs) x = layers.Dense(32, activation="relu")(x) outputs layers.Dense(num_values)(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="mse")Convnetsinputs = keras.Input(shape=(height, width, channels)) x = layers.SeparableConv2D(32, 3, activation="relu")(inputs) x = layers.SeparableConv2D(64, 3, activation="relu")(x) x = layers.MaxPooling2D(2)(x) x = layers.SeparableConv2D(64, 3, activation="relu")(x) x = layers.SeparableConv2D(128, 3, activation="relu")(x) x = layers.MaxPooling2D(2)(x) x = layers.SeparableConv2D(64, 3, activation="relu")(x) x = layers.SeparableConv2D(128, 3, activation="relu")(x) x = layers.GlobalAveragePooling2D()(x) x = layers.Dense(32, activation="relu")(x) outputs = layers.Dense(num_classes, activation="softmax")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="categorical_crossentropy")RNNsinputs = keras.Input(shape=(num_timesteps, num_features)) x = layers.LSTM(32)(inputs) outputs = layers.Dense(num_classes, activation="sigmoid")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="binary_crossentropy") inputs = keras.Input(shape=(num_timesteps, num_features)) x = layers.LSTM(32, return_sequences=True)(inputs) x = layers.LSTM(32, return_sequences=True)(x) x = layers.LSTM(32)(x) outputs = layers.Dense(num_classes, activation="sigmoid")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="binary_crossentropy")Transformersencoder_inputs = keras.Input(shape=(sequence_length,), dtype="int64") x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs) encoder_outputs = TransformerEncoder(embed_dim, dense_dim, num_heads)(x) decoder_inputs = keras.Input(shape=(None,), dtype="int64") x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs) x = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs) decoder_outputs = layers.Dense(vocab_size, activation="softmax")(x) transformer = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs) transformer.compile(optimizer="rmsprop", loss="categorical_crossentropy") inputs = keras.Input(shape=(sequence_length,), dtype="int64") x = 
PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs) x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x) x = layers.GlobalMaxPooling1D()(x) outputs = layers.Dense(1, activation="sigmoid")(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="binary_crossentropy")Titanic and Lusitania Data Miningimport pandas as pd import numpy as np import re import math from scipy.stats import chi2_contingency from collections import OrderedDict from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter %matplotlib inlineLoad the data into a Pandas DataFrame:titanic = pd.read_csv('../data/titanic.csv') lusit = pd.read_csv('../data/lusitania.csv')Observations:print "Titanic dataset contains {} instances and {} features".format(titanic.shape[0], titanic.shape[1]) print "Titanic dataset contains {} instances and {} features".format(lusit.shape[0], lusit.shape[1])Titanic dataset contains 1961 instances and 23 featuresMissing Values:titanic.isnull().sum() lusit.isnull().sum() titanic.columns = [col.replace(".", "_") for col in titanic.columns] lusit.columns = [col.lower().replace(" ", "_").replace("/", "_") for col in lusit.columns]Let's check the age column for any discrepancies then plot their distribution:titanic['age'].unique() fig, ax = plt.subplots(figsize=(8,5)) ax = sns.distplot(titanic['age'].dropna(), fit=norm, kde=False) ax.set_xlabel('Age (years)') ax.set_ylabel('Probability density') ax.set_title('Titanic age distribution and maximum likelihood gaussian distribution fit') plt.xlim(0,80) fig.tight_layout() plt.show() lusit['age'].unique()The lusitania age column is a mess and needs cleaning and processing:def clean_lusitAge(row): regex = r"(\d+)(\?)" if type(row) == str and "-" in row: row = round(float(row.split("-")[0])/12, 2) if type(row) == str: row = row.split(" ")[0] row = row.replace("Infant", "0.08").replace("_", "") row = re.sub(regex, r"\1", row) row = np.where(row == '?', 0, row) return row lusit['age'] = lusit['age'].apply(clean_lusitAge) lusit['age'] = lusit['age'].astype(float) fig, ax = plt.subplots(figsize=(8,5)) ax = sns.distplot(lusit['age'].dropna(), fit=norm, kde=False) ax.set_xlabel('Age (years)') ax.set_ylabel('Probability density') ax.set_title('Lusitania age distribution and maximum likelihood gaussian distribution fit') plt.xlim(0,80) fig.tight_layout() plt.show()Data Imputation & Feature Engineering: Creating new variablesprint "{} missing age values in Titanic dataset".format(titanic['age'].isnull().sum())263 missing age values in Titanic datasetIn the name column from Titanic, we have titles. I will extract the titles and map missing age values. 
Titles also may reflect social status and may help predict survival probability.def extract_titanicTitle(row): pattern = re.compile("(, .+?\.)") row = pattern.findall(row) row = row[0].replace(", ", "") return row titanic["title"] = titanic["name"].apply(extract_titanicTitle) titanicAge_dict = titanic.groupby(['title'])['age'].mean().to_dict() def fill_titanicAge(row): global titanicAge_dict age = row['age'] title = row['title'] if math.isnan(age): if title in titanicAge_dict.keys(): return titanicAge_dict[title] else: return age titanic['age'] = titanic.apply(fill_titanicAge, axis=1) print "{} missing age values in Lusitania dataset".format(lusit['age'].isnull().sum())653 missing age values in Lusitania datasetI will use the title column here as well to impute the missing age values.def clean_lusitTitle(row): row = row.replace("Mr. (later Sir)", "Sir") row = row.replace("Master (?)", "Master") row = "".join(row.split(" ")) return row lusit["title"] = lusit["title"].apply(clean_lusitTitle) lusitAge_dict = lusit.groupby(['title'])['age'].mean().to_dict() def fill_lusitAge(row): global lusitAge_dict age = row['age'] title = row['title'] if math.isnan(age) or age == 0.0: if title in lusitAge_dict.keys(): return lusitAge_dict[title] else: return age lusit["age"] = lusit.apply(fill_lusitAge, axis=1) titanic['life'] = titanic['survived'].apply(lambda x: 'survived' if x == 1 else 'deceased') def to_percent(y, position): # Ignore the passed in position. This has the effect of scaling the default # tick locations s = str(round(y/1309.0*100)) # The percent symbol needs escaping in latex if plt.rcParams['text.usetex'] is True: return s + r'$\%$' else: return s + '%' fig, ax = plt.subplots(figsize=(8,5)) # the histogram of the data titanic.groupby(['life'])['age'].plot.hist(alpha=0.5, ax=ax, legend = True, bins=23) formatter = FuncFormatter(to_percent) plt.gca().yaxis.set_major_formatter(formatter) ax.set_xlabel('Age Group') ax.set_ylabel('Percent') ax.set_title('Percent Survived vs Deceased by Age Group in Titanic Disasters') # Tweak spacing to prevent clipping of ylabel fig.tight_layout() plt.grid(True) plt.show() def to_percent(y, position): # Ignore the passed in position. 
This has the effect of scaling the default # tick locations s = str(round(y/1961.0*100)) # The percent symbol needs escaping in latex if plt.rcParams['text.usetex'] is True: return s + r'$\%$' else: return s + '%' fig, ax = plt.subplots(figsize=(8,5)) # the histogram of the data lusit.groupby(['fate'])['age'].plot.hist(alpha=0.5, ax=ax, legend = True, bins=23) formatter = FuncFormatter(to_percent) plt.gca().yaxis.set_major_formatter(formatter) ax.set_xlabel('Age Group') ax.set_ylabel('Percent') ax.set_title('Percent Survived vs Deceased by Age Group in Lusitania Disasters') # Tweak spacing to prevent clipping of ylabel fig.tight_layout() plt.grid(True) plt.show()Distribution of ages in groups of Titanic passengersg = sns.FacetGrid(titanic, row='survived', col='pclass') g.map(sns.distplot, "age") plt.grid(True) plt.show()Make age groups 0-12 child, 12-17 teenager, 17-60 adult, 60+ elderbins = [0, 12, 17, 60, np.inf] labels = ['child', 'teenager', 'adult', 'elder'] titanic['age_group'] = pd.cut(titanic.age, bins, labels=labels)Annotated heatmap of survivors by groups of passengers:df = titanic.pivot_table(index='embarked', columns='age_group', values='survived', aggfunc=np.mean) ax = sns.heatmap(df, annot=True, fmt=".1f")Annotated heatmap of Pearson correlation coefficients between features in Titanicax = sns.heatmap(titanic.corr(), annot=True, fmt=".2f")Distribution of survivors by age and sexax = sns.swarmplot(x="sex", y="age", hue="life", data=titanic)Group Statistics & Chi Square Testtitanic['deceased'] = titanic['survived'].apply(lambda x: 1 if x == 0 else 0) titanic['all'] = titanic['survived'] + titanic['deceased'] titanic['survival_rate'] = titanic['survived']/titanic['all']Survival Rate by SexFemales had a much higher survival rate than males. 
The p-value is 4.5899249369529454e-81, indicating a statistically significant difference at any reasonable significance level.table = pd.crosstab(titanic['sex'], titanic['life']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 4.58992493695e-81Survival Rate by Passenger ClassThere appears to be a strong relationship between class and survival rate: first-class had the highest survival rate, then second, then third.titanic.groupby(['pclass']).agg(OrderedDict([('deceased', 'sum'), ('survived', 'sum'), ('all', 'sum'), ('survival_rate', 'mean')])) table = pd.crosstab(titanic['pclass'], titanic['life']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 1.72082595883e-28Survival Rate by Age GroupThere appears to be a strong relationship between age and survival rate: The p-value is 4.24066535673e-06, indicating a statistically significant difference at any reasonable significance level.bins = np.linspace(titanic.age.min(), titanic.age.max(), 10) table = pd.crosstab(np.digitize(titanic.age, bins), titanic['life']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 4.24066535673e-06Survival Rate by Class and Sextitanic.groupby(['pclass','sex']).agg(OrderedDict([('deceased', 'sum'), ('survived', 'sum'), ('all', 'sum'), ('survival_rate', 'mean')]))Comparative Statistics: Lusitanialusit['lost'] = lusit['fate'].apply(lambda x: 1 if x == 'Lost' else 0) lusit['not_on_board'] = lusit['fate'].apply(lambda x: 1 if x == 'Not on board' else np.nan) lusit['saved'] = lusit['fate'].apply(lambda x: 1 if x == 'Saved' else 0) lusit['died_from_trauma'] = lusit['fate'].apply(lambda x: 1 if x == 'Saved (died from trauma)' else np.nan) lusit['all'] = lusit['value'] lusit['survival_rate'] = (lusit['all'] - lusit['lost']) / lusit['all']Survival Rate by SexThe p-value is greater than 0.05, indicating a statistically insignificant difference between survival rate of a female and male.table = pd.crosstab(lusit['sex'], lusit['saved']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 0.291182076147Survival Rate by Passenger vs CrewThe p-value is greater than 0.05, indicating a statistically insignificant difference between survival rate of a passenger vs crew. 
Let's drop the stowaways since we have no information on them!lusit = lusit[lusit['passenger_crew'] != 'Stowaway'] table = pd.crosstab(lusit['passenger_crew'], lusit['saved']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 0.059455252032Survival Rate by ClassPassenger accommodation:* saloon (first class)* second cabin (second class)* third classCrew accommodation:* deck* victualling* engineering The p-value is less than 0.05, indicating a statistically significant difference between survival rate of Class.lusit['department_class'] = lusit['department_class'].str.replace(r'Third \(Distressed British Seaman\)', 'Third') lusit.groupby(['department_class']).agg(OrderedDict([('lost', 'sum'), ('saved', 'sum'), ('all', 'count'), ('survival_rate', 'mean')])) table = pd.crosstab(lusit['department_class'], lusit['saved']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 0.0189448858527Survival Rate by Age GroupThere appears to be a strong relationship between age and survival rate: The p-value is 3.83316420923e-06, indicating a statistically significant difference at any reasonable significance level.bins = np.linspace(lusit.age.min(), lusit.age.max(), 10) table = pd.crosstab(np.digitize(lusit.age, bins), lusit['saved']) table chi, p, df, expected = chi2_contingency(table) print "p-value:", pp-value: 3.83316420923e-06Load dataX_train = np.load('./dataset-n15-X-train.npy') X_valid = np.load('./dataset-n15-X-validate.npy') y_train = np.load('./dataset-n15-y-train.npy') y_valid = np.load('./dataset-n15-y-validate.npy')Train dataset exploringvisualization.dataset_classes_summary(y_train, 'Train data')Total number of data: 34932275 Number of A: 6694158 Number of C: 4158060 Number of G: 4150797 Number of T: 6671367 Number of I: 13062694 Number of D: 195199Valid dataset exploringvisualization.dataset_classes_summary(y_valid, 'Valid data')Total number of data: 3881364 Number of A: 743924 Number of C: 461213 Number of G: 461596 Number of T: 741132 Number of I: 1452024 Number of D: 21475This notebook was prepared by [](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Implement insertion sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Is a naive solution sufficient? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmWikipedia's animation:![alt text](http://upload.wikimedia.org/wikipedia/commons/0/0f/Insertion-sort-example-300px.gif)* For each value index 1 to n - 1 * Compare with all elements to the left of the current value to determine new insertion point * Hold current value in temp variable * Shift elements from new insertion point right * Insert value in temp variable * BreakComplexity:* Time: O(n^2) average, worst. O(1) best if input is already sorted* Space: O(1) for the iterative solutionMisc: * In-place* StableInsertion sort works well for very small datasets where most of the input is already sorted. 
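For reference, a minimal sketch of the shift-based procedure listed above (the function name `insertion_sort_sketch` is ours; the solution notebook's own class follows in the Code section below):

def insertion_sort_sketch(data):
    """Shift-based insertion sort matching the steps listed above."""
    if data is None:
        raise TypeError('data cannot be None')
    for r in range(1, len(data)):
        temp = data[r]          # hold current value in a temp variable
        l = r - 1
        # shift elements greater than temp one slot to the right
        while l >= 0 and data[l] > temp:
            data[l + 1] = data[l]
            l -= 1
        data[l + 1] = temp      # insert held value at the new insertion point
    return data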
Codeclass InsertionSort(object): def sort(self, data): if data is None: raise TypeError('data cannot be None') if len(data) < 2: return data for r in range(1, len(data)): for l in range(r): if data[r] < data[l]: temp = data[r] data[l+1:r+1] = data[l:r] data[l] = temp return dataUnit Test%%writefile test_insertion_sort.py import unittest class TestInsertionSort(unittest.TestCase): def test_insertion_sort(self): insertion_sort = InsertionSort() print('None input') self.assertRaises(TypeError, insertion_sort.sort, None) print('Empty input') self.assertEqual(insertion_sort.sort([]), []) print('One element') self.assertEqual(insertion_sort.sort([5]), [5]) print('Two or more elements') data = [5, 1, 7, 2, 6, -3, 5, 7, -1] self.assertEqual(insertion_sort.sort(data), sorted(data)) print('Success: test_insertion_sort') def main(): test = TestInsertionSort() test.test_insertion_sort() if __name__ == '__main__': main() %run -i test_insertion_sort.pyNone input Empty input One element Two or more elements Success: test_insertion_sortNoisy SAR 2 groups - paper tabledataset_name="yago3_10" is_pca_version: bool = False true_prop_score_in_filter = 0.5 true_prop_score_other_list = [0.3, .7] # true_prop_scores = PropScoresTwoSARGroups( # in_filter=true_prop_score_in_filter, # other=true_prop_score_other # ) noisy_prop_score_in_filter: float = true_prop_score_in_filter noisy_prop_score_not_in_filter_list: List[float] = [0.1, 0.2, .3, .4, .5, .6, .7, .8, .9, 1] root_experiment_dir: str = os.path.join( get_root_dir_experiment_noisy_propensity_scores(), 'sar_two_subject_groups', dataset_name ) path_root_experiment_dir = Path(root_experiment_dir) true_prop_score_other_to_df_map: Dict[float, pd.DataFrame] = dict() df_list_complete: List[pd.DataFrame] = [] for true_prop_score_other in true_prop_score_other_list: true_prop_scores = PropScoresTwoSARGroups( in_filter=true_prop_score_in_filter, other=true_prop_score_other ) # df_list: List[pd.DataFrame] = [] for target_rel_path in path_root_experiment_dir.iterdir(): if target_rel_path.is_dir(): for filter_dir in target_rel_path.iterdir(): if filter_dir.is_dir(): target_relation = target_rel_path.name filter_relation = filter_dir.name print(f"{target_relation} - {filter_relation}") try: experiment_info = NoisyPropScoresSARExperimentInfo( dataset_name=dataset_name, target_relation=target_relation, filter_relation=filter_relation, true_prop_scores=true_prop_scores, noisy_prop_score_in_filter=noisy_prop_score_in_filter, noisy_prop_score_not_in_filter_list=noisy_prop_score_not_in_filter_list, is_pca_version=is_pca_version ) df_rule_wrappers_tmp = load_df_noisy_prop_scores_two_groups( experiment_info=experiment_info ) df_list_complete.append(df_rule_wrappers_tmp) except Exception as err: print(err) df_rule_wrappers_all_targets: pd.DataFrame = pd.concat(df_list_complete, axis=0) # true_prop_score_other_to_df_map[true_prop_score_other] = df_for_true_prop_score_other df_rule_wrappers_all_targets.head() df_rule_wrappers_all_targets.columns column_names_logistics: List[str] = [ 'target_relation', 'filter_relation', 'true_prop_scores_in_filter', 'true_prop_scores_not_in_filter', 'noisy_prop_scores_in_filter', 'noisy_prop_scores_not_in_filter', 'random_trial_index', "Rule" ] other_columns = [col for col in df_rule_wrappers_all_targets.columns if col not in column_names_logistics] resorted_columns = column_names_logistics + other_columns df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[resorted_columns] df_rule_wrappers_all_targets.head() 
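# Note: the loading loop above collects one DataFrame per (target relation, filter relation)
# pair for each true propensity-score setting before concatenating them; the column
# reordering only moves the bookkeeping columns (relations, propensity scores, trial index,
# rule) to the front, while the confidence columns keep their values and simply follow.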
df_rule_wrappers_all_targets.rename( columns={ 'true_prop_scores_in_filter': "true_filter", 'true_prop_scores_not_in_filter': "true_other", 'noisy_prop_scores_in_filter': "noisy_filter", 'noisy_prop_scores_not_in_filter': "noisy_other", }, inplace=True, errors="ignore" ) column_names_logistics: List[str] = [ 'target_relation', 'filter_relation', 'true_filter', 'true_other', 'noisy_filter', 'noisy_other', 'random_trial_index', "Rule" ] df_rule_wrappers_all_targets.head()2. Only keep a subset of rules 2.1. Only keep the non-recursive rules; drop recursive rulesfrom kbc_pul.data_structures.rule_wrapper import get_pylo_rule_from_string, is_pylo_rule_recursive def is_rule_recursive(rule_string: str) -> bool: pylo_rule: PyloClause = get_pylo_rule_from_string(rule_string) is_rule_recursive = is_pylo_rule_recursive(pylo_rule) return is_rule_recursive mask_recursive_rules = df_rule_wrappers_all_targets.apply( lambda row: is_rule_recursive(row["Rule"]), axis=1 ) print(len(df_rule_wrappers_all_targets)) df_rule_wrappers_all_targets: pd.DataFrame = df_rule_wrappers_all_targets[~mask_recursive_rules] print(len(df_rule_wrappers_all_targets))47530 153002.3 Drop the Pair-positive columns (both directions)df_rule_wrappers_all_targets.drop( [ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_S_TO_O.value, ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_O_TO_S.value], axis=1, inplace=True, errors='ignore' ) df_rule_wrappers_all_targets.head()2.4 Drop the IPW-PCA columns (both directions)df_rule_wrappers_all_targets.drop( [ConfidenceEnum.IPW_PCA_CONF_S_TO_O.value, ConfidenceEnum.IPW_PCA_CONF_O_TO_S.value], axis=1, inplace=True, errors='ignore' ) df_rule_wrappers_all_targets.head()2.4 Drop the $c_{q}=0.5$ columndf_rule_wrappers_all_targets.drop( ["true_filter", "noisy_filter"], axis=1, inplace=True, errors='ignore' ) column_names_logistics = [ col for col in column_names_logistics if col != "true_filter" and col != "noisy_filter" ] df_rule_wrappers_all_targets.head() group_by_list = [ "target_relation", "filter_relation", 'true_other', 'noisy_other', "Rule", "random_trial_index" ] df_count_trials: pd.DataFrame = df_rule_wrappers_all_targets[ [ "target_relation", "filter_relation", 'true_other', 'noisy_other', "Rule", "random_trial_index" ] ].groupby( [ "target_relation", "filter_relation", 'true_other', 'noisy_other', "Rule", ] ).count().reset_index() df_less_than_ten_trials: pd.DataFrame = df_count_trials[df_count_trials["random_trial_index"].values != 10] df_less_than_ten_trials df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[ ~( (df_rule_wrappers_all_targets["target_relation"] == "isaffiliatedto") & (df_rule_wrappers_all_targets["filter_relation"] == "wasbornin") & (df_rule_wrappers_all_targets["Rule"]=="isaffiliatedto(A,B) :- playsfor(A,B)") ) ] df_rule_wrappers_all_targets.head()**Now, we have the full dataframe****** Calculate $[conf(R) - \widehat{conf}(R)]$true_conf: ConfidenceEnum = ConfidenceEnum.TRUE_CONF conf_estimators_list: List[ConfidenceEnum] = [ ConfidenceEnum.CWA_CONF, ConfidenceEnum.ICW_CONF, ConfidenceEnum.PCA_CONF_S_TO_O, ConfidenceEnum.PCA_CONF_O_TO_S, ConfidenceEnum.IPW_CONF, ] all_confs_list: List[ConfidenceEnum] = [ConfidenceEnum.TRUE_CONF ] + conf_estimators_list column_names_all_confs: List[str] = [ conf.get_name() for conf in all_confs_list ] df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[ column_names_logistics + column_names_all_confs ] df_rule_wrappers_all_targets.head() df_conf_estimators_true_other = df_rule_wrappers_all_targets[ df_rule_wrappers_all_targets["true_other"] == 
df_rule_wrappers_all_targets["noisy_other"] ] df_conf_estimators_true_other.head() column_names_info =ColumnNamesInfo( true_conf=true_conf, column_name_true_conf=true_conf.get_name(), conf_estimators=conf_estimators_list, column_names_conf_estimators=[ col.get_name() for col in conf_estimators_list ], column_names_logistics=column_names_logistics ) def get_df_rulewise_squared_diffs_between_true_conf_and_conf_estimator( df_rule_wrappers: pd.DataFrame, column_names_info: ColumnNamesInfo ) -> pd.DataFrame: df_rulewise_diffs_between_true_conf_and_conf_estimator: pd.DataFrame = df_rule_wrappers[ column_names_info.column_names_logistics ] col_name_estimator: str for col_name_estimator in column_names_info.column_names_conf_estimators: df_rulewise_diffs_between_true_conf_and_conf_estimator \ = df_rulewise_diffs_between_true_conf_and_conf_estimator.assign( **{ col_name_estimator: ( (df_rule_wrappers[column_names_info.column_name_true_conf] - df_rule_wrappers[col_name_estimator]) ** 2 ) } ) return df_rulewise_diffs_between_true_conf_and_conf_estimator df_conf_squared_errors: pd.DataFrame = get_df_rulewise_squared_diffs_between_true_conf_and_conf_estimator( df_rule_wrappers=df_rule_wrappers_all_targets, column_names_info = column_names_info ) df_conf_squared_errors.head()AVERAGE the PCA(S) and PCA(O)df_conf_squared_errors["PCA"] = ( ( df_conf_squared_errors[ConfidenceEnum.PCA_CONF_S_TO_O.value] + df_conf_squared_errors[ConfidenceEnum.PCA_CONF_O_TO_S.value] ) / 2 ) df_conf_squared_errors.head() df_conf_squared_errors = df_conf_squared_errors.drop( columns=[ ConfidenceEnum.PCA_CONF_S_TO_O.value + ConfidenceEnum.PCA_CONF_O_TO_S.value ], axis=1, errors='ignore' ) df_conf_squared_errors.head()Now start averagingdf_conf_squared_errors_avg_over_trials: pd.DataFrame = df_conf_squared_errors.groupby( by=["target_relation", "filter_relation", 'true_other', "noisy_other", "Rule"], sort=True, as_index=False ).mean() df_conf_squared_errors_avg_over_trials.head() df_conf_squared_errors_avg_over_trials_and_rules: pd.DataFrame = df_conf_squared_errors_avg_over_trials.groupby( by=["target_relation", "filter_relation", 'true_other', "noisy_other",], sort=True, as_index=False ).mean() df_conf_squared_errors_avg_over_trials_and_rules.head() len(df_conf_squared_errors_avg_over_trials_and_rules)How many $p$, $q$ combinations are there?df_p_and_q = df_conf_squared_errors_avg_over_trials_and_rules[["target_relation", "filter_relation"]].drop_duplicates() df_p_and_q.head() len(df_p_and_q) df_conf_errors_avg_over_trials_and_rules_and_q: pd.DataFrame = df_conf_squared_errors_avg_over_trials_and_rules.groupby( by=["target_relation", 'true_other', "noisy_other",], sort=True, as_index=False ).mean() df_conf_errors_avg_over_trials_and_rules_and_q.head() len(df_conf_errors_avg_over_trials_and_rules_and_q)Subset of noisy_otherfirst_true_label_freq_to_include = 0.3 second_true_label_freq_to_include = 0.7 true_label_frequencies_set: Set[float] = { first_true_label_freq_to_include, second_true_label_freq_to_include, } true_label_frequency_to_estimate_map: Dict[float, Set[float]] = dict() label_frequency_est_diff: float = 0.1 label_frequencies_to_keep: Set[float] = set(true_label_frequencies_set) for true_label_freq in true_label_frequencies_set: true_label_frequency_to_estimate_map[true_label_freq] = { round(true_label_freq - label_frequency_est_diff, 1), round(true_label_freq + label_frequency_est_diff, 1) } label_frequencies_to_keep.update(true_label_frequency_to_estimate_map[true_label_freq]) 
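With the defaults above (true label frequencies 0.3 and 0.7 and an estimate offset of 0.1), this loop yields:

# true_label_frequency_to_estimate_map == {0.3: {0.2, 0.4}, 0.7: {0.6, 0.8}}
# label_frequencies_to_keep == {0.2, 0.3, 0.4, 0.6, 0.7, 0.8}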
df_conf_errors_avg_over_trials_and_rules_and_q_c_subset = df_conf_errors_avg_over_trials_and_rules_and_q[ df_conf_errors_avg_over_trials_and_rules_and_q["noisy_other"].isin(label_frequencies_to_keep) ] df_conf_errors_avg_over_trials_and_rules_and_q_c_subset.head() len(df_conf_errors_avg_over_trials_and_rules_and_q_c_subset)Count the rules per $p$df_n_rules_per_target = df_rule_wrappers_all_targets[["target_relation", "Rule"]].groupby( by=['target_relation'], # sort=True, # as_index=False )["Rule"].nunique().to_frame().reset_index().rename( columns={"Rule" : "# rules"} ) df_n_rules_per_target.head()**** Format pretty tableGoal:* put smallest value per row in BOLT* per target: mean_value 0.3 / 0.4true_label_freq_to_noisy_to_df_map: Dict[float, Dict[float, pd.DataFrame]] = dict() for true_label_freq in true_label_frequencies_set: df_true_tmp: pd.DataFrame = df_conf_errors_avg_over_trials_and_rules_and_q_c_subset[ df_conf_errors_avg_over_trials_and_rules_and_q_c_subset["true_other"] == true_label_freq ] noisy_label_freq_to_df_map = dict() true_label_freq_to_noisy_to_df_map[true_label_freq] = noisy_label_freq_to_df_map df_true_and_noisy_tmp = df_true_tmp[ df_true_tmp["noisy_other"] == true_label_freq ] noisy_label_freq_to_df_map[true_label_freq] = df_true_and_noisy_tmp[ [col for col in df_true_and_noisy_tmp.columns if col != "noisy_other" and col != "true_other"] ] for noisy_label_freq in true_label_frequency_to_estimate_map[true_label_freq]: df_true_and_noisy_tmp = df_true_tmp[ df_true_tmp["noisy_other"] == noisy_label_freq ] noisy_label_freq_to_df_map[noisy_label_freq] = df_true_and_noisy_tmp[ [col for col in df_true_and_noisy_tmp.columns if col != "noisy_other" and col != "true_other"] ] true_label_freq_to_noisy_to_df_map[first_true_label_freq_to_include][0.2].head() from typing import Iterator true_label_freq_to_df_map = dict() label_freq_estimators: Iterator[float] for true_label_freq in true_label_frequencies_set: noisy_to_df_map: Dict[float, pd.DataFrame] = true_label_freq_to_noisy_to_df_map[true_label_freq] df_true_label_freq: pd.DataFrame = noisy_to_df_map[true_label_freq] lower_est: float = round(true_label_freq - label_frequency_est_diff, 1) higher_est: float = round(true_label_freq + label_frequency_est_diff, 1) df_lower: pd.DataFrame = noisy_to_df_map[lower_est][ ['target_relation', ConfidenceEnum.IPW_CONF.value] ].rename( columns={ ConfidenceEnum.IPW_CONF.value: f"{ConfidenceEnum.IPW_CONF.value}_lower" } ) df_true_label_freq = pd.merge( left=df_true_label_freq, right=df_lower, on="target_relation" ) df_higher = noisy_to_df_map[higher_est][ ['target_relation', ConfidenceEnum.IPW_CONF.value] ].rename( columns={ ConfidenceEnum.IPW_CONF.value: f"{ConfidenceEnum.IPW_CONF.value}_higher" } ) df_true_label_freq = pd.merge( left=df_true_label_freq, right=df_higher, on="target_relation" ) true_label_freq_to_df_map[true_label_freq] = df_true_label_freq true_label_freq_to_df_map[0.3].head() for key, df in true_label_freq_to_df_map.items(): true_label_freq_to_df_map[key] = df.drop( columns=["random_trial_index"], axis=1, errors='ignore' ) df_one_row_per_target = pd.merge( left=true_label_freq_to_df_map[first_true_label_freq_to_include], right=true_label_freq_to_df_map[second_true_label_freq_to_include], on="target_relation", suffixes=(f"_{first_true_label_freq_to_include}", f"_{second_true_label_freq_to_include}") ) df_one_row_per_target.head()What is the smallest value?all_values: np.ndarray = df_one_row_per_target[ [ col for col in df_one_row_per_target.columns if col != "target_relation" ] 
].values min_val = np.amin(all_values) min_val min_val * 10000 max_val = np.amax(all_values) max_val max_val * 10000 df_one_row_per_target.head() * 10000 df_one_row_per_target.dtypes exponent = 4 multiplication_factor = 10 ** exponent multiplication_factor df_one_row_per_target[ df_one_row_per_target.select_dtypes(include=['number']).columns ] *= multiplication_factor df_one_row_per_target df_one_row_per_target.head()Output files definitionsdir_latex_table: str = os.path.join( kbc_e_metrics_project_dir, "paper_latex_tables", 'known_prop_scores', 'sar_two_groups' ) if not os.path.exists(dir_latex_table): os.makedirs(dir_latex_table) filename_tsv_rule_stats = os.path.join( dir_latex_table, "conf_error_stats_v3.tsv" ) filename_tsv_single_row_summary = os.path.join( dir_latex_table, "noisy_sar_two_groups_single_row_summary.tsv" )Create single-row summarydf_one_row_in_total: pd.Series = df_one_row_per_target.mean( ) df_one_row_in_total df_n_rules_per_target.head() df_one_row_in_total["# rules"] = int(df_n_rules_per_target["# rules"].sum()) df_one_row_in_total type(df_one_row_in_total) df_one_row_in_total.to_csv( filename_tsv_single_row_summary, sep = "\t", header=None )Now create a pretty tablecolumn_names_info.column_names_conf_estimators simplified_column_names_conf_estimators = ['CWA', 'PCA', 'ICW', 'IPW',] multi_index_columns = [ ("$p$", ""), ("\# rules", "") ] from itertools import product # conf_upper_cols = column_names_info.column_names_conf_estimators + [ # f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=-" + f"{label_frequency_est_diff}" + "$)", # f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=" + f"{label_frequency_est_diff}" + "$)", # ] conf_upper_cols = simplified_column_names_conf_estimators + [ f"{ConfidenceEnum.IPW_CONF.value} " + "($-\Delta$)", f"{ConfidenceEnum.IPW_CONF.value} " + "($+\Delta$)", ] c_subcols = ["$c_{\\neg q}=0.3$", "$c_{\\neg q}=0.7$"] multi_index_columns = multi_index_columns + list(product(c_subcols, conf_upper_cols)) # multi_index_list multi_index_columns = pd.MultiIndex.from_tuples(multi_index_columns) multi_index_columns rule_counter: int = 1 rule_str_to_rule_id_map: Dict[str, int] = {} float_precision: int = 1 col_name_conf_estimator: str pretty_rows: List[List] = [] row_index: int row: pd.Series # columns_to_use = [ # "$p$", # "\# rules" # ] + column_names_info.column_names_conf_estimators + [ # f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=-" + f"{label_frequency_est_diff}" + "$)", # f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=" + f"{label_frequency_est_diff}" + "$)", # ] LabelFreq = float def get_dict_with_smallest_estimator_per_label_freq(row: pd.Series) -> Dict[LabelFreq, Set[str]]: # Find estimator with smallest mean value for label frequency################### label_freq_to_set_of_smallest_est_map: Dict[LabelFreq, Set[str]] = dict() for label_freq in [first_true_label_freq_to_include, second_true_label_freq_to_include]: o_set_of_col_names_with_min_value: Optional[Set[str]] = None o_current_smallest_value: Optional[float] = None # Find smallest squared error for col_name_conf_estimator in simplified_column_names_conf_estimators: current_val: float = row[f"{col_name_conf_estimator}_{label_freq}"] # print(current_val) if o_set_of_col_names_with_min_value is None or o_current_smallest_value > current_val: o_set_of_col_names_with_min_value = {col_name_conf_estimator} o_current_smallest_value = current_val elif current_val == o_current_smallest_value: o_set_of_col_names_with_min_value.update(col_name_conf_estimator) 
label_freq_to_set_of_smallest_est_map[label_freq] = o_set_of_col_names_with_min_value return label_freq_to_set_of_smallest_est_map def format_value_depending_on_whether_it_is_smallest( value: float, is_smallest: bool, float_precision: float, use_si: bool = False )-> str: if is_smallest: if not use_si: formatted_value = "$\\bm{" + f"{value:0.{float_precision}f}" + "}$" # formatted_value = "$\\bm{" + f"{value:0.{float_precision}e}" + "}$" else: formatted_value = "\\textbf{$" + f"\\num[round-precision={float_precision},round-mode=figures,scientific-notation=true]"+\ "{"+ str(value) + "}"+ "$}" else: if not use_si: formatted_value = f"${value:0.{float_precision}f}$" # formatted_value = f"${value:0.{float_precision}e}$" else: formatted_value = "$" + f"\\num[round-precision={float_precision},round-mode=figures,scientific-notation=true]"+\ "{"+ str(value) + "}"+ "$" return formatted_value estimator_columns = simplified_column_names_conf_estimators + [ f"{ConfidenceEnum.IPW_CONF.value}_lower", f"{ConfidenceEnum.IPW_CONF.value}_higher" ] # For each row, i.e. for each target relation for row_index, row in df_one_row_per_target.iterrows(): # Find estimator with smallest mean value for label frequency################### label_freq_to_set_of_smallest_est_map: Dict[float, Set[str]] = get_dict_with_smallest_estimator_per_label_freq( row=row ) ################################################################################## # Construct the new row ###################### target_relation = row["target_relation"] nb_of_rules = df_n_rules_per_target[df_n_rules_per_target['target_relation'] == target_relation][ "# rules" ].iloc[0] new_row: List[str] = [ target_relation, nb_of_rules ] # For each Confidence estimator, get the value at c 0.3 and 0.7 # for col_name_conf_estimator in estimator_columns: # mean_val_03:float = row[f"{col_name_conf_estimator}_0.3"] # mean_val_07:float = row[f"{col_name_conf_estimator}_0.7"] # # new_row_value = ( # format_value_depending_on_whether_it_is_smallest( # value=mean_val_03, # is_smallest=col_name_conf_estimator == label_freq_to_smallest_est_map[0.3], # float_precision=float_precision # ) # + " / " # + format_value_depending_on_whether_it_is_smallest( # value=mean_val_07, # is_smallest=col_name_conf_estimator == label_freq_to_smallest_est_map[0.7], # float_precision=float_precision # ) # ) # new_row.append(new_row_value) for col_name_conf_estimator in estimator_columns: mean_val_03:float = row[f"{col_name_conf_estimator}_{first_true_label_freq_to_include}"] new_row_value_03 = format_value_depending_on_whether_it_is_smallest( value=mean_val_03, is_smallest=( col_name_conf_estimator in label_freq_to_set_of_smallest_est_map[first_true_label_freq_to_include] ), float_precision=float_precision ) new_row.append(new_row_value_03) for col_name_conf_estimator in estimator_columns: mean_val_07:float = row[f"{col_name_conf_estimator}_{second_true_label_freq_to_include}"] new_row_value_07 = format_value_depending_on_whether_it_is_smallest( value=mean_val_07, is_smallest=( col_name_conf_estimator in label_freq_to_set_of_smallest_est_map[second_true_label_freq_to_include] ), float_precision=float_precision ) new_row.append(new_row_value_07) pretty_rows.append(new_row) df_pretty: pd.DataFrame = pd.DataFrame( data=pretty_rows, columns=multi_index_columns ) df_pretty.head() df_pretty: pd.DataFrame = df_pretty.sort_values( by=["$p$"] ) df_pretty.head()To file# dir_latex_table: str = os.path.join( # kbc_e_metrics_project_dir, # "paper_latex_tables", # 'known_prop_scores', # 'scar' # ) # # if 
not os.path.exists(dir_latex_table): # os.makedirs(dir_latex_table) filename_latex_table: str = os.path.join( dir_latex_table, "confidence-error-table-sar-two-subject-groups-agg-per-p.tex" ) filename_tsv_table: str = os.path.join( dir_latex_table, "confidence-error-table-sar-two-subject-groups-agg-per-p.tsv" ) with open(filename_latex_table, "w") as latex_ofile: with pd.option_context("max_colwidth", 1000): latex_ofile.write( df_pretty.to_latex( column_format="lr|lllllll|lllllll", index=False, float_format="{:0.3f}".format, escape=False, # caption="$[widehat{conf}-conf]^2$ for SCAR. " # "std=standard confidence, " # "PCA (S) = PCA confidence with $s$ as domain, " # "PCA (O) = PCA confidence with $o$ as domain, " # "IPW = PCA confidence with $\hat{e}=e$, " # "IPW +/- $" + f"{label_frequency_est_diff:0.1}" + "$ = IPW confidence with $\hat{e}=e+/-" + f"{label_frequency_est_diff:0.1}" + "$." ) ) with open(filename_tsv_table, "w") as tsv_ofile: tsv_ofile.write(df_pretty.to_csv( index=False, sep="\t" )) print(filename_latex_table)/home/joschout/Documents/Repos/KUL-PUL/paper_latex_tables/known_prop_scores/sar_two_groups/confidence-error-table-sar-two-subject-groups-agg-per-p.texLab 3The goals of this lab are as follows: reinforce basic modeling concepts with MCNP/OpenMC, introduce energy bins for tallies, introduce non-monoenergetic sources, and compare results between 1-group and 2-group Neutron Diffusion Theory and MCNP/OpenMC. The domain comprises an infinite body of water at room temperature. A neutron point source is located at the origin. The point source is modeled as a low-energy (0.0253 eV) source, a high-energy (1 MeV) source, and a Watt fission spectrum representative of neutrons born from fission of U-235. Flux is measured on a series of concentric spheres around the origin and compared to the flux from 1- and 2-group diffusion theory. Diffusion Theory ResultsAs with Lab 2, it's a good approach to have a strong notion of what theory says your computational results should be **before** you undertake the computation. The (1-group) diffusion theory solution for flux from a point source in an infinite non-multiplying medium is:$$\phi(r) = \frac{Se^{-r/L}}{4 \pi D r}$$where $S$ is the source strength, $D$ is the diffusion coefficient for the medium (which is assumed to be homogeneous), and $L$ is the diffusion length for the (homogeneous) medium.For 2-group diffusion theory, the neutrons are segregated by energy into a high energy group (group 1) and a low energy group (group 2). In some contexts the low energy group is considered "Thermal" neutrons and so a subscript "T" is used to denote that flux. The solution in this case is:$$\phi_1 = \frac{Se^{-r/\sqrt{\tau_T}}}{4 \pi D_1 r}$$$$\phi_T = \frac{SL_T^2}{4 \pi r \bar{D}\left(L_T^2-\tau_T \right)}\left(e^{-r/L_T} - e^{-r/\sqrt{\tau_T}}\right)$$The constants in the above equations have analogous meanings as with 1-group diffusion theory albeit for their individual groups. 
The constants $\bar{D}$ and $L_T$ are the diffusion coefficent and diffusion length for "thermal" neutrons; this number varies with temperature as does the thermal neutron energy spectrum and can be obtained from standard data references.%matplotlib inline import matplotlib.pyplot as plt import numpy as np S = 1; #n/s, representative unit source strength D = 0.84; #cm, diffusion coefficient for water L = 59; # cm, diffusion length for water at 20 degrees C # one group flux def phi(r): return (S/(4.*np.pi*D*r))*np.exp(-r/L); # two-group result D1 = 1.016; # cm, diffusion coefficient for fast neutrons tau_t = 368; #cm, neutron age, water # group 1 flux for 2-group theory def phi_1(r): return (S/(4.*np.pi*D1*r))*np.exp(-r/np.sqrt(tau_t)); # thermal flux for 2-group theory def phi_t(r): const = (S*(L**2))/(4.*np.pi*r*D*(L**2 - tau_t)); return const*(np.exp(-r/L) - np.exp(-r/np.sqrt(tau_t))); rMin = 1e-3; rMax = 30; nR = 1000 R = np.linspace(rMin,rMax,nR); one_group_flux = phi(R); fast_flux = phi_1(R); thermal_flux = phi_t(R); plt.semilogy(R,one_group_flux,label='One Group',linestyle='-', linewidth=4); plt.semilogy(R,fast_flux,label='Fast Flux',linestyle='-.', linewidth=4); plt.semilogy(R,thermal_flux,label='Thermal Flux',linestyle='--', linewidth=4); plt.semilogy(R,fast_flux+thermal_flux, label='Fast + Thermal',linestyle=(0, (3, 5, 1, 5, 1, 5)), linewidth=4) plt.legend(); plt.grid(True); plt.title('One Group and Two Group Analytic Flux', fontsize=14,fontweight='bold'); plt.xlabel('R [cm]',fontsize=12,fontweight='bold'); plt.ylabel('$\phi$ [$n/cm^2-s$]',fontsize=12,fontweight='bold');As can be seen, at least under these conditions, the 1-group and 2-group (Fast + Thermal) flux results are comparable. OpenMC Modelimport openmcMaterialsThe domain under consideration is simply an infinite domain with water at 20 $^{\circ}$Cwater = openmc.Material(name='water'); water.add_nuclide('H1',2); water.add_nuclide('O16',1); water.set_density('g/cm3',1.0); water.add_s_alpha_beta('c_H_in_H2O'); mf = openmc.Materials([water]); mf.export_to_xml()GeometryThe domain is infinite, but I will set up some surfaces on which to obtain flux tallies. Per the lab directions I want a set of concentric spheres starting at 5 cm and going to 30 cm in 5cm increments# there's probably a fancier way to do this s1 = openmc.Sphere(r=5.); s2 = openmc.Sphere(r=10.); s3 = openmc.Sphere(r=15.); s4 = openmc.Sphere(r=20.); s5 = openmc.Sphere(r=25.); s6 = openmc.Sphere(r=30.); s7 = openmc.Sphere(r=100.,boundary_type='vacuum'); # outer region c1 = openmc.Cell() c1.fill = water; c1.region = -s1; c2 = openmc.Cell() c2.fill = water; c2.region = +s1 & -s2; c3 = openmc.Cell() c3.fill = water; c3.region = +s2 & -s3; c4 = openmc.Cell() c4.fill = water; c4.region = +s3 & -s4; c5 = openmc.Cell() c5.fill = water; c5.region = +s4 & -s5; c6 = openmc.Cell() c6.fill = water; c6.region = +s5 & -s6; c7 = openmc.Cell() c7.fill = water; c7.region = +s6 & -s7; root = openmc.Universe(); root.add_cells([c1,c2,c3,c4,c5,c6,c7]); g = openmc.Geometry(); g.root_universe = root; g.export_to_xml();TalliesI want surface flux tallies on each of the concentric spheres. 
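As an aside, a minimal sketch of what per-shell flux tallies could look like, using the cells c1-c6 defined in the geometry above; the names `shell_energy_filter`, `shell_filter`, and `shell_tally` are ours, and the cell that follows instead scores flux on a regular mesh with an energy filter.

# Sketch only: volume-averaged flux in each spherical shell, split into thermal/fast bins.
shell_energy_filter = openmc.EnergyFilter([0.0, 1.0, 20.0e6])
shell_filter = openmc.CellFilter([c1, c2, c3, c4, c5, c6])
shell_tally = openmc.Tally(name='shell flux')
shell_tally.filters = [shell_filter, shell_energy_filter]
shell_tally.scores = ['flux']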
Energy should be binned for thermal (0 to 1eV) and fast (1eV to 20MeV) flux.tallies = openmc.Tallies(); # object to hold all of the tallies energy_filter = openmc.EnergyFilter([0.0, 1.0, 20.0e6]); #energy filter for all tallies # define a regular mesh in 2D mesh = openmc.RegularMesh(); #mesh.n_dimension = 2; mesh.dimension = [241,241]; mesh.lower_left = [-30.,-30.]; mesh.upper_right = [30.,30.]; mesh_filter = openmc.MeshFilter(mesh); t = openmc.Tally(name='flux'); t.filters.append(energy_filter); t.filters.append(mesh_filter); t.scores = ['flux']; tallies.append(t); tallies.export_to_xml();Source and SettingsI will run this problem with 3 different sources: monoenergetic at 0.0253eV and 2 MeV; and a Watt Fission Spectrum with parameters for U235 fission.s1 = openmc.Source(); s1.particle = 'neutron'; s1.space = openmc.stats.Point(xyz=(0.,0.,0.)); s1.angle = openmc.stats.Isotropic(); s1.energy = openmc.stats.Discrete([0.0253],[1.0]); s1.strength = 1.; s2 = openmc.Source(); s2.particle = 'neutron'; s2.space = openmc.stats.Point(xyz=(0.,0.,0.)); s2.angle = openmc.stats.Isotropic(); s2.energy = openmc.stats.Discrete([2.0e6],[1.0]); s2.strength = 1.; s3 = openmc.Source(); s3.particle = 'neutron'; s3.space = openmc.stats.Point(xyz=(0.,0.,0.)); s3.angle = openmc.stats.Isotropic(); s3.energy = openmc.stats.Watt(a=0.988e6,b=2.249e-6); #watch units on constants settings = openmc.Settings(); settings.run_mode = 'fixed source'; settings.batches = 10; settings.particles = 5000000; settings.source = s3; settings.export_to_xml(); openmc.run()[...]Check ResultsOpen the state point file and see what we getsp = openmc.StatePoint('statepoint.10.h5'); sp.tallies tally = sp.get_tally(scores=['flux']); print(tally) tally.sum print(tally.mean.shape) (tally.mean, tally.std_dev) totalflux = tally.get_slice(scores=['flux']) nb = int(mesh.dimension[0]) totalflux.mean.shape = (2,nb,nb); totalflux.std_dev.shape = (2,nb,nb); tFlux = totalflux.mean[0,:,:]; tFlux_err = totalflux.std_dev[0,:,:]; fFlux = totalflux.mean[1,:,:]; fFlux_err = totalflux.std_dev[1,:,:]; fig = plt.subplot(221); fig.imshow(tFlux); fig2 = plt.subplot(222); fig2.imshow(tFlux_err); fig3 = plt.subplot(223); fig3.imshow(fFlux); fig4 = plt.subplot(224); fig4.imshow(fFlux_err); # consider the thermal flux tally with the 1-group flux result nBins = int((mesh.dimension[0] - 1)/2); print(nBins) tFlux_slice = totalflux.mean[0,nBins:,nBins] fFlux_slice = totalflux.mean[1,nBins:,nBins] npoints = nBins+1; r = np.linspace(0,30,num=npoints); fig = plt.subplot(111); fig.semilogy(r,tFlux_slice,label='thermal flux'); fig.semilogy(r,fFlux_slice,label='fast flux'); fig.semilogy(r,fFlux_slice+tFlux_slice,label='combined flux'); fig.semilogy(R,one_group_flux,label='one-group flux'); fig.grid(True); fig.legend(); totalflux.mean[1,nBins:,nBins].max()Fill nan# fill null values with the mean values of that feature df["tenure"].fillna(df["tenure"].mean(), inplace=True) # fill null
values with the mode values of that feature is repeated more often than any other df["SeniorCitizen"].fillna(df["SeniorCitizen"].mode()[0], inplace=True) df.isna().sum() corr=df.corr() plt.figure(figsize=(14,6)) sns.heatmap(corr,annot=True)Standardizationfrom sklearn.preprocessing import StandardScaler sc=StandardScaler() df['tenure']=sc.fit_transform(df['tenure'].values.reshape(-1,1)) df['MonthlyCharges']=sc.fit_transform(df['MonthlyCharges'].values.reshape(-1,1)) df['TotalCharges']=sc.fit_transform(df['TotalCharges'].values.reshape(-1,1))Label encodingdf['Partner'].dtype if df['Partner'].dtype == 'O': print('yes') else: print('no') from sklearn.preprocessing import LabelEncoder le = {} le_name_mapping = {} for i in df.columns: if df[i].dtype == 'O': le[i] = LabelEncoder() df[i] = le[i].fit_transform(df[i]) le_name_mapping[i] = dict(zip(le[i].classes_, le[i].transform(le[i].classes_))) print(i,":-",le_name_mapping[i]) df.head() corr=df.corr() plt.figure(figsize=(35,24)) sns.heatmap(corr,annot=True) corr X = df[['gender', 'SeniorCitizen', 'tenure', 'OnlineSecurity', 'Contract', 'PaperlessBilling', 'PaymentMethod', 'MonthlyCharges']] y = df['Churn'] y.value_counts()* The data is biased to 0 under samplingfrom sklearn.linear_model import LogisticRegression from imblearn.under_sampling import InstanceHardnessThreshold iht = InstanceHardnessThreshold(random_state=0, estimator=LogisticRegression( solver='lbfgs', multi_class='auto')) X_resampled, y_resampled = iht.fit_resample(X, y) #print(sorted(Counter(y_resampled).items()))Splitting the dataset into the Training set and Test set# Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size=0.33, random_state=42) print(y_train.shape) #y_train.value_counts()(2504,)* the ratio of label 0 = 73.696 % from y trainprint(y_test.shape) #y_test.value_counts()(1234,)* the ratio of label 0 = 72.989 % from y test Logistic Regression# Importing the libraries from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score # Fitting Logistic Regression to the Training set log_reg = LogisticRegression() log_reg.fit(X_train,y_train) # Predicting the Test set results y_pred = log_reg.predict(X_test) # accuracy print('Accuracy for train= ',round(log_reg.score(X_train,y_train),4) *100, '%') print('Accuracy for test = ',round(accuracy_score(y_test,y_pred),4) *100, '%') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. from sklearn.metrics import confusion_matrix cm_log_reg = confusion_matrix(y_test, y_pred) print(cm_log_reg) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred))Accuracy for train= 98.16 % Accuracy for test = 98.06 % [[637 0] [ 24 573]] precision recall f1-score support 0 0.96 1.00 0.98 637 1 1.00 0.96 0.98 597 micro avg 0.98 0.98 0.98 1234 macro avg 0.98 0.98 0.98 1234 weighted avg 0.98 0.98 0.98 1234KNNfrom sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(X_train,y_train) knn_pred=knn.predict(X_test) # Accuracy print('Accuracy for train = ',round(knn.score(X_train,y_train),4) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,knn_pred),4) *100, '%\n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. 
from sklearn.metrics import confusion_matrix cm_knn = confusion_matrix(y_test, knn_pred) print('Confusion Matrix :- \n',cm_knn) from sklearn.metrics import classification_report print(classification_report(y_test, knn_pred)) import sklearn.metrics as metrics score=[] for k in range(1,100): knn=KNeighborsClassifier(n_neighbors=k,weights='uniform') knn.fit(X_train,y_train) predKNN=knn.predict(X_test) accuracy=metrics.accuracy_score(predKNN,y_test) score.append(accuracy*100) print ('k = ',k,'-> accuracy : ',accuracy) print(score.index(max(score))+1,' : ',round(max(score),2),'%') m=score.index(max(score))+1 knn = KNeighborsClassifier(n_neighbors=m) knn.fit(X_train,y_train) knn_pred=knn.predict(X_test) # Accuracy print('Accuracy for train = ',round(knn.score(X_train,y_train),4) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,knn_pred),4) *100, '%\n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. from sklearn.metrics import confusion_matrix cm_knn = confusion_matrix(y_test, knn_pred) print('Confusion Matrix :- \n',cm_knn) from sklearn.metrics import classification_report print(classification_report(y_test, knn_pred)) train_accuracy=np.empty(len(range(1,100))) test_accuracy=np.empty(len(range(1,100))) for i, k in enumerate(range(1,100)): # Setup a k-NN Classifier with k neighbors: knn knn = KNeighborsClassifier(k) # Fit the classifier to the training data knn.fit(X_train, y_train) #Compute accuracy on the training set train_accuracy[i] = knn.score(X_train,y_train) #Compute accuracy on the testing set test_accuracy[i] = knn.score(X_test, y_test) # Generate plot plt.title('k-NN: Varying Number of Neighbors') plt.plot(range(1,100), test_accuracy, label = 'Testing Accuracy') plt.plot(range(1,100), train_accuracy, label = 'Training Accuracy') plt.legend() plt.xlabel('Number of Neighbors') plt.ylabel('Accuracy') plt.show()SVMfrom sklearn.svm import SVC svm_rbf=SVC(kernel='rbf').fit(X_train,y_train) svm_rbf_pred=svm_rbf.predict(X_test) # Accuracy print('Accuracy for train = ',round(svm_rbf.score(X_train, y_train),2) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,svm_rbf_pred),4) *100, '% \n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. cm_svm_rbf = confusion_matrix(y_test, svm_rbf_pred) print('Confusion Matrix :- \n',cm_svm_rbf) from sklearn.metrics import classification_report print(classification_report(y_test, svm_rbf_pred)) svm_linear=SVC(kernel='linear').fit(X_train,y_train) svm_pred=svm_linear.predict(X_test) # Accuracy print('Accuracy for train = ',round(svm_linear.score(X_train, y_train),3) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,svm_pred),4) *100, '% \n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. cm_svm_lin = confusion_matrix(y_test, svm_pred) print('Confusion Matrix :- \n',cm_svm_lin) from sklearn.metrics import classification_report print(classification_report(y_test, svm_pred)) svm_poly=SVC(kernel='poly').fit(X_train,y_train) svm_polr_pred=svm_poly.predict(X_test) # Accuracy print('Accuracy for train = ',round(svm_poly.score(X_train, y_train),3) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,svm_polr_pred),4) *100, '% \n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. 
cm_svm_polr = confusion_matrix(y_test, svm_polr_pred) print('Confusion Matrix :- \n',cm_svm_polr) from sklearn.metrics import classification_report print(classification_report(y_test, svm_polr_pred))/home/marie/anaconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning)Naive bayesfrom sklearn.naive_bayes import GaussianNB nb=GaussianNB().fit(X_train,y_train) nb_pred=nb.predict(X_test) # Accuracy print('Accuracy for train = ',round(nb.score(X_train, y_train),3) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,nb_pred),4) *100, '%\n') # Making the Confusion Matrix will contain the correct and incorrect prediction on the dataset. cm_nb = confusion_matrix(y_test, nb_pred) print('Confusion Matrix :- \n',cm_nb) from sklearn.metrics import classification_report print(classification_report(y_test, nb_pred)) # Fitting Decision Tree Classification to the Training set from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(criterion = 'entropy', random_state = 0) dt.fit(X_train, y_train) # Predicting the Test set results y_pred_dt = dt.predict(X_test) # Accuracy print('Accuracy for train = ',round(dt.score(X_train, y_train),3) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,y_pred_dt),3) *100, '%\n') # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm_dt = confusion_matrix(y_test, y_pred_dt) print('Confusion Matrix :- \n',cm_dt) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred_dt))Accuracy for train = 100.0 % Accuracy for test = 96.7 % Confusion Matrix :- [[620 17] [ 24 573]] precision recall f1-score support 0 0.96 0.97 0.97 637 1 0.97 0.96 0.97 597 micro avg 0.97 0.97 0.97 1234 macro avg 0.97 0.97 0.97 1234 weighted avg 0.97 0.97 0.97 1234RandomForestfrom sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score,confusion_matrix rfc=RandomForestClassifier(n_estimators=10,random_state=45,criterion='gini').fit(X_train,y_train) # Predicting the Test set results rfc_pred=rfc.predict(X_test) # Accuracy print('Accuracy for train = ',round(rfc.score(X_train, y_train),3) *100, '%\n') print('Accuracy for test = ',round(accuracy_score(y_test,rfc_pred),3) *100, '%\n') # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm_dt = confusion_matrix(y_test, rfc_pred) print('Confusion Matrix :- \n',cm_dt) from sklearn.metrics import classification_report print(classification_report(y_test, rfc_pred)) # pickle.dump(rfc,open("rfc.pkl","wb")) # my_rfc=pickle.load(open("rfc.pkl","rb"))Reproducible visualizationIn "The Functional Art: An introduction to information graphics and visualization" by , on page 12 we are presented with a visualization of UN data time series of Fertility rate (average number of children per woman) per country:Figure 1.6 Highlighting the relevant, keeping the secondary in the background.Let's try to reproduce this. Getting the dataThe visualization was done in 2012, but limited the visualization to 2010. This should make it easy, in theory, to get the data, since it is historical. These are directly available as excel spreadsheets now, we'll just ignore the last bucket (2010-2015). 
Pandas allows loading an excel spreadsheet straight from a URL, but here we will download it first so we have a local copy.!wget 'http://esa.un.org/unpd/wpp/DVD/Files/1_Indicators%20(Standard)/EXCEL_FILES/2_Fertility/WPP2015_FERT_F04_TOTAL_FERTILITY.XLS'--2015-12-29 16:57:23-- http://esa.un.org/unpd/wpp/DVD/Files/1_Indicators%20(Standard)/EXCEL_FILES/2_Fertility/WPP2015_FERT_F04_TOTAL_FERTILITY.XLS Resolving esa.un.org... 172.16.31.10 Connecting to esa.un.org|172.16.31.10|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 869376 (849K) [application/vnd.ms-excel] Saving to: 'WPP2015_FERT_F04_TOTAL_FERTILITY.XLS' WPP2015_FERT_F04_TO 100%[=====================>] 849.00K 184KB/s in 4.6s 2015-12-29 16:57:28 (184 KB/s) - 'WPP2015_FERT_F04_TOTAL_FERTILITY.XLS' saved [869376/869376]World Population Prospects: The 2015 Revision File FERT/4: Total fertility by major area, region and country, 1950-2100 (children per woman) ```Estimates, 1950 - 2015 POP/DB/WPP/Rev.2015/FERT/F04 July 2015 - Copyright © 2015 by United Nations. All rights reserved Suggested citation: United Nations, Department of Economic and Social Affairs, Population Division (2015). World Population Prospects: The 2015 Revision, DVD Edition.```df = pd.read_excel('WPP2015_FERT_F04_TOTAL_FERTILITY.XLS', skiprows=16, index_col = 'Country code') df = df[df.index < 900] len(df) df.head()First problem... The book states on page 8: -- "Using the filters the site offers, I asked for a table that included the more than 150 countries on which the UN has complete research."Yet we have 201 countries (codes 900+ are regions) with complete data. We do not have a easy way to identify which countries were added to this. Still, let's move forward and prep our data.df.rename(columns={df.columns[2]:'Description'}, inplace=True) df.drop(df.columns[[0, 1, 3, 16]], axis=1, inplace=True) # drop what we dont need df.head() highlight_countries = ['Niger','Yemen','India', 'Brazil','Norway','France','Sweden','United Kingdom', 'Spain','Italy','Germany','Japan', 'China' ] # Subset only countries to highlight, transpose for timeseries df_high = df[df.Description.isin(highlight_countries)].T[1:] # Subset the rest of the countries, transpose for timeseries df_bg = df[~df.Description.isin(highlight_countries)].T[1:]Let's make some art# background ax = df_bg.plot(legend=False, color='k', alpha=0.02, figsize=(12,12)) ax.xaxis.tick_top() # highlighted countries df_high.plot(legend=False, ax=ax) # replacement level line ax.hlines(y=2.1, xmin=0, xmax=12, color='k', alpha=1, linestyle='dashed') # Average over time on all countries df.mean().plot(ax=ax, color='k', label='World\naverage') # labels for highlighted countries on the right side for country in highlight_countries: ax.text(11.2,df[df.Description==country].values[0][12],country) # start y axis at 1 ax.set_ylim(ymin=1)For one thing, the line for China doesn't look like the one in the book. Concerning. The other issue is that there are some lines that are going lower than Italy or Spain in 1995-2000 and in 2000-2005 (majority in the Balkans) and that were not on the graph in the book, AFAICT:df.describe() df[df['1995-2000']<1.25] df[df['2000-2005']<1.25]Usage of the Event Detector by Liu et al. 
Load the required packages# Import public packages import sys import os from pathlib import Path from matplotlib import pyplot as plt import seaborn as sns sns.set(style="darkgrid") import glob from datetime import datetime, timedelta import numpy as np import pandas as pd from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from io import StringIO # Add src to the path for import project_dir = Path(os.getcwd()).resolve().parents[0] module_path = os.path.abspath(os.path.join(project_dir)) if module_path not in sys.path: sys.path.append(module_path) # Import private source code from Event_Detectors import EventDet_Liu_Ripple import BLUED_loader as blued # Activate Autoreload %load_ext autoreload %autoreload 2 %matplotlib notebookThe autoreload extension is already loaded. To reload it, use: %reload_ext autoreloadSet all global Parameters for the BLUED Dataset# Hardcoded Hyperparameters DATASET_LOCATION_BLUED = os.path.join("./Test_Data/") #Path to Test Data CURRENT_COLUMN = "Current B" NETWORK_FREQUENCY_BLUED = 60 SAMPLES_PER_SECOND_BLUED = NETWORK_FREQUENCY_BLUED #because we compute period wise datapoints SAMPLERATE_BLUED = 12000 # For the event detector init_dict_BLUED = {"median_filter_window" : 9, "q_ripple_window_size" : 10, "power_threshold" : 10, "perform_input_order_checks": True, "window_size_n" : 5 * 21 #5 times the q_ripple_window_size is an assumption } SAMPLES_FEATURE_PER_WINDOW_BLUED = init_dict_BLUED["window_size_n"] WINDOW_SIZE_SECONDS_BLUED = SAMPLES_FEATURE_PER_WINDOW_BLUED / SAMPLES_PER_SECOND_BLUED # Compute how big the window is regarding the raw samples --> this is used for the streamiong samples_raw_per_window_BLUED = SAMPLERATE_BLUED * WINDOW_SIZE_SECONDS_BLUED seconds_per_feature_sample = 1 / SAMPLES_PER_SECOND_BLUED BLUED_period = int(SAMPLERATE_BLUED / NETWORK_FREQUENCY_BLUED)Load and display the BLUED Test File# Get the Test File test_file = glob.glob(os.path.join(DATASET_LOCATION_BLUED, "*.txt"))[0] #get the full path of the test file # Load the Data from the test File data, file_info = blued.load_file(test_file) lable_path:str = glob.glob(os.path.join(DATASET_LOCATION_BLUED, "*.csv"))[0] data_start = file_info['file_start'] data_end = file_info['file_end'] labels = blued.load_labels(lable_path, data_start, data_end) current = data["Current"] voltage = data["Voltage"] # Plot the data from the test File _, ax = plt.subplots(figsize=(9.9,5)) plt.title("Full Current signal of Test File") plt.ylabel("Current") plt.xlabel("Time") ax.plot(current) ax.scatter(x=labels.index, y=np.zeros(len(labels.index)), color='r',zorder=100) ax.vlines(x=labels.index, color='r',ymin=-80, ymax=80, zorder=101) plt.show()Run the Event Detection on the Test Datafound_events=[] show_plots = False samples_remaining = len(current) # number of samples that we have not predicted yet window_start = 0 # offset of the next window # Step 1: Initialize the Event Detector with the Hypperparameter dictionary Liu = EventDet_Liu_Ripple(**init_dict_BLUED) #i.e. values are unpacked into the parameters Liu.fit() # Call the fit() method to further initialize the algorithm (required by the sklearn API) # As we will stream overlapping windows with a relative offset to each other we set this here relative_offset = Liu.q_ripple_window_size * 2 # is 2q and -1 because we need it to be negative relative_offset = (relative_offset / 60) * SAMPLERATE_BLUED # convert the relative offset that is relative to the feature domain # to the relative offset for the raw input domain. 
# we devide by 60 as we have 60 feature points per second to get the number of seconds and then we multpily by the sampling rate # to get the number of raw samples required. while samples_remaining >= samples_raw_per_window_BLUED: #while we still have samples to "stream" do the following window_stop = int(window_start + samples_raw_per_window_BLUED) # compute end index of the new window # Get the voltage and current windows voltage_window = voltage[window_start:window_stop] current_window = current[window_start:window_stop] window_start_timestamp = current_window.index[0] window_end_timestamp = current_window.index[-1] print(">"+ f" ({window_start}-{window_stop})") # Step 2: Use the feature computation function of the algorithm to compute the input features X = Liu.compute_input_signal(voltage=voltage_window, current=current_window, period_length=BLUED_period) # Plot the computed features if show_plots: plt.title("Computed input features for this window") plt.plot(X) # Step 3: Run the prediciton on the features events = Liu.predict(X) if len(events) > 0: # if an event is returned print(f"({voltage_window.index[0]} - {voltage_window.index[-1]})") event_timestamps = [] # Convert indices to timestamps for the postprocessing for event_index in events: event_timestamps.append(window_start_timestamp + timedelta(seconds=event_index * seconds_per_feature_sample)) # As the algorithm is very sensitive, we have applied postprocessing like the other reference algorithms # to filter out events that are detected within a certain time limit, i.e. here 1 second. # so events that fall within this timeframe are treated as one event. events_postprocessed = Liu.event_list_postprocessing(event_timestamps, postprocessing_limit=1) if len(events_postprocessed)>0: for event in events_postprocessed: print("Event Detected at " + str(event)) found_events.append(str(event)) if show_plots: time_delta = window_end_timestamp-window_start_timestamp time_delta_ms= int(time_delta.total_seconds()*10000) pos_event = int((event - window_start_timestamp).total_seconds()*10000) relative_pos = pos_event/time_delta_ms event_idx = int(len(X)*relative_pos) plt.vlines(x=[event_idx], color='red',ymin=np.min(X), ymax=np.max(X), zorder=101) if show_plots: plt.show() # We start at the end of the previous window window_start = int(window_stop - relative_offset) #print("+++++++++++++++++++++++++++++++++++") # We need to update the data points that remain for streaming now. samples_remaining -= samples_raw_per_window_BLUED _, ax = plt.subplots(figsize=(9.5,5)) plt.title("Full Current signal of Test File") plt.ylabel("Current") plt.xlabel("Time") ax.plot(current) ax.scatter(x=labels.index, y=np.zeros(len(labels.index)), color='r',zorder=100) ax.vlines(x=found_events, color='red',ymin=-80, ymax=80, zorder=101) #end of sliding window ax.vlines(x=data.index[window_stop], color='y',ymin=-80, ymax=80, zorder=101) plt.show()CUHK [STAT3009](https://www.bendai.org/STAT3009/) Notebook3: ALS for Latent Factor Models Object-Oriented Programming (OOP) in Python- define an RS as a `class` with `parameters` in Python- define `fit`, `predict` functions# warm-up with baseline methods class glb_mean(object): def __init__(self): self.glb_mean = 0 def fit(self, train_rating): self.glb_mean = np.mean(train_rating) def predict(self, test_pair): pred = np.ones(len(test_pair)) pred = pred*self.glb_mean return pred class user_mean(object): def __init__(self, n_user): self.n_user = n_user self.glb_mean = 0. 
self.user_mean = np.zeros(n_user) def fit(self, train_pair, train_rating): self.glb_mean = train_rating.mean() for u in range(self.n_user): ind_train = np.where(train_pair[:,0] == u)[0] if len(ind_train) == 0: self.user_mean[u] = self.glb_mean else: self.user_mean[u] = train_rating[ind_train].mean() def predict(self, test_pair): pred = np.ones(len(test_pair))*self.glb_mean j = 0 for row in test_pair: user_tmp, item_tmp = row[0], row[1] pred[j] = self.user_mean[user_tmp] j = j + 1 return pred class item_mean(object): def __init__(self, n_item): self.n_item = n_item self.glb_mean = 0. self.item_mean = np.zeros(n_item) def fit(self, train_pair, train_rating): self.glb_mean = train_rating.mean() for i in range(self.n_item): ind_train = np.where(train_pair[:,1] == i)[0] if len(ind_train) == 0: self.item_mean[i] = self.glb_mean else: self.item_mean[i] = train_rating[ind_train].mean() def predict(self, test_pair): pred = np.ones(len(test_pair))*self.glb_mean j = 0 for row in test_pair: user_tmp, item_tmp = row[0], row[1] pred[j] = self.item_mean[item_tmp] j = j + 1 return pred # define class for correlation based RS from numpy.linalg import norm from scipy.sparse import lil_matrix class cor_rs_user(object): def __init__(self, n_user, n_item): self.n_user = n_user self.n_item = n_item self.glb_mean = 0. self.user_mean = np.zeros(n_user) self.S = lil_matrix((n_user, n_user)) self.index_item = [] self.index_user = [] self.min_co = 3 def cossim(self, index_u, index_v, train_pair, train_rating): item_u = train_pair[index_u][:,1] item_v = train_pair[index_v][:,1] # find co-rating items by `set` item_co = list(set(item_u).intersection(set(item_v))) if len(item_co) < self.min_co: # a tuning parameter return 0.0 else: vec_u, vec_v = train_rating[index_u], train_rating[index_v] # find the co-rating vectors by using `np.isin` ind_co_u = [np.where(item_u == item_co_tmp)[0][0] for item_co_tmp in item_co] ind_co_v = [np.where(item_v == item_co_tmp)[0][0] for item_co_tmp in item_co] vec_co_u, vec_co_v = vec_u[ind_co_u], vec_v[ind_co_v] return np.dot(vec_co_u, vec_co_v) / (norm(vec_co_u)+1e-5) / (norm(vec_co_v)+1e-5) def sim_mat(self, train_pair, train_rating): self.index_item = [np.where(train_pair[:,1] == i)[0] for i in range(n_item)] self.index_user = [np.where(train_pair[:,0] == u)[0] for u in range(n_user)] for u in range(self.n_user): for v in range(u): if (len(self.index_user[u]) == 0) or (len(self.index_user[v]) == 0): continue weight_tmp = self.cossim(self.index_user[u],self.index_user[v],train_pair,train_rating) if weight_tmp > 0: self.S[u,v] = weight_tmp self.S = self.S + self.S.T def fit(self, train_pair, train_rating): self.glb_mean = train_rating.mean() # use another class to predict the user mean user_ave_method = user_mean(self.n_user) user_ave_method.fit(train_pair, train_rating) self.user_mean = user_ave_method.user_mean self.sim_mat(train_pair, train_rating) def predict(self, test_pair, train_pair, train_rating, top=10): pred = np.zeros(len(test_pair)) for j in range(len(test_pair)): user_tmp, item_tmp = test_pair[j,0], test_pair[j,1] index_tmp = self.index_item[item_tmp] rated_users = train_pair[index_tmp][:,0] rated_ratings = train_rating[index_tmp] sim_weight = self.S[user_tmp, rated_users].toarray()[0] ## only keep top 10 closest users top_ind = sim_weight.argsort()[-top:][::-1] sim_weight_knn = np.zeros(len(sim_weight)) sim_weight_knn[top_ind] = sim_weight[top_ind] if (len(rated_users) == 0) or (max(sim_weight_knn) == 0): # if no rated users or no similar users pred[j] = 
self.user_mean[user_tmp] else: pred[j] = np.sum(sim_weight_knn*rated_ratings) / np.sum(sim_weight_knn) return pred class cor_rs_item(object): def __init__(self, n_user, n_item): self.n_user = n_user self.n_item = n_item self.glb_mean = 0. self.item_mean = np.zeros(n_item) self.S = lil_matrix((n_item, n_item)) self.index_item = [] self.index_user = [] self.min_co = 3 def cossim(self, index_i, index_j, train_pair, train_rating): # index_u = np.where(train_pair[:,0] == u)[0] # index_v = np.where(train_pair[:,0] == v)[0] user_i = train_pair[index_i][:,0] user_j = train_pair[index_j][:,0] # find co-rating items by `set` user_co = list(set(user_i).intersection(set(user_j))) if len(user_co) < self.min_co: # a tuning parameter return 0.0 else: # find the co-rating vectors by using `np.where` vec_i, vec_j = train_rating[index_i], train_rating[index_j] ind_co_i = [np.where(user_i == user_co_tmp)[0][0] for user_co_tmp in user_co] ind_co_j = [np.where(user_j == user_co_tmp)[0][0] for user_co_tmp in user_co] vec_co_i, vec_co_j = vec_i[ind_co_i], vec_j[ind_co_j] return np.dot(vec_co_i, vec_co_j) / (norm(vec_co_i)+1e-5) / (norm(vec_co_j)+1e-5) def sim_mat(self, train_pair, train_rating): self.index_item = [np.where(train_pair[:,1] == i)[0] for i in range(n_item)] self.index_user = [np.where(train_pair[:,0] == u)[0] for u in range(n_user)] for i in range(self.n_item): for j in range(i): if (len(self.index_item[i]) == 0) or (len(self.index_item[j]) == 0): continue weight_tmp = self.cossim(self.index_item[i],self.index_item[j],train_pair,train_rating) if weight_tmp > 0: self.S[i,j] = weight_tmp self.S = self.S + self.S.T def fit(self, train_pair, train_rating): self.glb_mean = train_rating.mean() # use another class to predict the item mean item_ave_method = item_mean(self.n_item) item_ave_method.fit(train_pair, train_rating) self.item_mean = item_ave_method.item_mean self.sim_mat(train_pair, train_rating) def predict(self, test_pair, train_pair, train_rating, top=10): pred = np.zeros(len(test_pair)) for j in range(len(test_pair)): user_tmp, item_tmp = test_pair[j,0], test_pair[j,1] index_tmp = self.index_user[user_tmp] rated_items = train_pair[index_tmp][:,1] rated_ratings = train_rating[index_tmp] sim_weight = self.S[item_tmp, rated_items].toarray()[0] ## only keep top 10 closest users top_ind = sim_weight.argsort()[-top:][::-1] sim_weight_knn = np.zeros(len(sim_weight)) sim_weight_knn[top_ind] = sim_weight[top_ind] if (len(rated_items) == 0) or (max(sim_weight_knn) == 0): # if no rated items or no similar items pred[j] = self.item_mean[item_tmp] else: pred[j] = np.sum(sim_weight_knn*rated_ratings) / np.sum(sim_weight_knn) return pred def rmse(true, pred): return np.sqrt(np.mean((pred - true)**2))Load and pro-processed datasetimport numpy as np import pandas as pd dtrain = pd.read_csv('./dataset/train.csv') dtest = pd.read_csv('./dataset/test.csv') ## save real ratings for test set for evaluation. 
test_rating = np.array(dtest['rating']) ## remove the ratings in the test set to simulate prediction dtest = dtest.drop(columns='rating') ## convert string to user_id and item_id -> [user_id, item_id, rating] # pre-process for training data train_pair = dtrain[['user_id', 'movie_id']].values train_rating = dtrain['rating'].values # pre-process for testing set test_pair = dtest[['user_id', 'movie_id']].values n_user, n_item = max(train_pair[:,0].max(), test_pair[:,0].max())+1, max(train_pair[:,1].max(), test_pair[:,1].max())+1Define and training the predictive models based on `class`## baseline user mean methods user_ave = user_mean(n_user=n_user) user_ave.fit(train_pair=train_pair, train_rating=train_rating) pred_user = user_ave.predict(test_pair) print('RMSE for user_mean: %.3f' %rmse(test_rating, pred_user) ) ## baseline item mean methods item_ave = item_mean(n_item=n_item) item_ave.fit(train_pair=train_pair, train_rating=train_rating) pred_item = item_ave.predict(test_pair) print('RMSE for item_mean: %.3f' %rmse(test_rating, pred_item) ) ## Correlation-based RS (user) cor_user = cor_rs_user(n_user=n_user, n_item=n_item) cor_user.fit(train_pair=train_pair, train_rating=train_rating) pred_cor_user = cor_user.predict(test_pair, train_pair, train_rating) print('RMSE for corr_user: %.3f' %rmse(test_rating, pred_cor_user) ) ## Correlation-based RS (item) cor_item = cor_rs_item(n_user=n_user, n_item=n_item) cor_item.fit(train_pair=train_pair, train_rating=train_rating) pred_cor_item = cor_item.predict(test_pair, train_pair, train_rating) print('RMSE for corr_item: %.3f' %rmse(test_rating, pred_cor_item) ) ## Baseline + Correlation-based RS # glb mean glb_ave = glb_mean() glb_ave.fit(train_rating) pred = glb_ave.predict(test_pair) # user_mean train_rating_cm = train_rating - glb_ave.predict(train_pair) user_ave = user_mean(n_user=n_user) user_ave.fit(train_pair=train_pair, train_rating=train_rating_cm) train_rating_res = train_rating_cm - user_ave.predict(train_pair) pred = pred + user_ave.predict(test_pair) # fit correlation-based RS by residual ratings cor_user = cor_rs_user(n_user=n_user, n_item=n_item) cor_user.fit(train_pair=train_pair, train_rating=train_rating_res) pred = pred + cor_user.predict(test_pair, train_pair, train_rating_res, top=10) print('RMSE for glb + user_mean + cor_rs(user): %.3f' %rmse(test_rating, pred) ) ## Baseline + Correlation-based RS # glb mean glb_ave = glb_mean() glb_ave.fit(train_rating) pred = glb_ave.predict(test_pair) # item_mean train_rating_cm = train_rating - glb_ave.predict(train_pair) item_ave = item_mean(n_item=n_item) item_ave.fit(train_pair=train_pair, train_rating=train_rating_cm) train_rating_res = train_rating_cm - item_ave.predict(train_pair) pred = pred + item_ave.predict(test_pair) # fit correlation-based RS by residual ratings cor_item = cor_rs_item(n_user=n_user, n_item=n_item) cor_item.fit(train_pair=train_pair, train_rating=train_rating_res) pred = pred + cor_item.predict(test_pair, train_pair, train_rating_res, top=10) print('RMSE for glb + item_mean + cor_rs(item): %.3f' %rmse(test_rating, pred) )RMSE for glb + user_mean + cor_rs(item): 1.017Cross-validation for glb + user_mean + cor_rsfrom sklearn.model_selection import KFold import itertools cv=3 kf = KFold(n_splits=cv, shuffle=True) df = {'top': [], 'valid_rmse': []} for top_tmp in [5, 10]: valid_rmse_tmp = 0.0 for train_index, valid_index in kf.split(train_pair): train_pair_cv, train_rating_cv = train_pair[train_index], train_rating[train_index] valid_pair_cv, valid_rating_cv = 
train_pair[valid_index], train_rating[valid_index] # glb mean glb_ave = glb_mean() glb_ave.fit(train_rating_cv) pred = glb_ave.predict(valid_pair_cv) # user_mean train_rating_cm = train_rating_cv - glb_ave.predict(train_pair_cv) user_ave = user_mean(n_user=n_user) user_ave.fit(train_pair=train_pair_cv, train_rating=train_rating_cm) train_rating_res = train_rating_cm - user_ave.predict(train_pair_cv) pred = pred + user_ave.predict(valid_pair_cv) # fit correlation-based RS by residual ratings cor_user = cor_rs_user(n_user=n_user, n_item=n_item) cor_user.fit(train_pair=train_pair_cv, train_rating=train_rating_res) pred = pred + cor_user.predict(valid_pair_cv, train_pair_cv, train_rating_res, top=top_tmp) score_tmp = rmse(valid_rating_cv, pred) valid_rmse_tmp += score_tmp / cv print('Valid_RMSE for glb + user_mean + cor_rs(user) with top = %d: %.3f' %(top_tmp, score_tmp)) df['top'].append(top_tmp) df['valid_rmse'].append(valid_rmse_tmp) best_top = df['top'][np.argmin(df['valid_rmse'])] print('the best model is top: %d' %best_top) ## refit with the full train set ## Baseline + Correlation-based RS # glb mean glb_ave = glb_mean() glb_ave.fit(train_rating) pred = glb_ave.predict(test_pair) # user_mean train_rating_cm = train_rating - glb_ave.predict(train_pair) user_ave = user_mean(n_user=n_user) user_ave.fit(train_pair=train_pair, train_rating=train_rating_cm) train_rating_res = train_rating_cm - user_ave.predict(train_pair) pred = pred + user_ave.predict(test_pair) # fit correlation-based RS by residual ratings cor_user = cor_rs_user(n_user=n_user, n_item=n_item) cor_user.fit(train_pair=train_pair, train_rating=train_rating_res) pred = pred + cor_user.predict(test_pair, train_pair, train_rating_res, top=best_top) print('RMSE for glb + user_mean + cor_rs(user): %.3f' %rmse(test_rating, pred) )RMSE for glb + user_mean + cor_rs(user): 1.004Implementation for PCA example**C**omponent **A**nalysis for **D**imensionality **R**eduction Installing required packagesfrom IPython.display import clear_output !pip install --upgrade pip !pip install findspark !pip install pyspark clear_output(wait=False)Importing global objectsimport findspark, pyspark from pyspark.sql import SparkSession from pyspark import SparkFilesGlobal SettingsNeeded for environments not Databricksfindspark.init() spark = SparkSession.builder.getOrCreate()Reading data sourceurl = 'https://raw.githubusercontent.com/edsonlourenco/public_datasets/main/Carros.csv' spark.sparkContext.addFile(url) csv_cars = SparkFiles.get("Carros.csv") df_cars = spark.read.csv(csv_cars, header=True, inferSchema=True, sep=';')Checking **data**df_cars.orderBy('Consumo').show(truncate=False)+-------+---------+-----------+---------------+----+-----+---------+-----------+-------+-----------+---+ |Consumo|Cilindros|Cilindradas|RelEixoTraseiro|Peso|Tempo|TipoMotor|Transmissao|Marchas|Carburadors|HP | +-------+---------+-----------+---------------+----+-----+---------+-----------+-------+-----------+---+ |15 |8 |301 |354 |357 |146 |0 |1 |5 |8 |335| |21 |6 |160 |39 |2875|1702 |0 |1 |4 |4 |110| |21 |6 |160 |39 |262 |1646 |0 |1 |4 |4 |110| |26 |4 |1203 |443 |214 |167 |0 |1 |5 |2 |91 | |104 |8 |472 |293 |525 |1798 |0 |0 |3 |4 |205| |104 |8 |460 |3 |5424|1782 |0 |0 |3 |4 |215| |133 |8 |350 |373 |384 |154[...]Transform VectorAssembler Importing **VectorAssembler** classfrom pyspark.ml.feature import VectorAssemblerDoing transformation and creating features columnvectas = VectorAssembler(inputCols=[ "Consumo", "Cilindros", "Cilindradas", "RelEixoTraseiro", "Peso", 
"Tempo", "TipoMotor", "Transmissao", "Marchas", "Carburadors" ], outputCol="features") df_cars_vet = vectas.transform(df_cars) df_cars_vet.orderBy('Consumo').select('features').show(truncate=False)+-----------------------------------------------------+ |features | +-----------------------------------------------------+ |[15.0,8.0,301.0,354.0,357.0,146.0,0.0,1.0,5.0,8.0] | |[21.0,6.0,160.0,39.0,2875.0,1702.0,0.0,1.0,4.0,4.0] | |[21.0,6.0,160.0,39.0,262.0,1646.0,0.0,1.0,4.0,4.0] | |[26.0,4.0,1203.0,443.0,214.0,167.0,0.0,1.0,5.0,2.0] | |[104.0,8.0,472.0,293.0,525.0,1798.0,0.0,0.0,3.0,4.0] | |[104.0,8.0,460.0,3.0,5424.0,1782.0,0.0,0.0,3.0,4.0] | |[133.0,8.0,350.0,373.0,384.0,1541.0,0.0,0.0,3.0,4.0] | |[143.0,8.0,360.0,321.0,357.0,1584.0,0.0,0.0,3.0,4.0] | |[147.0,8.0,440.0,323.0,5345.0,1742.0,0.0,0.0,3.0,4.0]| |[152.0,8.0,2758.0,307.0,378.0,18.0,0.0,0.0,3.0,3.0] | |[152.0,8.0,304.0,315.0,3435.0,173.0,0.0,0.0,3.0,2.0] | |[155.0,8.0,318.0,276.0,352.0,1687.0,0.0,0.0,3.0,2.0] | |[158.0,8.0,351.0,422.0,317.0,145.0,0.0,1.0,5.0,4.0] | |[164.0,8.0,2758.0,307.0,407.0,174.0,0.0,0.0,3.0,3.0] | |[173.0,8.0,2758.0,307.0,373.0,176.0,0.0,0.0,3.0[...]Importing **PCA** classfrom pyspark.ml.feature import PCA pca = PCA(k=3, inputCol="features", outputCol="features_pca") model = pca.fit(df_cars_vet)Transforming PCAdf_result = model.transform(df_cars_vet)Checking **data**df_result.select('features_pca').show(truncate=False)+-----------------------------------------------------------+ |features_pca | +-----------------------------------------------------------+ |[618.7707206779614,-937.712394997354,1231.9633529945509] | |[3112.9887675342206,-161.05746385491523,1191.861991305438] | |[640.4959007710695,-1120.7188865110418,1320.0756315189049] | |[3466.0956877556678,-149.69421418298342,1401.2041780368531]| |[661.4577445758732,-812.4592128844115,1395.2949328316356] | |[769.234367178774,-1120.4160559477316,1518.7436632279525] | |[644.8369503533214,-727.9539376169615,1313.681521097935] | |[9.101880661709801,1061.295403667789,1045.1710500215693] | |[67.13360966508397,878.479368204501,1143.9379120496164] | |[31.390504477140617,1095.3694498285743,1306.0124861906327] | |[32.89165922208959,1091.1521230845228,1310.0881577350906] | |[-118.37273751675397,1832.771927405815,2088.6955393326043] | |[-150.18148405358022,1820.8730926512776,2091.1033550766124]| |[-184.0[...]En este problema se usan las funciones definidad en la parte 1 para implementar MCMC con el algoritmo Metropolis-Hastings y visualizar problemas de convergencia---------La gran mayoría de funciones que cree las dejé en el archivo Mis_funciones.py para hacer más fácil de leer esta hoja%matplotlib inline import numpy as np import matplotlib.pyplot as plt from Misfunciones import * import pandas # Estilo de gráficos plt.style.use('dark_background') # Seed np.random.seed(123) plt.style.use('dark_background') Datos = pandas.read_csv('blanton.csv', sep=',') # Pongo los datos en dos variables Mags = Datos['M'] Lum = Datos['f'] Barra_sup = np.loadtxt('Barra_sup.txt') Barra_inf = np.loadtxt('Barra_inf.txt') ERR = [] ij = 0 while ijMetropolis- Hastings, definiciones Mi PROPOSAL será una gaussiana centrada en cero con desviación estándar 'std'. 
This standard deviation determines how large (or long) the steps in the chains can be---------My PRIOR will be a uniform distribution in 3D; it is bounded inside the definition of the CADENAS() function that comes next.def CADENAS(Nsteps, Nburnt, Tstep, rPhi, rMe, ralpha): """ Returns the Markov chains for the three parameters of the problem Parameters ---------- Nsteps : int Number of steps of the chains Nburnt : int Number of steps after which the chains start being recorded (burn-in) Tstep : float A measure of the size of the chain steps rPhi, rMe, ralpha : list(2x1), list(2x1), list(2x1) Ranges for the PRIOR, associated with the parameters. Example: rPhi = [0,1] Returns ------- Cadenas : list List with the steps and the evolution of the parameters (Paso, Phi_evol, Me_evol, alpha_evol) """ import numpy as np Paso = [] # Records the steps Phi_evol = [] # Chain for the parameter "Phi" Me_evol = [] alpha_evol = [] # Look for an initial condition such that the posterior is not zero post_actual = 0 while post_actual < 1e-8: phi_actual = np.random.normal(loc=np.mean([rPhi[0], rPhi[1]]), scale=(rPhi[1]-rPhi[0])) Me_actual = np.random.normal(loc=np.mean([rMe[0], rMe[1]]), scale=(rMe[1]-rMe[0])) alpha_actual = np.random.normal(loc=np.mean([ralpha[0], ralpha[1]]), scale=(ralpha[1]-ralpha[0])) post_actual = POSTERIOR(Mags, Lum, ERR, Phi=phi_actual, Me=Me_actual, alpha=alpha_actual, Phimin=rPhi[0], Phimax=rPhi[1], Memin=rMe[0], Memax=rMe[1], alphamin=ralpha[0], alphamax=ralpha[1] ) par_actual = [phi_actual, Me_actual, alpha_actual] ij = 0 while ij < Nsteps: # Metropolis-Hastings update of par_actual and post_actual with the Gaussian proposal (see the sketch above) ... if ij > Nburnt: Paso.append( ij ) Phi_evol.append( par_actual[0] ) Me_evol.append( par_actual[1] ) alpha_evol.append( par_actual[2] ) # Print progress: from IPython.display import clear_output clear_output(wait=True) print('%', round(ij*100/Nsteps)) ij = ij + 1 return Paso, Phi_evol, Me_evol, alpha_evolHere I will put some blocks to create the chains, then some to save the data and another to load themOne can run the chains and then go directly to the plotting part or, alternatively, ignore the chains and go directly to the part that imports the files containing them (recommended)# Settings for building the chains: Nburnt = 0 Nsteps = 50000 # I will build chains changing only the step length "Tstep" # C = CADENAS(Nsteps=Nsteps, Nburnt=Nburnt, Tstep=1, rPhi=rPhi, rMe=rMe, ralpha=ralpha) # C2 = CADENAS(Nsteps=Nsteps, Nburnt=Nburnt, Tstep=1, rPhi=rPhi, rMe=rMe, ralpha=ralpha) # C3 = CADENAS(Nsteps=Nsteps, Nburnt=Nburnt, Tstep=1, rPhi=rPhi, rMe=rMe, ralpha=ralpha) # C4 = CADENAS(Nsteps=Nsteps, Nburnt=Nburnt, Tstep=0.1, rPhi=rPhi, rMe=rMe, ralpha=ralpha) # C5 = CADENAS(Nsteps=Nsteps, Nburnt=Nburnt, Tstep=10, rPhi=rPhi, rMe=rMe, ralpha=ralpha)% 100Saving the data (manual):# Save_chain(Steps=C5[0], Phi=C5[1], Me=C5[2], alpha=C5[3], name='Cadena5.txt')Importing the data:D = np.loadtxt('Cadena.txt') C = [D[:,0], D[:,1], D[:,2], D[:,3] ] D2 = np.loadtxt('Cadena2.txt') C2 = [D2[:,0], D2[:,1], D2[:,2], D2[:,3] ] D3 = np.loadtxt('Cadena3.txt') C3 = [D3[:,0], D3[:,1], D3[:,2], D3[:,3] ] D4 = np.loadtxt('Cadena4.txt') C4 = [D4[:,0], D4[:,1], D4[:,2], D4[:,3] ] D5 = np.loadtxt('Cadena5.txt') C5 = [D5[:,0], D5[:,1], D5[:,2], D5[:,3] ]To make the plots I created the function $\color{orange}{\text{Ploteo()}}$ Conveniently, I made the first three chains have good mixing (a lot of trial and error), while the other two have steps that are too small or too large.I plot them separately so they can be seen better:(The light blue line in the background is the value of
Blanton)""" GOOD MIXING (NO BURN-IN) """ fig, ax = plt.subplots(3, 1, figsize = (14,8), sharex=True) Ploteo(C, color='orange', label='1', fig=fig, ax=ax) Ploteo(C2, color='yellow', label='2', fig=fig, ax=ax) Ploteo(C3, color='violet', label='3', fig=fig, ax=ax) # Prior boundaries # ax[0].fill_between([Nburnt, Nsteps], y1=rPhi[0], y2=rPhi[1], facecolor='green', alpha=0.3) # ax[1].fill_between([Nburnt, Nsteps], y1=rMe[0], y2=rMe[1], facecolor='green', alpha=0.3) # ax[2].fill_between([Nburnt, Nsteps], y1=ralpha[0], y2=ralpha[1], facecolor='green', alpha=0.3) """ BAD MIXING """ fig, ax = plt.subplots(3, 1, figsize = (14,8), sharex=True) Ploteo(C4, color='yellow', label='1', fig=fig, ax=ax) Ploteo(C5, color='red', label='2', fig=fig, ax=ax) # Prior boundaries # ax[0].fill_between([Nburnt, Nsteps], y1=rPhi[0], y2=rPhi[1], facecolor='green', alpha=0.3) # ax[1].fill_between([Nburnt, Nsteps], y1=rMe[0], y2=rMe[1], facecolor='green', alpha=0.3) # ax[2].fill_between([Nburnt, Nsteps], y1=ralpha[0], y2=ralpha[1], facecolor='green', alpha=0.3)The yellow chain has a very small step and never reaches the maximum of the likelihood; the red one has a very large step (compared with the previous chains, although it is not that bad)""" PATHS """ fig, ax = plt.subplots(1, 2, figsize = (14,8), sharex=True) ax[0].plot(C[1], C[2], color='orange', label='1, good mixing') ax[0].plot(C2[1], C2[2], color='yellow', label='2, good mixing') ax[0].plot(C3[1], C3[2], color='violet', label='3, good mixing') ax[1].plot(C4[1], C4[2], color='purple', label='4, bad mixing') ax[1].plot(C5[1], C5[2], color='red', label='5, bad mixing'); ax[0].set_ylabel('Me', fontsize=20) ax[0].set_xlabel('Phi', fontsize=20) ax[1].set_ylabel('Me', fontsize=20) ax[1].set_xlabel('Phi', fontsize=20) ax[0].set_title('Good chains', fontsize=20) ax[1].set_title('Bad chains', fontsize=20);In the previous figure there is not much difference between the "good" and "bad" chains (because everything overlaps)Note that the "bad" chains start at the same place as some of the "good" ones.
That was intentional, so as to compare them better with each other Now I make a corner plot of one of the good chains with the corner package; for that I do a manual burn-in:# Manual burn-in: B = [C[1][10000:], C[2][10000:], C[3][10000:]] np.shape(B) plt.style.use('classic') ndim = 3 # Corner plot of one chain: import corner aa = np.transpose(B) plt.style.use('classic') fig, ax = plt.subplots(3, 3, figsize = (8,6)) labels = ['Phi', 'Me', 'alpha'] fig = corner.corner(aa, labels = labels, fig = fig, show_titles = True) # Blanton value1 = 0.0146 value2 = -20.83 value3 = -1.20 axes = np.array(fig.axes).reshape((ndim, ndim)) axes[1,0].scatter(value1, value2, zorder=5, color='red', s=40) axes[2,0].scatter(value1, value3, zorder=5, color='red', s=40) axes[2,1].scatter(value2, value3, zorder=5, color='red', s=40)The red points are Blanton's valuesIt turned out to be complicated to add the confidence regions (vertical lines) to the plotted posteriors; that will be for another occasion Now I look at how the uncertainties are obtained:# Error estimation: I choose the area in the tails to be 10% (arbitrary) # That implies that the quantiles I will look for are the 5th and the 95th Param = np.empty(3) ERR_DOWN = np.empty(3) # Arrays to hold the values ERR_UP = np.empty(3) ij=0 while ij<3: # Remember that C3[0] holds the steps (it is not a parameter) q_05, q_50, q_95 = corner.quantile(C[ij+1], [0.05, 0.5, 0.95]) x = q_50 # Fitted parameter dx_down, dx_up = q_50-q_05, q_95-q_50 # Errors Param[ij] = q_50 ERR_DOWN[ij] = dx_down ERR_UP[ij] = dx_up ij = ij+1 Param, ERR_DOWN, ERR_UP Blanton = [0.0146, -20.83, -1.2] if ERR_DOWN[0] < Blanton[0] and ERR_UP[0] < Blanton[0]: print('Phi is compatible with Blanton') else: print('Phi is NOT compatible with Blanton') if ERR_DOWN[1] < Blanton[1] and ERR_UP[1] < Blanton[1]: print('Me is compatible with Blanton') else: print('Me is NOT compatible with Blanton') if ERR_DOWN[2] < Blanton[2] and ERR_UP[2] < Blanton[2]: print('alpha is compatible with Blanton') else: print('alpha is NOT compatible with Blanton')alpha is NOT compatible with BlantonI did this very simplified analysis of confidence intervals for reasons of time.
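As a cross-check of the comparison above, one can also ask whether each of Blanton's values falls inside the quantile-based interval [Param - ERR_DOWN, Param + ERR_UP]. This is a small illustrative sketch (not part of the original notebook) reusing the arrays already defined:
# check whether Blanton's value lies inside the 5th-95th percentile interval of each parameter
for name, p, e_dn, e_up, ref in zip(['Phi', 'Me', 'alpha'], Param, ERR_DOWN, ERR_UP, Blanton):
    inside = (p - e_dn) <= ref <= (p + e_up)
    print(name, 'is compatible with Blanton' if inside else 'is NOT compatible with Blanton')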
Qualitatively it looks as if the maximum is not exactly at the values Blanton obtained I plot my model against Blanton's# Markov model xs = np.linspace(min(Mags), max(Mags), 100) F_mod2 = Modelo(xs,C[1][-1], C[2][-1], C[3][-1]) plt.style.use('dark_background') fig, ax = plt.subplots(1, 1, figsize = (12,6), sharex=True) plt.style.use('dark_background') ax.plot(xs, F_mod, color='orange', label='Blanton fit') ax.plot(xs, F_mod2, color='yellow', label='Markov fit') ax.errorbar(Mags, Lum, yerr=ERR, color='cyan', fmt='.') ax.set_xlabel('Magnitude', fontsize=20) ax.set_ylabel('Luminosity', fontsize=20) ax.set_yscale('log') ax.legend(fontsize=15, loc=2);Modeling real dataThe reaction butanal → butyroin → butyraldehyde is to be modeledThe overall time course looks like this:import pandas as pd import matplotlib.pyplot as plt df = pd.read_excel(r"4,5-ODO Upscaling.xlsx") print(df.head()) time = df["T [min]"] butanal = df["Butanal"] butyroin = df["butyroin"] octandiol = df["4,5-octanediol"] fig,ax = plt.subplots() ax.plot(time, butanal, label = "butanal") ax.plot(time, butyroin, label = "butyroin") ax.plot(time, octandiol, label = "octandiol") plt.legend() plt.show()T [min] Butanal butyroin 4,5-octanediol 0 0 235.403070 0.000000 0.0 1 15 136.667003 35.135764 0.0 2 30 78.775571 59.773672 0.0 3 60 39.501736 80.610846 0.0 4 140 15.373700 85.416647 0.0Step 1: Modeling the substrate curvefrom scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import numpy as np #import lmfit time = df["T [min]"] concentration_A0 = 240 def substrate_curve(A, t, k1, k2): dAdt = -k1*A**2 + k2*((concentration_A0-A)*0.5) return dAdt solver_substrat = odeint(substrate_curve, concentration_A0, time, args=(0.0003,0.00002)) print(solver_substrat) fig, ax = plt.subplots() ax.plot(time,solver_substrat) ax.plot(time,butanal, linestyle ="", marker = "d") #plt.show()[[240.
] [115.39234415] [ 75.96836553] [ 45.15540862] [ 21.76700349] [ 19.85514362] [ 18.25713069] [ 15.52533465] [ 7.68686215] [ 3.33601707]]Einbinden des Ersten Produktsfrom scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import numpy as np #import lmfit time = df["T [min]"] inital_conditions = [240,0] concentration_A0 = 240 concentration_B0 = 0 def substrate_curve(init_cond, t, k1, k2): A = init_cond[0] B = init_cond[1] dAdt = -k1*A**2 + k2*B dBdt = k1*A**20.5 -k2*B dZdt = np.array([dAdt,dBdt]) return dZdt solver_substrat = odeint(substrate_curve, inital_conditions, time, args=(0.0003,0.00002)) #print(solver_substrat) fig, ax = plt.subplots() ax.plot(time,solver_substrat[:,0], linestyle="--", color="blue") ax.plot(time,solver_substrat[:,1], linestyle="--", color="green") ax.plot(time,butanal, linestyle ="", marker = "d") #plt.show()Einbinden des Zweiten produktsfrom scipy.integrate import odeint import pandas as pd import matplotlib.pyplot as plt import numpy as np #import lmfit time = df["T [min]"] inital_conditions = [240,0,0] concentration_A0 = 240 concentration_B0 = 0 def substrate_curve(init_cond, t, k1, k2, k3, k4): A = init_cond[0] B = init_cond[1] C= init_cond[2] dAdt = -k1*A**2 + k2*B dBdt = k1*A**2*0.5 - (k2+k3)*B dCdt = k3*B print(C) dZdt = np.array([dAdt,dBdt, dCdt]) return dZdt solver_substrat = odeint(substrate_curve, inital_conditions, time, args=(0.0003,0.002, 0.001, 0.2)) #print(solver_substrat) fig, ax = plt.subplots() ax.plot(time,solver_substrat[:,0], linestyle="--", color="blue") ax.plot(time,solver_substrat[:,1], linestyle="--", color="green") ax.plot(time,solver_substrat[:,2], linestyle="--", color="red") ax.plot(time,butanal, linestyle ="", marker = "d") #plt.show()0.0 0.0 1.7245732492873803e-12 3.449139335281839e-12 5.173709002937437e-12 1.6723584504260742e-07 1.671604048751826e-07 6.656021661793499e-07 6.654513865242471e-07 1.4947275902054294e-06 1.4945770116190684e-06 1.1856390481259557e-05 1.1854551525662499e-05 7.459871644032933e-06 7.459016103574524e-06 2.3237730774421463e-06 2.323738584308103e-06 3.334830493974359e-06 3.3348651090548026e-06 4.527840750879716e-06 4.527840798629771e-06 5.902543064045394e-06 5.902543114569214e-06 9.19663755652842e-06 9.196637798693708e-06 1.3216170926899405e-05 1.321617080388232e-05 1.7960165872757272e-05 1.79601657494957e-05 2.3427648140738316e-05 2.342764813887226e-05 2.9617645411329244e-05 2.9617645409415407e-05 0.0001310433399009203 0.00013104333475881354 0.0003036647737543663 0.000303664763706992 0.0005465402601125065 0.0005465402797762438 0.0008587463572731454 0.000858746366410712 0.0012393771732361301 0.001239377178569503 0.0016875440866850207 0.0016875440918015057 0.00338808120236053 0.003388081445172[...]Monophonic SED on UrbanSound8K dataset 1 Import major librariesimport numpy as np import plaidml.keras import os plaidml.keras.install_backend() os.environ["KERAS_BACKEND"] = "plaidml.keras.backend" from keras import backend as K print(K) import keras from keras.preprocessing.image import ImageDataGenerator print("Keras version %s" %keras.__version__) Keras version 2.2.42 Load meta fileimport pandas as pd # Set the path to the full UrbanSound dataset fulldatasetpath = '/Xception/UrbanSound8K/audio/' meta = pd.read_csv('E:\\Xception\\UrbanSound8K\\metadata\\UrbanSound8K.csv')3 Data analysis Labels distribution in dataset# Class distribution meta['class'].value_counts()Audio propertiesimport struct import re import os fulldatasetpath = '/Xception/UrbanSound8K/audio/' # support to read file 
header and return audio properties def read_header(filename): wave = open(filename,"rb") riff = wave.read(12) fmat = wave.read(36) num_channels_string = fmat[10:12] num_channels = struct.unpack('Audio duration visualizationimport matplotlib.pyplot as plt plt.hist(audiodatadf['duration'], rwidth=0.9, color='#86bf91') plt.xlabel('Duration') plt.ylabel('Population') plt.title('Histogram of audio lengths') plt.grid(False) plt.show()Audio channelsprint("Audio channels:\n") print(audiodatadf.channels.value_counts(normalize=True))Audio channels: 2 0.915369 1 0.084631 Name: channels, dtype: float64Bit depthprint("Bit depths:\n") print(audiodatadf.bit_depth.value_counts(normalize=True))Bit depths: 16 0.659414 24 0.315277 32 0.019354 8 0.004924 4 0.001031 Name: bit_depth, dtype: float64Sample rateprint("Sample rates:\n") print(audiodatadf.sample_rate.value_counts(normalize=True))Sample rates: 44100 0.614979 48000 0.286532 96000 0.069858 24000 0.009391 16000 0.005153 22050 0.005039 11025 0.004466 192000 0.001947 8000 0.001374 11024 0.000802 32000 0.000458 Name: sample_rate, dtype: float644 Data Preprocessing# Load various imports import pandas as pd import os import librosa max_pad_len = 174 def extract_features(file_name): try: audio, sample_rate = librosa.load(file_name, res_type='kaiser_fast') mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40) pad_width = max_pad_len - mfccs.shape[1] mfccs = np.pad(mfccs, pad_width=((0, 0), (0, pad_width)), mode='constant') except Exception as e: print("Error encountered while parsing file: ", file_name) return None return mfccs features = [] # Iterate through each sound file and extract the features for index, row in meta.iterrows(): if index%500==0: print("processing element ",index) file_name = os.path.join(os.path.abspath(fulldatasetpath),'fold'+str(row["fold"]),str(row["slice_file_name"])) class_label = row["class"] data = extract_features(file_name) features.append([data, class_label]) # Convert into a Panda dataframe featuresdf = pd.DataFrame(features, columns=['feature','class_label']) print('Finished feature extraction from ', len(featuresdf), ' files')processing element 0 processing element 250 processing element 500 processing element 750 processing element 1000 processing element 1250 processing element 1500 processing element 1750 processing element 2000 processing element 2250 processing element 2500 processing element 2750 processing element 3000 processing element 3250 processing element 35005 Preprocessed data manipulation Save processed data to pickle filefeaturesdf.to_pickle('E:\\Xception\\UrbanSound8K\\audio\\processed_data_frame.pkl')Load processed data from pickle fileimport pandas as pd featuresdf = pd.read_pickle('E:\\Xception\\UrbanSound8K\\audio\\processed_data_frame.pkl') print("imported", featuresdf.shape[0],"elements")imported 8732 elementsProcess data to input the networkfrom sklearn.preprocessing import LabelEncoder from keras.utils import to_categorical # Convert features and corresponding classification labels into numpy arrays X = np.array(featuresdf.feature.tolist()) y = np.array(featuresdf.class_label.tolist()) # Encode the classification labels le = LabelEncoder() yy = to_categorical(le.fit_transform(y)) # split the dataset from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, yy, test_size=0.2, random_state = 42) # compute number of output classes num_labels = featuresdf['class_label'].nunique()6 Model definitions Baselinefrom keras.models import Model from 
keras.layers import SeparableConv2D, ZeroPadding2D, Activation, Dropout, Dense, \ Conv2D, MaxPooling2D, Reshape, GRU from keras.layers.normalization import BatchNormalization from keras import Input, optimizers num_rows = X.shape[1] num_columns = X.shape[2] num_channels = 1 x_train = x_train.reshape(x_train.shape[0], num_columns, num_rows, num_channels) x_test = x_test.reshape(x_test.shape[0], num_columns, num_rows, num_channels) def Net(): # input layer inputs = Input(shape=(num_columns, num_rows, num_channels)) # DWS-CNN layer 1 x = Conv2D(256,kernel_size=(5,5), strides=(1,1), padding='same')(inputs) x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,5), padding='same')(x) # Dropout x = Dropout(0.25)(x) # DWS-CNN layer 2 x = Conv2D(256,kernel_size=(5,5), strides=(1,1), padding='same')(x) x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,4), padding='same')(x) # Dropout x = Dropout(0.25)(x) # DWS-CNN layer 3 x = Conv2D(256,kernel_size=(5,5), strides=(1,1), padding='same')(x) x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,2), padding='same')(x) # Dropout x = Dropout(0.25)(x) x = Reshape((174, 256))(x) # GRU x = GRU(256)(x) # classifier layer outputs = Dense(num_labels,activation='softmax')(x) # model compilation for training adam = optimizers.Adam(lr=0.0001) model = Model(inputs, outputs) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"]) return model # create the model model = Net() model.summary()INFO:plaidml:Opening device "opencl_amd_gfx1010.0"Proposed model, low accuracyfrom keras.models import Model from keras.layers import SeparableConv2D, ZeroPadding2D, Activation, Dropout, Dense, \ Conv2D, MaxPooling2D, GlobalAveragePooling2D, Reshape, Permute from keras.layers.normalization import BatchNormalization from keras import Input, optimizers num_rows = X.shape[1] num_columns = X.shape[2] num_channels = 1 x_train = x_train.reshape(x_train.shape[0], num_columns, num_rows, num_channels) x_test = x_test.reshape(x_test.shape[0], num_columns, num_rows, num_channels) def Net(dilated_kernel, dilation, dilated_padding): # input layer inputs = Input(shape=(num_columns, num_rows, num_channels)) # DWS-CNN layer 1 x = ZeroPadding2D(padding=(2))(inputs) # use valid padding since padding is introduced before since it has a special form x = SeparableConv2D(256,kernel_size=(5,5), strides=(1,1), padding='valid')(x) x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,5), strides=(1,5), padding='valid')(x) # Dropout x = Dropout(0.25)(x) # DWS-CNN layer 2 x = ZeroPadding2D(padding=(2))(x) # use valid padding since padding is introduced before since it has a special form x = SeparableConv2D(256,kernel_size=(5,5), strides=(1,1), padding='valid')(x) x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,4), strides=(1,4), padding='valid')(x) # Dropout x = Dropout(0.25)(x) # DWS-CNN layer 3 x = ZeroPadding2D(padding=(2))(x) # use valid padding since padding is introduced before since it has a special form x = SeparableConv2D(256,kernel_size=(5,5), strides=(1,1), padding='valid')(x) 
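# Note: SeparableConv2D factorises the 5x5 convolution into a depthwise pass (one spatial filter per input channel) followed by a 1x1 pointwise convolution, which is what makes these DWS-CNN blocks cheaper than the plain Conv2D layers used in the baseline model above.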
x = Activation('relu')(x) # Batch Normalisation before passing it to the next layer x = BatchNormalization()(x) # Pooling x = MaxPooling2D(pool_size=(1,2), strides=(1,2), padding='valid')(x) # Dropout x = Dropout(0.25)(x) x = Permute((1,3,2))(x) # DIL-CNN x = ZeroPadding2D(padding=(dilated_padding*dilation, 0))(x) x = Conv2D(256, kernel_size=dilated_kernel, dilation_rate=(dilation, 1))(x)#, strides=(1,3))(x) x = Activation('relu')(x) x = BatchNormalization()(x) # original idea with 1x1 convolution not working good #initializer = keras.initializers.Ones() #x = Conv2D(256, (1,1), strides=(1,3), kernel_initializer=initializer, trainable = False)(x) #x = MaxPooling2D(pool_size=(1,3), strides=(1,3), padding='valid')(x) # newer idea with global pooling, not working good either x = GlobalAveragePooling2D(name='avg_pool')(x) # classifier layer outputs = Dense(num_labels,activation='sigmoid')(x) # model compilation for training adam = optimizers.Adam(lr=0.0001) model = Model(inputs, outputs) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) return model # create the model dilated_kernel = (3,3) dilation = (10) dilated_padding = 1 model = Net(dilated_kernel,dilation,dilated_padding) model.summary()INFO:plaidml:Opening device "opencl_amd_gfx1010.0"7 Model Trainingfrom keras.callbacks import ModelCheckpoint, EarlyStopping from datetime import datetime num_epochs = 250 # low batch size due to memory maximum dimension, modify if using smaller dataset or larger VRAM num_batch_size = 6 callbacks = [ModelCheckpoint(filepath='E:\\Xception\\UrbanSound8K\\model-{val_loss:.2f}.h5', verbose=1, save_best_only=True, monitor="val_loss"), EarlyStopping(monitor='val_loss', patience=30)] start = datetime.now() history = model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_split=0.16, verbose=1, callbacks=callbacks) duration = datetime.now() - start print("Training completed in time: ", duration)Train on 5867 samples, validate on 1118 samples Epoch 1/250 5867/5867 [==============================] - 352s 60ms/step - loss: 2.2666 - acc: 0.1824 - val_loss: 2.2912 - val_acc: 0.1127 Epoch 00001: val_loss improved from inf to 2.29120, saving model to E:\Xception\UrbanSound8K\model-2.29.h5 Epoch 2/250 5867/5867 [==============================] - 235s 40ms/step - loss: 2.2678 - acc: 0.1587 - val_loss: 2.2732 - val_acc: 0.1002 Epoch 00002: val_loss improved from 2.29120 to 2.27316, saving model to E:\Xception\UrbanSound8K\model-2.27.h5 Epoch 3/250 5867/5867 [==============================] - 235s 40ms/step - loss: 2.2613 - acc: 0.1198 - val_loss: 2.2694 - val_acc: 0.1020 Epoch 00003: val_loss improved from 2.27316 to 2.26942, saving model to E:\Xception\UrbanSound8K\model-2.27.h5 Epoch 4/250 5867/5867 [==============================] - 235s 40ms/step - loss: 2.2596 - acc: 0.1198 - val_loss: 2.2639 - val_acc: 0.1002 Epoch 00004: val_loss improved from 2.26942 to 2.26390, saving mode[...]Save model of last epochmodel.save('E:\\Xception\\UrbanSound8K\\model-new-0.24_lastEpoch.h5')8 Model Evaluation Evaluate model# Evaluate the model on the test data using `evaluate` print("Evaluate on test data") results = model.evaluate(x_test, y_test, batch_size=num_batch_size) print("test loss, test acc:", results)Evaluate on test data 1747/1747 [==============================] - 17s 10ms/step test loss, test acc: [2.046015382358942, 0.26273612499782956]Plot history graphsimport matplotlib.pyplot as plt # summarize history for accuracy plt.plot(history.history['acc']) 
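# Keras 2.2 logs accuracy under the keys 'acc'/'val_acc'; newer Keras releases use 'accuracy'/'val_accuracy', so these keys may need adjusting if the notebook is rerun on a different version.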
plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show()Compute metrics of modelimport sklearn.metrics from sklearn.metrics import classification_report, confusion_matrix classnames = ["air conditioner", "car horn", "children playing", "dog bark", "drilling", "engine idling", "gun shot", "jackhammer", "siren", "street music"] preds = model.predict(x_test,verbose=1) Ypred = np.argmax(preds, axis=1) Ytest = np.argmax(y_test, axis=1) print(classification_report(Ytest, Ypred, labels=None, target_names=classnames, digits=3))1747/1747 [==============================] - 21s 12ms/step
                  precision  recall  f1-score  support
air conditioner       0.071   0.005     0.009      203
car horn              0.286   0.023     0.043       86
children playing      0.171   0.158     0.164      183
dog bark              0.218   0.398     0.282      201
drilling              0.218   0.286     0.247      206
engine idling         0.000   0.000     0.000      193
gun shot              0.354   0.792     0.489       72
jackhammer            0.328   0.423     0.370      208
siren                 0.336   0.218     0.265      165
street music          0.281   0.465     0.350      230
accuracy                                0.263     1747
macro avg             0.226   0.277     0.222     1747
weighted avg          0.213   0.263     0.217     1747
Lab 4 – DATA 3401 (Fall 2021) Lab Dates: 9/24 & 10/1 Due Date 10/8 (before the beginning of lab) Lab DescriptionThe purpose of this lab is for you to code your own tic-tac-toe game that takes input from the user for each player and visualizes the state of the board after each move- As in the previous labs, copy and paste the cells below into a jupyter notebook titled Lab 4- Solve the problems and push them to a new Lab 4 folder in your GitHub repo prior to the deadlineYou will build an n x n Tic Tac Toe game. As you do the exercises, make sure your solutions work for any size Tic Tac Toe game, not just the standard 3x3. Exercise 1:Write a function that creates an n by n matrix (a list of lists) which will represent the state of a Tic Tac Toe game. Let 0, 1, and 2 represent empty, "X", or "O".def make_game(size=3): Tic_Tac_Toe=[[0]*size for i in range(size)] return Tic_Tac_Toe game=make_game(3) gameExercise 2:Write a function that takes an `n` by `n` matrix representing a tic-tac-toe game, and returns -1, 0, 1, or 2 indicating the game is incomplete, the game is a draw, player 1 has won, or player 2 has won, respectively. Here are some example inputs you can use to test your code:def make_game_board(num_rows,num_cols): board = [[empty]*num_cols for i in range(num_rows)] global row_names, row_map, column_names, column_map row_names = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ") row_map = dict(zip(row_names,range(num_rows))) column_names = list(map(str,range(1,num_cols+1))) column_map = dict(zip(column_names,range(num_cols))) return board def draw_board(board): num_rows = len(board) num_cols = len(board[0]) print(" ",end=" ") for i in range(num_cols): print(column_names[i],end=" ") print(" ",end = " ") print() print(" ",end=" ") print("----"*num_cols) for i in range(num_rows): print(row_names[i],end=" ") print("|",end=" ") for j in range(num_cols): print(space_character[board[i][j]],end=" ") print("|",end=" ") print() print(" ",end=" ") print("----"*num_cols) board2 = make_game_board(4,5) draw_board(board2)Exercise 3:Write a function that takes 2 integers `n` and `m` as input and draws an `n` by `m` game board.
For example the following is a 3x3 board:``` --- --- --- | | | | --- --- --- | | | | --- --- --- ```def draw_board(n,m): for i in range(n): print("---"*m) print("| "*(m+1)) print("---"*m) draw_board(3,3)Exercise 4:Modify exercise 3, so that it takes a matrix of the form from exercise 2 and draws a tic-tac-toe board with "X"s and "O"s.def draw_tic_tac_toe(matrix): l = len(matrix) for i in range(l): if i!=0: print("\n----------") for j in range(l): var = matrix[i][j] if var == 1: var = "X" if var == 2: var = "O" if var == 0: var = " " if j!=0: print("| "+var+" ",end="") if j==0: print(" "+var,end="") draw_tic_tac_toe(no_winner)Exercise 5:Write a function that takes a game board, player number, and `(x,y)` coordinates and places "X" or "O" in the correct location of the game board. Make sure that you only allow filling previously empty locations. Return `True` or `False` to indicate successful placement of "X" or "O".def place_on_board(matrix,player_number,x_coordinate,y_coordinate): player_1_piece = "X" player_2_piece = "O" empty_space = " " if matrix[x_coordinate][y_coordinate]!=0: return False elif player_number == 1: matrix[x_coordinate][y_coordinate]=1 return True elif player_number == 2: matrix[x_coordinate][y_coordinate]=2 return True board=make_game() place_on_board(board,2,1,2) draw_tic_tac_toe(board)| | ---------- | | O ---------- | |Exercise 6:Modify Exercise 5 to show column and row labels so that players can specify location using "A2" or "C1".# Write your solution here def draw_tic_tac_toe_updated(matrix): l = len(matrix) print(" 1 2 3") for i in range(l): if i!=0: print("\n ----------") for j in range(l): var = matrix[i][j] if var == 1: var = "X" if var == 2: var = "O" if var == 0: var = " " if i==0 and j==0: print("A ",end="") if i==1 and j==0: print("B ",end="") if i==2 and j==0: print("C ",end="") if j!=0: print("| "+var+" ",end="") if j==0: print(" "+var,end="") draw_tic_tac_toe_updated(no_winner)Exercise 7:Write a function that takes a board, player number, and location specified as in exercise 6 and then calls exercise 5 to correctly modify the board.# Write your solution here def nice_make_move(board,player,location): loc = parse_location(location) if loc: return make_move(board,player,loc) else: print_message("Invalid move.") return False # Test your solution here def parse_location(location_string): if not isinstance(location_string,str): print_message("Bad input. Location must be a string.") return False if len(location_string) != 2: print_message("Bad input. Location must be 2 characters.") return False row = location_string[0].upper() col = location_string[1].upper() if not row in row_names: print_message("This is not an existing row.") return False
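The parse_location cell above is cut off after the row check. A compact sketch of how such a helper could continue, reusing the row_names, column_names, row_map and column_map globals set up in make_game_board (an illustration only, not the lab's reference solution):
```
def parse_location(location_string):
    # Accept strings like "A2": a row letter followed by a column number
    if not isinstance(location_string, str) or len(location_string) != 2:
        print_message("Bad input. Location must be a 2-character string such as 'A2'.")
        return False
    row, col = location_string[0].upper(), location_string[1]
    if row not in row_names or col not in column_names:
        print_message("This is not an existing row/column.")
        return False
    # Translate the labels into the 0-based (row, column) indices used by make_move
    return (row_map[row], column_map[col])
```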
Exercise 8:Write a function that is called with a board and player number, takes input from the player using python's `input`, and modifies the board using your function from exercise 7. Note that you should keep asking for input until you have gotten a valid input that results in a valid move.# Write your solution here def make_move(board,player,location,verbose=True): x,y = location # Check if space is empty if board[x][y] != empty: print_message("Invalid move: space is not empty") return False else: board[x][y] = player return True # Test your solution hereExercise 9: Use all of the previous exercises to implement a full tic-tac-toe game, where an appropriate board is drawn, 2 players are repeatedly asked for the coordinates of where they wish to place a mark, and the game status is checked until a player wins or a draw occurs.def col_winner(): for i in range(num_cols): if is_winner(col_list[i]) == player_1: return player_1 if is_winner(col_list[i]) == player_2: return player_2 return FalseExercise 10:Test that your game works for 5x5 Tic Tac Toe.def diag_winner(): for i in range(2): if is_winner(diag_list[i]) == player_1: return player_1 if is_winner(diag_list[i]) == player_2: return player_2 return False def is_full(): n = 0 for i in range(num_rows): for j in range(num_cols): if board[i][j] != empty: n+=1 if n==(num_rows*num_cols): return True else: return False def winner(): row_state = row_winner() col_state = col_winner() diag_state = diag_winner() if row_state: return row_state elif col_state: return col_state elif diag_state: return diag_state else: return False if winner(): return winner() elif is_full(): return 0 else: return -1 Exercise 11:(Extra Credit)Develop a version of the game where one player is the computer. Note that you don't need to do an extensive search for the best move. You can have the computer simply protect against losing and otherwise try to win with straight or diagonal patterns.# Write your solution here def tictactoe_game(): print("Welcome to Tic-Tac-Toe!") print("--------------------") board_0 = make_game_board(3,3) player = player_1 this_game_state = -1 while this_game_state==-1: draw_board(board_0) print("Player",player,"move:") new_move = take_move(board_0,player) if new_move == "end": print("Player has exited the game") break this_game_state = game_state(board_0) if player==player_1: player = player_2 else: player = player_1 if not new_move == "end": draw_board(board_0) if this_game_state == 0: print("The game is a draw") else: print("Winner is player",this_game_state) # Test your solution hereWelcome to a qMRLab interactive blog post Jupyter Notebook!If this is your first time running a Jupyter Notebook, there are a lot of tutorials available online to help. [Here's one](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) for your convenience. IntroductionThis notebook contains everything needed to reproduce the MP2RAGE T1 blog post on the [qMRLab website](). In fact, this notebook generated the HTML for the blog post too! This notebook is currently running on a MyBinder server that only you can access, but if you want to be kept up-to-date on any changes that the developers make to this notebook, you should go to its [GitHub repository](https://github.com/qMRLab/t1_notebooks) and follow it by clicking the "Watch" button in the top right (you may need to create a GitHub account, if you don't have one already). TipsHere are a few things you can do in this notebook Code* Run the entire processing by clicking above on the "Kernel" tab, then "Restart & Run All". It will be complete when none of the cells have an asterisk "\*" in the square brackets.* To change the code, you need to click once on code cells.
To re-run that cell, click the "Run" button above when the cell is selected. * **Note:** Cells can depend on previous cells, or even on previous runs of the cell itself, so it's best to run all the previous cells beforehand.* This binder runs on SoS, which allows the mixing of Octave (i.e. an open-source MATLAB) and Python cells. Take a look at the drop-down menu on the top right of the cells to know which one you are running.* To transfer data from cells of one language to another, you need to create a new cell in the incoming language and run `%get (param name) --from (outgoing language)`. See cells below for several examples within this notebook. HTML* To reproduce the HTML of the blog post, run the entire processing pipeline (see point one in the previous section), then save the notebook (save icon, top left). Now, click on the drop-down menu on the left panel, and select `%sossave --to html --force` . After a few seconds, it should output "Workflow saved to VariableFlipAngle.html" – click on the HTML name, and you're done!* Cells with tags called "scratch" are not displayed in the generated HTML.* Cells with the tag "report_output" display the output (e.g. figures) in the generated HTML.* Currently in an un-run notebook, the HTML is not formatted like the website. To do so, run the Python module import cell (` Module imports`) and then the very last cell (`display(HTML(...)`).**If you have any other questions or comments, please raise them in a [GitHub issue](https://github.com/qMRLab/t1_notebooks/issues).** NoteThe following cell is meant to be displayed for instructional purposes in the blog post HTML when "All cells" gets displayed (i.e. the Octave code).% **Blog post code introduction** % % Congrats on activating the "All cells" option in this interactive blog post =D % % Below, several new HTML blocks have appeared prior to the figures, displaying the Octave/MATLAB code that was used to generate the figures in this blog post. % % If you want to reproduce the data on your own local computer, you simply need to have qMRLab installed in your Octave/MATLAB path and run the "startup.m" file, as is shown below. % % If you want to get under the hood and modify the code right now, you can do so in the Jupyter Notebook of this blog post hosted on MyBinder. The link to it is in the introduction above. # PYTHON CODE # Module imports import matplotlib.pyplot as plt import plotly.plotly as py import plotly.graph_objs as go import numpy as np from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot config={'showLink': False, 'displayModeBar': False} init_notebook_mode(connected=True) from IPython.core.display import display, HTMLMP2RAGE T1 Mapping Dictionary-based MRI techniques capable of generating T1 maps are increasing in popularity, due to their growing availability on clinical scanners, rapid scan times, and fast post-processing computation time, thus making quantitative T1 mapping accessible for clinical applications. Generally speaking, dictionary-based quantitative MRI techniques use numerical dictionaries—databases of pre-calculated signal values simulated for a wide range of tissue and protocol combinations—during the image reconstruction or post-processing stages. Popular examples of dictionary-based techniques that have been applied to T1 mapping are MR Fingerprinting (MRF) (Ma et al. 2013), certain flavours of compressed sensing (CS) (Doneva et al. 2010; Li et al.
2012), and Magnetization Prepared 2 Rapid Acquisition Gradient Echoes (MP2RAGE) (Marques et al. 2010). Dictionary-based techniques can usually be classified into one of two categories: techniques that use information redundancy from parametric data to assist in accelerated imaging (e.g. CS, MRF), or those that use dictionaries to estimate quantitative maps using the MR images after reconstruction. Because MP2RAGE is a technique implemented primarily for T1 mapping, and it is becoming increasingly available as a standard pulse sequence on many MRI systems, the remainder of this section will focus solely on this technique. However, many concepts discussed are shared by other dictionary-based techniques.MP2RAGE is an extension of the conventional MPRAGE pulse sequence widely used in clinical studies (Haase et al. 1989; Mugler & Brookeman 1990). A simplified version of the MP2RAGE pulse sequence is shown in Figure 1. MP2RAGE can be seen as a hybrid between the inversion recovery and VFA pulse sequences: a 180° inversion pulse is used to prepare the magnetization for T1 sensitivity at the beginning of each TRMP2RAGE, and then two images are acquired at different inversion times using gradient recalled echo (GRE) imaging blocks with low flip angles and short repetition times (TR). During a given GRE imaging block, each excitation pulse is followed by a constant in-plane (“y”) phase encode weighting (varied for each TRMP2RAGE), but with different 3D (“z”) phase encoding gradients (varied at each TR). The center of k-space for the 3D phase encoding direction is acquired at the TI for each GRE imaging block. The main motivation for developing the MP2RAGE pulse sequence was to provide a metric similar to MPRAGE, but with self-bias correction of the static (B0) and receive (B1-) magnetic fields, and a first order correction of the transmit magnetic field (B1+). However, because two images at different TIs are acquired (unlike MPRAGE, which only acquires data at a single TI), information about the T1 values can also be inferred, thus making it possible to generate quantitative T1 maps using this data. Figure 1. Simplified diagram of an MP2RAGE pulse sequence. TR: repetition time between successive gradient echo readouts, TRMP2RAGE: repetition time between successive adiabatic 180° inversion pulses, TI1 and TI2: inversion times, θ1 and θ2: excitation flip angles. The imaging readout events occur within each TR using a constant in-plane phase encode (“y”) gradient set for each TRMP2RAGE, but varying 3D phase encode (“z”) gradients between each successive TR. Signal Modelling Prior to considering the full signal equations, we will first introduce the equation for the MP2RAGE parameter (SMP2RAGE) that is calculated in addition to the T1 map. For complex data (magnitude and phase, or real and imaginary), the MP2RAGE signal (SMP2RAGE) is calculated from the images acquired at two TIs (SGRE,TI1 and SGRE,TI2) using the following expression (Marques et al. 2010):This value is bounded between [-0.5, 0.5], and helps reduce some B0 inhomogeneity effects using the phase data. For real data, or magnitude data with polarity restoration, this metric is instead calculated as:Because MP2RAGE is a hybrid of pulse sequences used for inversion recovery and VFA, the resulting signal equations are more complex. 
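(For reference: the SMP2RAGE expressions referred to above appear as equation images in the original blog post and are not part of this text export. Transcribed from Marques et al. (2010) (the expressions the text labels Eqs. (1.11) and (1.12)), the complex-data form is $$S_{\mathrm{MP2RAGE}} = \mathrm{Re}\left(\frac{S_{\mathrm{GRE,TI_1}}^{*}\, S_{\mathrm{GRE,TI_2}}}{\left|S_{\mathrm{GRE,TI_1}}\right|^{2} + \left|S_{\mathrm{GRE,TI_2}}\right|^{2}}\right),$$ and the real/magnitude form with polarity restoration is $$S_{\mathrm{MP2RAGE}} = \frac{S_{\mathrm{GRE,TI_1}}\, S_{\mathrm{GRE,TI_2}}}{S_{\mathrm{GRE,TI_1}}^{2} + S_{\mathrm{GRE,TI_2}}^{2}}.$$)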
Typically, a steady state is not achieved during the short train of GRE imaging blocks, so the signal at the center of k-space for each readout (which defines the contrast weighting) will depend on the number of phase-encoding steps. For simplicity, the equations presented here assume that the 3D phase-encoding dimension is fully sampled (no partial Fourier or parallel imaging acceleration). For this case (see appendix of (Marques et al. 2010) for derivation details), the signal equations are:where B1- is the receive field sensitivity, “eff” is the adiabatic inversion pulse efficiency, ER = exp(-TR/T1), EA = exp(-TA/T1), EB = exp(-TB/T1), EC = exp(-TC/T1). The variables TA, TB, and TC are the three different delay times (TA: time between inversion pulse and beginning of the GRE1 block, TB: time between the end of GRE1 and beginning of GRE2, TC: time between the end of GRE2 and the end of the TR). If no k-space acceleration is used (e.g. no partial Fourier or parallel imaging acceleration), then these values are TA = TI1 - (n/2)TR, TB = TI2 - (TI1 + nTR), and TC = TRMP2RAGE - (TI2 + (n/2)TR), where n is the number of voxels acquired in the 3D phase encode direction varied within each GRE block. The value mz,ss is the steady-state longitudinal magnetization prior to the inversion pulse, and is given by: From Eqs. (3–6), it is evident that the MP2RAGE parameter SMP2RAGE (Eqs. (1.11) and (1.12)) cancels out the effects of receive field sensitivity, T2*, and M0. The signal sensitivity related to the transmit field (B1+), hidden in Eqs. (3–6) within the flip angle values θ1 and θ2, can also be reduced by careful pulse sequence protocol design (Marques et al. 2010), but not entirely eliminated (Marques & Gruetter 2013). Data Fitting Dictionary-based techniques such as MP2RAGE do not typically use conventional minimization algorithms (e.g. Levenberg-Marquardt) to fit signal equations to observed data. Instead, the MP2RAGE technique uses pre-calculated signal values for a wide range of parameter values (e.g. T1), and then interpolation is done within this dictionary of values to estimate the T1 value that matches the observed signal. This approach results in rapid post-processing times because the dictionaries can be simulated/generated prior to scanning and interpolating between these values is much faster than most fitting algorithms. This means that the quantitative image can be produced and displayed directly on the MRI scanner console rather than needing to be fitted offline. Figure 2. T1 lookup table as a function of B1 and SMP2RAGE value. Inversion times used to acquire this magnitude image dataset were 800 ms and 2700 ms, the flip angles were 4° and 5° (respectively), TRMP2RAGE = 6000 ms, and TR = 6.7 ms. 
The code that was used were shared open sourced by the authors of the original MP2RAGE paper (https://github.com/JosePMarques/MP2RAGE-related-scripts).%% MATLAB/OCTAVE CODE % Code used to generate the data required for Figure 5 of the blog post clear all cd ../qMRLab startup MP2RAGE.B0=7; % in Tesla MP2RAGE.TR=6; % MP2RAGE TR in seconds MP2RAGE.TRFLASH=6.7e-3; % TR of the GRE readout MP2RAGE.TIs=[800e-3 2700e-3];% inversion times - time between middle of refocusing pulse and excitatoin of the k-space center encoding MP2RAGE.NZslices=[35 72];% Slices Per Slab * [PartialFourierInSlice-0.5 0.5] MP2RAGE.FlipDegrees=[4 5];% Flip angle of the two readouts in degrees invEFF=0.99; B1_vector=0.005:0.05:1.9; T1_vector=0.5:0.05:5.2; npoints=40; %% creates a lookup table of MP2RAGE intensities as a function of B1 and T1 k=0; for b1val=B1_vector k=k+1; [Intensity T1vector ]=MP2RAGE_lookuptable(2,MP2RAGE.TR,MP2RAGE.TIs,b1val*MP2RAGE.FlipDegrees,MP2RAGE.NZslices,MP2RAGE.TRFLASH,'normal',invEFF); MP2RAGEmatrix(k,:)=interp1(T1vector,Intensity,T1_vector); end; %% make the matrix MP2RAGEMatrix into T1_matrix(B1,ratio) MP2RAGE_vector=linspace(-0.5,0.5,npoints); k=0; for b1val=B1_vector k=k+1; try T1matrix(k,:)=interp1(MP2RAGEmatrix(k,:),T1_vector,MP2RAGE_vector,'pchirp'); catch temp=MP2RAGEmatrix(k,:); temp(isnan(temp))=linspace(-0.5,-1,length(find(isnan(temp)==1))); temp=interp1(temp,T1_vector,MP2RAGE_vector); T1matrix(k,:)=temp; end end %get T1matrix --from Octave %get T1matrix --from Octave %get T1matrix --from Octave %get T1matrix --from Octave %get B1_vector --from Octave %get MP2RAGE_vector --from Octave # PYTHON CODE init_notebook_mode(connected=True) # The polling here is to ensure that plotly.js has already been loaded before # setting display alignment in order to avoid a race condition. trace5 = go.Heatmap(x = MP2RAGE_vector, y = B1_vector, z=T1matrix, zmin=0, zmax=5, colorscale='Portland', xaxis='x2', yaxis='y2', visible=True, name = 'T1 values (ms)') data=[trace5] layout = dict( width=560, height=345, margin = dict( t=40, r=50, b=50, l=80), annotations=[ dict( x=-0.14, y=0.5, showarrow=False, text='B1 values', font=dict( family='Times New Roman', size=22 ), textangle=-90, xref='paper', yref='paper' ), dict( x=0.5, y=-0.2, showarrow=False, text='SMP2RAGE values', font=dict( family='Times New Roman', size=22 ), xref='paper', yref='paper' ), dict( x=0.5, y=1.15, showarrow=False, text='Lookup Table', font=dict( family='Times New Roman', size=26 ), xref='paper', yref='paper' ), dict( x=1.17, y=1.15, showarrow=False, text='T1 (ms)', font=dict( family='Times New Roman', size=20 ), xref='paper', yref='paper' ), ], xaxis = dict(range = [0,1], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0, 0.5]), yaxis = dict(range = [0,1], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0, 1]), showlegend = False, autosize = False, ) fig = dict(data=data, layout=layout) iplot(fig, filename = 'basic-heatmap', config = config)To produce T1 maps with good accuracy and precision using dictionary-based interpolation methods, it is important that the signal curves are unique for each parameter value. MP2RAGE can produce good T1 maps by using a dictionary with only dimensions (T1, SMP2RAGE), since SMP2RAGE is unique for each T1 value for a given protocol (Marques et al. 2010). However, as was noted above, SMP2RAGE is also sensitive to B1 because of θ1 and θ2 in Eqs. (1.13–1.16). 
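As a minimal illustration of the dictionary-lookup idea described in this section (a Python sketch with placeholder numbers, separate from the qMRLab/Octave implementation shown in this notebook): simulate SMP2RAGE over a grid of T1 values for a fixed protocol, then estimate T1 from an observed SMP2RAGE value by interpolating the inverse of that monotonic curve.
```
import numpy as np

# Placeholder 1D dictionary: S_MP2RAGE simulated on a grid of T1 values.
# The curve below is a stand-in monotonic function, not a real protocol simulation.
T1_grid = np.linspace(0.5, 5.0, 200)                    # T1 in seconds
S_grid = 0.5 - 1.0 / (1.0 + np.exp(-(T1_grid - 2.0)))   # bounded within [-0.5, 0.5]

# T1 estimation is then a single interpolation of the inverted dictionary;
# np.interp needs increasing x, so sort by the dictionary signal first.
order = np.argsort(S_grid)
S_observed = np.array([-0.20, 0.00, 0.25])
T1_estimates = np.interp(S_observed, S_grid[order], T1_grid[order])
print(T1_estimates)
```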
The B1-sensitivity can be reduced substantially with careful MP2RAGE protocol optimization (Marques et al. 2010), and further improved by including B1 as one of the dictionary dimensions [T1, B1, SMP2RAGE] (Figure 1.15). This requires an additional acquisition of a B1 map (Marques & Gruetter 2013), which lengthens the scan time. Figure 3. Example MP2RAGE dataset of a healthy adult brain at 7T and T1 map. Inversion times used to acquire this magnitude image dataset were 800 ms and 2700 ms, the flip angles were 4° and 5° (respectively), TRMP2RAGE = 6000 ms, and TR = 6.7 ms. The dataset and code that was used were shared open sourced by the authors of the original MP2RAGE paper (https://github.com/JosePMarques/MP2RAGE-related-scripts).%% MATLAB/OCTAVE CODE % Download variable flip angle brain MRI data for Figure 7 of the blog post cmd = ['curl -L -o mp2rage.zip https://osf.io/8x2c9/download?version=1']; [STATUS,MESSAGE] = unix(cmd); unzip('mp2rage.zip'); %% MATLAB/OCTAVE CODE % Code used to generate the data required for Figure 5 of the blog post clear all cd ../qMRLab startup MP2RAGE.B0=7; % in Tesla MP2RAGE.TR=6; % MP2RAGE TR in seconds MP2RAGE.TRFLASH=6.7e-3; % TR of the GRE readout MP2RAGE.TIs=[800e-3 2700e-3];% inversion times - time between middle of refocusing pulse and excitatoin of the k-space center encoding MP2RAGE.NZslices=[35 72];% Slices Per Slab * [PartialFourierInSlice-0.5 0.5] MP2RAGE.FlipDegrees=[4 5];% Flip angle of the two readouts in degrees MP2RAGE.filenameUNI='MP2RAGE_UNI.nii'; % file with UNI MP2RAGE.filenameINV1='MP2RAGE_INV1.nii'; % file with UNI MP2RAGE.filenameINV2='MP2RAGE_INV2.nii'; % file with INV2 % load the MP2RAGE data - it can be either the SIEMENS one scaled from % 0 4095 or the standard -0.5 to 0.5 MP2RAGEimg=load_untouch_nii(MP2RAGE.filenameUNI, [], [], [], [], [], []); MP2RAGEINV1img=load_untouch_nii(MP2RAGE.filenameINV1, [], [], [], [], [], []); MP2RAGEINV2img=load_untouch_nii(MP2RAGE.filenameINV2, [], [], [], [], [], []); [T1map , M0map , R1map]=T1M0estimateMP2RAGE(MP2RAGEimg,MP2RAGEINV2img,MP2RAGE,0.96); % Code used to re-orient the images to make pretty figures, and to assign variables with the axis lengths. T1_map = imrotate(squeeze(T1map.img(:,130,:)),180); T1_map(T1map.img>5)=0; T1_map = T1_map*1000; % Convert to ms xAxis = [0:size(T1_map,2)-1]; yAxis = [0:size(T1_map,1)-1]; % Raw MRI data at different TI values S_INV1 = imrotate(squeeze(MP2RAGEINV1img.img(:,130,:)),180); S_INV2 = imrotate(squeeze(MP2RAGEINV2img.img(:,130,:)),180); B1map = imrotate(-0.5+1/4095*double(squeeze(MP2RAGEimg.img(:,130,:))),180); %get T1_map --from Octave %get S_INV1 --from Octave %get S_INV2 --from Octave %get B1map --from Octave %get xAxis --from Octave %get yAxis --from Octave init_notebook_mode(connected=True) # The polling here is to ensure that plotly.js has already been loaded before # setting display alignment in order to avoid a race condition. 
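# The four heatmap traces below hold, in order, the first inversion image (S_INV1), the second inversion image (S_INV2), the S_MP2RAGE map (the rescaled UNI image computed above as B1map), and the fitted T1 map; the dropdown buttons defined in updatemenus toggle which input image is displayed next to the T1 map.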
trace1 = go.Heatmap(x = xAxis, y = yAxis, z=S_INV1, colorscale='Greys', showscale = False, visible=False, name = 'Signal') trace2 = go.Heatmap(x = xAxis, y = yAxis, z=S_INV2, colorscale='Greys', showscale = False, visible=False, name = 'Signal') trace3 = go.Heatmap(x = xAxis, y = yAxis, z=B1map, zmin=-0.5, zmax=0.5, colorscale='RdBu', showscale = False, visible=True, name = 'S_MP2RAGE') trace5 = go.Heatmap(x = xAxis, y = yAxis, z=T1_map, zmin=0.0, zmax=5000, colorscale='Portland', xaxis='x2', yaxis='y2', visible=True, name = 'T1 values (ms)') data=[trace1, trace2, trace3, trace5] updatemenus = list([ dict(active=2, x = 0.09, xanchor = 'left', y = -0.15, yanchor = 'bottom', direction = 'up', font=dict( family='Times New Roman', size=16 ), buttons=list([ dict(label = 'SINV1', method = 'update', args = [{'visible': [True, False, False, True]}, ]), dict(label = 'SINV2', method = 'update', args = [{'visible': [False, True, False, True]}, ]), dict(label = 'SMP2RAGE', method = 'update', args = [{'visible': [False, False, True, True]}, ]) ]) ) ]) layout = dict( width=560, height=345, margin = dict( t=40, r=50, b=10, l=50), annotations=[ dict( x=0.055, y=1.15, showarrow=False, text='Input Data', font=dict( family='Times New Roman', size=26 ), xref='paper', yref='paper' ), dict( x=0.6, y=1.15, showarrow=False, text='T1 map', font=dict( family='Times New Roman', size=26 ), xref='paper', yref='paper' ), dict( x=1.22, y=1.15, showarrow=False, text='T1 (ms)', font=dict( family='Times New Roman', size=26 ), xref='paper', yref='paper' ), ], xaxis = dict(range = [0,206], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0, 0.58]), yaxis = dict(range = [0,215], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0, 1]), xaxis2 = dict(range = [0,206], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0.40, 0.98]), yaxis2 = dict(range = [0,215], autorange = False, showgrid = False, zeroline = False, showticklabels = False, ticks = '', domain=[0, 1], anchor='x2'), showlegend = False, autosize = False, updatemenus=updatemenus ) fig = dict(data=data, layout=layout) iplot(fig, filename = 'basic-heatmap', config = config)The MP2RAGE pulse sequence is increasingly being distributed by MRI vendors, thus typically a data fitting package is also available to reconstruct the T1 maps online. Alternatively, several open source packages to create T1 maps from MP2RAGE data are available online (Marques 2017; de Hollander 2017), and for new users these are recommended—as opposed to programming one from scratch—as there are many potential pitfalls (e.g. adjusting the equations to handle partial Fourier or parallel imaging acceleration). Benefits and Pitfalls This widespread availability and its turnkey acquisition/fitting procedures are a main contributing factor to the growing interest for including quantitative T1 maps in clinical and neuroscience studies. T1 values measured using MP2RAGE showed high levels of reproducibility for the brain in an inter- and intra-site study at eight sites (same MRI hardware/software and at 7 T) of two subjects (Voelker et al. 2016). Not only does MP2RAGE have one of the fastest acquisition and post-processing times among quantitative T1 mapping techniques, but it can accomplish this while acquiring very high resolution T1 maps (1 mm isotropic at 3T and submillimeter at 7T, both in under 10 min (Fujimoto et al. 
2014)), opening the doors to cortical studies which greatly benefit from the smaller voxel size (Waehnert et al. 2014; Beck et al. 2018; Haast et al. 2018).Despite these benefits, MP2RAGE and similar dictionary-based techniques have certain limitations that are important to consider before deciding to incorporate them in a study. Good reproducibility of the quantitative T1 map is dependent on using one pre-calculated dictionary. If two different dictionaries are used (e.g. cross-site with different MRI vendors), the differences in the dictionary interpolations will likely result in minor differences in T1 estimates for the same data. Also, although the B1-sensitivity of the MP2RAGE T1 maps can be reduced with proper protocol optimization, it can be substantial enough that further correction using a measured B1 map should be done (Marques & Gruetter 2013; Haast et al. 2018). However B1 mapping brings an additional potential source of error, so carefully selecting a B1 mapping technique and accompanying post-processing method (e.g. filtering) should be done before integrating it in a T1 mapping protocol (Boudreau et al. 2017). Lastly, the MP2RAGE equations (and thus, dictionaries) assume monoexponential longitudinal relaxation, and this has been shown to result in suboptimal estimates of the long T1 component for a biexponential relaxation model (Rioux et al. 2016), an effect that becomes more important at higher fields. Works Cited Beck, E.S. et al., 2018. Improved Visualization of Cortical Lesions in Multiple Sclerosis Using 7T MP2RAGE. Am. J. Neuroradiol. Available at: http://dx.doi.org/10.3174/ajnr.A5534.Boudreau, M. et al., 2017. B1 mapping for bias-correction in quantitative T1 imaging of the brain at 3T using standard pulse sequences. J. Magn. Reson. Imaging, 46(6), pp.1673–1682.Doneva, M. et al., 2010. Compressed sensing reconstruction for magnetic resonance parameter mapping. Magn. Reson. Med., 64(4), pp.1114–1120.Fujimoto, K. et al., 2014. Quantitative comparison of cortical surface reconstructions from MP2RAGE and multi-echo MPRAGE data at 3 and 7 T. NeuroImage, 90, pp.60–73.Haase, A. et al., 1989. Inversion recovery snapshot FLASH MR imaging. J. Comput. Assist. Tomogr., 13(6), pp.1036–1040.Haast, R.A.M., . & ., 2018. The impact of B1+ correction on MP2RAGE cortical T1 and apparent cortical thickness at 7T. Hum. Brain Mapp., 39(6), pp.2412–2425.de Hollander, G., 2017. PyMP2RAGE. Available at: https://github.com/Gilles86/pymp2rage [Accessed January 2, 2019].., . & ., 2012. Fast cardiac T1 mapping in mice using a model-based compressed sensing method. Magn. Reson. Med., 68(4), pp.1127–1134.Ma, D. et al., 2013. Magnetic resonance fingerprinting. Nature, 495(7440), pp.187–192.., 2017. MP2RAGE related scripts. Available at: https://github.com/JosePMarques/MP2RAGE-related-scripts [Accessed January 2, 2019].Marques, J.P. et al., 2010. MP2RAGE, a self bias-field corrected sequence for improved segmentation and T1-mapping at high field. NeuroImage, 49(2), pp.1271–1281.. & ., 2013. New Developments and Applications of the MP2RAGE Sequence - Focusing the Contrast and High Spatial Resolution R1 Mapping. PloS one, 8(7), p.e69294.Mugler, J.P., 3rd & Brookeman, J.R., 1990. Three-dimensional magnetization-prepared rapid gradient-echo imaging (3D MP RAGE). Magn. Reson. Med., 15(1), pp.152–157.Rioux, J.A., . & ., 2016. Biexponential longitudinal relaxation in white matter: Characterization and impact on T1 mapping with IR-FSE and MP2RAGE. Magn. Reson. Med., 75(6), pp.2265–2277.Voelker, M.N. 
et al., 2016. The traveling heads: multicenter brain imaging at 7 Tesla. Magma, 29(3), pp.399–415.Waehnert, M.D. et al., 2014. Anatomically motivated modeling of cortical laminae. NeuroImage, 93 Pt 2, pp.210–220. Errata A previous version of this text incorrectly stated the equations for TB and TC. They were written as TB = TI2 - TI1 + (n/2)TR and TC = TRMP2RAGE - TI2 + (n/2)TR, whereas they should have been TB = TI2 - (TI1 + nTR) and TC = TRMP2RAGE - (TI2 + (n/2)TR). This has been corrected in the text. Thank you to (LinkedIn, Twitter) from the MRI Methods Research Group at McGill University for notifying us of this error.# PYTHON CODE display(HTML( '' ))Client Churn Prediction--- The Top Bank company operates in Europe with a bank account as its main product; this account can hold the client's salary and be used to make payments. The account is free for the first 12 months, but after that trial period the client needs to rehire the bank for the upcoming 12 months and repeat this process every year. Recently the Analytics Team noticed that the churn rate is increasing.As a Data Science Consultant, you need to create an action plan to decrease the number of churn customers and show the financial return of your solution.At the end of your consultancy, you need to deliver to the Top Bank CEO a model in production, which will receive a customer base via API and return that same base with an extra column containing the probability of each customer going into churn.In addition, you will need to provide a report describing your model's performance and the financial impact of your solution. Questions that the CEO and the Analytics team would like to see in their report:1. What is Top Bank's current churn rate? How does it vary monthly?2. What is the performance of the model in classifying customers as churns?3. What is the expected return, in terms of revenue, if the company uses its model to avoid churn from customers? > Disclaimer: This is a fictional business case PLANNING Input- Predict whether a customer will be in churn;- Dataset with sales records and customer info. Output- Which customers will be in churn;- Churn rate of the company;- Performance of the model;- Action plan Tasks 1. Which customers will be in churn: - What is the criterion? - Downtime - Time remaining until the contract ends2. Current churn rate of the company: - Calculate churn rate - Calculate monthly churn rate and variation3. Performance of the model: - TopK score - Precision at K score - Recall at K score4. Action plan: - Discount - Voucher - Deposit bonus IMPORTSimport pickle import warnings import inflection import numpy as np import pandas as pd import matplotlib.pyplot as plt # needed by the plotting helpers below import seaborn as sns import scikitplot as skplt from IPython.core.display import HTML from IPython.core.display import Image from boruta import BorutaPy from sklearn.model_selection import train_test_split from imblearn.combine import SMOTETomek from xgboost import XGBClassifier, DMatrix from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import MinMaxScaler, RobustScalerHELPER FUNCTIONSdef numerical_metrics(data): """ Shows the main values for descriptive statistics in numerical variables.
Args: data ([float64 and int64]): Insert all numerical attributes in the dataset Returns: [dataframe]: A dataframe with mean, median, std deviation, skewness, kurtosis, min, max and range """ data_mean = pd.DataFrame(data.apply(np.mean)).T data_median = pd.DataFrame(data.apply(np.median)).T data_std = pd.DataFrame(data.apply(np.std)).T data_min = pd.DataFrame(data.apply(min)).T data_max = pd.DataFrame(data.apply(max)).T data_range = pd.DataFrame(data.apply(lambda x: x.max() - x.min())).T data_q1 = pd.DataFrame(data.apply(lambda x: np.quantile(x, .25) )).T data_q3 = pd.DataFrame(data.apply(lambda x: np.quantile(x, .75) )).T data_skew = pd.DataFrame(data.apply(lambda x: x.skew())).T data_kurtosis = pd.DataFrame(data.apply(lambda x: x.kurtosis())).T num_attributes = pd.concat([data_min,data_max,data_range,data_mean,data_median, data_q1, data_q3,data_std,data_skew,data_kurtosis]).T.reset_index() num_attributes.columns = ['attributes','min','max','range','mean','median','q1','q3', 'st deviation','skewness','kurtosis'] return num_attributes def categorical_metrics(data, col): """ Shows the the absolute and percent values in categorical variables. Args: data ([object]): Insert all categorical attributes in the dataset Returns: [dataframe]: A dataframe with absolute and percent values """ return pd.DataFrame({'absolute': data[col].value_counts(), 'percent %': data[col].value_counts(normalize = True) * 100 }) def plot_label(title, xlabel, ylabel, fontsize): plt.title(title, fontsize = 18) plt.xlabel(xlabel, fontsize = fontsize) plt.ylabel(ylabel, fontsize = fontsize) def jupyter_settings(): %matplotlib inline %pylab inline plt.style.use( 'tableau-colorblind10' ) plt.rcParams['figure.figsize'] = [25, 12] plt.rcParams['font.size'] = 24 display( HTML( '') ) pd.options.display.max_columns = None pd.options.display.max_rows = None pd.set_option( 'display.expand_frame_repr', False ) sns.set() jupyter_settings() def correlation_matrix(data, id, method): """Generates a correlation matrix of numerical variables Args: data ([DataFrame]): [The dataframe of the EDA] id ([int]): [The customer reference number] method ([string]): [The method used, it can be ‘pearson’, ‘kendall’ or ‘spearman’] Returns: [Image]: [The correlation matrix plot made with seaborn] """ # correlation num_attributes = data.select_dtypes(include = ['int64', 'float64']) num_attributes = num_attributes.drop(columns = id ) correlation = num_attributes.corr(method = method) # mask mask = np.zeros_like(correlation) mask = np.triu(np.ones_like(correlation, dtype=np.bool)) # plot ax = sns.heatmap(correlation, mask = mask, fmt = '.2f', vmin = -1, vmax = 1, annot = True, cmap = 'magma', square = True) return ax def sum_of_na(data): return pd.DataFrame({'Sum of NA': data.isna().sum(), '% NA': data.isna().sum()/data.shape[0]}) # ignoring errors warnings.filterwarnings('ignore') # round pd.options.display.float_format = '{:.3f}'.format seed = 42DATA This dataset is avaliable in: https://www.kaggle.com/mervetorkan/churndataset- **Data fields**- **RowNumber**: the number of the columns- **CustomerID**: unique identifier of clients- **Surname**: client's last name- **CreditScore**: clien'ts credit score for the financial market- **Geography**: the country of the client- **Gender**: the gender of the client- **Age**: the client's age- **Tenure**: number of years the client is in the bank - **Balance**: the amount that the client has in their account - **NumOfProducts**: the number of products that the client bought - **HasCrCard**: if the client has a 
credit card - **IsActiveMember**: if the client is active (within the last 12 months) - **EstimateSalary**: estimative of anual salary of clients - **Exited**: if the client is a churn (*target variable*)homepath = '/home/gutto/repos/pa003_client_churn/' df_raw = pd.read_csv(homepath + 'data/raw/churn.csv', low_memory = False) pickle.dump(df_raw, open(homepath + 'data/processed/df_raw.pkl', 'wb'))1. DATA DESCRIPTION - **Tasks** - Dataset First Look - Organizing Columns - Data Overview - Check Zero Values - Check Data Types - Descriptive Statistics 1.1 Dataset First Lookdf1 = pickle.load(open(homepath + 'data/processed/df_raw.pkl', 'rb')) df1.head().T df1.duplicated('CustomerId').sum()1.2 Organizing Columns These columns will not be used#Remove columns df1 = df1.drop(['CustomerId', 'RowNumber', 'Surname'], axis = 1) # changing to snakecase type cols_old = df1.columns snakecase = lambda x: inflection.underscore(x) cols_new = list(map(snakecase, cols_old)) df1.columns = cols_new df1.columns df1 = df1.reindex(columns=['is_active_member', 'exited', 'has_cr_card', 'credit_score', 'num_of_products', 'estimated_salary', 'balance', 'tenure', 'age', 'gender', 'geography']) df1.head()1.3 Data Overviewprint(f'Number of rows: {df1.shape[0]} \nNumber of columns: {df1.shape[1]}') df1.info() RangeIndex: 10000 entries, 0 to 9999 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 is_active_member 10000 non-null int64 1 exited 10000 non-null int64 2 has_cr_card 10000 non-null int64 3 credit_score 10000 non-null int64 4 num_of_products 10000 non-null int64 5 estimated_salary 10000 non-null float64 6 balance 10000 non-null float64 7 tenure 10000 non-null int64 8 age 10000 non-null int64 9 gender 10000 non-null object 10 geography 10000 non-null object dtypes: float64(2), int64(7), object(2) memory usage: 859.5+ KBis_active_member, exited and has_cr_card should be strings There's no NA in this dataset, however, there's a possibility that there are zeros in place of NAs 1.4 Check Zero Valuesdf1.head() df1_verify = df1[['credit_score', 'num_of_products', 'estimated_salary', 'balance', 'age']] for col in df1_verify.columns: df1_verify[col] = df1_verify[col].apply(lambda x: '0' if x ==0 else 'another value') df1_verify.apply(lambda x: x.unique()) df1_verify['balance'].value_counts()The balance columns has more than 1/3 of zero values, it doesn't mean that they're missing values but the DS team should get more information about this 1.5 Change Data Typescol_cat = ['has_cr_card', 'is_active_member', 'exited'] for col in col_cat: df1[col] = df1[col].apply(lambda x: 'yes' if x == 1 else 'no')1.6 Descriptive Statistics 1.6.1 Numerical Attributesnum_attributes = df1.select_dtypes(include = ['int64', 'float64']) num_cols = num_attributes.columns.tolist() num_attributes_result = numerical_metrics(num_attributes[num_cols]) num_attributes_result1.6.2 Outliersi = 1 for col in df1[num_cols]: plt.subplot(3, 3, i) ax = sns.boxplot(data = df1, x = col) i += 11.6.3 Categorical Attributescategorical_attributes = df1.select_dtypes(exclude = ['int64', 'float64']) categorical_attributes.columns categorical_metrics(categorical_attributes, 'is_active_member') categorical_metrics(categorical_attributes, 'has_cr_card') categorical_metrics(categorical_attributes, 'exited') categorical_metrics(categorical_attributes, 'gender') categorical_metrics(categorical_attributes, 'geography')Important informations:- There are outliers in **credit_score, num_of_products and age**- The **churn ratio is 
20.37%**;- **70.6%** of the members **has credit card**;- More than **50% of the clients** are **from France** 1.7 Save Statepickle.dump(df1, open(homepath + 'data/processed/df1.pkl', 'wb'))2. FEATURE ENGINEERING - **Tasks** - Dataset Checkpoint - Features Creatrion - Save State 2.1 Dataset Checkpointdf2 = pickle.load(open(homepath + 'data/processed/df1.pkl', 'rb'))2.2 Features Creationdf2.columns1. Creating `avg_salary`:avg_salary = np.mean(df2.loc[:, 'estimated_salary'].values) df2['avg_salary'] = avg_salary2. Creating `avg_credit_score`:avg_credit_score = np.mean(df2.loc[:, 'credit_score'].values) df2['avg_credit_score'] = avg_credit_score3. Creating `avg_balance`:avg_balance = np.mean(df2.loc[:, 'balance'].values) df2['avg_balance'] = avg_balance4. Creating `avg_tenure`:avg_tenure = np.mean(df2.loc[:, 'tenure'].values) df2['avg_tenure'] = avg_tenure5. Creating `estimated_salary_per_credit_score`:estimated_salary_cs = df2.loc[:, ['credit_score', 'estimated_salary']].groupby('credit_score').mean().reset_index() estimated_salary_cs.columns = ['credit_score', 'estimated_salary_per_credit_score'] # merge df2 = pd.merge(df2, estimated_salary_cs, on = 'credit_score', how = 'left')6. Creating `estimated_salary_per_gender`:estimated_salary_gender = df2.loc[:, ['gender', 'estimated_salary']].groupby('gender').mean().reset_index() estimated_salary_gender.columns = ['gender', 'estimated_salary_per_gender'] # merge df2 = pd.merge(df2, estimated_salary_gender, on = 'gender', how = 'left')7. Creating `estimated_salary_per_country`:estimated_salary_country = df2.loc[:, ['geography', 'estimated_salary']].groupby('geography').mean().reset_index() estimated_salary_country.columns = ['geography', '_estimated_salary_per_country'] # merge df2 = pd.merge(df2, estimated_salary_country, on = 'geography', how = 'left')8. Creating `balance_per_age`:balance_age = df2.loc[:, ['age', 'balance']].groupby('age').mean().reset_index() balance_age.columns = ['age', 'balance_per_age'] # merge df2 = pd.merge(df2, balance_age, on = 'age', how = 'left')9. Creating `balance_per_country`:balance_country = df2.loc[:, ['geography', 'balance']].groupby('geography').mean().reset_index() balance_country.columns = ['geography', 'balance_per_country'] # merge df2 = pd.merge(df2, balance_country, on = 'geography', how = 'left')10. Creating `balance_per_num_of_products`:balance_number_of_products = df2.loc[:, ['num_of_products', 'balance']].groupby('num_of_products').mean().reset_index() balance_number_of_products.columns = ['num_of_products', 'balance_per_num_of_products'] # merge df2 = pd.merge(df2, balance_number_of_products, on = 'num_of_products', how = 'left')11. Creating LTV:balance_tenure = df2.loc[:, ['tenure', 'balance']].groupby('tenure').mean().reset_index() balance_tenure.columns = ['tenure', 'LTV'] # merge df2 = pd.merge(df2, balance_tenure, on = 'tenure', how = 'left')12. Creating estimated_salary_credit_card:estimated_salary_credit_card = df2.loc[:, ['has_cr_card', 'estimated_salary']].groupby('has_cr_card').mean().reset_index() estimated_salary_credit_card.columns = ['has_cr_card', 'estimated_salary_credit_card'] # merge df2 = pd.merge(df2, estimated_salary_credit_card, on = 'has_cr_card', how = 'left')13. Creating tenure_per_country:tenure_country = df2.loc[:, ['geography', 'tenure']].groupby('geography').mean().reset_index() tenure_country.columns = ['geography', 'tenure_per_country'] # merge df2 = pd.merge(df2, tenure_country, on = 'geography', how = 'left')14. 
Creating tenure_per_num_of_products:tenure_num_of_products = df2.loc[:, ['num_of_products', 'tenure']].groupby('num_of_products').mean().reset_index() tenure_num_of_products.columns = ['num_of_products', 'tenure_per_num_of_products'] # merge df2 = pd.merge(df2, tenure_num_of_products, on = 'num_of_products', how = 'left')15. Creating num_of_products_per_age:num_of_products_age = df2.loc[:, ['age', 'num_of_products']].groupby('age').mean().reset_index() num_of_products_age.columns = ['age', 'num_of_products_per_age'] # merge df2 = pd.merge(df2, num_of_products_age, on = 'age', how = 'left')16. Creating num_of_products_per_country:num_of_products_country = df2.loc[:, ['geography', 'num_of_products']].groupby('geography').mean().reset_index() num_of_products_country.columns = ['geography', 'num_of_products_per_country'] # merge df2 = pd.merge(df2, num_of_products_country, on = 'geography', how = 'left') verify_nan = pd.DataFrame({'Sum of NA': df2.isna().sum(), '% NA': df2.isna().sum()/df2.shape[0]}) verify_nan df2.sample()2.3 Save Statepickle.dump(df2, open(homepath + 'data/processed/df2.pkl', 'wb'))3. DATA FILTERING - **Tasks** - Dataset Checkpoint - Features - Save State 3.1 Dataset Checkpointdf3 = pickle.load(open(homepath + 'data/processed/df2.pkl', 'rb'))3.3 Save Statepickle.dump(df3, open(homepath + 'data/processed/df3.pkl', 'wb'))4.0 EXPLORATORY DATA ANALYSIS - **Tasks** - Univariate Analysis - Bivariate Analysis - Multivariate Analysis 4.1 Dataset Checkpointdf4 = pickle.load(open(homepath + 'data/processed/df3.pkl', 'rb'))4.3 Univariate Analysis 4.4 Bivariate Analysis 4.5 Multivariate Analysis 4.6 Save Statepickle.dump(df4, open(homepath + 'data/processed/df4.pkl', 'wb'))5.0 DATA PREPARATION - **Tasks** - Dataset Checkpoint - Encoding - Rescaling - Split Data - Balancing Data - Feature Selection - Save State 5.1 Dataset Checkpointdf5 = pickle.load(open(homepath + 'data/processed/df4.pkl', 'rb')) df5.drop(['avg_salary', 'avg_credit_score', 'avg_balance', 'avg_tenure'], axis = 1, inplace = True)5.2 Encoding A form to transform categorical variables into numerical# label encoding gender_encoding = {'Male': 0 , 'Female': 1} df5['gender'] = df5['gender'].map(gender_encoding) questions_encoding = {'yes': 1,'no': 0} df5['is_active_member'] = df3['is_active_member'].map(questions_encoding) df5['has_cr_card'] = df5['has_cr_card'].map(questions_encoding) df5['exited'] = df5['exited'].map(questions_encoding) # one hot encoding encoding df5 = pd.get_dummies(df5, prefix=['country'], columns=['geography']) df5 = pd.get_dummies(df5, prefix=['gender'], columns=['gender']) df5.columns5.3 Rescaling [Reference](https://towardsdatascience.com/scale-standardize-or-normalize-with-scikit-learn-6ccc7d176a02)i = 1 for col in df5.columns: plt.subplot(8, 4, i) sns.kdeplot(df5[col]) i += 1 df5.columns # Min-Max Scaler mms = MinMaxScaler() min_max_scaler = ['is_active_member', 'exited', 'has_cr_card', 'estimated_salary', 'balance', 'tenure', 'estimated_salary_per_gender', '_estimated_salary_per_country', 'balance_per_country', 'LTV', 'estimated_salary_credit_card', 'tenure_per_country', 'country_France', 'country_Germany', 'country_Spain', 'gender_0', 'gender_1'] for col in min_max_scaler: df5[col] = mms.fit_transform(df5[[col]].values) # Robust Scaler rs = RobustScaler() robust_scaler = ['credit_score', 'num_of_products', 'age', 'estimated_salary_per_credit_score', 'balance_per_age', 'balance_per_num_of_products', 'tenure_per_num_of_products', 'num_of_products_per_age', 'num_of_products_per_country'] for col in 
i = 1 for col in df5.columns: plt.subplot(8, 4, i) sns.kdeplot(df5[col]) i += 1 df5.columns # Min-Max Scaler mms = MinMaxScaler() min_max_scaler = ['is_active_member', 'exited', 'has_cr_card', 'estimated_salary', 'balance', 'tenure', 'estimated_salary_per_gender', '_estimated_salary_per_country', 'balance_per_country', 'LTV', 'estimated_salary_credit_card', 'tenure_per_country', 'country_France', 'country_Germany', 'country_Spain', 'gender_0', 'gender_1'] for col in min_max_scaler: df5[col] = mms.fit_transform(df5[[col]].values) # Robust Scaler rs = RobustScaler() robust_scaler = ['credit_score', 'num_of_products', 'age', 'estimated_salary_per_credit_score', 'balance_per_age', 'balance_per_num_of_products', 'tenure_per_num_of_products', 'num_of_products_per_age', 'num_of_products_per_country'] for col in robust_scaler: df5[col] = rs.fit_transform(df5[[col]].values) i = 1 for col in df5.columns: plt.subplot(8, 4, i) sns.kdeplot(df5[col]) i += 1 5.4 Split Data The target is **exited**X = df5.drop(['exited'], axis = 1) #target y = df5['exited'] X_train_imbalanced, X_test, y_train_imbalanced, y_test = train_test_split(X , y, test_size = 0.33, shuffle = True, random_state = seed)5.5 Balancing Data In this dataset, **the proportion of churn clients is 20.4% and the remaining 79.6% are non-churn clients**. The target variable is imbalanced, a problem for machine learning algorithms that need **balanced data to perform well**.[Reference](https://towardsdatascience.com/applying-smote-for-class-imbalance-with-just-a-few-lines-of-code-python-cdf603e58688)balancer = SMOTETomek(sampling_strategy = 'minority', random_state = seed, n_jobs = 1) X_train, y_train = balancer.fit_resample(X_train_imbalanced, y_train_imbalanced) # creating subplots fig, (ax1, ax2) = plt.subplots(1, 2) ax = sns.countplot(x = y_train_imbalanced, ax = ax1, palette = 'Set2').set_title('Imbalanced Data') ax = sns.countplot(x = y_train, ax = ax2, palette = 'Set2').set_title('Balanced Data')5.6 Feature Selectionrf = RandomForestClassifier(n_estimators = 500, n_jobs = -1, random_state = seed) boruta = BorutaPy(rf, n_estimators = 'auto', verbose = 2, random_state = seed).fit(X_train.values, y_train.values) cols_selected = boruta.support_.tolist() cols_selected_boruta = X_train.iloc[:, cols_selected].columns.to_list() cols_not_selected_boruta = list( np.setdiff1d(X_train.columns, cols_selected_boruta)) cols_selected_boruta cols_not_selected_boruta X_train.columns cols_drop = ['_estimated_salary_per_country', 'balance_per_country', 'country_France', 'country_Germany', 'country_Spain', 'estimated_salary_credit_card', 'estimated_salary_per_gender', 'gender_0', 'gender_1', 'has_cr_card', 'is_active_member', 'num_of_products_per_country', 'tenure_per_country'] X_train.drop(cols_drop, axis=1, inplace=True) X_test.drop(cols_drop, axis=1, inplace=True)6.0 MACHINE LEARNING MODELLING - **Tasks** -#model definition rf = RandomForestClassifier(n_estimators = 300, random_state = seed) #model training rf.fit(X_train, y_train) #model prediction yhat_rf = rf.predict_proba(X_test) # Cumulative gain skplt.metrics.plot_cumulative_gain(y_test, yhat_rf); plt.title( 'Random Forest', fontsize = 18);Investigate to what $M_{\rm ext}$ and $R_{\rm ext}$ we are sensitive toimport sys sys.path.append("/Users/yuhanyao/Documents/Github/AT2019dge/playground/") import numpy as np import pandas as pd import astropy.constants as const from helper.models_piro2020 import model_piro20 from query_marshal import mylinear_fit fs= 14 import matplotlib matplotlib.rcParams['font.size']=fs import matplotlib.pyplot as plt from astropy.cosmology import FlatLambdaCDM import astropy.constants as const cosmo = FlatLambdaCDM(H0=70., Om0=0.275) d_pc = cosmo.luminosity_distance([0.0213])[0].value * 1e+6 # in pcLight Curve Zoom with ProgramIDtb = pd.read_csv("../../data/photometry/P48/det_program.csv") tb = tb.drop(columns=["diffimgname", "psfimgname", "jdobs"]) tb["phase"] = tb["mjd"].values - 58583.2 ix = tb["Fratio"] > 3*tb["Fratio_unc"] tb = tb[ix] tb["snr"] = tb["Fratio"].values / tb["Fratio_unc"].values tb["mag"] = -2.5 * np.log10(tb["Fratio"].values) tb["emag"] = 2.5 / np.log(10) * tb["Fratio_unc"].values / tb["Fratio"].values %matplotlib notebook pids = [1,2] markersizes = [6,4] fids = [1,2] colors = ["royalblue", "crimson"] markers = ["o", "s"] alphas = [1, 0.2]
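# Quick sanity check of the magnitude conversion above:
# m = -2.5 * log10(Fratio), and by first-order error propagation
# sigma_m = |dm/dF| * sigma_F = (2.5 / ln 10) * (sigma_F / F).
# Illustrative numbers only, not taken from the detection table.
_F, _eF = 0.01, 0.001
print(-2.5 * np.log10(_F), 2.5 / np.log(10) * _eF / _F)  # 5.0 mag, ~0.109 mag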
plt.figure(figsize = (6, 4)) ax = plt.subplot(111) for i in range(len(pids)): pid = pids[i] markersize = markersizes[i] subtb = tb[tb.programid==pid] alpha = alphas[i] for j in range(len(fids)): fid = fids[j] color = colors[j] marker = markers[j] subsubtb = subtb[subtb.filterid==fid] xx = subsubtb["phase"].values yy = subsubtb["mag"].values eyy = subsubtb["emag"].values if pid==1 and fid==1: mylabel = "$g$ Public Survey" if pid==1 and fid==2: mylabel = "$r$ Public Survey" if pid==2 and fid==1: mylabel = "$g$ High Cadence Survey" if pid==2 and fid==2: mylabel = "$r$ High Cadence Survey" ax.errorbar(xx, yy, eyy, fmt = ".", marker = marker, markersize=markersize, color = color, alpha = alpha, label = mylabel, zorder = 3-j) ax.set_xlim(-3.2, 17.8) ax.set_ylim(21.5, 18.3) ax.grid(ls = ":") ax.xaxis.set_major_locator(plt.MultipleLocator(2)) ax.xaxis.set_minor_locator(plt.MultipleLocator(1)) ax.yaxis.set_major_locator(plt.MultipleLocator(0.5)) ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) ax.tick_params(which = 'major', length = 4, top=True, right=True) ax.tick_params(which = 'minor', length = 2, top=True, right=True) ax.set_ylabel("Observed Magnitude") ax.set_xlabel("MDJ$-58583.2$ (d)") ax.legend(ncol = 1, fontsize = fs-2, frameon = True, fancybox = True, bbox_to_anchor=(0.35, 0.), loc = "lower center", edgecolor = "k", framealpha=1) plt.tight_layout() # plt.savefig("../../paper/figures/lc_programids.pdf") tb1 = tb[tb.programid==1] tbr = tb1[tb1.filterid == 2] tbg = tb1[tb1.filterid == 1] tbr = tbr[tbr.phase>=0] tbr = tbr[tbr.phase<=14] tbg = tbg[tbg.phase>=0] tbg = tbg[tbg.phase<=14] mylinear_fit(tbr["phase"].values, tbr["mag"].values, np.ones(len(tbr))) mylinear_fit(tbg["phase"].values, tbg["mag"].values, np.ones(len(tbg))) 1.208646 - (-2.731597) 1.256597 - (-2.752257)Shock Cooling Modellingtgrid = np.linspace(0.2, 8, 781) tgrid = np.linspace(0.1, 8, 791) R_exts = np.linspace(3e+12, 5e+13, 95) M_exts = np.linspace(0.01, 0.22, 22) wvg = 4813.97 wvr = 6421.81 wvs = np.array([wvg, wvr]) mpeaks = np.zeros((len(R_exts), len(M_exts), len(wvs))) # g and r trises = np.zeros((len(R_exts), len(M_exts), len(wvs))) # g and r trises_ = np.zeros((len(R_exts), len(M_exts), len(wvs))) # g and r mpeaks.shape for i in range(len(R_exts)): for j in range(len(M_exts)): for k in range(len(wvs)): wave = wvs[k] freq = const.c.cgs.value / (wave * 1e-8) mymodel = model_piro20(tgrid, wave, Renv=R_exts[i], Menv_=M_exts[j], Eext49 = 5.3) mymodel = mymodel / (4 * np.pi * (10 * const.pc.cgs.value)**2) # erg/s/cm^2/AA mymodel = mymodel*wave # erg/s/cm^2 mymodel = mymodel/freq # erg/s/cm^2/Hz f0 = 3631e-23 fratio = mymodel / f0 id_peak = np.argsort(fratio)[-1] fratio_peak = fratio[id_peak] m_peak = -2.5*np.log10(fratio_peak) t_peak = tgrid[id_peak] tgrid_rise = tgrid[:id_peak] fratio_rise = fratio[:id_peak] mpeaks[i, j, k] = m_peak # 0.75 mag id_half = np.argsort(abs(fratio_rise - 0.5*fratio_peak))[0] fratio_half = fratio_rise[id_half] t_half = tgrid_rise[id_half] trise = t_peak - t_half #print (m_peak, trise) trises[i, j, k] = trise # 1.5 mag id_qua = np.argsort(abs(fratio_rise - 0.25*fratio_peak))[0] fratio_qua = fratio_rise[id_qua] t_qua = tgrid_rise[id_qua] trise_ = t_peak - t_qua trises_[i, j, k] = trise_ X, Y = np.meshgrid(M_exts, R_exts) fig = plt.figure(figsize = (6, 4)) ax = plt.subplot(111) cp = ax.contourf(X, Y/1e+13, mpeaks[:,:,0], # [-18, -17, -16, -15], origin = "lower", zorder = 1, cmap = plt.cm.summer_r) cp2 = ax.contour(X, Y/1e+13, mpeaks[:,:,0], levels=[-17, -16.5, -16], origin = "lower", zorder = 2, 
colors = "k") ax.clabel(cp2, cp2.levels, inline=True, fmt="%.1f", fontsize=fs) ax.set_xlim(M_exts[0], M_exts[-1]) ax.set_ylim(R_exts[0]/1e+13, R_exts[-1]/1e+13) cbar = fig.colorbar(cp) ax.set_xlabel(r"$M_{\rm ext}$"+" ("+r"$M_\odot$"+")") ax.set_ylabel(r"$R_{\rm ext}$"+" ("+r"$10^{13}$"+"cm)") cbar.ax.set_ylabel(r"$M_{\rm peak}$"+" in $g$-band (mag)") ax.xaxis.set_major_locator(plt.MultipleLocator(0.05)) ax.xaxis.set_minor_locator(plt.MultipleLocator(0.01)) ax.yaxis.set_major_locator(plt.MultipleLocator(1)) ax.yaxis.set_minor_locator(plt.MultipleLocator(0.5)) ax.tick_params(which = 'major', length = 4, top=True, right=True) ax.tick_params(which = 'minor', length = 2, top=True, right=True) ax.plot(0.0971, 1.19, color = 'r', zorder = 3, marker = "*", markersize= 12, alpha = 0.5) plt.tight_layout() plt.savefig("../../paper/figures/cooling_Mpeak.pdf") fig = plt.figure(figsize = (6, 4)) ax = plt.subplot(111) cp = ax.contourf(X, Y/1e+13, trises[:,:,1],# [-18, -17, -16, -15], origin = "lower", zorder = 1, cmap = plt.cm.summer) cp2 = ax.contour(X, Y/1e+13, trises[:,:,1], levels=[1, 2, 3], origin = "lower", zorder = 2, colors = "k") ax.contourf(X, Y/1e+13, trises_[:,:,1], levels=[4.1, 1e+3], origin = "lower", hatches=['x'], alpha = 0., color = "k") ax.contour(X, Y/1e+13, trises_[:,:,1], levels=[4.1, 1e+3], origin = "lower", color = "k") ax.clabel(cp2, cp2.levels, inline=True, fmt="%.1f", fontsize=fs) ax.set_xlim(M_exts[0], M_exts[-1]) ax.set_ylim(R_exts[0]/1e+13, R_exts[-1]/1e+13) cbar = fig.colorbar(cp) ax.set_xlabel(r"$M_{\rm ext}$"+" ("+r"$M_\odot$"+")") ax.set_ylabel(r"$R_{\rm ext}$"+" ("+r"$10^{13}$"+"cm)") cbar.ax.set_ylabel(r"$t_{\rm rise}$"+" in $r$-band (d)") ax.xaxis.set_major_locator(plt.MultipleLocator(0.05)) ax.xaxis.set_minor_locator(plt.MultipleLocator(0.01)) ax.yaxis.set_major_locator(plt.MultipleLocator(1)) ax.yaxis.set_minor_locator(plt.MultipleLocator(0.5)) ax.tick_params(which = 'major', length = 4, top=True, right=True) ax.tick_params(which = 'minor', length = 2, top=True, right=True) ax.plot(0.0971, 1.19, color = 'r', zorder = 3, marker = "*", markersize= 12, alpha = 0.5) plt.tight_layout() plt.savefig("../../paper/figures/cooling_trise.pdf") -16.5 + 2.5 * np.log10((100e+6 / 10)**2) -16.5 + 2.5 * np.log10((150e+6 / 10)**2)Third party Data Import - PlanetScope This Jupyter Notebook demonstrates how to import **PlanetScope** data into Sentinel Hub and then access it.We will use **Simple search** and **Order products**[1]. Useful links: [1] TPDI API documentation: https://docs.sentinel-hub.com/api/latest//DATA_IMPORT_API/README [2] TPDI API reference (Swagger docs): https://docs.sentinel-hub.com/api/latest/reference/tag/dataimport_planet [3] About PlanetScope data: https://docs.sentinel-hub.com/api/latest//data/PlanetScope Imports and credentials If you are running the JN localy fill in **config.py** file with your credentials. 
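For the local case, `config.py` only needs to expose the two Sentinel Hub OAuth values; a minimal sketch (the variable names here are an assumption, match them to whatever the notebook imports):

```python
# config.py -- illustrative sketch only; keep this file out of version control
client_id = "your-sentinel-hub-oauth-client-id"
client_secret = "your-sentinel-hub-oauth-client-secret"
```

In that case you would load them with `from config import client_id, client_secret` instead of reading environment variables.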
If you are running the JN on EDC, you will need a SH subscription and SH credentials will be automatically available.# Read SH credentials from environment variables import os client_id = os.environ['SH_CLIENT_ID'] client_secret = os.environ['SH_CLIENT_SECRET'] import requests import json from oauthlib.oauth2 import BackendApplicationClient from requests_oauthlib import OAuth2Session import os import datetime import dateutil.parser # Create a session client = BackendApplicationClient(client_id=client_id) oauth = OAuth2Session(client=client) # Get token for the session token = oauth.fetch_token(token_url='https://services.sentinel-hub.com/oauth/token', client_id=client_id, client_secret=client_secret) #Get my quota url = f"https://services.sentinel-hub.com/dataimport/v1/quotas" r = oauth.get(url=url) r.json()Searchurl = "https://services.sentinel-hub.com/dataimport/v1/search" aoi = {'type': 'Polygon', 'coordinates': [[[12.560804463221928, 41.885177999999996], [12.585804463221926, 41.885177999999996], [12.585804463221926, 41.910178], [12.560804463221928, 41.910178], [12.560804463221928, 41.885177999999996]]]} aoi = {"type": "Polygon", "coordinates": [[[12.567054463221927, 41.891428], [12.579554463221926, 41.891428], [12.579554463221926, 41.903928], [12.567054463221927, 41.903928], [12.567054463221927, 41.891428]]]} query = { "provider": "PLANET", "planetApiKey": PLANET_API_KEY, "bounds": { "geometry": aoi }, "data": [ { "itemType": "PSScene4Band", "dataFilter": { "timeRange": { "from": "2020-04-15T00:00:00.000Z", "to": "2020-04-17T00:00:00.000Z" }, "maxCloudCoverage": 30 } } ] } response = oauth.post(url, json=query) response.raise_for_status() results = response.json() search_results = [(feature["id"], feature["properties"]['acquired'], \ feature["properties"]['cloud_cover']) \ for feature in results["features"]] print("Product id \t acquisition date&time \t cloudcover ") for item in search_results: print(item[0], item[1], item[2]) # Additional filtering of search results by cloud coverage item_ids = [feature['id'] for feature in results["features"] if feature["properties"]['cloud_cover'] == 0] item_idsOrder data importurl = "https://services.sentinel-hub.com/dataimport/v1/orders" query['data'][0]["productBundle"] = "analytic" name = "" payload = { "name": name, "input": query } response = oauth.post(url, json=payload) response.raise_for_status() order = response.json() order_id = order['id'] area = order['sqkm'] print(f"Order {name}, id: {order_id}, area: {area} km^2")Order anja test, id: 109af2d3-b72e-4304-ba8c-a0d5d8f8dd96, area: 4.968239307460755 km^2Confirm the orderurl = f"https://services.sentinel-hub.com/dataimport/v1/orders/{order_id}/confirm" response = oauth.post(url) response.raise_for_status() response.json()Check Statusorder_id = order_id url = f"https://services.sentinel-hub.com/dataimport/v1/orders/{order_id}" response = oauth.get(url) response.raise_for_status() order_status = response.json() order_statusList all ordersurl = "https://services.sentinel-hub.com/dataimport/v1/orders" response = oauth.get(url) response.raise_for_status() response.json()本文将介绍如何利用 Arctern 处理大型地理空间数据,并使用 keplergl 进行数据可视化分析上海渣土车数据集。加载上海市渣土车运行轨迹数据,原数据共 8 列,此处只读取本文分析所需要的 4 列:车牌号、时间、经度、维度。import pandas as pd import arctern from arctern import GeoSeries sh_columns=[ ("plate_number","string"), ("pos_time","string"), ("pos_longitude","double"), ("pos_latitude","double"), ("pos_direction0","double"), ("pos_direction1","double"), ("pos_direction2","double"), ("pos_direction3","double") ] 
sh_select_columns={ "plate_number", "pos_time", "pos_longitude", "pos_latitude" } sh_schema={} sh_use_cols=[] sh_names=[] for idx in range(len(sh_columns)): if sh_columns[idx][0] in sh_select_columns: sh_schema[sh_columns[idx][0]] = sh_columns[idx][1] sh_use_cols.append(idx) sh_names.append(sh_columns[idx][0]) sh_df = pd.read_csv("/tmp/20181016.txt", usecols=sh_use_cols, names=sh_names, dtype=sh_schema, header=None, delimiter="\t", date_parser=pd.to_datetime, parse_dates=["pos_time"])Using the longitude and latitude, construct a point geometry for each record:sh_df["pos_point"]=GeoSeries.point(sh_df.pos_longitude,sh_df.pos_latitude) sh_dfGiven a plate number we can reconstruct that truck's trajectory. First pick one plate number and select all of its records:one_trunck_plate_number=sh_df.plate_number[0] print(one_trunck_plate_number) one_truck_df = sh_df[sh_df.plate_number==one_trunck_plate_number]沪DK7362Plot all trajectory points of this truck on the map:from keplergl import KeplerGl KeplerGl(data={"car_pos": pd.DataFrame(data={'car_pos':one_truck_df.pos_point.to_wkt()})})User Guide: https://docs.kepler.gl/docs/keplergl-jupyterNext we look at this truck's trajectory against the Shanghai road network. First load the road network data:sh_roads=pd.read_csv("/tmp/sh_roads.csv", dtype={"roads":"string"}, usecols=[0], names=["roads"], header=None, delimiter='|') sh_roads=GeoSeries(sh_roads.roads) sh_roadsPlot the trajectory points and the road network on the same map:one_truck_roads=KeplerGl(data={"car_pos": pd.DataFrame(data={'car_pos':one_truck_df.pos_point.to_wkt()})}) one_truck_roads.add_data(data=pd.DataFrame(data={'sh_roads':sh_roads.to_wkt()}),name="sh_roads") one_truck_roadsUser Guide: https://docs.kepler.gl/docs/keplergl-jupyterZooming in, we can see that some trajectory points do not lie on any road. We treat these off-road points as noise and snap each point to its nearest road:is_near_road=arctern.near_road(sh_roads,sh_df.pos_point) sh_near_road_df=sh_df[is_near_road] on_road=arctern.nearest_location_on_road(sh_roads, sh_near_road_df.pos_point) on_road=GeoSeries(on_road) on_roadUsing the points snapped onto the roads, rebuild the `DataFrame`:sh_on_road_df=pd.DataFrame(data={"plate_number":sh_near_road_df.plate_number, "pos_time":sh_near_road_df.pos_time, "on_road":on_road }) sh_on_road_dfPlot the snapped trajectory points of the same truck together with the Shanghai road network again:one_on_road_df=sh_on_road_df[sh_on_road_df.plate_number==one_trunck_plate_number] one_on_roads=KeplerGl(data={"car_pos": pd.DataFrame(data={'car_pos':one_on_road_df.on_road.to_wkt()})}) one_on_roads.add_data(data=pd.DataFrame(data={'sh_roads':sh_roads.to_wkt()}),name="sh_roads") one_on_roadsUser Guide: https://docs.kepler.gl/docs/keplergl-jupyterAfter zooming in, all points now lie on the roads. Analysis The Shanghai road network contains 74693 road records, but not every road is visited by the dump trucks. We now analyse the roads the trucks actually travel on and look for the most frequently used ones. First select all roads visited by the trucks:all_roads=arctern.nearest_road(sh_roads,sh_on_road_df.on_road) all_roads=GeoSeries(all_roads) road_codes, road_uniques = pd.factorize(all_roads)Print the number of roads visited by the trucks and their share of the whole road network:print(len(road_uniques)) print(len(road_uniques)*100.0/len(sh_roads))16450 22.0234827895519Plot all roads visited by the trucks:KeplerGl(data={"all_roads": pd.DataFrame(data={'all_roads':GeoSeries(road_uniques).to_wkt()})})User Guide: https://docs.kepler.gl/docs/keplergl-jupyterWe can see that every truck is likely to pass along the main arterial roads, so these roads accumulate many `GPS` sample points; and because arterials carry heavy traffic, the trucks move slowly on them, which further increases the number of `GPS` samples recorded there. Next we use the number of truck `GPS` samples on each road to find the busier roads. Count the `GPS` samples per road and rebuild a `DataFrame`, taking the sample count on a road as its weight:roads_codes_series = pd.Series(road_codes) roads_codes_series = roads_codes_series.value_counts() roads_codes_series = roads_codes_series.sort_index() sh_road_weight = pd.DataFrame(data={"on_road":GeoSeries(road_uniques), "weight_value":roads_codes_series }) sh_road_weightShow summary statistics of the road weights:sh_road_weight.weight_value.describe()Most roads are not busy, but a few are extremely busy. Plotting `weight_value` as a bar chart further confirms this:import matplotlib.pyplot as plt plt.bar(sh_road_weight.index,sh_road_weight.weight_value) plt.show()
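Before sorting, it is also worth checking how concentrated the traffic is, i.e. what share of all snapped `GPS` samples the heaviest roads absorb; a short sketch reusing the `sh_road_weight` table built above (the exact figure depends on the data):

```python
# Share of all snapped GPS samples carried by the 100 heaviest roads.
top100_share = (sh_road_weight["weight_value"].nlargest(100).sum()
                / sh_road_weight["weight_value"].sum())
print(f"top-100 roads carry {top100_share:.1%} of the samples")
```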
Sort all roads by weight:sh_sorted_road=sh_road_weight.sort_values(by=['weight_value'],ascending=False) sh_sorted_roadSelect the 100 busiest road segments:sh_sorted_road.iloc[0:100]Plot the 100 busiest road segments on the map:KeplerGl(data={"on_roads": pd.DataFrame(data={'on_roads':sh_sorted_road.on_road.iloc[0:100].to_wkt()})})User Guide: https://docs.kepler.gl/docs/keplergl-jupyterMIT LicenseCopyright (c) 2021 and Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Train/Valid/Test Splitting (text files)DATADIR/HMDB51/labelstvt/ will be made. HMDB51 Original Naming Rules of Label Texts`glob` does not work for "[" or "]". Use "[[]" and "[]]" instead. `path.replace("[", "[[").replace("]", "[]]").replace("[[", "[[]")` does a good job (see the short illustration right after the Preliminaries cell below).``` Naming rules in label text file There are totally 153 files in this folder,[action]_test_split[1-3].txt corresponding to three splits reported in the paper.The format of each file is[video_name] [id]The video is included in the training set if id is 1The video is included in the testing set if id is 2The video is not included for training/testing if id is 0There should be 70 videos with id 1 , 30 videos with id 2 in each txt file.PROPERTY LABELS (ABBREVIATION)visible body parts head(h), upper body(u), full body (f), lower body(l)camera motion motion (cm), static (nm)number of people involved in the action Single (np1), two (np2), three (np3)camera viewpoint Front (fr), back (ba), left(le), right(ri)video quality good (goo), medium (med), ok (bad) Templates label file names:ClassName_test_split[1-3].txtvideo names:VideoName_ClassName_VisibleBodyParts_CameraMotion_NumberOfPeopleInvolvedInTheAction_CameraViewpoint_VideoQuality_Number\.avi ID Examples in class "smile" my_smile_smile_h_cm_np1_fr_goo_0.avi 1prelinger_LetsPlay1949_smile_h_nm_np1_fr_goo_27.avi 2prelinger_LetsPlay1949_smile_h_nm_np1_le_goo_25.avi 2prelinger_LetsPlay1949_smile_u_nm_np1_fr_med_24.avi 0prelinger_LetsPlay1949_smile_u_nm_np1_ri_med_21.avi 2prelinger_they_grow_up_so_fast_1_smile_u_nm_np1_fr_med_0.avi 1show_your_smile_-)_smile_h_nm_np1_fr_med_0.avi 1showyoursmile_smile_h_nm_np1_fr_goo_0.avi 1smile_collection_7_smile_h_nm_np1_fr_goo_0.avi 1smile_collection_7_smile_h_nm_np1_fr_goo_1.avi 1youtube_smile_response_smile_h_nm_np1_fr_goo_0.avi 1```from glob import glob import os, shutil from copy import copy, deepcopy import statistics import matplotlib.pyplot as plt import numpy as npPreliminariesDATADIR = "Define this first. E.g., /data/t-miyagawa" splitnum = 1 # Official splitting. 1, 2, or 3.
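# A self-contained illustration of the bracket-escaping trick mentioned above:
# glob treats "[" and "]" as character-class syntax, so literal brackets have to
# become "[[]" and "[]]". The chained replace first doubles "[", then escapes "]",
# and finally turns the doubled "[[" into "[[]". The sample path is made up.
_demo_path = "HMDB51png/wave/clip_[take2]"
_escaped = _demo_path.replace("[", "[[").replace("]", "[]]").replace("[[", "[[]")
print(_escaped)  # HMDB51png/wave/clip_[[]take2[]]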
# Get videodir and numf datadir = "{}/HMDB51png".format(DATADIR) classdir = sorted(glob(datadir + "/*")) classdir = [i + "/" for i in classdir] classnames = [i[i.rfind("HMDB51png/") + 10 : -1] for i in classdir] videodir = { k : sorted(glob([v for v in classdir if v.find("/" + k + "/") != -1][0] + "/*")) for k in classnames} numf = dict() for k in classnames: v1 = videodir[k] v2 = [i.replace("[", "[[").replace("]", "[]]").replace("[[", "[[]") for i in v1] numf[k] = [len(glob(_video + "/*.png")) for _video in v2] # Smear the keys numf_concat = [] for k in classnames: v = numf[k] numf_concat.extend(v) videodir_concat = [] for k in classnames: v = videodir[k] videodir_concat.extend(v) # Classwise num of frames numf_classwise = [] for k in classnames: v = numf[k] v = sum(v) numf_classwise.append(v) # Classwise num of videos (clips) numv_classwise = [] for k in classnames: v = videodir[k] v = len(v) numv_classwise.append(v) # Classwise num of unique videos (groups) numuv_classwise = [] for k in classnames: v1 = videodir[k] # ['DATADIR/HMDB51png/wave/20060723sfjffbartsinger_wave_f_cm_np1_ba_med_0', # 'DATADIR/HMDB51png/wave/21_wave_u_nm_np1_fr_goo_5', # 'DATADIR/HMDB51png/wave/50_FIRST_DATES_wave_f_cm_np1_fr_med_0', # 'DATADIR/HMDB51png/wave/50_FIRST_DATES_wave_u_cm_np1_fr_goo_30', # 'DATADIR/HMDB51png/wave/50_FIRST_DATES_wave_u_cm_np1_fr_med_1', # 'DATADIR/HMDB51png/wave/50_FIRST_DATES_wave_u_cm_np1_fr_med_36', v2 = [i[i.rfind("/")+1:] for i in v1] # ['20060723sfjffbartsinger_wave_f_cm_np1_ba_med_0', # '21_wave_u_nm_np1_fr_goo_5', # '50_FIRST_DATES_wave_f_cm_np1_fr_med_0', # '50_FIRST_DATES_wave_u_cm_np1_fr_goo_30', # '50_FIRST_DATES_wave_u_cm_np1_fr_med_1', # '50_FIRST_DATES_wave_u_cm_np1_fr_med_36', v3 = [i[:i.rfind(k)-1] for i in v2] # ['20060723sfjffbartsinger', # '21', # '50_FIRST_DATES', # '50_FIRST_DATES', # '50_FIRST_DATES', # '50_FIRST_DATES', v4 = [] for i in v3: if not i in v4: v4.append(i) # ['20060723sfjffbartsinger', # '21', # '50_FIRST_DATES', numuv_classwise.append(len(v4)) # """ # Returns: # classnames: List. Len = Num of classes. Names of classes in alphabetical order. # # videodir: Dict. Paths to video directories. Each values (paths) are in alphabetical order of video names. # numf: Dict. Num of frames for each videos. Each values (integers) are in alphabetical order of video names. # # numf_concat: List. Len = Num of total videos. Order is the same as `videoddir_concat`. # videodir_concat: List. Len = Num of total videos. Order is the same as `numf_concat`. # # numf_classwise: List. Len = Num of classes. The classwise numbers of frames in alphabetical order of class names. # numv_classwise: List. Len = Num of classes. The classwise numbers of videos in alphabetical order of class names. # numuv_classwise: List. Len = Num of classes. The classwise numbers of unique videos (groups) in alphabetical order of class names. 
# """Save [train,valid,test]list[n].txtChange `n` = splitnum.# Define file names ################################################# orglabeldir = "{}/HMDB51/labels".foramt(DATADIR) newtrtxt = "{}/HMDB51/labelstvt/trainlist0{}.txt".format(DATADIR, splitnum) newvatxt = "{}/HMDB51/labelstvt/validlist0{}.txt".format(DATADIR, splitnum) newtetxt = "{}/HMDB51/labelstvt/testlist0{}.txt".format(DATADIR, splitnum) # Load label text files ################################################# trclips = [] trclips_dc = dict() trgroups = dict() trgroups_concat = [] teclips = [] tegroups = dict() tegroups_concat = [] num_clips = 0 for classname in classnames: labeltxt = orglabeldir + "/{}_test_split{}.txt".format(classname, splitnum) # classwise with open(labeltxt) as f: clips = f.readlines() # list num_clips += len(clips) # e.g., wave_test_split1.txt: # ['20060723sfjffbartsinger_wave_f_cm_np1_ba_med_0.avi 2 \n', # '21_wave_u_nm_np1_fr_goo_5.avi 1 \n', # '50_FIRST_DATES_wave_f_cm_np1_fr_med_0.avi 1 \n', # '50_FIRST_DATES_wave_u_cm_np1_fr_goo_30.avi 1 \n', # '50_FIRST_DATES_wave_u_cm_np1_fr_med_1.avi 1 \n', # '50_FIRST_DATES_wave_u_cm_np1_fr_med_36.avi 1 \n', ...'... \n'] tmp1 = ["/" + clip[:clip.rfind(".avi")] for clip in clips if clip[-3] == "1"] trclips_dc[classname] = tmp1 trclips.extend(tmp1) # e.g., ("/VideoName_ClassName_MetaData") # trclips = # ['/April_09_brush_hair_u_nm_np1_ba_goo_0', # '/April_09_brush_hair_u_nm_np1_ba_goo_1', # '/April_09_brush_hair_u_nm_np1_ba_goo_2', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np1_ri_med_3', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_goo_0', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_goo_1', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_med_2', ...] tmp2 = ["/" + clip[:clip.rfind("_{}_".format(classname))] + "_" + classname for clip in clips if clip[-3] == "1"] # "_" + classname is necessary, because there are some videos with the same VideoName in different classes. # (There is no duplication of VideoName within each single class, though). trgroups[classname] = tmp2 # There can be duplicate strings in THE list in a key. # e.g,, ("/VideoName_ClassName") # tfgroups["bruch_hair"] = # ['/April_09_brush_hair', # '/April_09_brush_hair', # '/April_09_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', ...] trgroups_concat.extend(tmp2) assert len(tmp1) == len(tmp2) tmp3 = ["/" + clip[:clip.rfind(".avi")] for clip in clips if clip[-3] == "2"] teclips.extend(tmp3) tmp4 = ["/" + clip[:clip.rfind("_{}_".format(classname))] + "_" + classname for clip in clips if clip[-3] == "2"] # "_" + classname is necessary, because there are some videos with the same VideoName in different classes. # (There is no duplication of VideoName within each single class, though). tegroups[classname] = tmp4 tegroups_concat.extend(tmp4) assert len(tmp3) == len(tmp4) # Assert assert len(trclips) + len(teclips) < num_clips # because ID 0 is removed from train/val/test split. 
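# Note on the parsing above: every label line has the form "<video_name>.avi <id> \n",
# where id 1 = training, 2 = testing, 0 = excluded, so clip[-3] picks out the id.
# An equivalent, more explicit read of the id, shown on a sample line from class "smile":
_sample = "my_smile_smile_h_cm_np1_fr_goo_0.avi 1 \n"
assert _sample[-3] == _sample.split()[-1] == "1"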
# Assert: no duplicated names and no contamination in tr and te for i, v in enumerate(trclips): assert not v in teclips, "{}, {}".format(i, v) for i, v in enumerate(teclips): assert not v in trclips, "{}, {}".format(i, v) for i, v in enumerate(trgroups_concat): assert not v in tegroups_concat, "{}, {}".format(i, v) for i, v in enumerate(tegroups_concat): assert not v in trgroups_concat, "{}, {}".format(i, v) # Create index set for unique videos ... ################################################# trgroupsu_idxset_of_trgroups = dict() for k, v in trgroups.items(): idxset = [] for cnt, itr_clip in enumerate(v): if cnt == 0: tmp_clipname = itr_clip tmp_idx = 0 idxset.append(tmp_idx) else: if tmp_clipname != itr_clip: tmp_clipname = itr_clip tmp_idx += 1 idxset.append(tmp_idx) assert len(idxset) == len(v) trgroupsu_idxset_of_trgroups[k] = idxset # e.g., # trgroupsu_idxset_of_trgroups = # {'brush_hair': [0, # 0, # 0, # 1, # 1, # 1, # 1, # 2, # 2, ...,22,22,23,24,25,25], ... # ... and extract unique-group list ################################################# trgroupsu = dict() for k, v in deepcopy(trgroups).items(): _tmp = list(np.unique(v)) # no duplication assert len(_tmp) == trgroupsu_idxset_of_trgroups[k][-1] + 1 trgroupsu[k] = list(np.unique(v)) # no duplication tegroupsu = dict() for k, v in deepcopy(tegroups).items(): tegroupsu[k] = list(np.unique(v)) # no duplication # Note: # trgroups = # {'brush_hair': # ['/April_09_brush_hair', # '/April_09_brush_hair', # '/April_09_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', ... # is now # trgroupsu = # {'brush_hair': # ['/April_09_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', ... # New train/valid/test split ################################################# newtegroupsu = deepcopy(tegroupsu) newtrgroupsu = [] newvagroupsu = [] newtrgroupsu_idxset_of_trgroups = dict() newvagroupsu_idxset_of_trgroups = dict() for c, cls in enumerate(classnames): _tmp = trgroupsu[cls] numva = int(len(_tmp) * 0.1) # num of validation examples in class `cls` assert numva > 0 for v in _tmp: assert not v in newtrgroupsu # check there's no duplication assert not v in newvagroupsu # check there's no duplication newtrgroupsu_idxset_of_trgroups[cls] = [i for i in trgroupsu_idxset_of_trgroups[cls] if i <= trgroupsu_idxset_of_trgroups[cls][-1] - numva] newvagroupsu_idxset_of_trgroups[cls] = [i for i in trgroupsu_idxset_of_trgroups[cls] if i > trgroupsu_idxset_of_trgroups[cls][-1] - numva] # e.g., # newtrgroupsu_idxset_of_trgroups = # {'brush_hair': [0, # 0, # 0, # 1, # 1, # 1, # 1, # 2, # 2, ..., 22,22,23], ... 
for i in newtrgroupsu_idxset_of_trgroups[cls]: assert not i in newvagroupsu_idxset_of_trgroups # no duplication for i in newvagroupsu_idxset_of_trgroups[cls]: assert not i in newtrgroupsu_idxset_of_trgroups # no duplication _tmptr = _tmp[:- numva] _tmpva = _tmp[- numva:] assert len(_tmptr) == newtrgroupsu_idxset_of_trgroups[cls][-1] + 1 newtrgroupsu.extend(_tmptr) newvagroupsu.extend(_tmpva) # e.g., # newtrgroupsu = # ['/April_09_brush_hair', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair', # '/Blonde_being_brushed_brush_hair', # '/Brunette_Foxyanya_ultra_silky_long_hair_brushing_hairjob_brush_hair', # '/Brushing_Hair_with_Beth_brush_hair', # Assert for i, v in enumerate(newvagroupsu): assert not v in newtrgroupsu, "{}, {}".format(i, v) for i, v in enumerate(newtrgroupsu): assert not v in newtegroupsu, "{}, {}".format(i, v) for i, v in enumerate(newtegroupsu): assert not v in newvagroupsu, "{}, {}".format(i, v) # Fetch clip numbers ################################################# newteclips = copy(teclips) newtrclips = [] newvaclips = [] for classname in classnames: idxset_tr = newtrgroupsu_idxset_of_trgroups[classname] idxset_va = newvagroupsu_idxset_of_trgroups[classname] newtrclips.extend(trclips_dc[classname][:len(idxset_tr)]) # trclips_dc (and trclips) includes both tr and va clips newvaclips.extend(trclips_dc[classname][len(idxset_tr):]) # trclips_dc (and trclips) includes both tr and va clips assert 0 < len(newtrclips) < len(trclips) assert 0 < len(newvaclips) < len(trclips) assert len(newvaclips) + len(newtrclips) == len(trclips), "Contamination (tr & va) detected!" for v in newvaclips: assert v in trclips for v in newtrclips: assert v in trclips assert len(trclips) == len(np.unique(trclips)) # e.g., # newtrclips = # ['/April_09_brush_hair_u_nm_np1_ba_goo_0', # '/April_09_brush_hair_u_nm_np1_ba_goo_1', # '/April_09_brush_hair_u_nm_np1_ba_goo_2', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np1_ri_med_3', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_goo_0', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_goo_1', # '/Aussie_Brunette_Brushing_Hair_II_brush_hair_u_nm_np2_le_med_2', # '/Blonde_being_brushed_brush_hair_f_nm_np2_ri_med_0', # '/Blonde_being_brushed_brush_hair_u_cm_np2_ri_med_1', # Save train/valid/testlist01.txt # E.g., "DATADIR/HMDB51/labels/ucfTrainValidTestlist/validlist01.txt" ################################################# # Comment-outed for safety with open(newtrtxt, mode="w") as f: l = len(newtrclips) for i, v in enumerate(newtrclips): f.write(v + " \n") with open(newvatxt, mode="w") as f: l = len(newvaclips) for i, v in enumerate(newvaclips): f.write(v + " \n") with open(newtetxt, mode="w") as f: l = len(newteclips) for i, v in enumerate(newteclips): f.write(v + " \n") # Assert with open(newtrtxt, mode="r") as f: a = f.readlines() with open(newvatxt, mode="r") as f: b = f.readlines() with open(newtetxt, mode="r") as f: c = f.readlines() for i in a: assert not i in b assert not i in c for i in b: assert not i in a assert not i in c for i in c: assert not i in a assert not i in bBinary classification correcteddf_train = pd.read_csv('dataset/merged1.csv', sep=';') df_train_x = df_train['Text'] for i in range(len(df_train_x)): df_train_x[i] = df_train_x[i].replace('\n', ' ').replace(',', ' ').lower() df_train_y = df_train['Label'] X_train, X_test, y_train, y_test = train_test_split(df_train_x, df_train_y, test_size=0.30, random_state=42, stratify=df_train_y) clf = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", 
tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf.fit(X_train, y_train) y_pred = clf.predict(y_test) cm = confusion_matrix(y_test, y_pred) y_unique = y_test.unique() cm_df = pd.DataFrame(cm, index = [y_unique], columns = [y_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy : ", metrics.accuracy_score(y_test, y_pred)) x = random.randrange(0, 1000) print(x) X = X_test.iloc[[x]] #print(X) y = y_test.iloc[x] y_pred = clf.predict(X) print(y, y_pred[0]) df_train_all = pd.read_csv('dataset/train_all.csv', sep=';') df_train_all_x = df_train_all['Text'] for i in range(len(df_train_all_x)): df_train_all_x[i] = df_train_all_x[i].replace('\n', ' ').replace(',', ' ').lower() df_train_all_y = df_train_all['Label'] print(df_train_all.shape) unique, counts = np.unique(df_train_all_y, return_counts=True) plt.figure(figsize=(20,10)) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() unique, counts = np.unique(df_train_all_y, return_counts=True) print(unique) print(counts)['Audio' 'Computer Vision' 'General' 'Graphs' 'Natural Language Processing' 'Reinforcement Learning' 'Sequential'] [ 286 13664 7896 363 1928 1237 454]Nice versionclass DataframeContainer: def __init__(self, name, inputFilename): self.name = name self.inputFilename = inputFilename self.dataframe = pd.read_csv(inputFilename, sep=';') def filter_dataframe(self): count = 0 for ind, row in self.dataframe.iterrows(): if self.name != str(row['Label']): count += 1 row['Label'] = 'Other' row['Text'] = row['Text'].replace('\n', ' ').replace(',', ' ').lower() print(f'{self.name} filtered {count} rows') def separate_x_y(self): self.df_X, self.df_y = self.dataframe['Text'], self.dataframe['Label'] unique, counts = np.unique(self.df_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() def split_train_test(self, test_size = 0.2, random_state = 42): self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.df_X, self.df_y, test_size=test_size, random_state=random_state, stratify=self.df_y) def clf_fit_cv_ru_lsvc(self): self.clf = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('linearsvc', LinearSVC(random_state=42)) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def clf_fit_tfidf_ru_lsvc(self): self.clf = Pipeline([ ('tfidf', TfidfVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('linearsvc', LinearSVC(random_state=42)) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def clf_fit_tfidf_ru_rfc(self): self.clf = Pipeline([ ('tfidf', TfidfVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('randomforest', RandomForestClassifier(max_depth=None,random_state=1)) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def clf_fit_cv_ru_rfc(self): self.clf = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, 
max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('randomforest', RandomForestClassifier(max_depth=None,random_state=1)) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def clf_fit_tfidf_ru_mnb(self): self.clf = Pipeline([ ('tfidf', TfidfVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('multinomualnb', MultinomialNB()) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def clf_fit_cv_ru_mnb(self): self.clf = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), ('undersample', RandomUnderSampler(sampling_strategy='majority')), ('multinomialnb', MultinomialNB()) ]) self.clf.fit(self.X_train, self.y_train) print(f'{self.name} clf fit done') def predict(self): self.y_pred = self.clf.predict(self.X_test) def confusion_matrix_macro(self): y_unique = self.y_test.unique() cm = confusion_matrix(self.y_test, self.y_pred, labels=y_unique) cm_df = pd.DataFrame(cm, index = [y_unique], columns = [y_unique]) plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True, fmt='d') plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print(f"Accuracy {self.name} : {metrics.accuracy_score(self.y_test, self.y_pred)}") m = metrics.precision_recall_fscore_support(self.y_test, self.y_pred, average='macro') self.precision = m[0] self.recall = m[1] self.f1score = m[2] print(f"Precision {self.name} : {m[0]} \nRecall {self.name} : {m[1]} \nF1-score {self.name} : {m[2]}") def confusion_matrix_weighted(self): y_unique = self.y_test.unique() cm = confusion_matrix(self.y_test, self.y_pred, labels=y_unique) cm_df = pd.DataFrame(cm, index = [y_unique], columns = [y_unique]) plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True, fmt='d') plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print(f"Accuracy {self.name} : {metrics.accuracy_score(self.y_test, self.y_pred)}") m = metrics.precision_recall_fscore_support(self.y_test, self.y_pred, average='weighted') self.precision = m[0] self.recall = m[1] self.f1score = m[2] print(f"Precision {self.name} : {m[0]} \nRecall {self.name} : {m[1]} \nF1-score {self.name} : {m[2]}") def cv(self): scores = cross_val_score(self.clf, self.df_X, self.df_y, cv=3, scoring='f1') print(scores) def save_pickle(self): self.currentDatetime = datetime.now().strftime("%d_%m_%Y_%H_%M_%S") self.pickleFilename = f"{self.name}.sav" Path('../results/pickles/' + self.currentDatetime).mkdir(exist_ok=True) pickle.dump(self.clf, open('../results/pickles/' + self.currentDatetime + '/' + self.pickleFilename, 'wb')) def printScoreboard(self): csvFileName = f"{self.name.lower().replace(' ', '_')}.csv" csvExists = os.path.exists('../results/scoreboards/' + csvFileName) with open('../results/scoreboards/' + csvFileName, 'a+') as csvfile: csvWriter = csv.writer(csvfile, delimiter=';') if not csvExists: csvWriter.writerow(["Pipeline", "Input", "F1 score", "Precision", "Recall", "Pickle file name", "Datetime", "Sklearn version"]) csvWriter.writerow([self.clf.steps, self.inputFilename, self.f1score, self.precision, self.recall, self.currentDatetime + '/' + self.pickleFilename, self.currentDatetime, pkg_resources.get_distribution('sklearn').version]) names_list = ["Audio", "Computer Vision", "General", "Graphs", "Natural Language Processing", 
"Reinforcement Learning", "Sequential"] #names_list = ["Graphs"] dataframecontainers_list = [DataframeContainer(name, '../data/somef_data1.csv') for name in names_list] for container in dataframecontainers_list: container.filter_dataframe() container.separate_x_y() container.split_train_test() for container in dataframecontainers_list: container.clf_fit_tfidf_ru_mnb() for container in dataframecontainers_list: container.predict() container.confusion_matrix_macro() #container.cv() for container in dataframecontainers_list: container.save_pickle() container.printScoreboard() def finaltest(df_train_all): pos = 0 df_train_all_x = df_train_all['Text'] for i in range(len(df_train_all_x)): df_train_all_x[i] = df_train_all_x[i].replace('\n', ' ').replace(',', ' ').lower() df_train_all_y = df_train_all['Label'] for x in range(df_train_all_x.shape[0]): X = df_train_all_x.iloc[[x]] y = df_train_all_y.iloc[x] #predictions = [dataframe.clf.predict(X)[0] for dataframe in dataframecontainers_list] for dataframecontainer in dataframecontainers_list: #print(dataframecontainer.name) prediction = dataframecontainer.clf.predict(X)[0] # p.append(prediction) if prediction != 'Other' and prediction == y: pos += 1 break #print(p, y) print(pos/df_train_all_x.shape[0]) finaltest(df_train_all)0.9969025863404057First trydf_audio = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_audio.iterrows(): if str('Audio') != str(row['Label']): row['Label'] = 'Other' count += 1 print('Audio done', count) df_cv = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_cv.iterrows(): if str('Computer Vision') != str(row['Label']): count += 1 row['Label'] = 'Other' print('Computer vision done', count) df_general = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_general.iterrows(): if str('General') != str(row['Label']): row['Label'] = 'Other' count += 1 print('General done', count) df_graphs = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_graphs.iterrows(): if str('Graphs') != str(row['Label']): row['Label'] = 'Other' count += 1 print('Graphs done', count) df_nlp = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_nlp.iterrows(): if str('Natural Language Processing') != str(row['Label']): row['Label'] = 'Other' count += 1 print('NLP done', count) df_rl = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_rl.iterrows(): if str('Reinforcement Learning') != str(row['Label']): count += 1 row['Label'] = 'Other' print('RL done', count) df_sequential = pd.read_csv('dataset/train_all.csv', sep=';') count = 0 for ind, row in df_sequential.iterrows(): if str('Sequential') != str(row['Label']): count += 1 row['Label'] = 'Other' print('Seqential done', count) df_audio_x = df_audio['Text'] df_audio_y = df_audio['Label'] unique, counts = np.unique(df_audio_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_cv_x = df_cv['Text'] df_cv_y = df_cv['Label'] unique, counts = np.unique(df_cv_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_general_x = df_general['Text'] df_general_y = df_general['Label'] unique, counts = np.unique(df_general_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_graphs_x = df_graphs['Text'] df_graphs_y = 
df_graphs['Label'] unique, counts = np.unique(df_graphs_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_nlp_x = df_nlp['Text'] df_nlp_y = df_nlp['Label'] unique, counts = np.unique(df_nlp_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_rl_x = df_rl['Text'] df_rl_y = df_rl['Label'] unique, counts = np.unique(df_rl_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() df_sequential_x = df_sequential['Text'] df_sequential_y = df_sequential['Label'] unique, counts = np.unique(df_sequential_y , return_counts=True) plt.bar(unique, counts, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() X_audio_train, X_audio_test, y_audio_train, y_audio_test = train_test_split(df_audio_x, df_audio_y, test_size=0.30, random_state=42, stratify=df_audio_y) X_cv_train, X_cv_test, y_cv_train, y_cv_test = train_test_split(df_cv_x, df_cv_y, test_size=0.30, random_state=42, stratify=df_cv_y) X_general_train, X_general_test, y_general_train, y_general_test = train_test_split(df_general_x, df_general_y, test_size=0.30, random_state=42, stratify=df_general_y) X_graphs_train, X_graphs_test, y_graphs_train, y_graphs_test = train_test_split(df_graphs_x, df_graphs_y, test_size=0.30, random_state=42, stratify=df_graphs_y) X_nlp_train, X_nlp_test, y_nlp_train, y_nlp_test = train_test_split(df_nlp_x, df_nlp_y, test_size=0.30, random_state=42, stratify=df_nlp_y) X_rl_train, X_rl_test, y_rl_train, y_rl_test = train_test_split(df_rl_x, df_rl_y, test_size=0.30, random_state=42, stratify=df_rl_y) X_sequential_train, X_sequential_test, y_sequential_train, y_sequential_test = train_test_split(df_sequential_x, df_sequential_y, test_size=0.30, random_state=42, stratify=df_sequential_y) clf_audio = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_audio.fit(X_audio_train, y_audio_train) print('Audio done') clf_cv = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_cv.fit(X_cv_train, y_cv_train) print('CV done') clf_general = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_general.fit(X_general_train, y_general_train) print('General done') clf_graphs = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_graphs.fit(X_graphs_train, y_graphs_train) print('Graphs done') clf_nlp = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_nlp.fit(X_nlp_train, y_nlp_train) print('NLP done') clf_rl = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', 
LinearSVC(random_state=42)) ]) clf_rl.fit(X_rl_train, y_rl_train) print('RL done') clf_sequential = Pipeline([ ('vectorizer', CountVectorizer(analyzer="word", tokenizer=word_tokenize, max_features=None, lowercase=True)), #('smote', SMOTE(random_state=42)), ('classifier', LinearSVC(random_state=42)) ]) clf_sequential.fit(X_sequential_train, y_sequential_train) print('Sequential done') y_audio_pred = clf_audio.predict(X_audio_test) y_cv_pred = clf_cv.predict(X_cv_test) y_general_pred = clf_general.predict(X_general_test) y_graphs_pred = clf_graphs.predict(X_graphs_test) y_nlp_pred = clf_nlp.predict(X_nlp_test) y_rl_pred = clf_rl.predict(X_rl_test) y_sequential_pred = clf_sequential.predict(X_sequential_test) cm = confusion_matrix(y_audio_test, y_audio_pred) y_audio_unique = y_audio_test.unique() cm_df = pd.DataFrame(cm, index = [y_audio_unique], columns = [y_audio_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy audio : ", metrics.accuracy_score(y_audio_test, y_audio_pred)) cm = confusion_matrix(y_cv_test, y_cv_pred) y_cv_unique = y_cv_test.unique() cm_df = pd.DataFrame(cm, index = [y_cv_unique], columns = [y_cv_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy cv : ", metrics.accuracy_score(y_cv_test, y_cv_pred)) cm = confusion_matrix(y_general_test, y_general_pred) y_general_unique = y_general_test.unique() cm_df = pd.DataFrame(cm, index = [y_general_unique], columns = [y_general_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy general : ", metrics.accuracy_score(y_general_test, y_general_pred)) cm = confusion_matrix(y_graphs_test, y_graphs_pred) y_graphs_unique = y_graphs_test.unique() cm_df = pd.DataFrame(cm, index = [y_graphs_unique], columns = [y_graphs_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy graphs : ", metrics.accuracy_score(y_graphs_test, y_graphs_pred)) cm = confusion_matrix(y_nlp_test, y_nlp_pred) y_nlp_unique = y_nlp_test.unique() cm_df = pd.DataFrame(cm, index = [y_nlp_unique], columns = [y_nlp_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy nlp : ", metrics.accuracy_score(y_nlp_test, y_nlp_pred)) cm = confusion_matrix(y_rl_test, y_rl_pred) y_rl_unique = y_rl_test.unique() cm_df = pd.DataFrame(cm, index = [y_rl_unique], columns = [y_rl_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy rl : ", metrics.accuracy_score(y_rl_test, y_rl_pred)) cm = confusion_matrix(y_sequential_test, y_sequential_pred) y_sequential_unique = y_sequential_test.unique() cm_df = pd.DataFrame(cm, index = [y_sequential_unique], columns = [y_sequential_unique])# plt.figure(figsize=(5,4)) sns.heatmap(cm_df, annot=True) plt.title('Confusion Matrix') plt.ylabel('Actual Values') plt.xlabel('Predicted Values') plt.show() print("Accuracy sequential : ", metrics.accuracy_score(y_sequential_test, y_sequential_pred)) pos 
= 0 for x in range(df_train_all_x.shape[0]): predictions = [] X = df_train_all_x.iloc[[x]] y = df_train_all_y.iloc[x] predictions.append(clf_audio.predict(X)[0]) predictions.append(clf_cv.predict(X)[0]) predictions.append(clf_general.predict(X)[0]) predictions.append(clf_graphs.predict(X)[0]) predictions.append(clf_nlp.predict(X)[0]) predictions.append(clf_rl.predict(X)[0]) predictions.append(clf_sequential.predict(X)[0]) for i in predictions: if i != 'Other': if i == y: pos += 1 continue print(pos/df_train_all_x.shape[0])MAT281 - Laboratorio N°03 Class objectives* Reinforce the basic concepts of pandas. Contents* [Problema 01](p1) Problema 01The dataset is called `ocupation.csv` and contains information such as age, sex, occupation, etc. The first step is to load the dataset and look at the first rows that make it up:import pandas as pd import os # load the data df = pd.read_csv(os.path.join("data","ocupation.csv"), sep="|").set_index('user_id') df.head() # drop the null values from the dataframe df = df[lambda df: df.notnull().all(axis=1)]The goal is to extract as much information as possible from this dataset. To achieve it, solve the following questions: 1. What is the number of observations in the dataset?df.shapeTherefore the number of observations is 943 2. What is the number of columns in the dataset?number = len(df.columns) print("el número de columnas en el conjunto de datos es:",number)el número de columnas en el conjunto de datos es: 43. Print the name of every columnfor i in range(len(df.columns)): print(df.columns[i])age gender occupation zip_code4. Print the index of the dataframedf.index5. What is the data type of each column?print("Tipo de dato por columna:") df.dtypesTipo de dato por columna:6. Summarise the datasetdf.describe()7. Summarise the dataset including all columnsdf.describe(include='all')8. Print only the **occupation** column.df['occupation']9. How many different occupations are there in this dataset?ocupaciones_diferentes = df['occupation'].unique() ocupaciones_totales = list(ocupaciones_diferentes) n = len(ocupaciones_totales) print("el número de ocupaciones diferentes es:" ,n)el número de ocupaciones diferentes es: 2110. What is the most frequent occupation?diccionario={} for ocupacion in ocupaciones_diferentes: diccionario[ocupacion]=0 for ocupacion1 in diccionario.keys(): for ocupacion2 in df['occupation']: if ocupacion1==ocupacion2: diccionario[ocupacion1]+=1 dataframe = pd.DataFrame({ "ocupacion":diccionario.keys(), "ocurrencia":diccionario.values() }) dataframe maxima_frecuencia=dataframe['ocurrencia'].max() for i in diccionario.keys(): if diccionario[i]==maxima_frecuencia: print("La ocupación con la mayor ocurrencia es:",i)La ocupación con la mayor ocurrencia es: student11. What is the mean age of the users?edades = df['age'] print("La edad media de los usuarios es:",edades.mean())La edad media de los usuarios es: 34.0519618239660712. Which age has the fewest occurrences?diccionario2 = {} edades_diferentes = df['age'].unique() for edad in edades_diferentes: diccionario2[edad]=0 for edad1 in diccionario2.keys(): for edad2 in df['age']: if edad1==edad2: diccionario2[edad1]+=1 dataframe2 = pd.DataFrame({ "edad":diccionario2.keys(), "ocurrencia":diccionario2.values() }) print(dataframe2) minima_ocurrencia = dataframe2['ocurrencia'].min() lista_de_edades=[] for i in diccionario2.keys(): if diccionario2[i]==minima_ocurrencia: lista_de_edades.append(i) print("Las edades con la menor ocurrencia son:") for i in lista_de_edades: print(i)edad ocurrencia 0 24 33 1 53 12 2 23 28 3 33 26 4 42 21 .. ... ... 56 10 1 57 73 1 58 58 3 59 69 2 60 70 3 [61 rows x 2 columns] Las edades con la menor ocurrencia son: 7 66 11 10 73
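The frequency questions above (most frequent occupation, least frequent ages) can also be answered directly with pandas' built-in counters; a short alternative sketch, assuming the same `df` loaded from `ocupation.csv`:

```python
# Most frequent occupation (question 10) and least frequent ages (question 12)
# via value_counts, instead of the manual counting loops above.
occ_counts = df['occupation'].value_counts()
print(occ_counts.idxmax())  # 'student', matching the loop-based answer

age_counts = df['age'].value_counts()
print(age_counts[age_counts == age_counts.min()].index.tolist())
```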
Question 85 - Testing user conversionGiven the following [data set](https://docs.google.com/spreadsheets/d/1WnKwSW--x835Uokeq6xInuHg3xxdOyoKcuVfFVcV870/editgid=1401812744), can you see if there is a significant difference in the conversion rate of users between the test and control groups? The relevant columns in the table are conversion and test. The conversion column has values of 0 and 1, which represent whether the user converted (1) or not (0). The test column has values of 0 and 1 as well: 0 for the control group and 1 for the test group. The solution for premium users will be written using Python Pandas.import pandas as pd import matplotlib.pyplot as plt import numpy as np df = pd.read_csv('q085_data.csv') df.head() group_conv = dict(df.groupby('test').apply(lambda s: s['conversion'].sum() / len(s))) conv0, conv1 = group_conv[0], group_conv[1] print(conv0, conv1) from statsmodels.stats.weightstats import ztest ztest( df[df['test']==0]['conversion'], x2=df[df['test']==1]['conversion'], value=0, alternative='two-sided' ) # https://towardsdatascience.com/hypothesis-testing-in-machine-learning-using-python-a0dc89e169ce # https://towardsdatascience.com/spotting-conversion-rate-drop-with-two-sample-hypothesis-testing-using-e-commerce-monitoring-24542ada6122Election expensesRecently we found a congressperson who made an alleged postal service expense at a company created for his own benefit, for his next candidacy in elections. We believe there are more cases like this one.import pandas as pd import numpy as np reimbursements = pd.read_csv('../data/2016-12-06-reimbursements.xz', dtype={'cnpj_cpf': np.str}, low_memory=False) companies = pd.read_csv('../data/2016-09-03-companies.xz', low_memory=False) companies['cnpj'] = companies['cnpj'].str.replace(r'\D', '') dataset = pd.merge(reimbursements, companies, left_on='cnpj_cpf', right_on='cnpj') is_election_company = \ dataset['legal_entity'] == '409-0 - CANDIDATO A CARGO POLITICO ELETIVO' suspect = dataset[is_election_company] suspect.shape suspect['total_net_value'].sum() suspect['total_net_value'].describe() suspect['name'].sample(10) dataset['name'] = dataset['name'].astype(np.str) contains_election_str = dataset['name'].str.lower().str.contains(r'(eleic)[(ao)(oes)]') company_name_suspects = dataset[contains_election_str].index np.array_equal(suspect.index, company_name_suspects) import unicodedata def normalize_string(string): if isinstance(string, str): nfkd_form = unicodedata.normalize('NFKD', string.lower()) return nfkd_form.encode('ASCII', 'ignore').decode('utf-8') suspect['congressperson_name'] = \ suspect['congressperson_name'].apply(normalize_string) suspect['name'] = suspect['name'].apply(normalize_string) suspect[suspect.apply(lambda row: row['congressperson_name']
Multithreading and MultiprocessingRecall the phrase "many hands make light work". This is as true in programming as anywhere else.What if you could engineer your Python program to do four things at once? What would normally take an hour could (almost) take one fourth the time.\*This is the idea behind parallel processing, or the ability to set up and run multiple tasks concurrently.\* *We say almost, because you do have to take time setting up four processors, and it may take time to pass information between them.* Threading vs. ProcessingA good illustration of threading vs. processing would be to download an image file and turn it into a thumbnail.The first part, communicating with an outside source to download a file, involves a thread. Once the file is obtained, the work of converting it involves a process. Essentially, two factors determine how long this will take: the input/output speed of the network communication, or I/O, and the available processor, or CPU. I/O-intensive processes improved with multithreading:* webscraping* reading and writing to files* sharing data between programs* network communications CPU-intensive processes improved with multiprocessing:* computations* text formatting* image rescaling* data analysis Multithreading Example: WebscrapingHistorically, the programming knowledge required to set up multithreading was beyond the scope of this course, as it involved a good understanding of Python's Global Interpreter Lock (the GIL prevents multiple threads from running the same Python code at once). Also, you had to set up special classes that behave like Producers to divvy up the work, Consumers (aka "workers") to perform the work, and a Queue to hold tasks and provide communications. And that was just the beginning.Fortunately, we've already learned one of the most valuable tools we'll need – the `map()` function. When we apply it using two standard libraries, *multiprocessing* and *multiprocessing.dummy*, setting up parallel processes and threads becomes fairly straightforward. Here's a classic (Python 2) multithreading example provided by [IBM](http://www.ibm.com/developerworks/aix/library/au-threadingpython/) and adapted by [this blog post](http://chriskiehl.com/article/parallelism-in-one-line/) where you divide the task of retrieving web pages across multiple threads: import time import threading import Queue import urllib2 class Consumer(threading.Thread): def __init__(self, queue): threading.Thread.__init__(self) self._queue = queue def run(self): while True: content = self._queue.get() if isinstance(content, str) and content == 'quit': break response = urllib2.urlopen(content) print 'Thanks!' def Producer(): urls = [ 'http://www.python.org', 'http://www.yahoo.com', 'http://www.scala.org', 'http://www.google.com' # etc.. ] queue = Queue.Queue() worker_threads = build_worker_pool(queue, 4) start_time = time.time() # Add the urls to process for url in urls: queue.put(url) # Add the poison pill for worker in worker_threads: queue.put('quit') for worker in worker_threads: worker.join() print 'Done! Time taken: {}'.format(time.time() - start_time) def build_worker_pool(queue, size): workers = [] for _ in range(size): worker = Consumer(queue) worker.start() workers.append(worker) return workers if __name__ == '__main__': Producer() Using the multithreading library provided by the *multiprocessing.dummy* module and `map()`, all of this becomes: import urllib2 from multiprocessing.dummy import Pool as ThreadPool pool = ThreadPool(4) # choose a number of workers urls = [ 'http://www.python.org', 'http://www.yahoo.com', 'http://www.scala.org', 'http://www.google.com' # etc.. ] results = pool.map(urllib2.urlopen, urls) pool.close() pool.join() In the above code, the *multiprocessing.dummy* module provides the parallel threads, and `map(urllib2.urlopen, urls)` assigns the labor! Multiprocessing Example: Monte CarloLet's code out an example to see how the parts fit together. We can time our results using the *timeit* module to measure any performance gains. Our task is to apply the Monte Carlo Method to estimate the value of Pi. Monte Carlo Method and Estimating PiIf you draw a circle of radius 1 (a unit circle) and enclose it in a square, the areas of the two shapes are: circle $$πr^2$$, square $$4 r^2$$. Therefore, the ratio of the area of the circle to the area of the square is $$\frac{π}{4}$$The Monte Carlo Method plots a series of random points inside the square. By comparing the number that fall within the circle to those that fall outside, with a large enough sample we should have a good approximation of Pi. You can see a good demonstration of this [here](https://academo.org/demos/estimating-pi-monte-carlo/) (Hit the **Animate** button on the page).For a given number of points *n*, we have $$π = \frac{4 \cdot points\ inside\ circle}{total\ points\ n}$$To set up our multiprocessing program, we first derive a function for finding Pi that we can pass to `map()`:from random import random # perform this import outside the function def find_pi(n): """ Function to estimate the value of Pi """ inside=0 for i in range(0,n): x=random() y=random() if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle inside+=1 pi=4*inside/n return piLet's test `find_pi` on 5,000 points:find_pi(5000)This ran very quickly, but the results are not very accurate!Next we'll write a script that sets up a pool of workers, and lets us time the results against pools of varying size. We'll set up two arguments to represent *processes* and *total_iterations*. Inside the script, we'll break *total_iterations* down into the number of iterations passed to each process, by making a processes-sized list.For example: total_iterations = 1000 processes = 5 iterations = [total_iterations//processes]*processes iterations Output: [200, 200, 200, 200, 200] This list will be passed to our `map()` function along with `find_pi()`.%%writefile test.py from random import random from multiprocessing import Pool import timeit def find_pi(n): """ Function to estimate the value of Pi """ inside=0 for i in range(0,n): x=random() y=random() if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle inside+=1 pi=4*inside/n return pi if __name__ == '__main__': N = 10**5 # total iterations P = 5 # number of processes p = Pool(P) print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10)) p.close() p.join() print(f'{N} total iterations with {P} processes') !
python test.py3.1466800 3.1364400 3.1470400 3.1370400 3.1256400 3.1398400 3.1395200 3.1363600 3.1437200 3.1334400 0.2370227286270967 100000 total iterations with 5 processesGreat! The above test took under a second on our computer.Now that we know our script works, let's increase the number of iterations, and compare two different pools. Sit back, this may take awhile!%%writefile test.py from random import random from multiprocessing import Pool import timeit def find_pi(n): """ Function to estimate the value of Pi """ inside=0 for i in range(0,n): x=random() y=random() if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle inside+=1 pi=4*inside/n return pi if __name__ == '__main__': N = 10**7 # total iterations P = 1 # number of processes p = Pool(P) print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10)) p.close() p.join() print(f'{N} total iterations with {P} processes') P = 5 # number of processes p = Pool(P) print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10)) p.close() p.join() print(f'{N} total iterations with {P} processes') ! python test.py3.1420964 3.1417412 3.1411108 3.1408184 3.1414204 3.1417656 3.1408324 3.1418828 3.1420492 3.1412804 36.03526345242264 10000000 total iterations with 1 processes 3.1424524 3.1418376 3.1415292 3.1410344 3.1422376 3.1418736 3.1420540 3.1411452 3.1421652 3.1410672 17.300921846344366 10000000 total iterations with 5 processesHopefully you saw that with 5 processes our script ran faster! More is Better ...to a point.The gain in speed as you add more parallel processes tends to flatten out at some point. In any collection of tasks, there are going to be one or two that take longer than average, and no amount of added processing can speed them up. This is best described in [Amdahl's Law](https://en.wikipedia.org/wiki/Amdahl%27s_law). Advanced ScriptIn the example below, we'll add a context manager to shrink these three lines p = Pool(P) ... p.close() p.join() to one line: with Pool(P) as p: And we'll accept command line arguments using the *sys* module.%%writefile test2.py from random import random from multiprocessing import Pool import timeit import sys N = int(sys.argv[1]) # these arguments are passed in from the command line P = int(sys.argv[2]) def find_pi(n): """ Function to estimate the value of Pi """ inside=0 for i in range(0,n): x=random() y=random() if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle inside+=1 pi=4*inside/n return pi if __name__ == '__main__': with Pool(P) as p: print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.5f}'), number=10)) print(f'{N} total iterations with {P} processes') ! python test2.py 10000000 5003.14121 3.14145 3.14178 3.14194 3.14109 3.14201 3.14243 3.14150 3.14203 3.14116 16.871822701405073 10000000 total iterations with 500 processesPotentialfeld und Vektorfeld*** Wir definieren uns als Potentialfunktion $U(x,y)$ die *inverse Länge*:$$U(x,y,z) = {{1} \over {r}} = {{1} \over {\sqrt{(x-x_0)^2 + (y-y_0)^2 + (z-z_0)^2}}} $$mit $(x_0,y_0,z_0)^T$ der Punktquelle des Potentials, und $(x,y,z)^T$ dem Beobachtungspunkt. 
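The gradient worked out step by step just below can also be cross-checked symbolically. A minimal SymPy sketch (not part of the original notebook):

```python
# Symbolic cross-check: the gradient of U = 1/r is -(x, y, z)/r^3, i.e. -r_vec/r^3.
import sympy as sp

x, y, z = sp.symbols('x y z', real=True)
r = sp.sqrt(x**2 + y**2 + z**2)
U = 1 / r

grad_U = [sp.simplify(sp.diff(U, v)) for v in (x, y, z)]
print(grad_U)  # [-x/(x**2 + y**2 + z**2)**(3/2), ...]
assert all(sp.simplify(g + v / r**3) == 0 for g, v in zip(grad_U, (x, y, z)))
```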
Setzen wir den Ursprung des Potentials in den Ursprung des Koordinatensystems, $(x_0,y_0,z_0)^T=(0,0,0)^T$, dann gilt:$$U(x,y,z) = {{1} \over {r}} = {{1} \over {\sqrt{x^2 + y^2 + z^2}}} $$ Die Vektorfunktion $\vec{F}(x,y)$ definieren wir als Gradienten des Potentials:$$\vec{F}(x,y) = \nabla U= \left(\begin{array}{c} {{\partial} \over {\partial x}} \\ {{\partial} \over {\partial y}} \\ {{\partial} \over {\partial z}} \end{array}\right) \left( {{1} \over {r}} \right)= \left(\begin{array}{c} {{\partial} \over {\partial x}} \\ {{\partial} \over {\partial y}} \\ {{\partial} \over {\partial z}} \end{array}\right) \left( x^2 + y^2 + z^2 \right)^{-{{1} \over {2}}}= \left(\begin{array}{c}-{{1} \over {2}} (x^2+y^2+z^2)^{-{{3} \over {2}}} 2x \\-{{1} \over {2}} (x^2+y^2+z^2)^{-{{3} \over {2}}} 2y \\-{{1} \over {2}} (x^2+y^2+z^2)^{-{{3} \over {2}}} 2z\end{array}\right)= \left(\begin{array}{c}-{{x} \over {r^3}} \\-{{y} \over {r^3}} \\-{{z} \over {r^3}}\end{array}\right)= - {{\vec{r}} \over {r^3}}= - {{\hat{r}} \over {r^2}}$$Im letzten Schritt haben wir den *Einheitsvektor* $\hat{r}={{\vec{r}}\over{r}}$, derdie Länge eins hat, eingeführt. Die Definition von Potentialfeld $U$ und Vektorfeld $\vec{F}$ ist eine Beschreibung für so unterschiedliche Felder wie- **Gravitationsfeld**: $\vec{g}=\nabla U$$$\begin{array}{rcl}U &=& {{GM}\over{r}} \\\vec{g} &=& -{{GM}\over{r^2}} \hat{r}\end{array}$$- **Elektrisches Feld**: $\vec{E}=-\nabla U$$$\begin{array}{rcl}U &=& {{1}\over{4\pi \epsilon_0}} {{Q}\over{r}} \\\vec{E} &=& {{1}\over{4\pi \epsilon_0}} {{Q}\over{r^2}} \hat{r}\end{array}$$- **Magnetisches Feld**: $\vec{B}=-\nabla U$$$\begin{array}{rcl}U &=& {{\mu_0}\over{4\pi}} {{P}\over{r}} \\\vec{B} &=& {{\mu_0}\over{4\pi}} {{P}\over{r^2}} \hat{r}\end{array}$$ |Symbol |Einheit |Wert | Bezeichnung ||------------|-----------------|--------------------------------|----------------------------||$G$ |m$^3$/kg/s$^2$ |$6.67408 \times 10^{-11}$ | Gravitationskonstante ||$M$ |kg |$5.972 \times 10^{24}$ | Masse der Erde ||$\epsilon_0$|Am/(Vs) |$8.854 \times 10^{-12}$ | elektrische Permittivität ||$\mu_0$ |Vs/(Am) |$4\pi \times 10^{-7}$ | magnetische Permeabilität |%matplotlib inline ''' calculate scalar potential U in x-y-plane and derive vector field (Ex,Ey) from nabla(U) ''' import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm # gravitational constant [m3/kg/s2], Earth's mass [kg] g, mass = 6.67408e-11, 5.972e24 # Dipole charge (C), Permittivity of free space (F.m-1), Dipole +q, -q distance (m) q, eps0,d = +1.602e-19, 8.854e-12, 1.e-12 # scaling for potential kgrav = g*mass kmonopole = 1/4/np.pi/eps0 * q #* d k = 1. 
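# Note: k = 1 keeps the plotted potential dimensionless; the physical scalings computed above can be substituted instead, e.g. k = kgrav for the Earth's gravitational potential or k = kmonopole for the electrostatic point-charge potential.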
# Cartesian axis system with origin at the dipole (m) size = 1.5 X = np.linspace(-size, size, 40) Y = np.linspace(-size, size, 40) # re-mesh for 2D plot X, Y = np.meshgrid(X, Y) # calculate angle T for unit vector components T = np.arctan2(Y, X) # calculate R=sqrt(Y**2+Y**2) R = np.hypot(X,Y) # Dipole potential (V), using point dipole approximation U = k * X / np.hypot(X, Y)**3 # Monopole potential (V) U = k / R Ey, Ex = np.gradient(U) Ex, Ey = -1*Ex,-1*Ey #Ex, Ey = k/R**2 * np.cos(T), k/R**2 * np.sin(T) E_norm = np.sqrt(Ex**2 + Ey**2) # define levels for contours and colormap for vector magnitudes levels = np.array([10**pw for pw in np.linspace(-1,2,7)]) levels = sorted(list(-levels) + list(levels)) cmap = cm.get_cmap(name='nipy_spectral', lut=None) # plot figure for potential and vector field plt.figure(figsize=(10.0,10.0)) plt.title('Potential and vector field') CS = plt.contour(X, Y, U,colors='k',levels=levels) plt.clabel(CS, inline=1, fontsize=10,fmt='%8.1f') plt.quiver(X, Y, Ex/E_norm, Ey/E_norm, E_norm, alpha=.5,scale=40) plt.quiver(X, Y, Ex/E_norm, Ey/E_norm,edgecolor='k', facecolor='None', linewidth=.5,color='blue',scale=40) plt.show()課題2(プログラムによる引張試験結果の整理)について ① PYTHONによるプログラム作成および実行が可能なプラットフォームを準備する。② “PYTHON_practice_ver2.xlsx”の中身を確認する。   引張試験結果(試験前後の形状、引張荷重、伸び(引張方向)、縮み(円周方向)のデータ列) ③ PYTHONを用いて“PYTHON_practice_ver2.xlsx”を読み込み、公称ひずみ、公称応力、対数ひずみ、実応力を計算する。ただし、対数ひずみと実応力は一様変形が開始したところから最大荷重点の一つ前までのデータとして求める。 ④ PYTHONを用いて、ヤング率、ポアソン比、塑性係数、加工硬化、引張強さ、破断伸び(全伸び)、絞りを計算により求める。 ⑤ 求めた結果についてPYTHONを用いてcsvファイルとして出力する。 ⑥ Excelでcsvファイルを読み込んで公称応力―公称ひずみ線図、実応力-対数ひずみ線図を一つのグラフとして作成する。 データ整理import pandas as pd import numpy as np import matplotlib.pyplot as plt import japanize_matplotlib #日本語の表示 %matplotlib inline data = pd.read_excel('practice.xlsx',header=None) df1 = data.iloc[0:6,0:5].fillna('-') df2 = data.iloc[8:,0:4].reset_index(drop=True) df2.rename(columns={0:"No.",1:"Weight (kN)",2:"Stretch (mm)",3:"Shrink (mm)"},inplace=True) df1.head(6) df2.head(10) #最初の10つを確認 # 試験片のデータ shikenhen = df1.iloc[3:,1:].astype(float).reset_index(drop=True) shikenhen.rename(columns={1:"Distance before (mm)",2:"Diameter before (mm)",3:"Distance after (mm)",4:"Diameter after (mm)"},inplace=True) shikenhen # jiku = hippari[] # 引張り試験のデータ hippari = df2[["No.","Weight (kN)","Stretch (mm)","Shrink (mm)"]].fillna(0).iloc[2:].astype(float).reset_index(drop=True) print(hippari) # hippari.info() # hippari.describe()求める材料定数- ヤング率 E [GPa] = 〇- ポアソン比 ν = 〇- 引張強さ σ_max [MPa] = 〇- 上降伏応力 [MPa] = 〇- 下降伏応力 [MPa] = 〇- 全伸び δ = 〇- 絞り φ = 〇- 塑性係数 F [MPa] = 〇- 加工硬化指数 n = 〇avg_distance_before = shikenhen["Distance before (mm)"].mean() print(f'試験前の標点間距離の平均は {avg_distance_before.round(4)} mm') avg_distance_after = shikenhen["Distance after (mm)"].mean() print(f'試験後の標点間距離の平均は {avg_distance_after.round(4)} mm') avg_diameter_before = shikenhen["Diameter before (mm)"].mean(axis=0) print(f'試験前の外径の平均は {avg_diameter_before.round(4)} mm') area_before = (np.pi*(avg_diameter_before/2)**2).round(4) print(f'試験前の断面積は {area_before} mm^2') avg_diameter_after = shikenhen["Diameter after (mm)"].mean(axis=0) print(f'試験前後外径の平均は {avg_diameter_after.round(4)} mm') area_after = (np.pi*(avg_diameter_after/2)**2).round(4) print(f'試験後の断面積は {area_after} mm^2') P_max = hippari["Weight (kN)"].max().round(2) #最大荷重 print(f'最大荷重は {P_max} kN') sigma_max = P_max*10**3/area_before print(f'引張強さ σ_max は {sigma_max.round(4)} MPa') zennobi = (avg_distance_after-avg_distance_before)/avg_distance_before print(f'全伸び δ は {zennobi.round(4)}') # 絞り shibori = 
(area_before-area_after)/area_before print(f'絞り φ は {shibori.round(4)}') # 公称応力、公称ひずみ、対数ひずみ、実応力を計算する hippari["Nominal Stress (MPa)"] = (hippari["Weight (kN)"]*1000/area_before).round(1) #公称応力 hippari["Nominal Strain"] = hippari["Stretch (mm)"]*10**(-2) #公称ひずみ(軸方向) με hippari["Logarithmic Strain"] = np.log(1+hippari["Nominal Strain"]) #対数ひずみ hippari["True Stress (MPa)"] = hippari["Nominal Stress (MPa)"]*(1+hippari["Nominal Strain"]) #実応力 # 一様変形が開始したところから最大荷重点の一つ前までのデータを求める hippari2 = hippari.copy() hippari2 = hippari2.iloc[19:33].reset_index(drop=True) hippari2["Shrink (mm)"].replace(0, pd.NA,inplace=True) hippari2.drop(columns=['Nominal Stress (MPa)','Nominal Strain'],inplace=True) # csvファイルに出力 hippari.drop(columns=["Logarithmic Strain","True Stress (MPa)"],inplace=False).to_csv("hippari.csv",index=False,float_format='%g') hippari2.to_csv("hippari2.csv",index=False,float_format='%g') hippari["Nominal Strain 2"]= hippari["Shrink (mm)"]*10**(-2) #公称ひずみ(円周方向) με jiku = hippari.iloc[:,[4,5]].iloc[:19,:] print(jiku) enshu = hippari.iloc[:19,[4,8]].abs() print(enshu) df3 = [jiku,enshu] jiku_enshuu = pd.concat(df3,axis=1) jiku_enshuu.rename(columns={"Nominal Stress (MPa)":"公称応力 (軸方向)","Nominal Strain":"公称ひずみ (軸方向)","Nominal Stress (MPa)":"公称応力 (円周方向)","Nominal Strain 2":"公称ひずみ (円周方向)"},inplace=True) jiku_enshuu.to_csv("hippari3.csv",index=False,float_format='%g') jiku_enshuu plt.title('弾性域における公称応力ー公称ひずみ線図',fontsize=16,pad=18) #obtain m (slope) and b(intercept) of linear regression line (最小二乗法) m, b = np.polyfit(jiku["Nominal Strain"], jiku["Nominal Stress (MPa)"], 1) plt.plot(jiku["Nominal Strain"], m*jiku["Nominal Strain"]+b,"--",linewidth=0.5) plt.scatter(jiku["Nominal Strain"],jiku["Nominal Stress (MPa)"],label = "軸方向",marker= ".") m2, b2 = np.polyfit(enshu["Nominal Strain 2"], enshu["Nominal Stress (MPa)"], 1) plt.plot(enshu["Nominal Strain 2"], m2*enshu["Nominal Strain 2"]+b2,"--",linewidth=0.5) plt.scatter(enshu["Nominal Strain 2"],enshu["Nominal Stress (MPa)"],label = "円周方向",marker= ".") plt.xlabel('ひずみ ε_N [-]', fontsize=12) plt.ylabel('応力 σ_N [MPa]', fontsize=12) plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0)) plt.grid() plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left') plt.tight_layout() plt.show() print(f'(軸方向) 傾き = {int(m.round(0))} MPa, 切片 = {b.round(2)} MPa') print(f'(円周方向) 傾き = {int(m2.round(0))} MPa, 切片 = {b2.round(2)} MPa') print(f'ヤング率 E = {int(m.round(0))*10**(-3)} GPa') print(f'ポアソン比 ν = {(m/m2).round(3)}') print(hippari) print(f'最大荷重は {P_max} kN\n') print(hippari2) print(f'上降伏応力 δ_upper は 293.0 MPa') print(f'下降伏応力 δ_lower は 284.5 MPa') x1=hippari["Nominal Strain"] y1=hippari["Nominal Stress (MPa)"] x2=hippari2["Logarithmic Strain"] y2=hippari2["True Stress (MPa)"] plt.figure(figsize=(12,6)) # plot for engineering stress- engineering strain diagram plt.subplot(1,2,1) plt.plot(x1,y1,linewidth=2.5) plt.fill_between(x1,y1,color='green',hatch='||',alpha=0.15) plt.title("公称応力ー公称ひずみ",fontsize=20) plt.xlabel(r'公称ひずみ $ε_N$ [-]', fontsize=16) plt.ylabel(r'公称応力 $σ_N$ [MPa]', fontsize=16) plt.grid() # plot for true stress- logarithmic strain diagram plt.tight_layout(pad = 7.0) plt.subplot(1,2,2) plt.plot(x2,y2,'red',linewidth=2.5) plt.fill_between(x2,y2,color='yellow',hatch='\\',alpha=0.15) plt.title('実応力ー対数ひずみ',fontsize=20) plt.xlabel('対数ひずみ ε [-]', fontsize=16) plt.ylabel('実応力 σ [MPa]', fontsize=16) plt.grid() plt.savefig("stress-strain_diagram.jpg", dpi=300) plt.show() plt.title('塑性曲線 (公称応力ー公称ひずみ、実応力ー対数ひずみ線図)',fontsize=16,pad=18) plt.scatter(x1,y1,marker = ".") 
plt.plot(x1,y1,label = "公称応力ー公称ひずみ") plt.scatter(x2,y2, marker = ".") plt.plot(x2,y2,label = "実応力ー対数ひずみ",color= "red") plt.xlabel(r'ひずみ $ε_N$ , ε [-]', fontsize=12) plt.ylabel(r'応力 $σ_N$ , σ [MPa]', fontsize=12) plt.grid() plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left') plt.tight_layout() plt.show() hippari2 plt.title('Fとnの導出(最小二乗法用)',fontsize=16,pad=18) #obtain m (slope) and b(intercept) of linear regression line m3, b3 = np.polyfit(np.log10(hippari2["Logarithmic Strain"]), np.log10(hippari2["True Stress (MPa)"]), 1) plt.plot(np.log10(hippari2["Logarithmic Strain"]), m3*np.log10(hippari2["Logarithmic Strain"])+b3,"--",linewidth =0.5) plt.scatter(np.log10(hippari2["Logarithmic Strain"]),np.log10(hippari2["True Stress (MPa)"]) ,label = "実応力ー対数ひずみ",marker = ".") plt.xlabel(r'$log_{10}$ ひずみ ε [-]', fontsize=12) plt.ylabel(r'$log_{10}$ 応力 σ [MPa]', fontsize=12) plt.grid() plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left') plt.tight_layout() plt.show() print(f'\n傾き = {m3.round(3)} , 切片 = {b3.round(3)}') print(f'\nlog10 F = {b3.round(3)}, ∴ F = {int(10**(b3.round(3)))} MPa') print(f'\n∴ n = {m3.round(3)}')| | || ----------------- | ------------------------------------------------------------------ || ヤング率 E [GPa] | 402.793 || ポアソン比 ν | 0.247 || 引張強さ σ_max [MPa] | 443.036 || 上降伏応力 [MPa] | 293.0 || 下降伏応力 [MPa] | 284.5 || 全伸び δ | 0.4056 || 絞り φ | 0.5974 || 塑性係数 F [MPa] | 881 || 加工硬化指数 n | 0.255 | 求める材料定数- ヤング率 E [GPa] = 〇- ポアソン比 ν = 〇- 引張強さ σ_max [MPa] = 〇- 上降伏応力 [MPa] = 〇- 下降伏応力 [MPa] = 〇- 全伸び δ = 〇- 絞り φ = 〇- 塑性係数 F [MPa] = 〇- 加工硬化指数 n = 〇 Compiled codes# import pandas as pd # import numpy as np # import matplotlib.pyplot as plt # import japanize_matplotlib # 日本語の表示 # data = pd.read_excel(r'C:\Users\User\Desktop\practice.xlsx', header=None) # df1 = data.iloc[0:6, 0:5].fillna('-') # df2 = data.iloc[8:, 0:4].reset_index(drop=True) # df2.rename(columns={0: "no", 1: "weight_kN", # 2: "stretch_mm", 3: "shrink_mm"}, inplace=True) # # 試験片のデータ # shikenhen = df1.iloc[3:, 1:].astype(float).reset_index(drop=True) # shikenhen.rename(columns={1: "distance_bef", 2: "diameter_bef", # 3: "distance_aft", 4: "diameter_aft"}, inplace=True) # # 引張り試験のデータ # hippari = df2[["weight_kN", "stretch_mm", "shrink_mm"]].fillna( # 0).iloc[2:].astype(float).reset_index(drop=True) # # print(hippari) # avg_distance_before = shikenhen["distance_bef"].mean() # print(f'試験前の標点間距離の平均は {avg_distance_before.round(4)} mm') # avg_distance_after = shikenhen["distance_aft"].mean() # print(f'試験後の標点間距離の平均は {avg_distance_after.round(4)} mm') # avg_diameter_before = shikenhen["diameter_bef"].mean(axis=0) # print(f'試験前の外径の平均は {avg_diameter_before.round(4)} mm') # area_before = (np.pi*(avg_diameter_before/2)**2).round(4) # print(f'試験前の断面積は {area_before} mm^2') # avg_diameter_after = shikenhen["diameter_aft"].mean(axis=0) # print(f'試験前後外径の平均は {avg_diameter_after.round(4)} mm') # area_after = (np.pi*(avg_diameter_after/2)**2).round(4) # print(f'試験後の断面積は {area_after} mm^2') # # 最大荷重 # P_max = hippari["weight_kN"].max().round(2) # print(f'最大荷重は {P_max} kN') # # 引張強さ # sigma_max = P_max*10**3/area_before # print(f'引張強さ σ_max は {sigma_max.round(4)} MPa') # # 全伸び # zennobi = (avg_distance_after-avg_distance_before)/avg_distance_before # print(f'全伸び δ は {zennobi.round(4)}') # # 絞り # shibori = (area_before-area_after)/area_before # print(f'絞り φ は {shibori.round(4)}\n') # # 公称応力、公称ひずみ、対数ひずみ、実応力を計算する # hippari["nominal_stress_MPa"] = (hippari["weight_kN"]*1000/area_before).round(1) #公称応力 # hippari["nominal_strain"] = 
hippari["stretch_mm"]*10**(-6) #公称ひずみ # hippari["log_strain"] = np.log(1+hippari["nominal_strain"]) #対数ひずみ # hippari["true_stress_MPa"] = hippari["nominal_stress_MPa"]*(1+hippari["nominal_strain"]) #実応力 # # 一様変形が開始したところから最大荷重点の一つ前までのデータを求める # hippari2 = hippari.copy() # hippari2 = hippari2.iloc[19:33].reset_index(drop=True) # hippari2["shrink_mm"].replace(0, pd.NA,inplace=True) # hippari2.drop(columns=['nominal_stress_MPa','nominal_strain'],inplace=True) # # csvファイルに出力 # hippari.to_csv("hippari.csv",index=False,float_format='%g') # hippari2.to_csv("hippari2.csv",index=False,float_format='%g') # # stress-strain diagram # x1=hippari["nominal_strain"] # x軸 # y1=hippari["nominal_stress_MPa"] # y軸 # x2=hippari2["log_strain"] # y2=hippari2["true_stress_MPa"] # plt.grid(True,alpha=0.3) # plt.plot(x1,y1,'g',label="公称ひずみー降伏応力") # plt.plot(x2,y2,'r',label="対数ひずみー実応力") # plt.title("塑性曲線(公称応力ー公称ひずみ、実応力ー対数ひずみ線図)",fontsize=20,pad=20) # plt.xlabel('ひずみ ε [-]', fontsize=16) # plt.ylabel('応力 σ [MPa]', fontsize=16) # plt.legend() # plt.show() # print(f'\n図より上降伏応力 δ_upper は 293.0 MPa') # print(f'図より下降伏応力 δ_lower は 284.5 MPa')回歸問題常見的評估指標有- MAE- MSE- R-square 我們隨機生成(X, y)資料,然後使用線性回歸模型做預測,再使用 MAE, MSE, R-square 評估X, y = datasets.make_regression(n_features=1, random_state=42, noise=4) # 生成資料 model = LinearRegression() # 建立回歸模型 model.fit(X, y) # 將資料放進模型訓練 prediction = model.predict(X) # 進行預測 mae = metrics.mean_absolute_error(prediction, y) # 使用 MAE 評估 mse = metrics.mean_squared_error(prediction, y) # 使用 MSE 評估 r2 = metrics.r2_score(prediction, y) # 使用 r-square 評估 print("MAE: ", mae) print("MSE: ", mse) print("R-square: ", r2)MAE: 2.8417972525655673 MSE: 12.488680067398239 R-square: 0.9916581036260311分類問題常見的評估指標有- AUC- F1-Score (Precision, Recall)cancer = datasets.load_breast_cancer() # 我們使用 sklearn 內含的乳癌資料集 X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=50, random_state=0) print(y_test) # 測試集中的 label y_pred = np.random.random((50,)) # 我們先隨機生成 50 筆預測值,範圍都在 0~1 之間,代表機率值 print(y_pred)[0.88020553 0.00582627 0.55231401 0.93010329 0.75114499 0.88251056 0.88751515 0.25403727 0.56522296 0.39482483 0.30985037 0.07305572 0.76539673 0.10472615 0.07738924 0.55117679 0.38266898 0.31422488 0.47429077 0.91976477 0.70888401 0.97002571 0.30401677 0.16284212 0.20703373 0.28801946 0.05953835 0.31013068 0.86415154 0.70828046 0.26690311 0.91564687 0.10973302 0.12845369 0.0853283 0.74136598 0.2907843 0.5520364 0.17597545 0.10793774 0.20796923 0.47554019 0.26701358 0.10186195 0.52445989 0.64575859 0.81274211 0.26800271 0.67786045 0.33819718]AUCauc = metrics.roc_auc_score(y_test, y_pred) # 使用 roc_auc_score 來評估。 **這邊特別注意 y_pred 必須要放機率值進去!** print("AUC: ", auc) # 得到結果約 0.5,與亂猜的結果相近,因為我們的預測值是用隨機生成的AUC: 0.366723259762309F1-Scorethreshold = 0.5 y_pred_binarized = np.where(y_pred > threshold, 1, 0) # 使用 np.where 函數, 將 y_pred > 0.5 的值變為 1,小於 0.5 的為 0 f1 = metrics.f1_score(y_test, y_pred_binarized) # 使用 F1-Score 評估 precision = metrics.precision_score(y_test, y_pred_binarized) # 使用 Precision 評估 recall = metrics.recall_score(y_test, y_pred_binarized) # 使用 recall 評估 print("F1-Score: ", f1) print("Precision: ", precision) print("Recall: ", recall)F1-Score: 0.5818181818181819 Precision: 0.6666666666666666 Recall: 0.5161290322580645Assignment 6 - Exercise 2c - 201805995prob_statement = """A empresa Schwabe utiliza ouro e prata para produzir dois tipos de colares. O colar 1 requer 2g de ouro, 3g de prata e 1 hora de trabalho para confec¸c˜ao. O colar 2 requer 3g de ouro, 2g de prata e 2 horas de trabalho. 
Cada colar 1 vende por $400, e cada colar 2 vende por $500. Todos os colares produzidos s˜ao vendidos. Em estoque a empresa tem 100g de ouro e 120g de prata, e disp˜oe de 70 horas de m˜ao de obra. Ouro extra pode ser comprado por um custo de $100/g. Encomendas j´a realizadas requerem que sejam produzidos pelo menos 20 colares 1 e 25 colares 2. Formule um modelo de programa¸c˜ao linear para maximizar o lucro da empresa. . """ def fix_statement(str_statement): list_replacements = [("¸c", "ç"), ("´a", "á"), ("´e", "é"), ("´ı", "í"), ("´o", "ó"), ("´u", "ú"), ("˜a", "ã"), ("˜o", "õ"), ("$", "\\\$")] for replacement in list_replacements: str_statement = str_statement.replace(replacement[0], replacement[1]) return str_statement prob_statement = fix_statement(prob_statement) print(prob_statement)A empresa Schwabe utiliza ouro e prata para produzir dois tipos de colares. O colar 1 requer 2g de ouro, 3g de prata e 1 hora de trabalho para confecção. O colar 2 requer 3g de ouro, 2g de prata e 2 horas de trabalho. Cada colar 1 vende por \\$400, e cada colar 2 vende por \\$500. Todos os colares produzidos são vendidos. Em estoque a empresa tem 100g de ouro e 120g de prata, e dispõe de 70 horas de mão de obra. Ouro extra pode ser comprado por um custo de \\$100/g. Encomendas já realizadas requerem que sejam produzidos pelo menos 20 colares 1 e 25 colares 2. Formule um modelo de programação linear para maximizar o lucro da empresa. .A empresa Schwabe utiliza ouro e prata para produzir dois tipos de colares. O colar 1 requer 2g deouro, 3g de prata e 1 hora de trabalho para confecção. O colar 2 requer 3g de ouro, 2g de prata e 2 horasde trabalho. Cada colar 1 vende por \\$400, e cada colar 2 vende por \\$500. Todos os colares produzidossão vendidos. Em estoque a empresa tem 100g de ouro e 120g de prata, e dispõe de 70 horas de mão deobra. Ouro extra pode ser comprado por um custo de \\$100/g. Encomendas já realizadas requerem quesejam produzidos pelo menos 20 colares 1 e 23 colares 2. Formule um modelo de programação linearpara maximizar o lucro da empresa..import gurobipy as gp from gurobipy import GRB, Model # Create a new model m = Model("Schwabe") # Create variables x1 = m.addVar(lb=0, vtype=GRB.CONTINUOUS, name="no. collars type 1") x2 = m.addVar(lb=0, vtype=GRB.CONTINUOUS, name="no. collars type 2") g = m.addVar(lb=0, vtype=GRB.CONTINUOUS, name="grams of purchased gold") m.setObjective(x1 * 400 + x2 * 500 - g * 100, GRB.MAXIMIZE) # Add constraints m.addConstr(x1 * 2 + x2 * 3 <= 100 + g, 'Gold constraint') m.addConstr(x1 * 3 + x2 * 2 <= 120, 'Silver constraint') m.addConstr(x1 * 1 + x2 * 2 <= 70, 'MH constraint') m.addConstr(x1 >= 20, 'Min production of collar type 1') m.addConstr(x2 >= 23, 'Min production of collar type 2') m.optimize() for v in m.getVars(): print(f'{v.varName}, {v.x}') print(f'Obj: {m.objVal}')no. collars type 1, 24.0 no. collars type 2, 23.0 grams of purchased gold, 17.0 Obj: 19400.0---title: "Evidently: Simulate Evidence Accumulation Models in Python"date: "2020-01-23"source: "jupyter"--- I've just put the finishing touches on version 0.0.1 of `Evidently` is a python package for working with evidence accumulation models.In short, it lets you do things like this:![](schurger.gif)Since I spent all that time writing a Read Me page for the [GitHub repository](https://github.com/EoinTravers/Evidently),I've reproduced it below. 
Evidently Evidently provides- Efficient functions for simulating data from a range of models.- Classes that make it easier to tweak model parameters and manage simulated data.- A consistent way to implement new models.- Visualisation, including interactive widgets for Jupyter.- Kernel density-based methods for estimating the likelihood of real data under a given model/set of parameters, allowing parameter estimation and model comparison.To see some of the features of Evidently in action, click the link below to launch a notebook packed full of interactive visualisations.[![Launch Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/EoinTravers/Evidently/master?filepath=dashboards%2FInteractive%20Models.ipynb) InstallationEvidently isn't on PyPI yet, but you can install it directly from GitHub:`pip install git+https://github.com/EoinTravers/Evidently` Basic Useimport pandas as pd import numpy as np import matplotlib.pyplot as plt import evidentlySet up a model and provide parametersmodel = evidently.models.Diffusion(pars=[1., .5, -.25, .8, .4], max_time=5., dt=.001) model model.describe_parameters()Simulate dataX, responses, rts = model.do_dataset(n=1000) X.head() print(responses[:5]) print(rts[:5])VisualiseThe `evidently.viz` submodule contains a collection of `matplotlib`-based functions for visualising model simulations. Here are a few examples.ax = evidently.viz.setup_ddm_plot(model) # Uses model info to draw bounds. evidently.viz.plot_trace_mean(model, X, ax=ax); # Plots simulations ax = evidently.viz.setup_ddm_plot(model) evidently.viz.plot_traces(model, X, responses, rts, ax=ax, terminate=True, show_mean=True); # Show raw data ax = evidently.viz.setup_ddm_plot(model) for resp in [1, -1]: mask = (responses == resp) # Split by response evidently.viz.plot_trace_mean(model, X[mask], ax=ax, label='Response: %i' % resp) plt.legend(); mX = evidently.utils.lock_to_movement(X, rts, duration=2) # Time-lock to threshold crossing ax = evidently.viz.setup_ddm_plot(model, time_range=(-2, 0)) evidently.viz.plot_traces(model, mX, responses, rts, ax=ax, show_mean=True); ax = evidently.viz.setup_ddm_plot(model, time_range=(-2, 0)) for resp in [1, -1]: mask = responses == resp resp_mX = evidently.utils.lock_to_movement(X[mask], rts[mask]) evidently.viz.plot_trace_mean(model, resp_mX, ax=ax, label='Response: %i' % resp) plt.legend();These high-level functions can create multi-axis figures.evidently.viz.visualise_model(model, model_type='ddm', measure='means');Interactive VisualisationUsing the `ipywidgets` package, we can wrap high-level visualisation functions like `accum.viz.visualise_ddm` in a call to `ipywidgets` to make them interactive.To try the interactive plots, download this repository to your own computer, or run the code in the cloud by visiting [this Binder notebook](https://mybinder.org/v2/gh/EoinTravers/Evidently/master?filepath=dashboards%2FInteractive%20Models.ipynb).[![Launch Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/EoinTravers/Evidently/master?filepath=dashboards%2FInteractive%20Models.ipynb)from ipywidgets import interact, FloatSlider def fs(v, low, high, step, desc=''): return FloatSlider(value=v, min=low, max=high, step=step, description=desc, continuous_update=False) def ddm_simulation_plot(t0=1., v=.5, z=0., a=.5, c=.1): model = evidently.Diffusion(pars=[t0, v, z, a, c]) evidently.viz.visualise_model(model) title = 't0 = %.1f, Drift = %.1f, Bias = %.1f, Threshold = %.1f; Noise SD = %.1f' % (t0, v, z, a, c) plt.suptitle(title,
y=1.01) interact(ddm_simulation_plot, t0 = fs(1., 0, 2., .1, 't0'), v = fs(.5, 0, 2., .1, 'Drift'), z = fs(0., -1., 1., .1, 'Bias'), a = fs(.5, 0., 2., .1, 'Threshold'), c = fs(.1, 0., 1., .1, 'Noise SD'));Table of Contents1  Goal2  Var3  Init4  Test samples4.1  n50_Pseudomonas4.2  n104.3  n55  Gene list Goal* test dataset construction & evaluation for Struo2 Varsamps_dir = '/ebio/abt3_projects/software/dev/struo2/tests/samples/' work_dir = '/ebio/abt3_projects/software/dev/struo2/tests/data/' meta_file = '/ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/' meta_file = file.path(meta_file, 'metadata_1per-GTDB-Spec_gte50comp-lt5cont_wtaxID_wPath.tsv')Initlibrary(dplyr) library(tidyr) library(ggplot2) library(LeyLabRMisc) library(uuid) df.dims()Test samples# reading in GTDB metadata file used for Struo meta = read.delim(meta_file, sep='\t') metan50_Pseudomonas# selecting subset meta_n50 = meta %>% filter(grepl('Pseudomonas', gtdb_taxonomy)) %>% sample_n(50) meta_n50 outF = file.path(samps_dir, 'GTDBr95_n50_Pseudomonas.tsv') write_table(meta_n50, outF)File written: /ebio/abt3_projects/software/dev/struo2/tests/samples//GTDBr95_n50_Pseudomonas.tsvn10# selecting subset meta_n10 = meta %>% sample_n(10) meta_n10 outF = file.path(samps_dir, 'GTDBr95_n10.tsv') write_table(meta_n10, outF)File written: /ebio/abt3_projects/software/dev/struo2/tests/samples//GTDBr95_n10.tsvn5# selecting subset meta_n5 = meta %>% anti_join(meta_n10, c('accession')) %>% sample_n(5) meta_n5 outF = file.path(samps_dir, 'GTDBr95_n5.tsv') write_table(meta_n5, outF)File written: /ebio/abt3_projects/software/dev/struo2/tests/samples//GTDBr95_n5.tsvGene list* A list of genes to add to an existing gene database* Columns required in gene metadata table:```'seq_uuid','seq_orig_name','domain','phylum','class','order','family','genus','species','taxid','genome_name','genome_length_bp'```F = file.path(work_dir, 'clusters_rep-seqs_tmp.txt') meta = read.delim(F, sep='\t', header=FALSE) meta meta_f = meta %>% rename('seq_orig_name' = V1) %>% mutate(seq_uuid = gsub('-', '', UUIDgenerate())) %>% mutate(domain = '', phylum = '', class = '', order = '', family = '', genus = gsub('(.+)\\.s__.+', '\\1', V3), species = gsub('.+\\.(s__.+)', '\\1', V3), taxid = gsub('.+__taxID(.+)', '\\1', V3), genome_name = '', genome_length_bp = '') %>% mutate(species = gsub('__taxID.+', '', species), taxid = ifelse(grepl('^[0-9]+$', taxid), taxid, '')) %>% dplyr::select(seq_uuid, seq_orig_name, domain, phylum, class, order, family, genus, species, taxid, genome_name, genome_length_bp) meta_f # writing outF = file.path(work_dir, 'clusters_rep-seqs.txt') write_table(meta_f, outF)File written: /ebio/abt3_projects/software/dev/struo2/tests/data//clusters_rep-seqs.txtCaffe2 GPU Distributed IntroductionThis example demonstrates how to run standard Caffe2 [resnet50_trainer.py](https://github.com/caffe2/caffe2/blob/master/caffe2/python/examples/resnet50_trainer.py) example using Batch AI. You can run it on a single or multiple compute nodes. Details- Standard Caffe2 sample script [resnet50_trainer.py](https://github.com/caffe2/caffe2/blob/master/caffe2/python/examples/resnet50_trainer.py) is used;- MNIST Dataset has been translated into a lmdb database, and can be obtained at http://download.caffe2.ai/databases/mnist-lmdb.zip;- Automatically created NFS folder will be used for rendezvous temp files to coordinate between each shard/node - Standard output of the job will be stored on Azure File Share. 
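The preprocessed MNIST lmdb archive mentioned in the details above can also be fetched by hand if you want to inspect it locally. A minimal sketch, assuming you only want the zip downloaded and unpacked into a local folder (the notebook's own helper below performs the download and the upload to Azure for you):

```python
# Minimal local-download sketch; local file/folder names are assumptions.
import os
import zipfile
import urllib.request

url = 'http://download.caffe2.ai/databases/mnist-lmdb.zip'
archive = 'mnist-lmdb.zip'
if not os.path.exists(archive):
    urllib.request.urlretrieve(url, archive)
with zipfile.ZipFile(archive) as zf:
    zf.extractall('mnist_dataset')
print(os.listdir('mnist_dataset'))  # should include the mnist_train_lmdb database used by the job below
```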
Instructions Install Dependencies and Create Configuration file.Follow [instructions](/recipes) to install all dependencies and create configuration file. Read Configuration and Create Batch AI clientfrom __future__ import print_function from datetime import datetime import os import sys import zipfile from azure.storage.file import FileService from azure.storage.blob import BlockBlobService import azure.mgmt.batchai.models as models # The BatchAI/utilities folder contains helper functions used by different notebooks sys.path.append('../../../') import utilities as utils cfg = utils.config.Configuration('../../configuration.json') client = utils.config.create_batchai_client(cfg)Create Resoruce Group and Batch AI workspace if not exists:utils.config.create_resource_group(cfg) _ = client.workspaces.create(cfg.resource_group, cfg.workspace, cfg.location).result()1. Prepare Training Dataset and Script in Azure Storage Create Azure Blob ContainerWe will create a new Blob Container with name `batchaisample` under your storage account. This will be used to store the *input training dataset***Note** You don't need to create new blob Container for every cluster. We are doing this in this sample to simplify resource management for you.azure_blob_container_name = 'batchaisample' blob_service = BlockBlobService(cfg.storage_account_name, cfg.storage_account_key) blob_service.create_container(azure_blob_container_name, fail_on_exist=False)Upload MNIST Dataset to Azure Blob ContainerFor demonstration purposes, we will download preprocessed MNIST dataset to the current directory and upload it to Azure Blob Container directory named `mnist_dataset`.There are multiple ways to create folders and upload files into Azure Blob Container - you can use [Azure Portal](https://ms.portal.azure.com), [Storage Explorer](http://storageexplorer.com/), [Azure CLI2](/azure-cli-extension) or Azure SDK for your preferable programming language.In this example we will use Azure SDK for python to copy files into Blob.mnist_dataset_directory = 'mnist_dataset' utils.dataset.download_and_upload_mnist_dataset_to_blob( blob_service, azure_blob_container_name, mnist_dataset_directory)Create Azure File ShareFor this example we will create a new File Share with name `batchaisample` under your storage account. This will be used to share the *training script file* and *output file*.**Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you.azure_file_share_name = 'batchaisample' file_service = FileService(cfg.storage_account_name, cfg.storage_account_key) file_service.create_share(azure_file_share_name, fail_on_exist=False)Deploy Sample Script to Azure File ShareDownload original sample scriptscript_to_deploy = 'resnet50_trainer.py' utils.dataset.download_file('https://raw.githubusercontent.com/caffe2/caffe2/v0.6.0/caffe2/python/examples/resnet50_trainer.py', script_to_deploy)We will create a folder on Azure File Share containing a copy of original sample scriptscript_directory = 'Caffe2Samples' file_service.create_directory( azure_file_share_name, script_directory, fail_on_exist=False) file_service.create_file_from_path( azure_file_share_name, script_directory, script_to_deploy, script_to_deploy)2. Create Azure Batch AI Compute Cluster Configure Compute Cluster- For this example we will use a GPU cluster of `STANDARD_NC6` nodes. 
Number of nodes in the cluster is configured with `nodes_count` variable;- We will call the cluster `nc6`;So, the cluster will have the following parameters:nodes_count = 2 cluster_name = 'nc6' parameters = models.ClusterCreateParameters( location=cfg.location, vm_size='STANDARD_NC6', scale_settings=models.ScaleSettings( manual=models.ManualScaleSettings(target_node_count=nodes_count) ), user_account_settings=models.UserAccountSettings( admin_user_name=cfg.admin, admin_user_password=cfg. or None, admin_user_ssh_public_key=cfg.admin_ssh_key or None, ) )Create Compute Cluster_ = client.clusters.create(cfg.resource_group, cfg.workspace, cluster_name, parameters).result()Monitor Cluster CreationGet the just created cluster. The `utilities` module contains a helper function to print out all kind of nodes count in the cluster.cluster = client.clusters.get(cfg.resource_group, cfg.workspace, cluster_name) utils.cluster.print_cluster_status(cluster)3. Run Azure Batch AI Training Job Configure Job- The job will use `caffe2ai/caffe2` container.- Will run `resnet50_trainer.py` from SCRIPT input directory;- Will output standard output and error streams to file share;- Will mount file share at folder with name `afs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_JOB_MOUNT_ROOT/afs`;- Will mount Azure Blob Container at folder with name `bfs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_JOB_MOUNT_ROOT/bfs`;- The job needs to know where to find mnist_replica.py and input MNIST dataset. We will create two input directories for this. The job will be able to reference those directories using environment variables: - ```AZ_BATCHAI_INPUT_SCRIPT``` : refers to the directory containing the scripts at mounted Azure File Share - ```AZ_BATCHAI_INPUT_DATASET``` : refers to the directory containing the training data on mounted Azure Blob Container- Will use $AZ_BATCHAI_SHARED_JOB_TEMP shared directory created by Batch AI to coordinate execution between nodes;- For demostration purpose, we will only run 5 epochs with epoch size as 2000.azure_file_share = 'afs' azure_blob = 'bfs' parameters = models.JobCreateParameters( location=cfg.location, cluster=models.ResourceId(id=cluster.id), node_count=2, mount_volumes=models.MountVolumes( azure_file_shares=[ models.AzureFileShareReference( account_name=cfg.storage_account_name, credentials=models.AzureStorageCredentialsInfo( account_key=cfg.storage_account_key), azure_file_url='https://{0}.file.core.windows.net/{1}'.format( cfg.storage_account_name, azure_file_share_name), relative_mount_path=azure_file_share) ], azure_blob_file_systems=[ models.AzureBlobFileSystemReference( account_name=cfg.storage_account_name, credentials=models.AzureStorageCredentialsInfo( account_key=cfg.storage_account_key), container_name=azure_blob_container_name, relative_mount_path=azure_blob) ] ), input_directories = [ models.InputDirectory( id='SCRIPT', path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}'.format(azure_file_share, script_directory)), models.InputDirectory( id='DATASET', path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}'.format(azure_blob, mnist_dataset_directory)) ], std_out_err_path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format(azure_file_share), container_settings=models.ContainerSettings( image_source_registry=models.ImageSourceRegistry(image='caffe2ai/caffe2')), caffe2_settings = models.Caffe2Settings( python_script_file_path='$AZ_BATCHAI_INPUT_SCRIPT/'+script_to_deploy, command_line_args='--num_shards 2 --shard_id $AZ_BATCHAI_TASK_INDEX --run_id 0 
--epoch_size 2000 --num_epochs 5 --train_data $AZ_BATCHAI_INPUT_DATASET/mnist_train_lmdb --file_store_path $AZ_BATCHAI_SHARED_JOB_TEMP'))Create a training Job and wait for Job completionexperiment_name = 'caffe2_experiment' experiment = client.experiments.create(cfg.resource_group, cfg.workspace, experiment_name).result() job_name = datetime.utcnow().strftime('caffe2_%m_%d_%Y_%H%M%S') job = client.jobs.create(cfg.resource_group, cfg.workspace, experiment_name, job_name, parameters).result() print('Created Job {0} in Experiment {1}'.format(job.name, experiment.name))Wait for Job to FinishThe job will start running when the cluster will have enough idle nodes. The following code waits for job to start running printing the cluster state. During job run, the code prints current content of stdout.txt.**Note** Execution may take several minutes to complete.utils.job.wait_for_job_completion(client, cfg.resource_group, cfg.workspace, experiment_name, job_name, cluster_name, 'stdouterr', 'stderr-1.txt')List stdout.txt and stderr.txt files for the Jobfiles = client.jobs.list_output_files(cfg.resource_group, cfg.workspace, experiment_name, job_name, models.JobsListOutputFilesOptions(outputdirectoryid='stdouterr')) for f in list(files): print(f.name, f.download_url or 'directory')4. Clean Up (Optional) Delete the Job_ = client.jobs.delete(cfg.resource_group, cfg.workspace, experiment_name, job_name)Delete the ClusterWhen you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code._ = client.clusters.delete(cfg.resource_group, cfg.workspace, cluster_name)Delete File ShareWhen you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code.service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.delete_share(azure_file_share_name)from pixel_elements import * # Helpers def vert_join(elements): """Joins elements vertically in reversed order.""" return np.concatenate(list(reversed(elements)), axis = 0) def horz_join(elements): """Joins elements horizontally in the same order.""" return np.concatenate(elements, axis = 1) def paint(element, x, y): """Paints an element in the x, y position of the grid.""" y = int(board.shape[0] / 5) - y board[(y * 5) - 5: y * 5, x * 5: (x * 5) + 5, :] = element # Tools def make_board(h, w): """Makes an MxN drawing board.""" blank = make_canvas() board = horz_join([vert_join([blank] * h)] * w) return board def paint_vert(x, y, monomer, link = 'a', occu = None): """Paints a monomer with a vertical stem at x, y.""" paint(vert_stick(link, occu), x, y) paint(monomer, x, y + 1) def paint_fuc(x, y, link = 'a', occu = None): """Paints Fucose with a horizontal stem from x, y.""" paint(horz_stick(link, occu), x + 1, y + 1) paint(Fuc, x + 2, y + 1) def paint_left_diag(x, y, monomer, link = 'a', occu = None): """Paints a monomer with a left diagonal stem from x, y.""" paint(left_diag_stick(link, occu), x - 1, y) paint(monomer, x - 2, y + 1) def paint_right_diag(x, y, unit, link = 'a', occu = None): """Paints a monomer with a right diagonal stem from x, y.""" paint(right_diag_stick(link, occu), x + 1, y) paint(unit, x + 2, y + 1) # Visuals def show(board, axis = True, save = False): """Shows the drawing board.""" h, w, _ = board.shape plt.imshow(board) plt.grid() if axis: plt.xticks(np.arange(0, w, 5) + 2, range(int(w / 5))) plt.yticks(np.arange(0, h, 5) + 2, reversed(range(int(h / 5)))) else: plt.axis('off') 
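# write the drawing to 'pixel_glycan.png' when save=True, otherwise just display it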
plt.savefig('pixel_glycan.png', bbox_inches = 'tight') if save else plt.show() # Elements elements = horz_join([Glc, Man, Gal, GlcNAc, GalNAc, GlcA, Neu5Ac, Neu5Gc, Fuc, vert_stick(), horz_stick(), left_diag_stick(), right_diag_stick()]) show(elements) board = make_board(12, 9) # makes a drawing board of h = 12 and w = 9 c = 4 # sets x = 4 as the centre paint_vert(c, 0, GlcNAc) # draws GlcNAc with stem from y = 0 paint_vert(c, 2, Gal) paint_fuc(c, 2) # attaches Fuc next to unit with stem from y = 2 paint_vert(c, 4, Gal) show(board) # shows the drawing board board = make_board(12, 9) c = 4 paint_vert(c, 0, Man) paint_left_diag(c, 2, Man) # draws Man diagonally to the left from y = 2 paint_right_diag(c, 2, Man) paint_vert(c - 2, 4, Man) paint_left_diag(c + 2, 4, Man) paint_vert(c + 2, 4, Man) paint_vert(c - 2, 6, Man) paint_vert(c, 6, Man) paint_vert(c + 2, 6, Man) show(board, save = True) # drawings can be saved board = make_board(12, 9) c = 4 paint_vert(c, 0, Man, 'a', '3') # linkage information like (a1-3) can be added paint_left_diag(c, 2, Man, 'b', '4') paint_right_diag(c, 2, Man, 'a', '3') paint_vert(c - 2, 4, Man, 'b', '4') paint_left_diag(c + 2, 4, Man, 'a', '3') paint_vert(c + 2, 4, Man, 'b', '4') paint_vert(c - 2, 6, Man, 'a', '3') paint_vert(c, 6, Man, 'b', '4') paint_vert(c + 2, 6, Man, 'a', '3') show(board) # black is alpha, grey is beta and the red dot depicts the occupancy board.shape # each board is a numpy array of the shape # more examples board = make_board(12, 9) c = 4 paint_vert(c, 0, GlcNAc) paint_left_diag(c, 2, GlcNAc) paint_vert(c, 2, GlcNAc) paint_right_diag(c, 2, GlcNAc) show(board) board = make_board(12, 9) c = 4 paint_vert(c, 0, GlcNAc) paint_vert(c, 2, Man) paint_left_diag(c, 4, Man) paint_right_diag(c, 4, Man) paint_vert(c - 2, 6, GlcNAc) paint_fuc(c - 2, 6) paint_vert(c + 2, 6, GlcNAc) paint_fuc(c + 2, 6) paint_vert(c - 2, 8, Gal) paint_vert(c + 2, 8, Gal) show(board) board = make_board(12, 9) c = 4 paint_vert(c, 0, GlcNAc) paint_vert(c, 2, Man) paint_left_diag(c, 4, Man) paint_right_diag(c, 4, Man) paint_left_diag(c - 2, 6, GlcNAc) paint_vert(c - 2, 6, GlcNAc) paint_right_diag(c + 2, 6, GlcNAc) paint_vert(c + 2, 6, GlcNAc) paint_vert(c - 4, 8, GlcNAc) paint_vert(c - 2, 8, GlcNAc) paint_vert(c + 2, 8, GlcNAc) paint_vert(c + 4, 8, GlcNAc) paint_vert(c - 4, 10, Gal) paint_vert(c - 2, 10, Gal) paint_vert(c + 2, 10, Gal) paint_vert(c + 4, 10, Gal) show(board)Qiita ~ 傾向スコアを用いて観察データからUpliftをモデリングする~import numpy as np import pandas as pd import sklearn import statsmodels.api as sm import random import sys sys.path.append("/home/yuta_saito/notebook/qiita/uplift-modeling/uplift_tools/") from metrics import * from operator import itemgetter from pandas import DataFrame, Series from plotly.offline import iplot, plot, init_notebook_mode from plotly.graph_objs import Histogram, Box, Scatter, Figure, Layout, Bar from sklearn.cross_validation import train_test_split, cross_val_score, StratifiedKFold from sklearn.decomposition import PCA from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.feature_selection import SelectFromModel, SelectKBest from sklearn.kernel_ridge import KernelRidge from sklearn.linear_model import LogisticRegression, LinearRegression, ElasticNet, Ridge from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, roc_curve, auc, confusion_matrix, precision_recall_curve from sklearn.model_selection import KFold, TimeSeriesSplit, GridSearchCV, RandomizedSearchCV from sklearn.preprocessing import 
StandardScaler, RobustScaler, Normalizer from sklearn.pipeline import Pipeline from sklearn.svm import SVR, LinearSVRRegression Uplift Modeling ~ Separate Model Approach ~# データの読み込み data_df = pd.read_csv('https://github.com/iwanami-datascience/vol3/raw/master/kato%26hoshino/q_data_x.csv') #data_df.head() # 説明変数と目的変数を指定 cols = ["child_dummy", "area_kanto", "area_tokai", "area_keihanshin", "T", "F1", "F2", "F3", "M1", "M2"] X = data_df[cols] y = DataFrame(data_df.gamesecond) # CMありをtreatment, CMなしをcontrolとする treat = (data_df.cm_dummy == 1).tolist() # train, test半分ずつに分ける y_train, y_test, train_treat, test_treat, X_train, X_test = train_test_split(y.values, treat, X, test_size=0.5, random_state=2) # indexをリセット X_train = X_train.reset_index(drop=True) X_test = X_test.reset_index(drop=True) # Separate Model Approach # 学習用に用いるデータを生成 num = len(y_train) treat_y = np.reshape(np.array([y_train[i] for i in range(num) if train_treat[i] is True]), -1) control_y = np.reshape(np.array([y_test[i] for i in range(num) if train_treat[i] is False]), -1) treat_X = DataFrame([X_train.loc[i] for i in range(num) if train_treat[i] is True]) control_X = DataFrame([X_train.loc[i] for i in range(num) if train_treat[i] is False]) # ランダムフォレスト回帰を用いる params_rf = {"max_depth": [10, 50, 100, 200, 500]} gs_rf_treat = GridSearchCV(RandomForestRegressor(n_estimators=500), param_grid=params_rf, cv=5) gs_rf_control = GridSearchCV(RandomForestRegressor(n_estimators=500), param_grid=params_rf, cv=5) # 介入群と非介入群それぞれでパラメータチューニング及びスマホアプリ利用時間を予測するモデルを構築 gs_rf_treat.fit(treat_X, treat_y) gs_rf_control.fit(control_X, control_y) # uplift-scoreの算出 # CMを見た場合のアプリ利用時間 pred_treat = gs_rf_treat.predict(X_test) # CMを見なかった場合のアプリ利用時間 pred_control = gs_rf_control.predict(X_test) # 今回は、(CMを見た場合のアプリ利用時間) - (CMを見なかった場合のアプリ利用時間) をuplift_scoreとする。 uplift_score = pred_treat - pred_control uplift_df = uplift_frame_reg(y_test.T.tolist()[0], test_treat, uplift_score) #uplift_curve(uplift_df) lr = LinearRegression() lr.fit(X_test, uplift_score) for feature, coef in zip(X_train.columns, lr.coef_): print(f"{feature} / {round(coef, 4)}")child_dummy / 929.3386 area_kanto / -1396.441 area_tokai / -3068.5063 area_keihanshin / 635.3306 T / 12180.1861 F1 / 1299.7327 F2 / 1779.0302 F3 / 895.7932 M1 / 2782.9887 M2 / 3498.1391Uplift Modeling ~Transformed Outcome Tree Model~# データの読み込み data_df = pd.read_csv('https://github.com/iwanami-datascience/vol3/raw/master/kato%26hoshino/q_data_x.csv') #data_df.head() # 傾向スコアを求める # 説明変数 cols_ = ["age", "sex", "TVwatch_day", "marry_dummy", "child_dummy", "inc", "pmoney", "area_kanto", "area_tokai", "area_keihanshin", "job_dummy1", "job_dummy2", "job_dummy3", "job_dummy4", "job_dummy5", "job_dummy6", "fam_str_dummy1", "fam_str_dummy2", "fam_str_dummy3", "fam_str_dummy4"] X_ = data_df[cols_].copy() # 切片の導入 X_.loc[:, "Intercept"] = 1 # CM視聴有無ダミー z1 = data_df.cm_dummy # StatsModelsのLogitにより傾向スコアを推定 glm = sm.Logit(z1, X_) result = glm.fit() ps = result.predict(X_) #ps # Propensity Scoreで補正された目的変数のカラムを作成 data_df.loc[:, "ps"] = ps data_df.loc[:, "adj_gamesecond"] = 0 data_df.loc[data_df.cm_dummy == 1, "adj_gamesecond"] = data_df.loc[data_df.cm_dummy == 1, "gamesecond"] / data_df.loc[data_df.cm_dummy == 1, "ps"] data_df.loc[data_df.cm_dummy == 0, "adj_gamesecond"] = -data_df.loc[data_df.cm_dummy == 0, "gamesecond"] / (1 - data_df.loc[data_df.cm_dummy == 0, "ps"]) adj_y = data_df[["gamesecond", "adj_gamesecond"]] # CMありをtreatment, CMなしをcontrolとする treat = (data_df.cm_dummy == 1).tolist() # train, test半分ずつに分ける adj_y_train, adj_y_test, 
train_treat, test_treat, X_train, X_test = train_test_split(adj_y, treat, X, test_size=0.5, random_state=2) # indexをリセット X_train = X_train.reset_index(drop=True) X_test = X_test.reset_index(drop=True) adj_y_train = adj_y_train.reset_index(drop=True) adj_y_test = adj_y_test.reset_index(drop=True) # ランダムフォレスト回帰を用いる params_rf = {"max_depth": [10, 50, 100, 200, 500]} gs_rf = GridSearchCV(RandomForestRegressor(n_estimators=500), param_grid=params_rf, cv=5) # パラメータチューニング及び補正スマホアプリ利用時間を予測するモデルを構築 gs_rf.fit(X_train, adj_y_train.adj_gamesecond) # 補正uplift_scoreを求める adj_uplift_score = gs_rf.predict(X_test) adj_uplift_df = uplift_frame_reg(adj_y_test.gamesecond.tolist(), test_treat, adj_uplift_score) #uplift_curve(adj_uplift_df, "Adj-Uplift Score") lr = LinearRegression() lr.fit(X_train, gs_rf.predict(X_train)) for feature, coef in zip(X_train.columns, lr.coef_): print(f"{feature} / {round(coef, 4)}")child_dummy / -7845.8122 area_kanto / -2572.0754 area_tokai / -4473.9973 area_keihanshin / 11648.8147 T / 4738.0588 F1 / 465.9719 F2 / 3207.2909 F3 / -630.0232 M1 / -5634.8067 M2 / 11468.2376History matchingThis notebook demonstrates a pipeline for adaptation (history matching) of the real-world reservoir model to match daily recorded well's production rates. Adaptation is based on gradient propagation through the neural network model used for reservoir simulation as it is described in the paper [End-to-end neural network approach to 3D reservoir simulation and adaptation](https://arxiv.org/abs/2102.10304). Content1. [Initialization](Initialization)2. [Load the reservoir model](Load-the-reservoir-model)3. [Load the metamodel](Load-the-metamodel)4. [Rates module](Rates-module)4. [Define adapdation space](Define-adapdation-space)5. [Target wells set up](Target-wells-set-up)5. [Load target rates](Load-target-rates)7. [Adaptation period set up](Adaptation-period-set-up)8. [Run adaptation](Run-adaptation)9. [Analysis of correction factors](Analysis-of-correction-factors)9. [Inference on the full period](Inference-on-the-full-period)9. 
[Plot predicted rates](Plot-predicted-rates) Initialization Required importsimport sys sys.path.append('..') import numpy as np import pandas as pd import torch import torch.optim as optim from deepfield import Field from deepfield.metamodelling import init_metamodel from deepfield.datasets import FieldDataset, FieldSample from deepfield.datasets.transforms import ToTensor, Normalize, AutoPadding, Denormalize from deepfield.metamodelling.rates import RatesModule from nb_utils import (loss_plot, hist_plots, slice_view, cumulative_plots, corr_plots, gas_oil_plot) device = torch.device('cuda')Specify the reservoir model and initial preprocessing (convertion to `torch` tensors, normalization and appropriate padding):SAMPLE_ATTRS = { 'masks': ['actnum', 'time', 'well_mask', 'named_well_mask', 'cf_mask', 'perf_mask'], 'states': ['pressure', 'soil', 'swat', 'sgas', 'rs'], 'rock': ['poro', 'permx', 'permy', 'permz'], 'tables': ['pvto', 'pvtw', 'pvdg', 'swof', 'sgof'], 'control': ['bhpt'] } dataset = FieldDataset(src='fields/', sample_attrs=SAMPLE_ATTRS) dataset.load_statistics('model_stats.pkl') dataset.set_transform([ToTensor, Normalize, AutoPadding])Load the reservoir model Here we obtain a sample with preprocessed reservoir model data:sample = next(iter(dataset))Spatial size of the model and number of active cellsprint('Dimensions:', list(sample.masks.actnum.shape)) print('Active cells:', sample.masks.actnum.sum().item())Dimensions: [148, 124, 212] Active cells: 1335384Hint: to save time it is recommended to dump sample into the .hdf5 file:```pythonsample.dump('sample_dump.hdf5', state=True)```Then, one can restore the sample as follows:```pythonsample = FieldSample('sample_dump.hdf5').load()sample.transformed(ToTensor, inplace=True)``` To save space on GPU, we avoid moving the full sample to the device. 
Instead, only time-independent parameters go to GPU first:for k in sample.tables.keys(): sample.tables[k] = sample.tables[k].to(device) sample.masks.actnum = sample.masks.actnum.long().to(device) sample.masks.well_mask = sample.masks.well_mask.to(device) control_t = sample.masks.control_t.unsqueeze(0).to(device) t = sample.masks.time.unsqueeze(0).to(device) s_init = sample.states[:1].clone().to(device) params = sample.rock.clone().unsqueeze(0).to(device) actnum = sample.masks.actnumLarger tensors we leave on CPU and will move to GPU only small slices of these tensorscf_mask = sample.masks.cf_mask.clone() perf_mask = sample.masks.perf_mask.clone() base_control = sample.control.clone()We will also need a sample that contains data at well blocks onlysample_at_wells = sample.at_wells(inplace=False)Load the metamodel Init and load the pre-trained metamodelautoenc_path = 'ae_module.pt' lsd_path = 'lsd_module.pt' lsd, ae_kwargs, kwargs = init_metamodel(autoenc_path=autoenc_path, lsd_path=lsd_path, z_ch=32, max_integration_timestep=1000, use_checkpointing=True, use_inner_checkpointing=True, max_in_batch=1, max_seq_len=10, atol=1e-3, device=device)Rates moduleInit the differentiable (`torch` based) model for production rates calculationrates_module = RatesModule(units='METRIC').to(device)Define adapdation space Tensor `rock_coefs` contains additive correction factors for upscaled `ROCK` propertiesshape = sample.rock.shape rock_coefs = 0.01*torch.randn((1, 4) + tuple(i//4 for i in shape[1:])).to(device) rock_coefs.requires_grad = True`up` module upscales rock correction factors to the full shapeup = torch.nn.Upsample(size=shape[-3:])Tensor `cf_coefs` contains corrections for connection factors of the wellscf_coefs = 0.01*torch.randn((sample.masks.well_mask > 0).sum()).to(device) cf_coefs.requires_grad = TrueDefine an optimizer on the space of `rock_coefs` and `cf_coefs` variables with appropriate `learning_rate` and `weight decay` parametersoptimizer = optim.Adam([rock_coefs, cf_coefs], lr=0.3, weight_decay=0.0005)Target wells set upHere we specify a set of target wells for adaptationtarget_wells = np.array(['well_1', 'well_2', 'well_3', 'well_4', 'well_5', 'well_6', 'well_7', 'well_8', 'well_9', 'well_10', 'well_11', 'well_12'])Load target ratesSeparately, we load daily production rates that will be used as adaptation targets and convert them to torch tensors:n_timesteps = 365 + 181 #adaptation + forecast period in days model = Field('fields/field_model.hdf5').load() dates = model.wells.result_dates[:n_timesteps] target_rates = {} for name in target_wells: rates = model.wells[name].total_rates.set_index('DATE') df = pd.DataFrame(index=dates).join(rates).fillna(0) vals = df[['WWPR', 'WGPR', 'WOPR']].values.astype(float) target_rates[name] = torch.Tensor(vals).to(device)INFO:Field:===== Field summary ===== INFO:Field:GRID attributes: MAPAXES, ACTNUM, COORD, DIMENS, ZCORN INFO:Field:ROCK attributes: PERMX, PERMY, PERMZ, PORO INFO:Field:STATES attributes: PRESSURE, RS, SGAS, SOIL, SWAT INFO:Field:TABLES attributes: DENSITY, PVDG, PVTO, PVTW, ROCK, SGOF, SWOF INFO:Field:WELLS attributes: BLOCKS, RESULTS, BLOCKS_INFO, HISTORY, EVENTS, PERF, WELLTRACK INFO:Field:========================= INFO:Field:Grid pillars (`COORD`) are mapped to new axis with respect to `MAPAXES`.Adaptation period set upHere we specify time segments on which we will run adaptation. 
The segments cover 1 year in total:max_len = 10 n_timesteps_train = 365 train_segments = np.arange(0, n_timesteps_train + max_len, max_len) train_segments = np.clip(train_segments, 0, n_timesteps_train-1) print(train_segments)[ 0 10 20 30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200 210 220 230 240 250 260 270 280 290 300 310 320 330 340 350 360 364]Run adaptationAdaptation is an iterative procedure. First we iterate over time segments. On each segment we calculate production rates, compute loss function and accumulate gradients with respect to the adaptation variables. At the end of the adaptation period we update adaptation variables according to the gradients accumulated and repeat the iterations.loss_hist = [] sample.sample_attrs.rates = ['BWPR', 'BGPR', 'BOPR'] #MAKE SURE THE SAME ORDER IN TARGETS! for _ in range(150): loss_sum = 0 z_init = lsd.state_enc(s_init, **ae_kwargs) for l, r in zip(train_segments[:-1], train_segments[1:]): params_actn = (params + up(rock_coefs)) * actnum #apply rock correction r_params = lsd.params_enc(params_actn, **ae_kwargs) s_control, s_control_t = lsd.get_control_subset(base_control.unsqueeze(0), control_t, (t[0, l], t[0, r])) s_control = s_control.to(device) r_control = lsd.control_enc(s_control, **ae_kwargs) z_pred_piece = lsd._compute_dynamics((z_init, r_params, r_control), s_control_t, t[:, l:r+1], **kwargs) s_pred_piece = lsd._decode(z_pred_piece, **ae_kwargs) sample.states = s_pred_piece[0] sample.control = s_control[0] sample.rock = params_actn[0] sample.masks.time = t[0, l:r+1] sample.masks.control_t = s_control_t[0] sample.masks.cf_mask = cf_mask[l:r].to(device) sample.masks.perf_mask = perf_mask[l:r].to(device) sample.state.normalized = True sample = sample.transformed(Denormalize, inplace=False) sample.states = torch.clamp(sample.states, min=0) sample.rock = torch.clamp(sample.rock, min=0) rates = rates_module(sample) rates = torch.clamp(rates, min=0) rates = rates * torch.exp(cf_coefs) #apply CF correction loss = 0 for name in target_wells: targ = target_rates[name][l+1:r+1] mask = sample_at_wells.masks.named_well_mask[name] pred = rates[..., mask.bool()].sum(dim=-1) * (targ != 0) log_pred = torch.log(1 + pred) log_targ = torch.log(1 + targ) loss += ((log_pred - log_targ)**2).mean() loss = loss / len(target_wells) z_init = z_pred_piece[:, -1].detach() loss.backward() loss_sum += loss.item() optimizer.step() optimizer.zero_grad() loss_hist.append(loss_sum)Plot the adaptation loss decay against iterationsloss_plot(loss_hist, logscale=True)Analysis of correction factors Some data preparation. 
Here we translate tensors to `numpy` arrays, get rock corrections at active cells and connectivity corrections at the target wells.up_rock_coefs = up(rock_coefs) final_params = ((params + up_rock_coefs) * actnum).detach().cpu().numpy()[0] initial_params = params.detach().cpu().numpy()[0] rock_diff = up_rock_coefs[..., actnum==1].detach().cpu().numpy().ravel() cf_coefs_at_wells = [] for name in target_wells: mask = sample_at_wells.masks.named_well_mask[name].bool() cf_coefs_at_wells.append(cf_coefs[mask].detach().cpu().numpy()) cf_coefs_at_wells = np.exp(np.hstack(cf_coefs_at_wells)) target_wells_mask = np.zeros(model.well_mask.shape).astype(bool) for name in target_wells: target_wells_mask[model.well_mask == name] = TruePlot distributions of the rock and connectivity correction factorshist_plots(rock_diff, cf_coefs_at_wells)Slice view of the initial and final porosity and the differenceslice_view(initial_params[0], final_params[0], target_wells_mask, cv=2, z_ind=50, name='porosity')Slice view for the x-permeabilityslice_view(initial_params[1], final_params[1], target_wells_mask, cv=2, z_ind=50, name='x-permeability', save='./JPSE_plots/Figure_9.pdf')Inference on the full period Specify time segments on which we will run the simulation. The segments cover 1.5 year in total:test_segments = np.arange(0, n_timesteps + max_len, max_len) test_segments = np.clip(test_segments, 0, n_timesteps-1) print(test_segments)[ 0 10 20 30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200 210 220 230 240 250 260 270 280 290 300 310 320 330 340 350 360 370 380 390 400 410 420 430 440 450 460 470 480 490 500 510 520 530 540 545]Run inferencerates_pred = [] sample.sample_attrs.rates = ['BWPR', 'BGPR', 'BOPR'] with torch.no_grad(): z_init = lsd.state_enc(s_init, **ae_kwargs) params_actn = (params + up(rock_coefs)) * actnum #apply rock correction r_params = lsd.params_enc(params_actn, **ae_kwargs) for l, r in zip(test_segments[:-1], test_segments[1:]): s_control, s_control_t = lsd.get_control_subset(base_control.unsqueeze(0), control_t, (t[0, l], t[0, r])) s_control = s_control.to(device) r_control = lsd.control_enc(s_control, **ae_kwargs) z_pred_piece = lsd._compute_dynamics((z_init, r_params, r_control), s_control_t, t[:, l:r+1], **kwargs) s_pred_piece = lsd._decode(z_pred_piece, **ae_kwargs) sample.states = s_pred_piece[0] sample.control = s_control[0] sample.rock = params_actn[0] sample.masks.time = t[0, l:r+1] sample.masks.control_t = s_control_t[0] sample.masks.cf_mask = cf_mask[l:r].to(device) sample.masks.perf_mask = perf_mask[l:r].to(device) sample.state.normalized = True sample = sample.transformed(Denormalize, inplace=False) sample.states = torch.clamp(sample.states, min=0) sample.rock = torch.clamp(sample.rock, min=0) rates = rates_module(sample, inplace=False) rates = torch.clamp(rates, min=0) rates = rates * torch.exp(cf_coefs) #apply CF correction rates_pred.append(rates.detach().cpu().numpy()) z_init = z_pred_piece[:, -1].detach() rates_pred = np.vstack(rates_pred)Plot predicted rates Collect target and predicted rates over target wells in `numpy` arrays:np_targets = [] np_pred = [] for name in target_wells: targ = target_rates[name].detach().cpu().numpy() np_targets.append(targ[1:n_timesteps]) mask = sample_at_wells.masks.named_well_mask[name].detach().cpu().numpy() pred = rates_pred[..., mask!=0].sum(axis=-1) #sum over all well blocks np_pred.append(pred) np_targets = np.array(np_targets) np_pred = np.array(np_pred)Plot cumulative production rates computed for all target wellsphases = 
[{'G': 'Gas', 'O': 'Oil', 'W': 'Water'}[k[1]] for k in sample.sample_attrs.rates] cumulative_plots(np_targets.sum(axis=0), np_pred.sum(axis=0), phases, vline=n_timesteps_train)Correlation between target and predicted cumulative production rates for individual wellscorr_plots(np_targets, np_pred_full, phases, mark_well=sample_well_id)Cumulative plot for the sample wellcumulative_plots(np_targets[sample_well_id], np_pred[sample_well_id], phases, vline=n_timesteps_train)Gas/oil ratiogas_oil_plot(np_targets[..., 1].sum(axis=0), np_pred[..., 1].sum(axis=0), np_targets[..., 2].sum(axis=0), np_pred[..., 2].sum(axis=0), vline=n_timesteps_train)Starbucks Capstone Challenge IntroductionThis data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ExampleTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. CleaningThis makes data cleaning especially important and tricky.You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. 
From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. Final AdviceBecause this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). Data SetsThe data is contained in three files:* portfolio.json - containing offer ids and metadata about each offer (duration, type, etc.)* profile.json - demographic data for each customer* transcript.json - records for transactions, offers received, offers viewed, and offers completedHere is the schema and explanation of each variable in the files:**portfolio.json*** id (string) - offer id* offer_type (string) - type of offer, i.e. BOGO, discount, informational* difficulty (int) - minimum required spend to complete an offer* reward (int) - reward given for completing an offer* duration (int) - time for offer to be open, in days* channels (list of strings)**profile.json*** age (int) - age of the customer* became_member_on (int) - date when customer created an app account* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)* id (str) - customer id* income (float) - customer's income**transcript.json*** event (str) - record description (i.e. transaction, offer received, offer viewed, etc.)* person (str) - customer id* time (int) - time in hours since start of test. The data begins at time t=0* value - (dict of strings) - either an offer id or transaction amount depending on the record**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the terminal from the orange icon in the top left of this notebook. The two images below show how to access the terminal and how the install works: first access the terminal, then run the command above. Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the cell below without any errors. Project OverviewThis project is part of the Udacity Data Scientist program. The program used to create the data simulates how people make purchasing decisions and how those decisions are influenced by promotional offers. The data is simulated for the sake of testing algorithms, not for the sake of mimicking real people. 
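As the Cleaning section above notes, an 'offer completed' record does not by itself mean the customer was influenced, because the offer may never have been viewed. The following sketch is a minimal, hypothetical illustration (toy records using the transcript-style columns `person`, `event`, `time`, plus an `offer_id` already pulled out of `value`) of how completions without a prior view of the same offer could be flagged; it is not the preprocessing used later in this notebook, and a full version would also need the offer duration and repeated receipts of the same offer.

```python
import pandas as pd

# Toy transcript-style records (hypothetical data; time is in hours since start of test)
toy = pd.DataFrame([
    {"person": "A", "event": "offer received",  "time": 0,  "offer_id": "o1"},
    {"person": "A", "event": "offer viewed",    "time": 6,  "offer_id": "o1"},
    {"person": "A", "event": "offer completed", "time": 30, "offer_id": "o1"},
    {"person": "B", "event": "offer received",  "time": 0,  "offer_id": "o1"},
    {"person": "B", "event": "offer completed", "time": 48, "offer_id": "o1"},  # completed but never viewed
])

views = (toy[toy.event == "offer viewed"][["person", "offer_id", "time"]]
         .rename(columns={"time": "view_time"}))
completions = (toy[toy.event == "offer completed"][["person", "offer_id", "time"]]
               .rename(columns={"time": "complete_time"}))

# A completion only counts as influenced if the same person viewed the same offer beforehand
merged = completions.merge(views, on=["person", "offer_id"], how="left")
merged["influenced"] = merged.view_time.notna() & (merged.view_time <= merged.complete_time)
print(merged)  # person B's completion is flagged as not influenced
```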
Project StatementThe goal of this project is to build a model that predicts whether a person who receives an offer will complete it.The approach is as follows:- Data Exploration- Data Preprocessing (cleaning)- Questions and Visualization- Modeling- Conclusion- Improvements Metrics - A supervised machine learning model is used (random forest classifier)- An artificial neural network model is also used- The models are used to predict whether a user will complete an offer. Data Explorationimport pandas as pd import numpy as np import math import json %matplotlib inline import seaborn as sns import datetime import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestRegressor # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True)C:\Users\User\Anaconda3\lib\site-packages\numpy\_distributor_init.py:32: UserWarning: loaded more than 1 DLL from .libs: C:\Users\User\Anaconda3\lib\site-packages\numpy\.libs\libopenblas.NOIJJG62EMASZI6NYURL6JBKM4EVBGM7.gfortran-win_amd64.dll C:\Users\User\Anaconda3\lib\site-packages\numpy\.libs\libopenblas.PYQHXLVVQ7VESDPUVUADXEVJOBGHJPAY.gfortran-win_amd64.dll stacklevel=1)Portfolio datasetportfolio.head() portfolio.info() portfolio.shape portfolio.isna().sum() portfolio['offer_type'].value_counts() portfolio['id'].nunique() portfolio.columns.duplicated().sum()From the portfolio dataset, the following things are observed:1. This dataset has no missing values or duplicates.2. There are 10 unique offers.3. There are 4 discount offers, 4 BOGO offers and 2 informational offers. Profile datasetprofile.head() profile.shape profile.isna().sum() profile[profile.isnull().any(axis=1)] profile.info() profile['id'].nunique() profile['gender'].unique() profile.columns.duplicated().sum() plt.hist(profile['age'], bins=10); profile.age.value_counts() profile.gender.value_counts() sns.countplot(profile['gender']) profile['age'].describe() profile['income'].describe() plt.hist(profile['age'], bins=10);The following things are observed from the profile dataset:1. This dataset has no duplicates.2. This dataset has 2175 missing values in 'gender' and 'income'.3. Based on the exploration and visualization above, age=118 appears to be the mobile app's default value for users who did not provide their gender and income.4. There are more male users than female users, and more female users than 'other'. Transcript datasettranscript.head() transcript.shape transcript.info() transcript.isna().sum() transcript.columns.duplicated().sum() transcript.event.unique() transcript.event.value_counts()From the transcript dataset, the following things are observed:1. There are 306534 rows in total.2. There are no missing values or duplicates.3. There are 4 types of events in this dataset: 'transaction', 'offer received', 'offer viewed' and 'offer completed'. 
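Observation 3 above (age=118 as a placeholder) can be checked directly. The cell below is a small sanity check, assuming the `profile` dataframe loaded above; if the three counts agree (2175 in this data set), the placeholder age is effectively another marker of a profile with no demographic information.

```python
# Sanity check: do the rows with the placeholder age of 118 coincide with the
# rows that are missing gender and income? (assumes `profile` from above)
default_age = profile[profile['age'] == 118]
print('rows with age == 118:        ', len(default_age))
print('of those, gender is missing: ', default_age['gender'].isna().sum())
print('of those, income is missing: ', default_age['income'].isna().sum())
```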
Data Preprocessing Portfolio dataset# create a copy from the dataset portfolio_clean = portfolio.copy() portfolio_clean.head() # rename 'id' column to 'offer_id' portfolio_clean.rename(columns={'id':'offer_id'},inplace=True) portfolio_clean['channels']Since channels is a categorical column, I will create dummy variables for it.portfolio_clean['channel_email'] = portfolio_clean['channels'].apply(lambda x: 1 if 'email' in x else 0) portfolio_clean['channel_mobile'] = portfolio_clean['channels'].apply(lambda x: 1 if 'mobile' in x else 0) portfolio_clean['channel_social'] = portfolio_clean['channels'].apply(lambda x: 1 if 'social' in x else 0) portfolio_clean['channel_web'] = portfolio_clean['channels'].apply(lambda x: 1 if 'web' in x else 0) portfolio_clean[['channels','channel_email','channel_mobile','channel_web','channel_social']].head() # drop the 'channels' column portfolio_clean.drop('channels', axis=1, inplace=True) portfolio_clean.head()Since offer_type is a categorical column, I will create dummy variables for it.portfolio_clean['offer_type_bogo'] = portfolio_clean['offer_type'].apply(lambda x: 1 if 'bogo' in x else 0) portfolio_clean['offer_type_informational'] = portfolio_clean['offer_type'].apply(lambda x: 1 if 'informational' in x else 0) portfolio_clean['offer_type_discount'] = portfolio_clean['offer_type'].apply(lambda x: 1 if 'discount' in x else 0) portfolio_clean.head()Profile datasetprofile_clean = profile.copy() profile_clean.head() # renaming 'id' column to 'user_id' profile_clean.rename(columns={'id':'user_id'},inplace=True)Extract year, month & day info from the became_member_on column.profile_clean['became_member_on'] = pd.to_datetime(profile_clean['became_member_on'], format = '%Y%m%d') profile_clean['membership_year'] = profile_clean['became_member_on'].dt.year profile_clean['membership_month'] = profile_clean['became_member_on'].dt.month profile_clean['membership_date'] = profile_clean['became_member_on'].dt.day profile_clean.head() # replacing 'age = 118' by NaN value profile_clean['age'] = profile_clean['age'].apply(lambda x: np.nan if x == 118 else x) # dropping rows with NaNs in 'age', 'gender' and 'income' columns profile_clean.dropna(inplace=True) profile_clean.isna().sum()Since gender is a categorical column, I will create dummy variables for it.profile_clean['gender_M'] = profile_clean['gender'].apply(lambda x: 1 if 'M' in x else 0) profile_clean['gender_F'] = profile_clean['gender'].apply(lambda x: 1 if 'F' in x else 0) profile_clean['gender_O'] = profile_clean['gender'].apply(lambda x: 1 if 'O' in x else 0) profile_clean.drop(['became_member_on'], axis=1, inplace=True) profile_clean.head()Transcript datasettranscript_clean = transcript.copy() # rename 'person' to 'user_id' transcript_clean.rename(columns={'person': 'user_id'}, inplace=True) transcript_clean.head() # Extract each key that exists in the 'value' column to a separate column. 
# getting the different keys that exists in the 'value' column keys = [] for idx, row in transcript_clean.iterrows(): for k in row['value']: if k in keys: continue else: keys.append(k) keys #create columns and specify the datatype of each of them transcript_clean['offer_id'] = '' # datatype : string transcript_clean['amount'] = 0 # datatype : integer transcript_clean['reward'] = 0 # datatype : integer # Iterating over transcript_clean dataset and checking 'value' column # then updating it and using the values to fill in the columns created above for idx, row in transcript_clean.iterrows(): for k in row['value']: if k == 'offer_id' or k == 'offer id': # b/c 'offer_id' and 'offer id' are representing the same thing transcript_clean.at[idx, 'offer_id'] = row['value'][k] if k == 'amount': transcript_clean.at[idx, 'amount'] = row['value'][k] if k == 'reward': transcript_clean.at[idx, 'reward'] = row['value'][k] # filling all the NaNs in the 'offer_id' column with 'N/A' values transcript_clean['offer_id'] = transcript_clean['offer_id'].apply(lambda x: 'N/A' if x == '' else x) # drop the 'value' column transcript_clean.drop(columns=['value','amount','reward'], axis=1, inplace=True) transcript_clean.head() # check the unique values in 'event' column transcript['event'].unique() # create 'offer received', 'offer viewed', 'transaction', 'offer completed' columns in the dataset transcript_clean['offer_received'] = transcript_clean['event'].apply(lambda x: 1 if 'offer received' in x else 0) transcript_clean['offer_viewed'] = transcript_clean['event'].apply(lambda x: 1 if 'offer viewed' in x else 0) transcript_clean['transaction'] = transcript_clean['event'].apply(lambda x: 1 if 'transaction' in x else 0) transcript_clean['offer_completed'] = transcript_clean['event'].apply(lambda x: 1 if 'offer completed' in x else 0) transcript_clean.head()Merge three clean dataset( Portfolio, Profile, Transaction) into one master clean dataset.# merge 'transcript_clean' dataset with 'portfolio_clean' on 'offer_id' df_master =transcript_clean.merge(portfolio_clean,how='left',on='offer_id') # merge'df_master' dataset with 'profile_clean' on 'user_id' df_master = df_master.merge(profile_clean,how ='left', on = 'user_id') df_master.head() df_master.info() # removing rows that contain NaNs df_master = df_master.dropna(how='any',axis=0) df_master.info() Int64Index: 148805 entries, 0 to 306527 Data columns (total 28 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 user_id 148805 non-null object 1 event 148805 non-null object 2 time 148805 non-null int64 3 offer_id 148805 non-null object 4 offer_received 148805 non-null int64 5 offer_viewed 148805 non-null int64 6 transaction 148805 non-null int64 7 offer_completed 148805 non-null int64 8 reward 148805 non-null float64 9 difficulty 148805 non-null float64 10 duration 148805 non-null float64 11 offer_type 148805 non-null object 12 channel_email 148805 non-null float64 13 channel_mobile 148805 non-null f[...]Questions and Visualization Questions1.What is the most common age distribution for Starbucks customers?2.What is the most common income for Starbucks customers?3.Who gets more income, male or female?4.Which type of offers each gender likes?5.From all the offers that customers reviewed, how many offers they completed?6.How many new customers Starbucks gets each year?7.What is the income among different ages, at what ages would have the most disposable income? 
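Question 5 is answered below with several nested `groupby` lookups; as a compact alternative view, the sketch here pivots the event counts per gender out of the merged `df_master` built above and derives a completion rate. It is an illustrative rewrite under that assumption, not the computation used in the cells that follow.

```python
# Event counts per gender (assumes the merged df_master from the step above)
funnel = (df_master[df_master['gender'].notna()]
          .pivot_table(index='gender', columns='event', values='user_id', aggfunc='count')
          .fillna(0))

# Share of received offers that were eventually completed, per gender
funnel['completion_rate'] = funnel['offer completed'] / funnel['offer received']
print(funnel[['offer received', 'offer viewed', 'offer completed', 'completion_rate']])
```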
1.What is the most common age distribution for Starbucks customers?plt.figure(figsize=(16, 4)) sns.boxplot(df_master['age']) plt.title('Age Distribution') plt.xlabel('Age') plt.xticks(rotation = 90) plt.show();From the above age distribution plot, we can see that most of ages in our profile dataframe falls in-between 40 and 80. We already notice one outlier which is 118. 2.What is the most common income for Starbucks customers?plt.figure(figsize=(16, 4)) sns.boxplot(df_master['income']) plt.title('Income Distribution') plt.xlabel('income') plt.xticks(rotation = 90) plt.show();From the above income distribution plot, we can see that the most common of income for the customers that falls between 50K and 80K. 3.Who gets more income, male or female?plt.figure(figsize=(14, 6)) sns.violinplot(x=df_master[df_master['gender'] != 'NA']['gender'], y=df_master['income']) plt.title('Income vs Gender') plt.ylabel('Income') plt.xlabel('Gender') plt.xticks(rotation = 0) plt.show();The graph above shows that income median (the white dot) for females (around 70k) is higher than males (around 60k). We can also see that the income range for females is 40k to 100k. The income range for the males is 40k to 70k which close to median. 4.Which type of offers each gender likes?plt.figure(figsize=(14, 6)) sns.countplot(x=df_master[df_master["gender"] != 'NA']['gender'], hue="offer_type", data=df_master) plt.title('Most Popular Offers to Each Gender') plt.ylabel('Total') plt.xlabel('Gender') plt.xticks(rotation = 0) plt.legend(title='Offer Type') plt.show();From the above plot graph, we can see that both genders like bogo and discount offers and they have the same reaction to informational offers, they both not interested in it. 5.From all the offers that customers reviewed, how many offers they completed?total_trans_g_o = df_master[df_master["gender"]!= 'NA'].groupby(['gender','offer_type']).count() total_trans_g_e = df_master[df_master["gender"]!= 'NA'].groupby(['gender','event']).count() total_trans_go_o_t = total_trans_g_o.loc[('F')]['event'].sum() total_trans_go_o_tt = total_trans_g_o.loc[('M')]['event'].sum() total_trans_go_o_t_offers_f = total_trans_g_o.loc[('F')].loc[['bogo', 'discount', 'informational']]['event'].sum() total_trans_go_o_t_offers_m = total_trans_g_o.loc[('M')].loc[['bogo', 'discount', 'informational']]['event'].sum() print('For Females:') print(f"Number of offer completed: {total_trans_g_e.loc[('F', 'offer completed')].values[0]}, {round((total_trans_g_e.loc[('F', 'offer completed')].values[0]/total_trans_g_e.loc[('F', 'offer received')].values[0])*100,2)}% of total offers received.") print(f"Number of offer received: {total_trans_g_e.loc[('F', 'offer received')].values[0]}, {round((total_trans_g_e.loc[('F', 'offer received')].values[0]/total_trans_go_o_t_offers_f)*100,2)}% of total.") print(f"Number of offer viewed: {total_trans_g_e.loc[('F', 'offer viewed')].values[0]}, {round((total_trans_g_e.loc[('F', 'offer viewed')].values[0]/total_trans_go_o_t_offers_f)*100,2)}% of total.") print(f"Number of transaction: {total_trans_g_e.loc[('F', 'transaction')].values[0]}, {round((total_trans_g_e.loc[('F', 'transaction')].values[0]/total_trans_go_o_t)*100,2)}% of total.") print('\nFor Males:') print(f"Number of offer completed: {total_trans_g_e.loc[('M', 'offer completed')].values[0]}, {round((total_trans_g_e.loc[('M', 'offer completed')].values[0]/total_trans_g_e.loc[('M', 'offer received')].values[0])*100,2)}% of total offers received.") print(f"Number of offer received: {total_trans_g_e.loc[('M', 
'offer received')].values[0]}, {round((total_trans_g_e.loc[('M', 'offer received')].values[0]/total_trans_go_o_t_offers_m)*100,2)}% of total.") print(f"Number of offer viewed: {total_trans_g_e.loc[('M', 'offer viewed')].values[0]}, {round((total_trans_g_e.loc[('M', 'offer viewed')].values[0]/total_trans_go_o_t_offers_m)*100,2)}% of total.") print(f"Number of transaction: {total_trans_g_e.loc[('M', 'transaction')].values[0]}, {round((total_trans_g_e.loc[('M', 'transaction')].values[0]/total_trans_go_o_t)*100,2)}% of total.")For Females: Number of offer completed: 15477, 56.37% of total offers received. Number of offer received: 27456, 43.09% of total. Number of offer viewed: 20786, 32.62% of total. Number of transaction: 15477, 24.29% of total. For Males: Number of offer completed: 16466, 43.18% of total offers received. Number of offer received: 38129, 46.0% of total. Number of offer viewed: 28301, 34.14% of total. Number of transaction: 16466, 25.84% of total.Females completed 56% of the offers they received, it is 13% more than males, but males made more transactions than females, 25% to 24%. 6.How many new customers Starbucks gets each year?df_master['membership_year'] = df_master['membership_year'].astype(int) plt.figure(figsize=(16, 4)) sns.countplot(df_master['membership_year']) plt.title('Number of Profiles In Each Year') plt.ylabel('Number of Profiles') plt.xlabel('Year') plt.xticks() plt.show(); # getting the number of customers that became members on 2017 members_2017 = (df_master['membership_year']==2017).sum() # getting the total number of members among all the available years total = df_master['membership_year'].count() # getting the percentages of customers that became members on 2017 perc_2017 = round((members_2017/total)*100,2) print(members_2017) perc_201756063From the above plot graph we can see that, 2017 has the highest number of profiles. About 38% of all the customers are registered as members on 2017. 7.What is the income among different ages, at what ages would have the most disposable income?dataAge = profile.groupby('age')['income'].sum().reset_index() dataAge.head() dataAge.plot.bar(x='age', y='income', rot=0, figsize=(20,6),color = '#00A0A0')The age group with the most disposable income is between age 48 and 70. 
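The bar chart above sums income over every profile at a given age, so heavily represented ages dominate the picture. As a quick cross-check (an illustrative aside, assuming the `profile` dataframe from the exploration section), the sketch below bins ages and reports both the number of customers and the mean income per bin; the placeholder age of 118 falls outside the last bin and is ignored.

```python
# Customer count and mean income per age band (assumes `profile` from above)
age_bins = pd.cut(profile['age'], bins=[17, 30, 40, 50, 60, 70, 80, 101])
income_by_age = (profile.groupby(age_bins)['income']
                 .agg(customers='count', mean_income='mean')
                 .round(1))
print(income_by_age)
```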
Data Modeling Data Prepdf_master.info() # dropping all columns that have been encoded or not needed for training model df_model = df_master.drop(columns = ['event','user_id','time','offer_id','transaction','offer_type','gender','offer_received','offer_viewed']) #moving the y denpendent variable to the first column col_name = "offer_completed" first_col = df_model.pop(col_name) df_model.insert(0, col_name, first_col) df_model.info() X = df_model.iloc[:, 1:].values y = df_model.iloc[:, 0].values # feature scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X = sc.fit_transform(X) #splitting data into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)Supervised Model#training classifier from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators = 200, criterion = 'entropy', random_state = 0) classifier.fit(X_train, y_train) #evaluating classifier with confusion matrix from sklearn.metrics import confusion_matrix y_pred = classifier.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) #evluating classifier with k-fold cross-validation from sklearn.model_selection import cross_val_score accuracy = cross_val_score(estimator = classifier, X= X_train, y = y_train, cv = 10) print("Accuracy: {:.2f} %".format(accuracy.mean()*100)) print("Standard Deviation: {:.2f} %".format(accuracy.std()*100)) df_model.info() # Feature importance in RandomForest Classifier col = [ 'reward','difficulty','duration','channel_email','channel_mobile' 'channel_social', 'channel_web', 'offer_type_bogo','offer_type_informational','offer_type_discount','age','income', 'membership_year','membership_month','membership_date','gender_M','gender_F','gender_O'] #modelname.feature_importance_ imp = classifier.feature_importances_ #plot fig, ax = plt.subplots() width = 0.4 # the width of the bars ind = np.arange(len(imp)) # the x locations for the groups ax.barh(ind, imp, width, color='green') ax.set_yticks(ind+width/10) ax.set_yticklabels(col, minor=False) plt.title('Feature importance in RandomForest Classifier') plt.xlabel('Relative importance') plt.ylabel('feature') plt.figure(figsize=(5,5)) fig.set_size_inches(6.5, 4.5, forward=True)Unsupervised modelimport tensorflow as tf #Training artificial neural network ann = tf.keras.models.Sequential() ann.add(tf.keras.layers.Dense(units=6, activation='relu')) ann.add(tf.keras.layers.Dense(units=6, activation='relu')) ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) ann.fit(X_train, y_train, batch_size = 32, epochs = 100) # evaluating ann model y_pred1 = ann.predict(X_test) y_pred1 = (y_pred1 > 0.5) cm = confusion_matrix(y_test, y_pred1) print(cm)[[23281 0] [ 6480 0]]Importsimport os import tensorflow as tf # Load compressed models from tensorflow_hub os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED' # for resetting the output display and displaying image import IPython.display as display # optional: for image visualization on mpl import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['figure.figsize'] = (12,12) mpl.rcParams['axes.grid'] = False # arrays, image viz, performance timing import numpy as np import PIL.Image import time import functools # not neededUtility Functions# tensor to image def tensor_to_image(tensor): tensor = tensor*255 tensor = np.array(tensor, dtype=np.uint8) if np.ndim(tensor)>3: assert tensor.shape[0] == 1 
tensor = tensor[0] return PIL.Image.fromarray(tensor) # loading any image def load_img(path_to_img): max_dim = 512 img = tf.io.read_file(path_to_img) img = tf.image.decode_image(img, channels=3) img = tf.image.convert_image_dtype(img, tf.float32) shape = tf.cast(tf.shape(img)[:-1], tf.float32) long_dim = max(shape) scale = max_dim / long_dim new_shape = tf.cast(shape * scale, tf.int32) img = tf.image.resize(img, new_shape) img = img[tf.newaxis, :] return img # showing image from tensor/numpy/array def imshow(image, title=None): if len(image.shape) > 3: image = tf.squeeze(image, axis=0) plt.imshow(image) if title: plt.title(title)Building the Model and Training Content and Style Images# content and style images root = "C:/Users/VTSB/Desktop/CS Resources/AI Hands-On ML/images/Style Transfer Tutorial/" content_path = root + "style_transfer_image_13.png" style_path = root + "style_1.jpg"Building the Feature Extraction Model and Extracting# defining output layers and output layer counts content_layers = ['block5_conv2'] style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] num_content_layers = len(content_layers) num_style_layers = len(style_layers) # building the CNN model with custom output def vgg_layers(layer_names): vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet') vgg.trainable = False outputs = [vgg.get_layer(name).output for name in layer_names] model = tf.keras.Model([vgg.input], outputs) return model # defining style correlation fn def gram_matrix(input_tensor): result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor) input_shape = tf.shape(input_tensor) num_locations = tf.cast(input_shape[1]*input_shape[2], tf.float32) return result/(num_locations) # building the full model class StyleContentModel(tf.keras.models.Model): def __init__(self, style_layers, content_layers): super(StyleContentModel, self).__init__() self.vgg = vgg_layers(style_layers + content_layers) self.style_layers = style_layers self.content_layers = content_layers self.num_style_layers = len(style_layers) self.vgg.trainable = False def call(self, inputs): "Expects float input in [0,1]" inputs = inputs*255.0 preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs) outputs = self.vgg(preprocessed_input) style_outputs, content_outputs = (outputs[:self.num_style_layers], outputs[self.num_style_layers:]) style_outputs = [gram_matrix(style_output) for style_output in style_outputs] content_dict = {content_name:value for content_name, value in zip(self.content_layers, content_outputs)} style_dict = {style_name:value for style_name, value in zip(self.style_layers, style_outputs)} return {'content':content_dict, 'style':style_dict} extractor = StyleContentModel(style_layers, content_layers) # loading and extracting content and style from respective images content_image = load_img(content_path) style_image = load_img(style_path) style_targets = extractor(style_image)['style'] content_targets = extractor(content_image)['content']Training# initializing it as a variable image = tf.Variable(content_image) # clipping images def clip_0_1(image): return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0) # optimizer opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1) # selected weights style_weight=1e-2 content_weight=1e4 total_variation_weight=30 # loss function def style_content_loss(outputs): style_outputs = outputs['style'] content_outputs = outputs['content'] style_loss = 
tf.add_n([tf.reduce_mean((style_outputs[name]-style_targets[name])**2) for name in style_outputs.keys()]) style_loss *= style_weight / num_style_layers content_loss = tf.add_n([tf.reduce_mean((content_outputs[name]-content_targets[name])**2) for name in content_outputs.keys()]) content_loss *= content_weight / num_content_layers loss = style_loss + content_loss return loss # 1 training step tf fn @tf.function() def train_step(image): with tf.GradientTape() as tape: outputs = extractor(image) loss = style_content_loss(outputs) loss += total_variation_weight*tf.image.total_variation(image) grad = tape.gradient(loss, image) opt.apply_gradients([(grad, image)]) image.assign(clip_0_1(image)) # training start = time.time() epochs = 10 steps_per_epoch = 100 step = 0 for n in range(epochs): for m in range(steps_per_epoch): step += 1 train_step(image) print(".", end='') display.clear_output(wait=True) display.display(tensor_to_image(image)) print("Train step: {}".format(step)) end = time.time() print("Total time: {:.1f}".format(end-start))Saving Picturefile_name = root + 'stylized-image_1.png' tensor_to_image(image).save(file_name) try: from google.colab import files except ImportError: pass else: files.download(file_name)Implementation1. Import Librariesfrom nltk.corpus import stopwords # Stopwords from nltk.tokenize import word_tokenize, sent_tokenize # Tokenizer- Corpus:- A collection of text is known as Corpus.- Tokenizers:- This divides a text into a series of tokens2. Next, we remove the `stopwords` in the sentence for narrowing the words.StopWords :- Words such as is, an, a, the, for that do not add value to the meaning of a sentence. 3. We create a `Frequency Table` of words.This `dict` would keep record of how many times each word will appear in the text after removing `stopwords`.# Diwali Essay Example text = """The religious significance of this festival has differences. It varies from one region to another in India. There is an association of many deities, cultures, and traditions with Diwali. The reason for these differences is probably local harvest festivals. Hence, there was a fusion of these harvest festivals into one pan-Hindu festival. According to the Ramayana, Diwali is the day of the return of Rama. This day Lord Rama returned to Ayodhya along with his wife Sita. This return was made after Rama defeated demon King Ravana. Furthermore, Rama’s brother Lakshmana and Hanuman also came back to Ayodhya victorious. There is another popular tradition for the reason of Diwali. Here Lord Vishnu as an incarnation of Krishna killed Narakasura. Narakasura was certainly a demon. Above all, this victory brought the release of 16000 captive girls. Furthermore, this victory shows the triumph of good over evil. This is due to Lord Krishna being good and Narakasura being evil. Association of Diwali to Goddess Lakshmi is the belief of many Hindus. Lakshmi is the wife of Lord Vishnu. She also happens to be the Goddess of wealth and prosperity. According to a legend, Diwali is the night of Lakshmi wedding. This night she chose and wed Vishnu. Eastern India Hindus associate Diwali with Goddess Durga or kali. Some Hindus believe Diwali to be the start of a new year."""```pythonstopWords = set(stopwords.words('english'))words = word_tokenize(text)freqtable = dict()``` 4. Depending on the words it contains and the frequency table, we will now assign a score to each sentenceWe will `sent_tokenize()` method to create an list of sentences. 
Also, a `dict` is to be created to keep track of the score of each sentence for later use.sentences = sent_tokenize(text) sentenceValue = dict()5. To compare the sentences within the `text`, assign a scoreWe can compare the score by finding `average` score of particular sentence. This score is now a `Threshold`.sumValues = 0 for sentence in sentenceValue: sumValues += sentenceValue[sentence] average = int(sumValues / len(sentenceValue))6. Now, apply this `threshold` to store sentences and print the `summary`stopWords = set(stopwords.words('english')) words = word_tokenize(text) # Frequency Table freqTable = dict() for word in words: word = word.lower() if word in stopWords: continue if word in freqTable: freqTable[word] += 1 else: freqTable[word] = 1 # Creating dict to keep the score of each sentence sentences = sent_tokenize(text) sentenceValue = dict() for sentence in sentences: for word, freq in freqTable.items(): if word in sentence.lower(): if sentence in sentenceValue: sentenceValue[sentence] += freq else: sentenceValue[sentence] = freq sumValues = 0 for sentence in sentenceValue: sumValues += sentenceValue[sentence] # Average value of a sentence from the original text avg = int(sumValues / len(sentenceValue)) # Storing sentences in the summary summary = '' for sentence in sentences: if (sentence in sentenceValue) and (sentenceValue[sentence] > (1.2 * avg)): summary += " " + sentence print(summary)There is an association of many deities, cultures, and traditions with Diwali. According to the Ramayana, Diwali is the day of the return of Rama. Furthermore, Rama’s brother Lakshmana and Hanuman also came back to Ayodhya victorious. According to a legend, Diwali is the night of Lakshmi wedding.Tokenizedef tokenize(doc_df, remove_pos_tuple=False, OHCO=OHCO): # Paragraphs to Sentences df = doc_df.para_str\ .apply(lambda x: pd.Series(nltk.sent_tokenize(x)))\ .stack()\ .to_frame()\ .rename(columns={0:'sent_str'}) # Sentences to Tokens # .apply(lambda x: pd.Series(nltk.pos_tag(nltk.word_tokenize(x))))\ df = df.sent_str\ .apply(lambda x: pd.Series(nltk.pos_tag(nltk.WhitespaceTokenizer().tokenize(x))))\ .stack()\ .to_frame()\ .rename(columns={0:'pos_tuple'}) # Grab info from tuple df['pos'] = df.pos_tuple.apply(lambda x: x[1]) df['token_str'] = df.pos_tuple.apply(lambda x: x[0]) if remove_pos_tuple: df = df.drop('pos_tuple', 1) # Add index df.index.names = OHCO return df TOKEN = tokenize(DOC) TOKEN.sample(25) TOKEN['term_str'] = TOKEN['token_str'].str.lower().str.replace('[\W_]', '') VOCAB = TOKEN.term_str.value_counts().to_frame().rename(columns={'index':'term_str', 'term_str':'n'})\ .sort_index().reset_index().rename(columns={'index':'term_str'}) VOCAB.index.name = 'term_id' VOCAB['num'] = VOCAB.term_str.str.match("\d+").astype('int') VOCAB sw = pd.DataFrame(nltk.corpus.stopwords.words('english'), columns=['term_str']) sw = sw.reset_index().set_index('term_str') sw.columns = ['dummy'] sw.dummy = 1 sw.sample(10) VOCAB['stop'] = VOCAB.term_str.map(sw.dummy) VOCAB['stop'] = VOCAB['stop'].fillna(0).astype('int') VOCAB[VOCAB.stop == 1].sample(10)Porter Stemsfrom nltk.stem.porter import PorterStemmer stemmer = PorterStemmer() VOCAB['p_stem'] = VOCAB.term_str.apply(stemmer.stem) VOCAB.sample(10) TOKEN token1 = TOKEN pos_max = token1.groupby(['term_str',"pos"]).count().sort_values("token_str", ascending = False).groupby(level=0).head(1)\ .reset_index().set_index('term_str') pos_max.sort_index().tail(200) VOCAB['pos_max'] = VOCAB.term_str.map(pos_max.pos) VOCABsave csvsDOC.to_csv('DOC.csv') 
LIB.to_csv('LIB.csv') VOCAB.to_csv('VOCAB.csv') TOKEN.to_csv('TOKEN.csv')Experiments parameters file Experiments 0.0.x#Experiment 0.0.0 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def libest_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], } #Experiments 0.0.1 <<-- doc2vec path_to_trained_model = path_data+'/models/pv/conv/[doc2vec-Java-PVDBOW-500-20E-1592603043.674285].model' def libest_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'] } #Experiments 0.0.2 <<-- word2vec path_to_trained_model = path_data+'/models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def sacp_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1596383717.992744].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'] } #Experiments 0.0.3 <<-- doc2vec path_to_trained_model = path_data+'/models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def sacp_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1596383717.992744].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'] } #Experiment 0.0.4 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def albergate_params(): return { "vectorizationType": VectorizationType.word2vec, 
"linkType": LinkType.req2src, "system": 'albergate', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[albergate-all-corpus-1609208282.940618].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[albergate-ground-req-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 0.0.5 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def ebt_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2src, "system": 'ebt', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[ebt-all-corpus-1609221582.171744].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[ebt-ground-req-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 0.0.6 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def etour_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.uc2src, "system": 'etour', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[etour-all-corpus-1609209368.279199].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[etour-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 0.0.7 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def itrust_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.uc2src, "system": 'itrust', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[itrust-all-corpus-1609210989.304283].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[itrust-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 0.0.8 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-SK-500-20E-1592607739.629433].model' def smos_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.uc2src, "system": 'smos', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 
'se-benchmarking/traceability/testbeds/processed/[smos-all-corpus-1609210822.872445].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[smos-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments0.0.x/', "names": ['Source','Target','Linked?'], }Experiments 1.0.x 8K#Experiment 1.0.0 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k' path_to_trained_model = path_data+'models/wv/bpe8k/[word2vec-Java-Py-SK-500-20E-8k-1594090297.869643].model' def libest_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe8k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.0.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiment 1.0.1 <<-- doc2vec path_to_trained_model = path_data+'models/pv/bpe8k/[doc2vec-Py-Java-PVDBOW-500-20E-8k-1594572857.17191].model' def libest_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/cisco/libest_data/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe8k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.0.x/', "names": ['Source','Target','Linked?'], } #Experiments 1.0.2 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k' path_to_trained_model = path_data+'/models/wv/bpe8k/[word2vec-Java-Py-SK-500-20E-8k-1594090297.869643].model' def sacp_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','bpe8k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.0.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiments 1.0.3 <<-- doc2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k' path_to_trained_model = path_data+'/models/pv/bpe8k/[doc2vec-Py-Java-PVDBOW-500-20E-8k-1594572857.17191].model' def sacp_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, 
"target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1596383717.992744].csv', "sep": '~', "names": ['ids','bpe8k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.0.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix }Experiments 1.1.x 32K#Experiments 1.1.0 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k' path_to_trained_model = path_data+'/models/wv/bpe32k/[word2vec-Py-Java-SK-500-20E-32k-1593748814.350487].model' def libest_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2tc, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe32k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.1.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiment 1.1.1 <<-- doc2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k' path_to_trained_model = path_data+'models/pv/bpe32k/[doc2vec-Java-py-PVDBOW-500-20E-32k-1595514224.303453].model' def libest_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/cisco/libest_data/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe32k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.1.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiments 1.1.2 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k' path_to_trained_model = path_data+'/models/wv/bpe32k/[word2vec-Py-Java-SK-500-20E-32k-1593748814.350487].model' def sacp_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','bpe32k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.1.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiment 1.1.3 <<-- doc2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k' path_to_trained_model = path_data+'/models/pv/bpe32k/[doc2vec-Java-py-PVDBOW-500-20E-32k-1595514224.303453].model' def sacp_params(): return { 
"vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','bpe32k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.1.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix }Experiments 1.1.x 128k#Experiments 1.2.0 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k' path_to_trained_model = path_data+'/models/wv/bpe128k/[word2vec-Java-Py-SK-500-20E-128k-1594873397.267055].model' def libest_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2tc, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe128k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.2.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiment 1.2.1 <<-- doc2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k' path_to_trained_model = path_data+'models/pv/bpe128k/[doc2vec-Java-py-PVDBOW-500-20E-128k-1595350537.25915].model' def libest_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','bpe128k'], "prep": Preprocessing.bpe }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments1.2.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiments 1.2.2 <<-- word2vec path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k' path_to_trained_model = path_data+'/models/wv/bpe128k/[word2vec-Java-Py-SK-500-20E-128k-1594873397.267055].model' def sacp_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','bpe128k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.2.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix } #Experiment 1.2.3 <<-- doc2vec 
path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k' path_to_trained_model = path_data+'/models/pv/bpe128k/[doc2vec-Java-py-PVDBOW-500-20E-128k-1595350537.25915].model' def sacp_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','bpe128k'], "prep": Preprocessing.bpe }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments1.2.x/', "names": ['Source','Target','Linked?'], "model_prefix": path_model_prefix }Experiments 2.0.x#Experiment 2.0.0 path_to_trained_model = path_data+'models/wv/conv/[word2vec-Py-Java-Wiki-SK-500-20E[5]-1593060168.198436].model' def libest_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments2.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 2.0.1 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-Wiki-PVDBOW-500-20E[5]-1592888296.590531].model' def libest_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.TC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[libest-all-corpus-1596063103.098236].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt", "saving_path": path_data + 'metrics/traceability/experiments2.0.x/', "names": ['Source','Target','Linked?'], } #Experiments 2.0.2 <<-- word2vec path_to_trained_model = path_data+'/models/wv/conv/[word2vec-Py-Java-Wiki-SK-500-20E[5]-1593060168.198436].model' def sacp_params(): return { "vectorizationType": VectorizationType.word2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments2.0.x/', "names": ['Source','Target','Linked?'] } #Experiment 2.0.3 <<-- doc2vec path_to_trained_model = path_data+'/models/pv/conv/[doc2vec-Py-Java-Wiki-PVDBOW-500-20E[5]-1592888296.590531].model' def sacp_params(): return { 
"vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.issue2src, "system": 'sacp-python-common', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.PR.value, "target_type": SoftwareArtifacts.PY.value, "system_path_config": { "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv", "saving_path": path_data + 'metrics/traceability/experiments2.0.x/', "names": ['Source','Target','Linked?'] }Experiments 3.0.x#Experiment 3.0.4 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def albergate_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2src, "system": 'albergate', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[albergate-all-corpus-1609208282.940618].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[albergate-ground-req-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments3.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 3.0.5 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def ebt_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2src, "system": 'ebt', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.REQ.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[ebt-all-corpus-1609221582.171744].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[ebt-ground-req-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments3.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 3.0.6 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def etour_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.uc2src, "system": 'etour', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[etour-all-corpus-1609209368.279199].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[etour-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments3.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 3.0.7 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def itrust_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.uc2src, "system": 'itrust', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": 
SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[itrust-all-corpus-1610408791.737875].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/english/[itrust-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments3.0.x/', "names": ['Source','Target','Linked?'], } #Experiment 3.0.8 <<-- doc2vec path_to_trained_model = path_data+'models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model' def smos_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.uc2src, "system": 'smos', "path_to_trained_model": path_to_trained_model, "source_type": SoftwareArtifacts.UC.value, "target_type": SoftwareArtifacts.SRC.value, "system_path_config": { "system_path": path_data + 'se-benchmarking/traceability/testbeds/processed/[smos-all-corpus-1609210822.872445].csv', "sep": '~', "names": ['ids','conv'], "prep": Preprocessing.conv }, "path_mappings": path_data + "se-benchmarking/traceability/testbeds/groundtruth/italian/[smos-ground-uc-to-src].txt", "saving_path": path_data + 'metrics/traceability/experiments3.0.x/', "names": ['Source','Target','Linked?'], }
1. Conditional statements
# A conditional using an ordinary boolean variable
is_student = True
if is_student : print('I am a student.')
else: print('I am not a student.')

# Conditionals driven by an integer variable
a = 0
b = 3
if a : print('a is not 0')
else : print('a is 0')
if b : print('b is not 0')
else : print('b is 0')
# Q. Is the float 0.0 treated as False?

# Conditionals driven by a string variable
a = ""
b = "hello"
if a : print("Something is in a")
else: print("Something is not in a")
if b : print("Something is in b")
else: print("Something is not in b")
if 'h' in b: print('h is in b')

# Conditionals driven by a list variable
a = []
b = [1,2,3]
if a : print("a is not empty")
else : print("a is empty")
if b : print("b is not empty")
else : print("b is empty")
if 1 in b: print("1 is in b")
# Q. Why is the following condition not True?
if '1' in b: print("1 is in b")

a = {}
b = {"Korea":10, "China":0}
if a : print("dict a is not empty")
else : print("dict a is empty")
if "Korea" in b: print("Korea is in b")
# Q. How can we check whether the value 10 is among the dictionary's values?

is_empty = True
x = [1,2,3,4]
if x : is_empty = False
else: is_empty = True
print(is_empty)
is_empty = False if x else True
print(is_empty)
False
2. Loop statements
# for loop
for i in range(0,10): print(i)
for i in range(0,10,2): print(i)
# Q. How do we write a for loop that counts down from 10 to 0, decreasing by 1?
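One possible set of answers to the questions posed in the two sections above (added as a short sketch; the notebook itself leaves them open):
# Q. Is the float 0.0 treated as False? Yes: like 0, empty strings and empty containers, 0.0 is falsy.
print(bool(0.0))          # False

# Q. Why is `'1' in b` not True for b = [1,2,3]? Membership compares by value and type,
# and the string '1' is not equal to the integer 1.
print('1' == 1)           # False

# Q. Checking whether the value 10 is among a dictionary's values:
b = {"Korea":10, "China":0}
print(10 in b.values())   # True

# Q. A for loop that counts down from 10 to 0, decreasing by 1:
for i in range(10, -1, -1):
    print(i)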
list_a = ['Orange', 'Apple', 'Banana']
dict_a = {'Orange':3, 'Apple':4, 'Banana':2}
for a in list_a: print(a)
for index, value in enumerate(list_a): print(index, value)
for a in dict_a: print(a)
for a in dict_a: print(a, dict_a[a])
for k,v in dict_a.items(): print(k, v)

import time
# Build a list x holding the elements 0 through 9999999 -- Option 1
start_time = time.time()
x = []
for i in range(0,10000000): x.append(i)
print(f"{time.time()-start_time:.2f} seconds")
# Build a list x holding the elements 0 through 9999999 -- Option 2
start_time = time.time()
x = [i for i in range(0,10000000)]
print(f"{time.time()-start_time:.2f} seconds")
0.99 seconds
Save your work!
Sad news... After all that work, your jupyter-hub server will be going away, sucked back into the cloud from which it came ;^( But fear not, you can preserve all your hard work and notebooks and just move them to your own machine (where they should all run perfectly well in a recent installation of [Anaconda 3](https://www.anaconda.com/products/individual)). Just execute the two cells below and this should create a file named jupyterfiles.tar.gz (it may take a bit of time, so wait for the Success! prompt). Right click on jupyterfiles.tar.gz in the File Browser and select Download. Let me know if you have any questions or issues. Best of luck!
import shutil
import os
shutil.make_archive("jupyterfiles", 'gztar', os.environ["HOME"])
print('Success!: You should be able to download your files now')
Overview
Networks (a.k.a. graphs) are widely used mathematical objects for representing and analysing social systems. This week is about getting familiar with networks, and we'll focus on four main aspects:
* Basic mathematical description of networks
* The `NetworkX` library.
* Building the network of GME redditors.
* Basic analysis of the network of GME redditors.
Part 1: Basic mathematical description of networks
This week, let's start with some lecturing. You will watch some videos made by Sune for his course _Social Graphs and Interactions_, where he covers networks in detail.
> **_Video Lecture_**. Start by watching the ["History of Networks"](https://youtu.be/qjM9yMarl70).
from IPython.display import YouTubeVideo
YouTubeVideo("qjM9yMarl70",width=800, height=450)
> **_Video Lecture_**. Then check out a few comments on ["Network Notation"](https://youtu.be/MMziC5xktHs).
YouTubeVideo("MMziC5xktHs",width=800, height=450)
parse
prefix = 'qm7b'
file_prefix = prefix+'/benchmark_'+prefix
by_model = parse(file_prefix+'.json.gz')
all_model_keys = list(by_model.keys())
print(all_model_keys)
train_fraction_keys = list(by_model[all_model_keys[0]].keys())
print(train_fraction_keys)
max_train_frac = train_fraction_keys[-1]
['21:7190', '43:7168', '86:7125', '173:7038', '346:6865', '692:6519', '1384:5827', '2769:4442', '5538:1673']
LC
lc_by_model = {} lc_by_model_train = {} sc_name='RMSE' worst_error = 0.0 best_model_error = {} for category, model_key in bmol.items(): best_error = 10**20.
best_model = None for key_now in model_key: lc_by_model[key_now], lc_by_model_train[key_now], _ = get_learning_curve(by_model, model_key_now=key_now, sc_name=sc_name) if np.min(lc_by_model[key_now][:,1]) < best_error: best_error, best_model = np.min(lc_by_model[key_now][:,1]), key_now if lc_by_model[key_now][0,1] > worst_error: worst_error = lc_by_model[key_now][0,1] best_model_error[best_error] = { "category": category, "model": best_model} lr_by_model = {} for category, model_key in bmol.items(): for key_now in model_key: lc_now = lc_by_model[key_now] lr_by_model[key_now] = -(np.log(lc_now[0,1])-np.log(lc_now[-1,1]))/(np.log(lc_now[0,0])-np.log(lc_now[-1,0])) fig, ax = plt.subplots(nrows=len(bmol.items())//4, ncols=4,figsize=(12,6),sharex=True,sharey=True) for i, [_, v] in enumerate(dict(sorted(best_model_error.items())).items()): category = v['category'] best_model = v['model'] for key_now in bmol[category]: ax[i//4,i%4].errorbar(lc_by_model[key_now][:,0], lc_by_model[key_now][:,1], yerr=lc_by_model[key_now][:,2], linestyle='-', c=color_dict[category], alpha=0.5, uplims=True, lolims=True) ax[i//4,i%4].errorbar(lc_by_model[best_model][:,0], lc_by_model[best_model][:,1], yerr=lc_by_model[best_model][:,2], linestyle='-',linewidth=4, c=color_dict[category], alpha=1.0, label=acronym_dict[best_model]+"\n"+"LR="+"{:.1e}".format(lr_by_model[best_model]), uplims=True, lolims=True) ax[i//4,i%4].legend(loc='lower left') # bbox_to_anchor=(1.3, 0.5)) for i in range(4): ax[1,i].set_xlabel('N',labelpad=-10) for i in range(len(bmol.items())//4): ax[i,0].set_ylabel('AE[kcal/mol]',labelpad=0) ax[0,0].set_ylim([best_error*0.7,worst_error*1.3]) ax[0,0].set_xscale('log') ax[0,0].set_yscale('log') # To specify the number of ticks on both or any single axes #ax.yaxis.set_major_locator(ticker.LogLocator(base=10, numticks=100)) #ax.yaxis.set_major_locator(ticker.MaxNLocator(4)) #ax.set_xticks([10, 20, 30, 40, 50]) #ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter()) #ax.set_yticks([0.025, 0.05, 0.1, 0.2, 0.4]) #ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) ax[0,0].text(0.5, 0.9,prefix, fontsize=16, horizontalalignment='left', verticalalignment='center', transform = ax[0,0].transAxes) fig.tight_layout() fig.savefig("benchmark_"+prefix+'-lc-seperate.pdf') fig, ax = plt.subplots(figsize=(8,6)) for i, [_, v] in enumerate(dict(sorted(best_model_error.items())).items()): category = v['category'] best_model = v['model'] for key_now in bmol[category]: ax.errorbar(lc_by_model[key_now][:,0], lc_by_model[key_now][:,1], yerr=lc_by_model[key_now][:,2], linestyle='-', c=cm.tab10(i), alpha=0.5, uplims=True, lolims=True) ax.errorbar(lc_by_model[best_model][:,0], lc_by_model[best_model][:,1], yerr=lc_by_model[best_model][:,2], linestyle='-',linewidth=4, c=color_dict[category], alpha=1.0, label=acronym_dict[best_model]+" LR="+"{:.1e}".format(lr_by_model[best_model]), uplims=True, lolims=True) ax.legend(loc='best') # bbox_to_anchor=(1.3, 0.5)) #ax.set_title('Learning curve for the dataset '+prefix) ax.set_xlabel('N') ax.set_ylabel('Test {}'.format(sc_name)) ax.set_ylim([best_error*0.7,worst_error*1.3]) ax.set_xscale('log') ax.set_yscale('log') # To specify the number of ticks on both or any single axes #ax.yaxis.set_major_locator(ticker.LogLocator(base=10, numticks=100)) #ax.yaxis.set_major_locator(ticker.MaxNLocator(4)) #ax.set_xticks([10, 20, 30, 40, 50]) #ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter()) #ax.set_yticks([0.025, 0.05, 0.1, 0.2, 0.4]) 
#ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) ax.text(0.5, 0.1,prefix, fontsize=16, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes) fig.savefig("benchmark_"+prefix+'-lc.pdf') test_RMSE = [ [k, lc_by_model[k][-1,1]] for k in by_model.keys()] np.savetxt(prefix+'-test_RMSE.dat',test_RMSE,fmt='%s')model correlation matixtry: correlation_matrix = np.genfromtxt(prefix+'-model-mae.kmat') model_list = np.genfromtxt(prefix+'-model-mae.kmat', dtype='str') except: correlation_matrix, model_list = model_correlation_matrix(by_model, max_train_frac, 'test','MAE', "CORR", verbose=True) np.savetxt(prefix+'-model-mae.kmat', correlation_matrix, fmt='%.8e') np.savetxt(prefix+'-model-mae.names', model_list, fmt='%s') try: correlation_matrix = np.genfromtxt(prefix+'-model-mse.kmat') model_list = np.genfromtxt(prefix+'-model-mse.kmat', dtype='str') except: correlation_matrix, model_list = model_correlation_matrix(by_model, max_train_frac, 'test', 'MSE', "PearsonR", verbose=True) np.savetxt(prefix+'-model-mse.kmat', correlation_matrix, fmt='%.8e') np.savetxt(prefix+'-model-mse.names', model_list, fmt='%s')try: correlation_matrix = np.genfromtxt(prefix+"/"+prefix+'-model-y.kmat') model_list = np.genfromtxt(prefix+"/"+prefix+'-model-y.kmat', dtype='str')except: correlation_matrix, model_list = model_correlation_matrix(by_model, max_train_frac, 'test', 'y', "SpearmanR", replica=None, verbose=True) np.savetxt(prefix+"/"+prefix+'-model-y.kmat', correlation_matrix, fmt='%.8e') np.savetxt(prefix+"/"+prefix+'-model-y.names', model_list, fmt='%s') f = plt.figure(figsize=(12, 12))pmat = plt.matshow(correlation_matrix, fignum=f.number, cmap='gnuplot')annotate = list([ acronym_dict[x] for x in by_model.keys()])plt.xticks(range(correlation_matrix.shape[1]), annotate, fontsize=10, rotation=90)plt.yticks(range(correlation_matrix.shape[1]), annotate, fontsize=10)cbaxes = f.add_axes([0.95, 0.1,0.02, 0.75])cbar=f.colorbar(pmat, cax=cbaxes, orientation='vertical')cbar.ax.set_ylabel('Spearman R between pairs of models',labelpad=0, fontsize=12)cb = plt.colorbar()cb.ax.tick_params(labelsize=14)cb.set_label('Spearman R between pairs of models', fontsize=12)plt.title('Correlation Matrix', fontsize=16);plt.tight_layout()f.savefig(file_prefix+'-model-Rmatrix.pdf',bbox_inches='tight')## KPCA this correlation matrixfrom asaplib.reducedim import Dimension_Reducersreduce_dict = {}reduce_dict['kpca'] = {"type": 'SPARSE_KPCA', 'parameter':{"n_components": 10, "n_sparse": -1, no sparsification "kernel": {"first_kernel": {"type": 'linear'}}}}dreducer = Dimension_Reducers(reduce_dict) proj = dreducer.fit_transform(np.clip((correlation_matrix),0,1))from asaplib.reducedim import KernelPCA proj = KernelPCA(10).fit_transform(np.clip((correlation_matrix),0,1))Visualizationsfrom asaplib.plot import Plotters fig_spec = { 'outfile': prefix+'-model-kpca.pdf', 'show': False, 'title': None, 'size': [12*1.1, 12], 'xlabel': None, 'ylabel': None, 'xaxis': True, 'yaxis': True, 'remove_tick': True, 'cmap': 'viridis', 'components':{ 'first_p': {'type': 'scatter', 'clabel': 'RMSE by model', 'vmin':None, 'vmax': None}, 'second_p': {"type": 'annotate', 'adtext': False} } } asap_plot = Plotters(fig_spec) plotcolor = [ np.log(lc_by_model[k][-1,1]) for k in by_model.keys()] annotate = list([ acronym_dict[x] for x in by_model.keys()]) asap_plot.plot(proj[:, [0,1]], plotcolor, [], annotate) fig, ax = plt.subplots(figsize=(10, 10)) # project all cset1 = ax.scatter(proj[:, 0], proj[:, 1], c=plotcolor[:], 
cmap=cm.get_cmap('gnuplot'), marker='o', s=50) cbaxes = fig.add_axes([0.48, 0.17,0.39, 0.02]) cbar=fig.colorbar(cset1, cax=cbaxes, orientation='horizontal') cbar.ax.set_xlabel('log(RMSE)',labelpad=0) # the region to zoom in zoomx=[0.018,0.042] zoomy=[-0.022,0.000] rect = patches.Rectangle((zoomx[0],zoomy[0]), zoomx[1]-zoomx[0], zoomy[1]-zoomy[0], fill=False) ax.add_patch(rect) # embed another subplot subpos = [0.01,0.1,0.7,0.83] subax = plot_styles.add_subplot_axes(ax,subpos) cset2 = subax.scatter(proj[:, 0], proj[:, 1], c=plotcolor[:], cmap=cm.get_cmap('gnuplot'), marker='o', s=200) subax.set_xlim(zoomx) subax.set_ylim(zoomy) subax.tick_params(direction='in', length=2, width=1, colors='black', grid_color='r', grid_alpha=0.5) subax.tick_params(axis="y",direction="in", pad=-30) subax.tick_params(axis="x",direction="in", pad=-10) ax.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off ax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off subax.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off subax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off # annotate texts = [] subtexts = [] for i,ano in enumerate(list([ acronym_dict[x] for x in by_model.keys()])): if zoomx[0] < proj[i, 0] < zoomx[1] and zoomy[0] < proj[i, 1] < zoomy[1]: subtexts.append(subax.text(proj[i, 0], proj[i, 1], ano, ha='center', va='center', fontsize=9,color='black')) else: texts.append(ax.text(proj[i, 0], proj[i, 1], ano, ha='center', va='center', fontsize=9,color='black')) adjust_text(texts,on_basemap=True,# only_move={'points':'', 'text':'x'}, expand_text=(1.01, 1.05), expand_points=(1.01, 1.05), force_text=(0.03, 0.1), force_points=(0.0, 0.0), ax=ax, precision=0.001, arrowprops=dict(arrowstyle="-", color='black', lw=1,alpha=0.8)) adjust_text(subtexts,on_basemap=True,# only_move={'points':'', 'text':'x'}, expand_text=(1.01, 1.05), expand_points=(1.01, 1.05), force_text=(0.03, 0.1), force_points=(0.0, 0.), ax=subax, precision=0.001, arrowprops=dict(arrowstyle="-", color='black', lw=1,alpha=0.8)) fig.savefig(file_prefix+'-model-kpca.pdf', transparent=True)kernel matrix of descriptors# note that we clip the min desc_kmat = np.clip(np.loadtxt(prefix+"/"+prefix+"-descriptors-spearman.kmat"),0,1) #desc_kmat = desc_kmat+np.ones(np.shape(desc_kmat)) desc_names = np.genfromtxt(prefix+"/"+prefix+"-descriptors.names", dtype='str') f = plt.figure(figsize=(10, 8)) pmat = plt.matshow(desc_kmat, fignum=f.number, cmap='gnuplot') annotate = list([ acronym_dict[x] for x in desc_names]) plt.xticks(range(desc_kmat.shape[1]), annotate, fontsize=10, rotation=90) plt.yticks(range(desc_kmat.shape[1]), annotate, fontsize=10) cbaxes = f.add_axes([0.87, 0.1,0.02, 0.75]) cbar=f.colorbar(pmat, cax=cbaxes, 
orientation='vertical') cbar.ax.set_ylabel('Spearman R between of the kernel matrices of 100 random samples',labelpad=2, fontsize=12) f.savefig(file_prefix+'-kmat-Rmatrix.pdf',bbox_inches='tight')reduce_dict2 = {}reduce_dict2['kpca'] = {"type": 'SPARSE_KPCA', 'parameter':{"n_components": 10, "n_sparse": -1, no sparsification "kernel": {"first_kernel": {"type": 'linear'}}}}dreducer2 = Dimension_Reducers(reduce_dict2)proj = dreducer2.fit_transform(desc_kmat)proj = KernelPCA(10).fit_transform(np.clip((desc_kmat),0,1)) fig_spec = { 'outfile': file_prefix+'-descriptor-similarity.pdf', 'show': False, 'title': None, 'size': [12*1.1, 12], 'xlabel': None, 'ylabel': None, 'xaxis': False, 'yaxis': False, 'remove_tick': True, 'cmap': 'viridis', 'components':{ 'first_p': {'type': 'scatter', 'clabel': 'RMSE by model', 'vmin':None, 'vmax': None}, 'second_p': {"type": 'annotate', 'adtext': False} } } asap_plot = Plotters(fig_spec) plotcolor = [ np.log(lc_by_model[k][-1,1]) for k in desc_names] annotate = list([ acronym_dict[x] for x in desc_names]) #plotcolor = np.arange(len(proj)) asap_plot.plot(proj[:, [0,1]], plotcolor, [], annotate) fig, ax = plt.subplots(figsize=(10, 10)) # project all cset1 = ax.scatter(proj[:, 0], proj[:, 1], c=plotcolor[:], cmap=cm.get_cmap('gnuplot'), marker='o', s=50) cbaxes = fig.add_axes([0.48, 0.2,0.39, 0.02]) cbar=fig.colorbar(cset1, cax=cbaxes, orientation='horizontal') cbar.ax.set_xlabel('log(RMSE)',labelpad=0) # the region to zoom in zoomx=[0.37,0.47] zoomy=[-0.1,0.01] rect = patches.Rectangle((zoomx[0],zoomy[0]), zoomx[1]-zoomx[0], zoomy[1]-zoomy[0], fill=False) ax.add_patch(rect) # embed another subplot subpos = [0.25,0.18,0.6,0.75] subax = plot_styles.add_subplot_axes(ax,subpos) cset2 = subax.scatter(proj[:, 0], proj[:, 1], c=plotcolor[:], cmap=cm.get_cmap('gnuplot'), marker='o', s=200) subax.set_xlim(zoomx) subax.set_ylim(zoomy) subax.tick_params(direction='in', length=2, width=1, colors='black', grid_color='r', grid_alpha=0.5) subax.tick_params(axis="y",direction="in", pad=-30) subax.tick_params(axis="x",direction="in", pad=-10) ax.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off ax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off subax.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off subax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off # annotate texts = [] subtexts = [] for i,ano in enumerate(list([ acronym_dict[x] for x in desc_names])): if zoomx[0] < proj[i, 0] < zoomx[1] and zoomy[0] < proj[i, 1] < zoomy[1]: subtexts.append(subax.text(proj[i, 0], proj[i, 1], ano, ha='center', va='center', fontsize=9,color='black')) else: texts.append(ax.text(proj[i, 0], proj[i, 1], ano, ha='center', va='center', 
fontsize=9,color='black')) adjust_text(texts,on_basemap=True,# only_move={'points':'', 'text':'x'}, expand_text=(1.01, 1.05), expand_points=(1.01, 1.05), force_text=(0.03, 0.1), force_points=(0.0, 0.0), ax=ax, precision=0.001, arrowprops=dict(arrowstyle="-", color='black', lw=1,alpha=0.8)) adjust_text(subtexts,on_basemap=True,# only_move={'points':'', 'text':'x'}, expand_text=(1.01, 1.05), expand_points=(1.01, 1.05), force_text=(0.03, 0.1), force_points=(0.0, 0.), ax=subax, precision=0.001, arrowprops=dict(arrowstyle="-", color='black', lw=1,alpha=0.8)) fig.savefig(file_prefix+'-kmat-kpca.pdf', transparent=True)Multilayer Percepton https://zhuanlan.zhihu.com/p/33669143import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import matplotlib.cm as cm import tensorflow as tf from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.utils import np_utils import pandas as pd #load the data train = pd.read_csv("train.csv") test = pd.read_csv("test.csv")Using TensorFlow backend.data preparationfrom sklearn.model_selection import train_test_split Y_train = train["label"] # Drop 'label' column X_train = train.drop(labels = ["label"],axis = 1) random_seed = 2 #split the data into train set and validation set X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.3, random_state=random_seed) #data normalization from [0..255] to [0..1] X_train = X_train / 255.0 X_val = X_val / 255.0 test = test / 255.0 #decode the lable to one-hot vectors Y_train = np_utils.to_categorical(Y_train) Y_val = np_utils.to_categorical(Y_val) num_classes = Y_val.shape[1]Model Architecture#model architecture model = Sequential() #hidden layer use relu activation function model.add(Dense(784, input_dim=784, kernel_initializer="normal", activation='relu')) #output use softmax to classifier model.add(Dense(num_classes, kernel_initializer="normal", activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_3 (Dense) (None, 784) 615440 _________________________________________________________________ dense_4 (Dense) (None, 10) 7850 ================================================================= Total params: 623,290 Trainable params: 623,290 Non-trainable params: 0 _________________________________________________________________Evaluate the Modelepechs=10 accuracy is 0.9789model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs=10, batch_size=200, verbose=2)Train on 29400 samples, validate on 12600 samples Epoch 1/10 - 2s - loss: 5.8998e-07 - acc: 1.0000 - val_loss: 0.1363 - val_acc: 0.9791 Epoch 2/10 - 2s - loss: 5.5708e-07 - acc: 1.0000 - val_loss: 0.1368 - val_acc: 0.9789 Epoch 3/10 - 2s - loss: 5.2520e-07 - acc: 1.0000 - val_loss: 0.1371 - val_acc: 0.9789 Epoch 4/10 - 2s - loss: 5.0092e-07 - acc: 1.0000 - val_loss: 0.1372 - val_acc: 0.9789 Epoch 5/10 - 2s - loss: 4.7155e-07 - acc: 1.0000 - val_loss: 0.1373 - val_acc: 0.9789 Epoch 6/10 - 2s - loss: 4.4646e-07 - acc: 1.0000 - val_loss: 0.1380 - val_acc: 0.9790 Epoch 7/10 - 2s - loss: 4.2467e-07 - acc: 1.0000 - val_loss: 0.1389 - val_acc: 0.9790 Epoch 8/10 - 2s - loss: 4.0605e-07 - acc: 1.0000 - val_loss: 0.1385 - val_acc: 0.9791 Epoch 9/10 - 2s - loss: 3.8464e-07 - acc: 1.0000 - 
val_loss: 0.1393 - val_acc: 0.9784 Epoch 10/10 - 2s - loss: 3.6825e-07 - acc: 1.0000 - val_loss: 0.1390 - val_acc: 0.9789change the Epoches 100the accuracy does not improve too muchso we change another moethod# change the epoches model.fit(X_train, Y_train, validation_data=(X_val, Y_val), nb_epoch=100, batch_size=200, verbose=2)/Users/yangjing/anaconda3/envs/py36/lib/python3.6/site-packages/keras/models.py:942: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`. warnings.warn('The `nb_epoch` argument in `fit` 'Tutorial: From physics to tuned GPU kernelsThis tutorial is designed to show you the whole process starting from modeling a physical process to a Python implementation to creating optimized and auto-tuned GPU application using Kernel Tuner.In this tutorial, we will use [diffusion](https://en.wikipedia.org/wiki/Diffusion) as an example application.We start with modeling the physical process of diffusion, for which we create a simple numerical implementation in Python. Then we create a CUDA kernel that performs the same computation, but on the GPU. Once we have a CUDA kernel, we start using the Kernel Tuner for auto-tuning our GPU application. And finally, we'll introduce a few code optimizations to our CUDA kernel that will improve performance, but also add more parameters to tune on using the Kernel Tuner.**Note:** If you are reading this tutorial on the Kernel Tuner's documentation pages, note that you can actually run this tutorial as a Jupyter Notebook. Just clone the Kernel Tuner's [GitHub repository](http://github.com/benvanwerkhoven/kernel_tuner). Install using *pip install .[tutorial,cuda]* and you're ready to go! You can start the tutorial by typing "jupyter notebook" in the "kernel_tuner/tutorial" directory. DiffusionPut simply, diffusion is the redistribution of something from a region of high concentration to a region of low concentration without bulk motion. The concept of diffusion is widely used in many fields, including physics, chemistry, biology, and many more.Suppose that we take a metal sheet, in which the temperature is exactly equal to one degree everywhere in the sheet.Now if we were to heat a number of points on the sheet to a very high temperature, say a thousand degrees, in an instant by some method. We could see the heat diffuse from these hotspots to the cooler areas. We are assuming that the metal does not melt. In addition, we will ignore any heat loss from radiation or other causes in this example.We can use the [diffusion equation](https://en.wikipedia.org/wiki/Diffusion_equation) to model how the heat diffuses through our metal sheet:\begin{equation*}\frac{\partial u}{\partial t}= D \left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right)\end{equation*}Where $x$ and $y$ represent the spatial descretization of our 2D domain, $u$ is the quantity that is being diffused, $t$ is the descretization in time, and the constant $D$ determines how fast the diffusion takes place.In this example, we will assume a very simple descretization of our problem. We assume that our 2D domain has $nx$ equi-distant grid points in the x-direction and $ny$ equi-distant grid points in the y-direction. Be sure to execute every cell as you read through this document, by selecting it and pressing **shift+enter**.nx = 1024 ny = 1024This results in a constant distance of $\delta x$ between all grid points in the $x$ dimension. 
Using central differences, we can numerically approximate the derivative for a given point $x_i$:\begin{equation*}\left. \frac{\partial^2 u}{\partial x^2} \right|_{x_{i}} \approx \frac{u_{x_{i+1}}-2u_{{x_i}}+u_{x_{i-1}}}{(\delta x)^2}\end{equation*}We do the same for the partial derivative in $y$:\begin{equation*}\left. \frac{\partial^2 u}{\partial y^2} \right|_{y_{i}} \approx \frac{u_{y_{i+1}}-2u_{y_{i}}+u_{y_{i-1}}}{(\delta y)^2}\end{equation*}If we combine the above equations, we can obtain a numerical estimation for the temperature field of our metal sheet in the next time step, using $\delta t$ as the time between time steps. But before we do, we also simplify the expression a little bit, because we'll assume that $\delta x$ and $\delta y$ are always equal to 1.\begin{equation*}u'_{x,y} = u_{x,y} + \delta t \times \left( \left( u_{x_{i+1},y}-2u_{{x_i},y}+u_{x_{i-1},y} \right) + \left( u_{x,y_{i+1}}-2u_{x,y_{i}}+u_{x,y_{i-1}} \right) \right)\end{equation*}In this formula $u'_{x,y}$ refers to the temperature field at the time $t + \delta t$. As a final step, we further simplify this equation to:\begin{equation*}u'_{x,y} = u_{x,y} + \delta t \times \left( u_{x,y_{i+1}}+u_{x_{i+1},y}-4u_{{x_i},y}+u_{x_{i-1},y}+u_{x,y_{i-1}} \right)\end{equation*} Python implementationWe can create a Python function that implements the numerical approximation defined in the above equation. For simplicity we'll use the assumption of a free boundary condition.def diffuse(field, dt=0.225): field[1:nx-1,1:ny-1] = field[1:nx-1,1:ny-1] + dt * ( field[1:nx-1,2:ny]+field[2:nx,1:ny-1]-4*field[1:nx-1,1:ny-1]+ field[0:nx-2,1:ny-1]+field[1:nx-1,0:ny-2] ) return fieldTo give our Python function a test run, we will now do some imports and generate the input data for the initial conditions of our metal sheet with a few very hot points. We'll also make two plots, one after a thousand time steps, and a second plot after another two thousand time steps. Do note that the plots are using different ranges for the colors. Also, executing the following cell may take a little while.#do the imports we need import numpy from matplotlib import pyplot %matplotlib inline #setup initial conditions def get_initial_conditions(nx, ny): field = numpy.ones((ny, nx)).astype(numpy.float32) field[numpy.random.randint(0,nx,size=10), numpy.random.randint(0,ny,size=10)] = 1e3 return field field = get_initial_conditions(nx, ny) #run the diffuse function a 1000 times and another 2000 times and make plots fig, (ax1, ax2) = pyplot.subplots(1,2) for i in range(1000): field = diffuse(field) ax1.imshow(field) for i in range(2000): field = diffuse(field) ax2.imshow(field)Now let's take a quick look at the execution time of our diffuse function. Before we do, we also copy the current state of the metal sheet to be able to restart the computation from this state.#save the current field for later use field_copy = numpy.copy(field) #run another 1000 steps of the diffuse function and measure the time from time import time start = time() for i in range(1000): field = diffuse(field) end = time() print("1000 steps of diffuse took", (end-start)*1000.0, "ms") pyplot.imshow(field)1000 steps of diffuse took 4164.018869400024 msComputing on the GPUThe next step in this tutorial is to implement a GPU kernel that will allow us to run our problem on the GPU. We store the kernel code in a Python string, because we can directly compile and run the kernel from Python. 
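Before moving on to the GPU version, the small check below (an added aside: the `diffuse_reference` helper and the 8x8 test grid are illustrative, not part of the original notebook) confirms that the slicing-based `diffuse` update implements the stencil formula derived above:
import numpy

def diffuse_reference(u, dt=0.225):
    # explicit double loop over interior points, straight from the formula
    # u'[x,y] = u[x,y] + dt*(u[x,y+1] + u[x+1,y] - 4*u[x,y] + u[x-1,y] + u[x,y-1])
    u_new = u.copy()
    for i in range(1, u.shape[0]-1):
        for j in range(1, u.shape[1]-1):
            u_new[i, j] = u[i, j] + dt * (u[i, j+1] + u[i+1, j] - 4*u[i, j] + u[i-1, j] + u[i, j-1])
    return u_new

small = numpy.random.default_rng(0).random((8, 8)).astype(numpy.float32)

# the same slicing expression as diffuse(), written for an arbitrary grid size
sliced = small.copy()
sliced[1:-1, 1:-1] = sliced[1:-1, 1:-1] + 0.225 * (
    sliced[1:-1, 2:] + sliced[2:, 1:-1] - 4*sliced[1:-1, 1:-1] + sliced[:-2, 1:-1] + sliced[1:-1, :-2])

print(numpy.allclose(diffuse_reference(small), sliced, atol=1e-6))  # expect True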
In this tutorial, we'll use the CUDA programming model to implement our kernels.
> If you prefer OpenCL over CUDA, don't worry. Everything in this tutorial
> applies as much to OpenCL as it does to CUDA. But we will use CUDA for our
> examples, and CUDA terminology in the text.
def get_kernel_string(nx, ny):
    return """
    #define nx %d
    #define ny %d
    #define dt 0.225f
    __global__ void diffuse_kernel(float *u_new, float *u) {
        int x = blockIdx.x * block_size_x + threadIdx.x;
        int y = blockIdx.y * block_size_y + threadIdx.y;

        if (x>0 && x<nx-1 && y>0 && y<ny-1) {
            u_new[y*nx+x] = u[y*nx+x] + dt * (
                u[(y+1)*nx+x]+u[y*nx+x+1]-4.0f*u[y*nx+x]+u[y*nx+x-1]+u[(y-1)*nx+x]);
        }
    }
    """ % (nx, ny)
kernel_string = get_kernel_string(nx, ny)
The above CUDA kernel parallelizes the work such that every grid point will be processed by a different CUDA thread. Therefore, the kernel is executed by a 2D grid of threads, which are grouped together into 2D thread blocks. The specific thread block dimensions we choose are not important for the result of the computation in this kernel. But as we will see later, they will have an impact on performance.
In this kernel we are using two, currently undefined, compile-time constants for `block_size_x` and `block_size_y`, because we will auto-tune these parameters later. It is often needed for performance to fix the thread block dimensions at compile time, because the compiler can unroll loops that iterate using the block size, or because you need to allocate shared memory using the thread block dimensions.
The next bit of Python code initializes PyCuda, and makes preparations so that we can call the CUDA kernel to do the computation on the GPU as we did earlier in Python.
import pycuda.driver as drv from pycuda.compiler import SourceModule #initialize PyCuda and get compute capability needed for compilation drv.init() context = drv.Device(0).make_context() devprops = { str(k): v for (k, v) in context.get_device().get_attributes().items() } cc = str(devprops['COMPUTE_CAPABILITY_MAJOR']) + str(devprops['COMPUTE_CAPABILITY_MINOR']) #allocate GPU memory u_old = drv.mem_alloc(field_copy.nbytes) u_new = drv.mem_alloc(field_copy.nbytes) #setup thread block dimensions and compile the kernel threads = (16,16,1) grid = (int(nx/16), int(ny/16), 1) block_size_string = "#define block_size_x 16\n#define block_size_y 16\n" diffuse_kernel = SourceModule(block_size_string+kernel_string, arch='sm_'+cc).get_function("diffuse_kernel") #create events for measuring performance start = drv.Event() end = drv.Event()
The above code is a bit of boilerplate we need to compile a kernel using PyCuda. We've also, for the moment, fixed the thread block dimensions at 16 by 16.
These dimensions serve as our initial guess for what a good performing pair of thread block dimensions could look like.Now that we've setup everything, let's see how long the computation would take using the GPU.#move the data to the GPU drv.memcpy_htod(u_old, field_copy) drv.memcpy_htod(u_new, field_copy) #call the GPU kernel a 1000 times and measure performance context.synchronize() start.record() for i in range(500): diffuse_kernel(u_new, u_old, block=threads, grid=grid) diffuse_kernel(u_old, u_new, block=threads, grid=grid) end.record() context.synchronize() print("1000 steps of diffuse took", end.time_since(start), "ms.") #copy the result from the GPU to Python for plotting gpu_result = numpy.zeros_like(field_copy) drv.memcpy_dtoh(gpu_result, u_new) fig, (ax1, ax2) = pyplot.subplots(1,2) ax1.imshow(gpu_result) ax1.set_title("GPU Result") ax2.imshow(field) ax2.set_title("Python Result")1000 steps of diffuse took 53.423038482666016 ms.That should already be a lot faster than our previous Python implementation, but we can do much better if we optimize our GPU kernel. And that is exactly what the rest of this tutorial is about!#cleanup the PyCuda context context.pop()Also, if you think the Python boilerplate code to call a GPU kernel was a bit messy, we've got good news for you! From now on, we'll only use the Kernel Tuner to compile and benchmark GPU kernels, which we can do with much cleaner Python code. Auto-Tuning with the Kernel TunerRemember that previously we've set the thread block dimensions to 16 by 16. But how do we actually know if that is the best performing setting? That is where auto-tuning comes into play. Basically, it is very difficult to provide an answer through performance modeling and as such, we'd rather use the Kernel Tuner to compile and benchmark all possible kernel configurations.But before we continue, we'll increase the problem size, because the GPU is very likely underutilized.nx = 4096 ny = 4096 field = get_initial_conditions(nx, ny) kernel_string = get_kernel_string(nx, ny)The above code block has generated new initial conditions and a new string that contains our CUDA kernel using our new domain size.To call the Kernel Tuner, we have to specify the tunable parameters, in our case `block_size_x` and `block_size_y`. For this purpose, we'll create an ordered dictionary to store the tunable parameters. The keys will be the name of the tunable parameter, and the corresponding value is the list of possible values for the parameter. For the purpose of this tutorial, we'll use a small number of commonly used values for the thread block dimensions, but feel free to try more!from collections import OrderedDict tune_params = OrderedDict() tune_params["block_size_x"] = [16, 32, 48, 64, 128] tune_params["block_size_y"] = [2, 4, 8, 16, 32]We also have to tell the Kernel Tuner about the argument list of our CUDA kernel. Because the Kernel Tuner will be calling the CUDA kernel and measure its execution time. For this purpose we create a list in Python, that corresponds with the argument list of the `diffuse_kernel` CUDA function. This list will only be used as input to the kernel during tuning. The objects in the list should be Numpy arrays or scalars.Because you can specify the arguments as Numpy arrays, the Kernel Tuner will take care of allocating GPU memory and copying the data to the GPU.args = [field, field]We're almost ready to call the Kernel Tuner, we just need to set how large the problem is we are currently working on by setting a `problem_size`. 
The Kernel Tuner knows about thread block dimensions, which it expects to be called `block_size_x`, `block_size_y`, and/or `block_size_z`. From these and the `problem_size`, the Kernel Tuner will compute the appropiate grid dimensions on the fly.problem_size = (nx, ny)And that's everything the Kernel Tuner needs to know to be able to start tuning our kernel. Let's give it a try by executing the next code block!from kernel_tuner import tune_kernel result = tune_kernel("diffuse_kernel", kernel_string, problem_size, args, tune_params)Using: GeForce GTX TITAN X diffuse_kernel block_size_x=16, block_size_y=2, time=1.22305920124, block_size_x=16, block_size_y=4, time=0.779033613205, block_size_x=16, block_size_y=8, time=0.824838399887, block_size_x=16, block_size_y=16, time=0.900499212742, block_size_x=16, block_size_y=32, time=0.999763202667, block_size_x=32, block_size_y=2, time=0.727967989445, block_size_x=32, block_size_y=4, time=0.752479994297, block_size_x=32, block_size_y=8, time=0.797900807858, block_size_x=32, block_size_y=16, time=0.876627194881, block_size_x=32, block_size_y=32, time=0.93347837925, block_size_x=48, block_size_y=2, time=0.766662418842, block_size_x=48, block_size_y=4, time=0.803033602238, block_size_x=48, block_size_y=8, time=0.853574407101, block_size_x=48, block_size_y=16, time=0.971545600891, block_size_x=64, block_size_y=2, time=0.763775992393, block_size_x=64, block_size_y=4, time=0.791257584095, block_size_x=64, block_size_y=8, time=0.848044800758, block_size_x=64, blo[...]Note that the Kernel Tuner prints a lot of useful information. To ensure you'll be able to tell what was measured in this run the Kernel Tuner always prints the GPU or OpenCL Device name that is being used, as well as the name of the kernel.After that every line contains the combination of parameters and the time that was measured during benchmarking. The time that is being printed is in milliseconds and is obtained by averaging the execution time of 7 runs of the kernel. Finally, as a matter of convenience, the Kernel Tuner also prints the best performing combination of tunable parameters. However, later on in this tutorial we'll explain how to analyze and store the tuning results using Python.Looking at the results printed above, the difference in performance between the different kernel configurations may seem very little. However, on our hardware, the performance of this kernel already varies in the order of 10%. Which of course can build up to large differences in the execution time if the kernel is to be executed thousands of times. We can also see that the performance of the best configuration in this set is 5% better than our initially guessed thread block dimensions of 16 by 16.In addtion, you may notice that not all possible combinations of values for `block_size_x` and `block_size_y` are among the results. For example, 128x32 is not among the results. This is because some configuration require more threads per thread block than allowed on our GPU. The Kernel Tuner checks the limitations of your GPU at runtime and automatically skips over configurations that use too many threads per block. It will also do this for kernels that cannot be compiled because they use too much shared memory. And likewise for kernels that use too many registers to be launched at runtime. 
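To make that limit concrete, the standalone sketch below lists which of our block size combinations exceed an assumed limit of 1024 threads per block (a common value on recent NVIDIA GPUs; this number is an assumption here, while the Kernel Tuner queries the actual limit from the device at runtime):
from itertools import product

max_threads_per_block = 1024  # assumed device limit, for illustration only

for bx, by in product([16, 32, 48, 64, 128], [2, 4, 8, 16, 32]):
    if bx * by > max_threads_per_block:
        print(f"skipped: block_size_x={bx}, block_size_y={by} ({bx*by} threads per block)")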
If you'd like to know about which configurations were skipped automatically you can pass the optional parameter `verbose=True` to `tune_kernel`.However, knowing the best performing combination of tunable parameters becomes even more important when we start to further optimize our CUDA kernel. In the next section, we'll add a simple code optimization and show how this affects performance. Using Shared MemoryShared memory, is a special type of the memory available in CUDA. Shared memory can be used by threads within the same thread block to exchange and share values. It is in fact, one of the very few ways for threads to communicate on the GPU.The idea is that we'll try improve the performance of our kernel by using shared memory as a software controlled cache. There are already caches on the GPU, but most GPUs only cache accesses to global memory in L2. Shared memory is closer to the multiprocessors where the thread blocks are executed, comparable to an L1 cache.However, because there are also hardware caches, the performance improvement from this step is expected to not be that great. The more fine-grained control that we get by using a software managed cache, rather than a hardware implemented cache, comes at the cost of some instruction overhead. In fact, performance is quite likely to degrade a little. However, this intermediate step is necessary for the next optimization step we have in mind.kernel_string = """ #define nx %d #define ny %d #define dt 0.225f __global__ void diffuse_kernel(float *u_new, float *u) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x * block_size_x; int by = blockIdx.y * block_size_y; __shared__ float sh_u[block_size_y+2][block_size_x+2]; #pragma unroll for (int i = ty; i=0 && x=0 && y0 && x0 && yUsing: GeForce GTX TITAN X diffuse_kernel block_size_x=16, block_size_y=2, time=1.75041918755, block_size_x=16, block_size_y=4, time=1.18713598251, block_size_x=16, block_size_y=8, time=1.09015038013, block_size_x=16, block_size_y=16, time=1.06844799519, block_size_x=16, block_size_y=32, time=1.09730558395, block_size_x=32, block_size_y=2, time=1.14420480728, block_size_x=32, block_size_y=4, time=1.05957758427, block_size_x=32, block_size_y=8, time=1.07508480549, block_size_x=32, block_size_y=16, time=1.0731967926, block_size_x=32, block_size_y=32, time=1.14729599953, block_size_x=48, block_size_y=2, time=1.08389122486, block_size_x=48, block_size_y=4, time=1.10700161457, block_size_x=48, block_size_y=8, time=1.10125439167, block_size_x=48, block_size_y=16, time=1.31661438942, block_size_x=64, block_size_y=2, time=1.0629119873, block_size_x=64, block_size_y=4, time=1.04807043076, block_size_x=64, block_size_y=8, time=1.054880023, block_size_x=64, block_size_y=16, time=[...]Tiling GPU CodeOne very useful code optimization is called tiling, sometimes also called thread-block-merge. You can look at it in this way, currently we have many thread blocks that together work on the entire domain. If we were to use only half of the number of thread blocks, every thread block would need to double the amount of work it performs to cover the entire domain. However, the threads may be able to reuse part of the data and computation that is required to process a single output element for every element beyond the first.This is a code optimization because effectively we are reducing the total number of instructions executed by all threads in all thread blocks. 
So in a way, were are condensing the total instruction stream while keeping the all the really necessary compute instructions. More importantly, we are increasing data reuse, where previously these values would have been reused from the cache or in the worst-case from GPU memory.We can apply tiling in both the x and y-dimensions. This also introduces two new tunable parameters, namely the tiling factor in x and y, which we will call `tile_size_x` and `tile_size_y`. This is what the new kernel looks like:kernel_string = """ #define nx %d #define ny %d #define dt 0.225f __global__ void diffuse_kernel(float *u_new, float *u) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x * block_size_x * tile_size_x; int by = blockIdx.y * block_size_y * tile_size_y; __shared__ float sh_u[block_size_y*tile_size_y+2][block_size_x*tile_size_x+2]; #pragma unroll for (int i = ty; i=0 && x=0 && y0 && x0 && yWe can tune our tiled kernel by adding the two new tunable parameters to our dictionary `tune_params`.We also need to somehow tell the Kernel Tuner to use fewer thread blocks to launch kernels with `tile_size_x` or `tile_size_y` larger than one. For this purpose the Kernel Tuner's `tune_kernel` function supports two optional arguments, called grid_div_x and grid_div_y. These are the grid divisor lists, which are lists of strings containing all the tunable parameters that divide a certain grid dimension. So far, we have been using the default settings for these, in which case the Kernel Tuner only uses the block_size_x and block_size_y tunable parameters to divide the problem_size.Note that the Kernel Tuner will replace the values of the tunable parameters inside the strings and use the product of the parameters in the grid divisor list to compute the grid dimension rounded up. You can even use arithmetic operations, inside these strings as they will be evaluated. As such, we could have used ``["block_size_x*tile_size_x"]`` to get the same result.We are now ready to call the Kernel Tuner again and tune our tiled kernel. 
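The effect of the grid divisor lists can be sketched as plain ceil-division (an illustration of the behaviour described above, not the Kernel Tuner's internal code): each grid dimension is the problem size divided by the product of the parameters named in the corresponding divisor list, rounded up.
from math import ceil

def grid_dim(problem_dim, divisors, params):
    # multiply together the tunable parameters listed as divisors, then round up
    divisor = 1
    for name in divisors:
        divisor *= params[name]
    return ceil(problem_dim / divisor)

params = {"block_size_x": 64, "block_size_y": 4, "tile_size_x": 2, "tile_size_y": 4}
print(grid_dim(4096, ["block_size_x"], params))                 # 64: default, block size only
print(grid_dim(4096, ["block_size_x", "tile_size_x"], params))  # 32: each block now covers 64*2 columns
print(grid_dim(4096, ["block_size_y", "tile_size_y"], params))  # 256: and 4*4 rows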
Let's execute the following code block, note that it may take a while as the number of kernel configurations that the Kernel Tuner will try has just been increased with a factor of 9!tune_params["tile_size_x"] = [1,2,4] #add tile_size_x to the tune_params tune_params["tile_size_y"] = [1,2,4] #add tile_size_y to the tune_params grid_div_x = ["block_size_x", "tile_size_x"] #tile_size_x impacts grid dimensions grid_div_y = ["block_size_y", "tile_size_y"] #tile_size_y impacts grid dimensions result = tune_kernel("diffuse_kernel", kernel_string, problem_size, args, tune_params, grid_div_x=grid_div_x, grid_div_y=grid_div_y)Using: GeForce GTX TITAN X diffuse_kernel block_size_x=16, block_size_y=2, tile_size_x=1, tile_size_y=1, time=1.759308815, block_size_x=16, block_size_y=2, tile_size_x=1, tile_size_y=2, time=1.29789438248, block_size_x=16, block_size_y=2, tile_size_x=1, tile_size_y=4, time=1.06983039379, block_size_x=16, block_size_y=2, tile_size_x=2, tile_size_y=1, time=1.2634239912, block_size_x=16, block_size_y=2, tile_size_x=2, tile_size_y=2, time=0.997139203548, block_size_x=16, block_size_y=2, tile_size_x=2, tile_size_y=4, time=0.843692803383, block_size_x=16, block_size_y=2, tile_size_x=4, tile_size_y=1, time=1.05549435616, block_size_x=16, block_size_y=2, tile_size_x=4, tile_size_y=2, time=0.862348806858, block_size_x=16, block_size_y=2, tile_size_x=4, tile_size_y=4, time=0.750636804104, block_size_x=16, block_size_y=4, tile_size_x=1, tile_size_y=1, time=1.19084160328, block_size_x=16, block_size_y=4, tile_size_x=1, tile_size_y=2, time=0.876377594471, block_size_x=16, block_size_y=4,[...]We can see that the number of kernel configurations tried by the Kernel Tuner is growing rather quickly. Also, the best performing configuration quite a bit faster than the best kernel before we started optimizing. On our GTX Titan X, the execution time went from 0.72 ms to 0.53 ms, a performance improvement of 26%!Note that the thread block dimensions for this kernel configuration are also different. Without optimizations the best performing kernel used a thread block of 32x2, after we've added tiling the best performing kernel uses thread blocks of size 64x4, which is four times as many threads! Also the amount of work increased with tiling factors 2 in the x-direction and 4 in the y-direction, increasing the amount of work per thread block by a factor of 8. The difference in the area processed per thread block between the naive and the tiled kernel is a factor 32.However, there are actually several kernel configurations that come close. 
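As a quick check of the factors quoted above, using the tutorial's own numbers:
\begin{align*}
\text{threads per block:}\quad & 64 \times 4 = 256 = 4 \times (32 \times 2)\\
\text{work per block from tiling:}\quad & 2 \times 4 = 8\\
\text{area per block:}\quad & (64 \cdot 2) \times (4 \cdot 4) = 2048 = 32 \times (32 \times 2)\\
\text{reduction in execution time:}\quad & (0.72 - 0.53)/0.72 \approx 26\%
\end{align*}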
The following Python code prints all instances with an execution time within 5% of the best performing configuration.best_time = min(result[0], key=lambda x:x['time'])['time'] for i in result[0]: if i["time"] < best_time*1.05: print("".join([k + "=" + str(v) + ", " for k,v in i.items()]))block_size_x=16, block_size_y=8, tile_size_x=2, tile_size_y=4, time=0.554745602608, block_size_x=32, block_size_y=2, tile_size_x=2, tile_size_y=4, time=0.563417613506, block_size_x=32, block_size_y=4, tile_size_x=2, tile_size_y=4, time=0.542387211323, block_size_x=32, block_size_y=8, tile_size_x=2, tile_size_y=4, time=0.539891195297, block_size_x=32, block_size_y=16, tile_size_x=2, tile_size_y=4, time=0.550105595589, block_size_x=48, block_size_y=2, tile_size_x=2, tile_size_y=4, time=0.560505592823, block_size_x=48, block_size_y=4, tile_size_x=2, tile_size_y=4, time=0.562521612644, block_size_x=64, block_size_y=2, tile_size_x=2, tile_size_y=4, time=0.544691193104, block_size_x=64, block_size_y=4, tile_size_x=2, tile_size_y=4, time=0.540352010727, block_size_x=64, block_size_y=8, tile_size_x=2, tile_size_y=4, time=0.538227200508, block_size_x=128, block_size_y=2, tile_size_x=2, tile_size_y=4, time=0.542937588692, block_size_x=128, block_size_y=4, tile_size_x=2, tile_size_y=4,[...]Storing the resultsWhile it's nice that the Kernel Tuner prints the tuning results to stdout, it's not that great if we'd have to parse what is printed to get the results. That is why the `tune_kernel()` returns a data structure that holds all the results. We've actually already used this data in the above bit of Python code.`tune_kernel` returns a list of dictionaries, where each benchmarked kernel is represented by a dictionary containing the tunable parameters for that particular kernel configuration and one more entry called 'time'. The list of dictionaries format is very flexible and can easily be converted other formats that are easy to parse formats, like json or csv, for further analysis.You can execute the following code block to store the tuning results to both a json and a csv file (if you have Pandas installed).#store output as json import json with open("tutorial.json", 'w') as fp: json.dump(result[0], fp) #store output as csv from pandas import DataFrame df = DataFrame(result[0]) df.to_csv("tutorial.csv")Converting Arepo snapshots to be usable by TARDIS What is [Arepo](https://arepo-code.org/)?> *Arepo is a massively parallel gravity and magnetohydrodynamics code for astrophysics, designed for problems of large dynamic range. It employs a finite-volume approach to discretize the equations of hydrodynamics on a moving Voronoi mesh, and a tree-particle-mesh method for gravitational interactions. Arepo is originally optimized for cosmological simulations of structure formation, but has also been used in many other applications in astrophysics.*(Weinberger, 2020) This parser is intended for loading Arepo output files ('snapshots'), extracting the relevant (line-of-sight dependent) data and exporting it to `csvy` files, which can in turn be used in TARDIS models ([see CSVY model](../index.rstcsvy-model)). NoteThis parser has been developed for the (not publically available) development version of Arepo, not the public version. Althought it should also work with snapshots from the public version, this has not been tested. 
If you run into trouble loading the snapshot using the built-in functions, try providing the data manually.import numpy as np import matplotlib.pyplot as plt from tardis.io.parsers import arepo import json %matplotlib inlineLoading the simulation data As a first step, the relevant data has to be loaded from an Arepo snapshot, i.e. an output file of Arepo. In case you have the arepo-snap-util package installed, you can use the built in wrapper (as described below) to load the relevant data. In case you do not have this package installed or want to load the snapshot in a different way, you can manually provide the relevant data and continue with the next step.If you're using the built-in tool, you will need to provide the path to the snapshot file, a list with the elements you want to include in your TARDIS model, as well as the species file with which the Arepo simulation was run. *(The species file should contain one header line, followed by two colums, where the first contains the names of all the species and is used to find the indices of the individual species within the snapshot. The second column is not used by the loader.)* In case you have the arepo-snap-util package installed, you can load the data directly from a snapshot:```pythonsnapshot = arepo.ArepoSnapshot( "arepo_snapshot.hdf5", ["ni56", "si28"], "species55.txt", resolution=32)pos, vel, rho, xnuc, time = snapshot.get_grids()```This will load the necessary from the snapshot. See the [API](../../../../../api/tardis.io.parsers.arepo.rst) documentation for more options on how to load snapshots. This will fail with an error if you do not have the arepo-snap-util package installed. Since this is not a default dependency of TARDIS, lets manually load the data. *(This manual load can effectively be used to load all kinds of models unrelated to Arepo, as long as the data comes in the correct format.)*In this case the data is loaded from a `json` file. This file has been created manually by dumping the data which would have been loaded from the snapshot to a `json` file. *(The `json` file is only an example and more for illustrative purposes. As long as the data is provided in the correct format (see below) it is up to the user how it is saved/loaded.)* Code: (click to expand) ```pythondata = { "pos" : pos.tolist(), "vel" : vel.tolist(), "rho" : rho.tolist(), "xnuc": [xnuc[x].tolist() for x in list(xnuc.keys())], "time": time,}json_string = json.dumps(data)with open('arepo_snapshot.json', 'w') as outfile: json.dump(json_string, outfile)```with open('arepo_snapshot.json') as json_file: data = json.loads(json.load(json_file)) # The following lines only parse the .json file. 
You might not need this depending on how you saved # the snapshot data pos, vel, rho, nucs, time = data["pos"], data["vel"], data["rho"], data["xnuc"], data["time"] pos = np.array(pos) vel = np.array(vel) rho = np.array(rho) # The nuclear data should be in a dict where each element has its own entry (with the key being the element name) xnuc = { "ni56" : np.array(nucs[0]), "si28" : np.array(nucs[1]), } print("Position data shape: ", pos.shape) print("Velocity data shape: ", vel.shape) print("Density data shape: ", rho.shape) print("Nuclear data shape (per element): ", xnuc["ni56"].shape)In case you want to load the snapshot data itself with your own tools, you will need to provide the following data:- Position in the center of mass frame ("pos") -> `np.array`- Velocity ("vel") -> `np.array`- Density ("rho") -> `np.array`- Nuclear fraction ("xnuc") of each element you want to include -> `dict` containing `np.array` - Time of the snapshot ("time") -> `float`The data is expected to be mapped to a Carthesian grid and should have the same shape as the one provided by the built-in tool. Extracting a profile and converting it to a csvy fileNow You can create a TARDIS model. There are three possibilities on how to extract the profiles from the snapshot: - **Line profile**: This extracts the data along a straight line (the x-axis) - **Cone profile**: This extracts the data within a specified cone - **Full profile**: This averages over the whole simulationprofile = arepo.ConeProfile(pos, vel, rho, xnuc, time)This loads the data (in this example for a cone profile), which can then be cut to the ranges which you want to include in your TARDIS model. The syntax for the other profiles is similar: - `arepo.LineProfile()` - `arepo.FullProfile()` Next you can create the profiles acccording to the model option you selected. A diagnostic plot will be shown per default, but this behaviour can be turned off with the option `show_plot=False`. The plot will always show both the positve and negative axis. NoteThe keyword ``opening_angle=40`` is only needed for the cone profile. The other modes do not accept this keyword! The angle itself is the opening angle of the full cone and NOT the angle between the central x-axis and the cone!profile.create_profile(opening_angle=40)In many cases you only want a very specific region from the snapshot, e.g. cutting out the dense, optically thick regions. This can be acchieved using the keywords `inner_radius` and `outer_radius`.profile.create_profile(opening_angle=40, inner_radius=1e11, outer_radius=2e11)Once you have created a profile of the desired region, you can export the profile to a `csvy` using the commented-out code below, which in turn can be used in a TARDIS model. Here you have to specify how many shells you want to export. The profiles are rebinned using [Scipys binned_statistic function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html), using the mean value of the data in each bin.profile.export(20, "snapshot_converted_to_tardis.csvy", overwrite=True)NoteBy default, the `export` method will not overwrite existing files with the same file name. Setting `overwrite=True` allows overwriting -- for example, if `overwrite=True`, if you make changes and rerun the export your exported file will be updated without creaing an additional file. During the export, the `yaml` header is automatically written and includes the time of the screenshot as the time for both the nuclear data as well as the density profile. 
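To illustrate the rebinning step mentioned above in isolation, here is a small standalone sketch using `scipy.stats.binned_statistic` with made-up data (this is not the TARDIS implementation, just the same kind of mean-per-bin operation):

```python
import numpy as np
from scipy.stats import binned_statistic

# made-up radial grid and density values, purely for illustration
radius = np.linspace(1e11, 2e11, 500)
density = 1e-12 * (radius / 1e11) ** -3

# rebin to 20 shells using the mean value of the data in each bin
mean_rho, bin_edges, _ = binned_statistic(radius, density, statistic="mean", bins=20)
print(mean_rho.shape, bin_edges.shape)  # (20,) (21,)
```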
By default, the positive axis will be exported; the negative axis can be exported with `direction="neg"`. All abundances will be normalised such that they roughly sum to 1, but slight deviations are expected to occur.

Manually rebinning the data

Using `profile.rebin(nshells, statistic=...)`, you can manually rebin the data and use all `statistic` keywords accepted by the `scipy.stats.binned_statistic` function. In this case you should pass the `statistic=None` keyword to the `export` function, so the data does not get rebinned twice.

```python
profile.create_profile(opening_angle=40, inner_radius=1e11, outer_radius=2e11, show_plot=False)
profile.rebin(20)
profile.plot_profile()
plt.show()
profile.export(20, "rebinned_snapshot_converted_to_tardis.csvy", overwrite=True)
```

Distances API Example

Run this example in [Colab](https://colab.research.google.com/github/SignalOceanSdk/SignalSDK/blob/master/docs/examples/jupyter/DistancesAPI/QuickStartDistancesAPI.ipynb)

Setup

Install the Signal Ocean SDK:
```
pip install signal-ocean
```
Set your subscription key acquired here: https://apis.signalocean.com/profile

```
pip install signal-ocean
signal_ocean_api_key = '' # replace with your subscription key
```

Calculate a distance over 2 ports

Create a connection towards the Distances API. Then call the Distances API to get the distance between Fujairah and Singapore for a VLCC vessel (with the corresponding restrictions applied).

```python
from signal_ocean import Connection
from signal_ocean.distances import DistancesAPI, VesselClassFilter, PortFilter, LoadingCondition

connection = Connection(api_key=signal_ocean_api_key)
distances_api = DistancesAPI(connection)
load_port = distances_api.get_ports(PortFilter(name_like='Fujairah'))[0]
discharge_port = distances_api.get_ports(PortFilter(name_like='Singapore'))[0]
vessel_class = distances_api.get_vessel_classes(VesselClassFilter(name_like='vlcc'))[0]
distanceInNM = distances_api.get_port_to_port_distance(vessel_class, LoadingCondition.BALLAST, load_port, discharge_port)
print(distanceInNM)
```

3302.86

Create a distance matrix

Find the distance in NM for an Aframax vessel from Trieste to every available port. Persist the output in an Excel file.

```python
distancesFromTriesteDictionary = {}
port_from = distances_api.get_ports(PortFilter(name_like='Trieste'))[0]
ports_to_get_distance = ['Rotterdam','Singapore','Lome','Long Beach','Galveston','Pazflor','Bonny','Ras Tanura','Houston','Escravos']
for port_to_filter in ports_to_get_distance:
    port_to = distances_api.get_ports(PortFilter(name_like=port_to_filter))[0]
    distanceInNM = distances_api.get_port_to_port_distance(vessel_class, LoadingCondition.BALLAST, port_from, port_to)
    distancesFromTriesteDictionary[port_to.name] = float(distanceInNM)

import pandas as pd
import openpyxl
df = pd.DataFrame(data=distancesFromTriesteDictionary, index=[0])
pd.options.display.float_format = '{:,.2f}'.format
df = (df.T)
df.to_excel('simpleDistanceMatrix_Trieste_Aframax_Ballast.xlsx')
df
```

**Why Normative Modeling?**

**Context**
- Currently, there are no biological tests to diagnose psychiatric disorders. They are diagnosed using clinical symptoms, which leads to heterogeneous groups on the biological level.
- The overwhelming majority of analysis in psychiatry & neuroimaging is focused on group averages, which ignore individual differences.
- This leads to losing information about individual subjects and missing patterns that aren't consistent across the entire group.

From Marquand et al. 2016:

![image1](image1.jpg)

>The classical case-control approach assumes that cases and controls each form a well-defined group.
This may often be a reasonable assumption, but in practice many other scenarios are possible. >- A Cases and controls each form a well-defined group>- B The clinical population may be composed of multiple groups, each having distinct pathology >- C Disease-related variation may be nested within healthy variation>- D The clinical group may be diffuse and heterogeneous as a result of misdiagnosis, comorbidities, or an aggregation of different pathologies. **Example where case-control fails**We'll construct an example in which:- We know there is a difference between probands and controls (as we've created it)- Case-control analysis misses it Generate DataFirst we generate the data:- We model score = 2*log(age) + gaussian noise- Probands have a random offset to their score- Probands are not distributed evenly throughout the dataset# Define Functions def sample_x(low=1,high=100,n_subs=1000,sampling='full'): if sampling =='full': x = np.random.uniform(low=low,high=high,size=n_subs) else: x = np.concatenate([np.random.normal(20,10,size=int(n_subs/2)),np.random.normal(80,10,size=int(n_subs/2))]) x = x[(x low)] return x def f(x): return 2*np.log(x) + np.random.randn() def dataset(seed=10,sampling='full'): # Set seed np.random.seed(seed) # Age param x = np.sort(sample_x(sampling=sampling)) # PROB = 1/CTR = 0 status1 = np.random.binomial(1,0.2,size=(int(0.75*x.shape[0]))) status2 = np.random.binomial(1,0.07,size=(x.shape[0] - status1.shape[0])) status = np.concatenate([status1, status2]) # Score param score = np.zeros(x.shape[0]) for i in range(x.shape[0]): s = f(x[i]) # Random offset for probands s = s + status[i]*np.random.normal(0.7,1) score[i] = s return pd.DataFrame([x,status,score],index=['age','status','score']).transpose() # Create a dataset df = dataset() df.head()Visualize data- CTR = 0- PROB = 1We can see below that extreme scores for a given age are more frequently probands.plt.figure(figsize=(15,7)) sns.scatterplot(x='age',y='score',hue='status',data=df)Case-control- Here we perform a simple case-control analysis using the status column of the dataset (PROB/CTR).- The result is not significant (p = 0.99)def case_control(df, group='status',score='score'): dmat = pat.dmatrix('C({})'.format(group), df, return_type='dataframe',NA_action='raise') results = sm.OLS(df[score],dmat).fit() return results.pvalues[1] print('p-value =', case_control(df))p-value = 0.8760719899903657Messtechnik HS2021 - Tutorial 7 Aufgabe: Datenverarbeitung für höhere Auflösung--------------------Analysieren Sie den simulierten *free induction decay* ``FID.mat`` (das Format ``.mat`` entspricht eine MATLAB formatierte Datei), welcher mit $e^{-\sigma^2t^2/2}$ und dem Zerfallsparameter $\sigma = $ 1.6 MHz abklingt. Ein solcher Zerfall kann zum Beispiel dann beobachtet werden, wenn die Resonanzfrequenzen Gauss-verteilt sind. Das Signal enthält zusätzlich Rauschen, das mit einem Pseudozufallszahlengenerator (*pseudo-random number generator*) erzeugt wurde. -----------------__(a)__ Versuchen Sie mit Hilfe der *Self-deconvolution*, *Zero-filling* und *Apodization* die spektrale Auflösung zu verbessern und die ursprüngliche unverbreitete Linien zurückzuerhalten. Überlagern Sie den originalen FID mit ihrer Apodisationsvariante, sowohl im Zeit- wie auch im Frequenzraum. * Wie viele Linien erkennen Sie im Spektrum? * Was sind die Amplitudenverhältnisse dieser Linien? * Geben Sie für jede Window-Funktion, welche Sie zur Apodisation verwenden, den Effekt auf die spektrale Linienbreite an (Verbreiterung/Verschmälerung). 
__(i)__ Fourier Transform + Zero-fillingimport numpy as np import matplotlib.pyplot as plt from numpy.fft import fft,fftshift from scipy.io import loadmat # Load the MATLAB-formatted file data = loadmat('FID.mat',squeeze_me=True) t = data['t'] # microseconds fid = data['I1'] # arb. units # Construct frequency axis: for even Npts, (fs/Npts) increment in [-fs/2,fs/2] zerofilling = 3*len(fid) Nfreq = len(fid) + zerofilling # Points in frequency-domain = length of FID + zero-filling of length of FID dt = t[1] - t[0] # FID sampling steps nyq_freq = 1/(dt*2) # MHz freq = np.linspace(-nyq_freq,nyq_freq,Nfreq) # MHz # Get the spectrum, weight first point by 0.5 fidw = fid fidw[0] /= 2 # Get the spectrum spc = fftshift(fft(fidw,Nfreq)) spc /= max(spc) # normalize to maximum # Plot time-domain FID plt.figure(figsize=[9,4]) plt.subplot(1,2,1) plt.plot(t,fid) plt.ylabel('FID [a.u.]') plt.xlabel('Time [µs]') # Plot frequency-domain spectrum plt.subplot(1,2,2) plt.plot(freq,spc.real) plt.ylabel('Normalized spectrum [a.u.]') plt.xlabel('Frequency [MHz]') # Plot only a region of positive frequencies: since the FID has only a real component, # the positive and negative frequencies are indistinguishable plt.xlim([6,11]) plt.tight_layout() plt.show()__(ii)__ Self-deconvolutionWenn man die Gauss'sche Envelope des Signals kompensiert mit $e^{\sigma_\text{apo}^2t^2/2}$ bekommt man ein Spektrum mit schmaleren Linien. Das führt aber zu einer Explosion des Rauschen bei spätere Zeiten.sigma = 1.6 # MHz, the decay constant as given in the tasksheet apo_sigma = sigma*1.0 # the rise constant of the apodization window # Envelope function of the FID Gaussian decay envelope_decay = np.exp(apo_sigma**2*t**2/2) # Compensation of the FID Gaussian decay fid_comp = fid*envelope_decay # Get the spectrum spc_comp = fftshift(fft(fid_comp,Nfreq)) spc_comp /= max(spc_comp) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t,fid_comp) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Decay-compensated FID'],frameon=False) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t[fid_comp>0],np.log(fid_comp[fid_comp>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Decay-compensated FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_comp.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.ylim([-0.25,1]) plt.tight_layout() plt.show()__(iii)__ Truncation Bei der Korrektur mit $e^{\sigma_\text{apo}^2t^2/2}$ explodiert gegen Ende des Zeitintervalls das Rauschen. 
Um das S/N im Frequenzbereich zu minimieren, muss man das Signal im Zeitbereich ab einem bestimmten Zeitpunkt abschneiden.# Signal truncation cutoff = 3 # We choose 3 us as the time where the signal has decayed fid_cut = fid_comp[t<=cutoff] # Cut the FID vector at t_cut t_cut = t[t<=cutoff] # Get the spectrum spc_cut = fftshift(fft(fid_cut,Nfreq)) spc_cut /= max(spc_cut) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t_cut,fid_cut) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Decay-compensated & Truncated FID'],frameon=False) plt.xlim([0,3]) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t_cut[fid_cut>0],np.log(fid_cut[fid_cut>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Decay-compensated & Truncated FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_cut.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.ylim([-0.25,1]) plt.tight_layout() plt.show()__(iv)__ Apodisierung Um Abschneide-Effekte zu verhindern, wurde ausserdem ein Hamming-Window verwendet (Gl. (8.29) im Skript).# Signal apodization n = np.arange(len(t_cut)) hamming_win = 0.54 + 0.46*np.cos(np.pi*n/max(n)) fid_apo = fid_cut*hamming_win # Get the spectrum spc_apo = fftshift(fft(fid_apo,Nfreq)) spc_apo /= max(spc_apo) # normalize to maximum plt.figure(figsize=[18,5]) plt.subplot(131) plt.plot(t,fid,t_cut,fid_apo,t_cut,hamming_win*max(fid_apo)) plt.xlabel('Time [µs]') plt.ylabel('FID [a.u.]') plt.legend(['Original FID','Processed FID','Scaled hamming window'],frameon=False) plt.xlim([0,3]) plt.subplot(132) plt.plot(t[fid>0],np.log(fid[fid>0]),t_cut[fid_apo>0],np.log(fid_apo[fid_apo>0])) plt.xlabel('Time [µs]') plt.ylabel('log(FID) [a.u.]') plt.legend(['Original FID','Processed FID'],frameon=False) plt.subplot(133) plt.plot(freq,spc.real,freq,spc_apo.real) plt.xlabel('Frequency [MHz]') plt.ylabel('Normalized spectrum [a.u.]') plt.legend(['Original','Processed'],loc='upper right',frameon=False) plt.xlim([6,11]) plt.tight_layout() plt.show()Beobachtungen: * Es sind 4 Spektrallinien zu erkennen. * Die Amplitudenverhältnisse kann man aus der Abbildung direkt ablesen: approx. 0.35/0.6/1.0/0.8* Da die Signalenvelope mit $e^{-\sigma^2t^2/2}$ exakt kompensiert wird, haben die Linien im apodisierten Spektrum \emph{keine} Gauss'sche Verbreiterung mehr. Da der FID jedoch mit einem Hamming-Window überlagert ist, werden die Linien nun wiederum verbreitert. -----------------__(b)__ Normalerweise ist der Zerfallsparameter $\sigma$ nicht genau bekannt. Wie verändert sich das Spektrum, wenn Sie anstelle von $\sigma = $ 1.6 MHz von einem grösseren oder einem kleineren $\sigma$ ausgehen?#---------------------------------------------------------------------------------------- def process_decay_compensation(sigma, cutoff=3): """ This function performs the same analysis as in in the previous section with a given decay parameter sigma """ # Load the MATLAB-formatted file data = loadmat('FID.mat',squeeze_me=True) t = data['t'] # microseconds fid = data['I1'] # arb. 
units # Construct frequency axis: for even Npts, (fs/Npts) increment in [-fs/2,fs/2] zerofilling = 3*len(fid) Nfreq = len(fid) + zerofilling # Points in frequency-domain = length of FID + zero-filling of length of FID dt = t[1] - t[0] # FID sampling steps nyq_freq = 1/(dt*2) # MHz freq = np.linspace(-nyq_freq,nyq_freq,Nfreq) # MHz # Get the spectrum, weight first point by 0.5 fidw = fid fidw[0] /= 2 # Envelope function of the FID Gaussian decay apo_sigma = sigma*1.0 # the rise constant of the apodization window envelope_decay = np.exp(apo_sigma**2*t**2/2) # Compensation of the FID Gaussian decay fid_comp = fid*envelope_decay # Signal truncation fid_cut = fid_comp[t<=cutoff] # Cut the FID vector at t_cut t_cut = t[t<=cutoff] # Signal apodization n = np.arange(len(t_cut)) hamming_win = 0.54 + 0.46*np.cos(np.pi*n/max(n)) fid_apo = fid_cut*hamming_win # Get the spectrum spc_apo = fftshift(fft(fid_apo,Nfreq)) spc_apo /= max(spc_apo) # normalize to maximum return freq,spc_apo #---------------------------------------------------------------------------------------- # List of sigma values to evaluate sigmas = [1.2, 1.4, 1.6, 1.8, 2] # MHz plt.figure(figsize=[5,8]) for n,sigma in enumerate(sigmas): # Process the FID data freq,spec = process_decay_compensation(sigma) # Plot the processed spectrum plt.plot(freq,2.2*n + spec.real,color='k', linewidth=1) # Add text next to spectrum plt.annotate(f'$\sigma$ = {sigma} MHz', xy=(6,0.5+2.2*n), xytext=(1.02*6, 0.3+2.2*n), color='k') plt.yticks(ticks=1.1*np.arange(2*len(sigmas)),labels=[0,1,0,1,0,1,0,1,0,1]) plt.xlabel('Frequency [MHz]') plt.ylabel('Spectra [a.u.]') plt.xlim([6,11]) plt.tight_layout() plt.show()There are 4149 elements, and PE has a significant amount of missing valueswell_PE_Miss = train.loc[train["PE"].isnull(),"Well Name"].unique() well_PE_Miss train.loc[train["Well Name"] == well_PE_Miss[0]].count() train.loc[train["Well Name"] == well_PE_Miss[1]].count()The two wells have all PE missed(train.groupby("Well Name"))["PE"].mean() (train.groupby("Well Name"))["PE"].median() train["PE"] = train["PE"].fillna(train["PE"].median()) print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].mean()) print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].median()) print((train.groupby("Well Name"))["PE"].median()) ## QC for the fill in print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].mean()) print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].median()) plt.show()3.789925742574257 3.74 Well Name ALEXANDER D 3.5515 CHURCHMAN BIBLE 3.7400 CROSS H CATTLE 3.2010 KIMZEY A 3.5515 LUKE G U 3.6000 NEWBY 3.7000 NOLAN 3.5980 Recruit F9 5.3000 SHANKLE 3.1000 SHRIMPLIN 4.0000 Name: PE, dtype: float64 3.789925742574257 3.74The PE of all wells have no strong variance; For now, fillin the Missing value of median Fancy visualization from forumfeatures = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND','PE','NM_M', 'RELPOS'] feature_vectors = train[features] facies_labels = train['Facies'] ## 1=sandstone 2=c_siltstone 3=f_siltstone ## 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite ## 8=packstone 9=bafflestone facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D','PS', 'BS'] #facies_color_map is a dictionary that maps facies labels #to their respective colors facies_color_map = {} for ind, label in enumerate(facies_labels): facies_color_map[label] = facies_colors[ind] def label_facies(row, labels): return labels[ 
row['Facies'] -1] train.loc[:,'FaciesLabels'] = train.apply(lambda row: label_facies(row, facies_labels), axis=1) # def make_facies_log_plot(logs, facies_colors): #make sure logs are sorted by depth logs = logs.sort_values(by='Depth') cmap_facies = colors.ListedColormap( facies_colors[0:len(facies_colors)], 'indexed') ztop=logs.Depth.min(); zbot=logs.Depth.max() cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1) f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12)) ax[0].plot(logs.GR, logs.Depth, '-g') ax[1].plot(logs.ILD_log10, logs.Depth, '-') ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5') ax[3].plot(logs.PHIND, logs.Depth, '-', color='r') ax[4].plot(logs.PE, logs.Depth, '-', color='black') im=ax[5].imshow(cluster, interpolation='none', aspect='auto', cmap=cmap_facies,vmin=1,vmax=9) divider = make_axes_locatable(ax[5]) cax = divider.append_axes("right", size="20%", pad=0.05) cbar=plt.colorbar(im, cax=cax) cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', 'SiSh', ' MS ', ' WS ', ' D ', ' PS ', ' BS '])) cbar.set_ticks(range(0,1)); cbar.set_ticklabels('') for i in range(len(ax)-1): ax[i].set_ylim(ztop,zbot) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[0].set_xlabel("GR") ax[0].set_xlim(logs.GR.min(),logs.GR.max()) ax[1].set_xlabel("ILD_log10") ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max()) ax[2].set_xlabel("DeltaPHI") ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max()) ax[3].set_xlabel("PHIND") ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max()) ax[4].set_xlabel("PE") ax[4].set_xlim(logs.PE.min(),logs.PE.max()) ax[5].set_xlabel('Facies') ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([]) ax[4].set_yticklabels([]); ax[5].set_yticklabels([]) ax[5].set_xticklabels([]) f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94) make_facies_log_plot( train[train['Well Name'] == 'SHRIMPLIN'], facies_colors) plt.show() ## Investigate the dependencies of the depth feature and Facies wells = train["Well Name"].unique() #train.plot(x = "Depth", y = "Facies") #plt.show() pi = 0 for well in wells: pi = pi + 1 # Plot index ax = plt.subplot(3, 4, pi) depthi = train.loc[train["Well Name"] == well, "Depth"].values faci = train.loc[train["Well Name"] == well, "Facies"].values plt.plot(faci,depthi) ax.set_title(well) ## Create dummy variables for Well Name, Formation, which may have geologic or geospatial information train_dummy = pd.get_dummies(train[["Formation"]]) train_dummy.describe() cols_dummy = train_dummy.columns.values train[cols_dummy] = train_dummy[cols_dummy] print(len(cols_dummy)) ## For trainning drop Formation, FaciesLabels Leave Well Name for Later group splitting wellgroups = train["Well Name"].values train_inp = train.drop(["Formation","Well Name",'FaciesLabels'],axis =1) train_inp.info() RangeIndex: 4149 entries, 0 to 4148 Data columns (total 23 columns): Facies 4149 non-null int64 Depth 4149 non-null float64 GR 4149 non-null float64 ILD_log10 4149 non-null float64 DeltaPHI 4149 non-null float64 PHIND 4149 non-null float64 PE 4149 non-null float64 NM_M 4149 non-null int64 RELPOS 4149 non-null float64 Formation_A1 LM 4149 non-null float64 Formation_A1 SH 4149 non-null float64 Formation_B1 LM 4149 non-null float64 Formation_B1 SH 4149 non-null float64 Formation_B2 LM 4149 non-null float64 Formation_B2 SH 4149 non-null float64 Formation_B3 LM 4149 non-null float64 Formation_B3 SH 4149 non-null float64 Formation_B4 LM 4149 non-null float64 Formation_B4 SH 4149 non-null float64 
Formation_B5 LM 4149 non-null float64 Formation_B5 SH 4149 non-null float64 Formation_C LM 4149 non-null flo[...]Build up Initial Test Loop for model and feature engineering : Test 1 SVCfrom sklearn.model_selection import LeavePGroupsOut X = train_inp.drop(["Facies","Depth"],axis = 1).values y = train_inp["Facies"].values lpgo = LeavePGroupsOut(n_groups=2) split_no = lpgo.get_n_splits(X,y,wellgroups)Bad indicator of model performance. It means no accurate prediction was found in one class /home/computer/anaconda3/lib/python3.5/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.'precision', 'predicted', average, warn_for)svc_b1 = SVC(C =1, gamma = 0.001, kernel = 'rbf') svc_b1.fit(X,y) test = pd.read_csv('01_raw_data/validation_data_nofacies.csv') test.count() test["Formation"].unique() test_dummy = pd.get_dummies(test[["Formation"]]) test_cols_dummy = test_dummy.columns.values test[test_cols_dummy] = test_dummy[cols_dummy] test_inp = test.drop(["Formation","Well Name"],axis =1) X_test = test_inp.drop(["Depth"],axis = 1).values svc_b1.predict(X_test) test = test.drop(test_cols_dummy,axis = 1) test["Facies"] = svc_b1.predict(X_test) test.to_csv("Houston_J_sub_1.csv")Introduction to Python and Natural Language Technologies__Lecture 9, Transformers, BERT____April 13, 2021______import gc from IPython.display import Image import numpy as np import seaborn as sns import torch import torch.nn as nn from transformers import pipeline from transformers import AutoTokenizer, AutoModelAttention mechanismAttention:- emphasizes the important part of the input- and de-emphasizes the rest.- Mimics cognitive attention.Method:- It does this by assigning weights to the elements of the input sequence.- The weights depend on the current context in the decoder: - the current decoder hidden state, - the previous output.- The source vectors are multiplied by the weights and then summed -> **context vector**- The context vector is used for predicting the next output symbol.Image("img/dl/attention_mechanism.jpg")ProblemsRecall that we used recurrent neural cells, specifically LSTMs to encode and decode sequences.__Problem 1. No parallelism__LSTMs are recurrent, they rely on their left and right history (horizontal arrows), so the symbols need to be processed in order -> no parallelism.__Problem 2. Long-range dependencies__Long-range dependencies are not infrequent in NLP."The **people/person** who called and wanted to rent your house when you go away next year **are/is** from California" -- Miller & Chomsky 1963LSTMs have a problem capturing these because there are too many backpropagation steps between the symbols. 
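As a minimal illustration of the attention mechanism described above, the weights and the context vector can be computed as follows (dot-product scoring and random tensors are assumed here purely for the sketch; the actual scoring function varies by model):

```python
import torch
import torch.nn.functional as F

seq_len, hidden = 6, 8
encoder_states = torch.randn(seq_len, hidden)   # one vector per source symbol
decoder_state = torch.randn(hidden)             # current decoder hidden state

# dot-product scores -> attention weights -> context vector
scores = encoder_states @ decoder_state         # (seq_len,)
weights = F.softmax(scores, dim=0)              # non-negative, sums to 1
context = weights @ encoder_states              # weighted sum, shape (hidden,)
print(weights.sum().item(), context.shape)
```

Self-attention, introduced next, applies the same idea with each symbol attending to every other symbol in the same sequence.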
TransformersIntroduced in [Attention Is All You Need](https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) by Vaswani et al., 2017Transformers solve Problem 1 by relying purely on attention instead of recurrence.Not having recurrent connections means that sequence position no longer matters.Recurrence is replaced by **self attention**.Each symbol is encoded the following way:__Step 1__: the encoder 'looks' at the other symbols in the input sequence - In the example above: the representation of **are/is** depends on **people/person** more than any other word in the sentence, it should receive the highest attention weight.Image("http://jalammar.github.io/images/t/transformer_self-attention_visualization.png", embed=True) # from Illustrated Transformers__Step 2__: the context vector is passed through a feed-forward network which is shared across all symbols.Image("http://jalammar.github.io/images/t/encoder_with_tensors.png", embed=True) # from Illustrated TransformersThis visualization is available in the [Tensor2tensor notebook in Google Colab](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb) Other components__Residual connections__- Also called __skip connections__- The output of a module is added to the input$$\text{output} = \text{layer}(\text{input}) + \text{input}$$__Softmax__- Only used in the decoder- Maps the output vector to a probability distribution - In other words it tells us how likely each symbol is. Multiple heads and layersTransformers have a number of additional components summarized in this figure:Image("img/dl/transformer.png") # from Vaswani et al. 2018PyTorch supportPyTorch has a `nn.Transformer` class and its encoder and decoder versions.from torch.nn import TransformerEncoder, TransformerEncoderLayer embedding_dim = 12 # num_heads = 5 # embedding_dim must be divisible by the number of heads num_heads = 2 hidden_size = 7 dropout = 0.2 TransformerEncoderLayer(embedding_dim, num_heads, hidden_size, dropout) layer = TransformerEncoderLayer(embedding_dim, num_heads, hidden_size, dropout) TransformerEncoder(layer, 2) encoder = TransformerEncoder(layer, 2) sequence_len = 9 batch_size = 3 X = torch.rand((sequence_len, batch_size, embedding_dim)) y = encoder(X) y.size()Positional encodingWithout recurrence word order information is lost.Positional information is important: John loves Mary. Mary loves John.Transformers apply positional encoding:$$\text{PE}_{\text{pos},2i} = \sin(\frac{\text{pos}}{10000^{2i/d_{\text{model}}}}), \\\text{PE}_{\text{pos},2i+1} = \cos(\frac{\text{pos}}{10000^{2i/d_{\text{model}}}}),$$where:- $d_{\text{model}}$ is the input dimension to the Transformer, usually the embedding size- $\text{pos}$ is the position of the symbol in the input sequence i.e. first word, second word etc.- $i$ is the coordinate index in the input vector. Let's create a position encoder in PyTorch.For $\text{pos}=0$, the sine values are 0, and the cosine values are 1:t = torch.FloatTensor([0.]) torch.cos(t), torch.sin(t)For large $i$ values, the denominator is close to 10000, so it's again close to 0.# Pick a few random values for pos pos = torch.randint(512, size=(10, )) print(pos) # Divide by 10000^2*i/d_model. 
Make 2*i/d_model close to one (high 2*i values) t = pos / (10000 ** 0.95) print(t) torch.cos(t), torch.sin(t)tensor([445, 109, 251, 47, 17, 42, 412, 231, 155, 230]) tensor([0.0705, 0.0173, 0.0398, 0.0074, 0.0027, 0.0067, 0.0653, 0.0366, 0.0246, 0.0365])Let's generate the full grid. There are $\text{maxlen} \times d_\text{model}$ values.__maxlen__ is the maximum position we allow. This has to be predefined.__d_model__ is the size of the input, which is embedding_dim in most cases.maxlen = 20 d_model = 12 pe = torch.zeros((maxlen, d_model)) pe.dtype, pe.size()__pos__ are the indices of the sequence from 0 to $\text{maxlen}-1$:pos = torch.arange(maxlen, dtype=torch.float) posReminder:$$\text{PE}_{\text{pos},2i} = \sin(\frac{\text{pos}}{10000^{2i/d_{\text{model}}}}), \\\text{PE}_{\text{pos},2i+1} = \cos(\frac{\text{pos}}{10000^{2i/d_{\text{model}}}}),$$Let's define the denominator:divterm = 10000 ** (torch.arange(0, d_model, step=2) / float(d_model)) divterm pos.size(), divterm.size() print((pos[:, None] / divterm).size()) pos[:, None] / divterm pe[:, ::2] = torch.sin(pos[:, None] / divterm) sns.heatmap(pe, cmap='RdBu', center=0) pe[:, 10:] pe[:, 1::2] = torch.cos(pos[:, None] / divterm) sns.heatmap(pe, cmap='RdBu', center=0)Combining it in a `nn.Module`:# took inspiration from here: https://pytorch.org/tutorials/beginner/transformer_tutorial.html class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, maxlen=50): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(maxlen, d_model) pos = torch.arange(maxlen, dtype=torch.float) divterm = 10000 ** (torch.arange(0, d_model, step=2) / float(d_model)) pe[:, ::2] = torch.sin(pos[:, None] / divterm) pe[:, 1::2] = torch.cos(pos[:, None] / divterm) # Since pe is a constant value not a parameter of the module, we register it as a buffer. # Buffers are part of the state dictionary of the module along with parameters. # Docs: https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.register_buffer self.register_buffer('pe', pe) def forward(self, x): # The input sequence may be shorter than maxlen seqlen = x.size(0) # The middle dimension is the batch size. # We add it as a dummy dimension. x = x + self.pe[:seqlen, None, :] return self.dropout(x) d_model = 12 maxlen = 20 batch_size = 7 seqlen = 10 pos_enc = PositionalEncoding(d_model=d_model, dropout=0., maxlen=maxlen) x = torch.rand(size=(seqlen, batch_size, d_model)) x_pe = pos_enc(x) x_pe.size()Contextual embeddingsIn GloVe and Word2vec representations, words have static representations, in other words, the same vector is assigned for every occurrence of the word.But words can have different meaning in different contexts, e.g. the word 'stick':1. Find some dry sticks and we'll make a campfire.2. Let's stick with glove embeddings.![elmo](http://jalammar.github.io/images/elmo-embedding-robin-williams.png)_(Peters et. al., 2018 in the ELMo paper)_ ELMo**E**mbeddings from **L**anguage **Mo**delsWord representations are functions of the full sentences instead of the word alone.Two bidirectional LSTM layers are linearly combined.[Deep contextualized word representations](https://arxiv.org/abs/1802.05365) by Peters et al., 2018, 6300 citations BERT[BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://www.aclweb.org/anthology/N19-1423/)by Devlin et al. 
2018, 17500 citations[BERTology](https://huggingface.co/transformers/bertology.html) is the nickname for the growing amount of BERT-related research.Trained on two tasks:1. Masked language model: 1. 15% of the tokenswordpieces are selected at the beginning. 2. 80% of those are replaced with `[MASK]`, 3. 10% are replaced with a random token, 4. 10% are kept intact. 2. Next sentence prediction: - Are sentences A and B consecutive sentences? - Generate 50-50%. - Binary classification task. Embedding layerImage("img/dl/bert_embedding.png")Transformer layers Finetuning1. Take a trained BERT model.2. Add a small classification layer on top (typically a 2-layer MLP).3. Train BERT along with the classification layer on an annotated dataset. - Much smaller than the data BERT was trained onAnother option: freeze BERT and train the classification layer only.- Easier training regime.- Smaller memory footprint.- Worse performance.Image("img/dl/bert_encoding_finetuning.png")BERT pretrained checkpoints BERT-Base- 12 layers- 12 attention heads per layer- 768 hidden size- 110M parameters BERT-Large- 24 layers- 16 attention heads per layer- 1024 hidden size- 340M parameters Cased and uncasedUncased: everything is lowercased. Diacritics are removed. Multilingual BERT - mBERT104 language version trained on the 100 largest Wikipedia. BERT implementations[Original Tensorflow implementation](https://github.com/google-research/bert)[Huggingface Transformers](https://huggingface.co/transformers/)- PyTorch implementation originally for BERT-only- Now it supports dozens of other models- Hundreds of other model checkpoints from the community BERT tokenization WordPiece tokenizerBERT's input **must** be tokenized with BERT's own tokenizer.A middle ground between word and character tokenization.Static vocabulary:- Byte-pair encoding: simple frequency-based tokenization method- Continuation symbols (\\symbol)- Special tokens: `[CLS]`, `[SEP]`, `[MASK]`, `[UNK]`- It tokenizes everything, falling back to characters and `[UNK]` if necessary`AutoTokenizer` is a factory class for pretrained tokenizers. ng id. `from_pretrained` instantiates the corresponding class and loads the weights:t = AutoTokenizer.from_pretrained('bert-base-uncased') print(type(t)) print(len(t.get_vocab())) t.tokenize("My beagle's name is Tündérke.") t.tokenize("Русский")**Cased** models keep diacritics:t = AutoTokenizer.from_pretrained('bert-base-cased') print(len(t.get_vocab())) t.tokenize("My beagle's name is Tündérke.")28996It character tokenizes Chinese and Japanese but doesn't know all the characters:t.tokenize("日本語")Korean is missing from this version:t.tokenize("한 한국어")mBERT tokenization104 languages, 1 vocabularyt = AutoTokenizer.from_pretrained('bert-base-multilingual-cased') len(t.get_vocab()) t.tokenize("My puppy's name is Tündérke.") t.tokenize("한 한국어") t.tokenize("日本語")Using BERT Using `BertModel` directly`AutoModel`- each pretrained checkpoint has a string id. `from_pretrained` instantiates the corresponding class and loads the weights:tokenizer = AutoTokenizer.from_pretrained('bert-base-cased') model = AutoModel.from_pretrained('bert-base-cased') type(model), type(tokenizer) tokenizer.tokenize("There are black cats and black dogs.")`__call__` return a dictionary of BERT's encoding:tokenizer("There are black cats and black dogs.")It can be used for pairs of sentences. 
Note the values of `token_type_ids`:tokenizer("There are black cats and black dogs.", "Another sentence.")It can be used for multiple sentences:tokenizer(["There are black cats and black dogs.", "There are two white cats."]) encoded = tokenizer(["There are black cats and black dogs.", "There are two white cats."], return_tensors='pt', padding=True) encoded['attention_mask']We need tensors as inputs for BERT:encoded = tokenizer("There are black cats and black dogs.", return_tensors='pt') encoded['input_ids'].size() encoded output = model(**encoded, return_dict=True) output.keys() output['last_hidden_state'].size(), output['pooler_output'].size()Getting all layers:output = model(**encoded, output_hidden_states=True, return_dict=True) output.keys() len(output['hidden_states']), output['hidden_states'][0].size()Remove variable from the global namespace, run the garbage collector:del model gc.collect()BERT applications Sequence classificationPretrained model for sentiment analysis.Base model: `distilbert-base-uncased`Finetuned on the [Stanford Sentiment Treebank](https://nlp.stanford.edu/sentiment/index.html) or SST-2, a popular sentiment analysis dataset.Model id: `distilbert-base-uncased-finetuned-sst-2-english`nlp = pipeline("sentiment-analysis") nlp("This is an amazing class.") nlp("This is not a good class but it's not too bad either.") nlp("This is not a class.") del nlp gc.collect()Sequence tagging/labeling: Named entity recognitionBase model: `bert-large-cased`Finetuned on [CoNLL-2003 NER](https://www.clips.uantwerpen.be/conll2003/ner/).nlp = pipeline("ner") result = nlp("Jupiter is a planet that orbits around James the center of the Universe") result result = nlp(" has a pet pig named Estella.") result del nlp gc.collect()Machine translationnlp = pipeline("translation_en_to_fr") print(nlp("Hugging Face is a technology company based in New York and Paris", max_length=40))Some weights of T5Model were not initialized from the model checkpoint at t5-base and are newly initialized: ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.Even the [blessé - blessed false cognate](https://frenchtogether.com/french-english-false-friends/) is handled correctly:nlp("I was blessed by God after I injured my head.", max_length=40) gc.collect() del nlp gc.collect()Masked language modelingUses `distilroberta-base`nlp = pipeline("fill-mask") prompt = "Twitter is a bad idea /s> [MASK]" for n in range(10): result = nlp(f"{prompt} {nlp.tokenizer.mask_token}") token = result[0]['token_str'][1:] prompt += " " + token prompt from pprint import pprint pred = nlp(f"HuggingFace is creating a {nlp.tokenizer.mask_token} that the community uses to solve NLP tasks.") pprint(pred) pred = nlp(f"{nlp.tokenizer.mask_token} is a very good idea.") pprint(pred) pred = nlp(f"{nlp.tokenizer.mask_token} is a bad idea.") pprint(pred) del nlp gc.collect()Other models Pretrained modelsRoBERTa: identical model, larger training data, different training objectiveDistilBERT: smaller version of BERT. 
It was _distilled_ or compressed from BERT with a student-teacher setup.ALBERT: smaller BERTXLM-RoBERTa: multilingual version of RoBERTaDistil-mBERT: distilled multilingual BERT Community models[Over 1000 community contributions](https://huggingface.co/models) huBERTThe first Hungarian-only model and the only one registered on Huggingface.Other models are available at https://hilanco.github.io/.BERT base, trained on Webcorpus 2.0, a version of CommonCrawl.Its tokenizer works much better for Hungarian than mBERT's:hubert_tokenizer = AutoTokenizer.from_pretrained('SZTAKI-HLT/hubert-base-cc') # hubert = AutoModel.from_pretrained('SZTAKI-HLT/hubert-base-cc') sent = ("yarországról szóló, az Orbán-kormányt kritizáló levelére miniszteri és " "államtitkári szinten is reagált a magyar kormány.") hubert_tokenizer.tokenize(sent) bert_tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased') bert_tokenizer.tokenize(sent)GPT-2 text generationCausal language modeliing is when the $i^{th}$ token is modeled based on all the previous tokens as opposed to masked language modeling where both left and right context are used.text_generator = pipeline("text-generation") print(text_generator("This is a serious issue we should address", max_length=50, do_sample=False)[0]['generated_text']) print(text_generator("Twitter is a bad idea, had a bad day when he came up with it", max_length=100, do_sample=False)[0]['generated_text']) del text_generator gc.collect()First we load and preprocess the datafilename = Path("../data/gdsc2_public.csv") data = load_table(filename) total_times = compute_work_item_times(data) times = time_for_phase(data, end_date="2018-03-31", process=False) times.dropna(inplace=True) open_wis = total_times[pd.isnull(total_times["duration_in_days"])]["work_item"].values times_open = times[times.work_item.isin(open_wis)] times_closed = times[~times.work_item.isin(open_wis)] times_closed.dropna(inplace=True) times_closed.loc[:, "receive_date"] = times_closed["from_timestamp"].apply(lambda x: x.date()) times_closed.loc[:, "drop_date"] = times_closed["to_timestamp"].apply(lambda x: x.date()) times.loc[:, "receive_date"] = times["from_timestamp"].apply(lambda x: x.date()) times.loc[:, "drop_date"] = times["to_timestamp"].apply(lambda x: x.date()) daterange = pd.date_range(start=str(times["receive_date"].min()), end=str(times["drop_date"].max()), freq='D')For easier handling we assume every working step to be closed until the last day in the dataset.We now want to compute a measure for the experience of an employee. 
We define it as follow:\begin{equation}x^{er}_{exp}(t) = \frac{t_{spent}\sum{w^{er}_{closed}(t)}}{\sum{w_{closed}(t)}}\end{equation}We also want to define a measure for the workload of an employee:\begin{equation}x^{er}_{load}(t) = \frac{\sum{w^{er}_{open}(t)}}{\sum{w_{open}(t)}}\end{equation}with $t$ as time, $t_{spent}$ as time already spend in the company, $w$ being a working step and $er$ being a resource.def x_experience(times, resource, t, col="current_resource"): assert col in times.columns, col + " not in columns of dataframe" closed_tickets = times[times["receive_date"] < t] er = closed_tickets[closed_tickets[col] == resource] try: date_diff = (t - er["receive_date"].min()) working_time = round(date_diff.total_seconds() / (24*3600), 2) except TypeError: working_time = 1 if pd.isna(working_time): working_time = 1 try: x_exp = (working_time*len(er))/len(closed_tickets) except ZeroDivisionError: x_exp = 0 return x_exp def x_workload(times, resource, t, col="current_resource"): assert col in times.columns, col + " not in columns of dataframe" open_tickets = times[(times["receive_date"] <= t) & (times["drop_date"] >= t)] er = open_tickets[open_tickets[col] == resource] try: x_load = (len(er))/len(open_tickets) except ZeroDivisionError: x_load = 0 return x_loadWe are going to test the functions with the most frequent resource.resource = "ER_00061" plot_df = pd.DataFrame(index=daterange, columns=["x_exp", "x_load"]) for date in daterange: plot_df.loc[date, "x_exp"] = x_experience(times, resource, date.date()) plot_df.loc[date, "x_load"] = x_workload(times, resource, date.date())The function runs without errors. We want to plot the results, the graph should show an increasing trend for the experience and some kind of fluktuation for the workload.fig, ax = plt.subplots(1, 2, figsize=(15,5), sharex=True, constrained_layout=True) plot_df["x_exp"].plot(color='g', ax=ax[0], label="x_exp") plot_df["x_load"].plot(color='b', ax=ax[1], label="x_load") ax[0].set_title("Experience of most frequent resource") ax[1].set_title("Workload of most frequent resource") ax[0].legend() ax[1].legend() plt.show()Now we are able to compute the two measures for every resource for a given time. To add this as a feature to our model we also need a function to aggregate the measures regarding to the multiple resources working on one work item. We define the following as the employment rate for every work item:\begin{equation}x^{wi}_{emp} = \frac{\sum_{er}{x^{er}_{exp}(t)x^{er}_{load}(t)}}{\sum_{er}{w^{er}}}\end{equation}with $t$ as the time the employee recieves the task.def employment_rate(times, work_item, resource_col="current_resource"): assert resource_col in times.columns, resource_col + " not in columns of dateframe!" 
wi = times[times["work_item"] == work_item] resources = list(wi[resource_col].values) res_counter = defaultdict(int) numerator = 0 x_exp_sum = 0 x_load_sum = 0 denumerator = len(resources) for resource in resources: if resources.count(resource) > 1: res_counter[resource] += 1 t = wi[wi[resource_col] == resource]["receive_date"].iloc[res_counter[resource]-1] elif resources.count(resource) == 1: t = wi[wi[resource_col] == resource]["receive_date"].values[0] x_exp = x_experience(times, resource, t, resource_col) x_load = x_workload(times, resource, t, resource_col) x_exp_sum += x_exp x_load_sum += x_load numerator += (x_exp * x_load) x_emp = numerator/denumerator x_ex = x_exp_sum/denumerator x_l = x_load_sum/denumerator return x_emp, x_ex, x_lNow we want to compute the measures for every work item and compare themx_emp, x_exp, x_load = employment_rate(times, "WI_000001") # res_df = pd.DataFrame(index=times.work_item.unique(), columns=["x_emp", "x_exp", "x_load"]) # counter = 0 # for wi in times.work_item.unique(): # x_emp, x_exp, x_load = employment_rate(times, wi) # res_df.loc[wi, "x_emp"] = x_emp # res_df.loc[wi, "x_exp"] = x_exp # res_df.loc[wi, "x_load"] = x_load # counter += 1 # if counter == 500: # print("500 work items done!") # counter = 0Computation takes a while. We will write the data to the SQL-Server so we don't have to compute them every time.# import sqlalchemy # engine = sqlalchemy.create_engine("XXX") # con = engine.connect() # # tosql = res_df.reset_index().rename(columns={"index":"work_item"}) # # tosql.to_sql(name="resource_employment_rate", con=con) # res_df = pd.read_sql_table(table_name="resource_employment_rate", con=con) # con.close() fig, ax = plt.subplots(3,1, figsize=(12,10), sharex=True, constrained_layout=True) res_df["x_emp"].plot(ax=ax[0], label="x_emp") ax[0].set_title("Employment rate") ax[0].legend() res_df["x_exp"].plot(ax=ax[1], label="x_exp") ax[1].set_title("Experience rate") ax[1].legend() res_df["x_load"].plot(ax=ax[2], label="x_load") ax[2].set_title("Workload rate") ax[2].legend() plt.show()We want to add another feature regarding to the difficulty of a work item. Resources have days where they are closing a lot of items at once. But there are always some items that are skipping these 'closing days' so we assume that these items have a higher difficulty.We define a closing day as\begin{equation}t^{er}_{close} =\begin{cases} 1 & \frac{\sum{w^{er}_{closed}(t)}}{\sum{w^{er}_{open}(t)}} \geq 0.25 \\ 0 & else \end{cases}\end{equation}So if 30% of the open tickets the resource holds are closed it's a closing day. 
We define than the difficulty of a work item\begin{equation}x^{wi}_{diff} = \sum_{t}{\sum_{er}{t^{er}_{close}}}\end{equation}The difficulty is the sum of all closing days a work item skipped for every resource that worked on it.Now let's implement this.def calc_t_close(times_closed, resource): res_df = times_closed[times_closed["current_resource"] == resource] # We calculate the number of items dropped at a drop date t_close = pd.DataFrame(res_df["drop_date"].value_counts()).reset_index().rename(columns={"index":"drop_date", "drop_date":"w_closed"}) # We calculate the work items that are open over a drop date res_df["w_open"] = res_df["drop_date"].apply(lambda t: res_df[(res_df["drop_date"].apply(lambda x: x>=t)) & (res_df["receive_date"].apply(lambda x: x<=t))].shape[0]) # Now we merge them together and calculate the percentage t_close = pd.merge(t_close, res_df[["drop_date", "w_open"]].drop_duplicates()) t_close.loc[:, "percentage_closed"] = t_close["w_closed"] / t_close["w_open"].apply(lambda x: 1 if x==0 else x) t_close.loc[:, "t_close"] = t_close["percentage_closed"].apply(lambda x: 1 if x >= 0.25 else 0) close_days = t_close[t_close["t_close"] == 1]["drop_date"].values return close_days times.loc[:, "x_diff"] = 0 for resource in times["current_resource"].unique(): close_days = calc_t_close(times_closed, resource) res_df = times[times["current_resource"] == resource] for x, y in res_df.iterrows(): difficulty = len([e for e in close_days if ((y["receive_date"]Lasso-HighlightThe lasso-highlight delineates a substructure of a molecule. Installation in CondaIf not already installed, install **pip** and **git**: ```conda install gitconda install pip```Then install via pip:```pip install git+git://github.com/c-feldmann/lassohighlight``` Code Examples Loading Packages# Default packages from rdkit import Chem from rdkit.Chem import rdDepictor from rdkit.Chem import Draw from rdkit.Chem.Draw import rdMolDraw2D from PIL import Image import io # Functions of this package from lassohighlight.lassohighlight import draw_substructurematch from lassohighlight.lassohighlight import draw_multi_matchesSetting up an ExampleDefining colors as RGBAcolor_dict = {"gray": (0.5, 0.5, 0.5, 1), "pink": (1, 0, 0.5, 1), "blue": (0, 0.5, 1, 1), "orange": (1, 0.5, 0, 1), "salmon": (1, 0.75, 0.75, 1)} # Exemplary molecule smi = 'CO[C@@H](O)C1=C(O[C@H](F)Cl)C(C#N)=C1ONNC[NH3+]' # List of exemplpary substructures smarts_list = ['CONN', 'N#CC~CO', 'C=CON', 'CONNCN']Creating and preparing the molecule:mol = Chem.MolFromSmiles(smi) mol = Draw.PrepareMolForDrawing(mol)Defining highlighted atoms:atom_idx_list = [] for smart_str in smarts_list: smart_obj = Chem.MolFromSmarts(smart_str) matched_atoms = set.union(*[set(x) for x in mol.GetSubstructMatches(smart_obj)]) atom_idx_list.append(matched_atoms)Highlighting a single substructure# Defining the canvas d = rdMolDraw2D.MolDraw2DCairo(500, 500) # Setting all atom colors to black. Only personal preference. d.drawOptions().updateAtomPalette({i: (0, 0, 0, 1) for i in range(100)}) # Setting atom color to black # Setting up the coordinate system by drawing and erasing molecule d.DrawMolecule(mol) d.ClearDrawing() # adding the lasso highlighting draw_substructurematch(d, mol, atom_idx_list[0], rel_radius=0.4, rel_width=0.5, color=color_dict["pink"]) # adding the molecule d.DrawMolecule(mol) d.FinishDrawing() # displaying the canvas Image.open(io.BytesIO(d.GetDrawingText()))Relative bond widthThe parameter `relative_bond_width` sets the width for the highlighting around the bonds. 
When set to 0.5 (pink) the width corresponds to half of the circles around the atoms. When set to 1.0 (blue) the width is equal to the radius of the circles.# Defining the canvas d = rdMolDraw2D.MolDraw2DCairo(500, 500) # Setting all atom colors to black. Only personal preference. d.drawOptions().updateAtomPalette({i: (0, 0, 0, 1) for i in range(100)}) # Setting atom color to black # Setting up the coordinate system by drawing and erasing molecule d.DrawMolecule(mol) d.ClearDrawing() # adding the lasso highlighting with a relative_bond_width of 0.5 draw_substructurematch(d, mol, atom_idx_list[0], rel_radius=0.4, rel_width=0.5, color=color_dict["pink"]) # adding the lasso highlighting with a relative_bond_width of 1.0 draw_substructurematch(d, mol, atom_idx_list[1], rel_radius=0.4, rel_width=1, color=color_dict["blue"]) # adding the molecule d.DrawMolecule(mol) d.FinishDrawing() # displaying the canvas Image.open(io.BytesIO(d.GetDrawingText()))Highlighting a multiple substructuresIt is also possible to highlight multiple substructures at once.# Defining the canvas settings d = rdMolDraw2D.MolDraw2DCairo(500, 500) d.drawOptions().updateAtomPalette({i: (0, 0, 0, 1) for i in range(100)}) # Setting atom color to black # Setting up the coordinate system by drawing and erasing molecule d.DrawMolecule(mol) d.ClearDrawing() # adding the lasso highlighting for multiple matches draw_multi_matches(d, mol, atom_idx_list, r_min=0.3, r_dist=0.12, relative_bond_width=0.5, color_list=color_dict.values(), line_width=2) # adding the molecule d.DrawMolecule(mol) d.FinishDrawing() # displaying the canvas Image.open(io.BytesIO(d.GetDrawingText()))RNNs: How To Implement A Basic RNN Read In, Clean, And Split The Data# Read in data and split into training and test set # NOTE: we are NOT cleaning the data import numpy as np import pandas as pd from sklearn.model_selection import train_test_split pd.set_option('display.max_colwidth', 1000) messages = pd.read_csv('../../../data/spam.csv', encoding='latin-1') messages = messages.drop(labels = ["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis = 1) messages.columns = ["label", "text"] labels = np.where(messages['label']=='spam', 1, 0) X_train, X_test, y_train, y_test = train_test_split(messages['text'], labels, test_size=0.2)Prep Data For Modeling# Install keras !pip install -U keras # Import the tools we will need from keras from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences # Initialize and fit the tokenizer tokenizer = Tokenizer() tokenizer.fit_on_texts(X_train) # Use that tokenizer to transform the text messages in the training and test sets X_train_seq = tokenizer.texts_to_sequences(X_train) X_test_seq = tokenizer.texts_to_sequences(X_test) # What do these sequences look like? X_train_seq[0] # Pad the sequences so each sequence is the same length X_train_seq_padded = pad_sequences(X_train_seq, 50) X_test_seq_padded = pad_sequences(X_test_seq, 50) # What do these padded sequences look like? 
X_train_seq_padded[0]Build Model# Import the tools needed from keras and define functions to calculate recall and precision import keras.backend as K from keras.layers import Dense, Embedding, LSTM from keras.models import Sequential def recall_m(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision_m(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision # Construct a simple RNN model model = Sequential() model.add(Embedding(len(tokenizer.index_word)+1, 32)) model.add(LSTM(32, dropout=0, recurrent_dropout=0)) model.add(Dense(32, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.summary() # Compile the model model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', precision_m, recall_m]) # Fit the RNN model history = model.fit(X_train_seq_padded, y_train, batch_size=32, epochs=10, validation_data=(X_test_seq_padded, y_test)) # Plot the evaluation metrics by each epoch for the model to see if we are over or underfitting import matplotlib.pyplot as plt for i in ['accuracy', 'precision_m', 'recall_m']: acc = history.history[i] val_acc = history.history['val_{}'.format(i)] epochs = range(1, len(acc) + 1) plt.figure() plt.plot(epochs, acc, label='Training Accuracy') plt.plot(epochs, val_acc, label='Validation Accuracy') plt.title('Results for {}'.format(i)) plt.legend() plt.show()Linear regression In this section, we illustrate how to perform linear regression using scikit-learn. 
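Before walking through the scikit-learn workflow below, here is a minimal sketch (not part of the original notebook) of what `LinearRegression` actually computes: the ordinary least-squares coefficients that minimize ||Xw - y||^2. On a small synthetic dataset (the data, seed, and variable names here are illustrative assumptions) the result can be cross-checked against `numpy.linalg.lstsq`:
```python
# Minimal sketch: scikit-learn's LinearRegression vs. the closed-form
# least-squares solution from numpy.linalg.lstsq (synthetic data only).
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))                 # 100 samples, 3 features
true_w = np.array([1.5, -2.0, 0.5])
y = X @ true_w + 3.0 + rng.normal(scale=0.1, size=100)  # intercept 3.0 plus noise

# scikit-learn fit
model = LinearRegression().fit(X, y)

# least squares on the design matrix augmented with a column of ones
X1 = np.column_stack([X, np.ones(len(X))])
w_lstsq, *_ = np.linalg.lstsq(X1, y, rcond=None)

print(model.coef_, model.intercept_)          # approximately [1.5, -2.0, 0.5] and 3.0
print(w_lstsq)                                # same values, intercept last
```
In the cells below, the same estimator is combined with `PolynomialFeatures` to fit polynomials of increasing degree to a 1d regression problem.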
Install necessary librariesSee [this notebook](https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/pyprobml_setup.ipynb) for detailed setup instructions.# Standard Python libraries from __future__ import absolute_import, division, print_function, unicode_literals import os import time import numpy as np import glob import matplotlib.pyplot as plt import PIL import imageio from IPython import display import sklearn import seaborn as sns; sns.set(style="ticks", color_codes=True) import pandas as pd pd.set_option('precision', 2) # 2 decimal places pd.set_option('display.max_rows', 20) pd.set_option('display.max_columns', 30) pd.set_option('display.width', 100) # wide windows # Check we can plot stuff plt.figure() plt.plot(range(10))Linear regression in 1dfrom sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.preprocessing import MinMaxScaler import sklearn.metrics from sklearn.metrics import mean_squared_error as mse def make_1dregression_data(n=21): np.random.seed(0) xtrain = np.linspace(0.0, 20, n) xtest = np.arange(0.0, 20, 0.1) sigma2 = 4 w = np.array([-1.5, 1/9.]) fun = lambda x: w[0]*x + w[1]*np.square(x) ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \ np.sqrt(sigma2) ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \ np.sqrt(sigma2) return xtrain, ytrain, xtest, ytest xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21) #Rescaling data scaler = MinMaxScaler(feature_range=(-1, 1)) Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1)) Xtest = scaler.transform(xtest.reshape(-1, 1)) degs = np.arange(1, 21, 1) ndegs = np.max(degs) mse_train = np.empty(ndegs) mse_test = np.empty(ndegs) ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray) ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray) for deg in degs: model = LinearRegression() poly_features = PolynomialFeatures(degree=deg, include_bias=False) Xtrain_poly = poly_features.fit_transform(Xtrain) model.fit(Xtrain_poly, ytrain) ytrain_pred = model.predict(Xtrain_poly) ytrain_pred_stored[deg-1] = ytrain_pred Xtest_poly = poly_features.transform(Xtest) ytest_pred = model.predict(Xtest_poly) mse_train[deg-1] = mse(ytrain_pred, ytrain) mse_test[deg-1] = mse(ytest_pred, ytest) ytest_pred_stored[deg-1] = ytest_pred # Plot MSE vs degree fig, ax = plt.subplots() mask = degs <= 15 ax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test') ax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train') ax.legend(loc='upper right', shadow=True) plt.xlabel('degree') plt.ylabel('mse') #save_fig('polyfitVsDegree.pdf') plt.show() # Plot fitted functions chosen_degs = [1, 2, 14, 20] fig, axes = plt.subplots(2,2, figsize=(10,7)) axes = axes.reshape(-1) for i, deg in enumerate(chosen_degs): #fig, ax = plt.subplots() ax = axes[i] ax.scatter(xtrain, ytrain) ax.plot(xtest, ytest_pred_stored[deg-1]) ax.set_ylim((-10, 15)) ax.set_title('degree {}'.format(deg)) #save_fig('polyfitDegree{}.pdf'.format(deg)) plt.show() # Plot residuals chosen_degs = [1, 2, 14, 20] fig, axes = plt.subplots(2, 2, figsize=(10,7)) axes = axes.reshape(-1) for i, deg in enumerate(chosen_degs): #fig, ax = plt.subplots(figsize=(3,2)) ax = axes[i] ypred = ytrain_pred_stored[deg-1] residuals = ytrain - ypred ax.plot(ypred, residuals, 'o') ax.set_title('degree {}'.format(deg)) #save_fig('polyfitDegree{}Residuals.pdf'.format(deg)) plt.show() # Plot fit vs actual chosen_degs = [1, 2, 14, 20] fig, axes = plt.subplots(2,2, figsize=(10,7)) axes = 
axes.reshape(-1) for i, deg in enumerate(chosen_degs): for train in [True, False]: if train: ytrue = ytrain ypred = ytrain_pred_stored[deg-1] dataset = 'Train' else: ytrue = ytest ypred = ytest_pred_stored[deg-1] dataset = 'Test' #fig, ax = plt.subplots() ax = axes[i] ax.scatter(ytrue, ypred) ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3") ax.set_xlabel('true y') ax.set_ylabel('predicted y') r2 = sklearn.metrics.r2_score(ytrue, ypred) ax.set_title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2)) #save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset)) plt.show()Linear regression for boston housingimport sklearn.datasets import sklearn.linear_model as lm from sklearn.model_selection import train_test_split boston = sklearn.datasets.load_boston() X = boston.data y = boston.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42) scaler = sklearn.preprocessing.StandardScaler() scaler = scaler.fit(X_train) Xscaled = scaler.transform(X_train) # equivalent to Xscaled = scaler.fit_transform(X_train) # Fit model linreg = lm.LinearRegression() linreg.fit(Xscaled, y_train) # Extract parameters coef = np.append(linreg.coef_, linreg.intercept_) names = np.append(boston.feature_names, 'intercept') print(names) print(coef) # Assess fit on test set Xtest_scaled = scaler.transform(X_test) ypred = linreg.predict(Xtest_scaled) plt.figure() plt.scatter(y_test, ypred) plt.xlabel("true price") plt.ylabel("predicted price") mse = sklearn.metrics.mean_squared_error(y_test, ypred) plt.title("Boston housing, rmse {:.2f}".format(np.sqrt(mse))) xs = np.linspace(min(y), max(y), 100) plt.plot(xs, xs, '-') #save_fig("boston-housing-predict.pdf") plt.show()Ridge regression In this section, we illustrate how to perform ridge regression using scikit-learn.from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import Ridge from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error as mse xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21) #Rescaling data scaler = MinMaxScaler(feature_range=(-1, 1)) Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1)) Xtest = scaler.transform(xtest.reshape(-1, 1)) deg = 14 alphas = np.logspace(-10, 1.3, 10) nalphas = len(alphas) mse_train = np.empty(nalphas) mse_test = np.empty(nalphas) ytest_pred_stored = dict() for i, alpha in enumerate(alphas): model = Ridge(alpha=alpha, fit_intercept=False) poly_features = PolynomialFeatures(degree=deg, include_bias=False) Xtrain_poly = poly_features.fit_transform(Xtrain) model.fit(Xtrain_poly, ytrain) ytrain_pred = model.predict(Xtrain_poly) Xtest_poly = poly_features.transform(Xtest) ytest_pred = model.predict(Xtest_poly) mse_train[i] = mse(ytrain_pred, ytrain) mse_test[i] = mse(ytest_pred, ytest) ytest_pred_stored[alpha] = ytest_pred # Plot MSE vs degree fig, ax = plt.subplots() mask = [True]*nalphas ax.plot(alphas[mask], mse_test[mask], color = 'r', marker = 'x',label='test') ax.plot(alphas[mask], mse_train[mask], color='b', marker = 's', label='train') ax.set_xscale('log') ax.legend(loc='upper right', shadow=True) plt.xlabel('L2 regularizer') plt.ylabel('mse') #save_fig('polyfitVsRidge.pdf') plt.show() # Plot fitted functions chosen_alphas = alphas[[0,5,8]] for i, alpha in enumerate(chosen_alphas): fig, ax = plt.subplots() ax.scatter(xtrain, ytrain) ax.plot(xtest, ytest_pred_stored[alpha]) plt.title('L2 regularizer {:0.5f}'.format(alpha)) #save_fig('polyfitRidge{}.pdf'.format(i)) plt.show()Dataset Info: COVID-19 
RADIOGRAPHYA team of researchers from Qatar University (Doha, Qatar) and the University of Dhaka (Bangladesh), together with collaborators from Pakistan and Malaysia and with medical doctors, created a database of chest X-ray images for COVID-19 positive cases along with Normal and Viral Pneumonia images. Citation:- "Can AI help in screening Viral and COVID-19 pneumonia?", IEEE Access, Vol. 8, 2020, pp. 132665-132676. [Paper link](https://ieeexplore.ieee.org/document/9144185)- "Exploring the Effect of Image Enhancement Techniques on COVID-19 Detection using Chest X-ray Images", Computers in Biology and Medicine, 2021. [Paper Link](https://doi.org/10.1016/j.compbiomed.2021.104319) Acknowledgments:Thanks to the Italian Society of Medical and Interventional Radiology (SIRM) for publicly providing the COVID-19 Chest X-Ray dataset, to the Valencia Region Image Bank (BIMCV) for the padchest dataset, and to everyone who took the initiative to gather images from articles and online resources. Thanks also to the Chest X-Ray Images (pneumonia) database on Kaggle and the Radiological Society of North America (RSNA) Kaggle database for providing an excellent X-ray database of normal, lung opacity, viral, and bacterial pneumonia images. And a big thanks to our collaborators! Dataset Link:[COVID-19 Radiography Database](https://www.kaggle.com/tawsifurrahman/covid19-radiography-database) Motivation:The coronavirus epidemic has afflicted millions of people around the world and caused thousands of deaths. Any tool that enables rapid and accurate detection of COVID-19 infection can be extremely beneficial to healthcare providers. X-ray imaging is an easily accessible method for diagnosing COVID-19 patients, according to Chowdhury et al. (2020). Although regular chest X-ray (CXR) scans can help with early detection of suspected cases, the images of patients with different viral pneumonias look very similar, so radiologists have a hard time distinguishing COVID-19 from other viral pneumonias. The goal of this database and the current study is to see how useful artificial intelligence (AI) can be in detecting COVID-19 from chest X-ray images quickly and accurately. Contents:1. Reading the Dataset & Selecting 2 classes 2. EDA 3. Data Augmentation 4. Train-Test Split 5. Model Training 6. Fine Tuning the Model 7. Plotting Losses 1. Reading the Dataset There are a total of 21,165 samples, classified into four categories:1. COVID-192. Lung Opacity3. Normal4. Viral PneumoniaThe images are all in the Portable Network Graphics (PNG) format and are 299x299 pixels in size.
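As a quick sanity check of the class balance (a minimal sketch, not from the original notebook; it assumes the archive has been extracted to the `/content/COVID-19_Radiography_Dataset` folder used later in this notebook, with one sub-directory per class), the per-class image counts can be listed as follows:
```python
# Hypothetical sanity check: count the image files in each class folder.
# DATASET_PATH and the class folder names ('COVID', 'Normal') follow the code
# used later in this notebook; adjust the pattern if your local layout differs.
import os
import glob

DATASET_PATH = '/content/COVID-19_Radiography_Dataset'
for cls_name in ['COVID', 'Normal']:
    n_images = len(glob.glob(os.path.join(DATASET_PATH, cls_name, '*')))
    print(f'{cls_name}: {n_images} images')
```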
The database presently contains 3,616 COVID-19 positive cases, 10,192 Normal, 6,012 Lung Opacity (Non-COVID lung infection), and 1,345 Viral Pneumonia pictures, according to the most recent update.We will only train our model on two of these four classes, namely the 'Normal' and 'COVID' classes.# Unzip the Dataset from zipfile import ZipFile file_name = "/content/drive/MyDrive/Kaggle/COVID-19 Prediction/COVID-19.zip" with ZipFile(file_name, 'r') as zip: zip.extractall() print('Done') # Import the required modules import datetime import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt %matplotlib inline matplotlib.rcParams['font.size'] = 8 from skimage import img_as_float from skimage import exposure import plotly.graph_objects as go import os import glob import random from skimage import io # To preprocess the images from distutils.file_util import copy_file import seaborn as sns import cv2 import keras from keras.models import load_model from keras import backend as K import tensorflow as tf from skimage.transform import rescale from keras_preprocessing.image import ImageDataGenerator import warnings warnings.simplefilter('ignore') # Set dataset path DATASET_PATH = '/content/COVID-19_Radiography_Dataset' # There are two classes of images that we will deal with cls = ['COVID', 'Normal']2. Exploratory Data Analysiscovid_path = os.path.join(DATASET_PATH, cls[0], '*') covid = glob.glob(covid_path) covid = io.imread(covid[0]) normal_path = os.path.join(DATASET_PATH, cls[1], '*') normal = glob.glob(normal_path) normal = io.imread(normal[0]) f, axes = plt.subplots(1, 2, sharey=True) f.set_figwidth(10) axes[0].imshow(covid, cmap='gray') axes[1].imshow(normal, cmap='gray') print(f'Image shape for COVID dataset is: {covid.shape}') print(f'Image shape for Normal dataset is: {normal.shape}') print(f'Number of COVID Images: {len(os.listdir(covid_path[:-2]))} \ \nNumber of Non-COVID Images: {len(os.listdir(normal_path[:-2]))}')Number of COVID Images: 3616 Number of Non-COVID Images: 10192> This is an Imbalanced Dataset!! >> To deal with this issue, we have applied ***focal loss*** later in the section. Preprocessing Images# Histogram Equalization def plot_img_and_hist(image, axes, bins=256): """Plot an image along with its histogram and cumulative histogram. 
""" image = img_as_float(image) ax_img, ax_hist = axes ax_cdf = ax_hist.twinx() # Display image ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() # Display histogram ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black') ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') ax_hist.set_xlim(0, 1) ax_hist.set_yticks([]) # Display cumulative distribution img_cdf, bins = exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') ax_cdf.set_yticks([]) return ax_img, ax_hist, ax_cdf # Load a normal image img = normal # Contrast stretching p2, p98 = np.percentile(img, (2, 98)) img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98)) # Equalization img_eq = exposure.equalize_hist(img) # Adaptive Equalization img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03) # Display results fig = plt.figure(figsize=(12, 8)) axes = np.zeros((2, 4), dtype=np.object) axes[0, 0] = fig.add_subplot(2, 4, 1) for i in range(1, 4): axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0]) for i in range(0, 4): axes[1, i] = fig.add_subplot(2, 4, 5+i) ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0]) ax_img.set_title('Low contrast image') y_min, y_max = ax_hist.get_ylim() ax_hist.set_ylabel('Number of pixels') ax_hist.set_yticks(np.linspace(0, y_max, 5)) ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1]) ax_img.set_title('Contrast stretching') ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2]) ax_img.set_title('Histogram equalization') ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3]) ax_img.set_title('Adaptive equalization') ax_cdf.set_ylabel('Fraction of total intensity') ax_cdf.set_yticks(np.linspace(0, 1, 5)) # prevent overlap of y-axis labels fig.tight_layout() plt.show() # Load a covid image img = covid # Contrast stretching p2, p98 = np.percentile(img, (2, 98)) img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98)) # Equalization img_eq = exposure.equalize_hist(img) # Adaptive Equalization img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03) # Display results fig = plt.figure(figsize=(12, 8)) axes = np.zeros((2, 4), dtype=np.object) axes[0, 0] = fig.add_subplot(2, 4, 1) for i in range(1, 4): axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0]) for i in range(0, 4): axes[1, i] = fig.add_subplot(2, 4, 5+i) ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0]) ax_img.set_title('Low contrast image') y_min, y_max = ax_hist.get_ylim() ax_hist.set_ylabel('Number of pixels') ax_hist.set_yticks(np.linspace(0, y_max, 5)) ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1]) ax_img.set_title('Contrast stretching') ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2]) ax_img.set_title('Histogram equalization') ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3]) ax_img.set_title('Adaptive equalization') ax_cdf.set_ylabel('Fraction of total intensity') ax_cdf.set_yticks(np.linspace(0, 1, 5)) # prevent overlap of y-axis labels fig.tight_layout() plt.show()3. 
Data Augmentation# Create the list of paths to the images # Lists for access paths listCovidPaths = [] listNormalPaths = [] # Get covid images files paths for root, directories, files in os.walk(covid_path[:-2]): for name in files: listCovidPaths.append(os.path.join(root, name)) # Get normal images files paths for root, directories, files in os.walk(normal_path[:-2]): for name in files: listNormalPaths.append(os.path.join(root, name)) # Shuffle lists for random train / test random.shuffle(listCovidPaths) random.shuffle(listNormalPaths)4. Train-Test Split# Create new folders for image training # main folder !mkdir ./Data/ # Train data folders !mkdir ./Data/Train/ !mkdir ./Data/Train/Covid/ !mkdir ./Data/Train/Normal/ # Test data folders !mkdir ./Data/Test/ !mkdir ./Data/Test/Covid/ !mkdir ./Data/Test/Normal/ # Paths to covid images folders pathCovidTrain = './Data/Train/Covid/' pathCovidTest = './Data/Test/Covid/' # Paths to normal images folders pathNormalTrain = './Data/Train/Normal/' pathNormalTest = './Data/Test/Normal/' # Move files to new folders in the 80:20 ratio len_covid = len(os.listdir(covid_path[:-2])) len_normal = len(os.listdir(normal_path[:-2])) covid_80 = round(len(os.listdir(covid_path[:-2])) * 0.8) # 80% of the COVID data normal_80 = round(len(os.listdir(normal_path[:-2])) * 0.8) # 80% of the Normal data # Move covid images files to new folders for i in range(len_covid): if i < covid_80: copy_file(listCovidPaths[i], pathCovidTrain) else : copy_file(listCovidPaths[i], pathCovidTest) # Move normal images files to new folders for i in range(len_normal): if i < normal_80: copy_file(listNormalPaths[i], pathNormalTrain) else: copy_file(listNormalPaths[i], pathNormalTest) # datagen = ImageDataGenerator( # featurewise_center = True, # featurewise_std_normalization = True, # rotation_range = 10, # width_shift_range = 0, # height_shift_range = 0, # vertical_flip = False # ) # def preprocess_img(img, mode): # # standarising the image # img = (img - img.min()) / (img.max() - img.min()) # if mode == 'train': # img = datagen.random_transform(img) # return img # Definition of data generators # for train data trainGenerator = tf.keras.preprocessing.image.ImageDataGenerator( rescale = 1./255, rotation_range = 20, zoom_range = 0.2, shear_range = 0.2, featurewise_center = True, featurewise_std_normalization = True, width_shift_range = 0, height_shift_range = 0, vertical_flip = False, fill_mode = 'nearest' ) # for test data testGenerator = tf.keras.preprocessing.image.ImageDataGenerator( rescale = 1./255, ) # Build data generators # Build for train data pathTrainDir = './Data/Train/' trainGeneratorBuild = trainGenerator.flow_from_directory( pathTrainDir, target_size = (299, 299), class_mode = 'binary', batch_size = 16, shuffle = True ) # Build for test data pathTestDir = './Data/Test/' testGeneratorBuild = testGenerator.flow_from_directory( pathTestDir, target_size = (299, 299), class_mode = 'binary', batch_size = 16, shuffle = True )Found 11047 images belonging to 2 classes. Found 2761 images belonging to 2 classes.5. 
Model Traning Train Model from Scratch# COVID and Normal dataset directory BASIS_DIR = '/content/COVID-19_Radiography_Dataset' CLASSES = ["COVID", "Normal"] #Image augmentation process: train_datagen = ImageDataGenerator( rescale = 1./255, rotation_range = 20, horizontal_flip = True, vertical_flip = False, shear_range = 0.2, zoom_range = 0.2, width_shift_range = 0.2, height_shift_range = 0.2, fill_mode = 'nearest', #split dataset to training(80%) and validation(20%): validation_split = 0.2 ) # Training dataset and Validation dataset: train_data = train_datagen.flow_from_directory( directory=BASIS_DIR, target_size=(299, 299), batch_size=32, shuffle=True, class_mode='binary', subset='training', classes=CLASSES ) val_data = train_datagen.flow_from_directory( directory=BASIS_DIR, target_size=(299, 299), batch_size=32, shuffle=True, class_mode='binary', subset='validation', classes=CLASSES ) #Using sequential model: model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(299, 299, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(256, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.summary() # Define our custom loss function def focal_loss(y_true, y_pred): gamma = 2.0 alpha = 0.25 pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred)) pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred)) return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))-K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. 
- pt_0)) # Compile our model adam = tf.keras.optimizers.Adam(lr=0.0001) model.compile(loss=[focal_loss], metrics=["accuracy"], optimizer=adam) # Training process: start = datetime.datetime.now() number_epochs = 25 history = model.fit(train_data, epochs = number_epochs, validation_data = val_data, verbose = 1) end = datetime.datetime.now() print(f'Total Training Time: {end - start}') # verbose = 0 => silent, # = 1 => progress bar, # = 2 => one line per epoch model.save('my_model_1.h5') # Plot training accuracy and validation accuracy plt.plot(history.history['accuracy'], 'r', label='Accuracy Training') plt.plot(history.history['val_accuracy'], 'b', label='Accuracy Validation') plt.title('Accuracy Training and Validation') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(loc=0) plt.show() # Plotting training loss and validation loss plt.plot(history.history['loss'], 'r', label='Loss Training') plt.plot(history.history['val_loss'], 'b', label='Loss Validation') plt.title('Loss Training and Validation') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(loc=0) plt.show()Train Model using Trasfer Learning# Define the Keras model # Use InceptionResNetV2 Keras model engine = tf.keras.applications.InceptionResNetV2( # Freezing the weights of the top layer in the InceptionResNetV2 pre-traiined model include_top = False, # Use Imagenet weights weights = 'imagenet', # Define input shape to 224x224x3 input_shape = (224, 224, 3), # Set classifier activation to sigmoid classifier_activation = 'sigmoid' ) # Define the Keras model outputs x = tf.keras.layers.GlobalAveragePooling2D(name = 'avg_pool')(engine.output) out = tf.keras.layers.Dense(1, activation = 'sigmoid', name = 'dense_output')(x) # Build the Keras model model = tf.keras.models.Model(inputs = engine.input, outputs = out)(i) Loss: binary_crossentropy# Compile the model model.compile( # Set optimizer to Adam(0.001) optimizer = tf.keras.optimizers.Adam(0.001), # Set loss to binary crossentropy loss = 'binary_crossentropy', # Set metrics to accuracy metrics = ['accuracy'] ) # Fit Keras model start = datetime.datetime.now() history = model.fit_generator( # Use train generator trainGeneratorBuild, # Set epochs to 12 epochs = 12, # Set steps per epochs to 300 steps_per_epoch = 300, # Set verbose to 1 verbose = 1 ) end = datetime.datetime.now() print(f'Total Training Time: {end - start}') model.save('my_model_2.h5') # Create a graph representing the loss # Get loss data lossG = history.history['loss'] accuracyG = history.history['accuracy'] epochs = [i for i in range(len(lossG))] # Create graph fig = go.Figure() fig.add_trace( go.Scatter( x = epochs, y = lossG, name = 'Loss', marker = dict( color = 'rgba(250,50,50,1)' ) ) ) fig.add_trace( go.Scatter( x = epochs, y = accuracyG, name = 'Accuracy', marker = dict( color = 'rgba(50,250,50,1)' ) ) ) fig.update_layout( title = 'Model loss', template = 'plotly_white' ) fig.update_xaxes( title_text='Epochs' ) fig.update_yaxes( title_text='Loss / Accuracy values' ) fig.show() # Checke the accuracy of the Keras model on the test data testLoss, testAccuracy = model.evaluate( # Use test generator testGeneratorBuild, # Set verbose to 1 verbose = 1 ) # Print results print('Accuracy of model : ' + str(round(testAccuracy,4)*100) + ' %') print('Loss of model : ' + str(round(testLoss,4)))173/173 [==============================] - 35s 182ms/step - loss: 0.3006 - accuracy: 0.8725 Accuracy of model : 87.25 % Loss of model : 0.30066. 
Fine Tuning the Model (ii) Loss: Focal LossBecause our dataset is unbalanced, we can utilise this strategy to balance the weighting of our training instances.Instead of giving all training examples the same weight, focus loss gives the well-classified instances a lower weight. As a result, more training emphasis is placed on data that is difficult to classify!If we have a data imbalance in practise, our majority class will soon become well-classified because we have considerably more data for it. As a result, we may use the focus loss to give those minority class instances more relative weight during training, ensuring that we obtain high accuracy on our minority class as well.# Define our custom loss function def focal_loss(y_true, y_pred): gamma = 2.0 alpha = 0.25 pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred)) pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred)) return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))-K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0)) # Compile our model adam = tf.keras.optimizers.Adam(lr=0.0001) model.compile(loss=[focal_loss], metrics=["accuracy"], optimizer=adam) # Fit Keras model start = datetime.datetime.now() history = model.fit_generator( # Use train generator trainGeneratorBuild, # Set epochs to 20 epochs = 20, # Set steps per epochs to 300 steps_per_epoch = 300, # batch_size # Set verbose to 1 verbose = 1 ) end = datetime.datetime.now() print(f'Total Training Time: {end - start}') model.save('my_model_3.h5')7. Plot Loss# Create a graph representing the loss # Get loss data lossG = history.history['loss'] accuracyG = history.history['accuracy'] epochs = [i for i in range(len(lossG))] # Create graph fig = go.Figure() fig.add_trace( go.Scatter( x = epochs, y = lossG, name = 'Loss', marker = dict( color = 'rgba(250,50,50,1)' ) ) ) fig.add_trace( go.Scatter( x = epochs, y = accuracyG, name = 'Accuracy', marker = dict( color = 'rgba(50,250,50,1)' ) ) ) fig.update_layout( title = 'Model loss', template = 'plotly_white' ) fig.update_xaxes( title_text='Epochs' ) fig.update_yaxes( title_text='Loss / Accuracy values' ) fig.show() # Check the accuracy of the Keras model on the test data testLoss, testAccuracy = model.evaluate( # Use test generator testGeneratorBuild, # Set verbose to 1 verbose = 1 ) # Print results print('Accuracy of model : ' + str(round(testAccuracy,4)*100) + ' %') print('Loss of model : ' + str(round(testLoss,4)))Assigment week 1import pandas as pd import numpy as np %load_ext autoreload %autoreload 2 %matplotlib inline me_m = pd.read_csv(r"C:\Users\fiu126\Desktop\Coursera\Python\Introduction to Portfolio Construction and Analysis with Python\data\Portfolios_Formed_on_ME_monthly_EW.csv", header=0, index_col=0, na_values=-99.99) rets = me_m[['Lo 20', 'Hi 20']] #rets.columns = ['SmallCap', 'LargeCap'] rets = rets/100 rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M') rets.head() #Retorno Anual n_meses=rets.shape[0] retorno_mensal= (1+rets).prod()**(1/n_meses)-1 retorno_mensal retorno_anual=(1+retorno_mensal)**12-1 retorno_anual*100 #Q1 - 15.20 #Q3 - 9.85 #Volatilidade Anual Volatilidade_anual=rets.std()*np.sqrt(12) Volatilidade_anual*100 #Q2 - 33.6 #Q4 - 19.51 # periodo entre 1999 e 2015 rets["1999":"2015"].shape rets_2=rets["1999":"2015"] #Retorno Anual n_meses=rets_2.shape[0] retorno_mensal= (1+rets_2).prod()**(1/n_meses)-1 retorno_mensal retorno_anual=(1+retorno_mensal)**12-1 retorno_anual*100 #Q5 - 11.44 #Q7 -6.29 #Volatilidade Anual 
Volatilidade_anual=rets_2.std()*np.sqrt(12) Volatilidade_anual*100 #Q6 - 22.89 #Q8 - 17.27 # Drawdown ### 1) Compute the wealth_index ### 2) Compute previous peakes ### 3) Comput drawdown Wealth_Index=1000*(1+rets["1999":"2015"]["Lo 20"]).cumprod() Wealth_Index.plot() Previous_Peaks=Wealth_Index.cummax() Previous_Peaks.plot() Drawdown=-(Wealth_Index-Previous_Peaks)/Previous_Peaks Drawdown.plot() var_list=[Wealth_Index,Previous_Peaks,Wealth_Index-Previous_Peaks] comparison=pd.concat(var_list,axis=1) comparison.columns=['Wealth_Index','Previous_Peaks','Wealth_Index-Previous_Peaks'] comparison.plot(title="Lo 20") Drawdown.max()*100 #Q9 - 62.48 Drawdown.idxmax() #Q10 - 2009-02 Wealth_Index=1000*(1+rets["1999":"2015"]["Hi 20"]).cumprod() rets.head() Previous_Peaks=Wealth_Index.cummax() Previous_Peaks.plot() Drawdown=-(Wealth_Index-Previous_Peaks)/Previous_Peaks Drawdown.plot() var_list=[Wealth_Index,Previous_Peaks,Wealth_Index-Previous_Peaks] comparison=pd.concat(var_list,axis=1) comparison.columns=['Wealth_Index','Previous_Peaks','Wealth_Index-Previous_Peaks'] comparison.plot(title="Hi 20") Drawdown.max()*100 #Q11 - 55.27 Drawdown.idxmax() #Q12 - 2009-02 hfi=pd.read_csv(r"C:\Users\fiu126\Desktop\Coursera\Python\Introduction to Portfolio Construction and Analysis with Python\data\edhec-hedgefundindices.csv",header=0,index_col=0,parse_dates=True) hfi = hfi/100 hfi.index =hfi.index.to_period('M') hfi=hfi["2009":"2018"] hfi.head() hfi.shape hfi[hfi<0].std(ddof=0).sort_values() #Q13 - Short Selling def skewness(r): """ Alternative to scipy.stats.skew() Computes the skewness of the supplied Series or DataFrame Returns a float or a Series """ demeaned_r = r - r.mean() # use the population standard deviation, so set dof=0 sigma_r = r.std(ddof=0) exp = (demeaned_r**3).mean() return exp/sigma_r**3 skewness(hfi).sort_values() #Q14 - Equity Market Neutral hfi=pd.read_csv(r"C:\Users\fiu126\Desktop\Coursera\Python\Introduction to Portfolio Construction and Analysis with Python\data\edhec-hedgefundindices.csv",header=0,index_col=0,parse_dates=True) hfi = hfi/100 hfi.index =hfi.index.to_period('M') hfi=hfi["2000":"2018"] hfi.head() def kurtosis(r): """ Alternative to scipy.stats.kurtosis() Computes the kurtosis of the supplied Series or DataFrame Returns a float or a Series """ demeaned_r = r - r.mean() # use the population standard deviation, so set dof=0 sigma_r = r.std(ddof=0) exp = (demeaned_r**4).mean() return exp/sigma_r**4 kurtosis(hfi).sort_values() #Q16 - Fixed Income Arbitrage![Atmospheric Toolbox](https://atmospherictoolbox.org/media/filer_public_thumbnails/filer_public/6d/35/6d35dffd-43f1-43ec-bff6-5aa066c8aabc/toolbox-header.jpg__1080x416_q85_subsampling-2.jpg) Atmospheric Toolbox - HARP comparisonThis practical will show you how to compare Sentinel-5P satellite data against ground based data by making use of the [ESA Atmospheric Toolbox](https://atmospherictoolbox.org/).In this exercise we will be focusing primarily on HARP as the toolset to do this. We will use Sentinel-5P Level2 NO2 data and compare this against both a MAXDOAS and Pandora instrument that is located in Athens, Greece.Both MAXDOAS and Pandora are DOAS instruments. 
MAXDOAS is a MAXDOAS type instrument and Pandora uses the DirectSun approach.You can find an explanation on the different measurement techniques on the [FRM4DOAS website](https://frm4doas.aeronomie.be/index.php/project-overview/doas)![doas-techniques](https://frm4doas.aeronomie.be/ProjectDir/doasinstruments.png)The main difference to be aware of is the altitude range for which the measurements are applicable.The MAXDOAS measurements only provide information on the troposphere, so we will use this data to compare against the tropospheric NO2 column information from S5P (`tropospheric_NO2_column_number_density` variable).The Pandora measurements, on the other hand, provide information on the total column. So we will use that data to compare against the total NO2 column from S5P (`NO2_column_number_density` variable). For this exercise we will look at data from February 2020.The Sentinel-5P data was retrieved from the [Sentinel-5P Pre-Operations Data Hub](https://s5phub.copernicus.eu/dhus//home).The MAXDOAS data was retrieved from [NDACC](http://www.ndaccdemo.org) and the Pandora data from [Pandonia](https://www.pandonia-global-network.org). Both datasets are also available through [EVDC](https://evdc.esa.int). S5P vs. MAXDOAS NO2 comparisonIn order to perform a comparison for the full month of February 2020, we would need a full month of Sentinel-5P data.Even if we already filter for only those products that have data over Athens, we would still end up with about 48 orbits (note that, because orbits slightly overlap, we have multiple overpasses within a single day for some days).Since we are only interested in the data around Athens, we ideally don't want keep the full 450MB for each L2 product, but only the satellite data around the area.A convenient first step is therefore to create so-called _overpass files_. We can do this with HARP, by providing a geographic filter on the location of the MAXDOAS instrument, which is at 38.05 latitude and 23.86 longitude.As an example we will perform such a filter on the NO2 data from the regridding exercise from yesterday (which was data from 15 September 2020).import csv import datetime import harp import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import Normalize import cartopy.crs as ccrs filename = "../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T002200_20200915T020329_15147_01_010302_20200916T170359.nc" # since the area_covers_point filter is quite slow, we add some explicit filters # on latitude (which is fast) to already exclude a large part of the product operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" try: overpass = harp.import_product(filename, operations) except harp.NoDataError: print('No overpasses found')No overpasses foundWe see that this product did not contain any matches. 
If that happens you get an error which you can catch using this `try`/`catch` approach.If we try this filter for a product that actually does contain an overpass we get:filename = "../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc" operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" try: overpass = harp.import_product(filename, operations) except harp.NoDataError: print('No overpasses found') print(overpass)source product = 'S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc' history = "2020-11-16T12:17:13Z [harp-1.12] harp.import_product('../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc',operations='latitude>36;latitude<40;area_covers_point(38.05, 23.86)')" int scan_subindex {time=1} double datetime_start {time=1} [seconds since 2010-01-01] float datetime_length [s] int orbit_index long validity {time=1} float latitude {time=1} [degree_north] float longitude {time=1} [degree_east] float latitude_bounds {time=1, 4} [degree_north] float longitude_bounds {time=1, 4} [degree_east] float sensor_latitude {time=1} [degree_north] float sensor_longitude {time=1} [degree_east] float sensor_altitude {time=1} [m] float solar_zenith_angle {time=1} [degree] float solar_azimuth_angle {time=1} [degree] float sensor_zenith_angle {time=1} [degree] float sensor_azimuth_angle {time=1} [degree] doub[...]You can see that we only got one measurement for each variable.Instead of reading this data in Python, we actually want to have this data stored as a file on disk.This allows us to reuse it later as input for our comparisons (and we can then throw away the original L2 products).To do this we could use a combination of `harp.import_product()` and `harp.export_product()` in Python.However, it is actually faster to call the `harpconvert` tool from the command line.You can call command line tools from within a python notebook by prefixing the command with a `!`.This is an IPython features that is described in the [documentation](https://ipython.readthedocs.io/en/stable/interactive/python-ipython-diff.htmlshell-assignment).We will use this several times in this exercise.Be aware that the commands that we will execute are Linux-style commands which will work on Linux and macOS, but may not work on Windows (without some modification to path references and/or usage of quotes).To convert the product using `harpconvert` we can use:!harpconvert -a "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" ../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc s5p_l2_no2_15153_athens.ncAnd we can then read in this overpass file in Python using:overpass = harp.import_product("s5p_l2_no2_15153_athens.nc") print(overpass)source product = 'S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc' history = "2020-11-16T12:17:26Z [harp-1.12] harpconvert -a 'latitude>36;latitude<40;area_covers_point(38.05, 23.86)' ../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc s5p_l2_no2_15153_athens.nc" int scan_subindex {time=1} double datetime_start {time=1} [seconds since 2010-01-01] float datetime_length [s] int orbit_index long validity {time=1} float latitude {time=1} [degree_north] float longitude {time=1} [degree_east] float latitude_bounds {time=1, 4} [degree_north] 
float longitude_bounds {time=1, 4} [degree_east] float sensor_latitude {time=1} [degree_north] float sensor_longitude {time=1} [degree_east] float sensor_altitude {time=1} [m] float solar_zenith_angle {time=1} [degree] float solar_azimuth_angle {time=1} [degree] float sensor_zenith_angle {time=1} [degree] float sensor_azimuth_angle {time=1} [degr[...]Note that the product contains a `history` attribute that shows how HARP generated the file.HARP will include such history information in each file that it writes, which is very useful for traceability.print(overpass.history)2020-11-16T12:17:26Z [harp-1.12] harpconvert -a 'latitude>36;latitude<40;area_covers_point(38.05, 23.86)' ../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc s5p_l2_no2_15153_athens.ncFor the month of February we already created such overpass files for you, which are available in the `../eodata/sentinel5p/overpass/no2/athens` directory. These files are actually the official overpass files that are used by the [Sentinel-5P Mission Performance Center Validation Facility](http://mpc-vdaf.tropomi.eu).These files contain not just the pixel over Athens itself, but also a range of pixels around that area. This allows the validation experts to investigate other criteria such as the spatial homogeneity of the data.filename = "../eodata/sentinel5p/overpass/no2/athens/S5P_OFFL_L2VONO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211_athens.nc" overpass_11932 = harp.import_product(filename) print(overpass_11932)source product = 'S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc' history = "2020-02-10T16:48:45Z [vdafop-1.8.0] vdafop S5P_OPER_CFG_VDAFOP_00000000T000000_99999999T999999_20191001T140933.EOF S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc\n2020-02-10T16:48:45Z [harp-1.5] harpconvert -a 'point_in_area((39.336,39.312,39.238,39.119,38.958,38.762,36.558,36.193,36.295,36.588,38.762,38.958,39.119,39.238,39.312),(23.775,24.104,24.422,24.714,24.972,25.184,27.145,26.875,20.598,20.385,22.365,22.577,22.835,23.127,23.445))' S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc S5P_OFFL_L2VONO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211_athens.nc" int scan_subindex {time=2735} double datetime_start {time=2735} [seconds since 2010-01-01] float datetime_length [s] int orbit_index long validity {time=2735} float latitude {time=2735} [degree_north] float longitude {t[...]As you can see from the `history` attribute this overpass file was just a filtering of the original proudct using a polygon area; no other HARP operations were performed. 
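For reference, here is a minimal sketch of how such overpass files could be generated in bulk from Python, using the `harp.import_product()`/`harp.export_product()` combination mentioned above instead of `harpconvert` (the February directory layout, glob pattern, and output naming used here are assumptions, not part of the official workflow):
```python
# Sketch: extract Athens overpasses for a month of S5P L2 NO2 products.
# Products without a pixel over the station raise harp.NoDataError and are skipped.
import glob
import os
import harp

operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)"
input_files = sorted(glob.glob("../eodata/sentinel5p/no2/2020/02/*/S5P_*_L2__NO2____*.nc"))

for filename in input_files:
    try:
        overpass = harp.import_product(filename, operations)
    except harp.NoDataError:
        continue  # no pixel over Athens in this orbit
    # derive an output name such as "s5p_l2_no2_<orbit>_athens.nc" from the product name
    orbit = os.path.basename(filename).split("_")[-4]
    harp.export_product(overpass, f"s5p_l2_no2_{orbit}_athens.nc")
```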
We can use the `harp_l2geoscatterplot` from the regridding exercise from yesterday to plot this overpass data:def harp_l2geoscatterplot(product, value, colorrange=None, colortable='jet', size=1): variable = product[value] if colorrange is not None: vmin, vmax = colorrange else: vmin = np.nanmin(variable.data) vmax = np.nanmax(variable.data) fig=plt.figure(figsize=(20, 10)) ax = plt.axes(projection=ccrs.PlateCarree()) img = plt.scatter(product.longitude.data, product.latitude.data, c=variable.data, vmin=vmin, vmax=vmax, cmap=colortable, s=size, transform=ccrs.PlateCarree()) ax.coastlines() cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.04, pad=0.1) cbar.set_label(f'{variable.description} [{variable.unit}]') cbar.ax.tick_params(labelsize=14) plt.show() harp_l2geoscatterplot(overpass_11932, 'tropospheric_NO2_column_number_density', colorrange=(0,0.0001), size=30)Now that we have the satellite data, we can start collocating the data with the MAXDOAS data.What we want, is to know which satellite measurements match up in time and space with which MAXDOAS measurements.The `harpcollocate` command line tool is designed to answer this question. It will take distance criteria on e.g. time and space and produce a list of pairs of measurements where the satellite and reference data match.You can get a quick help reference by passing the `--help` argument to the harpcollocate tool.!harpcollocate --helpUsage: harpcollocate [options] Find matching sample pairs between two datasets of HARP files. The path for a dataset can be either a single file or a directory containing files. The results will be written as a comma separated value (csv) file to the provided output path. If a directory is specified then all files (recursively) from that directory are used for a dataset. If a file is a .pth file then the file paths from that text file (one per line) are used. These file paths can be absolute or relative and can point to files, directories, or other .pth files. Options: -d ' [unit]' Specifies a collocation criterium. Only include pairs where the absolute difference between the values of the given variable for dataset A and B are less/equal than the given value. [...]As a time distance criterium we are interested in measurements that are only half an hour apart. 
And for the spatial distance, we are only interested on satellite pixels that are directly over the MAXDOAS instrument.The command with this criteria will then be:!harpcollocate -d "datetime 0.5 [h]" --point-in-area-yx ../eodata/sentinel5p/overpass/no2/athens ../eodata/groundbased/maxdoas/athens collocations_maxdoas_full.csvThis command produced a `csv` file called `collocations_maxdoas_full.csv` that contains the matching pairs.with open('collocations_maxdoas_full.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') for row in csv_reader: print(', '.join(row))collocation_index, source_product_a, index_a, source_product_b, index_b, datetime_diff [h] 0, S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc, 1450403, groundbased_uvvis.doas.offaxis.no2_iup008_athens_20200201t062127z_20200215t125518z_001.hdf, 8, 0.28686417 1, S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc, 1450403, groundbased_uvvis.doas.offaxis.no2_iup008_athens_20200201t062127z_20200215t125518z_001.hdf, 9, 0.039664167 2, S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc, 1450403, groundbased_uvvis.doas.offaxis.no2_iup008_athens_20200201t062127z_20200215t125518z_001.hdf, 10, -0.20753583 3, S5P_OFFL_L2__NO2____20200201T112236_20200201T130406_11933_01_010302_20200204T163837.nc, 1417498, groundbased_uvvis.doas.offaxis.no2_iup008_athens_20200201t062127z_20200215t125518z_001.hdf, 12, 0.23090444 4, S5P_OFFL_L2__NO2____20200201T112236_20200201T130406_11933_01_010302_20200204T1[...]What you will see on each line is:- a unique identifier of the collocation pair (the `collocation_index`)- a reference to the satellite product- an index of the measurement within the satellite product- a reference to the maxdoas product- an index of the measurement within the maxdoas product- the distance (in time) between the two measurementsNote that the reference to the satellite product is the orginal L2 product. Also, the 'index' of the satellite measurement is the index of the pixel in the original L2 product (this index value is stored as an `index` variable in the overpass file).The advantage of this, is that you can get the measurement directly from the original L2 product again without having to have access to the overpass file.All the operations we perform below on the overpass files using this collocation result file can actually also still be performed on the original L2 products as well. This makes it easy to share a collocation result file with someone else who doesn't have your overpass files. That person can then download the original products and use the collocation result file to quickly extract the collocated measurements. We can see in the list that sometimes the same satellite measurement appears twice (e.g. `S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc` measurement `1450403`). This is because within the the given half hour time distance there are multiple MAXDOAS measurements that match that criteria.We can instruct HARP to only take the nearest MAXDOAS measurement in that case by providing the `-nx datetime` option to `harpcollocate`. Also, the collocations that we produced were actually not filtered for quality. 
We actually only want measurements that are 'of good quality'.For the S5P data this means applying the `tropospheric_NO2_column_number_density_validity > 75` filter (as we have seen in the exercise from yesterday) and for MAXDOAS we are only going to filter out NaN values (which can be done using the `valid(tropospheric_NO2_column_number_density)` filter.We can pass these filters as part of the `harpcollocate` command line using the `-aa` and `-ab` parameters. If we add the `-nx` and `-aa` and `-ab` options we get:!harpcollocate -d "datetime 0.5 [h]" --point-in-area-yx -nx datetime -aa "tropospheric_NO2_column_number_density_validity > 75" -ab "valid(tropospheric_NO2_column_number_density)" ../eodata/sentinel5p/overpass/no2/athens ../eodata/groundbased/maxdoas/athens collocations_maxdoas.csvNow that we know which measurements pair up, we need to filter both the satellite data and the MAXDOAS data to provide us the data for those pairs.We do this by using the `collocate_left()` and `collocate_right()` HARP operations. The `collocate_left()` filters based on the information that is on the _left_ for each pair (i.e. the satellite data) and `collocate_right()` the information that is on the _right_ (i.e. the MAXDOAS data).In addition, we need to add several other operations that allow us to make sure that variables have the same units for both the satellite and maxdoas data.filepattern = "../eodata/sentinel5p/overpass/no2/athens/*" operations = ';'.join([ 'collocate_left("collocations_maxdoas.csv")', 'derive(datetime {time} [days since 2000-01-01])', 'derive(tropospheric_NO2_column_number_density [Pmolec/cm2])', 'derive(tropospheric_NO2_column_number_density_uncertainty {time} [Pmolec/cm2])', 'sort(collocation_index)', ]) s5p = harp.import_product(filepattern, operations) filepattern = "../eodata/groundbased/maxdoas/athens/*" operations = ';'.join([ 'collocate_right("collocations_maxdoas.csv")', 'derive(datetime {time} [days since 2000-01-01])', 'derive(tropospheric_NO2_column_number_density [Pmolec/cm2])', 'derive(tropospheric_NO2_column_number_density_uncertainty {time} [Pmolec/cm2])', 'sort(collocation_index)', ]) maxdoas = harp.import_product(filepattern, operations)You will see that the imported s5p and maxdoas data now contain the same amount of measurements. And by sorting both datasets by the `collocation_index` we make sure that all the measurements are nicely aligned.print(s5p) print(maxdoas)history = '2020-02-10T16:48:45Z [vdafop-1.8.0] vdafop S5P_OPER_CFG_VDAFOP_00000000T000000_99999999T999999_20191001T140933.EOF S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc\n2020-02-10T16:48:45Z [harp-1.5] harpconvert -a \'point_in_area((39.336,39.312,39.238,39.119,38.958,38.762,36.558,36.193,36.295,36.588,38.762,38.958,39.119,39.238,39.312),(23.775,24.104,24.422,24.714,24.972,25.184,27.145,26.875,20.598,20.385,22.365,22.577,22.835,23.127,23.445))\' S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc S5P_OFFL_L2VONO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211_athens.nc\n2020-11-16T12:17:43Z [harp-1.12] harp.import_product(\'../eodata/sentinel5p/overpass/no2/athens/*\',operations=\'collocate_left("collocations_maxdoas.csv");derive(datetime {time} [days since 2000-01-01]);derive(tropospheric_NO2_column_number_density [Pmolec/cm2]);derive(tropospheric_NO2_column_number_density_uncertainty {t[...]We can now plot the s5p and maxdoas data side-by-sidefig = plt.figure(figsize=(20, 10)) plt.title("S5P vs. 
MAXDOAS - February 2020", fontsize=20) t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in s5p.datetime.data] plt.errorbar(t, s5p.tropospheric_NO2_column_number_density.data, yerr=s5p.tropospheric_NO2_column_number_density_uncertainty.data, fmt='o', capsize=5, label="s5p") t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in maxdoas.datetime.data] plt.errorbar(t, maxdoas.tropospheric_NO2_column_number_density.data, yerr=maxdoas.tropospheric_NO2_column_number_density_uncertainty.data, fmt='o', capsize=5, label="maxdoas") fig.autofmt_xdate() plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylabel(f'{s5p.tropospheric_NO2_column_number_density.description} [{s5p.tropospheric_NO2_column_number_density.unit}]', fontsize=16) fig.legend(loc='right', prop={'size': 14}) plt.show()We can also plot the difference. This can be done using:fig = plt.figure(figsize=(20, 10)) plt.title("S5P vs. MAXDOAS - February 2020", fontsize=20) t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in s5p.datetime.data] diff = s5p.tropospheric_NO2_column_number_density.data - maxdoas.tropospheric_NO2_column_number_density.data # propagate uncertainty as the square root of the sum of the squared individual uncertainties of s5p and maxdoas err = np.sqrt(s5p.tropospheric_NO2_column_number_density_uncertainty.data**2 + maxdoas.tropospheric_NO2_column_number_density_uncertainty.data**2) plt.errorbar(t, diff, yerr=err, fmt='o', capsize=5, label="s5p - maxdoas") fig.autofmt_xdate() plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylabel(f'tropospheric NO2 difference (S5P-MAXDOAS) [{s5p.tropospheric_NO2_column_number_density.unit}]', fontsize=14) plt.show()Central Limit Theorem for Different Distributions: For every distribution type, taking 100 samples of size 100 each, the mass of the histogram of sample means is concentrated around the mean of the underlying distribution. This supports the conclusion that all of the distributions we have plotted obey the central limit theorem. Normal Distribution "The central limit theorem states that if you have a population with mean μ and standard deviation σ and take sufficiently large random samples from the population with replacement, then the distribution of the sample means will be approximately normally distributed." It is clearly evident that the normal distribution obeys the central limit theorem. Poisson Distribution "A Poisson(100) distribution can be thought of as the sum of 100 independent Poisson(1) variables and hence may be considered approximately Normal, by the central limit theorem, so Normal(μ = rate*Size = λ*N, σ = √(λ*N)) approximates Poisson(λ*N = 1*100 = 100)." Since the Poisson distribution can be approximated by a normal distribution, it obeys the central limit theorem. Binomial Distribution For the binomial distribution b(n, p, j), the local limit theorem gives \begin{equation}\lim_{n\to\infty} \sqrt{npq}\, b\big(n, p, \langle np + x\sqrt{npq} \rangle\big) = \phi(x),\end{equation} so as n tends to infinity the individual probabilities shrink toward zero while the rescaled probabilities approach the standard normal density, which is why the binomial distribution obeys the CLT.
ChiSquare Distribution If there we take standardized normal distribtution as in our case, the distribution tends to follow the standard normal distribution Laplace Distribution Laplace Distribution clearly follows the Central Distribution Theorem.# Plotting all the distributions together as a histogram Normal = list(md['Normal']['means'].values()) Poisson = list(md['Poisson']['means'].values()) Binomial = list(md['Binomial']['means'].values()) ChiSquare = list(md['ChiSquare']['means'].values()) Laplace = list(md['Laplace']['means'].values()) pl.figure(figsize=(8,8)) pl.hist((Normal, Poisson, Binomial, ChiSquare, Laplace),bins=30,stacked=True) pl.xlabel('Sample Mean', fontsize = 18) pl.ylabel('No. of Samples', fontsize = 18) pl.legend(('Chisquared','Normal','Poisson','Binomial','LaPlace'),loc='upper right',bbox_to_anchor=(1.25,1)) pl.title("All Distributions", fontsize = 18)This notebook contains all the analyses associated with the Texas dataset (see tx_process_data.ipynb)import numpy as np import pandas as pd import plotly plotly.offline.init_notebook_mode(connected=True) df = pd.read_pickle('./Texas')I. Yelp Ratings Over Timedf_ino_plano = df[(df['restaurant'] == 'In-N-Out') & (df['outlet'] == 'Plano')][['date','rating','content']].copy() df_ino_plano_sorted = df_ino_plano.sort_values('date') import plotly import plotly.plotly as py import plotly.graph_objs as go plotly.offline.init_notebook_mode(connected=True) import pandas as pd dallas_uptown = go.Scatter( x=df_ino_plano_sorted['date'], y=df_ino_plano_sorted['rating'], name = "PLano", marker={'color': 'red', 'symbol': 104, 'size': "10"}, line={'color': 'blue'}, mode = 'lines+markers', opacity = 0.8) data = [dallas_uptown] layout = dict( title='Yelp Ratings Over Time (In-N-Out, Plano, TX) ', xaxis=dict( rangeselector=dict( buttons=list([ dict(count=1, label='1m', step='month', stepmode='backward'), dict(count=6, label='6m', step='month', stepmode='backward'), dict(step='all') ]) ), rangeslider=dict(), type='date' ) ) fig = dict(data=data, layout=layout) plotly.offline.iplot(fig, show_link=False) pd.set_option('display.max_colwidth', -1) df_ino_plano_sorted[(df_ino_plano_sorted['date'] > '2017-3-10') & (df_ino_plano_sorted['date'] < '2017-7-20')][['date','content', 'rating']]II. Average Yelp Ratings Over Timeoutlets_ino = df[df['restaurant'] == 'In-N-Out']['outlet'].unique().tolist() outlets_ino df_ino = df[df['restaurant'] == 'In-N-Out'].copy()- create column of rating means for each outletdef get_means(df_local, outlet): df_outlet = df_local[df_local.outlet == outlet] df_outlet_sort = df_outlet[['outlet','date','year','month','day','rating']].sort_values(['year', 'month', 'day']) df_index = df_outlet_sort.reset_index() nrow = df_index.shape[0] df_index['rating_means'] = df_index['rating'].apply(lambda x: 0*x) for i in range(nrow): if i == 0: df_index['rating_means'].iloc[i] = df_index['rating'].iloc[i] else: df_index['rating_means'].iloc[i] = (1-1/(i+1))*df_index['rating_means'].iloc[i-1] \ + df_index['rating'].iloc[i]/(i+1) return df_index dframes = [] for outlet in outlets_ino: frame = get_means(df_ino, outlet) dframes.append(frame) df_ino_means = pd.concat(dframes)/Applications/anaconda/lib/python3.6/site-packages/pandas/core/indexing.py:179: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copya. 
a. Average Yelp Ratings - Plano, TXdf_ino_means[df_ino_means.outlet == 'Plano'][['outlet','date','rating', 'rating_means']].head() import plotly import plotly.plotly as py import plotly.graph_objs as go plotly.offline.init_notebook_mode(connected=True) import pandas as pd plano = go.Scatter( x=df_ino_means[df_ino_means.outlet == 'Plano'].date,#.sort_values(), y=df_ino_means[df_ino_means.outlet == 'Plano']['rating_means'], name = "Plano, TX", line={'color': 'orange'}, mode = 'lines+markers', opacity = 0.8) data = [plano] layout = dict( title='Average Yelp Rating Over Time (In-N-Out, Plano, TX)', xaxis=dict( rangeselector=dict( buttons=list([ dict(count=1, label='1m', step='month', stepmode='backward'), dict(count=6, label='6m', step='month', stepmode='backward'), dict(step='all') ]) ), rangeslider=dict(), type='date' ) ) fig = dict(data=data, layout=layout) plotly.offline.iplot(fig, show_link=False)b. Average Yelp Ratings - Dallas Upper Greenville (UG), Dallas North, and Planoimport plotly import plotly.plotly as py import plotly.graph_objs as go plotly.offline.init_notebook_mode(connected=True) import pandas as pd dallas_ug = go.Scatter( x=df_ino_means[df_ino_means.outlet == 'Dallas UG'].date,#.sort_values(), y=df_ino_means[df_ino_means.outlet == 'Dallas UG']['rating_means'], name = "Dallas UG", line={'color': 'blue'}, mode = 'lines+markers', opacity = 0.8) dallas_north = go.Scatter( x=df_ino_means[df_ino_means.outlet == 'Dallas North'].date,#.sort_values(), y=df_ino_means[df_ino_means.outlet == 'Dallas North']['rating_means'], name = "Dallas North", line={'color': 'green'}, mode = 'lines+markers', opacity = 0.8) plano = go.Scatter( x=df_ino_means[df_ino_means.outlet == 'Plano'].date,#.sort_values(), y=df_ino_means[df_ino_means.outlet == 'Plano']['rating_means'], name = "Plano, TX", line={'color': 'orange'}, mode = 'lines+markers', opacity = 0.8) data = [dallas_ug, dallas_north, plano] layout = dict( title='Average Yelp Ratings Over Time (In-N-Out, Texas)', xaxis=dict( rangeselector=dict( buttons=list([ dict(count=1, label='1m', step='month', stepmode='backward'), dict(count=6, label='6m', step='month', stepmode='backward'), dict(step='all') ]) ), rangeslider=dict(), type='date' ) ) fig = dict(data=data, layout=layout) plotly.offline.iplot(fig, show_link=False)- create date_month variable for the next sectiondef get_date_month(x): return str(x.year) + '-' + str(x.month) df['date_month'] = df['date'].apply(lambda x: get_date_month(x)) df['date_month'] = pd.to_datetime(df['date_month']) df_ino_plano = df[(df['restaurant'] == 'In-N-Out') & (df['outlet'] == 'Plano')]III.
Number of Reviews Per Monthimport matplotlib.pyplot as plt plt.style.use('ggplot') %matplotlib inline plt.rcParams["figure.figsize"] = (20,3) group_month_ino_plano = df_ino_plano[['date_month','rating']].groupby(['date_month']) group_month_ino_plano.count().plot(kind='bar', color='orange', title = 'In-N-Out (Plano, TX)');Load account datadf = importer.load_data(importer.FileType.ACCOUNT) # dfAdd/Modify rows on conditiondf.loc[df['Account'] == 'eKonto', 'Type'] = 'checking' data.add_new_operations(models.Bank.PL_MILLENIUM, account_name='', file_name='biz1.csv') data.add_new_operations(models.Bank.PL_ING, account_name='Konto z Lwem Direct', file_name='ing.csv')Save account filedf.to_csv(config.mankoo_file_path('account'), index=False)--- Otherimport numpy as np df['Operation'] = np.where(df['Obciążenia'] < 0, df['Obciążenia'], df['Uznania']) df df = df.drop(columns=['Obciążenia', 'Uznania']) df = df.rename(columns={'Data transakcji': 'Date', 'Opis': 'Title', 'Waluta': 'Currency'}) df['Bank'] = 'Millenium' existing_columns = list(df.columns) columns = ['Category', 'Comment'] df.reindex(existing_columns + columns, axis="columns") dfInference- Run inference on the test dataset.- Mainly for testing your model is doing ok visually.%env CUDA_VISIBLE_DEVICES=1 %load_ext autoreload %autoreload 2 from pathlib import Path import matplotlib.pyplot as plt import tqdm import torch import numpy as np import sys; sys.path.append("../") import maskflow root_dir = Path("/home/hadim/.data/Neural_Network/Maskflow/Microtubule") data_dir = root_dir / "Data" model_dir = root_dir / "Models" # Import the configuration associated with this dataset and network. config = maskflow.config.load_config(root_dir / "config.yaml") # Select the model model_name = '2018.11.20-12:15:32' model_path = model_dir / model_name # Set some configurations config['MODEL']['DEVICE'] = "cpu" config['DATALOADER']['NUM_WORKERS'] = 4 config['TEST']['IMS_PER_BATCH'] = 6 config['SOLVER']['IMS_PER_BATCH'] = 4 batch_to_load = 1 # Build the model model = maskflow.inference.build_model(config, model_path) # Load some data data_loader = maskflow.dataset.get_data_loader(config, data_dir, is_train=False) data = [datum for _, datum in zip(range(batch_to_load), data_loader)] # Retrieve category's names categories = data_loader.dataset.coco.cats # Run inference predictions = [] images = [] ground_truth = [] for batch_image, batch_target, batch_idx in data: batch_image = batch_image.to(config['MODEL']['DEVICE']) with torch.no_grad(): prediction = model(batch_image) prediction = [o.to('cpu') for o in prediction] predictions.extend(prediction) images.extend([im for im in batch_image.tensors]) ground_truth.extend(batch_target) images = np.array([o.to('cpu').numpy() for o in images]) len(images) # Show prediction idx = 3 image = images[idx] prediction = predictions[idx] gt = ground_truth[idx] prediction = maskflow.inference.select_top_predictions(prediction, confidence_threshold=0.7) print(prediction.bbox.shape[0]) print(prediction.get_field('scores')) _ = maskflow.viz.display_prediction_and_gt(image, prediction, gt, class_names=config['CLASS_NAMES'], pixel_mean=config['INPUT']['PIXEL_MEAN'], pixel_std=config['INPUT']['PIXEL_STD'])52 tensor([1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 
1.0000, 1.0000, 1.0000, 0.9999, 0.9999, 0.9998, 0.9997, 0.9995, 0.9994, 0.9976, 0.9935, 0.9077])Check we reproduce the same answer as galpy for some random potentialq=0.5 #Getting units correct is painful. with ro=1 vo=1 and turn_physical_off then everything should be just G=1 galpy_pot = galpy.potential.TwoPowerTriaxialPotential(c=q,ro=1,vo=1) galpy_pot.turn_physical_off() pot = astro_dynamo.analytic_potentials.SpheroidalPotential(lambda m: galpy_pot._amp*galpy_pot._mdens(m),q=q) x=np.linspace(0,10,100) plt.semilogy(x,list(map(lambda x: -galpy_pot.Rforce(x,1),x)),'r',label='galpy FR') plt.semilogy(x,-pot.f_r_cyl(x,np.array([1.])),'--k') plt.semilogy(x,list(map(lambda x: -galpy_pot.zforce(x,1),x)),'y',label='galpy Fz') plt.semilogy(x,-pot.f_z(x,np.array([1.])),'--k',label='astro-dynamo') plt.legend() plt.ylabel('Force') plt.xlabel('R') x=np.linspace(0,10,100) plt.plot(x,list(map(lambda x: galpy_pot.vcirc(x,0),x)),'r',label='galpy FR') plt.plot(x,torch.sqrt(pot.vc2(x,np.array([0.]))),'--k',label='astro-dynamo') plt.ylabel('$V_c$') plt.xlabel('$R$')Try replacing the dark matter particles in a snapshot by an analytic profilesnap=astro_dynamo.snap.SnapShot('../inputmodels/M85_0.gz', particle_type_mapping={0:ParticleType.DarkMatter,1:ParticleType.Star}) q,qerr = astro_dynamo.analytic_potentials.fit_q_to_snapshot(snap,plot=True,r_bins=50) print(f'q={q:.3f}+-{qerr:.3f}')Define and test a spheriodal potential based on this fitdef ein(m,rhor0,m0,alpha): rho0 = rhor0 / (np.exp(-(2 / alpha) * ((8.2 / m0) ** alpha - 1))) return rho0 * np.exp(-(2 / alpha) * ((m / m0) ** alpha - 1)) pot = astro_dynamo.analytic_potentials.fit_potential_to_snap(snap.dm,ein,init_parms=[1e-3,8.0,0.7],plot=True) r,dm_vc2 = mwtools.nemo.rotationcurve(snap.dm.as_numpy_array(),rrange=(0, 40)) r,stellar_vc2 = mwtools.nemo.rotationcurve(snap.stars.as_numpy_array(),rrange=(0, 40)) i = (np.abs(snap.dm.positions[:,0]) < 10) & \ (np.abs(snap.dm.positions[:,1]) < 10) & \ (np.abs(snap.dm.positions[:,2]) < 10) r,dm_vc2_trunc = mwtools.nemo.rotationcurve(snap.dm[i].as_numpy_array(),rrange=(0, 40)) i = (np.abs(snap.stars.positions[:,0]) < 10) & \ (np.abs(snap.stars.positions[:,1]) < 10) & \ (np.abs(snap.stars.positions[:,2]) < 10) r,stellar_vc2_trunc = mwtools.nemo.rotationcurve(snap.stars[i].as_numpy_array(),rrange=(0, 40)) f,ax = plt.subplots(1,1) ax.plot(r,np.sqrt(dm_vc2),label = 'DM Particles') ax.plot(r,np.sqrt(stellar_vc2),label = 'Stellar Particles') ax.plot(r,np.sqrt(dm_vc2_trunc),label = 'DM Particles in 10kpc box') x=np.linspace(0.,40,100) ax.plot(x,np.sqrt(pot.vc2(x,torch.tensor(0.0,dtype=torch.float64))),label = 'Einasto Fit') r=r.copy() ax.plot(r,np.sqrt(stellar_vc2+pot.vc2(r,torch.tensor(0.0,dtype=torch.float64)).numpy()),label = 'Total Vc: Einasto Fit') ax.plot(r,np.sqrt(stellar_vc2+dm_vc2),label = 'Total Vc: Particles') ax.set_xlim((0,20)) ax.set_ylabel('$V_c$') ax.set_xlabel('$R$') ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))Test the gridding of the potentialpot.grid_acc() maxi=1000 positions = snap.stars.positions r_cyl = snap.stars.rcyl z = snap.stars.positions[..., 2] f_r_cyl,f_z = pot.get_accelerations_cyl(positions[:maxi,:]).t() f,ax = plt.subplots(1,2, figsize = (8,4), sharey = 'row') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[0].semilogy() ax[0].legend() ax[0].set_ylabel('Fractional Difference') ax[0].set_xlabel('R') 
ax[1].plot(z[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[1].plot(z[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[1].semilogy() ax[1].legend() ax[1].set_xlabel('z') maxi=1000 positions = snap.stars.positions r_cyl = snap.stars.rcyl z = snap.stars.positions[..., 2] acc = pot.get_accelerations(positions) f_r_cyl = -torch.sqrt( acc[..., 0]**2 + acc[..., 1]**2 ) f_z = acc[..., 2] f_r_cyl=f_r_cyl[:maxi] f_z=f_z[:maxi] f,ax = plt.subplots(1,2, figsize = (8,4), sharey = 'row') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[0].semilogy() ax[0].legend() ax[0].set_ylabel('Fractional Difference') ax[0].set_xlabel('R') ax[1].plot(z[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[1].plot(z[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[1].semilogy() ax[1].legend() ax[1].set_xlabel('z') gpu_pot = pot.to('cuda') acc = gpu_pot.get_accelerations(positions) f_r_cyl = -torch.sqrt( acc[..., 0]**2 + acc[..., 1]**2 ) f_z = acc[..., 2] f_r_cyl=f_r_cyl[:maxi] f_z=f_z[:maxi] f,ax = plt.subplots(1,2, figsize = (8,4), sharey = 'row') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[0].plot(r_cyl[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[0].semilogy() ax[0].legend() ax[0].set_ylabel('Fractional Difference') ax[0].set_xlabel('R') ax[1].plot(z[:maxi],np.abs((pot.f_r_cyl(r_cyl[:maxi],z[:maxi])-f_r_cyl)/f_r_cyl),'.',label='$F_r$') ax[1].plot(z[:maxi],np.abs((pot.f_z(r_cyl[:maxi],z[:maxi])-f_z)/f_z),'.',label='$F_z$') ax[1].semilogy() ax[1].legend() ax[1].set_xlabel('z') Linear Regression - Pythonic Solution Linear Regression with one variable Notation- **m** = number of training examples;- **x's** = input data / features;- **y's** = output data / expected value;- **h** = hypothesis for the value of y; ![linear regression life cycle](Imagens/ciclo-de-vida-regressao-linear.png) ![determining the hypothesis](Imagens/determinando-hipotese.png)# importing the Python libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt # importing the data data = pd.read_csv('ex1data1.txt', names=['População', 'Lucro']) # viewing the first 5 rows data.head() plt.figure(figsize = (20,10)) # setting the figure size X = data['População'].values # get the population data Y = data['Lucro'].values # get the profit data m = len(Y) # number of training examples plt.scatter(X, Y, color = "red", marker = "*") # plot the data # adding labels to the plot plt.xlabel('City Population') plt.ylabel('Profit') plt.title('Plotting the Training Data') print(X) print(Y) X = np.append(np.ones([m,1]), X.reshape(m,1), axis=1) # adding x0 = 1 and reshaping to mx1 print(X) Y = Y.reshape(m,1) # reshaping to mx1 Y.shape teta = np.zeros([2,1]) # defining an array of zeros to store the coefficients of the line equation teta![goals of linear regression](Imagens/objetivos-regressao-linear.png)def Custo(x, y, teta): ''' Returns the cost (prediction error) ''' m = len(y) # number of training examples h = x.dot(teta) # defining the algorithm's hypothesis J = 1/(2*m)*(np.sum((h-y)**2)) # implementing the cost function return J # return the cost
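Before running gradient descent below, it can be useful to have a reference value for θ: for ordinary least squares the optimum has a closed form, the normal equation $\theta = (X^T X)^{-1} X^T y$. The snippet below is only a sketch added for comparison (it is not part of the original notebook); it assumes `X` (with the bias column of ones) and `Y` as built above, reuses the `Custo` function just defined, and the name `theta_closed_form` is made up for illustration.

```python
import numpy as np

# Closed-form least-squares solution via the normal equation.
# np.linalg.solve is preferred over forming an explicit matrix inverse.
theta_closed_form = np.linalg.solve(X.T @ X, X.T @ Y)

print(theta_closed_form)               # should be close to the gradient-descent result below
print(Custo(X, Y, theta_closed_form))  # cost at the optimum: a lower bound for gradient descent
```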
custo = Custo(X, Y, teta) # call the function that computes the cost and print it print(custo)32.072733877455676Using Gradient Descent to minimize the cost ![gradient descent](Imagens/visualizando-gradiente-descendente.png) ![gradient descent algorithm](Imagens/gradiente-descendente-algoritmo.png) ![gradient descent derivatives](Imagens/gradiente-descendente-derivadas.png) ![learning rate](Imagens/taxa-de-aprendizado.png)interacoes = 1000 # number of iterations alfa = 0.01 # learning rate def GradienteDescendente(x, y, teta, alfa, interacoes): ''' Running gradient descent to minimize the cost ''' historico_custo = [] # empty list to store the cost value at each iteration m = len(y) # number of training examples for i in range(interacoes): # iterate 1000 times h = x.dot(teta) # compute the hypothesis teta = teta - (alfa/m) * (x.T.dot(h-y)) # apply the gradient descent update historico_custo.append(Custo(x, y, teta)) # store the cost (prediction error) at each step return teta, historico_custo # obtain the line-equation parameters and the cost history over training novo_teta, historico_custo = GradienteDescendente(X, Y, teta, alfa, interacoes) novo_teta historico_custo = np.array(historico_custo) # convert the list into a numpy array historico_custo.shape # setting the figure size plt.figure(figsize = (20,10)) # plotting the x and y data plt.scatter(X[:,1], Y, color = "red", marker = "*") # plot the data plt.plot(X[:,1], np.dot(X, novo_teta), label = "Prediction") plt.xlabel('City Population') plt.ylabel('Profit') plt.title('Plotting the Training Data') # adding the legend plt.legend() # adding a grid plt.grid(True) # removing the plot frame plt.box(False) def predicao(x, teta): ''' This function returns a prediction for new data ''' pred = np.dot(x, teta) x = np.array(x) print('For a population of {} inhabitants, we expect {} $ of profit'.format(x[1] * 1000, pred * 10000)) return None predicao(([1, 3.5]), novo_teta) predicao(([1, 7]), novo_teta) plt.figure(figsize = (20, 10)) plt.plot(historico_custo) plt.ylabel('Cost J') plt.xlabel('Number of Iterations') plt.title('Minimizing the cost using gradient descent')Data Preprocessing Steps Import Libraryimport pandas as pd import numpy as np import matplotlib.pyplotImport Datasetdataset=pd.read_csv('C:\\Users\\YogeshR\\Downloads\\Machine Learning A-Z (Codes and Datasets)\\Part 1 - Data Preprocessing\\1\\Python\\data.csv') x=dataset.iloc[:,:-1].values y=dataset.iloc[:,-1].values print(x) print(y)['No' 'Yes' 'No' 'No' 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes']Handling the missing datafrom sklearn.impute import SimpleImputer imputer = SimpleImputer(missing_values=np.nan, strategy= 'mean') imputer.fit(x[:,1:3]) x[:,1:3] = imputer.transform(x[:,1:3]) print(x)[['France' 44.0 72000.0] ['Spain' 27.0 48000.0] ['Germany' 30.0 54000.0] ['Spain' 38.0 61000.0] ['Germany' 40.0 63777.77777777778] ['France' 35.0 58000.0] ['Spain' 38.77777777777778 52000.0] ['France' 48.0 79000.0] ['Germany' 50.0 83000.0] ['France' 37.0 67000.0]]Encoding Independent Variablesfrom sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder ct = ColumnTransformer(transformers=[('encoder',OneHotEncoder(),[0])],remainder='passthrough') x = np.array(ct.fit_transform(x)) print(x)[[1.0 0.0 0.0 44.0 72000.0] [0.0 0.0 1.0 27.0 48000.0] [0.0 1.0 0.0 30.0 54000.0] [0.0 0.0 1.0 38.0 61000.0] [0.0 1.0
0.0 40.0 63777.77777777778] [1.0 0.0 0.0 35.0 58000.0] [0.0 0.0 1.0 38.77777777777778 52000.0] [1.0 0.0 0.0 48.0 79000.0] [0.0 1.0 0.0 50.0 83000.0] [1.0 0.0 0.0 37.0 67000.0]]Encoding Dependent Varaiablesfrom sklearn.preprocessing import LabelEncoder le = LabelEncoder() y = le.fit_transform(y) print(y)[0 1 0 0 1 1 0 1 0 1]Splitting The Dataset Into Training Set And Test Setfrom sklearn.model_selection import train_test_split xtrain,xtest,ytrain,ytest = train_test_split(x,y,test_size= 0.2,random_state = 1) print(xtrain) print(xtest) print(ytrain) print(ytest)[0 1]Feature Scalingfrom sklearn.preprocessing import StandardScaler sc = StandardScaler() xtrain[:, 3:] = sc.fit_transform(xtrain[:, 3:]) xtest[:, 3:] = sc.transform(xtest[:, 3:]) print(xtrain) print(xtest)[[0.0 1.0 0.0 -1.4661817944830124 -0.9069571034860727] [1.0 0.0 0.0 -0.44973664397484414 0.2056403393225306]]Load Assay TSVsassays = [x for x in os.listdir(assays_path) if x.endswith('.tsv')] print(assays) assay_name = assays[2] #pick which assay assay_name def read_assay(assay_path): assay = {} with open(assay_path, "r") as f: lines = [x.strip().split('\t') for x in f.readlines()] for i, column in enumerate(lines[0]): assay[column] = [line[i] for line in lines[1:]] return assay assay = read_assay(assays_path + assay_name) assay.keys() result = np.array([float(result) for result in assay['result']], dtype=np.float32) binding = np.array([int(result) for result in assay['binding']], dtype=np.int32) binding.sum() num_instances = len(assay['CanonicalSMILES']) print(num_instances)7462Load Pretrained Transformerimport torch import torch.nn as nn import torch.nn.functional as F import itertools import sys sys.path.insert(1, '../') #make parent folder visible from transformer import Transformer, create_masks, nopeak_mask checkpoint_dir = "../checkpoints/" MAX_LEN = 256 MODEL_DIM = 512 N_LAYERS = 6 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(DEVICE) TRANSFORMER_DEVICE = DEVICE #torch.device("cpu") PRINTABLE_ASCII_CHARS = 95 _extra_chars = ["seq_start", "seq_end", "pad"] EXTRA_CHARS = {key: chr(PRINTABLE_ASCII_CHARS + i) for i, key in enumerate(_extra_chars)} ALPHABET_SIZE = PRINTABLE_ASCII_CHARS + len(EXTRA_CHARS) def find_ckpts(*args, **kwargs): ckpts = os.listdir(checkpoint_dir) str_args = [str(x) for x in itertools.chain(args, kwargs.values())] return [checkpoint_dir + ckpt for ckpt in ckpts if all([arg in ckpt.replace(".", "_").split("_") for arg in str_args])] def encode_char(c): return ord(c) - 32 def decode_char(n): return chr(n + 32) def encode_string(string, start_char=chr(0)): return torch.tensor([ord(start_char)] + [encode_char(c) for c in string]) def encode_string_np(string, start_char=chr(0), pad_char=chr(0)): if len(string) > 255: string = string[:255] arr = np.full((256,), ord(pad_char), dtype=np.float32) arr[:len(string)+1] = np.array([ord(start_char)] + [encode_char(c) for c in string]) return arr def pad_tensors(tensors, pad_char=chr(0), max_len=None): if not max_len: max_len = max([t.shape[0] for t in tensors]) + 1 padded_tensors = torch.full((len(tensors), max_len), ord(pad_char), dtype=torch.long) for i, tensor in enumerate(tensors): padded_tensors[i, 0:tensor.shape[0]] = tensor return padded_tensorsSelect Weights Checkpointfound = find_ckpts() print(found) load_path = found[0] if len(found) > 0 else "" model = Transformer(ALPHABET_SIZE, MODEL_DIM, N_LAYERS) model = nn.DataParallel(model) model = model.to(TRANSFORMER_DEVICE) checkpoint = torch.load(load_path) 
model.load_state_dict(checkpoint['state_dict']) model = model.eval()Create H5PY Dataset Fileimport h5py transformer_epoch = 2 assay_name = assay_name.replace(".tsv", "_" + str(transformer_epoch) + ".hdf5") assay_path = embeddings_path + assay_name print(assay_path) f = h5py.File(assay_path, 'w-') embeddings = f.create_dataset("embeddings", (num_instances, 256, 512), dtype=np.float32) result_dset = f.create_dataset("result", (num_instances,), dtype=np.float32) binding_dset = f.create_dataset("binding", (num_instances,), dtype=np.int32) smiles_enc = f.create_dataset("smiles", (num_instances, 256), dtype=np.float32) with torch.no_grad(): for i, smiles in enumerate(assay['CanonicalSMILES']): encoded = encode_string(smiles, start_char=EXTRA_CHARS['seq_start']).unsqueeze(0).to(TRANSFORMER_DEVICE) encoded = encoded[:,:MAX_LEN] mask = create_masks(encoded) embedding = model.module.encoder(encoded, mask)[0].cpu().numpy() embeddings[i,:embedding.shape[0],:] = embedding result_dset[i] = result[i] binding_dset[i] = binding[i] encoded = encode_string_np(smiles, start_char=EXTRA_CHARS['seq_start'], pad_char=EXTRA_CHARS['pad']) encoded = encoded / ALPHABET_SIZE smiles_enc[i,:] = encoded if i % 1000 == 0: print(i) f.close()Imports and Settings# basics import os import time import numpy as np import pandas as pd # sklearn from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder, StandardScaler # keras import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D from keras.layers import Dense, Dropout, Flatten from keras.callbacks import TensorBoard, ModelCheckpoint # tensorflow import tensorflow as tf # plotting import matplotlib.pyplot as plt # local from data_loaders import load_mnist_data, load_iris_data, retrieve_predictions from models import * from utils import train_model_iteratively, get_model_weights, convert_weight_dict_to_dataframe plt.style.use('ggplot') %matplotlib inline %load_ext autoreload %autoreload 2 # suppress warnings import warnings warnings.filterwarnings('ignore')Working examples that use module functions MNIST working examplex_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist = load_mnist_data() pd.DataFrame(y_test_mnist).to_csv("y_test_mnist.csv", index=False) baseline_mnist_model(name='mnist_test', num_classes=2).summary() example_mnist_outdir = './data/example_mnist' train_model_iteratively(baseline_model=baseline_mnist_model, X_train=x_train_mnist, Y_train=y_train_mnist, X_test=x_test_mnist, Y_test=y_test_mnist, outdir=example_mnist_outdir, epochs=4, epochs_to_save=None, batch_size=128, num_models=10) mnist_preds = retrieve_mnist_preds(y_test=y_test_mnist, outdir=example_mnist_outdir) mnist_weights = convert_weight_dict_to_dataframe(get_model_weights(example_mnist_outdir))IRIS working examplex_train_iris, x_test_iris, y_train_iris, y_test_iris = load_iris_data() pd.DataFrame(y_test_iris).to_csv("y_test_iris.csv", index=False) baseline_iris_model(name='iris_test').summary() example_iris_outdir = './data/example_iris' # need this to suppress tf.function retracing warning that kept coming up tf.compat.v1.logging.set_verbosity("ERROR") train_model_iteratively(baseline_model=baseline_iris_model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir=example_iris_outdir, epochs=15, epochs_to_save=None, batch_size=5, num_models=2)X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 
3)Model Runs MNIST model runs Baseline MNISTstart = time.time() train_model_iteratively(baseline_model=baseline_mnist_model, X_train=x_train_mnist, Y_train=y_train_mnist, X_test=x_test_mnist, Y_test=y_test_mnist, outdir="./data/mnist_baseline", epochs=4, epochs_to_save=None, batch_size=128, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 347.0760819911957 secondsSweep epochsfor num_epochs in [8, 12, 16]: outdir = "./data/mnist_epoch_{}".format(num_epochs) start = time.time() train_model_iteratively(baseline_model=baseline_mnist_model, X_train=x_train_mnist, Y_train=y_train_mnist, X_test=x_test_mnist, Y_test=y_test_mnist, outdir=outdir, epochs=num_epochs, epochs_to_save=None, batch_size=128, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 637.4080867767334 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 923.3448550701141 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 1249.7377841472626 secondsSweep batch sizefor batch_size in [32, 64, 256]: outdir = "./data/mnist_batch_{}".format(batch_size) start = time.time() train_model_iteratively(baseline_model=baseline_mnist_model, X_train=x_train_mnist, Y_train=y_train_mnist, X_test=x_test_mnist, Y_test=y_test_mnist, outdir=outdir, epochs=4, epochs_to_save=None, batch_size=batch_size, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 821.0129718780518 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 459.6735169887543 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 303.78163290023804 secondsSweep layersfor idx, model in enumerate([mnist_2_layers, mnist_3_layers, mnist_4_layers]): outdir = "./data/mnist_layers_{}".format(idx+2) start = time.time() train_model_iteratively(baseline_model=model, X_train=x_train_mnist, Y_train=y_train_mnist, X_test=x_test_mnist, Y_test=y_test_mnist, outdir=outdir, epochs=4, epochs_to_save=None, batch_size=128, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 559.6651039123535 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 788.8657438755035 seconds X_train: (12665, 28, 28, 1), Y_train: (12665, 2) X_test: (100, 28, 28, 1), Y_test: (100, 2) train_model_iteratively took 932.9938669204712 secondsIRIS model runs Baseline IRISstart = time.time() train_model_iteratively(baseline_model=baseline_iris_model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir="./data/iris_baseline", epochs=150, epochs_to_save=None, batch_size=5, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (75, 2), 
Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 774.9198100566864 secondsSweep epochsfor num_epochs in [100, 300, 450]: outdir = "./data/iris_epoch_{}".format(num_epochs) start = time.time() train_model_iteratively(baseline_model=baseline_iris_model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir=outdir, epochs=num_epochs, epochs_to_save=None, batch_size=5, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 507.4716958999634 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 1513.5248148441315 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 2045.6344740390778 secondsSweep batch size:for batch_size in [10, 20, 40]: outdir = "./data/iris_batch_{}".format(batch_size) start = time.time() train_model_iteratively(baseline_model=baseline_iris_model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir=outdir, epochs=150, epochs_to_save=None, batch_size=batch_size, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 668.1161737442017 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 741.2695188522339 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 589.3309698104858 secondsSweep layers:for idx, model in enumerate([iris_2_layers, iris_3_layers, iris_4_layers]): outdir = "./data/iris_layers_{}".format(idx+2) start = time.time() train_model_iteratively(baseline_model=model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir=outdir, epochs=150, epochs_to_save=None, batch_size=5, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 726.9213287830353 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 773.2967009544373 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 769.7694590091705 secondsSweep nodes:for idx, model in enumerate([iris_3_nodes, iris_4_nodes, iris_5_nodes, iris_6_nodes]): outdir = "./data/iris_nodes_{}".format(idx+3) start = time.time() train_model_iteratively(baseline_model=model, X_train=x_train_iris, Y_train=y_train_iris, X_test=x_test_iris, Y_test=y_test_iris, outdir=outdir, epochs=150, epochs_to_save=None, batch_size=5, num_models=100) print("train_model_iteratively took {} seconds\n".format(time.time() - start))X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 720.6743988990784 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 726.2040410041809 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 729.7382500171661 seconds X_train: (75, 2), Y_train: (75, 3) X_test: (75, 2), Y_test: (75, 3) train_model_iteratively took 765.2918190956116 secondsGeneral informationIn this kernel I'll do EDA and visualization of the data, maybe even modelling, though 
I plan to do serious modelling in my other kernels.We have quite an interesting data. We are challenged to build a model that recognizes toxicity and minimizes unintended bias with respect to mentions of identities.For examplewe need to make sure that a comment like "I am a gay woman" is considered to be not toxic.**Two important points**:1. A subset of comments is labeled with identities. Only identities with more than 500 examples in the test set will be included in the evaluation calculation. This means that not all the test data will be included in evaluation. If we can correctly extract identities, then we will know which test samples are evaluated.2. Target column was created as a fraction of human raters who believed that the comment is toxic. For evaluation, test set examples with target >= 0.5 will be considered to be in the positive class (toxic). I think that we could try both regression and classification approaches here.*Work in progress*import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from nltk.tokenize import TweetTokenizer import datetime from scipy import stats from scipy.sparse import hstack, csr_matrix from sklearn.model_selection import train_test_split, cross_val_score from sklearn import metrics from wordcloud import WordCloud from collections import Counter from nltk.corpus import stopwords from nltk.util import ngrams from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.svm import LinearSVC from sklearn.multiclass import OneVsRestClassifier pd.set_option('max_colwidth',400) pd.set_option('max_columns', 50) import json import altair as alt from altair.vega import v3 from IPython.display import HTML import gc import os import eli5 from eli5.lime import TextExplainer import lightgbm as lgbfrom keras.preprocessing.text import Tokenizerfrom keras.preprocessing.sequence import pad_sequencesfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalizationfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flattenfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1Dfrom keras.models import Model, load_modelfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacksfrom keras import backend as Kfrom keras.engine import InputSpec, Layerfrom keras.optimizers import Adamfrom keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping# Preparing altair. I use code from this great kernel: https://www.kaggle.com/notslush/altair-visualization-2018-stackoverflow-survey vega_url = 'https://cdn.jsdelivr.net/npm/vega@' + v3.SCHEMA_VERSION vega_lib_url = 'https://cdn.jsdelivr.net/npm/vega-lib' vega_lite_url = 'https://cdn.jsdelivr.net/npm/vega-lite@' + alt.SCHEMA_VERSION vega_embed_url = 'https://cdn.jsdelivr.net/npm/vega-embed@3' noext = "?noext" paths = { 'vega': vega_url + noext, 'vega-lib': vega_lib_url + noext, 'vega-lite': vega_lite_url + noext, 'vega-embed': vega_embed_url + noext } workaround = """ requirejs.config({{ baseUrl: 'https://cdn.jsdelivr.net/npm/', paths: {} }}); """ #------------------------------------------------ Defs for future rendering def add_autoincrement(render_func): # Keep track of unique
IDs cache = {} def wrapped(chart, id="vega-chart", autoincrement=True): if autoincrement: if id in cache: counter = 1 + cache[id] cache[id] = counter else: cache[id] = 0 actual_id = id if cache[id] == 0 else id + '-' + str(cache[id]) else: if id not in cache: cache[id] = 0 actual_id = id return render_func(chart, id=actual_id) # Cache will stay outside and return wrapped @add_autoincrement def render(chart, id="vega-chart"): chart_str = """
""" return HTML( chart_str.format( id=id, chart=json.dumps(chart) if isinstance(chart, dict) else chart.to_json(indent=None) ) ) HTML("".join(( "", )))Data overviewtrain = pd.read_csv('../data/raw/train.csv') test = pd.read_csv('../data/raw/test.csv') sub = pd.read_csv('../data/raw/sample_submission.csv') train.head() train.shape, test.shape, (train['target'] > 0).sum() / train.shape[0], (train['target'] >= 0.5).sum() / train.shape[0] train['comment_text'].value_counts().head(20) train.loc[train['comment_text'] == 'Well said.', 'target'].unique() print('Rate of unique comments:', train['comment_text'].nunique() / train['comment_text'].shape[0]) train_comments = set(train['comment_text'].values) test_comments = set(test['comment_text'].values) len(train_comments.intersection(test_comments)), len(test.loc[test['comment_text'].isin(list(train_comments.intersection(test_comments)))])- We have a lot of data in train - 1.8 mln rows! Test data has less than 100k rows. There are also additional columns in train, we'll look at them later.29% samples have value of target higher than 0 and only 7.99% have target higher than 0.5.- One more point: ~1.4% of all comments are duplicates and they can have different target values.- 1170 unique comments from train data are in test data;hist_df = pd.cut(train['target'], 20).value_counts().sort_index().reset_index().rename(columns={'index': 'bins'}) hist_df['bins'] = hist_df['bins'].astype(str) render(alt.Chart(hist_df).mark_bar().encode( x=alt.X("bins:O", axis=alt.Axis(title='Target bins')), y=alt.Y('target:Q', axis=alt.Axis(title='Count')), tooltip=['target', 'bins'] ).properties(title="Counts of target bins", width=400).interactive())Most of comments aren't toxic. We can also see some spikes in the distribution...train['target'].value_counts().head(20)Do you remember how target was created? This is a fraction of voters who considered the comment to be toxic. 
Then it is completely normal that 0%, 1/6, or 1/5 of voters could think the same.train['created_date'] = pd.to_datetime(train['created_date']).values.astype('datetime64[M]') counts = train.groupby(['created_date'])['target'].mean().sort_index().reset_index() means = train.groupby(['created_date'])['target'].count().sort_index().reset_index() c = alt.Chart(counts).mark_line().encode( x=alt.X("created_date:T", axis=alt.Axis(title='Date')), y=alt.Y('target:Q', axis=alt.Axis(title='Rate')), tooltip=[alt.Tooltip('created_date:T', timeUnit='yearmonth'), alt.Tooltip('target:Q')] ).properties(title="Counts and toxicity rate of comments", width=800).interactive() r = alt.Chart(means).mark_line(color='green').encode( x=alt.X("created_date:T", axis=alt.Axis(title='Date')), y=alt.Y('target:Q', axis=alt.Axis(title='Counts')), tooltip=[alt.Tooltip('created_date:T', timeUnit='yearmonth'), alt.Tooltip('target:Q')], ).properties().interactive() render(alt.layer( c, r ).resolve_scale( y='independent' ))We can see that, despite the increase in the number of comments, the toxicity rate is quite stable.- Green: comment counts- Blue: toxicity rate Additional toxic subtypesHere I plot histograms of scores for the additional toxicity subtypes **for scores higher than 0**.plot_dict = {} for col in ['severe_toxicity', 'obscene', 'threat', 'insult', 'identity_attack', 'sexual_explicit']: df_ = train.loc[train[col] > 0] hist_df = pd.cut(df_[col], 20).value_counts().sort_index().reset_index().rename(columns={'index': 'bins'}) hist_df['bins'] = hist_df['bins'].astype(str) plot_dict[col] = alt.Chart(hist_df).mark_bar().encode( x=alt.X("bins:O", axis=alt.Axis(title='Target bins')), y=alt.Y(f'{col}:Q', axis=alt.Axis(title='Count')), tooltip=[col, 'bins'] ).properties(title=f"Counts of {col} bins", width=300, height=200).interactive() render((plot_dict['severe_toxicity'] | plot_dict['obscene']) & (plot_dict['threat'] | plot_dict['insult']) & (plot_dict['identity_attack'] | plot_dict['sexual_explicit']))Text exploration Text lengthhist_df = pd.cut(train['comment_text'].apply(lambda x: len(x)), 10).value_counts().sort_index().reset_index().rename(columns={'index': 'bins'}) hist_df['bins'] = hist_df['bins'].astype(str) render(alt.Chart(hist_df).mark_bar().encode( x=alt.X("bins:O", axis=alt.Axis(title='Text length bins'), sort=list(hist_df['bins'].values)), y=alt.Y('comment_text:Q', axis=alt.Axis(title='Count')), tooltip=['comment_text', 'bins'] ).properties(title="Counts of text length bins", width=400).interactive()) text_length = train['comment_text'].apply(lambda x: len(x)).value_counts(normalize=True).sort_index().cumsum().reset_index().rename(columns={'index': 'Text length'}) render(alt.Chart(text_length).mark_line().encode( x=alt.X("Text length:Q", axis=alt.Axis(title='Text length')), y=alt.Y('comment_text:Q', axis=alt.Axis(title='Cumulative rate')), tooltip=['Text length', 'comment_text'] ).properties(title="Cumulative text length", width=400).interactive())It seems that there is a relatively high number of comments with length 1000. Maybe this is some kind of default maximum length? A quick way to check this is sketched below.
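A minimal way to check the suspected 1000-character cap is to count how many comments have exactly that length and compare against neighbouring lengths. This sketch is not part of the original kernel; it only assumes the `train` DataFrame loaded earlier.

```python
# Sketch: is length 1000 a spike (suggesting a hard cap on comment length)?
lengths = train['comment_text'].str.len()

exactly_1000 = (lengths == 1000).sum()
print(f"Comments with exactly 1000 characters: {exactly_1000} "
      f"({exactly_1000 / len(train):.2%} of all comments)")

# Counts for lengths around 1000; a hard cap would show up as a clear spike.
print(lengths.value_counts().sort_index().loc[995:1005])
```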
Word counthist_df = pd.cut(train['comment_text'].apply(lambda x: len(x.split())), 10).value_counts().sort_index().reset_index().rename(columns={'index': 'bins'}) hist_df['bins'] = hist_df['bins'].astype(str) render(alt.Chart(hist_df).mark_bar().encode( x=alt.X("bins:O", axis=alt.Axis(title='Word count bins'), sort=list(hist_df['bins'].values)), y=alt.Y('comment_text:Q', axis=alt.Axis(title='Count')), tooltip=['comment_text', 'bins'] ).properties(title="Counts of word count bins", width=400).interactive()) word_count = train['comment_text'].apply(lambda x: len(x.split())).value_counts(normalize=True).sort_index().cumsum().reset_index().rename(columns={'index': 'Word count'}) render(alt.Chart(word_count).mark_line().encode( x=alt.X("Word count:Q", axis=alt.Axis(title='Word count')), y=alt.Y('comment_text:Q', axis=alt.Axis(title='Cumulative rate')), tooltip=['Word count:Q', 'comment_text'] ).properties(title="Cumulative word count", width=400).interactive())We can see that ~90% of all comments have fewer than 125 words. IdentitiesSome of the comments are labeled with identities, but only nine of them are included in the evaluation: male, female, homosexual_gay_or_lesbian, christian, jewish, muslim, black, white, psychiatric_or_mental_illness. Basic modelLet's try building a baseline logistic regression on tf-idf and see what words are considered to be toxic.identity_columns = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness'] for col in identity_columns + ['target']: train[col] = np.where(train[col] >= 0.5, True, False) # adding preprocessing from this kernel: https://www.kaggle.com/taindow/simple-cudnngru-python-keras punct_mapping = {"_":" ", "`":" "} punct = "/-'?!.,#$%\'()*+-/:;<=>@[\\]^_`{|}~" + '""“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\×™√²—–&' def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') return text train['comment_text'] = train['comment_text'].apply(lambda x: clean_special_chars(x, punct, punct_mapping)) test['comment_text'] = test['comment_text'].apply(lambda x: clean_special_chars(x, punct, punct_mapping)) train_df, valid_df = train_test_split(train, test_size=0.1, stratify=train['target']) y_train = train_df['target'] y_valid = valid_df['target'] tokenizer = TweetTokenizer() vectorizer = TfidfVectorizer(ngram_range=(1, 2), tokenizer=tokenizer.tokenize, max_features=30000) %%time vectorizer.fit(train['comment_text'].values) train_vectorized = vectorizer.transform(train_df['comment_text'].values) valid_vectorized = vectorizer.transform(valid_df['comment_text'].values) logreg = LogisticRegression(n_jobs=6) logreg.fit(train_vectorized, y_train) oof_name = 'predicted_target' valid_df[oof_name] = logreg.predict_proba(valid_vectorized)[:, 1]/opt/anaconda3/envs/textenv/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copyValidateI use code from benchmark kernelSUBGROUP_AUC = 'subgroup_auc' BPSN_AUC = 'bpsn_auc' # stands for background positive, subgroup negative BNSP_AUC = 'bnsp_auc' # stands for background negative, subgroup positive def compute_auc(y_true, y_pred): try: return metrics.roc_auc_score(y_true, y_pred) except ValueError: return np.nan def compute_subgroup_auc(df, subgroup, label, oof_name): subgroup_examples = df[df[subgroup]] return compute_auc(subgroup_examples[label], subgroup_examples[oof_name]) def compute_bpsn_auc(df, subgroup, label, oof_name): """Computes the AUC of the within-subgroup negative examples and the background positive examples.""" subgroup_negative_examples = df[df[subgroup] & ~df[label]] non_subgroup_positive_examples = df[~df[subgroup] & df[label]] examples = subgroup_negative_examples.append(non_subgroup_positive_examples) return compute_auc(examples[label], examples[oof_name]) def compute_bnsp_auc(df, subgroup, label, oof_name): """Computes the AUC of the within-subgroup positive examples and the background negative examples.""" subgroup_positive_examples = df[df[subgroup] & df[label]] non_subgroup_negative_examples = df[~df[subgroup] & ~df[label]] examples = subgroup_positive_examples.append(non_subgroup_negative_examples) return compute_auc(examples[label], examples[oof_name]) def compute_bias_metrics_for_model(dataset, subgroups, model, label_col, include_asegs=False): """Computes per-subgroup metrics for all subgroups and one model.""" records = [] for subgroup in subgroups: record = { 'subgroup': subgroup, 'subgroup_size': len(dataset[dataset[subgroup]]) } record[SUBGROUP_AUC] = compute_subgroup_auc(dataset, subgroup, label_col, model) record[BPSN_AUC] = compute_bpsn_auc(dataset, subgroup, label_col, model) record[BNSP_AUC] = compute_bnsp_auc(dataset, subgroup, label_col, model) records.append(record) return pd.DataFrame(records).sort_values('subgroup_auc', ascending=True) oof_name = 'predicted_target' bias_metrics_df = compute_bias_metrics_for_model(valid_df, identity_columns, oof_name, 'target') bias_metrics_df def calculate_overall_auc(df, oof_name): true_labels = df['target'] predicted_labels = df[oof_name] return metrics.roc_auc_score(true_labels, predicted_labels) def power_mean(series, p): total = sum(np.power(series, p)) return np.power(total / len(series), 1 / p) def get_final_metric(bias_df, overall_auc, POWER=-5, OVERALL_MODEL_WEIGHT=0.25): bias_score = np.average([ power_mean(bias_df[SUBGROUP_AUC], POWER), power_mean(bias_df[BPSN_AUC], POWER), power_mean(bias_df[BNSP_AUC], POWER) ]) return (OVERALL_MODEL_WEIGHT * overall_auc) + ((1 - OVERALL_MODEL_WEIGHT) * bias_score) get_final_metric(bias_metrics_df, calculate_overall_auc(valid_df, oof_name))ELI5 for model interpretationAnd now let's use ELI5 to see how model makes predictions!import eli5 from eli5.lime import TextExplainer te = TextExplainer(random_state=42) def model_predict(x): return logreg.predict_proba(vectorizer.transform(x)) te.fit(valid_df['comment_text'].values[2:3][0], model_predict) te.show_prediction() te.fit(valid_df['comment_text'].values[12:13][0], model_predict) te.show_prediction() test_vectorized = vectorizer.transform(test['comment_text'].values) sub['prediction'] = logreg.predict_proba(test_vectorized)[:, 1] sub.to_csv('submission.csv', index=False) del logreg, vectorizer, test_vectorized, 
train_vectorized, valid_vectorizedSelecting number of words and sequence lengthOn of important hyperparameters for our neural nets will be the number of words in tokenizer and the number of words in sequence. Let's compare model AUC for different values of these parameters.For preparing data I use code from my kernel: https://www.kaggle.com/artgor/basic-cnn-in-kerasI train the same model on the same data for 3 epochs.def build_model(X_train, y_train, X_valid, y_valid, max_len, max_features, embedding_matrix, lr=0.0, lr_d=0.0, spatial_dr=0.0, dense_units=128, dr=0.1): file_path = "best_model.hdf5" check_point = ModelCheckpoint(file_path, monitor = "val_loss", verbose = 1, save_best_only = True, mode = "min") early_stop = EarlyStopping(monitor = "val_loss", mode = "min", patience = 3) inp = Input(shape = (max_len,)) x = Embedding(max_features, embed_size, weights = [embedding_matrix], trainable = False)(inp) x1 = SpatialDropout1D(spatial_dr)(x) # from benchmark kernel x = Conv1D(128, 2, activation='relu', padding='same')(x1) x = MaxPooling1D(5, padding='same')(x) x = Conv1D(128, 3, activation='relu', padding='same')(x) x = MaxPooling1D(5, padding='same')(x) x = Flatten()(x) x = Dropout(dr)(Dense(dense_units, activation='relu') (x)) x = Dense(1, activation = "sigmoid")(x) model = Model(inputs = inp, outputs = x) model.compile(loss = "binary_crossentropy", optimizer = Adam(lr = lr, decay = lr_d), metrics = ["accuracy"]) history = model.fit(X_train, y_train, batch_size = 128, epochs = 3, validation_data=(X_valid, y_valid), verbose = 0, callbacks = [check_point, early_stop]) model = load_model(file_path) return model full_text = list(train['comment_text'].values) + list(test['comment_text'].values) embedding_path = "../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec" embed_size = 300 oof_name = 'oof_name' def calculate_score(num_words, max_len, full_text, train_df, valid_df, embedding_path, embed_size, identity_columns, oof_name): tk = Tokenizer(lower = True, filters='', num_words=num_words) tk.fit_on_texts(full_text) def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embedding_index = dict(get_coefs(*o.strip().split(" ")) for o in open(embedding_path)) embedding_matrix = np.zeros((num_words + 1, embed_size)) for word, i in tk.word_index.items(): if i >= num_words: continue embedding_vector = embedding_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector del embedding_index train_tokenized = tk.texts_to_sequences(train_df['comment_text']) valid_tokenized = tk.texts_to_sequences(valid_df['comment_text']) # test_tokenized = tk.texts_to_sequences(test['comment_text']) X_train = pad_sequences(train_tokenized, maxlen = max_len) X_valid = pad_sequences(valid_tokenized, maxlen = max_len) # X_test = pad_sequences(test_tokenized, maxlen = max_len) model = build_model(X_train=X_train, y_train=y_train, X_valid=X_valid, y_valid=y_valid, max_len=max_len, max_features=embedding_matrix.shape[0], embedding_matrix=embedding_matrix, lr = 1e-3, lr_d = 0, spatial_dr = 0.0, dr=0.1) valid_df[oof_name] = model.predict(X_valid) bias_metrics_df = compute_bias_metrics_for_model(valid_df, identity_columns, oof_name, 'target') score = get_final_metric(bias_metrics_df, calculate_overall_auc(valid_df, oof_name)) del embedding_matrix, tk gc.collect() return score # scores = [] # for n_words in [50000, 100000]: # for seq_len in [150, 300]: # loc_score = calculate_score(n_words, seq_len, full_text, train_df, valid_df, embedding_path, embed_size, identity_columns, oof_name) 
# scores.append((n_words, seq_len, loc_score))In this exercise, you will write your first lines of code and learn how to use the coding environment for the micro-course! SetupFirst, you'll learn how to run code, and we'll start with the code cell below. (Remember that a **code cell** in a notebook is just a gray box containing code that we'd like to run.)- Begin by clicking inside the code cell. - Click on the blue triangle (in the shape of a "Play button") that appears to the left of the code cell.- If your code was run sucessfully, you will see `Setup Complete` as output below the cell.![ex0_run_code](https://i.imgur.com/TOk6Ot4.png)import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns print("Setup Complete")The code cell above imports and configures the Python libraries that you need to complete the exercise.Now, follow the same process to run the code cell below. If successful, you'll see `Setup Complete` as output.# Set up code checking from learntools.core import binder binder.bind(globals()) from learntools.data_viz_to_coder.ex1 import * print("Setup Complete")The code that you just ran sets up the system that will give you feedback on your work. You'll learn more about the feedback system in the next step. Step 1: Explore the feedback systemIn order to successfully finish this micro-course, you'll need to complete various hands-on coding exercises. Each exercise allows you to put your new skills to work with a real-world dataset. Along the way, you'll receive feedback on your work. We'll tell you if an answer you've written is correct or incorrect, provide customized hints, and show you our official solution (_if you'd like to take a look!_).To explore the feedback system, we'll start with a simple example of a coding problem. Follow the following steps in order:1. Begin by running the code cell below without making any edits. This should return the following output: > Check: When you've updated the starter code, `check()` will tell you whether your code is correct. You need to update the code that creates variable `one` This feedback tells us that there are some necessary changes to the code that we haven't made yet: we need to set the variable `one` to something other than the blank provided below (`____`). 2. Replace the underline with a value of `2`, so that the line of code appears as `one = 2`. Then, run the code cell. This should return the following output:> Incorrect: Incorrect value for `one`: `2` This feedback tells us that the value that we've provided is incorrect: `2` is not the correct answer here!3. Now, change the value of `2` to `1`, so that the line of code appears as `one = 1`. Then, run the code cell. The answer should be marked as Correct, and, you have now completed this problem!In this exercise, you are responsible for filling in the line of code that sets the value of variable `one`. **Please never edit the code that is used to check your answer.** So, lines of code like `step_1.check()` and `step_2.check()` should always be left as provided.# Fill in the line below one = 1 # Check your answer step_1.check() #%%RM_IF(PROD)%% one = 1 step_1.assert_check_passed()This problem was relatively straightforward, but for more difficult problems, you may like to receive a hint or view the official solution. Run the code cell below now to receive both for this problem.step_1.hint() step_1.solution()Step 2: Load the dataNow, we're ready to get started with some data visualization code! 
You'll begin by loading the dataset from the previous tutorial. Recall that loading a dataset into a notebook is done in two parts:- begin by specifying the location (or [filepath](https://bit.ly/1lWCX7s)) where the dataset can be accessed, and then- use the filepath to load the contents of the dataset into the notebook.We have provided the first part for you, and you need to fill in the second part to set the value of `fifa_data`. Feel free to copy this code from the tutorial. Once running the code returns a Correct result, you're ready to move on!# Path of the file to read fifa_filepath = "../input/fifa.csv" # Fill in the line below to read the file into a variable fifa_data fifa_data = ____ # Check your answer step_2.check() #%%RM_IF(PROD)%% fifa_data = pd.read_csv(fifa_filepath, index_col="Date", parse_dates=True) step_2.assert_check_passed()Recall the difference between comments and executable code:- **Comments** are preceded by a pound sign (``) and contain text that appear faded and italicized. They are completely ignored by the computer when the code is run.- **Executable code** is code that is run by the computer.In the code cell below, every line is a comment:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```If you run the code cell (that appears below this big block of text) as-is, it won't return any output. Try this now!Next, remove the pound sign before `step_2.hint()` so that the code cell appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```When we remove the pound sign before a line of code, we say we **uncomment** the line. This turns the comment into a line of executable code that is run by the computer. Run the code cell now, which should return the Hint as output.Finally, uncomment the line to see the solution, so the code cell appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```Then, run the code cell. You should receive both a Hint and the Solution.If at any point you're having trouble with coming up with the correct answer to a problem, you are welcome to obtain either a hint or the solution before completing the cell. (So, you don't need to get a Correct result before running the code that gives you a Hint or the Solution.)# Uncomment the line below to receive a hint #_COMMENT_IF(PROD)_ step_2.hint() # Uncomment the line below to see the solution #_COMMENT_IF(PROD)_ step_2.solution()Step 3: Review the dataIn the next code cell, use a Python command to print the first 5 rows of the data. Please completely erase the underline (`____`) and fill in your own code. If you don't remember how to do this, please take a look at the previous tutorial, ask for a Hint, or view the Solution. The code you write here won't give you feedback on whether your answer is correct, but you'll know if your answer is right if it prints the first 5 rows of the dataset!# Print the last five rows of the data ____ # Your code hereUse the first 5 rows of the data to answer the question below.# Fill in the line below: What was Brazil's ranking (Code: BRA) on December 23, 1993? 
brazil_rank = ____ # Check your answer step_3.check() #%%RM_IF(PROD)%% brazil_rank = 3 step_3.assert_check_passed()If you haven't already, uncomment the lines and run the code to view the Hint and the Solution.# Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_3.hint() #_COMMENT_IF(PROD)_ step_3.solution()Step 4: Plot the dataNow that the data is loaded into the notebook, you're ready to visualize it! Part ACopy the code from the tutorial that we used to make a line chart. This code may not make sense just yet - you'll learn all about it in the next tutorial!Before proceeding to **Part B** of this question, make sure that you get a Correct result. If you need help, feel free to view the Hint or the Solution.# Set the width and height of the figure plt.figure(figsize=(16,6)) # Fill in the line below: Line chart showing how FIFA rankings evolved over time ____ # Your code here # Check your answer step_4.a.check() #%%RM_IF(PROD)%% plt.figure(figsize=(16,6)) sns.lineplot(data=fifa_data) step_4.a.assert_check_passed()If you haven't already, uncomment the lines and run the code to view the Hint and the Solution.# Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_4.a.hint() #_COMMENT_IF(PROD)_ step_4.a.solution_plot()Part BSome of the questions that you'll encounter won't require you to write any code. Instead, you'll generally need to interpret visualizations. These questions don't require a Correct result, and you won't be able to check your answer. However, you can receive a Hint to guide the way you think about the question, or you can view our official Solution. As an example, consider the question: Considering only the years represented in the dataset, which countries spent at least 5 consecutive years in the 1 ranked spot?To receive a Hint, uncomment the line below, and run the code cell.#_COMMENT_IF(PROD)_ step_4.b.hint()To see the Solution, uncomment the line and run the code cell.#_COMMENT_IF(PROD)_ step_4.b.solution()This is a classic problem of *Classification* that is very similar to the **MNIST** digit recognition problem with the exception that this problem comprises of 5 classes whereas **MNIST** has 10 classes. As this is a image-based problem one good approach is to build a Convolutional Neural Network (CNN). (We can go for SVM with a softmax loss, but this is better) 1. INTRODUCTION#Importing all the necessary packages import numpy as np # Python's library to deal with Scientific computation. 
It is famous for its efficiency and speed. import pandas as pd # Python's library for data cleaning and handling import matplotlib.pyplot as plt # Python's library for visualization from sklearn.preprocessing import OneHotEncoder # To convert the numeric class labels into one-hot vectors import cv2 # Advanced "Image Processing" library for Python import os # For changing the current working directory and for file handling from sklearn.metrics import confusion_matrix, classification_report # Metrics useful for evaluating our model from sklearn.utils import shuffle # To shuffle the dataset randomly in-place to avoid any possible bias from keras.models import Sequential # Choosing the 'Sequential' model from the two available model types from keras.layers import Conv2D, MaxPool2D, Dropout, Dense, Flatten # Other important components for building the network ###from keras.optimizers import RMSprop # Discarded RMSprop as 'Adam' was performing better ###from keras.callbacks import ReduceLROnPlateau # I felt no need for reducing the learning rate constantly over the iterations as the model converged pretty quickly and there were no saddle points import warnings # To ignore package deprecation warnings, etc. # To plot the graphs and figures in the notebook itself and not in separate windows %matplotlib inline warnings.filterwarnings('ignore') #Turn debugging mode 'ON' or 'OFF' DEBUG = True #Storing the paths of both the training and test sets in respective variables training_set_path = './/Train_val//' test_set_path = './/Test//' #Storing the list of all character classes character_classes = [folder_name.split('_')[2] for folder_name in sorted(os.listdir(training_set_path))] if DEBUG: print(character_classes)['ka', 'kha', 'ga', 'gha', 'kna']2. PRE-PROCESSING 2.1 Training Data Creation#Changing the working directory to the location where the training data is present os.chdir(training_set_path) #Iterating through all the character folders and building a training set and its corresponding class labels images = [] classes = [] for class_number, character_folder in enumerate(sorted(os.listdir())): os.chdir('.//' + character_folder + '//') for image in sorted(os.listdir()): img = cv2.imread(image, cv2.IMREAD_GRAYSCALE) # Converting the color image (3 channels) into a grayscale image (1 channel) height, width = img.shape reshaped_img_vector = img.reshape(height * width) # Flatten the image into a long single vector images.append(reshaped_img_vector) classes.append(class_number) os.chdir('.//..//') #Returning to the home directory where the Jupyter notebook is present os.chdir('.//..//')2.2 Test Data Creation#Changing the working directory to the location where the test data is present os.chdir(test_set_path) #Iterating through all the character folders and building a test set and its corresponding class labels images = [] classes = [] for class_number, character_folder in enumerate(sorted(os.listdir())): os.chdir('.//' + character_folder + '//') for image in sorted(os.listdir()): img = cv2.imread(image, cv2.IMREAD_GRAYSCALE) # Converting the color image (3 channels) into a grayscale image (1 channel) height, width = img.shape reshaped_img_vector = img.reshape(height * width) # Flatten the image into a long single vector images.append(reshaped_img_vector) classes.append(class_number) os.chdir('.//..//') #Returning to the home directory where the Jupyter notebook is present os.chdir('.//..//')2.3 Normalization We perform a grayscale normalization to reduce the effect of illumination
differences. Moreover, the CNN converges faster on [0..1] data than on [0..255]. (The minimum intensity value is 0 and the maximum intensity value is 255.) 2.3 (a) Normalizing training data#Normalizing the image intensity values to be in the range 0 - 1 (the images are mostly black & white, so a single channel suffices, and scaling the intensities also eases computation when the data is passed through the neural network) X_train = np.array(images) / 255.0 X_train = X_train.reshape(-1, 32, 32, 1) # Reshape the 1024 x 1 vector back to a 32 x 32 image so that it can be passed to the CNN. '-1' indicates that this dimension is inferred from the "total number of samples" y_train = np.array(classes) y_train = y_train.reshape(y_train.shape[0], 1) #Shuffling the training set in order to avoid bias during training X_train, y_train = shuffle(X_train, y_train, random_state=0)2.3 (b) Normalizing test data#Normalizing the image intensity values to be in the range 0 - 1 (the images are mostly black & white, so a single channel suffices, and scaling the intensities also eases computation when the data is passed through the neural network) X_test = np.array(images) / 255.0 X_test = X_test.reshape(-1, 32, 32, 1) # Reshape the 1024 x 1 vector back to a 32 x 32 image so that it can be passed to the CNN. '-1' indicates that this dimension is inferred from the "total number of samples" y_test = np.array(classes) y_test = y_test.reshape(y_test.shape[0], 1) #Shuffling the test set in order to avoid bias during class prediction X_test, y_test = shuffle(X_test, y_test, random_state=0)2.4 Check for NULL and MISSING values Null and missing values occur if any of the images are corrupted, so check for them, as they may affect the model's performance.#Check for null or missing values in the training set pd.DataFrame.from_records(X_train).isnull().any().describe() #Check for null or missing values in the test set pd.DataFrame.from_records(X_test).isnull().any().describe()2.5 One-Hot encoding The problem with label encoding (or numeric encoding) is that it **assumes** that a *higher* categorical value implies a *better* category.#One-hot encoding training set labels onehot_encoder = OneHotEncoder(sparse=False) # Setting the 'sparse' argument to "False" to avoid problems when using Keras y_train_encoded = onehot_encoder.fit_transform(y_train) #One-hot encoding test set labels (NOT REQUIRED, as we predict these values from the probabilities obtained from the 'Softmax' function) ###onehot_encoder = OneHotEncoder(sparse=False) # Setting the 'sparse' argument to "False" to avoid problems when using Keras ###y_test_encoded = onehot_encoder.fit_transform(y_test)2.6 Validation set creation A validation set lets us perform hyperparameter tuning and choose the right model for the problem under consideration. It also ensures that we do not tune parameters on the test data, so the final testing remains unbiased. * Popular **train-validation-test** splits are 70-20-10, 60-20-20, 95-2.5-2.5, 99-0.5-0.5, etc.#Randomly choosing 33% of the training samples to be the cross-validation (hold-out) set num_values = int(0.33 * X_train.shape[0]) random_samples = np.random.permutation(X_train.shape[0])[:num_values] X_val = X_train[random_samples] y_val = y_train_encoded[random_samples] #Deleting the previously chosen random samples from our training set along the rows (axis=0) X_train = np.delete(X_train, random_samples, axis=0) y_train_encoded = np.delete(y_train_encoded, random_samples, axis=0) X_train.shape, X_val.shape 3.
UNDERSTANDING THE DATASETif DEBUG: _, ax = plt.subplots(1, 2) ax[0].axis('off') #A random training data sample ax[0].imshow(X_train[np.random.randint(X_train.shape[0])].reshape(32, 32)) ax[0].set_title('A random training sample') ax[1].axis('off') #A random test data sample ax[1].imshow(X_test[np.random.randint(X_test.shape[0])].reshape(32, 32)) ax[1].set_title('A random test sample') if DEBUG: #Getting to know about the data more... num_of_train_samples = X_train.shape[0] num_of_val_samples = X_val.shape[0] num_of_test_samples = X_test.shape[0] num_classes = len(np.unique(y_train)) print("The training set is of shape : ", X_train.shape) print("The training labels vector is of shape : ", y_train_encoded.shape) print() print("The validation set is of shape : ", X_val.shape) print("The validation labels vector is of shape : ", y_val.shape) print() print("The test set is of shape : ", X_test.shape) print("The test labels vector is of shape : ", y_test.shape) print("\n") print("There are " + str(num_of_train_samples) + " training, " + str(num_of_val_samples) + " validation and " + str(num_of_test_samples) + " test samples respectively.") print("The total number of classes are :- ", num_classes)The training set is of shape : (1005, 32, 32, 1) The training labels vector is of shape : (1005, 5) The validation set is of shape : (495, 32, 32, 1) The validation labels vector is of shape : (495, 5) The test set is of shape : (1500, 32, 32, 1) The test labels vector is of shape : (1500, 1) There are 1005 training, 495 validation and 1500 test samples respectively. The total number of classes are :- 54. CNN MODEL CNN architechture is **Input -> [ [ Conv2D -> ReLU] * 2 -> MaxPool2D -> Dropout ] * 2 -> Flatten -> Dense -> Dropout -> Output**#Creating the model architecture model = Sequential() model.add(Conv2D(filters = 32, kernel_size = (5, 5), padding = 'Same', activation ='relu', input_shape = (32, 32, 1))) model.add(Conv2D(filters = 32, kernel_size = (5, 5), padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size = (3, 3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size = (3, 3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation = "softmax")) #Define the optimizer ###optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) #Compile the model model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) #Set a learning rate annealer ###learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) ### epochs = 30 ### batch_size = 128 epochs = 15 batch_size = 64 history = model.fit(X_train, y_train_encoded, batch_size = batch_size, epochs = epochs, validation_data = (X_val, y_val), verbose = 2)Train on 1005 samples, validate on 495 samples Epoch 1/15 - 6s - loss: 1.3995 - acc: 0.4398 - val_loss: 0.6863 - val_acc: 0.7455 Epoch 2/15 - 6s - loss: 0.6861 - acc: 0.7682 - val_loss: 0.3043 - val_acc: 0.8788 Epoch 3/15 - 6s - loss: 0.3419 - acc: 0.8836 - val_loss: 0.1584 - val_acc: 0.9495 Epoch 4/15 - 6s - loss: 0.1604 - acc: 0.9453 - val_loss: 0.0939 - val_acc: 0.9636 Epoch 5/15 - 6s - loss: 0.1359 - acc: 0.9483 - val_loss: 0.0686 - val_acc: 0.9636 Epoch 6/15 - 6s - loss: 0.0807 - acc: 0.9771 - val_loss: 0.0377 - 
val_acc: 0.9879 Epoch 7/15 - 6s - loss: 0.0486 - acc: 0.9831 - val_loss: 0.0259 - val_acc: 0.9879 Epoch 8/15 - 6s - loss: 0.0483 - acc: 0.9781 - val_loss: 0.0333 - val_acc: 0.9899 Epoch 9/15 - 6s - loss: 0.0395 - acc: 0.9861 - val_loss: 0.0423 - val_acc: 0.9899 Epoch 10/15 - 6s - loss: 0.0390 - acc: 0.9891 - val_loss: 0.0260 - val_acc: 0.9939 Epoch 11/15 - 6s - loss: 0.0270 - acc: 0.9920 - val_loss: 0.0266 - val_acc: 0.9919 Epoch 12/15 - 6s - loss: 0.0220 - acc[...]5. EVALUATE THE MODEL We've now trained our model (above); let's proceed to validating it and tuning the network#Plot the loss and accuracy curves for training and validation sets if DEBUG: ### LOSS CURVE ### _, ax = plt.subplots(1, 2) ax[0].plot(history.history['loss'], color='b', label="Training loss") ax[0].plot(history.history['val_loss'], color='r', label="Validation loss") legend = ax[0].legend(loc='best', shadow=True) ### ACCURACY CURVE ### ax[1].plot(history.history['acc'], color='b', label="Training accuracy") ax[1].plot(history.history['val_acc'], color='r', label="Validation accuracy") legend = ax[1].legend(loc='best', shadow=True)6. PREDICTING THE CLASS LABELS OF THE TEST DATA It is time to test the model we built above on **new images** (that it has never seen before!)#Collecting the class probabilities that are obtained from the 'Softmax' function predictions = model.predict_on_batch(X_test) #Considering only the class with the maximum 'class probability', as this is our "Predicted Class" predicted_classes = predictions.argmax(axis=1) # Returns the index with the maximum value along each row if DEBUG: predicted_classes # 0 - 'ka', 1 - 'kha', 2 - 'ga', 3 - 'gha', 4 - 'kna'7. EVALUATION METRICS 7.1 Confusion Matrix Check the distribution of correctly and incorrectly classified labels across all classesmatrix = confusion_matrix(y_test, predicted_classes) print(matrix)[[300 0 0 0 0] [ 0 299 1 0 0] [ 0 0 300 0 0] [ 0 0 2 298 0] [ 0 1 0 1 298]]7.2 Classification Report Generate a report with precision, recall, F1-score, etc. for our datareport = classification_report(y_test, predicted_classes) print(report)precision recall f1-score support 0 1.00 1.00 1.00 300 1 1.00 1.00 1.00 300 2 0.99 1.00 1.00 300 3 1.00 0.99 0.99 300 4 1.00 0.99 1.00 300 micro avg 1.00 1.00 1.00 1500 macro avg 1.00 1.00 1.00 1500 weighted avg 1.00 1.00 1.00 1500CH10 Breadth-first traversal of a tree - STUDENT VERSION We have studied 3 ***depth-first*** traversals (infix, postfix, prefix), i.e. a branch is explored all the way down to a leaf before moving on to the other children. Ex 1: Recap. Give the output produced by the infix, postfix and prefix traversals of the following binary tree: answer: - prefix: - infix: - postfix: Ex 2: Breadth-first traversal. Consider the following organizational chart. The task is to list the management staff while respecting the hierarchical order; this is a breadth-first traversal. Produce this list "by hand". Ex 3. Python implementation of breadth-first traversal. Idea: the nodes are stored in a queue. Each time a node is processed, it is removed from the queue and its children are enqueued, and so on.
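As a preview of Ex 4, here is one possible sketch of such a breadth-first traversal, written against the `ArbreBinaire` and `Queue` classes given just below (this is only an illustrative sketch; Ex 4 still asks you to write the function yourself):
```python
def largeur(arbre):
    # Breadth-first traversal: process the nodes level by level using a FIFO queue
    f = Queue()
    if not arbre.estVide():
        f.enqueue(arbre)
    while not f.empty():
        courant = f.dequeue()                    # take the oldest node in the queue
        print(courant.racine())                  # process it: print its value
        if not courant.filsGauche().estVide():
            f.enqueue(courant.filsGauche())      # enqueue the left child
        if not courant.filsDroit().estVide():
            f.enqueue(courant.filsDroit())       # enqueue the right child
```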
.. We need the ArbreBinaire class:class Noeud: def __init__(self, valeur, gauche, droit): self.r = valeur self.g = gauche self.d = droit class ArbreBinaire: def __init__(self, c): # if the node is None, the tree is empty self.n = c def creeVide(): # this is a class method return ArbreBinaire(None) def creeNGD(valeur, gauche, droit): return ArbreBinaire(Noeud(valeur, gauche, droit)) # this is a class method def estVide(self): return self.n is None def racine(self): assert not(self.n is None), 'Arbre vide' return self.n.r def filsGauche(self): assert not(self.n is None), 'Arbre vide' return self.n.g def filsDroit(self): assert not(self.n is None), 'Arbre vide' return self.n.d # display def affiche_r(self,p):# recursive display (right subtree, root, left subtree), p is the depth if not self.estVide(): # recursion self.filsDroit().affiche_r(p+1) print(' '*p,self.racine()) self.filsGauche().affiche_r(p+1) def affiche(self): self.affiche_r(0) vide = ArbreBinaire(None) def creeFeuille(x): # convenience function to create a leaf, i.e. a binary tree with no children return ArbreBinaire.creeNGD(x, vide, vide)and the Queue class:class Queue: ''' attribute: the underlying list (normally not accessed directly). methods: enqueue, dequeue, empty, size''' def __init__(self): self.l=[] def enqueue(self,e): self.l.append(e) def dequeue(self): if len(self.l)>0: return self.l.pop(0) else: return None def empty(self): return len(self.l)==0 def size(self): return len(self.l) def __str__(self): return str(self.l) # creation of the "organizational chart" binary tree orga=ArbreBinaire.creeNGD('', ArbreBinaire.creeNGD('',creeFeuille(''),vide), ArbreBinaire.creeNGD('',creeFeuille('M.Herbert'),creeFeuille('J.Martinez')))Ex 4: write the function `largeur(arbre):` that prints the values of the tree in breadth-first orderlargeur(orga) M.Herbert J.Martinez for www.canari.dev (Feb-2021) Deutsche Boerse A7 usage example for equity options **Downloading intraday prices and trades from A7 and calibrating a vol surface**Abstract: Any data analysis project on options must start by computing an implicit volatility surface. This notebook, together with the following dependencies, takes care of this initial stage: you will need to download the following files from the code folder:- PricingAndCalibration.py (does the calibration heavy lifting)- DateAndTime.py (for date related functions)- Setup.py (in order to indicate where input and output must go)You will also need to load the minsize_level_tb.yml pre-processing code in A7 (see the readme file for more explanations)# Indicate here the folders where you want the quotes and trades data (folder1) # and the calibration result with "fleshed" trades (folder 2) folder1 = 'D:/Users/GitHub/TradesDynamics/processed' folder2 = 'D:/Users/GitHub/TradesDynamics/parameters' import os os.makedirs(folder1, exist_ok=True) os.makedirs(folder1 + '/raw', exist_ok=True) os.makedirs(folder2, exist_ok=True) # We are now importing public libraries import numpy as np import pandas as pd import QuantLib as ql #free derivatives pricing package import math import datetime import matplotlib.pyplot as plt import requests import warnings pd.set_option('display.width', 200) pd.set_option('display.max_columns', 30) # ...and specific libraries available in this git from DateAndTime import DateAndTime # uses QuantLib to calculate the number of business days between dates and generate a list of expiration dates from PricingAndCalibration import Pricing # uses QuantLib to price European and American options with
continuous dividend yield and the associated greeks from PricingAndCalibration import FittingSpline # uses scipy-UnivariateSpline to fit a 2nd degree spline through the implicit vol of bid and ask quotes (for each maturity)We will first retrieve trades and order book data from A7#indicate your A7 credentials : owner = 'your A7 username here' API_TOKEN = "Bearer " + "your A7 API token here" # The API token is obtained by clicking on your name in the upper right corner of the A7 Analytics Platform, and then on "API token generation" proxies = { "http": "", # Enter http Proxy if needed", "https": "" # Enter https Proxy if needed", } #choose a date for analysis : reference_date = '20210105' # Select an underlying udl = 'DAI' isin = 'DE0007100000' # Select an algo for the retrieving of quotes. # 'top_level' algo is pre-loaded in A7 # 'minsize_level_tb' allows you to look into the orderbook until finding a minimum number of lots. # 'minsize_level_tb' is given in this git as a .yml file and must be loaded first in your A7 account. algo = 'minsize_level_tb' # If you have chosen the 'minsize_level' algo : min_lots = 30 #Some unimportant parameters and inital settings # filter settings to speed up the process # for 1 year maturity option with an adjustment in sqrt(T) moneyness_range_call = (-0.4, 0.7) moneyness_range_put = (-0.7, 0.4) DT = DateAndTime('2021-01-05', '2021-01-05') df_orderbook = pd.DataFrame() df_trades = pd.DataFrame() # Let's first find the identification code for the stock itself : url = 'https://a7.deutsche-boerse.com/api/v1/rdi/XETR/{}?mode=detailed'.format(reference_date) r = requests.get(url=url, headers={'Authorization': API_TOKEN}, proxies = proxies) res = r.json() lst_ms = np.array([x['MarketSegment'] for x in res['MarketSegments']]) indx = np.where(lst_ms==isin)[0][0] segmentIDudl = res['MarketSegments'][indx]['MarketSegmentID'] print('Market Segment for the underlying {} :: {}'.format(udl, str(segmentIDudl))) url = 'https://a7.deutsche-boerse.com/api/v1/rdi/XETR/{}/{}?mode=detailed'.format(reference_date, segmentIDudl) r = requests.get(url=url, headers={'Authorization': API_TOKEN}, proxies = proxies) res_u = r.json() security = res_u['Securities'][0] # Let's now get the get all options segments for this underlying (we will filter them mater) url = 'https://a7.deutsche-boerse.com/api/v1/rdi/XEUR/{}?mode=detailed'.format(reference_date) r = requests.get(url = url, headers={'Authorization': API_TOKEN}, proxies = proxies) res = r.json() lst_ms = np.array([x['MarketSegment'] for x in res['MarketSegments']]) indx = np.where(lst_ms==udl)[0][0] segmentIDopt = res['MarketSegments'][indx]['MarketSegmentID'] print('Market Segment for options on {} :: {}'.format(udl, str(segmentIDopt))) url = 'https://a7.deutsche-boerse.com/api/v1/rdi/XEUR/{}/{}?mode=detailed'.format(reference_date, segmentIDopt) r = requests.get(url = url, headers={'Authorization': API_TOKEN}, proxies = proxies) res_i = r.json() # We will now retrieve the quotes (underlying and options) selected_fields = ['SecurityDesc', 'SecurityID'] selected_fields_desc = ['PutOrCall', 'StrikePrice', 'ContractMultiplier', 'ExerciseStyle'] raw = pd.DataFrame() matulist = sorted(list(set([str(elt['MaturityDate']) for elt in res_i['Securities'] if elt['MaturityDate'] != None]))) for matu in ['UDL'] + matulist: print(matu) df = pd.DataFrame(columns=['SegmentID'] + selected_fields + selected_fields_desc) if matu == 'UDL': df.loc[0] = [segmentIDudl, security['SecurityDesc'], security['SecurityID'], 'S', None, 1, None] df['in_range'] = 
True else: i = 0 for x in res_i['Securities']: if (str(x['MaturityDate']) == matu) and (x['SecurityType'] == 'OPT'): df.loc[i] = [segmentIDopt] + [x[elt] for elt in selected_fields] + \ [x['DerivativesDescriptorGroup']['SimpleInstrumentDescriptorGroup'][elt] for elt in selected_fields_desc] i += 1 df.sort_values(by=['StrikePrice', 'PutOrCall'], ascending = [True, True], inplace=True) # Computing moneyness/sqrt(T) will allow us to filter out deep ITM options TTM = DT.time_between(pd.Timestamp(reference_date), pd.Timestamp(matu)) df['moneyness_T'] = df.apply(lambda opt: math.log(opt.StrikePrice / FVU) / (max(3.0 / 12.0, TTM) ** 0.5), axis='columns') # the forward ratio is unknown at this stage so we take a high dividend rate of 8% as instead, hence the 0.92 df['moneyness_T_w_div'] = df.apply(lambda opt: math.log(opt.StrikePrice / FVU*0.92) / (max(3.0 / 12.0, TTM) ** 0.5), axis='columns') df['in_range'] = df.apply(lambda opt: (opt.moneyness_T_w_div > moneyness_range_call[0]) and (opt.moneyness_T < moneyness_range_call[1]) \ if opt.PutOrCall == '1' else \ (opt.moneyness_T_w_div > moneyness_range_put[0]) and (opt.moneyness_T < moneyness_range_put[1]), axis='columns') df = df.loc[df.in_range] for index, opt in df.iterrows(): if opt['PutOrCall'] == 'S': market = 'XETR' url = 'https://a7.deutsche-boerse.com/api/v1/algo/{}/top_level/'.format(owner) url = url+"run?marketId={}&date={}&marketSegmentId={}&securityId={}".format(market, reference_date, opt['SegmentID'], opt['SecurityID']) else: market = 'XEUR' if algo == 'top_level': url = 'https://a7.deutsche-boerse.com/api/v1/algo/{}/top_level/'.format(owner) url = url+"run?marketId={}&date={}&marketSegmentId={}&securityId={}".format(market, reference_date, opt['SegmentID'], opt['SecurityID']) elif algo == 'minsize_level_tb': url = 'https://a7.deutsche-boerse.com/api/v1/algo/{}/minsize_level_tb/'.format(owner) url = url+"run?marketId={}&date={}&marketSegmentId={}&securityId={}&from_h=9&from_m=0&&min_lots={}&to_h=17&to_m=30&ts_step=5".format(market, reference_date, opt['SegmentID'], opt['SecurityID'], min_lots) r = requests.get(url=url, headers={'Authorization': API_TOKEN}, proxies = proxies) res = r.json() if type(res) == list: if (algo == 'minsize_level_tb') and (opt['PutOrCall'] != 'S'): df_opt = pd.DataFrame.from_dict(res[0]['series'][0]['content']) df_opt.ts = df_opt.ts.astype(np.int64) df_opt.ts = pd.to_datetime(df_opt.ts) df_opt.set_index('ts', inplace=True) df_opt[selected_fields_desc] = opt[selected_fields_desc] df_opt['matu'] = matu df_orderbook = df_orderbook.append(df_opt) else: bid_ask_sampled = {} for i, bidask in enumerate(['bid', 'ask']): df_price = pd.DataFrame(index=res[0]['series'][i]['content']['ts']) df_price = df_price.assign(pv=res[0]['series'][i]['content']['price']) df_price = df_price.dropna() if df_price.shape[0] > 0: df_price['pv'] = df_price['pv'].astype(float)/1e3 df_price.columns = [bidask] df_price.index = df_price.index.astype(np.int64) df_price.index = pd.to_datetime(df_price.index) for elt in selected_fields_desc: df_price[elt] = opt[elt] df_price['matu'] = matu if opt['PutOrCall'] == 'S': df_raw = df_price.copy() df_raw.rename(columns={bidask: 'level'}, inplace=True) df_raw['bidask'] = bidask for elt in selected_fields: df_raw[elt] = opt[elt] raw = raw.append(df_raw) index = pd.date_range(df_price.index[0].round('T'), df_price.index[-1], freq='1T') df_price = df_price.reindex(index, method='ffill') bid_ask_sampled[bidask] = df_price if len(bid_ask_sampled) == 2: df_opt = pd.merge(bid_ask_sampled['bid'][['bid']], 
bid_ask_sampled['ask'], how='inner', left_index=True, right_index=True) if opt['PutOrCall'] == 'S': FVU = (df_opt.bid.median() + df_opt.ask.median())/2 df_orderbook = df_orderbook.append(df_opt) raw.to_pickle(folder1 + '/raw/Quotes_' + '{}_{}.pkl'.format(udl, reference_date)) df_orderbook.to_pickle(folder1 + '/Quotes_' + udl + '.pkl') # Finally, we retreive the trades selected_fields = ['SecurityDesc', 'SecurityID'] selected_fields_desc = ['PutOrCall', 'StrikePrice', 'ContractMultiplier', 'ExerciseStyle'] for matu in matulist: df = pd.DataFrame(columns=['SegmentID'] + selected_fields + selected_fields_desc) i = 0 for x in res_i['Securities']: if (str(x['MaturityDate']) == matu) and (x['SecurityType'] == 'OPT'): df.loc[i] = [segmentIDopt] + [x[elt] for elt in selected_fields] + \ [x['DerivativesDescriptorGroup']['SimpleInstrumentDescriptorGroup'][elt] for elt in selected_fields_desc] i += 1 for index, opt in df.iterrows(): url = 'https://a7.deutsche-boerse.com/api/v1/algo/{}/trades_PVA/'.format(owner) market = 'XEUR' url = url+"run?marketId={}&date={}&marketSegmentId={}&securityId={}".format(market, reference_date, opt['SegmentID'], opt['SecurityID']) r = requests.get(url=url, headers={'Authorization': API_TOKEN}, proxies = proxies) res = r.json() if (type(res) == list) and (len(res[0]['series'][0]['content']['time'])>0): df_opt = pd.DataFrame.from_dict(res[0]['series'][0]['content']) df_opt.index = df_opt.index.astype(np.int64) df_opt.index = pd.to_datetime(df_opt.index) for field in ['time', 'priots', 'bidentry', 'askentry']: df_opt[field] = df_opt[field].astype(np.int64) df_opt[field] = pd.to_datetime(df_opt[field]) df_opt.set_index('time', inplace=True) df_opt[selected_fields_desc] = opt[selected_fields_desc] df_opt['matu'] = matu df_opt['SegmentID'] = opt['SegmentID'] df_opt['SecurityID'] = opt['SecurityID'] df_trades = df_trades.append(df_opt) df_trades.to_pickle(folder1 + '/Trades_' + udl + '.pkl')Let's now fit volatility spline curves on the bid and ask quotes separatelywarnings.filterwarnings('ignore') FS = FittingSpline(udl, DT, folder1, folder2) FS.fit_all() for reference_date in [elt for elt in DT.dates_list]: print(reference_date) matulist = [elt for elt in DT.get_matu_list(reference_date) if elt != reference_date] for matu in matulist: print(' ' + matu) #ini_day intializies the dataframe and sets the starting implicit vol flat at 30% FS.ini_day(reference_date, matu) #fit_day starts a process of fitting the vol curve every 5 minutes allong with the forward ratio (dividend + repo yield) FS.fit_day() FS.df_params.to_pickle(folder2 + '/Params_' + udl + '.pkl') print(FS.df_params[['spline_bid', 'spline_ask']].head(5))20210105 20210115 20210219 20210319 20210416 20210514 20210618 20210917 20211217 leeway : 4 leeway : 4 20220617 20221216 20210105 20210115 20210219 20210319 20210416 20210514 20210618 20210917 20211217 20220617 20221216 spline_bid spline_ask ts matu 2021-01-05 08:05:00 20210115 Congratulations, you have created a parameters dataframe with the fitted spline curve for the bid and ask implicit vol#Let's now graph what we have done : FS.graph(day="20210105", matu="20210319") # First graph : the spline curves themselves at different times of day # Second graphs : We use these volatilities to compute a fair bid and fair ask price for each strike # (Put on the left, Calls on the right). 
# Since we are representing on the same graph options with different strikes, the values are rebased # so that the model (or fair_value) bid is at 0, allowing for a more compact graphAnalyse Psylab Sarah, Ginny, Emily Preparation: Imports and Stuff, load Datasetimport os import pandas as pd import numpy as np import matplotlib.pyplot as plt from IPython.display import display import seaborn as sns print(abspath(".")) df = pd.read_csv(os.path.join("..","..","Data","main","results_281_Group8_Conformity_Pilot_Group8_final.csv")) with pd.option_context('display.max_columns', 9999, 'display.max_rows', 10): display(df.head())Clean Dataset* remove superflous columns and rows* make cleaner format (one row per participant, ...)df = df.drop(columns=["QUD","RT", "age", "comments", "education", "endTime", "experiment_id", "gender", "id", "languages", "startDate", "startTime", "text2", "timeSpent", "trial_name", "trial_number"]) df.head() #for all participants, drop fith row (irrelevant) df = df.drop(index=df[df["question"] == "How do you feel about your choice?"].index) df = df.drop(columns=["option1", "option2", "option3", "option4", "option5", "s1", "s2", "s3", "s4", "s5"]) df.head() df.loc[df["question"].isna(), "question"] = "statement" df.head() #so now there's 4 rows per participant, and we'll in a next step use the .pivot()-method to create one row per participant. However what's left there #would be the text1-column which doesn't fit the scheme, so in a previous step we'll add a fith row per participant where the response is text1, #and we'll remove the column text1. Note that text1 IS NO RESPONSE, but we'll behave like it is to not break pandas' .pivot() method. text1_cols = df[~df["text1"].isna()][["submission_id", "text1"]].set_index("submission_id").rename(columns={"text1": "response"}) text1_cols["question"] = "text1" text1_cols.head() df = df.set_index("submission_id").append(text1_cols).sort_index().reset_index().drop(columns="text1") df.head() tidy_df = df.pivot(index="submission_id", columns="question", values="response") tidy_df.loc[:,"What do you do?"] = tidy_df["What do you do?"].astype("int") tidy_df.loc[:,"statement"] = tidy_df["statement"].astype("int") display(tidy_df.head()) len(tidy_df)Exclude participants etc#exclude participants that failed the understanding question or/and were neutral towards their chosen topic clean_df = tidy_df[(tidy_df["To ensure you understood the assignment, please click on which of the following statements is true."]=="Participants chose which action they preferred.") & ~(tidy_df["statement"]==0)] len(clean_df) clean_df.rename(columns = {'What do you do?':'preference'}, inplace = True) clean_df.rename(columns = {'text1':'condition'}, inplace = True) clean_df.head()C:\Users\sarah\Miniconda3\envs\psylab\lib\site-packages\pandas\core\frame.py:5034: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy return super().rename(Make everything shorter and cleaner and save it to a new CSV# show the whole text for the different conditions with pd.option_context('display.max_colwidth', None): display(clean_df["condition"].head()) # differentiate between conditions clean_df["only_ingroup_norm_show"] = clean_df["condition"].isin(["

60% of the other participants who agree with you on your chosen issue chose to report the robber.", "

Approximately 60% of the other participants who agree with you on your chosen issue chose to let the robber go."]) clean_df["let_go_norm_show"] = clean_df["condition"].isin(["

Approximately 60% of the other participants who agree with you on your chosen issue chose to let the robber go.

Approximately 85% of participants in a previous study who disagreed with you on your chosen issue chose to report the robber.", "

Approximately 60% of the other participants who agree with you on your chosen issue chose to let the robber go."]) clean_df = clean_df.drop(columns="condition") clean_df.head()C:\Users\sarah\AppData\Local\Temp/ipykernel_11192/1067831111.py:3: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy clean_df["only_ingroup_norm_show"] = clean_df["condition"].isin(["

60% of the other participants who agree with you on your chosen issue chose to report the robber.", "

Approximately 60% of the other participants who agree with you on your chosen issue chose to let the robber go."]) C:\Users\sarah\AppData\Local\Temp/ipykernel_11192/1067831111.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-vie[...]Plotting stuff# compute mean of choice for every condition mean_ingroup = clean_df.groupby(["only_ingroup_norm_show", "let_go_norm_show"])["preference"].mean() display(pd.DataFrame(mean_ingroup)) # plot means of choice for every condition with barplot which looks shitty fig, ax = plt.subplots(1,2, figsize=(10,3)) mean_ingroup[False].plot.bar(title="Both_norms_shown", ax=ax[0], ylim=[-3,3]) mean_ingroup[True].plot.bar(title="Only_ingroup_norm_shown", ax=ax[1], ylim=[-3,3]); tmp = pd.DataFrame(mean_ingroup).reset_index() tmp sns.catplot(x="let_go_norm_show", y="preference", col="only_ingroup_norm_show", data=tmp, kind="bar").set(ylim=[-3,3]) p = sns.catplot(x="let_go_norm_show", y="preference", col="only_ingroup_norm_show", data=clean_df, kind="bar") #p.set_titles(row_template = 'row', col_template = 'col') p = sns.catplot(x="let_go_norm_show", y="preference", col="only_ingroup_norm_show", data=clean_df, kind="violin").set(ylim=[-3,3]) p.set_axis_labels("Ingroup Norm", "Preference") p.set_yticklabels(['Definitely report','','','','','','Definitely leave alone']) g = sns.FacetGrid(data=clean_df, col ='only_ingroup_norm_show', height=6).set(ylim=[-3,3]) g.map_dataframe(sns.violinplot, x="let_go_norm_show", y="preference") g.map_dataframe(sns.swarmplot, x="let_go_norm_show", y="preference", color="red") g.map_dataframe(sns.boxplot, x="let_go_norm_show", y="preference", showmeans=True, meanline=True, meanprops={'color': 'k', 'ls': '-', 'lw': 2}, medianprops={'visible': False}, whiskerprops={'visible': False}, showfliers=False, showbox=False, showcaps=False,) g.set_axis_labels("Ingroup Norm", "Participant's preference") g.set_yticklabels(['Definitely report','- -','-','o','+','++','Definitely leave alone']) # black line is mean, red dots are single participants g.set(ylim=(-3.3, 3.3)) g.set_xticklabels(['Report', 'Don\'t report']) axes = g.axes.flatten() axes[0].set_title("Both Norms Shown") axes[1].set_title("Only Ingroup Norm ")Working on the dataset again: make the dataset look like the one from the study.clean_df.head() assert len(set(clean_df["To ensure you understood the assignment, please click on which of the following statements is true."])) == 1 clean_df = clean_df.drop(columns=["To ensure you understood the assignment, please click on which of the following statements is true."]) clean_df["bothShown"] = ~clean_df["only_ingroup_norm_show"] clean_df = clean_df.drop(columns=["only_ingroup_norm_show"]) clean_df.head(10) clean_df["ingroupAgree"] = (clean_df["let_go_norm_show"] & (clean_df["preference"] > 0)) | (~clean_df["let_go_norm_show"] & (clean_df["preference"] < 0)) clean_df.head(10) clean_df["outgroupDisagree"] = ~clean_df["ingroupAgree"] clean_df["ingroupNorm"] = clean_df["let_go_norm_show"] clean_df = clean_df.drop(columns="let_go_norm_show") clean_df.head(10) clean_df.to_csv(os.path.join("..","..","Analyses","main","df_for_modeling.csv")) print(os.path.abspath(os.path.join("..","..","Analyses","main","df_for_modeling.csv")))Python-Highchartsのドリルダウンでポートフォリオを可視化する 
Python-Highcharts is a Python wrapper for Highcharts, a JavaScript visualization library. With Python-Highcharts you can create charts that can be explored interactively. Preparation: First, import Python-Highcharts and the other libraries used here.from itertools import product # for generating investment-weight patterns import numpy as np import pandas as pd import matplotlib.pyplot as plt from highcharts import Highchart %matplotlib inline Here we consider allocating assets to index funds tracking the four traditional asset classes: domestic equities, domestic bonds, foreign equities and foreign bonds.- domestic equity index fund- domestic bond index fund- foreign equity index fund- foreign bond index fund The mean and covariance matrix of weekly returns have been computed in advance from time series data for these assets. (*The figures are only an example; no responsibility is taken for the values used.)asset_list = ['国内株式', '国内債券', '外国株式', '外国債券'] mean = pd.Series([0.001487, 0.000283, 0.001905, 0.000723], index=asset_list, name='expected return') mean cov = pd.DataFrame([[7.95021e-04, -2.4461e-05, 6.06600e-04, 2.00937e-04], [ -2.4461e-05, 8.485e-06, -2.1082e-05, -9.016e-06], [ 6.06600e-04, -2.1082e-05, 8.82778e-04, 2.15332e-04], [ 2.00937e-04, -9.016e-06, 2.15332e-04, 1.38414e-04]] , index=asset_list, columns=asset_list) cov We also compute the Sharpe ratio (ignoring the risk-free rate).std = pd.Series(np.sqrt(np.diag(np.array(cov))), index=asset_list, name='expected volatility') df = pd.concat([mean, std], axis=1) df['sharpe ratio'] = mean / std df.T Let's plot the mean and standard deviation of the four assets with matplotlib.plt.figure(figsize=(10, 6)) plt.scatter(std, mean, c=df.loc[:, 'expected return']/df.loc[:, 'expected volatility'], marker='o') plt.grid(True) plt.xlim(-0.005, 0.035) plt.ylim(-0.00025, 0.002) plt.xlabel('expected volatility') plt.ylabel('expected return') plt.colorbar(label='Sharpe ratio') #plt.savefig('risk_returen_matplotlib.png')Visualizing with matplotlib (for comparison) We generate many investment-weight patterns and compute the expected return and standard deviation of each portfolio. Both a random example and an itertools-based example are given; choose one and run it. - Generating investment-weight patterns at random: generate many weight patterns and compute each portfolio's expected return and standard deviation.prets = [] pvols = [] weights_list = [] # for highcharts for p in range(500): weights = np.random.randint(low=0, high=11, size=len(asset_list)) weights = weights / np.sum(weights) weights_name = [[_i, _j] for _i, _j in zip(asset_list, weights)] # for highcharts weights_list += [weights_name] # for highcharts prets.append(np.sum(mean * weights)) pvols.append(np.sqrt(np.dot(weights.T,np.dot(cov,weights)))) Plot the risk (standard deviation) and expected return of each asset combination, with color indicating the size of the Sharpe ratio.plt.figure(figsize=(10, 6)) plt.scatter(pvols, prets, c=np.array(prets)/np.array(pvols), marker='o') plt.grid(True) plt.xlim(-0.005, 0.035) plt.ylim(-0.00025, 0.002) plt.xlabel('expected volatility') plt.ylabel('expected return') plt.colorbar(label='Sharpe ratio')- Generating investment-weight patterns with itertools.product: generate many weight patterns.weights = np.linspace(0, 1, 5) weights = product(weights, repeat=4) weights = pd.DataFrame(list(weights)[1:], columns=asset_list) weights = weights.divide(weights.sum(axis=1), axis=0) weights = weights.drop_duplicates() weights Compute each portfolio's expected return and standard deviation.prets = list((weights * mean).sum(axis=1)) pvols = [] for _num, _ix in enumerate(weights.index): w = weights.loc[_ix, :] pvols += [np.sqrt(np.dot(w.T, np.dot(cov, w)))] plt.figure(figsize=(10, 6)) plt.scatter(pvols, prets, c=np.array(prets)/np.array(pvols), marker='o') plt.grid(True) plt.xlim(-0.005, 0.035) plt.ylim(-0.00025, 0.002) plt.xlabel('expected volatility') plt.ylabel('expected return') plt.colorbar(label='Sharpe ratio') #plt.savefig('efficient_frontier_matplotlib.png')Visualizing with Highcharts Python-Highcharts lets you build interactive, good-looking charts.options = { 'title': { 'text': 'リスクとリターン' }, 'xAxis': { 'title': { 'text': 'リスク' } }, 'yAxis': { 'title': { 'text': 'リターン' } }, 'chart': { 'type': 'scatter', }, 'plotOptions': { 'scatter': { 'marker': { 'radius': 4, 'symbol': 'circle', 'states': { 'hover': { 'enabled': True,
'lineColor': 'rgb(100,100,100)' } } }, 'states': { 'hover': { 'marker': { 'enabled': False } } }, 'tooltip': { 'headerFormat': '{series.name}
', 'pointFormat': 'risk :{point.x}
return:{point.y} ' } } } } H = Highchart() H.set_dict_options(options) data = list(zip(pvols, prets)) weights_data = [] for i in range(len(weights)): weights_data += [{'x':data[i][0] ,'y': data[i][1], 'drilldown': 'portfolio'+str(i), 'name': 'portfolio'+str(i)}] # 'drilldown'と'name'はドリルダウン用 weights_data H.add_data_set(weights_data, 'scatter', 'ポートフォリオ') H投資比率をドリルダウンで表示してみる 各リスク・リターンのプロットを実現する投資比率が分かるドリルダウンを加えてみます.# itertoolsを使ったとき用 weights_list = [] for _num, _ix in enumerate(weights.index): weights_name = [[_i, _j] for _i, _j in zip(asset_list, weights.loc[_ix, :])] weights_list += [weights_name] for i in range(len(weights)): H.add_drilldown_data_set(weights_list[i], 'pie', weights_data[i]['name'], name=weights_data[i]['name']) HActor Critic - Syft Duet - Data Scientist 🥁Contributed by [@Koukyosyumei](https://github.com/Koukyosyumei) PART 1: Connect to a Remote Duet ServerAs the Data Scientist, you want to perform data science on data that is sitting in the Data Owner's Duet server in their Notebook.In order to do this, we must run the code that the Data Owner sends us, which importantly includes their Duet Session ID. The code will look like this, importantly with their real Server ID.```import syft as syduet = sy.duet('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')```This will create a direct connection from my notebook to the remote Duet server. Once the connection is established all traffic is sent directly between the two nodes.Paste the code or Server ID that the Data Owner gives you and run it in the cell below. It will return your Client ID which you must send to the Data Owner to enter into Duet so it can pair your notebooks.from itertools import count from collections import namedtuple import numpy as np import torch import syft as sy duet = sy.join_duet(loopback=True) sy.logger.add(sink="./syft_ds.log")Checkpoint 0 : Now STOP and run the Data Owner notebook until Checkpoint 1.sy.load("gym") sy.load("numpy") config = { "gamma": 0.99, "seed": 543, "render": False, "log_interval": 10, "no_cuda": False, "log_interval": 1, "wait_interval": 1, "dry_run":True, } remote_torch = duet.torch remote_torch.manual_seed(config["seed"]) has_cuda = False has_cuda_ptr = remote_torch.cuda.is_available() # lets ask to see if our Data Owner has CUDA has_cuda = bool( has_cuda_ptr.get( request_block=True, reason="To run test and inference locally", timeout_secs=3, # change to something slower ) ) print("Is cuda available ? 
: ", has_cuda) use_cuda = not config["no_cuda"] and has_cuda # now we can set the seed remote_torch.manual_seed(config["seed"]) device = remote_torch.device("cuda" if use_cuda else "cpu") # print(f"Data Owner device is {device.type.get()}") SavedAction = namedtuple("SavedAction", ["log_prob", "value"]) buffer_saved_actions = [] buffer_rewards =[] class Policy(sy.Module): """ implements both actor and critic in one model """ def __init__(self, torch_ref): super(Policy, self).__init__(torch_ref=torch_ref) self.affine1 = self.torch_ref.nn.Linear(4, 128) # actor's layer self.action_head = self.torch_ref.nn.Linear(128, 2) # critic's layer self.value_head = self.torch_ref.nn.Linear(128, 1) # action & reward buffer # self.saved_actions = [] # self.rewards = [] def forward(self, x): """ forward of both actor and critic """ x = remote_torch.relu(self.affine1(x)) # actor: choses action to take from state s_t # by returning probability of each action action_prob = remote_torch.softmax(self.action_head(x), dim=-1) # critic: evaluates being in the state s_t state_values = self.value_head(x) # return values for both actor and critic as a tuple of 2 values: # 1. a list with the probability of each action over the action space # 2. the value from state s_t return action_prob, state_values # send our model to remote policy = Policy(torch) remote_policy = policy.send(duet) optimizer = remote_torch.optim.Adam(remote_policy.parameters(), lr=3e-2) eps = np.finfo(np.float32).eps.item() # if we have CUDA lets send our model to the GPU if has_cuda: remote_policy.cuda(device) else: remote_policy.cpu() # You cannot see the state def select_action(state): global buffer_saved_actions global buffer_rewards state = remote_torch.from_numpy(state).float() probs_ptr, state_value_ptr = remote_policy(state) # create a categorical distribution over the list of probabilities of actions m = remote_torch.distributions.Categorical(probs_ptr) # and sample an action using the distribution action = m.sample() # save to action buffer buffer_saved_actions.append(SavedAction(m.log_prob(action), state_value_ptr)) # the action to take (left or right) return action.item() def finish_episode(): """ Training code. Calculates actor and critic loss and performs backpropagation. 
""" global buffer_saved_actions global buffer_rewards gamma = duet.python.Float(config["gamma"]) R = duet.python.Float(0) policy_losses = duet.python.List([]) value_losses = duet.python.List([]) returns = duet.python.List([]) for r in buffer_rewards[::-1]: R = r + gamma * R returns.insert(0, R) returns = remote_torch.Tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) for (log_prob, value), R in zip(buffer_saved_actions, returns): advantage = R - value.item() # calculate actor (policy) loss policy_losses.append(-log_prob * advantage) # calculate critic (value) loss using L1 smooth loss value_losses.append(remote_torch.nn.functional.smooth_l1_loss(value, R.reshape(1))) # reset gradients optimizer.zero_grad() # sum up all the values of policy_losses and value_losses loss = remote_torch.stack(policy_losses).sum() + remote_torch.stack(value_losses).sum() # perform backprop loss.backward() optimizer.step() # reset rewards and action buffer del buffer_saved_actions[:] del buffer_rewards[:] reward_threshold_ptr = duet.store["reward_threshold"] reward_threshold = reward_threshold_ptr.get(request_block=True, delete_obj=False) print(f"reward_threshold is {reward_threshold}") remote_gym = duet.gym remote_env = remote_gym.make("CartPole-v0") remote_env.seed(config["seed"]) running_reward = 10 # run inifinitely many episodes for i_episode in count(1): # reset environment and episode reward state = remote_env.reset() ep_reward = duet.python.Float(0) # for each episode, only run 9999 steps so that we don't # infinite loop while learning for t in range(1, 10000): # select action from policy action = select_action(state) # take the action state, reward, done, _ = remote_env.step(action) buffer_rewards.append(reward) ep_reward += reward if done.get(request_block=True): break # update cumulative reward running_reward = 0.05 * ep_reward.get(request_block=True, delete_obj=False) + (1 - 0.05) * running_reward # perform backprop finish_episode() # log results if i_episode % config["log_interval"] == 0: print( "Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}".format( i_episode, ep_reward.get(request_block=True, delete_obj=False), running_reward ) ) # check if we have "solved" the cart pole problem if running_reward > reward_threshold: print( "Solved! 
Running reward is now {} and " "the last episode runs to {} time steps!".format(running_reward, t) ) break if config["dry_run"]: breakImport libraries:import pandas as pd import numpy as np import warnings warnings.filterwarnings("ignore")Creating a dataframe:courses = ["Tableau", "GSS", "SQL", "Python"] dfcourses = pd.DataFrame(data=courses) dfcoursesAdding a new column to dataframe:popularity = [3, 4, 2, 1] dfcourses[1] = popularity dfcoursesRenaming column:dfcourses.columns = ["Courses", "Popularity"] dfcoursesCreating a dataframe from dictionarydf_from_dict = pd.DataFrame({'Courses' :['Tableau' , 'GSS' , 'SQL' , 'Python'] , "Popularity" : [3,4, 2,1]}) df_from_dictCreating date ranges with defining "start - end"holiday1=pd.date_range(start="2020-08-16", end="2020-08-32") # !!!ATTENTION: day must be in range for month holiday1 holiday1=pd.date_range(start="2020-08-17", end="2020-08-23") # !!!ATTENTION: day must be in range for month holiday1Creating data ranges with defining "periods"holiday2=pd.date_range("2020-08-31", periods=7) holiday2Creating random values to build a dfrandom_values1=np.random.random((7,7)) random_values1Creating a df from data ranges and random_valuesdf_new= pd.DataFrame(random_values1, index=holiday1) df_newRenaming columnsdf_new.columns=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday" ] df_newShowing index valuesdf_new.indexShowing columnsdf_new.columnsData types of columnsdf_new.dtypesGeneral view of the dfdf_new.info() DatetimeIndex: 7 entries, 2020-08-17 to 2020-08-23 Freq: D Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Monday 7 non-null float64 1 Tuesday 7 non-null float64 2 Wednesday 7 non-null float64 3 Thursday 7 non-null float64 4 Friday 7 non-null float64 5 Saturday 7 non-null float64 6 Sunday 7 non-null float64 dtypes: float64(7) memory usage: 448.0 bytesShapedf_new.shape #Sorting df df_new.sort_values(by="Monday") #default is ascending=True df_new.sort_values(by="Monday", ascending=False) #default is ascending=TrueDeleting a column-1dfcourses["Prix"]=["100","75","200","300"] dfcourses dfcourses.drop(["Popularity"], axis=1, inplace=False)Deleting a column-2dfcourses.drop(columns=["Popularity"], inplace=False)Deleting multiple columnsdfcourses.drop(columns=["Popularity", "Prix"], inplace=False)Deleting a rowdfcourses.drop([1], axis=0, inplace=False)Deleting multiple rowsdfcourses.drop([2,3], axis=0, inplace=False)Deleting a row-Version_2dfcourses.drop(dfcourses.index[2], inplace=False)Deleting a row based on a column valuedfcourses.drop(dfcourses.index[dfcourses.Courses=="Tableau"].tolist(), axis=0, inplace=False) dfcourses=dfcourses.drop(dfcourses[dfcourses.Popularity==4].index) dfcoursesKeeping rows based on values in a column using isin()dfcourses=dfcourses[dfcourses.Popularity.isin([1,2])] dfcourses dfstd = pd.DataFrame({ 'Student_id' :[101,102,103,104] , 'first_name' :['Ali' , 'Veli' , 'Emre' , 'Hasan'] , 'country' : ['Turkey' , 'US','UK' , 'France']}, index=[1,2,3,4]) dfstdSelecting data using loc--index valuesdfstd.loc[2] dfstd.loc[2:3]Selecting data using ilocdfstd.iloc[1] dfstd.iloc[2:4]Conditional Selectiondfstd.loc[dfstd.Student_id > 102] dfstd2=dfstd.rename(index={1: 'A', 2:"B", 3:"C", 4:"D"}) dfstd2Label based selectionsdfstd2.loc["D"] dfstd2.loc[["A","D"]]Row label selectionsdfstd2[1:3] dfstd2.loc[:,["Student_id", "country"]] dfstd2.loc["A":"C",["Student_id", "country"]]Data selection using positiondfstd2.iloc[1][2] dfstd2.iloc[1:3,:] dfstd2.iloc[1,:] a = 
np.linspace(10, 100, 12) b = np.random.randint(10,100,12) c = np.linspace(5,20,12) d = np.random.randint(100,150,12) df6 = pd.DataFrame({"Var1" : a , "Var2" :b, "Var3":c, "Var4":d}) df6 df6[df6.Var2.isin([13,70])]Setting a value (column)df6.at[2:4,"Var1"]=44 df6 df6.at[2:4,"Var1"]=50 df6Setting a value (second row, second column)df6.iat[2,1]=28 df6 df6[(df6.Var1<20) & (df6.Var2>30)]Dealing with NULL valuesdf6.at[2:3, "Var1"] = np.NaN df6.at[5:6, "Var2"] = np.NaN df6.at[8:10, "Var4"] = np.NaN df6Detecting Missing Values (or df6.isna())df6.isnull()Detecting Non-Missing Valuesdf6.notna()Sum of null:df6.isnull().sum()Filling NaNs with a specific valuedf6.fillna(999) #(inplace = False)Filling NaNs with different valuesdf6.fillna(value={"Var1":333, "Var2":666,"Var4":999})Replacing based on a condition (only the first 2 NULLs in Var4 columns)df6.fillna(value={"Var4":2020}, limit=2)Dropping all NULLsdf6.dropna() # default axis=0 df6.dropna(axis=1)Descriptive Statisticsdf6.fillna(34, inplace=True)Mean of all columnsdf6.mean() #(sum, min, max, median, std, var can also be used here)Mean of a single columnsdf6.Var3.mean() df6.quantile([.25, .5, .75], axis = 1) df6.quantile([.25, .5, .75], axis = 0)Describedf6.describe() df6.describe().loc["min":"max", "Var2":"Var4"]Skewdf6.skew()Kurtdf6.kurt()Correlationdf6.corr()Covariencedf6.cov()Usage of different functionsdf6.apply(sum) #(min, max, mean, median and other aggs can be used here) df6.apply(np.sum, axis=0) df6.apply(np.sum, axis=1) df6.applymap(np.sqrt)Lambda Functionsdf6.apply(lambda x: x*2) df6.apply(lambda x: max(x)) # Default axis=0 df6.apply(lambda x: max(x), axis=1)Merging Dataframesdfstd dfgrad =pd.DataFrame({"Student_id":[101, 103, 105, 107, 109], "Grades": [80, 85, 90, 100, 95]}) dfgradJoin Type-1: Inner Joinpd.merge(dfstd, dfgrad, on="Student_id", how="inner")Join Type-2:Left Joinpd.merge(dfstd, dfgrad, on="Student_id", how="left")Join Type-3:Right Joinpd.merge(dfstd, dfgrad, on="Student_id", how="right")Join Type-4:Outer Joinpd.merge(dfstd, dfgrad, on="Student_id", how="outer")Loading a dataset from seabornimport seaborn as sns data=sns.load_dataset("titanic") data.head() data.tail()Another version instead of tail()data[-5:] data.info() data.embarked.unique() data.embarked.nunique() data[["pclass", "survived", "sex"]].head() data.iloc[0:5, 0:3]Filtering datadata.loc[data.sex=="male"]Filtering datadata.loc[(data.sex=="male") & (data.pclass==3)]Displaying sorted partition of the dataframedisplay("Top 3 most expensive tickets:", data.sort_values(by="fare", ascending=False).head(3))Operations with strdata2=sns.load_dataset("mpg") data2.head(2)Containsdata2.name.str.contains("chevrolet").count() data2.loc[data2.name.str.contains("chevrolet")].head(3) data2.loc[~data2.name.str.contains("chevrolet")].head() data2.loc[(data2.name.str.contains("chevrolet")) & (data2.origin.str.contains("usa"))].head() data2.loc[data2.name.str.contains("chevrolet|buick", regex=True)].head() data2.loc[data2.name.str.contains("CHEVROLET|BUICK", case=False, regex=True)].head()Contains with regex all name values starting with "bu"data2.loc[data2.name.str.contains("^bu", case = False, regex = True)].head()all name values ending with "lo"data2.loc[data2.name.str.contains('lo$', case=False, regex=True)]Regex-Extracting First 5 characters from name columndata2.name.str.extract(r'(^\w{5})')[160:165]Using regex-matchdata2.name.str.match(r'(^[C|c].*)').count()Replacing valuesdata2.cylinders.replace({8:"eight", 6:"six"})[100:105] data2.origin.replace(["japan", "europe"], 
"others") data2.loc[data2.model_year<80, "Type"] = "Classic" data2.head()Groupbydata2.groupby("cylinders")["mpg"].mean() data2.groupby("cylinders").mean().sort_values("mpg", ascending = False) data2.groupby("cylinders").mpg.agg(["mean", "count"]).head()Selecting data based on dtypesdata2.select_dtypes(include='number').head() data2.select_dtypes(include = ["number", "object"]).head() data2.select_dtypes(exclude ="number").head()Changing data typedata2.astype({'horsepower':'object', 'cylinders':'float'}).dtypes pd.to_numeric(data2.horsepower, errors='coerce')Changing all df to numeric, filling non-nuermic values with 0data2.apply(pd.to_numeric, errors='coerce').fillna(0).head() data2.mpg.value_counts()Converting continuous data into categorical data by cut() functionpd.cut(data2.mpg, bins=[13, 16, 34, 40], labels=['low', 'medium', 'high']).head(10)Unstackdata2.groupby(['cylinders', 'origin']).mpg.mean().unstack()Pivot tabledata2.pivot_table(index='cylinders', columns='origin', values='mpg', aggfunc='mean')BusThis bus has a passenger entry and exit control system to monitor the number of occupants it carries and thus detect when there is too high a capacity.At each stop the entry and exit of passengers is represented by a tuple consisting of two integer numbers.```bus_stop = (in, out)```The succession of stops is represented by a list of these tuples.```stops = [(in1, out1), (in2, out2), (in3, out3), (in4, out4)]``` Goals:* lists, tuples* while/for loops* minimum, maximum, length* average, standard deviation Tasks1. Calculate the number of stops.2. Assign to a variable a list whose elements are the number of passengers at each stop (in-out),3. Find the maximum occupation of the bus.4. Calculate the average occupation. And the standard deviation.# variables stops = [(1, 1), (2, 2), (3, 3), (4, 4)] enter = (1, 2, 3, 4) exit = (1, 2, 3 ,4) total_stops = 0 passengers = [] maxpass = 0 averagepass = 0 stdpass = 0 # 1. Calculate the number of stops. for i in range(len(stops)): total_stops += 1 print(total_stops) # 2. Assign a variable a list whose elements are the number of passengers in each stop: # Each item depends on the previous item in the list + in - out. for i in range(len(stops)): passengers.append((stops[i][0]) - (stops[i][1])) print(passengers) # 3. Find the maximum occupation of the bus. max(passengers) # 4. Calculate the average occupation. And the standard deviation. import statistics as s s.mean(passengers) s.stdev(passengers)Intuition and Introduction*Maximum Likelihood Estimation*, or MLE, is a technique for guessing *unknown parameters* for *models* of observed data. Specifically, the *Maximum Likelihood Estimator* for an unknown parameter is the value which maximizes the probability of the observed data. To understand how this works intuitively, consider the following example (or just skip straight to the [math](math)). Imagine you flip a penny 100 times, and it lands heads every time. You probably have some internal model of how coin flips work - for example, it's reasonable to assume that each toss is independent of the other coin tosses. However, there's a key parameter in this model you're missing: for a weighted coin, you don't know the probability $p$ that any individual toss will come up as heads. 
However, you've observed that the penny landed heads 100 times in a row, so you infer that $p$ is pretty close to $1$, because that makes the observed data more probable.import numpy as np import matplotlib.pyplot as plt x = np.arange(0, 100, 1).astype(float)/100 plt.plot(x, x ** 100, c = 'cornflowerblue') plt.title('Likelihood of 100 Coins Flips Landing Heads') plt.xlabel('Likelihood of 1 Flip Landing Heads') plt.show()# Install OpenCV library !python3 -m pip install opencv-python # Import OpenCV library in your code import cv2 as cv !wget https://github.com/opencv/opencv_extra/blob/4.x/testdata/cv/features2d/tsukuba.png !pwd # Download image !curl -o logo.png https://colab.research.google.com/img/colab_favicon_256px.png # Read image with help opencv img = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED) # Check image print(img) #show image with help imshow by opencv # cv.imshow('image', img) # cv.waitKey(0) # cv.destroyAllWindows() from google.colab.patches import cv2_imshow cv2_imshow(img)Notebook examples for Chapter 3 Power spectrum of an image bandimport warnings # these are innocuous but irritating warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") %matplotlib inline import numpy as np from numpy import fft from osgeo import gdal from osgeo.gdalconst import GA_ReadOnly import matplotlib.pyplot as plt gdal.AllRegister() infile = 'imagery/AST_20070501' inDataset = gdal.Open(infile,GA_ReadOnly) cols = inDataset.RasterXSize rows = inDataset.RasterYSize band = inDataset.GetRasterBand(3) image = band.ReadAsArray(0,0,cols,rows) # arrays of i and j values a = np.reshape(range(rows*cols),(rows,cols)) i = a % cols j = a / cols # shift Fourier transform to center image1 = (-1)**(i+j)*image # compute power spectrum and display image1 = np.log(np.abs(fft.fft2(image1))**2) mn = np.amin(image1) mx = np.amax(image1) plt.imshow((image1-mn)/(mx-mn), cmap='gray') #plt.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_1.eps')Image compression with the Haar wavelet# The Haar mother wavelet def psi_m(x): if x<0:return 0.0 elif x<=0.5:return 1.0 elif x<=1.0:return -1.0 else:return 0.0 # The Haar basis functions def psi(m,k,n): c = 2**n result = np.zeros(c) x = np.linspace(0,1,num=c) for i in range(c): result[i] = psi_m((2**m)*x[i]-k) return result # Generate wavelet basis B_8 n = 8 B = np.ones((2**n,2**n)) i = 1 for m in range(n): for k in range(2**m): B[:,i] = psi(m,k,n) i += 1 B = np.mat(B) # 256x256 subset G = np.mat(image[200:456,200:456]) # Wavelet transformation Gw = np.mat(np.zeros((256,256))) # Filter the columns for j in range(256): Gw[:,j] = B.I*G[:,j] # Filter the rows for i in range(256): Gw[i,:] = (B.I*Gw[i,:].T).T # Histogram of wavelet coefficients Gw = np.array(Gw).ravel() p = plt.hist(Gw,bins=30,range=(-10,10)) #plt.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_6.eps') # Truncate and reshape Gw = np.reshape(np.where(np.abs(Gw)<2,0,Gw),(256,256)) # Invert the transformation Gw = np.mat(Gw) Gc = np.mat(np.zeros((256,256))) for i in range(256): Gc[i,:] = (B*Gw[i,:].T).T for j in range(256): Gc[:,j] = B*Gc[:,j] f, ax = plt.subplots(1,2,figsize=(16,8)) ax[0].imshow(np.array(G)/255,cmap='gray') ax[1].imshow(np.array(Gc)/255,cmap='gray') #f.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_7.eps',bbox_inches='tight') from scipy import sparse sG = sparse.csr_matrix(G) sGw = sparse.csr_matrix(Gw) print sG.data.nbytes print sGw.data.nbytesThe cascade algorithmdef F(x,i,c): if i==0: if 
x==0: return 1.0 else: return 0.0 else: return c[0]*F(2*x,i-1,c)+c[1]*F(2*x-1,i-1,c) \ +c[2]*F(2*x-2,i-1,c)+c[3]*F(2*x-3,i-1,c) \ +c[4]*F(2*x-4,i-1,c) # Haar refinement coefficients c = np.zeros(5) c[0] = 1.0; c[1] = 1.0 # fourth order approximation n = 4 x = np.array(range(4*2**n))/float(2**n) FF = np.zeros(4*2**n) for i in range(4*2**n): FF[i] = F(x[i],n,c) plt.plot(x,FF) plt.ylim(-1,2) #plt.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_8.eps') # Daubechies D4 refinement coeffificents c = np.zeros(5) c[0] = (1+np.sqrt(3))/4; c[1] = (3+np.sqrt(3))/4 c[2] = (3-np.sqrt(3))/4; c[3] = (1-np.sqrt(3))/4 c[4] = 0.0 for i in range(4*2**n): FF[i] = F(x[i],n,c) plt.plot(x,FF) plt.ylim(-1,2) #plt.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_9.eps')Principal componentsimport ee from ipyleaflet import (Map,DrawControl,TileLayer) from auxil import eepca ee.Initialize() # function for overlaying tiles onto a map def GetTileLayerUrl(ee_image_object): map_id = ee.Image(ee_image_object).getMapId() tile_url_template = "https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}" return tile_url_template.format(**map_id) # get the image im = ee.Image( 'LANDSAT/LE07/C01/T1_RT_TOA/LE07_197025_20010626') \ .select('B1','B2','B3','B4','B5','B7') # perform principal components analysis pcs, lambdas = eepca.pca(im) # display the default base map and overlay the PCA image m = Map(center=[50.7, 6.4], zoom=7) m.add_layer(TileLayer(url=GetTileLayerUrl( pcs.select('pc1','pc2','pc3') \ .visualize(min=-0.1, max=0.1, opacity = 1.0) ) )) m gdexporttask = ee.batch.Export.image.toDrive(pcs, description='driveExportTask', folder='EarthEngineImages', fileNamePrefix='PCS', scale=30, maxPixels=1e9) gdexporttask.start() run scripts/ex3_1 run scripts/pca -r 2 -n imagery/LE7_20010626 %run scripts/dispms -f imagery/LE7_20010626 -p [1,2,3] -e 2 \ -F imagery/LE7_20010626_recon -P [1,2,3] -E 2 #-s '/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_11.eps'Dual solution# column-centered data matrix for random 2D data G = np.mat(2*np.random.rand(100,2))-1 # covariance matrix S = G.T*G/99 # Gram matrix K = G*G.T lambda_s, _ = np.linalg.eigh(S) lambda_k, _ = np.linalg.eigh(K) # sort eigenvalues in decreasing oder idx = np.argsort(lambda_s)[::-1] lambda_s = lambda_s[idx] idx = np.argsort(lambda_k)[::-1] lambda_k = lambda_k[idx] # compare print lambda_s print lambda_k[0:3]/99 run scripts/ex3_2 imagery/LE7_20010626Minimum noise fractionrun scripts/mnf -n imagery/LE7_20010626 run scripts/dispms -f imagery/LE7_20010626_mnf -p [3,1,2] -e 4 \ #-s '/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_12.eps'Excercise 13# a 2D two-class image n1 = np.random.randn(1000) n2 = n1 + np.random.randn(1000) B1 = np.zeros((1000,2)) B2 = np.zeros((1000,2)) B1[:,0] = n1 B1[:,1] = n2 B2[:,0] = n1+4 B2[:,1] = n2 G = np.concatenate((B1,B2)) # center the image G[:,0] = G[:,0] - np.mean(G[:,0]) # estimate covariance and diagonalize C = np.mat(G).T*np.mat(G)/2000 _,U = np.linalg.eigh(C) # slopes of the principal axes s1 = U[1,1]/U[0,1] s2 = U[1,0]/U[0,0] # plot plt.xlim((-5,5)) plt.ylim((-5,5)) plt.axes().set_aspect(1) plt.plot(G[:,0],G[:,1],'b.', [-5,5],[-5*s1,5*s1],'k', [-5,5],[-5*s2,5*s2],'k') #plt.savefig('/home/mort/LaTeX/new projects/CRC4/Chapter3/fig3_13.eps',bbox_inches='tight')Improving Performance of Invasion Percolation* Use setsimport numpy as np import random def percolation(size, spread): """ Simulate invasion percolation on a size x size grid with values in [1..spread], reporting density of final filled shape. 
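Filled cells are marked with 0 in the returned grid; the second return value is the fraction of cells that ended up filled.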
""" assert (type(size) == int) and ((size > 0) and (size % 2 == 1)), 'size must be positive odd integer' assert (type(spread) == int) and (spread > 0), 'spread must be non-negative integer' grid = make_grid(size, spread) boundary = make_boundary(spread) chosen = (int(size/2), int(size/2)) fill(grid, chosen) while not on_boundary(grid, chosen): extend_boundary(grid, boundary, chosen) chosen = choose_next(grid, boundary) fill(grid, chosen) return grid, calculate_density(grid) def make_grid(size, spread): """ Create size x size grid filled with values in [1..spread]. """ return np.random.randint(low=1, high=spread+1, size=(size, size)) def fill(grid, loc): """ Mark a cell as filled. """ grid[loc] = 0 def on_boundary(grid, loc): """ Is the specified cell on the boundary of the grid? """ grid_x, grid_y = grid.shape loc_x, loc_y = loc return (loc_x == 0) or (loc_y == 0) or (loc_x == (grid_x -1)) or (loc_y == (grid_y -1)) def test_on_boundary(): grid = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert on_boundary(grid, (0, 0)) assert not on_boundary(grid, (1, 1)) assert on_boundary(grid, (2, 0)) def calculate_density(grid): """ Return proportion of cells that are filled. """ filled = np.sum(grid == 0) return filled / grid.size def make_boundary(spread): """ Create object to keep track of boundary cells. """ result = [] for i in range(spread + 1): result.append(set()) return result def test_make_boundary(): assert make_boundary(3) == [set(), set(), set(), set()] def extend_boundary(grid, boundary, loc): """ Extend boundary with unfilled cells next to given location. """ loc_x, loc_y = loc for (x, y) in ((loc_x-1, loc_y), (loc_x + 1, loc_y), (loc_x, loc_y-1), (loc_x, loc_y+1)): if grid[x, y] != 0: boundary[grid[x, y]].add((x, y)) def test_extend_boundary(): grid = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) boundary = make_boundary(9) extend_boundary(grid, boundary, (1, 1)) assert boundary == [set(), set(), {(0, 1)}, set(), {(1, 0)}, set(), {(1, 2)}, set(), {(2, 1)}, set()] test_extend_boundary() def choose_next(grid, boundary): """ Find and return coordinates of next grid cell to fill. """ for val in range(len(boundary)): if boundary[val]: break loc = random.choice(list(boundary[val])) boundary[val].discard(loc) return loc def is_adjacent(grid, loc): """ Is the location (x, y) adjacent to a filled cell? 
""" x, y = loc max_x, max_y = grid.shape if grid[loc] == 0: return False if (x > 0) and (grid[x-1, y] == 0): return True if (y > 0) and (grid[x, y-1] == 0): return True if (x < max_x-1) and (grid[x+1, y] == 0): return True if (y < max_y-1) and (grid[x, y+1] == 0): return True return False def test_is_adjacent(): grid = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) assert not is_adjacent(grid, (0, 0)) assert is_adjacent(grid, (1, 0)) assert is_adjacent(grid, (0, 1)) assert not is_adjacent(grid, (1, 1)) assert not is_adjacent(grid, (2, 0)) assert not is_adjacent(grid, (2, 1)) assert not is_adjacent(grid, (0, 2)) assert not is_adjacent(grid, (1, 2)) assert not is_adjacent(grid, (2, 2)) def test_all(): test_on_boundary() test_is_adjacent() test_make_boundary() test_all() percolation(5, 5) import timeit timeit.timeit(stmt='percolation(21, 10)', number=200, setup='from __main__ import percolation')Usage of pretrained model (VGG16) Import modulesfrom __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import sys import time import numpy as np import matplotlib.pyplot as plt %matplotlib inline from PIL import Image import tensorflow as tf tf.enable_eager_execution() os.environ["CUDA_VISIBLE_DEVICES"]="0" print('TensorFlow version: {}'.format(tf.__version__)) # if you have a memeory trouble then uncomment it # from tensorflow.compat.v1 import ConfigProto # from tensorflow.compat.v1 import InteractiveSession # config = ConfigProto() # config.gpu_options.allow_growth = True # session = InteractiveSession(config=config)Usage `VGG16`* [code link](https://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py)* [document link](https://keras.io/applications/vgg16)```pythondef VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs): """Instantiates the VGG16 architecture. Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Arguments include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or `(3, 224, 224)` (with `channels_first` data format). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Returns A Keras model instance. Raises ValueError: in case of invalid argument for `weights`, or invalid input shape. 
"""``` General method (`include_top=True`)vgg16_1 = tf.keras.applications.VGG16() vgg16_1.summary()General method (`include_top=False`)vgg16_2 = tf.keras.applications.VGG16(include_top=False, input_shape=(150, 150, 3)) vgg16_2.summary()Read a imagedef vgg_preprocessing(image): """image preprocessing Args: image (PIL image): image with shape [height, width, channels] Returns: image (np.float32): vgg preprocessed image with 4-rank tensor shape [1, height, width, channels] applied by mean_image_subtraction """ norm_means = np.array([123.68, 116.779, 103.939]) vgg_image_size = 224 image = image.resize((vgg_image_size, vgg_image_size)) image = np.asarray(image) image = image.astype(np.float32) image -= norm_means return image my_image = Image.open('../../input_data/cat2.jpg') plt.imshow(my_image) my_image = vgg_preprocessing(my_image) my_image = np.expand_dims(my_image, axis=0)Inference `my_image` through VGG16prediction = vgg16_1(my_image) prediction.shape pred_index = tf.argmax(prediction, axis=1)[0] print('prediction index: {}'.format(pred_index))Read a label filecategory_names = [] with open('../../input_data/imagenet.classname.txt', 'r') as f: for line in f.readlines(): category_names.append(line.strip()) category_names[pred_index]Observe all activation mapslayer_outputs = [layer.output for layer in vgg16_1.layers[1:]] for layer in layer_outputs: print(layer)Observe all training variablesfor var in vgg16_1.variables: print(var.name) vgg16_1.variables[0].shape vgg16_1.layers[1].kernel.shape vgg16_1.variables[0].numpy() == vgg16_1.layers[1].kernel.numpy()Extract all activation maps (feature maps)from tensorflow.keras import models activation_model = models.Model(inputs=vgg16_1.input, outputs=layer_outputs) activations = activation_model.predict(my_image) conv1_1 = activations[0] conv2_1 = activations[2] conv3_2 = activations[6] conv4_3 = activations[11] conv5_3 = activations[15]Print feature mapsdef print_all_feature_maps(layer, layer_name): """Print all feature maps This code is borrowed from "Deep Learning with Python" (by ) Args: layer (4-rank Tensor): feature maps layer_name (string): name of feature maps Returns: print all feature maps """ num_features = layer.shape[-1] size = layer.shape[1] images_per_row = 16 for feature_map in range(num_features): num_cols = num_features // images_per_row display_grid = np.zeros((size * num_cols, images_per_row * size)) for col in range(num_cols): for row in range(images_per_row): channel_image = layer[0,:,:,col * images_per_row + row] channel_image -= channel_image.mean() channel_image /= channel_image.std() channel_image *= 64 channel_image += 128 channel_image = np.clip(channel_image, 0, 255).astype('uint8') display_grid[col * size : (col + 1) * size, row * size : (row + 1) * size] = channel_image scale = 1. 
/ size plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0])) plt.title(layer_name) plt.grid(False) plt.imshow(display_grid, aspect='auto', cmap='viridis') print_all_feature_maps(conv1_1, 'conv1_1') print_all_feature_maps(conv2_1, 'conv2_1') print_all_feature_maps(conv3_2, 'conv3_2') print_all_feature_maps(conv4_3, 'conv4_3') print_all_feature_maps(conv5_3, 'conv5_3')Some exploratory data analysis Loadimport seaborn as sns from sklearn.datasets import load_iris df = load_iris(as_frame=True)['data']Cleandf df.shape df = df[df['petal length (cm)'] > 2] df.shapePlotsns.histplot(df['petal length (cm)'])Train text modelimport sys sys.path.insert(0,'../input/shopee-competition-utils') from config import CFG from run_training import run_bert_training from run_test import run_bert_testTrainParameters in `CFG`:+ `CFG.DEVICE` can be set to one of the availiable cuda, `['cuda:0','cuda:1','cuda:2','cuda:3']`+ `CFG.BERT_MARGINS = [0.5,0.6,0.7,0.8]`+ `CFG.BERT_MODEL_NAMES = ['bert-base-multilingual-uncased', 'cahya/bert-base-indonesian-1.5G', 'cahya/distilbert-base-indonesian', 'sentence-transformers/paraphrase-xlm-r-multilingual-v1', 'sentence-transformers/paraphrase-distilroberta-base-v1']`# choose which cuda to train model on CFG.DEVICE = 'cuda:0' CFG.BATCH_SIZE = 16 # choose which model with what hyperparameters to train CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[3] CFG.MARGIN = CFG.BERT_MARGINS[3] CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt" # start training run_bert_training() run_bert_test()Building Model Backbone for sentence-transformers/paraphrase-xlm-r-multilingual-v1 model, margin = 0.8 paraphrase-xlm-r-multilingual-v1_epoch8-bs16x1_margin_0.8.pt get_bert_embeddings: 100%|████████████████████| 216/216 [00:04<00:00, 53.00it/s] Searching best threshold... threshold = 0.01 -> f1 score = 0.5043079609507127, recall = 0.3705569079686218, precision = 0.9982965883097289 threshold = 0.02 -> f1 score = 0.5221985775132593, recall = 0.3904053620942551, precision = 0.9972015379374117 threshold = 0.03 -> f1 score = 0.5364176878539656, recall = 0.40718974835347266, precision = 0.9969971285345792 threshold = 0.04 -> f1 score = 0.5493804316529174, recall = 0.4226959014617234, precision = 0.996058861564775 threshold = 0.05 -> f1 score = 0.5609129187284428, recall = 0.43637580972483175, precision = 0.9953818423266519 threshold = 0.06 -> f1 score = 0.5727563397591395, recall = 0.45070187858379923, precision = 0.9945953180512972 threshold = 0.07 -> f1 score = 0.5820083336170618, recall =[...]Chapter 8 - Comparing Gaussian means 8.1 One-sample comparison$$ \delta \sim \text{Cauchy} (0, 1)$$$$ \sigma \sim \text{Cauchy} (0, 1)_{\mathcal I(0,∞)}$$$$ \mu = \delta\sigma $$$$ x_{i} \sim \text{Gaussian}(\mu,1/\sigma^2)$$# Read data Dr. 
Smith Winter = np.array([-0.05,0.41,0.17,-0.13,0.00,-0.05,0.00,0.17,0.29,0.04,0.21,0.08,0.37, 0.17,0.08,-0.04,-0.04,0.04,-0.13,-0.12,0.04,0.21,0.17,0.17,0.17, 0.33,0.04,0.04,0.04,0.00,0.21,0.13,0.25,-0.05,0.29,0.42,-0.05,0.12, 0.04,0.25,0.12]) Summer = np.array([0.00,0.38,-0.12,0.12,0.25,0.12,0.13,0.37,0.00,0.50,0.00,0.00,-0.13, -0.37,-0.25,-0.12,0.50,0.25,0.13,0.25,0.25,0.38,0.25,0.12,0.00,0.00, 0.00,0.00,0.25,0.13,-0.25,-0.38,-0.13,-0.25,0.00,0.00,-0.12,0.25, 0.00,0.50,0.00]) x = Winter - Summer # allowed because it is a within-subjects design x = x / np.std(x) with pm.Model() as model1: delta = pm.Cauchy('delta', alpha=0, beta=1) sigma = pm.HalfCauchy('sigma', beta=1) miu = delta*sigma xi = pm.Normal('xi', mu=miu, sd=sigma, observed=x) trace1=pm.sample(3e3, njobs=2) burnin=0 pm.traceplot(trace1[burnin:], varnames=['delta']); plt.show() def display_delta(trace, x): # BFs based on density estimation (using kernel smoothing instead of spline) from scipy.stats.kde import gaussian_kde from scipy.stats import cauchy pm.summary(trace, varnames=['delta']) tmp = pm.df_summary(trace, varnames=['delta']) # 95% confidence interval: x0 = tmp.values[0, 3] x1 = tmp.values[0, 4] t_delt = trace['delta'][:] my_pdf = gaussian_kde(t_delt) plt.plot(x, my_pdf(x), '--', lw=2.5, alpha=0.6, label='Posterior') # distribution function plt.plot(x, cauchy.pdf(x), 'r-', lw=2.5, alpha=0.6, label='Prior') posterior = my_pdf(0) # this gives the pdf at point delta = 0 prior = cauchy.pdf(0) # height of order-restricted prior at delta = 0 BF01 = posterior/prior print ('the Bayes Factor is %.5f' %(BF01)) plt.plot([0, 0], [posterior, prior], 'k-', [0, 0], [posterior, prior], 'ko', lw=1.5, alpha=1) plt.xlabel('Delta') plt.ylabel('Density') plt.legend(loc='upper left') plt.show() x = np.linspace(-3, 3, 100) display_delta(trace1, x)delta: Mean SD MC Error 95% HPD interval ------------------------------------------------------------------- 0.117 0.156 0.002 [-0.187, 0.413] Posterior quantiles: 2.5 25 50 75 97.5 |--------------|==============|==============|--------------| -0.186 0.011 0.120 0.226 0.416 the Bayes Factor is 5.920978.2 Order-restricted one-sample comparison$$ \delta \sim \text{Cauchy} (0, 1)_{\mathcal I(-∞,0)}$$$$ \sigma \sim \text{Cauchy} (0, 1)_{\mathcal I(0,∞)}$$$$ \mu = \delta\sigma $$$$ x_{i} \sim \text{Gaussian}(\mu,1/\sigma^2)$$# Read data Dr. 
Smith Winter = np.array([-0.05,0.41,0.17,-0.13,0.00,-0.05,0.00,0.17,0.29,0.04,0.21,0.08,0.37, 0.17,0.08,-0.04,-0.04,0.04,-0.13,-0.12,0.04,0.21,0.17,0.17,0.17, 0.33,0.04,0.04,0.04,0.00,0.21,0.13,0.25,-0.05,0.29,0.42,-0.05,0.12, 0.04,0.25,0.12]) Summer = np.array([0.00,0.38,-0.12,0.12,0.25,0.12,0.13,0.37,0.00,0.50,0.00,0.00,-0.13, -0.37,-0.25,-0.12,0.50,0.25,0.13,0.25,0.25,0.38,0.25,0.12,0.00,0.00, 0.00,0.00,0.25,0.13,-0.25,-0.38,-0.13,-0.25,0.00,0.00,-0.12,0.25, 0.00,0.50,0.00]) x = Winter - Summer # allowed because it is a within-subjects design x = x / np.std(x) with pm.Model() as model2: delta1 = pm.HalfCauchy('delta1', beta=1) delta = pm.Deterministic('delta', -delta1) sigma = pm.HalfCauchy('sigma', beta=1) miu = delta*sigma xi = pm.Normal('xi', mu=miu, sd=sigma, observed=x) trace2=pm.sample(3e3, njobs=2) burnin=0 pm.traceplot(trace2[burnin:], varnames=['delta']); plt.show() x = np.linspace(-3, 0, 100) display_delta(trace2, x)delta: Mean SD MC Error 95% HPD interval ------------------------------------------------------------------- -0.089 0.074 0.001 [-0.236, -0.000] Posterior quantiles: 2.5 25 50 75 97.5 |--------------|==============|==============|--------------| -0.275 -0.127 -0.071 -0.030 -0.003 the Bayes Factor is 13.168568.3 Two-sample comparison$$ \delta \sim \text{Cauchy} (0, 1)$$$$ \mu \sim \text{Cauchy} (0, 1)$$$$ \sigma \sim \text{Cauchy} (0, 1)_{\mathcal I(0,∞)}$$$$ \alpha = \delta\sigma $$$$ x_{i} \sim \text{Gaussian}(\mu+\frac{\alpha}{2},1/\sigma^2)$$$$ y_{i} \sim \text{Gaussian}(\mu-\frac{\alpha}{2},1/\sigma^2)$$# Read data x =np.array([70,80,79,83,77,75,84,78,75,75,78,82,74,81,72,70,75,72,76,77]) y =np.array([56,80,63,62,67,71,68,76,79,67,76,74,67,70,62,65,72,72,69,71]) n1 = len(x) n2 = len(y) # Rescale y = y - np.mean(x) y = y / np.std(x) x = (x - np.mean(x)) / np.std(x) with pm.Model() as model3: delta = pm.Cauchy('delta', alpha=0, beta=1) mu = pm.Cauchy('mu', alpha=0, beta=1) sigma = pm.HalfCauchy('sigma', beta=1) alpha = delta*sigma xi = pm.Normal('xi', mu=mu+alpha/2, sd=sigma, observed=x) yi = pm.Normal('yi', mu=mu-alpha/2, sd=sigma, observed=y) trace3=pm.sample(3e3, njobs=2) burnin=0 pm.traceplot(trace3[burnin:], varnames=['delta']); plt.show() x = np.linspace(-3, 3, 100) display_delta(trace3, x)delta: Mean SD MC Error 95% HPD interval ------------------------------------------------------------------- 1.303 0.357 0.005 [0.623, 2.011] Posterior quantiles: 2.5 25 50 75 97.5 |--------------|==============|==============|--------------| 0.611 1.059 1.302 1.549 2.006 the Bayes Factor is 0.00467**separate() :** split the target following a specific separatorsep_employee <- separate(employee,name,into = c('first_name','last_name'),sep = ' ') sep_employee**unite() :** concatenate the columns into oneunite(sep_employee,'name',first_name,last_name,sep = ' ') library(palmerpenguins) penguins[seq.int(10),] %>% mutate(body_mass_kg = body_mass_g/1000,flipper_length_m = flipper_length_mm / 1000)Projeto Ciencia de Dados: Carteira de Ações com Inteligência Artificial- Projeto Inspiração: https://medium.com/swlh/teaching-a-machine-to-trade-stocks-like-warren-buffett-part-i-445849b208c6- Resumo Fundamentos Empresas BR: https://fundamentus.com.br/index.php Criar Dicionários de Dataframes com cada empresaimport pandas as pd import os empresas = ["ABEV3", "AZUL4", "BTOW3", "B3SA3", "BBSE3", "BRML3", "BBDC4", "BRAP4", "BBAS3", "BRKM5", "BRFS3", "BPAC11", "CRFB3", "CCRO3", "CMIG4", "HGTX3", "CIEL3", "COGN3", "CPLE6", "CSAN3", "CPFE3", "CVCB3", "CYRE3", "ECOR3", "ELET6", "EMBR3", 
"ENBR3", "ENGI11", "ENEV3", "EGIE3", "EQTL3", "EZTC3", "FLRY3", "GGBR4", "GOAU4", "GOLL4", "NTCO3", "HAPV3", "HYPE3", "IGTA3", "GNDI3", "ITSA4", "ITUB4", "JBSS3", "JHSF3", "KLBN11", "RENT3", "LCAM3", "LAME4", "LREN3", "MGLU3", "MRFG3", "BEEF3", "MRVE3", "MULT3", "PCAR3", "PETR4", "BRDT3", "PRIO3", "QUAL3", "RADL3", "RAIL3", "SBSP3", "SANB11", "CSNA3", "SULA11", "SUZB3", "TAEE11", "VIVT3", "TIMS3", "TOTS3", "UGPA3", "USIM5", "VALE3", "VVAR3", "WEGE3", "YDUQ3"] # fundamentos = { # "ABEV3": balanco_dre_abev3, # "MGLU3": balanco_dre_mglu3 # } fundamentos = {} arquivos = os.listdir("balancos") for arquivo in arquivos: nome = arquivo[-9:-4] if "11" in nome: nome = arquivo[-10:-4] if nome in empresas: print(nome) # pegar o balanco daquela empresa balanco = pd.read_excel(f'balancos/{arquivo}', sheet_name=0) # na primeira coluna colocar o título com o nome da empresa balanco.iloc[0, 0] = nome # pegar 1ª linha e tornar um cabeçalho balanco.columns = balanco.iloc[0] balanco = balanco[1:] # tornar a 1ª coluna (que agora tem o nome da emrpesa) balanco = balanco.set_index(nome) dre = pd.read_excel(f'balancos/{arquivo}', sheet_name=1) # na primeira coluna colocar o título com o nome da empresa dre.iloc[0, 0] = nome # pegar 1ª linha e tornar um cabeçalho dre.columns = dre.iloc[0] dre = dre[1:] # tornar a 1ª coluna (que agora tem o nome da emrpesa) dre = dre.set_index(nome) fundamentos[nome] = balanco.append(dre)ABEV3 WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero AZUL4 WARNING *** file size (30309) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero WARNING *** file size (30309) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero B3SA3 WARNING *** file size (64274) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero WARNING *** file size (64274) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero BBAS3 WARNING *** file size (68412) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero WARNING *** file size (68412) not 512 + multiple of sector size (512) WARNING *** OLE2 inconsistency: SSCS size is [...]Pegar Preços das Ações nas Datas Correspondentescotacoes_df = pd.read_excel("Cotacoes.xlsx") cotacoes = {} for empresa in cotacoes_df["Empresa"].unique(): cotacoes[empresa] = cotacoes_df.loc[cotacoes_df['Empresa']==empresa, :] print(len(cotacoes))77Remover empresas que tem cotações vazias da análise (mesmo após o tratamento que fizemos na hora de pegar as cotações)for empresa in empresas: if cotacoes[empresa].isnull().values.any(): cotacoes.pop(empresa) fundamentos.pop(empresa) empresas = list(cotacoes.keys()) print(len(empresas))65Juntar fundamentos com Preço da Ação# no cotacoes: jogar as datas para índice # no fundamnetos: # trocar linhas por colunas # tratar as datas para formato de data do python # juntar os fundamentos com a coluna Adj Close das cotacoes for empresa in fundamentos: tabela = fundamentos[empresa].T tabela.index = pd.to_datetime(tabela.index, format="%d/%m/%Y") tabela_cotacao = cotacoes[empresa].set_index("Date") tabela_cotacao = tabela_cotacao[["Adj Close"]] tabela = tabela.merge(tabela_cotacao, right_index=True, left_index=True) tabela.index.name = empresa fundamentos[empresa] = tabela 
display(fundamentos["ABEV3"])Tratar colunas 1. Vamos pegar apenas empresas que possuem as mesmas colunas2. Ajeitar colunas com nome repetido3. Analisar valores vazios nas colunas 1. Remover da análise tabelas que tem colunas diferentescolunas = list(fundamentos["ABEV3"].columns) for empresa in empresas: if set(colunas) != set(fundamentos[empresa].columns): fundamentos.pop(empresa) print(len(fundamentos))612. Ajeitando colunas com o mesmo nometexto_colunas = ";".join(colunas) colunas_modificadas = [] for coluna in colunas: if colunas.count(coluna) == 2 and coluna not in colunas_modificadas: texto_colunas = texto_colunas.replace(";" + coluna + ";",";" + coluna + "_1;", 1) colunas_modificadas.append(coluna) colunas = texto_colunas.split(';') print(colunas) # implementar as colunas nas tabelas for empresa in fundamentos: fundamentos[empresa].columns = colunas3. Analisar valores vazios nas colunas# valores_vazios = { # "Ativo Total": 0, # "Passivo Total": 0, # } valores_vazios = dict.fromkeys(colunas, 0) total_linhas = 0 for empresa in fundamentos: tabela = fundamentos[empresa] total_linhas += tabela.shape[0] for coluna in colunas: qtde_vazios = pd.isnull(tabela[coluna]).sum() valores_vazios[coluna] += qtde_vazios print(valores_vazios) print(total_linhas) remover_colunas = [] for coluna in valores_vazios: if valores_vazios[coluna] > 50: remover_colunas.append(coluna) for empresa in fundamentos: fundamentos[empresa] = fundamentos[empresa].drop(remover_colunas, axis=1) fundamentos[empresa] = fundamentos[empresa].ffill() fundamentos["ABEV3"].shapeDerivations: Q2Design and tests (run, sign, KS,...) of Pseudorandom number generator and use in Monte-Carlo method Using Linear Congruence generator:def random_generator(seed, number, typ): #returns number np. of ints in range 0 to m or floats in range 0 to 1 #m = 0 to 2^32 m=100000 xval = seed % m r=[] for i in range(0,number): xval=(27*xval+10)%m if typ=='int': #xval=mini+int((xval-0)*(maxi-mini)/(m-0)) r.append(xval) elif typ=='float': r.append(xval/m) return r x=random_generator(107, 10, 'int') x x=random_generator(93, 10, 'float') xKolmogorov–Smirnov test a very efficient way to determine if two samples are significantly different from each other. It is usually used to check the uniformity of random numbers. Uniformity is one of the most important properties of any random number generator and Kolmogorov–Smirnov test can be used to test it.The Kolmogorov–Smirnov test may also be used to test whether two underlying one-dimensional probability distributions differ. It is a very efficient way to determine if two samples are significantly different from each other.The Kolmogorov–Smirnov statistic quantifies a distance between the empirical distribution function of the sample and the cumulative distribution function of the reference distribution, or between the empirical distribution functions of two samples.To use the test for checking the uniformity of random numbers, we use the CDF (Cumulative distribution function) of U[0, 1].Empirical CDF, Sn(x)= (number of R1, R2...Rn < x) / N array of random numbers, the random numbers must be in the range of [0, 1].H0(Null Hypothesis): Null hypothesis assumes that the numbers are uniformly distributed between 0-1.If we are able to reject the Null Hypothesis, this means that the numbers are not uniformly distributed between 0-1. 
Failing to reject the null hypothesis, however, does not necessarily mean that the numbers follow the uniform distribution.

Statistic: This is the calculated value of D, where D = |F(x) - Sn(x)|. This D is compared with D_alpha, where alpha is the level of significance. Alpha is defined as the probability of rejecting the null hypothesis given that the null hypothesis (H0) is true. For most practical applications, alpha is chosen as 0.05.

p-value: This is calculated with the help of D. If p-value > alpha, we fail to reject the null hypothesis. Otherwise, we conclude that the numbers are not uniform. Ideally, the p-value should be as large as possible. For a perfectly uniform distribution, p-value = 1 and the statistic D = 0.

#SELECTION SORT, for ascending order, by finding the smallest numbers
def sorting(x):
    N=len(x)
    for i in range(0,N):
        Min=x[i]
        Index=i
        #print(i)
        for j in range(i+1,N):
            #print(j)
            if x[j]<Min:
                Min=x[j]
                Index=j
        # swap the smallest remaining value into position i
        x[Index]=x[i]
        x[i]=Min
    return x

#K-S TEST: compare the empirical CDF of the sorted sample with the CDF of U[0,1]
x=sorting(x)
N=len(x)
D_plus=[]
D_minus=[]
for i in range(1,N+1):
    D_plus.append(i/N - x[i-1])
    D_minus.append(x[i-1] - (i-1)/N)
if max(D_plus)>max(D_minus):
    D=max(D_plus)
else:
    D=max(D_minus)
print(D)

#Verification of D-statistic and equivalent p-value from K-S table
from scipy.stats import kstest
print(kstest(x,"uniform"))

import matplotlib.pyplot as plt
%matplotlib inline
from collections import Counter
x=random_generator(93, 1000, 'float')
d=Counter(x)
#print(list(d.values()))
#print(x)
plt.stem(x,d.values())

Using Monte-Carlo Method to Compute $\pi$

One method to estimate the value of $\pi$ (3.141592...) is the Monte Carlo method. Assume we have a circle of radius r = 1 enclosed by a 2 × 2 square. The area of the circle is $\pi r^2=\pi$, and the area of the square is 4. If we divide the area of the circle by the area of the square we get $\pi /4$. Equivalently, we can work with just the first quadrant: a quarter circle of area $\pi/4$ inside a unit square of area 1, which gives the same ratio. We then generate a large number of **uniformly distributed** random points and plot them on the graph. These points can be in any position within the unit square, i.e. between (0,0) and (1,1). If they fall within the circle, they are coloured red, otherwise they are coloured blue. We keep track of the total number of points, and the number of points that are inside the circle.
If we divide the number of points within the circle, Ninner by the total number of points, Ntotal, we should get a value that is an approximation of the ratio of the areas we calculated above, $\pi/4$.import numpy as np def PI_Monte_Carlo(Ntotal,plot_result = False): #Ntotal = 10000 Ninside = 0 x=np.array(random_generator(93, Ntotal, 'float')) y=np.array(random_generator(95, Ntotal, 'float')) r = np.sqrt(x**2+y**2) inside = r < 1.0 Ninside = np.sum(inside) Pinside = Ninside / Ntotal if plot_result: fig,ax = plt.subplots() ax.scatter(x[inside],y[inside],c='r') ax.scatter(x[~inside],y[~inside],c='b') return 4.0 * Pinside pi_est = PI_Monte_Carlo(100000,plot_result=True) err = np.abs((pi_est-np.pi))*100/np.pi print(f'estimated Pi is {pi_est } and absolute percentage error is ', err)estimated Pi is 3.136 and absolute percentage error is 0.17801969276323767Q3Solve the differential equation using RK4, FDM Differential Equation of the Orbit The differential equation of the Orbit of a planet of constant mass '$\mathbf{m}$', under the Central force $\mathbf{\vec{F}=f(r)\hat{r}}$ with respect to the Origin '$\mathbf{O}$' taken at the centre of mass of star-planet system (effectively the position of the star), is given by:$$ \mathbf{\frac{d^2u}{d\theta^2}+u=-\frac{m}{l^2u^2}f(\frac{1}{u})}$$where, $\mathbf{\theta=}$ angular co-ordinate of the planet (independent; varies from $\mathbf{0}$ to $\mathbf{2\pi}$)$\mathbf{r=}$ radial co-ordinate of the planet (positive, function of $\mathbf{\theta}$)$\mathbf{l=mr^2\dot{\theta}=}$ angular momentum of the planet (constant)$\mathbf{\dot{\theta}=\frac{d\theta}{dt}}$$\mathbf{u=\frac{1}{r}}$which is a nonhomogenous differential equation of second order whose solution gives us the equation of the orbit of the form $\mathbf{r(\theta)}$, for a given force law (power law in terms of $\mathbf{r}$) $\mathbf{f(r)=-\frac{1}{r^n}=-u^n}$ . For simplicity, let the planet collide with the star when it approaches the star's surface at a radius $\mathbf{r=0.001}$. Also, we have assumed the constant term $\mathbf{\frac{-m}{l^2}=-1}$. Hence, the differential equation reduces to:$$ \mathbf{\frac{d^2u}{d\theta^2}+u=-\frac{1}{u^2}f(\frac{1}{u})}$$Let, initially at $\mathbf{\theta=0}$ rad, initial distance of the planet be $\mathbf{r(0)=1}$ from '$\mathbf{O}$' and initial velocity (tangential) $\mathbf{\frac{dr(\theta)}{d\theta}|_{\theta=0}=-0.1}$Therefore, $$\mathbf{\frac{du(\theta)}{d\theta}=\frac{d}{d\theta}(\frac{1}{r(\theta)})=-\frac{1}{r(\theta)^2}\frac{dr(\theta)}{d\theta}}$$Then, the initial conditions for the differential equation of the orbit at $\mathbf{\theta=0}$ rad will be: $\mathbf{u(0)=\frac{1}{r(0)}=1}$ and $\mathbf{\frac{du(\theta)}{d\theta}|_{\theta=0}=-\frac{1}{r(0)^2}(\frac{dr(\theta)}{d\theta}|_{\theta=0})=0.1}$. Converting the second order differential equation into two first order simultaneous equations:Let, $\mathbf{\frac{du}{d\theta}=v}$. 
Then, $ \mathbf{\frac{d^2u}{d\theta^2}=-u-\frac{1}{u^2}f(\frac{1}{u})=\frac{dv}{d\theta}}$Thus, the two first order simultaneous equations are:$$\mathbf{\frac{du}{d\theta}=v=f_1}$$$$\mathbf{\frac{dv}{d\theta}=-u-\frac{1}{u^2}f(\frac{1}{u})=f_2}$$So, the initial conditions for these first order differential equations are: $\mathbf{u(0)=\frac{1}{r(0)}=1}$ and $\mathbf{v(0)=\frac{du(\theta)}{d\theta}|_{\theta=0}=0.1}$Now, we can use 4th order RK method to solve this second order differential equation and plot the orbit for different cases of the force law.def force(n,u): return -u**n def f1(v): return v def f2(n,u): return -u-(force(n,u)/u**2) def RK4(n,h,u,v): u_v=[] k1=h*f1(v) l1=h*f2(n,u) k2=h*f1(v+l1/2) l2=h*f2(n,u+k1/2) k3=h*f1(v+l2/2) l3=h*f2(n,u+k2/2) k4=h*f1(v+l3) l4=h*f2(n,u+k3) k=(k1+2*k2+2*k3+k4)/6 l=(l1+2*l2+2*l3+l4)/6 u_v.append(u+k) u_v.append(v+l) return u_v def X_Y(n): u=1 v=0.1 theta=0 h=0.1 X=[] Y=[] T=[] X.append((1/u)*np.cos(theta)) Y.append((1/u)*np.sin(theta)) err=0 t = np.arange(theta,theta+20*3.14,h) for i in t: T.append(i) u_v=[] u_v=RK4(n,h,u,v) u=u_v[0] v=u_v[1] r=1/u x=r*np.cos(i) X.append(x) y=r*np.sin(i) Y.append(y) if abs(r)<0.001 or abs(r)>10: err_theta=i err_r=r err=1 break #if err==1: #t = np.arange(-err_theta,0,h) #for i in t: #T.append(i) #u_v=[] #u_v=RK4(n,h,u,v) #u=u_v[0] #v=u_v[1] #V.append(v) #r=1/u #x=r*np.cos(i) #X.append(x) #y=r*np.sin(i) #print(y) #Y.append(y) #if abs(r)<0.001: #err_theta=i #err_r=r #err=1 #break plt.plot(X,Y)Case-1: Inverse square lawWhen the force varies as the inverse 2nd power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^2}\hat{r}}$n=2 X_Y(n)Case-2:When the force varies as the inverse 3rd power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^3}\hat{r}}$n=3 X_Y(n)Case-3:When the force varies as the inverse 4th power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^4}\hat{r}}$n=4 X_Y(n)Case-4:When the force varies as the inverse 5th power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^5}\hat{r}}$n=5 X_Y(n)Case-5:When the force varies as the inverse 6th power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^6}\hat{r}}$n=6 X_Y(n)Case-6:When the force varies as the inverse 7th power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^7}\hat{r}}$n=7 X_Y(n)Case-7:When the force varies as the inverse 8th power of the distance: $\mathbf{\vec{F}=-\frac{1}{r^8}\hat{r}}$n=8 X_Y(n)The SMS Spam Collection is a set of SMS tagged messages that have been collected for SMS Spam research. It contains one set of SMS messages in English of 5,574 messages, tagged acording being ham (legitimate) or spam. The files contain one message per line. Each line is composed by two columns: class (spam or ham) and raw message. This data will be our labeled training set. Using these ham/spam examples, we'll train a machine learning model to learn to discriminate between ham/spam automatically. Then, with a trained model, we'll be able to classify arbitrary unlabeled messages as ham or spam. Goal - To build a predictive model which will determine whether a text message is spam or ham. 
DATA ANALYSIS

Import required packages

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
# Warnings
import warnings
warnings.filterwarnings('ignore')
# Text Preprocessing
import nltk
nltk.download("stopwords")
nltk.download('wordnet')
nltk.download('punkt')
from nltk.corpus import stopwords
import string
from nltk.tokenize import word_tokenize
from nltk.stem import SnowballStemmer
stemmer = SnowballStemmer("english")
from wordcloud import WordCloud
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from sklearn.metrics import fbeta_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from textblob import TextBlob
import pickle

#reading the data
data = pd.read_csv("spam.csv", encoding = "latin-1")
# data = data[['v1', 'v2']]
# data = data.rename(columns = {'v1': 'label', 'v2': 'text'})
#checking first few rows
data.head()
#removing unnecessary columns
data = data.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'], axis = 1)
#again check first few rows of data
display(data.head(n = 5))
# Let's look at the dataset info to see if everything is alright
data.info()
# check the statistics of the data
data.groupby('class').describe()

There are some repetitive messages in the data. Let's check the top messages to find the most repetitive ones.

#check the top spam/ham messages
topMessages = data.groupby("message")["class"].agg([len, np.max]).sort_values(by = "len", ascending = False).head(n = 5)
display(topMessages)

People are really busy, it seems. "Sorry, i'll call later" tops the ham message list with 30 counts, while "I cant pick the phone right now. Pls send a message" comes second with 12 counts. There are also quite a lot of "Ok...", "Okie" and "Ok." messages in there. How long are the messages?

#add a new column to the dataframe to store the length of every message
data['length'] = data['message'].map(lambda text: len(text))
print (data.head())
data.length.plot(bins=100, kind='hist')

We can see that most messages fall in the range of [0, 200] characters.

#check the statistics of the message lengths
data.length.describe()

The average message length is 80 characters, while the minimum is 2 and the maximum is 910. Is there any difference in message length between spam and ham?

f, ax = plt.subplots(1, 2, figsize = (20, 6))
sns.distplot(data[data["class"] == "spam"]["length"], bins = 20, ax = ax[0])
ax[0].set_xlabel("Spam Message Length")
sns.distplot(data[data["class"] == "ham"]["length"], bins = 20, ax = ax[1])
ax[1].set_xlabel("Ham Message Length")
plt.show()

Looks like spam messages are usually longer. Maybe message length can become a feature for predicting whether a message is spam or ham?

#Check what percentage of our data is spam/ham
data["class"].value_counts().plot(kind = 'pie', explode = [0,0.1], figsize = (6,6), autopct = '%1.1f%%', shadow = True)
plt.ylabel("Spam vs Ham")
plt.legend(["Ham", "Spam"])
plt.show()

A lot of messages are actually not spam. About 87% of our dataset consists of normal messages.
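As a quick numerical cross-check of the pie chart above (a one-line sketch, assuming the `data` frame loaded earlier in this notebook):

```python
# class balance as raw counts and as a share of all messages
print(data["class"].value_counts())
print(data["class"].value_counts(normalize=True).round(3))
```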
While we split our data set into train and test or when we use cross validation, we will have to use stratified sampling, otherwise we have a chance of our training model being skewed towards normal messages. If the sample we choose to train our model consists majorly of normal messages, it may end up predicting everything as ham and we might not be able to figure this out since most of the messages we get are actually ham and will have a pretty good accuracy.A very basic model would be a model that predicts everything as ham. It would have a decent accuracy. But then again, is that right? No. We will then have to use an accuracy metric that keeps this in mind. Goal : We don't mind if we miss the odd spam message but we surely don't want to mark a ham message as spam i.e Precision is very important. Hence we will use fbeta score as our accuracy metric with inclination towards Precision#Study individual Spam/ham words spam_messages = data[data["class"] == "spam"]["message"] ham_messages = data[data["class"] == "ham"]["message"] spam_words = [] ham_words = [] def extractSpamWords(spamMessages): global spam_words words = [word.lower() for word in word_tokenize(spamMessages) if word.lower() not in stopwords.words("english") and word.lower().isalpha()] spam_words = spam_words + words def extractHamWords(hamMessages): global ham_words words = [word.lower() for word in word_tokenize(hamMessages) if word.lower() not in stopwords.words("english") and word.lower().isalpha()] ham_words = ham_words + words spam_messages.apply(extractSpamWords) ham_messages.apply(extractHamWords) #Spam Word cloud spam_wordcloud = WordCloud(width=600, height=400).generate(" ".join(spam_words)) plt.figure( figsize=(10,8), facecolor='k') plt.imshow(spam_wordcloud) plt.axis("off") plt.tight_layout(pad=0) plt.show() #Ham word cloud ham_wordcloud = WordCloud(width=600, height=400).generate(" ".join(ham_words)) plt.figure( figsize=(10,8), facecolor='k') plt.imshow(ham_wordcloud) plt.axis("off") plt.tight_layout(pad=0) plt.show() # Top 10 spam words spam_words = np.array(spam_words) print("Top 10 Spam words are :\n") pd.Series(spam_words).value_counts().head(n = 10) # Top 10 Ham words ham_words = np.array(ham_words) print("Top 10 Ham words are :\n") pd.Series(ham_words).value_counts().head(n = 10)Top 10 Ham words are :Data Preprocessing Cleaning textual data is a little different than regular data cleaning. There is a much heavier emphasis on text normalisation than removing outliers or leverage points. When used correctly, it reduces noise, groups terms with similar semantic meanings and reduces computational costs by giving us a smaller matrix to work with.There are many types of Text Normalization.Like Case normalisation, Removing stop words, Removing punctuations and special symbols, Lemmatising/Stemming etc.For this particular classification problem, we will use case normalisation and removing stop wordds.def cleanText(message): message = message.translate(str.maketrans('', '', string.punctuation)) # words = [stemmer.stem(word) for word in message.split() if word.lower() not in stopwords.words("english")] message = message.lower() return message data["message"] = data["message"].apply(cleanText) data.head(n = 10)Convert our clean text into a representation that a machine learning model can understand. We will use the Tfifd for this. 
Now we will convert each message, represented as a list of tokens (lemmas) above, into a vector that machine learning models can understand.Doing that requires essentially three steps, in the bag-of-words model:1. counting how many times does a word occur in each message (term frequency)2. weighting the counts, so that frequent tokens get lower weight (inverse document frequency)3. normalizing the vectors to unit length, to abstract from the original text length (L2 norm) Here we will use the TF-IDF vectorizer (Term Frequency — Inverse Document Frequency), an embedding technique which takes into account the importance of each term to document. TF-IDF vectorizes documents by calculating a TF-IDF statistic between the document and each term in the vocabulary. The document vector is constructed by using each statistic as an element in the vector. The TF-IDF statistic for term i in document j is calculated as follows: ![image.png](attachment:image.png)#specifing features and labels labels = data['class'] text = data['message'] #splitting data into three sets - train, test and validation X_train, X_remain, y_train, y_remain = train_test_split(text, labels, stratify = labels, random_state=0, test_size=0.3) X_test, X_val, y_test, y_val = train_test_split(X_remain,y_remain,stratify = y_remain, random_state = 0, test_size = 0.5) #check the shape of all three datasets print("Training Data:",X_train.shape,y_train.shape ) print("Testing Data:", X_test.shape,y_test.shape) print("Validation Data:", X_val.shape,y_val.shape ) #The CountVectorizer provides a simple way to #both tokenize a collection of text documents and build a vocabulary of known words, #but also to encode new documents using that vocabulary. # create the transform count_vec = CountVectorizer(stop_words='english') # Learn the vocabulary dictionary and return term-document matrix. X_train_counts = count_vec.fit_transform(X_train) #Learn vocabulary and idf from training set. tf_transformer = TfidfTransformer().fit(X_train_counts) # Learn vocabulary and idf, return term-document matrix. X_train_transformed = tf_transformer.transform(X_train_counts) #test data X_test_counts = count_vec.transform(X_test) X_test_transformed = tf_transformer.transform(X_test_counts) #converts classes into integer labels = LabelEncoder() y_train_labels_fit = labels.fit(y_train) y_train_lables_trf = labels.transform(y_train) #label mapping of integer to class label_mapping = dict(zip(labels.classes_,labels.transform(labels.classes_))) print(label_mapping){'ham': 0, 'spam': 1}Model Buildingmodel = MultinomialNB() clf = model.fit(X_train_transformed,y_train_lables_trf) prediction = clf.predict(X_test_transformed)PrecisionPrecision is the number of True Positives divided by the number of True Positives and False Positives. Put another way, it is the number of positive predictions divided by the total number of positive class values predicted. It is also called the Positive Predictive Value (PPV). RecallRecall is the number of True Positives divided by the number of True Positives and the number of False Negatives. Put another way it is the number of positive predictions divided by the number of positive class values in the test data. It is also called Sensitivity or the True Positive Rate. F1 ScoreThe F1 Score is the 2*((precision*recall)/(precision+recall)). It is also called the F Score or the F Measure. Put another way, the F1 score conveys the balance between the precision and the recall. 
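To make these definitions concrete, here is a small, hypothetical sketch using `sklearn.metrics` on made-up labels (1 = spam, 0 = ham); the toy arrays and the choice of `beta=0.5` are illustrative assumptions, not values taken from this notebook.

```python
# Toy example (hypothetical numbers): 1 = spam, 0 = ham
from sklearn.metrics import precision_score, recall_score, f1_score, fbeta_score

y_true = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 1, 1, 1, 0, 0]   # 1 ham flagged as spam (FP), 2 spam missed (FN)

print(precision_score(y_true, y_pred))        # TP/(TP+FP) = 2/3 ~ 0.67
print(recall_score(y_true, y_pred))           # TP/(TP+FN) = 2/4 = 0.50
print(f1_score(y_true, y_pred))               # 2*P*R/(P+R) ~ 0.57
print(fbeta_score(y_true, y_pred, beta=0.5))  # beta < 1 weights precision more heavily
```

With `beta` below 1, the F-beta score leans towards precision, which matches the goal stated earlier of not marking ham messages as spam.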
Validation and Prediction#Check accuracy on test data print('Average accuracy on test set={}'.format(np.mean(prediction == labels.transform(y_test)))) print ('Accuracy', accuracy_score(labels.transform(y_test), prediction)) print() results = confusion_matrix(labels.transform(y_test), prediction) print(results) print() print ('Report:') print (classification_report(labels.transform(y_test), prediction))Accuracy 0.9629186602870813 [[724 0] [ 31 81]] Report: precision recall f1-score support 0 0.96 1.00 0.98 724 1 1.00 0.72 0.84 112 accuracy 0.96 836 macro avg 0.98 0.86 0.91 836 weighted avg 0.96 0.96 0.96 836Deploymentweights = 'spam_detection_mnb.sav' pickle.dump(clf,open(weights,'wb')) vocab = 'vocabulary_mnb.sav' pickle.dump(count_vec, open(vocab, 'wb')) tfidf = 'tfidf_mnb.sav' pickle.dump(tf_transformer,open(tfidf,'wb'))Data Retrieval for Machine Learning In this notebook, we retrieve MLB data from 1996-2017 that we will use to fit a machine learning model to. We decided that we wanted to see if we could find a model that would predict team wins even better than the Pythagorean Expectation does. After researching and examining baseball statistics, we decided to not only focus on team statistics as a whole, but pitcher statistics as well. The performance of the pitcher has a huge impact on the game results.import numpy as np import pandas as pd import requests import plotly.offline as py import matplotlib.pyplot as plt from plotly.graph_objs import * from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.neighbors import KNeighborsRegressor from sklearn.model_selection import cross_val_score from sklearn.cluster import KMeans from bs4 import BeautifulSoup %matplotlib inline pd.set_option("max_r", 15) py.init_notebook_mode(connected=True)For unknown reasons, the seventh and eighth tables that we need from the above URLs do not appear in the html returned from BeautifulSoup (it skips over them). Thus, instead of scraping from them like we did for the other datasets we need in the previous notebook, we are taking CSV's. It is also worth noting that this is the only website that has the advanced metrics we need for our calculations, which makes this situation even more of a shame.# read in all the csvs dfs = [] for i in range(1996, 2017): fname = "mlb%d.csv" % i df = pd.read_csv(fname) df["year"] = i dfs.append(df) # concat all the team data into one mlb = pd.concat(dfs).dropna() mlb = mlb.sort_values(by=["Tm","year"]) mlb.head() # read in pitching data dfs = [] for i in range(1996, 2017): fname = "pitch%d.csv" % i df = pd.read_csv(fname) df["year"] = i dfs.append(df) # combine pitching data pitch = pd.concat(dfs).dropna() pitch = pitch.sort_values(by=["Tm","year"]) pitch.head() mlb = mlb.reset_index().drop("index",axis=1) pitch = pitch.reset_index().drop("index",axis=1) # Ensure all overlapping columns have the same values. overlapping_cols = ['Tm', 'W', 'L', 'G', 'year'] df = (mlb[overlapping_cols] == pitch[overlapping_cols]) df.all() # Convert runs and runs allowed to yearly numbers from averages per game. mlb['R'] = mlb['R'] * 162 mlb['RA'] = mlb['RA'] * 162 # Add all of the columns we need into one dataframe. 
mlb.head()The variables that we felt would be most usuful for predicting wins with a machine learning model were:+ Runs: the total number of runs the team scored during the season+ Runs Allowed: the total number of runs that were scored against the team during the season+ Strength of Schedule: the difficulty or ease of a team's opponent as compared to other teams.+ ERA: the mean of the earned runs given up by a pitcher per nine innings pitched+ WHIP: walks hits per inning pitched+ FIP: field independent pitching+ SO: strike outs# Create a dataframe for our training data. machine_data = pd.DataFrame(mlb[['R',"RA","SOS",'W',"year"]]) machine_data['ERA'] = pitch['ERA'] machine_data['WHIP'] = pitch['WHIP'] machine_data['FIP'] = pitch['FIP'] machine_data['SO'] = pitch['SO'] machine_data.dtypes machine_data.to_csv("training.csv", index=False)\doublespacing Chapter GoalsIn this subchapter, the reader will learn the fundamentals of logistic regression, and how to present and interpret such an analysis. Introduction\doublespacingIn subchapter 5b we covered a very useful methodology for modeling quantitative or continuous outcomes. We of course know though that health outcomes come in all different types of data types. In fact, the health outcomes we often care about most -- cured/not cured, alive/dead, are discrete binary outcomes. It would be ideal if we could extend the same general framework for continuous outcomes to these binary outcomes. Logistic regression allows us to incorporate much of what we learned in the previous subchapter and apply the same principles to binary outcomes.When dealing with binary data, we would like to be able to model the probability of a type of outcome given one or more covariates. One might ask, why not just simply use linear regression? There are several reasons why this is generally a bad idea. Probabilities need to be somewhere between zero and one, and there is nothing in linear regression to constrain the estimated probabilities to this interval. This would mean that you could have an estimated probability 2, or even a negative probability! This is one unattractive property of such a method (there are others), and although it is sometimes used, the availability of good software such as `R` allows us to perform better analyses easily and efficiently. Before introducing such software, we should introduce the analysis of small contingency tables. 2x2 Tables\doublespacingContingency tables are the best way to start to think about binary data. A contingency table cross-tabulates the outcome across two or more levels of a covariate. Let's begin by creating a new variable (`age.cat`) which dichotomizes `age` into two age categories: $\le55$ and $>55$. Note, because we are making age a discrete variable, we also change the data type to a factor. This is similar to what we did for the `gender_num` variable when discussing linear regression in the previous subchapter. We can get a breakdown of the new variable using the `table` function.\singlespacingdat$age.cat <- as.factor(ifelse(dat$age<=55, "<=55",">55")) table(dat$age.cat)\doublespacingWe would like to see how 28 day mortality is distributed among the age categories. We can do so by constructing a contingency table, or in this case what is commonly referred to as a 2x2 table.\singlespacingtable(dat$age.cat,dat$day_28_flg)\doublespacingFrom the above table, you can see that 40 patients in the young group ($\le55$) died within 28 days, while 243 in the older group died. 
These correspond to $P(\text{die} | \text{age}\le55) = 0.043$ or 4.3\% and $P(\text{die} | \text{age}>55) = 0.284$ or 28.4\%, where the "|" can be interpreted as "given" or "for those who have." This difference is quite marked, and we know that age is an important factor in mortality, so this is not surprising.The odds of an event happening is a positive number and can be calculated from the probability of an event, $p$, by the following formula\centering$\text{Odds} = \frac{p}{1-p}$.\raggedrightAn event with an odds of zero never happens, and an event with a very large odds (>100) is very likely to happen. Here, the odds of dying within 28 days in the young group is 0.043/(1-0.043)=0.045, and in the older group is 0.284/(1-0.284)=0.40. It is convenient to represent these two figures as a ratio, and the choice of what goes in the numerator and the denominator is somewhat arbitrary. In this case, we will choose to put the older group's odds on the numerator and the younger in the denominator, and it's important to make it clear which group is in the numerator and denominator in general. In this case the *Odds ratio* is 0.40/0.045 = 8.79, which indicates a very strong association between age and death, and means that the odds of dying in the older group is nearly 9 fold higher than when compared to the younger group. There is a convenient shortcut for doing odds ratio calculation by making an X on a 2x2 table and multiplying top left by bottom right, then dividing it by the product of bottom left and top right. In this case $\frac{883 \times 243}{610 \times 40}= 8.79$.Now let us look at a slightly different case -- when the covariate takes on more than two values. Such a variable is the `service_unit`. Let's see how the deaths are distributed among the different units:\singlespacingdeathbyservice <- table(dat$service_unit,dat$day_28_flg) deathbyservice\doublespacingwe can get frequencies of these service units by applying the `prop.table` function to our cross-tabulated table.\singlespacingdbys.proptable <- prop.table(deathbyservice,1) dbys.proptable\doublespacingIt appears as though the `FICU` may have a lower rate of death than either the `MICU` or `SICU`. To compute an odds ratios, first compute the odds:\singlespacingdbys.proptable[,"1"]/dbys.proptable[,"0"]\doublespacingand then we need to pick which of `FICU`, `MICU` or `SICU` will serve as the reference or baseline group. This is the group which the other two groups will be compared to. Again the choice is arbitrary, but should be dictated by the study objective. If this were a clinical trial with two drug arms and a placebo arm, it would be foolish to use one of the treatments as the reference group, particularly if you wanted to compare the efficacy of the treatments. In this particular case, there is no clear reference group, but since the FICU is so much smaller than the other two units, we will use it as the reference group. Computing the odds ratio for MICU and SICU we get 4.13 and 3.63, respectively. These are also very strong associations, meaning that the odds of dying in the SICU and MICU are around 4 times higher than in the FICU, but relatively similar.Contingency tables and 2x2 tables in particular are the building blocks of working with binary data, and it's often a good way to begin looking at the data. Introducing Logistic RegressionWhile contingency tables are a fundamental way of looking at binary data, they are somewhat limited. What happens when the covariate of interest is continuous? 
We could of course create categories from the covariate by establishing cut points, but we may still miss some important aspect of the relationship between the covariate and the outcome by not choosing the right cut points. Also, what happens when we know that a nuisance covariate is related to both the outcome and the covariate of interest. This type of nuisance variable is called a confounder and occurs frequently in observational data, and although there are ways of accounting for confounding in contingency tables, they become more difficult to use when there are more than one present.Logistic regression is a way of addressing both of these issues, among many others. If you recall, using linear regression is problematic because it is prone to estimating probabilities outside of the [0,1] range. Logistic regression has no such problem per se, because it uses a link function known as the logit function which maps probabilities in the interval $[0,1]$ to a real number $(-\infty,\infty)$. This is important for many practical and technical reasons. The logit of $p$ and how it is related to the covariates is defined as\centering$logit(p_x) = log(Odds_x) = log(\frac{p_x}{1-p_x}) = \beta_0 + \beta_1 \times x$.\raggedrightIt is worth pointing out here that log here, and in most places in statistics is referring to the natural logarithm, sometimes denoted $ln$.The first covariate we were considering, `age.cat` was also a binary variable, where it takes on values 1 when the `age`$>55$ and 0 when `age`$\le55$. So plugging these values in, first for the young group $(x=0)$:\centering$logit(p_{x=0}) = log(Odds_{x=0}) = log(\frac{p_{x=0}}{1-p_{x=0}}) = \beta_0 + \beta_1 \times 0 = \beta_0$,\raggedrightand then for the older group $(x=1)$:\centering$logit(p_{x=1}) = log(Odds_{x=1}) = log(\frac{p_{x=1}}{1-p_{x=1}}) = \beta_0 + \beta_1 \times 1 = \beta_0 + \beta_1$.\raggedrightIf we subtract the two cases $logit(p_{x=1}) - logit(p_{x=0}) = log(Odds_{x=1}) - log(Odds_{x=0})$, and we notice that this quantity is equal to $\beta_1$. If you recall the properties of logarithms, that the difference of two logs is the log of their ratio, so $log(Odds_{x=1}) - log(Odds_{x=0}) = log(Odds_{x=1}/Odds_{x=0})$, which may be looking familiar. This is the log ratio of the odds or the *log odds ratio* in the $x=1$ group relative to the $x=0$ group. Hence, we can estimate odds ratios using logistic regression by exponentiating the coefficients of the model (the intercept notwithstanding, which we will get to in a moment).Let's fit this model, and see how this works using a real example. We fit logistic regression very similarly to how we fit linear regression models, with a few exceptions. First, we will use a new function called `glm`, which is a very powerful function in `R` which allow one to fit a class of models known as generalized linear models or GLMs [@mccullagh1989generalized]. The `glm` function works in much the same way the `lm` function does. We need to specify a formula of the form: `outcome ~ covariates`, specify what dataset to use (in our case the `dat` data frame), and then specify the family. For logistic regression `family='binomial'` will be our choice. You can run the `summary` function, just like you did for `lm` and it produces output very similar to what `lm` did.\singlespacingage.glm <- glm(day_28_flg ~ age.cat,data=dat,family="binomial") summary(age.glm)\doublespacingAs you can see, we get a coefficients table that is similar to the `lm` table we used earlier. 
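The fit above uses R's `glm`. For readers who prefer Python (the language of the other notebooks in this document), a roughly equivalent model can be sketched with `statsmodels`; this is only a sketch, and it assumes `dat` is a pandas DataFrame with the outcome `day_28_flg` (0/1) and a dichotomized column named `age_cat` (the dot in R's `age.cat` is not valid in the formula syntax here).

```python
import numpy as np
import statsmodels.formula.api as smf

# Hypothetical Python analogue of glm(day_28_flg ~ age.cat, data=dat, family="binomial")
age_glm = smf.logit("day_28_flg ~ age_cat", data=dat).fit()
print(age_glm.summary())        # coefficient table with estimates, z values and p-values

# Exponentiating the non-intercept coefficient gives the odds ratio discussed below
print(np.exp(age_glm.params))
```

The coefficient table printed by either tool is read in the same way.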
Instead of a `t value`, we get a `z value`, but this can be interpreted similarly. The rightmost column is a p-value, for testing the null hypothesis $\beta=0$. If you recall, the non-intercept coefficients are log-odds ratios, so testing if they are zero is equivalent to testing if the odds ratios are one. If an odds ratio is one the odds are equal in the numerator group and denominator group, indicating the probabilities of the outcome are equal in each group. So, assessing if the coefficients are zero will be an important aspect of doing this type of analysis.Looking more closely at the coefficients. The intercept is `r round(age.glm$coef[1],2)` and the `age.cat` coefficient is `r round(age.glm$coef[2],2)`. The coefficient for `age.cat` is the log odds ratio for the 2x2 table we previously did the analysis on. When we exponentiate `r round(age.glm$coef[2],2)`, we get `exp(` `r round(age.glm$coef[2],2)` `)` = `r round(exp(age.glm$coef[2]),2)`. This corresponds with the estimate using the 2x2 table. For completeness, let's look at the other coefficient, the intercept. If you recall, $log(Odds_{x=0}) = \beta_0$, so $\beta_0$ is the log odds of the outcome in the younger group. Exponentiating again, `exp(` `r round(age.glm$coef[1],2)` `)` = `r round(exp(age.glm$coef[1]),3)`, and this corresponds with the previous analysis we did. Similarly, $log(Odds_{x=1}) = \beta_0 + \beta_1$, and the estimated odds of 28 day death in the older group is `exp(` `r round(age.glm$coef[1],2)` ` + ` `r round(age.glm$coef[2],2)` `)` = `r round(exp(sum(age.glm$coef[1:2])),2)`, as was found above. Converting estimated odds into a probability can be done directly using the `plogis` function, but we will cover a more powerful and easier way of doing this later on in the section. Beyond a Single Binary CovariateWhile the above analysis is useful for illustration, it does not readily demonstrate anything we could not do with our 2x2 table example above. Logistic regression allows us to extend the basic idea to at least two very relevant areas. The first is the case where we have more than one covariate of interest. Perhaps we have a confounder, we are concerned about, and want to adjust for it. Alternatively, maybe there are two covariates of interest. Secondly, it allows use to use covariates as continuous quantities, instead of discretizing them into categories. For example, instead of dividing age up into exhaustive strata (as we did very simply by just dividing the patients into two groups, $\le55$ and $>55$ ), we could instead use age as a continuous covariate.First, having more than one covariate is simple. For example, if we wanted to add `service_unit` to our previous model, we could just add it as we did when using the `lm` function for linear regression. Here we specify `day_28_flg ~ age.cat + service_unit` and run the `summary` function.\singlespacingageunit.glm <- glm(day_28_flg ~ age.cat + service_unit,data=dat,family="binomial") summary(ageunit.glm)$coef\doublespacingA coefficient table is produced, and now we have four estimated coefficients. The same two, `(Intercept)` and `age.cat` which were estimated in the unadjusted model, but also we have `service_unitMICU` and `service_unitSICU` which correspond to the log odds ratios for the MICU and SICU relative to the FICU. Taking the exponential of these will result in an odds ratio for each variable, adjusted for the other variables in the model. 
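Continuing the hypothetical `statsmodels` sketch from above (again assuming a pandas DataFrame `dat` with `day_28_flg`, `age_cat` and `service_unit` columns), the adjusted odds ratios come from exponentiating the fitted coefficients, and `scipy.special.expit` plays the role of R's `plogis` for turning a log odds back into a probability.

```python
import numpy as np
import statsmodels.formula.api as smf
from scipy.special import expit   # inverse logit, the counterpart of R's plogis

# Hypothetical adjusted model with both covariates
ageunit_glm = smf.logit("day_28_flg ~ age_cat + service_unit", data=dat).fit()

# Adjusted odds ratios: exponentiate every coefficient except the intercept
print(np.exp(ageunit_glm.params.drop("Intercept")))

# Estimated probability of 28 day death in the reference categories (<=55, FICU)
print(expit(ageunit_glm.params["Intercept"]))
```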
In this case the adjusted odds ratios for Age>55, MICU and SICU are `r round(exp(ageunit.glm$coef[2]),2) `, `r round(exp(ageunit.glm$coef[3]),2) `, and `r round(exp(ageunit.glm$coef[4]),2) `, respectively. We would conclude that there is an almost 9-fold increase in the odds of 28 day mortality for those in the $>55$ year age group relative to the younger $\le55$ group while holding service unit constant. This adjustment becomes important in many scenarios where groups of patients may be more or less likely to receive treatment, but also more or less likely to have better outcomes, where one effect is confounded by possibly many others. Such is almost always the case with observational data, and this is why logistic regression is such a powerful data analysis tool in this setting.Another case we would like to be able to deal with is when we have a continuous covariate we would like to include in the model. One can always break the continuous covariate into mutually exclusive categories by selecting break or cut points, but selecting the number and location of these points can be arbitrary, and in many cases unnecessary or inefficient. Recall that in logistic regression we are fitting a model:\centering$logit(p_x) = log(Odds_x) = log(\frac{p_x}{1-p_x}) = \beta_0 + \beta_1 \times x$,\raggedrightbut now assume $x$ is continuous. Imagine a hypothetical scenario where you know $\beta_0$ and $\beta_1$ and have a group of 50 year olds, and a group of 51 year olds. The difference in the log Odds between the two groups is:\centering$log(Odds_{51}) -log(Odds_{50}) = (\beta_0 + \beta_1 \times 51) - (\beta_0 + \beta_1 \times 50) = \beta_1(51-50) = \beta_1$.\raggedrightHence, the odds ratio for 51 year olds versus 50 year olds is $\exp{(\beta_1)}$. This is actually true for any group of patients which are 1 year apart, and this gives a useful way to interpret and use these estimated coefficients for continuous covariates. Let's work with an example. Again fitting the 28 day mortality outcome as a function of age, but treating age as it was originally recorded in the dataset, a continuous variable called `age`.\singlespacingagects.glm <- glm(day_28_flg ~ age,data=dat,family="binomial") summary(agects.glm)$coef\doublespacingWe see the estimated coefficient is `r round(agects.glm$coef[2],2)` and still very statistically significant. Exponentiating the log odds ratio for age, we get an estimated odds ratio of `r round(exp(agects.glm$coef[2]),2)`, which is per 1 year increase in age. What if the age difference of interest is ten years instead of one year? There are at least two ways of doing this. One is to replace `age` with `I(age/10)`, which uses a new covariate which is `age` divided by ten. The second is to use the `agects.glm` estimated log odds ratio, and multiple by ten prior to exponentiating. They will yield equivalent estimates of `r round(exp(agects.glm$coef[2]*10),2)`, but it is now per 10 year increases in age. This is useful when the estimated odds ratios (or log odds ratios) are close to one (or zero). When this is done, one unit of the covariate is 10 years, so the generic interpretation of the coefficients remains the same, but the units (per 10 years instead of per 1 year) changes.This of course assumes that the form of our equation relating the log odds of the outcome to the covariate is correct. 
In cases where odds of the outcome decreases and increases as a function of the covariate, it is possible to estimate a relatively small effect of the linear covariate, when the outcome may be strongly affected by the covariate, but not in the way the model is specified. Assessing the linearity of the log odds of the outcome and some discretized form of the covariate can be done graphically. For instance, we can break age into 5 groups, and estimate the log odds of 28 day mortality in each group. Plotting these quantities in Figure 1, we can see in this particular case, age is indeed strongly related to the odds of the outcome. Further, expressing age linearly appears like it would be a good approximation. If on the other hand, 28 day mortality has more of a "U"-shaped curve, we may falsely conclude that no relationship between age and mortality exists, when the relationship may be rather strong. Such may be the case when looking at the the log odds of mortality by the first temperature (`temp_1st`) in Figure 1 (right).library(Hmisc); library(grid); library(gridExtra) postscript("FigC1.eps") #tmp <- prop.table(table(cut2(dat$age,g=5), dat$day_28_flg),1) tmp.glm <- glm(day_28_flg ~ cut2(age,g=5),data=dat,family="binomial") tmp <- tmp.glm$coef tmp <- tmp[1] + c(0,tmp[2:5]) names(tmp) <- levels(cut2(dat$age,g=5)) library(ggplot2) se <- sqrt(diag(summary(tmp.glm)$cov.unscaled) + c(0,diag(summary(tmp.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp.glm)$cov.unscaled[1,2:5])) limits <- aes(ymax = tmp + se, ymin=tmp - se) plotage <- qplot(names(tmp),tmp) + xlab("Age Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain")) tmp2.glm <- glm(day_28_flg ~ cut2(temp_1st,g=5),data=dat,family="binomial") tmp2 <- tmp2.glm$coef tmp2 <- tmp2[1] + c(0,tmp2[2:5]) names(tmp2) <- levels(cut2(dat$temp_1st,g=5)) library(ggplot2) se <- sqrt(diag(summary(tmp2.glm)$cov.unscaled) + c(0,diag(summary(tmp2.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp2.glm)$cov.unscaled[1,2:5])) limits <- aes(ymax = tmp2 + se, ymin=tmp2 - se) plottemp <- qplot(names(tmp2),tmp2) + xlab("Temperature Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain")) grid.arrange(plotage, plottemp, nrow=1, ncol=2) dev.off() ```{r echo=FALSE,message=FALSE,warning=FALSE,fig.cap="Plot of log-odds of mortality for each of the five age and temperature groups. 
Error bars represent 95% confidence intervals for the log odds"} tmp.glm <- glm(day_28_flg ~ cut2(age,g=5),data=dat,family="binomial") tmp <- tmp.glm$coef tmp <- tmp[1] + c(0,tmp[2:5]) names(tmp) <- levels(cut2(dat$age,g=5)) library(ggplot2) se <- sqrt(diag(summary(tmp.glm)$cov.unscaled) + c(0,diag(summary(tmp.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp.glm)$cov.unscaled[1,2:5])) limits <- aes(ymax = tmp + se, ymin=tmp - se) plotage <- qplot(names(tmp),tmp) + xlab("Age Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain")) tmp2.glm <- glm(day_28_flg ~ cut2(temp_1st,g=5),data=dat,family="binomial") tmp2 <- tmp2.glm$coef tmp2 <- tmp2[1] + c(0,tmp2[2:5]) names(tmp2) <- levels(cut2(dat$temp_1st,g=5)) library(ggplot2) se <- sqrt(diag(summary(tmp2.glm)$cov.unscaled) + c(0,diag(summary(tmp2.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp2.glm)$cov.unscaled[1,2:5])) limits <- aes(ymax = tmp2 + se, ymin=tmp2 - se) plottemp <- qplot(names(tmp2),tmp2) + xlab("Temperature Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain")) grid.arrange(plotage, plottemp, nrow=1, ncol=2)Hypothesis Testing and Model SelectionJust as in the case for linear regression, there is a way to test hypotheses for logistic regression. It follows much of the same framework, with the null hypothesis being $\beta=0$. If you recall, this is the log odds ratio, and testing if it is zero is equivalent to a test for the odds ratio being equal to one. Particularly when dealing with a single categorical covariate, there are techniques taught in introductory statistics courses which can be applied here (see `?fisher.test` and `?chisq.test`). In this chapter, we focus on how to conduct such a test in `R`.As was the case when using `lm`, we first fit the two competing models, a larger (alternative model), and a smaller (null model). Provided that the models are nested, we can again use the `anova` function, passing the smaller model, then the larger model. Here our larger model is the one which contained `service_unit` and `age.cat`, and the smaller only contains `age.cat`, so they are nested. We are then testing if the log odds ratios for the two coefficients associated with `service_unit` are zero. Let's call these coefficients $\beta_{MICU}$ and $\beta_{SICU}$. To test if $\beta_{MICU}$ and $\beta_{SICU} = 0$, we can use the `anova` function, where this time we will specify the type of test, in this case set the `test` parameter to `"Chisq"`.\singlespacinganova(age.glm,ageunit.glm,test="Chisq")\doublespacingHere the output of the `anova` function when applied to `glm` objects looks similar to the output generated when used on `lm` objects. A couple good practices to get in a habit are to first make sure the two competing models are correctly specified. He we are are testing `~ age.cat` versus `age.cat + service_unit`. Next, the difference between the residual degrees of freedom (`Resid. Df`) in the two models tell us how many more parameters the larger model has when compared to the smaller model. Here we see `1774 - 1772 = 2` which means that there are two more coefficients estimated in the larger model than the smaller one, which corresponds with the output from the `summary` table above. 
Next looking at the p-value (`Pr(>Chi)`), we see a test for $\beta_{MICU}$ and $\beta_{SICU} = 0$ has a p-value of around 0.08. At the typical 0.05 significance level, we would not reject the null, and use the simpler model without the service unit. In logistic regression, this is a common way of testing whether a categorical covariate should be retained in the model, as it can be difficult to assess using the `z value` in the `summary` table, particularly when one is very statistically significant, and one is not. Confidence IntervalsGenerating confidence intervals for either the log-odds ratios or the odds ratios are relatively straightforward. To get the log-odds ratios and respective confidence intervals for the `ageunit.glm` model which includes both age and service unit.\singlespacingageunit.glm$coef confint(ageunit.glm)\doublespacingHere the coefficient estimates and confidence intervals are presented in much the same way as for a linear regression. In logistic regression, it is often convenient to exponentiate these quantities to get it on a more interpretable scale.\singlespacingexp(ageunit.glm$coef[-1]) exp(confint(ageunit.glm)[-1,])\doublespacingSimilar to linear regression, we will look at if the confidence intervals for the log odds ratios include zero. This is equivalent to seeing if the intervals for the odds ratios include 1. Since the odds ratios are more directly interpretable it is often more convenient to report them instead of the coefficients on the log odds ratio scale. PredictionOnce you have decided on your final model, you may want to generate predictions from your model. Such a task may occur when doing a propensity score analysis (Chapter 3.9) or creating tools for clinical decision support. In the logistic regression setting this involves attempting to estimating the probability of the outcome given the characteristics (covariates) of a patient. This quantity is often denoted $P(outcome | X)$. This is relatively easy to accomplish in `R` using the `predict` function. One must pass a dataset with all the variables contained in the model. Let's assume that we decided to include the `service_unit` in our final model, and want to generate predictions from this based on a new set of patients. Let's first create a new data frame called `newdat` using the `expand.grid` function which computes all combinations of the values of variables passed to it.\singlespacingnewdat <- expand.grid(age.cat=c("<=55",">55"),service_unit=c("FICU","MICU","SICU")) newdat$pred <- predict(ageunit.glm,newdata=newdat,type="response") newdatPrint Numbers from 0 to 100 (as integers) Without Using Any Numbers in your Codeimport string num_alpha = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] print(num_alpha) print(string.digits) print(type(string.digits)) dict_num = {num_alpha[i] : n for i, n in enumerate(string.digits)} print(dict_num) one_o_one = dict_num['one'] + dict_num['zero'] + dict_num['one'] print(one_o_one) print(type(one_o_one)) for n in range(int(one_o_one)): print(n, '\t', type(n))0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 [...]It is a study note over http://interactivepython.org/runestone/static/pythonds/index.html by , which is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 
Searching Shared and adapted by: YinTaiChen In Python, there is a very easy way to ask whether an item is in a list of items. We use the __in__ operator.15 in [3, 5, 2, 4, 1] 3 in [3, 5, 2, 4, 1]The Sequential Search Starting from the first item in the list, we simply move from item to item, following the underlying sequential order until we either find what we are looking for or run out of items.def sequentialSearch(alist, item): pos = 0 found = False while pos < len(alist) and not found: if alist[pos] == item: found = True else: pos = pos+1 return found testlist = [1, 2, 32, 8, 17, 19, 42, 13, 0] print(sequentialSearch(testlist, 3)) print(sequentialSearch(testlist, 13))TrueAnalysis of Sequential Search

| __Case__ | __Best Case__ | __Worst Case__ | __Average Case__ |
|----------|---------------|----------------|------------------|
| item is present | 1 | n | n/2 |
| item is not present | n | n | n |

What if the list is ordered? In some cases, the algorithm does not have to continue looking through all of the items to report that the item was not found.def orderedSequentialSearch(alist, item): pos = 0 found = False stop = False while pos < len(alist) and not found and not stop: if alist[pos] == item: found = True else: if alist[pos] > item: stop = True else: pos = pos+1 return found testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42] print(orderedSequentialSearch(testlist, 3)) print(orderedSequentialSearch(testlist, 13))TrueThe Binary Search Instead of searching the list in sequence, a __binary search__ will start by examining the middle item. If it is not the correct item, we can use the ordered nature of the list to eliminate half of the remaining items.def binarySearch(alist, item): first = 0 last = len(alist)-1 found = False while first<=last and not found: midpoint = (first + last) // 2 if alist[midpoint] == item: found = True else: if item < alist[midpoint]: last = midpoint-1 else: first = midpoint+1 return found testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42] print(binarySearch(testlist, 3)) print(binarySearch(testlist, 13))TrueThis algorithm is a great example of a divide and conquer strategy. Recursive Version of Binary Searchdef r_binarySearch(alist, item): if len(alist) == 0: return False else: midpoint = len(alist) // 2 if alist[midpoint]==item: return True else: if item < alist[midpoint]: return r_binarySearch(alist[:midpoint], item) else: return r_binarySearch(alist[midpoint+1:], item) testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42] print(r_binarySearch(testlist, 3)) print(r_binarySearch(testlist, 13))TruePrepare the dataWe first need to load both our league-data and EMA data and combine these together.league_data = pd.read_csv('data/league_data.csv') league_data.drop(['Unnamed: 0', 'Date', 'HomeTeam', 'AwayTeam', 'HTGS', 'ATGS', 'HTGC', 'ATGC', 'HM1', 'HM2', 'HM3', 'HM4', 'HM5', 'AM1', 'AM2', 'AM3', 'AM4', 'AM5', 'HTFormPts', 'ATFormPts', 'MW', 'HTFormPtsStr', 'ATFormPtsStr'], 1, inplace=True) league_data.columns EMA_data = pd.read_csv('data/EMA_data.csv') EMA_data.drop(['Unnamed: 0', 'f_DateHome', 'f_seasonHome', 'HomeTeam', 'homeGame_x', 'f_yellowsAgainstAway', 'f_yellowsForAway', 'f_yellowsAgainstHome', 'f_yellowsForHome', 'f_DateAway', 'f_seasonAway', 'AwayTeam', 'homeGame_y'], 1, inplace=True) EMA_data.columns df = pd.merge(EMA_data, league_data, left_on='gameId', right_index=True) df.head()Cleaning and splitting the dataBecause the machine-learning model only takes numeric input we will change our labels from strings to integers and use categorical cross-entropy as our loss function.
We will also scale our data using sklearn StandardScaler.First we will seperate the labels from the rest of our data and split into training and testing.training_data = df.loc[df['season'] != 1920].reset_index(drop=True) testing_data = df.loc[df['season'] == 1920].reset_index(drop=True) X = training_data.drop(['gameId', 'gameId_x', 'gameId_y', 'FTR', 'season', 'gameId_y', ], 1) Y = training_data['FTR'] X_test = testing_data.drop(['gameId', 'gameId_x', 'gameId_y', 'FTR', 'season', 'gameId_y', ], 1) y_test = testing_data['FTR'] def transform_results(results): transformed = [] for i in range(len(results)): if results[i] == 'H': transformed.append(0) elif results[i] == 'A': transformed.append(2) else: transformed.append(1) return np.array(transformed) Y = transform_results(Y) y_test = transform_results(y_test) scaler = StandardScaler() X = scaler.fit_transform(X) X_test = scaler.fit_transform(X_test) print('Number of matches in training data:', X.shape[0]) print('Number of matches in test data:', X_test.shape[0]) print('Number of features:', X.shape[1]) # Split our data into train/validation. We are using the # most recent full season (18/19) for validation data. X_train = X[:-380] y_train = Y[:-380] X_val = X[-380:] y_val = Y[-380:]Build the modelNow we have cleaned the data we can now create our model and train it.# input dimension is number of features input_dim = X_train.shape[1] activation_func = 'relu' kernel_init = 'glorot_normal' learning_rate = 0.001 batch_size = 16 model = keras.Sequential([ Dense(48, input_shape=(input_dim,), activation=activation_func), Dropout(0.3), Dense(16), Dropout(0.2), Dense(3, activation='softmax') ]) es = EarlyStopping(monitor='loss', patience=3, verbose=1) from keras.optimizers import Adam, SGD opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, batch_size=batch_size, callbacks=[es], epochs=500, verbose=0) train_loss, train_acc = model.evaluate(X_train, y_train) val_loss, val_acc = model.evaluate(X_val, y_val) print('Training loss:', train_loss) print('Training accuracy:', train_acc) print('Validation loss:', val_loss) print('Validation accuracy:', val_acc)4123/4123 [==============================] - 0s 18us/step 380/380 [==============================] - 0s 15us/step Training loss: 0.7936528218680148 Training accuracy: 0.6480717658996582 Validation loss: 0.8073749272446883 Validation accuracy: 0.6184210777282715We are getting around 65% on training and 62% on validation. This is good as is well above the bookies accuracy. Once we are happy with how the model is performing we can check the accuracy on the test set. First let's get a more detailed breakdown of the model's accuracy by using classification report. 
This will show how we perform for each label.labels = ['Home', 'Draw', 'Away'] y_preds = model.predict(X_val) y_pred_argmax = [np.argmax(i) for i in y_preds] print(classification_report(y_val, y_pred_argmax, target_names=labels)) print(confusion_matrix(y_val, y_pred_argmax)) # Saving best model # model.save('25Nov19.h5')Check test accuracyOnce we have our model performing well we will use our 'holdout' test-set, season 19/20 so fartest_loss, test_acc = model.evaluate(X_test, y_test) print('Test loss:', test_loss) print('Test accuracy:', test_acc) y_preds = model.predict(X_test) y_pred_argmax = [np.argmax(i) for i in y_preds] print(classification_report(y_test, y_pred_argmax))precision recall f1-score support 0 0.67 0.91 0.77 54 1 0.45 0.15 0.22 34 2 0.70 0.78 0.74 40 accuracy 0.66 128 macro avg 0.61 0.61 0.58 128 weighted avg 0.62 0.66 0.62 128Create a betting strategyNow that we have an accurate model let's see if we can make it as profitable as possible. Our confusion matrix looks good. I mainly wanted to check the performance for draws as these are difficult to predict, the results are ok.We will now use the for-loop below to see how much we would have won had we bet.funds = 100 wager = 10 favourites = 0 no_bets = 0 min_diff = 0.03 y_preds = model.predict(X_test) for i in range(len(X_test)): prediction = np.argmax(y_preds[i]) print('\nPrediction', prediction) print('Actual', y_test[i]) print('Favourite', np.argmin([testing_data['B365H'][i], testing_data['B365D'][i], testing_data['B365A'][i]])) print('Prediction proba', y_preds[i]) print('Home, Draw and Away odds', testing_data['B365H'][i], testing_data['B365D'][i], testing_data['B365A'][i]) if prediction == 0: odds_diff = y_preds[i][prediction] - (1/testing_data['B365H'][i]) if odds_diff > min_diff: if prediction == np.argmin([testing_data['B365H'][i], testing_data['B365D'][i], testing_data['B365A'][i]]): favourites +=1 if prediction == y_test[i]: funds += (wager * testing_data['B365H'][i]) - wager else: funds -= wager else: no_bets +=1 elif prediction == 1: odds_diff = y_preds[i][prediction] - (1/testing_data['B365D'][i]) if odds_diff > min_diff: if prediction == np.argmin([testing_data['B365H'][i], testing_data['B365D'][i], testing_data['B365A'][i]]): favourites +=1 if prediction == y_test[i]: funds +=( wager * testing_data['B365D'][i]) - wager else: funds -= wager else: no_bets +=1 else: odds_diff = y_preds[i][prediction] - (1/testing_data['B365A'][i]) if odds_diff > min_diff: if prediction == np.argmin([testing_data['B365H'][i], testing_data['B365D'][i], testing_data['B365A'][i]]): favourites +=1 if prediction == y_test[i]: funds += (wager * testing_data['B365A'][i]) - wager else: funds -= wager else: no_bets +=1 print('Funds', funds) print(f'Betted on favourite {favourites} times out of {len(X_test)} matches.') print(f'No bet placed {no_bets} times')Prediction 0 Actual 0 Favourite 0 Prediction proba [0.6406146 0.2976048 0.06178055] Home, Draw and Away odds 1.14 10.0 19.0 Funds 100 Prediction 2 Actual 2 Favourite 2 Prediction proba [0.1498708 0.09711154 0.75301766] Home, Draw and Away odds 12.0 6.5 1.22 Funds 100 Prediction 2 Actual 1 Favourite 0 Prediction proba [0.24385048 0.229821 0.5263285 ] Home, Draw and Away odds 1.95 3.6 3.6 Funds 90 Prediction 2 Actual 1 Favourite 2 Prediction proba [0.23566064 0.30126706 0.46307233] Home, Draw and Away odds 3.0 3.25 2.37 Funds 80 Prediction 2 Actual 2 Favourite 0 Prediction proba [0.26342666 0.29570556 0.44086778] Home, Draw and Away odds 1.9 3.4 4.0 Funds 110.0 Prediction 0 Actual 0 
Favourite 0 Prediction proba [0.36956793 0.30569366 0.32473844] Home, Draw and Away odds 1.3 5.25 10.0 Funds 110.0 Prediction 2 Actual 1 Favourite 0 Prediction proba [0.21831408 0.30487978 0.4768061 ] Home, Draw and Away odds 2.2 3.2 3.4 Funds 100.0 Prediction 2 Actual 2 Favourite 2 Prediction pr[...]ConvNet Let's get the data and training interface from where we left in the last notebook.x_train, y_train, x_valid, y_valid = get_data() x_train, x_valid = normalize_to(x_train, x_valid) train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid) nh, bs = 50, 512 c = y_train.max().item() + 1 loss_func = F.cross_entropy data = DataBunch(*get_dls(train_ds, valid_ds, bs), c) data mnist_view = view_tfm(1,28,28) cbfs = [Recorder, partial(AvgStatsCallback,accuracy), CudaCallback, partial(BatchTransformXCallback, mnist_view)] nfs = [8,16,32,64,64] learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs) %time run.fit(2, learn)train: [1.28051703125, tensor(0.5900, device='cuda:0')] valid: [0.2614665283203125, tensor(0.9187, device='cuda:0')] train: [0.25912669921875, tensor(0.9218, device='cuda:0')] valid: [0.11952723388671875, tensor(0.9622, device='cuda:0')] CPU times: user 3.84 s, sys: 911 ms, total: 4.75 s Wall time: 5.15 sBatchnorm Custom Let's start by building our own `BatchNorm` layer from scratch. i still can not figure out how to calculate the variance of chosen axis for a high dimensional tensor# calculate high dimensional mean? think this as a bs=2, channel=2, pic size = 3x3 # batch normal to 2 channels, we are looking for the mean and variance of each channel x = tensor([[[[1.,1., 1.], [1.,1., 2.]], [[1.,1.,3.], [1.,1., 4.]]], [[[2.,2., 2.], [2.,2., 3.]], [[2.,2., 4.], [2.,2., 5.]]]]) x.shape x.mean() x.mean((0,2,3), keepdim=True).shape # x.mean((1,2,3), keepdim=True).shape x.mean((0,2,3), keepdim=True) # x.mean((1,2,3), keepdim=True) a = np.array([[1.,1., 1.], [1.,1., 2.], [2.,2., 2.], [2.,2., 3.]]) a a.flatten().mean() a.flatten().var() x.view(1,2,2, -1) # (x.view(2,-1,1,1)).var(dim=1, keepdim=True) x.numpy().var((0,2,3)) x.var(dim=1).shape torch.var(x.view(1, 2, -1, 1), dim=2, keepdim=True).shape x.view(-1, 2, 1, 1) x[0][0] x[0][1] torch.var?? # torch.lerp?? start = torch.arange(1., 5.) 
end = torch.empty(4).fill_(10) torch.lerp(start, end, 0.5) start.lerp(end, 0.5) start # trailing underscore means assign the function results to the variable start.lerp_(end, 0.5) start$$\text{out}_i = \text{start}_i + \text{weight} \times (\text{end}_i - \text{start}_i)$$ register_buffer: will saved with the modelclass BatchNorm(nn.Module): def __init__(self, nf, mom=0.1, eps=1e-5): super().__init__() # NB: pytorch bn mom is opposite of what you'd expect self.mom, self.eps = mom, eps self.mults = nn.Parameter(torch.ones(nf, 1, 1)) self.adds = nn.Parameter(torch.zeros(nf, 1, 1)) self.register_buffer("vars", torch.ones(1, nf, 1, 1)) self.register_buffer("means", torch.zeros(1, nf, 1, 1)) def update_stats(self, x): m = x.mean((0, 2, 3), keepdim=True) # torch version problem, use this work around # v = x.var((0, 2, 3), keepdim=True) v = (x.view(1, x.shape[1], 1, -1,)).var(dim=3, keepdim=True) self.means.lerp_(m, self.mom) self.vars.lerp_(v, self.mom) return m, v def forward(self, x): if self.training: with torch.no_grad(): m, v = self.update_stats(x) else: m, v = self.means, self.vars x = (x - m) / (v + self.eps).sqrt() return x * self.mults + self.adds 3//2; 5 // 2 def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs): # No bias needed if using bn layers = [ nn.Conv2d(ni, nf, ks, padding=ks // 2, stride=stride, bias=not bn), GeneralRelu(**kwargs), ] if bn: layers.append(BatchNorm(nf)) return nn.Sequential(*layers) #export def init_cnn_(m, f): if isinstance(m, nn.Conv2d): f(m.weight, a=0.1) if getattr(m, 'bias', None) is not None: m.bias.data.zero_() for l in m.children(): init_cnn_(l, f) def init_cnn(m, uniform=False): f = init.kaiming_uniform_ if uniform else init.kaiming_normal_ init_cnn_(m, f) def get_learn_run(nfs, data, lr, layer, cbs=None, opt_func=None, uniform=False, **kwargs): model = get_cnn_model(data, nfs, layer, **kwargs) init_cnn(model, uniform=uniform) return get_runner(model, data, lr=lr, cbs=cbs, opt_func=opt_func)We can then use it in training and see how it helps keep the activations means to 0 and the std to 1.learn,run = get_learn_run(nfs, data, 0.9, conv_layer, cbs=cbfs) with Hooks(learn.model, append_stats) as hooks: run.fit(1, learn) fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4)) for h in hooks[:-1]: ms,ss = h.stats ax0.plot(ms[:10]) ax1.plot(ss[:10]) plt.legend(range(6)); fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4)) for h in hooks[:-1]: ms,ss = h.stats ax0.plot(ms) ax1.plot(ss) learn,run = get_learn_run(nfs, data, 1.0, conv_layer, cbs=cbfs) %time run.fit(3, learn)train: [0.36266734375, tensor(0.8851, device='cuda:0')] valid: [0.1511704833984375, tensor(0.9550, device='cuda:0')] train: [0.1059038671875, tensor(0.9670, device='cuda:0')] valid: [0.09263137817382812, tensor(0.9700, device='cuda:0')] train: [0.075050380859375, tensor(0.9768, device='cuda:0')] valid: [0.11988330078125, tensor(0.9619, device='cuda:0')] CPU times: user 3.35 s, sys: 86.8 ms, total: 3.43 s Wall time: 3.42 sBuiltin batchnorm#export def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs): layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn), GeneralRelu(**kwargs)] if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1)) return nn.Sequential(*layers) learn,run = get_learn_run(nfs, data, 1., conv_layer, cbs=cbfs) %time run.fit(3, learn)train: [0.28877712890625, tensor(0.9121, device='cuda:0')] valid: [0.1067998046875, tensor(0.9680, device='cuda:0')] train: [0.0775843115234375, tensor(0.9765, device='cuda:0')] valid: [0.07021278076171875, tensor(0.9800, 
device='cuda:0')] train: [0.053029921875, tensor(0.9840, device='cuda:0')] valid: [0.06344912109375, tensor(0.9817, device='cuda:0')] CPU times: user 5.67 s, sys: 915 ms, total: 6.59 s Wall time: 8.88 sWith scheduler Now let's add the usual warm-up/annealing.sched = combine_scheds([0.3, 0.7], [sched_lin(0.6, 2.), sched_lin(2., 0.1)]) learn,run = get_learn_run(nfs, data, 0.9, conv_layer, cbs=cbfs +[partial(ParamScheduler,'lr', sched)]) run.fit(8, learn)train: [0.32286591796875, tensor(0.9060, device='cuda:0')] valid: [0.1767323486328125, tensor(0.9450, device='cuda:0')] train: [0.09884525390625, tensor(0.9682, device='cuda:0')] valid: [0.17165784912109375, tensor(0.9475, device='cuda:0')] train: [0.0699253076171875, tensor(0.9774, device='cuda:0')] valid: [0.06874525756835938, tensor(0.9787, device='cuda:0')] train: [0.042292138671875, tensor(0.9869, device='cuda:0')] valid: [0.06281668090820312, tensor(0.9808, device='cuda:0')] train: [0.02978388671875, tensor(0.9907, device='cuda:0')] valid: [0.061725439453125, tensor(0.9817, device='cuda:0')] train: [0.01906186279296875, tensor(0.9946, device='cuda:0')] valid: [0.0569135498046875, tensor(0.9834, device='cuda:0')] train: [0.01300778564453125, tensor(0.9967, device='cuda:0')] valid: [0.048958990478515624, tensor(0.9857, device='cuda:0')] train: [0.009002083129882812, tensor(0.9983, device='cuda:0')] valid: [0.04764816589355469, tensor(0.9860, device='cuda:0')]More norms Layer norm From [the paper](https://arxiv.org/abs/1607.06450): "*batch normalization cannot be applied to online learning tasks or to extremely large distributed models where the minibatches have to be small*". General equation for a norm layer with learnable affine:$$y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta$$The difference with BatchNorm is1. we don't keep a moving average2. we don't average over the batches dimension but over the hidden dimension, so it's independent of the batch sizeclass LayerNorm(nn.Module): __constants__ = ['eps'] def __init__(self, eps=1e-5): super().__init__() self.eps = eps self.mult = nn.Parameter(tensor(1.)) self.add = nn.Parameter(tensor(0.)) def forward(self, x): m = x.mean((1,2,3), keepdim=True) v = x.var ((1,2,3), keepdim=True) x = (x-m) / ((v+self.eps).sqrt()) return x*self.mult + self.add def conv_ln(ni, nf, ks=3, stride=2, bn=True, **kwargs): layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True), GeneralRelu(**kwargs)] if bn: layers.append(LayerNorm()) return nn.Sequential(*layers) learn,run = get_learn_run(nfs, data, 0.8, conv_ln, cbs=cbfs) %time run.fit(3, learn)train: [nan, tensor(0.1184, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] train: [nan, tensor(0.0986, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] train: [nan, tensor(0.0986, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] CPU times: user 9.16 s, sys: 1.82 s, total: 11 s Wall time: 14.3 s*Thought experiment*: can this distinguish foggy days from sunny days (assuming you're using it before the first conv)? 
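One way to reason about the thought experiment: a layer norm placed before the first convolution removes each image's overall brightness and contrast, so two inputs that differ only by a global shift and scale become nearly indistinguishable. The sketch below is my own numerical check (not part of the lesson); it reuses the same per-sample normalization as the `LayerNorm` class above, without the learnable affine.

```python
import torch

torch.manual_seed(0)
x = torch.randn(2, 3, 8, 8)   # a tiny batch of "images"
y = 2.5 * x + 4.0             # the same images, globally brighter with more contrast

def layer_norm(t, eps=1e-5):
    # normalize each sample over its (channel, height, width) dimensions
    m = t.mean((1, 2, 3), keepdim=True)
    v = t.var((1, 2, 3), keepdim=True)
    return (t - m) / (v + eps).sqrt()

# The per-image shift and scale are normalized away, so the difference is tiny
print((layer_norm(x) - layer_norm(y)).abs().max())   # prints a value close to zero
```

If foggy and sunny scenes differ mostly in global brightness and contrast, that signal is largely gone before the first convolution ever sees it.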
Instance norm From [the paper](https://arxiv.org/abs/1607.08022): The key difference between **contrast** and batch normalization is that the latter applies the normalization to a whole batch of images instead for single ones:\begin{equation}\label{eq:bnorm} y_{tijk} = \frac{x_{tijk} - \mu_{i}}{\sqrt{\sigma_i^2 + \epsilon}}, \quad \mu_i = \frac{1}{HWT}\sum_{t=1}^T\sum_{l=1}^W \sum_{m=1}^H x_{tilm}, \quad \sigma_i^2 = \frac{1}{HWT}\sum_{t=1}^T\sum_{l=1}^W \sum_{m=1}^H (x_{tilm} - mu_i)^2.\end{equation}In order to combine the effects of instance-specific normalization and batch normalization, we propose to replace the latter by the *instance normalization* (also known as *contrast normalization*) layer:\begin{equation}\label{eq:inorm} y_{tijk} = \frac{x_{tijk} - \mu_{ti}}{\sqrt{\sigma_{ti}^2 + \epsilon}}, \quad \mu_{ti} = \frac{1}{HW}\sum_{l=1}^W \sum_{m=1}^H x_{tilm}, \quad \sigma_{ti}^2 = \frac{1}{HW}\sum_{l=1}^W \sum_{m=1}^H (x_{tilm} - mu_{ti})^2.\end{equation}class InstanceNorm(nn.Module): __constants__ = ['eps'] def __init__(self, nf, eps=1e-0): super().__init__() self.eps = eps self.mults = nn.Parameter(torch.ones (nf,1,1)) self.adds = nn.Parameter(torch.zeros(nf,1,1)) def forward(self, x): m = x.mean((2,3), keepdim=True) v = x.var ((2,3), keepdim=True) res = (x-m) / ((v+self.eps).sqrt()) return res*self.mults + self.adds def conv_in(ni, nf, ks=3, stride=2, bn=True, **kwargs): layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True), GeneralRelu(**kwargs)] if bn: layers.append(InstanceNorm(nf)) return nn.Sequential(*layers) learn,run = get_learn_run(nfs, data, 0.1, conv_in, cbs=cbfs) %time run.fit(3, learn)train: [nan, tensor(0.0986, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] train: [nan, tensor(0.0986, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] train: [nan, tensor(0.0986, device='cuda:0')] valid: [nan, tensor(0.0991, device='cuda:0')] CPU times: user 8.74 s, sys: 1.6 s, total: 10.3 s Wall time: 13.6 s*Question*: why can't this classify anything? Lost in all those norms? The authors from the [group norm paper](https://arxiv.org/pdf/1803.08494.pdf) have you covered:![Various norms](images/norms.png) Group norm *From the PyTorch docs:* `GroupNorm(num_groups, num_channels, eps=1e-5, affine=True)`The input channels are separated into `num_groups` groups, each containing``num_channels / num_groups`` channels. The mean and standard-deviation are calculatedseparately over the each group. $\gamma$ and $\beta$ are learnableper-channel affine transform parameter vectors of size `num_channels` if`affine` is `True`.This layer uses statistics computed from input data in both training andevaluation modes.Args:- `num_groups (int)`: number of groups to separate the channels into- `num_channels (int)`: number of channels expected in input- `eps`: a value added to the denominator for numerical stability. Default: `1e-5`- `affine`: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). 
Default: ``True``.Shape:- Input: `(N, num_channels, *)`- Output: `(N, num_channels, *)` (same shape as input)Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> Separate 6 channels into 3 groups >>> m = nn.GroupNorm(3, 6) >>> Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = nn.GroupNorm(6, 6) >>> Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = nn.GroupNorm(1, 6) >>> Activating the module >>> output = m(input) Fix small batch sizes What's the problem? When we compute the statistics (mean and std) for a BatchNorm Layer on a small batch, it is possible that we get a standard deviation very close to 0. because there aren't many samples (the variance of one thing is 0. since it's equal to its mean).data = DataBunch(*get_dls(train_ds, valid_ds, 2), c) def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs): layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn), GeneralRelu(**kwargs)] if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1)) return nn.Sequential(*layers) learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs) %time run.fit(1, learn)train: [2.33641984375, tensor(0.1839, device='cuda:0')] valid: [3050.7212, tensor(0.2348, device='cuda:0')] CPU times: user 1min 45s, sys: 8.65 s, total: 1min 54s Wall time: 1min 54sRunning Batch Norm To solve this problem we introduce a Running BatchNorm that uses smoother running mean and variance for the mean and std.class RunningBatchNorm(nn.Module): def __init__(self, nf, mom=0.1, eps=1e-5): super().__init__() self.mom,self.eps = mom,eps self.mults = nn.Parameter(torch.ones (nf,1,1)) self.adds = nn.Parameter(torch.zeros(nf,1,1)) self.register_buffer('sums', torch.zeros(1,nf,1,1)) self.register_buffer('sqrs', torch.zeros(1,nf,1,1)) self.register_buffer('batch', tensor(0.)) self.register_buffer('count', tensor(0.)) self.register_buffer('step', tensor(0.)) self.register_buffer('dbias', tensor(0.)) def update_stats(self, x): bs,nc,*_ = x.shape self.sums.detach_() self.sqrs.detach_() dims = (0,2,3) s = x.sum(dims, keepdim=True) ss = (x*x).sum(dims, keepdim=True) c = self.count.new_tensor(x.numel()/nc) mom1 = 1 - (1-self.mom)/math.sqrt(bs-1) self.mom1 = self.dbias.new_tensor(mom1) self.sums.lerp_(s, self.mom1) self.sqrs.lerp_(ss, self.mom1) self.count.lerp_(c, self.mom1) self.dbias = self.dbias*(1-self.mom1) + self.mom1 self.batch += bs self.step += 1 def forward(self, x): if self.training: self.update_stats(x) sums = self.sums sqrs = self.sqrs c = self.count if self.step<100: sums = sums / self.dbias sqrs = sqrs / self.dbias c = c / self.dbias means = sums/c vars = (sqrs/c).sub_(means*means) if bool(self.batch < 20): vars.clamp_min_(0.01) x = (x-means).div_((vars.add_(self.eps)).sqrt()) return x.mul_(self.mults).add_(self.adds)NB: the calculation of `self.dbias` in the version in the lesson video was incorrect. The correct version is in the cell above. Also, we changed how we calculated `self.mom1` to something that it more mathematically appropriate. 
These two changes improved the accuracy from 91% (in the video) to 97%+ (shown below)!def conv_rbn(ni, nf, ks=3, stride=2, bn=True, **kwargs): layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn), GeneralRelu(**kwargs)] if bn: layers.append(RunningBatchNorm(nf)) return nn.Sequential(*layers) learn,run = get_learn_run(nfs, data, 0.4, conv_rbn, cbs=cbfs) %time run.fit(1, learn)train: [0.3415529296875, tensor(0.9098, device='cuda:0')] valid: [0.13382496337890626, tensor(0.9723, device='cuda:0')] CPU times: user 4min 32s, sys: 24 s, total: 4min 56s Wall time: 4min 56sThis solves the small batch size issue! What can we do in a single epoch? Now let's see with a decent batch size what result we can get.data = DataBunch(*get_dls(train_ds, valid_ds, 32), c) learn,run = get_learn_run(nfs, data, 0.8, conv_rbn, cbs=cbfs +[partial(ParamScheduler,'lr', sched_lin(1., 0.2))]) %time run.fit(1, learn)train: [0.155741728515625, tensor(0.9518, device='cuda:0')] valid: [0.10425401611328125, tensor(0.9705, device='cuda:0')] CPU times: user 17.5 s, sys: 1.6 s, total: 19.1 s Wall time: 19.1 sSimplified RunningBatchNorm It turns out we don't actually need to debias - because, for instance, dividing a debiased sum by a debiased count is the same as dividing a *biased* sum by a *biased* count! So we can remove all the debiasing stuff and end up with a simpler class. Also, we should save `eps` as a buffer since it impacts the calculation. (Thanks to for noticing these.) Also we can slightly change the final calculation in `forward` with one that uses `factor` and `offset` to reduce the amount of broadcasting required. (Thanks to for this suggestion.)#export class RunningBatchNorm(nn.Module): def __init__(self, nf, mom=0.1, eps=1e-5): super().__init__() self.mom, self.eps = mom, eps self.mults = nn.Parameter(torch.ones (nf,1,1)) self.adds = nn.Parameter(torch.zeros(nf,1,1)) self.register_buffer('sums', torch.zeros(1,nf,1,1)) self.register_buffer('sqrs', torch.zeros(1,nf,1,1)) self.register_buffer('count', tensor(0.)) self.register_buffer('factor', tensor(0.)) self.register_buffer('offset', tensor(0.)) self.batch = 0 def update_stats(self, x): bs,nc,*_ = x.shape self.sums.detach_() self.sqrs.detach_() dims = (0,2,3) s = x .sum(dims, keepdim=True) ss = (x*x).sum(dims, keepdim=True) c = s.new_tensor(x.numel()/nc) mom1 = s.new_tensor(1 - (1-self.mom)/math.sqrt(bs-1)) self.sums .lerp_(s , mom1) self.sqrs .lerp_(ss, mom1) self.count.lerp_(c , mom1) self.batch += bs means = self.sums/self.count varns = (self.sqrs/self.count).sub_(means*means) if bool(self.batch < 20): varns.clamp_min_(0.01) self.factor = self.mults / (varns+self.eps).sqrt() self.offset = self.adds - means*self.factor def forward(self, x): if self.training: self.update_stats(x) return x*self.factor + self.offset learn,run = get_learn_run(nfs, data, 0.8, conv_rbn, cbs=cbfs +[partial(ParamScheduler,'lr', sched_lin(1., 0.2))]) %time run.fit(1, learn)train: [0.15630783203125, tensor(0.9518, device='cuda:0')] valid: [0.07094555053710938, tensor(0.9787, device='cuda:0')] CPU times: user 15.6 s, sys: 1.57 s, total: 17.1 s Wall time: 17.2 sExportnb_auto_export()MLR Predictive Model SelectionSo now we know how to build a wide array of linear regression models. For instance if our data set contained $n$ observations of $m$ features we could build $2^m$ models without even considering interactions, polynomials, or other nonlinear transformations of the data. 
That's a lot of models to choose from, so in this notebook we'll introduce how you might go about selecting the best multiple linear regression model. What We'll Accomplish in This NotebookIn this notebook we'll:- Have a discussion of generalization error in predictive models- Introduce the concept of cross-validation- Use best subset selection for the Advertising data set- Practice building the best predictive model, using the tools we've learned so farLet's go!# import the packages we'll use ## For data handling import pandas as pd import numpy as np # We'll use this later from numpy import meshgrid ## For plotting import matplotlib.pyplot as plt import seaborn as sns ## This sets the plot style ## to have a grid on a white background sns.set_style("whitegrid")Generalization ErrorAs we've discussed before, we can't just choose the model that performs the best on our training data, because we could arbitrarily make a model that fits each training point to its target value.We may also be tempted to see which model performs best on the test set we made, but this has a similar problem: we would just be producing models that perform really well on the test set.We want to get a sense of how well our model will perform on any new random data pull; this is known as the generalization error.What can we do to get a sense for the generalization error of our model? Cross-ValidationEnter $k$-fold cross-validation.The idea behind this technique is pretty clever. We'll introduce the technique in a way that generalizes beyond linear regression, which will come in handy down the line.When building a predictive model, the model estimate (what we've been calling $\hat{f}$) is found by minimizing a loss function. For linear regression this loss function was the MSE of the training data. Let's consider the case where we randomly draw a new set of data (think test set) and see how well our estimate performs. Because the data are randomly drawn, evaluating our estimate on them is a random process. So the value of the loss function (the generalization error) on this new data is an example of a random variable; let's call this random variable $G$.It would be nice to know something about the distribution of $G$, but this is tricky with a finite amount of data. However, we can leverage a popular theorem from probability theory called the law of large numbers (see the probability theory and statistics cheat sheet).If we were able to generate a bunch of random draws of $G$, say $k$ random draws, then the law of large numbers says that:$$\frac{1}{k}\sum_{i=1}^k G_i \approx E(G),$$assuming our random draws were independent.So in $k$-fold cross-validation we take our training set and randomly split it into $k$ equally sized chunks. For each chunk we train the algorithm on the $k-1$ other chunks and then calculate the testing loss using the chunk we left out. Then we take the arithmetic mean of all $k$ testing errors.
This is an approximation of the expected value of the true generalization error of the algorithm.Here's a picture to help illustrate the idea for $5$-fold cross-validation:Let's see how this works in `sklearn` to help us choose the best model for the `Advertising` data.ads = pd.read_csv("Advertising.csv") ads_copy = ads.copy() ads_train = ads_copy.sample(frac=.75, random_state=614) ads_test = ads_copy.drop(ads_train.index) ## remember in notebook 3 we found these ## to be useful ads_train['sqrt_TV'] = np.sqrt(ads_train['TV']) ads_train['sqrtTV_radio'] = ads_train['sqrt_TV']*ads_train['radio'] ## import the KFold object from sklearn from sklearn.model_selection import KFold ## We'll need this when we fit models from sklearn.base import clone ## Now we make a kfold object ## we'll use 5 splits ## and shuffle the data before making the splits kfold = KFold(n_splits = 5, shuffle = True, random_state = 440) ## To make this simpler I make my data ## into an array X = np.array(ads_train[['TV','radio']]) y = np.array(ads_train['sales']) # You can loop through all the splits like so for train_index, test_index in kfold.split(X,y): print("TRAIN:", train_index, "TEST:", test_index) X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] ## Now let's put it all together. ## It will be easier for us to make a couple functions ## to loop through all 8 possible models ## This gets our data for us def get_X_y(df,features,target): # Returns X then y return np.array(df[features]), np.array(df[target]) ## this calculates the mse def get_mse(model, X, y): # get the prediction pred = model.predict(X) # Returns the mse return np.sum(np.power(pred-y,2))/len(y) # This function was modified from stackexchange user hughdbrown # at this link, # https://stackoverflow.com/questions/1482308/how-to-get-all-subsets-of-a-set-powerset # This returns the power set of a set minus the empty set def powerset_no_empty(s): power_set = [] x = len(s) for i in range(1 << x): power_set.append([s[j] for j in range(x) if (i & (1 << j))]) return power_set[1:] powerset_no_empty(['TV','radio','newspaper','sqrt_TV','sqrtTV_radio']) possible_features = powerset_no_empty(['TV','radio','newspaper','sqrt_TV','sqrtTV_radio']) ## Now make an array that will hold the mses ## for all the models ## the columns represent each possible model MSEs = np.empty((5,len(possible_features))) from sklearn.linear_model import LinearRegression ## Make a regression model reg = LinearRegression(copy_X = True) ## keep track of what split we're on i = 0 ## This is for the initial input into the kfold object X, y = get_X_y(ads_train, possible_features[0], 'sales') ## Perform CV for train_index, test_index in kfold.split(X): ## For each possible model for j in range(len(possible_features)): ## get X and y X, y = get_X_y(ads_train, possible_features[j], 'sales') # Get the cv train test split X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] # Cloning the regression makes a fresh regression # model for each run clone_reg = clone(reg) # fit the model clone_reg.fit(X_train,y_train) MSEs[i,j] = get_mse(clone_reg, X_test, y_test) ## We'll now move to the next split i = i + 1 # Here are the MSEs MSEs ## We can get the mean MSE for each model ## across the CV splits like so np.mean(MSEs, axis=0) ## We can get the mean MSEs using np.mean ## axis = 0 tells np.mean to take the column mean ## we can get where the min occurs with argmin np.argmin(np.mean(MSEs, axis = 0)) print("The model with 
the lowest mean CV MSE", "was the one with", possible_features[np.argmin(np.mean(MSEs, axis = 0))], "as the features. This model had a mean CV MSE of", np.round(np.min(np.mean(MSEs, axis=0)),5)) ## Another popular measure is root mse ## this is because it has the same dimension as ## the target variable ## It can be interpreted as how far off we were from the ## true value on average np.round(np.min(np.sqrt(np.mean(MSEs, axis = 0))),5)Here we brute forced our way through all the models because we had a small number of predictors and a decent sample size. This really doesn't work when your data has a large number of predictors (there are too many models to check, and you need a large number of samples to fit models with a lot of predictors; this is known as the curse of dimensionality), or if you have a small sample size (which makes it difficult to split your data even further). We'll learn other techniques for model selection in those cases later on. Also, in many cases it doesn't make sense to even include a predictor in the model, for example because it has no association with your target.There are two other algorithms we touch on in the HW called backwards and forwards selection, which are greedy algorithms. The benefit of these approaches is that they run more quickly than brute force; the problem is that they might not give you the "best model".For now we'll stick to examining correlations and scatter plots to identify plausible features, and then use cross-validation to pick the best subset of those plausible features. You CodeGo ahead and try to build the best model you can to predict `carseats` `Sales`. Everyone's model may be different so do your best :)carseats = pd.read_csv("carseats.csv") ## Code here ## Code here ## Code hereMultiple Linear Regression Loading the data from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error import pandas as pd # load the diabetes dataset diabetes_dataset = datasets.load_diabetes() Dataset information print(diabetes_dataset.DESCR).. _diabetes_dataset: Diabetes dataset ---------------- Ten baseline variables, age, sex, body mass index, average blood pressure, and six blood serum measurements were obtained for each of n = 442 diabetes patients, as well as the response of interest, a quantitative measure of disease progression one year after baseline.
**Data Set Characteristics:** :Number of Instances: 442 :Number of Attributes: First 10 columns are numeric predictive values :Target: Column 11 is a quantitative measure of disease progression one year after baseline :Attribute Information: - age age in years - sex - bmi body mass index - bp average blood pressure - s1 tc, T-Cells (a type of white blood cells) - s2 ldl, low-density lipoproteins - s3 hdl, high-density lipoproteins - s4 tch, thyroid stimulating hormone - s5 ltg, lamotrigine - s6 glu, blood sugar level Note: Each of these 10 feature va[...]Getting the data column names* feature_namesdiabetes_dataset.feature_namesGetting just the data* .datadiabetes_dataset.data # convert the input variables to a pandas dataframe X = pd.DataFrame(diabetes_dataset.data, columns=diabetes_dataset.feature_names) XTarget variable ['diabetes']* .targetdiabetes_dataset.target # convert the target variable to a pandas dataframe for convenience y = pd.DataFrame(diabetes_dataset.target, columns=['diabetes']) ySplitting the datax_train, x_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=5) print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)(353, 10) (89, 10) (353, 1) (89, 1)train datadisplay(x_train,x_test)test datadisplay(y_train,y_test)Applying and training the modelmodel = LinearRegression() model.fit(x_train, y_train)theta values in order: theta 1, 2, 3, 4, ...model.coef_theta 0, `intercept_`: the value that minimizes the lossmodel.intercept_Model evaluation: getting predictions for the `test set`y_test_prediction = model.predict(x_test) y_test_prediction[:10]Comparing the predicted values (`y_test_prediction`) with the actual values (output = `y_test`) RMSEUse the root of the mean squared error to judge model performance on the test datamse = mean_squared_error(y_test, y_test_prediction) rmse = mse ** 0.5 rmse--- Code summary# import the required libraries from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error import pandas as pd # load the diabetes dataset diabetes_dataset = datasets.load_diabetes() # convert the input variables to a pandas dataframe for convenience X = pd.DataFrame(diabetes_dataset.data, columns=diabetes_dataset.feature_names) # convert the target variable to a pandas dataframe for convenience y = pd.DataFrame(diabetes_dataset.target, columns=['diabetes']) # use train_test_split to split the given data into training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5) linear_regression_model = LinearRegression() # create a linear regression model linear_regression_model.fit(X_train, y_train) # train the model on the training data y_test_predict = linear_regression_model.predict(X_test) # predict with the trained model # use the root of the mean squared error to judge model performance on the test data mse = mean_squared_error(y_test, y_test_predict) mse ** 0.5Regression Analysis- so far, we've learned the main concepts behind supervised learning and trained many models for classification tasks to predict group memberships or categorical variables- regression models are used to predict a target variable on a continuous scale- some important applications are: 1. predicting insurance premiums 2. making weather forecasts 3. predicting the stock market 4. predicting the housing market 5. 
predicting the sales of a company in a future month, etc.- In this notebook, we will discuss the main concepts of regression models and cover the following topics: - Exploring and visualizing datasets - Looking at different approaches to implement linear regression models - Training regression models that are robust to outliers - Evaluating regression models and diagnosing common problems - Fitting regression models to nonlinear data Linear regression- the goal of linear regression is to model the relationship between one or multiple features and a continuous target variable Simple linear regression- the goal of simple (**univariate**) linear regression is to model the relationship between a single feature (**explanatory variable**, *x*) and a continuous-valued **target** (**response variable**, *y*) - the equation of a linear model with one explanatory variable is defined as follows: - $y = w_0 + w_1x$ - $w_0$ is the $y$ axis intercept - $w_1$ is the weight coefficient of the independent variable- the goal is to learn the weights of the linear equation to describe the relationship between the independent variable and the target variable- the learned weights can be used to predict responses of new samples- visually, linear regression can be understood as finding the best-fitting straight line through the training examples, as shown in the following figure![Linear Regression](./images/linear-reg-1.png)- the best-fitting line is also called the regression line- the vertical lines from the regression line to the training examples are the **offsets** or **residuals** -- the errors of our prediction Multiple linear regression- generalizing the linear regression model to multiple explanatory variables - $y = w_0 x_0 + w_1 x_1 + \dots + w_n x_n = \sum_{i=0}^{n}w_i x_i = w^T x$- visualizing the 2-d fitted hyperplane of a multiple linear regression model with two features is already difficult![Multiple linear regression](./images/multiple-linear-reg.png) - due to the challenge and limitations of visualizing multiple linear regression hyperplanes in datasets with more than 2 features, we'll focus on the univariate case, using simple regression models Exploring the Housing dataset- the housing dataset contains information about houses in the suburbs of Boston collected in 1978- made freely available from the UCI ML Repository or scikit-learn - https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/datasets/data/boston_house_prices.csv- 506 examples with 14 columns- feature description: - CRIM: Per capita crime rate by town - ZN: Proportion of residential land zoned for lots over 25,000 sq. ft. 
- INDUS: Proportion of non-retail business acres per town - CHAS: Charles River dummy variable (= 1 if tract bounds river and 0 otherwise) - NOX: Nitric oxide concentration (parts per 10 million) - RM: Average number of rooms per dwelling - AGE: Proportion of owner-occupied units built prior to 1940 - DIS: Weighted distances to five Boston employment centers - RAD: Index of accessibility to radial highways - TAX: Full-value property tax rate per \$10,000 - PTRATIO: Pupil-teacher ratio by town - B: $1000(Bk - 0.63)^2$, where $Bk$ is the proportion of [people of African American descent] by town - LSTAT: Percentage of lower status of the population - MEDV: Median value of owner-occupied homes in $\$1000s$import pandas as pd import numpy as np url = 'https://raw.githubusercontent.com/scikit-learn/scikit-learn/main/sklearn/datasets/data/boston_house_prices.csv' df = pd.read_csv(url, header=1) # column header is at row 1 dfVisualize the important characteristics of a dataset- **Exploratory data analysis (EDA)** allows us to visually detect the presence of outliers, the distribution of the data, and the relationships between features- let's create a **scatterplot matrix** that allows us to visualize the pair-wise correlations between the different features in one place- due to space constraints, we'll use some selected columns - feel free to explore all...import matplotlib.pyplot as plt import seaborn as sns cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV'] g = sns.PairGrid(df.loc[:, cols]) g.map_diag(sns.histplot) g.map_offdiag(sns.scatterplot) g.add_legend()- eyeball some observations: - the diagonal charts are just the histogram distribution of each feature on the x-axis - there's a linear relationship between RM and MEDV, the house price - the MEDV feature seems to be normally distributed but contains several outliers Estimating the coefficient of a regression model- use scikit-learn's LinearRegression APIfrom sklearn.linear_model import LinearRegression # let's just use no. of bedrooms as feature for the sake of simplicity X = df[['RM']] X y = df['MEDV'] y lr = LinearRegression() lr.fit(X, y) y_pred = lr.predict(X) lr.coef_ print(f'Slope, w1: {lr.coef_[0]:.3f}') print(f'Intercept: {lr.intercept_:.3f}')Intercept: -34.671plot the regression line- the best fitting line on the datasetsns.set_theme(color_codes=True) fig_dims = (10, 8) fig, ax = plt.subplots(figsize=fig_dims) ax = sns.regplot(x="RM", y="MEDV", data=df) ax.set(ylabel="Price in \$1000s [MEDV]", xlabel="Average number of rooms [RM]") import locale # currency formatting locale.setlocale(locale.LC_ALL, '') # use the system default locale # let's predict the price for a 5 bedroom house num_rooms = np.array([[5.0]]) price = lr.predict(num_rooms) print(f'''Price of {num_rooms[0][0]:.0f} bedroom house is predicted as {locale.currency(price[0]*1000, grouping=True )}''')Price of 5 bedroom house is predicted as $10,839.92Fitting a robust regression model using RANSAC- Linear regression models can be heavily impacted by the presence of outliers- outliers can be detected and removed; this requires judgement as a data scientist and domain knowledge- an alternative to throwing out outliers is using RANSAC - the RANdom SAmple Consensus algorithm - which fits a regression model to a subset of the data (**inliers**)- We can summarize the iterative RANSAC algorithm as follows: 1. Select a random number of examples to be inliers and fit the model. 2. Test all other data points against the fitted model and add those points that fall within a user-given tolerance to the inliers. 3. 
Refit the model using all inliers. 4. Estimate the error of the fitted model versus the inliers. 5. Terminate the algorithm if the performance meets a certain user-defined threshold or if a fixed number of iterations were reached; go back to step 1 otherwise. - use the RANSACRegressor API of scikit-learn - https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RANSACRegressor.htmlfrom sklearn.linear_model import RANSACRegressor ransac = RANSACRegressor(LinearRegression(), max_trials=100, min_samples=50, loss='absolute_loss', residual_threshold=5.0, random_state=0) ransac.fit(X, y) # let's obtain the inliers and outliers from the fitted ransac # plot them with the linear fit inlier_mask = ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) line_X = np.arange(3, 10, 1) line_y_ransac = ransac.predict(line_X[:, np.newaxis]) fig_dims = (10, 8) fig, ax = plt.subplots(figsize=fig_dims) plt.scatter(X[inlier_mask], y[inlier_mask], c='steelblue', edgecolor='white', marker='o', label='Inliers') plt.scatter(X[outlier_mask], y[outlier_mask], c='limegreen', edgecolor='white', marker='s', label='Outliers') plt.plot(line_X, line_y_ransac, color='black', lw=2) plt.xlabel('Average number of rooms [RM]') plt.ylabel('Price in $1000s [MEDV]') plt.legend(loc='upper left') print(f'Slope: {ransac.estimator_.coef_[0]:.3f}') print(f'Intercept: {ransac.estimator_.intercept_:.3f}') # we get a slightly different slope and intercept compared to straight linear regression # let's predict the price for a 5 bedroom house with the ransac model num_rooms = np.array([[5.0]]) price = ransac.predict(num_rooms) print(f'Price of {num_rooms[0][0]:.0f} bedroom house is predicted as \ {locale.currency(price[0]*1000, grouping=True )}')Price of 5 bedroom house is predicted as $9,583.48Evaluating the performance of linear regression models- like supervised classifiers, regressors need to be trained on a training set and evaluated on a test set- the goal is to evaluate the model's performance on unseen data to estimate the generalization performance- in order to properly evaluate the model, we'll use all the variables/features in the datasetfrom sklearn.model_selection import train_test_split X = df.iloc[:, :-1].values # use all the columns except for the last as explanatory variables y = df['MEDV'].values # use the last column as dependent/response variable # 80/20 split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) slr = LinearRegression() slr.fit(X_train, y_train) # get the training and testing prediction y_train_pred = slr.predict(X_train) y_test_pred = slr.predict(X_test)Residual plots- since our model uses multiple explanatory variables, we can't visualize the linear regression line or hyperplane- residual plots let us visualize the residuals (differences or vertical distances between the actual and predicted values) versus the predicted values to diagnose our regression model- the following code plots a residual plot by simply subtracting the true target values from the predicted responsesfig, ax = plt.subplots(figsize=(10, 8)) plt.scatter(y_train_pred, y_train_pred - y_train, c='steelblue', marker='o', edgecolor='white', label='Training data') plt.scatter(y_test_pred, y_test_pred - y_test, c='limegreen', marker='s', edgecolor='white', label='Test data') plt.xlabel('Predicted prices in $1000s') plt.ylabel('Residuals') plt.legend(loc='upper left') plt.hlines(y=0, xmin=-10, xmax=50, color='black', lw=2) plt.xlim([-10, 50]) plt.show()Mean Squared Error (MSE)- a useful quantitative 
measure of regressor models' performance- simply the averaged value of the Sum of Squared Errors (SSE) - $MSE = \frac{1}{n}\sum_{i=1}^{n}(y^{(i)}-\hat y^{(i)})^2$from sklearn.metrics import mean_squared_error mse_train = mean_squared_error(y_train, y_train_pred) mse_test = mean_squared_error(y_test, y_test_pred) print(f'MSE train {mse_train:.3f} test: {mse_test:.3f}')MSE train 19.326 test: 33.449interpreting errors- if the difference between the train and test mean squared errors (MSE) is big, it means the model is overfitting the training data- the interpretation of the MSE depends on the dataset and feature scaling- e.g., if the house prices were represented as multiples of 1,000 (with a K suffix), the same model would yield a lower MSE compared to a model that worked with unscaled features - $(10K - 15K)^2 < (10,000 - 15,000)^2$ Coefficient of determination ($R^2$)- a standardized version of the MSE, for better interpretation- $R^2$ is the fraction of response variance captured by the model $R^2 = 1 - \frac {MSE}{Var(y)}$- the higher the $R^2$, the better the prediction, meaning less errorfrom sklearn.metrics import r2_score print('R^2 train: %.3f, test: %.3f' % (r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred)))R^2 train: 0.773, test: 0.589Turning a linear regression model into a curve - polynomial regression- when the linearity assumption of the data is violated, we can use a polynomial regression model by adding polynomial terms: - $y=w_0+w_1x+w_2x^2 + ... + w_nx^n$ - $n$ denotes the degree of the polynomial- although we can use polynomial regression to model nonlinear relationships, it is still considered a multiple linear regression model because of the linear regression coefficients, $w$ Adding polynomial terms- can use the `PolynomialFeatures` transformer in scikit-learn to add a quadratic term (degree = 2)- compare the linear with the polynomial fitfrom sklearn.preprocessing import PolynomialFeatures # create a toy dataset X_train = np.array([258.0, 270.0, 294.0, 320.0, 342.0, 368.0, 396.0, 446.0, 480.0, 586.0])[:, np.newaxis] X_train y_train = np.array([ 236.4, 234.4, 252.8, 298.6, 314.2, 342.2, 360.8, 368.0, 391.2, 390.8]) y_train # add a second degree polynomial term quadratic = PolynomialFeatures(degree=2) X_quad = quadratic.fit_transform(X_train) X_quad # fit a simple linear regression model for comparison lr = LinearRegression() # fit linear features lr.fit(X_train, y_train) X_test = np.arange(250, 600, 10)[:, np.newaxis] y_lin_test = lr.predict(X_test) # fit quadratic features pr = LinearRegression() pr.fit(X_quad, y_train) y_quad_test = pr.predict(quadratic.fit_transform(X_test)) # plot results fig, ax = plt.subplots(figsize=(10, 5)) plt.scatter(X_train, y_train, label='Training points') plt.plot(X_test, y_lin_test, label='Linear fit', linestyle='--') plt.plot(X_test, y_quad_test, label='Quadratic fit') plt.xlabel('Explanatory variable') plt.ylabel('Predicted or known target values') plt.legend(loc='upper left') plt.tight_layout() #plt.savefig('images/10_11.png', dpi=300) plt.show() # find the MSE and R^2 y_lin_pred = lr.predict(X_train) y_quad_pred = pr.predict(X_quad) print('Training MSE linear: %.3f, quadratic: %.3f' % ( mean_squared_error(y_train, y_lin_pred), mean_squared_error(y_train, y_quad_pred))) print('Training R^2 linear: %.3f, quadratic: %.3f' % ( r2_score(y_train, y_lin_pred), r2_score(y_train, y_quad_pred)))Training MSE linear: 569.780, quadratic: 61.330 Training R^2 linear: 0.832, quadratic: 0.982Modeling nonlinear relationships in the Housing dataset- let's model the relationship between 
house prices and LSTAT (percentage of lower status of the population) using second-degree (quadratic) and third-degree (cubic) polynomials- compare the quadratic and cubic polynomials with the linear fit# use just one feature LSTAT as explanatory feature X = df[['LSTAT']].values y = df['MEDV'].values # target variable regr = LinearRegression() # create quadratic features quadratic = PolynomialFeatures(degree=2) cubic = PolynomialFeatures(degree=3) X_quad = quadratic.fit_transform(X) X_cubic = cubic.fit_transform(X) # test data X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis] # fit linear features regr = regr.fit(X, y) y_lin_fit = regr.predict(X_fit) linear_r2 = r2_score(y, regr.predict(X)) # fit quadratic features regr = regr.fit(X_quad, y) y_quad_fit = regr.predict(quadratic.fit_transform(X_fit)) quadratic_r2 = r2_score(y, regr.predict(X_quad)) # fit cubic features regr = regr.fit(X_cubic, y) y_cubic_fit = regr.predict(cubic.fit_transform(X_fit)) cubic_r2 = r2_score(y, regr.predict(X_cubic)) # plot results fig, ax = plt.subplots(figsize=(10, 5)) plt.scatter(X, y, label='Training points', color='lightgray') plt.plot(X_fit, y_lin_fit, label='Linear (d=1), $R^2=%.2f$' % linear_r2, color='blue', lw=2, linestyle=':') plt.plot(X_fit, y_quad_fit, label='Quadratic (d=2), $R^2=%.2f$' % quadratic_r2, color='red', lw=2, linestyle='-') plt.plot(X_fit, y_cubic_fit, label='Cubic (d=3), $R^2=%.2f$' % cubic_r2, color='green', lw=2, linestyle='--') plt.xlabel('% lower status of the population [LSTAT]') plt.ylabel('Price in $1000s [MEDV]') plt.legend(loc='upper right') plt.show()other transformations ?- a polynomial is not always the best choice for modeling non-linear relationships- e.g., the MEDV-LSTAT scatterplot may lead to a hypothesis that a log-transformation of the LSTAT feature variable and the square root of MEDV may project the data onto a linear feature space suitable for a linear regression fit - $f(x) = e^{-x}$ - $log(f(x)) = -x$- the natural log of an exponential function is a straight lineX = df[['LSTAT']].values y = df['MEDV'].values # transform features X_log = np.log(X) y_sqrt = np.sqrt(y) # fit features X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis] regr = regr.fit(X_log, y_sqrt) y_lin_fit = regr.predict(X_fit) linear_r2 = r2_score(y_sqrt, regr.predict(X_log)) # plot results plt.scatter(X_log, y_sqrt, label='Training points', color='lightgray') plt.plot(X_fit, y_lin_fit, label='Linear (d=1), $R^2=%.2f$' % linear_r2, color='blue', lw=2) plt.xlabel('log(% lower status of the population [LSTAT])') plt.ylabel('$\sqrt{Price \; in \; \$1000s \; [MEDV]}$') plt.legend(loc='lower left') plt.tight_layout() #plt.savefig('images/10_13.png', dpi=300) plt.show()Decision Tree Regressor- a Decision Tree can be used as a regressor model![DT Regressor](./images/DT-Regressor.png)- points to note: 1. no data transformation required; features are analyzed one at a time 2. the DT regressor captures the general trend in the data 3. doesn't capture the continuity and differentiability of the desired prediction 4. 
need to be careful about choosing an appropriate value for the depth of the tree so as to not overfit or underfit the data - let's visualize fitting a regression curve with a decision treefrom sklearn.tree import DecisionTreeRegressor def lin_regplot(X, y, model): plt.scatter(X, y, c='steelblue', edgecolor='white', s=70) plt.plot(X, model.predict(X), color='black', lw=2) return X = df[['LSTAT']].values y = df.MEDV tree = DecisionTreeRegressor(max_depth=3) tree.fit(X, y) sort_idx = X.flatten().argsort() fig, ax = plt.subplots(figsize=(10, 5)) lin_regplot(X[sort_idx], y[sort_idx], tree) plt.xlabel('% lower status of the population [LSTAT]') plt.ylabel('Price in $1000s [MEDV]') plt.show() # making predictions # generate 10 random LSTAT samples using np.random X_test = np.random.choice(X.max(axis=1), 10) # sample 10 random LSTAT values from the data X_test = X_test[:, np.newaxis] X_test y_pred = tree.predict(X_test) y_predMSE/$R^2$? - since we don't know the actual y for the random test data, we can't calculate the MSE/$R^2$- let's split the dataset into train/test and evaluate the DT Regressor model# 80/20 split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) tree = DecisionTreeRegressor(max_depth=3) tree.fit(X_train, y_train) y_pred = tree.predict(X_test) y_train_pred = tree.predict(X_train) print('Training MSE linear: %.3f'% mean_squared_error(y_train, y_train_pred)) print('Training R^2 linear: %.3f'% r2_score(y_train, y_train_pred)) print('Testing MSE linear: %.3f'% mean_squared_error(y_test, y_pred)) print('Testing R^2 linear: %.3f'% r2_score(y_test, y_pred))Testing MSE linear: 39.417 Testing R^2 linear: 0.516Underfitting and Overfitting Decision Tree- DT Regressor models can suffer from underfitting and overfitting- one of the reasons could be the "Tree Depth"- the higher the tree depth, the more leaf nodes - a tree with 10 levels will have $2^{10} = 1024$ leaves- leaves with very few houses will make predictions that are quite close to those homes' actual values - however, they may make unreliable predictions for new data - because each prediction is based on only a few houses ![DT under and overfitting](./images/dt-fitting-errors.png) Finding the optimal leaf nodes- the scikit-learn DT Regressor provides a `max_leaf_nodes` argument to control overfitting vs underfitting- we can use a utility function to help compare MSE scores from models with different `max_leaf_nodes`def get_mse(max_leaf_nodes, X_train, y_train, X_test, y_test): model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0) model.fit(X_train, y_train) y_preds = model.predict(X_test) mse = mean_squared_error(y_test, y_preds) return mse # compare MSE with differing values of max_leaf_nodes errors = [] for i in range(1, 11, 1): max_leaf_nodes = 2**i mse_err = get_mse(max_leaf_nodes, X_train, y_train, X_test, y_test) print("Max leaf nodes: %d \t\t Mean Squared Error: %d" %(max_leaf_nodes, mse_err)) errors.append((mse_err, max_leaf_nodes)) mse, optimal_leaves = min(errors) print(f'Optimal leaves: {optimal_leaves} MSE: {mse:.3f}') # let's find the optimal depth of the tree import math print(f'Optimal depth = {math.floor(math.log2(optimal_leaves))}')Optimal depth = 3Random forest regression- a random forest is an ensemble technique that combines multiple decision trees- a random forest usually has a better generalization performance than an individual decision tree due to randomness (which helps to decrease the model's variance)- RFs are also less sensitive to outliers and don't require much 
parameter tuning- the only parameter that we typically need to experiment with is the number of trees in the forest- https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html- let's apply `RandomForestRegressor` to the Housing dataset# use all the features X = df.iloc[:, :-1].values y = df.MEDV X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=1) from sklearn.ensemble import RandomForestRegressor forest = RandomForestRegressor(random_state=1, n_jobs=-1) # train and test the model and find MSE and R^2 forest.fit(X_train, y_train) y_train_pred = forest.predict(X_train) y_test_pred = forest.predict(X_test) print('MSE train: %.3f, test: %.3f' % ( mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))) print('R^2 train: %.3f, test: %.3f' % ( r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))) # Let's plot the residuals of the predictions fig, ax = plt.subplots(figsize=(10, 6)) plt.scatter(y_train_pred, y_train_pred - y_train, c='steelblue', edgecolor='white', marker='o', s=35, alpha=0.9, label='Training data') plt.scatter(y_test_pred, y_test_pred - y_test, c='limegreen', edgecolor='white', marker='s', s=35, alpha=0.9, label='Test data') plt.xlabel('Predicted prices in $1000s') plt.ylabel('Residual values in $1000s') plt.legend(loc='upper left') plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='black') plt.xlim([-10, 50]) plt.tight_layout() plt.show()Feature ranking- both `RandomForestClassifier` and `RandomForestRegressor` provide a `feature_importances_` attribute- the following code displays the ranking and bar charts of each feature based on its importance value- the model trained above is used to demonstrate itfeat_labels = df.columns[:-1] importances = forest.feature_importances_ # https://numpy.org/doc/stable/reference/generated/numpy.argsort.html # return the indices that would sort the importances array and reverse it indices = np.argsort(importances)[::-1] # print all the features and their importances from highest to lowest importance for f in range(X_train.shape[1]): print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]])) # plot the histogram bar chart plt.title('Housing Dataset - Feature Importance') plt.bar(range(X_train.shape[1]), importances[indices], align='center') plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90) plt.xlim([-1, X_train.shape[1]]) plt.tight_layout() #plt.savefig('images/04_09.png', dpi=300) plt.show() indicesUse top features to train RandomForestRegressor- use a forward selection technique from the most important feature to the least- this can also be used to train and test other regressor modelsdf MSEs = [] # collect all the MSE R2s = [] # collect all the R^2 feature_ids = [] y = df.MEDV for feature_id in indices: feature_ids.append(feature_id) X = df.iloc[:, feature_ids].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=1) forest = RandomForestRegressor(random_state=1, n_jobs=-1) forest.fit(X_train, y_train) y_train_pred = forest.predict(X_train) y_test_pred = forest.predict(X_test) mse = mean_squared_error(y_test, y_test_pred) print('Top features: %d MSE train: %.3f, test: %.3f' % ( len(feature_ids), mean_squared_error(y_train, y_train_pred), mse)) r2 = r2_score(y_test, y_test_pred) print('Top features: %d R^2 train: %.3f, test: %.3f' % ( len(feature_ids), r2_score(y_train, y_train_pred), r2)) MSEs.append((mse, 
len(feature_ids))) R2s.append((r2, len(feature_ids))) # let's see the MSEs for the test datasets MSEs # can plot import matplotlib.pyplot as plt data = pd.DataFrame(MSEs) plt.plot(data[1], data[0]) plt.xlabel("Top Feature Count") plt.ylabel("Mean Squared Error (MSE)") plt.title("Feature ranking on Housing Dataset") plt.show() # let's see the R2s for the test datasets R2s # can do a line plot as wellData Visualization Seaborn www.data4sci.com @bgoncalves, @data4sciimport pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import warnings warnings.filterwarnings("ignore") import watermark import seaborn as sns %load_ext watermark %matplotlib inline %watermark -n -v -m -g -ivnumpy 1.18.1 watermark 2.0.2 pandas 1.0.1 autopep8 1.5 seaborn 0.10.0 json 2.0.9 Mon Jun 01 2020 CPython 3.7.3 IPython 6.2.1 compiler : Clang 4.0.1 (tags/RELEASE_401/final) system : Darwin release : 19.4.0 machine : x86_64 processor : i386 CPU cores : 8 interpreter: 64bit Git hash : f388954fe48cfc6df2a30721b0c4d3dfcad2dae5Seaborn datasets Seaborn makes a small list of datasets easily available:plt.style.use('seaborn') sns.get_dataset_names()This makes it easier to learn how to use the library using standard datatips = sns.load_dataset("tips")The first time you load a dataset you'll need an internet connection so that the data can be downloaded from github. The **load_dataset** function returns a pandas data frametips.head()Scatter plot Seaborn is designed to facilitate the use of data frames. Making an attractive scatter plot is as simple as:ax = sns.scatterplot(x="total_bill", y="tip", data=tips) type(ax) tips.plot.scatter(x='total_bill', y='tip')As you can see, the axis level functions return an Axes objectfg = sns.relplot(x="total_bill", y="tip", data=tips, kind="scatter") type(fg)The generic figure functions return a FacetGrid object. Making more complex figures is also very simplesns.relplot(x="total_bill", y="tip", data=tips, kind="scatter", hue="sex", size="size", )And we can specify the order in which the colors are usedsns.relplot(x="total_bill", y="tip", data=tips, kind="scatter", hue="sex", size="size", hue_order=["Male", "Female"], )And different marker types, etcsns.relplot(x="total_bill", y="tip", data=tips, kind="scatter", hue="sex", size="size", style='smoker', hue_order=["Female", "Male"], )The main advantage of the figure level functions is that they can easily generate subplots covering other dimensions of the datasetsns.relplot(x="total_bill", y="tip", data=tips, kind="scatter", hue="sex", size="size", col='smoker', row='day', style='time', row_order=["Thur", "Fri", "Sat", 'Sun'] )Categorical functionssns.catplot(x="sex", y="total_bill", hue="day", data=tips, kind="strip") sns.catplot(x="sex", y="total_bill", hue="day", data=tips, kind="strip", jitter=False, dodge=True) sns.catplot(x="sex", y="total_bill", hue="day", data=tips, kind="box") sns.catplot(x="sex", y="total_bill", hue="day", data=tips, kind="violin")Axis level functions While Figure level plots are extremely convenient for data exploration, they have one fundamental limitation. Figure level functions build their own figure from scratch. This means that they are not easily stackable, so you can't easily combine multiple plots on top of one another. This is perhaps the main reason you might find yourself using axis level functions directly. 
Let's look at a quick examplesns.boxplot(x="sex", y="total_bill", data=tips, color=".8", showfliers=False) sns.swarmplot(x="sex", y="total_bill", hue="day", data=tips) plt.gcf().set_size_inches(11, 8)FacetGrid We already saw how we can easily use figure level functions to generate faceted plotssns.relplot(x="total_bill", y="tip", data=tips, kind="scatter", hue="sex", col='smoker', row='day', row_order=["Thur", "Fri", "Sat", 'Sun'] )This can also be done manually using **FacetGrid**. First we instantiate a facetfg = sns.FacetGrid(tips, col="smoker", row="day", hue="sex", row_order=["Thur", "Fri", "Sat", 'Sun'] ) fg.map(plt.scatter, "total_bill", "tip") plt.gcf().set_size_inches(11, 16)The **FacetGrid** object contains pointers to all the axes so that we can manipulate them directly, if we wishfg.axesSo if we want to add an extra line to one of the subplots we can easily do:fg = sns.FacetGrid(tips, col="smoker", row="day", hue="sex", row_order=["Thur", "Fri", "Sat", 'Sun'] ) fg.map(plt.scatter, "total_bill", "tip") # call methods directly on the axis object fg.axes[1][1].plot([10, 50], [2, 10], 'r-') fg.axes[1][1].set_title('Friday non-smokers') fg.axes[1][1].legend() plt.gcf().set_size_inches(11, 16)Advanced plots Pair plotThis generates a matrix of plots looking at all combinations of numerical columnstips.head() tips.dtypes pp = sns.pairplot(tips, hue="sex")Pairplots return a **PairGrid** object, a specialized version of **FacetGrid**. We can still access the individual axis objects in the same way as beforepp.axesJoint plot**jointplot** is also a useful way of exploring datasets. It makes a scatter plot in the center and histograms on the marginsjp = sns.jointplot("total_bill", "tip", data=tips)**Joint plots** return a **JointGrid** object that is slightly different in structure than a **FacetGrid**. To access the center plot you use **jp.ax_joint** and **jp.ax_marg_x**, **jp.ax_marg_y** for the x and y margin plotsprint(type(jp.ax_joint)) print(type(jp.ax_marg_x)) print(type(jp.ax_marg_y)) You can also use the jointplot to visualize the point densityjp = sns.jointplot("total_bill", "tip", data=tips, kind="hex") jp = sns.jointplot("total_bill", "tip", data=tips, kind="kde")Ligandnet workflow#************************************** # # # UTEP, Computational Science # # Last modified: 1/25/20 # # *************************************Import librariesimport warnings import os, sys, json, glob sys.path.append('utilities') from train2 import Train from fetch_ligand2 import Pharos_Data from utility import FeatureGenerator # for features generation of txt file from utility2 import FeatureGenerator2 # for features generation of sdf file import pandas as pd import numpy as np from tqdm import tqdm from rdkit import Chem from rdkit.Chem import AllChem from sklearn import metrics from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.utils.class_weight import compute_class_weight import joblib from sklearn.neural_network import MLPClassifier # from sklearn.metrics import make_scorer, roc_auc_score, recall_score, accuracy_score, precision_score class Run_Workflow: def __init__(self, actives, decoys): self.actives = actives self.decoys = decoys self.results = dict() def get_fingerprints(self,smiles): try: fg = FeatureGenerator(smiles) features = fg.toTPATF() return features except Exception as e: print(e) def get_models(self): # Get features at first! 
if not self.fp_generation(): print('Error: features extraction failed!') return try: t = Train(self.actives_x, self.decoys_x) t.train_models() except Exception as e: print(e) def fp_generation(self): # Fingerprint generation print('Pleae wait! Fingerprints are getting generated......') if self.decoys[-4:] == '.sdf' and self.actives[-4:] == '.sdf': # Get fingerprints for actives self.actives_x = self.sdf_fp_active() # Get fingerprints for decoys self.decoys_x = self.sdf_fp_decoy() return True elif self.decoys[-4:] == '.sdf': df = pd.read_csv(self.actives) # df = pd.read_csv(open(self.actives,'rU'))#, encoding='utf-8', engine='c') # Get fingerprints for actives df['tpatf'] = df.SMILES.apply(self.get_fingerprints) self.actives_x = np.array([f for f in df.tpatf.values], dtype = np.float32) # Get fingerprints for decoys self.decoys_x = self.sdf_fp_decoy() return True else: df = pd.read_csv(self.actives) df2 = pd.read_csv(self.decoys) # df = pd.read_csv(open(self.actives,'rU'))#, encoding='utf-8', engine='c') # df2 = pd.read_csv(open(self.decoys, 'rU'))#, encoding='utf-8', engine='c') # Get fingerprints for actives df['tpatf'] = df.SMILES.apply(self.get_fingerprints) # Get fingerprints for decoys df2['tpatf'] = df2.SMILES.apply(self.get_fingerprints) # numpy arrays self.actives_x = np.array([f for f in df.tpatf.values], dtype = np.float32) self.decoys_x = np.array([f for f in df2.tpatf.values], dtype = np.float32) return True return False def sdf_fp_decoy(self): try: fg2 = FeatureGenerator2(self.decoys) feat_decoy = fg2.sepTPATF() return feat_decoy except Exception as e: print(e) def sdf_fp_active(self): try: fg2 = FeatureGenerator2(self.actives) feat_active = fg2.sepTPATF() return feat_active except Exception as e: print(e) # If users have their own actives and decoys def actives_decoys(): active_file = input("Uniprot id of the file? Example: P07948 \n") active_file = active_file.strip() print('Looking for active and decoy files....') # active in .txt actives = main_path+'actives/'+active_file+'.txt' if not os.path.isfile(actives): # active in .sdf actives = main_path+'actives/'+active_file+'.sdf' # decoy in .txt.. decoys = main_path+'decoys/'+"decoys_" + active_file +".txt" if not os.path.isfile(decoys): # decoy in .sdf.. decoys = main_path+'decoys/'+ "decoys_" +active_file+".sdf" if os.path.isfile(actives) and os.path.isfile(decoys): print('Actives and Decoys are found!') return actives, decoys # Searches decoys in our database for give active file (Uniprot id) def actives_bt_not_decoys(): active_file = input("Uniprot id of the file? Example: P07948 \n") active_file = active_file.strip() actives = main_path+'actives/'+active_file+'.txt' if not os.path.isfile(actives): actives = main_path+'actives/'+active_file+'.sdf' # Path for decoys database decoys_database = '../decoys_database' # if not os.path.isfile(os.path.join(decoys_database, active_file+".txt")): print('Searching decoys .....') if not os.path.isfile(os.path.join(decoys_database, active_file+".sdf")): print("Decoys are not found, exiting! Look for decoys in DUDE website and come back!") sys.exit(1) # decoys = os.path.join(decoys_database, active_file+".txt") decoys = os.path.join(decoys_database, "decoys_" +active_file+".sdf") if os.path.isfile(actives) and os.path.isfile(decoys): print('Actives and decoys are extracted!') return actives, decoys def no_actives_and_decoys(): active_file = input("Uniprot id of the file? 
Example: P07948 \n") active_file = active_file.strip() active_dir = main_path+'/'+ "actives" pdata = Pharos_Data(active_file, active_dir ) print('Actives for a given protein are getting downloaded from Pharos website!') pdata.fetch_ligand() actives = main_path+'actives/'+active_file+'.txt' print('Searching decoys .....') decoys_database = '../decoys_database/' if not os.path.isfile(os.path.join(decoys_database, "decoys_" +active_file+".sdf")): print("Decoys are not found, exiting! Look for decoys in DUDE website and come back!") sys.exit(1) decoys = os.path.join(decoys_database, active_file+".sdf") if os.path.isfile(actives) and os.path.isfile(decoys): print('Actives and decoys are extracted!') return actives, decoys # Start here def start_workflow(): print('Actives and decoys should either be in sdf file or text file (with header "SMILES" for txt files!)') print('ACTIVES AND DECOYS FILE NAMES SHOULD BE LIKE THAT: P07948.txt(or .sdf) and decoys_P07948.txt (or .sdf) ') print('PLEASE, MAKE SURE YOU HAVE FOLDERS "actives" and "decoys"') print('DO YOU HAVE "actives" and "decoys" FOLDERS? Type y for Yes and n for No!') check = input() if check != 'y': print('Exiting...') sys.exit(1) print("Do you have actives? Please type y for Yes and n for No !") answer1 = input() print("Do you have decoys? Please type y for Yes and n for No !") answer2 = input() if answer1 == 'y' and answer2 == 'y': actives, decoys = actives_decoys() rw = Run_Workflow(actives, decoys) rw.get_models() elif answer1 == 'y' and answer2 == 'n': actives, decoys = actives_bt_not_decoys() rw = Run_Workflow(actives, decoys) rw.get_models() elif answer1 == 'n' and answer2 == 'n': actives, decoys = no_actives_and_decoys() rw = Run_Workflow(actives, decoys) rw.get_models() else: print('Please provide the right information!. Exiting!') sys.exit(1) if __name__ == '__main__': # Path for working directory print("Please, provide the path for working directory. Example: /Users/gvin/ligandnet_workflow/test_ligandnet/ \n") main_path = input() main_path = main_path.strip() os.chdir(main_path) dirs = ["actives", "decoys"] for _dir in dirs: if not os.path.isdir(_dir): os.makedirs(_dir) if main_path[-1]!='/': main_path = main_path+'/' # Start Function start_workflow()Please, provide the path for working directory. Example: /Users/gvin/ligandnet_workflow/test_ligandnet/ /Users/gvin/ligandnet_workflow/test_ligandnet/ Actives and decoys should either be in sdf file or text file (with header "SMILES" for txt files!) ACTIVES AND DECOYS FILE NAMES SHOULD BE LIKE THAT: P07948.txt(or .sdf) and decoys_P07948.txt (or .sdf) PLEASE, MAKE SURE YOU HAVE FOLDERS "actives" and "decoys" DO YOU HAVE "actives" and "decoys" FOLDERS? Type y for Yes and n for No! y Do you have actives? Please type y for Yes and n for No ! y Do you have decoys? Please type y for Yes and n for No ! y Uniprot id of the file? Example: P07948 P07948 Looking for active and decoy files.... Actives and Decoys are found! Pleae wait! Fingerprints are getting generated...... Please choose the name (Example type 1 for Random Forest) of the model from the following options! 1. Random Forest Classifier 2. Extreme Gradient Boosting 3. Support Vector Classifier 4. Artificial Neural Network 5. All[...]Styling with cyclerDemo of custom property-cycle settings to control colors and other styleproperties for multi-line plots.NoteMore complete documentation of the ``cycler`` API can be found `here `_.This example demonstrates two different APIs:1. 
Setting the default rc parameter specifying the property cycle. This affects all subsequent axes (but not axes already created).2. Setting the property cycle for a single pair of axes.from cycler import cycler import numpy as np import matplotlib.pyplot as pltFirst we'll generate some sample data, in this case, four offset sine curves.x = np.linspace(0, 2 * np.pi, 50) offsets = np.linspace(0, 2 * np.pi, 4, endpoint=False) yy = np.transpose([np.sin(x + phi) for phi in offsets])Now ``yy`` has shapeprint(yy.shape)So ``yy[:, i]`` will give you the ``i``-th offset sine curve. Let's set the default prop_cycle using :func:`matplotlib.pyplot.rc`. We'll combine a color cycler and a linestyle cycler by adding (``+``) two ``cycler``'s together. See the bottom of this tutorial for more information about combining different cyclers.default_cycler = (cycler(color=['r', 'g', 'b', 'y']) + cycler(linestyle=['-', '--', ':', '-.'])) plt.rc('lines', linewidth=4) plt.rc('axes', prop_cycle=default_cycler)Now we'll generate a figure with two axes, one on top of the other. On the first axis, we'll plot with the default cycler. On the second axis, we'll set the prop_cycler using :func:`matplotlib.axes.Axes.set_prop_cycle` which will only set the ``prop_cycle`` for this :mod:`matplotlib.axes.Axes` instance. We'll use a second ``cycler`` that combines a color cycler and a linewidth cycler.custom_cycler = (cycler(color=['c', 'm', 'y', 'k']) + cycler(lw=[1, 2, 3, 4])) fig, (ax0, ax1) = plt.subplots(nrows=2) ax0.plot(yy) ax0.set_title('Set default color cycle to rgby') ax1.set_prop_cycle(custom_cycler) ax1.plot(yy) ax1.set_title('Set axes color cycle to cmyk') # Add a bit more space between the two plots. fig.subplots_adjust(hspace=0.3) plt.show()Graphs in articleThis notebook is used to generate figures for the article. Specifically, it generates the Moe2016: Example 2 graphs, where we use Denavit-Hartenberg parameters with `urdf2casadi`, and a trajectory that escapes a box.urdf_path = "./urdf/ur5.urdf" links = ["world", "base_link", "base", "shoulder_link", "upper_arm_link", "forearm_link", "wrist_1_link", "wrist_2_link", "wrist_3_link", "tool0"] fk_dict = converter.from_file(root="base_link", tip="tool0", filename=urdf_path) link_lengths = [0.,-0.425, -0.392, 0., 0., 0.] link_twists = [cs.np.pi/2, 0., 0., cs.np.pi/2, -cs.np.pi/2, 0.] 
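# The four lists passed to from_denavit_hartenberg follow the standard DH convention:
# link_lengths are the a_i parameters and link_twists the alpha_i (defined above),
# while link_offsets are the d_i and joint_angles the theta_i (defined just below);
# the "s" entries presumably mark the joint angles as the symbolic (actuated) variables.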
link_offsets = [0.089, 0., 0., 0.109, 0.095, 0.082] joint_angles = ["s" for i in range(6)] fk_dict = converter.from_denavit_hartenberg( joint_angles=joint_angles, link_lengths=link_lengths, link_offsets=link_offsets, link_twists=link_twists, joint_names=fk_dict["joint_names"], upper_limits=fk_dict["upper"], lower_limits=fk_dict["lower"] ) fk_dict.keys() # Setup time and robot_var t = cs.MX.sym("t") q = cs.MX.sym("q", len(fk_dict["joint_names"])) dq = cs.MX.sym("dq", len(fk_dict["joint_names"])) # Functions for end-effector things (casadi functions of q) T_fk = fk_dict["T_fk"] p_fk = cs.Function("p_fk", [t, q], [T_fk(q)[:3, 3]]) R_fk = cs.Function("R_fk", [t, q], [T_fk(q)[:3, :3]]) # Manipulability J_p = cs.jacobian(p_fk(t,q), q) man = cs.mtimes(J_p,J_p.T) detman = man[0,0]*man[1,1]*man[2,2]+man[0,1]*man[1,2]*man[2,0]+man[0,2]*man[1,0]*man[2,1] detman += -man[0,2]*man[1,1]*man[2,0] -man[0,1]*man[1,0]*man[2,2] -man[0,0]*man[1,2]*man[2,1] manipulability_cost = -detman for i in range(6): manipulability_cost += 1e3*q[i]*q[i] fmanipulability = cs.Function("fman",[q],[manipulability_cost]) # Check the joint limits from the URDF: q_max = cs.np.array(fk_dict["upper"]) q_min = cs.np.array(fk_dict["lower"]) print("q_min ",str(q_min)) print("q_max ",str(q_max)) # Define a reasonable max joint speed max_speed = cs.np.pi/5 # rad/s #cs.inf print("Max speed: ", max_speed) # Let's pretend home (where we start) is such that we start in the box. UR5_home = cs.np.array([-1.5, -1.6447247447521742, 1.4830607057020933, -0.9733457906220311, -0.6578997165707934, 0.0]) UR5_home = cs.np.array([-(50.0/180.0)*cs.np.pi, -(160.0/180.0)*cs.np.pi, -(110.0/180.0)*cs.np.pi, -(90.0/180.0)*cs.np.pi, -(90.0/180.0)*cs.np.pi, 0.0]) dt = 0.008 # Define the basic system limits # Uphold the joint constraints joint_limits_cnstr = cc.SetConstraint( label="Joint_Limits", expression = q, set_min = q_min, set_max = q_max) # Listify the joint limits constraints for pseudoinverse, starting with the lowest joint_limits_cnstr_list = [] for i in range(q.size()[0]): joint_limits_cnstr_list.append( cc.SetConstraint(label="limit_q_"+str(i), expression=q[i], set_min = q_min[i], set_max = q_max[i], priority = i)) # Let's have some speed limit joint_speed_limits_cnstr = cc.VelocitySetConstraint( label="Joint_speed_limits", expression = q, set_min = -cs.vertcat([max_speed]*q.size()[0]), set_max = cs.vertcat([max_speed]*q.size()[0])) # Wall avoidance: x_min, x_max = 0.1, 0.6#0.1, 0.5#0.2, 0.65# y_min, y_max = -0.5, 0.4#-0.5, 0.4#-0.5, 0.5# z_min, z_max = -0.3, 0.25#0., 0.55#0.15, 0.55# # Desired trajectory omega=0.1 path_des = cs.vertcat(0.5*cs.sin(omega*t)*cs.sin(omega*t) + 0.2, 0.5*cs.cos(omega*t)+0.25*cs.sin(omega*t), 0.5*cs.sin(omega*t)*cs.cos(omega*t) + 0.1)#0.4) fpath_des = cs.Function("fpath_des",[t],[path_des]) # collision avoidance separate colav_x_cnstr = cc.SetConstraint( label="colav_x", expression=p_fk(t, q)[0], set_min=x_min, set_max=x_max, priority=len(fk_dict["joint_names"])+2, constraint_type="hard", gain=5e2 ) colav_y_cnstr = cc.SetConstraint( label="colav_y", expression=p_fk(t, q)[1], set_min=y_min, set_max=y_max, priority=len(fk_dict["joint_names"])+1, constraint_type="hard", gain=5e2 ) colav_z_cnstr = cc.SetConstraint( label="colav_z", expression=p_fk(t, q)[2], set_min=z_min, set_max=z_max, priority=len(fk_dict["joint_names"])+3, constraint_type="hard", gain=5e2 ) colav_x_cnstr_mpc = cc.SetConstraint( label="colav_x", expression=p_fk(t, q)[0], set_min=x_min, set_max=x_max, priority=len(fk_dict["joint_names"])+2, 
constraint_type="hard", gain=1. ) colav_y_cnstr_mpc = cc.SetConstraint( label="colav_y", expression=p_fk(t, q)[1], set_min=y_min, set_max=y_max, priority=len(fk_dict["joint_names"])+1, constraint_type="hard", gain=1. ) colav_z_cnstr_mpc = cc.SetConstraint( label="colav_z", expression=p_fk(t, q)[2], set_min=z_min, set_max=z_max, priority=len(fk_dict["joint_names"])+3, constraint_type="hard", gain=1. ) # collision avoidance box colav_box_cnstr = cc.SetConstraint( label="colav_box", expression=p_fk(t,q), set_min = cs.np.array([x_min,y_min,z_min]), set_max = cs.np.array([x_max,y_max,z_max]), priority=len(fk_dict["joint_names"])+1, constraint_type="hard", gain=5e2 ) colav_box_cnstr_mpc = cc.SetConstraint( label="colav_box", expression=p_fk(t,q), set_min = cs.np.array([x_min,y_min,z_min]), set_max = cs.np.array([x_max,y_max,z_max]), priority=len(fk_dict["joint_names"])+1, constraint_type="hard", gain=1. ) # Tracking trajectory path_cnstr = cc.EqualityConstraint( label="move_point2", expression=p_fk(t, q) - path_des, priority=len(fk_dict["joint_names"])+4, constraint_type="soft", gain=0.15 ) path_cnstr.eval = cs.Function("path_eval", [t,q], [cs.norm_2(path_cnstr.expression)]) # List constraints constraints = [colav_x_cnstr, colav_y_cnstr, colav_z_cnstr, path_cnstr]# + joint_limits_cnstr_list constraints_mpc = [colav_x_cnstr_mpc, colav_y_cnstr_mpc, colav_z_cnstr_mpc, path_cnstr]# + joint_limits_cnstr_list # Setup the skill and print info skill = cc.SkillSpecification( label="box_move", time_var=t, robot_var=q, robot_vel_var=dq, constraints=constraints ) skill_mpc = cc.SkillSpecification( label="box_move", time_var=t, robot_var=q, robot_vel_var=dq, constraints=constraints_mpc ) skill_multidim = cc.SkillSpecification( label="box_move_multidim", time_var=t, robot_var=q, robot_vel_var=dq, constraints=[colav_box_cnstr, path_cnstr]#+joint_limits_cnstr_list ) skill_multidim_mpc = cc.SkillSpecification( label="box_move_multidim", time_var=t, robot_var=q, robot_vel_var=dq, constraints=[colav_box_cnstr_mpc, path_cnstr]#+joint_limits_cnstr_list ) skill.print_constraints() skill_multidim.print_constraints() # Let's test all the available controllers controller_classes = { "qp":cc.ReactiveQPController, "nlp":cc.ReactiveNLPController, "pinv":cc.PseudoInverseController, "mpc":cc.ModelPredictiveController } controllers = {} for key in controller_classes.keys(): controllers[key] = {} skill_situations = { "singular":skill, "multidim":skill_multidim } # Compile all the controllers for each situation for sitn_key in skill_situations.keys(): print("Compiling skill: "+str(sitn_key)) for key in controllers.keys(): t0 = time.time() if key == "pinv" and sitn_key == "multidim": controllers[key][sitn_key] = controller_classes[key](skill_spec=skill_situations[sitn_key], options={"multidim_sets":True}) elif key == "mpc": if sitn_key == "singular": skill_spec = skill_mpc elif sitn_key == "multidim": skill_spec = skill_multidim_mpc controllers[key][sitn_key] = controller_classes[key](skill_spec=skill_spec, horizon_length=10, timestep=dt) elif key == "nlp": controllers[key][sitn_key] = controller_classes[key](skill_spec=skill_situations[sitn_key])#, #cost_expr=cs.dot(dq,dq)+manipulability_cost) else: controllers[key][sitn_key] = controller_classes[key](skill_spec=skill_situations[sitn_key]) controllers[key][sitn_key].setup_problem_functions() controllers[key][sitn_key].setup_solver() print("\t-"+str(key)+", compile time: "+str(time.time()-t0)) timesteps = 10000 for cntr_key in controllers.keys(): print("Simulating controller: 
"+str(cntr_key)) for sitn_key in skill_situations.keys(): print("\t-"+str(sitn_key)) print("\t\tSetting up initial value problem") controllers[cntr_key][sitn_key].setup_initial_problem_solver() print("\t\tSolving initial value problem") slack_res = controllers[cntr_key][sitn_key].solve_initial_problem(0,UR5_home)[-1] t0 = time.time() # Simulate it! t_sim = cs.np.array([dt*i for i in range(timesteps+1)]) t_run_sim = cs.np.array([dt*i for i in range(timesteps)]) # Robot q_sim = cs.np.zeros((len(t_sim),q.shape[0])) q_sim[0,:] = UR5_home dq_sim = cs.np.zeros((len(t_sim),q.shape[0])) # Cartesian position p_sim = cs.np.zeros((len(t_sim), 3)) p_sim[0,:] = T_fk(UR5_home)[:3,3].toarray()[:,0] # Rotation R_sim = cs.np.zeros((len(t_sim), 3, 3)) R_sim[0,:,:] = T_fk(UR5_home)[:3,:3].toarray() # Error in tracking e_sim = cs.np.zeros(len(t_sim)) e_sim[0] = path_cnstr.eval(t_sim[0],q_sim[0,:]) # Controller mode mode_sim = cs.np.zeros(len(t_sim)) # Manipulability man_sim = cs.np.zeros(len(t_sim)) man_sim[0] = fmanipulability(UR5_home) # Loop for i in range(len(t_sim) - 1): t_run0 = time.time() res = controllers[cntr_key][sitn_key].solve(t_sim[i],q_sim[i,:],warmstart_slack_var=slack_res) t_run_sim[i] = time.time() - t_run0 dq_sim[i,:] = res[0].toarray()[:,0] if res[-1] is not None: slack_res = res[-1].toarray()[:,0] for idx, dqi in enumerate(dq_sim[i,:]): dq_sim[i,idx] = max(min(dqi,max_speed),-max_speed) q_sim[i+1,:] = q_sim[i,:] + dq_sim[i,:]*dt p_sim[i+1,:] = T_fk(q_sim[i+1,:])[:3,3].toarray()[:,0] R_sim[i+1,:,:] = T_fk(q_sim[i+1,:])[:3,:3].toarray() e_sim[i+1] = path_cnstr.eval(t_sim[i],q_sim[i+1,:]) if cntr_key == "pinv": mode_sim[i+1] = controllers[cntr_key][sitn_key].current_mode man_sim[i+1] = fmanipulability(q_sim[i+1]) controllers[cntr_key][str(sitn_key)+"_res"] = { "t_sim":t_sim, "t_run_sim": t_run_sim, "dq_sim": dq_sim, "q_sim": q_sim, "p_sim": p_sim, "R_sim": R_sim, "e_sim": e_sim, "mode_sim": mode_sim, "man_sim": man_sim } print("\t\tRuntime: "+str(time.time()-t0)) %matplotlib notebook fig, ax = plt.subplots() for name in controllers.keys(): ax.plot(controllers[name]["singular_res"]["t_sim"], controllers[name]["singular_res"]["e_sim"], label=name) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("tracking error [m]") #ax.set_yscale("log") ax.set_title("singular") fig, ax = plt.subplots() for name in controllers.keys(): ax.plot(controllers[name]["multidim_res"]["t_sim"], controllers[name]["multidim_res"]["e_sim"], label=name) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("error") #ax.set_yscale("log") ax.set_title("multidim") import nice_plotting nice_plotting.latexify(fig_width=3.5, fig_height=0.7*2.1636) from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid1.inset_locator import mark_inset fig, ax = plt.subplots() axins = zoomed_inset_axes(ax, 6, loc=9) for name in controllers.keys(): ax.plot(controllers[name]["singular_res"]["t_sim"], controllers[name]["singular_res"]["e_sim"], label=name) axins.plot(controllers[name]["singular_res"]["t_sim"], controllers[name]["singular_res"]["e_sim"], label=name) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("Tracking error [m]") axins.set_xlim(41, 45) axins.set_ylim(0.05,0.13) mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec="0.5") nice_plotting.format_axes(ax) axins.xaxis.set_visible(False) axins.yaxis.set_visible(False) plt.savefig("singular_tracking_error.pdf",bbox_inches="tight") import nice_plotting nice_plotting.latexify(fig_width=3.5, fig_height=0.7*2.1636) fig, ax = plt.subplots() axins = 
zoomed_inset_axes(ax, 6, loc=9) for name in controllers.keys(): ax.plot(controllers[name]["multidim_res"]["t_sim"], controllers[name]["multidim_res"]["e_sim"], label=name) axins.plot(controllers[name]["multidim_res"]["t_sim"], controllers[name]["multidim_res"]["e_sim"], label=name) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("Tracking error [m]") axins.set_xlim(41, 45) axins.set_ylim(0.05,0.13) mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec="0.5") axins.xaxis.set_visible(False) axins.yaxis.set_visible(False) nice_plotting.format_axes(ax) plt.savefig("multidim_tracking_error.pdf",bbox_inches="tight") fig, ax = plt.subplots() ax.plot(controllers["pinv"]["singular_res"]["t_sim"], controllers["pinv"]["singular_res"]["mode_sim"], label="separate", ls="--", c="g") ax.plot(controllers["pinv"]["multidim_res"]["t_sim"], controllers["pinv"]["multidim_res"]["mode_sim"], label="multidim", ls="-", c="b") ax.legend() ax.set_xlim([-1, 50]) ax.set_xlabel("t [s]") ax.set_ylabel("mode") nice_plotting.format_axes(ax) plt.savefig("modes.pdf", bbox_inches="tight") #ax.plot(controllers["pinv"]["singular_res"]["t_sim"], controllers["pinv"]["singular_res"]["mode_sim"],ls="--") fig, ax = plt.subplots() ax.plot(controllers["pinv"]["multidim_res"]["t_sim"], controllers["pinv"]["multidim_res"]["mode_sim"]) #ax.plot(controllers["pinv"]["singular_res"]["t_sim"], controllers["pinv"]["singular_res"]["mode_sim"],ls="--") ax = common_plots.pos_point(controllers["pinv"]["multidim_res"], p_des=fpath_des) ax = common_plots.pos_point(controllers["mpc"]["multidim_res"],ax=ax, lstyle=":", p_des=fpath_des) ax.plot([min(t_sim), max(t_sim)], [x_min,x_min],"b-.") ax.plot([min(t_sim), max(t_sim)], [x_max,x_max],"b-.") ax.plot([min(t_sim), max(t_sim)], [y_min,y_min],"r-.") ax.plot([min(t_sim), max(t_sim)], [y_max,y_max],"r-.") ax.plot([min(t_sim), max(t_sim)], [z_min,z_min],"g-.") ax.plot([min(t_sim), max(t_sim)], [z_max,z_max],"g-.") ax = common_plots.pos_point(controllers["pinv"]["singular_res"], p_des=fpath_des) ax = common_plots.pos_point(controllers["qp"]["singular_res"], ax=ax, lstyle=":") ax.plot([min(t_sim), max(t_sim)], [x_min,x_min],"b-.") ax.plot([min(t_sim), max(t_sim)], [x_max,x_max],"b-.") ax.plot([min(t_sim), max(t_sim)], [y_min,y_min],"r-.") ax.plot([min(t_sim), max(t_sim)], [y_max,y_max],"r-.") ax.plot([min(t_sim), max(t_sim)], [z_min,z_min],"g-.") ax.plot([min(t_sim), max(t_sim)], [z_max,z_max],"g-.") fig, ax = plt.subplots() cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["singular_res"]["t_sim"], [fpath_des(ti)[0] for ti in controllers["pinv"]["singular_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [x_min,x_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [x_max,x_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["singular_res"]["t_sim"], controllers[key]["singular_res"]["p_sim"][:,0], c=cmap(i), ls="-", label=key ) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("x [m]") nice_plotting.format_axes(ax) plt.savefig("singular_set_constraint_x.pdf", bbox_inches="tight") fig, ax = plt.subplots() cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["singular_res"]["t_sim"], [fpath_des(ti)[1] for ti in controllers["pinv"]["singular_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [y_min,y_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [y_max,y_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["singular_res"]["t_sim"], 
controllers[key]["singular_res"]["p_sim"][:,1], c=cmap(i), ls="-", label=key ) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("y [m]") nice_plotting.format_axes(ax) plt.savefig("singular_set_constraint_y.pdf", bbox_inches="tight") fig, ax = plt.subplots() cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["singular_res"]["t_sim"], [fpath_des(ti)[2] for ti in controllers["pinv"]["singular_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [z_min,z_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [z_max,z_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["singular_res"]["t_sim"], controllers[key]["singular_res"]["p_sim"][:,2], c=cmap(i), ls="-", label=key ) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("z [m]") nice_plotting.format_axes(ax) plt.savefig("singular_set_constraint_z.pdf", bbox_inches="tight") f, axs = plt.subplots(3,1) cmap = plt.get_cmap("tab10") axs[0].plot([min(t_sim), max(t_sim)], [x_min,x_min],"k-.") axs[0].plot([min(t_sim), max(t_sim)], [x_max,x_max],"k-.") axs[1].plot([min(t_sim), max(t_sim)], [y_min,y_min],"k-.") axs[1].plot([min(t_sim), max(t_sim)], [y_max,y_max],"k-.") axs[2].plot([min(t_sim), max(t_sim)], [z_min,z_min],"k-.") axs[2].plot([min(t_sim), max(t_sim)], [z_max,z_max],"k-.") for i, ax in enumerate(axs): ax.plot(controllers["pinv"]["singular_res"]["t_sim"], [fpath_des(ti)[i] for ti in controllers["pinv"]["singular_res"]["t_sim"]], ls=":", color="k", label="traj") for j, key in enumerate(controllers): ax.plot( controllers[key]["singular_res"]["t_sim"], controllers[key]["singular_res"]["p_sim"][:,i],c=cmap(j), ls="-", label=key ) ax.legend() fig, ax = plt.subplots() cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["multidim_res"]["t_sim"], [fpath_des(ti)[0] for ti in controllers["pinv"]["multidim_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [x_min,x_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [x_max,x_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["multidim_res"]["t_sim"], controllers[key]["multidim_res"]["p_sim"][:,0], c=cmap(i), ls="-", label=key ) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("x [m]") nice_plotting.format_axes(ax) plt.savefig("multidim_set_constraint_x.pdf", bbox_inches="tight") fig, ax = plt.subplots() axins = zoomed_inset_axes(ax, 7, loc=7) cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["multidim_res"]["t_sim"], [fpath_des(ti)[1] for ti in controllers["pinv"]["multidim_res"]["t_sim"]], ls=":", color="k", label="traj" ) axins.plot( controllers["pinv"]["multidim_res"]["t_sim"], [fpath_des(ti)[1] for ti in controllers["pinv"]["multidim_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [y_min,y_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [y_max,y_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["multidim_res"]["t_sim"], controllers[key]["multidim_res"]["p_sim"][:,1], c=cmap(i), ls="-", label=key ) axins.plot( controllers[key]["multidim_res"]["t_sim"], controllers[key]["multidim_res"]["p_sim"][:,1], c=cmap(i), ls="-", label=key ) ax.legend(loc=2) ax.set_xlabel("t [s]") ax.set_ylabel("y [m]") axins.set_xlim(58, 62) axins.set_ylim(0.37,0.41) axins.plot([min(t_sim), max(t_sim)], [y_max, y_max], "k-.") mark_inset(ax, axins, loc1=2, loc2=1, fc="none", ec="0.5") axins.xaxis.set_visible(False) axins.yaxis.set_visible(False) nice_plotting.format_axes(ax) 
plt.savefig("multidim_set_constraint_y.pdf", bbox_inches="tight") fig, ax = plt.subplots() cmap = plt.get_cmap("tab10") # Trajectory ax.plot( controllers["pinv"]["multidim_res"]["t_sim"], [fpath_des(ti)[2] for ti in controllers["pinv"]["multidim_res"]["t_sim"]], ls=":", color="k", label="traj" ) ax.plot([min(t_sim), max(t_sim)], [z_min,z_min],"k-.") ax.plot([min(t_sim), max(t_sim)], [z_max,z_max],"k-.") # Controllers for i,key in enumerate(controllers): ax.plot( controllers[key]["multidim_res"]["t_sim"], controllers[key]["multidim_res"]["p_sim"][:,2], c=cmap(i), ls="-", label=key ) ax.legend() ax.set_xlabel("t [s]") ax.set_ylabel("z [m]") nice_plotting.format_axes(ax) plt.savefig("multidim_set_constraint_z.pdf", bbox_inches="tight")#, pad_inches=200) def ms_format(t): return"{:.2f} ms".format(1000.0*t) cntrllrs_tab = ["pinv","qp","nlp","mpc"] tab_str = "& PINV & QP & NLPC & MPC\\\\ \n\midrule\n" tab_str += "Initial (separate)" for cntrl_key in cntrllrs_tab: tab_str += "& "+ms_format(controllers[cntrl_key]["singular_res"]["t_run_sim"][0]) tab_str += "\\\\\n" tab_str += "Average (separate)" for cntrl_key in cntrllrs_tab: tab_str += "& "+ms_format(cs.np.mean(controllers[cntrl_key]["singular_res"]["t_run_sim"])) tab_str += "\\\\\n" tab_str += "Initial (multidim)" for cntrl_key in cntrllrs_tab: tab_str += "& "+ms_format(controllers[cntrl_key]["multidim_res"]["t_run_sim"][0]) tab_str += "\\\\\n" tab_str += "Average (multidim)" for cntrl_key in cntrllrs_tab: tab_str += "& "+ms_format(cs.np.mean(controllers[cntrl_key]["multidim_res"]["t_run_sim"])) print(tab_str)& PINV & QP & NLPC & MPC\\ \midrule Initial (separate)& 0.23 ms& 0.40 ms& 3.57 ms& 25.03 ms\\ Average (separate)& 0.22 ms& 0.21 ms& 3.26 ms& 35.79 ms\\ Initial (multidim)& 0.14 ms& 0.39 ms& 3.07 ms& 20.60 ms\\ Average (multidim)& 0.10 ms& 0.21 ms& 3.14 ms& 17.68 msWarning: Before running below cells please make sure you have API key. Please see README.md for more info on API key.import os os.environ["LS_API_KEY"] = "MY-API-KEY" # replace your API key here. # Marker Cluster Example import os from here_map_widget import Map, MarkerCluster, ZoomControl, ObjectLayer m = Map(api_key=os.environ["LS_API_KEY"], center=[51.01, 0.01], zoom=7) data = """

Marker

{}

""" p1 = dict(lat=51.01, lng=0.01, data=data.format("First Marker")) p2 = dict(lat=50.04, lng=1.01, data=data.format("Second Marker")) p3 = dict(lat=51.45, lng=1.01, data=data.format("Third Marker")) p4 = dict(lat=51.01, lng=2.01, data=data.format("Fourth Marker")) provider = MarkerCluster(data_points=[p1, p2, p3, p4], show_bubble=True) layer = ObjectLayer(provider=provider) m.add_layer(layer) zc = ZoomControl(alignment="LEFT_TOP") m.add_control(zc) mPart 3: Other options for inputUmami is a package for calculating metrics for use with for Earth surface dynamics models. This notebook is the final notebook in a three-part introduction to using umami.Umami was designed to work well with the [terrainbento](https://terrainbento.readthedocs.io/en/latest/) model package, as well as other models built using the [Landlab Toolkit](https://github.com/landlab/landlab). However, umami can be used with models built with other modeling tools and data in a variety of formats. This notebook is meant to demonstrate this capability. Scope of this tutorialIn this tutorial you will learn how to use other input options along with umami. Specifically we will use square gridded terrain stored in [ESRI ASCII](http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm) format. We will read this in as a numpy array. We will also interpolate it to an irregular grid.If you have comments or questions about the notebooks, the best place to get help is through [GitHub Issues](https://github.com/TerrainBento/umami/issues).To begin this example, we will import the required python packages.import warnings warnings.filterwarnings('ignore') from io import BytesIO, StringIO import numpy as np from scipy.interpolate import RegularGridInterpolator import matplotlib.pylab as plt from urllib.request import urlopen from urllib.error import URLError import rasterio from landlab import imshow_grid, RasterModelGrid, HexModelGrid from umami import MetricUmami does not make any requirements regarding where terrain data comes from or what model or modeling package is used to construct modeled terrain. However, umami does require that modeled or observed terrain is provided to it as a Landlab grid with an at-node field called `topographic__elevation`. Using the Landlab model grid datastructure means that umami knows how large each grid cell is, and how they are connected. Landlab has five model grid classes. One of them will probably suit your needs. - [`RasterModelGrid`](https://landlab.readthedocs.io/en/release/landlab.grid.raster.htmlraster)- [`HexModelGrid`](https://landlab.readthedocs.io/en/release/landlab.grid.hex.htmlhex)- [`RadialModelGrid`](https://landlab.readthedocs.io/en/release/landlab.grid.radial.htmlradial)- [`VoronoiDelaunayGrid`](https://landlab.readthedocs.io/en/release/landlab.grid.voronoi.htmlvoronoi)- [`NetworkModelGrid`](https://landlab.readthedocs.io/en/release/landlab.grid.network.htmlnetwork) In this example we will use the `RasterModelGrid` for regularly spaced square grid cells and the `VoronoiDelaunayGrid` for irregularly spaced observations. If you can read your topography into python as a numpy array, you can put it on a Landlab grid called `topographic__elevation` and use it with umami. While umami is strict about use of the Landlab grid, this quality of the grid makes it very flexible. 
You can use a Landlab function such as [`read_esri_ascii`](https://landlab.readthedocs.io/en/release/landlab.io.esri_ascii.htmllandlab.io.esri_ascii.read_esri_ascii) or [`read_netcdf`](https://landlab.readthedocs.io/en/release/landlab.io.netcdf.htmllandlab.io.netcdf.read.read_netcdf) to read your data into a numpy array. You can also create a synthetic one, or use some other package to read a file into python. The world is your oyster. In this case we will use the [rasterio](https://rasterio.readthedocs.io/en/stable/) package to read an [ESRI ASCII](http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm) format file that we will download from the [OpenTopography rest server](https://opentopography.org/developers). Step 1: Read in a numpy arrayFirst, we download a small patch of land near Boulder, CO. You can change the values of `north`, `south`, `east` and `west` to change the location. Its not hard to download a very large file, so increase values carefully. These data are provided with horizontal units of degrees. For this example we will not convert from degrees to meters, or address the issue of changing from a geographic coordinate system (WGS84) to a projected one (e.g, UTM Zone 13 N, for Colorado). These are things you should address if you are using this sort of data in an application or research project. The code is wrapped in a `try`-`except` block because if it takes a very long time to get a response from OpenTopography (which sometimes happens on Binder), or if you don't have internet, we want you to still be able to do the tutorial. In this case, you will use some data pre-loaded into the file "topo_data.asc".try: west = -105.4 # longitude (degrees) east = -105.15 # longitude (degrees) north = 40.1 # latitude (degrees) south = 39.9 # latitue (degrees) URL = "http://opentopo.sdsc.edu/otr/getdem?demtype=SRTMGL3&" url = (URL + "west=" + str(west) + "&" + "south=" + str(south) + "&" "east=" + str(east) + "&" "north=" + str(north) + "&" "outputFormat=AAIGrid") f = urlopen(url) file_like = BytesIO(f.read()) print("URL Sucess: Using data from OpenTopography.") except URLError: print("URL Timed out, using pre-saved file.") file_like = "topo_data.asc"We now have a variable in our python workspace called `file_like`. We can think of it like a python object that will behave like like an ESRI ASCII file on disk.Next we read `file_like` in with the rasterio package and grab important characteristics like the number of rows (`nrows`), number of columns (`ncols`), the resolution of each pixel (`dx, dy`), the coordinates of the lower left corner (`xy_lower_left`), and the actual elevation data (`elevations`).with rasterio.open(file_like) as dataset: nrows = dataset.height ncols = dataset.width dx, dy = dataset.res xy_lower_left = (dataset.bounds.left, dataset.bounds.bottom) elevations = dataset.read(1)We can plot it. As expected, it looks like the topography near Boulder, CO.plt.imshow(elevations, cmap="terrain", origin="lower")Step 2: Create a `RasterModelGrid` to give to UmamiNext we create a Landlab model grid by passing the information we got from the rasterio dataset to `RasterModelGrid`. We add the field using the name umami requires, `topographic__elevation`. One tricky step here is that the variable elevations is of datatype `int32`.elevations.dtypeSome of the underlying tools that umami uses assume that this field is of type `float`. 
So when we provide the field `topographic__elevation` to the grid, we will specify the that it should be as type `float`.rmg = RasterModelGrid((nrows, ncols), xy_spacing=(dx, dy), xy_of_lower_left=xy_lower_left) z = rmg.add_field("topographic__elevation", elevations.astype(float))If we use the Landlab function [`imshow_grid`](https://landlab.readthedocs.io/en/release/landlab.plot.htmllandlab.plot.imshow.imshow_grid) we see that the topography is correctly represented by the grid.imshow_grid(rmg, "topographic__elevation", cmap="terrain")Now we make our Metric using the same settings we used in [Part 1](IntroductionToMetric.ipynb).metrics = { "me": { "_func": "aggregate", "method": "mean", "field": "topographic__elevation" }, "ep10": { "_func": "aggregate", "method": "percentile", "field": "topographic__elevation", "q": 10 } } rmg_metric = Metric(rmg, metrics=metrics) rmg_metric.calculate() rmg_metric.names rmg_metric.valuesStep 3: Use irregular data and a `HexModelGrid`As a final example, we will look at specifying umami with an irregular grid. We won't import any standard format of irregular data but will create some by interpolating the regular data using the scipy tool [RegularGridInterpolator](https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html).We use a smaller number of nodes as we had in the prior example. This is just for speed, feel free to adjust the value for `factor` to change this. We start by creating a set of grid node locations in x and y.factor = 5 dx = rmg.spacing[0] * factor hmg = HexModelGrid((int(rmg.shape[0]/factor*1.2), int(rmg.shape[1]/factor)+1), dx, node_layout="rect", xy_of_lower_left=rmg.xy_of_lower_left)We can plot them in comparison with our regular grid nodes. There are a lot of nodes, so we will zoom into a corner of the plot.plt.plot(rmg.x_of_node, rmg.y_of_node, 'k.', markersize=2, label="Raster Points") plt.plot(hmg.x_of_node, hmg.y_of_node, 'm.', label="Irregular Points") plt.xlim(-105.40, -105.375) plt.ylim(40.00, 40.025)Next we create an interpolation object and interpolate to find the elevation values at our new randomly located set of model grid nodes based on the regular grid.interp_obj = RegularGridInterpolator((rmg.y_of_node.reshape(rmg.shape)[:, 0], rmg.x_of_node.reshape(rmg.shape)[0, :]), z.reshape(rmg.shape), bounds_error=False, fill_value=None) interp_z = interp_obj((hmg.y_of_node, hmg.x_of_node))Next we create a `HexModelGrid` and add `topographic__elevation` to it. One nice feature of the `imshow_grid` function is that it works for both regular and irregular grids.z = hmg.add_field("topographic__elevation", interp_z, at="node") imshow_grid(hmg, z, cmap="terrain")As expected we see a slightly smoothed version of our original topography. This is expected because we decreased the number of model grid nodes by a factor of 10. 
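As a quick optional check (a short sketch using the grids defined above), both Landlab grids expose `number_of_nodes`, so the coarsening introduced by interpolating onto the hex grid can be verified directly:

```python
# Compare how many nodes each grid has; the ratio is the effective
# coarsening introduced by interpolating onto the hex grid.
print("raster grid nodes:", rmg.number_of_nodes)
print("hex grid nodes:   ", hmg.number_of_nodes)
print("reduction factor:  {:.1f}".format(rmg.number_of_nodes / hmg.number_of_nodes))
```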
The final step is to create a `Metric` and calculate values.metrics = { "me": { "_func": "aggregate", "method": "mean", "field": "topographic__elevation" }, "ep10": { "_func": "aggregate", "method": "percentile", "field": "topographic__elevation", "q": 10 } } hmg_metric = Metric(hmg, metrics=metrics) hmg_metric.calculate() hmg_metric.names hmg_metric.valuesComparing the metric values for the two grids, we can see that the mean is slightly different in absolute value but very close based on percent change and the 10th percentile is identical.for n in hmg_metric.names: abs_change = np.abs(hmg_metric.value(n) - rmg_metric.value(n)) pct_change = abs_change /( (hmg_metric.value(n) + rmg_metric.value(n))/2) print(n, "\n abs_change: ", abs_change, "\n pct_change: ", pct_change)review.json# review.json: Contains full review text data including the user_id that wrote the review and the business_id the review is written for. review_rdd = sc.textFile(review_file_path, 240) review_rdd.take(1) # The total number of reviews n_review = review_rdd \ .map(lambda r: 1) \ .reduce(lambda a, b: a + b) # The number of distinct users who wrote reviews user_count = review_rdd \ .map(lambda r: (json.loads(r)['user_id'], 1)) \ .reduceByKey(lambda a, b: a + b).cache() n_user = user_count \ .map(lambda r : 1) \ .reduce(lambda a, b: a + b) # The top 10 users who wrote the largest numbers of reviews and the number of reviews they wrote top10_user = user_count \ .sortBy(lambda r: (-r[1], r[0])) \ .take(10) # The number of distinct businesses that have been reviewed business_count = review_rdd \ .map(lambda r: (json.loads(r)['business_id'], 1)) \ .reduceByKey(lambda a, b: a + b, 1).cache() # The top 10 businesses that had the largest numbers of reviews and the number of reviews they had top10_business = business_count \ .sortBy(lambda r: (-r[1], r[0])) \ .take(10) print("number of reviews: %d \n" % n_review) print("number of users who has written at least one review on yelp: %d \n" % n_user) print("top 10 active users (user_id, count): ") print(*top10_user, sep='\n') print("\ntop 10 popular businesses (business_id, count): ") print(*top10_business, sep='\n') """ TODO: + business_id ---> business_name using business.json (find top 10 popular businesses) ++ visualization: bar chart etc. - What categories are they in? - What about in different states? - their corresponding avg stars? """Simple Stock Plots in Rlibrary(quantmod) library(TTR) getSymbols('AAPL', src = 'yahoo', from = as.Date('2019-01-01'), to = as.Date('2020-01-01')) AAPL <- na.omit(AAPL) head(AAPL) plot(Cl(AAPL), main="Line Chart of Apple", xlab="Date") barplot(Cl(AAPL), main="Bar Chart of Apple", xlab="Date") barplot(Cl(AAPL), main="Bar Chart of Apple", horiz=TRUE, ylab="Date") AAPL.DF<-data.frame(Date=index(AAPL),coredata(AAPL)) AAPL.DF x = AAPL.DF[,'AAPL.Open'] y = AAPL.DF[,'AAPL.Close'] x = AAPL.DF['AAPL.Open'] y = AAPL.DF['AAPL.Close'] library(ggplot2) ggplot(AAPL, aes(x=AAPL.Open, y=AAPL.Close)) + ggtitle("Scatterplot Open vs Close") + geom_point()Appendix 1 - Transfer Learning with MONK Steps to take1. Create a network - Creating base network - Visualize layers - Creating loss function module - Creating optimizer module [Set learning rates here] - Creating learning rate scheduler 2. Data prepraration - Creating a data transformer - Downloading and storing dataset - Applying transformation - Understanding dataset - Loading the transformed dataset [Set batch size and number of parallel processors here] 3. Setting up data - plotters4. 
Training - Set Epoch - Train model 5. Testing - Just after training - After loading saved weights!git clone https://github.com/Tessellate-Imaging/monk_v1 #!pip install -r monk_v1/installation/requirements_cu9.txt #Select Installation file as per the system import sys sys.path.append("monk_v1/monk/") from pytorch_prototype import prototype # Step 1 - Create experiment ptf = prototype(verbose=1); ptf.Prototype("exp-1", "proj-1"); ''' original code - snippet data_transforms = { 'train': transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = 'cat_dog/' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} batch = 4 parallel_processors = 3 dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch, shuffle=True, num_workers=parallel_processors) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes ''' # Replicating same using Monk ptf.Dataset_Params(dataset_path=["cat_dog/train", "cat_dog/val"], input_size=224, batch_size=4, shuffle_data=True, num_processors=3); # Transform ptf.apply_random_horizontal_flip(train=True); ptf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True); ptf.Dataset(); ''' Original Code Snippet model_ft = models.resnet18(pretrained=True) # Using pretrained models num_ftrs = model_ft.fc.in_features # Adding a new layer to next final layer model_ft.fc = nn.Linear(num_ftrs, 2) model_ft.cuda() ''' # Replicating same using Monk ptf.Model_Params(model_name="resnet18", freeze_base_network=True, use_gpu=True, use_pretrained=True); ptf.Model(); ''' Original Code Snippet cross_entropy_loss = nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) ''' # Replicating same using Monk ptf.optimizer_sgd(0.001); ptf.lr_fixed(); ptf.loss_softmax_crossentropy() ''' Original Code Snippet def train_model(model, criterion, optimizer, num_epochs=25): since = time.time() # A shallow copy constructs a new compound object and then (to the extent possible) inserts # references into it to the objects found in the original. # A deep copy constructs a new compound object and then, recursively, inserts copies into it of # the objects found in the original. best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.cuda() labels = labels.cuda() # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) torch.save(model.state_dict(), "cat_dog_model") # load best model weights model.load_state_dict(best_model_wts) return model model_ft = train_model(model_ft, cross_entropy_loss, optimizer_ft, num_epochs=5) ''' # Replicating using Monk ptf.Training_Params(num_epochs=4, display_progress=True, display_progress_realtime=True, save_intermediate_models=True, intermediate_model_prefix="intermediate_model_", save_training_logs=True); ptf.Train();Training params Num Epochs: 4 Display params Display progress: True Display progress realtime: True Save Training logs: True Save Intermediate models: True Intermediate model prefix: intermediate_model_ Training Start Epoch 1/4 ----------HW06Name: GTid:903461609from sklearn.cluster import KMeans, MeanShift from sklearn.cluster import AgglomerativeClustering import matplotlib.pyplot as plt import pandas as pd %matplotlib inline import numpy as np X=np.array([[1,0], [2,0], [8,0], [9,0], [31,0], [32,0], [38,0], [39,0],]) cluster = AgglomerativeClustering(n_clusters=4, affinity='euclidean', linkage='ward') cluster.fit_predict(X) print(cluster.labels_) plt.scatter(X[:,0],X[:,1], c=cluster.labels_, cmap='rainbow') def distance()Working with Python Pandas and XlsxWriter[Source](https://xlsxwriter.readthedocs.io/working_with_pandas.html) A simple example of converting a Pandas dataframe to an xlsx file using Pandas and XlsxWriter.Copyright 2013-2019, , import pandas as pd # Create a Pandas dataframe from some data. df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]}) # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter') # Convert the dataframe to an XlsxWriter Excel object. df.to_excel(writer, sheet_name='Sheet1') # Close the Pandas Excel writer and output the Excel file. writer.save()In order to apply XlsxWriter features such as Charts, Conditional Formatting and Column Formatting to the Pandas output we need to access the underlying workbook and worksheet objects. After that we can treat them as normal XlsxWriter objects.Continuing on from the above example we do that as follows:# Create a Pandas dataframe from the data. df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]}) # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter') # Convert the dataframe to an XlsxWriter Excel object. df.to_excel(writer, sheet_name='Sheet1') # Get the xlsxwriter objects from the dataframe writer object. 
workbook = writer.book worksheet = writer.sheets['Sheet1']Once we have the Workbook and Worksheet objects, as shown in the previous section, we we can use them to apply other features such as adding a chart:# Get the xlsxwriter objects from the dataframe writer object. workbook = writer.book worksheet = writer.sheets['Sheet1'] # Create a chart object. chart = workbook.add_chart({'type': 'column'}) # Configure the series of the chart from the dataframe data. chart.add_series({'values': '=Sheet1!$B$2:$B$8'}) # Insert the chart into the worksheet. worksheet.insert_chart('D2', chart) writer.save()Validate F+ Lead Scoring 2.0 Model Load Data# Installing Library !pip install pydata_google_auth # Using GBQ shout Out to Hughes import pandas_gbq import pydata_google_auth SCOPES = [ 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/drive', ] credentials = pydata_google_auth.get_user_credentials( SCOPES, auth_local_webserver=False) sql = """ SELECT a.id ,m.name ,m.loan_application_id ,m.lead_type__c ,m.createddate ,m.prequal_submitted_date__c ,m.full_app_submitted_date__c ,m.Loan_Officer_Assigned ,m.date_funded__c ,m.funded_flag ,m.loan_officer_name__c ,m.amount_of_loan_requested__c ,m.first_amount_of_loan_requested ,m.verifiable_annual_income__c ,m.first_income_p1 ,m.co_app_verifiable_annual_income__c ,m.first_income_p2 ,m.co_app ,m.first_coapp ,m.income_sum ,CAST(m.first_income_sum AS INT64) AS first_income_sum ,m.c_LTI ,m.First_LTI ,m.loan_use__c ,m.first_loan_use ,m.employment_status__c ,m.ndi_ratio__c ,m.first_ndi_ratio__c ,m.fico__c ,m.first_FICO ,m.utm_source__c ,m.bcc0300__c ,m.first_bcc0300__c ,a.interest_rate__c ,a.risk_group__c ,a.final_risk_group__c ,a.risk_group_p1__c FROM `ffn-dw-bigquery-prd.Credit_Views.Check_Sales_NPV_Model_Inputs` m LEFT JOIN `freedom-dw.salesforce_ffam.application__c` a ON m.name = a.name WHERE m.createddate BETWEEN '2019-05-01' AND '2019-10-15' """ df1 = pandas_gbq.read_gbq(sql, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard') df = df1 df1.head() df.columns df.columnsModel Test 1 - First touch variables# removed duplicate columns and save the first values firstDf = df.drop(["id", "amount_of_loan_requested__c", "verifiable_annual_income__c", \ "co_app_verifiable_annual_income__c", "co_app", "c_LTI", \ "loan_use__c", "employment_status__c", "ndi_ratio__c", \ "fico__c", "bcc0300__c"], axis=1) firstDf.columnsModel Test 2 - Current model Mount Drive# Load the Drive helper and mount from google.colab import drive # This will prompt for authorization. 
drive.mount('/content/drive') # set working directory import os os.chdir("/content/drive/My Drive/Data Scientist/F+ Lead Scoring Model/Validation/") os.getcwd() lsfinal_df.csv main_leadLevel.py scoringData.py first_df.csv model.py unitEconomicsData.csv Fplus_Lead_Scoring __pycache__/ 'Validation F+ Model.ipynb' full_data.csv Retrain_Fplus.csv library_binning.py score_collector_phase1.pyModule Setupimport numpy as np import pandas as pd import logging import os from sklearn.externals import joblib #dirdata = os.path.join(os.path.dirname(__file__), 'Data') global initialized initialized = False def init(): global fplus_ls_model global unitEconomicsData global initialized if (initialized == True): return try: fplus_ls_model = joblib.load('Fplus_Lead_Scoring') unitEconomicsData = pd.read_csv('unitEconomicsData.csv') #fplus_ls_model = joblib.load(os.path.join(dirdata, 'Fplus_Lead_Scoring')) #unitEconomicsData = pd.read_csv(os.path.join(dirdata, 'unitEconomicsData.csv')) except Exception as e: #print(e) return initialized = True ## initialize Model init() ## doesn't work.... So here we go manual #fplus_ls_model = joblib.load('Data/Fplus_Lead_Scoring') #unitEconomicsData = pd.read_csv('Data/unitEconomicsData.csv') unitEconomicsData.head()Library Binningimport numpy as np import pandas as pd def get_co_app_cat(co_app_income): if pd.isnull(co_app_income): return 0 return 1 def get_loan_use_cat(loan_use): if pd.isnull(loan_use): return 3 loan_use = loan_use.strip() if (loan_use == 'Credit Card Refinancing'): return 4 if (loan_use in ['Major Purchase','Other']): return 2 if (loan_use == 'Auto Purchase'): return 1 return 3 def get_employment_cat(employment_status): if pd.isnull(employment_status): employment_status = '' employment_status = employment_status.strip() if (employment_status == 'Retired'): return 4 if (employment_status in ['Self-employed']): return 2 if (employment_status in ['Other', '']): return 1 return 3 def get_loan_amount_cat(loan_amount): if pd.isnull(loan_amount): return 1 loan_amount = float(loan_amount) if (loan_amount < 15000): return 4 if (loan_amount >= 15000) and (loan_amount < 20000): return 3 if (loan_amount >= 20000) and (loan_amount < 25000): return 2 return 1 def get_mkt_chan_cat(utm_source): if pd.isnull(utm_source): return 3 utm_source = utm_source.strip() if (utm_source in ['creditkarma','nerdwallet']): return 7 if (utm_source in ['credible','experian']): return 6 if (utm_source in ['website', 'google','msn','ck','nerd', '115','save','dm','SLH','201']): return 5 if (utm_source in ['facebook', 'even','uplift','Quinstreet', 'Personalloanpro','113']): return 2 if (utm_source in ['LendEDU', 'monevo','247','sfl']): return 1 return 3 def get_fico(fico): if pd.isnull(fico): return 990 fico = int(fico) if (fico >= 9000): return 990 if fico < 600: return 990 return fico def get_lti(lti): if pd.isnull(lti): return 36 lti = float(lti) if (lti > 35) or (lti < 1): return 36 if (lti >= 1) and (lti < 2): return 35 if (lti >= 2) and (lti < 3): return 34 return np.floor(lti) def get_bcc0300(bcc0300): if pd.isnull(bcc0300): return 99 bcc0300 = int(bcc0300) if (bcc0300 >= 25): return 30 return bcc0300 def get_ndi_ratio(ndi_ratio): if pd.isnull(ndi_ratio): return 5 ndi_ratio = float(ndi_ratio) ndi_ratio = np.floor(ndi_ratio) if (ndi_ratio < 10): return 5 if (ndi_ratio > 75): return 80 return ndi_ratio import numpy as np import pandas as pd from sklearn.linear_model import LogisticRegression import library_binning as lib #import scoringData as sd def score_collector_phase1(lead, debug = 
False): mod_lead = pd.DataFrame() bin_vars = {} error = False error_msg = '' prob_prediction = 0 npv = 0 MODEL_NAME = 'fplus_ls_201902_v1' try: # create binned/transformed variables bin_vars['co_app'] = pd.Series(lib.get_co_app_cat(lead['co_app_verifiable_annual_income'])) bin_vars['loan_use'] = lib.get_loan_use_cat(lead['loan_use']) bin_vars['employment'] = lib.get_employment_cat(lead['employment_status']) bin_vars['loan_amount'] = lib.get_loan_amount_cat(lead['final_loan_amount']) bin_vars['mkt_chan'] = lib.get_mkt_chan_cat(lead['utm_source']) bin_vars['ficox'] = lib.get_fico(lead['fico']) bin_vars['lti'] = lib.get_lti(lead['lti']) bin_vars['bcc0300'] = lib.get_bcc0300(lead['xpn_bcc0300']) bin_vars['ndi'] = lib.get_ndi_ratio(lead['ndi_ratio']) bin_vars['ndisq'] = bin_vars['ndi'] * bin_vars['ndi'] mod_lead = pd.DataFrame(bin_vars) # create dummies cat_vars=['co_app','loan_use','employment','loan_amount','mkt_chan'] for var in cat_vars: cat_list = pd.get_dummies(mod_lead[var], prefix=var) temp=mod_lead.join(cat_list) mod_lead=temp data_vars=mod_lead.columns.values.tolist() to_keep=[i for i in data_vars if i not in cat_vars] mod_lead=mod_lead[to_keep] # print(mod_lead.columns.values) # re-index to have same columns as the model mod_lead = mod_lead.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1', 'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1', 'employment_2', 'employment_3', 'employment_4', 'loan_amount_1', 'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1', 'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6', 'mkt_chan_7'], fill_value=0) # score #prob_prediction = sd.fplus_ls_model.predict_proba(mod_lead)[0][1] I'm changing this to remove sd and just loading scoring model and data already prob_prediction = fplus_ls_model.predict_proba(mod_lead)[0][1] #print(prob_prediction) # calc NPV npv = get_npv_calc(lead['final_loan_amount'], prob_prediction, lead['utm_source']) except Exception as e: # print(e) error = True error_msg = 'Error in Scoring.' 
# add an error flag if (debug): return_scores = { 'bin_lead': mod_lead, 'fuse.score':prob_prediction, 'fuse.npv': npv, 'fuse.model': MODEL_NAME, 'fuse.error_flag': error, 'fuse.error_reason': error_msg } return return_scores # return the dictionary return_scores = { 'fuse.score':prob_prediction, 'fuse.npv': npv, 'fuse.model': MODEL_NAME, 'fuse.error_flag': error, 'fuse.error_reason': error_msg } return return_scores def get_npv_calc(loan_amt,prob_prediction,utm_source): if pd.notnull(utm_source): utm_src = utm_source.strip() utm_src = utm_src.lower() npv = 0 cpa = 0 #if pd.notnull(utm_source) and (utm_src in set(sd.unitEconomicsData['utm_source'])): commenting this out because of the sd and loading it locally # cpa = sd.unitEconomicsData.loc[sd.unitEconomicsData['utm_source'] == utm_src, 'CPA'].values[0] if pd.notnull(utm_source) and (utm_src in set(unitEconomicsData['utm_source'])): cpa = unitEconomicsData.loc[unitEconomicsData['utm_source'] == utm_src, 'CPA'].values[0] if pd.isnull(loan_amt): loan_amt = 0 else: loan_amt = float(loan_amt) npv = prob_prediction * ((loan_amt * 0.0785) - (707.9 + cpa)) return npvMain Lead Levelimport numpy as np import pandas as pd import score_collector_phase1 as sc1 def main_leadlevel(leads, phase=1, provide="scored_lead"): if phase == 1: checked_lead = check_lead_phase1(leads) if checked_lead["error"]: return_value = { "fuse.score" : 0, "fuse.npv" : 0, "fuse.model" : '', "fuse.error_flag": True, "fuse_error_reason" : "Lead Validation Failed." } return return_value error_msg = checked_lead["warning"] return_value = sc1.score_collector_phase1(checked_lead["return_lead"]) if len(error_msg.strip()) > 0: return_value['fuse.error_reason'] = error_msg + ' ' + return_value['fuse.error_reason'] return return_value # Check that the lead has all the required fields # if a value is missing, return warning message def check_lead_phase1(lead): # convert all keys to lower case raw_lead = {k.lower(): v for k, v in lead.items()} error = False return_lead = {} missing_variables = [] error_msg = '' required_fields = ["co_app_verifiable_annual_income","loan_use","employment_status", "final_loan_amount","fico","lti","xpn_bcc0300","ndi_ratio","utm_source"] try: for field in required_fields: if (field not in raw_lead): raw_lead[field] = np.NaN for key in raw_lead: var = returnNaNifEmptyorNaN(raw_lead[key]) return_lead[key] = var if (pd.isnull(return_lead[key])): missing_variables.append(key) if (len(missing_variables)>0): error_msg = ",".join(missing_variables) error_msg = "Imputing values for: " + error_msg except Exception as e: #print(e) error=True return_value = { "return_lead" : return_lead, "warning": error_msg, "error": error } return return_value def returnNaNifEmptyorNaN(variable): if isinstance(variable, int): return variable if isinstance(variable, float): return variable # if null or empty if pd.isnull(variable) or len(variable)<1: return np.NaN if variable.isnumeric(): return variable return variable.strip() lead = {"co_app_verifiable_annual_income":342,"loan_use":'Debt Consolidation', \ "employment_status":'Full-time', "final_loan_amount":25000, "fico":700,\ "lti":25.8, "xpn_bcc0300":42, "ndi_ratio":40, "utm_source":'Lending Tree'} check_lead_phase1(lead)Modelimport scoringData as sd import main_leadLevel as ml import pandas as pd import json sd.init() # Assumption : gets dictionary as a input def run(leads): score = ml.main_leadlevel(leads) #score is dictionary, score['fuse_score'] is dataframe #add scores to the lead if ('co_app_verifiable_annual_income' in 
leads): leads['co_app_verifiable_annual_income']=str(leads['co_app_verifiable_annual_income']) if ('fico' in leads): leads['fico']=str(leads['fico']) if ('LTI' in leads): leads['LTI']=str(leads['LTI']) if ('xpn_bcc0300' in leads): leads['xpn_bcc0300']=str(leads['xpn_bcc0300']) if ('ndi_ratio' in leads): leads['ndi_ratio']=str(leads['ndi_ratio']) if ('final_loan_amount' in leads): leads['final_loan_amount']=str(leads['final_loan_amount']) leads.update(score) return_list = [] return_list.append(leads) return return_list def format_scores_returned(score): # convert everything to a dataframe score = pd.DataFrame.from_dict(score,orient='index').transpose() # convert back to dictionary in list format score = score.to_dict('list') return score run(lead) main_leadlevel(lead) score_collector_phase1(lead) leadNew Score Load Data# load data df = pd.read_csv('full_data.csv') df.head() df.columns lead = {"co_app_verifiable_annual_income":342,"loan_use":'Debt Consolidation', \ "employment_status":'Full-time', "final_loan_amount":25000, "fico":700,\ "lti":18.8, "xpn_bcc0300":42, "ndi_ratio":40, "utm_source":'Lending Tree'}Split Final DF and First DFfinal_var = ['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\ 'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'co_app', 'loan_use__c',\ 'employment_status__c', 'amount_of_loan_requested__c', 'fico__c', \ 'c_LTI', 'bcc0300__c', 'ndi_ratio__c', 'utm_source__c'] first_var = ['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\ 'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'first_coapp', 'first_loan_use',\ 'employment_status__c', 'first_amount_of_loan_requested', 'first_FICO', \ 'First_LTI', 'first_bcc0300__c', 'first_ndi_ratio__c', 'utm_source__c'] final_df = df[final_var] first_df = df[first_var] print(final_df.head()) print(first_df.head())SF_App_Id Funding_Score ... ndi_ratio__c utm_source__c 0 a010f00000Vy85MAAR 9.801852e-08 ... NaN NaN 1 a010f00000WN27oAAD 6.886856e-01 ... 62.3 115 2 a010f00000Tq9b3AAB 8.452948e-02 ... 29.0 115 3 a010f00000WSBY6AAP 5.279838e-01 ... 44.6 115 4 a010f00000WsXcoAAF 4.749825e-01 ... 28.1 115 [5 rows x 16 columns] SF_App_Id Funding_Score ... first_ndi_ratio__c utm_source__c 0 a010f00000Vy85MAAR 9.801852e-08 ... 0.441415 NaN 1 a010f00000WN27oAAD 6.886856e-01 ... 0.457451 115 2 a010f00000Tq9b3AAB 8.452948e-02 ... 0.290281 115 3 a010f00000WSBY6AAP 5.279838e-01 ... 0.446276 115 4 a010f00000WsXcoAAF 4.749825e-01 ... 
0.281249 115 [5 rows x 16 columns]Transform Variables **These Metrics need to be multiplied by 100**c_LTIFirst_LTIfirst_ndi_ratio__c Initial Transformfinal_df['c_LTI'] = final_df['c_LTI']*100 first_df['First_LTI'] = first_df['First_LTI']*100 first_df['first_ndi_ratio__c'] = first_df['first_ndi_ratio__c']*100 first_df.describe() final_df.columnsApply Model Transformations# Transform variables final_df['co_app'] = final_df['co_app'].apply(get_co_app_cat) final_df['loan_use'] = final_df['loan_use__c'].apply(get_loan_use_cat) final_df['employment'] = final_df['employment_status__c'].apply(get_employment_cat) final_df['loan_amount'] = final_df['amount_of_loan_requested__c'].apply(get_loan_amount_cat) final_df['mkt_chan'] = final_df['utm_source__c'].apply(get_mkt_chan_cat) final_df['ficox'] = final_df['fico__c'].apply(get_fico) final_df['lti'] = final_df['c_LTI'].apply(get_lti) final_df['bcc0300'] = final_df['bcc0300__c'].apply(get_bcc0300) final_df['ndi'] = final_df['ndi_ratio__c'].apply(get_ndi_ratio) final_df['ndisq'] = final_df['ndi'] * final_df['ndi'] # Transform variables first_df['co_app'] = first_df['first_coapp'].apply(get_co_app_cat) first_df['loan_use'] = first_df['first_loan_use'].apply(get_loan_use_cat) first_df['employment'] = first_df['employment_status__c'].apply(get_employment_cat) first_df['loan_amount'] = first_df['first_amount_of_loan_requested'].apply(get_loan_amount_cat) first_df['mkt_chan'] = first_df['utm_source__c'].apply(get_mkt_chan_cat) first_df['ficox'] = first_df['first_FICO'].apply(get_fico) first_df['lti'] = first_df['First_LTI'].apply(get_lti) first_df['bcc0300'] = first_df['first_bcc0300__c'].apply(get_bcc0300) first_df['ndi'] = first_df['first_ndi_ratio__c'].apply(get_ndi_ratio) first_df['ndisq'] = first_df['ndi'] * first_df['ndi']/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy """Entry point for launching an IPython kernel. /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/p[...]Dummy Variables Final DFfinal_df.columns # create dummies cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan'] for var in cat_vars: cat_list = pd.get_dummies(final_df[var], prefix=var) temp = final_df.join(cat_list) final_df = temp data_vars = final_df.columns.values.tolist() to_keep = [i for i in data_vars if i not in cat_vars] final_df = final_df[to_keep] final_df.columns # re-index to have same columns as the model final_df_indexed = final_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1', 'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1', 'employment_2', 'employment_3', 'employment_4', 'loan_amount_1', 'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1', 'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6', 'mkt_chan_7'], fill_value=0) prob_prediction = fplus_ls_model.predict_proba(final_df_indexed)[0][1] final_df['New_Funding_Score'] = fplus_ls_model.predict_proba(final_df_indexed)[:,1] final_df['New_NPV_Score'] = final_df.apply(lambda row : get_npv_calc(row['amount_of_loan_requested__c'], row['New_Funding_Score'], row['utm_source__c']), axis = 1)/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copyFirst DFfirst_df.columns # create dummies cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan'] for var in cat_vars: cat_list = pd.get_dummies(first_df[var], prefix=var) temp = first_df.join(cat_list) first_df = temp data_vars = first_df.columns.values.tolist() to_keep = [i for i in data_vars if i not in cat_vars] first_df = first_df[to_keep] first_df.columns # re-index to have same columns as the model first_df_indexed = first_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1', 'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1', 'employment_2', 'employment_3', 'employment_4', 'loan_amount_1', 'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1', 'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6', 'mkt_chan_7'], fill_value=0)Predictfirst_df['First_Funding_Score'] = fplus_ls_model.predict_proba(first_df_indexed)[:,1] first_df['New_NPV_Score'] = first_df.apply(lambda row : get_npv_calc(row['first_amount_of_loan_requested'], row['First_Funding_Score'], row['utm_source__c']), axis = 1) first_df.to_csv('first_df.csv', index=False) final_df.to_csv('final_df.csv', index=False) ['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\ 'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'co_app', 'loan_use__c',\ 'employment_status__c', 'amount_of_loan_requested__c', 'fico__c', \ 'c_LTI', 'bcc0300__c', 'ndi_ratio__c', 'utm_source__c']Austen Data# Using GBQ shout Out to Hughes import pandas_gbq import pydata_google_auth SCOPES = [ 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/drive', ] credentials = pydata_google_auth.get_user_credentials( SCOPES, auth_local_webserver=False) sql = """ SELECT id , co_app_verifiable_annual_income__c , loan_use__c , employment_status__c , amount_of_loan_requested__c , fico__c , lti__c , bcc0300__c , ndi_ratio__c , utm_source__c FROM 
`freedom-dw.salesforce_ffam.application__c` a WHERE createddate >= '2019-01-01' """ df1 = pandas_gbq.read_gbq(sql, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard') df = df1 final_df = df final_df.head() # Transform variables final_df['co_app'] = final_df['co_app_verifiable_annual_income__c'].apply(get_co_app_cat) final_df['loan_use'] = final_df['loan_use__c'].apply(get_loan_use_cat) final_df['employment'] = final_df['employment_status__c'].apply(get_employment_cat) final_df['loan_amount'] = final_df['amount_of_loan_requested__c'].apply(get_loan_amount_cat) final_df['mkt_chan'] = final_df['utm_source__c'].apply(get_mkt_chan_cat) final_df['ficox'] = final_df['fico__c'].apply(get_fico) final_df['lti'] = final_df['lti__c'].apply(get_lti) final_df['bcc0300'] = final_df['bcc0300__c'].apply(get_bcc0300) final_df['ndi'] = final_df['ndi_ratio__c'].apply(get_ndi_ratio) final_df['ndisq'] = final_df['ndi'] * final_df['ndi'] # create dummies cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan'] for var in cat_vars: cat_list = pd.get_dummies(final_df[var], prefix=var) temp = final_df.join(cat_list) final_df = temp data_vars = final_df.columns.values.tolist() to_keep = [i for i in data_vars if i not in cat_vars] final_df = final_df[to_keep] final_df.info() # re-index to have same columns as the model final_df_indexed = final_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1', 'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1', 'employment_2', 'employment_3', 'employment_4', 'loan_amount_1', 'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1', 'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6', 'mkt_chan_7'], fill_value=0) final_df_indexed.info() prob_prediction = fplus_ls_model.predict_proba(final_df_indexed)[0][1] final_df['New_Funding_Score'] = fplus_ls_model.predict_proba(final_df_indexed)[:,1] final_df['New_NPV_Score'] = final_df.apply(lambda row : get_npv_calc(row['amount_of_loan_requested__c'], row['New_Funding_Score'], row['utm_source__c']), axis = 1) df = final_df[['id', 'amount_of_loan_requested__c', 'utm_source__c', 'New_Funding_Score', 'New_NPV_Score']] df.head() df.info() sql_1 = """ SELECT application_key FROM `ffam-data-platform.standardized_data.fplus_application` WHERE new_lead_datetime >= '2019-01-01' AND flag_eligible_lead = TRUE AND lead_type = 'Web' AND latest_prequal_decision <> 'DECLINED' """ good_leads = pandas_gbq.read_gbq(sql_1, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard') good_leads.info() good_leads1 = good_leads.merge(df, left_on='application_key', right_on='id') good_leads1 = good_leads1.drop(columns=['application_key']) good_leads1.info() good_leads2 = good_leads1.drop_duplicates(subset=['id'], keep='first') good_leads2.info() Int64Index: 697536 entries, 0 to 697535 Data columns (total 5 columns): id 697536 non-null object amount_of_loan_requested__c 697536 non-null float64 utm_source__c 697536 non-null object New_Funding_Score 697536 non-null float64 New_NPV_Score 697536 non-null float64 dtypes: float64(3), object(2) memory usage: 31.9+ MBSend Back to GBQ#set destination table to insert data into destinationtable = 'Jason.Austen_NPV_Fplus' project_id='ffn-dw-bigquery-prd' #send data to GBQ (pandas_gqb.to_gbq) #https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq #params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type) 
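# Note: pandas_gbq.to_gbq accepts if_exists = 'fail', 'replace', or 'append';
# 'append' (used below) adds rows to the existing table rather than overwriting it.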
pandas_gbq.to_gbq(dataframe = good_leads1, destination_table = destinationtable, project_id = project_id, if_exists = 'append', table_schema = [{'name':'id', 'type':'STRING'}, {'name':'amount_of_loan_requested__c', 'type':'FLOAT'}, {'name':'utm_source__c', 'type':'STRING'}, {'name':'New_Funding_Score', 'type':'FLOAT'}, {'name':'New_NPV_Score', 'type':'FLOAT'}])0it [00:00, ?it/s] 1it [00:06, 6.88s/it] Shit for Aleks# load data import pandas as pd df = pd.read_csv('Retrain_Fplus.csv') df.head() df1 = df.dropna() df1.info() #set destination table to insert data into destinationtable = 'Jason.retrain_fplus' project_id='ffn-dw-bigquery-prd' #send data to GBQ (pandas_gqb.to_gbq) #https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq #params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type) pandas_gbq.to_gbq(dataframe = df1, destination_table = destinationtable, project_id = project_id, if_exists = 'append', table_schema = [ {'name':'SF_App_Id', 'type':'STRING'}, {'name':'Funding', 'type':'INTEGER'}, {'name':'first_coapp', 'type':'INTEGER'}, {'name':'first_loan_use', 'type':'STRING'}, {'name':'employment_status__c', 'type':'STRING'}, {'name':'first_amount_of_loan_requested', 'type':'INTEGER'}, {'name':'first_FICO', 'type':'INTEGER'}, {'name':'First_LTI', 'type':'FLOAT'}, {'name':'first_bcc0300__c', 'type':'INTEGER'}, {'name':'first_ndi_ratio__c', 'type':'FLOAT'}, {'name':'utm_source__c', 'type':'INTEGER'}, {'name':'ficox', 'type':'INTEGER'}, {'name':'lti', 'type':'INTEGER'}, {'name':'bcc0300', 'type':'INTEGER'}, {'name':'ndi', 'type':'INTEGER'}, {'name':'ndisq', 'type':'INTEGER'}, {'name':'co_app_0', 'type':'INTEGER'}, {'name':'co_app_1', 'type':'INTEGER'}, {'name':'loan_use_1', 'type':'INTEGER'}, {'name':'loan_use_2', 'type':'INTEGER'}, {'name':'loan_use_3', 'type':'INTEGER'}, {'name':'loan_use_4', 'type':'INTEGER'}, {'name':'employment_1', 'type':'INTEGER'}, {'name':'employment_2', 'type':'INTEGER'}, {'name':'employment_3', 'type':'INTEGER'}, {'name':'employment_4', 'type':'INTEGER'}, {'name':'loan_amount_1', 'type':'INTEGER'}, {'name':'loan_amount_2', 'type':'INTEGER'}, {'name':'loan_amount_3', 'type':'INTEGER'}, {'name':'loan_amount_4', 'type':'INTEGER'}, {'name':'mkt_chan_1', 'type':'INTEGER'}, {'name':'mkt_chan_2', 'type':'INTEGER'}, {'name':'mkt_chan_3', 'type':'INTEGER'}, {'name':'mkt_chan_5', 'type':'INTEGER'}, {'name':'mkt_chan_6', 'type':'INTEGER'}, {'name':'mkt_chan_7', 'type':'INTEGER'}]) #set destination table to insert data into destinationtable = 'Jason.retrain' project_id='ffn-dw-bigquery-prd' #send data to GBQ (pandas_gqb.to_gbq) #https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq #params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type) pandas_gbq.to_gbq(dataframe = df1, destination_table = destinationtable, project_id = project_id, if_exists = 'append', table_schema = [ {'name':'SF_App_Id', 'type':'STRING'}, {'name':'Funding', 'type':'INTEGER'}, {'name':'first_coapp', 'type':'INTEGER'}, {'name':'first_loan_use', 'type':'STRING'}, {'name':'employment_status__c', 'type':'STRING'}, {'name':'first_amount_of_loan_requested', 'type':'INTEGER'}, {'name':'first_FICO', 'type':'INTEGER'}, {'name':'First_LTI', 'type':'FLOAT'}, {'name':'first_bcc0300__c', 'type':'INTEGER'}, {'name':'first_ndi_ratio__c', 'type':'FLOAT'}, {'name':'utm_source__c', 'type':'INTEGER'}, {'name':'ficox', 'type':'INTEGER'}, {'name':'lti', 
'type':'INTEGER'}, {'name':'bcc0300', 'type':'INTEGER'}, {'name':'ndi', 'type':'INTEGER'}, {'name':'ndisq', 'type':'INTEGER'}, {'name':'co_app_0', 'type':'INTEGER'}, {'name':'co_app_1', 'type':'INTEGER'}, {'name':'loan_use_1', 'type':'INTEGER'}, {'name':'loan_use_2', 'type':'INTEGER'}, {'name':'loan_use_3', 'type':'INTEGER'}, {'name':'loan_use_4', 'type':'INTEGER'}, {'name':'employment_1', 'type':'INTEGER'}, {'name':'employment_2', 'type':'INTEGER'}, {'name':'employment_3', 'type':'INTEGER'}, {'name':'employment_4', 'type':'INTEGER'}, {'name':'loan_amount_1', 'type':'INTEGER'}, {'name':'loan_amount_2', 'type':'INTEGER'}, {'name':'loan_amount_3', 'type':'INTEGER'}, {'name':'loan_amount_4', 'type':'INTEGER'}, {'name':'mkt_chan_1', 'type':'INTEGER'}, {'name':'mkt_chan_2', 'type':'INTEGER'}, {'name':'mkt_chan_3', 'type':'INTEGER'}, {'name':'mkt_chan_5', 'type':'INTEGER'}, {'name':'mkt_chan_6', 'type':'INTEGER'}, {'name':'mkt_chan_7', 'type':'INTEGER'}])This is an example of how to use the sinusoid_fitting code.import sinusoid_fitting reload(sinusoid_fitting) import numpy as npLoad Data (a sinusoidal signal)time, signal = np.loadtxt('../data/2015-05-21/ALL0001/F0001CH1.CSV', delimiter=',', usecols=[3, 4], unpack=True)Fit Datafit_parameters, fit_stds, covariance = sinusoid_fitting.fit_signal(time, signal)Fit Parametersprint fit_parameters fit_parameters['amplitude']Fit Uncertaintyprint fit_stds{'phase': 0.00082947952786513739, 'frequency': 3.2737319954604396, 'amplitude': 0.00019508874694828298, 'offset': 0.00013940592080727326}Save a plot into a dated output directorysinusoid_fitting.plot_signal_vs_fit(time, signal, fit_parameters, '1'.zfill(4))Display the plotfrom IPython.display import Image Image(filename='../output/2015-06-16/0001.png')Starting with a 1-indexed array of zeros and a list of operations, for each operation add a value to each the array element between two given indices, inclusive. Once all operations have been performed, return the maximum value in the array.def main(): n, m = map(int, input().split()) xs = [0] * (n + 2) for _ in range(m): a, b, k = map(int, input().split()) xs[a] += k xs[b + 1] -= k answer = 0 current = 0 for x in xs: current += x answer = max(answer, current) print(answer) if __name__ == '__main__': main() import math import os import random import re import sys # Complete the arrayManipulation function below. def arrayManipulation(n, queries): res = [0]*(n+1) for row in range(len(queries)): a = queries[row][0] b = queries[row][1] k = queries[row][2] res[a-1] += k res[b] -= k sm = 0 mx = 0 for i in range(len(res)): sm += res[i] if sm > mx: mx = sm return mx if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') nm = input().split() n = int(nm[0]) m = int(nm[1]) queries = [] for _ in range(m): queries.append(list(map(int, input().rstrip().split()))) result = arrayManipulation(n, queries) fptr.write(str(result) + '\n') fptr.close()DFT + GWBSE Energy Calculation Using CH4 Introduction This tutorial explains how to perform calculation to predict electronic excitation using the **GWBSE** method. See [the GW Compendium: A Practical Guide to Theoretical Photoemission Spectroscopy](https://doi.org/10.3389/fchem.2019.00377), for an excellent introduction to the method. 
Requirements* You will need to install **VOTCA** using the instructions described [here](https://github.com/votca/votca/blob/master/share/doc/INSTALL.rst)* Once the installation is completed you need to activate the VOTCA enviroment by running the `VOTCARC.bash` script that has been installed at the bin subfolder for the path that you have provided for the installation step above Interacting with the XTP command line interfaceTo run a DFT-GWBSE calculation we will use the [xtp_tools](https://www.votca.org/xtp/xtp_tools_overview.html) calculator. Run the following command to view the help message of `xtp_tools`:!xtp_tools --helpNote> * In Jupyter the `!` symbol means: *run the following command as a standard unix command*> * In Jupyter the command `%env` set an environmental variable Running a calculation with the default optionsTo run a DFT-GWBSE calculation we just need to provide the path to the file in XYZ with the molecular coordinates. Check the [dftgwbse defaults](https://www.votca.org/xtp/dftgwbse.html) for further information.!xtp_tools -c job_name=methane -t 2 -e dftgwbse > dftgwbse.logThe previous command will run the DFT-GWBSE calculation using the aforementioned defaults and the results are store in the *Current Work Directory* in a file named `methane_summary.xml`. The `-c` option is important and we will come back to it later. It allows changing options form the command line. Running a calculation using your own input fileLet create a folder to store the input `options` for XTP and use the `-p` option to print an option file, specified by `-o`, with all the options so we can modify it afterwards!mkdir -p OPTIONFILES !xtp_tools -p dftgwbse -o OPTIONFILES/dftgwbse.xmlYou should have a *XML* file with the DFTWGSE options that looks like!head -n 10 OPTIONFILES/dftgwbse.xmlSome options are labelled as `OPTIONAL`, either fill them in or delete them if you do not want that functionality We created a small options file!cat dftgwbse2.xml !xtp_tools -o dftgwbse2.xml -t 2 -e dftgwbse > dftgwbse2.logXTP will automatically compare the default values with the user-provided and overwrites the defaults with the user input. Also, If I given property does not have a default value you can provide one using the XML file described above. Partial ChargesWe can compute now the partial charges using the `CHELPG` method by default. For more information see the [partialcharges documentation](https://www.votca.org/xtp/partialcharges.html). Once again, we only need to provide the name of the system to compute, which in our case is `methane`.!xtp_tools -c job_name=methane -e partialchargesSpectrum CalculationFinally, lets compute a convolution of the singlet spectrum using a gaussian function. For doing so, we will modify the default values for the [spectrum calculator](https://www.votca.org/xtp/spectrum.html) to compute the spectrum between 9 and 25 eV, using 1000 points in that energy range. We will use the `-c` option to modify the options accordingly. Instead we could have printed out an options file using the `xtp_tools -p spectrum` command and then modify the entries accordingly and then read them in using the `-o` option.!xtp_tools -c job_name=methane lower=9 upper=25 points=1000 -e spectrumThe results are stored in the `methane_spectrum.dat` file. (Optional) Plot the spectrum We will use [matplotlib](https://matplotlib.org/), [seaborn](https://seaborn.pydata.org/) and [pandas](https://pandas.pydata.org/) libraries to plot the spectrum. 
You can install it using [pip](https://pip.pypa.io/en/stable/) like!pip install seaborn --user import pandas as pd import matplotlib.pyplot as plt import seaborn as sns columns = ["E(eV)", "epsGaussian","IM(eps)Gaussian", "epsLorentz", "Im(esp)Lorentz"] df = pd.read_table("methane_spectrum.dat", comment="#", sep='\s+',names=columns) sns.relplot(x="E(eV)", y="epsGaussian", ci=None, kind="line", data=df) plt.plot()Flairfrom google.colab import drive drive.mount('/content/gdrive') import os os.chdir( "/content/gdrive/MyDrive/flair" ) pip install flair from flair.data import Corpus from flair.datasets import WIKINER_ENGLISHNext, we create __wikiner_corpus__, an instance of the class __Corpus__.* WikiNwr is a NER dataset automatically generated from WikipediaRead [here](https://github.com/flairNLP/flair/blob/master/resources/docs/TUTORIAL_6_CORPUS.md) the documentation of __Corpus__.__Question 1__: explain, what the __WIKINER__ corpus is.Then, we create __tag_dictionary__ which is an __BILUO__-__NER__-encoding.# 1. get the corpus wikiner_corpus: Corpus = WIKINER_ENGLISH().downsample(0.1) print(wikiner_corpus) # 3. make the tag dictionary from the corpus tag_dictionary = Corpus.make_tag_dictionary( wikiner_corpus, tag_type='ner') print(tag_dictionary.idx2item) print(wikiner_corpus.train[73]) print(wikiner_corpus.train[73].to_tagged_string())Sentence: "His mother , Maia , had been secretly impregnated by Zeus ." [− Tokens: 12 − Token-Labels: "His mother , <,> Maia , <,> had been secretly impregnated by Zeus . <.>"] His mother , <,> Maia , <,> had been secretly impregnated by Zeus . <.>Ok, above, we loaded a corpus, a collection of texts, and with this collection the annotation of these texts.Next, we load the data, we prepared using Spacy.from flair.data_fetcher import NLPTaskDataFetcher downsample = 1.0 # 1.0 is full data, try a much smaller number like 0.01 to test run the code data_folder = os.getcwd() columns = {0: 'text', 1: 'ner'} # 1. get the corpus corpus: Corpus = NLPTaskDataFetcher.load_column_corpus(data_folder, columns, train_file='training_data.csv', test_file='test_data.csv', dev_file=None).downsample(downsample) print(corpus) # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type='ner') print(tag_dictionary.idx2item)2021-06-02 07:11:44,479 Reading data from /content/gdrive/My Drive/flair 2021-06-02 07:11:44,480 Train: /content/gdrive/My Drive/flair/training_data.csv 2021-06-02 07:11:44,483 Dev: None 2021-06-02 07:11:44,486 Test: /content/gdrive/My Drive/flair/test_data.csv__Question 2__: what is the difference between __tag_dictionary__ created in the cell above, and __tag_dictionary__ created before that.Next, we take the first sentence from the test data, and annotate this sentence using __to_tagged_string__.for sent in corpus.test: print(sent.to_tagged_string()) break Manager Mumbai , Maharashtra - Email me on Indeed : indeed.com/r/Harinath-Rudra/7c4ee202549ec8f0 ``` ASSIGNMENT METADATAassignment_id: "NlpIntro"``` Extracting and counting bigramsWrite a function that counts the occurrences of bigrams within some given text, and returns this in a dictionary. Each word is separated by a space (`' '`). 
Example

When given an input string

```py
print(count_bigrams('hello there i am a friendly robot saying there i am'))
```

the function `count_bigrams` should print the following dictionary:

```py
{
    'a friendly': 1,
    'am a': 1,
    'friendly robot': 1,
    'hello there': 1,
    'i am': 2,
    'robot saying': 1,
    'saying there': 1,
    'there i': 2,
}
```

``` EXERCISE METADATA
exercise_id: "CountBigrams"
```

%%solution
def count_bigrams(text):
    """ # BEGIN PROMPT
    pass
    """ # END PROMPT
    # BEGIN SOLUTION
    words = text.split(' ')
    i = 0
    bigrams = {}
    while i < len(words) - 1:
        current_bigram = []
        for j in range(i, i + 2):
            current_bigram.append(words[j])
        concat_bigram = ' '.join(current_bigram)
        if concat_bigram in bigrams:
            bigrams[concat_bigram] += 1
        else:
            bigrams[concat_bigram] = 1
        i += 1
    return bigrams
    # END SOLUTION

%%studenttest CountBigramsStudentTest
assert len(count_bigrams('hello there')) == 1
assert count_bigrams('hello there')['hello there'] == 1
assert count_bigrams('yes or yes or')['yes or'] == 2
assert count_bigrams('yes or yes or')['or yes'] == 1

%%inlinetest CountBigramsAutograderTest
try:
    count_bigrams
except:
    assert False, "Did you define count_bigrams?"
try:
    count_bigrams('hello world')
except Exception as e:
    assert False, "count_bigrams() returned error: " + str(e)
assert len(count_bigrams('hello')) == 0, 'Are you creating bigrams with less than two words?'
assert 'hello there' in count_bigrams('hello there'), 'Are you joining words back together correctly?'
assert count_bigrams('hello there hello there')['hello there'] == 2, 'Are you correctly counting the bigrams?'

result, log = %autotest CountBigramsAutograderTest
from IPython.core import display
display.display(report(CountBigramsAutograderTest, results=result.results))
assert(result.results['passed'])

# Uncomment and run the following line for autograding your solution:
#Submit('CountBigrams')

Extracting n-grams

Write a function that counts the occurrences of n-grams within some given text, and returns this in a dictionary. These are more general than bigrams, and contain n words each. These can be useful for longer phrases. Each word is separated by a space (' ').

Example

```py
"""Prints the following dictionary:
{
    'i am a': 1,
    'robot saying there': 1,
    'am a friendly': 1,
    'hello there i': 1,
    'there i am': 2,
    'friendly robot saying': 1,
    'saying there i': 1,
    'a friendly robot': 1,
}"""
print(count_ngrams('hello there i am a friendly robot saying there i am', 3))
```

``` EXERCISE METADATA
exercise_id: "CountNgrams"
```

%%solution
def count_ngrams(text, n):
    """ # BEGIN PROMPT
    pass
    """ # END PROMPT
    # BEGIN SOLUTION
    words = text.split(' ')
    i = 0
    ngrams = {}
    while i < len(words) - (n - 1):
        current_ngram = []
        for j in range(i, i + n):
            current_ngram.append(words[j])
        concat_ngram = ' '.join(current_ngram)
        if concat_ngram in ngrams:
            ngrams[concat_ngram] += 1
        else:
            ngrams[concat_ngram] = 1
        i += 1
    return ngrams
    # END SOLUTION

%%studenttest CountNgramsStudentTest
assert len(count_ngrams('hello there', 2)) == 1
assert len(count_ngrams('hello there', 3)) == 0
assert count_ngrams('yes or yes', 3)['yes or yes'] == 1
assert count_ngrams('yes or yes', 1)['yes'] == 2

%%inlinetest CountNgramsAutograderTest
try:
    count_ngrams
except:
    assert False, "Did you define count_ngrams?"
try:
    count_ngrams('hello world', 3)
except Exception as e:
    assert False, "count_ngrams() returned error: " + str(e)
assert len(count_ngrams('hello hello hello', 2)) == 1, 'Are you creating ngrams correctly?
Expected to see only a bigram "hello hello" with the input "hello hello hello", but got [%s]' % (",".join(count_ngrams('hello hello hello', 2).keys())) assert len(count_ngrams('hello there', 3)) == 0, 'Are you creating ngrams with fewer than n words?' assert count_ngrams('yes yes yes', 1)['yes'] == 3, 'Are you correctly counting the ngrams?' result, log = %autotest CountNgramsAutograderTest from IPython.core import display display.display(report(CountNgramsAutograderTest, results=result.results)) assert(result.results['passed']) # Use this cell to submit your exercise to autograder. Uncomment the following line: #Submit('CountNgrams')Exercise 3: [introduction]this section takes the japanese tokenizer, the n-gram counter, and the wikipdia articles, and the similarity score function, and glues it together to see how the similarity result differs when you use n-grams vs. single words#@title MeCabのインストールとインポート !pip install mecab-python3 # GLOBAL CONTEXT import MeCab # 日本語文章をトークン化する関数 def tokenize_japanese(sentence): tagger = MeCab.Tagger() text = sentence node = tagger.parseToNode(text) words = [] while(node): if node.surface != "": words.append(node.surface) node = node.next if node is None: break return words # GLOBAL CONTEXT from math import sqrt # ベクターのコサイン類似度の計算する関数 # article1とarticle2のフォマットは{word: frequency} def cosine_similarity(article1, article2): numerator_sum = 0 # 共通の単語のみの足し算を計算する for word1 in article1: for word2 in article2: if word1 == word2: numerator_sum += article1[word1]*article2[word2] # 各ベクターの全ての単語の二乗した出現回数を足し算する article1_squared_sum = 0 for word in article1: article1_squared_sum += article1[word]**2 article2_squared_sum = 0 for word in article2: article2_squared_sum += article2[word]**2 return numerator_sum / (sqrt(article1_squared_sum)*sqrt(article2_squared_sum))---#@title ウィキペディア記事「Python」をpython_article変数に読み込む python_article = """ Python Python(パイソン)は、汎用のプログラミング言語である。コードがシンプルで扱いやすく設計されており、C言語などに比べて、さまざまなプログラムを分かりやすく、少ないコード行数で書けるといった特徴がある。 文法を極力単純化してコードの可読性を高め、読みやすく、また書きやすくしてプログラマの作業性とコードの信頼性を高めることを重視してデザインされた、汎用の高水準言語である。 核となる本体部分は必要最小限に抑えられている。一方で標準ライブラリやサードパーティ製のライブラリ、関数など、さまざまな領域に特化した豊富で大規模なツール群が用意され、インターネット上から無料で入手でき、自らの使用目的に応じて機能を拡張してゆくことができる。 またPythonは多くのハードウェアとOS (プラットフォーム) に対応しており、複数のプログラミングパラダイムに対応している。Pythonはオブジェクト指向、命令型、手続き型、関数型などの形式でプログラムを書くことができる。動的型付け言語であり、参照カウントベースの自動メモリ管理(ガベージコレクタ)を持つ。 これらの特性によりPythonは広い支持を獲得し、Webアプリケーションやデスクトップアプリケーションなどの開発はもとより、システム用の記述 (script) や、各種の自動処理、理工学や統計・解析など、幅広い領域における有力なプログラム言語となった。プログラミング作業が容易で能率的であることは、ソフトウェア企業にとっては投入人員の節約、開発時間の短縮、ひいてはコスト削減に有益であることから、産業分野でも広く利用されている。Googleなど主要言語に採用している企業も多い。 Pythonのリファレンス実装であるCPythonは、フリーかつオープンソースのソフトウェアであり、コミュニティベースの開発モデルを採用している。CPythonは、非営利団体であるPythonソフトウェア財団が管理している。その他の実装としては、PyPyやIronPythonなどが有名である。 Pythonは、オランダ人のグイド・ヴァンロッサムが開発した。名前の由来は、イギリスのテレビ局 BBC が製作したコメディ番組『空飛ぶモンティ・パイソン』である。Pythonという英単語が意味する爬虫類のニシキヘビがPython言語のマスコットやアイコンとして使われている。 Pythonはインタプリタ上で実行することを前提に設計している。以下の特徴をもっている: Pythonの最初のバージョンはAmoeba上で開発された。のちに多くの計算機環境上で動作するようになった。 Pythonには複数の実装が存在する。 Python のリリースは全てオープンソースであり、PSF (Python Software Foundationライセンス)として配布されている。これはGPL互換であるが、GPLと異なり、変更したバージョンを配布する際に変更をオープンソースにしなくてもよい。 元々はAmoebaの使用言語であるABC言語に例外処理やオブジェクト指向を対応させるために作られた言語である。 1991年にヴァンロッサムがPython 0.90のソースコードを公開した。この時点ですでにオブジェクト指向言語の特徴である継承、クラス、例外処理、メソッドやさらに抽象データ型である文字列、リストの概念を利用している。これはModula-3のモジュールを参考にしていた。 1994年1月、Python 1.0を公開した。主な特徴として関数型言語の基本であるラムダ計算を実装、map関数、reduce関数などを組み込んだ。 バージョン1.4ではCommon Lispにある機能とよく似たキーワード引数を導入した。また簡易ながら名前修飾を用いたカプセル化も実装した。 
2000年に公開。ガベージコレクションやUnicode、リストを導入した。一躍メジャーな言語となった。多くの機能はHaskellを参考にして導入している。 2.6以降のバージョンには、2.xから3.xへの移植を助ける「2to3 ツール」と「lib2to3 モジュール」を含んでいる。2.xのサポートは2020年までとされている。 2008年、長い試験期間を経てPython 3.0が公開された。 開発初期には、西暦3000年に公開予定の理想のPythonとして、Python 3000と呼んでいた。Py3Kと略すこともある。 しかし2.xとの後方互換性が損なわれている。当初は2.xに比べて3.xが利用できるライブラリ等が著しく少ないという問題点があったが、Djangoなど徐々に3.xに対応したフレームワークやライブラリなどが増えていったこともあり、2016年時点においては新規のプロジェクトについて3.xで開発することが多くなっている。 2015年11月にリリースされたFedora 23や2016年4月にリリースされたUbuntu 16.04 LTSでは、デフォルトでインストールされるPythonのバージョンが2.xから3.xに変更されている。 Pythonには、読みやすく、それでいて効率もよいコードをなるべく簡単に書けるようにするという思想が浸透しており、Pythonコミュニティでも単純で短いコードをよしとする傾向が強い。 Pythonの本体は、ユーザがいつも必要とする最小限の機能のみを提供する。基本機能以外の専門機能や拡張プログラムはインターネット上にライブラリとして提供されており、別途ダウンロードして保存し、必要なツールはこのツールキットからその都度呼び出して使用する。 またPythonでは、Perlの「あることを行うのに1つ以上の方法がある」という哲学とは逆に、ある動作をさせる方法は、基本的に1通りしかないように作られている。そのためPythonのプログラムは、誰が書いてもだいたいどれも同じようなコードになり、作成者以外が見ても動作を把握しやすい。また、Pythonではプログラムの文書化(ソフトウェアドキュメンテーション)が重視されており、言語の基本機能の一部となっている。 インデント(「オフサイドルール」)が特徴的である。 以下に、階乗を題材にC言語と比較した例を示す。 Pythonのコード: わかりやすく整形されたC言語のコード: この例では、Pythonと整形されたC言語とでは、プログラムコードの間に違いがほとんど見られない。しかし、C言語のインデントはルール(構文規則上のもの)ではなく、単なるコンベンション(コーディングスタイル)でしかない。そのためC言語では全く同じプログラムを以下のように書くこともできる。 わかりにくいC: Pythonではインデントは構文規則として決められているため、こうした書き方は不可能である。Pythonではこのような強制を課すことによって、プログラムのスタイルがその書き手にかかわらずほぼ統一したものになり、その結果読みやすくなるという考え方が取り入れられている。これについては賛否両論があり、批判的立場の人々からは、これはプログラマがスタイルを選ぶ自由を制限するものだ、という意見も出されている。 インデントによる整形は、単に「見かけ」だけではなく品質そのものにも関係する。例として次のコードを示す。 間違えたC: このコードはC言語の構文規則上は問題無いが、インデントによる見かけのifの範囲と、言語仕様によるifの実際の範囲とが異なっているため、プログラマの意図が曖昧になる。この曖昧さは、検知しにくいバグを生む原因になる。 ソースコードを読む際、多くの人はインデントのような空白によって明確に整列されたコードを目安として読み、コンパイラのように構文解析しながらソースを読むものではない。その結果、一見しただけでは原因を見つけられないバグを作成する危険がある。 Pythonではインデントをルールとすることにより、人間が目視するソースコードの理解とコンパイラの構文解析の間の誤差を少なくすることで、より正確に意図した通りにコーディングすることができると主張されている。 Pythonのデータは動的に型付けされる。値自身が型を持っており、変数はすべて値への参照である。 基本的なデータ型として、整数型・多倍長整数型・浮動小数点数型・複素数型・文字列型・Unicode文字列型・論理型、そして関数型がある。多倍長整数型は(メモリの許す限り)無制限の桁数で整数計算が可能である。 さらに組み込みのコンテナ型として、リスト型、タプル型、辞書型(連想配列)のほか、値の重複を許さない集合型(Python 2.3以降)がある。 Python 3.x以降では、整数型が多倍長整数型と統合され、従来の文字列型とUnicode文字列型に代わり、バイト列型と文字列型が導入された。 リスト型および辞書型は内部の値をあとから変えられる(、変更可能)が、タプル型は一度構築したら内部の値は変わらない(、変更不能)。タプル型とリスト型は、多くのプログラミング言語では配列と呼ばれるものに類似している。しかし、Pythonではタプル型は辞書のキーとして使うことができるが、リスト型は内容が変わるため辞書のキーとして使うことはできないという理由から、これら2つの型を区別している。集合型には変更可能なものと変更不能なものの2種類がある。 多くのオブジェクト指向プログラミング言語と同様、Pythonではユーザが新しく自分の型を定義することも可能である。この場合、組み込み型を含む既存の型を継承して新たな型(クラス)を定義する事も、ゼロから全く新しい型を作り出す事も出来る。 Pythonは基本的にメソッドや関数の引数に型を指定する必要がないので、内部で必要とする演算子やメソッドに対応していれば、関数やオブジェクトの設計時点で意図していなかったオブジェクトを引き渡すことも可能である(いわゆるダック・タイピングが可能)。 Pythonはガベージコレクションを内蔵しており、参照されなくなったオブジェクトは自動的にメモリから破棄される。CPythonでは、ガベージコレクションの方式として参照カウント方式とマーク・アンド・スイープ方式を併用している。マーク・アンド・スイープ方式のみに頼っている言語では、オブジェクトがいつ回収されるか保証されないので、ファイルのクローズなどをデストラクタに任せることができない。CPythonは参照カウント方式を併用することで、循環参照が発生しない限り、オブジェクトはスコープアウトした時点で必ずデストラクトされることを保証している。なおJythonおよびIronPythonではマーク・アンド・スイープ方式を採用しているため、スコープアウトした時点で必ずデストラクトされることが前提のコードだとJythonやIronPythonでは正しく動かない。 イテレータを実装するためのジェネレータが言語仕様に組み込まれており、Pythonでは多くの場面でイテレータを使うように設計されている。イテレータの使用はPython全体に普及していて、プログラミングスタイルの統一性をもたらしている。 Pythonでは扱えるデータの全てがオブジェクトである。単純な数値といった基本的なデータ型をはじめ、組み込みのコンテナ型、組み込み関数など、これらは全て統一的な継承関係をもつオブジェクトであり「型」をもっている。これらの組み込み型とユーザ定義型は区別されず、組み込み型を継承したクラスを定義できる。上の「データ型」の項で述べたように Pythonは静的な型チェックを持たないため、Javaのようなインターフェイスという言語上の仕組みは必要とされない。 クラスの継承 () メカニズムでは、複数の基底クラスを持つことができ(多重継承)、導出されたクラスでは基底クラスの任意のメソッドをオーバライド(、上書き)することが可能である。 
また、オブジェクトには任意のデータを入れることができる。これらのメソッドやデータは、基本的に、すべてcodice_1であり、codice_2(仮想)である。ただし、先頭にアンダースコアをもつメンバをcodice_3とすることができる。これは単なるマナーであるが、アンダースコアを2つもつ場合は、クラスの外部からメンバの名前を隠された状態(、難号化)とすることでカプセル化を実現できる。また、利用者定義演算子が機能として用意されておりほとんどの組み込み演算子(算術演算子()や添字表記)はクラスインスタンスで使うために再定義することが可能となっている。 Pythonには「電池が付属しています()」の思想があり、プログラマがすぐに使えるようなライブラリや統合環境をあらかじめディストリビューションに含めるようにしている。このため標準ライブラリは非常に充実している。 サードパーティによるライブラリも豊富に存在する。 マイナーなものまで含めると多すぎて収拾がつかない。 Python Package Index (PyPI) と呼ぶ公式のパッケージリポジトリを導入した。 Pythonは当初1バイト単位での文字列型のみ扱い、かな漢字のようなマルチバイト文字をサポートしていなかったが、Python 2.0からUnicode文字型が新たに導入された。 Python 3.0では、文字列型がバイト列型に、Unicode文字列型が文字列型に変更された。従来は文字列の表現方法として2通り(旧文字列型と旧Unicode文字列型)があったが、これを1通り(新文字列型=旧Unicode型)に統一し、旧文字列型は単なるバイト列型として再定義された。これにより、Python 3.0で文字列を扱う際には後述の変換処理を必ず行う必要がある。ファイル入出力などエンコードを明示しなければ標準エンコードを用いて暗黙に行われる場合も多い。これにより多言語の扱いを一貫したものにしている。 Pythonでは文字のバイト列表現(エンコーディング)とUnicodeの内部表現を明確に区別している。Unicode文字はメモリ中に保持される抽象的なオブジェクトであり、画面表示やファイルへの入出力の際には変換ルーチン(コーデック)を介して特定のエンコーディングのバイト列表現と相互変換する。また、ソースコード中の文字コードを認識する機能があり、これによって異なる文字コードで書かれたプログラムの動きが異なるという危険を解消している。 Pythonでは変換ルーチンをモジュールとして追加することで、さまざまなエンコーディングに対応できるようになっている。日本語の文字コード (EUC-JP, Shift_JIS, MS932, ISO-2022-JP) に対応したコーデックも作成されている。Python 2.4からは、日中韓国語用のコーデックが標準でディストリビューションに含まれるようになったため、現在では日本語の処理に問題はほとんどなくなった。ただしGUIライブラリであるTkinterや統合開発環境のIDLEは、プラットフォームにもよるが、まだきちんと日本語に対応していないものもある。 ソースコードの文字コードは、ASCIIと互換性があり、Pythonが対応しているものを使用する。デフォルトエンコーディング以外の文字コードを使う場合は、ソースファイルの 1行目か 2行目に一定の書式でコメントとして記述することになっており、しばしば以下のようにEmacsやVimなどのテキストエディタにも認識可能な書式で記述される(次の例は Emacs が認識できる書式)。 Pythonはおもに欧米の企業でよく使われている。大企業ではマイクロソフトやアップルなどのパッケージソフトウェア企業をはじめ、Google、Yahoo!、YouTube などの企業も利用している。また携帯電話メーカーのノキアでは、S60シリーズでPythonアプリケーションが動く。研究機関では、NASAや日本の高エネルギー加速器研究機構でPythonが使われている。 適応範囲はWebプログラミング、GUIベースのアプリケーション、CAD、3Dモデリング、数式処理など幅広い分野に及ぶ。さらにスクリプト言語としての特性から、従来Perlやシェルスクリプトが用いられることの多かったシステム管理用のスクリプトとして採用しているOSも複数ある。また、多くの異なる言語で書かれたモジュールをまとめるグルー言語としての利用例も多い。実際、多くの商用アプリケーションで Python は組み込みのスクリプト言語として採用されている。 またNumPy、SciPyなどの高速な数値計算ライブラリの存在により、科学技術コンピューティングにもよく用いられる。NumPy、SciPyの内部はC言語で書かれている為に動的スクリプト言語の欠点の一つである速度の遅さを補っている。 Pythonは教育目的で設計されたわけではないが、単純さから子供が最初に学ぶ、プログラミング教育用の言語としても利用が増えている。グイド・ヴァンロッサムはPython設計以前に教育用言語であるABCの開発にかかわり、教育用としての利用について期待感を示したこともあり、方針として非技術者向けといった利用を視野に入れているとされることもある。 私の大好きなPython利用法は、騒ぎ立てずに、言語教育でプログラミングの原理を教えること。それを考えてくれ――次の世代の話だね。-- スラッシュドット・ジャパン『 へのインタビュー』 """ #@title ウィキペディア記事「C++」をcpp_article変数に読み込む cpp_article = """ C++ C++(シープラスプラス)は、汎用プログラミング言語の一つである。日本語では略してシープラプラ、シープラなどとも呼ばれる。 1983年にベル研究所のコンピュータ科学者のビャーネ・ストロヴストルップが、C言語の拡張として開発した。当時の名前は「」(クラス付きのC言語)だった。拡張はクラスの追加に始まり、仮想関数、多重定義、多重継承、テンプレート、例外処理といった機能が続いていった。 標準規格化がISOとIEC共同で行われており、現在最新のバージョンは、2017年に制定されたISO/IEC 14882:2017、通称「C++17」である。 表現力と効率性の向上のために、手続き型プログラミング・データ抽象・オブジェクト指向プログラミング・ジェネリックプログラミングの複数のプログラミングパラダイムを組み合わせている。アセンブリ言語以外の低水準言語を必要としない、使わない機能に時間的・空間的費用を要しないことが、言語設計の重要な原則となっている。また、静的な型システムを持つ。 ストロヴストルップはプログラミング言語C with Classesの開発を1979年に開始した。彼は大規模なソフトウェアの開発に有用な特徴をSimulaが備えていることに気がついたが、Simulaは実行速度が遅く実用的ではなかった。一方でBCPLは実行速度こそ速かったものの、大規模なソフトウェア開発を念頭に置いた場合にあまりにも低級だった。 これらの事情を鑑みて、ストロヴストルップは当時既に汎用的な言語だったC言語にSimulaの特徴を取り入れることを試みた。この取り組みにあたってはALGOL68 やAda、 CLU、 ML等の言語の影響も受けている。最初はクラスと派生クラス、型検査機構の強化、インライン関数、デフォルト引数の機能を、Cfrontを介してC言語に追加した。1985年10月に最初の商用リリースがなされた。 1983年にはC with ClassesからC++に名称を変更した。この際に、仮想関数と、関数と演算子の多重定義、参照型、codice_1型、ユーザー制御可能な自由領域メモリ制御、型検査機構の改良、BCPL形式(「codice_2」による)の行単位のコメントアウトなどの機能が追加された。1985年には『The C++ Programming Language』の初版が出版された(邦訳『プログラミング言語C++』1988年))。この時点では公式な標準が策定されていなかったために、この本が事実上のリファレンスとなった。1989年C++のヴァージョン2.0として、多重継承と抽象クラス、静的メンバ関数、codice_1メンバ関数、codice_4メンバ等の機能が追加されたものがリリースされた。1990年に『The Annotated 
C++ Reference Manual (ARM)』(邦訳『注解C++リファレンスマニュアル』)が出版され、将来の標準化の土台となるものを提供した。後に追加された機能にはテンプレートと例外処理、名前空間、新形式のキャスト、ブール型が含まれた。 ARMが事実上の標準として使われた時代が続いたが、標準化が進んだ。C++言語の最初の標準は1998年にISO/IEC 14882:1998として承認された。2003年の改訂版を経て、2011年にメジャーアップデートとして制定されたのがISO/IEC 14882:2011、通称「C++11」である。このバージョンは、元々、非公式に「C++0x」と呼ばれていた。2000年代中に制定され、正式に「C++09」と呼称されることを見越した仮称だったが、2000年代中には実現しなかった。2011年8月10日まで続いた最終国際投票で C++0x は全会一致で承認された。これにより C++0x と呼ばれてきた C++ の次期改正案はついに国際標準になり、C++11と呼べるようになった。また、2014年にはISO/IEC 14882:2014、通称「C++14」が策定された。2018年現在の最新バージョンは2017年に策定されたISO/IEC 14882:2017、通称「C++17」である。2020年にはISO/IEC 14882:2020、「C++20」が策定される予定である。 C++言語の進化に伴い、標準ライブラリもまた進化していった。C++標準ライブラリに最初に追加されたのは、従来のC言語の codice_5 や codice_6 といった関数を置き換えるストリームI/Oライブラリである。また、C++98における標準ライブラリへの追加で最も重要なものはStandard Template Library (STL)である。C++11では、正規表現による検索・置換や複数スレッドでの同時実行、ハッシュテーブル・ハッシュセットの追加などさらなる拡充が続いている。 長年にわたる作業の後、ANSIとISOの合同委員会はC++言語を1998年に標準化した(ISO/IEC 14882:1998)。1998年の標準の公式なリリースから数年間に渡って委員会は不具合の報告を続け、2003年に訂正版を出版した。2003年12月に制定された日本工業規格JIS X 3014:2003(プログラム言語 C++)は、ISO/IEC 14882:2003(E)の日本語への翻訳である。 2007年11月15日に『C++ Technical Report 1』 (TR1)というテクニカルレポートがリリースされた。これは標準の公式な一部ではないが、次のバージョンのC++に含まれると期待される、標準ライブラリへの数多くの拡張を与えた。TR1の内容は、多少の修正を加えてC++11に取り込まれている。 2011年9月1日、C++98以来初の大きな改訂となるISO/IEC 14882:2011が発行された。 2014年8月18日、ISO/IEC 14882:2014(C++14)が投票で承認され、2014年12月15日、公式に出版された。 2017年12月、ISO/IEC 14882:2017(C++17)が公式に発行された。 現在、C++20の仕様策定が進められている。 C++に対しては、今もなお要望が絶えない。特にBoostはC++の方向性の決定に大きく貢献し、さらにC++標準化委員会へ改良すべき点などを意見している。現在はマルチパラダイムプログラミングをより自然に行えるようにすることに力が注がれており、たとえばBoostでは、C++の関数型プログラミングやメタプログラミングの可能性を模索している。 C++11と呼ばれている新しいバージョンのC++標準ではこれらの一部が取り込まれ、今後のC++でもさらなる追加が行われると見られている。 この名称はの功績で、最初に使用されたのは1983年の12月である。初期の研究期間では、開発中の言語は「C with Classes」と呼ばれていた。最終名は、変数の値を一つ加算する、C言語のcodice_7(インクリメント)演算子からの派生である。また一般的な命名規則での「+」の使用は、機能強化されたコンピュータプログラムを意味する。ストロヴストルップによれば「この名前は、C言語からの変更の革新的な本質を示している」ということである。C+は、より初期の無関係なプログラミング言語の名前である。 ストロヴストルップは著書『The C++ Programming Language』の前文で名前の起源を語り、ジョージ・オーウェルの小説『1984年』の付録から「C++」が連想されるかもしれないと付け加えている。新語法という架空の言語の解説に宛てられた3つの章の中に、科学技術に関する専門用語とジャーゴンの解説に宛てられた「C vocabulary」という章がある。新語法で「ダブルプラス」は最上級の修飾語である。ゆえに新語法で「C++」は「最も極端な専門用語またはジャーゴン」という意味になるだろう。 1992年、は名前について非公式に質問されると、彼はおふざけのつもりで命名したという旨の回答をした。彼はこの言語の正式な名称になるとは夢にも思っていなかった。 ビャーネ・ストロヴストルップは著書『(1994)』でC++を設計する際に用いたルールを述べている。 C++のコンパイラがどのようにコードを出力しメモリのレイアウトを決めるのかということについては『Inside the C++ Object Model』(Lippman, 1996)に記載されている。ただしコンパイラが出力するコードの仕様はコンパイラ制作者の裁量に任されている。 1998年に施行されたANSI/ISO C++ 規格は言語仕様とライブラリの2つのパートで構成される。ライブラリ規格の大半はStandard Template Library (STL)とC言語の標準ライブラリの改良版についての内容である。標準規格以外にも様々なライブラリが数多く存在し、リンカを使用することにより、C言語、FORTRAN、Pascal、BASICのような言語を用いて作成されたライブラリを利用できる。規格外のライブラリが利用できるかどうかはコンパイラに依存する。 C++標準ライブラリはC++向けに若干の最適化が施されたC言語標準ライブラリを含んでいる。C++標準ライブラリの大部分はSTLである。 コンテナ(可変長配列やリストなど)、コンテナを配列のように扱えるようにするイテレータ、検索やソートを行うアルゴリズムといった有用なツールが提供されている。さらにcodice_8やcodice_9のような連想配列や、codice_10やcodice_11のようなソート済みコンテナも提供され、これらは全てインターフェイスに互換性がある。テンプレートを用いることにより、あらゆるコンテナ(またはイテレータで定義したシーケンス)に適用できる汎用的なアルゴリズムを記述できる。C言語と同様にライブラリの機能にはcodice_12 ディレクティブを使ってヘッダファイルを読み込むことによってアクセスする。C++には69本の標準ヘッダファイルがあるが、このうち19本については非推奨となっている。 STLは標準規格に採用される前は、ヒューレット・パッカードの(一時はシリコングラフィックスの)商用ライブラリだった。STLは標準規格の単なる一部分に過ぎず規格書にSTLという表記は見られないが、入出力ストリーム、国際化、デバッグ機能、C言語標準ライブラリ等の、STL以外の部分と区別するために、今でも多くの人がSTLという用語を使っている。 大半のC++コンパイラはSTLを含むC++標準ライブラリの実装を提供している。STLPortのようなコンパイラ非依存のSTLも存在する。様々な目的でC++標準ライブラリを独自に実装しているプロジェクトは他にもある。 C++の標準ライブラリは大きく次のように分けられる。多種多様な実行環境が存在することを考慮して、GUIに関するライブラリは標準に含まれていない。 以下に、C++で広く使われていると思われるライブラリを挙げる。 
C言語に、オブジェクト指向プログラミングをはじめとする様々なプログラミングパラダイムをサポートするための改良が加えられたものといえる。ただし、他のプログラミング言語と違い、旧来のCと同様に手続き型言語としても扱えるという特徴がある。このことから、C++を"better C"というふうに呼ぶことがある。すなわち、基本的にC言語に対して上位互換性がある。初期のC++はCへのトランスレータとして実装され、C++プログラムを一旦Cプログラムに変換してからコンパイルしていた。 ただし、C++という名称が定まった当初の時期から、C言語とC++との間には厳密な互換性はない。当時、Cとの互換性について議論の末、「C++とANSI Cの間には不正当な非互換性はない」という合意が形成されることとなった。そのため、正当な非互換性を巡って多くの議論が発生した。ただし、まだANSIによるC言語の標準規格も策定途中の時期である。 その後、先祖であるC言語のANSIによる標準規格制定時には、関数のプロトタイプ宣言やcodice_1修飾など、C++の機能がC言語に取り入れられることにもなった。C99の出現により、codice_2コメントなどのC++で使われていた便利な機能が加わってCとC++の互換性が高まる一方、別々に審議し、別の時期に発行していることと、開発対象が必ずしも同じでないために利害関係者が異なることによる違いもある​。 次のような多種多様な機能を持っており、言語仕様は大変複雑である。 ここから、よりオブジェクト指向を強化し、「なんでもあり」ではない代わりに分かりやすくスマートな設計を目指した新たな言語(Java、D言語など)が作られることとなった。 C++はC言語およびそのプリプロセッサの構文をほぼ継承している。以下のサンプルはビャーネ・ストロヴストルップの書籍「The C++ Programming Language, 4th Edition」(ISBN 978-0321563842) の「2.2.1 Hello, World!」に記載されている標準C++ライブラリのストリーム機能を用いて標準出力に出力するHello worldプログラムである。 書籍でも明記されているが、codice_15関数で意図的に返り値を返さない手法が使用されている。 C++には四則演算、ビット演算、参照、比較、論理演算などの30を超える演算子がある。メンバーアクセス演算子(codice_16とcodice_17)のような一部の例外はあるが、大半の演算子はユーザー定義によるオーバーロードが可能である。オーバーロード可能な演算子が豊富に揃えられているためC++を一種のドメイン固有言語として利用できる。またオーバーロード可能な演算子はスマートポインタのような先進的な実装テクニックに欠かせないものとなっている。演算子をオーバーロードしても演算の優先順位は変化せず、また演算子のオペランドの数も変化しない。ただし指定したオペランドが無視される可能性はある。 C++には、ジェネリックプログラミングを実現する機能としてテンプレートが存在する。テンプレートにできる対象は、関数とクラスである。テンプレートは型、コンパイル時定数またはその他のテンプレートによってパラメタライズできる。テンプレートはコンパイル時にインスタンス化(実体化・具現化などとも)される。コンパイラは関数やクラスをインスタンス化するためにテンプレート仮引数を特定の値に置き換える。テンプレートはジェネリックプログラミング、テンプレートメタプログラミング、コード最適化などのために利用される強力なツールであるが、一定のコストを伴う。各テンプレートのインスタンスはテンプレート仮引数毎にテンプレートコードのコピーを生成するためコードサイズが肥大化する。コンパイル時に型の情報を削除して単一のテンプレートインスタンスを生成するランタイム型のジェネリクスを実装したJavaなどの言語とは対照的である。 テンプレートとマクロはいずれもコンパイル時に処理される言語機能であり条件に基づいたコンパイルが行われるが、テンプレートは字句の置き換えに限定されない。テンプレートはC++の構文と型を解析し、厳密な型チェックに基づいた高度なプログラムの流れの制御ができる。マクロは条件コンパイルに利用できるが、新しい型の生成、再帰的定義、型の評価などは行えないため、コンパイル前のテキストの置き換えや追加・削除といった用途に限定される。つまりマクロは事前に定義されたシンボルに基づいてコンパイルの流れを制御できるものの、テンプレートとは異なり独立して新しいシンボルを生成することはできない。テンプレートは静的な多態(下記参照)とジェネリックプログラミングのためのツールである。 C++のテンプレートはコンパイル時におけるチューリング完全なメカニズムである。これはテンプレートメタプログラムを用いて実行する前にコンピュータが計算可能なあらゆる処理を表現できることを意味している。 概略すれば、テンプレートはインスタンス化に必要な引数を明確にしなくても記述できるコンパイル時にパラメタライズされる関数またはクラスである。インスタンス化した結果は、テンプレート仮引数に指定した型に特化した形で記述されたコードと全く等価になる。これによりテンプレートは、汎用的かつおおまかに記述された関数及びクラス(テンプレート)と特定の型に特化した実装(インスタンス化されたテンプレート)の依存関係を解消し、パフォーマンスを犠牲にすることなく抽象化できる手段を提供する。 C++はC言語にオブジェクト指向プログラミングをサポートするための改良を加えたものといえる。C++のクラスには、オブジェクト指向言語で一般的な抽象化、カプセル化、継承、多態の4つの機能がある。オブジェクトは実行時に生成されるクラスの実体である。クラスは実行時に生成される様々なオブジェクトのひな形と考えることができる。 なお、C++はSmalltalkなどに見られるメッセージ転送の概念によるオブジェクト指向を採用していない。 カプセル化とは、データ構造を保証し、演算子が意図したとおりに動作し、クラスの利用者が直感的に使い方を理解できるようにするためにデータを隠蔽することである。クラスや関数はC++の基礎的なカプセル化のメカニズムである。クラスのメンバはcodice_18、codice_4、codice_20のいずれかとして宣言され明示的にカプセル化できる。publicなメンバはどの関数からでもアクセスできる。privateなメンバはクラスのメンバ関数から、またはクラスが明示的にアクセス権を与えたフレンド関数からアクセスできる。protectedなメンバはクラスのメンバおよびフレンド関数に加えてその派生クラスのメンバからもアクセスできる。 オブジェクト指向では原則としてクラスのメンバ変数にアクセスする全ての関数はクラスの中にカプセル化されなければならない。C++ではメンバ関数およびフレンド関数によりこれをサポートするが、強制はされない。プログラマはメンバ変数の一部または全体をpublicとして定義でき、型とは無関係な変数をpublicな要素として定義できる。このことからC++はオブジェクト指向だけでなく、モジュール化のような機能分割のパラダイムもサポートしているといえる。 一般的には、全てのデータをprivateまたはprotectedにして、クラスのユーザに必要最小限の関数のみをpublicとして公開することがよい習慣であると考えられている。このようにしてデータの実装の詳細を隠蔽することにより、設計者はインターフェイスを変更することなく後日実装を根本から変更できる 
継承を使うと他のクラスの資産を流用できる。基底クラスからの継承はcodice_18、codice_4、codice_20のいずれかとして宣言する。このアクセス指定子により、派生クラスや全く無関係なクラスが基底クラスのpublicおよびprotectedメンバにアクセスできるかどうかを決定できる。普通はpublic継承のみがいわゆる"派生"に対応する。残りの二つの継承方法はあまり利用されない。アクセス指定子を省略した場合、構造体はpublic継承になるのに対し、クラスではprivate継承になる。基底クラスをcodice_24として宣言することもできる。これは仮想継承と呼ばれる。仮想継承は基底クラスのオブジェクトが一つだけ存在することを保証するものであり、多重継承の曖昧さの問題を避けることができる。 多重継承はC++の中でもしばしば問題になる機能である。多重継承では複数の基底クラスから一つのクラスを派生できる。これにより継承関係が複雑になる。例えば"FlyingCat"クラスは"Cat"クラスと"FlyingMammal"クラスから派生できる。JavaやC#では、基底クラスの数を一つに制限する一方で、複数のインタフェースを継承でき、これにより制約はあるものの多重継承に近い機能を実現できる。インタフェースはクラスと異なりメンバ関数を宣言できるのみであり、関数の実装やメンバ変数は定義できない。JavaとC#のインタフェースや抽象クラスはC++の抽象基底クラスと呼ばれる関数宣言のみを持つクラスに相当する。JavaやC#の継承モデルを好むプログラマは、非抽象クラスからのみクラスを派生させる方法を選択できる。この場合は抽象基底クラスのメンバ関数を必ず明示的に定義しなければならず、またこのクラスを継承することはできない。 多態(ポリモーフィズム)はよく多用されているインターフェイスの機能であり、様々な状況下でオブジェクトに異なる振る舞いをさせることができる。 C++は"静的な多態"と"動的な多態"の両方をサポートする。コンパイル時に解決される静的な多態は実行時には考慮されないのに対し、ランタイム時に解決される動的な多態はパフォーマンス的に不利である。 関数のオーバーロードは名称が同じ複数の関数を宣言できる機能である。ただし引数は異なっていなければならない。関数は引数の型や数で区別される。同名の関数はコードの文脈によってどの関数が呼ばれるのかが決まる。関数の戻り値の型で区別することはできない。 関数を宣言する際にプログラマはデフォルト引数を指定できる。関数を呼び出すときに引数を省略した場合はデフォルト引数が適用される。関数を呼び出すときに宣言よりも引数の数が少ない場合は、左から右の順で引数の型が比較され、後半部分にデフォルト引数が適用される。たいていの場合は一つの関数にデフォルト引数を指定するよりも引数の数が異なる関数をオーバーロードする方が望ましい。 C++のテンプレートではより洗練された汎用的な多態を実現できる。特ににより仮想関数のオーバーライドをシミュレートした静的な多態を実装できる。C++のテンプレートは型安全かつ チューリング完全であるため、 テンプレートメタプログラミングによりコンパイラに条件文を再帰的に解決させて実行コードを生成させることにも利用できる。 基底クラスへのポインタ変数及び参照は、正確に型が一致するオブジェクトだけでなく、その派生クラスのオブジェクトを示すことができる。これにより配列やコンテナは複数の型へのポインタを保持できる。ポインタ変数は実行時に値が割り当てられるためこれは実行時の話である。 codice_25は基底オブジェクトから派生オブジェクトへの変換を安全に行うための演算子である(派生オブジェクトから基底オブジェクトへの変換ではキャストは必要ない)。この機能は実行時型情報 (RTTI)に依存している。オブジェクトが特定の派生オブジェクトであることがあらかじめわかっている場合はcodice_26でキャストすることもできる。codice_26は純粋にコンパイル時に解決されるため動作が速くRTTIを必要としない。 基底クラスの関数を派生クラスでオーバーライドした場合、実際に呼び出される関数はオブジェクトの型によって決定される。派生クラスによってオーバーライドでされるのは引数の数や型が同じ関数である。基底クラスのポインタのみが与えられた場合、コンパイラはオブジェクトの型をコンパイル時に特定できず正しい関数を呼び出せないため、実行時にこれを特定する。これをダイナミックディスパッチと呼ぶ。仮想関数や"メソッド"により、オブジェクトに割り当てられた実際の型に従って、最上位の派生クラスで実装した関数が呼び出される。一般的なC++コンパイラは仮想関数テーブルを用いる。オブジェクトの型が判明している場合はスコープ解決演算子を利用して仮想関数テーブルを使わないようにバイパスすることもできるが、一般的には実行時に仮想関数の呼び出しを解決するのが普通である。 標準のメンバ関数に加え、オーバーロードした演算子やデストラクタも仮想関数にできる。原則的にはクラスが仮想関数を持つ場合はデストラクタも仮想関数にすべきである。コンストラクタやその延長線上にあるコピーコンストラクタはコンパイルされた時点でオブジェクトの型が確定しないため仮想関数にできない。しかし、派生オブジェクトへのポインタが基底オブジェクトへのポインタとして渡された場合に、そのオブジェクトのコピーを作らなければならない場合は問題が生じる。このような場合はcodice_28関数(またはそれに準じる物)を仮想関数として作成するのが一般的な解決方法である。codice_28は派生クラスのコピーを生成して返す。 codice_30を関数宣言の閉じ括弧とセミコロンの間に挿入することによりメンバ関数を"純粋仮想関数"にできる。純粋仮想関数を持つクラスは純粋仮想クラスと呼ばれ、このクラスからオブジェクトを生成することはできない。このような純粋仮想クラスは基底クラスとしてのみ利用できる。派生クラスは純粋仮称関数を継承するため、派生クラスのオブジェクトを生成したい場合は全ての純粋仮想関数をオーバーライドして実装しなければならない。純粋仮想関数を持つクラスのオブジェクトを生成しようと試みるようなプログラムは行儀が悪い。 型消去と呼ばれるテンプレートを活用して動的な多態性を実現する手法が存在する。この手法はC++の標準ライブラリでもcodice_31やcodice_32の削除子で採用されている。いずれも、コンストラクタや代入演算子で(一定の条件を満たす)任意のオブジェクトを実引数として渡せるようにすることから多態性を実現している。 C99の制定前、C言語とC++との分かりやすい差異として、codice_2 で始まり改行で終わる、単一行コメントの有無があった。 単一行コメントはもともと、C言語の祖先にあたるBCPLに含まれていた仕様である。現在のC++のコンパイラの多くがC言語のコンパイラとしても使えるようになっているのと同様に、C言語が生まれて間もない頃は、C言語に加えB言語やBCPLのコンパイルができるコンパイラが用いられていた。それらコンパイラは、C言語のソースであってもBCPLと同様に単一行コメントが使用できるよう独自の拡張がなされていたため、 現在ではC99の制定によって正式に単一行コメントがサポートされるようになった。 LALR(1)のような旧式のパースアルゴリズムを用いてC++のパーサを記述することは比較的難しい。その理由の一つはC++の文法がLALRではないことである。このため、コード分析ツールや、高度な修正を行うツール(リファクタリングツールなど)は非常に少ない。この問題を取り扱う方法としてLALR(1)でパースできるように改良されたC++の亜種()を利用する方法がある。GLRパーサのようにより強力でシンプルなパーサもあるが処理が遅い。 パースはC++を処理するツールを作成する際の最も難しい問題ではない。このようなツールはコンパイラと同じように識別子の意味を理解しなければならない。従ってC++を処理する実用的なシステムはソースコードをパースするだけでなく、各識別子の定義を正確に適用し(つまりC++の複雑なスコープのルールを正確に取り扱い)、型を正しく特定できなければならない。 
いずれにせよC++ソースコード処理ツールが実用的であるためには、GNU GCCやVisual C++で使われているような、様々なC++の方言を取り扱えなければならず、適切な分析処理やソース変換やソース出力などが実装できなければならない。GLRのような先進的なパースアルゴリズムとシンボルテーブルを組み合わせてソースコードを変換する方法を利用すればあらゆるC++ツールを開発できる。 その言語文法の複雑さゆえ、C++規格に準拠したコンパイラを開発するのは一般的に難しい。20世紀末から何年にも渡りC++に部分的に準拠した様々なコンパイラが作られ、テンプレートの部分特殊化などの部分で実装にばらつきがあった。中でも、テンプレートの宣言と実装を分離できるようにするためのcodice_34は問題のキーワードの一つだった。exportを定義したC++98規格がリリースされてから5年後の2003年前半にが初めてexportを実装した。2004年にBorland C++ Builder Xがexportを実装した。これらのコンパイラはいずれもEDGのフロントエンドをベースにしていた。大半のコンパイラで実装されていないexportは多くのC++関連書籍(例えば""Beginning ANSI C++"", Ivor Horton著)にサンプルが記されているが、exportが記載されていることによる問題は特に指摘されていない。GCCをはじめとするその他のコンパイラでは全くサポートしていない。はC++の標準規格からexportを削除することを推奨していたが、C++98では最終的にこれを残す決定がなされた。結局、C++11では実装の少なさ・困難さを理由に削除された。 コンパイラ開発者の裁量で決められる範囲を確保するため、C++標準化委員会は名前修飾や例外処理などの実装に依存する機能の実装方法を決定しないことに決めた。この決定の問題は、コンパイラが異なるとオブジェクトファイルの互換性が保証されない点である。特定の機種やOSでコンパイラの互換性を持たせ、バイナリレベルでのコード再利用性を高めようとするABIのような非標準の規格もあり、一部のコンパイラではこうした準規格を採用している。 2015年現在のメジャーなC++コンパイラ(gcc,Clang,Intel C++ Compiler,Microsoft Visual C++など)の最新版はC++11規格にほぼ準拠しており、特にClangは2013年4月時点で全機能を実装完了した 。ただしマイナーアップデートとなるC++14を含めると、処理系間でのばらつきは依然として存在する。 C++は基本的にC言語の上位互換であるが、厳密には異なる。C言語で記述された大半のプログラムはC++でコンパイルできるように簡単に修正できるが、C言語では正当でもC++では不正になる部分や、C++とは動作が異なる部分が若干存在する。 例えば、C言語では汎用ポインタcodice_35は他の型へのポインタに暗黙的に変換できるが、C++ではキャスト演算子によって変換を明示する必要がある。またC++ではcodice_36やcodice_37といった数多くの新しいキーワードが追加されたが、移植の際に元のC言語のプログラムでそれらが識別子(例えば変数名)として使われていると、問題になる。 C言語の標準規格であるC99やその後継C11ではこうした非互換性の一部が解決されており、codice_2形式のコメントや宣言とコードの混在といったC++の機能がC言語でサポートされている。その一方でC99では、可変長配列、複素数型の組み込み変数、指示初期化子、複合リテラルといった、C++でサポートしていない数多くの新機能が追加された。C99で追加された新機能の一部はC++11に反映され、次期C++1yに対してもC99やC11との互換性を向上される提案が行われている。また、可変長配列や複素数型などのC99に追加された機能の一部はC11でオプションとなった。 C++で書かれた関数をC言語で書かれたプログラムから呼び出す、あるいはその逆を行なう場合など、C言語のコードとC++のコードを混在させるためにはCリンケージを利用する必要があり、関数をcodice_39で個別に修飾するか、codice_40のブロックの中で宣言しなければならない。また、関数引数や戻り値などのインターフェイスはC言語互換形式に合わせる必要がある。Cリンケージを利用した関数については、C++名前修飾がされず、名前修飾に依存している関数オーバーロード機能は利用できない。 C/C++の相互運用性が確保されていることで、慣れ親しんだC言語標準ライブラリ関数の大半をC++でもそのまま利用し続けることができるということはC++の大きなメリットのひとつである。 """ #@title ウィキペディア記事「Java」をjava_article変数に読み込む java_article = """ Java Java(ジャバ)は、狭義ではプログラミング言語Javaを指す。広義では言語仕様以外にも、仕様が与えられているJavaクラスライブラリやJava仮想マシン、さらにはJDKやJREなどの公式のものをはじめとする、場合によってはサードパーティのものなどを含め曖昧にJavaプラットフォームと総称されるようなものなどのエコシステムなどを指すこともある。構文についてはJavaの文法の記事を参照。 Javaは当初、それ以前のさまざまな言語の良い部分を引き継ぎ、また言語仕様や構文などの複雑さを排除するよう設計された。次のような特徴を持つ。 Javaは組み込みシステムや携帯機器(携帯電話・PHSやPDA・スマートフォン等)のシステムから、企業の情報システムを担う大規模なデータベース、サーバ、スーパーコンピュータまで、多くの分野で使用されている。 プログラミング言語JavaおよびJavaプラットフォームは、1990年代前半当時、サン・マイクロシステムズに居たジェームズ・ゴスリン、ビル・ジョイなどの人々によって設計・開発された。2018年現在はサンを買収したOracleによる管理の他、追加提案などはサン時代から続いているJava Community Process (JCP) というプロセスによって進められる。 Javaに関わる呼称とその意味内容は、文脈に応じていくつか使い分けられている。サン・マイクロシステムズは、「Javaテクノロジ」(Java技術、Java technology)という呼称を使い、一方でJavaのさまざまな技術の形容詞として「Java」の呼称を使ってきた。多くのプログラマは、プログラミング言語の意味で「Java」の呼称を使っている。Javaの実行環境は、Java実行環境(Java Runtime Environment;JRE)と呼ばれる。Java の基本的な開発環境は、Java開発キット(Java Development Kit;JDK)と呼ばれる。 Javaはクラスベースのオブジェクト指向プログラミング言語である(#オブジェクト指向プログラミング)。Java のプログラムは複数のクラスから構成され、プログラムの実行は、各クラスが実体化したオブジェクト群が相互にメッセージをやりとりしながら行われる。Javaでは、継承については実装の単一継承を採用している。ただし1つのクラス(オブジェクト)は複数のインタフェースを実装できる。Java で扱うデータ/オブジェクトの型(データ型)は、強い静的型付けを採用している。Javaのコンパイラおよび実行環境が、型同士の整合性を検査することによって、プログラムが正しく記述されていることや、安全に動作するか検証が可能である。 Javaは例外処理機構を備えており、プログラム実行中に生じた異常(例外)の扱いを、比較的安全な方法で行い、プログラムを読みやすく記述できる。 
Javaでは簡潔なメモリモデルを採用しており、プログラマがメモリ(主記憶装置)を管理する負担を軽減する。あらゆるオブジェクトはメモリ内のヒープという領域に割り当てられる。メモリ管理は、Java仮想マシンに統合されたガベージコレクションの機能によって行われる。従来のオブジェクト指向プログラミング言語であるC++では、ヒープ領域に生成したオブジェクトについて、もはや必要が無くなった時に破棄する指示を、プログラマが自分で責任をもって行わなければならなかった。これは、C++プログラマにとっては負担が大きく複雑で間違えやすい作業であり、ソフトウェアの安全性・開発効率・保守性を損なう要因だった。Javaではガベージコレクションの機能があるため、このようなことは無く、プログラマの負担は大きく軽減される。 Javaでは、C/C++のような、整数とポインタの相互変換、配列の要素へのポインタによるアクセス、ポインタ演算といった機能は、基本機能としては提供されていない。ただし、オブジェクトへの参照は内部的にはアドレスである。 Javaではプラットフォーム非依存を目標の一つとし、またバージョン間の互換性に注意して開発が進められている。プラットフォームに依存しない(Javaプラットフォーム自体を除く)アプリケーションソフトウェアの開発と配備を行うことができると主張される。従来のプログラミング言語の多くはプラットフォーム(CPU)に依存したネイティブなコードにコンパイルすることを前提として設計されていたが、Javaはこうした言語と異なり、中間言語(バイトコード)にコンパイルされ、Java仮想マシンで実行されるよう設計された(pコードマシンなど、過去にもあったものだが、)。性能向上のため、Java仮想マシンの成熟した実装では多くの場合、ジャストインタイムコンパイル方式が使われる。 プラットフォーム非依存とバージョン間の互換性の目標は、完全に達成できたわけではなく課題が残っている。 Javaではスレッドを言語仕様で規定しており、マルチスレッドによる並行計算を、従来の言語と比べて簡単に実装できる。なお並行計算は、複数の処理を同時に実行する処理形態である。またスレッドは、プロセスより小さく軽量な処理の単位である。 Javaでは充実したライブラリにより、コンピュータネットワークを活用するソフトウェアを、効率良く開発できる。Javaはその初期のバージョンから、TCP/IP のライブラリを備えていた。分散オブジェクト環境 (Java RMI, CORBA) のソフトウェアの開発も早い時期からできるようになっていた。では、さまざまなネットワークプロトコルの高水準なライブラリが使えるようになっている。W3Cにより標準化された、汎用マークアップ言語のひとつであるXMLで記述された文書を扱うライブラリも早期に実装・標準サポートされた。では、XMLプロセサとXSLTプロセサがJava標準ライブラリに統合され提供されている。充実したネットワーク機能とXML文書を扱う機能を有効に組み合わせることにより、Javaは標準機能だけでも高度なシステムを構築できる言語の一つとなっている。 Javaはセキュリティを考慮して設計されており、サンドボックスモデルに基づいたセキュリティ機構を備えている。セキュリティ機構を正しく実装したJava実行環境を適切に使うことで、遠隔のコンピュータ上にある実行コードを安全に実行できる(Javaアプレット)。 また、名前空間の機構をもつ言語であり、ライブラリおよびアプリケーションに含まれる多数の Java のプログラム(クラスとインタフェース)は、パッケージという階層構造で管理できる。 Javaに対する批判も少なくない。いくつかの批判に対しては、サン(後にはオラクル)やJCPに参加する人々の努力により、Javaの改良が行われている。一方で、。 JavaScript(ECMAScript)は、Javaにちなんで命名されたスクリプト言語である。とはいえ基本構文の一部がC言語系すなわちJavaに似ているだけであり、またクラスベースではなくプロトタイプベースのオブジェクト指向言語であるため、言語仕様的な共通点は少ない。 表記は"J"のみが大文字の「Java」が正しい。「JAVA」は正式な表記ではない。 この節では次の構成でJavaの歴史と近況を説明する。 Javaプラットフォームおよびプログラミング言語Javaは、1990年12月にサン・マイクロシステムズが1つの内部プロジェクトを立ち上げたことから始まった。この内部プロジェクトでは、C++ / Cの代替となるプログラミング言語を開発した。この言語は、プロジェクトで Greenオペレーティングシステム (Green OS) と共に、同OSの標準言語として開発された。この言語は、1992年頃プロジェクト内ではOakと呼ばれていたが、後にJavaの呼称に変更されることになる。呼称変更の理由は、Oakはすでに別の会社が商標として使っていたからである。 1990年頃、サンのエンジニア、パトリック・ノートンは、自社のプログラミング言語C++とCのアプリケーションプログラミングインタフェース(API)と開発ツールに不満を募らせていた。その頃、NeXTが注目を浴びていたことがきっかけとなって、ノートンはサンでプログラミング環境の開発の仕事をすることになった。NeXTワークステーションと、その環境であるNEXTSTEPでは、主力の言語としてObjective-Cが開発されていた(余談になるが、その「直系の子孫」に当たるのは、macOSおよびiOSと、Swiftである)。こうした経緯のなかで「ステルスプロジェクト」が始まった。 ステルスプロジェクトには、始まってすぐにジェームズ・ゴスリンとマイク・シェルダンが参加し、プロジェクトの名称は「グリーンプロジェクト」に変更された。プロジェクトには他のエンジニアたちも参加し、彼らはアメリカ合衆国カリフォルニア州メンローパーク市サンドヒルロードの道沿いにある小さなオフィスで作業を始めた。プロジェクトの目的は、次世代の家電製品のための新しいプログラミング言語を設計し、その処理系を開発することだった。サンはこの分野が重要な市場になると予測していた。 プロジェクトチームでは当初はC++を検討していたが、いくつかの理由から却下された。理由は、当時の彼らの目的が、家電製品すなわち組み込みシステムだったからである。組み込みシステムでは、利用できるコンピュータ資源が少ないという制約がある。彼らはC++ではコンピュータ資源を食いすぎると判断した。またC++は複雑なプログラミング言語であり、C++を使うプログラマは注意していても間違いを犯しがちである。 C++にはガベージコレクションの機能が無い。ガベージコレクションが無いということは、プログラマが自分でオブジェクトの寿命(生存期間)を管理しなければならないことを意味する。プログラマが自分でオブジェクトの寿命を管理することは、冒険的で間違いやすい作業である。 プロジェクトチームは、いくつかの重要な機能についてC++の移植性が乏しいことも問題であると考えた。 このプロジェクトでの重要な機能とは、セキュリティおよび分散コンピューティング、マルチスレッドであり、これらの機能が、プラットフォームに依存せずに使える必要があった。このような事情で、彼らはあらゆる機器に容易に移植できるプラットフォームが必要であると認識するようになった。 一方で、サンの別のエンジニア、ビル・ジョイは、ゼロックスのパロアルト研究所でAltoというワークステーション試作機のために開発されたプログラミング言語・MesaとCの良いとこどりをした新しいプログラミング言語を構想していた。ジョイは "Further" という名前で呼ばれる論文を書き、自社でC++に基づいたオブジェクト指向環境を開発するべきであることを進言した。まずジェームズ・ゴスリンがC++を改変し拡張することを試みた。ゴスリンはこの拡張版C++を、"C++ ++ --"と名付けた。しかしゴスリンは、すぐにこの拡張版C++の開発を中止して、全く新しいプログラミング言語を開発する方針を採ることにした。ゴスリンはこの新しい言語にOakという名前をつけた。この名前の由来は、ゴスリンのオフィスのすぐそばにオークの木が立っていたことによる。 
プロジェクトチームは残業までして作業を続け、1992年の夏までに新しいプラットフォームを、Green OS、Oak言語、ライブラリ、ハードウェアによって部分的なデモンストレーションができるようになった。1992年9月3日の最初のデモンストレーションでは、チームは Star7という携帯情報端末機器を開発することに力点をおいていた。この機器の名称の由来は、電話機能が "*7" とボタンを押すことで有効になることによる。 この機器は、グラフィカルなインタフェースを備え、"Duke" という名前の知的な仮想代理人が利用者を支援した。同年11月、サンはグリーンプロジェクトを分離して完全子会社のFirstPerson, Incを設立した。それにともないチームはパロアルトに引っ越した。FirstPersonチームは、高度にインタラクティブな機器に関心を持っていた。そのおりタイム・ワーナーがケーブルテレビのセットトップボックスのRFP (Request For Proposal) を公表していた。そこでFirstPersonチームは自分たちの目標を変更し、タイム・ワーナーの RFP に応じてセットトップボックスの提案を提出した。しかし、FirstPersonは入札でシリコングラフィックス(SGI)に負けた。その後に3DO社のセットトップボックスの案件もあったが、契約には至らなかった。FirstPersonはテレビ業界では利益を出すことができず、サンはFirstPersonを解散してチームを自社に戻した。 1994年の6月から7月にかけて、ジョン・ゲージと、ジェームズ・ゴスリン、ビル・ジョイ、パトリック・ノートン、ウェイン・ロジン、エリック・シュミットの間で、3日間かけてブレインストーミングを行い、プロジェクトチームはウェブの世界に主眼を置くという方針変更を行う。彼らは、革新的なウェブブラウザであるNCSA Mosaicの出現を目の当たりにし、ウェブを含むインターネットの世界は、ケーブルテレビの世界に劣らず、高度にインタラクティブな媒体に発展しつつあると認識するようになった。Oakを使ったプロトタイプとして、ノートンはWebRunnerという小さなウェブブラウザを開発。このウェブブラウザの名称は後に HotJava と変更される。ウェブページにJavaアプレットという小さなJavaプログラムを埋め込んでおいて、ウェブブラウザHotJavaでそのページにアクセスすると、HotJava上でアニメーションの表示やマウスによるインタラクティブな操作ができた。 同年、チームはOakの名称をJavaに変更する。変更の理由は、商標を調べて、"Oak" という名前がすでにビデオカードアダプタの製造会社 (Oak Technology) によって使われていたことが判明したからである。Javaという名称は、一部のチームメンバーがよく出入りしていた近くのコーヒーショップで命名されたという。 この名称が、何かの頭字語であるかどうかについては、よく分かっていない。 1994年10月に、HotJavaとJavaプラットフォームが、サン・マイクロシステムズの幹部社員の前でデモンストレーションされた。そして1994年内に Java 1.0a(アルファ版)がダウンロードできるようになる。 JavaとHotJavaが最初に公的な場で公表されたのは、1995年5月23日のSunWorldカンファレンスだった。サンは、ウェブブラウザHotJava中で、Javaアプレットにより、ウェブページ内でアニメーションの表示やマウスによるインタラクティブな操作が可能であることをアピールした。カンファレンスでアナウンスを行ったのは、サンの技術部長ジョン・ゲージである。このカンファレンスではまた、ゲージのアナウンスに関連する、当時のネットスケープコミュニケーションズの上級副社長マーク・アンドリーセンによるアナウンスが人々を驚かせた。それは、ネットスケープが自社のウェブブラウザであるNetscape NavigatorにJavaの実行機能を追加する予定だというものだった。このアナウンスにより業界の耳目を集める話題となった。 1995年秋にはJava 1.0のベータ版が公開された。1996年1月9日にサンは、JavaSoft部門を立ち上げた。その2週間後に、最初の正式バージョンであるJava 1.0がリリースされた。 Javaの最初のバージョンが公開されてから現在までの動向を、いくつかの側面から述べる。なお、Javaの開発元であるサン・マイクロシステムズはこの間の2010年1月にオラクルにより買収されており、Javaに関する権利も同社に移転している。 Javaアプレットは、WWWブラウザで動作するJavaプログラムであり、クライアントサイドのウェブアプリケーションの実装方法のひとつとして広く使われている。いくつかの有力な競合が存在する。競合技術の代表としてMicrosoft ActiveXおよびAdobe Flashが挙げられるが、これらはいずれも衰退している。 なお、Javaの最初の普及期であった20世紀末の頃には圧倒的なシェアを持っていた、Microsoft Windows 95上でのInternet Explorerが、Javaアプレットを使用したページを表示しようとする際に、VMの起動のために、数十秒〜数分間操作を受け付けなくなったことが(なお、起動してしまえば実際には高性能だったのだが)、「Javaは重い」という風評の根源である。その後は、携帯端末等を含めれば、Windowsのシェアが圧倒的という状況が順調に消滅したため、IEのシェアが圧倒的ということも無くなり、一方でそのような風評のせいで、Javaの利用先としてサーバサイドが注力されたこともあり、遅いなどと言われることもほとんどなくなった。 簡単でインタラクティブなアニメーション用には、JavaアプレットよりもGIF89aやAdobe Flashを採用する事例が多い。この分野においては、ではAjaxも普及しつつある。Ajaxアプリケーションの作成に欠かせないJavaScriptの開発では、Java開発で一般的に用いられているほどドキュメントや技術が成熟した標準ライブラリ、サードパーティーライブラリ、IDE、単体テストツールなどの開発環境がないが、Java開発環境を利用してJavaScriptによるAjaxウェブアプリケーションを開発するツールとしてGoogle Web Toolkitを用いることができる。GWTコンパイラはJavaソースコードをバイトコードの代わりにJavaScriptにコンパイルし、ブラウザのJavaScript解釈エンジンをあたかもJVMのように活用することを可能にする。これによりJavaを用いてブラウザ上で動作するデスクトップアプリケーションと遜色ないウェブアプリケーションを作成することが可能となっている。HTML5によって導入されるデータベースのWeb Storage、ファイルAPI、クライアントハードウェアの位置情報を得るジオロケーション、JavaScriptをマルチスレッドで起動するWeb workerなどのクライアント側技術はJavaScriptによる呼び出しを前提としている。GWTやサードパーティのGWTライブラリはHTML5APIのJavaラッパーを提供しており、開発者は複雑なクライアント側プログラムをJavaのIDEでデバッグ、テストしながら開発し、最適化されたJavaScriptにコンパイルして実行させることができる。2011年Adobe社は携帯向けのFlash開発を断念し、HTML5にクライアント側技術の焦点を変更した。携帯機器を含めると2012年現在ではFlashよりもJavaScriptが普及してはいるが、Flashほど充実した開発環境やライブラリはない。アプレットはFlashよりも普及していない。GWTはJavaScriptの普及度とJavaの充実した開発環境の両方を用いることができるため、Java経験者のリッチクライアント作成ツールとしてアプレットに取って代わる存在となりうる。 
以上のように、ネットワーク越しにダウンロードしたアプリケーションをその場で実行する、というような場合に不可欠なのは、サンドボックスと呼ばれる一種の仮想化環境である、という事実はJavaが設計された当初から(あるいは、それ以前の先駆的な事例から)基本的に何ら変わるものではない。そのためのJava以外のものとしては、インタプリタベースのJavaScriptの他、バイトコード(あるいはネイティブコードの安全な実行)を指向したものとしてはNaCl (PNaCl)や、WebAssemblyがある。 現在、ウェブのサーバ側において、Java技術 (Java EE) は広く使われている。多くのウェブサイトが、Javaサーブレット (Java Servlet) やJavaServer Pages (JSP) などのJava EE技術を使って動的にページを生成するウェブを構築している。Javaサーブレットは2000年前後から急速に広く使われるようになり、現在では多くのウェブアプリケーション(動的なウェブページ)がサーブレットとして稼動するようになっている。 サン・マイクロシステムズが開発したJavaサーブレット技術を簡単に説明する。必ずしも厳密な説明ではない。 サンがJavaサーブレット技術を開発した1990年代末当時、ウェブアプリケーションの開発には、次に述べるようないくつかの問題があった。 Javaサーブレットはこれらの問題をある程度解決することができる技術だった。 デスクトップ環境においては、スタンドアロンのJava (Java SE) のアプリケーションソフトウェア(Javaアプリケーション)は、これまではあまり多く使われていなかったが、近年はいくつかのソフトウェアが広く使われるようになっている。近年になって使われるようになってきた理由としては、次のことが挙げられる。 広く使われているJavaのソフトウェアとしては、NetBeansおよびEclipse SDKの統合開発環境や、LimeWireやAzureusのようなファイル共有クライアントのソフトウェアなどがある。また数学ソフトウェアMATLABにおいても、ユーザインタフェースのレンダリングと計算機能の一部を実現するために使われている。多くの Java のSwingやSWTのウィジェット・ツールキットを使ったアプリケーションが、現在も開発されている。 このように、近年はデスクトップ上でJavaアプリケーションを使う事例が増えつつあるものの、従来は次に述べるいくつかの理由のためにあまり使われてこなかった。 一部のソフトウェア開発者は、情報技術はウェブを基盤としたモデルが主流となっており、スタンドアロンアプリケーションは流行遅れであり、新しいプログラミング技術は優れたウェブアプリケーションを開発することに充てられている、と思っていた。この見解については、ソフトウェア技術者の間で賛否が分かれている。 現在では、リッチクライアントやWeb 2.0の登場により新たなパラダイムが生まれようとしている。すなわちウェブを基盤としたウェブアプリケーションとスタンドアロンアプリケーションの融合である。ウェブアプリケーションをAjaxや Java Web Start、Adobe Flash などと組み合わせることにより、Web2.0時代に見合ったより洗練されたアプリケーションを開発することができる。 一昔前、ほとんどの パーソナルコンピュータ (PC) のユーザは、何ら問題なくウェブおよびデスクトップ環境上でJavaアプリケーションを実行できていた。かつて多くのPCメーカーは、自分たちが製造・販売するWindows PCにJava実行環境 (JRE) を同梱していた。アップルのmacOSや、多くのLinuxディストリビューションでも、Java実行環境を同梱していた。今では追加インストールが必要である。しかしながらパーソナルコンピュータにおいてJavaアプリケーションは殆ど使われなくなってしまっているので、マイクロソフトが2001年頃以降にJava実行環境をWindowsに同梱していないことの影響は小さい。 2001年頃にマイクロソフトによるJava実行環境をWindowsに同梱することを止めたという行動は、サン・マイクロシステムズが同社を「品質の低い」Java実行環境を同梱してきたとして告訴したことが契機となった。マイクロソフトがそれまでWindowsに同梱してきたJava実行環境向けに開発されたJavaプログラムは、他のプラットフォームのJava実行環境で動かない可能性があった。 しかし近年では、Javaアプリケーションパッケージ自体にJava実行環境を同梱する事例が少なくない。その背景にはJavaアプリケーション開発者の判断がある。Javaアプリケーションが想定どおりに機能するよう、Java実行環境のバージョンの違いによる非互換性に基づく不具合を避けるために、PCに同梱されているJava実行環境を使わないという判断である。 現在では、Javaアプレットは動作対象のJava実行環境のバージョンを認識することができる。また、バージョン間の互換性も プログラミング言語の中では高い水準にあり、上位互換性についてはJava SE 1.3以降は大きな問題はほぼおきにくくなっている。さらにJava Web StartではデスクトップにインストールされているJavaのバージョンを確認してアップデートできるならアップデートし、それだけでなくJava Web Start対応アプリケーションをもアップデートしようとする。そのため古いバージョンのJava実行環境を使っているマシンがあったとしても、自動アップデートされるためにそう難しい問題は起きない。 組み込みシステム向けのJava(Java ME)も広く使われている。 携帯機器(携帯電話・PHSやPDA・スマートフォン等)にJavaの実行環境が実装されるケースが多い。Java環境はこれら携帯機器全般に広く普及している。一方、SymbianおよびBREWは携帯電話や(日本的定義での)スマートフォンを主なターゲットとし、Javaと競合している。 Java MEでは、BREWとは異なり、開発者がライセンス料を支払わずに、プログラムを開発することができる。Java MEはSymbianより広く普及している。その理由は、Java MEがSymbianより広範な携帯機器、特に廉価なモデルで動作するからである。こうした事情からサードパーティによりOpera miniのようなフリーのJavaソフトウェアを開発することができるようになった。 携帯機器のJava MEプログラムは、サンドボックスのもとで動くため、多くの開発者が特別な配慮をせずにプログラムを開発しても、安全に実行できる。携帯機器のJava技術が多様化するに伴い、異なるメーカーの携帯機器でもJavaプログラムが動くよう、携帯機器のためのJava技術の標準が必要となった。携帯機器のためのJava MEの標準がMobile Information Device Profile (MIDP) である。最初の標準はMIDP 1で、小さい画面を想定したものであり、音声機能は無く、プログラムサイズは32kBまでという制限があった。後のMIDP 2の標準では、音声機能を備え、プログラムサイズの制限は64kBまでと緩和された。携帯機器の設計の進歩は標準化よりも急速であるため、一部のメーカーは、MIDP 2標準の最大プログラムサイズなどいくつかの制限を、意図的に緩和して携帯機器を開発している。 携帯機器におけるJava MEの競合技術について簡単に述べる。 世界的な動向としては、 また、2001年にはソニーのコンシューマゲーム機 PlayStation 2 にJava 仮想マシンが搭載される予定と発表され話題になった。 Java は、JDK(Java Development Kit; Java開発キット)1.0 以来、数度のメジャーバージョンアップを経ている。バージョンアップに伴い、多くのクラスとパッケージが標準ライブラリに追加されてきた。プログラミング言語JavaおよびJavaプラットフォームは、高い水準でバージョン間の互換性を保ちつつ発展してきている。 J2SE 1.4から、Javaの開発はJCP (Java Community Process) 
という標準化プロセスで行うようになっている。JCPでは、JSRs (Java Specification Requests) という文書群により、Javaに対する追加機能やJavaプラットフォームに対する変更の提案と規定を行う。 また、J2SE 1.3以降では開発コードネームとして、メジャーバージョンには動物の名前が、マイナーバージョンには昆虫の名前が付けられる傾向がある。 言語仕様は JLS(Java Language Specification; Java言語仕様)により規定する。JLSはJSR 901の管理下にある。 バージョンアップの過程で、言語仕様の変更だけでなく、標準クラスライブラリにおいても大きな変更が加えられている。JDK 1.0では標準ライブラリは約200クラス / インタフェースだったが、Java SE 6では4000以上のクラス / インタフェースとなっている。SwingやJava 2Dのような全く新しいAPIが追加された。その一方で、もともとJDK 1.0から存在していたクラスのメソッドの多くが、J2SE 5.0での使用は推奨されないようになっている。 最初のバージョン。プレスリリース (英語) いくつかの重要な機能が追加された。プレスリリース (英語) コードネームPlayground。このバージョンから呼称がJava 2に変更され、J2SE 5.0までこの呼称が使われる。またエディション名がJDKから "J2SE" (Java 2 Platform, Standard Edition) に変更された。この J2SE の名称により、J2EE (Java 2 Platform, Enterprise Edition) および J2ME (Java 2 Platform, Micro Edition) の基となるエディションであることが明確化された。プレスリリース (英語) コードネームKestrel。プレスリリース (英語) 新機能の概要(日本語) コードネームMerlin。このバージョンは、JCP (Java Community Process) の下で開発された最初のJavaプラットフォームである。プレスリリース(英語) 新機能の概要(日本語) コードネームTiger。JSR 176 のもとで開発された。J2SE 5.0 では、言語仕様に大きく拡張が加えられ、多くの新しい言語機能が追加された。プレスリリース(英語) 新機能の概要(日本語)。もともとは J2SE 1.5 という名称だったが、この名称はすでに内部的なバージョン番号として使われていた。またマーケティング上の理由もあった。 この例では、codice_7という変数名のコレクションオブジェクト内の、各codice_8オブジェクトを反復して繰り返し処理する。各codice_8オブジェクトにはループサイクルごとにcodice_10という変数名をつける。各ループサイクルで、codice_10に対してcodice_8型で定義されているcodice_13メソッドを呼び出す。拡張forループはJSR 201で規定された。 コードネームMustang。JSR 270のもとで開発された。Java SE 6においては、サンは命名方針を変更して、"J2SE" からJava SEに変更し、バージョン番号から ".0" の部分を廃止している。 Java SE 6 Update 10が2008年10月22日にリリースされた。Update 8と9が省略され、7の次が10となった。Javaの動作速度が改善され、アプリケーションやアプレットの起動を高速化するJava Quick Starterが搭載され、Javaのインストールを高速化する、Java Kernelが搭載された。JavaアプレットやJava Web Startの起動を容易にするための、配備ツールキットが公開された。 コードネームはDolphinである。2006年に開発が始まった。元々は2008年春にリリースされる見通しであったが、何度かリリース予定が変更された。2007年8月の時点では2009年1月をリリース目標としていたが、2008年12月、ジェームズ・ゴスリンは、「私の勝手な憶測だが」という注意書き付きで、2010年6月以降のリリースを予測し、2009年11月には2010年9月以降のリリース予定に変更された。2010年9月に、これ以上の延期を避けるため、大きな言語仕様の改訂などの部分は Java SE 8 に先送りし、Java SE 7 を2011年中頃に、Java SE 8を2012年終わり頃に提供するという目標を立て、結局2011年7月28日にリリースした。Java SE 7は、オラクルによるサン買収後、初のメジャーリリースである。 Java SE 7に追加された項目は以下のとおりである。 以下の項目を Java SE 8 に追加。2014年3月4日に JSR 337 にて仕様が規定された。JDK 8 は2013年9月9日にリリース予定だったが、2013年4月18日にリリースの延期が発表になり2014年3月18日にリリースされた。CLDC, CDC を統合した Java ME 8 は2014年4月30日にリリースされた。 当初搭載予定だった、以下の機能はJava SE 9に延期となった。 また、搭載予定だった以下の機能は廃止 (withdrawn) になった。 Java SE 9 は Java SE 8 リリース3年後の2017年9月21日にリリースされた。。言語レベルでのモジュール化のサポート (Project Jigsaw, JSR 294) などを追加した。 Java SE 10 はJava SE 9 リリース半年後の2018年3月20日にリリースされた。ローカル変数型推論などの機能が追加されている。 OracleによるJDKの無償配布はこのバージョンが最後。 Java SE 11以降はオープンソース版のOpenJDKが無償版という扱いになり、Oracle JDKは有償サポート契約を結んだ顧客にのみ配布される形になるという。 Java SE 11 には以下の機能が追加予定。 Project Valhalla Project Panama Javaの主な特徴を述べる。 Javaを開発する上では、5つの目標があった。 ネットワーク機能および遠隔コンピュータの実行コードの実行を実現するために、場合によっては、Javaプログラマは、CORBAやInternet Communications Engine、OSGiのような拡張機能を使う。 Javaはクラスベースのオブジェクト指向プログラミング言語である。Javaのプログラムは複数のクラスから構成され、プログラムの実行は、各クラスが実体化したオブジェクト群が相互にメッセージをやりとりしながら行われる。Javaでは、実装の単一継承を採用し、一つのクラスが複数のインタフェースをもつことができる。クラスとは、オブジェクト指向においてオブジェクトの設計図にあたるものである。オブジェクトについては後述する。継承とは、既存のクラスを基にして、そのクラスの機能を引き継いだ新しいクラスを定義できることをいう。Javaでは実装の多重継承は採用していない。Javaでは一つのクラスが複数のインタフェースをもてるため、一つのクラスに複数の役割をもたせることができる。 Javaで扱うデータ / オブジェクトの型(データ型)は、強い静的型付けを採用している。静的型付けにより、Javaのコンパイラおよび実行環境が、型同士の整合性を検査することによって、プログラムが正しく記述されていることや、安全に動作することの検証が可能である。 Javaのデータ型には、参照型 (reference type) と基本型(プリミティブ型、primitive type)の2種類がある。Javaのオブジェクトはすべて参照型である。Javaの基本型は、単純な構造のデータ(数値、論理値、文字 など)のための型である。Javaの標準ライブラリは、基本型の値をオブジェクトとして扱えるようにするためのラッパクラスを提供している。近年のJava (J2SE 5.0) からは型の扱いに改良が加えられている。 
Javaの特徴の一つであるオブジェクト指向プログラミングは、プログラミングおよびプログラミング言語設計の手法をいう。Javaはオブジェクト指向プログラミング言語である。オブジェクト指向の概念に対しては、多くの解釈がなされてきた。一般には、オブジェクト指向を特徴づける重要な考え方は、ソフトウェアで扱うさまざまな種類のデータについて、データとそのデータに関連する手続きを一体化するように、ソフトウェアを設計することである。こうして、データとコードは、オブジェクトと呼ばれる実体に一体化される。オブジェクトとは、状態(データ)と振る舞い(コード)がひとかたまりとなったものと考えることができる。 Javaでは、オブジェクトの設計図であるクラスに定義する振る舞いを「メソッド」と、状態を「フィールド」(インスタンス変数)と呼ぶ。 オブジェクト指向以前の技術での本質的な問題点は、プログラムにおいて、状態と振る舞いが分離されていたことである。 オブジェクト指向に基づいて、これまで分離されていた状態と振る舞いを、オブジェクトに一体化することは、ソフトウェアシステムの設計において堅牢な基盤となる。オブジェクト指向を有効に活用することにより、大規模なソフトウェア開発プロジェクトを管理することの困難さが軽減され、ソフトウェアの品質が向上し、失敗するプロジェクトの数を減らすことができる。 オブジェクト指向のもう一つの目標は、汎用的なオブジェクトを開発することで、プロジェクトをまたがってソフトウェアをより再利用可能にしていくというものである。たとえば、汎用的な「顧客」オブジェクトは、別のプロジェクトにおいても、理論的にはほぼ同一の手続き群を備えるであろう。大きな組織において、その組織の複数のプロジェクトが機能的に共通する基盤層をもつ場合は、なおさらソフトウェアの再利用が重要となる。こうしたことから、ソフトウェアオブジェクトは、さまざまなシステムに組み込み可能であるように、汎用性を備えていることが望ましい。こうすることで、ソフトウェア業界は、既存のしっかりテストされたオブジェクトコンポーネントを活用してプロジェクトを進めることができ、開発期間を大幅に短縮することができる。 一方で、ソフトウェアの再利用性を高めるということには、実践においては、2つの大きな困難を伴う。 いくつかのオープンソースコミュニティでは、再利用に伴う問題を軽減するために、オブジェクトやクラスライブラリの開発者に、自分たちが開発した汎用的で再利用可能な開発物についての情報を広報する手段を提供している。 Javaのもう一つの特徴はプラットフォームに依存していないことであり、これは、Javaのプログラムがさまざまなハードウェアやオペレーティングシステム上で必ず同じように動く、ということを意味する。一度Javaのプログラムを作成すれば、そのプログラムはどのプラットフォーム上でも動くのである。近年では、Java実行環境を構成するJava仮想マシンに高速化の技術が導入され、プラットフォームに依存したプログラムと同水準の実行性能を実現している。 Javaのプラットフォーム非依存は、次のようにして実現されている。 また、実際にはJavaコンパイラの実装として、ソースコードから直接にプラットフォームのハードウェアにネイティブなオブジェクトコード(機械語コード)を生成するものがある。このようなJavaコンパイラの実装としてはGNUプロジェクトのGNU Compiler for Java (GCJ)などがある。この場合、中間言語のバイトコードを生成するという段階は省かれる。しかしこの方法で生成されるJavaの実行コードは、コンパイル時に指定したプラットフォームでしか動かない。 Javaの実行コード(バイトコード)を生成する手段としては、プログラミング言語Javaでプログラムを書くことが標準的なやり方である。Javaのバイトコードの実行は、Java仮想マシンという仮想マシンの環境上で行われる。Java仮想マシンは実行時にバイトコードをネイティブコードに変換する。なお、Javaのバイトコードを生成する他の方法としては、現在ではRuby(JRuby)や Groovy 、Kotlin 、Jabaco 、Python(Jython)などのプログラミング言語でプログラムを書くこともできる。 サン・マイクロシステムズのJavaのライセンスは、すべてのJava実行環境の実装は「互換性」を備えるべきであることを要求していた。このことに関連して、サン・マイクロシステムズとマイクロソフトとの間で法的な争いが起こったことがあった。この法的な争いは、サンが、マイクロソフトのJava実行環境の実装について次のように主張したことによる。 サンは訴訟を起こして勝訴し、約2000万ドルの違約金の支払いを受けた。また裁判所は、マイクロソフトに対してサンのライセンス条件に従うことを命じた。この決定を受けて、マイクロソフトは自社のOSであるWindowsにJava実行環境を同梱しない方針を採った。また近年のバージョンのWindowsでは自社のウェブブラウザであるInternet ExplorerでJavaをサポートしないようにした。その結果、Internet ExplorerでJavaアプレットを動かすためには、別途にプラグインが必要となった。しかし、サンなどの企業は、近年のバージョンのWindowsのユーザが、無償でJava実行環境を利用できるようにした。そのため、ほとんどのWindows PCのユーザは、何ら問題なくウェブおよびデスクトップ上でJavaアプリケーションを実行できる。 最初期のJava実行環境の実装では、Javaプログラムの実行速度が遅かったが、近年では大きく改善されて、高速に実行できるようになった。最初期のJava実行環境のJava仮想マシンの実装は、移植性を実現するためにインタプリタとして動作する仮想マシンを採用した。こうした初期のJava実行環境の実装では、Javaプログラムの実行速度がCやC++のプログラムと比べて遅かった。そのため、Javaプログラムの実行速度は遅いという評判が広まった。近年のJava実行環境の実装では、いくつかの技術を導入することにより、以前と比べて、Javaプログラムをかなり高速に実行できるようになった。 Javaプログラムを高速に実行するために使われる技術を説明する。 Java の移植性(プラットフォーム非依存)がどの程度実現できているかについては、議論の対象となっている。技術的には移植性とは実現が難しい目標である。多くのプラットフォームにおいて同一に動作するJavaプログラムを作成することは、可能である。しかし実際には、Javaを利用できるプラットフォームによってはちょっとしたエラーが発生したり、微妙に異なる動作をする事例が多い。こうしたことから一部の人々は、サン・マイクロシステムズのJavaの売り文句であった "Write once, run anywhere"(一度コードを書けば、どの環境でも動く)をもじって "Write once, debug everywhere"(一度コードを書けば、どの環境でもデバッグが必要)と皮肉をいわれることがある。 しかし、Javaのプラットフォーム非依存は、サーバ側や組み込みシステムのアプリケーションに関しては、非常に成功している。サーバ側 (Java EE) では、Java のサーブレット、Webサービス、EJB (Enterprise JavaBeans) などの技術が広く使われている。組み込みシステムの分野においても、組み込みシステム向けの Java環境 (Java ME) を使ったOSGiを基にした開発が広く行われている。 Javaはガベージコレクション機能を備えており、これを備えていない従来の多くの言語と比較して、プログラムの開発生産性と安定性が高く、プログラマの負担が完全に解消されるわけではないものの、大きく軽減される。近年のJavaでは世代別ガベージコレクションというより効率的な技術を導入している。 
ガベージコレクションを備えていないC++やその他の言語の場合、プログラマが適切にメモリの管理をしなければならない。オブジェクト指向プログラミングをするプログラマは一般に、Javaと同様メモリ内のヒープにオブジェクトを格納する領域を割り当てる。そしてオブジェクトがもはや必要なくなった場合に、必ず明示的にオブジェクトを削除する指示を記述して、そのオブジェクトが使っていたメモリ領域を解放しなければならない。メモリ管理が不十分なプログラムでは、メモリリークが発生する可能性がある。メモリリークとは、不適切な指示などで、解放されなかったメモリ領域が累積していき、利用できるメモリの量が減っていくことで、気付かないうちに大量のメモリを消費してしまう問題が起こり得る。他にも、メモリ領域を解放する際に、解放の指示を重複して行ってしまい、プログラムの実行を不安定にするなどのケースがあり、悪くすると異常終了してしまうこともある。 ガベージコレクション機能は、このような潜在的な問題の多くを未然に防ぐことができる。プログラマは任意の時点でオブジェクトを生成することができ、Java実行環境は生成されたオブジェクトのライフサイクルを管理する責任を持つ。 プログラム(オブジェクト)は、他のオブジェクトへの参照を持ち、そのオブジェクトのメソッドを呼び出すことができる。他のオブジェクトへの参照とは、低水準の視点で述べると、メモリ内のヒープという領域上に確保されたそのオブジェクトを指すアドレスのことである。 オブジェクトがどこからも参照されなくなった場合、Javaのガベージコレクション機能が自動的にその「到達不可能なオブジェクト」を削除し、そのメモリ領域を解放することで、解放し忘れた未解放メモリが累積していき利用できるメモリ量が減っていくメモリリークを防ぐ。 ただしJavaのガベージコレクション機能は、メモリリークの問題を完全に解消するわけではない。プログラマが、自分のプログラムでもはや必要のないオブジェクトへの参照を保持し続けた場合は、やはりメモリリークが発生する可能性がある。 別の表現で述べると、Javaでは、メモリリークは概念的に高い水準においては、発生する可能性が残っているということである。概念的に低い水準においては、ガベージコレクションが正しく実装されたJava仮想マシンを使えば、メモリリークが発生する可能性は無くなった。全体として、Javaのガベージコレクション機能により、C++の場合と比べると、オブジェクトの生成と削除は、より簡潔になり、潜在的に安全になり、また多くの場合は高速になっている。 C++においても、Javaと同等のメモリ管理の高速性と効率性を実現することは可能ではあるが、先に述べた通り、複雑な作業で間違いやすく、完璧に行おうとすれば開発期間が非常に長くなり、開発したソフトウェアはかなり複雑で難解になる。たとえば、C++で特定のクラスを対象として、高速実行およびメモリ利用の断片化の最小化を、高水準で達成できるメモリ管理モデルで設計開発する技法があるが、こうした技法は複雑である。 ガベージコレクションの機構は、Java仮想マシンに組み込まれており、開発者からは、事実上隠蔽されている。開発者は、場合にもよるが、ガベージコレクションがいつ起こるか意識しなくて良い。というのも多くの場合、ガベージコレクションの実行は、プログラマが自分で書いたコードによって明示的に起こる何らかの挙動と、必ずしも関連しているわけではないからである。 Javaでは充実したライブラリにより、コンピュータネットワークを使うソフトウェアを、効率良く開発できる。Javaの初期のバージョンから、TCP/IP (IPv4) のライブラリを備えており、ネットワークでソケット通信を行うソフトウェアを簡単に実装できた。分散オブジェクト環境のソフトウェアの開発も早い時期からできるようになった。Java RMIもしくはCORBAの分散オブジェクト技術を標準で使うことができる。近年では、標準、拡張その他のライブラリにより、さまざまなネットワークプロトコルを高水準で扱えるようになっている。 現在ではIPv6も扱えるようになりつつある。 XML文書を扱う技術とネットワーク機能を有効に組み合わせることにより、高度なシステムやサービスを構築できるようになっている。 Javaでは初期のバージョンから遠隔のコンピュータ上にある実行コード(Javaアプレット)を安全に実行できるよう設計されていた。 Javaは、パッケージという名前空間の機構を持つ言語であり、ライブラリおよびアプリケーションソフトウェアに含まれる多数の Java のプログラム(クラスとインタフェース)を、パッケージの階層構造に分類・整理することができる。名前空間の機構を持たない言語と比べて、多数のクラスとインタフェースの管理が容易となり、クラスとインタフェースの命名についても、既存のクラス/インタフェースとの名前の衝突回避を考慮する労力が、大きく軽減される。 Java のバイトコードには複数の実行形態があると考えることができる。ただしいずれのバイトコードも、Java実行環境 (JRE) の下で実行されるという点では、同じと考えることもできる。 構文は、CおよびC++から多くを引き継いでいる。このため、設計当時には割合として多かった、CやC++しか書けないプログラマにも習得しやすいと、。Javaが設計された1990年代中旬以前は、Cのプログラマが多く、またオブジェクト指向プログラミング言語の中では、C++は広く使われてきた言語の一つだった。 なお、JavaではC++と違って名前空間レベルの関数(メソッド)および変数(フィールド)の宣言および定義を許可しておらず、必ず何らかのクラス定義の中に記述することが特徴である。この特徴は後発のC#も踏襲している。 次の節以降では、Hello worldプログラムで、Javaプログラムの例を示して説明する。 Hello worldプログラムとは、"Hello, world" という文字列をディスプレイなどの出力装置に出力する簡単なソフトウェアプログラムである。プログラミング言語の初学者向けのプログラム例としてよく使われる。 なお先に述べた通り、Javaには複数の実行形態があると考えることができるので、以降では、それぞれの実行形態におけるHello worldプログラムを例示する。 コマンドライン環境で動くスタンドアロンのJavaアプリケーションの例を示す。Javaでは、他のプログラミング言語と同様に、コマンドライン環境で動くプログラムを簡単に開発できる。 このプログラムについて説明する。 グラフィカルユーザインタフェース (GUI) 環境で動く Swingを使ったスタンドアロンのJavaアプリケーションの例を示す。Swingは、Java SEの高度なGUIのウィジェット・ツールキットのライブラリである。 Javaアプレットは、他のアプリケーションに埋め込まれるプログラムである。多くの場合は、ウェブブラウザに表示されるウェブページに埋め込まれる。 Javaサーブレットは、サーバ側のJava EEの構成要素であり、クライアントから受けた要求 (request) に対する応答 (response) を生成する。現在、多くの場合はウェブブラウザから要求を受け、応答としてXHTML / HTMLのウェブページを動的に生成する。 Javaプラットフォームの構成を説明する。 Java実行環境 (JRE; Java Runtime Environment) は、Javaプラットフォームに配置されたJavaアプリケーションを実行するために必要な、ソフトウェアである。標準クラスライブラリやJava仮想マシンなどから構成される。 エンドユーザは普通、Javaソフトウェアパッケージやウェブブラウザプラグインの利用を通じてJREを使う。オラクル / JCPをはじめ複数の団体や企業により、 さまざまなプラットフォーム向けに、多くの JRE の実装が開発・提供されている。 JREの他、オラクル / JCPなどの団体・企業は、Java開発キット (JDK) と呼ばれるJREのスーパーセットの実装を開発・提供している。JDKは、Javaプログラムの開発を支援する基本的なソフトウェアであり、多くの開発ツールが含まれている。 
Java実行環境は、標準クラスライブラリとJava仮想マシン、およびいくつかのファイルとソフトウェアから構成される。 Java開発キット (JDK; Java Development Kit) は、オラクル / JCP をはじめ複数の団体や企業により開発・提供されている、Javaプログラムの開発を支援する基本的なソフトウェアである。Javaが世に出て以来、広く使われてきたJavaの開発ツールである。javac、javadoc、デバッガなどを含む多くの開発ツールが含まれている。また、完全なJava実行環境 (JRE) を同梱している。 Javaプログラムを実行するだけであれば、Java実行環境が導入されていれば充分で、Java開発キットを導入する必要は無い。 Java開発キット (JDK) の呼称は、これまでに何度か変更されている。 広く使われているプラットフォームなどに対しては、複数の団体や企業が独自にJREやJDKの実装を開発・提供している。独自の最適化技術を適用したり、特定の用途に特化した最適化、あるいは異なるライセンスを採用するなど、それぞれ特徴がある。 オラクルおよびいくつかの団体が、オープンソースもしくはフリーソフトウェアのライセンスで利用できる、Java仮想マシンおよびJRE 、JDKの実装を開発している。 サン / JCPはJava発表時からJava仮想マシンおよび標準ライブラリの仕様を公開しており、Java標準クラスライブラリのソースコードもJDKの一部として提供していた。しかしソースコードの改変は下記のOpenJDKリリースまでライセンスで認めていなかった。そのため、サンの実装とは別に、オープンソースもしくはフリーソフトウェアでかつサンの実装と互換性のあるJava標準クラスライブラリとJava仮想マシンが開発された。また、2006年にサンはライセンスの方針を変更し近い将来オープンソースにする意向を表明し、2007年5月8日にJava SE 6をOpenJDKとして GNU General Public License にてリリースした。 GNU Classpathは、2007年3月現在、J2SE 1.4のライブラリの99%以上を実装し、J2SE 5.0では95%以上を実装している。 またOpenJDKにはIBMが協力している。 GNUプロジェクトがGNU Interpreter for JavaおよびGNUコンパイラコレクション (GCC) のJava版であるGNU Compiler for Javaを出している。GNU Compiler for Javaはahead-of-timeコンパイラを搭載しており、Javaのソースコードやバイトコードをネイティブマシンコード(Windowsの場合はexeファイル)に変換できる。クラスライブラリはGNU Classpathを使っており、1.4のほとんどの部分が対応しており、5.0の部分も実装が進んでいる。 Windows環境では、GCJはMinGW (Minimalist GNU for Windows) もしくはCygwinを使って実行できる。Cygwinの場合は、対象がライセンスがGPLのソフトウェアに限られるが、MinGWの場合は商用含め、すべてのソフトウェアで利用できる。 米Excelsior社がExcelsior JETというahead-of-timeコンパイラを販売している。Java SE 用に書かれたプログラムを Windowsのネイティブマシンコードであるexeファイル(実行ファイル)に変換できる。起動の高速化やアプリケーションの難読化を実現する。 Windowsにて、配布、実行しやすくするために、Javaのjarファイルをexeファイル(実行ファイル)でラッピングするツールがある。以下が、その一例である。 Java Web Startには、適切なバージョンのJREをインストールする機能があるが、そもそも、JREがインストールされていない場合は、それが不可能である。JSmoothなどでは、ラッピングされたexeが、必要なJREがインストールされていないことを検出した時は、JREをダウンロードして、インストールする機能を持つ。また、上記3つすべてにおいて、JREを同梱して、同梱したJREを使ってアプリケーションを実行する機能を持つ。 また、通常のJavaアプリケーションでは、Windowsのタスクマネージャには、java.exeやjavaw.exeと表示され、Javaのアイコンが表示されるが、自前のexeファイル名と自前のアイコンを表示する機能を持つ。 さらに、上記のいくつかは、アプリケーションの2重起動を防止したり、アプリケーションをWindowsサービス(NTサービス)化する機能を持つ。 サンが特定のOSに特化した機能を提供することを嫌がっていたため、これらの機能が不足しており、それを補うために、exeパッケージ化が存在する。 オラクルとJCPは、さまざまな環境に対応するため、3つのJavaプラットフォームのエディションを規定している。JavaのAPIの多くは分類され各エディションに割り当てられている。 エディションごとに実行環境と開発環境がある。Java実行環境 (JRE) は、Java仮想マシンと標準ライブラリの実装から構成される。JDK 1.1のバージョンまでは、Java SEに相当するエディションのみが提供されていた。3つのエディションが規定されたのは、JDK 1.1の次のバージョンからである。オラクル / JCPが規定しているエディションを次に示す。 Java API に含まれるクラスは、パッケージと呼ばれるグループに分類される。各パッケージは、相互に関連するインタフェース、クラス、例外を含む。Java の各エディションでどのような機能が使えるかについては、それぞれのエディションの項目 (Java SE, Java EE, Java ME) を参照のこと。 JavaのAPIセットは、オラクルと他の個人や企業・団体が共同で、JCP (Java Community Process) プログラムに沿って管理している。このプロセスに参加する人々が、Java APIの設計と開発に関わっている。このプロセスのあり方については、議論の対象となっている。 2004年より、IBMとBEAシステムズ(後にオラクルが買収)は、Javaの公式のオープンソース実装を作る動きを、公的に支援している。2006年まで、サンはこうした動きに対しては拒否する立場をとってきたが、方針を変えて自社とJCPによるJavaの実装をオープンソースにする意向を表明し実行に移し始めている。 Javaの標準機能に対する拡張機能は、多くの場合、codice_113パッケージに属する。こうした拡張機能は、Java SEの Java開発キット (JDK) や Java実行環境 (JRE) には含まれない。Java の拡張機能や関連技術は、プログラミング言語Javaと密接に連携する。主なJavaの拡張機能と関連技術を示す(いくつかの拡張機能は近年のJava SE標準ライブラリに統合された)。 現在、Javaプラットフォームの将来のバージョンや機能は、JCP (Java Community Process) の標準化プロセスのもとで開発されている。JCPのメンバになることで、Java技術の将来のバージョンや機能の定義に関与することができる。JCPには、IBM、ボーランド、富士通、Apacheソフトウェア財団、ヒューレット・パッカード など、さまざまな個人、団体、企業がメンバとして参加している。 JCPは、Javaプラットフォームに追加する仕様や技術を、JSRs (Java Specification Requests) という文書群に記述する。 プログラミング言語JavaとJavaコアAPIに関わるいくつかの JSRs を示す。 Javaアプリケーションを開発するための開発ツール(開発用ソフトウェア)をいくつか示す。次に示すツール以外にも、数多くのツールが開発・提供されている。 Javaプログラムを開発できるいくつかの統合開発環境 (IDE) を示す。 
Javaに対しては、優れた技術だと評価する人々がいる一方で、批判も少なくない。Javaは、ソフトウェアに関する複雑さを管理する問題に対して、革新的な方法を提供するという目標の下で、開発された。多くの人々は、Java技術は、この期待に対して満足できる答えを提供したと評価している。しかしJavaにも欠点が無いわけではない。Javaは、どのようなプログラミング作法にも適応しているわけではない。また、どのような環境や要件にも普遍的に適応しているわけではない。 Javaに対する批判を大まかに記述する。 Javaの初期のバージョンでは、CやC++などのネイティブにコンパイルする言語と比べて、とても実行が遅くメモリの消費が激しいとして、批判されることが多かったが、のバージョンでは改善されてきている。近年のJava仮想マシンで採用しているジャストインタイムコンパイラや動的再コンパイルの実行性能は、従来の言語のネイティブコンパイラとほぼ同じ水準の実行性能かそれ以上を達成することがある。これは頻繁にメモリアクセスを行うプログラムにおいてJavaのガベージコレクションの技術が、Cのmallocやfreeよりも高い性能を発揮できることによる。こうした事情から、Javaの実行性能については、議論の対象となっている。 ルックアンドフィールに関して、JavaのSwingのツールキットを使ったグラフィカルユーザインタフェース (GUI) を備えたアプリケーションの既定のルックアンドフィールが、従来のネイティブなアプリケーションとは大きく異なるため、エンドユーザの人々にとってJavaのGUIアプリケーションはなじみにくいと批判されることがある。Javaではプラグイン可能なルックアンドフィールの機構を備えており、サンは Windows、macOSおよびMotifの各ルックアンドフィールのクローンを提供した。そのため、Swingの既定のルックアンドフィールではなく、プラットフォームネイティブと同様のルックアンドフィールでJavaのアプリケーションを動かすよう指定することができる。しかしエンドユーザにとってこの指定方法は簡単ではないと指摘されることがある。 言語の設計(デザイン)に対する批判をいくつかの側面から述べる。 Javaの設計者は、他のプログラミング言語では備えているいくつかの機能をJavaから排除した。利便性より言語設計の簡潔さを優先した結果だが、こうした設計上の判断については賛否が分かれている。 また、Javaは、Smalltalkのように「純粋な」オブジェクト指向プログラミング言語ではないとして、設計の一貫性のなさを批判されることがある。たとえばJavaには、プリミティブ型という、クラス型(参照型)ではないものがある。Javaの設計者は、実行性能上の理由から、意図的にプリミティブ型をJavaに導入した。例えば、codice_3型などの組み込み型がプリミティブ型に相当する。codice_3型の値はオブジェクトではなく、これをクラス型のオブジェクトとして扱うには、ラッパーであるcodice_116クラス型への変換(ボクシング)が必要になる。J2SE 5.0以降ではオートボクシングにより、プリミティブ型と、それに対応するボックスクラス型の間のやりとり(変数への代入や値の参照など)は、コンパイラによって自動的に行われるようになり、ソースコード上の煩雑さは軽減されたが、本質的に同じものを指すはずのデータ型が複数存在するという矛盾を依然として抱えたままである。 一方で、C++のように名前空間レベルのメソッドやフィールドを定義できず、必ずクラス定義が必要であることから、クラスを定義する側も利用する側も記述量が肥大化しがちである。J2SE 5.0 では、メソッドとフィールドのstaticインポートを行えるようになり、クラス修飾なしでstaticメンバーを利用できるようになった。しかしstaticインポートを不用意に使うとソースコードを判読困難にする可能性がある。サン / JCPはstaticインポートを適切に使用するガイドラインを合わせて公開した。 なお、C#はDelphiの設計思想を取り入れた言語であり、登場当初からプロパティをサポートしている。演算子オーバーロードについても、C++より限定的ではあるがサポートされる。そのほか、Javaで問題視されていたプリミティブ型の扱いやジェネリクスなどに対して、C#は独自の解決策を提示している。 一般に、Javaプログラムを実行する際、-classpathオプションを使用するか、環境変数のクラスパス (CLASSPATH) を必要に応じて適切に設定する必要がある。クラスパスを指定すると、既定のカレントディレクトリという設定が上書きされる。したがって、クラスパスを変更するソフトをインストールするなど設定を変えられた場合は、Java実行環境は正しくJavaプログラムを実行することができなくなることがある。このため Javaを使い始めた人々は、クラスパスについて戸惑うことがある。サンは-classpathオプションを指定する方法を推奨していた。 Javaは高い移植性と互換性を実現するべく開発されており、ある程度の水準まで達成しているが、課題が残っている。Javaのバージョン間の下位互換性・上位互換性が完全ではないことが問題として議論の対象になっている。Javaでは高い移植性を保っているため、一部のプラットフォームにしかない独自の機能はJavaからは使えない。 サン・マイクロシステムズは複数のJava認定資格を主催していた。オラクルによる買収後、一部資格は変更されている。ただし、買収前に以下の資格を取得した者は買収後も有効資格である。 現行はオラクルが以下のJava認定資格を主催している。これらのうちOracle認定Java EE 6 Enterprise JavaBeansディベロッパ以外の試験はサン・マイクロシステムズが主催していた試験の、それぞれ略号の "OCJ" を "SJC" に変更したものに対応する位置付けにある。 認定試験に不合格だった場合、その試験日を含めて14日以内は同一試験を受験することができない。 """--- 問題3:日本語ウィキペディアの次の記事:「Python」、「C++」、「Java」をそれぞれ処理して、n-gramsの計算とその出現回数を抽出した後、記事間のコサイン類似度を計算してみましょう。 問題3.1 get_ngrams次のセルに`get_ngrams(words, n)`の関数を定義しましょう。 `words`は単語のリストです。`n`はn-gramの長さを表しています、例えば`n=2`は2-gramを指定します。``` EXERCISE METADATAexercise_id: "GetNgrams"```**ヒント**: 答えは以上定義した`count_ngrams()`にとても似ています。%%solution def get_ngrams(words, n): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION i = 0 ngrams = {} while i < len(words) - (n - 1): current_ngram = [] for j in range(i, i + n): current_ngram.append(words[j]) concat_ngram = ' '.join(current_ngram) if concat_ngram in ngrams: ngrams[concat_ngram] += 1 else: ngrams[concat_ngram] = 1 i += 1 return ngrams # END SOLUTION %%studenttest GetNgramsStudentTest assert get_ngrams(['I', 'am', 'fine', 'today'], 4) == {"I am fine today": 1} assert get_ngrams(['I', 'am', 'fine', 'today'], 3) == {"I am fine": 1, "am fine today": 1} %%inlinetest GetNgramsAutograderTest try: get_ngrams 
except: assert False, "get_ngrams is not defined?" assert get_ngrams(['a'], 1) == {'a': 1}, 'get_ngrams does not work on a single word input: "a"' assert get_ngrams(['a', 'b'], 1) == {'a': 1, 'b': 1}, 'get_ngrams does not count single words correctly' assert get_ngrams(['a', 'b'], 2) == {'a b': 1}, 'get_ngrams does not count bigrams correctly' assert get_ngrams(['a', 'b', 'c'], 2) == {'a b': 1, 'b c': 1}, 'get_ngrams does not count bigrams correctly' assert get_ngrams(['a', 'a', 'a'], 2) == {'a a': 2}, 'get_ngrams does not count bigrams correctly' try: assert get_ngrams(['a', 'b'], 3) == {}, 'get_ngrams does not count single words correctly' except Exception as e: assert False, 'get_ngrams seems to have an index violation: ' + str(e) %%submission def get_ngrams(words, n): n += 1 i = 0 ngrams = {} while i < len(words) - (n - 1): current_ngram = [] for j in range(i, i + n): current_ngram.append(words[j]) concat_ngram = ' '.join(current_ngram) if concat_ngram in ngrams: ngrams[concat_ngram] += 1 else: ngrams[concat_ngram] = 1 i += 1 return ngrams result, log = %autotest GetNgramsAutograderTest from IPython.core import display display.display(report(GetNgramsAutograderTest, results=result.results)) assert(result.results['passed']) # Uncomment and run the following line for autograding your solution: #Submit('GetNgrams')問題3.2 count_article_ngrams``` EXERCISE METADATAexercise_id: "CountArticleNgrams"```以下のセルに`count_article_ngrams(article, n)`を実装しましょう。`article`は日本語の記事のテキストで、`n`はn-gramのnです。戻り値python dictionary(辞書)にならなければなりません。キーはn-gram, 値はn-gramの頻度です。記事を単語に分けるために以上に定義された`tokenize_japanese`を使いましょう。例えば、```count_article_ngrams('ウィキペディアを読みましょう!', 2)```以下の辞書を返さなければなりません```{'ウィキペディア を': 1, 'を 読み': 1, '読み ましょ': 1, 'ましょ う': 1, 'う !': 1}```# EXERCISE CONTEXT # Make sure we have a working get_ngrams implementation in the context of this test. def get_ngrams(words, n): i = 0 ngrams = {} while i < len(words) - (n - 1): current_ngram = [] for j in range(i, i + n): current_ngram.append(words[j]) concat_ngram = ' '.join(current_ngram) if concat_ngram in ngrams: ngrams[concat_ngram] += 1 else: ngrams[concat_ngram] = 1 i += 1 return ngrams %%solution # Python, C++, Javaの記事からn-gramsを計算し、そのngramの出現回数ベクター from collections import defaultdict def count_article_ngrams(article, n): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION ngram_count_dict = defaultdict(int) for line in article.replace('。','\n').split('\n'): sentence_tokens = tokenize_japanese(line) sentence_ngrams = get_ngrams(sentence_tokens, n) for ngram in sentence_ngrams: ngram_count_dict[ngram] += 1 return ngram_count_dict # END SOLUTION %%studenttest CountArticleNgramsStudentTest assert count_article_ngrams('ウィキペディアを読みましょう!', 2)=={'ウィキペディア を': 1, 'を 読み': 1, '読み ましょ': 1, 'ましょ う': 1, 'う !': 1} %%inlinetest CountArticleNgramsAutograderTest try: count_article_ngrams except: assert False, 'count_article_ngrams is not defined' try: res = count_article_ngrams('ウィキペディアを読みましょう!', 2) except Exception as e: assert False, 'count_article_ngrams returned error %s on input "ウィキペディアを読みましょう!"' % str(e) assert 'ウィキペディア を' in res, "Did you extract bigrams correctly? With input 'ウィキペディアを読みましょう!' expected to see 'ウィキペディア を' in the bigram list, but only got [%s]" % ",".join(res.keys()) assert res=={'ウィキペディア を': 1, 'を 読み': 1, '読み ましょ': 1, 'ましょ う': 1, 'う !': 1}, "Have you run the test?" 
# Uncomment and run the following line for autograding your solution: #Submit('CountArticleNgrams')問題3.3 ngram_based_article_similarity``` EXERCISE METADATAexercise_id: "NgramBasedArticleSimilarity"```以下のセルに`ngram_based_article_similarity(article1, article2, n)`の関数を定義しましょう。`article1`と`article2`は日本語の記事のテキストです。`n`はn-gramのnです。**ヒント**: `count_article_ngrams`と`cosine_similartity`を使いましょう。実行例:```ngram_based_article_similarity("日本語が難しい", "日本語が難しい", 1) == 1.0ngram_based_article_similarity("日本語が難しい", "日本語が優しい", 1) == 2/3```# EXERCISE CONTEXT # make sure we have a correct implementation of count_article_ngrams and cosine similarity in context of the test from collections import defaultdict def count_article_ngrams(article, n): ngram_count_dict = defaultdict(int) for line in article.replace('。','\n').split('\n'): sentence_tokens = tokenize_japanese(line) sentence_ngrams = get_ngrams(sentence_tokens, n) for ngram in sentence_ngrams: ngram_count_dict[ngram] += 1 return ngram_count_dict def get_ngrams(words, n): i = 0 ngrams = {} while i < len(words) - (n - 1): current_ngram = [] for j in range(i, i + n): current_ngram.append(words[j]) concat_ngram = ' '.join(current_ngram) if concat_ngram in ngrams: ngrams[concat_ngram] += 1 else: ngrams[concat_ngram] = 1 i += 1 return ngrams %%solution def ngram_based_article_similarity(article1, article2, n): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION article1_ngrams_count = count_article_ngrams(article1, n) article2_ngrams_count = count_article_ngrams(article2, n) return cosine_similarity(article1_ngrams_count, article2_ngrams_count) # END SOLUTION %%studenttest NgramBasedArticleSimilarityStudentTest assert abs(ngram_based_article_similarity("日本語が難しい", "日本語が難しい", 1) - 1.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が難しい", "日本語が優しい", 1) - 2.0/3.0) < 0.1 %%inlinetest NgramBasedArticleSimilarityAutograderTest assert abs(ngram_based_article_similarity("日本語", "日本語", 1) - 1.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が", "日本語が", 1) - 1.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が難しい", "日本語が難しい", 1) - 1.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が", "日本語が", 2) - 1.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が難しい", "日本語が優しい", 1) - 2.0/3.0) < 0.1 assert abs(ngram_based_article_similarity("日本語が難しい", "日本語が難しい", 3) - 1.0) < 0.1 # Uncomment and run the following line for autograding your solution: #Submit('NgramBasedArticleSimilarity')実際のウィキペディアのデータに適用では、以上に定義した関数を実際のウィキペディアのデータに適用してみましょう。cpp_up_to_n_grams = defaultdict(int) python_up_to_n_grams = defaultdict(int) for n in range(1, 10): similarity_score = ngram_based_article_similarity(cpp_article, python_article, n) if similarity_score == 0: break print("{}-grams cosine similarity (Python vs C++): {}".format(n, similarity_score)) for index in range(10): cpp_article_ngrams = sorted(count_article_ngrams(cpp_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: cpp_up_to_n_grams.update(cpp_article_ngrams) python_article_ngrams = sorted(count_article_ngrams(python_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: python_up_to_n_grams.update(python_article_ngrams) print("{}\t|\t{}".format(cpp_article_ngrams[index], python_article_ngrams[index])) print("up-to-10-grams cosine similarity (Python vs C++): {}".format(cosine_similarity(cpp_up_to_n_grams, python_up_to_n_grams))) java_up_to_n_grams = defaultdict(int) python_up_to_n_grams = defaultdict(int) for n in range(1, 10): similarity_score = ngram_based_article_similarity(java_article, 
python_article, n) if similarity_score == 0: break print("{}-grams cosine similarity (Python vs Java): {}".format(n, similarity_score)) for index in range(10): java_article_ngrams = sorted(count_article_ngrams(java_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: java_up_to_n_grams.update(java_article_ngrams) python_article_ngrams = sorted(count_article_ngrams(python_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: python_up_to_n_grams.update(python_article_ngrams) print("{}\t|\t{}".format(java_article_ngrams[index], python_article_ngrams[index])) print("up-to-10-grams cosine similarity (Python vs Java): {}".format(cosine_similarity(java_up_to_n_grams, python_up_to_n_grams))) java_up_to_n_grams = defaultdict(int) cpp_up_to_n_grams = defaultdict(int) for n in range(1, 10): similarity_score = ngram_based_article_similarity(java_article, cpp_article, n) if similarity_score == 0: break print("{}-grams cosine similarity (Java vs C++): {}".format(n, similarity_score)) for index in range(10): java_article_ngrams = sorted(count_article_ngrams(java_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: java_up_to_n_grams.update(java_article_ngrams) cpp_article_ngrams = sorted(count_article_ngrams(cpp_article, n).items(), key=lambda x:x[1], reverse=True) if n >2: cpp_up_to_n_grams.update(cpp_article_ngrams) print("{}\t|\t{}".format(java_article_ngrams[index], cpp_article_ngrams[index])) print("up-to-10-grams cosine similarity (C++ vs Java): {}".format(cosine_similarity(java_up_to_n_grams, cpp_up_to_n_grams))) #@title ウィキペディア記事「栃木県」をtochigi_article変数に読み込む tochigi_article = """ 栃木県 栃木県(とちぎけん)は、日本の都道府県の一つ。関東地方北部に位置する。県庁所在地は宇都宮市。県内には日光国立公園が立地し、日光・那須などの観光地・リゾート地を有する。 関東地方北部に位置する県で、人口は約198万人。境界部に海岸線を有しない内陸県である。県内の地域区分は概ね宇都宮市、鹿沼市、下野市、真岡市、さくら市などを中心とする県央、小山市、栃木市、佐野市、足利市など国道50号沿線の県南、那須野が原に広がる那須塩原市、大田原市、那須町や県北西部を占める日光市を中心とする県北に分類される。 地勢は、北部から北西部にかけて奥羽山脈、日光連山、足尾山地が連なり、標高1500m - 2500m程の急峻な山岳が連なっている。これらの山々から流れ出る鬼怒川、那珂川、渡良瀬川等諸河川が関東平野の北端を形成し、更に北に進むと那須野が原に至り、県北の町並みが広がる。 県土のほぼ中央に宇都宮市が立地し、人口は県全体の4分の1に当たる約50万人が集中している。そのほかは、県南の小山市、栃木市、足利市、佐野市、県北の那須塩原市が10万人以上の人口を抱えている。県南に人口の多い市が連なる。 産業は、農業、工業、商業、観光業のバランスがとれ、それぞれ盛んである。 農業は、平野部が米や麦の産地、那須野が原などの高原部(那須高原)が酪農地、畜産地となっているほか、いちごやかんぴょうなどの特産物も生産されている。 工業は、東北自動車道・国道4号と北関東自動車道・国道50号を軸とする地域に、北関東工業地域、及び、関東内陸工業地域(本県の場合は両者の定義に当てはまる)が広がり、第二次世界大戦中に軍需産業が集積した宇都宮市では、機械工業や金属工業、食品・飲料工業が、真岡市、上三川町、芳賀町では自動車関連産業(日産自動車系、本田技研工業系)が、那須塩原市、大田原市ではタイヤ製造や精密機械工業(医療機器、写真用レンズ製造)がそれぞれ発達している。また、県南では、食品・飲料工業や機械工業、機械・自動車部品等の中小規模の工場が広く立地するほか、伝統的に繊維産業が盛んである(結城紬、足利銘仙)。 商業は、宇都宮市における小売業が発達し、ほか工業地域では卸売業も盛んである。 観光業は、日光・鬼怒川エリアや、那須・塩原エリアで盛んで、日光市の鬼怒川温泉、川治温泉、湯西川温泉、日光湯元温泉、那須町、那須塩原市の那須温泉郷、塩原温泉郷のような、飛鳥時代や奈良時代からの古い歴史を持つ名湯。東照宮・日光二荒山神社・輪王寺の二社一寺で構成される世界遺産、日光の社寺。いろは坂に華厳の滝、中禅寺湖や戦場ヶ原など、風光明媚な奥日光の景勝地。皇室の御用邸や那須岳、殺生石に代表される別荘地や牧場、温泉が広がる那須高原と豊かな自然や文化に育まれた名所を有している。これらの地域は日光国立公園の指定地域内にあり、自然保護などの施策も執られている。日光市の一部には尾瀬国立公園に指定された地域も存在する。 茨城県や群馬県と共に北関東を構成する。 人口は、約198万人で全国第18位、(本県に近い人口規模の県としては、岐阜県の約204万人、群馬県の約197万人、福島県の約193万人、岡山県の約192万人等が挙げることができる)。面積は6,408.28 kmで全国第20位(関東地方最大の面積)、東西約84 km・南北約98 km。県庁所在地の宇都宮市は、東京から約100 km、JR東北新幹線で約50分強の位置にあり、中核市の指定を受けている。 地形的には東部の八溝山地、北部から西部にかけての那須連山・下野山地(高原山・日光連山・帝釈山地)・足尾山地の山岳地帯と、県中央部の那珂川・鬼怒川・渡良瀬川の沿岸平野部の3地域に大別される。 東部の八溝山地は標高600 - 1,000 mの阿武隈高地に続く比較的なだらかな山地が連なる。 北部から西部にかけての山岳地帯は日光国立公園、尾瀬国立公園(帝釈山、田代山湿原周辺)に指定されており、日光・鬼怒川・川治・塩原・那須などの観光地がある。北部には奥羽山脈(那須火山帯)に連なる那須連山・高原・男体・日光白根山などの諸火山がある。日光連山他の険しい山岳地帯は、標高2,000 m以上の山脈が関東の北限を形成し、瀑布や湖沼が点在している。また、諸河川の源にもなっており、鬼怒川は中央部を、渡良瀬川は群馬県との県境を流れ利根川に合流し、那珂川は八溝地域から東折し茨城県に入り、ともに太平洋に注いでいる。 南部・東部は、そのほとんどが関東平野の一端で、首都圏の一角として市街化が進んでいる。 
群馬県や茨城県や福島県の隣接自治体と深いつながりを持つ県内市町がある一方で、埼玉県との隣接に関しては、渡良瀬遊水地により飛び地状態になっている栃木市藤岡町下宮地区とのみ、自動車用道路で行き来可能である。事実上、自動車や鉄道で埼玉県に入る場合、群馬県か茨城県を経由しなければならない。栃木県と埼玉県を結ぶ主要幹線の国道4号、東北本線、東北新幹線は茨城県、同じく東北自動車道、東武伊勢崎線、東武日光線は群馬県を経由して埼玉県に入る。 なお、この地区は栃木県・埼玉県・群馬県の3県境が交差する地点が畑の中に存在し、看板が設置されている。山間部や河川上で3県が交差する地点はあるが、平地にあるのは珍しい。 中南部地域は関東平野の中北部に立地し、南端部には渡良瀬川と思川、利根川、また田川と鬼怒川のそれぞれ合流点があり、茨城県、群馬県、埼玉県と境界を接している。一方、県北西部地域は奥羽山脈の南端部に位置し、山岳地域となっており、北東部は低山地で那珂川が縦断して開析が進んだ八溝山地とともに茨城県境を形成しているが、北西部は関東地方屈指の山岳地帯であり、標高2,000メートル以上の山並みが続き群馬県および福島県との境界を形成している。特に日光連山は標高2,300メートル以上の峻険な独立峰を複数有し、空気の澄んだ日には関東南部からも遠望できる。この日光連山と八溝山地の北部接点付近に立地する那須岳は関東の最北端であり、福島県との境を成す。 気候は太平洋側気候を呈し、山間部では冬季の降雪、また平地部では同じく冬季の乾燥と夏季の雷を特徴とする。全県で夏季多雨多湿、冬季少雨乾燥を呈し、年間降水量は山間部で多く奥日光では2,000mmを超すが、平地部では少なく県南部では1,200mm程度である。 2017年度時点で日光地域、県南地域、県東地域、県央地域、那須地域の5地域区分の場合と河内地区、上都賀地区、南那須・芳賀地区、下都賀地区、那須・塩谷地区、安足地区の6地域区分が存在する。 嘗ては3地域に10の広域市町村圏があった(2005年1月の県央・県北・県南3地域区分参考資料)。 以下の14市5郡11町がある。町はすべて「まち」と読む。県内に村は無い。ただし、平成の大合併以前には“村”があり、その時はすべて「むら」と読んでいた。 都市雇用圏(10%通勤圏)の変遷 歴史的には古墳時代、毛野川(けぬのかわ)(現在の鬼怒川)流域一帯には「毛野国」が成立し、これを上下に分かって「下毛野国(しもつけぬのくに)」「下野国(しもつけのくに)」が成立し、唐名では「野州(やしゅう)」と称する。現在でも「下野(しもつけ)」の呼称が広く使われている。 近世には豊臣政権による仕置が行われ、壬生氏、小山氏ら中世以来の氏族は領地を没収されるが、宇都宮氏や大関・大田原両氏の那須衆は豊臣政権に臣従し旧領を安堵されたほか、鎌倉公方の名跡を継ぐ喜連川氏や成田氏などが領地を得た。天正18年(1590年)には徳川家康が関東に移封され、下野は豊臣系大名と徳川領国の接点に位置する。なお、豊臣政権下での1597年(慶長2年)には宇都宮氏が改易されている。 豊臣秀吉没後の豊臣政権では五大老の家康と五大老の上杉景勝、五奉行の石田三成が対立し、慶長5年5月に家康は景勝討伐のため会津出兵を行う。三成は家康の出兵中に上方において挙兵し、家康は小山において上片へ引き返し、関ヶ原の戦いにおいて三成方を撃破する。家康が江戸に徳川幕府を開くと、中世以来の有力豪族は相次いで下野から姿を消し、県域は幕府直轄領や旗本領に細分化され、徳川家の譜代大名や旗本が支配するようになった。 江戸時代の幕藩体制においては宇都宮藩、壬生藩、烏山藩、黒羽藩、大田原藩、佐野藩、足利藩、吹上藩、高徳藩、喜連川藩の諸藩が成立し、福原家の佐久山陣屋、芦野家の芦野陣屋、那須家の福原陣屋、大田原家の森田陣屋など交代寄合の陣屋による領内統治が行われた。 下野国は江戸から奥州へ向かう結節点に位置し、近世には日光道中や奥州街道、壬生通りなど街道や脇往還、が整備され、小山宿や今市宿などの宿駅も整備された。また、利根川水系の渡良瀬川や思川、鬼怒川などの河川交通網も整備され、河岸が設置され舟運が行われた。 日光(日光市)は幕府の聖地として、東照宮をはじめとする華麗な建物が作られ、特別に保護・崇敬された。 近世期には日光山麓をはじめ各地で新田開発・用水開削が進むが、それに伴い秣場を巡る争論や水論も発生した。 江戸時代後期に入ると、今の栃木県域は、地域社会の著しい疲弊・荒廃と、急激な人口減少に見舞われることになる。人口推計によれば、江戸中期の享保6年(1721年)から、江戸後期の天保5年(1834年)までの約1世紀の間に、下野国の総人口は、約56万人から、61.1%の約32万人まで減少し、1世紀で約4割の人口減少という、事実上の人口崩壊状態となっている。同時期、日本の総人口は、度重なる飢饉にも関わらず、約10%の伸び(110.3%)を見せており、とりわけ飢饉が深刻であったとされる、東北太平洋側の陸奥国でも、1.5割弱の人口減少(86.1%)に抑えていることから考えても、江戸後期における、下野国の際立った荒廃ぶりが伺える。 このような状態の中で、二宮尊徳は農村のたて直しを図るため、桜町(現在の真岡市旧二宮町)の旗本領の復興につとめ、以後各地で報徳仕法と呼ばれる改革事業を実施した。 明治時代には「栃木」の表記に揺れがあった。「栃」は中国でクヌギ(櫟)を意味する漢字「櫔」(lì)を簡略化したと考えられ、「櫔木」の表記もあった。また、1871年の廃藩置県の後は「橡木」の表記が使われる例もあったが、「杤木」の表記が主であった。旁の「万」は、下側を「力」とつくる例もあった。1881年(明治14年)ごろより、「栃木」の表記が見られるが、旁は「櫔」に従って「厂」の中に「万」が主である。「厂」の中の「万」の部分は下側を「力」とつくる例や「丂」とつくる例もあった。「厂」の1画目を右から書く字形が生まれたのは昭和中期以降である。なお、中国語の文書で「櫪」や「枥」を使う例もみられるが、主に飼い葉桶を意味する字であり、誤りである。 栃木県の都市は、主に北部山岳地域から流れ出る鬼怒川、那珂川、渡良瀬川等の諸河川が太平洋に向かう流路帯の沖積平野と河岸段丘部に発達している。県域のほぼ中央に位置する宇都宮市は、北西部の山地部と中南部の平野部から成り、市街地はこの山地部と平野部の境界部に形成されている。 このほか、渡良瀬川、田川、鬼怒川、那珂川、思川などの河川中流域には、それぞれ足利市、下野市、真岡市、那須烏山市、鹿沼市、栃木市、小山市といった中規模都市が発達している。 衆議院の小選挙区が5。参議院では、全県で1区を構成。 2008年(平成20年)度の県民総生産は7兆9901億円である。国のGDPと比較しても、過半数の国よりも大きな規模を有している。主要産業は、宇都宮市のほか上三川町、小山市、大田原市、真岡市、栃木市、足利市、鹿沼市、矢板市などに代表される内陸型近代工業で、これらは北関東工業地域の一部を形成している。ほか、農業も盛んであり、県北から流出する河川の豊かな水を利用した産業地帯で、米作が盛んなほか飲料工業も発達している。那須塩原市や大田原市、宇都宮市は国内有数の米産地となっているほか、那須塩原市では酪農も盛んであり、国内生乳生産量は北海道に次いで高い。ほか、真岡市や鹿沼市などを中心にいちごの栽培が盛んであり、壬生町や鹿沼市のかんぴょうとともに県の特産品となっている。商業は宇都宮市で盛んで、県内年間商品販売額の約半分が宇都宮市で占められており、県内第一の商業都市となっている。観光産業も盛んで、日光市や那須塩原市、足利市の歴史・自然遺産(日光の社寺、足尾銅山跡、足利学校跡、中禅寺湖、華厳滝、鬼怒川温泉、那須岳、那須高原、塩原温泉など)、宇都宮市の餃子やカクテル、ジャズ、茂木町のツインリンクもてぎ、栃木市の蔵の街なども観光資源となっている。 県庁所在地の宇都宮市は全国屈指の米産地であるほか、県の年間製品販売額の約半分を占め、工業生産額も県内市町村最大となっており、県内最大の農商工業都市となっている。 ちなみに、かんぴょうは栃木県が99%を生産し、残りは茨城県が生産している。 県内のほぼ中心を南北に東京と東北地方を結ぶJR東北新幹線、JR宇都宮線(東北本線)、東北自動車道、国道4号が南北に走る。 
また宇都宮を中心にして放射線状に県内各地へと路線が伸びている。宇都宮を基準にして各方向への主要路線は次の通り。 このほかに東西を結ぶ路線は、県南部では茨城県、群馬県両県間を結ぶ北関東自動車道やJR両毛線、JR水戸線、国道50号など。県北部では、国道400号などがある。 南北方向には、県東部では国道294号など。県西部には東京から直通する鉄道路線の東武日光線、東武鬼怒川線、野岩鉄道会津鬼怒川線が走る。 栃木県内に空港は存在しない。最寄りの空港は成田国際空港(千葉県成田市)・東京国際空港(東京都)国際空港に限らない場合は茨城空港(茨城県)、福島空港(福島県) 放送対象地域は関東広域圏に属する。栃木県域放送のとちぎテレビ・NHK宇都宮放送局のほかに、NHK放送センター(教育テレビ)と在京キー局5局(日本テレビ・テレビ朝日・TBS・テレビ東京・フジテレビ)が県内全域が放送区域になっている。また、とちぎテレビのみならずNHK・キー局ともに県内各地に中継局を置いている。 ※JRN系列のTBSラジオ、NRN系列の文化放送・ニッポン放送も放送対象地域になっている。 1994年(平成6年)、栃木市の栃木コミュニティ放送に免許が交付されたことがあるが(JOZZ3AA-FM:FM蔵の街)、経済的な問題もあり免許を取り下げたため、コミュニティ放送局は2015年10月までない状態が20年近く続き、新規の開局の機運はなく、全国で唯一コミュニティ放送の無い都道府県「空白県」であったが、2015年(平成27年)1月27日、栃木ケーブルテレビ(運営・ケーブルテレビ株式会社)が運営するとちぎシティエフエム(JOZZ3CB-FM)に予備免許が交付され、栃木県にもコミュニティ放送が開局の見通しとなった。 ゆるきゃら・ご当地キャラ """ #@title ウィキペディア記事「鳥取県」をtottori_article変数に読み込む tottori_article = """ 鳥取県 鳥取県(とっとりけん)は中国地方の日本海側にあり、山陰地方の東側を占める県である。同県は全国で面積は7番目に小さく、人口は最も少ない。県庁所在地は鳥取市。 国土地理院の全国都道府県市区町村別面積調によると、鳥取県の面積は平方キロメートルである。 国土地理院地理情報によると鳥取県の東西南北それぞれの端は以下の位置で、東西の長さは125.41km、南北の長さは61.79kmである。 鳥取県は県内ほぼ全域が日本海側気候で、豪雪地帯となっている。春と秋は好天の日が多く、夏は南風によるフェーン現象で猛暑日となることもあるが平野部でも熱帯夜は少ない。冬は曇りや雨、雪の日が多いが、平野部の1月平均気温は4℃台と東京郊外や名古屋、京都と同じくらいであり冷え込みは厳しくない。米子などの西部沿岸部は平年の最深積雪は20cm程度と比較的雪は少ないが、東へ行くほど降雪/積雪量は多くなり、鳥取市では中心部でも平年で40cm以上の最深積雪を観測する。東部ではJPCZ(日本海寒帯気団収束帯)の影響を受けやすい鳥取市や岩美町などの沿岸部のほうが智頭町などの内陸部よりも降雪量が多くなることがある。 特に大山周辺の内陸山地は山陰一の豪雪地帯となっており、冷え込みも厳しく−15°C以下にまで下がることもある。 県域はかつての因幡国、伯耆国に相当し、4市・5郡・14町・1村がある(町はすべて「ちょう」、村は「そん」と読む)。現在は東部・中部・西部の3つに区分するのが一般的で、鳥取市を中心とする東部は旧因幡国、倉吉市を中心とする中部と米子市や境港市を中心とする西部は旧伯耆国。旧因幡の東部に対して、同じ伯耆であった西部と中部を総称して中・西部と二区分にすることもある。 鳥取平野を中心とする旧因幡国の区域に相当し、鳥取砂丘や白兎海岸などの景勝地がある。県庁所在地で県唯一の施行時特例市でもある鳥取市は、液晶工場などが立地する工業都市でもある。 面積:km、 推計人口:人、 人口密度:人/km 旧伯耆国の東部、倉吉平野を中心とする区域。白壁土蔵群で知られる倉吉市を中心とし、三朝温泉、はわい温泉、東郷温泉といった温泉地も多い。 面積:km、 推計人口:人、 人口密度:人/km かつての伯耆国西部、米子平野や日野川流域を含む地域。中国地方最高峰で日本百名山の一つ・大山(1,729m)があり、古来から隣県島根の旧出雲国地域との結びつきが強い。 面積:km、 推計人口:人、 人口密度:人/km 府県予報区は「鳥取県」。一次予報区分は2区分、「市町村等をまとめた地域」は5区分。二次予報区分は鳥取市以外は市町村単位で、鳥取市のみ北部・南部の2つに分かれている。 「鳥取」の語は『古事記』『日本書紀』の垂仁天皇に「鳥取造(ととりのみやつこ)」、「鳥取部(ととりべ)」が見える。 『古事記』には、大和朝廷が諸国に鳥を捕らえさせ、これを税として納めるように命じていたという一節があり、当時、沼や沢の多い湿地帯であった鳥取平野で水辺に集まる鳥などを捕らえて暮らしていた狩猟民族が、大和に政権ができてからその支配体系に組み込まれ、「鳥取部」として従属するようになり、そこからこの地が「鳥取」と呼ばれるようになったとされる。 『日本書紀』垂仁天皇二十三年九月から十一月の条にかけて「鳥取」の起源説話が見える。誉津別王子(ほむつわけのみこ)が成人しても言葉が喋れないことを天皇が憂いていた時、大空を白鳥が飛んでいるのを見つけ「是何物ぞ」と発した。天皇、喜びて、その鳥の捕獲を命じた。天湯河板挙(あまのゆかわたな)が鳥を追いつづけ各地を巡り、ついに出雲の地(現;島根県安来地方だという説が有力)で捕獲に成功した。この功績から「鳥取造」の称号(姓:かばね)を拝命した。『記』にも同類の説話が見えるが、結末が違っている。 『和名類聚抄』の因幡国邑美(おうみ)郡の五郷の一つに鳥取がある。この郷名は上述の垂仁天皇の王子本牟智和気御子(ほむちわけみこ)のために設置された鳥取部に由来する。この辺り一帯が沼地で、全国の白鳥伝説との関連が取り上げられている。文書のうえでは、天慶3年(940年)の因幡国高草郡東大寺領高庭庄坪付注進状(東南院文書)に「主張鳥取高俊」(郡司と推定)の署名が初見である。(参考文献 内藤正中他『鳥取県の歴史』山川出版社 2003年) 山陰道:因幡国・伯耆国 大山裾野丘陵から後期旧石器時代の黒曜石製と安山岩製のナイフ形石器や削器、彫器・掻器が見つかっている。今から約2万3千年前以降のものと推測される。また、旧石器終わり頃の黒曜石製細石刃と呼ばれる石器が発見されている。このように遺物は少数ながら発見されているが、県内からは人が生活した遺跡はまだ発見されていない。 因幡国では西軍方の宮部長熈、垣屋恒総、木下重堅が改易され、代わって亀井茲矩、池田長吉、山崎家盛の3大名が統治した(関ヶ原の戦い以前から鹿野城主であった亀井氏に関しては加増、それ以外の大名は転封)。 伯耆国においても吉川広家、南条元忠が西軍方として改易・転封処分となり、中村忠一が入部した。慶長14年(1609年)に中村忠一が急死すると、その翌年には加藤貞泰、市橋長勝、関一政の3大名に分割された。この時、河村郡・久米郡は幕府直轄領となり、山田直時・伊丹康勝が代官として派遣された。また、慶長19年(1614年)には里見忠義が倉吉に配流され、久米郡の一部4,000石が与えられた。 その後、元和4年(1618年)、池田光政が鳥取城に入部し、因伯2国からなる鳥取藩が成立した。このほか、鹿奴藩・若桜藩の2つの新田藩があった。 また、伯耆国汗入郡の大山寺は僧・豪円の活躍によって慶長15年(1610年)、徳川秀忠より寺領3,000石が認められた。この大山寺領は西伯耆一帯に散在し、18ヵ村が大山寺本坊西楽院の管掌下に置かれた。大山寺は鳥取藩士とは別の大山侍と呼ばれる武士を登用し、寺領支配に携わらせた。 鳥取県の人口は全国で最も少ない。また全国どの政令指定都市と比較しても少なく、政令指定都市以外で最大の千葉県の船橋市や、鹿児島県の県庁所在地の鹿児島市よりも少ない。2010年(平成22年)の国勢調査で588,418人と35年ぶりに60万人を下回った。 ただし、面積も小さいことから人口密度で見ると168.5人/kmと37位で、鹿児島県・長野県のそれと近い。 
衆議院の小選挙区が2。参議院では、鳥取県選挙区として全県で1区を構成していたが、2016年の第24回参議院議員通常選挙より、島根県選挙区と合区され、鳥取県・島根県選挙区となり、島根県とともに1区を構成する合同選挙区が創設された。 鳥取県の行政機関は、鳥取県庁のほか、地方機関として総合事務所等が置かれている。なお、鳥取市・岩美町・若桜町・智頭町・八頭町については本庁が担当する。伯耆町(旧溝口町)は2005年(平成17年)4月1日に日野総合事務所から西部総合事務所へ、鳥取市(河原町・用瀬町・佐治町)は2007年(平成19年)4月1日に八頭総合事務所から東部総合事務所へ、それぞれ移管された。2013年(平成25年)4月1日に東部・八頭の各総合事務所が本庁に集約、日野総合事務所が西部総合事務所日野振興センターとなった。2018年(平成30年)4月1日の鳥取市の中核市移行に伴い、東部福祉保健事務所・東部生活環境事務所が鳥取市に移譲され、鳥取市保健所となった(管轄区域は変更なし。岩美町・若桜町・智頭町・八頭町についても、鳥取県からの事務委託により鳥取市に移譲)。 地方債の残高 人口が日本国内で一番少ない小さな県であるため、県内総生産(名目GDP)も2008年4月から2009年3月までのベース(平成20年度の統計)で1兆9927億円と、47都道府県では最も小さく、世界の中では国内総生産 (GDP) のランクが90位程度の国と同程度である。なお、これを一人当たりで見ると和歌山県・岩手県のそれに近い。また、一人当たり県民所得は230.4万円であり、全国平均の83.7%である。これは熊本県・鹿児島県のそれに近い。 スターバックスが未出店である唯一の県だったが、2015年5月23日に、同店の鳥取県1号店が鳥取市の鳥取駅南口付近に開業した。これにより、スターバックスは47都道府県全てに店舗を構えることになった。その後、イオンモール鳥取北内にも出店している。 スイカ、松葉ガニ、らっきょう、二十世紀梨など全国的に有名。米や野菜、果物などもバランスよく生産されている。 鳥取県警察本部の管轄にある。2005年(平成17年)4月1日に再編が行われ、以下の9警察署が置かれている。 おおむね兵庫県境~島根県境までの海岸線に沿った旧山陰道のルートに沿って走る幹線(鉄道の山陰本線や国道9号線)が県内主要都市(もしくはその付近)を通り、鳥取・米子・倉吉の各都市近辺から中国山地・山陽方面へルートが分岐する構造になっている。 ()のないものは県内のみを走る路線。 日ノ丸自動車、日本交通の2社があり、県内の路線バスや県内各都市と県外を結ぶ高速バスを運行。 どんぐりコロコロは日本交通の運行、はまるーぷバスは日ノ丸自動車の運行、他3つは日本交通・日ノ丸自動車の共同運行。 全国で最もバス利用者が少ない。 松江市内に松江道路が開通した1998年(平成10年)4月17日以降、長らく「日本で唯一都道府県庁所在都市に高速道路がない県」であったが、2004年(平成16年)11月1日に山陰自動車道青谷インターチェンジのある青谷町を編入合併したため解消された。 鳥取県・島根県の県域民放各局は互いの県に乗り入れて放送を実施している。詳しくは電波相互乗り入れ#鳥取・島根両県の民放相互乗り入れ放送の項を参照。局名の後の( )内は局所在地。 ※他に、鳥取県に系列局を持たないANN(テレビ朝日)系列が、取材拠点としてテレビ朝日米子支局・テレビ朝日鳥取支局を置いている。 鳥取県の方言は、東部と西部で大きく異なる。東部(因幡)の因州弁は兵庫県北部の但馬弁と共通した特徴を持ち、東山陰方言に分類される。中部(東伯耆)の倉吉弁は、伯耆でありながら因州弁に近い特徴を持つ。一方、西部(西伯耆)の方言は島根県東部の出雲弁と共通した特徴を持ち、雲伯方言に分類される。 """ for n in range(1, 10): similarity_score = ngram_based_article_similarity(tottori_article, tochigi_article, n) if similarity_score == 0: break print("{}-grams cosine similarity (鳥取県 vs 栃木県): {}".format(n, similarity_score)) print("鳥取県\t | \t栃木県") for index in range(10): tottori_article_ngrams = sorted(count_article_ngrams(tottori_article, n).items(), key=lambda x:x[1], reverse=True) tochigi_article_ngrams = sorted(count_article_ngrams(tochigi_article, n).items(), key=lambda x:x[1], reverse=True) print("{}\t|\t{}".format(tottori_article_ngrams[index], tochigi_article_ngrams[index]))1-grams cosine similarity (鳥取県 vs 栃木県): 0.8753957524683921 鳥取県 | 栃木県 ('、', 74) | ('の', 86) ('の', 70) | ('、', 82) ('は', 49) | ('は', 81) ('が', 45) | ('に', 69) ('に', 43) | ('が', 60) ('と', 38) | ('を', 48) ('鳥取', 36) | ('で', 48) ('で', 34) | ('と', 41) (')', 32) | ('て', 38) ('(', 31) | ('県', 37) 2-grams cosine similarity (鳥取県 vs 栃木県): 0.36822960619627304 鳥取県 | 栃木県 ('鳥取 県', 16) | ('て いる', 33) ('て いる', 14) | ('は 、', 26) ('、 鳥取', 12) | ('に は', 17) ('県 の', 12) | ('し 、', 16) ('さ れ', 12) | ('し て', 13) ('年 )', 12) | ('さ れ', 13) ('鳥取 市', 11) | ('宇都宮 市', 12) ('年 (', 11) | ('なっ て', 12) ('し た', 10) | ('で は', 12) ('が 、', 9) | ('で ある', 11) 3-grams cosine similarity (鳥取県 vs 栃木県): 0.0851823284002506 鳥取県 | 栃木県 ('鳥取 県 の', 6) | ('なっ て いる', 10) ('、 鳥取 市', 6) | ('し て いる', 9) ('さ れ た', 6) | ('と なっ て', 8) ('年 ( 平成', 6) | ('、 栃木 市', 6) ('れ て いる', 5) | ('れ て いる', 6) ('年 ) 4', 5) | ('て おり 、', 6) (') 4 月', 5) | ('で あり 、', 6) ('月 1 日', 5) | ('、 真岡 市', 5) ('、 鳥取 県', 4) | ('さ れ て', 5) ('を 中心 と', 4) | ('さ れ た', 5) 4[...]Instantiating Pulses: Obtaining Pulse Instances From Pulse TemplatesIn the previous examples, we have modelled pulses using the basic members of qupulse's `PulseTemplate` class hierarchy. 
However, these are only templates (or classes) of pulses and may contain parameters so that they cannot be run directly on hardware (this is also the reason why we always have to provide some parameters during plotting). First, we have to instantiate a concrete pulse in a process we call *instantiating*. We achieve this by making use of the `create_program()` method and will need to provide concrete parameter values.The example should be mostly self-contained and easy to follow, however, if you started here and don't know what pulse templates are and how to create them, maybe it's best to have a look at [Modelling a Simple TablePulseTemplate](00SimpleTablePulse.ipynb) first.To start, let us first create a pulse template with a few parameters and two channels. Instantiating a TablePulse%matplotlib inline from qupulse.pulses.plotting import plot from qupulse.pulses import TablePT template = TablePT(entries={'A': [(0, 0), ('ta', 'va', 'hold'), ('tb', 'vb', 'linear'), ('tend', 0, 'jump')], 'B': [(0, 0), ('ta', '-va', 'hold'), ('tb', '-vb', 'linear'), ('tend', 0, 'jump')]}, measurements=[('m', 0, 'ta'), ('n', 'tb', 'tend-tb')]) parameters = {'ta': 2, 'va': 2, 'tb': 4, 'vb': 3, 'tc': 5, 'td': 11, 'tend': 6} _ = plot(template, parameters, sample_rate=100, show=False, plot_measurements={'m', 'n'})The `HardwareSetup` class represents the actual hardware and interfaces to the devices in qupulse. It is thus responsible for uploading to and executing pulses on the hardware. To do so it currently expects an instantiated pulse which is represented by `Loop` objects. These can be obtained by plugging the desired parameters into the `create_program` method of your `PulseTemplate` object.program = template.create_program(parameters=parameters, channel_mapping={'A': 'A', 'B': 'B'}) print(program) print('Defined on', program[0].waveform.defined_channels) print(program.get_measurement_windows())LOOP 1 times: ->EXEC 1 times Defined on {'B', 'A'} {'m': (array([0.]), array([2.])), 'n': (array([4.]), array([2.]))}The output shows us that a simple `Loop` object was created which just executes a single waveform without repetitions, just as our `PulseTemplate` specifies. In the `Loop` object all parameter references from the template have been resolved and replaced by the values provided in the `parameters` dictionary, so this is our pulse ready to be executed on the hardware. Mapping Channels and Measurements During InstantiationThe `channel_mapping` keyword argument allows us to rename channels or to drop them by mapping them to `None`. We can do the same to measurements using the `measurement_mapping` keyword argument.program = template.create_program(parameters=parameters, channel_mapping={'A': None, 'B': 'Y'}, measurement_mapping={'m': 'foo', 'n': None}) print(program) print('Defined on', program[0].waveform.defined_channels) print(program.get_measurement_windows())LOOP 1 times: ->EXEC 1 times Defined on {'Y'} {'foo': (array([0.]), array([2.]))}Instantiating Composed PulsesLet's have a brief look at a slightly more complex pulse. 
Say we want to repeat our previous pulse a few times and follow it up with a brief sine wave on each channel.from qupulse.pulses import FunctionPT, SequencePT, RepetitionPT, AtomicMultiChannelPT repeated_template = RepetitionPT(template, 'n_rep') sine_template = FunctionPT('sin_a*sin(t)', '2*3.1415') two_channel_sine_template = AtomicMultiChannelPT( (sine_template, {'default': 'A'}), (sine_template, {'default': 'B'}, {'sin_a': 'sin_b'}) ) sequence_template = SequencePT(repeated_template, two_channel_sine_template) sequence_parameters = dict(parameters) # we just copy our parameter dict from before sequence_parameters['n_rep'] = 4 # and add a few new values for the new params from the sine wave sequence_parameters['sin_a'] = 1 sequence_parameters['sin_b'] = 2 _ = plot(sequence_template, parameters=sequence_parameters, sample_rate=100, show=False) sequence_program = sequence_template.create_program(parameters=sequence_parameters, channel_mapping={'A': 'A', 'B': 'B'}) print(sequence_program) print(sequence_program.get_measurement_windows())LOOP 1 times: ->LOOP 4 times: ->EXEC 1 times ->EXEC 1 times {'m': (array([ 0., 6., 12., 18.]), array([2., 2., 2., 2.])), 'n': (array([ 4., 10., 16., 22.]), array([2., 2., 2., 2.]))}Plot weight against LORfrom seffnet.default_predictor import predictor from seffnet.constants import DEFAULT_WEIGHTED_FULLGRAPH_PICKLE import pybel import networkx as nx from tqdm import tqdm_notebook as tqdm graph = pybel.from_pickle(DEFAULT_WEIGHTED_FULLGRAPH_PICKLE) info = {} for edge in tqdm(graph.edges()): if edge[0].namespace == 'pubchem.compound': if edge[1].namespace != 'uniprot': continue r = predictor.find_new_relation( source_curie=edge[0].namespace+':'+edge[0].identifier, target_curie=edge[1].namespace+':'+edge[1].identifier, ) for iden, edge_d in graph[edge[0]][edge[1]].items(): weight = edge_d['weight'] lor = r['lor'] info[edge] = (weight, lor) len(info.keys()) weights = [w for w, lor in info.values()] lors = [lor for w, lor in info.values()] len(lors) import matplotlib.pyplot as plt plt.plot(lors, weights, 'bo') plt.ylabel('weights') plt.xlabel('lors') plt.show()Calour Experiment class tutorialLearn about how calour stores the data of an experiment Setupimport calour as ca ca.set_log_level(11) import numpy as np import matplotlib.pyplot as plt %matplotlib notebook/Users/amnon/miniconda3/envs/calour/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_convertersLoad the datawe use the chronic fatigue syndrome data from:., ., ., ., . 
and ., 2016.Reduced diversity and altered composition of the gut microbiome in individuals with myalgic encephalomyelitis/chronic fatigue syndrome.Microbiome, 4(1), p.30.cfs=ca.read_amplicon('data/chronic-fatigue-syndrome.biom', 'data/chronic-fatigue-syndrome.sample.txt', normalize=10000,min_reads=1000)2018-03-04 12:36:35 INFO loaded 87 samples, 2129 features 2018-03-04 12:36:35 WARNING These have metadata but do not have data - dropped: {'ERR1331814'} 2018-03-04 12:36:35 INFO After filtering, 87 remainingThe `Experiment` classCalour stores the experiment as two Pandas.DataFrame (for sample_metadata and feature_metadata) and a (sparse or dense) data matrix.The order in the dataframes and the table is synchronized, so entry number X in the sample_metadata dataframe always corresponds to row X in the data matrix (and similarily entry Y in the feature_metadata always corresponds to column Y in the data matrix). The \__repr__ of the ExperimentContains the class (in our case - ca.AmpliconExperiment - which is derived from ca.Experiment),the original biom table filename,and how many samples and features does it have.print(cfs)AmpliconExperiment ("chronic-fatigue-syndrome.biom") with 87 samples, 2129 featuresThe per-sample metadata (`Experiment.sample_metadata`)This is a Pandas.DataFrame, with the index being the SampleID, and columns for the sample metadata fields (loaded from the mapping file).Note that Calour also added the "_calour_original_abundance" fieldcfs.sample_metadataThe per-feature metadata (`Experiment.feature_metadata`)This is a Pandas.DataFrame, with the index being the featureID (usually the sequence), and columns for the feature metadata (usually "taxonomy", and also additional fields added by calour following differential abundance testing)cfs.feature_metadataReads table (`Experiment.data`)This is a numpy 2D array or a scipy.Sparse matrix containing the feature X sample reads.Rows are samples, columns are features.cfs.dataChoosing sparse/dense representation of the data When loading the data, it is by default loaded as a scipy.Sparse.CSR matrix (which is more memory efficient for sparse data).We can force Calour to load the data as a dense numpy 2D array using the `sparse=False` parameter in the `read_amplicon()` function.We can also convert between sparse and dense using the `sparse` attribute of the experiment Convert to densecfs.sparse=False cfs.dataconvert to sparsecfs.sparse=True cfs.dataGetting the dataWe can use the `Experiment.get_data()` function to obtain a copy of the data, either as sparse or dense. sparse=None means keep the original formatdat = cfs.get_data(sparse=None) datsparse=True returns a sparse representation of the data (copies if needed)dat = cfs.get_data(sparse=True) datsparse=False returns a dense representation of the data (copies if needed)dat = cfs.get_data(sparse=False) datwe can also force copying the data using `copy=True`dat = cfs.get_data(sparse=None, copy=False) dat is cfs.data dat = cfs.get_data(sparse=None, copy=True) dat is cfs.datagetting a single entry based on feature and sample valuesWe can use the \__getitem(sampleid, featureid)\__ attribute.cfs['ERR1331815','TACGGAGGATCCGAGCGTTATCCGGATTTATTGGGTTTAAAGGGAGCGTAGGCGGACGCTTAAGTCAGTTGTGAAAGTTTGCGGCTCAACCGTAAAATTGCAGTTGATACTGGGTGTCTTGAGTACAGTAGAGGCAGGCGGAATTCGTGG']Saving an Experiment Save everything (biom table+sample/feature mapping files)By default saves as an HDF5 biom table. 
We can save as a text biom table insteadusing `fmt="txt"` parametercfs.save('cfs') !ls cfs*cfs.biom cfs_feature.txt cfs_sample.txt![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true) Callysto's Weekly Data Visualization Christmas Tunes Popularity Recommended grade level: 6-12 Instructions: Step 1 (your only step): “Run” the cells to see the graphsClick “Cell” and select “Run All.” This will import the data and run all the code to make this week's data visualizations (scroll to the top after you’ve run the cells). **You don’t need to do any coding.**![instructions](https://github.com/callysto/data-viz-of-the-week/blob/main/images/instructions.png?raw=true)After a code cell runs, a number appears in the top left corner. If the code cell experiences a technical error some red script will appear below the cell. About This Notebook:Callysto's Weekly Data Visualization is a learning resource that helps Grades 5-12 teachers and students grow and develop data literacy skills. We do this by providing a data visualization, like a graph, and asking teachers and students to interpret it. This companion resource walks learners through how the data visualization is created and interpreted using the data science process. The steps of this process are listed below and applied to each weekly topic.1. Question - What are we trying to answer?2. Gather - Find the data source(s) you will need.3. Organize - Arrange the data so that you can easily explore it.4. Explore - Examine the data to look for evidence to answer our question. This includes creating visualizations.5. Interpret - Explain how the evidence answers our question.6. Communicate - Reflect on the interpretation. 1. Question Which pop songs compete with "All I want For Christmas is You" in terms of holiday popularity?You’ve likely heard this holiday tune before. ’s *All I Want for Christmas is You* is one of the [best-selling singles of all time](https://en.wikipedia.org/wiki/All_I_Want_for_Christmas_Is_You). There are a lot of different ways to decide what makes a song "popular". We will use [Google Trends](https://trends.google.com/trends/?geo=US) data. This is data of Google searches for a given topic – in our case, a small collection of popular Christmas pop songs 2. GatherThe code below will import the Python programming libraries, a software function that helps us gather and organize the data to answer our question. This notebook will attempt to collect new data from Google. But, if anything goes wrong with the connection between the notebook and Google, the notebook will use backup data. The backup data was sourced from [Google Trends](https://trends.google.com/trends/?geo=US) on November 16, 2020.First, we will import the Python libraries we need.# Import python libraries import pandas as pd import plotly.graph_objects as go import osThen, we try to connect to Google to grab current information. 
If that fails, we need to use the backup Google Trends data (.csv) that is saved alongside this notebook.try: #import additional libraries needed from pytrends.request import TrendReq from datetime import datetime from datetime import date #set up the connection to google pytrends = TrendReq(hl='en-US', tz=360) #create string holding todays date today = date.today() today = str(today) #keys found by manually finding the 'mid' matching the desired search term using 'pytrends.suggestions()' mariah_key = ' wham_key = ' kelly_key = ' #call pytrends to grab google trends data pytrends.build_payload(kw_list=[mariah_key, kelly_key, wham_key], timeframe='2013-11-01 '+today) #from the trend data collected from google, save the interest over time data to a datframe df = pytrends.interest_over_time() #organize the data to look a bit more like the data grabbed from google manually df.reset_index(inplace=True) df.drop(['isPartial'], axis=1, inplace=True) #convert the 'datetime' object to a string so it can be more easily manipulated df.date = df.date.map(lambda x: x.strftime("%Y-%m")) print("Notebook succesfully connected to Google Trends.\nUsing current data.") except: # Create a pandas dataframe from our saved data path = os.path.join('datasets', 'christmas-songs.csv') df = pd.read_csv(path, skiprows=1) print("Notebook could not connect to Google Trends.\nUsing backup csv.") #show the first 5 rows of the data df3. OrganizeThis data is fairly organized. However, the column names will be made clearer. We will rename the columns. Additionaly, if the backup data was used there are '<1' scores that need to be replaced.Our first step in organizing will be creating string variables for the song titles and renaming the columns to something easier to work with.# Create variables to hold the column/song names mariah = 'All I Want For Christmas Is You' wham = 'Last Christmas' kelly = 'Underneath the Tree' # rename the columns df.columns=['Month', mariah, kelly, wham]Next, we are going to replace any '<1' score with '0' so we can deal with all scores as numeric data.# replace any cell showing '<1' with 0 df.loc[:,mariah] = df[mariah].map(lambda x: 0 if x == '<1' else x) df.loc[:,wham] = df[wham].map(lambda x: 0 if x == '<1' else x) df.loc[:,kelly] = df[kelly].map(lambda x: 0 if x == '<1' else x) # show the first 5 rows of data df.head()Finally, we create a second data set only looking at December data points since those are likely the most interesting to us.# Create a separate dataframe only showing rows for December # So, only show rows where the Column Month ends in '12' df_december = df[df['Month'].map(lambda x: x[5]+x[6] == '12')] df_december.head()4. ExploreThe code below will be used to create a line graph and scatter plot chart to explore the question: "Which pop songs compete with All I Want For Christmas is You in terms of holiday popularity?"The code cell below creates two plots: * A line graph that looks at our more complete data set * A scatter plot that looks at some details of the December data set. The cell below does not show the plots. 
It just creates them and they will be displayed when '.show()' is called in a later cell.# This code creates a line graph using the three song columns for the y-axis and uses the Month column as the x-axis fig = go.Figure() fig.add_trace(go.Scatter(x=df['Month'], y=df[mariah], mode='lines', name='All I Want for Christmas is You by ')) fig.add_trace(go.Scatter(x=df['Month'], y = df[wham], mode='lines', name='Last Christmas by Wham!')) fig.add_trace(go.Scatter(x=df['Month'], y=df[kelly], mode='lines', name='Underneath The Tree by ')) # Label and formats the interactive plot: fig.update_layout(title='Popular Christmas Songs According to Google Trends', xaxis_title="Date", yaxis_title='Relative Popularity', legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1), hovermode='x') # This code creates a scatterplot showing the difference in score between Wham! and y fig2 = go.Figure() fig2.add_trace(go.Scatter(x=df_december['Month'], y=df_december[mariah]-df_december[wham], name='Test', hovertemplate='Score difference: Mariah - Wham! = %{y}', marker=dict( size=16, cmax=39, cmin=0, color=df_december[mariah]-df_december[wham], colorbar=dict(title="Score Difference"), colorscale=['#ff7f0e', 'blue']), mode="markers")) # Label and formats the interactive plot fig2.update_layout(title=' Wham! Google Trends December Score Differences', xaxis_title="Date", yaxis_title='\'All I want for Christmas is You\' minus \'Last Christmas\'', hovermode='x');We can now show the first plot.# show the line graph fig.show()The line graph shows how popular the three songs are. The Google Trends data uses Google searches to measure popularity and assigns the most popular data point a score of 100. Every score less than the 100 score is scored relatively. So, a score of 50 is half as popular of a search than the 100 scored search for in December 2014. 
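As a rough illustration of this relative scoring (a simplification for intuition, not Google's actual computation), scaling a hypothetical series of raw search counts so that the busiest month scores 100 could look like this:
```
import pandas as pd

# Hypothetical raw monthly search counts for one song (made-up numbers)
raw_counts = pd.Series([120, 60, 30, 240])

# Trends-style relative score: the largest value becomes 100,
# everything else is scaled proportionally
relative_score = 100 * raw_counts / raw_counts.max()
print(relative_score.tolist())  # [50.0, 25.0, 12.5, 100.0]
```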
The second plot is shown below.fig2.show()optimized hyper-parameters### optimized hyper-parameters n_neighbors = 15 input_feature_maps = 'both' batch_size = 64 lr = 1e-4 dense_layers = [128] ## random data = dataset.load_Tox21() task_name = data.task_name task_type = data.task_type Y = pd.DataFrame(data.y).fillna(-1).values df = data.df n_outputs = Y.shape[1] gpuid = 4 # which gpu to use random_seeds = [2, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096] mp1 = loadmap('../../descriptor.mp') mp1.fit(method = 'umap', n_neighbors = n_neighbors) mp2 = loadmap('../../fingerprint.mp') mp2.fit(method = 'umap', n_neighbors = n_neighbors) X1 = mp1.batch_transform(df.smiles.tolist(), n_jobs = 16) X2 = mp2.batch_transform(df.smiles.tolist(), n_jobs = 16) fmap_shape1= X1.shape[1:] fmap_shape2= X2.shape[1:] file_path = "/raid/shenwanxiang/08_Robustness/dataset_induces/split" #split result_file = 'OPT_%s.csv' % task_name with open(result_file, 'w+') as f: f.write('task_name, seed, valid_auc, test_auc\n') # the dense layers for these multi outputs tasks res = [] for seed in random_seeds: train_path = os.path.join(file_path, task_name,"%s" % seed, "train.csv") valid_path = os.path.join(file_path, task_name,"%s" % seed, "val.csv") test_path = os.path.join(file_path, task_name,"%s" % seed, "test.csv") train_df = pd.read_csv(train_path) valid_df = pd.read_csv(valid_path) test_df = pd.read_csv(test_path) train_idx = df[df.smiles.isin(train_df.smiles)].index.tolist() valid_idx = df[df.smiles.isin(valid_df.smiles)].index.tolist() test_idx = df[df.smiles.isin(test_df.smiles)].index.tolist() print(len(train_idx), len(valid_idx), len(test_idx)) X_train = (X1[train_idx], X2[train_idx]) y_train = Y[train_idx] X_valid = (X1[valid_idx], X2[valid_idx]) y_valid = Y[valid_idx] X_test = (X1[test_idx], X2[test_idx]) y_test = Y[test_idx] pos_weights, neg_weights = get_pos_weights(y_train) loss = lambda y_true, y_pred: molmap.model.loss.weighted_cross_entropy(y_true,y_pred, pos_weights, MASK = -1) clf = MultiLabelEstimator(n_outputs, fmap_shape1, fmap_shape2, batch_size = batch_size, dense_layers = dense_layers, gpuid = gpuid, loss = loss, lr = lr, monitor = 'val_auc', ) clf.fit(X_train,y_train, X_valid, y_valid) train_aucs = clf._performance.evaluate(X_train,y_train) valid_aucs = clf._performance.evaluate(X_valid,y_valid) test_aucs = clf._performance.evaluate(X_test,y_test) train_auc = np.nanmean(train_aucs) valid_auc = np.nanmean(valid_aucs) test_auc = np.nanmean(test_aucs) final_res = {'seed': seed, "task_name": task_name, 'train_auc':train_auc, 'valid_auc':valid_auc, 'test_auc':test_auc,} print(final_res) with open(result_file, 'a+') as f: f.write('%s, %s, %s, %s\n' % (task_name, seed, valid_auc, test_auc)) res.append(final_res)Generate time series of random numbers then down sampleimport pandas as pd import numpy as np import matplotlib.pyplot as plt # I want 7 days of 24 hours with 60 minutes each periods = 7 * 24 * 60 tidx = pd.date_range('2016-07-01', periods=periods, freq='T') # ^ ^ # | | # Start Date Frequency Code for Minute # This should get me 7 Days worth of minutes in a datetimeindex # Generate random data with numpy. We'll seed the random # number generator so that others can see the same results. # Otherwise, you don't have to seed it. 
np.random.seed([3,1415]) # This will pick a number of normally distributed random numbers # where the number is specified by periods data = np.random.randn(periods) ts = pd.Series(data=data, index=tidx, name='HelloTimeSeries') ts.describe()Let's take this 7 days of per minute data and down sample to every 15 minutes. All frequency codes can be found here.https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html# resample says to group by every 15 minutes. But now we need # to specify what to do within those 15 minute chunks. # We could take the last value. ts.resample('15T').last()Or any other thing we can do to a groupby object We can even aggregate several useful things. Let's plot the min, mean, and max of this resample('15M') data.ts.resample('15T').agg(['min', 'mean', 'max']).plot()Let's resample over '15T' (15 minutes), '30T' (half hour), and '1H' (1 hour) and see how our data gets smoother.fig, axes = plt.subplots(1, 3, figsize=(12, 4)) for i, freq in enumerate(['15T', '30T', '1H']): ts.resample(freq).agg(['max', 'mean', 'min']).plot(ax=axes[i], title=freq)`zip`- `zip`은 여러 자료형을 묶어서 iteration을 통해 출력해준다. - 자료형은 길이가 같아야 한다.animals = ['cat', 'dog', 'lion'] sounds = ['meow', 'woof', 'roar'] answer = dict(zip(animals, sounds)) answer mylist = [ 1,2,3 ] new_list = [ 40, 50, 60 ] for i in zip(mylist, new_list): print (i) list1 = [1, 2, 3, 4] list2 = [100, 120, 30, 300] list3 = [392, 2, 33, 1] for i, j, k in zip(list1, list2, list3): print( i, j, k )1 100 392 2 120 2 3 30 33 4 300 1`map`- `map(함수, 자료형)`은 함수와 반복 가능한 자료형을 입력으로 받는다. - 입력받은 자료형의 각 요소가 순서대로 함수로 입력되어, 결과값이 리턴된다.# x를 입력 받아 x2를 수행하는 함수 def two_times(x): return x*2 # 우측의 리스트의 요소들이 순서대로 two_times의 함수로 입력되어, 그 결과값이 출력된다. list(map(two_times, [1, 2, 3, 4]))Overview The goal of this tutorial is to provide an overview of the use of Matplotlib. Matplotlib has a vast array of functionality, so this is by no means complete. For more information, try looking at the:- [Matplotlib Homepage](http://matplotlib.org)- [Matplotlib Gallery](http://matplotlib.org/gallery.html) Matplotlib is a library that started as a way to get MATLAB-like plotting capabilities in Python. It has since evolved on its own, with a focus on publication-quality graphics. Some features- A variety of support GUI and non-interactive backends- Lines, scatter, bar- Contours, image, mesh- LaTeX-like math rendering- Animations- Basic 3D plotting Basic Plotting Matplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.# Set-up to have matplotlib use its inline backend %matplotlib inline # Convention for import of the pyplot interface import matplotlib.pyplot as plt import numpy as np # Create some example data x = np.linspace(0, 2, 100) # Go ahead and explicitly create a figure and an axes fig, ax = plt.subplots(1, 1) # Plot our x variable on the x-axis and x^2 on the y-axis ax.plot(x, x**2) # Add some labels to the plot ax.set_xlabel('x') ax.set_ylabel('f(x)') # Needed to reuse and see the updated plot while using inline fig # Let's add a title with a bit of latex syntax ax.set_title('$y = x^2$', fontdict={'size':22}) fig fig, ax = plt.subplots(figsize=(6, 4), dpi=100) # Plot a set of different polynomials. # The label argument is used when generating a legend. 
ax.plot(x, x, label='$x$') ax.plot(x, x * x, label='$x^2$') ax.plot(x, x**3, label='$x^3$') # Add labels and title ax.set_xlabel('x') ax.set_ylabel('f(x)') ax.set_title('Polynomials') # Add gridlines ax.grid(True) # Add a legend to the upper left corner of the plot ax.legend(loc='upper left')Design and test a Butterworth lowpass filterThis document describes how to design a Butterworth lowpass filter with a cutoff frequency $\omega_c$ and compute the discrete coefficients so that it can be implemented on hardware.# Packages and adjustments to the figures from scipy import signal import matplotlib.pyplot as plt import numpy as np import math plt.rcParams["figure.figsize"] = 10,5 plt.rcParams["font.size"] = 16 plt.rcParams.update({"text.usetex": True,"font.family": "sans-serif","font.sans-serif": ["Helvetica"]})1. Generate a test signal * A simple test signal $\boldsymbol{y} = \{ y_i \}$ is generated with a fixed sampling frequency using the function:$$y(t) = m_0 \sin(2\pi f_0 t) + m_1 \sin(2\pi f_1 t)$$* The power spectrum is plotted as the magnitude of the discrete fourier transform (DFT): $|\hat{\boldsymbol{y}}|$# Generate a signal samplingFreq = 1000; # sampled at 1 kHz = 1000 samples / second tlims = [0,1] # in seconds signalFreq = [2,50]; # Cycles / second signalMag = [1,0.2]; # magnitude of each sine t = np.linspace(tlims[0],tlims[1],(tlims[1]-tlims[0])*samplingFreq) y = signalMag[0]*np.sin(2*math.pi*signalFreq[0]*t) + signalMag[1]*np.sin(2*math.pi*signalFreq[1]*t) # Compute the Fourier transform yhat = np.fft.fft(y); fcycles = np.fft.fftfreq(len(t),d=1.0/samplingFreq); # the frequencies in cycles/s # Plot the signal plt.figure() plt.plot(t,y); plt.ylabel("$y(t)$"); plt.xlabel("$t$ (s)"); plt.xlim([min(t),max(t)]); # Plot the power spectrum plt.figure() plt.plot(fcycles,np.absolute(yhat)); plt.xlim([-100,100]); plt.xlabel("$\omega$ (cycles/s)"); plt.ylabel("$|\hat{y}|$");2. Butterworth low-pass filter transfer functionThis document does not derive the formula for a Butterworth filter. Instead, it uses the standard form with DC gain $G=1$.* A cutoff frequency $\omega_c$ is selected* The Butterworth low-pass filter transfer function with $\omega_c = 1$ can be written as (see https://en.wikipedia.org/wiki/Butterworth_filter)$$H(s) = \frac{1}{\sum_1^{n} a_k s^k}$$where $n$ is the order of the filter. 
The coefficients are given by the recursion formula:$$a_{k+1} = \frac{\cos( k \gamma )}{\sin((k+1)\gamma)}$$with $a_0 = 1$ and $\gamma = \frac{\pi}{2n}$.* Because the Butterworth polynomial is $$B_n(s) = \sum_{k=0}^n a_k s^k$$and we want to set a new cutoff frequency of $\omega_c$, substitute$$B_n = \sum_{k=0}^n a_k \left(\frac{s}{\omega_c}\right)^k = \sum_{k=0}^n \frac{a_k}{{\omega_c}^k} s^k$$for convenience set $$B_n(s) = \sum_{k=0}^n c_k s^k$$ with $c_k = \frac{a_k}{{\omega_c}^k}$# Butterworth filter wc = 2*np.pi*5; # cutoff frequency (rad/s) n = 2; # Filter order # Compute the Butterworth filter coefficents a = np.zeros(n+1); gamma = np.pi/(2.0*n); a[0] = 1; # first coef is always 1 for k in range(0,n): rfac = np.cos(k*gamma)/np.sin((k+1)*gamma); a[k+1] = rfac*a[k]; # Other coefficients by recursion print("Butterworth polynomial coefficients a_i: " + str(a)) # Adjust the cutoff frequency c = np.zeros(n+1); for k in range(0,n+1): c[n-k] = a[k]/pow(wc,k) print("Butterworth coefficients with frequency adjustment c_i: " + str(c)) # Low-pass filter w0 = 2*np.pi*5; # pole frequency (rad/s) num = [1]; # transfer function numerator coefficients den = c; # transfer function denominator coefficients lowPass = signal.TransferFunction(num,den) # Transfer function # Generate the bode plot w = np.logspace( np.log10(min(signalFreq)*2*np.pi/10), np.log10(max(signalFreq)*2*np.pi*10), 500 ) w, mag, phase = signal.bode(lowPass,w) # Magnitude plot plt.figure() plt.semilogx(w, mag) for sf in signalFreq: plt.semilogx([sf*2*np.pi,sf*2*np.pi],[min(mag),max(mag)],'k:') plt.ylabel("Magnitude ($dB$)") plt.xlim([min(w),max(w)]) plt.ylim([min(mag),max(mag)]) # Phase plot plt.figure() plt.semilogx(w, phase) # Bode phase plot plt.ylabel("Phase ($^\circ$)") plt.xlabel("$\omega$ (rad/s)") plt.xlim([min(w),max(w)]) plt.show()3. Discrete transfer functionTo implement the low-pass filter on hardware, you need to compute the discrete transfer function using the signal's sampling frequency.* The time step is $\Delta t = 1/f_s$* Compute the discrete transfer function using Tustin's method by setting $s = \frac{2}{\Delta t} \left( \frac{1-z^{-1}}{1+z^{-1}} \right)$* Why do it yourself? The to_discrete method computes the bilinear transform (Tustin's method when $\alpha = 1/2$)# Compute the discrete low pass with delta_t = 1/samplingFrequency dt = 1.0/samplingFreq; discreteLowPass = lowPass.to_discrete(dt,method='gbt',alpha=0.5) print(discreteLowPass)TransferFunctionDiscrete( array([0.00024132, 0.00048264, 0.00024132]), array([ 1. , -1.95558189, 0.95654717]), dt: 0.001 )4. Filter coefficientsWe want to find the filter coefficients for the discrete update:$$y[n] = a_1 y[n-1] + a_2 y[n-2] + ... + b_0 x[n] + b_1 x[n-1] + ...$$The coefficients can be taken directly from the discrete transfer function of the filter in the form:$$H(z) = \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots}{1 - a_1 z^{-1} - a_2 z^{-2} + \ldots}$$(This is a result of taking the Z-transform which is not shown here)Compare this to a transfer function with coefficientsnum = [b_0, b_1, b_2]den = [1, a_1, a_2]is $$H(z) = \frac{b_0 z^2 + b_1 z + b_2}{z^2 + a_1 z + a_2}$$which is equivalent to$$H(z) = \frac{b_0 + b_1 z^{-1} + b_2 z^{-2}}{1 + a_1 z^{-1} + a_2 z^{-2}}$$So you can take the coefficients in the same order that they are defined in the numerator and denominator of the transfer function object. 
The only difference is that the **coefficients in the denominator need a negative sign**.* To filter the signal, apply the filter using the discrete update* The filtered signal and filtered signal power spectrum are plotted alongside the unfiltered signal# The coefficients from the discrete form of the filter transfer function (but with a negative sign) b = discreteLowPass.num; a = -discreteLowPass.den; print("Filter coefficients b_i: " + str(b)) print("Filter coefficients a_i: " + str(a[1:])) # Filter the signal Nb = len(b) yfilt = np.zeros(len(y)); for m in range(3,len(y)): yfilt[m] = b[0]*y[m]; for i in range(1,Nb): yfilt[m] += a[i]*yfilt[m-i] + b[i]*y[m-i]; # View the result # Plot the signal plt.figure() plt.plot(t,y); plt.plot(t,yfilt); plt.ylabel("$y(t)$") plt.xlim([min(t),max(t)]); # Generate Fourier transform yfilthat = np.fft.fft(yfilt) fcycles = np.fft.fftfreq(len(t),d=1.0/samplingFreq) plt.figure() plt.plot(fcycles,np.absolute(yhat)); plt.plot(fcycles,np.absolute(yfilthat)); plt.xlim([-100,100]); plt.xlabel("$\omega$ (cycles/s)"); plt.ylabel("$|\hat{y}|$");Filter coefficients b_i: [0.00024132 0.00048264 0.00024132] Filter coefficients a_i: [ 1.95558189 -0.95654717]Lasso Regression Part 2 Lasso Regression (Least Absolute Shrinkage and Selection Operator) is a method that performs both variable selection and regularization in order to enhance the prediction accuracy and interpretability of the statistical model it produces.import numpy as np import matplotlib.pyplot as plt import pandas as pd import warnings warnings.filterwarnings("ignore") # yfinance is used to fetch data import yfinance as yf yf.pdr_override() # input symbol = 'AMD' start = '2014-01-01' end = '2018-08-27' # Read data dataset = yf.download(symbol,start,end) # View Columns dataset.head() X = dataset[['Open','High', 'Low']] Y = dataset['Adj Close'] from sklearn.linear_model import Lasso from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler X_train,X_test,y_train,y_test=train_test_split(X,Y, test_size=0.3, random_state=0) model = Lasso() model.fit(X_train,y_train) train_score=model.score(X_train,y_train) test_score=model.score(X_test,y_test) coeff_used = np.sum(model.coef_!=0) print("Training score:", train_score) print("Test score: ", test_score) print("Number of features used: ", coeff_used) print("Accuracy Score: ", model.score(X_test, y_test)) r_squared = model.score(X_test, y_test) print("The model can predict {0:.1%} of the variance in the test set.".format(r_squared)) zero_coef = model.coef_ == 0 n_ignored = sum(zero_coef) print("The model has ignored {} out of {} features.".format(n_ignored, len(model.coef_))) model = Lasso(alpha=0.1, random_state=0) # Fits the model and calculates performance stats model.fit(X_train, y_train) r_squared = model.score(X_test, y_test) n_ignored_features = sum(model.coef_ == 0) # Print peformance stats print("The model can predict {0:.1%} of the variance in the test set.".format(r_squared)) print("{} out of {} features were ignored.".format(n_ignored_features, len( model.coef_))) model.score(X_test, y_test) from sklearn.metrics import r2_score y_pred_lasso = model.fit(X_train, y_train).predict(X_test) r2_score_lasso = r2_score(y_test, y_pred_lasso) print("r^2 on test data : %f" % r2_score_lasso) plt.plot(model.coef_, color='gold', linewidth=2, label='Lasso coefficients') plt.legend(loc='best') plt.title("Lasso R^2: %f" % (r2_score_lasso)) plt.show()Ekstraksi Data Akan diekstraksi data dari 100 halaman review pada FemaleDaily 
sebagai batasan agar proses ekstraksi tidak dilakukan terlalu lama. Beirkut adalah syntax yang digunakan untuk mendapatkan dataimport pandas as pd from time import sleep from random import randint import numpy as np from bs4 import BeautifulSoup import requests #Pembuatan array untuk menyimpan data yang diekstraksi id = [] nama = [] umur = [] id_produk = [] produk = [] brand = [] nilai = [] pembelian = [] kepuasan = [] rekomendasi = [] jenis = [] date = [] tempat = [] tipe =[] parent = [] waktu_pemakaian = [] penggunaan = [] jumlah_review = [] review = [] url='https://reviews.femaledaily.com/?page=' for page in range(1,100): result = requests.get(url) temp = result.content soup = BeautifulSoup(result.content, 'html.parser') string_html = str(result.content) print(page) startr = '"storeNewestReviews":' starting_splitr = string_html.split(startr,1)[1] end_splitr = starting_splitr.split('storeProductsCampaign',1)[0] raw_list_review = ['"text":"'+itemr for itemr in end_splitr.split('"text":"')[1:]] for itemr in raw_list_review: start_tokenr = '"text":"' end_tokenr = ',"is_like"' temp = itemr.split(start_tokenr, 1)[1] resultr = temp.split(end_tokenr, 1)[0] review.append(resultr) review = list(set(review)) #Ekstraksi kategori produk start = '"storeNewestReviews":' starting_split = string_html.split(start,1)[1] end_split = starting_split.split('"storeProducts"',1)[0] raw_list_jenis = ['"category":{"name":"'+item for item in end_split.split('"category":{"name":"')[1:]] for item in raw_list_jenis: start_token = '"category":{"name":"' end_token = '","slug"' temp = item.split(start_token, 1)[1] hasil = temp.split(end_token, 1)[0] jenis.append(hasil) #Ekstraksi tanggal dibuatnya review yuhu = '"storeNewestReviews":' starting = string_html.split(yuhu,1)[1] end_splits = starting.split('"storeProducts"',1)[0] raw_list_date = ['"date":"'+yo for yo in end_splits.split('"date":"')[1:]] for yo in raw_list_date: start_letsgo = '"date":"' end_letsgo = '","text":' temp = yo.split(start_letsgo, 1)[1] hisal = temp.split(end_letsgo, 1)[0] date.append(hisal) # Ekstraksi data tempat pembelian produk yuhu1 = '"storeNewestReviews":' starting1 = string_html.split(yuhu1,1)[1] end1_splits = starting1.split('"storeProducts"',1)[0] raw_list_tempat = ['"place_to_get":{"place":"'+wi for wi in end1_splits.split('"place_to_get":{"place":"')[1:]] for wi in raw_list_tempat: start1_letsgo = '"place_to_get":{"place":"' end1_letsgo = '","order"' temp = wi.split(start1_letsgo, 1)[1] hisal1 = temp.split(end1_letsgo, 1)[0] tempat.append(hisal1) #Ekstraksi data kategori parent dari produk yang diulas yuhu2 = '"storeNewestReviews":' starting2 = string_html.split(yuhu2,1)[1] end2_splits = starting2.split('"storeProducts"',1)[0] raw_list_parent = ['"parent":{"name":"'+wo for wo in end2_splits.split('"parent":{"name":"')[1:]] for wo in raw_list_parent: start11_letsgo = '"parent":{"name":"' end11_letsgo = '","slug"' temp = wo.split(start11_letsgo, 1)[1] hisal11 = temp.split(end11_letsgo, 1)[0] parent.append(hisal11) #Ekstraksi id user yuhu21 = '"storeNewestReviews":' starting21 = string_html.split(yuhu21,1)[1] end21_splits = starting21.split('"storeProducts"',1)[0] raw_list_id = ['"user":{"id":'+wo1 for wo1 in end21_splits.split('"user":{"id":')[1:]] for wo1 in raw_list_id: start111_letsgo = '"user":{"id":' end111_letsgo = ',"username"' temp = wo1.split(start111_letsgo, 1)[1] hisal111 = temp.split(end111_letsgo, 1)[0] id.append(hisal111) # Ekstraksi id produk yang diulas mulai = '"storeNewestReviews":' starting211 = 
string_html.split(mulai,1)[1] end211_splits = starting211.split('"storeProducts"',1)[0] raw_list_id = ['"product":{"id":'+wo11 for wo11 in end211_splits.split('"product":{"id":')[1:]] for wo11 in raw_list_id: start1111_letsgo = '"product":{"id":' end1111_letsgo = ',"image"' temp = wo11.split(start1111_letsgo, 1)[1] hisal1111 = temp.split(end1111_letsgo, 1)[0] id_produk.append(hisal1111) #Ekstraksi data waktu pemakaian produk mulai1 = '"storeNewestReviews":' start_line = string_html.split(mulai1,1)[1] end_line = start_line.split('"storeProducts"',1)[0] raw_list_waktu = ['"duration_of_use":"'+nilai2 for nilai2 in end_line.split('"duration_of_use":"')[1:]] for nilai2 in raw_list_waktu: start_now = '"duration_of_use":"' end_now = '","place_to_get"' temp = nilai2.split(start_now, 1)[1] hasilnya = temp.split(end_now, 1)[0] waktu_pemakaian.append(hasilnya) #Ekstraksi data mulai penggunaan mulai11 = '"storeNewestReviews":' start_line1 = string_html.split(mulai11,1)[1] end_line1 = start_line1.split('"storeProducts"',1)[0] raw_list_peng = ['"is_firsttime_use":"'+nilai1 for nilai1 in end_line1.split('"is_firsttime_use":"')[1:]] for nilai1 in raw_list_peng: start_now1 = '"is_firsttime_use":"' end_now1 = '","duration_of_use"' temp = nilai1.split(start_now1, 1)[1] hasilnya = temp.split(end_now1, 1)[0] penggunaan.append(hasilnya) #Ekstraksi data-data yang dapat diambil langsung menggunakan struktur html nya project = soup.find_all("div", class_="review-card") for value in project: #Ekstraksi nama pengguna name = value.find_all("p", class_="profile-username") nama.append(name) #Ekstraksi rentang umur pengguna age = value.find_all("p", class_="profile-age") umur.append(age) #Ekstraksi nama produk yang diulas product = value.find_all("p", class_="product-name") produk.append(product) #Ekstraksi brand produk yang diulas merek = value.find_all("h3", class_="product-brand") brand.append(merek) #Ekstraksi nilai rating produk secara total puas = value.find_all('span', class_='rating-text') nilai.append(puas) #Ekstraksi data apakah pengguna merekomendasikan produk atau tidak recommend = value.find_all('p', class_='recommend') rekomendasi.append(recommend) #Ekstraksi tipe kulit pengguna tipes = value.find_all('p', class_='profile-description') tipe.append(tipes) #Ekstraksi jumlah review terhadap produk yang sedang diulas jumlach = value.find_all('span', class_='rating-total') jumlah_review.append(jumlach) #Ekstraksi besar rating yang diberikan pengguna kepada produk yang diulas full = len(value.find_all(('i'), class_ = "icon-ic_big_star_full")) kepuasan.append(full) url = 'https://reviews.femaledaily.com/?page='+ str(page+1) #Pembuatan dataframe dari data yang diekstrak a = {'date':date,'id' : id, 'nama' : nama ,'umur' : umur , 'id_produk':id_produk, 'produk': produk , 'brand':brand , 'nilai_produk':nilai ,'jumlah_review':jumlah_review, 'tempat_beli':tempat, 'jenis': jenis,'kategori': parent, 'tipe_kulit':tipe, 'waktu_pemakaian':waktu_pemakaian, 'penggunaan':penggunaan, 'kepuasan': kepuasan, 'rekomendasi': rekomendasi, 'review': review} skincare = pd.DataFrame.from_dict(a, orient='index') skincare = skincare.transpose() skincare.head(5) skincare.shapeSelanjutnya, data akan disimpan dalam bentuk CSV agar dapat dibersihkan bentuk html-nyafrom google.colab import files skincare.to_csv('data_ekstraksi.csv', header = True) #files.download("data_tubes.csv")Data Cleaningdata = pd.read_csv('/content/data_ekstraksi.csv') data.head(2)Data di-upload kembali. Kemudian, data baru dikeluarkan dan dilihat bentuknya. 
It can be seen that the data before it was converted to CSV and the newly loaded data have different formats. The old data shows "[[ ]]", which is an abbreviation of the underlying HTML. Once the data has been converted to CSV, that HTML markup becomes visible, so it has to be cleaned first. The data is stripped of its HTML with the following syntax: #Strip the HTML markup from the data from bs4 import BeautifulSoup date = [] nama = [] umur = [] produk = [] brand = [] nilai_produk = [] tempat_beli = [] jenis = [] kategori = [] tipe_kulit = [] kepuasan = [] rekomendasi = [] jumlah_review = [] for text in data['nama']: soup = BeautifulSoup(text,"html.parser") name = soup.find('a').get_text() nama.append(name) data.loc[:, 'nama'] = nama for text in data['umur']: soup = BeautifulSoup(text,"html.parser") p = soup.find('p') if p is not None: age = p.get_text() umur.append(age) data['umur']= pd.DataFrame(umur) for text in data['produk']: soup = BeautifulSoup(text,"html.parser") produk1 = soup.find('a').get_text() produk.append(produk1) data.loc[:, 'produk'] = produk for text in data['brand']: soup = BeautifulSoup(text,"html.parser") merk = soup.find('a').get_text() brand.append(merk) data.loc[:, 'brand'] = brand for text in data['nilai_produk']: soup = BeautifulSoup(text,"html.parser") mantap = soup.find('span') if mantap is not None: lala = mantap.get_text() nilai_produk.append(lala) data['nilai_produk']= pd.DataFrame(nilai_produk) for text in data['tipe_kulit']: soup = BeautifulSoup(text,"html.parser") kulit = soup.find('p') if kulit is not None: lala1 = kulit.get_text() tipe_kulit.append(lala1) data['tipe_kulit']= pd.DataFrame(tipe_kulit) for text in data['rekomendasi']: soup = BeautifulSoup(text,"html.parser") mantap4 = soup.find('p') if mantap4 is not None: lala4 = mantap4.get_text() rekomendasi.append(lala4) data['rekomendasi']= pd.DataFrame(rekomendasi) for text in data['jumlah_review']: soup = BeautifulSoup(text,"html.parser") mantap3 = soup.find('span') if mantap3 is not None: lala3 = mantap3.get_text() jumlah_review.append(lala3) data['jumlah_review']= pd.DataFrame(jumlah_review) data.head(5) It can be seen that the HTML has been removed and only the core data is kept. Next, the skin type column will be split into three columns: skin type, skin color, and skin tone. #Split the tipe_kulit column data[['tipe_kulit', 'warna_kulit', 'tone_kulit']] = data['tipe_kulit'].str.split(', ', 2, expand=True) data.head() data.shape Then, the data is checked for null values. # Check for missing data data.isna().sum() Rows with null values are dropped. #Drop missing data data = data.dropna() In addition, some emoji were extracted as unicode escape sequences, so the emoji text in the reviews is removed. #Remove emoji import re df = data df['review'] = data['review'].str.replace(r'\\\w+', '') df.head()/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy after removing the cwd from sys.path. The rekomendasi column is transformed from a string into a binary numeric value. #Recode the rekomendasi column: 1 if the user recommends the product, 0 if not data['rekomendasi'] = data['rekomendasi'].str.replace(r"\b(?!doesn'|t\b)\w+.",'') data["rekomendasi"] = [0 if ele == "doesn't " else 1 for ele in data["rekomendasi"]] The penggunaan column is likewise transformed into a binary value. #Recode the penggunaan column: 1 for first-time use, 0 otherwise data["penggunaan"] = ["0" if ele == "n" else "1" for ele in data["penggunaan"]] data.head(5) The '( )' symbols are removed from the jumlah_review column so that the column is purely numeric. #Remove the () characters data['jumlah_review'] = data['jumlah_review'].str.replace(r'\(', '') data['jumlah_review'] = data['jumlah_review'].str.replace(r'\)','') data.head() #Convert the column data types data['penggunaan'] = data['penggunaan'].astype('int') data['jumlah_review'] = data['jumlah_review'].astype('int') data.dtypes Finally, the first column is dropped because it is not relevant for any kind of further processing. data = data.drop(['Unnamed: 0'], axis = 1) data.head()Implementing a Simple Hash Table This notebook walks through a simple hash table implementation that behaves much like a Python dictionary but, for simplicity, has a fixed number of elements that it can store. The methods we are going to implement are listed below. Note that `__setitem__` and `__getitem__` are used to overload the `[]` index access for the `insert` and `get` methods, respectively.class HashTable(): def __init__(self, size=17): pass def insert(self, key, value): """Insert a new key-value pair or overwrites an existing one.""" pass def __setitem__(self, key, value): return self.insert(key, value) def get(self, key): """Retrieve the value corresponding to the key.""" pass def __getitem__(self, key): return self.get(key) def keys(self): """Retrieves all keys from the hash table""" pass def _hash(self, key): """Computes the hash position""" pass def _rehash(self, previous_hash): """Find the next hash for linear probing""" passHash Function For this simple example, we will be using a very naive hash function, the *remainder method*, which simply computes the remainder of the key when dividing it by the size of the hash table. For example:hash_table_size = 17 key = 123467 position = key % hash_table_size print('Position in the hash table:', position)Position in the hash table: 13Note that this function only works with integer keys. Thus, if the key is a string, we have to make a little modification:string_key = 'abcdef' key = sum(ord(c) for c in string_key) position = key % hash_table_size print('Position in the hash table:', position)Position in the hash table: 2Collision resolution with linear probingFor collision resolution we will use linear probing. That is, if a new key hashes to the same position as an existing one, we will increase the old hash value by a skip constant and check whether the resulting slot is empty. If not, we repeat this procedure until we find the next empty slot and insert the new key there. 
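As a quick standalone illustration (the probe_positions helper below is hypothetical and not part of the HashTable class that follows), probing with a skip constant of 1 simply walks the slots modulo the table size:
# Standalone sketch: the slots visited when probing from slot 13 in a table of size 17.
def probe_positions(start, size, steps=5):
    """Return the first few slots visited by linear probing with a skip constant of 1."""
    return [(start + i) % size for i in range(steps)]
print(probe_positions(13, 17))  # [13, 14, 15, 16, 0]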
We call this procedure "***rehashing***."For look-ups, we then have to use the following procedure:- Check the key in the hash table that maps to the hash value of the key we want to look up. - If the keys are identical, return the value. - Else, go through the hash table using "rehashing" until the look-up key is found, or return None if an empty slot is encountered. For simplicity, we will implement the rehashing function using a skip constant of 1. So let's now implement the `__init__`, `_hash`, `_rehash`, `keys` and `insert` methods to get a simple hash table set up in which we can already store some keys:class HashTable(): def __init__(self, size=17): self.size = size self.stored_keys = [None] * self.size self.stored_values = [None] * self.size def insert(self, key, value): """Insert a new key-value pair or overwrites an existing one.""" hash_val = self._hash(key) # insert new key if it doesn't exist yet if self.stored_keys[hash_val] is None: self.stored_keys[hash_val] = key self.stored_values[hash_val] = value # overwrite key if it already exists else: if self.stored_keys[hash_val] == key: self.stored_values[hash_val] = value # collision resolution elif len(self.keys()) == self.size: raise ValueError('Hash table is full') else: next_hash = self._rehash(hash_val) while (self.stored_keys[next_hash] is not None and self.stored_keys[next_hash] != key): next_hash = self._rehash(next_hash) # insert new key if it doesn't exist yet if self.stored_keys[next_hash] is None: self.stored_keys[next_hash] = key self.stored_values[next_hash] = value # overwrite key if it already exists else: self.stored_values[next_hash] = value def __setitem__(self, key, value): return self.insert(key, value) def get(self, key): """Retrieve the value corresponding to the key.""" pass def __getitem__(self, key): return self.get(key) def keys(self): """Retrieves all keys from the hash table""" return [k for k in self.stored_keys if k is not None] def _hash(self, key): """Computes the hash position.""" if isinstance(key, str): key = sum(ord(c) for c in key) position = key % hash_table_size return position def _rehash(self, previous_hash): """Find the next hash for linear probing""" return (previous_hash + 1) % self.sizeLet's start by inserting 2 different keys and checking that they have been stored by listing all keys:hashtable = HashTable() hashtable['abc'] = 1 hashtable['xyz'] = 2 hashtable.keys()Next, let's use a key that would result in a hash collision with one of the already stored keys:print(hashtable._hash('abc')) print(hashtable._hash('efg')) print(hashtable._hash('abcdefgh'))5 0 5We can now use this key, `'abcdefgh'`, to check whether hash collisions are resolved correctly:hashtable['abcdefgh'] = 3 hashtable.keys()Finally, let's add the get method to retrieve keys:class HashTable(): def __init__(self, size=17): self.size = size self.stored_keys = [None] * self.size self.stored_values = [None] * self.size def insert(self, key, value): """Insert a new key-value pair or overwrites an existing one.""" hash_val = self._hash(key) # insert new key if it doesn't exist yet if self.stored_keys[hash_val] is None: self.stored_keys[hash_val] = key self.stored_values[hash_val] = value # overwrite key if it already exists else: if self.stored_keys[hash_val] == key: self.stored_values[hash_val] = value # collision resolution elif len(self.keys()) == self.size: raise ValueError('Hash table is full') else: next_hash = self._rehash(hash_val) while (self.stored_keys[next_hash] is not None and self.stored_keys[next_hash] != key): next_hash = self._rehash(next_hash) 
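# At this point next_hash refers either to an empty slot or to the slot that already stores key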
# insert new key if it doesn't exist yet if self.stored_keys[next_hash] is None: self.stored_keys[next_hash] = key self.stored_values[next_hash] = value # overwrite key if it already exists else: self.stored_values[next_hash] = value def __setitem__(self, key, value): return self.insert(key, value) def get(self, key): """Retrieve the value corresponding to the key.""" hash_val = self._hash(key) if self.stored_keys[hash_val] == key: return self.stored_values[hash_val] elif self.stored_keys[hash_val] is None: return KeyError(key) else: next_hash = self._rehash(hash_val) while self.stored_keys[next_hash] != key: next_hash = self._rehash(next_hash) if next_hash == hash_val: return KeyError(key) elif self.stored_keys[next_hash] is None: return KeyError(key) return self.stored_values[next_hash] def __getitem__(self, key): return self.get(key) def keys(self): """Retrieves all keys from the hash table""" return [k for k in self.stored_keys if k is not None] def _hash(self, key): """Computes the hash position.""" if isinstance(key, str): key = sum(ord(c) for c in key) position = key % hash_table_size return position def _rehash(self, previous_hash): """Find the next hash for linear probing""" return (previous_hash + 1) % self.size hashtable = HashTable() hashtable['abc'] = 1 hashtable['xyz'] = 2 hashtable['abcdefgh'] = 3 hashtable['abc'] hashtable['xyz'] hashtable['abcdefgh']Software Carpentry with Python: Part 2 Data wrangling with the pandas libraryFor November 21, 2019Data needs to be downloaded at:https://go.gwu.edu/pythondatahttps://swcarpentry.github.io/python-novice-gapminder/files/python-novice-gapminder-data.zip Starting in the same spot:Remember, we're in the python-lesson directory. 1. Let's create a New > Folder. Click the checkbox and Rename it: data. 2. Then click to go into it. 2. We need to put our gapminder data here. You should have already downloaded that file as part of the set-up. If not, go to https://go.gwu.edu/gapminder and download it now. Unzip it!3. Click Upload and upload the unzipped data file. We're setting things up this was so that we all have the same file structure and can follow along. Also, this is generally a good practice, to create a data folder and put your original data files in there. A quick aside that there are Python libraries like OS Library that can work with our directory structure, however, that is not our focus today.Lessons used: Software Carpentry: https://swcarpentry.github.io/python-novice-gapminder/08-data-frames/index.html Why use Python for data analysis? * We can automate the process of performing data manipulations in Python. * It’s efficient to spend time building the code to perform these tasks because once it’s built, we can use it over and over on different datasets that use a similar format. * This makes our methods easily reproducible. We can also easily share our code with colleagues and they can replicate the same analysis. Working With Pandas DataFramesOne of the best options for working with tabular data in Python is to use the Python library pandas. The pandas library provides data structures, produces high quality plots with matplotlib and integrates nicely with other libraries that use NumPy (which is another Python library) arrays.**Python doesn’t load all of the libraries available to it by default.** We have to add an import statement to our code in order to use library functions. To import a library, we use the syntax import libraryName. 
If we want to give the library a nickname to shorten the command, we can add **as nickNameHere**. An example of importing the pandas library using the common nickname pd is below.import pandas as pdReading CSV Data Using PandasWe will begin by locating and reading our data which are in CSV format. CSV stands for Comma-Separated Values and is a common way store formatted data. We can use Pandas’ read_csv function to pull the file directly into a DataFrame.pd.read_csv('data/gapminder_all.csv')This output is the rows in our CSV file, now as a pandas DataFrame object. The first column is the index of the DataFrame. The index is used to identify the position of the data, but it is not an actual column of the DataFrame. It looks like the read_csv function in Pandas read our file properly. However, we haven’t saved any data to memory so we can work with it. We need to assign the DataFrame to a variable. Remember that a variable is a name for a value, such as x, or data. We can create a new object with a variable name by assigning a value to it using =.data = pd.read_csv("data/gapminder_all.csv", index_col="country")There are many ways to summarize and access the data stored in DataFrames, using attributes and methods provided by the DataFrame object.Methods are called on a DataFrame object using the syntax df_object.method(). As an example, `data.head()` gets the first few rows in the DataFrame surveys_df using the head() method. With a method, we can supply extra information in the parens to control behaviour.# nothing in parens defaults to first 5 rows data.head() type(data) data.dtypesThe DataFrame.columns variable stores information about the dataframe’s columns. Note that this is data, not a method. Like math.pi. So do not use () to try to call it.data.columns data.describe()Indexing and Slicing in PythonWe often want to work with subsets of a DataFrame object. There are different ways to accomplish this including: using:* labels (column headings)* numeric ranges* specific x,y index locations. Selecting data using Labels (Column Headings)We use square brackets [] to select a subset of a Python object. For example, we can select all data from a column named lifeExp2007 from the data DataFrame by name. There are two ways to do this:# Method 1: select a 'subset' of the data using the column name data['lifeExp_2007'] # Method 2: use the column name as an 'attribute'; gives the same output data.lifeExp_2007A DataFrame is a collection of Series; The DataFrame is the way Pandas represents a table, and Series is the data-structure Pandas use to represent a column.What we did above by taking a column was creating a Series.Pandas is built on top of the Numpy library, which in practice means that most of the methods defined for Numpy Arrays apply to Pandas Series/DataFrames.What makes Pandas so attractive is the powerful interface to access individual records of the table, proper handling of missing values, and relational-databases operations between DataFrames. Selecting using slices of rows Slicing using the `[]` operator selects a set of rows and/or columns from a DataFrame, not counting the labels. To slice out a set of rows, you use the following syntax: data[start:stop]. When slicing in pandas the start bound is included in the output. The stop bound is one step BEYOND the row you want to select. 
So if you want to select rows 0, 1 and 2 your code would look like this:data[0:3] # Can also leave out the 0 data[:3]Selecting valuesTo access a value at the position row i, column j [i,j] of a DataFrame, we have two options, depending on what is the meaning of i in use. Remember that a DataFrame provides a index as a way to identify the rows of the table; a row, then, has a position inside the table as well as a label, which uniquely identifies its entry in the DataFrame.Use DataFrame.loc[..., ...] to select values by their label.Use `: ` on its own to mean all columns or all rowsdata.loc["Algeria", "gdpPercap_1952"]Use `DataFrame.iloc[..., ...]` to select values by their (entry) positionCan specify location by numerical index analogously to 2D version of character selection in strings. (Or items in lists). The labels aren't included in the counting, they apply to the data.data.iloc[0,0]Let's do the same thing to get at the gdp in 1952 for Algeria:data.iloc[0, 1]**Exercise:**Practice Series and slicing.1) Get the lifeExp for all countries in 1992 and assign it to a variable called lifeExp1992. 2) Get the GDP in 1962 for New Zealand using multiple methods of slicing. Remember we can use .tail() to see the end of the data. **Answer 1**lifeExp1992 = data["lifeExp_1992"] print(lifeExp1992)country Algeria 67.744 Angola 40.647 Benin 53.919 Botswana 62.745 Burkina Faso 50.260 ... Switzerland 78.030 Turkey 66.146 United Kingdom 76.420 Australia 77.560 New Zealand 76.330 Name: lifeExp_1992, Length: 142, dtype: float64**Answer 2**data.loc["New Zealand", "gdpPercap_1962"] #There are 142 rows in our dataframe, however, since we count starting with zero, the last row is 141. data.iloc[141,3] # answer is 13175.678000 #help(pd.DataFrame.loc)**Using labels on multiple rows and columns:**To slice the life expectancy from 1992, 2002, 2007, for all of the countries:Specify the rows we want and then the columns we want.data.loc[:,["lifeExp_1992","lifeExp_2002","lifeExp_2007"]] # A list of the labels of the columns we want. data.loc[["Benin", "Turkey", "Afghanistan"],["lifeExp_1992","lifeExp_2002","lifeExp_2007"]]Creating slices lets us then use methods on those subsets. For example, we earlier grabbed just the column that had the lifeExp for 1992. We can find the max and min for that slice as follows:lifeExp1992 = data["lifeExp_1992"] print(lifeExp1992.min()) print(lifeExp1992.max())23.599 79.3623.599 is Rwanda79.36 is Japan When pandas selects a single column from a DataFrame, pandas creates a view and not a copy. A view just means that no new object has been created. No new object is created, just a new reference to the one already in existence. Since no new data has been created, the assignment will modify the original DataFrame. Subsetting Data Using CriteriaWe can also select a subset of our data using criteria.data[data.lifeExp_2007 > 80]Quick review of conditions: `== != > = <= ` **Exercise 5**Create a subset of the data that contains rows for countries where the GDP per capita in 2007 was less than $1000data[data.gdpPercap_2007 <= 1000]Using a mask to identify a specific condition.A mask can be useful to locate where a particular subset of values exist or don't exist, for example, NaN or "not a number". Comparison or function is applied element by element. Returns a similarly-shaped dataframe of True and False.Boolean is a Python data type, True or False. 
False is Python's way of saying "No."x = 1 x > 5There is a way with pandas' methods to check for null values, (missing data or NaN).pd.isnull(data)This is very clean data, all data is here, none of the cells have a value of True, isnull? = False.Let's confirm by taking a closer look, applying a filter to the data. using the any() method, which looks for only True values.data[pd.isnull(data).any(axis=1)]Group By: split-apply-combinePandas vectorizing methods and grouping operations are features that provide users much flexibility to analyse their data.For instance, let’s say we want to have a clearer view on how the European countries split according to their GDP.We can split the countries in two groups during the years surveyed, those who presented a GDP higher than the European average and those with a lower GDP.Remember we can use methods like .mean() on a dataframe. .mean() is calculated per column.# first create a new DataFrame that is a subset, just those countries in the continent of Europe. europe_df = data[data.continent == "Europe"].copy() europe_df.head()Looks like we still have ALL of the columns, not just GDP, so let's further slice the df. Going to use `.iloc` because there are a dozen columns out of the 37 we want and I don't want to list them all.# overwrite our existing dataframe, and use iloc to get all rows, just the columns with the GDP per capita variables. europe_df = europe_df.iloc[:,1:13] europe_df.head()We can use mean() to get the mean of each column.europe_df.mean()What is the result of taking mean() on a whole DataFrame?type(europe_df.mean())Now we know the average GDP for each of the years. Next is to figure out whether each country's GDP is over the mean, by creating a boolean mask like we did earlier. Remember the mask is a DataFramemask = europe_df > europe_df.mean() mask_higher = europe_df > europe_df.mean() mask_higher.head()We then estimate a wealthy score based on the historical (from 1962 to 2007) values, where we count how many times a country has participated in the groups of lower or higher GDP. So, need to count how many Trues there are in each row.We can use the **aggregate()** method to count (or sum), and then use axis=1 because we're applying this horizontally, across the all of the columns in a row. Axis = 0 is often the default in pandas and that applies something down a column.wealth_score = mask_higher.aggregate('sum', axis=1) / len(europe_df.columns) wealth_score type(wealth_score)We can now add this back to our dataframe since it's a pandas Series and it has the same index. (They'll be able to matched up)europe_df["wealth_score"] = wealth_score europe_df.head()Groupby() We often want to calculate summary statistics grouped by subsets or attributes within fields of our data. For example, we might want to calculate the average life expectancy in a particular year. Remember we've done this on a column using the describe() method. We can calculate basic statistics for all records in a single column using the syntax below:data["lifeExp_2007"].describe()But if we want to summarize by one or more variables, for example continent, and then apply statistics. So we can use Pandas’ .groupby() method. 
Once we’ve created a groupby DataFrame, we can quickly calculate summary statistics by a group of our choice.grouped_data = data.copy() grouped_data = grouped_data.groupby("continent") type(grouped_data)We can now look at descriptive statistics for each of the columns in the original DataFrame, grouped by continent.grouped_data.describe()We can look at a specific statistic, mean, applied across the groupby DataFrame:grouped_data.mean()Now let's look at a specific column.grouped_data["lifeExp_2007"].describe()Backup content on Transforms.data2 = data.T data2.head()Capstone Project - The Battle of Neighborhoods Where to live in Paris? ![alt text](paris-cityscape-overview-guide.jpg) I. Introduction Paris is a vibrant and complex city. For someone who has not been living in Paris for many years, the city may look impenetrable. Which neighborhoods are great for a coffee? Which neighborhoods are famous for its markets? Where are located the best bars? When confronted with these questions, tourists and new residents generally use a guide or use websites such as Yelp or Tripadvisor. My experience with these platforms has often been disappointing because the website (or the guide) is not tailored to my tastes and preferences. For this capstone project, I would like to offer an alternative based on data mining and clustering. Data on Paris neighborhoods amenities (bars, cafés, museums, bakeries, etc.) can easily be collected and treated to generate a map of Paris. Based on the user's preferences, we can direct the user towards a specific neighborhood in Paris. This approach can be seen as a refinement of the traditional tourism websites, with the addition of a data-driven customization layer. Having a data-driven map of Paris is also helpful for people moving to Paris when deciding where to live. The French capital is extremely expensive. Yes, [the Marais](https://en.wikipedia.org/wiki/The_Marais) is great, but maybe you prefer living in the much cheaper 19th or 20th? To answer this question, we need data and a robust clustering methodology. II. Data To create a data-driven map of Paris' neighborhoods, I will data from [Foursquare API](https://developer.foursquare.com/). Foursquare defines itself as "a location technology platform dedicated to improving how people move through the real world".In practice, people use Foursquare [platform](https://foursquare.com/bestbarsuk) or app to find places. They can then rate the place, give a rating, add photos and/or a description. The Foursquare API allows us the retrieve the data that was created by users. I will also use data from Wikipedia to get information on Paris. This [page](https://en.wikipedia.org/wiki/Arrondissements_of_Paris) contains the name of Paris areas ("arrondissements"), as well as some basic information such as area and population. -------------- III. Cluster Analysis Table of ContentsPart A: Download Neighborhoods DataPart B: Geocode NeighborhoodsPart C: Cluster Analysis1. Download and Explore Dataset using the Foursquare API 2. Explore Neighborhoods in Paris 3. Analyze Each Neighborhood4. Cluster Neighborhoods5. 
Examine Clusters Part A: Download Neighborhoods Dataimport numpy as np # library to handle data in a vectorized manner import pandas as pd # library for data analsysis pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) import json # library to handle JSON files #!conda install -c conda-forge geopy --yes # uncomment this line if you haven't completed the Foursquare API lab from geopy.geocoders import Nominatim # convert an address into latitude and longitude values import requests # library to handle requests from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe # Matplotlib and associated plotting modules import matplotlib.cm as cm import matplotlib.colors as colors # import k-means from clustering stage from sklearn.cluster import KMeans #!conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab import folium # map rendering library import time #to create wordclouds from PIL import Image from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt %matplotlib inline print('Libraries imported.') address = 'Paris, France' geolocator = Nominatim(user_agent="par") location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude print('The geograpical coordinate of Paris are {}, {}.'.format(latitude, longitude)) # create map of New York using latitude and longitude values map_paris = folium.Map(location=[latitude, longitude], zoom_start=12) map_parisLet's try to add "arrondissements" to the map above.import pandas as pd import numpy as np link = "https://en.wikipedia.org/wiki/Arrondissements_of_Paris" tables = pd.read_html(link, header=0) # read_html creates a list of dataframes. # Let's just take the first one: df = tables[2] # rename the first column: df.rename(columns = {"Arrondissement (R for Right Bank, L for Left Bank)" : "Arrondissement"}, inplace=True) df.head()Part B: Geocode Neighborhoods Let's geocode the dataframe above:# List to store values Postcode = [] Borough = [] Neighbourhood = [] # Options # Normal sleeping time sleeping_time = 0.1 # Sleeping time in case of error sleeping_time_error = 1.0 # Max number of attempts (sometimes, we don't get a value) max_nb_attempts = 2 geolocator = Nominatim(user_agent="Coursera_Capstone") #Initialization df["Latitude"] = 0 df["Longitude"] = 0 for index, row in df.iterrows(): # Select the first neighborhood l = row["Name"] + ", Paris France" print(l) # Loop until success current_attempt = 0 while True: current_attempt +=1 try: location = geolocator.geocode(l) print((location.latitude, location.longitude)) except: print("Error with geolocator.geocode(l)") print("Sleeping for a while and trying again") time.sleep(sleeping_time_error) # Sleep for some time to prevent us being blocked: if location is not None: time.sleep(sleeping_time) df.loc[index, "Latitude"] = location.latitude df.loc[index, "Longitude"] = location.longitude # exit while loop break # If error, sleep a little bit longer else: print("Sleeping for a while and trying again") time.sleep(sleeping_time_error) # Exit if reached the max number of attempts if current_attempt == max_nb_attempts: print("Max number of attempts reached. 
Setting lat and lon to 0") df.loc[index,"Latitude"] = 0 df.loc[index,"Longitude"] = 0 break df.head() # create map of New York using latitude and longitude values map_paris = folium.Map(location=[latitude, longitude], zoom_start=12) # add markers to map for (lat, lng, borough, arr) in zip(df['Latitude'], df['Longitude'], df['Name'], df['Arrondissement']): label = '{},{}'.format(borough, arr) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(map_paris) map_paris![alt text](map_Paris_2.png) Part C: Cluster Analysis Download and Explore Dataset using the Foursquare API Let's first read credentials, which are stored in a json filefrom pathlib import Path import json path_to_main = Path().absolute() #insert your own generated keys: #Read JSON data into the datastore variable filename = str(path_to_main) + "/ID_Foursquare.json" if filename: with open(filename, 'r') as f: ids = json.load(f) else: print("ID_Foursquare.json not found. Cannot connect to Foursquare.") CLIENT_ID = ids["Client_Id"] # your Foursquare ID CLIENT_SECRET = ids["Client_Secret"] # your Foursquare Secret ACCESS_TOKEN = ids["Access_Token"] VERSION = '20180604' df.head()Now, let's get the top 100 venues that are in the first arrondissement with radius of 500 meters.LIMIT = 500 # limit of number of venues returned by Foursquare API radius = 1000 # define radius (in meters) # create URL url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, df.loc[0, "Latitude"], df.loc[0, "Longitude"], radius, LIMIT)Let's examine the requestresults = requests.get(url).json()From the Foursquare lab in the previous module, we know that all the information is in the *items* key. 
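For instance, we can peek at the raw response before flattening it (a minimal sketch, assuming the request above succeeded; it only relies on the response/groups/items/venue keys that the code below already uses):
# Inspect the first venue item returned by the explore endpoint.
items = results['response']['groups'][0]['items']
print('Number of items returned:', len(items))
print(items[0]['venue']['name'], '-', items[0]['venue']['categories'][0]['name'])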
Before we proceed, let's borrow the **get_category_type** function from the Foursquare lab.# function that extracts the category of the venue def get_category_type(row): try: categories_list = row['categories'] except: categories_list = row['venue.categories'] if len(categories_list) == 0: return None else: return categories_list[0]['name']Now we are ready to clean the json and structure it into a *pandas* dataframe.venues = results['response']['groups'][0]['items'] nearby_venues = json_normalize(venues) # flatten JSON # filter columns filtered_columns = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng'] nearby_venues =nearby_venues.loc[:, filtered_columns] # filter the category for each row nearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1) # clean columns nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns] nearby_venues.head()And how many venues were returned by Foursquare?print('{} venues were returned by Foursquare.'.format(nearby_venues.shape[0]))100 venues were returned by Foursquare.Let's create a function to repeat the same process for all the neighborhoods in Parisdef getNearbyVenues(names, latitudes, longitudes, radius=500): venues_list=[] for name, lat, lng in zip(names, latitudes, longitudes): print(name) # create the API request URL url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, LIMIT) # make the GET request results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue venues_list.append([( name, lat, lng, v['venue']['name'], v['venue']['location']['lat'], v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in results]) nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) nearby_venues.columns = ['Neighborhood', 'Neighborhood Latitude', 'Neighborhood Longitude', 'Venue', 'Venue Latitude', 'Venue Longitude', 'Venue Category'] return(nearby_venues)Let's collect venues for each arrondissement.paris_venues = getNearbyVenues(names=df['Name'], latitudes=df['Latitude'], longitudes=df['Longitude'], radius=radius)Louvre Bourse Temple Hôtel-de-Ville Panthéon Luxembourg Palais-Bourbon Élysée Opéra Entrepôt Popincourt Reuilly Gobelins Observatoire Vaugirard Passy Batignolles-Monceau Butte-Montmartre Buttes-Chaumont MénilmontantLet's check the size of the resulting dataframeprint(paris_venues.shape) paris_venues.head()(1921, 7)Let's check how many venues were returned for each neighborhoodparis_venues.groupby('Neighborhood').count()How many different categories do we have?print('There are {} unique categories.'.format(len(paris_venues['Venue Category'].unique())))There are 230 unique categories.Let's save the dataframe to disk.paris_venues.to_csv("data/paris_venues.csv") print("paris_venues.csv saved")paris_venues.csv saved3. 
Analyze Each Neighborhood# one hot encoding paris_onehot = pd.get_dummies(paris_venues[['Venue Category']], prefix="", prefix_sep="") # add neighborhood column back to dataframe paris_onehot['Neighborhood'] = paris_venues['Neighborhood'] # move neighborhood column to the first column fixed_columns = [paris_onehot.columns[-1]] + list(paris_onehot.columns[:-1]) paris_onehot = paris_onehot[fixed_columns] print(paris_onehot.shape) paris_onehot.head()(1921, 231)Next, let's group rows by neighborhood and by taking the mean of the frequency of occurrence of each categoryparis_grouped = paris_onehot.groupby('Neighborhood').mean().reset_index() paris_grouped.head()paris_grouped should have 20 rows. Is it the case?paris_grouped.shapeLet's print each neighborhood along with the top 5 most common venues Not surprisingly, the category **"French Restaurant"** often comes firstnum_top_venues = 5 for hood in paris_grouped['Neighborhood']: print("----"+ hood+"----") temp = paris_grouped[paris_grouped['Neighborhood'] == hood].T.reset_index() temp.columns = ['venue','freq'] temp = temp.iloc[1:] temp['freq'] = temp['freq'].astype(float) temp = temp.round({'freq': 2}) print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues)) print('\n') print('\n')----Batignolles-Monceau---- venue freq 0 French Restaurant 0.20 1 Hotel 0.06 2 Italian Restaurant 0.06 3 Wine Bar 0.05 4 Bar 0.04 ----Bourse---- venue freq 0 French Restaurant 0.11 1 Japanese Restaurant 0.07 2 Wine Bar 0.06 3 Hotel 0.05 4 Bakery 0.04 ----Butte-Montmartre---- venue freq 0 French Restaurant 0.18 1 Bar 0.09 2 Pizza Place 0.05 3 Bistro 0.04 4 Café 0.04 ----Buttes-Chaumont---- venue freq 0 French Restaurant 0.14 1 Bar 0.09 2 Café 0.07 3 Restaurant 0.05 4 Cocktail Bar 0.04 ----Entrepôt---- venue freq 0 French Restaurant 0.08 1 Coffee Shop 0.07 2 Indian Restaurant 0.05 3 Hotel 0.04 4 Pizza Place 0.04 ----Gobelins---- venue [...]Let's put that into a *pandas* dataframe Now let's create the new dataframe and display the top 10 venues for each neighborhood.def return_most_common_venues(row, num_top_venues): row_categories = row.iloc[1:] row_categories_sorted = row_categories.sort_values(ascending=False) return row_categories_sorted.index.values[0:num_top_venues] num_top_venues = 10 indicators = ['st', 'nd', 'rd'] # create columns according to number of top venues columns = ['Neighborhood'] for ind in np.arange(num_top_venues): try: columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind])) except: columns.append('{}th Most Common Venue'.format(ind+1)) # create a new dataframe neighborhoods_venues_sorted = pd.DataFrame(columns=columns) neighborhoods_venues_sorted['Neighborhood'] = paris_grouped['Neighborhood'] for ind in np.arange(paris_grouped.shape[0]): neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(paris_grouped.iloc[ind, :], num_top_venues) neighborhoods_venues_sorted4. 
Cluster Neighborhoods Run *k*-means to cluster the neighborhood into 5 clusters.# set number of clusters kclusters = 5 paris_grouped_clustering = paris_grouped.drop('Neighborhood', 1) # run k-means clustering kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(paris_grouped_clustering) # check cluster labels generated for each row in the dataframe kmeans.labels_[0:10]Let's create a new dataframe that includes the cluster as well as the top 10 venues for each neighborhood.# add clustering labels neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_) paris_merged = df # rename for consistency: paris_merged.rename(columns={"Name": "Neighborhood"}, inplace=True) # merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood paris_merged = paris_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood') # drop a missing labels paris_merged.dropna(subset = ["Cluster Labels"], inplace=True) # convert to int paris_merged["Cluster Labels"] = paris_merged["Cluster Labels"].astype(int) paris_merged.head() # create map map_clusters = folium.Map(location=[latitude, longitude], zoom_start=12) # set color scheme for the clusters x = np.arange(kclusters) ys = [i + x + (i*x)**2 for i in range(kclusters)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] # add markers to the map markers_colors = [] for lat, lon, poi, cluster in zip(paris_merged['Latitude'], paris_merged['Longitude'], paris_merged['Arrondissement'], paris_merged['Cluster Labels']): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=5, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=0.7).add_to(map_clusters) map_clusters![alt text](map_paris_clusters.png) 5. 
Examine Clusters Now, you can examine each cluster and determine the discriminating venue categories that distinguish each cluster.for k in range(0,kclusters): print("---------------------") print("Cluster {}".format(k)) print("---------------------") print(paris_merged.loc[paris_merged['Cluster Labels'] == k, paris_merged.columns[[1] + list(range(5, paris_merged.shape[1]))]].head()) print("\n")--------------------- Cluster 0 --------------------- Neighborhood Density (2005)(inhabitants per km2) Peak of population \ 4 Panthéon 23849 1911 6 Palais-Bourbon 13552 1926 7 Élysée 9972 1891 13 Observatoire 23964 1954 14 Vaugirard 27335 1962 Mayor Latitude Longitude Cluster Labels \ 4 (LR) 48.846191 2.346079 0 6 (LR) 48.861596 2.317909 0 7 (LR) 48.846644 2.369830 0 13 (PS) 48.829567 2.323962 0 14 (LR) 48.841430 2.296165 0 1st Most Common Venue 2nd Most Common Venue 3rd Most Common[...]Cluster 1 The first cluster (label 0) is composed of the **5th, 7th, 8th, 14th, 15th, 16th and 17th arrondissements**.* The first most common venue: French restaurants* The second most common venue: hotels* The third most common venue: italian restaurantsdf_cluster_1 = paris_merged[paris_merged['Cluster Labels'] == 0] df_cluster_1Visualize common venueslist_venues = [] for index, row in df_cluster_1.iterrows(): #loop over top 10 venues: for i in range(1,11): #let's create a ponderation to take into account the order #that is, we add several times the same word based on the order #(it would be more accurate to weight by the actual number of occurances) for k in range(0,i): list_venues.append(df_cluster_1.iloc[0, -i]) list_venues = (" ").join(list_venues) # instantiate a word cloud object\n", cluster1_wc = WordCloud(collocations=False, background_color='white', max_words=2000) cluster1_wc.generate(list_venues) plt.figure(figsize=(15,8)) plt.imshow(cluster1_wc) plt.axis("off") plt.savefig("cluster1.png", bbox_inches='tight')![alt text](cluster1.png) Cluster 2 The first cluster (label 1) is composed of the **12th arrondissement**.* The first most common venue: theater* The second most common venue: stadium* The third most common venue: recreation centerdf_cluster_2 = paris_merged[paris_merged['Cluster Labels'] == 1] df_cluster_2 list_venues = [] for index, row in df_cluster_2.iterrows(): #loop over top 10 venues: for i in range(1,11): #let's create a ponderation to take into account the order #that is, we add several times the same word based on the order #(it would be more accurate to weight by the actual number of occurances) for k in range(0,i): list_venues.append(df_cluster_2.iloc[0, -i]) list_venues = (" ").join(list_venues) # instantiate a word cloud object\n", cluster1_wc = WordCloud(collocations=False, background_color='white', max_words=2000) cluster1_wc.generate(list_venues) plt.figure(figsize=(15,8)) plt.imshow(cluster1_wc) plt.axis("off") plt.savefig("cluster2.png", bbox_inches='tight')![alt text](cluster2.png) Cluster 3 The first cluster (label 2) is composed of the **1st, 2nd, 3rd, 4th, 6th, 9th and 10th arrondissements**.* The first most common venue: French restaurants* The second most common venue and third most common venue are quite diverse (coffee shops, hotels, wine bars)df_cluster_3 = paris_merged[paris_merged['Cluster Labels'] == 2] df_cluster_3 list_venues = [] for index, row in df_cluster_3.iterrows(): #loop over top 10 venues: for i in range(1,11): #let's create a ponderation to take into account the order #that is, we add several times the same word based on the order #(it would be more 
accurate to weight by the actual number of occurances) for k in range(0,i): list_venues.append(df_cluster_3.iloc[0, -i]) list_venues = (" ").join(list_venues) # instantiate a word cloud object\n", cluster1_wc = WordCloud(collocations=False, background_color='white', max_words=2000) cluster1_wc.generate(list_venues) plt.figure(figsize=(15,8)) plt.imshow(cluster1_wc) plt.axis("off") plt.savefig("cluster3.png", bbox_inches='tight')![alt text](cluster3.png) Cluster 4 The 4th cluster (label 3) is composed of the **11th, 18th, 19th, and 20th arrondissements**.* The first most common venue: French restaurants* The second most common venue: bars* The third most common venue: pizza placesdf_cluster_4 = paris_merged[paris_merged['Cluster Labels'] == 3] df_cluster_4 list_venues = [] for index, row in df_cluster_4.iterrows(): #loop over top 10 venues: for i in range(1,11): #let's create a ponderation to take into account the order #that is, we add several times the same word based on the order #(it would be more accurate to weight by the actual number of occurances) for k in range(0,i): list_venues.append(df_cluster_4.iloc[0, -i]) list_venues = (" ").join(list_venues) # instantiate a word cloud object\n", cluster1_wc = WordCloud(collocations=False, background_color='white', max_words=2000) cluster1_wc.generate(list_venues) plt.figure(figsize=(15,8)) plt.imshow(cluster1_wc) plt.axis("off") plt.savefig("cluster4.png", bbox_inches='tight')![alt text](cluster4.png) Cluster 5 The 5th cluster (label 4) is composed of the **11th, 18th, 19th, and 20th arrondissements**.* The first most common venue: Vietnamese restaurants* The second most common venue: Thai restaurants* The third most common venue: French restaurantsdf_cluster_5 = paris_merged[paris_merged['Cluster Labels'] == 4] df_cluster_5 list_venues = [] for index, row in df_cluster_5.iterrows(): #loop over top 10 venues: for i in range(1,11): #let's create a ponderation to take into account the order #that is, we add several times the same word based on the order #(it would be more accurate to weight by the actual number of occurances) for k in range(0,i): list_venues.append(df_cluster_5.iloc[0, -i]) list_venues = (" ").join(list_venues) # instantiate a word cloud object\n", cluster1_wc = WordCloud(collocations=False, background_color='white', max_words=2000) cluster1_wc.generate(list_venues) plt.figure(figsize=(15,8)) plt.imshow(cluster1_wc) plt.axis("off") plt.savefig("cluster5.png", bbox_inches='tight')Setup rendering dependencies for Google Colaboratory.!pip install gym pyvirtualdisplay > /dev/null 2>&1 !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1Install d3rlpy!!pip install d3rlpyCollecting d3rlpy Downloading d3rlpy-1.1.0-cp37-cp37m-manylinux1_x86_64.whl (1.2 MB)  |████████████████████████████████| 1.2 MB 5.2 MB/s [?25hRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (1.11.0+cu113) Requirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (1.0.2) Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (4.2.0) Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (7.1.2) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (1.4.1) Requirement already satisfied: cloudpickle in /usr/local/lib/python3.7/dist-packages (from d3rlpy) (1.3.0) Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from 
d3rlpy) (3.1.0) Collecting tensorboardX Downloading tensorboardX-2.5-py2.py3-none-any.whl (125 kB)  |███████████████[...]Setup cartpole dataset.from d3rlpy.datasets import get_cartpole # get CartPole dataset dataset, env = get_cartpole()Downloading cartpole.pkl into d3rlpy_data/cartpole_replay_v1.1.0.h5...Setup data-driven deep reinforcement learning algorithm.from d3rlpy.algos import DiscreteCQL from d3rlpy.metrics.scorer import discounted_sum_of_advantage_scorer from d3rlpy.metrics.scorer import evaluate_on_environment from d3rlpy.metrics.scorer import td_error_scorer from d3rlpy.metrics.scorer import average_value_estimation_scorer from sklearn.model_selection import train_test_split # setup CQL algorithm cql = DiscreteCQL(use_gpu=False) # split train and test episodes train_episodes, test_episodes = train_test_split(dataset, test_size=0.2) # start training cql.fit(train_episodes, eval_episodes=test_episodes, n_epochs=1, scorers={ 'environment': evaluate_on_environment(env), # evaluate with CartPol-v0 environment 'advantage': discounted_sum_of_advantage_scorer, # smaller is better 'td_error': td_error_scorer, # smaller is better 'value_scale': average_value_estimation_scorer # smaller is better })2022-05-06 06:02.00 [debug ] RoundIterator is selected. 2022-05-06 06:02.00 [info ] Directory is created at d3rlpy_logs/DiscreteCQL_20220506060200 2022-05-06 06:02.00 [debug ] Building models... 2022-05-06 06:02.00 [debug ] Models have been built. 2022-05-06 06:02.00 [info ] Parameters are saved to d3rlpy_logs/DiscreteCQL_20220506060200/params.json params={'action_scaler': None, 'alpha': 1.0, 'batch_size': 32, 'encoder_factory': {'type': 'default', 'params': {'activation': 'relu', 'use_batch_norm': False, 'dropout_rate': None}}, 'gamma': 0.99, 'generated_maxlen': 100000, 'learning_rate': 6.25e-05, 'n_critics': 1, 'n_frames': 1, 'n_steps': 1, 'optim_factory': {'optim_cls': 'Adam', 'betas': (0.9, 0.999), 'eps': 1e-08, 'weight_decay': 0, 'amsgrad': False}, 'q_func_factory': {'type': 'mean', 'params': {'share_encoder': False}}, 'real_ratio': 1.0, 'reward_scaler': None, 'scaler': None, 'target_update_interval': 8000, 'use_gpu': None, 'algorithm': 'DiscreteCQL', 'observation[...]Setup rendering utilities for Google Colaboratory.import glob import io import base64 from gym.wrappers import Monitor from IPython.display import HTML from IPython import display as ipythondisplay from pyvirtualdisplay import Display # start virtual display display = Display(visible=0, size=(1400, 900)) display.start() # play recorded video def show_video(): mp4list = glob.glob('video/*.mp4') if len(mp4list) > 0: mp4 = mp4list[0] video = io.open(mp4, 'r+b').read() encoded = base64.b64encode(video) ipythondisplay.display(HTML(data=''' '''.format(encoded.decode('ascii')))) else: print("Could not find video")Record video!# wrap Monitor wrapper env = Monitor(env, './video', force=True) # evaluate evaluate_on_environment(env)(cql)Let's see how it works!show_video()Starting !! Part 02-Module 01-Lesson 01_Training and Testing Models/05. 
NumPy Arrays !!import pandas as pd import numpy as np data = pd.read_csv("data/dataForNumpy.csv") print(data) X = np.array(data[['x1', 'x2']]) print(X) Y = np.array(data['y'])[[ 0.78051 -0.063669 ] [ 0.28774 0.29139 ] [ 0.40714 0.17878 ] [ 0.2923 0.4217 ] [ 0.50922 0.35256 ] [ 0.27785 0.10802 ] [ 0.27527 0.33223 ] [ 0.43999 0.31245 ] [ 0.33557 0.42984 ] [ 0.23448 0.24986 ] [ 0.0084492 0.13658 ] [ 0.12419 0.33595 ] [ 0.25644 0.42624 ] [ 0.4591 0.40426 ] [ 0.44547 0.45117 ] [ 0.42218 0.20118 ] [ 0.49563 0.21445 ] [ 0.30848 0.24306 ] [ 0.39707 0.44438 ] [ 0.32945 0.39217 ] [ 0.40739 0.40271 ] [ 0.3106 0.50702 ] [ 0.49638 0.45384 ] [ 0.10073 0.32053 ] [ 0.69907 0.37307 ] [ 0.29767 0.69648 ] [ 0.15099 0.57341 ] [ 0.16427 0.27759 ] [ 0.33259 0.055964 ] [ 0.53741 0.28637 ] [ 0.19503 0.36879 ] [ 0.40278 0.035148 ] [ 0.21296 0.55169 ] [ 0.48447 0.56991 ] [ 0.25476 0.34596 ] [ 0.21726 0.28641 ] [ 0.67078 0.46538 ] [ 0.3815 0.4622 ] [ 0.53838 0.32774 ] [ 0.4849 0.26071 ] [...]!! Part 02-Module 01-Lesson 01_Training and Testing Models/06. Training models in sklearn !! In the last section, we learned most of the most important classification algorithms in Machine Learning, including the following:Logistic RegressionNeural NetworksDecision TreesSupport Vector Machinesimport matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle("Data of Points") ax1.plot(X, 'r+'); ax2.plot(Y, 'b.');Fit Model Classifier With Scikit Learn 1. Logistic Regressionfrom sklearn.linear_model import LogisticRegression classifierLR = LogisticRegression() classifierLR.fit(X, Y)Logistic testingxtest = [[ 0.9, .7 ]] print(classifierLR.predict(xtest))[1]2. Neural Networksfrom sklearn.neural_network import MLPClassifier classifierNN = MLPClassifier()3. Decision Treesfrom sklearn.tree import DecisionTreeClassifiere classifierDT = DecisionTreeClassifiere()4. Support Vector Machinesfrom sklearn.svm import SVC classifierSV = SVC()Building a Model to Predict Survival for Titanic Passengers**Welcome to _DS2: Introduction to Machine Learning_**! This course will be all about _predictive analytics_--that is, using data and algorithms to make accurate predictions. For our introductory exercise for this course, we're going to focus on the one of the areas where machine learning really shines--**_Classification_**. We're going to examine the data and build a simple model to predict whether or not a passenger survived the Titanic disaster. Here's the catch: before we use any machine learning, we're going to build a classifier by hand to gain an intuition about how classification actually works. The GameplanWe're going to start by building the simplest model possible, and then slowly add complexity as we notice patterns that can make our classifier more accurate. Recall that we've investigated this dataset before, in DS1. We're going to use our _Data Analysis_ and _Visualization_ skills from DS1 to investigate our dataset and see if we can find some patterns that we can use in our prediction algorithm. In order to successfully build a prediction algorithm, we'll use the following process:**1. Load and explore the data.** --We'll begin by reading our data into a dataframe, and then visualizing our data to see if we can find certain groups that had higher survival rates than others. At this step, we'll also remove the `Survived` column from the dataframe and store it in a separate variable. 
**2.Write a prediction function.** -- We'll write a function that takes in a dataframe and predicts 0 (died) or 1(survived) for each passenger based on whatever we decide is important. This function should output a vector containing only 0's and 1's, where the first element is the prediction for the first passenger in the dataframe, the 2nd element is the prediction for the second passenger, etc. **3. Write an evaluation function.** -- In order to evaluate how accurate our prediction function is, we'll need to track how it does. To do this, we'll create a _confusion matrix_. This matrix will exist as a dictionary that tracks the number of _True Positives_, _True Negatives_, _False Positives_, and _False Negatives_ our algorithm makes--don't worry if you haven't seen these terms before. We'll define them in a later section. **4. Tweak our prediction function until we're happy!** --once we've built out the functions that underpin our predictive algorithm, we'll tweak them until we hit our desired accuracy metric. In this case, **_we'll shoot for an accuracy of at least 80%._**Let's get started!#Import everything needed for the project. import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inlineStep 1: Load and Explore the DataIn this section, we'll:1. Read the data from `titanic.csv` and store it in a dataframe (you'll find this file in the `/datasets` folder).2. Remove the `Survived` column from the dataframe and store it as a Pandas Series in a variable. 3. Create a general purpose function that visualizes survivors vs deaths in any data frame passed in.4. Clean our dataframe (remove unnecessary columns, deal with null values, etc). 5. Explore our data and figure out which groups are most likely to survive.NOTE: There are many ways to successfully visualize survival rates across the different features. The most inuitive way to visualize survival rates as a stacked bar chart, where 'survived' and 'dead' are different colors on the same bar. For an easy explanation of how to make these bar charts, see [this Stack Overflow question](https://stackoverflow.com/questions/41622054/stacked-histogram-of-grouped-values-in-pandas).# Read in the titanic.csv dataset from the /datasets folder. raw_df = None # Store the survived column in the labels variable, and then drop the column from the data frame. labels = None #Don't forget to remove these columns from the dataframe! columns_to_remove = ['PassengerId', 'Name', 'Ticket', 'Cabin']Next, we'll create a function that allows us to quickly visualize the survival rates of any dataframe of passengers. This way, we can iterate quickly by slicing our dataframe and visualizing the survival rate to see if we can find any patterns that will be useful to us. As an example, if we wanted to visualize the survival rates of men versus women, we would create a dataframe object that contains only the information that matters to us, and then pass it into this function. When completed, this function should output a histogram plot that looks like the ones seen in the Stack Overflow link listed above.# Create a function used to visualize survival rates for the data frame passed in def visualize_survival_rates(dataframe, xlabel=None, ylabel="Count"): """ Inputs: dataframe--a pandas dataframe object consisting of the things you want visualized. 
labels--a pandas series object that tells us whether each passenger died (0) or survived(1) Outputs: A 2 color histogram that visualizes the survival rate of passengers based on the values contained within the dataframe. For instance, if we pass in a visualization NOTE: You should rely on the dataframe's .hist() method to do most of the heavy lifting for visualizations. Any slicing of the dataframe should be done BEFORE you call this function. For instance, if you want to visualize survival rates of men under 30 vs women under 30, you should create a dataframe containing only these rows and columns before passing it into this function, rather than passing in the full original dataframe. This will allow you to keep the logic in this function simple. """ passBuilding a Prediction FunctionNext, we'll write a prediction function. We'll use basic control flow to examine each row in the data set and make a prediction based on whatever we think is important. If you explored the data set, you may have stumbled upon a few interesting discoveries, such as:* Women were more likely to survive than men. * Rich people were more likely to survive than poor people. * Young people were more likely to survive than others. (NOTE: We made these up--don't automatically assume they're true without investigating first!)These may seem obvious, but don't discount their usefulness! We can use these facts to build a prediction function that has decent accuracy! For instance, let's pretend that we found that 80% of all women survived. Knowing this, if we then tell our algorithm to predict than all female passengers survived, we'll be right 80% of the time for female passengers! Complete the following prediction function. It should take in a dataframe of titanic passengers. Based on the things you think are important (just use a bunch of nested control flow statements), you'll output a 1 if you think this passenger survived, or a if you think they died. The function should output an array where the first item is the prediction for the first row in the dataframe, the 2nd item in the array is the prediction for the seconf row in the dataframe, etc.def predict_survival(dataframe): predictions = [] # WRITE YOUR PREDICTION CODE BELOW! return predictionsEvaluating Your PredictionsGreat! Now we've evaluated our data and made a bunch of predictions--but predictions are only interesting if they're accurate. In order to do this, we're going to create a **_Confusion Matrix_** to track what we got right and wrong (and _how_ we were right and wrong). There are 4 different possible outcomes for each prediction:1. **True Positive** -- You predicted they survived (1), and they actually survived (1). 2. **True Negative** -- You predicted they died (0), and they actually died (0).3. **False Positive** -- You predicted they survived (1), and they actually died (0).4. **False Negative** -- You predicted they died (0), and they actually survived (1).We're going to write a function that takes in our predictions and the actual labels (the "Survived" column we removed from the actual data frame), and determines which possible outcome we had for each prediction. We will keep track of how many times each outcome happened by incrementing a counter for each in our _Confusion Matrix_ dictionary.def create_confusion_matrix(predictions, labels): confusion_matrix = {"TP": 0, "TN": 0, "FP": 0, "FN": 0} # Recall each index in both 'predictions' and 'labels' are referring to the corresponding row. # E.G. 
predictions[0] and labels[0] both refer to row 0 in the dataframe that was passed into the # prediction function. #TODO: Create the confusion matrix by comparing the values in predictions to the corresponding values in labels. # Use the definitions in the text above to determine which item in the dictionary you should increment. return confusion_matrix def get_accuracy(confusion_matrix): # Create a function that returns the accuracy score for your classifier. # The formula for accuracy is (TP + TN) / (TP + TN + FP + FN) passConfigure parameters# Path to dataset file #data_path='/data/biodata/Iris/' %store -r path # Sample of train and test dataset train_sample = 0.7 test_sample = 0.3 # Create Spark Session spark = SparkSession.builder \ .master("local[8]") \ .appName("MachineLearningIris") \ .getOrCreate() # Enable Arrow-based columnar data transfers #spark.conf.set("spark.sql.execution.arrow.enabled", "true") # Load Iris CSV dataset to Spark Dataframe orig_data = spark.read.format("csv").options(sep=',',header='true',inferschema='true').\ load(path) print("Original Dataframe read from CSV file") #orig_data.dtypes orig_data.show(5) # ML libraries don't accept string columns => everything should be numeric! # Create a numeric column "label" based on the string column "class" indexer = StringIndexer(inputCol="class", outputCol="label").fit(orig_data) label_data = indexer.transform(orig_data) # Save the inverse map from numeric "label" to string "class" to be used further in the response labelReverse = IndexToString().setInputCol("label") # Show labeled dataframe with numeric label print("Dataframe with numeric label") label_data.show(5) # Drop the string column "class" so that no string columns remain label_data = label_data.drop("class") # Most machine learning libraries expect 2 columns: label (output) and features (input) # The label column is the target used to train the ML algorithm # The features column should join all input parameters into a single Vector # Set the column names that are not part of the features list ignore = ['label'] # feature_cols will hold all columns that are part of the features feature_cols = [x for x in label_data.columns if x not in ignore] # VectorAssembler assembles the vector of features assembler = VectorAssembler( inputCols=feature_cols, outputCol='features') # Create the final dataframe composed of the label and a features vector column data = (assembler.transform(label_data).select("label","features")) print("Final Dataframe suitable to classifier input format") #data.printSchema() data.show(5) # Randomly split the dataset into train and test groups # [0.7,0.3] => 70% for train and 30% for test # [1.0,0.2] => 100% for train and 20% for test, not good, accuracy always 100% # [0.1,0.02] => 10% for train and 2% for test, if big datasets # 1234 is the random seed (train, test) = data.randomSplit([train_sample, test_sample], 1234) start_time_nb = time.time() # create the trainer and set its parameters trainer = NaiveBayes(smoothing=1.0, modelType="multinomial") #trainer = LogisticRegression(maxIter=10, tol=1E-6, fitIntercept=True) #trainer = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=10) # train the model and get the result model = trainer.fit(train) result_nb = model.transform(test) # compute the accuracy of the model on the test set evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction",\ metricName="accuracy") accuracy_nb = evaluator.evaluate(result_nb) * 100 time_nb = time.time() - start_time_nb print("Naive Bayes: accuracy = %3.1f %%" % accuracy_nb) print("Naive Bayes: time = %3.3f s" %
time_nb) print("Naive Bayes Final Result") result_nb.show() print("Naive Bayes final result with name of class") labelReverse.transform(result_nb).show()Naive Bayes final result with name of class +-----+-----------------+--------------------+--------------------+----------+----------------------------------+ |label| features| rawPrediction| probability|prediction|IndexToString_982c59433006__output| +-----+-----------------+--------------------+--------------------+----------+----------------------------------+ | 0.0|[4.3,3.0,1.1,0.1]|[-9.8947865959498...|[0.73694376226986...| 0.0| Iris-setosa| | 0.0|[4.4,2.9,1.4,0.2]|[-10.798200610812...|[0.65275740620864...| 0.0| Iris-setosa| | 0.0|[4.4,3.0,1.3,0.2]|[-10.714490841463...|[0.68253047838642...| 0.0| Iris-setosa| | 0.0|[4.8,3.1,1.6,0.2]|[-11.685557246997...|[0.66369141823549...| 0.0| Iris-setosa| | 0.0|[5.0,3.5,1.6,0.6]|[-13.716429687974...|[0.60272102805515...| 0.0| Iris-setosa| | 0.0|[5.0,3.6,1.4,0.2]|[-11.987255175816..[...]Capstone Project: Create a Customer Segmentation Report for Arvato Financial ServicesIn this project, you will analyze demographics data for customers of a mail-order sales company in Germany, comparing it against demographics information for the general population. You'll use unsupervised learning techniques to perform customer segmentation, identifying the parts of the population that best describe the core customer base of the company. Then, you'll apply what you've learned on a third dataset with demographics information for targets of a marketing campaign for the company, and use a model to predict which individuals are most likely to convert into becoming customers for the company. The data that you will use has been provided by our partners at Bertelsmann Arvato Analytics, and represents a real-life data science task.If you completed the first term of this program, you will be familiar with the first part of this project, from the unsupervised learning project. The versions of those two datasets used in this project will include many more features and has not been pre-cleaned. You are also free to choose whatever approach you'd like to analyzing the data rather than follow pre-determined steps. 
In your work on this project, make sure that you carefully document your steps and decisions, since your main deliverable for this project will be a blog post reporting your findings.# import libraries here; add more as necessary import numpy as np import pandas as pd from collections import Counter from operator import itemgetter import time from matplotlib_venn import venn2 import matplotlib.pyplot as plt import seaborn as sns from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import BaggingClassifier from sklearn.pipeline import Pipeline # magic word for producing visualizations in notebook %matplotlib inlinePart 0: Get to Know the DataThere are four data files associated with this project:- `Udacity_AZDIAS_052018.csv`: Demographics data for the general population of Germany; 891 211 persons (rows) x 366 features (columns).- `Udacity_CUSTOMERS_052018.csv`: Demographics data for customers of a mail-order company; 191 652 persons (rows) x 369 features (columns).- `Udacity_MAILOUT_052018_TRAIN.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 982 persons (rows) x 367 (columns).- `Udacity_MAILOUT_052018_TEST.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 833 persons (rows) x 366 (columns).Each row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. Use the information from the first two files to figure out how customers ("CUSTOMERS") are similar to or differ from the general population at large ("AZDIAS"), then use your analysis to make predictions on the other two files ("MAILOUT"), predicting which recipients are most likely to become a customer for the mail-order company.The "CUSTOMERS" file contains three extra columns ('CUSTOMER_GROUP', 'ONLINE_PURCHASE', and 'PRODUCT_GROUP'), which provide broad information about the customers depicted in the file. The original "MAILOUT" file included one additional column, "RESPONSE", which indicated whether or not each recipient became a customer of the company. For the "TRAIN" subset, this column has been retained, but in the "TEST" subset it has been removed; it is against that withheld column that your final predictions will be assessed in the Kaggle competition.Otherwise, all of the remaining columns are the same between the three data files. For more information about the columns depicted in the files, you can refer to two Excel spreadsheets provided in the workspace. [One of them](./DIAS Information Levels - Attributes 2017.xlsx) is a top-level list of attributes and descriptions, organized by informational category. [The other](./DIAS Attributes - Values 2017.xlsx) is a detailed mapping of data values for each feature in alphabetical order.In the below cell, we've provided some initial code to load in the first two datasets. 
Note for all of the `.csv` data files in this project that they're semicolon (`;`) delimited, so an additional argument in the [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call has been included to read in the data properly. Also, considering the size of the datasets, it may take some time for them to load completely.You'll notice when the data is loaded in that a warning message will immediately pop up. Before you really start digging into the modeling and analysis, you're going to need to perform some cleaning. Take some time to browse the structure of the data and look over the informational spreadsheets to understand the data values. Make some decisions on which features to keep, which features to drop, and if any revisions need to be made on data formats. It'll be a good idea to create a function with pre-processing steps, since you'll need to clean all of the datasets before you work with them. Load DataSince the dataset is more than 1GB, I've used `c` engine instead of `python` to load data faster. In addition, there are mixed data in column 18 & 19; more specifically some `NaN` values are represented by `X` & `XX`. These values are set as `na_values`.# Load data def load_data(azdias_filepath, customers_filepath, attributes_filepath): """ Method for loading dataset from CSV & Excel Args: azdias_filepath (str): Azdias Filepath customers_filepath (str): Customers Filepath attributes_filepath (str): Attributes Filepath Output: azdias: Pandas Dataframe customers: Pandas Dataframe attributes: Pandas Dataframe """ # Load "azdias" dataset azdias = pd.read_csv(azdias_filepath, na_values=["X", "XX"], engine="c") # Load "customers" dataset customers = pd.read_csv(customers_filepath, na_values=["X", "XX"], engine="c") # Load "attributes" dataset attributes = pd.read_excel("DIAS Attributes - Values 2017.xlsx", header=1).loc[:, ["Attribute", "Value", "Meaning"]] \ .fillna(method='ffill') return azdias, customers, attributes azdias_filepath = "Udacity_AZDIAS_052018.csv" customers_filepath = "Udacity_CUSTOMERS_052018.csv" attributes_filepath = "DIAS Attributes - Values 2017.xlsx" azdias, customers, attributes = load_data(azdias_filepath, customers_filepath, attributes_filepath)Data Exploration (azdias)Explore `azdias` dataset to gain insights about the type of data, extent of data cleaning and feature engineeringazdias.head() attributes.head() # Show column names and datatype azdias.info(verbose=True) # List of unique features in different dataset attribute_list = list(attributes.Attribute.unique()) azdias_features = azdias.columns.tolist() # Find common and unique features between 'azdias' and 'attributes' common_features = set(azdias_features) & set(attribute_list) unique_azdias_features = set(azdias_features) - set(attribute_list) num_common_features = len(common_features) num_unique_azdias_features = len(unique_azdias_features) print("common features: {}, unique azdias features: {}".format(num_common_features, num_unique_azdias_features))common features: 272, unique azdias features: 94**Observations:** Since 94 features in `azdias` do not have any descriptions in `attributes`, these features will be dropped. 
In addition, some features in `attributes` have values assigned to _unknown_ or _missing information._ These values in `azdias` will be replaced with `NaN`.# Drop features that are not present in "attributes" azdias.drop(columns=list(unique_azdias_features), inplace=True) azdias.shape # Create a subset of "attributes" where only unknown values are present attributes_unknown_val = attributes[(attributes['Meaning'].str.contains("unknown") | attributes['Meaning'].str.contains("no "))] unknown_val = [] for attribute in attributes_unknown_val['Attribute'].unique(): # Create a list of unknown value for a feature val = attributes_unknown_val.loc[attributes['Attribute'] == attribute, 'Value'].astype("str").str.cat(sep=',').split(',') # Convert the list to "int" val = list(map(int, val)) unknown_val.append(val) # Create dataframe of features with unknown value attributes_unknown_val = pd.concat([pd.Series(attributes_unknown_val['Attribute'].unique()), pd.Series(unknown_val)], axis=1) # Rename the columns attributes_unknown_val.columns = ['attribute', 'unknown'] attributes_unknown_val.head() # Replace the unknown values in "azdias" with NaN based on "attributes_unknown_val" for row in attributes_unknown_val.itertuples(index=False): if row.attribute in azdias.columns.values.tolist(): print(row.attribute) nan_val = attributes_unknown_val.loc[attributes_unknown_val['attribute'] == row.attribute, 'unknown'].iloc[0] nan_idx = azdias.loc[:, row.attribute].isin(nan_val) azdias.loc[nan_idx, row.attribute] = np.NaN else: continue azdias.head() # Check the total number of NaN count on each column nan_count_column_azdias = azdias.isnull().sum() # Plot the distribution of missing or unknown data for each column plt.figure(figsize=(15,5)) plt.title('Distribution of Missing Data in Each Column') plt.hist(nan_count_column_azdias, bins=100) plt.ylabel('Number of Columns') plt.xlabel('Number of Missing Values') plt.show() # Drop the columns where the NaN count is higher than 20000 i.e. more than 20% # Select the columns that are needed to be dropped drop_columns = nan_count_column_azdias[nan_count_column_azdias > 200000] # Make list of the drop_columns drop_column_list = drop_columns.index.tolist() # Drop the columns from azdias azdias.drop(columns=drop_column_list, inplace=True) azdias.shape azdias.head() # Similar to columns, check the total NaN count for each row count_nan_row = azdias.isnull().sum(axis=1) plt.figure(figsize=(15,5)) plt.title('Distribution of Missing Data in Each Row') plt.hist(count_nan_row, bins=50) plt.ylabel('Number of Rows') plt.xlabel('Number of Missing Values') plt.show() # # Drop the rows where the NaN count is higher than 50 i.e. more than 20% count_nan_row = azdias.shape[1] - azdias.count(axis=1) drop_row = azdias.index[count_nan_row > 50] azdias.drop(drop_row, axis=0, inplace=True) print(azdias.shape) azdias.head()(737288, 237)**Observations:** There are features in `azdias` where significant number of data are missing (more than 200,000). These features are dropped. Similarly, more than 50 datapoints are missing across rows which are dropped as well. Based on these observations, a data cleaning function was created to clean `customers` dataset. In the function, if more than 20% data are missing in a column or across a row, it will be dropped. 
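Before this rule is generalized into the `clean_data` function below, it can be stated compactly. The sketch that follows is illustrative only (the `drop_sparse`, `col_thresh` and `row_thresh` names are ours, not part of the project code); it keeps a column, and then a row, only when its fraction of missing values stays at or below the chosen threshold.

```python
import pandas as pd

def drop_sparse(df: pd.DataFrame, col_thresh: float = 0.2, row_thresh: float = 0.2) -> pd.DataFrame:
    """Drop columns, then rows, whose fraction of missing values exceeds the thresholds."""
    keep_cols = df.isnull().mean() <= col_thresh        # fraction of NaN per column
    df = df.loc[:, keep_cols]
    keep_rows = df.isnull().mean(axis=1) <= row_thresh  # fraction of NaN per row, after the column drop
    return df.loc[keep_rows]
```

The full `clean_data` function below also remaps the coded "unknown" values to `NaN` before applying these thresholds.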
Create function for Cleaning the Datasetdef clean_data(df, attributes, col_nan_threshold, row_nan_threshold): """ Method for cleaning the dataset according to attributes Args: df (Pandas Dataframe): Dataset to clean attributes (Pandas Dataframe): Reference dataset for cleaning df col_nan_threshold (float): Threshold value (0-1) for missing NaN count in a column row_nan_threshold (float): Threshold value (0-1) for missing NaN count in a row Output: df (Pandas Dataframe): Cleaned dataset """ # List of attributes attribute_list = list(attributes.Attribute.unique()) # List of df attributes df_attributes = list(df.columns) # Find attributes that are unique in df unique_df_attributes = list(set(df_attributes) - set(attribute_list)) # Drop attributes from df that are not present in attribute df.drop(columns=unique_df_attributes, inplace=True) # Create a subset of attributes with unknown or no information value unknown_val = [] attributes_unknown_val = attributes[(attributes['Meaning'].str.contains("unknown") | attributes['Meaning'].str.contains("no "))] for attribute in attributes_unknown_val['Attribute'].unique(): val = attributes_unknown_val.loc[attributes['Attribute'] == attribute, 'Value'].astype("str") \ .str.cat(sep=',').split(',') val = list(map(int, val)) unknown_val.append(val) attributes_unknown_val = pd.concat([pd.Series(attributes_unknown_val['Attribute'].unique()), pd.Series(unknown_val)], axis=1) attributes_unknown_val.columns = ['attribute', 'unknown'] # Replace unknown or missing values in df with NaN for row in attributes_unknown_val.itertuples(index=False): if row.attribute in df.columns.values.tolist(): nan_val = attributes_unknown_val.loc[attributes_unknown_val['attribute'] == row.attribute, 'unknown'].iloc[0] nan_idx = df.loc[:, row.attribute].isin(nan_val) df.loc[nan_idx, row.attribute] = np.NaN else: continue # Drop columns where NaN count is above the threshold nan_count_column = (df.isnull().sum()/df.shape[0]).sort_values(ascending=False) # Select the columns that are needed to be dropped nan_count_column = nan_count_column[nan_count_column > col_nan_threshold] # Make list of the drop_columns drop_column_list = nan_count_column.index.tolist() # Drop the columns from azdias df.drop(columns=drop_column_list, inplace=True) # Dropping rows where NaN count is above the threshold number_col = df.shape[1] count_nan_row = (df.shape[1] - df.count(axis=1)) / number_col drop_row = df.index[count_nan_row > row_nan_threshold] df.drop(drop_row, axis=0, inplace=True) return df customers = clean_data(customers, attributes, 0.2, 0.2) customers.shape customers.head(20) azdias.head(20)**Observations:** `customers` has only 37 features after cleaning while `azdias` has 237 columns. 
Therefore, columns that are not in `customers` dataset will be dropped from `azdias`.# Find unique "azdias" features to drop customers_feature = customers.columns.tolist() azdias_feature = azdias.columns.tolist() unique_azdias_feature = set(azdias_feature) - set(customers_feature) azdias.drop(columns=list(unique_azdias_feature), inplace=True) azdias.info(verbose=True) Int64Index: 737288 entries, 1 to 891220 Data columns (total 37 columns): CJT_GESAMTTYP 732906 non-null float64 FINANZ_ANLEGER 737288 non-null float64 FINANZ_HAUSBAUER 737288 non-null float64 FINANZ_MINIMALIST 737288 non-null float64 FINANZ_SPARER 737288 non-null float64 FINANZ_UNAUFFAELLIGER 737288 non-null float64 FINANZ_VORSORGER 737288 non-null float64 FINANZTYP 737288 non-null float64 GEBURTSJAHR 737288 non-null int64 GFK_URLAUBERTYP 732906 non-null float64 GREEN_AVANTGARDE 737288 non-null int64 HH_EINKOMMEN_SCORE 737288 non-null float64 LP_FAMILIE_FEIN 732906 non-null float64 LP_FAMILIE_GROB 732906 non-null float64 LP_LEBENSPHASE_FEIN 732906 non-null float64 LP_LEBENSPHASE_GROB 732906 non-null float64 LP_STATUS_FEIN 732906 non-null float64 LP_STATUS_GROB 732906 non-null float64 ONLINE_AFFI[...]Feature Encoding and Engineering# Drop "LP_LEBENSPHASE_GROB", "LP_STATUS_GROB" and "LP_FAMILIE_GROB" # There are other columns that already captures the information that are presented in these columns azdias.drop(columns=['LP_LEBENSPHASE_GROB', "LP_STATUS_GROB", "LP_FAMILIE_GROB"], inplace=True) customers.drop(columns=['LP_LEBENSPHASE_GROB', "LP_STATUS_GROB", "LP_FAMILIE_GROB"], inplace=True) azdias.shape, customers.shape # Drop "GEBURTSJAHR" since the column represents timeseries information which is not necessarily important for classification # In "GEBURTSJAHR" missing data are represented as 0. 
Fill in the missing values at "GEBURTSJAHR" with NaN #azdias["GEBURTSJAHR"].replace(0, np.nan, inplace=True) # Create subset of "GEBURTSJAHR" for different imputation strategy #azdias_year = azdias.loc[:, ["GEBURTSJAHR"]] azdias.drop(columns=["GEBURTSJAHR"], inplace=True) # Fill missing values (0) at "GEBURTSJAHR" with NaN #customers["GEBURTSJAHR"].replace(0, np.nan, inplace=True) # Create subset of "GEBURTSJAHR" for different imputation strategy #customers_year = customers.loc[:, ["GEBURTSJAHR"]] customers.drop(columns=["GEBURTSJAHR"], inplace=True) azdias.shape, customers.shape # Impute numeric columns imputer_numeric = SimpleImputer(missing_values=np.nan, strategy='median') azdias = pd.DataFrame(imputer_numeric.fit_transform(azdias), columns = azdias.columns) customers = pd.DataFrame(imputer_numeric.fit_transform(customers), columns = customers.columns) # Since "GEBURTSJAHR" represents timeseries, instead of "median" "most_frequent" strategy is used to impute "GEBURTSJAHR" # imputer_year = SimpleImputer(missing_values=np.nan, strategy='most_frequent') # azdias_year = pd.DataFrame(imputer_numeric.fit_transform(azdias_year), columns = azdias_year.columns) # customers_year = pd.DataFrame(imputer_numeric.fit_transform(customers_year), columns = customers_year.columns) # Standardize the numeric features in the dataset scaler = StandardScaler() azdias_scale = scaler.fit_transform(azdias) customers_scale = scaler.transform(customers) azdias = pd.DataFrame(azdias_scale, columns=azdias.columns) customers = pd.DataFrame(customers_scale, columns=customers.columns) azdias.head() # Concat "GEBURTSJAHR" with the dataframe #azdias = pd.concat([azdias, azdias_year], axis=1) #customers = pd.concat([customers, customers_year], axis=1)Part 1: Customer Segmentation ReportThe main bulk of your analysis will come in this part of the project. Here, you should use unsupervised learning techniques to describe the relationship between the demographics of the company's existing customers and the general population of Germany. By the end of this part, you should be able to describe parts of the general population that are more likely to be part of the mail-order company's main customer base, and which parts of the general population are less so.# Perform PCA on the dataset (taken from the classroom) def pca_op(n_components, data): """ Performs PCA to create n_components, and returns results of the transformation. """ pca = PCA(n_components) X_pca = pca.fit_transform(data) return pca, X_pca azdias_pca_all, azdias_X_pca_all = pca_op(None, azdias) # Plot variance by each features (taken from the classroom) def plot_variance_by_component(pca): plt.rcParams["figure.figsize"] = [15,5] plt.bar(range(len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_) plt.title("Explained variance by component") plt.xlabel("Principal component") plt.ylabel("Explained variance ratio") plt.show() # Investigate the variance accounted for by each principal component. plot_variance_by_component(azdias_pca_all) # Plot scree plot def variance_cumulative(pca): plt.plot(range(len(pca.explained_variance_ratio_)),np.cumsum(pca.explained_variance_ratio_), '-') plt.title("Explained Variance Cumulative") plt.xlabel("Component number") plt.ylabel("Explained variance ratio") plt.show() # Investigate the variance cumulative variance_cumulative(azdias_pca_all)**Observations:** From the plot, it is clear that 15 components explain more than 90% variance.# Re-run PCA to the data while selecting to retain 10 components. 
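# Illustrative aside (not part of the original steps): the component count can also be derived
# programmatically from the cumulative explained-variance curve of the full PCA fitted above, e.g.
#     n_components = int(np.argmax(np.cumsum(azdias_pca_all.explained_variance_ratio_) >= 0.90)) + 1
# which, per the observation above, should come out near the 15 components retained in the next cell.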
azdias_pca_15, azdias_X_pca_15 = pca_op(15, azdias) # Investigate the variance accounted for by each principal component when 10 components are used plot_variance_by_component(azdias_pca_15) # Investigate the variance cumulative when 10 components are used variance_cumulative(azdias_pca_15) # Get weights of each component (taken from classroom) def get_weights(pca, component_num, data): df = pd.DataFrame(pca.components_, columns = list(azdias.columns)) weights = df.iloc[component_num].sort_values(ascending=False) return weights # Print relationship between azdias features and pca components dimensions = len(azdias_pca_15.components_) result = pd.DataFrame() for dim in range(dimensions): df = get_weights(azdias_pca_15, dim, azdias) result = pd.concat([result, df], axis = 1) result = result.T result # Plot weights for each component def plot_weights_by_component(idx, pca_data): df = pd.DataFrame(pca_data.loc[idx].sort_values().reset_index()) print(df) plt.rcParams["figure.figsize"] = [15,5] #plt.bar(result.loc[0].sort_values().index.tolist(), result.loc[0].sort_values().values.tolist()) plt.bar(df.index.tolist(), result.loc[idx].sort_values().values.tolist()) plt.title("Weight by feature by component") plt.xlabel("Feature #") plt.ylabel("Feature Weight") plt.show() plt.rcParams["figure.figsize"] = [15,5] plt.bar(result.loc[idx].sort_values().iloc[[0, 1, 2, -3, -2, -1]].index.tolist(), result.loc[idx].sort_values().iloc[[0, 1, 2, -3, -2, -1]].values.tolist()) plt.title("Weight by feature by component") plt.xlabel("Feature #") plt.ylabel("Feature Weight") plt.show() for i in range(5): plot_weights_by_component(i, result) # Over a number of different cluster counts run k-means clustering on the data and compute the average within-cluster distances def elbow_plot(data, start_K, end_K, step): ''' Generate an elbow plot to find optimal number of clusters graphing K values from start_K to end_K every step value ''' score_list = [] for i in range(start_K, end_K, step): print(i) start = time.time() kmeans = KMeans(i) model = kmeans.fit(data) score = model.score(data) score_list.append(abs(score)) end = time.time() elapsed_time = end - start print(elapsed_time) print(score_list) plt.plot(range(start_K, end_K, step), score_list, linestyle='--', marker='o', color='b'); plt.xlabel('# of clusters K'); plt.ylabel('Sum of squared errors'); plt.title('SSE vs. K'); plt.savefig('elbow_plot.png') elbow_plot(azdias_X_pca_15, 1, 10, 1) # Fit the k-means model with the selected number of clusters and obtain cluster predictions for the general population # demographics data. 
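# The choice of 6 clusters below presumably follows from the elbow plot generated above,
# where the sum-of-squared-errors curve begins to flatten.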
azdias_kmeans = KMeans(6) azdias_model = azdias_kmeans.fit(azdias_X_pca_15) azdias_labels = azdias_model.predict(azdias_X_pca_15) # View first few labels predicted for the azdias dataset azdias_labels[0:10] # Perform PCA on customers dataset customers_pca_15 = azdias_pca_15.transform(customers) # predict and print first few labels for the customers dataset customers_labels = azdias_model.predict(customers_pca_15) customers_labels[0:10]Compare Customer with Demographicsdef calc_propotions(labels, population): counter = Counter(labels) proportions = [(i, counter[i] / population * 100.0) for i in counter] proportions.sort(key=itemgetter(0)) proportions = np.array(proportions) dropped_gen_pop = 100 - round(proportions[:, 1].sum()) if (dropped_gen_pop > 1): proportions = np.insert(proportions, 0, values=[dropped_gen_pop], axis=0) return proportions customers.shape, azdias.shape # for azdias calculated the proportions of each cluster general_population = 737288 azdias_proportions = calc_propotions(azdias_labels, general_population) print(azdias_proportions) # for customers calculated the proportions of each cluster general_population = 188439 #191652 #132907# customers_proportions = calc_propotions(customers_labels, general_population) print(customers_proportions) # calculate the ration between general population (azdias) and customers # to see which customer segments are under and over represented representation_ratio = customers_proportions[0:, 1]/azdias_proportions[0:, 1] representation_ratio plt.figure(figsize=(15, 4)) ax1 = plt.subplot(131) ax1.bar(azdias_proportions[:, 0], azdias_proportions[:, 1]) ax1.set_xticks(azdias_proportions[:, 0]) ax1.set_ylim(ymax = 35) ax1.set_xlabel("Cluster") ax1.set_ylabel("Proportion of Total (%)") plt.title('General Population') ax2 = plt.subplot(132) ax2.bar(customers_proportions[:, 0], customers_proportions[:, 1]) ax2.set_xticks(azdias_proportions[:, 0]) ax2.set_ylim(ymax = 45) ax2.set_xlabel("Cluster") ax2.set_ylabel("Proportion of Total (%)") plt.title('Customer Population') ax3 = plt.subplot(133) ax3.bar(azdias_proportions[:, 0], representation_ratio) ax3.set_xticks(azdias_proportions[:, 0]) ax3.set_xlabel("Cluster") ax3.set_ylabel("Representation Ratio") ax3.axhline(y=1, linestyle = "--", linewidth = 0.8) plt.title('Customer Population/General Population') tt = azdias_pca_15.inverse_transform(azdias_model.cluster_centers_[1]) c_1 = tt underpresented = pd.Series(data = c_1, index = customers.columns).sort_values() underpresented tt = azdias_pca_15.inverse_transform(azdias_model.cluster_centers_[4]) c_4 = tt underpresented = pd.Series(data = c_4, index = customers.columns).sort_values() underpresented tt = azdias_pca_15.inverse_transform(azdias_model.cluster_centers_[5]) c_5 = tt underpresented = pd.Series(data = c_5, index = customers.columns).sort_values() underpresented**Observations:** While cluster 0, 2 and 3 are well represented in customers, cluster 1, 4 and 5 are underrepresented. These underrepresented clusters in general represents populations who are cultural minded, social and aware about the product. Part 2: Supervised Learning ModelNow that you've found which parts of the population are more likely to be customers of the mail-order company, it's time to build a prediction model. Each of the rows in the "MAILOUT" data files represents an individual that was targeted for a mailout campaign. 
Ideally, we should be able to use the demographic information from each individual to decide whether or not it will be worth it to include that person in the campaign.The "MAILOUT" data has been split into two approximately equal parts, each with almost 43 000 data rows. In this part, you can verify your model with the "TRAIN" partition, which includes a column, "RESPONSE", that states whether or not a person became a customer of the company following the campaign. In the next part, you'll need to create predictions on the "TEST" partition, where the "RESPONSE" column has been withheld.mailout_train = pd.read_csv('Udacity_MAILOUT_052018_TRAIN.csv') mailout_train.shape # Count of response vc = mailout_train['RESPONSE'].value_counts() # Positive response pos_res = vc[1]/(vc[0]+vc[1]) # Negative response neg_res = 1 - pos_res print("Ratio of postive response: {0: 0.3f} and negative response: {1: 0.3f}".format(pos_res, neg_res)) # Extract RESPONSE column response = mailout_train['RESPONSE'] # Drop RESPONSE column mailout_train.drop(labels=['RESPONSE'], axis=1, inplace=True) mailout_train = clean_data(mailout_train, attributes, 0.2, 1) mailout_train.shape # Apply similar steps as part 0. Find unique features in mailout_train and drop them unique_mailout_feature = set(mailout_train.columns.tolist()) - set(azdias.columns.tolist()) mailout_train.drop(columns=list(unique_mailout_feature), inplace=True) mailout_train.info(verbose=True) # Fill missing values (0) at "GEBURTSJAHR" with NaN #mailout_train["GEBURTSJAHR"].replace(0, np.nan, inplace=True) # Create subset of "GEBURTSJAHR" for different imputation strategy #mailout_train_year = mailout_train.loc[:, ["GEBURTSJAHR"]] #mailout_train.drop(columns=["GEBURTSJAHR"], inplace=True) imputer_numeric = SimpleImputer(missing_values=np.nan, strategy='median') mailout_train = pd.DataFrame(imputer_numeric.fit_transform(mailout_train), columns = mailout_train.columns) scaler = StandardScaler() mailout_train_scale = scaler.fit_transform(mailout_train) mailout_train = pd.DataFrame(mailout_train_scale, columns=azdias.columns) mailout_train.head() def classify(clf, param_grid, X_train=mailout_train, y_train=response): """ Fits a classifier to its training data and prints its ROC AUC score. 
INPUT: - clf (classifier): classifier to fit - param_grid (dict): classifier parameters used with GridSearchCV - X_train (DataFrame): training input - y_train (DataFrame): training output OUTPUT: - classifier: input classifier fitted to the training data """ # cv uses StratifiedKFold # scoring roc_auc available as parameter grid = GridSearchCV(estimator=clf, param_grid=param_grid, scoring='roc_auc', cv=5) grid.fit(X_train, y_train) print(grid.best_score_) return grid.best_estimator_ # LogisticRegression lor = LogisticRegression(random_state=0) classify(lor, {}) # BaggingClassifier bac = BaggingClassifier(random_state=0) classify(bac, {}) # RandomForestClassifier rfc = RandomForestClassifier(random_state=0) classify(rfc, {}) # AdaBoostClassifier abc = AdaBoostClassifier(random_state=0) abc_best_est = classify(abc, {}) # GradientBoostingClassifier gbc = GradientBoostingClassifier(random_state=0) classify(gbc, {})0.5175244452709992Model Tuning""" # tune the most promising classifier with the help of GridSearchCV # the result is our model that will be used with the test set lor = LogisticRegression(random_state=0) param_grid = {"algorithm" :["SAMME", "SAMME.R"], 'n_estimators':[80] } abc_best_est = classify(abc, param_grid) abc_best_est """ # LogisticRegression has the highest score. Select the logistic regression model lor = LogisticRegression(random_state=0) model = classify(lor, {}) # Find the feature importance """ fi = pd.DataFrame({'FI':abc_best_est.feature_importances_}, index=mailout_train.columns) fi_sorted = fi.sort_values(by=['FI'], ascending=False) fi_sorted.head(10) """ fi = pd.DataFrame({'FI':model.coef_[0]}, index=mailout_train.columns) fi_sorted = fi.sort_values(by=['FI'], ascending=False) fi_sorted.head(10)Part 3: Kaggle CompetitionNow that you've created a model to predict which individuals are most likely to respond to a mailout campaign, it's time to test that model in competition through Kaggle. If you click on the link [here](http://www.kaggle.com/t/21e6d45d4c574c7fa2d868f0e8c83140), you'll be taken to the competition page where, if you have a Kaggle account, you can enter. If you're one of the top performers, you may have the chance to be contacted by a hiring manager from Arvato or Bertelsmann for an interview!Your entry to the competition should be a CSV file with two columns. The first column should be a copy of "LNR", which acts as an ID number for each individual in the "TEST" partition. The second column, "RESPONSE", should be some measure of how likely each individual became a customer – this might not be a straightforward probability. As you should have found in Part 2, there is a large output class imbalance, where most individuals did not respond to the mailout. Thus, predicting individual classes and using accuracy does not seem to be an appropriate performance evaluation method. Instead, the competition will be using AUC to evaluate performance. 
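To make that concrete, ROC AUC is computed from a ranking of scores rather than from hard 0/1 predictions, so any monotone "likelihood of responding" works as a submission. A minimal illustration with made-up numbers (the `y_true`/`y_score` values below are ours, purely for demonstration):

```python
from sklearn.metrics import roc_auc_score

y_true = [0, 0, 1, 0, 1]                    # known responses (illustrative)
y_score = [0.10, 0.40, 0.35, 0.20, 0.80]    # any "likelihood of responding" score, e.g. predict_proba(...)[:, 1]
print(roc_auc_score(y_true, y_score))                     # ~0.833
print(roc_auc_score(y_true, [100 * s for s in y_score]))  # identical AUC: only the ranking matters
```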
The exact values of the "RESPONSE" column do not matter as much: only that the higher values try to capture as many of the actual customers as possible, early in the ROC curve sweep.mailout_test = pd.read_csv("Udacity_MAILOUT_052018_TEST.csv") mailout_test.head() # Extract lnr for later generation of the competition result file lnr = mailout_test.LNR # Clean data mailout_test = clean_data(mailout_test, attributes, 0.2, 1) mailout_test.shape # Keep the features that are common in azdias and mailout_test unique_mailout_test_feature = set(mailout_test.columns.tolist()) - set(azdias.columns.tolist()) mailout_test.drop(columns=list(unique_mailout_test_feature), inplace=True) mailout_test.shape # Fill missing values (0) at "GEBURTSJAHR" with NaN #mailout_test["GEBURTSJAHR"].replace(0, np.nan, inplace=True) # Create subset of "GEBURTSJAHR" for different imputation strategy #mailout_test_year = mailout_test.loc[:, ["GEBURTSJAHR"]] #mailout_test.drop(columns=["GEBURTSJAHR"], inplace=True) imputer_numeric = SimpleImputer(missing_values=np.nan, strategy='most_frequent') mailout_test = pd.DataFrame(imputer_numeric.fit_transform(mailout_test), columns = mailout_test.columns) #mailout_test_year = pd.DataFrame(imputer_numeric.fit_transform(mailout_test_year), columns = mailout_test_year.columns) scaler = StandardScaler() mailout_test_scale = scaler.fit_transform(mailout_test) mailout_test = pd.DataFrame(mailout_test_scale, columns=mailout_test.columns) #mailout_test = pd.concat([mailout_test, mailout_test_year], axis=1) mailout_test.head() # Use the trained model from Part 2 to predict the probabilties of the testing data response_test = model.predict_proba(mailout_test) response_test # Generate result file for the competition result = pd.DataFrame({'LNR':lnr, 'RESPONSE':response_test[:,0]}) result.to_csv("result.csv", index=False) result.head(10) result.describe()Setupimport numpy as num import scipy.special as scis # scis.gamma(5) --> 24 == 4! import matplotlib.pyplot as plt import math # math.exp(), math.sqrt(), math.factorial() import sympy as sym import sympy.vector as symvect sym.init_printing() # Only need to do this once per session. x,y,x1,y1,x2,y2,epsilon = sym.symbols('x y x1 y1 x2 y2 epsilon')C is our coordinate system, with basis vectors $\hat{i}$ and $\hat{j}$ (and $\hat{k}$, but we won't use that).C = symvect.CoordSys3D('') # As long as we restrict ourselves to vectors involving i & j, we'll have 2D vectors. # We give the coordinate system no name (''), so we don't get unnecessary subscript clutter on the # basis unit vectors when pretty-printed. v1 = x1*C.i + y1*C.j # Symbolic computation. 
v2 = x2*C.i + y2*C.j v1.dot(v2) v2-v1Dot product with side's left normal Let points $p_1$ and $p_2$ be defined as:$$% Each line terminated with "\\"p_1 = (x_1, y_1) \\p_2 = (x_2, y_2)$$The "left normal" of a vector $\overrightarrow{p_{1}p_{2}}$ from $p_1$ to $p_2$, $(x_{2}, y_{2}) - (x_{1}, y_{1}) = (\Delta x, \Delta y)$, is $(-\Delta y, \Delta x)$.p1 = x1*C.i + y1*C.j p2 = x2*C.i + y2*C.j deltaY = y2-y1 deltaX = x2-x1 leftNormal = -deltaY*C.i + deltaX*C.j leftNormalNow we create a vector $\overrightarrow{p_{1}p}$ from $p_1$ to a test point $p$ and take its dot product with $\overrightarrow{p_{1}p_{2}}$:p = x*C.i + y*C.j p1p = p - p1 p1p leftNormal.dot( p1p)...which is obvious, but I had made the mistake of trying to expand the above expressionby hand and then simplify after cancelling some opposite-sign terms out and factoring.I think that was doomed to failure.Anyway, if the above expression is negative, then the test point $p$ is on the opposite side of the line segment $p_{1}p_{2}$ from $p_{1}p_{2}$'s left normal (so... it's on the right).We see that the totologic blog post has the signs reversed (presumably becausehe had the y-axis reversed for typical computer screens). Distance from point to side (segment) ![side-and-point sketch](side-and-point-sketch.png) We're going to find an expression for $d^2$, the distance $d$ of the test point $p$ from the triangle side $p_{1}p_{2}$, squared.$\overrightarrow{p_{1}p_{2}} \cdot \overrightarrow{p_{1}p} = |\overrightarrow{p_{1}p_{2}}| \times |\overrightarrow{p_{1}p}| \times \cos{\theta}$, where $\theta$ is the angle between the vectors.q1 = 1*C.i + 1*C.j q2 = 3*C.i + 2*C.j [q1.magnitude(), q2.magnitude()] q1.dot( q2)As you can see from above, multiplying big vectors _can_ result in big numbers.Using the definitions of $p_1$, $p_2$, and $p$ from above, we define vectors $\overrightarrow{p_{1}p_{2}}$ and $\overrightarrow{p_{1}p}$:p1p2 = p2 - p1 p1p = p - p1 p1p2.dot( p1p)$|\overrightarrow{p_{1}p}|^2$ is the length of a hypotenuse of a **right triangle**, squared.hypSqrd = p1p.magnitude()**2 hypSqrdOn unit vectors...A "unit vector" is a vector in the direction of some other vector, but having length 1.For example, if we have a vector $(10,0)$ (i.e., extending along the x-axis), the unit vector in that direction is $(1,0)$.Pretty simple, but suppose we have a vector $(2,3)$. What is a vector pointing in the same direction but only having length 1? We need to scale that vector back a bit, but by how much? This is probably looking pretty obvious by now, but the length of $(2,3)$ is:b = (2*C.i + 3*C.j) b.magnitude()and the scaling is dividing by that length:(b / sym.sqrt(13)).magnitude()So, unit vector $\hat{b}$ is$$\hat{b} = \frac{\vec{b}}{|\vec{b}|}$$So, the _projection_ of a vector $\vec{a}$ on to another vector $\vec{b}$ is a vector whose _length_ is the length of $\vec{a}$ scaled by the cosine of the angle between $\vec{a}$ and $\vec{b}$, and whose _direction_ is the same as $\hat{b}$. In other words,$$\overrightarrow{a_b} = |\vec{a}| \times \cos \theta \times \hat{b}$$(where $\overrightarrow{a_b}$ is the projection of $\vec{a}$ onto $\vec{b}$.)But that expression is just$$\vec{a} \cdot \frac{\vec{b}}{|\vec{b}|}$$Finally, the _length_ of the _projection_ is:(a_x, a_y, b_x, b_y) = sym.symbols('a_x a_y b_x b_y') a = a_x*C.i + a_y*C.j b = b_x*C.i + b_y*C.j proj = a.dot( b / b.magnitude()) proj(where $a_x$ and $a_y$ are the x- and y-components of $a$, and likewise for $b$.) 
Orproj = (a.dot(b)) / b.magnitude() projNote that we hate taking square roots unnecessarily when computering, so we're going to find a way to square the above expression to get rid of the radical.... Having established the basics of projection...One leg of the right triangle, the projection of $\overrightarrow{p_{1}p}$ onto $\overrightarrow{p_{1}p_{2}}$, has length squared equal to$$\frac{(\overrightarrow{p_{1}p_{2}} \cdot \overrightarrow{p_{1}p})^2}{|\overrightarrow{p_{1}p_{2}}|^2}$$p1p2Sqrd = p1p2.magnitude()**2 sym.simplify( p1p2Sqrd)First, real quick, let's check our variables and make sure nothing's changed, inadvertently.dict( p1=p1, p2=p2, p=p, p1p=p1p, p1p2=p1p2, p1p2Sqrd=p1p2Sqrd) projSqrd = (p1p.dot( p1p2)**2 / p1p2Sqrd) sym.simplify( projSqrd)Length $d$ of remaining leg of triangle is unknown, *but* Pythagoras tells us $$|\overrightarrow{p_{1}p}|^2 - (\overrightarrow{p_{1}p} \cdot \overrightarrow{p_{1}p_{2}})^2 / |\overrightarrow{p_{1}p_{2}}|^2 = d^2$$dSqrd = hypSqrd - projSqrd dSqrdA little exercise checking correctness so farIf we set up a simple test of a vertical line of length 2 (from (0,-1) to (0,1)), and a test point on thex-axis ($y=0$) being brought to the y-axis (x -> 0), then the distance of the test point from the linesegment should be exactly $x$. And it is (don't forget we're calculating distance SQUARED):dSqrd.subs(x1,0).subs(y1,-1).subs(x2,0).subs(y2,1).subs(y,0)If our test point is at $(1,y)$ and we're projecting onto a vertical line on the y-axis, it doesn't matterwhat our $x$ value is, because only the $y$ value determines what the projection will be. And if $y$ islarger than the length of our (vertical, in this example) "triangle side" $p_{1}p_{2}$, then so be it, the projection will also be large.projSqrd.subs(x1,0).subs(y1,0).subs(x2,0).subs(y2,2).subs(x,1) projSqrd.subs(x1,0).subs(y1,0).subs(x2,0).subs(y2,2).subs(x,1).subs(y,10)So, we have to check if the projection is larger than $|\overrightarrow{p_{1}p_{2}}|$, and, if so, we know that the test vector $\overrightarrow{p_{1}p}$ is actually LONGER than the triangle side, so we truncate the testing against $\epsilon^2$ by simply computing the distance from the test point $p$ to $p_2$, the other ("far") end of the triangle side.That's not described in this document, but it is in the code. Reducing math operationsIdeally, we'd like to keep the number of multiplications and divisions to a minimum, and the number square roots and other transcendental functions to zero, in order to make the code fastfastfast. (Well, it's gonna be Haskell, but, ya know... the principle of the thing.)(Note on divisions: any time a division by the difference of two numbers occurs, that's a danger sign. If the two numbers are almost equal in value, you're dividing by a very small value (yielding a very large value), and if there are errors in the two numbers you're subtracting (say, because they're just digital approximations of real values) you can wind up with very big errors in your result. Avoid dividing by differences, if you can. Or maybe find a way to throw a $+1$ in the denominator.)So, our distance-to-side code looks like this: 1. If dot product is negative, angle is obtuse, projection will be off side near $p_1$, and we might as well just calculate distance to $p_1$. 2. If the projection is longer than $p_{1}p_{2}$, the same logic leads us to calculating the distance to $p_2$. Since calculating the length of $p_{1}p_{2}$ involves dividing by a square root, let's try to avoid that. 
We're just comparing two numbers for greater-than, anyway. 3. Otherwise, yeah, go ahead and run the full algorithm, but _remember_: we just want to know if the distance is within epsilon ($\epsilon$), since we're only running this distance-to-side algorithm if previously-attempted algorithms have _failed_, declaring this point to be _outside_ the triangle. Here's our code at this moment, before we start verifying/improving it, along with a monstrous chunk of commented-out code copied from the totologic blog post:```haskell-- | Computes square of distance from given point to given side.distanceSqrdPointToSide :: Point -> Point -> Point -> DoubledistanceSqrdPointToSide (Point _ (x,y)) -- ^ The point under test (Point _ (x1,y1)) -- ^ Starting point of side (Point _ (x2,y2)) -- ^ Ending point of side = let p1p2LengthSqrd = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) dotProduct = ((x-x1)*(x2-x1) + (y-y1)*(y2-y1)) / p1p2LengthSqrd pp1LengthSqrd = (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y) in if dotProduct < 0 then pp1LengthSqrd -- angle > 90°, closest point of side IS p1 else if dotProduct <= 1 then pp1LengthSqrd - dotProduct * dotProduct * p1p2LengthSqrd else (x - x2)*(x - x2) + (y - y2)*(y - y2) -- TODO: not at ALL sure this is correct.{- function distanceSquarePointToSegment(x1, y1, x2, y2, x, y:Number):Number { var p1_p2_squareLength:Number = (x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1); var dotProduct:Number = ((x - x1)*(x2 - x1) + (y - y1)*(y2 - y1)) / p1_p2_squareLength; if ( dotProduct < 0 ) { return (x - x1)*(x - x1) + (y - y1)*(y - y1); } else if ( dotProduct <= 1 ) { var p_p1_squareLength:Number = (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y); return p_p1_squareLength - dotProduct * dotProduct * p1_p2_squareLength; } else { return (x - x2)*(x - x2) + (y - y2)*(y - y2); } }-}``````p1p2LengthSqrd``` looks right. The signs are reversed, but who cares, because it's going to be squared.```dotProduct``` needs to change, if possible, because we're dividing (bad) by differences (real bad). How is it used? 1. It's compared to 0 and 1. Easy fix: compare to 0 and the thing it's divided by. 2. It's multiplied by the thing it's divided by. Duh, cancel those terms out. But first, make sure that math is correct. We're checking to see that the projection is shorter than the side. ```pp1LengthSqrd``` is the _hypotenuse_ length squared, and we want to subtract the _squared length_ of the projection from it, to get $d^2$, the squared length of the unknown side.In that calculation, why are we multiplying by ```p1p2LengthSqrd```?
Let's multiply ```dSqrd``` through by ```p1p2Sqrd```.dSqrd * p1p2Sqrd sym.simplify( dSqrd * p1p2Sqrd)Generate a dataset in the form of a .csv file from a MongoDB instance running locallyimport pandas as pd from pymongo import MongoClient small_dataset = True # Sampling the dataset for training and testing purposes - to support systems with lower performance sample_size = 10000 start_year = '07' end_year = '22' size_cutoff = 220000 # Maximum value: 740000 # Connect to local instance of MongoDB client = MongoClient('127.0.0.1', 27017) db = client.frtp collection = db.documents # Extract all the data available with specific restrictions result = collection.find({ "year": { '$lt': end_year, '$gte': start_year }, "size": { '$lt': size_cutoff } }) # Save the data ready for Drive upload df = pd.DataFrame(list(result)) df['year'] = pd.to_datetime(df['year'], format='%y') if small_dataset: df = df.sample(n=sample_size) df.to_csv(f'collab_dataset_small_{start_year}_{end_year}_<{size_cutoff}.csv') else: df.to_csv(f'collab_dataset_{start_year}_{end_year}_<{size_cutoff}.csv')Encoding exercises Base conversionsEncode the following sentence in ASCII and then in latin1:```txtLa petite brise la glace.```# Your code here sent = 'La petite brise la glace.' # ASCII encoding_ascii = b'La petite brise la glace.' encoding_ascii = bytes(sent, 'ascii') encoding_ascii = sent.encode() encoding_ascii = sent.encode('ascii') # latin1 encoding_latin1 = bytes(sent, 'latin1') encoding_latin1 = sent.encode('latin1')Encoding accented charactersIs it possible to encode the sentence below in latin1? Explain, and recommend an appropriate encoding.```Jak się czujesz?```# Your code here text = 'Jak się czujesz?' charset = 'latin1' try: text.encode(charset) print(f'The message was successfully encoded in {charset}') except UnicodeEncodeError: print(f'Unable to encode the message in {charset}.')**Your answer**The *latin1* character set is designed to represent only the 191 characters of the Latin alphabet, each encoded on a single byte (8 bits). Some of the characters above are not part of that set. A more complete character set, such as *ISO-8859-2* or *UTF-8*, can encode the sentence properly. BinaryDisplay the binary representation of the sentence below, encoded in UTF-8:```Mercredi, viendras-tu manger avec Jean sur une nappe propre ?```# Your code question = 'Mercredi, viendras-tu manger avec Jean sur une nappe propre ?'
utf8 = question.encode('utf8') output = [f'{c:b}' for c in utf8] print(' '.join(output))test the version without pandasimport sys dossier = '/home/benjamin/Documents/eviacybernetics/Projets/Grevia' sys.path.append(dossier) import grevia import importlib importlib.reload(grevia.graph_structure) importlib.reload(grevia) import networkx as nx G = nx.read_gpickle('/media/benjamin/Largo/testspdfs/pickle/graph.pkl') G.size() edge_info = G.edges(data=True) edge_sorted = sorted(edge_info, key=lambda edge: edge[2]['weight'], reverse=True) edge_sorted[:None] CSV_FILE = '/media/benjamin/Largo/testspdfs/csv/table_classif.csv' import csv cluster_dic ={} print('Loading: ',CSV_FILE) with open(CSV_FILE, 'r') as csvfile: clusters_table = csv.DictReader(csvfile, delimiter=',') for row in clusters_table: for key in row.keys(): if key in cluster_dic.keys(): cluster_dic[key].append(row[key]) else: cluster_dic[key]=[row[key]] del cluster_dic[''] cluster_dicChapter Two - Data Loading, Saving and file Formatsfrom optimus import Optimus op = Optimus("pandas")Loading a filedf = op.load.file("my_file.json") display(df)Fixed number of rowsdf = op.load.csv("data/file.csv", n_rows=5) display(df)Without a headerdf = op.load.csv("data/file.csv", header=None) display(df)Assigning which value will be assumed as nulldf = op.load.csv("data/file.csv", null_value="Null") display(df)Using a specific separatordf = op.load.csv("data/file.csv", sep=";") display(df)Wildcardsdf = op.load.csv("csv/*") display(df) df = op.load.csv("csv/file-*.csv") display(df) df = op.load.csv("csv/file-?.csv") display(df) df = op.load.csv("csv/file-*[0-9].*.csv") display(df)Loading large filesop = Optimus("dask") df = op.load.csv("s3://my-storage/massive-file.csv")From a remote connectionconn = op.connect.s3(endpoint_url="s3://my-storage/") df = op.load.csv("files/foo_file.csv", conn=conn) df2 = op.load.file("files/file_bar.xml", conn=conn)From a databasedb = op.connect.mysql(host="localhost", database="my_database") df = op.load.database_table("foo_table", conn=db)Memory usageop = Optimus("dask") df = op.create.dataframe({ "a": [1000,2000,3000,4000,5000]*10000, "b": [1,2,3,4,5]*10000 }) df.size() df = df.optimize() df.size()Creating a dataframe from scratchdf = op.create.dataframe({ "name":["OPTIMUS", "BUMBLEBEE", "EJECT"], "function":["Leader", "Espionage", " Electronic Surveillance"] }) dfSaving a dataframe To a local filedf.save.csv("foo_output.csv", sep=";")To a remote connectiondf.save.xml("files/foo_output.xml", conn=conn)To a database tabledf.save.database_table("foo_output_table", db=db)Repartitiondf = df.repartition(2)Import Librariesimport pandas as pd import matplotlib.pyplot as plt import seaborn as sns import randomImport all data# #data source : https://www.kaggle.com/daveianhickey/2000-16-traffic-flow-england-scotland-wales/version/8 # df2005_07 = pd.read_csv("accidents_2005_to_2007.csv",low_memory=False) # df2009_11 = pd.read_csv("accidents_2009_to_2011.csv",low_memory=False) # df2012_14 = pd.read_csv("accidents_2012_to_2014.csv",low_memory=False) # df2014_lon = pd.read_csv("accidents_london_district1-32_2014.csv",low_memory=False) # dfaadf = pd.read_csv("ukTrafficAADF.csv") # clust = pd.read_csv("cluster_output.csv") # df500clust = pd.read_csv("accidents_london_district2014w500clust.csv") #Union the 3 files into 1 mega file and reset index # df_all = pd.concat([df2005_07,df2009_11,df2012_14]).reset_index(drop=True) df_all = pd.read_csv('data_processed.csv') df_all = df_all[df_all['Year'] >= 2010] df_all.info() Int64Index: 756934 
entries, 718592 to 1475525 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Cluster 756934 non-null int64 1 Accident 756934 non-null int64 2 Longitude 756934 non-null float64 3 Latitude 756934 non-null float64 4 Weather_Fine 756934 non-null float64 5 Weather_Raining 756934 non-null float64 6 Weather_Fog 756934 non-null float64 7 Weather_Other 756934 non-null float64 8 Day_of_Year 756934 non-null int64 9 Month 756934 non-null int64 10 Hour 756934 non-null float64 11 Day_of_Week 756934 non-null int64 12 Year 756934 non-null int64 dtypes: float64(7), int64(6) memory usage: 80.8 MBGenerate Negative Samples (Non-accident data)T = df_all[df_all['Year'] == 2014] df_nonacc = pd.DataFrame(columns=['Cluster','Day_of_year','Hour']) i = 1 maxclust = df_all['Cluster'].max() random.seed(888) # while i < T.shape[0]*3: #3 times more non-accident than accident records while i < T.shape[0]: # while i < 50: #try 50 times first nday = random.randint(1, 365) nclust = random.randint(1, maxclust) #maximum cluster id nhr = random.randint(0, 23) isacc = T.loc[(df_all['Cluster'] == nclust) & (T['Day_of_Year'] == nday) & (T['Hour'] == nhr)] if isacc.empty: i += 1 dfnew = pd.DataFrame([[nclust,nday,nhr]], columns=['Cluster','Day_of_Year','Hour']) df_nonacc = df_nonacc.append(dfnew,ignore_index=True) print(i) # else: # print([nclust,nday,nhr]) # print("Accident found!") df_nonacc.to_csv("non_accident_2014.csv")8954 8955 8956 8957 8957 8958 8959 8960 8961 8961 8962 8963 8964 8964 8964 8965 8965 8966 8966 8967 8968 8969 8970 8970 8971 8972 8972 8973 8974 8974 8975 8975 8976 8977 8978 8979 8980 8981 8982 8982 8983 8984 8985 8986 8986 8987 8988 8989 8990 8991 8992 8992 8992 8992 8992 8993 8994 8994 8994 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9017 9017 9017 9017 9018 9019 9020 9021 9022 9023 9023 9024 9024 9025 9025 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9042 9042 9043 9043 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9053 9054 9055 9055 9056 9057 9057 9058 9058 9059 9060 9061 9062 9063 9064 9065 9065 9066 9066 9067 9068 9068 9069 9069 9069 9070 9071 9072 9073 9074 9074 9075 9076 9076 9076 9077 9077 9077 9077 9078 9079 9080 9081 9082 9083 9084 9085 9085 9085 9086 9087 9087 9088 9089 9090 9091 9092 9092 9092 9093 9094 9095 9095 9096 9097 9097 9097 9098 [...]**Artificial Intelligence - MSc**CS6501 - MACHINE LEARNING AND APPLICATIONS**Business Analytics - MSc**ET5003 - MACHINE LEARNING APPLICATIONS ***Annual Repeat***Instructor: RepMLA_Etivity-2#@title Current Date Today = '2021-06-28' #@param {type:"date"} #@markdown --- #@markdown ### Enter your details here: Student_ID = "" #@param {type:"string"} Student_full_name = "" #@param {type:"string"} #@markdown --- #@title Notebook information Notebook_type = 'Example' #@param ["Example", "Lab", "Practice", "Etivity", "Assignment", "Exam"] Version = 'Draft' #@param ["Draft", "Final"] {type:"raw"} Submission = False #@param {type:"boolean"}Encoder模型定义class Encoder(nn.Module): def __init__(self, embed_size, vocab_size, enc_hidden_size, dec_hidden_size): '''构造方法. Args: embed_size: 词向量维度 vocab_size: 词典大小 enc_hidden_size: Encoder的隐藏状态维度 dec_hidden_size: Decoder的隐藏状态维度 ''' super().__init__() self.embed = nn.Embedding(vocab_size, embed_size) self.rnn = nn.GRU(embed_size, enc_hidden_size) self.linear = nn.Linear(enc_hidden_size, dec_hidden_size) def forward(self, input_seq): '''前向传播. 
Args: input_seq: 输入序列, shape (N, batch_size) Returns: encoder_outputs: 每个时间步的输出(N, batch_size, enc_hidden_size) last_hidden: 最后一个时间步的输出(1, batch_size, dec_hidden_size) ''' # 判断是否有batch_size维度 if input_seq.dim() < 2: input_seq = input_seq.view(-1, 1) # 保证序列长度至少为2 if len(input_seq) < 2: input_seq = input_seq.repeat(2, 1) embed = self.embed(input_seq) encoder_outputs, last_hidden = self.rnn(embed) last_hidden = torch.tanh(self.linear(last_hidden)) return encoder_outputs, last_hidden测试EncoderSRC_VOCAB_SIZE = 100 TGT_VOCAB_SIZE = 100 EMBED_SIZE = 30 ENC_HIDDEN_SIZE = 20 DEC_HIDDEN_SIZE = 30 ATT_SIZE = 15 BATCH_SIZE = 30 CUDA = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(CUDA) x = torch.empty(10, 10, BATCH_SIZE, device=CUDA, dtype=torch.long).random_(SRC_VOCAB_SIZE) y = torch.empty(10, 7, BATCH_SIZE, device=CUDA, dtype=torch.long).random_(TGT_VOCAB_SIZE)Attention模型定义class Attention(nn.Module): def __init__(self, enc_hidden_size, dec_hidden_size, att_size): '''构造方法. Args: enc_hidden_size: dec_hidden_size: att_size: ''' super().__init__() self.linear_1 = nn.Linear(enc_hidden_size, att_size) self.linear_2 = nn.Linear(dec_hidden_size, att_size) def forward(self, encoder_outputs, decoder_hidden): # 首先将二者变换到att_size query = self.linear_1(encoder_outputs) key = self.linear_2(decoder_hidden) scores = torch.softmax((key * query).sum(dim=2), dim=0) #(N, batch_size) # (1, batch_size, enc_hidden_size) weighted_val = (scores.unsqueeze(2) * encoder_outputs).sum(dim=0, keepdim=True) return weighted_val, scoresDecoder模型定义class Decoder(nn.Module): def __init__(self, embed_size, vocab_size, dec_hidden_size, enc_hidden_size, att_size): '''构造方法. Args: embed_size: vocab_size: dec_hidden_size: enc_hidden_size: att_size: ''' super().__init__() self.dec_hidden_size = dec_hidden_size self.embed = nn.Embedding(vocab_size, embed_size) self.rnn = nn.GRU(embed_size+enc_hidden_size, dec_hidden_size) self.att = Attention(enc_hidden_size, dec_hidden_size, att_size) self.linear = nn.Linear(dec_hidden_size, vocab_size) def forward(self, tgt_seq, encoder_outputs, hidden): # 判断是否有batch_size维度 if tgt_seq.dim() < 2: tgt_seq = tgt_seq.view(1, -1) # 保证序列长度至少为2 if len(tgt_seq) < 2: tgt_seq = tgt_seq.repeat(2, 1) embed = self.embed(tgt_seq) #(N, batch_size, embed_size) max_len, batch_size = embed.shape[0], embed.shape[1] tgt_seq_hat = torch.zeros(max_len, embed.shape[1], self.dec_hidden_size, device=CUDA) att_scores = torch.zeros(max_len, encoder_outputs.shape[0], embed.shape[1], device=CUDA) for idx in range(1, max_len): weight_val, scores = self.att(encoder_outputs, hidden) #(1, batch_size, enc_hidden_size) rnn_input = torch.cat((embed[idx-1:idx], weight_val), dim=2) decoder_output, hidden = self.rnn(rnn_input, hidden) tgt_seq_hat[idx] = decoder_output att_scores[idx] = scores tgt_seq_hat = torch.tanh(self.linear(tgt_seq_hat)) return tgt_seq_hat, hidden, att_scoresSeq2Seq模型定义class Seq2Seq(nn.Module): def __init__(self, enc:Encoder, dec:Decoder): super().__init__() self.enc = enc self.dec = dec def forward(self, src_seq, tgt_seq): encoder_outputs, last_hidden = self.enc(src_seq) tgt_seq_hat, _, att_scores = self.dec(tgt_seq, encoder_outputs, last_hidden) return tgt_seq_hat, att_scores enc = Encoder(EMBED_SIZE, 100, ENC_HIDDEN_SIZE, DEC_HIDDEN_SIZE) dec = Decoder(EMBED_SIZE, 100, DEC_HIDDEN_SIZE, ENC_HIDDEN_SIZE, ATT_SIZE) seq2seq = Seq2Seq(enc, dec) seq2seq.to(CUDA) s = sum(p.numel() for p in seq2seq.parameters() if p.requires_grad) print(f'total train parameters: {s}') def train_model(seq2seq, x, y, epochs=10, 
lr=0.01): seq2seq.train() criterion = nn.CrossEntropyLoss() trainer = optim.Adam(seq2seq.parameters(), lr=lr) # n = len(train_iter) n = len(x) losses = torch.zeros(epochs) for idx in range(epochs): for xx, yy in zip(x, y): # xx, yy = batch.src, batch.trg yy_hat, _ = seq2seq(xx, yy) yy_hat = yy_hat.transpose(1, 2) loss = criterion(yy_hat[1:], yy[1:]) loss.backward() trainer.step() trainer.zero_grad() losses[idx] += loss.cpu().detach().item() return losses/n losses = train_model(seq2seq, x, y, 30, 0.02) plt.plot(losses, 'r--') plt.show() print(losses[-1]) def translate(enc, dec, input_seq, sos_token, max_len, tgt_vocab_size): enc.eval() dec.eval() with torch.no_grad(): encoder_outputs, hidden = enc(input_seq) batch_size = encoder_outputs.shape[1] outs = torch.zeros(max_len, batch_size, tgt_vocab_size, device=CUDA) att_scores = torch.zeros(max_len, encoder_outputs.shape[0], batch_size, device=CUDA) decoder_input = sos_token for idx in range(max_len): decoder_output, hidden, att_score = dec(decoder_input, encoder_outputs, hidden) att_scores[idx] = att_score[-1] outs[idx] = decoder_output[-1] decoder_input = torch.argmax(outs[idx:idx+1], dim=2) return torch.argmax(outs, dim=2), att_scores outs, scores = translate(enc, dec, x[1, :, 0:3], y[1, 0:1, 0:3], 6, 100) print(outs.squeeze()) print(y[1, 1:, 0:10]) scores.shape plt.imshow(scores[:, :, 0].cpu()) plt.show()Python NumPy Array Slicing (:) > 1) np.arange()> 2) np.arange().reshape()> 3) Slicing – [:, 0], [:, 0:1], [1:4, 1:4], [:, 1:3], [::], [:,:]> 4) np.itemsizimport numpy as np arr=np.arange(1,101).reshape(10,10) arr arr[0,0] # first row and first column arr[0,0].ndim arr[0] arr[:,0] # first column of all rows arr[:,0:1].ndim arr arr[1:4,1:4] arr[1:4,1:4].ndim arr[:] arr[::] arr[:,:] arr.dtypeExercise 02.1 (if-else)Consider the following assessment criteria which map a score out of 100 to an assessment grade:| Grade | Raw score (/100) || ---------------- | ---------------------- || Excellent | $\ge 82$ || Very good | $\ge 76.5$ and $< 82$ || Good | $\ge 66$ and $< 76.5$ || Need improvement | $\ge 45$ and $< 66$ || Did you try? | $< 45$ |Write a program that, given an a score, prints the appropriate grade. Print an error message if the input score is greater than 100 or less than zero.# YOUR CODE HERE raise NotImplementedError()Exercise 02.2 (bisection)Bisection is an iterative method for finding approximate roots of a function. Say we know that the function $f(x)$ has one root between $x_{0}$ and $x_{1}$ ($x_{0} < x_{1}$). We then:- Evaluate $f$ at the midpoint $x_{\rm mid} = (x_0 + x_1)/2$, i.e. compute $f_{\rm mid} = f(x_{\rm mid})$- Evaluate $f(x_0) \cdot f(x_{\rm mid})$ - If $f(x_0) \cdot f(x_{\rm mid}) < 0$: $f$ must change sign somewhere between $x_0$ and $x_{\rm mid}$, hence the root must lie between $x_0$ and $x_{\rm mid}$, so set $x_1 = x_{\rm mid}$. - Else $f$ must change sign somewhere between $x_{\rm mid}$ and $x_1$, so set $x_0 = x_{\rm mid}$.The above steps can be repeated a specified number of times, or until $|f_{\rm mid}|$is below a tolerance, with $x_{\rm mid}$ being the approximate root. TaskThe function$$f(x) = x^3 - 6x^2 + 4x + 12$$has one root somewhere between $x_0 = 3$ and $x_1 = 6$.1. Use the bisection method to find an approximate root $x_{r}$ using 15 iterations (use a `for` loop).2. 
Use the bisection method to find an approximate root $x_{r}$ such that $\left| f(x_{r}) \right| < 1 \times 10^{-6}$ and report the number of iterations required (use a `while` loop).Store the approximate root using the variable `x_mid`, and store $f(x_{\rm mid})$ using the variable `f`.*Hint:* Use `abs` to compute the absolute value of a number, e.g. `y = abs(x)` assigns the absolute value of `x` to `y`. (1) Using a `for` loop.# Initial end points x0 = 3.0 x1 = 6.0 # Use 15 iterations for n in range(15): # Compute midpoint x_mid = (x0 + x1)/2 # Evaluate function at left end-point and at midpoint f0 = x0**3 - 6*x0**2 + 4*x0 + 12 f = x_mid**3 - 6*x_mid**2 + 4*x_mid + 12 # YOUR CODE HERE raise NotImplementedError() print(n, x_mid, f) assert round(x_mid - 4.534149169921875, 10) == 0.0 assert abs(f) < 0.0009(2) Using a `while` loopUse the variable `counter` for the iteration number. *Remember to guard against infinite loops.*# Initial end points x0 = 3.0 x1 = 6.0 tol = 1.0e-6 error = tol + 1.0 # Iterate until tolerance is met counter = 0 while error > tol: # YOUR CODE HERE raise NotImplementedError() # Guard against an infinite loop if counter > 1000: print("Oops, iteration count is very large. Breaking out of while loop.") break print(counter, x_mid, error) assert counter == 23 assert abs(f) < 1.0e-6Exercise 02.3 (series expansion)The power series expansion for the sine function is: $$\sin(x) = \sum_{n = 0}^{\infty} (-1)^n \frac{x^{2n +1}}{(2n+1)!}$$(See mathematics data book for a less compact version; this compact version is preferred here as it is simpler to program.)1. Using a `for` statement, approximate $\sin(3\pi/2)$ using 15 terms in the series expansion and report the absolute error.1. Using a `while` statement, compute how many terms in the series are required to approximate $\sin(3\pi/2)$ to within $1 \times 10^{-8}$. Store the absolute value of the error in the variable `error`.*Note:* Calculators and computers use iterative or series expansions to compute trigonometric functions, similar to the one above (although they use more efficient formulations than the above series). HintsTo compute the factorial and to get a good approximation of $\pi$, use the Python `math` module:```pythonimport mathnfact = math.factorial(10)pi = math.pi```You only need '`import math`' once at the top of your program. Standard modules, like `math`, will be explained in a later. If you want to test for angles for which sine is not simple, you can use ```pythona = 1.3s = math.sin(a)``` to get an accurate computation of sine to check the error. 
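Before the exercise cells below, here is one possible sketch (not the official solution) of how the truncated series can be accumulated with a `for` loop, using only `math.factorial`, `math.pi`, and `math.sin` as suggested in the hints; the names `approx_sin` and `error` simply mirror the exercise scaffolding.

```python
import math

# Value at which to approximate sine
x = 1.5*math.pi

# Accumulate 15 terms of the series sum_n (-1)^n x^(2n+1) / (2n+1)!
approx_sin = 0.0
for n in range(15):
    approx_sin += (-1)**n * x**(2*n + 1) / math.factorial(2*n + 1)

# Absolute error against the library implementation of sine
error = abs(approx_sin - math.sin(x))
print(error)
```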
(1) Using a `for` loop# Import the math module to access math.sin and math.factorial import math # Value at which to approximate sine x = 1.5*math.pi # Initialise approximation of sine approx_sin = 0.0 # YOUR CODE HERE raise NotImplementedError() print("The error is:") print(error) assert error < 1.0e-12(2) Using a `while` loop*Remember to guard against infinite loops.*# Import the math module to access math.sin and math.factorial import math # Value at which to approximate sine x = 1.5*math.pi # Tolerance and initial error (this just needs to be larger than tol) tol = 1.0e-8 error = tol + 1.0 # Intialise approximation of sine approx_sin = 0.0 # Initialise counter n = 0 # Loop until error satisfies tolerance, with a check to avoid # an infinite loop while error > tol and n < 1000: # YOUR CODE HERE raise NotImplementedError() # Increment counter n += 1 print("The error is:") print(error) print("Number of terms in series:") print(n) assert error <= 1.0e-8Pytorch test on ColabExsample from https://github.com/pytorch/examples/blob/master/mnist/main.pyfrom __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.optim.lr_scheduler import StepLR네트워크 정의# Load the TensorBoard notebook extension %load_ext tensorboard import datetime, os from torch.utils.tensorboard import SummaryWriter logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) print(logdir) writer = SummaryWriter(logdir) %tensorboard --logdir logs class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.dropout2 = nn.Dropout2d(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def train(model, device, train_loader, optimizer, epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % 10 == 0: writer.add_scalar('training loss',loss.item(),batch_idx) print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) def test(model, device, test_loader): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. 
* correct / len(test_loader.dataset))) #@title Setting Parameter batch_size = 64 #@param {type:"integer"} test_batch_size = 64 #@param {type:"integer"} epochs = 10 #@param {type:"integer"} lr = 1.0 #@param {type:"number"} gamma = 0.7 #@param {type:"number"} seed = 1 #@param {type:"integer"} use_cuda = torch.cuda.is_available() torch.manual_seed(seed) device = torch.device("cuda" if use_cuda else "cpu") kwargs = {'batch_size': batch_size} if use_cuda: kwargs.update({'num_workers': 1,'pin_memory': True,'shuffle': True}, ) print ("use_cuda : ", use_cuda, device) !nvidia-smi transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transform) dataset2 = datasets.MNIST('../data', train=False, transform=transform) train_loader = torch.utils.data.DataLoader(dataset1,**kwargs) test_loader = torch.utils.data.DataLoader(dataset2, **kwargs) model = Net().to(device) optimizer = optim.Adadelta(model.parameters(), lr=lr) scheduler = StepLR(optimizer, step_size=1, gamma=gamma) for epoch in range(1, epochs + 1): train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader) scheduler.step()Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. 
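To make that concrete, here is a tiny illustrative sketch (not part of the original notebook) that checks the gradient PyTorch computes for a two-layer scalar computation against the chain-rule product written out by hand:

```python
import torch

# Toy "two-layer network" on scalars: L1 = w1*x, S = sigmoid(L1), L2 = w2*S
x  = torch.tensor(0.7)
w1 = torch.tensor(0.3, requires_grad=True)
w2 = torch.tensor(-1.2)

L1 = w1 * x
S  = torch.sigmoid(L1)
L2 = w2 * S          # treat L2 itself as the "loss" for simplicity

L2.backward()

# Chain rule by hand: dL2/dw1 = dL1/dw1 * dS/dL1 * dL2/dS = x * S*(1-S) * w2
manual = x * S * (1 - S) * w2
print(w1.grad.item(), manual.item())  # the two numbers agree
```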
It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). 
It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)NoteIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.# Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10)) # Define the loss criterion = nn.CrossEntropyLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss)tensor(2.3058, grad_fn=)In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.### Import needed modules import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) ################################################ # TODO: Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.LogSoftmax(dim=1), nn.Linear(128, 64), nn.LogSoftmax(dim=1), nn.Linear(64, 10)) # TODO: Define the loss criterion = nn.NLLLoss() ### Run this to check your work # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss)tensor(2.6158, grad_fn=)AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. 
To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` content:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.x = torch.randn(2,2, requires_grad=True) print(x) y = x**2 print(y)Below we can see the operation that created `y`, a power operation `PowBackward0`.## grad_fn shows the function that generated this variable print(y.grad_fn)The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.z = y.mean() print(z)You can check the gradients for `x` and `y` but they are empty currently.print(x.grad)To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$z.backward() print(x.grad) print(x/2)These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.# Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() images, labels = next(iter(trainloader)) images = images.view(images.shape[0], -1) logits = model(images) loss = criterion(logits, labels) print('Before backward pass: \n', model[0].weight.grad) loss.backward() print('After backward pass: \n', model[0].weight.grad)Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.from torch import optim # Optimizers require the parameters to optimize and a learning rate optimizer = optim.SGD(model.parameters(), lr=0.01)Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. 
The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.print('Initial weights - ', model[0].weight) images, labels = next(iter(trainloader)) images.resize_(64, 784) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # Forward pass, then backward pass, then update weights output = model(images) loss = criterion(output, labels) loss.backward() print('Gradient -', model[0].weight.grad) # Take an update step and few the new weights optimizer.step() print('Updated weights - ', model[0].weight)Training for realNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll doing a training pass where we calculate the loss, do a backwards pass, and update the weights.>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.# Import needed modules import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms from torch import optim # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader( trainset, batch_size=64, shuffle=True) ################################################ ## Your solution here model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.003) epochs = 5 for e in range(epochs): running_loss = 0 for images, labels in trainloader: # Flatten MNIST images into a 784 long vector images = images.view(images.shape[0], -1) optimizer.zero_grad() # TODO: Training pass logits = model(images) loss = criterion(logits,labels) loss.backward() running_loss += loss.item() optimizer.step() else: print(f"Training loss: {running_loss/len(trainloader)}")Training loss: 1.8140400818416051 Training loss: 0.7986363140123485 Training loss: 0.5134363588430225 Training loss: 0.42579773433808327 Training loss: 0.3839154780578257With the network trained, we can check out it's predictions.%matplotlib inline import helper images, labels = next(iter(trainloader)) img = images[0].view(1, 784) # Turn off gradients to speed up this part with torch.no_grad(): logps = model(img) # Output of the network are log-probabilities, need to take exponential for probabilities ps = torch.exp(logps) helper.view_classify(img.view(1, 28, 28), ps)Section 2.1 `xarray`, `az.InferenceData`, and NetCDF for Markov Chain Monte Carlo_How do we generate, store, and save Markov chain Monte Carlo 
results_import numpy as np import pandas as pd import scipy.stats as stats import matplotlib.pyplot as plt import arviz as az import pystan import xarray as xr from IPython.display import Video np.random.seed(0) plt.style.use('arviz-white')Learning Objectives* Understand Markov chain Monte Carlo fundamentals* Recognize the meaning of sample, draws, and chains in MCMC context* Understand relationship between Xarray, az.InferenceData, and NetCDF* Gain profiency with Xarray, NetCDF, and az.InferenceData objects Markov Chain Monte Carlo**Pop quiz**: Why do we use Markov chain Monte Carlo in Bayesian inference?**Highlight for answer:** Calculating the posterior distribution is hard!**Example:** If a flight has cancellation rate $r$, alternate tickets cost you $c$, and these distributions are modelled by $p(r, c)$, then expected cost of insuring a flight is$$\text{risk} = \int_{r=0}^{1}\int_{c=0}^{\infty} r\cdot c~dp(r, c)$$This can be hard to calculate for any number of reasons! If, instead, we have samples $$\{r_j, c_j\}_{j=1}^N \sim p(r, c)$$then $$\text{risk} \approx \frac{1}{N}\sum_{j=1}^N r_j \cdot c_j$$In python code, this would just be```risk = np.dot(r, c) / N``` Markov Chain Monte Carlo algorithm (greatly simplified)Step 1: Start at a random spot Step 2: Propose a new spot, possibly based on the previous spot Step 3: Accept or reject this proposal based on some mathematical book keeping Step 4: If accepted, move to proposed spot, if rejected, stay where you are Step 5: Write down where you're standing Step 6: Go back to step 2 The accepted proposals are called draws (or samples).When animated this algorithm looks like this:Video("../../img/medium_steps.mp4")In MCMC Step 2 and Step 4 is where most MCMC variants differentiate themselves. Algorithms like Hamiltonian Monte Carlo and Sequential Monte Carlo are better at picking that next step for certain tasks. has a great visual explainer [on his blog]([http://elevanth.org/blog/2017/11/28/build-a-better-markov-chain/)Chain: A Markov chainSample/Draw: A single element of that chainRegardless of algorithm in MCMC we end up with the same thing, a chain of accepted proposals with a fixed size. There is a rich literature to show that these algorithms produce samples that are eventually distributed according to the distribution we care about. Markov chain Monte Carlo with Metropolis-HastingsBelow is a working Metropolis-Hastings sampler, taken from ['s blog](https://twiecki.io/blog/2015/11/10/mcmc-sampling/). For the purposes of this tutorial focus more on the return value than the algorithm details.It is important to note that this for simplicity's sake we have also hard coded the likelihood and prior in the sampler below. In mathematical notation our model looks like this. 
We are adding 20 to the estimation of mu to make it easier to recognize the distribution of **parameters** from the distribution of **observed data**$$\mu \sim \mathcal{N}(0, 1) \\y \sim \mathcal{N}(\mu+20, 1)$$def mh_sampler(data, samples=4, mu_init=.5): mu_current = mu_init posterior = [] prior_logpdf = stats.norm(0, 1).logpdf for i in range(samples): # suggest new position mu_proposal = stats.norm(mu_current, 0.5).rvs() # Compute likelihood by multiplying probabilities of each data point likelihood_current = stats.norm(mu_current + 20, 1).logpdf(data).sum() likelihood_proposal = stats.norm(mu_proposal + 20, 1).logpdf(data).sum() # Compute prior probability of current and proposed mu prior_current = prior_logpdf(mu_current) prior_proposal = prior_logpdf(mu_proposal) # log(p(x|θ) p(θ)) = log(p(x|θ)) + log(p(θ)) p_current = likelihood_current + prior_current p_proposal = likelihood_proposal + prior_proposal # Accept proposal? p_accept = np.exp(p_proposal - p_current) accept = np.random.rand() < p_accept if accept: # Update position mu_current = mu_proposal else: # don't move pass posterior.append(mu_current) return np.array(posterior)SetupBefore using the sampler let's generate some data to test our Metropolis Hasting Implementation. In the code block below we are generating a bimodal distribution for the sampler.data = stats.norm.rvs(loc=30, scale=1, size=1000).flatten()We'll also plot our samples to get a sense of what the distribution of data looks like. Note how the histogram centers around 30. This should intuitively make sense as we're specified a mean of 30 when generating random values.fig, ax = plt.subplots() ax.hist(data) fig.suptitle("Histogram of observed data");As humans we can intuit *data mean* of **30** + an offset of **20** will lead to a parameter mean for *mu* of **10**. We want to see if our inference algorithm can recover our parameters. Single Variable Single Chain Inference Run The simplest MCMC run we can perform is with a single variable and a single chain. We'll do so by putting our sampler function and data to use.samples = 200 chain = mh_sampler(data=data, samples=samples) chain[:100]And just like that we've performed an inference run! We can generate a traceplotfig, ax = plt.subplots(figsize=(10, 7)) x = np.arange(samples) ax.plot(x, chain);In terms of data structures, for a **single** variable **single** chain inference run, an array suffices for storing samples. Single Variable Multiple Chain Inference Run As Bayesian modelers, life would be relatively easy if a single chain worked well every time, but unfortunately this is not the case. To understand why look at the above inference run. While the sampler started at *mu=8*, it took a 50 or so steps before the sampler honed in on the "correct" value of 10.MCMC algorithms are sensitive to their starting points and in finite runs it's **not** guaranteed that the Markov Chain will approach the true underlying distribution. A common method to get around this is to sample from many chains in parallel and see if we get to the same place. We will discuss this further when we get to single model diagnostics.chain_0 = mh_sampler(data=data, samples=samples) chain_1 = mh_sampler(data=data, samples=samples, mu_init=13) data_df = pd.DataFrame({"x_0":chain_0, "x_1":chain_1}) fig, ax = plt.subplots() x = np.arange(samples) ax.plot(x, data_df["x_0"], c="g") ax.plot(x, data_df["x_1"])With two chains converging to approximately a single value we can be more confident that the sampler reached the true underlying parameter. 
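That visual check can also be quantified. As a hedged sketch (using `az.from_dict`, which this notebook introduces a little later, and ArviZ's `az.rhat` convergence diagnostic), stacking the two chains and computing R-hat gives values near 1.0 when the chains agree with each other:

```python
import numpy as np
import arviz as az

# Shape the two chains as (chain, draw) and compute the R-hat diagnostic;
# values close to 1.0 suggest the chains are exploring the same distribution.
chains = az.from_dict({"mu": np.stack([chain_0, chain_1])})
print(az.rhat(chains))
```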
We can also store the results in a 2D data structures, such as Pandas Dataframes in python memory, and csvs or sql tables for persistent on disk storage. Multiple Variable Multiple Chain Inference Runs A Bayesian modelers, life would be relatively easy if all models only had one variable (univariate models in math speak). Unfortunately many types of models require 2 or more variables. For example in a linear regression we are interested in estimating both m and b:$$ y \sim mx+b$$With at least 3 things to track (chains, samples, and variables) a 2d data structures become limiting. This problem exists in many domains and is the focus of the *xarray* project.A motivating example comes from climate sciences. In this image from the xarray documentation the researcher might want to measure the temperature and humidity, across a 2D region at a point in time. Or they may want to plot the temperature over a time interval. xarray simplifies the data handling in cases like these.![XarrayStructure](../../img/dataset-diagram.png) XarrayIn ArviZ an xarray DataSet object would look like the one below, where the variables are the Inference run variables, and the coordinates are at a minimum chains, draws.posterior = xr.Dataset( {"mu": (["chain", "draw"], [[11,12,13],[22,23,24]]), "sd": (["chain", "draw"], [[33,34,35],[44,45,46]])}, coords={"draw": [1,2,3], "chain": [0,1]}, ) posteriorMultiple Variable Multiple Chain Inference runs and associated datasetsAs a Bayesian modelers, life would be relatively easy if we were only concerned about posterior distributions. Looking back at the full end to end workflow, recall that there are other datasets, such as prior predictive samples, posterior predictive samples, among others. To aid the ArviZ user we present `az.InferenceData`. az.InferenceDataaz.InferenceData serves as a data container for the various xarray datasets that are generated from an end-to-end Bayesian workflow. Consider our earlier simple model, and this time let's use `stan` to run a full analysis with multiple chains, multiple runs, and generate all sorts of datasets common in Bayesian analysis. Calculating priorstan_code_prior = """ data { int N; } parameters { real mu; // Estimated parameter } model { mu ~ normal(0, 1); } generated quantities { real y_hat[N]; // prior prediction for (n in 1:N) { y_hat[n] = normal_rng(mu+20, 1); } } """ stan_prior = pystan.StanModel(model_code=stan_code_prior) stan_data_prior = {"N" : len(data)} stan_fit_prior = stan_prior.sampling(data=stan_data_prior) stan_code_posterior = """ data { int N; real y[N]; // Observed data } parameters { real mu; // Estimated parameter } model { mu ~ normal(0, 1); y ~ normal(mu+20, 1); } generated quantities { real y_hat[N]; // posterior prediction real log_lik[N]; // log_likelihood for (n in 1:N) { // Stan normal functions https://mc-stan.org/docs/2_19/functions-reference/normal-distribution.html y_hat[n] = normal_rng(mu, 1); log_lik[n] = normal_lpdf(y[n] | mu, 1); } } """ stan_model_posterior = pystan.StanModel(model_code=stan_code_posterior) stan_data_posterior = dict( y=data, N=len(data) ) stan_fit_posterior = stan_model_posterior.sampling(data=stan_data_posterior) stan_inference_data = az.from_pystan(posterior=stan_fit_posterior, observed_data="y", # Other Bayesian Datasets that we have not discussed yet! posterior_predictive="y_hat", prior=stan_fit_prior, prior_predictive="y_hat", log_likelihood="log_lik", )NetCDFCalculating the various datasets is usually not trivial. 
Network Common Data Form (NetCDF) is an open standard for storing multidimensional datasets, and `xarray` is a library for doing high performance analysis on those datasets. NetCDF even comes with "group" support, making it easy to serialize az.InferenceData straight to disk. ArviZ uses NetCDF to save the results to disk, allowing reproducible analyses, multiple experiments, and sharing with others. ArviZ even ships with sample datasets, serialized in NetCDFhttps://github.com/arviz-devs/arviz/tree/master/arviz/data/_datasetsIn short: like SQL is to Pandas DataFrame, NetCDF is to az.InferenceData.data = az.load_arviz_data("centered_eight") dataThe benefits of az.InferenceDataOne of the goals for the ArviZ developers is to ensure that Bayesian practioners can share and reproduce analyses regardless of PPl, regardless of language and az.InferenceData was the implementation of this idea.In summary az.InferenceData * provides a consistent format for Bayesian datasets.* makes it easy to save results* makes use of ArviZ plotting and statistics functions simpler* stores metadata for ease of reproducibility InferenceData in practiceIn practice it's rare to ever generate a xarray manually for use in ArviZ. Instead ArviZ provides methods for instantiating InferenceData from plain Python objects, mappings to various PPLs, as well as methods to save and load NetCDF files.For further references consider the ArviZ cookbook, and data structure tutorial.https://arviz-devs.github.io/arviz/notebooks/InferenceDataCookbook.htmlhttps://arviz-devs.github.io/arviz/notebooks/XarrayforArviZ.html ExamplesSee below for some useful methods of interacting with az.InferenceData, Xarray, and NetCDFFor Xarray methods we only demo a subset of the available API. For a much more comprehensive explanation view the indexing and selection page from the xarray docshttp://xarray.pydata.org/en/stable/indexing.html Creating InferenceData objectsWe can create an InferenceData objects from our "home built" chain, not just from the output of supported PPLsdata_dict = {"mu": [chain_0, chain_1]} home_built_data = az.from_dict(data_dict) home_built_data # Load NetCDF from disk into memory ## Replace with NetCDF that's "visible" data = az.load_arviz_data("centered_eight") # Reference posterior directly posterior = data.posterior posterior # Select specific variables posterior[["mu", "tau"]] # Select specific chains and draws posterior.sel(chain=[0,2], draw=slice(0,5)) # Get first 10 samples of mu from chain 0 posterior["mu"].sel(chain=0, draw=slice(0,10)).valuesAs expected, there is a strong daily seasonality to this time series. The strongest correlations are 1 day lag and 7 day lag (weekly component)from statsmodels.tsa.seasonal import seasonal_decompose decomposition = seasonal_decompose(midtc_df, freq=4*24) #daily fig = plt.figure() fig = decomposition.plot() fig.set_size_inches(15, 8) #there is also a weekly component #inspect residuals plot_acf(decomposition.resid.dropna()) #too much autocorrelation in the residuals ; we have neglected the weekly componentThere is still a seasonal component left in the trend. 
Try weekly seasonalityfrom statsmodels.tsa.seasonal import seasonal_decompose weekly_decomposition = seasonal_decompose(midtc_df, freq=7*4*24) #weekly fig = plt.figure() fig = weekly_decomposition.plot() fig.set_size_inches(15, 8) #weekly component #examine the residuals print( "Peak at {}".format(50+np.argmax(acf(weekly_decomposition.resid.dropna(), nlags=8*24*4)[50:]))) #we are now neglecting daily seasonality mt_pacf = pacf(midtc_df, nlags=8*24*4) #8 days #significance level of pacf with N samples N = len(midtc_df) sig = (np.exp(2*1.96/np.sqrt(N-3)-1)/(np.exp(2*1.96/np.sqrt(N-3)+1))) plt.scatter(np.arange(24*4+1), mt_pacf[:24*4+1]) plt.axhline(y=sig, color='r', linestyle='-') plt.axhline(y=-sig, color='r', linestyle='-') #find significant components np.argwhere(abs(mt_pacf[:24*7]) > sig) #%pdb on # PACF indicates a model AR(1-4) with seasonality of day. Potential for MA component given damped sin effect # ACF indicates additional seasonality component of week from statsmodels.tsa.statespace.sarimax import SARIMAX #grid search using AIC models = [] results = [] seasonal_options = [(1,0,0,96), (0,0,1,96), (1,0,1,96)] # order 7 (weekly) doesn't converge (singular matrix) for p in range(1,5): for q in range(0,5): for s in range(3): print(p,q,s) try: models.append(SARIMAX(midtc_df.values, order=(p,0,q), seasonal_order=seasonal_options[s])) results.append(models[-1].fit()) except Exception as e: print(e) results.append(None) results[0].summary() #pick best model based on AIC & Q stat from matplotlib import pyplot as plt aic = pd.Series([x.aic if x else 50000 for x in results]) aic = aic.fillna(50000) np.argmin(aic) ax = aic.plot() ax.set_ylim((26000,30000)) # pick simplest model with AIC below 24000 best_order = (1,0,1) best_seasonal = seasonal_options[2] # (1,0,1,96)dataset.py Command to create an empty dataset in AutoML dataset! python dataset.py create_dataset --dataset_name trainDataset name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144 Dataset id: TBL7687283924107526144 Dataset display name: train Dataset metadata: stats_update_time { } Dataset example count: 0 Dataset create time: seconds: 1564496871 nanos: 340824000Command to list of existing dataset info in AutoML dataset! python dataset.py list_datasetsCommand to import new data into an existing empty AutoML dataset! python dataset.py import_data --dataset_id TBL7687283924107526144 --path "bq://hackathon1-183523.demo3_v2.train"Processing import... Data imported.Command to list of existing dataset info in AutoML, to make sure the import is successful! python dataset.py list_datasetsSchema review! python dataset.py schema_review --dataset_name "projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144"Feature name:start_month Feature name:FLOAT64 Feature name:dayOfWeek Feature name:CATEGORY Feature name:start_hour Feature name:FLOAT64 Feature name:pickup_longitude Feature name:FLOAT64 Feature name:price_per_mile Feature name:FLOAT64 Feature name:trip_miles Feature name:FLOAT64 Feature name:dropoff_longitude Feature name:FLOAT64 Feature name:dayOfYear Feature name:FLOAT64 Feature name:trip_seconds Feature name:FLOAT64 Feature name:pickup_latitude Feature name:FLOAT64 Feature name:dropoff_latitude Feature name:FLOAT64list table spec id of the dataset! 
python dataset.py list_table_specs --dataset_id TBL7687283924107526144List of table specs: Table spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416 Table spec id: 5409614800843964416 Table spec time column spec id: Table spec row count: 27589 Table spec column count: 11list each column spec info in the table! python dataset.py list_column_specs --dataset_id TBL7687283924107526144 --table_spec_id 5409614800843964416List of column specs: Column spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416/columnSpecs/3796398146431483904 Column spec id: 3796398146431483904 Column spec display name: trip_miles Column spec data type: type_code: FLOAT64 Column spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416/columnSpecs/4949319651038330880 Column spec id: 4949319651038330880 Column spec display name: pickup_latitude Column spec data type: type_code: FLOAT64 Column spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416/columnSpecs/914094384914366464 Column spec id: 914094384914366464 Column spec display name: pickup_longitude Column spec data type: type_code: FLOAT64 Column spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416/columnSpecs/3219937394128[...]get dataset! python dataset.py get_dataset --dataset_id TBL7687283924107526144Dataset name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144 Dataset id: TBL7687283924107526144 Dataset display name: train Dataset metadata: primary_table_spec_id: "5409614800843964416" stats_update_time { seconds: 1564496993 nanos: 135000000 } Dataset example count: 27589 Dataset create time: seconds: 1564496871 nanos: 340824000get table spec info! python dataset.py get_table_spec --dataset_id TBL7687283924107526144 --table_spec_id 5409614800843964416Table spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416 Table spec id: 5409614800843964416 Table spec time column spec id: Table spec row count: 27589 Table spec column count: 11get column spec id! python dataset.py get_column_spec --dataset_id TBL7687283924107526144 --table_spec_id 5409614800843964416 --column_spec_id 1490555137217789952Column spec name: projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144/tableSpecs/5409614800843964416/columnSpecs/1490555137217789952 Column spec id: 1490555137217789952 Column spec display name: price_per_mile Column spec data type: type_code: FLOAT64 Column spec data stats: distinct_value_count: 3249 float64_stats { mean: 6.26339242877 standard_deviation: 4.0244233649 quantiles: 0.00172413793103 quantiles: 4.16666666667 quantiles: 5.38461538462 quantiles: 6.875 quantiles: 95.0 histogram_buckets { min: -inf max: 9.50155172414 count: 25103 } histogram_buckets { min: 9.50155172414 max: 19.0013793103 count: 2239 } histogram_buckets { min: 19.0013793103 max: 28.5012068966 count: 118 } histogram_buckets { min: 28.5012068966 max: 38.0010344828 count: 67 } histogram_buckets { min: 38.0010344828 max: 47.500862069 count: 15 } histogram_buckets { min: 47.500862069 max:[...]set target column! python dataset.py update_dataset --dataset_id TBL7687283924107526144 --target_column_spec_id 1490555137217789952Dataset updated. 
name: "projects/401832599639/locations/us-central1/datasets/TBL7687283924107526144" display_name: "train" create_time { seconds: 1564496871 nanos: 340824000 } etag: "AB3BwFq372u9hlP1Gpky6KuWjnHECjpwp98fF9qhhwEbNP4NWaCANjXvX-qx2QPnd_qj" example_count: 27589 tables_dataset_metadata { primary_table_spec_id: "5409614800843964416" target_column_spec_id: "1490555137217789952" stats_update_time { seconds: 1564496993 nanos: 135000000 } }model.py list most recent model! python model.py list_modelsList of models: Model name: projects/401832599639/locations/us-central1/models/TBL2266814744474157056 Model id: TBL2266814744474157056 Model display name: demo3_v2 Target column display name: price_per_mile Training budget in node milli hours: 5000 Training cost in node milli hours: 1896 Model create time: seconds: 1563997966 nanos: 318460000 Model deployment state: undeployedtraining model! python model.py create_model --dataset_id TBL7687283924107526144 --model_name demo3_v2 --train_budget_milli_node_hours 5000Training model... Training operation name: projects/401832599639/locations/us-central1/operations/TBL3051905629124820992get status! python model.py get_operation_status --operation_full_id "projects/401832599639/locations/us-central1/operations/TBL3051905629124820992"Operation status: name: "projects/401832599639/locations/us-central1/operations/TBL3051905629124820992" metadata { type_url: "type.googleapis.com/google.cloud.automl.v1beta1.OperationMetadata" value: "\032\014\010\220\253\201\352\005\020\210\346\222\352\002\"\014\010\255\357\201\352\005\020\220\341\313\313\003R\000" } done: true response { type_url: "type.googleapis.com/google.cloud.automl.v1beta1.Model" value: "\nIprojects/401832599639/locations/us-central1/models/TBL1845165229361594368" }list existing model again! python model.py list_modelsList of models: Model name: projects/401832599639/locations/us-central1/models/TBL573461284582850560 Model id: TBL573461284582850560 Model display name: test_demo Target column display name: price_per_mile Training budget in node milli hours: 5000 Training cost in node milli hours: 2748 Model create time: seconds: 1564496817 nanos: 269715000 Model deployment state: undeployedget model metadata! python model.py get_model --model_id TBL573461284582850560Model name: projects/401832599639/locations/us-central1/models/TBL573461284582850560 Model id: TBL573461284582850560 Model display name: test_demo Model metadata: target_column_spec { name: "projects/401832599639/locations/us-central1/datasets/TBL975513104441933824/tableSpecs/2068506827288477696/columnSpecs/6904655945503080448" data_type { type_code: FLOAT64 } display_name: "price_per_mile" } input_feature_column_specs { name: "projects/401832599639/locations/us-central1/datasets/TBL975513104441933824/tableSpecs/2068506827288477696/columnSpecs/9210498954716774400" data_type { type_code: FLOAT64 } display_name: "trip_miles" } input_feature_column_specs { name: "projects/401832599639/locations/us-central1/datasets/TBL975513104441933824/tableSpecs/2068506827288477696/columnSpecs/8057577450109927424" data_type { type_code: FLOAT64 } display_name: "pickup_latitude" } input_feature_column_specs { name: "projects/401832599639/locations/us-central1/dataset[...]list existing model eval! 
python model.py list_model_evaluations --model_id TBL573461284582850560List of most recent model evaluations: Model evaluation name: projects/401832599639/locations/us-central1/models/TBL573461284582850560/modelEvaluations/9119363279356331151 Model evaluation id: 9119363279356331151 Model evaluation example count: 2730 Model evaluation time: seconds: 1564506840 nanos: 830440000get evaluation! python model.py get_model_evaluation --model_id TBL573461284582850560 --model_evaluation_id 9119363279356331151name: "projects/401832599639/locations/us-central1/models/TBL573461284582850560/modelEvaluations/9119363279356331151" create_time { seconds: 1564506840 nanos: 830440000 } evaluated_example_count: 2730display evaluation! python model.py display_evaluation --model_id TBL573461284582850560Model regression metrics: Model RMSE: 0.701361298561 Model MAE: 0.191221699119 Model MAPE: 2.81096339226 Model R^2: 0.96477496624deploy! python model.py deploy_model --model_id TBL573461284582850560Model deployed.predict.py batch prediction! python predict.py batch_predict --model_id TBL573461284582850560 --input_path "bq://hackathon1-183523.demo3_v2.test" --output_path "bq://hackathon1-183523"Making batch prediction... Batch prediction complete. create_time { seconds: 1564509019 nanos: 115143000 } update_time { seconds: 1564509172 nanos: 622171000 } batch_predict_details { input_config { bigquery_source { input_uri: "bq://hackathon1-183523.demo3_v2.test" } } output_info { bigquery_output_dataset: "bq://hackathon1-183523.prediction_test_demo_2019_07_30T10_50_19_032Z" } }online prediction! python predict.py predict --model_id TBL573461284582850560 --file_path 'test.csv' ! curl -X POST -H "Content-Type: application/json" \ -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \ https://automl.googleapis.com/v1beta1/projects/hackathon1-183523/locations/us-central1/models/TBL6421666875700150272:predict \ -d @request.json{ "payload": [ { "tables": { "value": 8.3914804458618164, "predictionInterval": { "start": 6.6885089874267578, "end": 12.913139343261719 } } } ] }Word embedding-based Homograph Disambigation Logistic Regression ModelMultinomial (one model per class) logistic regression (LR) for homograph disambiguation (HD). LR FeaturesThe feature for each homograph pronunciation label is a BERT token embedding. Each embedding is taken from the token embeddings for a sentence containing the homograph. Data[Wikipedia Homograph Data (WHD)](https://github.com/google-research-datasets/WikipediaHomographData); see:., ., and . (2018). [Improving homograph disambiguation with machine learning.](https://aclanthology.org/L18-1215/) In Proceedings of the Eleventh International Conference on Language Resources and Evaluation, pages 1349-1352. Miyazaki, Japan. Context Nicolis and Klimkov (2021; [NK2021](https://www.researchgate.net/profile/Marco-Nicolis-2/publication/354151448_Homograph_disambiguation_with_contextual_word_embeddings_for_TTS_systems/links/613619910360302a0083e34b/Homograph-disambiguation-with-contextual-word-embeddings-for-TTS-systems.pdf)) claim SOTA results with word-embedding-featured HD LR. However, ~%40 of the classes (homograph pronunciations/wordids) in the WHD test set are represented by either one instance, or are _not_ represented in the WHD test set used by NK2021. Over the entire WHD, 17 of the homographs have only 1 pronunciation class. NK2021 take 'conglomerate' out from the data, as one pronunciation in the test set is not present in the training set. 
They use the rest of the WHD data as is, which possibly calls into question their results. Would the model(s) perform as well with a more robust test set and with each homograph having at least two pronunciations from which to select? PurposeThe HD LR in this notebook is developed to replicate experimentation found in NK2021. Use1. Compare metrics obtained with NK2021-replicated HD LR using the WHD to a data set that provides better class coverage.2. Compare metrics obtained in 1 to those obtained with multi-class token classifier developed in [Seale (2021)](https://academicworks.cuny.edu/cgi/viewcontent.cgi?article=5591&context=gc_etds).3. Determine if SOTA claims using HD LR still hold given data issues, and when compared to multi-class neural nets. TO DO: 1. Handle homographs with more than 2 pronunciations. (?)2. Continue to align with NK2021. Notes:I only have this running on CPU right now. Struggled with getting MXNET to play well with my GPU-enabled laptop. Makes sense to do. Running this with BERT_LARGE embeddings is ridiculously slow. Model training: 159it [4h:21m:43s, 98.76s/it]import os import regex as re import csv import glob import operator from tqdm import tqdm from typing import Dict, List, Tuple import pandas as pd import numpy as np from sklearn.metrics import accuracy_score, balanced_accuracy_score # MXNET import mxnet as mx from mxnet import nd, autograd, gluon from mxnet.gluon import nn, Trainer from mxnet.gluon.data import DataLoader, ArrayDataset from mxnet.contrib import text # https://pypi.org/project/BERT-embedding/ from bert_embedding import BertEmbedding # PATHS # https://github.com/google-research-datasets/WikipediaHomographData TRAIN_PATH = "./WikipediaHomographData/data/train/*.tsv" TEST_PATH = "./WikipediaHomographData/data/eval/*.tsv" WORD_IDS_PATH = "./WikipediaHomographData/data/wordids.tsv" # Data from Seale 2021 dissertation # TRAIN_PATH = "./WHD/train_whd_fren_34_low_prev_restricted/*.tsv" # TEST_PATH = "./WHD/test_whd_fren_34_low_prev_restricted/*.tsv" #HELPER FUNCTION: Used to generate global variables WORDIDS_LAB_DICT, WORDIDS_IDX_DICT def make_wordids_dict() -> Tuple[Dict, Dict]: # FUNCTIONALITY: Makes dictionary used to convert wordids to 0,1 and vice versa # OUTPUT: { homograph_str : {0: wordid_1_str, 1: wordid_2_str}, ...}, # { homograph_str : {wordid_1_str: 0, wordid_2_str: 1}, ...} lab_dict : Dict = {} idx_dict : Dict = {} df : pd.DataFrame = pd.read_csv(WORD_IDS_PATH, sep="\t") for hom, e in df.groupby("homograph"): idx = 0 l_dict : Dict = {} i_dict : Dict = {} for wid in e["wordid"]: i_dict[idx] = wid l_dict[wid] = idx idx += 1 lab_dict[hom] = l_dict idx_dict[hom] = i_dict return lab_dict, idx_dict # GLOBAL VARIABLES # Used for cleaning tokens in get_embedding() REGEX = r"(?<=[^A-Za-z])(?=[A-Za-z])|(?<=[A-Za-z])(?=[^A-Za-z])" SUB = " " # Used to create functionality to get BERT embeddings BERT_SMALL = 'bert_12_768_12' BERT_LARGE = 'bert_24_1024_16' SENTENCE_LENGTH = 100 #Default sentence length is too short for some WHD sentences BERT_EMBEDDING = BertEmbedding(model=BERT_LARGE, max_seq_length=SENTENCE_LENGTH) # Used for training, eval SEED_1 = mx.random.seed(12345) TRAIN_DATA_SIZE = 100 VAL_DATA_SIZE = 10 BATCH_SIZE = 10 EPOCHS = 10 THRESHOLD = 0.5 # Used for for label conversion WORDIDS_LAB_DICT, WORDIDS_IDX_DICT = make_wordids_dict() # Check out DICTS print("WORDIDS_LAB_DICT: wordids as keys in dictionary that serves as value for homograph key") for e in list(WORDIDS_LAB_DICT.items())[:5]: print(e) print("\n") print("WORDIDS_IDX_DICT: ints 
as keys in dictionary that serves as value for homograph key") for e in list(WORDIDS_IDX_DICT.items())[:5]: print(e)WORDIDS_LAB_DICT: wordids as keys in dictionary that serves as value for homograph key ('abstract', {'abstract_adj-nou': 0, 'abstract_vrb': 1}) ('abuse', {'abuse_nou': 0, 'abuse_vrb': 1}) ('abuses', {'abuses_nou': 0, 'abuses_vrb': 1}) ('addict', {'addict_nou': 0, 'addict_vrb': 1}) ('advocate', {'advocate_nou': 0, 'advocate_vrb': 1}) WORDIDS_IDX_DICT: ints as keys in dictionary that serves as value for homograph key ('abstract', {0: 'abstract_adj-nou', 1: 'abstract_vrb'}) ('abuse', {0: 'abuse_nou', 1: 'abuse_vrb'}) ('abuses', {0: 'abuses_nou', 1: 'abuses_vrb'}) ('addict', {0: 'addict_nou', 1: 'addict_vrb'}) ('advocate', {0: 'advocate_nou', 1: 'advocate_vrb'})Functionsdef get_embedding(sentence : str, tsv_name : str) -> List: # FUNCTIONALITY: Obtain a homograph embedding from all # the token embeddings of a sentence containing that homograph # INPUT: # sentence: string, 1 sentence containing a homograph from tsv of sentences # csv_name: string, name of csv of training data for 1 homograph, tsv name is the homograph # OUTPUT: array of float32s, the embedding of the homograph # Clarify that the csv name is the homograph homograph = tsv_name # Isolate homograph tokens; separate non-alabetic characters from alphabetic ones with a space, # preventing occurences like '4Minute' sentence_clean = re.sub(REGEX, SUB, sentence, 0) # Obtain word embeddings for sentence embs = BERT_EMBEDDING([sentence_clean]) # Find homograph embedding of embeddings for each token in sentence df = pd.DataFrame({'token': embs[0][0], 'embedding': embs[0][1]}) homograph_emb = df[df['token'] == homograph]['embedding'] homograph_emb = homograph_emb.tolist() if len(homograph_emb) < 1: # Didn't find homograph in sentence, check out the problem print(homograph) print(sentence) print(embs[0][0]) return homograph_emb[0] def get_data(path : str) -> Tuple[List, List[str], str, List[str]]: # FUNCTIONALITY: Get pronunciation labels and embedding features for LR # INPUT: Path to tsv with labeled sentences; 1 tsv per homograph # OUTPUT: List of embedding features, list of pronunciation labels, the homograph text string, sentences labels : List[str] = [] emb_features: List = [] sentences : List[str] = [] with open(path, "r", encoding="utf8") as source: for row in csv.DictReader(source, delimiter="\t"): labels.append(WORDIDS_LAB_DICT[os.path.basename(path[:-4])][row["wordid"]]) embedding = get_embedding(row['sentence'], os.path.basename(path)[:-4]) emb_features.append(nd.array(embedding)) # sentences used in debugging sentences.append(row['sentence']) labels = nd.array(labels) labels = labels.astype("float32") homograph : str = os.path.basename(path)[:-4] return emb_features, labels, homograph, sentences # Following two functions taken from: # https://mxnet.apache.org/versions/1.5.0/tutorials/gluon/logistic_regression_explained.html def train_model(train_dataloader): cumulative_train_loss = 0 for i, (data, label) in enumerate(train_dataloader): with autograd.record(): # Do forward pass on a batch of training data output = lr_net(data) # Calculate loss for the training data batch loss_result = loss(output, label) # Calculate gradients loss_result.backward() # Update parameters of the network trainer.step(BATCH_SIZE) # sum losses of every batch cumulative_train_loss += nd.sum(loss_result).asscalar() return cumulative_train_loss def validate_model(THRESHOLD, val_dataloader): cumulative_val_loss = 0 for i, (val_data, 
val_ground_truth_class) in enumerate(val_dataloader): # Do forward pass on a batch of validation data output = lr_net(val_data) # Similar to cumulative training loss, calculate cumulative validation loss cumulative_val_loss += nd.sum(loss(output, val_ground_truth_class)).asscalar() # Get prediction as a sigmoid prediction = lr_net(val_data).sigmoid() # Convert neuron outputs to classes predicted_classes = mx.ndarray.abs(mx.nd.ceil(prediction - THRESHOLD)) # Update validation accuracy accuracy.update(val_ground_truth_class, predicted_classes.reshape(-1)) targs_preds = (val_ground_truth_class, predicted_classes.reshape(-1)) # Calculate probabilities of belonging to different classes. F1 metric works only with this notation prediction = prediction.reshape(-1) probabilities = mx.nd.stack(1 - prediction, prediction, axis=1) #f1.update(val_ground_truth_class, probabilities) return cumulative_val_loss, targs_predsTrain and evaluate#https://mxnet.apache.org/versions/1.5.0/tutorials/gluon/logistic_regression_explained.html lr_net = nn.HybridSequential() with lr_net.name_scope(): lr_net.add(nn.Dense(units=10, activation='relu')) lr_net.add(nn.Dense(units=1)) #Hyperparameters from NK2021 lr_net.initialize(mx.init.Xavier()) trainer = Trainer(params=lr_net.collect_params(), optimizer='adam', optimizer_params={'learning_rate': 0.001, 'wd' : 0.01}) loss = gluon.loss.SigmoidBinaryCrossEntropyLoss() accuracy = mx.metric.Accuracy() targ_labels = [] pred_labels = [] #Train, eval a model for each tsv for train_path in tqdm(glob.iglob(TRAIN_PATH)): features_train, targets_train, homograph, sentences = get_data(train_path) train_dataset = ArrayDataset(features_train, targets_train) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False) cum_train_loss = train_model(train_dataloader) test_path = train_path.replace("train", "eval") features_test, targets_test, homograph, sentences = get_data(test_path) val_dataset = ArrayDataset(features_test, targets_test) val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True) for e in range(EPOCHS): avg_train_loss = train_model(train_dataloader) / TRAIN_DATA_SIZE cumulative_val_loss, targs_preds = validate_model(THRESHOLD, val_dataloader) avg_val_loss = cumulative_val_loss / VAL_DATA_SIZE hom_dict = WORDIDS_IDX_DICT[homograph] try: targ_labels.extend(hom_dict[int(i.asscalar())] for i in targs_preds[0]) pred_labels.extend(hom_dict[int(i.asscalar())] for i in targs_preds[1]) except: print(hom_dict) print(targs_preds) accuracy.reset()159it [4:21:43, 98.76s/it]Metricsprint("Accuracy") print(accuracy_score(targ_labels, pred_labels)) print("Balanced accuracy") print(balanced_accuracy_score(targ_labels, pred_labels))Accuracy 0.9831309904153355 Balanced accuracy 0.9575439891718961Study Topics We want to simulate the real-world situation where people in your profession circle make change for their career. In the last 6 mths there are around 10 people Assumptions Made 1. Assume the distribution of years of experience in a particular person's circle follows normal distribution with mean of the same yrs of experience of the person and regular stdev of 1.0.2. Assume that the average time for a person to switch job is around 3 yrs. So the distribution is a normal distribution follows normal distribution with mean as 5 and stdev as 1.0.3. In this study, we constrain the interested time window into 1 year or 12 months. 4. We assume the number of people a person know relatively intimately is around 50 people. 5. 
People across different years of experience will, at any time point, have a 50% chance of looking for a new job. 6. Assume the average preparation time is 6 months. 7. Assume that people who are looking and prepared have a 35% chance of finding a job they want that also wants them. 8. Assume that preparation can start at any time point in the year, which implies a uniform distribution. **Question to answer:** how many people will leave the circle, based on the simulation? Questions 1. Years of experience in total or in one company? - In this case, we only consider years of experience in one company. 2. The probability of changing jobs. Simulationimport numpy as np import matplotlib.pyplot as plt from scipy import stats import seaborn as sns sns.set(color_codes = True) %matplotlib inline %config InlineBackend.figure_format = 'retina'Hyperparametersnum_pop = 50 avg_exp = 3 stdev_exp = 1.0 avg_ts = 3 stdev_ts = 1.0Simulate the years of experienceprint('number of people who are looking for a new role at any time point is %s' % len(exp_pop))number of people who are looking for a new role at any time point is 25Simulate the starting and the ending date of preparationsta_prep = np.random.uniform(low=1, high =12, size = int(num_pop/2)) sta_prep end_prep = sta_prep + np.random.normal(6,3,int(num_pop/2)) end_prep**Those whose end date is greater than 12 will be ruled out because they pass the 1-year boundary in this case.**# 1 year window size end_pop = [i for i in end_prep if i <= 18] len(end_pop) # first 6 month window size halfyr_end_pop = [i for i in end_prep if i <= 12] len(halfyr_end_pop)Randomly sample these people's years of experience in the companyexp_pop = np.random.normal(avg_exp,stdev_exp,len(end_pop)) exp_pop pop_w_3more_exp = [i for i in exp_pop if i >= 3] len(pop_w_3more_exp) pop_w_3less_exp = [i for i in exp_pop if i < 3] len(pop_w_3less_exp)A quick exampleFinite difference methods estimate derivatives of functions from point-evaluations of said function. The same is true for probabilistic finite differences. This set of notes explains the very basics of computing numerical derivatives with `probfindiff`. As a side quest, some basic design choices are explained.import jax.numpy as jnp from probfindiff import central, differentiate, forwardFirst-order derivativesAt the heart of `probfindiff`, there is the function `differentiate()`, and a set of finite difference schemes. For example, to differentiate a function with a central scheme, compute the following.scheme, xs = central(dx=0.01) dfx, _ = differentiate(jnp.sin(xs), scheme=scheme) print(dfx, jnp.cos(0.0))WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)The function `differentiate` acts on point-evaluations of a function on some grid-points. These points can be chosen by a user, but more often than not, they are coupled tightly to the scheme itself.print("xs =", xs) print() print("scheme =", scheme)xs = [-0.01 0. 
0.01] scheme = FiniteDifferenceScheme(weights=DeviceArray([-5.0015533e+01, -6.9692903e-03, 5.0022503e+01], dtype=float32), covs_marginal=DeviceArray(-0.00038028, dtype=float32), order_derivative=DeviceArray(1, dtype=int32, weak_type=True))The function ``differentiate()`` is self is so simple and lightweight, you could in fact implement it yourself.dfx = jnp.sin(xs) @ scheme.weights print(dfx, jnp.cos(0.0))1.0003637 1.0The finite difference scheme expects that the array consists of function evaluations at a specific grid.This is important, because, for instance, smaller step-sizes imply different weights/coefficients, and different accuracy.The requirement of acting only on discretised functions is different to many existing finite difference implementations, which behave more like automatic differentiation (i.e., they act on the function _as a function_ and evaluate it internally).**Why?** This design choice is deliberate. In many applications, e.g. differential equations, the number of function evaluations counts. Depending on the implementation, some functions can also be batched efficiently, while others cannot.To make this transparent, `probfindiff` lets a user evaluate their functions themselves.It is therefore closer to `np.gradient` than to automatic differentiation.(There are also some other advantages regarding types, compilation, and vectorisation, but this is left for a different tutorial.) Higher-order derivativesIt is easy to compute higher-order derivatives by changing the scheme accordingly.scheme, xs = central(dx=0.01, order_derivative=2) d2fx, _ = differentiate(jnp.sin(xs), scheme=scheme) print(d2fx, -jnp.sin(0.0))-1.1569691e-06 -0.0Higher-order methodsTo increase the accuracy of the approximation, the method-order can be increased freely.scheme, xs = central(dx=0.02, order_method=4) dfx, _ = differentiate(jnp.sin(xs), scheme=scheme) print(dfx, jnp.cos(0.0))0.9998326 1.0Forward, central, and backward schemesWhile central schemes tend to be more accurate than forward and backward schemes, all three are available. For example, we can replace the central scheme with a forward schemescheme, xs = forward(dx=0.02) dfx, _ = differentiate(jnp.sin(xs), scheme=scheme) print(dfx, jnp.cos(0.0))1.0013572 1.0batched 1D mode in 3 separate goes does not work! (need work-around for maximum of 2^{27} entries in plan for 3D with 8x Hockney algorithm in double) http://stackoverflow.com/questions/26918101/1d-ffts-of-columns-and-rows-of-a-3d-matrix-in-cuda however, the following method might work for a work-around: - batch 2D x-y FFTs until plan is full, then launch next batch of x-y FFTs- launch len(y) many batches with length len(x) each of 1D z FFTs - check that transposing is not quicker for memory alignment for the last operation? 2Da2d = a3d[1].astype(np.complex128) nx = 32#1024#*4 ny = 16#1024#*4 a2d = gpuarray.zeros((ny, nx), dtype=np.complex128) + 1 y, x = map(gpuarray.to_gpu, np.meshgrid(np.arange(a2d.shape[0], dtype=np.float64), np.arange(a2d.shape[1], dtype=np.float64)) ) a2d *= cumath.sin(2*x) * cumath.sin(y) plt.contourf(np.real(a2d.get())) plt.colorbar() plan2d = fft.Plan(shape=a2d.shape, in_dtype=np.complex128, out_dtype=np.complex128) res = gpuarray.empty(a2d.shape, dtype=np.complex128) - 42. 
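# Hedged aside (not part of the original notebook): before building the cuFFT plans,
# the proposed work-around can be sanity-checked on the CPU with NumPy. The DFT is
# separable, so batched 2D x-y transforms followed by 1D transforms along z must
# reproduce the full 3D FFT; the (nz, ny, nx) = (4, 16, 32) shape is an arbitrary choice.
import numpy as np
_a = np.random.rand(4, 16, 32) + 1j * np.random.rand(4, 16, 32)   # axes: (z, y, x)
_step_xy = np.fft.fft2(_a, axes=(1, 2))        # batched 2D x-y FFTs
_step_z = np.fft.fft(_step_xy, axis=0)         # 1D FFTs along z
assert np.allclose(_step_z, np.fft.fftn(_a))   # the two-stage scheme equals the 3D FFT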
%%timeit fft.fft(a2d, res, plan2d) plt.imshow(np.abs(res.get()), interpolation='none', origin='lower') plt.colorbar()FFT along x direction (first)plan_1 = fft.Plan(shape=a2d.shape[1], in_dtype=np.complex128, out_dtype=np.complex128, batch=a2d.shape[0], inembed=np.asarray(a2d.shape), onembed=np.asarray(a2d.shape), istride=1, idist=a2d.shape[1], ostride=1, odist=a2d.shape[1]) res = gpuarray.empty(a2d.shape, dtype=np.complex128) - 42. %%timeit fft.fft(a2d, res, plan_1) context.synchronize() plt.imshow(np.abs(res.get()), interpolation='none', origin='lower') plt.colorbar() np_res = np.fft.fft(a2d.get(), axis=1) plt.imshow(np.abs(np_res), interpolation='none', origin='lower') plt.colorbar()alternative first FFT along y directionplan_1a = fft.Plan(shape=a2d.shape[0], in_dtype=np.complex128, out_dtype=np.complex128, batch=a2d.shape[1], inembed=np.asarray(a2d.shape), onembed=np.asarray(a2d.shape), istride=a2d.shape[1], idist=1, ostride=a2d.shape[1], odist=1) resa = gpuarray.empty(a2d.shape, dtype=np.complex128) - 42. fft.fft(a2d, resa, plan_1a) plt.imshow(np.abs(resa.get()), interpolation='none', origin='lower') plt.colorbar() np_res = np.fft.fft(a2d.get(), axis=0) plt.imshow(np.abs(np_res), interpolation='none', origin='lower') plt.colorbar() np.allclose(np_res, res.get())FFT along y directionplan_2 = fft.Plan(shape=a2d.shape[0], in_dtype=np.complex128, out_dtype=np.complex128, batch=a2d.shape[1], inembed=np.asarray(a2d.shape), onembed=np.asarray(a2d.shape), idist=1, istride=a2d.shape[1], odist=1, ostride=a2d.shape[1]) res2 = gpuarray.empty(a2d.shape, dtype=np.complex128) - 42. %%timeit fft.fft(res, res2, plan_2) plt.imshow(np.abs(res2.get()), interpolation='none', origin='lower') plt.colorbar() a2d_h = a2d.get() %%timeit np.fft.fft2(a2d_h) np_res = np.fft.fft2(a2d.get()) plt.imshow(np.abs(np_res), interpolation='none', origin='lower') plt.colorbar() np.allclose(res2.get(), np_res) fftw_1 = pyfftw.builders.fft(a2d_h, axis=0) fftw_2 = pyfftw.builders.fft(a2d_h, axis=1) %%timeit fftw_res = fftw_1(a2d_h) fftw_res = fftw_2(fftw_res) fftw_res = fftw(a2d.get()) np.allclose(fftw_res, np_res) pyfftw.interfaces.cache.enable() pyfftw.interfaces.cache.set_keepalive_time(30) %%timeit pyfftw.interfaces.numpy_fft.fft2(a2d_h) 25.1/1.171Da1d = a3d[1,1].astype(np.complex128) plt.plot(np.real(a1d.get())) plan = fft.Plan(shape=a1d.shape, in_dtype=np.complex128, out_dtype=np.complex128) res = gpuarray.zeros(a1d.shape, dtype=np.complex128) - 42. fft.fft(a1d, res, plan) plt.plot(np.abs(res.get())[:(a1d.shape[0]/2 + 1)]) plt.plot(np.abs(np.fft.fft(a1d.get()))[:(a1d.shape[0]/2 + 1)])Toy MC exampleThis script generates a kink plot similar to that seen in Fig 9d of http://arxiv.org/abs/0711.4008 .It is intended to illustrate the way that the mass of the invisible daughters of particleswhich have been pair-produced can (in principle) be determined by identifying the locationof kink in the boundary of the region of (chi,mt2)-space which is accessible by events.Specifically: the upper boundary of the filled region of the plot which the script generatesis seen to have a "kink" at the location (chi=50, mt2=100), showing that the generatedinvisible daughter particles had a mass of 50 and that the generated parent particles had a mass of 100.It should be noted that the actual kinematics modelled in the program below are not intendedto represent physical reality. 
For example: the factor 0.01 in the ran_three_direc() functionbreaks isotropy in a purely ad-hoc way and here is used to (very crudely) approximate thebias toward central production seen in supersymmetric events. The event generation processused is therefore not Lorentz Invariant and should therefore not be used for any seriouspurpose other than, as here, the pedagocial illustration of a kinematic property ofthe mt2 variable.def unif(): return numpy.random.uniform(-1.0, 1.0) def ran_three_direc(mag=1): while True: x, y, z = unif(), unif(), 0.01 * unif() psq = x ** 2 + y ** 2 + z ** 2 if 0.02 < psq < 1: # Protection against division by zero p = numpy.sqrt(psq) return mag * x / p, mag * y / p, mag * z / p def ran_four_mom(mass_squared, mag): x, y, z = ran_three_direc(mag=mag) return numpy.sqrt(mass_squared + mag ** 2), x, y, z def boost(bx, by, bz, four_vec): bsq = bx ** 2 + by ** 2 + bz ** 2 gam = 1.0 / numpy.sqrt(1.0 - bsq) thing = gam ** 2 / (1 + gam) # This is the same as (gam-1)/bsq but is safe in the limit bsq->0. v0, v1, v2, v3 = four_vec[0], four_vec[1], four_vec[2], four_vec[3] return ( gam * v0 + bx * gam * v1 + by * gam * v2 + bz * gam * v3, bx * gam * v0 + (1 + bx * bx * thing) * v1 + (0 + bx * by * thing) * v2 + (0 + bx * bz * thing) * v3, by * gam * v0 + (0 + by * bx * thing) * v1 + (1 + by * by * thing) * v2 + (0 + by * bz * thing) * v3, bz * gam * v0 + (0 + bz * bx * thing) * v1 + (0 + bz * by * thing) * v2 + (1 + bz * bz * thing) * v3, ) def decay(parent_four_mom, m_parent, mv, mi): E, px, py, pz = parent_four_mom bx, by, bz = px / E, py / E, pz / E # Velocity beta! u = ran_three_direc() # print("Unit ", u[0]**2 + u[1]**2 + u[2]**2) p = numpy.sqrt((m_parent - mi - mv) * (m_parent + mi - mv) * (m_parent - mi + mv) * (m_parent + mi + mv)) / ( 2 * m_parent) v = numpy.sqrt(mv ** 2 + p ** 2), +p * u[0], +p * u[1], +p * u[2] i = numpy.sqrt(mi ** 2 + p ** 2), -p * u[0], -p * u[1], -p * u[2] v_out = boost(bx, by, bz, v) i_out = boost(bx, by, bz, i) # print ("E comp ", v_out[0]+i_out[0], E) # print ("x comp ", v_out[1]+i_out[1], px) # print () return v_out, i_out def get_mc_data(n_samples: int, n_chi_bins: int): """ Get N data points to plot, where N = n_samples * n_chi_bins data points. Args: n_samples: The number of samples to generate with the toy MC. n_chi_bins: The number of bins in invisible particle mass to use. Returns: (N,), (N,) Two arrays, the first is chi, the second is MT2. """ m_parent1 = 100 m_parent2 = 100 m_vis1 = 10 m_vis2 = 10 m_invis = 50 chi_max = 100.0 # For every sample, we'll compute MT2 for this range of chi. chis = numpy.arange(n_chi_bins + 1) * chi_max / n_chi_bins data_chi = [] data_mt2 = [] for _ in range(n_samples): parent1 = ran_four_mom(m_parent1 ** 2, numpy.random.uniform(10, 200)) parent2 = ran_four_mom(m_parent2 ** 2, numpy.random.uniform(10, 200)) p1, i1 = decay(parent1, m_parent1, m_vis1, m_invis) p2, i2 = decay(parent2, m_parent2, m_vis2, m_invis) computed_mt2 = mt2( m_vis1, p1[0], p1[1], m_vis2, p2[0], p2[1], i1[0] + i2[0], i1[1] + i2[1], chis, chis) # Symmetric configuration -- same mass for both invisible particles. data_chi.append(chis) data_mt2.append(computed_mt2) # Re-arrange and plot data. 
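# Added comment for clarity (not in the original script): each generated event
# contributes one MT2 value per entry of `chis`, so the flattened arrays below hold
# len(chis) points per sample. The 2D histogram built from these points is what
# exhibits the kink at (chi = generated invisible-daughter mass, MT2 = generated parent mass).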
data_chi = numpy.asarray(data_chi).ravel() data_mt2 = numpy.asarray(data_mt2).ravel() return data_chi, data_mt2 %%time n_samples = 10000 n_chi_bins = 200 # About 90% of the CPU time here is spent computing MT2 data_chi, data_mt2 = get_mc_data(n_samples, n_chi_bins) pyplot.figure(figsize=(9, 5)) pyplot.hist2d(data_chi, data_mt2, bins=(n_chi_bins + 1, 200), cmap="jet", cmin=1, norm=matplotlib.colors.LogNorm()) pyplot.colorbar().set_label("# events / bin") pyplot.title("$M_{T2}$ kink") pyplot.xlabel(r"$\chi$") pyplot.ylabel(r"$M_{T2}(\chi)$") pyplot.tight_layout() pyplot.show()Frequently Asked Questions (FAQs)Have a question? Try searching the FAQs! How do I install MASSpy?There are several ways to install MASSpy. To use ``pip`` to [install MASSpy from PyPI](https://pypi.python.org/pypi/masspy)```pythonpip install masspy```Check out the [Quick Start Guide](../installation/quickstart.rst) to learn more about getting started! How do I cite MASSpy?A manuscript is in preparation for publication and will be the proper reference for citing the **MASSpy** software package in the future. In the meantime, feel free to cite the preprint MASSpy: Building, simulating, and visualizing dynamic biological models in Python using mass action kinetics, which can be found at [bioRxiv](https://www.biorxiv.org/content/10.1101/2020.07.31.230334v1). How do I change the rate expression for a reaction?Use the `MassModel.add_custom_rate()` method.import mass.test model = mass.test.create_test_model("textbook")Using license file /Users/zhaiman/opt/licenses/gurobi.lic Academic license - for non-commercial use onlyWhen metabolites are added to reactions, **MASSpy** will generates rate expressions automatically based on mass action kinetics and the kinetic reversibility given by the `MassReaction.reversible` attribute.print(model.reactions.PGI.rate)kf_PGI*(g6p_c(t) - f6p_c(t)/Keq_PGI)If a reaction is associated with a model, a custom rate expression can be set using the `MassModel.add_custom_rate()` method. The `add_custom_rate()` method requires the corresponding reaction object and a string representing the custom rate expression to set. For example, to set a simple Michaelis Menten rate expression with $V_{max}$ and $K_{m}$ parameters:custom_parameter_dict = {"vmax_PGI": None, "Km_PGI": None} model.add_custom_rate( model.reactions.PGI, custom_rate="(vmax_PGI * g6p_c)/(Km_PGI + g6p_c)", custom_parameters=custom_parameter_dict) print(model.reactions.PGI.rate)vmax_PGI*g6p_c(t)/(Km_PGI + g6p_c(t))The reaction rate expression is converted from a string to a symbolic expression using the `sympy.sympify()` function. All metabolites and standard reaction parameters (i.e. returned by the `MassReaction.all_parameter_ids`), and boundary conditions are recognized. 
However, all additional parameters must be set as a custom parameter in the `MassModel.custom_parameters` attribute.print("Recognized Parameters: {!r}".format(model.reactions.PGI.all_parameter_ids)) print("Custom Parameters: {!r}".format(list(custom_parameter_dict)))Recognized Parameters: ['kf_PGI', 'Keq_PGI', 'kr_PGI', 'v_PGI'] Custom Parameters: ['vmax_PGI', 'Km_PGI']Compile MRIQC metrics into TSV filesThis should do the same thing as an MRIQC call at the group level, but without having to use Singularity.import json import os.path as op from glob import glob import pandas as pd def load_json(f): with open(f, "r") as fo: data = json.load(fo) return data base_dir = "/home/data/nbc/misc-projects/Salo_PowerReplication/" sub_dir = "derivatives/mriqc/" dsets = ["dset-dupre", "dset-cambridge", "dset-camcan", "dset-dalenberg", "dset-cohen"] modalities = ["anat", "func"] for dset in dsets: print(f"Processing {dset}") data_dict = {} dset_dir = op.join(base_dir, dset) deriv_dir = op.join(dset_dir, sub_dir) sub_folders = sorted(glob(op.join(deriv_dir, "sub-*"))) sub_folders = [sf for sf in sub_folders if op.isdir(sf)] for sub_folder in sub_folders: sub_id = op.basename(sub_folder) for mod in modalities: mod_folder = op.join(sub_folder, mod) jsons = sorted(glob(op.join(mod_folder, "*.json"))) for json_file in jsons: json_mod = "_".join(op.basename(json_file).split("_")[1:]).split(".")[0] if json_mod not in data_dict.keys(): data_dict[json_mod] = pd.DataFrame() json_data = load_json(json_file) json_data = {k: v for k, v in json_data.items() if not isinstance(v, dict)} temp_df = pd.DataFrame(json_data, index=[sub_id]) data_dict[json_mod] = data_dict[json_mod].append(temp_df) for out_name, df in data_dict.items(): out_file = op.join(deriv_dir, out_name + ".tsv") df.to_csv(out_file, sep="\t", index_label="participant_id")Processing dset-dupre Processing dset-cambridge Processing dset-camcan Processing dset-dalenberg Processing dset-cohenAbout Notebook- [**Kaggle Housing Dataset**](https://www.kaggle.com/ananthreddy/housing)- Implement linear regression using: 1. **Batch** Gradient Descent 2. **Stochastic** Gradient Descent 3. 
**Mini-batch** Gradient Descent **Note**: _Trying to implement using **PyTorch** instead of numpy_import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import torch def banner(msg, _verbose=1): if not _verbose: return print("-"*80) print(msg.upper()) print("-"*80)Data import and preprocessingdf = pd.read_csv('Housing.csv', index_col=0) def convert_to_binary(string): return int('yes' in string) for col in df.columns: if df[col].dtype == 'object': df[col] = df[col].apply(convert_to_binary) data = df.values scaler = StandardScaler() data = scaler.fit_transform(data) X = data[:, 1:] y = data[:, 0] print("X: ", X.shape) print("y: ", y.shape) X_train, X_valid, y_train, y_valid = map(torch.from_numpy, train_test_split(X, y, test_size=0.2)) print("X_train: ", X_train.shape) print("y_train: ", y_train.shape) print("X_valid: ", X_valid.shape) print("y_valid: ", y_valid.shape) class LinearRegression: def __init__(self, X_train, y_train, X_valid, y_valid): self.X_train = X_train self.y_train = y_train self.X_valid = X_valid self.y_valid = y_valid self.Theta = torch.randn((X_train.shape[1]+1)).type(type(X_train)) def _add_bias(self, tensor): bias = torch.ones((tensor.shape[0], 1)).type(type(tensor)) return torch.cat((bias, tensor), 1) def _forward(self, tensor): return torch.matmul( self._add_bias(tensor), self.Theta ).view(-1) def forward(self, train=True): if train: return self._forward(self.X_train) else: return self._forward(self.X_valid) def _cost(self, X, y): y_hat = self._forward(X) mse = torch.sum(torch.pow(y_hat - y, 2))/2/X.shape[0] return mse def cost(self, train=True): if train: return self._cost(self.X_train, self.y_train) else: return self._cost(self.X_valid, self.y_valid) def batch_update_vectorized(self): m, _ = self.X_train.size() return torch.matmul( self._add_bias(self.X_train).transpose(0, 1), (self.forward() - self.y_train) ) / m def batch_update_iterative(self): m, _ = self.X_train.size() update_theta = None X = self._add_bias(self.X_train) for i in range(m): if type(update_theta) == torch.DoubleTensor: update_theta += (self._forward(self.X_train[i].view(1, -1)) - self.y_train[i]) * X[i] else: update_theta = (self._forward(self.X_train[i].view(1, -1)) - self.y_train[i]) * X[i] return update_theta/m def batch_train(self, tolerance=0.01, alpha=0.01): converged = False prev_cost = self.cost() init_cost = prev_cost num_epochs = 0 while not converged: self.Theta = self.Theta - alpha * self.batch_update_vectorized() cost = self.cost() if (prev_cost - cost) < tolerance: converged = True prev_cost = cost num_epochs += 1 banner("Batch") print("\tepochs: ", num_epochs) print("\tcost before optim: ", init_cost) print("\tcost after optim: ", cost) print("\ttolerance: ", tolerance) print("\talpha: ", alpha) def stochastic_train(self, tolerance=0.01, alpha=0.01): converged = False m, _ = self.X_train.size() X = self._add_bias(self.X_train) init_cost = self.cost() num_epochs=0 while not converged: prev_cost = self.cost() for i in range(m): self.Theta = self.Theta - alpha * (self._forward(self.X_train[i].view(1, -1)) - self.y_train[i]) * X[i] cost = self.cost() if prev_cost-cost < tolerance: converged=True num_epochs += 1 banner("Stochastic") print("\tepochs: ", num_epochs) print("\tcost before optim: ", init_cost) print("\tcost after optim: ", cost) print("\ttolerance: ", tolerance) print("\talpha: ", alpha) def mini_batch_train(self, tolerance=0.01, alpha=0.01, batch_size=8): converged = False m, _ = self.X_train.size() X = 
self._add_bias(self.X_train) init_cost = self.cost() num_epochs=0 while not converged: prev_cost = self.cost() for i in range(0, m, batch_size): self.Theta = self.Theta - alpha / batch_size * torch.matmul( X[i:i+batch_size].transpose(0, 1), self._forward(self.X_train[i: i+batch_size]) - self.y_train[i: i+batch_size] ) cost = self.cost() if prev_cost-cost < tolerance: converged=True num_epochs += 1 banner("Mini-batch") print("\tepochs: ", num_epochs) print("\tcost before optim: ", init_cost) print("\tcost after optim: ", cost) print("\ttolerance: ", tolerance) print("\talpha: ", alpha) %%time l = LinearRegression(X_train, y_train, X_valid, y_valid) l.mini_batch_train() %%time l = LinearRegression(X_train, y_train, X_valid, y_valid) l.stochastic_train() %%time l = LinearRegression(X_train, y_train, X_valid, y_valid) l.batch_train()Linear RegressionIn statistics, linear regression is a linear approach to modeling the relationship between a scalar output label (or dependent variable) and one or more explanatory variables (or features/attributes). The case of one explanatory variable is called simple linear regression. We will work through a practical example. The steps we are going to follow are: 1. Importing important libraries 2. Importing the data 3. Feature selection 4. Data visualization 5. Finding the weight (slope) and bias (intercept) of the fitted line for a single feature variable 6. Visualizing the fitted line 7. Finding the weights (slopes) and bias (intercept) of the fitted line for multiple feature variables 8. Comparing with the fitted line obtained from a scikit-learn model (a machine learning library) **1. Importing important libraries**# Importing important libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt # Importing model from sklearn.linear_model import LinearRegression**2. Importing Data.** The data used for predicting sales is taken from Kaggle. Link to the dataset --> https://www.kaggle.com/sazid28/advertising.csv#read the file and store as dataframe data = pd.read_csv('Advertising.csv', index_col=[0]) # Let's see the top five rows of the dataset data.head()**3. Feature Selection.**# Columns we have data.columns # First we will implement a single-variable linear regression model # Let's take TV as our feature variable to predict the sales price_data=data[['TV', 'sales']] price_data.head()**4. Data Visualization.**Before we start with the linear regression, we should check whether our dependent and independent variables are linearly related. We can check this on a scatter plot:# Let's split our advertising data set into X (the feature variable, TV advertising) and y (the sales) X=price_data['TV'] y=price_data['sales'] # Let's plot it and try to visualize the data # Plot blank figure with the axis figure, ax =plt.subplots() # Plot a scatter plot for feature ax.scatter(X, y, color='green') # Set titles and axis labels ax.set_title('Effect of advertisement on sales') ax.set_xlabel('Advertisement on TV') ax.set_ylabel('sales') # Set legends ax.legend(labels=['(TV, sales)']);From the above graph, we can see that the feature variable is linearly related to the dependent variable. **5. 
Finding weight(slope) and constant(intercept) of the fitting line.** To find the fitting line `y=w*X + b` we have to find out the value of `w`( the slope of the line) and `b` (intercept of the line) by the Least Square fitting method: ![title](least_square_fitting.jpg) Where: * `x bar` = mean of X * `y bar` = mean of y# finding X_mean and y_mean X_mean = X.mean() y_mean = y.mean() # finding th value of m and c denominator=0 numerator=0 for i in range(1, len(X)+1): numerator+=(X[i] - X_mean)*(y[i] - y_mean) denominator+=(X[i] - X_mean)**2 w = numerator/denominator b = y_mean - X_mean * w print(f'The value of slope(w): {w*1:.2f}') # value of slope(m) upto two decimal place print(f'The value of intercept(b): {b:.2f}') # value of intercept(c) upto two decimal placeThe value of slope(w): 0.05 The value of intercept(b): 7.03**6. Visualizing fitting line.**# Let's draw the fitting line into the House pricing dataset. # Make a figure with axis figure1, ax =plt.subplots() # Plot feature ax.scatter(X, y, color='red') # Set title and axis labels ax.set_title('Effect of advertisement on sales') ax.set_xlabel('Advertisement on TV') ax.set_ylabel('sales') # Plot fitting line y_axis = w* X + b ax.plot(X, y_axis) # Set Legends ax.legend(labels=['fitting line', 'scatter plot']);**7. Finding weights and constants for multi-variable Linear regression problem using vector formulation**#importing data data1=data[['TV', 'radio', 'newspaper']] # including a column of one to find the value of intercept simultaneously data1['ones']=pd.DataFrame([1]*(len(data)+1)); #data1.head() # converting into array form to do matrix multiplication to find out weights using the vector method. X=np.array(data1) y=np.array(data['sales'])Mathemtical formulation for finding out weights for the 3 features is:![title](images.png)and then the prediction can be calculated as![title](images2.png)# vector multiplication to find weight vector w = np.linalg.inv(np.transpose(X).dot(X)).dot(np.transpose(X)).dot(np.transpose(y)) #print(w) #print(X) print(f'weight(w) vector for our multi feature variables is {w[1:]}') print(f'intercept(b) vector for our multi feature variables is {w[0]:.2f}') # Predicting y from the calculated weight and intercept y_pred1 = X.dot(np.transpose(w)) y_pred1[:5]you cn find about R2 score from this website:https://www.britannica.com/science/coefficient-of-determination# finding the accuracy from sklearn.metrics import r2_score print(f'accuracy obtained is {r2_score(y, y_pred1)*100:.2f}%')accuracy obtained is 89.72%**8. comparing fitting line obtained from scikit-learn model (A machine learning library)****Linear regression with single feature variable**# setting a model model1 = LinearRegression() X=data['TV'] #print(X[:5]) # reshaped 1-D 'X' X = np.array(X) #print(X[:5]) X = X.reshape(-1, 1) #print(X[:5]) # Fitting the data into model model1.fit(X, y) #Evaluate our model print(f'Accuracy we obtain from the model is {model1.score(X, y)*100:.2f}%') # predicting y value y_pred2 = model1.predict(X) # plot our predicted price we getting as y_pred with old x we should get the same plot figure2, ax = plt.subplots() ax.scatter(X, y, color='red') ax.plot(X, y_pred2) ax.set_xlabel('No. 
of Advertisement on TV') ax.set_ylabel('sales') ax.legend(labels=['fitting line', 'scatter plot']);Accuracy we obtain from the model is 61.19%**Multi-variable linear regression using scikit-learn library**Here we are going to do a practice problem on multivariate features using machine learning libraries:# obtaining data sales_data = pd.read_csv('Advertising.csv', index_col=[0]) sales_data.head() sales_data.columns # Now we take Area(SqFt), Number of bedroom(Bedrooms) and Number of bathrooms(Bathrooms) as our feature varibales feature_variable = sales_data[['TV', 'radio', 'newspaper']] # Let's see the distribution of the feature variable though histogram plot. print('1. Distribution of the feature variables:') feature_variable.hist() plt.show() # Price of the house sales = sales_data[['sales']] # We set our X and y variable X=feature_variable y=sales #set a particular random variable so that answer obtained can be the same for everyone and everytime np.random.seed(42) # spliting the data into train data and test data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2) # fitting our data into model from sklearn.linear_model import LinearRegression model2=LinearRegression() model2.fit(X_train, y_train) # Evaluating model print(f'2. Accuracy of the Linear regression model: {model2.score(X_test, y_test)*100:.2f}%' )1. Distribution of the feature variables:Install tree-sitter parser & clang1) Install tree-sitter parser- ```cd evaluation/CodeBLEU/parser```- ```bash build.sh```2) Install clang- goto `data/github/preprocessing/src/code_tokenizer.py` and replace path with your `clang` path- **NOTE:** You may try to copy `clang` from `/tmp/clang`, but i'm not sure it will work Preprocessing pipeline PLBART Tokenization/detokenization- **tokenization**: Code -> Tokens (a.k.a. python tokenizer) -> SPM.- so we need to do detokenization: SPM -> Tokens -> syntax highliting -> Printed Codefrom itertools import islice import subprocess from pygments import highlight from pygments.lexers import Python3Lexer from pygments.formatters import HtmlFormatter, TerminalFormatter, TerminalTrueColorFormatter, Terminal256Formatter from IPython.core.display import HTML, display import data.github.preprocessing.src.code_tokenizer as code_tokenizer def peak(path: str, n=10) -> str: """reads first n lines from file""" with open(path, 'r') as f: head = list(islice(f, n)) return "".join(head) def detok(code, lang='python'): detokenize = getattr(code_tokenizer, f"detokenize_{lang}") return detokenize(code) def tokenize(code, lang='python'): detokenize = getattr(code_tokenizer, f"tokenize_{lang}") return detokenize(code) def colored(code: str, formatter=Terminal256Formatter) -> str: return highlight(code, Python3Lexer(), formatter()) def remove_bpe(s: str): return s.replace(" ", "").replace("▁", " ").lstrip(' ')How to detokenize- tokenizer/detokenizer is implemented in `PLBART/data/github/preprocessing`- you may also use `tokenize` function# raw BPE tokens, brr... print(peak(path_to_spm_file, 20)) # nice code! 
print(colored(detok( remove_bpe(peak(path_to_spm_file, 20)) ))) tree_sitter_path=f"{HOME}/evaluation/CodeBLEU/parser/my-languages.so" !ls $tree_sitter_path from tree_sitter import Language, Parser, Tree PY_LANGUAGE = Language(tree_sitter_path, 'python') JAVA_LANGUAGE = Language(tree_sitter_path, 'java') def traverse_tree(tree: Tree): cursor = tree.walk() reached_root = False while reached_root == False: yield cursor.node if cursor.goto_first_child(): continue if cursor.goto_next_sibling(): continue retracing = True while retracing: if not cursor.goto_parent(): retracing = False reached_root = True if cursor.goto_next_sibling(): retracing = False class TreeSitterCode: """ stores 1) the source code in bytes 2) the tree allows to traverse nodes in the AST tree, replace values. updates both with `replace` method (tool to change identifiers) """ def __init__(self, code: str, parser: Parser): self.code_bytes = bytes( code, "utf-8" ) self.parser = parser self.tree = self.parser.parse(self.code_bytes) @property def code(self): return self.code_bytes.decode() def traverse(self): for item in traverse_tree(self.tree): yield item def identifiers(self): def is_identifier(node): return node.type=='identifier' for node in self.traverse(): if is_identifier(node): yield node def get_value(self, node): return self.code_bytes[node.start_byte: node.end_byte] def inspect(self): print("="*80) for it in code_tree.traverse(): print(it.type, '\t', code_tree.get_value(it), '\t', it.start_point, '\t', it.end_point, '\t', it.start_byte, '\t', it.end_byte) def replace(self, node: Tree, new_value: bytes): assert not b'\n' in new_value start_byte_old, end_byte_old = node.start_byte, node.end_byte length = len(new_value) self.code_bytes = self.code_bytes[:node.start_byte] + \ new_value + \ self.code_bytes[node.end_byte:] assert node.end_point[0] == node.start_point[0], "we should edit one line" # print(start_byte_old, end_byte_old, start_byte_old + length, node.start_point, node.end_point, (node.start_point[0], node.start_point[1] + length)) self.tree.edit( start_byte=start_byte_old, old_end_byte=end_byte_old, new_end_byte=start_byte_old + length, start_point=node.start_point, old_end_point=node.end_point, new_end_point=(node.start_point[0], node.start_point[1] + length), ) self.tree = self.parser.parse(self.code_bytes, self.tree) # self.tree = self.parser.parse(self.code_bytes) code_example =""" my_file = "input.txt" with open(my_file, "r") as f: print(f.readlines()) """ parser = Parser() parser.set_language(PY_LANGUAGE) code_tree = TreeSitterCode(code_example, parser) print("") print(colored(code_tree.code)) for identifier in code_tree.identifiers(): value = code_tree.get_value(identifier) if value == b'f': code_tree.replace(identifier, b'my_file') # insert bug break print("") print(colored(code_tree.code)) # code_tree.inspect() for identifier in code_tree.identifiers(): value = code_tree.get_value(identifier) if value == b'f': code_tree.replace(identifier, b'my_file') # insert bug break print("") print(colored(code_tree.code)) # code_tree.inspect() my_file = "input.txt" with open(my_file, "r") as f: print(f.readlines()) my_file = "input.txt" with open(my_file, "r") as my_file: print(f.readlines()) my_file = "input.txt" with open(my_file, "r") as my_file: print(my_file.readlines())Get data# dataset fpaths = [] labels = [] boy_fpaths = [] boy_labels = [] word_spoken = [] dataset='new' dataset = '84{}'.format(dataset) input_folder = '..\data\{}'.format(dataset) # pars the input directory that contains audio files # get 
audio files and their labels for f in os.listdir(input_folder): for w in os.listdir(input_folder+'\\'+ f): # check whether the file is a wav or not if (w.find('wav')!=-1): fpaths.append(input_folder+'\\'+f+'\\'+w) labels.append(f) # idx_schunk=w.find('schunk') # idx_chunk=w.find('chunk') # if (idx_chunk!=idx_schunk==-1): # if (int(w[idx_chunk-2:idx_chunk])<80): # fpaths.append(input_folder+'\\'+f+'\\'+w) # labels.append(f) # else: # boy_fpaths.append(input_folder+'\\'+f+'\\'+w) # boy_labels.append(f) # else: # boy_fpaths.append(input_folder+'\\'+f+'\\'+w) # boy_labels.append(f) if f not in word_spoken: word_spoken.append(f) print("Spoken words: "+ str(word_spoken)) print("Number of classes: " + str(len(word_spoken))) # size of dataset print(len(labels)) label_set=list(set(labels)) label_set.sort() # # boys # boy_label_set=list(set(boy_labels)) # boy_label_set.sort() label_set len(label_set)Extracting frequency-domain featuresIn the second stage we convert the signal into the frequency domain. In most modern speech recognition systems, frequency-domain features are a key component. In the case of multiple speakers, MFCC feature extraction works best. After converting a signal into the frequency domain, it must be converted into a usable form. **Mel Frequency Cepstral Coefficients (MFCC)** are a good way to do that. *MFCC* takes the power spectrum of a signal and then uses a combination of filter banks and the discrete cosine transform to extract the patterns of phones, i.e. the features. After extracting the **MFCC** features, the data is arranged into a single data matrix, and a label vector with the correct label for each data file is created.from scipy.io import wavfile def get_mfcc(fpaths): data = [] mfcc_max_length = 0 # first file destination name and index file_name = '' word_spoken_index = 0 count=0; for n,file in enumerate(fpaths): # show current destination if (file.find(file_name)<=0): file_name=word_spoken[word_spoken_index] print(word_spoken[word_spoken_index]) word_spoken_index+=1 # read file sampling_freq, audio = wavfile.read(file) # Extract MFCC features mfcc_features = mfcc(audio, sampling_freq) mfcc_len=mfcc_features.shape[0]*mfcc_features.shape[1] # get length of the largest feature array if mfcc_len>mfcc_max_length: mfcc_max_length=mfcc_len # flatten the features into a 2D array mfcc_features=np.resize(mfcc_features,(1,mfcc_len)) data.insert(n,mfcc_features) return data, mfcc_max_length data, max_len=get_mfcc(fpaths) # boy_data=get_mfcc(boy_fpaths) print("Length of longest audio array: " + str(max_len))Length of longest audio array: 4914Get Labelsdef get_labels(labels, label_set): label_idx_dic={} # dict for storing labels #Each sample file is one row in data, and has one entry in labels print('Number of files total:', len(labels)) all_labels = np.zeros(len(labels),dtype=int) for n, l in enumerate(label_set): label_idx_dic[n]=l all_labels[np.array([i for i, _ in enumerate(labels) if _ == l])] = n print('Labels and label indices', all_labels) return label_idx_dic, all_labels label_dic, all_label = get_labels(labels,label_set) # boy_label_dic, boy_all_label = get_labels(boy_labels,boy_label_set) # save label dictionary def save_dict_to_file(dic,dataset): with open('dict{}.txt'.format(dataset),'w') as file: file.write(json.dumps(dic)) # save dict save_dict_to_file(label_dic,dataset)Pad Zeros to Small Arraysdef data_x_y(data,all_labels): x_data = np.zeros((len(data),2808+1), dtype=float) for i,_d in enumerate(data): if (_d.shape[1]>2808): x_data[i,1:]=_d[0,0:2808] else : x_data[i,1:_d.shape[1]+1]=_d x_data[i,0]=all_labels[i] return x_dataSAVE 
DATASETfile_name="84new" # save all features data np.savetxt('data/{}.csv'.format(file_name), data_x_y(data,all_label), delimiter=',',header=data_header) # np.savetxt('data/{}/data41{}.csv'.format(save_path,'ADA_boy'), data_x_y(boy_data,boy_all_label), delimiter=',',header=data_header) # np.savetxt('data/{}/data41{}.csv'.format(save_path,'ADA_girl'), data_x_y(data,all_label), delimiter=',',header=data_header)GENERATE MIX NEW DATASETfeatures_df = pd.read_csv('data/data41.csv') features_df_1 = pd.read_csv('data/data41ADA.csv') features_df_2 = pd.read_csv('data/new_test/data41new_test.csv') features_df_3 = pd.read_csv('data/new_test/data41new_test2.csv') x_data = np.vstack((features_df, features_df_1,features_df_2,features_df_3)) print("normal dataset shape: {}".format(features_df.shape)) print("ada dataset shape: {}".format(features_df_1.shape)) print("ada dataset shape: {}".format(features_df_2.shape)) print("ada dataset shape: {}".format(features_df_3.shape)) print("new mix dataset shape: {}".format(x_data.shape)) save_path="" np.savetxt('data/{}/data41{}.csv'.format(save_path,'mix2'), x_data, delimiter=',',header=data_header)SPLIT TEST AND TRAIN DATAsave_path="84new" x_data = pd.read_csv('data/84new.csv') x_data.tail(5) def create_test_train(x_data, save_path, data_header): from sklearn.model_selection import train_test_split X_train, X_test = train_test_split( x_data, test_size=0.15, random_state=42, shuffle=True) print('Size of training matrix:', X_train.shape) print('Size of testing matrix:', X_test.shape) # test data np.savetxt('data/{}/{}.csv'.format(save_path,save_path+'_train'), X_train, delimiter=',', header=data_header) np.savetxt('data/{}/{}.csv'.format(save_path,save_path+'_test'), X_test, delimiter=',', header=data_header) create_test_train(x_data, save_path, data_header)Size of training matrix: (32811, 2809) Size of testing matrix: (5791, 2809)Figure 1f, axes = plt.subplots(nrows=2, ncols=6, figsize=(30,10)) for i in range(4): data = pickle.load(open('teaser_data/left_test_iter=651_step{}.pkl'.format(i), 'rb')) data['binarized_mask'][56:72, 56:72] = 2 axes[0, i].imshow(np.log(data['kspace']), cmap='gray') axes[0, i].imshow(data['binarized_mask'], cmap=cmap, alpha=0.4) axes[0, i].get_xaxis().set_ticks([]) axes[0, i].get_yaxis().set_ticks([]) if i == 3: axes[0, 4].imshow(data['recon'], cmap='gray') axes[0, 4].axis('off') axes[0, 5].imshow(data['input'], cmap='gray') axes[0, 5].axis('off') axes[0, 0].set_ylabel('Left Rotated', fontsize=25, labelpad=20) axes[0, 1].set_title(r'${}$ 8 $\times$ Subsampled $\kappa$-space'.format('\qquad'*7), fontsize=25, pad=20) axes[0, 4].set_title('Final Reconstruction', fontsize=25, pad=20) axes[0, 5].set_title('Ground Truth', fontsize=25, pad=20) axes[0, 3].arrow(64, 64, 18, -6, width=2, fc='cyan', ec='cyan', head_length=5, alpha=1) for i in range(4): data = pickle.load(open('teaser_data/right_test_iter=651_step{}.pkl'.format(i), 'rb')) data['binarized_mask'][56:72, 56:72] = 2 axes[1, i].imshow(np.log(data['kspace']), cmap='gray') axes[1, i].imshow(data['binarized_mask'], cmap=cmap, alpha=0.4) axes[1, i].get_xaxis().set_ticks([]) axes[1, i].get_yaxis().set_ticks([]) axes[1, i].set_xlabel('Step {}'.format(i), fontsize=25, labelpad=20) if i == 3: axes[1, 4].imshow(data['recon'], cmap='gray') axes[1, 4].axis('off') axes[1, 5].imshow(data['input'], cmap='gray') axes[1, 5].axis('off') axes[1, 3].arrow(64, 64, 18, 4, width=2, fc='cyan', ec='cyan', head_length=5, alpha=1) axes[1, 0].set_ylabel('Right Rotated', fontsize=25, labelpad=20) 
plt.tight_layout() plt.show()Figure 5def plot_scatter(ax, seq0_files, seq4_files, loupe_files, which): show = 1000 seq0_ssims = load_ssims(*seq0_files) seq4_ssims = load_ssims(*seq4_files) loupe_ssims = load_ssims(*loupe_files) indices = np.argsort(seq0_ssims) loupe = ax.scatter(range(show), loupe_ssims[indices[-show:]], s=4, c=golden) seq4 = ax.scatter(range(show), seq4_ssims[indices[-show:]], s=4, c=blue) seq0 = ax.scatter(range(show), seq0_ssims[indices[-show:]], s=4, c=red) ax.set_ylim((88, 100)) # set the xlim to left, right ax.set_title('{} Acceleration Factor'.format(which), fontsize=25, pad=20) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) if which == '4x': ax.legend((loupe, seq4, seq0), ('LOUPE', 'Non-sequential', '4-step Sequential'), scatterpoints=1, loc='lower right', fontsize=25, markerscale=10, frameon=False) ax.set_ylabel('SSIM', fontsize=25, labelpad=20) ax.set_yticks(range(88, 102, 2)) ax.tick_params(axis='y', which='major', labelsize=25) elif which == '8x': ax.set_xlabel('Targets Sorted by the Non-sequential Performance Within Each Acceleration Factor', fontsize=25, labelpad=20) f, axes = plt.subplots(nrows=1, ncols=3, figsize=(24,6)) plot_scatter(axes[0], fx_seq0_files, fx_seq4_files, fx_loupe_files, '4x') plot_scatter(axes[1], ex_seq0_files, ex_seq4_files, ex_loupe_files, '8x') plot_scatter(axes[2], sx_seq0_files, sx_seq4_files, sx_loupe_files, '16x') plt.tight_layout() plt.show()Figure 6mean_4x, std_4x = calc_avg_for_fig6([fx_loupe_files, fx_seq0_files, fx_seq1_files, fx_seq2_files, fx_seq4_files]) mean_8x, std_8x = calc_avg_for_fig6([ex_loupe_files, ex_seq0_files, ex_seq1_files, ex_seq2_files, ex_seq4_files]) mean_16x, std_16x = calc_avg_for_fig6([sx_loupe_files, sx_seq0_files, sx_seq1_files, sx_seq2_files, sx_seq4_files]) mean_4x_lc, std_4x_lc = calc_avg_for_fig6([lc_loupe_files, lc_seq0_files, lc_seq1_files, lc_seq2_files, lc_seq4_files]) mean_4x = [92.44, 92.56, 92.66, 92.74, 92.91] std_4x = [0.01, 0.02, 0.06, 0.00, 0.01] mean_8x = [90.60, 90.66, 90.73, 90.95, 91.07] std_8x = [0.03, 0.04, 0.03, 0.04, 0.02] mean_16x = [88.73, 88.67, 88.82, 88.88, 89.10] std_16x = [0.04, 0.11, 0.03, 0.08, 0.03] mean_4x_lc = [89.52, 90.89, 90.84, 91.20, 91.08] std_4x_lc = [0.02, 0.01, 0.16, 0.05, 0.09] x = np.arange(5) plt.figure(figsize=(8,5)) plt.ylim([88, 93.5]) plt.axhline(89.52, alpha=0.5, color=gray) plt.axhline(92.44, alpha=0.5, color=gray) plt.axhline(90.60, alpha=0.5, color=gray) plt.axhline(88.73, alpha=0.5, color=gray) plt.errorbar(x, mean_4x_lc, std_4x_lc, ls='--', lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color=blue) plt.errorbar(x, mean_4x, std_4x, lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color=blue) plt.errorbar(x, mean_8x, std_8x, lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color=red) plt.errorbar(x, mean_16x, std_16x, lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color=golden) line1 = plt.errorbar(x-6, mean_4x, std_4x, lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color='black') line2 = plt.errorbar(x-6, mean_4x_lc, std_4x_lc, ls='--', lw=3, elinewidth=3, markeredgewidth=3, capsize=5, color='black') fx = plt.scatter(-6, 0, marker='s', edgecolors='black', s=2, c=blue) ex = plt.scatter(-6, 0, marker='s', edgecolors='black', s=2, c=red) sx = plt.scatter(-6, 0, marker='s', edgecolors='black', s=2, c=golden) plt.xlim([-1., 5]) plt.xticks(x, ['LOUPE', 'Non-Seq.', '1-Step Seq.', '2-Step Seq.', '4-Step Seq.'], rotation=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) plt.ylabel('SSIM', fontsize=15, labelpad=10) data_list = 
[mean_4x_lc, mean_4x, mean_8x, mean_16x] for i, data in enumerate(data_list): for j, entry in enumerate(data): if i == 0: if j == 0 or j == 2: plt.text(j-0.2, entry+0.28,'{}'.format(format(entry, '.2f'))) continue if i == 2: if j != 0: plt.text(j-0.2, entry-0.35,'{}'.format(format(entry, '.2f'))) continue plt.text(j-0.2, entry+0.2,'{}'.format(format(entry, '.2f'))) plt.legend((line1, line2, fx, ex, sx), ('2D', '1D', r'4$\times$', r'8$\times$', r'16$\times$'), scatterpoints=1, loc=[0.9,0.42], fontsize=15, markerscale=10, facecolor='white', edgecolor='white', framealpha=1, labelspacing=0.8) plt.gca().spines['top'].set_visible(False) plt.gca().spines['right'].set_visible(False) plt.show()Figure 7def plot_histograms(ax, alg1_files, alg2_files, which): low = -0.5 high = 1.5 step = 0.05 edges = np.arange(low, step+high, step); alg1_ssims = load_ssims(*alg1_files) alg2_ssims = load_ssims(*alg2_files) ratio_mean, ratio_std = calc_ratios_for_fig7(*alg1_files, *alg2_files) alg1_over_alg2 = alg1_ssims - alg2_ssims minimum = alg1_over_alg2.min() maximum = alg1_over_alg2.max() alg1_over_alg2 = np.clip(alg1_over_alg2, low, high) ax.hist(alg1_over_alg2, bins=edges, color='black', edgecolor='black', alpha=0.6) ax.axvline(0, ls='--', dashes=(10, 5), color='black') ax.set_ylim([0,500]) ax.set_title(which, fontsize=25, pad=20) ax.set_xticks([-0.5, 0, 0.5, 1, 1.5]) ax.set_xticklabels([r'$\leq -0.5$', '0', '0.5', '1', r'$\geq 1.5$']) ax.get_yaxis().set_ticks([]) ax.tick_params(axis='both', which='major', labelsize=25) ax.text(-0.5, 20, r'min=${}$'.format(format(minimum, '.2f')), fontsize=25) ax.text(1.2, 20, 'max={}'.format(format(maximum, '.2f')), fontsize=25) ax.text(0.75, 450, '{}%'.format(format(ratio_mean, '.2f')), fontsize=25) if which == '4-Step Seq. over 1-Step Seq.': ax.set_ylabel('# of Test Samples', fontsize=25, labelpad=20) ax.set_yticks(range(0, 600, 100)) elif which == '4-Step Seq. over 2-Step Seq.': ax.yaxis.set_label_position('right') ax.set_ylabel(r'4$\times$ Acceleration Factor', fontsize=25, labelpad=40, rotation=270) f, axes = plt.subplots(nrows=1, ncols=2, figsize=(24,8), sharex=True) plot_histograms(axes[0], fx_seq4_files, fx_seq1_files, '4-Step Seq. over 1-Step Seq.') plot_histograms(axes[1], fx_seq4_files, fx_seq2_files, '4-Step Seq. 
over 2-Step Seq.') f.text(0.26,-0.04,'SSIM Improvement Distribution on the Test Set (Averaged over 3 Runs)', fontsize=25) plt.tight_layout() plt.show()Timeseries tutorialimport numpy as np import pandas as pd import matplotlib.pyplot as plt import sys import os timeseries_path = os.path.join('..', 'pvops', 'timeseries') sys.path.append('..') sys.path.append(timeseries_path) from pvops.timeseries import preprocess from pvops.timeseries.models import linear, iec, AIT from pvops.text2time import utils as t2t_utils, preprocess as t2t_preprocess example_OMpath = os.path.join('example_data', 'example_om_data2.csv') example_prodpath = os.path.join('example_data', 'example_prod_with_covariates.csv') example_metapath = os.path.join('example_data', 'example_metadata2.csv') prod_data = pd.read_csv(example_prodpath, error_bad_lines=False, engine='python') prod_data.head() metadata = pd.DataFrame() metadata['randid'] = ['R15', 'R10'] metadata['dcsize'] = [25000, 25000] metadata.head() #Format for dictionaries is {pvops variable: user-specific column names} prod_col_dict = {'siteid': 'randid', 'timestamp': 'date', 'powerprod': 'generated_kW', 'energyprod': 'generated_kW', 'irradiance':'irrad_poa_Wm2', 'temperature':'temp_amb_C', # Optional parameter, used by one of the modeling structures 'baseline': 'AIT', #user's name choice for new column (baseline expected energy defined by user or calculated based on IEC) 'dcsize': 'dcsize', #user's name choice for new column (System DC-size, extracted from meta-data) 'compared': 'Compared',#user's name choice for new column 'energy_pstep': 'Energy_pstep', #user's name choice for new column 'capacity_normalized_power': 'capacity_normalized_power', #user's name choice for new column } metad_col_dict = {'siteid': 'randid', 'dcsize': 'dcsize'}Data Formattingprod_data_converted = t2t_preprocess.prod_date_convert(prod_data, prod_col_dict) prod_data_datena_d, _ = t2t_preprocess.prod_nadate_process(prod_data_converted, prod_col_dict, pnadrop=True) prod_data_datena_d.index = prod_data_datena_d[prod_col_dict['timestamp']] min(prod_data_datena_d.index), max(prod_data_datena_d.index)Data Preprocessingmasked_prod_data = preprocess.prod_inverter_clipping_filter(prod_data_datena_d, prod_col_dict, metadata, metad_col_dict, 'threshold', freq=60) filtered_prod_data = masked_prod_data[masked_prod_data['mask'] == False] del filtered_prod_data['mask'] print(f"Detected and removed {sum(masked_prod_data['mask'])} rows with inverter clipping.") # Visualize the power signal versus covariates for one site temp = filtered_prod_data[filtered_prod_data['randid'] == 'R10'] for xcol in ['irrad_poa_Wm2', 'temp_amb_C', 'wind_speed_ms']: plt.scatter(temp[xcol], temp[prod_col_dict['powerprod']]) plt.title(xcol) plt.grid() plt.show() # Add a dcsize column filtered_prod_data[prod_col_dict['dcsize']] = filtered_prod_data.loc[:, prod_col_dict['siteid']] metad = metadata.copy() metad.index = metad['randid'] del metad['randid'] filtered_prod_data.replace(metad.to_dict(), inplace=True) filtered_prod_data[filtered_prod_data['randid'] == 'R15'][prod_col_dict['energyprod']].plot() model_prod_data = filtered_prod_data.dropna(subset=['irrad_poa_Wm2', 'temp_amb_C', 'wind_speed_ms', 'dcsize']+[prod_col_dict['energyprod']]) model_prod_dataDynamic linear modelingmodel_prod_data = AIT.AIT_calc(model_prod_data, prod_col_dict) def plot(data, randid, from_idx=0, to_idx=1000): data.copy() # Just making the visualization labels better here.. for this example's data specifically. 
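# Added note: `data.copy()` above is not assigned, so the `inplace=True` rename below
# modifies the DataFrame the caller passed in. Assigning `data = data.copy()` first
# would keep this plotting helper free of side effects.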
data.rename(columns={'generated_kW': 'Measured Energy', 'AIT': 'Our Pre-trained Model', 'expected_kW': 'Partner Expected Energy'}, inplace=True) data[data['randid']==randid][['Measured Energy', 'Our Pre-trained Model', 'Partner Expected Energy']].iloc[from_idx:to_idx].plot(figsize=(12,6))Visualize results We visualize the measured hourly energy, our pre-trained model's expected energy, and the results of a partner-produced expected energy.plot(model_prod_data, "R15", from_idx=0, to_idx=100) plot(model_prod_data, "R15", from_idx=-100, to_idx=-1) plot(model_prod_data, "R10", from_idx=0, to_idx=100) plot(model_prod_data, "R10", from_idx=-100, to_idx=-1) plot(model_prod_data, "R10", from_idx=0, to_idx=-1)import sklearn新しいセクション!git clone https://github.com/kihaya/heisei_kamenrider_spec !ls heisei_kamenrider_spec import pandas as pd data = pd.read_csv("heisei_kamenrider_spec/kamenrider_spec.csv",sep=",") data del data["year"] data.iloc[:, 1:] data from sklearn.cluster import KMeans kmeans_model = KMeans(n_clusters=3, random_state=9999).fit(data.iloc[:, 1:]) labels = kmeans_model.labels_ print(labels) data.iloc[2] data.iloc[11]Hierachical Models Assignment# import pandas import pandas as pd # import Agglomerative clustering from sklearn.cluster import AgglomerativeClustering # import scipy.cluster.hierarchy import scipy.cluster.hierarchy as sch # import numpy import numpy as np # import matplotlib import matplotlib.pyplot as plt # set figure size plt.rcParams["figure.figsize"] = (12,8) # Load dataframe df = pd.read_csv('https://raw.githubusercontent.com/lucko515/clustering-python/master/Customer%20in%20Mall%20clusterng/Mall_Customers.csv')Objective: - Fit Hierarchical clustering, - find optimal number of clusters via dendrogram - and plot clusters. Again, to simplify plotting in 2-D, we can create clustering model based on the last two columns of the dataset only.df* define function plot_dendogramX = df[["Annual Income (k$)","Spending Score (1-100)"]].to_numpy() def plot_dendrogram(X,method='ward'): dendrogram = sch.dendrogram(sch.linkage(X, method=method)) plt.title("Dendrogram") plt.ylabel("Euclidean distances") plt.xlabel('Points') plt.show()* Plot dendrogramplot_dendrogram(X)* Fit Hierarchical clustering with optimal number of clustersac = AgglomerativeClustering(affinity='euclidean', linkage='ward', # ward, maximum, average, single n_clusters = 3) # the rules* define function plot_clustersy = ac.fit_predict(X) def plot_clusters(X,y_res, plt_cluster_centers = False): X_centroids = [] Y_centroids = [] for cluster in set(y_res): x = X[y_res == cluster,0] y = X[y_res == cluster,1] X_centroids.append(np.mean(x)) Y_centroids.append(np.mean(y)) plt.scatter(x, y, s=50, marker='s', label=f'cluster {cluster}') if plt_cluster_centers: plt.scatter(X_centroids, Y_centroids, marker='*', c='red', s=250, label='centroids') plt.legend() plt.grid() plt.show()* Plot HC clustersplot_clusters(X,y,plt_cluster_centers=True)Let's Grow your Own Inner Core! Choose a model in the list: - geodyn_trg.TranslationGrowthRotation() - geodyn_static.Hemispheres() Choose a proxy type: - age - position - phi - theta - growth rate set the parameters for the model : geodynModel.set_parameters(parameters) set the units : geodynModel.define_units() Choose a data set: - data.SeismicFromFile(filename) Lauren's data set - data.RandomData(numbers_of_points) - data.PerfectSamplingEquator(numbers_of_points) organized on a cartesian grid. numbers_of_points is the number of points along the x or y axis. 
The total number of points is numbers_of_points**2*pi/4 - as a special plot function to show streamlines: plot_c_vec(self,modelgeodyn) - data.PerfectSamplingEquatorRadial(Nr, Ntheta) same than below, but organized on a polar grid, not a cartesian grid. Extract the info: - calculate the proxy value for all points of the data set: geodyn.evaluate_proxy(data_set, geodynModel) - extract the positions as numpy arrays: extract_rtp or extract_xyz - calculate other variables: positions.angular_distance_to_point(t,p, t_point, p_point)%matplotlib inline # import statements import numpy as np import matplotlib.pyplot as plt #for figures from mpl_toolkits.basemap import Basemap #to render maps import math import json #to write dict with parameters from GrowYourIC import positions, geodyn, geodyn_trg, geodyn_static, plot_data, data plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures cm = plt.cm.get_cmap('viridis') cm2 = plt.cm.get_cmap('winter')/Users/marine/.python-eggs/GrowYourIC-0.5-py3.5.egg-tmp/GrowYourIC/data/CM2008_data.matDefine the geodynamical model Un-comment one of the model## un-comment one of them geodynModel = geodyn_trg.TranslationGrowthRotation() #can do all the models presented in the paper # geodynModel = geodyn_static.Hemispheres() #this is a static model, only hemispheres.Change the values of the parameters to get the model you want (here, parameters for .TranslationGrowthRotation())age_ic_dim = 1e9 #in years rICB_dim = 1221. #in km v_g_dim = rICB_dim/age_ic_dim # in km/years #growth rate print("Growth rate is {:.2e} km/years".format(v_g_dim)) v_g_dim_seconds = v_g_dim*1e3/(np.pi*1e7) translation_velocity_dim = 0.8*v_g_dim_seconds#4e-10 #0.8*v_g_dim_seconds#4e-10 #m.s, value for today's Earth with Q_cmb = 10TW (see Alboussiere et al. 2010) time_translation = rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7) maxAge = 2.*time_translation/1e6 print("The translation recycles the inner core material in {0:.2e} million years".format(maxAge)) print("Translation velocity is {0:.2e} km/years".format(translation_velocity_dim*np.pi*1e7/1e3)) units = None #we give them already dimensionless parameters. rICB = 1. age_ic = 1. omega = 0.#0.5*np.pi/200e6*age_ic_dim#0.5*np.pi #0. #0.5*np.pi/200e6*age_ic_dim# 0.#0.5*np.pi#0.#0.5*np.pi/200e6*age_ic_dim #0. 
#-0.5*np.pi # Rotation rates has to be in ]-np.pi, np.pi[ print("Rotation rate is {:.2e}".format(omega)) velocity_amplitude = translation_velocity_dim*age_ic_dim*np.pi*1e7/rICB_dim/1e3 velocity_center = [0., 100.]#center of the eastern hemisphere velocity = geodyn_trg.translation_velocity(velocity_center, velocity_amplitude) exponent_growth = 1.#0.1#1 print(v_g_dim, velocity_amplitude, omega/age_ic_dim*180/np.pi*1e6)Growth rate is 1.22e-06 km/years The translation recycles the inner core material in 2.50e+03 million years Translation velocity is 9.77e-07 km/years Rotation rate is 0.00e+00 1.221e-06 0.7999999999999999 0.0Define a proxy type, and a proxy name (to be used in the figures to annotate the axes)You can re-define it later if you want (or define another proxy_type2 if needed)proxy_type = "age"#"growth rate" proxy_name = "age (Myears)" #growth rate (km/Myears)" proxy_lim = [0, maxAge] #or None #proxy_lim = None fig_name = "figures/test_" #to name the figures print(rICB, age_ic, velocity_amplitude, omega, exponent_growth, proxy_type) print(velocity)1.0 1.0 0.7999999999999999 0.0 1.0 age [ -1.38918542e-01 7.87846202e-01 4.89858720e-17]Parameters for the geodynamical modelThis will input the different parameters in the model.parameters = dict({'units': units, 'rICB': rICB, 'tau_ic':age_ic, 'vt': velocity, 'exponent_growth': exponent_growth, 'omega': omega, 'proxy_type': proxy_type}) geodynModel.set_parameters(parameters) geodynModel.define_units() param = parameters param['vt'] = parameters['vt'].tolist() #for json serialization # write file with parameters, readable with json, byt also human-readable with open(fig_name+'parameters.json', 'w') as f: json.dump(param, f) print(parameters){'exponent_growth': 1.0, 'vt': [-0.13891854213354424, 0.7878462024097663, 4.8985871965894125e-17], 'proxy_type': 'age', 'omega': 0.0, 'tau_ic': 1.0, 'units': None, 'rICB': 1.0}Different data set and visualisations Perfect sampling at the equator (to visualise the flow lines)You can add more points to get a better precision.npoints = 10 #number of points in the x direction for the data set. data_set = data.PerfectSamplingEquator(npoints, rICB = 1.) 
data_set.method = "bt_point" proxy = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="age", verbose = False) data_set.plot_c_vec(geodynModel, proxy=proxy, cm=cm, nameproxy="age (Myears)") plt.savefig(fig_name+"equatorial_plot.pdf", bbox_inches='tight')=== == Evaluate value of proxy for all points of the data set = Geodynamic model is Translation, Rotation and Growth = Proxy is age = Data set is Perfect sampling in the equatorial plane = Proxy is evaluated for bt_point = Number of points to examine: 60 ===Perfect sampling in the first 100km (to visualise the depth evolution)data_meshgrid = data.Equator_upperpart(10,10) data_meshgrid.method = "bt_point" proxy_meshgrid = geodyn.evaluate_proxy(data_meshgrid, geodynModel, proxy_type=proxy_type, verbose = False) #r, t, p = data_meshgrid.extract_rtp("bottom_turning_point") fig3, ax3 = plt.subplots(figsize=(8, 2)) X, Y, Z = data_meshgrid.mesh_RPProxy(proxy_meshgrid) sc = ax3.contourf(Y, rICB_dim*(1.-X), Z, 100, cmap=cm) sc2 = ax3.contour(sc, levels=sc.levels[::15], colors = "k") ax3.set_ylim(-0, 120) fig3.gca().invert_yaxis() ax3.set_xlim(-180,180) cbar = fig3.colorbar(sc) #cbar.set_clim(0, maxAge) cbar.set_label(proxy_name) ax3.set_xlabel("longitude") ax3.set_ylabel("depth below ICB (km)") plt.savefig(fig_name+"meshgrid.pdf", bbox_inches='tight') npoints = 20 #number of points in the x direction for the data set. data_set = data.PerfectSamplingSurface(npoints, rICB = 1., depth=0.01) data_set.method = "bt_point" proxy_surface = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose = False) #r, t, p = data_set.extract_rtp("bottom_turning_point") X, Y, Z = data_set.mesh_TPProxy(proxy_surface) ## map m, fig = plot_data.setting_map() y, x = m(Y, X) sc = m.contourf(y, x, Z, 30, cmap=cm, zorder=2, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+"map_surface.pdf", bbox_inches='tight')=== == Evaluate value of proxy for all points of the data set = Geodynamic model is Translation, Rotation and Growth = Proxy is age = Data set is Perfect sampling at the surface = Proxy is evaluated for bt_point = Number of points to examine: 400 ===Random data set, in the first 100km - bottom turning point only Calculate the data# random data set data_set_random = data.RandomData(300) data_set_random.method = "bt_point" proxy_random = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type=proxy_type, verbose=False) data_path = "../GrowYourIC/data/" geodynModel.data_path = data_path if proxy_type == "age": # ## domain size and Vp proxy_random_size = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="domain_size", verbose=False) proxy_random_dV = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="dV_V", verbose=False) r, t, p = data_set_random.extract_rtp("bottom_turning_point") dist = positions.angular_distance_to_point(t, p, *velocity_center) ## map m, fig = plot_data.setting_map() x, y = m(p, t) sc = m.scatter(x, y, c=proxy_random,s=8, zorder=10, cmap=cm, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set_random.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+data_set_random.shortname+"_map.pdf", bbox_inches='tight') ## phi and distance plots fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0)) sc1 = ax[0,0].scatter(p, proxy_random, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0) phi = 
np.linspace(-180,180, 50) #analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) #ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,0].set_xlabel("longitude") ax[0,0].set_ylabel(proxy_name) if proxy_lim is not None: ax[0,0].set_ylim(proxy_lim) sc2 = ax[0,1].scatter(dist, proxy_random, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) phi = np.linspace(-90,90, 100) if proxy_type == "age": analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2) ax[0,1].set_xlim([0,180]) ax[0,0].set_xlim([-180,180]) cbar = fig.colorbar(sc1) cbar.set_label("longitude: abs(theta)") if proxy_lim is not None: ax[0,1].set_ylim(proxy_lim) ## figure with domain size and Vp if proxy_type == "age": sc3 = ax[1,0].scatter(dist, proxy_random_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0) ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,0].set_ylabel("domain size (m)") ax[1,0].set_xlim([0,180]) ax[1,0].set_ylim([0, 2500.000]) sc4 = ax[1,1].scatter(dist, proxy_random_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,1].set_ylabel("dV/V") ax[1,1].set_xlim([0,180]) ax[1,1].set_ylim([-0.017, -0.002]) fig.savefig(fig_name +data_set_random.shortname+ '_long_dist.pdf', bbox_inches='tight') fig, ax = plt.subplots(figsize=(8, 2)) sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy_random, s=10,cmap=cm, linewidth=0) ax.set_ylim(-0,120) fig.gca().invert_yaxis() ax.set_xlim(-180,180) cbar = fig.colorbar(sc) if proxy_lim is not None: cbar.set_clim(0, maxAge) ax.set_xlabel("longitude") ax.set_ylabel("depth below ICB (km)") cbar.set_label(proxy_name) fig.savefig(fig_name+data_set_random.shortname+"_depth.pdf", bbox_inches='tight')Real Data set from Waszek paper## real data set data_set = data.SeismicFromFile("../GrowYourIC/data/WD11.dat") data_set.method = "bt_point" proxy2 = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose=False) if proxy_type == "age": ## domain size and DV/V proxy_size = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="domain_size", verbose=False) proxy_dV = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="dV_V", verbose=False) r, t, p = data_set.extract_rtp("bottom_turning_point") dist = positions.angular_distance_to_point(t, p, *velocity_center) ## map m, fig = plot_data.setting_map() x, y = m(p, t) sc = m.scatter(x, y, c=proxy2,s=8, zorder=10, cmap=cm, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+data_set.shortname+"_map.pdf", bbox_inches='tight') ## phi and distance plots fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0)) sc1 = ax[0,0].scatter(p, proxy2, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0) phi = np.linspace(-180,180, 50) #analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) 
#ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,0].set_xlabel("longitude") ax[0,0].set_ylabel(proxy_name) if proxy_lim is not None: ax[0,0].set_ylim(proxy_lim) sc2 = ax[0,1].scatter(dist, proxy2, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) phi = np.linspace(-90,90, 100) if proxy_type == "age": analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2) analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,1].set_xlim([0,180]) ax[0,0].set_xlim([-180,180]) cbar = fig.colorbar(sc1) cbar.set_label("longitude: abs(theta)") if proxy_lim is not None: ax[0,1].set_ylim(proxy_lim) ## figure with domain size and Vp if proxy_type == "age": sc3 = ax[1,0].scatter(dist, proxy_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0) ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,0].set_ylabel("domain size (m)") ax[1,0].set_xlim([0,180]) ax[1,0].set_ylim([0, 2500.000]) sc4 = ax[1,1].scatter(dist, proxy_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,1].set_ylabel("dV/V") ax[1,1].set_xlim([0,180]) ax[1,1].set_ylim([-0.017, -0.002]) fig.savefig(fig_name + data_set.shortname+'_long_dist.pdf', bbox_inches='tight') fig, ax = plt.subplots(figsize=(8, 2)) sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy2, s=10,cmap=cm, linewidth=0) ax.set_ylim(-0,120) fig.gca().invert_yaxis() ax.set_xlim(-180,180) cbar = fig.colorbar(sc) if proxy_lim is not None: cbar.set_clim(0, maxAge) ax.set_xlabel("longitude") ax.set_ylabel("depth below ICB (km)") cbar.set_label(proxy_name) fig.savefig(fig_name+data_set.shortname+"_depth.pdf", bbox_inches='tight')* **This kernel is based on one of the exercises in the excellent book: [Deep Learning with Python by ](https://www.amazon.com/Deep-Learning-Python-Francois-Chollet/dp/1617294438)*** The kernel imports the IMDB reviews (originally text - already transformed by Keras to integers using a dictionary)* Vectorizes and normalizes the data* Compiles a multi layers NN* Monitors the learning / validation curves for loss and accuracy* Try and error with different layers and hidden units* Employs L1 and L2 weight regularization* Implements a DROPOUT layer* The above mentioned book is a **MUST READ**.* *Thanks Francois for an amazing book !*# IMPORT MODULES # TURN ON the GPU !!! 
# If importing dataset from outside - like this IMDB - Internet must be "connected" import os from operator import itemgetter import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') get_ipython().magic(u'matplotlib inline') plt.style.use('ggplot') import tensorflow as tf from keras import models, regularizers, layers, optimizers, losses, metrics from keras.models import Sequential from keras.layers import Dense from keras.utils import np_utils, to_categorical from keras.datasets import imdb print(os.getcwd()) print("Modules imported \n") print("Files in current directory:") from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) #check the files available in the directory # LOAD IMDB DATA (train_data, train_labels), (test_data, test_labels) = imdb.load_data( num_words=10000) print("train_data ", train_data.shape) print("train_labels ", train_labels.shape) print("_"*100) print("test_data ", test_data.shape) print("test_labels ", test_labels.shape) print("_"*100) print("Maximum value of a word index ") print(max([max(sequence) for sequence in train_data])) print("Maximum length num words of review in train ") print(max([len(sequence) for sequence in train_data])) # See an actual review in words # Reverse from integers to words using the DICTIONARY (given by keras...need to do nothing to create it) word_index = imdb.get_word_index() reverse_word_index = dict( [(value, key) for (key, value) in word_index.items()]) decoded_review = ' '.join( [reverse_word_index.get(i - 3, '?') for i in train_data[123]]) print(decoded_review) # VECTORIZE as one cannot feed integers into a NN # Encoding the integer sequences into a binary matrix - one hot encoder basically # From integers representing words, at various lengths - to a normalized one hot encoded tensor (matrix) of 10k columns def vectorize_sequences(sequences, dimension=10000): results = np.zeros((len(sequences), dimension)) for i, sequence in enumerate(sequences): results[i, sequence] = 1. 
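# (added note) fancy indexing with the whole sequence flips every listed word index in this row to 1.0; duplicate indices collapse to a single 1.0, so each row is a multi-hot presence vector rather than a word-count vector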
return results x_train = vectorize_sequences(train_data) x_test = vectorize_sequences(test_data) print("x_train ", x_train.shape) print("x_test ", x_test.shape) # VECTORIZE the labels too - NO INTEGERS only floats into a tensor...(rare exceptions) y_train = np.asarray(train_labels).astype('float32') y_test = np.asarray(test_labels).astype('float32') print("y_train ", y_train.shape) print("y_test ", y_test.shape) # Set a VALIDATION set x_val = x_train[:10000] partial_x_train = x_train[10000:] y_val = y_train[:10000] partial_y_train = y_train[10000:] print("x_val ", x_val.shape) print("partial_x_train ", partial_x_train.shape) print("y_val ", y_val.shape) print("partial_y_train ", partial_y_train.shape) # NN MODEL # Use of DROPOUT model = models.Sequential() model.add(layers.Dense(16, kernel_regularizer=regularizers.l1(0.001), activation='relu', input_shape=(10000,))) model.add(layers.Dropout(0.5)) model.add(layers.Dense(16, kernel_regularizer=regularizers.l1(0.001),activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) # Use of REGULARIZATION #model = models.Sequential() #model.add(layers.Dense(16, kernel_regularizer=regularizers.l1_l2(l1=0.001, l2=0.001),activation='relu', input_shape=(10000,))) #model.add(layers.Dense(16, kernel_regularizer=regularizers.l1_l2(l1=0.001, l2=0.001),activation='relu')) #model.add(layers.Dense(1, activation='sigmoid')) # REGULARIZERS L1 L2 #regularizers.l1(0.001) #regularizers.l2(0.001) #regularizers.l1_l2(l1=0.001, l2=0.001) # OPTIMIZERS #model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy]) #model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy']) # FIT / TRAIN model NumEpochs = 10 BatchSize = 512 model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc']) history = model.fit(partial_x_train, partial_y_train, epochs=NumEpochs, batch_size=BatchSize, validation_data=(x_val, y_val)) results = model.evaluate(x_test, y_test) print("_"*100) print("Test Loss and Accuracy") print("results ", results) history_dict = history.history history_dict.keys() # VALIDATION LOSS curves plt.clf() history_dict = history.history loss_values = history_dict['loss'] val_loss_values = history_dict['val_loss'] epochs = range(1, (len(history_dict['loss']) + 1)) plt.plot(epochs, loss_values, 'bo', label='Training loss') plt.plot(epochs, val_loss_values, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # VALIDATION ACCURACY curves plt.clf() acc_values = history_dict['acc'] val_acc_values = history_dict['val_acc'] epochs = range(1, (len(history_dict['acc']) + 1)) plt.plot(epochs, acc_values, 'bo', label='Training acc') plt.plot(epochs, val_acc_values, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() # PREDICT model.predict(x_test)Testing Sentiment Analysis-----In this notebook, we test sentiment analysis techniques for analysing natural language for which there are three experiments.As representations of conflict narratives, we curated a dataset comprising Hitler’s “” along with political speeches from and Osama bin Laden during the “War on Terror”. In how he advocated for non-violence, provides control data. We do not suggest any moral equivalence between each of these orators. 
Instead, we are exploring functional equivalence in the use of language to legitimise violence. Each has successfully brought about dramatic change through force, or in the case of Luther King, non-violent means. Against this dataset, we tested three sentiment analysis APIs for this experiment. The first is TextBlob, a general-purpose and open-source NLP Python library. As state-of-the-art technologies, and therefore more technically sophisticated, the next two are APIs from Google and Watson. Having such extremes in the dataset means we can assess experimental results through observation, since the moral colour of each text is well established and we know whom each orator was, or was not, seeking to legitimise violence against. We will now see how quantitative representations of language can distort meaning. Where stark results would be expected from a dataset of extremes, our tests show that regardless of technical sophistication, these technologies are unable to distinguish abusive from non-abusive texts. In the first experiment, we compare different document scores for each text. In the second experiment, we compare scores for named entities identified in each document. More experimental results are to follow. Load the Sentiment Analysis Dataset and Show Metadataimport os from cndobjects import SentimentData apis = ["textblob", "watson", "google"] filename = "sentiment_analytics" filepath = os.getcwd() sentiment_analytics = SentimentData(apis = apis, filename = filename, filepath = filepath) sentiment_analytics.fromDisk() sentiment_analytics.dfloading: sentiment_analytics from: C:\Users\Steve\OneDrive - University of Southampton\CNDPipeline\Quantitative AnalysisTest 1 - Document Sentiment ScoresIn this first test we look at how each API scores each text of the dataset overall. In this test we collate and display document sentiment scores for TextBlob, Watson and Google. Note - is too large for the Google API limits; therefore, its score is derived from the mean sentence-level sentiment scoreimport numpy as np import matplotlib.pyplot as plt import importlib import cndplots importlib.reload(cndplots) fig, ax = plt.subplots(figsize = (30, 15)) scores = np.array([sentiment_analytics.df[api] for api in sentiment_analytics.apis]) im, cbar = cndplots.heatmap(scores, sentiment_analytics.apis, [f'{title} ({ref})' for title, ref in zip(sentiment_analytics.df["title"], sentiment_analytics.df["ref"])], ax=ax, figtitle = "A Heatmap of Document-level Sentiment Scores", cmap="Blues", cbarlabel="Sentiment Score") texts = cndplots.annotate_heatmap(im, valfmt="{x:.2f}") fig.tight_layout() plt.show()Test 1 - DiscussionFirstly, despite being a genocidal text, "" is scored at joint 15th for positivity by TextBlob and 5th highest by Watson. For negativity ranking, Google scores "" as the sixth most negative. In comparison with the most positive of these texts, TextBlob scores "I Have a Dream" at 0.04 more positive, Watson scores "" as 0.01 more positive and Google scores "" at 0.13 more negative. A reasonable expectation would be for "" to generate scores dramatically more negative than "I Have a Dream", which is not the case in these results. The highest score for TextBlob is Bush's "The World Will Always Remember 9/11". The highest score for Watson is bin Laden's "Letter Calling for Global Islamic State". The highest score for Google is also bin Laden's "Letter Calling for Global Islamic State".
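(Aside on the note above about texts that exceed the Google API document limits: the snippet below is a minimal sketch, using TextBlob only, of how a document score can be derived as the mean of sentence-level polarities. `long_text` and `mean_sentence_polarity` are illustrative names; this is not the cndobjects pipeline used for these tests.)
from textblob import TextBlob
import numpy as np

def mean_sentence_polarity(long_text):
    # fallback: score each sentence separately and average, for documents too large to score in one call
    blob = TextBlob(long_text)
    if not blob.sentences:
        return 0.0
    return float(np.mean([s.sentiment.polarity for s in blob.sentences]))

# example: mean_sentence_polarity("A long document. Split into sentences and averaged.")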
Given how Luther King advocated for non-violent change, contrary to these results, his texts would be expected to score more highly. The most negative score for TextBlob is -0.08, for Bush's "Address at Islamic Center of Washington, D.C.". The most negative score for Watson is for bin Laden's "Al Jazeera Speech". The most negative score for Google is for bin Laden's "letter to America". As a genocidal text, "Mein Kampf" would reasonably be expected to generate the most negative scores. Across the range of texts, and excluding scores of 0, the only text where all three APIs consistently score for positivity or negativity is bin Laden's "Letter Calling for Global Islamic State". For all other texts, there is no agreement between all three APIs for positivity or negativity. Google scores nine of Bush's 14 texts at 0, which suggests neutral sentiment for speeches in which he sought to legitimise the "War on Terror". With each API generating contrasting scores, there is simply no way to assess the efficacy of these results. The APIs certainly do not produce results that might be expected. We can only conclude that quantitative expressions of sentiment generate meaningless outputs. Test 2 - Testing Sentiment Scores at the Sentence LevelIn this next experiment we show the sentiment scores for each sentence of selected documents.import importlib import cndplots importlib.reload(cndplots) # setup document references # orator ref // document reference // smoothing window orators_list = [("bush", 4, 10), ("laden", 0, 10), ("hitler", 0, 70), ("king", 1, 10)] fig = cndplots.sentiment_plot(orators_list, sentiment_analytics, smooth = True, \ figtitle = 'Sentiment Analysis Comparison for Overall Document Scores and Sentence-Level Profiles', xlabel = 'progress through the document (%)', ylabel = 'rolling average sentiment') fig.savefig(fname = "sentiment_plots")Test 2 - DiscussionThese four graphs show sentence-level sentiment profiles for each API for significant texts from the dataset. Firstly, consider the profile of both Bush's and bin Laden's texts in which they declare war upon each other. Both have similar sentiment profiles relative to positivity and negativity; however, the sentiment relative to how they were received was very different. Their supporters would receive each positively while their opponents would view them negatively. Moreover, transcripts of Bush's speech show how people applauded statements that these APIs scored negatively. Secondly, in a dataset of extremes, we can see the profile for is indistinguishable from the rest. We might expect such an extremely harmful body of words to produce more stark results. Notably, if you look at the overall document scores, where and I Have a Dream are extreme opposites in terms of sentiment, Watson and TextBlob determine both as positive, and Google determines them to be negative. In a face-to-face discussion, we might now discuss what these results mean, and inevitably there will be disagreement, yet what is the assessment framework for analysing sentiment that moderates our differences in agreement? Missing from the y axis of each graph is a unit of measurement for sentiment, so how can we assess what does not have a defined metric? What we might agree upon for now is that for a dataset comprising extremes of sentiment, and APIs of extremes in technical sophistication, these results distort the intended meaning of each text.
And perhaps in how these APIs are unable to distinguish Mein Kampf from the others, these results might also be regarded as somewhat troublesome. Review of Sentence Scores Following the findings from the sentence-level sentiment profiles, this next section is a more detailed review of sentence-level scores. First is a review of agreement between each API for sentence scores, for which there are three assessments. The first assessment reviews agreement between each of the APIs. If all three APIs score either positively or negatively they are in agreement; if one differs from the others they are in disagreement. The second assessment reviews agreement between general purpose and state-of-the-art. If Watson and Google agree, then there is agreement, otherwise there is disagreement. The third assessment reviews Watson's emotion scores. If a positive emotion correlates with a positive sentiment score there is agreement, otherwise there is disagreement. Second is a review of the most positive and negative scores from each document. In this review, scores of +1 or -1 for any API are extracted from each document, then the highest and lowest scores not +1 or -1 are extracted. Through observation, these sentences are then reviewed. Third is a comparison of selected sentences from "Mein Kampf" and "I Have a Dream". Sentences from Mein Kampf are selected for obvious assertions of racism. The iconic "I Have a Dream" statements are selected from "I Have a Dream". Agreement between APIsimport pandas as pd agreement = {1 : {"assessment" : "assess the agreement between APIs", "score" : 0}, 2: {"assessment" : "assess the agreement between state-of-the-art APIs", "score" : 0}, 3 : {"assessment" : "assess agreement between Watson emotion and sentiment", "score" : 0} } for orator in sentiment_analytics.values(): for text in orator: for sentence in text["sentences"]: scores = sentence["scores"] # assessment 1: agreement between each # if all the scores are positive or all the scores are negative or all the scores are neutral there is agreement if all([i > 0 for i in scores.values()]) or all([i < 0 for i in scores.values()]) or all([i == 0 for i in scores.values()]): agreement[1]["score"] +=1 # assessment 2: agreement between each state of the art sota = ["watson", "google"] # if all the scores are positive or all the scores are negative or all the scores are neutral there is agreement if all([i > 0 for k, i in scores.items() if k in sota]) or all([i < 0 for k, i in scores.items() if k in sota]) or all([i == 0 for k, i in scores.items() if k in sota]): agreement[2]["score"] +=1 # sadness, joy, fear, disgust, anger api = "watson" pos_emotion = ["joy"] neg_emotion = ["sadness", "fear", "disgust", "anger"] watson_score = sentence["scores"]["watson"] emotions = sentence["emotion"] # if sentiment score is positive and the highest scoring emotion is a positive emotion, or if sentiment score is negative and highest scoring emotion is a negative emotion if all([watson_score > 0, max(emotions, key=emotions.get) in pos_emotion]) or all([watson_score < 0, max(emotions, key=emotions.get) in neg_emotion]): agreement[3]["score"] +=1 df = pd.DataFrame(agreement) for i, v in enumerate(df.values[1]): df.values[1][i] = f'{round(v*100/len(sentiment_analytics))}%' df.T These results show that agreement between all three APIs is 42%, while agreement between state-of-the-art APIs is 67%.
There is a reasonable amount of disagreement between these APIs, yet what is the assessment framework for sentiment analysis to determine which API is correct?Within the Watson API, agreement between sentiment and emotion assertion is 52%, this means for 48% of sentences the sentiment and emotion scores are contradictory. Review of highest scoring sentences from each documentimport pandas as pd orator = "hitler" text = 0 title = f'{orator} ({sentiment_analytics[orator][text]["datestamp"]}) {sentiment_analytics[orator][text]["title"]}' print(title) print('-'*len(title)) pd.set_option('max_colwidth', 1000) for sentdata in sentiment_analytics.minmax[orator][text].values(): if sentdata["sentences"]: print(sentdata["explain"]) if isinstance(sentdata["sentences"], list): display(pd.DataFrame(sentdata["sentences"])) if isinstance(sentdata["sentences"], dict): display(pd.DataFrame(sentdata["sentences"]).T)hitler (2020-06-30) Mein Kampf ------------------------------ List of sentences an API has scored at +1Comparison of selected sentences from Mein Kampf and I Have a DreamSeveral sentences from Meing Kampf were selected for comparison with Luther King's "I Have a Dream" statements. Sentence from were selected for their obvious expression and promotion of hate and any contradiction with either sentiment or emotion scores.import pandas as pd from collections import Counter import seaborn as sns racism_texts = [(238, "A change would be possible only by a mixture of blood, but in this case the quality of the superior race would be debased."), (240, "It is especially the cultural creativeness which disappears when a superior race intermixes with an inferior one, even though the resultant mongrel race should excel a thousandfold in speaking the language of the race that once had been superior."), (326, "Thus, conversely, a State may be called bad if, in spite of the existence of a high cultural level, it dooms to destruction the bearers of that culture by breaking up their racial uniformity."), (389, "Nature generally takes certain measures to correct the effect which racial mixture produces in life."), (394, "At all critical moments in which a person of pure racial blood makes correct decisions, that is to say, decisions that are coherent and uniform, the person of mixed blood will become confused and take measures that are incoherent."), (837, "No boy or girl must leave school without having attained a clear insight into the meaning of racial purity and the importance of maintaining the racial blood unadulterated."), (1133, "The constructive principle of Aryan humanity is thus displaced by the destructive principle of the Jews, They become the 'ferment of decomposition' among nations and races and, in a broad sense, the wreckers of human civilization."), (1855, "And the swastika signified the mission allotted to us – the struggle for the victory of Aryan mankind and at the same time the triumph of the ideal of creative work which is in itself and always will be anti-Semitic."), (4525, "A state which in this age of racial poisoning dedicates itself to the care of its best racial ele­ments must some day become lord of the earth."), ] dream_texts = [(61, "I have a dream that one day this nation will rise up and live out the true meaning of its creed:"), (62, "We hold these truths to be self-evident, that all men are created equal."), (63, "I have a dream that one day on the red hills of Georgia, the sons of former slaves and the sons of former slave owners will be able to sit down together at the table 
of brotherhood."), (64, "I have a dream that one day even the state of Mississippi, a state sweltering with the heat of injustice, sweltering with the heat of oppression, will be transformed into an oasis of freedom and justice."), (65, "I have a dream that my four little children will one day live in a nation where they will not be judged by the color of their skin but by the content of their character."), (66, "I have a dream today!"), (67, "I have a dream that one day, down in Alabama, with its vicious racists, with its governor having his lips dripping with the words of \"interposition\" and \"nullification\" -- one day right there in Alabama little black boys and black girls will be able to join hands with little white boys and white girls as sisters and brothers."), (68, "have a dream today!"), (69, "I have a dream that one day every valley shall be exalted, and every hill and mountain shall be made low, the rough places will be made plain, and the crooked places will be made straight; \"and the glory of the Lord shall be revealed and all flesh shall see it together.\"") ] # for orator in sentiment_analytics.values(): # for text in orator: # for i, sentence in enumerate(text["sentences"]): # if "race" in sentence["text"].split(" "): # print(i, '=>', sentence["text"]) # function to colour sentiments based on positivity or negativity def color_red_or_green(val): if isinstance(val, (int, float)): color = 'red' if val < 0 else 'green' return 'color: %s' % color return val # function to get the formatted sentiment and emotion scores def get_sents(orator, text, sents): for n in sents: sent = sentiment_analytics[orator][text]["sentences"][n[0]] sent_scores = dict() sent_scores["text"] = sent["text"] sent_scores.update(sent["scores"]) sent_scores.update(sent["emotion"]) yield sent_scores def sentiment_rank(dataframe): """ review sentiment scores to determine prevalence of positivity/negativity """ for col in dataframe[sentiment_analytics.apis]: pos = [i > 0 for i in dataframe[col] if i != ''].count(True) neg = [i < 0 for i in dataframe[col] if i != ''].count(True) neu = [i == 0 for i in dataframe[col] if i != ''].count(True) dataframe[col][dataframe.index[-1]] = (f'positive = {pos} negative = {neg} neutral = {neu}') return dataframe def emotion_rank(dataframe): """ review emotion scores to determine ranking of expressed emotions in the sentences. 
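Added description of the approach: for each rank ('first', 'second', 'third'), scan every sentence's emotion scores, drop the emotions already assigned to a higher rank, take the top remaining emotion for each sentence, and give the current rank to the emotion that tops the most sentences; the rank label is then written into that emotion's column of the final (assessment) row.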
""" ranking = {"first" : "", "second" : "", "third" : ""} emotions = ["sadness", "joy", "fear", "disgust", "anger"] for rank in ranking: cache = list() # iterate through the column slice containing emotions for index, row in dataframe[emotions].iterrows(): # exclude just added new_row row if not any(row.values): continue # remove the previous ranked emotions from consideration temp = row for v in list(ranking.values()): if v in temp: temp = temp.drop(v) # sort the emotion scores in descending order to get # top emotion minus previously ranked emotion sorted_row = temp.sort_values(ascending = False) cache.append(temp.loc[lambda x: x == sorted_row[0]].keys()[0]) # add top ranking score to ranking dictionary # add ranking to dataframe emotion = Counter(cache).most_common()[0][0] ranking[rank] = emotion df[emotion][len(dataframe) -1] = rank return dataframe hitler = ("hitler", 0, racism_texts) king = ("king", 1, dream_texts) # create the dataframe of selected sentences with sentiment and emotion scores pd.set_option('max_colwidth', 1000) df = pd.DataFrame(get_sents(*hitler)) # add an empty row for assessment data df = df.append(pd.Series(dtype=str), ignore_index=True).fillna('') # add assessment data df = sentiment_rank(df) df = emotion_rank(df) # display dataframe u = df.index.get_level_values(0) # used to capture subset slice of df for gradient for rows with mixture on string and index cm = sns.light_palette("green", as_cmap=True) # set colour scheme display(df.style.applymap(color_red_or_green, subset = sentiment_analytics.apis).\ background_gradient(cmap=cm, subset = pd.IndexSlice[u[:-1], emotions] )) pd.set_option('max_colwidth', 1000) df = pd.DataFrame(get_sents(*king)) # add an empty row for assessment data df = df.append(pd.Series(dtype=str), ignore_index=True).fillna('') # add assessment data df = sentiment_rank(df) df = emotion_rank(df) # display dataframe u = df.index.get_level_values(0) # used to capture subset slice of df for gradient for rows with mixture on string and index cm = sns.light_palette("green", as_cmap=True) # set colour scheme display(df.style.applymap(color_red_or_green, subset = sentiment_analytics.apis).\ background_gradient(cmap=cm, subset = pd.IndexSlice[u[:-1], emotions] ))Active Learning for NLP ClassificationIn this tutorial, we guide you through using our new [HuggingFace](https://huggingface.co/transformers/main_classes/trainer.html) trainer wrapper to do active learning with transformers models. Any model which could be trained by HuggingFace trainer and has `Dropout` layers could be used in the same manner.We will use the `SST2` dataset and `BertForSequenceClassification` as the model for the purpose of this tutorial. As usual, we need to first download the dataset.Note: This tutorial is intended for advanced users. 
If you are not familiar with BaaL, please refer to other tutorials.from datasets import load_dataset datasets = load_dataset("glue", "sst2", cache_dir="/tmp") raw_train_set = datasets['train']Reusing dataset glue (/tmp/glue/sst2/1.0.0/7c99657241149a24692c402a5c3f34d4c9f1df5ac2e4c3759fadea38f6cb29c4)ActiveLearning DatasetIn order to create an active learning dataset, we need to wrap the dataset with `baal.ActiveLearningDataset`.This requires a `torch.utils.Dataset` so we propose a `baal.active.HuggingFaceDataset` that can take a HuggingFace datasetand perform the preprocessing steps.from baal.active import active_huggingface_dataset from transformers import BertTokenizer pretrained_weights = 'bert-base-uncased' tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path=pretrained_weights) active_set = active_huggingface_dataset(raw_train_set, tokenizer) # lets randomly label 100 samples, therefore len(active_set) should be 100 active_set.label_randomly(100) assert len(active_set) == 100 print(len(active_set.pool))67249Active Learning ModelThe process of making a model bayesian is exactly the same as before. In this case, we will get the `Bert` model and use `baal.bayesian.dropout.patch_module` to make the dropout layer stochastic at inference time.from copy import deepcopy import torch from transformers import BertForSequenceClassification from baal.bayesian.dropout import patch_module use_cuda = torch.cuda.is_available() model = BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path=pretrained_weights) model = patch_module(model) if use_cuda: model.cuda() init_weights = deepcopy(model.state_dict())Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'] - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). 
Some weights of BertForSequenceClassification were not initialized from the model checkpoint at[...]HeuristicAs already implemented and useful in all classification cases, we continue using `BALD` as our active learning heuristic.Note: ActiveLearning for NLP tasks is an open and challenging field and hence, desiging a proper heuristic is out of the scope of this tutorial.We encourage any pull request that would propose better heuristics.from baal.active import get_heuristic heuristic = get_heuristic('bald')HugginFace Trainer WrapperIf you are not familiar with the HuggingFace trainer module please start [here](https://huggingface.co/transformers/main_classes/trainer.html).HuggingFace Trainer is one of the most popular library to train Transformer models.In order to do active learning, we need the prediction to be run over every sample in pool for number of iterations and hence our wrapper `baal.BaalTransformersTrainer` will provide this functionality on top of the provided functionalities in the `Trainer` module.In the rest of this tutorial, we show how to initialize the `baal.active.active_loop.ActiveLearningLoop` and how to do Active Training.from transformers import TrainingArguments from baal.transformers_trainer_wrapper import BaalTransformersTrainer from baal.active.active_loop import ActiveLearningLoop #Initialization for the huggingface trainer training_args = TrainingArguments( output_dir='.', # output directory num_train_epochs=5, # total # of training epochs per AL step per_device_train_batch_size=16, # batch size per device during training per_device_eval_batch_size=64, # batch size for evaluation weight_decay=0.01, # strength of weight decay logging_dir='.', # directory for storing logs ) # create the trainer through Baal Wrapper baal_trainer = BaalTransformersTrainer(model=model, args=training_args, train_dataset=active_set, tokenizer=None) active_loop = ActiveLearningLoop(active_set, baal_trainer.predict_on_dataset, heuristic, 10, iterations=3) for epoch in range(2): baal_trainer.train() should_continue = active_loop.step() # We reset the model weights to relearn from the new train set. baal_trainer.load_state_dict(init_weights) baal_trainer.lr_scheduler = None if not should_continue: break # at each Active step we add 10 samples to labelled data. At this point we should have 30 samples added # to the labelled part of training set. print(len(active_set))================================================================Demonstration of how to use ClickableImage / generate_2d_layout.================================================================In this example, we open an image file, then use ClickableImage toreturn 2D locations of mouse clicks (or load a file already created).Then, we use generate_2d_layout to turn those xy positions into a layoutfor use with plotting topo maps. 
In this way, you can take arbitrary xypositions and turn them into a plottable layout.# Authors: <> # # License: BSD (3-clause) from scipy.ndimage import imread import numpy as np from matplotlib import pyplot as plt from os import path as op import mne from mne.viz import ClickableImage, add_background_image # noqa from mne.channels import generate_2d_layout # noqa print(__doc__) # Set parameters and paths plt.rcParams['image.cmap'] = 'gray' im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif') # We've already clicked and exported layout_path = op.join(op.dirname(mne.__file__), 'data', 'image') layout_name = 'custom_layout.lout'Load data and clickim = imread(im_path) plt.imshow(im) """ This code opens the image so you can click on it. Commented out because we've stored the clicks as a layout file already. # The click coordinates are stored as a list of tuples click = ClickableImage(im) click.plot_clicks() coords = click.coords # Generate a layout from our clicks and normalize by the image lt = generate_2d_layout(np.vstack(coords), bg_image=im) lt.save(layout_path + layout_name) # To save if we want """ # We've already got the layout, load it lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False) # Create some fake data nchans = len(lt.pos) nepochs = 50 sr = 1000 nsec = 5 events = np.arange(nepochs).reshape([-1, 1]) events = np.hstack([events, np.zeros([nepochs, 2], dtype=int)]) data = np.random.randn(nepochs, nchans, sr * nsec) info = mne.create_info(nchans, sr, ch_types='eeg') epochs = mne.EpochsArray(data, info, events) evoked = epochs.average() # Using the native plot_topo function with the image plotted in the background f = evoked.plot_topo(layout=lt, fig_background=im)Sample Wine Classifier on Wine Dataset# Import important libraries import numpy as np import pandas as pd import matplotlib as plt from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import train_test_split %matplotlib inlineLoad Datasetdf = pd.read_csv('wine12.csv') #test = pd.read_csv('test_data.csv') #Adding Columns df.columns = [ 'name' ,'alcohol' ,'malicAcid' ,'ash' ,'ashalcalinity' ,'magnesium' ,'totalPhenols' ,'flavanoids' ,'nonFlavanoidPhenols' ,'proanthocyanins' ,'colorIntensity' ,'hue' ,'od280_od315' ,'proline' ]Analyze Train Datasetdf.head() df.shape df.isnull().sum() df.describe()Create Test and Train SplitsX= df.drop(['name', 'ash'], axis=1) X.head() y = df['name'] y.head() train_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size = 0.3) print(train_X.shape, valid_X.shape) models = [] models.append(("Logistic Regression:",LogisticRegression())) models.append(("Naive Bayes:",GaussianNB())) models.append(("K-Nearest Neighbour:",KNeighborsClassifier(n_neighbors=3))) models.append(("Decision Tree:",DecisionTreeClassifier())) models.append(("Random Forest:",RandomForestClassifier(n_estimators=20))) models.append(("MLP:",MLPClassifier(hidden_layer_sizes=(45,30,15),solver='sgd',learning_rate_init=0.01,max_iter=500))) models.append(("AdaBoostClassifier:",AdaBoostClassifier())) 
models.append(("GradientBoostingClassifier:",GradientBoostingClassifier())) print('Models appended...') results = [] names = [] for name,model in models: kfold = KFold(n_splits=10, random_state=0) cv_result = cross_val_score(model,train_X,train_y.values.ravel(), cv = kfold,scoring = "accuracy") names.append(name) results.append(cv_result) for i in range(len(names)): print(names[i],results[i].mean()*100)Logistic Regression: 94.4444444444 Naive Bayes: 95.5555555556 K-Nearest Neighbour: 94.4444444444 Decision Tree: 95.5555555556 Random Forest: 98.8888888889 MLP: 48.8888888889 AdaBoostClassifier: 96.6666666667 GradientBoostingClassifier: 94.4444444444load artist_qid# open artist-metadata csv filename = '../data/artist_qid_html_new.csv' #csv_file = csv.reader(open(filename, "r")) #df_new = pd.read_csv(filename) df_new.to_csv(filename, index = False, header = True) df_new.columns filename = '../data/artist_qid_rijks.csv' #csv_file = csv.reader(open(filename, "r")) #df_urls = pd.read_csv(filename) #df['URLs Rijk'] = df_urls['URLs Rijk'] #df['URLs Rijk'][288].strip('[').strip(']').strip('\'') df_new['cloudinary artworks'][4] # clean the formats in Rijks URLs list ix_clean = [] for i in df_new['cloudinary artworks']: rix = [] if i == '[]': rix = [] ix_clean.append(rix) elif i!=i: rix = [] ix_clean.append(rix) elif len(i.split(',')) == 1: rix = [(str(i).strip('[').strip(']').strip("'"))] ix_clean.append(rix) #print(int(str(i).strip('[').strip(']').strip("'"))) elif len(i.split(',')) >1: for j in i.split(','): rix.append(j.strip('[').strip(']').strip("'").strip("\"").strip(" \'")) ix_clean.append(rix) len(ix_clean) #rijks df_new['cloudinary artworks new'] = ix_clean df_new['cloudinary artworks new'][4]still formatting# place two artworks into thumbnails for i, j in enumerate(df_new['thumbnail']): if j!=j: #print(df_new['Name'][i],i) if df_new['URLs Rijk new'][i] != []: #df_new['thumbnail'][i] = df_new['URLs Rijk new'][i][0] print (df_new['thumbnail'][i]) # print(df['Name'][i])Download thumbnailsindex = 284 index # open thumbnail, get width, height # limit the iterator (8 per call) for i in range(8): # load file if isnan(df_new['thumbnail'][(int(index))]): # sort nan values out index += 1 if index < 10: filename = '../data/thumbnails/thumbn_c/thumbnailDUMMY_00' + str(index) + '.png' elif index < 100: filename = '../data/thumbnails/thumbn_c/thumbnailDUMMY_0' + str(index) + '.png' else: filename = '../data/thumbnails/thumbn_c/thumbnailDUMMY_' + str(index) + '.png' else: new_img = getsquared_image(df_new['thumbnail'][((index))]) # filename if index < 10: filename = '../data/thumbnails/thumbn_c/thumbnail_00' + str(index) + '.png' elif index < 100: filename = '../data/thumbnails/thumbn_c/thumbnail_0' + str(index) + '.png' else: filename = '../data/thumbnails/thumbn_c/thumbnail_' + str(index) + '.png' index +=1 new_img.save(filename, 'PNG') list_of_cloud_thumbnail_filenames = os.listdir('../data/thumbnails2_cloudinary/') # build filename list for .startswith comparison index2 = 0 filename_list = [] for i in range(292): if index2 < 10: filename = "thumbnail_00" + str(index2) elif index2 < 100: filename = "thumbnail_0" + str(index2) else: filename = "thumbnail_" + str(index2) index2+=1 filename_list.append(filename) #filename_list #https://res.cloudinary.com/dfyzsulq8/image/upload/v1622639726/thumbnails2/thumbnail_291_fehaff.png # get the fitting cloudinary filename for the thumbnails in a column new_list = [] for i in filename_list: check = 0 for j in list_of_cloud_thumbnail_filenames: if j.startswith(i): 
new_list.append('https://res.cloudinary.com/dfyzsulq8/image/upload/v1622639726/thumbnails2/'+j) check = 1 if check == 0: new_list.append('') #new_list df_new['cloudinary thumbnails new'] = new_list type(df_new['cloudinary artworks new'][4]) df_new.to_csv('BACKUP_df_NEW.csv', index=False, header=True) df_new.columnsget all the Rijksmuseum artworksdf_new['URLs Rijk new'][30] == [] # open all the URLs and save the squared images names [artist-index]_[image-index].png count = 0 # iterate over artists for i in range(len(df_new)): if df_new['URLs Rijk new'][i] == []: print(count, i, df_new['URLs Rijk new'][i]) count += 1 else: index = 0 # iterate over list of urls for j in df_new['URLs Rijk new'][i]: if j == 'nan': index += 1 continue elif j == '': index += 1 continue else: img = getsquared_image(j + '-s1400') #.replace(' ', '%20') filename = '../data/Rijks/' + str(i) + '_' + str(index) + '.png' img.save(filename, 'PNG') index += 1 print(count) count += 1 for h in (df_new['URLs Rijk new'][25]): print(h) df_new['Unnamed: 0'][0] list_of_artworks = os.listdir('../data/Rijks_cloudinary/') #list_of_artworks # get the fitting cloudinary filename for the Rijks artworks # https://res.cloudinary.com/dfyzsulq8/image/upload/v1622644688/artworks_rijk/286_1_vdkwpx.png artwork_list = [] for i in df_new['Unnamed: 0']: check = 0 storage = [] for j in list_of_artworks: if j.startswith(str(i) + '_'): storage.append('https://res.cloudinary.com/dfyzsulq8/image/upload/v1622644688/artworks_rijk/' + str(j)) check = 1 if check == 0: artwork_list.append('') else: artwork_list.append(storage) df_new[' Rijks cloudinary artworks new'] = artwork_list (artwork_list) df_new[' Rijks cloudinary artworks new']finally make the artwork column!df_new.columns final_storage = [] for i, j in enumerate(df_new['cloudinary thumbnails new']): store = list() store.append(j) for k in df_new['cloudinary artworks new'][i]: store.append(k) for l in df_new[' Rijks cloudinary artworks new'][i]: store.append(l) #.extend().extend(df_new[' Rijks cloudinary artworks new'][i]) final_storage.append(store) len(final_storage) df_new['cloudinary thumbnails new'] thumbnail = '' for i in df['']: death+=str(i)+',' death = death.strip(',') #final_storage[233] with open('') df_new['final artworks'] = final_storageEND of formatting make final cables dataframe#final_storage position_in_timeline = 
[1271,1266,1650,1254,1282,1249,1276,1268,1304,1400,1316,1311,1326,1305,1314,1319,1308,1314.5,1285,1309,1324,1328,1341,1340,1412,1373,1395,1365,1287,1395.5,1321,1380,1384,1319.5,1398,1392,1388,1407,1453,1406,1410,1446,1396.0,1440,1480,1527,1436,1416,1405,1415,1420,1414,1411,1426,1434,1439,1429,1448,1461,1454,1424,1449,1487,1438,1429.5,1449.5,1462,1466,1433,1465,1468,1458,1655,1463,1472,1433.5,1431,1390,1425,1376,1435,1453.5,1461.5,1470,1441,1443,1457,1448.5,1431.5,1447,1456,1498,1504,1474,1430.0,1473,1463.5,1475,1514,1468.5,1467,1470.5,1477,1471,1466.5,1478,1488,1486,1464,1469,1445,1473.5,1465.5,1457.5,1490,1535,1464.5,1475.5,1482,1462.5,1499,1484,1485,1481,1479,1526,1510,1500,1496,1407.5,1383,1360,1404,1335,1355,1467.5,1495,1483,1495.5,1469.5,1491,1492,1481.5,1487.5,1496.5,1494,1507,1521,1521.5,1483.5,1518,1522,1501,1478.5,1532,1482.5,1489,1512,1488.5,1474.5,1489.5,1490.5,1493,1497.0,1476.0,1491.5,1502,1497.5,1477.5,1484.5,1476.5,1505,1492.5,1511,1493.5,1494.5,1509,1498.5,1530,1499.5,1500.5,1516,1508,1501.5,1503,1503.5,1505.5,1556,1567,1580,1517,1530.5,1520,1506,1519,1507.5,1512.5,1513,1515,1516.5,1504.5,1508.5,1509.5,1502.5,1506.5,1517.5,1510.5,1513.5,1471.5,1511.5,1514.5,1515.5,1524,1518.5,1519.5,1520.5,1522.5,1523.0,1523.5,1524.5,1525.0,1525.5,1526.5,1542,1527.5,1528.0,1528.5,1529.0,1529.5,1531.0,1538,1531.5,1532.5,1558,1565,1533.0,1533.5,1534.0,1534.5,1535.5,1536,1536.5,1485.5,1479.5,1537.0,1537.5,1551,1540,1538.5,1539.0,1539.5,1548,1540.5,1543,1574,1541.0,1543.5,1544,1541.5,1561,1554,1542.5,1544.5,1550,1562,1545.0,1545.5,1557,1553,1560,1546.0,1546.5,1557.5,1547.0,1547.5,1548.5,1549.0,1566,1594,1549.5,1550.5,1551.5,1552.0,1552.5,1660,1553.5,1554.5,1555.0,1575,1555.5,1556.5,1558.5] len(position_in_timeline) # make new dataframe for cables -> to be converted to json array index = [] for i in range(292): index.append(i) df_cables = pd.DataFrame() df_cables['index'] = index df_cables['Name'] = df_new['Name'] df_cables['birth'] = df_new['birth'] df_cables['death'] = df_new['death'] df_cables['timeline_position'] = df_new['timeline_position'] df_cables['timeline_position_extended'] = position_in_timeline df_cables['Wikipedia'] = df_new['Wikipedia-URL'] df_cables['Summary'] = df_new['Summary'] df_cables['extract_html'] = df_new['extract_html'] # make new! df_cables['cloudinary thumbnails'] = df_new['cloudinary thumbnails new'] # make new! df_cables['final artworks'] = final_storage # make new! 
df_cables['Summary'][2] df_final = df_cables.sort_values(by = ['timeline_position_extended'], inplace= False) df_final df_final.to_csv('FINAL.csv', index = False, header = True) len(df_final) df_final.to_json('FINAL_json_indexf.json', orient = 'index') os.listdir('../') # make list of jsons from dataframe list_of_json = [] for i in range(len(df_final2)): x = df_final2.iloc[i].to_json() list_of_json.append(x) len(list_of_json) #df_cables.iloc[4].to_json() with open('json_listTEST.json', 'w+') as output: output.write(str(list_of_json)) output.close() another_list = [] for i in list_of_json: data = json.loads(i) another_list.append(data) type(another_list[0]) with open('json_list_NEW_testtest.json', 'w') as outfile: json.dump(another_list, outfile) (df_final2)['indexf'] = range(292) df_final2 = df_final.set_index('indexf') df_final2 thumbnail = '' for i in df_final2['cloudinary thumbnails']: thumbnail+=str(i)+',' death = thumbnail.strip(',') f = open('thumbnails_squared.txt', 'wb') # names f.write(death.encode("utf-8")) f.close() df_final2.columnsM2.851 - Tipología y ciclo de vida de los datos (Typology and Data Life Cycle) classroom 1 · Practical Assignment 1 2018 · University Master's Degree in Data Science Collaborating professor: é Student: -   Dataset History of events of the UFC (*Ultimate Fighting Championship*) Description A complete dataset with all the sporting events of the most famous MMA (*mixed martial arts*) promotion in the world, the athletes, results, among other details. Identifying image Figure 1: Results and fight card of a UFC event. Context The dataset covers all the MMA sporting events held from the creation of the UFC (first event on **12 November 1993**) up to the most recent one, on **27 October 2018**, that is, **454 events**, as well as the result of every fight that took place at each event - precisely **4869 fights**. Content The dataset has fairly simple content consisting of:* [String] *event_name:* Name of the event* [String] *weight_class:* Weight class of the fight* [String] *fighter_1:* Name of competitor 1* [String] *action:* Action that competitor 1 performed on the opponent / result* [String] *fighter_2:* Name of the opponent* [Integer] *round:* *Round* in which the fight ended* [String] *time:* Time in the *round* at which the fight ended* [String] *method:* Method used to win The time interval was mentioned above; it is worth highlighting that each event has an average of 10.7 fights. The strategy used to collect the data is the result of iterating over lists:* First, we grab the [list of all UFC events](https://en.wikipedia.org/wiki/List_of_UFC_events)* We run a `for` loop over the complete list and extract (using BeautifulSoup) the individual URLs of each event* Finally, we also read each event's table to collect the data. Acknowledgements The analysis was carried out between **17 and 20 October**, and the focus was specifically on finding points where extracting information from the data sources could be unlawful. For that, I looked in particular at the [robots.txt file of Wikipedia](https://en.wikipedia.org/robots.txt), which, it should be said, is our source of information, to which we give our thanks, and I suggest donations to the initiative. The URLs we read were not *Disallowed* in the robots file; therefore, there would be no problem exploring them. Even so, I applied techniques such as sleep, as recommended by **[4] . (2017)**, to avoid overloading (*throttling*) the webpage. Inspiration From my point of view, the dataset is interesting because its scope of application is quite broad; to demonstrate this, I will describe some application scenarios:1. Firstly, one application could be in sports journalism - to present data and patterns of the sport.2. An athlete, whether or not they compete in the event, could extract information on the methods most commonly used to finish a fight and, in that way, prepare to prevent them being applied to him, or even practise those techniques to try to apply them when competing.3. Physical conditioning in a sport is a key point. Understanding where most fight finishes tend to happen can also help conditioning professionals train their athletes so that fatigue does not take away their chance of victory.4. We could identify patterns according to the weight (class) of the athletes; that is, fighters in a lighter class may be used to fighting more on their feet, while in heavier classes grappling (ground fighting) tends to occur. There are surely many other possible applications. Licence Having made a brief study of the licences presented, I believe the one that would apply most broadly to my work is **CC BY-NC-SA 4.0**; let me explain: the licence in question allows:* *Share*: copy and redistribute the material* *Adapt*: transform and build upon the materialHowever, it does not allow commercial use of the material - which, being a master's assignment, has mainly academic purposes. Below, an image of what the chosen licence covers:Figure 2: CC BY-NC-SA 4.0. P.S.: Since the chosen licence was not available for GitHub repositories, there I have defined the use of **BSD 3-Clause**, which basically states that redistributions generated from this project must be made with prior notification. It also guarantees that the names of the creators of the original project cannot be used to promote products derived from it. Code In this section, we have the code used to extract the data and, at the end, to generate the CSV.import sys print(sys.version) # Not necessary at all, but to demonstrate that I'm aware that BeautifulSoup4 must be installed !{sys.executable} -m pip install --upgrade pip !{sys.executable} -m pip install BeautifulSoup4 from bs4 import BeautifulSoup from IPython.core.display import display, HTML from time import sleep import requests import pandas as pd import os Events = [] base_url = 'https://en.wikipedia.org' main_url = base_url + '/wiki/List_of_UFC_events' def perform_http_get(url): """ Note: This is a simple function that performs an HTTP GET and, if the result code is 200, proceeds with the return. Args: url (str): A string. Returns: BeautifulSoup: a parsed version of the request content """ r = requests.get(url) if r.status_code == 200: return BeautifulSoup(r.content, 'html.parser') def extract_cell(cells, id_td): """ Note: This method returns the content of a given cell (td), decoded and without additional blank spaces. Args: cells (list): A list representing the table row. id_td (int): The index of the wanted cell Returns: String: the real content of the cell. """ return cells[id_td].renderContents().decode().strip() def append_fighter_names(cells_event, info): """ Note: This method presents some logic that will be briefly explained here.
It turns out that some fighters don't have their own wikipedia page, in these cases, there is no link () within their names. Therefore, returning just the content of the cell where this info is present is enough. In other cases, where the link is there, we must extract to update our dictionary. Args: cells_event (list): A list representing the table row. info (dict): The dictionary to be updated """ fighter1 = '' fighter2 = '' if len(cells_event[1].findAll('a')) == 0: fighter1 = extract_cell(cells_event, 1) else: fighter1 = cells_event[1].find('a').renderContents().decode().strip() if len(cells_event[3].findAll('a')) == 0: fighter2 = extract_cell(cells_event, 3) else: fighter2 = cells_event[3].find('a').renderContents().decode().strip() info.update({"fighter_1" : fighter1}) info.update({"fighter_2" : fighter2}) def extract_row(cells_event, link): """ Note: The goal here is to create a dictionary for the given events and also to update in our events list (Events.append). We realize that within the method we are also invoking the append_fighter_names that will call the aforementioned method to treat the fighters' name. Args: cells_event (list): A list representing the table row. link (str): Basically the event name """ info = { "event_name": link.contents[0], "weight_class": extract_cell(cells_event, 0), "action": extract_cell(cells_event, 2), "method": extract_cell(cells_event, 4), "round": extract_cell(cells_event, 5), "time": extract_cell(cells_event, 6) } append_fighter_names(cells_event, info) Events.append(info) def download_event_image(individual_event, link): """ Note: The method's name is pretty clear here, however, the objective is to download the images of each event in order to present them at the bottom of this current document. In a nutshell, the behaviour expected here is to create all the recovered images in a folder called pictures which, by the way, we can observe that will be created. Args: individual_event (BeautifulSoup): A BeautifulSoup representing the page of a single event. """ event_images = individual_event.select('table.infobox a.image img[src]') if len(event_images) > 0: for img in event_images: img_url = 'http:' + img['src'] r = requests.get(img_url) with open('pictures/' + link.contents[0], "wb") as code: code.write(r.content) def extract_info_individual_event(link): """ Note: As you will see, this is the second part of a for loop that I will summarize here: First we iterate over the list of UFC events presented in the main web-page ( https://en.wikipedia.org/wiki/List_of_UFC_events). From the data gathered there each individual event will be accessed and we'll collect data from these events. Needless to say that I'm not iterating over a single web-page, but instead, more than 450 different web pages, being the first of them the "master one", and this individual ones. Args: link (url): The URL for the individual web-pages of a given list. """ individual_event = perform_http_get(base_url + link.get('href')) table = individual_event.find('table',{'class': 'toccolours'}) download_event_image(individual_event, link) if table is not None: rows_event = table.findAll('tr') for row_event in rows_event: cells_event = row_event.findAll('td') if len(cells_event) > 0 : extract_row(cells_event, link) """ From my perspective this is a key part of the program and it will trigger the execution and the invocation of the previous methods. 
Basically we are collecting the "Past Events" of a table in one page and for each event in this table (currently 454), we will read another information as previously explained in different web pages. By reading another information, I mean: download the event image, read the card and the results of the event and so on. Apart from the behaviour expected, in this part of the code, I also tried to avoid throtling the Wikipedia source by adding some sleeps within each iteration - it will slow down the process and avoid some common problems. """ Events = [] soup = perform_http_get(main_url) table_past_events = soup.find('table', {'id': 'Past_events'}) rows = table_past_events.findAll('tr') for row in rows: #sleep(10) # Wait 10 sec, recommendations explained by [4] . (2017) cells = row.findAll('td') if len(cells) > 0 : links = cells[1].findAll('a') for link in links: extract_info_individual_event(link) """ Once we have a list of dict in Events attributes, it's high time we defined a panda dataframe to properly store the info. It is done here and we present a glimpse of the data with the df.head() below. """ print(len(Events), ' eventos fueron añadidos') df = pd.DataFrame(Events) df = df[['event_name', 'weight_class', 'fighter_1', 'action', 'fighter_2', 'round', 'time', 'method' ]] df.head(15) """ This is particularly a visual part of the presentation of the data. For each event, I'm presenting the folder advertising it. Remembering that we download each of these pictures in a method explained before """ df_imgs = df['event_name'].drop_duplicates() print('Imagenes: ', len(df_imgs)) display(HTML('')) i = 0 row = '' for img in df_imgs: img_src = 'pictures/' + img if i == 20: row = '{}'.format(row, img_src, img) display(HTML(row)) row = '' i = 0 else: row = '{}'.format(row, img_src) i += 1 display(HTML('
\'{}\'
'))Imagenes: 454Dataset: Dataset en formato CSVPor aquí generamos el dataset que podrá ser consultado en el repositorio.""" Last but not least, we are storing the dataframe in a CSV file """ file_name = 'ufc-events.csv' df.to_csv(file_name)Let's memorize some digits!from sklearn import datasets import matplotlib.pyplot as plt #Load the digits dataset digits = datasets.load_digits() # Binarize the dataset for digit_im, digit_data in zip(digits['images'], digits['data']): digit_im[digit_im>0] = 1 digit_data[digit_data>0] = 1 digit_data[digit_data==0] = -1 #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(digits.images[0], cmap=plt.cm.gray_r, interpolation='nearest') plt.show() hop = hopfieldNetwork(np.matrix(digits.data[0])) print(digits.data[0]) print([i.output for i in hop.neuronList]) output = np.array([i.output for i in hop.neuronList]) image_out = output.reshape((8,8)) #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(image_out, cmap=plt.cm.gray_r, interpolation='nearest') plt.show() output[3:6] = -1 image_out = output.reshape((8,8)) #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(image_out, cmap=plt.cm.gray_r, interpolation='nearest') plt.show() hop.mapInputToNeurons(np.matrix(output)) hop.compute() output = np.array([i.output for i in hop.neuronList]) image_out = output.reshape((8,8)) #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(image_out, cmap=plt.cm.gray_r, interpolation='nearest') plt.show() a * -1Let's Visualize the state changesstart_image = np.matrix(digits.data[0]) perturb_image = copy.deepcopy(digits.data[0]) perturb_image[0:32] = -1 hop = hopfieldNetwork(start_image) states = hop.runHopfieldNetwork( np.matrix(perturb_image)) pprint(states) image_out = states[0].reshape(8,8) from IPython import display plt.ion() f, ax = plt.subplots(1,1) im = ax.imshow(states[0].reshape(8,8), cmap=plt.cm.gray_r, interpolation = 'nearest') f.colorbar(im) f.canvas.draw() for i in range(len(states)): try: im.set_data(states[i].reshape(8,8)) f.canvas.draw() display.display(f) display.clear_output(wait=True) plt.pause(0.1) except KeyboardInterrupt: break plt.ioff() plt.close() plt.ion() f = plt.figure(1, figsize=(3, 3)) im = plt.imshow(start_image.reshape((8,8)), cmap=plt.cm.gray_r, interpolation='nearest') plt.show()Initialize script with same initial conditions as in the paper# initial parameters x = np.zeros(4) u = np.zeros(3) p = np.zeros(3) # this is given by the problem data, but might be "controlled" via OPF u[0] = 1.0 #VM1 u[1] = 1.7 #P2 u[2] = 1.0 #VM2 # these parameters are fixed through the computation p[0] = 0.0 #VA1, slack angle p[1] = 2.0 #P3 p[2] = 1.0 #Q3 # initial guess x[0] = 1.0 #VM3 x[1] = 0.0 #VA3 x[2] = 0.0 #VA2 x[3] = 0.5 # print initial guesses print(x) print(u) # POWER FLOW ALGO def powerflow(x, u, p): sol = fsolve(gfun, x, args=(u,p,)) return sol print(powerflow(x, u, p)) print(np.linalg.cond(gfun_x(x, u, p))) # Reduced gradient iteration max_iter = 100 xk = np.copy(x) uk = np.copy(u) for i in range(max_iter): # power flow xk = powerflow(xk, uk, p) # lambda calculation J_x = gfun_x(xk, uk, p) G_x = cfun_x(xk, uk, p) print("condition jacobian:", np.linalg.cond(J_x)) lam = -np.dot(inv(np.transpose(J_x)), G_x) # gradient cost function J_u = gfun_u(xk, uk, p) G_u = cfun_u(xk, uk, p) grad_c = G_u + np.dot(np.transpose(J_u), lam) print("Norm of gradient: ", np.linalg.norm(grad_c)) # evaluate cost function print("Cost function: ", cfun(xk, uk, p)) # compute step alpha = 0.12 uk = uk - alpha*grad_ccondition 
jacobian: 39.59958561633352 Norm of gradient: 0.7038831242740287 Cost function: 2.1767109512499543 condition jacobian: 46.68308341866992 Norm of gradient: 0.5531510420686726 Cost function: 2.1598296133612314 condition jacobian: 44.71393962254119 Norm of gradient: 0.4450119712664789 Cost function: 2.149087109860759 condition jacobian: 49.48699914092949 Norm of gradient: 0.3506100402295171 Cost function: 2.1398376900170497 condition jacobian: 48.99354430244175 Norm of gradient: 0.2865642394328765 Cost function: 2.1329741612923963 condition jacobian: 52.204220536784455 Norm of gradient: 0.23911203526530678 Cost function: 2.127220777411127 condition jacobian: 52.614602175128354 Norm of gradient: 0.2081087508231566 Cost function: 2.122449802647705 condition jacobian: 54.89049527130433 Norm of gradient: 0.186821075858742 Cost function: 2.1183014665515123 condition jacobian: 55.76733748285113 Norm of gradient: 0.17233726046432105 Cost function: 2.1146440807183913 c[...]每日运势from iching import iching from datetime import date from datetime import timedelta %matplotlib inline def ichingByDay(day): dayStr = str(day).replace('-', '') #replace '19850526' with your birthday birthoday = int('19850526' + dayStr) iching.ichingDate(birthoday) fixPred, changePred = iching.getPredict() iching.plotTransition(6, w = 15) guaNames = iching.ichingName(fixPred, changePred) fixText = iching.ichingText(fixPred, iching) if changePred: changeText = iching.ichingText(changePred, iching) else: changeText = None sepline1 = '\n (O--__/\__--O)' sepline2 = '\n(-------------(O---- |__|----O)----------------)' sepline4 = '\n (-------(O-/_--_\-O)-------)' sepline3 = '\n(-----------(O-----/-|__|-\------O)------------)' print guaNames, '\n', u'本卦: ', fixText, sepline1,sepline2,sepline3,sepline4,'\n\n\n', u'变卦: ', changeText今日运势today = date.today() ichingByDay(today)Your birthday & your prediction time: 1985052620150705 there is a changing predict! Also run changePredict() there is a changing predict! Also run changePredict() 噬嗑 & 随 本卦: 噬嗑卦原文噬嗑。亨。利用狱。象曰:雷电噬嗑。先王以明罚敕法。白话文解释噬瞌卦:通泰。利于讼狱。《象辞》说:本卦下卦为震为雷,上卦为离为电,雷电交合是噬嗑的卦象。先王观此卦象,取法于威风凛凛的雷、照彻幽隐的电,思以严明治政,从而明察其刑罚,修正其法律。 《断易天机》解噬嗑卦离上震下,为巽宫五世卦。噬嗑为咬合之意,象征物品咬碎之后才能通过。此卦于诉讼有利。 北宋易学家邵雍解咬碎硬骨,强硬态度;事多困阻,积极谋求。得此卦者,事不遂心,纷争难免,诸事被阻,宜坚守常规,不为利诱,可保平安。 台湾国学大儒傅佩荣解时运:好运初动,声名直上。财运:买卖皆成,货物畅销。家宅:小心火灾;百年好合。身体:须防郁热,失物不保。 传统解卦这个卦是异卦(下震上离)相叠。离为阴卦;震为阳卦。阴阳相交,咬碎硬物,喻恩威并施,宽严结合,刚柔相济。噬嗑为上下颚咬合,咀嚼。大象:上唇与下唇间有物,必须咬断,方能合拢,乃诸事被阻,务必去除,方可成功。运势:诸事阻隔,纷争难免,宜守常规,不为利诱,问题可解决。事业:困难与阻力非常大,应以坚强的意志,果敢的行为,公正无私的态度去战胜种种厄运,争取事态好转。为了早日化险为夷,必要时可采取强硬手段,甚至诉诸法律。经商:处于不利的时候,头脑冷静,明察形势,寻求机遇,不为眼前小利所诱,不发非分之财。认真听取忠告,遵守法纪,秉公办事,不得徇私情,更警惕不得触犯刑律。求名:自己的努力尚不为人所知,不可急于求成,受到挫折应看作是对自己的考验,持之以恒,必能成功。婚恋:初不顺利,须有顽强精神可以取得满意的结果,不可以个人的情绪左右家庭事务。决策:一生不平坦,会遇到挫折和磨难,但应看作是对个人的考验,应认真总结经验教训,以更为坚强的意志,不屈不挠,继续前进。经过锻炼,各方面都会有较大的进展,终将进入光明境地,取得重大成就。 台湾张铭仁解卦噬嗑:表示如鲠在喉、难以决策。主吉凶未定,是个状况卦,有点偏小凶。也如同「鸡肋」一般[...]N天前后的运势N = -2 day = today + timedelta(days=N) ichingByDay(day)Your birthday & your prediction time: 1985052620150703 there is a changing predict! 
Also run changePredict() 咸 & 小过 本卦: 咸卦原文咸。亨,利贞。取女吉。象曰:山上有泽,咸。君子以虚受人。白话文解释咸卦:通达,吉利的贞卜。娶女,吉利。《象辞》说:本卦下卦为艮,艮为山,上卦为兑,兑为泽,山中有泽,山气水息,互相感应,是咸卦的卦象。君子观此卦象,取法于深邃的山谷,深广的大泽,从而虚怀若谷,以谦虚的态度,接受他人的教益。 《断易天机》解咸卦兑上艮下,为兑宫三世卦。咸为感应,万物皆有感应,男女感应,夫妇康宁,感应之事,无有不亨。 北宋易学家邵雍解两性交感,正道感应;物击则鸣,识时知机。得此卦者,宜谦虚待人,则会吉祥如意,谋事可成。但勿为不正当感情而意乱情迷。 台湾国学大儒傅佩荣解时运:谦虚待人,可保功名。财运:转运贩卖,必可图利。家宅:知其所止;两性合好。身体:虚弱宜补。 传统解卦这个卦是异卦(下艮上兑)相叠。艮为山;泽为水。兑柔在上,艮刚在下,水向下渗,柔上而刚下,交相感应。感则成。大象:少男在少女之下,彼此感应,象征新婚,两性交感。运势:如意吉祥,但勿为不正当感情而意乱情迷。事业:和为贵,和则万事兴,务以诚感人,以诚待人。广泛交往朋友,谦虚礼让。树立大志向,坚持主见,不可盲目随大流。不利时应安居待机,不可妄动强求。经商:有利可图。但应以纯正之心,以诚待人,加强合作。市场若不景气,决不可妄动,不可强求,而应静待发展,虚心听取不同意见。求名:志向应宏大,脚踏实地,虚心请教他人,使人悦而应,帮助、重用自己。切莫自我封闭,更不得冷漠孤僻,以广阔的胸怀发展自身。婚恋:成功的可能性极大。双方很有感情,但动机务必纯正,婚姻方可幸福、永久。决策:吉祥如意。人际关系好,善于交际。以真诚的态度,无私的心去处世,不会有不利的地方。与他人合作应坚持原则。办事不可优柔寡断。不顺利时,应停止行动,总结经验,以图发展。待人诚恳,但不可与玩弄口舌的小人交往,防止受骗。 台湾张铭仁解卦咸:表示感动也。主吉象。感,有如青春男女,清纯无暇的感情,心心相印有感而发的情感,得此卦,与感情有非常大的关系,也可引伸为对一种理念的认同和欣赏。咸,并无利欲的情色纠葛,是属于比较「感性的」一[...]Scratch Worksoup = soup_maker(url) scraped_data = [] tbl = soup.find('table',{'class':'table table-hover persist-area'}) tbdy = tbl.find('tbody') all_a = tbdy.find_all('a', {'rel':None}) player_details = {} # players on every other index for i, lnk in enumerate(all_a): if i % 2 == 0: player_details['short_name'] = lnk.text #player_details.update(get_player_details('http://sofifa.com' + lnk['href'])) player_details['link'] = 'http://sofifa.com' + lnk['href'] print(player_details) scraped_data.append(player_details) all_deets = {} soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/') plyr_info = soup.find('div', {'class':'player'}) # find_plyr_info plyr_dat = {} # prepare dictionary for player data ply_inf = plyr_info.find('div', {'class':'meta'}) # grab section with player data infos = ply_inf.text data_str = infos[infos.index('Age') + 4:] # grab all data in str to right of Age like height and weight # set variables plyr_dat['pref_pos'] = ply_inf.find('span').text plyr_dat['full_name'] = ply_inf.text[0:ply_inf.text.find(plyr_dat['pref_pos'])-2].strip() plyr_dat['age'] = int(data_str[:2]) plyr_dat['height'] = data_str[data_str.index(')') + 2:].split(' ')[0].replace('\"','') plyr_dat['weight'] = data_str[data_str.index(')') + 2:].split(' ')[1] print(plyr_dat) plyr_data = {} soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/') plyr_stats = soup.find('div', {'class':'stats'}) plyr_val = plyr_stats.text[plyr_stats.text.find('€'):].split('\n')[0] info = re.findall('\d+', plyr_stats.text) plyr_data['rating'] = int(info[0]) plyr_data['potential'] = int(info[1]) if 'M' in plyr_val: plyr_data['value'] = int(plyr_val[1:plyr_val.index('M')])*1000000 elif 'K' in plyr_val: plyr_data['value'] = int(plyr_val[1:plyr_val.index('K')])*1000 plyr_data['wage'] = int(info[3])*1000 print(plyr_data) soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/') sp = soup.find('div', {'class':'teams'}) lnks = sp.find_all('li') club = lnks[8].find('a').text joined = lnks[12].text print(club) print(lnks[12].find('label', text='Joined').parent.contents[2])Juventus Jul 10, 2018Ok, now let's turn this work into functionsdef gather_basic_info(soup): plyr_dat = {} # prepare dictionary for player data ply_inf = soup.find('div', {'class':'meta'}) # grab section with player data infos = ply_inf.text if 'Age' not in infos: return(plyr_dat) data_str = infos[infos.index('Age') + 4:] # grab all data in str to right of Age like height and weight # set variables plyr_dat['pref_pos'] = ply_inf.find('span').text plyr_dat['full_name'] = 
ply_inf.text[0:ply_inf.text.find(plyr_dat['pref_pos'])-2].strip() plyr_dat['age'] = int(data_str[:2]) plyr_dat['height'] = data_str[data_str.index(')') + 2:].split(' ')[0].replace('\"','') plyr_dat['weight'] = data_str[data_str.index(')') + 2:].split(' ')[1] return(plyr_dat) def gather_club_info(soup): # gets club and join data info plyr_data = {} lnks = soup.find_all('li') # the link list isn't the same for each player if lnks[8].find('a'): plyr_data['Club'] = lnks[8].find('a').text else: plyr_data['Club'] = lnks[7].find('a').text # assign data if len(lnks) < 13: plyr_data['Joined'] = None return(plyr_data) if lnks[12].find('label', text='Joined'): plyr_data['Joined'] = lnks[12].find('label', text='Joined').parent.contents[2] else: plyr_data['Joined'] = None return(plyr_data) def gather_player_stats(soup): # parse stats section and determine rating, potential, value, and wage plyr_data = {} plyr_val = soup.text[soup.text.find('€'):].split('\n')[0] info = re.findall('\d+', soup.text) plyr_data['rating'] = int(info[0]) plyr_data['potential'] = int(info[1]) # check units of their value if 'M' in plyr_val: plyr_data['value'] = int(float(plyr_val[1:plyr_val.index('M')])*1000000) elif 'K' in plyr_val: plyr_data['value'] = int(float(plyr_val[1:plyr_val.index('K')])*1000) # wage is always given in thousands if len(info) > 4: plyr_data['wage'] = int(info[4])*1000 else: plyr_data['wage'] = int(info[3])*1000 return(plyr_data) def get_player_details(soup): # take in player url and build dictionary of player info all_deets = {} soup = soup_maker(soup) # gather basic name, height, weight stats plyr_info = soup.find('div', {'class': 'player'}) all_deets.update(gather_basic_info(plyr_info)) # gather rating, value, wage, etc plyr_stats = soup.find('div', {'class': 'stats'}) all_deets.update(gather_player_stats(plyr_stats)) club_info = soup.find('div', {'class': 'teams'}) all_deets.update(gather_club_info(club_info)) return(all_deets)Now let's create our dataframedef scrape_sofifa(): # completes entire scraping process, can take minutes to run scraped_data = [] page_indexes = [i*60 for i in range(100)] # iterate across 100 different player pages for page_index in page_indexes: print('Scraping page {}...'.format(page_index)) url = 'https://sofifa.com/players?offset=' + str(page_index) soup = soup_maker(url) # gather all player details for this page tbl = soup.find('table',{'class':'table table-hover persist-area'}) tbdy = tbl.find('tbody') all_a = tbdy.find_all('a', {'rel':None}) # players are on every other index for i, lnk in enumerate(all_a): if i % 2 == 0: player_details = {} player_details['short_name'] = lnk.text player_details['link'] = 'http://sofifa.com' + lnk['href'] player_details.update(get_player_details('http://sofifa.com' + lnk['href'])) scraped_data.append(player_details) return(scraped_data) import pandas as pd scraped_data = scrape_sofifa() df = pd.DataFrame(scraped_data) df.head() df.to_csv('scraped_sofifa.csv', index=False) df# Based on interesting take on March Madness # Data available from Kaggle March Machine Learning - 2017 # Base Packages import pandas as pd import pymc3 as pm # This is the new package - more details here - https://docs.pymc.io/ # Allows us to build a monte carlo model import numpy as np import theano.tensor as tt # This is also a new package - more details here - http://deeplearning.net/software/theano/tutorial/ # Additional deep dive with pymc available here - https://docs.pymc.io/PyMC3_and_Theano.html # Theano uses python to build models symbolically # Allows GPU 
code generation import matplotlib.pyplot as plt init='adapt_diag' # OBTAIN df = pd.read_csv('https://raw.githubusercontent.com/2SUBDA/Breakouts/Week7/RegularSeasonCompactResults.csv') # SCRUB - Remove prior season records - for simplicity df=df[df['Season']==2016] df.head() #len(df) # SCRUB - Convert to four df - df['Home Score'] = [obj2 if obj1 == 'H' or obj1 == 'N' else obj3 for obj1,obj2,obj3 in zip(df['Wloc'],df['Wscore'],df['Lscore'])] df['Away Score'] = [obj3 if obj1 != 'H' or obj1 != 'N' else obj2 for obj1,obj2,obj3 in zip(df['Wloc'],df['Wscore'],df['Lscore'])] df['Home Team'] = [obj2 if obj1 == 'H' or obj1 == 'N' else obj3 for obj1,obj2,obj3 in zip(df['Wloc'],df['Wteam'],df['Lteam'])] df['Away Team'] = [obj3 if obj1 != 'H' or obj1 != 'N' else obj2 for obj1,obj2,obj3 in zip(df['Wloc'],df['Wteam'],df['Lteam'])] # SCRUB - Recode team numbers teams = df['Home Team'].unique() teams = pd.DataFrame(teams, columns=['team']) teams['i'] = teams.index df = pd.merge(df, teams, left_on='Home Team', right_on='team', how='left') df = df.rename(columns = {'i': 'i_home'}).drop('team', 1) df = pd.merge(df, teams, left_on='Away Team', right_on='team', how='left') df = df.rename(columns = {'i': 'i_away'}).drop('team', 1) # SCRUB - Find specific team # Using Teams index numbers from Kaggle teams.loc[teams['team'] == 1393] # SCRUB - Rename scores observed_home_points = df['Home Score'].values observed_away_points = df['Away Score'].values advantage = np.array([1 if obj=='H' or obj=='A' else 0 for obj in df['Wloc']],dtype='int64') home_team = df.i_home.values away_team = df.i_away.values num_teams = len(df.i_home.drop_duplicates()) num_games = len(home_team) # MODEL - Build bayesian model # Latent variables - offs_star / defs_star # Latent variables - scoring intensity model = pm.Model() with pm.Model() as model: # global model parameters home = pm.Flat('home') sd_att = pm.HalfStudentT('sd_att', nu=3, sd=2.5) sd_def = pm.HalfStudentT('sd_def', nu=3, sd=2.5) intercept = pm.Flat('intercept') # team-specific model parameters offs_star = pm.Normal("offs_star", mu=0, sd=sd_att, shape=num_teams) defs_star = pm.Normal("defs_star", mu=0, sd=sd_def, shape=num_teams) offs = pm.Deterministic('offs', offs_star - tt.mean(offs_star)) defs = pm.Deterministic('defs', defs_star - tt.mean(defs_star)) # derive the scoring intensity for a game - Home Court Advantage home_theta = tt.exp(intercept + home*advantage + offs[home_team] + defs[away_team]) away_theta = tt.exp(intercept + offs[away_team] + defs[home_team]) # likelihood of observed data home_points = pm.Poisson('home_points', mu=home_theta, observed=observed_home_points) away_points = pm.Poisson('away_points', mu=away_theta, observed=observed_away_points) !pip install causalgraphicalmodels # MODEL - Visualize the model from causalgraphicalmodels import CausalGraphicalModel madness = CausalGraphicalModel( nodes=["Global", "Team", "Offense", "Defense", "CourtAdvantage", "HomeTheta", "AwayTheta", "HomePoints", "AwayPoints", "Wins"], edges=[ ("Global", "Team"), ("Global", "Offense"), ("Global", "Defense"), ("Offense", "HomeTheta"), ("Offense", "AwayTheta"), ("Defense", "HomeTheta"), ("Defense", "AwayTheta"), ("HomeTheta", "CourtAdvantage"), ("AwayTheta", "CourtAdvantage"), ("AwayTheta", "AwayPoints"), ("HomeTheta", "HomePoints"), ("HomePoints", "Wins"), ("AwayPoints", "Wins") ] ) # Draw the model madness.draw() with model: # trace = pm.sample(1000, tune=1000, njobs=2) trace = pm.sample(1000, tune=1000, njobs=2 )Auto-assigning NUTS sampler... 
Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [defs_star, offs_star, intercept, sd_def, sd_att, home] Sampling 2 chains: 100%|██████████| 4000/4000 [03:05<00:00, 21.57draws/s]FYI: Compare results between runtimes:* CPU Time - 498 secs* GPU Time - 512 secs?Why no change? 1. Check runtime environment2. Try the commented out line for running multiple jobs - might see slight improvement# Interpret - Show results pm.traceplot(trace) plt.show() # Interpret - Look for stronger teams on offense or defense pm.forestplot(trace, varnames=['offs'], main="Team Offense") plt.show() pm.forestplot(trace, varnames=['defs'], main="Team Defense") plt.show() # RECOMMEND - Build game simulator tmA = 300 tmS = 88 teamA_home_wins_=[] teamB_home_wins_=[] neutral_A_wins_=[] for i in range(1000): draw=np.random.randint(0,1000) home_=trace['home'][draw] intercept_=trace['intercept'][draw] offs_=trace['offs'][draw] defs_=trace['defs'][draw] home_theta_=np.exp(intercept_+home_+offs_[tmA]+defs_[tmS]) away_theta_=np.exp(intercept_+offs_[tmS]+defs_[tmA]) home_scores_=np.random.poisson(home_theta_,1) away_scores_=np.random.poisson(away_theta_,1) teamA_home_wins=[1 if obj>0 else 0 for obj in home_scores_-away_scores_] teamA_home_wins_.append(np.mean(teamA_home_wins)) home_theta_=np.exp(intercept_+home_+offs_[tmS]+defs_[tmA]) away_theta_=np.exp(intercept_+offs_[tmA]+defs_[tmS]) home_scores_=np.random.poisson(home_theta_,1) away_scores_=np.random.poisson(away_theta_,1) teamB_home_wins=[1 if obj>0 else 0 for obj in home_scores_-away_scores_] teamB_home_wins_.append(np.mean(teamB_home_wins)) home_theta_=np.exp(intercept_+offs_[tmA]+defs_[tmS]) away_theta_=np.exp(intercept_+offs_[tmS]+defs_[tmA]) home_scores_=np.random.poisson(home_theta_,1) away_scores_=np.random.poisson(away_theta_,1) neutral_A_wins=[1 if obj>0 else 0 for obj in home_scores_-away_scores_] neutral_A_wins_.append(np.mean(neutral_A_wins)) # RECOMMEND - Results print('Probability Army beats Syracuse at USMA: {0}%'.format(100*np.mean(teamA_home_wins_))) print('Probability Army beats Syracuse at Dome: {0}%'.format(100-100*np.mean(teamB_home_wins_))) print('Probability Army beats Syracuse at Neutral Site: {0}%'.format(100*np.mean(neutral_A_wins_)))Two Identical Non-differentiable Functions with different *Derivatives*def f(x: Variable) -> Variable: assert x.requires_grad return (2*x*torch.sign(x) + 1)*torch.sign(x) def g(x: Variable) -> Variable: def g1d(x: Variable) -> Variable: if x.data[0] > 0: return 2*x + 1 elif x.data[0] < 0: return 2*x - 1 else: return 2*x assert x.requires_grad if x.dim() == 0: return 1*x if x.size() == torch.Size([1]): return g1d(x) return torch.stack([g(sub_x) for sub_x in x]) import numpy as np x = np.linspace(-3, 3, num=51, dtype=np.float32) x_f = Variable(torch.from_numpy(x), requires_grad=True) y_f = f(x_f) x_g = Variable(torch.from_numpy(x), requires_grad=True) y_g = g(x_g) y_f.backward(torch.ones_like(y_f)) y_g.backward(torch.ones_like(y_g)) %matplotlib inline import matplotlib.pyplot as plt def plot(ax, x, y, label): positive = x.data.numpy() > 0 negative = x.data.numpy() < 0 zero = x.data.numpy() == 0 ax.plot(x.data.numpy()[positive], y.data.numpy()[positive], c=plt.cm.Set1(0)) ax.plot(x.data.numpy()[negative], y.data.numpy()[negative], c=plt.cm.Set1(0)) ax.plot(x.data.numpy()[zero], y.data.numpy()[zero], '.-', c=plt.cm.Set1(0), label='${0}$'.format(label)) ax.plot(x.data.numpy()[positive], x.grad.data.numpy()[positive], c=plt.cm.Set1(1)) ax.plot(x.data.numpy()[negative], 
x.grad.data.numpy()[negative], c=plt.cm.Set1(1)) ax.plot(x.data.numpy()[zero], x.grad.data.numpy()[zero], 'x-', c=plt.cm.Set1(1), label='${0}^\prime$'.format(label)) ax.grid() ax.legend() fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6,3)) plot(ax1, x_f, y_f, label='f') plot(ax2, x_g, y_g, label='g') fig.suptitle('Derivatives of $f$ and $g$ by PyTorch') fig.savefig('problem4.pdf', bbox_inches='tight')Two Identical Differentiable Functions with different *Derivatives*def f(x: Variable) -> Variable: assert x.requires_grad return 2*x*torch.sign(x)*torch.sign(x) def g(x: Variable) -> Variable: assert x.requires_grad return 2*x x_f = Variable(torch.from_numpy(x), requires_grad=True) y_f = f(x_f) x_g = Variable(torch.from_numpy(x), requires_grad=True) y_g = g(x_g) y_f.backward(torch.ones_like(y_f)) y_g.backward(torch.ones_like(y_g)) fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6,3)) plot(ax1, x_f, y_f, label='f') plot(ax2, x_g, y_g, label='g') fig.suptitle('Derivatives of $f$ and $g$ by PyTorch'); fig.savefig('problem4_differentiable.pdf', bbox_inches='tight')Auto Generate Text for >from theano.sandbox import cuda cuda.use('gpu1') %matplotlib inline import utils; from utils import * from keras.layers import TimeDistributed, Activation from keras.callbacks import ModelCheckpoint from numpy.random import choiceUsing Theano backend.Setuppath = 'text/modu.txt' text = open(path).read() text = text.replace(' ', '') text = text[-200000:] print('corpus length:', len(text)) !tail {path} -n10 chars = sorted(list(set(text))) vocab_size = len(chars)+1 print('total chars: ', vocab_size)total chars: 3057Sometimes it's useful to have a zero value in the dataset, e.g. for paddingchars.insert(0, "\0") ''.join(chars[:16]) char_indices = dict((c, i) for i,c in enumerate(chars)) indices_char = dict((i, c) for i,c in enumerate(chars)) idx = [char_indices[c] for c in text] idx[:10] ''.join(indices_char[i] for i in idx[:20])Our LSTM RNN!Now, we will try to implement the typical structure of RNN - i.e. the rolled one.That is, we cannot use c1, c2, c.... Instead, we will need an array of inputs all at once.seq_length = 100 dataX = [] dataY = [] for i in range(0, len(idx) - seq_length, 1): seq_in = idx[i:i+seq_length] seq_out = idx[i+seq_length] dataX.append(seq_in) dataY.append(seq_out) n_patterns = len(dataX) n_patternsNow that we have prepared our training data we need to transform it so that is it suitable for use with Keras.First we must transform the list of input sequences into the form _[samples, time steps, features]_ expected by an LSTM networkNext, we need to rescale the integers to _[0, 1]_ to make the patterns easiers to learn by the LSTM network that uses the sigmoid activation function by defaultFinally, we need to convert the output patterns into one-hot encoding. This is so that we can configure the network to predict the probability of each of the 47 different characters in the vocabulary (an easier representation) rather than trying to force it to predict precisely the next character.X = np.reshape(dataX, (n_patterns, seq_length, 1)) print(X.shape) X = X / float(vocab_size) y = np_utils.to_categorical(dataY) print(y.shape)(199900, 3057)We can now define our LSTM model. Here we define a single hidden LSTM layer with 256 memory units. The network uses dropout with a probability of 20. 
The output layer is a Dense layer using the softmax activation function to output a probability prediction for each of the 3000+ characters between 0 and 1.model = Sequential() model.add(LSTM(512, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=Adam())The network is slow to train (about 300 seconds per epoch on an Nvidia K520 GPU). Because of the slowness and because of our optimization requirements, we will use model checkpointing to record all of the network weights to file each time an improvement in loss is observed at the end of the epoch. We will use the best set of weights (lowest loss) to instantiate our generative model in the next section.# define the checkpoint filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] model.summary() model.fit(X, y, nb_epoch=4, batch_size=256, callbacks=callbacks_list) # pick a random seed start = np.random.randint(0, len(dataX)-1) # start=-1 pattern = dataX[start] print("Seed:") print("\"", ''.join([indices_char[value] for value in pattern]), "\"") # generate characters for i in range(1000): x = np.reshape(pattern, (1, len(pattern), 1)) x = x / float(n_vocab) prediction = model.predict(x, verbose=0) index = np.argmax(prediction) result = indices_char[index] seq_in = [indices_char[value] for value in pattern] sys.stdout.write(result) pattern.append(index) pattern = pattern[1:len(pattern)] print "\nDone."Stateful model with keras `stateful=True` means that at end of each sequence, don't reset the hidden activations to 0, but leave them as they are. And also make sure that you pass `shuffle=False` when you train the model.A stateful model is easy to create (just add "stateful=True") but harder to train. 
We had to add batchnorm and use LSTM to get reasonable results.When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.bs=64 model=Sequential([ Embedding(vocab_size, n_fac, input_length=cs, batch_input_shape=(bs,cs)), BatchNormalization(), LSTM(n_hidden, return_sequences=True, stateful=True), TimeDistributed(Dense(vocab_size, activation='softmax')), ]) model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())Since we're using a fixed batch shape, we have to ensure our inputs and outputs are a even multiple of the batch size.mx = len(x_rnn)//bs*bs model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=10, shuffle=False)Epoch 1/10 102272/102272 [==============================] - 86s - loss: 4.9404 Epoch 2/10 102272/102272 [==============================] - 84s - loss: 4.2808 Epoch 3/10 102272/102272 [==============================] - 84s - loss: 4.0796 Epoch 4/10 102272/102272 [==============================] - 83s - loss: 3.9579 Epoch 5/10 102272/102272 [==============================] - 83s - loss: 3.8693 Epoch 6/10 102272/102272 [==============================] - 84s - loss: 3.7999 Epoch 7/10 102272/102272 [==============================] - 84s - loss: 3.7424 Epoch 8/10 102272/102272 [==============================] - 84s - loss: 3.6937 Epoch 9/10 102272/102272 [==============================] - 84s - loss: 3.6515 Epoch 10/10 102272/102272 [==============================] - 84s - loss: 3.6142Test modeldef get_next_keras(inp): idxs = [char_indices[c] for c in inp] # np.newaxis is used to add 1 more dimention arrs = np.array(idxs)[np.newaxis, :] p = model.predict(arrs)[0] return chars[np.argmax(p)] model.predict(x_rnn[-64:])[0]Data Wranglingds_1=pd.read_excel('3401055004_OTSP_CountryofCitizenship_Jun21.xlsx', sheet_name='Table 1.1', header=[7]) ds_1=ds_1.drop(ds_1.index[[250,251,252,253,254,255,256,257,258]]) ds_1[datetime.datetime(2020, 11, 1, 0, 0)] = ds_1[datetime.datetime(2020, 11, 1, 0, 0)].astype(float, errors = 'raise') ds_1 ds_1.info() ds_1=ds_1.drop(['SACC code(a)','Country of citizenship(a)'],axis=1) ds_1.isin([0]).all(axis = 1).sum() # profile = ProfileReport(ds_1,title="Pandas Profiling Report") # profile # profile.to_file("C:\\Users\\Moditha\\Desktop\\your_report.html")Save to CSVds_1.to_csv("Arrivals_Data.csv", index=False)Save to JSONds_1.to_json("Arrivals_Data.json",orient='records')Number of Rows and Columnsds_1.shapeCreate New Dataframedf=pd.DataFrame({'Field_Name':ds_1.columns.to_numpy(),'Pandas Data Type':ds_1.dtypes.to_numpy(),'Unique Values':ds_1.nunique().to_numpy(), 'Missing Value Count':ds_1.isnull().sum().to_numpy()}) df df1=pd.DataFrame({'Min Value':ds_1.min().to_numpy(),'Max Value':ds_1.max().to_numpy()}) df1.index=df1.index+1 df1 final_df = pd.concat([df, df1], axis=1, join="outer") final_df=final_df[['Field_Name','Pandas Data Type','Min Value','Max Value','Unique Values','Missing Value Count']] final_dfGraphsds_1=pd.read_excel('3401055004_OTSP_CountryofCitizenship_Jun21.xlsx', sheet_name='Table 1.1', header=[7]) ds_1=ds_1.drop(ds_1.index[[250,251,252,253,254,255,256,257,258]]) ds_1[datetime.datetime(2020, 11, 1, 0, 0)] = ds_1[datetime.datetime(2020, 11, 1, 0, 0)].astype(float, errors = 'raise') ds_1.drop(ds_1[ds_1[datetime.datetime(2016, 9, 1, 0, 0)] <= 10000].index, inplace = True) ds_1=ds_1.drop(ds_1.index[[0]])#outlier ds_1Scatter Plotfig = px.scatter(ds_1, x='Country of citizenship(a)', y=datetime.datetime(2016, 9, 1, 0, 0)) fig['layout'].update(title='Departures Count on 
2016-Sep Vs Country of citizenship', width=1500, height=1500, autosize=False) fig['layout']['yaxis'].update(title='Arrival Count on 2016-Sep') fig.show() fig = px.scatter(ds_1, x=datetime.datetime(2016, 9, 1, 0, 0), y=datetime.datetime(2017, 9, 1, 0, 0)) fig['layout'].update(title='Arrival Count on 2016-Sep Vs 2017-Sep') fig['layout']['xaxis'].update(title='Arrival Count on 2016-Sep') fig['layout']['yaxis'].update(title='Arrival Count on 2017-Sep') fig.show()Bar Graphds_1=ds_1.nlargest(10, [datetime.datetime(2016, 9, 1, 0, 0)]) fig = px.bar(ds_1, x='Country of citizenship(a)', y=datetime.datetime(2016, 9, 1, 0, 0),height=2000) fig['layout'].update(title='Arrival Count on 2016-Sep Vs Country of citizenship', autosize=True) fig['layout']['yaxis'].update(title='Arrival Count on 2016-Sep') fig['layout']['xaxis'].update(title='Country of citizenship(a)') fig.show()Pie Graphfig = px.pie(ds_1,values=datetime.datetime(2016, 9, 1, 0, 0),names='Country of citizenship(a)',height=2000) fig['layout'].update(title='Arrival Count on 2016-Sep Vs Country of citizenship', autosize=True) fig.show()Bubble Graphfig = px.scatter(ds_1, x=datetime.datetime(2016, 9, 1, 0, 0), y=datetime.datetime(2017, 9, 1, 0, 0), color='Country of citizenship(a)', hover_data=['Country of citizenship(a)']) fig['layout']['yaxis'].update(title='Arrival Count on 2017-Sep') fig['layout']['xaxis'].update(title='Arrival Count on 2016-Sep') fig.show()Histogramds_1=pd.read_excel('3401055004_OTSP_CountryofCitizenship_Jun21.xlsx', sheet_name='Table 1.1', header=[7]) ds_1=ds_1.drop(ds_1.index[[250,251,252,253,254,255,256,257,258]]) ds_1[datetime.datetime(2020, 11, 1, 0, 0)] = ds_1[datetime.datetime(2020, 11, 1, 0, 0)].astype(float, errors = 'raise') ds_1.drop(ds_1[ds_1[datetime.datetime(2016, 9, 1, 0, 0)]>=1000].index, inplace = True) ds_1=ds_1.drop(ds_1.index[[0]])#outlier ds_1 fig = px.histogram(ds_1, x=datetime.datetime(2016, 9, 1, 0, 0)) fig['layout'].update(title='No of Country vs Arrival Count on 2016-Sep ') fig['layout']['yaxis'].update(title='No of countries') fig['layout']['xaxis'].update(title='Arrival Count on 2016-Sep less than 1000') fig.show()Correlation Matricsds_1=pd.read_excel('3401055004_OTSP_CountryofCitizenship_Jun21.xlsx', sheet_name='Table 1.1', header=[7]) ds_1=ds_1.drop(ds_1.index[[250,251,252,253,254,255,256,257,258]]) ds_1[datetime.datetime(2020, 11, 1, 0, 0)] = ds_1[datetime.datetime(2020, 11, 1, 0, 0)].astype(float, errors = 'raise') ds_1Spearman Correlation Matricscorrelation_matrix_1=ds_1.corr(method ='spearman') correlation_matrix_1Pearson Correlation Matricscorrelation_matrix_2=ds_1.corr(method ='pearson') correlation_matrix_2Heat Map Using Plotlyds_1.drop(ds_1.iloc[:, 12:59], inplace = True, axis = 1) cor=ds_1.corr(method ='spearman') z = cor.values.tolist() z_text = np.around(z,decimals=3) x = cor.columns.tolist() fig = ff.create_annotated_heatmap(z,x=x,y=x,annotation_text=z_text,colorscale='Viridis',showscale=True, hovertemplate="",colorbar_tickfont_size=10) fig.update_layout( width = 500, height =500,) fig['layout']['font']['size'] = 8 # fig['layout']['xaxis'].rangebreaks[True] fig['layout'].update(title='Correlation Heatmap') # fig.layout.xaxis.rangebreaks[True] fig.show() ds_1.drop(ds_1.iloc[:, 12:59], inplace = True, axis = 1) cor=ds_1.corr(method ='pearson') z = cor.values.tolist() z_text = np.around(z,decimals=3) x = cor.columns.tolist() fig = ff.create_annotated_heatmap(z,x=x,y=x,annotation_text=z_text,colorscale='Viridis',showscale=True, hovertemplate="",colorbar_tickfont_size=10) fig.update_layout( 
width = 500, height = 500,) fig['layout']['font']['size'] =8 fig['layout'].update(title='Correlation Heatmap') fig.show()Folium Mapsimport folium from geopy.geocoders import Nominatim from folium.plugins import MarkerCluster ds_1=pd.read_excel('3401055004_OTSP_CountryofCitizenship_Jun21.xlsx', sheet_name='Table 1.1', header=[7]) ds_1=ds_1.drop(ds_1.index[[250,251,252,253,254,255,256,257,258,119,126,34]]) ds_1[datetime.datetime(2020, 11, 1, 0, 0)] = ds_1[datetime.datetime(2020, 11, 1, 0, 0)].astype(float, errors = 'raise') ds_1=ds_1.nlargest(10, [datetime.datetime(2016, 9, 1, 0, 0)]) ds_1 geolocator=Nominatim(user_agent='Carolingian') places=ds_1['Country of citizenship(a)'].tolist() lat=[] lon=[] for i in places: place = geolocator.geocode(i) if hasattr(place,'latitude') and (place.latitude is not None): x= place.latitude lat.append(x) for i in places: place = geolocator.geocode(i) if hasattr(place,'longitude') and (place.longitude is not None): y=place.longitude lon.append(y) new=pd.DataFrame({'Country':ds_1['Country of citizenship(a)'],'Latitude':lat,'Longitude':lon,'2016-September Arrivals':ds_1[datetime.datetime(2016, 9, 1, 0, 0)]}) new world_map= folium.Map(tiles="cartodbpositron") marker_cluster = MarkerCluster().add_to(world_map) #for each coordinate, create circlemarker of user percent for i in range(len(new)): lat = new.iloc[i]['Latitude'] long = new.iloc[i]['Longitude'] radius = 5 popup_text = """2016-September Arrivals : {}""" popup_text = popup_text.format(new.iloc[i]['2016-September Arrivals']) folium.CircleMarker(location = [lat, long], radius=radius, popup= popup_text, fill =True).add_to(marker_cluster) #show the map world_mapLensing Theory Calculations Examples of using `randomfield.lensing` functions to perform theoretical calculations related to weak lensing.%pylab inline import randomfield print randomfield.__version__ from randomfield.lensing import * from randomfield.cosmotools import calculate_power from randomfield.cosmotools import get_growth_functionDefine some cosmologies to use for testing. We use the fiducial models from Table 1 of Weinberg 2012 with $\Omega_k = 0$ (flat) and $\pm 0.01$ (open/closed), which all have essentially the same CMB power spectra.from astropy.cosmology import LambdaCDM flat_model = LambdaCDM(Ob0=0.045, Om0=0.222+0.045, Ode0=0.733, H0=71.0) open_model = LambdaCDM(Ob0=0.038, Om0=0.186+0.038, Ode0=0.766, H0=77.6) closed_model = LambdaCDM(Ob0=0.052, Om0=0.256+0.052, Ode0=0.702, H0=66.1) assert np.allclose((flat_model.Ok0, open_model.Ok0, closed_model.Ok0), (0.0, +0.01, -0.01), atol=1e-4)Define the redshift grid to use. The same grid is used for lensing masses as sources, so you should usually specify a fine grid even if you only need results for a few source redshifts. 
Redshifts do not need to be equally spaced.z = np.linspace(0.0, 2.5, 251)Calculate the lensing weight function $\omega_E(z, z_{src})$ for each cosmology:flat_weights = calculate_lensing_weights(flat_model, z, scaled_by_h=True) open_weights = calculate_lensing_weights(open_model, z, scaled_by_h=True) closed_weights = calculate_lensing_weights(closed_model, z, scaled_by_h=True)Tabulate the distance functions $D(z)$ and $D_A(z)$ for each cosmology in Mpc/h:import astropy.units as u flat_DC = flat_model.comoving_distance(z).to(u.Mpc).value * flat_model.h open_DC = open_model.comoving_distance(z).to(u.Mpc).value * flat_model.h closed_DC = closed_model.comoving_distance(z).to(u.Mpc).value * flat_model.h flat_DA = flat_model.comoving_transverse_distance(z).to(u.Mpc).value * flat_model.h open_DA = open_model.comoving_transverse_distance(z).to(u.Mpc).value * open_model.h closed_DA = closed_model.comoving_transverse_distance(z).to(u.Mpc).value * closed_model.hPlot the shear-shear weight function $W_{EE}(z_{lens}, z_{src}) = \omega_E(z_{lens}, z_{src})^2 D_A(z_{lens})^3$ for several source redshifts. Note that:- Weights are broadly peaked with a maximum value at $z_{lens} \simeq z_{src}/2$.- Weights are larger for larger source redshifts since more distance source experience more lensing.- Weights increase (decrease) slightly in a closed (open) universe.The different line styles show weights for a flat (solid), open (dashed), or closed (dotted) universe.def plot_weights(): colors = ('r', 'g', 'b', 'y', 'k') for j,iz in enumerate(range(50, 251, 50)): plt.plot(z, flat_weights[iz]**2 * flat_DA**3, color=colors[j], label='$z_{src}$ = %.2f' % z[iz]) plt.plot(z, open_weights[iz]**2 * open_DA**3, color=colors[j], ls='--') plt.plot(z, closed_weights[iz]**2 * closed_DA**3, color=colors[j], ls=':') plt.legend(loc='upper right') plt.yscale('log') plt.xlabel('Lens redshift $z_{lens}$') plt.ylabel('Lensing weight function (h/Mpc)') plt.xlim(0,z[-1]) y_min, y_max = plt.gca().get_ylim() plt.ylim(2e-4 * y_max, y_max) plt.grid() plt.show() plot_weights()Define the grid of 2D wavenumbers $\ell$ to use. Use log-spaced values covering the mostly linear regime. We do not need very fine spacing here since the shear power varies slowly with $\ell$.ell = np.logspace(1., 3., 101)Compare the distance functions. Line styles are solid for $D_C(z)$ and dashed for $D_A(z)$.plt.plot(z, open_DC, 'r-', label='open') plt.plot(z, open_DA, 'r--') plt.plot(z, flat_DC, 'g-', label='flat') plt.plot(z, closed_DC, 'b-',label='closed') plt.plot(z, closed_DA, 'b--') plt.grid() plt.xlabel('Redshift $z$') plt.ylabel('Comoving transverse distance $D_A(z)$ (Mpc/h)') plt.legend(loc='upper left') plt.show()Define the range of 3D wavenumbers $k = \ell/D_A$ needed to cover the calculation of the shear power. We have to set a minimum value of $D_A$ to use in order to establish a finite upper limit on $k$. Units are Mpc/h.izmin = 10 print 'Shear power calculation truncated at z >= %.2f' % z[izmin] DA_min = min(flat_DA[izmin], open_DA[izmin], closed_DA[izmin]) DA_max = max(flat_DA[-1], open_DA[-1], closed_DA[-1]) print 'Using %.1f Mpc/h < DA < %.1f Mpc/h' % (DA_min, DA_max) k_min, k_max = ell[0] / DA_max, ell[-1] / DA_min print 'Using %.4f h/Mpc <= k <= %.4f h/Mpc' % (k_min, k_max)Shear power calculation truncated at z >= 0.10 Using 292.9 Mpc/h < DA < 4408.6 Mpc/h Using 0.0023 h/Mpc <= k <= 3.4144 h/MpcCalculate the matter power at z=0 for each of the fiducial cosmologies using the optional CLASS package. 
Units are h/Mpc for $k$ and (Mpc/h)**3 for $P(k)$.flat_power = calculate_power(flat_model, k_min=k_min, k_max=k_max, scaled_by_h=True) open_power = calculate_power(open_model, k_min=k_min, k_max=k_max, scaled_by_h=True) closed_power = calculate_power(closed_model, k_min=k_min, k_max=k_max, scaled_by_h=True) plt.plot(open_power['k'], open_power['Pk'], label='open') plt.plot(flat_power['k'], flat_power['Pk'], label='flat') plt.plot(closed_power['k'], closed_power['Pk'], label='closed') plt.legend(loc='upper right') plt.yscale('log') plt.xscale('log') plt.xlabel('3D wavenumber $k$ (h/Mpc)') plt.ylabel('Linear matter power $P(k)$ (Mpc/h)$^3$') plt.grid() plt.show()Calculate the growth function for each cosmology:flat_growth = get_growth_function(flat_model, z) open_growth = get_growth_function(open_model, z) closed_growth = get_growth_function(closed_model, z) plt.plot(z, open_growth, label='open') plt.plot(z, flat_growth, label='flat') plt.plot(z, closed_growth, label='closed') plt.legend(loc='upper right') plt.xlabel('Redshift $z$') plt.ylabel('Growth function $G(z)$') plt.grid() plt.show()Calculate the 3D variance contributions $V(\ell, D_A)$ for each cosmology, with $D_A > D_A(z_{min})$.flat_variances = tabulate_3D_variances(ell, flat_DA[izmin:], flat_growth[izmin:], flat_power) open_variances = tabulate_3D_variances(ell, open_DA[izmin:], open_growth[izmin:], open_power) closed_variances = tabulate_3D_variances(ell, closed_DA[izmin:], closed_growth[izmin:], closed_power) colors = ('r', 'g', 'b') for j,iell in enumerate(range(0, 101, 50)): plt.plot(z[izmin:], flat_variances[iell], color=colors[j], label='$\ell = %.0f$' % ell[iell]) plt.plot(z[izmin:], open_variances[iell], color=colors[j], ls='--') plt.plot(z[izmin:], closed_variances[iell], color=colors[j], ls=':') plt.legend(loc='upper right') plt.yscale('log') plt.xlabel('Lens redshift $z_{src}$') plt.ylabel('3D variance $(\pi/\ell) \Delta^2_{\delta}(k = \ell/D_A, z_{lens})$') plt.grid() plt.show()Shear-Shear Auto Power Calculate the shear power $\Delta^2_{EE}(z_{src}, \ell)$ as a function of source position and 2D wavenumber $\ell$.flat_shear_power = calculate_shear_power(flat_DC[izmin:], flat_DA[izmin:], flat_weights[izmin:,izmin:], flat_variances) open_shear_power = calculate_shear_power(open_DC[izmin:], open_DA[izmin:], open_weights[izmin:,izmin:], open_variances) closed_shear_power = calculate_shear_power(closed_DC[izmin:], closed_DA[izmin:], closed_weights[izmin:,izmin:], closed_variances)Compare the calculated shear powers. 
Line styles are solid for the flat universe, dashed for the open universe, and dotted for the closed universe.def plot_shear_power(): colors = ('r', 'g', 'b', 'y', 'k') for j,iz in enumerate(range(50, 251, 50)): plt.plot(ell, flat_shear_power[iz - izmin], color=colors[j], label='$z_{src}$ = %.2f' % z[iz]) plt.plot(ell, open_shear_power[iz - izmin], color=colors[j], ls='--') plt.plot(ell, closed_shear_power[iz - izmin], color=colors[j], ls=':') plt.legend(loc='upper left') plt.yscale('log') plt.xscale('log') plt.xlabel('2D wavenumber $\ell$') plt.ylabel('Lensing shear power $\Delta^2_{EE}(z_{src}, \ell)$') plt.xlim(ell[0],ell[-1]) y_max = max(np.max(flat_shear_power), np.max(open_shear_power), np.max(closed_shear_power)) plt.ylim(None, 1.5 * y_max) plt.grid() #plt.savefig('shearpower.png') plt.show() plot_shear_power()Shear-Shear and Shear-Galaxy Cross Power Calculate the flat universe shear-shear cross power $\Delta^2_{EE}(z_1, z_2, \ell)$ and shear-galaxy cross power $\Delta^2_{Eg}(z_1, z_2, \ell)$ as a function of source positions $z_1$ and $z_2$, and 2D wavenumber $\ell$.flat_EE_cross = calculate_shear_power(flat_DC[izmin:], flat_DA[izmin:], flat_weights[izmin:,izmin:], flat_variances, mode='shear-shear-cross') flat_Eg_cross = calculate_shear_power(flat_DC[izmin:], flat_DA[izmin:], flat_weights[izmin:,izmin:], flat_variances, mode='shear-galaxy-cross') def plot_vs_z1_z2(data, exponent=0, label='f(z_1,z_2)'): scale = 10**exponent if np.max(data) == 0: cmap = 'CMRmap' else: cmap = 'CMRmap_r' plt.pcolormesh(z[izmin:], z[izmin:], scale * data, cmap=cmap) plt.colorbar(pad=0.05) cs = plt.contour(z[izmin:], z[izmin:], scale * data, colors='w') plt.clabel(cs, fmt='%.2f') plt.xlim(z[izmin],z[-1]) plt.ylim(z[izmin],z[-1]) plt.gca().set_aspect(1) plt.grid() plt.ylabel('Galaxy redshift $z_1$') plt.xlabel('Galaxy redshift $z_2$') if exponent != 0: label = ('$10^{%d} ' % exponent) + label plt.annotate(label, xy=(0.5, 0.01), xytext=(0.5, 0.01), color='k', xycoords='axes fraction', textcoords='axes fraction', horizontalalignment='center', verticalalignment='bottom', fontsize='large', fontweight='bold') plt.figure(figsize=(12,6.5)) # Plot EE cross spectra on the top row for ell = 10, 100, 1000. plt.subplot(2,3,1) plot_vs_z1_z2(flat_EE_cross[:,:,0], exponent=6, label='\Delta^2_{EE}(z_1,z_2,\ell=%.0f)$' % ell[0]) plt.subplot(2,3,2) plot_vs_z1_z2(flat_EE_cross[:,:,50], exponent=6, label='\Delta^2_{EE}(z_1,z_2,\ell=%.0f)$' % ell[50]) plt.subplot(2,3,3) plot_vs_z1_z2(flat_EE_cross[:,:,100], exponent=6, label='\Delta^2_{EE}(z_1,z_2,\ell=%.0f)$' % ell[100]) # Plot Eg cross spectra on the bottom row for ell = 10, 100, 1000. 
plt.subplot(2,3,4) plot_vs_z1_z2(flat_Eg_cross[:,:,0], exponent=2, label='\Delta^2_{Eg}(z_1,z_2,\ell=%.0f)/b_g$' % ell[0]) plt.subplot(2,3,5) plot_vs_z1_z2(flat_Eg_cross[:,:,50], exponent=2, label='\Delta^2_{Eg}(z_1,z_2,\ell=%.0f)/b_g$' % ell[50]) plt.subplot(2,3,6) plot_vs_z1_z2(flat_Eg_cross[:,:,100], exponent=2, label='\Delta^2_{Eg}(z_1,z_2,\ell=%.0f)/b_g$' % ell[100]) # plt.tight_layout() #plt.savefig('cross-power.png') plt.show()/Users/david/anaconda/lib/python2.7/site-packages/matplotlib/text.py:52: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal if rotation in ('horizontal', None): /Users/david/anaconda/lib/python2.7/site-packages/matplotlib/text.py:54: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal elif rotation == 'vertical':Shear-Shear and Shear-Galaxy Cross-Correlation Functions Define a log-spaced grid of angular separations where the correlation functions should be tabulated. Separations must be within the limits covered by our $\ell$ range, but do not need to be exactly $2\pi / \ell$.theta_rad = 2 * np.pi / ell[::-1] theta_arcmin = 60 * np.rad2deg(theta_rad)Calculate the shear-shear correlation functions, $\xi_+(z_1,z_2,\Delta\theta)$ and $\xi_-(z_1,z_2,\Delta\theta)$, and the shear-galaxy correlation function $\xi_{Eg}(z_1,z_2,\Delta\theta)$.flat_EE_xi_p = calculate_correlation_function(flat_EE_cross, ell, theta_rad, order=0) flat_EE_xi_m = calculate_correlation_function(flat_EE_cross, ell, theta_rad, order=4) flat_Eg_xi = calculate_correlation_function(flat_Eg_cross, ell, theta_rad, order=2) plt.figure(figsize=(12,9.75)) # Plot xi+ cross-correlations on the top row for 3 values of dtheta plt.subplot(3,3,1) plot_vs_z1_z2(flat_EE_xi_p[:,:,0], exponent=7, label="\\xi_+(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[0]) plt.subplot(3,3,2) plot_vs_z1_z2(flat_EE_xi_p[:,:,50], exponent=7, label="\\xi_+(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[50]) plt.subplot(3,3,3) plot_vs_z1_z2(flat_EE_xi_p[:,:,100], exponent=7, label="\\xi_+(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[100]) # Plot xi- cross-correlations on the middle row for the same 3 values of dtheta plt.subplot(3,3,4) plot_vs_z1_z2(flat_EE_xi_m[:,:,0], exponent=7, label="\\xi_-(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[0]) plt.subplot(3,3,5) plot_vs_z1_z2(flat_EE_xi_m[:,:,50], exponent=7, label="\\xi_-(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[50]) plt.subplot(3,3,6) plot_vs_z1_z2(flat_EE_xi_m[:,:,100], exponent=7, label="\\xi_-(z_1,z_2,\Delta\\theta=%.1f')$" % theta_arcmin[100]) # Plot xi(Eg) cross-correlations on the bottom row for the same 3 values of dtheta plt.subplot(3,3,7) plot_vs_z1_z2(flat_Eg_xi[:,:,0], exponent=3, label="\\xi_{Eg}(z_1,z_2,\Delta\\theta=%.1f')/b_g$" % theta_arcmin[0]) plt.subplot(3,3,8) plot_vs_z1_z2(flat_Eg_xi[:,:,50], exponent=3, label="\\xi_{Eg}(z_1,z_2,\Delta\\theta=%.1f')/b_g$" % theta_arcmin[50]) plt.subplot(3,3,9) plot_vs_z1_z2(flat_Eg_xi[:,:,100], exponent=3, label="\\xi_{Eg}(z_1,z_2,\Delta\\theta=%.1f')/b_g$" % theta_arcmin[100]) # plt.tight_layout() #plt.savefig('cross-xi.png') plt.show()Tomographic Predictions for DES Photo-z Bins Approximately reproduce the DES photo-z redshift bins of Becker 2015 (Figure 3):bin1 = np.random.normal(0.55, 0.08, 50000) bin1 = np.append(bin1, np.random.normal(0.35, 0.09, 50000)) bin2 = np.random.normal(0.70, 0.12, 100000) bin3 = np.random.normal(0.90, 0.12, 50000) bin3 = np.append(bin3, 
np.random.normal(1.20, 0.18, 50000)) plt.hist(bin1, bins=100, range=(0, 1.8), histtype='stepfilled', edgecolor='blue', facecolor='none'); plt.hist(bin2, bins=100, range=(0, 1.8), histtype='stepfilled', edgecolor='red', facecolor='none'); plt.hist(bin3, bins=100, range=(0, 1.8), histtype='stepfilled', edgecolor='green', facecolor='none');Calculate normalized weights for each pair of tomographic bins, on the same $(z_1, z_2)$ grid used to calculate cross spectra and cross correlations.def calculate_weights(data1, data2, points): num_points = len(points) # Histogram the datasets using the points as bin edges, resulting in num_points - 1 bin contents. data1_hist, edges = np.histogram(data1, points) data2_hist, edges = np.histogram(data2, points) # Calculate the joint pdf in (num_points - 1) x (num_points - 1) bins data1_pdf = data1_hist.astype(float) / data1.size data2_pdf = data2_hist.astype(float) / data2.size data12_pdf = data1_pdf[:, np.newaxis] * data2_pdf # Split each bin's probability equally between its four corner points. weights = np.zeros((num_points, num_points), dtype=float) weights[:-1, :-1] += data12_pdf weights[:-1, 1:] += data12_pdf weights[1:, :-1] += data12_pdf weights[1:, 1:] += data12_pdf weights /= 4 return weights w11 = calculate_weights(bin1, bin1, z[izmin:]) w21 = calculate_weights(bin2, bin1, z[izmin:]) w31 = calculate_weights(bin3, bin1, z[izmin:]) w22 = calculate_weights(bin2, bin2, z[izmin:]) w32 = calculate_weights(bin3, bin2, z[izmin:]) w33 = calculate_weights(bin3, bin3, z[izmin:]) plt.contour(z[izmin:], z[izmin:], w11, 2, colors='r') plt.contour(z[izmin:], z[izmin:], w21, 2, colors='k') plt.contour(z[izmin:], z[izmin:], w31, 2, colors='g') plt.contour(z[izmin:], z[izmin:], w22, 2, colors='r') plt.contour(z[izmin:], z[izmin:], w32, 2, colors='k') plt.contour(z[izmin:], z[izmin:], w33, 2, colors='r') plt.xlim(0, 1.5) plt.ylim(0, 1.5) plt.ylabel('Galaxy redshift $z_1$') plt.xlabel('Galaxy redshift $z_2$') plt.gca().set_aspect(1)Integrate the $\xi_\pm(\theta)$ correlation functions over the PDF of each pair of bins:flat_EE_xi_p_11 = np.sum(flat_EE_xi_p * w11[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_p_21 = np.sum(flat_EE_xi_p * w21[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_p_31 = np.sum(flat_EE_xi_p * w31[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_p_22 = np.sum(flat_EE_xi_p * w22[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_p_32 = np.sum(flat_EE_xi_p * w32[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_p_33 = np.sum(flat_EE_xi_p * w33[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_11 = np.sum(flat_EE_xi_m * w11[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_21 = np.sum(flat_EE_xi_m * w21[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_31 = np.sum(flat_EE_xi_m * w31[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_22 = np.sum(flat_EE_xi_m * w22[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_32 = np.sum(flat_EE_xi_m * w32[:, :, np.newaxis], axis=(0, 1)) flat_EE_xi_m_33 = np.sum(flat_EE_xi_m * w33[:, :, np.newaxis], axis=(0, 1)) def plot_xi(axes, row, col, xi, label): axis = axes[row, col] axis.plot(theta_arcmin, 1e4 * theta_arcmin * xi) axis.set_xscale('log') if row == len(axes) - 1: axis.set_xlabel('$\\theta$ [arcmin]') if col == 0: axis.set_ylabel('$\\theta \\times \\xi_%s(\\theta) / 10^{-4}$' % ('-' if row % 2 else '+')) axis.set_xlim(2., 300.)
axis.set_ylim(-4.25, +8.5) axis.grid(True) axis.annotate(label, xy=(0.1,0.9), xytext=(0.1,0.9), xycoords='axes fraction', textcoords='axes fraction', horizontalalignment='left', verticalalignment='top', fontsize='large', fontweight='bold')Try to approximately reproduce Fig.2 of Becker 2015 (including the large y-axis range, which makes precise comparisons difficult). Note that our calculation only goes down to about 20 arcsecs, so does not cover the full range of the DES plot.fig,axes = plt.subplots(6, 3, sharex='col', sharey='row', figsize=(12, 12)) fig.subplots_adjust(hspace=0, wspace=0) plot_xi(axes, 0, 0, flat_EE_xi_p_33, '3-3') plot_xi(axes, 1, 0, flat_EE_xi_m_33, '3-3') plot_xi(axes, 2, 0, flat_EE_xi_p_32, '3-2') plot_xi(axes, 3, 0, flat_EE_xi_m_32, '3-2') plot_xi(axes, 2, 1, flat_EE_xi_p_22, '2-2') plot_xi(axes, 3, 1, flat_EE_xi_m_22, '2-2') plot_xi(axes, 4, 0, flat_EE_xi_p_31, '3-1') plot_xi(axes, 5, 0, flat_EE_xi_m_31, '3-1') plot_xi(axes, 4, 1, flat_EE_xi_p_21, '2-1') plot_xi(axes, 5, 1, flat_EE_xi_m_21, '2-1') plot_xi(axes, 4, 2, flat_EE_xi_p_11, '1-1') plot_xi(axes, 5, 2, flat_EE_xi_m_11, '1-1') #plt.savefig('DES-fig2.png') plt.show()Taylor problem 5.32last revised: 12-Jan-2019 by [] **Replace by appropriate expressions.** The equation for an underdamped oscillator, such as a mass on the end of a spring, takes the form $\begin{align} x(t) = e^{-\beta t} [B_1 \cos(\omega_1 t) + B_2 \sin(\omega_1 t)]\end{align}$where$\begin{align} \omega_1 = \sqrt{\omega_0^2 - \beta^2}\end{align}$and the mass is released from rest at position $x_0$ at $t=0$. **Goal: plot $x(t)$ for $0 \leq t \leq 20$, with $x_0 = 1$, $\omega_0=1$, and $\beta = 0.$, 0.02, 0.1, 0.3, and 1.**import numpy as np import matplotlib.pyplot as plt def underdamped(t, beta, omega_0=1, x_0=1): """Solution x(t) for an underdamped harmonic oscillator.""" omega_1 = np.sqrt(omega_0**2 - beta**2) B_1 = 2 B_2 = 5 return np.exp(-beta*t) \ * ( B_1 * np.cos(omega_1*t) + B_2 * np.sin(omega_1*t) ) t_pts = np.arange(0., 20., .01) betas = [0., 0.02, 0.1, 0.3, 0.9999] fig = plt.figure(figsize=(10,6)) # look up "python enumerate" to find out how this works! for i, beta in enumerate(betas): ax = fig.add_subplot(2, 3, i+1) ax.plot(t_pts, underdamped(t_pts, beta), color='blue') ax.set_title(rf'$\beta = {beta:.2f}$') ax.set_xlabel('t') ax.set_ylabel('x(t)') ax.set_ylim(-1.1,1.1) ax.axhline(0., color='black', alpha=0.3) # lightened black zero line fig.tight_layout() ### add code to print the figureBonus: Widgetized!from ipywidgets import interact, fixed import ipywidgets as widgets omega_0 = 1. def plot_beta(beta): """Plot function for underdamped harmonic oscillator.""" t_pts = np.arange(0., 20., .01) fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(t_pts, underdamped(t_pts, beta), color='blue') ax.set_title(rf'$\beta = {beta:.2f}$') ax.set_xlabel('t') ax.set_ylabel('x(t)') ax.set_ylim(-1.1,1.1) ax.axhline(0., color='black', alpha=0.3) fig.tight_layout() max_value = omega_0 - 0.0001 interact(plot_beta, beta=widgets.FloatSlider(min=0., max=max_value, step=0.01, value=0., readout_format='.2f', continuous_update=False));Now let's allow for complex numbers! This will enable us to take $\beta > \omega_0$.# numpy.lib.scimath version of sqrt handles complex numbers. # numpy exp, cos, and sin already can. import numpy.lib.scimath as smath def all_beta(t, beta, omega_0=1, x_0=1): """Solution x(t) for damped harmonic oscillator, allowing for overdamped as well as underdamped solution. 
""" omega_1 = smath.sqrt(omega_0**2 - beta**2) return np.real( x_0 * np.exp(-beta*t) \ * (np.cos(omega_1*t) + (beta/omega_1)*np.sin(omega_1*t)) ) from ipywidgets import interact, fixed import ipywidgets as widgets omega_0 = 1. def plot_all_beta(beta): """Plot of x(t) for damped harmonic oscillator, allowing for overdamped as well as underdamped cases.""" t_pts = np.arange(0., 20., .01) fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(t_pts, all_beta(t_pts, beta), color='blue') ax.set_title(rf'$\beta = {beta:.2f}$') ax.set_xlabel('t') ax.set_ylabel('x(t)') ax.set_ylim(-1.1,1.1) ax.axhline(0., color='black', alpha=0.3) fig.tight_layout() interact(plot_all_beta, beta=widgets.FloatSlider(min=0., max=2, step=0.01, value=0., readout_format='.2f', continuous_update=False));!pip install datasets from datasets import load_dataset, load_metric, list_datasets, list_metrics datasets = list_datasets() from pprint import pprint pprint(datasets, compact=True) glue_dataset = list_datasets(with_details=True)[datasets.index('glue')] pprint(glue_dataset.__dict__) # It's a simple python dataclass actual_task = 'sst2' dataset = load_dataset("glue", actual_task) metric = load_metric('glue', actual_task) dataset dataset["train"][100] import datasets import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, datasets.ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(dataset["train"]) print(dataset.column_names) print(len(dataset["train"])) print(len(dataset["validation"])) print(len(dataset["test"])) dataset["train"].featuresdf = dataset["train"].to_pandas() df.hist() sns.countplot(x=dataset["train"]['labels']) plt.xlabel('labels');Tokenize datasetMIT LicenseCopyright (c) 2021 , , , , , , Permission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in allcopies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THESOFTWARE. Análisis de tweetsEste notebook analiza la toxicidad de los tweets que nos provee el dataset *dataset_newtral_ucm.csv* según las columnas *toxicity* y *degree_predicted*. 
Podremos ver de manera ordenada la toxicidad media de distintos partidos, políticos y palabras.import pandas as pd import numpy as np !python --version if 'google.colab' in str(get_ipython()): from google.colab import drive drive.mount('/content/drive') df = pd.read_csv('/content/drive/Shareddrives/ETICA/dataset_newtral_ucm.csv') else: df = pd.read_csv('../data/dataset_newtral_ucm.csv') df.head(5) #La lista de partidos a los que están afiliados los distintos políticos que twittean partidos = df.party_slug.unique() partidos = partidos[~pd.isna(partidos)] print('Partidos: ', partidos)Partidos: ['partido-popular' 'psoe' 'vox' 'ciudadanos' 'podemos' 'en-comu-podem' 'catalunya-en-comu' 'en-marea' 'sin-partido' 'izquierda-unida' 'unidas-podemos']Toxicidad de los distintos partidos### Calcular toxicidad 'media' de cada partido según sus tweets ### tox_media = {i : 0 for i in partidos} # Toxicidad "media" de cada partido for p in partidos: toxicity = tox_media.get(p) i = 0 for idx, row in df[df['party_slug'] == p].iterrows(): toxicity += row.toxicity #Usamos columna 'toxicity' i += 1 tox_media.update({p: toxicity/i}) #Parece que los políticos de VOX twittean de manera más tóxica según esta columna sorted(tox_media.items(), key=lambda item: item[1], reverse=True) ### Calcular degree 'medio' de cada partido según sus tweets ### deg_medio = {i : 0 for i in partidos} # Toxicidad "media" de cada partido for p in partidos: toxicity = deg_medio.get(p) i = 0 for idx, row in df[df['party_slug'] == p].iterrows(): toxicity += row.degree_predicted#Usamos columna 'degree_predicted' i += 1 deg_medio.update({p: toxicity/i}) #Parece que los políticos de VOX twittean de manera más tóxica según esta columna sorted(deg_medio.items(), key=lambda item: item[1], reverse=True) #nº tweets con toxicidad "máxima" por partido utilizando la columna 'degree_predicted' for p in partidos: print(p, np.count_nonzero(df[df['party_slug'] == p]['degree_predicted'] == 2))partido-popular 467 psoe 150 vox 477 ciudadanos 243 podemos 214 en-comu-podem 0 catalunya-en-comu 80 en-marea 1 sin-partido 15 izquierda-unida 1 unidas-podemos 3Toxicidad de los distintos políticosEn esta sección, se ordenarán de mas a menos tóxicos, los políticos de todos los partidos.#Dataframe político - partido politicos_part_df = df.loc[:, df.columns.intersection(['slug','party_slug',])] politicos_part_df = politicos_part_df.drop_duplicates().dropna() #Así están todos los políticos ordenados por partido de mayor a menor toxicidad por ambos algoritmos (columnas 'toxicity' y 'degree_predicted') iterator = 0 for pa in partidos: tox_media_ = {i : 0 for i in politicos_part_df.loc[df['party_slug'] == pa]['slug']} deg_medio_ = {i : 0 for i in politicos_part_df.loc[df['party_slug'] == pa]['slug']} for p in politicos_part_df.loc[df['party_slug'] == pa]['slug']: toxicityg = tox_media_.get(p) toxicity = deg_medio_.get(p) i = 0 for idx, row in df[df['slug'] == p].iterrows(): toxicityg += row.toxicity toxicity += row.degree_predicted i += 1 tox_media_.update({p: toxicityg/i}) deg_medio_.update({p: toxicity/i}) if iterator == 0: tox_media_pp = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_pp = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 1: tox_media_psoe = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_psoe = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 2: tox_media_vox = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_vox = 
sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 3: tox_media_cs = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_cs = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 4: tox_media_pdms = tox_media_ deg_medio_pdms = deg_medio_ elif iterator == 5: tox_media_ecp = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_ecp = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 6: tox_media_cec = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_cec = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 7: tox_media_em = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_em = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 8: tox_media_sp = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_sp = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 9: tox_media_iu = sorted(tox_media_.items(), key=lambda x: x[1], reverse=True) deg_medio_iu = sorted(deg_medio_.items(), key=lambda x: x[1], reverse=True) elif iterator == 10: tox_media_up = tox_media_ deg_medio_up = deg_medio_ iterator += 1 def unir_diccionarios(x, y): z = x.copy() z.update(y) return z tox_media_podemos = sorted(unir_diccionarios(tox_media_pdms, tox_media_up).items(), key=lambda x: x[1], reverse=True) deg_medio_podemos = sorted(unir_diccionarios(deg_medio_pdms, deg_medio_up).items(), key=lambda x: x[1], reverse=True)Políticos de Vox#columna 'toxicity' #El político que twittea de manera más tóxica dentro de Vox es tox_media_vox #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de Vox es Antonio Sal deg_medio_voxPolíticos del PP#columna 'toxicity' #El político que twittea de manera más tóxica dentro de PP es tox_media_pp #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de PP es deg_medio_ppPolíticos del PSOE#columna 'toxicity' #El político que twittea de manera más tóxica dentro del PSOE es Zaida Cantera de Castro tox_media_psoe #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro del PSOE es Zaida Cantera de Castro deg_medio_psoePolíticos de Ciudadanos#columna 'toxicity' #El político que twittea de manera más tóxica dentro de Cs es tox_media_cs #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de Cs es - deg_medio_csPolíticos de Unidas Podemos#columna 'toxicity' #El político que twittea de manera más tóxica dentro de Podemos es tox_media_podemos #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de Podemos es deg_medio_podemosPolíticos de En Comú Podem#columna 'toxicity' tox_media_ecp #columna 'degree_predicted' deg_medio_ecpPolíticos de Catalunya En Comú#columna 'toxicity' #El político que twittea de manera más tóxica dentro de Catalunya En Comú es tox_media_cec #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de Catalunya En Comú es Jaume Asens i Llodrà deg_medio_cecPolíticos de En Marea#columna 'toxicity' tox_media_em #columna 'degree_predicted' deg_medio_emPolíticos de Izquierda Unida#columna 'toxicity' #El político que twittea de manera más tóxica dentro de Izquierda Unida es tox_media_iu #columna 'degree_predicted' #El político que twittea de manera más tóxica dentro de Izquierda Unida es deg_medio_iuPolíticos sin Partido#columna 'toxicity' #El político que twittea de manera más tóxica 
sin partido es tox_media_sp #columna 'degree_predicted' #El político que twittea de manera más tóxica sin patrtido es deg_medio_spToxicidad de todos los políticos independientemente de partido#Todos los políticos ordenados de mayor a menor toxicidad independientemente de partido por ambos algoritmos politicos_toxicidad_ord = sorted(dict(tox_media_pp + tox_media_psoe + tox_media_vox + tox_media_cs + tox_media_podemos + tox_media_ecp + tox_media_cec + tox_media_em + tox_media_sp + tox_media_iu).items(), key=lambda x: x[1], reverse=True) politicos_gradomed_ord = sorted(dict(deg_medio_pp + deg_medio_psoe + deg_medio_vox + deg_medio_cs + deg_medio_podemos + deg_medio_ecp + deg_medio_cec + deg_medio_em + deg_medio_sp + deg_medio_iu).items(), key=lambda x: x[1], reverse=True) #Miramos la toxicidad según 'toxicity' de todos los políticos de todos los partidos #El político más tóxico es politicos_toxicidad_ord #Miramos la toxicidad según 'degree_predicted' de todos los políticos de todos los partidos #Los políticos más tóxicos son y politicos_gradomed_ordDiccionario de palabras tóxicasimport nltk from nltk.corpus import stopwords nltk.download('stopwords') from nltk.tokenize import word_tokenize import re # Según columna 'toxicity' # tox_palabras = {i : (0,0) for i in dicc_palabras} # Toxicidad "media" de cada palabra tox_palabras = {} #no utilizamos un diccionario con las palabras en español, solo nos interesan las palabras que salen en los tweets for idx,row in df.iterrows(): for tw in row.text.split(): if (not tw.startswith('https')) and (not tw.startswith('@')) and (not tw.startswith('#')): for c in "\"“!':,;-.$%&~#ºª()*+/\@¿?¡^`´0123456789|[]”–=_«»": tw = tw.replace(c, "") try: toxicity = tox_palabras.get(tw)[0] freq = tox_palabras.get(tw)[1] toxicity += row.toxicity freq += 1 tox_palabras.update({tw: (toxicity, freq)}) except: tox_palabras.update({tw: (row.toxicity, 1)}) toxWords = {i : (tox_palabras.get(i)[0]/tox_palabras.get(i)[1],tox_palabras.get(i)[1]) for i in tox_palabras if (i not in stopwords.words('spanish'))} sorted(toxWords.items(), key=lambda x: (x[1][0],x[1][1]), reverse=True) # Según columna 'degree_predicted' # tox_palabras = {i : (0,0) for i in dicc_palabras} # Toxicidad "media" de cada palabra tox_palabras = {} #no utilizamos un diccionario con las palabras en español, solo nos interesan las palabras q salen en los tweets for idx,row in df.iterrows(): for tw in row.text.split(): if (not tw.startswith('https')) and (not tw.startswith('@')) and (not tw.startswith('#')): for c in "\"“!':,;-.$%&~#ºª()*+/\@¿?¡^`´0123456789|[]”–=_«»": tw = tw.replace(c, "") try: toxicity = tox_palabras.get(tw)[0] freq = tox_palabras.get(tw)[1] toxicity += row.degree_predicted freq += 1 tox_palabras.update({tw: (toxicity, freq)}) except: tox_palabras.update({tw: (row.degree_predicted, 1)}) toxWords = {i : (tox_palabras.get(i)[0]/tox_palabras.get(i)[1],tox_palabras.get(i)[1]) for i in tox_palabras if (i not in stopwords.words('spanish'))} sorted(toxWords.items(), key=lambda x: x[1], reverse=True)적용사례 : 오일러 베르누이 외팔보Application Example : Euler-Bernoulli Cantileverimport ode_solver import scipy.integrate as si오일러 베르누이 외팔보Euler-Bernoulli Cantilever Ref : Wikipedia contributors, 'Euler–Bernoulli beam theory', Wikipedia, The Free Encyclopedia, 19 November 2018, 22:12 UTC, https://en.wikipedia.org/w/index.php?title=Euler%E2%80%93Bernoulli_beam_theory&oldid=869647128 [accessed 24 November 2018] 외팔보는 한쪽 끝에서 처짐과 기울기가 0인 보 이다.A cantilever is a beam with both zero deflection and rotation at one end. 
오일러 베르누이 보 이론은 분포하중과 보의 처짐은 다음과 같은 관계를 가진다고 가정한다.Euler-Bernoulli beam theory assumes that the deflection of a beam and the distributed load has following relationship. $$\frac{d^2}{dx^2} \left( EI \frac{d^2w(x)}{dx^2} \right) = q(x)$$ 여기서 $w(x)$와 $q(x)$는 각각 보의 위치 $x$에서의 $z$방향 처짐과 분포하중이다.Here, $w(x)$ and $q$ are, respectively, $z$ directional deflection and distributed load at a location $x$ of the beam. 단순화 하기 위해 $EI$는 일정하다고 가정하자.To simplify, let's assume $EI$ is constant. $$E[Nm^{-2}]I[m^4] \frac{d^4w(x)}{dx^4}[m^{-3}] = q(x)[Nm^{-1}]$$ 상태 변수 $\mathbb{r}(x)$ 를 다음과 같이 정해 보자.Let state variable $\mathbb{r}(x)$ be as follows. $$\mathbb{r}(x) = \begin{pmatrix}r_0\\r_1\\r_2\\r_3\\\end{pmatrix} = \begin{pmatrix}w(x) \\\frac{d}{dx}w(x) \\\frac{d^2}{dx^2}w(x) \\\frac{d^3}{dx^3}w(x) \\\end{pmatrix}= \begin{pmatrix}w(x) \\\theta(x) \\(EI)^{-1}M(x) \\(EI)^{-1}V(x) \\\end{pmatrix}$$ 여기서 $\theta(x), M(x), V(x)$ 는 각각 $x$ 에서의 기울기, 모멘트, 전단력이다.Here, $\theta(x), M(x), V(x)$ are, repectively, slope, moment, and shear force at $x$. 미분해보자.Let's differentiate. $$\frac{d}{dx}\mathbb{r}(x) = \begin{pmatrix}\frac{d}{dx}w(x) \\\frac{d^2}{dx^2}w(x) \\\frac{d^3}{dx^3}w(x) \\\frac{d^4}{dx^4}w(x) \\\end{pmatrix}=\begin{pmatrix}r_1\\r_2\\r_3\\(EI)^{-1}q(x) \\\end{pmatrix}$$ 행렬로 다시 써 보면 다음과 같다.Let's rewrite using matrices. $$\frac{d}{dx}\mathbb{r}(x) = \begin{bmatrix}0 & 1 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 1\\0 & 0 & 0 & 0\\\end{bmatrix}\begin{pmatrix}r_0\\r_1\\r_2\\r_3 \\\end{pmatrix}+\begin{pmatrix}0\\0\\0\\(EI)^{-1} \\\end{pmatrix}q(x)$$ $x=0$ 지점에서의 초기조건을 생각해 보자.Let's think about the initial conditions at $x=0$. $$\mathbb{r}(0) = \begin{pmatrix}w(0)[m] \\\frac{d}{dx}w(0)[rad] \\\frac{d^2}{dx^2}w(0)[m^{-1}] \\\frac{d^3}{dx^3}w(0)[m^{-2}] \\\end{pmatrix}= \begin{pmatrix}w(0)[m] \\\theta(0)[rad] \\(EI)^{-1}[N^{-1}m^{-2}]M(0)[Nm] \\(EI)^{-1}[N^{-1}m^{-2}]V(0)[N] \\\end{pmatrix}= \begin{pmatrix}0 \\0 \\(EI)^{-1}M(0) \\(EI)^{-1}V(0) \\\end{pmatrix}$$ 외팔보의 경우, $V(0)$ 는 $q(x)$ 를 $x=0$에서 $x=L$ 까지 적분한 힘과 평형을 이룰 것이다.In case of a cantilever, $V(0)$ would be in equilibrium with the integration of $q(x)$ from $x=0$ to $x=L$. $$\begin{align}V(0) &+ \int_{x=0}^{x=L}q(x)dx = 0 \\V(0) &= - \int_{x=0}^{x=L}q(x)dx\end{align}$$ $M(0)$는 $q(x)$의 도심 $\bar{x}$에 $V(0)$가 작용할 경우 $x=0$에서의 모멘트이다.$M(0)$ is the moment at $x=0$ when $V(0)$ is located at the centroid $\bar{x}$ of $q(x)$. $$M(0) = \bar{x}V(0)$$ 도심 $\bar{x}$ 는 다음과 같이 구할 수 있다.We can find the cetroid $\bar{x}$ as follows. $$\bar{x} = \frac{1}{L}\int_{x=0}^{x=L}xq(x)dx$$ 모멘트 $M(0)$는 $x$ 에서의 전단력 $V(x)$ 가 $x=0$ 점에 작용하는 모멘트와 균형을 이룬다.The moment $M(0)$ is in equilibrium with the moment by shear force $V(x)$ at $x$ on $x=0$ point. $$M(0) + \int_{x=0}^{x=L}xV(x)dx = 0 \\M(0) + \int_{x=0}^{x=L}x\left(V_0+ \int_{\xi=0}^{\xi=x}q(\xi)d\xi \right)dx = 0$$ 우선 초기조건을 찾아보자.Let's first find the initial conditions. (Pytel & Kiusalaas, Mechanics of Materials, 2nd Ed., Cengage Learning, 2013, Example 6.1.) 
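As an added aside (not part of the original notebook), the two initial-condition integrals above can be checked directly by quadrature for a uniform load. The sketch below assumes the same $w_0 = 2000\,[Nm^{-1}]$ and $L = 4\,[m]$ that the example uses, and follows the sign convention of the code below, i.e. $V(0) = -\int q\,dx$ and $M(0) = \int x\,q(x)\,dx$.
# Hedged cross-check of the initial conditions for an assumed uniform load.
import numpy as np
from scipy.integrate import quad

w0 = 2000.0   # N/m, assumed uniform distributed load
L = 4.0       # m, assumed beam length

V0, _ = quad(lambda x: -w0, 0.0, L)       # V(0) = -integral of q(x) over the beam
M0, _ = quad(lambda x: x * w0, 0.0, L)    # moment of the distributed load about x = 0

# Closed-form results for a uniform load: V(0) = -w0*L, M(0) = w0*L**2/2
assert np.isclose(V0, -w0 * L)
assert np.isclose(M0, 0.5 * w0 * L**2)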
분포 하중은 다음과 같다.Distributed load is as follows.w_N_m = 2000 def q_N_m(x_m): return w_N_m def find_init(x_m, r): return np.array([-q_N_m(x_m), x_m*(q_N_m(x_m))]) L_m = 4 E_N_m2 = 200e9 I_mm4 = 113e6 I_m4 = I_mm4 * ((1e-3)**4) EI_Nm2 = E_N_m2 * I_m4 one_over_EI_1_Nm2 = 1.0 / EI_Nm2 x_m_array = np.linspace(0, L_m, 1000+1) x, V_M = ode_solver.rk4(find_init, x_m_array, np.array([0, 0])) V_0_N = V_M[-1][0] M_0_Nm = V_M[-1][1] [V_0_N, M_0_Nm]구한 초기값을 확인해 보자.Let's check the initial values that we just found.assert abs((-w_N_m * L_m)-V_0_N) < 1e-6 assert abs((0.5 * w_N_m * L_m**2)-M_0_Nm) < 1e-6이제 처짐 곡선을 구해 보자.Let's find out the deflection curve.mat_A = np.array( [ [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], ] ) mat_B = np.array([0, 0, 0, one_over_EI_1_Nm2]) def dr_dx(x_m, r): return mat_A @ r + mat_B * q_N_m(x_m) r0 = np.array( [ 0, 0, M_0_Nm*one_over_EI_1_Nm2, V_0_N*one_over_EI_1_Nm2 ] ) x, r = ode_solver.rk4(dr_dx, x_m_array, r0) r[-1] r_array = np.array(r).T r_array.shape엄밀해와 비교Compare with exact solutions 처짐Deflection 처짐 $w(x)$의 이론해는 다음과 같다.Exact solution of the deflection $w(x)$ is as follows. $$w(x) = \frac{w_0 x^2}{24EI}\left(6L^2 -4Lx + x^2 \right)$$w_table_6_2 = (w_N_m * (x_m_array ** 2) / (24 * E_N_m2 * I_m4)) * \ (6 * L_m ** 2 - 4 * L_m * x_m_array + x_m_array**2) w_table_6_2[-1] py.plot(x, r_array[0, :], '.', label='numerical') py.plot(x_m_array, w_table_6_2, label='exact') py.xlabel('x[m]') py.ylabel('w(x)[m]') py.legend(loc=0) py.grid(True)상대오차Relative errorw_numerical_m = np.interp(x_m_array, x, r_array[0, :]) w_error = nl.norm(w_table_6_2 - w_numerical_m) / nl.norm(w_table_6_2) assert 1e-7 > w_error, f"deflection error {w_error*100}% larger than expected"전단력Shear force 전단력의 이론해Shear force (in theory)sf_N = V_0_N + w_N_m * x_m_array전단력의 수치해Numerical solution of the shear forcesf_numerical_N = np.interp(x_m_array, x, r_array[3, :]) * EI_Nm2 py.plot(x_m_array, sf_numerical_N, '.', label='numerical') py.plot(x_m_array, sf_N, label='exact') py.xlabel('x[m]') py.ylabel('V[N]') py.legend(loc=0) py.grid(True) sf_error = nl.norm(sf_N - sf_numerical_N) / nl.norm(sf_N) assert 1e-7 > sf_error, f"shear force error ({sf_error}) larger than expected"굽힘모멘트Bending moment 굽힘모멘트의 이론해Bending Moment (in theory)bm_Nm = M_0_Nm + V_0_N * x_m_array + 0.5 * w_N_m * x_m_array ** 2굽힘모멘트의 수치해Numerical solution of the bending momentbm_numerical_Nm = np.interp(x_m_array, x, r_array[2, :]) * EI_Nm2 py.plot(x_m_array, bm_numerical_Nm, '.', label='numerical') py.plot(x_m_array, bm_Nm, label='exact') py.xlabel('x[m]') py.ylabel('M[Nm]') py.legend(loc=0) py.grid(True) bm_error = nl.norm(bm_Nm - bm_numerical_Nm) / nl.norm(bm_Nm) assert 1e-7 > bm_error, f"bending moment error {bm_error} larger than expected"기울기Slope 기울기의 엄밀해Exact solution of the slopetheta_table_6_2 = (w_N_m / (24 * E_N_m2 * I_m4)) * \ (12 * L_m ** 2 * x_m_array - 12 * L_m * x_m_array ** 2 + 4 * x_m_array ** 3) theta_table_6_2[-1]기울기의 수치해Numerical solution of the slopeslope_numerical_radian = np.interp(x_m_array, x, r_array[1, :]) py.plot(x_m_array, py.rad2deg(slope_numerical_radian), '.', label='numerical') py.plot(x_m_array, py.rad2deg(theta_table_6_2), label='exact') py.xlabel('x[m]') py.ylabel('$\\theta$(deg)') py.grid(True)상대 오차 확인Check the relative error of the slopetheta_error = nl.norm(theta_table_6_2 - slope_numerical_radian) / nl.norm(theta_table_6_2) assert 1e-7 > bm_error, f"slope moment error {theta_error} larger than expected"ref : , Numerical Methods for Engineers, 2018, [Online] Available : 
https://www.ntnu.no/wiki/download/attachments/69242130/main.pdf 연습 문제Exercises 외팔보 상의 분포하중 $q(x)$가 다음과 같을 때 질문에 답하시오:When the distributed load is as follows, answer the question:$$q(x) = 2\left(1 - \frac{1}{L}x \right)$$ 따로 주어지지 않은 값은 위의 예의 값을 사용할 수 있음.You may use values above if not given. 도전 과제 1: $x=0$ 에서의 전단력의 초기 조건을 구하시오.Try This 1: Find the initial condition of the shear force at $x=0$. 도전 과제 2: $x=0$ 에서의 굽힘모멘트의 초기 조건을 구하시오.Try This 2: Find the initial condition of the bending moment at $x=0$. 도전 과제 3: $x$ 에서의 처짐 곡선을 구하시오.Try This 3: Find the deflection curve at $x$. 도전 과제 4: 이론해와 비교해 보시오.Try This 4: Compare with the exact solution.$$w(x)=\frac{w_0 x^2}{120 L \cdot EI}\left( 10L^3 - 10L^2x + 5Lx^2 - x^3 \right)$$ Final Bell마지막 종# stackoverfow.com/a/24634221 import os os.system("printf '\a'");Question-1 - Linear reg from scratch_grdient descent# loading required libraries & data import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore')**Acknowledgements**: I've primariliy used the material from [Andrew Ng's Coursera course][1] for this, but have also been helped by [this article][2] and [this one][3]. I used some code for the animation from [this kernel][4]. [1]: https://www.coursera.org/learn/machine-learning [2]: http://tillbergmann.com/blog/python-gradient-descent.html [3]: http://aimotion.blogspot.co.uk/2011/10/machine-learning-with-python-linear.html [4]: https://www.kaggle.com/ronaldtroncoso20/d/START-UMD/gtd/global-terrorism-trends-animationdata = pd.read_csv("train.csv") #Grab the relevant data, scale the predictor variable, and add a column of 1s for the gradient descent... x = data['GrLivArea'] y = data['SalePrice'] x = (x - x.mean()) / x.std() x = np.c_[np.ones(x.shape[0]), x] #GRADIENT DESCENT alpha = 0.01 #Step size iterations = 2000 #No. of iterations m = y.size #No. of data points np.random.seed(123) #Set the seed theta = np.random.rand(2) #Pick some random values to start with def gradient_descent(x, y, theta, iterations, alpha): past_costs = [] past_thetas = [theta] for i in range(iterations): prediction = np.dot(x, theta) error = prediction - y cost = 1/(2*m) * np.dot(error.T, error) past_costs.append(cost) theta = theta - (alpha * (1/m) * np.dot(x.T, error)) past_thetas.append(theta) return past_thetas, past_costs #Pass the relevant variables to the function and get the new values past_thetas, past_costs = gradient_descent(x, y, theta, iterations, alpha) theta = past_thetas[-1] # Results... 
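# (Added cross-check, not in the original notebook: since x already contains an
#  intercept column of ones, the closed-form least-squares solution should be
#  close to the gradient-descent estimate after enough iterations.)
theta_exact = np.linalg.lstsq(x, y, rcond=None)[0]
print("Least squares:    {:.2f}, {:.2f}".format(theta_exact[0], theta_exact[1]))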
print("Gradient Descent: {:.2f}, {:.2f}".format(theta[0], theta[1]))Gradient Descent: 180921.20, 56294.90Questin-2-Kaggle Competition: House Prices Prediction - Advanced Regression Techniques kaggle competition link: https://www.kaggle.com/c/house-prices-advanced-regressiontechniques# reading the dataset train =pd.read_csv('train.csv') test = pd.read_csv('test.csv') # checking the shapes of the train and test datasets print("dimensions of train dataset: ", train.shape) print("dimensions of test dataset: ", test.shape) train.head() train.drop("Id", axis = 1, inplace = True) test.drop("Id", axis = 1, inplace = True) train1 = train.copy() train1 = train1.drop(train1[(train1['GarageArea']>1200) & (train1['SalePrice']<300000)].index) train1 = train1.drop(train1[(train1['GrLivArea']>4000) & (train1['SalePrice']<300000)].index) train1 = train1.drop(train1[(train1['TotalBsmtSF']>5000)].index) # Split X and y (in train dataset) X = train1.drop('SalePrice', axis=1) y = train1['SalePrice'].to_frame() # Add variable X['train'] = 1 test['train'] = 0 # Combining train and test for data cleaning df = pd.concat([test, X]) # duplicates print('Number of Duplicates:', len(df[df.duplicated()])) # missing values print('Number of Missing Values:', df.isnull().sum().sum())Balancing missing dataprint('Missing Values per Column:') df.isnull().sum().sort_values(ascending=False).head(25) df['PoolQC'] = df['PoolQC'].fillna('None') df['MiscFeature'] = df['MiscFeature'].fillna('None') df['Alley'] = df['Alley'].fillna('None') df['Fence'] = df['Fence'].fillna('None') df['FireplaceQu'] = df['FireplaceQu'].fillna('None') df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda i: i.fillna(i.median())) # Let's take a look at the "Garage" features garage_cols = [col for col in df if col.startswith('Garage')] df[garage_cols] # For the numerical features: for i in df[garage_cols].select_dtypes(exclude='object').columns: df[i] = df[i].fillna(0) # For the categorical features: for i in df[garage_cols].select_dtypes(include='object').columns: df[i] = df[i].fillna('None') bsmt_cols = [col for col in df if col.startswith('Bsmt')] # For the numerical features: for i in df[bsmt_cols].select_dtypes(exclude='object').columns: df[i] = df[i].fillna(0) # For the categorical features: for i in df[bsmt_cols].select_dtypes(include='object').columns: df[i] = df[i].fillna('None') mas_cols = [col for col in df if col.startswith('Mas')] # For the numerical features: for i in df[mas_cols].select_dtypes(exclude='object').columns: df[i] = df[i].fillna(0) # For the categorical features: for i in df[mas_cols].select_dtypes(include='object').columns: df[i] = df[i].fillna('None') df['MSZoning'] = df.groupby('Neighborhood')['MSZoning'].transform(lambda i: i.fillna(i.value_counts().index[0])) # replace missing values for mode of each column df = df.fillna(df.mode().iloc[0])Transforming some numerical categories into categoricaldf['MSSubClass'] = df['MSSubClass'].astype(str) df['MoSold'] = df['MoSold'].astype(str) # months is always categorical df['YrSold'] = df['YrSold'].astype(str) # adding some features to increase the accuracy of prediction df['Total_House_SF'] = df['TotalBsmtSF'] + df['1stFlrSF'] + df['2ndFlrSF'] df['Total_Home_Quality'] = (df['OverallQual'] + df['OverallCond'])/2 df['Total_Bathrooms'] = (df['FullBath'] + (0.5 * df['HalfBath']) + df['BsmtFullBath'] + (0.5 * df['BsmtHalfBath'])) # selecting the features that have a skew higher than 0.5. 
numeric_cols = df.select_dtypes(exclude='object').columns skew_limit = 0.5 skew_vals = df[numeric_cols].skew() skew_cols = (skew_vals .sort_values(ascending=False) .to_frame() .rename(columns={0:'Skew'}) .query('abs(Skew) > {0}'.format(skew_limit))) skew_colsEncoding categorical featurescateg_cols = df.dtypes[df.dtypes == np.object] # filtering by categorical variables categ_cols = categ_cols.index.tolist() # list of categorical fields df_enc = pd.get_dummies(df, columns=categ_cols, drop_first=True) # One hot encoding X = df_enc[df_enc['train']==1] test = df_enc[df_enc['train']==0] X.drop(['train'], axis=1, inplace=True) test.drop(['train'], axis=1, inplace=True)Train-test splitfrom sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoCV from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=12345) def rmse(ytrue, ypredicted): return np.sqrt(mean_squared_error(ytrue, ypredicted))Lasso + cross validationlasso = Lasso(max_iter = 100000, normalize = True) lassocv = LassoCV(alphas = None, cv = 10, max_iter = 100000, normalize = True) lassocv.fit(X_train, y_train) lasso.set_params(alpha=lassocv.alpha_) lasso.fit(X_train, y_train) print('The Lasso I:') print("Alpha =", lassocv.alpha_) print("RMSE =", rmse(y_test, lasso.predict(X_test)))The Lasso I: Alpha = 23.00420091860413 RMSE = 22141.967752032626Ridge + cross validtionalphas = np.geomspace(1e-9, 5, num=100) ridgecv = RidgeCV(alphas = alphas, scoring = 'neg_mean_squared_error', normalize = True) ridgecv.fit(X_train, y_train) ridge = Ridge(alpha = ridgecv.alpha_, normalize = True) ridge.fit(X_train, y_train) print('Ridge Regression:') print("Alpha =", ridgecv.alpha_) print("RMSE =", rmse(y_test, ridge.predict(X_test)))Ridge Regression: Alpha = 0.21251935471767855 RMSE = 22812.490982144296Term Frequency Inverse Document Frequency (TF-IDF) Vector Representation__ (DTU/2K16/MC/013)____Natural Language Processing (IT-425)__In this noteook we will extract Term Frequency vector representations from a given corpus, where our corpus will be my resume. We will divide the corpus into 6 different parts and each part will be treated as a document. The vector for a given word will be a $1 \times 6$ vector and each column will represent the frequency countof how many times the word occured in that particular document. 1. Importing Required Packagesimport pprint from collections import Counter import nltk nltk.download('stopwords') from nltk.corpus import stopwords import numpy as np import pandas[nltk_data] Downloading package stopwords to [nltk_data] C:\Users\anish\AppData\Roaming\nltk_data... [nltk_data] Package stopwords is already up-to-date!2. Importing the Corpus (Resume)resume_file = open('../assets/resume.txt', 'r') resume = resume_file.read().lower() resume_file.close() print(resume) software developer + clean code enthusiast phone : 8287428181 email : home : sandesh vihar, pitampura, new delhi - 110034 date of birth : 7th april 1998 languages : english, hindi, french work experience what after college (4 months) delhi, india creating content to teach core java and python with data structures and algorithms and giving online classes to students. giving python classes workshops to students all around india and teaching core data structures and the python api with emphasis on data structures, algorithms and problem solving. 
see a sample python batch here: https://github.com/anishlearnstocode/python-workshop-6 also teaching java to students in batches of 10 days, where the full java api and data types are covered along with many important algorithms are aso taught. see a sample java batch here: https://github.com/anishlearnstocode/java-wac-batch-32 summer research fellow at university of auckland (2 months) auckland, new zealand w[...]3. Tokenizing The ResumeWe now create a utility function called `tokenize` that will take in a corpus (resume in this case) and will return us a list of tokens after removing stopwords and punctuations. It will only consider alphabetic words and all numbers have also been ignored.# utility function for tokenizing def tokenize(document: str, stopwords_en=stopwords.words('english'), tokenizer=nltk.RegexpTokenizer(r'\w+')): document = document.lower() return [token for token in tokenizer.tokenize(document) if token not in stopwords_en and token.isalpha()] # tokenizing the resume tokens = tokenize(resume) # see first 30 tokens print(tokens[: 30])['anish', 'sachdeva', 'software', 'developer', 'clean', 'code', 'enthusiast', 'phone', 'email', 'outlook', 'com', 'home', 'sandesh', 'vihar', 'pitampura', 'new', 'delhi', 'date', 'birth', 'april', 'languages', 'english', 'hindi', 'french', 'work', 'experience', 'college', 'months', 'delhi', 'india']4. Dividing the Corpus Into 6 Documentsk = len(tokens) // 6 documents = [] for i in range(5): documents.append(tokens[i * k: (i + 1) * k]) documents.append(tokens[5 * k:]) # the 6th document is pprint.pp(documents[5])['links', 'https', 'www', 'linkedin', 'com', 'https', 'github', 'com', 'anishlearnstocode', 'https', 'www', 'hackerrank', 'com', 'anishviewer', 'honours', 'awards', 'mitacs', 'globalink', 'scholarship', 'cohort', 'summer', 'research', 'fellowship', 'university', 'auckland', 'mathematics', 'department', 'technical', 'student', 'cern', 'google', 'india', 'challenge', 'scholarship', 'certifications', 'trinity', 'college', 'london', 'plectrum', 'guitar', 'grade', 'distinction', 'trinity', 'college', 'london', 'plectrum', 'guitar', 'grade', 'merit', 'trinity', 'college', 'london', 'plectrum', 'guitar', 'grade', 'distinction', 'trinity', 'college', 'london', 'plectrum', 'guitar', 'grade', 'distinction', 'french', 'level', 'cern', 'java', 'data', 'structures', 'algorithms', 'coding', 'ninjas', 'web', 'development', 'ruby', 'rails', 'coding', 'ninjas', 'competitive', 'programming', 'coding', 'ninjas']5. Calculating Most Common 5 Tokens From Each Document & Storing Frequency Tables for Each Documentmost_common = set() document_frequencies = [] for document in documents: frequencies = Counter(document) document_frequencies.append(frequencies) for word, frequency in frequencies.most_common(5): most_common.add(word) # number of tokens we have selected, as it isn't necessary to obtain 30 unique tokens print('Number of tokens:', len(most_common)) # The tokens from the first document are print('Tokens from first document:', document_frequencies[0].most_common(5)) # The selected tokens are pprint.pp(most_common){'algorithms', 'also', 'applications', 'auckland', 'cern', 'college', 'com', 'computer', 'data', 'geometry', 'group', 'guitar', 'java', 'london', 'many', 'mathematics', 'participated', 'plectrum', 'python', 'requests', 'research', 'structures', 'students', 'theory', 'trinity', 'university', 'worked'}6. 
Calculating Number of Documents a Keyword Appears InThe TF-IDF vector for a given word is given by:$$tfidf(w, d) = tf(w, d) \times idf(w, d) \\idf(w, d) = \log{\frac{N_t}{N_w}}$$where:$N_t:$ is the total numeber of documents and$N_w:$ is the total number of documents containing the keyword $w$.We now create a dictionary `N_w` (_str_ $\rightarrow$ _int_ ) which will store the number of documents a word $w$ occurrs in.N_t = 6 N_w = {} for word in most_common: count = 0 for frequencies in document_frequencies: count = count + (word in frequencies) N_w[word] = count # seeing the N_w map for all the selected words pprint.pp(N_w){'requests': 1, 'geometry': 1, 'mathematics': 3, 'university': 3, 'algorithms': 4, 'java': 6, 'college': 2, 'group': 2, 'many': 2, 'com': 3, 'theory': 2, 'python': 2, 'plectrum': 1, 'students': 2, 'london': 1, 'research': 3, 'cern': 3, 'trinity': 1, 'participated': 1, 'guitar': 1, 'data': 5, 'applications': 1, 'worked': 3, 'also': 3, 'structures': 3, 'auckland': 2, 'computer': 1}We notice above that __java__ is the only word in the given list to appear in all 6 documents. 7. Computing the TF-IDF Vectorsvectors = {} for word in most_common: vector = [0] * 6 for index, frequencies in enumerate(document_frequencies): vector[index] = frequencies[word] * np.log(N_t / N_w[word]) vectors[word] = vector # Let's see the vector output for a few words print(vectors['java']) print(vectors['students']) # you can also test it out with a word of your choice, try below: word = 'python' print(vectors.get(word, [0] * 6))[5.493061443340549, 0.0, 0.0, 0.0, 1.0986122886681098, 0.0]8. Representing The Vectors in a Tabular Formtable = pandas.DataFrame(data=vectors) print(table.iloc[:, 0:7]) print(table.iloc[:, 7:14]) print(table.iloc[:, 14:20]) print(table.iloc[:, 20:])data applications worked also structures auckland computer 0 0.546965 0.000000 0.000000 0.693147 2.079442 0.000000 0.000000 1 0.182322 0.000000 1.386294 0.000000 0.000000 3.295837 0.000000 2 0.000000 5.375278 2.079442 0.693147 0.000000 0.000000 0.000000 3 0.182322 0.000000 2.079442 2.079442 0.000000 0.000000 0.000000 4 0.364643 0.000000 0.000000 0.000000 2.079442 0.000000 5.375278 5 0.182322 0.000000 0.000000 0.000000 0.693147 1.098612 0.000000AlignmentsThis notebook analyzes page alignments and prepares metrics for final use. 
SetupWe begin by loading necessary libraries:from pathlib import Path import pandas as pd import xarray as xr import numpy as np import matplotlib.pyplot as plt import seaborn as sns import gzip import pickle import binpickle from natural.size import binarysize codec = binpickle.codecs.Blosc('zstd')Set up progress bar and logging support:from tqdm.auto import tqdm tqdm.pandas(leave=False) import sys, logging logging.basicConfig(level=logging.INFO, stream=sys.stderr) log = logging.getLogger('alignment')Import metric code:%load_ext autoreload %autoreload 1 %aimport metrics from trecdata import scan_runsLoading DataWe first load the page metadata:pages = pd.read_json('data/trec_metadata_eval.json.gz', lines=True) pages = pages.drop_duplicates('page_id') pages.info() Int64Index: 6023415 entries, 0 to 6023435 Data columns (total 5 columns): # Column Dtype --- ------ ----- 0 page_id int64 1 quality_score float64 2 quality_score_disc object 3 geographic_locations object 4 gender object dtypes: float64(1), int64(1), object(3) memory usage: 275.7+ MBNow we will load the evaluation topics:eval_topics = pd.read_json('data/eval-topics-with-qrels.json.gz', lines=True) eval_topics.info() train_topics = pd.read_json('data/trec_topics.json.gz', lines=True) train_topics.info() RangeIndex: 57 entries, 0 to 56 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 57 non-null int64 1 title 57 non-null object 2 keywords 57 non-null object 3 scope 57 non-null object 4 homepage 57 non-null object 5 rel_docs 57 non-null object dtypes: int64(1), object(5) memory usage: 2.8+ KBTrain and eval topics use a disjoint set of IDs:train_topics['id'].describe() eval_topics['id'].describe()This allows us to create a single, integrated topics list for convenience:topics = pd.concat([train_topics, eval_topics], ignore_index=True) topics['eval'] = False topics.loc[topics['id'] >= 100, 'eval'] = True topics.head()Finally, a bit of hard-coded data - the world population:world_pop = pd.Series({ 'Africa': 0.155070563, 'Antarctica': 1.54424E-07, 'Asia': 0.600202585, 'Europe': 0.103663858, 'Latin America and the Caribbean': 0.08609797, 'Northern America': 0.049616733, 'Oceania': 0.005348137, }) world_pop.name = 'geography'And a gender global target:gender_tgt = pd.Series({ 'female': 0.495, 'male': 0.495, 'third': 0.01 }) gender_tgt.name = 'gender' gender_tgt.sum()Xarray intesectional global target:geo_tgt_xa = xr.DataArray(world_pop, dims=['geography']) gender_tgt_xa = xr.DataArray(gender_tgt, dims=['gender']) int_tgt = geo_tgt_xa * gender_tgt_xa int_tgtAnd the order of work-needed codes:work_order = [ 'Stub', 'Start', 'C', 'B', 'GA', 'FA', ]Query RelevanceWe now need to get the qrels for the topics. 
This is done by creating frames with entries for every relevant document; missing documents are assumed irrelevant (0).First the training topics:train_qrels = train_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True) train_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True) train_qrels['page_id'] = train_qrels['page_id'].astype('i4') train_qrels = train_qrels.drop_duplicates() train_qrels.head() eval_qrels = eval_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True) eval_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True) eval_qrels['page_id'] = eval_qrels['page_id'].astype('i4') eval_qrels = eval_qrels.drop_duplicates() eval_qrels.head()And concatenate:qrels = pd.concat([train_qrels, eval_qrels], ignore_index=True)Page AlignmentsAll of our metrics require page "alignments": the protected-group membership of each page. GeographyLet's start with the straight page geography alignment for the public evaluation of the training queries. The page metadata has that; let's get the geography column.page_geo = pages[['page_id', 'geographic_locations']].explode('geographic_locations', ignore_index=True) page_geo.head()And we will now pivot this into a matrix so we get page alignment vectors:page_geo_align = page_geo.assign(x=1).pivot(index='page_id', columns='geographic_locations', values='x') page_geo_align.rename(columns={np.nan: 'Unknown'}, inplace=True) page_geo_align.fillna(0, inplace=True) page_geo_align.head()And convert this to an xarray for multidimensional usage:page_geo_xr = xr.DataArray(page_geo_align, dims=['page', 'geography']) page_geo_xr binarysize(page_geo_xr.nbytes)GenderThe "undisclosed personal attribute" is gender. Not all articles have gender as a relevant variable - articles not about a living being generally will not.We're going to follow the same approach for gender:page_gender = pages[['page_id', 'gender']].explode('gender', ignore_index=True) page_gender.fillna('unknown', inplace=True) page_gender.head()We need to do a little targeted repair - there is an erroneous record of a gender of "Taira no Kiyomori" is actually male. Replace that:page_gender = page_gender.loc[page_gender['gender'] != 'Taira no Kiyomori']Now, we're going to do a little more work to reduce the dimensionality of the space. Points:1. Trans men are men2. Trans women are women3. Cisgender is an adjective that can be dropped for the present purposesThe result is that we will collapse "transgender female" and "cisgender female" into "female".The **downside** to this is that trans men are probabily significantly under-represented, but are now being collapsed into the dominant group.pgcol = page_gender['gender'] pgcol = pgcol.str.replace(r'(?:tran|ci)sgender\s+((?:fe)?male)', r'\1', regex=True)Now, we're going to group the remaining gender identities together under the label 'third'. 
As noted above, this is a debatable exercise that collapses a lot of identity.genders = ['unknown', 'male', 'female', 'third'] pgcol[~pgcol.isin(genders)] = 'third'Now put this column back in the frame and deduplicate.page_gender['gender'] = pgcol page_gender = page_gender.drop_duplicates()And make an alignment matrix (reordering so 'unknown' is first for consistency):page_gend_align = page_gender.assign(x=1).pivot(index='page_id', columns='gender', values='x') page_gend_align.fillna(0, inplace=True) page_gend_align = page_gend_align.reindex(columns=['unknown', 'female', 'male', 'third']) page_gend_align.head()Let's see how frequent each of the genders is:page_gend_align.sum(axis=0).sort_values(ascending=False)And convert to an xarray:page_gend_xr = xr.DataArray(page_gend_align, dims=['page', 'gender']) page_gend_xr binarysize(page_gend_xr.nbytes)Intersectional AlignmentWe'll now convert this data array to an **intersectional** alignment array:page_xalign = page_geo_xr * page_gend_xr page_xalign binarysize(page_xalign.nbytes)Make sure that did the right thing and we have intersectional numbers:page_xalign.sum(axis=0)And make sure combination with targets work as expected:(page_xalign.sum(axis=0) + int_tgt) * 0.5Task 1 Metric PreparationNow that we have our alignments and qrels, we are ready to prepare the Task 1 metrics.Task 1 ignores the "unknown" alignment category, so we're going to create a `kga` frame (for **K**nown **G**eographic **A**lignment), and corresponding frames for intersectional alignment.page_kga = page_geo_align.iloc[:, 1:] page_kga.head()Intersectional is a little harder to do, because things can be **intersectionally unknown**: we may know gender but not geography, or vice versa. To deal with these missing values for Task 1, we're going to ignore *totally unknown* values, but keep partially-known as a category.We also need to ravel our tensors into a matrix for compatibility with the metric code. Since 'unknown' is the first value on each axis, we can ravel, and then drop the first column.xshp = page_xalign.shape xshp = (xshp[0], xshp[1] * xshp[2]) page_xa_df = pd.DataFrame(page_xalign.values.reshape(xshp), index=page_xalign.indexes['page']) page_xa_df.head()And drop unknown, to get our page alignment vectors:page_kia = page_xa_df.iloc[:, 1:]Geographic AlignmentWe'll start with the metric configuration for public training data, considering only geographic alignment. We configure the metric to do this for both the training and the eval queries. 
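Before building the per-query targets, here is a tiny toy version (an added sketch with made-up dimensions, not the real data) of the ravel-and-drop-first-column step described above: because 'unknown' comes first on both alignment axes, position 0 of the ravelled vector is the fully-unknown cell, so dropping only that column keeps all partially-known combinations.
import numpy as np
import xarray as xr

# Toy intersectional alignment: 2 pages x (unknown + 2 geographies) x (unknown + 2 genders)
toy = xr.DataArray(np.arange(2 * 3 * 3).reshape(2, 3, 3),
                   dims=['page', 'geography', 'gender'])
flat = toy.values.reshape(toy.shape[0], -1)   # ravel the two alignment axes together
known = flat[:, 1:]                           # drop only the fully-unknown (0, 0) cell
print(flat.shape, known.shape)                # (2, 9) (2, 8)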
Training Queriestrain_qalign = train_qrels.join(page_kga, on='page_id').drop(columns=['page_id']).groupby('id').sum() tqa_sums = train_qalign.sum(axis=1) train_qalign = train_qalign.divide(tqa_sums, axis=0) train_qalign.head() train_qtarget = (train_qalign + world_pop) * 0.5 train_qtarget.head()And we can prepare a metric and save it:t1_train_metric = metrics.Task1Metric(train_qrels.set_index('id'), page_kga, train_qtarget) binpickle.dump(t1_train_metric, 'task1-train-geo-metric.bpk', codec=codec)INFO:binpickle.write:pickled 337312647 bytes with 5 buffersEval QueriesDo the same thing for the eval data for a geo-only eval metric:eval_qalign = eval_qrels.join(page_kga, on='page_id').drop(columns=['page_id']).groupby('id').sum() eqa_sums = eval_qalign.sum(axis=1) eval_qalign = eval_qalign.divide(eqa_sums, axis=0) eval_qtarget = (eval_qalign + world_pop) * 0.5 t1_eval_metric = metrics.Task1Metric(eval_qrels.set_index('id'), page_kga, eval_qtarget) binpickle.dump(t1_eval_metric, 'task1-eval-geo-metric.bpk', codec=codec)INFO:binpickle.write:pickled 337312643 bytes with 5 buffersIntersectional AlignmentNow we need to apply similar logic, but for the intersectional (geography * gender) alignment.As noted as above, we need to carefully handle the unknown cases. DemoTo demonstrate how the logic works, let's first work it out in cells for one query (1).What are its documents?qdf = qrels[qrels['id'] == 1] qdf.name = 1 qdfWe can use these page IDs to get its alignments:q_xa = page_xalign.loc[qdf['page_id'].values, :, :] q_xaSumming over the first axis ('page') will produce an alignment matrix:q_am = q_xa.sum(axis=0) q_amNow we need to do reset the (0,0) coordinate (full unknown), and normalize to a proportion.q_am[0, 0] = 0 q_am = q_am / q_am.sum() q_amOk, now we have to - very carefully - average with our target modifier. There are three groups:- known (use intersectional target)- known-geo (use geo target)- known-gender (use gender target)For each of these, we need to respect the fraction of the total it represents. Let's compute those fractions:q_fk_all = q_am[1:, 1:].sum() q_fk_geo = q_am[1:, :1].sum() q_fk_gen = q_am[:1, 1:].sum() q_fk_all, q_fk_geo, q_fk_genAnd now do some surgery. 
Weighted-average to incorporate the target for fully-known:q_tm = q_am.copy() q_tm[1:, 1:] *= 0.5 q_tm[1:, 1:] += int_tgt * 0.5 * q_fk_all q_tmAnd for known-geo:q_tm[1:, :1] *= 0.5 q_tm[1:, :1] += geo_tgt_xa * 0.5 * q_fk_geoAnd known-gender:q_tm[:1, 1:] *= 0.5 q_tm[:1, 1:] += gender_tgt_xa * 0.5 * q_fk_gen q_tmNow we can unravel this and drop the first entry:q_tm.values.ravel()[1:]ImplementationNow, to do this for every query, we'll use a function that takes a data frame for a query's relevant docs and performs all of the above operations:def query_xalign(qdf): pages = qdf['page_id'] pages = pages[pages.isin(page_xalign.indexes['page'])] q_xa = page_xalign.loc[pages.values, :, :] q_am = q_xa.sum(axis=0) # clear and normalize q_am[0, 0] = 0 q_am = q_am / q_am.sum() # compute fractions in each section q_fk_all = q_am[1:, 1:].sum() q_fk_geo = q_am[1:, :1].sum() q_fk_gen = q_am[:1, 1:].sum() # known average q_am[1:, 1:] *= 0.5 q_am[1:, 1:] += int_tgt * 0.5 * q_fk_all # known-geo average q_am[1:, :1] *= 0.5 q_am[1:, :1] += geo_tgt_xa * 0.5 * q_fk_geo # known-gender average q_am[:1, 1:] *= 0.5 q_am[:1, 1:] += gender_tgt_xa * 0.5 * q_fk_gen # and return the result return pd.Series(q_am.values.ravel()[1:]) query_xalign(qdf)Now with that function, we can compute the alignment vector for each query.train_qtarget = train_qrels.groupby('id').apply(query_xalign) train_qtargetAnd save:t1_train_metric = metrics.Task1Metric(train_qrels.set_index('id'), page_kia, train_qtarget) binpickle.dump(t1_train_metric, 'task1-train-metric.bpk', codec=codec)INFO:binpickle.write:pickled 1493808204 bytes with 5 buffersDo the same for eval:eval_qtarget = eval_qrels.groupby('id').apply(query_xalign) t1_eval_metric = metrics.Task1Metric(eval_qrels.set_index('id'), page_kia, eval_qtarget) binpickle.dump(t1_eval_metric, 'task1-eval-metric.bpk', codec=codec)INFO:binpickle.write:pickled 1493808200 bytes with 5 buffersTask 2 Metric PreparationTask 2 requires some different preparation.We're going to start by computing work-needed information:page_work = pages.set_index('page_id').quality_score_disc.astype(pd.CategoricalDtype(ordered=True)) page_work = page_work.cat.reorder_categories(work_order) page_work.name = 'quality'Work and Target ExposureThe first thing we need to do to prepare the metric is to compute the work-needed for each topic's pages, and use that to compute the target exposure for each (relevant) page in the topic.This is because an ideal ranking orders relevant documents in decreasing order of work needed, followed by irrelevant documents. All relevant documents at a given work level should receive the same expected exposure.First, look up the work for each query page ('query page work', or qpw):qpw = qrels.join(page_work, on='page_id') qpwAnd now use that to compute the number of documents at each work level:qwork = qpw.groupby(['id', 'quality'])['page_id'].count() qworkNow we need to convert this into target exposure levels. 
This function will, given a series of counts for each work level, compute the expected exposure a page at that work level should receive.def qw_tgt_exposure(qw_counts: pd.Series) -> pd.Series: if 'id' == qw_counts.index.names[0]: qw_counts = qw_counts.reset_index(level='id', drop=True) qwc = qw_counts.reindex(work_order, fill_value=0).astype('i4') tot = int(qwc.sum()) da = metrics.discount(tot) qwp = qwc.shift(1, fill_value=0) qwc_s = qwc.cumsum() qwp_s = qwp.cumsum() res = pd.Series( [np.mean(da[s:e]) for (s, e) in zip(qwp_s, qwc_s)], index=qwc.index ) return resWe'll then apply this to each topic, to determine the per-topic target exposures:qw_pp_target = qwork.groupby('id').apply(qw_tgt_exposure) qw_pp_target.name = 'tgt_exposure' qw_pp_targetC:\Users\michaelekstrand\Miniconda3\envs\wptrec\lib\site-packages\numpy\core\fromnumeric.py:3440: RuntimeWarning: Mean of empty slice. return _methods._mean(a, axis=axis, dtype=dtype, C:\Users\michaelekstrand\Miniconda3\envs\wptrec\lib\site-packages\numpy\core\_methods.py:189: RuntimeWarning: invalid value encountered in true_divide ret = ret.dtype.type(ret / rcount)We can now merge the relevant document work categories with this exposure, to compute the target exposure for each relevant document:qp_exp = qpw.join(qw_pp_target, on=['id', 'quality']) qp_exp = qp_exp.set_index(['id', 'page_id'])['tgt_exposure'] qp_exp.index.names = ['q_id', 'page_id'] qp_expGeographic AlignmentNow that we've computed per-page target exposure, we're ready to set up the geographic alignment vectors for computing the per-*group* expected exposure with geographic data.We're going to start by getting the alignments for relevant documents for each topic:qp_geo_align = qrels.join(page_geo_align, on='page_id').set_index(['id', 'page_id']) qp_geo_align.index.names = ['q_id', 'page_id'] qp_geo_alignNow we need to compute the per-query target exposures. This starst with aligning our vectors:qp_geo_exp, qp_geo_align = qp_exp.align(qp_geo_align, fill_value=0)And now we can multiply the exposure vector by the alignment vector, and summing by topic - this is equivalent to the matrix-vector multiplication on a topic-by-topic basis.qp_aexp = qp_geo_align.multiply(qp_geo_exp, axis=0) q_geo_align = qp_aexp.groupby('q_id').sum()Now things get a *little* weird. We want to average the empirical distribution with the world population to compute our fairness target. However, we don't have empirical data on the distribution of articles that do or do not have geographic alignments.Therefore, we are going to average only the *known-geography* vector with the world population. This proceeds in N steps:1. Normalize the known-geography matrix so its rows sum to 1.2. Average each row with the world population.3. De-normalize the known-geography matrix so it is in the original scale, but adjusted w/ world population4. Normalize the *entire* matrix so its rows sum to 1Let's go.qg_known = q_geo_align.drop(columns=['Unknown'])Normalize (adding a small value to avoid division by zero - affected entries will have a zero numerator anyway):qg_ksums = qg_known.sum(axis=1) qg_kd = qg_known.divide(np.maximum(qg_ksums, 1.0e-6), axis=0)Average:qg_kd = (qg_kd + world_pop) * 0.5De-normalize:qg_known = qg_kd.multiply(qg_ksums, axis=0)Recombine with the Unknown column:q_geo_tgt = q_geo_align[['Unknown']].join(qg_known)Normalize targets:q_geo_tgt = q_geo_tgt.divide(q_geo_tgt.sum(axis=1), axis=0) q_geo_tgtThis is our group exposure target distributions for each query, for the geographic data. 
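As a quick added check (not in the original notebook), every row of the per-query target should sum to one after that final normalization, assuming each topic has at least one relevant page with some exposure:
assert np.allclose(q_geo_tgt.sum(axis=1), 1.0)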
We're now ready to set up the matrix.train_geo_qtgt = q_geo_tgt.loc[train_topics['id']] eval_geo_qtgt = q_geo_tgt.loc[eval_topics['id']] t2_train_geo_metric = metrics.Task2Metric(train_qrels.set_index('id'), page_geo_align, page_work, train_geo_qtgt) binpickle.dump(t2_train_geo_metric, 'task2-train-geo-metric.bpk', codec=codec) t2_eval_geo_metric = metrics.Task2Metric(eval_qrels.set_index('id'), page_geo_align, page_work, eval_geo_qtgt) binpickle.dump(t2_eval_geo_metric, 'task2-eval-geo-metric.bpk', codec=codec)INFO:binpickle.write:pickled 2014 bytes with 9 buffersIntersectional AlignmentNow we need to compute the intersectional targets for Task 2. We're going to take a slightly different approach here, based on the intersectional logic for Task 1, because we've come up with better ways to write the code, but the effect is the same: only known aspects are averaged.We'll write a function very similar to the one for Task 1:def query_xideal(qdf, ravel=True): pages = qdf['page_id'] pages = pages[pages.isin(page_xalign.indexes['page'])] q_xa = page_xalign.loc[pages.values, :, :] # now we need to get the exposure for the pages, and multiply p_exp = qp_exp.loc[qdf.name] assert p_exp.index.is_unique p_exp = xr.DataArray(p_exp, dims=['page']) # and we multiply! q_xa = q_xa * p_exp # normalize into a matrix (this time we don't clear) q_am = q_xa.sum(axis=0) q_am = q_am / q_am.sum() # compute fractions in each section - combined with q_am[0,0], this should be about 1 q_fk_all = q_am[1:, 1:].sum() q_fk_geo = q_am[1:, :1].sum() q_fk_gen = q_am[:1, 1:].sum() # known average q_am[1:, 1:] *= 0.5 q_am[1:, 1:] += int_tgt * 0.5 * q_fk_all # known-geo average q_am[1:, :1] *= 0.5 q_am[1:, :1] += geo_tgt_xa * 0.5 * q_fk_geo # known-gender average q_am[:1, 1:] *= 0.5 q_am[:1, 1:] += gender_tgt_xa * 0.5 * q_fk_gen # and return the result if ravel: return pd.Series(q_am.values.ravel()) else: return q_amTest this function out:query_xideal(qdf, ravel=False)And let's go!q_xtgt = qrels.groupby('id').progress_apply(query_xideal) q_xtgt train_qtgt = q_xtgt.loc[train_topics['id']] eval_qtgt = q_xtgt.loc[eval_topics['id']] t2_train_metric = metrics.Task2Metric(train_qrels.set_index('id'), page_xa_df, page_work, train_qtgt) binpickle.dump(t2_train_metric, 'task2-train-metric.bpk', codec=codec) t2_eval_metric = metrics.Task2Metric(eval_qrels.set_index('id'), page_xa_df, page_work, eval_qtgt) binpickle.dump(t2_eval_metric, 'task2-eval-metric.bpk', codec=codec)INFO:binpickle.write:pickled 1875 bytes with 9 buffersUnderstanding the Amazon Forest from Spacefrom IPython.display import Image from IPython.core.display import HTML Image(url= "https://storage.googleapis.com/kaggle-competitions/kaggle/6322/logos/header.png")Importing Libraries%reload_ext autoreload %autoreload 2 %matplotlib inline from fastai.vision import * import pandas as pd import numpy as np import matplotlib.pyplot as pltLoading the Datasetpath = Path('/kaggle/input/planet-understanding-the-amazon-from-space/') path.ls()Multi-Classificationdf = pd.read_csv(path/'train_v2.csv') df.head()Different Tagsdf.pivot_table(index='tags', aggfunc=len).sort_values('image_name', ascending=False) # GPU required torch.cuda.is_available() torch.backends.cudnn.enabledDefining Accuracy and F2-Scoredef p_accuracy(pred, act, **kwargs): return accuracy_thresh(pred, act, thresh=0.2, **kwargs) #This kaggle competition uses f2 score for the final eval. So we should use that as well. 
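# (Added note, a sketch of the metric rather than fastai's exact internals:)
# F-beta combines precision P and recall R as
#     F_beta = (1 + beta**2) * P * R / (beta**2 * P + R)
# so beta=2 weights recall more heavily than precision, which is what the
# competition's F2 score rewards; thresh=0.2 binarizes the sigmoid outputs
# before P and R are computed.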
def f2_score(pred, act, **kwargs): return fbeta(pred, act, beta=2, thresh=0.2, **kwargs)Transformsdef get_data(sz): tfms = get_transforms(do_flip=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.) data = ( ImageList .from_csv(path, 'train_v2.csv', folder="train-jpg", suffix=".jpg") .split_by_rand_pct(0.2) .label_from_df(label_delim=' ') .transform(tfms, size=sz) .add_test_folder('test-jpg-v2') .databunch(num_workers=0) .normalize(imagenet_stats) ) return data #Because we start by learning on 64x64 sz=64 data = get_data(sz)Classeslen(data.classes), data.classesDisplaying Imagesdata.show_batch(rows=3, figsize=(10,12)) !mkdir -p /tmp/.cache/torch/checkpoints !cp /kaggle/input/fastai-pretrained-models/resnet50-19c8e357.pth /tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth learn = cnn_learner(data, models.resnet50, metrics=[p_accuracy, f2_score], model_dir = Path('../kaggle/working'),path = Path(".")) learn.lr_find() learn.recorder.plot()Training the Modellr = 0.01 learn.fit_one_cycle(5, slice(lr)) learn.unfreeze() learn.lr_find() learn.recorder.plot() lr = 1e-4 learn.fit_one_cycle(4, slice(lr)) learn.show_results(rows=3, figsize=(12,15))Saving Modellearn.save('stage-2-rn50') learn.export()Predicting on Test Settest = ImageList.from_folder(path/'test-jpg-v2').add(ImageList.from_folder(path/'test-jpg-additional')) len(test) learn = load_learner('../working/', test=test) preds, _ = learn.get_preds(ds_type=DatasetType.Test) thresh = 0.2 labelled_preds = [' '.join([learn.data.classes[i] for i,p in enumerate(pred) if p > thresh]) for pred in preds] submission = pd.read_csv(path/'sample_submission_v2.csv') submission['tags'] = labelled_preds submission.to_csv('submission.csv')Sequences in PythonArrays, Tuples, and ListsWhile _Arrays_ (i.e. multiple items of the same data type) are a common feature of most programming languages, (as far as I know) they don't exist in Python. Instead, two specialised types exist: _Tuples_ and _Lists_. The main difference is that the former is _immutable_ (i.e. of a fixed, predefined length, which makes it faster), while the latter is _mutable_ (and can be dynamically assigned value(s) or expanded).shoppingList = ['milk', 'bread','egs']Note: Although _lists_ are similar to _tuples_, the delimiters are different and act as a cue to the programmer and Python: the former uses square brackets in its notation, while the latter uses parentheses.shoppingList[2] = 'eggs' shoppingListSince _lists_ are mutable, you can modify existing values.shoppingList.append('sugar') shoppingListYou can add items at the end of the _list_ using the _append_ method.shoppingList.remove('eggs') shoppingListThat said, the 'opposite', _remove_, eliminates the first matching element regardless of its position. However, if the element doesn't exist, you'll get an error! Learning Activity: what happens when you invoke _remove_ and there are duplicates?shoppingList2 = ['eggs', 'flour'] shoppingList = shoppingList + shoppingList2 shoppingListYou can concatenate _lists_.shoppingList3a = ['bread','milk','sugar'] shoppingList3a += shoppingList2 shoppingList3aYou can also use "+=" as short-hand instead of retyping the left-hand side of the assignment.shoppingList3b = shoppingList3a.copy()The _copy_ method is used because when you make a straight assignment, Python references the same address in memory (read as: both names point to the same object). Instead, we duplicate the contents and not the pointer, as the short example below shows.
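A minimal sketch of the difference (hypothetical variable names, separate from the shopping-list example):

groceries = ['milk', 'bread']
alias = groceries           # another name for the *same* list object
alias.append('tea')
print(groceries)            # ['milk', 'bread', 'tea'] -- changed through the alias
snapshot = groceries.copy()
snapshot.append('jam')
print(groceries)            # unchanged; snapshot is an independent copy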
I experienced trouble with this - it may save you some debugging time!shoppingList3a.sort() shoppingList3aBy default it is sorted by character in ascening order.fish =[1, 2, 'blue', 'red']_Lists_ can also be assigned heterogenous data.fish.sort(reverse = True)However, you can't _sort_ a _list_ containing different data types The _sort_ method takes two parameters: _reverse_ and _key_. If the former is set to _True_. it is descending. So far, I've had no need to use the latter but it might be worth investigating in case you have to use it in the future.sorted(shoppingList3b)Load S-MultiXcan resultssmultixcan_pvalues_file = os.path.join(conf.GENE_ASSOC_DIR, f'smultixcan-mashr-pvalues.pkl.xz') display(smultixcan_pvalues_file) smultixcan_gene_associations = pd.read_pickle(smultixcan_pvalues_file) smultixcan_gene_associations.shape smultixcan_gene_associations.head(5)Load fastENLOC resultsfastenloc_rcp_file = os.path.join(conf.GENE_ASSOC_DIR, f'fastenloc-torus-rcp.pkl.xz') display(fastenloc_rcp_file) fastenloc_gene_associations = pd.read_pickle(fastenloc_rcp_file) fastenloc_gene_associations.shape fastenloc_gene_associations.head(5) assert fastenloc_gene_associations.min().min() >= 0 assert fastenloc_gene_associations.max().max() <= 3S-MultiXcan Counts of significant associationssmultixcan_gene_associations.shape all_pvalues = pd.Series(smultixcan_gene_associations.values.flatten()).dropna() display(all_pvalues.shape) all_pvalues.describe() (-np.log10(all_pvalues)).describe() PVALUE_THRESHOLD = (0.05 / all_pvalues.shape[0]) display(PVALUE_THRESHOLD) PVALUE_THRESHOLD = 5.49e-10 hits = (all_pvalues < PVALUE_THRESHOLD).sum() display(hits) display((hits / all_pvalues.shape[0]) * 100) hits = (all_pvalues < 0.01).sum() display(hits) display((hits / all_pvalues.shape[0]) * 100)fastENLOC Numbers of significant genes also found by fastENLOCcommon_genes = fastenloc_gene_associations.index.intersection(smultixcan_gene_associations.index) display(common_genes) fastenloc_matrix_sorted = fastenloc_gene_associations.loc[common_genes, smultixcan_gene_associations.columns] display(fastenloc_matrix_sorted.shape) display(fastenloc_matrix_sorted.head()) multixcan_matrix_sorted = smultixcan_gene_associations.loc[common_genes] display(multixcan_matrix_sorted.shape) display(multixcan_matrix_sorted.head()) assert fastenloc_matrix_sorted.shape == multixcan_matrix_sorted.shapeUsing stringent pvalue thresholddisplay(PVALUE_THRESHOLD) multixcan_signif = (multixcan_matrix_sorted < PVALUE_THRESHOLD) display(multixcan_signif.shape) display(multixcan_signif.head()) fastenloc_signif = (fastenloc_matrix_sorted > 0.1) display(fastenloc_signif.shape) display(fastenloc_signif.head()) multixcan_hits = multixcan_signif.sum().sum() display(multixcan_hits) display(multixcan_hits / multixcan_signif.size) fastenloc_hits = fastenloc_signif.sum().sum() display(fastenloc_hits) display(fastenloc_hits / fastenloc_signif.size) fastenloc_and_multixcan_signif = (multixcan_signif & fastenloc_signif) assert fastenloc_and_multixcan_signif.shape == multixcan_signif.shape == fastenloc_signif.shape fe_mu_sum = fastenloc_and_multixcan_signif.sum().sum() display(fe_mu_sum) _perc = fe_mu_sum / multixcan_hits display(_perc) display(f'{(_perc * 100):.2f}%')Conclusions 72,884 significant associations with S-MultiXcan (after taking the set of genes in common with fastENLOC, otherwise it's 72,994).From these, 22,219 associations are also colocalized (30.49%) Export significant associations as a table S-MultiXcansmultixcan_results = 
multixcan_matrix_sorted.unstack() smultixcan_results.shape smultixcan_results.head() smultixcan_results.isna().sum() smultixcan_results = smultixcan_results.dropna() smultixcan_results.shape assert smultixcan_results.isna().sum() == 0 (smultixcan_results < PVALUE_THRESHOLD).sum()fastENLOCfastenloc_results = fastenloc_matrix_sorted.unstack() fastenloc_results.shape fastenloc_results.head() fastenloc_results.isna().sum() fastenloc_results = fastenloc_results.dropna() fastenloc_results.shape final_results = pd.DataFrame(index=smultixcan_results.index.copy(), columns=['smultixcan_pval', 'fastenloc_rcp']) final_results = final_results.assign(smultixcan_pval=smultixcan_results, fastenloc_rcp=fastenloc_results) final_results.shape final_results.head() final_results.isna().sum() final_results = final_results.dropna(how='any') final_results.shape PVALUE_THRESHOLD publishable_final_results = final_results[(final_results['smultixcan_pval'] < PVALUE_THRESHOLD) & (final_results['fastenloc_rcp'] > 0.1)] publishable_final_results.index.rename(('trait', 'gene_id'), inplace=True) publishable_final_results.shape publishable_final_results.head() metadata.GENES_MAPPINGS.head() _tmp = pd.merge( publishable_final_results.reset_index(), metadata.GENES_MAPPINGS[['gene_id', 'gene_name', 'band', 'gene_type']], on='gene_id', how='left' ).rename(columns={'band': 'gene_band'}) _tmp.head() publishable_final_results = _tmp.set_index(['trait', 'gene_id'])[['gene_name', 'gene_band', 'gene_type', 'smultixcan_pval', 'fastenloc_rcp']] publishable_final_results = publishable_final_results.sort_index() assert publishable_final_results.index.is_unique publishable_final_results.isna().sum() publishable_final_results[publishable_final_results['gene_name'].isna()] publishable_final_results.head() publishable_final_results.shape # some testing _gene_name = 'VIP' _gene_id = metadata.GENE_NAME_TO_ID_MAP[_gene_name] display(_gene_id) _result = publishable_final_results.loc[('1180-Morningevening_person_chronotype', _gene_id)] assert _result['smultixcan_pval'] == 1.8124965657173678e-17, _result['smultixcan_pval'] assert _result['fastenloc_rcp'] == 0.25849, _result['fastenloc_rcp'] # some testing _gene_name = 'RP11-220I1.5' _gene_id = metadata.GENE_NAME_TO_ID_MAP[_gene_name] display(_gene_id) _result = publishable_final_results.loc[('1180-Morningevening_person_chronotype', _gene_id)] assert _result['smultixcan_pval'] == 6.426619359444203e-11 assert _result['fastenloc_rcp'] == 0.1985 # for publication (tsv.gz) output_folder = os.path.join(conf.DELIVERABLES_DIR, 'supp_tables') os.makedirs(output_folder, exist_ok=True) output_file = os.path.join(output_folder, 'suppl_table_S1-significant_gene_trait_associations.tsv.gz') display(output_file) publishable_final_results.to_csv(output_file, sep='\t', float_format='%.4e') # some testing _tmp = pd.read_csv(output_file, sep='\t') assert publishable_final_results.shape == _tmp.set_index(['trait', 'gene_id']).shape _tmp.shape _tmp.head() _tmp = _tmp.set_index(['trait', 'gene_id']) _result = _tmp.loc[('1180-Morningevening_person_chronotype', metadata.GENE_NAME_TO_ID_MAP['VIP'])] assert _result['smultixcan_pval'] == 1.8125e-17 assert _result['fastenloc_rcp'] == 0.25849 _result = _tmp.loc[('1180-Morningevening_person_chronotype', metadata.GENE_NAME_TO_ID_MAP['RP11-220I1.5'])] assert _result['smultixcan_pval'] == 6.4266e-11 assert _result['fastenloc_rcp'] == 0.1985 assert np.allclose( _tmp[['smultixcan_pval', 'fastenloc_rcp']].values, publishable_final_results[['smultixcan_pval', 
'fastenloc_rcp']].values, atol=1e-320, rtol=1e-4 ) # for publication (xlsx) output_folder = os.path.join(conf.DELIVERABLES_DIR, 'supp_tables') os.makedirs(output_folder, exist_ok=True) output_file = os.path.join(output_folder, 'suppl_table_S1-significant_gene_trait_associations.xlsx') display(output_file) publishable_final_results.reset_index().to_excel(output_file, index=False, float_format='%.4e') # some testing _tmp = pd.read_excel(output_file) assert publishable_final_results.shape == _tmp.set_index(['trait', 'gene_id']).shape _tmp.shape _tmp.head() _tmp = _tmp.set_index(['trait', 'gene_id']) _result = _tmp.loc[('1180-Morningevening_person_chronotype', metadata.GENE_NAME_TO_ID_MAP['VIP'])] assert _result['smultixcan_pval'] == 1.8125e-17 assert _result['fastenloc_rcp'] == 0.25849 _result = _tmp.loc[('1180-Morningevening_person_chronotype', metadata.GENE_NAME_TO_ID_MAP['RP11-220I1.5'])] assert _result['smultixcan_pval'] == 6.4266e-11 assert _result['fastenloc_rcp'] == 0.1985 assert np.allclose( _tmp[['smultixcan_pval', 'fastenloc_rcp']].values, publishable_final_results[['smultixcan_pval', 'fastenloc_rcp']].values, atol=1e-320, rtol=1e-4 )Histogram / Density plot of nr of trees per ensembleimport pandas as pd import matplotlib.pyplot as plt from arboreto.core import * XGB_KWARGS meta_df = pd.read_csv('/home/tmo/work/kuleuven/papers/arboreto/meta_out_macosko_40k.tsv', sep='\t') meta_df.head() df = macosko_df all_zeros = df.loc[:, (df == 0).all()] set(all_zeros.columns) macosko_df = pd.read_csv('/media/tmo/data/work/datasets/macosko/in/macosko_40k.tsv.gz', sep='\t')* the targets with 5000 estimators are the all-zero columns! * these targets should have been filtered out before proceeding the analysisclean_df = meta_df[meta_df['n_estimators'] < 2000] fig, ax = plt.subplots() clean_df.n_estimators.plot.hist(bins=1000, ax=ax, figsize=(12,8)) ax.set_yscale('log') ax.set_xlabel('Number of estimators') plt.show() len(clean_df.n_estimators) clean_df.n_estimators.sum() / (len(clean_df.n_estimators) * 1000) clean_df nonzero_df = pd.DataFrame(macosko_df.astype(bool).sum(axis=0)).reset_index() nonzero_df.columns = ['target', 'n_nonzero'] nonzero_df fig, ax = plt.subplots() clean_df.n_estimators.plot.hist(bins=1000, ax=ax, figsize=(12,8)) ax.set_yscale('log') ax.set_xlabel('Number of estimators') plt.show() fig, ax = plt.subplots() nonzero_df.merge(clean_df[['target', 'n_estimators']]).plot.scatter(ax=ax, x='n_nonzero', y='n_estimators', figsize=(12,8), alpha=.05) ax.set_xscale('log') #ax.set_yscale('log') plt.show() nonzero_dfColabocat - Hashcat runtime in Google Colaboratory## Dependencies (Mainly for callabcks) !pip install requests HASHCAT_CMD = "{{ HASHCAT_CMD }}" !nvidia-smi import os, re import urllib.request def find_urls(string): regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))" url = re.findall(regex,string) return [x[0] for x in url] def download_file(url): filename = "".join("%02x" % b for b in os.urandom(10)) fullpath = f"/root/.hashcat/hashes/{filename}" urllib.request.urlretrieve(url, fullpath) return fullpath def localize_hashcat_cmd(cmd): os.makedirs("/root/.hashcat/hashes/", exist_ok=True) for url in find_urls(cmd): localized_path = download_file(url) cmd = cmd.replace(url, localized_path) return cmd localized_hashcat_cmd = localize_hashcat_cmd(HASHCAT_CMD) callback = """ import requests import json import re def escape_ansi(line): 
ansi_escape =re.compile(r'(\\x9B|\\x1B\[)[0-?]*[ -\/]*[@-~]') return ansi_escape.sub('', line) EMBED_LIMIT = 2048 - 500 url = "{{ DISCORD_WEBHOOK_URL }}" cmd = \"\"\"{{ HASHCAT_CMD }}\"\"\" data = {} data["content"] = "" data["username"] = "ColaboHash" results = None with open("/content/results.txt", "r") as f: results = f.readlines() paginated_results = [] page = "" for l in results: l = escape_ansi(l) if len(page) + len(l) >= EMBED_LIMIT: paginated_results.append(page) page = "" page += l paginated_results.append(page) for idx, page in enumerate(paginated_results): data["embeds"] = [] embed = {} embed["title"] = "ColaboHash Results" embed["url"] = "https://github.com/apogiatzis/colabohash" embed["thumbnail"] = {"url": "https://i.ibb.co/dJ7x7ZH/colabohash400x218.png"} embed["description"] = "**Command**: {0}\\n\\n```{1}```".format(cmd, page) embed["footer"] = { "text": "Page {0}/{1}".format(idx+1, len(paginated_results)) } data["embeds"].append(embed) result = requests.post(url, data=json.dumps(data), headers={"Content-Type": "application/json"}) try: result.raise_for_status() except requests.exceptions.HTTPError as err: print(err) """ with open("/content/callback.py", "w") as f: f.write(callback) hashcat_script = """ #!/bin/bash {0} > results.txt python /content/callback.py {% if TERMINATE_ON_FINISH %} jupyter notebook stop 9000 {% endif %} """.format(localized_hashcat_cmd) with open("/content/hashcat_script.sh", "w") as f: f.write(hashcat_script) !chmod +x /content/hashcat_script.sh # Clone, build and install Hashcat !apt install cmake build-essential -y !apt install checkinstall git -y !git clone https://github.com/hashcat/hashcat.git !cd hashcat && git submodule update --init && make && make install !nohup /content/hashcat_script.sh & ## Keep NB Alive (12 hours) !readExample Map Plotting# By line: RRB 2020-07-20 # Script aims to: # - Load a netCDF file # - Extract one variable: CO # - Create contour plot of variable as world map with coastlines # - Add cyclic point # - Customize contours and colorbar # - Add axes labels # - Add grid linesAt the start of a Jupyter notebook you need to import all modules that you will use.import matplotlib.pyplot as plt import cartopy.crs as ccrs # For plotting maps import cartopy.feature as cfeature # For plotting maps from cartopy.util import add_cyclic_point # For plotting maps from pathlib import Path # System agnostic paths import xarray as xr # For loading the data arrays import numpy as np # For array creation and calculations from matplotlib.colors import BoundaryNormDefine the directories and file of interest for your results.result_dir = Path("/home/buchholz/Documents/code_database/untracked/my-notebook/CAM_Chem_examples") file = "CAM_chem_merra2_FCSD_1deg_QFED_monthoutput_CO_201801.nc" file_to_open = result_dir / file #the netcdf file is now held in an xarray dataset named 'nc_load' and can be referenced later in the notebook nc_load = xr.open_dataset(file_to_open) #to see what the netCDF file contains, uncomment below #nc_loadExtract the variable of choice at the time and level of choice#extract variable var_sel = nc_load['CO'] #print(var_sel) #select the surface level at a specific time and convert to ppbv from vmr #select the surface level for an average over three times and convert to ppbv from vmr var_srf = var_sel.isel(time=0, lev=55) var_srf = var_srf*1e09 # 10-9 to ppb print(var_srf.shape) #extract grid variables lat = var_sel.coords['lat'] lon = var_sel.coords['lon'](192, 288)Add cyclic point to avoid white stripe at 
lon=0.var_srf_cyc, lon_cyc = add_cyclic_point(var_srf, coord=lon)Plot the value over the globe.plt.figure(figsize=(20,8)) #Define projection ax = plt.axes(projection=ccrs.PlateCarree()) #define contour levels cmap = plt.get_cmap('Spectral_r') clev = np.arange(30, 500, 10) #plot the data #plt.contourf(lon_cyc,lat,var_srf_cyc,clev,cmap='Spectral_r',extend='both') norm = BoundaryNorm(clev, ncolors=cmap.N, clip=True) plt.pcolormesh(lon_cyc,lat,var_srf_cyc,cmap=cmap, norm=norm) # add coastlines ax.add_feature(cfeature.COASTLINE) #add lat lon grids gl = ax.gridlines(draw_labels=True, color='grey', alpha=0.8, linestyle='--') gl.xlabels_top = False gl.ylabels_right = False # Titles5 # Main plt.title("Global map of CAM-chem CO, January 2018",fontsize=18) # y-axis ax.text(-0.04, 0.5, 'Latitude', va='bottom', ha='center', rotation='vertical', rotation_mode='anchor', transform=ax.transAxes) # x-axis ax.text(0.5, -0.08, 'Longitude', va='bottom', ha='center', rotation='horizontal', rotation_mode='anchor', transform=ax.transAxes) # legend ax.text(1.15, 0.5, 'CO (ppb)', va='bottom', ha='center', rotation='vertical', rotation_mode='anchor', transform=ax.transAxes) plt.colorbar() plt.show()Area weighted regriddingre=6.37122e06 # Earth radius (in metres) rad=4.0 * np.arctan(1.0) / 180.0 # Convert degrees to radians (pi radians per 180 deg) con = re * rad # constant for determining arc lengthOriginal griddlon = lon[2].values-lon[1].values dlat = lat[2].values-lat[1].values clat = np.cos(lat * rad) # cosine of latitude dx = con * dlon * clat #dx (in metres) at each latitude dy = con * dlat #dy (in metres) is constant dydx = dy * dx #dydx(nlat) area_wgt = new((/nlat, mlon/), float) var_srf wgt = new((/nlat, mlon/), float) wgt = conform(wgt, dydx, 0) wgt!0 = "lat" wgt&lat = newlat wgt!1 = "lon" wgt&lon = lon oldlat = xr.DataArray(np.linspace(89.875,-89.875,720).astype('float32')) oldlat.attrs ['long_name'] = 'latitude' oldlat.attrs ['units'] = 'degrees_north' oldlon = xr.DataArray(np.linspace(-179.875,179.875,1440).astype('float32')) oldlon.attrs ['long_name'] = 'longitude' oldlon.attrs ['units'] = 'degrees_east' dlon_old = oldlon[2].values-oldlon[1].values from math import gcd gcd(0.25,1.25) a = np.array([[0,1,2],[2,2,3]]) print(a.shape) weights = np.array([16,4,2]) print(weights.shape) np.dot(a,weights) a = np.repeat(3, 4)Utilised code from https://www.kaggle.com/harshsoni/ranzcr-simple-densenet121-approach to build this model *REMEMBER TO CHANGE RUNTIME TO GPU* ImportsImports and Installationsfrom google.colab import drive drive.mount('/content/drive') # pip install tensorflow_addons -q # pip install tensorflow-gpu -q # pip install pathlib # inbuilt imports import os import glob import pathlib import tempfile import functools # numeric imports import numpy as np import pandas as pd # visual imports import seaborn as sns import matplotlib.pyplot as plt from IPython.display import Image # modeling imports import tensorflow as tf import tensorflow_addons as tfa import pathlib %cd C:\Users\Johnn\Desktop\ADS2002\ranzcr-clip-catheter-line-classification %matplotlib inline sns.set()Define paths for image training latertrain_path = pathlib.Path('C:\\Users\\Johnn\\Desktop\\ADS2002\\ranzcr-clip-catheter-line-classification\\train') test_path = pathlib.Path('C:\\Users\\Johnn\\Desktop\\ADS2002\\ranzcr-clip-catheter-line-classification\\test')Define constantsIMG_SIZE = 224 BATCH_SIZE = 16 all_labels = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline','NGT - Incompletely Imaged', 
'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']Define dataframes`train` provides one hot encoding for labelling. `train_coord` gives class counts and coordinates for cathetersdf = pd.read_csv("train.csv") df_coord = pd.read_csv("train_annotations.csv") df.head() df_coord.head()Data PathsAdd the paths of images to the dataframe for easy accessdf['path'] = df['StudyInstanceUID'].map(lambda x: str(train_path/(x+'.jpg'))) df.iloc[0][-1] df.head()EDA ClassesData suggests an imbalance for each class.data = df.loc[:, all_labels].sum().sort_values() dist = pd.DataFrame({'Class': data.index, 'Pos': data.values}, columns=['Class', 'Pos']) dist fig = plt.figure(figsize=(6, 5)) sns.barplot(data=dist, y='Class', x='Pos') plt.xticks(rotation=45) plt.show() dist['Neg'] = len(df) - dist.Pos dist.sort_values(by='Pos')`PatientID`Given that `PatientID` is a possible variable, it suggests a one to many relationship between one patient and many detections/data points.print(len(df)) print(len(df.PatientID.unique())) grouped_df = df.set_index('PatientID') grouped_df.head() grouped_df.loc['ec89415d1'].sort_values(by=['StudyInstanceUID'])As the dataframe above suggests, there exists multiple records for a single patient.Hence, when selecting the images for training and validation, patients of the same ID need to be kept within their distinct splits. Otherwise data leakage occurs, and the model provide outputs without learning, due to already prelearning the output. Thus, for training/validation split, must group on `PatientID` and split accordingly. Splitting Datagrouped_df = df.groupby('PatientID') train_list = [group for _, group in grouped_df]`train_list` is a list of dataframes for each patient.`train_list[0]` gives dataframe for `PatientID` - 003e65ddbdef train_valid_splitter(d, train_size=0.8): n = len(d) trains = d[:int(train_size*n)] valids = d[int(train_size*n):] return trains, valids train_split, valid_split = train_valid_splitter(train_list) train_df = pd.concat(train_split, axis=0) valid_df = pd.concat(valid_split, axis=0) print(f'Train Size: {len(train_df)}, Valid Size: {len(valid_df)}') train_dfCreate Data GeneratorsUsing `ImagDataGenerator`, we can build the inital pipeline of the model.Image transforms include horizontally flipping and rotating (by 0.4 radians, just a randomly chosen number).train_generator = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1./255, horizontal_flip=True, rotation_range=0.40 ) train_datagen = train_generator.flow_from_dataframe( dataframe=train_df, x_col='path', y_col=all_labels, target_size=(IMG_SIZE, IMG_SIZE), batch_size=BATCH_SIZE, seed=42, class_mode='raw' ) valid_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255) valid_datagen = valid_generator.flow_from_dataframe( dataframe=valid_df, x_col='path', y_col=all_labels, target_size=(IMG_SIZE, IMG_SIZE), batch_size=BATCH_SIZE, seed=42, class_mode='raw' ) def display_batch(batch, n_imgs=9): r = int(n_imgs**0.5) fig, axs = plt.subplots(r, r, figsize=(12, 15)) imgs, labels = batch[0], batch[1] for i, ax in zip(range(n_imgs), axs.flatten()): title = '\n'.join(list(np.array(all_labels)[labels[i].flatten()==1])) ax.imshow(imgs[i], cmap='bone') ax.set_title(title) ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) plt.show() display_batch(next(train_datagen))Model Buildingpip install image-classifiers -q from classification_models.tfkeras import Classifiers ResNet18, proc_func = Classifiers.get('resnet18') densenet = 
tf.keras.applications.DenseNet121(weights='imagenet', include_top=False, input_shape=(IMG_SIZE, IMG_SIZE, 3)) densenet.trainable = False inputs = densenet.inputs x = densenet(inputs) x = tf.keras.layers.GlobalAveragePooling2D()(x) outputs = tf.keras.layers.Dense(len(all_labels), activation='sigmoid')(x) model = tf.keras.Model(inputs, outputs) model.summary()Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/densenet/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5 29089792/29084464 [==============================] - 3s 0us/step 29097984/29084464 [==============================] - 3s 0us/step Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 224, 224, 3)] 0 _________________________________________________________________ densenet121 (Functional) (None, 7, 7, 1024) 7037504 _________________________________________________________________ global_average_pooling2d (Gl (None, 1024) 0 _________________________________________________________________ dense (Dense) (None, 11) 11275 ===============================================[...]Learning RateBuild a learning rate finder to find an optimal lr for finetuning the model.class LRFinder: def __init__(self, model, lr_range=[1e-10, 1e1], beta=0.98, stop_factor=4): self.model = model self.lr_range = lr_range self.beta = beta self.stop_factor = stop_factor self.stop_training = False self.iterations = 0 self.mvg_avg_loss = 0 self.min_loss = 1e9 self.lrs = [] self.losses = [] def _reset(self): self.stop_training = False self.iterations = 0 self.mvg_avg_loss = 0 self.min_loss = 1e9 self.lrs = [] self.losses = [] def _scheduler(self, start_lr, end_lr, iterations): self.lr_factor = (end_lr / start_lr)**(1./iterations) def on_train_begin(self, logs=None): self._reset() def on_batch_end(self, batch, logs=None): self.iterations += 1 lr = tf.keras.backend.get_value(self.model.optimizer.lr) self.lrs.append(lr) tf.keras.backend.set_value(self.model.optimizer.lr, lr*self.lr_factor) loss = logs['loss'] self.mvg_avg_loss = (self.beta*self.mvg_avg_loss) + ((1-self.beta)*loss) smooth_loss = self.mvg_avg_loss / (1-(self.beta**self.iterations)) self.losses.append(smooth_loss) stop_loss = self.stop_factor * self.min_loss if self.iterations > 1 and smooth_loss > self.stop_factor: self.stop_training = True if self.iterations == 0 or smooth_loss < self.min_loss: self.min_loss = smooth_loss # print(f'\nIterations: {self.iterations}, lr: {lr}, loss: {smooth_loss}/{loss}, lrf: {self.lr_factor}') def on_epoch_end(self, epoch, logs=None): if self.stop_training: self.model.stop_training = True return def find(self, train_ds, epochs=None, steps_per_epoch=None, batch_size=32): if epochs is None: raise ValueError(f'Invalid value {epochs} for epochs') if steps_per_epoch is None: steps_per_epoch = len(train_ds) self._scheduler(self.lr_range[0], self.lr_range[1], steps_per_epoch*epochs) with tempfile.NamedTemporaryFile(prefix='init', suffix='.h5') as init_config: # save model config self.model.save_weights(init_config.name) init_lr = tf.keras.backend.get_value(self.model.optimizer.lr) tf.keras.backend.set_value(self.model.optimizer.lr, self.lr_range[0]) lr_finder_cb = tf.keras.callbacks.LambdaCallback( on_train_begin= lambda logs: self.on_train_begin(logs), on_batch_end= lambda batch, logs: self.on_batch_end(batch, logs), on_epoch_end= lambda epoch, logs: self.on_epoch_end(epoch, logs) ) 
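# (Added note:) this `find` loop is the classic LR range test: starting from
# lr_range[0], on_batch_end multiplies the learning rate by lr_factor after every
# batch, sweeping it exponentially up to lr_range[1] over the requested epochs
# while recording a smoothed loss; training is cut short once the loss blows up,
# and plot_loss() is then used to read off a learning rate from just before the
# loss starts to rise.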
self.model.fit(train_ds, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=[lr_finder_cb]) # restore model config tf.keras.backend.set_value(self.model.optimizer.lr, init_lr) self.model.load_weights(init_config.name) def plot_loss(self, skip_begin=10, skip_end=1, title=""): lrs = self.lrs[skip_begin:-skip_end] losses = self.losses[skip_begin:-skip_end] plt.plot(lrs, losses) plt.xscale("log") plt.xlabel("Learning Rate (Log Scale)") plt.ylabel("Loss")Compute lr# model.compile(optimizer='adam', # loss='binary_crossentropy', # metrics=['binary_accuracy', tf.keras.metrics.AUC()]) # lr_finder = LRFinder(model) # lr_finder.find(train_datagen, epochs=10)For our CLR we can use a minimum learning rate of 1e-2 and a maximum learning rate of 1e-5.`CyclicalLearningRate` provided by tensorflow_addons library, keep the initial learning rate to 1e-5 and the maximum learning rate to be 1e-2 and use a 'traingular' approach of CLR. We are keeping the step size = 2 epochs. Hence, to perform 1 cycle we will need to perform 4 epochs.def scale_fn(x): return 1. clr = tfa.optimizers.CyclicalLearningRate( initial_learning_rate=1e-5, maximal_learning_rate=1e-2, scale_fn=scale_fn, step_size=2*len(train_datagen)*BATCH_SIZE, scale_mode='cyclic' )To evaluate our model we will use 'binary_accuracy' and 'AUROC Score', as our model itself will be evaluated on the AUROC score.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=clr), loss='binary_crossentropy', metrics=['binary_accuracy', tf.keras.metrics.AUC()]) model.fit(train_datagen, epochs=4, batch_size=BATCH_SIZE, steps_per_epoch=len(train_datagen), validation_data=valid_datagen, validation_steps=len(valid_datagen)) model.save("model_1")INFO:tensorflow:Assets written to: model_1\assetsNow, we unfreze the model and finetune the entire model.densenet.trainable = True model.summary() clr = tfa.optimizers.CyclicalLearningRate( initial_learning_rate=1e-5, #before: 1e-9 maximal_learning_rate=1e-2, #before: 1e-4 scale_fn=scale_fn, step_size=2*len(train_datagen)*BATCH_SIZE, scale_mode='cyclic' ) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=clr), loss='binary_crossentropy', metrics=['binary_accuracy', tf.keras.metrics.AUC()]) model.fit(train_datagen, epochs=4, batch_size=BATCH_SIZE, steps_per_epoch=len(train_datagen), validation_data=valid_datagen, validation_steps=len(valid_datagen))Epoch 1/4 379/1507 [======>.......................] - ETA: 38:27:31 - loss: 0.2642 - binary_accuracy: 0.8905 - auc_1: 0.9044Data Science Unit 1 Sprint Challenge 2 Storytelling with DataIn this sprint challenge you'll work with a dataset from **FiveThirtyEight's article, [Every Guest Ever Had On ‘The Daily Show’](https://fivethirtyeight.com/features/every-guest-jon-stewart-ever-had-on-the-daily-show/)**! Part 0 — Run this starter codeYou don't need to add or change anything here. 
Just run this cell and it loads the data for you, into a dataframe named `df`.(You can explore the data if you want, but it's not required to pass the Sprint Challenge.)%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/daily-show-guests/daily_show_guests.csv') df.rename(columns={'YEAR': 'Year', 'Raw_Guest_List': 'Guest'}, inplace=True) def get_occupation(group): if group in ['Acting', 'Comedy', 'Musician']: return 'Acting, Comedy & Music' elif group in ['Media', 'media']: return 'Media' elif group in ['Government', 'Politician', 'Political Aide']: return 'Government and Politics' else: return 'Other' df['Occupation'] = df['Group'].apply(get_occupation)Part 1 — What's the breakdown of guests’ occupations per year?For example, in 1999, what percentage of guests were actors, comedians, or musicians? What percentage were in the media? What percentage were in politics? What percentage were from another occupation?Then, what about in 2000? In 2001? And so on, up through 2015.So, **for each year of _The Daily Show_, calculate the percentage of guests from each occupation:**- Acting, Comedy & Music- Government and Politics- Media- Other Hints:1. Use pandas to make a **crosstab** of **`Year`** & **`Occupation`**. ([This documentation](http://pandas.pydata.org/pandas-docs/stable/reshaping.htmlcross-tabulations) has examples and explanation.)2. To get percentages instead of counts, use crosstab's **`normalize`** parameter to normalize over each _row._ ([This documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.crosstab.html) describes the parameter and its options.)3. You'll know you've calculated the crosstab correctly when the percentage of "Acting, Comedy & Music" guests is 90.36% in 1999, and 45% in 2015.df.describe() df.describe(exclude=[np.number]) crosstabdf = pd.crosstab(df['Year'],df['Occupation'],normalize='index') print(crosstabdf)Occupation Acting, Comedy & Music Government and Politics Media \ Year 1999 0.903614 0.012048 0.066265 2000 0.739645 0.082840 0.124260 2001 0.726115 0.038217 0.197452 2002 0.622642 0.069182 0.264151 2003 0.560241 0.102410 0.246988 2004 0.384146 0.225610 0.274390 2005 0.370370 0.160494 0.333333 2006 0.360248 0.192547 0.291925 2007 0.255319 0.170213 0.333333 2008 0.207317 0.201220 0.469512 2009 0.208589 0.208589 0.361963 2010 0.351515 [...]Part 2 — Recreate this explanatory visualization:from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2015/08/hickey-datalab-dailyshow.png' example = Image(url, width=500) display(example)**Hint:** use the crosstab you calculated in part 1!**Expectations:** Your plot should include:- 3 lines visualizing "occupation of guests, by year." The shapes of the lines should look roughly identical to 538's example. Each line should be a different color. (But you don't need to use the _same_ colors as 538.)- Legend or labels for the lines. (But you don't need each label positioned next to its line or colored like 538.)- Title in the upper left: _"Who Got To Be On 'The Daily Show'?"_ with more visual emphasis than the subtitle. 
(Bolder and/or larger font.)- Subtitle underneath the title: _"Occupation of guests, by year"_Any visual element not specifically mentioned in the expectations is an optional bonus, but it's _not_ required to pass the Sprint Challenge.labels = {'Acting, Comedy & Music':'act', 'Government and Politics':'gov', 'Media':'media', 'Other':'other'} crosstabdf.rename(columns=labels, inplace=True) plt.style.use('fivethirtyeight') fig, ax = plt.subplots() dfact = crosstabdf['act']*100 dfact.plot(color='deepskyblue', linewidth=2) dfgov = crosstabdf['gov']*100 dfgov.plot(color='red', linewidth=2) dfmedia = crosstabdf['media']*100 dfmedia.plot(color='purple', linewidth=2) # Setting title and sub-title ax.text(x=1997.8, y=115, s="Who Got To Be On 'The Daily Show'?", fontsize=15, fontweight='bold') ax.text(x=1997.8, y=108, s="Occupation of guests, by year", fontsize=12) # Setting labels ax.text(x=2001, y=80, s="Acting, Comedy & Music", fontsize=12, color='deepskyblue') ax.text(x=2009, y=7, s="Government and Politics", fontsize=12, color='red') ax.text(x=2007.5, y=50, s="Media", fontsize=12, color='purple') ax.set_xlim(1999, 2015) ax.set_ylim(0, 100) ax.set_ylim(0, 100) ax.set_xlabel(""); ax.set_xticks([x for x in range(2000,2016,4)]) ax.set_xticklabels(['2000',"'04","'08","'12"]) ax.set_yticks([x for x in range(0,125,25)]) ax.set_yticklabels(['0',"25","50","75","100%"]) plt.show()Part 3 — Who were the top 10 guests on _The Daily Show_?**Make a plot** that shows their names and number of appearances.**Hint:** you can use the pandas `value_counts` method.**Expectations:** This can be a simple, quick plot: exploratory, not explanatory. If you want, you can add titles and change aesthetics, but it's _not_ required to pass the Sprint Challenge.import seaborn as sns top10guests = df['Guest'].value_counts()[:10,] plt.figure(figsize=(10,5)) ax = sns.barplot(top10guests.values, top10guests.index, orient='h') plt.title("Top 10 Guests in 'The Daily Show'") plt.ylabel('Names', fontsize=12) plt.xlabel('Number of appearances', fontsize=12) ax.set_xticks([x for x in range(0,21,2)]) plt.show()Import Packagesimport tensorflow as tf from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.models import Sequential import numpy as npTokenizationdata = "In the town of Athy one \n Battered away til he hadnt a pound. \nHis father died and made him a man again \n Left him a farm and ten acres of ground. \nHe gave a grand party for friends and relations \nWho didnt forget him when come to the wall, \nAnd if youll but listen Ill make your eyes glisten \nOf the rows and the ructions of Lanigans Ball. \nMyself to be sure got free invitation, \nFor all the nice girls and boys I might ask, \nAnd just in a minute both friends and relations \nWere dancing round merry as bees round a cask. \n, that nice little milliner, \nShe tipped me a wink for to give her a call, \nAnd I soon arrived with \nJust in time for Lanigans Ball. \nThere were lashings of punch and wine for the ladies, \nPotatoes and cakes; there was bacon and tea, \nThere were the Nolans, Dolans, OGradys \nCourting the girls and dancing away. \nSongs they went round as plenty as water, \nThe harp that once sounded in Taras old hall,\nSweet Nelly Gray and The Rat Catchers Daughter,\nAll singing together at Lanigans Ball. \nThey were doing all kinds of nonsensical polkas \nAll round the room in a whirligig. 
\nJulia and I, we banished their nonsense \nAnd tipped them the twist of a reel and a jig. \nAch mavrone, how the girls got all mad at me \nDanced til youd think the ceiling would fall. \nFor I spent three weeks at Brooks Academy \nLearning new steps for Lanigans Ball. \nThree long weeks I spent up in Dublin, \nThree long weeks to learn nothing at all,\n Three long weeks I spent up in Dublin, \nLearning new steps for Lanigans Ball. \nShe stepped out and I stepped in again, \nI stepped out and she stepped in again, \nShe stepped out and I stepped in again, \nLearning new steps for Lanigans Ball. \nBoys were all merry and the girls they were hearty \nAnd danced all around in couples and groups, \nTil an accident happened, young \nPut his right leg through miss Finnertys hoops. \nPoor creature fainted and cried Meelia murther, \nCalled for her brothers and gathered them all. \nCarmody swore that hed go no further \nTil he had satisfaction at Lanigans Ball. \nIn the midst of the row miss Kerrigan fainted, \nHer cheeks at the same time as red as a rose. \nSome of the lads declared she was painted, \nShe took a small drop too much, I suppose. \nHer sweetheart, , so powerful and able, \nWhen he saw his fair colleen stretched out by the wall, \nTore the left leg from under the table \nAnd smashed all the Chaneys at Lanigans Ball. \nBoys, oh boys, twas then there were runctions. \nMyself got a lick from big Phelim McHugh. \nI soon replied to his introduction \nAnd kicked up a terrible hullabaloo. \nOld Casey, the piper, was near being strangled. \nThey squeezed up his pipes, bellows, chanters and all. \nThe girls, in their ribbons, they got all entangled \nAnd that put an end to Lanigans Ball." corpus = data.lower().split("\n") tokenizer = Tokenizer() tokenizer.fit_on_texts(corpus) total_words = len(tokenizer.word_index) + 1 print(tokenizer.word_index) print('Total Words: {}'.format(total_words))Data Preparationinput_sequences = [] for index, line in enumerate(corpus): token_list = tokenizer.texts_to_sequences([line])[0] for i in range(1, len(token_list)): n_gram_sequence = token_list[:i+1] input_sequences.append(n_gram_sequence) if index == 0: print("'"+line+"' => {}".format(token_list)) print("Input Sequences :") print('\n'.join(map(str, input_sequences))) max_sequence_len = max([len(x) for x in input_sequences]) input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')) first_line_len = len(corpus[0].split()) print("Padded Input Sequences for the first line :") print(input_sequences[:first_line_len-1]) xs = input_sequences[:,:-1] labels = input_sequences[:,-1] ys = tf.keras.utils.to_categorical(labels, num_classes=total_words) #Consider first sentence print("Sentence : {}".format(corpus[0])) print("Tokens : {}".format(input_sequences[first_line_len-2])) print("X : {}".format(xs[first_line_len-2])) print("Label : {}".format(labels[first_line_len-2])) print("Y : {}".format(ys[first_line_len-2]))Build & Train Modelembedding_dim = 64 model = tf.keras.Sequential([ tf.keras.layers.Embedding(total_words, embedding_dim, input_length=max_sequence_len-1), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20)), tf.keras.layers.Dense(total_words, activation='softmax') ]) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit(xs, ys, epochs=500)Visualize Training Resultsimport matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.xlabel("Epochs") plt.ylabel(string) 
plt.show() plot_graphs(history, 'accuracy')Generate Textseed_text = " to dance" next_words = 100 for _ in range(next_words): token_list = tokenizer.texts_to_sequences([seed_text])[0] token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre') predicted_labels = model.predict(token_list, verbose=0) predicted_index = np.argmax(predicted_labels, axis=-1) output_word = "" for word, index in tokenizer.word_index.items(): if index == predicted_index: output_word = word break seed_text += " " + output_word print(seed_text)Parabolske koordinaterParabolske koordinater $(u, v)$ er gitt ved posisjonsvektor$$\vec{r} = 2 u v \mathbf{i} + (u^2-v^2)\mathbf{j}$$1. Finn enhetsvektorne. Er de ortogonale?2. Finn gradienten til skalarfelt $f(u, v) = (1-u^2)(1-v^2)$ representert med parabolske koordinater. Plott skalarfeltet med tilhørende gradient for $(u, v) \in [0, 1] \times [-1, 1]$. Bruk Kartesiske koordinater til plottingen.Start med å importere funksjonalitet fra `sympy`, og lag to tupler (Python immutable list) for `psi=(u, v)` og `rv=(2uv, u**2-v**2)`import sympy as sp import numpy as np u, v = psi = sp.symbols('u,v', real=True) rv = (2*u*v, u**2-v**2)Finn enhetsvektorer og skaleringsfaktorer. Lager to generelle funksjoner som kan gjenbrukes for andre koordinaterdef basisvektorer(psi, rv): """Returner basisvektorer Parameters ---------- psi : Tuple av nye variable rv : Posisjonsvektor """ b = np.zeros((len(psi), len(rv)), dtype=object) for i, ui in enumerate(psi): for j, rj in enumerate(rv): b[i, j] = sp.simplify(rj.diff(ui, 1)) return b def skaleringsfaktorer(b): """Returner skaleringsfaktorer Parameters ---------- b : basisvektorer """ h = np.zeros(b.shape[0], dtype=object) for i, s in enumerate(np.sum(b**2, axis=1)): h[i] = sp.simplify(sp.sqrt(s)) return h def enhetsvektorer(psi, rv): """Returner enhetsvektorer og skaleringsfaktorer Parameters ---------- psi : Tuple av nye variable rv : Posisjonsvektor """ b = basisvektorer(psi, rv) hi = skaleringsfaktorer(b) return b / hi[None, :], hi e, hi = enhetsvektorer(psi, rv)Skriv ut enhetsvektorer og sjekk at de stemmer med Vector Calculus example 6.2.print(e) print(hi)[[v/sqrt(u**2 + v**2) u/sqrt(u**2 + v**2)] [u/sqrt(u**2 + v**2) -v/sqrt(u**2 + v**2)]] [2*sqrt(u**2 + v**2) 2*sqrt(u**2 + v**2)]Lag skalarfelt $f(u, v) = (1-u^2)(1-v^2)$f = (1-u**2)*(1-v**2)Plott skalarfelt. Merk at vi bruker $x=2uv$ og $y=u^2-v^2$ evaluert på et strukturert grid. `sp.lambdify` er en effektiv (vektorisert) metode å evaluere en `sympy` funksjon på. Så under tilsvarer `f(u, v) = sp.lambdify((u, v), f)(ui, vi)`.N = 20 ui = np.broadcast_to(np.linspace(0, 1, N)[:, None], (N, N)) vi = np.broadcast_to(np.linspace(-1, 1, N)[None, :], (N, N)) fj = sp.lambdify((u, v), f)(ui, vi)Hvis vi nå velger å plotte $f(u, v)$ i det nye koordinatsystemet får vi.import matplotlib.pyplot as plt %matplotlib inline plt.contourf(ui, vi, fj) ui = np.broadcast_to(np.linspace(0, 1, N)[:, None], (N, N)) vi = np.broadcast_to(np.linspace(-1, 1, N)[None, :], (N, N)) for cu in np.linspace(0, 1, 10): plt.plot(2*cu*vi[0], (cu**2-vi[0]**2), 'b') for cv in np.linspace(-1, 1, 20): plt.plot(2*ui[:, 0]*cv, ui[:, 0]**2-cv**2, 'r') plt.xlabel('x') plt.ylabel('y')Men det er kanskje mer interessant å se resultatet i fysiske (Kartesiske) koordinater. Vi trenger derfor å finne kartesiske `x, y` fra de gitte `u, v`. 
Gjør dette som følgermesh = [] for rj in rv: mesh.append(sp.lambdify((u, v), rj)(ui, vi)) x, y = mesh plt.contourf(x, y, fj)Å plotte gradienten i Kartesiske koordinater er mer involvert siden vi har beregnet gradienten i de nye koordinatene og derfor trenger å projisere ved å ta prikk-produktet av gradientvektoren$$\begin{align}\frac{\partial f}{\partial x} &= \nabla f \cdot \mathbf{i},\\\frac{\partial f}{\partial y} &= \nabla f \cdot \mathbf{j}.\end{align}$$For å finne gradientvektoren deriverer vi først for å finne komponentene til $\nabla f$ i nye koordinaterdf = np.array((1/hi[0]*f.diff(u, 1), 1/hi[1]*f.diff(v, 1))) print(df)[-u*(1 - v**2)/sqrt(u**2 + v**2) -v*(1 - u**2)/sqrt(u**2 + v**2)]Merk at `df` nå ikke inneholder enhetsvektorer. Så før vi prikker med $\mathbf{i}$ og $\mathbf{j}$ må vi gange med enhetsvektorene $\mathbf{e_1}$ og $\mathbf{e_2}$ for å få $\nabla f$$$\nabla f = \frac{\mathbf{e}_1}{h_1}\frac{\partial f}{\partial u} + \frac{\mathbf{e}_2}{h_2}\frac{\partial f}{\partial v},$$gradf = e[0]*df[0] + e[1]*df[1] print(gradf)[-u*v*(1 - u**2)/(u**2 + v**2) - u*v*(1 - v**2)/(u**2 + v**2) -u**2*(1 - v**2)/(u**2 + v**2) + v**2*(1 - u**2)/(u**2 + v**2)]Merk at vi med denne summen nå har fått satt inn for $\mathbf{e_1}$ og $\mathbf{e_2}$, så vektoren `gradf` over er allerede gitt ved Kartesiske enhetsvektorer (e.g., siden $\mathbf{e_1} = 2/h_1(v\mathbf{i} + u\mathbf{j})$). Ved prikking mot $\mathbf{i}$ er resultatet derfor `gradf[0]`, mens prikking mot $\mathbf{j}$ gir `gradf[1]`. Derfor skipper vi prikkproduktet og henter ganske enkelt de Kartesiske vektorkomponentenedfdxi = sp.lambdify((u, v), gradf[0])(ui, vi) dfdyi = sp.lambdify((u, v), gradf[1])(ui, vi) plt.contourf(x, y, fj) plt.quiver(x, y, dfdxi, dfdyi, scale=20)Introduction to PythonThis notebook is primarily focused on introducing the specifics of using Python in an interactive environment such as Datalab. It is not intended to provide a complete tutorial to Python as a language. If you're completely new to Python, no problem! Python is quite straightforward, and there are lots of resources. The interactive step-by-step material at [Codecademy](https://www.codecademy.com/tracks/python) might be of interest.To get started, below is a code cell that contains a Python statement. You can run it by pressing `Shift+Enter` or clicking the the `Run` toolbar button with the cell selected.print("Hello World")Hello WorldYou can edit the cell above and re-execute it to iterate over it. You can also add additional code cells to enter new blocks of code.import sys number = 10 def square(n): return n * nThe cell above created a variable named `number` and a function named `square`, and placed them into the _global namespace_. It also imported the `sys` module into the same namespace. This global namespace is shared across all the cells in the notebook.As a result, the following cell should be able to access (as well as modify) them.print('The number is currently %d' % number) number = 11 sys.stderr.write('And now it is %d' % number) square(number)The number is currently 10By now, you've probably noticed a few interesting things about code cells:* Upon execution, their results are shown inline in the notebook, after the code that produced the results. These results are included into the saved notebook. 
Results include outputs of print statements (text that might have been written out to stdout as well as stderr) and the final result of the cell.* Some code cells do not have any visible output.* Code cells have a distinguishing border on the left. This border is a washed out gray color when the notebook is first loaded, indicating that a cell has not been run yet; the border changes to a filled blue border after the cell runs. Getting Help Python APIs are usually accompanied by documentation. You can use `?` to invoke help on a class or a method. For example, execute the cells below:str? g = globals() g.get?When run, these cells produce docstring content that is displayed in the help pane within the sidebar. The code cells also provide auto-suggest. For example, press `Tab` after the '.' to see a list of members callable on the `g` variable that was just declared.# Intentionally incomplete for purposes of auto-suggest demo, rather than running unmodified. g.Function signature help is also available. For example, press `Tab` in the empty parentheses below.str()Note that help in Python relies on the interpreter being able to resolve the type of the expression that you are invoking help on.If you have not yet executed code, you may be able to invoke help directly on the class or method you're interested in, rather than the variable itself. Try this.import datetime datetime.datetime?Python Libraries Datalab includes the standard Python library and a set of libraries that you can easily import. Most of the libraries were installed using `pip`, the Python package manager, or `pip3` for Python 3.%%bash pip list --format=columnsPackage Version ----------------------------------------------- -------------- appdirs 1.4.2 attrs 16.3.0 Automat 0.5.0 avro 1.8.1 backports-abc 0.5 backports.shutil-get-terminal-size 1.0.0 beautifulsoup4 4.5.3 bleach 1.5.0 brewer2mpl 1.4.1 bs4 0.0.1 certifi 2017.1.23 cffi 0.8.6 chardet 2.3.0 cloudml 0.1.9.1[...]If you have suggestions for additional packages to include, please submit feedback proposing the inclusion of the packages in a future version. Installing a Python Library You can use `pip` to install your own Python 2 libraries, or `pip3` to install Python 3 libraries.Keep in mind that this will install the library within the virtual machine instance being used for Datalab, and the library will become available to all notebooks and all users sharing the same instance.The library installation is temporary. If the virtual machine instance is recreated, you will need to reinstall the library.The example, below, installs [scrapy](http://scrapy.org/), a library that helps in scraping web content.%%bash apt-get update -y apt-get install -y -q python-dev python-pip libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev pip install -q scrapyHit http://security.debian.org jessie/updates InRelease Hit http://ftp.us.debian.org testing InRelease Ign http://deb.debian.org jessie InRelease Hit http://deb.debian.org jessie-updates InRelease Get:1 http://security.debian.org jessie/updates/main amd64 Packages [444 kB] Hit http://deb.debian.org jessie Release.gpg Hit http://deb.debian.org jessie Release Get:2 http://ftp.us.debian.org testing/main Sources [8936 kB] Get:3 http://deb.debian.org jessie-updates/main amd64 Packages [17.6 kB] Get:4 http://deb.debian.org jessie/main amd64 Packages [9049 kB] Fetched 18.4 MB in 14s (1279 kB/s) Reading package lists... Reading package lists... Building dependency tree... Reading state information... 
libffi-dev is already the newest version. libxml2-dev is already the newest version. libxslt1-dev is already the newest version. python-dev is already the newest version. python-pip is already the newest version. zlib1g-dev is already the newest version. libssl-dev is already the newest version. 0[...]Inspecting the Python evironment by running `pip list`, we should now see that Scrapy is installed and ready to use.%%bash pip list --format=columnsPackage Version ----------------------------------------------- -------------- appdirs 1.4.2 attrs 16.3.0 Automat 0.5.0 avro 1.8.1 backports-abc 0.5 backports.shutil-get-terminal-size 1.0.0 beautifulsoup4 4.5.3 bleach 1.5.0 brewer2mpl 1.4.1 bs4 0.0.1 certifi 2017.1.23 cffi 0.8.6 chardet 2.3.0 cloudml 0.1.9.1[...]Connected componentsA **connected component** of an undirected is a set of nodes, wherein it is possible to get from any node $i$ in that set to any other node $j$ by traversing the nodes of thatnetwork. A network is **fully connected** if it has one connected component.import numpy as np from graspologic.simulations import sbm from graspologic.plot import heatmap A, labels = sbm([10, 10], [[0.5, 0], [0, 0.5]], return_labels=True) heatmap(A, inner_hier_labels=labels, cbar=False)`graspologic` has tools for dealing with connected components. First, it is often usefulto know whether a network is fully connected. ```{note}For most applications, if your graph has multiple connected components, it makes more sense to treat each component as its ownnetwork and to analyze them separately.```from graspologic.utils import is_fully_connected is_fully_connected(A) import networkx as nx g = nx.from_numpy_array(A) is_fully_connected(g) for component in nx.connected_components(g): print(len(component))10 10For a directed network, there are two notions of connectedness.- A **weakly connected component** is a set of nodes such that it is possible to get from any node $i$ in the set to any node $j$ in the set *while ignoring edge directions*.- A **strongly connected component** is a set of nodes such that it is possible to get from any node $i$ in the set to any node $j$ in the set.Strong connectedness implies weak connectedness. Strictly speaking, you have to specify which version of connectedness you mean when talkingabout directed networks. However, I think it's more common to assume people mean weakly connected.This is the version of connectedness for a directed network that `graspologic` means.A, labels = sbm([10, 10], [[0.5, 0], [0, 0.5]], return_labels=True, directed=True) is_fully_connected(A) g = nx.from_numpy_array(A, create_using=nx.DiGraph) nx.is_weakly_connected(g)For data cleaning purposes, it is often helpful to be able to select the **largest connected component**. 
Just like it sounds, this is just the largest group of nodes in the network which satisfy one of the connectedness properties above.from graspologic.utils import largest_connected_component A = sbm([20, 10], [[0.5, 0], [0, 0.5]]) A_lcc = largest_connected_component(A) A_lcc.shapeCharting from feature notebook Angara and some other server side charting lib did not work from mono so skipping it JS based stuff looks more promisining, so lets try this out@"" |> Util.Html |> Display type D3Op = { Op: string Style: List Attr: List On: List> } type D3 = List let d3op = {Op = ""; Style = []; Attr = []; On = []} let mapConcat s f xs = xs |> List.map f |> String.concat s let rec D3OpPrinter (op: D3Op) = sprintf "%s%s%s%s" (if op.Op.Length > 0 then sprintf "\t.append(\"%s\")\n" op.Op else "") (op.Style |> mapConcat "\t" (fun (k, v) -> sprintf ".style(\"%s\", \"%s\")\n" k v ) ) (op.Attr |> mapConcat "\t" (fun (k, v) -> sprintf ".attr(\"%s\", %d)\n" k v)) (op.On |> mapConcat "\t" (fun (k, v) -> sprintf ".on(\"%s\",\n\tfunction(){\n\td3.select(this)\n%s\t})\n" k (v |> mapConcat "" D3OpPrinter) )) let D3Printer (d3: D3) = sprintf """
""" |> Util.Html |> Display """
""" data // how do we want frame to be displayed type Options = { Height: int Width: int } with static member Default = { Width = 800 Height = 400 } // wrapping charting related code in a frame let frame options c = sprintf """ """ c options.Height options.Width // An attempt make chartJs interop more strongly typed. // Colors could be further typed but I'm not building a lib yet :) type ChartJsDataset = { label: string backgroundColor: string borderColor: string data: int seq } type ChartJsData = { labels: string seq datasets: ChartJsDataset seq } // "render" dataset by wrapping it in HTML and then ina frame let chartLine options labels datasets = let data = { labels = labels datasets = datasets } let stringifiedData = Newtonsoft.Json.JsonConvert.SerializeObject(data).Replace("\"","'") chartjs stringifiedData |> frame options |> Util.Html |> Display // Datasets. let first = { label = "My First dataset" backgroundColor = "rgba(255, 99, 132, 0.2)" borderColor = "rgba(255, 99, 132, 1" data = [0;10;5;2;20;30;45] } let second = { label = "My Second dataset" backgroundColor = "rgba(54, 162, 235, 0.2)" borderColor = "rgba(54, 162, 235, 1)" data = [23;0;0;0;0;10;45] } chartLine Options.Default ["January";"February";"March";"April";"May";"June";"July"] [first; second]Frame works!!! Let's check alternatives Google chart work out of the boxfrom here http://markibrahim.me/musings/notebooks/beautiful_javascript_charts.html"""
""" |> Util.Html |> Display11 hours of work plus 2 hours of sleep does not look healthy :(""" """.Replace("\"", "'") |> Util.Html |> Display """
Hello
""" |> frame Options.Default |> Util.Html |> DisplayProphet will by default fit weekly and yearly seasonalities, if the time series is more than two cycles long. It will also fit daily seasonality for a sub-daily time series. You can add other seasonalities (monthly, quarterly, hourly) using the add_seasonality method (Python) or function (R).The inputs to this function are a name, the period of the seasonality in days, and the number of Fourier terms for the seasonality. Increasing the number of Fourier terms allows the seasonality to fit faster changing cycles, but can also lead to overfitting: $N$ Fourier terms corresponds to $2N$ variables used for modeling the cycle. For reference, by default Prophet uses 3 terms for weekly seasonality and 10 for yearly seasonality. An optional input to add_seasonality is the prior scale for that seasonal component - this is discussed below.# empty model m = Prophet(weekly_seasonality=False) # add seasonality before fitting m.add_seasonality(name='monthly', period=30.5, fourier_order=5) # fit m.fit(df) # future future = m.make_future_dataframe(periods=365) # do the forecast forecast = m.predict(future) # show it %matplotlib inline m.plot_components(forecast);INFO:fbprophet.forecaster:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this. /Users/karve/anaconda3/envs/dev/lib/python3.6/site-packages/pystan/misc.py:399: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. elif np.issubdtype(np.asarray(v).dtype, float):Holidays and special eventsMake a data frame with two columns: `holiday, ds`.If you have holidays or other recurring events that you’d like to model, you must create a dataframe for them. It has two columns (holiday and ds) and a row for each occurrence of the holiday. It must include all occurrences of the holiday, both in the past (back as far as the historical data go) and in the future (out as far as the forecast is being made). If they won’t repeat in the future, Prophet will model them and then not include them in the forecast.playoffs = pd.DataFrame({ 'holiday': 'playoff', 'ds': pd.to_datetime(['2008-01-13', '2009-01-03', '2010-01-16', '2010-01-24', '2010-02-07', '2011-01-08', '2013-01-12', '2014-01-12', '2014-01-19', '2014-02-02', '2015-01-11', '2016-01-17', '2016-01-24', '2016-02-07']), 'lower_window': 0, 'upper_window': 1, }) superbowls = pd.DataFrame({ 'holiday': 'superbowl', 'ds': pd.to_datetime(['2010-02-07', '2014-02-02', '2016-02-07']), 'lower_window': 0, 'upper_window': 1, }) holidays = pd.concat((playoffs, superbowls)) print(Prophet.__doc__) m = Prophet(holidays=holidays) forecast = m.fit(df).predict(future) forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][ ['ds', 'playoff', 'superbowl']][-10:] m.plot_components(forecast)If you find that the holidays are overfitting, you can adjust their prior scale to smooth them using the parameter holidays_prior_scale. By default this parameter is 10, which provides very little regularization. 
Reducing this parameter dampens holiday effects:m = Prophet(holidays=holidays, holidays_prior_scale=0.05).fit(df) forecast = m.predict(future) forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][ ['ds', 'playoff', 'superbowl']][-10:] m.plot_components(forecast)Adjusting seasonality for individualm = Prophet() m.add_seasonality( name='weekly', period=7, fourier_order=3, prior_scale=0.1);* `add_regressor` is more general than `holidays=`; (does not require binary data)* can use another time series as the regressordef nfl_sunday(ds): date = pd.to_datetime(ds) if date.weekday() == 6 and (date.month > 8 or date.month < 2): return 1 else: return 0 df['nfl_sunday'] = df['ds'].apply(nfl_sunday) m = Prophet() m.add_regressor('nfl_sunday') m.fit(df) future['nfl_sunday'] = future['ds'].apply(nfl_sunday) forecast = m.predict(future) m.plot_components(forecast);INFO:fbprophet.forecaster:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this. /Users/karve/anaconda3/envs/dev/lib/python3.6/site-packages/pystan/misc.py:399: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. elif np.issubdtype(np.asarray(v).dtype, float):A deep dive into TOB-WGS' pipelineChapter 1: what does `ReblockGVCF` actually do ? Let's examine sample TOB1520 (which is an outlier for some QC metrics)import hail as hl; # All datasets in TOB-WGS are using GRCh38 hl.init(default_reference='GRCh38');Running on Apache Spark version 3.1.2 SparkUI available at http://loic-notebook.australia-southeast1-a.c.notebooks-314505.internal:4040 Welcome to __ __ <>__ / /_/ /__ __/ / / __ / _ `/ / / /_/ /_/\_,_/_/_/ version 0.2.67-bafea6b18247 LOGGING: writing to /home/jupyter/tob-deepdive/hail-20210618-0309-0.2.67-bafea6b18247.logImport a GVCF and explore structure and contentgvcf = hl.import_vcf('gs://cpg-tob-wgs-test/gvcf/batch1/TOB1520.g.vcf.gz', min_partitions=12, force_bgz=True) gvcf.describe() gvcf.info.END.show() hl.summarize_variants(gvcf)2021-06-18 03:11:32 Hail: INFO: Coerced sorted datasetWorth noting:of 35,550,357 variants, 30,075,685 are homozygous reference blocksLet's load the GVCF after `ReblockGVCF` has been performedrb_gvcf = hl.import_vcf('gs://cpg-tob-wgs-test-tmp/joint-calling/v2/hail/batch/728b87/1/output_gvcf.g.vcf.gz', force_bgz=True, min_partitions=12) rb_gvcf.describe(); hl.summarize_variants(rb_gvcf);---------------------------------------- Global fields: None ---------------------------------------- Column fields: 's': str ---------------------------------------- Row fields: 'locus': locus 'alleles': array 'rsid': str 'qual': float64 'filters': set 'info': struct { AC: array, AF: array, AN: int32, AS_BaseQRankSum: array, AS_FS: array, AS_InbreedingCoeff: array, AS_MQ: array, AS_MQRankSum: array, AS_QD: array, AS_QUALapprox: str, AS_RAW_BaseQRankSum: str, AS_RAW_MQ: str, AS_RAW_MQRankSum: str, AS_RAW_ReadPosRankSum: str, AS_ReadPosRankSum: array, AS_SB_TABLE: str, AS_SOR: array, AS_VarDP: str, BaseQRankSum: float64, DP: int32, DS: bool, END: int32, [...]These are the annotations added by ReblockGVCF AC, AF, AN: ???? why would we need that on a single sample ? 
AS_BaseQRankSum: array, AS_FS: array, AS_MQ: array, AS_MQRankSum: array, AS_QD: array, AS_QUALapprox: str, AS_ReadPosRankSum: array, AS_SOR: array, AS_VarDP: str, FS: float64, MQ: float64, MQ_DP: int32, QD: float64, QUALapprox: int32, RAW_GT_COUNT: array, SOR: float64, VarDP: int32 and the annotations removed by ReblockGVCF MLEAC: array, MLEAF: array Additionally we lost more than 5e6 variants: not only homref blocks but also SNPs, etc.Let's have a look at what we lost:lost = gvcf.anti_join_rows(rb_gvcf.rows()) hl.summarize_variants(lost)2021-06-18 03:23:08 Hail: INFO: Coerced sorted dataset 2021-06-18 03:23:57 Hail: INFO: Coerced sorted datasetWe lost mostly homref blocks, as expected, but also other kind of variants, such as 425,204 SNPsLet's inspect some of these lost variants (more than 2 alleles means it is not a homref block)lost.filter_rows(hl.len(lost.alleles) == 3).show(50,1,truncate=30)2021-06-18 03:30:59 Hail: INFO: Coerced sorted dataset 2021-06-18 03:31:56 Hail: INFO: Coerced sorted dataset 2021-06-18 03:32:46 Hail: INFO: Coerced sorted datasetWhy did we lose for example chr1:16571 ? a 0/0 with GQ=54. Did it get merged in an homref block ?Note: the phased GT does not match the GT ? how is that possible ? (example: chr1:109262)Note: all these variant are homref. Did we lose any non homref variants ?lost = lost.annotate_rows(nonhomref=hl.agg.count_where(lost.GT.is_non_ref())>0) lost.aggregate_rows(hl.agg.count_where(lost.nonhomref))2021-06-18 03:42:11 Hail: INFO: Coerced sorted dataset 2021-06-18 03:43:02 Hail: INFO: Coerced sorted dataset156,380 non homref variants were lost, let's examine some of them:lost.filter_rows(lost.nonhomref).show(50,1, width=10)2021-06-18 04:05:42 Hail: INFO: Coerced sorted dataset 2021-06-18 04:06:41 Hail: INFO: Coerced sorted dataset 2021-06-18 04:07:33 Hail: INFO: Coerced sorted datasetList comprehensionstudents_marks_above_80 = \ [student for student in student_marks if student[1] > 80] students_marks_below_80 = \ [student for student in student_marks if student[1] < 80] students_marks_above_80,students_marks_below_80 students_marks_above_80 + students_marks_below_80 sample_list = [1,2,3] sample_list.extend([5,6]) sample_list [1,2,3] + [5,6] list_of_splits = [] # loop through input list # create a sublist when you have iterated split_size elements # append this sublist to the list_of_splits x = [1, 2, 3, 4, 5] split_size = 2 [[1,2],[3,4],[5]] x = [1, 2, 3, 4, 5] split_size = 7 [[1, 2, 3, 4, 5]] def split_list(input_list,split_size): list_of_splits = [] num_splits = len(input_list)// split_size even_splits = len(input_list) % split_size == 0 num_splits = num_splits if even_splits else num_splits+1 for i in range(num_splits): begin_marker = i*split_size list_of_splits.\ append(input_list[begin_marker:begin_marker+split_size]) return num_splits,list_of_splits split_list([1,2,3,4,5],4) x = [1,2,3,4,5] x[0:2] x[2:4]CNN - 1D - Toxic CommentsA corpus of manually labeled comments - classifying each comment by its type of toxicity is available on Kaggle. 
We will aim to do a binary classification of whether a comment is toxic or notApproach:- Learning Embedding with the Task- 1D - Convolution Neural Networks to model the sequence dataimport numpy as np import pandas as pd import keras import matplotlib.pyplot as plt %matplotlib inline import visUsing TensorFlow backend.Get the Data Uncomment these shell lines to get the data# !wget http://bit.do/deep_toxic_train -P data/ # !mv data/deep_toxic_train data/train.zip df = pd.read_csv("data/train.zip") df.head()Import the required librariesfrom keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequencesCreate the Input & Output Datatrain_sentences = df["comment_text"] train_sentences.head()**Pre-processing the train data**- Tokenization: "This is an apple" -> ["This", "is", "an", "apple"]- Indexing: {0: "This", 1: "is", 2: "an", 3: "apple"}- Index Representation: [0, 1, 2, 3]from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences # Tokenizer max_words = 20000 tokenizer = Tokenizer(num_words=max_words, oov_token='UNK') tokenizer.fit_on_texts(list(train_sentences))Tokenizer Fix from https://github.com/keras-team/keras/issues/8092tokenizer.word_index = {e:i for e,i in tokenizer.word_index.items() if i <= max_words} # <= because tokenizer is 1 indexed tokenizer.word_index[tokenizer.oov_token] = max_words + 1 # Index Representation tokenized_train = tokenizer.texts_to_sequences(train_sentences) # Selecting Padding # find length of each sentence and plot the length number_of_words = [len(comment) for comment in tokenized_train] plt.hist(number_of_words, bins = np.arange(0, 500, 10)); # Padding to make it uniform maxlen = 200 X = pad_sequences(tokenized_train, maxlen = maxlen) labels = df.iloc[:,2].values # Baseline Benchmark 1 - df.iloc[:,2].sum()/df.iloc[:,2].count() from keras.utils import to_categorical y = to_categorical(labels) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_train.shape, X_test.shape, y_train.shape, y_test.shapeStep 2: Create the Model Architecturefrom keras.models import Sequential from keras.layers import Dense, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Embedding, Dropout model = Sequential() model.add(Embedding(max_words, output_dim=128)) model.add(Conv1D(128, 7, activation='relu')) model.add(MaxPooling1D(5)) model.add(Conv1D(128, 7, activation='relu')) model.add(GlobalMaxPooling1D()) model.add(Dense(64, activation="relu")) model.add(Dropout(0.25)) model.add(Dense(2, activation='sigmoid')) model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, None, 128) 2560000 _________________________________________________________________ conv1d_1 (Conv1D) (None, None, 128) 114816 _________________________________________________________________ max_pooling1d_1 (MaxPooling1 (None, None, 128) 0 _________________________________________________________________ conv1d_2 (Conv1D) (None, None, 128) 114816 _________________________________________________________________ global_max_pooling1d_1 (Glob (None, 128) 0 _________________________________________________________________ dense_1 (Dense) (None, 64) 8256 _________________________________________________________________ dropout_1 [...]Step 3: Compile the Model & Fit on the 
Datamodel.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) output = model.fit(X_train, y_train, batch_size=128, epochs=5, validation_split=0.2)Train on 102124 samples, validate on 25532 samples Epoch 1/5 102124/102124 [==============================] - 26s 252us/step - loss: 0.1941 - acc: 0.9332 - val_loss: 0.1595 - val_acc: 0.9442 Epoch 2/5 102124/102124 [==============================] - 21s 201us/step - loss: 0.1501 - acc: 0.9483 - val_loss: 0.1572 - val_acc: 0.9468 Epoch 3/5 102124/102124 [==============================] - 21s 201us/step - loss: 0.1388 - acc: 0.9514 - val_loss: 0.1584 - val_acc: 0.9461 Epoch 4/5 102124/102124 [==============================] - 21s 204us/step - loss: 0.1260 - acc: 0.9560 - val_loss: 0.1679 - val_acc: 0.9412 Epoch 5/5 102124/102124 [==============================] - 21s 202us/step - loss: 0.1117 - acc: 0.9614 - val_loss: 0.1706 - val_acc: 0.9454Step 4: Evaluate the Modelvis.metrics(output.history) score = model.evaluate(X_test, y_test, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1])Test loss: 0.17662850376192293 Test accuracy: 0.9449788500704998Step 5: Visualise evaluation & Make a predictionpredict_classes = model.predict_classes(X_test) actual_classes = np.dot(y_test,np.array([[0],[1]])).reshape(-1) pd.crosstab(actual_classes, predict_classes)How to crop meshesThis notebook shows how a 2D and 3D mesh can be cropped. 2D triangle meshGenerate a 2D sample mesh using the data module.from nanomesh.data import blob_mesh2d mesh = blob_mesh2d(seed=2) triangles = mesh.get('triangle') triangles.plot(){1, 2}Crop the mesh using the `crop()` method. Note that it is not necessary to specify all parameters.cropped_triangles = triangles.crop( xmin=20, xmax=45, ymin=12, ymax=42, ) cropped_triangles.plot(){1, 2}To include partial triangles (i.e. 
those which lie on the cropping boundaries), use the `include_partial` argument.cropped_triangles2 = triangles.crop( xmin=20, xmax=45, ymin=12, ymax=42, include_partial=True, ) cropped_triangles2.plot(){1, 2}3D tetrahedral meshUsing the data submodule, a 3D sample mesh can be generated.from nanomesh.data import blob_mesh3d mesh = blob_mesh3d(opts='-pq1.2Aa', length=10, seed=2) tetras = mesh.get('tetra') tetras3D meshes can be cropped using the same interface.new = tetras.crop( xmin=2, xmax=8, ymin=2, ymax=7, zmin=2, zmax=7 ) new.plot_pyvista( jupyter_backend='static', show_edges=True, cmap=['violet', 'yellow'], )Official Documentation : https://docs.python.org/2/library/operator.htmlimport operator print operator.mul(5,6) print operator.add(5,6) print operator.sub(5,6) print operator.ge(5,6) print operator.lt(5,6) print operator.le(5,5) print operator.div(5.0,6) print operator.floordiv(5.0,6) print operator.countOf([1, 2, 1, 2, 3, 1, 1], 1) print operator.contains([1, 2, 1, 2, 3, 1, 1], 1) print operator.indexOf([1, 2, 1, 2, 3, 1, 1], 3)4Passing to Higher Order Functionsmy_list = [(1, "Hello"), (200, "World"), (50, "Yolo"), (170, "XOXO")] sorted(my_list, key=operator.itemgetter(1), reverse=True)Performance speedupsimport timeit timeit.timeit('reduce(lambda x,y : x*y, range(1,100))') timeit.timeit('reduce(mul, range(1,100))',setup='from operator import mul')Shade profiles Site 1 Site 2 Site 3 Site 4# additional data # albedo alpha = 0.4 # module azimuth Am = 180 # module tilt angle theta = 35 am = 90 - theta # Sky view factors for the sites 1-4 SVF = [0.8151, 0.6304, 0.6077, 0.3661] import numpy as np # cos of angle of irradiance df['cos_AOI'] = np.cos(np.radians(am)) * np.cos(np.radians(df['as'])) *\ np.cos(np.radians(Am - df['As'])) + np.sin(np.radians(am)) * np.sin(np.radians(df['as'])) df.columns # Direct irradiance df['Gdirect_Site1'] = df['DNI']*df['cos_AOI']*df['SF_Site1'] df['Gdirect_Site2'] = df['DNI']*df['cos_AOI']*df['SF_Site2'] df['Gdirect_Site3'] = df['DNI']*df['cos_AOI']*df['SF_Site3'] df['Gdirect_Site4'] = df['DNI']*df['cos_AOI']*df['SF_Site4'] # Diffuse irradiance df['Gdiffuse_Site1'] = SVF[0] * df['DHI'] df['Gdiffuse_Site2'] = SVF[1] * df['DHI'] df['Gdiffuse_Site3'] = SVF[2] * df['DHI'] df['Gdiffuse_Site4'] = SVF[3] * df['DHI'] # Albedo irradiance df['Galbedo_Site1'] = df['GHI'] * alpha * (1 - SVF[0]) df['Galbedo_Site2'] = df['GHI'] * alpha * (1 - SVF[1]) df['Galbedo_Site3'] = df['GHI'] * alpha * (1 - SVF[2]) df['Galbedo_Site4'] = df['GHI'] * alpha * (1 - SVF[3]) # Total irradiance df['G_Site1'] = df['Gdirect_Site1'] + df['Gdiffuse_Site1'] + df['Galbedo_Site1'] df['G_Site2'] = df['Gdirect_Site2'] + df['Gdiffuse_Site2'] + df['Galbedo_Site2'] df['G_Site3'] = df['Gdirect_Site3'] + df['Gdiffuse_Site3'] + df['Galbedo_Site3'] df['G_Site4'] = df['Gdirect_Site4'] + df['Gdiffuse_Site4'] + df['Galbedo_Site4'] # Energy calculation E_site1 = np.sum(df['G_Site1']) E_site2 = np.sum(df['G_Site2']) E_site3 = np.sum(df['G_Site3']) E_site4 = np.sum(df['G_Site4']) E_site1, E_site2, E_site3, E_site4[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/davemlz/eemont/blob/master/docs/tutorials/024-Container-Image-ImageCollection.ipynb)[![Open in SageMaker Studio Lab](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/davemlz/eemont/blob/master/docs/tutorials/024-Container-Image-ImageCollection.ipynb)[![Open in Planetary 
Computer](https://img.shields.io/badge/Open-Planetary%20Computer-black?style=flat&logo=microsoft)](https://pccompute.westeurope.cloudapp.azure.com/compute/hub/user-redirect/git-pull?repo=https://github.com/davemlz/eemont&urlpath=lab/tree/eemont/docs/tutorials/024-Container-Image-ImageCollection.ipynb&branch=master) Container Emulation Methods for ee.Image and ee.ImageCollection _Tutorial created by ****_: [GitHub](https://github.com/davemlz) | [Twitter](https://twitter.com/dmlmont)- GitHub Repo: [https://github.com/davemlz/eemont](https://github.com/davemlz/eemont)- PyPI link: [https://pypi.org/project/eemont/](https://pypi.org/project/eemont/)- Conda-forge: [https://anaconda.org/conda-forge/eemont](https://anaconda.org/conda-forge/eemont)- Documentation: [https://eemont.readthedocs.io/](https://eemont.readthedocs.io/)- More tutorials: [https://github.com/davemlz/eemont/tree/master/docs/tutorials](https://github.com/davemlz/eemont/tree/master/docs/tutorials) Let's start! If required, please uncomment:#!pip install eemont #!pip install geemapImport the required packges.import ee, eemont, geemap import geemap.colormaps as cmAuthenticate and Initialize Earth Engine and geemap.Map = geemap.Map()Let's define a point of interest:poi = ee.Geometry.PointFromQuery("Oporto, Portugal",user_agent = "eemont-tutorial-024")Let's work with Sentinel-2 SR:S2 = (ee.ImageCollection("COPERNICUS/S2_SR") .filterBounds(poi) .filterDate("2020-01-01","2020-07-01") .preprocess() .spectralIndices())Container Emulation Methods ee.ImageCollection If you want to know how many images has the image collection, you can use `len()`:len(S2)If you want to select specific bands from the collection, you can use `collection[band]` or `collection[[band1,band2,...,bandn]]`:RGB = S2[["B2","B3","B4"]]You can also do this by using band indices:RGB = S2[[1,2,3]]Or regex:RGB = S2["B[2-4]"]Or even better: slices!RGB = S2[1:4]Create a composite by using container emulation methods!Map = geemap.Map() Map.addLayer(S2[[3,2,1]].median(),{"min":0,"max":0.3},"RGB") Map.centerObject(poi) MapIf you want to select images from a collection, convert the collection to a list and use container emulation methods!We are going to select the first, the third, and the fifth images from the collection.First, let's convert the collection to a list:S2list = S2.toList(S2.size())Then, we can select the images!S2selected = S2list[[0,2,4]]Now we have three images in the `S2selected` list:len(S2selected)Now, let's select all images from the 21st until the end.Psst! We can use slices!S2selected = S2list[20:]Let's see how many images do we have!len(S2selected)If we don't want the last images to be selected, we can use negative indices! Here an example:S2selected = S2list[20:-5]Now, let's see how many images do we have in the list now!len(S2selected)But they're ee.Image objects inside an ee.List object. We can leave it that way, or we can convert them into an ee.ImageCollection object!S2selected = ee.ImageCollection(S2selected)ee.Image We can also select bands for an ee.Image object!S2img = S2.first()Let's select the NDVI:NDVI = S2img['NDVI']Or let's select the RGB bands using slices!RGBimg = S2img[1:4]The same rules for ee.ImageCollection apply for the ee.Image object class!Let's visualize the NDVI for Oporto!Map = geemap.Map() Map.addLayer(S2img["NDVI"],{"min":0,"max":1,"palette":cm.palettes.ndvi},"NDVI") Map.centerObject(poi) MapParameter IndexingParameter indexing is outside the scope of ParamTools. 
However, indexed parameters are an important part of many modeling projects and ParamTools is up to the challenge. Let's take a look at how one might implement indexed parameters with ParamTools. Setup--------------------------The TaxParams parameters will be used to demonstrate parameter indexing. These parameters are based directly on the Tax-Calculator policy parameters. Tax-Calculator does serious parameter indexing since many of its parameters depend on inflation and wage growth rates. This tutorial demonstrates how you can replicate that same level of parameter indexing with ParamTools.The approach for this tutorial is to build a `IndexedParameters` class on top of `paramtools.Parameters`. This class can then be used by projects that require parameter indexing. Get the code and the data1. Clone the ParamTools repo: https://github.com/PSLmodels/ParamTools2. Install paramtools ``` conda create -n paramtools-env numpy taxcalc pip -c pslmodels conda activate paramtools-env do a local install (temporary) cd ParamTools/ pip install -e . ```3. Change directories to `ParamTools/paramtools/examples/taparams-demo`# quick helper to print stuff out nicely. def pprint(vals): for v in vals: print(v)TaxParamsBefore we get started, let's make sure that the TaxParams can be loaded.from marshmallow import Schema, fields import paramtools # first define the compatible data custom field. class CompatibleDataSchema(Schema): """ Schema for Compatible data object { "compatible_data": {"data1": bool, "data2": bool, ...} } """ puf = fields.Boolean() cps = fields.Boolean() class TaxParams(paramtools.Parameters): # You need to be in the paramtools/examples/taxparams directory! schema = "schema.json" defaults = "defaults.json" field_map = {"compatible_data": fields.Nested(CompatibleDataSchema())} params = TaxParams() print("EITC celing max year: ", max(map(lambda x: x["year"], params._EITC_c)), "\n") print("EITC ceiling ceiling as dict: ") pprint(params._EITC_c)EITC celing max year: 2018 EITC ceiling ceiling as dict: {'value': 487.0, 'EIC': '0kids', 'year': 2013} {'value': 3250.0, 'EIC': '1kid', 'year': 2013} {'value': 5372.0, 'EIC': '2kids', 'year': 2013} {'value': 6044.0, 'EIC': '3+kids', 'year': 2013} {'value': 496.0, 'EIC': '0kids', 'year': 2014} {'value': 3305.0, 'EIC': '1kid', 'year': 2014} {'value': 5460.0, 'EIC': '2kids', 'year': 2014} {'value': 6143.0, 'EIC': '3+kids', 'year': 2014} {'value': 503.0, 'EIC': '0kids', 'year': 2015} {'value': 3359.0, 'EIC': '1kid', 'year': 2015} {'value': 5548.0, 'EIC': '2kids', 'year': 2015} {'value': 6242.0, 'EIC': '3+kids', 'year': 2015} {'value': 506.0, 'EIC': '0kids', 'year': 2016} {'value': 3373.0, 'EIC': '1kid', 'year': 2016} {'value': 5572.0, 'EIC': '2kids', 'year': 2016} {'value': 6269.0, 'EIC': '3+kids', 'year': 2016} {'value': 510.0, 'EIC': '0kids', 'year': 2017} {'value': 3400.0, 'EIC': '1kid', 'year': 2017} {'value': 5616.0, 'EIC': '2kids', 'year': 2017} {'value': 6318.0, 'EIC': '3+kids'[...]Extend Parameters------------------------To get started, the ability to extend parameters through a given year needs to be added to ParamTools. This is done by finding the maximum specifed year for each parameter and duplicating each value object defined at this maximum year for the remaining years. For example, the maximum defined year for `_EITC_c` is 2018. There are four values defined in 2018. 
These four values will be extended for the remaining years, 2019 to 2028.```json[ {"year": 2018, "value": 519.0, "EIC": "0kids"}, {"year": 2018, "value": 3461.0, "EIC": "1kid"}, {"year": 2018, "value": 5716.0, "EIC": "2kids"}, {"year": 2018, "value": 6431.0, "EIC": "3+kids"}]```from collections import defaultdict import paramtools class ExtendParameters(paramtools.Parameters): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.extend() def extend(self): """ Guarantee that all parameters are defined for each year from start year to end year. """ max_allowed_year = max(self._stateless_dim_mesh["year"]) adjustment = defaultdict(list) for param, data in self.specification(meta_data=True).items(): max_year = max(map(lambda x: x["year"], data["value"])) if max_year == max_allowed_year: continue value_objects = self._get(param, True, year=max_year) while max_year < max_allowed_year: max_year += 1 for value_object in value_objects: adjustment[param].append(dict(value_object, **{"year": max_year})) self.adjust(adjustment) class TaxParams(ExtendParameters): schema = "schema.json" defaults = "defaults.json" field_map = {"compatible_data": fields.Nested(CompatibleDataSchema())} params = TaxParams() print("EITC celing max year: ", max(map(lambda x: x["year"], params._EITC_c)), "\n")EITC celing max year: 2028We can activate the `array_first` mode, since all parameters have been extended across the year axis. This means that all parameter values will be available as arrays instead of a list of dictionaries, or [value-objects][1]. [1]: https://paramtools.readthedocs.io/en/latest/spec.htmlvalue-object# all parameters can now be converted to arrays. params.array_first = True params.set_state() print("EITC celing as array: ") print(params._EITC_c)EITC celing as array: [[ 487. 3250. 5372. 6044.] [ 496. 3305. 5460. 6143.] [ 503. 3359. 5548. 6242.] [ 506. 3373. 5572. 6269.] [ 510. 3400. 5616. 6318.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.] [ 519. 3461. 5716. 6431.]]Note that 519, 3461, 5716, and 6431 have been set as the default value for year 2018 to 2028. Indexed Parameters Intuition-----------------------------------The previous example shows how to extend parameter values along a given axis, like "year". However, what's really going on is that they are just repeated until the maximum year is reached. Now, it's time to index them at some specified rate.To "grow" a parameter forward a year, we need to multiply it by one plus the rate at which it is expected to grow or its recorded growth rate. 
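To make that step concrete before generating any data, here is a minimal sketch of a single indexing step; the value and the rate below are made-up placeholders, not Tax-Calculator numbers.

```python
# A single indexing step in isolation (illustrative numbers only).
import numpy as np

prev_value = 1000.0   # hypothetical parameter value in year t
rate = 0.02           # hypothetical growth/inflation rate for year t + 1

# "grow" the value forward one year: multiply by (1 + rate), then round
next_value = np.round(prev_value * (1 + rate), 2)
print(next_value)     # 1020.0
```

The same multiplication, applied year after year and per dimension value (for example, per marital status), is all the extension logic below does.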
First, let's generate some data:min_allowed_year = 2013 max_allowed_year = 2028 # max specified year max_year = 2019 MARS = ["single", "joint", "separate", "headhousehold", "widow"] vals = [{"year": 2013 + i, "MARS": status, "value": 1000 + i} for i in range(max_year - min_allowed_year + 1) for status in MARS] pprint(vals){'year': 2013, 'MARS': 'single', 'value': 1000} {'year': 2013, 'MARS': 'joint', 'value': 1000} {'year': 2013, 'MARS': 'separate', 'value': 1000} {'year': 2013, 'MARS': 'headhousehold', 'value': 1000} {'year': 2013, 'MARS': 'widow', 'value': 1000} {'year': 2014, 'MARS': 'single', 'value': 1001} {'year': 2014, 'MARS': 'joint', 'value': 1001} {'year': 2014, 'MARS': 'separate', 'value': 1001} {'year': 2014, 'MARS': 'headhousehold', 'value': 1001} {'year': 2014, 'MARS': 'widow', 'value': 1001} {'year': 2015, 'MARS': 'single', 'value': 1002} {'year': 2015, 'MARS': 'joint', 'value': 1002} {'year': 2015, 'MARS': 'separate', 'value': 1002} {'year': 2015, 'MARS': 'headhousehold', 'value': 1002} {'year': 2015, 'MARS': 'widow', 'value': 1002} {'year': 2016, 'MARS': 'single', 'value': 1003} {'year': 2016, 'MARS': 'joint', 'value': 1003} {'year': 2016, 'MARS': 'separate', 'value': 1003} {'year': 2016, 'MARS': 'headhousehold', 'value': 1003} {'year': 2016, 'MARS': 'widow', 'value': 1003} {'year': 201[...]Second, we need to be able to to get the previous year's value, while taking into account the current value object's MARS value.def get_mars_lookup(value_objects): """Return dictionary where the MARS values are the keys.""" return {vo["MARS"]: {"value": vo["value"]} for vo in value_objects} test_vals = [ {'year': 2013, 'MARS': 'single', 'value': 'single val'}, {'year': 2013, 'MARS': 'joint', 'value': 1000}, {'year': 2013, 'MARS': 'separate', 'value': 1000}, {'year': 2013, 'MARS': 'headhousehold', 'value': 1000}, {'year': 2013, 'MARS': 'widow', 'value': 1000} ] mars_lookup_2013 = get_mars_lookup(test_vals) print("Look up the value for MARS=single: ", mars_lookup_2013["single"]["value"])Look up the value for MARS=single: single valFinally, we extend and index the data.import taxcalc import numpy as np # use taxcalc inflation rates rates = taxcalc.Policy().inflation_rates() print("rates", rates) print("vals") pprint(vals[:-10]) for ix, year in enumerate(range(max_year + 1, max_allowed_year + 1)): prev_year = year - 1 prev_year_vals = [val for val in vals if val["year"] == prev_year] mars_lookup = get_mars_lookup(prev_year_vals) for status in MARS: new_val = { "year": year, "MARS": status, # previous value * inflation rate for current year! 
"value": np.round(mars_lookup[status]["value"] * (1 + rates[max_year - min_allowed_year + ix]), 2) } vals.append(new_val) print("result for a subsection of the values: ") pprint(vals[25:-25]) print("...")rates [0.0148, 0.0159, 0.0012, 0.0127, 0.0189, 0.0229, 0.0199, 0.0224, 0.0227, 0.0223, 0.0218, 0.0215, 0.0211, 0.021, 0.021, 0.0211] vals {'year': 2013, 'MARS': 'single', 'value': 1000} {'year': 2013, 'MARS': 'joint', 'value': 1000} {'year': 2013, 'MARS': 'separate', 'value': 1000} {'year': 2013, 'MARS': 'headhousehold', 'value': 1000} {'year': 2013, 'MARS': 'widow', 'value': 1000} {'year': 2014, 'MARS': 'single', 'value': 1001} {'year': 2014, 'MARS': 'joint', 'value': 1001} {'year': 2014, 'MARS': 'separate', 'value': 1001} {'year': 2014, 'MARS': 'headhousehold', 'value': 1001} {'year': 2014, 'MARS': 'widow', 'value': 1001} {'year': 2015, 'MARS': 'single', 'value': 1002} {'year': 2015, 'MARS': 'joint', 'value': 1002} {'year': 2015, 'MARS': 'separate', 'value': 1002} {'year': 2015, 'MARS': 'headhousehold', 'value': 1002} {'year': 2015, 'MARS': 'widow', 'value': 1002} {'year': 2016, 'MARS': 'single', 'value': 1003} {'year': 2016, 'MARS': 'joint', 'value': 1003} {'year': 2016, 'MARS': 'se[...]There you go. A simple indexing function. However, it's not going to work for `TaxParams` just yet. Tax-Calculator parameters could have other dimensions besides Marital Status, like Itemized Deduction type. Further, we need to check that the parameter needs to be indexed in the first place. `IndexedParameters` Class-----------------------------`IndexedParameters` builds in the notions discussed in the previous section in a more general way. Additionally, it supports adjusting parameters and extending and indexing those new values. The `extend` method is pretty gnarly, but if you stare at it long enough, you'll notice that the `get_vo_lookup` corresponds to `get_mars_lookup` and the for loop boundaries are identical, among other similarities.from collections import defaultdict import numpy as np from marshmallow import Schema, fields import paramtools import taxcalc class IndexedParameters(paramtools.Parameters): def __init__(self): super().__init__() self.extend() def extend(self, params_to_extend=None): min_allowed_year = min(self._stateless_dim_mesh["year"]) max_allowed_year = max(self._stateless_dim_mesh["year"]) adjustment = defaultdict(list) spec = self.specification(use_state=False, meta_data=True) if params_to_extend is None: param_data = spec else: param_data = {param: spec[param] for param in params_to_extend} def get_vo_lookup(vos, dims): qh = {} for vo in vos: qh[tuple(vo[d] for d in dims)] = vo["value"] return qh for param, data in param_data.items(): max_year = max(map(lambda x: x["year"], data["value"])) if max_year == max_allowed_year: continue max_year = data["value"][-1]["year"] if max_year == max_allowed_year: continue value_objects = self._get(param, True, year=max_year) if data["cpi_inflated"]: # preserve order! 
dims_to_match = sorted( [ dim_name for dim_name in value_objects[0] if dim_name not in ("year", "value") ] ) vo_lookup = get_vo_lookup(value_objects, dims_to_match) rates = self.indexing_rates(param) for ix, year in enumerate( range(max_year + 1, max_allowed_year + 1) ): for vo in value_objects: dim_values = tuple( vo[dim_name] for dim_name in dims_to_match ) v = vo_lookup[dim_values] * ( 1 + rates[max_year - min_allowed_year + ix] ) v = np.round(v, 2) if v < 9e99 else 9e99 adjustment[param].append( dict(vo, **{"year": year, "value": v}) ) vo_lookup[dim_values] = v else: for year in range(max_year, max_allowed_year + 1): for vo in value_objects: adjustment[param].append(dict(vo, **{"year": year})) self.array_first = True self.adjust(adjustment) def adjust_with_extend(self, params_or_path, raise_errors=False): params = self.read_params(params_or_path) curr_vals = self.specification() for param, param_adj in params.items(): max_year = max(map(lambda x: x["year"], param_adj)) for vo in curr_vals[param]: if vo["year"] > max_year: params[param].append(dict(vo, **{"value": None})) self.array_first = False self.adjust(params) self.extend(params_to_extend=list(params.keys()))`IndexedParameters` isn't meant to stand on its own. `TaxParams` must implement it before it can be used.class TaxParams(IndexedParameters): schema = "schema.json" defaults = "defaults.json" field_map = {"compatible_data": fields.Nested(CompatibleDataSchema())} def __init__(self, *args, **kwargs): # Prepare the taxcalc inflation rates. growfactors = taxcalc.GrowFactors() self._inflation_rates = growfactors.price_inflation_rates(2013, 2028) self._apply_clp_cpi_offset(2028 - 2013 + 1) self._wage_growth_rates = growfactors.wage_growth_rates(2013, 2028) super().__init__(*args, **kwargs) def indexing_rates(self, param=None): """ See taxcalc.Parameters.indexing_rates. """ if param == "_SS_Earnings_c": return self._wage_growth_rates else: return self._inflation_rates def _apply_clp_cpi_offset(self, num_years): """ See taxcalc.Policy._apply_clp_cpi_offset If you are curious about what's going on here, the cpi_offset parameter is an approximation for the chained cpi. """ cpi_offset = [0.0, 0.0, 0.0, 0.0, -0.0025] if len(cpi_offset) < num_years: # extrapolate last known value cpi_offset = cpi_offset + cpi_offset[-1:] * ( num_years - len(cpi_offset) ) for idx in range(0, num_years): infrate = round(self._inflation_rates[idx] + cpi_offset[idx], 6) self._inflation_rates[idx] = infrate params = TaxParams() print("paramtools: ", params._EITC_c, "\n") pol = taxcalc.Policy() print("taxcalc: ", pol._EITC_c)paramtools: [[ 487. 3250. 5372. 6044. ] [ 496. 3305. 5460. 6143. ] [ 503. 3359. 5548. 6242. ] [ 506. 3373. 5572. 6269. ] [ 510. 3400. 5616. 6318. ] [ 519. 3461. 5716. 6431. ] [ 530.89 3540.26 5846.9 6578.27] [ 541.45 3610.71 5963.25 6709.18] [ 553.58 3691.59 6096.83 6859.47] [ 566.15 3775.39 6235.23 7015.18] [ 578.78 3859.58 6374.28 7171.62] [ 591.4 3943.72 6513.24 7327.96] [ 604.12 4028.51 6653.27 7485.51] [ 616.87 4113.51 6793.65 7643.45] [ 629.82 4199.89 6936.32 7803.96] [ 643.05 4288.09 7081.98 7967.84]] taxcalc: [[ 487. 3250. 5372. 6044. ] [ 496. 3305. 5460. 6143. ] [ 503. 3359. 5548. 6242. ] [ 506. 3373. 5572. 6269. ] [ 510. 3400. 5616. 6318. ] [ 519. 3461. 5716. 6431. 
] [ 530.89 3540.26 5846.9 6578.27] [ 541.45 3610.71 5963.25 6709.18] [ 553.58 3691.59 6096.83 6859.47] [ 566.15 3775.39 6235.23 7015.18] [ 578.78 3859.58 6374.28 7171.62] [ 591.4 3943.72 6513.24 732[...]Let's confirm that the results are the same.for param in params.specification(): np.testing.assert_allclose(getattr(params, param), getattr(pol, param)) print("No errors were raised!")No errors were raised!Adjust----------The `IndexedParameters.adjust_with_extend` method should be used to adjust the parameter values. This method finds the maximum specified year for a parameter in a reform. It then removes all values set in later years. The new value is extended and indexed using the `extend` method.\*Here's how you adjust the the Tax-Calculator parameters with `TaxParams` and with `taxcalc.Policy`. This adjustment is a component of the Brown-Khanna Grow American Incomes Now (GAIN) Act of 2017. The entire Tax-Calculator reform for this act is in [Tax-Calculator's repo][1].\* This is a coarse approach. More care could be taken for adjustments that, for example, adjust a parameter's value in 2018 and in 2025. Right now the 2018 value is updated but it is ignored by the `extend` method. If anyone is interested in this functionality, I will happily implement it or help someone else implement it.[1]: https://github.com/hdoupe/Tax-Calculator/blob/master/taxcalc/reforms/BrownKhanna.jsonparams.adjust_with_extend({ "_EITC_c": [ {"EIC": "0kids", "year": 2017, "value": 3000}, {"EIC": "1kid", "year": 2017, "value": 6528}, {"EIC": "2kids", "year": 2017, "value":10783}, {"EIC": "3+kids", "year": 2017, "value":12131} ] }) pol.implement_reform({ 2017: { "_EITC_c": [[3000, 6528, 10783, 12131]] } }) print("paramtools: ", params._EITC_c, "\n") print("taxcalc: ", pol._EITC_c) np.testing.assert_allclose(params._EITC_c, pol._EITC_c) print("Updated params are the same!")paramtools: [[ 487. 3250. 5372. 6044. ] [ 496. 3305. 5460. 6143. ] [ 503. 3359. 5548. 6242. ] [ 506. 3373. 5572. 6269. ] [ 3000. 6528. 10783. 12131. ] [ 3056.7 6651.38 10986.8 12360.28] [ 3126.7 6803.7 11238.4 12643.33] [ 3188.92 6939.09 11462.04 12894.93] [ 3260.35 7094.53 11718.79 13183.78] [ 3334.36 7255.58 11984.81 13483.05] [ 3408.72 7417.38 12252.07 13783.72] [ 3483.03 7579.08 12519.17 14084.21] [ 3557.92 7742.03 12788.33 14387.02] [ 3632.99 7905.39 13058.16 14690.59] [ 3709.28 8071.4 13332.38 14999.09] [ 3787.17 8240.9 13612.36 15314.07]] taxcalc: [[ 487. 3250. 5372. 6044. ] [ 496. 3305. 5460. 6143. ] [ 503. 3359. 5548. 6242. ] [ 506. 3373. 5572. 6269. ] [ 3000. 6528. 10783. 12131. ] [ 3056.7 6651.38 10986.8 12360.28] [ 3126.7 6803.7 11238.4 12643.33] [ 3188.92 6939.09 11462.04 12894.93] [ 3260.35 7094.53 11718.79 13183.78][...]03 - Beware Overfitting **Overfitting** When your model captures patterns in your training data too well - meaning it doesn't generalize well to unseen data. **Preventing Overfitting** **Regularization:** Introducing a penalty for overly complex features that reduces - or eliminates - their weight in our model.Two common types of regularization include + **Lasso or L1 regularization**+ **Ridge or L2 regularization**.Each of these will shrink the weights of coefficients in the model. But L1 can reduce the weight for some features to zero, thereby removing them entirely from the model. 
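As a quick, hedged illustration of that contrast (this sketch is not part of the original notebook; it simply refits the same breast-cancer data with an L1 and an L2 penalty and counts the coefficients driven to exactly zero):

```python
# Illustrative sketch: compare how many coefficients L1 vs. L2 shrink to exactly zero.
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

X, y = load_breast_cancer(return_X_y=True)
X = StandardScaler().fit_transform(X)

l1 = LogisticRegression(penalty='l1', C=0.1, solver='liblinear').fit(X, y)
l2 = LogisticRegression(penalty='l2', C=0.1, solver='liblinear').fit(X, y)

print('coefficients zeroed by L1:', int(np.sum(l1.coef_ == 0)))  # usually many
print('coefficients zeroed by L2:', int(np.sum(l2.coef_ == 0)))  # usually none
```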
------ Dataimport pandas as pd from sklearn.datasets import load_breast_cancer cancer = load_breast_cancer() cancer.keys() df = pd.DataFrame(cancer['data'], columns=cancer['feature_names']) df['target'] = pd.Series(cancer['target']) df.head(2)-------- Modelling **Example: L1 regularization in a Logistic Regression model.** Train/Test Splitfrom sklearn.model_selection import train_test_split X = df.drop('target', axis=1) y = df['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) #startify=yScale featuresfrom sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_X_train = scaler.fit_transform(X_train) scaled_X_test = scaler.transform(X_test)Training Modelfrom sklearn.linear_model import LogisticRegression lr = LogisticRegression(penalty='l1', C=0.1, solver='liblinear', multi_class='ovr') lr.fit(scaled_X_train, y_train) print('Training accuracy:', round(lr.score(scaled_X_train, y_train),3)) print('Test accuracy:', round(lr.score(scaled_X_test, y_test),3))Training accuracy: 0.977 Test accuracy: 0.959This is the code for object oriented programming of Yasakawa Robot DX100 working on UDP protocol. I have not yet completed the problem but i am very close to completing it. the UDP reader in Yasakawa is reading data in bytes in big endian format but the data is to be sent before the timeout period.. that is 1 second roughly. so when we are sending we are converting everything we want to send to a list of bytes this list is then sent (one-by-one) then there is a delay of 1 second hopefully this whole message is considered as oneclass RobotDX100: import socket import time #variables and the order for sending variables tosend1 = [89, 69, 82, 67, 32, 0, 4, 0, 3, 1, 0] #reqid = 01,02,03 tosend2 = [0, 0, 0, 0, 57, 57, 57, 57, 57, 57, 57, 57] #cdno = int('83') tosend3 = [0] #inst = 2 tosend4 = [0] #att = [1] #ser = int('10') tosend5 = [0, 0] #dataon = [1,0,0,0] def __init__(self, ip = '192.168.0.151', port = 10040): self.ip = ip self.port = port def connect(self): import socket #to create the socket through which we will be sending our UDP message to the server clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def servo(self, on = 1): import socket #to write the code that will send our UDP message (turn on) reqid = [1] cdno = [83] inst = [2] att = [1] ser = [10] dataon = [1, 0, 0, 0] dataoff = [2, 0, 0, 0] var = RobotDX100.tosend1 var.extend(reqid) var.extend(RobotDX100.tosend2) var.extend(cdno) var.extend(RobotDX100.tosend3) var.extend(inst) var.extend(RobotDX100.tosend4) var.extend(att) var.extend(ser) var.extend(RobotDX100.tosend5) dataservoon = var dataservoon.extend(dataon) dataservooff = var dataservooff.extend(dataoff) if on == True: return dataservoon else: #to write the code that will send our UDP message (turn off) return dataservooff time.sleep(5)# number of seconds r = RobotDX100() r.connect() r.var.to_bytes(1,'big') import socket import time UDP_IP_ADDRESS = r.ip UDP_PORT_NO = r.port #make x = dataservoon in bytes. x = [i.to_bytes(1, 'big') for i in r.servo(True)] #to write each element (One-by-one) of the list in our UDP message (turn on) [clientSock.sendto( i , (r.ip, r.port)) for i in x] #r.var = 0 time.sleep(5)# number of seconds #make x = dataservooff in bytes. 
x = [i.to_bytes(1, 'big') for i in r.servo(False)] #to write each element (One-by-one) of the list in our UDP message (turn on) [clientSock.sendto( i , (r.ip, r.port)) for i in x] #r.var = 0 r.vardata.summarization> This module contains the bits required to use the fastai DataBlock API and/or mid-level data processing pipelines to organize your data for summarization tasks using architectures like BART and T5.#export import ast from functools import reduce import torch from transformers import * from fastai.text.all import * from blurr.utils import * from blurr.data.core import * #hide import pdb from nbdev.showdoc import * from fastcore.test import * #cuda torch.cuda.set_device(1) print(f'Using GPU #{torch.cuda.current_device()}: {torch.cuda.get_device_name()}')Using GPU #1: GeForce GTX 1080 TiSummarization tokenization, batch transform, and DataBlock methodsSummarization tasks attempt to generate a human-understandable and sensible representation of a larger body of text (e.g., capture the meaning of a larger document in 1-3 sentences).path = Path('./') cnndm_df = pd.read_csv(path/'cnndm_sample.csv'); len(cnndm_df) cnndm_df.head(2) pretrained_model_name = "facebook/bart-large-cnn" hf_arch, hf_config, hf_tokenizer, hf_model = BLURR_MODEL_HELPER.get_hf_objects(pretrained_model_name, model_cls=BartForConditionalGeneration) hf_arch, type(hf_tokenizer), type(hf_config), type(hf_model) #export class HF_SummarizationInput(list): passWe create a subclass of `HF_BatchTransform` for summarization tasks to add `decoder_input_ids` and `labels` to our inputs during training, which will in turn allow the huggingface model to calculate the loss for us. See [here](https://huggingface.co/transformers/model_doc/bart.htmltransformers.BartModel.forward) for more information on these additional inputs are used in summarization and conversational training tasks. Note also that `labels` is simply target_ids shifted to the right by one since the task to is to predict the next token based on the current (and all previous) `decoder_input_ids`.And lastly, we also update our targets to just be the `input_ids` of our target sequence so that fastai's `Learner.show_results` works (again, almost all the fastai bits require returning a single tensor to work).#export class HF_SummarizationBatchTransform(HF_BatchTransform): def __init__(self, hf_arch, hf_tokenizer, **kwargs): super().__init__(hf_arch, hf_tokenizer, HF_SummarizationInput, **kwargs) def encodes(self, samples): samples = super().encodes(samples) if (len(samples[0]) == 1): return samples updated_samples = [] for s in samples: s[0]['decoder_input_ids'] = s[1]['input_ids'][:-1].clone() s[0]['labels'] = s[1]['input_ids'][1:].clone() s[0]['labels'][s[0]['labels'] == self.hf_tokenizer.pad_token_id] = -100 targ_ids = s[1]['input_ids'] updated_samples.append((s[0], targ_ids)) return updated_samples def decodes(self, encoded_samples): if (isinstance(encoded_samples, dict)): return self.hf_input_return_type([encoded_samples['input_ids']]) return [encoded_samples]We had to override the `decodes` method above because, while both our inputs and targets are technically the same things, we update the later to consist of *only* the target input_ids so that methods like `Learner.show_results` work. 
Nevertheless, because fastai remembers what they are, `HF_TokenizerTransform.decodes` will be called for both and it works on a `list` of input_ids.hf_batch_tfm = HF_SummarizationBatchTransform(hf_arch, hf_tokenizer) blocks = ( HF_TextBlock(hf_arch, hf_tokenizer), HF_TextBlock(hf_arch, hf_tokenizer, hf_batch_tfm=hf_batch_tfm, max_length=150, hf_input_idxs=[0,1]) ) dblock = DataBlock(blocks=blocks, get_x=ColReader('article'), get_y=ColReader('highlights'), splitter=RandomSplitter()) # dblock.summary(cnndm_df) dls = dblock.dataloaders(cnndm_df, bs=4) b = dls.one_batch() len(b), b[0]['input_ids'].shape, b[1].shape #export @typedispatch def show_batch(x:HF_SummarizationInput, y, samples, dataloaders=None, ctxs=None, max_n=6, **kwargs): res = L([ (s[0], s[1]) for s in samples ]) display_df(pd.DataFrame(res, columns=['text', 'target'])[:max_n]) return ctxs dls.show_batch(dataloaders=dls, max_n=2)Cleanup#hide from nbdev.export import notebook2script notebook2script()Converted 00_utils.ipynb. Converted 01_data-core.ipynb. Converted 01a_data-language-modeling.ipynb. Converted 01c_data-question-answering.ipynb. Converted 01d_data-token-classification.ipynb. Converted 01e_data-summarization.ipynb. Converted 02_modeling-core.ipynb. Converted 02a_modeling-language-modeling.ipynb. Converted 02c_modeling-question-answering.ipynb. Converted 02d_modeling-token-classification.ipynb. Converted 02e_modeling-summarization.ipynb. Converted index.ipynb.【python 入門】繰り返し処理 while文の使い方をマスターする!- whileの使い方- while elseの使い方- breakの使い方- continueの使い方- listを取得しながら繰り返す- 無限ループに注意 whileの使い方 処理を5回繰り返すcount = 0 while count < 5: print(count) count = count + 10 1 2 3 4while elseの使い方 繰り返し処理が完了した後の処理を追加するcount = 0 while count < 5: print(count) count = count + 1 else: print('countが5になりました。')0 1 2 3 4 countが5になりました。breakの使い方count = 0 while count < 5: if count == 3: break print(count) count = count + 10 1 2elseを入れてみるcount = 0 while count < 5: if count == 3: break print(count) count = count + 1 else: print('繰り返し完了')0 1 2continueの使い方count = 0 while count < 5: if count == 3: print('count==3の時の処理') count = count + 1 continue print(count) count = count + 10 1 2 count==3の時の処理 4このコードでも処理は同じcount = 0 while count < 5: if count == 3: print('count==3の時の処理') else: print(count) count = count + 10 1 2 count==3の時の処理 4listを取得しながら繰り返すlist_a = ['りんご', 'みかん', 'ぶどう'] while list_a: print(list_a.pop())ぶどう みかん りんご無限ループに注意import time count = 0 while True: print(f'\r{count}', end='') count += 1 time.sleep(1)5Running Monte Carlo Simulations on an Excel ModelHere I will demonstrate using `xlwings` to drive Excel to run Monte Carlo Simulations. For this exercise, please download the Excel retirement model "Excel with Salary.xlsx" from Canvas. Then open the workbook and keep it open. Make sure you have no other workbooks open. Switch to the "Wealth" tab if it is not open.import xlwings as xwThe Monte Carlo SetupFirst let's get a connection to our Excel sheet.import xlwings as xw book = xw.Book('Dynamic Salary Retirement Model.xlsx') sht = book.sheets['Inputs and Outputs']Running a Single SimulationWe want to evaluate how interest rate affects years until retirement. Let's first just try changing the interest rate and getting the years to retirement as the output.sht.range('B10').value = 0.08Now we can see that the interest rate has changed to 8% in Excel and that the years to retirement has changed to 24. 
But we want to get that output out of Excel as well.years_to_retirement = sht.range('B18').value years_to_retirementNow that we have the value in Python we can analyze it in Python. Or if we want to analyze the results in Excel, we can output it back to the Excel workbook as a hard-coded value in a different cell, so that it will still be saved when the inputs change.sht.range('E2').value = years_to_retirementNow we can see the value is in Excel in the cell `E2`. Running Multiple Simulations Just as we have done with pure Python Monte Carlo simulations, now we want to run this process many times. We'll use a loop over the number of iterations to do this. We will collect the results in Python and then output to Excel at the end. First we need to be getting the interest rate randomly from a normal distribution:import random interest_mean = 0.05 interest_std = 0.03 interest_rate = random.normalvariate(interest_mean, interest_std) interest_rate num_iter = 10 all_retirement_years = [] for i in range(num_iter): interest_rate = random.normalvariate(interest_mean, interest_std) sht.range('B10').value = interest_rate years_to_retirement = sht.range('B18').value all_retirement_years.append(years_to_retirement) all_retirement_yearsNow output back to Excel. We want them in a column so we will do the list comprehension trick.vertical_retirement_years = [[ret_year] for ret_year in all_retirement_years] sht.range('E2').value = vertical_retirement_yearsNow wrap this all up in a function.def retirement_simulations(num_iter, interest_mean, interest_std): all_retirement_years = [] for i in range(num_iter): interest_rate = random.normalvariate(interest_mean, interest_std) sht.range('B10').value = interest_rate years_to_retirement = sht.range('B18').value all_retirement_years.append(years_to_retirement) vertical_retirement_years = [[ret_year] for ret_year in all_retirement_years] sht.range('E2').value = vertical_retirement_years return all_retirement_years # return it so we will also have it in Python in addition to Excel results = retirement_simulations(1000, 0.1, 0.05) results[:10]1. Violin Plot for Age/Sex---meta.df <- read.table("../data/raw/SMM_study_final_list_for_clustering.txt", sep='\t', header=T) meta.df$fill <- ifelse(meta.df$SEX=='M', '#55CDFC','#F7A8B8') pdf('figures/figS1d_age_sex_violin.pdf', width=3, height=3) options(repr.plot.width=3, repr.plot.height=3) ggplot(meta.df, aes(x=SEX, y=AGE, fill=SEX)) + geom_violin(trim=F) + labs(title="",x="", y = "Age") + theme_classic() + scale_fill_manual(values = c("#F7A8B8","#55CDFC")) + geom_boxplot(width=0.125, fill='white') + theme(legend.position="none") dev.off()Plot and conversion of demographic dataThe following file uses the downloaded demographic data from [worldpop.org](https://www.worldpop.org/) and converts it into discrete (integer) numbers based on the users desired accuracy, plots a demographic bar chart and and saves the resulting information into json files. The output from this file is then intdned to be combined with the population density output file. These combined files can then create a strong foundation for a agent population. 
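Before walking through the notebook itself, here is a rough, hedged sketch of the core idea (the cell values and the `demo_counts.json` file name are made up for illustration): round the coordinates to the chosen accuracy, aggregate the fractional people counts, cast them to integers, and write the result to JSON.

```python
# Illustrative sketch only: discretize worldpop-style fractional counts and save as JSON.
import pandas as pd

# made-up fractional population cells
pop_table = pd.DataFrame({
    "longitude":  [19.81712, 19.81714, 19.82031],
    "latitude":   [41.32751, 41.32753, 41.33102],
    "Population": [0.42, 0.77, 1.36],
})

decimals = 4  # ~11 m at the equator, matching the accuracy selector below
pop_table[["longitude", "latitude"]] = pop_table[["longitude", "latitude"]].round(decimals)
grouped = pop_table.groupby(["longitude", "latitude"], as_index=False).sum()
grouped["Population"] = grouped["Population"].round().astype(int)

grouped.to_json("demo_counts.json", orient="records")
```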
0: Import the Dependenciesfrom toggle_code import toggle_code as hide_code from toggle_code import run_code as run_code import os import numpy as np import pandas as pd import bokeh from bokeh.io import output_notebook from bokeh.plotting import figure, show from bokeh.models import ColumnDataSource, FactorRange from bokeh.models import BasicTickFormatter import glob import rasterio import re import datetime from collections import OrderedDict import ipywidgets as widgets from ipywidgets import interact, Layout from bokeh.io import push_notebook, show, output_notebook from bokeh.plotting import figure from bokeh.tile_providers import get_provider, Vendors from bokeh.palettes import Spectral4 from bokeh.models import Legend, BoxAnnotation, Toggle from bokeh.layouts import layout, gridplot, column tile_provider = get_provider('STAMEN_TERRAIN') #create pyproj transformer to convert form lat/long to web mercator from pyproj import Transformer transformer = Transformer.from_crs('epsg:4326','epsg:3857') #from IPython.display import HTML output_notebook() import warnings warnings.filterwarnings("ignore", message="Cannot find a last shown plot to update.")1: Insert full path to data The code will grab and process all files within this folder. Ensure only the demographic files you want to work with arestored at this locationhide_code() pot_list =["All"] filepath= r"./data/*" for pop_file in glob.glob(filepath): if "_f_" in pop_file or "_m_" in pop_file: pot_list.append(pop_file) pop_file = widgets.SelectMultiple(options=pot_list, value=[], description="File: ", disabled = False, layout=Layout(width="50%", height="260px")) def update(file): return file pop_file_select = interact(update, file=pop_file) #call the function #pop_table2. Convert file into table¶The following code converts the downloaded worldpop file into a table of latitudes, longitudes and number of people.6 decimal points for the coordinates represents and accuracy of ~0.11 meters at the equator# takes in a worldpop dataset url and populates a 3d array with 3 slices, one for the latitude, one for the longitude, # and one for the population at that specified co-ordinate box # The array is then loaded into the dictionary of all the worldpop age and sex demographics def get_array(filename):#, demographic, struct_dict): with rasterio.open(filename) as src: #read image image= src.read() # transform image bands,rows,cols = np.shape(image) image1 = image.reshape (rows*cols,bands) # bounding box of image l,b,r,t = src.bounds #resolution of image res = src.res # meshgrid of X and Y x = np.arange(l,r, res[-1]) y = np.arange(t,b, -res[-1]) #adjust for rounding errors if len(x) != image[0].shape[1]: diff_x = len(x)-image[0].shape[1] x = x[0:-diff_x] if len(y) != image[0].shape[0]: diff_y = len(y)-image[0].shape[0] y = y[0:-diff_y] #TUrn into a two dimensional array of all lats and longs lon, lat = np.meshgrid(x, y) lon_flat = lon.flatten() lat_flat= lat.flatten() pop_flat= image[0].flatten() x1, y1 = np.shape(lat) pop_dict = {"longitude":lon_flat, "latitude":lat_flat,"Population":pop_flat} pop_table = pd.DataFrame.from_dict(pop_dict) #Remove non values pop_table =pop_table[pop_table["Population"]!=-99999.0] total_peeps = sum(pop_table["Population"]) #print("There are approximately {} people.".format(total_peeps)) return pop_table,total_peeps if len(pop_file.value)==0: print("waiting for inputs.") else: # Create arrays to hold pandas dataframes created from demographic and population tifs data_from_demo_files = [] if pop_file.value[0] == "All": 
data_selection = pot_list[1:] else: data_selection = list(pop_file.value) for file in data_selection: print("Creating demographic tables of " + file) pop_table,total = get_array(file) data_from_demo_files.append([pop_table, total]) print("There are {} demographic tables.".format(len(data_from_demo_files)))Creating demographic tables of ./data\alb_f_0_2020.tif Creating demographic tables of ./data\alb_f_10_2020.tif Creating demographic tables of ./data\alb_f_15_2020.tif Creating demographic tables of ./data\alb_f_1_2020.tif Creating demographic tables of ./data\alb_f_20_2020.tif Creating demographic tables of ./data\alb_f_25_2020.tif Creating demographic tables of ./data\alb_f_30_2020.tif Creating demographic tables of ./data\alb_f_35_2020.tif Creating demographic tables of ./data\alb_f_40_2020.tif Creating demographic tables of ./data\alb_f_45_2020.tif Creating demographic tables of ./data\alb_f_50_2020.tif Creating demographic tables of ./data\alb_f_55_2020.tif Creating demographic tables of ./data\alb_f_5_2020.tif Creating demographic tables of ./data\alb_f_60_2020.tif Creating demographic tables of ./data\alb_f_65_2020.tif Creating demographic tables of ./data\alb_f_70_2020.tif Creating demographic tables of ./data\alb_f_75_2020.tif Creating demographic tables of ./data\alb_f_80_2020[...]3. Select the level of accuracy neededNext we can select the level of accuracy we want for our tables.hide_code() run_code() accuracy = widgets.Dropdown(options =["6 decimals (~0.11 meters)", "5 decimals (~1.1 meters)", "4 decimals (~11 meters)", "3 decimals (~110 meters)", "2 decimals (~1.1 kilometers)"], value = "4 decimals (~11 meters)", description = "Accuracy", disabled = False) def update(acc): return acc acc_select = interact(update, acc=accuracy)4: Smooth the populationThere are many decimal people, which cannot exist. So based on the desired latitude/longitude accuracy the goal is to get close to the total population. The following code is based on Pareto distributions of populations or a rich get richer approach. 
In essence, if there is a high population density, then that area gets more people.(This is obviously somewhat coarse and we welcome contributions.)hide_code() run_code() def round_long_lat(pop_table, total, accuracy, name): acc_dict = {"6 decimals (~0.11 meters)":6, "5 decimals (~1.1 meters)":5, "4 decimals (~11 meters)":4, "3 decimals (~110 meters)":3, "2 decimals (~1.1 kilometers)":2} rd = acc_dict[accuracy.value] goal = round(total) print("The goal population of {} is {}.".format(name, goal)) print("Calculating......") #round longitude pop_table["longitude"] = pop_table["longitude"].round(rd) #round latitude pop_table["latitude"] = pop_table["latitude"].round(rd) grouped_poptable = pop_table.groupby(['longitude','latitude'], as_index=False).sum() #grouped_poptable.aggregate(np.sum) #compare total aggregated value against goal value new_total = grouped_poptable['Population'].sum() #first for every row greater than 1 round up grouped_poptable["Population"] = grouped_poptable["Population"].apply(np.rint) #sum rounded population values new_total = round(sum(grouped_poptable["Population"])) #find the difference diff = goal - new_total #Get the amount which should be added to each column #Make a copy grouped_poptable["Percent"] = grouped_poptable["Population"].copy() #Divide by the current total to get percent in each square grouped_poptable["Percent"]= grouped_poptable["Percent"].div(new_total) #Multiple that by the missing amount grouped_poptable["Percent"] = grouped_poptable["Percent"].multiply(diff) #Round to whole numbers grouped_poptable["Percent"] = grouped_poptable["Percent"].apply(np.rint) #Add to the population grouped_poptable["Population"] = grouped_poptable["Percent"] + grouped_poptable["Population"] #Get the new total new_total = round(sum(grouped_poptable["Population"])) print("The aggregated total population is: " + str(new_total)) print("The new aggregated total accounts for: " + str(round(new_total/goal*100,2))+"% of the population.") print() return grouped_poptable, new_total if len(pop_file.value)==0: print("Waiting for inputs.") else: for idx in range(len(data_from_demo_files)): data_from_demo_files[idx][0], data_from_demo_files[idx][1] = round_long_lat(data_from_demo_files[idx][0], data_from_demo_files[idx][1], accuracy, data_selection[idx]) print("Population smoothing complete.")5: Data from processed files is formatted and graphed using Bokeh.hide_code() run_code() # File names are split to track which age/sex goes with which population total if len(pop_file.value)== 0: print("Waiting for inputs.") else: file_list = [] for file in data_selection: test = re.split('_|\.', file) file_list.append(test[2:5]) #print(file_list) # Reformats data so bokeh can plot the demographic data position = 0 #necessary to verify all categories are present consistency_check = [OrderedDict(),OrderedDict()] for data in file_list: if data[0] == 'f': consistency_check[0][int(data[1])] = data_from_demo_files[position][1] else: consistency_check[1][int(data[1])] = data_from_demo_files[position][1] position += 1 for k in consistency_check[0].keys(): if k not in consistency_check[1].keys(): consistency_check[1][k] = 0 for k in consistency_check[1].keys(): if k not in consistency_check[0].keys(): consistency_check[0][k] = 0 #Ensure sorted by age plotter_female = OrderedDict((key, consistency_check[0][key]) for key in sorted(consistency_check[0].keys())) plotter_male = OrderedDict((key, consistency_check[1][key]) for key in sorted(consistency_check[1].keys())) categories = [] for ele in 
list(plotter_female.keys()): categories.append(str(ele)) male = list(plotter_male.values()) female = list(plotter_female.values()) sex = ['male', 'female'] # Data is set up for Bokeh source = ColumnDataSource(data=dict( x=categories, male=male, female=female, )) output_notebook() # Graph presentation variables p = figure( title='Stacked Demographics', x_axis_label='Age', y_axis_label='Population', x_range=FactorRange(*categories), plot_width=800, plot_height=800, tooltips='@$name{0.0}' ) # Graph variables p.vbar_stack( sex, x='x', width=0.4, alpha=0.5, color=['blue','pink'], source=source ) # Removing scientific notation from y-axis tick marks p.yaxis[0].formatter = BasicTickFormatter(use_scientific=False) # Displaying graph show(p)7: Save filesDue to the size, likely number of the demographic files and use of the population density files as the main reference file, the following code make two size saving decisions. 1. It saves each files a as json instead of a .csv2. It drops the populations rows with zero peoplehide_code() run_code() if len(pop_file.value)== 0: print("Waiting for inputs.") else: country_name = data_selection[0] country_name = re.split('data|_', country_name) country_name =country_name[1][1:] # Need to change this to run in loop on each file name saved to filename_list def save_file(name, df, col): df =df.rename(columns={"Population":col}) filepath = os.path.join(r".\data", name) df = df[df[col]!=0] df.to_json(filepath) print("{} has been saved.".format(filepath)) for ele in range(len(data_from_demo_files)): #give new column name for later merging col = "pop_"+file_list[ele][0] + "_" + file_list[ele][1] name = country_name + "_" + file_list[ele][0] + "_" + file_list[ele][1]+".json" save_file(name, data_from_demo_files[ele][0],col )Example of integrating a networkThis notebook illustrates how to create a python network and integrateit with the scipy library.import pynucastro as pynaWe'll start again with the basic CNO network explored earlier.files = ["c12-pg-n13-ls09", "c13-pg-n14-nacr", "n13--c13-wc12", "n13-pg-o14-lg06", "n14-pg-o15-im05", "n15-pa-c12-nacr", "o14--n14-wc12", "o15--n15-wc12"]A `PythonNetwork` is based on a `RateCollection` but has methods to write the RHS of the system of ODEs.pynet = pyna.PythonNetwork(files)For example, this network knows how to write the full term for a reaction that goes into the $dY/dt$ equation of the ODE system.Here we pick one of the rates that is part of the network an explore it.r = pynet.rates[1] print(r) print(pynet.ydot_string(r))rho*Y[jp]*Y[jc13]*lambda_p_c13__n14and the code needed to evaluate that rate (the T-dependent part) as:print(pynet.function_string(r))@numba.njit() def p_c13__n14(tf): # c13 + p --> n14 rate = 0.0 # nacrn rate += np.exp( 18.5155 + -13.72*tf.T913i + -0.450018*tf.T913 + 3.70823*tf.T9 + -1.70545*tf.T953 + -0.666667*tf.lnT9) # nacrr rate += np.exp( 13.9637 + -5.78147*tf.T9i + -0.196703*tf.T913 + 0.142126*tf.T9 + -0.0238912*tf.T953 + -1.5*tf.lnT9) # nacrr rate += np.exp( 15.1825 + -13.5543*tf.T9i + -1.5*tf.lnT9) return rateThe temperature-dependent rate evaluation functions take a `Tfactor` object, which precomputes most of the commonly-used temperature factors in the rates. 
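For example, a `Tfactors` object can be built for a single temperature and its precomputed attributes inspected directly. A minimal sketch — here the constructor is assumed to take the temperature in Kelvin; check the pynucastro documentation for the exact signature in your version:

```python
# Minimal sketch: inspect the precomputed temperature factors used by the
# generated rate functions (attribute names match the generated code above).
from pynucastro.rates import Tfactors

tf = Tfactors(1.5e8)  # assumed: temperature in Kelvin, i.e. T9 = 0.15
print(tf.T9, tf.T9i, tf.T913, tf.T913i, tf.T953, tf.lnT9)
```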
The `write_network()` method will output the python code needed to define the RHS of a network for integration with the SciPy integrators.Since python code can be slow, we use Numba to do just-in-time compilation of the functions to speed things up.pynet.write_network("cno_test_integrate.py") %cat cno_test_integrate.pyimport numpy as np from pynucastro.rates import Tfactors import numba jp = 0 jhe4 = 1 jc12 = 2 jc13 = 3 jn13 = 4 jn14 = 5 jn15 = 6 jo14 = 7 jo15 = 8 nnuc = 9 A = np.zeros((nnuc), dtype=np.int32) A[jp] = 1 A[jhe4] = 4 A[jc12] = 12 A[jc13] = 13 A[jn13] = 13 A[jn14] = 14 A[jn15] = 15 A[jo14] = 14 A[jo15] = 15 Z = np.zeros((nnuc), dtype=np.int32) Z[jp] = 1 Z[jhe4] = 2 Z[jc12] = 6 Z[jc13] = 6 Z[jn13] = 7 Z[jn14] = 7 Z[jn15] = 7 Z[jo14] = 8 Z[jo15] = 8 names = [] names.append("h1") names.append("he4") names.append("c12") names.append("c13") names.append("n13") names.append("n14") names.append("n15") names.append("o14") names.append("o15") @numba.njit() def ye(Y): return np.sum(Z * Y)/np.sum(A * Y) @numba.njit() def p_c12__n13(tf): # c12 + p --> n13 rate = 0.0 # ls09n rate += np.exp( 17.1482 + -13.692*tf.T913i + -0.230881*tf.T913 + 4.44362*tf.T9 + -3.15898*tf.T953 + -0.666667*tf.ln[...]We can now import the network that was just created and integrate it using the SciPy ODE solversimport cno_test_integrate as cnoIntegrating the networkWe can use the stiff ODE integration solvers that are part of Scipy to integrate this system nowfrom scipy.integrate import solve_ivp import numpy as npInitialize the thermodynamic conditions and initial composition. We express the composition as molar fractions, `Y0`.rho = 150 T = 1.5e7 X0 = np.zeros(cno.nnuc) X0[cno.jp] = 0.7 X0[cno.jhe4] = 0.28 X0[cno.jc12] = 0.02 Y0 = X0/cno.ANow we integrate. We use the `BDF` method, since reaction networks are in general stifftmax = 1.e20 sol = solve_ivp(cno.rhs, [0, tmax], Y0, method="BDF", dense_output=True, args=(rho, T), rtol=1.e-6, atol=1.e-6)A network plotimport matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) for i in range(cno.nnuc): ax.loglog(sol.t, sol.y[i,:] * cno.A[i], label=f"X({cno.names[i].capitalize()})") ax.set_xlim(1.e10, 1.e20) ax.set_ylim(1.e-8, 1.0) ax.legend(fontsize="small") fig.set_size_inches((10, 8))Estimating the Knowledge Distribution within a Modular System Exercise_Level: Hard_ BackgroundIn software systems, multiple developers work on multiple parts of the system. These persons change all code differently within specific parts. 
Your TaskIn this exercise, you should find out, _which author does know which percentage of the system's modules?_ The DatasetThe dataset in `../datasets/git_log_numstat_dropover.csv.gz` contains the following information:* `additions`: The number of added lines per file per commit* `deletions`: The number of deleted lines per file per commit* `file`: The name of the file that was changed* `sha`: The unique key of the commit* `timestamp`: The time of the commit* `author`: The name of the author who did the changeHere you can see the first 10 entries of the dataset:additions,deletions,file,sha,timestamp,author191.0,0.0,backend/pom-2016-07-16_04-40-56-752.xml,8c686954,2016-07-22 17:43:38,Michael1.0,1.0,backend/src/test/java/at/dropover/scheduling/interactor/SetFinalDateTest.java,97c6ef96,2016-07-16 09:51:15,Markus55.0,0.0,backend/src/test/java/at/dropover/scheduling/interactor/SetFinalDateTest.java,432113a2,2016-07-15 21:17:07,Chris19.0,3.0,backend/src/main/webapp/app/widgets/gallery/js/galleryController.js,3f7cf92c,2016-07-16 09:07:31,Markus24.0,11.0,backend/src/main/webapp/app/widgets/gallery/js/galleryController.js,bf2b00ba,2014-10-26 05:52:48,Michael294.0,0.0,backend/src/main/webapp/app/widgets/gallery/js/galleryController.js,62f4013b,2014-10-11 22:24:46,Michael1.0,1.0,backend/src/main/webapp/app/widgets/gallery/views/galleryView.html,3f7cf92c,2016-07-16 09:07:31,Markus5.0,5.0,backend/src/main/webapp/app/widgets/gallery/views/galleryView.html,bf2b00ba,2014-10-26 05:52:48,Michael75.0,0.0,backend/src/main/webapp/app/widgets/gallery/views/galleryView.html,62f4013b,2014-10-11 22:24:46,Michael Further InformationThe system under investigation has several peculiarities:* It was developed by only three people: Chris, Markus and Michael. Each of these developers worked more or less on one or more modules.* The system was structured along business modules with functionality like comment, creator, scheduling and so on. You can find this information at the 7th place in filepath.* The relevant source code for the backend was written in Java. These files use `.java` as file extensions.* The interesting files of the system are the ones that begin with `backend/src/main/java/`.* There are also irrelevant files for this analysis in the backend named `package-info.java`. IdeaWorking assumption: The number of commits from an author within a certain module corresponds to the existing knowledge about that module. Data LoadingRetrieve Git log information from a Git repository Data CleaningJust keep the code that is of interest. AnalysisFirst, find out _which author has how much overall "knowledge"?_ Extract the information about a the business modules. InterpretationList all the existing knowledge ratios per modules and authors VisualizationPlot the result for each module in a bar chartknowledge_per_module.unstack()['ratio'].indexDemo - age and gender recognition via REST APIThis notebook presents how to use OpenVINO Model Server to execute inference requests over REST API interface.The demo is using a pretrained model from [open_model_zoo](https://github.com/opencv/open_model_zoo) repository. 
Download the pretrained model for age and gender recognition!git clone https://github.com/opencv/open_model_zoo.git !cd open_model_zoo/model_downloader; python downloader.py --name age-gender-recognition-retail-0013###############|| Downloading topologies ||############### ========= Downloading /private/tmp/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml ... 100%, 14 KB, 143 KB/s, 0 seconds passed ========= Downloading /private/tmp/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.bin ... 100%, 8351 KB, 1571 KB/s, 5 seconds passed ###############|| Post processing ||###############Build the docker image with OVMS component!git clone https://github.com/IntelAI/OpenVINO-model-server !cd OpenVINO-model-server ; make docker_build_src_intelpythonBuilding docker image docker build -f Dockerfile_intelpython --build-arg http_proxy=http://proxy-chain.intel.com:911 --build-arg https_proxy=http://proxy-chain.intel.com:912 -t ie-serving-py:latest . Sending build context to Docker daemon 10.9MB Step 1/31 : FROM intelpython/intelpython3_core as DEV ---> 3fbe3dacb980 Step 2/31 : RUN apt-get update && apt-get install -y autoconf automake build-essential ca-certificates curl git gstreamer1.0-plugins-base libavcodec-dev libavformat-dev libboost-regex-dev libcairo2-dev libgfortran3 libglib2.0-dev libgstreamer1.0-0 libgtk2.0-dev libopenblas-dev libpango1.0-dev libpng-dev libssl-dev libswscale-dev libtool libusb-1.0-0-dev pkg-config unzip vim [...]Start OVMS docker container with downloaded model!export DOCKER_IMAGE_NAME=ie-serving-py;\ docker run -d --rm -v ${PWD}/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt:/opt/ml/age_gender/1 -p 9000:9000 -p 8000:8000 ${DOCKER_IMAGE_NAME} /ie-serving-py/start_server.sh ie_serving model --model_path /opt/ml/age_gender --model_name age_gender --port 9000 --rest-port 8000281614d378c9811c7b83092b1cb8db8763e7a7857dd0029b6f30f9e5bf539b51Download sample image!wget https://raw.githubusercontent.com/opencv/open_model_zoo/master/intel_models/age-gender-recognition-retail-0013/description/age-gender-recognition-retail-0001.jpg--2019-06-28 14:39:50-- https://raw.githubusercontent.com/opencv/open_model_zoo/master/intel_models/age-gender-recognition-retail-0013/description/age-gender-recognition-retail-0001.jpg Resolving proxy-chain.intel.com... 172.16.58.3 Connecting to proxy-chain.intel.com|172.16.58.3|:912... connected. Proxy request sent, awaiting response... 
200 OK Length: 2436 (2.4K) [image/jpeg] Saving to: 'age-gender-recognition-retail-0001.jpg' age-gender-recognit 100%[===================>] 2.38K --.-KB/s in 0s 2019-06-28 14:39:50 (17.9 MB/s) - 'age-gender-recognition-retail-0001.jpg' saved [2436/2436]![age-gender-recognition-retail-0001.jpg](age-gender-recognition-retail-0001.jpg) Format the json requestimport cv2 import numpy as np import json def getJpeg(path, size): img = cv2.imread(path, cv2.IMREAD_COLOR) # retrived array has BGR format and 0-255 normalization img = cv2.resize(img, (size, size)) img = img.transpose(2,0,1).reshape(1,3,size,size) print(path, img.shape, "; data range:",np.amin(img),":",np.amax(img)) return img my_image = getJpeg('age-gender-recognition-retail-0001.jpg',62) data_obj = {'inputs': my_image.tolist()} data_json = json.dumps(data_obj) print(data_json)age-gender-recognition-retail-0001.jpg (1, 3, 62, 62) ; data range: 0 : 239 {"inputs": [[[[232, 226, 234, 232, 228, 231, 221, 198, 174, 115, 89, 73, 65, 57, 56, 58, 53, 49, 45, 42, 41, 31, 27, 24, 32, 42, 51, 53, 43, 42, 34, 38, 33, 30, 26, 23, 24, 24, 25, 23, 25, 26, 25, 24, 21, 21, 21, 21, 24, 19, 26, 24, 35, 40, 56, 63, 90, 127, 214, 197, 205, 195], [226, 233, 234, 229, 230, 228, 208, 187, 135, 82, 66, 49, 43, 65, 56, 60, 52, 48, 41, 37, 34, 31, 26, 28, 37, 43, 53, 50, 44, 37, 32, 32, 29, 28, 25, 22, 23, 23, 25, 25, 25, 26, 24, 23, 21, 20, 20, 20, 20, 19, 25, 26, 27, 31, 44, 59, 81, 151, 207, 200, 199, 196], [230, 229, 234, 234, 222, 215, 184, 122, 85, 64, 49, 56, 61, 62, 55, 48, 52, 47, 39, 33, 26, 27, 22, 28, 44, 47, 59, 41, 44, 33, 35, 27, 27, 26, 24, 22, 21, 21, 22, 23, 22, 23, 23, 22, 22, 23, 21, 21, 20, 21, 21, 26, 21, 25, 35, 60, 94, 170, 204, 195, 196, 195], [233, 231, 227, 226, 215, 162, 136, 77, 59, 66, 60, 57, 60, 52, 55, 52, 44, 37, 35, 30, 25, 27, 25, 37, 51, 44, 37, 44[...]Run the inference request to OVMS REST API endpointimport requests result = requests.post("http://localhost:8000/v1/models/age_gender:predict", data=data_json) result_dict = json.loads(result.text) print(result_dict){'outputs': {'age_conv3': [[[[0.2519038915634155]]]], 'prob': [[[[0.9874807000160217]], [[0.012519358657300472]]]]}}ROC Curvefrom sklearn.metrics import roc_curve, auc import matplotlib.pyplot as plt y_true = [0, 1, 1, 1, 1, 0, 1, 1, 0, 0] # random probability of each model A and B a_prob = [0.6, 0.7, 0.7, 0.8, 0.9, 0.7, 0.85, 0.7, 0.65, 0.75] b_prob = [0.05, 0.05, 0.1, 0.3, 0.6, 0.3, 0.4, 0.5, 0.2, 0.1] # probability of perfact model C c_prob = [0, 1, 1, 1, 1, 0, 1, 1, 0, 0] # false_positive_rate, true_positive_rate, threshold fpr_a, tpr_a, thr_a = roc_curve(y_true, a_prob) fpr_b, tpr_b, thr_b = roc_curve(y_true, b_prob) fpr_c, tpr_c, thr_c = roc_curve(y_true, c_prob) # area under curve auc_a = auc(fpr_a, tpr_a) auc_b = auc(fpr_b, tpr_b) auc_c = auc(fpr_c, tpr_c) # drawing plot plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.plot(fpr_a, tpr_a, label=f"ROC curve area = {auc_a:.2f}") plt.plot(fpr_b, tpr_b, label=f"ROC curve area = {auc_b:.2f}") plt.plot(fpr_c, tpr_c, label=f"ROC curve area = {auc_c:.2f}") plt.legend(loc="lower right") plt.show()Creates:* xray_comp.pdffrom astropy.table import Table from astropy.io import ascii as asc import astropy.units as u from astropy.time import Time from matplotlib import pyplot as plt import numpy as np %matplotlib import matplotlib as mpl from utilities_az import supernova plt.style.use(['seaborn-paper', 'az-paper-onecol'])Read in SNaX data# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 
17 18 19 names=["name","type","Date Exploded (JD)","Coordinates","Distance (Mpc)","galaxy","redshift","redshiftErr","Observation Start Date (JD)","instrument","Age (days)","Flux (10^-13 erg cm^-2 s^-1)","isUpperBound","fluxErrL","fluxErrH","Energy Lower Bound (KeV)","Energy Upper Bound (KeV)","Luminosity (10^39 erg s^-1)","lumErrL","lumErrH","model","dateExplodedRef","redshiftRef","dateExplodedRef","coordsRef","distRef","dateObservedRef","fluxRef", "junk"] name = [] sntype = [] age = []# (days) isUpperBound = [] luminosity = [] # (10^39 erg s^-1) lumErrL =[] lumErrH =[] ofile = open('../../data/xray/SNaX.TSV', 'r') for iline in ofile: if iline.startswith('SN'): sline = iline.split('\t') name.append(sline[0]) sntype.append(sline[1]) age.append(float(sline[10])) isUpperBound.append(bool(int(sline[12]))) luminosity.append(float(sline[17])) lumErrL.append(float(sline[18])) lumErrH.append(float(sline[19])) tbdata = Table([name, sntype, age, isUpperBound, luminosity, lumErrL, lumErrH], names=['name', 'sntype', 'age', 'isUpperBound', 'luminosity', 'lumErrL', 'lumErrH'])Read in data from Samofile_obs = asc.read('../../data/xray/asassn15oz_SpectrumData.csv', data_start=2, delimiter=',', names=['ObsID','SourceCount','SourceRate','BGCount','BGRate','BackedSpectrum', 'BackedSpectrumError','Flux','FluxError','Luminosity','LuminosityError','PercentTotal', 'ExposureTime','MJD','Date','Probability','Confidence'])Read in data from Stefanoofile_ximage = asc.read('../../data/xray/upper_lim_table.txt', data_start=1, names=['visit','date-obs','exptime','obsid','cts/s','Flux','unabsorbed-flux','NH'], format='fixed_width') distance = (28.83*u.Mpc).to(u.cm) ofile_ximage['luminosity'] = (4*np.pi*distance**2)*ofile_ximage['unabsorbed-flux'] ofile_ximage['luminosity'][ofile_ximage['luminosity']==0]=np.nan ofile_ximage['MJD'] = Time(ofile_ximage['date-obs']).mjd np.array(ofile_ximage['luminosity'])/10**39 sn15oz = supernova.LightCurve2('asassn-15oz') sn15oz_phase = Time(['2015-09-05', '2015-11-18']) - Time(sn15oz.jdexpl, format='jd') #sn15oz_phase = Time(['2015-09-05', '2015-11-18']) - Time('2015-08-27') fig = plt.figure() fig.subplotpars.update(left=0.25) ax = fig.add_subplot(1,1,1) #l1 = ax.fill_between(phase, Lx1_low, Lx1_up, alpha=0.3, label=r'$10^{-7} M_{\odot}/yr$') #l2 = ax.fill_between(phase, Lx2_low, Lx2_up, alpha=0.3, label=r'$10^{-6} M_{\odot}/yr$') #l3 = ax.fill_between(phase, Lx3_low, Lx3_up, alpha=0.3, label=r'$10^{-5} M_{\odot}/yr$') leg_lines, leg_labels = ax.get_legend_handles_labels() snnames = set(tbdata['name']) for snname in snnames: indx = (tbdata['name']==snname) & (tbdata['age']<100) if (indx==True).any(): sntype = tbdata[indx]['sntype'][0] if 'L' in sntype: iline = ax.errorbar(tbdata[indx]['age'], tbdata[indx]['luminosity'], fmt='s', yerr=[tbdata[indx]['lumErrH'], tbdata[indx]['lumErrL']], ls='--') leg_lines.append(iline[0]) leg_labels.append('{}, {}'.format(snname, tbdata[indx]['sntype'][0])) ax.errorbar(tbdata[indx]['age'][tbdata[indx]['isUpperBound']], tbdata[indx]['luminosity'][tbdata[indx]['isUpperBound']], fmt='s', yerr=[0.25]*len(tbdata[indx]['age'][tbdata[indx]['isUpperBound']]), uplims = [True]*len(tbdata[indx]['age'][tbdata[indx]['isUpperBound']]), ecolor=iline[0].get_color()) elif 'n' not in sntype: iline = ax.errorbar(tbdata[indx]['age'], tbdata[indx]['luminosity'], fmt='d', yerr=[tbdata[indx]['lumErrH'], tbdata[indx]['lumErrL']], ls=':') leg_lines.append(iline[0]) leg_labels.append('{}, {}'.format(snname, tbdata[indx]['sntype'][0])) 
ax.errorbar(tbdata[indx]['age'][tbdata[indx]['isUpperBound']], tbdata[indx]['luminosity'][tbdata[indx]['isUpperBound']], fmt='d', yerr=[0.25]*len(tbdata[indx]['age'][tbdata[indx]['isUpperBound']]), uplims = [True]*len(tbdata[indx]['age'][tbdata[indx]['isUpperBound']]), ecolor=iline[0].get_color()) ax.set_yscale("log") iline = ax.errorbar(ofile_ximage['MJD']-Time(sn15oz.jdexpl, format='jd').mjd, np.array(ofile_ximage['luminosity'])/10**39, np.array(ofile_ximage['luminosity'])/10**39*0.2, uplims=True, fmt='.') leg_lines.append(iline[0]) leg_labels.append('ASASSN-15oz, IIL') ax.set_xlim(0,160) #ax.set_ylim(-10, 20) ax.set_xlabel('Phase (day)') ax.set_ylabel(r'Luminosity (x10$^{39}$ erg $\rm s^{-1}$)', position=(1,0.38)) ax.legend(leg_lines, leg_labels, bbox_to_anchor=[0.55, 0.65, 0.4, 0.4], framealpha=1.0) plt.savefig('xray_comp.pdf')Calculate the expected luminosity from the forward shock#From Dwarkadas 2014 def calc_xray_luminosity(Mdot, vwind, phase): gff=1 Cn = 1 #for CSM forward shock Mdot = Mdot/(10**-5) vwind = vwind/10 phase = phase/10 Lx = 3.0E39*gff*Cn * (Mdot/vwind)**2 * 1./(phase) Lx = Lx/10**39 Lx = Lx.astype(np.float) return Lx phase = np.arange(1, 115) vwind1=10 #km/s vwind2= 100 #km/s Mdot1 = 10**-7 Mdot2 = 10**-6 Mdot3 = 10**-5 Lx1_up = calc_xray_luminosity(Mdot1, vwind1, phase) Lx1_low = calc_xray_luminosity(Mdot1, vwind2, phase) Lx2_up = calc_xray_luminosity(Mdot2, vwind1, phase) Lx2_low = calc_xray_luminosity(Mdot2, vwind2, phase) Lx3_up = calc_xray_luminosity(Mdot3, vwind1, phase) Lx3_low = calc_xray_luminosity(Mdot3, vwind2, phase) ofile_ximage ofile_obs['ObsID'] == ofile_ximage['directory'][1] ofile_ximage['MJD']-Time(sn15oz.jdexpl, format='jd').mjd print(ofile_ximage['luminosity'])luminosity cm2 ---------------------- nan 2.4056703060675297e+40 1.0034399912451994e+41 nan nan 6.617333698459422e+40 3.791988787530174e+40 nan 2.401692347727608e+40 nan ... 1.0859826267985706e+41 nan nan nan 1.9740618261860465e+40 nan nan 1.3236656376088806e+41 nan 1.364439710593076e+40 Length = 50 rows[@ggruszczynski](https://github.com/ggruszczynski) You should have completed previous tutorial, [diffusion_part1_and_convolution](./diffusion_part1_and_convolution.ipynb) before continuing. Diffusion part 3: with spatially variable diffusivity in 1-D-----*** Balance of EnthalpyThe simplified balance of Enthalpy $H=\rho c_p T$ is known as:\begin{align}\int \frac{\partial }{\partial t} (\rho c_p T ) dV + \oint ( \rho c_p T \boldsymbol{u} ) \cdot \boldsymbol{n} dS &= \oint \boldsymbol{n} \cdot \boldsymbol{q} dS + \int \dot{q} dV \\ \boldsymbol{q} &= k \nabla T\end{align}The heat flux $ \boldsymbol{q} $ is related to the thermal conductivity coefficient $k [ W/mK] $. Applying the GGO theorem, we obtain:$$\frac{\partial }{\partial t} (\rho c_p T ) + \nabla \cdot (\boldsymbol{u} \rho c_p T ) = \nabla \cdot (k \nabla T) + \dot{q} $$ Conservative and non-conservative schemeConsider a 1D, unsteady heat transfer equation with variable conduction coefficient.For simplicity, assume that the heat capacity $ \rho c_p = 1 $.The equation be expressed in two mathematically equivalent forms:$$\frac{\partial u}{\partial t}=\frac{\partial}{\partial x}\left(k(x) \frac{\partial u}{\partial x}\right) \\$$or $$\frac{\partial u}{\partial t}=\frac{\partial u}{\partial x} \frac{\partial k}{\partial x}+k(x) \frac{\partial^{2} u}{\partial x^{2}}$$Both of these continous forms can be discretized. 
First form leads to the *conservative scheme*$$\frac{u_{i}^{n+1}-u_{i}^{n}}{\Delta t}=\frac{1}{\Delta x^{2}}\left(k_{i+\frac{1}{2}}\left(u_{i+1}^{n}-u_{i}^{n}\right)-k_{i-\frac{1}{2}}\left(u_{i}^{n}-u_{i-1}^{n}\right)\right)$$while the second form results in a *non-conservative scheme*$$\frac{u_{i}^{n+1}-u_{i}^{n}}{\Delta t}=\frac{1}{\Delta x^{2}}\left(\frac{\left(k_{i+1}-k_{i-1}\right)}{2} \frac{\left(u_{i+1}^{n}-u_{i-1}^{n}\right)}{2}+k_{i}\left(u_{i+1}^{n}+u_{i-1}^{n}-2 u_{i}^{n}\right)\right)$$ Task* Implement both the *conservative* and the *non-conservative* schemes.* Impose the following BC: ```u[int(nx/4)] = 10.u[-int(nx/4)] = 1.u[0] = 5.u[-1] = 5.```* Run the simulation for different $k$ and compare results. Which physical quantity may become *not conserved* ?import numpy as np import matplotlib.pyplot as plt # variable conductivity nx = 128 domain_length = 64 dx = domain_length / (nx-1) xspace = np.linspace(0, domain_length, nx) u_IC = 5.*np.ones(nx) # numpy function ones() u_IC[int((nx-1)/4):int(nx/2 + 1)] = 10 # setting u = 2 between 0.5 and 1 as per our I.C.s k = 1.*np.ones(nx) # be aware that dt = sigma * dx**2 / nu k[:int(nx/2)] = 0.1 # try with 0.1 sigma = .2 # sigma is a parameter, we'll learn more about it later dt = sigma * dx**2 / max(k) #dt is defined using sigma nt = 10000 # the number of timesteps we want to calculate def calc_diffusion_variable_conductivity(IC,nx,nt,k,dt): u = IC.copy() un = IC.copy() #our placeholder array, un, to advance the solution in time for n in range(nt): #iterate through time #impose BC u[int(nx/4)] = 10. u[-int(nx/4)] = 1. u[0] = 5. u[-1] = 5. un = u.copy() ##copy the existing values of u into un for i in range(1, nx - 1): # this is obviously bad : du/dt = k (d^2)u/(dx)^2 # u[i] = un[i] + k[i] * dt / dx**2 * (un[i+1] - 2 * un[i] + un[i-1]) # non conservative scheme # u[i] = un[i] + dt / dx**2 * ((k[i+1]-k[i-1])*(un[i+1]-un[i-1])/4. + k[i]*(un[i+1] - 2 * un[i] + un[i-1])) # conservative scheme kf = (k[i+1]+k[i])/2 kb = (k[i]+k[i-1])/2 u[i] = un[i] + dt / dx**2 * (kf*(un[i+1]-un[i]) - kb*(un[i] - un[i-1])) return u u_FD = calc_diffusion_variable_conductivity(u_IC,nx,nt,k,dt) plt.plot(xspace, u_FD)基础回归模型:预测汽车燃油效率Auto MPGimport sys import os import pathlib import matplotlib as mpl import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras import pandas as pd import numpy as np import seaborn as sns %matplotlib inline print("python version: ", sys.version_info) for module in np, mpl, tf, keras, pd, sns: print(module.__name__, "version: ", module.__version__) seed = 10383python version: sys.version_info(major=3, minor=7, micro=2, releaselevel='final', serial=0) numpy version: 1.16.0 matplotlib version: 3.0.3 tensorflow version: 2.3.0 tensorflow.keras version: 2.4.0 pandas version: 0.24.2 seaborn version: 0.11.01. 
Auto MPG 数据集 1.1 获取数据dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data") dataset_path column_names = [ "MPG", "Cylinders", "Displacement", "Horsepower", "Weight", "Acceleration", "Model Year", "Origin", ] raw_dataset = pd.read_csv( dataset_path, names=column_names, na_values = "?", comment="\t", sep=" ", skipinitialspace=True, ) dataset = raw_dataset.copy() dataset.tail()4.2 数据清洗 数据集中包括一些未知值。dataset.isna().sum()为了保证这个初始示例的简单性,删除这些行。dataset = dataset.dropna()"Origin" 列实际上代表分类,而不仅仅是一个数字。所以把它转换为独热码 (one-hot):origin = dataset.pop("Origin") dataset["USA"] = (origin == 1) * 1.0 dataset["Europe"] = (origin == 2) * 1.0 dataset["Japan"] = (origin == 3) * 1.0 dataset.tail()1.3 拆分数据集train_dataset = dataset.sample(frac=0.8, random_state=seed) test_dataset = dataset.drop(train_dataset.index)1.4 数据检查 快速查看训练集中几对列的联合分布。sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")也可以查看总体的数据统计:train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats1.5 从数据集中分离标签train_labels = train_dataset.pop("MPG") test_labels = test_dataset.pop("MPG")1.6 数据归一化def norm(x): return (x - train_stats["mean"]) / train_stats["std"] normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset)2. 模型 2.1 构建模型def build_model(): model = keras.Sequential([ keras.layers.Dense(64, activation="relu", input_shape=[len(train_dataset.keys())]), keras.layers.Dense(64, activation="relu"), keras.layers.Dense(1) ]) optimizer = keras.optimizers.RMSprop(0.001) model.compile(loss="mse", optimizer=optimizer, metrics=["mae", "mse"]) return model model = build_model()2.2 检查模型model.summary() example_batch = normed_train_data[:10] example_result = model.predict(example_batch) example_result2.3 训练模型 对模型进行1000个周期的训练,并在 history 对象中记录训练和验证的准确性。class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print("") print(".", end="") EPOCHS = 1000 history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[PrintDot()], ) hist = pd.DataFrame(history.history) hist["epoch"] = history.epoch hist.tail() def plot_history(history): hist = pd.DataFrame(history.history) hist["epoch"] = history.epoch plt.figure() plt.xlabel("Epoch") plt.ylabel("Mean Abs Error [MPG]") plt.plot(hist["epoch"], hist["mae"], label="Train Error") plt.plot(hist["epoch"], hist["val_mae"], label="Val Error") plt.ylim([0, 5]) plt.legend() plt.figure() plt.xlabel("Epoch") plt.ylabel("Mean Square Error [$MPG^2$]") plt.plot(hist["epoch"], hist["mse"], label="Train Error") plt.plot(hist["epoch"], hist["val_mse"], label="Val Error") plt.ylim([0, 20]) plt.legend() plt.show() plot_history(history)2.4 early stoppingmodel = build_model() early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=10) history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()], ) plot_history(history) loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))3/3 - 0s - loss: 9.5104 - mae: 2.3102 - mse: 9.5104 Testing set Mean Abs Error: 2.31 MPG2.5 预测test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel("True Values [MPG]") plt.ylabel("Predictions [MPG]") plt.axis("equal") plt.axis("square") plt.xlim([0, plt.xlim()[1]]) 
plt.ylim([0, plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100])查看误差分布error = test_predictions - test_labels plt.hist(error, bins=25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count")* Jupyter Notebook* scipy (tambien instalará numpy)* pillow (libreria para manejo de imágenes)* imageio (lectura / escritura de imágenes)* matplotlib (para graficar)* seaborn (visualizaciones estadísticas)* scikit-learn (aprendizaje automático - lo usaremos para un ejemplo de PCA)import seaborn as sns vuelos = sns.load_dataset("flights") vuelos = vuelos.pivot("month", "year", "passengers") ax = sns.heatmap(vuelos) from platform import python_version print(python_version())3.6.9Basic pythonimport numpy as np import torch as tc import matplotlib.pyplot as plt %matplotlib inline arr = [1, 2, 3, 4, 5] nparr = np.array(arr) nparr1 = np.array([1.,2.,3.,4.,5]) nparr == nparr1 np.shape(nparr) X_train = tc.FloatTensor([[1,2,3]]) y_train = tc.FloatTensor([[2, 4, 6]]) plt.scatter(X_train, y_train) X = np.linspace(0, 10, 11) y = np.linspace(0, 10, 11) plt.plot(X,y) W = tc.zeros(1, requires_grad = True) b = tc.zeros(1, requires_grad=True) n_data = len(X_train) num_epochs = 5000 learning_rate = 0.01 import torch.nn as nn import torch.nn.functional as F from torch import optim print(cost) print(W) print(b) num_epochs = 1000 W = tc.zeros(1, requires_grad = True) b = tc.zeros(1, requires_grad=True) for epoch in range(num_epochs): hypothesis = X_train * W + b cost = tc.mean((hypothesis-y_train)**2) optimizer = optim.SGD([W, b], lr = 0.01) optimizer.zero_grad()#grad초기화 cost.backward() #cost의 기울기 계산 optimizer.step() # W와 b업데이트 if epoch%100 == 0: print("Epoch{:4d} ,W:{:.3f}, b:{:.3f} , Cost:{:.6f}".format(epoch,W.item(), b.item(), cost.item()))ML Pipeline PreparationFollow the instructions below to help you create your ML pipeline. 1. Import libraries and load data from database.- Import Python libraries- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)- Define feature and target variables X and Y# import libraries import pandas as pd import numpy as np import os import pickle from sqlalchemy import create_engine import re import nltk from sklearn.base import BaseEstimator, TransformerMixin from sklearn.model_selection import train_test_split from sklearn.multioutput import MultiOutputClassifier from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,AdaBoostClassifier from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.model_selection import GridSearchCV from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report from scipy.stats import hmean from scipy.stats.mstats import gmean nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger']) # load data from database engine = create_engine('sqlite:///s0umDisasterProject.db') df = pd.read_sql_table('df',engine) X = df['message'] Y = df.iloc[:,4:] df.head()2. 
Write a tokenization function to process your text datadef tokenize(text): url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' detected_urls = re.findall(url_regex, text) for url in detected_urls: text = text.replace(url, "urlplaceholder") tokens = word_tokenize(text) lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok).lower().strip() clean_tokens.append(clean_tok) return clean_tokens3. Build a machine learning pipelineThis machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.# PRELIMINARY PIPELINE def model_pipeline(): pipeline = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier())), ], verbose=True) return pipeline # NEW PIPELINE (AFTER SOME TRIAL-AND-ERROR ALGORITHM TEST AND GRID SEARCH) def new_model_pipeline(): pipeline = Pipeline([ ('features', FeatureUnion([ ('text_pipeline', Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()) ])), ('starting_verb', StartingVerbExtractor()) ])), ('clf', MultiOutputClassifier(AdaBoostClassifier())) ], verbose=True) return pipeline4. Train pipeline- Split data into train and test sets- Train pipelineX_train, X_test, y_train, y_test = train_test_split(X, Y) model = [] model = new_model_pipeline() model.fit(X_train, y_train);[Pipeline] .......... (step 1 of 2) Processing features, total= 58.5s [Pipeline] ............... (step 2 of 2) Processing clf, total= 2.2minLet's see is the model tells us something reasonable!msg = ['Hello I see fire in the street and many houses are destroyed, homeless people everywhere'] test_output = model.predict(msg) print(y_train.columns.values[(test_output.flatten()==1)])['request' 'aid_related' 'shelter' 'refugees' 'buildings' 'weather_related' 'direct_report' 'direct']5. Test your modelReport the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.y_pred = model.predict(X_test) category_names = Y.columns Y_pred = pd.DataFrame(data=y_pred, index=y_test.index, columns=category_names) print(classification_report(y_test, Y_pred, target_names=category_names))precision recall f1-score support request 0.73 0.55 0.63 1130 offer 0.09 0.04 0.05 28 aid_related 0.75 0.60 0.67 2657 medical_help 0.63 0.25 0.35 524 medical_products 0.63 0.30 0.41 331 search_and_rescue 0.60 0.22 0.32 195 security 0.10 0.02 0.03 121 military 0.57 0.34 0.42 198 child_alone 0.00 0.00 0.00 0 water 0.71 0.62 0.67 427 food 0.79 0.68 0.73 730 shelter 0.75 0.57 0.65 594 clothing 0.73 0.38 0.50 114 money 0.51 0.28 0.36 150 missing_people 0.54 [...]6. Improve your modelUse grid search to find better parameters. 
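One caveat before the code that follows: the grid search below scores candidates with a custom `multioutput_fscore` function that is not defined in this excerpt. A minimal stand-in sketch — averaging the weighted F1 score over the output columns — is shown here; this is an assumption rather than the author's original definition (the `gmean`/`hmean` imports above suggest the original may have aggregated per-column scores with a geometric or harmonic mean instead):

```python
import numpy as np
from sklearn.metrics import f1_score

def multioutput_fscore(y_true, y_pred):
    """Stand-in scorer: mean weighted F1 across all output columns."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    scores = [
        f1_score(y_true[:, i], y_pred[:, i], average='weighted')
        for i in range(y_true.shape[1])
    ]
    return float(np.mean(scores))
```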
ADD CUSTOM ESTIMATORclass StartingVerbExtractor(BaseEstimator, TransformerMixin): def starting_verb(self, text): sentence_list = nltk.sent_tokenize(text) for sentence in sentence_list: pos_tags = nltk.pos_tag(tokenize(sentence)) first_word, first_tag = pos_tags[0] if first_tag in ['VB', 'VBP'] or first_word == 'RT': return True return False def fit(self, X, y=None): return self def transform(self, X): X_tagged = pd.Series(X).apply(self.starting_verb) return pd.DataFrame(X_tagged)IMPROVE MODEL PIPELINEdef new_model_pipeline(): pipeline = Pipeline([ ('features', FeatureUnion([ ('text_pipeline', Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()) ])), ('starting_verb', StartingVerbExtractor()) ])), ('clf', MultiOutputClassifier(AdaBoostClassifier())) ]) return pipelineCUSTOMIZE AND RUN GRID SEARCH**Warning** It may take several hours on a standard laptop (with CPU parallelization)model = new_model_pipeline() parameters = { 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)), 'features__text_pipeline__vect__max_df': (0.75, 1.0), 'features__text_pipeline__vect__max_features': (None, 5000), 'features__text_pipeline__tfidf__use_idf': (True, False), # 'clf__n_estimators': [10, 100], # 'clf__learning_rate': [0.01, 0.1], # 'features__transformer_weights': ( # {'text_pipeline': 1, 'starting_verb': 0.5}, # {'text_pipeline': 0.5, 'starting_verb': 1}, # {'text_pipeline': 0.8, 'starting_verb': 1}, # ) } scorer = make_scorer(multioutput_fscore,greater_is_better = True) cv = GridSearchCV(model, param_grid=parameters, scoring = scorer,verbose = 2, n_jobs = -1) cv.fit(X_train, y_train)Fitting 3 folds for each of 16 candidates, totalling 48 fits [CV] features__text_pipeline__tfidf__use_idf=True, features__text_pipeline__vect__max_df=0.75, features__text_pipeline__vect__max_features=None, features__text_pipeline__vect__ngram_range=(1, 1) [CV] features__text_pipeline__tfidf__use_idf=True, features__text_pipeline__vect__max_df=0.75, features__text_pipeline__vect__max_features=None, features__text_pipeline__vect__ngram_range=(1, 1) [CV] features__text_pipeline__tfidf__use_idf=True, features__text_pipeline__vect__max_df=0.75, features__text_pipeline__vect__max_features=None, features__text_pipeline__vect__ngram_range=(1, 1) [CV] features__text_pipeline__tfidf__use_idf=True, features__text_pipeline__vect__max_df=0.75, features__text_pipeline__vect__max_features=None, features__text_pipeline__vect__ngram_range=(1, 2) [CV] features__text_pipeline__tfidf__use_idf=True, features__text_pipeline__vect__max_df=0.75, features__text_pipeline__vect__max_features=None, features_[...]7. Test your modelShow the accuracy, precision, and recall of the tuned model. Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio! 8. Try improving your model further. Here are a few ideas:* try other machine learning algorithms* add other features besides the TF-IDF **DISCLAIMER** Please notice that complete results are already shown in the previous paragraph, where the new model is run by default. 
Most relevant differences compared to the first-guess model are:1) AdaBoost classifier instead of RandomForest2) New feaure (first verb)The tuned model, compared to the first-guess sample with a limited number of trees (10) and various decision tree-based algorithms, shows the following improvements:1) Overall accuracy: from 93% to 94.5%2) Customized multi-class multi-label f1-score: from 90% to 93% 9. Export your model as a pickle file# save the model to disk filename = 'classifier.pkl' pickle.dump(model, open(filename, 'wb'))practisepython.orgprint('Enter your name and Age one by one') # data input data_list = [input() for i in range(2)] year = 2019+(100 - int(data_list[1])) print('Hello {}, wish you can celebrate your 100th birthday on {}'.format(data_list[0], year)) # Alternate input_list = input().split(',') year = 2019+(100 - int(input_list[1])) print('Hello {}, wish you can celebrate your 100th birthday on {}'.format(input_list[0], year)) # Exercise 2 # find odd or even num = int(input('Enter a number:')) if num%2 == 0: print(' You are more of an even person') else: print('Your way of thinking is odd') # numbers less than five def less_than_five (list1): newlist = [] for num in list1: if num < 5: newlist.append(num) return newlist list1 = [23,6,7788,4,5,3,2,1,2,6,78,99] less_than_five(list1) # Using Lambda and filter #myList = list(filter(lambda num: num<5, list1)) print(list(filter(lambda num: num<5, list1))) # prgrm to print out all the divisors of a number number = int(input('Enter a number')) myList = [] for num in range(1,number): if number%num == 0: myList.append(num) print(myList) # Some randon practice import random res = [] for i in range(7): res.append(random.randrange(1,50,1)) print('Random number list is: '+ str(res)) res = [random.randrange(1,50,1) for i in range(7)] print('Random number list is: '+ str(res)) import random # MATCHING LISTS list1 = [random.randrange(1,30,1) for i in range(10)] list2 = [random.randrange(1,30,1) for i in range(10)] print( list1, list2) res = [num for num in list1 if num in list2] print(res) res = [] for num in list1: if num in list2: res.append(num) #res = [] #for num in list1: # if num in list2: # res.append(num) def front_back (str): n = len(str) if n == 1: return str return str[n-1] + str[1:(n-1)] + str[0] front_back('book') front_back('code')Example 11.1: Ideal Refrigeration*, Ph.D., P.E.University of Kentucky - Paducah CampusME 321: Engineering Thermodynamics II* Problem StatementR-134a flows through a vapor-compression refrigeration cycle. Heat is removed at a rate of 18 kW from a freezer at $-8^\circ\mathrm{C}$. Heat is rejected to a room at $22^\circ\mathrm{C}$. The evaporator and the condenser are $10^\circ\mathrm{C}$ colder or hotter than the thermal reservoirs. 
Find:* (a) $p_\mathrm{evap}$ and $p_\mathrm{cond}$ (kPa)* (b) $COP_R$* (c) Net Work* (d) Volumetric flow rate entering the compressor![image.png](attachment:e047931f-9241-45a0-ba9d-e5e372395e5d.png) Solution__[Video Explanation](https://uky.yuja.com/V/Video?v=2005363&node=7667507&a=185761075&autoplay=1)__ Python InitializationWe'll start by importing the libraries we will use for our analysis and initializing dictionaries to hold the properties we will be usings.from kilojoule.templates.default import * r134a = realfluid.Properties('R134a')Given ParametersWe now define variables to hold our known values.Qdot_in = Quantity(18,'kW') # Rate of heat input T_L = Quantity(-8,'degC') # Temperature of refrigerated space T_H = Quantity(22,'degC') # Temperature of surroundings Delta_T = Quantity(10,'delta_degC') # Temperature difference in evaporator and condenser Summary();Assumptions- Ideal work devices- Saturated vapor at evaporator exit- Saturated liquid at condenser exit- Isobaric heat exchangers- Negligible changes in kinetic energy- Negligible changes in potential energyx[1] = 1 # Saturated vapor at evaporator exit x[3] = 0 # Saturated liquid at condenser exit Summary();(a) Pressures in condenser and evaporator%%showcalc #### Evaporator pressure # The working fluid temperature must be lower than the refrigerated space T[1] = T_L - Delta_T # The fluid is saturated, so the saturation pressure must be consistent with the saturation temperature p[1] = r134a.p(T[1],x[1]) #### Condenser pressure # The working fluid temperature must be hotter than the surroundings T[3] = T_H + Delta_T # The fluid is saturated, so the saturation pressure must be consistent with the saturation temperature p[3] = r134a.p(T[3],x[3])(b) $COP_R$%%showcalc #### 1st Law Analysis ##### State 1 h[1] = r134a.h(T[1],x[1]) s[1] = r134a.s(T[1],x[1]) ##### 1-2) Ideal compression p[2] = p[3] s[2] = s[1] ##### State 2 T[2] = r134a.T(p[2],s[2]) h[2] = r134a.h(p[2],s[2]) ##### 2-3) Isobaric heat rejection ##### State 3 T[3] = r134a.T(p[3],x[3]) h[3] = r134a.h(p[3],x[3]) s[3] = r134a.s(p[3],x[3]) ##### 3-4) Isenthalpic expansion p[4] = p[1] h[4] = h[3] ##### State 4 T[4] = r134a.T(p[4],h=h[4]) s[4] = r134a.s(p[4],h=h[4]) x[4] = r134a.x(p[4],h=h[4]) ##### COP # Desired: Heat Input q_in = h[1]-h[4] # Cost: Net Work w_net = h[2]-h[1] # COP: Desired/Cost COP_R = q_in/w_net(c) Net Work%%showcalc ##### Mass flow rate mdot = Qdot_in/q_in ##### Net work Wdot_net = mdot*w_net(d) Volumetric flow rate entering compressor%%showcalc ##### Specific volume entering compressor v[1] = r134a.v(T[1],x[1]) ##### Volumetric flow rate Vdot[1] = mdot*v[1]Diagramspv = r134a.pv_diagram() pv.ax.set_ylim(bottom=100) pv.ax.set_xlim(right=.5) # Plot Reference Isotherms for refrigerated space and surroundings # Refrigerated space pv.plot_isotherm(T_L,color='blue',label='$T_L$',ycoor=4e3,labelprops=dict(va='top')) # Surrounding temperature pv.plot_isotherm(T_H,color='red',label='$T_H$',ycoor=4e3); # High pressure pv.plot_isobar(p[2],label=f'{p[2]}',pos=.9) pv.plot_isobar(p[1],label=f'{p[1]}',pos=.9); for state in [1,2,3,4]: v[state] = r134a.v(p[state],h=h[state]) pv.plot_state(states[1],label_loc='south') pv.plot_state(states[2],label_loc='north east') pv.plot_state(states[3],label_loc='north east') pv.plot_state(states[4],label_loc='north east') pv.plot_process(states[1],states[2],path='isentropic',label='compressor') pv.plot_process(states[2],states[3],path='isobaric',label='condenser') pv.plot_process(states[3],states[4],path='isenthalpic',label='throttle') 
pv.plot_process(states[4],states[1],path='isobaric',label='evaporator',labelprops=dict(va='top',ha='right',pos=.75)); Ts = r134a.Ts_diagram() Ts.ax.set_ylim(bottom=-30,top=60) Ts.ax.set_xlim(left=.7) Ts.plot_state(states[1],label_loc='south east') Ts.plot_state(states[2],label_loc='east') Ts.plot_state(states[3],label_loc='north west') Ts.plot_state(states[4],label_loc='south west') Ts.plot_process(states[1],states[2],path='isentropic',label='compressor') Ts.plot_process(states[2],states[3],path='isobaric',label='condenser') Ts.plot_process(states[3],states[4],path='isenthalpic',label='throttle') Ts.plot_process(states[4],states[1],path='isobaric',label='evaporator') # Plot Reference Isotherms for refrigerated space and surroundings Ts.plot_isotherm(T_L,color='blue',label='$T_L$',pos=0.1) Ts.plot_isotherm(T_H,color='red',label='$T_H$',pos=0.1) Ts.plot_isobar(p[2],label=f'{p[2]}',ycoor=55) Ts.plot_isobar(p[1],label=f'{p[1]}',ycoor=55);Import Librariesimport numpy as np from PIL import Image from keras.applications.vgg16 import VGG16 from keras.applications.vgg16 import preprocess_input, decode_predictionsWeights of trained modelmodel = VGG16(weights='imagenet', include_top=True)Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5 553467904/553467096 [==============================] - 3s 0us/stepLayerslayers = dict([(layer.name, layer.output) for layer in model.layers]) # print(layers) model.summary()Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 224, 224, 3)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _____________________________________________________________[...]Parameters countingmodel.count_params()Go to image pathfrom google.colab import drive drive.mount('/gdrive') %cd /gdrive import os os.chdir('/gdrive/My Drive/Colab Notebooks') !lsaraba.jpg 'Img_Class_w_VGG16.ipynb adlı not defterinin kopyası' ddef.jpg kepce.jpg dog1.jpg MNIST_Ornek.ipynb Evrisim.ipynb Pomeranian_01.jpeg f35.jpg tree.jpg Fashion_MNIST.ipynb Untitled0.ipynb Img_Class_w_VGG16.ipynb 'VGG16 in Keras.ipynb'Get the image and preprocessimg_path = 'ddef.jpg' image = Image.open(img_path) image = image.resize((224,224)) image x = np.array(image, dtype='float32') x = np.expand_dims(x, axis=0) x = preprocess_input(x)Prediction | Classicationpred = model.predict(x) print(decode_predictions(pred, top=5)[0]) rate = int(decode_predictions(pred, top=1)[0][0][2]*100) print('\n Object in the image is probably ', decode_predictions(pred, top=1)[0][0][1], '(probability rate=', rate, '%)')[('n02690373', 'airliner', 0.89494264), ('n04592741', 'wing', 0.08819905), ('n02692877', 'airship', 0.014380781), ('n04552348', 'warplane', 0.0021106156), ('n04273569', 'speedboat', 6.899322e-05)] Object in the image is probably airliner (probability rate= 89 %)Sector Neutral Install 
packagesimport sys !{sys.executable} -m pip install -r requirements.txt import cvxpy as cvx import numpy as np import pandas as pd import time import os import quiz_helper import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (14, 8)following zipline bundle documentationhttp://www.zipline.io/bundles.htmlingesting-data-from-csv-files data bundleimport os import quiz_helper from zipline.data import bundles os.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..','data','module_4_quizzes_eod') ingest_func = bundles.csvdir.csvdir_equities(['daily'], quiz_helper.EOD_BUNDLE_NAME) bundles.register(quiz_helper.EOD_BUNDLE_NAME, ingest_func) print('Data Registered')Data RegisteredBuild pipeline enginefrom zipline.pipeline import Pipeline from zipline.pipeline.factors import AverageDollarVolume from zipline.utils.calendars import get_calendar universe = AverageDollarVolume(window_length=120).top(500) trading_calendar = get_calendar('NYSE') bundle_data = bundles.load(quiz_helper.EOD_BUNDLE_NAME) engine = quiz_helper.build_pipeline_engine(bundle_data, trading_calendar)View Data¶With the pipeline engine built, let's get the stocks at the end of the period in the universe we're using. We'll use these tickers to generate the returns data for the our risk model.universe_end_date = pd.Timestamp('2016-01-05', tz='UTC') universe_tickers = engine\ .run_pipeline( Pipeline(screen=universe), universe_end_date, universe_end_date)\ .index.get_level_values(1)\ .values.tolist() universe_tickersGet Returns datafrom zipline.data.data_portal import DataPortal data_portal = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day, equity_minute_reader=None, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader)Get pricing data helper functiondef get_pricing(data_portal, trading_calendar, assets, start_date, end_date, field='close'): end_dt = pd.Timestamp(end_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') start_dt = pd.Timestamp(start_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') end_loc = trading_calendar.closes.index.get_loc(end_dt) start_loc = trading_calendar.closes.index.get_loc(start_dt) return data_portal.get_history_window( assets=assets, end_dt=end_dt, bar_count=end_loc - start_loc, frequency='1d', field=field, data_frequency='daily')get pricing data into a dataframereturns_df = \ get_pricing( data_portal, trading_calendar, universe_tickers, universe_end_date - pd.DateOffset(years=5), universe_end_date)\ .pct_change()[1:].fillna(0) #convert prices into returns returns_dfSector data helper functionWe'll create an object for you, which defines a sector for each stock. The sectors are represented by integers. We inherit from the Classifier class. 
[Documentation for Classifier](https://www.quantopian.com/posts/pipeline-classifiers-are-here), and the [source code for Classifier](https://github.com/quantopian/zipline/blob/master/zipline/pipeline/classifiers/classifier.py)from zipline.pipeline.classifiers import Classifier from zipline.utils.numpy_utils import int64_dtype class Sector(Classifier): dtype = int64_dtype window_length = 0 inputs = () missing_value = -1 def __init__(self): self.data = np.load('../../data/project_4_sector/data.npy') def _compute(self, arrays, dates, assets, mask): return np.where( mask, self.data[assets], self.missing_value, ) sector = Sector() sector len(sector.data) sector.dataQuiz 1How many unique sectors are in the sector variable? Answer 1 hereprint(f"set of unique categories: {set(sector.data)}")set of unique categories: {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1}Create an alpha factor based on momentumWe want to calculate the one-year return. In other words, get the close price of today, minus the close price of 252 trading days ago, and divide by that price from 252 days ago.$1YearReturn_t = \frac{price_{t} - price_{t-252}}{price_{t-252}}$from zipline.pipeline.factors import ReturnsWe'll use 2 years of data to calculate the factor **Note:** Going back 2 years falls on a day when the market is closed. Pipeline package doesn't handle start or end dates that don't fall on days when the market is open. To fix this, we went back 2 extra days to fall on the next day when the market is open.factor_start_date = universe_end_date - pd.DateOffset(years=2, days=2) factor_start_date ## 1 year returns can be the basis for an alpha factor p1 = Pipeline(screen=universe) rets1 = Returns(window_length=252, mask=universe) p1.add(rets1,"1YearReturns") df1 = engine.run_pipeline(p1, factor_start_date, universe_end_date) #graphviz lets us visualize the pipeline import graphviz p1.show_graph(format='png')View the data of the factordf1.head()Explore the demean functionThe Returns class inherits from zipline.pipeline.factors.factor. [The documentation for demean is located here](https://www.zipline.io/appendix.htmlzipline.pipeline.factors.Factor.demean), and is also pasted below:```demean(mask=sentinel('NotSpecified'), groupby=sentinel('NotSpecified'))[source]Construct a Factor that computes self and subtracts the mean from row of the result.If mask is supplied, ignore values where mask returns False when computing row means, and output NaN anywhere the mask is False.If groupby is supplied, compute by partitioning each row based on the values produced by groupby, de-meaning the partitioned arrays, and stitching the sub-results back together.Parameters: mask (zipline.pipeline.Filter, optional) – A Filter defining values to ignore when computing means.groupby (zipline.pipeline.Classifier, optional) – A classifier defining partitions over which to compute means.``` Quiz 2By looking at the documentation, and then the source code for `demean`, what are two parameters for this function? Which one or ones would you call if you wanted to demean by sector and wish to demean for all values in the chosen universe?[The source code](https://www.zipline.io/_modules/zipline/pipeline/factors/factor.htmlFactor.demean) has useful comments to help you answer this question. Answer 2 here We would use the groupby parameter, and we don't need to use the mask parameter, since we are not going to exclude any of the stocks in the universe from the demean calculation. 
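As a quick sanity check outside of the pipeline machinery, the same quantity can be computed directly from a price table with pandas. A minimal sketch, where `prices` is a hypothetical DataFrame of daily close prices indexed by date with one column per asset:

```python
import pandas as pd

def one_year_return(prices: pd.DataFrame, window: int = 252) -> pd.DataFrame:
    """(price_t - price_{t-window}) / price_{t-window} for each asset column."""
    return prices / prices.shift(window) - 1
```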
Quiz 3Turn 1 year returns into an alpha factorWe can do some processing to convert our signal (1 year return) into an alpha factor. One step is to demean by sector.* demean: For each stock, we want to take the average return of stocks that are in the same sector, and then remove this from the return of each individual stock. Answer 3#TODO # create a pipeline called p2 p2 = Pipeline(screen=universe) # create a factor of one year returns, demeaned by sector factor_demean_by_sector = ( Returns(window_length=252, mask=universe). demean(groupby=Sector()) #we use the custom Sector class that we reviewed earlier ) # add the factor to the p2 pipeline p2.add(factor_demean_by_sector, 'Momentum_1YR_demean_by_sector')visualize the second pipelinep2.show_graph(format='png')Quiz 4How does this pipeline compare with the first pipeline that we created earlier? Answer 4 hereThe second pipeline now adds sector information in the GroupedRowTransform('demean') step. run pipeline and view the factor datadf2 = engine.run_pipeline(p2, factor_start_date, universe_end_date) df2.head()ACS Lab 01 - Scientific VisualizationThe first lab of the semester will be structured differently than the rest. The focus of this lab is to familiarize yourself with a wide variety of scientific visualizations. All of the labs this semester will be language agnostic, meaning you can use your language of choice, but they often require a higher-level language such as Python, MATLAB, or R. All of my solutions will be written in Python. I highly recommend installing Python via the Anaconda distribution ([Installation Instructions](https://conda.io/docs/install/full.html)). If you install this way you will have all of the packages I used to complete this assignment. For this lab you will be asked to complete any 10 of the following 11 visualization tasks. Remember to label your axes when appropriate, and when plotting multiple results on a single plot remember to include a legend.import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy import sys print('Python version: {}'.format(sys.version[:5])) print('Matplotlib version: {}'.format(mpl.__version__)) print('Numpy version: {}'.format(np.__version__)) print('Scipy version: {}'.format(scipy.__version__)) print('Pandas version: {}'.format(pd.__version__))Python version: 3.6.0 Matplotlib version: 2.0.0 Numpy version: 1.13.1 Scipy version: 0.19.1 Pandas version: 0.20.3WOS v1 Data EDAimport json from collections import defaultdict import matplotlib.pyplot as plt train_path = '../assets/data/trip/wos-v1/wos-v1_train.json' test_path = '../assets/data/trip/wos-v1/wos-v1_dev.json' with open(f'{train_path}', 'r', encoding='utf8') as fr: train_dial_data = json.load(fr) with open(f'{test_path}', 'r', encoding='utf8') as fr: test_dial_data = json.load(fr)Dataset overview- Language: Korean- Meta domain: travel- Number of single domains: 5- Slots: 45 (all slots are Informable Slots)※ Notes- Informable Slot: the type of information that the User gives to the System, or that the User can intend through context, in order to find an instance in a specific Knowledge Base or to write a new instance (constraints on the dialogue and the target of DST)- Requestable Slot: the type of information that can be additionally requested after an instance of a specific Knowledge Base has been selected (provided by the System to the User) Domain analysisAnalysis of the data by the domain types of the dialogues in the datasetdomains_counter = defaultdict(int) domain_combs_counter = defaultdict(int) train_domains_counter = defaultdict(int) train_domain_combs_counter = defaultdict(int) test_domains_counter = defaultdict(int) test_domain_combs_counter = defaultdict(int) for dialogue in train_dial_data: domains = sorted(dialogue["domains"]) for
domain in domains: domains_counter[domain] += 1 train_domains_counter[domain] += 1 domain_comb = ", ".join(domains) domain_combs_counter[domain_comb] += 1 train_domain_combs_counter[domain_comb] += 1 for dialogue in test_dial_data: domains = sorted(dialogue["domains"]) for domain in domains: domains_counter[domain] += 1 test_domains_counter[domain] += 1 domain_comb = ", ".join(domains) domain_combs_counter[domain_comb] += 1 test_domain_combs_counter[domain_comb] += 1개별 도메인print('----- 전체') print(domains_counter, sep=": \n") print('----- 학습 데이터') print(train_domains_counter, sep=": \n") print('----- 테스트 데이터') print(test_domains_counter, sep=": \n")----- 전체 defaultdict(, {'관광': 4873, '식당': 5327, '지하철': 941, '택시': 2942, '숙소': 5063}) ----- 학습 데이터 defaultdict(, {'관광': 4318, '식당': 4688, '지하철': 764, '택시': 2708, '숙소': 4396}) ----- 테스트 데이터 defaultdict(, {'관광': 555, '숙소': 667, '식당': 639, '택시': 234, '지하철': 177})학습 데이터 도메인 조합for key in train_domain_combs_counter: print(f"{key:15}\t: {train_domain_combs_counter[key]}")관광, 식당 : 639 관광 : 458 관광, 식당, 지하철 : 283 택시 : 344 식당, 택시 : 481 숙소, 택시 : 443 식당 : 524 숙소, 식당 : 895 관광, 숙소 : 526 관광, 숙소, 식당 : 1010 숙소 : 476 관광, 택시 : 373 숙소, 식당, 택시 : 308 관광, 숙소, 지하철 : 184 관광, 식당, 택시 : 370 관광, 숙소, 택시 : 389 관광, 지하철 : 86 숙소, 식당, 지하철 : 136 식당, 지하철 : 42 숙소, 지하철 : 29 지하철 : 4테스트 데이터 도메인 조합for key, value in test_domain_combs_counter.items(): print(f"{key:15}\t: {value}")관광, 숙소, 식당 : 208 식당, 택시 : 27 숙소 : 92 관광, 식당, 지하철 : 35 관광, 숙소 : 36 관광, 숙소, 택시 : 51 숙소, 식당, 택시 : 72 관광, 숙소, 지하철 : 58 관광, 식당, 택시 : 42 관광, 지하철 : 14 숙소, 식당 : 64 식당 : 92 숙소, 식당, 지하철 : 63 관광, 식당 : 29 관광 : 74 숙소, 택시 : 23 관광, 택시 : 8 식당, 지하철, 택시 : 6 식당, 지하철 : 1 택시 : 5도메인 조합 그래프plt.rc('font', family='NanumGothic') domain_combs_counter = sorted(domain_combs_counter.items(), key=lambda k_v: k_v[1]) domain_combs_counter = {key:value for key, value in domain_combs_counter} dom_topics = [key for key in domains_counter.keys()] dom_train_value = [train_domains_counter[key] for key in dom_topics] dom_test_value = [test_domains_counter[key] for key in dom_topics] cdom_topics = [key for key in domain_combs_counter.keys()] cdom_train_value = [train_domain_combs_counter[key] for key in cdom_topics] cdom_test_value = [test_domain_combs_counter[key] for key in cdom_topics] plt.rcParams["figure.figsize"] = (9, 7) plt.bar(dom_topics, dom_train_value) plt.bar(dom_topics, dom_test_value, bottom=dom_train_value) plt.legend(['Train', 'Test']) plt.show() plt.rcParams["figure.figsize"] = (12, 7) plt.barh(cdom_topics, cdom_train_value) plt.barh(cdom_topics, cdom_test_value, left=cdom_train_value) plt.legend(['Train', 'Test']) plt.show()Chapter 1, Table 1This notebook explains how I used the Harvard General Inquirer to *streamline* interpretation of a predictive model.I'm italicizing the word "streamline" because I want to emphasize that I place very little weight on the Inquirer: as I say in the text, "The General Inquirer has no special authority, and I have tried not to make it a load-bearing element of this argument." To interpret a model, I actually spend a lot of time looking at lists of features, as well as predictions about individual texts. But to *explain* my interpretation, I need some relatively simple summary. Given real-world limits on time and attention, going on about lists of individual words for five pages is rarely an option. 
So, although wordlists are crude and arbitrary devices, flattening out polysemy and historical change, I am willing to lean on them rhetorically, where I find that they do in practice echo observations I have made in other ways.I should also acknowledge that I'm not using the General Inquirer as it was designed to be used. The full version of this tool is not just a set of wordlists, it's a software package that tries to get around polysemy by disambiguating different word senses. I haven't tried to use it in that way: I think it would complicate my explanation, in order to project an impression of accuracy and precision that I don't particularly want to project. Instead, I have stressed that word lists are crude tools, and I'm using them only as crude approximations.That said, how do I do it?To start with, we'll load an array of modules. Some standard, some utilities that I've written myself.# some standard modules import csv, os, sys from collections import Counter import numpy as np from scipy.stats import pearsonr # now a module that I wrote myself, located # a few directories up, in the software # library for this repository sys.path.append('../../lib') import FileCabinet as filecabLoading the General Inquirer.This takes some doing, because the General Inquirer doesn't start out as a set of wordlists. I have to translate it into that form.I start by loading an English dictionary.# start by loading the dictionary dictionary = set() with open('../../lexicons/MainDictionary.txt', encoding = 'utf-8') as f: reader = csv.reader(f, delimiter = '\t') for row in reader: word = row[0] count = int(row[2]) if count < 10000: continue # that ignores very rare words # we end up with about 42,700 common ones else: dictionary.add(word)The next stage is to translate the Inquirer. It begins as a table where word senses are row labels, and the Inquirer categories are columns (except for two columns at the beginning and two at the end). This is, by the way, the "basic spreadsheet" described at this site:http://www.wjh.harvard.edu/~inquirer/spreadsheet_guide.htmI translate this into a dictionary where the keys are Inquirer categories, and the values are sets of words associated with each category.But to do that, I have to do some filtering and expanding. Different senses of a word are broken out in the spreadsheet thus:ABOUT1ABOUT2ABOUT3etc.I need to separate the hashtag part. Also, because I don't want to allow rare senses of a word too much power, I ignore everything but the first sense of a word.However, I also want to allow singular verb forms and plural nouns to count. So there's some code below that expands words by adding -s -ed, etc to the end. 
See the *suffixes* dictionary defined below for more details.inquirer = dict() suffixes = dict() suffixes['verb'] = ['s', 'es', 'ed', 'd', 'ing'] suffixes['noun'] = ['s', 'es'] allinquirerwords = set() with open('../../lexicons/inquirerbasic.csv', encoding = 'utf-8') as f: reader = csv.DictReader(f) fields = reader.fieldnames[2:-2] for field in fields: inquirer[field] = set() for row in reader: term = row['Entry'] if '#' in term: parts = term.split('#') word = parts[0].lower() sense = int(parts[1].strip('_ ')) partialsense = True else: word = term.lower() sense = 0 partialsense = False if sense > 1: continue # we're ignoring uncommon senses pos = row['Othtags'] if 'Noun' in pos: pos = 'noun' elif 'SUPV' in pos: pos = 'verb' forms = {word} if pos == 'noun' or pos == 'verb': for suffix in suffixes[pos]: if word + suffix in dictionary: forms.add(word + suffix) if pos == 'verb' and word.rstrip('e') + suffix in dictionary: forms.add(word.rstrip('e') + suffix) for form in forms: for field in fields: if len(row[field]) > 1: inquirer[field].add(form) allinquirerwords.add(form) print('Inquirer loaded') print('Total of ' + str(len(allinquirerwords)) + " words.")Inquirer loaded Total of 13707 words.Load model predictions about volumesThe next step is to create some vectors that store predictions about volumes. In this case, these are predictions about the probability that a volume is fiction, rather than biography.# the folder where wordcounts will live # we're only going to load predictions # that correspond to files located there sourcedir = '../sourcefiles/' docs = [] logistic = [] with open('../plotdata/the900.csv', encoding = 'utf-8') as f: reader = csv.DictReader(f) for row in reader: genre = row['realclass'] docid = row['volid'] if not os.path.exists(sourcedir + docid + '.tsv'): continue docs.append(row['volid']) logistic.append(float(row['logistic'])) logistic = np.array(logistic) numdocs = len(docs) assert numdocs == len(logistic) print("We have information about " + str(numdocs) + " volumes.")We have information about 890 volumes.And get the wordcounts themselvesThis cell of the notebook is very short (one line), but it takes a lot of time to execute. There's a lot of file i/o that happens inside the function get_wordcounts, in the FileCabinet module, which is invoked here. We come away with a dictionary of wordcounts, keyed in the first instance by volume ID.wordcounts = filecab.get_wordcounts(sourcedir, '.tsv', docs)Now calculate the representation of each Inquirer category in each docWe normalize by the total wordcount for a volume.This cell also takes a long time to run. I've added a counter so you have some confidence that it's still running.# Initialize empty category vectors categories = dict() for field in fields: categories[field] = np.zeros(numdocs) # Now fill them for i, doc in enumerate(docs): ctcat = Counter() allcats = 0 for word, count in wordcounts[doc].items(): if word in dictionary: allcats += count if word not in allinquirerwords: continue for field in fields: if word in inquirer[field]: ctcat[field] += count for field in fields: categories[field][i] = ctcat[field] / (allcats + 0.1) # Laplacian smoothing there to avoid div by zero, among other things. if i % 100 == 1: print(i, allcats)1 91011 101 84002 201 16285 301 56847 401 51395 501 185568 601 93254 701 84775 801 85951Calculate correlationsNow that we have all the information, calculating correlations is easy. 
We iterate through Inquirer categories, in each case calculating the correlation between a vector of model predictions for docs, and a vector of category-frequencies for docs.logresults = [] for inq_category in fields: l = pearsonr(logistic, categories[inq_category])[0] logresults.append((l, inq_category)) logresults.sort()Load expanded names of Inquirer categoriesThe terms used in the inquirer spreadsheet are not very transparent. ```DAV``` for instance is "descriptive action verbs." ```BodyPt``` is "body parts." To make these more transparent, I have provided expanded names for many categories that turned out to be relevant in the book, trying to base my description on the accounts provided here: http://www.wjh.harvard.edu/~inquirer/homecat.htmWe load these into a dictionary.short2long = dict() with open('../../lexicons/long_inquirer_names.csv', encoding = 'utf-8') as f: reader = csv.DictReader(f) for row in reader: short2long[row['short_name']] = row['long_name']Print resultsI print the top 12 correlations and the bottom 12, skipping categories that are drawn from the "Laswell value dictionary." The Laswell categories are very finely discriminated (things like "enlightenment gain" or "power loss"), and I have little faith that they're meaningful. I especially doubt that they could remain meaningful when the Inquirer is used crudely as a source of wordlists.print('Printing the correlations of General Inquirer categories') print('with the predicted probabilities of being fiction in allsubset2.csv:') print() print('First, top positive correlations: ') print() for prob, n in reversed(logresults[-12 : ]): if n in short2long: n = short2long[n] if 'Laswell' in n: continue else: print(str(prob) + '\t' + n) print() print('Now, negative correlations: ') print() for prob, n in logresults[0 : 12]: if n in short2long: n = short2long[n] if 'Laswell' in n: continue else: print(str(prob) + '\t' + n)Printing the correlations of General Inquirer categories with the predicted probabilities of being fiction in allsubset2.csv: First, top positive correlations: 0.814883672084 action verbs 0.723336865012 body parts 0.719677253657 verbs of sensory perception 0.683865798179 verbs of dialogue 0.683177448649 physical adjectives 0.64713747568 second-person pronouns (likely in dialogue) 0.622209843367 weakness 0.618004178737 interjections and exclamations 0.615809862443 Work 0.598951530674 Stay 0.596355158769 understatement and qualification Now, negative correlations: -0.740594049611 political terms -0.729728271214 organized systems of belief or knowledge -0.725883030105 abstract means -0.692310519992 also power -0.685490522417 power -0.674993375953 economic terms -0.669967392984 political terms -0.665847187129 human collectivities -0.599582771882 ABSText Classification and Word EmbeddingIn this set of notes, we'll discuss the problem of *text classification*. Text classification is a common problem in which we aim to classify pieces of text into different categories. These categories might be about:- **Subject matter**: is this news article about news, fashion, finance?- **Emotional valence**: is this tweet happy or sad? Excited or calm? This particular class of questions is so important that it has its own name: *sentiment analysis.* - **Automated content moderation**: is this Facebook comment a possible instance of abuse or harassment? Is this Reddit thread promoting violence? Is this email spam? These are all very different kinds of questions, but many of the same techniques can be used. 
In these notes, we'll do a simple example of subject matter classification. In future notes, we'll also do some sentiment analysis. Bias in Natural Language ProcessingLike all other machine learning algorithms, natural language algorithms naturally inherit the biases of both the data on which they are trained and the choices made by researchers in training the algorithms. A sub-theme of this set of lectures is the need to carefully check our model outputs so that we can understand the impact of each of these. Optional Review- [Term-document matrices](https://nbviewer.jupyter.org/github/PhilChodrow/PIC16A/blob/master/content/NLP/NLP_1.ipynb). - [Sentiment analysis](https://nbviewer.jupyter.org/github/PhilChodrow/PIC16A/blob/master/content/NLP/NLP_3.ipynb). Related Resources- This set of lecture notes is partially based on this [official tutorial](https://www.tensorflow.org/tutorials/keras/text_classification). Heads UpTo run the code in this notebook, you will actually need to **update TensorFlow**. To do this, open up a terminal and type the following two lines: ```bashconda activate PIC16Bpip install tensorflow==2.4```import numpy as np import pandas as pd import tensorflow as tf import re import string from tensorflow.keras import layers from tensorflow.keras import losses # requires update to tensorflow 2.4 # >>> conda activate PIC16B # >>> pip install tensorflow==2.4 from tensorflow.keras.layers.experimental.preprocessing import TextVectorization from tensorflow.keras.layers.experimental.preprocessing import StringLookup from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # for embedding viz import plotly.express as px import plotly.io as pio pio.templates.default = "plotly_white"For this example, we are going to use a data set containing headlines from a large number of different news articles on the website [HuffPost](https://www.huffpost.com/). I retrieved this data [from Kaggle](https://www.kaggle.com/rmisra/news-category-dataset).url = "https://raw.githubusercontent.com/PhilChodrow/PIC16B/master/datasets/news/News_Category_Dataset_v2.json" df = pd.read_json(url, lines=True) df = df[["category", "headline"]]There are over 200,000 headlines listed here, along with the category in which they appeared on the website.df.head()Our task will be to teach an algorithm to classify headlines by predicting the category based on the text of the headline. Training a model on this much text data can require a lot of time, so we are going to simplify the problem a little bit, by reducing the number of categories. Let's take a look at which categories we have:df.groupby("category").size()Some of these categories are a little odd:- "Women"? - "Weird News"? - What's the difference between "Style," "Style & Beauty," and "Taste"? ). - "Parenting" vs. "Parents"? - Etc?...Well, there are definitely some questions here! Let's just choose a few categories, and discard the rest:categories = ["STYLE", "SCIENCE", "TECH"] df = df[df["category"].apply(lambda x: x in categories)] df.head()Next, we'll use a `LabelEncoder` to transform the `category` column into integers. 
**Note**: I couldn't find a way that I was satisfied with to do this in TensorFlow, but if you know a smooth way, let me know!le = LabelEncoder() df["category"] = le.fit_transform(df["category"]) df.head()Later, we'll be able to remember which integers correspond to which classes using the `classes_` attribute of the encoder.le.classes_We're left with a much smaller number of rows, which will be much easier to work with. So Far.......we have accessed our data, examined the categories available, and taken a subset of the data corresponding to just three categories. TensorFlow DatasetsNext, we are going create a TensorFlow `Dataset` from our data frame. While we often talk colloquially about "data sets", TensorFlow has a special `Dataset` class with a number of convenient capabilities. Use of `Dataset`s is generally optional, but can make it significantly easier to stay organized when writing data pipelines. The `Dataset` class also includes functionality for a wide-variety of data input scenarios, including situations in which the data should be read in chunks-at-a-time from disk. The `Dataset` class is useful for all kinds of problems, not just text classification problems. Learn more about it [here](https://www.tensorflow.org/guide/data).We'll make a dataset with the predictor data (the headline) and target data (the category) separated out.data = tf.data.Dataset.from_tensor_slices((df["headline"], df["category"])) for headline, category in data.take(5): print(headline) print(category) print("")tf.Tensor(b'Facebook Accused Of Reading Texts And Accessing Microphones In Lawsuit', shape=(), dtype=string) tf.Tensor(2, shape=(), dtype=int64) tf.Tensor(b'Self-Driving Uber In Fatal Accident Had 6 Seconds To React Before Crash', shape=(), dtype=string) tf.Tensor(2, shape=(), dtype=int64) tf.Tensor(b'Scientists Turn To DNA Technology To Search For Loch Ness Monster', shape=(), dtype=string) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(b"Instagram Is Adding A 'Mute' Button For The Sake Of Your Sanity", shape=(), dtype=string) tf.Tensor(2, shape=(), dtype=int64) tf.Tensor(b'Unusual Asteroid Could Be An Interstellar Guest To Our Solar System', shape=(), dtype=string) tf.Tensor(0, shape=(), dtype=int64)Now we'll perform a train-test split. We'll also take out a small validation set.data = data.shuffle(buffer_size = len(data)) train_size = int(0.7*len(data)) val_size = int(0.1*len(data)) train = data.take(train_size) val = data.skip(train_size).take(val_size) test = data.skip(train_size + val_size) len(train), len(val), len(test)So far.......we have created a special TensorFlow `Dataset` and split it into training, validation, and testing sets. Standardization and Vectorization*Standardization* refers to the act of taking a some text that's "messy" in some way and making it less messy. Common standardizations include: - Removing capitals. - Removing punctuation. - Removing HTML elements or other non-semantic content. In this standardization, we convert all text to lowercase and remove punctuation.def standardization(input_data): lowercase = tf.strings.lower(input_data) no_punctuation = tf.strings.regex_replace(lowercase, '[%s]' % re.escape(string.punctuation),'') return no_punctuation*Vectorization* refers to the process of representing text as a vector (array, tensor). There are multiple ways to carry out vectorization. For example, forming a *term-document matrix*, as demonstrated in the optional review lecture notes, is one way to form vectors from text. 
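As a quick refresher on the term-document idea, here is a minimal sketch; it uses scikit-learn's `CountVectorizer` (an assumption of this sketch, not necessarily what the review notes use), and the two sample headlines are taken from the data printed earlier.

```python
# Minimal term-document matrix sketch using scikit-learn's CountVectorizer
# (assumption: scikit-learn is available; the review notes may build it differently).
from sklearn.feature_extraction.text import CountVectorizer

sample_headlines = [
    "Scientists Turn To DNA Technology To Search For Loch Ness Monster",
    "Instagram Is Adding A 'Mute' Button For The Sake Of Your Sanity",
]
cv = CountVectorizer()
tdm = cv.fit_transform(sample_headlines)  # sparse matrix: rows = headlines, columns = words
print(sorted(cv.vocabulary_))             # learned vocabulary
print(tdm.toarray())                      # word counts per headline
```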
Here, we'll use a different approach: we'll replace each word by its *frequency rank* in the data. For example, the headline> Poll: Penguins Best Birdmight have representation ```[708, 1567, 89, 632].```This means that "poll" is the 708th most common word in the data set, "penguins" is the 1567 most common word in the data set, and so on. For technical details on how TensorFlow carries out the vectorization, check [the docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/TextVectorization). Note that we pass the standardization from above as an argument to the vectorization layer.# only the top distinct words will be tracked max_tokens = 2000 # each headline will be a vector of length 25 sequence_length = 25 vectorize_layer = TextVectorization( standardize=standardization, max_tokens=max_tokens, # only consider this many words output_mode='int', output_sequence_length=sequence_length)We need to *adapt* the vectorization layer to the headlines. In the adaptation process, the vectorization layer learns what words are common in the headlines.headlines = train.map(lambda x, y: x) vectorize_layer.adapt(headlines)Now we're ready to vectorize each of the data sets. To do so, we define a helper function that operates on our Datasets. Note that our Dataset consists of a bunch of tuples of the form (headline, category) for each data observation. Our helper function therefore accepts and returns two variables. **Note**: because we adapted the vectorization layer to the training data, not the validation or testing data, we aren't "cheating" by propagating information from the validation or testing data prior to the training step.def vectorize_headline(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), [label] train_vec = train.map(vectorize_headline) val_vec = val.map(vectorize_headline) test_vec = test.map(vectorize_headline)Let's take a look at a vectorized piece of text.list(train_vec.take(2))So far......we have finally prepared our data! We have represented each of our headlines as numerical vectors, which are something that TensorFlow is able to understand. ModelingPhew, that was a lot of data preparation! That's kind of how it is in the world of machine learning: so much of the effort goes into ensuring that your data is correctly formatted and represented. Let's now construct a simple model out of some layers. This model is going to have a few new components. The most interesting of these, which we are going to come back to, is the `Embedding` layer. Because we're going to come back to it, let's give it a name!model = tf.keras.Sequential([ layers.Embedding(max_tokens, output_dim = 3, name="embedding"), layers.Dropout(0.2), layers.GlobalAveragePooling1D(), layers.Dropout(0.2), layers.Dense(len(categories))] ) model.compile(loss=losses.SparseCategoricalCrossentropy(from_logits=True), optimizer='adam', metrics=['accuracy'])Let's go ahead and fit our model.history = model.fit(train_vec, epochs = 20, validation_data = val_vec) from matplotlib import pyplot as plt plt.plot(history.history["accuracy"], label = "training") plt.plot(history.history["val_accuracy"], label = "validation") plt.gca().set(xlabel = "epoch", ylabel = "accuracy") plt.legend()At this point, it would be appropriate to be somewhat disturbed -- can it really be correct that the validation accuracy is *higher* than the training accuracy? That doesn't seem right, does it? The reason this occurs is due to those `Dropout` layers in the model. 
What these layers do is disable ("drop out") a fixed percentage of the units in each layer, *but only during training.* This turns out to be a good way to reduce the risk of overfitting. Because the units are used during validation and testing, but not during training, it can indeed happen that the validation and testing scores are higher. Predictions on Unseen DataLet's check our model performance on unseen data.model.evaluate(test_vec)1304/1304 [==============================] - 2s 2ms/step - loss: 0.2014 - accuracy: 0.9202Not bad! We're able to correctly classify the category of a given news headline on HuffPost about 92% of the time, at least when we are choosing between the three categories that we selected earlier. So far......we have trained our model and evaluated it on unseen data, obtaining reasonable results. EmbeddingsA *word embedding* refers to a representation of a word in a vector space. Each word is assigned an individual vector. The general aim of a word embedding is to create a representation such that words with related meanings are close to each other in a vector space, while words with different meanings are farther apart. One usually hopes for the *directions* connecting words to be meaningful as well. Here's a nice diagram illustrating some of the general concepts: ![](https://miro.medium.com/max/1838/1*OEmWDt4eztOcm5pr2QbxfA.png)*Image credit: [Towards Data Science](https://towardsdatascience.com/creating-word-embeddings-coding-the-word2vec-algorithm-in-python-using-deep-learning-b337d0ba17a8)*Word embeddings are often produced as intermediate stages in many machine learning algorithms. In fact, we already made one -- it's the `Embedding` layer at the base of our model. Let's take a look at the embedding layer to see how our own model represents words in a vector space. We chose to create a 3-dimensional embedding when constructing our model. This is fine for today, but state-of-the-art embeddings will typically have a much higher number of dimensions. For example, the [Embedding Projector demo](http://projector.tensorflow.org/) supplied by TensorFlow uses a default dimension of 200.weights = model.get_layer('embedding').get_weights()[0] # get the weights from the embedding layer vocab = vectorize_layer.get_vocabulary() # get the vocabulary from our data prep for later weightsThe collection of weights is 3-dimensional. For plotting in 2 dimensions, we have several choices for how to reduce the data to a 2d representation. A very simple and standard approach is our friend, principal component analysis (PCA).from sklearn.decomposition import PCA pca = PCA(n_components=2) weights = pca.fit_transform(weights)Now we'll make a data frame from our results:embedding_df = pd.DataFrame({ 'word' : vocab, 'x0' : weights[:,0], 'x1' : weights[:,1] }) embedding_dfReady to plot! Note that the embeddings appear to be "stretched out" in three directions, with one direction corresponding to each of the three categories (tech, style, science).import plotly.express as px fig = px.scatter(embedding_df, x = "x0", y = "x1", size = list(np.ones(len(embedding_df))), size_max = 2, hover_name = "word") fig.show()Cool, we made a word embedding! This embedding seems to have learned some reasonable associations. For example, we see that words like "Mars", "NASA", and "space" are relatively close to each other. So are "Facebook", "Google", and "Apple", as well as "fashion", "dress", and "style."
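If you want a quick numerical check on these "closeness" claims, cosine similarity between the raw (pre-PCA) embedding vectors does the job. This is a minimal sketch; it assumes the queried words survived the 2,000-token vocabulary cut, and `vocab.index` will raise a `ValueError` if one did not.

```python
# Re-fetch the raw 3-d embedding weights (the `weights` variable above was
# overwritten by its 2-d PCA projection) and compare a few word vectors.
emb = model.get_layer('embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()

def cosine(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

def vec(word):
    return emb[vocab.index(word)]  # assumes `word` is in the learned vocabulary

print(cosine(vec("nasa"), vec("space")))    # expect a relatively high similarity
print(cosine(vec("nasa"), vec("fashion")))  # expect a lower similarity
```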
Bias in Language ModelsWhenever we create a machine learning model that might conceivably have impact on the thoughts or actions of human beings, we have a responsibility to understand the limitations and biases of that model. Biases can enter into machine learning models through several routes, including the data used as well as choices made by the modeler along the way. For example, in our case: 1. **Data**: we used data from a popular news source. 2. **Modeler choice**: we only used data corresponding to a certain subset of labels. With these considerations in mind, let's see what kinds of words our model associates with female and male genders.feminine = ["she", "her", "woman"] masculine = ["he", "him", "man"] highlight_1 = ["strong", "powerful", "smart", "thinking"] highlight_2 = ["hot", "sexy", "beautiful", "shopping"] def gender_mapper(x): if x in feminine: return 1 elif x in masculine: return 4 elif x in highlight_1: return 3 elif x in highlight_2: return 2 else: return 0 embedding_df["highlight"] = embedding_df["word"].apply(gender_mapper) embedding_df["size"] = np.array(1.0 + 50*(embedding_df["highlight"] > 0)) import plotly.express as px fig = px.scatter(embedding_df, x = "x0", y = "x1", color = "highlight", size = list(embedding_df["size"]), size_max = 10, hover_name = "word") fig.show()0. ref * https://pypi.org/project/keras-self-attention/* https://stackoverflow.com/questions/58356868/how-visualize-attention-lstm-using-keras-self-attention-package 1. basic usageimport keras from keras_self_attention import SeqSelfAttention model = keras.models.Sequential() model.add(keras.layers.Embedding(input_dim=100, output_dim=30, mask_zero=True)) model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=32, return_sequences=True))) model.add(SeqSelfAttention(attention_activation='sigmoid')) model.add(keras.layers.Dense(units=5)) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'], ) model.summary() # random data for test import numpy as np x_data = np.random.rand(50,100)#50 samples y_data = np.random.randint(2, size=(50, 100,5)) print(x_data.shape, y_data.shape) model.fit(x_data,y_data)Practical session 1> - > - > - Import libraryimport pandas as pd import numpy as np from scipy.stats import mode import seaborn as sns from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, confusion_matrix, accuracy_scoreFunctions#separate train and test train_test_split function' def train_test_split_local(X, y): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) train_test_split(y, shuffle=False) return X_train, X_test, y_train, y_test def read(file_name, fheader, fuser, ftrial): fsadl1 = pd.read_csv(file_name, sep=' ', header=None) fdata = fsadl1.iloc[:, :243] fdata.columns = fheader fdata=fdata[fdata.columns[np.r_[0:45,50:58,63:71,76:84,89:97,102:133]]] flabels = fsadl1.iloc[:,243] ## Preprocessing data #find and remove rows with all nulls fidx=fdata.index[fdata.isnull().all(1)] #1 is the axis for rows #select data not in idx, that is data that is not all null fdata = fdata[~fdata.index.isin(fidx)] #same for labels flabels = flabels[~flabels.index.isin(fidx)] #see how many there are of each label #what does it mean ? 
flabels.value_counts() #fill missing values fdata = fdata.fillna(method='ffill',axis=1) fdata['user'] = fuser fdata['trial'] = ftrial return fdata, flabels def windowing(fdata, window_number, window_text, porcentage, flabels, frol): ffiltered_data = fdata[columns].rolling(frol).median() ffiltered_data['MILLISEC'] = fdata.MILLISEC # Windowing and Feature Extraction ffiltered_data['time']=pd.to_datetime(fdata.MILLISEC,unit='ms') ffiltered_data.index=ffiltered_data.time #calculate mean over a 1 second window keep = ffiltered_data.time.dt.microsecond/window_number %porcentage keep = keep - keep.shift() < 0 means = ffiltered_data[columns].rolling(window_text).mean()[keep] means.columns = [str(col) + '_mean' for col in means.columns] variances = ffiltered_data[columns].rolling(window_text).var()[keep] variances.columns = [str(col) + '_var' for col in variances.columns] #talk about apply function flabels.index = ffiltered_data.time mode_labels = flabels.rolling(window_text).apply(lambda x:mode(x)[0])[keep] #all features fall_features = pd.concat([means, variances],axis=1) fall_features['label'] = mode_labels fall_features['user'] = user fall_features['trial'] = trial return fall_features def plot_confusion_matrix(cm, names, title='MATRIZ DE CONFUSIÓN', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(names)) plt.xticks(tick_marks, names, rotation=45) plt.yticks(tick_marks, names) plt.tight_layout() plt.ylabel('Clase real') plt.xlabel('Clase predicha')Execises definitions and clasifiersdef excercise_1 (fall_data, estimators): # Exercise 1 # Random train test split # Labels are the values we want to predict labels = np.array(fall_data['label']) # Remove the labels from the features # axis 1 refers to the columns features = fall_data.drop('label', axis = 1) features = features.drop('user', axis = 1) features = features.drop('trial', axis = 1) # Saving feature names for later use feature_list = list(features.columns) # Convert to numpy array features = np.array(features) X_train, X_test, y_train, y_test = train_test_split_local(features, labels) print('X_train:', X_train.shape) print('X_test:', X_test.shape) print('y_train:', y_train.shape) print('y_test:', y_test.shape) classifier = RandomForestClassifier(n_estimators=estimators, random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) labels = [0,1,2,4,5] print("\n _______________________________________________________") print("confusion matrix: \n") print(confusion_matrix(y_test,y_pred)) print("\n _______________________________________________________") print("classification report: \n") print(classification_report(y_test,y_pred)) print("_______________________________________________________") print("accuracy score: " + str(accuracy_score(y_test, y_pred))) plt.figure(figsize=(15,8)) plot_confusion_matrix(confusion_matrix(y_test,y_pred), labels) def excercise_2 (fall_data, estimators): # Excercise 2 # user 1 2 3 train test 4 # Labels are the values we want to predict # Remove the labels from the features # axis 1 refers to the columns user123 = fall_data[fall_data['user'].isin([1, 2, 3])] user4 = fall_data[fall_data['user'].isin([4])] y_train_2 = np.array(user123['label']) y_test_2 = np.array(user4['label']) user123 = user123.drop('label', axis = 1) user123 = user123.drop('user', axis = 1) user123 = user123.drop('trial', axis = 1) user4 = user4.drop('label', axis = 1) user4 = user4.drop('user', axis = 1) user4 = user4.drop('trial', axis = 
1) X_train_2 = np.array(user123) X_test_2 = np.array(user4) print('X_train_2:', X_train_2.shape) print('X_test_2:', X_test_2.shape) print('y_train_2:', y_train_2.shape) print('y_test_2:', y_test_2.shape) classifier2 = RandomForestClassifier(n_estimators=estimators, random_state=0) classifier2.fit(X_train_2, y_train_2) y_pred_2 = classifier2.predict(X_test_2) print("\n _______________________________________________________") print("confusion matrix: \n") print(confusion_matrix(y_test_2,y_pred_2)) print("\n _______________________________________________________") print("classification report: \n") print(classification_report(y_test_2,y_pred_2)) print("_______________________________________________________") print("accuracy score: " +str(accuracy_score(y_test_2, y_pred_2))) labels = [0,1,2,4,5] plt.figure(figsize=(15,8)) plot_confusion_matrix(confusion_matrix(y_test_2,y_pred_2), labels) def excercise_3 (fall_data, estimators): # Excercise 3 # 1,2,3 and drill session as training data and trials 4 and 5 as test data. # Labels are the values we want to predict # Remove the labels from the features # axis 1 refers to the columns trial1236 = fall_data[fall_data['trial'].isin([1, 2, 3, 6])] trial45 = fall_data[fall_data['trial'].isin([4, 5])] y_train_3 = np.array(trial1236['label']) y_test_3 = np.array(trial45['label']) trial1236 = trial1236.drop('label', axis = 1) trial1236 = trial1236.drop('user', axis = 1) trial1236 = trial1236.drop('trial', axis = 1) trial45 = trial45.drop('label', axis = 1) trial45 = trial45.drop('user', axis = 1) trial45 = trial45.drop('trial', axis = 1) X_train_3 = np.array(trial1236) X_test_3 = np.array(trial45) print('X_train_3:', X_train_3.shape) print('X_test_3:', X_test_3.shape) print('y_train_3:', y_train_3.shape) print('y_test_3:', y_test_3.shape) classifier = RandomForestClassifier(n_estimators=estimators, random_state=0) classifier.fit(X_train_3, y_train_3) y_pred_3 = classifier.predict(X_test_3) print("\n _______________________________________________________") print("confusion matrix: \n") print(confusion_matrix(y_test_3,y_pred_3)) print("\n _______________________________________________________") print("classification report: \n") print(classification_report(y_test_3,y_pred_3)) print("_______________________________________________________") print("accuracy score: " + str(accuracy_score(y_test_3, y_pred_3))) labels = [0,1,2,4,5] plt.figure(figsize=(15,8)) plot_confusion_matrix(confusion_matrix(y_test_3,y_pred_3), labels)Read datapath = 'dataset/' #enter thepath for the dataset folder header_path = 'header.csv' #enter the path for the header file header=pd.read_csv(header_path,names=['column',''])['column'].values users = range(1,2) trials = range(1,2) all_data_1S = pd.DataFrame() all_data_2S = pd.DataFrame() all_data_5S = pd.DataFrame() all_data_10S = pd.DataFrame() for user in users: for trial in trials: if trial == 6: file_name = path+'S'+str(user)+'-Drill'+'.dat' else: file_name = path+'S'+str(user)+'-ADL'+str(trial)+'.dat' data, labels = read(file_name, header, user, trial) columns = data.columns[~data.columns.isin(['user', 'trial','MILLISEC'])] #we use a window of 11 elements # Filtering using median filter all_features_1S = windowing(data, 1000, '1S', 500, labels, 11) all_data_1S = pd.concat([all_data_1S, all_features_1S]) all_features_2S = windowing(data, 2000, '2S', 1000, labels, 11) all_data_2S = pd.concat([all_data_2S, all_features_2S]) all_features_5S = windowing(data, 5000, '5S', 2500, labels, 11) all_data_5S = pd.concat([all_data_5S, 
all_features_5S]) all_features_10S = windowing(data, 10000, '10S', 5000, labels, 11) all_data_10S = pd.concat([all_data_10S, all_features_10S])/Users/yure/Documents/UN/Machine Learning/ml/lib/python3.7/site-packages/ipykernel_launcher.py:53: FutureWarning: Currently, 'apply' passes the values as ndarrays to the applied function. In the future, this will change to passing it as Series objects. You need to specify 'raw=True' to keep the current behaviour, and you can pass 'raw=False' to silence this warningExploratory data analysisall_data_1S.head() data.dtypes %matplotlib inline # Correlaton matrix to identify principal sensor sns.heatmap(data.corr()) features_analisys = all_data_1S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'Accelerometer_RKN__accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'InertialMeasurementUnit_BACK_gyroY_mean', 'InertialMeasurementUnit_BACK_magneticY_mean', 'Accelerometer_RKN^_accY_var', 'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'Accelerometer_RKN__accY_var', 'InertialMeasurementUnit_BACK_accY_var', 'InertialMeasurementUnit_BACK_gyroY_var', 'InertialMeasurementUnit_BACK_magneticY_var' ], axis=1) corr = features_analisys.corr() fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(corr,cmap='coolwarm', vmin=-1, vmax=1) fig.colorbar(cax) ticks = np.arange(0,len(features_analisys.columns),1) ax.set_xticks(ticks) plt.xticks(rotation=90) ax.set_yticks(ticks) ax.set_xticklabels(features_analisys.columns) ax.set_yticklabels(features_analisys.columns) plt.show() features_analisys = all_data_1S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'Accelerometer_RKN^_accY_var', 'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'InertialMeasurementUnit_BACK_accY_var' ], axis=1) corr = features_analisys.corr() fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(corr,cmap='coolwarm', vmin=-1, vmax=1) fig.colorbar(cax) ticks = np.arange(0,len(features_analisys.columns),1) ax.set_xticks(ticks) plt.xticks(rotation=90) ax.set_yticks(ticks) ax.set_xticklabels(features_analisys.columns) ax.set_yticklabels(features_analisys.columns) plt.show() all_data_1S_filtered = all_data_1S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'Accelerometer_RKN^_accY_var', 'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'InertialMeasurementUnit_BACK_accY_var', 'label', 'user', 'trial' ], axis=1) all_data_2S_filtered = all_data_2S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'Accelerometer_RKN^_accY_var', 'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'InertialMeasurementUnit_BACK_accY_var', 'label', 'user', 'trial' ], axis=1) all_data_5S_filtered = all_data_5S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'Accelerometer_RKN^_accY_var', 'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'InertialMeasurementUnit_BACK_accY_var', 'label', 'user', 'trial' ], axis=1) all_data_10S_filtered = all_data_10S.filter(['Accelerometer_RKN^_accY_mean', 'Accelerometer_HIP_accY_mean', 'Accelerometer_BACK_accY_mean', 'InertialMeasurementUnit_BACK_accY_mean', 'Accelerometer_RKN^_accY_var', 
'Accelerometer_HIP_accY_var', 'Accelerometer_BACK_accY_var', 'InertialMeasurementUnit_BACK_accY_var', 'label', 'user', 'trial' ], axis=1) print("\n _______________________________________________________") print(all_data_1S_filtered.head()) print("\n _______________________________________________________") print(all_data_2S_filtered.head()) print("\n _______________________________________________________") print(all_data_5S_filtered.head()) print("\n _______________________________________________________") print(all_data_10S_filtered.head()) print("\n _______________________________________________________") print(all_data_1S_filtered.label.value_counts()) print("\n _______________________________________________________") print(all_data_2S_filtered.label.value_counts()) print("\n _______________________________________________________") print(all_data_5S_filtered.label.value_counts()) print("\n _______________________________________________________") print(all_data_10S_filtered.label.value_counts()) print("\n _______________________________________________________") print(all_data_1S_filtered.trial.value_counts()) print("\n _______________________________________________________") print(all_data_2S_filtered.trial.value_counts()) print("\n _______________________________________________________") print(all_data_5S_filtered.trial.value_counts()) print("\n _______________________________________________________") print(all_data_10S_filtered.trial.value_counts()) print("\n _______________________________________________________") print(all_data_1S_filtered.user.value_counts()) print("\n _______________________________________________________") print(all_data_2S_filtered.user.value_counts()) print("\n _______________________________________________________") print(all_data_5S_filtered.user.value_counts()) print("\n _______________________________________________________") print(all_data_10S_filtered.user.value_counts()) #separate by class, see feature mean all_data_1S_0 = all_data_1S_filtered[all_data_1S_filtered.label==0] all_data_1S_1 = all_data_1S_filtered[all_data_1S_filtered.label==1] all_data_1S_2 = all_data_1S_filtered[all_data_1S_filtered.label==2] all_data_1S_4 = all_data_1S_filtered[all_data_1S_filtered.label==4] all_data_1S_5 = all_data_1S_filtered[all_data_1S_filtered.label==5] draw_col = 10 sns.distplot(all_data_1S_0.iloc[:,draw_col], hist=False, kde=True, color='red') sns.distplot(all_data_1S_1.iloc[:,draw_col], hist=False, kde=True, color='green') sns.distplot(all_data_1S_2.iloc[:,draw_col], hist=False, kde=True, color='yellow') sns.distplot(all_data_1S_4.iloc[:,draw_col], hist=False, kde=True, color='blue') sns.distplot(all_data_1S_5.iloc[:,draw_col], hist=False, kde=True, color='black') #separate by class, see feature mean all_data_2S_0 = all_data_2S_filtered[all_data_2S_filtered.label==0] all_data_2S_1 = all_data_2S_filtered[all_data_2S_filtered.label==1] all_data_2S_2 = all_data_2S_filtered[all_data_2S_filtered.label==2] all_data_2S_4 = all_data_2S_filtered[all_data_2S_filtered.label==4] all_data_2S_5 = all_data_2S_filtered[all_data_2S_filtered.label==5] draw_col = 10 sns.distplot(all_data_2S_0.iloc[:,draw_col], hist=False, kde=True, color='red') sns.distplot(all_data_2S_1.iloc[:,draw_col], hist=False, kde=True, color='green') sns.distplot(all_data_2S_2.iloc[:,draw_col], hist=False, kde=True, color='yellow') sns.distplot(all_data_2S_4.iloc[:,draw_col], hist=False, kde=True, color='blue') sns.distplot(all_data_2S_5.iloc[:,draw_col], hist=False, kde=True, color='black') 
#separate by class, see feature mean all_data_5S_0 = all_data_5S_filtered[all_data_5S_filtered.label==0] all_data_5S_1 = all_data_5S_filtered[all_data_5S_filtered.label==1] all_data_5S_2 = all_data_5S_filtered[all_data_5S_filtered.label==2] all_data_5S_4 = all_data_5S_filtered[all_data_5S_filtered.label==4] all_data_5S_5 = all_data_5S_filtered[all_data_5S_filtered.label==5] draw_col = 10 sns.distplot(all_data_5S_0.iloc[:,draw_col], hist=False, kde=True, color='red') sns.distplot(all_data_5S_1.iloc[:,draw_col], hist=False, kde=True, color='green') sns.distplot(all_data_5S_2.iloc[:,draw_col], hist=False, kde=True, color='yellow') sns.distplot(all_data_5S_4.iloc[:,draw_col], hist=False, kde=True, color='blue') sns.distplot(all_data_5S_5.iloc[:,draw_col], hist=False, kde=True, color='black') #separate by class, see feature mean all_data_10S_0 = all_data_10S_filtered[all_data_10S_filtered.label==0] all_data_10S_1 = all_data_10S_filtered[all_data_10S_filtered.label==1] all_data_10S_2 = all_data_10S_filtered[all_data_10S_filtered.label==2] all_data_10S_4 = all_data_10S_filtered[all_data_10S_filtered.label==4] all_data_10S_5 = all_data_10S_filtered[all_data_10S_filtered.label==5] draw_col = 10 sns.distplot(all_data_10S_0.iloc[:,draw_col], hist=False, kde=True, color='red') sns.distplot(all_data_10S_1.iloc[:,draw_col], hist=False, kde=True, color='green') sns.distplot(all_data_10S_2.iloc[:,draw_col], hist=False, kde=True, color='yellow') sns.distplot(all_data_10S_4.iloc[:,draw_col], hist=False, kde=True, color='blue') sns.distplot(all_data_10S_5.iloc[:,draw_col], hist=False, kde=True, color='black')Metricsprint("#############################") excercise_1(all_data_1S_filtered, 50) print("#############################") excercise_1(all_data_2S_filtered, 50) print("#############################") excercise_1(all_data_5S_filtered, 50) print("#############################") excercise_1(all_data_10S_filtered, 50) print("#############################") print("#############################") excercise_2(all_data_1S_filtered, 50) print("#############################") excercise_2(all_data_2S_filtered, 50) print("#############################") excercise_2(all_data_5S_filtered, 50) print("#############################") excercise_2(all_data_10S_filtered, 50) print("#############################") print("#############################") excercise_3(all_data_1S_filtered, 50) print("#############################") excercise_3(all_data_2S_filtered, 50) print("#############################") excercise_3(all_data_5S_filtered, 50) print("#############################") excercise_3(all_data_10S_filtered, 50) print("#############################")############################# X_train_3: (42313, 8) X_test_3: (15635, 8) y_train_3: (42313,) y_test_3: (15635,) _______________________________________________________ confusion matrix: [[1677 934 838 132 16] [ 259 4249 455 27 0] [ 139 865 2496 6 0] [ 49 76 13 2767 4] [ 13 0 0 7 613]] _______________________________________________________ classification report: precision recall f1-score support 0.0 0.78 0.47 0.58 3597 1.0 0.69 0.85 0.76 4990 2.0 0.66 0.71 0.68 3506 4.0 0.94 0.95 0.95 2909 5.0 0.97 0.97 0.97 633 micro avg 0.75 0.75 0.75 15635 macro avg 0.81 0.79 0.79 15635 weighted avg 0.76 0.75 0.75 15635 _______________________________________________________ accuracy score: 0.7548448992644707 ######[...]![image](resources/qgss-header.png) Lab 8: Quantum Chemistry In this lab, you will run through an example of a quantum chemistry problem. 
You will be exploring the simulation of the molecule LiH in Qiskit. You will need to follow the video associated with this lab.from qiskit.chemistry.drivers import PySCFDriver, UnitsType molecular_coordinates = 'Li 0 0 0; H 1.6 0 0' distance_unit = UnitsType.ANGSTROM basis = 'sto3g' driver = PySCFDriver(molecular_coordinates, unit=distance_unit) molecule = driver.run() h1 = molecule.one_body_integrals print(h1) h2 = molecule.two_body_integrals print(h2) nuclear_repulsion_energy = molecule.nuclear_repulsion_energy print(nuclear_repulsion_energy)0.9922072704751. Creating a Fermionic Operatorfrom qiskit.chemistry import FermionicOperator ferOp = FermionicOperator(h1=h1, h2=h2) num_spin_orbitals = molecule.num_orbitals * 2 num_particles = molecule.num_alpha + molecule.num_beta freeze_list = [0,6] remove_list = [3,8] ferOp_f, energy_shift = ferOp.fermion_mode_freezing(freeze_list) num_spin_orbitals -= len(freeze_list) num_particles -= len(freeze_list) print(ferOp_f.h1) ferOp_fr = ferOp_f.fermion_mode_elimination(remove_list) num_spin_orbitals -= len(remove_list) print(ferOp_fr.h1)[[-0.77258172 0.0485796 0. -0.12679498 0. 0. 0. 0. ] [ 0.0485796 -0.35593954 0. 0.06813315 0. 0. 0. 0. ] [ 0. 0. -0.35297897 0. 0. 0. 0. 0. ] [-0.12679498 0.06813315 0. -0.236054 0. 0. 0. 0. ] [ 0. 0. 0. 0. -0.77258172 0.0485796 0. -0.12679498] [ 0. 0. 0. 0. 0.0485796 -0.35593954 0. 0.06813315] [ 0. 0. 0. 0. 0. 0. -0.35297897 0. ] [ 0. 0. 0. 0. -0.12679498 0.06813315 0. -0.236054 ]]2. Creating a Qubit Operatormap_type = 'parity' qubitOp = ferOp_fr.mapping(map_type=map_type) print(qubitOp) print(qubitOp.print_details()) from qiskit.aqua.operators import Z2Symmetries pauli_symm = Z2Symmetries.find_Z2_symmetries(qubitOp) print(pauli_symm) qubitOp_t = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles) print(num_particles) print(qubitOp_t) print(qubitOp_t.print_details()) from qiskit.aqua.algorithms import NumPyEigensolver ee = NumPyEigensolver(qubitOp_t) result = ee.run() ref = result['eigenvalues'] print(ref + nuclear_repulsion_energy + energy_shift)[-7.88159204+5.85950671e-17j]Creating a Variational Ansatzfrom qiskit.chemistry.components.initial_states import HartreeFock init_state = HartreeFock(num_orbitals=4, num_particles=2, qubit_mapping='jordan_wigner') print(init_state.bitstr) HF_circuit = init_state.construct_circuit('circuit') HF_circuit.decompose().draw(output='mpl') from qiskit.chemistry.components.variational_forms import UCCSD UCCSD_var_form = UCCSD(num_orbitals=4, num_particles=2, qubit_mapping='jordan_wigner', excitation_type='s', method_singles='beta', initial_state=init_state, two_qubit_reduction=False, reps=2) print(UCCSD_var_form.single_excitations) print(UCCSD_var_form.num_parameters) var_circuit = UCCSD_var_form.construct_circuit([1,2]) var_circuit.decompose().draw(output='mpl')Homework - The last symmetry standingpauli_symm = Z2Symmetries.find_Z2_symmetries(qubitOp_t) print(pauli_symm)Z2 symmetries: Symmetries: ZZIZZI Single-Qubit Pauli X: IIIIXI Cliffords: ZZIZZI (0.7071067811865475+0j) IIIIXI (0.7071067811865475+0j) Qubit index: [1] Tapering values: - Possible values: [1], [-1]Run in Google Colab View on GitHub Download notebook Author: [](https://www.linkedin.com/in/tonyyum/) Distribued Hedging SimulationIn the previous notebook we have demonstrated how the discounted hedging cost is close to the option price at $T_0$Now let's run large simulation in parallel using AWS Lambda and see for ourselves how this converges as the number of hedging steps increases. 
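For reference, here is a rough local stand-in for what each task computes. The `hedging_portfolio` Lambda's actual code lives outside this notebook, so this sketch is an assumption on my part; it reuses the parameter names from `create_task` below and simulates the discounted cost of delta-hedging a European call under Black-Scholes.

```python
# Local sketch (an assumption, not the Lambda's actual code) of a Black-Scholes
# delta-hedging cost simulation, with the same parameter names as create_task:
# S0, K, sigma, r, T, mu, n_steps, n_sims.
import numpy as np
from scipy.stats import norm

def simulate_hedging_costs(S0=100., K=100., sigma=0.1, r=0., T=1., mu=0., n_steps=1000, n_sims=1000):
    """Discounted cost of delta-hedging a European call, one value per simulated path."""
    dt = T / n_steps
    rng = np.random.default_rng()
    costs = np.empty(n_sims)
    for i in range(n_sims):
        S, cash, delta_prev = S0, 0.0, 0.0
        for k in range(n_steps):
            tau = T - k * dt                                  # time to expiry (always > 0 here)
            d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * tau) / (sigma * np.sqrt(tau))
            delta = norm.cdf(d1)                              # Black-Scholes call delta
            cash -= (delta - delta_prev) * S                  # rebalance the stock position
            cash *= np.exp(r * dt)                            # cash accrues interest over the step
            delta_prev = delta
            S *= np.exp((mu - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * rng.standard_normal())
        payoff = max(S - K, 0.0)
        # Shortfall of the zero-initial-capital hedge portfolio vs the payoff, discounted to T0.
        costs[i] = np.exp(-r * T) * (payoff - (delta_prev * S + cash))
    return costs
```

Running this locally with a small `n_steps` and again with a large one should show the cost distribution tightening around the Black-Scholes price, which is the convergence the violin plots later in this notebook illustrate.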
Importsimport boto3 import json import pandas as pd import seaborn as sns import numpy as np import itertools import matplotlib.pyplot as plt from datetime import datetime from ipywidgets import interact, widgets from tqdm.notebook import tqdm, trangeInitialisationlambda_cli = boto3.client('lambda') sqs = boto3.resource('sqs') result_q = sqs.get_queue_by_name(QueueName='compute-result') sns.set_theme()Lambda async call and poll SQSdef submit_job(tasks): requests = {} for task in tasks: res = lambda_cli.invoke_async( FunctionName='hedging_portfolio', InvokeArgs=json.dumps(task) ) requests[res['ResponseMetadata']['RequestId']] = datetime.now() completed_tasks = 0 while completed_tasks != len(tasks): messages = result_q.receive_messages() for message in messages: body = json.loads(message.body) req_id = body['requestContext']['requestId'] request_time = requests.pop(req_id, None) if request_time: yield { 'request_time': request_time, 'requestPayload': body['requestPayload'], 'result': body['responsePayload']['body'], 'received_time': datetime.now() } completed_tasks += 1 # Should only delete message related to the requests # But for this notebook assume there is only one user # and we want to keep the queue clean message.delete()Interesting reading:Provisioned concurrency: https://aws.amazon.com/blogs/aws/new-provisioned-concurrency-for-lambda-functions/Cold start timing: https://mikhail.io/serverless/coldstarts/aws/ Homogeneous Taskslet's run the same 1,000 simulations with 1,000 steps with 15 nodes.Feel free to play around with the above numbers. For example it's interesting to run 15 nodes,then change to 100 nodes You'll see that 15 tasks are faster due to warm start.def create_task(n_steps, n_sims): return { 'bs_kwargs': { 'S0': 100., 'K': 100., 'sigma': 0.1, 'r': 0., 'T': 1., 'mu': 0., 'n_steps': n_steps }, 'sim_kwargs': { 'n_sims': n_sims } }Let's first do a single queryres = next(submit_job([create_task(1000, 1000)])) res['received_time'] - res['request_time']And now do it in parallel with 15 nodestasks = [create_task(1000, 1000)] * 15 results = [] with tqdm(total=len(tasks)) as pbar: for res in submit_job(tasks): results.append(res) pbar.update(1)Timingsdef get_timing_df(results): data = [] for res in results: req_time = res['request_time'].timestamp() rec_time = res['received_time'].timestamp() res = res['result'] timing = res['timing'] calc_start = timing['start'] calc_end = timing['end'] data.append((req_time, calc_start, calc_end, rec_time)) df = pd.DataFrame(data, columns=['req_time', 'calc_start', 'calc_end', 'rec_time']) df['calc_time'] = df.calc_end - df.calc_start df['tot_time'] = df.rec_time - df.req_time df['overhead'] = df.tot_time-df.calc_time return df.sort_values('tot_time') def plot_timing(results): df = get_timing_df(results) plt.figure(figsize=(12,6)) x=np.arange(len(results)) y=[df.calc_time, df.overhead] avg_overhead = df.overhead.mean() avg_tot = df.tot_time.mean() percent_overhead = avg_overhead / avg_tot * 100. plt.title(f'Avg Total Time: {avg_tot:.2f}s, Avg Overhead: {avg_overhead:.2f}s, Percent Overhead: {percent_overhead:.0f}%') plt.stackplot(x,y, labels=['Calc Time', 'Overhead']) plt.legend(loc='upper left') plt.show()So the overhead is only 0.28s. Which is 15% of the total time (Number may vary).The total calculation time was very short (1.85s). If we get each task to do more work,we'd decrease the overhead. 
But of course, we'd increase wall clock time.plot_timing(results)Slide the slider to pick results from each task@interact(i=widgets.IntSlider(max=len(results)-1)) def f(i): x = results[i]['result']['result']['costs'] ax = sns.histplot(x=x, bins=30, kde=True)Combining the results gives us a smoother distribution.x = np.array(list(itertools.chain.from_iterable(r['result']['result']['costs'] for r in results))) ax = sns.histplot(x=x, bins=30, kde=True)Heterogeneous Tasks We'll try Let's first do it Naively and just run different `n_steps` per task Problem 1, the tasks are too smalltasks = [create_task(i, 1000) for i in [20, 50, 100, 260]] results = [] with tqdm(total=len(tasks)) as pbar: for res in submit_job(tasks): results.append(res) pbar.update(1)As we can see the overhead is massive compare to the calc time. We could bunch those tasks together.plot_timing(results) def map_reduce(results): data = [] for res in results: n_steps = res['requestPayload']['bs_kwargs']['n_steps'] data += [(n_steps, x) for x in res['result']['result']['costs']] return pd.DataFrame(data, columns=['n_steps', 'cost'])Now we have a data, let look at a pretty violine chart to convince ourselves that as the numberof hedging steps increases the closer we on to the option price on any given simulation.df = map_reduce(results) plt.figure(figsize=(12,6)) ax = sns.violinplot(x="n_steps", y="cost", data=df)Problem 2, tasks too largetasks = [create_task(10**i, 1000) for i in range(1, 6)] results = [] with tqdm(total=len(tasks)) as pbar: for res in submit_job(tasks): results.append(res) pbar.update(1)Oops. Not a clever distribution strategy at all. Took 70+ seconds to do 1M steps 1,000 times.The distributed compute is only as fast as the slowest task.plot_timing(results)Better distribution strategy Let's change the strategy by splitting the heavier tasks into samller chunks.task_splits = [(10**i, 1000) for i in range(1, 4)] + [(int(1e5), 50)] * 20 + [(int(1e6), 10)] * 100 tasks = [create_task(*x) for x in task_splits] results = [] with tqdm(total=len(tasks)) as pbar: for res in submit_job(tasks): results.append(res) pbar.update(1) plot_timing(results) data = [] for res in results: n_steps = res['requestPayload']['bs_kwargs']['n_steps'] data += [(n_steps, x) for x in res['result']['result']['costs']] df = pd.DataFrame(data, columns=['n_steps', 'cost']) df = map_reduce(results) plt.figure(figsize=(12,6)) ax = sns.violinplot(x="n_steps", y="cost", data=df) @interact(i=widgets.IntSlider(min=1, max=6)) def f(i): ax = sns.histplot(x=df[df.n_steps == 10**i].cost, bins=30, kde=True)Performance vs Learning Ratedef get_vs_lr_scores(learning_rates_str, top_k): scores = {'loss': list(), 'Recall@1': list(), 'Recall@5': list(), 'Recall@20': list()} lr_list = list() for lr_str in learning_rates_str: dir_path = root / '1' / f'top{top_k}' / lr_str file_name = [x for x in os.listdir(dir_path) if x.startswith('best_checkpoint_')][0] checkpoints = torch.load(dir_path / file_name, 'cpu') for m in scores.keys(): scores[m].append(checkpoints['metrics_scores'][m]) lr_list.append(checkpoints['solver_construct_params_dict']['learning_rate']) scores['learning_rate'] = lr_list return scores def plot_vs_lr(scores, logscale): fig, ax = plt.subplots(2, 2, figsize=(10, 10)) ax[0,0].plot(scores['learning_rate'], scores['loss']) ax[0,0].set_title('loss vs learning rate') ax[0,1].plot(scores['learning_rate'], scores['Recall@1']) ax[0,1].set_title('Recall@1 vs learning rate') ax[1,0].plot(scores['learning_rate'], scores['Recall@5']) ax[1,0].set_title('Recall@5 
vs learning rate') ax[1,1].plot(scores['learning_rate'], scores['Recall@20']) ax[1,1].set_title('Recall@20 vs learning rate') if logscale: ax[0,0].set_xscale('log') ax[0,1].set_xscale('log') ax[1,0].set_xscale('log') ax[1,1].set_xscale('log') for a in ax: for b in a: b.set_xlabel('learning rate') ax[0,0].set_ylabel('loss') ax[0,1].set_ylabel('Recall@1') ax[1,0].set_ylabel('Recall@5') ax[1,1].set_ylabel('Recall@20') fig.tight_layout() learning_rates_str = ['1e-3', '1e-4', '1e-5', '1e-6','1e-7'] top_k=20 scores = get_vs_lr_scores(learning_rates_str, top_k) plot_vs_lr(scores, True) learning_rates_str = ['1e-7', '2e-7', '3e-7', '4e-7', '5e-7', '6e-7', '7e-7', '8e-7', '9e-7'] top_k=20 scores = get_vs_lr_scores(learning_rates_str, top_k) plot_vs_lr(scores, False)A Random Point vs Nearest Neighbordef get_vs_arch_scores(model_arch, top_k): scores = {'loss': list(), 'Recall@1': list(), 'Recall@5': list(), 'Recall@20': list()} lr_str = '5e-7' for arch in model_arch: dir_path = root / arch / f'top{top_k}' / lr_str file_name = [x for x in os.listdir(dir_path) if x.startswith('best_checkpoint_')][0] checkpoints = torch.load(dir_path / file_name, 'cpu') for m in scores.keys(): scores[m].append(checkpoints['metrics_scores'][m]) scores['model_arch'] = model_arch return scores top_k = 20 model_arch = ['500-100-20', '500-100-20-20', '5000-1000-200-200'] scores = get_vs_arch_scores(model_arch, top_k) scoresretrieve a model checkpoints = '200-200-100-100-100-50-50-50-25-25-25-20-20' lr = '1e-2' dir_path = Path('../checkpoints') / s / 'top20' / lr file_name = [x for x in os.listdir(dir_path) if x.startswith('best_checkpoint_')][0] print(torch.load(dir_path / file_name, 'cpu')['dev_metrics_scores']){'loss': 2751.59326171875, 'Recall@1': 0.01701701701701702, 'Recall@5': 0.06306306306306306, 'Recall@10': 0.12612612612612611, 'Recall@20': 0.22922922922922923}performance vs number of top_k neighbors considereddef get_vs_arch_scores(top_k_list): scores = {'loss': list(), 'Recall@1': list(), 'Recall@5': list(), 'Recall@20': list()} lr_str = '5e-7' for top_k in top_k_list: dir_path = root / '200-100-50-25-20' / f'top{top_k}' / lr_str file_name = [x for x in os.listdir(dir_path) if x.startswith('best_checkpoint_')][0] checkpoints = torch.load(dir_path / file_name, 'cpu') for m in scores.keys(): scores[m].append(checkpoints['dev_metrics_scores'][m]) scores['top_k'] = top_k_list return scores scores = get_vs_arch_scores([1,3,5,7,9,11,13,15,17,19]) scores plot_subgraph(scores, False, 'top_k')Rainfall - Climate Hazards Group InfraRed Precipitation with Station data (CHIRPS)* **Products used:** [rainfall_chirps_monthly](https://explorer.digitalearth.africa/products/rainfall_chirps_monthly), [rainfall_chirps_daily](https://explorer.digitalearth.africa/products/rainfall_chirps_daily)**Keywords**: :index:`datasets; CHIRPS`, :index:`climate`, :index:`rainfall`, :index:`monthly`BackgroundThis notebook demonstrates how to access and use the **Monthly Climate Hazards Group InfraRed Precipitation with Station data (CHIRPS)** from the DE Africa Open Data Cube. For offical information on this dataset, see [CHIRPS](https://www.chc.ucsb.edu/data/chirps). The abstract from this documentation is copied below: Estimating rainfall variations in space and time is a key aspect of drought early warning and environmental monitoring. An evolving drier-than-normal season must be placed in a historical context so that the severity of rainfall deficits can be quickly evaluated. 
However, estimates derived from satellite data provide areal averages that suffer from biases due to complex terrain, which often underestimate the intensity of extreme precipitation events. Conversely, precipitation grids produced from station data suffer in more rural regions where there are less rain-gauge stations. CHIRPS was created in collaboration with scientists at the USGS Earth Resources Observation and Science (EROS) Center in order to deliver complete, reliable, up-to-date data sets for a number of early warning objectives, like trend analysis and seasonal drought monitoring.The current CHIRPS datasets that are accessible from DE Africa's platforms are the `CHIRPS-2.0 Africa Monthly` dataset, copied from [here](https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_monthly/tifs/) and the `CHIRPS-2.0 Africa Daily` dataset, copied from [here](https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/). They have been converted to cloud-opitmized geotiffs, and indexed into DE Africa's Open-Data-Cube.**Important specifications:*** Datacube product name: `rainfall_chirps_monthly` * Measurement Type: Monthly Atmospheric Precipitation * Precipitation Units: Total mm/month * Date-range: `1981-01` to present * Spatial resolution: 0.05 degrees, approximately 5.55 km * Datacube product name: `rainfall_chirps_daily` * Measurement Type: Monthly Atmospheric Precipitation * Precipitation Units: Total mm/day * Date-range: `1981-01` to present * Spatial resolution: 0.05 degrees, approximately 5.55 km DescriptionIn this notebook we will load CHIRPS data using `dc.load()` to return a time series of datasets.Topics covered include:1. Inspecting the monthly CHIRPS product and measurements available in the datacube2. Using the native `dc.load()` function to load CHIRPS data3. Facet plotting the CHIRPS datasets4. Conduct a simple analysis workflow: finding the long-term monthly mean rainfall*** Getting startedTo run this analysis, run all the cells in the notebook, starting with the "Load packages" cell. Load packagesImport Python packages that are used for the analysis.%matplotlib inline import datacube import numpy as np from matplotlib import pyplot as plt from deafrica_tools.plotting import display_mapConnect to the datacubedc = datacube.Datacube(app="rainfall_chirps")Analysis parametersThis section defines the analysis parameters, including * `lat, lon, buffer`: center lat/lon and analysis window size for the area of interest* `time_period`: time period to be investigated* `output_crs`: projection for loading data; output resolution is not defined so different resolutions can be used for Landsat and Sentinel-2The default location cover all of Nigerialat, lon = 9.4707, 8.3899 buffer_lat, buffer_lon = 6, 6 time_period = ('2020') output_crs = 'epsg:6933' #join lat,lon,buffer to get bounding box lon_range = (lon - buffer_lon, lon + buffer_lon) lat_range = (lat + buffer_lat, lat - buffer_lat)View the selected locationThe next cell will display the selected area on an interactive map. Feel free to zoom in and out to get a better understanding of the area you'll be analysing. Clicking on any point of the map will reveal the latitude and longitude coordinates of that point.display_map(lon_range, lat_range)Available products and measurements List productsWe can use datacube's `list_products` functionality to inspect the CHIRPS rainfall datasets available in the datacube. 
The table below shows the product names that we will use to load the data and a brief description of the datadc.list_products().loc[dc.list_products()['name'].str.contains('chirps')]List measurementsWe can further inspect the data available for CHIRPS using datacube's `list_measurements` functionality. The table below lists each of the measurements available in the data.measurements = dc.list_measurements() measurements.loc["rainfall_chirps_monthly"]Load CHIRPS data using `dc.load()`Now that we know what products and measurements are available for the products, we can load data from the datacube using `dc.load`. In the first example below, we will load CHIRPS data for region covering Nigeria> Note: For a more general discussion of how to load data using the datacube, refer to the [Introduction to loading data](../Beginners_guide/03_Loading_data.ipynb) notebook.ds_rf_month = dc.load(product='rainfall_chirps_monthly', time='2020', y = lat_range, x = lon_range, resolution=(-5000, 5000), output_crs=output_crs) print(ds_rf_month) Dimensions: (time: 12, y: 303, x: 232) Coordinates: * time (time) datetime64[ns] 2020-01-16T11:59:59.500000 ... 2020-12... * y (y) float64 1.952e+06 1.948e+06 ... 4.475e+05 4.425e+05 * x (x) float64 2.325e+05 2.375e+05 ... 1.382e+06 1.388e+06 spatial_ref int32 6933 Data variables: rainfall (time, y, x) float32 0.0 0.0 0.0 0.0 ... 79.19 76.53 79.77 Attributes: crs: epsg:6933 grid_mapping: spatial_refPlotting CHIRPS Monthly Rainfall Let's facet plot the time-series to see the total rainfall each month during 2020 over Nigeria.# set -9999 no-data values to NaN ds_rf_month = ds_rf_month.where(ds_rf_month !=-9999.) #facet plot rainfall ds_rf_month['rainfall'].plot.imshow(col='time', col_wrap=6, cmap='YlGnBu', label=False);Loading and plotting daily rainfallIn the above plot we can see that a lot of rain fell in July 2020. We'll load the daily rainfall data for this month, aggregrate it across the region, and plot the daily totals to see how this rainfall was distributed within the month.ds_rf_daily = dc.load(product='rainfall_chirps_daily', time='2020-07', y = lat_range, x = lon_range, resolution=(-5000, 5000), output_crs=output_crs) print(ds_rf_daily) # set -9999 no-data values to NaN ds_rf_daily = ds_rf_daily.where(ds_rf_daily !=-9999.) #find the mean ds_rf_daily_mean = ds_rf_daily.mean(['x', 'y']).drop('spatial_ref').to_dataframe() ds_rf_daily_mean.plot.bar(figsize=(17,4)) plt.title('Daily rainfall July 2020') plt.ylabel('Rainfall (mm/day)') plt.xlabel('Day of month') plt.xticks(np.arange(0,31,1), np.arange(1,32,1));Example application: finding the monthly mean rainfall over a regionThe following section will demonstrate a simple analysis workflow based on CHIRPS rainfall. We will use a 10-year time-series of rainfall over Nigeria to find the long-term monthly mean rainfall total.First we will load the data, the parameters here are the same as the example above only we've increased to time-range from one year to 10 years.ds_rf = dc.load( product="rainfall_chirps_monthly", time=('2010', '2020'), y=lat_range, x=lon_range, resolution=(-5000, 5000), output_crs=output_crs, ) print(ds_rf) Dimensions: (time: 132, y: 303, x: 232) Coordinates: * time (time) datetime64[ns] 2010-01-16T11:59:59.500000 ... 2020-12... * y (y) float64 1.952e+06 1.948e+06 ... 4.475e+05 4.425e+05 * x (x) float64 2.325e+05 2.375e+05 ... 1.382e+06 1.388e+06 spatial_ref int32 6933 Data variables: rainfall (time, y, x) float32 0.0 0.0 0.0 0.0 ... 
79.19 76.53 79.77 Attributes: crs: epsg:6933 grid_mapping: spatial_refFind the long-term monthly mean rainfallWe find the mean rainfall across the region (`ds_rf.mean(['x', 'y'])`), then we group all the same months together and find the mean of the all the January's, February's etc. (`groupby('time.month').mean()`). Lastly we convert the result to a pandas dataframe (`.drop('spatial_ref').to_dataframe()`) to faciliate the plotting of a bar-chart# set -9999 no-data values to NaN ds_rf = ds_rf.where(ds_rf !=-9999.) #find the mean ds_rf_mean = ds_rf.mean(['x', 'y']).groupby('time.month').mean().drop('spatial_ref').to_dataframe() ds_rf_mean.head()Plot the resultds_rf_mean.plot.bar(figsize=(17,5)) plt.title('Average monthly rainfall 2010-2020') plt.ylabel('Rainfall (mm/month)');*** Additional information**License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). Digital Earth Africa data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license.**Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/digitalearthafrica/deafrica-sandbox-notebooks).**Compatible datacube version:**print(datacube.__version__)1.8.6**Last Tested:**from datetime import datetime datetime.today().strftime('%Y-%m-%d')TOC trends mapHeleen would like some maps illustrating TOC trends - see e-mails received 16/08/2017 at 10.51 and 11.05 for details. As a starting point, I have copied the attachment in Heleen's e-mail from 11:05 and have simplified the column headings.# Read data in_xlsx = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015' r'\Trends_Maps\heleen_toc_trends_data.xlsx') df = pd.read_excel(in_xlsx) df.head() def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'): ''' From here: https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib Function to offset the "center" of a colormap. Useful for data with a negative min and positive max and you want the middle of the colormap's dynamic range to be at zero Input ----- cmap : The matplotlib colormap to be altered start : Offset from lowest point in the colormap's range. Defaults to 0.0 (no lower ofset). Should be between 0.0 and `midpoint`. midpoint : The new center of the colormap. Defaults to 0.5 (no shift). Should be between 0.0 and 1.0. In general, this should be 1 - vmax/(vmax + abs(vmin)) For example if your data range from -15.0 to +5.0 and you want the center of the colormap at 0.0, `midpoint` should be set to 1 - 5/(5 + 15)) or 0.75 stop : Offset from highets point in the colormap's range. Defaults to 1.0 (no upper ofset). Should be between `midpoint` and 1.0. 
''' import numpy as np import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import AxesGrid cdict = { 'red': [], 'green': [], 'blue': [], 'alpha': [] } # regular index to compute the colors reg_index = np.linspace(start, stop, 257) # shifted index to match the data shift_index = np.hstack([ np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True) ]) for ri, si in zip(reg_index, shift_index): r, g, b, a = cmap(ri) cdict['red'].append((si, r, r)) cdict['green'].append((si, g, g)) cdict['blue'].append((si, b, b)) cdict['alpha'].append((si, a, a)) newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict) plt.register_cmap(cmap=newcmap) return newcmap1. Absolute trends 1.1. North America# Get max and min slopes for colour scale vmin = df['slp'].min() vmax = df['slp'].max() # Build colourmap for later orig_cmap = matplotlib.cm.coolwarm shifted_cmap = shiftedColorMap(orig_cmap, #start=min_slp, midpoint=(1 - (vmax/(vmax + abs(vmin)))), #stop=max_slp, name='shifted') # Dict for marker styles and region names mark_dict = {'1_Bo_NA':['o', 'Boreal North America'], '2_Temp_NA':['s', 'Temperate North America'], '3_Atl_NA':['^', 'Atlantic North America'], '4_Atl_EUR':['^', 'Atlantic Europe'], '5_Bo_Eur':['o', 'Boreal Europe'], '6_Temp_Eur':['s', 'Temperate Europe']} # Setup map fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.set_title('North America', fontsize=20) # Use a Lambert Conformal Conic projection m = Basemap(projection='lcc', resolution='i', lon_0=-73.8, lat_0=45, lat_1=40, lat_2=50, width=3E6, height=2E6) m.shadedrelief() m.drawcountries(linewidth=0.5) # Loop over dataets for reg in mark_dict.keys(): for tr in ['increasing', 'decreasing', 'no trend']: # Get data df1 = df.query('(region==@reg) and (trend==@tr)') # Map (long, lat) to (x, y) for plotting x, y = m(df1['lon'].values, df1['lat'].values) if tr == 'no trend': plt.scatter(x, y, c='white', marker=mark_dict[reg][0], s=200, lw=2, label=mark_dict[reg][1]) else: plt.scatter(x, y, c=df1['slp'].values, marker=mark_dict[reg][0], s=200, lw=2, cmap=shifted_cmap, vmin=min_slp, vmax=max_slp, label=mark_dict[reg][1]) # Add colourbar divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="2%", pad=0.05) #plt.colorbar(cax=cax) #plt.legend(loc='lower right', frameon=True, fontsize=14) vmaxHomework 10: Linear Regression **Reading**: * [Linear Regression](https://www.inferentialthinking.com/chapters/15/2/Regression_Line.html)* [Method of Least Squares](https://www.inferentialthinking.com/chapters/15/3/Method_of_Least_Squares.html)* [Least Squares Regression](https://www.inferentialthinking.com/chapters/15/4/Least_Squares_Regression.html) Please complete this notebook by filling in the cells provided. Directly sharing answers is not okay, but discussing problems with the course staff or with other students is encouraged. Refer to the policies page to learn more about how to learn cooperatively.For all problems that you must write our explanations and sentences for, you **must** provide your answer in the designated space. Moreover, throughout this homework and all future ones, please be sure to not re-assign variables throughout the notebook! For example, if you use `max_temperature` in your answer to one question, do not reassign it later on.# Don't change this cell; just run it. import numpy as np from datascience import * # These lines do some fancy plotting magic. 
import matplotlib %matplotlib inline import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') import warnings warnings.simplefilter('ignore', FutureWarning) import otter grader = otter.Notebook()Exploring the PTEN Gene with Linear Regression 1. PTEN Linear Regression This week's homework is about linear regression. The dataset we'll be using is from the Cancer Cell Line Encyclopedia -- you can read more about this database in this [paper](https://www.nature.com/articles/s41586-019-1186-3) and interact with the data yourself at the online portal [here](https://portals.broadinstitute.org/ccle).The specific dataset we'll be taking a look at is expression data for the PTEN gene in around 1000 cell lines. The PTEN gene is a tumor-suppressing gene, and mutations in the PTEN gene are associated with many types of cancer. A cell line is group of cells that are kept alive and replicate indefinitely in culture (grown in petri dishes, for example).Run the following cell to load the `pten` table. The `pten` table has four columns, a column for the specific `Cell Line`, a column for the `Copy Number`, which is how many times a copy of a portion of the PTEN gene is found in the DNA of that cell line, `mRNA Expression (Affy)`, and `mRNA Expression (RNAseq)`.# Just run this cell pten = Table().read_table("pten.csv") pten.show(5) # Just run this cell pten.hist("Copy Number", bins = np.arange(-1, 1.5, 0.5))**Question 1**Looking at the histogram above, we want to check whether or not `Copy Number` is in standard units. For this question, compute the mean and the standard deviation of the values in `Copy Number` and assign these values to `copy_number_mean` and `copy_number_sd` respectively. After you calculate these values, assign `is_su` to either `True` if you think that `Copy Numbers` is in standard units or `False` if you think otherwise.copy_number = pten.column("Copy Number") copy_number_mean = ... copy_number_sd = ... is_su = ... print(f"Mean: {copy_number_mean}, SD: {copy_number_sd}, Is in standard units?: {is_su}") grader.check("q1_1")**Question 2**Create the function `standard_units` so that it converts the values in the array `arr` to standard units. We'll then use `standard_units` to create a new table, `pten_su`, that converts all the values in the table `pten` to standard units.def standard_units(arr): ... # DON'T DELETE OR MODIFY ANY OF THE LINES OF CODE BELOW IN THIS CELL pten_su = Table().with_columns("Cell Line", pten.column("Cell Line"), "Copy Number SU", standard_units(pten.column("Copy Number")), "mRNA Expression (Affy) SU", standard_units(pten.column("mRNA Expression (Affy)")), "mRNA Expression (RNAseq) SU", standard_units(pten.column("mRNA Expression (RNAseq)")) ) pten_su.show(5) grader.check("q1_2")You should always visually inspect your data, before numerically analyzing any relationships in your dataset. Run the following cell in order to look at the relationship between the variables in our dataset.# Just run this cell pten_su.scatter("Copy Number SU", "mRNA Expression (Affy) SU") pten_su.scatter("Copy Number SU", "mRNA Expression (RNAseq) SU") pten_su.scatter("mRNA Expression (Affy) SU", "mRNA Expression (RNAseq) SU")**Question 3**Which of the following relationships do you think has the highest correlation (i.e. highest absolute value of `r`)? Assign `highest_correlation` to the number corresponding to the relationship you think has the highest correlation.1. Copy Number vs. mRNA Expression (Affy)2. Copy Number vs. mRNA Expression (RNAseq)3. mRNA Expression (Affy) vs. 
mRNA Expression (RNAseq)highest_correlation = ... grader.check("q1_3")**Question 4**Now, using the `standard units` function, define the function `correlation` which computes the correlation between `arr1` and `arr2`.def correlation(arr1, arr2): ... # This computes the correlation between the different variables in pten copy_affy = correlation(pten.column("Copy Number"), pten.column("mRNA Expression (Affy)")) copy_rnaseq = correlation(pten.column("Copy Number"), pten.column("mRNA Expression (RNAseq)")) affy_rnaseq = correlation(pten.column("mRNA Expression (Affy)"), pten.column("mRNA Expression (RNAseq)")) print(f" \ Copy Number vs. mRNA Expression (Affy) Correlation: {copy_affy}, \n \ Copy Number vs. mRNA Expression (RNAseq) Correlation: {copy_rnaseq}, \n \ mRNA Expression (Affy) vs. mRNA Expression (RNAseq) Correlation: {affy_rnaseq}") grader.check("q1_4")**Question 5**If we switch what we input as arguments to `correlation`, i.e. found the correlation between `mRNA Expression (Affy)` vs. `Copy Number` instead of the other way around, would the correlation change? Assign `correlation_change` to either `True` if you think yes, or `False` if you think no.correlation_change = ... grader.check("q1_5")**Question 6**Looking at both the scatter plots after Question 2 and the correlations computed in Question 4, describe a pattern you see in the relationships between the variables. _Type your answer here, replacing this text._ **Question 7**Let's look at the relationship between mRNA Expression (Affy) vs. mRNA Expression (RNAseq) only. Define a function called `regression_parameters` that returns the parameters of the regression line as a two-item array containing the slope and intercept of the regression line as the first and second elements respectively. The function `regression_line` takes in two arguments, an array of `x` values, and an array of `y` values.def regression_parameters(x, y): ... slope = ... intercept = ... return make_array(slope, intercept) parameters = regression_parameters(pten.column("mRNA Expression (Affy)"), pten.column("mRNA Expression (RNAseq)")) parameters grader.check("q1_7")**Question 8**If we switch what we input as arguments to `regression_parameters`, i.e. found the parameters for the regression line for `mRNA Expression (RNAseq)` vs. `mRNA Expression (Affy)` instead of the other way around, would the regression parameters change (would the slope and/or intercept change)? Assign `parameters_change` to either `True` if you think yes, or `False` if you think no.parameters_change = ... grader.check("q1_8")**Question 9**Now, let's look at how the regression parameters look like in standard units. Use the table `pten_su` and the function `regression_parameters`, and assign `parameters_su` to a two-item array containing the slope and the intercept of the regression line for mRNA Expression (Affy) in standard units vs. mRNA Expression (RNAseq) in standard units.parameters_su = ... parameters_su grader.check("q1_9")**Question 10**Looking at the array `parameters_su`, what do you notice about the slope and intercept values specifically? Relate them to another value we already calculated in a previous question, as well as relate them to an equation. _Type your answer here, replacing this text._ **Question 11**The oldest and most commonly used cell line in Biology is the HeLa cell line, named after , whose cervical cancer cells were taken without her consent in 1951 to create this cell line. The issue of data privacy and consent is very important to data science! 
You can read more about this topic [here](https://www.hopkinsmedicine.org/henriettalacks/).The HeLa cell line is missing from our dataset. If we know that the HeLa mRNA Expression (Affy) value is 8.2, what is the predicted mRNA Expression (RNAseq) value? Use the values in `parameters` that we derived in Question 1.7, and assign the result to `hela_rnaseq`.hela_rnaseq = ... hela_rnaseq grader.check("q1_11")**Question 12**Compute the predicted mRNA Expression (RNAseq) values from the mRNA Expression (Affy) values in the `pten` table. Use the values in the `parameters` array from Question 1.7, and assign the result to `predicted_rnaseq`. We'll plot your computed regression line with the scatter plot from after question 1.2 of mRNA Expression (Affy) vs. mRNA Expression (RNAseq).predicted_rnaseq = ... # DON'T CHANGE/DELETE ANY OF THE BELOW CODE IN THIS CELL (pten.with_column("Predicted mRNA Expression (RNAseq)", predicted_rnaseq) .select("mRNA Expression (Affy)", "mRNA Expression (RNAseq)", "Predicted mRNA Expression (RNAseq)") .scatter("mRNA Expression (Affy)")) plt.ylabel("mRNA Expression (RNAseq)");Fitting a least-squares regression line Recall that the least-square regression line is the unique straight line that minimizes root mean squared error (RMSE) among all possible fit lines. Using this property, we can find the equation of the regression line by finding the pair of slope and intercept values that minimize root mean squared error. **Question 13**Define a function called `RMSE`. It should take two arguments:1. the slope of a line (a number)2. the intercept of a line (a number).It should return a number that is the root mean squared error (RMSE) for a line defined with the arguments slope and intercept used to predict mRNA Expression (RNAseq) values from mRNA Expression (Affy) values for each row in the `pten` table.*Hint: Errors are defined as the difference between the actual `y` values and the predicted `y` values.**Note: if you need a refresher on RMSE, here's the [link](https://www.inferentialthinking.com/chapters/15/3/Method_of_Least_Squares.htmlRoot-Mean-Squared-Error) from the textbook*def RMSE(slope, intercept): affy = pten.column("mRNA Expression (Affy)") rnaseq = pten.column("mRNA Expression (RNAseq)") predicted_rnaseq = ... ... # DON'T CHANGE THE FOLLOWING LINES BELOW IN THIS CELL rmse_example = RMSE(0.5, 6) rmse_example grader.check("q1_13")**Question 14**What is the RMSE of a line with slope 0 and intercept of the mean of `y` equal to?*Hint 1: The line with slope 0 and intercept of mean of `y` is just a straight horizontal line at the mean of `y`**Hint 2: What does the formula for RMSE become if we input our predicted `y` values in the formula. Try writing it out on paper! It should be a familiar formula.* _Type your answer here, replacing this text._ **Question 15**Find the parameters that minimizes RMSE of the regression line for mRNA Expression (Affy) vs. mRNA Expression (RNAseq). Assign the result to `minimized_parameters`.If you haven't tried to use the `minimize` [function](http://data8.org/sp20/python-reference.html) yet, now is a great time to practice. Here's an [example from the textbook](https://www.inferentialthinking.com/chapters/15/3/Method_of_Least_Squares.htmlnumerical-optimization).*Hint: Use the `RMSE` function in Question 1.13***NOTE: When you use the minimize function, please pass in `smooth=True` as the second argument to this function. You'll need to do this, otherwise, your answer will be incorrect**minimized_parameters = ... 
minimized_parameters grader.check("q1_15")**Question 16**The slope and intercept pair you found in Question 1.15 should be very similar to the values that you found in Question 1.7. Why were we able to minimize RMSE to find the same slope and intercept from the previous formulas? _Type your answer here, replacing this text._ **Question 17**If we had instead minimized mean squared error (MSE), would we have gotten the same slope and intercept of the minimized root mean squared error (RMSE) results? Assign `same_parameters` to either `True` if you think yes, or `False` if you think no.same_parameters = ... same_parameters grader.check("q1_17")Let's look at the scatter plot of the relationship between mRNA Expression (Affy) and mRNA Expression (RNAseq) again:pten.scatter("mRNA Expression (Affy)", "mRNA Expression (RNAseq)")**Question 18**Using a linear regression model, would we be able to obtain accurate predictions for most of the points? Explain why or why not. _Type your answer here, replacing this text._ 2. Properties of Binary Distributions Binary distributions arise in regular everyday life, and as data scientists you will encounter them constantly. A binary distribution is a distribution across two categories: such as voting in support of a proposition or voting against it on your local ballot, flipping heads or tails, having heart disease or not having heart disease. Generally we represent 'yes' or `True` as 1, and 'no' or `False` as 0. Binary distributions have some special properties that make working with them especially easy! The intent of this section of the homework is to walk you through these properties, so we decided to make all of the tests for this section public (i.e. there are no hidden tests to worry about for this section only). Question 1Let's generate a random binary distribution of 0's and 1's. Assign `binary_options` to the correct array of possible values in a binary distribution (i.e. look at the previous sentence).binary_options = ... # DON'T DELETE/MODIFY ANY OF THE CODE IN THIS CELL BELOW sample_size = 100 binary_sample = np.random.choice(binary_options, sample_size) # Run this to see a histogram of this random distribution. Table().with_columns("Value", make_array(1, 0), "Number in Sample", make_array(sum(binary_sample), sample_size - sum(binary_sample))).barh("Value") grader.check("q2_1")Question 2The first property you should note is that the proportion of ones in a binary distribution is equal to the mean of the distribution. [Think about why this is true](https://www.inferentialthinking.com/chapters/14/1/Properties_of_the_Mean.htmlProportions-are-Means). Complete the following cell to show that this is the case for your `binary_sample`. Assign `number_of_ones` and `number_of_zeros` to the number of `1`'s and the number of `0`'s respectively from your `binary_sample`.number_of_ones = ... number_of_zeros = ... # DON'T DELETE/MODIFY ANY OF THE CODE BELOW IN THIS CELL number_values = len(binary_sample) sum_of_binary_sample = sum(binary_sample) # Remember that the mean is equal to the sum divided by the number of items mean_binary_sample = sum_of_binary_sample / number_values # Don't change this! print(f"In your binary sample there were {number_of_ones} ones and {number_of_zeros} zeros. 
1*{number_of_ones} + 0*{number_of_zeros} = {number_of_ones}") print(f"The sum of values in your sample was {sum_of_binary_sample}, divided by the number of items, {number_values}, gives us a mean of {mean_binary_sample}") print(f"The proportion of ones in your sample was {number_of_ones} ones, divided by the number of items, {number_values}, gives us a value of {mean_binary_sample}" ) print('Those values are equal!') grader.check("q2_2")Since the proportion of ones is the same as the mean, the Central Limit Theorem applies! That is, if we resample our sample a lot of times, the distribution of the proportion of ones in our resamples will be roughly normal, with a predictable center and spread!# Just run this cell resampled_proportion_of_ones = make_array() for i in np.arange(5000): resample = Table().with_column("Value", binary_sample).sample() resample_proportion_ones = resample.where("Value", 1).num_rows / resample.num_rows resampled_proportion_of_ones = np.append(resampled_proportion_of_ones, resample_proportion_ones) Table().with_column('Resampled Proportions', resampled_proportion_of_ones).hist()Let's generate a table where each row has a different number of ones and zeros that we'll use for the following parts.# Just run this cell possible_number_ones = np.arange(sample_size + 1) possible_number_zeros = sample_size - possible_number_ones possibilities_table = Table().with_columns("Values of One", possible_number_ones, "Values of Zero", possible_number_zeros) possibilities_table.show(5)Question 3The second important property of binary distributions is that the standard deviation of every binary distribution is equal to:$$\sqrt{\text{proportion_ones} *\text{proportion_zeros}}$$While this property is useful in some cases, a more useful extension of this property is that it tells us that the maximum standard deviation for a binary distribution is 0.5!Let's explore why that is the case!Complete the `binary_std_formula` function below so that it returns the standard deviation of a binary distribution according to the formula above.def binary_std_formula(row): num_ones = row.item("Values of One") num_zeros = row.item("Values of Zero") sum_ones_and_zeros = ... prop_ones = ... prop_zeros = ... ... # DON'T DELETE/MODIFY ANY OF THE LINES BELOW IN THIS CELL possibilities_table = possibilities_table.with_column("Formula SD", possibilities_table.apply(binary_std_formula)) possibilities_table.show(5) grader.check("q2_3")Here's another function that takes in a row object from a table, generates a sample that has the same number of ones and zeros as the row specifies, and then returns the standard deviation of that table. You should be able to understand exactly what this function does! It also does the same thing as above, where we return the standard deviation, but we just use `np.std` for this function.# Just run this cell def binary_std(row): values = make_array() for i in np.arange(row.item("Values of One")): values = np.append(values, 1) for i in np.arange(row.item("Values of Zero")): values = np.append(values, 0) return np.std(values) possibilities_table = possibilities_table.with_column("Empirical SD", possibilities_table.apply(binary_std)) possibilities_table.show(5)All the values are the same! 
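As a quick standalone check (not one of the graded cells), you can evaluate $\sqrt{p(1-p)}$ directly for a grid of proportions and see that it never exceeds 0.5:

```python
import numpy as np

# Standalone check: sd = sqrt(p * (1 - p)) for a grid of proportions of ones.
p = np.arange(0, 1.01, 0.1)
sd = np.sqrt(p * (1 - p))
for prop, s in zip(p, sd):
    print(f"proportion of ones = {prop:.1f} -> sd = {s:.3f}")
# The maximum, 0.5, occurs when the proportion of ones is 0.5.
```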
Let's see what this formula means!# Just run this cell possibilities_table.scatter("Values of One", "Formula SD")What a beautiful curve!Looking at that curve, we can see that maximum value is $0.5$, which occurs in the middle of the distribution, when the two categories have equal proportions (proportion of ones = proportion of zeros = $\frac{1}{2}$). (OPTIONAL, NOT IN SCOPE) Logarithmic Plots A kind of visualization you will frequently encounter as a data scientist is a scatter plot or line plot that uses a logarithmic scale. This **Optional** section will cover how to read and generate logarithmic plots. Since this is optional, there is no autograded/free response questions for these sections. Just read, run cells, and explore. What is a logarithm? A logarithm helps us find the inverse of an equation that uses exponentials. Specifically, if$$a^y = x$$Then$$\log_a{x} = y$$The most commonly used $a$, which is known as the base of the logarithm, is $e$, which is equivalent to about 2.718, or 10 (for powers of 10).We can use `numpy` to take logs in Python! By default, np.log uses a base of e.make_array(np.log(np.e), np.log(np.e**2), np.log(100))Back to the visualization: when we are plotting trends that grow exponentially, such as the line$$ y = e^x$$our y-axis needs to have a large range of values, which makes it difficult to understand.Let's see what this looks like:x = np.arange(0, 10, 1/100) y = 10 ** x Table().with_columns("X", x, "Y", y).scatter(0,1)Note that since $10^{10}$ is so big, we can't really see what's happening at all to the y values when they have x values below 8.One solution to this to change our y and/or x axis so that instead of having even spaces between the tick marks, our marks grow by an uneven factor. We do this by making the tick marks go on a logarithmic scale, and we'll then be able to understand our data better!Table().with_columns("X", x, "Y", y).scatter(0,1) plt.yscale("log")Now we can tell what's happening to the y values for every x value!Note how the y values start at $10^0=1$, and increase by a *factor* of $10$ each mark - the next mark is $10^1 = 10$, then $10^2=100$.You still read this plot like a normal plot, so at a value of $x=5, y=10^5=10000$.How do you calculate intermediate values? At a value like $x = 2.5$ it looks like the y value is somewhere in-between $10^1$ and $10^3$. In this graph with a logarithmic scale, you would say that $y=10^{2.5} \approx 316$. When visualizing data about the spread of diseases, you will commonly run into plots with logarithmic scales, such as this example from the New York Times. Make sure to always know what the scales of the data are! Image is from https://www.nytimes.com/2020/03/20/health/coronavirus-data-logarithm-chart.html ---To double-check your work, the cell below will rerun all of the autograder tests.grader.check_all()SubmissionMake sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!**# Save your notebook first, then run this cell to export your submission. 
grader.export(pdf=False)Seperate interface Part to avoid retraining the model if interface was hanged%%time %matplotlib inline import matplotlib.cm import matplotlib.pyplot as plt import numpy as np from medpy.io import load from inference.UNetInferenceAgent import UNetInferenceAgent from utils.volume_stats import Dice3d,Jaccard3d import torch path="/media/ahmed000/Personal/Learning/DeepLearning/projects/AI_for_HC_ND/Third_course/final_project/section1/out/TrainingSet" data = LoadHippocampusData(path, y_shape = 64, z_shape = 64) images_t=data[230:] out_dict = {} out_dict["volume_stats"] = [] dc_list = [] jc_list = [] inference_agent = UNetInferenceAgent("/media/ahmed000/Personal/Learning/DeepLearning/projects/AI_for_HC_ND/Third_course/final_project/section2/out/2020-05-30_1018_Basic_unet/model.pth") with torch.no_grad(): for i, x in enumerate(images_t): pred_label = inference_agent.single_volume_inference(x["image"]) dc = Dice3d(pred_label, x["seg"]) jc = Jaccard3d(pred_label, x["seg"]) dc_list.append(dc) jc_list.append(jc) out_dict["volume_stats"].append({ "filename": x['filename'], "dice": dc, "jaccard": jc }) print(f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(images_t):.2f}% complete") out_dict["overall"] = { "mean_dice": np.mean(dc_list), "mean_jaccard": np.mean(jc_list)} print("\nTesting complete.") out_dir= "/media/ahmed000/Personal/Learning/DeepLearning/projects/AI_for_HC_ND/Third_course/final_project/section2/out" with open(os.path.join(out_dir, "results.json"), 'w') as out_file: json.dump(out_dict, out_file, indent=2, separators=(',', ': ')) %matplotlib inline figure,axes=plt.subplots(20,3,figsize=(10,40)) axes=axes.flatten() for ind in range(20): axes[ind*3].imshow(pred_label[ind,0,:,:],cmap='gray') axes[ind*3+1].imshow(pred_label[ind,1,:,:],cmap='gray') axes[ind*3+2].imshow(pred_label[ind,2,:,:],cmap='gray')1. We try K-means methodn_clusters=2 kmeans = KMeans(n_clusters=n_clusters, n_init=100, n_jobs=4) class_pred = kmeans.fit_predict(desc_clean_data) class_pred unique, counts = np.unique(class_pred, return_counts=True) dict(zip(unique, counts))2. 
We try similarity matrix and spectral clusteringprint(len(desc_clean_data)) print(desc_clean_data.shape) desc_clean_data from tqdm import tqdm, tqdm_notebook from scipy import sparse from sklearn.metrics.pairwise import cosine_similarity from sklearn.cluster import SpectralClustering desc_sparse = sparse.csr_matrix(desc_clean_data) %time sim_matrix = cosine_similarity(desc_sparse, desc_sparse) #sim_matrixExplore sim_matrixnp.median(sim_matrix) np.argwhere(sim_matrix < 0.98)[0:10,:] n_clusters=2 model = SpectralClustering(n_clusters, affinity='precomputed') class_pred = model.fit_predict(sim_matrix) unique, counts = np.unique(class_pred, return_counts=True) dict(zip(unique, counts))Analyze# jpprieto 26/06/2019 # Plot the principal components import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error X_pre = desc_clean_data scaler = StandardScaler() X = scaler.fit_transform(X_pre) pca = PCA() scaler = StandardScaler() X=scaler.fit_transform(X) pca.fit_transform(X) features = range(pca.n_components_) print(pca.n_components_) print(pca.components_) plt.bar(features, pca.explained_variance_ratio_) plt.xticks(features) plt.xlabel("PCA features") plt.ylabel("variance") plt.yscale('log') plt.ylim(1e-4,2e0) plt.xlim(-0.5,15.5) plt.grid() plt.show() plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance') plt.xlim(-0.5,15.5) plt.grid() plt.show() pca = PCA(7) # project from 97 to 7 dimensions X_pca = pca.fit_transform(X) print(X.shape) print(X_pca.shape) import numpy as np import sklearn.datasets, sklearn.decomposition mu = np.mean(X, axis=0) pca = sklearn.decomposition.PCA() pca.fit(X) nComp = 7 Xhat = np.dot(pca.transform(X)[:,:nComp], pca.components_[:nComp,:]) Xhat += mu print(Xhat.shape) print(Xhat) pca = PCA() scaler = StandardScaler() Xhat=scaler.fit_transform(Xhat) pca.fit_transform(Xhat) features = range(pca.n_components_) print(pca.n_components_) print(pca.components_) plt.bar(features, pca.explained_variance_ratio_) plt.xticks(features) plt.xlabel("PCA features") plt.ylabel("variance") plt.yscale('log') plt.ylim(1e-4,2e0) plt.xlim(-0.5,15.5) plt.grid() plt.show() plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance') plt.xlim(-0.5,15.5) plt.grid() plt.show()Notebook use to check the result of the classifier, how well can you detect the nucleus .You can click `shift` + `enter` to run one cell, you can also click run in top menu.To run all the cells, you can click `kernel` and `Restart and run all` in the top menu.# Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 %reload_ext autoreload %matplotlib inline import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = 8,8 plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' import numpy as np import javabridge import bioformats from itkwidgets import view from sklearn.externals import joblib # Ignore warnings in notebook import warnings warnings.filterwarnings('ignore')The following path should direct to the folder "utils", on Window env it should have slash 
" / " and not backslash " \ " .# Create a temporary python PATH to the module that we are using for the analysis import sys sys.path.insert(0, "/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/ChromosomeDetectionChloe/utils") from chromosome_dsb import *Loading a typical image using bioformatsjavabridge.start_vm(class_path=bioformats.JARS)In the path variable you should enter the path to your image of interest:path = '/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/data_chloe/cku-exo1_002/2017-04-12_RAD51-HTP3_cku80-exo1_002_visit_13_D3D_ALX.dv'in the following cell in "channel" enter the the channel (starting from 0) where you will find the nucleusimg = load_data.load_bioformats(path, channel = 3, no_meta_direct = True) img.shape #view(visualization.convert_view(img))Sliding Window First need to load the classifier (clf) and scaler.clf = joblib.load("/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/clf_scaler/clf") scaler = joblib.load("/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/clf_scaler/scaler") import time tp1 = time.time() result = search.rolling_window(img, clf, scaler) tp2 = time.time() print("It took {}sec to find the chromosomes in 1 Zstack".format(int(tp2-tp1)))Optionally you can create a Heat map with the probability at every pixel that there is a nucleus#heat_map = visualization.heatmap(result) #view(visualization.convert_view(heat_map))Max projection and check how the result looks likeproj = np.amax(img, axis=0)When boxes are overlapping, only keep the highest probability one.Here you can adjust `probaThresh` and `overlaThresh`, if you find better parameters, you can change them in the function `batch.batch` in the `chromosome_dsb` folder.box = search.non_max_suppression(result, probaThresh=0.8, overlapThresh=0.3) import matplotlib.patches as patches fig, ax = plt.subplots(1, 1, figsize=(10, 10)) ax.imshow(proj, vmax = 100000) for rec in box: rect = patches.Rectangle((rec[0],rec[1]),70,70,linewidth=3,edgecolor='y',facecolor='none') # Add the patch to the Axes ax.add_patch(rect) plt.axis('off') #plt.savefig('/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/data_chloe/fig.png', bbox_inches="tight", pad_inches=0)Save the result#path = "/Users/Espenel/Desktop/Mini-Grant-Image-analysis/2018/Chloe/13/" #load_data.save_file(path, "bbox_3D", box, model=False) #load_data.save_file(path, "bbox_3D", binary, model=False)Bayesian Examples in Pythonimport pandas as pd import numpy as np # Visualizations import matplotlib.pyplot as plt import seaborn as sns plt.style.use('fivethirtyeight') plt.rcParams['font.size'] = 22 %matplotlib inline import warnings warnings.filterwarnings('ignore', category=FutureWarning) import pymc3 as pm # Helper functions from utils import draw_pdf_contours, Dirichlet, plot_points, annotate_plot, add_legend, display_probs # observations animals = ['lions', 'tigers', 'bears'] c = np.array([5, 3, 1]) # hyperparameters (initially all equal) alphas = np.array([1, 1, 1])[Calculate Expected Value](https://en.wikipedia.org/wiki/Categorical_distributionBayesian_inference_using_conjugate_prior)display_probs(dict(zip(animals, (alphas + c) / (c.sum() + alphas.sum()))))Species: lions Prevalence: 50.00%. Species: tigers Prevalence: 33.33%. Species: bears Prevalence: 16.67%.Maximum A Posterior EstimationThe maximum a posterior (MAP) estimate is simply going to be the prevalence seen in the data. This is a frequentist way of viewing the world. 
$${\displaystyle \operatorname {arg\,max} \limits _{\mathbf {p} }p(\mathbf {p} \mid \mathbb {X} )={\frac {\alpha _{i}+c_{i}-1}{\sum _{i}(\alpha _{i}+c_{i}-1)}},\qquad \forall i\;\alpha _{i}+c_{i}>1}$$display_probs(dict(zip(animals, (alphas + c - 1) / sum(alphas + c - 1))))Species: lions Prevalence: 55.56%. Species: tigers Prevalence: 33.33%. Species: bears Prevalence: 11.11%.Bayesian Model## Change n to match total prior observations with pm.Model() as model: # Dirichlet hyperparameters are uniform hyperpriors hyperpriors = pm.Uniform('hyperpriors', shape = 3, observed = alphas) # Probabilities for each species parameters = pm.Dirichlet('parameters', a=hyperpriors, shape=3) # Observed data is a multinomial distribution with 6 trials observed_data = pm.Multinomial( 'observed_data', n=9, p=parameters, shape=3, observed=c) model with model: # Sample from the posterior trace = pm.sample(draws=1000, chains=2, tune=500, discard_tuned_samples=False) summary = pm.summary(trace) summary.index = animals summary # Tuning samples tune_df = pd.DataFrame(trace['parameters'][:1000], columns = animals) tune_df['tune'] = True # Samples after tuning trace_df = pd.DataFrame(trace['parameters'][1000:], columns = animals) trace_df['tune'] = False all_df = pd.concat([tune_df, trace_df]) pvals = trace_df.iloc[:, :3].mean(axis = 0) trace_df.head() trace_df.iloc[:, :3].apply(lambda x: np.percentile(x, 5)).to_frame().rename(columns = {0: '5th percentile'}) ## 95% range of uncertainity (Large amount of uncertinity given the small sampling size) trace_df.iloc[:, :3].apply(lambda x: np.percentile(x, 95)).to_frame().rename(columns = {0: '95th percentile'})Diagonistic Plotsax = pm.plot_posterior(trace, varnames = ['parameters'], figsize = (20, 10), edgecolor = 'k'); plt.rcParams['font.size'] = 22 for i, a in enumerate(animals): ax[i].set_title(a);The traceplot shows a kernel density estimate and all the samples that were drawn on the right. We collapse the chains on th plots (combined = True) but in reality we drew 2 independent chains.prop_cycle = plt.rcParams['axes.prop_cycle'] cs = [x['color'] for x in list(prop_cycle)] ax = pm.traceplot(trace, varnames = ['parameters'], figsize = (20, 10), combined = True,skip_first = 1000); ax[0][0].set_title('KDE of Posteriors'); ax[0][1].set_title('Values in Trace'); add_legend(ax[0][0]) add_legend(ax[0][1])Maximum A Posteriori Result with PyMC3The MAP estimates are exactly the same as the observations. 
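Because the Dirichlet prior is conjugate to the multinomial likelihood, the exact posterior here is Dirichlet(alphas + c). A small standalone sketch (reusing the `animals`, `alphas` and `c` arrays defined above) lets us compare exact posterior draws against the MCMC trace:

```python
import numpy as np

# Exact posterior under conjugacy: Dirichlet(alphas + c) = Dirichlet([6, 4, 2]).
exact_draws = np.random.dirichlet(alphas + c, size=5000)

for name, mean in zip(animals, exact_draws.mean(axis=0)):
    print(f"{name}: exact posterior mean ~ {mean:.3f}")
# These should sit close to the trace means reported above (about 0.50, 0.33, 0.17).
```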
These are also the results that a frequentist would come up with!with model: # Find the maximum a posteriori estimate map_ = pm.find_MAP() display_probs(dict(zip(animals, map_['parameters']))) with model: samples = pm.sample_ppc(trace, samples = 1000) dict(zip(animals, samples['observed_data'].mean(axis = 0))) sample_df = pd.DataFrame(samples['observed_data'], columns = animals) plt.figure(figsize = (22, 8)) for i, animal in enumerate(sample_df): plt.subplot(1, 3, i+1) sample_df[animal].value_counts().sort_index().plot.bar(color = 'r'); plt.xticks(range(7), range(7), rotation = 0); plt.xlabel('Number of Times Seen'); plt.ylabel('Occurences'); plt.title(f'1000 Samples for {animal}');Dirichlet Distributionsdraw_pdf_contours(Dirichlet(alphas)) annotate_plot() draw_pdf_contours(Dirichlet(9*pvals)) annotate_plot()Probability of Next Observationnext_obs = np.random.multinomial(n = 1, pvals = pvals, size = 10000) # Data manipulation next_obs = pd.melt(pd.DataFrame(next_obs, columns = ['Lions', 'Tigers', 'Bears'])).\ groupby('variable')['value'].\ value_counts(normalize=True).to_frame().\ rename(columns = {'value': 'total'}).reset_index() next_obs = next_obs.loc[next_obs['value'] == 1] # Bar plot next_obs.set_index('variable')['total'].plot.bar(figsize = (8, 6)); plt.title('Next Observation Likelihood'); plt.ylabel('Likelihood'); plt.xlabel(''); next_obs all_df = pd.melt(all_df, id_vars=['tune'], var_name='animal', value_name='posterior') plt.rcParams['font.size'] = 20 g = sns.FacetGrid(data = all_df, hue = 'tune', col = 'animal', size = 6) g.map(sns.kdeplot, 'posterior'); l = plt.legend(prop={'size': 20}); l.set_title('Tune'); g.set_ylabels(label = 'Density') summaryЗадача 1def jordan_matrix(X: np.ndarray) -> np.ndarray: def create_block(x: tuple): return np.eye(int(x[1]), k=0) * x[0] + np.eye(int(x[1]), k=1) fir = create_block(X[0]) sec = create_block(X[1]) thi = create_block(X[2]) zero1 = np.zeros((int(X[0][1]), int(X[1][1]))) zero2 = np.zeros((int(X[0][1]), int(X[2][1]))) zero3 = np.zeros((int(X[1][1]), int(X[0][1]))) zero4 = np.zeros((int(X[1][1]), int(X[2][1]))) zero5 = np.zeros((int(X[2][1]), int(X[0][1]))) zero6 = np.zeros((int(X[2][1]), int(X[1][1]))) return np.block([ [fir, zero1, zero2], [zero3, sec, zero4], [zero5, zero6, thi] ])Задача 2def complex_expr(A: np.ndarray, B: np.ndarray) -> np.ndarray: if A.shape[1] != B.shape[0]: return None return np.tan(A) @ np.log(np.abs(B))Задача 3def max_result(X: np.ndarray) -> float: a = X.prod(axis=0).min() b = X.prod(axis=1).min() c = X.min(axis=0).prod() d = X.min(axis=1).prod() return np.max([a,b,c,d]) X = np.array([[ 1, 2, 13], [15, 6, 8], [ 7, 18, 9]]) max_result(X)Задача 4def nearest_value(X: np.ndarray, a: float) -> float: X = X.reshape(-1) return X[(np.abs(X - a)).argmin()] nearest_value(X, 7.2)Задача 5def tensor_mask(X: np.ndarray, mask: np.ndarray) -> np.ndarray: return X ^ mask X = np.array([ [[ 1, 0, 1], [ 1, 1, 1], [ 0, 0, 1]], [[ 1, 1, 1], [ 1, 1, 1], [ 1, 1, 1]] ]) mask = np.array([[1, 1, 0], [1, 1, 0], [1, 1, 0]]) tensor_mask(X, mask)Задача 6def sort_evens(A: np.ndarray) -> np.ndarray: X = A.copy() X[X % 2 ==0] = np.sort(X[X % 2 ==0]) return X A = np.array([43, 66, 34, 55, 78, 105, 2]) sort_evens(A)Задача 7def num_sum(A): summa = lambda x: sum(list(map(int, list(str(x))))) vs = np.vectorize(summa) return vs(A) A = np.array([43, 66, 34, 55, 78, 105, 2]) num_sum(A)Задача 8def replace_nans(X): med = np.nan_to_num(np.nanmedian(X, axis=1))[:, np.newaxis] mask = np.isnan(X) * med return np.nan_to_num(X) + mask replace_nans(X)Forecasting 
Facebook Prophethttps://facebook.github.io/prophet/https://research.fb.com/blog/2017/02/prophet-forecasting-at-scale/https://peerj.com/preprints/3190.pdfimport pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np %matplotlib inline mpl.rcParams['figure.figsize']=(16,10) pd.set_option('display.max_rows',500) import plotly.graph_objects as go #attention might have problems with holiday package, #downgrate holidays via: pip install 'holidays==0.9.12' from fbprophet import Prophet plt.style.use('fivethirtyeight')Trivial Forecast (rolling mean)df=pd.DataFrame({'X':np.arange(0,10)}) df['y']=df.rolling(3).mean() df df_all=pd.read_csv("../data/processed/COVID_small_flat_table.csv",sep=';') df=df_all[['date','Germany']] df=df.rename(columns={'date':'ds','Germany':'y'}) ax = df.set_index('ds').plot(figsize=(12, 8), logy=True) ax.set_ylabel('Daily Number of confimed cases') ax.set_xlabel('Date') plt.show() my_model=Prophet(growth='logistic') #(interval_width=0.95) df['cap'] = 1000000 #remove this if using intervl/linear fitting my_model.fit(df) future_dates=my_model.make_future_dataframe(periods=50,freq='D') future_dates['cap']=1000000. #needed for logistic model future_dates.tail() forecast=my_model.predict(future_dates) # forecast will contain entire data, in this case df and forecasted data my_model.plot(forecast,uncertainty=True); import plotly.offline as py from fbprophet.plot import plot_plotly fig=plot_plotly(my_model,forecast) # This returns a plotly figure fig.update_layout( width=1024, height=900, xaxis_title="Time", yaxis_title="Confirmed infected people (source johns hopkins csse, log-scale)", ) fig.update_yaxes(type="log",range=[1.1,5.5]) py.iplot(fig) forecast.sort_values(by='ds').head() my_model.plot_components(forecast);#second graph shows the seasonality (weekly behavior) forecast[['ds','trend']].set_index('ds').plot(figsize=(12, 8),logy=True) #linear trend model with changepoint detection #is used in facebook prophet. refer the third link in the start of the paper to read about this. section(3.1.2)Cross Validationfrom fbprophet.diagnostics import cross_validation df_cv=cross_validation(my_model, initial='50 days', # we take the first 30 days for training period='1 days',# every days a new prediction run horizon='7 days')#we predict 7days into the future # the 'initial'(1-50) value is taken for training and predicts the 'horizon' number of days. The the window # moves my one day (2-51) and use it as training and predicts fr next 7 days. my_model.plot(df_cv,uncertainty=True); df_cv.sort_values(by=['cutoff','ds'])[0:14] from fbprophet.diagnostics import performance_metrics df_p=performance_metrics(df_cv) # shows the performance metrics for the horizon number of days df_p from fbprophet.plot import plot_cross_validation_metric fig=plot_cross_validation_metric(df_cv,'mape')Diagonal Plot gives a good understanding for the under and over estimation w.r.t. magnitude Every forecast model should be checked by a diagonal plot. 
Diagonal PlotA diagonal plot gives a good understanding of under- and over-estimation with respect to magnitude, so every forecast model should be checked with one; it is used to find out distortion or bias from underestimating or overestimating. horizon='7 days' df_cv['horizon']=df_cv.ds-df_cv.cutoff #subtracting the dates date_vec=df_cv[df_cv['horizon']==horizon]['ds'] y_hat=df_cv[df_cv['horizon']==horizon]['yhat'] y=df_cv[df_cv['horizon']==horizon]['y'] df_cv_7=df_cv[df_cv['horizon']==horizon] df_cv_7.head() type(df_cv['horizon'][0]) fig, ax=plt.subplots(1,1) ax.plot(np.arange(max(y)),np.arange(max(y)),'--',label='diagonal') ax.plot(y,y_hat,'-',label=horizon) ax.set_title('Diagonal plot') ax.set_ylim(10,max(y)) ax.set_xlabel('truth:y') ax.set_ylabel('prediction: y_hat') ax.set_yscale('log') ax.set_xlim(10,max(y)) ax.set_xscale('log') ax.legend(loc='best');Trivial Forecastdef mean_absolute_percentage_error(y_true,y_pred): return np.mean(np.abs((y_true-y_pred)/y_true))*100 parse_dates=['date'] df_all = pd.read_csv('../data/processed/COVID_small_flat_table.csv',sep=';',parse_dates=parse_dates) df_trivial=df_all[['date','Germany']] df_trivial=df_trivial.rename(columns={'date': 'ds', 'Germany': 'y'})One standard trivial forecast is a rolling mean. Another standard forecast is the exponentially-weighted moving average; check pandas.ewma.df_trivial['y_mean_3']=df_trivial['y'].rolling(3).mean() df_trivial['cutoff']=df_trivial['ds'].shift(7) df_trivial['y_hat']=df_trivial['y_mean_3'].shift(7) df_trivial['horizon']=df_trivial['ds']-df_trivial['cutoff'] print('MAPE: '+str(mean_absolute_percentage_error(df_trivial['y_hat'].iloc[12:,], df_trivial['y'].iloc[12:,]))) df_trivialMAPE: 168.0626838435285Numerical Experiment 6 - Topic: Block-based solution with varying block size**Responsible:** 0. ObjectiveThis document presents the results obtained with the implementation developed, in a setting where the block size is varied for matrices with a condition number between $1$ and $10$ of the order of $10^3$.**Note:** The general report of the results obtained with the implementations developed in this project can be consulted at the following [link](https://github.com/mno-2020-gh-classroom/ex-modulo-3-comp-matricial-svd-czammar/blob/master/results/Reporte_resultados.ipynb) 
1. ConsiderationsThe numerical experiments carried out to consolidate this report are based on the following premises: [Pending: to be developed]. In this sense, for each experiment performed, the following are reported as applicable:* the parameters used in the simulations, * the dimensions of the matrices and vectors involved, as well as the pseudo-random procedure that generated them, * 1) the time needed to run the experiments, * 2) the condition number of the pseudo-random matrices, and * 3) the relative error obtained for the solution of a linear system of the form $Ax=b$, that is, the quotient$$|| Ax - b ||_2 / || b ||_2$$* 4) the norm of $A$ and of the matrix obtained by reassembling $A$ after multiplying the factors produced by the approximation of the SVD via the **One-Sided Jacobi** algorithm 1.1 Considerations on the infrastructure used**Specifications of the common working environment**This numerical experiment was run with the R-based docker image of the MNO 2020 course (palmoreck/jupyterlab_r_kernel:1.1.0)```docker run --rm -v `pwd`:/datos --name jupyterlab_r_kernel_local -p 8888:8888 -d palmoreck/jupyterlab_r_kernel:1.1.0```*Note:* the "-v \`pwd\`:/datos" option mounts the current directory in which the user's terminal session is located as a volume of the docker image, under the "/datos" directory. 2. Numerical experiment**Objective**Modify the condition number of the matrix and the dimensions of the blocks to observe the effect on the relative error and on the execution time.In this regard, this experiment was carried out to test how the number of selected blocks affects the execution time.**We load previously developed code*** **utils.R:** contains the auxiliary functions developed for the project* **00-load.R:** the implementation of the block elimination method, using the approximation of the SVD via the **One-Sided Jacobi** algorithm.## Install packages rm(list = ls()) paquetes <- c('matrixcalc','pracma') instalar <- function(paquete) { if (!require(paquete,character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)) { install.packages(as.character(paquete), dependencies = TRUE, repos = "http://cran.us.r-project.org") library(paquete, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE) } } lapply(paquetes, instalar) ## Load required packages library("matrixcalc") library("pracma") #source("metadata.R") source("utils.R") source("00-load.R")2.1 Experiment 6.1The number of blocks is changed across different runs.| | Parameters | Value/Range of values | Comments ||:-:|:--------------------------:|:----------------------:|:-----------:|| 1 | Dimensions of $A$ | $10^2x10^2$ | || 2 | Dimension of $b$ | $10^2$ | || 3 | Tolerance | $10^{-8}$ | || 4 | Maxsweep | 5 | || 5 | Pseudo-random method | SVD jacobi | || 6 | Condition number of $A$ | 429.54 | Obtained with the cond function |**Code:** brief description of what the code does and the results.set.seed(231) n= 10**2 TOL = 10**-8 A = matrix(sample(-100:100,n*n,replace=TRUE), ncol=n) b = matrix(rnorm(n), ncol=1) inicio<-Sys.time() z<-eliminacion_bloques(A,b,n/2,10^-8,5) print("#####################") print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "#####################" [1] "Error relativo"
**Results**The results obtained are summarized in the following table [Pending: please adjust the table according to the experiment]| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |1.70 min | || 2 | Relative error $||Ax-b||/||b||$ |1.47576 | || 3 | Block size |$50$ | |inicio<-Sys.time() z<-eliminacion_bloques(A,b,20,10^-8,5) print("#####################") print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "#####################" [1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |28.57 sec | || 2 | Relative error $||Ax-b||/||b||$ |2.3343 | || 3 | Block size |$20$ | |inicio<-Sys.time() z<-eliminacion_bloques(A,b,10,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "#####################" [1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |12.77 sec | || 2 | Relative error $||Ax-b||/||b||$ |2.12 | || 6 | Block size |$10$ | | 2.2 Experiment 6.2This experiment is based on the following parameters [Pending: please adjust the table according to the experiment]| | Parameters | Value/Range of values | Comments ||:-:|:--------------------------:|:----------------------:|:-----------:|| 1 | Dimensions of $A$ | $10x10$ | || 2 | Dimension of $b$ | 10 | || 3 | Tolerance | $10^{-8}$ | || 4 | Maxsweep | 5 | || 5 | Pseudo-random method | | | 6 | Condition number of $A$ | 80.6159 | Obtained with the cond() method |**Code:** brief description of what the code does and the results.set.seed(231) n= 10**1 TOL = 10**-8 lim<-1 A = matrix(runif(n*n,min=-lim,max=lim), ncol=n) b = matrix(runif(n,min=-lim,max=lim), ncol=1) cond(A) inicio<-Sys.time() z<-eliminacion_bloques(A,b,2,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"**Results**The results obtained are summarized in the following table [Pending: please adjust the table according to the experiment] | | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |0.05477 sec | || 2 | Relative error $||Ax-b||/||b||$ |0.7489 | || 6 | Block size |$2$ | |inicio<-Sys.time() z<-eliminacion_bloques(A,b,5,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |0.109 sec | || 2 | Relative error $||Ax-b||/||b||$ |1.0271 | || 6 | Block size |$5$ | |
inicio<-Sys.time() z<-eliminacion_bloques(A,b,7,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |0.1003 sec | || 2 | Relative error $||Ax-b||/||b||$ |0.8922 | || 6 | Block size |$7$ | | 2.3 Experiment 6.3This experiment is based on the following parameters | | Parameters | Value/Range of values | Comments ||:-:|:--------------------------:|:----------------------:|:-----------:|| 1 | Dimensions of $A$ | $10^3x10^3$ | || 2 | Dimension of $b$ | $10^3$ | || 3 | Tolerance | $10^{-8}$ | || 4 | Maxsweep | 5 | || 5 | Pseudo-random method | | Obtained with method XXX || 6 | Condition number of $A$ | 470296 | Obtained with the cond() function |**Code:** brief description of what the code does and the results.set.seed(231) n= 10**2 TOL = 10**-8 lim<-10 A = matrix(rnorm(n*n,mean=50,sd=12), ncol=n) b = matrix(runif(n,min=-lim,max=lim), ncol=1) cond(A) inicio<-Sys.time() z<-eliminacion_bloques(A,b,50,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |1.085 min | || 2 | Relative error $||Ax-b||/||b||$ |1.4911| || 6 | Block size |50 | |inicio<-Sys.time() z<-eliminacion_bloques(A,b,20,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"| | Parameters | Value/Range of values | Comments ||:-:|:----------------------------------------:|:----------------------:|:---------------------------------:|| 1 | Execution time |18.30 sec | || 2 | Relative error $||Ax-b||/||b||$ |1.2141 | || 6 | Block size |20 | |inicio<-Sys.time() z<-eliminacion_bloques(A,b,10,10^-8,5) print("Error relativo") norm(A%*%z-b,"2")/norm(b,"2") print("#####################") print("Número de condición") cond(A) print("#####################") fin<-Sys.time() fin-inicio[1] "Error relativo"Quality Controlling a generic data object Objective:This notebook shows how to use CoTeDe with a generic dataset. If you want to use CoTeDe on your own dataset, or want to plug CoTeDe into your application, this notebook is for you. How to use CoTeDe to quality control any type of measurement?CoTeDe operates with a minimalist common data model to connect with other applications. To use it from another application, all you need to do is provide your data in that standard. For this example, let's call the dataset object 'ds'.CoTeDe expects common information about the dataset, like the latitude of the profile, to be accessed as: ds.attrs['latitude'] or ds.attrs['datetime'], while the measurements and auxiliary variables are accessed directly, like: ds['temperature'] or ds['depth']With that structure, each test implemented in CoTeDe knows where to search for the relevant information. 
For instance, the 'at sea' test for a profile only needs to know the latitude and longitude, which, as described above, are available in ds.attrsLet's see a real example.# Different versions of CoTeDe might give slightly different outputs. # Please let me know if you see something that I should update. import cotede print("CoTeDe version: {}".format(cotede.__version__)) # Importing some requirements from datetime import datetime import numpy as np from numpy import ma from cotede.qc import ProfileQCLet's create a minimalist class that behaves as CoTeDe expects. It is like a dictionary of the relevant variables with a property attrs holding some metadata.class DummyDataset(object): """Minimalist data object that contains data and attributes """ def __init__(self): """Two dictionaries to store the data and attributes """ self.attrs = {} self.data = {} def __getitem__(self, key): """Return the requested item from the data """ return self.data[key] def keys(self): """Show the available variables in data """ return self.data.keys() @property def attributes(self): """Temporary requirement while Gui is refactoring CoTeDe. This will soon be unnecessary """ return self.attrsLet's create an empty data object.mydata = DummyDataset()Let's define some metadata, such as the position and the time at which the profile was measured.mydata.attrs['datetime'] = datetime(2016,6,4) mydata.attrs['latitude'] = 15 mydata.attrs['longitude'] = -38 print(mydata.attrs){'datetime': datetime.datetime(2016, 6, 4, 0, 0), 'latitude': 15, 'longitude': -38}Now let's create some data. Here I'll create pressure, temperature, and salinity. I'm using masked arrays, but plain arrays would work as well.Here I'm making these values up, but in a real-world case we would be reading from a netCDF file, an ASCII file, an SQL query, or whatever your data source is.mydata.data['PRES'] = ma.fix_invalid([2, 6, 10, 21, 44, 79, 100, 150, 200, 400, 410, 650, 1000, 2000, 5000]) mydata.data['TEMP'] = ma.fix_invalid([25.32, 25.34, 25.34, 25.31, 24.99, 23.46, 21.85, 17.95, 15.39, 11.08, 6.93, 7.93, 5.71, 3.58, np.nan]) mydata.data['PSAL'] = ma.fix_invalid([36.49, 36.51, 36.52, 36.53, 36.59, 36.76, 36.81, 36.39, 35.98, 35.30, 35.28, 34.93, 34.86, np.nan, np.nan])Let's check the available variablesmydata.keys()Let's check one of the variables, temperature:mydata['TEMP']Now that we have our data and metadata in this object, CoTeDe can do its job. In this example let's evaluate this fictitious profile using the EuroGOOS recommended QC tests. For that we can use ProfileQC() like:pqced = ProfileQC(mydata, cfg='eurogoos')The returned object (pqced) has the same content as the original mydata. Let's check the variables again,pqced.keys()But now there is a new property named 'flags', which is a dictionary with all the tests applied and the resulting flags. Those flags are grouped by variable.pqced.flags.keys()Let's see which flags are available for temperature,pqced.flags['TEMP'].keys()Let's check the flags for the depth-conditional gradient test, as defined by EuroGOOSpqced.flags['TEMP']['gradient_depthconditional']One means that the measurement was approved by this test. Nine means that the data was not available or not valid at that level. And zero means no QC. For the gradient test it is not possible to evaluate the first or the last values (check the manual), so those measurements exist but the flag is zero. The overall flag is a special one that combines all other flags by taking the most critical assessment. 
If a single test identify a problem and flag as 4 (bad data), the overall flag for that measurement will be 4 even if that measurement passed in all other tests. Therefore, a measurement with flag 1 (good value) means that it was approved in all other tests.pqced.flags['PSAL']['overall']Amazon Personalize Workshop Part 3 - Cleanup the resources> After building your model you may want to delete your campaign, solutions, and datasets.- toc: true- badges: true- comments: true- categories: [amazonpersonalize, movie]- image:# Imports import boto3 import json import numpy as np import pandas as pd import time # Configure the SDK to Personalize: personalize = boto3.client('personalize') personalize_runtime = boto3.client('personalize-runtime')Defining the Things to CleanupUsing the store command we will retrieve all the values needed to cleanup our work.%store -r # Delete the campaign: personalize.delete_campaign(campaignArn=campaign_arn) time.sleep(60) # Delete the solution personalize.delete_solution(solutionArn=solution_arn) time.sleep(60) # Delete the event tracker personalize.delete_event_tracker(eventTrackerArn=event_tracker_arn) time.sleep(60) # Delete the interaction dataset personalize.delete_dataset(datasetArn=dataset_arn) time.sleep(60) # Delete the event dataset event_interactions_dataset_arn = dataset_arn event_interactions_dataset_arn = event_interactions_dataset_arn.replace("INTERACTIONS", "EVENT_INTERACTIONS") personalize.delete_dataset(datasetArn=event_interactions_dataset_arn) time.sleep(60) # Delete the schema personalize.delete_schema(schemaArn=schema_arn)Empty Your S3 BucketNext empty your S3 bucket, you uploaded a movie file to it in the first notebook.boto3.Session().resource('s3').Bucket(bucket).Object(filename).delete()IAM Policy CleanupThe very last step in the notebooks is to remove the policies that were attached to a role and then to delete it. No changes should need to be made here, just execute the cell.# IAM policies should also be removed iam = boto3.client("iam") iam.detach_role_policy(PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess", RoleName=role_name) iam.detach_role_policy(PolicyArn="arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess",RoleName=role_name) iam.delete_role(RoleName=role_name)Super Resolution with PaddleGAN and OpenVINOThis notebook demonstrates converting the RealSR (real-world super-resolution) model from [PaddlePaddle/PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN) to OpenVINO's Intermediate Representation (IR) format, and shows inference results on both the PaddleGAN and IR models.For more information about the various PaddleGAN superresolution models, see [PaddleGAN's documentation](https://github.com/PaddlePaddle/PaddleGAN/blob/develop/docs/en_US/tutorials/single_image_super_resolution.md). For more information about RealSR, see the [research paper](https://openaccess.thecvf.com/content_CVPRW_2020/papers/w31/Ji_Real-World_Super-Resolution_via_Kernel_Estimation_and_Noise_Injection_CVPRW_2020_paper.pdf) from CVPR 2020.This notebook works best with small images (up to 800x600). 
Importsimport sys import time import warnings from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np import paddle from IPython.display import HTML, FileLink, ProgressBar, clear_output, display from IPython.display import Image as DisplayImage from PIL import Image from openvino.runtime import Core, PartialShape from paddle.static import InputSpec from ppgan.apps import RealSRPredictor sys.path.append("../utils") from notebook_utils import NotebookAlertSettings# The filenames of the downloaded and converted models MODEL_NAME = "paddlegan_sr" MODEL_DIR = Path("model") OUTPUT_DIR = Path("output") OUTPUT_DIR.mkdir(exist_ok=True) model_path = MODEL_DIR / MODEL_NAME ir_path = model_path.with_suffix(".xml") onnx_path = model_path.with_suffix(".onnx")Inference on PaddlePaddle Model Investigate PaddleGAN ModelThe [PaddleGAN documentation](https://github.com/PaddlePaddle/PaddleGAN) explains to run the model with `sr.run()`. Let's see what that function does, and check other relevant functions that are called from that function. Adding `??` to the methods shows the docstring and source code.# Running this cell will download the model weights if they have not been downloaded before # This may take a while sr = RealSRPredictor() sr.run?? sr.run_image?? sr.norm?? sr.denorm??The run checks whether the input is an image or a video. For an image, it loads the image as an RGB image, normalizes it, and converts it to a Paddle tensor. It is propagated to the network by calling `self.model()` and then "denormalized". The normalization function simply divides all image values by 255. This converts an image with integer values in the range of 0 to 255 to an image with floating point values in the range of 0 to 1. The denormalization function transforms the output from network shape (C,H,W) to image shape (H,W,C). It then clips the image values between 0 and 255, and converts the image to a standard RGB image with integer values in the range of 0 to 255.To get more information about the model, we can check what it looks like with `sr.model??`.# sr.model??Do InferenceTo show inference on the PaddlePaddle model, set PADDLEGAN_INFERENCE to True in the cell below. Performing inference may take some time.# Set PADDLEGAN_INFERENCE to True to show inference on the PaddlePaddle model. # This may take a long time, especially for larger images. # PADDLEGAN_INFERENCE = False if PADDLEGAN_INFERENCE: # load the input image and convert to tensor with input shape IMAGE_PATH = Path("data/coco_tulips.jpg") image = cv2.cvtColor(cv2.imread(str(IMAGE_PATH)), cv2.COLOR_BGR2RGB) input_image = image.transpose(2, 0, 1)[None, :, :, :] / 255 input_tensor = paddle.to_tensor(input_image.astype(np.float32)) if max(image.shape) > 400: NotebookAlert( f"This image has shape {image.shape}. Doing inference will be slow " "and the notebook may stop responding. 
Set PADDLEGAN_INFERENCE to False " "to skip doing inference on the PaddlePaddle model.", "warning", ) if PADDLEGAN_INFERENCE: # Do inference, and measure how long it takes print(f"Start superresolution inference for {IMAGE_PATH.name} with shape {image.shape}...") start_time = time.perf_counter() sr.model.eval() with paddle.no_grad(): result = sr.model(input_tensor) end_time = time.perf_counter() duration = end_time - start_time result_image = ( (result.numpy().squeeze() * 255).clip(0, 255).astype("uint8").transpose((1, 2, 0)) ) print(f"Superresolution image shape: {result_image.shape}") print(f"Inference duration: {duration:.2f} seconds") plt.imshow(result_image);Convert PaddleGAN Model to ONNX and OpenVINO IRTo convert the PaddlePaddle model to OpenVINO IR, we first convert the model to ONNX, and then convert the ONNX model to the IR format. Convert PaddlePaddle Model to ONNX# Ignore PaddlePaddle warnings: # The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) warnings.filterwarnings("ignore") sr.model.eval() # ONNX export requires an input shape in this format as parameter x_spec = InputSpec([None, 3, 299, 299], "float32", "x") paddle.onnx.export(sr.model, str(model_path), input_spec=[x_spec], opset_version=13)Convert ONNX Model to OpenVINO IR## Uncomment the command below to show Model Optimizer help, which shows the possible arguments for Model Optimizer # ! mo --help if not ir_path.exists(): print("Exporting ONNX model to IR... This may take a few minutes.") ! mo --input_model $onnx_path --input_shape "[1,3,299,299]" --model_name $MODEL_NAME --output_dir "$MODEL_DIR" --data_type "FP16" --log_level "CRITICAL"Do Inference on IR Model# Read network and get input and output names ie = Core() model = ie.read_model(model=ir_path) input_layer = next(iter(model.inputs)) # Load and show image IMAGE_PATH = Path("data/coco_tulips.jpg") image = cv2.cvtColor(cv2.imread(str(IMAGE_PATH)), cv2.COLOR_BGR2RGB) if max(image.shape) > 800: NotebookAlert( f"This image has shape {image.shape}. The notebook works best with images with " "a maximum side of 800x600. 
Larger images may work well, but inference may " "be slow", "warning", ) plt.imshow(image) # Reshape network to image size model.reshape({input_layer.any_name: PartialShape([1, 3, image.shape[0], image.shape[1]])}) # Load network to the CPU device (this may take a few seconds) compiled_model = ie.compile_model(model=model, device_name="CPU") output_layer = next(iter(compiled_model.outputs)) # Convert image to network input shape and divide pixel values by 255 # See "Investigate PaddleGAN model" section input_image = image.transpose(2, 0, 1)[None, :, :, :] / 255 start_time = time.perf_counter() # Do inference ir_result = compiled_model([input_image])[output_layer] end_time = time.perf_counter() duration = end_time - start_time print(f"Inference duration: {duration:.2f} seconds") # Get result array in CHW format result_array = ir_result.squeeze() # Convert array to image with same method as PaddleGAN: # Multiply by 255, clip values between 0 and 255, convert to HWC INT8 image # See "Investigate PaddleGAN model" section image_super = (result_array * 255).clip(0, 255).astype("uint8").transpose((1, 2, 0)) # Resize image with bicubic upsampling for comparison image_bicubic = cv2.resize(image, tuple(image_super.shape[:2][::-1]), interpolation=cv2.INTER_CUBIC) plt.imshow(image_super)Show Animated GIFTo visualize the difference between the bicubic image and the superresolution image, we create an imated gif that switches between both versions.result_pil = Image.fromarray(image_super) bicubic_pil = Image.fromarray(image_bicubic) gif_image_path = OUTPUT_DIR / Path(IMAGE_PATH.stem + "_comparison.gif") final_image_path = OUTPUT_DIR / Path(IMAGE_PATH.stem + "_super.png") result_pil.save( fp=str(gif_image_path), format="GIF", append_images=[bicubic_pil], save_all=True, duration=1000, loop=0, ) result_pil.save(fp=str(final_image_path), format="png") DisplayImage(open(gif_image_path, "rb").read(), width=1920 // 2)Create Comparison VideoCreate a video with a "slider", showing the bicubic image to the right and the superresolution image on the left. For the video, the superresolution and bicubic image are resized to half the original width and height, to improve processing speed. This gives an indication of the superresolution effect. The video is saved as an .avi video. 
You can click on the link to download the video, or open it directly from the images directory, and play it locally.FOURCC = cv2.VideoWriter_fourcc(*"MJPG") IMAGE_PATH = Path(IMAGE_PATH) result_video_path = OUTPUT_DIR / Path(f"{IMAGE_PATH.stem}_comparison_paddlegan.avi") video_target_height, video_target_width = ( image_super.shape[0] // 2, image_super.shape[1] // 2, ) out_video = cv2.VideoWriter( str(result_video_path), FOURCC, 90, (video_target_width, video_target_height), ) resized_result_image = cv2.resize(image_super, (video_target_width, video_target_height))[ :, :, (2, 1, 0) ] resized_bicubic_image = cv2.resize(image_bicubic, (video_target_width, video_target_height))[ :, :, (2, 1, 0) ] progress_bar = ProgressBar(total=video_target_width) progress_bar.display() for i in range(2, video_target_width): # Create a frame where the left part (until i pixels width) contains the # superresolution image, and the right part (from i pixels width) contains # the bicubic image comparison_frame = np.hstack( ( resized_result_image[:, :i, :], resized_bicubic_image[:, i:, :], ) ) # create a small black border line between the superresolution # and bicubic part of the image comparison_frame[:, i - 1 : i + 1, :] = 0 out_video.write(comparison_frame) progress_bar.progress = i progress_bar.update() out_video.release() clear_output() video_link = FileLink(result_video_path) video_link.html_link_str = "
%s" display(HTML(f"The video has been saved to {video_link._repr_html_()}"))Introduction to MatplotlibThis notebook provides an introduction to the `matplotlib` package. The content borrows heavily from the book *Data Science Handbook*, which was written by and is available at https://jakevdp.github.io/PythonDataScienceHandbook/ (accessed 12/17/2019). From https://en.wikipedia.org/wiki/Matplotlib (accessed 12/26/2018):> `matplotlib` is a plotting library for the Python programming language and its numerical mathematics extension NumPy. It provides an object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter, wxPython, Qt, or GTK+.The following table of contents lists the topics discussed in this notebook. Clicking on any topic will advance the notebook to the associated area. Table of Contents 1. [Preliminaries](preliminaries)2. [Simple Line Plots](simple_line_plots)3. [Scatter Plots](scatter_plots)4. [Histograms](histograms)5. [Contour Plots](contour_plots) DisclaimerThis notebook provides a very high-level look at the `matplotlib`, not a comprehensive overview. Also, it is important to realize that the Python language and the available packages will continue to evolve. That being said, the objects, functions, and methods described in this notebook may one day change. If changes occur, areas of this notebook that use deprecated features may cease to work and will need to be revised or omitted.[Back to Table of Contents](Table_of_Contents) Preliminaries `matplotlib` is a large package that offers a lot of features for plotting. However, the large number of features can result in a rather slow learning curve. A particularly confusing aspect of `matplotlib` when just starting to work with the package is that it provides dual interfaces. In particular, it provides 1) an interface that was designed to mimic the mathematical programming software *MATLAB* and 2) object-oriented interface. In this notebook, we will be using the object-oriented interface. Moreover, we will utilie a consistent framework for plotting in an effort to provide a relatively easy model for how to work with the package. Simple Line Plots In this section, we will begin our introduction to `matplotlib` by demonstrating the construction of a simple line plot. The following code block imports `numpy` and uses the packacge to generate an array that includes values $[-10,~ -9.8,~ -9.6,~ \ldots,~ 9.6,~ 9.8,~ 10]$. [Back to Table of Contents](Table_of_Contents)import numpy as np x = np.linspace(-10, 10, 101)In the following code block, we import a particular module of the `matplotlib` package. Specifically, we import the `pyplot` module and denote that we will be using the alias `plt` to refer to the module. This alias is a standard convention.[Back to Table of Contents](Table_of_Contents)import matplotlib.pyplot as pltAs mentioned earlier, we will be using `matplotlib`'s object-oriented interfaces. Essentially, an object-oriented programming approach aims to model programming constructs as a set of objects that can be controlled and manipulated in a modular manner. Instead of going into detail on what makes the object-oriented approach different, we will simply demonstrate it using a consistent framework that allows a use to construct a multi-plot figure.The following code block use the `subplots()` method of the `matplotlib.pyplot` module to create an empty plotting region that contains a single subplot. 
**Note that the call to the `subplots()` method returns a tuple that we store in the variables `fig` and `ax`. The `fig` variable allows us to access the *figure object*. The `ax` variable store a tuple that we will use to modify individual suplots that compose to the figure object. Calling `matplotlib.pyplot`'s `show()` method renders the current figure.** [Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6,6)) plt.show()Previously, we simply created and plotted an empty figure object. Inserting additional instructions between the figure creation and the call to `plt.show()` allows us to create visulaizations that range from simple to very complex. In the following code blocks, we will build our simple line plot slowly by adding one feature at a time. The next code block adds a grid to our `ax` object. In this case, we have defined a figure object that is composed by a single subplot. Thus, we can use the `ax` object directly. We will see later that when the figure object is composed of multiple subplots, we can use `numpy`-like indexing to make changes to specifiec subplots.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6,6)) ax.grid(True) plt.show()Next, we use the `plot()` method for our `ax` object to plot the value of the sine function for each value stored in our `x` array object.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6,6)) ax.grid(True) ax.plot(x, np.sin(x)) plt.show()Next, we use the `set_title()`, `set_ylabel`, and `set_xlabel` methods of our `ax` object to define labels for the axes and a title for the subplot.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6,6)) ax.grid(True) ax.plot(x, np.sin(x)) ax.set_title(r'Plot of $sin(x)$', fontsize = 20) ax.set_ylabel(r'$sin(x)$', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) plt.show()Let's suppose that we would like to plot multiple trigonometric functions. We can do this by modifying the previous code so that the figure object is composed of multiple subplots. The following code block shows how we can modfiy the previous code to include two subplots, one for the sine function and another for the cosine function. The first two arguments to the `subplots()` method define the number of rows and columns in the figure, respectively. Thus, our figure object includes one row of two subplots. We may access each of the subplots using syntax similar to that which we would use to access the elements of a one-dimensional Numpy array.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 2, figsize = (15, 6)) ax[0].grid(True) ax[0].plot(x, np.sin(x)) ax[0].set_title(r'Plot of $sin(x)$', fontsize = 20) ax[0].set_ylabel(r'$sin(x)$', fontsize = 20) ax[0].set_xlabel(r'$x$', fontsize = 20) ax[1].grid(True) ax[1].plot(x, np.cos(x)) ax[1].set_title(r'Plot of $cos(x)$', fontsize = 20) ax[1].set_ylabel(r'$cos(x)$', fontsize = 20) ax[1].set_xlabel(r'$x$', fontsize = 20) plt.show()Let's now consider the case where we want to add another subplot for the tangent function. The following code block shows how we can add such a plot as a subplot on a new row in our figure object. Note that now we have defined a figure object with two rows and two columns. 
Thus, when accessing individual subplots, we use syntax similar to that which we would use to access elements in a two-dimensional Numpy array object.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(2, 2, figsize = (15, 12)) ax[0, 0].grid(True) ax[0, 0].plot(x, np.sin(x)) ax[0, 0].set_title(r'Plot of $sin(x)$', fontsize = 20) ax[0, 0].set_ylabel(r'$sin(x)$', fontsize = 20) ax[0, 0].set_xlabel(r'$x$', fontsize = 20) ax[0, 1].grid(True) ax[0, 1].plot(x, np.cos(x)) ax[0, 1].set_title(r'Plot of $cos(x)$', fontsize = 20) ax[0, 1].set_ylabel(r'$cos(x)$', fontsize = 20) ax[0, 1].set_xlabel(r'$x$', fontsize = 20) ax[1, 0].grid(True) ax[1, 0].plot(x, np.tan(x)) ax[1, 0].set_title(r'Plot of $tan(x)$', fontsize = 20) ax[1, 0].set_ylabel(r'$tan(x)$', fontsize = 20) ax[1, 0].set_xlabel(r'$x$', fontsize = 20) plt.show()The following code block use the `axis()` method of the `ax` object to *turn off* the subplot that we are not using.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(2, 2, figsize = (15, 12)) ax[0, 0].grid(True) ax[0, 0].plot(x, np.sin(x)) ax[0, 0].set_title(r'Plot of $sin(x)$', fontsize = 20) ax[0, 0].set_ylabel(r'$sin(x)$', fontsize = 20) ax[0, 0].set_xlabel(r'$x$', fontsize = 20) ax[0, 1].grid(True) ax[0, 1].plot(x, np.cos(x)) ax[0, 1].set_title(r'Plot of $cos(x)$', fontsize = 20) ax[0, 1].set_ylabel(r'$cos(x)$', fontsize = 20) ax[0, 1].set_xlabel(r'$x$', fontsize = 20) ax[1, 0].grid(True) ax[1, 0].plot(x, np.tan(x)) ax[1, 0].set_title(r'Plot of $tan(x)$', fontsize = 20) ax[1, 0].set_ylabel(r'$tan(x)$', fontsize = 20) ax[1, 0].set_xlabel(r'$x$', fontsize = 20) ax[1, 1].axis('off') plt.show()The following code block uses `suptitle()` method of our `fig` object to set a title for the figure object.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(2, 2, figsize = (15, 12)) ax[0, 0].grid(True) ax[0, 0].plot(x, np.sin(x)) ax[0, 0].set_title(r'Plot of $sin(x)$', fontsize = 20) ax[0, 0].set_ylabel(r'$sin(x)$', fontsize = 20) ax[0, 0].set_xlabel(r'$x$', fontsize = 20) ax[0, 1].grid(True) ax[0, 1].plot(x, np.cos(x)) ax[0, 1].set_title(r'Plot of $cos(x)$', fontsize = 20) ax[0, 1].set_ylabel(r'$cos(x)$', fontsize = 20) ax[0, 1].set_xlabel(r'$x$', fontsize = 20) ax[1, 0].grid(True) ax[1, 0].plot(x, np.tan(x)) ax[1, 0].set_title(r'Plot of $tan(x)$', fontsize = 20) ax[1, 0].set_ylabel(r'$tan(x)$', fontsize = 20) ax[1, 0].set_xlabel(r'$x$', fontsize = 20) ax[1, 1].axis('off') fig.suptitle('Trigonometric Functions', fontsize = 25) plt.show()Let's supose that instead of using multiple subplots, we desired to plot all of the trigonometric function in a single plot. 
The following code block shows that we can add multiple plots, one at a time, via multiple calls to the `plot()` method of our `ax` object.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (12, 6)) ax.grid(True) ax.plot(x, np.sin(x)) ax.plot(x, np.cos(x)) ax.plot(x, np.tan(x)) ax.set_title(r'Plot of Trigonometric Functions', fontsize = 20) ax.set_ylabel(r'Value', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) plt.show()To help us determine what each line represents, we can add *labels* and then use the `legend()` method of our `ax` object to display a legend.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (12, 6)) ax.grid(True) ax.plot(x, np.sin(x), label = r'$sin(x)$') ax.plot(x, np.cos(x), label = r'$cos(x)$') ax.plot(x, np.tan(x), label = r'$tan(x)$') ax.set_title(r'Plot of Trigonometric Functions', fontsize = 20) ax.set_ylabel(r'Value', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) ax.legend() plt.show()Although the legend is helpful, we can make it easier to differentiate between the plots by using diferent linestyles and colors for each function as shown in the following code block.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (12, 6)) ax.grid(True) ax.plot(x, np.sin(x), label = r'$sin(x)$', linestyle = '--', color = 'k') ax.plot(x, np.cos(x), label = r'$cos(x)$', linestyle = ':', color = 'k') ax.plot(x, np.tan(x), label = r'$tan(x)$', linestyle = '-.', color = 'b') ax.set_title(r'Plot of Trigonometric Functions', fontsize = 20) ax.set_ylabel(r'Value', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) ax.legend() plt.show()In this section, we have demonstrated a simple framework for creating figures that are composed of multiple subplots using `matplotlib.pyplot`'s object-oriented interface. We will now look at how to create different plot types. Scatter Plots In this section, we will look at how to create scatter plots. The following code block generates a random sample of 50 data points that exhibit a linear relationship bewteen *x* and *y* variables, with some uniformly distributed error.[Back to Table of Contents](Table_of_Contents)size = 50 m = 1 b = 10 e = 4 x = 10*np.random.rand(size) y = m*x + b + (2*e)*np.random.rand(size)-eThe following code block uses the framework we saw in the previous section to construct a figure that is somposed of a single scatter plot for the data. Note that instead of use the `plot()` method of the `ax` object, we are now using the `scatter()` method to construct the scatter plot.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.grid(True) ax.scatter(x, y) ax.set_ylabel(r'$y$', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) plt.show()Let's now consider a case were we have two different groups of data that we wish to plot on a single subplot. 
The following code block generates such data.[Back to Table of Contents](Table_of_Contents)size = 50 m1 = 1 b1 = 10 e1 = 4 x1 = 10*np.random.rand(size) y1 = m1*x1 + b1 + (2*e1)*np.random.rand(size)-e1 m2 = -1 b2 = 20 e2 = 1 x2 = 10*np.random.rand(size) y2 = m2*x2 + b2 + (2*e2)*np.random.rand(size)-e2The following code block plots the two sets of data and defines a different marker style for one to help us differentiate.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.grid(True) ax.scatter(x1, y1, marker = 's') ax.scatter(x2, y2) ax.set_ylabel(r'$y$', fontsize = 20) ax.set_xlabel(r'$x$', fontsize = 20) plt.show()Histograms Histograms are very useful in data analysis because they provide us with a means to visualize the distribution of an underlying data set. In particular, a histogram bins the sample space, or support, of a data set into bins, and plots the frequency that values occur in each one of the bins. We will demonstrate the constrction of histograms with `matplotlib` using a random sample drawn from a lognormal distribution. The following code block uses Numpy's `random` module to generate the sample. [Back to Table of Contents](Table_of_Contents)size = 100000 np.random.seed(42) ln_sample = np.random.lognormal(mean = 0.5, sigma= 0.25, size = size)Again, using the previously define framework, the following code block uses the `hist()` method of the `ax` object to plot a histogram for the sample.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.hist(ln_sample) ax.set_ylabel('Frequency', fontsize = 20) ax.set_xlabel('Value', fontsize = 20) plt.show()In the following code block, we use the `edgecolor` argument to add a black border around the histogram bins.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.hist(ln_sample, edgecolor = 'k') ax.set_ylabel('Frequency', fontsize = 20) ax.set_xlabel('Value', fontsize = 20) plt.show()A triangular distribution is a distribution that is commonly used in simulation experiments to approximate the distribution for a random variable. The distribution requires three estimates: 1) a minimum value, 2) a maximum value, and 3) a *most likely* value.Suppose that we are interested in how well a triangular distribution will approximate our data. The following code block generates a random sample from a triangular distribution, where the minimum, maximum, and median values of our lognormal sample are used to specify the previously described parameters for the distribution.[Back to Table of Contents](Table_of_Contents)median_val = np.percentile(ln_sample, 50) min_val = ln_sample.min() max_val = ln_sample.max() tri_sample = np.random.triangular(min_val, median_val, max_val, size)The following code block plots the two random samples.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.hist(ln_sample, edgecolor = 'k', label = 'lognormal') ax.hist(tri_sample, edgecolor = 'k', label = 'triangular') ax.set_ylabel('Frequency', fontsize = 20) ax.set_xlabel('Value', fontsize = 20) ax.legend() plt.show()In the previous code block, it is difficult to see the differences because the histogram for the triangular distribution sample *covers up* that for the lognormal sample. 
We can use the `alpha` argument to make the plot for the triangular sample mre transparent, allowing us to see the diferences more clearly.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.hist(ln_sample, edgecolor = 'k', label = 'lognormal') ax.hist(tri_sample, edgecolor = 'k', alpha = 0.5, label = 'triangular') ax.set_ylabel('Frequency', fontsize = 20) ax.set_xlabel('Value', fontsize = 20) ax.legend() plt.show()By default, the `hist()` method will determine a reasonable number of bins to use for the provided data. However, a number of bins, or even specific bin edges, may be specifed as an optional argument. The following code block shows how a user-specifiednumber of bins can be provided. Note that changing the number of bins results in very different *pictures* for the underlying data (compare 3 bins to 100 bins).[Back to Table of Contents](Table_of_Contents)num_bins = 50 fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.hist(ln_sample, edgecolor = 'k', label = 'Lognormal', bins = num_bins) ax.hist(tri_sample, edgecolor = 'k', alpha = 0.5, label = 'Triangular', bins = num_bins) ax.set_ylabel('Frequency', fontsize = 20) ax.set_xlabel('Value', fontsize = 20) ax.legend() plt.show()Contour Plots From https://www.itl.nist.gov/div898/handbook/eda/section3/contour.htm (accessed 12/29/2018):> "A contour plot is a graphical technique for representing a 3-dimensional surface by plotting constant z slices, called contours, on a 2-dimensional format. That is, given a value for z, lines are drawn for connecting the (x,y) coordinates where that z value occurs."Contour plots can be very useful for visulizing interactions that are present in data. The following code block defines a function for a complex trigonometric expression. This function is applied to $x$ and $y$ values ranging from zero to five.[Back to Table of Contents](Table_of_Contents)def f(x, y): return np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) x = np.linspace(0, 5, 50) y = np.linspace(0, 5, 50) X, Y = np.meshgrid(x, y) Z = f(X, Y)Again, using our plotting framework, this time with our `ax` object's `contour` method, we plot our generated data.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) ax.contour(X, Y, Z, colors='black') ax.set_ylabel('Y', fontsize = 20) ax.set_xlabel('X', fontsize = 20) plt.show()The previous plot is not extremely useful because we do not understand the values that correspond to the various contours. Using the `countourf()` method instead allows us to generate filled contours that we may map to colorbar. 
This approach provides us with a better sense of how the function behaves, as can be seen in the following code block.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) contour_colors = ax.contourf(X, Y, Z, 20, cmap='RdGy') fig.colorbar(contour_colors) ax.set_ylabel('Y', fontsize = 20) ax.set_xlabel('X', fontsize = 20) plt.show()Using the `contour()` and `contourf()` methods together allows us to include labels along with the colorbar.[Back to Table of Contents](Table_of_Contents)fig, ax = plt.subplots(1, 1, figsize = (6, 6)) contour_colors = ax.contourf(X, Y, Z, 20, cmap='coolwarm') contours = ax.contour(X, Y, Z, 5, colors='black') fig.colorbar(contour_colors) contours.clabel() ax.set_ylabel(r'$Y$', fontsize = 20, rotation = 0, labelpad=20) ax.set_xlabel(r'$X$', fontsize = 20) plt.show()MNISTimport numpy as np from sklearn.datasets import load_digits import matplotlib.pyplot as plt %matplotlib inline digits = load_digits() plt.gray() plt.matshow(digits.images[2]) X, y = digits['data'], digits['target'] print(X.shape, y.shape) X_train, y_train, X_test, y_test = X[:1450], y[:1450], X[1450:], y[1450:]Shuffle the training setshuffle_index = np.random.permutation(1450) X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]Binary classificationTo simplify the problem, let's only try to recognize a single digit and decide whether the input digit is a 1 or not.y_train_1 = (y_train == 1) y_test_1 = (y_test == 1) y_trainSGDClassifierOne advantage of this classifier is that it can handle very large datasets efficiently. This is partly because SGD processes one training instance at a time, which also makes it well suited to online learning.from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier() sgd_clf.fit(X_train, y_train_1) sgd_clf.predict([X_train[1449]])Evaluating the model's performance with cross_val_scoreUse the `cross_val_score()` function to evaluate the `SGDClassifier` model with K-fold cross-validation, here with `k=3`.> Remember: K-fold cross-validation means splitting the training set into K folds (here 3), then training a model on the other folds and predicting on the held-out fold.from sklearn.model_selection import cross_val_score cross_val_score(sgd_clf, X_train, y_train_1, cv=3, scoring="accuracy")Over 95% accuracy under cross-validation? That looks surprisingly good.> It is only because the positive and negative classes are imbalanced. Confusion matrixThis is the same material Andrew Ng covers when discussing precision and recall. __Each row of the confusion matrix represents an actual class, while each column represents a predicted class__.> For a binary problem the confusion matrix is a `2 x 2` matrix, with the same meaning as in Andrew Ng's course. To compute the confusion matrix you first need a set of predictions, so that they can be compared with the true values.> `cross_val_predict`: instead of returning an evaluation score, it __returns the predictions made on each test fold__. This means that for every instance in the training set you get a clean prediction ("clean" meaning that the prediction is made by a model that never saw that data during training).from sklearn.model_selection import cross_val_predict y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_1, cv=3) from sklearn.metrics import confusion_matrix confusion_matrix(y_train_1, y_train_pred) y_train_predPrecision / Recall / F1* Precision: TP / (TP + FP)* Recall: TP / (TP + FN)* F1: the harmonic mean of the two, 2 * precision * recall / (precision + recall)F1 favors classifiers that have similar precision and recall. That is not always what you want: in some settings you mostly care about precision, and in others you mostly care about recall. For example, if you train a classifier to detect videos that are safe for kids, you would probably prefer one that rejects many good videos but keeps only safe ones (high precision) over one with high recall that lets a few bad videos slip through (in that case you may want to add a human review stage for the selected videos). On the other hand, suppose you train a classifier to detect shoplifters in surveillance images: a classifier with 30% precision and 99% recall is probably fine (the guards will get some false alerts, but almost all shoplifters will get caught).> Unfortunately, you cannot have it both ways: increasing precision reduces recall, and vice versa. This is called the precision/recall trade-off.from sklearn.metrics import precision_score, recall_score, f1_score precision_score(y_train_1, y_train_pred) recall_score(y_train_1, y_train_pred) f1_score(y_train_1, y_train_pred)The precision/recall trade-offTo understand this trade-off, let's look at how the SGDClassifier makes its classification decisions. For each instance it computes a score based on a decision function; if that score is greater than a threshold, it assigns the instance to the positive class, otherwise to the negative class. __Raising the threshold increases precision but reduces recall; conversely, lowering the threshold increases recall and reduces precision__. Scikit-Learn does not let you set the threshold directly, __but it does give you access to the decision scores it uses to make predictions__. Instead of calling the classifier's `predict()` method, you call its `decision_function()` method, which returns a score for each instance; you can then make predictions with any threshold you want.sgd_clf.decision_function([X_train[1449]]) y_scores = sgd_clf.decision_function([X_train[1], X_train[432], X_train[643], X_train[1000], X_train[1449]]) threshold = 0 y_some_digit_pred = (y_scores > threshold) y_some_digit_predChoosing the thresholdWhich threshold should we use? First, we need to use `cross_val_predict()` again 
to get scores for every instance in the training set, but this time specifying that decision scores, not predictions, should be returned.y_scores = cross_val_predict(sgd_clf, X_train, y_train_1, cv=3, method="decision_function") y_scoresPlotting the Precision-Recall curvefrom sklearn.metrics import precision_recall_curve precisions, recalls, thresholds = precision_recall_curve(y_train_1, y_scores) def plot_precision_recall_vs_threshold(precisions, recalls, thresholds): plt.plot(thresholds, precisions[:-1], "b--", label="Precision") plt.plot(thresholds, recalls[:-1], "g-", label="Recall") plt.xlabel("Threshold") plt.legend(loc="upper left") plt.ylim([0, 1]) plot_precision_recall_vs_threshold(precisions, recalls, thresholds)ROC curveThe ROC curve plots the true positive rate (another name for recall) against the false positive rate (FPR).> The closer the area under the ROC curve (AUC) is to 1, the better the model.from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_train_1, y_scores) def plot_roc_curve(fpr, tpr, label=None): plt.plot(fpr, tpr, linewidth=2, label=label) plt.plot([0, 1], [0, 1], 'k--') plt.axis([0, 1, 0, 1]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plot_roc_curve(fpr, tpr)> The dotted line in the plot is the ROC curve of a purely random classifier; a good classifier's ROC curve should stay as far away from that line as possible (toward the top-left corner). One way to compare classifiers is to measure the area under the ROC curve (AUC). A perfect classifier has a ROC AUC equal to 1, whereas a purely random classifier has a ROC AUC of 0.5. Scikit-Learn provides a function to compute the ROC AUC:from sklearn.metrics import roc_auc_score roc_auc_score(y_train_1, y_scores)Multiclass classificationBinary classifiers can only distinguish between two classes, while multiclass classifiers (also called multinomial classifiers) can distinguish between more than two. Some algorithms (such as random forest classifiers or naive Bayes classifiers) can handle multiclass problems directly. Others (such as SVM classifiers or linear classifiers) are strictly binary. However, there are several strategies that let you perform multiclass classification with binary classifiers.__OvA__: for example, one way to build a system that classifies digit images into 10 classes (0 to 9) is to train 10 binary classifiers, one per digit (a 0-detector, a 1-detector, a 2-detector, and so on). Then, when you want to classify an image, you get the decision score from each classifier for that image and select the class whose classifier scores highest. This is called the "one-versus-all" (OvA) strategy (also called "one-versus-the-rest").__OvO__: another strategy is to train a binary classifier for every pair of digits: one to distinguish 0s and 1s, one for 0s and 2s, one for 1s and 2s, and so on. This is called the "one-versus-one" (OvO) strategy. If there are N classes, you need to train N*(N-1)/2 classifiers. For the MNIST problem that means training 45 binary classifiers! When you want to classify an image you have to run it through all 45 classifiers and see which class wins. The main advantage of OvO is that each classifier only needs to be trained on the part of the training set containing the two classes it must distinguish.> Some algorithms (such as SVM classifiers) scale poorly with the size of the training set, so OvO is preferred for them, since many trainings on small datasets are cheaper than a few trainings on a huge one. For most binary classifiers, however, OvA is the better choice.> Scikit-Learn detects when you try to use a binary classifier for a multiclass task and automatically runs OvA (except for SVM classifiers, for which it uses OvO).sgd_clf.fit(X_train, y_train) sgd_clf.predict([X_train[1449]])Learning curvesfrom sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt def plot_learning_curves(model, X, y): X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2) train_errors, val_errors = [], [] for m in range(2, len(X_train)): model.fit(X_train[:m], y_train[:m]) y_train_predict = model.predict(X_train[:m]) y_val_predict = model.predict(X_val) train_errors.append(mean_squared_error(y_train_predict, y_train[:m])) val_errors.append(mean_squared_error(y_val_predict, y_val)) plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train") plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val") sgd_clf = SGDClassifier() plot_learning_curves(sgd_clf, X_train, y_train)The code above trains an SGDClassifier on the training set. This classifier handles the original target classes from 0 to 9 (y_train) rather than only detecting whether the digit is a 1 (y_train_1). It then makes a prediction (in this case there is only one correct digit). Under the hood, __Scikit-Learn actually trains 10 binary classifiers, gets each one's decision score for the image, and selects the class with the highest score__.> To see that this is really what happens, you can call the `decision_function()` method. Instead of returning one score per instance, it returns 10 scores, one per class.decision_scores = sgd_clf.decision_function([X_train[1449]]) decision_scores sgd_clf.classes_[np.argmax(decision_scores)]If you want to force Scikit-Learn to use the OvO or OvA strategy, you can use the `OneVsOneClassifier` or `OneVsRestClassifier` classes. These classes return an estimator that you can use just like the original estimator.
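To make that last point concrete, here is a small sketch (not part of the original notebook) that wraps an SGDClassifier in OneVsRestClassifier to force the OvA strategy explicitly; it assumes the X_train and y_train defined above.
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import SGDClassifier

# Explicitly force one-versus-all (OvA/OvR): one binary classifier per class
ovr_clf = OneVsRestClassifier(SGDClassifier(random_state=42))
ovr_clf.fit(X_train, y_train)
print(len(ovr_clf.estimators_))          # 10 underlying binary classifiers
print(ovr_clf.predict([X_train[1449]]))  # used exactly like a normal estimator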
Error analysisFirst, you can look at the confusion matrix. You need to make predictions with `cross_val_predict()` and then call the `confusion_matrix()` function.y_train_pred = cross_val_predict(sgd_clf, X_train, y_train, cv=3) conf_mx = confusion_matrix(y_train, y_train_pred) conf_mxThat is a lot of raw numbers. It is often more convenient to look at the confusion matrix as an image, using Matplotlib's `matshow()` function.plt.matshow(conf_mx, cmap=plt.cm.gray)Error plotLet's focus the image on the errors only. First you need to divide each value in the confusion matrix by the total number of images in the corresponding class, so that you compare error rates rather than absolute error counts (which would be unfair to the larger classes).row_sums = conf_mx.sum(axis=1 , keepdims=True) norm_conf_mx = conf_mx / row_sums np.fill_diagonal(norm_conf_mx, 0) # fill the diagonal with 0 so that only the misclassified cells remain plt.matshow(norm_conf_mx, cmap=plt.cm.gray) # rows represent actual classes, columns represent predicted classes; the bright (white) cells are the errorsMachine Learning Engineer Nanodegree Model Evaluation & Validation Project 1: Predicting Boston Housing PricesWelcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. Getting StartedIn this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a *good fit* could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected in 1978 and each of the 506 entries represents aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:- 16 data points have an `'MEDV'` value of 50.0. These data points likely contain **missing or censored values** and have been removed.- 1 data point has an `'RM'` value of 8.78. This data point can be considered an **outlier** and has been removed.- The features `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` are essential. The remaining **non-relevant features** have been excluded.- The feature `'MEDV'` has been **multiplicatively scaled** to account for 35 years of market inflation.Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. 
You will know the dataset loaded successfully if the size of the dataset is reported.# Import libraries necessary for this project import numpy as np import pandas as pd import visuals as vs # Supplementary code from sklearn.cross_validation import ShuffleSplit # Pretty display for notebooks %matplotlib inline # Load the Boston housing dataset data = pd.read_csv('housing.csv') prices = data['MEDV'] features = data.drop('MEDV', axis = 1) # Success print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)Boston housing dataset has 489 data points with 4 variables each.Data ExplorationIn this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'`, and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, will be the variable we seek to predict. These are stored in `features` and `prices`, respectively. Implementation: Calculate StatisticsFor your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since `numpy` has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.In the code cell below, you will need to implement the following:- Calculate the minimum, maximum, mean, median, and standard deviation of `'MEDV'`, which is stored in `prices`. - Store each calculation in their respective variable.# TODO: Minimum price of the data minimum_price = np.amin(prices) # TODO: Maximum price of the data maximum_price = np.amax(prices) # TODO: Mean price of the data mean_price = np.mean(prices) # TODO: Median price of the data median_price = np.median(prices) # TODO: Standard deviation of prices of the data std_price = np.std(prices) # Show the calculated statistics print "Statistics for Boston housing dataset:\n" print "Minimum price: ${:,.2f}".format(minimum_price) print "Maximum price: ${:,.2f}".format(maximum_price) print "Mean price: ${:,.2f}".format(mean_price) print "Median price ${:,.2f}".format(median_price) print "Standard deviation of prices: ${:,.2f}".format(std_price)Statistics for Boston housing dataset: Minimum price: $105,000.00 Maximum price: $1,024,800.00 Mean price: $454,342.94 Median price $438,900.00 Standard deviation of prices: $165,171.13Question 1 - Feature ObservationAs a reminder, we are using three features from the Boston housing dataset: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. For each data point (neighborhood):- `'RM'` is the average number of rooms among homes in the neighborhood.- `'LSTAT'` is the percentage of homeowners in the neighborhood considered "lower class" (working poor).- `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood._Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an **increase** in the value of `'MEDV'` or a **decrease** in the value of `'MEDV'`? 
Justify your answer for each._ **Hint:** Would you expect a home that has an `'RM'` value of 6 to be worth more or less than a home that has an `'RM'` value of 7? **Answer: ** Using the data visualization tools (the dataset distributions) helped me understand the relations between the features in our model.-**RM**: holding LSTAT and PTRATIO fixed, I could verify that increasing RM (the number of rooms) also increased MEDV (the home price).-**LSTAT**: holding RM and PTRATIO fixed, I could verify that LSTAT has an inverse relation with MEDV, meaning that increasing LSTAT (the lower-class share of the neighborhood) decreased MEDV (the home price).-**PTRATIO**: PTRATIO (students per teacher) behaves almost like LSTAT, but the relation is not as strong. I noticed that its distribution was much more spread out and that sometimes increasing PTRATIO also increased MEDV. Still, the overall tendency is an inverse relation with MEDV, too.Based on how our features behave in our model, I would say that a home with RM 6 will probably be worth more than a home with RM 7 when the LSTAT or the PTRATIO of the first home is lower than that of the second home, and that a home with RM 6 will probably be worth less than a home with RM 7 when both have the same (or similar) values for the other features (LSTAT and PTRATIO). ---- Developing a ModelIn this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions. Implementation: Define a Performance MetricIt is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R2, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions. The values for R2 range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R2 of 0 always fails to predict the target variable, whereas a model with an R2 of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**. *A model can be given a negative R2 as well, which indicates that the model is no better than one that naively predicts the mean of the target variable.*For the `performance_metric` function in the code cell below, you will need to implement the following:- Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.- Assign the performance score to the `score` variable.from sklearn.metrics import r2_score def performance_metric(y_true, y_predict): """ Calculates and returns the performance score between true and predicted values based on the metric chosen. 
""" # TODO: Calculate the performance score between 'y_true' and 'y_predict' score = r2_score(y_true, y_predict) # Return the score return scoreQuestion 2 - Goodness of FitAssume that a dataset contains five data points and a model made the following predictions for the target variable:| True Value | Prediction || :-------------: | :--------: || 3.0 | 2.5 || -0.5 | 0.0 || 2.0 | 2.1 || 7.0 | 7.8 || 4.2 | 5.3 |*Would you consider this model to have successfully captured the variation of the target variable? Why or why not?* Run the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.# Calculate the performance of this model score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3]) print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)Model has a coefficient of determination, R^2, of 0.923.**Answer:** It is visible that the given model is following the target variations. It is not accurate, but it getting close. We can see it represented by the coefficient of determination that is very close to 1, it is in fact 0.92. Implementation: Shuffle and Split DataYour next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.For the code cell below, you will need to implement the following:- Use `train_test_split` from `sklearn.cross_validation` to shuffle and split the `features` and `prices` data into training and testing sets. - Split the data into 80% training and 20% testing. - Set the `random_state` for `train_test_split` to a value of your choice. This ensures results are consistent.- Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.from sklearn.cross_validation import train_test_split # TODO: Shuffle and split the data into training and testing subsets X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=47) # Success print "Training and testing split was successful."Training and testing split was successful.Question 3 - Training and Testing*What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?* **Hint:** What could go wrong with not having a way to test your model? **Answer: ** Split the dataset provide us a feedback for comparison. With it we can check if the training data and the testing data are following the same approach on determination and that we are not **overfitting** the model. It works like a double check, in order to make sure that our model was well defined and will be good to provide consistent predictions. By not splitting the dataset there is risk of never validate it with different data, that way becomes hard to guarantee the effectiveness of our model, as we may be assuming data patterns (on our first round of data entry) that are not real patterns for the next set of data that might be applied to our model in the future. ---- Analyzing Model PerformanceIn this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. 
Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone. Learning CurvesThe following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R2, the coefficient of determination. Run the code cell below and use these graphs to answer the following question.# Produce learning curves for varying training set sizes and maximum depths vs.ModelLearning(features, prices)Question 4 - Learning the Data*Choose one of the graphs above and state the maximum depth for the model. What happens to the score of the training curve as more training points are added? What about the testing curve? Would having more training points benefit the model?* **Hint:** Are the learning curves converging to particular scores? **Answer: ** After analysing the graphs, I decided to use a maximum depth of 3 for this model, as both curves converge to a reasonable score. Curve analysis for max_depth=3:- The training curve starts with a score of 1 and, as more training points are added, drops to close to 0.9; with even more training points it stabilizes around a score of 0.8.- The testing curve starts with a score of 0 and, after the first 50 training points, rises to a score of about 0.6. As more training points are added it stabilizes around 0.8, like the training curve.After more than 350 training points both curves appear to converge, indicating that we found a model that generalizes well.I don't think more training points would benefit this model: on the graph with depth 3 the testing curve has already stabilized close to the training curve, and both are close to a reasonable score. Complexity CurvesThe following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function. Run the code cell below and use this graph to answer the following two questions.vs.ModelComplexity(X_train, y_train)Question 5 - Bias-Variance Tradeoff*When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?* **Hint:** How do you know when a model is suffering from high bias or high variance? **Answer: ** - With a **maximum depth of 1** we have a high-bias scenario (the model is oversimplified). Both curves show a very poor score, under 0.5, which demonstrates that a model trained like this would not be accurate enough to make useful predictions. - With a **maximum depth of 10** we are paying too much attention to the data (overfitting), which means high variance; that is not good either, as the model will not generalize well. 
The visual cues are that up to around depth 4 the curves behave predictably; as we increase the depth beyond that, the curves diverge and the error on the validation curve becomes much higher than on the training curve, which is not what we are looking for. Question 6 - Best-Guess Optimal Model*Which maximum depth do you think results in a model that best generalizes to unseen data? What intuition led you to this answer?* **Answer: ** I would say a depth of about 3 or 4; my intuition leads me to choose 4. I saw the depth-3 decision tree regressor score around 0.8, and since the complexity curve remains stable at depth 4, maybe we can achieve scores higher than 0.8 using depth 4. ----- Evaluating Model PerformanceIn this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`. Question 7 - Grid Search*What is the grid search technique and how can it be applied to optimize a learning algorithm?* **Answer: ** Grid search generalizes the traditional search for an optimal hyperparameter that, when applied to a given model, produces the best predictions (while avoiding overfitting the data as far as possible).The big deal about grid search is that it can be used to tune models that have more than one hyperparameter, creating tuples of values whose combinations form the "grid". The technique loops through all combinations of hyperparameter values, comparing the effectiveness of the resulting models, and in the end it finds the set of parameters that produces the best prediction model (again avoiding, as far as possible, overfitting the data). Question 8 - Cross-Validation*What is the k-fold cross-validation training technique? What benefit does this technique provide for grid search when optimizing a model?* **Hint:** Much like the reasoning behind having a testing set, what could go wrong with using grid search without a cross-validated set? **Answer: ** K-fold cross-validation is an evolution of the simple cross-validation split into training and testing datasets. With **k-fold** we divide the dataset into k subsets and perform cross-validation using one subset for testing while the remaining k-1 subsets become the training data. We then repeat this **k times** and average the results. With k-fold we can use the entire dataset, although it costs more if we take the processing time into account.The main benefit of k-fold is that it is really good at avoiding overfitting, because we validate the model's effectiveness on k different training and testing subsets.Adding this technique, which **reduces overfitting**, to the **grid search** procedure helps us find the set of hyperparameters that really generalizes, avoiding **overtuning** the model (which happens when the set of parameters we found does not generalize well because it was tuned too closely to the validation set). Implementation: Fitting a ModelYour final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. 
Decision trees are part of a class of algorithms called *supervised learning algorithms*.For the `fit_model` function in the code cell below, you will need to implement the following:- Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object. - Assign this object to the `'regressor'` variable.- Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable.- Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object. - Pass the `performance_metric` function as a parameter to the object. - Assign this scoring function to the `'scoring_fnc'` variable.- Use [`GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html) from `sklearn.grid_search` to create a grid search object. - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object. - Assign the `GridSearchCV` object to the `'grid'` variable.# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV' from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import make_scorer from sklearn.grid_search import GridSearchCV def fit_model(X, y): """ Performs grid search over the 'max_depth' parameter for a decision tree regressor trained on the input data [X, y]. """ # Create cross-validation sets from the training data cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0) # TODO: Create a decision tree regressor object regressor = DecisionTreeRegressor(random_state=0) # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10 params = {'max_depth': range(1,11)} # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' scoring_fnc = make_scorer(performance_metric) # TODO: Create the grid search object grid = GridSearchCV(regressor, params, scoring = scoring_fnc, cv = cv_sets) # Fit the grid search object to the data to compute the optimal model grid = grid.fit(X, y) # Return the optimal model after fitting the data return grid.best_estimator_Making PredictionsOnce a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on. Question 9 - Optimal Model_What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**?_ Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.# Fit the training data to the model using grid search reg = fit_model(X_train, y_train) # Produce the value for 'max_depth' print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])Parameter 'max_depth' is 5 for the optimal model.**Answer: ** The max_depth is 5, I said on question 6 that my intuition was 4 as it was apparently better than 3 and 6 per our learning curves. But now, using the fit and other techniques it showed to be 5. 
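As an aside, the `sklearn.cross_validation` and `sklearn.grid_search` modules used above were removed in later scikit-learn releases. A rough equivalent of `fit_model` written against the modern `sklearn.model_selection` API (a sketch, not part of the original project; `fit_model_modern` is a hypothetical name) would be:

```python
# Minimal sketch of fit_model for recent scikit-learn versions (an assumption;
# the original notebook targets the old sklearn.grid_search / cross_validation API).
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import GridSearchCV, ShuffleSplit

def fit_model_modern(X, y):
    """Grid-search 'max_depth' for a decision tree regressor on data [X, y]."""
    cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=0)
    regressor = DecisionTreeRegressor(random_state=0)
    params = {'max_depth': list(range(1, 11))}
    scoring_fnc = make_scorer(r2_score)
    grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)
    grid = grid.fit(X, y)
    return grid.best_estimator_
```

Apart from the module paths and the `n_splits` keyword, the logic is the same as in the cell above.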
Question 10 - Predicting Selling PricesImagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:| Feature | Client 1 | Client 2 | Client 3 || :---: | :---: | :---: | :---: || Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms || Neighborhood poverty level (as %) | 17% | 32% | 3% || Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |*What price would you recommend each client sell his/her home at? Do these prices seem reasonable given the values for the respective features?* **Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response. Run the code block below to have your optimized model make predictions for each client's home.# Produce a matrix for client data client_data = [[5, 17, 15], # Client 1 [4, 32, 22], # Client 2 [8, 3, 12]] # Client 3 # Show predictions for i, price in enumerate(reg.predict(client_data)): print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)Predicted selling price for Client 1's home: $431,025.00 Predicted selling price for Client 2's home: $166,350.00 Predicted selling price for Client 3's home: $879,900.00**Answer: ** Considering:**Statistics for Boston housing dataset:**Minimum price: `$`105,000.00 Maximum price: `$`1,024,800.00 Mean price: `$`454,342.94 Median price `$`438,900.00 Standard deviation of prices: `$`165,171.13 P.S.: since the mean and the median are very close, I treat this as an almost normal distribution.**Home Features**RM (rooms): expected to increase the price. LSTAT (poverty level): expected to decrease the price. PTRATIO (student-teacher ratio): expected to decrease the price.Client 1's home: `$`431,025.00 **(recommended)**This price seems very reasonable: the nearby schools are very good, the price is compatible with the market, the 5 rooms are fair for this price, and the neighborhood is not bad at all.Client 2's home: `$`166,350.00 **(recommended)**This price seems very low at first, but it is still well above the minimum price of our dataset. It sits between one and two standard deviations below the median, which keeps it acceptable. Checking its features, we can see that the poverty level is high and the student-teacher ratio is also bad. Considering this, I would also trust the algorithm's prediction.Client 3's home: `$`879,900.00 **(recommended)**This price seems very high at first, but given the number of rooms (which is rare these days), the very nice neighborhood, and the very selective, good schools, I think the home owner should try to sell it at this price. :) SensitivityAn optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. 
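Conceptually, the robustness check in the next cell amounts to refitting the model on several different train/test splits and watching how the prediction for a single client moves around. A minimal sketch of such a trial loop, using only names already defined in this notebook (`features`, `prices`, `fit_model`, `client_data`), is shown below; the actual `vs.PredictTrials` helper may differ in its details.

```python
# Rough sketch of a sensitivity trial (not the actual PredictTrials code).
trial_prices = []
for trial in range(10):
    # A different random_state per trial gives a different shuffle/split.
    X_tr, X_te, y_tr, y_te = train_test_split(features, prices,
                                               test_size=0.2, random_state=trial)
    model = fit_model(X_tr, y_tr)
    trial_prices.append(model.predict([client_data[0]])[0])

print "Range in prices: ${:,.2f}".format(max(trial_prices) - min(trial_prices))
```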
Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with the data it's trained on.vs.PredictTrials(features, prices, fit_model, client_data)Trial 1: $391,183.33 Trial 2: $424,935.00 Trial 3: $415,800.00 Trial 4: $420,622.22 Trial 5: $418,377.27 Trial 6: $411,931.58 Trial 7: $399,663.16 Trial 8: $407,232.00 Trial 9: $351,577.61 Trial 10: $413,700.00 Range in prices: $73,357.39Screenshots Run this notebook to take screenshots of the networks visualised in the html pages. Atention!Before running this notebook you should, run the html pages in a local server. If you are using **Ubuntu**, to start the http server on an specific port, for instance port 8080, simply type: python -m http.server 8080%%capture import sys !{sys.executable} -m pip install selenium from IPython.core.display import display, HTML display(HTML("")) HTML('''
''') !python3 modules/Screenshots.pyVMs-0.html screenshot done VMs-1.html screenshot done VMs-2.html screenshot done VMs-3.html screenshot done VMs-4.html screenshot done Release.html screenshot doneCorrect answer:```python TODO: import WandBimport wandb```------ ```python TODO: import WandB Alert and timedeltafrom wandb import AlertLevelfrom datetime import timedelta```# TODO: import WandB import torch, torchvision, os import numpy as np import scikitplot as skplt import matplotlib.pyplot as plt import torch.nn.functional as F # TODO: import WandB Alert and timedelta from sklearn.metrics import f1_score from sklearn import datasets from tqdm import tqdmCorrect answer:```python TODO: WandB loginwandb.login()```# TODO: WandB loginDataset[torch.utils.data.Dataset](https://pytorch.org/docs/stable/data.htmltorch.utils.data.Dataset) - An abstract class representing a Dataset [torch.utils.data.DataLoader](https://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) - Data loader. Combines a dataset and a sampler, and provides an iterable over the given dataset.[sklearn.datasets.load_iris()](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.htmlsklearn.datasets.load_iris) - Iris Dataset.class IrisDataset(torch.utils.data.Dataset): def __init__(self, train=True): super(IrisDataset, self).__init__() iris = datasets.load_iris() self.inputs = torch.from_numpy(iris.data.astype(np.float32)) self.targets = torch.from_numpy(iris.target.astype(np.long)) def __len__(self): return len(self.targets) def __getitem__(self, item): return [self.inputs[item], self.targets[item]] train_dataset = IrisDataset() trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, ) print(len(train_dataset)) train_dataset[0][0].shapeModel[torch.nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.htmltorch.nn.Module) - Base class for all neural network modules. 
[torch.nn.Conv2d](https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.htmltorch.nn.Conv2d) - Applies a 2D convolution over an input signal composed of several input planes.[torch.nn.MaxPool2d](https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.htmltorch.nn.MaxPool2d) - Applies a 2D max pooling over an input signal composed of several input planes.[torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.htmltorch.nn.Linear) - Applies a linear transformation to the incoming data: $ y = xA^T + b $[torch.nn.functional.relu](https://pytorch.org/docs/stable/nn.functional.htmltorch.nn.functional.relu) - Applies the rectified linear unit function element-wise.class IrisNet(torch.nn.Module): def __init__(self): super(IrisNet, self).__init__() self.fc1 = torch.nn.Linear(4, 100) self.fc2 = torch.nn.Linear(100, 3) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return xDefine a Loss function and optimizerconfig = {"lr": 1e-2, "num_epoch": 15} model = IrisNet() criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])Correct answer:```python TODO: WandB initrun = wandb.init(project="mnist") TODO: WandB configwandb.config.update(config) TODO: WandB watchwandb.watch(model)```# TODO: WandB init # TODO: WandB config # TODO: WandB watchTrain the network Correct answer:```python TODO: Change to wandb.logwandb.log({"loss": loss}, commit=False)wandb.log({"f1_macro": f1_macro}, commit=False)wandb.log(f1_none, commit=False)wandb.log({"cf_matrix": wandb.Image(plt)})plt.close()```--------```python TODO: WandB Alertsif f1_macro > 0.7: wandb.alert( title='Good accuracy', text=f'F1-score {f1_macro} is above 0.7', level=AlertLevel.INFO, INFO, WARN, or ERROR wait_duration=timedelta(minutes=5) )```for e in range(config["num_epoch"]): for i, data in tqdm(enumerate(trainloader)): # get the inputs; data is a list of [inputs, labels] inputs, targets = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = model(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() # print progress loss = loss.item() # TODO: Change to wandb.log print('[%5d] loss: %.3f' % (i + 1, loss)) with torch.no_grad(): predicted = [] targeted = [] for data in trainloader: inputs, targets = data outputs = torch.argmax(model(inputs), -1) targeted += targets.tolist() predicted += outputs.tolist() f1_none = f1_score(targeted, predicted, average=None) f1_macro = f1_score(targeted, predicted, average='macro') # TODO: Change to wandb.log print('[%5d] f1_macro: %.3f' % (i + 1, f1_macro)) f1_none = {"f1_none/"+str(e): v for e,v in enumerate(f1_none)} print('[%5d]', f1_none) skplt.metrics.plot_confusion_matrix(targeted, predicted, normalize=True) plt.show() # TODO: WandB AlertsCorrect answer: ```python TODO: WandB Save Filewandb.save("path/to/file")```torch.save(model.state_dict(), "model.pth") # TODO: WandB Save FileCorrect answer:```python TODO: WandB Finish run.finish()```# TODO: WandB FinishCorrect answer: ```python TODO: WandB reinitrun = wandb.init(project="project_name", id="project_id", resume=True)model_pth = wandb.restore("model.pth")```# TODO: WandB reinit model = IrisNet() model.load_state_dict(torch.load(model_pth.name)) run.finish() run = wandb.init(project="another-logs") mnist = torchvision.datasets.MNIST(root='.', train=True, download=True) print(np.array(mnist[0][0]).shape)Correct answer:```python TODO: WandB Imagewandb.log({"sample_" + str(idx): [wandb.Image(sample, 
caption=str(target))]})```idx = 15 sample = np.array(mnist[0][0]) target = np.array(mnist[0][1]) plt.imshow(sample) plt.show() # TODO: WandB ImageCorrect answer: ```python TODO: WandB Segmentation Maskmask_img = wandb.Image(sample[0], masks={ "predictions": { "mask_data": prediction, "class_labels": class_labels, }, "ground_truth": { "mask_data": ground_truth, "class_labels": class_labels },})```idx = 20 sample = np.array(mnist[idx][0]) class_labels = { 0: "background", 1: "number", } prediction = np.array(sample>200, dtype=np.int) ground_truth = np.array(sample>100, dtype=np.int) # TODO: WandB Segmentation Mask mask_img = None wandb.log({"name": mask_img})Correct answer: ```python TODO: WandB Text Table 1wandb.log({"examples": wandb.Table(data=data, columns=["Text", "Predicted Label", "True Label"])}) TODO: WandB Text Table 2table = wandb.Table(columns=["Text", "Predicted Label", "True Label"])table.add_data("I love my phone", "1", "1")table.add_data("My phone is terrible", "0", "-1")wandb.log({"examples": table})```data = [["I love my phone", "1", "1"],["My phone is terrible", "0", "-1"]] # TODO: WandB Text Table #1 # TODO: WandB Text Table #2wandb.log() - https://docs.wandb.ai/library/logrun.finish()LAB no. 1Plan:* Git/GitHub* introduction to Jupyter/Colab* Python refresher Task 1. Create a GitHub account (or log in)Task 2. Create a repository named "Obliczenia_naukowe"Task 3. In CoLab, create a file zad1.3.ipynb with the content "Test wysyłania", save it to your GoogleDrive, and save a copy to GitHub(https://towardsdatascience.com/google-drive-google-colab-github-dont-just-read-do-it-5554d5824228)Task 4. Create a file zad1.4.txt with any content directly in GitHub, in your own repositoryTask 5. Add a new issue to the repository https://github.com/danio2010/ON2022Task 6. Determine how many times the file ON_lab1.ipynb has been modified and write that number to the file zad1.6.txt Working in the consoleGo to the target directory```gitgit config --global user.name git config --global user.mail git clone (you should configure ssh or gpg keys; when using the https version you have to provide a token on push)git add git commit -m git push```print('cześć')cześćWhat comment (Markdown) cells can do* Headings```markdown Section 1 Section 2 Subsection of section 2 Section```* Emphasizing textSyntax | Effect--- | ---`**bold text**` | **bold text**`*italicized text*` or `_italicized text_` | *italicized text*`~~strikethrough~~` | ~~strikethrough~~* Python code blocks```````pythonprint("a")``````````pythonprint("a")```* Lists1. first1. second1. third* LaTeX $\lim_{n\to\infty}$ Heading 1 Below it Section 2 Subsection Task Task 2**bold**2+2 2**3 3+5>6 import math math.sin(30) math.log(4,2)Homework, lab 1Prepare in CoLab a document that contains a title and four sections (name them however you like). In the first section put a script that asks for your name and then greets you by name. In the second section insert (in a comment cell) the code of that script. In the third section include any image. 
In the fourth section write an argument-less **function** called listownik() that* asks for the length of a list, which must fall within the range $[10,20]$ (enforce this on the user)* generates and prints a list of that length filled with random numbers from the range $[0,10]$* **returns** the number of unique values in the generated listHelper functions may be used in the solution.Name the file zadDomowe.ipynb and put it in your repository.Bayesian optimizationWhen a function is expensive to evaluate, or when gradients are not available, optimizing it requires more sophisticated methods than gradient descent. One such method is Bayesian optimization, which lies close to active learning. In Bayesian optimization, instead of picking queries by maximizing the uncertainty of predictions, function values are evaluated at points where the promise of finding a better value is large. In modAL, these algorithms are implemented with the ```BayesianOptimizer``` class, which is a sibling of ```ActiveLearner```. In the following example, their use is demonstrated on a toy problem.import numpy as np import matplotlib.pyplot as plt from functools import partial from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import Matern from modAL.models import BayesianOptimizer from modAL.acquisition import optimizer_EI, max_EI %matplotlib inlineThe function to be optimizedWe are going to optimize a simple function to demonstrate the use of ```BayesianOptimizer```.import numpy as np # generating the data X = np.linspace(0, 20, 1000).reshape(-1, 1) y = np.sin(X)/2 - ((10 - X)**2)/50 + 2 with plt.style.context('seaborn-white'): plt.figure(figsize=(10, 5)) plt.plot(X, y, c='k', linewidth=6) plt.title('The function to be optimized') plt.show()Gaussian processesIn Bayesian optimization, usually a Gaussian process regressor is used to predict the function to be optimized. One reason is that Gaussian processes can estimate the uncertainty of the prediction at a given point. This in turn can be used to estimate the possible gains at the unknown points.# assembling initial training set X_initial, y_initial = X[150].reshape(1, -1), y[150].reshape(1, -1) # defining the kernel for the Gaussian process kernel = Matern(length_scale=1.0) regressor = GaussianProcessRegressor(kernel=kernel)Optimizing using *expected improvement*During the optimization, the utility of each point is given by the so-called *acquisition function*. In this case, we are going to use the *expected improvement*, which is defined by$$EI(x) = (\mu(x) - f(x^+)) \psi\Big( \frac{\mu(x) - f(x^+)}{\sigma(x)} \Big) + \sigma(x) \phi\Big( \frac{\mu(x) - f(x^+)}{\sigma(x)} \Big),$$where $\mu(x)$ and $\sigma(x)$ are the mean and standard deviation of the Gaussian process regressor at $x$, $f$ is the function to be optimized with estimated maximum at $x^+$, and $\psi(z)$, $\phi(z)$ denote the cumulative distribution function and density function of a standard Gaussian distribution. 
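To make the formula concrete, here is a plain NumPy/SciPy sketch of this acquisition function (modAL's built-in `max_EI` additionally exposes a tradeoff parameter, so treat this only as an illustration of the equation above, not as modAL's implementation):

```python
import numpy as np
from scipy.stats import norm

def expected_improvement(X_query, gp_regressor, y_best):
    # mu and sigma come from a fitted GaussianProcessRegressor
    mu, sigma = gp_regressor.predict(X_query, return_std=True)
    mu, sigma = mu.ravel(), sigma.ravel()
    with np.errstate(divide='ignore', invalid='ignore'):
        z = (mu - y_best) / sigma
        ei = (mu - y_best) * norm.cdf(z) + sigma * norm.pdf(z)
    ei[sigma == 0.0] = 0.0  # no expected improvement where the GP is certain
    return ei
```

The query point is then simply the argmax of this function over the candidate pool, which is what `max_EI` does for the optimizer below.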
After each query, the acquisition function is reevaluated and the new query is chosen to maximize the acquisition function.# initializing the optimizer optimizer = BayesianOptimizer( estimator=regressor, X_training=X_initial, y_training=y_initial, query_strategy=max_EI ) # Bayesian optimization for n_query in range(5): query_idx, query_inst = optimizer.query(X) optimizer.teach(X[query_idx].reshape(1, -1), y[query_idx].reshape(1, -1))Using *expected improvement*, the first five queries are the following.y_pred, y_std = optimizer.predict(X, return_std=True) y_pred, y_std = y_pred.ravel(), y_std.ravel() X_max, y_max = optimizer.get_max() with plt.style.context('seaborn-white'): plt.figure(figsize=(10, 5)) plt.scatter(optimizer.X_training, optimizer.y_training, c='k', s=50, label='Queried') plt.scatter(X_max, y_max, s=100, c='r', label='Current optimum') plt.plot(X.ravel(), y, c='k', linewidth=2, label='Function') plt.plot(X.ravel(), y_pred, label='GP regressor') plt.fill_between(X.ravel(), y_pred - y_std, y_pred + y_std, alpha=0.5) plt.title('First five queries of Bayesian optimization') plt.legend() plt.show()Data Mining & Machine Learning for analysing misdemeanours and crimes recorded by the French police and gendarmerie since 1996 **Author** : ** **Time frame** : spare time between February and July 2020 **Objectif / Aim** : First, exploit government data to show trends and/or potential evolutions in crimes and misdemeanours in France. Second, empirically develop several machine learning algorithms to forecast the occurrence of crimes and misdemeanours in France. Table of contents[1. Preparing the data for analysis](section1)  [1.1. Tools import](section1.1)   [1.2. DataSet import](section1.2)   [1.3. Transposing the data](section1.3)   [1.4. Handling multiple sheets](section1.4)   [1.5. Recovering the date field](section1.5)   [1.6. Fixing the data types](section1.6)   [1.7. Unused columns](section1.7) [2. Macro analysis of the dataset](section2)   [2.1. Basic description](section2.1)   [2.2. Crimes and misdemeanours by département](section2.2)   [2.3. Focused analysis](section2.3) 0. PreambleAfter watching several videos ([first](https://youtu.be/RBQ-IoHfimQ "Part 1: warning, the video may be disturbing"), [second](https://youtu.be/nKDgFCojiT8 "Part 2: warning, the video may be disturbing")) about the migrant crisis in Europe, I became aware that the world we live in can change very quickly. Worse, while trying to understand this phenomenon and to find causes and reasons for it, I quickly realized that an individual like me is unable to fix things.Being someone who values a stable life, I tried to determine whether "this" could one day happen to me. Then, stepping back from the situation, a question came to me: **am I in danger where I live?** To bring concrete elements of an answer, I tried to identify the factors that could lead me into a dangerous situation or environment. But before that, I had to understand and define how to evaluate danger. 
In this work, I define it as any deliberate human act that endangers safety (or stability) and that can lead to, or expose my family and the people around me to, harmful consequences.To determine this "danger" factually, I thought about how to measure harmful acts at my own scale, and the answer was simple: by analysing the crimes and misdemeanours recorded in my country. 1. Préparation des données pour l'analyse / Preparing the data for analysisSources: - [INSEE population estimates](https://www.insee.fr/fr/statistiques/1893198)([Source document](https://www.insee.fr/fr/statistiques/fichier/1893198/estim-pop-dep-sexe-gca-1975-2020.xls))- [Crimes and misdemeanours recorded in France](https://www.data.gouv.fr/fr/datasets/chiffres-departementaux-mensuels-relatifs-aux-crimes-et-delits-enregistres-par-les-services-de-police-et-de-gendarmerie-depuis-janvier-1996/)([Source document](https://www.data.gouv.fr/fr/datasets/r/fdf5afbf-ed3c-4c54-a4f0-3581c8a1eca4)) *1.1. Importation des outils / Tools import*import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set(color_codes=True)*1.2. Importation du Jeu de donnée / DataSet import*### Generic variables definition to import source = 'source/' xl_misdemeanours_crimes_path = source + 'tableaux-4001-ts.xlsx' xl_population_path = source + 'estim-pop-dep-sexe-gca-1975-2020.xls' ### Excel file import crime_xl_data = pd.ExcelFile(xl_misdemeanours_crimes_path) population_xl_data = pd.ExcelFile(xl_population_path) ### Get a look on sheet pages in the excel print(crime_xl_data.sheet_names)['France_Entière', 'France_Métro', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2A', '2B', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '971', '972', '973', '974', '975', '976', '977', '978', '986', '987', '988']The dataset consists of several sheets that can be processed independently. We will focus on the "France métropolitaine" sheet.# Define France metropolitaine current_sheet = crime_xl_data.sheet_names[1] # Read one sheet to begin ('France_Métro') --> libellée index to write minus two lines df = pd.read_excel(crime_xl_data, current_sheet, index_col = 'libellé index') # Examine the head of the DataFrame df.head(3)The dataset has temporal data as dimensions. To make the variables easier to work with, we will transpose the data. In addition, to make our upcoming plots easier to read, we will also take the opportunity to split the date field into years and months and reindex the dataset. *1.3. Transposition des données / Transpose Data and get date values*# Drop init 'index' columns, and Transpose Data df = df.drop(columns=['Index']) df = df.T df.head()Once the data are transposed, we can easily see that there is a typing problem with the index: it is not treated as a datetime type. Let's fix that. 
We will also need to take the original document and its multiple sheets into account. *1.4. Handling the multiple sheets of the crime Excel file*df_all = pd.DataFrame(columns=df.columns.values) df_all['departement'] = np.nan all_name_sheet = crime_xl_data.sheet_names invalid_sheet = ['France_Entière','France_Métro'] all_name_sheet = [e for e in all_name_sheet if e not in invalid_sheet] for i in all_name_sheet: df_tmp = pd.read_excel(crime_xl_data, sheet_name=str(i), index_col = 'libellé index') df_tmp = df_tmp.drop(columns=['Index']) df_tmp = df_tmp.T df_tmp['departement'] = str(i) df_all = df_all.append(df_tmp) df_all.departement = df_all.departement.astype(str) df_all.head()*1.5. Recovering the date field*Let's update the dataset index so that it matches the datetime type. This will make visualization easier later on. *1.5.1. On a single sheet*# For the France métropolitaine dataset # Reset index, Get Year, Get Month df = df.reset_index() df['year'] = df['index'].astype(str).str[0:4] df['month'] = df['index'].astype(str).str[5:7] # Define new date column combined = df.year.str.cat(df.month,sep=' ') df['date'] = pd.to_datetime(combined) # Drop 'index' column and set new index on date df = df.drop(columns='index') df = df.set_index('date') df.head()*1.5.2. On all sheets*# For the full dataset # Reset index, Get Year, Get Month df_all = df_all.reset_index() df_all['year'] = df_all['index'].astype(str).str[0:4] df_all['month'] = df_all['index'].astype(str).str[5:7] # Define new date column combined = df_all.year.str.cat(df_all.month,sep=' ') df_all['date'] = pd.to_datetime(combined) # Drop 'index' column and set new index on date df_all = df_all.drop(columns='index') df_all = df_all.set_index('date') df_all.head()The date field is now the index. Now let's look at the data types. *1.6. Fixation des types de données / Fixing a data type*Let's check whether some columns have an unconventional, or potentially inappropriate, type for our analysis.df.dtypes.values df.dtypesLet's fix the types of the 'year' and 'month' fieldsdf.year = df.year.astype(str).astype(int) df.month = df.month.astype(str)Looking more closely, we can see that several columns are named 'Index non utilisé' (unused index); let's look into them. *1.7. Unused columns*# Define names of index and columns df.index.names = ['index'] df.columns.names = ['libellé columns'] df['Index non utilisé'].sum()Looking at these results, we can see that the dimensions named 'Index non utilisé' do not carry much information, so we will not take them into account.print(df.shape) df_all = df_all.drop(columns='Index non utilisé') df = df.drop(columns='Index non utilisé') print(df.shape)(289, 109) (289, 105)The ZPDES_Memory algorithm requires a curriculum graph in the form of a tree structure. This notebook gives two examples of creating this tree.1. Build_Tree will build the tree structure based on a trace-based analysis of an execution trace or sentence trace as given in [1, 2]2. Static_Tree takes in a predefined graph in the form of a dictionary that maps nodes to their immediate children to create a tree structure.[1] Andersen, Erik, , and . "A trace-based framework for analyzing and synthesizing educational progressions." Proceedings of the SIGCHI Conference on Human Factors in Computing Systems. ACM, 2013.[2] , , and . "A unified framework for knowledge assessment and progression analysis and design." 
Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems. ACM, 2017. Example ProgressionWe refer to each node of the graph to be a *concept*. We refer to a practice item that a student can complete as a *problem*. Each concept can have one or more problems. Each problem should only belong to one concept. We work for hashable representations for concepts and problems (in our example data, these representations are strings).For both methods, a couple of parameters need to be defined:1. all_concepts: a list of all concepts in the curriculum graph, these are the nodes in the curriculum graph2. concept_problems: a dictionary mapping concept to problems. In the example data below, the problems 'cat', 'tas', and 'cas' coorespond to the concept of 'SAS'. Note there can also be the case where every concept corresponds to one unique problem.3. problem_components: a dictionary mapping problem to basic components(if defined). In the example data below the basic components 'r' and 'c' make up the problem 'rc'. Note that there can also be the case where basic components are not defined, in this case every problem corresponds to one unique basic component (see the ```if problem_components is None or all_basic_components is None``` case).3. all_basic_components: a list of all basic components that make up the problems (if defined). If not defined, then this should be a list of all problems (see the ```if problem_components is None or all_basic_components is None``` case).In this example, we will create the following target curriculum graph:![tree_example_graph.png](attachment:tree_example_graph.png)Where *'Root'* is a dummy root node the connects to all nodes that don't have any prerequisites.tree_name = 'example_tree' #base name for saving files all_concepts = ['S' ,'N', 'C', 'CS', 'SAS', 'CAC', 'CSACS', 'NCS', 'NCSANCS'] concept_problems = { 'S': ['t', 'c', 's'], 'C': ['r', 'w', 'g'], 'N': ['1', '2', '3'], 'CS':['rc', 'rt', 'gc', 'gt', 'wt', 'ws'], 'SAS':['cat', 'tas', 'cas'], 'CAC':['raw', 'rag', 'wag'], 'CSACS':['rtawt', 'wsagc', 'rcagt'], 'NCS': ['1rs', '1wc', '1gt', '2wc', '1gc', '2ws'], 'NCSANCS':['2rta2gc', '3rta3wt', '1wsa2gs', '2wta3gs'] } all_basic_components = ['r', 'w', 'g', 't', 'c', 's', '1', '2', '3'] problem_components = { 't': ['t'], 'c': ['c'], 's': ['s'], 'r': ['r'], 'w': ['w'], 'g': ['g'], '1': ['1'], '2': ['2'], '3': ['3'], 'rc': ['r', 'c'], 'rt': ['r', 't'], 'gc': ['g', 'c'], 'gt': ['g', 't'], 'wt': ['w', 't'], 'ws': ['w', 's'], 'cat': ['c', 't'], 'tas': ['t', 's'], 'cas': ['c', 's'], 'raw': ['r', 'w'], 'rag': ['r', 'g'], 'wag': ['w', 'g'], 'rtawt': ['r', 't', 'w', 't'], 'wsagc': ['w', 's', 'g', 'c'], 'rcagt': ['r', 'c', 'g', 't'], '1rs': ['1', 'r', 's'], '1wc': ['1', 'w', 'c'], '1gt': ['1', 'g', 't'], '2wc': ['2', 'w', 'c'], '1gc': ['1', 'g', 'c'], '2ws': ['2', 'w', 's'], '2rta2gc': ['2', 'r', 't', '2', 'g', 'c'], '3rta3wt': ['3', 'r', 't', '3', 'w', 't'], '1wsa2gs': ['1', 'w', 's', '2', 'g', 's'], '2wta3gs': ['2', 'w', 't', '3', 'g', 's'] } # all_basic_components = None # problem_components = None if problem_components is None or all_basic_components is None: problem_components = {} for concept, concept_problems_list in concept_problems.items(): for problem in concept_problems_list: problem_components[problem] = [problem] all_basic_components = list(problem_components.keys())Using Build_Tree and trace based analysis#Parameters n = 1 #the n for creating in ngrams, in our case we are using 1-gramsThe Build_Tree tree needs a Build_Tree_Data object that takes in 
all_concepts, concept_problems, all_basic_components, problem_components and n.data = Build_Tree_Data(all_concepts= all_concepts, concept_problems = concept_problems, all_basic_components = all_basic_components, problem_components = problem_components, n = n)Build_Tree additionally takes in a comparison function (the ```comp_func``` argument). This function must be defined in the file 'compare_function.py' (this necessity is for saving and loading the tree using the pickle package) and should take in node1, node2 and return - -1 if node1 is harder than node2- 1 if node2 is harder than node1- 0 if neither is harder than the otherprint('Creating Tree') #Define the "Root" tree progression_tree_build = Build_Tree(name='Root', data=data, comp_func=compare_ngrams) #Loop through each concept in all_concepts and insert it into the tree for i, concept_name in enumerate(all_concepts): print('Current concept: %d, Current concept Name: %s'%(i, concept_name)) progression_tree_build.insert_node(concept_name) #calculate ancestors (prerequisites) after all nodes are inserted progression_tree_build.calculate_parents() #Save the tree, this will save the tree as 'tree_name.p' progression_tree_build.save_tree(tree_name + '.p')Using and Static_Tree a predefined tree structure to build a treeStatic tree takes in the predefined structure in the form of a dictionary mapping concept to all immediate children (all problems directly easier than it) in the prerequisite graph. See the ```tree_structure``` below for our example. Note that the 'Root' node must be defined.Similarly to Build_Tree it also takes in all_concepts, concept_problems, all_basic_components, and problem_components.tree_structure = { 'Root': ['S', 'N', 'C'], 'S': ['CS', 'SAS'], 'N': ['NCS'], 'C': ['CS', 'CAC'], 'CS': ['CSACS', 'NCS'], 'SAS': ['CSACS'], 'CAC': ['CSACS'], 'CSACS': ['NCSANCS'], 'NCS': ['NCSANCS'], 'NCSANCS': [] } #Create the tree progression_tree_static = Static_Tree(children = tree_structure, all_concepts = all_concepts, concept_problems = concept_problems, all_basic_components = all_basic_components, problem_components = problem_components) #save the tree, this will save the tree to a file 'tree_name.txt' progression_tree_static.save_tree(tree_name + '.txt')Other Tree Functionality Visualizing the curriculum graph made by Build_Tree using the Digraph packageThe following code will make a visualization of the graph made by Build_Tree - will also save the graph as '```tree_name```.gv' and a visualization of the graph as '```tree_name```.gv.png' in the current directory.#Some helpful functions for rendering and visualizing and saving graphs with the Digraph package def create_graph (tree, data): graph = Digraph(format='png', strict = True) for problem_name, problem_data in data.items(): graph.node(problem_name, problem_data) tree.add_edges_to_progression(graph) return graph def render_save_graph(graph, save_name): graph.render(save_name, view=True) #Choose a tree to use # progression_tree = progression_tree_build progression_tree = progression_tree_static #Visualize and save the graph concept_names = {concept:concept for concept in all_concepts} progression_graph = create_graph(progression_tree, concept_names) render_save_graph(progression_graph, tree_name + '.gv')Loading in the trees from fileTo saved trees can be loaded in from the their save files in the following way:progression_tree_build_2 = Build_Tree(tree_filename = tree_name + '.p') progression_tree_static_2 = Static_Tree(tree_filename = tree_name + '.txt')Getting additional 
information:Additionally we can get the immediate children and parents as well as all the children and parents from a treeprint("Children: " + str(progression_tree.return_children())) print("All Descendants: " + str(progression_tree.return_all_descendants())) print("Parents: " + str(progression_tree.return_parents())) print("All Ancestors: " + str(progression_tree.return_all_ancestors()))Children: {'Root': ['S', 'N', 'C'], 'S': ['CS', 'SAS'], 'N': ['NCS'], 'C': ['CS', 'CAC'], 'CS': ['CSACS', 'NCS'], 'SAS': ['CSACS'], 'CAC': ['CSACS'], 'CSACS': ['NCSANCS'], 'NCS': ['NCSANCS'], 'NCSANCS': []} All Descendants: {'NCSANCS': [], 'CSACS': ['NCSANCS'], 'NCS': ['NCSANCS'], 'CS': ['CSACS', 'NCS', 'NCSANCS'], 'SAS': ['CSACS', 'NCSANCS'], 'S': ['CS', 'SAS', 'NCSANCS', 'CSACS', 'NCS'], 'N': ['NCS', 'NCSANCS'], 'CAC': ['CSACS', 'NCSANCS'], 'C': ['CS', 'NCSANCS', 'CSACS', 'NCS', 'CAC'], 'Root': ['N', 'CS', 'SAS', 'CAC', 'NCSANCS', 'C', 'CSACS', 'NCS', 'S']} Parents: {'S': [], 'N': [], 'C': [], 'CS': ['S', 'C'], 'SAS': ['S'], 'CAC': ['C'], 'CSACS': ['CS', 'SAS', 'CAC'], 'NCS': ['N', 'CS'], 'NCSANCS': ['CSACS', 'NCS']} All Ancestors: {'S': [], 'C': [], 'CS': ['C', 'S'], 'SAS': ['S'], 'CAC': ['C'], 'CSACS': ['CS', 'SAS', 'CAC', 'C', 'S'], 'N': [], 'NCS': ['N', 'C', 'CS', 'S'], 'NCSANCS': ['N', 'CS', 'SAS', 'C', 'S', 'CSACS', 'NCS', 'CAC']}Coverage of eADAGE LVThe goal of this notebook is to examine why genes were found to be generic. Specifically, this notebook is trying to answer the question: Are generic genes found in more multiplier latent variables compared to specific genes?The eADAGE model uses a DAE to extracts patterns of gene expression activity in the latent variables (referred to as nodes in the paper). Here we are examining the coverage of generic genes within these latent variables.**Definitions:*** Generic genes: Are genes that are consistently differentially expressed across multiple simulated experiments.* Other genes: These are all other non-generic genes. These genes include those that are not consistently differentially expressed across simulated experiments - i.e. the genes are specifically changed in an experiment. It could also indicate genes that are consistently unchanged (i.e. 
housekeeping genes)%load_ext autoreload %autoreload 2 import os import random import textwrap import scipy import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler import rpy2.robjects as ro from rpy2.robjects import pandas2ri from rpy2.robjects.conversion import localconverter from ponyo import utils from generic_expression_patterns_modules import lv # Get data directory containing gene summary data base_dir = os.path.abspath(os.path.join(os.getcwd(), "../")) data_dir = os.path.join(base_dir, "pseudomonas_analysis") # Read in config variables config_filename = os.path.abspath( os.path.join(base_dir, "configs", "config_pseudomonas_33245.tsv") ) params = utils.read_config(config_filename) local_dir = params["local_dir"] project_id = params["project_id"] # Output file nonzero_figure_filename = "nonzero_LV_coverage_eADAGE_pa.svg" highweight_figure_filename = "highweight_LV_coverage_eADAGE_pa.svg"Load data# Get gene summary file summary_data_filename = os.path.join( data_dir, f"generic_gene_summary_{project_id}_cbrB_v_WT.tsv" ) # Load gene summary data data = pd.read_csv(summary_data_filename, sep="\t", index_col=0, header=0) # Check that genes are unique since we will be using them as dictionary keys below assert data.shape[0] == len(data["Gene ID"].unique())**Manual steps to process eADAGE data**1. Data downloaded from https://zenodo.org/record/5800932. Get Weight matrix (`eADAGE weight matrix.xlsx`) file3. Save tab (`weight matrix`) as .csv file in `local_dir`# Load eADAGE weight matrix eADAGE_weight_filename = os.path.join(local_dir, "eADAGE weight matrix.csv") eADAGE_weight = pd.read_csv(eADAGE_weight_filename, sep=",", index_col=0, header=0) eADAGE_weight.shape # Get a rough sense for how many genes contribute to a given LV # (i.e. how many genes have a value > 0 for a given LV) # Notice that eADAGE is NOT sparse (eADAGE_weight != 0).sum().sort_values(ascending=True)Get gene dataDefine generic genes based on simulated gene ranking. Refer to [figure](https://github.com/greenelab/generic-expression-patterns/blob/master/pseudomonas_analysis/gene_ranking_logFC.svg) as a guide.**Definitions:*** Generic genes: `Percentile (simulated) >= 80`(Having a high rank indicates that these genes are consistently changed across simulated experiments.)* Other genes: `Percentile (simulated) < 80`(Having a lower rank indicates that these genes are not consistently changed across simulated experiments - i.e. the genes are specifically changed in an experiment. It could also indicate genes that are consistently unchanged.)generic_threshold = 80 dict_genes = lv.get_generic_specific_genes(data, generic_threshold) # Check overlap between eADAGE genes and our genes eADAGE_genes = list(eADAGE_weight.index) our_genes = list(data.index) shared_genes = set(our_genes).intersection(eADAGE_genes) print(len(our_genes)) print(len(shared_genes)) # Drop gene ids not used in eADAGE analysis processed_dict_genes = lv.process_generic_specific_gene_lists(dict_genes, eADAGE_weight) # Check numbers add up assert len(shared_genes) == len(processed_dict_genes["generic"]) + len( processed_dict_genes["other"] )Get coverage of LVsFor each gene (generic or other) we want to find:1. The number of LVs that gene is present2. The number of LVs that the gene contributes a lot to (i.e. 
the gene is highly weighted within that LV) Nonzero LV coveragedict_nonzero_coverage = lv.get_nonzero_LV_coverage(processed_dict_genes, eADAGE_weight) # Check genes mapped correctly assert processed_dict_genes["generic"][0] in dict_nonzero_coverage["generic"].index assert len(dict_nonzero_coverage["generic"]) == len(processed_dict_genes["generic"]) assert len(dict_nonzero_coverage["other"]) == len(processed_dict_genes["other"])High weight LV coveragedict_highweight_coverage = lv.get_highweight_LV_coverage_pseudomonas( processed_dict_genes, eADAGE_weight ) # Check genes mapped correctly assert processed_dict_genes["generic"][0] in dict_highweight_coverage["generic"].index assert len(dict_highweight_coverage["generic"]) == len(processed_dict_genes["generic"]) assert len(dict_highweight_coverage["other"]) == len(processed_dict_genes["other"]) # Check high weight genes obtained are in fact at the extremes of the distribution # Quick look at the distribution of gene weights per LV sns.distplot(eADAGE_weight["Node2"], kde=False) plt.yscale("log")Assemble LV coverage and plotall_coverage = [] for gene_label in dict_genes.keys(): merged_df = pd.DataFrame( dict_nonzero_coverage[gene_label], columns=["nonzero LV coverage"] ).merge( pd.DataFrame( dict_highweight_coverage[gene_label], columns=["highweight LV coverage"] ), left_index=True, right_index=True, ) merged_df["gene type"] = gene_label all_coverage.append(merged_df) all_coverage_df = pd.concat(all_coverage) all_coverage_df = lv.assemble_coverage_df( processed_dict_genes, dict_nonzero_coverage, dict_highweight_coverage ) all_coverage_df.head() # Plot coverage distribution given list of generic coverage, specific coverage nonzero_fig = sns.boxplot( data=all_coverage_df, x="gene type", y="nonzero LV coverage", notch=True, palette=["#2c7fb8", "lightgrey"], ) nonzero_fig.set_xlabel(None) nonzero_fig.set_xticklabels( ["generic genes", "other genes"], fontsize=14, fontname="Verdana" ) nonzero_fig.set_ylabel( textwrap.fill("Number of LVs", width=30), fontsize=14, fontname="Verdana" ) nonzero_fig.tick_params(labelsize=14) nonzero_fig.set_title( "Number of LVs genes are present in", fontsize=16, fontname="Verdana" )Notice that since our weight matrix is not sparse, all genes are present in all 300 nodes, as expected# Plot coverage distribution given list of generic coverage, specific coverage highweight_fig = sns.boxplot( data=all_coverage_df, x="gene type", y="highweight LV coverage", notch=True, palette=["#2c7fb8", "lightgrey"], ) highweight_fig.set_xlabel(None) highweight_fig.set_xticklabels( ["generic genes", "other genes"], fontsize=14, fontname="Verdana" ) highweight_fig.set_ylabel( textwrap.fill("Number of LVs", width=30), fontsize=14, fontname="Verdana" ) highweight_fig.tick_params(labelsize=14) highweight_fig.set_title( "Number of LVs genes contribute highly to", fontsize=16, fontname="Verdana" )Calculate statistics* Is the reduction in generic coverage significant?* Is the difference between generic versus other genes signficant?# Test: mean number of LVs generic genes present in vs mean number of LVs that generic gene is high weight in # (compare two blue boxes between plots) generic_nonzero = all_coverage_df[all_coverage_df["gene type"] == "generic"][ "nonzero LV coverage" ].values generic_highweight = all_coverage_df[all_coverage_df["gene type"] == "generic"][ "highweight LV coverage" ].values (stats, pvalue) = scipy.stats.ttest_ind(generic_nonzero, generic_highweight) print(pvalue) # Test: mean number of LVs generic genes are high weight 
in vs mean number of LVs other genes high weight in # (compare blue and grey boxes in high weight plot) other_highweight = all_coverage_df[all_coverage_df["gene type"] == "other"][ "highweight LV coverage" ].values generic_highweight = all_coverage_df[all_coverage_df["gene type"] == "generic"][ "highweight LV coverage" ].values (stats, pvalue) = scipy.stats.ttest_ind(other_highweight, generic_highweight) print(pvalue) # Check that coverage of other and generic genes across all LVs is NOT signficantly different # (compare blue and grey boxes in nonzero weight plot) other_nonzero = all_coverage_df[all_coverage_df["gene type"] == "other"][ "nonzero LV coverage" ].values generic_nonzero = all_coverage_df[all_coverage_df["gene type"] == "generic"][ "nonzero LV coverage" ].values (stats, pvalue) = scipy.stats.ttest_ind(other_nonzero, generic_nonzero) print(pvalue)nanGet LVs that generic genes are highly weighted inSince we are using quantiles to get high weight genes per LV, each LV has the same number of high weight genes. For each set of high weight genes, we will get the proportion of generic vs other genes. We will select the LVs that have a high proportion of generic genes to examine.# Get proportion of generic genes per LV prop_highweight_generic_dict = lv.get_prop_highweight_generic_genes_pseudomonas( processed_dict_genes, eADAGE_weight ) proportion_generic = 0.5 generic_LV = [] for k, v in prop_highweight_generic_dict.items(): if v > proportion_generic: print(k, v) generic_LV.append(k) # Plot distribution of weights for these nodes node = generic_LV[0] lv.plot_dist_weights_pseudomonas( node, eADAGE_weight, shared_genes, 20, all_coverage_df, f"weight_dist_{node}.svg" ) node = generic_LV[1] lv.plot_dist_weights_pseudomonas( node, eADAGE_weight, shared_genes, 20, all_coverage_df, f"weight_dist_{node}.svg" )geneID Node25 0 PA4306 -0.131902 1 PA5506 0.112216 2 PA0587 -0.110367 3 PA3049 -0.106253 4 PA2365 -0.103959 5 PA2366 -0.099922 6 PA2746 -0.099720 7 PA0586 -0.099020 8 PA1177 -0.096395 9 PA1041 -0.095279 10 PA1728 -0.094191 11 PA2939 -0.093222 12 PA4607 -0.093089 13 PA0985 -0.091322 14 PA2372 -0.090910 15 PA5508 0.090375 16 PA4573 -0.085243 17 PA0588 -0.082000 18 PA4296 -0.081989 19 PA1746 -0.079681Save# Save plot nonzero_fig.figure.savefig( nonzero_figure_filename, format="svg", bbox_inches="tight", transparent=True, pad_inches=0, dpi=300, ) # Save plot highweight_fig.figure.savefig( highweight_figure_filename, format="svg", bbox_inches="tight", transparent=True, pad_inches=0, dpi=300, )QuickstartOnce installation is complete you can start running Sarkas. 
This quickstart guide will walk you througha simple example in order to check that everything is running smoothly.The YAML input file can be found at [input_file](https://raw.githubusercontent.com/murillo-group/sarkas/master/docs/documentation/Tutorial_NB/input_files/yocp_quickstart.yaml) and this notebook at [notebook](https://raw.githubusercontent.com/murillo-group/sarkas/master/docs/documentation/Tutorial_NB/Quickstart.ipynb)--- SimulationIn Jupyter notebook you can run the following commands# Import the usual libraries %pylab %matplotlib inline import os plt.style.use('MSUstyle') # Import sarkas from sarkas.processes import Simulation, PostProcess # Create the file path to the YAML input file input_file_name = os.path.join('input_files', 'yocp_quickstart.yaml')Using matplotlib backend: Qt5Agg Populating the interactive namespace from numpy and matplotlibThe above commands imported the required libraries and define the file path to our input file. Let's now run the simulation# Initialize the Simulation class sim = Simulation(input_file_name) # Setup the simulation's parameters sim.setup(read_yaml=True) # Run the simulation sim.run() _______ __ | __|.---.-.----.| |--.---.-.-----. |__ || _ | _|| <| _ |__ --| |_______||___._|__| |__|__|___._|_____|  An open-source pure-python molecular dynamics suite for non-ideal plasmas. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Simulation * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Job ID: yocp_quickstart Job directory: Simulations/yocp_quickstart Equilibration dumps directory: Simulations/yocp_quickstart/Simulation/Equilibration/dumps Production dumps directory: Simulations/yocp_quickstart/Simulation/Production/dumps Equilibration Thermodynamics file: Simulations/yocp_quickstart/Simulation/Equilibration/EquilibrationEnergy_yocp_quickstart.csv Production Thermodynamics f[...]PostprocessingNow that our simulation is complete we need to check if the simulation was physically sound. Run the following three lines will initialize the `PostProcess` class and calculate the observables defined in the input file.It will also produce a plot of the Temperature and Total Energy of the Production phase.# Initialize the Postprocessing class postproc = PostProcess(input_file_name) # Read the simulation's parameters and assign attributes postproc.setup(read_yaml=True) # Calculate observables postproc.run()* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Postprocessing * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Job ID: yocp_quickstart Job directory: Simulations/yocp_quickstart PostProcessing directory: Simulations/yocp_quickstart/PostProcessing Equilibration dumps directory: Simulations/yocp_quickstart/Simulation/Equilibration/dumps Production dumps directory: Simulations/yocp_quickstart/Simulation/Production/dumps Equilibration Thermodynamics file: Simulations/yocp_quickstart/Simulation/Equilibration/EquilibrationEnergy_yocp_quickstart.csv Production Thermodynamics file: Simulations/yocp_quickstart/Simulation/Production/ProductionEnergy_yocp_quickstart.csv ==================== Radial Distribution Function ==================== Data saved in: Simulations/yocp_quickstart/PostProcessing/RadialDist[...]You will notice that both the energy and temperature oscillates wildly. This is fine as long as the percentage deviations, in the top plots, are small. 
You should see temperature deviations between roughly -2% and 4-5%, and energy deviations between -2% and 1%.--- ObservablesThe most common observable is the radial distribution function. This was calculated by `postproc.run()`; here we plot it, rescaling the x axis by the Wigner-Seitz radius $a_{\rm ws}$.# Initialize the Pair Distribution Function class ax = postproc.rdf.plot( scaling=postproc.parameters.a_ws, y = [("C-C RDF", "Mean")], xlabel = r'$r/a_{\rm ws}$', ylabel = r'$g(r)$' ) ax.legend(["C-C RDF"])An adversary can use a dataset reconstruction attack, which exploits query answers on a sequestered dataset, to reconstruct the data of every individual in the dataset.This notebook makes use of the Public Use Microdata Sample (PUMS), obtained from the Census Bureau’s American Community Survey (ACS).Attacks like the one demonstrated in this notebook motivate the use of statistical disclosure limitation techniques like differential privacy.We will attempt to determine whether each individual in the PUMS sample is a US citizen.import numpy as np import pandas as pd # a dataframe containing the data to be attacked data: pd.DataFrame = pd.read_csv( "https://raw.githubusercontent.com/opendp/cs208/main/spring2022/data/FultonPUMS5reconstruction.csv") # names of public identifier columns pub = ["sex", "age", "educ", "latino", "black", "asian", "married", "divorced", "children", "disability", "militaryservice", "employed", "englishability"] # variable to reconstruct target = "uscitizen"Let's assume that analysts have access to a query interface that returns the number of citizens that satisfy a predicate.This function first creates a mask matrix of shape `(n, len(predicates))` by evaluating the predicates on the data.All `len(predicates)` subset sums are computed simultaneously via a matrix product between the target column and mask.def execute_subsetsums_exact(predicates): """Count the number of citizens that satisfy each predicate. Resembles a public query interface on a sequestered dataset. :param predicates: a list of predicates on the public variables :returns a 1-d np.ndarray of exact answers to the subset sum queries""" return data[target].values @ np.stack([pred(data) for pred in predicates], axis=1)Here's a quick example of how an analyst might use this query interface:execute_subsetsums_exact([ lambda data: data['sex'] == 1, # "is-female" predicate lambda data: data['married'] == 1, # "is-married" predicate ])Reconstruction AttackAn attacker wants to reconstruct the `uscitizen` column in the sequestered dataset.If we consider the predicate mask `A`, the US citizen column `x`, and the subset sum answers `b`, then we need to find the `x` that minimizes `|Ax - b|^2`.The target column is equivalent to the least squares solution (assuming the public variables uniquely identify each individual).def reconstruction_attack(data_pub, predicates, answers): """Reconstructs a target column based on the `answers` to queries about `data`.
:param data_pub: data of length n consisting of public identifiers :param predicates: a list of k predicate functions :param answers: a list of k answers to a query on data filtered by the k predicates :return 1-dimensional boolean ndarray""" masks = np.stack([pred(data_pub) for pred in predicates]) return np.linalg.lstsq(masks, answers, rcond=None)[0] > 0.5We don't want to bother writing a large number of random predicates, so we'll make use of a hashing scheme to generate random predicates.def make_random_predicate(): """Returns a (pseudo)random predicate function by hashing public identifiers.""" prime = 691 desc = np.random.randint(prime, size=len(pub)) # this predicate maps data into a 1-d ndarray of booleans # (where `@` is the dot product and `%` modulus) return lambda data: ((data[pub].values @ desc) % prime % 2).astype(bool) # Example usage random_predicate = make_random_predicate() num_citizens_that_matched_random_predicate = execute_subsetsums_exact([random_predicate]) # The boolean mask from applying the example predicate to the data: random_predicate_mask = random_predicate(data)At this point, we're ready to conduct our attack. We generate a large number of random queries, submit them to the query interface, and find the least-squares solution.predicates = [make_random_predicate() for _ in range(2 * len(data))] exact_answers = execute_subsetsums_exact(predicates) # generate example predicates and compute example query answers reconstructed_target = reconstruction_attack( data_pub=data[pub], predicates=predicates, answers=exact_answers) # complete reconstruction of the target column assert np.array_equal(reconstructed_target, data[target])As we can see, the target column is perfectly reconstructed, and the attacker has the US citizenship status of every member of the sequestered dataset. 
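As a complementary illustration (a sketch of my own that reuses the `data`, `pub`, `target`, and attack functions defined above; it is not part of the original notebook), the attack can be rerun with fewer queries. With fewer than `len(data)` predicates the linear system is underdetermined, so the fraction of recovered entries should fall well below 100%.

```python
# Illustrative sketch: repeat the attack with varying numbers of random queries
# to see how reconstruction quality depends on the number of answers released.
for num_queries in [len(data) // 4, len(data) // 2, len(data), 2 * len(data)]:
    preds_k = [make_random_predicate() for _ in range(num_queries)]
    answers_k = execute_subsetsums_exact(preds_k)
    recon_k = reconstruction_attack(data_pub=data[pub], predicates=preds_k, answers=answers_k)
    print(f"{num_queries} queries -> {(recon_k == data[target]).mean():.2%} of entries recovered")
```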
MitigationsWhat mitigations can the query interface use to prevent this reconstruction?I've supplied three new query interfaces with mitigations based on rounding, adding gaussian noise, or random sampling.def execute_subsetsums_round(r, predicates): """Return subset sums on the target column, rounded to the nearest multiple of `r`.""" return (execute_subsetsums_exact(predicates) / r).round() * r def execute_subsetsums_noise(sigma, predicates): """Return subset sums on the target column, with noise ~ gaussian(`sigma`).""" return np.random.normal( execute_subsetsums_exact(predicates), scale=sigma, size=exact_answers.shape) def execute_subsetsums_sample(t, predicates): """Return an estimate for subset sums on the target column, based on a sample of size `t`.""" sub_data = data.sample(t) mask = np.stack([pred(sub_data) for pred in predicates], axis=1) return sub_data[target].values @ mask * len(data) / tWe'll also want to evaluate the utility of these new query interfaces.How accurate is dataset reconstruction when these mitigations are in place, and what is the RMSE of the subset sum queries?def compute_accuracy_reconstruction(reconstructed): return (reconstructed == data[target]).mean() def compute_rmse_answers(answers): return np.sqrt(np.mean((answers - exact_answers) ** 2)) interfaces = { "round": execute_subsetsums_round, "noise": execute_subsetsums_noise, "sample": execute_subsetsums_sample } def evaluate_performance(interface_name, param): predicates_temp = [make_random_predicate() for _ in range(2 * len(data))] answers = interfaces[interface_name](param, predicates_temp) reconstruction = reconstruction_attack(data[pub], predicates_temp, answers) return { "interface": interface_name, "param": param, "answer rmse": compute_rmse_answers(answers), "reconstruction accuracy": compute_accuracy_reconstruction(reconstruction), }I've provided a few spot examples of how these mitigations perform.perf = evaluate_performance("noise", param=2.) print(f'When noising answers with gaussian sigma of {perf["param"]}, ' f'RMSE of answers is {perf["answer rmse"]:.4}, and ' f'{perf["reconstruction accuracy"]:.2%} of entries were reconstructed.') perf = evaluate_performance("sample", param=10) print(f'When sampling to {perf["param"]} rows and then correcting for bias, ' f'RMSE of answers is {perf["answer rmse"]:.4}, and ' f'{perf["reconstruction accuracy"]:.2%} of entries were reconstructed.') perf = evaluate_performance("round", param=20) print(f'When rounding answers to the nearest multiple of {perf["param"]}, ' f'RMSE of answers is {perf["answer rmse"]:.4}, and ' f'{perf["reconstruction accuracy"]:.2%} of entries were reconstructed.') perf = evaluate_performance("round", param=40) print(f'When rounding answers to the nearest multiple of {perf["param"]}, ' f'RMSE of answers is {perf["answer rmse"]:.4}, and ' f'{perf["reconstruction accuracy"]:.2%} of entries were reconstructed.')When noising answers with gaussian sigma of 2.0, RMSE of answers is 4.557, and 87.00% of entries were reconstructed. When sampling to 10 rows and then correcting for bias, RMSE of answers is 13.29, and 65.00% of entries were reconstructed. When rounding answers to the nearest multiple of 20, RMSE of answers is 3.178, and 47.00% of entries were reconstructed. 
When rounding answers to the nearest multiple of 40, RMSE of answers is 20.17, and 81.00% of entries were reconstructed.Notice among the last two examples that, as expected, the RMSE of the rounding mitigation increases as the rounding parameter increases.However, surprisingly, the reconstruction accuracy is greater when the rounding parameter is 40 compared to when it is 20.The explanation for this is that the average of the exact sums is ~20.39, so when the rounding parameter is 20, nearly all answers returned by the query interface are 20.Contrast to when the rounding parameter is 40, approximately half of the query answers are 40, and half are 0, giving one bit of entropy per predicate to reconstruct the dataset. SimulationsAll mitigations naturally take a parameter ranging between 1 and 100, so lets evaluate the reconstruction accuracy and answer RMSE as this parameter is varied.params = list(range(1, len(data) + 1, 5)) def evaluate_all_performances(num_trials=10): losses = [] for interface_name in interfaces: print("Evaluating", interface_name) for param in params: for _ in range(num_trials): losses.append(evaluate_performance(interface_name, param)) return pd.DataFrame(losses) all_perf = evaluate_all_performances().groupby(["interface", "param"]).mean()Evaluating round Evaluating noise Evaluating sampleWe'll first visualize the reconstruction performance as we vary the parameter supplied to the mitigation.import matplotlib.pyplot as plt for name, perf in all_perf.groupby("interface"): perf.reset_index(inplace=True) plt.plot(perf['param'], perf['reconstruction accuracy'], label=name) plt.legend() plt.title("Performance comparison per query interface") plt.xlabel("parameter") plt.ylabel("reconstruction accuracy") plt.show()Since the PUMS sample was stratified to have a 60-40 split, the reconstruction accuracy is still 60% if the reconstructed column consists of all zeros. We'll consider 60% our baseline.Releasing a constant column is the behavior of the sample mitigation when the sample size is ~1, or when rounding to the nearest multiple of 60 or more, which rounds all answers to zero.When using the rounding defense, the greatest amount of information is lost when the mean answer is a multiple of the rounding parameter. This effect is most pronounced when the rounding parameter is equal to the mean, at approximately 20. Increasing the noise scale very quickly affects the reconstruction accuracy. At large noise scales (>10), the noise dominates the signal, leading to a reconstruction accuracy that is worse than the baseline (0.6).The next plot compares the reconstruction accuracy against the RMSE of the answers.for name, perf in all_perf.groupby("interface"): plt.plot(perf['answer rmse'], perf['reconstruction accuracy'], label=name) plt.legend() plt.title("Performance comparison per query interface") plt.xlabel("answer rmse") plt.ylabel("reconstruction accuracy") plt.show()Broadly speaking, the reconstruction accuracy and answer RMSE have an inverse relationship. As noted before, rounding exhibits strange behavior around the data mean.for name, perf in all_perf.groupby("interface"): perf.reset_index(inplace=True) plt.plot(perf['param'], perf['answer rmse'], label=name) plt.legend() plt.title("Performance comparison per query interface") plt.xlabel("parameter") plt.ylabel("answer rmse") plt.show()Differential PrivacyThe noising approach actually satisfies differential privacy already! The input data is known to be within `[0, 1]`, and we add `gaussian(scale=param)` noise. 
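For rough intuition before running the library code below (a back-of-the-envelope of my own, not from the notebook): the classical Gaussian-mechanism bound $\sigma \ge \Delta\sqrt{2\ln(1.25/\delta)}/\varepsilon$ can be inverted to estimate $\varepsilon$ from the noise scale, assuming a per-query sensitivity of $\Delta = 1$ and $\delta = 10^{-6}$ as in the check below, ignoring composition across the many queries, and only meaningful where the resulting $\varepsilon \le 1$.

```python
# Rough per-query estimate only (assumptions: sensitivity 1, delta = 1e-6,
# no composition across queries); the OpenDP analytic Gaussian check below
# is the authoritative calculation.
import numpy as np

delta = 1e-6
for sigma in [2, 5, 10, 50, 100]:
    eps_estimate = np.sqrt(2 * np.log(1.25 / delta)) / sigma
    print(f"scale {sigma:>3}: eps ~ {eps_estimate:.3f}")
```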
We just need to solve for the privacy utilization `epsilon` as we adjust the noise scale parameter.from opendp.trans import make_count from opendp.meas import make_base_analytic_gaussian from opendp.mod import binary_search, enable_features enable_features('floating-point', 'contrib') max_influence = 1 epsilons = {} for param in params: counter = make_count(TIA=int, TO=float) epsilons[param] = binary_search( lambda eps: (counter >> make_base_analytic_gaussian(float(param))).check(max_influence, (eps, 1e-6)), bounds=(1e-6, 100.)) pd.Series(epsilons).plot(xlabel="parameter", ylabel="epsilon")IMPORTING LIBRARIESimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('driver-data.csv') #used to read csv files and perform operations on it df.head()HANDLING CATEGORICAL DATAx = df.iloc[:, :-1].values y = df.iloc[:, -1].values import pandas as pd from sklearn.preprocessing import LabelEncoder x = df.iloc[:, :-1].values y = df.iloc[:, -1].values labelencoder_X1 = LabelEncoder() x[:,0] = labelencoder_X1.fit_transform(x[:,0]) #x[:,1] = labelencoder_X1.fit_transform(x[:,1]) print(x) #Here we are using LabelEncoder[[1487. 71.24] [2764. 52.53] [3276. 64.54] ... [2473. 170.91] [3182. 176.14] [1085. 168.03]]SPLITTING OF DATA INTO TRAINING AND TESTfrom sklearn.model_selection import train_test_split #import model selection train test split for splitting the data into test and train for model validation. x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=101)NORMALIZING DATA# data normalization with sklearn from sklearn.preprocessing import MinMaxScaler # fit scaler on training data norm = MinMaxScaler().fit(x_train) # transform training data X_train_norm = norm.transform(x_train) # transform testing data X_test_norm = norm.transform(x_test) print(X_train_norm) print(X_test_norm)[[0.98749687 0.214725 ] [0.23030758 0.09547695] [0.69667417 0.18929646] ... [0.00950238 0.12893095] [0.5743936 0.24434073] [0.48387097 0.14956165]] [[0.95648912 0.07781219] [0.73968492 0.06115061] [0.51312828 0.15514459] ... [0.6364091 0.17743272] [0.26056514 0.23138657] [0.40185046 0.17171893]]GRAPHSimport matplotlib.pyplot as plt plt.plot(x, y) plt.show()ALGORITHMfrom sklearn.cluster import KMeans num_clusters = 5 km = KMeans(n_clusters=num_clusters)FITTING OF TRAINING DATAkm.fit(x_train,y_train) #training or fitting the train data into the modelPREDICTIONS FOR OUR PROBLEM STATEMENT WITH THE RELATED GRAPHSpredictions = km.predict(x_test) plt.scatter(y_test,predictions)EVALUATIONfrom sklearn import metrics print('MAE:', metrics.mean_absolute_error(y_test, predictions)) print('MSE:', metrics.mean_squared_error(y_test, predictions)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions))) print(metrics.accuracy_score(y_test, predictions))0.054375SAVING THE MODEL USING PICKLE LIBRARYimport pickle # Save the trained model as a pickle string. saved_model = pickle.dumps(km) # Load the pickled model lm_from_pickle = pickle.loads(saved_model) # Use the loaded pickled model to make predictions lm_from_pickle.predict(x_test)ACCURACY w.r.t TRAINED DATA Confusion Matrixfrom sklearn.metrics import confusion_matrix y_train=y_train[0:1600] results =confusion_matrix(y_train, predictions) print(results)[[ 3 0 0 ... 0 0 0] [ 8 5 6 ... 0 0 0] [13 16 18 ... 0 0 0] ... [ 1 0 0 ... 0 0 0] [ 0 0 1 ... 0 0 0] [ 0 0 0 ... 
0 0 0]]Precision, Recall, Support, Fscoreimport numpy from sklearn.metrics import precision_recall_fscore_support precision_recall_fscore_support(y_train, predictions, average='macro')/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))ACCURACY w.r.t TEST DATA Confusion Matrixfrom sklearn.metrics import confusion_matrix results =confusion_matrix(y_test, predictions) print(results)[[ 2 5 2 ... 0 0 0] [13 12 11 ... 0 0 0] [11 10 14 ... 0 0 0] ... [ 0 0 1 ... 0 0 0] [ 1 0 0 ... 0 0 0] [ 1 0 1 ... 0 0 0]]Precision, Recall, Support, Fscoreimport numpy from sklearn.metrics import precision_recall_fscore_support precision_recall_fscore_support(y_test, predictions, average='macro')/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))Grid search optimization of clusteringThis notebook contains analysis of papers clustering optimization.It contains the following clustering methods:* LDA (Latent Dirichlet Allocation)* Louvain communities detection algorithm, followed by merging tiny clusters* Hierarchical clustering of word2vec based embeddings for citation graph and texts* DBScan of embeddings, followed by merging tiny clusters# Without extension OUTPUT_NAME = 'grid_search_2021_11_02'Imports%matplotlib inline %config InlineBackend.figure_format='retina' import seaborn as sns from IPython.display import display sns.set_style("whitegrid") import matplotlib.pyplot as plt import logging import pandas as pd from sklearn.metrics.cluster import adjusted_mutual_info_score, v_measure_score from utils.io import load_analyzer, load_clustering, get_review_pmids from utils.preprocessing import preprocess_clustering, get_clustering_level # Configure logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO)Analyze ground truth clusteringresults_df = pd.DataFrame() partitions_overall = [] review_pmids = get_review_pmids() n_reviews = len(review_pmids) from tqdm.auto import tqdm ground_truth_clusters_df = pd.DataFrame(columns=['Pmid', 'Level', 'Clusters'], dtype=object) logger.info('Computing ground truth clustering features') for pmid in tqdm(review_pmids): clustering = load_clustering(pmid) analyzer = load_analyzer(pmid) # Pre-calculate all hierarchy levels before grid search to avoid re-calculation of clusterings for level in range(1, get_clustering_level(clustering)): clusters = preprocess_clustering( clustering, level, include_box_sections=False, uniqueness_method='unique_only' ) ground_truth_clusters_df.loc[len(ground_truth_clusters_df)] = (pmid, level, len(set(clusters.values()))) display(ground_truth_clusters_df.head()) ! 
mkdir results sns.histplot(data=ground_truth_clusters_df, x='Clusters', hue='Level', element='poly') plt.title('Ground truth clusters number') plt.savefig(f'results/{OUTPUT_NAME}_ground_truth_clusters.png') plt.show()Grid searchSee `grid_search.py` file to launch parameters grid search in parallel with Celery.def reg_v_score(labels_true, labels_pred, reg=0.01): v_score = v_measure_score(labels_true, labels_pred) n_clusters = len(set(labels_pred)) return v_score - reg * n_clusters metrics = [adjusted_mutual_info_score, reg_v_score]Visualizationresults_df = pd.read_csv(f'{OUTPUT_NAME}.csv')Extract parameter columnsscore_columns = set([m.__name__ for m in metrics]) param_columns = list(set(results_df.columns) - score_columns - set(['level', 'n_clusters', 'pmid'])) print(param_columns)Number of clusters and adjusted mutual informationsns.boxplot(x='method', y='n_clusters', hue='method', data=results_df) plt.title('Mean clusters number') plt.xlabel('Method') plt.ylabel('Clusters') plt.savefig(f'results/{OUTPUT_NAME}_mean_clusters_number.png') plt.show() sns.boxplot(x='method', y='adjusted_mutual_info_score', hue='level', data=results_df) plt.title('Mean adjusted mutual information') plt.xlabel('Method') plt.ylabel('AMI') plt.savefig(f'results/{OUTPUT_NAME}_mean_adjusted_mutual_information.png') plt.show()Best scoresbest_df = results_df.sort_values('adjusted_mutual_info_score', ascending=False).drop_duplicates(['method', 'pmid']) sns.boxplot(x='method', y='n_clusters', hue='method', data=best_df) plt.title('Clusters number for best params') plt.xlabel('Method') plt.ylabel('Clusters') plt.savefig(f'results/{OUTPUT_NAME}_best_clusters_number.png') plt.show() sns.boxplot(x='method', y='adjusted_mutual_info_score', hue='level', data=best_df) plt.title('Adjusted mutual information for best params') plt.xlabel('Method') plt.ylabel('AMI') plt.savefig(f'results/{OUTPUT_NAME}_best_adjusted_mutual_information.png') plt.show()Average Scoresdef get_top_parameter_sets_for_method(score_df, param_cols, method, target_col, n=5): return score_df[score_df.method == method].groupby(param_cols)[[target_col, 'n_clusters']].mean().sort_values(by=target_col, ascending=False).head(n).reset_index() def get_top_mean_score_for_method(score_df, param_cols, method, target_col): return score_df[score_df.method == method].groupby(param_cols)[target_col].mean().sort_values(ascending=False).values[0] import numpy as np target_col = 'adjusted_mutual_info_score' tops = [] for method in results_df.method.unique(): top_score = get_top_mean_score_for_method(results_df, param_columns, method, target_col) print(method, ':', target_col, top_score, '\n') top_params_df = get_top_parameter_sets_for_method(results_df, param_columns, method, target_col) display(top_params_df) scores_df = results_df[results_df.method == method].copy() for i, row in top_params_df[param_columns].iterrows(): filters = [True] * len(scores_df) for p in param_columns: filters = np.logical_and(filters, scores_df[p] == row[p]) t = scores_df.loc[filters].copy() t['method'] = method t['top'] = i + 1 tops.append(t) top_df = pd.concat(tops) sns.boxplot(x='method', y='adjusted_mutual_info_score', hue='top', data=top_df) plt.title('Adjusted mutual information') plt.xlabel('Method') plt.ylabel('AMI') plt.savefig(f'results/{OUTPUT_NAME}_top_adjusted_mutual_information.png') plt.show() mean_score_data = [] for method in results_df.method.unique(): method_data = [] for metric in metrics: top_score = get_top_mean_score_for_method(results_df, param_columns, method, 
metric.__name__) method_data.append(top_score) mean_score_data.append((method, *method_data)) metric_names = [m.__name__ for m in metrics] mean_score_df = pd.DataFrame(mean_score_data, columns=['method', *metric_names]) mean_score_df.head(4) mean_score_df.to_csv(f'results/{OUTPUT_NAME}_mean_scores_per_method.csv', index=False) p = mean_score_df.plot.bar(x='method', y=metric_names) fig = p.get_figure() fig.savefig(f'results/{OUTPUT_NAME}_mean_scores_per_method.png')Best parameters visualizationimport plotly.graph_objects as go categories = ['similarity_bibliographic_coupling', 'similarity_cocitation', 'similarity_citation'] fig = go.Figure() for method in results_df.method.unique(): t = get_top_parameter_sets_for_method(results_df, param_columns, method, target_col) r = (t['similarity_bibliographic_coupling'].values[0], t['similarity_cocitation'].values[0], t['similarity_citation'].values[0]) if method !='lda': fig.add_trace(go.Scatterpolar( r=r, theta=categories, fill='toself', name=method )) fig.update_layout( polar=dict( radialaxis=dict( visible=True, range=[0, 10] )), showlegend=False ) fig.write_image(f'results/{OUTPUT_NAME}_params.png') fig.show()Average Scores for Different Clustering Levelsdef get_top_parameter_sets_for_level_and_method(score_df, param_cols, level, method, target_col, n=5): return score_df[(score_df.method == method) & (score_df.level == level)]\ .groupby(param_cols)[[target_col, 'n_clusters']].mean().sort_values(by=target_col, ascending=False).head(n).reset_index() def get_top_mean_score_for_level_and_method(score_df, param_cols, level, method, target_col): return score_df[(score_df.method == method) & (score_df.level == level)]\ .groupby(param_cols)[target_col].mean().sort_values(ascending=False).values[0] target_col = 'adjusted_mutual_info_score' for level in results_df.level.unique(): tops = [] print(f'LEVEL {level}') for method in results_df.method.unique(): top_score = get_top_mean_score_for_level_and_method(results_df, param_columns, level, method, target_col) print(method, ':', target_col, top_score, '\n') top_params_df = get_top_parameter_sets_for_level_and_method(results_df, param_columns, level, method, target_col) display(top_params_df) top_params_df.to_csv(f'results/{OUTPUT_NAME}_top_params_{method}_{level}.csv', index=False) scores_df = results_df[(results_df.method == method) & (results_df.level == level)].copy() for i, row in top_params_df[param_columns].iterrows(): filters = [True] * len(scores_df) for p in param_columns: filters = np.logical_and(filters, scores_df[p] == row[p]) t = scores_df.loc[filters].copy() t['method'] = method t['top'] = i + 1 tops.append(t) top_df = pd.concat(tops) sns.boxplot(x='method', y='adjusted_mutual_info_score', hue='top', data=top_df) plt.title(f'Adjusted mutual information level {level}') plt.xlabel('Method') plt.ylabel('AMI') plt.savefig(f'results/{OUTPUT_NAME}_level_{level}_top_adjusted_mutual_information.png') plt.show() level_mean_score_data = [] for level in results_df.level.unique(): for method in results_df.method.unique(): method_data = [] for metric in metrics: top_score = get_top_mean_score_for_level_and_method(results_df, param_columns, level, method, metric.__name__) method_data.append(top_score) level_mean_score_data.append((level, method, *method_data)) metric_names = [m.__name__ for m in metrics] level_mean_score_df = pd.DataFrame(level_mean_score_data, columns=['level', 'method', *metric_names]) level_mean_score_df 
level_mean_score_df.to_csv(f'results/{OUTPUT_NAME}_mean_scores_per_method_and_level.csv', index=False) for level in level_mean_score_df.level.unique(): p = level_mean_score_df[level_mean_score_df.level == level].plot.bar(x='method', y=metric_names, title=f'Level {level}') fig = p.get_figure() fig.savefig(f'results/{OUTPUT_NAME}_mean_scores_per_method_level_{level}.png') import plotly.graph_objects as go categories = ['similarity_bibliographic_coupling', 'similarity_cocitation', 'similarity_citation'] for level in results_df.level.unique(): fig = go.Figure() print(f'LEVEL {level}') for method in results_df.method.unique(): t = get_top_parameter_sets_for_level_and_method(results_df, param_columns, level, method, target_col) r = (t['similarity_bibliographic_coupling'].values[0], t['similarity_cocitation'].values[0], t['similarity_citation'].values[0]) if method !='lda': fig.add_trace(go.Scatterpolar( r=r, theta=categories, fill='toself', name=method )) fig.update_layout( polar=dict( radialaxis=dict( visible=True, range=[0, 10] )), showlegend=False ) fig.write_image(f'results/{OUTPUT_NAME}_params_{level}.png') fig.show() print('Visualization - Done')Transfer learn SqueezeNet to four-class face recognition Make sure the hardware is in ordergpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ') print('and then re-execute this cell.') else: print(gpu_info)Fri Nov 13 18:03:29 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 455.32.00 Driver Version: 418.67 CUDA Version: 10.1 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla V100-SXM2... Off | 00000000:00:04.0 Off | 0 | | N/A 34C P0 24W / 300W | 0MiB / 16130MiB | 0% Default | | | | ERR! 
| +-------------------------------+----------------------+----------------------+ +-------[...]Importsimport time import os import copy import sys import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchvision from torchvision import datasets, models, transforms from google.cloud import storage # Placeholder to make it run until the real WoodNet is defined class WoodNet: pass device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') deviceFetch and extract the data from the storage bucket# Define paths separate from the heavy operations below BASE_PATH = "/content" BLOB_NAME = "faces/balanced_sampled_224px_color_156240_images_70_15_15_split.zip" zipfilename = os.path.join(BASE_PATH, BLOB_NAME) extract_to_dir = os.path.join(BASE_PATH, *BLOB_NAME.split(os.path.sep)[:-1]) # Fetch the data from google.cloud import storage # Make the required directories os.makedirs(os.path.join(BASE_PATH, "faces"), exist_ok=True) os.makedirs(os.path.join(BASE_PATH, "checkpoints"), exist_ok=True) os.makedirs(os.path.join(BASE_PATH, "logs"), exist_ok=True) with open(zipfilename, "wb") as f: storage.Client.create_anonymous_client().download_blob_to_file(f"gs://tdt4173-datasets/{BLOB_NAME}", f) # Extract the data import zipfile with zipfile.ZipFile(zipfilename, 'r') as zip_ref: zip_ref.extractall(extract_to_dir)Load the data into wrapper classes and apply normalizationBATCH_SIZE = 16 data_transforms = transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ]) data_dir = os.path.join(extract_to_dir, "sampled_dataset_balanced_244") image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms) for x in ['train', 'val', 'test']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=BATCH_SIZE, shuffle=True, num_workers=4) for x in ['train', 'val', 'test']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']} class_names = image_datasets['train'].classes print(class_names) print(image_datasets['val'].classes) print(dataset_sizes)['Kjartan', 'Lars', 'Morgan', 'Other'] ['Kjartan', 'Lars', 'Morgan', 'Other'] {'train': 109368, 'val': 23437, 'test': 23436}Create a helper function to aid in image plotting and show a random sample of the input datadef imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # Get a batch of training data inputs, classes = next(iter(dataloaders['val'])) print(inputs.shape) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out, title=[class_names[x] for x in classes])torch.Size([16, 3, 224, 224])Create a function for training and validationThe following function trains the supplied model with the loss criterion and optimizer supplied, for the specified number of epochs. During training it logs the loss and accuracy for both training and validation. 
Whenever a better model is found on the validation set, the function saves the model parameters to a file for use for inference later.def train_model(model, criterion, optimizer, num_epochs=25): since = time.time() modelname = f"{type(model).__name__}-{since}" print(f"Training model: `{type(model).__name__}`") best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 num_img = { "train": 0, "val": 0, } datapoints_per_epoch = 100 imgs_per_datapoint = { "train": int(float(dataset_sizes["train"] / datapoints_per_epoch)), "val": int(float(dataset_sizes["val"] / datapoints_per_epoch)), } for epoch in range(num_epochs): print(f"Epoch {epoch}/{num_epochs - 1}") print("-" * 10) with open(os.path.join(BASE_PATH, f"logs/{modelname}.csv"), "a") as f: # For each epoch we want to both train and evaluate in that order for phase in ["train", "val"]: if phase == "train": # Makes the network ready for training, i.e. the parameters can be tuned # and possible Dropouts are activated model.train() else: # Makes the network ready for inference, i.e. it is not tunable and will # turn off regularization that might interfere with training model.eval() running_loss = 0.0 running_corrects = 0 plot_loss = 0 plot_corrects = 0 # Iterate over training or validation data for inputs, labels in tqdm(dataloaders[phase], desc=f"Epoch: {epoch} ({phase})", file=sys.stdout): inputs = inputs.to(device) labels = labels.to(device) # Reset the gradients before calculating new ones optimizer.zero_grad() # Ask PyTorch to generate computation graph only if in training mode with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # Only perform update steps if we're training if phase == 'train': loss.backward() optimizer.step() # Save values for statistics and logging running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) plot_loss += loss.item() * inputs.size(0) plot_corrects += torch.sum(preds == labels.data) num_img[phase] += BATCH_SIZE if num_img[phase] % imgs_per_datapoint[phase] == 0: f.write(f"{time.time()},{epoch},{phase},\ {num_img[phase]},{plot_loss / float(imgs_per_datapoint[phase])},\ {plot_corrects / float(imgs_per_datapoint[phase])}\n") plot_loss = 0 plot_corrects = 0 epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print(f"{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}") # deep copy the model if phase == "val" and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) torch.save( { "loss": epoch_loss, "acc": epoch_acc, "epoch": epoch, "parameters": best_model_wts, }, os.path.join(BASE_PATH, f"checkpoints/{modelname}.data"), ) print() time_elapsed = time.time() - since print(f"Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s") print(f"Best val Acc: {best_acc:4f}") # load best model weights model.load_state_dict(best_model_wts) return modelPrepare the home-made CNN – WoodNetBelow is two networks. The first is made by the authors, and is made to be trained from scratch on the training data. 
The other is fully trained on ImageNet (1000 classes) and fine-tuned on the training data.class WoodNet(nn.Module): size_after_conv = 7 * 7 * 64 def __init__(self): super(WoodNet, self).__init__() self.features = nn.Sequential( nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.MaxPool2d(2), nn.ReLU(), ) self.classify = nn.Sequential( nn.Linear(self.size_after_conv, 2048), nn.ReLU(), nn.Linear(2048, 1024), nn.ReLU(), nn.Dropout(), nn.Linear(1024, len(class_names)), ) def forward(self, x): x = self.features(x) x = x.view(-1, self.size_after_conv) x = self.classify(x) return x woodnet = WoodNet().to(device) print(woodnet)WoodNet( (features): Sequential( (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (2): ReLU() (3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): ReLU() (6): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (7): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (8): ReLU() (9): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (10): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (11): ReLU() (12): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (14): ReLU() ) (classify): Sequential( (0): Linear(in_features=3136, out_features=2048, bia[...]Prepare the pretrained CNN – SqueezeNetBelow is the code for loading in the pretrained SqueezeNet. 
After it is loaded, the last classification layer is replaced with a one with the correct amount of output classes.squeezenet = models.squeezenet1_1(pretrained=True, progress=True) num_ftr = squeezenet.classifier[1].in_channels squeezenet.classifier[1] = nn.Conv2d(num_ftr, len(class_names), 1, 1) squeezenet = squeezenet.to(device) squeezenetTrain the networkBelow is code that instantiates the loss function and optimization method and starts the training.To train every parameter in SqueezeNet, set `train_full_network = True`, and to `False` if only the last layer is to be trained.# network = squeezenet network = woodnet train_full_network = False if train_full_network or isinstance(network, WoodNet): print("Training full network") parameters = network.parameters() else: print("Training only last layer of SqueezeNet") parameters = network.classifier[1].parameters() optimizer = torch.optim.SGD(parameters, lr=0.001, momentum=0.9) loss_function = nn.CrossEntropyLoss() train_model(network, loss_function, optimizer, num_epochs=25)Training full network Epoch 0/24 ---------- Epoch: 0 (train): 100%|██████████| 6836/6836 [04:12<00:00, 27.05it/s] train Loss: 0.5412 Acc: 0.7620 Epoch: 0 (val): 100%|██████████| 1465/1465 [00:51<00:00, 28.48it/s] val Loss: 0.0882 Acc: 0.9692 Epoch 1/24 ---------- Epoch: 1 (train): 100%|██████████| 6836/6836 [04:30<00:00, 25.27it/s] train Loss: 0.0409 Acc: 0.9862 Epoch: 1 (val): 100%|██████████| 1465/1465 [00:53<00:00, 27.41it/s] val Loss: 0.0184 Acc: 0.9937 Epoch 2/24 ---------- Epoch: 2 (train): 100%|██████████| 6836/6836 [04:26<00:00, 25.61it/s] train Loss: 0.0124 Acc: 0.9961 Epoch: 2 (val): 100%|██████████| 1465/1465 [00:50<00:00, 28.98it/s] val Loss: 0.0147 Acc: 0.9950 Epoch 3/24 ---------- Epoch: 3 (train): 100%|██████████| 6836/6836 [04:18<00:00, 26.45it/s] train Loss: 0.0060 Acc: 0.9981 Epoch: 3 (val): 100%|██████████| 1465/1465 [00:51<00:00, 28.18it/s] val Loss: 0.0084 Acc: 0.9975 Epoch 4/24 ---------- Epoch: 4 (train): 17%|█▋ | 1140/6836 [00:43<03:43, 25.53it/s]Upload model weights and training logs to storage# Upload checkpoints to storage client = storage.Client.from_service_account_json("/content/drive/My Drive/## Project/TDT4173 Deep Learning Project-91d3b469375c.json") bucket = client.get_bucket("tdt4173-datasets") blob = bucket.blob("checkpoints/SqueezeNet-1605290736.1277423.data") filename = "/content/checkpoints/SqueezeNet-1605290736.1277423.data" blob.upload_from_filename(filename) blob = bucket.blob("checkpoints/WoodNet-1605294933.5362356.data") filename = "/content/checkpoints/WoodNet-1605294933.5362356.data" blob.upload_from_filename(filename) # Upload logs to storage blob = bucket.blob("logs/SqueezeNet-1605290215.097698.csv") filename = "/content/logs/SqueezeNet-1605290215.097698.csv" blob.upload_from_filename(filename) blob = bucket.blob("logs/SqueezeNet-1605290736.1277423.csv") filename = "/content/logs/SqueezeNet-1605290736.1277423.csv" blob.upload_from_filename(filename) blob = bucket.blob("logs/WoodNet-1605294933.5362356.csv") filename = "/content/logs/WoodNet-1605294933.5362356.csv" blob.upload_from_filename(filename)Visualize the model performance for some imagesdef visualize_model(model, num_images=6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i, (inputs, labels) in enumerate(dataloaders['test']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, preds = torch.max(outputs, 1) for j in range(inputs.size()[0]): images_so_far += 1 ax = 
plt.subplot(num_images//2, 2, images_so_far) ax.axis('off') ax.set_title('predicted: {}'.format(class_names[preds[j]])) imshow(inputs.cpu().data[j]) if images_so_far == num_images: model.train(mode=was_training) return model.train(mode=was_training) visualize_model(squeezenet) import cv2 inputs = [ cv2.imread("/content/lars_1.png", cv2.IMREAD_COLOR), cv2.imread("/content/morgan_1.png", cv2.IMREAD_COLOR), cv2.imread("/content/morgan_2.png", cv2.IMREAD_COLOR), cv2.imread("/content/morgan_3.png", cv2.IMREAD_COLOR), cv2.imread("/content/ingvar_1.png", cv2.IMREAD_COLOR), cv2.imread("/content/dwayne_1.png", cv2.IMREAD_COLOR), cv2.imread("/content/kjartan_2.png", cv2.IMREAD_COLOR), cv2.imread("/content/faces/sampled_dataset_balanced_244/test/Kjartan/kjartan_video_5_9_augmentation_8.jpg", cv2.IMREAD_COLOR), ] for i, inp in enumerate(inputs): inputs[i] = cv2.cvtColor(cv2.resize(inp, (244, 244)), cv2.COLOR_BGR2RGB) def get_prediction_image(img, true_lab=None, plot=False): assert not plot or (plot and true_lab) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = cv2.resize(img, (224, 224)) / 255.0 inp = inp / std - mean inp = inp.transpose((2, 0, 1)) imgt = torch.Tensor(inp).unsqueeze(0).to(device) out = squeezenet(imgt) probabilities = F.softmax(out, dim=1) prob, class_idx = torch.max(probabilities, dim=1) pred = class_names[class_idx] if plot: plt.imshow(img) plt.text(5, 17, f"Actual : {true_lab}", color="white", fontsize=14) plt.text(5, 34, f"Predicted: {pred}", color="white", fontsize=14) return pred, round(prob.item() * 100, 2), probabilities get_prediction_image(inputs[2], "Morgan", plot=True) plt.imshow(np.concatenate((inputs[1], inputs[2]), axis=1)) (pred, prob), actual = get_prediction_image(inputs[1]), "Morgan" plt.text(5, 17, f"Actual : {actual}", color="white", fontsize=14) plt.text(5, 34, f"Predicted: {pred}", color="white", fontsize=14) plt.text(5, 52, f"[Certainty ({prob}%)]", color="white", fontsize=12) (pred, prob), actual = get_prediction_image(inputs[2]), "Morgan" plt.text(249, 17, f"Actual : {actual}", color="white", fontsize=14) plt.text(249, 34, f"Predicted: {pred}", color="white", fontsize=14) plt.text(249, 52, f"[Certainty ({prob}%)]", color="white", fontsize=12) plt.savefig("morgan_crop_plot.png") plt.show();Load ksent vectors of random genome samples of 16kb#export runs = [ pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_top3_500_samples_per_fasta.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_top3_500_samples_per_fasta_run2.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_1000_samples_per_fasta.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_1000_samples_per_fasta_run2.pkl") ] df = pd.concat(runs) df = df.sample(frac=1).reset_index() df.head() df.ksent.values.shape from denoiser import denoise ksent = np.vstack(df.ksent.values) v_dn = denoise(ksent) data = [v_dn[i] for i in range(v_dn.shape[0])] d=pd.DataFrame(index=df.index) d["spicies"] = df.spicies.values df["ksent"] = data df.head()Create Dataset#export valid_idx = random.sample(range(df.shape[0]), int(np.floor(df.shape[0]* 0.2))) db = (ItemList.from_df(df,cols="ksent"). split_by_idx(valid_idx). label_from_df(cols="spicies"). 
databunch())Create Model#export def submodel(dims, bias=False): layer_dims = list(zip(dims[:-1],dims[1:])) fcl = [nn.Linear(*x, bias=bias) for x in layer_dims] [nn.init.xavier_uniform_(m.weight) for m in fcl] if bias: for l in fcl: l.bias.data.normal_(0, 1) relu = [nn.ReLU() for _ in range(len(fcl))] layers = np.asarray(list(zip(fcl, relu))).ravel()[:-1] return nn.Sequential(*layers) #export class Classifier (nn.Module): def __init__(self, encoder_dims, classifier_dims): super().__init__() self.encoder = submodel(encoder_dims,bias=True) self.classifier = submodel(classifier_dims,bias=True) def forward(self, x): x = self.encoder(x) return F.softmax(self.classifier(x), dim=1) def save_encoder(self,file:PathOrStr): torch.save(self.encoder.state_dict(), path) def save_model(self, file:PathOrStr, epoch): torch.save({ 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss}, file) model = Classifier([100,50,3,2], [2,20,3]).double() modelLearnerlearn = Learner(db, model,metrics=[accuracy]) learn.loss_func learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(50,1e-2) learn.recorder.plot_metrics() interpretation = learn.interpret() interpretation.plot_confusion_matrix()Main Machine Learning LibrariesNotebook inspired from : https://www.geeksforgeeks.org/best-python-libraries-for-machine-learning/ NumpyLarge mult-dimensional array and matrix processing. Used internally by TensorFlow and other high level libraries.# Python program using NumPy # for some basic mathematical # operations import numpy as np # Creating two arrays of rank 2 x = np.array([[1, 2], [3, 4]]) y = np.array([[5, 6], [7, 8]]) # Creating two arrays of rank 1 v = np.array([9, 10]) w = np.array([11, 12]) # Inner product of vectors print(np.dot(v, w), "\n") # Matrix and Vector product print(np.dot(x, v), "\n") # Matrix and matrix product print(np.dot(x, y))ScipyContains different modules for optimization, linear algebra, integration and statistics.# As an example, create a Voronoi diagram from twenty random points: # Source: https://phoenixnap.com/kb/scipy-tutorial from scipy.spatial import Voronoi import numpy as np points = np.random.rand(20,2) voronoi = Voronoi(points) from scipy.spatial import voronoi_plot_2d fig = voronoi_plot_2d(voronoi,show_vertices=False)Scikit-LearnSkikit-learn is one of the most popular ML libraries for classical ML algorithms. It is built on top of two basic Python libraries, viz., NumPy and SciPy. Scikit-learn supports most of the supervised and unsupervised learning algorithms. 
Scikit-learn can also be used for data-mining and data-analysis, which makes it a great tool who is starting out with ML.# Python script using Scikit-learn # for Decision Tree Classifier # Sample Decision Tree Classifier from sklearn import datasets from sklearn import metrics from sklearn.tree import DecisionTreeClassifier # load the iris datasets dataset = datasets.load_iris() # fit a CART model to the data model = DecisionTreeClassifier() model.fit(dataset.data, dataset.target) print(model) # make predictions expected = dataset.target predicted = model.predict(dataset.data) # summarize the fit of the model print(metrics.classification_report(expected, predicted)) print(metrics.confusion_matrix(expected, predicted))Семинар 3 Линейная задача наименьших квадратов, решение линейных систем и QR разложение Кластеризация и матричные разложения- Задача кластеризации относится к классу задач обучения без учителя, в которой требуется разбить наборы объектов на группы, в которых содержатся похожие объектыКартинка из [этого](https://towardsdatascience.com/semantic-similarity-classifier-and-clustering-sentences-based-on-semantic-similarity-a5a564e22304) поста.- Отличие от классификации - меток объектов нет- Классический алгоритм решения такой задачи называется *k-means* или метод $k$-средних $k$-means- Выбираем число кластеров $k$- Выбираем так называемые центры клатеров- Повторяем до сходимости - Размечаем объекты на основании того, к какому центру они ближе - Обновляем центр как среднее значение точек из каждого кластера- Подробности будут в курсе по машинному обучению Визуализация Причём тут матричные разложения?- Пусть дано $N$ объектов, каждый из которых описывается $n$ числами.- Данные $N$ объектов уложены по столбцам в матрицу $X$- **Утверждение.** Алгоритм $k$-means решает следующую задачу оптимизации$$ \| X - ZC\|^2_F \to \min, $$ где $X \in \mathbb{R}^{n \times N}$, $Z \in \mathbb{R}^{n \times k}$ и $C$ размера $k \times N$- Матрица $C$ обладает следующим свойством$$ C_{ij} = \begin{cases} 1, & x_j \in \mathcal{C}_i \\ 0, & \text{иначе}, \end{cases} $$где $\mathcal{C}_i$ обозначает $i$-ый кластер, а $x_j$ – $j$-ый объект.- В каждом столбце матрицы $C$ ровно одна 1**Q:** что такое матрица $Z$?**Q:** какая интерпретация у столбцов матрицы $X - ZC$? Постановка линейной задачи наименьших квадратов: напоминание- Дана переопределённая система линейных уравнений с матрицей $A$, в которой строк больше, чем столбцов, и правой частью $b$- Задача решения такой системы в смысле наименьших квадратов записывается в виде$$\Vert A x - b \Vert_2 \rightarrow \min_{x}$$- Нормальное уравнение$$ A^* A x = A^* b $$ Решение и метод вычисления- Псевдообратная матрица $$A^{\dagger} = \lim_{\alpha \rightarrow 0}(\alpha I + A^* A)^{-1} A^*$$ - Решение записывается как $$ x = A^{\dagger}b $$- Вычисление псевдообратной матрицы из SVD Пусть $A = U \Sigma V^*$ SVD матрицы $A$. 
Тогда, $$A^{\dagger} = V \Sigma^{\dagger} U^*,$$ где $\Sigma^{\dagger}$ состоит из обращённых ненулевых сингулярных чисел матрицы $A$.- Решение вычисляется с помощью QR разложения матрицы $A$$$Rx = Q^* b.$$ Сравнение методов решения нормального уравнения- Метод Гаусса- QR разложениеimport numpy as np import scipy.linalg as splin def lsqr_solver(A, b): Q, R = np.linalg.qr(A) return splin.solve_triangular(R, Q.T @ b, lower=False) # return np.linalg.solve(R, Q.T @ b) def ls_gramm_solver(A, b): gram_mat = A.T @ A return np.linalg.solve(gram_mat, A.T @ b) n = 1000 m = 10*n A = np.random.randn(m, n) x_true = np.random.randn(n) b = A @ x_true + 0.001 * np.random.randn(m) x = lsqr_solver(A, b) print(np.linalg.norm(A @ x - b)) x = ls_gramm_solver(A, b) print(np.linalg.norm(A @ x - b)) dim_range = [10**i for i in range(1, 4)] time_qr_range = [] time_gram_range = [] for n in dim_range: m = 10*n A = np.random.randn(m, n) x_true = np.random.randn(n) b = A @ x_true + 0.01 * np.random.randn(m) t = %timeit -o -q -r 1 lsqr_solver(A, b) time_qr_range.append(t.best) t = %timeit -o -q -r 1 ls_gramm_solver(A, b) time_gram_range.append(t.best) import matplotlib.pyplot as plt %matplotlib inline plt.plot(dim_range, time_qr_range, label="QR") plt.plot(dim_range, time_gram_range, label="Gram matrix") plt.legend(fontsize=20) plt.xlabel("Dimension", fontsize=20) plt.ylabel("Time", fontsize=20) plt.yscale("log") plt.xscale("log")Свойства псевдообратной матрицы- $A = AA^{\dagger}A$- $A^{\dagger} = A^{\dagger}A A^{\dagger}$ - Количество транзизисторов на интегральной схеме удваивается каждые полтора-два годаimport pandas as pd year_transistor = { 1971: 2250, 1972: 2500, 1974: 5000, 1978: 29000, 1982: 120000, 1985: 275000, 1989: 1180000, 1993: 3100000, 1997: 7500000, 1999: 24000000, 2000: 42000000, 2002: 220000000, 2003: 410000000 } data = pd.DataFrame([year_transistor]) data data = pd.DataFrame(list(year_transistor.items()), columns=["Year", "N"]) dataМодель$$ \log_{10} N \approx w_1 + w_2 (t - 1970)$$- Логарифмирование – очень полезный приём преобразования данных, масштабы которых сильно отличаютсяy = np.log2(data["N"]) x = data["Year"] - 1970 X = np.ones((13, 2)) X[:, 1] = x print(X) print(y) w = np.linalg.lstsq(X, y, rcond=None)[0] print(np.linalg.norm(X @ w - y)) print(w) plt.scatter(data["Year"], y) plt.plot(data["Year"], X @ w) plt.xlabel("Year", fontsize=20) plt.ylabel("# transistors", fontsize=20)Предсказание на 2015 год- Микропроцессор IBM Z13 содержит $4 \cdot 10^9$ транзисторовyear = 2015 pred_given_year = w[0] + w[1]* (year - 1970) print(pred_given_year) print(np.log10(4e9))10.05641072680136 9.602059991327963Модель авторегрессии в прогнозировании временных рядов- Дан временной ряд, например температура (и другие данные из метеорологии) в разное время, цены активов и прочее- Почти всегда временной ряд – это нелинейная функция.- Зачастую она периодична- Простейшая модель – авторегрессионная (AR)- Идея в прогнозировании $k+1$ значения по последним $M$- Модель остаётся линейной!$$\hat{x}_{k+1} = w_1 x_k + \ldots + w_M x_{k - M + 1} $$- Целевая функция – сумма квадратов невязок (опять!) 
$$ (x_{k+1} - \hat{x}_{k+1})^2 + \ldots + (x_T - \hat{x}_T)^2 \to \min_w $$- How can this problem be written in the form $\|Xw - y \|^2_2 \to \min_w$ ?- What special structure does the matrix $X$ have? import pandas as pd series = pd.read_csv('./daily-min-temperatures.csv', header=0, index_col=0) print(series.head()) series.plot() x = series["Temp"] print(x.shape) train_x = x[:x.shape[0] // 2] test_x = x[x.shape[0] // 2:] train_x.plot() mean_forecast = train_x.mean() print(np.linalg.norm(train_x - mean_forecast)) prev_forecast = train_x[:-1].values # print(prev_forecast) print(np.linalg.norm((train_x[1:].values - prev_forecast))) M = 3 X = splin.toeplitz(train_x[M-1:-1], train_x[:M][::-1]) y = train_x[M:] print(X.shape, y.shape) w = np.linalg.lstsq(X, y, rcond=None)[0] print(np.linalg.norm(X @ w - y)) print(w) import matplotlib.pyplot as plt %matplotlib inline plt.plot(train_x.values, label="True") plt.plot(X @ w, label="Predict") plt.legend() test_X = splin.toeplitz(test_x[M-1:-1], test_x[:M][::-1]) test_pred = test_X @ w print(np.linalg.norm(test_pred - test_x[M:])) plt.plot(test_x.values, label="True") plt.plot(test_pred, label="Predict") plt.legend() Polynomial regression num_points = 10 x = np.linspace(-2, 2, num=num_points) y = 5 - x - 10 * x**2 + x**3 + x**4 y_noise = y + np.random.randn(num_points) plt.scatter(x, y_noise) deg = 4 V = np.vander(x, deg+1) print(V) w = np.linalg.lstsq(V, y_noise, rcond=None)[0] print(w) x_test = np.linspace(-3, 3, num=20) y_pred = np.vander(x_test, deg+1) @ w y_test_true = 5 - x_test - 10 * x_test**2 + x_test**3 + x_test**4 print(np.linalg.norm(y_pred - y_test_true)) plt.figure(figsize=(8, 6)) plt.plot(x_test, y_pred, label="Predicted") plt.plot(x, y_noise, label="Train") plt.plot(x_test, y_test_true, label="Test") plt.legend(fontsize=20) Network tomography - Given a network with $n$ edges- Each edge introduces some delay $d_i$- To determine the characteristics of the network, that is, to find the vector $d$, a large number of signals are sent through the network along various routes, and the travel time $t_i$, $i=1,\ldots, N$, of each signal along its known route is measured- A route is described by a binary matrix $P$ of size $N \times n$ such that$$p_{ij} = \begin{cases} 1, & j \in \mathcal{P}_i\\ 0, & \text{otherwise}, \end{cases} $$where $\mathcal{P}_i$ is path $i$.- From the matrix $P$ and the vector $t$ we need to determine the vector $d$ that describes the network**Applications**- Road networks- Computer networks Solving linear systems and the inverse matrix A reminder of the basic facts- A linear system has the form$$ Ax = b,$$with a square $n \times n$ matrix $A$- Every matrix admits a PLU decomposition$$ A = PLU, $$where $P$ is a permutation matrix, $L$ is a lower triangular matrix and $U$ is an upper triangular matrix.- The complexity is $\mathcal{O}(n^3)$- If $A \succ 0$, the matrix factors as $A = LL^*$ (the Cholesky decomposition) - The inverse matrix $A^{-1}$ exists for nonsingular matrices, and $AA^{-1} = A^{-1}A = I$- Then $x = A^{-1}b$, or $x = U^{-1}L^{-1}P^*b$ Push-through identity- Given $A \in \mathbb{R}^{m \times n}$ and $B \in \mathbb{R}^{n \times m}$ such that $I + AB$ is invertible- We show that $I + BA$ is also invertible- We also show that $(I + BA)^{-1}B = B(I + AB)^{-1}$ The Sherman-Morrison-Woodbury formula- $(I+P)^{-1} = I - (I+P)^{-1}P = I - P(I+P)^{-1}$- The identity proved above: $(I + BA)^{-1}B = B(I + AB)^{-1}$ - Applying it gives $(I + UV)^{-1} = I - UV(I + UV)^{-1} = I - U(I + VU)^{-1}V$- The Sherman-Morrison-Woodbury formula$$(A + UCV)^{-1} = A^{-1} - A^{-1}U (C^{-1} + VA^{-1}U)^{-1}VA^{-1} $$
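The Sherman-Morrison-Woodbury identity is easy to sanity-check numerically. The sketch below is not part of the original seminar notebook; it is a minimal verification on small random matrices, with $A$ kept close to the identity so that it is safely invertible.

```python
import numpy as np

# Minimal numerical check (not from the original notebook) of
# (A + UCV)^{-1} = A^{-1} - A^{-1} U (C^{-1} + V A^{-1} U)^{-1} V A^{-1}
n, k = 6, 2
rng = np.random.default_rng(0)
A = np.eye(n) + 0.1 * rng.standard_normal((n, n))   # well-conditioned, invertible
U = rng.standard_normal((n, k))
C = np.eye(k)
V = rng.standard_normal((k, n))

lhs = np.linalg.inv(A + U @ C @ V)
A_inv = np.linalg.inv(A)
rhs = A_inv - A_inv @ U @ np.linalg.inv(np.linalg.inv(C) + V @ A_inv @ U) @ V @ A_inv
print(np.allclose(lhs, rhs))                        # True up to rounding error
```

The right-hand side is what makes low-rank updates cheap in practice: it only needs solves with $A$ and with a small $k \times k$ matrix.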
Modeling population migration- Let $x_t \in \mathbb{R}^{100}$ be the age distribution of the population in year $t$- The dynamics of this distribution over time are described by $$ x_{t+1} = Ax_t + u $$- What is $u$?- What does the matrix $A$ look like? Choosing a migration policy- Given a vector $x_1$, a matrix $A$ and a desired distribution $x_T = x_d$- We need to find a vector $u$ such that the desired distribution is reached Determining coordinates from additional measurements- The positions of four objects in space are given: $a_1$, $a_2$, $a_3$ and $a_4$- The distances from them to the unknown object $x$ are known: $r_1$, $r_2$, $r_3$ and $r_4$- How can the position of $x$ be determined? The benefit of LU factorization when solving systems with many right-hand sides- Given a set of systems $Ax = b_1$, $Ax = b_2, \ldots$- Let's look at the speedup obtained by factorizing the matrix $A$ only once n = 1000 time_factor = [] time_full = [] A = np.random.randn(n, n) t = %timeit -o -q -r 5 splin.lu_factor(A) factor_time = t.best lu = splin.lu_factor(A) num_rhs = 10 for i in range(num_rhs): x_true = np.random.randn(n) b = A @ x_true t = %timeit -o -q np.linalg.solve(A, b) time_full.append(t.best) t = %timeit -o -q splin.lu_solve(lu, b) time_factor.append(t.best) plt.plot(np.arange(1, num_rhs+1), factor_time + np.cumsum(np.array(time_factor)), label="Factor") plt.plot(np.arange(1, num_rhs+1), np.cumsum(np.array(time_full)), label="Full") plt.legend(fontsize=20) plt.xlabel("Number of rhs", fontsize=20) plt.ylabel("Time", fontsize=20) plt.yscale("log")TensorFlow2: Training Loop. ![gradient](../images/gradient_descent.png) Although Keras is suitable for the vast majority of use cases, in the following scenarios it may make sense to forgo `model.fit()` and manually define a training loop:- Maintaining legacy code and retraining old models.- Custom batch/epoch operations like gradients and backpropagation. Even then, PyTorch may be a better fit for customization.> Disclaimer: This notebook demonstrates how to manually define a training loop for queued tuning of a binary classification model. However, it is only included to prove that AIQC technically supports TensorFlow out-of-the-box with `analysis_type='keras'`, and to demonstrate how expert practitioners can continue to use their favorite tools.
We neither claim to be experts on the inner-workings of TensorFlow, nor do we intend to troubleshoot advanced methodologies for users that are in over their heads.Reference this repository for more TensorFlow cookbooks: > https://github.com/IvanBongiorni/TensorFlow2.0_Notebooksimport tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout from sklearn.preprocessing import LabelBinarizer, PowerTransformer import aiqc from aiqc import datum--- Example Data Reference [Example Datasets](example_datasets.ipynb) for more information.df = datum.to_pandas('sonar.csv') df.head()--- a) High-Level API Reference [High-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data.splitset = aiqc.Pipeline.Tabular.make( df_or_path = df , dtype = None , feature_cols_excluded = 'object' , feature_interpolaters = None , feature_window = None , feature_encoders = dict( sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False) , dtypes = ['float64'] ) , feature_reshape_indices = None , label_column = 'object' , label_interpolater = None , label_encoder = dict(sklearn_preprocess = LabelBinarizer(sparse_output=False)) , size_test = 0.12 , size_validation = 0.22 , fold_count = None , bin_count = None ) def fn_build(features_shape, label_shape, **hp): model = Sequential(name='Sonar') model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dropout(0.30)) model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dropout(0.30)) model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform')) return model def fn_lose(**hp): loser = tf.losses.BinaryCrossentropy() return loser def fn_optimize(**hp): optimizer = tf.optimizers.Adamax() return optimizer def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp): batched_train_features, batched_train_labels = aiqc.tf_batcher( features = samples_train['features'] , labels = samples_train['labels'] , batch_size = 5 ) # Still necessary for saving entire model. model.compile(loss=loser, optimizer=optimizer) ## --- Metrics --- acc = tf.metrics.BinaryAccuracy() # Mirrors `keras.model.History.history` object. history = { 'loss':list(), 'accuracy': list(), 'val_loss':list(), 'val_accuracy':list() } ## --- Training loop --- for epoch in range(hp['epochs']): # --- Batch training --- for i, batch in enumerate(batched_train_features): with tf.GradientTape() as tape: batch_loss = loser( batched_train_labels[i], model(batched_train_features[i]) ) # Update weights based on the gradient of the loss function. gradients = tape.gradient(batch_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) ## --- Epoch metrics --- # Overall performance on training data. train_probability = model.predict(samples_train['features']) train_loss = loser(samples_train['labels'], train_probability) train_acc = acc(samples_train['labels'], train_probability) history['loss'].append(float(train_loss)) history['accuracy'].append(float(train_acc)) # Performance on evaluation data. 
eval_probability = model.predict(samples_evaluate['features']) eval_loss = loser(samples_evaluate['labels'], eval_probability) eval_acc = acc(samples_evaluate['labels'], eval_probability) history['val_loss'].append(float(eval_loss)) history['val_accuracy'].append(float(eval_acc)) # Attach history to the model so we can return a single object. model.history.history = history return model hyperparameters = { "neuron_count": [25, 50] , "epochs": [75, 150] } queue = aiqc.Experiment.make( library = "keras" , analysis_type = "classification_binary" , fn_build = fn_build , fn_train = fn_train , fn_lose = fn_lose , fn_optimize = fn_optimize , splitset_id = splitset.id , repeat_count = 1 , hide_test = False , hyperparameters = hyperparameters , fn_predict = None #automated , foldset_id = None ) queue.run_jobs()🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 4/4 [02:18<00:00, 34.72s/it]For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation. --- b) Low-Level API Reference [Low-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data and defining optimizers.dataset = aiqc.Dataset.Tabular.from_pandas(df) label_column = 'object' label = dataset.make_label(columns=[label_column]) labelcoder = label.make_labelcoder( sklearn_preprocess = LabelBinarizer(sparse_output=False) ) feature = dataset.make_feature(exclude_columns=[label_column]) encoderset = feature.make_encoderset() featurecoder_0 = encoderset.make_featurecoder( sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False) , dtypes = ['float64'] ) splitset = aiqc.Splitset.make( feature_ids = [feature.id] , label_id = label.id , size_test = 0.22 , size_validation = 0.12 ) def fn_build(features_shape, label_shape, **hp): model = Sequential(name='Sonar') model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dropout(0.30)) model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dropout(0.30)) model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform')) model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform')) return model def fn_lose(**hp): loser = tf.losses.BinaryCrossentropy() return loser def fn_optimize(**hp): optimizer = tf.optimizers.Adamax() return optimizer def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp): batched_train_features, batched_train_labels = aiqc.tf_batcher( features = samples_train['features'] , labels = samples_train['labels'] , batch_size = 5 ) # Still necessary for saving entire model. model.compile(loss=loser, optimizer=optimizer) ## --- Metrics --- acc = tf.metrics.BinaryAccuracy() # Mirrors `keras.model.History.history` object. history = { 'loss':list(), 'accuracy': list(), 'val_loss':list(), 'val_accuracy':list() } ## --- Training loop --- for epoch in range(hp['epochs']): # --- Batch training --- for i, batch in enumerate(batched_train_features): with tf.GradientTape() as tape: batch_loss = loser( batched_train_labels[i], model(batched_train_features[i]) ) # Update weights based on the gradient of the loss function. gradients = tape.gradient(batch_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) ## --- Epoch metrics --- # Overall performance on training data. 
train_probability = model.predict(samples_train['features']) train_loss = loser(samples_train['labels'], train_probability) train_acc = acc(samples_train['labels'], train_probability) history['loss'].append(float(train_loss)) history['accuracy'].append(float(train_acc)) # Performance on evaluation data. eval_probability = model.predict(samples_evaluate['features']) eval_loss = loser(samples_evaluate['labels'], eval_probability) eval_acc = acc(samples_evaluate['labels'], eval_probability) history['val_loss'].append(float(eval_loss)) history['val_accuracy'].append(float(eval_acc)) # Attach history to the model so we can return a single object. model.history.history = history return model algorithm = aiqc.Algorithm.make( library = "keras" , analysis_type = "classification_binary" , fn_build = fn_build , fn_train = fn_train , fn_lose = fn_lose , fn_optimize = fn_optimize ) hyperparameters = { "neuron_count": [25, 50] , "epochs": [75, 150] } hyperparameters = { "neuron_count": [25, 50] , "epochs": [75, 150] } hyperparamset = algorithm.make_hyperparamset( hyperparameters = hyperparameters ) queue = algorithm.make_queue( splitset_id = splitset.id , hyperparamset_id = hyperparamset.id , repeat_count = 2 ) queue.run_jobs()🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 8/8 [04:25<00:00, 33.17s/it]1. Import libraries#----------------------------Reproducible---------------------------------------------------------------------------------------- import numpy as np import random as rn import os seed=0 os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) rn.seed(seed) #----------------------------Reproducible---------------------------------------------------------------------------------------- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #-------------------------------------------------------------------------------------------------------------------------------- import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm %matplotlib inline matplotlib.style.use('ggplot') import random import scipy.sparse as sparse import scipy.io from keras.utils import to_categorical from sklearn.ensemble import ExtraTreesClassifier from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import scipy.io from skfeature.function.similarity_based import SPEC from sklearn.impute import SimpleImputer import time import pandas as pd import os from skimage import io from PIL import Image #-------------------------------------------------------------------------------------------------------------------------------- def ETree(p_train_feature,p_train_label,p_test_feature,p_test_label,p_seed): clf = ExtraTreesClassifier(n_estimators=50, random_state=p_seed) # Training clf.fit(p_train_feature, p_train_label) # Training accuracy print('Training accuracy:',clf.score(p_train_feature, np.array(p_train_label))) print('Training accuracy:',accuracy_score(np.array(p_train_label),clf.predict(p_train_feature))) #print('Training accuracy:',np.sum(clf.predict(p_train_feature)==np.array(p_train_label))/p_train_label.shape[0]) # Testing accuracy print('Testing accuracy:',clf.score(p_test_feature, np.array(p_test_label))) print('Testing accuracy:',accuracy_score(np.array(p_test_label),clf.predict(p_test_feature))) #print('Testing 
accuracy:',np.sum(clf.predict(p_test_feature)==np.array(p_test_label))/p_test_label.shape[0]) #-------------------------------------------------------------------------------------------------------------------------------- def write_to_csv(p_data,p_path): dataframe = pd.DataFrame(p_data) dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',')2. Loading datadataset_path='./Dataset/COIL-20/' samples={} for dirpath, dirnames, filenames in os.walk(dataset_path): #print(dirpath) #print(dirnames) #print(filenames) dirnames.sort() filenames.sort() for filename in [f for f in filenames if f.endswith(".png") and not f.find('checkpoint')>0]: full_path = os.path.join(dirpath, filename) file_identifier=filename.split('__')[0][3:] if file_identifier not in samples.keys(): samples[file_identifier] = [] # Direct read #image = io.imread(full_path) # Resize read image_=Image.open(full_path).resize((20, 20),Image.ANTIALIAS) image=np.asarray(image_) samples[file_identifier].append(image) #plt.imshow(samples['1'][0].reshape(20,20)) data_arr_list=[] label_arr_list=[] for key_i in samples.keys(): key_i_for_label=[int(key_i)-1] data_arr_list.append(np.array(samples[key_i])) label_arr_list.append(np.array(72*key_i_for_label)) data_arr=np.concatenate(data_arr_list).reshape(1440, 20*20).astype('float32') / 255. label_arr_onehot=np.concatenate(label_arr_list)#to_categorical(np.concatenate(label_arr_list)) C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(data_arr,label_arr_onehot,test_size=0.2,random_state=seed) x_train,x_validate,y_train_onehot,y_validate_onehot= train_test_split(C_train_x,C_train_y,test_size=0.1,random_state=seed) x_test=C_test_x y_test_onehot=C_test_y print('Shape of x_train: ' + str(x_train.shape)) print('Shape of x_validate: ' + str(x_validate.shape)) print('Shape of x_test: ' + str(x_test.shape)) print('Shape of y_train: ' + str(y_train_onehot.shape)) print('Shape of y_validate: ' + str(y_validate_onehot.shape)) print('Shape of y_test: ' + str(y_test_onehot.shape)) print('Shape of C_train_x: ' + str(C_train_x.shape)) print('Shape of C_train_y: ' + str(C_train_y.shape)) print('Shape of C_test_x: ' + str(C_test_x.shape)) print('Shape of C_test_y: ' + str(C_test_y.shape)) key_feture_number=503. Classifying 1 Extra Treestrain_feature=C_train_x train_label=C_train_y test_feature=C_test_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed)Shape of train_feature: (1152, 400) Shape of train_label: (1152,) Shape of test_feature: (288, 400) Shape of test_label: (288,) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 1.0 Testing accuracy: 1.04. 
Modelstart = time.clock() # construct affinity matrix kwargs = {'style': 0} # obtain the scores of features, and sort the feature scores in an ascending order according to the feature scores train_score = SPEC.spec(train_feature, **kwargs) train_idx = SPEC.feature_ranking(train_score, **kwargs) # obtain the dataset on the selected features train_selected_x = train_feature[:, train_idx[0:key_feture_number]] print("train_selected_x",train_selected_x.shape) # obtain the scores of features, and sort the feature scores in an ascending order according to the feature scores test_score = SPEC.spec(test_feature, **kwargs) test_idx = SPEC.feature_ranking(test_score, **kwargs) # obtain the dataset on the selected features test_selected_x = test_feature[:, test_idx[0:key_feture_number]] print("test_selected_x",test_selected_x.shape) time_cost=time.clock() - start write_to_csv(np.array([time_cost]),"./log/SPEC_time"+str(key_feture_number)+".csv") C_train_selected_x=train_selected_x C_test_selected_x=test_selected_x C_train_selected_y=C_train_y C_test_selected_y=C_test_y print('Shape of C_train_selected_x: ' + str(C_train_selected_x.shape)) print('Shape of C_test_selected_x: ' + str(C_test_selected_x.shape)) print('Shape of C_train_selected_y: ' + str(C_train_selected_y.shape)) print('Shape of C_test_selected_y: ' + str(C_test_selected_y.shape))Shape of C_train_selected_x: (1152, 50) Shape of C_test_selected_x: (288, 50) Shape of C_train_selected_y: (1152,) Shape of C_test_selected_y: (288,)5. Classifying 2 Extra Treestrain_feature=C_train_selected_x train_label=C_train_y test_feature=C_test_selected_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed)Shape of train_feature: (1152, 50) Shape of train_label: (1152,) Shape of test_feature: (288, 50) Shape of test_label: (288,) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.14930555555555555 Testing accuracy: 0.149305555555555556. Reconstruction lossfrom sklearn.linear_model import LinearRegression def mse_check(train, test): LR = LinearRegression(n_jobs = -1) LR.fit(train[0], train[1]) MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean() return MSELR train_feature_tuple=(C_train_selected_x,C_train_x) test_feature_tuple=(C_test_selected_x,C_test_x) reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple) print(reconstruction_loss)0.412713776 Deploy an image classification model in Azure Container Instance (ACI)![](https://github.com/ashleymcnamara/Developer-Advocate-Bit/blob/master/bit_ai.png?raw=true)Photo credit to the talented Let's Deploy a Computer Vision model to the Cloud using the Azure Machine Learning Service to make this simple we will revisit the MNIST dataset from the third NotebookBased on the [Azure ML Documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml?WT.mc_id=cvworkshop-github-abornst)In this notebook you will learn how to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is not ideal for production deployments, but it is great for testing and understanding the workflow. For scalable production deployments, consider using AKS. 
Login and Authenticate with Azure Generate a credentials file. Be careful with this: anyone who has access to this file has access to your account.!python3 -m azure.cli login !python3 -m azure.cli ad sp create-for-rbac --sdk-auth > mycredentials.json !export AZURE_AUTH_LOCATION='mycredentials.json' import os, json with open('mycredentials.json') as data_file: azure_credentials = json.load(data_file) # delete credentials file os.remove("mycredentials.json")What is an Azure ML Workspace and why do I need one?An AML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an AML Workspace coordinates storage, databases, and compute resources, providing added functionality for machine learning experimentation, operationalization, and the monitoring of operationalized models.CVWorkshop_RG = 'cvdemo_rg' CVWorkshop_LOC = 'eastus2' CVWorkshop_WS = 'cv_ws'Set up your Azure Machine Learning workspace Option 1: You already have a workspaceIf you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view the *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.If you have a workspace created another way, [these instructions](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environmentcreate-workspace-configuration-file) describe how to get your subscription and workspace information.If this cell succeeds, you're done configuring this library! Otherwise continue to follow the instructions in the rest of the notebook.import os subscription_id = os.environ.get("SUBSCRIPTION_ID", azure_credentials['subscriptionId']) resource_group = os.environ.get("RESOURCE_GROUP", CVWorkshop_RG) workspace_name = os.environ.get("WORKSPACE_NAME", CVWorkshop_WS) from azureml.core import Workspace try: ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name) ws.write_config() print('Workspace configuration succeeded. You are all set!') except: print('Workspace not found. Run the cells below.')Wrote the config file config.json to: /data/aml_config/config.json Workspace configuration succeeded. You are all set!Option 2: You don't have a workspace yet RequirementsInside your Azure subscription, you will need access to a _resource group_, which organizes Azure resources and provides a default region for the resources in a group. You can see which resource groups you have access to, or create a new one, in the [Azure portal](https://portal.azure.com).
If you don't have a resource group, the create workspace command will create one for you using the name you provide.To create or access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace* Your subscription id* The resource group name Supported Azure RegionsSpecify a region where your workspace will be located from the list of [Azure Machine Learning regions](https://azure.microsoft.com/en-us/global-infrastructure/services/)# import the Workspace class and check the azureml SDK version from azureml.core import Workspace ws = Workspace.create(name = CVWorkshop_WS, subscription_id = azure_credentials['subscriptionId'], resource_group = CVWorkshop_RG, location = CVWorkshop_LOC, create_resource_group = True, exist_ok = True) ws.get_details() ws.write_config()Register a modelfrom azureml.core import Workspace ws = Workspace.from_config() from azureml.core.model import Model model_name = "sklearn_mnist" model = Model.register(model_path="models/sklearn_mnist_model.pkl", model_name=model_name, tags={"data": "mnist", "model": "classification"}, description="Mnist handwriting recognition", workspace=ws) # download test data import os import urllib.request os.makedirs('./data', exist_ok=True) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename='./data/test-images.gz') urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename='./data/test-labels.gz')Found the config file in: /data/aml_config/config.json Registering model sklearn_mnistSet up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.%matplotlib inline import numpy as np import matplotlib import matplotlib.pyplot as plt import azureml from azureml.core import Workspace, Run # display the core SDK version number print("Azure ML SDK Version: ", azureml.core.VERSION)Azure ML SDK Version: 1.0.2Retrieve the modelYou registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.from azureml.core import Workspace from azureml.core.model import Model ws = Workspace.from_config() model=Model(ws, 'sklearn_mnist') model.download(target_dir='.', exist_ok=True) import os # verify the downloaded model file os.stat('./sklearn_mnist_model.pkl')Found the config file in: /data/aml_config/config.jsonTest model locallyBefore deploying, make sure your model is working locally by:* Loading test data* Predicting test data* Examining the confusion matrix Load test dataLoad the test data from the **./data/** directory created during the training tutorial.from utils import load_data # note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster X_test = load_data('./data/test-images.gz', False) / 255.0 y_test = load_data('./data/test-labels.gz', True).reshape(-1)Predict test dataFeed the test dataset to the model to get predictions.import pickle from sklearn.externals import joblib clf = joblib.load('./sklearn_mnist_model.pkl') y_hat = clf.predict(X_test)/usr/local/lib/python3.5/dist-packages/sklearn/base.py:251: UserWarning: Trying to unpickle estimator LogisticRegression from version 0.19.1 when using version 0.20.0. This might lead to breaking code or invalid results. Use at your own risk. 
UserWarning)Examine the confusion matrixGenerate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.from sklearn.metrics import confusion_matrix conf_mx = confusion_matrix(y_test, y_hat) print(conf_mx) print('Overall accuracy:', np.average(y_hat == y_test))[[ 960 0 1 2 0 5 6 3 1 2] [ 0 1112 3 1 0 1 5 1 12 0] [ 8 8 920 20 9 5 10 11 37 4] [ 4 0 17 919 2 22 4 12 21 9] [ 1 2 5 3 914 0 10 2 7 38] [ 10 2 0 42 10 769 17 7 28 7] [ 9 3 7 2 6 20 907 1 3 0] [ 2 7 22 5 8 1 1 950 5 27] [ 10 14 5 21 14 27 7 11 853 12] [ 8 8 2 13 31 14 0 24 12 897]] Overall accuracy: 0.9201Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the actual values, and the Y axis represents the predicted values. The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's. Hence you see a bright grid at (5,3).# normalize the diagnal cells so that they don't overpower the rest of the cells when visualized row_sums = conf_mx.sum(axis=1, keepdims=True) norm_conf_mx = conf_mx / row_sums np.fill_diagonal(norm_conf_mx, 0) fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone) ticks = np.arange(0, 10, 1) ax.set_xticks(ticks) ax.set_yticks(ticks) ax.set_xticklabels(ticks) ax.set_yticklabels(ticks) fig.colorbar(cax) plt.ylabel('true labels', fontsize=14) plt.xlabel('predicted values', fontsize=14) plt.savefig('conf.png') plt.show()Deploy as web serviceOnce you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:* A scoring script to show how to use the model* An environment file to show what packages need to be installed* A configuration file to build the ACI* The model you trained before Create scoring scriptCreate the scoring script, called score.py, used by the web service call to show how to use the model.You must include two required functions into the scoring script:* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started. * The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.%%writefile score.py import json import numpy as np import os import pickle from sklearn.externals import joblib from sklearn.linear_model import LogisticRegression from azureml.core.model import Model def init(): global model # retreive the path to the model file using the model name model_path = Model.get_model_path('sklearn_mnist') model = joblib.load(model_path) def run(raw_data): data = np.array(json.loads(raw_data)['data']) # make prediction y_hat = model.predict(data) # you can return any data type as long as it is JSON-serializable return y_hat.tolist()Writing score.pyCreate environment fileNext, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. 
This model needs `scikit-learn` and `azureml-sdk`.from azureml.core.conda_dependencies import CondaDependencies myenv = CondaDependencies() myenv.add_conda_package("scikit-learn") with open("myenv.yml","w") as f: f.write(myenv.serialize_to_string())Review the content of the `myenv.yml` file.with open("myenv.yml","r") as f: print(f.read())# Conda environment specification. The dependencies defined in this file will # be automatically provisioned for runs with userManagedDependencies=False. # Details about the Conda environment file format: # https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually name: project_environment dependencies: # The python interpreter version. # Currently Azure ML only supports 3.5.2 and later. - python=3.6.2 - pip: # Required packages for AzureML execution, history, and data preparation. - azureml-defaults - scikit-learnCreate configuration fileCreate a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service.from azureml.core.webservice import AciWebservice aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, tags={"data": "MNIST", "method" : "sklearn"}, description='Predict MNIST with sklearn')Deploy in ACIEstimated time to complete: **about 7-8 minutes**Configure the image and deploy. The following code goes through these steps:1. Build an image using: * The scoring file (`score.py`) * The environment file (`myenv.yml`) * The model file1. Register that image under the workspace. 1. Send the image to the ACI container.1. Start up a container in ACI using the image.1. Get the web service HTTP endpoint.%%time from azureml.core.webservice import Webservice from azureml.core.image import ContainerImage # configure the image image_config = ContainerImage.image_configuration(execution_script="score.py", runtime="python", conda_file="myenv.yml") service = Webservice.deploy_from_model(workspace=ws, name='sklearn-mnist-svc', deployment_config=aciconfig, models=[model], image_config=image_config) service.wait_for_deployment(show_output=True)Creating image Image creation operation finished for image sklearn-mnist-svc:1, operation "Succeeded" Creating service Running................ SucceededACI service creation operation finished, operation "Succeeded" CPU times: user 4.08 s, sys: 220 ms, total: 4.3 s Wall time: 6minGet the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.print(service.scoring_uri)http://23.101.137.78:80/scoreTest deployed serviceEarlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data. The following code goes through these steps:1. Send the data as a JSON array to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.1. Print the returned predictions and plot them along with the input images. Red font and inverse image (white on black) is used to highlight the misclassified samples. 
Since the model accuracy is high, you might have to run the following code a few times before you can see a misclassified sample.import json # find 30 random samples from test set n = 30 sample_indices = np.random.permutation(X_test.shape[0])[0:n] test_samples = json.dumps({"data": X_test[sample_indices].tolist()}) test_samples = bytes(test_samples, encoding='utf8') # predict using the deployed model result = service.run(input_data=test_samples) # compare actual value vs. the predicted values: i = 0 plt.figure(figsize = (20, 1)) for s in sample_indices: plt.subplot(1, n, i + 1) plt.axhline('') plt.axvline('') # use different color for misclassified sample font_color = 'red' if y_test[s] != result[i] else 'black' clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color) plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map) i = i + 1 plt.show()You can also send raw HTTP request to test the web service.import requests import json # send a random row from the test set to score random_index = np.random.randint(0, len(X_test)-1) input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}" headers = {'Content-Type':'application/json'} # for AKS deployment you'd need to the service key in the header as well # api_key = service.get_key() # headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)} resp = requests.post(service.scoring_uri, input_data, headers=headers) print("POST to url", service.scoring_uri) #print("input data:", input_data) print("label:", y_test[random_index]) print("prediction:", resp.text)POST to url http://172.16.17.32:80/score label: 2 prediction: [2]Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:service.delete()Linear Regression Example with pyTorch First, we implement a gradient descent algorithm for reference, using numpy only%matplotlib inline import matplotlib.pylab as plt import time from IPython import display import numpy as np #y = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]) y = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]) #y = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]) x = np.array([10., 8., 13., 9., 11., 14., 6., 4., 12., 7., 5.]) N = len(x) # Design matrix A = np.vstack((np.ones(N), x)).T # Learning rate eta = 0.01 # initial parameters w = np.array([2., 1.]) for epoch in range(10): # Error err = y-A.dot(w) # Total error E = np.sum(err**2)/N # Gradient dE = -2.*A.T.dot(err)/N if epoch%1 == 0: print(epoch,':',E) # print(w) # Perfom one descent step w = w - eta*dE0 : 15.9938818182 1 : 11.6346546383 2 : 8.57455118197 3 : 6.4263659935 4 : 4.91830088281 5 : 3.85956758638 6 : 3.11624261595 7 : 2.5943186121 8 : 2.2278072717 9 : 1.97038821883Animated Visualization of Gradient Descentw = np.array([2., 1.]) f = A.dot(w) fig = plt.figure(figsize=(5,5)) ax = fig.gca() ax.set_xlim((4,14)) ax.set_ylim((4,14)) ln = plt.Line2D(xdata=x, ydata=f, linestyle='-',linewidth=2) ax.add_line(ln) plt.plot(x,y,'bo', alpha=0.5, markersize=5) for epoch in range(30): f = A.dot(w) err = y-f ln.set_xdata(x) ln.set_ydata(f) E = np.sum(err**2)/N dE = -2.*A.T.dot(err)/N if epoch%1 == 0: print(epoch,':',E) # print(w) w = w - eta*dE display.clear_output(wait=True) display.display(plt.gcf()) time.sleep(0.1)Implementation in pyTorch Fitting a polynomial%matplotlib inline import matplotlib.pylab 
as plt import numpy as np import torch import torch.autograd from torch.autograd import Variable x = np.array([10., 8., 13., 9., 11., 14., 6., 4., 12., 7., 5.]) yy = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]) y = Variable(torch.DoubleTensor(yy)) # Setup the feature (vandermonde) matrix N = len(x) degree = 2 xx = np.vstack((np.power(x,i) for i in range(degree+1))).T A = Variable(torch.from_numpy(xx).double()) w = Variable(torch.randn(degree+1).double(), requires_grad=True) # learning rate eta = 0.00005 for epoch in range(10000): ## Compute the forward pass f = torch.matmul(A, w) #print(f) E = torch.sum((y-f)**2)/N if epoch%1000 == 0: print(epoch,':',E.data[0]) # Compute the gradients by automated differentiation E.backward() # For each adjustable parameter # Move along the negative gradient direction w.data.add_(-eta * w.grad.data) #print(w.grad.data) # Reset the gradients, as otherwise they are accumulated in param.grad w.grad.zero_() print(epoch,':',E.data[0]) plt.plot(x, y.data.numpy().squeeze(),'o') x2 = np.arange(3,15,0.5) xx = np.vstack((np.power(x2,i) for i in range(degree+1))).T AA = Variable(torch.from_numpy(xx).double()) f = torch.matmul(AA, w) plt.plot(x2, f.data.numpy(),'r-') plt.show()Fitting a line, using a linear unitimport torch import torch.autograd from torch.autograd import Variable ## The rows correspond to examples and the columns to features. ## There is only one feature so the Tensors are actually just vectors x = torch.FloatTensor([[10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]]).transpose_(0,1) #yy = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73] yy = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68] y = torch.FloatTensor([yy]).transpose_(0,1) # This is a linear unit that implements the function f(x) = weight*x + bias f = torch.nn.Linear(1, 1, bias=True) # Set w_1 f.weight.data = torch.FloatTensor([[1.]]) # Set w_0 f.bias.data = torch.FloatTensor([[2.]]) # learning rate eta = 0.01 # This is the error function E(x, y) = (1/N) \sum_{i=1}^N (x_i-y_i)^2 EuclidianLoss = torch.nn.MSELoss(size_average=True) for epoch in range(10): ## Compute the forward pass E = EuclidianLoss(f(Variable(x)), Variable(y)) if epoch%1 == 0: print(epoch,':',E.data[0]) # print(f.bias.data.numpy()) # print(f.weight.data.numpy()) # Compute the gradients by automated differentiation E.backward() # For each adjustable parameter # Move along the negative gradient direction for param in f.parameters(): param.data.add_(-eta * param.grad.data) # Reset the gradients, as otherwise they are accumulated in param.grad f.zero_grad() #print('Weights') #print(f.weight.data, f.bias.data)0 : 15.993881225585938 1 : 11.634653091430664 2 : 8.574551582336426 3 : 6.426366806030273 4 : 4.918301105499268 5 : 3.8595666885375977 6 : 3.1162407398223877 7 : 2.59431791305542 8 : 2.2278072834014893 9 : 1.970388412475586Fitting a polynomial, using a linear unitimport torch import torch.autograd from torch.autograd import Variable ## The rows correspond to examples and the columns to features. 
## There is only one feature so the Tensors are actually just vectors x = np.array([10., 8., 13., 9., 11., 14., 6., 4., 12., 7., 5.]) #yy = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73] yy = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68] y = torch.DoubleTensor([yy]).transpose_(0,1) # Setup the feature (vandermonde) matrix N = len(x) degree = 2 xx = np.vstack((np.power(x,i) for i in range(degree+1))).T A = torch.from_numpy(xx) # This is a linear unit that implements the function f(x) = weight*x + bias f = torch.nn.Linear(degree+1, 1, bias=False).double() # learning rate eta = 0.00005 # This is the error function E(f, y) = (1/N) \sum_{i=1}^N (f_i-y_i)^2 EuclidianLoss = torch.nn.MSELoss(size_average=True) for epoch in range(10000): ## Compute the forward pass E = EuclidianLoss(f(Variable(A)), Variable(y)) if epoch%1000 == 0: print(epoch,':',E.data[0]) # print(f.bias.data.numpy()) # print(f.weight.data.numpy()) # Compute the gradients by automated differentiation E.backward() # For each adjustable parameter # Move along the negative gradient direction for param in f.parameters(): param.data.add_(-eta * param.grad.data) # Reset the gradients, as otherwise they are accumulated in param.grad f.zero_grad() print(epoch,':',E.data[0]) plt.plot(x, y.numpy(),'o') x2 = np.arange(3,15,0.5) xx = np.vstack((np.power(x2,i) for i in range(degree+1))).T A = torch.from_numpy(xx) plt.plot(x2, f(Variable(A)).data.numpy(),'r-') plt.show()Autograd : Automatic differentiation exampleimport torch from torch.autograd import Variable x = Variable(torch.ones(2, 2), requires_grad=True) print(x) y = x + 2 print(y) z = y * y * 3 out = z.mean() print(z, out) out.backward() print(x.grad)Variable containing: 4.5000 4.5000 4.5000 4.5000 [torch.FloatTensor of size 2x2]Calculating and plotting the the derivative of a function using autodiff%matplotlib inline import matplotlib.pylab as plt x = Variable(torch.arange(-5,5,0.2), requires_grad=True) #print(x) #y = torch.sum(torch.sigmoid(0.3*x)) y = torch.sum(x*torch.cos(x)**2) #print(y) #plt.plot(x.data.numpy(), y.data.numpy() ) #plt.show() y.backward() plt.plot(x.data.numpy(), x.grad.data.numpy() ) plt.show()$$f(x_1, x_2) = \sin\left(\frac{1}{2} x_1^2 - \frac{1}{4} x_2^2 + 3 \right) \cos\left(2 x_1+1-e^{x_2}\right)$$import torch as tr from torch.autograd import Variable x_1 = Variable( tr.FloatTensor([0.5]) , requires_grad=True) x_2 = Variable( tr.FloatTensor([3.5]) , requires_grad=True) u1 = x_1 ** 2 u2 = 0.5 * u1 u3 = x_2 ** 2 u4 =-0.25 * u3 u5 = u2 + u4 u6 = u5 + 3 u7 = tr.sin(u6) u8 = 2 * x_1 u9 = u8 + 1 u10 = tr.exp(x_2) u11 = -1 * u10 u12 = u9 + u11 u13 = tr.cos(u12) f = u7 * u13 print(f.data) f.backward() print(x_1.grad.data) print(x_2.grad.data) u7.backward() x_1.grad.data$$f(x_1, x_2) = x_1^2$$x_1 = Variable( tr.FloatTensor([-4]) , requires_grad=True) x_2 = Variable( tr.FloatTensor([3]) , requires_grad=True) f = x_1**2 + 2*x_2**2 print(f.data) f.backward() print(x_1.grad.data) print(x_2.grad.data) A = torch.DoubleTensor([[1,2,3],[4,5,6]]) T = torch.rand([3,5,2]) u = torch.DoubleTensor([[7],[8]]) w = torch.rand([5,3]).double()Examine pricesns.histplot(df.price, bins=100) sns.histplot(np.log1p(df.price), bins=50)Data Wranglingcolumns_of_interest = [ 'latitude' , 'longitude' , 'price' , 'minimum_nights' , 'number_of_reviews' , 'reviews_per_month' , 'calculated_host_listings_count' ,'availability_365'] df = df[columns_of_interest] df.head()Q1: Missing Valuesdf.isnull().sum()Q2: Median minimum_nightsdf.minimum_nights.median()Split 
data n = len(df) n_val = int(n * 0.2) n_test = int(n * 0.2) n_train = n - n_val - n_test idx = np.arange(n) np.random.seed(42) np.random.shuffle(idx) df_train = df.iloc[idx[:n_train]] .reset_index(drop=True) df_test = df.iloc[idx[n_train:n_train+n_test]].reset_index(drop=True) df_val = df.iloc[idx[n_train+n_test:]] .reset_index(drop=True) df_train_full = df.iloc[idx[:n_train+n_test]] .reset_index(drop=True) y_train = np.log1p(df_train.price) y_test = np.log1p(df_test.price) y_val = np.log1p(df_val.price) y_train_full = np.log1p(df_train_full.price) del df_train ['price'] del df_test ['price'] del df_val ['price'] del df_train_full['price']TRAINING FUNCTIONdef prepare_X_zero(df): df = df.copy() df['reviews_per_month'] = df.reviews_per_month.fillna(0) X = df.values return X def prepare_X_mean(df): df = df.copy() df['reviews_per_month'] = df.reviews_per_month.fillna(df.reviews_per_month.mean()) X = df.values return X def train_linear_regression_reg(X, y, r=0.001): X = np.column_stack([np.ones(X.shape[0]), X]) XTX = X.T.dot(X) XTX = XTX + r * np.eye(XTX.shape[0]) XTX_inv = np.linalg.inv(XTX) w_full = XTX_inv.dot(X.T).dot(y) return w_full[0], w_full[1:] def rmse(y, y_pred): se = (y - y_pred) ** 2 mse = se.mean() return np.sqrt(mse) X_train_zero = prepare_X_zero(df_train) X_val_zero = prepare_X_zero(df_val) X_train_mean = prepare_X_mean(df_train) X_val_mean = prepare_X_mean(df_val) w0_zero, w_zero = train_linear_regression_reg(X_train_zero,y_train,r=0) w0_mean, w_mean = train_linear_regression_reg(X_train_mean,y_train,r=0) y_pred_zero = w0_zero + X_val_zero.dot(w_zero) y_pred_mean = w0_mean + X_val_mean.dot(w_mean) print('reviews_per_month na -> 0 : {}'.format(round(rmse(y_val,y_pred_zero),2))) print('reviews_per_month na -> mean: {}'.format(round(rmse(y_val,y_pred_mean),2)))reviews_per_month na -> 0 : 0.65 reviews_per_month na -> mean: 0.65Neither option is better at this level of accuracy; technically, filling with 0 is slightly better. But for the next question we use 0.
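As a quick way to convince yourself that `train_linear_regression_reg` with `r=0` is plain ordinary least squares, the sketch below (not part of the original homework, using synthetic data) repeats the normal-equation solve and compares it with `np.linalg.lstsq`.

```python
import numpy as np

# Sanity check (not part of the original homework): with r = 0 the
# normal-equation solver above reduces to ordinary least squares with an
# intercept column, so it should agree with np.linalg.lstsq.
rng = np.random.default_rng(42)
X = rng.normal(size=(200, 3))
y = 1.5 + X @ np.array([0.5, -2.0, 3.0]) + 0.1 * rng.normal(size=200)

Xb = np.column_stack([np.ones(X.shape[0]), X])   # same intercept column as in the solver
r = 0
w_normal = np.linalg.inv(Xb.T @ Xb + r * np.eye(Xb.shape[1])) @ Xb.T @ y
w_lstsq, *_ = np.linalg.lstsq(Xb, y, rcond=None)
print(np.allclose(w_normal, w_lstsq))            # True: identical OLS weights
```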
Q4: What regularisation to useprepare_X = prepare_X_zero X_train = prepare_X(df_train) X_val = prepare_X(df_val) res = [] for r in [0, 0.000001, 0.0001, 0.001, 0.01, 0.1, 1, 5, 10]: w0, w = train_linear_regression_reg(X_train,y_train,r=r) y_pred = w0 + X_val.dot(w) error = rmse(y_val,y_pred) print('{:10}:{} ({})'.format(r,error,round(error,2)))0:0.6536337649996121 (0.65) 1e-06:0.6536338933922969 (0.65) 0.0001:0.653651793240982 (0.65) 0.001:0.6541885180213967 (0.65) 0.01:0.6656175809489505 (0.67) 0.1:0.6869575817254718 (0.69) 1:0.6919185299146808 (0.69) 5:0.6923850940178745 (0.69) 10:0.6924224370107044 (0.69)smallest error when r=0r = 0Q5 What effect does seed havedef split_by_seed(seed): idx = np.arange(n) np.random.seed(seed) np.random.shuffle(idx) df_train = df.iloc[idx[:n_train]] .reset_index(drop=True) df_test = df.iloc[idx[n_train:n_train+n_test]].reset_index(drop=True) df_val = df.iloc[idx[n_train+n_test:]] .reset_index(drop=True) df_train_full = df.iloc[idx[:n_train+n_test]] .reset_index(drop=True) y_train = np.log1p(df_train.price) y_test = np.log1p(df_test.price) y_val = np.log1p(df_val.price) y_train_full = np.log1p(df_train_full.price) del df_train ['price'] del df_test ['price'] del df_val ['price'] del df_train_full['price'] return df_train, df_test, df_val, df_train_full, y_train, y_test, y_val, y_train_full def calculate_rmse_by_seed(seed): df_train, df_test, df_val, df_train_full, y_train, y_test, y_val, y_train_full = split_by_seed(seed) X_train = prepare_X(df_train) X_val = prepare_X(df_val) w0, w = train_linear_regression_reg(X_train,y_train,r=r) y_pred = w0 + X_val.dot(w) error = rmse(y_val,y_pred) return error seeds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] errors = [ calculate_rmse_by_seed(seed) for seed in seeds] print(errors) print(np.std(errors)) print(np.round(np.std(errors),3))[0.6304569982323267, 0.6486723821210312, 0.6409503261201178, 0.6396698468034724, 0.6555733448818004, 0.6391858794518651, 0.6473543640304936, 0.655889218349002, 0.6405135568815358, 0.6450366766956963] 0.007450888539027653 0.007Q6: Final model errorseed = 42 df_train, df_test, df_val, df_train_full, y_train, y_test, y_val, y_train_full = split_by_seed(seed) X_train_full = prepare_X(df_train_full) X_test = prepare_X(df_test) w0, w = train_linear_regression_reg(X_train_full,y_train_full,r=0.001) y_test_pred = w0 + X_test.dot(w) rmse(y_test,y_test_pred)IntroductionLet's say there are three bacteria species that characterize the gut, and we hypothesize that they are ever so shifted off from one another, but we don't know how (i.e. ignore the data-generating distribution below). Can we figure out the proportion parameters and their uncertainty? Generate Synthetic DataIn the synthetic dataset generated below, we pretend that every patient is one sample, and we are recording the number of sequencing reads corresponding to some OTUs (bacteria). Each row is one sample (patient), and each column is one OTU (sample). ProportionsFirstly, let's generate the ground truth proportions that we will infer later on.def proportion(arr): arr = np.asarray(arr) return arr / arr.sum() healthy_proportions = proportion([10, 16, 2]) healthy_proportions sick_proportions = proportion([10, 27, 15]) sick_proportionsDataNow, given the proportions, let's generate data. 
Here, we are assuming that there are 10 patients per cohort (10 sick patients and 10 healthy patients), and that the number of counts in total is 50.n_data_points = 10 def make_healthy_multinomial(arr): n_sequencing_reads = 50 # npr.poisson(lam=50) return npr.multinomial(n_sequencing_reads, healthy_proportions) def make_sick_multinomial(arr): n_sequencing_reads = 50 # npr.poisson(lam=50) return npr.multinomial(n_sequencing_reads, sick_proportions) # Generate healthy data healthy_reads = np.zeros((n_data_points, 3)) healthy_reads = np.apply_along_axis(make_healthy_multinomial, axis=1, arr=healthy_reads) # Generate sick reads sick_reads = np.zeros((n_data_points, 3)) sick_reads = np.apply_along_axis(make_sick_multinomial, axis=1, arr=sick_reads) # Make pandas dataframe healthy_df = pd.DataFrame(healthy_reads) healthy_df.columns = ['bacteria1', 'bacteria2', 'bacteria3'] healthy_df = pm.floatX(healthy_df) sick_df = pd.DataFrame(sick_reads) sick_df.columns = ['bacteria1', 'bacteria2', 'bacteria3'] sick_df = pm.floatX(sick_df) healthy_df.dtypes sick_df.dtypesModel ConstructionHere's an implementation of the model - Dirichlet prior with Multinomial likelihood.There are 3 classes of bacteria, so the Dirichlet distribution serves as the prior probability mass over each of the classes in the multinomial distribution.The multinomial distribution serves as the likelihood function.with pm.Model() as dirichlet_model: proportions_healthy = pm.Dirichlet('proportions_healthy', a=np.array([1.0] * 3).astype('float32'), shape=(3,), testval=[0.1, 0.1, 0.1]) proportions_sick = pm.Dirichlet('proportions_sick', a=np.array([1.0] * 3).astype('float32'), shape=(3,), testval=[0.1, 0.1, 0.1]) healthy_like = pm.Multinomial('like_healthy', n=50, p=proportions_healthy, observed=healthy_df.values) sick_like = pm.Multinomial('like_sick', n=50, p=proportions_sick, observed=sick_df.values)Samplingwith dirichlet_model: dirichlet_trace = pm.sample(draws=10000, start=pm.find_MAP(), step=pm.Metropolis()) pm.traceplot(dirichlet_trace)Resultspm.forestplot(dirichlet_trace, ylabels=['healthy_bacteria1', 'healthy_bacteria2', 'healthy_bacteria3', 'sick_bacteria1', 'sick_bacteria2', 'sick_bacteria3']) healthy_proportions, sick_proportionsConverting pairwise constraints to labeled samples Clustering is an unsupervised data analysis technique, in which a dataset is partitioned into a set of clusters, which each consist of a dense region of samples. Unlike classification, what constitutes a good clustering is ambiguous. Semi-supervised or constrained clustering introduces information to this problem, in the form of pairwise constraints. This allows clusterings to be found which better represent the goals of the user.A constraint is a relationship between a pair of samples. Must-link constraints indicate the two samples are in the same cluster, and cannot-link constraints indicate that they are in different clusters. It is a challenge to find clusterings that follow the natural structure of the dataset, while adhering to constraints. This is partly due to the difficulty of working with pairwise constraints as opposed to labels. Pairwise constraints do have their advantages, however. It is very simple for a human oracle to determine if a pair of samples are in the same or different classes. 
If there are a large number of classes, or if classes do not have obvious labels (image segmentation), then it is difficult to provide a label.In this work, we turn a set of pairwise constraints into a set of labeled samples, which can be used to train a classifier. Thus, the very difficult task of constrained clustering is simplified to a classification problem. As a classification problem, new samples can be added to resulting grouping in an online manner. Classifiers are much more efficient than constrained clustering techniques. Active learning and outlier detection are also better suited to the classification domain.import matplotlib.pyplot as plt import sklearn.datasets as ds from robustclust import get_constraints, \ E2CP, \ SpectralLearning, \ plot_constraintsFirst, we make some synthetic data, consistently of 2-dimensions Gaussian blobs. We also generate a random set of constraints.n_clusters, N, n_constraints = (3, 1000, 30) data, labels = ds.make_blobs(n_samples=N, n_features=2, centers=n_clusters) constraint_mat, _ = get_constraints(data, labels, n_constraints=n_constraints)We can plot the data and the constraints. Must-links (ML) are indicated by solid lines, while cannot-links (CL) are represented by dashed lines.plot_constraints(data, labels=labels, constraint_mat=constraint_mat)Now we create a ConstraintsToLabels object, which accepts the data and constraint sets, which are the two forms of information available in a constrained clustering problem. A call to the fit_constrained() method converts the pairwise constraints into a set of labeled data. The samples which are labeled are those which are involved in a constraint. The method uses unsupervised hierarchical clustering to agglomerate the constrained samples into groups which do not violate any cannot-link constraints. The groups I will refer to as nodes. Between the nodes, there are both ML and CL constraints. In the plot below I draw the nodes, overlying the groups of samples they represent, with the population of the group shown in the node. The net constraint values between the nodes (ML - CL) are represented by lines of different thickness.cc1 = E2CP(n_clusters=n_clusters, constraint_mat=constraint_mat) cc1.fit_constrained(data) plot_constraints(data, labels=cc1.labels) cc2 = SpectralLearning(n_clusters=n_clusters, constraint_mat=constraint_mat) cc2.fit_constrained(data) plot_constraints(data, labels=cc2.labels)/home/evan/robust-clust/robustclust/constrained.py:114: RuntimeWarning: divide by zero encountered in true_divide W[ind1, ind2] = self.aff_mat[ind1, ind2] / (np.sqrt(self.aff_mat[ind1, ind1]) * np.sqrt(self.aff_mat[ind2, ind2])) /home/evan/anaconda3/lib/python3.6/site-packages/numpy/core/_methods.py:26: RuntimeWarning: invalid value encountered in reduce return umr_maximum(a, axis, None, out, keepdims) /home/evan/robust-clust/robustclust/constrained.py:135: RuntimeWarning: invalid value encountered in greater_equal ml_ind = Fbar >= 0 /home/evan/robust-clust/robustclust/constrained.py:137: RuntimeWarning: invalid value encountered in less cl_ind = Fbar < 0 /home/evan/anaconda3/lib/python3.6/site-packages/sklearn/manifold/spectral_embedding_.py:234: UserWarning: Graph is not fully connected, spectral embedding may not work as expected. warnings.warn("Graph is not fully connected, spectral embedding" /home/evan/anaconda3/lib/python3.6/site-packages/scipy/sparse/csgraph/_laplacia[...]This is a special graph cut problem. A good solution joins nodes with a high ML - CL value between them. 
The result of this process is a set of labels (trainLabels) for the set of constrained samples (trainInd).We plot the very simple classification problem below, which was derived from a complicated mess of pairwise constraints. Note that the number of clusters was not known by the method.plt.figure() cc.plot_labels(data) cc.plot_labels(data[trainInd,:], trainLabels) plt.show()Data Visualization can be done by implementing Python libraries First import different libraries like pandas, numpy, seaborn, matplotlibimport pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inlineRead data from the csv filedata = pd.read_csv("heart_failure_clinical_records_dataset.csv") dataPlot the graphs using different attributes of the datasetplt.bar(x=data['age'],height=data['ejection_fraction']) plt.xlabel('Age') plt.ylabel('Ejection Fraction') plt.figure(figsize=(7,4)) plt.bar(x=data['age'],height=data['platelets']) plt.xlabel('Age') plt.ylabel('Platelets level') plt.figure(figsize=(7,4)) plt.bar(x=data['age'],height=data['serum_sodium']) plt.xlabel('Age') plt.ylabel('Serum Sodium') plt.figure(figsize=(7,4)) plt.bar(x=data['age'],height=data['creatinine_phosphokinase']) plt.xlabel('Age') plt.ylabel('Creatinine Phosphokinase') plt.figure(figsize=(7,4)) plt.bar(x=data['age'],height=data['serum_creatinine']) plt.xlabel('Age') plt.ylabel('Serum Creatinine') x = data.age y = data.serum_sodium plt.scatter(x,y) plt.show() x = data.age y = data.platelets plt.scatter(x,y) plt.show() x = data.age y = data.ejection_fraction plt.scatter(x,y) plt.show() x = data.age y = data.creatinine_phosphokinase plt.scatter(x,y) plt.show() x = data.age y = data.serum_creatinine plt.scatter(x,y) plt.show() pt = sns.distplot(data['platelets']) an = sns.distplot(data['anaemia']) cp = sns.distplot(data['creatinine_phosphokinase']) ef = sns.distplot(data['ejection_fraction']) sc = sns.distplot(data['serum_creatinine']) ss = sns.distplot(data['serum_sodium']) data.boxplot(by ='anaemia', column =['serum_sodium'], grid = False) data.boxplot(by ='high_blood_pressure', column =['platelets'], grid = False) data.boxplot(by ='serum_creatinine', column =['diabetes'], grid = False)Plot the Heatmapfig, ax = plt.subplots(figsize=(8,5)) sns.heatmap(data.corr(), center=0, cmap='Blues') ax.set_title('Heatmap') fig, ax = plt.subplots(figsize=(10,8)) sns.heatmap(data.corr(), center=0, cmap='BrBG', annot=True)Two Sumclass Solution: def twoSum(self, nums: List[int], target: int) -> List[int]: nums_hashmap = {} for i in range(len(nums)): nums_hashmap[nums[i]] = i for j in range(len(nums)): y = target-nums[j] if y in nums_hashmap and nums_hashmap[y] != j: return [j, nums_hashmap[y]]NBA positions are out of date. Can we update them? Let's use clustering to find some natural grouping of players. The features are gathered from stats.nba.com. The features will look different than typical basketball statistics. Usually the statistics ESPN puts on the screen are measuring how well the player executes. Consider field goal percentage (FG%) which measures what percentage of shots are made. This statistic is measuring how well a player can make baskets. But what if we're more interested in *how often* the player shoots. This is not measuring their ability to execute but their role on the team. 
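For instance, a role-oriented feature can be built by normalising attempts by playing time instead of by makes; a minimal hypothetical sketch (the column names and numbers are made up for illustration):

```python
import pandas as pd

# Hypothetical season totals for three players
stats = pd.DataFrame({
    "PLAYER": ["A", "B", "C"],
    "MIN": [1800, 2400, 900],
    "FGA": [700, 1200, 280],
    "FGM": [320, 540, 130],
}).set_index("PLAYER")

stats["FG%"] = stats["FGM"] / stats["FGA"]             # efficiency: how well shots are made
stats["FGA_per36"] = 36 * stats["FGA"] / stats["MIN"]  # role: how often the player shoots
print(stats)
```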
The features used in this analysis follow the same trend.import pandas as pd from sklearn.cluster import KMeans from sklearn.cluster import DBSCAN from sklearn.mixture import GaussianMixture import numpy as np import matplotlib.pyplot as plt df = pd.read_csv('C:/projects/summer2020/nba-clustering/data/data(w_FGA).csv') df = df.fillna(value=0) df = df.set_index('PLAYER') minutes_played = df['MIN'] X = df.drop(columns='MIN') X %matplotlib inline possible_k = list(range(1,11)) inertia = [] for k in possible_k: temp_kmeans = KMeans(n_clusters=k).fit(X) inertia.append(temp_kmeans.inertia_) plt.bar(possible_k, inertia, color='darkorange', width=0.5) plt.show() k = 3 kmeans = KMeans(n_clusters=k).fit(X) labels = kmeans.labels_ minutes_played = minutes_played.to_frame() minutes_played.insert(1, 'LABELS', labels) def get_top_players(df, label): df_label = df[df['LABELS'] == label] top_10_bool = df_label.loc[:, 'MIN'].rank(ascending=False, method='first') <= 10.0 top_10 = df_label[top_10_bool].sort_values(by='MIN', ascending=False) return list(top_10.index) top_10_lists = [] for label in range(k): top_10_lists.append(get_top_players(minutes_played, label)) pd.DataFrame(top_10_lists).transpose() cluster, counts = np.unique(labels, return_counts=True) [print('Group ' + str(cluster[i]) + ': ' + str(counts[i]) + ' elements') for i in range(len(cluster))] labeled_X = X.copy() labeled_X.insert(0, 'LABELS', labels)Let's look at the data distribution for each cluster, feature combination. Each feature has three lines, a blue line for cluster 0, gold line for cluster 1, and magenta line for cluster 2. The line begins at the 25th percentile of the cluster's distribution over the feature. The line ends at the 75th percentile. The point on the line is the mean of the data.df.columns median_k = np.median(list(range(k))) line_colors = ['c', 'darkorange', 'm'] fig, axs = plt.subplots(ncols=3, figsize=(20, 4)) feature_list = list(df.columns) feature_list.remove('MIN') for p in range(3): var_names = feature_list[8*p : 8*(p+1)] for i in range(k): temp_df = labeled_X[labeled_X['LABELS'] == i] for j in range((8*p+1), 8*(p+1)+1): feat_num = j col_data = temp_df.iloc[:,feat_num] col_stats = col_data.describe() x_val = feat_num + 0.1*(i - median_k) axs[p].plot([x_val, x_val], [col_stats['25%'], col_stats['75%']], line_colors[i], linewidth=2) axs[p].plot(x_val, col_stats['mean'], 'k.') axs[p].set_ylim([0,1]) axs[p].set_xticklabels(['']+var_names) plt.setp(axs[p].xaxis.get_majorticklabels(), rotation=70)There are a number of interesting observations from these plots. 1. The first is the miles traveled on both offense and defense are similar between the three clusters.2. Some of the features could be redundant i.e. 
the POST-UP% and POST UPS.I will remove some of the features and run it again to see how the results differ.new_X = df.drop(columns=['MIN', 'POST-UP %', 'DIST MILES OFF', 'DIST MILES DEF']) k = 3 kmeans = KMeans(n_clusters=k).fit(new_X) labels = kmeans.labels_ minutes_played = df['MIN'] minutes_played = minutes_played.to_frame() minutes_played.insert(1, 'LABELS', labels) top_10_lists = [] for label in range(k): top_10_lists.append(get_top_players(minutes_played, label)) pd.DataFrame(top_10_lists).transpose() cluster, counts = np.unique(labels, return_counts=True) [print('Group ' + str(cluster[i]) + ': ' + str(counts[i]) + ' elements') for i in range(len(cluster))] labeled_X = new_X.copy() labeled_X.insert(0, 'LABELS', labels)We'll plot the distribution for each cluster and feature combination with the reduced feature set.median_k = np.median(list(range(k))) line_colors = ['c', 'darkorange', 'm'] fig, axs = plt.subplots(ncols=3, figsize=(20, 4)) for p in range(3): var_names = list(new_X.columns[8*p: 8*(p+1)]) for i in range(k): temp_df = labeled_X[labeled_X['LABELS'] == i] for j in range((8*p+1), 8*(p+1)+1): if j >= temp_df.shape[1]: break feat_num = j col_data = temp_df.iloc[:,feat_num] col_stats = col_data.describe() x_val = feat_num + 0.1*(i - median_k) axs[p].plot([x_val, x_val], [col_stats['25%'], col_stats['75%']], line_colors[i], linewidth=2) axs[p].plot(x_val, col_stats['mean'], 'k.') axs[p].set_ylim([0,1]) axs[p].set_xticklabels(['']+var_names) plt.setp(axs[p].xaxis.get_majorticklabels(), rotation=70)These graphics give us good insights into the characteristics of each player group. Here are some highlights:Group 0:- smaller size (for an NBA player)- typically shoot the most- time of possession is much longer- dribble a lotGroup 1:- average size- the only thing they do most is three point attempts- the only thing they do least is touches in a game- I wish I had more to say. I'm sure they're great guys.Group 2:- bigger size- shoot the most within eight feet of the hoop- defend FGA the most- dribble the least What do we do with this information?The dialogue surrounding the game should evolve with the game itself. What is a Power Forward? I still think of (who played in the 90's) when I hear the term. His style - and that of his colleagues - fit the name "Power Forward". They were strong, large players that played up front by the basket. They differed from a Center in the way they would run the court and shoot 12 foot jump shots. During that era the differences of all five players seemed more distinct. Today's game doesn't feel that way. The five players playing on a team look like a combination of the players described above. Some of the most common terms for the three groups above are Guards, Three and D's, and Bigs, respectively. Let's look at the team with the best regular season record from the 19-20 season and their most played 5-man lineup.bucks_lineup = ['', '', '', '', ''] labeled_X.loc[bucks_lineup, 'LABELS']This team benefits from playing as a Big with skills of a Guard. The other Big on this team, , is also unusual for a Big. He is an excellent three point shooter despite being a Big. This is a well balanced team that has pieces that fit well together. I think the natural next step is to wonder if there is a five person combination of these three types of players that performs better than the rest. To do that you need to control for player skill variability as well as coaching style. 
Successfully answering that question would be a great tool for team decision makers when making roster additions or changes. I'll leave that for the next project. I think using features that tell the story of what a player does instead of how well they do it produces more intuitive clustering of players. This establishes a good foundation for further research into what an ideal five-person line-up looks like. Appendix A: Principal Component AnalysisAlthough the feature space is not large, we'll run PCA for visualization purposes and in case we can glean more from the data.from sklearn.decomposition import PCA pca = PCA(n_components=10) principalComponents = pca.fit_transform(X) features = range(pca.n_components_) plt.bar(features, pca.explained_variance_ratio_, color='darkorange') plt.xlabel('PCA features') plt.ylabel('variance %') plt.xticks(features) PCA_components = pd.DataFrame(principalComponents) plt.scatter(PCA_components[0], PCA_components[1], alpha=.1, color='darkorange') plt.xlabel('PCA 1') plt.ylabel('PCA 2')Examples:import numpy as npVectors: Row vector:v_hor_np = np.array([1, 2]) print(v_hor_np ) v_hor_zeros_v1 = np.zeros((5,)) print(v_hor_zeros_v1 ) v_hor_zeros_v2 = np.zeros((1, 5)) print(v_hor_zeros_v2 ) v_hor_one_v1 = np.ones((5,)) print(v_hor_one_v1) v_hor_one_v2 = np.ones((1, 5)) print(v_hor_one_v2)[1. 1. 1. 1. 1.] [[1. 1. 1. 1. 1.]]Column vector:v_vert_np = np.array([[1], [2]]) print(v_vert_np) v_vert_zeros = np.zeros((5, 1)) print(v_vert_zeros) v_vert_ones = np.ones((5, 1)) print(v_vert_ones)[[1.] [1.] [1.] [1.] [1.]]Square matrix:m_sqr_arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print(m_sqr_arr) m_sqr = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] m_sqr_arr = np.array(m_sqr) print(m_sqr_arr) m_sqr_mx = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print(m_sqr_mx) m_sqr_mx = np.matrix('1 2 3; 4 5 6; 7 8 9') print(m_sqr_mx)[[1 2 3] [4 5 6] [7 8 9]]Diagonal matrix:m_diag = [[1, 0, 0], [0, 5, 0], [0, 0, 9]] m_diag_np = np.matrix(m_diag) print(m_diag_np) m_sqr_mx = np.matrix('1 2 3; 4 5 6; 7 8 9') diag = np.diag(m_sqr_mx) print(diag) m_diag_np = np.diag(np.diag(m_sqr_mx)) print(m_diag_np)[[1 0 0] [0 5 0] [0 0 9]]Identity matrix:m_e = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] m_e_np = np.matrix(m_e) print(m_e_np) m_eye = np.eye(3) print(m_eye) m_idnt = np.identity(3) print(m_idnt)[[1. 0. 0.] [0. 1. 0.] [0. 0. 1.]]Zero matrix:m_zeros = np.zeros((3, 3)) print(m_zeros)[[0. 0. 0.] [0. 0. 0.] [0. 0. 0.]]Defining a matrix in general form:m_mx = np.matrix('1 2 3; 4 5 6') print(m_mx) m_var = np.zeros((2, 5)) print(m_var)[[0. 0. 0. 0. 0.] [0. 0. 0. 0.
0.]]Транспонирование матрицы:A = np.matrix('1 2 3; 4 5 6') print(A) A_t = A.transpose() print(A_t) print(A.T) A = np.matrix('1 2 3; 4 5 6') print(A) R = (A.T).T print(R) A = np.matrix('1 2 3; 4 5 6') B = np.matrix('7 8 9; 0 7 5') L = (A + B).T R = A.T + B.T print(L) print(R) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') L = (A.dot(B)).T R = (B.T).dot(A.T) print(L) print(R) A = np.matrix('1 2 3; 4 5 6') k = 3 L = (k * A).T R = k * (A.T) print(L) print(R) A = np.matrix('1 2; 3 4') A_det = np.linalg.det(A) A_T_det = np.linalg.det(A.T) print(format(A_det, '.9g')) print(format(A_T_det, '.9g'))-2 -2Действия над матрицами: Умножение матрицы на число:A = np.matrix('1 2 3; 4 5 6') C = 3 * A print(C) A = np.matrix('1 2; 3 4') L = 1 * A R = A print(L) print(R) A = np.matrix('1 2; 3 4') Z = np.matrix('0 0; 0 0') L = 0 * A R = Z print(L) print(R) A = np.matrix('1 2; 3 4') p = 2 q = 3 L = (p + q) * A R = p * A + q * A print(L) print(R) A = np.matrix('1 2; 3 4') p = 2 q = 3 L = (p * q) * A R = p * (q * A) print(L) print(R) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') k = 3 L = k * (A + B) R = k * A + k * B print(L) print(R)[[18 24] [30 36]] [[18 24] [30 36]]Сложение матриц:A = np.matrix('1 6 3; 8 2 7') B = np.matrix('8 1 5; 6 9 12') C = A + B print(C) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') L = A + B R = B + A print(L) print(R) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') C = np.matrix('1 7; 9 3') L = A + (B + C) R = (A + B) + C print(L) print(R) A = np.matrix('1 2; 3 4') Z = np.matrix('0 0; 0 0') L = A + (-1)*A print(L) print(Z)[[0 0] [0 0]] [[0 0] [0 0]]Умножение матриц:A = np.matrix('1 2 3; 4 5 6') B = np.matrix('7 8; 9 1; 2 3') C = A.dot(B) print(C) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') C = np.matrix('2 4; 7 8') L = A.dot(B.dot(C)) R = (A.dot(B)).dot(C) print(L) print(R) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') C = np.matrix('2 4; 7 8') L = A.dot(B + C) R = A.dot(B) + A.dot(C) print(L) print(R) A = np.matrix('1 2; 3 4') B = np.matrix('5 6; 7 8') L = A.dot(B) R = B.dot(A) print(L) print(R) A = np.matrix('1 2; 3 4') E = np.matrix('1 0; 0 1') L = E.dot(A) R = A.dot(E) print(L) print(R) print(A) A = np.matrix('1 2; 3 4') Z = np.matrix('0 0; 0 0') L = Z.dot(A) R = A.dot(Z) print(L) print(R) print(Z)[[0 0] [0 0]] [[0 0] [0 0]] [[0 0] [0 0]]Определитель матрицы:A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) np.linalg.det(A) A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) print(A.T) det_A = round(np.linalg.det(A), 3) det_A_t = round(np.linalg.det(A.T), 3) print(det_A) print(det_A_t) A = np.matrix('-4 -1 2; 0 0 0; 8 3 1') print(A) np.linalg.det(A) A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) B = np.matrix('10 4 -1; -4 -1 2; 8 3 1') print(B) round(np.linalg.det(A), 3) round(np.linalg.det(B), 3) A = np.matrix('-4 -1 2; -4 -1 2; 8 3 1') print(A) np.linalg.det(A) A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) k = 2 B = A.copy() B[2, :] = k * B[2, :] print(B) det_A = round(np.linalg.det(A), 3) det_B = round(np.linalg.det(B), 3) det_A * k det_B A = np.matrix('-4 -1 2; -4 -1 2; 8 3 1') B = np.matrix('-4 -1 2; 8 3 2; 8 3 1') C = A.copy() C[1, :] += B[1, :] print(C) print(A) print(B) round(np.linalg.det(C), 3) round(np.linalg.det(A), 3) + round(np.linalg.det(B), 3) A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') k = 2 B = A.copy() B[1, :] = B[1, :] + k * B[0, :] print(A) print(B) round(np.linalg.det(A), 3) round(np.linalg.det(B), 3) A = np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) k = 2 A[1, :] = A[0, :] + k * A[2, :] round(np.linalg.det(A), 3) A = 
np.matrix('-4 -1 2; 10 4 -1; 8 3 1') print(A) k = 2 A[1, :] = k * A[0, :] print(A) round(np.linalg.det(A), 3)[[-4 -1 2] [10 4 -1] [ 8 3 1]] [[-4 -1 2] [-8 -2 4] [ 8 3 1]]Inverse matrix:A = np.matrix('1 -3; 2 5') A_inv = np.linalg.inv(A) print(A_inv) A = np.matrix('1. -3.; 2. 5.') A_inv = np.linalg.inv(A) A_inv_inv = np.linalg.inv(A_inv) print(A) print(A_inv_inv) A = np.matrix('1. -3.; 2. 5.') L = np.linalg.inv(A.T) R = (np.linalg.inv(A)).T print(L) print(R) A = np.matrix('1. -3.; 2. 5.') B = np.matrix('7. 6.; 1. 8.') L = np.linalg.inv(A.dot(B)) R = np.linalg.inv(B).dot(np.linalg.inv(A)) print(L) print(R)[[ 0.09454545 0.03272727] [-0.03454545 0.00727273]] [[ 0.09454545 0.03272727] [-0.03454545 0.00727273]]Matrix rank:m_eye = np.eye(4) print(m_eye) rank = np.linalg.matrix_rank(m_eye) print(rank) m_eye[3][3] = 0 print(m_eye) rank = np.linalg.matrix_rank(m_eye) print(rank)[[1. 0. 0. 0.] [0. 1. 0. 0.] [0. 0. 1. 0.] [0. 0. 0. 0.]] 3 Challenge 2 - Getting to know our customer base better **Which state has the customers with the best credit scores?** Details The answer must contain the mean, median, mode and standard deviation of the credit score for each state in the dataset. The submission file must be in json format, following the example file "submission.json". NOTE: Python and pandas are recommended for this challenge, but other tools and programming languages can also be used. Data description: 'id': Customer identifier 'sobrenome': Customer's last name 'pontuacao_credito': Customer's credit score (generally, the higher the better) 'estado_residencia': Customer's state of residence 'genero': Customer's gender 'nivel_estabilidade': Customer's stability level 'saldo_conta': Balance available in the customer's account 'numero_produtos': Number of products the customer uses 'possui_cartao_de_credito': Whether the customer has a registered credit card 'membro_ativo': Whether the member logs in and uses the product frequently Note: The data is fictitious, but it tries to represent the reality of a customer base of a SaaS product. Objective We want to get to know our customers better, by state. To do that, we start with an analysis of the credit score. For this initial check we need a few values derived from the credit score: - mean; - median; - mode; - standard deviation. A quick sketch of how these can be computed follows below.
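All four statistics can be obtained from a single pandas groupby; note that, unlike the mean, median and standard deviation, the mode can return more than one value per group. This is a minimal sketch on a toy frame, not the challenge data itself:

```python
import pandas as pd

# Toy data with the same column names as the challenge dataset
toy = pd.DataFrame({
    "estado_residencia": ["SC", "SC", "SC", "RS", "RS", "PR", "PR", "PR"],
    "pontuacao_credito": [600, 650, 650, 700, 700, 580, 620, 620],
})

grouped = toy.groupby("estado_residencia")["pontuacao_credito"]
print(grouped.mean())
print(grouped.median())
print(grouped.agg(pd.Series.mode))  # can yield a scalar or an array per state when there are ties
print(grouped.std())
```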
Importing the librariesimport pandas as pdImporting the datasetdf = pd.read_csv('desafio1.csv')Data explorationdf.head() df.columns df.shape df.info() RangeIndex: 7000 entries, 0 to 6999 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 RowNumber 7000 non-null int64 1 id 7000 non-null object 2 sobrenome 7000 non-null object 3 pontuacao_credito 7000 non-null int64 4 estado_residencia 7000 non-null object 5 genero 7000 non-null object 6 idade 7000 non-null int64 7 nivel_estabilidade 7000 non-null int64 8 saldo_conta 7000 non-null float64 9 numero_produtos 7000 non-null int64 10 possui_cartao_de_credito 7000 non-null int64 11 membro_ativo 7000 non-null int64 dtypes: float64(1), int64(7), object(4) memory usage: 656.4+ KBExploratory analysisdf.describe() df['estado_residencia'].nunique() df['estado_residencia'].value_counts()Meanmedia = df.groupby('estado_residencia')['pontuacao_credito'].mean() mediaMedianmediana = df.groupby('estado_residencia')['pontuacao_credito'].median().sort_values( ascending=False) medianaMode# Mode moda = df.groupby('estado_residencia')['pontuacao_credito'].agg(pd.Series.mode) modaStandard deviationdesvio_padrao = df.groupby('estado_residencia')['pontuacao_credito'].std() desvio_padraoSubmissionsubmission = { state: { 'media': media[state], 'mediana': mediana[state], 'moda': moda[state], 'desvio_padrao': desvio_padrao[state] } for state in mediana.index } submission submission = pd.DataFrame(submission) submission submission.to_json('submission.json',orient='columns') resp = pd.read_json('submission.json') respBinary classification exampletry: cancer = pd.read_csv("https://raw.githubusercontent.com/changyaochen/MECE4520/master/lectures/lecture_4/breast_cancer_data.csv") except URLError: cancer = pd.read_csv("../lecture_4/breast_cancer_data.csv") cancer["label"] = cancer["diagnosis"].apply(lambda x: 0 if x == "B" else 1) cancer.head() # fit a logistic regression model model = smf.glm( formula='label ~ radius_mean', data=cancer, family=sm.families.Binomial(), ) result = model.fit() print(result.summary()) # bootstrap np.random.seed(42) B = 1000 # number of bootstrap samples beta_1s = [] for _ in tqdm(range(B)): # bootstrap the indices and build the bootstrap data idx = np.random.randint(low=0, high=len(cancer), size=len(cancer)) data_bootstrap = cancer.iloc[idx] # fit the model model_bootstrap = LogisticRegression(penalty="none", random_state=42) model_bootstrap.fit(X=data_bootstrap[["radius_mean"]], y=data_bootstrap["label"]) beta_1s.append(model_bootstrap.coef_.flatten()[0]) print(f"The mean of beta_1 is {np.mean(beta_1s):5.3f}.") print(f"The standard error of beta_1 is {np.std(beta_1s, ddof=1):5.3f}.") plt.figure() sns.histplot(beta_1s, bins=50) plt.xlabel("slope") plt.tight_layout() plt.show()The mean of beta_1 is 1.044. The standard error of beta_1 is 0.093.
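Beyond the standard error, the same bootstrap replicates can also give an interval estimate directly; a minimal sketch, assuming the `beta_1s` list built in the cell above:

```python
import numpy as np

# Percentile bootstrap 95% confidence interval for the slope,
# taken from the empirical distribution of the bootstrap estimates
lower, upper = np.percentile(beta_1s, [2.5, 97.5])
print(f"95% bootstrap CI for beta_1: [{lower:5.3f}, {upper:5.3f}]")
```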
Running TensorFlow local training with SageMaker What this notebook contains - How to run TensorFlow local training with SageMaker Details of the method used in the notebook - Algorithm: CNN - Data: MNIST Setup First, install the components needed for local execution. The setup steps are collected in a shell script, so just run it.!/bin/bash ./setup.sh import os import sagemaker from sagemaker import get_execution_role sagemaker_session = sagemaker.Session() role = get_execution_role()Loading the data For TensorFlow, we convert the data to TFRecord (protobuf) format and upload the converted data to S3. Data used for SageMaker training must be placed in S3. Here we download the MNIST data, run the conversion with the local `utils.py`, and upload the result to S3. By default, SageMaker uses a bucket named sagemaker-{region}-{your aws account number}; if that bucket does not exist, it is created automatically. You can place the data in a different bucket by passing bucket=XXXX to the upload_data() method.import utils from tensorflow.contrib.learn.python.learn.datasets import mnist import tensorflow as tf data_sets = mnist.read_data_sets('data', dtype=tf.uint8, reshape=False, validation_size=5000) utils.convert_to(data_sets.train, 'train', 'data') utils.convert_to(data_sets.validation, 'validation', 'data') utils.convert_to(data_sets.test, 'test', 'data')Before running the following, **change the `XX` in `data/mnist/XX` to the number you were assigned**.inputs = sagemaker_session.upload_data(path='data', key_prefix='data/mnist/XX')What changes in the code for local training Simply set `train_instance_type` and `instance_type` to `'local'`.!cat 'mnist.py'Run model training The training process is no different from the usual way of using TensorFlow with SageMaker. The only change needed for local execution is to set `train_instance_type` to `local` for training and `instance_type` to `local` for the endpoint. With just that, the container is pulled to the local environment and the job runs there. If you are on a P2 / P3 instance, specify `local_gpu` to run on the GPU.from sagemaker.tensorflow import TensorFlow mnist_estimator = TensorFlow(entry_point='mnist.py', role=role, training_steps=10, evaluation_steps=333, train_instance_count=2, train_instance_type='local') mnist_estimator.fit(inputs)Run model inference To run inference, first deploy the trained model. The `deploy()` method takes the number of instances and the instance type of the deployment endpoint. Deploying the model takes around 10 minutes.mnist_predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type='local')Let's actually call the endpoint.import numpy as np from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) for i in range(10): data = mnist.test.images[i].tolist() tensor_proto = tf.make_tensor_proto(values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32) predict_response = mnist_predictor.predict(tensor_proto) print("========================================") label = np.argmax(mnist.test.labels[i]) print("label is {}".format(label)) prediction = predict_response['outputs']['classes']['int64Val'][0] print("prediction is {}".format(prediction))Deleting the endpoint When everything is done, delete the endpoint.sagemaker.Session().delete_endpoint(mnist_predictor.endpoint)CAMS wildfire emissions About This notebook provides a practical introduction to CAMS global atmospheric forecasts and shows how you can use the variable `Total Aerosol Optical Depth at 550nm` for wildfire monitoring. The workflow shows the Total Aerosol Optical Depth at 550nm that originated from the devastating wildfires that caused record emissions around the Northern Hemisphere in August and September 2021.
The notebook has the following outline:* [1 - Request data from the ADS programmatically via the CDS API](request_data_fire)* [2 - Unzip the downloaded data file](unzip_fire)* [3 - Load and browse CAMS global atmospheric composition forecast of Total Aerosol Optical Depth at 550nm](load_fire)* [4 - Visualize the analysis of Total Aerosol AOD at 550nm](visualize_fire)* [5 - Animate 12-hourly analysis of Total AOD at 550nm over the northern hemisphere from 1 to 8 August 2021](animate_fire) Data This notebook introduces you to the [CAMS global atmospheric composition forecasts](https://ads.atmosphere.copernicus.eu/cdsapp!/dataset/cams-global-atmospheric-composition-forecasts?tab=overview). The data has the following specifications:> **Data**: `CAMS global atmospheric composition forecasts` > **Temporal coverage**: `12-hourly analysis for the period from 1 to 8 August 2021` > **Spatial coverage**: `Geographical subset for northern hemisphere` > **Format**: `zipped NetCDF` How to access the notebook* [**Kaggle**](https://kaggle.com/kernels/welcome?src=https://github.com/ecmwf-projects/copernicus-training/blob/master/Atmos-Training_CAMS-Fire-Monitoring.ipynb)* [**Binder**](https://hub-binder.mybinder.ovh/user/ecmwf-projects--rnicus-training-ulg9z83u/lab/tree/Atmos-Training_CAMS-Fire-Monitoring.ipynb)* [**Colab**](https://colab.research.google.com/github/ecmwf-projects/copernicus-training/blob/master/Atmos-Training_CAMS-Fire-Monitoring.ipynb)* [**nbviewer**](https://nbviewer.org/github/ecmwf-projects/copernicus-training/blob/master/Atmos-Training_CAMS-Fire-Monitoring.ipynb) Further resources* [Copernicus: A summer of wildfires saw devastation and record emissions around the Northern Hemisphere](https://atmosphere.copernicus.eu/copernicus-summer-wildfires-saw-devastation-and-record-emissions-around-northern-hemisphere) Install CDS API via pip!pip install cdsapiLoad libraries# CDS API import cdsapi import os # Libraries for working with multi-dimensional arrays import numpy as np import xarray as xr import pandas as pd # Libraries for plotting and visualising data import matplotlib.path as mpath import matplotlib.pyplot as plt from matplotlib import animation from IPython.display import HTML import time import cartopy.crs as ccrs from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import cartopy.feature as cfeature from IPython.display import clear_output clear_output(wait=True)1. Request data from the ADS programmatically with the CDS API The first step is to request data from the Atmosphere Data Store programmatically with the help of the CDS API. Let us make use of the option to manually set the CDS API credentials. First, you have to define two variables: `URL` and `KEY` which build together your CDS API key. Below, you have to replace the `` with your personal ADS key. Please find [here](https://ads.atmosphere.copernicus.eu/api-how-to) your personal ADS key.URL = 'https://ads.atmosphere.copernicus.eu/api/v2' KEY = '#######################'The next step is then to request the data with the help of the CDS API. Below, we request `Total Aerosol Optical Depth` for the northern hemisphere from 1 to 8 August 2021 from the [CAMS global atmospheric composition forecasts](https://ads.atmosphere.copernicus.eu/cdsapp!/dataset/cams-global-atmospheric-composition-forecasts?tab=overview) dataset. 
The request below requests `analysis` data, as we only request leadtime hour 0 for the two run times at 00:00 and 12:00 UTC.Let us store the dataset under the name `202108_northern_hemisphere_totalAOD.zip`.c = cdsapi.Client(url=URL, key=KEY) c.retrieve( 'cams-global-atmospheric-composition-forecasts', { 'variable': 'total_aerosol_optical_depth_550nm', 'date': '2021-08-01/2021-08-08', 'time': [ '00:00', '12:00', ], 'leadtime_hour': '0', 'type': 'forecast', 'area': [ 90, -180, 0, 180, ], 'format': 'netcdf_zip', }, './202108_northern_hemisphere_totalAOD.zip')2. Unzip the downloaded data file CAMS global atmospheric composition forecasts can be retrieved either in `GRIB` or in a `zipped NetCDF`. Above, we requested the data in a zipped NetCDF and for this reason, we have to unzip the file before we can open it. You can unzip `zip archives` in Python with the Python package `zipfile` and the function `extractall()`.import zipfile with zipfile.ZipFile('./202108_northern_hemisphere_totalAOD.zip', 'r') as zip_ref: zip_ref.extractall('./')3. Load and browse CAMS global atmospheric composition forecast of Total Aerosol Optical Depth at 550nm A netCDF file with the name `data.nc` is extracted from the zip archive. You can load the NetCDF file with the Python library [xarray](http://xarray.pydata.org/en/stable/) and the function `open_dataset()`. The function loads a `xarray.Dataset`, which is a collection of one or more data variables that share the same dimensions. You see that the data file has three dimensions, `latitude`, `longitude` and `time` and one variable, `aod550`.ds = xr.open_dataset('./data.nc') dsLet us now extract from the Dataset above the data variable `aod550` as `xarray.DataArray`. You can load a data array from a xarray.Dataset by specifying the name of the variable (`aod550`) in square brackets.While an xarray **dataset** may contain multiple variables, an xarray **data array** holds a single multi-dimensional variable and its coordinates. Below you see that the variable `aod550` represents Total Aerosol Optical Depth at 550 nm.aod550 = ds['aod550'] aod550Let us define variables for the two attributes `units` and `long_name`, which we can use during the visulisation of the data.aod550_unit = aod550.units aod550_long_name = aod550.long_name4. Visualize the analysis of Total AOD at 550nm And now we can plot the `Total AOD at 550 nm`. 
The visualisation code below can be divided into five main parts:* **Initiate a matplotlib figure:** with `plt.figure()` and an axes object* **Plotting function**: plot the data array with the matplotlib function `pcolormesh()`* **Define a geographic extent of the map**: use the minimum and maximum latitude and longitude bounds of the data* **Add additional mapping features**: such as coastlines, grid or a colorbar* **Set a title of the plot**: you can combine the `species name` and `time` information for the title# Index of analysis step time_index = 2 # Initiate a matplotlib figure fig = plt.figure(figsize=(15,15)) ax = plt.subplot(1,1,1, projection=ccrs.Stereographic(central_latitude=90)) # Plotting function with pcolormesh im = plt.pcolormesh(aod550['longitude'].values, aod550['latitude'].values, aod550[time_index,:,:], cmap='afmhot_r', transform=ccrs.PlateCarree()) # Define geographic extent of the map #ax.set_extent([aod550.longitude.min(),aod550.longitude.max(),aod550.latitude.min(),aod550.latitude.max()], crs=ccrs.PlateCarree()) # Add additional features such as coastlines, grid and colorbar ax.coastlines(color='black') ax.gridlines(draw_labels=True, linewidth=1, color='gray', alpha=0.5, linestyle='--') cbar = plt.colorbar(im,fraction=0.046, pad=0.05) cbar.set_label(aod550_unit, fontsize=14) # Set the title of the plot ax.set_title(aod550_long_name + ' over the northern hemisphere - ' + str(aod550.time[time_index].values)[:-10]+'\n', fontsize=18)5. Animate the 12-hourly analysis of Total AOD at 550nm over the northern hemisphere from 1 to 8 August 2021 In the last step, you can animate the `Total AOD at 550nm` in order to see how the aerosol plumes develop over a period of eight days, from 1 to 8 August 2021.You can create animations with matplotlib's `animation` module. Jupyter's function `HTML` can then be used to display HTML and video content. The animation consists of 4 parts:- **Setting the initial state:** Here, you define the general plot your animation shall use to initialise the animation. You can also define the number of frames (time steps) your animation shall have. - **Functions to animate:** An animation consists of three functions: `draw()`, `init()` and `animate()`. `draw()` is the function where individual frames are passed on and the figure is returned as image. In this example, the function redraws the plot for each time step. `init()` returns the figure you defined for the initial state. `animate()` calls the `draw()` function and is applied over the given number of frames (time steps). - **Create an `animation.FuncAnimation` object:** The functions defined before are now combined to build an `animation.FuncAnimation` object. - **Play the animation as video:** As a final step, you can integrate the animation into the notebook with the `HTML` class. You take the generated animation object and convert it to an HTML5 video with the `to_html5_video` function# Setting the initial state: # 1.
Define figure for initial plot fig = plt.figure(figsize=(15,15)) ax = plt.subplot(1,1,1, projection=ccrs.Stereographic(central_latitude=90)) # Plotting function with pcolormesh im = plt.pcolormesh(aod550['longitude'].values, aod550['latitude'].values, aod550[time_index,:,:], cmap='afmhot_r', transform=ccrs.PlateCarree()) ax.coastlines(color='black') ax.gridlines(draw_labels=True, linewidth=1, color='gray', alpha=0.5, linestyle='--') cbar = plt.colorbar(im,fraction=0.046, pad=0.05) cbar.set_label(aod550_unit, fontsize=14) # Set the title of the plot ax.set_title(aod550_long_name + ' over the northern hemisphere - ' + str(aod550.time[time_index].values)[:-10]+'\n', fontsize=18) frames = 15 def draw(i): img = plt.pcolormesh(aod550.longitude, aod550.latitude, aod550[i,:,:], cmap='afmhot_r', transform=ccrs.PlateCarree(), vmin=0, vmax=7, shading='auto') ax.set_title(aod550_long_name + ' '+ str(aod550.time[i].data)[:-10], fontsize=18, pad=20.0) return img def init(): return fig def animate(i): return draw(i) ani = animation.FuncAnimation(fig, animate, frames, interval=500, blit=False, init_func=init, repeat=True) HTML(ani.to_html5_video()) plt.close(fig)**Play the animation video as HTML5 video**HTML(ani.to_html5_video())Luria-Delbrück estimator corrected for death probability according to a continuous time model (Section 3.5 and section 3.5.3)This notebook is a Python implementation of the simulations described in section 3.5 and 3.5.3.It has been used _anytree_ library, from GitHub, to build the tree data structure that represent a generational tree.Repository: https://github.com/c0fec0de/anytreeDocumentation: https://anytree.readthedocs.io/# import import numpy as np import math import matplotlib.pyplot as plt from scipy.stats import binom from anytree import Node, RenderTree # Check that used machine supports int64 # max int64 ~ 9*10^18 if(np.random.randint(0, high = 9223372036854775808)): print("Ok, this machine supports int64!")Ok, this machine supports int64!Implementation of the Luria-Delbrück stochastic experiment Import from *LD.py*.from LD import LDNumber of cells with alive progeny varying extant cells' number and death probabilitySection 3.5, figure 10.# !!! REALLY TIME CONSUMING if death probability goes to 0.5 !!! # To reduce computational time (a lot) delete death_prob = 0.48 from death_probs # Test for (extant, count_with_alive_progeny) == (extant, attempts) # For different minimum sizes we plot # (extant, count_with_alive_progeny) varying death probability # Note that extinct trees are discarded print("Note that extinct trees are discarded!") # Repeat simulations N times to get mean and devSt N=1000 #variable parameters death_probs = [0. 
, 0.1, 0.2, 0.3, 0.4, 0.48] min_sizes = [32, 64, 128] # results lists # mean value count_with_alive_progeny = [] # standard deviation count_with_alive_progeny_std = [] # set seed to have reproducible results np.random.seed(0) # simulate for min_size in min_sizes: print("Min size:", min_size) #results for this death probability this_min_size_results = [] this_min_size_results_std = [] for death_prob in death_probs: print(" death probability:", death_prob, end = " ") # sigle runs results count_alive = np.array([]) for _ in range(N): if _%100 == 0: #show "progress bar" print(".", end=" ") if _ == N-1: print() tree = LD(bases = 1E+8, death_prob = death_prob, min_size = min_size) tree.count_with_alive_progeny() count_alive = np.append(count_alive, tree._count_with_alive_progeny) #save this death probability result this_min_size_results.append(np.mean(count_alive)) this_min_size_results_std.append(np.std(count_alive)) print(" Mean count_with_alive_progeny:",this_min_size_results[-1]) print(" Std count_with_alive_progeny:",this_min_size_results_std[-1]) count_with_alive_progeny.append(this_min_size_results) count_with_alive_progeny_std.append(this_min_size_results_std) # plot results fig = plt.figure(figsize=(10,7)) ax = fig.add_subplot(1, 1, 1) #plt.title("Ruolo della ploidia\n"+str(N)+" simulazioni con soglia 1/32") ax.set_xlabel('d/(b+d)', fontsize=20) ax.set_ylabel('Cellule con progenie viva', fontsize=20) ax.yaxis.offsetText.set_fontsize(20) # set x y range ax.set_xlim(-0.05, 0.55) ax.set_ylim(0, 1300) # set ticks ax.tick_params(which='major', width=1.0, labelsize=20) ax.tick_params(which='major', length=10, labelsize=20) for i in range(len(min_sizes)): ax.errorbar(death_probs, count_with_alive_progeny[i], yerr=count_with_alive_progeny_std[i], marker ='o', capsize=6, capthick=3, lw=3,fmt=' ',markersize=10, label='min size '+str(min_sizes[i])) ax.legend(fontsize=20, title='Legenda',title_fontsize=20) plt.tight_layout() ax.grid() #plt.savefig("attempts_death_prob.pdf") plt.show() #print(count_with_alive_progeny) #print(count_with_alive_progeny_std)Note that extinct trees are discarded! Min size: 32 death probability: 0.0 . . . . . . . . . . Mean count_with_alive_progeny: 62.0 Std count_with_alive_progeny: 0.0 death probability: 0.1 . . . . . . . . . . Mean count_with_alive_progeny: 93.84 Std count_with_alive_progeny: 14.482278826206876 death probability: 0.2 . . . . . . . . . . Mean count_with_alive_progeny: 101.593 Std count_with_alive_progeny: 14.970816644391848 death probability: 0.3 . . . . . . . . . . Mean count_with_alive_progeny: 115.889 Std count_with_alive_progeny: 15.255644168634769 death probability: 0.4 . . . . . . . . . . Mean count_with_alive_progeny: 147.686 Std count_with_alive_progeny: 21.115809338029173 death probability: 0.48 . . . . . . . . . . Mean count_with_alive_progeny: 230.377 Std count_with_alive_progeny: 46.603271033265464 Min size: 64 death probability: 0.0 . . . . . . . 
.[...]Test the analytic estimation of the number of cells with alive progeny given by the continuous time model Here trees are built given minimum final size, $N_\text{extant}$.Section 3.5.3, figure 11.# Test for (extant, count_with_alive_progeny) = (extant, attempts) # For different minimum sizes we plot # (mean generation number, count_with_alive_progeny) with fixed death probability # Note that extinct trees are discarded print("Note that extinct trees are discarded!") #variable parameter min_sizes = [32, 64, 128] #fixed death probability death_prob = 0.1 print("Fixed death probability:", death_prob) # Repeat simulations N times to get mean and devSt N=1000 # results lists mean_count = [] count_std = [] mean_gen = [] gen_std = [] # set seed to have reproducible results np.random.seed(0) # simulate for min_size in min_sizes: # sigle runs results count = np.array([]) gen = np.array([]) for _ in range(N): tree = LD(bases = 1E+8, death_prob = death_prob, min_size = min_size, accept_extinct=False) tree.count_with_alive_progeny() count = np.append(count, tree._count_with_alive_progeny) gen = np.append(gen, tree._num_layers) #save this min size result mean_count.append(np.mean(count)) count_std.append(np.std(count)) mean_gen.append(np.mean(gen)) gen_std.append(np.std(gen)) #analytic estimation estimated = [] for t in mean_gen: attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. * math.pow(2*(1-death_prob), t) estimated.append(attempts) #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(5, 10) #ax.set_ylim(50, 460) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) for i in range(len(min_sizes)): ax.errorbar(mean_gen[i], mean_count[i], yerr=count_std[i], xerr=gen_std[i], marker ='o', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='min size '+str(min_sizes[i])) ax.scatter(mean_gen, estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_size_integral_0.1.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(count_std) print("Mean gen:") print(mean_gen) print("Std gen:") print(gen_std) print("Estimated:") print(estimated) # Test for (extant, count_with_alive_progeny) = (extant, attempts) # For different minimum sizes we plot # (mean generation number, count_with_alive_progeny) with fixed death probability # Note that extinct trees are discarded print("Note that extinct trees are discarded!") #variable parameter min_sizes = [32, 64, 128] #fixed death probability death_prob = 0.2 print("Fixed death probability:", death_prob) # Repeat simulations N times to get mean and devSt N=1000 # results lists mean_count = [] count_std = [] mean_gen = [] gen_std = [] # set seed to have reproducible results np.random.seed(0) # simulate for min_size in min_sizes: # sigle runs results count = np.array([]) gen = np.array([]) for _ in range(N): tree = LD(bases = 1E+8, 
death_prob = death_prob, min_size = min_size, accept_extinct=False) tree.count_with_alive_progeny() count = np.append(count, tree._count_with_alive_progeny) gen = np.append(gen, tree._num_layers) #save this min size result mean_count.append(np.mean(count)) count_std.append(np.std(count)) mean_gen.append(np.mean(gen)) gen_std.append(np.std(gen)) #analytic estimation estimated = [] for t in mean_gen: attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. * math.pow(2*(1-death_prob), t) estimated.append(attempts) #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(6, 12.5) #ax.set_ylim(50, 500) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) for i in range(len(min_sizes)): ax.errorbar(mean_gen[i], mean_count[i], yerr=count_std[i], xerr=gen_std[i], marker ='o', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='min size '+str(min_sizes[i])) ax.scatter(mean_gen, estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_size_integral_0.2.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(count_std) print("Mean gen:") print(mean_gen) print("Std gen:") print(gen_std) print("Estimated:") print(estimated) # Test for (extant, count_with_alive_progeny) = (extant, attempts) # For different minimum sizes we plot # (mean generation number, count_with_alive_progeny) with fixed death probability # Note that extinct trees are discarded print("Note that extinct trees are discarded!") #variable parameter min_sizes = [32, 64, 128] #fixed death probability death_prob = 0.3 print("Fixed death probability:", death_prob) # Repeat simulations N times to get mean and devSt N=1000 # results lists mean_count = [] count_std = [] mean_gen = [] gen_std = [] # set seed to have reproducible results np.random.seed(0) # simulate for min_size in min_sizes: # sigle runs results count = np.array([]) gen = np.array([]) for _ in range(N): tree = LD(bases = 1E+8, death_prob = death_prob, min_size = min_size, accept_extinct=False) tree.count_with_alive_progeny() count = np.append(count, tree._count_with_alive_progeny) gen = np.append(gen, tree._num_layers) #save this min size result mean_count.append(np.mean(count)) count_std.append(np.std(count)) mean_gen.append(np.mean(gen)) gen_std.append(np.std(gen)) #analytic estimation estimated = [] for t in mean_gen: attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. 
* math.pow(2*(1-death_prob), t) estimated.append(attempts) #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(8, 18) #ax.set_ylim(80, 550) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) for i in range(len(min_sizes)): ax.errorbar(mean_gen[i], mean_count[i], yerr=count_std[i], xerr=gen_std[i], marker ='o', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='min size '+str(min_sizes[i])) ax.scatter(mean_gen, estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_size_integral_0.3.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(count_std) print("Mean gen:") print(mean_gen) print("Std gen:") print(gen_std) print("Estimated:") print(estimated) # Test for (extant, count_with_alive_progeny) = (extant, attempts) # For different minimum sizes we plot # (mean generation number, count_with_alive_progeny) with fixed death probability # Note that extinct trees are discarded print("Note that extinct trees are discarded!") #variable parameter min_sizes = [32, 64, 128] #fixed death probability death_prob = 0.4 print("Fixed death probability:", death_prob) # Repeat simulations N times to get mean and devSt N=1000 # results lists mean_count = [] count_std = [] mean_gen = [] gen_std = [] # set seed to have reproducible results np.random.seed(0) # simulate for min_size in min_sizes: # sigle runs results count = np.array([]) gen = np.array([]) for _ in range(N): tree = LD(bases = 1E+8, death_prob = death_prob, min_size = min_size, accept_extinct=False) tree.count_with_alive_progeny() count = np.append(count, tree._count_with_alive_progeny) gen = np.append(gen, tree._num_layers) #save this min size result mean_count.append(np.mean(count)) count_std.append(np.std(count)) mean_gen.append(np.mean(gen)) gen_std.append(np.std(gen)) #analytic estimation estimated = [] for t in mean_gen: attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. 
* math.pow(2*(1-death_prob), t) estimated.append(attempts) #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(12.3, 33) #ax.set_ylim(100, 700) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) for i in range(len(min_sizes)): ax.errorbar(mean_gen[i], mean_count[i], yerr=count_std[i], xerr=gen_std[i], marker ='o', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='min size '+str(min_sizes[i])) ax.scatter(mean_gen, estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_size_integral_0.4.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(count_std) print("Mean gen:") print(mean_gen) print("Std gen:") print(gen_std) print("Estimated:") print(estimated)Note that extinct trees are discarded! Fixed death probability: 0.4Here trees are built until a fixed number of generations.Section 3.5.3, figure 12.# Instead of using a minimum size to build the tree, # a test with fixed numeber of generations is made here. # fixed number of generations (time max) max_gen = 10 # Number of simulations N = 1000 # Death probability death_prob = 0.1 print("Extinct trees are accepted!") mean_count = np.zeros(max_gen) std_count = np.zeros(max_gen) estimated = np.zeros(max_gen) for gen in range(max_gen) : this_gen_this_prob_count = [] for _ in range (N): tree = LD(bases = 1E+8, death_prob = death_prob, gen=gen+1, accept_extinct=True) tree.count_with_alive_progeny() this_gen_this_prob_count.append(tree._count_with_alive_progeny) mean_count[gen] = np.mean(this_gen_this_prob_count) std_count[gen] = np.std(this_gen_this_prob_count) # Analytic estimation # In formulas t = gen, but here gen starts from zero t = gen + 1 attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1.-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. 
* math.pow(2*(1-death_prob), t) estimated[gen] = attempts #plot results #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(0.5, 10.5) #ax.set_ylim(-20, 1150) ## set ticksprobability ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) ax.errorbar(np.arange(1, max_gen+1), mean_count, yerr=std_count, marker ='o', c='tab:blue', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='Simulazione') plt.scatter(np.arange(1, max_gen+1), estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_gen_integral_0.1.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(std_count) print("Estimated:") print(estimated) # Instead of using a minimum size to build the tree, # a test with fixed numeber of generations is made here. # fixed number of generations (time max) max_gen = 10 # Number of simulations N = 1000 # Death probability death_prob = 0.2 print("Extinct trees are accepted!") mean_count = np.zeros(max_gen) std_count = np.zeros(max_gen) estimated = np.zeros(max_gen) for gen in range(max_gen) : this_gen_this_prob_count = [] for _ in range (N): tree = LD(bases = 1E+8, death_prob = death_prob, gen=gen+1, accept_extinct=True) tree.count_with_alive_progeny() this_gen_this_prob_count.append(tree._count_with_alive_progeny) mean_count[gen] = np.mean(this_gen_this_prob_count) std_count[gen] = np.std(this_gen_this_prob_count) # Analytic estimation # In formulas t = gen, but here gen starts from zero t = gen + 1 attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1.-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. * math.pow(2*(1-death_prob), t) estimated[gen] = attempts #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(0.5, 10.5) #ax.set_ylim(-20, 500) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) ax.errorbar(np.arange(1, max_gen+1), mean_count, yerr=std_count, marker ='o', c='tab:blue', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='Simulazione') plt.scatter(np.arange(1, max_gen+1), estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_gen_integral_0.2.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(std_count) print("Estimated:") print(estimated) # Instead of using a minimum size to build the tree, # a test with fixed numeber of generations is made here. 
# fixed number of generations (time max) max_gen = 10 # Number of simulations N = 1000 # Death probability death_prob = 0.3 print("Extinct trees are accepted!") mean_count = np.zeros(max_gen) std_count = np.zeros(max_gen) estimated = np.zeros(max_gen) for gen in range(max_gen) : this_gen_this_prob_count = [] for _ in range (N): tree = LD(bases = 1E+8, death_prob = death_prob, gen=gen+1, accept_extinct=True) tree.count_with_alive_progeny() this_gen_this_prob_count.append(tree._count_with_alive_progeny) mean_count[gen] = np.mean(this_gen_this_prob_count) std_count[gen] = np.std(this_gen_this_prob_count) # Analytic estimation # In formulas t = gen, but here gen starts from zero t = gen + 1 attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1.-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. * math.pow(2*(1-death_prob), t) estimated[gen] = attempts #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(0.5, 10.5) #ax.set_ylim(-20, 170) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) ax.errorbar(np.arange(1, max_gen+1), mean_count, yerr=std_count, marker ='o', c='tab:blue', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='Simulazione') plt.scatter(np.arange(1, max_gen+1), estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_gen_integral_0.3.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(std_count) print("Estimated:") print(estimated) # Instead of using a minimum size to build the tree, # a test with fixed numeber of generations is made here. # fixed number of generations (time max) max_gen = 10 # Number of simulations N = 1000 # Death probability death_prob = 0.4 print("Extinct trees are accepted!") mean_count = np.zeros(max_gen) std_count = np.zeros(max_gen) estimated = np.zeros(max_gen) for gen in range(max_gen) : this_gen_this_prob_count = [] for _ in range (N): tree = LD(bases = 1E+8, death_prob = death_prob, gen=gen+1, accept_extinct=True) tree.count_with_alive_progeny() this_gen_this_prob_count.append(tree._count_with_alive_progeny) mean_count[gen] = np.mean(this_gen_this_prob_count) std_count[gen] = np.std(this_gen_this_prob_count) # Analytic estimation # In formulas t = gen, but here gen starts from zero t = gen + 1 attempts = 0. # mean number of cell with at least one alive daughter # from generation zero to the second-last one attempts = (1-math.pow(death_prob,2))*1. * (math.pow(2*(1.-death_prob),t-1) -1) / (math.log(2*(1-death_prob))) # we add also the mean number of cells in the last layer attempts += 1. 
* math.pow(2*(1-death_prob), t) estimated[gen] = attempts #plot results fig = plt.figure(figsize=(14,14)) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Numero di generazioni', fontsize=40) ax.set_ylabel('Cellule con progenie viva', fontsize=40) ax.yaxis.offsetText.set_fontsize(40) ### set x y range #ax.set_xlim(0.5, 10.5) #ax.set_ylim(-8, 55) ## set ticks ax.tick_params(which='major', width=2.0, labelsize=40) ax.tick_params(which='major', length=20, labelsize=40) ax.errorbar(np.arange(1, max_gen+1), mean_count, yerr=std_count, marker ='o', c='tab:blue', capsize=12, capthick=6, lw=6,fmt=' ',markersize=20, label='Simulazione') plt.scatter(np.arange(1, max_gen+1), estimated, marker = 'x', s=800, label = 'Stima analitica', color = 'tab:red',zorder=10) ax.legend(fontsize=40, title='Legenda',title_fontsize=40) plt.tight_layout() plt.grid() plt.title("d/(b+d) = "+str(death_prob), fontsize=40) #plt.savefig("attempts_gen_integral_0.4.pdf") plt.show() print("Mean count:") print(mean_count) print("Std count:") print(std_count) print("Estimated:") print(estimated)Extinct trees are accepted!Test LD estimator corrected by the continuos time modelSection 3.5.3, figure 13.# Repeat simulations N times to get mean estimated mut rate and devSt N = 10000 # number of simulation for each death probability # N = 1000, time ~ 15 s # N = 10^4, time ~ 150 s # variable parameter death_probabilities = np.linspace(0., 0.4, 9) # results lists # mean values est_mu_no_dead = list([]) # using known number of cell with alive progeny est_mu_no_dead_estimated = list([]) # using analytic estimate # standard deviations est_mu_no_dead_std = list([]) est_mu_no_dead_estimated_std = list([]) # set seed to have reproducible results np.random.seed(0) # simulate for death_prob in death_probabilities: # sigle runs results mu_no_dead = np.array([]) mu_no_dead_estimated = np.array([]) # perform N single runs for _ in np.arange(N): tree = LD(bases = 1E+8, death_prob = death_prob, min_size = 32) tree.test_LD_estimator_no_dead() tree.test_LD_estimator_no_dead_estimated() #save single run results mu_no_dead = np.append(mu_no_dead, tree._mu_est_no_dead) mu_no_dead_estimated = np.append(mu_no_dead_estimated, tree._mu_est_no_dead_estimated) # save results from N runs est_mu_no_dead.append(np.mean(mu_no_dead)) est_mu_no_dead_std.append(np.std(mu_no_dead)) est_mu_no_dead_estimated.append(np.mean(mu_no_dead_estimated)) est_mu_no_dead_estimated_std.append(np.std(mu_no_dead_estimated)) #plot results fig = plt.figure(figsize=(10,7)) ax = fig.add_subplot(1, 1, 1) #plt.title('Stimatore corretto analiticamente\n'+str(N)+' simulazioni ogni punto') ax.set_xlabel(r'$d/(b+d)$', fontsize=20) ax.set_ylabel(r'$\mu (gen)^{-1}$', fontsize=20) ax.yaxis.offsetText.set_fontsize(20) ax.set_ylim(1.5*10**-9, 5.75*10**-9) ## set ticks ax.tick_params(which='major', width=1.0, labelsize=20) ax.tick_params(which='major', length=10, labelsize=20) ax.errorbar(death_probabilities, est_mu_no_dead_estimated, yerr=est_mu_no_dead_estimated_std, marker ='o', c='tab:orange', capsize=6, capthick=3, lw=3,fmt=' ',markersize=10, label='Stimatore analitico') plt.errorbar(death_probabilities, est_mu_no_dead, yerr=est_mu_no_dead_std, marker ='o', c='tab:blue', capsize=6, capthick=3, lw=3,fmt=' ',markersize=10, label='Stimatore corretto') ax.axhline(y=tree._mu, label='Reale', c='tab:gray',lw=3, ls=':') ax.legend(fontsize=20, title='Legenda',title_fontsize=20) plt.tight_layout() #plt.savefig('dead_cells_estimated_correction_integral.pdf') plt.show() # print results print("death 
probabilities") print(death_probabilities) print("\nestimated mu no-dead") print(est_mu_no_dead) print("\nestimated mu no-dead std") print(est_mu_no_dead_std) print("\nestimated mu no-dead estimated") print(est_mu_no_dead_estimated) print("\nestimated mu no-dead estimated std") print(est_mu_no_dead_estimated_std)Tài liệu này mang giấy phép Creative Commons Attribution (CC BY). (c) 06/2019.[@SangVn](https://github.com/SangVn) [@VnCFD](https://vncfdgroup.wordpress.com/)*Thực hành CFD với Python!* Bài 19. Lưới có cấu trúc, bài toán dòng chảy trên âm qua dốc Khi nói tới vấn đề lưới, bạn có thể nghe thấy các từ khác như `chia lưới, dựng lưới, tạo lưới, vẽ lưới`, tất cả chúng đề chỉ việc xây dựng lưới để tính toán. Lưới được chia thành hai dạng cơ bản `có cấu trúc (structured) và không cấu trúc (unstructured)`. Điểm cộng của từng loại đó là: việc lưu trữ, sắp xếp lưới có cấu trúc dễ dàng, thuận tiện hơn; thời gian hội tụ nhanh hơn khi dùng lưới có cấu trúc, lưới không cấu trúc có thể chia tự động ngay cả với những vùng tính toán có dạng hình học phức tạp.Việc xây dựng chương trình tính toán phải dựa trên loại lưới được sử dụng. Trong khuôn khổ phần 3, chúng ta sẽ sử dụng `lưới có cấu trúc`. Tạo lưới là một công việc không hề dễ dàng nhất là khi vùng tính toán có dạng hình học phức tạp, khi đó ta phải sử dụng các phần mềm chia lưới có sẵn, trong trường hợp đơn giản ta có thể tự viết code. Lưới tạo ra có thể được lưu ở các định dạng khác nhau tùy chương trình chia lưới. Định dạng chuẩn mà các chương trình CFD đều có thể dùng đó là CGNS (CFD General Notation System). Trong bài này ta sẽ thực hành các thao tác với lưới: `chia lưới, lưu trữ lưới, đọc lưới, biểu diễn lưới`. 1. Bài toán dòng chảy trên âm qua dốcVí dụ đầu tiên mà ta sẽ tính là bài toán dòng chảy trên âm qua dốc:- Kích thước vùng tính toán, điều kiện biên như trên hình- Thông số dòng chảy vào: $T=293.15, u=660.0, v=0.0, p=101325.0$ 2. Chia lưới**Quy ước:** kí hiệu index *j - trục y, i - trục x*.Trường hợp tổng quát, ta chia vùng tính toán ra thành $(N_j-1) \times (N_i-1)$ ô lưới. Như vậy sẽ có $(N_j \times N_i)$ điểm lưới, mỗi điểm chứa hai giá trị tọa độ (x, y). Ta cần một mảng 3 chiều với kích thước $(N_j \times N_i \times 2$ để chứa tọa độ điểm lưới. Cách đơn giản là sử dụng thư viện *numpy*:`points = numpy.zeros((N_j, N_i, 2))`. Khi đó `points[j, i]` - điểm ở hàng j, cột i.**Bài tập:** Hãy chia lưới kích thước 40x100 ô Sau khi thực hiện xong bài tập trên, hãy đối chiếu với cách sau:# coding: utf-8 # module mesh_generator.py import numpy as np import matplotlib.pyplot as plt # Tạo lưới bài toán Mach 2 def generate_mesh_M2(Nj, Ni): #Nj, Ni: số điểm lưới # Kích thước vùng tính toán ly, lx = 4.0, 10.0 # Tạo mảng 3 chiều để chứa tọa độ các điểm lưới points = np.zeros((Nj, Ni, 2)) # tọa độ x tại các điểm lưới dx = lx / Ni x = np.linspace(0, lx, Ni) # tọa độ y của biên dưới y0 = np.zeros(Ni) # index i tương ứng vị trí x = 2, 4 trên biên dưới i2 = int(2./dx) i4 = int(4./dx) y0[i2:i4] = (x[i2:i4]-2.)*np.tan(np.pi/12) y0[i4:] = 2.0*np.tan(np.pi/12) # khoảng cách dy giữa hai điểm cùng cột dy = np.array([(ly-y)/(Nj-1) for y in y0]) # xác định tọa độ (x, y) của từng điểm for j in range(Nj): for i in range(Ni): points[j, i, 0] = x[i] points[j, i, 1] = y0[i]+j*dy[i] return points # Kiểm tra hàm chia lưới, vẽ lưới bằng pyplot Nj, Ni = 41, 101 #kích thước lưới points = generate_mesh_M2(Nj, Ni) #tọa độ điểm lưới fig = plt.figure(figsize=(10, 4)) plt.plot(points[:, :, 0], points[:, :, 1], 'r+') plt.show()3. 
Xuất lướiChúng ta sẽ lưu lưới và kết quả ở định dạng của `Tecplot` (tìm hiểu `Tecplot Data Format Guide`). Tuy nhiên ta sẽ sử dụng **ParaView** để xem lưới và kết quả. Bạn hãy cài đặt và học cách sử dụng chương trình này. Đây là một phần mềm rất tiện lợi cho việc biểu diễn cũng như xử lý kết quả CFD.Ví dụ file `field.dat` có cấu trúc như sau:`TITLE = "vncfd python"VARIABLES = "X", "Y", "Z", "rho", "u", "v", "p", "Mach"ZONE T="1", I= 100, J= 40, K= 10.050000 0.050000 0.000000 1.400000 2.000000 0.000000 1.000000 2.0000000.150000 0.050000 0.000000 1.400000 2.000000 0.000000 1.000000 2.0000000.250000 0.050000 0.000000 1.400000 2.000000 0.000000 1.000000 2.000000...`- dòng đầu tiên là tên gọi- dòng thứ hai là tên các biến- dòng thứ ba là tên, kích thước vùng lưới- các dòng tiếp theo là giá trị các biếnTa cần hai hàm xuất lưới và nhập lưới. Thông tin mà ta lưu trữ và đọc là `kích thước và tọa độ lưới`:# module function.py # Hàm xuất lưới def export_mesh(Nj, Ni, points, file_name): f = open(file_name, 'w') f.write('TITLE = "vncfd python"\n') f.write('VARIABLES = "X", "Y"\n') f.write('ZONE T="1", I= %d, J= %d\n' % (Ni, Nj)) for j in range(Nj): for i in range(Ni): f.write('%f %f\n' % (points[j, i, 0], points[j, i, 1])) f.close() # Hàm đọc lưới def import_mesh(file_name, dl=' '): print('\nImport mesh from: %s\n' % file_name) f = open(file_name, 'r') # đọc và hiện ra màn hình 3 dòng đầu for i in range(3): line = f.readline() print(line) # lấy giá trị Ni, Nj words = line.split() # chia dòng cuối ra thành các từ riêng biệt bằng dấu cách ' ' Nj = int(words[-1]) # từ cuối cùng là Nj Ni = int(words[-3].replace(',', '')) # từ thứ 3 tứ cuối lên bỏ dấu ',' là Ni f.close() # đọc tọa độ các điểm lưới bằng hàm loadtxt, bỏ 3 hàng đầu # dùng reshape để chuyển mảng về 3 chiều points = loadtxt(file_name, skiprows=3, usecols=(0,1), delimiter=dl).reshape((Nj, Ni, 2)) return Nj, Ni, points # Lưu lưới vừa được tạo export_mesh(Nj, Ni, points, 'data/mach_2_mesh.dat')Dùng Paraview để xem, kiểm tra lưới chúng ta vừa ghi.# Kiểm tra lại việc ghi, đọc lưới # # Gọi hàm import_mesh # Nj, Ni, points = import_mesh('data/mach_2_mesh.dat') # # Kiểm tra giá trị Nj, Ni # print(Nj, Ni) # # Kiểm tra tọa độ y của cột đầu tiên # print(points[:, 0, 1]) # # Kiểm tra tọa độ x của hàng đầu tiên # print(points[0, :, 0])Getting started with DoWhy: A simple exampleThis is a quick introduction to the DoWhy causal inference library.We will load in a sample dataset and estimate the causal effect of a (pre-specified)treatment variable on a (pre-specified) outcome variable.First, let us load all required packages.import numpy as np import pandas as pd import dowhy from dowhy import CausalModel import dowhy.datasets # Avoid printing dataconversion warnings from sklearn import warnings from sklearn.exceptions import DataConversionWarning warnings.filterwarnings(action='ignore', category=DataConversionWarning) # Config dict to set the logging level import logging.config DEFAULT_LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'loggers': { '': { 'level': 'WARN', }, } } logging.config.dictConfig(DEFAULT_LOGGING)Now, let us load a dataset. For simplicity, we simulate a dataset with linear relationships between common causes and treatment, and common causes and outcome. 
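Conceptually, such a generator draws a set of common causes, makes the (binary) treatment depend on them, and makes the outcome depend on both the common causes and the treatment. As a rough illustration only (a hand-rolled sketch, not DoWhy's actual generator; the names W, v0, y and the coefficients are arbitrary), that idea could be written as:

import numpy as np

rng = np.random.default_rng(0)
n, beta = 20000, 10.0                                        # beta is the causal effect we build in
W = rng.normal(size=(n, 5))                                  # common causes
p_treat = 1 / (1 + np.exp(-(W @ rng.normal(size=5))))        # treatment probability depends on W
v0 = rng.binomial(1, p_treat)                                # binary treatment
y = beta * v0 + W @ rng.normal(size=5) + rng.normal(size=n)  # outcome depends on treatment and W
print(v0.mean(), y.mean())

The cell below does this properly with dowhy.datasets.linear_dataset, which also adds instruments and effect modifiers.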
Beta is the true causal effect.data = dowhy.datasets.linear_dataset(beta=10, num_common_causes=5, num_instruments = 2, num_effect_modifiers=1, num_samples=20000, treatment_is_binary=True, num_discrete_common_causes=1) df = data["df"] print(df.head()) print(data["dot_graph"]) print("\n") print(data["gml_graph"]) data['df']Note that we are using a pandas dataframe to load the data. At present, DoWhy only supports pandas dataframe as input. Interface 1 (recommended): Input causal graph We now input a causal graph in the GML graph format (recommended). You can also use the DOT format.To create the causal graph for your dataset, you can use a tool like [DAGitty](http://dagitty.net/dags.html) that provides a GUI to construct the graph. You can export the graph string that it generates. The graph string is very close to the DOT format: just rename `dag` to `digraph`, remove newlines and add a semicolon after every line, to convert it to the DOT format and input to DoWhy.# With graph model=CausalModel( data = df, treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"] ) data["treatment_name"]The above causal graph shows the assumptions encoded in the causal model. We can now use this graph to first identify the causal effect (go from a causal estimand to a probability expression), and then estimate the causal effect. **DoWhy philosophy: Keep identification and estimation separate**Identification can be achieved without access to the data, acccesing only the graph. This results in an expression to be computed. This expression can then be evaluated using the available data in the estimation step.It is important to understand that these are orthogonal steps.* Identificationidentified_estimand = model.identify_effect(proceed_when_unidentifiable=True) print(identified_estimand)Estimand type: nonparametric-ate ### Estimand : 1 Estimand name: backdoor Estimand expression: d ─────(Expectation(y|W1,W2,W0,W3,W4,X0)) d[v₀] Estimand assumption 1, Unconfoundedness: If U→{v0} and U→y then P(y|v0,W1,W2,W0,W3,W4,X0,U) = P(y|v0,W1,W2,W0,W3,W4,X0) ### Estimand : 2 Estimand name: iv Estimand expression: Expectation(Derivative(y, [Z1, Z0])*Derivative([v0], [Z1, Z0])**(-1)) Estimand assumption 1, As-if-random: If U→→y then ¬(U →→{Z1,Z0}) Estimand assumption 2, Exclusion: If we remove {Z1,Z0}→{v0}, then ¬({Z1,Z0}→y) ### Estimand : 3 Estimand name: frontdoor No such variable found!Note the parameter flag *proceed\_when\_unidentifiable*. It needs to be set to *True* to convey the assumption that we are ignoring any unobserved confounding. The default behavior is to prompt the user to double-check that the unobserved confounders can be ignored. * Estimationcausal_estimate = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_stratification") print(causal_estimate) print("Causal Estimate is " + str(causal_estimate.value))*** Causal Estimate *** ## Identified estimand Estimand type: nonparametric-ate ### Estimand : 1 Estimand name: backdoor Estimand expression: d ─────(Expectation(y|W1,W2,W0,W3,W4,X0)) d[v₀] Estimand assumption 1, Unconfoundedness: If U→{v0} and U→y then P(y|v0,W1,W2,W0,W3,W4,X0,U) = P(y|v0,W1,W2,W0,W3,W4,X0) ## Realized estimand b: y~v0+W1+W2+W0+W3+W4+X0 Target units: ate ## Estimate Mean value: 12.22245148106945 Causal Estimate is 12.22245148106945You can input additional parameters to the estimate_effect method. 
For instance, to estimate the effect on any subset of the units, you can specify the "target_units" parameter which can be a string ("ate", "att", or "atc"), lambda function that filters rows of the data frame, or a new dataframe on which to compute the effect. You can also specify "effect modifiers" to estimate heterogeneous effects across these variables. See `help(CausalModel.estimate_effect)`.# Causal effect on the control group (ATC) causal_estimate_att = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_stratification", target_units = "atc") print(causal_estimate_att) print("Causal Estimate is " + str(causal_estimate_att.value))*** Causal Estimate *** ## Identified estimand Estimand type: nonparametric-ate ### Estimand : 1 Estimand name: backdoor Estimand expression: d ─────(Expectation(y|W1,W2,W0,W3,W4,X0)) d[v₀] Estimand assumption 1, Unconfoundedness: If U→{v0} and U→y then P(y|v0,W1,W2,W0,W3,W4,X0,U) = P(y|v0,W1,W2,W0,W3,W4,X0) ## Realized estimand b: y~v0+W1+W2+W0+W3+W4+X0 Target units: atc ## Estimate Mean value: 12.085030088973811 Causal Estimate is 12.085030088973811Interface 2: Specify common causes and instruments# Without graph model= CausalModel( data=df, treatment=data["treatment_name"], outcome=data["outcome_name"], common_causes=data["common_causes_names"], effect_modifiers=data["effect_modifier_names"]) model.view_model() from IPython.display import Image, display display(Image(filename="causal_model.png"))We get the same causal graph. Now identification and estimation is done as before.identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)* Estimationestimate = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_stratification") print(estimate) print("Causal Estimate is " + str(estimate.value))*** Causal Estimate *** ## Identified estimand Estimand type: nonparametric-ate ### Estimand : 1 Estimand name: backdoor Estimand expression: d ─────(Expectation(y|W0,W3,W1,W2,W4)) d[v₀] Estimand assumption 1, Unconfoundedness: If U→{v0} and U→y then P(y|v0,W0,W3,W1,W2,W4,U) = P(y|v0,W0,W3,W1,W2,W4) ## Realized estimand b: y~v0+W0+W3+W1+W2+W4 Target units: ate ## Estimate Mean value: 9.60353896239299 Causal Estimate is 9.60353896239299Refuting the estimateLet us now look at ways of refuting the estimate obtained. 
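The cells below apply each refutation method one at a time. As a compact alternative (a sketch that reuses only the refute_estimate calls and keyword arguments shown in those cells), the same checks could be run in a single loop:

# Sketch: run several refuters in one pass; method names and kwargs mirror the cells below.
refuters = {
    "random_common_cause": {},
    "placebo_treatment_refuter": {"placebo_type": "permute"},
    "data_subset_refuter": {"subset_fraction": 0.9},
}
for method_name, kwargs in refuters.items():
    print(model.refute_estimate(identified_estimand, estimate,
                                method_name=method_name, **kwargs))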
Adding a random common cause variableres_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause") print(res_random)Refute: Add a Random Common Cause Estimated effect:9.60353896239299 New effect:9.602995259163794Adding an unobserved common cause variableres_unobserved=model.refute_estimate(identified_estimand, estimate, method_name="add_unobserved_common_cause", confounders_effect_on_treatment="binary_flip", confounders_effect_on_outcome="linear", effect_strength_on_treatment=0.01, effect_strength_on_outcome=0.02) print(res_unobserved)Refute: Add an Unobserved Common Cause Estimated effect:9.60353896239299 New effect:7.591550789699005Replacing treatment with a random (placebo) variableres_placebo=model.refute_estimate(identified_estimand, estimate, method_name="placebo_treatment_refuter", placebo_type="permute") print(res_placebo)Refute: Use a Placebo Treatment Estimated effect:9.60353896239299 New effect:0.005166426876598622 p value:0.47Removing a random subset of the datares_subset=model.refute_estimate(identified_estimand, estimate, method_name="data_subset_refuter", subset_fraction=0.9) print(res_subset)Refute: Use a subset of data Estimated effect:9.60353896239299 New effect:9.6152898407909 p value:0.42As you can see, the propensity score stratification estimator is reasonably robust to refutations.For reproducibility, you can add a parameter "random_seed" to any refutation method, as shown below.res_subset=model.refute_estimate(identified_estimand, estimate, method_name="data_subset_refuter", subset_fraction=0.9, random_seed = 1) print(res_subset)Refute: Use a subset of data Estimated effect:9.60353896239299 New effect:9.618271557173077 p value:0.33Table of Contents1  Read and plot resampled radiances2  Make a projection from proj4 parameters3  Plot the image using cartopy3.1  Create a palette3.2  use the palette on the image_30 array Read and plot resampled radiancesThis notebook shows how to plot the image written out by thecartopy_resample_ch30 notebookimport a301 import json from a301.utils.data_read import download import pprint import shutil import json import pprint import cartopy import numpy as np from matplotlib import pyplot as plt # read in the resampled image in_dir_name="ch30_resample" in_dir = a301.root_dir / Path('test_data') / Path(in_dir_name) image_name= in_dir / Path(f"{in_dir_name}.npz") json_name = in_dir / Path(f"{in_dir_name}.json") image_dict=np.load(image_name) with open(json_name,'r') as f: meta_dict=json.load(f) print(list(image_dict.keys())) image_30=image_dict['ch30_resample'] import cartopy.crs as ccrs import matplotlib.pyplot as pltMake a projection from proj4 parametersa301.geometry.make_projectiondef make_projection(proj_params): """ turn a set of proj4 parameters into a cartopy laea projection Parameters ---------- proj_params: dict dictionary with parameters lat_0, lon_0 datum and ellps Returns ------- cartopy projection object """ import cartopy.crs as ccrs globe_w = ccrs.Globe(datum=proj_params["datum"],ellipse=proj_params['ellps']) projection_w=ccrs.LambertAzimuthalEqualArea(central_latitude=float(proj_params['lat_0']), central_longitude= float(proj_params['lon_0']),globe=globe_w) return projection_w cartopy_crs = make_projection(meta_dict['proj_params'])Plot the image using cartopy Create a paletteWe want to spread the colors over a limited range of values between 0.1 and 7 W/m^2/microns/sr so wewill set over and under colors and normalize the data to this range Some links about colors: * [rods, cones and 
rgb](https://theneurosphere.com/2015/12/17/the-mystery-of-tetrachromacy-if-12-of-women-have-four-cone-types-in-their-eyes-why-do-so-few-of-them-actually-see-more-colours/)* [matplotlib palettes](https://matplotlib.org/examples/color/colormaps_reference.html) * [xkcd color survey](https://blog.xkcd.com/2010/05/03/color-survey-results/) * [xkcd colors from matplotlib](https://seaborn.pydata.org/generated/seaborn.xkcd_palette.html) * [wikipedia article on RGB colors](https://en.wikipedia.org/wiki/RGB_color_model)pal = plt.get_cmap('plasma') pal.set_bad('0.75') #75% grey for out-of-map cells pal.set_over('r') #color cells > vmax red pal.set_under('k') #color cells < vmin black vmin= 0.1 vmax= 7.0 from matplotlib.colors import Normalize the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)use the palette on the image_30 arrayfig, ax = plt.subplots(1, 1, figsize=(10,10), subplot_kw={'projection': cartopy_crs}) ax.gridlines(linewidth=2) ax.add_feature(cartopy.feature.GSHHSFeature(scale='coarse', levels=[1,2,3])); ax.set_extent(meta_dict['extent'],cartopy_crs) cs=ax.imshow(image_30, transform=cartopy_crs, extent=meta_dict['extent'], origin='upper',alpha=0.8,cmap=pal,norm=the_norm) fig.colorbar(cs,extend='both');Stock Lower Partial Moment Chart# Library import pandas as pd import numpy as np import matplotlib.pyplot as plt import math import statistics import warnings warnings.filterwarnings("ignore") from pandas_datareader import data as pdr import yfinance as yf yf.pdr_override() start = '2019-01-01' #input end = '2020-07-01' #input symbol = 'AMD' #input stocks = yf.download(symbol, start=start, end=end)['Adj Close'] stocks_returns = stocks.pct_change().dropna() def lpm(stock_returns): threshold=0.0 order=1 threshold_array = np.empty(len(stock_returns)) threshold_array.fill(threshold) diff = threshold_array - stock_returns diff = diff.clip() return np.sum(diff ** order) / len(stock_returns) # Compute the running Lower Partial Moment running = [lpm(stocks_returns[i-90:i]) for i in range(90, len(stocks_returns))] # Plot running Lower Partial Moment up to 100 days before the end of the data set _, ax1 = plt.subplots(figsize=(12,8)) ax1.plot(range(90, len(stocks_returns)-100), running[:-100]) ticks = ax1.get_xticks() ax1.set_xticklabels([stocks.index[int(i)].date() for i in ticks[:-1]]) # Label x-axis with dates plt.title(symbol + ' Lower Partial Moment') plt.xlabel('Date') plt.ylabel('Lower Partial Moment') stock_lpm = lpm(stocks_returns) stock_lpm running = [lpm(stocks_returns[i-90:i]) for i in range(90, len(stocks_returns))] running1. Import our necessary dependenciesimport numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import scipy.misc as smp %matplotlib inline2. 
Import our dataset# read data df = pd.read_pickle('/data_to_upload/data_batch_1') # X is features X = df['data'] # y is our label y = df['labels'] X print("After importing data: ", X.shape) X = np.array(X, dtype=float) / 255.0 print("After converting array: ", X.shape) X = np.reshape(X, [-1, 3 , 32 ,32]) print("After reshape: ", X.shape) X = np.transpose(X, [0, 2, 3, 1]) print("After transpose: ", X.shape) X = np.reshape(X, [-1, 32*32*3]) print("After tra reshape: ", X.shape) X y = np.array(y, dtype=float) y.shape y = y.reshape(-1, 1) y.shape # transform y to oneHotEncode oneHotEncode = OneHotEncoder() oneHotEncode.fit(y) y = oneHotEncode.transform(y).toarray() y.shape # split our data into train and testing set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=True) X_train.shape # single_img_reshaped = np.transpose(np.reshape(X,(3, 32,32)), (1,2,0)) # single_img_reshaped.shape # hyperparametres learning_rate = 0.003 num_steps = 100 batch_size = 128 display_step = 10 dropout = 0.75 # declare our placeholder X = tf.placeholder(tf.float32, [None, 3072]) y = tf.placeholder(tf.float32, [None, 10]) keep_prob = tf.placeholder(tf.float32) # weights weights = { 'wh1' : tf.Variable(tf.random_normal([5, 5, 3, 64])), 'wh2' : tf.Variable(tf.random_normal([5, 5, 64, 128])), 'wh3' : tf.Variable(tf.random_normal([5, 5 ,128, 256])), 'wh4' : tf.Variable(tf.random_normal([5, 5, 256, 512])), 'wf1' : tf.Variable(tf.random_normal([2*2*512, 1024])), 'wf2' : tf.Variable(tf.random_normal([1024, 512])), 'wOut' : tf.Variable(tf.random_normal([512, 10])) } # biases biases = { 'bh1' : tf.Variable(tf.random_normal([64])), 'bh2' : tf.Variable(tf.random_normal([128])), 'bh3' : tf.Variable(tf.random_normal([256])), 'bh4' : tf.Variable(tf.random_normal([512])), 'bf1' : tf.Variable(tf.random_normal([1024])), 'bf2' : tf.Variable(tf.random_normal([512])), 'bOut' : tf.Variable(tf.random_normal([10])) } def conv_2d(X, weights, biases, strides=1): X = tf.nn.conv2d(X, weights, strides=[1, strides, strides, 1], padding="SAME") X = tf.nn.bias_add(X, biases) return tf.nn.relu(X) def max_pooling(X, k=2): return tf.nn.max_pool(X, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding="SAME") def conv_net(X, weights, biases, dropout): X = tf.reshape(X, shape=[-1, 32, 32, 3]) conv1 = conv_2d(X, weights=weights['wh1'], biases=biases['bh1']) conv1 = max_pooling(conv1) conv1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) conv2 = conv_2d(conv1, weights=weights['wh2'], biases=biases['bh2']) conv2 = max_pooling(conv2) conv2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) conv3 = conv_2d(conv2, weights=weights['wh3'], biases=biases['bh3']) conv3 = max_pooling(conv3) conv3 = tf.nn.lrn(conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) conv4 = conv_2d(conv3, weights=weights['wh4'], biases=biases['bh4']) conv4 = max_pooling(conv4) conv4 = tf.nn.lrn(conv4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) fc1 = tf.reshape(conv4, [-1, weights['wf1'].get_shape().as_list()[0]]) fc1 = tf.add(tf.matmul(fc1, weights['wf1']), biases['bf1']) fc1 = tf.nn.relu(fc1) fc2 = tf.add(tf.matmul(fc1, weights['wf2']), biases['bf2']) fc2 = tf.nn.relu(fc2) fc2 = tf.nn.dropout(fc2, dropout) out = tf.add(tf.matmul(fc2, weights['wOut']), biases['bOut']) return out logits = conv_net(X, weights=weights, biases=biases, dropout=keep_prob) prediction = tf.nn.softmax(logits=logits) # Define loss and optimizer loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=y)) optimizer = 
tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # Evaluate model correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initialize the variables (i.e. assign their default value) init = tf.global_variables_initializer() # Start training with tf.Session() as sess: # Run the initializer sess.run(init) for step in range(1, num_steps+1): randidx = np.random.randint(len(X_train), size=batch_size) batch_xs = X_train[randidx] batch_ys = y_train[randidx] # Run optimization op (backprop) sess.run(train_op, feed_dict={X: batch_xs, y: batch_ys, keep_prob: dropout}) if step % display_step == 0 or step == 1: # Calculate batch loss and accuracy loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_xs, y: batch_ys, keep_prob: 1.0}) print("Step " + str(step) + ", Minibatch Loss= " + \ "{:.4f}".format(loss) + ", Training Accuracy= " + \ "{:.3f}".format(acc)) print("Optimization Finished!") # Calculate accuracy for 256 MNIST test images print("Testing Accuracy:", \ sess.run(accuracy, feed_dict={X: X_test, y: y_test, keep_prob: 1.0}))Step 1, Minibatch Loss= 156309.1562, Training Accuracy= 0.148 Step 10, Minibatch Loss= 38829.6250, Training Accuracy= 0.133 Step 20, Minibatch Loss= 37972.6094, Training Accuracy= 0.203 Step 30, Minibatch Loss= 14672.0928, Training Accuracy= 0.172 Step 40, Minibatch Loss= 10311.2041, Training Accuracy= 0.211 Step 50, Minibatch Loss= 5380.2720, Training Accuracy= 0.234 Step 60, Minibatch Loss= 2827.4873, Training Accuracy= 0.297 Step 70, Minibatch Loss= 1225.9036, Training Accuracy= 0.164 Step 80, Minibatch Loss= 232.0529, Training Accuracy= 0.133 Step 90, Minibatch Loss= 133.7461, Training Accuracy= 0.078 Step 100, Minibatch Loss= 85.8452, Training Accuracy= 0.117 Optimization Finished! Testing Accuracy: 0.11首先我們使用一般的 DNN (MLP) 來訓練由於 DNN 只能輸入一維的資料,我們要先將影像進行攤平,若 (50000, 32, 32, 3) 的影像,攤平後會變成 (50000, 32*32*3) = (50000, 3072)# 將資料攤平成一維資料 x_train = x_train.reshape(50000, 3072) x_test = x_test.reshape(10000, 3072) # 將資料變為 float32 並標準化 x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') model = Sequential() model.add(Dense(512, activation='relu', input_shape=(3072,))) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1])WARNING: Logging before flag parsing goes to stderr. W0801 22:20:36.856033 2676 deprecation.py:323] From c:\users\qwerz\miniconda3\envs\ml100\lib\site-packages\tensorflow\python\ops\math_grad.py:1250: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. 
Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where接下來我們使用 CNN 來訓練神經網路CNN 的原理非常適合處理影像類的資料,就讓我們來看看,同樣的訓練條件,CNN 是否顯著優於 DNN 呢?(x_train, y_train), (x_test, y_test) = cifar10.load_data() print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # Convert class vectors to binary class matrices. y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(Conv2D(32, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), padding='same')) model.add(Activation('relu')) model.add(Conv2D(64, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1])Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 32, 32, 32) 896 _________________________________________________________________ activation (Activation) (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 30, 30, 32) 9248 _________________________________________________________________ activation_1 (Activation) (None, 30, 30, 32) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 15, 15, 32) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 15, 15, 32) 0 ______________________________________________________[...]$\chi(\vec{q}) =\frac{1}{N}\sum_k^N \frac{fd(\xi_k)-fd(\xi_{k+q})}{\xi_{k}-\xi_{k+q}}$def fd(e,T): return 1/(1+np.exp(e/T)) for t in [1e-5,1e-1,1,2]: plt.plot(E(kk,mu=2),fd(E(kk,mu=2),t)) def suscep(q,k,mu,T): num = fd(E(k,mu),T)-fd(E(k+q,mu),T) den = E(k,mu)-E(k+q,mu) res = num/den av = np.average(res) return av sus1 = [suscep(q,kk,mu=2,T=0.01) for q in kk] sus2 = [suscep(q,kk,mu=2,T=0.2) for q in kk] sus01 = [suscep(q,kk,mu=2,T=0.1) for q in kk] plt.plot(k,sus01) plt.plot(k,sus1) plt.plot(k,sus2) xy = np.array([ [x,y] for x in kk for y in kk]) def E2(x,y,mu): return -np.cos(x)-np.cos(y)-mu plt.scatter(xy.T[0],xy.T[1],c=E2(xy.T[0],xy.T[1],0),cmap='jet') plt.axis('equal') plt.colorbar() def suscep2(q,k,mu,T): num = fd(E2(k.T[0],k.T[1],mu),T)-fd(E2(k.T[0]+q.T[0],k.T[1]+q.T[1],mu),T) den = E2(k.T[0],k.T[1],mu)-E2(k.T[0]+q.T[0],k.T[1]+q.T[1],mu) res = num/den av = np.average(res) return av sus2d01 = [suscep2(q,xy,mu=0,T=0.01) for q in xy] sus2d1 = [suscep2(q,xy,mu=0,T=0.1) for q in xy] plt.scatter(xy.T[0],xy.T[1],c=sus2d01,cmap='jet') plt.colorbar() plt.scatter(xy.T[0],xy.T[1],c=sus2d1,cmap='jet') plt.colorbar()Topics: Sparsity (PCA 
and Compressive Sensing) Assigned: Wednesday May 23 Due: Sunday June 10 by midnight# -*- coding: utf-8 -*- import numpy as np from math import * import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D from scipy.stats import norm # Params N = 2000 dims = 100 ulength = 7Code & Logs Part I PCAGenerate a random vector u in d dimensions as follows: The components of u are i.i.d., with- P [u[i] = 0] = 2=3; P [u[i] = +1] = 1=6; P [u[i] = −1] = 1=6# Generate the IID def generateMultiDimGaussian(d, ulength): u = np.ndarray((ulength,d)) angles = np.zeros((ulength,ulength)) deviation = 10 while np.amax(angles+90*np.eye(ulength)) > (90+deviation) or np.amin(angles+90*np.eye(ulength)) < (90-deviation): for i in range(ulength): for j in range(d): r = np.random.rand() if(r<4/6): u[i,j] = 0 elif(r<5/6): u[i,j] = 1 else: u[i,j] = -1 if(u.any(axis=1).all()): for i in range(ulength): for j in range(i): angles[i,j] = np.arccos( np.clip( np.dot(u[i,:]/np.linalg.norm(u[i,:]), u[j,:]/np.linalg.norm(u[j,:])) ,-1.0,1.0))/np.pi*180 angles[j,i] = angles[i,j] #print(angles) return u # Uj be i.i.d Uj = generateMultiDimGaussian(dims, ulength) print(Uj.shape)(7, 100)Generate d-dimensional data samples for a Gaussian mixture distribution with 3 equiprobable components1. Zm : Standard Gaussian (N(0, 1)) distribution2. N : noise vector" N ∼ N(0, σ2Id) (default value σ2 = 0:01)3. Component 1: Generate X = u1 + Z1u2 + Z2u3 + N.4. Component 2: Generate X = 2u4 + sqrt(2)Z1u5 + Z2u6 + N.5. Component 3: Generate X = sqrt(2)u6 + Z1(u1 + u2) + (1/sqrt(2))Z2u5 + N""" Generate the higher dimension dataset and sample equiprobable from components """ def generateDataset(u, num_data = 50, d = 30): sigma_sq = 0.01 #print('\nX(Nxd):',num_data,"x",d,',\tUj:',Uj[0].shape) dataset = np.ndarray((num_data,d)) labels = np.zeros((num_data,3)) # will be containing [0,1,0] one hot value # Assign the values based on the three component function for i in range(0, num_data): # Random numbers Zm {Z1, Z2} and N are drawn afresh Z1 = np.random.normal() Z2 = np.random.normal() noise = np.random.multivariate_normal(np.zeros(d), (sigma_sq)*np.eye(d)) # choose which comnponent to pick from idx_comp = np.random.choice([0, 1, 2],1,p=[0.333, 0.333, 0.334]) if(idx_comp == 0): # Sample from component 1 dataset[i,:] = Uj[1,:] + Z1*Uj[2,:] + Z2*Uj[3,:] + noise elif(idx_comp == 1): # Sample from component 3 dataset[i,:] = 2*Uj[4,:] + np.sqrt(2)*Z1*Uj[5,:] + Z2*Uj[6,:] + noise elif(idx_comp == 2): # Sample from component 3 dataset[i,:] = np.sqrt(2)*Uj[6,:] + Z1*(Uj[1,:] + Uj[2,:]) + 1/np.sqrt(2) *Z2*Uj[5,:] + noise # Assign a label labels[i,idx_comp]=1 return dataset, labels #X_data, Y_hot_labels = generateDataset(Uj, 50, 30) #print("> X: Data Set:",X_data.shape,", Y: One hot :",Y_hot_labels.shape)1. SVD of the A(N × d) data matrix A = U SIGMA VT1. V must diagonalize ATA and vi are eigenvectors of ATA.2. SIGMA where SIGMAii are singular values of A. 3. U must diagonalize AAT and ui are eigenvectors of AAT# input sample N=? 
from components def getSingularValues(N = 50, d=30): X_data, Y_hot_labels = generateDataset(Uj, N, d) print("> X: Data Set:",X_data.shape,", Y: One hot :",Y_hot_labels.shape) #Xt = X_data.T U, S, V = np.linalg.svd(X_data, full_matrices=False) X_a = np.dot(np.dot(U, np.diag(S)), V) print("> SD-inp:", np.std(X_data), "\nSD-out:", np.std(X_a), "\nSD-Err:",np.std(X_data - X_a)) print("Singular values ",N,"×",d," : ",S[0:7]) return S plt.figure(figsize=(14,6)) plt.title('Singular Values vs N') plt.xlabel('Singular value index') plt.ylabel('Singular Value magnitude') for Num in [50, 60, 80, 100, 120, 150, 200, 300]: S = getSingularValues(Num, dims) plt.plot(range(len(S)), S,label = str('N='+str(Num))) plt.legend() plt.show()> X: Data Set: (50, 100) , Y: One hot : (50, 3) > SD-inp: 1.2425699236269871 SD-out: 1.242569923626987 SD-Err: 3.048454173224238e-15 Singular values 50 × 100 : [48.39048927 43.11330642 41.06337937 33.16084801 21.96589821 15.14612167 1.5994595 ] > X: Data Set: (60, 100) , Y: One hot : (60, 3) > SD-inp: 1.3498319267986385 SD-out: 1.3498319267986376 SD-Err: 3.4819962065686176e-15 Singular values 60 × 100 : [55.31684119 53.0200592 44.0650705 39.84964441 30.09112566 24.50793065 1.69841237] > X: Data Set: (80, 100) , Y: One hot : (80, 3) > SD-inp: 1.1498704791356968 SD-out: 1.1498704791356964 SD-Err: 1.806028648889445e-15 Singular values 80 × 100 : [57.5893642 53.216893 40.83616548 33.89971405 28.80635373 27.18518553 1.76064902] > X: Data Set: (100, 100) , Y: One hot : (100, 3) > SD-inp: 1.2997668301927499 SD-out: 1.2997668301927496 SD-Err: 4.440394990401163e-15 Singular values 100 × 100 : [69.90863454 62.67001101 54.6252193 50.84244765 38.22080618 31.71841843[...]1.(a) d0 = 6 are the dominant singular values we can see this based on the variation for N = [50, 60, 80, 100, 120, 150, 200, 300] Now, project the data down to the dominant d0 components to obtain an N × d0 data matrix.X_data, Y_hot_labels = generateDataset(Uj, N, dims) print("> X: Data Set:",X_data.shape,", Y: One hot :",Y_hot_labels.shape) U, S, V = np.linalg.svd(X_data, full_matrices=False) X_a = np.dot(np.dot(U, np.diag(S)), V) print(">\nSD-inp:", np.std(X_data), "\nSD-out:", np.std(X_a), "\nSD-Err:",np.std(X_data - X_a)) print("Singular values ",N,"×",dims," : ",S[0:7]) d0 = 6 # Dominant vectors d0 = 6 print("Singular: ", np.diag(S[:d0]).shape) reconst_matrix = np.dot(U[:,:d0],np.dot(np.diag(S[:d0]),V[:d0,:])) print(">\nSD-inp:", np.std(X_data), "\nSD-out:", np.std(reconst_matrix), "\nSD-Err:",np.std(X_data - reconst_matrix)) # Using eigen vector V as the basis for projecting the data evecs = V[:, :d0] X_reduced_matrix = np.dot(X_data, evecs) print("Reduced Matrix[From =",N,"×",dims,"]:" ," To =",X_reduced_matrix.shape) #print(X_reduced_matrix) def runKmeansCluster(data, labels, d): class_means_dict=dict() preds_dict=dict() fig = plt.figure(figsize=(8,8)) subplot_id = 221 print("Data Dims: ",N,"x",d) print("Dataset : ", data.shape) # Kmeans trial for K in range(2,6): pred = np.zeros((N, K)) class_means = np.ndarray((K, d)) initial_indices = np.random.choice(N,K) for k in range(K): class_means[k,:] = data[initial_indices[k],:] old_class_means = np.zeros((K,d)) print("\n-------- K=",K,"----------") while np.linalg.norm(old_class_means-class_means)/np.linalg.norm(class_means) > 0.001 : norm_mse = np.linalg.norm(old_class_means-class_means) norm_mu = np.linalg.norm(class_means) #print("> ",norm_mse,"/",norm_mu," \t=\t ",norm_mse/norm_mu) old_class_means = np.array(class_means) for i in range(N): distance_to_means = 
np.zeros(K) for k in range(K): distance_to_means[k] = np.linalg.norm(class_means[k] - data[i]) #print("Distance: ",distance_to_means) nearest_mean = np.argmin(distance_to_means) #print("Nearest : ",nearest_mean) # labels as the min dist pred[i,:] = np.zeros(K) pred[i, nearest_mean] = 1 # one hot encoding # new mean for k in range(K): class_means[k] = np.mean(data[np.where(pred[:,k]==1)], axis=0) #print("Class MU : ",class_means) ax = fig.add_subplot(subplot_id) subplot_id = subplot_id +1 for k in range(K): colors = ('#fc0d1b','#041ca2','#162214','#fd8008','#c41bb6') ax.scatter(data[np.where(pred[:k] == 1),0], data[np.where(pred[:k] == 1),1], color = colors[k], label = ('Class %d' % (k+1))) ax.scatter(class_means[k,0], class_means[k,1], s=200, marker='+', color = colors[k]) ax.legend() #plt.savefig('dim_%d_q1_%d_means.png' % (d,K), dpi=600) #plt.show # save means class_means_dict[K] = class_means preds_dict[K] = pred probabilities = np.ndarray((3,K)) for predicted in range(K): print('______________________ \n') for true_label in range(3): interssect = np.intersect1d(np.where(labels[:, true_label] == 1), np.where(pred[:, predicted] == 1)) tots = np.where(labels[:, true_label] == 1)[0] #print('K=',predicted,", idx=",true_label,"\nP -> ",interssect,"\nT ->", tots) probabilities[true_label, predicted] = len(interssect) / len(tots) print('K=',predicted,", idx=",true_label,' -> Prob=%.2f' % probabilities[true_label, predicted]) fig.show print("Run on the reduced dimensions data with d0 = ", d0) runKmeansCluster(X_reduced_matrix, Y_hot_labels, d0)Run on the reduced dimensions data with d0 = 6 Data Dims: 2000 x 6 Dataset : (2000, 6) -------- K= 2 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.00 K= 0 , idx= 1 -> Prob=0.80 K= 0 , idx= 2 -> Prob=0.00 ______________________ K= 1 , idx= 0 -> Prob=1.00 K= 1 , idx= 1 -> Prob=0.20 K= 1 , idx= 2 -> Prob=1.00 -------- K= 3 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.00 K= 0 , idx= 1 -> Prob=0.30 K= 0 , idx= 2 -> Prob=0.49 ______________________ K= 1 , idx= 0 -> Prob=1.00 K= 1 , idx= 1 -> Prob=0.00 K= 1 , idx= 2 -> Prob=0.51 ______________________ K= 2 , idx= 0 -> Prob=0.00 K= 2 , idx= 1 -> Prob=0.70 K= 2 , idx= 2 -> Prob=0.00 -------- K= 4 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.00 K= 0 , idx= 1 -> Prob=0.51 K= 0 , idx= 2 -> Prob=0.00 ______________________ K= 1 , idx= 0 -> Prob=0.00 K= 1 , idx= 1 -> Prob=0.45 K= 1 , idx= 2 -> Prob=0.00 ______________________ K= 2 , idx= 0 -> Pro[...]Part II : Random Projections and Compressed Sensing 3. (a.) Generate m x d matrix Φ, IID drawn such as P [Φij = +1] = 1/2; P [Φij = −1] = 1/2 (b.) 
Compressive Projection, y = 1/sqrt(m) * ( Φ x ) with sparse reconstruction of s based on ydef generatePhiMatrix(m, d): u = np.ndarray((m,d)) for i in range(m): for j in range(d): r = np.random.rand() if(r<1/2): u[i,j] = 1 else: u[i,j] = -1 #print("Φ(mxd):",u.shape) return u def getCompressedProjection(m, d, log=False): # Keeping dimensions as dimensions d = 30 and number of data N = 50 Xp_data, Zp_hot_labels = generateDataset(Uj, N, d) # Generate Phi phi = generatePhiMatrix(m,d) # Compressive Projection (m-dim projection of the d-dim matrix x) Y_xp = (1/np.sqrt(m)) * np.matmul(Xp_data, phi.T) B = np.transpose(Uj) if(log == True): print("> N =",N,", M =",m,", Y_xp:", Y_xp.shape, ", Labels:",Zp_hot_labels.shape, ", Phi :",phi.shape, ", B :",B.shape) return Xp_data, Y_xp, Zp_hot_labels, phi, B M = 20 X, Y_xp, labels, phi, B_evecs = getCompressedProjection(M, dims, True)> N = 2000 , M = 20 , Y_xp: (2000, 20) , Labels: (2000, 3) , Phi : (20, 100) , B : (100, 7)4. Lasso problem (using sklearn.linear_model.Lasso)- Φ(phi) = projection matrix also called A in examples - x = latent data variables with gaussian noise - y = observed results (projection matrix is applied on latent data variables and the result has been reduced in dimensions)from sklearn import linear_model def normalizedME(s, s_hat): return np.linalg.norm(s_hat - s)/np.linalg.norm(s) def findMinimumM(): mses = [] # Keeping dimension fixed and varying M for this rng = range(1,15) for m in rng: s, Y_xp, labels, phi, B = getCompressedProjection(m, dims, False) matrix = (1/np.sqrt(m)) * np.matmul(phi, B) clf = linear_model.Lasso(alpha = 1.0) # Set lambda ( called ’alpha ’ here ) clf.fit(matrix, Y_xp.T) # Fit the reduced Y to the one hot a_hat = clf.coef_ # Get a_hat s_hat = np.matmul(a_hat, B.T) # this is the output #print("Mat:",matrix.shape, ",\ts_hat:",s_hat.shape, ",\ts:",s.shape) mses.append(normalizedME(s, s_hat)) plt.plot(list(rng), mses) plt.show() return np.argmin(mses)+1 minM = findMinimumM() print("The minimum M is ", minM)5. 
Normalized MSE vs Lambdadef normalize(lst): s = sum(lst) return map(lambda x: float(x)/s, lst) def mean(lst): return sum(lst)/float(len(lst)) def normalizedMSE(s, s_hat): return (np.linalg.norm(s_hat - s)/np.linalg.norm(s))**2 def computeMSELambda(N, m, d): mse_ = [] mse_comp1_ = [] mse_comp2_ = [] mse_comp3_ = [] s, Y_xp, labels, phi, B = getCompressedProjection(m, d, False) matrix = (1/np.sqrt(m)) * np.matmul(phi, B) clf = linear_model.Lasso(alpha = 1.0) # Set lambda ( called ’alpha ’ here ) clf.fit(matrix, Y_xp.T) # Fit the reduced Y to the one hot a_hat = clf.coef_ # Get a_hat s_hat = np.matmul(a_hat, B.T) # this is the output for i in range(N): mse = normalizedMSE(s[i],s_hat[i]) index = np.where(labels[i]==1)[0] if index == 0: #component 1 mse_comp1_.append(mse) if index == 1: mse_comp2_.append(mse) if index == 2: mse_comp3_.append(mse) mse_.append(mse) return mean(mse_), mean(mse_comp1_),mean(mse_comp2_),mean(mse_comp3_) mse_list=[] mse_comp1_list = [] mse_comp2_list = [] mse_comp3_list = [] lambda_list = [0.05,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] for l in lambda_list: me, me1, me2, me3 = computeMSELambda(N, minM, dims) mse_list.append(me) mse_comp1_list.append(me1) mse_comp2_list.append(me2) mse_comp3_list.append(me3) plt.title('Plot of Lambda vs MSE') plt.plot(lambda_list, mse_list, label="Avg MSE") plt.plot(lambda_list, mse_comp1_list, label="Comp1 MSE") plt.plot(lambda_list, mse_comp2_list, label="Comp2 MSE") plt.plot(lambda_list, mse_comp3_list, label="Comp3 MSE") plt.legend() plt.show()6. Compare the Euclidean distances squared vs the corresponding quantity in projected spacem = minM J = 6 B = np.transpose(Uj) projected_u = (1/np.sqrt(m)) * np.matmul(phi, B) projected_dist_matrix = np.zeros(shape=(J,J)) for i in range(J): for j in range(J): dist = np.linalg.norm(projected_u[:,i]-projected_u[:,j])**2 projected_dist_matrix[i][j] = dist print("projected_dist_matrix\n",projected_dist_matrix) dist_matrix = np.zeros(shape=(J,J)) for i in range(J): for j in range(J): dist = np.linalg.norm(Uj[:,i]-Uj[:,j])**2 dist_matrix[i][j] = dist print("dist_matrix\n",dist_matrix)projected_dist_matrix [[ 0. 37.45 56.36 73.45 23.64 46.82] [ 37.45 0. 66.91 66.55 34.91 87.55] [ 56.36 66.91 0. 131.27 59.64 114.45] [ 73.45 66.55 131.27 0. 70.18 67.18] [ 23.64 34.91 59.64 70.18 0. 43.18] [ 46.82 87.55 114.45 67.18 43.18 0. ]] dist_matrix [[ 0. 2. 2. 7. 3. 4.] [ 2. 0. 2. 3. 3. 2.] [ 2. 2. 0. 7. 3. 4.] [ 7. 3. 7. 0. 10. 7.] [ 3. 3. 3. 10. 0. 7.] [ 4. 2. 4. 7. 7. 0.]]7. 
K-means algorithm post-projectionx, labels = generateDataset(Uj, N, dims) phi = generatePhiMatrix(minM,dims) Y = np.matmul(phi,np.transpose(x))/(np.sqrt(minM)) Z = np.transpose(Y) runKmeansCluster(Z,labels, minM)Data Dims: 2000 x 11 Dataset : (2000, 11) -------- K= 2 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.69 K= 0 , idx= 1 -> Prob=0.63 K= 0 , idx= 2 -> Prob=0.27 ______________________ K= 1 , idx= 0 -> Prob=0.31 K= 1 , idx= 1 -> Prob=0.37 K= 1 , idx= 2 -> Prob=0.73 -------- K= 3 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.00 K= 0 , idx= 1 -> Prob=0.43 K= 0 , idx= 2 -> Prob=0.08 ______________________ K= 1 , idx= 0 -> Prob=0.31 K= 1 , idx= 1 -> Prob=0.02 K= 1 , idx= 2 -> Prob=0.66 ______________________ K= 2 , idx= 0 -> Prob=0.69 K= 2 , idx= 1 -> Prob=0.55 K= 2 , idx= 2 -> Prob=0.26 -------- K= 4 ---------- ______________________ K= 0 , idx= 0 -> Prob=0.23 K= 0 , idx= 1 -> Prob=0.01 K= 0 , idx= 2 -> Prob=0.57 ______________________ K= 1 , idx= 0 -> Prob=0.03 K= 1 , idx= 1 -> Prob=0.30 K= 1 , idx= 2 -> Prob=0.01 ______________________ K= 2 , idx= 0 -> Prob=0.00 K= 2 , idx= 1 -> Prob=0.39 K= 2 , idx[...]1901. Find a Peak Element II ContentA peak element in a 2D grid is an element that is strictly greater than all of its adjacent neighbors to the left, right, top, and bottom.Given a 0-indexed m x n matrix mat where no two adjacent cells are equal, find any peak element mat[i][j] and return the length 2 array [i,j].You may assume that the entire matrix is surrounded by an outer perimeter with the value -1 in each cell.You must write an algorithm that runs in O(m log(n)) or O(n log(m)) time. Example 1:Input: mat = [[1,4],[3,2]]Output: [0,1]Explanation: Both 3 and 4 are peak elements so [1,0] and [0,1] are both acceptable answers.Example 2:Input: mat = [[10,20,15],[21,30,14],[7,16,32]]Output: [1,1]Explanation: Both 30 and 32 are peak elements so [1,1] and [2,2] are both acceptable answers. Constraints: m == mat.length n == mat[i].length 1 <= m, n <= 500 1 <= mat[i][j] <= 105 No two adjacent cells are equal. 
Difficulty: Medium, AC rate: 59.9% Question Tags:- Array- Binary Search- Divide and Conquer- Matrix Links: 🎁 [Question Detail](https://leetcode.com/problems/find-a-peak-element-ii/description/) | 🎉 [Question Solution](https://leetcode.com/problems/find-a-peak-element-ii/solution/) | 💬 [Question Discussion](https://leetcode.com/problems/find-a-peak-element-ii/discuss/?orderBy=most_votes) Hints:Hint 0 🔍Let's assume that the width of the array is bigger than the height, otherwise, we will split in another direction.Hint 1 🔍Split the array into three parts: central column left side and right side.Hint 2 🔍Go through the central column and two neighbor columns and look for maximum.Hint 3 🔍If it's in the central column - this is our peak.Hint 4 🔍If it's on the left side, run this algorithm on subarray left_side + central_column.Hint 5 🔍If it's on the right side, run this algorithm on subarray right_side + central_column Sample Test Case[[1,4],[3,2]] ---What's your idea?类似 162,按行二分,行内找最大的元素 O(n), 总时间复杂度 O(n * log(m))同样在 [MIT6.006](https://www.youtube.com/v/HtSuA80QTyo) 里有讲解---from typing import List class Solution: def findPeakGrid(self, mat: List[List[int]]) -> List[int]: m = len(mat) if m == 1: max_j, _ = max(enumerate(mat[0]), key=lambda tup: tup[1]) return [0, max_j] middle_row = m // 2 max_j, _ = max(enumerate(mat[middle_row]), key=lambda tup: tup[1]) if middle_row > 0 and mat[middle_row-1][max_j] > mat[middle_row][max_j]: return self.findPeakGrid(mat[:middle_row]) elif middle_row < m - 1 and mat[middle_row+1][max_j] > mat[middle_row][max_j]: result = self.findPeakGrid(mat[middle_row+1:]) return [sum(p) for p in zip(result, [middle_row+1, 0])] else: return [middle_row, max_j] s = Solution() print(s.findPeakGrid([[1,2,3]]) == [0, 2]) r = s.findPeakGrid([[1,4],[3,2]]) print(r == [1, 0] or r == [0, 1]) r = s.findPeakGrid([[10,20,15],[21,30,14],[7,16,32]]) print(r == [1, 1] or r == [2, 2]) r = s.findPeakGrid([[47,30,35,8,25],[6,36,19,41,40],[24,37,13,46,5],[3,43,15,50,19],[6,15,7,25,18]]) print(r == [0, 2] or r == [3, 3]) import sys, os; sys.path.append(os.path.abspath('..')) from submitter import submit submit(1901)Excercises Electric Machinery Fundamentals Chapter 6 Problem 6-10%pylab inlinePopulating the interactive namespace from numpy and matplotlibDescription A three-phase 60-Hz two-pole induction motor runs at a no-load speed of 3580 r/min and a full-load speed of 3440 r/min. * Calculate the slip and the electrical frequency of the rotor at no-load and full-load conditions. 
* What is the speed regulation of this motor?fe = 60 # [Hz] p = 2 n_nl = 3580 # [r/min] n_fl = 3440 # [r/min]SOLUTION The synchronous speed of this machine is:$$n_\text{sync} = \frac{120f_{se}}{p}$$n_sync = 120*fe / p print('n_sync = {:.0f} r/min'.format(n_sync))n_sync = 3600 r/minThe slip and electrical frequency at no-load conditions is:$$S_\text{nl} = \frac{n_\text{sync} - n_\text{nl}}{n_\text{sync}} \cdot 100\%$$s_nl = (n_sync - n_nl) / n_sync print(''' s_nl = {:.2f} % ============='''.format(s_nl*100))s_nl = 0.56 % =============$$f_\text{r,nl} = sf_e$$f_rnl = s_nl * fe print(''' f_rnl = {:.2f} Hz ==============='''.format(f_rnl))f_rnl = 0.33 Hz ===============The slip and electrical frequency at full load conditions is:$$ S_\text{fl} = \frac{n_\text{sync} - n_\text{fl}}{n_\text{sync}} \cdot 100\%$$s_fl = (n_sync - n_fl) / n_sync print(''' s_fl = {:.2f} % ============='''.format(s_fl*100))s_fl = 4.44 % =============$$f_\text{r,fl} = sf_e$$f_rfl = s_fl * fe print(''' f_rfl = {:.2f} Hz ==============='''.format(f_rfl))f_rfl = 2.67 Hz ===============The speed regulation is:$$SR = \frac{n_\text{nl} - n_\text{fl}}{n_\text{fl}} \cdot 100\%$$SR = (n_nl - n_fl) / n_fl print(''' SR = {:.2f} % ==========='''.format(SR*100))SR = 4.07 % ===========Ensemble Learning Initial Importsimport warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd from pathlib import Path from collections import Counter from sklearn.metrics import balanced_accuracy_score from sklearn.metrics import confusion_matrix from imblearn.metrics import classification_report_imbalanced from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from imblearn.ensemble import BalancedRandomForestClassifier from imblearn.ensemble import EasyEnsembleClassifierRead the CSV and Perform Basic Data Cleaning# Load the data file_path = Path('Resources/LoanStats_2019Q1.csv') df = pd.read_csv(file_path) # Preview the data df.head()Split the Data into Training and Testing# encoding categorical data columns = ['home_ownership', 'verification_status', 'issue_d', 'loan_status', 'pymnt_plan', 'initial_list_status', 'next_pymnt_d', 'application_type', 'hardship_flag', 'debt_settlement_flag'] for column in columns: df[column] = df[column].astype("category") df[column] = df[column].cat.codes # Create our features X = df.drop('loan_status', 1) # Create our target y = df[['loan_status']] X X.describe() # Check the balance of our target values y['loan_status'].value_counts() # Split the X and y into X_train, X_test, y_train, y_test # Create X_train, X_test, y_train, y_test X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 )Data Pre-ProcessingScale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).# Create the StandardScaler instance scaler = StandardScaler() # Fit the Standard Scaler with the training data scaler = scaler.fit(X_train) # Scale the training and testing data X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) X_train.shapeEnsemble LearnersIn this section, you will compare two ensemble algorithms to determine which algorithm results in the best performance. You will train a Balanced Random Forest Classifier and an Easy Ensemble classifier . For each algorithm, be sure to complete the folliowing steps:1. Train the model using the training data. 2. 
Calculate the balanced accuracy score from sklearn.metrics.3. Display the confusion matrix from sklearn.metrics.4. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.5. For the Balanced Random Forest Classifier only, print the feature importance sorted in descending order (most important feature to least important) along with the feature scoreNote: Use a random state of 1 for each algorithm to ensure consistency between tests Balanced Random Forest Classifier# Resample the training data with the BalancedRandomForestClassifier brf = BalancedRandomForestClassifier(random_state=1) brf.fit(X_train, y_train) # Calculated the balanced accuracy score y_pred = brf.predict(X_test) balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report print(classification_report_imbalanced(y_test, y_pred)) # List the features sorted in descending order by feature importance X.columns[np.argsort(brf.feature_importances_)[::-1]]Easy Ensemble Classifier# Train the Classifier eec = EasyEnsembleClassifier(random_state=1) eec.fit(X_train, y_train) # Calculated the balanced accuracy score y_pred = eec.predict(X_test) balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report print(classification_report_imbalanced(y_test, y_pred))pre rec spe f1 geo iba sup 0 0.10 0.88 0.95 0.18 0.91 0.82 88 1 1.00 0.95 0.88 0.97 0.91 0.84 13676 avg / total 0.99 0.95 0.88 0.97 0.91 0.84 13764Create datasetX, Y = utils.create_dataset(FOLDERS) X = utils.normalize_data(X) X = np.expand_dims(X, axis=1) ################################# #### MAKE TEST TRAIN SPLIT #### ################################# def create_dataloaders(CONFIG, X, Y): from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size=0.2, random_state=42) train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(Y_train)) test_dataset = TensorDataset(torch.Tensor(X_test), torch.Tensor(Y_test)) train_dataloader = DataLoader(train_dataset, shuffle=True, num_workers=CONFIG['cpu_workers'], batch_size=CONFIG['train_batch_size'], drop_last=True) test_dataloader = DataLoader(test_dataset, shuffle=False, num_workers=CONFIG['cpu_workers'], batch_size=CONFIG['test_batch_size'], drop_last=False) return train_dataloader, test_dataloader CONFIG['ephs'] = 100 net = ConvNet7() train_dataloader, test_dataloader = create_dataloaders(CONFIG, X, Y) optimizer = torch.optim.Adam(net.parameters(), lr=CONFIG.get('lr', 1e-3)) run_training(net, optimizer, CONFIG, train_dataloader, test_dataloader)100%|███████████████████████████████████████| 1599/1599 [00:25<00:00, 61.75it/s]Try LSTMclass RnnNet(nn.Module): def __init__(self): super().__init__() self.drop_p = 0.1 self.h_dim = 4 self.activ = F.gelu self.rnn = nn.LSTM(16, 32, 4, bidirectional=True, batch_first=True) # need (batch, seq, feature) self.global_avg = nn.AdaptiveAvgPool1d(4) self.flat = nn.Flatten() self.drop = nn.Dropout(p=self.drop_p) self.lin = nn.Sequential(nn.Linear(self.h_dim, self.h_dim * 2), nn.BatchNorm1d(self.h_dim * 2), nn.GELU(), nn.Dropout(p=self.drop_p), nn.Linear(self.h_dim * 2, self.h_dim // 2), nn.BatchNorm1d(self.h_dim // 2), nn.GELU(), nn.Dropout(p=self.drop_p), nn.Linear(self.h_dim // 2, 4)) def forward(self, x): x, _ = self.rnn(x) x = self.global_avg(x) x = self.flat(x) x = self.drop(x) x = self.lin(x) return x model = RnnNet() inp 
= torch.rand(32, 1, 16) out = model(inp) out.shape CONFIG['train_batch_size'] = 128 CONFIG['ephs'] = 15 net = RnnNet() train_dataloader, test_dataloader = create_dataloaders(CONFIG, X, Y) optimizer = torch.optim.Adam(net.parameters(), lr=CONFIG.get('lr', 1e-3)) utils.run_training(net, optimizer, CONFIG, train_dataloader, test_dataloader)100%|█████████████████████████████████████████| 399/399 [00:04<00:00, 82.59it/s]Importsimport pynncml as pnc from matplotlib import pyplot as pltLoad OpenCML Datasetopen_cml_dataset = pnc.read_open_cml_dataset('../dataset/open_cml.p') # read OpenCML datasetSelect Link and Plot link datalink_index = 14 link_data = open_cml_dataset[link_index] # select a link link_min_max=link_data.create_min_max_link(300) link_data.plot() # plot link data plt.show()Run rain estimation using constant baselinetsc = pnc.rain_estimation.two_step_network(1,pnc.neural_networks.RNNType.GRU) # init classification model res,_ = tsc(link_min_max.as_tensor(constant_tsl=10),link_data.meta_data.as_tensor(),tsc.init_state()) # run classification method rain=res[0,:,0] wd=res[0,:,1] plt.subplot(1, 2, 1) plt.plot(link_min_max.time(), rain.detach().numpy().flatten()) plt.title('Rain Estimation') plt.xlabel('Time') pnc.change_x_axis_time_format('%H') plt.grid() plt.subplot(1, 2, 2) plt.plot(link_min_max.time(), wd.detach().numpy().flatten()) plt.xlabel('Time') plt.title('Classification') pnc.change_x_axis_time_format('%H') plt.ylabel(r'$\sigma_n$') plt.grid() plt.show()関数型プログラミング (Functional Programming)``` ASSIGNMENT METADATAassignment_id: "Functional"``` **lang:ja**この講義では **関数型プログラミング** (**functional programming**)というプログラミングスタイルを紹介し、それがプログラミングでどう役に立つかということを説明します。関数型プログラミングの概念を全く知らなくてもPythonでプログラムを書くことはできますが、理解しておくとより良いプログラムを書くのに役立つでしょう。 **lang:en**We will introduce a programming style called **"functional programming"** and explain how it's useful in Python programming.Although you can write programs in Python without understanding functional programming, the concept of functional programming should help you to write better programs.The goal of this lecture is to tell basic concepts of functional programming so that you can make use of them in practical programming. 1. 
関数型プログラミングとは (What's functional programming) **lang:ja**この講義で扱う**関数型プログラミング (functional programming)**(または、関数プログラミング)というのは一般に、「入力から出力が一意に定まる"関数"の適用・合成によってプログラムを記述していくプログラミングスタイル」のことです。関数型プログラミングには以下のような利点が期待できます:* データの流れがわかりやすくなる* プログラムの分割・結合・再利用がしやすくなる* より高度な抽象化によって、複雑な処理がシンプルにかける「関数型プログラミング」というようなプログラミングスタイルの分類のことを **プログラミングパラダイム (programming paradigm)** といいます。他によく知られているパラダイムには以下のようなものがあります:* **手続き型プログラミング (procedual programming)*** **オブジェクト指向プログラミング (object-oriented programming)*** **宣言型プログラミング (declarative programming)**ただし、プログラミングパラダイムは、守らなければならない厳密なルールや絶対的なもの*ではありません*。むしろ、これらは理論と実践に基づいた、良いとされるプログラミングアプローチのようなものです。また、それぞれのパラダイムは対立するものではなく、独立な概念であるので、一つのプログラムでこれらのスタイルを共存させることもできます。例えばオブジェクト指向プログラミングとは、データや処理を"オブジェクト"としてまとめるスタイルですが、Pythonではこれは`Class`を用いることで実現できます。この講義では、関数型プログラミングのいくつかの基本的な概念を紹介し、実際のプログラミングに役立てられる形で身につけてもらうことを目標とします。 **lang:en****Functional programming** is generally a programming style in which a program is written by applying / synthesizing a "function" that uniquely determines the output from the input.".Functional programming can offer the following advantages:* Easy to understand the flow of data* Easy to split, combine and reuse programs* Able to write complex procedures in a simple wayThe classification of programming style like "functional programming" is called **programming paradigm**.Other well known paradigms include the following:* **Procedural programming*** **Object-oriented programming*** **Declarative programming**Note that programming paradigm is *not* a strict rule.Rather, they are like a good programming approach, based on theory and practice.Usually these paradigms are not conflicting, but are independent concepts, so it is possible to have these styles coexist in one program.For example, object-oriented programming is a style that combines data and processing as "objects", but in Python this can be achieved using `Class`. 2. 関数によるモジュール化 (Modularity) **lang:ja**これまでの講義のなかで、「関数」を使えば何度も繰り返される操作をくくりだすことができることを学んできました。例えば次のプログラムを見てみましょう。ここでは、二人の学生の成績を計算しています。 **lang:en**So far, we have learn that functions are a way to extract repeated sequences of instructions.For example, consider the following program, which calculates the final letter grade for two students:# A program that calculates the final grade for each student. # Scores for Assignment 1, Assignment 2, and Final Exam. sam_scores = [90, 80, 90] yuko_scores = [90, 100, 80] sam_weighted_score = 0.2 * sam_scores[0] + 0.2 * sam_scores[1] + 0.6 * sam_scores[2] sam_grade = 'PASS' if sam_weighted_score > 60 else 'FAIL' print('Sam\'s final score: {} => {}'.format(sam_weighted_score, sam_grade)) yuko_weighted_score = 0.2 * yuko_scores[0] + 0.2 * yuko_scores[1] + 0.6 * yuko_scores[2] yuko_grade = 'PASS' if yuko_weighted_score > 60 else 'FAIL' print('Yuko\'s final score: {} => {}'.format(yuko_weighted_score, yuko_grade))**lang:ja**プログラムのなかで `Sam` と `Yuko` の成績は同じ方法で計算をされています。しかし、このプログラムをすこし見ただけでは、本当に全く同じ方法で行われていることを確認するのはなかなか大変です。さらに、もし成績評価に関するパラメータを変えたくなったとした場合は、それぞれの学生に対する処理が同じになるよう、注意深くコードを変更する必要があります。このような問題は、成績評価をするための一連の処理の流れを「関数」としてまとめることで解決できます。以下のプログラムのように処理を関数にまとめれば、各学生に対し同じ関数を呼ぶだけで、同じ計算をしていることが保証できますし、パラメータを調整する際も変更箇所は関数の内側だけに限定されます。 **lang:en**`Sam` and `Yuko`'s grades are calculated in exactly the same way. However, it is currently very difficult to make sure that these calculations are carried out consistently. 
If we change how a particular assignment is weighted, or what the criteria for passing are, then we need to carefully make these changes for every single student.To avoid this problem, we can group the sequence of instructions that calculates a student's grade into a function. We can then simply call this function for each student, and any change that we need to make to our assignment weights can happen inside this function.def calculate_grade(student_name, scores): weighted_score = 0.2 * scores[0] + 0.2 * scores[1] + 0.6 * scores[2] grade = 'PASS' if weighted_score > 60 else 'FAIL' return '{}\'s final score: {} => {}'.format(student_name, weighted_score, grade) print(calculate_grade('Sam', sam_scores)) print(calculate_grade('Yuko', yuko_scores)) devon_scores = [60, 50, 60] print(calculate_grade('Devon', devon_scores))**lang:ja**このように関数でまとめると、あとから処理を変更しやすかったり、新たな学生を追加したりといったことが簡単になります。このように処理を関数によって分割することを**モジュール化**といいます。プログラムをモジュール化することで、プログラムのデバッグや再利用が簡単になります。 **lang:en**As shown above, once the calculation is wrapped in a function, it is much easier to change the logic later or to add new students.Splitting a procedure into functions in this way is called **modularization**. By modularizing programs, it becomes easier to debug and reuse them. 3. 純粋関数 (Pure Function) **lang:ja**ところで、上で実装した `calculate_grade` を何度も呼ぶと何が起こるでしょうか? **lang:en**By the way, what will happen if we call `calculate_grade` multiple times?print(calculate_grade('Sam', sam_scores)) print(calculate_grade('Sam', sam_scores)) # When we call the function again, the same message will be printed. print(calculate_grade('Sam', sam_scores))**lang:ja**もちろん同じ値が何度も返されます。同じ値を引数にあたえているのだから当たり前に思えます。では、次の関数 `calculate_grade_impure` はどうでしょうか? **lang:en**Of course, the same value is returned for each call.That seems natural, since we pass the same arguments every time.So what about the following function `calculate_grade_impure`?# Impure version def calculate_grade_impure(student_name, scores): scores[0] *= 0.2 scores[1] *= 0.2 scores[2] *= 0.6 weighted_score = scores[0] + scores[1] + scores[2] grade = 'PASS' if weighted_score > 60 else 'FAIL' return '{}\'s final score: {} => {}'.format(student_name, weighted_score, grade) print(calculate_grade_impure('Sam', sam_scores)) print(calculate_grade_impure('Sam', sam_scores)) # When we call the function again, we get a different result! print(calculate_grade_impure('Sam', sam_scores))**lang:ja**関数を呼ぶたびに`Sam`のscoreが減少してしまいました! これは `calculate_grade_impure` の内部で `score` の値自体を変更してしまっているためです。この `calculate_grade_impure` が行うような、返り値の計算に直接関係ない状態を変える処理などのことを **副作用** とよびます。副作用のない関数を**純粋関数**といいます。今回の場合、`calculate_grade` は純粋関数であり、`calculate_grade_impure` は非純粋関数です。関数型プログラミングにおいて、非純粋関数よりも純粋関数のほうが推奨されます。今回の例でみたように、非純粋関数を呼ぶ際にはデータがどのように変わるのかということに注意を払う必要があり、プログラムの複雑性が増すためです。また、純粋関数のほうが、コードの再利用がしやすいという利点があります。ただし、Pythonにおいて、非純粋関数を完全になくすことは現実的ではありませんし、純粋関数はパフォーマンス上不利になることもあります。ただ、関数を定義する際や呼ぶ際にそれがどんな副作用を持つかどうかを意識するのは大切です。 **lang:en**Every time you call `calculate_grade_impure`, `Sam`'s score decreases! This is because the `scores` list itself is modified inside `calculate_grade_impure`. Changing state that is not directly related to computing the return value is called a **side effect**. Functions with no side effects are called **"pure functions"**. 
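To see this side effect concretely, you can inspect the argument before and after the call. A minimal check (not part of the original lesson code), assuming `sam_scores` and `calculate_grade_impure` as defined above:

```python
sam_scores = [90, 80, 90]
print(sam_scores)                          # [90, 80, 90]
calculate_grade_impure('Sam', sam_scores)  # the call mutates its argument in place
print(sam_scores)                          # [18.0, 16.0, 54.0] -- the caller's list has changed
```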
In this case, `calculate_grade` is a pure function, and` calculate_grade_impure` is an impure function.For functional programming, pure functions are recommended over non-pure functions.As we saw in this example, it is necessary to pay attention to how data changes when calling an impure function, which increases the complexity of the program.Also, pure functions have the advantage of being easier to reuse code.However, in Python, it is not realistic to completely eliminate impure functions, and pure functions may be disadvantageous for performance.However, when defining or calling a function it is still important to be aware of what side effects it has. 4. 第一級オブジェクトとしての関数 (Function as a first-class object) **lang:ja**Pythonにおいて、関数は変数に格納されたり、他の関数の引数や返り値になることができます。このことを関数が **第一級オブジェクト (first-class object)** として扱われるといいます。第一級オブジェクトとは、変数への代入や関数の引数や返り値になることなどができる値のことです。他には数値型や`String`型の値、リスト、辞書、Class なども第一級オブジェクトです。この「関数が第一級オブジェクトである」性質は、我々は「数値」や「文字列」と同様に、それらに対する「操作」をもプログラムの中で持ち回すことを意味します。この性質をうまく使いこなすことで、より簡潔でわかりやすいプログラムをかけることがあります。それでは実際の例をみていきましょう。まず、関数は数値などの値と同様に変数に代入することができ、それをいつも通り呼び出すことができます。 **lang:en**A function is simply an object you can call in Python. Otherwise, a function behaves no differently from other objects. This property is incredibly powerful as it allows us to move *operations* around our code.Let's take a look at how this works more concretely. First, we can assign functions to variables, and call them as usual.def square(x): return x ** 2 # We can assign a function `square` to a variable `my_square` my_square = square # The variable can be called since it contains a function. print(my_square(3))**lang:ja**関数をリストに格納することもできます。これは例えば連続して呼ぶべき関数があったときなどに役立つでしょう。 **lang:en**We can also add functions to a list. This can be useful if we have a series of functions that we need to call.def square(x): return x ** 2 def cube(x): return x ** 3 # Creating a list of functions. power_functions = [ square, cube ] for power_function in power_functions: print(power_function(3))**lang:ja**また、関数を別の関数に引数として渡したり、関数の返り値にすることもできます。引数や返り値が関数である関数のことを**高階関数 (higher-order function)** といいます。高階関数を用いることでより進んだ抽象化を行うことができます。リストをソートする操作を考えてみましょう。「リスト内の値を並べ替える」という操作はとても一般的なものです。しかし、あるリストにおいて「どんな値が先にくるべきか」というのは、用途によります。そこでPythonは ソートを行う `sorted(...)` を高階関数として提供しています。`sorted` を呼ぶ際に `key` としてどの値で並べ替えるべきかを指定する関数を与えることができます。 **lang:en**We can also pass functions to functions as arguments. This is most helpful when we have a general pattern, and we need specific operations to make these patterns relevant to our programs.A good example that we mentioned earlier is sorting, where "what should come first" is very dependent on our program. For example, for Python's built in `sorted(...)` function, we use the `key` argument to tell the function which of the students' attributes to sort by.student_ages = [('Sam', 18), ('Yuko', 20), ('Devon', 19)] # Sort by names in alphabetical order. def get_name(student): return student[0] sorted_by_name = sorted(student_ages, key=get_name) print('Sorted by name: {}'.format(sorted_by_name)) # Sort by age. (ascending order) ## You can use lambda to avoid defining a new function sorted_by_age = sorted(student_ages, key=lambda student:student[1]) print('Sorted by age (smallest to largest): {}'.format(sorted_by_age)) # You can use `reverse` to sort a list by descending order. 
sorted_by_age_desc = sorted(student_ages, key=lambda student:student[1], reverse=True) print('Sorted by age (largest to smallest): {}'.format(sorted_by_age_desc))**lang:ja**さらに、関数を関数の中で定義し、それを返すことも可能です。次の例では関数`create_adder`の情報を*capture* した`adder`という関数を定義し、それを返しています。 **lang:en**Finally, just like objects, functions can be *defined* and *returned* in functions. This is an advanced technique that will not be covered in this notebook, but allows information in the outer function to be "captured" by the inner function.def create_adder(k): def adder(x): # k is captured from the outer function. return x + k return adder adder_5 = create_adder(5) print(adder_5(3))5. Exercise: 高階関数 (Higher-order functions) 5.1. Calling Functions from Functions **lang:ja**それでは実際に関数を引数に取る関数を実装してみましょう。1引数関数`f`と2つの値`x, n`を受け取り、`f` を`x`に対して `n`回適用する関数 `apply_n_times(f, x, n)` を実装してください。 **lang:en**In this section, let's implement higher-order functions by ourselves.Define a function named `apply_n_times(f, x, n)` that takes a function `f` and two arguments `x`, `n`, and returns the value when this function is applied to `x` `n` times. Example```pythondef add2(x): return x + 2 Prints 10 (i.e. add2(add2(add2(add2(add2(0))))))print(apply_n_times(add2, 0, 5)) `````` EXERCISE METADATAexercise_id: "ex51_ApplyNTimes"```%%solution def apply_n_times(f, x, n): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION result = x for _ in range(n): result = f(result) return result # END SOLUTION %%studenttest ApplyNTimesStudentTest def add2(x): return x + 2 assert apply_n_times(add2, 0, 5) == 10 # Note: the "lambda" syntax is a shorthand for creating a function with no name. # For more information, see: # https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions assert apply_n_times(lambda x: x * 2, 1, 10) == 1024 def fibonacci_step(x): return (x[1], x[0] + x[1]) assert apply_n_times(fibonacci_step, (1, 1), 10) == (89, 144) %%inlinetest ApplyNTimesInlineTest assert apply_n_times(lambda x: True, None, 1) is not None, 'Did you forget to return a value?' assert apply_n_times(lambda x: x + 1, 1, 2) == 3, 'Are you passing `x` and `y` to the function?' assert apply_n_times(lambda x: x * x, 0, 100) == 0, '`x` must be returned as is when `n == 0`.'5.2. Returning the First Item that Matches a Predicate **lang:ja**引数を受け取り、`True` か `False` を返す純粋関数をpredicate (述語)と呼ぶことにします。例えば、次の関数`greater_than_5` はpredicateです。```pythondef greater_than_5(x): return x > 5```predicateとリストを受け取り、リストの要素のうちそのpredicateが`True`を返す最初の要素を返す関数`find_first_match` を実装してください。ただし、そのような要素が存在しない場合、`None` を返してください。 **lang:en**We call a function that takes arguments and returns a Boolean value as "predicate".For example, following `greater_than_5` is a predicate.```pythondef greater_than_5(x): return x > 5```Define a function `find_first_match(...)` that takes a predicate and a list, and returns the first matching item. If there is no such item, then return `None`. 
Example```python Prints 8, which is the first item greater than 5.print(find_first_match(greater_than_5, [1, 2, 3, 8, 9])) Prints Noneprint(find_first_match(greater_than_5, [1, 2, 3, -8, -9]))`````` EXERCISE METADATAexercise_id: "ex52_FindFirstByPredicate"```%%solution def find_first_match(predicate, items): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION for x in items: if predicate(x): return x # END SOLUTION %%studenttest FindFirstMatchStudentTest assert find_first_match(lambda x: x > 5, [1, 2, 3, 8, 9]) == 8 assert find_first_match(lambda x: x, [False, False, True]) is True assert find_first_match(lambda x: x == 10, [11, 12, 8]) is None %%inlinetest FindFirstMatchInlineTest assert find_first_match(lambda x: True, []) is None, 'Do you have the correct return value when there are no items?' assert find_first_match(lambda x: x > 1, [2]) == 2, 'Are you checking if calling the predicate returns True?' assert find_first_match(lambda x: x > 1, [1, 2]) == 2, 'Are you checking all of the items?'5.3. Filtering Items in a List **lang:ja**Predicateとリストを受け取り、リストの各要素のうち predicateが`True`を返すもののみからなるリストを返す関数 `my_filter` を実装してください。ただし、`my_filter` が返すリスト内の要素の順序は元のリストの順序を保存するものとします。 **lang:en**Define a function `filter_items(...)` that takes a predicate and a list of items, and returns only the items for which the predicate returns `True`.`my_filter` has to preserve the order of items in the list passed as an argument. Example```pythondef greater_than_3(x): return x > 3 Prints [4, 8, 10]print(my_filter(greater_than_3, [4, 2, 8, -3, 10, 3]))def longer_than_3(x): return len(x) > 3 Prints ['elephant', 'hippopotamus'].print(my_filter(longer_than_3, ['dog', 'elephant', 'cat', 'hippopotamus']))`````` EXERCISE METADATAexercise_id: "ex53_FilterList"```%%solution def my_filter(predicate, items): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION filtered_items = [] for item in items: if predicate(item): filtered_items.append(item) return filtered_items # END SOLUTION %%studenttest MyFilterStudentTest assert(my_filter(lambda x : x > 3, [4, 2, 8, -3, 10, 3]) == [4, 8, 10]) assert (my_filter(lambda x: len(x) > 3, ['dog', 'elephant', 'cat', 'hippopotamus']) == ['elephant', 'hippopotamus']) assert my_filter(lambda x: x > 2, [2, 4, 5]) == [4, 5] assert my_filter(lambda x: x, [True, True, False]) == [True, True] assert (my_filter(lambda x: x[0] * x[1] < 1, [(1, 0.5), (0.8, 0.9), (2, 1)]) == [(1, 0.5), (0.8, 0.9)]) %%inlinetest MyFilterInlineTest assert len(my_filter(lambda x: True, [1, 2])) == 2, 'Are you returning all matching items?' assert my_filter(lambda x: x > 1, [1, 2]) == [2], 'Are you calling the predicate on each item?' assert my_filter(lambda x: True, [1, 2]) != [2, 1], 'Are you following the order of the provided items?'5.4. `map`: リストの各要素に関数適用をする関数 (`map`: Appying a Function to Each Item in a List) **lang:ja**関数とリストを受け取り、そのリストの各要素に受け取った関数を適用して得られるリストを返す関数 `my_map` を実装してください。 **lang:en**Define a function `my_map(...)` that takes a function and a list of items, and returns the list of items after the function has been applied on each of the items. 
Example```pythondef square(x): return x ** 2print(my_map(square, [1, 2, 3])) Prints [1, 4, 9].def expand3(x): return [x] * 3print(my_map(expand3, [1, 2, 3])) Prints [[1, 1, 1], [2, 2, 2], [3, 3, 3]].`````` EXERCISE METADATAexercise_id: "ex54_ApplyFuncToList"```%%solution def my_map(function, items): """ # BEGIN PROMPT pass """ # END PROMPT # BEGIN SOLUTION transformed_items = [] for item in items: transformed_items.append(function(item)) return transformed_items # END SOLUTION %%studenttest MyMapStudentTest assert my_map(lambda x: x, [1, 2, 3]) == [1, 2, 3] assert my_map(lambda x: x[0], [(1, 2), (2, 3), (3, 4)]) == [1, 2, 3] assert my_map(lambda x: x > 3, [8, 2, 1]) == [True, False, False] %%inlinetest MyMapStudentTest assert len(my_map(lambda x: x, [1, 2])) == 2, 'Are you returning all of the mapped items?' assert my_map(lambda x: x, [1, 2]) != [2, 1], 'Are you following the order of the provided items?' assert my_map(lambda x: x + 1, [1, 2]) == [2, 3], 'Are you calling the function on each of the items?'Aside: Python's Built-In Functional Primitives **lang:ja**さて、ここまで実装してきた `my_filter` と `my_map` はよくつかわれる処理なので、Pythonでbuilt-in関数として用意されています。それぞれ、`filter`, `map` という関数です。 **lang:en**As it turns out, the `my_filter(...)` and `my_map(...)` that you implemented are such common operations that they have been built into the Python language itself!filtered_list = list(filter(lambda x : x > 3, [4, 2, 8, -3, 10, 3])) assert(filtered_list == [4, 8, 10]) mapped_list = list(map(lambda x: x**2, [1, 2, 3])) assert(mapped_list == [1, 4, 9])**lang:ja**また、これらの処理は **リスト内包表記 (list comprehension)** という機能をつかうことでも記述することができます。例えば、`map`に対応する処理はこのように実装できます。 **lang:en**Also, you can use **list comprehension** to implement such list operations.By default, we can transform items like so:# Prints [1, 4, 9]. print([x ** 2 for x in [1, 2, 3]])**lang:ja**また、`filter` に対応する処理は、述語をリスト内包表記の末尾に `if` とともに書くことで記述できます。 **lang:en**Then, to filter the items, we add the predicate at the end of the list comprehension:print([x for x in [4, 2, 8, -3, 10, 3] if x > 3])**lang:ja**もっと複雑なこともできます。ただ、リスト内包表記で複雑なことをしすぎるとコードの可読性が落ちるため、このような例では単に for-loopやif文を使ったほうがいいかもしれません。 **lang:en**We can write more complex operations in list comprehension!This syntax can get quite complicated, with list comprehensions inside of list comprehensions! This is a very powerful tool, but we need to be careful that we are writing code that others can understand. For example, in the following case, we might be better off just using our normal for-loops and if-statements.# Prints [4, 9] since only x > 1 are transformed. print([x ** 2 for x in [1, 2, 3] if x > 1]) # Creates a list of squares for each number in [1, 2, 3] that is larger than 1. print([[x ** 2 for x in range(y)] for y in [1, 2, 3] if y > 1]) # Converts a list of number strings into numbers, then creates 3 x 3 matrices # containing each number. print([[[x] * 3] * 3 for x in [int(y) for y in ['1', '2', '3']]])6. Exercise: オブジェクト指向プログラミングと関数型プログラミング (OOP and FP **lang:ja**この節では、ここまでで学んだ関数型プログラミング的考え方と、classを用いるオブジェクト指向的考え方をあわせて、データ処理を行うプログラムを書いてみましょう。今回は`"('市町村名', 人口, 面積)"` というタプルのリストに対する処理をするプログラムを考えます。6.1-6.3が問題の説明、6.4で実装という構成になっています。 **lang:en**In this section, let's write a program that performs data processing, combining the functional programming and the object-oriented programming.Here, let's consider a program that operates on a list of tuples `" ('city name', population, area) "`.Problem statements are given in 6-1, 6-2, and 6-3, and you can implement the solution in 6-4. 
```python [(city name, population, area)]test_data = [('Chiyoda', 64894, 11.66), ('Minato', 259042, 20.37), ('Shinjuku', 349844, 18.22), ('Bunkyo', 233926, 11.29), ('Taito', 207838, 10.11)]``` 6.1. Define a class **lang:ja**一つの市町村のデータを表すクラス `City` を実装してください。このクラスは* `name`* `population`* `area`というメンバーをもち、これらの値をタプルとして受け取るコンストラクタを持つようにしてください。 **lang:en**Define a class `City`, which has the following members:* `name`* `population`* `area`The `City` has to have a contstructor that work like followings: Example```pythonname = 'Bunkyo'population = 233926area = 11.29city = City((name, population, area))assert(city.name == name)assert(city.area == area)assert(city.population == population) You can create a list of `City` instances from `test_data` by using `map`.city_list = list(map(City, test_data))``` 6.2. Define a method **lang:ja**6.1.で定義した class `City` に人口密度を計算して返すメソッド `population_density()` を実装してください。 **lang:en**Implement a method `population_density()` in `City` Example```python Population density is (population / area)assert(city.population_density() == city.population / city.area)``` 6.3. Get top `k` cities **lang:ja**`City`のインスタンスのリスト `city_list` を受け取り、 人口密度が高い上位`k`個の都市の名前のリストを返す関数 `top_k_densest_city_name(city_list, k)` を実装してください。* **注意**: 返すリストの要素は都市の**名前**にして下さい。* **実装のヒント**: リスト`l`に対し、`l[n:m]`と書くことで `n`番目から`m-1`番目の要素までを取り出すことが出来ます。 **lang:en**Implement a function `top_k_densest_city_name(city_list, k)` that takes a list of `City` instances `city_list` and returns a list of the top` k` cities with high population density.* **Note**: The return value should be a list of **names**, not city objects.* **Hint**: You can write `l[n:m]` to take the range from `n`-th element to `m-1`-th element in a list `l`. Example```pythontop5_cities = top_k_densest_city_names(city_list, 5) Prints ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda'] 'Bunkyo' is the densest city.print(top5_cities) If you are only interested in the densest city, pass 1 as `k`. Prints ['Bunkyo']print(top_k_densest_city_names(city_list, 1))``` 6.4. 実装 (Implementation) ``` EXERCISE METADATAexercise_id: "ex64_FP_OOP"```%%solution # [(city name, population, area)] test_data = [('Chiyoda', 64894, 11.66), ('Minato', 259042, 20.37), ('Shinjuku', 349844, 18.22), ('Bunkyo', 233926, 11.29), ('Taito', 207838, 10.11)] """ # BEGIN PROMPT class City: # Q 6.1. def __init__(self, data): pass # Q 6.2. def population_density(self): pass # Q6.3. def top_k_densest_city_names(city_list, k): pass """ # END PROMPT # BEGIN SOLUTION class City: # Q 6.1. def __init__(self, data): self.name = data[0] self.population = data[1] self.area = data[2] # Q 6.2. def population_density(self): return self.population / self.area # Q6.3. def top_k_densest_city_names(city_list, k): sorted_cities = list(sorted(city_list, key=lambda c: c.population_density(), reverse=True)) return list(map(lambda c: c.name, sorted_cities[:k])) # END SOLUTION %%studenttest FPAndOOPStudentTest1 # Q 6.1. ## Create a `City` instance for ('Chiyoda', 64894, 11.66) chiyoda_ku = City(test_data[0]) ## Each data must be accessed assert(chiyoda_ku.name == 'Chiyoda') assert(chiyoda_ku.population == 64894) assert(chiyoda_ku.area == 11.66) %%studenttest FPAndOOPStudentTest2 # Q 6.2. ## Population density is (population / area) assert(chiyoda_ku.population_density() == 64894 / 11.66) %%studenttest FPAndOOPStudentTest3 # Q 6.3. ## Create a list of `City` instances by using `map` function. 
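## `map(City, test_data)` calls the `City` constructor once per (name, population, area) tuple.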
city_list = list(map(City, test_data)) ## Get Top 5 cities top5_densest_cities = top_k_densest_city_names(city_list, 5) # Expected: ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda'] print('Top 5 cities: {}'.format(top5_densest_cities)) assert(len(top5_densest_cities) == 5) expected = ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda'] assert top5_densest_cities == expected, 'Expected: {}, Actual: {}'.format(expected, top5_densest_cities) %%studenttest FPAndOOPStudentTest3_2 # More tests: Change the value of `k` ## Get Top 2 cities top2_densest_cities = top_k_densest_city_names(city_list, 2) print('Top 2 cities: {}'.format(top2_densest_cities)) assert len(top2_densest_cities) == 2 assert top2_densest_cities == [ 'Bunkyo', 'Taito' ] ## What if `k` is 0? top0_densest_cities = top_k_densest_city_names(city_list, 0) print('Top 0 cities: {}'.format(top0_densest_cities)) assert(top0_densest_cities == []) %%inlinetest FPAndOOPStudentTest # Q 6.1. try: City(('A', 1, 1)) except NameError: assert False, 'class `City` is not implemented' except Exception: assert False, 'City((\'A\', 1, 1)) raised an exception' try: c = City(('A', 1, 1)) name = c.__class__.__name__ except Exception: assert False, "City(('A', 1, 1)) raised an exception" if name != 'City': assert False, 'The class name is not `City` but {}'.format(name) test_data = [('Chiyoda', 64894, 11.66), ('Minato', 259042, 20.37), ('Shinjuku', 349844, 18.22), ('Bunkyo', 233926, 11.29), ('Taito', 207838, 10.11)] city_a = City(test_data[0]) try: city_a.name city_a.population city_a.area except AttributeError: assert False, 'The class `City` must have fields `name`, `population` and `area`.' assert city_a.name == 'Chiyoda', '`name` field is not implemented properly' if city_a.population == test_data[0][2] and city_a.area == test_data[0][1]: assert False, 'You may have swapped `population` and `area`.' assert city_a.population == test_data[0][1], '`population` field is not implemented properly' assert city_a.area == test_data[0][2], '`area` field is not implemented properly' # Q 6.2. try: city_a.population_density() except AttributeError: assert False, 'The class has no method like `city_a.population_density()`.' assert city_a.population_density() == city_a.population / city_a.area, 'population_density() must return `population` / `area`' # Q 6.3. 
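## The checks below verify that `top_k_densest_city_names` is defined, returns a list of `k` city-name strings, and orders them by population density.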
try: top_k_densest_city_names except NameError: assert False, 'function \'top_k_densest_city_names\' is not defined' city_list = list(map(lambda data: City(data), test_data)) try: assert top_k_densest_city_names(city_list, 3) is not None, "You have not implemented top_k_densest_city_names" except Exception as e: assert False, 'Error when trying to run top_k_densest_city_names: %s' % e ans3 = top_k_densest_city_names(city_list, 3) assert len(ans3) == 3, 'top_k_densest_city_names(..., 3) must return list with 3 elements, but got %d' % len(ans3) dense_cities = top_k_densest_city_names(city_list, 5) assert dense_cities.__class__ == list, "top_k_densest_city_names() should return a list, but got %s" % dense_cities.__class__ assert len(dense_cities) == 5, "top_k_densest_city_names(city_list, 5) should return a list with 5 elements, but got %d" % len(dense_cities) assert dense_cities[0].__class__ == str, "top_k_densest_city_names() should return a list of strings, but got %s" % dense_cities[0].__class__ ans = ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda'] assert dense_cities == ans, ('the population density ranking should be '+"['Bunkyo', 'Taito', 'Shinjuku', 'Chuo', 'Minato'], but your code returned %s" % dense_cities) result, logs = %autotest FPAndOOPStudentTest assert result.results['passed'] report(FPAndOOPStudentTest, results=result.results)Import librariesfrom __future__ import print_function, division import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings import time import os import copy from PIL import Image import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.utils.data import DataLoader, Dataset from torchvision.utils import make_grid % matplotlib inline warnings.filterwarnings('ignore')Dataloaderclass TwinsDataloader(Dataset): def __init__(self, dataroot, df, transform): ''' dataroot: path to folder with items df: pandas dataframe with fields view, id_a, id_b transform: torchvision transform ''' self.dataroot = dataroot self.df = df self.transform = transform def __getitem__(self, index): def get_img_path(img_id, view): #return os.path.join(self.dataroot, f'{img_id}/{img_id}d{view}__face.jpg') return self.dataroot+f'{img_id}/{img_id}d{view}__face.jpg' #print(self.df.iloc[index].values[0]) view, id_a, id_b = self.df.iloc[index].values #print(view) #view = np.random.choice(views) #print(view, id_a, id_b) path_a = get_img_path(id_a, view) path_b = get_img_path(id_b, view) img_a = Image.open(path_a) img_b = Image.open(path_b) #plt.imshow(img_a) #plt.show() #plt.imshow(img_b) img_a = self.transform(img_a) img_b = self.transform(img_b) return {'img_a': img_a, 'img_b': img_b, 'class_a':id_a,'class_b':id_b}#'A_paths': path_a, 'B_paths': path_b } def __len__(self): return self.df.shape[0] #for testing df_train=pd.DataFrame({'view':np.random.randint(8,12,7),'id_a':[90100 for x in range(1,8)],'id_b':[90100 for x in range(1,8)]}) df_train data_transforms = { 'train': transforms.Compose([ transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } direct='./nd-twins-0006/faces/' twins_dataset = TwinsDataloader(direct, df_train, data_transforms['train']) #twins_dataset.__getitem__(0) dataset = 
DataLoader(twins_dataset, batch_size=4,shuffle=True) def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data T = next(iter(dataset)) #print(T) classes=T['class_a'].numpy() inputs=T['img_a'] # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out, title=[x for x in classes])Traindevice = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") #df=pd.read_csv('twins-relationship-info.csv', index_col=0) #df from model import MobileFaceNet model = MobileFaceNet(embedding_size=512) PATH = os.path.join(os.getcwd(),"model_mobilefacenet.pth") checkpoint = torch.load(PATH,map_location=device) model.load_state_dict(checkpoint) optimizer = optim.SGD(model.parameters(),lr=0.1) print(inputs.shape) model(inputs) ## This class for choosing the layers you need class ModelBottom(nn.Module): def __init__(self, original_model): super(ModelBottom, self).__init__() self.features = nn.Sequential(*list(original_model.children())[:-2]) def forward(self, x): x = self.features(x) return x model_flatten = ModelBottom(model) inputs, labels = next(iter(dataloaders['train'])) outputs = model_flatten(inputs) outputs.data.shape classes=os.listdir(direct) model_flatten.classifier = nn.Sequential( nn.Linear(hid_size, 256), nn.ReLU(), nn.Dropout(0.4), nn.Linear(256, n_classes), nn.LogSoftmax(dim=1)) def train():clone repository and move `persian_re` package to use it!git clone https://github.com/nimaafshar/persian_relation_extraction.git !mv persian_relation_extraction/persian_re/ persian_re/installing specified requirements for google colab!pip install -r persian_relation_extraction/requirements_colab.txtCollecting transformers==4.17.0 Downloading transformers-4.17.0-py3-none-any.whl (3.8 MB)  |████████████████████████████████| 3.8 MB 5.4 MB/s [?25hCollecting hazm==0.7.0 Downloading hazm-0.7.0-py3-none-any.whl (316 kB)  |████████████████████████████████| 316 kB 47.2 MB/s [?25hCollecting pyyaml==5.4.1 Downloading PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl (636 kB)  |████████████████████████████████| 636 kB 47.3 MB/s [?25hCollecting clean-text[gpl] Downloading clean_text-0.6.0-py3-none-any.whl (11 kB) Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers==4.17.0->-r persian_relation_extraction/requirements_colab.txt (line 1)) (21.3) Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers==4.17.0->-r persian_relation_extraction/requirements_colab.txt (line 1)) (3.6.0) Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from[...]checking *CUDA* availablity!nvidia-smiMon Mar 21 20:10:06 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. 
| |===============================+======================+======================| | 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 | | N/A 64C P8 32W / 149W | 0MiB / 11441MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-------[...]Entity-Marker tokenizer + Entity Start output model Importsimport numpy as np import torch, gc import torch.nn as nn from torch.optim import AdamW from transformers import BertConfig from transformers import get_linear_schedule_with_warmup import collections from sklearn.metrics import classification_report from persian_re.preprocess import PerlexData, create_data_loader from persian_re.tokenizers import BertEntityMarkerTokenizer from persian_re.settings import Config from persian_re.models import EntityStartModel from persian_re.operation import Trainer,TrainingArgumentsGPU configurationdevice = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f'device: {device}') train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('CUDA is not available. Training on CPU ...') else: print('CUDA is available! Training on GPU ...') #clear cuda cache gc.collect() torch.cuda.empty_cache()Load Datadata = PerlexData.get_instance()Entity Marker tokenizertokenizer: BertEntityMarkerTokenizer = BertEntityMarkerTokenizer.from_pretrained(Config.MODEL_NAME_OR_PATH)Configurationarguments = TrainingArguments(epochs=20, clip=0.0, train_callback_interval=100) config = BertConfig.from_pretrained( Config.MODEL_NAME_OR_PATH, **{ 'label2id': data.label2ids, 'id2label': data.id2labels, 'hidden_dropout_prob': 0.2 }) config*DataLoader* objectstrain_data_loader = create_data_loader(data.x_train, data.y_train, tokenizer, Config.MAX_LEN, Config.TRAIN_BATCH_SIZE, data.label2ids) valid_data_loader = create_data_loader(data.x_valid, data.y_valid, tokenizer, Config.MAX_LEN, Config.VALID_BATCH_SIZE, data.label2ids) test_data_loader = create_data_loader(data.x_test,data.y_test, tokenizer, Config.MAX_LEN, Config.TEST_BATCH_SIZE, data.label2ids)[CLS] Relation Extraction Modelpt_model = EntityStartModel(config,tokenizer.e1_start_token_id,tokenizer.e2_start_token_id) pt_model.resize_token_embeddings(len(tokenizer)) pt_model = pt_model.to(device)Trainingwith:- **AdamW** optimizer with initial learning rate `INITIAL_LEARNING_RATE`- **Linear Scheduler** with no warmup- **CrossEntropyLoss** with class weights to balance dataoptimizer = AdamW(pt_model.parameters(), lr=Config.INITIAL_LEARNING_RATE) total_steps = len(train_data_loader) * arguments.epochs scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=total_steps ) loss_fn = nn.CrossEntropyLoss(weight=torch.FloatTensor(data.class_weights).to(device)) trainer = Trainer(pt_model,device,loss_fn,optimizer,scheduler,arguments,train_data_loader,valid_data_loader,test_data_loader)create dir `output` in `BASE_PATH` to save model!mkdir persian_relation_extraction/output/ trainer.train()Model Evaluation and Comparison1. 
using **last epoch** (overfitted) modely_pred, y_pred_probs, y_true, metrics = trainer.predict_tests() metrics print(classification_report(y_true, y_pred,target_names=data.labels))precision recall f1-score support Cause-Effect(e1,e2) 0.88 0.92 0.90 99 Cause-Effect(e2,e1) 0.83 0.84 0.84 143 Component-Whole(e1,e2) 0.76 0.75 0.75 119 Component-Whole(e2,e1) 0.83 0.65 0.72 110 Content-Container(e1,e2) 0.80 0.88 0.84 113 Content-Container(e2,e1) 0.84 0.90 0.87 29 Entity-Destination(e1,e2) 0.85 0.87 0.86 214 Entity-Origin(e1,e2) 0.77 0.77 0.77 155 Entity-Origin(e2,e1) 0.94 0.83 0.88 35 Instrument-Agency(e1,e2) 0.59 0.62 0.61 16 Instrument-Agency(e2,e1) 0.76 0.72 0.74 99 Member-Collection(e1,e2) 0.60 0.75 0.67 24 Member-Collection(e2,e1) 0.79 0.75 0.77 148 Message-Topic(e1,e2) 0.78 0.88 0.83 [...]2. using **saved model** (which has the lowest validation loss)saved_model = torch.load(Config.OUTPUT_PATH) temp_trainer = Trainer(saved_model,device,loss_fn,optimizer,scheduler,arguments,train_data_loader,valid_data_loader,test_data_loader) y_pred, y_pred_probs, y_true, metrics = temp_trainer.predict_tests() metrics print(classification_report(y_true, y_pred,target_names=data.labels))precision recall f1-score support Cause-Effect(e1,e2) 0.81 0.95 0.87 99 Cause-Effect(e2,e1) 0.80 0.90 0.84 143 Component-Whole(e1,e2) 0.64 0.81 0.72 119 Component-Whole(e2,e1) 0.72 0.56 0.63 110 Content-Container(e1,e2) 0.80 0.84 0.82 113 Content-Container(e2,e1) 0.79 0.93 0.86 29 Entity-Destination(e1,e2) 0.93 0.75 0.83 214 Entity-Origin(e1,e2) 0.70 0.85 0.77 155 Entity-Origin(e2,e1) 0.63 0.83 0.72 35 Instrument-Agency(e1,e2) 0.38 0.75 0.50 16 Instrument-Agency(e2,e1) 0.76 0.68 0.72 99 Member-Collection(e1,e2) 0.44 0.83 0.58 24 Member-Collection(e2,e1) 0.84 0.71 0.77 148 Message-Topic(e1,e2) 0.90 0.69 0.78 [...]Saving Best Modeltorch.save(pt_model,'/content/drive/MyDrive/RE_task/entitymarker_entitystart_v0.1.bin')How to prevent overfitting1. train only last n layers of BERT2. increase dropout probablity3. change scheduler (use `ReduceOnPlateu` or non-linear schedulers)scikit-learnStrona biblioteki: [https://scikit-learn.org](https://scikit-learn.org) Dokumentacja/User Guide: [https://scikit-learn.org/stable/user_guide.html](https://scikit-learn.org/stable/user_guide.html)Podstawowa biblioteka do uczenia maszynowego w języku Python.Aby zainstalować bibliotekę scikit-learn, użyj polecenia poniżej:```!pip install scikit-learn```Aby zaktualizować do najnowszej wersji bibliotekę scikit-learn, użyj polecenia poniżej:```!pip install --upgrade scikit-learn```Kurs stworzony w oparciu o wersję `0.22.1` Preprocessing danych:1. [Import bibliotek](0)2. [Wygenerowanie danych](1)3. [Utworzenie kopii danych](2)4. [Zmiana typu danych i wstępna eksploracja](3)5. [LabelEncoder](4)6. [OneHotEncoder](5)7. [Pandas *get_dummies()*](6)8. [Standaryzacja - StandardScaler](7)9. 
[Przygotowanie danych do modelu](8) Import bibliotekimport numpy as np import pandas as pd import sklearn sklearn.__version__Wygenerowanie danychdata = { 'size': ['XL', 'L', 'M', 'L', 'M'], 'color': ['red', 'green', 'blue', 'green', 'red'], 'gender': ['female', 'male', 'male', 'female', 'female'], 'price': [199.0, 89.0, 99.0, 129.0, 79.0], 'weight': [500, 450, 300, 380, 410], 'bought': ['yes', 'no', 'yes', 'no', 'yes'] } df_raw = pd.DataFrame(data=data) df_rawUtworzenie kopii danychdf = df_raw.copy() df.info() RangeIndex: 5 entries, 0 to 4 Data columns (total 6 columns): size 5 non-null object color 5 non-null object gender 5 non-null object price 5 non-null float64 weight 5 non-null int64 bought 5 non-null object dtypes: float64(1), int64(1), object(4) memory usage: 368.0+ bytesZmiana typu danych i wstępna eksploracjafor col in ['size', 'color', 'gender', 'bought']: df[col] = df[col].astype('category') df['weight'] = df['weight'].astype('float') df.info() df.describe() df.describe().T df.describe(include=['category']).T dfLabelEncoderfrom sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(df['bought']) le.transform(df['bought']) le.fit_transform(df['bought']) le.classes_ df['bought'] = le.fit_transform(df['bought']) df le.inverse_transform(df['bought']) df['bought'] = le.inverse_transform(df['bought']) dfOneHotEncoderfrom sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder(sparse=False) encoder.fit(df[['size']]) encoder.transform(df[['size']]) encoder.categories_ encoder = OneHotEncoder(drop='first', sparse=False) encoder.fit(df[['size']]) encoder.transform(df[['size']]) dfPandas *get_dummies()*pd.get_dummies(data=df) pd.get_dummies(data=df, drop_first=True) pd.get_dummies(data=df, drop_first=True, prefix='new') pd.get_dummies(data=df, drop_first=True, prefix_sep='-') pd.get_dummies(data=df, drop_first=True, columns=['size'])Standaryzacja - StandardScalerstd() - pandas nieobciążony std() - numpy obciążonyprint(f"{df['price']}\n") print(f"Średnia: {df['price'].mean()}") print(f"Odchylenie standardowe: {df['price'].std():.4f}") (df['price'] - df['price'].mean()) / df['price'].std() def standardize(x): return (x - x.mean()) / x.std() standardize(df['price']) from sklearn.preprocessing import scale scale(df['price']) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(df[['price']]) scaler.transform(df[['price']]) scaler = StandardScaler() df[['price', 'weight']] = scaler.fit_transform(df[['price', 'weight']]) dfPrzygotowanie danych do modeludf = df_raw.copy() df le = LabelEncoder() df['bought'] = le.fit_transform(df['bought']) scaler = StandardScaler() df[['price', 'weight']] = scaler.fit_transform(df[['price', 'weight']]) df = pd.get_dummies(data=df, drop_first=True) dfMAT281 - Laboratorio N°04 Objetivos del laboratorio* Reforzar conceptos básicos de reducción de dimensionalidad. Contenidos* [Problema 01](p1) I.- Problema 01El **cáncer de mama** es una proliferación maligna de las células epiteliales que revisten los conductos o lobulillos mamarios. Es una enfermedad clonal; donde una célula individual producto de una serie de mutaciones somáticas o de línea germinal adquiere la capacidad de dividirse sin control ni orden, haciendo que se reproduzca hasta formar un tumor. 
El tumor resultante, que comienza como anomalía leve, pasa a ser grave, invade tejidos vecinos y, finalmente, se propaga a otras partes del cuerpo.El conjunto de datos se denomina `BC.csv`, el cual contine la información de distintos pacientes con tumosres (benignos o malignos) y algunas características del mismo.Las características se calculan a partir de una imagen digitalizada de un aspirado con aguja fina (FNA) de una masa mamaria. Describen las características de los núcleos celulares presentes en la imagen.Los detalles se puede encontrar en [ and : "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].Lo primero será cargar el conjunto de datos:import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA %matplotlib inline sns.set_palette("deep", desat=.6) sns.set(rc={'figure.figsize':(11.7,8.27)}) # cargar datos df = pd.read_csv(os.path.join("data","BC.csv"), sep=",") df['diagnosis'] = df['diagnosis'] .replace({'M':1,'B':0}) df.head()Basado en la información presentada responda las siguientes preguntas:1. Normalizar para las columnas numéricas con procesamiento **StandardScaler**.2. Realice un gráfico de correlación. Identifique la existencia de colinealidad.3. Realizar un ajuste PCA con **n_components = 10**. Realice un gráfico de la varianza y varianza acumulada. Interprete.4. Devuelva un dataframe con las componentes principales.5. Aplique al menos tres modelos de clasificación. Para cada modelo, calule el valor de sus métricas.#1 #normalización de las columnas con datos númericos scaler = StandardScaler() df[df.columns[2:].tolist()]=scaler.fit_transform(df[df.columns[2:].tolist()]) df.head() #2 corr = df[df.columns[1:]].corr() sns.heatmap(corr)__Respuesta__: A partir del mapa de calor, es posible notar que hay una buena correlacion entre el perimetro y radio del objeto.features = df.columns[2:].tolist() x = df.loc[:, features].values pca = PCA(n_components=10) principalComponents = pca.fit_transform(x) # gráfico varianza por componente percent_variance = np.round(pca.explained_variance_ratio_* 100, decimals =2) columns = ['PC1', 'PC2', 'PC3', 'PC4','PC5','PC6','PC7','PC8','PC9','PC10'] plt.figure(figsize=(12,4)) plt.bar(x= range(1,11), height=percent_variance, tick_label=columns) plt.ylabel('Percentate of Variance Explained') plt.xlabel('Principal Component') plt.title('PCA Scree Plot') plt.show() # gráfico varianza por la suma acumulada de los componente percent_variance_cum = np.cumsum(percent_variance) columns = ['PC1', 'PC2', 'PC3', 'PC4','PC5','PC6','PC7','PC8','PC9','PC10'] plt.figure(figsize=(12,4)) plt.bar(x= range(1,11), height=percent_variance_cum, tick_label=columns) plt.ylabel('Percentate of Variance Explained') plt.xlabel('Principal Component Cumsum') plt.title('PCA Scree Plot') plt.show() ## Interpretar PCi como PC1+...+PCi , con i=1,...,10 ##__Respuesta:__ A partir de la varianza acumulada se puede notar que en la acumulación de los primeros 5 componentes principales ya se tiene un aproximado del 82% de varianza acumulada del total, lo que implica estos componentes principales abarcan gran parte de las características del conjunto de datos, por lo que se trabajara solo con esos cinco y se omitirá el resto#4 #Creación de dataframe con las cinco componentes y el target del diagnostico pca = PCA(n_components=5) principalComponents = pca.fit_transform(x) 
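# pca.fit_transform returns an array of shape (n_samples, 5); the next step wraps it in a DataFrame with one column per principal component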
principalDataframe = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2','PC3','PC4','PC5']) targetDataframe = df[['diagnosis']] newDataframe = pd.concat([principalDataframe, targetDataframe],axis = 1) newDataframe.head() #5 Y= np.ravel(df[['diagnosis']]) X_new = pca.fit_transform(df[df.columns[2:]]) from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X_new, Y, test_size=0.2, random_state = 2) from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score #Modelos usados: LogisticRegression, RandomForestClassifier , DecisionTreeClassifier frames=pd.DataFrame({ #Dataframe de las métricas por modelo 'modelo': [], 'accuracy_score':[], 'recall_score':[], 'precision_score':[], 'f1_score':[] }) frames['modelo']=['LogisticRegression','RandomForestClassifier','DecisionTreeClassifier'] #se agregan modelos models=[LogisticRegression(),RandomForestClassifier(random_state=0),DecisionTreeClassifier(random_state=0)] acc=[] rec=[] prec=[] f1=[] metrics=[acc,rec,prec,f1] #se aplican las métricas a los 3 modelos for model in models: mod = model mod.fit(X_train, Y_train) mod_pred = mod.predict(X_test) acc.append(accuracy_score(Y_test, mod_pred)) rec.append(recall_score(Y_test, mod_pred)) prec.append(precision_score(Y_test, mod_pred)) f1.append(f1_score(Y_test, mod_pred)) #se agregan al dataframe i=0 for column in frames.columns[1:]: frames[column]=metrics[i] i+=1 frames=frames.set_index('modelo') framesData Science Unit 1 Sprint Challenge 1 Loading, cleaning, visualizing, and analyzing dataIn this sprint challenge you will look at a dataset of the survival of patients who underwent surgery for breast cancer.http://archive.ics.uci.edu/ml/datasets/Haberman%27s+SurvivalData Set Information:The dataset contains cases from a study that was conducted between 1958 and 1970 at the University of Chicago's Billings Hospital on the survival of patients who had undergone surgery for breast cancer.Attribute Information:1. Age of patient at time of operation (numerical)2. Patient's year of operation (year - 1900, numerical)3. Number of positive axillary nodes detected (numerical)4. Survival status (class attribute)-- 1 = the patient survived 5 years or longer-- 2 = the patient died within 5 yearSprint challenges are evaluated based on satisfactory completion of each part. It is suggested you work through it in order, getting each aspect reasonably working, before trying to deeply explore, iterate, or refine any given step. Once you get to the end, if you want to go back and improve things, go for it! 
Part 1 - Load and validate the data- Load the data as a `pandas` data frame.- Validate that it has the appropriate number of observations (you can check the raw file, and also read the dataset description from UCI).- Validate that you have no missing values.- Add informative names to the features.- The survival variable is encoded as 1 for surviving >5 years and 2 for not - change this to be 0 for not surviving and 1 for surviving >5 years (0/1 is a more traditional encoding of binary variables)At the end, print the first five rows of the dataset to demonstrate the above.#load data, validating shape, adding informative names import pandas as pd df= pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data', header=None, names=['age', 'op_year', 'nodes_pos', 'survival']) print(df.shape) df.head() #missing values check df.isna().sum() #no missing values #value count pre-edit print(df['survival'].value_counts()) #survival feature engineering df['survival'] = [0 if d==2 else 1 for d in df['survival']] #evaluating success df['survival'].value_counts() #success #making operation year into datetime df['op_year'] = [(d-70)*365 for d in df['op_year']] df['op_year'] = pd.to_datetime(df['op_year'], unit='D') df['op_year'] = df['op_year'].dt.yearPart 2 - Examine the distribution and relationships of the featuresExplore the data - create at least *2* tables (can be summary statistics or crosstabulations) and *2* plots illustrating the nature of the data.This is open-ended, so to remind - first *complete* this task as a baseline, then go on to the remaining sections, and *then* as time allows revisit and explore further.Hint - you may need to bin some variables depending on your chosen tables/plots.#table 1: basic stats df.describe() #table 2: operation year, survival crosstab pd.crosstab(df['survival'],df['op_year'], normalize='columns') #table 3: age, survival crosstab age_bins = pd.cut(df['age'], 5) pd.crosstab(age_bins, df['survival'], normalize='index') #plot 1: survival vs op year import matplotlib.pyplot as plt df.groupby('survival')['op_year'].value_counts().unstack(0).plot.bar() plt.xlabel('Year of Operation') plt.ylabel('Outcome Frequency (Success = 1)') plt.title('Breast Cancer Removal Success 1958-1969'); #plot 2: survival vs age df.groupby('survival')['age'].value_counts().unstack(0).plot.bar(figsize=(30,6)) plt.xlabel('Age of Patient') plt.ylabel('Outcome Frequency (Success = 1)') plt.title('Breast Cancer Removal Success by Age of Patient'); #plot 3: nodes vs age df.groupby('survival')['nodes_pos'].value_counts().unstack(0).plot.bar(figsize=(30,6)) plt.xlabel('Number of Axillary Cancerous Nodes') plt.ylabel('Outcome Frequency (Success = 1)') plt.title('Breast Cancer Removal Success by Number of Axillary Cancerous Nodes');Part 3 - Analysis and InterpretationNow that you've looked at the data, answer the following questions:- What is at least one feature that looks to have a positive relationship with survival?- What is at least one feature that looks to have a negative relationship with survival?- How are those two features related with each other, and what might that mean?Answer with text, but feel free to intersperse example code/results or refer to it from earlier. 
* Having no or few axillary (additional) cancerous nodes makes survival much more likely, regradless of age or year of operation.* Being very young or very old will decrease the chance of survival.* Unfortunately, cancer can grow much quicker than people age, so regardless of age, it is more important to get screened for Breast Cancer frequently to minimize the chance of cancer growth and maximize prompt treatment if necessary.***Subset selection:*****The paper, documentation, colab notebook can be found here:** [Paper](https://arxiv.org/abs/2008.09887), [Documentation](https://spear-decile.readthedocs.io/en/latest/subset-selection), [Colab](https://colab.research.google.com/drive/1HqkqQ8ytWjP9on3du-vVB07IQvo8Li3W?ts=60ce20fe)(Subset selection section can be found quite below in colab notebook)For subset selection, we use FacilityLocation from the [submodlib](https://github.com/decile-team/submodlib) library which is also provided by [DECILE](https://decile.org/) for submodular optimization.This notebook aims at demonstrating the use cases for the functions in spear library for subset selection. Subset selection is selecting a small subset of unlabeled data(or the data labeled by LFs, in case of supervised subset selection) so that it can be labeled and use that small labeled data(the L dataset) for effective training of JL algorithm(Cage algorithm doesn't need labeled data). Finding the best subset makes best use of the labeling efforts. Note that for this notebook demo, we need data generated from the first half(labeling part) of sms_jl.ipynb.''' User don't need to include this cell to use the package ''' import sys sys.path.append('../../') import numpy as np**Random subset selection**Here we select a random subset of instances to label. We need number of instances available and number of instances we intend to label to get a sorted numpy array of indicesfrom spear.jl import rand_subset indices = rand_subset(n_all = 20, n_instances = 5) #select 5 instances from a total of 20 instances print("indices selected by rand_subset: ", indices) print("return type of rand_subset: ", type(indices))indices selected by rand_subset: [ 0 3 4 10 12] return type of rand_subset: **Unsupervised subset selection**Here we select a unsupervised subset(for more on this, please refer [here](https://arxiv.org/abs/2008.09887) ) of instances to label. We need feature matrix(of shape (num_instaces, num_features)) and number of instances we intend to label and we get a sorted numpy array of indices. For any other arguments to unsup_subset(or to sup_subset_indices or sup_subset_save_files) please refer documentation.For this let's first get some data(feature matrix), say from sms_pickle_U.pkl(in data_pipeline folder). For more on this pickle file, please refer the other notebook named sms_jl.ipynbfrom spear.utils import get_data, get_classes U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels data_U = get_data(U_path_pkl, check_shapes=True) x_U = data_U[0] #the feature matrix print("x_U shape: ", x_U.shape) print("x_U type: ", type(x_U))x_U shape: (4500, 1024) x_U type: Now that we have feature matrix, let's select the indices to label from it. 
After labeling(through a trustable means/SMEs) those instances, whose indices(index with respect to feature matrix) are given by the following function, one can pass them as gold_labels to the PreLabels class in the process for labeling the subset-selected data and forming a pickle file.from spear.jl import unsup_subset indices = unsup_subset(x_train = x_U, n_unsup = 20) print("first 10 indices given by unsup_subset: ", indices[:10]) print("return type of unsup_subset: ", type(indices))first 10 indices given by unsup_subset: [ 455 659 806 985 1036 1438 2092 2197 2277 2283] return type of unsup_subset: **Supervised subset selection**Here we select a supervised subset(for more on this, please refer [here](https://arxiv.org/abs/2008.09887) ) of instances to label. We need * path to json file having information about classes* path to pickle file generated by feature matrix after labeling using LFs* number of instances we intend to labelwe get a sorted numpy array of indices.For this let's use sms_json.json, sms_pickle_U.pkl(in data_pipeline folder). For more on this json/pickle file, please refer the other notebook named sms_cage_jl.ipynbfrom spear.jl import sup_subset_indices U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels path_json = 'data_pipeline/JL/sms_json.json' indices = sup_subset_indices(path_json = path_json, path_pkl = U_path_pkl, n_sup = 100, qc = 0.85) print("first 10 indices given by sup_subset: ", indices[:10]) print("return type of sup_subset: ", type(indices))first 10 indices given by sup_subset: [1632 1848 3284 4403 4404 4405 4406 4407 4408 4409] return type of sup_subset: Instead of just getting indices to already labeled data(stored in pickle format, using LFs), we also provide the following utility to split the input pickle file and save two pickle files on the basis of subset selection. Make sure that the directory of the files(path_save_L and path_save_U) exists. Note that any existing contents in these pickle files will be erased. You can still get the return value of subset-selected indices.from spear.jl import sup_subset_save_files U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels path_json = 'data_pipeline/JL/sms_json.json' path_save_L = 'data_pipeline/JL/sup_subset_L.pkl' path_save_U = 'data_pipeline/JL/sup_subset_U.pkl' indices = sup_subset_save_files(path_json = path_json, path_pkl = U_path_pkl, path_save_L = path_save_L, \ path_save_U = path_save_U, n_sup = 100, qc = 0.85) print("first 10 indices given by sup_subset: ", indices[:10]) print("return type of sup_subset: ", type(indices))first 10 indices given by sup_subset: [1632 1848 3284 4403 4404 4405 4406 4407 4408 4409] return type of sup_subset: **Inserting true labels into pickle files**Now after doing supervised subset selection, say we get two pickle files path_save_L and path_save_U. Now say you labeled the instances of path_save_L and want to insert them into pickle file. So here, instead of going over the process of generating pickle through PreLabels again, you can use the following function to create a new pickle file, which now contain true labels, using path_save_L pickle file. There is no return value to this function.Make sure that path_save file, the pickle file path that is to be formed with the data in path_save_L file and true labels, is in an existing directory. 
Note that any existing contents in this pickle file(path_save) will be erased.Note that one can pass same file to path, path_save and path arguments, in which case the true labels numpy array is just replaced with what user provides in labels argument.from spear.jl import insert_true_labels path_save_L = 'data_pipeline/JL/sup_subset_L.pkl' path_save_labeled = 'data_pipeline/JL/sup_subset_labeled_L.pkl' labels = np.random.randint(0,2,[100, 1]) ''' Above is just a random association of labels used for demo. In real time user has to label the instances in path_save_L with a trustable means/SMEs and use it here. Note that the shape of labels is (num_instances, 1) and just for reference, feature_matrix(the first element in pickle file) in path_save_L has shape (num_instances, num_features). ''' insert_true_labels(path = path_save_L, path_save = path_save_labeled, labels = labels)A similar function as insert_true_labels called replace_in_pkl is also made available to make changes to pickle file. replace_in_pkl usage is demonstrated below. Make sure that path_save, the pickle file path that is to be formed with the data in path file and a new numpy array, is in an existing directory. Note that any existing contents in this pickle file(path_save) will be erased. There is no return value for this function too.Note that one can pass same file to path, path_save and path arguments, in which case the intended numpy array is just replaced with what user provides in np_array argument.It is highly advised to use insert_true_labels function for the purpose of inserting the labels since it does some other necessary changes.from spear.jl import replace_in_pkl path_labeled = 'data_pipeline/JL/sup_subset_labeled_L.pkl' # this is the previously used path, path_save_labeled path_save_altered = 'data_pipeline/JL/sup_subset_altered_L.pkl' np_array = np.random.randint(0,2,[100, 1]) #we are just replacing the labels we inserted before index = 3 ''' index refers to the element we intend to replace. Refer documentaion(specifically spear.utils.data_editor.get_data) to understand which numpy array an index value maps to(order the contents of pickle file from 0 to 8). Index should be in range [0,8]. ''' replace_in_pkl(path = path_labeled, path_save = path_save_altered, np_array = np_array, index = index)**Demonstrating the use of labeled subset-selected data**Now that we have our subset(labeled) in path_save_labeled, lets see a use case by calling a member function of JL class using path_save_labeled as our path to L data.from spear.jl import JL n_lfs = 16 n_features = 1024 n_hidden = 512 feature_model = 'nn' path_json = 'data_pipeline/JL/sms_json.json' jl = JL(path_json = path_json, n_lfs = n_lfs, n_features = n_features, feature_model = feature_model, \ n_hidden = n_hidden) L_path_pkl = path_save_labeled #Labeled data - have true labels ''' Note that I saved random labels, in file path_save_labeled, as true labels which are supposed to be labeled by a trustable means/SMEs. Hence the accuracies below can be small. 
''' U_path_pkl = path_save_U #unlabelled data - don't have true labels V_path_pkl = 'data_pipeline/JL/sms_pickle_V.pkl' #validation data - have true labels T_path_pkl = 'data_pipeline/JL/sms_pickle_T.pkl' #test data - have true labels log_path_jl_1 = 'log/JL/jl_log_1.txt' loss_func_mask = [1,1,1,1,1,1,1] batch_size = 150 lr_fm = 0.0005 lr_gm = 0.01 use_accuracy_score = False probs_fm, probs_gm = jl.fit_and_predict_proba(path_L = L_path_pkl, path_U = U_path_pkl, path_V = V_path_pkl, \ path_T = T_path_pkl, loss_func_mask = loss_func_mask, batch_size = batch_size, lr_fm = lr_fm, lr_gm = \ lr_gm, use_accuracy_score = use_accuracy_score, path_log = log_path_jl_1, return_gm = True, n_epochs = \ 100, start_len = 7,stop_len = 10, is_qt = True, is_qc = True, qt = 0.9, qc = 0.85, metric_avg = 'binary') labels = np.argmax(probs_fm, 1) print("probs_fm shape: ", probs_fm.shape) print("probs_gm shape: ", probs_gm.shape)24%|██▍ | 24/100 [00:56<02:58, 2.34s/it]Introduction to Regression w NNsPredict y from x# Import TensorFlow import tensorflow as tf import numpy as np import matplotlib.pyplot as plt print(tf.__version__)2.5.0Creating data to view and fitimport numpy as np import matplotlib.pyplot as plt import pandas as pd # Create features x = np.array([-7, -4, -1, 2, 5, 8, 11, 14]) x = tf.cast(x, dtype=tf.float32) # Create labels y = np.array([3, 6, 9, 12, 15, 18, 21, 24]) y = tf.cast(y, dtype=tf.float32) # Visualize it plt.scatter(x, y)Input and output shapes# Create a demo tensor for our housing price predicton problem house_info = tf.constant(["bedroom", "bathroom", "garage"]) house_price = tf.constant([939700]) # Turn out NumPy arrays into tensors x = tf.constant(x) y = tf.constant(y) x, y input_shape = x[0].shape output_shape = y[0].shape # If printed, they each have no shape because they are scalars(rank 0 meanning no dimensions) # If you just did x.shape and y.shape, we don't want the input and the output to have 8 neurons # We just want on x to have an output y(so 1 input and output)Modeling with TensorFlow1. Create a model2. Compile a model3. Fitting a model(let the NN to find the patterns between the X and the Y)# Set random seed tf.random.set_seed(42) #1. Create a model using the Sequential API model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) #2. Compile the model model.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(), # Stochastic Gradient Descent metrics=["mae"] ) #3. Fit the model model.fit(x, y, epochs=100, shuffle=True) # Try and make a prediction model.predict([17.])Improving our model1. Add more layers, increase the number of neurons within each hidden layer, or change the activation functions of each layer.2. Change the optimization function or perhaps the learning rate of the optimization function.3. Increase number of epochs or give the model more data points to learn from.# Let's rebuild our model #1. Create a model using the Sequential API model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) #2. Compile the model model.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mae"] ) #3. Fit the model model.fit(x, y, epochs=100, shuffle=True) # Changed number of epochs. # Try and make a prediction model.predict([17.]) # Let's see if we can make another to improve our model #1. Create the model (this time with an extra hidden layer) model = tf.keras.Sequential([ tf.keras.layers.Dense(100, activation=None), tf.keras.layers.Dense(1) ]) #2. 
Compile the model model.compile(loss="mae", optimizer=tf.keras.optimizers.Adam(lr=0.02), # Learning rate is probably the most # important hyperparameter in a NN(when it is 0.02, it is good) metrics=["mae"]) #3. Fit the model model.fit(x, y, epochs=100) # Try and make a prediction model.predict([17.]) # This has a really bad result but during training(keeping everything the same but you have a 100 unit dense relu), the mae is actually really small # This result is due to overfitting the data during training.Evaluating a modelIn practice, a typical workflow when building NNs is:``` Build a model -> fit it -> evaluate it -> tweak the model -> fit it -> evaluate it -> . . . ```# Make a bigger dataset x = tf.range(-100, 100, 4) # Make labels for the dataset y = x + 10 y # Visualize the data plt.scatter(x, y)The 3 Sets . . .* Training Set(70-80%)* Validation Set(10-15%)* Test Set(10-15%)# Split the data into train and test sets x_train = x[:40] # first 40 are training samples (80% of the data) y_train = y[:40] x_test = x[40:] # last 10 are testing samples(20% of the data) y_test = y[40:] len(x_train), len(x_test), len(y_train), len(y_test)Visualizing dataNow we've got our data in training and test sets. Visualize again.plt.figure(figsize=(10, 7)) # Plot training data in blue plt.scatter(x_train, y_train, c="b", label="Training data") plt.scatter(x_test, y_test, c="g", label="Testing data") # Show legend plt.legend() # Let's have a look at how to build a NN #1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) #2. Compile the model model.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mae"]) #3. Fit the model # model.fit(x_train, y_train, epochs=100)Visualizing the model# Let's create a model which builds automatically by defining # the input shape in the first layer tf.random.set_seed(42) # Create a model(same as above) model = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape=[1], name="input_layer"), tf.keras.layers.Dense(1, name="output_layer") # 10 is the number of neurons in that layer. # Input shape=1 means that there is one nueron in the layer before # connecting to the current layer. ], name="First_Model") #2. Compile the model model.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(learning_rate=0.01), metrics=["mae"]) model.summary()Model: "First_Model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_layer (Dense) (None, 10) 20 _________________________________________________________________ output_layer (Dense) (None, 1) 11 ================================================================= Total params: 31 Trainable params: 31 Non-trainable params: 0 _________________________________________________________________* Total params - total number of parameters in the model.* Trainable parameters - these are the parameters(patterns) the model can update as it trains.* Non-training params - these parameters aren't updated during training (this is typical when you bring in already learned model-importing a model)# Let's fit our model to the training data model.fit(x_train, y_train, epochs=100, verbose=0) # Get summary of our model model.summary() from tensorflow.keras.utils import plot_model plot_model(model=model, show_shapes=True)Visualizing out model's predictionsTo visualize predictions, it's a good idea to plot them against the ground truth labels. 
Often you'll see this in the form of y_test or y_true versus y_pred(ground truth versus your models prediction)# Make some predictions y_pred = model.predict(x_test) y_pred # Let's create a plotting function def plot_predictions(train_data=x_train, train_labels=y_train, test_data=x_test, test_labels=y_test, predictions=y_pred): plt.figure(figsize=(10, 7)) # Plot training data in blue plt.scatter(train_data, train_labels, c="b", label="Training data") # Plot testing data in green plt.scatter(test_data, test_labels, c="g", label="Testing data") # Plot models predictions in red plt.scatter(test_data, predictions, c="r", label="Predictions") # Show the legend plt.legend() plot_predictions(train_data=x_train, train_labels=y_train, test_data=x_test, test_labels=y_test, predictions=y_pred)Evaluating our model's predictions with regression evaluation metricsSince we're working on a regression, two of the main metrics:* MAE - mean absolute error, "on average, how wrong is each of my model's predictions"* MSE - mean square error, "square the average errors"# Evaluate the model on the test model.evaluate(x_test, y_test) # Calculate the mean absolute error mae = tf.metrics.mean_absolute_error(y_test, y_pred) # y_pred has one extra dimension than y_test so we have to make them # the same dimension. Have to squeeze(removes all one dimensions of an array). mae = tf.keras.losses.MAE(y_test, tf.squeeze(y_pred)) mae # Calculate the mean square error mse = tf.metrics.mean_squared_error(y_true=y_test, y_pred=tf.squeeze(y_pred)) mse # Make some functions to reuse MAE and MSE def MAE(y_true, y_pred): return tf.keras.losses.MAE(y_test, tf.squeeze(y_pred)) def MSE(y_true, y_pred): return tf.keras.losses.MSE(y_test, tf.squeeze(y_pred))Running experiments to improve our model***TWEAK TIME:***1. Get more data to train on2. Make your model larger(using more hidden nuerons or hidden layers)3. Train for longer.***Let's do 3 experiments***1. Original model2. 2 layers trained with 100 epochs3. 2 layers trained for 500 epochs. ***Model 1***# Set random seed tf.random.set_seed(42) # 1. Create the model model_1 = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) # 2. Compile the model model_1.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mae"]) # 3. Fit the model model_1.fit(x_train, y_train, epochs=100) # Make and plot the predictions for model_1 y_preds_1 = model_1.predict(x_test) plot_predictions(predictions=y_preds_1) # Calculate model_1 evaluation metrics mae_1 = MAE(y_test, y_preds_1) mse_1 = MSE(y_test, y_preds_1) mae_1, mse_1***Model 2***# Set random seed tf.random.set_seed(42) # 1. Create the model model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model model_2.compile(loss=tf.keras.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mse"]) # 3. Fit the model model_2.fit(x_train, y_train, epochs=100) # Make and plot predictions of model_2 y_preds_2 = model_2.predict(x_test) plot_predictions(predictions=y_preds_2) # Calculate model_2 evaluation metrics mae_2 = MAE(y_test, y_preds_2) mse_2 = MSE(y_test, y_preds_2) mae_2, mse_2***Model 3***# Set random seed tf.random.set_seed(42) # 1. Create the model model_3 = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Dense(1), ]) # 2. Compile the model model_3.compile(loss=tf.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mae"]) # 3. 
Fit the model model_3.fit(x_train, y_train, epochs=500) # Make and plot some predictions y_preds_3 = model_3.predict(x_test) plot_predictions(predictions=y_preds_3) # Hella overfitting # Calculate model_3 evaluation metrics mae_3 = MAE(y_test, y_preds_3) mse_3 = MSE(y_test, y_preds_3) mae_3, mse_3Comparing the results of our experimentsLet's compare the results of our experiments# Let's compare our model's results using a pandas DataFrame model_results = [["model_1", mae_1.numpy(), mse_1.numpy()], ["model_2", mae_2.numpy(), mse_2.numpy()], ["model_3", mae_3.numpy(), mse_3.numpy()]] all_results = pd.DataFrame(model_results, columns=["model", "mae", "mse"]) all_resultsTracking your experimentsOne really good habit in machine learning modeling is tracking the results of your experiments. This can be slow so there are tools to help us:1. TensorBoard2. Weights & Biases Saving our modelsSaving models lets us use them outside of Google ColabThere are two main ways to save:1. The SavedModel format2. The HDF5 format# Save model using the SavedModel format model_2.save("best_model_SavedModel_format") # Save model using the HDF5 format model_2.save("best_model_HDF5_format.h5")Loading in a saved model# Load in the SavedModel format model loaded_SavedModel_format = tf.keras.models.load_model("/content/best_model_SavedModel_format") loaded_SavedModel_format.summary() # Compare model_2 predictions with SavedModel format model predictions model_2_preds = model_2.predict(x_test) loaded_SavedModel_format_preds = loaded_SavedModel_format.predict(x_test) model_2_preds == loaded_SavedModel_format_preds # Load in the .h5 format loaded_h5_model = tf.keras.models.load_model("/content/best_model_SavedModel_format") loaded_h5_model.summary() # Check to see if loaded .h5 model predictions match model_2 model_2_preds = model_2.predict(x_test) loaded_h5_model_preds = loaded_h5_model.predict(x_test) model_2_preds == loaded_SavedModel_format_predsDownload a model (or any other model) from Google ColabIf you want to download your files from Colab:1. Go to files and right click then download2. Use code below3. Use google drive# Download a file form Google Colab from google.colab import files files.download("/content/best_model_SavedModel_format")Larger Example# Import required libraries import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt # Read in the insurance dataset insurance = pd.read_csv("https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/insurance.csv") insurance insurance["bmi"], insurance["sex"] # If you run the code above, you can see that different inputs have different types # For example, bmi is a float while sex is an object # ALL NN INPUTS HAVE TO BE NUMERICAL so to turn the objects into numbers, # you have to use one-hot encoding to numerically encode non-numbers # Let's try one-hot encode our DataFrame so they are all numbers insurance_onehot = pd.get_dummies(insurance) insurance_onehot # Create x and y values (features and labels) x = insurance_onehot.drop("charges", axis=1) y = insurance_onehot["charges"] x.head(), y.head() # Create training and test sets from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) len(x), len(x_train), len(x_test) # Build a NN (sort of like model_2 above) tf.random.set_seed(42) # 1. Create model insurance_model = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. 
Compile the model insurance_model.compile(loss=tf.losses.mae, optimizer=tf.keras.optimizers.SGD(), metrics=["mae"]) # 3. Fit the model insurance_model.fit(x_train, y_train, epochs=100) # Check the results of the insurance_model on the test data insurance_model.evaluate(x_test, y_test)9/9 [==============================] - 0s 2ms/step - loss: 7023.3291 - mae: 7023.3291Let's try and improve our model1. Add an extra layer with more hidden units and Adam optimizer2. Same as above and train for longertf.random.set_seed(42) # 1. Create the model insurance_model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model insurance_model_2.compile(loss=tf.losses.mae, optimizer=tf.keras.optimizers.Adam(), metrics=["mae"]) # 3. Fit the model insurance_model_2.fit(x_train, y_train, epochs=100) # Evaluate the larger model insurance_model_2.evaluate(x_test, y_test) tf.random.set_seed(42) # 1. Create the model insurance_model_3 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model insurance_model_3.compile(loss=tf.losses.mae, optimizer=tf.keras.optimizers.Adam(), metrics=["mae"]) # 3. Fit the model history = insurance_model_3.fit(x_train, y_train, epochs=200) # Evaluate the model insurance_model_3.evaluate(x_test, y_test) # Plot history (also known as a loss curve) pd.DataFrame(history.history).plot() plt.ylabel("loss") plt.xlabel("epochs")Preprocessing data (normalization and standardization)Neural Nets prefer normalized inputs.print(x["age"].plot(kind="hist")) # This histogram is not normal so we got to make it normal # Read in the insurance dataframe insurance = pd.read_csv("https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/insurance.csv") insurance from sklearn.compose import make_column_transformer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.model_selection import train_test_split # Create our column transformer ct = make_column_transformer( (MinMaxScaler(), ["age", "bmi", "children"]), # Turn all these values to between 0 and 1 (OneHotEncoder(handle_unknown="ignore"), ["sex", "smoker", "region"]) ) # Create x and y x = insurance.drop("charges", axis=1) y = insurance["charges"] # Build our train and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) # Fit the column transformer to our traingin data ct.fit(x_train) # Transform training and test data wiht normalization (MinMaxScaler) and OneHotEncoder) x_train_normal = ct.transform(x_train) x_test_normal = ct.transform(x_test) # What does our data look like now? print(x_train.loc[0]) # Takes the first row x_train.shape, x_train_normal.shape tf.random.set_seed(42) # 1. Create the model insurance_model_4 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model insurance_model_4.compile(loss=tf.losses.mae, optimizer=tf.keras.optimizers.Adam(), metrics=["mae"]) # 3. 
Fit the model insurance_model_4.fit(x_train_normal, y_train, epochs=100) # Evaluate our insurance model trained on normalized data insurance_model_4.evaluate(x_test_normal, y_test)9/9 [==============================] - 0s 2ms/step - loss: 3438.7844 - mae: 3438.7844[ADL1](https://github.com/iris-hep/adl-benchmarks-index): Plot the Missing ET in an event (loop over events)# This program plots an event-level variable (in this case, MET, but switching it is as easy as a dict-key change). It also demonstrates an easy use of the book-keeping cutflow tool, to keep track of the number of events processed. # The processor class bundles our data analysis together while giving us some helpful tools. It also leaves looping and chunks to the framework instead of us. class METProcessor(processor.ProcessorABC): def __init__(self): # Bins and categories for the histogram are defined here. For format, see https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Hist.html && https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Bin.html self._columns = ['MET_pt'] dataset_axis = hist.Cat("dataset", "") MET_axis = hist.Bin("MET", "MET [GeV]", 50, 0, 100) # The accumulator keeps our data chunks together for histogramming. It also gives us cutflow, which can be used to keep track of data. self._accumulator = processor.dict_accumulator({ 'MET': hist.Hist("Counts", dataset_axis, MET_axis), 'cutflow': processor.defaultdict_accumulator(int) }) @property def accumulator(self): return self._accumulator @property def columns(self): return self._columns def process(self, df): output = self.accumulator.identity() # This is where we do our actual analysis. The df has dict keys equivalent to the TTree's. dataset = df['dataset'] MET = df['MET_pt'] # We can define a new key for cutflow (in this case 'all events'). Then we can put values into it. We need += because it's per-chunk (demonstrated below) output['cutflow']['all events'] += MET.size output['cutflow']['number of chunks'] += 1 # This fills our histogram once our data is collected. Always use .flatten() to make sure the array is reduced. The output key will be as defined in __init__ for self._accumulator; the hist key ('MET=') will be defined in the bin. output['MET'].fill(dataset=dataset, MET=MET.flatten()) return output def postprocess(self, accumulator): return accumulator # Wrapper aroung dask_queue.HTCondorCluster, that allowed to launch Dask on an HTCondor cluster with a shared file system and customised for our analysis facility. # More information: https://jobqueue.dask.org/en/latest/generated/dask_jobqueue.HTCondorCluster.html client = CoffeaCasaCluster(worker_image="coffeateam/coffea-casa-analysis:0.1.50", autoscale=False, max_scale=10, tls=True) exe_args = { 'client': client, } # A convenience wrapper to submit jobs for a file set, which is a dictionary of dataset: [file list] entries. # Supports only uproot reading, via the LazyDataFrame class. # * Parameters: processor_instance (ProcessorABC) – An instance of a class deriving from ProcessorABC # * Parameters: executor (callable) – A function that takes 3 arguments: items, function, accumulator and performs some action equivalent to: `for item in items: accumulator += function(item)`. See iterative_executor, futures_executor, dask_executor, or parsl_executor for available options. # * Parameters: executor_args (dict, optional) – Arguments to pass to executor. 
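# Note: `fileset` (assumed to be defined earlier in this notebook) is the "file set"
# mentioned above, i.e. a plain dict mapping a dataset name to a list of ROOT files,
# e.g. (hypothetical paths) fileset = {'my_dataset': ['path/to/file1.root', 'path/to/file2.root']}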
output = processor.run_uproot_job(fileset, treename = 'Events', processor_instance = METProcessor(), executor = processor.dask_executor, executor_args = exe_args ) # Generates a 1D histogram from the data output to the 'MET' key. fill_opts are optional, to fill the graph (default is a line). hist.plot1d(output['MET'], overlay='dataset', fill_opts={'edgecolor': (0,0,0,0.3), 'alpha': 0.8}) # Easy way to print all cutflow dict values. Can just do print(output['cutflow']["KEY_NAME"]) for one. for key, value in output['cutflow'].items(): print(key, value)all events 53446198 number of chunks 534Importing Libraries & getting Dataimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('dataset/train.csv') # pd.set_option('display.max_columns', 500) # pd.set_option('display.max_rows', 500) # pd.set_option('display.width', 500) data.head() data.shape data.describe() data.columnsSkewness (shape of distribution of values)print("Skew : ",data.SalePrice.skew()) plt.hist(data.SalePrice, color='green', ec='black') plt.show()Skew : 1.8828757597682129--> Log-transforming the target variable when it is skewed, to improve the linearity of the datatarget = np.log(data.SalePrice) print("Skew : ",target.skew()) plt.hist(target, color='red', ec='black') plt.show()Skew : 0.12133506220520406Correlationplt.figure(figsize=(10,10)) sns.heatmap(data.corr(), yticklabels=True, cbar=True) data.corr()['SalePrice'].sort_values(ascending=False)So, OverallQual & GrLivArea are the most positively correlated with SalePrice. Analyzing OverallQualdata.OverallQual.unique() sns.scatterplot(x='OverallQual', y='SalePrice' , data=data) plt.title('SalePrice vs Overall Quality') plt.show() quality_pivot = data.pivot_table(index='OverallQual', values='SalePrice' ,aggfunc=np.median) quality_pivot.plot(kind='bar',color='lightblue' ) plt.xlabel('Overall-Quality') plt.ylabel('Sale-Price') plt.title('SalePrice vs Quality') plt.xticks(rotation=0) plt.show()Analyzing GrLivArea (Ground-Living-Area)sns.scatterplot(data=data, x="GrLivArea", y="SalePrice") plt.title('SalePrice vs Ground-Living-Area') plt.show()Analyzing Garagesns.scatterplot(data=data, x="GarageCars", y="SalePrice") plt.title('SalePrice vs Garage-Cars') plt.show() sns.scatterplot(data=data, x="GarageArea", y="SalePrice") plt.title('SalePrice vs Garage-Area') plt.show()Handling Missing Valuesdata.info() def percent_missing_data(data): missing_count = data.isnull().sum().sort_values(ascending=False) missing_percent =100 * data.isnull().sum().sort_values(ascending=False) / len(data) missing_count = pd.DataFrame(missing_count[missing_count > 0]) missing_percent = pd.DataFrame(missing_percent[missing_percent > 0]) missing_values_table = pd.concat([missing_count ,missing_percent] ,axis=1) missing_values_table.columns = ["missing_count", "missing_percent"] print('The dataset consists of {0} columns , out of which {1} have missing values.'.format(data.shape[1],str(missing_values_table.shape[0]))) return missing_values_table missing_values = percent_missing_data(data) missing_valuesThe dataset consists of 81 columns , out of which 19 have missing values.--> Dropping the columns which have more than 80% of data as missing.threshhold = 80 drop_cols = missing_values[missing_values["missing_percent"] > threshhold].index.tolist() drop_cols data = data.drop(columns=drop_cols ) data.shape missing_values = percent_missing_data(data) missing_valuesThe dataset consists of 77 columns , out of which 15 have missing values.--> Dropping the columns 
which have less than 3% of data as missing , as that is considered to be negligible and it doesn't have a big impact on our model.threshhold = 3 drop_cols_less = missing_values[missing_values["missing_percent"] < threshhold].index.tolist() drop_cols_less data = data.drop(columns=drop_cols_less) data.shape missing_values = percent_missing_data(data) missing_valuesThe dataset consists of 69 columns , out of which 7 have missing values.Imputation Garage Featuresgarage_features = [feature for feature in missing_values.index if 'Garage' in feature] print('Number of Garage Features are: {}'.format(len(garage_features))) # Missing values can be filled using mean--> (for numerical) and mode--> (for categorical) # for finding mode data['GarageType'].value_counts(), data['GarageCond'].value_counts(), data['GarageFinish'].value_counts(), data['GarageQual'].value_counts() # replacing with mode data['GarageType'] = data['GarageType'].fillna('Attchd') data['GarageCond'] = data['GarageCond'].fillna('TA') data['GarageFinish'] = data['GarageFinish'].fillna('Unf') data['GarageQual'] = data['GarageQual'].fillna('TA') # replacing with mean data['GarageYrBlt'] = data['GarageYrBlt'].fillna(data.GarageYrBlt.mean()) missing_values = percent_missing_data(data) missing_valuesThe dataset consists of 69 columns , out of which 2 have missing values.Other Featuresdata['FireplaceQu'].value_counts() data['LotFrontage'].value_counts() # Fireplace is a categorical feature, so can be filled with None. # And Lotfrontage is a numerical feature with outliers, so can be filled with median. data['FireplaceQu'] = data['FireplaceQu'].fillna('None') data['LotFrontage'] = data['LotFrontage'].fillna(data.LotFrontage.median()) missing_values = percent_missing_data(data) missing_valuesThe dataset consists of 69 columns , out of which 0 have missing values.Feature Selection Numerical Featuresdata_numeric = data.select_dtypes(include=[np.number]) data_categ = data.select_dtypes(include="object") data_numeric data_categ data_numeric.shape, data_categ.shape data_numeric_list = list(data_numeric.columns.values) data_numeric_list data_categ_list = list(data_categ.columns.values) data_categ_list # categorical to numeric for feature in data.select_dtypes(include="object"): labels_ordered = data.groupby([feature])['SalePrice'].mean().sort_values().index labels_ordered = {k: i for i, k in enumerate(labels_ordered, 0)} data[feature] = data[feature].map(labels_ordered) data.head()Feature ScalingX = data.drop(['Id', 'SalePrice'] , axis=1) y = np.log(data.SalePrice) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) X_train.shape , X_test.shapeModel Buliding Ridge Regressionfrom sklearn.linear_model import Ridge ridge_reg = Ridge(alpha=100) ridge_reg.fit(X_train ,y_train) y_pred_ridge =ridge_reg.predict(X_test) # testing the model from sklearn.metrics import r2_score ,mean_absolute_error print("Mean Absolute Error : " ,mean_absolute_error(y_test ,y_pred_ridge)) print("R2 score :" ,r2_score(y_test ,y_pred_ridge)) # finding best value for alpha and train model again alpha_list = [] mae_list = [] for alpha_val in np.arange(0.01, 200): ridge_ = Ridge(alpha=alpha_val) ridge_.fit(X_train, y_train) alpha_list.append(alpha_val) # testing the model y_pred_ridge = ridge_.predict(X_test) mae = 
mean_absolute_error(y_test, y_pred_ridge) mae_list.append(mae) alpha_list = pd.DataFrame(alpha_list) mae_list = pd.DataFrame(mae_list) alpha_mae = pd.concat([alpha_list, mae_list], axis=1) alpha_mae.columns = ["alpha_list", "mae_list"] alpha_mae[alpha_mae["mae_list"] == alpha_mae["mae_list"].min()] # training and testing once again with the best alpha value ridge_reg = Ridge(alpha=199.01) ridge_reg.fit(X_train, y_train) y_pred_ridge = ridge_reg.predict(X_test) # testing the model print("Mean Absolute Error : ", mean_absolute_error(y_test, y_pred_ridge)) print("R2 score :", r2_score(y_test, y_pred_ridge)) y_pred_ridge.min() plt.figure(figsize=(10,10)) sns.regplot(x=y_pred_ridge ,y=y_test)Lasso Regressionfrom sklearn.linear_model import Lasso lasso = Lasso(alpha=0.8) lasso.fit(X_train ,y_train) y_pred_lasso = lasso.predict(X_test) # testing the model from sklearn.metrics import r2_score, mean_absolute_error print("Mean Absolute Error : ", mean_absolute_error(y_test, y_pred_lasso)) print("R2 score :", r2_score(y_test, y_pred_lasso)) # finding best value for alpha and train model again alpha_list = [] mae_list = [] for alpha_val in np.arange(0.01, 200): lasso_ = Lasso(alpha=alpha_val) lasso_.fit(X_train, y_train) alpha_list.append(alpha_val) # testing the model y_pred_lasso = lasso_.predict(X_test) mae = mean_absolute_error(y_test, y_pred_lasso) mae_list.append(mae) alpha_list = pd.DataFrame(alpha_list) mae_list = pd.DataFrame(mae_list) alpha_mae = pd.concat([alpha_list, mae_list], axis=1) alpha_mae.columns = ["alpha_list", "mae_list"] alpha_mae[alpha_mae["mae_list"] == alpha_mae["mae_list"].min()] # training and testing once again with the best alpha value lasso = Lasso(alpha=0.01) lasso.fit(X_train, y_train) y_pred_lasso = lasso.predict(X_test) # testing the model print("Mean Absolute Error : ", mean_absolute_error(y_test, y_pred_lasso)) print("R2 score :", r2_score(y_test, y_pred_lasso)) y_pred_lasso.min() plt.figure(figsize=(10, 10)) sns.regplot(x=y_pred_lasso, y=y_test)Elastic Netfrom sklearn.preprocessing import PolynomialFeatures polynomial_converter = PolynomialFeatures(degree=2, include_bias=False) poly_features_train = polynomial_converter.fit_transform(X_train) poly_features_test = polynomial_converter.fit_transform(X_test) from sklearn.linear_model import ElasticNetCV elastic = ElasticNetCV(l1_ratio= 1,tol=0.01) elastic.fit(poly_features_train , y_train) y_pred_elastic = elastic.predict(poly_features_test) #testing model print("Mean Absolute Error : ", mean_absolute_error(y_test, y_pred_elastic)) print("R2 score :", r2_score(y_test, y_pred_elastic)) y_pred_elastic.min() plt.figure(figsize=(10, 10)) sns.regplot(x=y_pred_elastic, y=y_test)Model Evaluation# creating Dataframe to check which regression technique was the best models = pd.DataFrame({ 'Regression Model': ['Ridge', 'Lasso', 'Elastic Net'], 'MAE Score': [ mean_absolute_error(y_test, y_pred_ridge), mean_absolute_error(y_test, y_pred_lasso), mean_absolute_error(y_test, y_pred_elastic)], 'R2 Score': [ r2_score(y_test, y_pred_ridge), r2_score(y_test, y_pred_lasso), r2_score(y_test, y_pred_elastic) ]}) models.sort_values(by='MAE Score', ascending=True)Frequency-Domain Linear Regression 此示例演示如何使用numpy库中的离散傅里叶变换为时间序列构造线性回归模型。此示例中使用的时间序列是 1973 年至 1979 年美国每月意外死亡人数。import numpy as np import matplotlib.pyplot as plt ts= np.array( [9007.+0j,8106.+0j,8928.+0j,9137.+0j,10017.+0j,10826.+0j,11317.+0j,10744.+0j,9713.+0j,9938.+0j,9161.+0j,8927.+0j, 
7750.+0j,6981.+0j,8038.+0j,8422.+0j,8714.+0j,9512.+0j,10120.+0j,9823.+0j,8743.+0j,9129.+0j,8710.+0j,8680.+0j, 8162.+0j,7306.+0j,8124.+0j,7870.+0j,9387.+0j,9556.+0j,10093.+0j,9620.+0j,8285.+0j,8433.+0j,8160.+0j,8034.+0j, 7717.+0j,7461.+0j,7776.+0j,7925.+0j,8634.+0j,8945.+0j,10078.+0j,9179.+0j,8037.+0j,8488.+0j,7874.+0j,8647.+0j, 7792.+0j,6957.+0j,7726.+0j,8106.+0j,8890.+0j,9299.+0j,10625.+0j,9302.+0j,8314.+0j,8850.+0j,8265.+0j,8796.+0j, 7836.+0j,6892.+0j,7791.+0j,8129.+0j,9115.+0j,9434.+0j,10484.+0j,9827.+0j,9110.+0j,9070.+0j,8633.+0j,9240.+0j])将数据矩阵重塑为 72 x 1 的时间序列,并绘制 1973 年至 1978 年的数据。years = np.linspace(1973,1979,72) plt.plot(years,np.real(ts)) plt.plot(years,np.real(ts),'o') plt.xlabel('Year') plt.ylabel('Number of Accidental Deaths')要在时域中构建线性回归模型,必须指定余弦和正弦的频率,形成设计矩阵,并求解正态方程,以获得模型参数的最小二乘估计值。在这种情况下,使用离散傅里叶变换来检测周期性,仅保留傅里叶系数的子集,并反转变换以获得拟合的时间序列会更容易。对数据执行频谱分析,以揭示哪些频率对数据中的可变性有显著贡献。由于信号的总均值约为 9,000,并且与 0 频率下的傅里叶变换成正比,因此在频谱分析之前减去均值。这降低了0频率下较大的傅里叶系数,并使任何显着的振荡更容易检测。傅里叶变换中的频率以时间序列长度 1/72 的倒数间隔间隔。每月对数据进行采样,光谱分析中的最高频率为1个周期/2个月。tsdft = np.fft.fft(ts-np.mean(ts)) print(tsdft.shape) freq = np.linspace(0,0.5,37) plt.figure(figsize=(10,4)) plt.plot(freq*12,np.abs(tsdft[0:int(len(ts)/2)+1])) plt.plot(freq*12,np.abs(tsdft[0:int(len(ts)/2)+1]),'o') plt.ylabel("Magnitude") plt.xlabel("Cycles/Year")(72,)根据幅度,1周期/12个月的频率是数据中最重要的振荡。1周期/12个月的星等是任何其他星等的两倍多。然而,光谱分析显示,数据中还有其他周期性成分。例如,在 1 周期/12 个月的谐波(整数倍)处似乎存在周期性分量。似乎还有一个周期性成分,周期为1个周期/ 72个月。根据数据的频谱分析,使用余弦和正弦项拟合简单的线性回归模型,其频率为最重要的分量:1 周期/年(1 周期/12 个月)。确定离散傅里叶变换中对应于 1 个周期/12 个月的频率箱。由于频率间隔为 1/72,并且第一个 bin 对应于 0 频率,因此正确的 bin 是 72/12+1。这是正频率的频率箱。您还必须包括与负频率相对应的频率箱:-1 周期/12 个月。使用 MATLAB 分度时,负频率的频率箱为 72-72/12+1。创建一个 72 x 1 的零向量。用对应于 1 个周期/12 个月的正负频率的傅里叶系数填充矢量的相应元素。反转傅里叶变换并添加总体均值以获得与意外死亡数据的拟合。tsfit = np.zeros([72,1],dtype = complex) tsfit[6] = tsdft[6] tsfit[66] = tsdft[66] tsfit = np.array(tsfit.transpose()) print(tsfit.shape) tsfit = np.fft.ifft(np.round(tsfit,0))(1, 72)使用两个傅里叶系数绘制原始数据和拟合序列。mu = np.mean(ts) tsfitt = mu+tsfit plt.plot(years,np.real(tsfitt).transpose()) plt.plot(years,np.real(ts),'b') plt.plot(years,np.real(ts),marker = 'o') plt.xlabel("Year") plt.ylabel("Number of Accidental Deaths") #plt.scatter(years, np.real(ts), color='', marker='o', edgecolors='g', s=200) # 把 corlor 设置为空,通过edgecolors来控制颜色 def xcorr(x,y,timelaggy): x = x.flatten() y = y.flatten() out = np.correlate(x,y,'full') midIndex = int(len(out)/2) mid = out[midIndex] autocor = out/mid if timelaggy>len(out)/2: autocor = autocor lags = np.linspace(-len(out)/2,len(out)/2,2*len(out)+1 ) else : autocor = autocor[midIndex-timelaggy:midIndex+timelaggy+1] lags = np.linspace(-timelaggy,timelaggy,2*timelaggy+1) return autocor,lags拟合模型似乎捕获了数据的一般周期性,并支持数据以1年的周期振荡的初步结论。要评估 1 个周期/12 个月的单一频率对观测时间序列的充分程度,请形成残差。如果残差类似于白噪声序列,则具有一个频率的简单线性模型已对时间序列进行了充分的建模。要评估残差,请使用白噪声具有 95% 置信区间的自相关序列。resid = ts - tsfitt resid = np.real(resid) xc,lags = xcorr(resid,resid,50) plt.stem(lags[50:len(lags)],xc[50:len(xc)]) lconf = -1.96*np.ones([51,1])/np.math.sqrt(72) uconf = 1.96*np.ones([51,1])/np.math.sqrt(72) plt.plot(lconf) plt.plot(uconf) plt.xlabel('Lag') plt.ylabel('Correlation Coefficient') plt.title('Autocorrelation of Residuals')自相关值在多个滞后处落在 95% 置信区界之外。残差似乎不是白噪声。结论是,具有一个正弦分量的简单线性模型不能解释意外死亡次数中的所有振荡。这是可以预料的,因为光谱分析揭示了除了主要振荡之外的其他周期性成分。创建包含光谱分析所指示的其他周期项的模型将改善拟合并美白残差。拟合由三个最大傅里叶系数大小组成的模型。由于必须保留对应于负频率和正频率的傅里叶系数,因此请保留最大的 6 个指数。tsfit2dft = np.zeros([72],dtype=complex) print(tsfit2dft.shape) print(tsdft.shape) I = np.argsort(np.abs(tsdft)) I = np.flipud(I) I = I[0:6] for i in I: tsfit2dft[i] = tsdft[i](72,) 
(72,)证明仅保留72个傅里叶系数(3个频率)中的6个可以保留大部分信号能量。首先,证明保留所有傅里叶系数会产生原始信号和傅里叶变换之间的能量等价性。np.linalg.norm(1/np.math.sqrt(72)*tsdft,2)/np.linalg.norm(ts-np.mean(ts),2)该比率为 1。现在,检查仅保留 3 个频率的能量比。np.linalg.norm(1/np.math.sqrt(72)*tsfit2dft,2)/np.linalg.norm(ts-np.mean(ts),2)几乎90%的能量被保留下来。等价地,时间序列方差的90%由3个频率分量占。根据 3 个频率分量形成数据估计值。比较原始数据、具有一个频率的模型和具有 3 个频率的模型。tsfit2 = mu + np.fft.ifft(tsfit2dft) plt.plot(years,np.real(ts),'b') plt.plot(years,np.real(ts),marker = 'o',color = 'blue') plt.plot(years,np.real(tsfitt).transpose(),color = 'red') plt.plot(years,np.real(tsfit2),color = 'orange') plt.xlabel("Year") plt.ylabel("Number of Accidental Deaths")使用3个频率提高了与原始信号的拟合度。您可以通过检查 3 频模型中残差的自相关来查看这一点。resid = ts - tsfit2 resid = np.real(resid) xc,lags = xcorr(resid,resid,50) plt.stem(lags[50:len(lags)],xc[50:len(xc)]) lconf = -1.96*np.ones([51,1])/np.math.sqrt(72) uconf = 1.96*np.ones([51,1])/np.math.sqrt(72) plt.plot(lconf) plt.plot(uconf) plt.xlabel('Lag') plt.ylabel('Correlation Coefficient') plt.title('Autocorrelation of Residuals')使用3个频率导致残差更接近白噪声过程。证明从傅里叶变换获得的参数值等效于时域线性回归模型。通过形成设计矩阵并求解法线方程,找到三个频率的总体均值、余弦幅度和正弦幅度的最小二乘估计值。将拟合时间序列与从傅里叶变换获得的时间序列进行比较。X = np.zeros([72,7]) X[:,0] = 1 X[:,1] = np.cos(2*np.pi/72*np.linspace(0,71,72)).transpose() X[:,2] = np.sin(2*np.pi/72*np.linspace(0,71,72)).transpose() X[:,3] = np.cos(2*np.pi*6/72*np.linspace(0,71,72)).transpose() X[:,4] = np.sin(2*np.pi*6/72*np.linspace(0,71,72)).transpose() X[:,5] = np.cos(2*np.pi*12/72*np.linspace(0,71,72)).transpose() X[:,6] = np.sin(2*np.pi*12/72*np.linspace(0,71,72)).transpose() print(X.shape) ts2 = np.zeros([72,1]) for i in range(len(ts)): ts2[i,0] = np.real(ts[i]) #ts = np.array(ts) print(ts2.shape) beta = np.linalg.lstsq(X,ts2) bete = np.array(beta[0]) bete.shape tsfit_lm = X.dot(bete) # print(tsfit_lm.shape) # print(tsfit2.shape) aa = np.abs(tsfit_lm.ravel()-np.real(tsfit2)) print(np.max(aa))1.2732925824820995e-11__Exercise 1__# Today is Saturday, the 5th of May 2018 and my name is Julia, living in [address], # and I'm currently fullfilling an nltk book assignment.__Exercise 2__# used in sense of 'no matter how': # “However beautiful the strategy, you should occasionally look at the results.” # “However bad you think you’re going to be in that room, not being there is worse.” # used as connector: # "However, they kept on, with unabated perseverance."__Exercise 3__# (Kim arrived) or (Dana left and everyone cheered). # (Kim arrived or Dana left) and everyone cheered. import nltk, pprint, re grammar = nltk.CFG.fromstring(""" S -> NP VP S -> S Conj S VP -> "arrived" | "left" | "cheered" NP -> "Kim" | "Dana" | "everyone" Conj -> "and" | "or" """) sr_parse = nltk.ShiftReduceParser(grammar, trace=2) sent = 'Kim arrived or Dana left and everyone cheered'.split() for tree in sr_parse.parse(sent): print tree grammar = nltk.PCFG.fromstring(""" S -> NP VP [0.6] S -> S Conj S [0.4] VP -> "arrived" | "left" | "cheered" [1.0] NP -> "Kim" | "Dana" | "everyone" [1.0] Conj -> "and" | "or" [1.0] """) viterbi_parse = nltk.ViterbiParser(grammar, trace=2) sent = 'Kim arrived or Dana left and everyone cheered'.split() for tree in viterbi_parse.parse(sent): print treeInserting tokens into the most likely constituents table... Insert: |=.......| Kim Insert: |.=......| arrived Insert: |..=.....| or Insert: |...=....| Dana Insert: |....=...| left Insert: |.....=..| and Insert: |......=.| everyone Insert: |.......=| cheered Finding the most likely constituents spanning 1 text elements... 
Insert: |=.......| NP -> 'Kim' [0] Insert: |.=......| VP -> 'arrived' [0] Insert: |..=.....| Conj -> 'or' [1.0] Insert: |...=....| NP -> 'Dana' [0] Insert: |....=...| VP -> 'left' [0] Insert: |.....=..| Conj -> 'and' [0] Insert: |......=.| NP -> 'everyone' [1.0] Insert: |.......=| VP -> 'cheered' [1.0] Finding the most likely constituents spanning 2 text elements... Insert: |==......| S -> NP VP [0.6] Insert: |...==...| S -> NP VP [0.6] Insert: |......==| S -> NP VP [0.6] Finding the most likely constituents spanning 3 text elements... Finding the most likely constituents spanning 4 text elements... Finding the m[...]__Exercise 4__from nltk import Tree help(Tree)Help on class Tree in module nltk.tree: class Tree(__builtin__.list) | A Tree represents a hierarchical grouping of leaves and subtrees. | For example, each constituent in a syntax tree is represented by a single Tree. | | A tree's children are encoded as a list of leaves and subtrees, | where a leaf is a basic (non-tree) value; and a subtree is a | nested Tree. | | >>> from nltk.tree import Tree | >>> print(Tree(1, [2, Tree(3, [4]), 5])) | (1 2 (3 4) 5) | >>> vp = Tree('VP', [Tree('V', ['saw']), | ... Tree('NP', ['him'])]) | >>> s = Tree('S', [Tree('NP', ['I']), vp]) | >>> print(s) | (S (NP I) (VP (V saw) (NP him))) | >>> print(s[1]) | (VP (V saw) (NP him)) | >>> print(s[1,1]) | (NP him) | >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") | >>> s == t | True | >>> t[1][1].set_label('X') | >>> t[1][1].label() | 'X' | >>> print(t) [...]__Exercise 5__# a tree1 = Tree('NP', [Tree('JJ', ['old']), Tree('NP', [Tree('N', ['men']), Tree('Conj', ['and']), Tree('N', ['women'])])]) print(tree1) tree2 = Tree('NP', [Tree('NP', [Tree('JJ', ['old']), Tree('N', ['men'])]), Tree('Conj', ['and']), Tree('NP', ['women'])]) print(tree2) # b tree3 = Tree.fromstring("((S (NP I) (VP (VP (V shot) (NP (Det an) (N elephant))) (PP (P in) (NP (Det my) (N pajamas))))))") tree3.draw() # c tree4 = Tree('S', [Tree('NP', [Tree('Det', ['The']), Tree('N', ['woman'])]), Tree('VP', [Tree('V', ['saw']), Tree('NP', [Tree('Det', ['a']), Tree('N', ['man'])]), Tree('NP', [Tree('JJ', ['last']), Tree('N', ['Thursday'])])])]) print(tree4) tree4.draw()(S (NP (Det The) (N woman)) (VP (V saw) (NP (Det a) (N man)) (NP (JJ last) (N Thursday))))Pseudo-random number generators**** [](mailto:) Properties of PRNGs+ dimension of output - commonly 32 bits, but some have more + number of states - dimension of state space in bits - sometimes state = output, but better generators generally have output = f(state)+ period - maximum over initial states of the number of states visited before repeating - period ≤ number of states - if state has $s$ bits, period $\le 2^s$ - for some PRNGs, period is much less than number of states - for some seeds for some PRNGs, number of states visited is much less than period + $k$-distribution - suppose $\{X_i\}$ is sequence of $P$ $w$-bit integers - define $t_v(X_i)$ to be the first $v$ bits of $X_i$ - $\{X_i\}$ is $k$-distributed to $v$-bit accuracy if each of the $2^{kv}-1$ possible nonzero $kv$-bit vectors occurs equally often among the $P$ $kv$-bit vectors$$ (t_v(X_i),\,t_v(X_{i+1}), \ldots ,t_v(X_{i+k-1}))\quad (0\le ifrom __future__ import division %matplotlib inline import math import numpy as np import scipy as sp from scipy.misc import comb, factorial from scipy.optimize import brentq from scipy.stats import chisquare, norm import scipy.integrate from random import Random import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import 
Axes3D # LCG; defaults to RANDU, a particularly bad choice class lcgRandom: # defaults to RANDU: BEWARE! def __init__(self, seed=1234567890, A=0, B=65539, M = 2**31): self.state = seed self.A = A self.B = B self.M = M def getState(self): return self.state, self.A, self.B, self.M def setState(self,seed=1234567890, A=0, B=65539, M = 2**31): self.state = seed self.A = A self.B = B self.M = M def nextRandom(self): self.state = (self.A + self.B * self.state) % self.M return self.state/self.M def random(self, size=None): # vector of rands if size==None: return self.nextRandom() else: return np.reshape(np.array([self.nextRandom() for i in np.arange(np.prod(size))]), size) def randint(self, low=0, high=None, size=None): # integer between low (inclusive) and high (exclusive) if high==None: # numpy.random.randint()-like behavior high, low = low, 0 if size==None: return low + np.floor(self.nextRandom()*(high-low)) # NOT AN ACCURATE ALGORITHM! See below. else: return low + np.floor(self.random(size=size)*(high-low)) # generate triples using RANDU reps = int(10**5) randu = lcgRandom(12345) xs = np.transpose(randu.random(size=(reps,3))) # plot the triples as points in R^3 fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(xs[0],xs[1], xs[2]) plt.rcParams['figure.figsize'] = (18.0, 18.0) ax.view_init(-100,110) plt.show()Wichmann-Hill (1982)Sum of 3 LCGs. Period is 6,953,607,871,644. def WH(s1, s2, s3): s1 = (171 * s1) % 30269 s2 = (172 * s2) % 30307 s3 = (170 * s3) % 30323 r = (s1/30269 + s2/30307 + s3/30323) % 1 return [r, s1, s2, s3] The right way, the wrong way, and the Microsoft way.WH generally not considered adequate for statistics, but was (nominally) the PRNG in Excel for severalgenerations. Excel did not allow the seed to be set, so analyses were not reproducible. ![mcCullough](./Figs/notWichmannHill08.png)., 2008. Microsoft Excel's 'Not The Wichmann–Hill' random number generators_Computational Statistics & Data Analysis_, _52_, 4587–4593doi:10.1016/j.csda.2008.03.006 Mersenne Twister (MT) Matsumoto & Nishimura (1997)+ example of "twisted generalized feedback shift register"+ period $2^{19937}-1$, a Mersenne Prime+ $k$-distributed to 32-bit accuracy for all $k \in \{1, \ldots, 623\}$. + passes DIEHARD and most of TestU01 (see below)+ standard in many packages: - GNU Octave, Maple, MATLAB, Mathematica, Python, R, Stata - Apache, CMU Common Lisp, Embeddable Common Lisp, Free Pascal, GLib, PHP, GAUSS, IDL, Julia, Ruby, SageMath, Steel Bank Common Lisp, Scilab, Stata, GNU Scientific Library, GNU Multiple Precision Arithmetic Library, Microsoft Visual C++. - SPSS and SAS offer MT, as does C++ (v11 and up)+ generally considered adequate for statistics (but not for cryptography); however, will trouble that in this work, esp. for "big data"+ usual implementation has 624-dimensional state space, but TinyMT uses only 127 bits+ seeding complicated, since state is an array+ can take a while to "burn in," especially for seeds with many zeros+ output for close seed states can be close+ 2002 update improves seeding+ completely predictable from 624 successive outputs+ problems discovered in 2007 (see TestU01, below)# Python implementation of MT19937 from Wikipedia # https://en.wikipedia.org/wiki/Mersenne_Twister#Python_implementation def _int32(x): # Get the 32 least significant bits. 
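# (0xFFFFFFFF is 2**32 - 1, so the bitwise AND below truncates Python's
# arbitrary-precision integers to their low 32 bits, matching the fixed-width
# 32-bit arithmetic MT19937 is defined in terms of.)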
return int(0xFFFFFFFF & x) class MT19937: def __init__(self, seed): # Initialize the index to 0 self.index = 624 self.mt = [0] * 624 self.mt[0] = seed # Initialize the initial state to the seed for i in range(1, 624): self.mt[i] = _int32( 1812433253 * (self.mt[i - 1] ^ self.mt[i - 1] >> 30) + i) def extract_number(self): if self.index >= 624: self.twist() y = self.mt[self.index] # Right shift by 11 bits y = y ^ y >> 11 # Shift y left by 7 and take the bitwise and of 2636928640 y = y ^ y << 7 & 2636928640 # Shift y left by 15 and take the bitwise and of y and 4022730752 y = y ^ y << 15 & 4022730752 # Right shift by 18 bits y = y ^ y >> 18 self.index = self.index + 1 return _int32(y) def twist(self): for i in range(624): # Get the most significant bit and add it to the less significant # bits of the next number y = _int32((self.mt[i] & 0x80000000) + (self.mt[(i + 1) % 624] & 0x7fffffff)) self.mt[i] = self.mt[(i + 397) % 624] ^ y >> 1 if y % 2 != 0: self.mt[i] = self.mt[i] ^ 0x9908b0df self.index = 0Dot Product 구현 Use CPUimport numpy as np N = 10 a_mat = np.random.randint(5, size=[N, N]) b_mat = np.random.randint(5, size=[N, N]) ret_mat_cpu = np.dot(a_mat, b_mat) print("a_mat =") print(a_mat) print("\nb_mat =") print(b_mat) print("\nret_mat =") print(ret_mat_cpu)a_mat = [[1 4 0 4 1 4 2 2 0 3] [3 1 0 4 1 3 0 3 4 1] [2 1 1 3 3 0 1 2 3 2] [1 2 2 2 2 0 1 0 4 2] [2 4 1 1 1 0 0 3 2 2] [3 3 3 4 0 4 1 4 3 1] [3 0 3 4 4 0 3 4 2 3] [2 4 4 3 3 0 3 3 2 3] [4 3 2 2 3 4 2 0 0 1] [3 3 2 4 2 3 2 2 2 3]] b_mat = [[3 2 1 0 1 0 4 0 2 4] [3 0 2 2 4 4 1 0 2 3] [2 2 1 1 1 2 1 0 3 0] [2 2 0 2 4 1 3 3 2 1] [1 2 0 2 1 3 3 4 0 3] [1 1 4 1 1 3 3 3 3 2] [3 3 4 4 0 3 1 3 1 2] [4 1 4 1 1 4 0 2 4 1] [0 4 0 1 1 3 4 0 0 3] [2 1 4 1 1 1 3 3 1 3]] ret_mat = [[48 27 53 35 43 52 46 47 43 46] [38 39 33 23 35 45 56 34 38 46] [35 37 25 26 29 40 47 34 26 42] [26 35 19 24 27 37 43 23 19 38] [39 23 31 20 31 42 33 19 31 39] [57 44 52 33 46 63 58 38 59 51] [58 52 46 40 35 56 59 54 45 53] [62 47 50 44 45 66 54 45 48 56] [44 33 40 31 34 46 53 39 39 51] [55 44 51 38 45 58 63 48 48 58]]Use GPUimport pycuda.driver as cuda import pycuda.autoinit from pycuda import driver, compiler # Kernel code kernel_code = """ __constant__ int n; __global__ void mul(int* in_arr1, int* in_arr2, int* out_arr) { int col = threadIdx.x; int row = threadIdx.y; int sum = 0; if ( col < n && row < n ) { for ( int i=0 ; iimport re import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score jobs = pd.read_csv('indeed_jobs.csv', sep='|') jobs.head() # As the data stands, the more unique job titles are # the most likely (intuitively) to not be tech related jobs['role'].value_counts(ascending=True)[11:31] # MEAT MANAGER! (not helpful)One of the main issues is that filtering by the most commonjob titles will not help the situation as too many titlesare unique. 
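A quick way to quantify this, as a sketch against the `jobs` DataFrame loaded above:
role_counts = jobs['role'].value_counts()
print(f"{jobs['role'].nunique()} distinct titles across {len(jobs)} postings")
print(f"{(role_counts == 1).sum()} titles appear exactly once")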
The graph below shows the amount of unique titles overwhelming common titles; duplicates are in the minority.plt.plot(jobs['role'].value_counts().values); two_hundred_jobs = jobs['role'].value_counts()[:200].index with open("jobs_200.txt", "w") as f: for job in two_hundred_jobs: f.write("{}\n".format(job)) with open("tech_jobs_edited.txt", "r") as fedit: valid_jobs = fedit.read() valid_jobs = valid_jobs.split('\n') def format_string(s): ptrn = r'[^a-zA-z]' new = re.sub(ptrn, ' ', s) new = new.replace(' ', ' ') new = new.strip().lower() return new valid_jobs = list(map(format_string, valid_jobs)) # vectorize job titles and fit nearest neighbors vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_df=.50) jobs_transformed = vectorizer.fit_transform(valid_jobs) nn = NearestNeighbors(n_neighbors=1, algorithm="brute", metric="cosine") nn.fit(jobs_transformed) test_jobs = jobs['role'].value_counts(ascending=True)[:50].index test_jobs = test_jobs.to_list() zeros = np.zeros(len(test_jobs)) test_data = {'job': test_jobs, 'distance': zeros} test_df = pd.DataFrame(data=test_data) test_df['job'] = test_df['job'].apply(format_string) test_df.head() test_jobs_transformed = vectorizer.transform(test_df['job']) distances, indices = nn.kneighbors(test_jobs_transformed) test_df['distance'] = distances test_df['neighbor'] = list(map(lambda x: valid_jobs[x[0]], indices)) test_df[test_df['distance'] <= .20]As seen above, there are some jobs that pass through the filter with a distance threshold of 0.35, but not many (~12% of the job listings are not accurately designated as tech jobs). It's worth the time to see how this would affect the entire dataset:jobs['role_formatted'] = jobs['role'].apply(format_string) roles_transformed = vectorizer.transform(jobs['role_formatted']) distances, indices = nn.kneighbors(roles_transformed) jobs['distances'] = distances jobs.head() # how much data is retained when the jobs are filtered via distance? print(f"Data retained at .35: {jobs[jobs['distances'] < .35].shape[0]}") print(f"Data retained at .30: {jobs[jobs['distances'] < .30].shape[0]}") print(f"Data retained at .20: {jobs[jobs['distances'] < .20].shape[0]}") jobs = jobs[jobs['distances'] < .30] jobs['role'].value_counts()[:20] # The more unique roles were previously more likely to # be related to a field other than tech. Much improved! jobs['role'].value_counts(ascending=True)[:20]The figures above are an improvement from the retention seen in past attempts (trying to align the `search_field` column with job roles) and are more likely to produce a dataset where the jobs are _actually_ related to tech. Now, we can apply a process that I attemped earlier: organizing the actual job titles into uniform categories. The following section *Individual Jobs* is from the aforementioned previous attempt. The goal here is two merge the previous process with the following one to create a totally improved, albeit thinner dataset, more representative of the population of Indeed tech jobs Individual JobsAn explanation of what's going on here:>To answer the question of which jobs have more remote possibilities I'll take a look at the top 20 jobs. This will also help answer the question of average pay range per job. There is one issue, however: The `search_field` result column does not ensure that its value will be related to the `role` column. The job roles have a lot of variation in how they're named, and picking out groups manually would be an impractical waste of time. 
>The proposed method to alleviate this is to take a deterministic and a probabilistic model and use the text details of the job to classify the roles that are named with enough variation to make them appear to be different jobs. Here I will use a Multinomial Naive Bayes and a K-Nearest Neighbors to see which gets better accuracy. The winner will classify the job roles in a new column. Similarity score will also be stored. Scores below a threshold will be discarded.# First, some housekeeping: The software developer role and it's # corresponding search field have a spelling discrepancy jobs.loc[jobs['search_field'] == 'Software Developers', 'search_field'] = "Software Developer" # Now, the jobs where the role and search_field align exactly # will become the training data. This could be difficult to test # because some roles only have one example class_data = jobs[jobs['role'] == jobs['search_field']]Because some of the roles only have one example, I'll manually choose some of the search results that align with the `search_field` job title to rename according to the target job roles. This will make it easier to create a stratified train/test splitclass_data['role'].value_counts()[class_data['role'].value_counts() == 1] # create a new column that will be used as the new label jobs['job_label'] = jobs['role'] weban_data = jobs[jobs['search_field'] == 'Web Analytics Developer'][['role', 'details']].values weban_index = jobs[jobs['search_field'] == 'Web Analytics Developer'][['role', 'details']].index gamedev_data = jobs[jobs['search_field'] == 'Game Developer'][['role', 'details']].values gamedev_index = jobs[jobs['search_field'] == 'Game Developer'][['role', 'details']].index weban = list(zip(weban_data, weban_index)) gamedev = list(zip(gamedev_data, gamedev_index)) for value, index in weban: print(value[1], index, value[0]) print(">>>>>>>>>>>>>>>>>>>>>>") jobs.loc[1186][['role', 'details']] # create a dictionary to hold the indices of good matches for named roles job_label_dict = {'Game Developer':[243469, 243470, 243485, 243486, 243511, 243519], 'Web Analytics Developer': [1175, 1187, 1186, 1276, 4649, 4636]} for job_name, indices in job_label_dict.items(): for idx in indices: jobs.loc[idx, 'job_label'] = job_nameNow there should be enough data to split into train and testclass_data = jobs[jobs['job_label'] == jobs['search_field']][['job_label', 'details']]Creating Training DataNow the details from each job in the new training data need to be vectorized.class_data['job_label'].value_counts() # accuracy in predictions may increase if the job_label is adds to the description class_data['details'] = class_data['job_label'] + ' ' + class_data['details'] def format_text(series, inplace=False): """Format text in a way that optimizes vectorization""" if not inplace: series = series.copy() series = series.str.replace('[^a-zA-Z0-9]', ' ') series = series.str.replace('\s{2}', ' ') series = series.str.lower().str.strip() return series documents = format_text(class_data['details']) documents.iloc[3999] vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_df=.80, min_df=10) le = LabelEncoder() le.fit(class_data['job_label']) X = vectorizer.fit_transform(documents) y = le.transform(class_data['job_label']) print(X.shape) print(y.shape) # split X into train and test X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.20, random_state=42, stratify=y) print(X_train.shape, X_test.shape) print(y_train.shape, y_test.shape) neighbors = KNeighborsClassifier(algorithm="brute", metric="cosine") 
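# Note: scikit-learn's tree-based neighbor indexes do not support the cosine metric,
# so brute-force search is required here; cosine distance is the usual choice for
# comparing TF-IDF vectors.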
neighbors.fit(X_train, y_train) n_bayes = MultinomialNB(alpha=0.1) n_bayes.fit(X_train, y_train) y_pred_neighbors = neighbors.predict(X_test) y_pred_n_bayes = n_bayes.predict(X_test) print(f"KNeighbors: {accuracy_score(y_test, y_pred_neighbors)}") print(f"MultinomialNB: {accuracy_score(y_test, y_pred_n_bayes)}") jobs['role_plus_details'] = jobs['role'] + ' ' + jobs['details'] jobs['role_plus_details'] = format_text(jobs['role_plus_details']) jobs[['role', 'search_field', 'role_plus_details']] desc = jobs['role_plus_details'].iloc[23094] print(desc) desc = vectorizer.transform([desc]) le.inverse_transform(n_bayes.predict(desc)) descriptions = jobs['role_plus_details'] descriptions = vectorizer.transform(descriptions) descriptions = n_bayes.predict(descriptions) jobs['predicted_role'] = le.inverse_transform(descriptions) # top ten before label prediction for rank, role in enumerate(jobs['role'].value_counts()[:10].index): print(f"{rank+1}. {role}") # top ten after label prediction for rank, role in enumerate(jobs['predicted_role'].value_counts()[:10].index): print(f"{rank+1}. {role}") jobs[jobs['predicted_role'] == "Data Scientist"][['role', 'predicted_role','search_field']].head() jobs[jobs['predicted_role'] == "Front-End Developer"][['role', 'predicted_role','search_field']].head() jobs[jobs['predicted_role'] == "UI Designer"][['role', 'predicted_role','search_field']].head(10) jobs[jobs['predicted_role'] == "Mobile App Developer"][['role', 'predicted_role','search_field']].head()As we can see from the examples above, the `search_field` column (created during scrape) did not always align with the actual role. After removing jobs that were not likely to be tech-related and categorizing them using the description snippets, we end up with data that, while not perfect, provides a far better representation of the job landscape.jobs.to_csv('jobs_cleaned.csv', sep='|', index=False)Tensor flow developmentimport numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from matplotlib.pyplot import rcParams import os from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score %matplotlib inline rcParams['figure.figsize'] = 10,8 sns.set(style='whitegrid', palette='muted', rc={'figure.figsize': (12,8)}) # Load data as Pandas dataframe. 
We concatenate the data so that the cleaning steps are applied to both the train and test sets. train = pd.read_csv('/data/train.csv', ) test = pd.read_csv('/data/test.csv') df = pd.concat([train, test], axis=0, sort=True) df.head() def display_all(df): with pd.option_context("display.max_rows", 1000, "display.max_columns", 1000): display(df) display_all(df.describe(include='all').T) df['Survived'].value_counts() # create a new Title column in order to impute the missing ages more appropriately df['Title'] = df['Name'].str.extract(r'([A-Za-z]+)\.', expand=True) df.head() df['Title'].value_counts() # replace rare titles with more common ones mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Mrs', 'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'} df.replace({'Title': mapping}, inplace=True) # confirm that we are left with just six values df['Title'].value_counts() # impute missing Age values using the median of each Title group title_ages = dict(df.groupby('Title')['Age'].median()) # create a column of the median ages df['age_med'] = df['Title'].apply(lambda x: title_ages[x]) # replace all missing ages with the value in this column df['Age'].fillna(df['age_med'], inplace=True, ) del df['age_med'] sns.barplot(x='Title', y='Age', data=df, estimator=np.median, ci=None, palette='Blues_d') plt.xticks(rotation=45) plt.show() sns.countplot(x='Title', data=df, palette='hls', hue='Survived') plt.xticks(rotation=45) plt.show() sns.swarmplot(x='Sex', y='Fare', hue='Survived', data=df) plt.show() # impute missing Fare values using the median of each Pclass group class_fares = dict(df.groupby('Pclass')['Fare'].median()) # create a column of the median fares df['fare_med'] = df['Pclass'].apply(lambda x: class_fares[x]) # replace all missing fares with the value in this column df['Fare'].fillna(df['fare_med'], inplace=True, ) del df['fare_med'] sns.catplot(x='Embarked', y='Survived', data=df, kind='bar', palette='muted', ci=None) plt.show() # What does 'backfill' do here?
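# 'backfill' (alias 'bfill') fills a missing value with the next valid observation
# below it in the frame, i.e. each NaN in Embarked is copied from the following row.
# Only a couple of Embarked values are missing, so this is a quick (if somewhat
# arbitrary) fix; imputing the most frequent port, e.g.
# df['Embarked'].fillna(df['Embarked'].mode()[0], inplace=True), is a common alternative.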
df['Embarked'].fillna(method='backfill', inplace=True) df['Family_Size'] = df['Parch'] + df['SibSp'] display_all(df.describe(include='all').T) train = df[pd.notnull(df['Survived'])] test = df[pd.isnull(df['Survived'])] train.to_csv('/data/train_clean.csv', index=False) test.to_csv('/data/test_clean.csv', index=False)Recurrent Neural Networks:label:`sec_rnn`In :numref:`sec_language_model` we introduced $n$-gram models, in which the conditional probability of word $x_t$ at time step $t$ depends only on the $n-1$ preceding words. If we want to incorporate the possible influence of words earlier than time step $t-(n-1)$ on $x_t$, we need to increase $n$; however, the number of model parameters then grows exponentially with it, because the vocabulary $\mathcal{V}$ requires us to store $|\mathcal{V}|^n$ numbers. Hence, rather than modeling $P(x_t \mid x_{t-1}, \ldots, x_{t-n+1})$, it is preferable to use a latent variable model: $$P(x_t \mid x_{t-1}, \ldots, x_1) \approx P(x_t \mid h_{t-1}),$$ where $h_{t-1}$ is a *hidden state*, also known as a *hidden variable*, which stores the sequence information up to time step $t-1$. In general, the hidden state at time step $t$ can be computed from the current input $x_{t}$ and the previous hidden state $h_{t-1}$: $$h_t = f(x_{t}, h_{t-1}).$$:eqlabel:`eq_ht_xt`With the function $f$ in :eqref:`eq_ht_xt`, the latent variable model is not an approximation: after all, $h_t$ could simply store all the data observed so far, although such an operation could make both computation and storage expensive. Recall the hidden layers with hidden units that we discussed in :numref:`chap_perceptrons`. It is worth noting that hidden layers and hidden states are two very different concepts. As described above, a hidden layer is a layer that is hidden from view on the path from input to output, whereas a hidden state is, technically speaking, an *input* to whatever we do at a given step, and such states can only be computed from the data of previous time steps. *Recurrent neural networks* (RNNs) are neural networks with hidden states. Before introducing the RNN model, we first review the multilayer perceptron model introduced in :numref:`sec_mlp`. Neural Networks without Hidden StatesLet us take a look at a multilayer perceptron with a single hidden layer. Let the hidden layer's activation function be $\phi$. Given a minibatch of examples $\mathbf{X} \in \mathbb{R}^{n \times d}$ with batch size $n$ and $d$ inputs, the hidden layer output $\mathbf{H} \in \mathbb{R}^{n \times h}$ is computed as $$\mathbf{H} = \phi(\mathbf{X} \mathbf{W}_{xh} + \mathbf{b}_h).$$:eqlabel:`rnn_h_without_state`In :eqref:`rnn_h_without_state` we have the hidden-layer weight parameter $\mathbf{W}_{xh} \in \mathbb{R}^{d \times h}$, the bias parameter $\mathbf{b}_h \in \mathbb{R}^{1 \times h}$, and the number of hidden units $h$; broadcasting (see :numref:`subsec_broadcasting`) is applied during the summation. Next, the hidden variable $\mathbf{H}$ is used as the input of the output layer, which is given by $$\mathbf{O} = \mathbf{H} \mathbf{W}_{hq} + \mathbf{b}_q,$$ where $\mathbf{O} \in \mathbb{R}^{n \times q}$ is the output variable, $\mathbf{W}_{hq} \in \mathbb{R}^{h \times q}$ is the weight parameter, and $\mathbf{b}_q \in \mathbb{R}^{1 \times q}$ is the bias parameter of the output layer. For a classification problem, we can use $\text{softmax}(\mathbf{O})$ to compute the probability distribution over the output classes. This is entirely analogous to the regression problem solved earlier in :numref:`sec_sequence`, so we omit the details; suffice it to say that we can pick feature-label pairs at random and learn the network parameters via automatic differentiation and stochastic gradient descent. Recurrent Neural Networks with Hidden States:label:`subsec_rnn_w_hidden_states`Matters are entirely different once we have hidden states. Assume that we have a minibatch of inputs $\mathbf{X}_t \in \mathbb{R}^{n \times d}$ at time step $t$; in other words, for a minibatch of $n$ sequence examples, each row of $\mathbf{X}_t$ corresponds to one example at time step $t$ from the corresponding sequence. Next, let $\mathbf{H}_t \in \mathbb{R}^{n \times h}$ denote the hidden variable at time step $t$. Unlike the multilayer perceptron, here we save the hidden variable $\mathbf{H}_{t-1}$ from the previous time step and introduce a new weight parameter $\mathbf{W}_{hh} \in \mathbb{R}^{h \times h}$ that describes how the previous time step's hidden variable is used in the current time step. Specifically, the hidden variable of the current time step is computed from the input of the current time step together with the hidden variable of the previous time step: $$\mathbf{H}_t = \phi(\mathbf{X}_t \mathbf{W}_{xh} + \mathbf{H}_{t-1} \mathbf{W}_{hh} + \mathbf{b}_h).$$:eqlabel:`rnn_h_with_state`Compared with :eqref:`rnn_h_without_state`, :eqref:`rnn_h_with_state` adds one more term, $\mathbf{H}_{t-1} \mathbf{W}_{hh}$, thereby instantiating :eqref:`eq_ht_xt`. From the relationship between the hidden variables $\mathbf{H}_t$ and $\mathbf{H}_{t-1}$ of adjacent time steps, we know that these variables capture and retain the sequence's historical information up to the current time step, just like the state or memory of the neural network at that step; such a hidden variable is therefore called a *hidden state*. Since the hidden state uses the same definition as in the previous time step, the computation in :eqref:`rnn_h_with_state` is *recurrent*, and neural networks with hidden states based on recurrent computation are called *recurrent neural networks*. Layers that perform the computation of :eqref:`rnn_h_with_state` are called *recurrent layers*. There are many different ways to construct RNNs; RNNs with the hidden state defined by :eqref:`rnn_h_with_state` are very common. For time step $t$, the output of the output layer is computed just as in the multilayer perceptron: $$\mathbf{O}_t = \mathbf{H}_t \mathbf{W}_{hq} + \mathbf{b}_q.$$ The parameters of an RNN include the hidden-layer weights $\mathbf{W}_{xh} \in \mathbb{R}^{d \times h}, \mathbf{W}_{hh} \in \mathbb{R}^{h \times h}$ and bias $\mathbf{b}_h \in \mathbb{R}^{1 \times h}$, together with the output-layer weight $\mathbf{W}_{hq} \in \mathbb{R}^{h \times q}$ and bias $\mathbf{b}_q \in \mathbb{R}^{1 \times q}$. It is worth mentioning that the RNN always uses these same model parameters, even at different time steps; therefore, the parameterization cost of an RNN does not grow as the number of time steps increases.
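As a quick illustration of the recurrence in :eqref:`rnn_h_with_state`, the following minimal sketch (added here, not part of the original text) computes one hidden-state update with arbitrary toy dimensions and $\tanh$ standing in for the activation $\phi$:

import torch
n, d, h = 3, 1, 4                      # toy sizes: batch, inputs, hidden units (chosen arbitrarily)
X_t = torch.normal(0, 1, (n, d))       # input at time step t
H_prev = torch.normal(0, 1, (n, h))    # hidden state from time step t-1
W_xh = torch.normal(0, 1, (d, h))
W_hh = torch.normal(0, 1, (h, h))
b_h = torch.zeros((1, h))
# H_t = phi(X_t W_xh + H_{t-1} W_hh + b_h)
H_t = torch.tanh(torch.matmul(X_t, W_xh) + torch.matmul(H_prev, W_hh) + b_h)
H_t.shape  # torch.Size([3, 4])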
:numref:`fig_rnn` illustrates the computational logic of an RNN at three adjacent time steps. At any time step $t$, the computation of the hidden state can be treated as: 1. concatenating the input $\mathbf{X}_t$ at the current time step $t$ with the hidden state $\mathbf{H}_{t-1}$ of the previous time step $t-1$; 1. feeding the concatenated result into a fully connected layer with activation function $\phi$. The output of this fully connected layer is the hidden state $\mathbf{H}_t$ of the current time step $t$. In this case, the model parameters are the concatenation of $\mathbf{W}_{xh}$ and $\mathbf{W}_{hh}$, plus the bias $\mathbf{b}_h$, all of which come from :eqref:`rnn_h_with_state`. The hidden state $\mathbf{H}_t$ of the current time step $t$ will participate in computing the hidden state $\mathbf{H}_{t+1}$ of the next time step $t+1$, and $\mathbf{H}_t$ will also be fed into the fully connected output layer to compute the output $\mathbf{O}_t$ of the current time step $t$.![A recurrent neural network with a hidden state](../img/rnn.svg):label:`fig_rnn`We just mentioned that the computation of $\mathbf{X}_t \mathbf{W}_{xh} + \mathbf{H}_{t-1} \mathbf{W}_{hh}$ for the hidden state is equivalent to multiplying the concatenation of $\mathbf{X}_t$ and $\mathbf{H}_{t-1}$ by the concatenation of $\mathbf{W}_{xh}$ and $\mathbf{W}_{hh}$. Although this property can be proven mathematically, below we use a simple code snippet to illustrate it. First, we define matrices `X`, `W_xh`, `H`, and `W_hh`, whose shapes are $(3,1)$, $(1,4)$, $(3,4)$, and $(4,4)$, respectively. Multiplying `X` by `W_xh` and `H` by `W_hh` and then adding the two products, we obtain a matrix of shape $(3,4)$.import torch from d2l import torch as d2l X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4)) H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4)) torch.matmul(X, W_xh) + torch.matmul(H, W_hh)Now we concatenate matrices `X` and `H` along columns (axis 1), and matrices `W_xh` and `W_hh` along rows (axis 0). These two concatenations produce matrices of shape $(3, 5)$ and $(5, 4)$, respectively. Multiplying these two concatenated matrices, we obtain the same output matrix of shape $(3, 4)$ as above.torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))[LEGALST-123] Lab 18: Regular ExpressionsThis lab will cover the basics of regular expressions: finding, extracting and manipulating pieces of text based on specific patterns within strings.*Estimated Time: 45 minutes* Table of Contents[The Data](section data)[Overview](section context)0- [Matching with Regular Expressions](section 0)1 - [Introduction to Essential RegEx](section 1)       1 - [Special Characters](subsection 1)       2 - [Quantifiers](subsection 2)       3 - [Sets](subsection 3)       4 - [Special Sequences](subsection 4)       5 - [Groups and Logical OR](subsection 4)2- [Python RegEx Methods](section 2)3 - [Valuation Extraction](section 3) The Data You will again be working with the Old Bailey data set to practice matching and manipulating pieces of the textual data. Overview Regular Expression operations ("RegEx") are a very flexible version of the text search function that you find in most text processing software. In those regular search functions, you press `ctrl+F` (or `command+F`) and type in the search phrase you are looking for, e.g. "Congress". If your software finds an exact match for your search phrase ("Congress"), it jumps to its position in the text and you can take it from there.Thinking a bit more abstractly about this, "Congress" is nothing more than a very specific search. In it, we ask the search function to report the position where it finds a capital "C" followed by seven lower case letters ("o", "n", "g", "r", "e","s","s"), all in a specific order. Depending on your text, it may have been sufficient to let your search function look for all words starting with the capital letter "C", or for those words starting with "C" and ending with "ess". This kind of flexibility is exactly what RegEx provides.RegEx is more flexible than the customary search function as it does not restrict you to spelling out the literal word, number or phrase you are looking for. Rather, in RegEx you can describe the necessary characteristics for a match. You can enter these characteristics based on rules and special characters that make RegEx what it is.Regular expressions are useful in a variety of applications, and can be used in different programs and programming languages. We will start by learning the general components of regular expressions, using a simple online tool, Regex101.
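To make the "flexible search" idea concrete, here is a small illustration (not part of the lab itself) using Python's built-in `re` module: the pattern `C\w+ess` matches any word that starts with a capital `C` and ends with `ess`, not just the literal string "Congress".

import re
text = "Congress met today; the Countess sent her compliments to Congress."
# Literal search: matches only the exact phrase
print(re.findall(r"Congress", text))   # ['Congress', 'Congress']
# Flexible search: capital C, one or more word characters, then 'ess'
print(re.findall(r"C\w+ess", text))    # ['Congress', 'Countess', 'Congress']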
Then at the end of the workshop, we'll learn how to use regular expressions to conduct devaluation exploration on the Old Bailey dataset - we will look at how often plaintiffs had the amount they were charged with stealing reduced when they were sentenced by matching valuations in the text such as 'value 8s 6p'.__IT IS IMPORTANT to have an experimental mindset as you go through today's practice problems.__ Practice and curiosity are the keys to success! Each indiviual character expression may output a simple pattern, but you will need to explore different combinations to match more and more complicated sets of strings. Feel free to go beyond what the questions ask and test different expressions as you work through this notebook.__Dependencies__: Run the cell below. We will go over what this python library does in the Python Methods section of this lab.import re--- Introduction to Essential RegEx 0. Matching with Regular Expressions Before we dive into the different character expressions and their meanings, let's explore what it looks like to match some basic expressions. Open up [Regex101](https://regex101.com/r/Una9U7/4), an online Python regular expression editor. This editor will allow us to input any test string and practice using regular expressions while receiving verification and tips in real-time. There should already be an excerpt from the Old Bailey Set (edited, for the sake of practice problems) in the `Test String` box.You can think of the `Regular Expression` field like the familiar `ctrl+F` search box.Try typing in the following, one at a time, to the `Regular Expression` field:~~~ {.input}1. lowercase letter: d2. uppercase letter: D3. the word: lady4. the word: Lady5. the word: our6. the word: Our7. a single space8. a single period~~~__Question 1:__ What do you notice?__Your Answer:__ *Write your Answer Here:* Note that:1. RegEx is case sensitive: it matches _exactly_ what you tell it to match.2. RegEx looks for the exact order of each character you input into the expression. In the entire text, it found 'our' in 'Hon`our`able' and 'F`our`score'. However, nowhere in the text was there the exact sequence of letters O-u-r starting with a capital 'O', so 'Our' doesn't match anything.3. The space character ` ` highlights all the single spaces in the text.4. the period character `.` matches all the characters in the text, not just the periods... why?This last question takes us now to what is called __special characters__. --- 1. Special Characters Strings are composed of characters, and we are writing patterns to match specific sequences of characters.Various characters have special meaning in regular expressions. When we use these characters in an expression,we aren't matching the identical character, we're using the character as a placeholder for some other character(s)or part(s) of a string.~~~ {.input}. any single character except newline character^ start of string$ end of entire string\n new line\r carriage return\t tab~~~Note: if you want to actually match a character that happens to be a special character, you have to escape it with a backslash`\`.__Question 2:__ Try typing the following special characters into the `Regular Expression` field on the same Regex101 site. What happenswhen you type:1. `Samuel` vs. `^Samuel` vs. `Samuel$`?2. `.` vs. `\.`3. `the` vs. `th.` vs. `th..` ?__Your Answer:__ *Write your Answer Here*:1.2.3. --- 2. 
QuantifiersSome special characters refer to optional characters, to a specific number of characters, or to an open-endednumber of characters matching the preceding pattern.~~~ {.input}* 0 or more of the preceding character/expression+ 1 or more of the preceding character/expression? 0 or 1 of the preceding character/expression{n} n copies of the preceding character/expression {n,m} n to m copies of the preceding character/expression ~~~__Question 3:__ For this question, click [here](https://regex101.com/r/ssAUXx/1) to open another Regex101 page.What do the expressions `of`, `of*`, `of+`, `of?`, `of{1}`, `of{1,2}` match? Remember that the quantifier only applies to the character *immediately* preceding it. For example, the `*` in `of*` applies only to the `f`, so the expression looks for a pattern starting with __exactly one__ `o` and __0 or more__ `f`'s.__Your Answer:__ *Write your answer here:* --- 3. SetsA set by itself is merely a __collection__ of characters the computer may choose from to match a __single__ character in a pattern. We can define these sets of characters using `square brackets []`.Within a set of square brackets, you may list characters individually, e.g. `[aeiou]`, or in a range, e.g. `[A-Z]` (note that all regular expressions are case sensitive). You can also create a complement set by excluding certain characters, using `^` as the first characterin the set. The set `[^A-Za-z]` will match any character except a letter. All other special characters loosetheir special meaning inside a set, so the set `[.?]` will look for a literal period or question mark.The set will match only one character contained within that set, so to find sequences of multiple characters fromthe same set, use a quantifier like `+` or a specific number or number range `{n,m}`.~~~ {.input}[0-9] any numeric character[a-z] any lowercase alphabetic character[A-Z] any uppercase alphabetic character[aeiou] any vowel (i.e. any character within the brackets)[0-9a-z] to combine sets, list them one after another [^...] exclude specific characters~~~__Question 4:__ Let's switch back to the excerpt from the Old Bailey data set (link [here](https://regex101.com/r/Una9U7/2) for convenience). Can you write a regular expression that matches __all consonants__ in the text string? __Your Answer:__# YOUR EXPRESSION HERE--- 4. Special sequencesIf we want to define a set of all 26 characters of the alphabet, we would have to write an extremely long expression inside a square bracket. Fortunately, there are several special characters that denote special sequences. These begin with a `\` followed by a letter.Note that the uppercase version is usually the complement of the lowercase version.~~~ {.input}\d Any digit\D Any non-digit character\w Any alphanumeric character [0-9a-zA-Z_] \W Any non-alphanumeric character\s Any whitespace (space, tab, new line)\S Any non-whitespace character\b Matches the beginning or end of a word (does not consume a character)\B Matches only when the position is not the beginning or end of a word (does not consume a character)~~~__Question 5:__ Write a regular expression that matches all numbers (without punctuation marks or spaces) in the Old Bailey excerpt. Make sure you are matching whole numbers (i.e. `250`) as opposed to individual digits within the number (i.e. 
`2`, `5`, `0`).__Your Answer:__# YOUR EXPRESSION HERE__Question 6:__ Write a regular expression that matches all patterns with __at least__ 2 and __at most__ 3 digit and/or white space characters in the Old Bailey excerpt.__Your Answer:__#YOUR EXPRESSION HERE--- 5. Groups and Logical ORParentheses are used to designate groups of characters, to aid in logical conditions, and to be able to retrieve thecontents of certain groups separately.The pipe character `|` serves as a logical OR operator, to match the expression before or after the pipe. Group parenthesescan be used to indicate which elements of the expression are being operated on by the `|`.~~~ {.input}| Logical OR opeator(...) Matches whatever regular expression is inside the parentheses, and notes the start and end of a group(this|that) Matches the expression "this" or the expression "that"~~~__Question 7:__ Write an expression that matches groups of `Samuel` or `Prisoner` in the Old Bailey excerpt.__Your Answer:__# YOUR EXPRESSION HERE--- Python RegEx Methods So how do we actually use RegEx for analysis in Python?Python has a RegEx library called `re` that contains various methods so we can manipulate text using RegEx. The following are some useful Python Methods we may use for text analysis:- ``.findall(pattern, string)``: Checks whether your pattern appears somewhere inside your text (including the start). If so, it returns all phrases that matched your pattern, but not their position.- ``.sub(pattern, repl, string)``: Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl.- ``.split(pattern, string)``: Split string by the occurrences of pattern. If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list.We will only be using the `.findall()` method for the purposes of today's lab, so don't worry if the functionality of each method isn't clear right now. If you are curious about all the module content within the `re` library, take a look at the [documentation for `re`](https://docs.python.org/2/library/re.html) on your own time! --- Extracting Valuation from Old Bailey Let's apply our new RegEx knowledge to extract all valuation information from the text!The next cell simply assigns a long string containing three separate theft cases to a variable called `old_bailey`. Within the text are valuations which indicate the worth of the items stolen. We will use this string, what we can observe about the format of valuation notes in the text, and what we just learned about regular expressions to __find all instances of valuations in the text__. Valuations will look something like: `val. 4 s. 6 d.`*Note:* British Currency before 1971 was divided into pounds (`l`), shillings (`s`), and pennies (`d`) - that's what the letters after the values represent. We want to make sure to keep the values and units together when extracting valuations.__STEP 1__: We will first write expression(s) that will match the valuations.Take a moment to look for a pattern you notice across the valuations:old_bailey = """", of the Parish of St. James Westminster, was indicted for feloniously Stealing 58 Diamonds set in Silver gilt, value 250 l. the Goods of the Honourable Catherine Lady Herbert, on the 28th of July last. 
It appeared that the Jewels were put up in a Closet, which was lockt, and the Prisoner being a Coachman in the House, took his opportunity to take them; the Lady, when missing them, offered a Reward of Fourscore Pounds to any that could give any notice of it; upon enquiry, the Lady heard that a Diamond was sold on London-Bridge, and they described the Prisoner who sold it, and pursuing him, found the Prisoner at East-Ham, with all his Goods bundled up ready to be gone, and in his Trunk found all the Diamonds but one, which was found upon him in the Role of his Stocking, when searcht before the Justice. He denied the Fact, saying, He found them upon a great Heap of Rubbish, but could not prove it; and that being but a weak Excuse, the Jury found him guilty. , was indicted for stealing eleven crown pieces, twenty four half crowns, one Spanish piece, val. 4 s. 6 d. one silk purse, and 4 s. 6 d. in silver, the goods of Ann Kempster, in the dwelling house of . December 17. Acquitted. He was a second time indicted for stealing one pair of stockings, val. 6 d. the goods of . GEORGE MORGAN was indicted for that he, about the hour of ten in the night of the 10th of December , being in the dwelling-house of , feloniously did steal two hundred and three copper halfpence, five china bowls, value 30s. a tea-caddie, value 5s. a pound of green tea, value 8s. four glass rummers, value 2s. and a wooden drawer, called a till, value 6d. the property of the said George, and that he having committed the said felony about the hour of twelve at night, burglariously did break the dwelling-house of the said George to get out of the same."""You might notice that there are multiple ways in which valuations are noted. It can take the form:~~~ {.input}value 30s.val. 6 d.4 s. 6 d.~~~...and so on.Fortunately, we only care about the values and the associaed units, so the ommission or abbreviation of the word `value` can be ignored - we only care about:~~~ {.input}30s.6 d.4 s. 6 d.~~~Unfortunately, we can see that the format is still not consistent. The first one has no space between the number and unit, but the second and third do. The first and second have a single number and unit, but the third has two of each.How might you write an expression that would account for the variations in how valuations are written? Can you write a single regular expression that would match all the different forms of valuations exactly? Or do we need to have a few different expressions to account for these differnces, look for each pattern individually, and combine them somehow in the end?Real data is messy. When manipulating real data, you will inevitably encounter inconsistencies and you will need to ask yourself questions such as the above. You will have to figure out how to clean and/or work with the mess. With that in mind, click [here](https://regex101.com/r/2lal6d/1) to open up a new Regex101 with `old_bailey` already in the Test String. We will compose a regular expression, in three parts, that will account for all forms of valuations in the string above.__PART 1: Write an expression__ that matches __all__ valuations of the form `30s.` AND `6 d.`, but does not match _anything else_ (e.g. your expression should not match any dates). Try not to look at the hints on your first attempt! Save this expression __as a string__ in `exp1`._Hint1:_ Notice the structure of valuations. 
It begins with a number, then an _optional_ space, then a single letter followed by a period._Hint2:_ What _quantifier_ allows you to choose _0 or more of the previous character_?_Hint3:_ If you are still stuck, look back to the practice problems and see that we've explored/written expressions to match all components of this expression! It's just a matter of putting it together.#Your Expression Here exp1 =__PART 2:__ For the third case we found above, there are multiple values and units in the valuation. What can you add to what you came up with above so that we have another expression that matches this specific case? Save this expression as a string in `exp2`.#Your Expression Here exp2 = ...__PART 3:__ Now that you have expressions that account for the different valuation formats, combine it into one long expression that looks for (_hint_) one expression __OR__ the other. Set this expression to `final`. Be careful about the order in which you ask the computer to look for patterns (i.e. should it look for the shorter expression first, or the longer expression first?). Save this final expression as a string in `final`.#Your Expression Here final =__STEP 2:__ Now that you have the right regular expression that would match our valuations, how would you use it to _extract_ all instances of valuations from the text saved in `old_bailey`?Remember, you need to input your regular expression as a __string__ into the method.#Your Expression HereMultilayer PerceptronsMLPs aka (vanilla) feed-forward neural networks, or sometimes just neural networks1. MLPs can be viewed as generalizations of linear models that perform multiple stages of processing to come to a decision1. In an MLP weighted sums are computed multiple times from input to yield output1. Computing a series of weighted sums is mathematically the same as computing just one weighted sum, so to make this model truly more powerful than a linear model a nonlinear function is applied to the result: relu or tanh1. Data must be properly scaled before training**Disadvantages**1. Large Neural Networks take long to train1. They also require careful preprocessing of the data1. Similarly to SVMs, they work best with “homogeneous” data (Else we must use Tree based models)1. 
Tuning neural network parameters is also an art unto itself> http://scikit-learn.org/stable/modules/neural_networks_supervised.htmlimport pandas as pd from sklearn.neural_network import MLPClassifier from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline sns.set() iris_df = pd.read_csv('../data/iris.csv', dtype = {'species': 'category'}) iris_df.head(3) X = iris_df.iloc[:, :-1] y = iris_df.species X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y) scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) mlp = MLPClassifier(solver = "lbfgs", max_iter = 10000, hidden_layer_sizes = (10, 100, 200), alpha = 10).fit(X_train, y_train) mlp y_pred = mlp.predict(X_test) y_pred accuracy_score(y_test, y_pred) accuracy_score(y_train, mlp.predict(X_train)) sns.set_context('talk') plt.figure(figsize = (15, 5)) sns.heatmap(mlp.coefs_[0], cmap = 'viridis') # Heat Map of First Layer plt.title("Heat Map of First Layer Weights") plt.yticks([0.5, 1.5, 2.5, 3.5], iris_df.columns, rotation = 0) plt.xlabel("Columns in Weight Matrix") plt.ylabel("Input features") plt.show()Q.2def prime(num): if num%2==0: print(num) lst = list(range(1,2500)) lst_even = filter(prime,lst) print(list(lst_even))2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62 64 66 68 70 72 74 76 78 80 82 84 86 88 90 92 94 96 98 100 102 104 106 108 110 112 114 116 118 120 122 124 126 128 130 132 134 136 138 140 142 144 146 148 150 152 154 156 158 160 162 164 166 168 170 172 174 176 178 180 182 184 186 188 190 192 194 196 198 200 202 204 206 208 210 212 214 216 218 220 222 224 226 228 230 232 234 236 238 240 242 244 246 248 250 252 254 256 258 260 262 264 266 268 270 272 274 276 278 280 282 284 286 288 290 292 294 296 298 300 302 304 306 308 310 312 314 316 318 320 322 324 326 328 330 332 334 336 338 340 342 344 346 348 350 352 354 356 358 360 362 364 366 368 370 372 374 376 378 380 382 384 386 388 390 392 394 396 398 400 402 404 406 408 410 412 414 416 418 420 422 424 426 428 430 432 434 436 438 440 442 444 446 448 450 452 454 456 458 460 462 464 466 468 470 472 474 476 478 480 482 484 486 488 490 492 494 496 498 500 502 504 506 508 510 512 514 516 518 520 522 524 526 5[...]Q.3a=["","i am in mumbai"] result= list(map(lambda z:z.capitalize(),a)) print(result)['', 'I am in mumbai']Scikit-Learn Hierarchical Clustering Using MALL_CUSTOMERS_VIEW from DWC. 
This view has 200 records Install fedml_gcp packagepip install fedml_gcpImport Librariesfrom fedml_gcp import DwcGCP import numpy as np import pandas as pdCreate DwcGCP Instance to access class methods and train model It is expected that the bucket name passed here already exists in Cloud Storage.dwc = DwcGCP(project_name='example-project', bucket_name='')Create tar bundle of script folder so GCP can use it for trainingBefore running this cell, please ensure that the script package has all the necessary files for a training job.dwc.make_tar_bundle('HierarchicalClustering.tar.gz', 'HierarchicalClustering', 'hc/train/HierarchicalClustering.tar.gz')Create tar bundle of predictor script folder so GCP can use it for inferencingBefore running this cell, please ensure that the predictor package has all the necessary files for a training job.dwc.make_tar_bundle('HierarchicalClusteringPredictor.tar.gz', 'HierarchicalClusteringPredictor', 'hc/prediction/HierarchicalClusteringPredictor.tar.gz')Train Model GCP takes in training inputs that are specific to the training job and the environment needed.In the training inputs, we are the python module. This is the module that your script package is named, and it references the task.py file inside the script package.We are also passing args which hold the table name to get data from. Before running the following cell, you should have a config.json file in the script package with the specified values to allow you to access to DWC.You should also have the follow view `MALL_CUSTOMERS_VIEW` created in your DWC. To gather this data, please refer to https://www.kaggle.com/roshansharma/mall-customers-clustering-analysis/datatraining_inputs = { 'scaleTier': 'BASIC', 'packageUris': ['gs:///hc/train/HierarchicalClustering.tar.gz'], 'pythonModule': 'trainer.task', 'args': ['--table_name', 'MALL_CUSTOMERS_VIEW', '--table_size', '1', '--bucket_name', ''], 'region': 'us-east1', 'jobDir': 'gs://', 'runtimeVersion': '2.5', 'pythonVersion': '3.7', 'scheduling': {'maxWaitTime': '3600s', 'maxRunningTime': '7200s'} } dwc.train_model('h_clustering_final_train2', training_inputs)Deploy modeldwc.deploy(model_name='', model_location='/hc/model/', version='v1', region='us-east1', prediction_location='hc/prediction/', custom_predict='HierarchicalClusteringPredictor.tar.gz', module_name='predictor.MyPredictor')Load the datasetimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns pd.set_option('display.max_columns',None) from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import AdaBoostClassifier from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.metrics import roc_auc_score,accuracy_score,classification_report,roc_curve,confusion_matrix from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') #pd.set_option('max_rows', 6000)Problem StatementUsing the method of Boosting, classify whether or not the customer will churn```Customer churn, also known as customer attrition, customer turnover, or customer defection, is the loss of clients or customers.Telephone service companies, Internet service providers, pay-TV companies, insurance firms, and alarm monitoring services, often use customer attrition analysis and customer attrition rates as one of their key business metrics because the cost of retaining an existing customer is far less than acquiring a new one.Predictive analytics use churn prediction models that predict 
customer churn by assessing their propensity of risk to churn.Since these models generate a small prioritized list of potential defectors, they are effective at focusing customer retention marketing programs on the subset of the customer base who are most vulnerable to churn.For this project, we will be exploring the dataset of a telecom company and try to predict the customer churn```#Feature Description feat_desc=pd.read_excel('/content/drive/MyDrive/Telecom Churn Prediction with Boosting/Telecom Churn Prediction_Data_Dictionary.xlsx') feat_descLoad the Dataset# read the dataset and extract the test and train data separately df_train=pd.read_csv('/content/drive/MyDrive/Telecom Churn Prediction with Boosting/train.csv') df_test=pd.read_csv('/content/drive/MyDrive/Telecom Churn Prediction with Boosting/test.csv') #Dropping df_train Id column : train_id train_id = df_train['Id'] df_train.drop(['Id'], axis=1, inplace=True) #First 5 rows df_train.head() # Data shape and columns print(df_train.shape) print(df_train.columns) #Features Info df_train.info() # Describe data df_train.describe() #Describing data - 'object' df_train.describe(include='object')Visualize the data and preprocess#Replacing empty value in TotalCharges df_train['TotalCharges'].replace(' ',np.NaN, inplace=True) df_test['TotalCharges'].replace(' ',np.NaN, inplace=True) #Total Charges to float df_train['TotalCharges']=df_train['TotalCharges'].astype(float) df_test['TotalCharges']=df_test['TotalCharges'].astype(float) # 9 Null values in TotalCharges df_train.isnull().sum() #Filling missing values in TotalCharges df_train['TotalCharges'].fillna(df_train['TotalCharges'].mean(),inplace=True) df_test['TotalCharges'].fillna(df_test['TotalCharges'].mean(), inplace=True) # TotalCharges sns.distplot(df_train['TotalCharges']) #Target - Churn sns.countplot(x='Churn', data=df_train) #Obs: Imbalanced Classes # Encoding categorical variables encoder=LabelEncoder() #Encoding Training data df_train['Churn'] = encoder.fit_transform(df_train['Churn']) col=['gender','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection', 'TechSupport','StreamingTV','StreamingMovies','Contract','PaperlessBilling','PaymentMethod'] for i in col: df_train[i] = encoder.fit_transform(df_train[i]) df_test[i] = encoder.fit_transform(df_test[i])Model building#Splitting training and validation set X=df_train.drop(['Churn','customerID'],1) y=df_train['Churn'] print(X.head()) print(y.head()) #we split 75% of the data to training set while 25% of the data to validation X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=0) #X_train, X_valid shape print(X_train.shape) print(X_valid.shape)gender SeniorCitizen Partner Dependents tenure PhoneService \ 0 0 0 0 0 27 1 1 1 0 1 1 1 1 2 0 0 0 0 17 1 3 0 1 0 0 42 1 4 1 0 1 0 23 1 MultipleLines InternetService OnlineSecurity OnlineBackup \ 0 0 2 1 1 1 0 2 1 1 2 2 1 0 0 3 2 1 0 0 4 0 0 2 0 DeviceProtection TechSupport StreamingTV StreamingMovies Contract \ 0 1 1 1 1 2 1 [...]**Using AdaBoost Classifier**# Initialising AdaBoostClassifier ada = AdaBoostClassifier() ada.fit(X_train,y_train) # Predicting the values of validation data y_ada_pred = ada.predict(X_valid) print("Classification report - \n", classification_report(y_valid,y_ada_pred)) #Accuracy Score accuracy_score(y_valid,y_ada_pred)**Using XGBoost Classifier**# Initialising AdaBoostClassifier XGB = XGBClassifier() XGB.fit(X_train,y_train) # Predicting the values of validation 
data y_XGB_pred = XGB.predict(X_valid) print("Classification report - \n", classification_report(y_valid,y_XGB_pred)) #Accuracy Score accuracy_score(y_valid,y_XGB_pred)Prediction on the test data#the Id column in a separate variable : test_id test_id = df_test['Id'] df_test.drop(['Id'], axis=1, inplace=True) #Test data shape and columns names print(df_test.shape) print(df_test.columns) #Features selected from test data X_test=df_test.drop(['customerID'],1) #make prediction : Churn - AdaBoostClassifier y_test_pred = ada.predict(X_test) #Making df for submission subm=pd.DataFrame({"Id": test_id, "Churn": y_test_pred}) subm['Churn']=subm['Churn'].replace({0:'No', 1:'Yes'}) print(subm.head()) # To CSV for submission #subm.to_csv('Telecom_Churn_submission.csv',index=False) #from google.colab import files #files.download('Telecom_Churn_submission.csv')Consumer Complaints ProjectGithub Link (https://github.com/gyhou/consumer_complaints)For this project ***using only built-in Python libraries***, we want to know for each financial product and year- Total number of complaints- Number of companies receiving a complaint- Company with the most complaints- Highest percentage of complaints directed at a single companyData Source: [Consumer Finance Protection Bureau](https://cfpb.github.io/api/ccdb/fields.html) Downloading data# Create input and output folders !mkdir input sample_input output sample_output # Download sample data !curl https://raw.githubusercontent.com/gyhou/consumer_complaints/master/input/complaints.csv -o sample_input/complaints.csv # Download full data from CFPB !wget http://files.consumerfinance.gov/ccdb/complaints.csv.zip -O input/complaints.csv.zip !unzip input/complaints.csv -d inputReading/Loading CSVimport csv def process_csv(file_loc): """ :param file_loc: The file location to extract the csv from. Given the data for consumer complaints, identifying the number of complaints filed and how they're spread across different companies. For each financial product and year, the total number of complaints, number of companies receiving a complaint, and the highest percentage of complaints directed at a single company. Returns a dictionary: {(product_1, year_1): {company_1: number of complaints, company_2...}, (product_1, year_2): {company_1...}, ... 
(product_2, year_1)...} """ processed_data = dict() with open(file_loc) as csv_file: data = csv.DictReader(csv_file) # Check for missing columns missing_col = [] if 'Product' not in data.fieldnames: missing_col.append('Product') if 'Date received' not in data.fieldnames: missing_col.append('Date received') if 'Company' not in data.fieldnames: missing_col.append('Company') if missing_col: raise KeyError(f"The csv is missing {missing_col} column(s).") # Data sorted by product (alphabetically) and year (ascending) data = sorted(data, key=lambda row: ( row['Product'], row['Date received']), reverse=False) for row in data: product = row['Product'].lower() year = row['Date received'][:4] company = row['Company'].lower() # Check if product, year, company are valid if product in ['', 'n/a', 'none', 'nan', None] or product.isspace(): raise TypeError(f'"{product}" is not a valid product.') if company in ['', 'n/a', 'none', 'nan', None] or company.isspace(): raise TypeError(f'"{company}" is not a valid company.') try: int(year) except ValueError: raise ValueError(f'"{year}" is not a valid year.') # Set primary key (product, year) if (product, year) in processed_data: if company in processed_data[product, year]: processed_data[product, year][company] += 1 else: processed_data[product, year][company] = 1 else: processed_data[product, year] = {company: 1} return processed_dataCheck return valuefrom pathlib import Path # Sample dict data file_to_open = Path("sample_input/complaints.csv") sample_data_dict = process_csv(file_to_open) sample_data_dict # Full dict data file_to_open = Path("input/complaints.csv") data_dict = process_csv(file_to_open) # Number of unique financial product and year len(data_dict.keys())Writing/Output CSVfrom decimal import Decimal, ROUND_HALF_UP def output_csv(dict_data, save_loc): """ :param dict_data: The dictionary with the processed data to covert into csv. :param save_loc: The location to save the csv file to. Creates a csv file in the output folder. Each line in the output file list the following fields in the following order: - product (name should be written in all lowercase) - year - num_complaint: total number of complaints received for that product and year - num_company: total number of companies receiving at least one complaint for that product and year - most_complaints: company with most complaints for that product and year - highest percentage (rounded to the nearest whole number) of total complaints filed against one company for that product and year. """ with open(save_loc, 'w') as csv_file: field_names = ['product', 'year', 'num_complaint', 'num_company', 'most_complaints', 'highest_percent'] writer = csv.DictWriter(csv_file, fieldnames=field_names) writer.writeheader() for product_year, company_complaint in dict_data.items(): product = product_year[0] year = product_year[1] num_complaint = sum(company_complaint.values()) num_company = len(company_complaint) most_complaints = max(company_complaint, key=company_complaint.get) # Python round() does not round .5 up to 1 highest_percent = (Decimal(max(company_complaint.values()) / sum(company_complaint.values()) * 100). 
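# Built-in round() uses banker's rounding (round-half-to-even), e.g. round(2.5) == 2,
# whereas Decimal.quantize with ROUND_HALF_UP always rounds a .5 fraction upward.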
quantize(0, ROUND_HALF_UP)) writer.writerow({'product': product, 'year': year, 'num_complaint': num_complaint, 'num_company': num_company, 'most_complaints': most_complaints, 'highest_percent': highest_percent}) file_to_save = Path("output/report.csv") output_csv(data_dict, file_to_save)Check Result- Use Pandas to check if we get our desired resultimport pandas as pd # Original Data df = pd.read_csv('input/complaints.csv') print(df.shape) df.head() import pandas as pd # Transformed data output_csv(data_dict, 'output/report.csv') df = pd.read_csv('output/report.csv') print(df.shape) df.head()(98, 6)Testing the Code- Make sure the code checks for corrupt data!curl https://raw.githubusercontent.com/gyhou/consumer_complaints/master/testsuite/tests/my-own-tests/input/test1_complaints.csv -o sample_input/test1_complaints.csv !curl https://raw.githubusercontent.com/gyhou/consumer_complaints/master/testsuite/tests/my-own-tests/input/test2_complaints.csv -o sample_input/test2_complaints.csv !curl https://raw.githubusercontent.com/gyhou/consumer_complaints/master/testsuite/tests/my-own-tests/input/test3_complaints.csv -o sample_input/test3_complaints.csv !curl https://raw.githubusercontent.com/gyhou/consumer_complaints/master/testsuite/tests/my-own-tests/input/test4_complaints.csv -o sample_input/test4_complaints.csv from pathlib import Path # Raise KeyError file_to_open = Path("sample_input/test1_complaints.csv") # missing column data_dict1 = process_csv(file_to_open) # Raise Value Error file_to_open = Path("sample_input/test2_complaints.csv") # string - invalid year data_dict2 = process_csv(file_to_open) # Raise Value Error file_to_open = Path("sample_input/test3_complaints.csv") # blank year - invalid data_dict3 = process_csv(file_to_open) # Raise Type Error file_to_open = Path("sample_input/test4_complaints.csv") # invalid product, company data_dict4 = process_csv(file_to_open)[0,011, 23, 15, 176][0,080, 29, 3, 121][0,025, 19, 22, 291][0,026, 25, 20, 281][0,080, 12, 1, 236][0,031, 15, 19, 402][0,021, 20, 9, 331][0,080, 5, 3, 235][0,040, 25, 5, 411][0.015, 14, 5, 407][0.088, 7, 8, 465][0.085, 3, 4, 153][0.027, 5, 12, 259][0.087, 27, 4, 185][0.044, 10, 10, 425][0.088, 23, 8, 143][0.084, 23, 24, 104][0.037, 17, 10, 235][0.029, 16, 10, 339][0.034, 16, 5, 328][0.054, 2, 11, 41][0.036, 26, 15, 313][0.020, 1, 15, 400][0.017, 13, 7, 67][0.051, 8, 28, 45][0.011, 14, 1, 482][0.025, 21, 5, 326][0.090, 24, 3, 304][0.092, 23, 5, 276][0.092, 18, 25, 435][0.097, 11, 18, 34][0.094, 13, 5, 333][0.027, 16, 7, 315][0.065, 20, 9, 323][0.050, 27, 2, 187][0.069, 10, 8, 436]chromossomes = [ [0.011, 23, 15, 176], [0.080, 29, 3, 121], [0.025, 19, 22, 291], [0.026, 25, 20, 281], [0.080, 12, 1, 236], [0.031, 15, 19, 402], [0.021, 20, 9, 331], [0.080, 5, 3, 235], [0.040, 25, 5, 411], [0.015, 14, 5, 407], [0.088, 7, 8, 465], [0.085, 3, 4, 153], [0.027, 5, 12, 259], [0.087, 27, 4, 185], [0.044, 10, 10, 425], [0.088, 23, 8, 143],[0.084, 23, 24, 104],[0.037, 17, 10, 235], [0.029, 16, 10, 339], [0.034, 16, 5, 328], [0.054, 2, 11, 41], [0.036, 26, 15, 313], [0.020, 1, 15, 400], [0.017, 13, 7, 67], [0.051, 8, 28, 45], [0.011, 14, 1, 482], [0.025, 21, 5, 326], [0.090, 24, 3, 304], [0.092, 23, 5, 276], [0.092, 18, 25, 435], [0.097, 11, 18, 34], [0.094, 13, 5, 333], [0.027, 16, 7, 315], [0.065, 20, 9, 323], [0.050, 27, 2, 187], [0.069, 10, 8, 436] ] for chromossome in chromossomes: money = greedy_filter_rule(np_array, chromossome, 10000) print (chromossome, money)[0.011, 23, 15, 176] 11676.85 [0.08, 29, 3, 121] 9450.560000000001 
[0.025, 19, 22, 291] 11792.88 [0.026, 25, 20, 281] 11305.49 [0.08, 12, 1, 236] 9450.560000000001 [0.031, 15, 19, 402] 12706.57 [0.021, 20, 9, 331] 11710.680000000002 [0.08, 5, 3, 235] 9450.560000000001 [0.04, 25, 5, 411] 11874.950000000006 [0.015, 14, 5, 407] 10312.980000000003 [0.088, 7, 8, 465] 10264.24 [0.085, 3, 4, 153] 10264.24 [0.027, 5, 12, 259] 10819.199999999997 [0.087, 27, 4, 185] 10264.24 [0.044, 10, 10, 425] 10236.78 [0.088, 23, 8, 143] 10264.24 [0.084, 23, 24, 104] 10264.24 [0.037, 17, 10, 235] 10483.050000000003 [0.029, 16, 10, 339] 12895.97 [0.034, 16, 5, 328] 11488.480000000003 [0.054, 2, 11, 41] 12056.9 [0.036, 26, 15, 313] 11488.480000000003 [0.02, 1, 15, 400] 12489.520000000002 [0.017, 13, 7, 67] 11971.45 [0.051, 8, 28, 45] 12071.0 [0.011, 14, 1, 482] 13402.700000000006 [0.025, 21, 5, 326] 12404.42 [0.09, 24, 3, 304] 10000.0 [0.092, 23, 5, 276] 10000.0 [0.092, 18, 25, 435] 10000.0 [0.097, 11, 18, 34] [...]^^^ ATÉ AQUI É ATÔMICO, JÁ ESTÁ TUDO EM FUNÇÃO HEHEHE ^^^ Definições importantes que vão pro relatório1. **Risk:** Risk takes on many forms but is broadly categorized as the chance an outcome or investment's actual return will differ from the expected outcome or return. Risk includes the possibility of losing some or all of the original investment.2. **Beta:** By definition, the market has a beta of 1.0, and individual stocks are ranked according to how much they deviate from the market. A stock that swings more than the market over time has a beta above 1.0. If a stock moves less than the market, the stock's beta is less than 1.0. High-beta stocks are supposed to be riskier but provide a potential for higher returns; low-beta stocks pose less risk but also lower returns.3. **Volatilidade:** Standard deviation measures the dispersion of data from its expected value. The standard deviation is used in making an investment decision to measure the amount of historical volatility associated with an investment relative to its annual rate of return. It indicates how much the current return is deviating from its expected historical normal returns. For example, a stock that has a high standard deviation experiences higher volatility, and therefore, a higher level of risk is associated with the stock. Fuzzy 1 - Compra/Venda de uma *stock*1. **INPUTS** 1. Porcentagem de crescimento * Universo: Quanto variou o preço da *stock* desde o último pico? * Conjunto Fuzzy: baixo, médio, alto 2. Risco * Universo: Qual o risco de se investir nessa *stock* ? * Conjunto Fuzzy: baixo, médio, alto2. **OUTPUTS** 1. Porcentagem de compra/venda * Universo: Qual a porcentagem do *budget* que deve ser gasta com essa ação? * Conjunto Fuzzy: muito baixo, baixo, médio, alto e muito alto3. 
**RULES** * If the *stock* has *high growth* and *low risk* **THEN** the investment should be *very high* * If the *stock* has *high growth* and *medium risk* **THEN** the investment should be *high* * If the *stock* has *high growth* and *high risk* **THEN** the investment should be *low* * If the *stock* has *medium growth* and *low risk* **THEN** the investment should be *high* * If the *stock* has *medium growth* and *medium risk* **THEN** the investment should be *medium* * If the *stock* has *medium growth* and *high risk* **THEN** the investment should be *low* * If the *stock* has *low growth* and *low risk* **THEN** the investment should be *low* * If the *stock* has *low growth* and *medium risk* **THEN** the investment should be *very low* * If the *stock* has *low growth* and *high risk* **THEN** the investment should be *very low* define the inputs x_filter = ctrl.Antecedent(np.arange(0, 0.20, 0.01), 'x_filter') x_filter.automf(3) x_filter.view() risk_filter = ctrl.Antecedent(np.arange(0,101,1), 'risk_filter') risk_filter.automf(3) risk_filter.view()define the outputs percentage_filter = ctrl.Consequent(np.arange(0,101,1), 'percentage_filter') percentage_filter.automf(5) percentage_filter.view()define the rules (automf(3) names the growth/risk terms 'poor'/'average'/'good' for low/medium/high, and automf(5) names the output terms 'poor'/'mediocre'/'average'/'decent'/'good' for very low ... very high; each rule uses the fuzzy AND operator & so that the antecedents match the rule table above) rule1 = ctrl.Rule(x_filter['good'] & risk_filter['poor'], percentage_filter['good']) rule2 = ctrl.Rule(x_filter['good'] & risk_filter['average'], percentage_filter['decent']) rule3 = ctrl.Rule(x_filter['good'] & risk_filter['good'], percentage_filter['mediocre']) rule4 = ctrl.Rule(x_filter['average'] & risk_filter['poor'], percentage_filter['decent']) rule5 = ctrl.Rule(x_filter['average'] & risk_filter['average'], percentage_filter['average']) rule6 = ctrl.Rule(x_filter['average'] & risk_filter['good'], percentage_filter['mediocre']) rule7 = ctrl.Rule(x_filter['poor'] & risk_filter['poor'], percentage_filter['mediocre']) rule8 = ctrl.Rule(x_filter['poor'] & risk_filter['average'], percentage_filter['poor']) rule9 = ctrl.Rule(x_filter['poor'] & risk_filter['good'], percentage_filter['poor']) rule1.view() plt.show()create the fuzzy controller calc_pct_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9]) calc_pct_ctrlFUZZY TEST FOR IGU TO CHECKpct = ctrl.ControlSystemSimulation(calc_pct_ctrl) pct.input['x_filter'] = 200 pct.input['risk_filter'] = 200 pct.compute() print(pct.output['percentage_filter']) percentage_filter.view(sim=pct)20.83066751972702Fuzzy 2 - Classifying the Risk Level of a *stock*1. **INPUTS** 1. Average variation (the famous volatility) * Universe: What was the average (absolute) variation in the value of the *stocks*? * Fuzzy set: low, medium, high 2. Beta * Universe: What is the volatility of the *stock* relative to the volatility of the market? * Fuzzy set: low, high2. **OUTPUTS** 1. Risk * Universe: What is the level of risk of investing in this *stock*? * Fuzzy set: low, medium, high3.
**RULES** * Se a *stock* apresenta *baixa variação* e um *baixo valor beta* **ENTAO** o risco é baixo * Se a *stock* apresenta *baixa variação* e um *alto valor beta* **ENTAO** o risco é médio * Se a *stock* apresenta *média variação* e um *baixo valor beta* **ENTAO** o risco é médio * Se a *stock* apresenta *média variação* e um *alto valor beta* **ENTAO** o risco é alto * Se a *stock* apresenta *alta variação* e um *baixo valor beta* **ENTAO** o risco é médio * Se a *stock* apresenta *alta variação* e um *alto valor beta* **ENTAO** o risco é alto define os inputsvol = ctrl.Antecedent(np.arange(0, 15, 1), 'vol') vol.automf(3) vol.view() beta = ctrl.Antecedent(np.arange(0.95, 1.06, 0.001), 'beta') beta['low'] = fuzz.trimf(beta.universe, [0.95, 0.95, 1.05]) beta['high'] = fuzz.trimf(beta.universe, [0.95, 1.05, 1.05]) beta.view()define os outputsrisk = ctrl.Consequent(np.arange(0, 101, 1), 'risk') risk.automf(3) risk.view()define as regrasrule1 = ctrl.Rule(vol['poor'] | beta['low'], risk['poor']) rule2 = ctrl.Rule(vol['poor'] | beta['high'], risk['average']) rule3 = ctrl.Rule(vol['average'] | beta['low'], risk['average']) rule4 = ctrl.Rule(vol['average'] | beta['high'], risk['good']) rule5 = ctrl.Rule(vol['good'] | beta['low'], risk['average']) rule6 = ctrl.Rule(vol['good'] | beta['high'], risk['good']) rule1.view() plt.show() rule1 = ctrl.Rule(vol['poor'] | beta['low'], risk['poor']) rule2 = ctrl.Rule(vol['good'] | beta['high'], risk['good']) rule1.view() plt.show()cria o controlador fuzzycalc_risk_ctrl = ctrl.ControlSystem([rule1, rule2]) calc_risk_ctrlTESTE DO FUZZY PRO IGU VERrisking = ctrl.ControlSystemSimulation(calc_risk_ctrl) risking.input['vol'] = 16 risking.input['beta'] = 0.95 risking.compute() print(risking.output['risk']) vol.view(sim=risking) beta.view(sim=risking)Logistic regression----This kind of model tries to deal with binary outcomes. It's a generalized linear model: the basic ideia is that we know well how to model continuous outcome from linear regression, so we'll try to fit our binary outcome with a continuous function that, with some transformations, can be linearized given our preditor variables.Each outcome $y_i$ is believed to follow a Bernoulli distribuiton given by:$$ \Pr(y_i = 1) = p$$Our model will try to estimate a probability $p_i$ given a called $logit$ - which is modeled as log function of $p$ and it's value is obtained by a linear function of the predictors:$$ logit(\hat{p}) = log(\frac{\hat{p}}{1-\hat{p}}) = \beta X $$From the equation above, one can see that $logit^{-1}$ gives us the value of $\hat{p}$ estimated by the model. We can calculate doing some algebraic transformations: $$\frac{\hat{p}}{1-p} = \exp{(\beta X)} \\\hat{p}(1 + \exp{(\beta X)}) = \exp{(\beta X)} \\\hat{p} = \frac{1}{1 + \exp{(-\beta X)}}$$Note that this is a sigmoid function, that is steppest at $logit^{-1}(0.5)$ $-$ one can interpret it thinking about the probability of $y_i = 1$: values above $0.5$ are believed to have a positive outcome since we're try to model the expected value [1] as a "turning point", because that's no such thing as a $0.5$ outcome.[1] $E(p) = \frac{1+0}{2} = 0.5$ References- [., & . (2006). Data analysis using regression and multilevel/hierarchical models. 
Cambridge university press.](https://www.cambridge.org/core/books/data-analysis-using-regression-and-multilevelhierarchical-models/32A29531C7FD730C3A68951A17C9D983) Table of Contents1  References2  Example: One predictor# %load first_cell.py %reload_ext autoreload %autoreload 2 from paths import RAW_PATH, TREAT_PATH, OUTPUT_PATH, FIGURES_PATH from copy import deepcopy import numpy as np import pandas as pd pd.options.display.max_columns = 999 import pandas_profiling import warnings warnings.filterwarnings('ignore') # Plotting import plotly import plotly.graph_objs as go import cufflinks as cf plotly.offline.init_notebook_mode(connected=True) cf.go_offline() cf.set_config_file(offline=False, world_readable=True)Example: One predictorLet's simulate some trivial data - in this case, we have a categorical variable $x \in [1, 5]$ and a outcome $y$ that is simply $y_i = 1, x \geq 3; y = 0, otherwise.$x = np.random.random_integers(1, 5, 100) y = [1 if i > 2 else 0 for i in x] df = pd.DataFrame(data=np.array([x,y]).T, columns=['x', 'y']) df.head()We can visualize this outcomes for each value of the predictor in the graph below (note that the point are _jittered_ - we added random noise to $x$ and $y$ values so that they wont all fall into the same point).df['x_jitter'] = df['x'].apply(lambda x: x + np.random.random()/5 - np.random.random()/5) df['y_jitter'] = df['y'].apply(lambda x: x + np.random.random()/10 - np.random.random()/10) df[['x_jitter', 'y_jitter']].set_index('x_jitter').iplot(kind='scatter', mode='markers', color='blue', theme='white', title='Simulated outcomes for 5-level variable', yTitle='Outcome (y = 1 / y = 0)', xTitle='Predictor variable')Now, we can run a logistic regression model to get the probabilities of the outcome given our predictor variable:from sklearn.linear_model import LogisticRegression model = LogisticRegression(random_state=0) model.fit(df[['x']], df['y'])The model generated the following formula for our data: $logit(p) = -3.66 + 1.66 x$. 
The independent term, $-3.66$, is our _intercept_ and $1.66$ is the _slope_ for the variable x (you can check the values below).model.intercept_, model.coef_We can then visualize our logit function below:def logit(x): return model.intercept_[0] + x*model.coef_[0][0] df['logit'] = df['x'].apply(lambda x: logit(x)) df[['x', 'logit']].set_index('x').iplot(kind='scatter', mode='markers', color='orange', theme='white', title='Logistic regression', xTitle='Predictor variable', yTitle='logit(p)')Note that the logit is a linear function of the predictor with no restricted range - it can go from $(-\infty, \infty)$.We can check the probability function estimated by the model by applying the inverse transformation: $$\hat{p} = \frac{1}{1 + \exp{(-\beta X)}}$$def inv_logit(x): beta_x = logit(x) return 1 / (1 + np.e**(-beta_x)) df['inv_logit'] = df['x'].apply(lambda x: inv_logit(x)) df['inv_logit'].head() df[['x', 'inv_logit']].set_index('x').iplot(kind='scatter', mode='markers', color='green', theme='white', title='Estimated probability', xTitle='Predictor variable', yTitle='p estimated')Although $x$ is a categorical variable, we've put in more values in between $[1,5]$ to better visualize the shape of the function:xvalues = np.linspace(1, 5, 100) logit_xvalues = [logit(x) for x in xvalues] inv_logit_xvalues = [inv_logit(x) for x in xvalues] df_ex = pd.DataFrame(data=np.array([xvalues, logit_xvalues, inv_logit_xvalues]).T, columns=['x', 'logit', 'inv_logit']) df_ex[['x', 'inv_logit']].set_index('x').iplot(kind='scatter', mode='lines', color='green', theme='white', title='Estimated probability', xTitle='Predictor variable', yTitle='p estimated')This is the usual plot we see for the relationship between the estimated probability and our predictor variable, and we can see that the relationship between the two is not linear but follows the S-shaped logistic curve. * Which point separates positive from negative outcomes?$$ logit(\hat{p}) = log(\frac{\hat{p}}{1-\hat{p}}) = \beta X \\ \hat{p} = 0.5 \rightarrow log(\frac{0.5}{1-0.5}) = log(1) = 0$$So we need to find the point at which the value of $logit(\hat{p}) = \beta X = 0$. Going back to our coefficients, we can see that:$$\beta X = 0 \rightarrow -3.66 + 1.66 x = 0 \rightarrow x \approx 2.205$$logit(2.205)Great! We've found the "turning point", and we can also check that $\hat{p} = logit^{-1}(\beta X) = 0.5$:inv_logit(2.205)PaddleOCR with OpenVINOThis demo shows how to run the PaddleOCR (Lite) model on OpenVINO natively.
Instead of exporting the PaddlePaddle model to ONNX and then create the Intermediate Representation (IR) format through OpenVINO optimizer, we can now read direct from the Paddle Model without any conversions.Authors: , PhD (OpenVINO Edge AI Software Evangelist - Intel) Run Paddle Detection with OpenVINOimport os, os.path import sys import json import urllib.request import cv2 import numpy as np import paddle import math import time import collections from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork from IPython import display from PIL import Image, ImageDraw import copy import logging import imghdr from shapely.geometry import Polygon import pyclipper sys.path.append("/home/wu/openvino_notebooks/notebooks/utils") import notebook_utils as utils from pre_post_processing import *Load the Network for Paddle Detectiondet_model_dir = "/home/wu/PaddleOCR/inference/ch_ppocr_mobile_v2.0_det_infer" det_model_file_path = det_model_dir + "/inference.pdmodel" det_params_file_path = det_model_dir + "/inference.pdiparams" det_ie = IECore() det_net = det_ie.read_network(det_model_file_path)Load the Network for Paddle Recognitionrec_model_dir = "/home/wu/PaddleOCR/inference/ch_ppocr_mobile_v2.0_rec_infer" rec_model_file_path = rec_model_dir + "/inference.pdmodel" rec_params_file_path = rec_model_dir + "/inference.pdiparams" rec_ie = IECore() rec_net = rec_ie.read_network(rec_model_file_path)Preprocessing and post processing image functions for text detection and recognitiondef image_preprocess(input_image, size): img = cv2.resize(input_image, (size,size)) img = np.transpose(img, [2,0,1]) / 255 img = np.expand_dims(img, 0) ##NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True} img_mean = np.array([0.485, 0.456,0.406]).reshape((3,1,1)) img_std = np.array([0.229, 0.224, 0.225]).reshape((3,1,1)) img -= img_mean img /= img_std return img.astype(np.float32) def draw_text_det_res(dt_boxes, img_path): #src_im = cv2.imread(img_path) src_im = img_path for box in dt_boxes: box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) return src_im #Preprocess for Paddle Recognition def resize_norm_img(img, max_wh_ratio): rec_image_shape = [3, 32, 320] imgC, imgH, imgW = rec_image_shape assert imgC == img.shape[2] character_type = "ch" if character_type == "ch": imgW = int((32 * max_wh_ratio)) h, w = img.shape[:2] ratio = w / float(h) if math.ceil(imgH * ratio) > imgW: resized_w = imgW else: resized_w = int(math.ceil(imgH * ratio)) resized_image = cv2.resize(img, (resized_w, imgH)) resized_image = resized_image.astype('float32') resized_image = resized_image.transpose((2, 0, 1)) / 255 resized_image -= 0.5 resized_image /= 0.5 padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32) padding_im[:, :, 0:resized_w] = resized_image return padding_imMain processing function for PaddleOCR# Define main function for PaddleOCR def run_paddle_ocr(source=0, flip=False, use_popup=False): # create video player to play with target fps player = utils.VideoPlayer(source=source, flip=flip, fps=30) #Start video capturing player.start() try: if use_popup: title = "Press ESC to Exit" cv2.namedWindow(winname=title, flags=cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) processing_times = collections.deque() while True: # grab the frame frame1 = player.next() if frame1 is None: print("Source ended") break else: #Filp the image otherwise the recognition result is wrong frame = cv2.flip(frame1,1) image_file = frame 
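# The flipped frame is next resized and normalized to the 640x640 detection input (image_preprocess above),
# and the detection network is reshaped to that input shape before running inference on it.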
test_image = image_preprocess(image_file,640) # pdmodel might be dynamic shape, this will reshape based on the input input_key = list(det_net.input_info.items())[0][0] # 'inputs' det_net.reshape({input_key: test_image.shape}) det_exec_net = det_ie.load_network(det_net, 'CPU') # measure processing time start_time = time.time() #perform the inference step output = det_exec_net.infer({input_key: test_image}) stop_time = time.time() result_ie = list(output.values()) # Postprocessing for Paddle Detection ori_im = image_file.copy() data = {'image': image_file} data_resize = DetResizeForTest(data) data_norm = NormalizeImage(data_resize) data_list = [] keep_keys = ['image', 'shape'] for key in keep_keys: data_list.append(data[key]) img, shape_list = data_list shape_list = np.expand_dims(shape_list, axis=0) pred = result_ie[0] if isinstance(pred, paddle.Tensor): pred = pred.numpy() pred = pred[:, 0, :, :] segmentation = pred > 0.3 boxes_batch = [] for batch_index in range(pred.shape[0]): src_h, src_w, ratio_h, ratio_w = shape_list[batch_index] mask = segmentation[batch_index] boxes, scores = boxes_from_bitmap(pred[batch_index], mask,src_w, src_h) boxes_batch.append({'points': boxes}) post_result = boxes_batch dt_boxes = post_result[0]['points'] dt_boxes = filter_tag_det_res(dt_boxes, ori_im.shape) #Draw boxes on detected text src_im = draw_text_det_res(dt_boxes, image_file) processing_times.append(stop_time - start_time) # use processing times from last 200 frames if len(processing_times) > 200: processing_times.popleft() #Visualize Paddle detecion results _, f_width = frame.shape[:2] # mean processing time [ms] processing_time = np.mean(processing_times) * 1000 cv2.putText(img=src_im, text=f"Inference time: {processing_time:.1f}ms", org=(20, 40), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=f_width / 1000, color=(0, 0, 255), thickness=1, lineType=cv2.LINE_AA) # use this workaround if there is flickering if use_popup: cv2.imshow(winname=title, mat=frame) key = cv2.waitKey(1) # escape = 27 if key == 27: break else: # encode numpy array to jpg _, encoded_img = cv2.imencode(ext=".jpg", img=src_im, params=[cv2.IMWRITE_JPEG_QUALITY, 100]) # create IPython image i = display.Image(data=encoded_img) # display the image in this notebook display.clear_output(wait=True) display.display(i) #Preprocess detection results for recognition dt_boxes = sorted_boxes(dt_boxes) img_crop_list = [] if dt_boxes != []: for bno in range(len(dt_boxes)): tmp_box = copy.deepcopy(dt_boxes[bno]) img_crop = get_rotate_crop_image(ori_im, tmp_box) img_crop_list.append(img_crop) #Recognition starts from here img_num = len(img_crop_list) # Calculate the aspect ratio of all text bars width_list = [] for img in img_crop_list: width_list.append(img.shape[1] / float(img.shape[0])) # Sorting can speed up the recognition process indices = np.argsort(np.array(width_list)) rec_res = [['', 0.0]] * img_num rec_batch_num = 6 batch_num = rec_batch_num rec_processing_times = 0 #For each detected text box, run inference for text recognition for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] max_wh_ratio = 0 for ino in range(beg_img_no, end_img_no): h, w = img_crop_list[indices[ino]].shape[0:2] wh_ratio = w * 1.0 / h max_wh_ratio = max(max_wh_ratio, wh_ratio) for ino in range(beg_img_no, end_img_no): norm_img = resize_norm_img(img_crop_list[indices[ino]],max_wh_ratio) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) norm_img_batch = np.concatenate(norm_img_batch) 
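# All text crops in this batch are now resized and padded to a common width and stacked into a single array;
# next the recognition network is reshaped to this batch shape, loaded on CPU, and run on it.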
norm_img_batch = norm_img_batch.copy() # pdmodel might be dynamic shape, this will reshape based on the input input_key = list(rec_net.input_info.items())[0][0] # 'inputs' rec_net.reshape({input_key: norm_img_batch.shape}) #Load the Paddle recognition network on CPU rec_exec_net = rec_ie.load_network(rec_net, 'CPU') #Run inference for text recognition for index in range(len(norm_img_batch)): output = rec_exec_net.infer({input_key: norm_img_batch}) result_ie = list(output.values()) preds = result_ie[0] #Postprocessing recognition results postprocess_op = build_post_process(postprocess_params) rec_result = postprocess_op(preds) for rno in range(len(rec_result)): rec_res[indices[beg_img_no + rno]] = rec_result[rno] print(rec_res) # ctrl-c except KeyboardInterrupt: print("Interrupted") # any different error except RuntimeError as e: print(e) finally: # stop capturing player.stop() if use_popup: cv2.destroyAllWindows()Run Live PaddleOCR with OpenVINOrun_paddle_ocr(source=0, flip=True, use_popup=True) #Test OCR results on video file #video_file = "test1.mp4" #source = video_file #player = utils.VideoPlayer(source=source, flip=False, fps=30)DCEGM Upper Envelope ["The endogenous grid method for discrete-continuous dynamic choice models with (or without) taste shocks"](https://onlinelibrary.wiley.com/doi/abs/10.3982/QE643)For the following badges: GitHub does not allow click-through redirects; right-click to get the link, then paste into navigation bar[![badge](https://img.shields.io/badge/Launch%20using%20-Econ--ARK-blue)](https://econ-ark.org/materials/dcegm-upper-envelopelaunch)This notebook provides a simple introduction to the "DCEGM" algorithm . DCEGM extends the EGM method proposed in to problems with both continuous (e.g. consumption) and discrete (e.g. retirement) decisions.The main challenge for the EGM algorithm in discrete-continuous problems is that the discrete decisions generate "kinks" in the value function, making it non-concave and rendering the first order condition used by EGM a necessary but not sufficient for optimality. In practice, this causes the EGM inversion step to produce (resource, consumption) points that are not optimal. DCEGM incorporates a method to filter the points produced by EGM so that only the truly optimal ones are used in producing an approximation to the solution.This filtering process consists mainly of computing "upper-envelopes" of the candidate points: lines that are made up only of the points with the higher values.This notebook presents HARK's tool for calculating upper-envelopes and then uses it to solve a simple three-period discrete-continuous problem using DCEGM. 
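To make the idea of an upper envelope concrete before turning to HARK's implementation, here is a toy sketch (plain NumPy with purely illustrative values, not HARK's code) that takes two candidate value lines defined on a common grid and keeps, at each grid point, whichever segment gives the higher value:
import numpy as np
m = np.linspace(0, 1, 11)                  # common resource grid
v_a = 0.8 * m                              # candidate segment A (illustrative values)
v_b = 0.5 + 0.2 * m                        # candidate segment B (illustrative values)
v_upper = np.maximum(v_a, v_b)             # upper envelope: pointwise best value
which = np.where(v_a >= v_b, 'A', 'B')     # which segment is optimal at each grid point
print(np.column_stack([m.round(2), v_upper.round(2), which]))
HARK's `calc_multiline_envelope`, used below, does the same thing after interpolating each non-decreasing segment onto a common grid, and also returns the consumption values associated with the winning segment.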
Upper envelopesStart by importing the tools.# imports import numpy as np import matplotlib.pyplot as plt # here for now, should be # from HARK import discontools or whatever name is chosen from HARK.interpolation import LinearInterp from HARK.dcegm import calc_segments, calc_multiline_envelope, calc_prim_kinkApplying EGM to value functions with kinks, as the ones that result from discrete-continuous problems, will often result in grids for market resources that are not monotonic and candidate choices at those points that are sub-optimal.Consider the following example output.m_egm = np.array([0.0, 0.04, 0.25, 0.15, 0.1, 0.3, 0.6,0.5, 0.35, 0.6, 0.75,0.85]) c_egm = np.array([0.0, 0.03, 0.1, 0.07, 0.05, 0.36, 0.4, 0.6, 0.8, 0.9,0.9,0.9]) vt_egm = np.array( [0.0, 0.05, 0.1,0.04, 0.02,0.2, 0.7, 0.5, 0.2, 0.9, 1.0, 1.2]) plt.plot(m_egm, vt_egm) plt.xlabel("Resources") plt.ylabel("Value")There are two main issues:- The line implied by the points "goes backwards" at some points. This is because the m-grid is not monotonic.- Some segments of the line are under other segments of the line. This means that we have sub-optimal points. A first step in filtering out sub-optimal points is to split the previous line in its non-decreasing segments. This is achieved by HARK's function `calc_segments`.# Compute non-decreasing segments rise, fall = calc_segments(m_egm, vt_egm) # Plot them for j in range(len(fall)): idx = range(rise[j],fall[j]+1) plt.plot(m_egm[idx], vt_egm[idx]) plt.xlabel("resources") plt.ylabel("transformed values") plt.show()The next step is to produce the upper-envelope of these segments: a line comprised of the points that are not under any other segment. This is done by HARK's `calc_multiline_envelope`function. We now apply it and plot the result# The function defines the upper envelope over a new grid, which it # uses to interpolate each of the non-decreasing segments. m_common = np.linspace(0,1.0,100) m_upper, c_upper, v_upper = calc_multiline_envelope(m_egm, c_egm, vt_egm, m_common) for j in range(len(fall)): idx = range(rise[j],fall[j]+1) plt.plot(m_egm[idx], vt_egm[idx]) plt.plot(m_upper, v_upper, 'k') plt.xlabel("resources") plt.ylabel("transformed values") plt.show()And there we have it! a monotonic value without the sub-optimal points or reverse jumps!Having introduced the main tools, we are now ready to apply DCEGM to a simple example. An example: writing a will Author: [](https://mv77.github.io/)We now present a basic example to illustrate the use of the previous tools in solving dynamic optimization problems with discrete and continuous decisions.The model represents an agent that lives for three periods and decides how much of his resources to consume in each of them. On the second period, he must additionally decide whether to hire a lawyer to write a will. Having a will has the upside of allowing the agent to leave a bequest in his third and last period of life, which gives him utility, but has the downside that the lawyer will charge a fraction of his period 3 resources.On each period, the agent receives a deterministic amount of resources $w$. The problem, therefore, is fully deterministic.I now present the model formally, solving it backwards.But first, some setup and calibration:# Import tools for linear interpolation and finding optimal # discrete choices. 
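# (calc_log_sum_choice_probs, imported below, is later used to pick the optimal discrete choice at each
#  grid point and return the implied value function; calc_prim_kink, imported above, locates the point
#  where the optimal discrete choice switches.)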
from HARK.interpolation import calc_log_sum_choice_probs # Import CRRA utility (and related) functions from HARK from HARK.utilities import CRRAutility, CRRAutilityP, CRRAutilityP_inv # Solution method parameters aGrid = np.linspace(0,8,400) # Savings grid for EGM. # Model parameters # Parameters that need to be fixed # Relative risk aversion. This is fixed at 2 in order to mantain # the analytical solution that we use, from Carroll (2000) CRRA = 2 # Parameters that can be changed. w = 1 # Deterministic wage per period. willCstFac = 0.35 # Fraction of resources charged by lawyer for writing a will. DiscFac = 0.98 # Time-discount factor. # Define utility (and related) functions u = lambda x: CRRAutility(x,CRRA) uP = lambda x: CRRAutilityP(x, CRRA) uPinv = lambda x: CRRAutilityP_inv(x, CRRA) # Create a grid for market resources mGrid = (aGrid-aGrid[0])*1.5 mGridPlots = np.linspace(w,10*w,100) mGridPlotsC = np.insert(mGridPlots,0,0) # Transformations for value funtion interpolation vTransf = lambda x: np.exp(x) vUntransf = lambda x: np.log(x)The third (last) period of lifeIn the last period of life, the agent's problem is determined by his total amount of resources $m_3$ and a state variable $W$ that indicates whether he wrote a will ($W=1$) or not ($W=0$). The agent without a willAn agent who does not have a will simply consumes all of his available resources. Therefore, his value and consumption functions will be:\begin{equation}V_3(m_3,W=0) = u(m_3)\end{equation}\begin{equation}c_3(m_3, W=0) = m_3\end{equation}Where $u(\cdot)$ gives the utility from consumption. We assume a CRRA specification $u(c) = \frac{c^{1-\rho}}{1-\rho}$. The agent with a willAn agent who wrote a will decides how to allocate his available resources $m_3$ between his consumption and a bequest. We assume an additive specification for the utility of a given consumption-bequest combination that follows a particular case in [Carroll (2000)](http://www.econ2.jhu.edu/people/ccarroll/Why.pdf). The component of utility from leaving a bequest $x$ is assumed to be $\ln (x+1)$. Therefore, the agent's value function is\begin{equation}V_3(m_3, W=1) = \max_{0\leq c_3 \leq m_3} u(c_3) + \ln(m_3 - c_3 + 1)\end{equation}For ease of exposition we consider the case $\rho = 2$, where [Carroll (2000)](http://www.econ2.jhu.edu/people/ccarroll/Why.pdf) shows that the optimal consumption level is given by\begin{equation}c_3(m_3, W=1) = \min \left[m_3, \frac{-1 + \sqrt{1 + 4(m_3+1)}}{2} \right].\end{equation}The consumption function shows that $m_3=1$ is the level of resources at which an important change of behavior occurs: agents leave bequests only for $m_3 > 1$. Since an important change of behavior happens at this point, we call it a 'kink-point' and add it to our grids.# Agent without a will mGrid3_no = mGrid cGrid3_no = mGrid vGrid3_no = u(cGrid3_no) # Create functions c3_no = LinearInterp(mGrid3_no, cGrid3_no) # (0,0) is already here. 
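# Values are interpolated on a transformed scale: vTransf = exp(.) is applied before building the
# interpolant and vUntransf = log(.) undoes it afterwards, which keeps the interpolation well behaved
# near m = 0, where CRRA utility diverges to minus infinity (exp(v) simply goes to zero there).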
vT3_no = LinearInterp(mGrid3_no, vTransf(vGrid3_no), lower_extrap = True) v3_no = lambda x: vUntransf(vT3_no(x)) # Agent with a will # Define an auxiliary function with the analytical consumption expression c3will = lambda m: np.minimum(m, -0.5 + 0.5*np.sqrt(1+4*(m+1))) # Find the kink point mKink = 1.0 indBelw = mGrid < mKink indAbve = mGrid > mKink mGrid3_wi = np.concatenate([mGrid[indBelw], np.array([mKink]), mGrid[indAbve]]) cGrid3_wi = c3will(mGrid3_wi) cAbve = c3will(mGrid[indAbve]) beqAbve = mGrid[indAbve] - c3will(mGrid[indAbve]) vGrid3_wi = np.concatenate([u(mGrid[indBelw]), u(np.array([mKink])), u(cAbve) + np.log(1+beqAbve)]) # Create functions c3_wi = LinearInterp(mGrid3_wi, cGrid3_wi) # (0,0) is already here vT3_wi = LinearInterp(mGrid3_wi, vTransf(vGrid3_wi), lower_extrap = True) v3_wi = lambda x: vUntransf(vT3_wi(x)) plt.figure() plt.plot(mGridPlots, v3_wi(mGridPlots), label = 'Will') plt.plot(mGridPlots, v3_no(mGridPlots), label = 'No Will') plt.title('Period 3: Value functions') plt.xlabel('Market resources') plt.legend() plt.show() plt.plot(mGridPlotsC, c3_wi(mGridPlotsC), label = 'Will') plt.plot(mGridPlotsC, c3_no(mGridPlotsC), label = 'No Will') plt.title('Period 3: Consumption Functions') plt.xlabel('Market resources') plt.legend() plt.show()The second periodOn the second period, the agent takes his resources as given (the only state variable) and makes two decisions:- Whether to write a will or not.- What fraction of his resources to consume.These decisions can be seen as happening sequentially: the agent first decides whether to write a will or not, and then consumes optimally in accordance with his previous decision. Since we solve the model backwards in time, we first explore the consumption decision, conditional on the choice of writing a will or not. An agent who decides not to write a willAfter deciding not to write a will, an agent solves the optimization problem expressed in the following conditional value function\begin{equation}\begin{split}\nu (m_2|w=0) &= \max_{0\leq c \leq m_2} u(c) + \beta V_3(m_3,W=0)\\s.t.&\\m_3 &= m_2 - c + w\end{split} \end{equation}We can approximate a solution to this problem through the method of endogenous gridpoints. This yields approximations to $\nu(\cdot|w=0)$ and $c_2(\cdot|w=0)$# Second period, not writing a will # Compute market resources at 3 with and without a will mGrid3_cond_nowi = aGrid + w # Compute marginal value of assets in period 3 for each ammount of savings in 2 vPGrid3_no = uP(c3_no(mGrid3_cond_nowi)) # Get consumption through EGM inversion of the euler equation cGrid2_cond_no = uPinv(DiscFac*vPGrid3_no) # Get beginning-of-period market resources mGrid2_cond_no = aGrid + cGrid2_cond_no # Compute value function vGrid2_cond_no = u(cGrid2_cond_no) + DiscFac*v3_no(mGrid3_cond_nowi) # Create interpolating value and consumption functions vT2_cond_no = LinearInterp(mGrid2_cond_no, vTransf(vGrid2_cond_no), lower_extrap = True) v2_cond_no = lambda x: vUntransf(vT2_cond_no(x)) c2_cond_no = LinearInterp(np.insert(mGrid2_cond_no,0,0), np.insert(cGrid2_cond_no,0,0))An agent who decides to write a willAn agent who decides to write a will also solves for his consumption dinamically. We assume that the lawyer that helps the agent write his will takes some fraction $\tau$ of his total resources in period 3. Therefore, the evolution of resources is given by $m_3 = (1-\tau)(m_2 - c_2 + w)$. 
The conditional value function of the agent is therefore:\begin{equation}\begin{split}\nu (m_2|w=1) &= \max_{0\leq c \leq m_2} u(c) + \beta V_3(m_3,W=1)\\s.t.&\\m_3 &= (1-\tau)(m_2 - c + w)\end{split} \end{equation}We also approximate a solution to this problem using the EGM. This yields approximations to $\nu(\cdot|w=1)$ and $c_2(\cdot|w=1)$.# Second period, writing a will # Compute market resources at 3 with and without a will mGrid3_cond_will = (1-willCstFac)*(aGrid + w) # Compute marginal value of assets in period 3 for each ammount of savings in 2 vPGrid3_wi = uP(c3_wi(mGrid3_cond_will)) # Get consumption through EGM inversion of the euler equation cGrid2_cond_wi = uPinv(DiscFac*(1-willCstFac)*vPGrid3_wi) # Get beginning-of-period market resources mGrid2_cond_wi = aGrid + cGrid2_cond_wi # Compute value function vGrid2_cond_wi = u(cGrid2_cond_wi) + DiscFac*v3_wi(mGrid3_cond_will) # Create interpolating value and consumption functions vT2_cond_wi = LinearInterp(mGrid2_cond_wi, vTransf(vGrid2_cond_wi), lower_extrap = True) v2_cond_wi = lambda x: vUntransf(vT2_cond_wi(x)) c2_cond_wi = LinearInterp(np.insert(mGrid2_cond_wi,0,0), np.insert(cGrid2_cond_wi,0,0))The decision whether to write a will or notWith the conditional value functions at hand, we can now express and solve the decision of whether to write a will or not, and obtain the unconditional value and consumption functions.\begin{equation}V_2(m_2) = \max \{ \nu (m_2|w=0), \nu (m_2|w=1) \}\end{equation}\begin{equation}w^*(m_2) = \arg \max_{w \in \{0,1\}} \{ \nu (m_2|w=w) \}\end{equation}\begin{equation}c_2(m_2) = c_2(m_2|w=w^*(m_2))\end{equation}We now construct these objects.# We use HARK's 'calcLogSumchoiceProbs' to compute the optimal # will decision over our grid of market resources. # The function also returns the unconditional value function # Use transformed values since -given sigma=0- magnitudes are unimportant. This # avoids NaNs at m \approx 0. vTGrid2, willChoice2 = calc_log_sum_choice_probs(np.stack((vT2_cond_wi(mGrid), vT2_cond_no(mGrid))), sigma = 0) # Plot the optimal decision rule plt.plot(mGrid, willChoice2[0]) plt.title('$w^*(m)$') plt.ylabel('Write will (1) or not (0)') plt.xlabel('Market resources: m') plt.show() # With the decision rule we can get the unconditional consumption grid cGrid2 = (willChoice2*np.stack((c2_cond_wi(mGrid),c2_cond_no(mGrid)))).sum(axis=0) # Now find the primary kink point (the point at which the optimal discrete # decision changes) pKink, segments = calc_prim_kink(mGrid, np.stack((vT2_cond_wi(mGrid), vT2_cond_no(mGrid))), willChoice2) m_kink = np.array([x[0] for x in pKink]) v_kink = np.array([x[1] for x in pKink]) # Insert the kink point into the value function grid and create the function. idx = np.searchsorted(mGrid, m_kink) mGrid_k = np.insert(mGrid, idx, m_kink) vTGrid2_k = np.insert(vTGrid2, idx, v_kink) vT2 = LinearInterp(mGrid_k, vTGrid2_k, lower_extrap = True) v2 = lambda x: vUntransf(vT2(x)) # Plot the conditional and unconditional value functions mGridPlots_k = np.concatenate([mGridPlots,m_kink]) mGridPlots_k.sort() plt.plot(mGridPlots_k, v2_cond_wi(mGridPlots_k), label = 'Cond. Will') plt.plot(mGridPlots_k, v2_cond_no(mGridPlots_k), label = 'Cond. No will') plt.plot(mGridPlots_k, v2(mGridPlots_k), 'k--',label = 'Uncond.') plt.plot(m_kink, v2(m_kink), 'rX', label = 'Primary kink') plt.title('Period 2: Value Functions') plt.xlabel('Market resources') plt.legend() plt.show() # Add kink points to consumption function. 
Make the discontinuity evident add_c = [] add_m = [] cond_cfuncs = [c2_cond_wi, c2_cond_no] for i in range(len(m_kink)): ml = m_kink[i] mr = np.nextafter(ml, np.inf) # Point to the left of the discontinuity add_m.append(ml) add_c.append(cond_cfuncs[segments[i,0]](ml)) # Point to the right of the discontinuitiy add_m.append(mr) add_c.append(cond_cfuncs[segments[i,1]](mr)) # Add to grids idx = np.searchsorted(mGrid, add_m) mGrid_k = np.insert(mGrid, idx, add_m) cGrid2_k = np.insert(cGrid2, idx, add_c) # Create function c2 = LinearInterp(mGrid_k, cGrid2_k) # Plot the conditional and unconditional consumption # functions mGridPlotsC_k = np.concatenate([mGridPlotsC,add_m]) mGridPlotsC_k.sort() plt.plot(mGridPlotsC_k, c2_cond_wi(mGridPlotsC_k), label = 'Cond. Will') plt.plot(mGridPlotsC_k, c2_cond_no(mGridPlotsC_k), label = 'Cond. No will') plt.plot(mGridPlotsC_k, c2(mGridPlotsC_k), 'k--',label = 'Uncond.') plt.plot(add_m, c2(add_m), 'rX', label = 'Primary kink') plt.title('Period 2: Consumption Functions') plt.xlabel('Market resources') plt.legend() plt.show()The first periodIn the first period, the agent simply observes his market resources and decides what fraction of them to consume. His problem is represented by the following value function\begin{equation}\begin{split}V (m_1) &= \max_{0\leq c \leq m_1} u(c) + \beta V_2(m_2)\\s.t.&\\m_2 &= m_1 - c + w.\end{split} \end{equation}Although this looks like a simple problem, there are complications introduced by the kink in $V_2(\cdot)$, which is clearly visible in the plot from the previous block. Particularly, note that $V_2'(\cdot)$ and $c_2(\cdot)$ are not monotonic: there are now multiple points $m$ for which the slope of $V_2(m)$ is equal. Thus, the Euler equation becomes a necessary but not sufficient condition for optimality and the traditional EGM inversion step can generate non-monotonic endogenous $m$ gridpoints.We now illustrate this phenomenon.# EGM step # Period 2 resources implied by the exogenous savings grid mGrid2 = aGrid + w # Envelope condition vPGrid2 = uP(c2(mGrid2)) # Inversion of the euler equation cGrid1 = uPinv(DiscFac*vPGrid2) # Endogenous gridpoints mGrid1 = aGrid + cGrid1 vGrid1 = u(cGrid1) + DiscFac*v2(mGrid2) plt.plot(mGrid1) plt.title('Endogenous gridpoints') plt.xlabel('Position: i') plt.ylabel('Endogenous grid point: $m_i$') plt.show() plt.plot(mGrid1,vGrid1) plt.title('Value function at grid points') plt.xlabel('Market resources: m') plt.ylabel('Value function') plt.show()The previous cell applies the endogenous gridpoints method to the first period problem. The plots illustrate that the sequence of resulting endogenous gridpoints $\{m_i\}_{i=1}^N$ is not monotonic. This results in intervals of market resources over which we have multiple candidate values for the value function. 
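One quick way to see the problem numerically (a minimal sketch, assuming the `mGrid1` array computed in the cell above) is to look for consecutive endogenous gridpoints that decrease:
import numpy as np
steps = np.diff(mGrid1)                 # differences between consecutive endogenous gridpoints
backwards = np.where(steps < 0)[0]      # indices where the grid "goes backwards"
print(len(backwards), "non-monotonic steps at positions:", backwards)
Each backwards step marks a region where several candidate (resource, consumption, value) points overlap.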
This is the point where we must apply the upper envelope function illustrated above.We finally use the resulting consumption and value grid points to create the first period value and consumption functions.# Calculate envelope vTGrid1 = vTransf(vGrid1) # The function operates with *transformed* value grids rise, fall = calc_segments(mGrid1, vTGrid1) mGrid1_up, cGrid1_up, vTGrid1_up, xings = calc_multiline_envelope(mGrid1, cGrid1, vTGrid1, mGrid, find_crossings = True) # Create functions c1_up = LinearInterp(mGrid1_up, cGrid1_up) v1T_up = LinearInterp(mGrid1_up, vTGrid1_up) v1_up = lambda x: vUntransf(v1T_up(x)) # Extract crossing points xing_m = np.array(xings) xing_v = v1_up(xings) # Show that there is a non-monotonicity and that the upper envelope fixes it plt.plot(mGrid1,vGrid1, label = 'EGM Points') plt.plot(mGridPlots, v1_up(mGridPlots), 'k--', label = 'Upper Envelope') plt.plot(xing_m, xing_v, 'rX', label = 'Crossings') plt.plot() plt.title('Period 1: Value function') plt.xlabel('Market resources') plt.legend() plt.show() # For the consumption function, we want to highlight the sharp discontinuity, # so we'll add points to the grid that make it evident. add_m_points = np.concatenate([xing_m, np.nextafter(xing_m, np.inf)]) mGridPlotsC_disc = np.concatenate([mGridPlotsC, add_m_points]) mGridPlotsC_disc.sort() # Plot consumption plt.plot(mGrid1,cGrid1, label = 'EGM Points') plt.plot(mGridPlotsC_disc,c1_up(mGridPlotsC_disc),'k--', label = 'Upper Envelope') plt.plot(add_m_points, c1_up(add_m_points),'rX', label = 'Secondary Kink') plt.title('Period 1: Consumption function') plt.xlabel('Market resources') plt.legend() plt.show()Processing digitraffic.fi API data with PythonData from the Finnish Transport Agency's (Liikennevirasto) road-traffic measurement stations is freely available online.In this code example we read this data and turn it into a GeoJSON-format list of measurement stations that can be drawn on top of a map (OpenStreetMap). A great deal of sensor data is available, but here we only use the air temperatures.from io import BytesIO import gzip from urllib.request import Request, urlopen # helper routine that tells the web server we can accept gzip-compressed data (Request) # if the data arrives in that form (response), we use the gzip module to decompress it def ReadDataURL( url ): req = Request( url ) req.add_header( 'Accept-encoding', 'gzip' ) response = urlopen( req ) if response.info().get('Content-Encoding') == 'gzip': buffer = BytesIO( response.read() ) with gzip.GzipFile( fileobj=buffer ) as MemoryFile: # gzip-compressed data is returned decompressed: return MemoryFile.read() else: # "plain" data is returned as-is: return response.read() import json # the metadata contains the stations' locations and names metaRaw = ReadDataURL( 'https://tie.digitraffic.fi/api/v1/metadata/weather-stations?lastUpdated=false' ) # the data coming from the network is in JSON format.
# convert it into a Python dictionary structure: metaData = json.loads( metaRaw.decode('utf-8') ) # print the data of one station print( json.dumps( metaData['features'][0], indent=2 ) ) # store each station's coordinates and name, keyed by the station identifier STATIONS = dict() for station in metaData['features']: # this 'geometry' and 'properties' structure is the same as in the GeoJSON format: STATIONS[ station['id'] ] = { 'geometry' : station['geometry'], 'properties': {'name' : station['properties']['name'] } } STATIONS # the current sensor data from all stations can be found at this URL: weatherRaw = ReadDataURL('https://tie.digitraffic.fi/api/v1/data/weather-data?lastUpdated=false') weatherData = json.loads( weatherRaw.decode('utf-8') ) print( json.dumps( weatherData['weatherStations'][0], indent=2) ) # test print of one station # pick out the value of the "ILMA" (air temperature) sensor from each station for station in weatherData['weatherStations']: for sensor in station['sensorValues']: if sensor['name'] == 'ILMA': # the STATIONS structure was already created from the station metadata # here we add one 'properties' field for the temperature: STATIONS[sensor['roadStationId']]['properties']['Temperature'] = sensor['sensorValue'] # all the collected data STATIONS # create a dictionary that holds the station data in GeoJSON format # https://en.wikipedia.org/wiki/GeoJSON GEO = dict() GEO['type'] = 'FeatureCollection' GEO['features'] = list() # this list holds all the "points", i.e. the measurement stations for station in STATIONS.keys(): # the data collected earlier is already in the right shape for GeoJSON, # so after adding the type information # we simply store each station in the 'features' list ITEM = STATIONS[station] ITEM['type'] = 'Feature' GEO['features'].append( ITEM ) GEO # use Jupyter notebook widgets to create a text field (label) # and a map (map) from ipyleaflet import Map, GeoJSON import ipywidgets as ipyw label = ipyw.Label(layout=ipyw.Layout(width='100%')) label.value = u'0.0' # GeoJSON points corresponding to the weather stations collected above are drawn on top of the map layer = GeoJSON( data = GEO ) # when a map point is clicked, print the temperature and station name stored in the # GeoJSON properties to the text field def click_handler(event=None,id=None,properties=None): label.value = str( properties.get('Temperature') ) +' ℃ : ' + properties.get('name') layer.on_click(click_handler) # create the map centred on Tampere map = Map( zoom=7, center=[61.4978,23.7610]) # attach the weather-station points on top of the map map.add_layer( layer ) # show the text field and the map in this notebook ipyw.VBox( [label,map] )Widget Javascript not detected. It may not be installed or enabled properly.Module 1: Introduction to Exploratory Data Analysis In this notebook we dive into some plotting methods commonly used for Exploratory Data Analysis (EDA). Our goals for EDA are to open-mindedly explore the data, and see what insights we may find. The purpose of the EDA approach is to:- maximize insight into a data set- uncover underlying structure- extract important variables- detect outliers and anomalies- test underlying assumptions- develop parsimonious models- determine optimal factor settings In this notebook we'll investigate these plotting techniques:1. Scatter Plot1. Scatter Matrix1. Heat map1. Histogram1. Bar Plot1. Box Plot1.
Time Series Setupimport pandas as pd import numpy as np import seaborn as sns sns.set_palette("hls") import matplotlib.pyplot as plt %matplotlib inline from datetime import datetime import dateutil.parser import re # The command below means that the output of multiple commands in a cell will be output at once from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # The command below tells jupyter to display up to 80 columns; this keeps everything visible pd.set_option('display.max_columns', 80) pd.set_option('expand_frame_repr', True)With a fresh notebook, we read in our clean dataset again.data_path = '../data/' df = pd.read_csv(data_path+'raw_data.csv.zip', low_memory=False)Before diving into our exploratory data analysis, it is worth reiterating that this whole process is about understanding the distribution of the data and the relationships between different features.When we move on to use machine learning algorithms, we will be asking a question and trying to answer it using the statistical relationships between different features in the data. The EDA analysis will help us shape this question and have a clear idea about how to approach building the algorithm!With that in mind, let's look at several visualization methods to examine the data and any relationships between features… 1. Scatter plot To start, the scatter plot! This is a very popular and powerful way to visualize the relationship between two continuous features. Essentially this plot shows us how feature Y changes when feature X is changed. If there is a clear pattern formed in the scatter plot, we say that x and y are **correlated**. There are several outcomes we see on a scatter plot:- Positive Linear = When X increases, y increases and the data points follow an approximate straight line- Negative Linear = When X increases, y decreases and the data points follow an approximate straight line- Non-Linear = When X increases, there is a consistent change in Y but this is not linear. It could be quadratic or exponential for example. - No correlation = When X increases, there is no clear pattern to how y changes; the data points form a random distribution.Let's try this out on our data and choose two continuous variables to plot. First let's extract all the continuous variables from our dataset.numeric_vars = df.select_dtypes(include=[np.number]).columns.tolist() for variable in numeric_vars: print(variable)basket_amount currency_exchange_loss_amount funded_amount id image.id image.template_id journal_totals.bulkEntries journal_totals.entries lender_count loan_amount partner_id terms.disbursal_amount terms.loan_amount terms.loss_liability.currency_exchange_coverage_rate terms.repayment_term translator.image video.id video.thumbnailImageId borrower_count partner_average_loan_size_percent_per_capita_income partner_currency_exchange_loss_rate partner_default_rate partner_default_rate_note partner_delinquency_rate partner_delinquency_rate_note partner_image.id partner_image.template_id partner_loans_at_risk_rate partner_loans_posted partner_portfolio_yield partner_portfolio_yield_note partner_profitability partner_total_amount_raised number_of_loans posted_year posted_month time_to_fundTo start, let's look at whether there is a relationship between lender_count and loan_amount... intuition suggests that bigger loans must have more lenders.
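One quick numeric check (a minimal sketch, assuming the `df` and the column names listed above) is the Pearson correlation coefficient, which summarises the strength of a linear relationship in a single number:
# Correlation between number of lenders and loan size: a value close to +1 supports the intuition
print(df['lender_count'].corr(df['loan_amount']))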
If this is true, we'll see it in the scatter plot!ax = sns.regplot(x='lender_count', y='loan_amount', data=df)There are a few outliers which are interesting, but in general we can see that if the lender count is higher, so is the loan amount! Our intuition was correct.Let's try one more. How about the repayment term and the loan amount? What kind of relationship would you expect between the repayment term and the loan amount?ax = sns.regplot(x='terms.repayment_term', y='loan_amount', data=df)The scatter plot indicates that there is no clear relationship between the repayment term and loan amount - we wouldn't want to use repayment term to predict the loan_amount then! 2. Scatter MatrixWhen we have lots of continuous variables, we could go through them one by one to see the relationships, or we could use a scatterplot matrix! This creates a scatter plot between every combination of variables in a list. Another interesting quality of the scatter matrix is that the diagonals give a histogram of the variable in question.# Let's choose some variables to examine: num_df = df[['funded_amount', 'partner_loans_posted', 'loan_amount', 'terms.loan_amount', 'borrower_count', 'status']]; # Remove the NaN rows so Seaborn can plot num_df = num_df.dropna(axis=0, how='any'); # num_df.describe() # Create the scatter plot and let's color the data points by their status. sns.pairplot(num_df, hue = 'status');Great! There are several useful observations we can make from this scatter matrix:- loan_amount and terms.loan_amount look exactly the same! It seems that the data is duplicated in these two columns, so we should only keep one of the two.- loan_amount is correlated with funded_amount.- Although partner_loans_posted is a numeric variable, its values are clustered around certain values and there is no clear correlation with any of the other variables. It may be better to split partner_loans_posted into several categorical variables, e.g. partner_loans_posted < 500, 500 < partner_loans_posted < 1500, etc...- The correlations with borrower_count are very interesting! There seem to be several linear relationships within this scatter plot, and we should investigate what is causing this!- From the histograms, we can see the majority of the variables are clustered on the low end, with a slightly wider spread on borrower_count. 3. Heat MapHeat maps are a way of visualizing how the combination of two variables affects the value of a third. They are great for spotting when some values, or calculated values such as averages and counts, are more extreme. Let's use this to identify when large loan amounts are requested.## Let's total up means by partner and sector partner_sector_means = df.groupby(["partner_id", "sector"]).mean().reset_index() partner_sector_means = partner_sector_means[["partner_id", "sector", "loan_amount"]].pivot("partner_id", "sector", "loan_amount") ## Set the color range to be friendlier by using a preset seaborn color palette my_cmap = sns.light_palette("Navy", as_cmap=True) ## Make the plot bigger fig, ax = plt.subplots(figsize=(10,10)) ax = sns.heatmap(partner_sector_means.fillna(0),cmap=my_cmap)From this we can see that it's not so much the partner which leads to bigger loans but the sector. It is clear that many loans in the retail sector are bigger than the others on average! 4. HistogramA histogram is useful for looking at the distribution of values for a single variable and also for identifying outliers. It shows us the count of observations that fall in each bin.
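Under the hood this is just a set of bin counts; a minimal sketch (assuming the `df` loaded above, with an arbitrary choice of 20 bins) computes them directly with NumPy:
import numpy as np
counts, edges = np.histogram(df['loan_amount'].dropna(), bins=20)   # counts per bin and the bin edges
print(list(zip(edges[:-1].round(0), counts)))                       # left edge of each bin and its count
Plotting libraries such as seaborn draw these counts as bars and can overlay a smoothed density estimate on top.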
The plot below shows the data distribution of loan_amount using both bars and a continuous line. Without going into too much detail about the value on the y-axis, what we can take away from this is that there is a much higher occurrence of small loans (high bars/peak in the line) and that large loans are much rarer (low bars/drop in the line).sns.distplot(df['loan_amount'].dropna(axis = 0)); # Let's just look at those under 5K small_loans_df = df[(df['loan_amount'] < 5000)] sns.distplot(small_loans_df['loan_amount']);/Users/brian/anaconda3/envs/good/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6448: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg. warnings.warn("The 'normed' kwarg is deprecated, and has been "Looking at the loans under 5000 we see a much clearer distribution, although it is still heavily skewed, with most loans at the low end. 5. Bar PlotBar plots are useful for understanding how categorical groups differ with respect to a continuous variable.Below we can see two bar plots: the first shows the mean loan_amount by sector and the second shows the total loan_amount by sector.f, (ax1, ax2) = plt.subplots(2, 1, figsize=(15,8), sharex=True); ax1.set_title("mean loan amount by sector"); ax2.set_title("total loan amount by sector"); sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.mean, ax = ax1); sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.sum, ax = ax2); plt.xticks(rotation=-45);This is useful as we can see that although education and health have the highest loans on average, the majority of money loaned is for agriculture. 6. Box PlotsA box plot describes the distribution of data based on five important summary numbers: the minimum, first quartile, median, third quartile, and maximum. In the simplest box plot the central rectangle spans the first quartile to the third quartile (the interquartile range or IQR). A segment inside the rectangle shows the median, and "whiskers" above and below the box show the locations of the minimum and maximum. Let's use this to look at the distribution of borrower counts by sector for different partners. First let's look at how many loans come from different partners.multi_borrower_loans = df[df["borrower_count"] > 1] multi_borrower_loans['partner_name'].value_counts()Let's just look at partners who have over 100 loans...plt.switch_backend('agg') top_partners = ['One Acre Fund', 'Hand in Hand Eastern Africa', 'Evidence Action', 'KOMAZA', 'Nuru International'] for partner in top_partners: plt.figure(figsize=(10,6)); sns.boxplot(x='sector', y='borrower_count', data=multi_borrower_loans[multi_borrower_loans['partner_name']==partner][['borrower_count','sector']]); plt.title('partner name: {}'.format(partner)); plt.xticks(rotation=45);This is quite interesting as we can see that on average One Acre Fund has more borrowers on each loan, with a median around 10 - whereas the other partners' medians are around 3 - 4. Also, for the partners who have loans in more than one sector (Hand in Hand Eastern Africa and One Acre Fund), the distribution of borrower_count across different sectors is quite similar. An aside: Back to scatter plots From the above we discovered something quite interesting... different partners have different loan characteristics - especially when considering the borrower_count. Do you remember from our scatter matrix, we noticed that loan_amount vs borrower_count showed several linear trends? Let's dig into this and see if the differences we see between different partners could be responsible for this.
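Before plotting, a small sketch (assuming the `multi_borrower_loans` frame and column names used above, and that each partner has at least two loans) fits a straight line per partner so the slopes can be compared directly:
# Hypothetical check: one fitted slope of loan_amount on borrower_count per partner
import numpy as np
def slope(g):
    g = g[['borrower_count', 'loan_amount']].dropna()
    if len(g) < 2:
        return np.nan                                              # not enough points to fit a line
    return np.polyfit(g['borrower_count'], g['loan_amount'], 1)[0]  # slope of the straight-line fit
print(multi_borrower_loans.groupby('partner_name').apply(slope).sort_values(ascending=False).head())
Clearly different slopes across partners would be the numeric counterpart of the separate linear trends visible in the plots below.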
for partner in top_partners: plt.figure(figsize=(10,6)); sns.regplot(x='borrower_count', y='loan_amount', data=multi_borrower_loans[multi_borrower_loans['partner_name']==partner][['borrower_count','loan_amount']]); plt.title('partner name: {}'.format(partner));Aha! It looks like we are onto something here... we can see different trends for different partners! We'll look into this further in feature_engineering to see how we can use this to create powerful features. 7. Time dependencyQuite often it's useful to see how a variable changes over time. This means creating a plot with time on the x-axis and the variable on the y-axis.Let's have a look at how the average loan amount changes over time on a monthly basis.# Convert posted date to a datetime object df['posted_date'] = pd.to_datetime(df['posted_date']) # Resample the date to monthly intervals, taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('M', on='posted_date')['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time');We can look at different timeframes by changing the parameter in resample. Let's look on a weekly basis!# Resample the date to weekly intervals, taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('7D', on='posted_date')['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time');We can see that the loan amount was quite erratic between 2006 and 2009, settling down to a more constant value from the end of 2009 onwards with a few peaks in 2014. It would be interesting to see if Kiva's policy changed during 2009 and whether including loans from pre-2010 in our analysis is sensible if they are no longer representative of the current situation. It could be quite interesting to see how this graph looks if we split it up by partner. There are a lot of different partners so let's just take the top five by loan_amounttop_five_partners = df.groupby('partner_name')['loan_amount'].sum().sort_values()[::-1][:5].index plt.figure(figsize=(15,8)) for partner in top_five_partners: time_data = df[df['partner_name']==partner].resample('7D', on='posted_date')['loan_amount'].mean().fillna(0) # Use seaborn plotting style plt.plot(time_data, label=partner, linewidth=2); plt.title('Mean loan_amount over time ') plt.legend(loc='best') plt.show();Forecasting cropland vegetation condition * **Products used:** [s2_l2a](https://explorer.digitalearth.africa/s2_l2a), [crop_mask_eastern](https://explorer.digitalearth.africa/crop_mask_eastern)**Keywords** :index:`data used; sentinel 2`, :index:`data used; crop_mask_eastern`, :index: `data methods; forecasting`, :index: `data methods; autoregression`BackgroundThis notebook conducts time-series forecasting of vegetation condition (NDVI) using SARIMAX, a variation on [autoregressive-moving-average (ARMA)](https://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_modelARMAX) models which includes an integrated (I) component to difference the timeseries so that it becomes stationary, a seasonal (S) component, and the capacity to consider exogenous (X) variables. In this example, we will conduct a forecast on a univariate NDVI timeseries.
That is, our forecast will be built on temporal patterns in NDVI. Conversely, multivariate approaches can account for influences of variables such as soil moisture and rainfall. DescriptionIn this notebook, we generate a NDVI timeseries from Sentinel-2, then use it develop a forecasting algorithm.The following steps are taken:1. Load Sentinel-2 data and calculate NDVI.2. Mask NDVI to cropland using the crop mask.3. Iterate through SARIMAX parameters and conduct model selection based on cross-validation.4. Inspect model diagnostics4. Forecast NDVI into the future and visualise the result.*** Load packagesImport Python packages that are used for the analysis.%matplotlib inline import datacube import xarray as xr import pandas as pd import numpy as np from tqdm.notebook import tqdm from itertools import product from datacube import Datacube from matplotlib import pyplot as plt from deafrica_tools.datahandling import load_ard from deafrica_tools.plotting import display_map from deafrica_tools.bandindices import calculate_indices from deafrica_tools.dask import create_local_dask_cluster from statsmodels.tools.eval_measures import rmse import statsmodels.api as smSet up a Dask clusterDask can be used to better manage memory use down and conduct the analysis in parallel. For an introduction to using Dask with Digital Earth Africa, see the [Dask notebook](../Beginners_guide/08_Parallel_processing_with_dask.ipynb).>**Note**: We recommend opening the Dask processing window to view the different computations that are being executed; to do this, see the *Dask dashboard in DE Africa* section of the [Dask notebook](../Beginners_guide/08_parallel_processing_with_dask.ipynb).To use Dask, set up the local computing cluster using the cell below.create_local_dask_cluster()Connect to the datacubedc = datacube.Datacube(app="NDVI_forecast")Analysis parameters* `lat`, `lon`: The central latitude and longitude to analyse. In this example we'll use an agricultural area in Ethiopia.* `buffer`: The number of square degrees to load around the central latitude and longitude. For reasonable loading times, set this as 0.1 or lower.* `products`: The satellite data to load, in the example we will use Sentinel-2* `time_range`: The date range to analyse. The longer the date-range, the more data the model have to derive patterns in the NDVI timeseries.* `freq`: The frequency we want to resample the time-series to e.g. for monthly time steps use `'1M'`, for fortinightly use `'2W'`.* `forecast_length`: The length of time beyond the latest observation in the dataset that we want the model to forecast, expressed in units of resample frequency `freq`. A longer `forecast_length` means greater forecast uncertainty. 
* `resolution`: The pixel resolution (in metres) to use for loading Sentinel-2* `dask_chunks`: How to chunk the datasets to work with dask.# Define the analysis region (Lat-Lon box) lat, lon = 8.5593, 40.6975 buffer = 0.04 #the satellite product to load products = "s2_l2a" # Define the time window for defining the model time_range = ('2017-01-01', '2022-01') #resample frequency freq='1M' #number of time-steps to forecast (in units of `freq`) forecast_length = 6 #resolution of Sentinel-2 pixels resolution = (-20,20) #dask chunk sizes dask_chunks={'x':500, 'y':500, 'time':-1}Display analysis area on an interactive maplon=(lon - buffer, lon + buffer) lat=(lat - buffer, lat + buffer) display_map(lon,lat)Load the satellite dataUsing the parameters we defined above.# set up a datcube query object query = {'x': lon, 'y': lat, 'time': time_range, 'measurements':['red', 'nir'], 'output_crs' :'EPSG:6933', 'resolution' : resolution, 'resampling' :{"fmask": "nearest", "*": "bilinear"}} # load the satellite data ds = load_ard(dc=dc, dask_chunks=dask_chunks, products=products, **query) print(ds)Using pixel quality parameters for Sentinel 2 Finding datasets s2_l2a Applying pixel quality/cloud mask Returning 701 time steps as a dask array Dimensions: (time: 701, y: 505, x: 387) Coordinates: * time (time) datetime64[ns] 2017-01-06T07:42:19 ... 2022-01-25T07:... * y (y) float64 1.093e+06 1.093e+06 ... 1.083e+06 1.083e+06 * x (x) float64 3.923e+06 3.923e+06 ... 3.931e+06 3.931e+06 spatial_ref int32 6933 Data variables: red (time, y, x) float32 dask.array nir (time, y, x) float32 dask.array Attributes: crs: EPSG:6933 grid_mapping: spatial_refMask region with DE Africa's cropland extent mapLoad the cropland mask over the region of interest. The default region we're analysing is in Ethiopia, so we need to load the [crop_mask_eastern](https://explorer.digitalearth.africa/products/crop_mask_eastern/extents) product, which cover the countries of Ethiopia, Kenya, Tanzania, Rwanda, and Burundi. If you change the analysis region from the default one, you may need to load a different crop mask - see the [docs page](https://docs.digitalearthafrica.org/en/latest/data_specs/Cropland_extent_specs.html) to find out more.cm = dc.load(product='crop_mask_eastern', time=('2019'), measurements='filtered', resampling='nearest', like=ds.geobox).filtered.squeeze() cm.plot.imshow(add_colorbar=False, figsize=(6,6)) plt.title('Cropland Extent');Now we will use the cropland map to mask the regions in the Sentinel-2 data that only have cropping.ds = ds.where(cm)Calculate NDVI and clean the time-seriesAfrter calculating NDVI, we will smooth and interpolate the data to ensure we working with a consistent time-series. This is a very important step in the workflow and there are many ways to smooth, interpolate, gap-fill, remove outliers, or curve-fit the data to ensure a consistent time-series. If not using the default example, you may have to define additional methods to those used here.To do this we take two steps:1. Resample the data to monthly time-steps using the mean2. Calculate a rolling mean with a window of 4 steps#calculate NDVI ndvi = calculate_indices(ds, 'NDVI', drop=True, collection='s2') #resample and smooth window=4 ndvi=ndvi.resample(time=freq).mean().rolling(time=window, min_periods=1, center=True).mean()Reduce the time-series to one dimensionIn this example, we're generating a forecast on a simple 1D timeseries. This time-series represents the spatially averaged NDVI at each time-step in the series. 
In this step, all the calculations above are triggered and the dataset is brought into memory so this step can take a few minutes to complete.ndvi=ndvi.mean(['x','y']) ndvi = ndvi.NDVI.compute()CPLReleaseMutex: Error = 1 (Operation not permitted)Plot the NDVI timeseriesndvi.plot(figsize=(11,5),linestyle='dashed', marker='o') plt.title('NDVI') plt.ylim(0,ndvi.max().values+0.05);Split data and fit a model Cross-validation is a common method for evaluating model performance. It involves dividing data into a training set on which the model is trained, and test (or validation) set, to which the model is applied to produce predictions which are compared against actual values (that weren't used in model training).ndvi = ndvi.drop('spatial_ref').to_dataframe() train_data = ndvi['NDVI'][:len(ndvi)-10] # remove the last ten observations and keep them as test data test_data = ndvi['NDVI'][len(ndvi)-10:]Iteratively find the best parameters for the SARIMAX model SARIMAX models are fitted with parameters for both the trend and seasonal components of the timeseries. The parameters can be defined as:* Trend elements * **p**: Autoregression order. This is the number of immediately preceding values in the series that are used to predict the value at the present time. * **d**: Difference order. The number of times that differencing is performed is called the difference order. * **q**: Moving average order. The size of the moving average window.* Seasonal elements are as above, but for the seasonal component of the timeseries. * **P** * **D** * **Q*** We also need to define the length of season. * **s**: In this case we use 6, which is in units of resample frequency so refers to 6 months. In the cell below, initial values and a range are given for the parameters above. Using `range(0, 3)` means the values 0, 1, and 2 are iterated through for each of p, d, q and P, D, Q. This means that there are $3^2 \times 3^2 = 81$ possible combinations.#Set initial values and some bounds p = range(0, 3) d = 1 q = range(0, 3) P = range(0, 3) D = 1 Q = range(0, 3) s = 6 #Create a list with all possible combinations of parameters parameters = product(p, q, P, Q) parameters_list = list(parameters) print('Number of iterations to run:', len(parameters_list)) # Train many SARIMA models to find the best set of parameters def optimize_SARIMA(parameters_list, d, D, s): """ Return an ordered (ascending RMSE) dataframe with parameters, corresponding AIC, and RMSE. parameters_list - list with (p, q, P, Q) tuples d - integration order D - seasonal integration order s - length of season """ results = [] best_aic = float('inf') for param in tqdm(parameters_list): try: import warnings warnings.filterwarnings("ignore") model = sm.tsa.statespace.SARIMAX(train_data, order=(param[0], d, param[1]), seasonal_order=(param[2], D, param[3], s)).fit(disp=-1) pred = model.predict(start=len(train_data), end=(len(ndvi)-1)) error = rmse(pred, test_data) except: continue aic = model.aic #Save best model, AIC and parameters if aic < best_aic: best_model = model best_aic = aic best_param = param results.append([param, model.aic, error]) result_table = pd.DataFrame(results) result_table.columns = ['parameters', 'aic', 'error'] #Sort in ascending order, lower AIC is better result_table = result_table.sort_values(by='error', ascending=True).reset_index(drop=True) return result_tableNumber of iterations to run: 81Now will will run the function above for every iteration of parameters we have defined. 
Depending on the number of iterations, this can take a few minutes to run. A progress bar is printed below.#run the function above result_table = optimize_SARIMA(parameters_list, d, D, s)Model selectionThe root-mean-square error (RMSE) is a common metric used to evaluate model or forecast performance. It is the standard deviation of residuals (difference between forecast and actual value) expressed in units of the variable of interest e.g. NDVI. We can calculate RMSE of our forecast because we withheld some observations as test or validation data.We can also use either the [Akaike information criterion (AIC)](https://en.wikipedia.org/wiki/Akaike_information_criterion) or [Bayesian information criterion (BIC)](https://en.wikipedia.org/wiki/Bayesian_information_criterion) for model selection. Both these criteria aim to optimise the trade-off between goodness of fit and model simplicity. We are aiming to find the model that can explain the most variation in the timeseries with the least complexity, as added complexity may lead to overfitting. The BIC penalises additional parameters (greater complexity) more than the AIC. There are different schools of thought on which criterion to use. A general rule of thumb is that the BIC should be used for inference and interpretation whereas the AIC should be used for prediction. As our goal is prediction (forecasting), we could select the model with the lowest AIC, though this approach is often reserved for when there is no test data available for cross-validation.The cell below presents the top 15 models based on AIC and the RMSE on the cross-validation.#Sort table by the lowest AIC (Akaike Information Criteria) where the RMSE is low result_table = result_table.sort_values('aic').sort_values('error') print(result_table[0:15])parameters aic error 0 (0, 0, 1, 2) -167.863353 0.017416 1 (0, 0, 2, 1) -163.385807 0.018163 2 (2, 2, 0, 2) -203.909259 0.019519 3 (0, 0, 2, 2) -167.058157 0.019523 4 (0, 1, 1, 1) -187.117336 0.020658 5 (0, 1, 1, 2) -188.266501 0.023755 6 (0, 1, 2, 2) -186.129784 0.024212 7 (0, 1, 2, 1) -185.463971 0.024253 8 (2, 2, 1, 2) -201.555774 0.024279 9 (0, 0, 1, 1) -164.050216 0.025185 10 (2, 2, 0, 1) -205.615902 0.029545 11 (0, 2, 1, 2) -191.207078 0.029569 12 (2, 1, 0, 0) -193.409397 0.030001 13 (2, 1, 1, 2) -190.212648 0.030505 14 (2, 0, 2, 2) -192.365681 0.032094Select model and predictIn the cell below. We select a model from the list above. In this case we've selected model `0` as it has the lowest RMSE, though you can select any model by setting the index number in the cell below using the `model_sel_index` parameter.#selected model model_sel_index = 0 #store parameters from selected model p, q, P, Q = result_table.iloc[model_sel_index].parameters print(result_table.iloc[model_sel_index]) #fit the model with the parameters identified above best_model = sm.tsa.statespace.SARIMAX(train_data, order=(p, d, q), seasonal_order=(P, D, Q, s)).fit(disp=-1)parameters (0, 0, 1, 2) aic -167.863353 error 0.017416 Name: 0, dtype: objectPlot model diagnosticsThere are some typical plots we can use to evaluate our model. 1. **Standardised residuals (top-left)** The standardised residuals are plotted against x (time) values. This allows us to check that variance (distance of residuals from 0) is constant across time values. There should be no obvious patterns. 2. 
**Histogram and estimated density (top-right)** A kernel density estimation (KDE) is an estimated probability density function fitted on the actual distribution (histogram) of standardised residuals. A normal distribution (N (0,1)) is shown for reference. This plot shows that the distribution of our standardised residuals is close to normally distributed. 3. **Normal quantile-quantile (Q-Q) plot (bottom-left)** This plot shows 'expected' or 'theoretical' quantiles drawn from a normal distribution on the x-axis against quantiles taken from the sample of residuals on the y-axis. If the observations in blue match the 1:1 line in red, then we can conclude that our residuals are normally distributed. 4. **Correlogram (bottom-right)** The correlations for lags greater than 0 should not be statistically significant. That is, they should not be outside the blue ribbon. > Note: The Q-Q plot and correlogram generated for model `0` show there is some pattern in the residuals. That is, there is remaining variation in the data which the model has not accounted for. You could experiment with different parameter values or model selection in the prior steps to see if this can be addressed.fig = plt.figure(figsize=(16, 9)) fig = best_model.plot_diagnostics(lags=25, fig=fig)Backtest forecast We saved the last 10 observations as test data above. Now we can use our model to predict NDVI for those time-steps and compare those predictions with actual values. We can do this visually in the graph below and also quantify the error with the root-mean-square error (RMSE).pred = best_model.predict(start=len(train_data), end=(len(ndvi)-1)) plt.figure(figsize=(11,5)) pred.plot(label='forecast', linestyle='dashed', marker='o') train_data.plot(label='training data', linestyle='dashed', marker='o') test_data.plot(label='test data', linestyle='dashed', marker='o') plt.legend(loc="upper left");Plot the result of our forecastTo forecast NDVI into the future, we'll run a model on the entire time series so we can include the latest observations. We can see that the forecast uncertainty, expressed as the 95% confidence interval, increases with time.final_model = sm.tsa.statespace.SARIMAX(ndvi, order=(p, d, q), seasonal_order=(P, D, Q, s)).fit(disp=-1) yhat = final_model.get_forecast(forecast_length); fig, ax = plt.subplots(1, 1, figsize=(11, 5)) yhat.predicted_mean.plot(label="NDVI forecast", ax=ax, linestyle="dashed", marker="o") ax.fill_between( yhat.predicted_mean.index, yhat.conf_int()["lower NDVI"], yhat.conf_int()["upper NDVI"], alpha=0.2, ) ndvi[-36:].plot(label="Observaions", ax=ax, linestyle="dashed", marker="o") plt.legend(loc="upper left");Our forecast looks reasonable in the context of the timeseries above. *** Additional information**License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). 
Digital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license.**Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks).**Last modified:** January 2022**Compatible datacube version:**print(datacube.__version__)1.8.6**Last Tested:**from datetime import datetime datetime.today().strftime('%Y-%m-%d')**Introduction to NumPy:**```NumPy``` stands for *Numerical Python* and it's a fundamental package for scientific computing in Python. NumPy provides Python with an extensive math library capable of performing numerical computations effectively and efficiently. These lessons are intended as a basic overview of NumPy and introduces some of its most important features. **Why Numpy:**You may be wondering why people use NumPy - after all, Python can handle lists, as you learned in the Intro to Python lessons.Even though Python lists are great on their own, NumPy has a number of key features that give it great advantages over Python lists. One such feature is speed. When performing operations on large arrays NumPy can often perform several orders of magnitude faster than Python lists. This speed comes from the nature of NumPy arrays being memory-efficient and from optimized algorithms used by NumPy for doing arithmetic, statistical, and linear algebra operations.import time import numpy as np x = np.random.rand(1000000000) start = time.time() mean = sum(x)/len(x) print(time.time() - start) start = time.time() mean_np = np.mean(x) print(time.time() - start)0.6675674915313721**Creating NumPy ndarrays:**At the core of NumPy is the ndarray, where nd stands for n-dimensional. An ndarray is a multidimensional array of elements all of the same type. In other words, an ndarray is a grid that can take on many shapes and can hold either numbers or strings. In many Machine Learning problems you will often find yourself using ndarrays in many different ways. For instance, you might use an ndarray to hold the pixel values of an image that will be fed into a Neural Network for image classification. There are several ways to create ndarrays in NumPy. In the following lessons we will see two ways to create ndarrays:1. Using regular Python lists2. Using built-in NumPy functions In this section, we will create ndarrays by providing Python lists to the NumPy np.array() function, it is just a function that returns an ndarray. We should note that for the purposes of clarity, the examples throughout these lessons will use small and simple ndarrays. Let's start by creating 1-Dimensional (1D) ndarrays.# We import NumPy into Python import numpy as np # We create a 1D ndarray that contains only integers x = np.array([1, 2, 3, 4, 5]) # Let's print the ndarray we just created using the print() command print('x = ', x)x = [1 2 3 4 5]**Shape of an ndarray:**Another important property of arrays is their shape. The shape of an array is the size along each of its dimensions. As you will see, NumPy ndarrays have attributes that allows us to get information about them in a very intuitive way. 
For example, the shape of an ndarray can be obtained using the .shape attribute. The shape attribute returns a tuple of N positive integers that specify the sizes of each dimension.# We create a 1D ndarray that contains only integers x = np.array([1, 2, 3, 4, 5]) # We print x print() print('x = ', x) print() # We print information about x print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [1 2 3 4 5] x has dimensions: (5,) x is an object of type: The elements in x are of type: int64We can see that the shape attribute returns the tuple (5,) telling us that x is of rank 1 (i.e. x only has 1 dimension ) and it has 5 elements. The type() function tells us that x is indeed a NumPy ndarray. Finally, the .dtype attribute tells us that the elements of x are stored in memory as signed 64-bit integers. Another great advantage of NumPy is that it can handle more data-types than Python lists.[here](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html) As mentioned earlier, ndarrays can also hold strings. Let's see how we can create a rank 1 ndarray of strings in the same manner as before, by providing the np.array() function a Python list of strings.# We create a rank 1 ndarray that only contains strings x = np.array(['Hello', 'World']) # We print x print() print('x = ', x) print() # We print information about x print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = ['Hello' 'World'] x has dimensions: (2,) x is an object of type: The elements in x are of type: It is important to remember that one big difference between Python lists and ndarrays, is that unlike Python lists, all the elements of an ndarray must be of the same type. So, while we can create Python lists with both integers and strings, we can't mix types in ndarrays. If you provide the np.array() function with a Python list that has both integers and strings, NumPy will interpret all elements as strings. We can see this in the next example:# We create a rank 1 ndarray from a Python list that contains integers and strings x = np.array([1, 2, 'World']) # We print the ndarray print() print('x = ', x) print() # We print information about x print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = ['1' '2' 'World'] x has dimensions: (3,) x is an object of type: The elements in x are of type: Let us now look at how we can create a rank 2 ndarray from a nested Python list.# We create a rank 2 ndarray that only contains integers Y = np.array([[1,2,3],[4,5,6],[7,8,9], [10,11,12]]) # We print Y print() print('Y = \n', Y) print() # We print information about Y print('Y has dimensions:', Y.shape) print('Y has a total of', Y.size, 'elements') print('Y is an object of type:', type(Y)) print('The elements in Y are of type:', Y.dtype)Y = [[ 1 2 3] [ 4 5 6] [ 7 8 9] [10 11 12]] Y has dimensions: (4, 3) Y has a total of 12 elements Y is an object of type: The elements in Y are of type: int64Up to now, we have only created ndarrays with integers and strings. We saw that when we create an ndarray with only integers, NumPy will automatically assign the dtype int64 to its elements. 
Let's see what happens when we create ndarrays with floats and integers.# We create a rank 1 ndarray that contains integers x = np.array([1,2,3]) # We create a rank 1 ndarray that contains floats y = np.array([1.0,2.0,3.0]) # We create a rank 1 ndarray that contains integers and floats z = np.array([1, 2.5, 4]) # We print the dtype of each ndarray print('The elements in x are of type:', x.dtype) print('The elements in y are of type:', y.dtype) print('The elements in z are of type:', z.dtype)The elements in x are of type: int64 The elements in y are of type: float64 The elements in z are of type: float64Notice that when we create an ndarray with both floats and integers, as we did with the z ndarray above, NumPy assigns its elements a *float64* dtype as well. This is called upcasting. Since all the elements of an ndarray must be of the same type, in this case NumPy upcasts the integers in z to floats in order to avoid losing precision in numerical computations. Even though NumPy automatically selects the dtype of the ndarray, NumPy also allows you to specify the particular dtype you want to assign to the elements of the ndarray. You can specify the dtype when you create the ndarray using the keyword dtype in the np.array() function. Let's see an example:# We create a rank 1 ndarray of floats but set the dtype to int64 x = np.array([1.5, 2.2, 3.7, 4.0, 5.9], dtype = np.int64) # We print x print() print('x = ', x) print() # We print the dtype x print('The elements in x are of type:', x.dtype)x = [1 2 3 4 5] The elements in x are of type: int64Once you create an ndarray, you may want to save it to a file to be read later or to be used by another program. NumPy provides a way to save the arrays into files for later use - let's see how this is done.# We create a rank 1 ndarray x = np.array([1, 2, 3, 4, 5]) # We save x into the current directory as np.save('my_array', x)The above saves the x ndarray into a file named my_array.npy. You can load the saved ndarray into a variable by using the load() function. > When loading an array from a file, make sure you include the name of the file together with the extension .npy, otherwise you will get an error.# We load the saved array from our current directory into variable y y = np.load('my_array.npy') # We print y print() print('y = ', y) print() # We print information about the ndarray we loaded print('y is an object of type:', type(y)) print('The elements in y are of type:', y.dtype)y = [1 2 3 4 5] y is an object of type: The elements in y are of type: int64**Specialized ndarrays:** One great time-saving feature of NumPy is its ability to create ndarrays using built-in functions. These functions allow us to create certain kinds of ndarrays with just one line of code. Below we will see a few of the most useful built-in functions for creating ndarrays that you will come across when doing AI programming. **np.zeros():**Let's start by creating an ndarray with a specified shape that is full of zeros. We can do this by using the np.zeros() function. The function np.zeros(shape) creates an ndarray full of zeros with the given shape. So, for example, if you wanted to create a rank 2 array with 3 rows and 4 columns, you will pass the shape to the function in the form of (rows, columns), as in the example below:# We create a 3 x 4 ndarray full of zeros. 
X = np.zeros((3,4)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] X has dimensions: (3, 4) X is an object of type: The elements in X are of type: float64> As we can see, the np.zeros() function creates by default an array with dtype float64. If desired, the data type can be changed by using the keyword dtype. **np.ones:**Similarly, we can create an ndarray with a specified shape that is full of ones. We can do this by using the np.ones() function. Just like the np.zeros() function, the np.ones() function takes as an argument the shape of the ndarray you want to make. Let's see an example:# We create a 3 x 2 ndarray full of ones. X = np.ones((3,2)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[1. 1.] [1. 1.] [1. 1.]] X has dimensions: (3, 2) X is an object of type: The elements in X are of type: float64**np.full():**We can also create an ndarray with a specified shape that is full of any number we want. We can do this by using the np.full() function. The np.full(shape, constant value) function takes two arguments. The first argument is the shape of the ndarray you want to make and the second is the constant value you want to populate the array with. Let's see an example:# We create a 2 x 3 ndarray full of fives. X = np.full((2,3), 5) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[5 5 5] [5 5 5]] X has dimensions: (2, 3) X is an object of type: The elements in X are of type: int64**np.eye():**An Identity matrix is a square matrix that has only 1s in its main diagonal and zeros everywhere else. The function np.eye(N) creates a square N x N ndarray corresponding to the Identity matrix. Since all Identity Matrices are square, the np.eye() function only takes a single integer as an argument. Let's see an example:# We create a 5 x 5 Identity matrix. X = np.eye(5) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[1. 0. 0. 0. 0.] [0. 1. 0. 0. 0.] [0. 0. 1. 0. 0.] [0. 0. 0. 1. 0.] [0. 0. 0. 0. 1.]] X has dimensions: (5, 5) X is an object of type: The elements in X are of type: float64**np.diag():**We can also create diagonal matrices by using the np.diag() function. A diagonal matrix is a square matrix that only has values in its main diagonal. The np.diag() function creates an ndarray corresponding to a diagonal matrix , as shown in the example below:# Create a 4 x 4 diagonal matrix # on its main diagonal X = np.diag([10,20,30,50]) # We print X print() print('X = \n', X) print()X = [[10 0 0 0] [ 0 20 0 0] [ 0 0 30 0] [ 0 0 0 50]]**np.arange():**NumPy also allows you to create ndarrays that have evenly spaced values within a given interval. NumPy's np.arange() function is very versatile and can be used with either one, two, or three arguments. Below we will see examples of each case and how they are used to create different kinds of ndarrays.Let's start by using np.arange() with only one argument. 
When used with only one argument, np.arange(N) will create a rank 1 ndarray with consecutive integers between 0 and N - 1.# We create a rank 1 ndarray that has sequential integers from 0 to 9 x = np.arange(10) # We print the ndarray print() print('x = ', x) print() # We print information about the ndarray print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [0 1 2 3 4 5 6 7 8 9] x has dimensions: (10,) x is an object of type: The elements in x are of type: int64When used with two arguments, np.arange(start,stop) will create a rank 1 ndarray with evenly spaced values within the half-open interval [start, stop). This means the evenly spaced numbers will include start but exclude stop. Let's see an example# We create a rank 1 ndarray that has sequential integers from 4 to 9. x = np.arange(4,10) # We print the ndarray print() print('x = ', x) print() # We print information about the ndarray print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [4 5 6 7 8 9] x has dimensions: (6,) x is an object of type: The elements in x are of type: int64Finally, when used with three arguments, np.arange(start,stop,step) will create a rank 1 ndarray with evenly spaced values within the half-open interval [start, stop) with step being the distance between two adjacent values. Let's see an example:# We create a rank 1 ndarray that has evenly spaced # integers from 1 to 13 in steps of 3. x = np.arange(1,14,3) # We print the ndarray print() print('x = ', x) print() # We print information about the ndarray print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [ 1 4 7 10 13] x has dimensions: (5,) x is an object of type: The elements in x are of type: int64**np.linspace():**Even though the np.arange() function allows for non-integer steps, such as 0.3, the output is usually inconsistent, due to the finite floating point precision. For this reason, in the cases where non-integer steps are required, it is usually better to use the function np.linspace(). The np.linspace(start, stop, N) function returns N evenly spaced numbers over the closed interval [start, stop]. This means that both the start and thestop values are included. We should also note the np.linspace() function needs to be called with at least two arguments in the form np.linspace(start,stop). In this case, the default number of elements in the specified interval will be N= 50. The reason np.linspace() works better than the np.arange() function, is that np.linspace() uses the number of elements we want in a particular interval, instead of the step between values. Let's see some examples:# We create a rank 1 ndarray that has 10 integers evenly spaced between 0 and 25. x = np.linspace(0,25,10) # We print the ndarray print() print('x = \n', x) print() # We print information about the ndarray print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [ 0. 2.77777778 5.55555556 8.33333333 11.11111111 13.88888889 16.66666667 19.44444444 22.22222222 25. ] x has dimensions: (10,) x is an object of type: The elements in x are of type: float64As we can see from the above example, the function np.linspace(0,25,10) returns an ndarray with 10 evenly spaced numbers in the closed interval [0, 25]. 
We can also see that both the start and end points, 0 and 25 in this case, are included. However, you can let the endpoint of the interval be excluded (just like in the np.arange() function) by setting the keyword endpoint = False in the np.linspace() function. Let's create the same x ndarray we created above but now with the endpoint excluded:# We create a rank 1 ndarray that has 10 integers evenly spaced between 0 and 25, # with 25 excluded. x = np.linspace(0,25,10, endpoint = False) # We print the ndarray print() print('x = ', x) print() # We print information about the ndarray print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)x = [ 0. 2.5 5. 7.5 10. 12.5 15. 17.5 20. 22.5] x has dimensions: (10,) x is an object of type: The elements in x are of type: float64**np.reshape():**So far, we have only used the built-in functions np.arange() and np.linspace() to create rank 1 ndarrays. However, we can use these functions to create rank 2 ndarrays of any shape by combining them with the np.reshape() function. The np.reshape(ndarray, new_shape) function converts the given ndarray into the specified new_shape. It is important to note that the new_shape should be compatible with the number of elements in the given ndarray.# We create a rank 1 ndarray with sequential integers from 0 to 19 x = np.arange(20) # We print x print() print('Original x = ', x) print() # We reshape x into a 4 x 5 ndarray x = np.reshape(x, (4,5)) # We print the reshaped x print() print('Reshaped x = \n', x) print() # We print information about the reshaped x print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype)Original x = [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19] Reshaped x = [[ 0 1 2 3 4] [ 5 6 7 8 9] [10 11 12 13 14] [15 16 17 18 19]] x has dimensions: (4, 5) x is an object of type: The elements in x are of type: int64One great feature about NumPy, is that some functions can also be applied as methods. This allows us to apply different functions in sequence in just one line of code. ndarray methods are similar to ndarray attributes in that they are both applied using dot notation (.). Let's see how we can accomplish the same result as in the above example, but in just one line of code:# We create a a rank 1 ndarray with sequential integers from 0 to 19 and # reshape it to a 4 x 5 array Y = np.arange(20).reshape(4, 5) # We print Y print() print('Y = \n', Y) print() # We print information about Y print('Y has dimensions:', Y.shape) print('Y is an object of type:', type(Y)) print('The elements in Y are of type:', Y.dtype)Y = [[ 0 1 2 3 4] [ 5 6 7 8 9] [10 11 12 13 14] [15 16 17 18 19]] Y has dimensions: (4, 5) Y is an object of type: The elements in Y are of type: int64In the same manner, we can also combine reshape() with np.linspace() to create rank 2 arrays, as shown in the next example.# We create a rank 1 ndarray with 10 integers evenly spaced between 0 and 50, # with 50 excluded. We then reshape it to a 5 x 2 ndarray X = np.linspace(0,50,10, endpoint=False).reshape(5,2) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[ 0. 5.] [10. 15.] [20. 25.] [30. 35.] [40. 
45.]] X has dimensions: (5, 2) X is an object of type: The elements in X are of type: float64**Random:**The last type of ndarrays we are going to create are random ndarrays. Random ndarrays are arrays that contain random numbers. Often in Machine Learning, you need to create random matrices, for example, when initializing the weights of a Neural Network. NumPy offers a variety of random functions to help us create random ndarrays of any shape.# We create a 3 x 3 ndarray with random floats in the half-open interval [0.0, 1.0). X = np.random.random((3,3)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in x are of type:', X.dtype)X = [[0.70511948 0.40550946 0.58073781] [0.67423937 0.03346497 0.12587685] [0.31964513 0.15787991 0.39406672]] X has dimensions: (3, 3) X is an object of type: The elements in x are of type: float64NumPy also allows us to create ndarrays with random integers within a particular interval. The function np.random.randint(start, stop, size = shape) creates an ndarray of the given shape with random integers in the half-open interval [start, stop). Let's see an example:# We create a 3 x 2 ndarray with random integers in the half-open interval [4, 15). X = np.random.randint(4,15,size=(3,2)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype)X = [[ 8 5] [ 4 12] [ 7 8]] X has dimensions: (3, 2) X is an object of type: The elements in X are of type: int64In some cases, you may need to create ndarrays with random numbers that satisfy certain statistical properties. For example, you may want the random numbers in the ndarray to have an average of 0. NumPy allows you create random ndarrays with numbers drawn from various probability distributions. The function np.random.normal(mean, standard deviation, size=shape), for example, creates an ndarray with the given shape that contains random numbers picked from a normal (Gaussian) distribution with the given mean and standard deviation.# We create a 1000 x 1000 ndarray of random floats drawn from normal (Gaussian) distribution # with a mean of zero and a standard deviation of 0.1. X = np.random.normal(0, 0.1, size=(1000,1000)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype) print('The elements in X have a mean of:', X.mean()) print('The maximum value in X is:', X.max()) print('The minimum value in X is:', X.min()) print('X has', (X < 0).sum(), 'negative numbers') print('X has', (X > 0).sum(), 'positive numbers')X = [[ 0.06038575 0.07058648 0.00651944 ... -0.11510515 0.03274108 -0.04948388] [-0.0427396 0.01963788 -0.08193541 ... 0.22104453 0.00997958 -0.17775136] [ 0.08125654 0.01597223 -0.00085445 ... -0.1888355 -0.0522047 -0.03611404] ... [-0.05934912 -0.0268068 0.13427054 ... -0.0735276 -0.04742756 -0.07159635] [ 0.1307405 -0.07065583 -0.17023093 ... 0.03329357 -0.06680952 -0.06895992] [ 0.04490569 0.1388507 0.06680477 ... 
-0.0169742 -0.04751616 0.142401 ]] X has dimensions: (1000, 1000) X is an object of type: The elements in X are of type: float64 The elements in X have a mean of: 0.00010345992650649735 The maximum value in X is: 0.49171050961052176 The minimum value in X is: -0.4572065155534312 X has 499752 negative numbers X has 500248 positive numbers**Question:**Using the Built-in functions you learned about in theprevious lesson, create a 4 x 4 ndarray that onlycontains consecutive even numbers from 2 to 32 (inclusive)import numpy as np X =**Question:**Try creating the same array using the np.linspace() function.import numpy as np X =**Accessing Elements in ndarays:**Elements can be accessed using indices inside square brackets, [ ]. NumPy allows you to use both positive and negative indices to access elements in the ndarray. Positive indices are used to access elements from the beginning of the array, while negative indices are used to access elements from the end of the array.# We create a rank 1 ndarray that contains integers from 1 to 5 x = np.array([1, 2, 3, 4, 5]) # We print x print() print('x = ', x) print() # Let's access some elements with positive indices print('This is First Element in x:', x[0]) print('This is Second Element in x:', x[1]) print('This is Fifth (Last) Element in x:', x[4]) print() # Let's access the same elements with negative indices print('This is First Element in x:', x[-5]) print('This is Second Element in x:', x[-4]) print('This is Fifth (Last) Element in x:', x[-1])Modifying ndarrays:Now let's see how we can change the elements in rank 1 ndarrays. We do this by accessing the element we want to change and then using the = sign to assign the new value:# We create a rank 1 ndarray that contains integers from 1 to 5 x = np.array([1, 2, 3, 4, 5]) # We print the original x print() print('Original:\n x = ', x) print() # We change the fourth element in x from 4 to 20 x[3] = 20 # We print x after it was modified print('Modified:\n x = ', x)Original: x = [1 2 3 4 5] Modified: x = [ 1 2 3 20 5]Similarly, we can also access and modify specific elements of rank 2 ndarrays. To access elements in rank 2 ndarrays we need to provide 2 indices in the form [row, column]. Let's see some examples# We create a 3 x 3 rank 2 ndarray that contains integers from 1 to 9 X = np.array([[1,2,3],[4,5,6],[7,8,9]]) # We print X print() print('X = \n', X) print() # Let's access some elements in X print('This is (0,0) Element in X:', X[0,0]) print('This is (0,1) Element in X:', X[0,1]) print('This is (2,2) Element in X:', X[2,2])X = [[1 2 3] [4 5 6] [7 8 9]] This is (0,0) Element in X: 1 This is (0,1) Element in X: 2 This is (2,2) Element in X: 9Elements in rank 2 ndarrays can be modified in the same way as with rank 1 ndarrays. Let's see an example:# We create a 3 x 3 rank 2 ndarray that contains integers from 1 to 9 X = np.array([[1,2,3],[4,5,6],[7,8,9]]) # We print the original x print() print('Original:\n X = \n', X) print() # We change the (0,0) element in X from 1 to 20 X[0,0] = 20 # We print X after it was modified print('Modified:\n X = \n', X)Original: X = [[1 2 3] [4 5 6] [7 8 9]] Modified: X = [[20 2 3] [ 4 5 6] [ 7 8 9]]**Adding and Deleting elements:**Now, let's take a look at how we can add and delete elements from ndarrays. We can delete elements using the np.delete(ndarray, elements, axis) function. This function deletes the given list of elements from the given ndarray along the specified axis. For rank 1 ndarrays the axis keyword is not required. 
For rank 2 ndarrays, axis = 0 is used to select rows, and axis = 1 is used to select columns. Let's see some examples:# We create a rank 1 ndarray x = np.array([1, 2, 3, 4, 5]) # We create a rank 2 ndarray Y = np.array([[1,2,3],[4,5,6],[7,8,9]]) # We print x print() print('Original x = ', x) # We delete the first and last element of x x = np.delete(x, [0,4]) # We print x with the first and last element deleted print() print('Modified x = ', x) # We print Y print() print('Original Y = \n', Y) # We delete the first row of y w = np.delete(Y, 0, axis=0) # We delete the first and last column of y v = np.delete(Y, [0,2], axis=1) # We print w print() print('w = \n', w) # We print v print() print('v = \n', v)Original x = [1 2 3 4 5] Modified x = [2 3 4] Original Y = [[1 2 3] [4 5 6] [7 8 9]] w = [[4 5 6] [7 8 9]] v = [[2] [5] [8]]We can append values to ndarrays using the np.append(ndarray, elements, axis) function. This function appends the given list of elements to ndarray along the specified axis. Let's see some examples:# We create a rank 1 ndarray x = np.array([1, 2, 3, 4, 5]) # We create a rank 2 ndarray Y = np.array([[1,2,3],[4,5,6]]) # We print x print() print('Original x = ', x) # We append the integer 6 to x x = np.append(x, 6) # We print x print() print('x = ', x) # We append the integer 7 and 8 to x x = np.append(x, [7,8]) # We print x print() print('x = ', x) # We print Y print() print('Original Y = \n', Y) # We append a new row containing 7,8,9 to y v = np.append(Y, [[7,8,9]], axis=0) # We append a new column containing 9 and 10 to y q = np.append(Y,[[9],[10]], axis=1) # We print v print() print('v = \n', v) # We print q print() print('q = \n', q)Original x = [1 2 3 4 5] x = [1 2 3 4 5 6] x = [1 2 3 4 5 6 7 8] Original Y = [[1 2 3] [4 5 6]] v = [[1 2 3] [4 5 6] [7 8 9]] q = [[ 1 2 3 9] [ 4 5 6 10]]Now let's see now how we can insert values to ndarrays. We can insert values to ndarrays using the np.insert(ndarray, index, elements, axis) function. This function inserts the given list of elements to ndarray right before the given index along the specified axis. Let's see some examples:# We create a rank 1 ndarray x = np.array([1, 2, 5, 6, 7]) # We create a rank 2 ndarray Y = np.array([[1,2,3],[7,8,9]]) # We print x print() print('Original x = ', x) # We insert the integer 3 and 4 between 2 and 5 in x. x = np.insert(x,2,[3,4]) # We print x with the inserted elements print() print('x = ', x) # We print Y print() print('Original Y = \n', Y) # We insert a row between the first and last row of y w = np.insert(Y,1,[4,5,6],axis=0) # We insert a column full of 5s between the first and second column of y v = np.insert(Y,1,5, axis=1) # We print w print() print('w = \n', w) # We print v print() print('v = \n', v)NumPy also allows us to stack ndarrays on top of each other, or to stack them side by side. The stacking is done using either the np.vstack() function for vertical stacking, or the np.hstack() function for horizontal stacking. It is important to note that in order to stack ndarrays, the shape of the ndarrays must match. Let's see some examples:# We create a rank 1 ndarray x = np.array([1,2]) # We create a rank 2 ndarray Y = np.array([[3,4],[5,6]]) # We print x print() print('x = ', x) # We print Y print() print('Y = \n', Y) # We stack x on top of Y z = np.vstack((x,Y)) # We stack x on the right of Y. We need to reshape x in order to stack it on the right of Y. 
w = np.hstack((Y,x.reshape(2,1))) # We print z print() print('z = \n', z) # We print w print() print('w = \n', w)Kudryavtsev Model* Link to this notebook: https://github.com/csdms/pymt/blob/master/notebooks/ku.ipynb* Install command: `$ conda install notebook pymt_permamodel`* Download local copy of notebook: `$ curl -O https://raw.githubusercontent.com/csdms/pymt/master/notebooks/ku.ipynb` Introduction to Permafrost Processes - Lesson 2 Kudryavtsev ModelThis lab has been designed and developed by and , CSDMS, University of Colorado, CO with assistance of , at CSDMS, University of Colorado, CO, and , at Los Alamos National Labs, NM. These labs are developed with support from NSF Grant 1503559, ‘Towards a Tiered Permafrost Modeling Cyberinfrastructure’ Classroom organizationThis lab is the second in a series of introductions to permafrost process modeling, designed for inexperienced users. The series is designed for inexperienced modelers to gain some experience with running a numerical model, changing model inputs, and analyzing model output. In the first lesson, we explored the Air Frost Number model (as formulated by Nelson and Outcalt in 1987) and learned to use the CSDMS Python Modeling Toolkit ([PyMT](https://github.com/csdms/pymt)); that lab looked at what controls permafrost occurrence and compared the occurrence of permafrost in Russia. Basic theory on the Air Frost Number is presented in [Frost Number Model Lecture 1](https://csdms.colorado.edu/wiki/File:FrostNumberModel_Lecture1.pptx).In this second lesson, we explore the Kudryavtsev model, implemented as formulated in Anisimov et al. (1997) and dubbed the Ku model. Specifically, this lab looks at what controls soil temperature and active layer thickness, and compares model output with observed long-term data collected at permafrost active layer thickness monitoring sites in Fairbanks and Barrow, Alaska. Basic theory on the Kudryavtsev model is presented in [Kudryavtsev Model Lecture 2](https://csdms.colorado.edu/wiki/File:KudryavtsevModel_Lecture2.pptx).This lab will likely take ~1.5 hours to complete in the classroom. This estimate assumes you are unfamiliar with PyMT and need to learn how to set parameters, save runs, download data and look at output (otherwise it will be much faster).We will use NetCDF files for output; this is a standard output format for all CSDMS models. If you have no experience with visualizing these files, the Panoply software will be helpful. Find instructions on how to use this software. Learning objectives Skills* familiarize yourself with a basic configuration of the Kudryavtsev Model for 1D (a single location).* hands-on experience with visualizing NetCDF time series with Panoply.* data-to-model comparisons and how to think about uncertainty in data and model output.
Topical learning objectives:* what are controls on permafrost soil temperature* what is a steady-state model* what are important parameters for calculating active layer thickness* active layer thickness evolution with climate warming in two locations in Alaska References and More information ., ., & . (1997). *Global warming and active-layer thickness: results from transient general circulation models.* Global and Planetary Change, 15(3-4), 61-77. DOI:10.1016/S0921-8181(97)00009-X ., ., 2003. *A model for regional-scale estimation of temporal and spatial variability of active layer thickness and mean nnaual ground emperatures.* Permafrost and periglacial processes 14, 125-139. DOI: 10.1002/ppp.449 ., 2005. *Influence of the seasonal snow cover on the ground thermal regime: an overview.* Review of Geophysics, 43, RG4002. The Kudryavtsev ModelThe Kudryavtsev et al. (1974), or *Ku* model, presents anapproximate solution of the Stefan problem. The model provides asteady-state solution under the assumption of sinusoidal airtemperature forcing. It considers snow, vegetation, and soil layersas thermal damping to variation of air temperature. The layer ofsoil is considered to be a homogeneous column with different thermalproperties in the frozen and thawed states. The main outputs areannual maximum frozen/thaw depth and mean annual temperature at thetop of permafrost (or at the base of the active layer). It can beapplied over a wide variety of climatic conditions.# Load standard Python modules import numpy as np import matplotlib.pyplot as plt # Load PyMT model(s) import pymt.models ku = pymt.models.Ku()➡ models: FrostNumber, Ku, HydrotrendPart 1We will run the Kudryatsev model for conditions in Barrow, Alaska in a very cold year, 1964. The mean annaul temperature for 1964 was -15.21C, the amplitude over that year was 18.51C. It was close to normal snow year, meaning the average snow thickness over this winter was 0.22m.Adapt the settings in the Ku model for Barrow 1964. Make sure you request an output file. Save the simulation settings and submit your simulation. Download the model results and open them in Panoply.config_file, run_folder = ku.setup(T_air=-15.21, A_air=18.51) ku.initialize(config_file, run_folder) ku.update() ku.output_var_names ku.get_value('soil__active_layer_thickness')Q1.1: What was the active layer thickness the model predicted? *Sketch a soil profile for winter conditions versus August conditions, indicate where the frozen-unfrozen boundary is in each two cases.* Q1.2: How do you think snow affects the active layer thickness predictions? Part 2Run the Kudryatsev model with a range of snow conditions (0 m as the one extreme, and in extremely snowy years, the mean snow thickness over the winter is 0.4m in Barrow). Set these two simulations, run them and dowload the files.args = ku.setup(h_snow=0.) ku.initialize(*args) ku.update() ku.get_value('soil__active_layer_thickness') args = ku.setup(h_snow=0.4) ku.initialize(*args) ku.update() ku.get_value('soil__active_layer_thickness')Ku model component: Initializing...Q2.1: What happens if there is no snow at all (0 m)? Q2.2: What is the active layer thickness prediction for a very snowy year? Part 3Run the Kudryatsev model with a range of soil water contents. 
What happens if there is 20% more, and 20% less soil water content?args = ku.setup(vwc_H2O=0.2) ku.initialize(*args) ku.update() ku.get_value('soil__active_layer_thickness') args = ku.setup(vwc_H2O=0.6) ku.initialize(*args) ku.update() ku.get_value('soil__active_layer_thickness')Ku model component: Initializing...Q3.1: Is this selected range of 20% realistic for soils in permafrost regions? Q3.2: From the theory presented in the associated lecture notes, how do you think soil water content in summer affects the soil temperature? Part 4Posted here are time-series for climate conditions for both Barrow and Fairbanks, Alaska. Time-series are annual values and run from 1961-2015, the data include mean annual temperature (MAAT), temperature amplitude (TAMP) and winter-average snow depth (SD). These are text files, so you can plot them in your own favorite software or programming language.Choose which case you want to run, you will now run a 55 year simulation.import pandas data = pandas.read_csv("https://raw.githubusercontent.com/mcflugen/pymt_ku/master/data/Barrow_1961-2015.csv") data maat = data["atmosphere_bottom_air__temperature"] tamp = data["atmosphere_bottom_air__temperature_amplitude"] snow_depth = data["snowpack__depth"] ku = pymt.models.Ku() args = ku.setup(end_year=2050) ku.initialize(*args) n_steps = int((ku.end_time - ku.time) / ku.time_step) thickness = np.empty(n_steps) for i in range(n_steps): ku.set_value("atmosphere_bottom_air__temperature", maat.values[i]) ku.set_value("atmosphere_bottom_air__temperature_amplitude", tamp.values[i]) ku.set_value("snowpack__depth", snow_depth.values[i]) ku.update() thickness[i] = ku.get_value('soil__active_layer_thickness') plt.plot(thickness) # This should be the same as the above but it's NOT! But now it is. 
BOOM!IntroIn this mission, we're going to explore a workflow to make competing in the Kaggle Titanic competition easier, using a pipeline of functions to reduce the number of dimensions you need to focus on.To get started, we'll read in the original train.csv and test.csv files from Kaggle.import pandas as pd train = pd.read_csv("train.csv") holdout = pd.read_csv("test.csv") holdout.head() train.head()Preprocessing the DataWe have created a file, `functions.py` which contains versions of the functions we created in the earlier missions form this course, which will save you building those functions again from scratch.Let's import that file and preprocess our Kaggle data.# %load functions.py def process_missing(df): """Handle various missing values from the data set Usage ------ holdout = process_missing(holdout) """ df["Fare"] = df["Fare"].fillna(train["Fare"].mean()) df["Embarked"] = df["Embarked"].fillna("S") return df def process_age(df): """Process the Age column into pre-defined 'bins' Usage ------ train = process_age(train) """ df["Age"] = df["Age"].fillna(-0.5) cut_points = [-1,0,5,12,18,35,60,100] label_names = ["Missing","Infant","Child","Teenager","Young Adult","Adult","Senior"] df["Age_categories"] = pd.cut(df["Age"],cut_points,labels=label_names) return df def process_fare(df): """Process the Fare column into pre-defined 'bins' Usage ------ train = process_fare(train) """ cut_points = [-1,12,50,100,1000] label_names = ["0-12","12-50","50-100","100+"] df["Fare_categories"] = pd.cut(df["Fare"],cut_points,labels=label_names) return df def process_cabin(df): """Process the Cabin column into pre-defined 'bins' Usage ------ train process_cabin(train) """ df["Cabin_type"] = df["Cabin"].str[0] df["Cabin_type"] = df["Cabin_type"].fillna("Unknown") df = df.drop('Cabin',axis=1) return df def process_titles(df): """Extract and categorize the title from the name column Usage ------ train = process_titles(train) """ titles = { "Mr" : "Mr", "Mme": "Mrs", "Ms": "Mrs", "Mrs" : "Mrs", "Master" : "Master", "Mlle": "Miss", "Miss" : "Miss", "Capt": "Officer", "Col": "Officer", "Major": "Officer", "Dr": "Officer", "Rev": "Officer", "Jonkheer": "Royalty", "Don": "Royalty", "Sir" : "Royalty", "Countess": "Royalty", "Dona": "Royalty", "Lady" : "Royalty" } extracted_titles = df["Name"].str.extract(' ([A-Za-z]+)\.',expand=False) df["Title"] = extracted_titles.map(titles) return df def create_dummies(df,column_name): """Create Dummy Columns (One Hot Encoding) from a single Column Usage ------ train = create_dummies(train,"Age") """ dummies = pd.get_dummies(df[column_name],prefix=column_name) df = pd.concat([df,dummies],axis=1) return dfCreate a new functionCreate a new function, which: - Accepts a dataframe parameter - Applies the process_missing(), process_age(), process_fare(), process_titles(), and process_cabin() functions to the dataframe - Applies the create_dummies() function to the "Age_categories", "Fare_categories","Title", "Cabin_type", and "Sex" columns. 
- Returns the processed dataframe Apply the newly create function on the train and holdout dataframes.def pre_process(df): df = process_missing(df) df = process_age(df) df = process_fare(df) df = process_titles(df) df = process_cabin(df) for col in ["Age_categories","Fare_categories", "Title","Cabin_type","Sex"]: df = create_dummies(df,col) return df train = pre_process(train) holdout = pre_process(holdout)Exploring the DataSteps: - Inspecting the type of the columns - Using histograms to view the distribution of values in the columns - Use pivot tables to look at the survival rate for different values of the columns - Find a way to combine the columns and look at the resulting distribution of values and survival rateexplore_cols = ["SibSp","Parch","Survived"] explore = train[explore_cols].copy() explore.info() import matplotlib.pyplot as plt %matplotlib inline explore.drop("Survived",axis=1).plot.hist(alpha=0.5,bins=8) plt.show() explore["familysize"] = explore[["SibSp","Parch"]].sum(axis=1) explore.drop("Survived",axis=1).plot.hist(alpha=0.5,bins=10) plt.xticks(range(11)) plt.show() import numpy as np for col in explore.columns.drop("Survived"): pivot = explore.pivot_table(index=col,values="Survived") pivot.plot.bar(ylim=(0,1),yticks=np.arange(0,1,.1)) plt.axhspan(.3, .6, alpha=0.2, color='red') plt.show()SummaryThe `SibSp` column shows the number of siblings and/or spouses each passenger had on board, while the `Parch` columns shows the number of parents or children each passenger had onboard. Neither column has any missing values.The distribution of values in both columns is skewed right, with the majority of values being zero.You can sum these two columns to explore the total number of family members each passenger had onboard. The shape of the distribution of values in this case is similar, however there are less values at zero, and the quantity tapers off less rapidly as the values increase.Looking at the survival rates of the the combined family members, you can see that few of the over 500 passengers with no family members survived, while greater numbers of passengers with family members survived. (Only 30% of the passengers who had no family members onboard survived.) Engineering New FeaturesBased of this, we can come up with an idea for a new feature - was the passenger alone. This will be a binary column containing the value: - `1` if the passenger has zero family members onboard - `0` if the passenger has one or more family members onboard Let's go ahead and create this feature.def process_isalone(df): df["familysize"] = df[["SibSp","Parch"]].sum(axis=1) df["isalone"] = 0 df.loc[(df["familysize"] == 0),"isalone"] = 1 df = df.drop("familysize",axis=1) return df train = process_isalone(train) holdout = process_isalone(holdout)Selecting the Best-Performing FeaturesIn the Feature Preparation, Selection and Engineering mission, we used scikit-learn's `feature_selection.RFECV` class to automate selecting the best-performing features using recursive feature elimination.To speed up our Kaggle workflow, we can create a function that performs this step for us, which will mean we can perform feature selection by calling a self-contained function and focus our efforts on the more creative part - exploring the data and engineering new features.You may remember that the first parameter when you instantiate a RFECV() object is an estimator. 
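As a quick, hypothetical reminder of that API (a toy dataset rather than the Titanic data), the estimator is the first argument you pass when constructing the selector:

```python
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression

# Toy data just to show the call signature; the first argument to RFECV is the estimator
X, y = make_classification(n_samples=200, n_features=8, random_state=0)
selector = RFECV(LogisticRegression(max_iter=1000), cv=5)
selector.fit(X, y)
print(selector.support_)   # boolean mask of the features RFECV kept
```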
At the time we used a Logistic Regression estimator, but we've since discovered in the Model Selection and Tuning mission that Random Forests seems to be a better algorithm for this Kaggle competition.Let's write a function that: - Accepts a dataframe as input - Performs data preparation for machine learning - Uses recursive feature elimination and the random forests algorithm to find the best-performing set of featuresfrom sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import RFECV def select_features(df): # Remove non-numeric columns, columns that have null values df = df.select_dtypes([np.number]).dropna(axis=1) all_X = df.drop(["Survived","PassengerId"],axis=1) all_y = df["Survived"] clf = RandomForestClassifier(random_state=1) selector = RFECV(clf,cv=10) selector.fit(all_X,all_y) best_columns = list(all_X.columns[selector.support_]) print("Best Columns \n"+"-"*12+"\n{}\n".format(best_columns)) return best_columns cols = select_features(train)Best Columns ------------ ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child', 'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult', 'Age_categories_Senior', 'Fare_categories_0-12', 'Fare_categories_12-50', 'Fare_categories_50-100', 'Fare_categories_100+', 'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs', 'Title_Officer', 'Cabin_type_A', 'Cabin_type_B', 'Cabin_type_C', 'Cabin_type_D', 'Cabin_type_E', 'Cabin_type_Unknown', 'Sex_female', 'Sex_male', 'isalone']Selecting and Tuning AlgorithmsJust like we did with feature selection, we can can write a function to do the heavy lifting of model selection and tuning. The function we'll create will use three different algorithms and use grid search to train using different combinations of hyperparameters to find the best performing model.We can achieve this by creating a list of dictionaries— that is, a list where each element of the list is a dictionary. 
Each dictionary should contain: - The name of the particular model - An estimator object for the model - A dictionary of hyperparameters that we'll use for grid search.from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import GridSearchCV def select_model(df,features): all_X = df[features] all_y = df["Survived"] # List of dictionaries, each containing a model name, # it's estimator and a dict of hyperparameters models = [ { "name": "LogisticRegression", "estimator": LogisticRegression(), "hyperparameters": { "solver": ["newton-cg", "lbfgs", "liblinear"] } }, { "name": "KNeighborsClassifier", "estimator": KNeighborsClassifier(), "hyperparameters": { "n_neighbors": range(1,20,2), "weights": ["distance", "uniform"], "algorithm": ["ball_tree", "kd_tree", "brute"], "p": [1,2] } }, { "name": "RandomForestClassifier", "estimator": RandomForestClassifier(random_state=1), "hyperparameters": { "n_estimators": [4, 6, 9], "criterion": ["entropy", "gini"], "max_depth": [2, 5, 10], "max_features": ["log2", "sqrt"], "min_samples_leaf": [1, 5, 8], "min_samples_split": [2, 3, 5] } } ] for model in models: print(model['name']) print('-'*len(model['name'])) grid = GridSearchCV(model["estimator"], param_grid=model["hyperparameters"], cv=10) grid.fit(all_X,all_y) model["best_params"] = grid.best_params_ model["best_score"] = grid.best_score_ model["best_model"] = grid.best_estimator_ print("Best Score: {}".format(model["best_score"])) print("Best Parameters: {}\n".format(model["best_params"])) return models result = select_model(train,cols)LogisticRegression ------------------Making submissions to KaggleAfter running your function, you will have three scores from three different models. At this point in the workflow you have a decision to make: Do you want to train your best model on the holdout set and make a Kaggle submission, or do you want to go back to engineering features.You may find that adding a feature to your model doesn't improve your accuracy. In that case you should go back to data exploration and repeat the cycle again.If you're going to be continually submitting to Kaggle, a function will help make this easier. Let's create a function to automate this.def save_submission_file(model,cols,filename="submission.csv"): holdout_data = holdout[cols] predictions = model.predict(holdout_data) holdout_ids = holdout["PassengerId"] submission_df = {"PassengerId": holdout_ids, "Survived": predictions} submission = pd.DataFrame(submission_df) submission.to_csv(filename,index=False) best_rf_model = result[2]["best_model"] save_submission_file(best_rf_model,cols)We have previously considered models of the form:$$ \hat{y} = \beta X + w $$where we have measured how well the model is doing by minimising the function:$$ J\left( \beta \right) = \frac{1}{n} \lVert y - \hat{y} \rVert $$However, this method doesn't allow us to encode some of the ideas we may have about \\(\beta\\).In least squares regression we are (essentially) solving a series of equations:$$ y = X \beta $$but the problem may be ill posed: there may be no \\(\beta\\), or many, which satisfy the above equation. Also, many systems we are interested in moddeling act like low-pass filters going in the direction \\(X \beta\\), so inverting the system naively will act like a high-pass filter and will amplify noise. 
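To make the noise-amplification point concrete, here is a small, self-contained illustration that is not part of the original text: a design matrix with rapidly decaying singular values (a "low-pass" system) is inverted naively with least squares and then with the ridge-style penalty introduced next. The regularised estimate typically has a much smaller error than the naive inverse.

```python
import numpy as np

rng = np.random.default_rng(0)
n, p = 100, 20

# Build a design matrix whose singular values decay rapidly (a low-pass system)
U, _ = np.linalg.qr(rng.normal(size=(n, p)))     # orthonormal columns
V, _ = np.linalg.qr(rng.normal(size=(p, p)))
s = np.logspace(0, -6, p)                        # singular values from 1 down to 1e-6
X = U @ np.diag(s) @ V.T

beta_true = rng.normal(size=p)
y = X @ beta_true + 1e-3 * rng.normal(size=n)    # small measurement noise

# Naive least squares divides by the tiny singular values and amplifies the noise
beta_ls, *_ = np.linalg.lstsq(X, y, rcond=None)

# Ridge-style regularisation: closed form (X^T X + lambda * I)^{-1} X^T y
lam = 1e-4
beta_ridge = np.linalg.solve(X.T @ X + lam * np.eye(p), X.T @ y)

print('least-squares error:', np.linalg.norm(beta_ls - beta_true))
print('ridge error:        ', np.linalg.norm(beta_ridge - beta_true))
```

The tiny singular values play the role of the strongly damped directions mentioned above: naive inversion divides the noise by them, while the penalty term keeps those directions under control.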
We can give preference to particular solutions by instead minimising:$$ J\left( \beta \right) = \frac{1}{n} \lVert y - \hat{y} \rVert_2^2 + \lVert \Gamma \beta \rVert_2^2 $$Luckily, this equation has a closed form solution:$$ \hat{\beta} = \left(X^T X + \Gamma^T \Gamma \right)^{-1} X^T y $$which can be found the same way as the closed form solution for Linear Regression. A particularly important case is \\(\Gamma = \lambda 1\\) (a constant times the identity matrix), which is known by the name of Ridge Regression.Sometimes we have more complex priors about which solutions we require from any particular optimisation problem, and many cannot be solved by simply taking the gradient. For example$$ J\left( \beta \right) = \frac{1}{n} \lVert y - \hat{y} \rVert_2^2 + \lVert \beta \rVert_1 $$this optimisation problem is non-differentiable! Or consider$$ J\left( \beta \right) = \frac{1}{n} \lVert y - \hat{y} \rVert_2^2 + \lVert \nabla \beta \rVert_1 $$or$$ J\left( \beta \right) = \frac{1}{n} \lVert y - \hat{y} \rVert_2^2 + \lVert \beta \rVert_0 $$where$$ \lVert \beta \rVert_0 = \#\left\{ i : \beta_i \neq 0 \right\} $$is the number of non-zero coefficients. None of these optimisation problems can be solved in the straightforward way that we solved Ridge regression.These optimisation problems can be solved by using the following trick: set $$ z = \beta $$in the second term, and then optimise the following function (the last two terms enforce the constraint we introduced):$$ J\left( \beta, z, \nu \right) = \frac{1}{2} \lVert y - X \beta \rVert_2^2 + \lambda \lVert z \rVert_1 + \nu^T \left(\beta - z\right) + \frac{\rho}{2} \lVert\beta - z\rVert_2^2 $$This is cleverer than it looks, because$$ \frac{\partial J}{\partial \beta} = -X^T \left(y - X\beta\right) + \nu + \rho\left(\beta - z\right) $$and $$ \frac{\partial J}{\partial z} = \lambda - \nu - \rho\left( \beta - z\right) $$for \\( z > 0 \\), and $$ \frac{\partial J}{\partial z} = - \lambda - \nu - \rho\left( \beta - z\right) $$for \\( z < 0 \\), while \\( z = 0 \\) whenever$$ -\frac{\lambda}{\rho} \leq \beta + \frac{\nu}{\rho} \leq \frac{\lambda}{\rho} $$Combining these we find the soft-thresholding solution:$$ z = \mathrm{sign}\left(\beta + \frac{\nu}{\rho}\right) \max \left( \left\lvert \beta + \frac{\nu}{\rho} \right\rvert - \frac{\lambda}{\rho}, 0 \right) $$we can then update our weights by the following set of iterates:$$ \beta^{k+1} = \left(X^T X + \rho I\right)^{-1} \left(X^T y + \rho z^{k} - \nu^{k}\right)$$$$ z^{k+1} = S_{\frac{\lambda}{\rho}}\left(\beta^{k+1} + \nu^{k}/\rho\right) $$$$ \nu^{k+1} = \nu^{k} + \rho \left(\beta^{k+1} - z^{k+1} \right) $$where \\( S_{\lambda/\rho} \\) is the soft-thresholding operation above. This is implemented in the code below:import numpy as np import matplotlib.pyplot as plt %matplotlib inline def l2prox(y, mu): return (1.0/(1.0 + mu)) * y def l1prox(y, mu): return np.sign(y)*np.maximum(0, np.absolute(y)-mu/2.0) def ADMM(A, y, rho, mu, prox): """Alternating Direction Method of Multipliers This is a python implementation of the Alternating Direction Method of Multipliers - a method of constrained optimisation that is used widely in statistics (http://stanford.edu/~boyd/admm.html).
""" m, n = A.shape A_t_A = A.T.dot(A) w, v = np.linalg.eig(A_t_A) MAX_ITER = 10000 #Function to caluculate min 1/2(y - Ax) + l||x|| #via alternating direction methods x_hat = np.zeros([n, 1]) z_hat = np.zeros([n, 1]) u = np.zeros([n, 1]) #Calculate regression co-efficient and stepsize # r = np.amax(np.absolute(w)) # l_over_rho = np.sqrt(2*np.log(n)) * r / 2.0 # I might be wrong here # rho = mu/r #Pre-compute to save some multiplications A_t_y = A.T.dot(y) Q = A_t_A + rho * np.identity(n) Q = np.linalg.inv(Q) Q_dot = Q.dot for _ in range(MAX_ITER): #x minimisation step via posterier OLS x_hat = Q_dot(A_t_y + rho*(z_hat - u)) z_hat = prox(x_hat + u, mu) #mulitplier update u = u + rho*(x_hat - z_hat) return z_hat def plot(original, computed): """Plot two vectors to compare their values""" plt.figure(1) plt.subplot(211) plt.plot(original, label='Original') plt.plot(computed, label='Estimate') plt.subplot(212) plt.plot(original - computed) plt.legend(loc='upper right') plt.show() def test(m=50, n=200): """Test the ADMM method with randomly generated matrices and vectors""" A = np.random.randn(m, n) num_non_zeros = 10 positions = np.random.randint(0, n, num_non_zeros) amplitudes = 100*np.random.randn(num_non_zeros, 1) x = np.zeros((n, 1)) x[positions] = amplitudes y = A.dot(x) #+ np.random.randn(m, 1) plot(x, ADMM(A, y, 1.0, 1.0, l1prox)) test()No handles with labels found to put in legend.Text-Guided Editing of Images (Using CLIP and StyleGAN)!nvidia-smi -L # title Setup (may take a few minutes) !git clone https://github.com/dvschultz/StyleCLIP.git !pip install ftfy regex tqdm !pip install git+https://github.com/openai/CLIP.git %cd /content/StyleCLIP/Cloning into 'StyleCLIP'... remote: Enumerating objects: 553, done. remote: Counting objects: 100% (92/92), done. remote: Compressing objects: 100% (62/62), done. remote: Total 553 (delta 28), reused 83 (delta 26), pack-reused 461 Receiving objects: 100% (553/553), 163.08 MiB | 24.83 MiB/s, done. Resolving deltas: 100% (164/164), done. Collecting ftfy [?25l Downloading https://files.pythonhosted.org/packages/af/da/d215a091986e5f01b80f5145cff6f22e2dc57c6b048aab2e882a07018473/ftfy-6.0.3.tar.gz (64kB)  |████████████████████████████████| 71kB 6.4MB/s [?25hRequirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (2019.12.20) Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (4.41.1) Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from ftfy) (0.2.5) Building wheels for collected packages: ftfy Building wheel for ftfy (setup.py) ... [?25l[?25hdone Created wheel for ftfy: filename=f[...]This model requires a model file in the Rosinality format. 
If you have an NVIDIA official model (or one for my repo) you can convert it to the Rosinality format using [this notebook](https://colab.research.google.com/github/dvschultz/stylegan2-ada-pytorch/blob/main/SG2_ADA_PT_to_Rosinality.ipynb) Once the model is converted you can either sync your Drive or use gdown to bring the model here.from google.colab import drive drive.mount('/content/drive') #download file and keep its name !gdown --id [id of file] #optional: change the name of the file when it downloads !gdown --id [id of file] -O /path/filename.ext`Edit` will start with an image of your choice (upload a `.npz` file and set the path under `latent_path`), `free_generation` will start with a random image.experiment_type = 'free_generation' #@param ['edit', 'free_generation'] model_path = '/content/ladiesblack.pt' #@param {type:"string"} description = 'A high quality image of red flowers on a black background' #@param {type:"string"} latent_path = None #@param {type:"string"} optimization_steps = 25 #@param {type:"number"} l2_lambda = 0.008 #@param {type:"number"} create_video = True #@param {type:"boolean"}You likely don’t need to set anything else in th cell below, but you might want to edit the learning rate (`"lr"`) to see how it affects image optimization.# title Additional Arguments args = { "description": description, "ckpt": model_path, "stylegan_size": 1024, "lr_rampup": 0.05, "lr": 0.1, #default 0.1 "step": optimization_steps, "mode": experiment_type, "l2_lambda": l2_lambda, "latent_path": latent_path, "truncation": 0.5, "save_intermediate_image_every": 1 if create_video else 20, "results_dir": "results", "save_vector": True } %cd /content/StyleCLIP/ from optimization.run_optimization import main from argparse import Namespace result = main(Namespace(**args)) #@title Visualize Result from torchvision.utils import make_grid from torchvision.transforms import ToPILImage result_image = ToPILImage()(make_grid(result.detach().cpu(), normalize=True, scale_each=True, range=(-1, 1), padding=0)) h, w = result_image.size result_image.resize((h // 2, w // 2)) #@title Create and Download Video !ffmpeg -r 15 -i results/%05d.png -c:v libx264 -vf fps=25 -pix_fmt yuv420p out.mp4 from google.colab import files files.download('out.mp4')ffmpeg version 3.4.8-0ubuntu0.2 Copyright (c) 2000-2020 the FFmpeg developers built with gcc 7 (Ubuntu 7.5.0-3ubuntu1~18.04) configuration: --prefix=/usr --extra-version=0ubuntu0.2 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-lib[...]Ignore#%cd "/content/drive/My Drive/colab-sg2-ada-pytorch/stylegan2-ada-pytorch" !git config --global user.name "test" !git config --global user.email "" !git fetch origin !git stash !git checkout origin/main -- /content/StyleCLIP/optimization/run_optimization.pyUse USPTO-2M datasetdf 
= pd.read_csv('../USPTO-2M/uspto_2m.tsv', sep='\t') df.head() #only keep digital patent numbers #df['No'] = df['No'].map(lambda x: x[3:]) df = df.dropna().reset_index(drop=True) len(df) df.columns abst_list = list(df['Abstract']) title_list = list(df['Title']) all_contents = abst_list + title_list len(all_contents) all_contents[0:2]Preprocessingimport re import time from nltk.tokenize import word_tokenize from nltk.tokenize import sent_tokenize #remove non-alphabetic characters temp_all_contents = [re.sub('[^a-zA-Z]', ' ', str(item)).lower() for item in all_contents] len(temp_all_contents) #use Spacy for tokenization start_time = time.time() test_sentence = temp_all_contents[0:10000] all_words_list = [] for sent in nlp.pipe(test_sentence, batch_size=50, n_threads=4): tmp_word_list = [token.text for token in sent] all_words_list.append(tmp_word_list) print("--- %s seconds ---" % (time.time() - start_time)) #use NLTK for tokenization - faster start_time = time.time() test_sentence = temp_all_contents[0:100000] all_words = [word_tokenize(sent) for sent in test_sentence] print("--- %s seconds ---" % (time.time() - start_time)) start_time = time.time() all_words_list = [word_tokenize(sent) for sent in temp_all_contents] print("--- %s seconds ---" % (time.time() - start_time))--- 1561.3763830661774 seconds ---Training the model - gensim word2vectrain_set = all_words_list[0:1998373] from gensim.models import Word2Vec import logging import multiprocessing cores = multiprocessing.cpu_count() logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) start_time = time.time() model = Word2Vec(train_set, size = 100, sg = 1, window = 5, min_alpha = 0.001, alpha = 0.05, workers=cores-1, iter = 5 ) print("--- %s seconds ---" % (time.time() - start_time)) #save the model model.wv.save_word2vec_format('uspto_2m_abstract_word2vec.bin', binary=True) model.wv.similarity('computer','program') #analogy model.wv.most_similar(positive=['bottle', 'chip'], negative=['computer'], topn=1)Visualization 1. PCAfrom sklearn.decomposition import PCA import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline len(model.wv.vocab) selected_words = ['physics', 'chemistry', 'vehicle', 'plastics', 'hygiene', 'photography', 'electric'] words = selected_words.copy() similar_words = {} for key in selected_words: similar_words[key] = [item[0] for item in model.wv.most_similar(key, topn=5)] similar_words['physics'] for key,value in similar_words.items(): words = words + value #get vectors for all the words sample_wv = model.wv[words] fig = plt.figure(1, figsize=(20, 15)) pca = PCA(n_components=2) result = pca.fit_transform(sample_wv) plt.scatter(result[:, 0], result[:, 1]) for i, word in enumerate(words): plt.annotate(word, xy=(result[i, 0], result[i, 1]))2. 
t-SNEfrom sklearn.manifold import TSNE import numpy as np tsne = TSNE(n_components=2, random_state=0, n_iter=10000, perplexity=15) np.set_printoptions(suppress=True) result = tsne.fit_transform(sample_wv) labels = words plt.figure(figsize=(15, 15)) plt.scatter(result[:, 0], result[:, 1], c='orange', edgecolors='r') for label, x, y in zip(labels, result[:, 0], result[:, 1]): plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')Important functionsdef r_trans(y): y = np.asarray(y) return np.exp(((y - a) * (obs_log_max - obs_log_min) / (b - a)) + obs_log_min) def reverse_transform_std(y): return ((y)*(obs_log_max - obs_log_min) / (b - a)) def recube(in_array): plev_len = 52 lat_len = 36 time_len = 31 * 12 output = np.zeros([time_len, plev_len, lat_len]) for t in range(time_len): output[t,:,:] = in_array[plev_len * lat_len * (t): plev_len * lat_len * (t+1)].reshape([plev_len, lat_len]) return output def interp_to_new_lats(psp, old_lats): new_lats = lat interp_psp = [] for t in range(372): interp_psp.append(np.interp(new_lats, old_lats, psp[t])) return np.array(interp_psp) from matplotlib.image import imread from tempfile import NamedTemporaryFile def get_size(fig, dpi=100): with NamedTemporaryFile(suffix='.png') as f: fig.savefig(f.name, bbox_inches='tight', dpi=dpi) height, width, _channels = imread(f.name).shape return width / dpi, height / dpi def set_size(fig, size, dpi=100, eps=1e-2, give_up=2, min_size_px=10): target_width, target_height = size set_width, set_height = target_width, target_height # reasonable starting point deltas = [] # how far we have while True: fig.set_size_inches([set_width, set_height]) actual_width, actual_height = get_size(fig, dpi=dpi) set_width *= target_width / actual_width set_height *= target_height / actual_height deltas.append(abs(actual_width - target_width) + abs(actual_height - target_height)) if deltas[-1] < eps: return True if len(deltas) > give_up and sorted(deltas[-give_up:]) == deltas[-give_up:]: return False if set_width * dpi < min_size_px or set_height * dpi < min_size_px: return FalseLoad datain_dir = './../BNNOutput/' lat = pkl.load(open(in_dir + 'lats.pkl', 'rb')) plev = np.unique(pkl.load(open(in_dir + 'plevs.pkl', 'rb'))/100)[::-1] date = pkl.load(open(in_dir + 'dates.pkl', 'rb')) num_models = 13 df = pd.read_pickle('./../vmro3_refC1SD_70x36_13mdls_masked_extrap_and_interp.pkl') plev_orig = np.unique(df['plev'])[::-1] df = df[df['plev'] < 50000] df = df[df['plev'] > 30] obs = df['obs_toz'].copy() obs[np.log10(obs) < -9] = np.nan df['obs_toz'] = obs obs = recube(df['obs_toz'].values) train_mask = recube(df['train'].values).astype(np.bool) test_mask = recube(df['test'].values).astype(np.bool) interp_mask = recube(df['temp_interp'].values).astype(np.bool) extrap_mask = recube(df['temp_extrap'].values).astype(np.bool) obs_train = obs.copy() obs_train[~train_mask] = np.nan obs_test = obs.copy() obs_test[~test_mask] = np.nan obs_interp = obs.copy() obs_interp[~interp_mask] = np.nan obs_extrap = obs.copy() obs_extrap[~extrap_mask] = np.nan obs_min = df['obs_toz'].min() obs_max = df['obs_toz'].max() obs_log_max = np.log(obs_max) obs_log_min = np.log(obs_min) a, b = [-1, 1] # ptp = pkl.load(open('mon_trpp_1980-2010.pkl', 'rb')) # psp = pkl.load(open('ave_CCMI_psp.pkl', 'rb')) / 100 # psp is in Pa # ptp = interp_to_new_lats(ptp, np.arange(-90,90.1, 180/72)) # ptp needs regridding # BNN output weights = pkl.load(open(in_dir + 'weights.pkl', 'rb')) bias_raw = pkl.load(open(in_dir + 'bias.pkl', 'rb')) noise_raw = 
pkl.load(open(in_dir + 'noise.pkl', 'rb')) std_raw = recube(pkl.load(open(in_dir + 'std.pkl', 'rb'))) pred_raw = recube(pkl.load(open(in_dir + 'pred.pkl', 'rb'))) epi_raw = pkl.load(open(in_dir + 'epi.pkl', 'rb')) train_data_count = [] for i in range(372): train_data_count.append(np.sum(df['train'][df['mons']==i + 1])) train_data_count = np.array(train_data_count)/opt/miniconda3/envs/bnn-env/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations app.launch_new_instance() /opt/miniconda3/envs/bnn-env/lib/python3.7/site-packages/ipykernel_launcher.py:17: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations /opt/miniconda3/envs/bnn-env/lib/python3.7/site-packages/ipykernel_l[...]Rescale raw data back to 'real' values# Find the bounds of prediction ±1,2,3 std then convert to real values p1plus = r_trans(pred_raw + std_raw) p2plus = r_trans(pred_raw + 2 * std_raw) p3plus = r_trans(pred_raw + 3 * std_raw) p1minus = r_trans(pred_raw - std_raw) p2minus = r_trans(pred_raw - 2 * std_raw) p3minus = r_trans(pred_raw - 3 * std_raw) pred = r_trans(pred_raw) # This is also done for noise p1plusn = r_trans(pred_raw + noise_raw) p2plusn = r_trans(pred_raw + 2 * noise_raw) p3plusn = r_trans(pred_raw + 3 * noise_raw) p1minusn = r_trans(pred_raw - noise_raw) p2minusn = r_trans(pred_raw - 2 * noise_raw) p3minusn = r_trans(pred_raw - 3 * noise_raw) # and epistemic uncertainty p1pluse = r_trans(pred_raw + epi_raw) p2pluse = r_trans(pred_raw + 2 * epi_raw) p3pluse = r_trans(pred_raw + 3 * epi_raw) p1minuse = r_trans(pred_raw - epi_raw) p2minuse = r_trans(pred_raw - 2 * epi_raw) p3minuse = r_trans(pred_raw - 3 * epi_raw) # These values are estimates of std, noise and epi. 
# They are only estimates as the distribution is asymmetric std1sigma = (p1plus - p1minus) / 2 std2sigma = (p2plus - p2minus) / 2 std3sigma = (p3plus - p3minus) / 2 noise1sigma = (p1plusn - p1minusn) / 2 noise2sigma = (p2plusn - p2minusn) / 2 noise3sigma = (p3plusn - p3minusn) / 2 # Epistemic uncertainty could also be found by descaling all the # individual predictions and finding the std across them epi1sigma = (p1pluse - p1minuse) / 2 epi2sigma = (p2pluse - p2minuse) / 2 epi3sigma = (p3pluse - p3minuse) / 2 # Custom colours cr2 = '#f37651' cr4 = '#ad1759' cb3 = '#4d8ca6' cr3 = '#e13342' cr6 = '#35193e' with plt.rc_context(dict(sns.axes_style("darkgrid"))): colors = ['#0077BB', '#EE7733', '#009988', '#CC3311', '#EE3377', '#33BBEE', '#CCBB44', '#AA3377' ] fig = plt.figure() plt.subplot(2, 1, 1) plt.plot(date[5::12], np.mean(np.mean(noise1sigma, axis=(1,2)).reshape(31,12), axis=1) / 1e-6, label='1 Std', color=cr6) plt.plot(date[5::12], np.mean(np.mean(noise2sigma, axis=(1,2)).reshape(31,12), axis=1) / 1e-6, label='2 Std', color=cr4) plt.plot(date[5::12], np.mean(np.mean(noise3sigma, axis=(1,2)).reshape(31,12), axis=1) / 1e-6, label='3 Std', color=cr2) plt.title('Annual average aleatoric noise', fontweight='heavy') # plt.ylim([0, 2.5e-2]) # plt.gca().set_xticklabels([]) plt.ylabel('Aleatoric uncertainty (ppm)') plt.xlim([date[0], date[-1]]) plt.legend() plt.subplot(2,1,2) plt.plot([date[0], date[-1]], [1,1], label='Sondes', lw=5, color=colors[0]) plt.annotate('Sondes', [date[48], 1.5], va='center', color=colors[0], size=10) plt.plot([date[0], date[22]], [2,2], label='SAGE I', lw=5, color=colors[1]) plt.annotate('SAGE I', [date[0], 2.5], va='center', color=colors[1], size=10) plt.plot([date[57], date[283]], [3,3], label='SAGE II', lw=5, color=colors[2]) plt.annotate('SAGE II', [date[57], 3.5], va='center', color=colors[2], size=10) plt.plot([date[165], date[202]], [4,4], label='POAM II', lw=5, color=colors[3]) plt.annotate('POAM II', [date[165], 4.5], va='center', color=colors[3], size=10) plt.plot([date[219], date[310]], [5,5], label='POAM III', lw=5, color=colors[4]) plt.annotate('POAM III', [date[219], 5.5], va='center', color=colors[4], size=10) plt.plot([date[201], date[209]], [6,6], label='ILAS I', lw=5, color=colors[5]) plt.annotate('ILAS I', [date[201], 6.5], va='center', color=colors[5], size=10) plt.plot([date[278], date[285]], [7,7], label='ILAS II', lw=5, color=colors[6]) plt.annotate('ILAS II', [date[268], 7.5], va='center', color=colors[6], size=10) plt.plot([date[294], date[-1]], [8,8], label='Aura MLS', lw=5, color=colors[7]) plt.annotate('Aura MLS', [date[294], 8.5], va='center', color=colors[7], size=10) plt.xlim([date[0], date[-1]]) plt.ylim([0.5, 9.2]) plt.yticks([]) plt.subplots_adjust(bottom=0., right=1., top=1., left=0., hspace=0.02) set_size(fig, (5, 3.5)) plt.savefig('./../figures/paper3-ann_ave_ale_noise.pdf', bbox_inches='tight', dpi=200) plt.savefig('./../figures/paper3-ann_ave_ale_noise.png', bbox_inches='tight', dpi=200)Table of performance RMSE - for trainnp.sqrt(np.nanmean((obs.ravel()[df['train'].values] - pred.ravel()[df['train'].values]) ** 2)) np.sqrt(np.nanmean((obs.ravel()[df['test'].values] - pred.ravel()[df['test'].values]) ** 2)) np.sqrt(np.nanmean((obs.ravel()[df['temp_interp'].values] - pred.ravel()[df['temp_interp'].values]) ** 2)) np.sqrt(np.nanmean((obs.ravel()[df['temp_extrap'].values] - pred.ravel()[df['temp_extrap'].values]) ** 2)) np.nanmean((obs.ravel()[df['train'].values] - pred.ravel()[df['train'].values]) / obs.ravel()[df['train'].values]) 
np.nanmean((obs.ravel()[df['test'].values] - pred.ravel()[df['test'].values]) / obs.ravel()[df['test'].values]) np.nanmean((obs.ravel()[df['temp_extrap'].values] - pred.ravel()[df['temp_extrap'].values]) / obs.ravel()[df['temp_extrap'].values]) np.nanmean((obs.ravel()[df['temp_interp'].values] - pred.ravel()[df['temp_interp'].values]) / obs.ravel()[df['temp_interp'].values]) np.nanmean((obs.ravel()[df['temp_interp'].values] - pred.ravel()[df['temp_interp'].values]))For trainmask_ = df['train'].values print(np.sum(np.logical_and(p1plus.ravel()[mask_] > obs.ravel()[mask_], p1minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p2plus.ravel()[mask_] > obs.ravel()[mask_], p2minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p3plus.ravel()[mask_] > obs.ravel()[mask_], p3minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_))0.8027539273327013 0.9777784474775757 0.9960932149060563For testingmask_ = df['test'].values print(np.sum(np.logical_and(p1plus.ravel()[mask_] > obs.ravel()[mask_], p1minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p2plus.ravel()[mask_] > obs.ravel()[mask_], p2minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p3plus.ravel()[mask_] > obs.ravel()[mask_], p3minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_))0.7916104946593384 0.9742818624909261 0.9947630405475475For extrapmask_ = df['temp_extrap'].values print(np.sum(np.logical_and(p1plus.ravel()[mask_] > obs.ravel()[mask_], p1minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p2plus.ravel()[mask_] > obs.ravel()[mask_], p2minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p3plus.ravel()[mask_] > obs.ravel()[mask_], p3minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_))0.7760770975056689 0.9740896358543417 0.9952314259036948For interpmask_ = df['temp_interp'].values print(np.sum(np.logical_and(p1plus.ravel()[mask_] > obs.ravel()[mask_], p1minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p2plus.ravel()[mask_] > obs.ravel()[mask_], p2minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) print(np.sum(np.logical_and(p3plus.ravel()[mask_] > obs.ravel()[mask_], p3minus.ravel()[mask_] < obs.ravel()[mask_])) / np.sum(mask_)) with plt.rc_context(dict(sns.axes_style("darkgrid"))): plt.figure(figsize=(6,3.75)) ax = plt.gca() ax2 = ax.twinx() ax2.grid(False) ax.plot(date, train_data_count/(36 * 60), color=cb3, zorder=10) ax2.plot(date, np.mean(epi2sigma, axis=(1,2)), color=cr3) ax.tick_params(axis='y', labelcolor=cb3) ax2.tick_params(axis='y', labelcolor=cr3) ax.set_ylabel('Fraction of training \n data coverage', color=cb3) ax2.set_ylabel('Epistemic uncertainty at 2sigma (mole fraction)', color=cr3) ax.set_ylim([0, 0.8]) ax2.set_ylim([0, 4e-7]) plt.xlim([date[0], date[-1]]) plt.show()Explore > Functions to process your data%load_ext autoreload %autoreload 2 #hide from nbdev.showdoc import * #export #all_slow import wandb import librosa import torchaudio import numpy as np from fastcore.basics import * from datasets import Dataset, load_datasetWeights and Biases for EDA, Modeling Tracking and More[General Hugging Face with Weights and Biases](https://docs.wandb.ai/integrations/huggingface?utm_source=github&utm_medium=github&utm_campaign=xlsr)[Artifacts 
docs](https://docs.wandb.ai/artifacts?utm_source=github&utm_medium=github&utm_campaign=xlsr)[Datasets and Predictions docs](https://docs.wandb.ai/datasets-and-predictions?utm_source=github&utm_medium=github&utm_campaign=xlsr) Explore Your Dataset with Weights & Biasesclass WandbDataExplorer(): ''' Pass a Hugging Face Dataset and log it to a Weigths and Biases table Expects that your dataset contains a "path" column with file paths to audio files. n_samples: If "n_samples" is less than the length of ds, a random n_samples number of samples will be logged cols_to_log: If not set, the table will contain the following columns: [audio, duration, ...] ''' def __init__(self, ds:Dataset=None, n_samples:int=100, cols_to_log:list=None,resample:bool=True, new_sr:int=16_000, artifact_type:str='audio_dataset', artifact_name:str = 'my_artifact', table_name:str='explore_samples', wandb_project = 'xlsr', cols_to_exclude:list=None, verbose:bool=True): store_attr() self.ds_len = len(self.ds) if self.n_samples < self.ds_len: self.idxs = np.random.randint(0, n_samples, n_samples) else: self.idxs = list(range(self.ds_len)) def _get_audio(self, path): speech_array, sr = torchaudio.load(path) sa = speech_array[0].numpy() if self.resample: sa = librosa.resample(np.asarray(sa), sr, self.new_sr) sr = self.new_sr return sa,sr def _make_row(self, ndx:int): '''Logs all data for that row and adds and audio and duration column''' row = [] path = self.ds["path"][ndx] fn = path.split('/')[-1] # Grab each item of interest to log sa,sr = self._get_audio(path) # Create a Wandb Audio object to log the speech array too raw_audio = wandb.Audio(data_or_path=sa, sample_rate=sr, caption=fn) # Grab the duration of the track (in seconds) duration = librosa.get_duration(y=sa, sr=sr) row.append(raw_audio) row.append(duration) for col in self.cols_to_log: row.append(self.ds[col][ndx]) return row def _create_wandb_data(self): if self.cols_to_log is None: self.cols_to_log = [col for col in self.ds.column_names if col not in self.cols_to_exclude] # Set the 3rd column to be the text column if there is one for i,col in enumerate(self.cols_to_log): if ('text' in col) or ('sentence' in col): self.cols_to_log.insert(0, self.cols_to_log.pop(i)) for i,col in enumerate(self.cols_to_log): if 'path' in col: self.cols_to_log.insert(len(self.cols_to_log)-1, self.cols_to_log.pop(i)) # Log to table data list, row by row table_data = [] for ndx in self.idxs: table_data.append(self._make_row(ndx=ndx)) # Create wandb table object add all data to it self.table_cols = ['audio', 'duration'] + self.cols_to_log self.wandb_table = wandb.Table(data=table_data, columns=self.table_cols) def _log_table_to_wandb(self): # `type` can be set to whatever makes sense for you self.audio_ds_artifact = wandb.Artifact(name=self.artifact_name, type=self.artifact_type) # Add the table to the artifact self.audio_ds_artifact.add(self.wandb_table, self.table_name) # Save the artifact to self.audio_ds_artifact.save(project=self.wandb_project) def log(self): self._create_wandb_data() self._log_table_to_wandb()Run like so:```explore = WandbDataExplorer(ds=test_ds, n_samples=100, artifact_name = 'my_new_artifact', artifact_type='audio_dataset', table_name='explore_samples', wandb_project = 'xlsr', cols_to_exclude=['client_id','segment'])explore.log()``` Tracking# export def setup_wandb(entity='wandb', project_name='xlsr', log_model=True): import os import wandb # Set W&B user name os.environ["WANDB_ENTITY"] = entity # Set W&B project name. 
xlsr is a public W&B project os.environ["WANDB_PROJECT"] = project_name # Log your trained model to W&B as an Artifact if log_model: os.environ["WANDB_LOG_MODEL"] = 'true' wandb.login() return entity, project_name #setup_wandb(entity='wandb', project_name='xlsr', log_model=True) ## hide from nbdev.export import notebook2script; notebook2script()Converted 01_data.ipynb. Converted 02_aug.ipynb. Converted 03_training.ipynb. Converted 04_evaluation.ipynb. Converted 05_wandb_utils.ipynb. Converted index.ipynb.GBimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv(r'C:\Users\rodri\GitHub\My_Projects\1 Aulas Data Science\4 - Machine Learning\Regressao Linear/autompg-dataset.zip') df.head() df = df[df.horsepower != '?'] df.drop('car name', axis=1, inplace=True) from sklearn.model_selection import train_test_split X = df.drop(['mpg'], axis=1) y = df['mpg'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) # Import RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import GradientBoostingClassifier from sklearn import metrics # Instantiate gb gb = GradientBoostingRegressor(learning_rate=0.01, n_estimators=500, random_state=42) # Instantiate sgbr sgbr = GradientBoostingRegressor(max_depth=4, subsample=0.9, max_features=0.75, learning_rate=0.01, n_estimators=500, random_state=42) # Fit gb to the training set gb.fit(X_train, y_train) sgbr.fit(X_train, y_train) y_pred = gb.predict(X_test) print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) y_pred = sgbr.predict(X_test) print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) sgbr.feature_importances_ # Create a pd.Series of features importances importances = pd.Series(data=sgbr.feature_importances_, index= X_train.columns) # Sort importances importances_sorted = importances.sort_values() # Draw a horizontal barplot of importances_sorted importances_sorted.plot(kind='barh', color='lightgreen') plt.title('Features Importances') plt.show() df = pd.read_csv(r'C:\Users\rodri\GitHub\My_Projects\1 Aulas Data Science\Data Sets/breast-cancer-wisconsin-data.zip') df.head() d={'M':1,'B':0} df['diagnosis'] = df['diagnosis'].map(d) df['diagnosis'].value_counts() from sklearn.model_selection import train_test_split features = df.drop(['id', 'diagnosis', 'Unnamed: 32'], axis=1) targets = df['diagnosis'] X_train, X_test, y_train, y_test = train_test_split(features, targets, test_size=0.30, stratify=targets,random_state=1) # Instantiate gb gb = GradientBoostingClassifier(learning_rate=0.01, n_estimators=350, random_state=42) # Instantiate sgbr sgbr = GradientBoostingClassifier(subsample=0.8, max_features=0.70, learning_rate=0.001, n_estimators=750, random_state=42) # Fit gb to the training set gb.fit(X_train, y_train) sgbr.fit(X_train, y_train) # Import accuracy_score from sklearn.metrics import accuracy_score # Predict test set labels y_pred = gb.predict(X_test) # Compute test set accuracy acc = accuracy_score(y_test, y_pred) print("Test set accuracy: {:.2f} %".format(acc*100)) # Predict test set labels y_pred = sgbr.predict(X_test) # Compute test set accuracy acc = accuracy_score(y_test, y_pred) print("Test set accuracy: {:.2f} %".format(acc*100)) 
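# Optional additional check: both classifiers expose predict_proba, so a
# threshold-free comparison via ROC AUC is also possible.
from sklearn.metrics import roc_auc_score
print('gb   ROC AUC: {:.3f}'.format(roc_auc_score(y_test, gb.predict_proba(X_test)[:, 1])))
print('sgbr ROC AUC: {:.3f}'.format(roc_auc_score(y_test, sgbr.predict_proba(X_test)[:, 1])))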
y_pred_proba = gb.predict_proba(X_test) y_pred_proba y_predSmall Assignment 2 Due September 24th by 11:59pm PT. Download your notebook with your code and responses as a PDF via HTML and submit to Gradescope.The following model is a simplified version of the multiple regression model used by Biddle and Hammermesh (1990) to study the tradeoff between time spent sleeping and working and to look at other factors affecting sleep:$\textrm{sleep}=\beta_0+\beta_1\textrm{totwrk}+\beta_2\textrm{yrsmarr}+\beta_3\textrm{yngkid}+u$where sleep and total work are measured in minutes per week, yrsmarr is the number of years married, and yngkid is a dummy variable (taking a value of 0 or 1) for whether the person has a young child. 1. If adults trade off sleep for work, what is the sign of $\beta_1$?(Note: typing the words "beta 1" is fine. If you want to have it as math notation, type: $ \beta_1 $ . You can double click this cell and copy the code.) Double click to type your answer here. 2. What signs do you think $\beta_2$ and $\beta_3$ will have? Double click to type your answer here. 3. Load in the dataset sleep75.dta and estimate the above model using `lm()`. Display the results with the `summary()` command. Hint: Remember that this is a .dta file, meaning that the `haven` package and `read_dta()` commands will be useful. You may also look at the Jupyter Notebook for section 3, which uses these data, for some example code.##Code your answer here!Drawing spirals**Objective:** deepen the concepts of functions and loops. Definition of a spiral: a spiral is a shape made of segments whose length increases at each iteration. Exercise 1: Write a program that draws a spiral starting with a segment length of 2, increasing it by 1 each time. The chosen angle is 30. Exercise 2: Modify your program by writing a function spirale() that carries out each step of the main program. Exercise 3: Rewrite your function spirale() with a loop running up to 20. Exercise 4: Write a function spirale(ns,angle) with two parameters: the number of segments and the angle. Exercise 5 (a step forward): Is it possible to improve the curvature of the spiral, i.e. to refine each segment into an arc of a circle?**Hints:** Think about how to draw the arc of the spiral (with a loop). The following origine() function returns the turtle to the origin:def origine(): t.penup() t.goto(0,0) t.setheading(0) t.pendown()Overview This is a simple demo of how to use the Mesh plugin for TensorBoard. The demo will load a static triangulated mesh (in PLY format), create a mesh summary with it and then display it in TensorBoard. Setup Importsfrom __future__ import absolute_import from __future__ import division from __future__ import print_function # Uninstall tensorboard and tensorflow !pip uninstall -q -y tensorboard !pip uninstall -q -y tensorflow # Install nightly TensorFlow with nightly TensorBoard. !pip install tf-nightly # Install trimesh lib to read .PLY files. !pip freeze | grep -qF 'trimesh==' || pip install trimesh %load_ext tensorboard import os import numpy as np import tensorflow as tf import trimesh import tensorboard from tensorboard.plugins.mesh import summary as mesh_summary sample_mesh = 'https://storage.googleapis.com/tensorflow-graphics/tensorboard/test_data/ShortDance07_a175_00001.ply' log_dir = '/tmp/mesh_demo' batch_size = 1 !rm -rf /tmp/mesh_demoRead sample .PLY files# Camera and scene configuration. 
config_dict = { 'camera': {'cls': 'PerspectiveCamera', 'fov': 75}, 'lights': [ { 'cls': 'AmbientLight', 'color': '#ffffff', 'intensity': 0.75, }, { 'cls': 'DirectionalLight', 'color': '#ffffff', 'intensity': 0.75, 'position': [0, -1, 2], }], 'material': { 'cls': 'MeshStandardMaterial', 'roughness': 1, 'metalness': 0 } } # Read all sample PLY files. mesh = trimesh.load_remote(sample_mesh) vertices = np.array(mesh.vertices) # Currently only supports RGB colors. colors = np.array(mesh.visual.vertex_colors[:, :3]) faces = np.array(mesh.faces) # Add batch dimension, so our data will be of shape BxNxC. vertices = np.expand_dims(vertices, 0) colors = np.expand_dims(colors, 0) faces = np.expand_dims(faces, 0)Create summaries and session# Create data placeholders of the same shape as data itself. vertices_tensor = tf.placeholder(tf.float32, vertices.shape) faces_tensor = tf.placeholder(tf.int32, faces.shape) colors_tensor = tf.placeholder(tf.int32, colors.shape) meshes_summary = mesh_summary.op( 'mesh_color_tensor', vertices=vertices_tensor, faces=faces_tensor, colors=colors_tensor, config_dict=config_dict) # Create summary writer and session. writer = tf.summary.FileWriter(log_dir) sess = tf.Session()WARNING: Logging before flag parsing goes to stderr. W0509 14:00:39.764862 140689019053952 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tensorboard/plugins/mesh/summary.py:59: The name tf.summary.tensor_summary is deprecated. Please use tf.compat.v1.summary.tensor_summary instead. W0509 14:00:39.776251 140689019053952 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tensorboard/plugins/mesh/summary.py:125: The name tf.summary.merge is deprecated. Please use tf.compat.v1.summary.merge instead.Run the model, save summaries to disksummaries = sess.run([meshes_summary], feed_dict={ vertices_tensor: vertices, faces_tensor: faces, colors_tensor: colors, }) # Save summaries. for summary in summaries: writer.add_summary(summary)TensorBoard%tensorboard --logdir=/tmp/mesh_demoCase8-challenge00_raeff_pix2wcs Modified version from Case7-0challengef by . In this note, we estimate the field parameters and distortion parameters from the observed positions on the focal plane in the overlapped plates. We also use reference stars (Gaia stars) whose sky coordinates are known in a certain accuracy. The SIP-convention distortion is considered in this note. Preparation First, we load the data from https://github.com/xr0038/jasmine_warpfield/tree/master/challenge/case8.import astropy.io.ascii as asc import astropy.units as u objects = asc.read('/Users/dkawata/work/obs/projs/JASMINE-Mission/analysis-testing-e2e/jasmine_warpfield/challenge/case8/case8_challenge_00.txt') #consists of x (um), y (um), catalog_id, ra (deg), dec (deg), and field. pointings = asc.read('/Users/dkawata/work/obs/projs/JASMINE-Mission/analysis-testing-e2e/jasmine_warpfield/challenge/case8/case8_challenge_00_pointing.txt') # consists of field, ra (deg), dec (deg), and pa (deg).We can convert the units of x and y from um to pix with assuming the pixel size to be 15 um. However, we will use um unit for the detector coordinates. The input data are created with Sip definition of crpix=[0,0] and origin=1, which map the origin to [0 um, 0 um].pix_size = 15.*u.um # objects['x'] = (objects['x']/pix_size).si # objects['y'] = (objects['y']/pix_size).si # objects: x (px), y (px), catalog_id, ra (deg), dec (deg), and field. 
# pointings: field, ra (deg), dec (deg), and pa (deg).Then, we change the ids for easy handling.from astropy.table import unique import numpy as np ids = unique(objects, keys='catalog_id')['catalog_id'] objects.add_column(-1, name='id') for i in range(0, np.size(ids)): pos = np.where(objects['catalog_id']==ids[i]) objects['id'][pos] = i objects.remove_column('catalog_id') objects.rename_column('id', 'catalog_id')Here, we make some arrays for further analysis. One new array is true_radec which stores true ra/dec values. Duplicated information (rows for the same object) is removed, and the rows are sorted with object ids. Another new array is observed_xy. It contains field ids, observed x/y positions on the focal plane, catalog ids. We rename ra, dec to ra_est, dec_est to store the estimated sky positions.true_radec = objects['catalog_id', 'ra', 'dec'].copy() true_radec.sort('catalog_id') true_radec = unique(true_radec, keys='catalog_id') # consists of catalog_id, ra (deg), and dec (deg). observed_xy = objects['field', 'x', 'y', 'catalog_id', 'ra', 'dec'].copy() # observed_xy.rename_column('ra', 'ra_est') # observed_xy.rename_column('dec', 'dec_est') observed_xy.add_column(observed_xy['ra'], name='ra_est') observed_xy.add_column(observed_xy['dec'],name='dec_est') # observed_xy will have field, x (px), y (px), catalog_id, and estimated ra/dec (deg). # initializing ra_est and dec_est observed_xy['ra_est'] = 0.0 observed_xy['dec_est'] = 0.0In addition, we make another array which stores field parameters, ra and dec (deg) of the origin of the pointing and position angle, pa (deg). The plate scale, um pixel scale to deg in the sky, is assumed to be the same value for all plates. At this time, an approximated (initial guess) value is stored in a variable (plate_scale).field_params = pointings.copy() # field, ra (deg), dec (deg), and pa (deg). true_field_params = field_params.copy() # field_params['pa'] -= 240.0 # offset? # plate_scale = 8.e-6*u.deg*(pix_size/u.um).si # in deg/pix plate_scale = 8.e-6*u.deg/u.um print(plate_scale)8e-06 deg / umLet's check the object distribution on sky.import matplotlib.pylab as plt import numpy as np color = ['red', 'blue', 'green', 'orange'] for i in range(0, np.max(field_params['field'])+1): pos = np.where(objects['field']==i) plt.scatter(objects['ra'][pos], objects['dec'][pos], marker='o', facecolor='None', edgecolor=color[i], s=10*i+10) plt.xlabel('ra (deg)') plt.ylabel('dec (deg)')We can see that the data consists of four image plates (different colours indicating the objects observd by the different plantes) and that the overlapped region has a size of about a 1/4 FoV. We select the objects in the overlapped region for further analysis. 
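Before making this cut, it can help to check how many plates each star actually lands on; a minimal sketch, assuming the observed_xy table built above:
```
# count how many plates observed each catalogue entry
ids_all, counts = np.unique(observed_xy['catalog_id'], return_counts=True)
# histogram of multiplicities: entry k is the number of stars seen on k plates
print(np.bincount(counts))
```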
Here, we select the regions all 4 plates overlaps, but we can use the region overlapped with at least 2 plates.true_radec_overlap = true_radec.copy() observed_xy_overlap = observed_xy.copy() for cid in true_radec['catalog_id']: if np.count_nonzero(observed_xy['catalog_id']==cid)!=4: # if np.count_nonzero(observed_xy['catalog_id']==cid)<=1: pos = np.where(true_radec_overlap['catalog_id']==cid)[0] true_radec_overlap.remove_rows(pos) pos = np.where(observed_xy_overlap['catalog_id']==cid)[0] observed_xy_overlap.remove_rows(pos) print(' The number of overlapped unique stars =', len(true_radec_overlap)) print(' The total number of observations of these overlapped stars =', len(observed_xy_overlap))The number of overlapped unique stars = 222 The total number of observations of these overlapped stars = 888Let's check the distribution of the selected unique objects.plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='o', facecolor='None', edgecolor='orange') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') print(len(true_radec_overlap['ra']))222These objects will be used for the following analysis. We again modify the catalog id for easy handling.ids = unique(true_radec_overlap, keys='catalog_id')['catalog_id'] true_radec_overlap.add_column(-1, name='id') observed_xy_overlap.add_column(-1, name='id') for i in range(0, np.size(ids)): pos = np.where(true_radec_overlap['catalog_id']==ids[i]) true_radec_overlap['id'][pos] = i pos = np.where(observed_xy_overlap['catalog_id']==ids[i]) observed_xy_overlap['id'][pos] = i true_radec_overlap.remove_column('catalog_id') true_radec_overlap.rename_column('id', 'catalog_id') observed_xy_overlap.remove_column('catalog_id') observed_xy_overlap.rename_column('id', 'catalog_id')First guess of the positions At first, we define a wcs constructor, including SIP polynomial distortion convention, https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf and https://docs.astropy.org/en/stable/api/astropy.wcs.Sip.html.from astropy.wcs import WCS from astropy.wcs import Sip import astropy.units as u def wcs(ra_ptg, dec_ptg, pa_ptg, scale, a=None, b=None, ap=None, bp=None): w = WCS(naxis=2) w.wcs.crpix=[0,0] w.wcs.cdelt=np.array([-scale, scale]) w.wcs.crval=[ra_ptg, dec_ptg] w.wcs.ctype=["RA---TAN-SIP", "DEC--TAN-SIP"] w.wcs.pc=[[ np.cos(pa_ptg*u.deg), -np.sin(pa_ptg*u.deg)], [np.sin(pa_ptg*u.deg), np.cos(pa_ptg*u.deg)]] # if a is not None and b is not None: w.sip = Sip(a, b, ap, bp, [0, 0]) return wThen, we estimate the sky coordinates from the observed focal-plane positions and (approximated) field parameters. 
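As a quick illustration of this constructor, a single detector position can be mapped to the sky with all_pix2world; the pointing values below are made up for the example, and only the plate_scale defined above is reused:
```
w_demo = wcs(265.6, -28.8, 302.0, plate_scale.value)  # illustrative pointing, no SIP terms
print(w_demo.all_pix2world([[1000.0, -2000.0]], 0))   # -> [[ra, dec]] in deg
```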
Here, we do not add the distorption, but naively convert pixel coordinate (x, y) to sky coordinate, ($\alpha$, $\delta$) (ra_est, dec_est).for i in range(0, np.size(field_params)): fp = field_params[i] w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale.value) pos = np.where(observed_xy_overlap['field']==fp['field']) ret = w.all_pix2world(np.concatenate(([observed_xy_overlap[pos]['x']], [observed_xy_overlap[pos]['y']])).T, 0) observed_xy_overlap['ra_est'][pos] = ret[:, 0] observed_xy_overlap['dec_est'][pos] = ret[:, 1]Let's check the true positions and estimated positions.plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True') plt.scatter(observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est'], marker='+', label='Estimated') print(' number of stars used =', len(observed_xy_overlap['ra_est'])) plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.legend()number of stars used = 888Test for distortion with A/B, using the following c and d.c = np.zeros(shape=(3, 3)) d = np.zeros(shape=(3, 3)) c[0,2]=-2.34153374723336e-09 c[1,1]=1.5792128155073916e-08 c[1,2]=7.674347291529089e-15 c[2,0]=-4.694743859349522e-09 c[2,1]=5.4256004358596465e-15 c[2,2]=-4.6341769281246224e-21 d[0,2]=-1.913280244657798e-08 d[1,1]=-5.622875292409728e-09 d[1,2]=-1.0128311203344238e-14 d[2,0]=3.1424733259527392e-09 d[2,1]=-9.08024075521211e-15 d[2,2]=-1.4123037013352912e-20We check if all_pix2world takes into account SIP parameters of A and B by comparing ($\alpha$, $\delta$) converted from (x, y) pixel coordinate without distortion (above observed_xy_overlap['ra_est'] and observed_xy_overlap['dec_est']) and ($\alpha$, $\delta$) converted from (x, y) pixel coordinate with A and B, ra_dist, dec_dist below.# print(observed_xy_overlap['ra_est']) c *= 100.0 ra_dist = np.zeros_like(observed_xy_overlap['ra_est']) dec_dist = np.zeros_like(observed_xy_overlap['dec_est']) for i in range(0, np.size(field_params)): fp = field_params[i] w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale.value, a=c, b=d) pos = np.where(observed_xy_overlap['field']==fp['field']) ret = w.all_pix2world(np.concatenate(([observed_xy_overlap[pos]['x']], [observed_xy_overlap[pos]['y']])).T, 0) ra_dist[pos] = ret[:,0] dec_dist[pos] = ret[:,1] print(' diff ra=', ra_dist-observed_xy_overlap['ra_est']) print(' diff dec=', dec_dist-observed_xy_overlap['dec_est']) plt.scatter(ra_dist, dec_dist, marker='x', label='Distorted') plt.scatter(observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est'], marker='+', label='No distortion') print(' number of stars used =', len(observed_xy_overlap['ra_est'])) plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.legend()diff ra= ra_est deg ----------------------- 1.2170394995791867e-05 5.816765053623385e-06 -9.162415494756715e-06 -1.7497795170129393e-05 -1.4185463442117907e-05 -1.119997426712871e-05 6.512692038995738e-06 5.819727562084154e-07 1.0868943661535013e-05 8.796283736955957e-07 ... -2.5157424943245132e-06 -8.514140176885121e-06 2.359488917136332e-06 4.017218486751517e-06 7.2278966172234504e-06 3.721560091207721e-05 1.5106658224794955e-05 3.1841070267546456e-05 4.521866071627301e-05 9.789179591734865e-05 Length = 888 rows diff dec= dec_est deg ----------------------- 1.631389871903366e-05 7.99703507325944e-06 -1.3330118619592213e-05 -2.6079412609902874e-05 -2.134303151279937e-05 -1.6830686099922332e-05 7.842507688593514e-06 8.140157774505496e-07 1.477228683555154e-05 1.222328322114663e-06 ... 
-3.754488030693892e-06 -1.2820045057537754e-05 [...]Check if these stars cover the large enough detector region by looking at their (x, y) position in the detector coordinate.plt.scatter(objects['x'], objects['y'], marker='x', label='All', s=5) print(' number of all stars=', len(objects['x'])) plt.scatter(observed_xy_overlap['x'], observed_xy_overlap['y'], marker='+', label='Overlap') plt.xlabel('x (pix)') plt.ylabel('y (pix)')number of all stars= 2563Here, there are four estimated (ignoring distortion) positions, (observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est']), in the sky coordinate for each unique object. We take their mean values as the first-guess positions and store them in radec_est array.from astropy.table import Table radec_est = Table(names=['catalog_id', 'ra_est', 'dec_est'], \ dtype=['int64', 'float64', 'float64']) # units=[None, u.deg, u.deg], \ # dtype=['int64', 'float64', 'float64']) radec_est['ra_est'].unit = u.deg radec_est['dec_est'].unit = u.deg cat_ids = unique(observed_xy_overlap, 'catalog_id')['catalog_id'] for i in cat_ids: pos = np.where(observed_xy_overlap['catalog_id'] == i) ra = np.mean(observed_xy_overlap[pos]['ra_est'])*u.deg dec = np.mean(observed_xy_overlap[pos]['dec_est'])*u.deg radec_est.add_row([i, ra, dec]) # print('radec_est=', radec_est)Let's check the estimated positions.plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True') plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='First guess') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.legend()Parameter adjustment At first, we define a function which calculates x/y positions from the ra/dec values estimated above and the field/catalog ids.def xy_calculator(observed_xy, field_params, plate_scale, ap, bp, radec_info): # observed_xy: consists of field, x (px), y (px), catalog_id, ra_est (deg), and dec_est(deg). # field_params: consists of field, ra (deg), dec (deg), and pa (deg). # radec_info: consists of catalog_id, ra_est (deg), and dec_est (deg). observed_xy_cp = observed_xy.copy() observed_xy_cp.rename_column('x', 'x_est') observed_xy_cp.rename_column('y', 'y_est') observed_xy_cp['x_est'] = None observed_xy_cp['y_est'] = None observed_xy_cp['ra_est'] = None observed_xy_cp['dec_est'] = None for i in range(0, np.size(radec_info)): pos = np.where(observed_xy_cp['catalog_id']==radec_info[i]['catalog_id']) observed_xy_cp['ra_est'][pos] = radec_info[i]['ra_est'] observed_xy_cp['dec_est'][pos] = radec_info[i]['dec_est'] for i in range(0, np.size(field_params)): fp = field_params[i] w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale, ap=ap, bp=bp) pos = np.where(observed_xy_cp['field']==fp['field']) radec0 = np.concatenate(([observed_xy_cp[pos]['ra_est']], [observed_xy_cp[pos]['dec_est']])).T ret = w.sip_foc2pix(w.wcs_world2pix(radec0, 1)-w.wcs.crpix, 1) observed_xy_cp['x_est'][pos] = ret[:, 0] observed_xy_cp['y_est'][pos] = ret[:, 1] return observed_xy_cp['x_est', 'y_est']Next, we define a function to map from (x, y) pixel coordinate to ($\alpha$, $\delta$), using A/B Sip distortion parameters using wcs.all_pix2world, https://docs.astropy.org/en/stable/api/astropy.wcs.WCS.htmlastropy.wcs.WCS.all_pix2world with input field parameters of $\alpha_{\rm ptgs}$ (deg), $\delta_{\rm ptgs}$ (deg) and pa$_{\rm ptgs}$ (deg) of each field (plate) pointing. This conversion is described as follows. 
Here, we follow the description at https://www.stsci.edu/itt/review/DrizzlePac/HTML/ch33.html Definition CRVAL1: $\alpha_{\rm ptgs}$, right ascension at the pointing centre.CRVAL2: $\delta_{\rm ptgs}$, declination at the pointing centre.CRPIX1: the x reference location of the image plate, corresponding to the pointing centre. We set CRPIX1=0.CRPIX2: the y reference location of the image plate, corresponding to the pointing centre. We set CRPIX2=0.CDELT1: x-scale. We set -scale.CDELT2: y-scale. We set scale. wcs computes the sky coordinate, ($\alpha$, $\delta$), of a star at (x, y) on the detector as follows (this needs to be confirmed...).According to https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf , the intermediate world coordinates $(\alpha', \delta')$ are computed with$ \begin{pmatrix}\alpha' \\\delta' \\\end{pmatrix}=\begin{pmatrix}CD_{11} & CD_{12} \\CD_{21} & CD_{22} \\\end{pmatrix}\begin{pmatrix}x+f(x,y) \\y+g(x,y) \\\end{pmatrix},$where $$f(x,y) = \sum_{p,q} A_{pq} x^p y^q \ \ \ \ p+q\leq {\rm A_{ORDER}}, \\g(x,y) = \sum_{p,q} B_{pq} x^p y^q \ \ \ \ p+q\leq {\rm B_{ORDER}}. \\$$${A_{ORDER}}$ and ${B_{ORDER}}$ are the orders of the polynomials. For example, with ${A_{ORDER}}=3$, $f(x,y)$ can be written as$f(x,y) = A_{20} x^2 + A_{02} y^2 + A_{11} x y + A_{21} x^2 y + A_{12} x y^2 + A_{30} x^3 + A_{03} y^3.$Note that the $p+q\leq1$ components, i.e. $A_{00}$, $A_{01}$ and $A_{10}$, are ignored here. **Therefore, we set $A_{00}=A_{01}=A_{10}=0$.** The transformation from the intermediate world coordinates $(\alpha', \delta')$ to the world coordinates $(\alpha, \delta)$ is taken care of by WCS ([Greisen & Calabretta 2002, A&A, 395, 1061](https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G/abstract); [Calabretta & Greisen 2002, A&A, 395, 1077](https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1077C/abstract)). The CD matrix above is considered to be (**need to check**)$\begin{pmatrix}CD_{11} & CD_{12} \\CD_{21} & CD_{22} \\\end{pmatrix}=\begin{pmatrix}{\rm CDELT1} \cos({\rm pa_{ptgs}}) & -{\rm CDELT2} \sin({\rm pa_{ptgs}}) \\{\rm CDELT1} \sin({\rm pa_{ptgs}}) & {\rm CDELT2} \cos({\rm pa_{ptgs}}) \\\end{pmatrix}$according to equation (189) in [Calabretta & Greisen 2002, A&A, 395, 1077](https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1077C/abstract). Note that here we do not have a term describing a linear skewness.def radec_calculator_ab(observed_xy, field_params, plate_scale, a, b): # observed_xy: consists of field, x (px), y (px), catalog_id, ra_est (deg), and dec_est (deg). # field_params: consists of field, ra (deg), dec (deg), and pa (deg). observed_xy_cp = observed_xy.copy() # observed_xy_cp.rename_column('x', 'x_est') # observed_xy_cp.rename_column('y', 'y_est') # observed_xy_cp['x_est'] = None # observed_xy_cp['y_est'] = None observed_xy_cp['ra_est'] = None observed_xy_cp['dec_est'] = None for i in range(0, np.size(field_params)): fp = field_params[i] w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale, a=a, b=b) pos = np.where(observed_xy_cp['field']==fp['field']) pix0 = np.concatenate(([observed_xy_cp[pos]['x']], [observed_xy_cp[pos]['y']])).T ret = w.all_pix2world(pix0, 1) # ret = w.sip_pix2foc(w.wcs_pix2world(pix0, 1)-w.wcs.crval, 1) observed_xy_cp['ra_est'][pos] = ret[:, 0] observed_xy_cp['dec_est'][pos] = ret[:, 1] return observed_xy_cp['ra_est', 'dec_est']Using scipy.optimize least_squares, assuming the pointing sky coordinates, RA and DEC, are accurately known. Define the model function to solve with Least Squares. 
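The model function below packs all free parameters into one flat vector and recovers them with np.split; a small sketch of that packing convention, using the n_fields = 4 plates and dim_sip = 3 adopted in the next cell:
```
n_fields_demo, dim_sip_demo = 4, 3
n_ab = (dim_sip_demo + 1)**2                              # 16 coefficients per SIP matrix
params_demo = np.concatenate((np.zeros(n_fields_demo),    # one position angle per plate
                              [8.e-6],                    # common plate scale (deg/um)
                              np.zeros(n_ab),             # flattened A matrix
                              np.zeros(n_ab)))            # flattened B matrix
pa_d, scale_d, a_d, b_d = np.split(params_demo, [n_fields_demo, n_fields_demo + 1,
                                                 n_fields_demo + 1 + n_ab])
print(pa_d.shape, scale_d.shape, a_d.shape, b_d.shape)    # (4,) (1,) (16,) (16,)
```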
Here, we consider that the position of the pointings, $(\alpha_{\rm ptgs}, \delta_{\rm ptgs})$ are accurately known, and they are fixed. Following equation (1) of [Bernstein et al. (2017)](https://ui.adsabs.harvard.edu/abs/2017PASP..129g4503B/abstract), we minimise the following $\chi^2$ to fit the parameters of position angle of ${\rm pa_{ptgs,p}}$ for each image plate, p, and the common "scale" parameter to convert pixel unit of um to world coordinate unit of deg, in addition to the components of ${\rm A_p}$, ${\rm B_p}$ distortion matrices. $\chi^2 = \sum_i (w_i | \alpha_{i,s} - \bar{\alpha_s}|)^2,$where $\alpha_{i,s}({\rm pa_{ptgs,p}, {\rm scale}, A_p, B_p})$ is the world coordinate position estimated from $(x_i, y_i)$ for star s on a plate p, using the parameters for plate p, ${\rm pa_{ptgs,p}, A_p, B_p}$. $\bar{\alpha_s}$ is the mean sky position of star s, and described as$$ \bar{\alpha_s} = \frac{\sum_{i\in s} w_i \alpha_{i,s}}{\sum_{i\in s} w_i}.$$Here, we do not take into account any error, and set $w_i = 1$.import copy # def model_func(params, n_fields, dim_sip, observed_xy): def model_func(params, ra_ptgs, dec_ptgs, n_fields, dim_sip, observed_xy): # params = (ra_ptgs, dec_ptgs, pa_ptg..., scale, a..., b...) pa_ptgs, scale, a, b =\ np.split(params, [n_fields, n_fields+1,\ n_fields+1+(dim_sip+1)**2]) # ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ # np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ # 3*n_fields+1+(dim_sip+1)**2]) field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) # names=['ra', 'dec', 'pa', 'field'],\ # units=[u.deg, u.deg, u.deg, None],\ # dtype=['float64', 'float64', 'float64', 'int64']) field_params['ra'].unit = u.deg field_params['dec'].unit = u.deg field_params['pa'].unit = u.deg field_params['field'] = np.arange(0, np.size(field_params)) # use copy of observed_xy observed_xy_cp = observed_xy.copy() a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1))) # for mn in mns: # for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])): # n = mn - m # ap_matrix[m, n] = 0 # bp_matrix[m, n] = 0 a_matrix[0, 0] = 0.0 a_matrix[0, 1] = 0.0 a_matrix[1, 0] = 0.0 b_matrix[0, 0] = 0.0 b_matrix[0, 1] = 0.0 b_matrix[1, 0] = 0.0 m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) # compute ra/dec from x/y with the parameters. 
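# Note on the lines below: radec_calculator_ab applies the current guess of pa/scale/A/B to every
# (x, y) measurement, and the residual returned at the end of this function is each observation's
# (alpha*, delta) minus the mean position of the same star over all plates, i.e. the terms of the
# chi^2 defined above with w_i = 1.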
ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \ a_matrix, b_matrix) observed_xy_cp['ra_est'] = ret['ra_est'] observed_xy_cp['dec_est'] = ret['dec_est'] # compute the mean ra/dec for unique stars cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id'] ra_mean = np.zeros(len(observed_xy['ra_est'])) dec_mean = np.zeros(len(observed_xy['dec_est'])) # effective RA observed_xy_cp['ra_est'] = observed_xy_cp['ra_est']*np.cos(observed_xy_cp['dec_est'].data*u.deg) for i in cat_ids: pos = np.where(observed_xy_cp['catalog_id'] == i) ra_mean[pos] = np.mean(observed_xy_cp[pos]['ra_est'])*u.deg dec_mean[pos] = np.mean(observed_xy_cp[pos]['dec_est'])*u.deg radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est'])) radec_est_mean = np.concatenate((ra_mean, dec_mean)) residuals = radec_est - radec_est_mean return residualsNext, we execute the least-square calculation to derive the field parameters and sky positions of the objects in the overlapped region.from scipy.optimize import least_squares import time dim_sip = 3 a = np.zeros(shape=(dim_sip+1, dim_sip+1)) b = np.zeros(shape=(dim_sip+1, dim_sip+1)) # constructing a_init (initial parameter set). # a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], field_params['pa'], \ # [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. a_init = np.array(np.concatenate((field_params['pa'], \ [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. print(' # of fitting parameters =', len(a_init)) # constraining ra/dec values in 'observed' between -180 and 180 deg. # measured = np.concatenate((observed_xy_overlap['x'], observed_xy_overlap['y'])) # print(' # of data points =', len(measured)) #pos = np.where(measured>180.) #measured[pos] -= 360. #pos = np.where(measured<-180.) #measured[pos] += 360. start = time.time() # result = least_squares(model_func, a_init, loss='linear', args=(np.size(field_params), \ # dim_sip, observed_xy_overlap), \ # verbose=2) result = least_squares(model_func, a_init, loss='linear', args=(field_params['ra'], \ field_params['dec'], np.size(field_params), dim_sip, observed_xy_overlap), \ verbose=2) print(' time=',time.time()-start) ## pa should be a positive value between 0 and 360. #if result[3] < 0: # result[3] = -result[3] # result[2] = result[2] + 180.0 # #if result[2] > 360.0 or result[2] < 0.0: # result[2] = result[2]%360.0# of fitting parameters = 37 Iteration Total nfev Cost Cost reduction Step norm Optimality 0 1 4.8745e-03 6.54e+04 1 2 2.9443e-09 4.87e-03 6.03e+02 3.18e+00 2 3 2.7798e-09 1.65e-10 1.21e+03 1.35e-03 3 4 2.7798e-09 1.37e-16 2.41e+03 1.33e-06 4 19 2.7798e-09 0.00e+00 0.00e+00 1.33e-06 `xtol` termination condition is satisfied. Function evaluations 19, initial cost 4.8745e-03, final cost 2.7798e-09, first-order optimality 1.33e-06. 
time= 11.082858800888062Checking results PreparationExtracting the resaltnat fitting parameter values.n_fields = np.size(field_params) n_objects = np.size(radec_est) true_ra_ptgs = true_field_params['ra'].data true_dec_ptgs = true_field_params['dec'].data true_pa_ptgs = true_field_params['pa'].data # ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ # np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ # 3*n_fields+1+(dim_sip+1)**2]) pa_ptgs, scale, a, b =\ np.split(result.x, [n_fields, n_fields+1,\ n_fields+1+(dim_sip+1)**2]) ra_ptgs = field_params['ra'].data dec_ptgs = field_params['dec'].data a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # A/B scaling m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) fit_field_params['ra'].unit = u.deg fit_field_params['dec'].unit = u.deg fit_field_params['pa'].unit = u.deg fit_field_params['field'] = np.arange(0, np.size(field_params))PointingsThe central position of the pointings are not fitted, but fixed with the true values. Hence, the differences should be zero.print(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs) print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs) print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs)pointing centre (fit) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405] pointing centre (true) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405] difference ra, dec (deg) = [0. 0. 0. 0.] [0. 0. 0. 
0.]Pointings position angles without A00=A01=A10=0print(' position angle (fit) (deg) =', pa_ptgs) print(' position angle (true) (deg) =', true_pa_ptgs) print(' difference =', pa_ptgs-true_pa_ptgs)position angle (fit) (deg) = [302.01808056 301.17384262 300.93373878 301.57720235] position angle (true) (deg) = [302.02408829 301.17958541 300.93717604 301.58002573] difference = [-0.00600773 -0.00574279 -0.00343726 -0.00282338]Pointing position angle with A00=A01=A10=0print(' position angle (fit) (deg) =', pa_ptgs) print(' position angle (true) (deg) =', true_pa_ptgs) print(' difference =', pa_ptgs-true_pa_ptgs)position angle (fit) (deg) = [302.01808056 301.17384262 300.93373878 301.57720235] position angle (true) (deg) = [302.02408829 301.17958541 300.93717604 301.58002573] difference = [-0.00600773 -0.00574279 -0.00343726 -0.00282338]Scale (deg/pix) with A00=A01=A10=0print(' scale (fit) deg/um=', scale) print(' true scale =',(1e-6/7.3/np.pi*180.0)*u.deg/u.um) # print(' true scale =',(1e-6/7.3/np.pi*180.0)*u.deg*(pix_size/u.um).si) print(' difference =', scale-(1e-6/7.3/np.pi*180.0))scale (fit) deg/um= [7.85066081e-06] true scale = 7.848736919600318e-06 deg / um difference = [1.92389171e-09]A/Bprint(' derived A/B matrices = ', a_matrix, b_matrix)derived A/B matrices = [[ 0.00000000e+00 0.00000000e+00 -4.71606279e-09 -6.70732967e-14] [ 0.00000000e+00 -1.44099032e-09 1.56894290e-13 1.62253635e-10] [-9.93306006e-09 -6.47094309e-14 -2.16749008e-10 2.59837384e-13] [ 2.00058595e-13 -1.04182162e-09 -5.49703540e-13 -6.29226746e-16]] [[ 0.00000000e+00 0.00000000e+00 -4.26325960e-09 -1.94242607e-13] [ 0.00000000e+00 -5.27060768e-09 1.49196168e-13 6.31955220e-11] [-7.06469819e-10 4.81358364e-14 4.16572121e-10 1.25413380e-12] [ 4.50680831e-13 1.45572251e-09 1.99277731e-12 2.23491378e-15]]Object positionsprint(' field params=', fit_field_params) radec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix) plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True') plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess') plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.title('Object positions') plt.legend()field params= ra dec pa field deg deg deg ----------------- ------------------- ------------------ ----- 265.6202439021891 -28.853964194125034 302.0180805640577 0 265.7008178261919 -28.7432379906527 301.17384261837486 1 265.4894154993913 -28.78375368278103 300.93373877569456 2 265.5677049936395 -28.670104050957786 301.5772023481107 3Position differencefrom astropy.coordinates import SkyCoord distlist = [] print(np.shape(radec_objs)) for i in range(0, np.size(radec_objs)): c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg) c2 = SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg) distlist.append(c1.separation(c2).arcsec) distlist = np.array(distlist) #plt.hist(np.log10(distlist)) plt.hist(distlist) plt.xlabel("Residual (arcsec)") plt.ylabel("Number") # use effective RA dra = radec_objs['ra_est']*np.cos(radec_objs['dec_est'].data*u.deg) \ -observed_xy_overlap['ra']*np.cos(observed_xy_overlap['dec'].data*u.deg) ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec'])) # print(' cos test=', np.cos(radec_objs['dec_est'].data*u.deg)-np.cos((radec_objs['dec_est'].data*u.deg).to_value(u.radian))) dra_arcsec = (dra.data*u.deg).to_value(u.arcsec) ddec_arcsec = 
(ddec.data*u.deg).to_value(u.arcsec) plt.scatter(dra_arcsec, ddec_arcsec, marker='x') plt.xlabel('dRA (arcsec)') plt.ylabel('dDEC (arcsec)') #plt.xlim([-0.8, 0.0]) #plt.ylim([-0.8, 0.0])Now we no longer fix the pointing RA, DEC, but instead use reference stars. First, we define the model function that evaluates, for the i-th star, the difference between the sky coordinates (ra, dec)i derived from the individual j-th plate coordinates (x, y)ij and their mean over plates, and, for the k-th reference star, the residual between the measured mean (ra, dec)k and the value known from another observation, e.g. Gaia. We use effective RA $\alpha_* = \alpha \cos(\delta)$.def model_wrefs_func(params, n_fields, dim_sip, observed_xy, radec_refstars): # params = (ra_ptgs, dec_ptgs, pa_ptg..., scale, a..., b...) ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ 3*n_fields+1+(dim_sip+1)**2]) field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) # names=['ra', 'dec', 'pa', 'field'],\ # units=[u.deg, u.deg, u.deg, None],\ # dtype=['float64', 'float64', 'float64', 'int64']) field_params['ra'].unit = u.deg field_params['dec'].unit = u.deg field_params['pa'].unit = u.deg field_params['field'] = np.arange(0, np.size(field_params)) # use copy of observed_xy observed_xy_cp = observed_xy.copy() a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1))) # for mn in mns: # for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])): # n = mn - m # ap_matrix[m, n] = 0 # bp_matrix[m, n] = 0 a_matrix[0, 0] = 0.0 a_matrix[0, 1] = 0.0 a_matrix[1, 0] = 0.0 b_matrix[0, 0] = 0.0 b_matrix[0, 1] = 0.0 b_matrix[1, 0] = 0.0 # normalisation. m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) # compute ra/dec from x/y with the parameters. ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \ a_matrix, b_matrix) observed_xy_cp['ra_est'] = ret['ra_est'] observed_xy_cp['dec_est'] = ret['dec_est'] # compute the mean ra/dec for unique stars cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id'] ra_mean = np.zeros(len(observed_xy['ra_est'])) dec_mean = np.zeros(len(observed_xy['dec_est'])) # effective RA observed_xy_cp['ra_est'] = observed_xy_cp['ra_est']*np.cos(observed_xy_cp['dec_est'].data*u.deg) for i in cat_ids: pos = np.where(observed_xy_cp['catalog_id'] == i) ra_mean[pos] = np.mean(observed_xy_cp[pos]['ra_est'])*u.deg dec_mean[pos] = np.mean(observed_xy_cp[pos]['dec_est'])*u.deg # effective RA for reference stars radec_refstars_cp = radec_refstars.copy() radec_refstars_cp.rename_column('ra', 'ra*') radec_refstars_cp['ra*'] = radec_refstars_cp['ra*']*np.cos(radec_refstars_cp['dec'].data*u.deg) # print(radec_refstars['ra'], radec_refstars_cp['ra*']) # reference stars' measured mean ra, dec to be compared # with the ra, dec of reference stars. 
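# Note: the residual vector assembled below is the concatenation of
# [ra*_est and dec_est of every overlap observation, then ra* and dec of every
# reference star] minus [the per-star mean ra*/dec (grouped by catalog_id), then
# the mean ra*/dec measured for each reference star, matched by catalog_id].
# Driving these differences to zero ties all plates to a common solution that is
# anchored to the reference-star positions; ra* denotes the effective RA, alpha*cos(delta).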
radec_est_refstars = radec_refstars.copy() radec_est_refstars.rename_column('ra', 'ra_est') radec_est_refstars.rename_column('dec', 'dec_est') for i,id in enumerate(radec_refstars['catalog_id']): # print('i, id=', i, id) # print(ra_mean[observed_xy_cp['catalog_id'] == id][0]) radec_est_refstars[i]['ra_est'] = ra_mean[observed_xy_cp['catalog_id'] == id][0] radec_est_refstars[i]['dec_est'] = dec_mean[observed_xy_cp['catalog_id'] == id][0] radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est'], \ radec_refstars_cp['ra*'], radec_refstars_cp['dec'])) radec_est_mean = np.concatenate((ra_mean, dec_mean, radec_est_refstars['ra_est'], \ radec_est_refstars['dec_est'])) # print(' ref stars al diff=', np.sum(radec_refstars_cp['ra*']-radec_est_refstars['ra_est'])) residuals = radec_est - radec_est_mean return residualsPick the reference stars from true_radec_overlap of overlap stars.# print(' true_radec_overlap =', true_radec_overlap) print(' len =', len(true_radec_overlap)) # number of reference stars n_refstars = 10 pos = np.random.choice(len(true_radec_overlap), size=n_refstars, replace=False) radec_refstars = true_radec_overlap[pos] print(radec_refstars)len = 222 ra dec catalog_id deg deg ------------------ ------------------- ---------- 265.63919602229487 -28.700155552688123 168 265.5751287163082 -28.748045938319695 152 265.58403117517577 -28.76026745169417 148 265.50674203925803 -28.813848543503703 79 265.57752654272946 -28.653709577335015 218 265.57217871776766 -28.648209008247456 217 265.6104588526529 -28.720814374071875 163 265.65743669231915 -28.822639963970435 31 265.64537142662545 -28.784050026954677 49 265.5976812951617 -28.829715640389534 23Now, let's run least_squares and get the distortion parameters with the reference stars' constraints.from scipy.optimize import least_squares import time dim_sip = 4 a = np.zeros(shape=(dim_sip+1, dim_sip+1)) b = np.zeros(shape=(dim_sip+1, dim_sip+1)) # constructing a_init (initial parameter set). a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], \ field_params['pa'], \ [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. # a_init = np.array(np.concatenate((field_params['pa'], \ # [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. print(' # of fitting parameters =', len(a_init)) print(' size of reference stars =', np.size(radec_refstars['catalog_id'])) start = time.time() result = least_squares(model_wrefs_func, a_init, loss='linear', args= \ (np.size(field_params), dim_sip, observed_xy_overlap, \ radec_refstars), verbose=2) print(' time=',time.time()-start)# of fitting parameters = 63 size of reference stars = 10 Iteration Total nfev Cost Cost reduction Step norm Optimality 0 1 4.9202e-03 6.61e+04 1 2 3.3253e-10 4.92e-03 8.06e+02 3.25e+00 2 3 1.6627e-10 1.66e-10 1.61e+03 4.17e-04 3 18 1.6627e-10 0.00e+00 0.00e+00 4.17e-04 `xtol` termination condition is satisfied. Function evaluations 18, initial cost 4.9202e-03, final cost 1.6627e-10, first-order optimality 4.17e-04. 
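For reference, a minimal sketch (assuming the packing order used for a_init above: per-field ra, dec, pa, then the scale, then the flattened A and B coefficients) of how the flat parameter vector returned by least_squares is unpacked and how the A/B matrices are rescaled:

```python
import numpy as np

def unpack_params(x, n_fields, dim_sip):
    """Split the flat parameter vector and rebuild the scaled A/B matrices."""
    n_ab = (dim_sip + 1)**2
    ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b = np.split(
        x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields + 1, 3*n_fields + 1 + n_ab])
    a_matrix = a.reshape(dim_sip + 1, dim_sip + 1)
    b_matrix = b.reshape(dim_sip + 1, dim_sip + 1)
    # apply the same 1e-3**(m+n) normalisation used inside the model function
    m, n = np.indices((dim_sip + 1, dim_sip + 1))
    a_matrix = a_matrix * 1.e-3**(m + n)
    b_matrix = b_matrix * 1.e-3**(m + n)
    return ra_ptgs, dec_ptgs, pa_ptgs, scale, a_matrix, b_matrix
```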
time= 13.792067289352417Checking results Preparationn_fields = np.size(field_params) n_objects = np.size(radec_est) true_ra_ptgs = true_field_params['ra'].data true_dec_ptgs = true_field_params['dec'].data true_pa_ptgs = true_field_params['pa'].data ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ 3*n_fields+1+(dim_sip+1)**2]) # pa_ptgs, scale, a, b =\ # np.split(result.x, [n_fields, n_fields+1,\ # n_fields+1+(dim_sip+1)**2]) #ra_ptgs = field_params['ra'].data # dec_ptgs = field_params['dec'].data a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # A/B scaling m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) fit_field_params['ra'].unit = u.deg fit_field_params['dec'].unit = u.deg fit_field_params['pa'].unit = u.deg fit_field_params['field'] = np.arange(0, np.size(field_params))Pointings RA, DEC, position angle and scaleprint(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs) print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs) print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs) print(' position angle (fit) (deg) =', pa_ptgs) print(' position angle (true) (deg) =', true_pa_ptgs) print(' difference =', pa_ptgs-true_pa_ptgs) print(' scale (fit, true) =', scale, (1e-6/7.3/np.pi*180.0)*u.deg/u.um) print(' difference =', scale-(1e-6/7.3/np.pi*180.0))pointing centre (fit) ra, dec (deg) = [265.62028571 265.7008365 265.48944065 265.56770947] [-28.85395845 -28.74322645 -28.78374376 -28.67008675] pointing centre (true) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405] difference ra, dec (deg) = [4.18069611e-05 1.86722713e-05 2.51542362e-05 4.47158408e-06] [5.74486724e-06 1.15429180e-05 9.92206167e-06 1.72978067e-05] position angle (fit) (deg) = [302.01858626 301.17544275 300.93198899 301.57583732] position angle (true) (deg) = [302.02408829 301.17958541 300.93717604 301.58002573] difference = [-0.00550203 -0.00414265 -0.00518704 -0.0041884 ] scale (fit, true) = [7.85063513e-06] 7.848736919600318e-06 deg / um difference = [1.89820786e-09]Object positionsradec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix) plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True') plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess') plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation') plt.scatter(radec_refstars['ra'], radec_refstars['dec'], marker='o', \ label='Reference stars') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.title('Object positions') plt.legend()Position differencesfrom astropy.coordinates import SkyCoord distlist = [] print(np.shape(radec_objs)) for i in range(0, np.size(radec_objs)): c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg) c2 = SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg) distlist.append(c1.separation(c2).arcsec) distlist = np.array(distlist) #plt.hist(np.log10(distlist)) plt.hist(distlist) plt.xlabel("Residual (arcsec)") plt.ylabel("Number") # dra = 
((radec_objs['ra_est']-observed_xy_overlap['ra']).data)*u.deg # effective RA dra = radec_objs['ra_est']*np.cos(radec_objs['dec_est'].data*u.deg) \ -observed_xy_overlap['ra']*np.cos(observed_xy_overlap['dec'].data*u.deg) ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec']).data)*u.deg dra_arcsec = dra.to_value(u.arcsec) ddec_arcsec = ddec.to_value(u.arcsec) plt.scatter(dra_arcsec, ddec_arcsec, marker='x') plt.xlabel('dRA (arcsec)') plt.ylabel('dDEC (arcsec)') #plt.xlim([-0.8, 0.0]) #plt.ylim([-0.8, 0.0])Apply the field parameters to all the objects.print(' total # of stars =', len(observed_xy)) radec_allobjs = radec_calculator_ab(observed_xy, fit_field_params, \ scale[0], a_matrix, b_matrix) plt.scatter(observed_xy['ra'], observed_xy['dec'], marker='x', label='True') plt.scatter(radec_allobjs['ra_est'], radec_allobjs['dec_est'], marker='.', label='Final estimation') plt.scatter(radec_refstars['ra'], radec_refstars['dec'], marker='o', \ label='Reference stars') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.title('Object positions') plt.legend() distlist = [] print(np.shape(radec_allobjs)) for i in range(0, np.size(radec_allobjs)): c1 = SkyCoord(radec_allobjs['ra_est'][i]*u.deg, radec_allobjs['dec_est'][i]*u.deg) c2 = SkyCoord(observed_xy['ra'][i]*u.deg, observed_xy['dec'][i]*u.deg) distlist.append(c1.separation(c2).arcsec) distlist = np.array(distlist) #plt.hist(np.log10(distlist)) plt.hist(distlist) plt.xlabel("Residual (arcsec)") plt.ylabel("Number") # dra = ((radec_allobjs['ra_est']-observed_xy['ra']).data)*u.deg # effective RA dra = radec_allobjs['ra_est']*np.cos(radec_allobjs['dec_est'].data*u.deg) \ -observed_xy['ra']*np.cos(observed_xy['dec'].data*u.deg) ddec = ((radec_allobjs['dec_est']-observed_xy['dec']).data)*u.deg dra_arcsec = dra.to_value(u.arcsec) ddec_arcsec = ddec.to_value(u.arcsec) plt.scatter(dra_arcsec, ddec_arcsec, marker='x') plt.xlabel('dRA (arcsec)') plt.ylabel('dDEC (arcsec)') #plt.xlim([-0.8, 0.0]) #plt.ylim([-0.8, 0.0])With observational errors. We add observational errors to both the JASMINE observations and the reference (Gaia) stars. We first add the positional displacement to observed_xy_overlap. Then, later, we will add the noise to observed_xy (all observations). The displacement for the same observation of a star should be the same between observed_xy and observed_xy_overlap. However, for now, to simplify the setup, we use independent noise realisations. 
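If one did want to share the same noise realisation between the two tables, a possible sketch is given below. It assumes, hypothetically, that both tables carry 'catalog_id' and 'field' columns that jointly identify one observation of one star; the column names x0/y0/x/y and xy_error_jasmine follow the next cell.

```python
import numpy as np

# draw the displacement once for the full table ...
dx = np.random.normal(0.0, xy_error_jasmine.value, size=len(observed_xy))
dy = np.random.normal(0.0, xy_error_jasmine.value, size=len(observed_xy))
observed_xy['x'] = observed_xy['x0'] + dx
observed_xy['y'] = observed_xy['y0'] + dy

# ... and copy the same realisation into the overlap table,
# matching each observation by the assumed (catalog_id, field) key
index = {(row['catalog_id'], row['field']): i for i, row in enumerate(observed_xy)}
for row in observed_xy_overlap:
    i = index[(row['catalog_id'], row['field'])]
    row['x'] = row['x0'] + dx[i]
    row['y'] = row['y0'] + dy[i]
```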
# JASMINE pixel position uncertainty, let's set to 1/300 pix pix_size = 15.*u.um xy_error_jasmine = (1.0/300)*pix_size print(' JASMINE pix error (um) =', xy_error_jasmine) # Reference stars ra, dec error, let's set to 0.2 mas radec_error_refstars = (0.2*u.mas).to(u.deg) print(' Reference stars error (deg) =', radec_error_refstars) # add errors to JASMINE pix position # for overlap stars observed_xy_overlap.rename_column('x', 'x0') observed_xy_overlap.rename_column('y', 'y0') observed_xy_overlap.add_column(observed_xy_overlap['x0'], name='x') observed_xy_overlap.add_column(observed_xy_overlap['y0'], name='y') observed_xy_overlap['x'] = np.random.normal(observed_xy_overlap['x0'], xy_error_jasmine) observed_xy_overlap['y'] = np.random.normal(observed_xy_overlap['y0'], xy_error_jasmine) # store the noise observed_xy_overlap.add_column(observed_xy_overlap['x'], name='xy_err') observed_xy_overlap['xy_err'] = xy_error_jasmine # for all stars observed_xy.rename_column('x', 'x0') observed_xy.rename_column('y', 'y0') observed_xy.add_column(observed_xy['x0'], name='x') observed_xy.add_column(observed_xy['y0'], name='y') observed_xy['x'] = np.random.normal(observed_xy['x0'], xy_error_jasmine) observed_xy['y'] = np.random.normal(observed_xy['y0'], xy_error_jasmine) observed_xy.add_column(observed_xy['x'], name='xy_err') observed_xy['xy_err'] = xy_error_jasmine # add errors to reference stars radec_refstars.rename_column('ra', 'ra0') radec_refstars.rename_column('dec', 'dec0') radec_refstars.add_column(radec_refstars['ra0'], name='ra') radec_refstars.add_column(radec_refstars['dec0'], name='dec') # print(' ra before noise =', radec_refstars['ra']) radec_refstars['ra'] = np.random.normal(radec_refstars['ra0'], radec_error_refstars) radec_refstars['dec'] = np.random.normal(radec_refstars['dec0'], radec_error_refstars) # print(' ra w/added noise =', radec_refstars['ra'].to_value(u.mas)) # store the noise radec_refstars.add_column(radec_refstars['ra'], name='radec_err') radec_refstars['radec_err'] = radec_error_refstarsWe define the model function. We use effective RA $\alpha_* = \alpha \cos(\delta)$ when we compute residuals. Note that the errors do not need to be adjusted for effective RA.def model_wrefs_werr_func(params, n_fields, dim_sip, observed_xy, radec_refstars): # params = (ra_ptgs, dec_ptgs, pa_ptg..., scale, a..., b...) 
ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ 3*n_fields+1+(dim_sip+1)**2]) field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) # names=['ra', 'dec', 'pa', 'field'],\ # units=[u.deg, u.deg, u.deg, None],\ # dtype=['float64', 'float64', 'float64', 'int64']) field_params['ra'].unit = u.deg field_params['dec'].unit = u.deg field_params['pa'].unit = u.deg field_params['field'] = np.arange(0, np.size(field_params)) # use copy of observed_xy observed_xy_cp = observed_xy.copy() a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1))) # for mn in mns: # for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])): # n = mn - m # ap_matrix[m, n] = 0 # bp_matrix[m, n] = 0 a_matrix[0, 0] = 0.0 a_matrix[0, 1] = 0.0 a_matrix[1, 0] = 0.0 b_matrix[0, 0] = 0.0 b_matrix[0, 1] = 0.0 b_matrix[1, 0] = 0.0 # normalisation. m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) # compute ra/dec from x/y with the parameters. ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \ a_matrix, b_matrix) observed_xy_cp['ra_est'] = ret['ra_est'] observed_xy_cp['dec_est'] = ret['dec_est'] # compute the mean ra/dec for unique stars cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id'] ra_mean = np.zeros(len(observed_xy['ra_est'])) dec_mean = np.zeros(len(observed_xy['dec_est'])) # compute weights from error in xy (um) -> radec (deg) w_observed_xy = 1.0/(observed_xy_cp['xy_err']*scale[0]) # effective RA: error does not need to adjusted. observed_xy_cp['ra_est'] = observed_xy_cp['ra_est']*np.cos(observed_xy_cp['dec_est'].data*u.deg) for i in cat_ids: pos = np.where(observed_xy_cp['catalog_id'] == i) ra_mean[pos] = np.average(observed_xy_cp[pos]['ra_est'], \ weights=w_observed_xy[pos])*u.deg dec_mean[pos] = np.average(observed_xy_cp[pos]['dec_est'], \ weights=w_observed_xy[pos])*u.deg # effective RA for reference stars radec_refstars_cp = radec_refstars.copy() radec_refstars_cp.rename_column('ra', 'ra*') radec_refstars_cp['ra*'] = radec_refstars_cp['ra*']*np.cos(radec_refstars_cp['dec'].data*u.deg) # print(radec_refstars['ra'], radec_refstars_cp['ra*']) # reference stars' measured mean ra, dec to be compared # with the ra, dec of reference stars. 
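# Note on the weighting: least_squares minimises the sum of squared residuals, so
# each residual is multiplied by 1/sigma. For the JASMINE measurements the
# positional error xy_err is given in focal-plane units (um) and is converted to
# degrees with the plate scale (xy_err*scale[0]); for the reference stars
# radec_err is already in degrees. The residuals returned below therefore become
# (value - mean)/sigma, i.e. standard chi-type residuals.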
radec_est_refstars = radec_refstars.copy() radec_est_refstars.rename_column('ra', 'ra*_est') radec_est_refstars.rename_column('dec', 'dec_est') # compute weights for reference stars w_refstars = 1.0/(radec_refstars['radec_err']) for i,id in enumerate(radec_refstars['catalog_id']): # print('i, id=', i, id) # print(ra_mean[observed_xy_cp['catalog_id'] == id][0]) radec_est_refstars[i]['ra*_est'] = ra_mean[observed_xy_cp['catalog_id'] == id][0] radec_est_refstars[i]['dec_est'] = dec_mean[observed_xy_cp['catalog_id'] == id][0] radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est'], \ radec_refstars_cp['ra*'], radec_refstars_cp['dec'])) radec_est_mean = np.concatenate((ra_mean, dec_mean, radec_est_refstars['ra*_est'], \ radec_est_refstars['dec_est'])) w_all = np.concatenate((w_observed_xy, w_observed_xy, w_refstars, w_refstars)) # print(w_all) residuals = w_all*(radec_est - radec_est_mean) return residualsLet's run least squares.from scipy.optimize import least_squares from scipy.optimize import leastsq import time dim_sip = 4 a = np.zeros(shape=(dim_sip+1, dim_sip+1)) b = np.zeros(shape=(dim_sip+1, dim_sip+1)) # constructing a_init (initial parameter set). a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], \ field_params['pa'], \ [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. # a_init = np.array(np.concatenate((field_params['pa'], \ # [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray. print(' # of fitting parameters =', len(a_init)) print(' size of reference stars =', np.size(radec_refstars['catalog_id'])) start = time.time() result = least_squares(model_wrefs_werr_func, a_init, loss='linear', args= \ (np.size(field_params), dim_sip, observed_xy_overlap, \ radec_refstars), verbose=2) # result = least_squares(model_wrefs_werr_func, a_init, args= \ # (np.size(field_params), dim_sip, observed_xy_overlap, \ # radec_refstars)) print(' time=',time.time()-start)# of fitting parameters = 63 size of reference stars = 10 Iteration Total nfev Cost Cost reduction Step norm Optimality 0 1 4.5266e+10 6.01e+17 1 2 1.3364e+07 4.53e+10 8.06e+02 8.74e+15 2 3 1.2903e+04 1.34e+07 1.61e+03 4.40e+10 3 4 1.2903e+04 1.83e-01 3.22e+03 8.94e+09 4 18 1.2903e+04 2.39e-02 4.80e-05 9.47e+08 5 19 1.2903e+04 0.00e+00 0.00e+00 9.47e+08 `xtol` termination condition is satisfied. Function evaluations 19, initial cost 4.5266e+10, final cost 1.2903e+04, first-order optimality 9.47e+08. 
time= 34.5712468624115Checking results Extract the results.n_fields = np.size(field_params) n_objects = np.size(radec_est) true_ra_ptgs = true_field_params['ra'].data true_dec_ptgs = true_field_params['dec'].data true_pa_ptgs = true_field_params['pa'].data ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\ np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ 3*n_fields+1+(dim_sip+1)**2]) # pa_ptgs, scale, a, b =\ # np.split(result.x, [n_fields, n_fields+1,\ # n_fields+1+(dim_sip+1)**2]) #ra_ptgs = field_params['ra'].data # dec_ptgs = field_params['dec'].data print(' a and b matrices before scaling=', a, b) a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1)) b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1)) # A/B scaling m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_matrix = a_matrix * (1.e-3**mn) b_matrix = b_matrix * (1.e-3**mn) fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) fit_field_params['ra'].unit = u.deg fit_field_params['dec'].unit = u.deg fit_field_params['pa'].unit = u.deg fit_field_params['field'] = np.arange(0, np.size(field_params))a and b matrices before scaling= [ 0.00000000e+00 0.00000000e+00 -3.97395033e-03 -4.90471591e-05 1.59703913e-06 0.00000000e+00 -1.64052413e-03 1.58223449e-04 6.67437986e-07 -1.58068873e+01 -1.04891186e-02 -5.51877616e-05 1.77575220e-06 8.24310771e+02 -6.40165945e+02 1.83186442e-04 5.20766875e-07 2.56289441e+02 1.42868197e+03 -2.97605857e+01 7.55774468e-07 7.52113934e+02 -1.10783978e+03 -4.59397333e+01 1.00740222e+03] [ 0.00000000e+00 0.00000000e+00 -4.67719155e-03 -9.31279501e-05 7.55167787e-08 0.00000000e+00 -7.99102030e-03 7.25312767e-05 1.51684733e-06 9.53929242e+02 -1.51568538e-03 9.96867287e-05 2.33145432e-06 -4.37074033e+02 8.92234996e+01 3.52660007e-04 1.83594733e-06 -7.04127471e+02 9.76867472e+01 1.54897834e+03 -9.51777298e-07 6.91948839e+02 1.89079607e+02 1.83323321e+03 3.48971122e+02]Evaluate fitting. We follow https://www.fixes.pub/program/444521.html.from scipy import linalg, optimize chi2dof= np.sum(result.fun**2)/(result.fun.size -result.x.size) print(' Xi^2/dof =', chi2dof) J= result.jac print(' shape of J =', np.shape(J)) # this does not work. 
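# The direct inverse commented out below tends to fail here: with the strong
# 1e-3**(m+n) rescaling of the SIP-like terms and their partial degeneracy with
# the pointing and scale parameters, J.T @ J is likely very ill-conditioned
# (close to singular), so its inverse is numerically meaningless. The SVD-based
# pseudo-inverse used below keeps only singular values above a tolerance and is
# a more robust way to estimate the parameter covariance.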
# cov= np.linalg.inv(J.T.dot(J)) # var= np.sqrt(np.diagonal(cov)) # print(' parameter variances =', var) U, s, Vh= linalg.svd(result.jac, full_matrices=False) tol= np.finfo(float).eps*s[0]*max(result.jac.shape) w= s > tol cov= (Vh[w].T/s[w]**2) @ Vh[w] # robust covariance matrix cov *= chi2dof perr= np.sqrt(np.diag(cov)) # 1sigma uncertainty on fitted parameters # extract errors ra_ptgs_err, dec_ptgs_err, pa_ptgs_err, scale_err, a_err, b_err =\ np.split(perr, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\ 3*n_fields+1+(dim_sip+1)**2]) # A/B scaling a_err_matrix = np.reshape(a_err, (dim_sip+1, dim_sip+1)) b_err_matrix = np.reshape(b_err, (dim_sip+1, dim_sip+1)) # A/B scaling m, n = np.indices((dim_sip+1, dim_sip+1)) mn = m + n a_err_matrix = a_err_matrix * (1.e-3**mn) b_err_matrix = b_err_matrix * (1.e-3**mn) print(' parameter values =', ra_ptgs, dec_ptgs, pa_ptgs, scale, a_matrix, b_matrix) print(' parameter variances =', ra_ptgs_err, dec_ptgs_err, pa_ptgs_err, scale_err, \ a_err_matrix, b_err_matrix)Xi^2/dof = 14.891181458481045 shape of J = (1796, 63) parameter values = [265.62027541 265.70083679 265.48943621 265.56771628] [-28.85395491 -28.74322559 -28.78374369 -28.67008935] [302.01809948 301.17509424 300.93140956 301.57520988] [7.85067393e-06] [[ 0.00000000e+00 0.00000000e+00 -3.97395033e-09 -4.90471591e-14 1.59703913e-18] [ 0.00000000e+00 -1.64052413e-09 1.58223449e-13 6.67437986e-19 -1.58068873e-14] [-1.04891186e-08 -5.51877616e-14 1.77575220e-18 8.24310771e-13 -6.40165945e-16] [ 1.83186442e-13 5.20766875e-19 2.56289441e-13 1.42868197e-15 -2.97605857e-20] [ 7.55774468e-19 7.52113934e-13 -1.10783978e-15 -4.59397333e-20 1.00740222e-21]] [[ 0.00000000e+00 0.00000000e+00 -4.67719155e-09 -9.31279501e-14 7.55167787e-20] [ 0.00000000e+00 -7.99102030e-09 7.25312767e-14 1.51684733e-18 9.53929242e-13] [-1.51568538e-09 9.96867287e-14 2.33145432e-18 -4.37074033e-13 8.92234996e-17] [ 3.52660007e-13 1.83594733e-18 -7.04127471e-13 9.76867472e-1[...]Pointings RA, DEC, position angle and scaleprint(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs) print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs) print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs) print(' uncertainty ra, dec pointings =', ra_ptgs_err, dec_ptgs_err) print(' position angle (fit) (deg) =', pa_ptgs) print(' position angle (true) (deg) =', true_pa_ptgs) print(' difference =', pa_ptgs-true_pa_ptgs) print(' uncertainty =', pa_ptgs_err) print(' scale (fit, true) =', scale, (1e-6/7.3/np.pi*180.0)*u.deg/u.um) print(' difference =', scale-(1e-6/7.3/np.pi*180.0)) print(' uncertainty =', scale_err)scale (fit, true) = [7.85067393e-06] 7.848736919600318e-06 deg / um difference = [1.93700585e-09] uncertainty = [7.35420055e-12]Objects positionsradec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix) plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True') plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess') plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation') plt.scatter(radec_refstars['ra0'], radec_refstars['dec0'], marker='o', \ label='Reference stars') plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') plt.title('Object positions') plt.legend() distlist = [] print(np.shape(radec_objs)) for i in range(0, np.size(radec_objs)): c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg) c2 = 
SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg) distlist.append(c1.separation(c2).arcsec) distlist = np.array(distlist) #plt.hist(np.log10(distlist)) plt.hist(distlist) plt.xlabel("Residual (arcsec)") plt.ylabel("Number") # dra = ((radec_objs['ra_est']-observed_xy_overlap['ra']).data)*u.deg # effective RA dra = radec_objs['ra_est']*np.cos(radec_objs['dec_est'].data*u.deg) \ -observed_xy_overlap['ra']*np.cos(observed_xy_overlap['dec'].data*u.deg) ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec']).data)*u.deg dra_arcsec = dra.to_value(u.arcsec) ddec_arcsec = ddec.to_value(u.arcsec) plt.scatter(dra_arcsec, ddec_arcsec, marker='x') plt.xlabel('dRA (arcsec)') plt.ylabel('dDEC (arcsec)')Apply to all the data, taking into account uncertainties of their position and parameter uncertainties. We shall run Monte Carlo by randomly displacing the position of stars and distortion parameters.n_mc = 100 n_stars = len(observed_xy) print(' total # of stars =', n_stars) ra_allobjs_samp = np.empty((n_stars, n_mc)) dec_allobjs_samp = np.empty((n_stars, n_mc)) observed_xy_try = observed_xy.copy() # flattened uncertainties of a, b matrix a_flat = a_matrix.flatten() b_flat = b_matrix.flatten() a_err = a_err_matrix.flatten() b_err = b_err_matrix.flatten() for i in range(n_mc): # displace observed_xy positions observed_xy_try['x'] = np.random.normal(observed_xy['x'], observed_xy['xy_err']) observed_xy_try['y'] = np.random.normal(observed_xy['y'], observed_xy['xy_err']) # displace the parameters ra_ptgs_try = np.random.normal(ra_ptgs, ra_ptgs_err) dec_ptgs_try = np.random.normal(dec_ptgs, dec_ptgs_err) pa_ptgs_try = np.random.normal(pa_ptgs, pa_ptgs_err) scale_try = np.random.normal(scale, scale_err) a_try = np.random.normal(a_flat, a_err) b_try = np.random.normal(b_flat, b_err) a_matrix_try = np.reshape(a_try, (dim_sip+1, dim_sip+1)) b_matrix_try = np.reshape(b_try, (dim_sip+1, dim_sip+1)) fit_field_params_try = Table(data=[ra_ptgs_try, dec_ptgs_try, pa_ptgs_try, \ -np.ones(shape=(np.size(ra_ptgs)))],\ names=['ra', 'dec', 'pa', 'field'],\ dtype=['float64', 'float64', 'float64', 'int64']) fit_field_params_try['ra'].unit = u.deg fit_field_params_try['dec'].unit = u.deg fit_field_params_try['pa'].unit = u.deg fit_field_params_try['field'] = np.arange(0, np.size(field_params)) radec_allobjs_try = radec_calculator_ab(observed_xy_try, fit_field_params_try, \ scale_try[0], a_matrix_try, b_matrix_try) # use effective RA ra_allobjs_samp[:, i] = radec_allobjs_try['ra_est']*np.cos(radec_allobjs_try['dec_est'].data*u.deg) # ra_allobjs_samp[:, i] = radec_allobjs_try['ra_est'] dec_allobjs_samp[:, i] = radec_allobjs_try['dec_est'] ra_allobjs_mean = np.mean(ra_allobjs_samp, axis=1)*u.deg ra_allobjs_std = np.std(ra_allobjs_samp, axis=1) dec_allobjs_mean = np.mean(dec_allobjs_samp, axis=1)*u.deg dec_allobjs_std = np.std(dec_allobjs_samp, axis=1) # we convert true obserfved ra to effective ra raeff_observed_xy = observed_xy['ra']*np.cos(observed_xy['dec'].data*u.deg) # raeff_observed_xy = observed_xy['ra'] dec_observed_xy = observed_xy['dec'] print(' mean dec correction for effective RA =', np.mean(np.cos(observed_xy['dec'].data*u.deg))) # print(raeff_observed_xy, ra_allobjs_mean) # error from the true value ra_allobjs_err = ra_allobjs_mean-raeff_observed_xy dec_allobjs_err = dec_allobjs_mean-dec_observed_xy plt.scatter(ra_allobjs_err.to_value(u.arcsec), (ra_allobjs_std*u.deg).to_value(u.arcsec), marker='x', label='RA') plt.scatter(dec_allobjs_err.to_value(u.arcsec), 
(dec_allobjs_std*u.deg).to_value(u.arcsec), marker='.', label='DEC') print(' RA mean standard deviation of measurements (arcsec) =', \ (np.mean(ra_allobjs_std)*u.deg).to_value(u.arcsec)) print(' RA standard deviation from the true values (arcsec) =', (np.std(ra_allobjs_err)).to_value(u.arcsec)) print(' DEC mean standard deviation of measurements (arcsec) =', (np.mean(dec_allobjs_std)*u.deg).to_value(u.arcsec)) print(' DEC standard deviation from the true values (arcsec)=', (np.std(dec_allobjs_err)).to_value(u.arcsec)) plt.xlabel('deviation from the true radec (arcsec)') plt.ylabel('standard deviation of measurement (arcsec)') plt.title('Object positions') plt.legend()total # of stars = 2563 mean dec correction for effective RA = 0.876660634963193 RA mean standard deviation of measurements (arcsec) = 0.006280788912085892 RA standard deviation from the true values (arcsec) = 0.02474307609116223 DEC mean standard deviation of measurements (arcsec) = 0.003751085599114781 DEC standard deviation from the true values (arcsec)= 0.014589443344355368Solution 1import random number = random.randint(1,9) guess = 0 count = 0 while guess != number and guess != "exit": guess = input("What's your guess?") if guess == "exit": break guess = int(guess) count += 1 if guess < number: print("Too low!") elif guess > number: print("Too high!") else: print("You got it!") print("And it only took you",count,"tries!")What's your guess?5 Too high! What's your guess?3 Too high! What's your guess?1 You got it! And it only took you 3 tries!Solution 2import string import random def pw_gen(size = 8, chars=string.ascii_letters + string.digits + string.punctuation): return ''.join(random.choice(chars) for _ in range(size)) print(pw_gen(int(input('How many characters in your password?'))))How many characters in your password?15 9zl:D[vN}HRp@=6Solution 3from string import ascii_lowercase from words import get_random_word def get_num_attempts(): """Get user-inputted number of incorrect attempts for the game.""" while True: num_attempts = input( 'How many incorrect attempts do you want? [1-25] ') try: num_attempts = int(num_attempts) if 1 <= num_attempts <= 25: return num_attempts else: print('{0} is not between 1 and 25'.format(num_attempts)) except ValueError: print('{0} is not an integer between 1 and 25'.format( num_attempts)) def get_min_word_length(): """Get user-inputted minimum word length for the game.""" while True: min_word_length = input( 'What minimum word length do you want? 
[4-16] ') try: min_word_length = int(min_word_length) if 4 <= min_word_length <= 16: return min_word_length else: print('{0} is not between 4 and 16'.format(min_word_length)) except ValueError: print('{0} is not an integer between 4 and 16'.format(min_word_length)) def get_display_word(word, idxs): """Get the word suitable for display.""" if len(word) != len(idxs): raise ValueError('Word length and indices length are not the same') displayed_word = ''.join( [letter if idxs[i] else '*' for i, letter in enumerate(word)]) return displayed_word.strip() def get_next_letter(remaining_letters): """Get the user-inputted next letter.""" if len(remaining_letters) == 0: raise ValueError('There are no remaining letters') while True: next_letter = input('Choose the next letter: ').lower() if len(next_letter) != 1: print('{0} is not a single character'.format(next_letter)) elif next_letter not in ascii_lowercase: print('{0} is not a letter'.format(next_letter)) elif next_letter not in remaining_letters: print('{0} has been guessed before'.format(next_letter)) else: remaining_letters.remove(next_letter) return next_letter def play_hangman(): """Play a game of hangman. At the end of the game, returns if the player wants to retry. """ # Let player specify difficulty print('Starting a game of Hangman...') attempts_remaining = get_num_attempts() min_word_length = get_min_word_length() # Randomly select a word print('Selecting a word...') word = get_random_word(min_word_length) print() # Initialize game state variables idxs = [letter not in ascii_lowercase for letter in word] remaining_letters = set(ascii_lowercase) wrong_letters = [] word_solved = False # Main game loop while attempts_remaining > 0 and not word_solved: # Print current game state print('Word: {0}'.format(get_display_word(word, idxs))) print('Attempts Remaining: {0}'.format(attempts_remaining)) print('Previous Guesses: {0}'.format(' '.join(wrong_letters))) # Get player's next letter guess next_letter = get_next_letter(remaining_letters) # Check if letter guess is in word if next_letter in word: # Guessed correctly print('{0} is in the word!'.format(next_letter)) # Reveal matching letters for i in range(len(word)): if word[i] == next_letter: idxs[i] = True else: # Guessed incorrectly print('{0} is NOT in the word!'.format(next_letter)) # Decrement num of attempts left and append guess to wrong guesses attempts_remaining -= 1 wrong_letters.append(next_letter) # Check if word is completely solved if False not in idxs: word_solved = True print() # The game is over: reveal the word print('The word is {0}'.format(word)) # Notify player of victory or defeat if word_solved: print('Congratulations! You won!') else: print('Try again next time!') # Ask player if he/she wants to try again try_again = input('Would you like to try again? [y/Y] ') return try_again.lower() == 'y' while play_hangman(): print()Starting a game of Hangman... How many incorrect attempts do you want? [1-25] 25 What minimum word length do you want? [4-16] 4 Selecting a word... Word: ***** Attempts Remaining: 25 Previous Guesses: Choose the next letter: o o is NOT in the word! The word is cycle Try again next time! Word: ***** Attempts Remaining: 24 Previous Guesses: o Choose the next letter: e e is in the word! The word is cycle Try again next time! Word: ****e Attempts Remaining: 24 Previous Guesses: o Choose the next letter: a a is NOT in the word! The word is cycle Try again next time! 
Word: ****e Attempts Remaining: 23 Previous Guesses: o a Choose the next letter: i i is NOT in the word! The word is cycle Try again next time! Word: ****e Attempts Remaining: 22 Previous Guesses: o a i Choose the next letter: d d is NOT in the word! The word is cycle Try again next time! Word: ****e Attempts Remaining: 21 Previous Guesses: o a i d Choose the next letter: e e has been guessed before Choose the next letter[...]Install gdown Python package.!pip install -U --no-cache-dir gdownInstall Tensorflow-addons.* InstanceNormalization!pip install tensorflow-addonsInstall Tensorflow-datasets.* celeb_a dataset!pip install tensorflow-datasetsImport TensorFlow 2.x.try: %tensorflow_version 2.x except Exception: pass import tensorflow as tf import tensorflow.keras.layers as layers import tensorflow.keras.models as models from tensorflow.keras import backend as K import numpy as np np.random.seed(7) import matplotlib.pyplot as plot print(tf.__version__)Configuration parameters.auto_tune = tf.data.experimental.AUTOTUNE generator_dimension = 64 generator_residual_blocks = 6 discriminator_dimension = 64 discriminator_residual_blocks = 6 buffer_size = 512 batch_size = 16 epochs = 100 number_of_attributes = 40 image_load_shape = (143, 143, 3) image_shape = (128, 128, 3) d_gradient_penalty_weight = 10.0 d_attribute_loss_weight = 1.0 g_attribute_loss_weight = 1.0 g_reconstruction_loss_weight = 10.0 load_previous_weights = False save_current_weights = False epsilon = 1e-7Compute gradient penalty.def gradient_penalty(discriminator, real_image, fake_image): sample_shape = [tf.shape(real_image)[0]] + [1] * (real_image.shape.ndims - 1) alpha = tf.random.uniform(shape=sample_shape, minval=0., maxval=1.) sample_image = real_image + alpha * (fake_image - real_image) sample_image.set_shape(real_image.get_shape().as_list()) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(sample_image) predictions = discriminator(sample_image, training=False) if isinstance(predictions, tuple): predictions = predictions[0] gradients = tape.gradient(predictions, sample_image)[0] gradients = tf.reshape(gradients, [tf.shape(gradients)[0], -1]) norm = tf.norm(epsilon + gradients, axis=1) gp_value = tf.reduce_mean((norm - 1.) 
** 2) return(gp_value)Load CelebA dataset.import tensorflow_datasets as tfds builder = tfds.builder('celeb_a') print(builder.info)Download and prepare dataset.#builder.download_and_prepare()OR Download dataset from Google drive.from google.colab import drive drive.mount('/content/drive') !cp -r '/content/drive/My Drive/datasets/tensorflow_datasets' /root/.View dataset contents.!ls -al /root/tensorflow_datasets/ !ls -al /root/tensorflow_datasets/celeb_a/Load the dataset.builder.download_and_prepare()Create CelebA dataset splits.* train* validation* testceleba_datasets = builder.as_dataset(shuffle_files=True) print(celeba_datasets) train_dataset = celeba_datasets['train']Preprocess the dataset.attributes_to_identifiers = { '5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2, 'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6, 'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10, 'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13, 'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16, 'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19, 'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22, 'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25, 'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28, 'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31, 'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34, 'Wearing_Hat': 35, 'Wearing_Lipstick': 36, 'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39 } identifiers_to_attributes = {v: k for k, v in attributes_to_identifiers.items()}Create test image.def create_test_image(image_shape): test_image = np.random.rand(image_shape[0], image_shape[1], image_shape[2]) return(test_image)Create test image batch.def create_test_image_batch(image_shape): test_image_batch = np.random.rand(batch_size, image_shape[0], image_shape[1], image_shape[2]) return(test_image_batch)Create test attributes.def create_test_attributes(): attributes = np.random.rand(number_of_attributes) test_attributes = {} for index in range(len(identifiers_to_attributes)): attribute = identifiers_to_attributes[index] test_attributes[attribute] = attributes[index] return(test_attributes)Create test attribute batch.def create_test_attribute_batch(): test_attribute_batch = np.random.rand(batch_size, number_of_attributes) return(test_attribute_batch)Test created test attributes.test_attributes = create_test_attributes() print(test_attributes)Normalize the image to [-1, 1].def normalize_image(image): image = tf.cast(image, tf.float32) image = tf.clip_by_value(image, 0, 255) / 127.5 - 1 return(image)Test image normalization.input_image = create_test_image(image_shape) output_image = normalize_image(input_image) print('input image shape',input_image.shape) print('output image shape',output_image.shape)Random crop the image.def random_crop(image): cropped_image = tf.image.random_crop(image, size=image_shape) return(cropped_image)Test random croping of image.input_image = create_test_image(image_load_shape) output_image = random_crop(input_image) print('input image shape',input_image.shape) print('output image shape',output_image.shape)Random jitter the image.def random_jitter(image): image = tf.image.resize(image, [image_load_shape[0], image_load_shape[1]], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) image = random_crop(image) image = tf.image.random_flip_left_right(image) return(image)Test random jittering of image.input_image = create_test_image(image_load_shape) output_image = random_jitter(input_image) print('input image 
shape',input_image.shape) print('output image shape',output_image.shape)Preprocess train dataset.def compute_attributes(attributes_batch): attributes_array = [] for index in range(len(identifiers_to_attributes)): attribute = identifiers_to_attributes[index] attributes_array.append(tf.cast(attributes_batch[attribute], dtype=tf.float32)) return(attributes_array) def preprocess_train_dataset(sample): image = sample['image'] attributes = sample['attributes'] image = random_jitter(image) image = normalize_image(image) sample['image'] = image sample['attributes'] = compute_attributes(attributes) return(sample)Test the train dataset preprocessing.input_image = create_test_image(image_load_shape) attributes = create_test_attributes() sample = {} sample['image'] = input_image sample['attributes'] = attributes output = preprocess_train_dataset(sample) print('input image shape',input_image.shape) print('output image shape',output['image'].shape)Preprocess the test dataset.def preprocess_test_dataset(sample): image = sample['image'] attributes = sample['attributes'] image = tf.image.resize(image, [image_shape[0], image_shape[1]], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) image = normalize_image(image) sample['image'] = image sample['attributes'] = compute_attributes(attributes) return(sample) input_image = create_test_image(image_load_shape) attributes = create_test_attributes() sample = {} sample['image'] = input_image sample['attributes'] = attributes output = preprocess_test_dataset(sample) print('input image shape',input_image.shape) print('output image shape',output['image'].shape)Preprocess dataset splits.train_dataset = train_dataset.map(preprocess_train_dataset, num_parallel_calls=auto_tune) train_dataset = train_dataset.shuffle(buffer_size) train_dataset = train_dataset.batch(batch_size, drop_remainder=True) train_dataset = train_dataset.prefetch(auto_tune)Create the optimizer.* Adam optimizer* Learning rate = 0.0002* β1 = 0.5* β2 = 0.999generator_optimizer = tf.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999) discriminator_optimizer = tf.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999)Create the residual block.import tensorflow_addons as tfa def create_residual_block(input_layer, residual_dimension): layer = layers.ZeroPadding2D(padding=1)(input_layer) layer = layers.Conv2D(filters=residual_dimension, kernel_size=3, strides=1, padding='valid', use_bias=False)(layer) layer = tfa.layers.InstanceNormalization(axis=-1)(layer) layer = layers.ReLU()(layer) layer = layers.ZeroPadding2D(padding=1)(layer) layer = layers.Conv2D(filters=residual_dimension, kernel_size=3, strides=1, padding='valid', use_bias=False)(layer) layer = tfa.layers.InstanceNormalization(axis=-1)(layer) return( layers.Add()([input_layer, layer]) )Test the residual block.residual_dimension = 64 shape = (128, 128, 64) input_layer = layers.Input(shape=shape, name='input_attributes') residual_block = create_residual_block(input_layer, residual_dimension) print(residual_block)Create the generator model.def create_generator_model(image_shape, number_of_attributes=40, generator_dimension=64, generator_residual_blocks=6): input_image = layers.Input(shape=image_shape, name='input_image') input_attributes = layers.Input(shape=number_of_attributes, name='input_attributes') attributes = layers.Lambda(lambda x: K.repeat(x, image_shape[0]*image_shape[1]))(input_attributes) attributes = layers.Reshape((image_shape[0], image_shape[0], number_of_attributes))(attributes) x = layers.Concatenate()([input_image, 
attributes]) # First Conv2D x = layers.Conv2D(filters=generator_dimension, kernel_size=7, strides=1, padding = 'same', use_bias=False)(x) x = tfa.layers.InstanceNormalization(axis=-1)(x) x = layers.ReLU()(x) # Downsampling layers current_dimension = generator_dimension for i in range(2): x = layers.ZeroPadding2D(padding=1)(x) x = layers.Conv2D(filters = current_dimension*2, kernel_size=4, strides=2, padding = 'valid', use_bias=False)(x) x = tfa.layers.InstanceNormalization(axis=-1)(x) x = layers.ReLU()(x) current_dimension = current_dimension * 2 # Bottleneck layers. for i in range(generator_residual_blocks): x = create_residual_block(x, current_dimension) # Upsampling layers for i in range(2): x = layers.UpSampling2D(size=2)(x) x = layers.Conv2D(filters = current_dimension//2, kernel_size=4, strides=1, padding='same', use_bias=False)(x) x = tfa.layers.InstanceNormalization(axis=-1)(x) x = layers.ReLU()(x) current_dimension = current_dimension//2 # Last Conv2D x = layers.ZeroPadding2D(padding=3)(x) generated_image = layers.Conv2D(filters=3, kernel_size=7, strides=1, padding='valid', activation='tanh', use_bias=False, name='generated_image')(x) return( models.Model(inputs=[input_image, input_attributes], outputs=generated_image, name='generator') )Test the generator model.generator = create_generator_model(image_shape, number_of_attributes, generator_dimension, generator_residual_blocks) generator.summary() input_image = create_test_image_batch(image_shape) input_attributes = create_test_attribute_batch() output_image = generator([input_image, input_attributes]) print('input image shape -', input_image.shape) print('output image shape -', output_image.shape)Create the discriminator / classification model.def create_discriminator_model(image_shape, number_of_attributes=40, discriminator_dimension=64, discriminator_residual_blocks=6): input_image = layers.Input(shape=image_shape, name='input_image') x = layers.ZeroPadding2D(padding=1)(input_image) x = layers.Conv2D(filters=discriminator_dimension, kernel_size=4, strides=2, padding='valid', use_bias=False)(x) x = layers.LeakyReLU(0.01)(x) current_dimension = discriminator_dimension for i in range(1, discriminator_residual_blocks): x = layers.ZeroPadding2D(padding=1)(x) x = layers.Conv2D(filters=current_dimension*2, kernel_size=4, strides=2, padding='valid')(x) x = layers.LeakyReLU(0.01)(x) current_dimension = current_dimension * 2 kernel_size = int(image_shape[0] / np.power(2, discriminator_residual_blocks)) discriminator_prediction = layers.ZeroPadding2D(padding=1)(x) discriminator_prediction = layers.Conv2D(filters=1, kernel_size=4, strides=1, padding='valid', use_bias=False)(discriminator_prediction) discriminator_prediction = layers.Reshape((1, ))(discriminator_prediction) discriminator_classification = layers.Conv2D(filters=number_of_attributes, kernel_size=kernel_size, strides=1, padding='valid', use_bias=False)(x) discriminator_classification = layers.Reshape((number_of_attributes, ))(discriminator_classification) return( models.Model(input_image, [discriminator_prediction, discriminator_classification], name='discriminator') )Test the discriminator model.input_image = create_test_image_batch(image_shape) discriminator = create_discriminator_model(image_shape, number_of_attributes, discriminator_dimension, discriminator_residual_blocks) discriminator.summary() discriminator_prediction, discriminator_classification = discriminator(input_image) print('discriminator prediction shape', discriminator_prediction.shape) print('discriminator 
classification shape', discriminator_classification.shape)Create different models and loss functions.* Generator model* Discriminator model* Discriminator loss function* Generator loss functiongenerator = create_generator_model(image_shape, number_of_attributes, generator_dimension, generator_residual_blocks) generator.summary() discriminator = create_discriminator_model(image_shape, number_of_attributes, discriminator_dimension, discriminator_residual_blocks) discriminator.summary()Load previous model weights.* Generator model weights* Discriminator model weights!ls -al '/content/drive/My Drive/models/StarGAN/' !cp '/content/drive/My Drive/models/StarGAN/'*.h5 . !ls -al if(load_previous_weights): generator.load_weights('generator.h5') discriminator.load_weights('discriminator.h5')Compute the generator loss.def classification_loss(Y_true, Y_pred) : return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y_true, logits=Y_pred)) def wasserstein_loss(Y_true, Y_pred): return K.mean(Y_true*Y_pred) def reconstruction_loss(Y_true, Y_pred): return K.mean(K.abs(Y_true - Y_pred)) def compute_generator_loss(input_image, input_attributes): batch_size = input_image.shape[0] target_attributes = tf.random.shuffle(input_attributes) # Generator reconstructed_image = generator([input_image, input_attributes], training=True) fake_image = generator([input_image, target_attributes], training=True) # Discriminator fake_image_prediction, fake_image_attributes = discriminator(fake_image, training=False) valid_predictions = -tf.ones((batch_size, 1)) fake_image_prediction_loss = wasserstein_loss(valid_predictions, fake_image_prediction) fake_image_attributes_loss = classification_loss(target_attributes, fake_image_attributes) image_reconstruction_loss = reconstruction_loss(input_image, reconstructed_image) generator_loss = ( fake_image_prediction_loss + fake_image_attributes_loss * g_attribute_loss_weight + image_reconstruction_loss * g_reconstruction_loss_weight ) return(generator_loss)Test computation of the generator loss.sample_batch = next(iter(train_dataset)) loss = compute_generator_loss(sample_batch['image'], sample_batch['attributes']) print(loss.numpy())Compute the discriminator loss.def compute_discriminator_loss(input_image, input_attributes): batch_size = input_image.shape[0] target_attributes = tf.random.shuffle(input_attributes) # Generate fake_image = generator([input_image, target_attributes], training=False) # Discriminate input_image_prediction, input_image_attributes = discriminator(input_image, training=True) fake_image_prediction, fake_image_attributes = discriminator(fake_image, training=True) # Discriminator losses valid_predictions = -tf.ones((batch_size, 1)) input_image_gan_loss = wasserstein_loss(valid_predictions, input_image_prediction) input_image_attributes_loss = classification_loss(input_attributes, input_image_attributes) fake_predictions = tf.ones((batch_size, 1)) fake_image_gan_loss = wasserstein_loss(fake_predictions, fake_image_prediction) gradient_penalty_value = gradient_penalty(discriminator, input_image, fake_image) discriminator_loss = ( input_image_gan_loss + fake_image_gan_loss + gradient_penalty_value * d_gradient_penalty_weight + input_image_attributes_loss * d_attribute_loss_weight ) return(discriminator_loss)Test computation of the discriminator loss.sample_batch = next(iter(train_dataset)) loss = compute_discriminator_loss(sample_batch['image'], sample_batch['attributes']) print(loss.numpy())Train the model.model_loss_frequency = 10 model_save_frequency 
= 50 def train(train_dataset, epochs=100): generator_loss = None discriminator_loss = None for epoch in range(epochs): batch_index = 0 for dataset_batch in train_dataset: batch_index = batch_index + 1 images = dataset_batch['image'] attributes = dataset_batch['attributes'] if(batch_index%6 == 0): with tf.GradientTape(watch_accessed_variables=False) as generator_tape: generator_tape.watch(generator.trainable_variables) generator_loss = compute_generator_loss(images, attributes) generator_gradients = generator_tape.gradient(generator_loss, generator.trainable_variables) generator_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables)) else: with tf.GradientTape(watch_accessed_variables=False) as discriminator_tape: discriminator_tape.watch(discriminator.trainable_variables) discriminator_loss = compute_discriminator_loss(images, attributes) discriminator_gradients = discriminator_tape.gradient(discriminator_loss, discriminator.trainable_variables) discriminator_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables)) if(batch_index%model_loss_frequency == 0): print('generator loss -', generator_loss.numpy(), 'discriminator loss -', discriminator_loss.numpy()) ''' if(batch_index%model_save_frequency == 0): generator.save_weights('/content/drive/My Drive/models/StarGAN/generator.h5') discriminator.save_weights('/content/drive/My Drive/models/StarGAN/discriminator.h5') ''' train(train_dataset, epochs=10) if(save_current_weights): generator.save_weights('/content/drive/My Drive/models/StarGAN/generator.h5') discriminator.save_weights('/content/drive/My Drive/models/StarGAN/discriminator.h5')Principal Component Analysis (PCA)from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler sc = StandardScaler() x_train_scaled = sc.fit_transform(x_train) x_test_scaled = sc.transform(x_test) pca = PCA(n_components=.99) x_pca = pca.fit_transform(x_train_scaled) variance = pca.explained_variance_ratio_ #calculate variance ratios var=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=3)*100) plt.ylabel('% Variance Explained') plt.xlabel('# of Features') plt.title('PCA Analysis') plt.ylim(30,100.5) plt.style.context('seaborn-whitegrid') plt.plot(var) #We can se that at ~60 components over 90% variance is captured and after is nearly constant pca = PCA(n_components=60) x_pca = pca.fit_transform(x_train_scaled) x_test_pca = pca.transform(x_test_scaled) fig, axes = plt.subplots(3, 8, figsize=(9, 4), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(pca.components_[i].reshape(28, 28), cmap='gray')Ensembles and classification models We first use PCA components for four different classifiers(decision tree, kNN, Logistic regression and Random forest classifier) and then we use three different ensemble methods, bagging, boosting and stacking to compare the results. 
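Before moving on, note that the number of PCA components kept above (60, read off the cumulative-variance curve) can also be chosen programmatically. A small sketch using the same x_train_scaled array:

```python
import numpy as np
from sklearn.decomposition import PCA

pca_full = PCA().fit(x_train_scaled)                   # keep all components
cum_var = np.cumsum(pca_full.explained_variance_ratio_)
n_components_90 = int(np.argmax(cum_var >= 0.90)) + 1  # first component count reaching 90%
print('components needed for 90% of the variance:', n_components_90)
```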
Baggingimport matplotlib.gridspec as gridspec import itertools from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import BaggingClassifier from sklearn.model_selection import cross_val_score, train_test_split from mlxtend.plotting import plot_learning_curves from mlxtend.plotting import plot_decision_regionsDecision Trees#Let's train a sample decision tree with default values dt0 = DecisionTreeClassifier() dt0.fit(x_pca, train_label) y_pred = dt0.predict(x_test_pca) from sklearn.metrics import accuracy_score acc = accuracy_score(test_label,y_pred) acc max_depths = np.linspace(1, 26, 26, endpoint=True, dtype=np.int64) train_results = [] test_results = [] for max_depth in max_depths: dt = DecisionTreeClassifier(max_depth=max_depth) dt.fit(x_pca, train_label) train_pred = dt.predict(x_pca) acc = accuracy_score(train_label,train_pred) train_results.append(acc) y_pred = dt.predict(x_test_pca) acc = accuracy_score(test_label,y_pred) test_results.append(acc) from matplotlib.legend_handler import HandlerLine2D line1, = plt.plot(max_depths, train_results, 'b', label="Train Score") line2, = plt.plot(max_depths, test_results, 'r', label="Test Score") plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('Accuracy score') plt.xlabel('Tree depth') plt.show()We can see that a depth of 20 gives the best test score for our decision tree, so we set that as our max_depth. k-Nearest-Neighbors#Train knn with only 1-nearest-neighbor knn0 = KNeighborsClassifier(n_neighbors=1) knn0.fit(x_pca, train_label) y_pred = knn0.predict(x_test_pca) acc = accuracy_score(test_label,y_pred) acc n_neighbors = np.linspace(1, 10, 10, endpoint=True,dtype=np.int64) train_results = [] test_results = [] for k in n_neighbors: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(x_pca, train_label) train_pred = knn.predict(x_pca) acc = accuracy_score(train_label,train_pred) train_results.append(acc) y_pred = knn.predict(x_test_pca) acc = accuracy_score(test_label,y_pred) test_results.append(acc) from matplotlib.legend_handler import HandlerLine2D line1, = plt.plot(n_neighbors, train_results, 'b', label="Train Score") line2, = plt.plot(n_neighbors, test_results, 'r', label="Test Score") plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('Accuracy score') plt.xlabel('Number of neighbors') plt.show()We can see that setting k = 1 achieves the best test score for our kNN classifier, so we set that as our n_neighbors. 
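Note that both sweeps above pick the hyperparameter by looking at the test-set score. A leakage-free alternative, sketched here with cross_val_score on the training data only (using the same x_pca and train_label arrays), would be:

```python
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

depths = np.arange(1, 27)
cv_scores = [cross_val_score(DecisionTreeClassifier(max_depth=int(d)),
                             x_pca, train_label, cv=3, scoring='accuracy').mean()
             for d in depths]
best_depth = int(depths[np.argmax(cv_scores)])
print('best max_depth by 3-fold CV:', best_depth)
```

The same pattern works for the number of neighbours in the kNN sweep.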
Bagging all modelsdt_f = DecisionTreeClassifier(max_depth=20) knn_f = KNeighborsClassifier(n_neighbors=1) lr_f = LogisticRegression() bagging_dt = BaggingClassifier(base_estimator= dt_f , n_estimators=10, max_samples=0.7, max_features=0.9) bagging_knn = BaggingClassifier(base_estimator= knn_f, n_estimators=10, max_samples=0.7, max_features=0.9) bagging_lr = BaggingClassifier(base_estimator= lr_f, n_estimators=10, max_samples=0.7, max_features=0.9) label = ['Decision Tree', 'K-NN', 'Logistic Regression', 'Bagging Tree', 'Bagging K-NN', 'Bagging Logistic Regression'] classifiers = [dt_f, knn_f, lr_f, bagging_dt, bagging_knn,bagging_lr] for clf, label in zip(classifiers, label): scores = cross_val_score(clf, x_pca, train_label, cv=3, scoring='accuracy') print("Accuracy: %.2f (+/- %.2f) [%s]" %(scores.mean(), scores.std(), label))Accuracy: 0.94 (+/- 0.01) [Decision Tree] Accuracy: 1.00 (+/- 0.00) [K-NN] Accuracy: 0.84 (+/- 0.00) [Logistic Regression] Accuracy: 0.99 (+/- 0.00) [Bagging Tree] Accuracy: 0.99 (+/- 0.00) [Bagging K-NN] Accuracy: 0.83 (+/- 0.01) [Bagging Logistic Regression]Random Forest Classification Above we have seen the cross-validation scores for the single classifiers and for bagging as an ensemble method.#Now let's see the effect of max_samples, i.e. the effect of subsampling the data bags = [bagging_dt, bagging_knn, bagging_lr] x_train0, x_test0, y_train0, y_test0 = train_test_split(x_pca, train_label, test_size=0.3, random_state=7) for b in bags: plt.figure() plot_learning_curves(x_train0, y_train0, x_test0, y_test0, b, print_model=False, style='ggplot') plt.show()The plots are for 'Bagging Tree', 'Bagging K-NN' and 'Bagging Logistic Regression' respectively. As we can see in all of them, using ~80% of the data for training gives the best ensemble models.from sklearn.model_selection import GridSearchCV param_grid = { 'bootstrap': [True], 'max_depth': [10,20,30,50], 'max_features': [0.8, 0.9], 'n_estimators': [10,20,50,100] } rf = RandomForestClassifier() grid_search = GridSearchCV(estimator = rf, param_grid = param_grid, cv = 3, n_jobs = -1, verbose = 2) grid_search.fit(x_pca, train_label) grid_search.best_params_ best_grid = grid_search.best_estimator_ accuracy = best_grid.score(x_test_pca, test_label) print("Accuracy score for Random Forest Classification is: %.2f" %accuracy)Accuracy score for Random Forest Classification is: 0.76Boostingimport matplotlib.gridspec as gridspec import itertools from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.model_selection import cross_val_score, train_test_split from mlxtend.plotting import plot_learning_curves from mlxtend.plotting import plot_decision_regions dt_f = DecisionTreeClassifier(max_depth=20) boosting_dt = AdaBoostClassifier(base_estimator= dt_f , n_estimators=10) label = ['Decision Tree','Boosting Tree'] classifiers = [dt_f, boosting_dt] for clf, label in zip(classifiers, label): scores = cross_val_score(clf, x_pca, train_label, scoring='accuracy') print("Accuracy: %.2f (+/- %.2f) [%s]" %(scores.mean(), scores.std(), label))Accuracy: 0.94 (+/- 0.00) [Decision Tree] Accuracy: 1.00 (+/- 0.00) [Boosting Tree]Now we will plot the error of the boosted decision tree as a function of the number of training samples.x_train0, x_test0, y_train0, y_test0 = train_test_split(x_pca, train_label, test_size=0.3, 
random_state=7) plt.figure() plot_learning_curves(x_train0, y_train0, x_test0, y_test0, boosting_dt, print_model=False, style='ggplot') plt.show()We will now see the effect of the number of estimators on the final result; we use the standard deviation of the cross-validation scores as the error bar for each ensemble size.num_est = map(int, np.linspace(1,100,20)) boosting_mean = [] boosting_std = [] for n_est in num_est: boosting = AdaBoostClassifier(base_estimator=dt_f, n_estimators=n_est) scores = cross_val_score(boosting, x_pca, train_label, cv=3, scoring='accuracy') boosting_mean.append(scores.mean()) boosting_std.append(scores.std()) num_est = np.linspace(1,100,20).astype(int) plt.figure() plt.errorbar(num_est, boosting_mean, yerr=boosting_std, uplims=True, lolims=True, fmt='-o', capsize=5, marker='.',mfc='purple', mec='yellow', label='uplims=True, lolims=True') plt.ylabel('Accuracy'); plt.xlabel('Ensemble Size'); plt.title('AdaBoost Ensemble'); plt.show()Stackingimport matplotlib.gridspec as gridspec import itertools from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.model_selection import cross_val_score, train_test_split from mlxtend.plotting import plot_learning_curves from mlxtend.plotting import plot_decision_regionsNow let's stack the models, using logistic regression as our meta-classifier, and see the resultsknn = KNeighborsClassifier(n_neighbors=1) rf = RandomForestClassifier(random_state=1) gbc = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[knn, rf, gbc], meta_classifier=lr) label = ['KNN', 'Random Forest', 'Naive Bayes', 'Stacking Classifier'] clf_list = [knn, rf, gbc, sclf] stacking_mean = [] stacking_std = [] for clf, label in zip(clf_list, label): scores = cross_val_score(clf, x_pca, train_label, cv=3, scoring='accuracy') print ("Accuracy: %.2f (+/- %.2f) [%s]" %(scores.mean(), scores.std(), label)) stacking_mean.append(scores.mean()) stacking_std.append(scores.std())Accuracy: 1.00 (+/- 0.00) [KNN] Accuracy: 1.00 (+/- 0.00) [Random Forest] Accuracy: 0.75 (+/- 0.00) [Naive Bayes] Accuracy: 0.40 (+/- 0.01) [Stacking Classifier]Now let's plot the accuracy of each model and the possible error in eachplt.figure() plt.errorbar(range(4), stacking_mean, yerr=stacking_std, uplims=True, lolims=True, fmt='-o', capsize=3, marker='.',mfc='purple', mec='yellow', label='uplims=True, lolims=True') plt.xticks(range(4), ['KNN', 'RF', 'NB', 'Stacking']) plt.ylabel('Accuracy'); plt.xlabel('Classifier'); plt.title('Stacking Ensemble Model'); plt.show() x_train0, x_test0, y_train0, y_test0 = train_test_split(x_pca, train_label, test_size=0.3, random_state=7) plt.figure() plot_learning_curves(x_train0, y_train0, x_test0, y_test0, sclf, print_model=False, style='ggplot') plt.show()We can see that as the number of training samples grows, the accuracy of the model drops drastically due to the high variance of the data; however, with stacking we do not see any overfitting.
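The mlxtend StackingClassifier used above fits its meta-classifier on predictions made on the same data the base models were trained on, which may help explain the weak 0.40 score. As a sketch of an alternative (assuming scikit-learn >= 0.22 and the x_pca, train_label, x_test_pca and test_label arrays from earlier cells), scikit-learn's own StackingClassifier trains the meta-classifier on out-of-fold predictions instead:

# Sketch: stacking where the meta-classifier sees out-of-fold predictions.
# Assumes scikit-learn >= 0.22 and the arrays defined in earlier cells.
from sklearn.ensemble import StackingClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

stack = StackingClassifier(
    estimators=[('knn', KNeighborsClassifier(n_neighbors=1)),
                ('rf', RandomForestClassifier(random_state=1)),
                ('nb', GaussianNB())],
    final_estimator=LogisticRegression(),
    cv=3)
stack.fit(x_pca, train_label)
pred = stack.predict(x_test_pca)
print("Accuracy score for sklearn StackingClassifier is: %.2f" % accuracy_score(test_label, pred))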
Final Predictionsknn = KNeighborsClassifier(n_neighbors=1) dt = DecisionTreeClassifier(max_depth=20) gbc = GaussianNB() lr = LogisticRegression() bagging_dt = BaggingClassifier(base_estimator= dt , n_estimators=10, max_samples=0.8, max_features=0.9) bagging_knn = BaggingClassifier(base_estimator= knn, n_estimators=10, max_samples=0.8, max_features=0.9) bagging_lr = BaggingClassifier(base_estimator= lr, n_estimators=10, max_samples=0.8, max_features=0.9) boosting_dt = AdaBoostClassifier(base_estimator= dt_f , n_estimators=10) sclf = StackingClassifier(classifiers=[knn, rf, gbc], meta_classifier=lr) from sklearn.metrics import accuracy_score classifiers_unsupervised = [knn, dt, bagging_dt,bagging_knn, bagging_lr, boosting_dt, sclf] classifiers_names1 = ['K-NN', 'Decision Tree', 'Bagging Tree', 'Bagging K-NN', 'Bagging Logistic Regression', 'Boosting tree', 'Stacking Model'] for clf, n in zip(classifiers_unsupervised, classifiers_names1): clf.fit(x_pca, train_label) pred = clf.predict(x_test_pca) accuracy = accuracy_score(pred, test_label) print("Accuracy score for %s is: %.2f" %(n, accuracy)) classifiers_supervised = [gbc, lr] classifiers_names2 = ['Naive Bayes', 'Logistic Regression'] for clf2, n in zip(classifiers_supervised, classifiers_names2): clf2.fit(x_pca, train_label) pred = clf2.predict(x_test_pca) accuracy = accuracy_score(pred, test_label) print("Accuracy score for %s is: %.2f" %(n, accuracy))Accuracy score for Naive Bayes is: 0.64 Accuracy score for Logistic Regression is: 0.67Support Vector Machines Support Vector Machines (SVM) + PCAfrom sklearn.svm import SVC from sklearn.model_selection import GridSearchCV tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100]}, {'kernel': ['linear'], 'C': [1, 10, 100]}] svm = GridSearchCV(SVC(probability=True), tuned_parameters, refit=True, verbose=1) svm.fit(x_pca, train_label) print(svm.best_params_) print(svm.best_estimator_) from sklearn.svm import SVC svm = SVC(C=100, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.0001, kernel='rbf', max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001, verbose=False) #Training SVM svm.fit(x_pca,train_label) pred=svm.predict(x_test_pca) accuracy = accuracy_score(pred, test_label) print("Accuracy score for Support Vector Machine is: %.2f" %accuracy) #Graph confusion matrix from sklearn.metrics import confusion_matrix import seaborn as sns labels = np.linspace(0, 25, 25, endpoint=True,dtype=np.int64) cm = confusion_matrix(test_label, pred) # Normalize cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] fig, ax = plt.subplots(figsize=(15,15)) sns.heatmap(cmn, annot=True, fmt='.2f', xticklabels=labels, yticklabels=labels) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show(block=False)Exctracting Hog Featuresfrom skimage.feature import hog from skimage import data, color, exposure hogtrain = [] hogimg = [] for i in range(x_train_img.shape[0]): fd, hog_image = hog(x_train_img[i], orientations=8, pixels_per_cell=(8,8),cells_per_block=(2,2), visualise=True) hogtrain.append(fd) hogimg.append(hog_image) hog_feature = np.array(hogtrain) hogtest = [] hogimg_test = [] for i in range(x_test_img.shape[0]): fd, hog_image = hog(x_test_img[i], orientations=8, pixels_per_cell=(8,8),cells_per_block=(2,2), visualise=True) hogtest.append(fd) hogimg_test.append(hog_image) hog_feature_test = np.array(hogtest) fig, axes = plt.subplots(2, 2, figsize=(11, 8), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, 
wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(hogimg[i], cmap='gray')Support Vector Machines (SVM) + HOGfrom sklearn.svm import SVC from sklearn.model_selection import GridSearchCV tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100]}] svm2 = GridSearchCV(SVC(probability=True), tuned_parameters, refit=True, verbose=1) svm2.fit(hog_feature, train_label) print(svm2.best_params_) print(svm2.best_estimator_) from sklearn.metrics import accuracy_score svm2.fit(hog_feature,train_label) pred = svm2.predict(hog_feature_test) accuracy = accuracy_score(pred, test_label) print("Accuracy score for Support Vector Machine using HOG features is: %.2f" %accuracy) #Graph confusion matrix from sklearn.metrics import confusion_matrix import seaborn as sns labels = np.linspace(0, 25, 25, endpoint=True,dtype=np.int64) cm = confusion_matrix(test_label, pred) # Normalize cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] fig, ax = plt.subplots(figsize=(15,15)) sns.heatmap(cmn, annot=True, fmt='.2f', xticklabels=labels, yticklabels=labels) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show(block=False)Support Vector Machines (SVM) + HOG + PCAfrom sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler pca_hogs = PCA(n_components=.99) x_hogs_pca = pca_hogs.fit_transform(hog_feature) x_hogs_pca_test = pca_hogs.transform(hog_feature_test) from sklearn.svm import SVC from sklearn.metrics import accuracy_score svm2 = SVC(C=100, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001, verbose=False) svm2.fit(x_hogs_pca,train_label) pred = svm2.predict(x_hogs_pca_test) accuracy = accuracy_score(pred, test_label) print("Accuracy score for Support Vector Machine by applying PCA on HOG features is: %.2f" %accuracy) #Graph confusion matrix from sklearn.metrics import confusion_matrix import seaborn as sns labels = np.linspace(0, 25, 25, endpoint=True,dtype=np.int64) cm = confusion_matrix(test_label, pred) # Normalize cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] fig, ax = plt.subplots(figsize=(15,15)) sns.heatmap(cmn, annot=True, fmt='.2f', xticklabels=labels, yticklabels=labels) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show(block=False)k-Nearest-Neighbors + HOGfrom sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score n_neighbors = np.linspace(1, 10, 10, endpoint=True,dtype=np.int64) train_results = [] test_results = [] for k in n_neighbors: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(hog_feature, train_label) train_pred = knn.predict(hog_feature) acc = accuracy_score(train_label,train_pred) train_results.append(acc) y_pred = knn.predict(hog_feature_test) acc = accuracy_score(test_label,y_pred) test_results.append(acc) from matplotlib.legend_handler import HandlerLine2D line1, = plt.plot(n_neighbors, train_results, 'b', label="Train Score") line2, = plt.plot(n_neighbors, test_results, 'r', label="Test Score") plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('Accuracy score') plt.xlabel('Number of neighbors') plt.show()We can observe that 3 neighbors performs the best therefore in our final model we will set 'K' to three.knn = KNeighborsClassifier(n_neighbors=3) knn.fit(hog_feature, train_label) pred = knn.predict(hog_feature_test) acc = accuracy_score(test_label, pred) print("Accuracy score for Support Vector Machine by using HOG 
features is: %.2f" %acc)Accuracy score for Support Vector Machine by using HOG features is: 0.92Stacking Support Vectors Machine and kNN using HOG featuresfrom sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from mlxtend.classifier import StackingClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score knn = KNeighborsClassifier(n_neighbors=3) svm2 = SVC(C=100, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001, verbose=False) lr = LogisticRegression() sclf = StackingClassifier(classifiers=[knn,svm2], meta_classifier=lr) scores = cross_val_score(sclf, hog_feature, train_label, cv=3, scoring='accuracy') print ("Accuracy: %.2f (+/- %.2f) [Stacking Classifier]" %(scores.mean(), scores.std())) sclf.fit(hog_feature, train_label) pred = sclf.predict(hog_feature_test) accuracy = accuracy_score(pred, test_label) print("Accuracy score for Stacking Classifier is: %.2f" %accuracy)Accuracy score for Stacking Classifier is: 0.40Convolutional Neural Network (CNN)#For CNN we need RGB images lets transform our images in 3D dimension from skimage.color import gray2rgb x_train_cnn = gray2rgb(x_train_img) x_test_cnn = gray2rgb(x_test_img) #Normalizing the pixels x_train_cnn = x_train_cnn / 255 x_test_cnn = x_test_cnn / 255 #Lets view some samples fig, axes = plt.subplots(2, 4, figsize=(7, 3), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(x_train_cnn[i]) #For CNN we need One-hot labels which are stored in y_train nd y_test y_train[0] from keras.models import Sequential from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout import warnings warnings.filterwarnings("ignore") model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28,28,3))) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(2000, activation='relu')) model.add(Dense(26, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) #Lets check available gpu from keras import backend as K K.tensorflow_backend._get_available_gpus() hist = model.fit(x_train_cnn, y_train, batch_size=128, epochs=25, validation_split=0.3 )WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3005: The name tf.Session is deprecated. 
Please use tf.compat.v1.Session instead. Train on 19218 samples, validate on 8237 samples Epoch 1/25 WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow[...]We achieved an accuracy of 99.8% over the training datamodel.summary() model.evaluate(x_test_cnn, y_test)[1]7172/7172 [==============================] - 1s 89us/stepThe model is around 96% on test data#Visualize the models accuracy plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Val'], loc='upper left') plt.show() #Visualize the models loss plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Val'], loc='upper right') plt.show()Saving the trained model#Save the model !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # 1. Authenticate and create the PyDrive client. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # 2. Save Keras Model or weights on google drive # create on Colab directory model.save('model.h5') model_file = drive.CreateFile({'title' : 'model.h5'}) model_file.SetContentFile('model.h5') model_file.Upload() # download to google drive drive.CreateFile({'id': model_file.get('id')}) #Save the model weights model.save_weights('model_weights.h5') weights_file = drive.CreateFile({'title' : 'model_weights.h5'}) weights_file.SetContentFile('model_weights.h5') weights_file.Upload() drive.CreateFile({'id': weights_file.get('id')}) # 3. reload weights from google drive into the model # use (get shareable link) to get file id last_weight_file = drive.CreateFile({'id': '1N6JlAflv2fIrXFVZ14DPK0CEPicpQyVd'}) last_weight_file.GetContentFile('last_weights.mat') model.load_weights('last_weights.mat')Capturing live images and predicting the output The code below is used for using the local webcam as this device is using a virtual machine to run the webcam set to the Jupyter notebook is not the local webcam. The code is retrieved from Google Colab team from: https://colab.research.google.com/notebooks/snippets/advanced_outputs.ipynbscrollTo=2viqYx97hPMifrom IPython.display import display, Javascript from google.colab.output import eval_js from base64 import b64decode def take_photo(filename='photo.jpg', quality=0.8): js = Javascript(''' async function takePhoto(quality) { const div = document.createElement('div'); const capture = document.createElement('button'); capture.textContent = 'Capture'; div.appendChild(capture); const video = document.createElement('video'); video.style.display = 'block'; const stream = await navigator.mediaDevices.getUserMedia({video: true}); document.body.appendChild(div); div.appendChild(video); video.srcObject = stream; await video.play(); // Resize the output to fit the video element. google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true); // Wait for Capture to be clicked. 
await new Promise((resolve) => capture.onclick = resolve); const canvas = document.createElement('canvas'); canvas.width = video.videoWidth; canvas.height = video.videoHeight; canvas.getContext('2d').drawImage(video, 0, 0); stream.getVideoTracks()[0].stop(); div.remove(); return canvas.toDataURL('image/jpeg', quality); } ''') display(js) data = eval_js('takePhoto({})'.format(quality)) binary = b64decode(data.split(',')[1]) with open(filename, 'wb') as f: f.write(binary) return filename from IPython.display import Image try: filename = take_photo() print('Saved to {}'.format('image1.jpg')) # Show the image which was just taken. display(Image('photo.jpg')) except Exception as err: # Errors will be thrown if the user does not have a webcam or if they do not # grant the page permission to access it. print(str(err)) import cv2 as cv img = cv.imread('/content/photo.jpg', cv.IMREAD_UNCHANGED) # to crop required part im2 = img # convert to grayscale image_grayscale = cv.cvtColor(im2, cv.COLOR_BGR2GRAY) # blurring the image image_grayscale_blurred =cv.GaussianBlur(image_grayscale, (15,15), 0) # resize the image to 28x28 im3 = cv.resize(image_grayscale_blurred, (28,28), interpolation = cv.INTER_AREA) # expand the dimensions from 28x28 to 1x28x28x1 im4 = np.resize(im3, (28, 28, 3)) im5 = np.expand_dims(im4, axis=0) data = np.asarray( im4, dtype="int32" ) pred_probab = model.predict(data)[0] # softmax gives probability for all the alphabets hence we have to choose the maximum probability alphabet pred_class = list(pred_probab).index(max(pred_probab)) max(pred_probab), getLetter(pred_class)The Three cells above are just for testing therefore the outputs have been removed, for demo you can run the blocks in order in order to see the final result predicted by the neural network.'''This method will simply change the final result to letters since the nueral net will predict indexes of teh alphabet ''' def getLetter(i): alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ' return alphabet[i]Sets de entrenamiento y Modelos* Link github: https://github.com/sebastiandres/ia_notebooks/1_error_datasets_y_modelos/1_error_datasets_y_modelos.ipynb* Link mybinder: https://bit.ly/2Vf89oC Sobre jupyter notebookJupyter notebooks es un medio de desarrollo iterativo, que permite mezclar código con texto, imágenes y video. Su facilidad de uso permite crear y descargar material para el aprendizaje individual y grupal.*Importante*: cada celda se ejecuta con `Alt + Enter` Objetivos de Aprendizaje1. Importancia de conocer el negocio y explorar los datos.2. Técnicas para seleccionar un modelo predictivo.3. Conocer el significado y utilidad de: * Datos de entrenamiento * Datos de validación (verificación) * Datos de testeo * Datos de predicción 0. Verificar disponibilidad de librerías y probar jupyter notebooksimport pandas as pd import numpy as np from matplotlib import pyplot as plt import warnings warnings.filterwarnings("ignore") print("Versión de pandas: ", pd.__version__) print("Versión de numpy: ", np.__version__) %load_ext autoreload %autoreload 2 # Fix the seed so everyone can reproduce the same results np.random.seed(42)Ejemplos de celdas de jupyter notebook con python:a = 1 print(a) for i in range(10): print(i, i**2)0 0 1 1 2 4 3 9 4 16 5 25 6 36 7 49 8 64 9 811. Importar funcionalidades pre-existentesfrom secret import get_dataLos datos pueden estar en un archivo csv o excel, o haberse descargado de intenet, o haberlos obtenido después de un largo proceso de proccesamiento. 
En esta caso, se obtienen simplemente con una función creada para este objetivo:`def get_data()`N_data = 10000 x_all, y_all = get_data(N_data) len(x_all) x_all[:10] y_all[:10]Analisis exploratoriodf_all = pd.DataFrame(columns=["x","y"], data=np.array([x_all, y_all]).T) df_all df_all.describe()¿Qué cosa le llama la atención de los datos? ¿Porqué siempre es bueno el análisis gráfico? Existe un ejemplo clásico llamado el Cuarteto de Anscombe. Considere los siguientes 4 conjuntos de datos. ¿Qué puede decir de los datos?import pandas as pd import os filepath = os.path.join("data","anscombe.csv") df = pd.read_csv(filepath) dfDescripción de los datos, versión numpy:import numpy as np filepath = os.path.join("data","anscombe.csv") data = np.loadtxt(filepath, delimiter=",", skiprows=1) for i in range(4): x = data[:,2*i] y = data[:,2*i+1] slope, intercept = np.polyfit(x, y, 1) print("Grupo %d:" %(i+1)) print("\tTiene pendiente m=%.2f e intercepto b=%.2f" %(slope, intercept))Grupo 1: Tiene pendiente m=0.50 e intercepto b=3.00 Grupo 2: Tiene pendiente m=0.50 e intercepto b=3.00 Grupo 3: Tiene pendiente m=0.50 e intercepto b=3.00 Grupo 4: Tiene pendiente m=0.50 e intercepto b=3.00Descripción de los datos, versión pandas:import pandas as pd import os filepath = os.path.join("data","anscombe.csv") df = pd.read_csv(filepath) df[sorted(df.columns)].describe(include="all")Veamos ahora que nos puede decirfrom matplotlib import pyplot as plt import numpy as np def my_plot(): filepath = os.path.join("data","anscombe.csv") data = np.loadtxt(filepath, delimiter=",", skiprows=1) fig = plt.figure(figsize=(16,8)) for i in range(4): x = data[:,2*i] y = data[:,2*i+1] plt.subplot(2, 2, i+1) plt.plot(x,y,'o') plt.xlim([2,20]) plt.ylim([2,20]) plt.title("Grupo %d" %(i+1)) m, b = np.polyfit(x, y, 1) x_aux = np.linspace(2,16,20) plt.plot(x_aux, m*x_aux + b, 'r', lw=2.0) plt.suptitle("Cuarteto de Anscombe") plt.show() my_plot()Análisis gráficoUna de las primeras tareas que debemos hacer es realizar un análisis gráfico de los datos. Para esto existen muchas alternativas. 
Use su buen juicio.plt.figure(figsize=(10,10)) plt.plot(x_all, y_all, "-", label="row data") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()Lección 1: Los datos no suelen venir ordenados.sorting_index = np.argsort(x_all) x_sorted = np.array(x_all)[sorting_index] y_sorted = np.array(y_all)[sorting_index] plt.figure(figsize=(10,10)) plt.plot(x_sorted, y_sorted, "-", label="sorted data") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show() plt.figure(figsize=(10,10)) plt.plot(x_sorted[:100], y_sorted[:100], "-", label="sorted data") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()Preprocesando los datosDespués de ordenar, también es necesario eliminar los datos nulos (y valores fuera de rango)np.isnan(x_sorted) m_nan = np.logical_or(np.isnan(x_sorted), np.isnan(y_sorted)) m_not_nan = np.logical_not(m_nan) x = x_sorted[m_not_nan] y = y_sorted[m_not_nan] len(x), len(y) x yAjustando un modelo simpleSi definimos el grado del polinomio, es posible ajustar los coeficientes del polinomio para que "trate de pasar" por los datos.# Do a polinomial fit N = 1 z = np.polyfit(x, y, N) polinomio = np.poly1d(z) polinomio(np.array([0., 1., 2.0])) polinomio(2) plt.figure(figsize=(16,16)) plt.plot(x, y, '-', lw=2.0, label="data") plt.plot(x, polinomio(x),'-', lw=2.0, label="model") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show() # Intentar distintos valores de N: 1, 5, 10, 50, 100 N = 50 z = np.polyfit(x, y, N) polinomio = np.poly1d(z) plt.figure(figsize=(10,10)) plt.plot(x, y, '-', lw=2.0, label="data") plt.plot(x, polinomio(x),'-', lw=2.0, label="model") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()¿Qué valor debemos usar para N? ¿Cómo podemmos elegirlo *científicamente*? Calculando el errorEl valor de error a utilizar depende del contexto del problema. Existen 2 errores habituales para este tipo de problemas de regresión:* Error Absoluto Medio - Mean Absolute Error (MAE): $$\frac{1}{n} \sum_{i=1}^n |y_i - f(x_i)|$$* Error Cuadrático Medio -Mean Squared Error (MSE): $$\frac{1}{n} \sum_{i=1}^n (y_i - f(x_i) )^2 $$# Compute the error def mae_from_model(x, y, model): m_nan = np.logical_or(np.isnan(x), np.isnan(y)) m_not_nan = np.logical_not(m_nan) x_ = x[m_not_nan] y_ = y[m_not_nan] y_model_ = model(x_) mae = np.sum(np.abs(y_ - y_model_)) / len(y_) return mae def mse_from_model(x, y, model): m_nan = np.logical_or(np.isnan(x), np.isnan(y)) m_not_nan = np.logical_not(m_nan) x_ = x[m_not_nan] y_ = y[m_not_nan] y_model_ = model(x_) mse = np.sum((y_ - y_model_)**2) / len(y_) return mseVeamos cuanto error tienen los modelos anterioresN = 10 z = np.polyfit(x, y, N) model_N = np.poly1d(z) print("Mean Absolute Error (MAE) for N={}: {}".format(N, mae_from_model(x, y, model_N))) print("Mean Squared Error (MSE) for N={}: {}".format(N, mse_from_model(x, y, model_N)))Mean Absolute Error (MAE) for N=10: 7.959889933575181 Mean Squared Error (MSE) for N=10: 100.09831647386287Ambos errores son dos formas válidas de medir el error. No existe una manera correcta de medir el error. Depende del contexto y del problema.En realidad, los coeficientes del polinomio se encuentran minimizando el Mean Squared Error.np.polyfit?¡Ya estábamos utilizando una forma de medir el error sin saberlo! 
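As a quick cross-check of the hand-rolled mae_from_model / mse_from_model helpers, scikit-learn's metrics should give the same numbers. A minimal sketch, assuming the NaN-free x, y arrays and the fitted polynomial model_N from the previous cell:

# Sketch: verify the custom error functions against scikit-learn's metrics.
# Assumes the cleaned x, y arrays and the fitted model_N exist as above.
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_model = model_N(x)
print("MAE (sklearn):", mean_absolute_error(y, y_model))
print("MSE (sklearn):", mean_squared_error(y, y_model))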
Recapitulemos: * No sabemos a priori cuál es el grado del polinomio.* Si se fija un grado del polinomio, los coeficientes se encuentran minimizando el error cuadrático medio.Lo anterior es frecuente en todos los modelos de Machine Learning:* Los parámetros de un modelo se llaman **metaparámetros**. Son ciertos parámetros que se definen pero no forman parte de los valores que se ajustarán con los datos.* Una vez definidos los metaparámetros, se buscan los valores de los parámetros. Las librerías proporcionan métodos sencillos para ajustar un modelo específico, pero encontrar los metaparámetros resulta en general un desafío más grande. Eligiendo el valor de NEn el caso de nuestro problema de juguete, queremos encontrar el metaparámetro $N$: el grado del polinomio.degrees = list(range(1,25)) mse = [] for N in degrees: model = np.poly1d(np.polyfit(x, y, N)) mse_error = mse_from_model(x, y, model) mse.append(mse_error) print(N, mse_error) plt.figure(figsize=(16,8)) plt.plot(degrees, mse, 'o-', label="Train error") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()A partir de lo anterior, sería razonable pensar que tenemos que tomar un polinomio suficientemente grande. Lo anterior es una clásica falacia o error de entrenamiento de modelos.***Lo que buscamos no es un modelo que explique perfectamente el pasado, sino que logre predecir razonablemente bien el futuro.***Todo polinomio o modelo extremandamente complejo logrará reproducir perfectamente los datos conocidos. La simple memorización de los resultados cumple ese objetivo. La tarea de los modelos de Machine Learning es generalizar. Como, a partir de ejemplos, es posible aprender parámetros que lograrán una predicción acertada. Sets de entrenamiento, validación, verificación, predicciónEn el entrenamiento de modelos de Machine Learning, resulta común dividir los datos en conjuntos con distintas finalidades:* **Set de entrenamiento (Training set)**: Set utilizado para entrenar el modelo, asumiento conocidos los metaparámetros.* **Set de verificación/validación (validation set)**: Set utilizado para evaluar el modelo y comparar metaparámetros.* **Set de testeo (test set)**: Set para estimar el error de predicción del modelo, una vez seleccionado.La división de los datos conocidos en conjuntos de entrenamiento - validación - testeo se hace en relación 60%-20%-20% o 80%-10%-10%.from sklearn.model_selection import train_test_split x_train, x_vt, y_train, y_vt = train_test_split(x, y, test_size=0.20, random_state=42) x_val, x_test, y_val, y_test = train_test_split(x_vt, y_vt, test_size=0.50, random_state=42) print(x_train.shape[0], y_train.shape[0], 100*x_train.shape[0]/x.shape[0]) print(x_val.shape[0], y_val.shape[0], 100*x_val.shape[0]/x.shape[0]) print(x_test.shape[0], y_test.shape[0], 100*x_test.shape[0]/x.shape[0]) degrees = list(range(1,25)) mse_train = [] mse_test = [] values = [] for n in degrees: coeffs = np.polyfit(x_train, y_train, n) model_n = np.poly1d(coeffs) mse_train.append(mse_from_model(x_train, y_train, model_n)) mse_test.append(mse_from_model(x_test, y_test, model_n)) plt.figure(figsize=(10,10)) plt.plot(degrees, mse_train,'x-', lw=2.0, label="train") plt.plot(degrees, mse_test,'o-', lw=2.0, label="test") plt.xlabel("N (grado polinomio)") plt.ylabel("Mean Squared Error") plt.ylim([0, 1.1*max(max(mse_train), max(mse_test))]) plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()Al combinar un set de entrenamiento con un set de contraste o validación, podemos ver que 
aumentar el grado del polinomio no es ventajoso. De hecho y como resultaba intuitivo, resulta mejor considerar un modelo más bien simple: una recta o un relación cuadrática.En general, conviene aplicar la navaja de Occam: *En igualdad de condiciones, la explicación más sencilla suele ser la más probable.* Entre dos modelos que tienen una capacidad predictiva similar, conviene tomar el más simple de ambos (con menos parámetros). El error del modelo podemos indicarlo considerando el conjunto de testeo y el conjunto de testeo:N = 10 coeffs_N = np.polyfit(x_train, y_train, N) model_N = np.poly1d(coeffs_N) print("Mean Absolute Error (MAE) for N={}: {}".format(N, mae_from_model(x_test, y_test, model_N))) print("Mean Squared Error (MSE) for N={}: {}".format(N, mse_from_model(x_test, y_test, model_N)))Mean Absolute Error (MAE) for N=10: 7.4482684626160065 Mean Squared Error (MSE) for N=10: 86.76869229393691¿Como se comportaría el modelo en un conjunto distinto de datos?Uno de los grandes problemas que se tiene en Machine Learning es que a veces no se posee un control perfecto del dataset donde se realizará la predicción. Por ejemplo, si se trata de un modelo que trabaja con fotografías, el modelo puede haberse entrenado en fotografías de buena calidad e iluminación, pero debe trabajar además con fotografías borrosas o con baja iluminación.La única forma que un modelo funcione de la misma manera en el conjunto de datos de entrenamiento y predicción (producción) es que estos sean tan parecidos como sea posible.# Train the model N = 5 coeffs_N = np.polyfit(x_train, y_train, N) model_N = np.poly1d(coeffs_N) # Get new data x_new, y_new = get_data(N_data=100, xmin=100, xmax=200) y_pred = model_N(x_new) print("Mean Squared Error (MSE) for N={}: {}".format(N, mse_from_model(x_new, y_pred, model_N))) # Plot plt.figure(figsize=(10,10)) plt.plot(x_new, y_pred, "x", lw=2.0, label="model") plt.plot(x_new, y_new, "o", lw=2.0, label="true") plt.xlabel("x", fontsize=16) plt.ylabel("y", fontsize=16) plt.legend() plt.show()Mean Squared Error (MSE) for N=5: 0.0Visualizing Google Forms Data with Seaborn This is the second part of an article from [Practical Business Python](htp://pbpython.com) describing how to retrieve and analyze data from a Google Form.Please review [part 1](http://pbpython.com/pandas-google-forms-part1.html) for the details of how to set up authentication and get the data into the pandaqs dataframe.The full article corresponding to this notebook is [here](http://pbpython.com/pandas-google-forms-part2.html) Setup Bring in our standard imports as well as the authentication libraries we will need to get access to our form.import gspread from oauth2client.client import SignedJwtAssertionCredentials import pandas as pd import jsonImport Ipython display as well as graphing libraries. 
For this article, we will be using [seaborn](http://stanford.edu/~mwaskom/software/seaborn/index.html).from IPython.display import display import matplotlib.pyplot as plt import seaborn as sns %matplotlib inlineSetup authentication process to pull in the survey data stored in the Google Sheet.SCOPE = ["https://spreadsheets.google.com/feeds"] SECRETS_FILE = "Pbpython-key.json" SPREADSHEET = "PBPython User Survey (Responses)" # Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html # Load in the secret JSON key (must be a service account) json_key = json.load(open(SECRETS_FILE)) # Authenticate using the signed key credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], SCOPE)Now open up the file and read all data in a DataFramegc = gspread.authorize(credentials) # Open up the workbook based on the spreadsheet name workbook = gc.open(SPREADSHEET) # Get the first sheet sheet = workbook.sheet1 # Extract all data into a dataframe results = pd.DataFrame(sheet.get_all_records()) results.head()We need to do some cleanup to make the data easier to analyze.# Do some minor cleanups on the data # Rename the columns to make it easier to manipulate # The data comes in through a dictionary so we can not assume order stays the # same so must name each column column_names = {'Timestamp': 'timestamp', 'What version of python would you like to see used for the examples on the site?': 'version', 'How useful is the content on practical business python?': 'useful', 'What suggestions do you have for future content?': 'suggestions', 'How frequently do you use the following tools? [Python]': 'freq-py', 'How frequently do you use the following tools? [SQL]': 'freq-sql', 'How frequently do you use the following tools? [R]': 'freq-r', 'How frequently do you use the following tools? [Javascript]': 'freq-js', 'How frequently do you use the following tools? [VBA]': 'freq-vba', 'How frequently do you use the following tools? [Ruby]': 'freq-ruby', 'Which OS do you use most frequently?': 'os', 'Which python distribution do you primarily use?': 'distro', 'How would you like to be notified about new articles on this site?': 'notify' } results.rename(columns=column_names, inplace=True) results.timestamp = pd.to_datetime(results.timestamp) results.head()There are a small number of free form comments. Let's strip those out and remove them from the results.suggestions = results[results.suggestions.str.len() > 0]["suggestions"]Since there are only a small number of comments, just print them out.However, if we had more comments and wanted to do more analysis we certainly good.for index, row in suggestions.iteritems(): display(row)Drop the suggestions. We won't use them any more.results.drop("suggestions", axis=1, inplace=True) results.head()Explore the data For Numeric columns, start with describe to see what we haveresults.describe()Because we only have 1, 2, 3 as options the numeric results aren't telling us that much. 
I am going to convert the number to more useful descriptions.results['useful'] = results['useful'].map({1: '1-low', 2: '2-medium', 3: '3-high'}) results.head()Value counts give us an easy distribution view into the raw numbersresults["version"].value_counts()Use normalize to see it by percentage.results.os.value_counts(normalize=True)While the numbers are useful, wouldn't it be nicer to visually show the results?Seaborn's [factorplot](http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.factorplot.html) is helpful for showing this kind of categorical data.Because factorplot is so powerful, I'll build up step by step to show how it can be used for complex data analysis.First, look at number of users by OS.sns.factorplot("os", data=results, palette="BuPu")It is easy to order the results using x_ordersns.factorplot("os", x_order=["Linux", "Windows", "Mac"], data=results, palette="BuPu")Do a similar plot on python versionsns.factorplot("version", data=results, palette="BuPu")This is useful but wouldn't it be better to compare with OS and preferred python version? This is where factorplot starts to show more versatility. The key component is to use hue to automatically slice the data by python version (in this case).sns.factorplot("os", hue="version", x_order=["Linux", "Windows", "Mac"], data=results, palette="Paired")Because seaborn knows how to work with dataframes, we just need to pass in the column names for the various arguments and it will do the analysis and presentation.How about if we try to see if there is any relationship between how useful the site is and OS/Python choice? We can add the useful column into the plot using col.sns.factorplot("version", hue="os", data=results, col="useful", palette="Paired")If we can add a column, we can also add a row and seaborn takes care of the rest.In looking at the data, we have two different versions of winpython so clean that up first.results['distro'] = results['distro'].str.replace('WinPython', 'winpython') results.head()We can also look at the distros. Since there is some overlap with the distros and os, let's only look at a subset of distros. For instance, someone using winpython is not going to be using it on a Mac.results['distro'].value_counts()The most meaningful data would be looking at the Anaconda and Official python.org binaries. Let's filter all of our data only on these two values.results_distro = results[results["distro"].isin(["Anaconda", "Official python.org binaries"])] results_distro.head()Now do our factorplot with multiple columns and rows using row and col.sns.factorplot("version", hue="os", data=results_distro, col="useful", row="distro", margin_titles=True, sharex=False)Responses over time We know that we have 55 results now. It would be interesting to see how those results came in over time. Using this method, we can very simply look at this by any time period we want.The seaborn's [timeseries](http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.tsplot.html) supports this type of analysis and much more.For ease of calculating responses over time, add a count colum for each response.results["count"] = 1 results.head()To get totals over time, set our index to the timestamptotal_results = results.set_index('timestamp') total_results.head()Use pandas TimeGrouper to summarize the data by day and do a cumulative sum. 
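Note that the cumulative count below relies on pd.TimeGrouper, which was deprecated and later removed from pandas; pd.Grouper is the drop-in replacement. A minimal sketch, assuming the total_results frame (indexed by timestamp, with a "count" column) built above:

# Sketch: same daily grouping with pd.Grouper, for pandas releases where
# pd.TimeGrouper no longer exists. Assumes total_results is defined as above.
import pandas as pd

running_results = (total_results
                   .groupby(pd.Grouper(freq='D'))["count"]
                   .count()
                   .cumsum())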
We could easily do this for any time period too.running_results = total_results.groupby(pd.TimeGrouper('D'))["count"].count().cumsum() running_resultsTo label the x-axis we need to define our time rangestep = pd.Series(range(0,len(running_results)), name="Days") sns.tsplot(running_results, value="Total Responses", time=step, color="husl")Heatmaps and Clustermaps The final section of data to analyze is the frequency with which readers are using different technology. I am going to use a [heatmap](http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.htmlseaborn.heatmap) to look for any interesting insights. Let's look at the data again.results.head() results["freq-py"].value_counts()What we need to do is construct a single DataFrame with all the value_counts for the specific technology.First we will create a list containing each value count.all_counts = [] for tech in ["freq-py", "freq-sql", "freq-r", "freq-ruby", "freq-js", "freq-vba"]: all_counts.append(results[tech].value_counts()) display(all_counts)Now, concat the lists along axis=1.Fill in any nan values with 0 too.tech_usage = pd.concat(all_counts, keys=["Python", "SQL", "R", "Ruby", "javascript", "VBA"], axis=1) tech_usage = tech_usage.fillna(0) tech_usageWe have a nice table but there are a few problems.First, we have one column with blank values that we don't want.Secondly, we would like to order from Daily -> Never. Use reindex to accomplish both tasks.tech_usage = tech_usage.reindex(["Daily", "A couple times a week", "Once a month", "Infrequently", "Never"]) tech_usageNow that the data is in the correct table format, we can create a heatmap.sns.heatmap(tech_usage, annot=True)So, what does this tell us?Not surprisingly, most people use python very frequently.Additionally, it looks like very few survey takers are using Ruby or VBA. A variation of the heatmap is the [clustermap](http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.clustermap.htmlseaborn.clustermap). The main feature it does is that it tries to reorganize the data to more easily see relationships/clusters.sns.clustermap(tech_usage, annot=True)![](https://raw.githubusercontent.com/dvgodoy/PyTorch101_ODSC_London2019/master/images/pytorch-logo-dark.png) PyTorch 101: Building a Model Step-by-Step Introduction **PyTorch** is the **fastest growing** Deep Learning framework and it is also used by **Fast.ai** in its MOOC, [Deep Learning for Coders](https://course.fast.ai/) and its [library](https://docs.fast.ai/).PyTorch is also very *pythonic*, meaning, it feels more natural to use it if you already are a Python developer.Besides, using PyTorch may even improve your health, according to [](https://twitter.com/karpathy/status/868178954032513024) :-) Motivation There are *many many* PyTorch tutorials around and its documentation is quite complete and extensive. So, **why** should you keep reading this step-by-step tutorial?Well, even though one can find information on pretty much anything PyTorch can do, I missed having a **structured, incremental and from first principles** approach to it.In this tutorial, I will guide you through the *main reasons* why PyTorch makes it much **easier** and more **intuitive** to build a Deep Learning model in Python — **autograd, dynamic computation graph, model classes** and more. Agenda A Simple Problem - Linear Regression PyTorch: tensors, tensors, tensors Gradient Descent in 5 easy steps! Autograd, your companion for all your gradient needs! Dynamic Computation Graph: what is that? 
Optimizer: learning the parameters step-by-step Loss: aggregating erros into a single value Model: making predictions Dataset DataLoader, splitting your data into mini-batches Evaluation: does it generalize? Saving (and loading) models: taking a break A Simple Problem - Linear Regression Most tutorials start with some nice and pretty *image classification problem* to illustrate how to use PyTorch. It may seem cool, but I believe it **distracts** you from the **main goal: how PyTorch works**?For this reason, in this tutorial, I will stick with a **simple** and **familiar** problem: a **linear regression with a single feature x**! It doesn’t get much simpler than that…$$\large y = a + b x + \epsilon$$We can also think of it as the **simplest neural network**: one node, one input, one output, linear activation function.Adapted from Source Data Generation Let’s start **generating** some synthetic data: we start with a vector of 100 points for our **feature x** and create our **labels** using **a = 1, b = 2** and some Gaussian noise.import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.style.use('fivethirtyeight') true_a = 1 true_b = 2 N = 100 # Data Generation np.random.seed(42) x = np.random.rand(N, 1) y = true_a + true_b * x + .1 * np.random.randn(N, 1)Train / Validation SplitNext, let’s **split** our synthetic data into **train** and **validation** sets, shuffling the array of indices and using the first 80 shuffled points for training.# Shuffles the indices idx = np.arange(N) np.random.shuffle(idx) # Uses first 80 random indices for train train_idx = idx[:int(N*.8)] # Uses the remaining indices for validation val_idx = idx[int(N*.8):] # Generates train and validation sets x_train, y_train = x[train_idx], y[train_idx] x_val, y_val = x[val_idx], y[val_idx] fig, ax = plt.subplots(1, 2, figsize=(12, 4)) ax[0].scatter(x_train, y_train) ax[0].set_xlabel('x') ax[0].set_ylabel('y') ax[0].set_ylim([1, 3]) ax[0].set_title('Generated Data - Train') ax[1].scatter(x_val, y_val, c='r') ax[1].set_xlabel('x') ax[1].set_ylabel('y') ax[1].set_ylim([1, 3]) ax[1].set_title('Generated Data - Validation')PyTorch: tensors, tensors, tensors!pip install --quiet torchviz import torch import torch.optim as optim import torch.nn as nn from torchviz import make_dotFirst, we need to cover a **few basic concepts** that may throw you off-balance if you don’t grasp them well enough before going full-force on modeling.In Deep Learning, we see **tensors** everywhere. Well, Google’s framework is called *TensorFlow* for a reason! *What is a tensor, anyway*? TensorsIn *Numpy*, you may have an **array** that has **three dimensions**, right? That is, technically speaking, a **tensor**.A **scalar** (a single number) has **zero** dimensions, a **vector has one** dimension, a **matrix has two** dimensions and a **tensor has three or more dimensions**. That’s it!But, to keep things simple, it is commonplace to call vectors and matrices tensors as well — so, from now on, **everything is either a scalar or a tensor**.![alt text](https://raw.githubusercontent.com/dvgodoy/PyTorch101_ODSC_London2019/master/images/linear_dogs.jpg)Tensors are just higher-dimensional matrices :-) [Source](http://karlstratos.com) You can create **tensors** in PyTorch pretty much the same way you create **arrays** in Numpy. 
Using [**tensor()**](https://pytorch.org/docs/stable/torch.htmltorch.tensor) you can create either a scalar or a tensor.PyTorch's tensors have equivalent functions as its Numpy counterparts, like: [**ones()**](https://pytorch.org/docs/stable/torch.htmltorch.ones), [**zeros()**](https://pytorch.org/docs/stable/torch.htmltorch.zeros), [**rand()**](https://pytorch.org/docs/stable/torch.htmltorch.rand), [**randn()**](https://pytorch.org/docs/stable/torch.htmltorch.randn) and many more.scalar = torch.tensor(3.14159) vector = torch.tensor([1, 2, 3]) matrix = torch.ones((2, 3), dtype=torch.float) tensor = torch.randn((2, 3, 4), dtype=torch.float) print(scalar) print(vector) print(matrix) print(tensor)You can get the shape of a tensor using its [**size()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.size) method or its **shape** attribute.print(tensor.size(), tensor.shape)You can also reshape a tensor using its [**reshape()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.reshape) or [**view()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.view) methods.Beware: these methods create a new tensor with the desired shape that **shares the underlying data** with the original tensor!new_tensor1 = tensor.reshape(2, -1) new_tensor2 = tensor.view(2, -1) print(new_tensor1.shape, new_tensor2.shape)If you want to copy all data for real, that is, duplicate it in memory, you should use either its [**copy_()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.copy_) or [**clone()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.clone) methods. Loading Data, Devices and CUDA ”*How do we go from Numpy’s arrays to PyTorch’s tensors*”, you ask? That’s what [**from_numpy()**](https://pytorch.org/docs/stable/torch.htmltorch.from_numpy) is good for. It returns a **CPU tensor**, though.You can also easily **cast** it to a lower precision (32-bit float) using [**float()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.float).# Our data was in Numpy arrays, but we need to transform them into PyTorch's Tensors x_train_tensor = torch.from_numpy(x_train).float() y_train_tensor = torch.from_numpy(y_train).float() print(type(x_train), type(x_train_tensor))“*But I want to use my fancy GPU…*”, you say.No worries, that’s what [**to()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.to) is good for. It sends your tensor to whatever **device** you specify, including your **GPU** (referred to as `cuda` or `cuda:0`).“*What if I want my code to fallback to CPU if no GPU is available?*”, you may be wondering… PyTorch got your back once more — you can use [**cuda.is_available()**](https://pytorch.org/docs/stable/cuda.html?highlight=is_availabletorch.cuda.is_available) to find out if you have a GPU at your disposal and set your device accordingly.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Our data was in Numpy arrays, but we need to transform them into PyTorch's Tensors x_train_tensor = torch.from_numpy(x_train).float().to(device) y_train_tensor = torch.from_numpy(y_train).float().to(device) print(type(x_train), type(x_train_tensor))If you compare the **types** of both variables, you’ll get what you’d expect: `numpy.ndarray` for the first one and `torch.Tensor` for the second one.But where does your nice tensor “live”? In your CPU or your GPU? 
You can’t say… but if you use PyTorch’s **type()**, it will reveal its **location** — `torch.cuda.FloatTensor` — a GPU tensor in this case.print(x_train_tensor.type())We can also go the other way around, turning tensors back into Numpy arrays, using [**numpy()**](https://pytorch.org/docs/stable/tensors.html?highlight=numpytorch.Tensor.numpy). It should be easy as `x_train_tensor.numpy()` but…x_train_tensor.numpy()Unfortunately, Numpy **cannot** handle GPU tensors… you need to make them CPU tensors first using [**cpu()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.cpu).x_train_tensor.cpu().numpy()Creating Tensor for Parameters What distinguishes a *tensor* used for *data* — like the ones we’ve just created — from a **tensor** used as a (*trainable*) **parameter/weight**?The latter tensors require the **computation of its gradients**, so we can **update** their values (the parameters’ values, that is). That’s what the **`requires_grad=True`** argument is good for. It tells PyTorch we want it to compute gradients for us.---A tensor for a learnable parameter requires gradient!---You may be tempted to create a simple tensor for a parameter and, later on, send it to your chosen device, as we did with our data, right?Actually, you should **assign** tensors to a **device** at the moment of their **creation** to avoid unexpected behaviors...# We can specify the device at the moment of creation - RECOMMENDED! a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) print(a, b)Now that we know how to create tensors that require gradients, let’s see how PyTorch handles them — that’s the role of the… Gradient Descent in 5 easy steps! Gradient descent is the most common **optimization algorithm** in Machine Learning and Deep Learning.The purpose of using gradient descent is **to minimize the loss**, that is, **minimize the errors between predictions and actual values** (and sometimes some other term as well).It goes beyond the scope of this tutorial to fully explain how gradient descent works, but I'll cover the **five basic steps** you'd need to go through to compute it, namely:- Step 0: Random initialize parameters / weights- Step 1: Compute model's predictions - forward pass- Step 2: Compute loss- Step 3: Compute the gradients- Step 4: Update the parameters- Step 5: Rinse and repeat!---If you want to learn more about gradient descent, check the following resources:- [**Linear Regression Simulator**](https://www.mladdict.com/linear-regression-simulator), which goes through the very same steps listed here- [**A Visual and Interactive Guide to the Basics of Neuran Networks**](http://jalammar.github.io/visual-interactive-guide-basics-neural-networks/)- [**Gradient Descent Algorithms and Its Variants**](https://towardsdatascience.com/gradient-descent-algorithm-and-its-variants-10f652806a3)--- Step 0: InitializationTechnically, this step is not part of gradient descent, but it is an important step nonetheless.For training a model, you need to **randomly initialize the parameters/weights** (we have only two, **a** and **b**).Make sure to *always initialize your random seed* to ensure **reproducibility** of your results. 
As usual, the random seed is [42](https://en.wikipedia.org/wiki/Phrases_from_The_Hitchhiker%27s_Guide_to_the_GalaxyAnswer_to_the_Ultimate_Question_of_Life,_the_Universe,_and_Everything_(42)), the *least random* of all random seeds one could possibly choose :-)**BTW: we are back to Numpy for a little while!**np.random.seed(42) a = np.random.randn(1) b = np.random.randn(1) print(a, b)Step 1: Compute Model's PredictionsThis is the **forward pass** - it simply *computes the model's predictions using the current values of the parameters/weights*. At the very beginning, we will be producing really bad predictions, as we started with random values from Step 0.# Computes our model's predicted output yhat = a + b * x_trainStep 2: Compute Loss There is a subtle but fundamental difference between **error** and **loss**. The **error** is the difference between **actual** and **predicted** computed for a single data point.$$\Large error_i = y_i - \hat{y_i}$$The **loss**, on the other hand, is some sort of **aggregation of errors for a set of data points**.For a regression problem, the **loss** is given by the **Mean Square Error (MSE)**, that is, the average of all squared differences between **actual values** (y) and **predictions** (a + bx).$$\large MSE = \frac{1}{N} \sum_{i=1}^N{error_i}^2$$$$\large MSE = \frac{1}{N} \sum_{i=1}^N{(y_i - \hat{y_i})}^2$$$$\large MSE = \frac{1}{N} \sum_{i=1}^N{(y_i - a - b x_i)}^2$$---It is worth mentioning that, if we **compute the loss** using:- **all points** in the training set (N), we are performing a **batch** gradient descent- a **single point** at each time, it would be a **stochastic** gradient descent- anything else (n) **in-between 1 and N** characterizes a **mini-batch** gradient descent---Source# How wrong is our model? That's the error! error = (y_train - yhat) # It is a regression, so it computes mean squared error (MSE) loss = (error ** 2).mean() print(loss)Step 3: Compute the Gradients A **gradient** is a **partial derivative** — *why partial*? Because one computes it with respect to (w.r.t.) a **single parameter**. We have two parameters, **a** and **b**, so we must compute two partial derivatives.A **derivative** tells you *how much* **a given quantity changes** when you *slightly* vary some **other quantity**. In our case, how much does our **MSE** **loss** change when we vary **each one of our two parameters**?The *right-most* part of the equations below is what you usually see in implementations of gradient descent for a simple linear regression. 
In the **intermediate step**, I show you **all elements** that pop-up from the application of the [chain rule](https://en.wikipedia.org/wiki/Chain_rule), so you know how the final expression came to be.---Gradient = how much the LOSS changes if ONE parameter changes a little bit!--- **Gradients**:$$\large \frac{\partial{MSE}}{\partial{a}} = \frac{\partial{MSE}}{\partial{\hat{y_i}}} \cdot \frac{\partial{\hat{y_i}}}{\partial{a}} = \frac{1}{N} \sum_{i=1}^N{2(y_i - a - b x_i) \cdot (-1)} = -2 \frac{1}{N} \sum_{i=1}^N{(y_i - \hat{y_i})}$$ $$\large \frac{\partial{MSE}}{\partial{b}} = \frac{\partial{MSE}}{\partial{\hat{y_i}}} \cdot \frac{\partial{\hat{y_i}}}{\partial{b}} = \frac{1}{N} \sum_{i=1}^N{2(y_i - a - b x_i) \cdot (-x_i)} = -2 \frac{1}{N} \sum_{i=1}^N{x_i (y_i - \hat{y_i})}$$# Computes gradients for both "a" and "b" parameters a_grad = -2 * error.mean() b_grad = -2 * (x_train * error).mean() print(a_grad, b_grad)Step 4: Update the Parameters In the final step, we **use the gradients to update** the parameters. Since we are trying to **minimize** our **losses**, we **reverse the sign** of the gradient for the update.There is still another parameter to consider: the **learning rate**, denoted by the *Greek letter* **eta** (that looks like the letter **n**), which is the **multiplicative factor** that we need to apply to the gradient for the parameter update. **Parameters**:$$\large a = a - \eta \frac{\partial{MSE}}{\partial{a}}$$$$\large b = b - \eta \frac{\partial{MSE}}{\partial{b}}$$ Let's start with a value of **0.1** (which is a relatively *big value*, as far as learning rates are concerned!).# Sets learning rate lr = 1e-1 print(a, b) # Updates parameters using gradients and the learning rate a = a - lr * a_grad b = b - lr * b_grad print(a, b)---"Choose your learning rate wisely..."The learning rate is the single most important hyper-parameter to tune when you are using Deep Learning models!What happens if I choose the learning rate **poorly**? Your model may **take too long to train** or **get stuck with a high loss** or, even worse, **diverge into an exploding loss**!Source--- Playing with Learning RatesLet's work through **an interactive example**!We start at a (not so) **random initial value** of our **feature**, say, -1.5. 
It has a corresponding **loss** of 2.25.You can choose between **two functions**:- **convex**, meaning, its **loss is well-behaved** and **gradient descent is guaranteed to converge**- **non-convex**, meaning, **all bets are off**!Every time you **take a step**, the plot gets updated:- The **red vector** is our update to the **weight**, that is, **learning rate times gradient**.- The **gray vecto**r shows **how much the cost changes** given our update.- If you divide their lengths, **gray over red**, it will give you the **approximate gradient**.# Downloads a script into Colab !curl https://raw.githubusercontent.com/dvgodoy/PyTorch101_ODSC_London2019/master/gradient_descent.py --output gradient_descent.py from plotly.offline import iplot, init_notebook_mode from ipywidgets import VBox, IntSlider, FloatSlider, Dropdown from gradient_descent import * init_notebook_mode(connected=False) w0 = FloatSlider(description='Start', value=-1.5, min=-2, max=2, step=.05) functype = Dropdown(description='Function', options=['Convex', 'Non-Convex'], value='Convex') lrate = FloatSlider(description='Learning Rate', value=.05, min=.05, max=1.1, step=.05) n_steps = IntSlider(description='# updates', value=10, min=10, max=20, step=1) configure_plotly_browser_state() VBox((w0, functype, lrate, n_steps)) configure_plotly_browser_state() fig = build_fig(functype.value, lrate.value, w0.value, n_steps.value) iplot(fig)Step 5: Rinse and Repeat! Now we use the **updated parameters** to go back to **Step 1** and restart the process.Repeating this process over and over, for **many epochs**, is, in a nutshell, **training** a model.---An **epoch** is complete whenever **every point has been already used once for computing the loss**: - **batch** gradient descent: this is trivial, as it uses all points for computing the loss — **one epoch** is the same as **one update**- **stochastic** gradient descent: **one epoch** means **N updates**- **mini-batch** (of size n): **one epoch** has **N/n updates**---Let's put the previous pieces of code together and loop over many epochs:# Defines number of epochs n_epochs = 1000 # Step 0 np.random.seed(42) a = np.random.randn(1) b = np.random.randn(1) for epoch in range(n_epochs): # Step 1 # Computes our model's predicted output yhat = a + b * x_train # Step 2 # How wrong is our model? That's the error! error = (y_train - yhat) # It is a regression, so it computes mean squared error (MSE) loss = (error ** 2).mean() # Step 3 # Computes gradients for both "a" and "b" parameters a_grad = -2 * error.mean() b_grad = -2 * (x_train * error).mean() # Step 4 # Updates parameters using gradients and the learning rate a -= lr * a_grad b -= lr * b_grad print(a, b)Just keep in mind that, if you **don’t** use batch gradient descent (our example does),you’ll have to write an **inner loop** to perform the **five training steps** for either each **individual point** (**stochastic**) or **n points** (**mini-batch**). We’ll see a mini-batch example later down the line. Sanity Check Just to make sure we haven’t done any mistakes in our code, we can use *Scikit-Learn’s Linear Regression* to fit the model and compare the coefficients.# Sanity Check: do we get the same results as our gradient descent? from sklearn.linear_model import LinearRegression linr = LinearRegression() linr.fit(x_train, y_train) print(linr.intercept_, linr.coef_[0])They **match** up to 6 decimal places — we have a *fully working implementation of linear regression* using Numpy.**Numpy?! 
Wait a minute… I thought this tutorial was about PyTorch!**Yes, it is, but this served **two purposes**: *first*, to introduce the **structure** of our task, which will remain largely the same and, *second*, to show you the main **pain points** so you can fully appreciate how much PyTorch makes your life easier :-)Numpy?! TORCH IT! Autograd, your companion for all your gradient needs! Autograd is PyTorch’s *automatic differentiation package*. Thanks to it, we **don’t need to worry** about partial derivatives, chain rule or anything like it.Computing gradients manually?! No way! Backward! backward So, how do we tell PyTorch to do its thing and **compute all gradients**? That’s what [**backward()**](https://pytorch.org/docs/stable/autograd.htmltorch.autograd.backward) is good for.Do you remember the **starting point** for **computing the gradients**? It was the **loss**, as we computed its partial derivatives w.r.t. our parameters. Hence, we need to invoke the `backward()` method from the corresponding Python variable, like, `loss.backward()`.# Step 0 torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) # Step 1 # Computes our model's predicted output yhat = a + b * x_train_tensor # Step 2 # How wrong is our model? That's the error! error = (y_train_tensor - yhat) # It is a regression, so it computes mean squared error (MSE) loss = (error ** 2).mean() # Step 3 # No more manual computation of gradients! loss.backward() # Computes gradients for both "a" and "b" parameters # a_grad = -2 * error.mean() # b_grad = -2 * (x_train_tensor * error).mean()grad / zero_ What about the **actual values** of the **gradients**? We can inspect them by looking at the [**grad**](https://pytorch.org/docs/stable/autograd.htmltorch.Tensor.grad) **attribute** of a tensor.print(a.grad, b.grad)If you check the method’s documentation, it clearly states that **gradients are accumulated**. You can check this out by running the two code cells above again.So, every time we use the **gradients** to **update** the parameters, we need to **zero the gradients afterwards**. And that’s what [**zero_()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.zero_) is good for.---*In PyTorch, every method that **ends** with an **underscore (_)** makes changes **in-place**, meaning, they will **modify** the underlying variable.*---a.grad.zero_(), b.grad.zero_()So, let’s **ditch** the **manual computation of gradients** and use both `backward()` and `zero_()` methods instead.And, we are still missing **Step 4**, that is, **updating the parameters**. Let's include it as well...# Step 0 torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) # Step 1 # Computes our model's predicted output yhat = a + b * x_train_tensor # Step 2 # How wrong is our model? That's the error! error = (y_train_tensor - yhat) # It is a regression, so it computes mean squared error (MSE) loss = (error ** 2).mean() # Step 3 # No more manual computation of gradients! loss.backward() # Computes gradients for both "a" and "b" parameters # a_grad = -2 * error.mean() # b_grad = -2 * (x_train_tensor * error).mean() print(a.grad, b.grad) # Step 4 # Updates parameters using gradients and the learning rate with torch.no_grad(): # what is that?! 
a -= lr * a.grad b -= lr * b.grad # PyTorch is "clingy" to its computed gradients, we need to tell it to let it go... a.grad.zero_() b.grad.zero_() print(a.grad, b.grad)no_grad "One does not simply update parameters without no_grad"Why do we need to use [**no_grad()**](https://pytorch.org/docs/stable/autograd.htmltorch.autograd.no_grad) to **update the parameters**?The culprit is PyTorch’s ability to build a **dynamic computation graph** from every **Python operation** that involves any **gradient-computing tensor** or its **dependencies**.---**What is a dynamic computation graph?**Don't worry, we’ll go deeper into the inner workings of the dynamic computation graph in the next section.---So, how do we tell PyTorch to “**back off**” and let us **update our parameters** without messing up with its **fancy dynamic computation graph**? That is the purpose of **no_grad()**: it allows us to **perform regular Python operations on tensors, independent of PyTorch’s computation graph**.lr = 1e-1 n_epochs = 1000 # Step 0 torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) for epoch in range(n_epochs): # Step 1 # Computes our model's predicted output yhat = a + b * x_train_tensor # Step 2 # How wrong is our model? That's the error! error = (y_train_tensor - yhat) # It is a regression, so it computes mean squared error (MSE) loss = (error ** 2).mean() # Step 3 # No more manual computation of gradients! loss.backward() # Step 4 # Updates parameters using gradients and the learning rate with torch.no_grad(): a -= lr * a.grad b -= lr * b.grad # PyTorch is "clingy" to its computed gradients, we need to tell it to let it go... a.grad.zero_() b.grad.zero_() print(a, b)Finally, we managed to successfully run our model and get the **resulting parameters**. Surely enough, they **match** the ones we got in our *Numpy*-only implementation.Let's take a look at the **loss** at the end of the training...lossWhat if we wanted to have it as a *Numpy* array? I guess we could just use **numpy()** again, right? (and **cpu()** as well, since our *loss* is in the `cuda` device...loss.cpu().numpy()What happened here? Unlike our *data tensors*, the **loss tensor** is actually computing gradients - and in order to use **numpy**, we need to [**detach()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.detach_) that tensor from the computation graph first:loss.detach().cpu().numpy()This seems like **a lot of work**, there must be an easier way! And there is one indeed: we can use [**item()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.item), for **tensors with a single element** or [**tolist()**](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.tolist) otherwise.print(loss.item(), loss.tolist())Dynamic Computation Graph: what is that? 
"No one can be told what the dynamic computation graph is - you have to see it for yourself"Jokes aside, I want **you** to **see the graph for yourself** too!The [PyTorchViz](https://github.com/szagoruyko/pytorchviz) package and its `make_dot(variable)` method allows us to easily visualize a graph associated with a given Python variable.So, let’s stick with the **bare minimum**: two (gradient computing) **tensors** for our parameters, predictions, errors and loss.torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) yhat = a + b * x_train_tensor error = y_train_tensor - yhat loss = (error ** 2).mean()Now let's plot the **computation graph** for the **yhat** variable.make_dot(yhat)Let’s take a closer look at its components:* **blue boxes**: these correspond to the **tensors** we use as **parameters**, the ones we’re asking PyTorch to **compute gradients** for;* **gray box**: a **Python operation** that involves a **gradient-computing tensor or its dependencies**;* **green box**: the same as the gray box, except it is the **starting point for the computation** of gradients (assuming the `**backward()**` method is called from the **variable used to visualize** the graph)— they are computed from the **bottom-up** in a graph.Now, take a closer look at the **green box**: there are **two arrows** pointing to it, since it is **adding up two variables**, `a` and `b*x`. Seems obvious, right?Then, look at the **gray box** of the same graph: it is performing a **multiplication**, namely, `b*x`. But there is only **one arrow** pointing to it! The arrow comes from the **blue box** that corresponds to our parameter `b`.Why don’t we have a box for our **data x**? The answer is: **we do not compute gradients for it**! So, even though there are *more* tensors involved in the operations performed by the computation graph, it **only** shows **gradient-computing tensors and its dependencies**. Try using the `make_dot` method to plot the **computation graph** of other variables, like `error` or `loss`.The **only difference** between them and the first one is the number of **intermediate steps (gray boxes)**.make_dot(loss)What would happen to the computation graph if we set **`requires_grad`** to **`False`** for our parameter **`a`**?a_nograd = torch.randn(1, requires_grad=False, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) yhat = a_nograd + b * x_train_tensor make_dot(yhat)Unsurprisingly, the **blue box** corresponding to the **parameter a** is no more! Simple enough: **no gradients, no graph**.The **best thing** about the *dynamic computing graph* is the fact that you can make it **as complex as you want it**. You can even use *control flow statements* (e.g., if statements) to **control the flow of the gradients** (obviously!) :-)Let's build a nonsensical, yet complex, computation graph just to make a point!yhat = a + b * x_train_tensor error = y_train_tensor - yhat loss = (error ** 2).mean() if loss > 0: yhat2 = b * x_train_tensor error2 = y_train_tensor - yhat2 loss += error2.mean() make_dot(loss)Optimizer: learning the parameters step-by-step So far, we’ve been **manually** updating the parameters using the computed gradients. That’s probably fine for *two parameters*… but what if we had a **whole lot of them**?! 
We use one of PyTorch’s **optimizers**, like [SGD](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD) or [Adam](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam).---There are **many** optimizers: **SGD** is the most basic of them, and **Adam** is one of the most popular. They achieve the same goal through, literally, **different paths**.---In the code below, we create a *Stochastic Gradient Descent* (SGD) optimizer to update our parameters **a** and **b**.---Don’t be fooled by the **optimizer’s** name: if we use **all training data** at once for the update — as we are actually doing in the code — the optimizer is performing a **batch** gradient descent, despite its name.---# Our parameters torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) # Learning rate lr = 1e-1 # Defines a SGD optimizer to update the parameters optimizer = optim.SGD([a, b], lr=lr)step / zero_grad An optimizer takes the **parameters** we want to update, the **learning rate** we want to use (and possibly many other hyper-parameters as well!) and **performs the updates** through its [**`step()`**](https://pytorch.org/docs/stable/optim.html#torch.optim.Optimizer.step) method.Besides, we also don’t need to zero the gradients one by one anymore. We just invoke the optimizer’s [**`zero_grad()`**](https://pytorch.org/docs/stable/optim.html#torch.optim.Optimizer.zero_grad) method and that’s it!n_epochs = 1000 for epoch in range(n_epochs): # Step 1 yhat = a + b * x_train_tensor # Step 2 error = y_train_tensor - yhat loss = (error ** 2).mean() # Step 3 loss.backward() # Step 4 # No more manual update! # with torch.no_grad(): # a -= lr * a.grad # b -= lr * b.grad optimizer.step() # No more telling PyTorch to let gradients go! # a.grad.zero_() # b.grad.zero_() optimizer.zero_grad() print(a, b)Cool! We’ve *optimized* the **optimization** process :-) What’s left? Loss: aggregating errors into a single value We now tackle the **loss computation**. As expected, PyTorch has us covered once again. There are many [loss functions](https://pytorch.org/docs/stable/nn.html#loss-functions) to choose from, depending on the task at hand. Since ours is a regression, we are using the [Mean Square Error (MSE)](https://pytorch.org/docs/stable/nn.html#torch.nn.MSELoss) loss.---Notice that `nn.MSELoss` actually **creates a loss function** for us — **it is NOT the loss function itself**. Moreover, you can specify a **reduction method** to be applied, that is, **how you want to aggregate the results for individual points** — you can average them (reduction='mean') or simply sum them up (reduction='sum').---# Defines a MSE loss function loss_fn = nn.MSELoss(reduction='mean') loss_fn fake_labels = torch.tensor([1., 2., 3.]) fake_preds = torch.tensor([1., 3., 5.]) loss_fn(fake_labels, fake_preds)We then **use** the created loss function to compute the loss given our **predictions** and our **labels**.torch.manual_seed(42) a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device) lr = 1e-1 n_epochs = 1000 # Defines a MSE loss function loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD([a, b], lr=lr) for epoch in range(n_epochs): # Step 1 yhat = a + b * x_train_tensor # Step 2 # No more manual loss!
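# note: nn.MSELoss follows the (input, target) convention, i.e. loss_fn(yhat, y); since the squared error is symmetric, the swapped argument order used below produces exactly the same value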
# error = y_tensor - yhat # loss = (error ** 2).mean() loss = loss_fn(y_train_tensor, yhat) # Step 3 loss.backward() # Step 4 optimizer.step() optimizer.zero_grad() print(a, b)At this point, there’s only one piece of code left to change: the **predictions**. It is then time to introduce PyTorch’s way of implementing a… Model: making predictions In PyTorch, a **model** is represented by a regular **Python class** that inherits from the [**Module**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Module) class.The most fundamental methods it needs to implement are:* **`__init__(self)`**: **it defines the parts that make up the model** —in our case, two parameters, **a** and **b**.* **`forward(self, x)`**: it performs the **actual computation**, that is, it **outputs a prediction**, given the input **x**.Let’s build a proper (yet simple) model for our regression task. It should look like this:class ManualLinearRegression(nn.Module): def __init__(self): super().__init__() a = torch.randn(1, requires_grad=True, dtype=torch.float) b = torch.randn(1, requires_grad=True, dtype=torch.float) # To make "a" and "b" real parameters of the model, we need to wrap them with nn.Parameter self.a = nn.Parameter(a) self.b = nn.Parameter(b) def forward(self, x): # Computes the outputs / predictions return self.a + self.b * xParameter In the **\__init__** method, we define our **two parameters**, **a** and **b**, using the [**Parameter()**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Parameter) class, to tell PyTorch these **tensors should be considered parameters of the model they are an attribute of**.Why should we care about that? By doing so, we can use our model’s [**parameters()**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Module.parameters) method to retrieve **an iterator over all model’s parameters**, even those parameters of **nested models**, that we can use to feed our optimizer (instead of building a list of parameters ourselves!).dummy = ManualLinearRegression() list(dummy.parameters())Moreover, we can get the **current values for all parameters** using our model’s [**state_dict()**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Module.state_dict) method.dummy.state_dict()state_dict The **state_dict()** of a given model is simply a Python dictionary that **maps each layer / parameter to its corresponding tensor**. But only **learnable** parameters are included, as its purpose is to keep track of parameters that are going to be updated by the **optimizer**.The **optimizer** itself also has a **state_dict()**, which contains its internal state, as well as the hyperparameters used.---It turns out **state_dicts** can also be used for **checkpointing** a model, as we will see later down the line.---optimizer.state_dict()Device **IMPORTANT**: we need to **send our model to the same device where the data is**. If our data is made of GPU tensors, our model must “live” inside the GPU as well.torch.manual_seed(42) device = 'cuda' if torch.cuda.is_available() else 'cpu' # Now we can create a model and send it at once to the device model = ManualLinearRegression().to(device) # We can also inspect its parameters using its state_dict print(model.state_dict())Forward Pass The **forward pass** is the moment when the model **makes predictions**.---You should **NOT call the `forward(x)`** method, though. You should **call the whole model itself**, as in **`model(x)`** to perform a forward pass and output predictions.---yhat = model(x_train_tensor)train "What does train() do? 
It only sets the mode!"In PyTorch, models have a [**train()**](https://pytorch.org/docs/stable/nn.html#torch.nn.Module.train) method which, somewhat disappointingly, **does NOT perform a training step**. Its only purpose is to **set the model to training mode**. Why is this important? Some models may use mechanisms like [**Dropout**](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), for instance, which have **distinct behaviors in training and evaluation phases**.lr = 1e-1 n_epochs = 1000 loss_fn = nn.MSELoss(reduction='mean') # Now the optimizer uses the parameters from the model optimizer = optim.SGD(model.parameters(), lr=lr) for epoch in range(n_epochs): # Sets model to training mode model.train() # Step 1 # No more manual prediction! # yhat = a + b * x_tensor yhat = model(x_train_tensor) # Step 2 loss = loss_fn(yhat, y_train_tensor) # Step 3 loss.backward() # Step 4 optimizer.step() optimizer.zero_grad() print(model.state_dict())Now, the printed statements will look like this — final values for parameters **a** and **b** are still the same, so everything is ok :-) Nested Models In our model, we manually created two parameters to perform a linear regression. ---You are **not** limited to defining parameters, though… **models can contain other models as their attributes** as well, so you can easily nest them. We’ll see an example of this shortly as well.---Let’s use PyTorch’s [**Linear**](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear) model as an attribute of our own, thus creating a nested model.Even though this clearly is a contrived example, as we are pretty much wrapping the underlying model without adding anything useful (or, at all!) to it, it illustrates the concept well.In the **`__init__`** method, we created an attribute that contains our **nested `Linear` model**.In the **`forward()`** method, we **call the nested model itself** to perform the forward pass (notice, we are **not** calling `self.linear.forward(x)`!).class LayerLinearRegression(nn.Module): def __init__(self): super().__init__() # Instead of our custom parameters, we use a Linear layer with single input and single output self.linear = nn.Linear(1, 1) def forward(self, x): # Now it only takes a call to the layer to make predictions return self.linear(x)Now, if we call the **parameters()** method of this model, **PyTorch will figure out the parameters of its attributes in a recursive way**.You can also add new `Linear` attributes and, even if you don’t use them at all in the forward pass, they will **still** be listed under `parameters()`.dummy = LayerLinearRegression() list(dummy.parameters()) dummy.state_dict()LayersA **Linear** model can be seen as a **layer** in a neural network.In the example above, the **hidden layer** would be `nn.Linear(3, 4)` and the **output layer** would be `nn.Linear(4, 1)`.There are **MANY** different layers that can be used in PyTorch:- [Convolution Layers](https://pytorch.org/docs/stable/nn.html#convolution-layers)- [Pooling Layers](https://pytorch.org/docs/stable/nn.html#pooling-layers)- [Padding Layers](https://pytorch.org/docs/stable/nn.html#padding-layers)- [Non-linear Activations](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity)- [Normalization Layers](https://pytorch.org/docs/stable/nn.html#normalization-layers)- [Recurrent Layers](https://pytorch.org/docs/stable/nn.html#recurrent-layers)- [Transformer Layers](https://pytorch.org/docs/stable/nn.html#transformer-layers)- [Linear
Layers](https://pytorch.org/docs/stable/nn.html#linear-layers)- [Dropout Layers](https://pytorch.org/docs/stable/nn.html#dropout-layers)- [Sparse Layers (embeddings)](https://pytorch.org/docs/stable/nn.html#sparse-layers)- [Vision Layers](https://pytorch.org/docs/stable/nn.html#vision-layers)- [DataParallel Layers (multi-GPU)](https://pytorch.org/docs/stable/nn.html#dataparallel-layers-multi-gpu-distributed)- [Flatten Layer](https://pytorch.org/docs/stable/nn.html#flatten)We have just used a **Linear** layer. Sequential Models Run-of-the-mill layers? Sequential model!Our model was simple enough… You may be thinking: "*why even bother to build a class for it?!*" Well, you have a point…For **straightforward models** that use **run-of-the-mill layers**, where the output of a layer is sequentially fed as an input to the next, we can use a, er… [**Sequential**](https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential) model :-)In our case, we would build a Sequential model with a single argument, that is, the Linear layer we used to train our linear regression. The model would look like this:model = nn.Sequential(nn.Linear(1, 1)).to(device)Simple enough, right? Training Step So far, we’ve defined:* an **optimizer*** a **loss function*** a **model**Scroll up a bit and take a quick look at the code inside the loop. Would it **change** if we were using a **different optimizer**, or **loss**, or even **model**? If not, how can we make it more generic?Well, I guess we could say all these lines of code **perform a training step**, given those **three elements** (optimizer, loss and model), the **features** and the **labels**.So, how about **writing a function that takes those three elements** and **returns another function that performs a training step**, taking a set of features and labels as arguments and returning the corresponding loss?def make_train_step(model, loss_fn, optimizer): # Builds function that performs a step in the train loop def train_step(x, y): # Sets model to TRAIN mode model.train() # Step 1: Makes predictions yhat = model(x) # Step 2: Computes loss loss = loss_fn(yhat, y) # Step 3: Computes gradients loss.backward() # Step 4: Updates parameters and zeroes gradients optimizer.step() optimizer.zero_grad() # Returns the loss return loss.item() # Returns the function that will be called inside the train loop return train_stepThen we can use this general-purpose function to build a **train_step()** function to be called inside our training loop.lr = 1e-1 # Create a MODEL, a LOSS FUNCTION and an OPTIMIZER model = nn.Sequential(nn.Linear(1, 1)).to(device) loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD(model.parameters(), lr=lr) # Creates the train_step function for our model, loss function and optimizer train_step = make_train_step(model, loss_fn, optimizer) train_stepNow our code should look like this… see how **tiny** the training loop is now?n_epochs = 1000 losses = [] # For each epoch... for epoch in range(n_epochs): # Performs one train step and returns the corresponding loss loss = train_step(x_train_tensor, y_train_tensor) losses.append(loss) # Checks model's parameters print(model.state_dict()) plt.plot(losses[:200]) plt.xlabel('Epochs') plt.ylabel('Loss') plt.yscale('log')Let’s give our training loop a rest and focus on our **data** for a while… so far, we’ve simply used our *Numpy arrays* turned **PyTorch tensors**.
But we can do better, we can build a… Dataset In PyTorch, a **dataset** is represented by a regular **Python class** that inherits from the [**Dataset**](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) class. You can think of it as a kind of Python **list of tuples**, each tuple corresponding to **one point (features, label)**.The most fundamental methods it needs to implement are:* **`__init__(self)`**: it takes **whatever arguments** are needed to build a **list of tuples** — it may be the name of a CSV file that will be loaded and processed; it may be two tensors, one for features, another one for labels; or anything else, depending on the task at hand.* **`__getitem__(self, index)`**: it allows the dataset to be **indexed**, so it can work like a list (`dataset[i]`) — it must **return a tuple (features, label)** corresponding to the requested data point. We can either return the **corresponding slices** of our **pre-loaded** dataset or tensors or, as mentioned above, **load them on demand** (like in this [example](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class)).* **`__len__(self)`**: it should simply return the **size** of the whole dataset so that, whenever it is sampled, its indexing is limited to the actual size.---There is **no need to load the whole dataset in the constructor method** (`__init__`). If your **dataset is big** (tens of thousands of image files, for instance), loading it at once would not be memory efficient. It is recommended to **load them on demand** (whenever `__getitem__` is called).---Let’s build a simple custom dataset that takes two tensors as arguments: one for the features, one for the labels. For any given index, our dataset class will return the corresponding slice of each of those tensors. It should look like this:from torch.utils.data import Dataset class CustomDataset(Dataset): def __init__(self, x_tensor, y_tensor): self.x = x_tensor self.y = y_tensor def __getitem__(self, index): return (self.x[index], self.y[index]) def __len__(self): return len(self.x) # Wait, is this a CPU tensor now? Why? Where is .to(device)? x_train_tensor = torch.from_numpy(x_train).float() y_train_tensor = torch.from_numpy(y_train).float() train_data = CustomDataset(x_train_tensor, y_train_tensor) print(train_data[0])---Did you notice we built our **training tensors** out of Numpy arrays but we **did not send them to a device**? So, they are **CPU** tensors now! **Why**?We **don’t want our whole training data to be loaded into GPU tensors**, as we have been doing in our example so far, because **it takes up space** in our precious **graphics card’s RAM**.--- TensorDataset Besides, you may be thinking "*why go through all this trouble to wrap a couple of tensors in a class?*". And, once again, you do have a point… if a dataset is nothing else but a **couple of tensors**, we can use PyTorch’s [**TensorDataset**](https://pytorch.org/docs/stable/data.html#torch.utils.data.TensorDataset) class, which will do pretty much what we did in our custom dataset above.from torch.utils.data import TensorDataset train_data = TensorDataset(x_train_tensor, y_train_tensor) print(train_data[0])OK, fine, but then again, **why** are we building a dataset anyway? We’re doing it because we want to use a… DataLoader, splitting your data into mini-batches - Let’s split data into mini-batches- Use DataLoaders!Until now, we have used the **whole training data** at every training step. It has been **batch gradient descent** all along.
This is fine for our *ridiculously small dataset*, sure, but if we want to get serious about all this, we **must use mini-batch** gradient descent. Thus, we need mini-batches. Thus, we need to **slice** our dataset accordingly. Do you want to do it *manually*?! Me neither!So we use PyTorch’s [**DataLoader**](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) class for this job. We tell it which **dataset** to use (the one we just built in the previous section), the desired **mini-batch size** and if we’d like to **shuffle** it or not. That’s it!Our **loader** will behave like an **iterator**, so we can **loop over it** and **fetch a different mini-batch** every time.from torch.utils.data import DataLoader train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)To retrieve a sample mini-batch, one can simply run the command below — it will return a list containing two tensors, one for the features, another one for the labels.next(iter(train_loader))How does this change our training loop? Let’s check it out!lr = 1e-1 # Create a MODEL, a LOSS FUNCTION and an OPTIMIZER model = nn.Sequential(nn.Linear(1, 1)).to(device) loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD(model.parameters(), lr=lr) # Creates the train_step function for our model, loss function and optimizer train_step = make_train_step(model, loss_fn, optimizer) n_epochs = 1000 losses = [] for epoch in range(n_epochs): # inner loop for x_batch, y_batch in train_loader: # the dataset "lives" in the CPU, and so do our mini-batches # therefore, we need to send those mini-batches to the # device where the model "lives" x_batch = x_batch.to(device) y_batch = y_batch.to(device) loss = train_step(x_batch, y_batch) losses.append(loss) print(model.state_dict()) plt.plot(losses) plt.xlabel('Epochs (?)') plt.ylabel('Loss') plt.yscale('log')Did you notice it is taking **longer** to train now? Can you guess **why**?Two things are different now: not only do we have an **inner loop** to load each and every **mini-batch** from our **DataLoader** but, more importantly, we are now **sending only one mini-batch to the device**.---For bigger datasets, **loading data sample by sample** (into a **CPU** tensor) using **Dataset’s `__getitem__`** and then **sending all samples** that belong to the **same mini-batch at once to your GPU** (device) is the way to go in order to make the **best use of your graphics card’s RAM**.Moreover, if you have **many GPUs** to train your model on, it is best to keep your dataset "agnostic" and assign the batches to different GPUs during training.---So far, we’ve focused on the **training data** only. We built a *dataset* and a *data loader* for it. We could do the same for the **validation** data, using the **split** we performed at the beginning of this post… or we could use **random_split** instead. random_split - How did you split your data?- I didn't...- WHAT?PyTorch’s [**random_split()**](https://pytorch.org/docs/stable/data.html#torch.utils.data.random_split) method is an easy and familiar way of performing a **training-validation split**.
Just keep in mind that, in our example, we need to apply it to the **whole dataset** (not the *training* dataset we built in couple of sections ago).Then, for each subset of data, we build a corresponding DataLoader, so our code looks like this:from torch.utils.data.dataset import random_split # builds tensors from numpy arrays BEFORE split x_tensor = torch.from_numpy(x).float() y_tensor = torch.from_numpy(y).float() # builds dataset containing ALL data points dataset = TensorDataset(x_tensor, y_tensor) # performs the split train_dataset, val_dataset = random_split(dataset, [80, 20]) # builds a loader of each set train_loader = DataLoader(dataset=train_dataset, batch_size=16) val_loader = DataLoader(dataset=val_dataset, batch_size=20)Now we have a **data loader** for our **validation** set, so, it makes sense to use it for the… Evaluation: does it generalize? Now, we need to change the training loop to include the **evaluation of our model**, that is, computing the **validation loss**. The first step is to include another inner loop to handle the *mini-batches* that come from the *validation loader* , sending them to the same *device* as our model. Next, we make **predictions** using our model and compute the corresponding **loss**.That’s pretty much it, but there are **two small, yet important**, things to consider:* [**torch.no_grad()**](https://pytorch.org/docs/stable/autograd.htmltorch.autograd.no_grad): even though it won’t make a difference in our simple model, it is a **good practice to wrap the validation inner loop with this context manager to disable any gradient calculation** that you may inadvertently trigger — **gradients belong in training**, not in validation steps; * [**eval()**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Module.eval): the only thing it does is **setting the model to evaluation mode** (just like its `train()` counterpart did), so the model can adjust its behavior regarding some operations, like [**Dropout**](https://pytorch.org/docs/stable/nn.htmltorch.nn.Dropout).Now, our training loop should look like this:torch.manual_seed(42) # builds tensors from numpy arrays BEFORE split x_tensor = torch.from_numpy(x).float() y_tensor = torch.from_numpy(y).float() # builds dataset containing ALL data points dataset = TensorDataset(x_tensor, y_tensor) # performs the split train_dataset, val_dataset = random_split(dataset, [80, 20]) # builds a loader of each set train_loader = DataLoader(dataset=train_dataset, batch_size=16) val_loader = DataLoader(dataset=val_dataset, batch_size=20) # defines learning rate lr = 1e-1 # Create a MODEL, a LOSS FUNCTION and an OPTIMIZER model = nn.Sequential(nn.Linear(1, 1)).to(device) loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD(model.parameters(), lr=lr) # Creates the train_step function for our model, loss function and optimizer train_step = make_train_step(model, loss_fn, optimizer) n_epochs = 1000 losses = [] val_losses = [] # Looping through epochs... for epoch in range(n_epochs): # TRAINING batch_losses = [] for x_batch, y_batch in train_loader: x_batch = x_batch.to(device) y_batch = y_batch.to(device) loss = train_step(x_batch, y_batch) batch_losses.append(loss) losses.append(np.mean(batch_losses)) # VALIDATION # no gradients in validation! 
with torch.no_grad(): val_batch_losses = [] for x_val, y_val in val_loader: x_val = x_val.to(device) y_val = y_val.to(device) # sets model to EVAL mode model.eval() # make predictions yhat = model(x_val) val_loss = loss_fn(yhat, y_val) val_batch_losses.append(val_loss.item()) val_losses.append(np.mean(val_batch_losses)) print(model.state_dict()) plt.plot(losses, label='Training Loss') plt.plot(val_losses, label='Validation Loss') plt.yscale('log') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend()"*Wait, there is something weird with this plot...*", you say. You're right, the **validation loss** is **smaller** than the **training loss**. Shouldn't it be the other way around?! Well, generally speaking, *YES*, it should... but you can learn more about situations where this *swap* happens at this great [post](pyimg.co/kku35). Training Loop The training loop should be a stable structure, so we can organize it into functions as well...Let's build a function for **validation** and another one for the **training loop** itself, training step and all!def make_train_step(model, loss_fn, optimizer): # Builds function that performs a step in the train loop def train_step(x, y): # Sets model to TRAIN mode model.train() # Step 1: Makes predictions yhat = model(x) # Step 2: Computes loss loss = loss_fn(yhat, y) # Step 3: Computes gradients loss.backward() # Step 4: Updates parameters and zeroes gradients optimizer.step() optimizer.zero_grad() # Returns the loss return loss.item() # Returns the function that will be called inside the train loop return train_step def validation(model, loss_fn, val_loader): # Figures device from where the model parameters (hence, the model) are device = next(model.parameters()).device.type # no gradients in validation! with torch.no_grad(): val_batch_losses = [] for x_val, y_val in val_loader: x_val = x_val.to(device) y_val = y_val.to(device) # sets model to EVAL mode model.eval() # make predictions yhat = model(x_val) val_loss = loss_fn(yhat, y_val) val_batch_losses.append(val_loss.item()) val_losses = np.mean(val_batch_losses) return val_losses def train_loop(model, loss_fn, optimizer, n_epochs, train_loader, val_loader=None): # Figures device from where the model parameters (hence, the model) are device = next(model.parameters()).device.type # Creates the train_step function for our model, loss function and optimizer train_step = make_train_step(model, loss_fn, optimizer) losses = [] val_losses = [] for epoch in range(n_epochs): # TRAINING batch_losses = [] for x_batch, y_batch in train_loader: x_batch = x_batch.to(device) y_batch = y_batch.to(device) loss = train_step(x_batch, y_batch) batch_losses.append(loss) losses.append(np.mean(batch_losses)) # VALIDATION if val_loader is not None: val_loss = validation(model, loss_fn, val_loader) val_losses.append(val_loss) print("Epoch {} complete...".format(epoch)) return losses, val_lossesFinal Code We finally have an organized version of our code, consisting of the following steps:- building a **Dataset**- performing a **random split** into **train** and **validation** datasets- building **DataLoaders**- building a **model**- defining a **loss function**- specifying a **learning rate**- defining an **optimizer**- specifying the **number of epochs**All nitty-gritty details of performing the actual training is encapsulated inside the **`train_loop`** function.torch.manual_seed(42) # builds tensors from numpy arrays BEFORE split x_tensor = torch.from_numpy(x).float() y_tensor = torch.from_numpy(y).float() # builds dataset 
containing ALL data points dataset = TensorDataset(x_tensor, y_tensor) # performs the split train_dataset, val_dataset = random_split(dataset, [80, 20]) # builds a loader of each set train_loader = DataLoader(dataset=train_dataset, batch_size=16) val_loader = DataLoader(dataset=val_dataset, batch_size=20) # defines learning rate lr = 1e-1 # Create a MODEL, a LOSS FUNCTION and an OPTIMIZER model = nn.Sequential(nn.Linear(1, 1)).to(device) loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD(model.parameters(), lr=lr) n_epochs = 1000 losses, val_losses = train_loop(model, loss_fn, optimizer, n_epochs, train_loader, val_loader) print(model.state_dict()) plt.plot(losses, label='Training Loss') plt.plot(val_losses, label='Validation Loss') plt.yscale('log') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend()Saving (and Loading) Models: taking a break "That would be great, to restart training later"So, it is important to be able to **checkpoint** our model, in case we'd like to **restart training later**.To checkpoint a model, we basically have to **save its state** into a file, to **load** it back later - nothing special, actually.What defines the **state of a model**?- **model.state_dict()**: kinda obvious, right?- **optimizer.state_dict()**: remember optimizers had the `state_dict` as well?- **loss**: after all, you should keep track of its evolution- **epoch**: it is just a number, so why not? :-)- **anything else you'd like to have restored**Then, **wrap everything into a Python dictionary** and use [**torch.save()**](https://pytorch.org/docs/stable/torch.html?highlight=savetorch.save) to dump it all into a file! Easy peasy!checkpoint = {'epoch': n_epochs, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': losses, 'val_loss': val_losses} torch.save(checkpoint, 'model_checkpoint.pth')How would you **load** it back? Easy as well:- load the dictionary back using [**torch.load()**](https://pytorch.org/docs/stable/torch.html?highlight=torch%20loadtorch.load)- load **model** and **optimizer** state dictionaries back using its methods [**load_state_dict()**](https://pytorch.org/docs/stable/nn.html?highlight=load_state_dicttorch.nn.Module.load_state_dict)- load everything else into their corresponding variablescheckpoint = torch.load('model_checkpoint.pth') model.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) epoch = checkpoint['epoch'] losses = checkpoint['loss'] val_losses = checkpoint['val_loss']You may save a model for **checkpointing**, like we have just done, or for **making predictions**, assuming training is finished.After loading the model, **DO NOT FORGET**:---**SET THE MODE** (not the mood!):- **checkpointing: model.train()**- **predicting: model.eval()**--- BONUS: Further Improvements Is there **anything else** we can improve or change? Sure, there is **always something else** to add to your model — using a [**learning rate scheduler**](https://pytorch.org/docs/stable/optim.htmlhow-to-adjust-learning-rate), for instance. 
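Before moving on to the scheduler, here is a short sketch of the "predicting" case from the checklist above: loading a saved checkpoint purely for inference. It assumes the same `model` definition and the `model_checkpoint.pth` file created earlier; the sample input `new_x` is made up for illustration.

# Loads the checkpoint for inference only (sketch)
checkpoint = torch.load('model_checkpoint.pth')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()                          # predicting: set evaluation mode
with torch.no_grad():                 # no gradients needed for plain predictions
    # hypothetical new input with the same shape as our features (N, 1)
    new_x = torch.tensor([[0.5]], dtype=torch.float, device=device)
    print(model(new_x))

If the goal were to resume training instead, we would also restore the optimizer with optimizer.load_state_dict() and switch back with model.train(), exactly as the checklist says.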
Learning Rate Scheduler In the "Playing with the Learning Rate" section, we observed how different **learning rates** may be more useful at different moments of the optimization process.PyTorch offers a long list of **learning rate schedulers** for all your learning rate needs:- [**StepLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.StepLR)- [**MultiStepLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.MultiStepLR)- [**ReduceLROnPlateau**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.ReduceLROnPlateau)- [**LambdaLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.LambdaLR)- [**ExponentialLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.ExponentialLR)- [**CosineAnnealingLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.CosineAnnealingLR)- [**CyclicLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.CyclicLR)- [**OneCycleLR**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.OneCycleLR)- [**CosineAnnealingWarmRestarts**](https://pytorch.org/docs/stable/optim.htmltorch.optim.lr_scheduler.CosineAnnealingWarmRestarts)To include a scheduler into our workflow, we need to take two steps:- create a **scheduler** and pass our **optimizer as argument**- use our scheduler's **step()** method - **after the validation**, that is, **last thing before finishing an epoch**, for the first 6 schedulers on the list - **after every batch update** for the last 3 schedulers on the list We also need to **pass an argument** to **step()** if we're using **ReduceLROnPlateau**: the **validation loss**, which is the quantity we're using to **control the effectiveness of the current learning rate**.from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, MultiStepLR optimizer = optim.SGD(model.parameters(), lr=lr) scheduler = ReduceLROnPlateau(optimizer, 'min') #scheduler = StepLR(optimizer, step_size=30, gamma=0.5) #scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)We are focusing only on **ReduceLROnPlateau**, **StepLR** and **MultiStepLR** on this tutorial, so we'll change our training loop accordingly: adding the **scheduler's step()** as **last thing before finishing an epoch**.def train_loop_with_scheduler(model, loss_fn, optimizer, scheduler, n_epochs, train_loader, val_loader=None): # Figures device from where the model parameters (hence, the model) are device = next(model.parameters()).device.type # Creates the train_step function for our model, loss function and optimizer train_step = make_train_step(model, loss_fn, optimizer) losses = [] val_losses = [] learning_rates = [] for epoch in range(n_epochs): # TRAINING batch_losses = [] for x_batch, y_batch in train_loader: x_batch = x_batch.to(device) y_batch = y_batch.to(device) loss = train_step(x_batch, y_batch) batch_losses.append(loss) losses.append(np.mean(batch_losses)) # VALIDATION if val_loader is not None: val_loss = validation(model, loss_fn, val_loader) val_losses.append(val_loss) print("Epoch {} complete...".format(epoch)) # SCHEDULER if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): scheduler.step(val_loss) else: scheduler.step() learning_rates.append(optimizer.state_dict()['param_groups'][0]['lr']) return losses, val_losses, learning_ratesLet's run the whole thing once again!torch.manual_seed(42) # builds tensors from numpy arrays BEFORE split x_tensor = torch.from_numpy(x).float() y_tensor = torch.from_numpy(y).float() # builds 
dataset containing ALL data points dataset = TensorDataset(x_tensor, y_tensor) # performs the split train_dataset, val_dataset = random_split(dataset, [80, 20]) # builds a loader of each set train_loader = DataLoader(dataset=train_dataset, batch_size=16) val_loader = DataLoader(dataset=val_dataset, batch_size=20) # defines learning rate lr = 1e-1 # Create a MODEL, a LOSS FUNCTION and an OPTIMIZER (and SCHEDULER) model = nn.Sequential(nn.Linear(1, 1)).to(device) loss_fn = nn.MSELoss(reduction='mean') optimizer = optim.SGD(model.parameters(), lr=lr) scheduler = ReduceLROnPlateau(optimizer, 'min') #scheduler = StepLR(optimizer, step_size=30, gamma=0.5) #scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) n_epochs = 1000 losses, val_losses, l_rates = train_loop_with_scheduler(model, loss_fn, optimizer, scheduler, n_epochs, train_loader, val_loader) print(model.state_dict()) plt.plot(l_rates) plt.yscale('log') plt.xlabel('Epochs') plt.ylabel('Learning Rate')As expected, the learning rate is progressively reduced.plt.plot(losses, label='Training Loss') plt.plot(val_losses, label='Validation Loss') plt.yscale('log') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend()Camera Data - IntrinsicsCamera = collections.namedtuple( "Camera", ["id", "model", "width", "height", "params"]) def read_cameras_text(path): """ see: src/base/reconstruction.cc void Reconstruction::WriteCamerasText(const std::string& path) void Reconstruction::ReadCamerasText(const std::string& path) """ cameras = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() camera_id = int(elems[0]) model = elems[1] width = int(elems[2]) height = int(elems[3]) params = np.array(tuple(map(float, elems[4:]))) cameras[camera_id] = Camera(id=camera_id, model=model, width=width, height=height, params=params) return cameras camdata = read_cameras_text(path+'/cameras.txt') list_of_keys = list(camdata.keys()) cam = camdata[list_of_keys[0]] print( 'Cameras', len(cam)) h, w, f = cam.height, cam.width, cam.params[0] # w, h, f = factor * w, factor * h, factor * f hwf = np.array([h,w,f]).reshape([3,1]) print('HWF: ', hwf)Cameras 5 HWF: [[ 512. ] [ 612. 
] [3765.54]]Image Data - Rot and trans matx Helper funcdef qvec2rotmat(qvec): return np.array([ [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) def rotmat2qvec(R): Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat K = np.array([ [Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 eigvals, eigvecs = np.linalg.eigh(K) qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] if qvec[0] < 0: qvec *= -1 return qvec BaseImage = collections.namedtuple( "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) class Image(BaseImage): def qvec2rotmat(self): return qvec2rotmat(self.qvec) def read_images_text(path): """ see: src/base/reconstruction.cc void Reconstruction::ReadImagesText(const std::string& path) void Reconstruction::WriteImagesText(const std::string& path) """ images = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() image_id = int(elems[0]) qvec = np.array(tuple(map(float, elems[1:5]))) tvec = np.array(tuple(map(float, elems[5:8]))) camera_id = int(elems[8]) image_name = elems[9] elems = fid.readline().split() xys = np.column_stack([tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))]) point3D_ids = np.array(tuple(map(int, elems[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids) return images imdata = read_images_text(path+'/images.txt') print(imdata) w2c_mats = [] bottom = np.array([0,0,0,1.]).reshape([1,4]) names = [imdata[k].name for k in imdata] print( 'Images #', len(names)) perm = np.argsort(names) for k in imdata: im = imdata[k] R = im.qvec2rotmat() t = im.tvec.reshape([3,1]) m = np.concatenate([np.concatenate([R, t], 1), bottom], 0) w2c_mats.append(m) w2c_mats = np.stack(w2c_mats, 0) c2w_mats = np.linalg.inv(w2c_mats) poses = c2w_mats[:, :3, :4].transpose([1,2,0]) poses = np.concatenate([poses, np.tile(hwf[..., np.newaxis], [1,1,poses.shape[-1]])], 1) # must switch to [-u, r, -t] from [r, -u, t], NOT [r, u, -t] poses = np.concatenate([poses[:, 1:2, :], poses[:, 0:1, :], -poses[:, 2:3, :], poses[:, 3:4, :], poses[:, 4:5, :]], 1)Images # 20Points3d dataPoint3D = collections.namedtuple( "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) def read_points3D_text(path): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) """ points3D = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() point3D_id = int(elems[0]) xyz = np.array(tuple(map(float, elems[1:4]))) rgb = np.array(tuple(map(int, elems[4:7]))) error = float(elems[7]) image_ids = np.array(tuple(map(int, elems[8::2]))) point2D_idxs = np.array(tuple(map(int, elems[9::2]))) points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs) return 
points3D pts3d = read_points3D_text(path+'/points3D.txt') print(pts3d) pts_arr = [] vis_arr = [] for k in pts3d: pts_arr.append(pts3d[k].xyz) cams = [0] * poses.shape[-1] for ind in pts3d[k].image_ids: # if len(cams) < ind - 1: # print('ERROR: the correct camera poses for current points cannot be accessed') # return cams[ind-1] = 1 vis_arr.append(cams) pts_arr = np.array(pts_arr) vis_arr = np.array(vis_arr) print( 'Points', pts_arr.shape, 'Visibility', vis_arr.shape ) zvals = np.sum(-(pts_arr[:, np.newaxis, :].transpose([2,0,1]) - poses[:3, 3:4, :]) * poses[:3, 2:3, :], 0) valid_z = zvals[vis_arr==1] print( 'Depth stats', valid_z.min(), valid_z.max(), valid_z.mean() ) save_arr = [] for i in perm: vis = vis_arr[:, i] zs = zvals[:, i] zs = zs[vis==1] close_depth, inf_depth = np.percentile(zs, .1), np.percentile(zs, 99.9) # print( i, close_depth, inf_depth ) save_arr.append(np.concatenate([poses[..., i].ravel(), np.array([close_depth, inf_depth])], 0)) save_arr = np.array(save_arr) np.save('poses_bounds.npy', save_arr) np.load('poses_bounds.npy')Assignment 2: **Machine learning with tree based models** In this assignment, you will work on the **Titanic** dataset and use machine learning to create a model that predicts which passengers survived the **Titanic** shipwreck. --- About the dataset:---* The column named `Survived` is the label and the remaining columns are features. * The features can be described as given below: Variable Definition pclass Ticket class SibSp Number of siblings / spouses aboard the Titanic Parch Number of parents / children aboard the Titanic Ticket Ticket number Embarked Port of Embarkation: C = Cherbourg, Q = Queenstown, S = Southampton --- Instructions---* Apply suitable data pre-processing techniques, if needed. * Implement a few classifiers to create your model and compare the performance metrics by plotting the curves like roc_auc, confusion matrix, etc.import pandas as pd import numpy as np import matplotlib.pyplot as plt # Load the file as a dataframe from google.colab import drive drive.mount('/content/drive') titanic_data = pd.read_csv("/content/drive/My Drive/Colab Notebooks/titanic.csv") df = titanic_data df.head() titanic_data.shape print(titanic_data.isna().sum())PassengerId 0 Survived 0 Pclass 0 Name 0 Sex 0 Age 177 SibSp 0 Parch 0 Ticket 0 Fare 0 Cabin 687 Embarked 2 dtype: int64Let's clean the datasetdf = df.drop(['PassengerId','Name','Ticket','Cabin'],axis=1)Let's check the distribution of this column to replace with na valuesplt.hist(df.Age) df['Age'].fillna(df['Age'].mean(), inplace=True) df['Embarked'].fillna(df['Embarked'].mode()[0], inplace=True) print(df.isna().sum())Survived 0 Pclass 0 Sex 0 Age 0 SibSp 0 Parch 0 Fare 0 Embarked 0 dtype: int64'Applying ML models' one by onedef create_one_hot(df, column_name): """One-hot encode column values. Takes the df and column name as input and return the df with one-hot encoded columns as output. 
""" df[column_name] = pd.Categorical(df[column_name]) one_hot = pd.get_dummies(df[column_name], prefix = column_name, drop_first=True) # add dummies to original df: df = df.drop(column_name,axis=1) df = pd.concat([one_hot, df], axis = 1) return df df = create_one_hot(df, 'Embarked') df = create_one_hot(df, 'Sex') from sklearn.model_selection import train_test_split from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import f1_score, precision_score, confusion_matrix, recall_score, accuracy_score,roc_auc_score,confusion_matrix class classification: def __init__(self, X, y): self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=0.2, random_state=42) self.labels = np.unique(y).tolist() def confusion_matrix(self): cm = confusion_matrix(self.y_test, self.pred, self.labels) print(" The confusion matrix is :", '\n' , cm) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(cm) plt.title('Confusion matrix of the classifier') fig.colorbar(cax) ax.set_xticklabels([''] + self.labels) ax.set_yticklabels([''] + self.labels) plt.xlabel('Predicted') plt.ylabel('True') plt.show() def calc_metrics_class(self): precision = precision_score(self.pred, self.y_test) recall = recall_score(self.pred,self.y_test) f1 = f1_score(self.pred,self.y_test) accuracy = accuracy_score(self.pred,self.y_test) roc = roc_auc_score(self.y_test, self.pred) print("The precision for the model is :", precision, '\n', "The recall for the model is : ", recall, '\n', "The f1 score of the model is :", f1, '\n', "The accuracy of the model is : ", accuracy, '\n', "The ROC curve of the model is : ", roc ) def gradient_boost(self): print("Performing modelling for Gradient boost") GradBoostClasCV = GradientBoostingClassifier(random_state=42) model_params = { "max_depth": [20], "subsample": [ 0.5], "n_estimators":[150], "learning_rate": [0.01], "criterion": ['mae'] } grid_model = GridSearchCV(estimator=GradBoostClasCV, param_grid=model_params, cv=5, n_jobs=-1) grid_model.fit(self.X_train,self.y_train) print("Best parameters =", grid_model.best_params_) model_clf = GradBoostClasCV.set_params(**grid_model.best_params_) model_clf.fit(self.X_train, self.y_train) self.pred = model_clf.predict(self.X_test) self.calc_metrics_class() self.confusion_matrix() def random_forest(self): print("Performing modelling for Random forest") rf_model = RandomForestClassifier(random_state=1) param_grid = { 'n_estimators': [150], 'max_features': ['auto'], 'min_samples_split': [2] } grid_model = GridSearchCV(estimator=rf_model, param_grid=param_grid, cv = 3, verbose=2, n_jobs=-1) grid_model.fit(self.X_train,self.y_train) print("Best parameters =", grid_model.best_params_) model_clf = rf_model.set_params(**grid_model.best_params_) model_clf.fit(self.X_train, self.y_train) self.pred = model_clf.predict(self.X_test) self.calc_metrics_class() self.confusion_matrix() def decision_tree(self): print("Performing modelling for decision tree") #create a dictionary of all values we want to test param_grid = { 'criterion':['gini'],'max_depth': [10]} # decision tree model dtree_model=DecisionTreeClassifier() #use gridsearch to test all values grid_model = GridSearchCV(dtree_model, param_grid, cv=5, n_jobs=-1) grid_model.fit(self.X_train,self.y_train) print("Best parameters =", grid_model.best_params_) model_clf = 
dtree_model.set_params(**grid_model.best_params_) model_clf.fit(self.X_train, self.y_train) self.pred = model_clf.predict(self.X_test) self.calc_metrics_class() self.confusion_matrix()Applied models and got accuracy one by oneX = df.drop('Survived',axis=1) y = df['Survived'] p1 = classification(X,y) p1.random_forest() p1.gradient_boost() p1.decision_tree()Summary Problem 1Get gamma rate, epsilon rate, and power consumption (https://adventofcode.com/2021/day/3)def power_consumption(report_input): with open(report_input,'r') as input_text: report = input_text.read() report_list = report.splitlines() value_size = len(report_list[0]) example_length = len(report_list) gamma = [] gamma_dec = 0 epsilon = [] epsilon_dec = 0 for j in range (0,value_size): x = 0 for i in range (0,example_length): x = x + int(report_list[i][j]) # print(x) if x < example_length/2: epsilon.append(1) gamma.append(0) else: epsilon.append(0) gamma.append(1) gamma_dec = gamma_dec + gamma[j]*pow(2,value_size-(j+1)) epsilon_dec = epsilon_dec + epsilon[j]*pow(2,value_size-(j+1)) print("gamma = ", gamma, ",\nepsilon = ", epsilon) print("gamma_dec = ", gamma_dec) print("epsilon_dec = ", epsilon_dec) answer = gamma_dec * epsilon_dec return(answer) power_consumption("report.txt")gamma = [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0] , epsilon = [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1] gamma_dec = 2346 epsilon_dec = 1749Problem 2Get oxygen generator rating, CO2 scrubber rating, and life support rating of the submarine (https://adventofcode.com/2021/day/3)def find_rating(report_input, mode): with open(report_input,'r') as input_text: report = input_text.read() report_list = report.splitlines() # separate a list of original data and a list that changes in the course of finding process item_list = report_list value_size = len(report_list[0]) example_length = len(report_list) # separate the lenth of original data and that of a list that changes in the course of finding process total_items = example_length binary_value = [] rating = 0 # as "value_size" is anyways the max of the repetition, probably it is OK to use it rather than using while? for j in range (0, value_size): x = 0 for i in range (0,total_items): # to show the value of j-th position of an i-th element in a list is element[i][j] x = x + int(item_list[i][j]) # which is the most, either 1 or 0? if mode == "co2": if x >= total_items/2: binary_value.append(0) else: binary_value.append(1) else: if x >= total_items/2: binary_value.append(1) else: binary_value.append(0) candidates = [item for item in item_list if int(item[j]) == binary_value[j]] item_list = candidates total_items = len(item_list) if len(item_list) == 1: print(item_list) break for j in range (0, value_size): rating = rating + int(item_list[0][j])*pow(2,value_size-(j+1)) if mode == "co2": print("co2_rating is ", rating) else: print("oxgen_rating is ", rating) return(rating) life_support_rating = find_rating("report.txt","oxgen")*find_rating("report.txt","co2") print(life_support_rating)['110101000111'] oxgen_rating is 3399 ['010011100001'] co2_rating is 1249 4245351Designing the Perfect IT Company An exploration analysis on Stack Overflow’s 2020 survey Part 1 - Understanding and Preparing the Data Business Understanding We are going to use Stack Overflow's 2020 survey dataset which is available [here](https://insights.stackoverflow.com/survey/). This dataset contains questions on developer experience from career satisfaction and job search to education and opinions on open source software. 
We are interested on the job satisfaction and all things related to it. With this we'd like to answer the following questions. * What do the developers look for a company? * Is salary or languages/technologies/frameworks effective enough to solve job satisfaction issues? * What other hidden factors affects job satisfaction? Data Understanding **Access and Explore Data and Schema**df_schema = pd.read_csv("../data/raw/survey_results_schema.csv") df_schema.head() df_schema.info() df = pd.read_csv("../data/raw/survey_results_public.csv") df.head() df.info() RangeIndex: 64461 entries, 0 to 64460 Data columns (total 61 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Respondent 64461 non-null int64 1 MainBranch 64162 non-null object 2 Hobbyist 64416 non-null object 3 Age 45446 non-null float64 4 Age1stCode 57900 non-null object 5 CompFreq 40069 non-null object 6 CompTotal 34826 non-null float64 7 ConvertedComp 34756 non-null float64 8 Country 64072 non-null object 9 CurrencyDesc 45472 non-null object 10 CurrencySymbol 45472 non-null object 11 DatabaseDesireNextYear 44070 non-null object 12 DatabaseWorkedWith 49537 non-null object 13 D[...]Prepare Data **Wrangle and Clean** The survey is already pre-cleaned by StackOverflow. Few more enhancements is to rename a few column and convert them into snake case **Schema**df_schema.columns = [to_snake_case(col) for col in df_schema.columns] df_schema.head() df_schema["field"] = df_schema.column.apply(lambda col: to_snake_case(col)) df_schema.head() df_schema = df_schema[["field", "question_text"]] df_schema.head() df_schema.to_csv("../data/processed/so_schema.csv", index=False)**Data**df.columns = [to_snake_case(col) for col in df.columns] df.head() df.to_csv("../data/processed/so_data.csv", index=False)Reading the Datadf = pd.read_csv('Cleaned.csv') df.head() df.info() RangeIndex: 90346 entries, 0 to 90345 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Make 90346 non-null object 1 Model 90346 non-null object 2 Model_Year 90346 non-null int64 3 Fuel_Type 90346 non-null object 4 Transmission 90346 non-null object 5 Engine_Capacity(cc) 90346 non-null int64 6 Kms_Driven 90346 non-null int64 7 Color 90346 non-null object 8 Assembly 90346 non-null object 9 Body_Type 90346 non-null object 10 Price 90346 non-null int64 11 Capacity_KWh 90346 non-null float64 12 Condition 90346 non-null object dtypes: float64(1), int64(4), object(8) memory usage: 9.0+ MBChecking Null valuesdf.isnull().sum()Data Cleaningprint(df['Fuel_Type'].unique()) print(df['Transmission'].unique()) print(df['Color'].unique()) print(df['Assembly'].unique()) print(df['Body_Type'].unique()) print(df['Condition'].unique()) df['Color'].value_counts() re.search(r'white',df['Color'][0]) white = list(df['Color'][df['Color'].str.lower().str.contains(r'white')]) blue = list(df['Color'][df['Color'].str.lower().str.contains(r'blue')]) red = list(df['Color'][df['Color'].str.lower().str.contains(r'red')]) black = list(df['Color'][df['Color'].str.lower().str.contains(r'black')]) silver = list(df['Color'][df['Color'].str.lower().str.contains(r'silver')]) grey = list(df['Color'][df['Color'].str.lower().str.contains(r'grey')]) golden = list(df['Color'][df['Color'].str.lower().str.contains(r'golden')]) green = list(df['Color'][df['Color'].str.lower().str.contains(r'green')]) brown = list(df['Color'][df['Color'].str.lower().str.contains(r'brown')]) rose = list(df['Color'][df['Color'].str.lower().str.contains(r'rose')]) gun = 
list(df['Color'][df['Color'].str.lower().str.contains(r'gun')]) gray = list(df['Color'][df['Color'].str.lower().str.contains(r'gray')]) metallic = list(df['Color'][df['Color'].str.lower().str.contains(r'metallic')]) biege = list(df['Color'][df['Color'].str.lower().str.contains(r'beige')]) df['Color'].replace(white,'White', inplace=True) df['Color'].replace(blue,'Blue', inplace=True) df['Color'].replace(red,'Red', inplace=True) df['Color'].replace(black,'Black', inplace=True) df['Color'].replace(silver,'Silver', inplace=True) df['Color'].replace(grey,'Grey', inplace=True) df['Color'].replace(golden,'Golden', inplace=True) df['Color'].replace(green,'Green', inplace=True) df['Color'].replace(brown,'Brown', inplace=True) df['Color'].replace(rose,'Rose', inplace=True) df['Color'].replace(gun,'Gun Metallic', inplace=True) df['Color'].replace(gray,'Grey', inplace=True) df['Color'].replace(metallic,'Metallic', inplace=True) df['Color'].replace(biege,'Beige', inplace=True) df['Color'].str.lower().replace(to_replace=r'Rose.+', value='Rose', regex=True, inplace=True) df['Color'].replace('Turqouise','Turquoise', inplace=True) pd.options.display.max_rows = None df['Color'].value_counts() golden = ['Cooper Gold', 'Flexin Mica', 'Gold Wight','Dorado Gold','Goldan','Sand Gold','G','Gooldan','Flaxen Mica', 'Light Gold'] beige = ['Sand Biege', 'Sand Baige','Cool Bebige', 'Bage','Pearl Bedge','Sandbeig','Beig','Jerma Badge','Sand Beigh'] grey = ['Charcoal', 'Graphite', 'Mouse Couler','Rich Espresso','Grefite','Surmai', 'Indica, Graphite','Surmayi','Graphite Metalic', 'Smoke Gr', 'Gary','Italic','Sedan','Smoke'] gun = ['Gan Matalik', 'Ganmatlic', 'Dalmatalak', 'Gan Matelk','Gan Matalik Garr'] orange = ['Light Orange Or Peach Color', 'Rust Orange'] rose = ['Ross Matlk', 'Shalimar Roze','Roz Mataiq'] yellow = ['Dull Yellow', 'Lemon', 'Lime Yellow','Light Yellow','Neon Glittery Yellow'] maroon = ['Boardox', 'Mehroon'] silver = ['S','Silwer','Ssss','Sliver','Yslr','Yslr'] red = ['Rid Wien','Peral Wine','Champagne','Wine','Vine','Sky Bue','Shapain','Champagne '] blue = ['Saloon', 'Cerulean B','Peal Bule','???','Trim','Spacio','Salon','Beach Color','Ferozi','Sea Pearl', 'Peral','Pearl Colour','Bule Mint','Shade','Mateluck','Moonglow Pearl','Abc','Skye ','By6','Sky','Sky ', 'Aq Jade'] magenta = ['Magneta', 'Magenta','B'] purple = ['Light Purple', 'Levander', 'Pearl Purple ','Purple Lite','Lavendar','Purpal','Voilet Metalic','Levander', 'Lavander','Bluish Purple','Lavender','Levander '] pink = ['P With', 'Tea Pink ', 'Light Pin','Pinc','Baby Pink','Piyazi','Onion Color Type','Tee Pink', 'Copper Pink','Tea Pink','Light Pink'] brown = ['Coffee', 'Biscuit', 'Skin','بسکوٹی','Army ','Brawn','Bhatti Colour','Chocolate Colo','Char Coal', 'Biscut','Fol Bage','Bronze Micca','Malti','Bhati Japani Color','Cheku Colour', 'Army Clr', 'Musterd','Army Desert Color','Mat Flax M ','Foan', 'Mustard','Bronze Mica','Bowla','Meca Bronze', 'Copper','Camel','Chocolate Color','Evo','Cream', 'Creem Color', 'Peach', 'None', 'Indica','Mist','Creem', 'Mustard ','Cream Color','Carem','Careem', 'Camel '] green = ['Grinish', 'Mehndi Colour', 'Angori','Jade','Grenish', 'Dark Gren', 'Leith Mahande','Granite', 'Grips Gareen','Sea Gerrn','Angroi','Ovals','Mehndi Color','Olive','Grapes Color','Olive Mica', 'Smoky Greeen','Smoke Garen','Lit Gareen','Gr Vr Matl','Mandi Color','Moigia Color','Grin','Gm','Levander', 'Shalimar'] black = ['Dark', 'Cool Batch','Mica Dark','Mande','Moongia','Shalimar Stone'] metallic = ['Matalic', 'Panthera Metal', 
'Metalic','Two Toned Color','Matlic','Matlic Color','Double Sheet', 'Penthra Metal','Pentha Metal','Modern Steel 22,85,','Matelic','Metal Stream Met','Modren Steel', 'Mettalic Steel','Metalic F','Penthera Metal','Polish Metalic','Modern Steel Matlic', 'Bold Matalic Beig', 'Sea Matalic', 'Metalic Cooper','Anthera Metal Color', 'Polish Metal Metalic', 'Polish Matilic','T','Bronze Mitallic','Metallik','R Metelic','Modern Steel','Modern Steel Matelic' , 'Gun Metallic', 'Two Tone','Matellic Steel'] white = ['Pearl','All','Other','Moonlight','All Colours Are Availabl','Unknown','Genuine Colour', 'Pics', 'Same As Picture', 'Bright','Watch Pictures','As Seen In The Pics','Check Pictures', 'All Colors Are Available','See In Pics','Titania','Wite','Apky Samny','Special Edition', 'Mehran','Not Defined','Any Color Available','Family Used Car Good Looking',' ', 'Outer Shawer Iner Jenean','Unkhown','Good','As You Can See Pictures','Tose Miss','Champion', 'As You Can See','Palwheth','Hvhhb','No Idea','Metalic S','Steel','Shown In Pix','All Colours Available', 'Parl Wahit','Pearl Whte','14/ 16','Nice','Defined','Imported','One Pis Tachinig','Multi', 'All Colours Are Available','All Colours'] urban = ['Urban Titaniu', 'Titanium', 'Urban Titinum', 'Titanium', 'Urban Titenum','Arabian Titanium','Titanium Colour', 'Urban Titan','Urban','Titanium Mettalli','Urban Titnum','Urban Titenium','Arban Titenium','Urbantitanium', 'Titanium Mettalic ','Arabic Matalic','Titanium','Urban Titanium ','Titanium ','Urban ','Titanium Mettallic', 'Titanium Mettalic','Urban Titanium'] colors = {tuple(golden):'Golden', tuple(beige) :'Beige', tuple(grey):'Grey', tuple(gun):'Gun Metallic', tuple(orange):'Orange', tuple(rose):'Rose', tuple(yellow): 'Yellow', tuple(maroon):'Maroon', tuple(silver):'Silver', tuple(red):'Red', tuple(blue):'Blue', tuple(magenta):'Magenta', tuple(purple):'Purple', tuple(pink):'Pink', tuple(brown):'Brown', tuple(green):'Green', tuple(black):'Black', tuple(metallic):'Metallic', tuple(white):'White' ,tuple(urban):'Urban Titanium'} for i in colors: # print((i)) df['Color'].replace(i,colors[i], inplace=True) df['Color'].value_counts() df['Color'].replace('Unlosted','Unlisted', inplace=True) df['Color'].replace('Unlisted','White', inplace=True) df['Color'].replace('Golden','Gold', inplace=True) df['Color'].replace('Beige','Brown', inplace=True) df['Color'].replace('Turquoise','Blue', inplace=True) df['Color'].replace('Navy','Blue', inplace=True) df['Color'].replace('Indigo','Purple', inplace=True) df['Color'].replace('Magenta','Pink', inplace=True) df['Color'].replace('Rose','Pink', inplace=True) df['Color'].replace('Bronze','Brown', inplace=True) df['Color'].replace('Urban Titanium','Metallic', inplace=True) df['Color'].value_counts() print(df['Fuel_Type'].unique()) print(df['Transmission'].unique()) print(df['Color'].unique()) print(df['Assembly'].unique()) print(df['Body_Type'].unique()) print(df['Condition'].unique()) df.head() df= pd.read_csv('Cleaned2.csv')Data Preparationdf.shape df['Price'].max() df['Price'].min() plt.figure(figsize=(15,10)) sns.histplot(x='Price',data=df, kde=True ) print(df['Price'].min()) print(df['Price'].median()) print(df['Price'].max()) pd.options.display.max_rows = False df[df['Price'] > 70000000] plt.figure(figsize=(15,6)) sns.set_theme(style="whitegrid") sns.boxplot(x= df['Price']) data_new.head() df.head() df.drop(['Make', 'Model'], axis=1, inplace=True) check_df = df.copy() check_df['Body_Type'].value_counts() df.info() obj_col = df.select_dtypes(object).columns obj_col 
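# Added note (illustrative aside, not from the original notebook): the next lines label-encode
# the remaining object columns by converting them to pandas' 'category' dtype and then replacing
# each column with its integer codes via .cat.codes. Toy example:
# pd.Series(['Petrol', 'Diesel', 'Petrol'], dtype='category').cat.codes -> 1, 0, 1
# (codes follow the alphabetical order of the categories, so 'Diesel' -> 0 and 'Petrol' -> 1).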
df[obj_col] = df[obj_col].astype('category') for i in obj_col: df[i] = df[i].cat.codes df.head() # sns.displot(df) print(df['Fuel_Type'].unique()) print(df['Transmission'].unique()) print(df['Color'].unique()) print(df['Assembly'].unique()) print(df['Body_Type'].unique()) print(df['Condition'].unique()) # print(df['Category'].unique())[5 1 3 0 2 4] [0 1] [13 0 1 5 12 4 11 3 2 9 6 7 10 8 14] [0 1] [16 6 15 10 13 4 8 3 1 0 5 18 22 19 9 7 17 14 11 12 2 20 21] [0 4 2 1 3]Rescalingdf.shape df.columns df.head() print(df['Fuel_Type'].unique()) print(df['Transmission'].unique()) print(df['Color'].unique()) print(df['Assembly'].unique()) print(df['Body_Type'].unique()) print(df['Condition'].unique()) # print(df['Category'].unique()) df = df.sample(frac=1, random_state=123) df.head() df.shape df.shape[0]*0.7 train = df[:63242] test =df[63242:]Correlation Between Variables Linear regression assumes the independent variables are not related with each other. If the correlation degree is high, it will cause problems when we fit the model.To check multicollinearity, I will use heatmap and VIF.plt.figure(figsize = (20, 15)) sns.heatmap(train.corr(method ='pearson'), annot=True, linewidths=.5) plt.show() plt.figure(figsize=(5,10)) cor_df = pd.DataFrame({'Price' : df.corr()['Price'].values}, index = df.corr()['Price'].index) sns.heatmap(cor_df, annot=True, cmap='viridis') plt.show() from scipy import stats num_columns = train.select_dtypes(exclude='object').columns for i in list(num_columns): pearson_coeff, p_value = stats.pearsonr(train[i], train['Price']) print(i.capitalize()) print(f'Pearson Co-relation: {pearson_coeff}') print(f'P-Value: {p_value}') if p_value<0.05: print('Correlation is Significant') else: print('Correlation is Insignificant') print('') drop = [] for i in list(num_columns): pearson_coeff, p_value = stats.pearsonr(train[i], train['Price']) if p_value > 0.05: drop.append(i) drop train.drop(['Capacity_KWh'], axis=1, inplace=True) test.drop(['Capacity_KWh'], axis=1, inplace=True) df.head() hetro = train.copy() hetro = (hetro-hetro.min())/(hetro.max()- hetro.min()) var = hetro.var().sort_values(ascending = False) var train.drop(['Model_Year'], axis=1, inplace=True) test.drop(['Model_Year'], axis=1, inplace=True)Building the Model!pip install cloudpickle train.columns features = train.columns.drop('Price') target = ['Price'] from sklearn.preprocessing import QuantileTransformer transform = QuantileTransformer(n_quantiles=90346) trns = transform.fit(train[features]) import pickle pickle.dump(trns, open('q_transform.pkl', 'wb')) X_train = trns.transform(train[features]) X_test = trns.transform(test[features]) X_train = pd.DataFrame(X_train, columns=features) X_test = pd.DataFrame(X_test, columns=features) y_train = train[target] y_test = test[target] from sklearn.metrics import mean_absolute_error LG_ = LinearRegression() SV_ = SVR() KN_ = KNeighborsRegressor() DT_ = DecisionTreeRegressor(random_state=123) GB_ = GradientBoostingRegressor(random_state=123) RF_ = RandomForestRegressor(random_state=123) models = [LG_, SV_, KN_, DT_, GB_, RF_,] model_name = [ 'Linear Regression', 'Support Vector Regression', 'K Nearest Neighbor', 'Decision Tree', 'Gradient Boost', 'Random Forest' ] means = [] r2_score_ = [] kf = KFold(5, True, random_state=123) for i in range(len(models)): maes = [] r2s = [] model = models[i] model.fit(X_train, y_train) prediction = model.predict(X_test) mae = mean_absolute_error(prediction, y_test) r2 = r2_score(y_test, prediction) maes.append(mae) r2s.append(r2) 
means.append(np.mean(maes)) r2_score_.append(np.mean(r2s)) mod_comp_def = pd.DataFrame({'Models' : model_name, 'Mean Absolute Error' : means, 'R2_Score' : r2_score_}).set_index('Models') mod_comp_def parameter_space_LG = { 'fit_intercept' : [True, False] , 'normalize' : [True, False] , 'copy_X' : [True, False] , 'positive' : [True, False] } parameter_space_SV = { "kernel": ["poly", "linear", "rbf", "sigmoid"], "degree": [3, 5], "coef0": [0, 3, 7], "gamma":[1e-3, 1e-1, 1/X_train.shape[1]], "C": [1, 10, 100], } parameter_space_KN = { 'n_neighbors' : [1,20,50], 'weights' : ['uniform', 'distance'], 'algorithm' : ['auto', 'kd_tree'], 'leaf_size' : [1,20,100], 'p' : [1,2], } parameter_space_DT = { 'criterion' : ['mse', 'mae'] , 'splitter' : ['best', 'random'], 'max_depth' : [5,20,50], } parameter_space_GB = { 'loss' : ['ls', 'lad', 'quantile'], 'learning_rate' : [0.1,0.2, 0.5], 'n_estimators' : [80, 100, 180], 'criterion' : ['mse', 'mae'], } parameter_space_RF = { 'n_estimators' : [100,120], 'criterion' : ['mse', 'mae'], 'max_depth' : [10,15,30], } from sklearn.model_selection import RandomizedSearchCV LG = LinearRegression() SV = SVR() KN = KNeighborsRegressor() DT = DecisionTreeRegressor(random_state=123) GB = GradientBoostingRegressor(random_state=123) RF = RandomForestRegressor(random_state=123) # kf = KFold(5, True, random_state=123) models = [LG, DT, GB, RF,] model_name = [ 'Linear Regression', 'Decision Tree', 'Gradient Boost', 'Random Forest' ] parameter_space = [parameter_space_LG, parameter_space_DT, parameter_space_GB, parameter_space_RF] for i in range(4): clf = RandomizedSearchCV(models[i],parameter_space[i] , scoring='neg_mean_absolute_error', n_jobs=-1, cv=3) clf.fit(X_train, y_train) print(f'{model_name[i]}:') print("Best parameters:") print(clf.best_params_) print('') features = ['wheelbase', 'carwidth', 'enginesize', 'boreratio', 'aspiration_turbo', 'carbody_hatchback', 'carbody_sedan', 'drivewheel_fwd', 'drivewheel_rwd', 'enginelocation_rear', 'enginetype_l', 'enginetype_ohc', 'enginetype_ohcv', 'cylindernumber_five', 'cylindernumber_four', 'cylindernumber_six', 'fuelsystem_2bbl', 'fuelsystem_mpfi', 'Category_Medium_Range', 'Category_Expensive_Cars'] target = ['price'] Linear Regression: Best parameters: {'positive': False, 'normalize': False, 'fit_intercept': True, 'copy_X': True} Decision Tree: Best parameters: {'splitter': 'best', 'max_depth': 10, 'criterion': 'mse'} Gradient Boost: Best parameters: {'n_estimators': 180, 'loss': 'huber', 'learning_rate': 0.5, 'criterion': 'mse'} Random Forest: Best parameters: {'n_estimators': 100, 'max_depth': 15, 'criterion': 'mae'} from sklearn.metrics import mean_absolute_error LG_ = LinearRegression(copy_X=True, fit_intercept=True, normalize=False, positive=False,) DT_ = DecisionTreeRegressor(criterion='mse', max_depth=10, splitter='best',random_state=123) GB_ = GradientBoostingRegressor(criterion='mse', learning_rate=0.5, loss='huber', n_estimators=180, random_state=123) RF_ = RandomForestRegressor(criterion='mae', max_depth=15, n_estimators=100,random_state=123) models = [LG_, DT_, GB_, RF_,] model_name = [ 'Linear Regression', 'Decision Tree', 'Gradient Boost', 'Random Forest' ] means = [] r2_score_ = [] kf = KFold(5, True, random_state=123) for i in range(len(models)): maes = [] r2s = [] model = models[i] model.fit(X_train, y_train) prediction = model.predict(X_test) mae = round(mean_absolute_error(prediction, y_test),2) r2 = round(r2_score(y_test, prediction),2) maes.append(mae) r2s.append(r2) means.append(np.mean(maes)) 
r2_score_.append(np.mean(r2s)) with_rndm = pd.DataFrame({'Models' : model_name, 'Mean Absolute Error' : means, 'R2_Score' : r2_score_}).set_index('Models') with_rndm mod_comp_def.loc[['Linear Regression', 'Decision Tree', 'Gradient Boost', 'Random Forest']]Finallymodel_name = 'Randome Forest' model = RandomForestRegressor(criterion='mae', max_depth=15, n_estimators=100,random_state=123) # model.predict(X_test) model.fit(X_train, y_train) # model.fit(X_train, y_train) print(model_name) print(f'r2 score: {round(r2_score(y_test, model.predict(X_test)), 2)}') print(f'Mean absolute error: {round(mean_absolute_error(model.predict(X_test), y_test),2)}') import pickle pickle_out = open('model-pakwheels-carprice.pkl', "wb") pickle.dump(model, pickle_out) pickle_out.close()Mapping Peak Annual Gage Height/Flow - Carnation Load from USGSSource:https://nwis.waterdata.usgs.gov/wa/nwis/peak?site_no=12149000&agency_cd=USGS&format=rdbimport pandas as pd import numpy as np from datetime import timedelta url = 'https://nwis.waterdata.usgs.gov/wa/nwis/peak?site_no=12149000&agency_cd=USGS&format=rdb' df0 = pd.read_csv(url,comment='#',delimiter='\t') df0 = df0.drop(0) # drop data type row df0.head(5)Fix Up Data# Calculate water year df0['peak_dt'] = pd.to_datetime(df0['peak_dt']) df0['water_year']=(df0['peak_dt'] + timedelta(days=92)).apply(lambda x: int(x.year)) # fix up datatypes df0['gage_ht'] = df0['gage_ht'].apply(pd.to_numeric) df0['peak_va'] = df0['peak_va'].apply(pd.to_numeric) # Remove first 10 rows for Carnation. Gage hight changes to modern scale in 1940. df = df0.loc[:,['water_year','peak_va','gage_ht']] df.head(5)Add in 2016from datetime import date df_2016 = pd.DataFrame([[2016,56200.0,59.78]], columns=['water_year','peak_va','gage_ht']) df = df.append(df_2016, ignore_index=True) df.tail(5)Mann_KendallFrom source: https://mail.scipy.org/pipermail/scipy-dev/2016-July/021413.html>This function is derived from code originally posted by (.com)See also: http://vsp.pnnl.gov/help/Vsample/Design_Trend_Mann_Kendall.htm>The purpose of the Mann-Kendall (MK) test (Mann 1945, Kendall 1975, Gilbert 1987) is to statistically assess if there is a monotonic upward or downward trend of the variable of interest over time. A monotonic upward (downward) trend means that the variable consistently increases (decreases) through time, but the trend may or may not be linear. The MK test can be used in place of a parametric linear regression analysis, which can be used to test if the slope of the estimated linear regression line is different from zero. 
The regression analysis requires that the residuals from the fitted regression line be normally distributed; an assumption not required by the MK test, that is, the MK test is a non-parametric (distribution-free) test.>Hirsch, Slack and Smith (1982, page 107) indicate that the MK test is best viewed as an exploratory analysis and is most appropriately used to identify stations where changes are significant or of large magnitude and to quantify these findings.from scipy import stats from scipy.stats import norm def mk_test(x, alpha = 0.05): """ Input: x: a vector of data alpha: significance level (0.05 default) Output: trend: tells the trend (increasing, decreasing or no trend) h: True (if trend is present) or False (if trend is absence) p: p value of the significance test z: normalized test statistics Examples -------- >>> x = np.random.rand(100) >>> trend,h,p,z = mk_test(x,0.05) """ n = len(x) # calculate S s = 0 for k in range(n-1): for j in range(k+1,n): s += np.sign(x[j] - x[k]) # calculate the unique data unique_x = np.unique(x) g = len(unique_x) # calculate the var(s) if n == g: # there is no tie var_s = (n*(n-1)*(2*n+5))/18 else: # there are some ties in data tp = np.zeros(unique_x.shape) for i in range(len(unique_x)): tp[i] = sum(unique_x[i] == x) var_s = (n*(n-1)*(2*n+5) + np.sum(tp*(tp-1)*(2*tp+5)))/18 if s>0: z = (s - 1)/np.sqrt(var_s) elif s == 0: z = 0 elif s<0: z = (s + 1)/np.sqrt(var_s) # calculate the p_value p = 2*(1-norm.cdf(abs(z))) # two tail test h = abs(z) > norm.ppf(1-alpha/2) if (z<0) and h: trend = 'decreasing' elif (z>0) and h: trend = 'increasing' else: trend = 'no trend' return trend, h, p, zPlot function%matplotlib inline import matplotlib.pyplot as plt from scipy import stats from pylab import rcParams rcParams['figure.figsize'] = 10,5 def plot_data(x_data, y_data, x_label, y_label, point_label): plt.scatter(x_data,y_data, color='blue', marker='o', label=point_label) plt.xlabel(x_label) plt.ylabel(y_label) slope, intercept, r_value, p_value, std_err = stats.linregress(x_data,y_data) def func(x, a, b): return a*x + b plt.plot(x_data, func(x_data, slope, intercept), 'r-', label='fit',color='green') plt.legend(loc='upper left') plt.show() over_10_years = round(slope*10,2); print('slope ',slope, ' (', over_10_years, ' every 10 years)', sep="") print('p_value ', p_value) print('r_value ', r_value) print('std_err ', std_err) critical_value = 1.664 #http://stattrek.com/regression/slope-confidence-interval.aspx?Tutorial=AP conf_interval = critical_value * std_err conf_over_10_years = round(conf_interval*10,2) print('conv_interval ', conf_interval, ' (+/-',conf_over_10_years,' over 10 years - 90% conf, ', over_10_years - conf_over_10_years, ' - ', over_10_years + conf_over_10_years, ')', sep="") # Mann-Kendall Output: # trend: tells the trend (increasing, decreasing or no trend) # h: True (if trend is present) or False (if trend is absence) # p: p value of the significance test # z: normalized test statistics def display_mk_test(data, alpha): mk = mk_test(data,alpha) print('Mann-Kendall test: ', int(100*(1-alpha)),'%', sep='') print(' slope:', mk[0]) print(' h:', 'Monotonic trend is present' if mk[1] else 'No monottonic trend') print(' p:', mk[2]) print(' z:', mk[3]) return mk s,h,p,z = display_mk_test(y_data.values,0.1) if h: s,h,p,z = display_mk_test(y_data.values,0.05) if h: s,h,p,z = display_mk_test(y_data.values,0.01)PlotsScatter plot of water_year and gage measures with least-squares linear regression fit line. 
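Before moving on to the plots, here is a quick sanity check of the `mk_test` function defined above (a hypothetical aside on synthetic data, not on the gage record): a noisy but steadily rising series should be flagged as an increasing trend, while pure noise should usually show no trend.

```
import numpy as np

# Hypothetical check of mk_test (assumes the mk_test function defined above is in scope)
np.random.seed(1)
rising = np.arange(50) + np.random.normal(scale=2.0, size=50)  # monotonic signal plus noise
noise = np.random.normal(size=50)                              # no underlying trend

print(mk_test(rising, 0.05))  # expected: trend='increasing', h=True, small p, z > 0
print(mk_test(noise, 0.05))   # typically: trend='no trend', h=False, p > 0.05
```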
Max Gage Height From 1940, the first year the gage height was set to the current datum (1929 NGVD), through 2016.x_data = df[df['water_year']>=1940]['water_year'] y_data = df[df['water_year']>=1940]['gage_ht'] plot_data(x_data, y_data, 'water year', 'gage hight (ft)', 'Snoq @ Carn')Gage Flow (all data)From 1930 through 2016.x_data = df['water_year'] y_data = df['peak_va'] plot_data(x_data, y_data, 'water year', 'flow (cfs)', 'Snoq @ Carn')Gage Flow (1934 on)From 1934 through 2016.x_data = df[df['water_year']>=1934]['water_year'] y_data = df[df['water_year']>=1934]['peak_va'] plot_data(x_data, y_data, 'water year', 'flow (cfs)', 'Snoq @ Carn')Max Gage Flow (2 year buckets)df['two_year'] = (df['water_year']/2).apply(int)*2 max = df.groupby('two_year').max() x_data = max['water_year'] y_data = max['peak_va'] plot_data(x_data, y_data, 'water year', 'flow (cfs)', 'Snoq @ Carn')Data TabledfMultivariate Analysis - PLS-DA In this notebook we will perform a supervised *multivariate* PLS-DA analysis of the *C. elegans* dataset. It is recommended to finish first the notebook *Multivariate Analysis - PCA*.The notebook is divided in the following steps:1) Model fitting basics: Fit PLS-DA models to predict genotype from the metabolic profile data, using different types of scaling.2) Model cross-validation and component selection: Describe model cross-validation, parameter selection and performance assessment, including permutation testing.3) Model interpretation: Describe some of the available variable importance metrics for PLS-DA, and highlight which variables might be important for the discrimination. Compare the selected variables with the results of an univariate analysis (performed using the notebook **Univariate Analysis**) Code importImport all the packages and configure notebook plotting mode.# Import the required python packages including # the custom Chemometric Model objects import numpy as np from sklearn import preprocessing import pandas as pds import matplotlib.pyplot as plt import warnings from sklearn.exceptions import DataConversionWarning from pyChemometrics.ChemometricsPLSDA import ChemometricsPLSDA from pyChemometrics.ChemometricsScaler import ChemometricsScaler from pyChemometrics.ChemometricsOrthogonalPLSDA import ChemometricsOrthogonalPLSDA # Use to obtain same values as in the text np.random.seed(350) # Set the data conversion warnings to appear only once to avoid repetition during CV warnings.filterwarnings("ignore", category=DataConversionWarning)The next cell sets up the figure display mode. The *notebook* mode allows interactive plotting.# Set the plot backend to support interactive plotting %matplotlib notebookData import We will now import the NMR data and the metadata (Y variables).X - NMR data matrixY - Matrix with the 2 metadata outcomesppm - Chemical shift axis for the NMR data in H $\delta$ppm. 
MetadataY1 - represents the genotype (1: wild-type, 2: *sod-2* mutants, in original Y data matrix)Y2 - represents the age (1: younger L2 worms, 2: L4 worms, in original Y data matrix)# Load the dataset X = np.genfromtxt("./data/X_spectra.csv", delimiter=',', dtype=None) Y = pds.read_csv("./data/worm_yvars.csv",delimiter=',',dtype=None, header=None) ppm = np.loadtxt("./data/ppm.csv",delimiter=',') # Use pandas Categorical type to generate the dummy enconding of the Y vector (0 and 1) Y1 = pds.Categorical(Y.iloc[:, 0]).codes Y2 = pds.Categorical(Y.iloc[:, 1]).codes**Note**: To apply the analyses exemplified in this notebook to any other dataset, just modify the cell above to import the data matrices and vectors X and Y from any other source file.The expected data types and formatting for **X** and **Y** are: **X**: Any data matrix with n rows (observations/samples) and p columns (variables/features). The matrix should be provided as a [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) object, with 2 dimensions, and with shape = (n, p). We recommend using the *numpy* function [numpy.genfromtxt](https://numpy.org/devdocs/reference/generated/numpy.genfromtxt.html) or the *pandas* [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) function to read the data from a text file. When using the *pandas.read_csv* function, extract the data matrix as a *numpy.ndarray* from the pandas.DataFrame object using the `.values` attribute. ```X_DataFrame = pds.read_csv("./data/X_spectra.csv")X = X_DataFrame.values``` **Y** vectors: Each **Y** vector should be a 1-dimensional [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) object, with a number and ordering of elements matching the rows in **X**. For continuous variables, any regular *numpy.ndarray* with a data type of `int` (integers only) or `float` can be used. ``` Y_continuous = numpy.ndarray([23.4, 24, 0.3, -1.23], dtype='float') ```To encode binary class labels, a *numpy.ndarray* of dtype `int`, with 0 and 1 as labels (e.g., 0 = Control, 1 = Case) must be used. The way in which classes are encoded will affect the model interpretation: the class labeled as 1 is used as the "positive/case" class by the *pyChemometrics* objects. In the example above, we used the *pandas* [Categorical](https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html) datatype to handle the conversion of the original numerical values (1, 2) to the required (0, 1) labels. After converting a column to a `Categorical` datatype, the `.codes` attribute returns a vector with the same length of the original Y, but where each value is replaced by their integer (`int`) code. The correspondence between code and category can be inspected with the `categories` attribute. The order of the labels in `.codes` is the same as the order of the `categories` attribute (i.e. 0 is the first element in `categories`, 1 the second and so on). ``` Y1 = pds.Categorical(Y.iloc[:, 1]) Y1.codes The numerical label Y1.categories Original text or numerical description of the category ``` [get_dummies](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html) is another helpful function to perform dummy (0-1) encoding of variables. 
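As a small, hypothetical illustration of the two encodings mentioned above (toy labels, not the actual worm metadata):

```
import pandas as pds

# Toy genotype-like labels (illustrative only)
y_raw = pds.Series(['wild-type', 'sod-2', 'wild-type', 'sod-2'])

# Integer codes, as used for Y1/Y2 above; string categories are ordered alphabetically,
# so 'sod-2' -> 0 and 'wild-type' -> 1 here
print(pds.Categorical(y_raw).codes)

# get_dummies instead produces one 0/1 indicator column per level
print(pds.get_dummies(y_raw))
```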
Plot all the spectra in the dataset.# Plot the spectra in the dataset plt.figure() plt.plot(ppm, X.T) plt.title("X matrix of spectra") plt.xlabel("$\delta$ppm") plt.gca().invert_xaxis() plt.ylabel("Intensity") plt.show()PLS-DA modeling 1) Model fitting basicsIn this section we will fit a PLS-DA model to classify *C.elegans* samples based on their genotype, and assess the metabolic differences between *sod-2* mutants and the parent wild-type (N2).As an example, we start by fitting a PLS-DA model with 2 components and with unit-variance (UV) scaling. The choice of components to use in the modeling will be addressed properly in the next section, the objective of this first section is to introduce the model syntax. Similar to PCA, we start by choosing a scaling method for the X data matrix. The choice of scaling method will influence the results and interpretation.# Select the scaling options: # Unit-Variance (UV) scaling: scaling_object_uv = ChemometricsScaler(scale_power=1) # Pareto scaling: scaling_object_par = ChemometricsScaler(scale_power=1/2) # Mean Centring: scaling_object_mc = ChemometricsScaler(scale_power=0)For this example we will use Unit-Variance scaling (UV scaling), and start by fitting a PLS-DA model with 2 components.# Create and fit PLS-DA model pls_da = ChemometricsPLSDA(n_components=2, x_scaler=scaling_object_uv) pls_da.fit(X, Y1)PLS models perform dimensionality reduction in a manner similar to PCA. The main difference (besides the criteria in which the components are found) is that as well as the projections for the X matrix ($T$ scores) we also have projections for the Y matrix ($U$ scores).Model visualization of PLS/PLS-DA models is typically performed by plotting the $T$ scores (X matrix scores). The score plot gives an overview of the relationships between samples, their similarities and dissimilatrities within the model space.**Warning**: PLS-DA models can easily overfit, and the degree of separation or clustering of samples from distinct classes or Y outcome in the score plot is not a reliable measure of model validity. We recommend focusing on model validation before exploring the relationships in the scores plot. See the next section.# Plot the scores pls_da.plot_scores(color=Y1, discrete=True, label_outliers=True, plot_title=None)The *plot_scores* methods from `ChemometricsPLS` and `ChemometricsPLSDA` objects share the same functionality as `ChemometricsPCA.plot_scores`. Score plot data points can be colored by levels of a continuous or discrete covariate by using the `color` argument, and setting the ```discrete``` argument to ```True``` or ```False```, accordingly). The index (row index of the data matrix **X**) of the outlying can be labeled with ```label_outliers=True``` and the plot title changed with the argument```plot_title```. The main directions associated with each component in the score plots can be interpreted in terms of the original X variables using the loading vector, just like in PCA. Each component has an associated loading vector $p$ and weight vector $w$.# Plot the weights and loadings. # w for weights, p for loadings, # ws for X rotations (rotated version of w) pls_da.plot_model_parameters(parameter='p', component=1)Besides the loading vectors, PLS models have another important set of parameters, the weight vectors. There is one weight vector ($w$) corresponding to the X matrix and another ($c$) to the Y variables.The weight vector ($w$) relates the original X variables with the Y outcome we are predicting. 
These vectors (and metrics based on them, such as VIP) are important to assess the relationship between X and Y and which X variables are more associated with Y. This will be discussed in more detail later in this tutorial.The larger the magnitude of the variable coefficient in the weight vector, the more "associated" that variable is with the response.# Plot the weights and loadings. # w for weights, p for loadings, # ws for X rotations (rotated version of w) pls_da.plot_model_parameters(parameter='w', component=1)2) Model Selection - Number of componentsSelection of the number of components for a PLS model follows a very similar logic to the PCA case.Since the goal is to predict the Y variable, the main criteria used are the $R^{2}Y$/$Q^{2}Y$ as opposed to $R^{2}X$/$Q^{2}X$.Ideally, we want to select enough components to predict as much of the variation in Y as possible using the data in X, while avoiding overfitting. We apply a similar criterion as the one used with PCA: choosing as the number of components after which the $Q^{2}Y$ value reaches a plateau (less than 5% increase compared to previous number of components).pls_da.scree_plot(X, Y1, total_comps=10)Just like in the case of PCA, the $Q^{2}Y$ and other validation metrics obtained during K-Fold cross validation is sensitive to row permutation of the X and Y matrices. Shuffling the rows and repeating the cross-validation steps multiple times is a more reliable way to select the number of components.**Note**: Model cross-validation, especially the *repeated_cv* call in the next cell requires fitting the model multiple times, and can take a few minutes.# Repeated cross_validation rep_cv = pls_da.repeated_cv(X, Y1, repeats=5, total_comps=10)Outlier detectionThe outlier detection measures available for PCA (Hotelling $T^{2}$ and DmodX) are also available for PLS/PLS-DA models. Outlier interpretation is also performed in the same way.pls_da.plot_scores(label_outliers=True) pls_da.outlier(X)The strongest outliers in this case are the 5 samples with more negative PLS component 2 scores. These are actually the same samples identified as outliers during the preliminary PCA analysis. We will remove them before proceeding.pca_outliers = np.array([36, 100, 106, 113, 117]) X = np.delete(X, pca_outliers, axis=0) Y1 = np.delete(Y1, pca_outliers, axis=0) Y2 = np.delete(Y2, pca_outliers, axis=0)We now re-check the optimal number of components after exclusion of outliers.pls_da.scree_plot(X, Y1, total_comps=10) # Repeated cross_validation rep_cv = pls_da.repeated_cv(X, Y1, repeats=5, total_comps=10)Following the recomendations from cross-validation and repeated cross validation we select 4 as the final number of components. Refit the modelRefit the model without outliers and use the number of components selected.# Refit the model with the selected number of components pls_da = ChemometricsPLSDA(n_components=4, x_scaler=scaling_object_uv) pls_da.fit(X, Y1) pls_da.plot_scores(color=Y1, discrete=True)Although we used the $Q^{2}Y$ metric to perform model selection, this metric is easier to interpret for regression problems, and it is not straightforward to assess the performance of a classifier model using $Q^{2}Y$ or $R^{2}Y$ and similar goodness of fit metrics. 
The performance in a classification task is more effectively described by confusion matrices and related metrics, such as accuracy/balanced accuracy, f1, ROC curves and their respective area under the curve.To obtain more reliable estimates we can calculate the cross-validation estimates of any of these metrics, including cross-validated ROC curves. This ROC curve was estimated using the left-out samples (the test sets) during cross-validation.# Cross-validated ROC curve pls_da.cross_validation(X, Y1) pls_da.plot_cv_ROC()Permutation TestingA final and very important method for model validation is the permutation randomization test. In a permutation randomization test, the model will be refitted and assessed multiple times, but each time with the Y randomly permuted to destroy any relationship between X & Y. This allows us to assess what sort of model we can get when there really is no relationship between the two data matrices, and calculate the likelihood of obtaining a model with predictive performance as good as the non-permuted model by chance alone.During this test, the number of components, scaling, type of cross-validation employed, and any other modeling choice is kept constant. In each randomization, the model is refitted, and the AUC, $Q^{2}Y$ or any other validation metric is recorded. This enables the generation of permuted null distributions for any parameter, which can be used to obtain an empirical *p-value* for their significance.**Note**: Running the permutation test with a large number of permutation randomizations (for example, 1000) is expected to take a considerable amount of time (approximately 30 mins on a laptop).permt = pls_da.permutation_test(X, Y1, 1000)Optional: Load pre-calculated results# np.save('permutations_plsda.npy', permt) permt = np.load('permutations_plsda.npy', allow_pickle=True) # plot the results from the permutation test pls_da.plot_permutation_test(permt, metric='AUC') plt.xlabel('AUC') plt.ylabel('Counts') print("Permutation p-value for the AUC: {0}".format(permt[1]['AUC'])) # plot the results from the permutation test pls_da.plot_permutation_test(permt, metric='Q2Y') plt.xlabel('Q2Y') plt.ylabel('Counts') print("Permutation p-value for the Q2Y: {0}".format(permt[1]['Q2Y']))The *p-value* obtained is < 0.05, so the model AUC and Q2Y values are significantly different from what is expected by chance alone at a level of $\alpha$ = 0.05. 3) Model interpretation and variable importanceThe main parameters to assess in terms of variable importance for the prediction of Y from X are the weights ($w$), the VIP metric and regression coefficients.The values in a weight vector vary between -1 (strong negative-covariance) and 1 (strong covariance), with 0 meaning no association/covariance. The weight vector of the first component (which explains the most variation in Y) is the primary weight vector to analyze when interpreting the main variables of X associated with Y.The variable importance for prediction (VIP) metric is a sum (weighted by the amount of variance of Y explained by each respective component) of the squared weight values. It provides a summary of the importance of a variable accounting for all weight vectors. VIPs are bounded between 0 (no effect) and infinity. Because it is calculated from the weights $w$, for PLS models with a single component these are directly proportional to the $w^{2}$. 
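For reference, a common textbook formulation of the VIP score for variable $j$ in a model with $A$ components is shown below; this is stated as general background, and the exact weighting used inside pyChemometrics may differ slightly.

$$VIP_{j} = \sqrt{\frac{K \sum_{a=1}^{A} \left(\frac{w_{ja}}{\lVert \mathbf{w}_{a} \rVert}\right)^{2} SS_{Y,a}}{\sum_{a=1}^{A} SS_{Y,a}}}$$

Here $K$ is the number of X variables and $SS_{Y,a}$ is the amount of Y variance explained by component $a$. With this weighting the mean of the squared VIP scores is 1, which is why VIP > 1 is often quoted as a rough importance threshold.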
The VIP metric has the disadvantage of pooling together $w$ vectors from components which contribute a very small magnitude to the model's $R^{2}Y$.The regression coefficients ($\beta$) have a similar interpretation as regression coefficients in a multivariate/multiple linear regression.pls_da.plot_model_parameters('w', component=1, sigma=2, cross_val=True, xaxis=ppm) plt.gca().invert_xaxis() plt.gca().set_xlabel('ppm') pls_da.plot_model_parameters('VIP', sigma=2, cross_val=True, xaxis=ppm) plt.gca().invert_xaxis() plt.gca().set_xlabel('ppm') pls_da.plot_model_parameters('beta', sigma=2, cross_val=True, xaxis=ppm) plt.gca().invert_xaxis() plt.gca().set_xlabel('ppm')Unfortunately, assessment of variable importance in PLS-DA/PLS multivariate models is not straightfoward, given the multiple choice of parameters and their different interpretation, especially in models with more than 1 PLS component. To obtain a ranking of variables from the data matrix X associated with Y, we recommend starting with the weights $w$ of the first component, which contributes the most to $R^{2}Y$. However, it must be mentioned that the weights of the first PLS component are equal to the normalized (so that the weight vector has norm equal to 1) vector of the univariate covariances estimated between each X column or variable, and the Y vector. This implies there is no advantage in using a PLS model and $w$ when compared to a series of univariate analyses for variable ranking and selection.fig, ax = plt.subplots(1,2, figsize=(8, 5)) X_scaled = pls_da.x_scaler.transform(X) cov_x_y = np.dot(Y1.T - Y1.mean(), X_scaled) / (Y1.shape[0]-1) cov_x_y = cov_x_y/np.linalg.norm(cov_x_y) ax[0].plot(cov_x_y, 'orange') ax[1].plot(pls_da.weights_w[:, 0], 'green') ax[0].set_xlabel('Normalised $Cov(X_{i}, Y)$') ax[1].set_xlabel('$w$ for PLS component 1') fig.show()Another set of quantities which can be used to assess variable importance are the $\beta$ regression coefficients. However, as with other multivariate regression models, the final $\beta$ vector encodes information about the correlation structure of X and how it relates to Y, and the magnitude and sign of $\beta$ coefficient express how to derive a "good" prediction of Y using X. Taking the magnitude of each $\beta$ and using it to rank variables can be misleading.This does not mean necessarily that PLS should only be used as a predictive "black box" regressor/classifier and model interpretation avoided altogether. The strength of PLS for exploratory data analysis and interpretation resides on the latent variable projections. The scores $T$ or $U$ can be plotted and associated with other metadata variables, or even correlated or regressed against them, and the corresponding loading $p$ can be visualized to assess the signals which make up the latent variable signature. For example, if we inspect the scores plot for components 2 and 3 it becomes apparent that although we have not added information about the Age covariate to the model, the PLS component number 3 seems to be associated with it. This hints that this component is accounting for some of the variability related with Age to improve the prediction. The loadings of this component can then be used to visualize which regions of the spectrum are correlated. 
**Note**: We recommend referring to loadings $p$ and not weights $w$ when interpreting latent variable signatures, especially in PLS components after the 1st component.# Same model, but coloured by Age instead of Genotype pls_da.plot_scores(comps=[1, 2], color=Y2, discrete=True) pls_da.plot_model_parameters('p', component=2)Orthogonal PLS The orthogonal PLS modeling technique can be used to assist interpretation of PLS latent variables.After obtaining a reliable PLS model, we generate an Orthogonal PLS/PLS-DA model with the same number of components as the PLS model. In an orthogonal PLS model, the first component is called predictive, and the subsequent components "orthogonal" because they are uncorrelated with the response Y. Compared to the equivalent PLS model, Orthogonal PLS models shuffle away variation from the loading vector $p$ of the first component to subsequent components, which can aid in interpretation of the latent variables.# Generate an Orthogonal PLS-DA version of the PLS-DA model fitted orthogonal_pls_da = ChemometricsOrthogonalPLSDA(ncomps=5, xscaler=scaling_object_uv) orthogonal_pls_da.fit(X, Y1)The Orthogonal PLS model we just fitted has 1 predictive component and 4 orthogonal components. The predictive component encodes the information in X directly associated with Y. The orthogonal components can be investigated and associated with other known covariates, to assist in understanding the sources of variation that the PLS model/Orthogonal PLS model is "learning" from the data to improve the prediction of Y (measured by the $R^{2}Y$). In the following plot, we investigate the scores on the predictive ($T_{pred}$) and first orthogonal component ($T_{ortho[1]}$), coloured by genotype.orthogonal_pls_da.plot_scores(color=Y1, orthogonal_component=1, discrete=True)The analysis of orthogonal component 2 hints that age (Y2) contributes orthogonal variation to the data. Note: in the plot below, the data points are coloured by age (Y2).orthogonal_pls_da.plot_scores(color=Y2, orthogonal_component=2, discrete=True, label_outliers=False)The interpretation of the Orthogonal PLS score plots should be made using the predictive and orthogonal loading vectors ($p$) for all components. Only the weight vector $w$ for the predictive component should be evaluated.orthogonal_pls_da.plot_model_parameters('p_pred', orthogonal_component = 1, xaxis=ppm) plt.gca().invert_xaxis() # orthogonal_pls_da.plot_model_parameters('p_ortho', orthogonal_component = 2, xaxis=ppm) plt.gca().invert_xaxis()Permutation p-values for variable rankingThe permutation test we ran before is also useful to obtain permuted null distributions for most of the model parameters. 
These can be used to obtain empirical confidence intervals and potentially permutation *p-values* for hypothesis testing.To illustrate this, the next cells generate histograms for the permuted distribution of the $w$ and $p$ for the first PLS component and regression coefficients for 2 randomly selected variables.Notice the differences between the permuted null distributions of weights, loadings and regression coefficients.# Plot empirical null distributions for weights plt.figure() plt.hist(permt[0]['Weights_w'][:, 3000, 0], 100) plt.title("Permuted null distribution for weights (w), component 1, {0} $\delta$ppm".format(ppm[3000])) plt.show() plt.figure() plt.hist(permt[0]['Weights_w'][:, 10, 0], 100) plt.title("Permuted null distribution for weights (w), component 1, {0} $\delta$ppm".format(ppm[10])) plt.show() # Plot empirical null distributions for loadings # Notice how these are not unimodal and distributed around 0... plt.figure() plt.hist(permt[0]['Loadings_p'][:, 3000, 0], 100) plt.title("Permuted null distribution for loadings (p), component 1, {0} $\delta$ppm".format(ppm[3000])) plt.show() plt.figure() plt.hist(permt[0]['Loadings_p'][:, 10, 0], 100) plt.title("Permuted null distribution for loadings (p), component 1, {0} $\delta$ppm".format(ppm[10])) plt.show() # Plot empirical null distributions for regression coefficients plt.figure() plt.hist(permt[0]["Beta"][:, 3000], 100) plt.title(r"Permuted null distribution for $\beta$, {0} $\delta$ppm".format(ppm[3000])) plt.show() plt.figure() plt.hist(permt[0]['Beta'][:, 10], 100) plt.title(r"Permuted null distribution for $\beta$, {0} $\delta$ppm".format(ppm[10])) plt.show()Both the regression coefficients and weights have a null distribution centered around 0. Conversely, for the loadings, the center of the distribution is shifted. Loadings encode information about the variance and covariance (with the latent variable score) of each variable, and their magnitude is harder to interpret in terms of importance for prediction. The permutation performed in this manner does not change the correlation between variables in X, and therefore is not adequate to obtain permuted null distributions of the loading parameters. We can now calculate empirical p-values for the regression coefficients...# Always set *nperms* equal to the number of permutations used before nperms = permt[0]['R2Y'].size perm_indx = abs(permt[0]['Beta'].squeeze()) >= abs(pls_da.beta_coeffs.squeeze()) counts = np.sum(perm_indx, axis=0) beta_pvals = (counts + 1) / (nperms + 1) perm_indx_W = abs(permt[0]['Weights_w'][:, :, 0].squeeze()) >= abs(pls_da.weights_w[:, 0].squeeze()) counts = np.sum(perm_indx_W, axis=0) w_pvals = (counts + 1) / (nperms + 1) plt.figure() plt.title(r"p-value distribution for the regression coefficients $\beta$ ") z = plt.hist(beta_pvals, bins=100, alpha=0.8) plt.axvline(x=0.05, ymin=0, ymax=max(z[0]), color='r', linestyle='--') plt.show() plt.figure() plt.title(r"p-value distribution for the weights corresponding to the first component") z = plt.hist(w_pvals, bins=100, alpha=0.8) plt.axvline(x=0.05, ymin=0, ymax=max(z[0]), color='r', linestyle='--') plt.show()... 
and use the permutation test to obtain a list of statistically significant variables.signif_bpls_idx = np.where(beta_pvals <= 0.05)[0] print("Number of significant values: {0}".format(len(signif_bpls_idx)))It is worth noting that a selection procedure of this kind is also a type of multiple testing, and it is recommended to apply false discovery rate or any other multiple testing correction to the *p-values* obtained in this manner. Also, formal inferential procedures to derive *p-values* and confidence intervals are not established for PLS models. Although *ad-hoc* solutions like a permutation test can be implemented as shown, some issues still remain - for example, the *p-value* distribution obtained for the regression coefficients is clearly non-uniform and care must be exercised when performing multiple testing correction or even interpreting the *p-values* obtained in this manner. The latent variable and dimensionality reduction provided by PLS/PLS-DA can be very usefull to visualize general trends in the data. However, interpreting which variables are important to the model and how they contribute for the explanation/separation between classes is not easy. We suggest complementing the inspection of multivariate model parameters with univariate analysis. Comparison between variables highlighted in a multivariate PLS-DA analysis with a univariate analysis.The following cells should be run after completing the analyses described in the **Univariate Analysis** Jupyter Notebook. Load the results of an equivalent univariate analysis of associations between metabolic signals and genotype.#load the results of the univariate testing procedure univ_gen = pds.read_csv('./data/UnivariateAnalysis_Genotype.csv') # Select significant peaks from univariate analysis signif = np.where(univ_gen['genotype_q-value'] < 0.05)[0]We then plot the overlap between the PLS-DA classifer for Genotype and the results of an univariate analysis against genotype.# p-values significant for association with genotype in both the PLS analysis and linear regression common_idx = np.array([x for x in signif_bpls_idx if x in signif]) # p-values significant only in PLS pls_idx = np.array([x for x in signif_bpls_idx if x not in signif]) # p-values significant only for linear regression reg_idx = np.array([x for x in signif if x not in signif_bpls_idx]) plt.figure() plt.plot(ppm, X.mean(axis=0)) #plt.scatter(ppm[signif], X.mean(axis=0)[signif], c='red', s=30) plt.scatter(ppm[reg_idx], X.mean(axis=0)[reg_idx], c='red', s=30) plt.scatter(ppm[pls_idx], X.mean(axis=0)[pls_idx], c='orange', s=30) plt.scatter(ppm[common_idx], X.mean(axis=0)[common_idx], c='green', s=30) plt.gca().invert_xaxis() plt.legend(['Mean Spectrum', 'Both', 'Linear regression only', 'PLS only']) plt.show()Initial Data Exploration & Cleaning my Columns About the data* Pulled from Kaggle.com* Dataset name on Kaggle: Indeed Dataset (Data Scientist, Analyst, and Engineer)* Age of dataset: 11/02/2018* The data was originally sourced from Indeed.* 43 Columns* 5715 Rows# Dependencies and Setup # import pandas as pd # import matplotlib.pyplot as plt # Read Indeed Dataset and store into Pandas data frame # indeed_df = pd.read_csv("./indeed_job_dataset.csv") # indeed_df # What are the column names # indeed_df.columnsRemoving the following columns* Link column* Date since posted* Description# Dropping unusable columns # indeed_df = indeed_df.drop(columns = ["Unnamed: 0", "Link", "Date_Since_Posted", "Description"]) # Checking DataFrame # indeed_df.columnsMy Columns for 
Cleaning Originally we separated the columns evenly (or as evenly as possible), but some columns didn't need cleaning. I started with columns: 'IL', 'WA', 'MD', 'DC', 'NC', 'Other_states', 'Consulting and Business Services', 'Internet and Software', 'Banks and Financial Services', 'Health Care', 'Insurance', and 'Other_industries'. However, all of those columns were already cleaned. Thus, I took columns 'No_of_Reviews' and 'Company_Industry' from Jennifer and Lori. We decided it would be best if we each created a dataframe with our columns only. This is what I did before beginning the cleaning process.# Separating my rows to clean # suzy_col = indeed_df.loc[:, ['No_of_Reviews','Company_Industry', 'IL', 'WA', 'MD', 'DC', 'NC', # 'Other_states', 'Consulting and Business Services', # 'Internet and Software', 'Banks and Financial Services', 'Health Care', # 'Insurance', 'Other_industries']] # Checking DataFrame # suzy_col # What are the data types # suzy_col.dtypes # Filling company industry NaNs with Not Listed # Chose to fill with string because the column was already filled with strings # suzy_col['Company_Industry'] = suzy_col['Company_Industry'].fillna('Not Listed') # Filling No_of_Reviews NaNs with 0 # Chose to fill with 0 because the column contains floats # suzy_col['No_of_Reviews'] = suzy_col['No_of_Reviews'].fillna(0) # suzy_col.to_csv('./SuzyColumns.csv')Exploring my set of columns# Exploring usable data - 'No_of_Reviews' # suzy_col.loc[suzy_col['No_of_Reviews']!= 0, :]['No_of_Reviews'].count() # Exploring usable data - 'Company_Industry' # suzy_col.loc[suzy_col['Company_Industry']!= 'Not Listed', :]['Company_Industry'].count() # Exploring usable data - 'IL' # suzy_col['IL'].sum() # Exploring usable data - 'WA' # suzy_col['WA'].sum() # Exploring usable data # suzy_col['MD'].sum() # Exploring usable data # suzy_col['DC'].sum() # Exploring usable data # suzy_col['NC'].sum() # Exploring usable data # suzy_col['Other_states'].sum() # Exploring usable data # suzy_col['Consulting and Business Services'].sum() # Exploring usable data # suzy_col['Internet and Software'].sum() # Exploring usable data # suzy_col['Banks and Financial Services'].sum() # Exploring usable data # suzy_col['Health Care'].sum() # Exploring usable data # suzy_col['Insurance'].sum() # Exploring usable data # suzy_col['Other_industries'].sum() # Test # suzy_col.loc[suzy_col['No_of_Reviews']!= 0, 'No_of_Reviews'] # Looking for outliers to determine if I should filter # reviews = suzy_col.loc[suzy_col['No_of_Reviews']!= 0, 'No_of_Reviews'] # fig1, ax1 = plt.subplots() # ax1.set_title('Number of Reviews') # ax1.set_ylabel('Reviews') # fig1.set_size_inches(4, 10) # ax1.boxplot(reviews) # # Saving figure # # plt.savefig("./Number of Reviews.png") # # Showing image # plt.show()Data Exploration & Analysis on Clean DataFurther exploring and cleaning the "clean", merged dataset.# Dependencies and Setup import pandas as pd import matplotlib.pyplot as plt import numpy as np # Read Clean Indeed Dataset and store into Pandas data frame clean_indeed_df = pd.read_csv("./Indeed Data Set.csv") clean_indeed_df # Exploring Number of stars after filtering test = clean_indeed_df.loc[(clean_indeed_df['No_of_Reviews'] >= 100)] test['No_of_Stars'].value_counts() # Checking columns clean_indeed_df.columns # Filtering data to keep those above 500 reviews # Was going to use this, but decided not to. 
indeed_df_500 = clean_indeed_df.loc[(clean_indeed_df['No_of_Reviews'] >= 500)] indeed_df_500 reviews = indeed_df_500['No_of_Reviews'] fig1, ax1 = plt.subplots() ax1.set_title('Number of Reviews above 500') ax1.set_ylabel('Reviews') fig1.set_size_inches(4, 10) ax1.boxplot(reviews) # Saving figure plt.savefig("./Number of Reviews.png") # Showing image plt.show() # What is the distributions of rating print(indeed_df_500['No_of_Stars'].value_counts()) print(clean_indeed_df['No_of_Stars'].value_counts()) # After this decided to use clean_indeed_df instead of indeed_df_500 # Realized there are multiple rows for the same column clean_indeed_df['Company'].value_counts() # New Data Frame to just keep pertinent information for my analysis new_df_undup = clean_indeed_df.loc[:, ['Queried_Salary', 'Job_Type', 'Company', 'No_of_Reviews', 'No_of_Stars', 'Location', 'Company_Revenue', 'Company_Employees', 'Company_Industry']] new_df_undup # Removing those records that are missing No_of_Stars new_df_undup = new_df_undup.loc[new_df_undup['No_of_Stars'] != "Missing", :] new_df_undup # Removing duplicates across the columns new_df_undup = new_df_undup.drop_duplicates() # Checking data types new_df_undup.dtypes # converting No_of_Stars from object to int # new_df_undup.astype({'No_of_Stars': 'int32'}) new_df_undup["No_of_Stars"] = new_df_undup["No_of_Stars"].astype('float') new_df_undup # Checking data types again new_df_undup.dtypesCompany Ratings by Job Type# New Data Frame for job type new_df_job_type = new_df_undup.loc[:, ['Job_Type', 'Company', 'No_of_Reviews', 'No_of_Stars']] # Removing duplicates new_df_job_type = new_df_job_type.drop_duplicates() new_df_job_type # Checking the number of records falling under each job type new_df_job_type['Job_Type'].value_counts() # Grouping by Job Type groupby_job_type = new_df_job_type.groupby('Job_Type') # Checking the number of reviews in each group sum_reviews_by_job_type = groupby_job_type['No_of_Reviews'].sum() print(sum_reviews_by_job_type) # Creating division print('--------------------------------') # What is the average stars by job type mean_star_by_job_type = groupby_job_type['No_of_Stars'].mean() mean_star_by_job_type = mean_star_by_job_type.sort_values(ascending=True) print(mean_star_by_job_type) # Job Type Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations job_type = [value for value in mean_star_by_job_type] x_axis_job = np.arange(len(mean_star_by_job_type))# Determine number of bars needed tick_locations_job = [value for value in x_axis_job] # to tell the plot where to place tick marks # Bar Chart plt.bar(x_axis_job, job_type, color=['r', 'b', 'g'], alpha=1, align="center") plt.xticks(tick_locations_job, mean_star_by_job_type.index.values, rotation="vertical") plt.ylim([1,5]) plt.title("Job Type vs Company Rating") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Job Type vs Company Ratings.png") # Create a boxplot to compare means job_types = ['data_analyst', 'data_engineer', 'data_scientist'] rates = [] data = [] #Setting up plot fig1, ax1 = plt.subplots() ax1.set_title('Rating by Job Types') ax1.set_ylabel('Ratings') for job in job_types: job_df = new_df_job_type.loc[new_df_job_type['Job_Type'] == job] rates.append(job_df['No_of_Stars']) rat = rates data.append(rat) rates = [] # Extracting from list fro boxplot x1 = data[0] x2 = data[1] x3 = data[2] # Convert to array source: https://www.educative.io/edpresso/how-to-convert-a-list-to-an-array-in-python 
ax1.boxplot([np.array(x1[0]), np.array(x2[0]), np.array(x3[0])], labels = job_types) plt.tight_layout() # Saving figure plt.savefig("./Job Type vs Company Ratings(2).png") # Showing image plt.show() # Over all company rating mean mean = new_df_job_type['No_of_Stars'].mean() highest = new_df_job_type['No_of_Stars'].max() lowest = new_df_job_type['No_of_Stars'].min() print(lowest, mean , highest)1.299999952 3.784555307265497 5.0Company Rating by Company Location# New Data Frame for Company Location new_df_location = new_df_undup.loc[:, ['Company', 'No_of_Reviews', 'No_of_Stars', 'Location']] # Removing duplicates new_df_location = new_df_location.drop_duplicates() new_df_location # Grouping by Location groupby_location = new_df_location.groupby('Location') # Checking the number of reviews in each group sum_reviews_by_location = groupby_location['No_of_Reviews'].sum() sum_reviews_by_location = sum_reviews_by_location.drop('Missing') print(sum_reviews_by_location) # Creating division print('----------------------------------------------------------') # What is the average stars by job type mean_star_by_location = groupby_location['No_of_Stars'].mean() mean_star_by_location = mean_star_by_location.drop('Missing') mean_star_by_location = mean_star_by_location.sort_values(ascending=True) print(mean_star_by_location) # Location Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations location = [value for value in mean_star_by_location] x_axis_l = np.arange(len(mean_star_by_location))# Determine number of bars needed tick_locations_l = [value for value in x_axis_l] # to tell the plot where to place tick marks # Bar Chart fig, ax = plt.subplots(figsize=(12,5)) plt.bar(x_axis_l, location, color='g', alpha=0.5, align="center") plt.xticks(tick_locations_l, mean_star_by_location.index.values, rotation="vertical") plt.ylim([1,5]) plt.ylim([1,5]) plt.axhline(y=mean, color = 'r') plt.title("Job Location vs Company Rating") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Location vs Company Ratings.png")Company Rating by Job Salary# New Data Frame for job salary new_df_job_salary = new_df_undup.loc[:, ['Queried_Salary', 'Job_Type', 'Company', 'No_of_Reviews', 'No_of_Stars', 'Location']] # Removing duplicates new_df_job_salary = new_df_job_salary.drop_duplicates() new_df_job_salary # Grouping by Salary groupby_salary = new_df_job_salary.groupby('Queried_Salary') # Checking the number of reviews in each group sum_reviews_by_salary = groupby_salary['No_of_Reviews'].sum() sum_reviews_by_salary = sum_reviews_by_salary[['<80000', '80000-99999', '100000-119999', '120000-139999', '140000-159999', '>160000']] print(sum_reviews_by_salary) # Creating division print('----------------------------------------------------------') # What is the average stars by job type mean_star_by_salary = groupby_salary['No_of_Stars'].mean() mean_star_by_salary = mean_star_by_salary[['<80000', '80000-99999', '100000-119999', '120000-139999', '140000-159999', '>160000']] mean_star_by_salary = mean_star_by_salary.sort_values(ascending=True) print(mean_star_by_salary) # Salary Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations salary = [value for value in mean_star_by_salary] x_axis_sal = np.arange(len(mean_star_by_salary))# Determine number of bars needed tick_loc_sal = [value for value in x_axis_sal] # to tell the plot where to place tick marks # Bar Chart fig, ax = 
plt.subplots(figsize=(5,5)) plt.bar(x_axis_sal, salary, color='g', alpha=0.5, align="center") plt.xticks(tick_loc_sal, mean_star_by_salary.index.values, rotation="vertical") plt.ylim([1,5]) plt.ylim([1,5]) plt.axhline(y=mean, color = 'r') plt.title("Position Salary Range vs Company Rating") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Position Salary Range vs Company Ratings.png")Company Rating by Company Industry, Revenue, and of Employees# New Data Frame for Company Industry, Revenue, and # of Employees new_df_indrevemp = new_df_undup.loc[:, ['Company', 'No_of_Reviews', 'No_of_Stars', 'Company_Revenue', 'Company_Employees', 'Company_Industry']] # Removing duplicates new_df_indrevemp = new_df_indrevemp.drop_duplicates() new_df_indrevemp # Grouping by Company Industry groupby_industry = new_df_indrevemp.groupby('Company_Industry') # Checking the number of reviews in each group sum_reviews_by_industry = groupby_industry['No_of_Reviews'].sum() sum_reviews_by_industry = sum_reviews_by_industry.drop("Not Listed") print(sum_reviews_by_industry) # Creating division print('----------------------------------------------------------') # What is the average stars by job type mean_star_by_industry = groupby_industry['No_of_Stars'].mean() mean_star_by_industry = mean_star_by_industry.drop("Not Listed") mean_star_by_industry = mean_star_by_industry.sort_values(ascending=True) print(mean_star_by_industry) # Industry Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations industry = [value for value in mean_star_by_industry] x_axis_in = np.arange(len(mean_star_by_industry))# Determine number of bars needed tick_locations_in = [value for value in x_axis_in] # to tell the plot where to place tick marks # Bar Chart fig, ax = plt.subplots(figsize=(12,5)) plt.bar(x_axis_in, industry, color='g', alpha=0.5, align="center") plt.xticks(tick_locations_in, mean_star_by_industry.index.values, rotation="vertical") plt.ylim([1,5]) plt.ylim([1,5]) plt.axhline(y=mean, color = 'r') plt.title("Industry Type vs Company Rating") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Industry vs Company Ratings.png") # Grouping by Employees groupby_employee = new_df_indrevemp.groupby('Company_Employees') # Checking the number of reviews in each group sum_reviews_by_employee = groupby_employee['No_of_Reviews'].sum() sum_reviews_by_employee = sum_reviews_by_employee.drop('missing') sum_reviews_by_employee = sum_reviews_by_employee[['Less than 10,000', '10,000+']] print(sum_reviews_by_employee) # Creating division print('----------------------------------------------------------') # What is the average stars by job type mean_star_by_employee = groupby_employee['No_of_Stars'].mean() mean_star_by_employee = mean_star_by_employee.drop('missing') mean_star_by_employee = mean_star_by_employee[['Less than 10,000', '10,000+']] mean_star_by_employee = mean_star_by_employee.sort_values(ascending=True) print(mean_star_by_employee) # Employee Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations employee = [value for value in mean_star_by_employee] x_axis_emp = np.arange(len(mean_star_by_employee))# Determine number of bars needed tick_loc_emp = [value for value in x_axis_emp] # to tell the plot where to place tick marks # Bar Chart fig, ax = plt.subplots(figsize=(5,5)) plt.bar(x_axis_emp, employee, color='g', alpha=0.5, align="center") 
plt.xticks(tick_loc_emp, mean_star_by_employee.index.values, rotation="vertical") plt.ylim([1,5]) plt.ylim([1,5]) plt.axhline(y=mean, color = 'r') plt.title("Company Size vs Company Rating") plt.xlabel("Number of Company Employees (Range)") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Number of Company Employees vs Company Ratings.png") # Grouping by Revenue groupby_revenue = new_df_indrevemp.groupby('Company_Revenue') # Checking the number of reviews in each group sum_reviews_by_revenue = groupby_revenue['No_of_Reviews'].sum() sum_reviews_by_revenue = sum_reviews_by_revenue.drop("Missing") sum_reviews_by_revenue = sum_reviews_by_revenue[['Less than $1B (USD)', '$1B to $5B (USD)', '$5B to $10B (USD)', 'More than $10B (USD)']] print(sum_reviews_by_revenue) # Creating division print('----------------------------------------------------------') # What is the average stars by job type mean_star_by_revenue = groupby_revenue['No_of_Stars'].mean() mean_star_by_revenue = mean_star_by_revenue.drop("Missing") mean_star_by_revenue = mean_star_by_revenue[['Less than $1B (USD)', '$1B to $5B (USD)', '$5B to $10B (USD)', 'More than $10B (USD)']] mean_star_by_revenue = mean_star_by_revenue.sort_values(ascending=True) print(mean_star_by_revenue) # Revenue Ratings - Barplot # An array that contains the star rating by job type, length of array and tick mark locations revenue = [value for value in mean_star_by_revenue] x_axis_rev = np.arange(len(mean_star_by_revenue))# Determine number of bars needed tick_loc_rev = [value for value in x_axis_rev] # to tell the plot where to place tick marks # Bar Chart fig, ax = plt.subplots(figsize=(5,5)) plt.bar(x_axis_rev, revenue, color='g', alpha=0.5, align="center") plt.xticks(tick_loc_rev, mean_star_by_revenue.index.values, rotation="vertical") plt.ylim([1,5]) plt.ylim([1,5]) plt.axhline(y=mean, color = 'r') plt.title("Company Revenue vs Company Rating") plt.xlabel("Company Revenue") plt.ylabel("Company Star Rating") plt.tight_layout() # Saving figure plt.savefig("./Company Revenue vs Company Ratings.png")Create Benign vs X Files [not the show]This notebook exists to create multiple data files out of one large dataset. The dataset contains multiple samples with various classifications, such as 'Adware', 'Benign', 'Ransomware', 'Scareware', and 'SMSmalware'. The goal of the notebook is to generate data files that contain only 'Benign' and some other malware classification (Ex. `adware_vs_benign.csv` would only contain 'Adware' and 'Benign'). To start out, we need a file that was generated previously through either another notebook or a python script. The dataset I'm using in this notebook contains all of the features available and all of the classifications possible (`full_features_full_data.csv`). In this case, the file was generated using the `create full data file` notebook. Due to potentially kernel-killing memory constraints, you may have to use the [script version](https://github.com/rambasnet/DeepLearning-AndroidMalware/tree/master/data_scripts/create_benign_vs_x_files.py) of this file available in the `data_scripts` directory instead of using Jupyter Notebook.import pandas as pd print('Imports complete.') path = '../../malware_dataset/' df = pd.read_csv(path + 'full_features_full_data.csv') # Initialize the random_state for later use random_state=1 df.head()Dataset CompositionThe datasets that we are creating will be 50/50 balanced, meaning that the 'Benign' and 'Adware' samples, for example, will have a 1:1 ratio. 
However, since the lowest number of samples is in the 'SMSmalware' category, we will limit the amount of each category to this amount.# Define our target variable dep_var = 'Label' df[dep_var].value_counts() # The lowest amount of data is SMSMALWARE at 229,275 samples (or it's close enough) low_count = 229275Creating the FilesTo begin, we need to isolate all of the instances that are of each given classification and take the random sample we need. We will use these variables to then create the csv files later. A consequence of the work here is that all of the data files we create will have the same 'Benign' samples. All of these experiments down the line are against exactly the same instances of the 'Benign' class. Additionally, since we've given a `random_state`, as long as we don't change this value, every execution of the notebook will yield the same data sets.# Pull out all of the samples into separate dataframes # Benign data benign_data = ( df.loc[ df[dep_var] == 'BENIGN']).sample(low_count, random_state=random_state) print(benign_data[dep_var].value_counts()) # Adware data adware_data = ( df.loc[ df[dep_var] == 'ADWARE']).sample(low_count, random_state=random_state) print(adware_data[dep_var].value_counts()) # Scareware data scareware_data = ( df.loc[ df[dep_var] == 'SCAREWARE']).sample(low_count, random_state=random_state) print(scareware_data[dep_var].value_counts()) # Ransomware data ransomware_data = ( df.loc[ df[dep_var] == 'RANSOMWARE']).sample(low_count, random_state=random_state) print(ransomware_data[dep_var].value_counts()) # SMSMalware data smsmalware_data = ( df.loc[ df[dep_var] == 'SMSMALWARE']).sample(low_count, random_state=random_state) print(smsmalware_data[dep_var].value_counts())Now, we actually will create the files. Every file starts with the 'Benign' class and we then append the malware data on the end. We have to be careful here or, otherwise, we would append the headers or overwrite the files we just made.# Write the data out to files print('Creating Adware vs Benign file...', end='') filen = 'adware_vs_benign.csv' benign_data.to_csv(path + filen) adware_data.to_csv(path + filen, mode='a', header=False) print('done') print('Creating Ransomware vs Benign file...', end='') filen = 'ransomware_vs_benign.csv' benign_data.to_csv(path + filen) ransomware_data.to_csv(path + filen, mode='a', header=False) print('done') print('Creating Scareware vs Benign file...', end='') filen = 'scareware_vs_benign.csv' benign_data.to_csv(path + filen) scareware_data.to_csv(path + filen, mode='a', header=False) print('done') print('Creating SMSmalware vs Benign file...', end='') filen = 'smsmalware_vs_benign.csv' benign_data.to_csv(path + filen) smsmalware_data.to_csv(path + filen, mode='a', header=False) print('done') print('All datasets created! Exiting...')25 - Advanced Exercises* Decorators Function review exercises 🎎🎎🎎1.assign **```myfuction```** to a new variable, then access the function from that variable and print it using inputs 2 and 3.# Write your own code in this cell def myfunction(a, b): return a + b = myfunction print()🎎🎎🎎2.Define a function named **```inner_function```** that is nested within another function named **```outer_function```**.try to execute the inner_function outside of the outer_function . 
What error do you encounter and why?# Write your own code in this cell🎎🎎🎎3.Add a line of code to the functions below to make **```factorial_10()```** callable.# Write your own code in this cell import math def outer_function(n): description = f'Calculating the factorial of {n}.' def inner_function(): print(description) print(f"Result = {math.factorial(n)}") factorial_10 = outer_function(10) factorial_10()🎎🎎🎎4.Complete the following functions to allow them to call **```myreminder```** function with **```do```** function as an argument.# Write your own code in this cell def myreminder( ): print('Remember to bring your glasses!') def do(): print('I\'m going to read a book.') myreminder(do)Decorator exercises 🎎🎎5.Create a decorator called **```retry```** that repeats the functions a given number of times and at a particular interval(In seconds).Take a keyword argument for the number of iterations **"repeat"** and a keyword argument for the distance between iterations **"interval"**. (Using the **```time.sleep(n)```** method, you can delay n seconds until the next line is executed)# Write your own code in this cell import math import time def retry(func): @retry def factorial_calculation(n): r = math.factorial(n) print(r) factorial_calculation(10, repeat=3, interval=1)🎎6.Write a decorator named **```log```** that adds the function name , function execution time and date and execution result to a file in the **"./Files/decorators/log.txt"** path. **```the sample contents of a log file:```**```The factorial_calculation function was executed at 11/15/2021, 19:37:30 with input 6 and output 720.The factorial_calculation function was executed at 11/15/2021, 19:37:34 with input 7 and output 5040.The factorial_calculation function was executed at 11/15/2021, 19:37:37 with input 8 and output 40320.The factorial_calculation function was executed at 11/15/2021, 19:37:41 with input 13 and output 6227020800.The factorial_calculation function was executed at 11/15/2021, 19:37:46 with input 24 and output 620448401733239439360000```# Write your own code in this cell import math import time from pathlib import Path def log(func): def wraper(*args, **kwargs): mytuple = time.localtime() time_string = time.strftime("%m/%d/%Y, %H:%M:%S", mytuple) @log def factorial_calculation(n): r = math.factorial(n) print(r) return r🎎🎎🎎7.Create a decorator that prints the execution time of a function in seconds. Use **time.time()** method to calculate how long it takes to execute a function. Record the time before and after the execution of the function, then subtract them to calculate execution time.Can you add the amount of ram used to this decorator?# Write your own code in this cell import time def timeit(func): @timeit def factorial_calculation(n): r = math.factorial(n) print(f"{n}! 
= {r}") factorial_calculation(1) factorial_calculation(10) factorial_calculation(20)🎎🎎8.Function **```get_coin_price```** takes as input the symbol of each cryptocurrency and displays the current price.Create a decorator that allows you to give function **```get_coin_price```** multiple inputs instead of just one.# Write your own code in this cell import urllib import json def multiple_inputs(func): @multiple_inputs def get_coin_price(coin): url = f"https://data.messari.io/api/v1/assets/{coin}/metrics" response=urllib.request.urlopen(url) if response.getcode() == 200: myjson = response.read() mydict = json.loads(myjson) price = mydict["data"]["market_data"]["price_usd"] coin_name = mydict["data"]['name'] print(f"{coin_name} price = {price:0.5f}") else: print("An error has occurred!") get_coin_price('btc') print("---------------------------") get_coin_price('btc', 'ada', 'xrp', 'link')Vocab generatorimport os from typing import List from typing import Tuple import logging from collections import defaultdict from collections import Counter import json import torch import numpy as np import sys sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..', 'dataset')) def isnotebook(): try: shell = get_ipython().__class__.__name__ if shell == 'ZMQInteractiveShell': return True # Jupyter notebook or qtconsole elif shell == 'TerminalInteractiveShell': return False # Terminal running IPython else: return False # Other type (?) except NameError: return False # Probably standard Python interpreter if isnotebook(): device = torch.device("cpu") else: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") FORMAT = "%(asctime)-15s %(message)s" logging.basicConfig(format=FORMAT, level=logging.INFO, datefmt="%Y-%m-%d %H:%M") logger = logging.getLogger(__name__) from ReaSCAN_dataset import * condition = "p1" path_to_data = f"../../../data-files-updated/ReaSCAN-compositional-{condition}/data-compositional-splits.txt" logger.info(f"Reading dataset from file: {path_to_data}...") data_json = json.load(open(path_to_data, "r")) train_dataset = ReaSCANDataset( data_json=data_json, save_directory=f"../../../data-files-updated/ReaSCAN-compositional-{condition}/", k=0, split="train", generate_vocabulary=True ) train_dataset.save_vocabularies( input_vocabulary_file="input_vocabulary.txt", target_vocabulary_file="target_vocabulary.txt" )**FÓRMULAS DE INTEGRACIÓN DE NEWTON-COTES**En análisis numérico las fórmulas de Newton-Cotes son un grupo de fórmulas de integración numérica de tipo interpolatorio, en las cuales se evalúa la función en puntos equidistantes, para así hallar un valor aproximado de la integral. 
Cuanto más intervalos se divida la función más preciso será el resultado[1]la estrategia de reemplazar una función complicada o datos tabulados por unpolinomio de aproximación que es fácil de integrar [2] **Tipos de integración** ![trapecio-tipos.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA28AAAMcCAIAAABIGwrEAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAAP+lSURBVHhe7N0FeFvZmTfwnWm3uNvut93dLrYDmQy306EOdqZDQTMzMzMzO2aIKWZmZqYkDtsOmBmTOAbZsp3YyXeurqzIsnSukiiZRHn/z//ZdaxrkSd5fj1X97z/cO+RUzZzW/r06oEOGhQKhUKhUChUOBo7snF94y5Te9gIQJMQCAQCgUAgkOc2oEkIBAKBQCAQyMMHNAmBQCAQCAQCefiAJiEQCAQCgUAgDx/QJAQCgUAgEAjk4QOahEAgEAgEAoE8fECTEAgEAoFAIJCHD2gSAoFAIBAIBPLwAU1CIBAIBAKBQB4+oEkIBAKBQCAQyMMHCAQCefiAJiEQCAQCgUAgDx/QJAQCgUAgEAjk4QOahEAgEAgEAoE8fECTEAgEAoFAIJCH////UDE/Fbz0AAAAAElFTkSuQmCC) LA REGLA DEL **TRAPECIO**[texto del enlace](https://www.youtube.com/watch?v=OIqpt-0CUNE)![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAqUAAACRCAYAAAD6rMGgAAAgAElEQVR4Aeydh1sU2b5o339y73vn3LkzcyY56jhmBEQFJRgxISCKOY8IBsygmBAUA+YAimJWMKCoIDnn2ORuUhOa1HT3el81OZlGZzye7fdhh6reYdXuqtW/Her/IP4JAoKAICAICAKCgCAgCAgCfzOB//M35y+yFwQEAUFAEBAEBAFBQBAQBBBSKhqBICAICAKCgCAgCAgCgsDfTkBI6d9+CEQBBAFBQBAQBAQBQUAQEASElIo2IAgIAoKAICAICAKCgCDwtxMQUvq3HwJRAEFAEBAEBAFBQBAQBAQBIaWiDQgCgoAgIAgIAoKAICAI/O0EhJT+7YdAFEAQEAQEAUFAEBAEBAFBQEipaAOCgCAgCAgCgoAgIAgIAn87ASGlf/shEAUQBAQBQUAQEAQEAUFAEBBSKtqAICAICAKCgCAgCAgCgsDfTuDfTkq1jQry8hU0NLX+7fD6FkBLoyIPmVxFk7rv1j/3TjOVeTIUqiY+ddLNlfnIFH+uzNomBfkyOarGjy2dFlVRKqm5CuqbNH8O1Rf/aS2q4jRSc+TUNQ5c1+bqEsqqG2hu1X26GqmbUWu0aD8iSV1DOSXyGhqbBy7zpyvoh6ako6lCRqG8joaWj6jch2b3gfu3VBfoy6b6nGXTNKIsK0GhVNGiGZiBrklFQ0sr79esdDSpGmhpbWXgFD8Qxt+5e0s1RUVyalXNn7c+uiaqCotQ1KjoOuQ6mioLKZLXoGr+czTVLWq0Gu3fSVLkLQh8FgJ/q5SqckLxc3fEfMxvDPllGOMtZjHfxhZ72wXYzZvNtPGjGf7bNPYGZyMviOfe0TVY/f4TQ21PkVBY+3lPKh+EW4si4S7H11oy+sdfWegTS0HNpzphNFLw8jyusw0Y8ss8vKPyqP5ESTfKXnFp61yMBg3CxjOCvI9IWKtI5MGx9Uwf8TNDbY4Rnafko4qnTuPCclNGG63lWkIRqj93zv6go/eX76xO59KqKYwxWs3V2ALq+9RVhyo9gG0zx2G16TrxRQ2fpK03yUK5dv4WYdkVNHzgQdJWRnNmtTlGc/fxJFWBuk+Z/3KK7RlqqUgOwtdpJoaDBmNzKJSMig+s3GcsemNhBP677Zg45Ffm7ntGquJzCH0D2Y+9WWcxnJ//+X/5x//7CWMbN24nlFLfLTtdQzExNw+y0sqefQ/iKO6ypX4I6GgsjubWwWVMt3fjQWwRf9Kj+snjL3yrqYio624snjCMoTP3EpxYRjc0n64g2krSn55j2xwjhv06m/1BSSg0WirTnnJ+8xxMBg9mrnswSfI/00abKY6+z62H4WTLVR93vv10NRYpCQKflMDfKqX6mjSFcXDmWAb/MJfjsQX0cLmWQu5uXY93SBaKJh20RHBk9jjG2n9pUtp2TFoiPVlgOIZF3aVUq0Qur6Gl5c+cAtUk+ixk0sgFeEd+OimVSq1OPIGjyWjsPlJK9TVvicJ7wXjG2R0j6mOllCZkr65z5W4kBVWNH3ii1VKrUFDb3Px5LjRth/cT/t9EwesArt59Q35lQ5+6NmTeZLvNTBw2nycsX0nzn7l+tZe6MfsOB/ec4Wlq6YcLaVU0Z9fNZe5SdwLjSql/vxDbJ+T17qTUsSdZZmaI3RcmpVLJ1SnnWGsxDpvPIqVaKsJOsXPbAc4EhhD+8gHnNs/FeNC/GGF3jNDMav13QluVQ+TTWxxZbsHYQZPZciuWogGlVEtVdgTPAg+x2nw0Q81cuB3zby6l+gORxuX10zGZs5eg95ZSHXUVFdQ0NvL+fXNqEs+tZbrxPPY/kqS0rf2qE86x3mo8C9yD/qSUSulpkL+6wIlLwcQV1PU5h7TlKP4XBP79CPz9UtqSyAk7Q4b+YsPJ+MKeUgqo4p8Sml1JjXQCbYnjuI0R4xy+UCmN98HBeCyLO6VUS9ljN7affEmOvOlPRLvUpJxazKRRtp9eSlN8WTZhDPZ/RkrVCZxcOBEj++N/Qko//sujLXvKgR2neJFRSuMXE8H7uPqoS0I5d9CLay+yqHxL1/6HpK5TRnFyvTOnnqQi/9BwV0M2j44f5PTtSAprWv5EG/6QEn/4vurks6w2N2bhFyilremX2GBphO3nkNLWHO6evU5YamFX70JzOlc2TMVguC3eLzKo6vxR00rutU1Yj5vG1rdKaTv/1hwCNs3AeOrmr0NKWzPw3ziTiXPfX0q15S/x2X+aRzF5XXzf2TzVpF36A2sTm55SmnoZpxkTsPskUgpo5Tzz3Mqhy8/IqPzYYVPvrIzYQRD4Swl80VKqVRRSWNPUFSl6byltQVlahrKpBZU8i5SsUuqa2sdEaRspz0siJjqJPEUDPQOYWuqL04kJiyAxvwJlfRNabQsN1RXIS0soU9TQ1KpB11xHpaKMkpIyFNUNtLaP32rpIaWtVEafZvWk35my4QqvkwupUqnpGAakqS8lIzacV+EJ5FY00tJ54eg6/tL42ezEeFJlZUQdc+hXStXVMlKi3xCTnE+5lP77SJnEIDuRhBQZZZE+LOlHSrWNFciSY4hJzEFR3/L2CORAUqqppzQjljcvw0nIKaehv0p2VVeKKVFT
KkfZ2NwtKtFMVVEJVQ3NNCoLSYmNIb24hsb2aJ22KpZzq6cw2nQdF58nUlClQt3aDlPbREV+MnHRieSW1dN3KKSa2uJ04iKjSJFV0ajuexDUyjLKlI00qxTkpGZTUtPQ1nWtUVGWGUfEq4Hrpm2qoiAlltikXBT1zb2OjZraMjnKhuYeY/vU1QWkxUQQnZTXxr3X8WyuLqK0UkVTYw1FUtqpRSgb3zber5nUc6ux+eMUr7Or0NBMXZWCstISSkpKUXTwalGhrJBTWianqr6FNoTSGDgZqbExJGaXUtfcKx+JQVa8nkF8tgJVb8DadkYRCeQqqqhr1KLthljXVNnGJyGbsrqmHhx6NIuOF7omKgtSiYtJJKe0rsdY2w+RUm1TNYVpccQl5SCvazsurQ01VMpLKS1VoGxoQaNrpr66XM+pVF6NqqW1KxqlUSHPTiDyVTjxWXLqe9dbKq+ukcrcZBJT8iiJ8mVtHylVUyuXo1Q1oSrPJSO7mGqVul361SgL0/TtMjlXTl2zZuAfA80FZOdWUFPXvSemhejjjlgYLcAzJJ3KTuat5F53/iApvbFpBuPfW0rVKEvlVNc3oqosIC0+gcwi6XvVsxFrGhTkJEYSFh5HVlkdvYeQq2sUKKpVNNZXkJ+RTXFVffu4TA0NihwSI17zJjZL32a617qjmfR8lNpwPqmJKeQWR3N+Qz9Sqh//mUZCTCLZxdI5vq282uok/DdbY2S6nOP3osivrKe54xyhaUCRk0jU63DiMqXrS/eSfKCU6pqpKkwnISaBrG7ntrZ6aGmQ55AYGUl8VikVtU1oOi4iUtAm8gTrl2/n3IusPgGdHhykPIrSSYhtz6PHMWnLI2mAPHqkI14IAp+ZwBcspS2kXzzEpSgZ5VLXvfTvXVKqrSY96ARbpPE8Y5dzwH0ZMw1+4rsf5uETU4AiL4TzB/dz2NuHoy7zmWhgidOFaErqpI6ZRtL9d7L1wBWCXr7ilvtiZq6/SGKBnNLE62ybNoIhJs7cS5PTUJ1H5PWtWI8YhNHqaySXqPQXjR5SKs/l1cXNzB7+C6MtV7B1tzd3YsqoaVZT/OQgK5c6c/zaAx5e2MJ8c3sOPMigvCPMp60n8/Y+Njq54et/i2vHd7LCahS//Dy/K1KqlhPu68yqdW74Xr3Opf3LmTZxButPhJJfPdAgfi31GXfw2ODEvlP+3Pb3YfeyqYz98ZduY0qbKXxxgSMeBznm48lWmwkYW27gQkQhtQP9GO9HStVFIXiuXM5mL38e3L+I6zxLHPbdI03R2PcCqy0n+f4Jtswdz+9jlnMhpoA6rZrSaH/2LDBm2MhFuB/awtolc7EYNYifB09lz/10FE1SN/hlXK1HMWS4Bcudd+F9K4qS6iaaCkK5fMSDw17HObrFFjNDSzb4hlOgbK+EKoO7bn+wabcPly8cYKWlESYTLJg91w5n31fEh93Hd6sNE4casGz3XlbONGHINz8w7/Br0hOf4rV6BZs9r3L//iW221jh4H6HFHn72E9tNal3vNmz+zDnr93gzKbZmFksxetpNpWNCpIfnGTbPBOGj1nGucg8aiVpUCuIOLeVtev3cPLyNS4dWMVM05msOfaM7MomWspiCNhjx8Sho1i86yDb1i7DZvJohvwwhOk77pBa1g9X6TtTH4anrTlLD4aQUd4KOiXZj/az0GgwPwy35fC9ZH1EVqcqINRzCbOX7CUguph6VSGvrnhx6IAXJ45sw2GSMdNWn+Z1XrVeyluLX3Bs7Uo2H7nC3fuX2Gk7DYe9gSSWtg9HaMri9r7tHD5/lxehdziyfDYbT4eTpTekZopeXcH74EG8jx/B1c6MCRZrOB2aS3WPi2Xb1176v7noFX5eBzl09BhHt9pjbmzJ2pOh5Fa1idx7SalOSdp9H9x2H+KcfwBnNs/D3MKRI0HpFBamcHf3PMYPGc+Gq1EU1CspiL2F23wjfh+7jHPhufrj1FrykhMbVrP50CXu3LvMbvsZOOy5QXxxx9g+HarsR3htdsHt+GUC/U/itnomhr8MahtTKq8i+/l5dttNYqTBInbuWIHNpCH88MNMPJ6mUloWxZUd69i48xiX/S7iuWYWljNWcjQojfLGTrvsAtPvs1peHLRlqv0e7ieWdZto85mkVFdP7ovz7LA1YcTYBWxYuxCbaeMZ+dP3fPf9aOw87pMql9pnK2WvTuK8ZhOHzwdy//JuFs+0Z49/NEX1GmqzX3BplwNTho9j8dbtrJo3md+//YFZbkEkFxcTdtKZ9ZsOciHwPld3LWa2/S6uRRVSPwAWXUMuj4+56s+9lwL88XVbyxzDwQyZuaez+76lOJyA44c55OnNUddFTJ1gwSqvJ2RU1FMUeQN3OyOGDzfDYZ0rRwPCyFWoUJeG4eu8DhePc9y6dwW3xdYs2nmVyIL69h8u7y+lLcVvuOlzmENHpPwX6/Nf6fmYjHKpV6KZnPueuB/05eaTUO4dXY/dhuM8S1J0/kDS1bzg6OLZrNx3nyR5/yfolpIIbp7oymPaBAtWHAkivVy6TjST+8CLfQdPc/NxKPe9NmC/4RhPE+WdefTbxMSbgsBnIvDlSOl3wzCfv5DlK1eyeuUKltlaMW6wOTsfZraNJ5UAvEtKpX20SoJdLRk71IhVZ8LJTnnFg3thZBeFc2LVBk69yqZCGgqgLeLGejNGj1nC+dgi6mqf4269lCNPMymXtjcnculsEPmlSrTaKh5sNmeceZuU6h25KYS9VgaYrRlASmu0aCvusW3KGKZtvk96R/d9SyRe84wxXXGJhKJ6tOpUzjpOwnLdFRKL6tGhpfThDubP205gYgkq6YSrrSZ4x3QMhs5rl1I1OX7rmT7blVtJpTTonV1F8umlmI6YhNO1REr1H+zZarSlj9g9Zz47byRS0pYw1cE7sR4zrFNKG5POsuEPH0IzFPqJDdqimzhPGY3hIl+iZbX9n6j6SKmaaC9bJk1c0SmYaWeXYW6+jsv9TuyR6ljD053TMTJaynm9lEplbybScwEThxng4PmYzKomNIqH7Jw+jokrLxJbUIdOW8Ej12kYW7pwO7mkrfu+KYUL6504GZLW1l2tLeaWsxXjxi7i9Jt8ajRqsq+uZdqcbdxKKKZRp6U40IXpY8zY6B9HYb1Wfxxqn+xhzrjfMXY8QWhqMmEPHxKWUUTIkYWYTVrOuch8arVq0s+vxNJiLRejZdTp1Mhub8Nx3XGeppfrGaozL7F2yijMnQJJLG5Ep63h2Z5ZjDdy5KxeStXkXt+E9dytXI8tbDvmNJBybhUWoyex4XKMvkzNkd4smjACI9vDBKdX0qQpJ2inNePHL+dCVD51/Vyc1TE+LDG1YP3FKGQdO+gqCXGfx4Sxtni9zKZKH+RpJuGsB+efp1DU0EjqxU1sPv6E5FJp2ImWkttbmWVgwKLjr8mpbiL2hCPmpsvwfZNLjbaVjItrmGa1mnMRbZLd8Powi1Ye4H5CMdL3pTnFn6vBmcgqtDSlXGSz83GeJJXot2lL7rJ9hiFGdsd5ld02BrJHy21K5bKzCyeCEymREtO
WcHfbTIwN7DkemkW1Bt4tpa0U3NvFsvXeBCXL9celNcuPjVPHMmXDNWILVFQ/3st8E0v+kKRUP9OumdeHFmBmurxdStXEnVqGldkSTr7KQqltJfPyemZOXYVvWI4+UqWVP+Oggz2ul96Qp/+1oUMZ4oH9+OHM6ei+19XywmMBk4YbsPjoY5ISw3ny6DVpBXFc2zoPW5dLROS1jxNUpem74o0nruZ8ePsPmB5w+r7Q1bzCy9GerWdfkVPTPYL3maRUKoJUpwO2mA4bx1Lvx6RXNdNSGcu5dVaMGWzFztvxlNTFcW6lFRaLj/Eioxpta6a+O33myhO8zJQmSOqoe3GIRZNGYmh3mKC4RCKeBPM6pZjyiDOssZrMEu8QMqq0tGb64TJzOqt9QslU9tPwtQpeHF7K4i3neJVdoz9vSQLn6TCJMTPapbQpg2uu2/C5F02hFBDQlvFojw2Txi3gyONUKtRVhBywY4rVWi6Fd3Tfq0k4t5YZkxfj/SyNKm0r2ddcmDt9BT4h6e2TUN9TSpsyuL7dFR9pHH17/kF7bTEdZ8PhoBQqaiM4sXIdHtfekC9tb8ngVkAwyZndJmmpM7nuPJtpjp68SK3s2xiaMwjYuR2fO5HIOvJws8Vs3HwOPkqmvDaSU6vWs98vnDzpQtKSwe2AxyRnlL69d6xvTuIdQeCTEPhypPRnaw4GR5EmK0CWl0NGzEMOOy7lSEj2h0kpLYR5zMJ4/HIuJxZ3zmxWvdzP3MkzsF+2FmcXF7a4OLPMfASDvv2dlZcSKS57wf6ZhpjM2sb1mFLq1a2UyxU0tbSg01bzcItFTyltDsHtY6RUW0Pa45sEx8pQNmrQ1sfhs8gEw4U+xOYr0aqTObN4ErO33Ca1tCP6pSa5+5jSxmiO2Zpgsc6PhOK2KK3UGrSFAWycPILR9ieJkbWdiLtaiZqU045MnrmVOyldYy/Vyd3HlNbx+sA8pk5fwPI1TmyROG1awrQRP/PjsKVcjC3sV3zoI6VaatKecisohvyqRjTaeuJ9lmBm4MDxiNwBVg9oJvzQPCaaLOsmpWqSTjpiNtaeY29y2j4n8VliiqGtJ29yqtD2I6Wq14ews5yJ7ZI1ncd6ueUoBn83jKVnopApqwjeMYPx5k4E6qUUNNlXWT9lPI5H35DbPghP/eYIdiYTWOYbSX7nDDwttenPuB0cTV5lAxqtiviTy5lisBDv8ByqG2I4sWg6646/JrvN9oAm5OmJZJcqadB3DTbzxnMBphOWtklpYywnF5liueYi0bKuSQvaokC2WI1hzAJvwnOqaUryZYXZOBYeCSNbH3FUk3JmORYGthx5ldWtm7bjqGtR3HNl1jgrXG4mUCxdlNr/NcWfZrnpOOa7Pya9XI2uIZqznv5EZpTRXB+Ol/10rOc5snaj9F1xYfOSqYyTJs84nCYiV4kyI4S7wdHkVDSg0alI9F2FpaE9ni8z9eVoCPNk4UQTZjldJExWQ0trOeXlDTQ1q3hz1IGZM+axZNXGtjbmvJQZY37lp98Wcbpd7jrKKT02vPHCcfosbBavYlP7d3f51LEM/X4YjifCyFFq3y2lzXGcXmbNOq8XZFR0iFoTisxkskuqUbVqqXnihk1vKT1sy2SzDinVUZv5gnvBUWSXq/T1Tjq7lqlGdhwOSaNC+oFyaR0zZ2/CLzK/cwyiOq33mFI10T6OWExaxImXmZ3fh5Z4X1ZbWLLGN4ycbu2t5M525o4bxYJDIaR3lr07oe7Pm8m+vh2nfX6E5db0EovPKKW08MZrERZmjpx62dGVrKPm5REcJ41k+vb7JBZXkf3yPk8iM5HXt6JrSObihmlMtPXgaXJb9E8dfZKVFqYs9gohvWswLLq6LF7df0xUhlw/0a4h6SJO00xYuO8JKYq+UqrO8MN51lw2nQ8jt2N5C3XPMaX1USdZNduaeQtXdrarFdMN+f2H31h49BnpFRX9SKmOuuxXPHwcQUZZHa26BlIuOTHLxJb9jxLbJzW9n5Q2RJ1izRxr5tqv6Mp/hpE+f/sjT0krjODUcivMrNZy6lk2Vc2tVFVUoGrsuC5I852KebDbBtOZLgRHFXRvCPrnjdGnWTu3LQ+n9u/OihlGDP/hN+wOPya1MALflVP1eZx8mkVVUytVlZWoGrrl0SdV8YYg8PkIfDlS2meik5aal7d5lC6numOW6PtESjuk1GQVV5OK2y8MGrIvLsfcxp0n6WU0tLbS2vmnQaPToUON7P525o7+ke9/GMfC3TdJKKtvGz/4KaW0/Vg2FkZy98JpTp/1ZtPUUYyxP0ZMvpJWSSzNRjN373OyKjq6Y3pOdKqUSfI5iomr/UnsJqWSHJ5yMGHE5C08SGuLBnU2HW0hNzZMwcDajReZFZ1rnaq7T3Qqz+TqKnPs9z4kpaShG6NWWjUadLouqelMV3rSR0o7KllE9N3LnDl1lmMbpzNulD3ef1pKUzi31BRDmyOEZ/cnpRpypSio7R4eJBYPcKybiTu+CNNRs3B/kk65GjQFAThbzWHbtQSK2tejapPSSaw8G42stlfdG4uJuX+Fs/q6zcRwlB1eYdmU5/iz0XIaf5yXPtP3YtlGpqeUVhcEstlqLBNXXCJG1tEFKHFN4uxSM0abOXM7sZjaxN5S2krauZVYjF3A4XYZ7HFcaIsAzzCYxrY7SZR0n+SkyeeG01RMpm/jXlIxitCT+ATGklWuRpN7Dadp9uy5HUehSt2jHWg0OjqbQWMxsQ+ucvb0GY45WWM82pYjoRltctxayON9dkz45Vt+HjuPHVfbJ0ppcgn4YwYOu24RVyCN/+32Xey3jWnIu+6Etf0ubsUUoFJ327+147v77kipJj8AlxkzWO8bTm6n8HWnpXsPKe1o1yXEPfLjnFTvTbMxGbOAQyFplLcWc3fbLExmuXI/vqSz27zvRKd2KTVdwil9xFVKV4skn3MMTVjhG0Zut7ajTrnAOsvRTNngT3TB25YH06FKu87hg1d4nirXR6G71xD+aikFTdEdds42xnLtZSLz2n5AN5XEE+x/Dl9fbzbPMcHIZl8vKZ3MkmMvyOyzRF0TpfHBXD97mrNeLtiMN8Bu32OS+yyvpKXk/m5sTKzZejO6a5WBHhOdiskN3MYCB1f8w3P1QYg+1wRdf5HSdqJNpSQEX+O81AZc5uujm/seJSLX/955HylVI7vliq2DK1df51DXb7tupTjEk5VmQ/jh+9HM2XSOVzlV+ih/53HVKgh2t2Oy5VqCw3M73257oqHg9g7sFm3jyqvsAfMoeX6UVZOH8uP3o5jjdJaX2b3y6JWqeCkIfE4CX7CUdlVbq25BLYnjR0upmvQzjkwyXopvXGHbGL6u5Ls9U1OZeIN99sYM/fYbfpvqRlBWBU2fVEpVpN/cjuMSN+4klKJqyebKSlOMHdqkVJ17lbWTRmLZvcufXlJaEojLlJGMtDlOtBRd7aiBOpNLKyYxZtpOnmQoaOl4X3rU5OG3xowxU7ZwP7Ws84LVQ0oVqVxYZsokxxM90+2eTn/P+5FSVUYgux2XsS8wjpL6ZnKurMHCyO
EvkFI1GRdWYj7BkRMRefTXsydVQVsRwZnVU5lis53z959x59AG1rvfIKawtnOS1UBSqsq8zd6ly3G/EUNRXTO5fuuxMl6ol1JFutRVb4TdwRdkVQy0iEwvKS29g+vUMYycd5QwSbQ7GLdm4bfWnHFTt/EguZT6PpHSd0mpjqqgXcw2morLjZ6RUtBREbSHecaWOPkFc8bjBI+ScvVd4a2Zl1hvMYklR1+R3S1a1VEs6bEh8w7uy1fgdj2Kgtpmcq9tZLrJwi4plXZqrSL17iFWTPqNn74ZxNTtt0grjOfKWksmLzrKq6xude2eeI/nrWRdXs+0SYvxepnZbSZ5j53eGSltzbrKH1bjsd33hDRFf8fl/aS0IeseHqtWstfvDfm1zeQFODNrgn2blKplBLrMwHjKH1yLLmgfVgPvK6WKBzuZZziC+QdCSC/viOZCa851XKYbMsP5JnFFA6/ioS5+wQUff16kDLTs118vpdqyh+ydO5F5W28RV6wk++EB1q3cxdVXudQ053FrizWT7d5DShtyeHRgLWt2XeF1Tg3NeYG4Wk9iYb9SqqHg1nbmGZmz4eIb8jsWPe4hpUVkXnNhzmQHDj5OoRvuroY1gJQ25Dzi8LrV7Ln0khxls14u55va8WFS2kLO9c3MnbKQA/q1TLuy7fmsFWXGI46vsWT0D//LYDMX/CK7rW2sLeXRngWYTdtIcGR+z49KP0ICtjLPfCEe3Zam6rWT9EVFmRGEz1orxujzcOZKhKyzl7Hv/uIdQeDzEfjypVRbygMPLx5mlVDb9D5LQrV33/eIlGqpur8Vq5HDmOH+jJxKaRB52z9teRi3HyZTIgsjPK6cGmnUvKaahPNrMP/diDVXkiipq2rrvp8ijSltF7rGZ+yxfL8xpVNdusaUajIvsmriOBYfj0YmGZMmk8srJnVKqbbmKXunj2Wk5V6eZlW0i2VPKa1uTuHskomMMFiNf0JHNFgacxvNsQUTmOkcSEpn139H46nh2e4ZGA6zwi04k/Lmtvd7SGllBUGuUxk3bCruwRlUdEbWtFS8uUNwUgmVHZPOOpKVHntLqSaLq6vNGL/Qm4jcarRoyL60GgvDzymlztxOksaUaql6tJNZY4czffcj0hVdk74kEb0XlEhxuTSus4K42xfxu36Lew+fE50pR9VrQdB+pVSTjf86c0zsPdsFUkPOlXVYGbVJaWXlE9xmjWOU5U4epHSLVqtLiH6TgrxKioT2lNLalj/IR5QAACAASURBVDQurJzMaIMVXJLGpXZYaUsspxZPZtbGa8QXqmj5YCkFtdRNb2bBuvPdxpR2HLs6qZt+ElNmWbNw+z1S2ocO6Kof4zbbiNGWO7mf3PUDBl0lUfcfk5gfzvkNVky0P8zLzEr98c3x/4Pp4+3bpVRHfUI0iWWVSEMaNcpk/J1nMM5oOeFRsTzZMweT4ZbsuptEWWd70lEZ9YAnCQUo2gZJt5dSR/VjN+YZjWDq9jsk6ce4tm3SVUbx6HECBfIGWt6xJJROGcKB+eMZbeHKnYS2saz6VFpLiY1Moayilmp9970FG65EIdOLTCOvDnYbU6rOIcBpGqZ2BwhJK0eDRr/EUqeUamt5edAO0xHmuN5KaBv/Kl3y+ywJ1V+kFFozrrDRagxGS84QltM1flstdetbzWTjhQjyO7qiO45h+6NGEYHfSX+eJ5d0WzBfgyInhzJlbXvU9q+XUnXqRf6YMZ8tVyLITb7BtlntIikt3qnJ5ebmWZi9U0o15N3cypxJtuwPblv3U5N3A9eOtPpESqHu1VGWmI7GwjmAmML2rugeUlpKZchBHCaOwnLzdWKKurqrdVWxPH4aT25RIU+lMaWWa7kUltvW66bJ49a22Uy2dScoUa5vA/mBrswztf1AKW1F+VwaPzsKC2d/ojrKKA3PrYrlybM4sjMiiE0uRiENA9LUkH5zBwuMjHH0etY1tKE1h5tb5mDl4EFIUnmvVqFD+fwIjnoO/kQVdKtjdRxPn8WRlRFJXHIx8sr2PAJ3YmssDWN6StoAP0h7ZSJeCgKflMAXIKXSgvhjGfzjPHzieq1Tqq4k9twarFeeJEpWjaYlCq954xhr50NcQe8xkx1cmnnlPhPDcUu5kFBEXbt9aiufsnfGaH79aTzLDwUSkZZB8kt/3Da6cTO+GKX8Pgc8bpEsU+ojZdrqh7hazWf3nTTKGhsJ87DGeIQVex6lU9FQTcY9N2wNfmWUgy+xhW0XkJZoL+yMRmN/rF04G5/jPm0cE5ZeIK5AQXFROfUv9mMzbigT118nsbSO6vQANluOYKj1IcLTy2jSVBG6bx5Gvw5nnkcQGZVNaNWF3NlsxZhfJuN6N57iBjUlj3YyZ6wBC71fktc+o7w+4iiLZq3l9Ms8OiaZd1CRugeVoR7YGAxm5OwDPEqrpEmrpvD2FmaMHIT55jvEF6lQPHFj3thf+NXIkYMBYaRmJPHa3x0XtwA98/5iTLTE4GNnwrgFXm0SKk3msjFmuPE6rsUVU1udyU2XaYz51ZqDL9Mo7bU0UFsZm3i1fzYmhovxjZa1L2+iJv64A6ZjbPHuHFMazymHiYyde5jwbEmIGgndb43J+LZJQ/LiYuRFD9kzx5AhPxmxdP91XqdkkPz6Oh7O7gRE5aNUSxOLnFm28Qh+QVGkZcsollfplw3rfvvN5rBD2I43ZsnJSPI7Qq4tURyzM2GE8RquRhdSU51J4JYZGAyehcfzFEobqgk/spBJg39g5EwXTt4JIy7qCZfd93IhNAuFflxnk37s7kSjRZyKyEWpldaz3csCo3HYH3lGZmXbjPL6KB+WzV2Hz7NMKlt0qONPsczUAPsjr9tnsatJOLWEyaPmcehlJv3eyEj1Bu+F5iza/5h0RcdwkI5W0UzKmRVYjJjEputxFHXIoK6K0AO2TPj1Z4wd3LkWmkxG8mtuemxmv38EuWXh+DhMYqTRai5HyKipzuK2qzWGQ2ay70kixY0aKoO88AqMJrNCqouOmsd7sbHbzpt4GVUvDrLQeDC/jnNgn99LktKTCQ84wDZ3fyJyqvrcLUpXFcphOxN++8mQxXuv8jIpnZSwGxzc7I5/eDZVaonNaVZMHseCg89J7w+Eroao40uYMvQHRk534njgK2KjnnJ1vzsXQtIpbdDRHHUMR9MxWG4OILa4npqsx3g6TmT48AV4Pc+kqjGGU45mjDJawfmwPJTKbO7unIvx0JnsDYqnqFFD9Wsvlk4cxsjpu7kVV0aDRk3xwz3YGA7B7A8/wnPradU2E+m9mCnjF3IstFv0Vyvn+YGFmBlKk1BS2yZcoiL21Crs1x4lOKW8c0hAxxGUHrXVsZzf5sT2fZ6cueyHv5/0d4VzR7fgcvguCfkdgttKxuU/mDHWki2BsRR1/OjUlPD6rBvb9vmRklfVlXRrOlc3TMPQ0pnAmMKe3cZde7U/axtTam40j4PBqUgLPaCrIdJnpb7sQSnlqGJ9WW0+kvHLfHmZU40y+x5u840ZPm0nD6MLaNRqaYr0YcWUCTgceUZ651pWauJ8V2E1wpAVp0PJq
VaSc28vdkbDmLXjPjGyRrTd1xrTZx3ByeVTGPX7NHZei6ZEpUFdEoyHrQm/T1jHpdAsastfcmyJGcN/MsB+50VCEtJICb+Fp+s+roSmU97SyJtjjlhOtMPraQrFpaVUVL/GZ5UlYw2XcvpFFtXKXB642TJx2HR26icTScueqUm+sJ4ZxnNxf5jQ3qUvjca5wB9TjZm/9yGJcg06ZRjHl07W52+3/TzP4tNIeXOLo9v3c+VFGnJ5CL4+NwlLKNN/J6SJWkcWL8b17EuyOy5sDeH4LJvNsr13SSjt/f2WFtsI58TyKYz4aSxdedzGa/t+Lj9PRS5/zpkTN3kdX6pvW7qaUDwdHXH1DSWr95ClPsdcvCEIfHoCf6uUNuS+4pr7QkwGfcc3//0dvxtJS/LY6G8zajt7KpNG/Mz3//gGi13PyMpMJOTcRqb9/iPfD5nBFp8g0qsaOsdG6tFoq8l85ssG82H8/M0QZjh5cTdBTr1+TKqWyqhzbJw6ou1WfP/1PwyZuIwTz/NQNuvQVj5g99w5LHbywu/ObS7v+4ONhx+QKlfpJws0Z9/CdcYYBn33E78bzmHbJV82z7Rg+qwV7AuMITUqiAtOUxn94/8ybPomTjxMpaq+lBC3uRj/OhijqUs59CCdCnksF1ZNYfh33zJkrCXLPK5zdtNUDAcPwsjuCM/yqmisiuea63yMf/mRISOMMJ+1io32phhPnMrizad4lllBg7qO9Nv7WDbdmhVbD3PCey8bV23G96m06HpHqK1Xg9EqSfDbzoJxg/hl0EhMTK1Zs24hUwwmMcN+M6eeZFJZryDm/CasR/3It//3v/nHPwdhutSbkOwqmvpJVluVyYuLzswc+TPfD5mOs/cDUirkxFxYi9Wwf/GvX8ZitXg/1303MXPcUIYY2OL5OKdnxFVdTtrTM2y0GMEv3/yGtfNJnqSXI4u7jdt8A4Z8+xvWLr6EZGUSfWM3NmMG8a9fp+J85jkyZQPlLzywMx7Kb2OtWLb/HqnyOspjLrJ55mgGSbddlI61iSPejzvYqMm7txtbw5/57r//m3/8V9vfP//5M+PtPXmaXY488wUX/pjGmB+/5TfLDRy5GUtJbTNaVCRf2sD04VLdxmC5aB/+p52ZZfgbQ8bacjgoC0VFJkGHljB5yHd881//l++HWuFyKZKiGjU6qa7PzuFkOZJB3wxllpMPwWkVqFrqyLx3kJUzrVm2+QDHvdxwWrOZk8FpKBo0aMtTuO9mh/Gv3zN8ujO+TzPIjLqJm40BQ78dzLQ/fHmeW93PMVKTeeUPbNef4FVmZa+JL9Kojuts3+rL89SyHtKhq4rj6pY5GP70Df/zX//N//5swpLDQWSUS3fbUpF8ZSMzR/ygZ2Dh4MbVUy7MGT+MIWNsOPggjYw7+1lis5A/Dl7k9q3LHHZ24oi0YHuttCxVFfFXtjJ/7M98p29jvzBx0WGC0srpv+nqqIq7iuscA379n//HP/7rnwwevwjPh+mUN2ioznqF37bZGA2SjtU6PG8nIK/t6g3p+BboVNk8Proci9/+xf/+t3RcLHA6F4ZM2b4+aHMej9zsmDjoX/wy2IDZTr6c3GzD7OkzWLrLj4iCYmKvOjN71I/86+fRWCzcy5VTm5k3YThDxszH414qcpWSlEB3HCcMZdCg4RhPmsmqNQuxHG/GNBsnjt8PI+zhebbOGsvg//0Vy1X7uR5ZiLK94jpVFkGeq5lnvYTN+7w55r6JDS7HeZjY85ahHXWiIZ0bm60Z98M/OttxR3v+xz+N2egXrV9JQFdfROKzS7jONeK3b39gwqKdnLofT7myERqiObl0MmMMV/EiKk9SOuqLEnh+aSvzDQfzr3+Nx3HnCR7ESWvidg0r6CyD/km7lI41wGrucra5H2Sf8zIc1xzkVmwx+mVUG1IJkNrUj98xeKQZi3Zf5NSW+UwZ/isGc/dw5U4AZ7ZIx/E7hk5eyX6/CAqq2+7u1pAaoD/+g779hTGm9uy9cJJt880YPWg08/feIrVE1bM40g+h1Dsc1Evnr4wcO4lZS9ewyHwi5hY2bDwqTbxSokgI0C87N+Sbtnb1q5EdHrcTKdOvTqKjKtyHVeYj+G3UFBx2BxBTUEJSgCs2Bj/xw0+jmGK3iwsntmI3eSRDRs3B7UYor55cZ/f88Qz7djCWKz24GVOELDWcgJ02TBjyHUOnrORAQAwlykaqE2+w1248QzvyN7TF41YCpdIKIMrnHFvtwKJV7py9EYjfkW1sPnCd8NzqziFGzXFn2ejowonHqe2raPTCgA5l0k3ceuWxPzCeEn0eLzi+ZhGLVrlxNiAQP89tbDlwjbCc6p7X1t7JiteCwGci8LdK6Weq0zuSbaGmNJ+8okpULd0WpdaqUbfqaG1SUpqfR2FFHS29V6JvqqYoNx95TbM+2tHYKC2G3zEQYIBsNSrKi+TUNHZbgF5Tj6KojBr9It1St3s1Rfkl+kW7u7xPg0qeT05uMVWNahrq6vUL93dt78hPTU1ZPnmFFdR1r0/H5n4eNfUKZNl5FFc1om6oo76ptXNR/87d1TWU5edRXKHqy6Fzp7c90VCvKEKulG5QIDFSU12UT2m1lN7bPvcx2zSoyovb8+r+eWmBehl5RRX6Bc67jpSK7Ie+nLvzmpi4BOIiXhEa8oSgO/6ccFrO3sBECrpNNOmeYttzDSqFlF8HG6lusj51U9fKKZCV6hej792U+qbZ8Y60qLqM/MJyapu7LdjesfljHlXJXHTZyuln/dzRSddAXX0TrR03HOiVvr4O+UVU1PVc5F+6zWEbcxXN7cdXWSyjtEq6SYEOXavUplppqimjIL+Q8lrpO9MnceSyfIrLey6E32uvbi8lNgXkF5X3Xci/217veqquVVAoK6WyTlokv/fezSiL85FJKyW0aGlubEItTcDq3E2qdwny6rZ6Su1aWVzQWe+O3TSqCgpz8yiSvj8N9aga1X3r37Fzf4/qWhQF+RTpb9jRG1x/H/hz76lrK6iobehcOP7DU+s2+/5FArmF+chKqmjo/WXXNFBRIqe642YJLTWUFpRQWdfUz7HoWQpNQyXSjQyk85X+jFJTSmFppX7h+q7j0/MzaBqoLMwjT+qlam6gvqEJdccC+J27qqkrL0RWpKC24yYrnds0NFSWoqiWzr8duWhoqChFXl3X/p50049CSitraerboDpTGvhJV/413W+EoZMmmGppba5FUZhPoULZ8wYf2kpeermw59RDEsu6hin1n0/3PNp/hEk7duZRh6JIRqG8Vx79JybeFQQ+G4H/QCn9bCxFwv8mBFQxPixz2ENgQlHnsj0dRa8Pv8md6ELKOiZHdGz4N39slgXhc/Qa4dlta6f+m1dHFP+LI9BNSjuXhPriCvkVFaiV0pCTHDx+Ux/V7HdY1VdUW1GV/xwCQkr/c461qKmegJbyB9uxNjRkxrojXH8eR3pODllJbwi67MWBkw9JKqn9KruumosjuXf9EdH5lQN0k4smIgh8LIEmwjwXMsXEnmMvMjrXXv3Y1MTn3kagmeKIQK5cf0pSUddq
IW/7hNgmCPy7EBBS+u9ypEQ5Px0BTTkJd7xwsbXEZMQwRo6ewOylO/B9lExZfd+xiJ8u4y8gpeZmmls1vGvUyRdQUlGEfxcCuhqyQy+yY8FkDH83ZM66ffqJRBX1In73eQ6hlubmFjSazz+s4/OUX6QqCAxMQEjpwGzEFkFAEBAEBAFBQBAQBASBv4iAkNK/CLTIRhAQBAQBQUAQEAQEAUFgYAJCSgdmI7YIAoKAICAICAKCgCAgCPxFBISU/kWgRTaCgCAgCAgCgoAgIAgIAgMTEFI6MBuxRRAQBAQBQUAQEAQEAUHgLyIgpPQvAi2yEQQEAUFAEBAEBAFBQBAYmICQ0oHZiC2CgCAgCAgCgoAgIAgIAn8RASGlfxFokY0gIAgIAoKAICAICAKCwMAEhJQOzEZsEQQEAUFAEBAEBAFBQBD4iwgIKf2LQItsBAFBQBAQBAQBQUAQEAQGJvBVSqm8UUuhSiP+BIP/uDbQrNEN/G0XWwQBQUAQEAQEgS+YgJBSIW7/ceL2Nf9gEVL6BZ9tRdEEAUFAEBAE3kpASKmQUiGlX1EbEFL61vOd2CgICAKCgCDwBRMQUvoVCcnXHAEUdXu/4ShCSr/gs60omiAgCAgCgsBbCQgpFVIqIqVfURsQUvrW853YKAgIAoKAIPAFExBS+hUJiYgmvl808WvmJKT0Cz7biqIJAoKAICAIvJWAkFIhpSJS+hW1ASGlbz3fiY2CgCAgCAgCXzABIaVfkZB8zRFAUbf3iwILKf2Cz7aiaIKAICAICAJvJSCkVEipiJR+RW1ASOlbz3dioyAgCAgCgsAXTEBI6VckJCKa+H7RxK+Zk5DSL/hsK4omCAgCgoAg8FYCQkqFlIpI6VfUBoSUvvV8JzYKAoKAICAIfMEEhJR+RULyNUcARd3eLwospPQLPtuKogkCgoAgIAi8lYCQUiGlIlL6FbUBIaVvPd+JjYKAICAICAJfMAEhpV+RkIho4vtFE79mTkJKv+CzrSiaICAICAKCwFsJCCkVUvqXREplijIiErOJKVWRV99XHnOrmsivU79nWZrIUaqR1fVN57MLZ20tCemZhOVUkq3sp7zKBnJr1cg66ljXRHp+Ds+is4iXq8jv0d4+fT2ElL71fCc2CgKCgCAgCHzBBISU9pCEv0Fyvvr8m4gJPobTDm8Ob12I2dzdnIkqJqtD2lQqwoMuc9jvNWGF9cjeh0dlHrev+HHlVT4plf2I4fuk8RH75OW8wMN1D+4+O5hvboPz1Xhiyls7RTo7LQSfkze5nawgUxLm2hKCAw5jN34YQ2ce4EaCoqeUVuZz+6o/l1/mkfyJ6iGk9As+24qiCQKCgCAgCLyVgJDSj5CTzx6N+4rKJMu4ydoFzniGZBETcRN3zwDupVeQo5fSel767WXT8aeE5tVT8CH1ri3E//ABvIPTiK/4C8S0Np+zmxax9kQor7LjOet5HN/nuSRVtUlpVmIg21xPcy22pJtwayisfM2u+abMdn3As9zGvnWsLeSa50G8H6US9wnqIaT0rec7sVEQEAQEAUHgCyYgpPRDREjs2xkVfD8xr+X+oYVM33CRR+nKXkKmJuWFF47rThAQX9YuqR8WqZbl3GXTyn34vswnrfYdn60tIPDsJfyiCkhRvmPffo5z5ktvbK03cfRlDqmdUd62dGQlb3Bf6cT+BynEt0tqB5/ciJPYTZrF2itxRFV0RVU7tkuPspx7uKzex+kXee+uRz9l656WkNIv+GwriiYICAKCgCDwVgJCSt9xke9+wRfPP1Dm5CFsnTUZh8NhhBW39BTaygQOLbZmhU8Y4SW9tr33MakhcK8d87ZdIyirtpf09iqrMolDS1ex63YiMZX9y+HAx1fJzb3zMXM8yu2k8l5DDFQ8O7GCGWtPcSe5ote2Jl6eXcNki7UcfZlL+oBjYGu45b6Q+Vv9eJRR0yuNXvV4BxshpW8934mNgoAgIAgIAl8wASGl77jIDywqHyYL/1HpKDO4esyDDXZTGPHzbxjPWszi9V5cji/pFLP0Jx7MmriYXY8zSeo3ytlEfFgA+9wPsvvAEXZ6HWC19RzWn40horSju15Nyp0dTJ28Aven2aT0m077cfooKW3i1d3TbHdZhNnwQQwzms2CpRvZG5hCbFl7GeQv2TF3MgvcQ3hZ0Euu6wo588c0Jjl6c+1+AB7OCzEbZ4ip/UEux5W1jTvVtz81KXd3MW3KctweZ5L8tnq8o70KKf2Cz7aiaIKAICAICAJvJSCk9B0X+f8omfykLJp4esyRCWZr8ArLJ71Hl3cTDw7ZYjx5HcfDZGT02CZJZC0vrm5lwbJDXIopJauuhYiLf2BmYMO2Rxkk1nT9IMiN9sV+oimLTkTwpkMU+6vHR0lpWz5SHgtNzXE8Fcmbsp5R1tyX3sw3sWDZ2SgiFT23yYoesmn6JGav2o6T532CUsuIfLifuYYjsNoeREh+U2d0Nzf6DA6mZjgcDye8U7q76vm+7VBI6VvPd2KjICAICAKCwBdMQEhpfwIj3uvZ1f4xPOpKOLfREiO7I9xJ7tXl3b5tjLkL56OLye4hpWoSgvYzx8KR3Q8ySNKP/2zimc8yTOft4lJ0cY/xp7KMa6ycPIZpro8JzW/qLLdMnsW9u7c4df4KPtLf2UMsmmKFrfMB9vu2v3feD7/wQlLfOvNdTVzAFizGO7DzcUavqG7bNvOxVqzxj+8zLCDjsQfWRuMwcvAkIFmhL7esNIhN04wwW+1HcGZdp5TKMq6zynws07Y9IiSvqx7vK6Md+wkp/YLPtqJogoAgIAgIAm8lIKT0Y4RLfKZT/jpkqM9j5St2WJswY8sDnuc19ty/TsbpteaMstjG5ZgScrrxlMnD2GUzAfMN13iS1S5ttTn4rLZgmpMfjzvea/+MLCeQNVPGMvmPW4TkqDrzkcle471nK8uXrmCR9LdkAZNGjGLCNBtsHNvfW7qG7f6JxJT26nbvVp5ClYq7+20wmeXKRUmIe2xTE3XVicmjp7L+RiKxVd0jmw089HTAZMxMXG6lEF/dFkXNizvHYlNj5ux/zsvCrnxlubdYa2HA5A03eJpV31mPPlx75N89v7bnQkrfer4TGwUBQUAQEAS+YAJCSt9xkf9QKRD7t8lRXuJ5Fk2yZPnZGKLkPbu1C+vKueQylbHmzpyLKuoWKVWTeHs7VgZTWXstkdj2CUnZkSexnzCRhd4RvOnVtS3LvM7KyWOx3PyAF73lt/ux/djue2UG3issMF99gYf9rCCQdGs7lgZWrPbrFSlVpuC5dAoTF/twJ6WyffKSiqfHlzFx4hL2PcvqMQZWlnmDVeYGWDrf41lOg5DSL/ikKYomCAgCgoAg8HkICCntLi7i+UfLUE8ZV5N82xULE0f2hmT3M3GniWBPB8ab9h5v2sQ9j/kYmyzH83Ve2zjUynQuutpgMGwuO4IzSOo2nlTKMy9ekt+J2B99Q3hJxwSovhHEwo+UUlnxI5ymm2J7IJRXRV2RzY765oafwHaCOUtORxLRTb5laf6sNJ/MwqNhhLWXS5Z1l40zrbD
dH0RoQUNn131bPS6w2GwidkfCeF38lnq8o42KSOnnOVGKVAUBQUAQEAQ+PwEhpe+4yHfIx2d/bL89ZdcC8mpyy0oJi0wgTFZLzoDLCfUjYJ+0Th9zK8wmHh6yxWTObq7Elfbq8m4rb0bIIWZPtGf7o3QSOkVTxS23ORgaOLD/RQ6pVWXc97+Op8tMDKx3cMLvCudfZBFf1jXmMuOhG9PNlrBbmsXfmU4/TD5SSnNfH2P+JBs2BSYRW91PuorX7J5vxvw9jwmVdZRLTfzNbViOm4fL/RTiazTI5Cn4bLTHfsc1HmbX9Lyzk0pDxiN3Zkxewq5H6T0mcn1ouxNS+vlPmiIHQUAQEAQEgc9DQEhpvwKnJjUhhGM7VzF7+lQmW9my8sBdnshqyet3/35k5QP2y059irfPTe6mVpCtl88WYl8HssPOmKG/zmX3s5x+oo1/Ls/3lp3KPG5d9uPKaxkpVe8ZwavN48RqC2a43ORp9gB3alKm4rViNsu8XxPWGYFUk/LsCPONfmPQ7yZY2GzlaEgGl/fMw9jIjAV77vJcVtttHU8V9w8sZJbTBe6nV3d7vx82HyWlLURc2oi59TZOvynoeaemzuPbROiZtcxc7cOtpPJ22VQT88iTRZbWLPU4z4mLp3F13sqOy+GElzT0U04V9w8twnrjee6mvqMenfn2U0eVBiGln+dEKVIVBAQBQUAQ+PwEhJS+5SKf89gdq9GD+M3Ol0eZyn5kon8xeG/hU2nISriOy9bT3Egs6za2Ukq3nttuszGeu5drCXJy31LOD8nvo/atLcTvkAfejzNIeOtM9XYe8idsnjkfp8vxRHe7N3zvvDMjfVm58SQ3e9zRSU12QR6vk2QklTfqmcsUcuKLVeTU9BybKst/hMvSrXg9ySL5I+7S1Ls8fV8rCdg9jzlbpJnyb1mcX5GA53oXDjzqdUenOhUpmdm8ziglraq5R3d997xksiC2LN/GUWl4grJnHbvv9z7PhZR+/pOmyEEQEAQEgb+TgLo8iefBL4nPraBJW0vu83PsXDgFoxGjmTR7I6cep6Fo1HUropb6gghuHHDFMyCczOru27rt1v60UfaaK7sWYTlmGCOlOR5eQaSWNaHTNSCLfs6rmEzK6jV9P/gJ3hFSOqDstfDKdznjh/7OvGPRRPaaYPM+gvCufWTFYexesgGP4AwS22dnd35GGcu+BROZ6nyLkFzVgELTuf+A9fjz4izlIcu+g9MKd868LiC938XdW8ipakZW10rm88MsWOHFNanrvsdyT73L0kB4oCe7LoUTVthzjOU766WUcdltO+4344iR9x3r+c7Pv4WXrKqB/Do1BeXh7Fm0Frc7ScT1un1o7/Rz0u6z18Ofu8nyd9S5FwOljCv7duB+I4boT1APIaWf4KwokhAEBAFB4AsloC55xWWfywRF5VOvbaXkuS8H3Q/gc+kmN87sZumUkQwduwSfp+nUSO6pa6AsLZx7R1Yx3cCExUeCSKnSDlg7bVUsdy5cJiAoluzcY2xkaAAAIABJREFUBO7vd8TccB57AmMobZXSqyHp3mUCHkeTV/PpxVRI6UByUlfA6TWmDB88h93Pc3vMlO4tJB/3WsVT78VYrT3Dw/SqPlHYvIRzLJxgwfJzcUT3WpT94/LrJUMD1XvA92u4uceGudsCeJzdN2qYG3mSBZar8XgWyrHtTuy+EU+04n26+1VEhgRy4lYkkcXvKd+Vedw868uJ4HQSyj+tkBYqEzi4aAZLjoYSeHYvaz1uE/yeUfLszHDOXXzAg/QKst5nDHBlHoHnfDkRlEa84tPUQ0jpgOdasUEQEAQEgX9vAg2Z3Nq/j9N3IpBJkdDmZB7dekJMShkt+uCnhtLgAziambLU+wlpeittq3Jr1g122Fiz8q1SqqMmN5v8sgrq2n2zNfM62xcsYsvFUHKb2iKsuro4rh70xj84iXL1p0UqpHQACZMV3mPdlJEMtnDjRpJigLGkamR1zeTXvO2vhfz61r6RzrIXbJ0xkQUHuo+p7BBHNdGX12M6YQUHHj/j9BEX5pgaYjBlCbvvZZL0Pl3oA9SrX6FVlvLoyjF2uB1hz4GDeLg5MXPmVo5HyEjrlCs1KbddsTRbwb6QnL6Srkjjircnu/Ye4PD9ZOLkHZN+Our09secyrboZL/l612XOhXZ1RL7t6f5Xmn1TltVy8s7p9i7fz87fIJ5lqsc4NgPkHeVNMxAjeytEeL2z+rr0fJJ6yGk9NOeIEVqgoAgIAh8GQQ0lDw6wPqtXtyLLUUKWtJUQXllHfXtsii91Zpxne3z57L22BPSP1hKe9dUR12ELztcvbgeXkBDZ69/C+nXdrBpjy9PUqsZOO7aO713vxZS2kdK2oQh/eEerEb+ykSXh4Tm99+1LMvwZ8Xk0fzyP9/w7T8H+vsXMzzeEF7UM2qY++II1oZTWHEpoe+4yzoFl5ytMJ73B86ux7nyRkZixmO2WBvw+zR3biQOJMkDiNIAddRLmyKV004OLN7/gOf59RToJylNZuxcD24mKXrMEs+NOo2tySQWn4wi4m239HxbfmLbJ1p2q/9jLaT03Sc9sYcgIAgIAv92BFpzub1zISt3+vEmv3nA4te/OcWGpS4cD0qhupstvl+ktHuyWupyHuPt7MLRgDcUqDqNVL9TY/RZNi5cx4G7ccg/YS++kNJ+JUnFvX1zGDvYiJX+ycRW/LnJJ30jdmri/J2YNNKK9YGpfccryp+xZfo4xhgvY9+TPFKk8aZ1VVxxsWSM2R+ciS4aYCZ4/6LSN//2/erk3NizgCnSbTCTytsmUykTOeRgxuzt0p2Yesq4LN2f5aajmbb9CS87lz/SIKtTvyNa/LZIstjWX6RdVttCQf2Htzshpd1PquK5ICAICAJfBwFt+ROOLJnDSo/7JCi62Wb36mkreXlsK7uOBRJd3ER3jfwgKdU1UBh5k6PrrTEZ/C++H2SFq18kRd0mT7Xm32GPvQ3rvR+TpuyeU/cCffhzIaX9Sakynv0LjBk6cjnHIgrIeJ+u2P7SGfA9NZEX1zJxxFQ23k0jvtf6lzmhnlgbjmbqnie8Kmy/RacymUMOEzGwOcrd1MpuEcwWwh5dZL+HO1t2uQ3wt49jT2Qk9ZBrNWkh/7+9+/CKIsv7P/6n7O6zv92dZ3dnJz7jjGMkKAoqgogBEUwYMWcMmDGhIIoKmHNAxSwmRJGg5JyaTJOhgW6abt6/0yQbEXFmlUH8eg6HprqouvWq4vrpW/fe2sskUxsWnokhqvU9RcIZ5liOZ8npGKLeGjmvyLiMq+UgrFZc43Hmm0d62trZ8/f/+St/+8v/yNdHMpi3cCkRqYrf3KoqofS3V4LyGyIgAiLQ2wUaM66yeeoEXLvsE9pE1auzHDh0gUfxpa19TN8c1W8Kpe2/pqc66QpbHc0ZMWMfwfHl7e/oCu6yZ+Z45my9weuij9dUKqH0HcFREXOcGcP68bPTEe6kdD1vpCIvEn/vPaxZt5EVbl19bWbfnSziOgQ8LXGX12I5YCxLLie81VKq5pGPC+bD5+P5LLu9T2
f6y0M4mlkw7eBLXnZ4VruWyOd3OHbqJAf8u/o6zbmXhmmJjFreaoo5vdaWwWPWcjwqv3U6KhUPvF0wN5/H3tAskt4K44qUC80tpdZrb/P0fY/0fIdpl621su5vDp7vs5RQ2l5nygsREAER6DMCjdk32OY8CVfPO8SXdW4pbcgL4cLpa4TEF2LUoNl+/L8vlBp+vZ7Xx5bjNG8LNyPy27enKzSEUjtctlzjVaGE0naYd70ortf/F//Ra4k4vZTh/X7EbvdzwvK7HhWtKIjj7HE/PHbvZ2uXX94cfZJHQutz3NsCRWboASaZWjH3uGGUulFYrM7g4HxLhs/x425KRcsAqeocjq+0xXKmN5cTSj/OnKWVz9k80Zxhc45zN6WyeT/pMUGsmjSEnyfuITC+bSL4N10Csl4HMH34cKZ5hxP2vkd6StD8L66/N95t18pv+S6h9F01giwTAREQgc9boKnqKT7zJuO6PYjo4o6htFEZRdCFIJ4l5LcPRtLX1FBTr0bTemf994fSRpLPb2TFlqM8SqxsRzTcvt82zZEl3nL7vh2lqxf/VSitKeT48lH0/3oky28kEfP2/KEfK3AVh7BxwnAcPJ4QmqtpDzGKrEAWWQ7GfssTQnM05NZU8ihgGRNnbCfgZSGp75wj9HcEmbKnrLczYaizLzeTysnIDsP/dADLbIZgt/4iAcevERRfRLLRoztTbm3FZsRsthomq/9Y5fhYnrKd5mtIQmlXtYIsFwEREIHPWECXz50dLizYeJpnmfXt/UX15dFc8t6P77FL3A8J5XloKCEPAjly6BxPYhW0jU9qGZU/nvn77nScp7ShgKgbZ7l4J4rsCi0NFQXkF5VRrW5t/dSkc81zF36BL1C0bcww8D8qgJUzlrL7RrQMdOrusvq9oVShTOPK4eWM7vc1//jL94xa5MWpKCWpRsHst7RavX9dNY+PzMfG9ShBSW/6iCrSH+A+wxpb1wP4nDrHni1uLPW4zO3UCjLfup3+/u13E1Rryri20xmTH7/lFzNbHFaf4Eb8LdZNMMdkhAsbryQS22EOUBVBu5wYv/IUt1M+zdOt/qvjkVAqobS7ikHeFwEREIHPVkBPWchBVrt5cS08jwbDPPaqJK5umor513/lr3/685uvP3/FqJVneaEwDHZqoDwripu7Z2P9y3cMnbwWn+uvyauoa57KSV8UzD6XUVjN2E9wvJLsoO3MHDkA8/GuuO86wMF9BzjzIIbcquZJqFr1Gki54M7KLUdlSqgPuZ5+byjt8VCkfM2ehSvZ8+DtJzppSc/PJjQ+m5iierI/Zhg1Dm81KmKTUniWVkJqpWHKKi0pikISyjrvU5F9m9Uu6/B+mE7CJ3mkZzch2rjc8rq9Zf3ta1ZaSj+khpB1REAEROAzFGgs4NHBvQTcCCPTqNXyvzuSJjQ11ajq1Wibb/U3UlOURUpSKgplNZrGziPrm6oiOLlzH6fvxKKUyfO75/9sQmmtjozE62z2OM/NpJKP2xL6MYNbZTantm1gx9UYXn2kJw+9HaY+9c+KslJepReSWKL+sIntP6ZfD25LQmn39YOsIQIiIAKfq4C+IpabZ67y+LWCmo5dS3vmkBqVvDznx9lb4WRVf7wBTm2Fl9H3PRgYugpe6Smh+B+/xZ3UMtI/yVOK/otWyLJMrvgfxfd+ylu38/+LbfakuTKJUzvmYPHDP/n7n//M3742w2F7EI8VNZ0e7drV+fmclksobava5LsIiIAI9E2BJlU2r0IjSc4pax/I1CNH2lRH9stHPHudSlHtp0nEEkp7MiC9b19tj6d83zp/xHuf9JGenzjY1hRxZfcqlmwP4MS9UK5ePswCm4F8+9XPOB18QViHqbU+cVl66NxJKO2Rqll2IgIiIAIi8AkEJJT2UFj4nFrb+kpZs2IvsvPEc0IzVS1Ta9XqSI/0Z4bFz/zi5MvNxPL25X3lmCWUfoJaUjYpAiIgAiLQIwISSiWUdjlo5nMPaumJiYTmVnWcRqv8OZsnmTPEyYeghDIJpT1SzchOREAEREAERKB7AQmlEkr7bCh9Z6guvMfqcSOZsvU+TxXqPnfs0lLafaUna4iACIiACPROAQmlEkr7XDB7ZxhtPs9aEu54MHGSGwdDsknubYPKPsK1KKG0d1a0UioREAEREIHuBSSUfoQg0HUI6huDZ/rM8ZUl4rPUlfVnwoko7vrxsZ/z8Uoo7b7SkzVEQAREQAR6p4CEUgmlX0ZLaU0VwQHbWHf8Kc9y6/pcX9K2IC2htHdWtFIqERABERCB7gUklEoo/QJCqZrwm75sO/GUZ4o3I/Hbglxf+i6htPtKT9YQAREQARHonQISSiWU9vFQ2sDrB8fYfvwxT7ONJsyvLiAktoAkZd8a7CShtHdWtFIqERABERCB7gUklEoo7cOhVEvc00O4uq5jo6cfPsfPcMjwFeDHlmXL2RyYQKSysU8dv4TS7is9WUMEREAERKB3CkgolVDap0KZ8a341KhTzBv1C//+y5/56586fv3DdDUBEXmkq/rWYDQJpb2zopVSiYAIiIAIdC8goVRCaZ8NpcYB9Ut5LaG0+0pP1hABERABEeidAhJKJZRKKO1D14CE0t5Z0UqpREAEREAEuheQUNqHAsmX0hoox9l1lwMJpd1XerKGCIiACIhA7xSQUCqhVFpK+9A1IKG0d1a0UioREAEREIHuBSSU9qFAIi2IXbcgfik2Ekq7r/RkDREQAREQgd4pIKFUQqm0lPaha0BCae+saKVUIiACIiAC3QtIKO1DgeRLaQ2U4+y6RVhCafeVnqwhAiIgAiLQOwUklEoolZbSPnQNSCjtnRWtlEoEREAERKB7AQmlfSiQSAti1y2IX4qNhNLuKz1ZQwREQAREoHcKSCiVUCotpX3oGpBQ2jsrWimVCIiACIhA9wISSvtQIPlSWgPlOLtuEZZQ2n2lJ2uIgAiIgAj0TgEJpRJKpaW0D10DEkp7Z0UrpRIBERABEeheQEJpHwok0oLYdQvil2IjobT7Sk/WEAEREAER6J0CfTKU9k5qKZUIiIAIiIAIiIAIiEBXAhJKu5KR5SIgAiIgAiIgAiIgAj0mIKG0x6hlRyIgAiIgAiIgAiIgAl0JSCjtSkaWi4AIiIAIiIAIiIAI9JiAhNIeo5YdiYAIiIAIiIAIiIAIdCUgobQrGVkuAiIgAiIgAiIgAiLQYwISSnuMWnYkAiIgAiIgAiIgAiLQlYCE0q5kZLkIiIAIiIAIiIAIiECPCUgo7TFq2ZEIiIAIiIAIiIAIiEBXAhJKu5L5KMs1lGUpUNaq0X6U7X3qjeipzUskIVOJSq371Dvrge1rqVVmkZCcR01dw8fdn16LSplFYnwmSpWaD9LS1lKSlUBKbiW1DU0ftzyyNREQAREQARH4zAUklH6SE1iPIuQ46ycM5sdvHfCJyKZS/0l29HE3qk3i+NwRDDBdwsW4fOo+49ykr0zl8bFV2Pf/DtMFp4nLUX08K20R0dd3Mcv0R/pN2suztLJuQqmeqtTHnFxtx6DvhuB6IpKcms/hgvgtZHqKQ
v3Z5u7LvcQiavva4f0WCllXBERABETgdwlIKP1dbB/ySw3EHpqGxa9TP59QihpFyAXO3Agnt6Kezz1X6POusNraClffcLI+9qcC1RN2T7LAacs9koo/pBVWT96V1dhZzefwi8zP40PKh1zm7etoifefj7XZVPY+TKG0sf0NeSECIiACIiACHyQgofSDmH7PSloSjsxkxACnzyiU/p7j7K2/o6fy/hYmjpyJ59M0yj5ySFKH+zDDYgLrrsRSUP8BTcr6Sh5um8jomXt4klrK+4pTl3qbmyGZFFV8UKeA3nMCmjTU1WpobPzcP870HlIpiQiIgAh8SQK9J5Q2VJATH8HLqHSUqgbUpSVUNGipV1VQWlRIQZGSKnUj+iYN1WXFFBUUUFRcSV2jjjeRQE99SRbxUZHEZSqpa+juP3U9qvxkop6/JDa7lEqVGr3+zdbQ11OSFceryDiyimvpdnOAvl5Jekw0iYpiInymvyOU6qkvbStjMbVvbVRTnktBeR0adSU58VFEJRVQrW40OsaWy1NboSAhMoyo+GxKarXojIptWENbmUOusg61VkddcSqxsekU12habjNrK8lLekVkXDblddq3WkS1VBUWU1nfYHRLWkN5bgHldRrUlbkkREWRnF+FurHjTnWqQlJfveDZixgySupo+EOzST1hno5YTtnK3cRiPqQt88P/8LUkBsxjjM0yzkTloOrI8O7N1Ifh5TQSp823SCjSdDqfrWeV0ldncJvgzPYbHxh23703WSoCIiACIiACn53AHx9K9RXEntvAooWb8D0bxK2zu1jqZMfwn505/FpBYUEsl9aMZcA3w3ELSkZZV07WywusH9efH4Ys5lJCYUvfR42CRwF72L3Hh8P71uJkNhTbZSd5lV/TRatUPcnn3Vm3+yz3nj0jcMcsxi89SWxOVXNg0CgecWLPTjwPHMJr7RQshliz4kQk+dVdtHHpVaQG7mDFyu34XwjkwsFNzLcewLffTOFAW59SjYLHx/e2ltENJ3MTbJedIDKvmvrCCM65T8H8hwHM3rGf9a4uTLEcwA//+pHxW++QVqJuCTLaYl4cXY3rku34nb3IqZ3zsLWwY6nvU7IrNDQWxxDoMYuRP37H5HUH8HBbyQInW8y+/w8DHT25dvkIu9YsYNZkKwZ+8z2jVl8mvrAOvV5JXNAh3CaZ8fPA+Zx6lUuNXkthxDm2OprS79eZeHi6sdhlEmN+/ZZvfrBl260UStSGRKYl/+E+Fs5bjc+5IG6fXIfjmGnsCkpC2UUrok6ZwOMzu1g61Y6FB0NJeHkDrwUTmLDyJK9yq5oDsTrvJZeP+ODrtZmFzgvwup2Esq2ja0Mxr696s23jDvbudmfZglXsPnKOe69yqaxvBG0sR10smbT2CrH5dW9CoFbJ66u+7Nu7nwP7PTmyZzWOk9Zz9pWCqveEaH1NKg+OebPfcz9e+/axasJgLOYfITyr8k2oV+cTftkP34PebFngzMJ9N0ksqmt+Xxt3lHlW9rhdjCav7RiMqwu9kogzm3A2/Y5/fz0Y++lL8Q1OJSHiEed3LWG63XwOPo0hPMiLJZPGs+pYOIpKw4cuLcrXgRzx3IuXlxf7D+9h7ZTJbDgVSXZZMQl3/dnkMgHr6Xu4ctkfH/e5jB9mho3LPu4ll1BvfMyafCKvBODr48UW12ks9AwivrC2/fjU+RHcOHOKM6dPcWzHGtwO3CK2oJpqRQTXD61n/mQ7lh55ToZRs3RjeSIPzp3k1OmTBOxezeJVXgTFFKLq4s/ImERei4AIiIAIfFkCf3Ao1ZETuIrxE9ZzLbGodWBNAxH7HDDr58CBKAWVej3lQWsYM2BMSyhtDkFqHm22ZqjFktZQWk/cUVeW+4aSUdpAE3ryLi5jVP/BzA14Tf67BpWoHrPdfjb7HqZRYhgJrYnlVMBdsgsr0dfH4b9wGUdC0ik1vKfP4/JSKwYNcuH4qzw6b05P4e2NOEzeSGBcIXWG/+j1FdzbaMuQ/3NoDaX1xPktYoVvCGklLWXMv7ScMf2HMMc/itxqPZowTxxN+mE2/QCP0ivQ6JTccrNhqNlCzsXkU9ukJePcUmwnbmjZT3MLXS3xR+cwsv8IVl6IpbBWjzb1JAstfmHolP0Ep5Sj0evIubCUMf37MWbtZWKKatGhIsxzKhbDXVsDqKHMVTxwt8XEdA4nm0Op4Y9BQ/g+R4b3G8JM72DSKzTolLfYaDsEiwWnic5V0dQQgc/U4VjOO8GrnBr02kSOz7Fk7JJTLe938TelyzzL0tETWHfoALu8/Ni9YDJO688RnVtO4fNDLHJcyKHgFMpUKZx2tWbunoekKLWgL+LhjqlYjV/H1egC6vQqgreMZ9goZzwuv0ap0tKYYdj2WJadeEVOTWtTZm0Kgetns9jzJgnFdeh1OVxeMYZhU3by+D231LX5IRxasQzPa9EUqHRoX3oxfdhgpno+I72spTVeW/yCIwudWOzzgORSFamnF2PnspvgxGK0TToyDedt7BJORGRTbRwEjW30uVxdZYuN6xFeZla1hkEdWeeXM27SGny8duLjt5NFk6ey4XQkOVU1pF51Z57rXm7GFlGn15F7aRV2Zo7sfJBMiSH4aRM5MX8MY8fPY8vJF2SX16F85MFUs4FM9XxKWmv5G5Vh+C2ZxmLvuySWqEg7uxT72bu4H1+EtklPyUs/Vriswv9hKuUaLWlnlmBjMYsDz9Io1+spDtrIBLMJuN+Io1Bj8G6iOvESm6bPYNO5l+QaPsw1JnN60Vjsl50gIrumPewaE8hrERABERCBL1fgjw2lmki8HcyYsPE2acq2W5paEo/OwuIXx/ZQWnFzLWMGGodSTXMoNWkLpTUh7LIfhf3U+SxduQa3NWtY6zKGAV//L7/OPU1cvupNS1nbuVY9ZafdUIbZr+diVCEqbSMlxUrUDQ2oQnbiMMqOaXMXs3qNYXurmTumP9999TPzT8aQ//b92oZ4/GaOYOK66yQV1bfuq4F44z6l1c/YPXE09o7zjMpozUBDGWefJCZPhSbmEDPNBzPzYBSK5oE5WuIOz2DEQOeWfqn1kfhMNWf00nPE5Ne2H5M+9xIrrPozcNphohRVNOZeZIXVQCZuDia1pGUyKs2z3Uw2Gcb846/JbU7VOhTnlzJm4GR2h6TTkk00vNgzmWHD5hmFUi2xvjMZOWgah8KzWgboaOPwc7FgqJMXLzPL0eurSA6+yr2obCrqdehV0Rx2scB0ug8RWRVdho+ahztwGOuMywovnmQpW8I8UBt/giWjR+F65AWKqgZKQvYw09BS+CiNci3oDIHTyhSnPU9JN7TK6fO5usoak/Hu3E0y3KrXU3R9LXaW8zgUlklF84eEUkJ2T8POZT8PU0tbbucbwvO80Ux7zy11fUUYPrNsmelxm6Tilmu09pEHky2c2HE/CUNGpjaB04utsZ53mNDMShpKnrF/hj2uXsGkGj4k6YsJchvHmDkHeJ5ebtQtou1ibPmuL7/LFntL5hiFRajhyU5H7J2ns2LfQzKK2lou9ZSF7GWW7Wz2309u+WBF
I0nHFmDrtJmg2AIM2VBfeJ0N40wYtegUkTmq5nPRmHKSJaMGMHbNNWIL1DTVJnJmqQ02cw8Rkl5BQ0koXrMm4rr/PiklDTRkBbJ+wniW+j0ns6o1hJekEpuQQ2mNQbuW557OWNmt4tLr3OYPl/rCYHY5WTFp3RWi21qq9QXcWGeHub07t+MKkVmxOp5/+UkEREAEvnSBPzSU6lJPMN+8P877I8lpTg6G0/HbQ2lN6knmj5yKx/1kiuoaaWx886XTNdHU2lDW8WRrUdzcyOSBX/PPfw9l+pYrxBSpmlu10k/NZ/TU7dxPKqTOaFuNjTp0TU3tYbBte/rciywfOZDJ25+QXto2I2nHgU5lqadwtXJix90kCrsoo/YdoTThyKzmfqkHwrMoU1xiheWvWCy6QKxRKEUbw5EZw+hv5catpGLq3hFKtc/3MsW0YyjNvbCMMQPs2R6cQkt2/dBQmkDA7BEMnbqflxnl7aGzPi+CoFN+HA04wCrbQQyadoCIzK5CqZpwLydGDBnJyjPRFLQ1P2tTObtoFCbj3AkKe879YxuYO2s1Rx6kUdZ6r1mbdAxXK1tWnX1NnqoJXe4V1ow1w87tGgmF9TQZBhVtnYDltF08TivF0Gu25qUXsyxsWHMhhvzalgtCl3WBFdYTWXc+hrzWZW3ntPm7vppw7xlYjl7M6Yic1hZyDdG+sxjt4M7NuEI0TVrSzy7FZsg43AOf8/z+CTbNcWGt731SS1tu3esrH+IxcSQzPIJbWno77OTND7Whe3Ea6cyOu4kUtyU2TSSHZozEfMQyTkfmUdPaTbqpJhyfmZaMX3Ge6NzWDyg6BZeW2+Cw5iyvcgwfxJqofrQDR4tJbGpvwYS6UE+chw1i6r4Q0srUpJ9bzrih49hw+RmhD06yZe5s1hy6S3JJHfqmCp7scmSkjSFw5r17mjDNa47MtsJ20QnCDS2gTVWEec3E0nQa3o/TKG+7Vd9gWM8SU4fdPEku7TKcvxGRVyIgAiIgAl+SwB8aSrVxvsw06YfdlsdkGoW539pSWpXgx2xzM+YdjW6+Df7hJ1BLWexlPKaZ8X9f/Z2fbLZzN62IaL/ZjDSfjd+rnK5vtRrtRJdxhsUjfsXa7SbJxa19P+kYSksSApg73Jy5R6LI6aLzYrehtOAqa6x+pb/jISKzjfoyalM5NX8Eg2w38SBFibrHQ2ktKVc3MWfOdgJf56PSpHNuoRXmM94TSrXx+LuMYIjtZu4lK9sHImmjfXEZ1p9hk5ezee9xroemoqx9azCWNo+HexewYNNp7jy4gfeiqbisO87znMqW1jd1GF6OFkxxv0WiYVCRvpR7m+wZZrOWa3EFNPcAQU2831xGWczDLzz7nf1J9cpbbBpngvWyi8TktfZLrQ3HZ9oI7Fa0LtNGc3TWCAaZTGb5xj2cuBZKqmFQnNEtenWYF9NHTmZTUNutbaOLp/2llhhfF8ZMWs+12PzWMoI2IYAFloOxc79pNECqibJ7W5lsZsvaKzHkt/bbVcf742o1gvm+L8hsbmlXE+E9gzEO7u0tp1BP1MFZjDKfzoGn6ZTXx+A/x5LBJpNYtmEPxwOfkVKselP+2hA8pwzHal4A4VnV7R9A2osNNKacYukYKxb4PifDsF/VUzynDsPM2YtnaW9ahrWJJ1g8ehjOux6QXKLt9OHOeJvyWgREQARE4MsT+ENDqb7wCitG/EK/CfsIy65oHZBkaCmd+Y7b96Nxu5GMsjlR1PHQ3Zqhw1v6lKpKb7Fu1K/0t/XgYXrZm9uC+hJeXL1DfFFF+3+FS4g+AAAMBElEQVTybadYX/OKsNclVKv0oKsg5vgiRv9syqIzMcRdcGPsr/2w2x5Mepmh/2fLP33Jc67djqOovC14tr5R9YCttoPob72N4LTWW8NvhdLy0ttssB5If9sdLeu0b7SUsGt3iSsspzq68+1745bSCk0C/i7D6T9kEeeb+5i27r/BcFt/OONXX21uKdT1cCjVpp1msaUJM71fkmVo8dalccbV8r2hVJd+hiVWpjjveWo0MEZP6c0NjBsykmXnYttbNFuP8s03w6CyG97sPXSOwLthJOZUUK99kwK18X7MtbRjzcUYcktV1NU9Z5+jBVbzjhOpaAlW6rRb7JhqwgCHPTxNe/cUTfUhu3A0M2X24XCyW0Ne6rnl2AwYyvyACBTVevSlt9hsa7g9fraL/rNaEvznYj1uFRde51CqqkPbYcaI1sPSpnF2kTX2K84QlVWFtrlF3tAXdRk2ZlPZ3WHuTy0R3tOwGjGPgJdZLR+c1Gnc3e7MsP6Tm9d905/UGge3y+2hWq98wHYHa6ZtCyKxWI2+7DZbx5syauFpohSdu7noi2/gPs6EscsvEp1rNGCs/WzoyQ9ci/3Imex7nNLcDUSXfYk1NkOxX3+TuILWv58mFS+9Z2E7eR2XXuXJ5PrtfvJCBERABESgTeAPDaXoc7iy1JL+//4Zx503iS+upaYwkmOuI/nl20mtfUpBHbqTiYP7Y7vlbvMt0cqkIHY4DObHn2cSYBh41FhG8GY7Bv/nG4bP2UtgWBKpcSFc2LoSj8vR7xwxry+7ye6dgcQrKpvDsL7iNhvGTmHL9SQK8x6wffwgvv+POfP2XCEsKYX4kPPsWLGdK9F5dBqAr68gxGMyJt/9wpTd90gpV6NvyOXamrEM+taKDUHR5NcqCd5iz9BvvmH47D1cfZFIatwzLmxbicel1+RVNdIQ5cMMs4FMPxjVGoK0vPZxZvgvU/AOz6JCr6fwjjsTBw9h+oEQsipbugqoXnox034xR0OyMCzSZZ1jmeUAJmy+T6qyZTIkzbNdTB46FJcjr1pbanVkn1vM6AHj2dZ++15NiIc9ZiYuBES1tRIbyjCdEYOcOdjepzSaw9OHMchhH2EZ5WheeuFs+hPDl5zndX41FSmXcbMZwE/2uwlNLESt17/Vwqan4OoqbMynsc/Qn7Xt9i6gDtuPs9kgJu0IJq3UUHYtJVE3uHwvloKKlj6duuwLrBg1lGHjl+F1JRxFhdpoSizDoK7l2FgtxD/kMbeDo8jKvcvOyeYMm36Il1mVaJQRXL90nE32Jji6XyDwXBDhGUpq2npetP511D7awWSToUw/8JzMigZKIwM55e6EyeCZeD+4S+DdcDIUD/B0Go7J+B3cTy5pbvHVlrzm5qX7xOSVo25UcHmFDdYLDvM0+BYPIzNRVnduJdTnB+JmM4bFh25yPSiMlMJK1I0FXHezZaTzbh6nGAdnDS/2TmWE6XQOPc+gQlNC1PUrnNwwkWGT3Tl3+Sy3XqZTEHuW1TbWLPELI8vQOt9YQLDHHFzWHuNZViVawwcjdQQHp1tgMn5by9RZTdBYEs2ty/ebB5yp6yNaug847OFRaxmaKhO4G/SU5HzDNmp4unMKo5y2cuPWNR5GZVKUc4+dDhZM2XiTuEJDKG2iOtqf5TOW4HMnuWXGBl0+IUc24+ZxkeScyrb6SL6LgAiIgAh8wQJ/bCg1jO1W3GePsxk//v1/+Me/fsZq2g72L7dhcL+2gU6GldIJXGvH4K/
```python
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.trapz.html
!pip install scipy

import numpy as np

# Single-trapezoid estimate from just the two endpoint samples.
# Note: np.trapz ignores dx when an explicit x array is passed, so dx=2.0 has
# no effect here; the result is 0.8 * (0.2 + 0.232) / 2 = 0.1728.
np.trapz([0.2, 0.232], [0, 0.8], dx=2.0)
```

```python
# Integration: composite trapezoidal rule, using a function fx()
# source: http://blog.espol.edu.ec/analisisnumerico/regla-del-trapecio/
import numpy as np
import matplotlib.pyplot as plt

# INPUT
fx = lambda x: 0.2 + 25*x - 200*(x**2) + 675*(x**3) - 900*(x**4) + 400*(x**5)
# integration interval and number of segments
a = 0
b = 0.8
tramos = 3

# PROCEDURE
# Trapezoidal rule on equally spaced segments of the interval
h = (b - a)/tramos
xi = a
suma = fx(xi)
for i in range(0, tramos - 1, 1):
    xi = xi + h
    suma = suma + 2*fx(xi)
suma = suma + fx(b)
area = h*(suma/2)

# OUTPUT
print('tramos: ', tramos)
print('Integral: ', area)

# PLOT
# sample points
muestras = tramos + 1
xi = np.linspace(a, b, muestras)
fi = fx(xi)
# smooth reference curve
muestraslinea = tramos*10 + 1
xk = np.linspace(a, b, muestraslinea)
fk = fx(xk)
# plotting
plt.plot(xk, fk, label='f(x)')
plt.plot(xi, fi, marker='o', color='orange', label='muestras')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Integral: Regla de Trapecios')
plt.legend()
# shade the trapezoids and mark the segment boundaries
plt.fill_between(xi, 0, fi, color='g')
for i in range(0, muestras, 1):
    plt.axvline(xi[i], color='w')
plt.show()
```

Running the cell prints:

```
tramos:  3
Integral:  1.3695736625514496
```

**Simpson's Rule**

Besides applying the trapezoidal rule with a finer segmentation, another way to obtain a more accurate estimate of an integral is to use higher-order polynomials to connect the points. For example, if there is an extra point halfway between f(a) and f(b), the three points can be joined by a parabola (figure 21.10a). If there are two equally spaced points between f(a) and f(b), the four points can be joined by a third-degree polynomial (figure 21.10b). The formulas that result from taking the integrals under these polynomials are known as Simpson's rules (a minimal implementation sketch appears after the historical note below).

Source: http://artemisa.unicauca.edu.co/~cardila/Chapra.pdf

*(Figure 21.10: (a) three points joined by a parabola; (b) four points joined by a third-degree polynomial.)*

**History**

The formula owes its name to the English mathematician Thomas Simpson, although it was already in use before him.
It corresponds to the barrel rule that Johannes Kepler had already formulated in 1615.

[Regla de Simpson (Wikipedia)](https://es.wikipedia.org/wiki/Regla_de_Simpson), http://www.matematicasvisuales.com/html/historia/kepler/doliometria.html
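As a concrete counterpart to the trapezoid estimate above, the sketch below applies the composite Simpson's 1/3 rule to the same integrand `fx` on [0, 0.8]. It is a minimal sketch and not part of the original notebook: the helper name `simpson13`, the choice `tramos = 4`, and the cross-check against `scipy.integrate.simpson` (called `simps` in older SciPy releases) are illustrative assumptions.

```python
# Composite Simpson's 1/3 rule: a minimal sketch, not from the original notebook.
# Assumed names: simpson13 and tramos = 4 are illustrative choices.
import numpy as np
from scipy.integrate import simpson

fx = lambda x: 0.2 + 25*x - 200*(x**2) + 675*(x**3) - 900*(x**4) + 400*(x**5)
a, b = 0, 0.8

def simpson13(f, a, b, tramos):
    # Each parabola spans two adjacent segments, so tramos must be even.
    if tramos % 2 != 0:
        raise ValueError('tramos must be even for Simpson 1/3')
    h = (b - a)/tramos
    x = a + h*np.arange(tramos + 1)
    y = f(x)
    # Weights 1, 4, 2, 4, ..., 2, 4, 1 over the equally spaced samples.
    return (h/3)*(y[0] + y[-1] + 4*np.sum(y[1:-1:2]) + 2*np.sum(y[2:-1:2]))

tramos = 4
print('Simpson 1/3: ', simpson13(fx, a, b, tramos))

# Cross-check on the same sample points with SciPy.
x = np.linspace(a, b, tramos + 1)
print('scipy.integrate.simpson: ', simpson(fx(x), x=x))
```

For this polynomial, the four-segment Simpson estimate already lands much closer to the true value of the integral than the three-segment trapezoid result printed above, which is the accuracy gain the Chapra excerpt describes.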
5AwYx/NbTJ+3k0CdH8znPgJ8rBqBHURqH3wZLLNf6EiAlkLvxkNetRh98XVcB9044s2HrUa6l19OnVlIS58emmf8HMUCrpODWEdZNHcH8nREkmsa76tpJj9zN0inzcAl/zjuVDp3kKtpTT178WXYtNeeHr0exyC2CxMrP3DDq+o+RNZD3OJzddhbGY1zO8jwrgShPW6bN3U7Y06qPbK5HLmWV75VczRU05d7hxOal2NisZ5ePL74H/Tl3O52qrvfj79eIAcv3XiazrY3M6L0sN5vDtqAnVA4+XTcaWi95Y0jZ8dW/VgwAVckDTm+axsiZRnf898NUT+erGA4uN2OOwykelasM26apFTKaCp5x0Wsls0Z9ww9znQl+VMYnW3/r+8i7fZz1kyew3PsyrwafUH8sBrSRLXk1mM/G6cQDyj962KeXMvD3yD7NKC8h+BWeAV+MGCCNP40aZW8zJUnRHFwzix+/HsGcTX7cK+7jV4sBrUoqE87jMms40+19iS0xmRX03eTfO2Hwtll78BamHw1OHZ8RA/S9xmSijutcCX5cRa9KSfXLK+yxsjB4BvwWYoC+t5r0y/txWruTwPgqZColNVKYwKJfKga8ozIlml1zhmO+fC83Ck0Z9FD0OAinaWas9I7B9KMBBsYwgSVYzNtMyAspXMHk9UvFAHSoOipIuuDFmvlzWOG8jyOHj3D8VCRPC9veb1n4W4gB+j7qXknXM3u2Hr9HpUyFqv41d32W/DwxQNdL/dsnxJw5x6OiTkOIh0nP37/9w8QAPYqqNK66zGbygm1ceGNi1/etE+8EgS+SgBADvkiziEYJAn8PAuqyW/isNGP09O1EvqgYdFeXxABpa0FjmEAnyoKreNtN4fupjoQ+KR58qi65+zalx3L32StK+hd/H5P7v4kBRdyUcgZMdyD0cZFxa0FVETf2rsTi26k4no6n0CRgXop7vRf3lMzCFjS6LoqexHDlRjyvK9uQ9Uqu6v0Jr0zu4D4rBujqSQjcipWZHT7XMmiUVkL9OQMGxACVuow437XMHDGZ9cdjyWsbuAHRI69OJynlNUV1ik/FAF0XWZG7sBs/hmUHrpPV9D54XVMWx/H1Vixzv0DyOyX6X+kZgKqE2MPrmDFcalscuSaxBOrm1zy6/4TU3CbU/2cxQOJSy7MgZxaZT8Ji8R6iHuX1jxEdTUnheFhbYLv7Iil1EkQNpZIYMGPWr/cM0Orpzr7KgeWT+HHRHqJfmngSaCqJP7WFJTbSYr2C3oossjKzyWvRoFUr6a5KINxtMTOt3bmUWPPxcDX8ra18ZTgmd/CYRCLclzDT2o2LjxN4dNIJy+ETWC3lAzAZ95rWHJ4/iudFdgMafQ8VKXFcuxrLy9IWenp76euTo+gPHRmo+FeLAe2Sp0Ew22Z9xyQpzj+r9X1ohaaN/OfxPH72ioafKwZUPSXMeS7T7fZxNUvKzg76njxij67FYvR8XM8kYDCfkRA1z8NxW7KIzf73KZW9Iyc7g8zsJjQ6DcrualIivVg5fSHbgx7z7v3w7u+2lroX59m9YBRTlh/gWraxPhgQA8z6cwaoePcsHBfLEUxYsodL6S0mfWynMOkJ8Y8zGDLfnbaGpPNuLJqymN3nXhpDXfRyCu+exMFsymACQcm1eiBM4EsRA7S1+eSlp5LZoDG4dstq07l20B7LuZs4ea/i/yAGaOkteUyo0zTGzHQg4OG7wQR4aOtJv+LNqnn2HLxewJAymbaONEPOACtcwlIGM7a359zlxJrpzF13hLvl0hwo5Qy4wp6Fv5UYoKUj7wGn1k1nzlpfYg2JRrTUpcbgLeUVcDnHz/cMqKOnLIFz22bww7S1HI2rNGHQyKubB7Gfswzvy3kM5Tiv7yslIcIZq1lrOXrPhJ80sn+xGKBH3pDPi5uRRD/Mo7FbmiP66DMk7zS5SGmbeBPny7JJlmzye4LBmUHK15J0CbcZ4/oTCEoN0NPVHybwsWeAvquYhBAHZk9bzr5rJQYRU2fIGWDMf/CTngF6OU35idwMj+BefpvJ1qV65E21lGW/onQA1h8oBnRLyQIdZmO56iiPTT0++mcd8UsQ+FIJCDHgS7WMaJcg8HcgoK7m0fH1WI78ntkOJ7mf14xc00NZ7GHWWYxghOU2guPe0txWSJzvGmYO/zdjLTdy8Owdnic84c6Z/ezaF8pdaYsxE29LU3S6hscEbJjNRCtvbqQbM+mjayHxtBMLxi3AKybNmOUaDcXX97PabBL2x++SI/m+a6t5eGw9cybYsD8mnaq6d1TX15J96xibZozgm1Gz2bg3jNinCTy9HcZB9/2E3smktldDe0YkXg7rcd4bzM1HCbxMf8XbvGKqGjvoU71vrLbmkSGB4MRF3lxLM4lV11Ry/+h6Zg0fxwqfa2RU1VOechFvOzNGT17PqTsZNMpkVD4NZceCMQz7dip22w5z4dY97l4J4pBPINdflNCl0aNrlMIJ5jFx/i6ik2sMOyoo3j0haMs8zAyJ3sqQSU3Sy8i7fpAt610IflhEp1bKtZhMxI5FTJ7jysXE/ph+TSlxh9cxc+IyQ0y/yXrUBL2S6sencLb8nm++n8lazxBuPUngWWwEh3fvJ+jaS95JSRSlHRJcrJlsuZMLCVXGxZamjHvH1jN7wlJDxv+WTxZzJtUY3mppTYnAw3oKlpuDeCQlDjT8X0N1/Cm2zh7BuMVeXEqupKEynauSy/73k1hzOIa0ehkKhRpt/XNDAsFJ81y5kFj9ftEnlaNrJjncFZvxc9gZ9owqjXFf6RdhLthMnm2IhS8ybIGop7cwFj/nDWz3iyWvXYOm+D6RZy8Qk9IftqCpJSHUFcfNh7j1qv3jjhhbXfKAqLMXuJrc73miqSMxzA1Hp4PcyGqkNjEM1wWj+GbENFa5neJ6fALP717guNd+Tl5KoKJHQ2fOLY7vWM9GtxPEPHhGSloWb3KLqKhvQ6Z8D1RddJeADdOwWOnLzTf9cdSaMh6dcsJygi37LqX352zQGJP+WU5lmXc0me06VHXJnHdbxNh/jWD6MjdOx8ST+OweUcf2csDvIs/LetDqOnh1ZT8rJ83AKeD+YM6AKmnczpmAjds5khu16BpfErXbhqmWWwiKL6axoY66uhrjmF9ixuy1R7iTKyWzA2lB9CjQDQfHQ1x73YZGU8GzK+eIOJ9ArcFDQUtDSiTeDg54R6bR7xH/AWtNUwZX9y5lwrcTsdt1gZR3Paj7akm5sJslPwzDfPkeIl/W0lyezCXPxfz41XdY2O4k4MojEp/f55L/fg4cPWfYZvE9TZMqdB1k3zjEqgljWbQ9lOcVzdQXPOfCnlXMHDUWq+2hJDZI+73r6Xpzi2OrpjBD6mOR0f9BW/2CSNf5TJ63jdDnJvOCSRXSW035E8K3zmLq4t1EZfRv+aipIvHcTuaPW4jrmRcYvdy11BqS+U3GZudZkvtd33Xt2dw+vJKpFms5ervYsDDVViUReyGUsPh3Rs8TbROZMQdxXudKRFILmornnN82m6nW7kSm9Qsp2nckRboZ5tQdwQnGBSNa6l
Ki2bNwMov6wwL0qiZe3/BltZkFK7yiyTbsmyglPU3kgtdGNuy6QEbzQFK9jzqrayX79mFWTZrGuoO3KWhqprGmkjcJ1/BZPIrRMzfif6+Ixro8HpzejvWYsVhtPc0TaTeXXiWaqiQuucxh8sIdRLwcaHcNqZd3s+jHuWw99QzjlvVa6tOvsXfRJBY4nSahWUPzq1v42o5i1Iz1HI+T6sjnYdBObMaOZYHTSR7XyujtVdBTGE/wRjPM7Dy50p8zQNuQwc29NkyauR6/hzVoVC3kxB1nnbkZS90ukGUQSvUoqlO5vH8Ta3aeJbXpcwx6KH4SirOtLS7hGUh5IAdfmmrSru5i/pg5OJ183m9346f6rgKeBK7HfPJSPKPzDHY2JEWM2sdG200ciozj6YtUMl7nUFBaQ1On/P0cqO+m+FkoTlNGMnPNEe4Vt9Bclso1XwfmjxnNzNWHuV+tQC8lb81/xGn7SYYcFjEF7315dK25PDy+grHDzbH3uUVBUwPFT87iaTOOH2eu4/j9WmQyGR853xgSn7YWPCLM3Z5VDns4esyfk/4nDD8BR/bj6bmPY1EZ7x8Q6LspSQjDceKPLHQ6Q8ZggdL2o28MuViWrfMmOmtgy0hpQummNDEMp0k/ssAxlPTBYwbJou8pJemsI1PHzMUpJO3TdqKmPjsWn5VzWH3wXn8eIj09VVlc22fPqi3HuVc4pMT1vhLxThD4gwgIMeAPAi+qFQQEAYmADkV9Jjd8NzDvx+8ZNXI802bZscPVAbv5c5izyInDFxMpbuqmpyaVS14rmT7yG77+138YPmIiVg7HuJlZRachA/THRHV0Vr3m4ektWE8cxlfDprNuzzkSi0rIS4nGe6k5I//xHZbrD3AlrZiiVw857TSX8f/6f/y4wJnA+wU0yRRUxwew2fIHRo6Zjt2OYB7l1CPrrSVD2kJu2miG/+vfDPv6eybP28Sxa+lUdqjQ6fXICu/iv2kOP37zH4b95xuGfz2M7775lhHfjWPuphPcfVtPa/UbHp7awqIJw/jnN+asdg/kfnYN7QrpDk9FvbTAtRnPd//+lvHT7HA7fZmIg1tYMm4Yo6dt5OS9HBplrRTFh+BqPZmR//qKb74eibn1TqSkc419ajrfveVx8HZspwznq2EWrPYI41lhIz1qFe1Fjzm7256lyx3YcyyYkGPeeHgc5kJ8Ho19WrRd1YaYafvpI/n3t9NYuyeSlIJC3jwOZsfC8Qz7x2gWOPoTl1NPzxArIp28nlfXfNk08weG/+tfBk6T5qznUHQK5e0qNF3VvLp2kDUzjOXbe14gOb+AN09DcLGawLf/GM38Tce587ZuyPJNLa7rfMWVY4c5FZlAlYGf8VNVYzrRe+yYMuzffPeDBUu3BRB19ihuSycx/DsL1h+5SXLWK56E7GDplG/559dTWLb9BLFZlbTK9ei7a8l/GoaHnQUj/2cY5ktdCHmUT32XGlVHKQnnvNlgu4xNbkcICfbjgIcHvmfu8bauF60eNMUPCHFbyzKH/ZyJiSX20kkOeB7kTGw2DSbtNO2LpuQhZ9z7j7kqHXMKH+mYO69pkOvQKRp5e9uPLXPG8p2B6wgmzrJn34UEiluVaPV6+sqfc9bFmgnDpPH3tcn4+5HZ9j5cSa2iqS6f+NPO2E78mq/HWbHVL443BUXkPgnFzXYKw/8xEkt7aUvGXPLfJBDhvoxpw77iR8sN+MZkUNHSRePbOE5tnsuEb/7F1//+hlE/zmLNngieF7ag1HRTnxOL/8a5jP1/wwzJEMOevqUwN4nI3XaYf/3/GD3DnoNXMqluqSU9yotl44YzZvJCthy9RmpFNxp1JxXJ0fhutGO5/U4Onw4h4IAnu/cHcSerBpmUoVtTzrNzXmy02cDewCvExV4h+KAXB09dJ6NWPnS8vU4WZMliAAAgAElEQVRFW+FTItxtsRj1HaPGTGKW9UacN61l9aLpzFnpQXBcFlXtPbTkPSDYeT4Thxn7OFLasm9XGE/ym1FIRh7yJWU9f8klz6VMHva1YezZbvMj+IQvbnYzWbjWm3Pxbygpz+PByc0sGvNPhk+0ZntgPNn5+aRF7cXebBhffWfBKq9LpFd2vH+CbKhPj6xRSgrnxsop3/CfH+aw8dANMvOKKUw8h9dKC777nxFMt9vF+YQ3FOSlEOW9hlnDv+KH6avYF5VMdm4eKVH72DRnFF/9ezxWm44bx31ZElf3b2DJmt2cunSHuzGhHPXeR0B0MoWVxSSe9WD11G/4z2hL1h+8TkZeMUVJF9i7ehoj/uc7ptm6c+55NgX5qUTvW8tsqc5pK/C+mExJUx+q7mqybhzH2W4Jq7ccIDDkFEe8PNjnf5XUqp7BXSM+xaqh5c0djq+ayPffTWD+ugNcfFZMW0sBj045Mmv4v/j2+8ksXL+f0LAQjjtO57tvJmDrdob7GbkknfNkjfkw/jNqJmv2x5CWW0xJShQH1s5gxP8MN+SZCH/6moKCdK76bGDOd18xyswOz/MvyCt8Q3zgZkMdw0ZMYuG6fYSEheK/eQbffT2eJS4h3Mt4y7NQF2zHfcWwsXNx9LtLZn4Rr64fZtN0af6dzBK3CJJLW1H01JJ9J4Adyxazwmkfp0NPc8zbA+9jl0iWxv3nhhU65NUvubzXiU27L5FjWF/qUXTU8jbuNLuWTWHY/3zLlIWb8buZTF5tN72t78iIOcy2hWP411c/YGl/kBupZbTKGnh7+whrJg3nm399zbdff8Pwb4bx3bDvGDvZmh2nHlBiSDSqR9GYQ9zRNZh/+x+Gj5rCwo0HOBUYxIGVFsxe5kbQ7Rdk52dw/bAjC0b9g69HTWf1nosk5NQYBQtNF+WJ4eycNYKv/z2CyZb2eJ08Q8TpHcz55mvGz3cm8FHZoHeg0fZaOsteELFtJiP/3z/45z/+yX/++ZXJjzSnuBOVLeVOkBjUkRsfziGneYz5n38y2syOXWEPSS9sQKbX012SSITjVH6Ytpoj9/tF8c56ch+Hc2jzfOMxU5eyK+wB6QX1xnbrFXTW5/Ak/BDO80fz1f/7Hgtbd84+SKOwTvZ+iGqayXt4gm3LnAhKHtgZSUtrXjwnlo3mR8tNBCUNLf6+L0S8EwT+GAJCDPhjuItaBQFBYICAlPle3k1TRT7ZWdkUVDTQ3tpEY1MzLR099ElPbaXtsiT3374e2mrLyMvOJq+kmubOPlQaadusoV+GvZflMro7O2jv6KSnV45Ko0WjVtDb3UlHewddPb0o1NL/VMh7uuhsb6ezS4ZcKWXi16NT99BcWURR6TsaOgbq06FR9iFrq6M87w3ZOcVUN3XSZzhGaoseWdkzrly4xI27z0h58YQHsbe4cfUyF8+eYr+TM0einlHQrkLV19++9k56pH2zNcYt36RSdBrpRqScwoJiKuta6ZErUXTVU5GXQ2F5PZ0KYxu1Un866qnIf8vbvFJqmruRq4zZ6j9hIJMYGJkZtj3q7aS5ppTC3FyKymtp7ug17N1tYCpl6Vb00tMl8euip1eBWqNBo5Ij6xrg14fyszbQGb/bLrXtD
dlviwyeEb0DnPqzgBvLl+xjUv6gfX6qfBO765W0t7bQ3il7/0TLCBFlVyOVRYUUV9TS2t2HQtFNY2UBufml1HXIUavVqAbGSXsn3bI+4/7lEgRpe0qpv/3t6eyWIR9wtZc+U/bR1VxLeWEeuYVl1DR1IFOo3mexV8kNY629vYX6qjJKSiupa5Xsoxl6kSq1eahjWkyP0aOVvtPeQFXhW7LfFlLZ0E6vdK4YDUffu1RioyOJvvmE1OSnPIq7zc2Yy0SFB3Jo2w4OBt/mVYuqv9/tdHR2I+tTohmw7yB/6fzQoJEygcu66eyQzg+T81KrQtHbQUNVITmv31JY3kC7TIHa0BApg7iSPsN51cEAO41GjaK323CudQyWpUPT10ZtWTEl5bW0yZQflCHvaqGuvIi83ALKqhvpkCkM49g4AvrLa+ugteEd5SWlVNa00NWnGjrJWP+wkca/UtZKbUku2a/eUlzdTGtLC82NjbR19xpsJAkrxvOkg8Z3ReRmv6GgrN6kj/2FDfFLmrMU0tiTxn5uCbXt0rnUSVtLM+3SvKPSoJX2h5fmgI5+G8hVBhsYzrvBeUvi+d6baKAqvU77ftxK52eflExVOj9N5rduYz1G5j10SfV0dRvGimTrwfO7vZOunv5xr1bQ191JW0crjdXllJaUU9NsnN+0hjp76TG0TZoT+uuU5qAe45zwoZ376+w01inN5QPnVHdrA1XF+eTml1DV0GaY3wxb2Q10cIjfOo2czvoKSgpLqG6RoVBpDVvaqSU7lhVRUFxFc1cvcoWMtrpS8nIKqGqR5rT+vg62W5prtBjmzp4uw7Xg8+1WodFqUMvaqPuojvZ6qY58KpuNdRjnEcmW7+0h7XQwMMd1y+T9iTV1aFUKetoaqCrJJzevmMr6n8dAr+2mPOEixz33ci6t3egto5PmIpN5SjqvlGo0WunaKc3jfcikebz/mqdUa9EpmilMvMW5kCji09JIfHSP2JvXibl0njPHPNnl6cPZ5/07WUh272mhuugtr7MLeNcqo6e7h7bmRtp6pB1v1EhjTLpGS2Ns4HohXW+N12c9WnUv7fUVFOcVUtnYacit09vRSHneW/LKm5BJtvzI5nqtNO9I46qd9iF+Ok227NNLDKR50WBP4/nUK1cNJhjWS2OnqZqy8ho6lMbzybD9sEpumKOkOgzzoBQmMXhNM957SDvSDPAzjBOpv9JYNrz0KGpfERvgyvbjj6iRdrHpf+mkPjdUUlZeT4/603N44HvityDwRxIQYsAfSV/ULQgIAoMEpAu5Ybs1aeEv3YBLP4OfmrzRS8nVtEiJ8Yb83OSrv9VbqW1D16c3JC7T6nToTRqja0kj0suToxefUtIj3SRpUKtVqFQqlEo5DQmx3EvMotB0W6nPNVbq7wfl6z/T/6Hb8rliTf8v3RBJ7A2ii+kHv9l74w2pVvshp9+s+P6CBsbNp+VKbIw2HPjMaNPfqD2SaCTx0+oMAtJAHR/+7rfPL2Lwv3Eb+nNd+xtuHd/PwcDb5HR9Ov6a0x4T//gFr03yOHzY1l/+lzSGBs7LX370wBFSfz5/rn0wRwwc8tHvX94OY52GuWdg3jE9mU3K/+VlSwd/dF7212FS7Bf8doCNNK5/h2ZK541OOm8kAfTnV2CwwwdzopHzx2PHaK/fsu2fjs//cx2/ioEebW8NWXEXCA6+SY5J3pqfbSUpp0jSRY7v2kN4cjsqrSSIq1GrVKiUShTtBWQlxXFnQAwwFCz13zjPDZhLmnN/yesT2/X3X7q+/jdeUnuHvpb/+tr1vbXG7Q4PnCetUfmJyDtQ56+vQRwpCPy+BIQY8PvyFaULAoLA346AHlnGeTwWz8JqewhPSjtQDdzn6BS0FD4nJvIGz9/U0CseFPztRsfv32E9fTk3OGI/h7nrj3Inr4XBaATJe6L8JbHR13jwsgxDmoPfv0GiBkFAEPitCeh1qLtrePv8NjG3kigb2Kv2Z9ajV1SSEuWK9dRFuIanUT+YdEePuqee3Kc3uXLlIbntQ8R//cw6/g5f08mbKHpxnfCwG2TW9v1EeMffgYbo45+VgBAD/qyWE+0WBASBL5aArquI+yc2s3Dc94waPZlZcxdhu3gxdsvWssP3Ik9z6wwugwMawRfbEdGwPyUBXU85CWfdsZ00kpGjJjDD0so4/uzscd53lntZVXQO4ZL7p+ysaLQg8HclIIUpKTqorSqjrKLll1HQy2l4cwe/NdMY8+33jJ8yEyvrJdjZLmXVBjcCohMpblGIxe1PUtUha2ugLDeX2m7lT4Yk/WQx4kNB4A8mIMSAP9gAonpBQBD4CxLQS7G8nTSUvCb5wS2uXblGbHwaBVVNdPYqUEmu4n/BbosufSEEDLkeumkqf0tq/G2uX4nh9oNkcsob6DDJGfGFtFY0QxAQBH41gYFQi1/qZibl81Aha62hKPMZ965fJeZ6HM8zi6hp688JIi5S/6tVDKFLmp/I//K/liC+IAj88QSEGPDH20C0QBAQBP6iBKT4SikxoRSDqVJrDDH5f9Guim59gQTE+PsCjSKaJAh8SQSkGHopOaJKiVL1Ptnel9RE0RZBQBD4fQkIMeD35StKFwQEAUFAEBAEBAFBQBAQBAQBQUAQEAS+OAJCDPjiTCIaJAgIAoKAICAICAKCgCAgCAgCgoAgIAj8vgSEGPD78hWlCwKCgCAgCAgCgoAgIAgIAoKAICAICAJfHAEhBnxxJhENEgQEAUFAEBAEBAFBQBAQBAQBQUAQEAR+XwJCDPh9+YrSBQFBQBAQBAQBQUAQEAQEAUFAEBAEBIEvjoAQA744k4gGCQKCgCAgCAgCgoAgIAgIAoKAICAICAK/LwEhBvy+fEXpgoAgIAgIAoKAICAICAKCgCAgCAgCgsAXR0CIAV+cSUSDBAFBQBAQBAQBQUAQEAQEAUFAEBAEBIHfl4AQA35fvqJ0QUAQEAQEAUFAEBAEBAFBQBAQBAQBQeCLIyDEgC/OJKJBgoAgIAiYElDTWZtP2qNrXAw+RdT9V5S360y/IN4LAoKAICAICAKCgCAgCAgCv5iAEAN+MTJxgCAgCAgC/00CGmTN5SSecWPFtLlsDY6nsEf/32yAqEsQEAQEAUFAEBAEBAFB4C9IQIgBf0Gjii4JAoLAX4uAXifj9cVdLF+wmiO3XtOq/Wv1T/RGEBAEBAFBQBAQBAQBQeC/T0CIAf995qJGQUAQEAR+GQFVMbd91mBl583V1Do0v+xo8W1BQBAQBAQBQUAQEAQEAUHgEwJCDPgEifiHICAICAJfFgFN1QP8NixgqcsFXlQqv6zGidYIAoKAICAICAKCgCAgCPwpCQgx4E9pNtFoQUAQ+PsQ0NH4PBDnhYtwDn5IxtsUbgZ6scNpK96Bsbx61/X7eQpouqnOekTM2RCCTocSffU8gUeOEnQlieIOkcTw7zMGRU8FAUFAEBAEBAFB4K9IQIgBf0Wrij4JAoLAX4eAroPUsJ0sWbAGzwPHiYiM4tqdm4R7rWX+RHM2+t0l93dIIqCTVZAQ4cOeA6HEvaqho7uK52Gu
LDVbhMf5BCpVfx3EoieCgCAgCAgCgoAgIAj8HQkIMeDvaHXRZ0FAEPjTENDLXnNplx1zp1mzLeAeWeUdyJVKurIu4mk7hRmbgogvlPGb7i+grCHprAebHPZxMaGCHo0Ova6Nl2EuLLVxJOhBIb2/aYV/GnOIhgoCgoAgIAgIAoKAIPCXISDEgL+MKUVHBAFB4K9IQFlwnQMrpvLjPDfOJ1TSqzWuwhXZ0XjZTmHutggSyk0e0+u6yLsfhs8We+xXrvrJn3W7Q4l/W/9RmIGKd08C2b54KTuDH1HcozUKDYoCbh1Yw/L1R7nzpgMRJPBXHG2iT4KAICAICAKCgCDwdyIgxIC/k7VFXwUBQeBPRkBDRdwRNsy0wP7IbbJbNf0eACpKbvuybsYsHE8/pLDH9DG9nIb8ZB7GRBEVefEnf6Ljkimo7cJ0p0JdeyaXdtsxZ4knl5JrUBmK1tNXcIND9las3nuF9Aaxn8GfbCCJ5goCgoAgIAgIAoKAIPAJASEGfIJE/EMQEAQEgS+EgLaOJwFOWM3bROCDArr7H8fru99wxWsF8xfv4mLSO+SmWgB6dBo1KoUCxf/2o1Sj0ZkerKM99RweNhbY7IoipUZtBKFrJe2cO7ZTF+J2LpEqE0eEL4SUaIYgIAgIAoKAICAICAKCwC8kIMSAXwhMfF0QEAQEgf8WAV1rEmHbFmG1PoB7uV1G13x9L6X3jrHZbjV7pIX5gBv/b9IoDSW3fFlnMQOH0w8oNCQG0NFR+JwzOxYydZ4jwQ9z6VSqUKtFoMBvglwUIggIAoKAICAICAKCwB9EQIgBfxB4Ua0gIAj8SQhoZbRU5vDywXUigy9wL6OMlv4H5r9vD/R0Z17AY8l0bL2uklYnuearaMqKxnfbFrxDH1HQoqA/hcBv1BQNpbcPs8FiIssPXCOrWU57STKPH57nwEZbltjvJfjcbRLT3lLVIUIFfiPoohhBQBAQBAQBQUAQEAT+EAJCDPhDsItKBQFB4E9DQN1NU0ki4W7LmT19HQH3cmj7rzwU19NdcI9TW5awbPNBzsTEcS3cn8O+p7n6JJe6LtVvLAQYLSIvfUDAptmMHT4G87nL2ekXQ/LbJ5zfZce0sZY4Hb9Ddk0Pmv8Kgz/NKBENFQQEAUFAEBAEBIHfiIBOp0OtUr/fKUmvRdXbSUtTC93Kn74B0Wo0yPsUvzDRsRatRo5c8dNlf9I9nQ6dWsV/5RnRJ5X/Nv8QYsBvw1GUIggIAn9ZAnp08nyuea9kgd1erqd/nH3/9+u4XqOkr7ud5ppyiguLKHvXSGtXLwqVlg9C/X/DJui1Sroby8jJTOdVXgUNnXJUGgVtNSUUFpVT3yFH/XtV/hv2QxQlCAgCgoAgIAgIAn82Alr6WspIv3+du5l1qNAga8zlrp8DiyaNZvT3YzCbsw6fqHTqlR/2TStvozwpCl/37bgHJdHz4cef+UuLvK2M5KhD7NrmQlBSt8n3lLSUPOPUuhlMn2rGNNMfi4WsdT1Hems77149Ji7+LU1/UkVAiAEmJhdvBQFBQBAYioC25iH+GxeyzP0iyVUfXX2GOuA3/p9ep0Or0aLV6d+r5L9xHR8Up9eh1WrQanXo+/MLSm2QlPqBvz/4vvhDEBAEBAFBQBAQBASB/xMBLbK6tzw+f4IzDyvoUWlQtpaQfPEAHvvDuXUvlphAbzbMHM1os2V4XS/FeEemR9XTQsnL65z1dWDuxBmsOPwM02X9kM3Sq5C1lpB2I4yjDrOZPM2OI0+6Br+ql9eT/+D4/2fvPriiyraG33+U+977Puec56ROdtBubducc84555zFjFlRxIQEA4o5YiaIKEhQMkhORZGKyrn2/44iq9ittrao0zFqUFC79p7rN1che+611mbm6Lms37qLvXv2sHfPbnZuWM6c0YMZsyGEMocLh66I1PBgDgdEUPIJFgSkGNCQcnkiAiIgAs0JuKiIOMCCwcNYeOQez01NV99vbnv5mQiIgAiIgAiIgAiIwJsLKNirnhN5ahtrdt2k0OxEcRkoTo8k2PcKGVozVpsNq6GYZzd2M6HdrwxZHERm3cm3orhwWMpJDz/MzO5dGfMmxQAUFJcDS3k6D47MoFfnkU2KAQrm8kIS71whPE+LwWjEZDJhMlaTH3+NvXPHseGaqvbWzIoTY+FTQnw82HI+E/ObN7pFbCnFgBaRBglCBESgxQooep74LWXkEPft/VLQVOfx5MohNi5Zxs5TETyv/ATLwC0WWwITAREQAREQARH44gScOvIenWTT9LkcfKSrne/vNFBdlkVqhq72pLsGxUFZyh22j/yN/rN8SWr6J5hLT95jf+b2fNNiQK2yS5/Hk4C59HmhGACKy4ndauWFmyfZy0i/e4Al4z24VuJsSJNiU5N2dx+LJ63jal7ToBo2abFPpBjQYlMjgYmACLQIAUsywWvGMmzyNi7cucfN0/74Hd3O0jF96DXOkyvx6ib/SbkjdlH97AoH1sxg3KhRjP29x9jFeF+Lo8giow1aRK4lCBEQAREQAREQgb9cwFGRTujeOYyYuItwbf3fRO4r904cL9w2yU5pcgie43sycWcYVU3X+3vPxYDmEOxladzzXsDEtZepuclT/UaKFVXSDbaMH8DcY88+qdEBUgyoT6J8FQEREIFmBBzPL7N10mAmLN/KHr8bhMdkos67zb6Z/ek/eS83k7WvzOO3qNKJvnuV82fPcu73HuduEp1eglbu0teMvPxIBERABERABETg8xdw1dzG2GdGX4YuDuL571xYV+zlpN31ZtGYhQQmGV/8++uDFwPslKfd4+CCcXhcLOLFP91caHMe4zu7DwPnHSfjd9rQ0vIpxYCWlhGJRwREoAUJOCm+vZvZA/szYsYWzj5OR212oH8SwIphfRi/+QJx6hf/O3AHXzO0zGbFarH8/sNqw+5wvfifGdTMS8tITyf68eMW/XiemVkTawtKmIQiAiIgAiIgAiLwKQkoFooTLrJ6cBdGelxF1fRq/wvtcKEviOXSriWsPf6Uakf9CIK6jT50McBeTvo9dyFiFRcLX/7bT8FUmMDZxf3pPtyT0IbRDS80oEV+I8WAFpkWCUoERKBFCLgqiDw4j8HtOjF99zWSKmy4sJByZi3je43A4/Qjij9A9be4uBjvffuYPmVqi34cPniQosLCFpEqCUIEREAEREAEROATFFBMFMSeZmm/LozeeJOKZosBCg5dPgnXj7D7SCh5RscrF1L4wMUAe3ka970XMH7leQpergWgYCl+xuXlA+g8cA1XX1/RaHEJkmJAi0uJBCQCItBSBBRDLIFLh9G9/xICwnMwuf+DcmRzzXMyA4Yu48SDvLrb2jSN2EVVTBCes4bQo3MXuv/eo9sEtgQ/Iu+lNQOcTicGgwGNRtOiH+4Y3bHKPxEQAREQAREQARF4JwHFSnHCJdYM6cLIddeaHRngMpeRFXURv8B7ZGndF2aa+fdBiwG1UwQOzB/DyrN5vHodSMFc9IwLS/vTZfB6QsqbjbCZoD/+j6QY8PFzIBGIgAi0UAFrSjDrx/Rh5Ko
gHhXaaqrQrpK77Js5kOELjnI/UUW13ojZ/uJQNVtVIenxj4iMiODB7z0ePCG9qBLjp/N/RgvNlIQlAiIgAiIgAiLwaQq4qMwIZ//U3gxdcprsl666K9ZKcmOvc+LEXZ5XW6ldT1DBYtChyi/EUP8n2IcsBrinCNz1ZuGoZZzNebUUAAr6/FhOzO9H78k+xFk/nUxIMeDTyZVEKgIi8JcKOMi96sm0fiNYfeIhBTW/2F1URx1h8eBBzNwVSNDl+zxOzEXz0n9cKC6cDgd2u/0PHg6cLuXVoW5/aTvlYCIgAiIgAiIgAiLw8QRs6hRubpvK0Il7eKhvEodNQ07UabYuWs7ugGAuXr7MlcuXuRgcwBGfffhez21cyM+lJefRUWZ06croLXdpOm1fMVeQHXGS3TsOcTPD0OQA4NLmEO07nR6dR+B559VFod0bu6cI3Ns/n7FLTpNje+Htdd84qMgIY++UPkzYGfHCsZvbuiX9TIoBLSkbEosIiEDLEXCVcH/vLIYMXcSxsCxMNZG5KL61m5k9f6Z9/znsvZJAgc7R/HC1ltMSiUQEREAEREAEREAEWq6AQ8PzMF9WT56Hb7y5Nk6njsKYk6zs/QP//cc/+eq/X/Ft/eOrNvQas4Er+bVX6R0mDXkxFziwYji//OtbOgxZTuD9eLIras/cnZWZRHiNp2PnUWy4VFDn4MCsySf2wgFWD/uZr775leHL/Lkfn0Xd2+q2c08RuIv3vJEsO/WcZmsBTg05j/xYMWo2R+P1n9RFHikGtNyPhUQmAiLwUQWcWA3VVFXpMNkaF6pxGtTkZqSTla9Ga7HXDVf7qIHKwUVABERABERABETgExZwYS1P5X6gJ2u9I6mqmT7pwmkzoa1Qoy4tffGhLqOiSo+lftkixYXDZsKgraLc/VqlFqPFhsNVN4fAZcNYWURWejZlpvo3ue/+5MBmMqCtLKesrJwqrRGLzUH922pB3X8PlpGfnkaBpvG9jdgKtrJ0wn1XsWRvOGpb/byFxi1a8jMpBrTk7EhsIiACLU9AUXC5XLiUT+uXfcuDlIhEQAREQAREQAREoE7AZUOT84Qb/gc586TyvY+6VOr+fnuXv94UxYXL6XypSFAbt2IuIzPiBDt3BPO08jWLG7bgJEsxoAUnR0ITAREQAREQAREQAREQAREQgS9BQHFa0BQmExlylXvP1M2s2t+yFFzmSnKjr3HqzD3SKiw43qXS8JGbJMWAj5wAObwIiIAIiIAIiIAIiIAIiIAIiEDt0H2L0YjOYH7vowPet6/itGPRV6M1WT/JQoDbQ4oB77tXyP5EQAREQAREQAREQAREQAREQATeWcA9rP+T+Kd82neFkmLAJ9HLJEgREAEREAEREAEREAEREAEREAEReH8CUgx4f5ayJxEQAREQAREQAREQAREQAREQARH4JASkGPBJpEmCFAEREIG3E3AZVGRE3yT4yB62bDtGSFwe1ZVZRJ3bz7olq9l/7jG52uZukfN2x5GtRUAEREAEREAEREAEPk0BKQZ8mnmTqEVABETgdwUUcxUFj0+wbkIvuo3ZzOkrt7h59gSB+zyYM7QbfWfs42aKlk9kRt7vtlVeFAEREAEREAEREAEReHsBKQa8vZm8QwREQARavoDiwpJ8lk3j+zFy4Xp2BdwiMjYLdcoFtk7uw9BFvoTlWFt+OyRCERABERABERABERCBDyIgxYAPwio7FQEREIGPLeCg4OZu5gzuz9ApGwh6kIzKbKc88jBLhgxm9r4Q0vQyLuBjZ0mOLwIiIAIiIAIiIAIfS0CKAR9LXo4rAiIgAh9SwFVGhM8CBv3yGxM3nyNeZcHl0hN/fBVjB01m56WnaFwfMgDZtwiIgAiIgAiIgAiIQEsWkGJAS86OxCYCIiAC7yjgqo7Gf+lQOvecx+HbmRjcawVaUrmwcSLDxm3kbIwaxzvuW94mAiIgAiIgAiIgAiLw6QtIMeDTz6G0QAREQAReElAwPj2Fx+ieDFsWSGS+tWahQEf2NXZPG8L4NSd5mFGJ1mDCJDcUeMlOvhUBERABERABERCBL0NAigFfRp6llSIgAl+UgJW08xuZ3Hc4q49HUmhzN95Faag3C4eMYPH+QM7decCTlEL0MlXgi+oZ0lgREAEREAEREAERqBeQYkC9hHwVAREQgc9FwJHD9e3TGTx8GYERedTeM7GD12gAACAASURBVMBJ4c3dzO71Cx0GLuBAyDNKjA65teDnknNphwiIgAiIgAiIgAi8pYAUA94STDYXAREQgRYvoDgw6zRUVGoxWRvnATgNpeSmpZKRraLabMcpNxNo8amUAEVABERABERABETgQwlIMeBDycp+RUAERKClCSgKLqcTl0uqAC0tNRKPCIiACIiACIiACPzVAlIM+KvF5XgiIAIiIAIiIAIiIAIiIAIiIAIi8JEFpBjwkRMghxcBERABERABERABERABERABERCBv1pAigF/tbgcTwREQAREQAREQAREQAREQAREQAQ+soAUAz5yAuTwIiACIiACIiACIiACIiACIiACIvBXC0gx4K8Wl+OJgAiIgAiIgAiIgAiIgAiIgAiIwEcWkGLAR06AHF4EREAEREAEREAEREAEREAEREAE/moBKQb81eJyPBEQAREQAREQAREQAREQAREQARH4yAJSDPjICZDDi4AIiIAIiIAIiIAIiIAIiIAIiMBfLSDFgL9aXI4nAiIgAiIgAiIgAiIgAiIgAiIgAh9ZQIoBHzkBcngREAEREAEREAEREAEREAEREAER+KsFpBjwV4vL8URABOoEHGhVOaQ9jSMuNrbhEf8sg5IqA3blPUO5tGSGn+dE0E0S8qpxvOfdf/zdKdj1KjKjQzh7OZTE3CpcHz+oVyJQ7HpKn8dw6+xlQhOyqWyJQb4S9Qf4gWJHX5pJ9I1L3IxIpMDwvjv8B4hZdikCIiACIiACIvBZCUgx4LNKpzRGBD4lAQd6VQaPTmxg5oBO/NyqDf2mbsIv5BmFGtN7P1lXDHGcWDmabh2msi8kiarP6iTUhTY3lpBDK5jUpz1th6zhdGT+ezf8s71L0eWTcPMIqyf2o2PrIaw+HkHe51eV+WMmxUhJ4m2OLBtHn7ZdmLzlHLEVn1WH/GMD2UIEREAEREAEROCjC0gx4KOnQAIQgS9XwOW0Yym9w97pvfjx//7GLJ+bpGhsOF0f4Cqpy0jR01Bu3Y0mo9SA8x3YHRUlqMo1aO3v8OYP/BaX3Yqx5C4+8/rTvvcSjofn/X4xwFFJqaqcqup3a4yjUkVpeSXVtrdomMuB1VhC6MFFDGnbm8XHQsn5EosBKDhsetKv7GJWz46M8ggiplyKAW/Rk/5wU2d1OeXqMiqtf7ipbCACIiACIiACX6yAFAO+2NRLw0WghQiYYjg2fxC//K0Hy08+JO9tTi7fqgkKLocNq83+bsUGWxERR70JuPyQLNMHKFa8VVua31jRxxCwbCgd/7AYYKMk0p9DfheIyDDy1q2xlRAVcJCA82FkGN/y3YqBuOOrGNX+Sy4GuPPnpPDeQRb168xoKQ
Y036Hf9af2MhIuBOAfcJVnurfsn+96THmfCIiACIiACHyCAlIM+ASTJiGLwGclYH6C3wJ3MaAnK05Fkf+GF6pdFi3qghyy80rQmGxvPD/ebjJhtthwND1HcFkw6I1YLA5cNh3lJUWUVhqw1g8fsFfw7OxmpnQfyKIj95o9AXaYNahLSijTGLE1O7LBhVVXTklBEWUaE/ZmLwQr2IxGTFYrDqcJTUUVWoOlcRSDy4qurJDcrFxKKo2N8dV1CMXgLgYM+4NigJ3KxAvsmNqLwXMPcCvV8GoxwGFGqy4kNycfdfVLsToqSb64nZm9BjJ/fwipr8x1r21nUW4WecWVGBoQG4J8q2KA01yNujCPQlUVRluzaDgtWspUJagrDdicTRP7mk+KYsNkMtXk22nVU1Gqolxj/JPrVDixaMtRFZdSqbfy2jAcJjSlJajKKsm85fPaYoC7TeWqEkor9Fhfu7Mm7VPsmGvaZMdp1qGp0qAz13dgd+3Bgq5cRUlpBXqr85WcK3YzZpMZi8OJVV+JWqWmymB7/XZ2JxZdFVVVWsxNRnc4rToqSl8ft9Oio7won4KSCgzW5vLpxKqroLRYRYWuGUfFhtn9GTHZUexGNOpiSsqqG2NwVPP8zkGWDBzA1I2nidO+3B9c2AyVqPKyySkse9GonlOxYzGbMJvtuCx6qquq0JqaWNZv98JX936rKFOpKNdZms+/04qhooS87FyK3XltpvmKw4zJaMRoVcBlw6CpoKxCS0MqXTZMGjUlxWo0zcbkxGqopCQvm7wid66bOcgLccs3IiACIiACX7KAFAO+5OxL20WgJQi8bTHAUUnK9UNs89iM16EAjm1fxuwZKzkYkoza/PIf/nUNtGnIiw0hwHMhE0YtxufmM8pc4NJk8+iiN2umDmPkwr2cCDjKnoUj6f3rL/zWbQJbgmMoMNgpe3aN/XP70O6rb+k8cALzV+7hTEQ6pVYFR3U2D88fZa/nFjatnMOE4WOYtSmIR7ka6usatrIkQo7tZtv2vXjv9mD+pHFMnLoQj03b2Ot/g7iMLLLiQvDfMp/xY5ey95A3e9ZOot9vXZm24wrxpVaq0m5xbNs6tuw6SKDvTlbNmslKrys8KzE3nLC9STHAUZ7EzQMLGPjLN7T6dQATZq1kz6lQUlQWFMVIYfR5Dmxcy8ate/HeuZYFkycz18Of0LRyLIqTiqSbHFw4gPZff0vHfuOYu3I3QfeTKbEoOKvSueu3kw0bdnLQ/xh7Vs5lztI9XIorpmEwxZuODFDMFD06x5HdOzngf4bTh7excv56joQ8RV13XubU5RFzxR8vz81sWjWPySNHM8MjgPDMygb7pl3cXu1es8CPrfPHMXHRZvbs2c36SX3p/HNr2nUcxLxdF4kvNmCpeE5k4AZmDu7GsMWHuJ2qx1z2nOhzu1gyth/dRq3heFhO3TGc6PJjuB7gxfbNG1k7bxJjR03B49h9MiqaDHOxlpFy049d6zew3esgPru34zF9KD1+bv/CNAGnLp/Y64Hsc7dp9QKmjhrNtNW+3E+vaL5NuiKS7p1k9+KJTFywGa/93mybMYAuncex4UQkRVY9BbE3OOG1Fc8Na1g0aTRjJ6/C904aFTYH2sJn3A3YyuLx41m0fjteXhuZ0a8Tv/70M50HzGB7cDSFBhu64mRCT+1i+aQJLNq4G+8D25gzoBPdxq7lxINCrPpC4kOOs78m7oVMGz2aqSsOcye1nBoFxYIq/hr+e7bj5XuaM767WbtwLfvPR1NSk08n+sJ4bp3Yx/ZNG/FYMIXxoyax6tAtUstsuPRFJN72Z/PcMYyetZ79R0/g5zGR/h3a0r7zcJYfvE16tR1N5gOOrx5Bl+++oV33EcxavgP/a0/INyk4tblEndnPJo8t7D/mh/eGxcxdsJkTD3Jx17QcehWpYafZu2wKk+Z5sHPfIXbPHUSXTqNZfTSU/CZFj8Z+Zac69zFXfPewc9cBDu9ax5JpU1i48xxPiky1n03Fgjr5NgHb1rB2w06892xi+YzJzFy6jys1nw0FY2ka4ae2s3TCMKYs3caR05c4tGoKw3t1pnPXESzzucbjhEiCdyxkwoDudP2tKyMXeXMzXVv3+VewqFO5G7Adj1Ub2LXPiy1LZzFt6hL2XYyl6G1H8DQ2UJ6JgAiIgAh8xgJSDPiMkytNE4FPQuCtigEuqp8EsmbMEGbtuMSTQh16VSg+84YzbKY3t1M1zY4QUMxVlGTcxWfBUDq2Hs7mC08odZ+AuGzoUs+xaXwXWrfpzfT1AdyJTSM1zJeVI7rQceRmLsWpsFp1JBxfyagOXZi2+yIxRRoMZhtOcw53D+3EO+A6cQWVaCoKiDi8lNFduzJt93USyx1gL+Ce9yKmz9vG2Ye5aKpLeOS/krGdfmHA7C343kokv7iEkszb7J83mPbf/cqQ2dsIOH6YXSuX4OkfSkpOLKfXTWD4VE/OPcpHpy/lweGljB08jb3XEhtW5H+TYoDitKF/GsS60Z3oOmEb5x4VotGbsTlslD4KZP2suWw8dpe0Mi3a6kpy7h9l1Zg+DJy9j5vJFVhtep6dXsf4Tp2ZtDWYR4Ua9GYbDqeWp8GbmDp0MptPRZKv1aN+cIzVY4cwbfslntYvkPeGxQBX6UMCPVaz6dB1kjUGDNXZ3Pbz48z1hxS6T8os+YT7e7H/yAUe51WiqSwkKmAtE7t3ZsLms8SWvnrm5jBWkhfuy6pRHfjhp15MWX2IK2GPibrlz/qJvWj7Y0/me98itcqKIfEcWyd0ocuEbVx6psPpXt+i8hHHV4+kQ5eZHAjJqDnJtRZEcHKfF0fORpFXoaGyMIqT6yfSu9NYNp+OptRdEXJVk3ptLysXruHg1Weoq3XoKjK4uW8eA79v3VgMsBYSecKb/QfPEpVbgaayiMcnNzK1ZyfGrQ8iWlVfXmr8ZNv05eSEHWXNmI78+HNvpq0/zHHffWxctoFDVx8R/yAIHy8fgiNzqdRUUvToFFsm96LLqHWcelSAtiqfyIC1TOj0HW26jWOl9wXCHkVxN3ATM/r8QutuM9l7NZ6i8hzCfdcwoWMr2vWcxPpDAfjt38DK9Qe4GhXHw9M+7PcOIjKnNu6Y057M6NWJMWuPE1Vix1X+hHNbPfDYfZ6EKgMGbR7hp49z6tw9ch1gLXrMWR8vDpyKIMftWBTD2a3T6ddxBGsDIim22jHm3OXwogH8/EMnRi/x5srDZFIfn2XnzL507LeQQ3dzsdiNpF/zYk7PDoxYfpTQPA06owWHU096iDcLRoytKRxkafWUPznHzunDGLsqgKhSJ3ZDJXkPAtg4sTM/tu7BpNU+BB7zwXPFOvafj6H0lYvsTqqSQzi0cQ1b/ULJrtSiLY3lwo5p9Pp1GKv9IlE5HVQmXsVr8SyW7LzAU1U1Wm0VBe64Zwyg7/j1BEWXYnNYqHgSzLbxv/Hjr0NY5HWN6PQC8uMusW9ufzq268NUj2NcfZxJUUESIT7LGNF1CMuOhNcUUxyVydzYt4w5C3dwIb6Uaq2WqoIYLu6czeCe41h3/DHNdJ/Gj
iTPREAEREAEvkgBKQZ8kWmXRotACxJ4q2KAQnXsSTbNmc/WM9Hku6922ZI5s2ok3Qat5ExUQfOL5ikunDYVd/fMoF/7YWyqLwa4z9Mqw/GZ3Y/f+iwh8H4WJrsLlzmZ4LWj6dplOj630tApDtLPrWd85x7MOXS3bpqAi6rHfqyeu5gtR6/xJCmF1JRkYk9vZmrPn/hpxCYuxpZiy7/Jnul96D/jALdrhuQrmBKD8BjVid6zfLiVqsPhdOK05RGyYzp92vRjmV8oWVojem11zYmMrSqes54LWLjhBA9z3MP67aSd38jEbgNZFtC4Iv+bFAPcmXdkXGLrxK70nLafm3XTBBRDIuc2TWTYxE2cjS5tGDLvMmdzY8cM+v7Sl6XHwsgx28m8vJWpXbsxw+tG4zQBpZpn57azZN46AsKzaq602tMvsm1SDwYt8CU0u+5E9g2LAY6My+ycNoxRy48RkW/EhYOK5GckZWSjdipoYoPYtGgh67wvElNvf24Hc/u14YfBqzgVVdw4vaK+uysubPl3ODC3Hx0HLOXYvRyMNgcOm5G8294s7NeG9mM9uRhfiT37Bl4zetK9rhhQM+bEmsT5zePp0rWuGKBUk3BmM0sXrMb7/GOSUlJITY7l4q55DG7zHUOWB/KwyIkl6zYH5o9n6trjPCypG3qv2MgJ2Vtz0lq7gKCT6oSzbFuygNVe54lOrO1PcRf2sGjgz/wwYBkBEYWvtElxubAVhXJ08UDadZ+J17VUqo0GdNVadKoYzm1dysLlezn/OInUlBRSYi+yb+Fg2n3bn2W+YRTYbRSFHmbZgPYMmH+AW1kGbHY7NmMBYUeWMrTNL4xZF0SM2kpxuC8rBv1Cz2m7uJpcjdGgQ6vVoXKfVC+bz4pdZ3lcH/elfSwb8gvf91uEb2gBtpzbHJo/kqFz93E721CTz6r0FJKT0yl2aEm6tIuV85ayO/hRnWMcV/YvZcQv3zJgwSFC8524tPGc3zyOzl0ms/18IgabE5c1m7s+8+jfYQQex6OpcjnJu3eERX07M3ZdUOM0AUVPxq3DrJm7jP3XUnDPHnDk3OPo/H70nbyNK6l2FMWFvSSS46uH8muXSWw7n1hjqddq0Ztsr9obn3P34ArmL9jJ5RRj7evuES1xVzmycw8n7qajNT7n3qEFjB65kAN3CrDVDV5yWYuI9F/J8LZdmbbtUs3nyJF9D9/5vek4dDl+Dytqpoco1ufcOTiH3u0Gs+zIA9TuKR6KjbxQX5b068Y4jyCeGkxk3z/K0pEjWOh1i/zGg1Dy8Dgew9rRbaInF5OamRJU/9mQryIgAiIgAl+kgBQDvsi0S6NFoAUJvFUxwH0x34jWPR/aZKKq4Bnhl/aycHBH2vRazMkHubVDkptrnquSCO85DOzwYjFA0URyeO4AOg5cw7nHRbV/0DtyuLplEj07T8LrRhIaVzPFAMVIfOAKJgwbwZzVOzl8+DBH3Y8De9m9zZNth6/wJLsCy/NLeE7oQvuRnlyKK6/Zv6v0Pt6z+jN01kHuptcNJXaVcNdrFv07jGTz+diGofA1TXHPE9Zq0GiNGDWFJEdcwXvRCLr91JMFvvfJqhuN/meKAbbU83iO70zP6e4CQdNFBa08d5/8d/6BQStO8LDQ2nwxABc2kw6NRovRoKE4OZJr+5cwtmtres0+xJ3MhiDfaM0ARZfMhc2T6PlDa3pP9CAgLJ0yvRW7w4FTMZF4ZgPThg9l+vLtTey92LPdk20HL/AwvazZYfWuklCOLBhAt+HrOB2tbjjBc5SEcnhBP37uNpdDt59jzg5h34ye9GhaDLAlcWHzhIZigNXoLg5MZ9TgyazYdrA2/4cPc9BrNzs9PTl07gHpahPZIXuZ23cA87xvUs/wygKCZQaSL2xl1ojBNcPEG/vTPvZu92SrzzkiUtXNt0n9kIDlQ+kycBn+EXV9GAVT8iV2zhjJkIlL2HGwrn8ePoD37h1s2+LDubAU1HYXpZF+rBzcmeErAnioqp8b76D0gR8rB7Wh+/Q93Ei3UBYVyJqhnRi8+CjhhXXbKWZSL+9kzoiBTFq8tTEXPvvw2uGOO5iwZBVWfQYhe2fT98cf6T56OUduJaHS2bDb7ThMaVzbPYfR/cezdGuj46F9e9jl6cnBM/dJVtlR9E+56DmBbt1nsu/G89rCn7OYSP/lDO00lJV+Dyl7XTEAF3aze/5/NXpDNerMGG4dXsP0vm3pOn4L5xMttR+18hjOrBtJ177zOHjnNcXFut8vlud3ODhvHBNXBRLdcK9SBafdglGvx2ixY825j++CPnQduZaTT3QNU3rATkHYMZb3/YEe03Zx/bkDZ24ofov60sW9bWzdibujgAi/pQzqOJxVAdE1RQx33ymOPM7qQZ0ZscyPx+ocwv0W07/jCFYHxNB0zUR7YQSBy/vTussUdlzJbLb/NPfrUn4mAiIgAiLwZQhIMeDLyLO0UgRarsBbFgPAhSE/mstHvDjgf42YlPv4Lh1G5z7uYkDOeysGXPOcRK/Ok9j7umKAq5T7XrMZPGw+R24mUaHToWv6MLiH3rtQjGlc3DSBHh1Gsin4CSqrgi3zIp7TJ7N4301Sq+vGHtcXAzqOwvNCXM2aBk2T5jIUEnvFD+99flyNSibMfw1jO/V+T8UABV20PyuH/sJvE3dzLbF+HrI7AhflEYdZPKA13ecc4m6m4TXFAPemRorirhO4fz9+lx6SFBrIxnFd6OMuBmTU3ePtDUcGoNjRZocTsHI0Xb//mlbuIfAbT/EopxqHq4zII0sYMXQmXpcSmrE3YbW/ukheTWvqiwEj1nEmuqyhGID5KcHrx9Cp8wy8b6RjfINigKX8If7LRjBi2k4uxpW/mH+dDoPJXbyoJvbkWsb81osFB++Q3TB7wUlR07sJqMtqpo+MHjqVHediKW/al9zPDb/TprpiQNdBywmIqB8R4aLiUSBrRw1lmudZYstf6p86AyaLHafiLgb4NxYDaubP1PY8S9JFdkzsSJfJO7mSYq4rBnRm8BJfwovqigGuCqJPrGXckEl4nnnSbNwWdy4UB7q8R5zZMJGeP37Nd617MHG1H2GZlVgrYzi9bizDJ2zkTHRzjhbsTgVF/6yZYkAJkf4rGNppCCv9In+nGODunybUKfc54+PF4TP3SYi4wP7ZvekxfvMrxYBu/eZz6N6rIzEaP5MuNHFn8RzbnzGrjhNT/zlu3ABQMCZfZc/kDrQdsgL/qMom05hcaOLPs3VsWzqO2cDZp6bXFwP8mysGnGDNoM4MX3qMh+5pA3un0eWnwSz3fUj9jJya/l79lMvbxvNbu9GsC0rA9EJ88o0IiIAIiMCXLiDFgC+9B0j7ReBjC7xRMcCO+nkmBaVlqBIvsmPONFZ4XyGu2IDVnETwmlF07ftXFwPKCNs/h4Edh7HBfZL/0nRuxaRHb7RgcTjQ50dxfM0kRo6YyJKNu9jt6YmX/3Xi8rU0LJD/O8UAlzaFa7sXMGuJFxeji9BbLaSe38SkLn3eUzEArIln2DD6N34YuIqgqKIm0y1cVEYeYemg3xi0
/AQPCyzNFwMUHenX97FkxmL2BD+iUGfFknaJ7ZO60/ddigEuV80VVkNlDjGX9rFoSEdaf/MLo9ed4nF+EVG+SxnWaRArAyIpetnebMRgNDeuwN6kjzeMDHipGKAY4zntMYY+g1Zw/EER1jcoBlgrHxG4YhjdBizFL7zgpauuSs2q90ZjObFB6xn3629M2X6ZpIa7L7xcDCgnOnAVIzsPYOnRMAqarD3oDl8xGzEaTDS3gLzrNcWAqugTeIzowsAFhwl7+Z6digVj3ar8zRcDFEzPzrNtQi8GLTpCaIHtNcWAKp6cWseYzv1Y6HPvlVuDKhYTRoOxdvqNw4pRk0/8jUOsGNWVNt+0Ydiyo9yPe0Dw+tF07+O+Gv/y6B4Fi8mI0WjC8WeKAYqBnHB/1s2ex8Zj98nVmjG7h+Uv6EfPdyoGKFQnnGfruE70nLqTa/XFrvq+prhwuVxYMu9waE53vu8+k703spt8rhS0Ty+yfXxnek3dxfVM+zsXA6JUz7l/ZB69vu3GjJ3XyWryeVC0iVzdMZFuXaew47KMDKhPj3wVAREQARGoFZBigPQEERCBjytgjuHYH9xa0Fn6gIDDQdx9/IgQrzkM7DyGrRfjULuvstpTCF49ii4fuBiQcW4DEzp1Z/bBO3VrBthIC17PhA7f0W2WD/fStY1X/exlxIeEEB6bQYXDhS79HudOBnMjKoOSSg0aTe1aAO6rnQ3/XlsMcFEafojFg7oyZn0wMSp3ox2kn9/ExC7vODIg8zLbJnaj59R93EypHY7s0sRwfMVw2rcZxZZzsZTVjxbHQc61ncwcOJzlx8LJszh4fnlbzZoB0/deJ6Xu5NaljuTY0qH0GL6WoKjimhNjR7q7GNDtnYoBzpxYYuOTyah04LAYqEgPYf/cQfQYsZazUXlkXtnB9C7f0XnKbq4nNVk40lFB0r3bhEUlU9ZwFb5BmdcVA5wF7rUERjJu6TEiCmw4cm6yb2Yvuo3z5EJCdc3wbsXkHj0wlo71CwjannN91wx6ftuBKdsuk9gwVBwclUmE3blPVGIR2bf3s6DXD3SduofrDbdydBcD3LcW7MRojyBiyq1kXd/L7G6t6DTRvWhhVeOoBfcdNMLuEvrgGc2si0jzxQCwZ93Ee1YPfvh1AlvPJ1DVJKdVKeHcv/eAZyrba0YGOCkKO8qy0aNY5HOXPKur+WIAdnJueTO/Rys6jNvMufjKJnFXkRZxj/vhCRRlJ/IsLp6kcgcOq5HK5/fwXT6CnoMXc+xmOHd95tP3u18ZvyGY+IqGQHFUpfHw/l3C40qwv2ExIP/+ERb37cwYj1PEamtH3rgq4mrWe+g1YD4+t/Nq+qcz110M6PuOxQD32qDh+C/pT+tfhrMm8DGldYNfUGyUpcYT8yieHHUy13dPpcuP/Vjkc5+ihj7ppPjBcVaPHMj0rZdJN/HuxYByLWk3vJjZ6Uf6zz3A/fopHO47ShZHEbR6NIMnbeHSC9N/Gj8T8kwEREAERODLFZBiwJebe2m5CLQIAVdlGN4z+/Dj/3RhcWAEOZYmJ8i4hxY/5MTaCUxZ5Udo8lMub5pAt29+ZdKOyyQUqshxz2Me0ZEfOs7i0I141EYrtoY/uJs00VW7gGDfnwez7mw0xQ2jnMM4MKsf7fuv5uyjuvnW9gwurB9H1/bj2X0tsWZRssKQnczo0Z6R68/yJL+EwkIVxbEX2DGlOz/+52cGzfIk8Fo4kWHXCNzmgefBy8TkG7Br4ji9fh6z52/kyIU7PHgcS0JiGs/z1VSb7I0FBGcht3fNoM8vQ1h/pv52a+74HeRd3870bt/RfswWzj8poDQ3miCPsXRr1YHpXpeJLTVitTpQqh9xbPFgfu25iICwnCZXIZs4uE8QCm+zb1YfOg5ew+moPEqLiigtKyLtphdz+3Zg0MKjhGXXFgkUQyqXPOcwY6EXN5KrsCtOiu7sY16f9gxbdZKoXBXFRSpK4y+xd0ZPfvh5dM0q+vmluTwJ2sDkrj/QadIuLkaXYrRYsCtaov1XMKJtDxYevUf9uoIvRgiO9KsE+J7kUkxpzYmbYi/gzr75TJ+7nasJVVjy7uA9pw8//ac1/aduwPdyKJHhIZzctQHP/cFE5uhwNO1KdQeoLQb0p33PeRy+k0XN3SgVLcnnPZk3ZQF7ryRT5QBXzRSAIbRvN5qNQdEUlhWRdOcIK0Z24se2I9kYFEO5zUL+vQMs7PMDX//Ym2keh7ly/wERISfZu2EL3qcjyNE5sBWFc2zxINq26sqULeeIKzbiMBUSfngxg3/8hh5TthL8REV5xl0OL+hP63//RN9JHhy5dJ/I8JsE7dmEp1cQ4VnaZtvkVEVwbOkgfuu7CN/7+Y0n49YCwg8vYtD3/6FNz0l4HLxI6INwbp3ay5bNXpwKfY7W4awtBgxqR8+ZXoRk1K5hoehSub57IdPnbOPi00ocirNmDYFVg9rRf95BhVoWFgAAIABJREFU7uU1nrBbC8M5tmQgbf79I70nrObQhftERtzitNdmPPec4H6mFsvzu5z19+fUg9pCEY5iHhxbyawZHgQ9LqYo4hgrBv7I1z/0YNIqHy7ee8CDW0Hs37yFvYF3yax24KqO59zGsXTqNgOv65m1/duRT+iRRQxoN4jlRyModbpQR51g7dDf6DfHm9uZatSqYoqe3iZw+SB++nEgyw6HkqMuJPHybub1bU374asJCFdhNJuxqB9zas0wfus5i/0hr/8M1XQnexlxwRsZ3/a/tGo/hLmbjnEp5CZXAvexZ58/1xPKsDktFEedxGN4Z3pN9ORSUm3RUDHnEnp0NbOmeXAyWl2zsKAj+y5H5/Wk0/DVjesLOPII811E/7aDahYQLK+pbTgpjAhkxYAODF3iy8MKJ5bix5xeN4qu3Say5XwSNTUQxUxemB/rZkzDI+Ax6vqFBV/+sMn3IiACIiACX6yAFAO+2NRLw0XgYwtYKE0OI3jLVPr//F/+/n/+xndtuzNk5HimTJzE5HFjGDWoN93b/USrf//K1D0hJFWaKAj1YWH/n2n19Y907jeJtYfOcNRjGgN//pa2fedz5K77/ukvngW6qgtIvHOIJUM70Opv39J76gaOP8imrCSXJ2fWM75LK/71XW9mbT7Dk8wMnt05wML+bfn67z8zbJEPt9PVVGXfYu+M3vzyQ3v6j1vB0dtJlOh05EcGsm5sF9p89V+++/Z72nUazJytp4l6XoGlZp5zGtd2zKR/m+9o9U0rfvz+B1r/8BM///QLXQcv4EBIEqWVRSTd9mHx4N/47n++ocfENRy+mUhRlQUFBWthBL6LB9P+m69p3b4fk1b4cPrwJuYOakern3ozzzuEmKdPCfdbyTh3W/7bhYkrD3MvpQSd/UWLmqxb87m3by4DWv9Ah15jWXkwhGdFBuxmNck3fFg1YTST565n31Ff9m9azRrPo9xIKEJvr73Kasu/h8+8/rT7vh19xyzj0PUEity39vNbwYh23/Ld926j5Rw4cRjPeUP57euf6DPTi+uPnvIsPBCPcd356e//peuY5Ry6nUyJtsm
45rpu6Ui7yv6l05iwYDsBF0O4fno/m9Zs4ciVeEpMLnAaKXx8Gs9JPfjl6//y7Tff07bjAGZsDCQsrQxzc5UA97TxujUD2rftxahpq9i2cy97Nq5gwbzV7L8QQ76urkDj0pF5az8L+rflx+9+onO/yaw7eIidK2cyvMdQ5m45RWSGGoOhkCfBW5jRsw2t/vMV37dqQ5f+U9nof580tbn25N1pRhV/Ba+5g+jc+ifadezJ4HELWDZrAqP6daP/pLUcvfmMoiotxU/Osn1qT9rWt6lDf6at8+N+ihrTK21SMJWmE+G3kvFdvuOf/+3ImEVeXIvLpaKmyuHEWBTLhW0z6Nv6G77+77f81LojAyevw+9OMmqTA4X6NQPa8mu3IUxfsZmde3fjuWoBC1bs5mxULlqbEXVGBAGrx9Hju7/z7W8jWLLnCnE55bXFFKeJkrgL7JrRm3Zf/6c2F7/1Y8rao9xJKq2J25F1B/810xk7exO+524QcvYQ2zw24R38mAKjE6ephIRLO5nT5+cax1at2tCp70Q8jt4mSWXCpi8l9YYX8we04T9fdWLM0qOEJaWT+ugkG8Z14bu/f0/vSVs4H1dAReEjTq0ZQftWbegxbAF7z0WRU6Ei4dwWJnf8jm++bUufkQvY5XuYvWsm0OXr7+k2YTMBV0K5H+jBlO6t+Od/2jN87k4ux2RRZmrmM1TTT11YK5/zwH1rxi4/8s2/v+KH1p0YMtOTMw+zqa6bA+S0VvE8/ASeM8cwfupydh46xsEdG1i73ovgmu2cGNXPiTi2komdvuK/bfoze8dl4tMyyXD/fpnQjVZ/a0XPcWs5GZlEeupjgjZMofe3/6RNz8lsPhNNtlpHZdYDgrbMYvzoqazYdgg/n51sWr0Br6BIsqttjYXHj/2rX44vAiIgAiLQYgSkGNBiUiGBiMCXJqDgsJnQV5VTWlJEUWEhRcUlqEpLUavVqEtLKVWVUFzkfk1Fhc6M3aXgtOopy0kmPu4ZadnFVOqMGMpzSXkSTUJqPhVGG01H39eouhxYTVoq1SqKC4spLdegN9txOuxY9BrK3FcOi0up0Biw2Gx127qPXYK6UovJfQszhwVNYRpP4xPJyC1F677VmKLgdN/doKKYrKQnPH4Yw7P0fNTVJmx1QSiGLO6fPEbAyfOE3L3N9YtnOXPyOAFHD7B79WwWbT1FWGoVlqbxlVWhNVpx1DfEacVQlkdKfDzPUrMprtBhNFSQnxrLk7gU8soMWKxWzLrK2rYUqSiv0mGyOXA1ex7jxKIpIuNZAklpuZS646050VRqhuRXqwvISnnG04RkMnOKKHO72F2NK6E7LVQXZfAs4RnpOSqqTVYcLidWQzn5KQkkJKSQXVSBzqCnoiCN+Jg4UnLKMFisWM06qspUlBQVoSqrrHF0NBOkYtGjKVejUhWTn5VBesZz8ksqa/JWv7nLZkJXWUJ2ShzRD6N5mppHqcaI1VG3KGMzH6n6YkDXIcvxvZHA87QkEpMzyC2uqO0TDV5uCx3qnCSeREURl5KDWltFubqE4kIVldXuvuJeGM+9Sr2OqpIcUuMeE/U4nrRcFRqD26QxAJfdgr6ykMyn0UQ9fEJSVjElhYUU5OdT7O6PJhtuB5fdjK6qhJzUeKIfPiYhJReVxvDaNikOG2ZdVUPeyyqra1axr+86uOw1r6tyUkl4HEV0fCq5Kg0Gi6Pu5LC+GNCJIQsPcC0+i4zkRJLTcygqd/ch90KM7n7ujquM0uIiikvUVFYbqVkYsK6J7rj1VSpy0+KJiXpMfHIOqioDlnoEqwFthZoSVTEFOZmkp2fWmOtqPkfundSu9q9R5ZKWEE3UI3efKaHKYKl1dDmwGbVUqEsoKlZRVqnDbLVhc/eTMvfolNrPtcHqwOm0oivNJjnhKcnPi9HU7MOF3VRFUUYiCXGJZBaUUa3XU1WcxbOYJzzLVFFtNGPSV1Hu/n3g/uxXVGNwL7LYJI+NGa17pjixmbSU5WfwLOZxjW9eaTVG9y0AGzZ2+5nQlReRm5bE04RE0rIKKK35jNZu53LW5rHm2CVqKqqNWG02bGY9VeV17SurQm+2YrNZ0GvKa3JRUlqORl+3wKLThklXTnFOGslPE0hMzaJAVUmjcUNA8kQEREAEREAEagSkGCAdQQRE4JMTUFxOnE4nLqXuz23FhdN9yzlnkxPWD9CqV47bcAwFxeXA4Y7B5T51qvvnqiD25EbWbz/OvTRtzR/3VosFs9mEyWhEV3CPi5fDeJLadPX++je/9NXdRqezyf4VXE4HjhqHl7Z9k28V9/vd+2vOrHbfdvtL7WmyX0Wpy0H9mXnNa3X7dLoaihBuM4ejNu4mb3+Dp7WK7pNtl8Nt2yTfL7279hivj7Xp5vXFgG7uBQQfl9bcqrB23023anxev29HTd9SUBT3o/H1xmd1+fidON2ry7tz1uBas68m/aVxZ9Qf94X+1OT1t35a9xmpaccL8dcXA+puLVjiLpK5+5XrNe38/SP/Udxuv5p++1qnP3r994/f8GqTz0vDz9xljYbfHbU/VdwLVdZ9bhu3e4dnr/Vtuq/6/NtrfZu+9B6f1+TAbsdeY/wedyy7EgEREAER+OwEpBjw2aVUGiQCItBSBBRDHMdXjKTfoCUcveceNlx/FubCUpHJg/MnuXAvgQJ94/zrlhL75xqHsziUwwsG0HWYe666unF+/efa4D9sl3stgGOsGNyJYcv9iVRJX/xDMtlABERABERABD4TASkGfCaJlGaIgAi0QAGnhpQru5jbrz2/tPmNXv2HM27cBCaOn8zcZTs4efspBRprs4vCtcDWfPohOc2UPQpg7Yh2fNt+AluDYykzvW4qxaff3DdpgdNSTuyp9Yz/9Ws6jF7HyccqjI7mRou8yd5kGxEQAREQAREQgU9JQIoBn1K2JFYREIFPTKB2PnlZTiJRty9x5vhxgs5eJyI+gwK1BoN73YL6wQKfWMs+vXAVjKpM4u6eJ8jfl6NHAzl99ioPnuZSbvxCk6CYUGfFc//CKQKPHuGY/ynOXQ7naZaaL5Xk0+vXErEIiIAIiIAIvLuAFAPe3U7eKQIiIAJvJOCew2u3WTCbTJjMFmxy5fWN3N73Ri6HDavZiNHY+DBb/2CBuPcdRIvanwun3Yq5iYfRaMZqd8rK8y0qTxKMCIiACIiACHwYASkGfBhX2asIiIAIiIAIiIAIiIAIiIAIiIAItFgBKQa02NRIYCIgAiIgAiIgAiIgAiIgAiIgAiLwYQSkGPBhXGWvIiACIiACIiACIiACIiACIiACItBiBaQY0GJTI4GJgAiIgAiIgAiIgAiIgAiIgAiIwIcRkGLAh3GVvYqACIiACIiACIiACIiACIiACIhAixWQYkCLTY0EJgIiIAIiIAIiIAIiIAIiIAIiIAIfRkCKAR/GVfYqAiIgAiIgAiIgAiIgAiIgAiIgAi1WQIoBLTY1EpgIiIAIiIAIiIAIiIAIiIAIiIAIfBgBKQZ8GFfZqwiIgAiIgAiIgAiIgAiIgAiIgAi0WAEpBrTY1EhgIiACIiACIi
ACIiACIiACIiACIvBhBKQY8GFcZa8iIAIiIAIiIAIiIAIiIAIiIAIi0GIFpBjQYlMjgYmACIiACIiACIiACIiACIiACIjAhxGQYsCHcZW9ioAIiIAIiIAIiIAIiIAIiIAIiECLFZBiQItNjQQmAiIgAiIgAiIgAiIgAiIgAiIgAh9GQIoBH8ZV9ioCIiACIiACIiACIiACIiACIiACLVZAigEtNjUSmAiIgAiIgAiIgAiIgAiIgAiIgAh8GAEpBnwYV9mrCIiACLxHARf656EEbpjJ6MHDmbrch5CnxRhc7kO4sFQVkZNTSLnWik1bTnlVNXq78h6PL7t6OwEFQ+5Dzmyby9jBw5i0xItLT/LRu/PlsqJV5ZFXUIrGYkdXWUFllQ6bpOvtiD/brRVMRfFc9VrE+CFDGD9vO6cfZFHtdPcdG/qyAvLyiqg02zFUV1FRrsEqfeez7Q3SMBEQARH40AJSDPjQwrJ/ERABEfiTAvaShwSsHEXHr/7BP/7v//DPf/7ACI/TRBfaUKxaSpOv4LVhD77+wdy4dprL4UnkV9dUCv7kkeXt7yLgUD8heOMEunz9v/xvTb6+Z8iyY4TnWrDpy8gI8WHHHm+OnA7hZvBFQuOzqJJ0vQv1Z/ceZ0US1/fMpOe3/6zpO//7v9/Rb85ebmSYsRsryb4fwL7dO/E6eYs75y5wOyqNcuk7n10/kAaJgAiIwF8lIMWAv0pajiMCIiAC7yKgmEg9v5npfVrz7b/+yT//3//D//1//pce830JzTKjKC4cpudc2bGatVv9CcsoRWcyUZ7xgHP7N+Kx6TDXY/PROt7l4PKetxZQzGRe38P8AW349t//4l//nztf/6DrdC9upJlwuRyYs29xcMMaNvrcIk2lw2Sx4b7wi2Km8EkUj2ISKbK99ZHlDZ+6gGIhL+wYK4e24dv//Id/1/Sdv9Nh7EaCnxlwuZxYCiI4uWMNq3ZcIrFYi9Fsre07n3rbJX4REAEREIGPIiDFgI/CLgcVAREQgTcUsD/n6tbFLF/vy8Wb1zm+04M1Gw5yNSYXTf3YclclD4+sYcOuUzzMt+PSZpEQHUPMkyfcPrIOjx0nCcvQI6OJ39D8z2zmyOXO/lWsWLWP4JBbnPZaz5p1+zn/8DmV1tpLuC5NDKc8N7L9yF1yGk76FUyFcZz13MCugBDSzH8mCHnvJyngKOLh8c2sWLCFwJD7XDy4mTVrd3Pqfipllrq+o0vkqvcWNuy4TLr1k2ylBC0CIiACItCCBKQY0IKSIaGIgAiIwMsCrtJQfOZNZP7Wi8SpTJh01WiqDZhtzrqTexe63EQe+q9h6VZ/rj4qxKApo0yrx2C1o485zv6DQdx4qkFGE7+s+/6/d5U9JGDFNGavPUFUkRmT3p0vPWabozZfip6C1EecWr+C7QfPEJFvxGKx4jIUkJrwgIBNuzl26jqpUgx4/8lp4Xt0VcVzwXMO0xce4F6+GbNBi6Zah8nqqP3sKkZUWbGc3bqObbuPcCfXhFGjRvU8jbTMLLJzMkhNyqREL2W/Fp5qCU8EREAEWoyAFANaTCokEBEQARF4VcCecpaN4/oyxiOYmJKaweQNGzlVT7h+0he/8w9JS7iGz/pVrN15gWeF1dgVBQUbeffPceFGBCmVMk+gAe4DPrFnXGX3tAGMWOJHREHTfDkpe3aHs8eOEByWyNNbfuxYs5ytQU/Ir9KQl5zA02fx3An04/jpG1IM+IA5aqm7duSGcmzRUAbN2ENIdtPPq4vK9Idc9vPh5O04noaexWfdIjz8HpBZmEHEuWPs3erLpVP78Tnkx9VkqSS11BxLXCIgAiLQ0gSkGNDSMiLxiIAIiECDgIvqqCMsGdSWfosDeZDb9AQBFIeeCrWa8iojNpuRSlUJKrUGs909BkDBqkrkUWQ0ibkabDIsoEH1wz1R0MWeZN3I9vSa7cPt5/YXDuUwVFFeqqZCb8NmqkKtKqG00khlaiQ3go9z4sIlDq9bjcf6fdxIV6NvWkt4YU/yzecnoGBIusKeyR3pOmELF5Mb5o/UNNVpqqaytJQy9x1DzFoqSospKTdiKYvjytFdbD1wndBrwRz3DSTspaLh52clLRIBERABEXhfAlIMeF+Ssh8REAEReO8CTopu72F2z+/oMuMgd9JNL837V1DcIwDqRgXXPq/9xl6ZTWL8U9LyqzBpteiNJuqmHb/3KGWH9QJOVOGHWdr/ezpO3MGVRMOL+arJVX2+6nOnYCxKJfrOZS5cOI3X8sUsW7GDi4lFVL9Y+6k/iHz9LAWclMecZuPwH/l1+GpOxujeqO/ok0MIOnyAwLBEoq6dJdD/Blnmuikpn6WTNEoEREAEROB9Ckgx4H1qyr5EQARE4L0KOMi5uo1pXb7il/E7ufas6o3m/bs0KVzdv5pZY8cyZco0Zi7dy+XHORhkKvF7zc6rO3OQf9ub+T2/ps2I9ZyJqXizfDlsWExGDPoSIgMP4xtwkadaBy7J16vEn+1PnJREnmDNwG/5ccBijoaX/nHfcd+54u5FTvgF8aggmVunvNm2+wrpZS8XDT9bNGmYCIiACIjAnxSQYsCfBJS3i4AIiMCHE3CQfn4jEzv8k1ZDN3A+pvTNbiPmtKCvKkNVXExJSQklpZXozbY/Prn4cA35QvbsIOv6HmZ1/Rff9V9OYETRm+WrQceFRa9DpzdSf6OIhpfkyWcu4CQ/zI+lvf/NNz1ms+9mHn88MMSFzWhAp9NjcZjRaSooK9djc8icoM+8s0jzREAEROC9CUgx4L1Ryo5EQAQ+ZwGnzYLJoEOna3wYzHY+7N/ddpJPezDu17/x7z7LORWZ/wYnCO83C05dEcmRNwg+foZb0Rmoja9OZHfqiiksKqVC9zYnIU40JUWUqCswvc3bXmqeYq4kPymKW+dPcMz/Eg+SijB9tCvqdtIv72Bax7/zr+7zOHIn+6/Pl0FF2qNbnD1xhhuRKZQYmsmXoZSSohLU1a++9hJv47dObc0aByq18U8VlRSLhqLUaG5fPIWf/3nux+dj/Gj5amzex3/mIOfuURZ2/wf/7DiJ7ZfSeHHFiT+OsOmUoT/e+u23cJrKyY69y/mTp7kamkCB7tX+4zJVoC4uoLjq1ddef0QX+ooyigtL0P+Z3wVWHerMWO5dPk2AfzAhUVl/an+vj1deEQEREIHPR0CKAZ9PLqUlIiACH0xAwVCSSsTxDcwe3otOv3Zn1DxPgh5kU/FBzzztJAWtYWy7/+Efnedx7F4W1r/wxMlRFs+FA3s5FHSdswc9mDVzFQFhWU1OthUMedHcCD7P7Se5VL/V2YuCrTKTRzevcudxJuWWd2uYYq2m+Ol5dswZTM8Razj5oOCtT6LeX7exk3ZxG1M7/A9/7zANr2upvGOz3ikkR0US1333cyDgEueObWXBjMUcvJne5GRbwVQYz53z57j+8DlVL65R9wfHtKPJecK9azd5mFKK+d3ShWLTUZp8lf2LR9BryGIO38nhrcL4gyg/3ZcdZN85zIJuf+PvbcewISihyefs47fKqckkLMiHvT6nOR/kw
4oZs9h27tn/z957eEWVdPv7f8r3d+/ceW9+7xsmO9EcB3POioo5jhkjIlEUJIkkFQFJCig5KEGS5JxzztC5z/NbjYINAop5tFyrF6f71Dm169l12q5P7drF810MJWSN+Ty860NAVB4tE/yiUnfXkB0XSkhkBrWvqQ5Jql5aiyK4fmodcw2MsAgsRPHh0QkLBAFBQBD4qAkIMeCjdo8wThAQBD4WAlq1gu4nNzBe+Qv/++1qTH2SqO1TonmDmayXt01F9q0TrPnxC778xQiHMP2B3cuvfqMSmmaSPc5y/NRVQrIaqU6/j69vMMml7c9muyX6KmK4YWePZ0gGDX2qCc8YSxolfXUpBLp54BeZQ8vrxMZLWhQVD7iyYz6LjK7woKBveOK1N4Iw0YtV5AdcxPDXL/jyx41YBmTrDZYmeq8Jlte0kuFrwakTVvil1VObFUWAbyDxBa1D/uqvTuSO01Wu+z+mtlc54ZwEWlU/jRmheHl4EZrR+HpLGSQtyuoYXA4uYcFGUwJGJlmcYLM/neJqSiMc2Tv93/jyu+Wc9Eih6zUFl7fORNtB/gMHzh8+g/vDGuoLkwjx9SYss/GZ8CYhb3hCqLsdV2/EU9mtQDNR27Vq5C15xPp54u6bSO0ExYSBNktaVA2PuXN2NQbLjuAxMgnjWwcjbigICAKCwJ+fgBAD/vw+FC0QBASB90JAS2u8PfsMvuGHVRcJymh5DyHgSjJvHmf1pC/4cpIhV0Jz6Z7oj+zXZCMvDsZq13aOOoRR2KtFo5TR1ydD8WxdhNSZS9Cl85g53ONJs2LCQsCQWRoZtXFuWJrZcSe55jVmnLW0JrpybNlCtpgFktX55uqMojqNB8EPeJjXOEEfq8jzN2XzL1/w5XdrMb2TQed78peiPBLHQzs5aBVITrfOX3L6dTtIDGwzCVJ3IWGOFzG18SGlUT7BXAZD3kIrryfZ+zKWlzyIr3idRHVa2tO8Ob92IetP3SK1/S34qy6b6NAQIp/UfcCokKeMlPU5xN0PITyjdoK2qCgJd2DPtC/48uslHL2eSNubo3nuuDc4UtYm4XVmNzuNPUnt1KBVKZD19SFTagaEN6m3gkc3L3H+givxtf0TFwIGbdMqackM5rqFKfb3i/UiWgYLvOyvRFfefWy3LmTZXgcetnwkAF9mtjgvCAgCgsAHJCDEgA8IX1QtCAgCfyICUjdpbn+w/IevWHbKh9T6CcXEv2ZDlWTe0IkB/8qX323A+l42He/j960ko8DfhK1rd3L5Xg5dI+uU+ikJtubgXmOuRxbTN/L8BFur6Ujl1rn9HL3oS1rDBLlK3Ty5dYr189dy/nYKLRNZqjyqnRK9T3wwO3uRa2FFEwwzVpHr90wM+Hol571T38+ATpJTEmrDnvVbueCd9mIfkWSURzpyYt8RbO/l0ftG/tLQlenPpcMHOeueQO1EY/ylXnIDzNm6cDnHrj+k8eVZ8kb10vMPJfpy72F34Tw2gbnIn5/4AEcSfXkhOJiexcove4K2qCgJc2D31C/48h8L+cM5nuY38tNbar6koCrOjWOb1nHE+RGtI22SFNQmeXHhwF5MvDIYJY3AhAzR9hYSec2YAwdtiayYYJC/7nsp0on9ixew0zqM2jfuWxMyXRQWBAQBQeBPSUCIAX9KtwmjBQFB4L0T6M/G+8Qqfvv77xy/mUD1RAdBr2Wwnhjw9RrMA5+828Gl1EFJYjA3bc+xZ8VMfv55Hhv3nOayZxhPKjqGZpOljjRunVzPhn32hBeOEpav7aOxMJlgF0vOHDuHc3AaNX1aZPWZPLhugbnTXdIre54T0Xbw2PU4m9cfwDlSPyfB8yLPjyTkzYUkBt/CzckRDx9nzm9fwcLlx7jxqBZFXyPFycG4WZ3h5BlH7qVU0auV0ZD5AHcLM5wCHlMxbniFRE/aTc6dOIt9aMHriwH/XMbpm8k0v7E48bzlLxxJXVSkPeC23XkOrJ3Hrz/OZPUOY2zcQ0gpaX3ur65sAky3smG7BUGjhOVr+5spTb2Px6WzGJ+2wz+xnB6tnKbcKG5ameHg84hSvZh1bWcGPibb2LjDmpCC3pcsy5BQtJWS8uD2gL/cbztjun89SxbvwzGyCkV/C+VpD/C0OYfxaVvuPCobqLs5L5pb1mbYe8dTPG54hURvdgDWp09icWeiA/AXiL7hBxJ9OUFcOXMCU+9MZBO623Mx4N/+ZsAB+xjq32XfeZltUg+1uTH42l/gsOFipk6azBLD41xyDSAut+l53+opItJ+Hxs2nODGKGH5kryd6sxIvGwvcOqUNTcji+jSKmgrSeDOFTMuu4eT364XPqPtIf++HfvXbOCsV+ZLIqEklJ3VZEX54ubogLuXM5Ynt7P8982YBZUgl7dTkxXJbTtTThtb4RlROFB3e2kS/rZm2Lg9IO8tRKa8DKU4LwgIAoLAx0xAiAEfs3eEbYKAIPDREFAW+WOybjL/nLEPt9iy0cPZtV3khuoGO1vYamg47svohBPhmXUvSQioJwZ8tQbzgCcvzsy9VUJKelrrKYtx5tiq6fxuaMatqDxKqxrp7BtUPyR60m9yarUB68/6kNo02ohFQX9PJ81P7mBhtIClu68SEvuIiDuu2Jw/h/X1YNIrevUsV1F5/xK7FizhgGMkpWNm3VPRXhCGm7UVTj4PKaprJCPQkp1zfmDRfmdiyhVIin56O5vJ9Ldk94JF7Llyl9hHEQS4WnPhnBWuQY+peJ71TM99Ox1HAAAgAElEQVSGwcO3JAb846kYMCqeware+K+S3vYGKuLdOb9xNnPWnuJ6eB6llQ209yqfDdJ1g2V/zDctYNURVx7VjzJdqpTR29VM9j1bDi5eiJG5L9FJsdx1tcb0vBUufo8o1RdQ1DXEOh1i+QIjLt0rGP1ZGGibms6SWLyuWGJ3I5r82kay79tzYP4k5m+/RGiRHEm3/KSrmZwQe/5YsoBtpt5EJcdxz80aUxMrrvnGU/JCaIo+uE9QDPg/Aw5c/cBiACr6u5qoTPTBZpcBM5fsxzY4l5KKOlq75EN9q784Aqfdi1i84xLhVaP0LZWc/q4W8iPdMV5pwPoTroSnJvPA3ZqLptY43Iggf9jSHg2NKT6c132/nPQia8xkgmp6qh4T6GiBtfN9cmoaKYi9gfGyn5iz9jS+OTIk3XKGrhYKom9wZpUBa4+6EJaWQpiu7ovWOHiEkfdeQq30+6s4FgQEAUHg4yIgxICPyx/CGkFAEPgoCaipDDZn2/S/M9XIgYjCnjHWyMuoz0sgzO8WXrfGf92+l0BBbedL1qTriQFfr8UiKPPdRgYgIWnVNMc5cnCRAYbmgWS2qdBotEhDk3dqyoIt2DZrJttsQsgbdWAtodvmTCvLJ/DCJgwM1rLr5DXC4rOoaW2no7tvaC37U3dLtCe6cnTJDFac9CK5YTSBQU1Lph9WB/dwyj6EnBYZaq2a4rsWbJszl122DyjS7ewg6erWIisIwsJwHgtXG3HK+T7xT6ppbeugu1fOs2X0Y/S0tyQG/HM5Z249fseh3k/91ZrkyamVBqw7fYuU
lkF/DTpMQ2WkPft+n8EGE1+e6M3wDwF4xkxeHIrtjvnMX2bISacQYtIqaW3XMZMNZyZ1kn77LOtm6MLZY6kdzV2oacsL4erRPRyz9iOjsX/AX+XhDuwzmIXhxQByep/7S14ahv2uBRgs2cQJx2CiUp/W3dUrQzkyNH3IcN3BxMQA3e4TjZVllNe0IRt2Xwl5ZwMVpWXUtsqGP9+SnM6GCsrLamgbdxuFtxMZ8OXf5nPQIYaGYfYNa/R7eSNJGjoyg7DaZMCyfQ7ENT7tW9qhLwMNdUleGC+ZyvJDLiR1DPY5PfOe9S1FZTyeh5cw9/fVHHMI5H5iOS3tHXR194/wr0RP7n1st8xm3hYLQstHERjQ0Fkai/uZPRw450lSXR8qrYbaxNucXDKNVUc9SNOJV4N1Vydw6+hS5sxbxVH7QEITysaoW89ucSgICAKCwGdCQIgBn4mjRTMFAUHgDQho6omy2cHv//gRQ+sQcsecTZLQ6Gaj+vvo63vJq1+B6qVbEeiJAd+sxfLuuxYDdIzk5PqcZePvKzl9K4kXJ5KVZN8+zbpfp7PbIZzi8RZpSz1k3DRm7dTpbDUPJKt17ESDfWk3MV7xG7/vcyFmlAGAqj4Bj+PrWLXdgoCMlqcDCKmDFPdjrFlsiHVQNt16gyep9wnep9cyc9omzP2e0KLQO6nXFbQdGQRa72PlzBnMnv70NfOnb/nqb//g2+9/Zeazz2ZPnzUwsxhV0K139chDvZwBXy3nrNdj3n0OMwVF96zYvmAph51jqHkh5YKSgiALtk6dwhaLIHLHiV2X+rIJvLiJ2ZPXcs4rhSb56Mygj2x/UzZNmcEOmxCKX6gTVI2p+J7fzCrDs9xKbmIAv9TFE18TNi9exzmvNPQnhKW+XO5aGDJ78mpO33hM4xh1aztzeeBwmNX6/vr5e77529/5+rtfhvlr9UFbQrI69ZykpDLOg1OrprFs7xVC8/WWOCgqiHc/ydppizlwOYR8nVAx8E9CXh6Hx/FVzFy0D7uQgqEt/7RdeUQ4HWWNvi2/fM+3f/8bX3873JZV+224m6lvi55ZA4fPlwl8+ff5HHSM/eBiACipiLnOH0vms90imPLB4KAh01WURbtwYNYvrDruSXrf0IkXD/qLiXTYxe8/Lx7Ih1AnG1VBGrhOVhSJ046ZTF19Gp+cFypF3ZrL/cs7WbXmEI4xdQx0FamHwjA7di1ewkHHR8NFU1kJMdf28vvPCzngEMd4db9ouPhEEBAEBIFPm4AQAz5t/4rWCQKCwFsgoG19iOPe+fzw7WrMAjPeQpK6VzVKTwz4dj1Wd7N450tcNdWEX9rBosX7cIkqHSUJmooc7zOs/206u+zDKBozpF/XRg0N0VfZv3A+O66Ekj9myC/IMm5xetUvzN7lSGTxiAGA1Emmzzk2zl7EQYcISvufDlKl7id4n1rH0vXnuZP6fI38AF1NI3EOB1g6fys293LHzkyubKe2MIPE2FhiY3SvGB5cO8W2TVs5ZO5JxMBnT88lZ5fR1D3KyHfIncrnCQS/WsG52ynveFmHDnEdcc6HWL7QiMvBBaOsU1dReNeKbToxwDyQXF30xFj/NE0kuh1jpcEGLt7JHGdbRDm5gRYYTp7KVsu75I/M8yZ1kXfPCqN589lpdY+CZxkLpZ5c7ppvYfmqY3gkNA+tOx8wR7eVpedJVhusx8T7ydh1KztpKMkkSc9fYW4X2Gu4mV3n3Yb5KymzmPpOfX+paUz15aLhQtYfdSFWP0Gdup5Unwtsm7+WE9diqNRrk7o+BV8TQxatOcb1mEqGeqeqi8bSrOG2uJtyYMsmtp9xHW7LkyLqOoauHMUD+mLAAg45xtE4lhYzytXv5CNNM6k+51g3fy1nvDKHRJDndakpj77Owdm/sOqYB+njPN9o23jif5FN85Zx9HryuNsmKkqicNk9g8krjnMrY4TaKPVSGnONgwvmssHYi6xnGQulvhJinPaxctFObCPqh0dcadvJumvB5nlL+ONa0rh1P2+bOBIEBAFB4PMgIMSAz8PPopWCgCDw2gQkulJcObz4B75ZdBrfx/Vjbxmm7SDD15JDa+az4HeDcV+LjcwJfFzFuGNpFDzxPMaq97ibgNSZjPvh5SzZbMW9rM7h4dIDDNWUBJmxZcZMtl++rzeDOgpgRQ0Jnsas/WUSK455klAzWsiv7jqJ7hQPTiyfzIKDbsRXDi+nbU3G8+hypv2+j2tRZc+YaWge2FJwJqtO3CBpxO4OitpEvE6tYcoPSznmGs/YVWvRqtWolEqUAy8F7ckenDl2Gtu7OfQMfa5EpdagHWcsrZtJzfW7wKafddvDreTc7dR3LgZI3Rn4nF3H0rVn8U5tH9VfFWF27JkznY0X/MgcdVnHU98p61PwM9nA9EmLOGAfSaX+OFrfvVIvWXcusHHqbHbbPqB0uLvQRVvcOb+OGbONsAkufJZTQENrujfn18xi2QFHYkaEMCgb0ggw3cSMSQvYZxtOxZh1S2g1w/3VkeGLpfFxLnqn0z2uvyQ08l46mhpoau1GptJzpqRG3ttOc30jbV0y1MNOyeltb6KhsZVumVovYeJIW5R0PvHn0qljnL+V9hJb9IHqjlUUh9k/303AKZ6mDywGSL35PLhixNIlB3CObxmlb2moTbjJiYVTWPGHK8njJHpUNefwwMaIOZPmsfVCICVj6iIS/YVhOGyfwYx15/DPH94RpK58wq8YMXvaGs7czn4mUGjpyAvlytY5zDc0I2RECIOqJY9w2x3M/XEum8/5M1JrHOkJ8V4QEAQEgc+JgBADPidvi7YKAoLAxAlI/WTfPMHqn/6OwR83SNCfMnzhbgpay3N4HBNBRFj4uK/IR9lUNfcOnx0d5X4DYsAP729rQWVhAKabFrHO+DbJL64RALR0JrlyZOlc1pv4kf7CXmPPGiH1U5n4gNC7Dpzbs4qlG0zwT2tBrVCgUCqHDbZ092yKdeTgwnkYmgeRrR8/DigLAjDbOI2p6y24m9k1MBhTtWTiZ7KR6d8b8Me1GEp6FCiVqoEBiySr4nF4CPccznJwzWI26hIdtqhRKp6W0RvnvUBcJ0y8/m4CemLAN+9na0FV6X2u7FzKykMuxI0YYD9tnERX+m3OrZnHyuMeJI2R0VCS1ZAedZ8gexOObVzK2oGyapRKnb9UegNgXRdoIdnzBKvmrubsrdQXolVUJQ+w2zGLyStOczulY8An6rY8QqyMmPPDHHbZ3Keg+6n4ohvvSvJansTo6r7Aic1LWXPElYTGMep+wWMTyxnwwuVv9YM3yxkwTAxw/vBigKr6IZ5HVrFkmxWhIwbYg32rNy8U260GLNxlS1Td6KH/kqKRvIQw/K+ac2HncpYNlFWjUimRKwYTXQ46Qkt75l0sN8xlyV5HHo5YZ6OuesTNwwb8On8fTnGtA31L01lKjNMBFkyaxoYzvmR3qwae9YG+pWiiICkcfzsLLu5ewdLtNkTUPqtbPrLuQRvEX0FAEBAEPh8CQgz4fHwtWioICAKvQ0CWx51Ta5nyX7+wwy6cwnFmVnUDyae
zlgoUukHveC+lCs3408yAgswbx1j9LDLg0r3sYeusX6c541+joT7Kjn1LVnDoWjRlIyJ0B6/VNMXjfGA5a/c7EVU6mFlcd1ZC1lpPTW0dZekxhN2PJrWknHiXI6xZZIjlrXtEJ6aSVdw0IiJCQZ6fCVuWb8L0TjqtI8YU6oJAzDdN4+flZ/BJaUbRXc7jCC8ubFnMbIPtXPIIJC7hEfEJOVTXlpEZG0ZY5GNKyh/ifmwNSzeacjMomqSUJxQ36ts72CL9v28uBmz+5Qu+/GYVJt4vDpT1a3rzYw1Nj1w5vnIZuy6FUDhGPgBNazI3T65lzXZrggtkzwf2koyOxlpqa8rIehRJWFgCheVJ3D67kaVrT+MaEEtKShr59XrX6IxWFHHfZidr1pzgRmLT8JBsQF36gKs7ZvHTwiO4PmxA0VtFRqw3F3es5Pd5Gznv5M+jpIfEPMqhqqaU7MQowh48pKA8GR+TTSxdfZJr/nGkpqSSVzei7hegfTpiQEmYw1BkwGHnh+84+eQLIEd8oKUtwx+zjUvYeNqb7DGWl2g6srhnsYVV64y5nam3zaikoLu1jpqqUvJS4wkLjianPJ2QKztYtngfV3wf8SQliczq/uf9ccACJZXx7hxds5Q9tlEv5CzRVD/C64gBk2ZvxyasBkV/PXmJ/lju38TiOcv545Ifj1PiiIjLprKylPz0h4TdjSS77An37XaxbNEebHwSyExJJKNKz94RrRdvBQFBQBD4XAgIMeBz8bRopyAgCEyQgISsIZco58OsmvwP/vNf/o9ZG41xiy2h+dka6Ane8DWKq8jSRSX8+AVf/rAJm5Ccd7veVeolw/MEG1bt4ur9AsZsprqDJ7dOs33naTwfVj9fQ61t5fGNs2z7fTZr/rDnflYD/SoVzY9cOLJsMr/M28GlwHRqe/VDrXVT/2WEWu1j5wFdUreuF6IlpL4iQq2MmPf9D8yYv5bdJy5zOyIUD5OtzPvuZ5buseFumC+OxptZOGslh22DyazvQ6VqIcn1KKt++xEDI0sCU2vo1Y//HtUjbyIGqMjzv8jmX7/gy2/XcME3ndESrI9a7et8KPWR42fKtlVbMPfLGpZAcdjtNF3k+puzb8dRHMPLGFwOr23PIMBiB4tmLuegdQDptb2oVK2k3jrN+smTmGdoim9SJT364fS6gPbKKJyP7GSv6R2y2kesEdBJQv1lRDvsZ+EP3zF13mp2HrPCMzSEW5f2svD7SSwwMscv1A+3C9tZNGMZ+y39SK3R1d1G+u1zbJzyA3M2m+CdWEH3+Fs/DAhQvdkBWJ8+icWd7FFyXAwj8Y7fvElkgJrScEf2TP83vvxqMUdcEt75EpNxYUgyisMd2L96NcfdHo8tQmp6KAl35Nj23ZgFFA7x14XzRzgeYOn0Rew0uUViRQ9KdQe5wZcwmvYdM9acxD22hM6R/lXX8/i2CXsNj+GW3PqC0ISshse3jFkx6Rt+mbWc7Ycv4hJwjzsuJ1nx/bfMWn+Gm3eD8Lmyj6XTF7Lj3A0elXejVHeSF3qZHdO+Y/qq47hGF79Y97hAxElBQBAQBD5NAkIM+DT9KlolCAgCb4GAVqVbK9xIbZVuW7EKauqb6ehVoH5va3lV5HgZs/anf+MvP2/j6oN8hpKcv4X2vXALVSnBZjvYsvsSITmj5QsYvEKLoj4R97PGWHlEUzqUOExNT10BaUlJZJY00KN4OsWvkTVTmplCSkYRdV3yEUsEdFuJ+WNtfJrLfuk0KUYJ4pdU9LdWkpsUR+zDVPLKG+nq66WlPIvkR0lkljbR09dBXWEaSUkZFNd387RqLbKWMrJTHpNRWEenfIQIMdicEX+18m7aWlvp7J1oGLGKgkBztvz2JX+ZtAGLgCx0O5y9s3/qSiKvHmCbkQl30kfLFzBYs4SyKRUf8zNcdAh5Ht2i6aWx5AnJiWkU1nah0OiM1SJvrSA39THp+TV0DFsjrxt791IUao+JsSk3E59lch+sZvCvpEbWXkPB43hi4x6TU9pAZ28vbVV5pDxKIL2oge5eXSJAXd2pA1tsygfrbqskL+0xaXnVL9Y9eP8Rf7WKHtpbW2nvUYyYZR5R8D281Sp66Whtpa37ZREoI41RUxbpzL4Zf+HLb5dxwv0x4yzBH3nx23+vqSPp5ml2bDjM9YSWFwS65xVKqFpzCLE34ay5N1mDW1dq+mmtzCHlUTK5Ve3IBkQ4LYrOWgozknmcXUFb/4jlJ0j0V8Rzy/w4xvbRVA/f+/FplbrcDl31FKc/IjYmkcziOtp7eumoLybjUTyP82vp7OmipTL3ad2V+nXXUaSrO6uC1hfqft4icSQICAKCwOdEQIgBn5O3RVsFAUHgT0ZATZ7Padb/8hf+MnkXzhHFz5KxvZtmaOqjsT9yiDNOkZSMGRbwrG6tnOYngThfdefe4+qhTOOSVoNKpUI9MLgbtFNCo1aj1iXgG/xo8DYduYQ4WnPFLYy8cbYeRNKiUenWmauHlldIukRyuroGllvolmioUKnUDK9ag1pX90u3cRxh2Gu9VVN0zxKjqX/hLz8bculu7hCX17rdSy7SNCXgfvYIJy7dI79nJNkRF2sVtObcx93hOnfiy5/trvBsWYuOmf7l0nNmw7UMLV2FEXhctsY5KJMm+Yj1HPpV6vylVg3kcRhcDjPkrwEHjVW3dshfw+vWv/mneKyhItqFg7P/nb/8sIrTN9PfrfD3EoTaNt2Wm8c5eMaLJ136nWOUC7VKOopj8XF2xONB0dBOEDp/q1UjlkMN9Iunz+NI/0o9FST5OmB56TaP6/rHFSC0z/rW02cfnn7vKFE9e/jHq1ul1n5w0WgUiuIjQUAQEAQ+CAEhBnwQ7KJSQUAQEARehYCaIr/zbPztP/j36ftwiykbeyeDV7ndC2W0tGXe5dqVawQmF5EXbo+JiR1+KQ2MNkE/8nKtsoe6zCiC7oSQWNA44YGvpquUhMCb3PSLJb+xj5ERwyPr+/jfqykNvcSO6f/Jv082wi608PkSirdivC5rejiedg54x+WRH+uO5QUrbjysHZGDYfTKtKo+GnPjCfa7S1x2HX0vGeMNv4uGnsoUQm974huRRV2P8gVhZ3h58W5iBDRUx7lxeN5/8Zef1nLeO2toOcfE7vO6pSW6Sx7h52iLa2gmhSl+2F84j0N4xSsJkJK6n5biZML8AwhPreJl2tRIK7V9dWRH3cHDM5i0qm6UE+qbI+8m3gsCgoAgIAi8KgEhBrwqKVFOEBAEBIH3TkBNWZApW6b+N/877yhej6rHmS17HeO0dGR4cW7PLg4ePYWZtRN3YvJplo0z4zuiGq2yn87aAgpLa2jqnsgveA1tVcUUFhZT3yX/BIQAHRg1lWG27Jn1P/zPrH1ciyx/cc3zCH4Te6ulKycQq0M72X3QGAsbB7zCsmjon4C/VDK664soKqmkfsSuDePaoumgtqyQgoJaOmVPd20Yt7w4OUECGuoeenB8/l/57ymGWAYVveW+8zJzJHqKI3E+uZOtu05gdeUqbkEp1PS92tIa3d21agU9jWWUFpdQ3f
7qfVK3NKW7qZri3Fyq2vtQTuTSlzVLnBcEBAFBQBAYl4AQA8bFI04KAoKAIPAhCaipCrXAaMZf+W7lBQLTx1u7+3p2auTdtFSXUVpaTk1jO72vMdCTNCqUA0sDJmKDNBD2rwvrf+mmChO57Qctq6E2yp4D8/7GN4tPcDOx4S2LN6BbH99aU05paRnVDW309E98hl7S6pZT6JZyTACWpFv+oUSlEiHWE6A2gaIaGpNucXrxP/jn3D3YR9a89b7zMmO0yj466ispKymlqr6Vrj7FxG3QPt22bzBc/2V1Dp5/uqREOXyJz+BJ8VcQEAQEAUHgnREQYsA7QytuLAgIAoLAmxLQUBtuw87Z/2DqVjvC83vfzVpXSYtWq/2EBuVvyv11r9fSEOfMH/O/4rf1ZgRmdgt/vS7Kz+46LS0p3pxb/g0/LT2GZ3Lnu+k7L+M69F0wckX/yy4U5wUBQUAQEAT+jASEGPBn9JqwWRAQBD4TAloaIq+we+4kFh29RWL1i9u4fSYg/iTN1NL80IWji37EYK8T0eWqP4ndwswPT0BLW5ovF1b9zKzN5twrEn3nw/tEWCAICAKCwKdPQIgBn76PRQsFAUHgT0tAS12EDbvmzWWbTSh573Sfuj8tpI/IcC2N8dc4vGgOG0zukDGRNfkfUSuEKR+CgJbWVB/Or5rNisPXSWybSP6ND2GvqFMQEAQEAUHgUyAgxIBPwYuiDYKAIPCJElBTHHABwwWrOXM7ifpngQFaeQf15RXUtXShUHbR2tJOV8/IPbs/USQfdbM0lN+/wu4lyzniEkP1wOSubm/1Biorq2nskKPqaaOtrZ1upQjD/qhd+d6N01AT78GxlYvYaRVCmVJngBZlTzM1lZXUtclQ9XXS3tpK56ts9fHe7RcVCgKCgCAgCPwZCQgx4M/oNWGzICAIfKIEtHSXxuFleZyT5reIz88lzHY3y1ccxDWmDLmu1ZKCrsY87tmYcOWaO36hwfgExpNX1Sm2envvvUKipyIJv8vGnDR1IzIrh+jrR1m7fCe2oYUDW7JJyh6aCx7gZGaDg5M3Yfd9CYrJoKxdzPy+d3d9VBVK9NU+IdTxLCfPOnAvNYcE3wsYLt3IeZ8s+iSQVH20lcbgedmGSza3iIjwIzA8iYIW0Xc+KlcKYwQBQUAQ+BMTEGLAn9h5wnRBQBD4xAhoaomxP8DSb/+Hv/7vN0ybPZvpP05iwV4noor7niUUk9Co+ygOtOTkCTM8Ygpp6OxD1lVN7uN4ou7dJSIpl6o2xYdJQPaJuWTc5mgaSHA7waof/pe//s9XTJkxixm//ISBkTXBeT1P+Usa1P1lPLh6DuOzDoTn1dHZK0PWUUnqPWfMz1zAMSCZ8g6RD2Jc1p/aSW0zGf4X2fTT//LX//4nv06dyawpvzJ33WluZ3Q96ztaNLIq4jwsMT5qSVBWLZ09/Sgm1FW09NZlE+luwbmzV/CJKaBVpCP41HqTaI8gIAgIAq9NQIgBr41OXCgICAKCwFsmoG3j8fVDLPv2S774f//Cl//6Bf/x9VJO30qiVqYfVq6l7aETp05ZcftRNSoUlIff4Oad+zwuzSfM3QP/iAwaxI/+t+ygEbeTOsjwOsXaSX957q+vFnD4WiyV/Xr+0naQcsOU8xedidLFf0s9VOakkZKYTEqEG2ZnzHG9n4dICTGC76f8VuoiL9iKbb/+O1/8v/+Pf/uXf+U//j6HndahlPTpzfxru8gOssX0tCV3CxUTJiL11VGSlczDRynEeV/mookNd9I/0E4FE7ZeXCAICAKCgCDwrgkIMeBdExb3FwQEAUHglQlokNWnEWC1j7XzZrNo1W4sbsZR1Nw/bP9tqbuCnIdunDpohvvdZGq78gmwtMHlzkMqFDJyfC5h5xpIav1ENpJ/ZSNFwSECWmSNWYTYHWaDwWwWLN/BBdcIcht6UQ9pARK91QUk3zjL8YuO+MRX0yfroLWzk65uOcqeDO44OeLum4LIGTcE9jM40KJoLSD6ujFbFsxm/pItnHYIJqOmB5Ve3+lrKCXd24yzpta4hFfSL+ulq7ORssJCSsvKqCzKJ7+knt6ha4ajk+R99HR20ClT0pd/n9tOV3GNaxJLioZjEu8EAUFAEPhsCQgx4LN1vWi4ICAIfJQEtEr6OltoqK2lrqGFzl4F6sGJQk0j6SFeuLsGkJj/hGC7s5w8aU3AQ3+unLXkml8KjRo15fesMb/sSVRR/0fZxE/KKK2K/q5WGutqqatvpqNHPuQvTXMWUb5uXPeJIyfjAa5mxhw39Sa9qh2VVkKSQFWTQHDAXSLSm5lQ9PcnBfEzbYxWhbynnaaBvtNEe7dsoF/oaGjbCkkMcsf5ZgQZGdH42p3mD2N3Ektb6CmPJ8D1MpbXA/G2u4qL+z3yZGMwlCQkrRYJNY1Pogn2CyCxXoQMjUFLfCwICAKCwGdHQIgBn53LRYMFAUHgT0tAUtHd3EBDYyu9SgW9LbXU1jbQ1pqEh6kV1/weU69RU3rXCgvbW8SUTDys+E/L5iM0XFL30NrUQENzNwpFH20NtdTWtdKn1AysCZcUzRSkJpKUVkKrXERxfIQu/HAmqfvoaGmgvrETuaKfzuZ6amqa6FWoaMsI4rqlKQ7B0QTfdsfVI5qiyjzi7lzj6hXboZe9x10e5Teh623KtjKykx+SlNeETHS1D+dXUbMgIAgIAh8ZASEGfGQOEeYIAoKAIDAeAUk306ebUtb9e3YsaRqIdryCq18MpbIe0m/a4nQrhCyRsX48lO/h3FNfDfprmO/UHVTlZ5KZU05LXxc9fX30ywZDQN6DaaKKj5zAGH1H6iH//i2u2bkTl51AiI8HHqEl9Mpk9Ha20tLS8vzV3kWvXI26u57SnAyeFDXS091Lf28PfUIQ+Mj9L8wTBAQBQeD9EBBiwPvhLGoRBAQBQeAdElDTlhmM1w0f7gb7c8P1NveTy+kWP/jfIfM3uLW2i+IIF87vXM+GTdvYYXQYG+94SnqEGPAGVD+LSyVZMe7cffAAACAASURBVNH+nrh7JVKd8wAvWzMu3y2kWT9hpR4JqaeSxNsW7Fu3BsMtRuw6dBH3Bzl0iq6mR0kcCgKCgCDw+RIQYsDn63vRckFAEPiECGiVfXS2NFBbU0NdY/vAjOAYOcU+oVb/WZuiQdHTRlNdDdU1NdTU1NPS0YtCDND+rA59f3ZrFfR1d9HZLUfd30l7cyPN3Xp5RUZaolHS19FMfXU1Nbq+VqfLTSBH6IQjQYn3goAgIAh8ngSEGPB5+l20WhAQBD5FApIWjUaDVitkgE/RvaJNgsBwAs+WEgz/ULwTBAQBQUAQEARemYAQA14ZlSgoCAgCgoAgIAgIAoKAICAICAKCgCAgCHwaBIQY8Gn4UbRCEBAEBAFBQBAQBAQBQUAQEAQEAUFAEHhlAkIMeGVUoqAgIAgIAoKAICAICAKCgCAgCAgCgoAg8GkQEGLAp+FH0QpBQBAQBAQBQUAQEAQEAUFAEBAEBAFB4JUJCDHglVGJgoKAICAICAKCgCAgCAgCgoAgIAgIAoLAp0FAi
7eth9Ju4AY/npc7RGIb6NPwOLmTMR335Yv523HyeEPL0DsdXT2f0sK/Zd/sFVSqdrYH0gDNsX7udU7fCyS4spignjnunNjJ/yiw2uz0ju9GOoyEJvyO72XP8JlHFVVQWJ+F37hwed8Mo6WJ1NMVqoD7jHkcXf87Hfaax41qUs6OnYMdcGY6H615czweS39w+OqLjNaFexa6Pu8GeWYMYPHMPtxObsalXtkuCcFs/mf6DF3D8fhbqAO7GFH9O7tnF8RvhFFdWUpJ0D/cLV/ANKcau6MkPcmeTyzw2nfYnubSc3Kfu7Jg7g8Wud0lucOC8G0DmfY4vHUufz0Yzd9NZfB4FE+Rzik2zPmfEtG14RlW0Bg/aIiKuubJt6xFuhGRQVFJCfvxdji8bz6cfdawZYG/K4qnbTjZsOoJXaCZFxUXkxT/AbdsCpk9fw9mH6TR0YeYU6CoMUAwUhl5h29y5rD9+l6TScvKCrrBn/nQW7LtNYnerFlprSXt8A/ezHjyOzyYvO5kn7jtZMH4MC/bfIr7ttdOcH4bXiQMcuRhAQl4xJVlBeOxyYewIF/Z7R1NSm4iv63xG9JvMlqsxNNksGJrLCPfYydf9BjJn923SzOoMB9VnGwvnrOKoTwKl5XmEeuxlyTQX9tx40Roa2ZspDPfm9H5XLt6PJ6+4hJzga+yfO5pR3+zhdlw52rpCom4cZOnoPoxbeIQA5yqKdprznnNt73o27vcgJL2Q4qI8kh5dZu/iqcxYdpR7yXWYTE3kBl5g48TP6DNsBhtO3ORh0DP8z+9g4ZihTF5zgdCSV6+yv2x5ipHCEHfWfjWRJYcftgUcL//65je2GuJv72Xx9MUcDSjoFEzC7zUMUCxURHtzYOW37PJK4bUsAGxVxN3axeyxs9jpnfZKud48CfmNCIiACPReAQkDem/dSclFoNcLKPoM7u5xYerc/fi8qMTsvIikYG1K5sam6YwcOIP9dxOpeqf7yinoMu9ycNkydl4JJl/bOtTb3pCC/8ltbDvmS0KxFn2mHwfnTWH+nlvEVahXz9UvK00pXmyfMZyh03bjG1+B2WEi+dompvcbxFxXH+JrDVgddrSxl1g/qT9DZh/E50U1BpuCQ6eWdxrDx63l2vMizEp7GNCXGTuuEVbUiE7fQn1+IKeWjqffb8axySOMfI2OkodHWDK6P5O3tYUBanHsdYSdXcHEz0ay9IQ/qQ0GrNYWkj23MnPQcJacekyWVkHRJHJzz1K+3XqZsDwtChYy7+xmzohJbLwW3tbBtFP//Dyrx6shxAnnSvkGixVtsjc7Zwxh1IKTPM7QopiLCD67mnkLduEZXoaxrS4MGXfYO3sIg9rCALshh4dHlzDjm21cCy9t2w5smgx897gwZuBktntGU9bllVsbef6uLBg+nIXHAtqmCSi0pPlyaP1adp+9x4usPAryc0i8c5Bvx3zCZ9N34xNXgUmfz8MjSxj78QiWHL1HckU1FcWFlFQ2orOYyLizF5dBA5i18zrRVXpnXekSvdg1YxCDpu1wdkb1VgWHPh3fvXMYNXop5x7nOlc4d6gds7JnnFv1BYPHreeqGgao9eBoIPrKRqb0HcaCA7dJqNNjsWpJ99nPvGFDmefqR2qzA2N+IGdXzGTOBndCio1t0xZsNGfd4/CCMQyZtIErYcXoCh5zatlMvt54iYhyA4pipiopnoSUTNQcqMsvSy0vrm9j5uChzDvgS6rzSqsDTbIfV85fwje2ptPV6Vf3YCsM5OxSdb2H1jDA2dbNGdw76MKwIe1hgJ3iwDOsnjWDDW7PKTMoKJZqUhMSSE6vwK60kPHgJOsXreXUgww0CljzHnN6yVgmLDnB47zWTqGj8QXe26czaNDX7PGMpVZnxqrL4sGRhYwePIu9NxPQKBbKo6+zY+FcNp55QqG+9aqxYszjyclvOxYQtBspCr3Ixq9nsfbUYwratsOmJf/xSVaOG8gXK84RVNDNrS67DAO0ZD8+y6ZFqzjql+o8D1vBMy4sH8+4BYd5kNNV51ZBl/sUtx1r2OjqTUym2jZzSVYDpa/68PGENVx8Xo7dWEzE1Z2sWXWAWwkNraM0HC0UhHtxZOse3B+m0WQtJezyOiYOmtIaBjhP3ULu4zMsHTaoIwzQ5vDMbRPfLj+Ib3ITCjaKgi+ydsJo5u/3I8uqYCyJxHP3KlbvvUFCgxrngKNFXaX/CDt2nedRagN2q5GqKC/2TB/E5/MPO8MAxVRKtOdW5k5fyhH/HNpZ7bpCQtzW8lX/0Sw99pBcg53mlHscnTOIQV+t51J4NVqzFUP+M9xXjGXol+u5Et3NYqL2KmJvbmfa0Emsc4vg5cCAV5tm208KxtJIvA6sZc2+26TUW17Z6vcZBiiWCmK997F6yQ68ktX3zte+lBYyH51k0chRLHR9SEl3r8/XniY/ioAIiEBvE5AwoLfVmJRXBN4jAUvWbWeHdMyyi4TmtQ99V+fB6km9rnbEP2bGPn+Sat7lk5iRjFvb+Gb6cs4+yey4FZ3DRHNNOeVV6jxvEzk+u5gzZBTLzj8jt9MCdoo+Fa8t0xn88RT23U2gxm4jw7kI4EhWuAeR17Yaty3rNrtmDmbU4nM8y24rsy0Pvz3fMGL4Ys4FZqFrDwMGDuPbc0/JaT+OQ0P81Q1M/ew3THaOBLDSEH6OVRMGMaVzGKBoiHZfzVf9J7DlRiRlztO3UfTAlYUjhuJy6D4p6qdsm5ba4jzyiqpprK8kLz6YK1u/YfQno1h5MZh852drheaoi6yfNICJGzyJKG21tBU95OiCUQz/2hX/5EYshQ85tmgcX610IzivY6xu6wKCn79cQNCcd5/D80bw+YITPMzoNHxWMZB5Zw8ugz5m8lYvYrrs3XYRBihGMn32sXj6NJZuPY7HtWt4XrvG9QvHcd2xiU2HPQnNqMHmqOb5uVVMGvAl2zyjXus828i5d5AFw4ax6PgDMtqG/9vy7nN0/ghGzDnM/dS2D/y2Qh4fW8KYYS4c9kt52U4cTTF4bJzCsPGdwgClmfjrW5kxcAxr3IIpcl6JtlP69DQrxwxh1q5bxNebKXx8gmWfj2DewbukdLoHomLM5r56NfijCWy8HEZpXQp39sxlTP/hzFxzDN+YAmqbdBgMxteG+Xd+kdvRZt/n6MLRDP1qM9ciy7E4Gknyvcylq34kdnt5HGxFgZxbNoYRncMASwb3XOcy/GUY4KAp1ZeD8z5n0NCprDlym+i8Gpp0BgwGdXqDDX19OQW5BVTU1lNblEr49b0sndCPkXMP45/ROqFaaUnizp6vGTbyW04/ym+dV24v57n7WiYNmsxWj2jqLGWEXVzPlPELOOyX3nHrNnsFYeoCgoPapgmYSwhxW80Xw2ax2yue5pc9NQVT/mPOLB3Jp6NXcP5ZSdfz17sKA7BjaKigMLeAcvU8itOIuHGAFZP6M3z2PnxTu5gYrpgoCDzLulmTcFl3hKvtbdPtFEd3b2bTgYsEJJSjKwrh4po5uGy8TNTL+R4KFl0j1eXl1DQZsNtKCbuynkmDO4cB1jfCAOwGmio
Lycsvo7a+jtL0SLwPrmTqgMF8vfMmKSYLJc8vsXHObDa6h3dML1Es6BurqSivpsmgvsYVWlLvccRl2MswwFYWgefGiYyYvIGrMZqODrBiojjkIutGf8TnC4/wqNCKPiOAU/OHMeKbffhltQ7BsFfFcGv7ZIaOX8n5kOqX66Z0bq1YCwm9tIqx/aewxeMF+lf++OoPiqmSpPvuHN53jscZ9W1hcMc2v78woHWKwM29K/l22w2SO71mO45uJC/IjRUjBjJrixfpr+YUHZvJdyIgAiLQywUkDOjlFSjFF4HeK6DQEnmeleM/YcjiCwTndOpYYqcq8AiLRn7AmLWeRBW/wycxRy0hJ5fwxRdLcXuW8+awTxVKaSHGbRVffjKQxWefkt1pHjOOKoKOLWLsByNZ5xFOkaWbMCD7DntmDeHzxec7hQH53Ns7h5HDFnD6sRpEdKwZ8OqtBe1UBB5jyagPGLv2GuFFFhrDz79zGFAccIhFI4cyRx3a33bJzWGoJiPkLjc8bvIoIoFH59YzfeDnrHQP6jIMiGwLA+zFjzi28PPWMCCpAU3sFTZ9NYAJa68SXtwx/tpe/JgTL+8m0EDLCw+2TO7DQJejBKS1dHQocFATeo7V4z5i5NLzBOV10bmiizDAUU+k+1qmfbWAQ97hFJaVUdb5UVVPi8GCooYB51cxadAUdnjFUP1KPtQpDDjRKQzID+DYgpGMdOkcBhTx5Pi3jB36Da6+yc4rxGrT6DYM8HwzDCh7doZVY4cwc+dNXtS1kOStDvXuyzf7fEhSL523fzlqiVQXxvt4KEuOPyTbYKA65SFnVn7FsE8+YdCY2Ww65U9CaXPXndq2/SimCsLc1zF58BiWn3xCdnE8flevcCMgFc0rDu0Hbv333cIAsBtqSH90lrWTh9D34/6Mm7WOU3dfUNLc1g4UE3W5UQTc8MDrXihxT66yz2U4o1wOcS+9NTjqOgyoIOziOr4crIYBUdQ0p3B3/zcMG7mIEw9zO0Y0qHcTuNyxgGCNNo17ri4M+XQaOzzjaOpMWh+H9/ap9B3oguvddDpiq07n3mUYoL7+TdTnx/LIy4MbfiHEBV7n0PyRjJy9F6a326UAACAASURBVJ/ULvbkaCLx9l7mfjmDLe5BFHRul+r3leoigSYaE304OPsLZm6+Rlzn+u9UJOzvGAY4i9lAYdxjbl29jl9QDM+8jvDtqKF8vcObZIOGFD9X5k6YyuYrMTS1Dq7ofKS279UwwJ+jLsPbwgArhqxHnFk4mD4T13E5sqFTZ95BU9JdXGf1YeCM7dxMNnQdBlTHcnvHVIaNX8G54G7CAFsBIZdWMqafGgbEv1yh/40C2lsojgvA8/xlAhLK0L62wKG6/e8tDGibIrB/+SK2X098GQi+WkYzBSGXWT2iH9PWe5DYRfN4dXv5SQREQAR6p4CEAb2z3qTUIvBeCBjjL7Nu4md8NHU/95NaFytrPTEHdcEn+HZ0X77a5sOLio7OabcnroYBJxYzfuBk9vjEU/3aqF/F4UBRDCReXc/Uzz5g2h4/EjuPOHDU8fz0t0zo+wXbbsZQbvt9hAEO6kJOs2LcYGbs9iG+2vq9wgBFl0/o5d1s3Hoc7+AsqpvVIex7cRk25rcOA5pjLrPxyz4MX3yWp9kdC5W9HgYYUtTpBf35cNI2vKMrO60N4KAh0p11E/u3BgpdTYLvMgyoI8JtDZOHTWHHjZg3681kwGA0Ybb2dBgwu4fCAAPpPvuYO/DXTFx/hfD2hdrUhupoJNZjM9MGjGbV+WcUWdT7mLdQUxjPo0u7WTR+EH0/HckiVx8SKrsKT9pbu42m5Nvsmz2SEbP3cOG8Gx4eNwnM0nbq0LVv2/Hvu4YBimLH3FJLUcJjru5ZxKRBn9J3+DwO3oqlwqSnNOYWRzZt5uDFR6RVNNGS/dg5/eDz3zYMaEnBd/83DO4/mwN3Os3Tfi0MqDVm8/D4Qob/agyrzgW9sp6Cokngzu6ZDB2xmJMP8zoChY7TfnlrwVfuJqAYKIvz5fiWTey/EEBqeSMtuc9wXzmOUW8JA2YPH8+qM0GUvf6eYlEXxNNT9eIOB2YNZczi4zwueC24VBTU9x7HO44MUAwVJPqdYPuG3Vy4n0x5QzN5wZdZP354Rxhw9yAuQ0ey+HAAbx6u9XitIwM6hwE2zPnPcFs2gg9GLOLYw8JOAVRbcDBnMCPm7Mcvy/y7hwH2MiKvb2TSoK/YeCmm06iOThXkMFCVFso9Ty8eJ5SgaZ0f1mmD1m9/X2FA6xSBvaxauBXPhOZOgWanIihGcp+5sXzEIGZtvSkjAzrRyLciIALvl4CEAe9XfcrZiECvErDXhHJ6yRg+6zef04GZaF5e5bJR6LcHl7FfsdEjkmJ12Lf6odr56OYUFSMZ3luZNfBjxq+5xPPClpcfdhVzFanxyZSX1lDz/Awrxn7CQJcTPElv6uhM2Qp5sG8uE79cy9WwIgzK7yMMsFHof4CFU2az62YslWbH9wgD7NTHerB1+ihmbvUkqkydO2wj23cvLkNH/5ZhQCOm3Hu4zh3GJ5+v5lJI4csFs9QRBMcXjWbYrAP4JTZiq4vi0tpJ9O3zNa53k+m4E5idkkfHWDpxEqvOPiP/jRW51HrrGBmw4NgD0p0jM0zOaQJzh3zEmOXnCMpt6VQnTeREhhOTmEu95UcaBtTbqI+9xrYpfek/bRe3X9R1Kn8pQWdWMvWLJZwMyKa5PIvMzBwKmiwYNVXkRXqxf/44Rk3bxs2oqm4aduuvHfpCnp5azsTBwxg9cTknbgaT3z79pJtn2oqeOqcJDJuxm1vx6vxz9U5uqfju+4YhL6cJ2KnMziIzK48mi5Hm6jxivA+wZPxwpm+6SkR6In4H5zH+y1WcCyxwrrFgK2hdi+C3DQNqzcUEnVvJuA+HsfjI/Y7pM2oY0HmagK2RpFt7mN3vU6Zu8iC6U2hnrwjj6vppTJx7gHvpnUemdELoYmSA0pTmXL9gwsRlzhBBXdLCXhTExZVjuw8DMFMQeI6Voz5kmMsB/NI6vV+oV7YTY4iKSqUkNxj3VWP5dNA37LuVTH17aKBYaSzIJC05k3JdKeHqNIGBk9l0Oap1Lr1iIjvgJEsGty8gqKDJfMSpJeP4cvFhHuSqayLYKQm9zPpxw1rDAKOFktBLrB/7MUNm7eJWUvut+9S1RxspzEolK6NMrenXRgbYcGjSuH94LoM/nsCa88+pejmqxE51jDe7Zo7jm+3epDQ7fvcwQGkh/f5RFo4ez4pTQa9N53E2QGqzInjo7c2j+GKaTO1v+grG2gpK8vNpz8V+P2FA2xSBPctZvNmDhOb243dqP+q3iob0gGMsGDWOpSeCXo5Eav1/kNJ1gPDaLuRHERABEegNAhIG9IZakjKKwPsqYGskxXsHXw8dyte775BY2bogmGLIwW/vEuavPE5Aai0mfQGhHgfYsHwrFx6lUtm+8tUrLgr6HH9cXYbz6QdDmLHmCJ4PnxMVFshtt7NcvRdJQb0RW2Mqt3d+zagh09l98wUVzrUAFAy59zj47TxWHblHao
26sKCFVM/NzOg/nOVuQeS2z0PPuMXOGYMYOk9deK/tqpI1h7u7ZzNiyHxOP8roNE1gKEvOBpLd1mlTtNn4u65h9XZ3gnObsKpD64NPsXxsX77cfJOY9qvKjgYizq9kUt/xbPaMeLlmQJ7ffuYNHcDsA34kN1goeXiYxSN+w2CXw/gnVVJfrg7DnsfoDwez6OR9EuuMWCxWGiPcWPNFXyasv05E20pYtnx/Ds0dzuCZ+/FLrMduyOPh4QWM/rgvk9deICinHpPNSMXz86yZ8Akfjl7JmQfp1LfUknb3IAtGDWXa5mtElbaOIlCMBTw6tppFSw9wJ76qYz74K3Vkp+zpSZaPHsjUrTeIKa2jtqaW0tg7HJ43nI9/M4zZG07jGxJH4oswHlw5xcnzdwjPacBsqyTkzAom9J3IlmsRtFO17t5Kls9+5g4ewsJjD8ho87bl+HNk3nAGzzrA3aS2jpytgEdHFzFm0Ncc9EnqmCZQF8HldZMYOGYNl9VV9NUdK03EemxiWr/RrHELalszwEbR4xMsHdWf6du9iKu1Y9dkOxdVHD/kSza4h1LsPL6CsegpZ9cvZOkuT2LKDVhzA7nteQv/hDpnUKUu2vj01ArmLtjF7bj6V6Te+EGxUhfnyY7pg+j7xXouhxY7O+ZvbNfpF47aKK5t+ooBA2ez72YCVY015EXdYI/LSD7pM42dnrE0WG3kP7uDl5cv8eotKtS7KxQ/48LqOSzadp3IpBAurZnAJ32ms/N6NOX1VWQ9PMnqCZ8xaOZuvKJqMZrNWDTx3No5g8EjlnDqUV5rEGcrIfj8Ssb3a71LQI3NSEmIG+vGf0rfMUs4fj+VGoMNU3UcXttnMOBXw1m47xbJNRrq1LtHrJjA8AkrOBOYj07tsykmSp9fYsfihWy7FP7q/eo7nTeKllTfg7gMGML8g+qie2CvjODaxkl88ukUtlyJpKyhmpwnZ1k/qQ8Dp27jWkQtRpPptZEGCobCENxXjePTXw5gyvLDeD+NITEhisc3znP6zHUCU2swGsqI8tjM1M9+Tb+xC9h5zpeg8AhC/K9z6eINHsWXo7fWk3BrN7P69Wf6Zg9iypuoL3zBnQOLGf/RZ3y17iIRtSaqom+w/cuP6DtpPZfCSmioyeXZ+fVM7dOXqRsvEl5jQJMfxo2tU+j/q88YP287532eERERwgPPS1zyDCCxXKcOS6EpyRfXWQMZOfcg93NsYNdSFHqJjZOGMl69w0B268gSxVxBjOcels1dy/mgIvQOdVHP+xyfO5jh3+zlbvuaARVReG2eyMCxyzgT1HlUUGd8G/WJvri6TMZl+03S2t43nVsoFuqzgri2bw2rNuznzMXreHt5cdPLC28PN04eP8/1gGQa20IKRZNKwJE5DOg/ne03knk55kIxUZX6hMt7t3P0eigFnYJHRZPGw6MuDOg3ja2enZ7TXkR1ikCMN/uWLmDL1RedAuj2Ddr+tZYRfWML33yxgCMBxc67blhqsgi5vJvVq/dw+UkmTS/DlNeeKz+KgAiIQC8SkDCgF1WWFFUE3j8BBVN9DqFXdrL8m/ms3XMeLx8frp/ez+59Z/GNzKfBaMfRGM2lNZPo88//yvBlboSptwjs4kuxNJEX6sGu2aPo/+FH9BswnAlTFrD11F1iCxowqh/eFBMNuaFc27WMefNWsffsDe7eucbZ/Ts5cOYOkbn1GG1aqrJCuLDySwb9/JeMXrQf75giSguzCHdfx/QBv+AXA75mu1sQ2cWFZEVdZcv0wfzmX4cyb8cVwvKrKYq9zeHFE5k4bQnbT1zB29uTy6ePcOzsDZ4ll9FiaqE6NxrvHXMY/cE/8cn45Ry+/YKK+lqq0lsX8+vzzx8xaeVR/FOKKc6J5vrm6Qz7xc8YOH0zl0PyKUl5xOml4+j3YV9GfTmfjQcvceP0VhaO6UufoTPY5hFKUnoSj48u5otPfsYnE1Zw9G4KxUU5xF7fyteDf8m/9pvOFvcQCuqaqM9+xuXNsxk3eAijv5jFomUb2bdzJXO+GMmwsQvZezWE7Got+sZ85+Jrq13msWr7aTzv+HDjnCt79p1y3gO+1rmAWRcVhIIh7xEnloxlYP+xfLPyINefpVJWV0NW4Hk2TBlMnw8/YeCQUUz4cjYrd7nxML6IxqZq8mO82O0yio//8QPGLdrDtefZVGnMKIqe2rwIrmyYybBf/IIRLjvwCM+jpCiXGI+tzBn6S/6171Q2nH5MRmEhObE32D1nBB/9fCBfb7xAUHYlVRX5vLi9jyVjP+bnvx7Nkj03iM0vpijjKWdWTKL/zz5g/OID+CQUUJwXz63dcxj1y5/R/6s1nA/MoVZrRFMUze1Da1josoztxz3w8bmB2+E9HDjuSXB6NXob2HIf4bZtBUu2nMP3WThhj69zfPdujl8PJa+p/XJyV26tv3O0pOCzfzWrt10mvFxd3O8tX9Z60vyP8O3YAQwcPIaZC9ZzyO0cB9fNY9KwSSw/cJOYghrSn7ize8UCtp6+w7OwMAI9T7Bv1xGuBeXQ0FxCxOVNTO33EX0Gf8G8VXu5cPUs+1dMZvCHA5my+jyPY9JID77A+in9+MWvRrFglzcv8oooSPLBdcHnfPRPn/LlspM8yqyisbGQSK+9LJowjGHDxjNr7lI27tzB2vnTGT9wJC6bz/MkpZxmfTOlcXc5sX4B85Zs4tjl29z1cuf43n0cv/qE1Erdy5E/nRUUUxNlKQ85tXoqg372C4bP2Ih7SBYVVYVEX9/GjP4f8tmgCcxduZtzl89ycM00hnzQn69WnuFhctUbAYv6nlIQfp1ds0fS74MP6T9oJOMnzWTpllPcjcyl3tnWrWjLEnlwfAVTBn3MRx/1Y/jnk5izch/XnqZSqVXr1kZjlnrXiYkM7jOQMVPns27vWc65bmPpF0P5YsEePMOyKcmL4eaOGQz+4COGjJvN6l1nuHLOlU0zBvFJ3y9YedKfpJI6KpIecFpde+Kj3/Bp3yGMnTibVXuv8DSlAq3FQGNZMg+Or2V6n5/x68Ez2Oz+nLzKZqwt5STdP8PWhS4sWevK5Vu+3Lp0goN7D3HpQQLlWivGxiIir25lzqCf85shM9l6NYrswkLS7x9n5bgP+fmHY1h08B6pFV3f9tXWlMmjk2tYstyVBy/XDrHTXBDO9S1TGfrBL/n1rz+h32d9Oh6f9GPC/D14xzdiV0xoKrOJ8j7K+sl9+Zd/6c+Ulce5H5tFeaMJHBoyHp1g0eA+fLnqAuHVDuf7uqYqm6ibx1g/pS8//5f+TF5xrOM5bY2kdYrAHlbM38TV2E4jPTo3IrW2ahLwc13OvJVneF6hTuFR0OWHcmnFCP7tnz5h8lo3Il5duOS1PciPIiACItA7BCQM6B31JKUUgfdYwI6xsYL85Fginz8nPDKW+IQUsoqqaTa23h5QsWooy4jhmdtWNh7y5nlm12GAimQ3NlGRl0T4Yz987vjxJDSenIomDJ0XqLIbaarIJzU2krDn4UTFvCAxJZPiag1GmzolwYqxuZrC9ETiIiOJT82lrF6HXquhtiiDp
NhIImOSWsuo06KpKyYrKY7oyDhSsoupbTFi0jVQkZdKXHgowUGhRES/IDktk8LKBnQW9TKnFWNLHSVZycRHRRAdn0ZeWT0GswmjppL81HhiIqJJSM+nskmHrlndNom4yAhikzIprtViMjRTlR1H0H1/AgLDiM8qpboyn6SwAPz8n/Eit5omTSNV+WkkREcQE59OfkUjOl0z9SVZJLefR3ENWpMNxaqnoSybF8EP8L3lw4PASFJSE4mPiyMuIZPCigZ0ZrVO7Jg0VRSmxhGl1llEDC/UOiusoslg/c5OqmLWUJoazpMHDwmOTKW4tgWTXcGqq6M0O54wtd7UYz+NIrWwmhaTHcVuQltXQnbyC6LDo4hPyaa4phmDRU13bJhaainOVG0ieZGSTUltCzpdC3UlWaTERREZk0hmQRUarZbm+lJyUl4QHRFLcmYRNc0GjAYt9aU5pMZHExn1gtTsUuq1OrQatQ0kEBsRRUJqLuUNWnQtDZTlpPBCrYfEDIqqWzCpbcZuorm6iPQXUYQ/DyMyOo6E5AynmXpLQ/VL0dZQkp1KYko6GWmpJCcmkpyeT3m9js7Ns7sXu7pIn++F07h7R1P98jJpd1urv3dg0lSQHRPIvdu38Q+MIqOkmPzsNJJik8gqqKRBa6K5tpSc1ARS09JJT00mOSGJjLxy6nUWFMWKrraApJCH3Pd/RGhsOkUVFRSlR/PE7x5PojKpqNegqSkiMzGmw69Fi7axnDyndTSJ6QVUaYxYHTaMTZXkJTzn8d3b3PF7THhiCsmJCbyIfkFGfhl1LUacpOYWaoszSIgOI+x5BDGx8aRktHo5X0JdnbrdjL6pgvz0RGIjIolLyqSoWoPBbEZXV0hy6CPu33tESEw6heUVFGfG8tTPj8cR6ZQ3mTqmeLzct4LN0Oh8T4l8ep+7t+7g/zicpLwKmgyt70/OTe1mtLXFZMQEEeB7B1//p0SnFlHbYn65T4dZDQBfEHzvDnfuPiYytYjigjwyEmNJzMinor4Fo0lHfVEK4Y/8uf8wiNi0AsorismOe8o9v4dEpJXRpAakZi11JRnEBgfgd6f19ZJWWEOLc/69HbNeLXM6idERRMUmkVlUQ4tBDSUcWLR1lGYmEBP2nLDwaOLik8nIK6VOa3GW1W7WOV87ybERRMclk1VcR7NWi6Yyn/T4KCKj40l1nn83r3WHkaoEX07t3MFx3wxa12lVsGhrKc54QVR4OBFvPKJIzCii1jnqy47F0ExtSS4ZCdFERsaSmJ5HeV0zerP6mreirc7hRXAgYQmFNDhvPWvHYlSfk9fxnLTOz2mrUKuG8sxYwsOTKWnpboqAifKY25zYvIFj97NpX0fTbmigNC2cWydcOXToAsFV3Tz/ZduRb0RABETgxy8gYcCPv46khCLwhyGg2LGajBj0Rsw2+5sdSnMV8b7Xuf04jqL2T2fdyijYrSaMRhNWm+PNfbU9T7FbMRvVBerMWO2/nw926j3sTQa98xi238cxFDsWdZi2td3Mgc1iwmS2YPudTknBYTVjNBgwWew4HHbsdjuOri5DKw6s5rY6s9pfdnq6rZbO7mZLF+ZqvZmd9dZxPm/b24/r74rD5mxTeoPJeX6vsL1c98LudDOa1Hb3yhbfcTI26hN8uXrxOg+TG7q8Kt7dk51t0GjEZLHhUP9TF7TrVKEv50E7Xw9GTGYrrxZLrRcLZrP1ZZtSbFbMJnVhx+5fX92VR/296mRxLg5pxuZwONuY/dWDtj1d4eVryKS26Xf16uro7efR8dpwvge803m0lcNoxGxpf629eYzW8+rKsG1bR+v7nNF5LqqDA7u6uGnnXSl2bBYzZmd9qX9ofV2Y3mgvncr08vXfeUff8b3iwG4xYdCrr3Nb16/v73j62/5kN1ST9uQ6F87fILSw851i3vbMd/y7amS1Ot/f3/EZrZu12b7Zxtv3omAoTyTg4hGOXg4ir0ldi6Xjy6YpJPbhLbzuvaD6u9b87HiKfCcCIiACP2oBCQN+1NUjhRMBEXAKKCYqUyMIDY0lu0pLt1cFhUsE3icBh5bSpCD8rntyy/sKJ/bu5/SNUAq1v1PK8z7JyLn86AUcmDVlpD1/wN37oaSV6985LPzhTs2BsTaX6PueXPd9Tmb1q1NRFGsTpRnRBAXFkldn6HQnlR+uxHJkERABEfi+AhIGfF9Beb4IiMC/g4AdQ1M9jc16nKNE/x2OKIcQgR9cwFJEsNsGpg/4jAGDxzF/22WCsxuxdL5U+YMXUgogAt0J2DFr6ynJyCArrxzNj33BPYeOurJskuOzKK/XO6eqvHJmDgv6liYaGt9tWs8rz5UfREAEROBHKiBhwI+0YqRYIiACIiACf+ACDj1V6c/xcT/LBQ9/ojIraZEk4A+8UfS201ewqeugGIw//hBLsWE2GdC/Zd2T3lYDUl4REAER+C4BCQO+S0f+JgIiIAIiIAI/oIDDakLX3IRGa/wt1hf4AQsshxYBERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOO
shcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwhIGNAzjrIXERABERABERABERABERABERABEeg1AhIG9JqqkoKKgAiIgAiIgAiIgAiIgAiIgAiIQM8ISBjQM46yFxEQAREQAREQAREQAREQAREQARHoNQISBvSaqpKCioAIiIAIiIAIiIAIiIAIiIAIiEDPCEgY0DOOshcREAEREAEREAEREAEREAEREAER6DUCEgb0mqqSgoqACIiACIiACIiACIiACIiACIhAzwj0+jDArihYHfIQA2kD0gakDUgbkDYgbUDagLQBaQPSBqQNSBv4YduAzaH0TE/932EvvT4MuFhsZnKcntHR8hADaQPSBqQNSBuQNiBtQNqAtAFpA9IGpA1IG/jh2sD6DCOWXhII9PowYG26kf8a0MxP/DTyEANpA9IGpA1IG5A2IG1A2oC0AWkD0gakDUgb+MHawMBwHSZ77xgd0OvDgEytnSc1Vh5Uy0MMpA1IG5A2IG1A2oC0AWkD0gakDUgbkDYgbeCHawPRjTZ6ycAAen0Y8O8wlUIOIQIiIAIiIAIiIAIiIAIiIAIiIALvlYCEAe9VdcrJiIAIiIAIiIAIiIAIiIAIiIAIiMDbBSQMeLuRbCECIiACIiACIiACIiACIiACIiAC75WAhAHvVXXKyYiACIiACIiACIiACIiACIiACIjA2wUkDHi7kWwhAiIgAiIgAiIgAiIgAiIgAiIgAu+VgIQB71V1ysmIgAiIgAiIgAiIgAiIgAiIgAiIwNsFJAx4u5FsIQIiIAIiIAIiIAIiIAIiIAIiIALvlYCEAe9VdcrJiIAIiIAIiIAIiIAIiIAIiIAIiMDbBSQMeLuRbCECIiACIiACIiACRUdKCAAAIABJREFUIiACIiACIiAC75WAhAHvVXXKyYiACIiACIiACIiACIiACIiACIjA2wUkDHi7kWwhAiIgAiIgAiIgAiIgAiIgAiIgAu+VgIQB71V1ysmIgAiIgAiIgAiIgAiIgAiIgAiIwNsFJAx4u5FsIQIiIAIiIAIiIAIiIAIiIAIiIALvlYCEAe9VdcrJiIAIiIAIiIAIiIAIiIAIiIAIiMDbBSQMeLuRbCECIiACIiACIiACIiACIiACIiAC75WAhAHvVXXKyYiACIiACIiACIiACIiACIiACIjA2wUkDHi7kWwhAiIgAiIgAiIgAiIgAiIgAiIgAu+VgIQB71V1ysmIgAiIgAiIgAiIgAiIgAiIgAiIwNsFJAx4u9EfxBaKoqCzKZgVUP4gzlhOUgREQAREQAREQAREQAREQAT+cAV6fxigOMiss3C92Mz5oi4eJVaSTAqm17Zzq7RRZlFe6fg6bA5Sai2cyTayJFHPlDg90xIMLEk3cb7SRq5Zwdq5p6w4SKmxcK3t2Jer7VS8sk+Fao2VO6Wt5bqglsXYWpasOguer5X5QrGFGxVWgprs1NgUHN21S8VBUo0Fj/bnF1sI1TrQdvuE7nYEdVor1/OMLIjXMzZaz4Q4PQvSTFyssVNhfdVH3Yteb+NBueVV62IrEXoFXWeb7g/Z9heFwkYrN0terTO3Chv5ZgV7F89Xjx3QxbHDdQra3+rYXexcfvWDCFQZHeS02OUhBtIGpA1IG5A2IG2gB9pAtfF3+DD4g3wCkIOKgAj8GATegzDAjmeajk+ftfA3T958/G2QnjONDpocr27397Em1E5k61umQlOLlZNpeoaGtPC/HjbzF/4a/sRPw0/uafiLB838zTMto5JM+DU7OjqeDjsXknR89LT1uP8abyby5T7V6lVIKDIwKrT1738bbOBMQ2tZvNP19Hm9zIEt/MMzLb96rmNiion7zY4uO9gWrYU1MTr+ObDtfANbmJhjJd30Zuf9uxpZeb2ZdXE6/vlRM39+T8Mf3dXwx34a/vxBM/87WMf8fCsZ5naj1vOprjYxLULL33a2DtSxrcpO5W/z/x/FwcMcPUOCX62zv4s2ck/jwPJGwRVqakzMinzz2Fsq7ZT/Nsd+Y9/yix9KIF9rJ7rOKg8xkDYgbUDagLQBaQM90AbU/6/KlwiIgAi8q0DvDwMcds7Ga/kf/hr+w903H3/8QMfeOjsNDjvn4rX8z7btfvrcSGCLAzsKOq2FPfE6/i5Aw0+72Ef7fn96v4VfxbYGAnr1SrTDhmuMlv92r/W4/znSSFCLo9MVfYXIXD2/eNTa0f4jtSy1duoddtwStfx/3ZRZPd6fPmjh0yQzkXoF2yu1qZBcZOCzJ838Saey/pcwI7c1DkyvbNv9D3ajFdd4LX99X8Mft+3n/7in4Sd+rWVVy/BXgTo2V9ipfPn/FYXycgODnjY7g4N2l//gp2VJuY3S36ZDrji4nabjHx+8Wmc/CTbg2ejA/EbRFSoqjAx71vyyvM7j+2lZWGqj6Lc59hv7ll/8UAISBkgQImGQtAFpA9IGpA30XBuQMOCH+kQjxxWB3inw3oUBf/JQy7AYPbMSWh9fJ5t4oHWg6y4MsNu5la7nXwI0LzvX/zVIx5wME+eLLRzPMjAyqJn/1Nbh/+P7LXyRayVHnQ7QQ2HAnzzRMT/NyJYUPR89aebP/Fo7yH/6SM/BWjsNnYbAKzYbx168GX788X0ty8tslL3suH93gywoNzDsafPL8OMfoo0cLrZwNl3Pvz1qDxqa+bckM9GG9hEHCk2NFg5mGJgU1sL/e78tOPgdw4AX5SY2JOvp96yZv2zz/a4woKnJwiH12OEt/PcHHceWMOC76/rH/FcJA3ruA6B8mBZLaQPSBqQNSBuQMODH/KlHyiYCPz6B9y4M+GmIgavVNnJ0dvLUh95Box3s3YQBjQ1mpoS08B/bOuD/6ZmenWU2sk0KBrtCi9lBQoWRMUEt/M2z1uH7ng125z57amTAT0ON3GmwU2Ww45Gq4+8D2q6W+2lZUGqjpFMHv7HexKTgZv6s7Wr+n91rnc6gXiX/RZKFmJcd9+9qbA4eZuqcAUjr1fUWZhXZyLMp6HQWvg1r4a/a9v+X4SYCmtURFK1fNptChcFOUJ6eD590dMh/65EBgM7soER
n5XC8lv91v/Wcuw8DoP3Ywfl6PnnSNjpBRgZ8V0X/6P8mYYB8cJXOi7QBaQPSBqQN9FwbkDDgR//RRwooAj8qgfcuDPhJkIHjpVbCGmxENNiI0zpoUXuyXYYBdqLz9PyybRj/f/BrYXiWleRX5smrswEcxNRYeVpnI8OgoHO0rbjfQyMD1CkLrR1uhadZOv7lYUcYsKTMRml7T1xx8DhTx88ftnbCf/pEx+jnLfzPtqH2//GZgXP1Dlo6jSTourUp5DdY8Coxc65IfViI0iuoUx8cJiurIlr4v9rCgL94LQxo3Z9CfqmB/oEdHfLfJQxw7kuxcz1Fx9+9QxjQfuyCstapCs7pDRIGdF3FveS3Egb03AdA+TAtltIGpA1IG5A2IGFAL/kAJMUUgR+JwHsXBvzR/Wb+IUjLB6Gtj5HpFlKM6pD+LtYMaLZxI0XL/27rTKvrC+yotlPrnH+uUNHUutr92UIzJwvMnC40o36vPnwbHTRYembNgJ881bMm28SJbANjgpv5z21D5v8sUM+ZejvNbZ17u8HK2sgW/u+2UQx/G2fieq6BIU+b+cldDX90r4Up+TbyrG9vXTaHgtGuoLe1PixttxQsqzLyeVAzf9oWBvxjgtl5p4BX8wUJA94uLFu8i4CEAfLBVTov0gakDUgbkDbQc21AwoB3+fQh24iACLQLvHdhwMtF7do6s/8t1kSEusJ/V2GAxsb5BC1/3baQ35881nO2of3KukJUvp5h6t0Fnrz5mJxnI8f4ahjwl5FGnr22gGDEOywg+Ef+zfyPxy389cNm/s+2Vf1/8qCFSdkWUk0dt9nLLzMwuK3jr45imFxgJavF4gwI/ktbQPDXMSYCta8vOthe3d/9r8Vo5Yi6yGLbVfo/CdCyvMRG6asrGDrvkiAjA77bUv76bgISBvTcB0D5MC2W0gakDUgbkDYgYcC7ff6QrURABFoF3rswQF3gzzkyIETLByFaRrxlZMDVZC1/09b5/eMAHQfUOw84L4MrBGfp+Fn7kP22cKF1jn0zY/Ks5JhsHIrV8t/bruT/eXjrHQo6FrZXCM1uHdavPk/d/75u7iag3hlAvbWfut3/E6JnXaGVBIOCqf2SvN3OpSQtf9s+nP6JnkPVNsrNdm6k6Pj7ttENP3mkY3e1nbqOQrxTW7db7Hhl6vnVo9ZFBVXHUWkWogxKl7f5kzDgnVhlo7cISBggH1yl8yJtQNqAtAFpAz3XBiQMeMsHD/mzCIjAKwLvXRigrhlwosxKZKON6EYbCToHzluudjUyoMVOWK6ef22bg69ebZ+YbyPXOcxeobTBwqUCEwdyjHwR0sJftXX6/+ielm/L7ZRb7ZzstLL/T4MN/z977wEd13Wd+8dxHNvJS/Je8o//dpIX27KdOJaTvHQ7/SV2XGLHJcUlbuqSZatRkkWJYkPvvXeADSRB9N57L0TvZVAGfXqfub+37gADDhrFAkogeLgWODN37jl3n+98wNr7O/vsw0X34/0kB1flgoCbgfq7C/UELTtY33G04C+UavmbMjUf2MxQ+JUyPb6L8hGEN+ZKv27mh9UafnkzA0AO1v+kVseXmnR8tuxGNf6fy1bzTwMWekyuEwBu9LHfO8lqJ3dEz99tnmTw7nwNX+k1U651YHCJEdsai20C2+AQH+4YASEGHJwDKJxpgaXggOCA4IDggBAD7tglEQ0FAg8kAkdODHhPjYlyzY3U+q1Z3VMMcLC8bOLrFWreu7kq/4FqAxFLdtYdYLNLqK0OJldMfL9aw69sBuLvLzeQsiYfV+jgYo+Wj2/VHNDy5KSNKatcYFBCq7XwYr2GX98UEd5TbiB9zYFhhxggnyYQMmjgy+UbpwS8K0fNH7eaKNQ6NjMDJBpH9fx5kQpn0Tz3LIU93v+vGiOZKgfGrcHv/0ayOygbM/D5MjXvz1bxngIt/9FvoVon7SMEyH3dmhhgNtnJmzLx5oCJqAUbCuseAoUoILj/5DwA3wgxQDiuIngRHBAcEBwQHDg4Dggx4AFwnsQQBQIHiMADLwbYrHbkrQIPbZ5bLwfiv1+r57khM0kzFhLHTTzdpOV38zcCcblI3+c2V94dSAxMGZzH3LmC9A+V6/iPTiPHeg18v1HLh/PVyFsA5PT/j7WZqdVJzmMO4zu1fHgzE0A+TSB3xcbFPh2fLNjYLiDXDPhyv4VOk4TNbMOzRcsHN0WFXyzQ8OlKLZ+tufHzZ3J2wGZ/78rT8tMZG4pde/13MMfhoGFKPjZRvZVx8O5cNZ+q0fHVFj3/2arnW91mCjUODNua3oIYIDmoHDPwhQoNv1mg5qPVesKcWyS2dQRCDNgByIP1UYgBB+cACmdaYCk4IDggOCA4IMSAB8uPEqMVCNwtAg+8GCCf2reitnCyWcuHcjf37Wer+F9FGj5WpuFjxRsp+HKw/65sNX/UauKqyuE8hk8G32Sw4tGy0dZZT+Caivfmqvmf+Srel32jDsD7inW8qrAxJwfokp2dYoB8tOCqxsJL8mkBrhoExTp+PGWjUWHkK24V/v+0w8Q5pY2WtRs/tdNG/qX8RobDH3aaaTJIuO002M4VSaJTYeS/qjT86ubzXPbLtQtcP79Qoids2Y5q23aBWxADHDYi2rX8rqvvHA0/mrIxtlOgEGLA9nl5wD4JMUA4riJ4ERwQHBAcEBw4OA4IMeABc6TEcAUCd4mAEANkACUJxbqV8F49nynZSJd3Bcau118u0vKv3Say1h2o3SNsSWJmzYpfl44/LlLz3s2tBK52cqbBx2r0HJ+yMWCW2ChHsLcYYJcc9MwY+KfSG0f7/f+Ver5Vq+HDblsRXp21My+rGG7/HBYbvq1aPrQZfP9SmYGoFcfWsYRutzrT/AfmTfyg9kYdBJe9O1/lExZC70QMkByUDOv508KNjIpfKtXjvVdhQyEGbJ+aB+yTEAMOzgEUzrTAUnBAcEBwQHBAiAEPmCMlhisQuEsE7n8xAImJdRtFC1auzlu5tmJnySbvat/5b/d9i869/Zv3SRJrBjvNSxYSRo283GPgkQ49j3UbeG3YTMaCjW6DhMFdCHA9QpJY1NmpU1pImTDhMSBvEzByathEvMJC2ZqdOav7cX8Skyo3m5ftKDf301stdlqWrWTPb4wna8FGudJK3ub4spQ2Rs17V/ifUtkodrtvyCRhdtm443VFZ6dy0UrW5nNk7Pb6yVq0MeYSMdz60Bns1G61t9Ft3F1jQGWwcW1yo2ZAxNyG3TsTA+SZmlHbKHHN37KdGctNMhoAvdFO3ZLLdhtdBmkrU8PNRPH2PkBAiAHCcRXBi+CA4IDggODAwXFAiAH3gfMjTBQIHCIEjoAYcPBoGqwO5gwOxnR2xvUOFiwS5t3qwu4HS2CySayYHMwaHSxZJIwO9k/V393DkbtitEosmiQ0dtiRzHDkxioGdPsICDHg4BxA4UwLLAUHBAcEBwQHhBhw+76IaCEQeJAREGLAgzz7YuwCgXcYASEGCMdVBC+CA4IDggOCAwfHASEGvMOOjXi8QOA+Q0CIAffZhAlzBQJHCQEhBhycAyicaY
Gl4IDggOCA4IAQA46SlyTGIhC49wgIMeDeYyyeIBAQCOyDgBADhOMqghfBAcEBwQHBgYPjgBAD9nE4xGWBgEBgTwSEGLAnLOKiQEAg8HYgIMSAg3MAhTMtsBQcEBwQHBAcEGLA2+G9iGcIBI4OAkIMODpzKUZyNwjY7djtDhy3Uijybp4j2m5DQIgBwnEVwYvggOCA4IDgwMFxQIgB29wM8UEgIBB4CwSEGPAWAImvjzgCkp6pxmukRoYQcameMaVuj2Mp3xkMJM0AVZWtjMys391JDPZ5OsrrmB2bf2cGcpOnCjHg4BxA4UwLLAUHBAcEBwQHhBhwE6dDfCUQEAjsQuAIiAEO1ocbKMnO5NKly1zLKaGxfw6VyXZogrpdqIsLhwYBx0oTKYEhJF7Io6Ffwbrecmh441CWERF+nurOWax3g5i1j8sh8VxvGLibXu5JWyEGCMdVBC+CA4IDggOCAwfHASEG3BN3RXQqEDiyCBwBMcDG6OWznPaLIiEtk0vpicT4n8UntpDrC5q7C6KO7LQf9MAk9KM15GRX0TurwnLYUu2tSjqLsylrGWVZ79g2eNvQRbz9M6joUR46rjgWiggOTKOiXXF3tlmvc8E/iq7avm1jPwwfhBhwcA6gcKYFloIDggOCA4IDQgw4DN6NsEEgcP8gcATEACsDSS9yMrGS9pFFFuemGOkqI+HkzwjL62Veuz34u3+m5k4slTCtzqFc02Oy3Un7/ds4VnspvZJL7cACmh19S9oh8gNP4BEjCzD6u0tp39+EO/zGwnxDEl6ngshsGEe1Q6mw9qThHX6V+sE1nExxrDFYcY286j7mVHe1Hn+H9t5oJsSAe+XUWaju7SbhWgE+USkE5XWRO2nC6UQvqshvvs7lzjkqFiwb15Z32mGhbmSMy20TFE7oadz1/c77xWcRoAgOCA4IDggOvD0cEGLADT9KvBMICATeGoEjIga8hMf5DsZXNwN/ycxs9lleDS2id1Z/aNK+33o67vYOO4rSRC5UDDKjPtjlecdiCxcjEslrn2LdPUaWTEwUR+MXlERx9xy6HcH23Y7obtvbl5o5HxRATGYDY2umXUKFtTsVn4gs6odcYsAS7ZdjSMluYWLVfaB3a8nttxdiwL1xnBrHrxMWlYhHaj7+YUG8Hl/MhX49TYsrXC0sJOxKLRf71qhd3P/5DTNL5FSVE5HfxtVBjRAEhCCyj3C0P4dEYCSwERwQHLgXHBBiwO37W6KFQOBBRuBoigGAsS6Yl3yz6JxUb6z4PhCzbGPk/CkCL7UyvHKwGRGSeZ25yRkWVUas23QGG+vjvQxMLKA22Q8dyg79LEO9oyiW9Tvs3jDVJQY0uMQAyYJqfhqFch3DOyxsCDHgXjiKBvJyonjuZCzBFVMU941yrWeOcoWWwvKrnI04T0T1DJX7ZgW4bLJQNzZCbGIyPpebyXZlFoigWATFggOCA4IDggPvIAeEGHDoXFFhkEDgUCNwNMUAh5ru+Jf5WVQ5A0rTjcwAq4b5wTZqS4spq2phaF6NyeaKbB3oFqZZWNNj1KtZGO2ioaKMqoYeplYMWLdiawc65QwLS3MMdzRS2zTAvHpzxdkm99/u7L9U7n9O5db/Bg+sa1Ncb6qhvm3I2W7r8fLXNg0LQ+3UlhUjtx/c0V7SKVEsrKJeUTDUVkN5SSUtg/NoNoNwue/KkJ/yakAGOU2TrOotTiHEqpljqL2WirJqmnsmWNSaMKwqmFvWbg94JRPrsyN011dRWVVPx7Dc92YhRkmLUrHAunb76rpk1qAc7aGpqpLaxk5G5neMWdKzqFCyplpmdridurJSKpsGmVNt72ff3xLJzPp0P63VZZSW1dI5qkRrVKGcVW7aIqFfnGVhcZaR7mbqm/pQrBk2MgDk+R6W8SyjurGbcaUao2GNubllNDI2xhUmCv059nowKVnVdA3PodJb0C3OolzVYHTbDiGZVcyPddNYVUl1XRvDcyqM21QRK9r5EbrqKqioaqRndAG10cDa3BzLat1b11GwalkY6aS+opzqxi7GFtToFYW7awbYdCzK3CwvobSyif7pVQzb7NgDyT1rBljRLozSvWlv98g8KoOe9fl5llVazK5fiz26O8hLstNyL1ZG9u1zbpjQM8d40jOTlF791rMbxzrwPePJy3EVXJ7Y3DLgdOYs1M/rqVOaNlb/l4zUL5hoWJS3EBgoKkrlpdPRBJROUr3kEgr2f21csiD/7GvfO+hACpv2nzeBjcBGcEBw4H7hgBADDtJLEX0JBI4+AkdEDHiB18OzKW/qo7e9htxEH159/gwZdeOsmTaieNtqP0WJIQQFRZCQkkZqfDh+Z/1JKx9kSSdHfTYGs6LJyEwjMjSS2IQ0Lp5PJzk6GB+vSK61TLHu7Eu+L5zgwAAik9K5WNzJ7LoRy2o/xUmhBAWFE+/sPwJ/uf+yfhad/YNjpZ0r0WFExSaSEhtIYEIp/XNa5Kfb1wYoSQ4leEf71LJ+lNqNqNQ2lE18XCyhQdGkpGRwMSORME8/MqqGWNTbMUw2c/6N7/Poi2cJOt/AxMo6yp4CkoL9CQyJITUtnbSECIJDUrgU40l0Xg8Tqxur+ZJ2kvpLMYQFRxCXfI6L59NICPEnJL2SQaUOu7WfawlXaRiYQ+8MFCWMs63kJEYQERlHWkYGGcmxhPn4E3etmck100ZGhm2E/IQE4oKCiE1O4dz5DJJDvQlIrWBgQXfTrA3JMEvbtUQigsOISUwlLTGOSH9/YjKTCQm8SEPfHAbJxkh+HOGBfoQnpnGhqJ2pZT3mlV4nnoH+IcQmpZGRmkhUUCgp52Pwicqjc2QZq2qUxuTXeOSJY5z2jeZ8UTszK2oG81LJqulhxllvQkI31UhWfCihYTGkZFzgQloC4f4hpJf2Mi8XULCt0l+SRoS/P6FRSaSnpZEcGUx40nlivaPJaxtiyU1Y2PlnxbbST1laOIF+wcQ4bU0iOiiM5GRvnn8zcauAoF01QvWFaEIDQ4lNSiU1IYpAzwCS8juYUVl2dnvj804xwLbKYFkGUQH+hLjbm5hBjE8Muc39KG9i742O7/7d2ycGGChuaSE69gzf+rdv8vWnz/L6uUau9K9Sv2ymsCCaR585ycmrvVRsBvWNCiU5daWc9TjLy9GlXGgfIf1CHC+cTSKpZsoZ0DcMVfLGS8d4NqSUyxPmtwzyC/tnKRtfecv77henU9gpAiTBAcEBwYHDxQEhBty9byJ6EAg8SAgcDTEg8Tme+fHT/OhHxzjrF0xUfAY51X3Ma8wbK8T2JZrTwohIvEx5az8TMwpmJoboLEsn4HQ4+d2zaGwW2iJ/wiOPPItnciFNfeMoZhVMjfbSmB2Jx9lkqoeXMDistEc9y3On4ylsHUWxpMFsUtKcEU5k4mXKWvoZn97svzyDgDPh5HUpUNscrNaE4RF8kdLWESZH2mlsG2FRZcJhX6b1XASRCZm72geeCSe3Yxq1Daztsbz42POcTSyifWSaudlJeq8FcDIsj55pNVbdIk1RxzgRmU1l7yKqqWqSfX2ISMun8foo0zMzT
I700pQXz9lHvsKTgeX0LdhAWqM7M5yg8FRya7oYnlQwp5hitKeOguJGRpVqrOYWYk9FUdA6gUYCSd1PbkwYsak51HYOMenEdJjehnwSfX1ILL7OnNYOli6SXnqSF9+U8RpmenaWqd5cQk6Gk9M+gXor42LHr52kZ6QwjtDwJK5VdTI8OYNiapzB9nIuBL/Ad75zgouN42glK91Jr3DsVATXGoaZWZJX/+doSgvEPzSZ3LoeRqdmmJkcpa+pgOSzT/CNx/0p7pzDatGirAzj+NlYrpR1MTG3it5soCPRi+jsekZUDqT162RHBxORlEV1xyCTM7PMTo3SW19IacMQ8+smFpvPEeoXSkp2LT0jU8xMTzLa10xRkidPf/1x/PPamd2v/IBtkZZzIQSEJJJd271la39TISmeT/Hvj/uS16bAal+l62oCsXHnKGy4vnnfMN1Vlwj3Dudq/Qir++kB28QAO0utF51iRlJWDd1Oe6cY62umOMWbH3/jcXyzmpnZz94d03S3H98+McBM1cgUqdHH+cZ/PsmPw3JJbZ2mdEpH49IyyaHH+PcnPPAtnabetTq/oKF8cIDgMz/mm0+9yasRucRfKySyoJ3cgdWNgH6+D//Xn+I7xxKIbb957YCKyTVicyq40jooxAAXxuJVcEFwQHBAcOBAOSDEgLv1TER7gcCDhcDREAOSXuC1gBjOPP0Ex4IuUz+4iN5i39oeYJsuJNQ/jcqeeQxuefmSeYWuNE8CMlsZXzPQFvkkTx5PoGp4HbNbkGrXTVDg/zrheT0otGbao1/kbFo9Iysby6f2mSLCA9Mo755D796/ZYXudC8CL7UwtmpB0xLLyVORXGmYQm22YrPZkSQJuX1kUCplnbO722d4E3SpiZEVK5b2WJ5/MYicDgXGTfvsc9l4e6XT0CcfjedeM8DKVJ4fHqFXaRxTu21zALtuknyPRzgWviEG2OfKiPAK43LNMGvuA8eKXqPFZLHi2CYG2FmoisY/PJOaoTW2lQqw65mtjccz8BI9I6sbYsCLxwi+0sK061g/+zwFvr6kV/cwv08AK6nbSPUN4lzpdRaNbrUIJBNLzXEce8aLSw0uMeB1vJPKuK7ciGDt00WEeoaRWTPMumXbRDJd6MfTL4VS1DnnzMjYVTMAi5sYYGWhKoaAsPNU9K9sH6dNj1ZrxGKcpjTMh4gLVQytmd0yHezop4sJevplQnP3FwPsM6VE+oRxsXJwO/Z2PTMlQTx7LJi8VgWmuSqSI1PJa5hA47YtQLKs0X8llPBzlfTMm7c4v+3PmLsYYFdQHulHxLlyBlZ32DtTRuizrxByJMUAK01LajKij/OtpzzwLZmmzuWAKscJPfUEX3vMl6CqBRpc152vJvKvBPHf3/kRP/LN48KAijp5i4BrS4BygvCzT/HNJ/0Jrl68aSHBi/U9vHjaj4SC2gN1/MSq3OFalRPzIeZDcEBw4J3kgBADtnlA4oNAQCDwFggcETHgJTzONdNSdZngU8Fktc6g3ir+JqGtDeQnzxzj9ZM+BAcGuf0E4vvi93nEI5sexTqt0a/gm9nM+Lpb8OkE0I4yz5NTCdX0L2hpizlOcHYHk85lbQltXTDP/fgYx0/6ELRH/496ZNE1o8OmmaApL40oP098AuOd5dMoAAAgAElEQVTJbppg1WBFWxfCC88e47U392j/0g949OxVOqa1mNvjeM3nEk0jm5XvZdtU5QSdTqCqexaTnDLvKiC4rKYu9CSR+a1M7jpe0cb01VN4p9bTv2BF1xjJ2Yg8Wsc1bsHsDuZY3DIDHHpaYs4SmdvMuGYnVnLWQCMxZyLp6xjfEANe9+Ni7SArW7eqqQ7xIqm0nRnj3pvTbcMX8Qm4QE3fEjsXqSVNHVFnoslv3hQDks8SdqWeoTU58JfQNkTjFZlD4+ju8dhmcvDxTaW66xbEgHUtrfHeRF+rZVi1Zfw2YCRtE/FekWTXjuzOcrApyPfxJ7WsY5/MAAldUzy+UdeoHd5d6NKmyMffP4Xy9hlULfGceOlFXjrutZtjLz/Gk6fTqRla33v+3MQASddCkl8UWVWD7BqSbZaigEBSi1uOYGaAlSblGCGnnuG7LycQ06q+EbgrRwl+8zH+7TE5oFfuEAOsVNdm8NR3H+GR0FKuzOzY7784TaTXM3ztEU/8y+d2tXV3CAPTs/jCV76Bb9JFGhbd6xIIx9kdJ/Fe8EFwQHBAcODOOSDEgG1umvggEBAIvAUCR0cMON/BmHKZ/mvBnPHPpGVKtVm0TUJT7ssrHglczKumsbGRJvef+loars+wbjDQFn2CiMLuzb3i7shJaCv8eDO6nN5ZNW0xbxCW18mUZiP41FT48zOPeC7k7tf/NOt6KxISFvU8Yz0tVOelE+oRzNWWSaaKAjjuGce5nH3a90yzppczA+I4GZJD+7jqRtCnriT4VDwVXQqM7mLAiorqoJPElXQwY9gZcNuZyz6Lb1oD/Qs2tNUhnIkppmP6JscwbhMDtNSGniGuuI2pjQIC7mCBuZPk02EMto84xYDk06FkN42wvrVIr6Em1JOE4jamd9m20ZW1Lx2P4Ms0Dq7eGKvrKcYmYj1iKNgSAzyJzGpgePMB2toIvGILaJnQ7Vopt88X4OeXRs2tiAEqDQ1RXsTlNTKu24nhpjHaemK8YiloGGPXLY4FivwDSS/fTwwAbX0MfrF5NIxqd9nqmC8iKDCNivYZ1utj8PSJJOFy5Z4cbuyeYElzC5kB2kYS/OLIrRlBu3NIDiWlwcGklRxNMaBxtIY3nn2Gx3zyOD/qtr9/cY5o7x/z9ce8CKjYEdArl7icm8gT//Ef/PfZLNKHdgTxygnCzjzFN58KIKRm6YbAsC27wEqD0sQLp3z5w4f/kOdPelM8vCCyA3ZgJJz/O3f+BXYCO8EBwQEXB4QY4HIWxatAQCBwKwgcKTFgfNWObX2A/OBTBFxoZHJtIziyDKZz1u8yreNrztTwG8BI6BdmWFDpMdsttEU9x/HYcgYWdwRVkp7exNfxPt/E6KqR9m1iAFgGM/D0v0zz6OqOVezN/tf1mG1m1HKVdutGdX6bYZGm6NfwudjKQEMaXgGZNI3s1V7BwrrOmaJubY/jVGgu7RNuq8j7igEmhs+fxTulkn7lzvGsUh/yDMejKpw1A6yjl/E+m0RZzwL7LNSDuxggWRm/6otPUinX5407glgJ4+hlfL1S6Oqd3xADzoST0zyK6jbEAMdSBeFno8hrnmB7YoOEcegCJ170I9NVMyB5uxggj8fPJ4XS7nlM2wJeibXGKF54LcJZM0De5HHzbQJmJq7545uQT7vCsFuUkIlkHSXLz4+Uwk7mdoAnrTcT9+LrRObvv03AOpZFkF8yRe1yZscNZsoZDuvNsbz8ejj5rQoMY1mEhl2gvHuB7TsrJAxLcyhX1Rj2K/rnlhmAdZycoABS8lpR7LK3lcRXThB+7SjWDLBSU3+BZx57ieeTGsmfd3ccdWQmn+ZbT5zCI3+MWnkLgFz1f1FPUWMj8flFnHz1Kb79UiShRb3kDSxSPruZITDXjc+r
z/D94xkk9tw4ncDllLlec3sm+fYjT/Hxj3yUr3/re6RXtQsxQIgBggOCA4IDggMHzgEhBrj7UuK9QEAg8FYIHDExQI42baiHCog46cP5unFWTRKSfoS8EE/CM+uZWN+sco+Eab6FC2FxFHYrUFvlAoLP8MNHXia6bIiVrX3qFlZ6ruD3My8uNE2wbrHsEgMkwyj5YV6EX6pjzFVF39l/KxfD4yjonEalHaEgIZWSzilU8p5v+yoNoa/ge6mVsdlhCsK9Cb9Yy+ia68g92b5WLkXEk98xxbpV4vbEADuG0QLCz/iRWtrHkitSlEwoWy/g9aN/5XH/jZoBkmmaihgPfKJz6FRocG1JlwzzdFc30T+3itHktk1AkjArakjx8yM+tx2FxrIpCEiYlF1cCzlD4LlaxlfMdywGYFumJc0P38gsWqbUW0fz2daHKI58gW/913HO7yMGSIZxSqK8CEgsomdBvykASZiVHVzxeYKvP+J7izUD7Jhnqkjy8SH6SgtTqo2jGuVA3bjQ4zzGcG5phcmSGHz9Eijsmmfz4Agk8yJdV/x55t8exfcmNQMk4yTlsT4ExufTNae7YetiF1n+T/PNR7zIbVVgke9LCCIirYS+Rf1GYUwkzIvd5MQnkdMwxPJ+5wG6iwGSkanyeAL848lrn2XzoAok8xI914L4ydcfxevqURQDzORdDuQHz3pwNn90xzGAFqpasnjh+ZMcz2ilaEFHVk4Gr53x5fWEUs51zXMlM5gf/uBRvncijfjmWSoXNsSE+p4CXnnpBMfiG8hT7NhC4ObkhmUW8H8//0U+9uGP8Cd/8mcEpF2lYdEtO8HtXpeAIF7dBRvxXvBBcEBwQHDgVjggxIC3Cn3E9wIBgYA7AkdQDJD1AA2jRRGc8UqnZnQFk8PK2lAlmVH++HgHEBGTQEJkCP7evoSnldM3r8XisNIW9Qpvevvi7RdMSEgE8fGxRAZ6cfaUJ1FXmplYM2LHuksMQLKyNlzF5egAZ//hm/0HOPsvo3dOg8WySl9eHAGe3oRExRIT6MGJk/JJA7OoLRbWR6q5EuNqH++0b6N9KddnNch18G5PDJAbrDNanUmMvydevqHExieQGCUfXxhF5Gs/4pXYWuc2AbCjmWomNzEEfy8/QiJjiQ0Pwt/bj7DkEnrn1Vi2FRCUz0LUMdtRxPnIIAL8Q4iKjSNGbiOPLzGPtok1nHqKpYvkO8gMAAf6uU6KUsPw8/AmODyOpIQ4okPDiEny5rkfB5Dd5KoZsD0zAKyoxurIig3A28OX0Kh4EuOjCQ8KJjr8dZ54OYbKW9om4AC7lpnWfNLC/PDxDSIqJobIYD/8/EJIKexibt2MVTVOQ1YcwZ6e+AVHkZAQT2xYMKGR4Zx47BViS/bfJuC0dbyB7PhAvD18CI2MIzE+hojgYKLDXufJV6IplU8TkMc03khuYjB+3v6ERcWREBVGkI8voUkFdEyusXmKpvvv98Z7dzFA/vVQTdCUnUCIlyd+QW72RoRz4olXiS44gtsEllZJDjvBE6eSiGlb353OPzdHamIYr0YXkNanoqS5nsjMYpKaZqmcM1M3MkiyfJJAcS/5k7qN9ktqrlyM5mW/dKKblqhzFRXcEdjXzGr56QlPHv7Uw04x4OMffYhnj5+hoF9x4CtCt+IointEQCE4IDggOHB0OSDEgN1ukLgiEBAI7I/AERADHGgmexiYWUe3VTRQjuFm6OseZm7dgLPAv8PA8vh1WqtLKcovoLiknOrGbiaWdFjscn62lbbo1wm9Vk1DaxcdTbVUFBdSVFxKTXM/s2vGzYr8DtbH+xidX0fvWkKX8XUYWJnopbW6bKP/4o3+xxe1mJ39g1WlYKCtjoriYkpKKmnsmWJVb91IP99s31azf3vH+gT9o/Os6+T6A5v/rEuM9E+wqDIgn5+gnelnWLGGdnOV2GFcYaqvjdqyIoqLK6irb6S9bxLFcDcDUytoXLnpkgX17DA9rfXUlJdQWlpOZW0rfZPL6OSTGRxrjPePsbCmv7EVwqpFOdpLe1MtlaVlVFRUUdfYxei8CpPrVAXHOpMDo8ytarcyDmSsl0cHmVCuod+7Lp9rcGjmR+hpqKCksJiKmgaaWq8zPlFI8OkYipon0EkOVJODjM2toHWbfxxGVqf7aa8tp7iwmMqaepra+piYHqFnYIpl1cb2BodqiqGxWVa2shscrE8OMi73tzm/kkXN/EgPbQ3VVJSUUlZWQW1LL5Nbc+vAuDrNYHsdFUVFlJTX0NDQRt/4NCM9A0wtrm+d/uCatm2vDhNrMwN01JVvjlO2tZeJqWGuD06xuLa5RcFhZG16477iggKKisuoru9kZF6F0YX3to43PzjUzAyPoVpWuy5gWpthqGOHvWNTjFwfZEq5hmFrS8deHR7cNdlpuZdOad3EAsWja1SOd+Fz1o83kmrJntlrRd5CzUA30amXCC0aoHBSR+2cgXqla7XfTP28/Nm0KSSYKG+pwj/uPKGlw5TO7tXnhrNZNa0i9GIejz3/Kl/86jf5wTPPE3wum6Kh+Xs69nuJq+j76AYSYm7F3AoO3N8cEGLAwfkooieBwIOAwBEQA25vmiSbGYNOh95owe7YCqlviAH53UzLhQEdNkwGPQazFYfkft/Nn+fev21b/5vtHDbMBgNGs429vpbsFoyb9u3Z/uaP3+dbCbvFiEFvwmp33BAS9rzbgc1sxGiycsvPl+yYjUZMlr3HtOdj3uKiZaKe8sZBZlYtSA4rJhmzzf7ts7n4eSVR0TmL+S36kfGU2x6IbfLc3Wyckh2r0YDBaLl17Nzsl+zbx+n21ba3To7odegN5jt6zlZnsr2mTXs3Baut796mN/dWDDCSnZ3MieiL+CekciY2h8TWxY2aADtW753O75Ke8t5BMoobOd8xR8WCSwjY6RhaqB0eJqO4msSaUYqnDLszDdz6r1caqZhYJS6viqdePuEUBsrHV6ibNwgxwA0nEYDs5Jn4LDghOCA4cPscEGLA2+TAiMcIBI4IAg+cGLD/vG1mBrjEgP1vFN+8DQhYJ3IIDUhzFgHctkotGRjP8edMeDZtk9q9i/q9DfaJRxwMAvdWDDBT1lpL+PlrhFyu3Qjw5/YL8F0Ol5GqUSWlEypqF13Xdr82zCxRPLJM5eyO0wVuEtyeq+nkuTe9iC+oESLATXASzv9uvglMBCaCA4IDt8oBIQYcjH8iehEIPCgICDFga6aFGLAFxSF4IxlnqEsPwscvjmu1/ShW1lmZH6ezOBn/0/5kVA2yZHybctkPAR5H1YR7KwZYaVLqqBxTUjKuonYr5f+dcSqFGPDO4H6rDrS4T8yP4IDgwFHggBADjqrHJMYlELg3CAgxYAtXK93JXsSX9qLYfpbd1h3izduJgB39wgAN+RdIiYsmMiyEkKBQomLTyK66jkLlOnXh7bRJPOugEbjnYsAhWoEWYoAINI5CoCHGIHgsOHC4OSDEgIP2VER/AoGjjYAQA7bm14F6eojJRfXNi7Ft3S/e3HsEJCzaJRSj/fR0tNHe3s3AhBK1cbPo4r03QDz
hHiMgxIDD7VQKp1/Mj+CA4IDgwP3FASEG3GPHRXQvEDhiCAgx4IhNqBiOQOB+QkCIAfeXkymCAjFfggOCA4IDh5sDQgy4n7wgYatA4J1HQIgBtzAH9pkGCqr7WFgz3cLdd3qLhLq3hLL2KRbFNoU7BfHtaWdW0Nlyncl5FbZ7+ERJM0h1VRsjM+vc9ATGe2jDve5aiAGH26kUTr+YH8EBwQHBgfuLA0IMuNeei+hfIHC0EBBiwC3Mp7UzkbORRQxMa2/h7ju9xcF8vg+BF1sZXjqYENOuUXC9Jp+LSXHEJ54nv2GIRZ3lAa7Ab2NpsJHynEwyM4tpm1hGd9tRtp3llgskXiija1p7I0i3qpjuqibvfBIJCalcKelgakV/V2KBQ1lGRPh5qjtnsd4prQ55OyEG3F9OpggKxHwJDggOCA4cbg4IMeCQOz7CPIHAIUNAiAG3MCHWthhOhOTRN6W5hbvv9BYHs9ln8E5vYnDx7sUAST1IYaw/fkExZFzJIffqeRICvQk+X8PwsuFGEHun5t6P7SQj870NlOWk4n/sJInl15kz395AJE0fOTExXKroRek689C+Sm9ePKEBoSScu0rOtUxSw33wi7pGy8QaZun2nuG627FQRHBgGhXtCiEGHKJCgHfqCIsCgofbgb7TeRXtxLwKDggOHCYOCDHA5UWJV4GAQOBWEBBiwC2gdP+JAXYUpaGc8Yohu26IhXUtWtUSUz1lXLxQQs/MRoDqWO2j7Goe9UMLaO9ef7gFJN/pW+yYdGrWl/vJPHWS2MI2po23EalLRiZLE4hOLaRjRrO16m+ZKCLSO5Ck7EbGFlVo1KvMDdeS7nOaqGutTKrv7AhEIQYcLQdTiAFHaz4Pk/MvbBHcEhwQHHBxQIgB77SvKZ4vELi/EBBiwC3M130nBkhq6kKOE3i5gTGVW5QvGVlVLqMxmJ2ZAQ5lMxfCE8hrn2b9qOah7zW/jgWKfD1IKLo9McCmbCQjIp7s+lHWtpb7JdQdmSRfKKdzym3bgGRiOtcfr5h8WqYM3IbksGWxEAOOlnMnxICjNZ8ux1u8inkVHBAcOEwcEGLAlhsl3ggEBAK3gIAQA24BpPtODEBLQ9ir+JyrZXh1/yhfMq8zOzGNUmXAeifR6i1gdyhvuRMxQFJx/Uo0sRcq6VMa3eouSFjWZplVrqM1u2cAOFAWB+AVm0/zpBAD9uOB7LQcJifqXtoixAARMNxLfom+Bb8EBwQHZA4IMWA/j0NcFwgIBPZC4AiIARI65QwLy/OMdDRS1zyIUmPa2BNv0zA/2E5dWQnl1a0Mz6sx23ZGvRLmtSn6mqsoL62goXuSFb0Z9YKCRZUeix32FAOsauYG26ktL6eupZfJJR1mwyqzsyvojNa3XAm2auYYaq+lsryG1t5JlrRGpq+d3lUzwKZdYKSjjoqSMmpaBplXmdg1hF0za2epJRWfN84QebmJyTUj9p3DlttIWhYVStZ1m3hJehYVC6wuzTJ2vYma6ib6ZtYw2OwYlkbpqq+gvKqFoXk1JlfhPUnP0qySNZ0Rg0bJeE8TNRVVNHZPsKzfUazQqkLR10JDfRtDs+uYtg1EwqJRMtHbTG1VLc2dIyzsGKukX2JOuYpqedaJSWVpFS0Dc6iNLmN2AbH3hdsWAyQMY0XER6RR1KG4pS0VtvVBcgJPE+bMzrgF+6xalKNdNFRWUNvUzfiCGoOicHfNAJuOpbFuGitLKa9qpn9mFcN9rOQIMUA4ryKAERwQHBAcEBw4OA4IMWBv109cFQgIBPZG4AiIATYGr4YRHBREVHI6F4u7mFMZsa72U5IcSnBwBAkpqaTGR+DvEUhG5RDL+s3UecmAoukKCaGhREYnkZ6SRHy4P0Gx18gI8iezZYwlk7RLDLAu91CQEExgQBhxyelkpCYSFRRGWmYc3hEF9E/f7Cg4K8s9BSSFBBAcGktqWjrpiZGEhKVyzvtp3kh2FRC0sz5QSnp4EKHhcaSmppAY4YdPQCoV/Up0btn/e02tw6CkvzabtAg/vH0juVDaw6x6M+h3NbD2k52YRePgPAZZLLANkxcdSoCnH7Ep6VxIjyMkIIGsq6nExcaTmJzBucRQfAPPUze8iLN+nm2E/MTzXE6NJComlqTU81xMTyIuxBf/qKs0ja1ilBfMHat0ZycQHRlDYlIcoYFJlPQoUMvjkAzMteWQGhVBdGwK5zLSSY0NIzAgjpzmCdacHYBtpICUxDjCgmNIddqXRLhPABkV/Szo3FflXQPc5/V2xQDrHHUp4SRea2Bs3XJToUfSTNJReoFI7zN4h2XSMLKM4S20ANvqAOXpEQT7hxCTlEZGahIxIeGkpPjwwpuJWwUE7apRai7GEBYUSlxSKqkJUQR5B5JS0IlCtX8GyD4oHIrLQgw4OAdQONMCS8EBwQHBAcEBIQYcCvdGGCEQuG8QOAJigJW2iKf5yakECttGmFlUYzYpaU4PIyIhk9LmXsamZpgeG6CtJBX/0xEU9syhsUnoh3KJ8g8nLbuGrqEJZqYnGO1tpiQ1gJ9+41ucyepmVr9DDLAtUJ/gg09oGgX1PYxOTTM9PkRPXQ5xb/6Irz4eSu3g4lZxuZ1MsCvrSfbxJjw1j4aeEaamp5gY6qYhJ44zP/oKTwVXM7Bow77cysWICBIvFtPcO+q8b3ygjbI0fzzDc+maUe/7DNczHRYNyrEe6gsvkRTqg09AHNlN46y6olNLCzGnoihom0AriwGWThKef4IXPdIo7xpnZmqYusTTPPf4c3ifK6NjdAbFRCeXvE8TX9yNQi+BpYukl5/giadPkpBTS8/IFIrpCUauN5Ab7YVPUhn9C3rsa/XEeoVwLq+RobEh2utbGZpbw+hwoBnIJyEsitSsKjoGxpmenmJ8sJv63AQCfBIp7ZlDK2dodCXxs6ef52RMLi2DkyhmJujNDuZMeDbt46qbBukuTJyvtyUGOFjrzCQq6hJV/Ysbwsa2zrZ/cKwNUJHhx/P//V1+6nuFDoVbHYHtt258si/Rdj6UgOAErlV3MjwxzfTEML0NBSR7PsV/PO5LXpsCq32V7muJxMamk1/XzcikzLtBOisuEOYdQVbDCDfZEbLXkw/FNSEGCMdVBC+CA4IDggOCAwfHASEGHAr3RhghELhvEDgaYkDkc5xOb2JsdWO53D5TSKh/KhU98xjcUtEl8zKdKR4EXG5jYmWZ5tgzBJ6rYWjFfcXcgXGhjvCnHsfnWtcuMcA+mYvvyVCuNU+isbqtRtu0jF87zQ+fj6RmXzHAzlSuL2dCs2ieUOPe3K4dJ/fMD3gpooqBRTOK4nBCUkrpmtW7bQuQMC93cc47kMtNI6zcYnq43bjO7HA7VVfj8D3pw7naUZblKvp7iQEvHie6sJt5k6wOSBhaY3jhpRDyOhQ4L2Fh8NwZgi83MbTicIoBic89zRuRRfStmNz20tvRTRYRfiqM7LZJVKo2ks6cJepCLRPrJqw2O3aHhGRXUhvjR+TFKga3zQPY9Qrq4j0IudjAyIrVKQa88nIAl5sm0W9Cb5/PJ8A3jequOSy3+m
t3G2KApBumIDqS9KJOZt8qHUNGzKJlWTFCT30eiT5exOd1MKPZPzXAPlNKpE8YFyoGWHOvOWDXMV0cyLPHgslrVWCaryI5MpW8hnE0lht7PiTLGn1XQgk/X0nPgvnWBZFbxeoe3yfEgINzAIUzLbAUHBAcEBwQHBBiwD12XET3AoEjhsDREAOiXiUop5tpjRwhSmhrA/nJj1/mjdN+hASHELr1E4zfS9/nUc8ceiZbSXvDh8zmMdZ2ptxLKqr8XyequGeHGKBGUxPMifAiumZ0boHvBiusk5m8eSaN5qF9MgMkLbUhJ4go6GB6V1q7janLb+KZ2sigcp2G0Od5/thrnPENdrNfHosvr/7gEbyudDCtdRMj3pKYElbtHJ0ZZ3gjPJ8+hW5PMSDx9UCuNo6wttm1deAcp/0u0jy4vDleO1NXzxBwvpH+RftGZsAbflysGWRlJ472RYr9PUgs7UShVzPZlM+5qAB8fIJJzGpkbFmPVddCnEcEuY2jqHfFzBKaxmg8I3PpGNNg7UrmpN95avqXN2pCyGNWVxHulUhp6xS3fErgLYsBFhRVyUTE59A0rrq9IosOA4rSCDzCrtA4otrFlY3pktA1x+MbmUXtkOrGmDbn0qbIx98/hfL2GVStCZx46Rgvv+6zi9P+rz7Gk6czqBna7zlvSY537AYhBgjHVQQvggOCA4IDggMHxwEhBrxjLo14sEDgvkTgiIgBrxGa38OMM9ddQlPmw8tn4zmfU0l9fT0N7j+11dR1T7OuaiPhZwFkd06i2hVTG6gLfoMYeZ/9jm0C6gp/3owp5/rs7grxdkUWpz0yaNlXDNBQFXCC2LJuFM5N+u6csTPrKiCoXKMq4FW8YtLIqdhhf309ddW19EyuonNbIXbvSRZEJEn+f+c/CVN3Iq95naNjcGVvMeBkGLktY1uY2IbO4xl0hdah1S0xYDrrDP7nbogByWcjyW8ZQ70TR1n8CD1DXGEbU3oJyaxmYayH5so8zoV4E5bZyMRkGRFnYylunUTedbDzn6UzCc+wa7QNrzvFAM+wLBqH1m4E15oaIjzjKWmZ3Kh7sLODvT7fohhgX2njQng0mVUDLJt2Dm6zY4cW5cw8K2rDrm0b1pFMPP1Sqeia3xXou8zS1sfgF5tHw6hu13zJRwsGBaZR0T7Den0Mnt7hxGeW78np+s4xFtUiM+AwO5TiNIGDc3YP8zwL28Q8Cw4IDryTHBBigMvDEq8CAYHArSBwRMSA425iAFgG0jjjd4XW8bUdAZqEfmGGBZUes01Jsc8JoouvM+fKOd9ETDL0k/riswTk7K4ZYBlM5/TZdGqHltkei0us1gTy1MsxN6kZYGEo4zReadUMLe0I3KRV6oKf4rVouWaAgaFzngRlNjK6cyO4pEfpfgLArlmW0A8WcaW4g4kl444A0858sR/Hg67RPaHZWww4FUZu604x4OpNxYCkV94gtsC1teCGQZK+n3NnfDhX3YtiVY3OZNnY8mAzsNicwEmf83T0NHPN14eUkm7mdi7tS0bGrvjgm1xG77zJKQZ4hV+jcXj93osBkpaB7Egi0orpmtXtG8xjG6ckPpnchiGWt9Xwc7DWEM3pwAvUDLiElBvYuN5ZR7MI8kumqH12cxuG6xuJ9eY4Xnk9nPxWBYbRLELDLlDerdyxHULCsDSPck2DYWdmhqurQ/wqMgOE0/xOOs1FQ/MEn8vm2ddO88NnXuCRnxzjuH8E52o6qJnVPjDHXr6TcyCeLf4GCA4cLAeEGHCInR5hmkDgECJwJMUAST9MbrAnEZkNTKhc+9glTPOtXAyPp1CuYm+1oqyLw9MzloKuOXSu/fe2NQZyQ3j2a//Jyau7awZI+hHygk4TkFbJ0IpxM0iUMC20cuHMD/nKI8E3qRkgoR/JI/S0H+nlAyy7CvlJJmHEGmMAACAASURBVJStF/D80Zd5PFCuGWB13hfpE05m7ShrrlVpycRCWybRCfl0Ta3vk7YuYZmtJP6sJ9FXW5hWmzcCZ8nMykAxMW++SVRuF3NyRb49agYk3oEYkPjCozzxYhjFfYu4DmrAskp/bhAnvdKpGZploDiVc0VtTKzJ1fjtrDfFcMLnPG0Dcyiqkwn0jyO3febGfnjJxGJ3NhFn/TlfM8yyfKpDVzJvjxggYZoqIyE0gdymCVQubuz1C+xYoy3Fg5O+KVQMrbIxVXa0002c93yDgPO1jKxtUwm29SIZJiiL8SVQntN53aZ4JWFe6uJawNN88xEvclsVWAwTlCcEEZleSt+ifot35sVuchOSyWkYZtm8R2rFtqcdvg9CDDhYJ1A41beGZ8OimdjcCmfw/4///AUe/oOH+cRHH+L3Hvo4f/bnf8VXvvktTgTHkHd9WggCy7eGqeCewElw4HBwQIgBh8/XERYJBA4zAkdSDECysDpYzqWoAHx9g4iKTyIpJowgHz/CU8vondNgcYBDN0NLdgKh3l4EhMSQnJxMUlQ44dHJeD/1BIEFu7cJyH2vDVZwMdIfb+8AIuOSSEmMJTI4mOiwn/GjF+Np2reAIEjWNYYqLhDt74WPfzgJickkx4YTGhxFxM9+wMtxDQwu2pz3DVddIi7QB/+gCBKSEokLC8TfL5Q0uZaBK8jfi102NVMNWSQGeuLhHURkdBQRQT54nvEiIqOS/nntRlbDAYkBSW+cxPusB4GhoYRHxZMcH024nycenuFcqhtmWW9mta+ApABv/IKiSYgNxe/NU4RfbWVm3YJdq6CjMIOY4ACCw6JJTIglOsQfX59gkrJbmFg1OIPft00MsC3Skh5KzKVqBpddYtJeQMvXHOgULWTH+XH2lCeBYVFEhfjj+eabeEVcoWl0BeOuWgjufVlRjdWRFReEj5c/4TGJzuMTo0JCiA47zpMvR1MqnyaAfF89OYkh+PsGEhGbSFJsBCF+foQm5dM2sfoWz3F/5uF5L8SAw+E8PkhOvCwExORU8PVvfZ9Pf+rTfPwjD/GxD39k6+fjH/koH//oQ3zms3/Lq94h5PcKQeBB4ocYq/ibdL9zQIgBh8fHEZYIBO4HBI6AGOBgdbSH4bl19O4ruA4DS2PdNFUWU5CbR2FhKZV1nYwtajHbXSuoEhaVgoG2Gsry8ygqq6a+voWesQnyvV4jtvw6swYJx+oo14fnUOs3V3gdBpbH5b3vxRTmF1FRXUdDy3Ump4fouD7Jqsa0Iz1/OxUchmUmepqpKi6goLCM2tp6Wq9PMDPUQe/kCpqNsv04DEuM9zRTXZxPfl4BxSUV1HeMotSY2RrC9q63PklmFbOD7dSVF1PgbFtOddN1ppb1TiHEeaNjlbHeUeZXdThH5lhjvHeYuVXtVtaBQzNN/5CCVY1rW4OEXtHH4NQKatlO+WjB02FcKamipbOVhuoySgryKSyqoPH6FCt6y0ZmglXFbH8rdaVFFBUWUV7TycSidssWq2ae0Z5WGqrKKC0uoby8iob2YebWjVunKTjWJ+gfUbCikbMLNv9ZlxjpH2dh1bWq7vriJq+SkfmBPsbnV9HvCtQl1L1XiQpPp7R7fo/v9+hXM
qNSDNJRX0mpPPbCYsqqWhiYWcXgzsk9mjovOYysTvXTVl1KYX4hZVW11Df3MDY5RHf/JMo1wwaG7vfl5VFQWEJFbTvDs+sY3U7N2O8xh/G6EAOE4/12O95Z7SM8+twrm0LAR7dEAHdBQH4viwSf+eu/wy/5EvULRpEhIDIEBAcEB+4LDggx4DB6O8ImgcDhReAIiAE3B1eymTHotOiNFudRdlt3m8eoKWpibEGNFQdWo955j80hgX2GqydPkVI3yOJNzquT7BaMej0mix252e39k7BbjOj1Rqx2x43gdlcnEnazAZ1Wj9Fiu+3nOGxmjHoDZqvdWVRwV/d3e2FTDMhpHnUWHZRsJgx6AybrXphI2MwGDAYz1n3UDHm+jEYTFtvNMLlbo/dvLxnHKYkJITG3hSm19Sbzskcfkh2r0YDRZMW+T73BPVptXXLxyWi++Txv3KdDZzBj2wfHrU4P+RshBggx4O0UA+qWrJxJucpffOnf+d2//L/89t9+md/6p2/yoc9/iw/9y3f40Be+y4e++N986AvfcV77nX/5Nl87HUl42wxZCjNF8xZqFi003kVQVDdv4FJDL2+GxPL4Cz/jkZ8e46dveBB2KZ/iYeV9EWy8nXMmniX+RggO3B4HhBhwyB0fYZ5A4JAhcOTFgH3xto5yxduHczVDzv3oN+6TMIxcwfN4BEXX57bOs7/xvXi3DYEdYsC27+7DD461HkoLauieWOU+3IJ/3yEuxIDbc/KOslNcrdBwvqaTgLQreCecJ/JqMXm908hp/bcz7upFC9mzZhLGTfgMGXmpx8AP2/R8rVHH5+t1/H5GL7/iU8T7fYp5X0Al7w2u5b2hDbw3rJH3hjdt/MjvQxt4X2gD/19iN5/KX+Az1Vr+rlbLP9Xp+HKDjv9o1vFou56XrxvwHTKSMGFyPlcWC/azt2hwjlPhCXztv77HX/7VX/P7H/89Z50CebvCP33+S05hID6/mnqlyETYD0NxXfzNEBy4OQeEGHDfuULCYIHAO4rAgysGSEamyhPx8wggOa+J4fk1VCtzjLUXkuR1koBz9YyvvtV+8Xd07g7Hw4+YGCBvr1hd12E038HS/uGYkfvKCiEG3NypexCcXnmlPC6viqdfOcFX//3b/P0//jN//Tf/wOe++BX+64dP8EZQDNmdY3sG2KULFhInzJwaMPJMl4H/bJEDfi2frdbyRxUaPlaq4YOFan41T817slW865qKnzuAn5/PVvG+HBW/lq/mQ0VqPl6q4Y8rNM7nyoLDf7Xoea7bgP+QkYszZqoWLRQNzjtPKvjsX/8dn/jox5BrE7hvTZDrFDz8qYf5+re+R2xupRAE7iL74kH4vRFjFH879+OAEAPuKzdIGCsQeMcReHDFAMCuneV6VTbnEuOIjQwnPDiEiMh4Mq7V0D+vwbxrP/k7Pl+HzwBrLxcC4inumEAt4ufDNz+H3CIhBjzYDq0sBIRcyOUr//5tPv3wHzor+ruCZDk4/sRDH+cvP/M3PHv8DFdaB6lZtHJx2ozHoJFH2vV8rk7HH1do+XCxht8oUPP+HBU/fwDB/t0IBvLz35+r4gMFaj5RquEvqrR8sV7HF6/28vBPffidv/8qH/nkH24TAlyigDz2T/3Bp5ynHMjj3c/ZF9cf7N8bMf9i/m/GASEGHHLHR5gnEDhkCDzQYoA8Fw6zGuXUMH2drbS2tDkLti1qzFtF6w7ZfB0+cxxqZoYnUK7rt4oOHj4jhUWHFQEhBjzYTq1c1V8WAj75e5/kYztWyuUA+aGPPMRH/s9n+IPvPsfnk2v5Uq3KGVx/tETDr+WpeXf27a/0v+vyEu/OmOIXEvv5xah25zaB9wVU8H6/Mt7vW7Lx41fK+/zLeV9gNe8Nb+ZXz43z67lr/FKuCjkr4HbFgl/IVvGLF2f5xdhu3hdYxf84cZFff9yTD37p+3z4j/6Shz5640QDWRCQtxD4Jl2kcWn/LQc3CwbEdw/275WY/wd7/oUYcFg9HmGXQOBwIvDAiwGHc1qEVQKBBwMBIQY8uE5r5dQ6P33Dk4c/9eltQsBDH/s9fuezn+c3/+sFfu35SH7JI2djH/+5SX4ha+0tA3F5Zf5X8lR8pETNn1dp+VKDju+16Xm0epa/9UzlT5735dOPv8Fvfel7/Nbn/pPf/sev89t//1V+++++wu/83b86f+T3zmv/8DX++WU/Xq8awX/IgPeg0bkl4We9Bp7t0vPdFi1/emmAD8S28oHLCudzb0UseNeVFd6dNuEUGn7pbA7/8ydhfOAbz/C//+IfeOhjn+CTn/g9nj/pTdnYssgOENsFBAcEB26LA0IMeDD8JzFKgcBBISDEgD2QtCuaKKrtR7lm2uNbcelOELDPtVJW14ti2XgnzfdpY2e+rYyG3kmWDLd9nMM+fR7cZft8B5X115lYNNzeqQQHZ8Kh70mIAQ+uGJBa3sLX/vO/nVsD5CyAD//RX/DBrz7Krz0Xwft9S3lP/HV+/uI8P5e1vq8AIAfev1mg5s8rNfxZRicPe13ku1mdeA3oCRs1OesJyHv28+YsFE2qibhawncee4a/+szf8Knf/4Nd+/Zd6fqu18989m/xiE2nalq95YzLJwnULloomTPidbWSf3/Nh5+kFRPQu+4UC17rNTgLC8pbAx4ulzMY3mLrQtY6P39ewXtiuni/dyG/9tNwfvsbT/HDk4HkdE9uPVes9j64vyti7sXc3w4HhBhw6F0fYaBA4FAhIMSAPabD2pmIR2QRAzPaPb4Vl+4EAWtPOn4x+XSNqe6k+T5trFzP8Ccur5lR1dtQsMCiZm6wmbKs86TExhAXl8T5rFJahhbQmHYXmLD2XiAkLoeGoXXeBuv2wehwXxZiwIPr5PolZ/LXn/uycw/9bzxyil8+eZlfjGzj5y/M3lQAkFf95SD7q406nu824DdsJHHCRHyngucCYnn6+FnSq9r3TLGXg/qoqyV8/dvf54//6P/w8Cc/5Szm5wr+3V9lweA1v3AKB+Z2BeQNShNxedU8+dJxTobFU9Cv2LpHFgrkIwjl2gaRwzq+nVTCQy8E8xs/S3KeYPDu5GF+7urq3gKHUxiY5b3RHXw8pZMf1C4SPWaibEFsF7idYEjc++D+XRFzb0WIAYfb7xHWCQQOGwJCDNhjRqxtMZwIyaNvSrPHt4fpkoR+tJbcvFoG5tWHYs++ZBinIS+fur451JYbq/XWziQ8wrJpH1k/QACtdCd7EnmtkeH1exlu21CNN5AdF0xQUDgJaZe4lp1Hfk4WmWkJRPj5EpKQR9vkGiY3M6w9afhFXKVuYE2IAfvMuhADHjynvWHJylWFme9dauZDpy85j/d7d8oocur8Xnvx35Wp5D0xnfzK6at88KfB/KNHMmdqRrk8Y6ZCaUFeqZcDAHl//bXOUV7xCubHr50io3p/QUA+tlCu2v+5L/4r//i5L2xlCcj79T/x0Yf4ly9/DY+YNAoHZreCfFeQIQsB8vF/Tx57fZcQ4LpHfq1fMBKVVcLjLx3nBy++wT985wn+9z98lQ9++Yf8+o9O8j/euODM
BnhX5uKe4/75a+v8er6KT5dr+Eqjjjf7jM6jC13jdX+WeP/g/R6JORdzvh8HhBiwj8MhLgsEBAJ7IiDEgD1g2S4GOFjtLeFqXgNjSzpse9x/Ly85Vvsou5pPw/Ai2h0Pl7TD5Ae+wdmYIvqUenavTd9Ly/boW9IxWhDK6bMxFPbMo3czaLsYIGFeW2BpXff/2HsP6Lay62w7HpcUJ/GX/Hac2HGb4rEdO46/FDuxP5e42xl3j2NP8fTu0Yx6o0RKosROgr1XkersvXdS7KTYe+8FAEESbHj+dS4IEAABkuMZzWg4V2tp4QK499xz9rngOvs5e7+bJTGm9RlacuNJLWplVGUxSCu32fzozYABq0w1pRLm7opP6A1yK2/RMzzJnFqDRq1kariH5pu53Ah25YL7RQpax1nYAAIyDNicKVtHMgx45yxoi8ZXpLD9p2s0fK1QzT8kjPPuq9Yd4XddGpGE/T6wz0fSDviH7/6GT371+/zsmVd47OXDHL7gRWJ9zxZH/bUCgYeefpHHX9rPCXd/fvbgw1KkwK8ffgK/G1nk9Exvaf+1goCnXz3KWf9I4orreerVI3z67nslfYRP3f95PvYf30KM60O/PcBf7w/iT73K9GkRVqoh/GniHB/PUPGtYjX7Gxa4PKClZOKd8+zYcnrkz+VnQH4Gtj4DMgywteKQP5ctIFvAmgVkGGDFKuYwYI2x0igUwSk0DCnffBgwVs5FRQjJNQPMrZh0VrdET4YfTm6hZNQPM2+yC29y1pt6qO3NJtjZg7DUWobUy2a74eYwYI2hvCiu5jXTJ8L718epjPMlLOEmvTOmg9yp+7cfBiyPlHHR3RnvyEwah22Um9StoB5pJDPoPOcDkqkdUElgRoYBO80fUjjjO2Uxd7GwlpftHAlOLdziZO5lG4hygME9Wh6r1vB/89T8bYrSuiL/jRneE3KLvzx+kQ/+9iAf+ebP+MQXv4wQFBTh+//8hS9yyiuY8MxSaef/jQECmQiH/aRHAI88t48HH32KsPQSCofUW+botYGALKndM/6RZLSNUDquxeNiAt/67g+MGgmGlIS7772fj3/pv/jIf/+CD/3uEH99MpY/j2i3WilBVCX4+zQl/1Wg5rlaDZG9Wqnc4l5+fuSxbXX2ZJvINtnuGZBhwM5rD/kM2QKyBTYtIMOATVsYj8xhgA7t9ABdfWMoF1fedCE43dIMg119jM0tmKcB6FaZ6aijsXuUOSv56sbBvIkHqzNdNDZ1MzK7uCVKwRwGrNJ5+Swel8pomVgHnZbZwR76R2dZWDaJs9+x77cZBujmqI9zwcnnKqXdSrbv2hrzA4WEnb1AZGYjo4s6ZBiw4wTKMGAPq2QXjK8Q1L3E76s1fClPzf9JUfIuK7ve744dlMr6/c0zF/j7Hz3Kx/79m3zqvs9KAMDgMN939z384ne/JzqvipLRBaLzbkpA4MgOEQIHHd03UgZqbGoI+F7P5ImXD/G1b/w3R118bIOA5HyeFakBXkGk3hrYAgvE4lyfGiBAwDEECEhvGzGel9kxjoNfON/89ve2AAExTpGecP8XvsTPDztyoqSfp6rU/FP8AO+LG9ySRiCEEz+YquRf89Q8WqWRdAVEysR2DoL8nexAys/AO+MZkGHAzmsP+QzZArIFNi2wd2DAihB3q6Y4L4+Sqmb6J+fRLkwzNDSFZknvxOvmxxgUDufSAsrRLhrKCsgvKKepf5rFlU0n1BwGgE49xuDYHBqtSdy7bonZoU4aygopKCqnvmuMee2aGSxYUY3QXlNCfl4xVbf6mJzXsjgzxPDUPIsrm/n0uqVZhjsbKC8ooLi8jq4xNdrVje91asaHxpjTaM0cbJ1WxVhXI5VFBZRU1NM1ptq8BtBpxhkam0E5NUSH6ENOAVXt1oXuNh8H6UImhsaYmRyiu6mSoqIKmgdnWFxdY2Gii/rSfHILb9I+osTIIFaVjA5PotZoWTcMS7fI1PAoixp99QBTGLAyO0Chz6scdY0iobyPac0SqvFhxmfVG23q0EwMMz6jZmFBxXhPE5WF+RSVNdA7qTFxyq3DgNX5cbrqyyjIzqGospXh2UUM5jQb6w5vdHOVhJ5xJiqzkQmtYWDbXLSupuXSBS4Ep1EzuMjyhmZAUfMYyolemm8WUZBfSl3n2BbBwRXlEG3VZZRXtzI0Y9Hf1Xkmuhsoz88ht6CSloEZFkyfn4VJRscmGO68RU1ZJc39IwwPjDKl1GAZMCKetdHRSZSaZelZXZ2foKexnMKcHIoqmhmctoBO2wz3jfhKThPYe4vTgvFlAruXJCf1X3JVViHAXTdm+Hj6HA+Uqvi+TxKf+cmjfOrz/8o9n7rbDAJITvLd9/C9H/8Uj9gE8vvnJIdXAIGo3A0g4LRNykBNBwIIvHjMnphCW0BgDs+4ZH74019x2jtki0MtdvUljQAhFugVRIoAARNbHW8BAvzjs/UgwC+C9NZNEGBwwjLaRqV7fP2b30EADgEAxP/P3Hc/3/7Bj9nv4MzFwhryB1Q4X8vhpyc9+O2lckkz4B/StsIUAVc+kKyUdAUerNRXUJChwN77TRmeH/lVntvdPAMyDHgjVidyG7IF3jkW2BMwYGWygdQQTzzcvQkOjyYmMgx/Dx8uXgvhgm86bYNzkiO92hpPcMx14vx9JSX4mNhYYkIDUDg5E5xYxcDckhTabgkDVpquEXK9gq7xRcmB0ql7Kb0ShI/Cj5CIi1yKjSJU4YbPpWK6pkTu/gpTjWlEKNzxVAQSGRVNdKgfCp9orgefJyCtmf4ZARZ0zPeWcC1IgY9fEFExccQJQTo3by4XdjIpku5XmokPvUFF+yj66nk6FoeqSAr3x98/hKiYGGLCg/Bx8SAsuZqBOa00htX2JMJCQ/D1CiIqMoa46DC8L7gTW9jOhCGp3dpzvtpBSqA37uddCY6MJi46GC/3UBJuRBMaHEJYRDQXQ71w8bhESccEi2JjX1XF5eh06jsnMQb5r/aQGXGFgbYB6S6mMGCx7yaXTz3GU6/Y4xortBhmuJUYQWJJM8Ma4XSv0pkayeUrUQT6BxESGkVcbDQRgQpcXQKIL+tiWtwYSxiwxlxHPpcDvFB4BxIRGUGYrysu7lFkN46gMnbO2sC3frbaeolzjmFk1g6zvPVrK5/o0NRGcNYtloKmSbQNUTi5++Ll5kdYcBgxsXFcDA/C28kJn7h8WsfmpedyfbaJ9KhA/P2DiQhSoAhPp65vVrLl2lwXReJZ81QQFBZBZIgfHk4eRKXXMaTUD2i1K4sofy/cFGFEX0zjZlc/jZmXuJFbQ8+cCcBinZnqBK6klNI8Ms9cVzHXgxUoFAFEREQQ5ueOi3sEGbUDbDRtZYxv7EcyDNg7i9viiRVCevQQ4Iu5oqTeVuf1vQlzfDp9mi+6JvCAcxT+Fd1cLG/hldMX+K+vfoPP3PdpyUnWC/npnWUBAlyjrpHdPWXmqBcbgMCR0xx5nUAgvW2UZ/Yfx94n1OwemyDgOHaeQaQ0/fEgQCziU5r6OXDWjSf3HeGMX4RUjUBEPIiUh9CMEtJbhyk
anicgMYen9x+V+pNwa0gSSzzfusiDlfN8MlNlNYXg/UlKqcqCiMSI7F1CzIel4yCiEyKzK/C5lo5/QjY3qtspHtFsOc/yOvn9VlvKNpFtcqc+AzIMeGPXKXJrsgX2ugXe/jBgdZSS4POc94oitaSejt4+ertaqSuIJ8Du9zzwlILS9knJ6Vq56cvLjz3Jyw5hpBQ30Nk3QH93G/WF1/B1OEdUQQeTi+tYwgBtmYKTPuk0DMyj081Qf0WBmyKCxPxqWrr6GezvprUmj4QU4diqWBorJcLpPN4RSZTUtdPb10tXax3F8YE4PPY/POtZQOv4KrrZeq55u+ITHk9+dTPd/aI/rdTmJZBW2smEENPTVuBv50taTR9qnXC8m0kO9CIgMoECcU1fH72dLdQVxhN0wYmI7BbG5tdZqQli/1P7sA9KobK1V+pj/XUXTnkn09CvMotgMHvIl2sJ2fc0rzpEkF3bRX9vK0Uh9ux7+mUcY7Ko7uhnoLuGy+ftCc6oZ0ijQzeTjbtjBAV1Qxg30FeaiHbwpL26XWreFAasqscoDziEneIqmXWjqBY1VAaeISCpgm6lgAEr1IUe5vmnnuN0YDyFte309vfR09ZASbw/F86Hkd00gmbdHAasTddyI8iPkJgUSuo37N5cRXaMOxd84rnZo3ewzca7zRttVTD2bnEUNE+Z6R9scwmrXddxvBBBTs0wiw1RnHzhWV6yDyapqJ723n76ezpoLEkgyPEsgUnVDKhWmasIw80zkvjCZrpaqyitbGZwUsPa2gwNCWEEBkaTXFRHe08ffV0t1GTHojjvy42yToTEgijbeOrgKTxiC2juHWNWs8h0w2W8fS6R3zSxCTJWhykI8SY8qZzOvlqSwgIIjkqkqLaNnr5euluqyY31wtnnGqXt05tgZ7sBv87vZBiwNxbU1wa0vFy/wL/lq61CgPclzPG5HBVP1WhQtGtQZFbx3JHTHHPxIam+V3KS/eKzOOqs4Ke/eYhvfvv7PPrcy1Le/W8ee0bKty8a3uq06oFAJS9IQEBBUkPvFsdWEhUUEQLn3HjhqPUIARFx8IcTZ3HwDTderwcBhRIk2BEE3NhIDbARESCcBgESRKlC0deIrHIKB9V4xibyqr0TQk9CnCOiCwQIECULBZgwrWYgnPuEoWXc25f4df4oH7ncw10WAowiUuBvUpR8OV/Nqw0L3BjUStUW0pqHuBASy6PP7+PHP3+Q//7uD/nuDx/glw89xj47RyJzKqT0izvVuZH7tTf+Tsjz+ObMowwDXufCRL5ctsA7zAJvexiw1pvI+ZNeJFT2oTIJnWZVRdd1Ox5+2Y8SExjw3BPHCMltZ0a7mRbAmpruRGdOKlJoGlKjtSgtaAoDVoez8HLw5FpxJ7OmbQgRuTkVi8tL9CY5Ye95nYpuJSbZB6ypu0i0f4RXfQQM0DKS7ck5z8sUd8xg3pQapWqRZXGxGQxYYzTfDyfFFYrbZzbD9MVDuzbPQJ4/Z9yucat7RoIB+15xI75qwKhuvzaUyAXHaEpujdl29AQMePUo/ql1jCwJx1yH5mYAr7zqQVL1IItStPwyrRcd8LxaTtvU+muGAdLOv6lmAMtUbYEBr/LiEW9SGyfNxrk230uGlwM+NyrpmdOalBZcYTg3CJ+IFCp7VSb6CjqWpxq44urJ5cJmJizj5rf5wS83ReNwIZKchrFdC0dqG2M44xJNXsMYSw1RHHvlFP6pTUyZ1hxc0zCQ48c5j0sUt00zVxOFs6M3F/M6mVlcZnV1jfV1HWsjBYT7RpBc0m1WplG3PE3TVU+8Y/NoHNWy0hDDGcdQUquHjfMqSjym+nkRnV7DoBRSomOxM5kAnxgyawfozg8nICKBki6lSSqBjuXpW9zwUhCXU8+okexsY6TX+ZUMA96cxeHtWoRnjC5ztmWR7xSr+XCacsuOtYAA/5St4pkaDb5dS6SNLFM6uULxyAIR2eU8d9hOAgLJjX2UjWvJ6pyQdsyfPXCCgMRcEuu6pfdC5E/sZO8EBI462QYCN6r1QECkDIhQfAEJDHbRw4BzRhhgBAEHdhERsCsQ0M9RZx8jCDCMQ3E5RYIUl0oajCDgaQkEhJmBAEM/xWvyrUFecg3iV27RPFIwzP/NU/H+pDkzXYF3J8wh0gq+WzLPoZIBXrzgw9e+/i195IVIxfjkp5CiL+6+hy/+87/wmo+7PAAAIABJREFU898+it/1TBkI7GEND9NnSD5+e//d3Wn+ZBjwOhcm8uWyBd5hFnibwwAdqgI3jisyaBjUbNm9Xem5zHH7aKo6DJEBfhx2vEJl9+wW525tNAnHkyGI/G71zQBOeiZzq08lPQ6bMECNusSLU4pUavvUW+4nnaxTU+RxAp/UGvrnTYCD9OUKvVdPci6ynLaxOcq87fBJqaZPbRrKbfEEmsKAdQ0VvqfxSa6iV7X1mvW5YnxO+dDa0MdKTTDHLlyirH3aqDWgm8vFwyGUvLpBJD/f4lbS2+VaQo+7ca2sg5mN7q+0XMTe+RLlrZMbY16j7/oZXGPLaB5fuz0wIOwUrrH5tExalhpcYzzdjfMhmdQMqKgLP4dvfBntMyoq/Y9w4MBh7J298PZSmPx34dgTT3IurowupeWcWDOC/rP1mWICTrsRl9fCtGU3rF6mpTveCceAJKr6FlhuiOaC5yXymia3PG/rk7n4XAgmtbwHtbKPm2kXCXS7gItnCPEl7UzMrzB/MwS7Awc5dNIZhdl4vHA78hTPOsRQ1D6HtiEGZ8Vl8ppMIxhWGC0IwysogfIuJWs6JfWXfAi8kkfzxCxVoXYcOXCAU06WtnLl+NPPcDYqn7bZ3dvKqjl28aEMA+7cRWnBoIqsjnFyuqckR9V0ASoqBPh3LfHrinnuzlQhSt/9iYk4oHj/sUtdfM4hhhfj8knun5d2qE3bMACB5w+fMgIB8b3iSioHz7kTV9wgOesi916E1G8LBEaEhoCIEDjF7oFArREImMIACQSkFvLMbkBA/EbVgO0iAm6ZRwQYQIA01g0YIOBEYGIuO4GA1OZBjrv6ImwWllFKSp9aSs0QURn/nKvifVbm4SPxI3zs7BU++u1fIioXGEQZDa8CCnz205/hpw8+REhakRGQmM6VfHzn/k7luZHnxvIZkGHALhYf8imyBWQLGC3wtocByhwnTgTk0Tyiz+c3jkxslg9e55RDDNVGGOCPnSKdxoGtjrxOlYvLsQByG4dRbgMDlLku2Pln0zi0YD3UXqciz/UkgdkNDOmT/E26tMZQvD3noytoG5slX5yXVb+xc2tymumhGQxQU+B+iqCsOuvXaGsIsfOio65bggGnPRO42TW3CS1U+XjaB5NTO7Cxw296o41jAQNOKUis6ERU/RP/VttiOed+jZtt00YY0H/jDC4XTWDAuXDy6wZN0gQaibL3pK1qa5rAriIDwh3xF+Hshk5sdE/SWSjyxjEgjZs9cyYwQEmRtx0XfEK4ll1EcZH5/8K8Amq7JlC9lt3u9WmqIxw5H5hMzdBW2GTs0sbB6mQVUY6OhKTUMqxZl8L3XXxvUNQysz
kHG+fq1CUEXAgipbSLeZ2OZdUoXQ3l5CXF4OOk4EpRG905/pw9ryDoUhZFFuMpLsijsKaTcaU+MsDV9xoFtwzzo7/J2mwdlzz9uJJ/i7HBEiK9Q0ks7US5qqYs8CxOXv5czjK3k7BbUV4hNR1jKF+LrSyNscv3Mgy4sxayIkz9akUzdp6BPPbifn735Av87qkXJOfTMy6JjPZRKfT8pfoFhDjgXyYrzSCACFO/J1PFE9Ua3BqmOOJ/kaf3HycwKU+KBrBctOqBQIXUvkgZEBECljBAXKMHAuGSOJ9/Qo71CAEBBHI2gIDzbiME9EDAAANOe4dKpR8lECDEAnfSCNh/TKoQIHL9Lccm3guxwWMiNeCoSA0ok/QATM8TkQEHzrhIDr5es8B2RMAmCLAjLKOEoqF54z2zRpclOPPwTTUfS53hXTdmzOblrthB/swtn7996hwf+/K3udtCqFEAgc9/7vMcOOtKbs+0sV3TvsrHd9ZvVZ4PeT5sPQMyDNjlAkQ+TbaAbAHJAm9zGADLLVGSw1/aMbWZGy0NTcd0oStPHww00wx45UgQea0Tm07rxrmaxjBOno2lonOahW3SBLRtsTjYR5DfPG5jd32Z1ujTOEYV0jahV2w3Pmu6GUo8nuWofyGt44t0xNlzNjyX5vEl62BBXGgKA3TLdF45x/mIPJrHLK8RYeBXOH82gqaWMQkG2HslUtWt3HREdwsDTitIquyygAHXbcMAVT5epwPJrOpD0v8T/V6qItjOk5aqDmn4ppoBu4IBoUc55Z9M3bDFOHULtF48i0tUPk1jC5swYHaJjisuKC4V0DxhIfenW2BieJxZ9daSh8a5sXqgQ9ObT4STI95XSumZ1dqcpzVVDwXhTjh6XaW8e1YKvRelBe2OeXGlzMQu0n10zDfH4eQeQ27dADOqeZa0+ooXqwsTVIWfwS06j5qSK3gp4sitH9vybC9MjjA+o2JxVWgGxGANBqDT0JHogyIqmfgQL3yj06keEFBjma54L3xjs6gf3WqrydFxZlQLW6IZrJrodX4ow4A7Z0ErdqzdY+J58PdP8R9f/i/uv+c+o+L95z7zOb75gwf4tftFvpU5IqUEiPJ2ptEAH0pV8pOyeVzal0gd1qcDpLUMS7nvYsd7eyBQzvOH7SSn+JRXsFlkgGHBK4CAKM0nyvYFbAMEIiUgcFrSH9hJQ0CfMlBLXt+spBkg9Al2DQJePSr1J+2PBAFiXF6XkvntE8/x4O+f3qIRYBi3eN0OBBjOKx5dwvFGPt8+E8IXo+uk0oOm8/MnN2Z4d2Qnf3EmgQ/95hU+8c//YRYlcO+n7paiA4SegaFN+fXO+X3KcyHPxW6fARkGvM6FiXy5bIF3mAXe9jBAp2kjweU0bjEFdEwvbYTE61gavUmc/aP8+HFPM82AFx5+kqOBeXRMbzqGy1NNJDgf5fzFMnpmllneBgasL/aQ4e2AU3AajSPzxtx03cII9YX6Hf/ptkQ87V2IyWtjanEjnF+3xFjVJRwf+xFPueXTOr7CYk8Gfg4XCEltYEStdwbFzvfCSD3FFW1Mzi5ZwAAdS/25hFxwISy13uyapfF6EjyFOn8xvVPaNxUGoG0ixs6BsIx6Rjd0BhY7rnDWzp+a+kHpJ/XaYcABnnlqPz5ptxjXGGL0V5hpScXb7jxRea1MaJdNNAPWWOhKJ8jVh0tmdtcyXp9AaHgyN7um9fnx64vMTkyjsijXaPW3vzbPUFUyYS6OuAcnUN45icZUm2JtkcmOUq77OXHeNZTMugFUGwIQAgacePoJ/uB0g9rBTR2DlekW0nzO4haTR+tQG3lxl8io6GBK7MSvzVEd6oBbVC6NfR1kh3jgF5NNy8SC8dnWTjSSEhZBUlk7k1qdbRgALA/mEuJ8ipeeOEFUTiPj0m6/joWebMI9fbiYJexreEa1TDamEBmRSFmbJTCzap3X/aEMA+6MBa6ICPC4GC+V8BMh42Kn2BBGLl4/8YV/40MPHeGvFCW810K07t2XR7k7ppmXiocl1fsiCxV7AxAQoniBydtECGTpgcCvHn6cl46fMaYJmC6AxQ68EPkTbe0EBJ4/IoCAt01RQaEhcOCcGy8ecyA0vZgXj9rz7e//WIqK2DYiIEGUDzyKg6/Yxd8mIsBFrxEQbiUiQIypZGxRKlX439/7EYccPRAif6ZjNRynNg9x3M1PgiVh6cVmEQGGc0rGlghKKZBAxgnvMAJrh9hXPs79Vzq569KwGbR517VJ3hvUyF8dDucj3/2NWerAl7/yVZzDr1jth+Fe8uud8ZuV50GeB1vPgAwDXvfSRG5AtsA7ygJvexiAbpnp5iwuertw4YI7/iERRIUH4+fpgb/iMI/uC6HSmCbgx6Gj57hwzhVvhQ9BoRGEByhwPXeGC95XKO+aRvju21YTYFVfki3IA1cnd3yDQgnzV+Dh4o5PRDa3xlQsL0/TmhOLn8t5nN18CQ2PJDLIF4WnH96HH+FAUClt46uwOkd38XVCPFxwEWURQ0MIVLjj5qYgKrOJceWyBQwQuQ8q+iuTifb1xMPDh+CwMELF/S844RWSSFXPjH4MNcG8WZEBrKvpzIzAy9kF78AIYiKC8T5nj2tMIT0TC9IP6jXDgLBT2J914JyLAm/vAMLCQgnyduXCmXN4xxbSNq5h1aK0oG5lho7Ca4R4OOPmIeweToivJ24unkSkVtM3oy8duTZSTJSTG1G5LYzZFE8w+TuwomSosYjk6EAULk64uikIEPMe5Ie3qzMuzh5SdYfiW0Molza1HAQMcLQ/i4O9Kz7e/tJzEBHog8cFR1z8rlLaPsGCdoaW9Ai8xPz5hxDi7YLDaQ+ulnYytbTMbGcxCSGeuDp74BccRkSwHwpXF7xCk6nq3nhebUUGiCGsTlEVac9xx0jy20yqKazM0lUST7inM67uPoSI58jPC3cXT8KSKume2oRlJpZ4ww9lGHBnLGivVrbw0FMvSLnjQlzOAALu/tQ9fPTrD/DXrwbw3uAm/uTatNGxFCkB92WpeCh/hAfPB3HII1gKi7e2QBVOswjBlyIEdgACv33yeX72v4/YzF+3BALWSuOJ9AMRIbArIHDWDSFY+MAv/1dyuEX5v3ITcUHDeCSl/wRR8u/Y7kFA5tbUANGeAAEiUuJXDz/Brx55UoIRhvuYvkoRAbsAAcEbIMBOpDbcGpCc+asNA/z6XAAfeuY8f6ooRUAA00iBuy6N8GfuBXzw4WN84otflub8S1/6V84FRsswQBYSlJ+Bt/EzIMOAN3ypIjcoW2BPW+DtDwPE9KwvMNFZR1lOGilJqWTnF1Jc3kBPfwtV9T1Mq/Sh5is3/TjhEU9BUQXV5cXkZ6aSkpRMek4Jjf3TLGxI/69Pd1DfOsTcvL6O+/pkG/Xtw8wadqh1Wmb7b1Fdmk9ORhppaRlk5ZXR2DPJ/PKaFEq+vjBBV30ZuWnJpKRmUVBYREVDN/0tVTT2TKHccEJ12lkGmqsoK8ghMzWN9PRM8kob6JmYZ3lNB+tTdDS0Mzw9v6kUv6xkuLWWiqI8stLTyczKJq+oi
2H9OqC7AW4REgVQHwDNCtkQgQsPMI7b2u8Qi4eIMmLF7DnxmdR4AcF585wJHug+JZYdKibWeaWr41GBA4YfYQgEIAn1dvIHCAOrzWn9fTzA27tdeK8oWQh0BAIOClEBhTUkqd+gxiRY3tJqD/+2FhQDV6CLJTtRGIgQhYGBADN8FOwUagpkbAwgD9w5x80K/KoygTSKHGLdtwfT3k4IGAgKulnLwGFQjA4G6HQSEAI7N6jZpRv5FpxpZyAggIB3k/ICA6Fowjo0Lg3MeEBKN2g8aUUlD8aCDgkKMISPFQBFy+xV4BSN4h1x+cmk2ytaCMFY4CBBTQgMAgYIHWIwBjIZmHpBsqDiSK2C1v1KyV2P31AgIwjAMQyCky1nSHdn+HJ/Hur0khgPtap35jloTj2tRrla+FYRygURJ7R3gDAXS18FYIIKGt26gpwU1fVxqwHaUBpaJrACsCHhEEjF8oykdwX6Fyka0F5fXhCF8MqEYGpmRSmScIWCtAwEwvRQBAANbIaAY7eA14h7UlgEA01NoNhcBmRSHwiEAga/oCerFOfVbBrD7+nva+hoGAWCMoW1BjgtcoPwA8EaVH3iUD6RNn8vpF6Yl7HPu1LROoqc9T9rptBB42AhYGPGzk7O/ZCNgIPHIELAyIbRiAbgIjxhQSHvixcxoUCJiSvSBAALvC3QbG8S4njPXMPebDLeUeFQisPXmRd1aRZLiTCVYELFwd3qE1KQLcIMAg58f1IVmDUiJz6jxOaN0wQIIAJJRow6ZXBNwimLMhcYbDv84sENeigoB5rx9gST5aMEI2zw7yw5O8FQIKEDDu2HJLOW8PATjhQ/XRvmd/Vih4tpQrKeMYwUzyUYAAds2hbkkuKI66r+gaUChBwByzRwB8JxAvNgv0UATAtBA76eh8AbgEk0Y3DKgICMDnDeoGnVkg7qtQBAgQgPKJ9acvc0kPkun0SbM8gQBc+eds3hf2EHgEIACw17pzT763gEb+QMC7ZGBeAA8BdHJAaQJKmNyfV/u1hQGP/GBiB7ARqGERsDCght1we7k2ArEUAQsDYh8GoO4aO7ayf/ikZRvNCgG0lIOLutJSzv1wDnn9GCR7wxLZdd2tEBCu8/l8voHJozlZ9gYCixhUINk0lgyghSE6FozO09Y24/fw84zJsyOSizAISGKvAlPLPynVhgoBiZkpece1AQSgpR5iCjCA3W0VBpQfeZfjB+M9MwhQSgOmzefabHec8bUAAUXcWg6mb6glV1sLbnj7Ko0unkO9AQRWbCaoQdzj7LnmGMaxQmCC2eTtzDWGG7g+rJGdLj8CXDsS5OxpC3j3FyUL3kCglGOFZNOkNoBpHnbY47ILaKVGkYJEGnXo6CagXhfL+UvLed3k+oGA0cFBgJTzy9aCKgwACID5IdYZfCTMpQFr6TW0mJyx0FyKcvgd9ulA4o8YYCwoNJAcw9APYCB90kwBBFa9Trs1YEoFAvg9mC2qMZKvYa4ZKhlYuTnKVBCAEMaEWNP9nTViAgJYb/j/BJ/9ycs3Ro2Fc6oKAYDIVccuRM0Lc4JRIpQ8cp72GP5bYssEYukpx87FRiD2I2BhQOzfIztDG4EnNgIWBoQf4GLtYRaJIZQBeHjH3AQQEO3CJgMIXDKYCm4/xEmBPxAo5WQMiZMKBJA0Qu6MhIkd5B0gAJM0XYyQ8CEJQ4JRtBhAQNex4D6btDEQyMwj1GyrYwkYkBUBA8IgQPR19wQBTpJXERCA87thAJLj+DHj2YHfBAJQ4y8UAYlci40kUL0W+RogAPJ4JGgSBOBnKgzA1+gxj2SPgcBKLyCwP5TsmRQCG2R9uAYICMiTx9J4KQf3AgIwQcwuCQgEkvVAAGsD51DbKEaCAHPLP24xCRCQks1mltoygw9ucOcArD0JAhBTNwwQLSa9QQA+A1BCsCJgRpkRBAB6wLBTBQE4pwoD8HUYCCSz/4YnEEhIZ5DgCwSGJ1GxCwgABkA1sWjPCZq7JbxGfIHA8KSAQGB8FBCwrQW9/25YGPDEPjLZC7MReCwRsDDgsYTVDmojYCMQJAIWBng/1OGhvqr+uWEA5lGZQADJNeTUSH5UICBhAFoLYsezQkBgOGrN13oDgZQsrtlWgYAbBqggAFJtPxCAGvwF2z0UAU5pABQRiKG8pyoMECCgMAAI2MDgA6Zs3iBAKgL2R5jJuWEA5lIhIJAEDwFvhQAbxrmAgIQBaC2odixAsr7yiLmFIdYI1Aa+CgENEHDDAAYBZVIR4AECUBowOo+9FYTTf3QrQkCnokVK+ci7H4XuqwoDALGg/mBFwA69IkAFAdhhN5lTivKRQvZ+kIoAuZbcMADfX3f6klAIxAkgoFOssEJg0z7u9MAKgZNmhUDGlNlclqICAQkD0E0AXgBBgQBKZFDiYlYI3KG5rx8IqUhUhYCFAd5/FywMCPL0Yd9jI2AjICNgYYCMhD3aCNgIfO4RsDDA+6FOPuhXxVEHAzAPCQTkg/yjKASigcBN7jYAZQBgAM4ngQBk8/g+JPm6eEQoBLhkwEMh4AICKgxAjTo8CLDb6wsCsgo4YfRUBCx2SgNKSiNAAK5BwgDUmyMpxm4v+q5rkzZWBPiDAOzao3Ybbd/mbt0fJcUWMCCX0ibMiIijBAJIvP1KBmBEmOALBOZxEg8VCdQMKgzAtbuBgKlkAF4IYSCw3rtkwAUEVBggQMAq6jU8kXLn+IEAKAI8Wv4xCDA7/UsYgNKVioIAE3iSIKA/1ohTGqB+DhgG5E4kd7s9AQTgIVBJQGCyBAJbeG2pMADzARAQLQwFNFp93NSx4KooKwkKBMaGFQIWBnj/3bAw4HN/lLEntBGo1hGwMKBa3z47eRuB6h0BCwO8H+rUh/3P+7UJBmAegYHANuEhwLu/Jgd5RyGA5Hvi0vU0bu4yTvolDMD5AASQcAMIxPsAASTwXDJQASAAV/mByVmUnF9MSOCCggD8jrciwAwCcF2AAYAqXfoN9QcByxwQMHWeURHAIGBsEfs26EAAzomkEcm8u44eP4sAAr4lA+mUkDshSsKNccR5nGTPkYPnz1/BigwoA+R7IoDAWLOHgOwxzwqBpf5AADJ6JM8SBmD9oeUf7mvubA8Q8CYUAcFAABQtOTMXaQ3+AAO69h9G3QYM91EE3AyVBrCPhMGcMgQCEjO0IADxhCx/yKgxlJw/JRRfGWdRMuAPBGZv2kf9ZMmAUSFwmctpEMvilVsIIAtlAlAGyPOx2kApGVhjNCjEGpnL96UiCgELA7z/blgYUL2fi+zsbQQ+7whYGPB5R9yez0bARiAUAQsDvB/q5MN1VRwBA/rEp7DjvO78DASmzg89yJsUAtjFhKmglxwcu6GoD0eC1WPwCBqanhtSBshzh4BA0miumfb0EHCAAJcMGFoYoq2b8BDIp9Id4nXzNh0VEBCW88s54LjisGjnJkDAIeMuPmTtSF5xXah/V8eQr3NmLaYGTVtyJwPU0cOsT/5MHtkjgEFAEvs3oE2b/Jl6lIqAAYnpWkUA3oskDdL2hs1eoZSCqdpxVCAAOTjWgXoevIYRIW
ADjONGegABeAhwsjc8KXRfVRiAsR4fECgkdE+AgWDT1u0DgYDhbBboowhYuEYY/BlAAK4JbQrrNGhMPYeM5LWFteuOIdp0So8ALh8JBAIO0h7dWJduUsGClfRy89aUkDsx6lw4NwOBiTAVTKZpq17Xr9srt2n2pr3ULyFNeAh4AgHRseC1oYkE40wVBuB8Agi8weUHUJF4AoHioEBA/D8yZ8t+SsqfEqWCcMe4pn5tYUDoEcO+sBGwEQgQAQsDAgTJvsVGwEbg8UTAwoDYhgFoy4aEBmZ0ugdrJLlZChDYaTAVnL/9IJvPBQECzdt04jZlMNBzn1MFAr4lA35AAH4EDhCAJLxVh27UoElLpzRAn7yHQUAmu7ib5PwMAoYlsbzdBAIgiwf8aNisFe+ueoKA4cFAAJJz1GzDkd0dOyRnkP93HxRPL9VpwOc2mbytf/sKZUyZwzDDEwjw7i8UAhONCgE4yAMIICHHbnnZzqNRc8O1h0wFxxYZuwywQmC64yHACoFoUIF7wl0GkjMZQqGdYeMWr/oqAhgEeJUGXIRHAECAX8u/s/TakAR6ucWr7HNgAgHhNVJm9KRgRcAY4RHAhpIaEIDP3KRlGzi29Rs3Z+ix7tTFqBhjPaxDl4GJM6m3DxCYFRQITJlNjVu24XMveeNU1DkZCGx2gECuPxBAbL0UAvPQ5jIpg/qOGMUKBndJhHvN19SvLQx4PM8rdlQbgSc1AhYGPKl31l6XjUA1iICFAbEOA1LopboNvevDz4Zbyk1ZsYmMQGCbAwQ8kj0oBAaPGsO7qqgTx+6p+4GegQDUBlIhoGkph9+BRBzt2SBnhkJA7Vggx8RYpTsPc2Lxi1++yHXiqsGffB+O0sUdagIkm2YQsJ6l/9nTFxCSV3UM+Vq0/BtPTVq1o9eGJjCUkD+TR+zIo94e6gKYrWGXXf5MPcJYjbsGJPiAgJWbOQlMzJvM8vXaDRpTUt5kWnP8Pe24kGKj1SISx+LyLXqFwNW7Tn24PxDAfa3bsCm3oNOpDQQQEMneSKwRE4A6c40QW8QF9eruFoaIDe4Nkmf0on+hVj1eK1sUgz81fjCSHJ4pzQKP0N4Pdbv4AAGipANlKJvf/VAbM9FisoATZCS2i/eFSyLkObGmwyCg1AwCjrzr+EhkiBaTBhAg1wjWAK63/svNKbmgmM0D5TnVIzwE0iQQWL1Nv46v3KZZG/c4CoHJtNZDITAkbSzVbdSUcmcvjjCqlOcMAYFEsUbMCgGlheGKTVqgxQaFWw/wvW/Q9BW+Tnkeewz/LbEwoBo8/Ngp2gjEUAQsDIihm2GnYiNQ0yJgYUD4AS7WHmZFmUAqde0/NCT1NbWUQ9LL7uBcH/5oQCBv3nJq0bYzteveh5O9RwMCH9LYGWW+QGDO5n3Usl2XKFM9eU+wQ4te9n4gQLb8YxDg0fJvBOr6kzK41ho70ov2CLNEeT4GAcv9QcDqYxe4br9/Qjon5UZFwMrNnEChheAyOOVn5FKHnv2o38i0YEAg7tGBQP685dTklbbU4bX+NHn5Ji1ceBggMNEDCJSs3UGtOvbga5SxVY/LDyogYKcZBExYtNYpDfAHAYBUQ9NzaGjaWFryxukIaMAgYOl6VhdkT/cHAWgL6aUIYBAwLIlbTAI8QKHRrntf6jsyrXKAgKMQADQyAYHCsnKq17g5deoziIrLtz46EOCSAZhYmoHA1FWv06tdenIpg3o/7Wvx98TCgJr2JGWv10bg0SJgYcCjxc/+to2AjcAjRMDCgNiGAX3jU3l3GH3qYSzm2VJOAgGW+j48EIDr/ICkTE5UYa4HCbTJjwA79EIhUMg797pkALu4Agig7eA6rULg9Qu/YeCRPmlWRPKG8YR5mwABKHcwKQIYBMAjAIoADxAwciy6BmRwPXth6Up2m1dhQBQIeNusCEDyxyBg8xvanVTsysLkDbv7aAuH3X7ZWhBmiVwfLoGAweQtrBBIchQCd6JiBAghHOTNCgHc174jUnm+vR1oZFQIbD3AZSVQCBgB1JlrlDV9PkMOExBAiQL8J1IKo/0RJAhAvTvKRfRy/husKnltWBKrTDwVAdkFbHAJo0uUEwAIqDBAVQQwCDD4SKx0FAECBLyp9QhAxwvI6aGO4BaTZ6/xmkNrQKyJmRt2BwICsmRgupdCIAIIRJcfQJ3RfVAcXzvmM3XVVv1n5ModAnTDesUcPRUCjocAylp0gAu/izIhQArdZ76mf8/CgEd4KLG/aiNQAyNgYUANvOn2km0EYiUCFgbEPgzImjqPExIBBNK8gcCZa2w4CGm+KBm4FfWwjqRr3rY3hYeApmQASSMe9Ket3ibk4BUCAu9GnQ+JgQQCSOowvrtkQLQWzGTwoSYSKghY4AEC0AUBiZA/CBBO/zC2gwmfbC0oYcDjAgFQBCCpx7VJGIDWgki0wkBgijFBW/8WHORnETwkRMlAxYEA4g45Pu5rJpI9bmGoVwgIg8IwEPAqGZBAAEkpzBbV+ye7CYwqmh7x/YqCgDEzFtJmg8EfSgNgUjgA5SM7DjNUkK0FJQwAzOI1MgxrJLrFpJxzGASkixaT1+9FzBvv04EAfJ9bC46bRKlF0zkOM9fv5tp6Lhk4dSlqHPyOKBmYwbDIEwhshKlgOiffa09GAgHZWrBk3U5KnyxMBb2AwOzNomOBPxCYw+U2DARcxppYy0l5UyilcJr2umQ8a+rRwoBYecKx87ARqB4RsDCgetwnO0sbgScyAhYGVA8YgIfqPdfvsYM8pOXeLeUkEBBSXyQv7odyOKLDMA6GdyNcLeUkDEBrQdTvc314ZQABtDDkkoFohYCAAVkRMKDCIGDafB9FgAMCtgoQgJioMCASBMwl7Gq744av4RHAioDEDJpTAUWAHEuFAfgegAAbxrFCoJKAALenm8jt7uR5JQxANwFc2+hAQEB0LEBZhRcQyJ42n6A2cAOBMAwoCcVSggCYRvopAqBM8QQBhyUIGE0LdhwKqQtUGBACAU5nCaMnRUgRkM6fDXzeZOzk0QQC8HMVBuBreCnMUIHA6coCAlNorWJQKGEAugkAGkFdwwqBcpNC4DYFBgIwsRyWRFNWbo4oP2AYYLsJRK0PuU4sDHgiH5fsRdkIPLYIWBjw2EJrB7YRsBHwi4CFAdUHBuBBMwQEEgAEzA7ySEx49xcP8mwqGBwIqDAA52QgMG0BJwVeJQOihSHaDqJkwKAQABAoKeP6b7VkwA0DBAgodDwCzKUBQRUBAB5orwg3dFX2LGHA/G1v0pTlmziJQtxMIGD1sffCIMCjawCXBsSFSwNkkoCjGwbgexFAID8oENjK7ePUseVYgBSQg0O2LjsWqDAA7+MWhsVzOInHGtGVDIRbGGb4KlIgl+fyg2VhhYAbBggQkM9GkWgt6VUagCQUBpRGRQCDAKc0AIaSisGfhAEoYwmtkRJ4BOi7VIQVASgfQWmANwjIBHhymVO6YQBiLIDALuo7chSleJkKnrpEoyaUCIXAmu0GmT9MBaVCYArJjgUqDMA5uYWhBAJeLQwjFAIfaBPbUJtLKI0UIGBhg
PffDQsD/J487M9tBGwE1AhYGKBGw762EbAR+FwjYGGA90MdHq6r6h+SM3gGoExAnQMDgS372WncDwjw7i8Dgc0sb1bHwWtWCHDJQDqxg/yRcyzjR5kAlAHy/QACSPaw44haaZOHAIBAf+4yYAYC6FggFQJwdUcttwoDVh45R/FjfEDAldsUxCMA9e64LvYIcIEAXBtgAGrLR+YUVQAEmM0CdyseAWppgIwjjoABOCeSP/X7cGqXLeXQw91U0827vygZGJ5MU8uDAwE3DMC5AwGBq3dZkYIaek/PCngIyDXiAAEVBiw/eJaGZ+bTwORgigAvECA6S6Dl32jRWUIBAbguwICByaMpcdzEUGcJEwhAi0m03MT1eYMABxZNnRcFAnBOwICEnAlR/ggAAjPX76J+DASmGrsMYLc/DUAgLpmmewKBPcJ4Ml8oBNwwAHMBEMiY5KyRgEDAZFCINpdYy1xW4gABCwO8/y5YGPC5PsbYk9kIVPsIWBhQ7W+hvQAbgeobAQsDvB/q8GBdVf8AA9CqDF0C3HOAiR4M41AygIQH0nWWhAVvAAAgAElEQVT3e/D1xjNXafSUuaHaX8+SARgUji0iGNu5YQCPFRQIbDvISRoSLJNCIAQEhidx4oZECkkiXNjjx4xnRQC7uF+L3qFFXKBQQHLC5m0Gs0DR8k8pDbh6NypG4xeupjZde/G/0cVzODnWxRG764m5k7hue7ahNAAgAMk5mwVOFmaBurGW7j9NnfsOIV2PdgYCaCkHN3o/IIBkL84PCAjDOCgEcD54BqBMQJ1XGAgkE+rDtQoBAIEtKBnIEG0uDesN7RcBr4RCYCOtO3WJE+2eQxIojkFAJpU6df3qHPAaUAjAQpQGlJlb/h1+l2ERQADgk6oIkGPCQLBlu87UvkdfLnMxlQYABGDNo1zGCwRAOYFrwmcRa1WeRz2W7TpKXfsP05rqRSgECqdysq7+rnwNIMAKAS8gALiwIQwEUBYwKDWbUCYgx8FRKgSwHqd5AYFNwkMAa8QIBNDmcspsBwhsodUn3icAK90aVudQU19bGFB9n4nszG0EqiICFgZURdTtOW0EbAQ4AhYGVF2y7/egjKQMyR4MyJBMuN+vAoEETyBwTQABNozzUAjAQyAhnV7p0I2PqjJAnnujAgQ8Swa2HRQGhX5AoKSUk7+8ucv4WLv+y+yKDtk+2tzJ88qjGwQg+ZQ/U48CBEzg0oC5W/dHlAbI98HpH7Gt3eBlVgYgKZY/U48MAsZN4pig1lotM5DviwYBl7VjYbcf9ffPv1SHj9qxoBCoKBBY9ToBJMj5yCPGn83JXhq3bkSphBsG4L1SDo6Et3glgIB+LAEE0gMDgZyZi2jwqDFUt2FToQjwAAFQiQDwoIwEsEheg3qU5SNQepha/kG1giS1Vr1GXCZhBAFHz/F9FyDggFaaLzwCFEWAAQSsOnaehmfl00t1G3B3Ch2gqBAQKPJRCChAoEW7zlyG4IYBiJsAAjMZUPkCgZGirMQbCAiFQHZJKZfLWBig//thYYB9wLIRsBGoSAQsDKhItOx7bQRsBCo1AhYG6B/m1ASkql5LGNCkVTtuF7bzsqbuXyoEAngIhAzjVm4m7VhsKniAOvTsT81e7UDT1+7QJmQCCMzjxA191ndeuhn1PiRDSNawk+ynEEBi0X1QPAEEtO3eR7i4+4KAeRQIBHjU9WO3F+AD/dKRfOvuczQI0CXJjiIgLpnd3CHj140FEIDd1J5DRlLT1u2pedtONG3NNj1cUIFAwRRac1Jf0y0N4/rEpfDurxcQwC55y/ZdaMa6ndr5QfoNOXhgIJA7wahIgecCdtGxU96oeWtq37OfpyIgBAJmPDoIgGweqoC23XrTnC37tddaziBgglMaYAABl2/R5BXBQADKdboNHE6tO/ekVh27cYz1QOAWzVi/U3gIFKJkQL9WWCEQEAjgM9OqQzeGPrp1h3OghSFAizcQ2MuKlIQACoFOvQdxfFPG224CuphbGFCpjyl2MBuBJz4CFgY88bfYXqCNQOxGwMKA2IYBfeJTqFGzVvwgj3ZhfgqBRB9TQQkEsPurBwL32I28bsMmhP7vRgf5s2p9eAAgMHY8waRN9+CMXWDM66V6jThZ9lUETPUHAYAQ2MXW7bxj1xtyeCRHbbv3ZjM72VpQnV/lgoAP+Nr6JaRxicPA5Exq0KQF9Y5P4fpw3TwjSgYKis1AAMnepFnkBwQyJs9m4DI4bWzIVFC9XrwWQGC2aGEYRCHgsd4ABCB5r9OgMXes0JsF3qTAIGDMePZ+8FIEcPnIsCRq170PDUwaTbK1oHqdQUGAXCOZU+caSwNCnSUS0tl/Ap4IDZu+QvjcAroEAQImeBQGAmKNQAmkXgdeo5UjPjsv1q5PQ9NzQ6aC7veJFoYBgYBjPOluYSjHxBpJLphKDZq21JZEyPfV5KOFAbH7zGNnZiMQixGwMCAW74qdk41ADYmAhQGxDQNgIIg6b7V/uC8QgIO8oaYb9c6ojUciDMd7HRBAvXXXfkO5jh6Gcd5AoIIKAQMQQOLdbcBwSps4MzrhuXKbTQsxZ+w2m5z+OTHLmci7vdgR1iXYEgSgjhq74BmTZ7Gs2w0DQiAgMZ3bsOl33O+wYgOlHLg/pqRuzQkHBIxMYwUC7sGwjHHsi5A2YUalAoHecSk01VAygPva4bX+1K5HX0ocN9kTCHB9eJxYI74lA7kTjOtt+Ztn6LWhCex/4E4O4REAoz/c1zFeTv+H32V1CXsEbDuoTbBRGqD6SMC3ACaNbhgQBgHpNHerWRFQERDQLyGdwRPc/VF3D+AD+TxATzAgMM24dsJAwDEV1HQ5gM8CFC4dew1g4ASfBnes8bUAAjM43tNWv64ti4AaafYmoRBgDwGlhaE65uJ9p6jXsES+XvX79rX4e2JhQA15gLKXaSNQSRGwMKCSAmmHsRGwEah4BCwMiH0YgMQGNd1IXCHhhtQXO4LuB2/VQwAP8miD534Pvoa8XrqDF5dHAwF2nR+dR9hJRqLj6SB/9hon6DB9Q5cBT4PCxHRO6mDa5p6X6CaQyedUf4bkBOMiYYRZYGWCgA1vXeHd3LisfFJhQAgEJKTTHPYI8C4NwH3xAwEwRkQpAhJrtbUgEjfVME4HMKRCAO3p4HFgKhmQcnA2FWQgEOm5gPs6JG0sZUyZw/c1KW8yrTn+ftS9QPyx+4v732t4MkMjExCQLQwhk9cBKHQTQKtJd225AAHSI8Cj5d9DgACUscjWgioMECCgiL0fAoGAYigCzJ4UuGZ8PgCe8NkD5MHnLnX8dL5HKYXTCHBmxvpdWoABEIefcdvBQj8gMJ3VGiVrd0S1PURZBAAE/p9AxwIYTyLxVz9H8jXWW9rEGeL/EQ8gILtaJOZNIgAJ+fvyiPUBoIT1KL9nj+G/JRYGVPxZxP6GjUBNjoCFATX57ttrtxGo4ghYGBB+gIu1h1kkwmprQQAByMGR7HkBAZjcwY0eyR4SW911IakO7f66gACSRnQTWLDjMLeUQ6LBLQyPnteOhSQos9hRCKzYZAAC99it
HWZtsoWhOi+1taD8vgoC/BUBov4bHRa0CbWh5R9aC6owAC7pSIBwzdgh9VUETJppdIZH0o7ODEj24P4uE2oVBuBaxe7vdJaWI9nTzR+/O3PDbpE4Fkw1ur5LOTivkdWRfgQMeTLzuHZfJHvpvJPs1cJQAKiHBwJqa0F5X0OKgGFJvk7/I8Y6Lf9gKOlqH4jx2OBvWRgWYS3i+24YAHUL2v7BIBPlIzrJPZJzqGUQO0jvvTwppAIA602OFYIBRdN5DohrSuFUUTLgAQRK1u2kviNGMTBBki3jpB5h6gfIgLm5gYBsLbhw9zFeI6KFYXFAILAtNH/1fPjszdooFAJJGiCAedpuAua/HRYGVPGDjT29jUA1i4CFAdXshtnp2gg8SRGwMMD8QKc+HFfFazcMwBweCxBAyQADAaE2kDAA3QSQ6MCNHzug/kBgLu/gw3RNrxAIAwE46qsKATcMQPIrFQGo2TYpArAbLZM8TxCwYjM7qkMRoSZcKgxA8paYN9kHBNxlGT6SMoAZ0w4sQACSJQECdodAAO6hGwbge1IOjlpzPyAA0JPiCwRmCGikAAEJA9BNgNUGkIMnAAhMpsoAArxGxk2MAFBuGAA5P5cGMAgoJZPT/8oj57jbAroGeLX8k2vE3WJShQEAAVC3MAjYagYBU1Y6awQtJg2KAKw3HQjAPXTDAHwvOBDYQX1HpPoDgSIAAbFG9jglAxIGoJsAFEOARqwQKPABAihRgdJotQEIXAYQcFoYuoCAhQHefzcsDHiSnpLstdgIPP4IWBjw+GNsz2AjYCNgiICFAd4PdXigr6p/OhiAuVQUCCDB9VMIoIZ+arkwKFRhAM73UEBguRcQOMB93VUgoMIAAQKki7sPCMiFRwCk2lAE6OX82O2F3B2ydxUE4NokDJi4dD0rKZDQmhUBd1mRIUCAWRGAXVxItXUgAOcUMCCX4BeAr+W/MBBI5U4Oj64QEH4ESPYwlgoDcM4wEBAqEm8gMJthCtYI2ijKOcsjxp+9+Q0BjVCi4ihSVBjAIGDpem4hmT29MkHAvCiDPwkD4CMAWIT7Cqgld/HlvHEUioAwLPICTwwCRqYRyiPcYzEMyJ3IO/jq+IGBwNqAQIAVAikERQGAgAoDcF4GAk75AWT8JmDFJQMOEJi+xg8IjOLPhywZsDAg/LlV77V8bWGA4YHDfttGwEZAGwELA7Rhsd+0EbAR+DwiYGGA90OdfLiriqMJBmAueBhPnzTTv2QAPeZHpvGOt0zQ3Nci/AjCyV7+/BVcJgBlgHwvA4EtFVMIoHWfWSEggIAsGZAwAHXWU5YHAAHHL7BSIQwCIuvjMW8kuwIEJHFJhK6uHzAAu/HdB8ZxnCCf10MFCQJSOO6mBItBQEEx78zO2hCpCJCx3PTOdTYPhOxbfk8eVSDACgFti8VwyQCSPVNfeFEyAIVACu/+FixYyWaUUAbI8zEQgBw8IS2AQgBdBlIYGpmBwL4IICBhAHbmAVzgLZE9fYFREQC1CCCRvyJArBHUycvSAHlNOAIG4J6ijaOfIgCdNbBDDmPNhwUBOCfuw9CMXK1Z4mMDAmt3EIwhB6VmE5QBMgYAHDM3CD8CrJH1hhaGYSAAg0J/IACwiDVqYYD33w0LAz6Ppxd7DhuBJycCFgY8OffSXomNQLWLgIUB3g918uG6Ko6AAUhUM4vnhh7y1XnggTwtCBBwPAS8FQLCMA4KAZj1we1ehQE4L4AAzNIAF3xLBqYKDwEjELh2j+ZtdYBAThEt2nOCE2S4omMOuGZACvV65WtADZz/UUEAxsudvYRbN3btP5RrpGVdvzwXjtj1hkcDEmH0a38UEICxJi/bSI1btmGpv3oe+ToMBJySAQMQYPO5ETAVhIdAtMkbxgsDAXlfc0mFAeL67ggHeQYCU3xKBkTbQW+FgAAC2EUv3XGERmQXUou2nXhd+YEAwCG0hQxSGgAfCR0IwDXlzVtO9Ro1YyAQWBHgVxowMo07S7gVATgfduOLFq+lJq3a8trE99z/VCCARF3XahFJPCBQnyAlA46HAN6LNqAqDMC50XUE5+GyksKpRiCwlk0sUTJg7liA6wuXDEymBdsPWc8AzT2W99zCgGr3KGQnbCNQpRGwMKBKw29PbiNQsyNgYUD0Q7t8oKvqI2AAJOkAAqhB182HgcBERyGw2txlAKaCkK0HAQLNXu1AXfoNpQU7DkWdUwCBN8JAwKOFIRJ6gAUGApdvaceSQAA7qq92eY3qvdzscwMBSM6QSDVo0pKghjCCgNUVAwGo2TaNNX3Ndt6tfqFWPeoTl0LYmdXdVwCB1CLFVNAPCBR6AAE4yE+YQU1atePkuGzX0ahzRioEAARM6+2yMLF0ykr8FAKDU8dQ1/7DGLgEAQEDJAi4di9qjkiUJ4dUI2YQsOroeeoTn0oNm71C48tWRcn5EW8kymGzQB9FgDSU3LxPOxYSZbRz7DZwONVt2IQ/ryaFAQOBAmEqiHWiBQKXBBAI5CEwfhq93OJV6jYwjpbuP62J2W2lY4EXELgY0dVC+hGoa1MFAmjzODApM6pLhPr+mvzawoCa/Vxlr95GoKIRsDCgohGz77cRsBGotAhYGBDrMCCFatVrRCmFxZ47ttixBjhA/3A8tLsfxJHESyDg3WXgCu/Q123YlHJnLeZ6at1YqNFnw7jcibTKCASucmIvgMBm41gAAkjKf/GrF3lX2FMRMG4iy75Rs41ddvfcuDSgHB4BSY5HwOWo9+B3kJQhDkiQewweQaU7j0S9jxUBDAKSKc1LEXDqIqVwaUAam7fpQMCea3fFjm98CrdX7NJ3CNVp0JiTKaPSIAQEUoWpoBYIOMneiFHsXA+I4I4JvgZ0QAIH2JI7Z4k+dlfuhHd/8z2AwGkHCMQJnwlTWQXWG+7Di7Xr8z0zmQWiNACKAAECDmgT7oqAAKhGoLxAicCivSei4hEBAqZ4gIDj74nOEgaPAMQVwE6oRpK5KwXKIBo0acHwxQQEVh9/n9UcABaBgYBBJYPShAHJo6le4+aUN3+FvuNCRAvDqYQWlLo1woqUCSV8zwCtvIAAYtuoeWvbWtCgDrAwoNIeUexANgI1IgIWBtSI22wv0kYgNiNgYUBswwAkDB17DeRkGW3KzAqBy5ywCiCwjZMU9wM/A4FNe7mNWVLeFEJS4n4Pvs6bu4yavdqROvTsz/XhJrgggEAau6t7AQHUYveKSyK4tSOpc58T84LkvVmbjlGmevK9KA0QLu7pDDVMCWhx+VaGImiJp/MIwHiIIZz+IZ9GqQFKIhbtCfsj4D0CBGzj0gD0ZfdK2HFfMJZJERAGAak0qmg6Ld53koZl5FKbrr34vuJ7xvFPSoVAKpWs20G7TUCA29MJN3oTEMB9fblFa+rUZxAh2dPBFIAMKQdHW0STQgAJpfSsmLpqq8Fn4Q6vn5btujB4kfdSPTIIyBGlAXO3mkEA1CWyfGSjQc6PNZiQi7aQ6Qy0hqSNpSVvRO6WMwgo38JjwVDSCzwBFuG+AmpgjarzxmsGAau3scIDsUBrP5gVQuECNQ/WjamMQQC
BYn5fICBQNN0414LSlVS3UVNW8+BzpG3ByEBAtDBEWYnJQ4CBQFGJb8kAujigpAefSXdc7NefkYUBsfm8Y2dlIxCrEbAwIFbvjJ2XjUANiICFAbENA/rGp3KdOhIG7J7jQd4bCAjDOCR7SFbcD+ZIamCSh5IBJMSrT0QDAbjOo24bu7pIwFAf/ihAADukaOnHCgEDEIDDPhIvtOtzz1mCABjBITEzgQDMk53+PUAAG/w5IABJbwGbJeZHwIDAIODkRd6NN3UNwHWoIACyf9T2y9aCSDanO/Xh3kBA9JgHGIKDvB8QAJzQAQHcVy47iUvhJNQLCMzcsIfXCICAl0FhGAi8rr0vKGMZkpbDcXLf1/Kj50TLv0Q4/fuBgCRWmXiBgEQHBABSjS9bTUPTcyJgQEgR4HSW8AQB+Q4I2OQDAuKFoSTAE3cTGDeJVQ5QC/gDgfd4Z91PIYD7hLGwRnRzRjcBlGJgHeJ9AALa8gMAAYZGQkVigmUhIBCXzOtTpxDAZ3LY6Dwj5HHf65r2tYUBNeDhyV6ijUAlRsDCgEoMph3KRsBGoGIRsDAg9mEAHNORoDIQiPcBAqelQiCFd38DAQGnDZx8YJetBbGTnz55FieQ2P01AgFuKeetEIgEAluiFAKim0AmS/vlPHBcc/z9QIqAEAiYNMu86+m0/AN0AAjALrhsLSiVAVEgwKOmH0k3EjAkX8bSgHU7uH5dggBck4QBqONHTMNAoMRDIQAgMI3H8gcC+pIB2VoQtfKjJswIAAR2h4DAGoNnBRQNXkBAdhNAIqve1/Kj5xUQYG75JxQBAUDAOKEIAAgA8JKtBaUyAHEuroAiAPeVW0walBho18jgadLMkAJFwgDca7RRVIHABoOaIawQSOU1qU3iL93izzKggQ4IcGvBlCz2LcCa/DyAAHcTyJuihTzqfa6pry0MqNhziH23jUBNj4CFATV9BdjrtxGowghYGFA9YAAeqlUgkMIO8gaTNwYCUAgId/AgQAA19PLBXcIAdBPAQz926zEWTNIqDwhElgzI1oKQbst5RICATQEUAR4gAMlssqIIkMm7CgMQX7RXkzu6XuZ+kSBAp8BwPALgCu8oAuR1qTAA3wsBAWf311wyEBAIoMf8iFHCj0CBGRIGoJvAulOOYVy8gEa4djk/eUSMAKCQGHPJQGAgEB4rDANKQuOHQUAGeTn9SxAwGp0lDMk0SgNE+UgaSRCA+aswAPGVsMi/NGAKXy+3mDSBgDXbqDcUARPDIADnVGEAvg4BgbgULn8xlwwIhQAMAwGpggGBq6F4Mgzg1oJvsxfG4wMC90PnZBiQP8UaCFrPgCp8erGnthF4ciJgYcCTcy/tldgIVLsIWBhQfWAAEgwGAut38w4xHvrNEm4oBMK7v35AAImyBAIqDMA5gwOBfSz1R3Jm8hAQCoHZ3NcdO9TSQ8ANAwKDgFVOacBDgABcm4QBaJVWeSDgHtf3Ayq4QQDO6YYB+B4DgZAcvBIUAhogoMIAnJPl4BNKGH54lww4QKCg2FiiAoABk0VAI+yaS7jghgFhEIDSAA9FwHJ4BCTRw4AAXJuEATCGfCwgQFEE4Hz454YB+B6AACCaAEwzPTwEggMBlA6NKiohaVAYhgFneB7csSCwQiCVd/eNJQOOZwXuKzwrpB+BhQHefzesMqDaPQrZCdsIVGkELAyo0vDbk9sI1OwIWBjg/VAnH/Sr4ogEHg/+KBNQzy+BAH6WUjjNAwggQQsABDYKD4HkAgEE3DAA544CAgY/gjmbJRCYTKuPvRcxb3kNsmQAfgQSCKgwAMmM3O1lqfbVO1HjIAYoXUCSAuWC0RBNMQuUpQFyHjgCBsDML7mgmI3gIN03KQLwfX9FAEDATk7+UsfDIyBavQEYMDQ9l1u5qXNxAwGva5IlA6gBN5nbYR6yPR3m7oYBOPfDAAHdNWGsUMlAfEoICKgwIDAIWLGZ/SW8QMDq44oiYLMoDVBjCRgwKCWL7xfWiJciAOBJGko+jCJAnhcwAAaCWCPyeziyQmC1AAJQE1SOQgAeAgIIuGEAzimBABQM/h4CqZQyfhp/xtV5y9fwuQDUQhxR0gIgYGGA998NCwNq9nOVvXobgYpGwMKAikbMvt9GwEag0iJgYYD3Q518IK6KI2AA6oQzXTAAcxFAYFcAhYADBOKkh4Amsb52l+XJLAcvKCYksXFZBYQyAfW6VSDg18IQdfmJ44IDgU3vXKeByVk0IDkzEgRc0cy3AiAAioe+cPp3PALU68HrooVrqH2PvtS2W2+WckM+734PvhYgYBrL74VHgK40QIIAuPrrQQDGWn7wDHUbMFwrsQ4DAbH76wUEkMBhfcxYtysQEAAYGp6ZRygTUK9RGsb1cdaI3NVX3yNLBnBfAU48gQArBAQQwI7ziDHjuStDQs5ENqb0VAQwCEgWioC3w1J4dS6rURqQB48A4fS/W+P0P2HRWmrVoRu179lPtJg0tOYDCJDlI7M37g0pGtTz4drZI4A7BMw0gie0MewxKF5rqhdWCCRTumeXgfd4PrivppIBjCVMBcUawf8Pg7hMQCgD5NwlEMB99QcCoyoEBNY4kA1QSp7PHsN/SywMqLRHFDuQjUCNiICFATXiNtuLtBGIzQhYGBB+gIu1h1nAAOzGYfd/tyEpRiIICbJ3ycAl3oXG+7CzJ+vl1euFQz2SD9Sat+7cg3vSu2EA3o/kLm2SIweHh4BBITB7k1AIwDEfzuPqueRrOKNjxxYKgfFlqzhhrNOwiWjntnGvfp5X74b6usO4ztgzHR4BBcVc/426d9M1Y3e1dv3GFJeZz3X0cm7qESAASQ9ig5140zXjZ3wvxpvVGogfdqFfqFWPEnInapN4uN6HHeRLjMknEjKsDXSZQLLnpxBo1ak7t9xzwwBcK4AAdn8x/5K1O4xJ8cz1u6nfSBgUFms7FmAsWTKAJDR//EDEDh0+z4n2nCeh7IA9d8vAgKQgCHJ02aBNhCAMVc0HaBhYyZwJwMnCHj2JyfTkN0DkviBAN01oHr30QRFAK539dmftPXMFRo1YSobzZlxUz/XQEAle6rHfOJOPNQG0fpwPyCgPQSyi2jRilWsPNDKAH1t7liA+vAQQABdExga+QCBaInK+iaOFUozxk6fo7oG+JUGfPYdFUS6BqADhcvpXykC3CAA97V6x0F6Y9ibNGFWCsMilyIgDgTcdngEoDQgAAQAKkCF8OakGVaHfagpUDKwOAAIQDWyul1AoIg/q14YgBigQwHAHmAg2llaSwYMIAAvEjXfnid8rhUQOKo8BDbuYGiVWVFHUiZg//9DYEAHLlLkVBKBPhABgQF94CHLLUoEkjUCAgPsizmdKHXnETAAyTKc/bHDahuLCQR2XHjXrRBgM7BMlq/7AQEoBIaOnkBoe4fEwHtNBgLsIK/6qrsUAkjkYcKGZBzu7G22koFHv1Dt/tMsu85evZ5blb3SfyC7uANOeK+NrxEH1dc9kyCPxm60930AAWjBiKQV9+MCATgXkrPXXh/MUnIkxQnnioKAomAQsLmZE2rswHtLA3B
egADEFK73//H3V7j1nFmHra8dpxDIUbu/+F39c31kIIBkL0ghwECgggaPGsst9dDuTp9DHxUQiCgENu10qg0w3zQQQIztJQP/pLV7FVzILK/lOdzvjSG+ZoFc11+/jd/rCwLO3aB5cPpf5W75h84XqXiubwzm0o9AEIDOEo75hs9KFATsifcI0LEDCACMe3tpDr302utc5mF7/ojV7qsGELiqyjX0efQRQKByx8EQCgF0LCinQSPGcDkQdvn1OfSRgcBOAIEMVpHAU0D/TB/xeYm2MMwqDAUEEP/U/DKBAfD8sPwTGJCsKx4Zl0QgOSMgMCA5n4uMSiLQJyIgMMC+mLMt8Lr6exoG9Bs4lBfyLiCAtnZwxIcbfyAQwO7vpmanQuDA7Xs0Y1E6DRgykt3EreUHj9DCECUD4YDA/CgQSCxlwO4vTN2mL1xGf/vPV2n6wqUqMbM4nKPlXwwEnAgGAcWrCcmrzeAPsUTpxNDR42nS2wuo6fzNhEU9EvAdF29zXAMVAYEg4DntvHyHFuQUsT/DmCmz6PWhI6ls6544YzY9x9oDBHj3NwAIIFFEjAcOf5Pr6ZH0BqGuAAAd5ElEQVQA6mvpowkEoHBwlR9EgYDHoFCfB0dAGgCBqSlp9OqAQXztw+99kXBNvFcZ/GkQcNqqCMA8bIyCALehJEBA0bomGjxyLE2eu8gKtHCupvMKFnGLSR8QgDkC2IKyA69HAMauQQBUI3PSc2nynEWcnJdv22cFQiYQWJS7MurfYMYOrxkIbNdAYK/bQ+D6xzRtwVIaOGIMe3Fg3njPFQUCaSFbGIYAAojt0DETrOoW7/X74tcCA/rE8kluUiLQYREQGNBhoZQTSQQkAu2NgMCA5IYB2G0cMW4y7/yhPtwFBNqvEGjmmm7bQh2t614fOoqTjCbs/trKDwAE9oUBAp+zOzsDgfNQCNiBAHbTBwweQbnVGx3Je/tBACCJLTk6A4O/SJI3bcESVl7sans/LomKgQCUBlT4lgbAtR1mgW5FQAwEpBWU07az1/maw0aP5x1b1If7KQRSYSoYoBCoaj7sqxBAHDBOgCXAnh0wjLMCAZQfKIXAyk3NgUCAOxb4KARQhgG5PloI2uaaAgGNEUWAHwi4GagIgEoFsAgGfwAfqM/fc+3juOty8n7+FqVkFnJnCT9FgJ4jKB+B/4Z3/CYIACzacqqNFQtIkrETX94YDATYQyBIIZCaQWUNbiCAn0GRgHIAzGPbnFclA7E54qcQUGDRXyGAOQulSUZ5bUJcvHHqi18LDGjvSkTeLxHo2xEQGNC3n7/cvUSgWyMgMCC5YQC6CWRX1kdqf5XU1wUEkPyi1h8JGpI9axIPU0FuF6b61WO33btYVw7zWTTx7YWx+nBLEh/zEAhSCESAQGY+NTqAABI5KA3yajYnjAf32z5FAFr++YAAo+Vf6ZbdnOybMCAKArI1CPjUmmBh5zwMCIDrO5J5gAC4wbc8+IEl1jDk4/rw1EzqaCBggwt47oAfk+cuVioSXyBwlOECwFCwQqCAyzJscOHoB1/xHPKa6mHOdRYIAKQqb9zPwMWEAfg8RD0CAkoDwoAAGHcqH4kKVqCc+uw7hh6QzwOCBAOBDyMeAitptwsIPPiBKqEQiAAB1O57P6/waBg3Yx5NS1nCyhM3EPgHcQtDrRCwnEuXDGgvkp2XYi0MzesCoqCjgcAA+/8fAgO6dVkjF5cI9LgICAzocY9MBiwR6D0REBhgX8yZC9/uem12E4DsWpmBZRAS2BaHhwD63jMQyC4KDwTuxQMBwADsdFZuP8Tu6Ki95/pwBxCIGsZt2OHsMoC+7ctWVnFi2Hj+nQSFAO4PiVV+7da4ZEeDAHaED+ERwHL+d+xO/6wIMEAA6rqRIGMnVMMAgAC0WYMDf5AiAIkyxlXdfMQqCccOrRcE4Htma0Hs0FbCMI6BwF46+8UPcfePuYff2Xsd9eFltNBPIQAH+Z2HWaWAOeIFAniuaF2I5BJdBVJ4jtzxUQiEBwKsEIi0MDQ/L3h+kOKb7fbwc+0RgF18lIlcdLX8O3dTmUCudHsExBQBykcCcn7dWlDDABMEwPPg4O17CXHGuOARAKNNPFfEy6UIYBCg5whKUSL3BAVETvUGjn1F035W9PgrBD6kxXmlBNPOQCCQlkll2/aSFwigZAfxx9+H1ILSYCAQUZGUbt3tLD+AhwADgewi2nk5EQhwN4HyWikTsPgFYC4IDOg9ayS5E4lAV0RAYEBXRFmuIRGQCFgjIDCgZ8AALDDjgMDWICAAN/oAIAA5eGomoT4cbeBwDfzTMKD58vtsvre0SDnIO4EAkq+9JziBKmkHEDCVCwoGFMXBACSSaGPINdt7Q3gEwOnfAQJa7j9j80RO8nYfiybvJgzQIGABJ3mV3KrNJrnGTnlYEIDknRUB1z+JqgtMGIB4R4EAkr2GDgACzXYgwDCgsJxgIBg1jEOyd+kOmxvq56+PbFC4KxgIQIWBBNsGBDQM4PKPyPxSIKCRlSAAAS6DPxhPzs8oYCNKV2cJBQIwR7LYx0LX9ZswIA4EaENJSxKnQIDyCFiz2wECnv4R7yMRAQGIGe5LwwB8DQUIFApKIbA/Oud0fHFUHgIKCLCHgJ9CQJsKAgg8/DH6eQUMgPIEbSkBP0wgYF5Lv0ZXgbAKAQ0WvUBAWgv6/78hMMC63JBvSgQkAo4ICAxwBEa+LRGQCHR+BAQG+C/q9AK6O46mMkBfXwGBw5xgYGfPqRC4CYVASCCQBiAQ8xAwYQCuCzf+pUWVND+z0K0QMIHAxh10+p697aCpEIBkWwMBLwzQIAC1+AAN+LmOgT4ieYeLO3cNKPYDAfAI2Mk75kj+zURKwwCY+6HPulYEwHDPCgIe/MBmjmEVAdjNR5JmnssLA3A/CghEDOO2AQj8w3K/z2lPGIXAwx/jkj2tENAwAK0FMR4FBNQcUT3mbaaC8BAICwQqE4CAFwZESwMiioAXBgERWISEWIMAxFPDgF1XPoh1loCh5C27IgAwTJcGYE5YFQEMAjBH0FmigktR9FzE0QsD8D0AgQoAgTR4CIQAAiugEPgo4dnjXHiOXDLACoGYH4GGAftufsZwgYFAPhQCxYT7N8eoX0eBQCpMBfc4FQIoa9EqEqhc9DwWGOD//4bAgM5fu8gVJAK9KQICA3rT05R7kQj0sAgIDPBf1OnFc3ccbTAA42AgsFMDgT32mu7v/82md9iV9lMItMJDQBvGbW4meAh4YQCueeAWOhbEkj2dxJtxUfJstJTL4nZsTiBwO1IykFXANdw4lwkDGARs3MHJOxzpXSAAUm2AAL/SAN7F16UBlpZ/SPyQ7JRs2sFxSiuq4CRZJz3m/SEZg4kjQAAk2ee/jO3O6vfh95rb3mfzOoAAJO/ecykYUEor1myMS9QYCEQc5OFGHwgEcoupuQ0JmiWJ55KBQzxWQCOM3YQBGC/GtS+S7GGOvLhC4DNaUlzJQEUbFJow4FTE4A/eEH6KgKZz7/Bz9XX6v/skohrJYlhkggDcG2DAotwSVnCkZBVwuQvmsH5O5h
EgAOoYrRrBZ8L8OV5fefoHl48gTtgt33/zs4T3AAYsL11LaJNp/j7moAYCKB1otbSwjCkEVtGiFStpz7VgIKDLD0wYoMb6J/8+vAuCgMDqnapEJQwQwL1rICAwwP//DYEBPWwhJMOVCHRzBAQGdPMDkMtLBPpyBAQG+C/qzEV9V78GDEA3gcJ1jXHJBcYRBQIwA2twAwHscGuFAHZ/7Un8T8pBHiUDm3dR8frtnGCjTMC8ZxMIIBG3nwv12iGBQInyEIBCAIkvWiMisUdfdySMoUHATbtHQBAIwL0BBkyZn0rjZszlJA+75d7kHe/7KyAAdf62c8FUb/aSLGuPduzYKvO5TAIQwNfmM8BrnJMVAvmoNddAILGlHGrLOdljw7g9XCaBuQBlgD5n+4DAEU6YV23eZQdQz55zkqyhEYAAZPxI6uGar0wgs6h2n7s0IAoCVlapFpNWOb8/CMC9YQ7imU6YnRIKBECBgrngAgG4Fz8QgGvi8zE3fQVlWhz2MReRvKNkAM+3w4DAtn1cWoKkH8oA/VwBF6AQWBxCIaBMLDNYIeD1I8D5YCqoFQIaCBz/+BvKrKizzmE9hr58FBjQl1dVcu8SgfZHQGBA+2MmvyERkAh0UAQEBiQ3DJi9JJvyaraw/Ne7uA4PBGIlAy4g0PoVgICSgyOBWphTQl4YgOujPly3HoNE3w0EIh4CXDIQ8yMw7wGO5EgUsbu/8dhFmrs8j9vQYYcWyZyvIiAroggIAgGp/i3/kKC+9sYQ7hGPXXJb8q5AwO5ARQBMCJH0qtIAOwhAcg85+muvD+ZkynY9BgLbD/D1wgGBElYjWM8FIACDwrRMmjBrPo/PhAF4Hvi9aLKXE6AQaD7Cig0oJJDgms9Tnwu75gwEsoto3eFzDFnQXpDNAv1AwPkQioB7GgRgjpyIKw3QY8GcRIxf7vcGmw+i7ab+mXkMqwgIAwJQrlO4bhu3bkxfWW29HgOBbQACmf5A4MoHtGgFFAKrfEsGGBqlZdLE2Sk0PzM/DgbgPhUQ+Eh1LMgpoV0GBDLjABCn5wi6WoQBAnUHzlBmucAAM47ma4EBHbRAkdNIBPpIBAQG9JEHLbcpEUjGCAgMSHYYkEXjZ8zjZA+Le3PBiddImFejZCBQIfAZG9lhZy8ICIwcP4XemjqbGs5cS7gertl+ILCTXdq9Y8fXAAJLS6ro7aXZNHjkWzR41FsMAs5bpNSQw3NpAIOACn+Dv00Rp39LaQCuiwQYsviJby+gIaPGUv3BFicIgMweO8eqNCCxtRvOFRYEwA1+asoSeu31IZzEwUHemsRDIdDBQGD42Encgq7x3I2E59pZQGDm4gx6c9IMGjZmgr8ioN0gwGHw9+QP9giYMCuFho6ZQOuPnE+4Vzx/lC9oE0hWBHxlLw2IggB4BBg77+ZcRpKPOTJpziIaOHw0TZqzkOGK+R792gsEbPMcJQmo9VdAwL9kAHMEsR0/cx7PZ30dffwrQABKI2sJjKEQQIvK6QuWijLAolxB7AUGJONqR8YkEUjeCAgMSN5nIyOTCPT6CAgMSG4YgCT5lX4DeUcXtegdBwTucB20Thr0EQqB9FXV9OqAQdw2DEmT/pl5hGu/VggElwxksoEf3NrNc+jXAALwNvh/f3uJd5RtCZIGAaj/xnXdBn8Rp/8ARQBAAMDIsLcmsioAtdB6PPoYpwjYedi+Y2qCgPxSrtV2Jffl2/YytEHZB8oEXuk/kOvDQwGBRp+SATjIc8mAv0IgfdUaerX/wGjrO32f+qiBQCp8JnKKCaaKVj+Chz9yO0UAElYIPLArBPCMUP/+95f78fhcZoEoE2ETyBK0/Ps84TlgfPCfiHaWYEWAPXnffuEWmxgOHT2B5izLZam8vj99DAcC/uTWnDALdHkE4HwaBADG5ddu4TnVb+BQvl8oTfQ1zaMXCNgUMBoILM4L9hCATwFUECvWbo4zx9TXZCBw9aNIC8OSuDIR/R4cowoBbnPpDwRgtNl/0DDKqqi33qN53r74WmBAr186yQ1KBDo0AgIDOjSccjKJgESgPREQGJDcMGDO0hx6e0k2L+QX5KpkzxcIpKraXyQctkU4ewiwqWAx7yQi6fC+D0qDMVNm8T/Uh7uBQKxkwA0EfiK0aUOyVLLJrRDYfeUDemvabFqxdlPCeBgEXLxNoUFAmn9pAJJcJLtI8patrOYjdvbNOHhBAMz9zJ/jNZLnmCIgCATs4xjADf7I+18SzN2gEECyjH++QKBJlQxUBAAB1IejTAHQyAYkIAUfMW4yjZ0+hyAHxz3a7gklA2GBAJ6rHxBoar3Fu9aoL/deC3J+DQKWBbT8g48EroXyEZfTP8pW0N4QJoZo8YdEGnXz5nU1CIh6BFgVAX/Sjkt3uGsAPBbMWnzzXCYIQKs+gIyM0rU0ee5iLrMBoPEHAnu5ZKCy6aC1JCYsEEB5DxQJKAPx61iwB0AgMke8pSL6vqJAgNtc2oEA/v40tFzj6yHO+nflGPu/RGBAe1Yh8l6JgERAYIDMAYmARKDbIiAwILaAS7bFrO4mgJ1kmNHphbxTIfDwn6r2NzWDTcX8gADq2hdk24EAXOeRoOI9OtlzAgFuYVgRcZB3eQjEgABc21Gr7Y01dkexOwx/BPNnMRBQGEIRoJz+q+H0b/Rh1+dDgox+6QoElLOUG4kUlAYmDECSDNk37h1gJBAEBCkCYByXpurEkWzhH2BAbvVGJQfPBRBAfbhPyUAT2tPhHPvp3JcOU8FrsWQP9+MFAniuSJYBQZAMlzW4WxhivvEcySmOOsjrOOojasurmlWJyqotqmOB/pk+Qg0C/wCvw74JApb6tPzD7+uWf34gADCKQUARWkzejbYWNGGABgGII547VDB6nPqIRJdVI4BFBQABnya8B+9lENCwh58JQAAMAXVrwazKep5n8N3Ac4YppT6/ecQ5UDYCDwFAIj+FAD6PNriB86GbAO4J/ht6jtjUNbg3zLHFeQoa4bU5Hv0a87My4jOB8dk+S8c/fkRQmtggjz5PVx/xud3eeos/Uxcf/WK9t64ak8CAblvSyIUlAj0yAgIDeuRjk0FLBHpHBAQGJDcMgDs5TO6wiFU7tmW8mMdC3qoQABDYeZjVBGhnZtv9xblYIVBYzkm/Fy6s3XuCEwt4BiBpmr00m3eSXXABJQNLiyp5VxqJlE1tgN1cJKMoe0Di6IULSISQPBXUbYsu4jl5v3SHFuYUsyM8dmi9CS7uRSfvMFusbj5qTV7we6oOeyUn/4glvocEE8mo7seORAimfRgnkjwXCMDOKpIz7KCjDZxtXKpV4AEuCahsOhBtFQiDQOw4A3xEx8XnKmPoYz1XpMsAngUSR9e4MBYk8RibFy7guSIxbzhzlaER7hE7ya6OBXuvqx7zSERtcAGxBxAAfMG5GC541AZ4zkgaV6yJKT4wP7CLDxiFBPbArXvRZ45z6n8qed/F53aDAJW8QxHBUCFiFogEOa0A8VRS/Zb7z9gtH+NUIOCf0evo6+HzhM8C7hdxcioCHvzAc2T20iyeI7ozAGAAOgnkrtnIx
pooPVmUu5KWOFoR4roKCOzjOcJQwdHWEPMTz5TvyVN+gDaNGPO2szfYmBD3CBWIHS4oIABIAShgwhIdBxzxOcA5UM4Co0IvXEBrQUCP3OoNCXE0z9OVrzGPAPrenDidP1/w+MDzt0Gfzh6XwIDesT6Su5AIdFUEBAZ0VaTlOhIBiUBCBPoaDIBh3PSFyyi7an3S/8Ou2/C3JrHJnR4vpOX9Bw8nGHhhMa6/bx6RyAwdPZ7r4bHoN39mvp62YCmfa8r8tLhzzViUTqMnz2SZ9JLi1TRszEQ2Y0PyYP6++RoxxbimzFvMu4Xmz/TrpSWrCSZ2GNuivFVx51peVkMjx0/lVnD6/Vmr17HkesDg4eqZrV4X9zv6fUh+h7w5js+NXW/9/bjj6nWE+xwweAThvrMj59L3Cv8AvB/JvY4dEva4c+g5s3odS/xxLjwPjNP2PowFngQwssMY9XuQHL85aTqNnzmfv4fnOHneYo7f9IWxsen36yPOERub/T4xlqkpuM/hPDZ9nziHutcZShlRUB59rn73OS0y39B+0e8+8VyHjB4fd5+4JsowUHICgzt9H5jXMNlD/GYsXh79vv65PkItoZ8rEn39ffOYGYkdn2tRevQ9uFckhYAEeD/mLmI3/K2JDA3Mc+jXeA6YI/z58vkbsTi/jE37ho4Zr1QPkXmB+f3WtLfZpBHnxH2iZADnw3j0dbxHfBb0c8U5vD/X58IzwLm8f78Qw5ETpjIQgHpIf15dMcN98t+RQfg7stR6PVwTfzswf/EMWN2h53/Vei7FQLkJ2jfaxtsd34OPwcuvvU7/47/9d/rf//N/0d9ffo3emjKL5zsUSfCTsIGvzgADAgMSlhryDYmARMAnAgIDfIIjP5IISAQ6NwJ9CQYc++hrbv+FBTMc7OWfxEDmgMwBmQO9Yw5AGfT3V/oxDAAQ+D//+//QK/3eoMlzFlFh/TYud7EpagQGdO4aQ84uEZAIBEdAYEBwjOQdEgGJQCdFoC/BANQpQ9Lb8uAH+ScxkDkgc0DmQC+aA2iZOenthfTSqwNYkQEFRN2B01wSZSuZ6AwIoM8pyoBOWrDIaSUCvTQCAgN66YOV25II9IQI9CUYoBdqcozVZUssJBYyB2QO9IY5cOruY1p36CwbKh6684Auf/trt/kZCAzoCasfGaNEIHkiIDAgeZ6FjEQi0OciIDBAEoHekAjIPcg8ljnQt+fA1ad/Ws1Lu2NeCAzoc0spuWGJwAtFQGDAC4VPflkiIBF4kQgIDOjbC+juWCjLNWXOyRyQOdCb54DAgBdZlcjvSgT6XgQEBvS9Zy53LBFImggIDJBFeW9elMu9yfyWOSBzoKvngMCApFniyEAkAj0iAgIDesRjkkFKBHpnBAQGyEK5qxfKcj2ZczIHZA705jkgMKB3rpfkriQCnRUBgQGdFVk5r0RAIhAYAYEBsijvzYtyuTeZ3zIHOnAOPP6JTt++RfVNzVS25wrtu/9nt5n0JfNzFRgQuPSQN0gEJAJGBAQGGMGQlxIBiUDXRkBgQAculL+XcyXzAl3GJvNT5sALzoHHP9GpS4dp+fyZNHrJFtr6ucAA25wSGNC16xi5mkSgp0dAYEBPf4IyfolAD46AwIAXXBwLAJCdQZkDMgf6zBx4TueunqDMtBSaWnqGjj2Rv58CA3rwAkiGLhFIkggIDEiSByHDkAj0xQgIDEjyxeyz3+nyd79R25M/qI2Pf9LVPpN4JPmzkecgEKCvzYFnP9OBQ5to7pyFlNp8jy7i79Lj3+nKsyT9rD79g648/p3anqrxof0g/pZ29t9QUQb0xdWU3LNE4K9HQGDAX4+d/KZEQCLwghEQGJCki9hn/6LTt9+jhhMXafPpy7Rp7x4q3HKSGm4+oba+loDI/UrSLXMgOebAt1/SpvWFNHnaMsrYeYnqGhspt3IDley7RUceJlPJwO904cHXtPv4UVq5fidVn71Ppz76gNZv20Grmtvo4NfPOzWeAgNecGEivy4R6GMREBjQxx643K5EIJkiIDAgCWHAs5/o2KXTtLJmCxVtP0fbzp+jktw5NGLWKlrV8q3AAEkMOzWRscme5XtJ+HeiGz4HbZ9cp1Ur5tPQCVmU3XSC1jRtpkULptPwqflUcu4pXemGMdnn5u908YtHtHvPJpo3fRqNWbqZ6k6eoNKNu6hi73U6JDAgmZYhMhaJQJ+PgMCAPj8FJAASge6LgMCAZFvk/0nnb5+n/BU5lFJ1lHZ88hNd/fZTqsxbTFOzttHG93+VRDBpEo5kmzsyHntiKHHpmLj8SWcuHqLU6W/SG7PXUM3Vb6j1u++osaGIxoyeTam77icdqLxy9xZVFqTQ4DeX0YqjH9Hxr/6gq8+eS5lA9y055MoSAYmAJQICAyxBkW9JBCQCXRMBgQFJlig8fkRbarNpwpwSKj71FV189pwuvXeSlqWk0tzq07S/k3e0OiZpSLKYCjwQgCRz4MXnwLN/0O49a2nymIk0s/YqnfjmOV17dJ821GbR6InplHfmWacn2e3++/TtA9q0LpNGjU6l7JPfd9n4pEyga9YvchWJQG+JgMCA3vIk5T4kAj0wAgIDkitxvfLROcqcO43GZTfT1k9+o2tPvqfdTSU0dtISWtb8Hp1NVqMuSbZePNmSGEoMk3gOXP3qM6qvzqKx0wup7OqPdPX7P+jcOy20In0eTcjaRdvv/ZFkz+85Xbz7CdVVLqFR4+dT2p6HXaZcEBjQAxdDMmSJQDdGQGBANwZfLi0R6OsREBiQXDDg0sWdNGfcFJpYcZL2fP0vOv3uTSrJmEbDZuZT7u4P6eT9H+myAIEkSzqSaw61e/c0iRNQuZdkmVvP6dKdC1SQnkLjljdT81fPqe2L+7RlSxXNXVZBq1q+ogtJ83fpOV179pzaHj6mgxcuUtWacnp7zlyaWn6GDt1/Sqfu/dzpCgGBAX19ZSX3LxFoXwQEBrQvXvJuiYBEoAMjIDAgWRbbahyX3zlGS2ZMoCEzcimz4QTVHT5O2WnTaMTUpZS29QYd+OS/BAZI8iowROZAF8+B53T2ygnKWPQ2jcvaTluuv08NB/dTUW0jVRz7hE5/27nu/O2BQm2f36ft+5spt3YXrTl+i/a2tVFF0WIaPjGLVhy8Sfs+FRjQgUsIOZVEQCLQAREQGNABQZRTSAQkAn8tAgIDkgsGXP36K9qxbztlraqn/IYz1HjzM2o8eJBWbtxL69q+phbU6UoiJDGQOSBzoEvnACT396jx4CGq3N1KjZduU+O5m7Tj+ld07nFy/U1q+/webdvdSNnrT1DTe/+gtu+e0YGWY1S4upHWnHtALY87/2++KAP+2npEfksi0FcjIDCgrz55uW+JQBJEQGBA5y8M25u8X338C7U+/JFav/mdW3Vd+e5XuvSdet3ec8n7k+/5yjORZ9Jj58CzP+jyN7/QhUe/JlEbQc984jH+TK1fx8Z45fFvdOmbX+jSU897OwmoCAxIgsWNDEEi0IMiIDCgBz0sGapEoLdFQGBA1ywOe+ziv5MWyxIPmXcyB2QO9NY5IDCgt62U
# Integration: Simpson's 1/3 rule
import numpy as np
import matplotlib.pyplot as plt

# INPUT:
fx = lambda x: np.sqrt(x)*np.sin(x)
# integration interval
a = 1
b = 3
tramos = 8

# PROCEDURE
# Task: validate that tramos (number of subintervals) is even
# Simpson's 1/3 rule
h = (b-a)/tramos
xi = a
area = 0
for i in range(0,tramos,2):
    deltaA = (h/3)*(fx(xi)+4*fx(xi+h)+fx(xi+2*h))
    area = area + deltaA
    xi = xi + 2*h

# OUTPUT
print('tramos:', tramos)
print('Integral: ', area)

# PLOT
# sample points
muestras = tramos + 1
xi = np.linspace(a,b,muestras)
fi = fx(xi)
# smooth line
muestraslinea = tramos*10 + 1
xk = np.linspace(a,b,muestraslinea)
fk = fx(xk)
# plotting
plt.plot(xk,fk, label ='f(x)')
plt.plot(xi,fi, marker='o', color='orange', label ='intervalos')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Integral: Regla de Simpson 1/3')
plt.legend()
# shade the area under the sampled points
plt.fill_between(xi,0,fi, color='r')
for i in range(0,muestras):
    plt.axvline(xi[i], color='w')

tramos: 8
Integral: 2.053709383061734

Nuclio - Generator function
Environment
import nuclio
import os

base_path = os.path.abspath('../')
data_path = os.path.join(base_path, 'data')
src_path = os.path.join(base_path, 'src')

os.environ['data_path'] = data_path
os.environ['src_path'] = src_path

%nuclio config kind = "nuclio"
%nuclio: setting kind to 'nuclio'

Configurations Setups
> Please make sure all the packages in the following `nuclio cmd` cell are installed. You may need to restart the kernel for the `pip install` to take effect.

# This cell contains our list of requirements
# We can add commands to run when building the image
# and specify required pip installs.
# We can use the `-c` flag to run the command only upon
# deployment and not when running locally within this notebook

# Install Util packages
%nuclio cmd python -m pip install pyyaml
%nuclio cmd python -m pip install pyarrow
%nuclio cmd python -m pip install pandas
%nuclio cmd python -m pip install pytimeparse

# Install Igz DB packages
%nuclio cmd python -m pip install v3io_frames --upgrade

# Install Function Specific packages
%nuclio cmd python -m pip install -i https://test.pypi.org/simple/ v3io-generator
%nuclio cmd python -m pip install faker

Show location to copy to `%nuclio env` configuration
# This cell contains our environment variables to be
# used locally by the notebook or by the deployed function.
# You can specify `-l` for local only configs and `-c` for # cloud only configs # Batch Limit %nuclio env -l BATCHES_TO_GENERATE = 1 %nuclio env -c BATCHES_TO_GENERATE = 20 # Deployment %nuclio env SAVE_DEPLOYMENT=1 %nuclio env DEPLOYMENT_TABLE=devices # Metrics %nuclio env METRICS_CONFIGURATION_FILEPATH={src_path}/metric_configurations.yaml # Parquet %nuclio env SAVE_TO={data_path} %nuclio env SECS_TO_GENERATE=3600 # Save as %nuclio env SAVE_TO_TSDB=0%nuclio: setting 'SAVE_DEPLOYMENT' environment variable %nuclio: setting 'DEPLOYMENT_TABLE' environment variable %nuclio: setting 'METRICS_CONFIGURATION_FILEPATH' environment variable %nuclio: setting 'SAVE_TO' environment variable %nuclio: setting 'SECS_TO_GENERATE' environment variable %nuclio: setting 'SAVE_TO_TSDB' environment variableFunction# nuclio: start-code import os import time import yaml import pandas as pd import itertools import datetime # DB Connection import v3io_frames as v3f # Data generator from v3io_generator import metrics_generator, deployment_generatorHelper functionsdef _create_deployment(): print('creating deployment') # Create meta-data factory dep_gen = deployment_generator.deployment_generator() faker=dep_gen.get_faker() # Design meta-data dep_gen.add_level(name='company',number=2,level_type=faker.company) dep_gen.add_level('data_center',number=2,level_type=faker.street_name) dep_gen.add_level('device',number=2,level_type=faker.msisdn) # Create meta-data deployment_df = dep_gen.generate_deployment() return deployment_df def _is_deployment_exist(path): # Checking shared path for the devices table return os.path.exists(f'/v3io/bigdata/{path}') def _get_deployment_from_kv(client, path): print(f'Retrieving deployment from {path}') context.logger.debug(f'Retrieving deployment from {path}') # Read the devices table from our KV store deployment_df = client.read(backend='kv', table=path) # Reset index to column deployment_df.index.name = 'device' deployment_df = deployment_df.reset_index() return deployment_df def _save_deployment_to_kv(path, df, client=None): # Save deployment to our KV store client.write(backend='kv', table='netops_devices',dfs=df, index_cols=['device']) def get_or_create_deployment(path, save_to_cloud=False, client=None): if client and _is_deployment_exist(path): # Get deployment from KV deployment_df = _get_deployment_from_kv(client, path) else: # Create deployment deployment_df = _create_deployment() if client and save_to_cloud: _save_deployment_to_kv(path, deployment_df, client) return deployment_df def set_indexes(df): df = df.set_index(['timestamp', 'company', 'data_center', 'device']) return df def save_metrics_to_tsdb(context, metrics: pd.DataFrame): print('Saving metrics to TSDB') context.v3f.write('tsdb', context.metrics_table, metrics) def save_metrics_to_parquet(context, metrics): print('Saving metrics to Parquet') df = pd.concat(itertools.chain(metrics)) # Need to fix timestamps from ns to ms if we write to parquet df = df.reset_index() df['timestamp'] = df.loc[:, 'timestamp'].astype('datetime64[ms]') # Fix indexes df = set_indexes(df) # Save parquet first_timestamp = df.index[0][0].strftime('%Y%m%dT%H%M%S') last_timestamp = df.index[-1][0].strftime('%Y%m%dT%H%M%S') filename = first_timestamp + '-' + last_timestamp + '.parquet' print(filename) filepath = os.path.join(context.metrics_table, filename) print(filepath) with open(filepath, 'wb+') as f: df.to_parquet(f) def is_deployment_initialized(context): return hasattr(context, 'metric_generator')Init contextdef init_context(context): # How 
many batches to create? (-1 will run forever) batches_to_generate = int(os.getenv('BATCHES_TO_GENERATE', 20)) setattr(context, 'batches_to_generate', batches_to_generate) setattr(context, 'batches_generated', 0) # Get saving configuration save_to_tsdb = (int(os.getenv('SAVE_TO_TSDB', 1)) == 1) # Set metrics table metrics_table = os.getenv('SAVE_TO', 'netops_metrics') setattr(context, 'metrics_table', metrics_table) # TSDB Based demo if save_to_tsdb: context.logger.debug('Saving to TSDB') # Create our DB client client = v3f.Client(address='framesd:8081', container='bigdata') # Create TSDB table if needed client.create('tsdb', metrics_table, rate='1/s', if_exists=1) # Set saving function setattr(context, 'write', save_metrics_to_tsdb) # Parquet based demo else: context.logger.debug('Saving to Parquet') # Set empty client for verification purposes client = None # Create saving directory filepath = os.path.join(metrics_table) if not os.path.exists(filepath): os.makedirs(filepath) # Set saving function setattr(context, 'write', save_metrics_to_parquet) # Set batch endtime secs_to_generate = os.getenv('SECS_TO_GENERATE', 10) setattr(context, 'secs_to_generate', secs_to_generate) # Generate or create deployment deployment_df = get_or_create_deployment(os.environ['DEPLOYMENT_TABLE'], os.environ['SAVE_DEPLOYMENT'], client) # Convert to log_dataset deployment_df['cpu_utilization'] = 70 deployment_df['latency'] = 0 deployment_df['packet_loss'] = 0 deployment_df['throughput'] = 290 deployment_df.head() # Get metrics configuration # Move to get-object from store:/// with open(os.getenv('METRICS_CONFIGURATION_FILEPATH', '/src/metrics_configuration.yaml'), 'r') as f: metrics_configuration = yaml.load(f) # Create metrics generator initial_timestamp = int(os.getenv('initial_timestamp', (datetime.datetime.now()-datetime.timedelta(days=1)).timestamp())) met_gen = metrics_generator.Generator_df(metrics_configuration, user_hierarchy=deployment_df, initial_timestamp=initial_timestamp) setattr(context, 'metric_generator', met_gen) # Set client setattr(context, 'v3f', client)Handlerdef handler(context, event): # Limit the number of generated batches to save cluster resources # for people forgetting the demo running if (context.batches_to_generate == -1) or (context.batches_generated <= context.batches_to_generate): # Create metrics generator based on YAML configuration and deployment metrics = context.metric_generator.generate_range(start_time=datetime.datetime.now(), end_time=datetime.datetime.now()+datetime.timedelta(seconds=int(context.secs_to_generate)), as_df=True, as_iterator=True) # Save Generated metrics context.write(context, metrics) # Update batches count context.batches_generated += 1 # nuclio: end-codeGenerate data configuration file%%writefile {os.environ['METRICS_CONFIGURATION_FILEPATH']} errors: {length_in_ticks: 50, rate_in_ticks: 150} timestamps: {interval: 5s, stochastic_interval: true} metrics: cpu_utilization: accuracy: 2 distribution: normal distribution_params: {mu: 70, noise: 0, sigma: 10} is_threshold_below: true past_based_value: false produce_max: false produce_min: false validation: distribution: {max: 1, min: -1, validate: false} metric: {max: 100, min: 0, validate: true} latency: accuracy: 2 distribution: normal distribution_params: {mu: 0, noise: 0, sigma: 5} is_threshold_below: true past_based_value: false produce_max: false produce_min: false validation: distribution: {max: 1, min: -1, validate: false} metric: {max: 100, min: 0, validate: true} packet_loss: accuracy: 0 
distribution: normal distribution_params: {mu: 0, noise: 0, sigma: 2} is_threshold_below: true past_based_value: false produce_max: false produce_min: false validation: distribution: {max: 1, min: -1, validate: false} metric: {max: 50, min: 0, validate: true} throughput: accuracy: 2 distribution: normal distribution_params: {mu: 250, noise: 0, sigma: 20} is_threshold_below: false past_based_value: false produce_max: false produce_min: false validation: distribution: {max: 1, min: -1, validate: false} metric: {max: 300, min: 0, validate: true}Overwriting /User/mlrun-demos/demos/network-operations/src/metric_configurations.yamlGenerate dataset locallyRunning this step will generate the base data file for the project. This will be used as our training dataset later on in the [project notebook](../project.ipynb).# nuclio: ignore init_context(context) event = nuclio.Event(body='') output = handler(context, event) outputDeploy to cluster(For streaming demo)from mlrun import code_to_function, mount_v3io fn = code_to_function(name='nuclio-generator', kind='nuclio', with_doc=False) fn.spec.base_spec['spec']['build']['baseImage'] = 'mlrun/ml-models' fn.add_trigger('cron', nuclio.triggers.CronTrigger(interval='1m')) fn.save() fn.export('../src/generator.yaml') fn.apply(mount_v3io()) fn.set_envs({'METRICS_CONFIGURATION_FILEPATH': os.environ['METRICS_CONFIGURATION_FILEPATH'], 'SAVE_TO': os.environ['data_path']}) fn.deploy(project='network-operations')Setup File for Keras ModelsUse `%run Setup.ipynb` in another notebook to perform all these tasks automatically.Parameters that can be re-configured:MAX_NB_WORDS = 40000 # max no. of words for tokenizer MAX_SEQUENCE_LENGTH = 30 # max length of text (words) including padding VALIDATION_SPLIT = 0.2 EMBEDDING_DIM = 200 # embedding dimensions for word vectors (word2vec/GloVe) GLOVE_DIR = "dataset/glove/glove.twitter.27B."+str(200)+"d.txt" print("[i] Loaded Parameters:\n", MAX_NB_WORDS,MAX_SEQUENCE_LENGTH+5, VALIDATION_SPLIT,EMBEDDING_DIM,"\n", GLOVE_DIR)Imports:print("[i] Importing Modules...") import numpy as np import pandas as pd import re, sys, os, csv, keras, pickle from keras import regularizers, initializers, optimizers, callbacks from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.utils.np_utils import to_categorical from keras.layers import Embedding from keras.layers import Dense, Input, Flatten, Concatenate from keras.layers import Conv1D, MaxPooling1D, Embedding, Merge, Dropout, LSTM, GRU, Bidirectional from keras.models import Model from keras import backend as K from keras.engine.topology import Layer, InputSpec print("[+] Using Keras version",keras.__version__) print("[+] Finished Importing Modules") texts, labels = [], [] print("[i] Reading from csv file...", end="") with open('data.csv') as csvfile: readCSV = csv.reader(csvfile, delimiter=',') for row in readCSV: texts.append(row[0]) labels.append(row[1]) print("Done!")Convert text to word tokens (numbers that refer to the words)""" tokenizer = Tokenizer(num_words=MAX_NB_WORDS) tokenizer.fit_on_texts(texts) with open('tokenizer.pickle', 'wb') as handle: pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL) print("[i] Saved word tokenizer to file: tokenizer.pickle") """ with open('tokenizer.pickle', 'rb') as handle: tokenizer = pickle.load(handle)Convert tweets to sequences of word tokens with zero padding at the front and backsequences = tokenizer.texts_to_sequences(texts) word_index = tokenizer.word_index print('[i] Found 
%s unique tokens.' % len(word_index)) data_int = pad_sequences(sequences, padding='pre', maxlen=(MAX_SEQUENCE_LENGTH-5)) data = pad_sequences(data_int, padding='post', maxlen=(MAX_SEQUENCE_LENGTH)) labels = to_categorical(np.asarray(labels)) # convert to one-hot encoding vectors print('[+] Shape of data tensor:', data.shape) print('[+] Shape of label tensor:', labels.shape) indices = np.arange(data.shape[0]) np.random.shuffle(indices) data = data[indices] labels = labels[indices] nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0]) x_train = data[:-nb_validation_samples] y_train = labels[:-nb_validation_samples] x_val = data[-nb_validation_samples:] y_val = labels[-nb_validation_samples:] print('[i] Number of entries in each category:') print("[+] Training:\n",y_train.sum(axis=0)) print("[+] Validation:\n",y_val.sum(axis=0))Preparing the Embedding layerCompute an index mapping words to known embeddings, by parsing the data dump of pre-trained embeddings.We use pre-trained [GloVe](https://nlp.stanford.edu/projects/glove/) vectors from Stanford NLP. For new words, a "randomised vector" will be created.embeddings_index = {} f = open(GLOVE_DIR) print("[i] Loading GloVe from:",GLOVE_DIR,"...",end="") for line in f: values = line.split() word = values[0] embeddings_index[word] = np.asarray(values[1:], dtype='float32') f.close() print("Done.\n[+] Proceeding with Embedding Matrix...", end="") embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM)) for word, i in word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector print("[i] Completed!") print("[i] Finished running setup.")Cleaning and EDA## each observation is a video that has trended in Asia, Europe or North America ## dataset gives out details like number of views acquired, comments recieved, likes/dislikes. ## Datetime : trending_date ## factor_vector: ratings_disabled, comments disabled, video glitched/removed, country of origin. videos.head() ## Each row has some basic essential details about different videos published on youtube. ## Creating one complete dataset appending Asia, Europe and NA dataframes. 
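## Note: DataFrame.append (used just below) was deprecated in pandas 1.4 and removed in pandas 2.0.
## On a recent pandas, an equivalent sketch -- assuming the three regional frames share the same columns -- is:
##     trending = pd.concat([asia_trending, eu_trending, na_trending])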
trending = asia_trending.append(eu_trending).append(na_trending) ## Cleaning the dataset and changing data types wherever necessary trending['trending_date'] = pd.to_datetime(trending['trending_date'], format = '%y.%d.%m') pd.isna(trending['views']).sum() #9850 na values detected pd.isna(trending['comment_count']).sum() #7396 nan values detected # splitting likes_dislikes column into separate columns and adding them back to the dataframe #trending['likes/dislikes'].str.split(pat = '/', n = -1, expand = True) trending.columns = ['video_id', 'trending_date', 'views', 'likes_dislikes', 'comment_count', 'thumbnail_link', 'comments_disabled', 'ratings_disabled', 'video_error_or_removed', 'country'] trending[['likes','dislikes']] = trending.likes_dislikes.str.split(pat = '/', expand = True) trending["likes"] = trending['likes'].astype("int64") trending["dislikes"] = trending['dislikes'].astype("int64") ## converting ratings_disabled and video_error_or_removed into numeric type trending['ratings_disabled'] = trending['ratings_disabled'].astype('int64') trending['video_error_or_removed'] = trending['video_error_or_removed'].astype('int64') ## Removing unwanted columns trending = trending.drop(['thumbnail_link','likes_dislikes'], axis = 1) ## Merging videos df with Trending df to get the final dataset category.rename(columns = {'ID':'category_id'}, inplace = True) category.head() trending0 = trending.merge(videos, on = 'video_id', how = 'left') trending1 = trending0.merge(category, on = 'category_id', how = 'left') trending1["country"].str.len() trending1["country"] = trending1["country"].str.strip() ## Converting publish time into datetime object trending1["publish_time"] = pd.to_datetime(trending1["publish_time"]) trending1.head()Analysis# Q1: In which country did youtube videos trend the most in the given span of time? trend_country = trending1.groupby('country').count().sort_values('video_id', ascending = False)["video_id"] trend_country ## Youtube experiences the most number of trending videos in the US, while the least in Japan. # Q2: Which channel has trended the most number of times? trend_channel = trending1.groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] trend_channel ## The most trending channel over Asia, Europe and North-America is 'The Late Show with '. ## By country # US #trending1[trending1["country"] == "US"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in the US is INSIDER. # Canada trending1[trending1["country"] == "Canada"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in Canada is 'VikatanTV'. # Germany trending1[trending1["country"] == "Germany"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in Germany is 'Galileo'. # France trending1[trending1["country"] == "France"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in France is 'Troom Troom FR' # UK trending1[trending1["country"] == "UK"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in the UK is 'PewDiePie'. # India trending1[trending1["country"] == "India"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in India is 'ViketanTV'. 
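## The per-country cells above and below all repeat the same groupby / count / sort pattern.
## As a sketch (using the merged frame trending1 defined earlier; the variable names here are
## illustrative), a single grouped count gives the top trending channel for every country at once:
channel_counts = trending1.groupby(['country', 'channel_title'])['video_id'].count()
top_channel_per_country = (channel_counts.sort_values(ascending=False)
                                          .groupby(level='country')
                                          .head(1))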
#Japan trending1[trending1["country"] == "Japan"].groupby('channel_title').count().sort_values('video_id', ascending = False)["video_id"] ## The most trending channel in Japan is 'DHCテレビ '.' ## Interesting to see that the ViketanTV is the most trending channel in both India and Canada. ## This could point towards there being some similarity in the tastes of viewers of both countries. # Q3: Which is the best time to publish a video on youtube such that it trends well? # This could be infered from analysing the publish time of the already trended videos. # Extract hour from the publish time def hr_func(ts): return ts.hour trending1["hour"] = trending1["publish_time"].apply(hr_func) trend_hour = trending1.groupby('hour').count().sort_values('video_id', ascending = False)["video_id"] trend_hour ## From an inspection of the trend dataset, we can infer that an ideal time to publish a video on youtube (in the 3 continents, on an average) so that is become likely to trend is ## 3:00pm - 5:59pm trending1[trending1["country"] == "India"].groupby('hour').count().sort_values('video_id', ascending = False)["video_id"] ## Specifically in India, a more suitable time would be 12-2 pm. #Q4(a): Which channel seems to capitalize on bad fame i.e. it trends via videos which garners more likes < dislikes? #trending1[trending1["likes"] < trending1["dislikes"]].groupby("channel_title").count().sort_values(by = "dislikes", ascending = False) disliked_channels = trending1[trending1["likes"] < trending1["dislikes"]] disliked_channels = disliked_channels.drop_duplicates(subset = ["video_id"], keep = "last") pd.set_option('display.max_rows', None) disliked_channels.groupby("channel_title").count().sort_values(by = "dislikes", ascending = False)['dislikes'] ## The channel 'focus nachrichten' has created the most number of trending videos which has more dislikes than likes. ## We could speculate that the creator make controversial, disturbing or unconventional videos. ## To further confirm our hypothesis, we check for the creator's popular videos that have more likes than dislikes. print(trending1[trending1["channel_title"] == "focus nachrichten"].drop_duplicates(subset = ["video_id"], keep = "last").shape) #39 distinct publishes ## Focus Nachriten has 26 distict videos where likes < dislikes. The total videos that it has published is 39. 26/39*100 ## 66% of his videos have more dislikes than likes. ## Hence it could be reasonably infered that it capitalizes on disturbing or disliked content. # Q4(b): Which category seems to capitalize on bad fame i.e. it trends via videos which garners more likes < dislikes? disliked_category = trending1[trending1["likes"] < trending1["dislikes"]] disliked_category = disliked_category.drop_duplicates(subset = ["video_id"], keep = "last") disliked_category.shape pd.set_option('display.max_rows', None) print(disliked_category.groupby("Category name").count().sort_values(by = "dislikes", ascending = False)['dislikes']) ## Entertainment Category has 617 distict videos where likes < dislikes. print(trending1[trending1["Category name"] == "Entertainment"].drop_duplicates(subset = ["video_id"], keep = "last").shape) ## The total videos that have trended in the Entertainment category is 34383. 617/34383*100 ## 1.79% of all videos of the genre capitalises on bad fame with controversial or unconventional content # Q5. What is the engagement rate of each genre/category of videos? 
# Engagement is defined as the number of people who either like, dislike or comment on the video, per 100 viewers of the video # Engagement=(likes+dislikes+comments)/view #Dropping the videos that have comments or ratings disables engagement_df1 = trending1[trending1['comments_disabled']!=1] engagement_df = engagement_df1[engagement_df1['ratings_disabled']!=1] engagement_df=engagement_df.dropna() engagement_df['engagement'] = (engagement_df['likes']+engagement_df['dislikes']+engagement_df['comment_count'])/engagement_df['views']*100 engagement_by_genre = engagement_df.groupby('Category name').mean().sort_values('engagement', ascending = False)["engagement"] engagement_by_genre ## The most engagement is observed in 'Nonprofits & Activism' videos with 7% of the viewers either rating the video (likes/dislikes) or commenting on it. # Q6. What is the impact of the creator disabling comments on the video's likes & dislikes? ## For this, we plot a boxplot to ascertain the IQR of likes when (i) comments are enabled & (ii) comments are disabled Boxplot_likes = trending1.boxplot(by ='comments_disabled', column =['likes'], grid = False, showfliers = False) Boxplot_likes.set_ylim(0,40000) Boxplot_likes.set_title('Likes on Videos when Comments are Enabled v/s Disabled') Boxplot_likes.set_xlabel('Comments Enabled or Disabled') Boxplot_likes.set_ylabel('Number of Likes') trending1['likes'].groupby(trending1['comments_disabled']).describe() ## The IQR of the amount of likes on a video is wider & higher when the comment section is enabled. This also means greater engagement. #Plotting a boxplot to ascertain the IQR of dislikes when (i) comments are enabled & (ii) comments are disabled Boxplot_dislikes = trending1.boxplot(by ='comments_disabled', column =['dislikes'], grid = False, showfliers = False) Boxplot_dislikes.set_title('Disikes on Videos when Comments are Enabled v/s Disabled') Boxplot_dislikes.set_xlabel('Comments Enabled or Disabled') Boxplot_dislikes.set_ylabel('Number of Dislikes') ## The IQR of the amount of dislikes on a video is wider & higher when the comment section is enabled. This also means greater engagement. #Q7. What is the penetration of Youtube (specifically trending videos) in the listed countries? trending_2=trending1.dropna() activeness = trending_2.groupby('country').mean().sort_values('views', ascending = False)["views"] type(activeness) # Activeness is a series. Converting it into a data frame country_activeness=activeness.to_frame() country_activeness #The names of the country is given as an index. Adding a column country_activeness['country']=['UK', 'US', 'Canada', 'India', 'Germany', 'France', 'Japan'] #Adding a column with the population of the respective countries in 2018 country_activeness['population']=[6.610000e+07,3.251000e+08 , 3.650000e+07, 1.338700e+09, 82700000, 6.690000e+07, 1.268000e+08] country_activeness activeness = (country_activeness['views'])/ (country_activeness['population'])*1000 activeness.sort_values(ascending = False) ## The highest penetration of Youtube (limited to trending videos) is in the UK with 92 per 1000 people watching the videos. ## After a huge margin, this is followed by Canada with 31 per 1000 people. ## Q8: Which video has trended the most number of time in the entire dataset? trending1.value_counts(["title"]) # "Drake - Nice For What" is the most trending video in the dataset. 
It has trended 96 times df = trending1.groupby("title") df = df.agg({"trending_date": "nunique"}) df = df.reset_index() df[df["title"] == "Drake - Nice For What"] df = trending1.groupby("title") df = df.agg({"country": "nunique"}) df = df.reset_index() df[df["title"] == "Drake - Nice For What"] # This video trended for a total of 31 days and in 5 countries. ## Q9: How many videos have trended across all 7 countries? df = trending1.groupby("title") df = df.agg({"country": "nunique"}) df = df.reset_index() df[df["country"] == 7].shape ## 22 videos in the dataset have trended across all 7 countries.Aitken Extrapolation Task 1: recreate Table 2.10 in the bookIn the table, we have $x_n = \cos(\frac{1}{n})$ which converges to $\cos(0)=1$ since $\cos$ is continuous. Use Aitken Extrapolation to accelerate this sequence $(x_n)$.# Recreate Table 2.10 in the book import numpy as np from numpy import diff as D # this is the Delta in the book, aka "forward difference" N = 7 # total number of terms p = np.cos( 1/np.r_[1:N+1] ) # Example in book (Table 2.10) print("The sequence p_n is:") for pn in p: print("{:.5f}".format(pn)) print("The sequence \hat{p}_n is:") # Define phat (for p^hat, not as in "that is a phat song!") phat = p[:-2] - (D(p)[:-1])**2 / D(D(p)) for pn in phat: print("{:.5f}".format(pn))The sequence p_n is: 0.54030 0.87758 0.94496 0.96891 0.98007 0.98614 0.98981 The sequence \hat{p}_n is: 0.96178 0.98213 0.98979 0.99342 0.99541Task 2: accelerate a root-finding problemYou could do Steffensen's variant, but we didn't discuss that, so I suggest you just to Aitken extrapolationLet's solve $x = g(x)$ where$$g(x) = .7\cos(x)$$insdie the interval $[0,1]$. We know that $g$ is a contraction since$$|g'(x)| = |-.7\sin(x)| \le .7 < 1$$# Try a root-finding problem, x = .7*cos(x) from scipy.optimize import root_scalar g = lambda x : .7*np.cos(x) output = root_scalar(lambda x : g(x) - x ,bracket=[0,1]) trueRoot = output.root print("The root is",trueRoot) # Accelerate the iteration # Try fixed-point iteration, starting at 0 x = 0 N = 20 p = np.zeros(N) phat= np.zeros(N-2) # There are many ways to program this, depending on how/if you want to save # all the iterates. 
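# Added note (not part of the original notebook): the update inside the loop below is Aitken's Delta^2 formula,
#     phat_n = p_n - (p_{n+1} - p_n)**2 / (p_{n+2} - 2*p_{n+1} + p_n),
# written here with d1_a = p_{n+1} - p_n and d1_b = p_{n+2} - p_{n+1}, so that the
# denominator d1_b - d1_a is exactly the second forward difference Delta^2 p_n.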
for i in range(N): x = g(x) p[i] = x if i > 1: d1_a = p[i-1]-p[i-2] d1_b = p[i]-p[i-1] phat[i-2] = p[i-2] - (d1_a**2)/( d1_b - d1_a) print("p_{:<2d} is {:.5f} and error is {:.3e}; hat(p)_{:<2d} is {:.5f} and error is {:.3e}".format( i, x,abs(x-trueRoot),i-2,phat[i-2],abs(phat[i-2]-trueRoot) ) ) else: print("p_{:<2d} is {:.5f} and error is {:.3e}".format( i, x,abs(x-trueRoot) ) ) # Same thing, but using a different version of the code # (designed to be simpler, since it doesn't save the entire history) x = 0 # our starting guess for i in range(10): # i = 0, 1, 2, ..., 9 -- don't forget that Python is 0 based if i >= 1: xVeryOld = xOld xOld = x x = g(x) # Fixed point iteration update if i >= 1: DeltaOld = Delta Delta = x - xOld if i >= 1: xhat = xVeryOld - DeltaOld**2/( Delta - DeltaOld) print(xVeryOld,xhat)0 0.5667292007496922 0.7 0.5828355747666369 0.5353895310991419 0.5838016125355683 0.6020489755511681 0.5839622114962584 0.5769238612151603 0.583984921687019 0.5867011411648845 0.5839883673835934 0.5829400576356008 0.5839888768922955 0.5843934705791918 0.5839889529996376 0.5838328003801305 0.5839889643240639Fat Tails & Central Limit TheoremThe central limit theorem applies to data sampled from fat-tailed distributions so long as the second moment is finite, but the question is "how soon".For data sampled from a normal distribution (the least fat-tailed!), the mean of a sample of $1$ already has normal distribution with standard deviation $\sigma_\mu = \sigma/\sqrt{n}$.For fat tailed distributions, the required sample size can be much larger.import numpy as np import matplotlib.pyplot as plt def get_variances(distribution): N_TRIALS = 1000 MAX_SAMPLE_SIZE = 10000 sample_sizes = np.array(list(range(1,MAX_SAMPLE_SIZE+1)))[::50] variances = [] for sample_size in sample_sizes: if sample_size % 100 == 1: print("%i/%i" % (sample_size,MAX_SAMPLE_SIZE),end='\r') trials = [] for trial in range(N_TRIALS): trials.append(np.mean(distribution(size=sample_size))) variances.append(np.var(trials)) return sample_sizes, variances def standard_t(df): def f(size): return np.random.standard_t(df=df,size=size) return f dofs = [1,2,3,4,5,10,50,1000]#,10,50,100,1000] fig = plt.figure(figsize=(15,8)) ax1 = fig.add_subplot(1,2,1) ax2 = fig.add_subplot(1,2,2) for dof in dofs: print("dof = %i " % dof) sample_size, variances = get_variances(standard_t(dof)) ax1.semilogy(sample_size,variances*sample_size) ax1.set_ylabel('$\sigma^2 n$',fontsize=15) ax2.semilogy(sample_size,variances) ax2.set_ylabel('$\sigma^2$',fontsize=15) ax1.legend(['dof=%i' % dof for dof in dofs]) ax2.legend(['dof=%i' % dof for dof in dofs]) plt.savefig('img/fattailsstudentt01.png',bbox_inches="tight") fig = plt.figure(figsize=(12,5)) ax = fig.add_subplot(1,1,1) dofs = [1,2,5,1000] for i,dof in enumerate(dofs): sample_sizes = np.array(list(range(1,10000))) means = [] for sample_size in sample_sizes: means.append(np.mean(standard_t(dof)(size=sample_size))) ax.plot(sample_sizes,np.array(means)-i*0,alpha=1.0) ax.set_ylim(-0.5,1.75) ax.legend(["dof=%i" % dof for dof in dofs]) ax.set_title('Central Limit Theorem: Convergence of Sample Mean') plt.savefig('img/fattailstudentt02.png',bbox_inches="tight") ax.set_xlabel('n samples')Star with torusesThis is an example of a synthesized three dimensional volumethat is not easy to visualize using only two dimenaional projections.The calculation to generate the volume array is not optimized and it takes a while to complete.import numpy as np from numpy.linalg import norm def vec(*args): return np.array(args, 
dtype=np.float) def normalize(V, epsilon=1e-12): nm = norm(V) if nm < epsilon: return vec(1, 0, 0) # whatever return (1.0 / nm) * V def point_segment_distance(P, segment, epsilon=1e-4): A = segment[0] B = segment[1] V = B - A nV2 = V.dot(V) if (nV2 < epsilon): #print("degenerate segment") return norm(B - P) lmd = V.dot(P - A) / nV2 if 0 < lmd and lmd < 1: #print("project onto line segment") Pprojection = A + lmd * V return norm(P - Pprojection) # otherwise distance to closer end point #print("closest end point") dA = norm(P - A) dB = norm(P - B) return min(dA, dB) def circle_distance(P, center, radius, normal): plane_distance = normal.dot(P - center) vertical_offset = plane_distance * normal plane_projection = P - vertical_offset direction_from_center = normalize(plane_projection - center) circle_nearest_point = center + radius * direction_from_center distance = norm(P - circle_nearest_point) return distance circle_distance(vec(1,1,1), vec(0,1,1), 2, normalize(vec(-1,2,-1))) # tetrahedral vertices vertices = vec([1,1,1], [-1,-1,1], [1,-1,-1], [-1,1,-1], ) normals = [] origin = vec(0,0,0) radius = 0.9 for v in vertices: normals.append(normalize(v)) def inverse_distance_sum(P, epsilon=0.1): total = 0.0 for (i, v) in enumerate(vertices): dsegment = point_segment_distance(P, [origin, v]) total += 1.0 / (dsegment + epsilon) normal = normals[i] dcircle = circle_distance(P, v, radius, normal) total += 1.0 / (dcircle + epsilon) return total for L in ([0,0,0], [-1, -1, -2], [1, -1, 1], [2,2,2], [1,1,1], [0,0,1]): print(L) P = vec(*L) print(" ", inverse_distance_sum(P)) # This computation takes a while (it has not been optimized.) N = 20 N2 = 2 * N N4 = 4 * N invN = 1.0 / N A = np.zeros((N4, N4, N4), dtype=np.float) def shifti(i): return (i - N2) * invN for i in range(N4): for j in range(N4): for k in range(N4): P = shifti(vec(i,j,k)) A[i, j, k] = inverse_distance_sum(P) from feedWebGL2 import volume H = A.mean() A.shape, H volume.widen_notebook() W = volume.Volume32() W W.load_3d_numpy_array(A, threshold=H, sorted=False) #W.load_3d_numpy_array(A, threshold=H, sorted=True, method="diagonal") x = W.build(1500) # Create a web folder containing the visualization if False: target_folder = "../docs/torus_html" from feedWebGL2 import html_generator html_generator.generate_volume_html(A, target_folder, force=False, width=1500)Hashing one feature# set up the feature hashing encoder to encode # one variable in the dataframe encoder = HashingEncoder(cols=["A7"], n_components=4) # fit the transformer to the train set encoder.fit(X_train) # the hashing method used encoder.hash_method # We can find the algorithms available for hashing # in the hashlib library import hashlib hashlib.algorithms_available # let's transform train and test sets X_train_enc = encoder.transform(X_train) X_test_enc = encoder.transform(X_test) # We see the hashed features at the left of the dataframe X_train_enc.head() # We see the hashed features at the left of the dataframe X_test_enc.head()Multivariate hashingCategory Encoders will hash all categorical variables to the same 4 columns by default. 
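Before running the multivariate example, here is a simplified, hedged sketch of the hashing trick that HashingEncoder relies on (an illustration of the idea, not Category Encoders' exact implementation; md5 is chosen only as an example, echoing the hashlib inspection above): each categorical value is hashed, and the digest is reduced modulo n_components to pick the output column it increments.
import hashlib

def hash_to_column(value, n_components=4):
    # Illustrative only: hash the category value and map it to one of
    # n_components output columns; collisions are expected and accepted.
    digest = hashlib.md5(str(value).encode("utf-8")).hexdigest()
    return int(digest, 16) % n_components

for value in ["a", "b", "g", "p"]:
    print(value, "->", hash_to_column(value))
With several input variables hashed into the same n_components columns, a row's encoded values count how many of its categories landed in each column, which is why the transformed frames below can contain values greater than 1.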
It is like "multivariate" hashing.encoder = HashingEncoder(cols=["A5", "A7", "A12", "A14"], n_components=4) # fit the transformer to the train set encoder.fit(X_train) # let's transform train and test sets X_train_enc = encoder.transform(X_train) X_test_enc = encoder.transform(X_test) # We see the hashed features at the left of the dataframe # Now we see that the values can take numbers beyond 1 # which means that 2 or more variables were assigned # to the same feature X_train_enc.head() X_test_enc.head()# -*- coding: utf-8 -*- """ Created on Thu May 13 03:02:04 2021 @author: BRIJB """ from tensorflow.keras.preprocessing.image import ImageDataGenerator # import the needed packages import tensorflow as tf from keras import losses from keras import optimizers from keras import metrics import matplotlib.pyplot as plt import matplotlib.image as img import tensorflow.keras as keras from keras.preprocessing import image import numpy as np from keras.models import model_from_json import os batch_size = 30 # define and move to dataset directory datasetdir = "/content/drive/MyDrive/Data/Sentinel 2 Data for CNN/EuroSAT/2750" import os os.chdir(datasetdir) # shortcut to the ImageDataGenerator class ImageDataGenerator = keras.preprocessing.image.ImageDataGenerator gen = ImageDataGenerator() iterator = gen.flow_from_directory( os.getcwd(), target_size=(64,64), classes=('Forest', 'HerbaceousVegetation', 'Highway', 'Industrial', 'Pasture', 'PermanentCrop', 'Residential','SeaLake') ) # we can guess that the iterator has a next function, # because all python iterators have one. batch = iterator.next() print(len(batch)) print(type(batch[0])) print(batch[0].shape) print(batch[0].dtype) #print(batch[0].max()) #print(batch[1].shape) #print(batch[1].dtype) #print(type(batch[1])) #the first element is an array of 32 images with 64X64 pixels, and 3 color channels, encoded as floats in the range 0 to 255 #The second element contains the 32 corresponding labels. batch = iterator.next() print(len(batch)) print(type(batch[1])) print(batch[1].shape) print(batch[1].dtype) imgdatagen = ImageDataGenerator( rescale = 1/255., validation_split = 0.2, ) batch_size = 30 height, width = (64,64) train_dataset = imgdatagen.flow_from_directory( os.getcwd(), target_size = (height, width), classes = ('Forest', 'HerbaceousVegetation', 'Highway', 'Industrial', 'Pasture', 'PermanentCrop', 'Residential','SeaLake'), batch_size = batch_size, subset = 'training' ) val_dataset = imgdatagen.flow_from_directory( os.getcwd(), target_size = (height, width), classes = ('Forest', 'HerbaceousVegetation', 'Highway', 'Industrial', 'Pasture', 'PermanentCrop', 'Residential','SeaLake'), batch_size = batch_size, subset = 'validation' ) #In this case out of 5000 images: Training: 4000 Validation: 20% of 4000 i.e. 
1000 model = keras.models.Sequential() initializers = { } model.add( keras.layers.Conv2D( 24, 5, input_shape=(64,64,3), activation='relu', ) ) model.add( keras.layers.MaxPooling2D(2) ) model.add( keras.layers.Conv2D( 48, 5, activation='relu', ) ) model.add( keras.layers.MaxPooling2D(2) ) model.add( keras.layers.Conv2D( 96, 5, activation='relu', ) ) model.add( keras.layers.Flatten() ) model.add( keras.layers.Dropout(0.4) ) model.add( keras.layers.Dense( 8, activation='softmax', ) ) model.summary() model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adamax(lr=0.001), metrics=['acc']) history = model.fit_generator( train_dataset, validation_data = val_dataset, workers=10, epochs=1, ) #result of this model is not impressive, but can be imroved #443/443 [==============================] - 363s 802ms/step - loss: 0.3290 - acc: 0.3072 - val_loss: 0.2291 - val_acc: 0.5466 import glob import numpy as np from PIL import Image # https://stackoverflow.com/questions/37747021/create-numpy-array-of-images X_data = [] files = glob.glob(r"/content/drive/MyDrive/Data/data/*.jpg") for my_file in files: print(my_file) #image = Image.open(my_file).convert('RGB') image = Image.open(my_file) image = np.array(image) X_data.append(image) X_data = np.array(X_data) x = np.array(X_data[0]) print('X_data shape:', np.array(X_data).shape) # If you have 20 64x64 images, this would print 'X_data shape: (20, 64,64,3)' # @tf.function(experimental_relax_shapes=True) # def predict(x): # return model.predict(x) output = model.predict(X_data) print(output) # define and move to dataset directory datasetdir = "/content/drive/MyDrive/Data/Sentinel 2 Data for CNN/EuroSAT" import os os.chdir(datasetdir) model_1_EUROSAT_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_1_EUROSAT_json) # serialize weights to HDF5 model.save_weights("model_1_EUROSAT_json.h5") print("Saved model to disk")Saved model to diskVOLTTRON ChargePoint Collector Notebook This notebook sets up a ChargePoint device and forwards datafrom one VOLLTRON instance (this Collector) to another instance (the Aggregator).Most of the notebook's setup and execution is done with shell commands, called from Python. Setup: Prepare the Volttron Environment VOLTTRON must be installed before using this notebook. 
For detailed instructions oninstalling and configuring a VOLTTRON/Jupyter server environment, see [Jupyter Notebooks](http://volttron.readthedocs.io/en/devguides/supporting/utilities/JupyterNotebooks.html) in VOLTTRON ReadTheDocs.As is described in that guide, environment variables should have been defined before starting the Jupyter server:````$ export VOLTTRON_ROOT=~/repos/volttron```` (path of the VOLTTRON repository, installed prior to running bootstrap)````$ export VOLTTRON_HOME=~/.volttron```` (directory in which the VOLTTRON instance runs)The first VOLTTRON instance on a server usually runs, by convention, in ~/.volttron.If multiple VOLTTRON instances are to be run on a single host, each must have its own VOLTTRON_HOME.Also before starting the Jupyter server, a VOLTTRON virtual environment should have been activated by executing the following in $VOLTTRON_ROOT:````$ source env/bin/activate````The Python code below does some initialization to prepare for the steps that follow.import datetime import json import os import pprint import sqlite3 import subprocess import sys import time # Define a "run this shell command" method, wrapping subprocess.check_output() def _sh(shell_command, shell=True, stderr=None): try: return_value = subprocess.check_output(shell_command, shell=shell, stderr=stderr) except Exception, err: print('Shell command failed: {}', shell_command) print(err) return_value = 'Error' return return_value # Same as _sh(), except that this also prints the command output, preceded by an optional label. def _print_sh(shell_command, label=None, **kwargs): print('{0}: {1}\n'.format(label+':' if label else '', _sh(shell_command, **kwargs))) # Set up local variables vhome and vroot. # The environment variables VOLTTRON_ROOT and VOLTTRON_HOME should already be defined -- see above. vroot = %env VOLTTRON_ROOT vhome = %env VOLTTRON_HOME print("VOLTTRON_ROOT={}".format(vroot)) print("VOLTTRON_HOME={}".format(vhome)) # Define a VIP_SOCKET environment variable for use while installing and running agents. socket_name = 'ipc://' + vhome + '/run/vip.socket' %env VIP_SOCKET=$socket_name # Run from the VOLTTRON root directory. os.chdir(vroot) print("Initialization complete")Setup: Shut Down All Agents This ensures a clean agent installation process by the notebook.print('Wait for the list to be displayed, and confirm that no agents are listed as running...\n') # Shut down all agents. _sh('volttron-ctl shutdown') # List agent status to verify that the status of each agent is 0 or blank. 
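# Added note (not in the original): _print_sh passes its keyword arguments through to _sh, which hands them
# to subprocess.check_output, so stderr=subprocess.STDOUT in the call below merges volttron-ctl's stderr into
# the captured output and any warnings are printed together with the status listing.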
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)Setup: Discover the Collector's Network Parameters In order for this Collector to forward data to an Aggregator, the Aggregatormust know the Collector's network parameters, storing them in its known_hosts file.Discover those parameters now.Copy the vip-address's IP and port, and the serverkey,to the Aggregator notebook under 'Setup: Add Each Collector to the known_hosts File',and execute that notebook's code to add this Collector to known_hosts.# Obtain this server's IP address, volttron port number (usually 22916), and server key: print('Obtaining network parameters and server key; please wait...\n') _print_sh('curl ifconfig.me', label='Public IP address') _print_sh('volttron-ctl auth serverkey', label='Serverkey') _print_sh('cat {}/config'.format(vhome), label='Config file')Setup: Configure the Aggregator's Network Parameters This Collector forwards data to an Aggregator, so it must beconfigured with the Aggregator's IP address, port number and server key.Define those parameters here. Obtain them from the Aggregator notebook,'Setup: Discover the Aggregator's Network Parameters'.aggregator_vip_address = '172.16.58.3' aggregator_vip_port = '22916' aggregator_server_key = '' aggregator_vip = "tcp://{0}:{1}".format(aggregator_vip_address, aggregator_vip_port) print('vip = {0}'.format(aggregator_vip)) print('aggregator_server_key = {0}'.format(aggregator_server_key))Setup: Test the TCP Connection The ForwardHistorian will send requests to the VOLTTRON Aggregator instancevia TCP commands. Test that the Aggregator instance is capable of receivingTCP requests on the designated IP address and port.If this test fails, the port may not be open on the other server (firewall issue?),the request may be for the wrong IP address and/or port ID,or the other server's VOLTTRON instance may be down or incorrectly configured.# Use an 'nc' (netcat) command to test the TCP connection shell_command = 'nc -z -vv -w5 {0} {1}'.format(aggregator_vip_address, aggregator_vip_port) _print_sh(shell_command, label='Network connection test result', stderr=subprocess.STDOUT)Setup: Configure a ForwardHistorian Create a configuration file for this collector's ForwardHistorian.The file specifies the Aggregator's IP address, port and server key,and indicates which topics should be forwarded.config = """{{ "destination-vip": "{0}", "destination-serverkey": "{1}", "required_target_agents": [], "custom_topic_list": [], "services_topic_list": ["devices"], "topic_replace_list": [ {{ "from": "FromString", "to": "ToString" }} ] }}""".format(aggregator_vip, aggregator_server_key) print("config = {}".format(config)) config_path = vhome + '/my_chargepoint_forwarder.config' with open(config_path, 'w') as file: file.write(config) print('Forwarder configuration written to {}'.format(config_path))Configure a ChargePoint Device Driver ** ISSUE: The ChargePoint driver's configuration file needs to include authentication parameters.**** Before running the following, update examples/configurations/drivers/chargepoint1.config with a valid username and password.**# Change these values for the target ChargePoint device chargepoint_station_id = '**Put Chargepoint station ID here.**' chargepoint_username = '**Put Chargepoint username here.**' chargepoint_password = '**Put here.**' def install_driver_csv(name=None, csv=None): _sh('volttron-ctl config store platform.driver {0} {1} --csv'.format(name, csv)) def install_driver_config(name=None, config=None): _sh('volttron-ctl config store 
platform.driver {0} {1}'.format(name, config)) # Create a points file for the ChargePoint device. points = '''Volttron Point Name,Attribute Name,Register Name,Port #,Type,Units,Starting Value,Writable,Notes stationID,stationID,StationRegister,,string,Format similar to 1:00001,,FALSE, stationManufacturer,stationManufacturer,StationRegister,,string,String,,FALSE, stationModel,stationModel,StationRegister,,string,String,,FALSE, portNumber,portNumber,StationRegister,1,string,Integer,,FALSE, stationName,stationName,StationRegister,1,string,String,,FALSE, stationMacAddr,stationMacAddr,StationRegister,,string,String (colon separated mac address),,FALSE, stationSerialNum,stationSerialNum,StationRegister,,string,String,,FALSE, Address,Address,StationRegister,,string,String,,FALSE, City,City,StationRegister,,string,String,,FALSE, State,State,StationRegister,,string,String,,FALSE, Country,Country,StationRegister,,string,String,,FALSE, postalCode,postalCode,StationRegister,,string,US Postal code,,FALSE, Lat,Lat,StationRegister,1,float,Latitude Coordinate,,FALSE, Long,Long,StationRegister,1,float,Longitude Coordinate,,FALSE, Reservable,Reservable,StationRegister,1,bool,T/F,,FALSE, Level,Level,StationRegister,1,string,"L1, L2, L3",,FALSE, Mode,Mode,StationRegister,1,int,"1,2,3",,FALSE, Voltage,Voltage,StationRegister,1,float,Configured Voltage,,FALSE, Current,Current,StationRegister,1,float,Configured Current,,FALSE, Power,Power,StationRegister,1,float,Configured Power,,FALSE,Power supported (kW). numPorts,numPorts,StationRegister,,int,Integer,,FALSE,Number of Ports Type,Type,StationRegister,,int,Integer or None,,FALSE, startTime,startTime,StationRegister,,datetime,Datetime,,FALSE, endTime,endTime,StationRegister,,datetime,Datetime,,FALSE, minPrice,minPrice,StationRegister,,float,Dollar Amount,,FALSE, maxPrice,maxPrice,StationRegister,,float,Dollar Amount,,FALSE, unitPricePerHour,unitPricePerHour,StationRegister,,float,Dollar Amount,,FALSE, unitPricePerSession,unitPricePerSession,StationRegister,,float,Dollar Amount,,FALSE, unitPricePerKWh,unitPricePerKWh,StationRegister,,float,Dollar Amount,,FALSE, unitPriceForFirst,unitPriceForFirst,StationRegister,,float,Dollar Amount,,FALSE, unitPricePerHourThereafter,unitPricePerHourThereafter,StationRegister,,float,Dollar Amount,,FALSE, sessionTime,sessionTime,StationRegister,,datetime,,,FALSE, Description,Description,StationRegister,1,string,String,,FALSE, mainPhone,mainPhone,StationRegister,,string,Phone Number,,FALSE, orgID,orgID,StationRegister,,string,,,FALSE, organizationName,organizationName,StationRegister,,string,,,FALSE, sgID,sgID,StationRegister,,string,,,FALSE, sgName,sgName,StationRegister,,string,,,FALSE, currencyCode,currencyCode,StationRegister,,string,,,FALSE, Status,Status,StationStatusRegister,1,string,,,FALSE,"AVAILABLE, INUSE, UNREACHABLE, UNKNOWN " Status.TimeStamp,TimeStamp,StationStatusRegister,1,datetime,,,FALSE,Timestamp of the last communication between the station and ChargePoint Connector,Connector,StationRegister,1,string,,,FALSE,"Connector type. For example: NEMA 5-20R, J1772, ALFENL3, " shedState,shedState,LoadRegister,1,integer,0 or 1,0,TRUE,True when load shed limits are in place portLoad,portLoad,LoadRegister,1,float,kw,,FALSE,Load in kw allowedLoad,allowedLoad,LoadRegister,1,float,kw,,TRUE,Allowed load in kw when shedState is True percentShed,percentShed,LoadRegister,1,integer,percent,,TRUE,Percent of max power shed when shedState is True alarmType,alarmType,AlarmRegister,,string,,,FALSE,eg. 
'GFCI Trip' alarmTime,alarmTime,AlarmRegister,,datetime,,,FALSE, clearAlarms,clearAlarms,AlarmRegister,,int,,0,TRUE,Sends the clearAlarms query when set to True stationRightsProfile,stationRightsProfile,StationRightsRegister,,dictionary,,,FALSE,"Dictionary of sgID, rights name tuples." sessionID,sessionID,ChargingSessionRegister,1,string,,,FALSE, startTime,startTime,ChargingSessionRegister,1,datetime,,,FALSE, endTime,endTime,ChargingSessionRegister,1,datetime,,,FALSE, Energy,Energy,ChargingSessionRegister,1,float,,,FALSE, rfidSerialNumber,rfidSerialNumber,ChargingSessionRegister,1,string,,,FALSE, driverAccountNumber,driverAccountNumber,ChargingSessionRegister,1,string,,,FALSE, driverName,driverName,ChargingSessionRegister,1,string,,,FALSE,''' # print("points file contents = {}".format(points)) csv_path = vhome + '/my_chargepoint.csv' with open(csv_path, 'w') as file: file.write(points) print('ChargePoint points file written to {}\n'.format(csv_path)) # Create a config file for the device; "{2}" below is filled with the ChargePoint password config = """{{ "driver_config": {{ "stationID" : "{0}", "username" : "{1}", "password" : "{2}", "cacheExpiration" : 40 }}, "driver_type": "chargepoint", "registry_config":"config://{3}", "interval": 5, "timezone": "US/Pacific", "heart_beat_point": "Heartbeat" }}""".format(chargepoint_station_id, chargepoint_username, chargepoint_password, 'my_chargepoint.csv') print("config = {}".format(config)) config_path = vhome + '/my_chargepoint.config' with open(config_path, 'w') as file: file.write(config) print('ChargePoint configuration written to {}'.format(config_path)) # Store the configurations in the master driver. print('\nWait for the platform driver config to display, then confirm that this config appears in it...') install_driver_csv(name='my_chargepoint.csv', csv=csv_path) install_driver_config(name='devices/my_chargepoint', config=config_path) # List the driver configuration to confirm that the drivers were installed successfully. _print_sh('volttron-ctl config list platform.driver')Setup: Install Agents Install each agent employed by the Collector: a master driver, a ForwardHistorian, and 2 Volttron Central agents.print('Wait for the list to be displayed, then confirm that all of these agents appear in it...') def install_agent(dir=None, id=None, config=None, tag=None): script_install_command = 'python scripts/install-agent.py -s {0} -i {1} -c {2} -t {3} -f' _sh(script_install_command.format(dir, id, config, tag)) print('Installed {}'.format(tag)) # Install the MasterDriver agent, which runs the ChargePoint driver install_agent(dir=vroot+'/services/core/MasterDriverAgent/', id='platform.driver', config=vroot+'/services/core/MasterDriverAgent/master-driver.agent', tag='platform.driver') # Install a ForwardHistorian agent that forwards metrics to another VOLTTRON instance install_agent(dir=vroot+'/services/core/ForwardHistorian', id='forward_historian', config=vhome+'/my_chargepoint_forwarder.config', tag='forward_historian') # Install a Platform Agent install_agent(dir=vroot+'/services/core/VolttronCentralPlatform', id='platform.agent', config=vroot+'/services/core/VolttronCentralPlatform/config', tag='vcp') # Install a Volttron Central Agent install_agent(dir=vroot+'/services/core/VolttronCentral', id='volttron.central', config=vroot+'/services/core/VolttronCentral/config', tag='vc') # List agent status to verify that the agents were installed successfully.
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)Setup: Get the Collector's forward_historian Credentials The Collector's ForwardHistorian agent needs to authenticate to the Aggregator. Authentication is facilitated by adding the agent's credentials to the Aggregator's auth.json file.Copy the PUBLICKEY from the command output below. On the Aggregator, run `volttron-ctl auth add` from the command line. When prompted for credentials, paste the key._print_sh('volttron-ctl auth publickey --tag forward_historian')Execution: Refresh Variables and Stop Agents Before starting up the agents, refresh all variables and make sure that all agents are stopped.print('Make a fresh start - refresh variable definitions, shut down any running agents, refresh the database') import datetime import json import os import pprint import sqlite3 import subprocess import sys import time # Define a "run this shell command" method, wrapping subprocess.check_output() def _sh(shell_command, shell=True, stderr=None): try: return_value = subprocess.check_output(shell_command, shell=shell, stderr=stderr) except Exception, err: print('Shell command failed: {}', shell_command) print(err) return_value = 'Error' return return_value # Same as _sh(), except that this also prints the command output, preceded by an optional label. def _print_sh(shell_command, label=None, **kwargs): print('{0}: {1}\n'.format(label+':' if label else '', _sh(shell_command, **kwargs))) # Set up local variables vhome and vroot. # The environment variables VOLTTRON_ROOT and VOLTTRON_HOME should already be defined -- see above. vroot = %env VOLTTRON_ROOT vhome = %env VOLTTRON_HOME print("VOLTTRON_ROOT={}".format(vroot)) print("VOLTTRON_HOME={}".format(vhome)) # Define a VIP_SOCKET environment variable for use while installing and running agents. socket_name = 'ipc://' + vhome + '/run/vip.socket' %env VIP_SOCKET=$socket_name # Run from the VOLTTRON root directory. os.chdir(vroot) # Shut down all agents. _sh('volttron-ctl shutdown') # List agent status to verify that the status of each agent is 0 or blank. _print_sh('volttron-ctl status', stderr=subprocess.STDOUT)Execution: Start the agentsprint('Wait for the list to be displayed, then confirm that each started agent is running...') _sh('volttron-ctl start --tag platform.driver') _sh('volttron-ctl start --tag forward_historian') _sh('volttron-ctl start --tag vcp') _sh('volttron-ctl start --tag vc') # List agent status to verify that the started agents have status "running". _print_sh('volttron-ctl status', stderr=subprocess.STDOUT)Shutdown: Stop all agents# Stop all agents. _sh('volttron-ctl shutdown') # Verify that all agents have been stopped. _print_sh('volttron-ctl status', stderr=subprocess.STDOUT)No Show Predictive Model Data GeneratorModel to predict if a patient is likely to either NO SHOW or be significantly late for a doctor's appointment. Importsimport sys import numpy as np import scipy.stats as stats import pandas as pd import cudf from cudf.dataframe import DataFrame import dask import numba from timeit import default_timer class Timer(object): """Timer class. 
Examples: >>> big_num = 100000 >>> t = Timer() >>> t.start() >>> for i in range(big_num): >>> r = 1 >>> t.stop() >>> print(t.interval) 0.0946876304844 >>> with Timer() as t: >>> for i in range(big_num): >>> r = 1 >>> print(t.interval) 0.0766928562442 >>> try: >>> with Timer() as t: >>> for i in range(big_num): >>> r = 1 >>> raise(Exception("Get out!")) >>> finally: >>> print(t.interval) 0.0757778924471 """ def __init__(self): self._timer = default_timer def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self): """Start the timer.""" self.start = self._timer() def stop(self): """Stop the timer. Calculate the interval in seconds.""" self.end = self._timer() self.interval = self.end - self.start # Label Encoder Class import cudf import nvcategory from librmm_cffi import librmm import numpy as np def _enforce_str(y: cudf.Series) -> cudf.Series: if y.dtype != "object": return y.astype("str") return y class Base(object): def __init__(self, *args, **kwargs): self._fitted = False def check_is_fitted(self): if not self._fitted: raise TypeError("Model must first be .fit()") import cudf import nvcategory from librmm_cffi import librmm import numpy as np def _enforce_str(y: cudf.Series) -> cudf.Series: if y.dtype != "object": return y.astype("str") return y class Base(object): def __init__(self, *args, **kwargs): self._fitted = False def check_is_fitted(self): if not self._fitted: raise TypeError("Model must first be .fit()") class LabelEncoder(Base): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._cats: nvcategory.nvcategory = None self._dtype = None def fit(self, y: cudf.Series) -> "LabelEncoder": self._dtype = y.dtype y = _enforce_str(y) self._cats = nvcategory.from_strings(y.data) self._fitted = True return self def transform(self, y: cudf.Series) -> cudf.Series: self.check_is_fitted() y = _enforce_str(y) encoded = cudf.Series( nvcategory.from_strings(y.data) .set_keys(self._cats.keys()) .values() ) if -1 in encoded: raise KeyError("Attempted to encode unseen key") return encoded def fit_transform(self, y: cudf.Series) -> cudf.Series: self._dtype = y.dtype y = _enforce_str(y) self._cats = nvcategory.from_strings(y.data) self._fitted = True arr: librmm.device_array = librmm.device_array( y.data.size(), dtype=np.int32 ) self._cats.values(devptr=arr.device_ctypes_pointer.value) return cudf.Series(arr) def inverse_transform(self, y: cudf.Series): raise NotImplementedError # Given a cudf string column, returns the unique values def get_unique_strings(ds): c = nvcategory.from_strings(ds.data) return cFunction and Variable Definitions# Parmeters that can be used to control the size of this synthetic dataset num_departments = 12 num_providers_per_department = 10 num_appts_per_day = 20 num_working_days_year = 50 * 5 num_lookback_years = 3 num_samples = num_departments * num_providers_per_department * num_appts_per_day * num_working_days_year * num_lookback_years print(num_samples/1E6, 'million doctor visits') features = [ 'AGE', 'GENDER', 'INSURANCE', 'VISIT_TYPE', 'DEPT_SPECIALTY', 'DEPT_ID', 'NO_SHOW_RATE', 'LATE_START_RATE', 'APPT_WEEKDAY', 'APPT_TIME', 'DAY', 'MONTH', 'ZIPCODE', 'DISTANCE_FROM_CLINIC', 'PREDICTED_ADVERSE_WEATHER', 'ACTUAL_ADVERSE_WEATHER'] gender_dict = { 1: 'MALE', 2: 'FEMALE', 3: 'OTHER', 4: 'UNKNOWN' } genders = list(gender_dict.values()) insurance_dict = { 1: 'MEDICARE', 2: 'MEDICAID', 3: 'EMPLOYER', 4: 'PRIVATE', 5: 'OTHER' } insurances = list(insurance_dict.values()) visit_type_dict = { 1: 'Office_Visit', 2: 
'MRI', 3: 'CT', 4: 'Physical_Exam', 5: 'Flu_Clinic', 6: 'OP_Procedure', 7: 'PT', 8: 'OTHER' } visit_types = list(visit_type_dict.values()) dept_specialty_dict = { 1: 'PRIMARY_CARE', 2: 'RADIOLOGY', 3: 'ONCOLOGY', 4: 'PEDIATRICS', 5: 'CARDIOLOGY', 6: 'NEUROLOGY', 7: 'URGENT_CARE', 8: 'GI', 9: 'UROLOGY', 10: 'DERMATOLOGY', 11: 'PULMONOLOGY', 12: 'ENDOCRINOLOGY', 13: 'PYSCHIATRY', 14: 'OBGYN', 15: 'ORTHO', 16: 'INTERNAL_MEDICINE', 17: 'PT', 18: 'OTHER' } dept_specialties = list(dept_specialty_dict.values()) departments = [dep for dep in range(1, num_departments+1)] weekday_dict= { 1: 'MON', 2: 'TUE', 3: 'WED', 4: 'THU', 5: 'FRI' } weekdays = list(weekday_dict.values()) appt_times = [x/100 for x in range(800, 1850, 50)] days = [d for d in range(1, 29)] months = [m for m in range(1, 13)] zipcodes = [z for z in range(90001, 96162)] # Roughly maps to CA # Top Appointment Reasons appt_reason_dict = { 1: 'Skin', 2: 'Joint', 3: 'Back', 4: 'Cholesterol', 5: 'Respiratory', 6: 'Mental_Health', 7: 'Neurologic', 8: 'BP', 9: 'Headache', 10: 'Diabetes', 11: 'Other' } appt_reasons = list(appt_reason_dict.values()) appt_reason_features = ['APPT_REASON_' + str(rsn) for rsn in appt_reasons] features += appt_reason_features # Top Health Issues health_issue_dict = { 1: 'Heart_Disease', 2: 'Cancer', 3: 'Stroke', 4: 'Respiratory_Disease', 5: 'Injuries', 6: 'Diabetes', 7: 'Alzheimers', 8: 'Pneumonia', 9: 'Kidney_Disease', 10: 'Mental_Health', 11: 'Pregnancy', 12: 'Other' } health_issues = list(health_issue_dict.values()) health_issue_features = ['HEALTH_ISSUE_' + str(iss) for iss in health_issues] features += health_issue_features def generate_features(): gdf = DataFrame() binary_choice = [1, 0] gdf['AGE'] = np.random.randint(1,100, size=(num_samples,)) gdf['GENDER'] = np.random.choice(genders, size=num_samples, p=[0.45, 0.45, 0.05, 0.05]) gdf['INSURANCE'] = np.random.choice(insurances, size=num_samples, p=[0.15, 0.15, 0.50, 0.15, 0.05]) gdf['VISIT_TYPE'] = np.random.choice(visit_types, size=num_samples, p=[0.45, 0.05, 0.05, 0.05, 0.2, 0.1, 0.05, 0.05]) gdf['DEPT_SPECIALTY'] = np.random.choice(dept_specialties, size=num_samples) gdf['DEPT_ID'] = np.random.choice(departments, size=num_samples) gdf['APPT_WEEKDAY'] = np.random.choice(weekdays, size=num_samples) gdf['APPT_TIME'] = np.random.choice(appt_times, size=num_samples) gdf['DAY'] = np.random.choice(days, size=num_samples) gdf['MONTH'] = np.random.choice(months, size=num_samples) # Created a truncated normal distribution for distance from clinic lower, upper = 0, 20 mu, sigma = 3, 3 X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma) gdf['DISTANCE_FROM_CLINIC'] = X.rvs(num_samples) gdf['DISTANCE_FROM_CLINIC'] = gdf['DISTANCE_FROM_CLINIC'].astype('int8') gdf['PREDICTED_ADVERSE_WEATHER'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['ACTUAL_ADVERSE_WEATHER'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) # Generate some socio-economic features in a separate zipcode dataframe gdf['ZIPCODE'] = np.random.choice(zipcodes, size=num_samples) zipcode_gdf = DataFrame() zipcode_gdf['ZIPCODE'] = zipcodes zipcode_gdf['ACCESS_TO_TRANSPORTATION'] = np.random.choice(binary_choice, size=len(zipcodes), p=[0.9, 0.1]) # Special handling for categorical data that's multiple response gdf['APPT_REASON_Skin'] = np.random.choice(binary_choice, size=num_samples, p=[0.43, 0.57]) gdf['APPT_REASON_Joint'] = np.random.choice(binary_choice, size=num_samples, p=[0.34, 0.66]) gdf['APPT_REASON_Back'] = 
np.random.choice(binary_choice, size=num_samples, p=[0.24, 0.76]) gdf['APPT_REASON_Cholesterol'] = np.random.choice(binary_choice, size=num_samples, p=[0.22, 0.78]) gdf['APPT_REASON_Respiratory'] = np.random.choice(binary_choice, size=num_samples, p=[0.22, 0.78]) gdf['APPT_REASON_Mental_Health'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['APPT_REASON_Neurologic'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['APPT_REASON_BP'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['APPT_REASON_Headache'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['APPT_REASON_Diabetes'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['APPT_REASON_Other'] = np.random.choice(binary_choice, size=num_samples, p=[0.3, 0.7]) gdf['HEALTH_ISSUE_Heart_Disease'] = np.random.choice(binary_choice, size=num_samples, p=[0.2, 0.8]) gdf['HEALTH_ISSUE_Cancer'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['HEALTH_ISSUE_Stroke'] = np.random.choice(binary_choice, size=num_samples, p=[0.05, 0.95]) gdf['HEALTH_ISSUE_Respiratory_Disease'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['HEALTH_ISSUE_Injuries'] = np.random.choice(binary_choice, size=num_samples, p=[0.2, 0.8]) gdf['HEALTH_ISSUE_Diabetes'] = np.random.choice(binary_choice, size=num_samples, p=[0.2, 0.8]) gdf['HEALTH_ISSUE_Alzheimers'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['HEALTH_ISSUE_Pneumonia'] = np.random.choice(binary_choice, size=num_samples, p=[0.1, 0.9]) gdf['HEALTH_ISSUE_Kidney_Disease'] = np.random.choice(binary_choice, size=num_samples, p=[0.05, 0.95]) gdf['HEALTH_ISSUE_Mental_Health'] = np.random.choice(binary_choice, size=num_samples, p=[0.2, 0.8]) gdf['HEALTH_ISSUE_Other'] = np.random.choice(binary_choice, size=num_samples, p=[0.3, 0.7]) # Create a Pregnancy feature using Gender and a pregnancy probability # Since we don't support string values in lambda functions, label encode gender first le = LabelEncoder() gdf['GENDER_CODE'] = le.fit_transform(gdf['GENDER']) # Double the probability of what I actually want, since I'll be splitting it by half between male & female. 
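# Added clarification (not in the original): the 0.2 below is that doubled probability; the preg_kernel defined
# next keeps the drawn value only where gender_code == 2 (intended to be the FEMALE code) and zeroes it elsewhere,
# so roughly half of the positive draws survive and the effective pregnancy rate lands near 10% of all samples.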
gdf['PREG_PROBABILITY'] = np.random.choice([1, 0], size=num_samples, p=[0.2, 0.8]) def preg_kernel(GENDER_CODE, PREG_PROBABILITY, HEALTH_ISSUE_Pregnancy, kwarg1): for i, (gender_code, preg_probability) in enumerate(zip(GENDER_CODE, PREG_PROBABILITY)): preg_val = 0 if (gender_code == 2): preg_val = preg_probability HEALTH_ISSUE_Pregnancy[i] = preg_val gdf = gdf.apply_rows(preg_kernel, incols=['GENDER_CODE', 'PREG_PROBABILITY'], outcols=dict(HEALTH_ISSUE_Pregnancy=np.int), kwargs=dict(kwarg1=1) ) gdf.drop_column('PREG_PROBABILITY') gdf['NO_SHOW_RATE'] = np.random.choice([0, 0.2, 0.4, 0.6, 0.8, 1.0 ], size=num_samples, p=[0.5, 0.2, 0.15, 0.1, 0.05, 0]) gdf['LATE_START_RATE'] = np.random.choice([0, 0.2, 0.4, 0.6, 0.8, 1.0 ], size=num_samples, p=[0.2, 0.4, 0.2, 0.1, 0.05, 0.05]) # Create a column for noise gdf['NOISE'] = np.random.ranf(size=num_samples) return gdf, zipcode_gdf # Generate labels using cuDF def generate_labels(AGE, GENDER_CODE, NO_SHOW_RATE, LATE_START_RATE, \ DISTANCE_FROM_CLINIC, ACCESS_TO_TRANSPORTATION, DAY, MONTH, \ HEALTH_ISSUE_Mental_Health, DEPT_ID, NOISE,\ LABEL, kwarg1): for i, (age, gender_code, no_show_rate, late_start_rate, \ distance_from_clinic, access_to_transportation, day, month, \ health_issue_mental_health, dept_id, noise) \ in enumerate(zip(AGE, GENDER_CODE, NO_SHOW_RATE, LATE_START_RATE, \ DISTANCE_FROM_CLINIC, ACCESS_TO_TRANSPORTATION, DAY, MONTH, \ HEALTH_ISSUE_Mental_Health, DEPT_ID, NOISE)): prob = 0 if (age>=18) and (age<30) and (gender_code==1): prob += 0.1 if (age>=30) and (age<40) and (distance_from_clinic>=3) and (distance_from_clinic<6): prob += 0.1 if access_to_transportation == 0: prob += 0.1 if no_show_rate > 0.2 or late_start_rate > 0.6: prob += 0.1 if health_issue_mental_health == 1: prob += 0.1 if (dept_id==2) or (dept_id==3) or (dept_id==5) or (dept_id==7): prob += 0.1 holiday_week = 0 if (month==5 and day>24) \ or (month==7 and day<8) \ or (month==9 and day<8) \ or (month==12 and day>21) \ or (month==1 and day<3): \ holiday_week = 1 if (holiday_week==1): prob += 0.2 # Add some noise prob = prob + (0.33 * noise) if prob > 0.5: prob = 1 else: prob = 0 LABEL[i] = probGenerate Features and Labelsgdf, zipcode_gdf = generate_features() gdf = gdf.merge(zipcode_gdf, how="left", on=['ZIPCODE']) gdf = gdf.apply_rows(generate_labels, incols=['AGE', 'GENDER_CODE', 'NO_SHOW_RATE', 'LATE_START_RATE', 'DISTANCE_FROM_CLINIC', 'ACCESS_TO_TRANSPORTATION', 'DAY', 'MONTH', \ 'HEALTH_ISSUE_Mental_Health', 'DEPT_ID', 'NOISE'], outcols=dict(LABEL=np.int), kwargs=dict(kwarg1=1) ) # Remove column so that you can save it with just zipcode info gdf.drop_column('ACCESS_TO_TRANSPORTATION') # Take out the encoded column and just leave the string one gdf.drop_column('GENDER_CODE') # Remove noise column gdf.drop_column('NOISE') print("Samples: {:.1f} million".format(len(gdf)/1E6)) print("Features + Label:", len(gdf.columns.tolist())) print("Dataset size: {:.1f} GB".format(sys.getsizeof(gdf)/1E9)) print(gdf.head())AGE GENDER INSURANCE VISIT_TYPE DEPT_SPECIALTY DEPT_ID APPT_WEEKDAY ... LABEL 0 24 MALE EMPLOYER Office_Visit DERMATOLOGY 4 MON ... 1 1 34 FEMALE PRIVATE Office_Visit PULMONOLOGY 5 WED ... 1 2 53 MALE EMPLOYER Office_Visit PT 7 THU ... 0 3 51 MALE EMPLOYER OTHER OTHER 7 FRI ... 0 4 88 MALE MEDICARE CT OBGYN 7 TUE ... 
0 [32 more columns]Save Dataframes to CSV# Convert to pandas and save it off for reuse pdf = gdf.to_pandas() zipcode_pdf = zipcode_gdf.to_pandas() # Add a few nulls pdf['INSURANCE'] = pdf['INSURANCE'].replace('OTHER', np.nan) pdf['INSURANCE'].head(10) path='patient_data.csv' pdf.to_csv(path, index=False) path='zipcode_data.csv' zipcode_pdf.to_csv(path, index=False)Dual Doppler lobe plotterDD lobe plotter. Needs py-ART Grid file since DD lobes are calculated in radar relative coordinates.Based on code created by , , , and myselfimport pyart import gzip from matplotlib import pyplot as plt from matplotlib import rcParams from scipy import ndimage import shutil, os from datetime import timedelta, datetime import numpy as np import tempfile import glob import re from copy import deepcopy from IPython.display import Image, display import math %matplotlib inline import pyproj import cartopy.crs as ccrs import cartopy.feature as cfeature from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter # Input the range of dates and time wanted for the collection of images start_year = 2005 start_day = 9 start_month = 12 start_hour = 21 start_minute = 1 start_second = 0 end_year = 2005 end_month = 12 end_day = 9 end_hour = 23 end_minute = 50 end_second = 0 data_path = '/home/rjackson/multidop_grids/'This looks for all of the available timeperiods in the data_path directory and pulls outthe file names that match the given time periods above.# get_radar_times # start_year = Start year of animation # start_month = Start month of animation # start_day = Start day of animation # start_hour = Start hour of animation # end_year = End year of animation # end_month = End month of animation # end_day = End day of animation # end_minute = End minute of animation # minute_interval = Interval in minutes between scans (default is 5) # This procedure acquires an array of Radar classes between start_time and end_time def get_dda_times(start_year, start_month, start_day, start_hour, start_minute, end_year, end_month, end_day, end_hour, end_minute, minute_interval=5): start_time = datetime(start_year, start_month, start_day, start_hour, start_minute, ) end_time = datetime(end_year, end_month, end_day, end_hour, end_minute, ) deltatime = end_time - start_time if(deltatime.seconds > 0 or deltatime.minute > 0): no_days = deltatime.days + 1 else: no_days = deltatime.days if(start_day != end_day): no_days = no_days + 1 days = np.arange(0, no_days, 1) print('We are about to load grid files for ' + str(no_days) + ' days') # Find the list of files for each day cur_time = start_time file_list = [] time_list = [] for i in days: year_str = "%04d" % cur_time.year day_str = "%02d" % cur_time.day month_str = "%02d" % cur_time.month format_str = (data_path + 'cf_compliant_grid' + year_str + month_str + day_str + '*.nc') print('Looking for files with format ' + format_str) data_list = glob.glob(format_str) for j in range(0, len(data_list)): file_list.append(data_list[j]) cur_time = cur_time + timedelta(days=1) # Parse all of the dates and time in the interval and add them to the time list past_time = [] for file_name in file_list: date_str = file_name[-15:-3] year_str = date_str[0:4] month_str = date_str[4:6] day_str = date_str[6:8] hour_str = date_str[8:10] minute_str = date_str[10:12] second_str = '00' cur_time = datetime(int(year_str), int(month_str), int(day_str), int(hour_str), int(minute_str), 0) time_list.append(cur_time) # Sort time list and make sure time are at least xx min apart time_list.sort() time_list_sorted = 
deepcopy(time_list) time_list_final = [] past_time = [] for times in time_list_sorted: cur_time = times if(past_time == []): past_time = cur_time if(cur_time - past_time >= timedelta(minutes=minute_interval) and cur_time >= start_time and cur_time <= end_time): time_list_final.append(cur_time) past_time = cur_time return time_list_final # Get a Radar object given a time period in the CPOL dataset def get_grid_from_dda(time): year_str = "%04d" % time.year month_str = "%02d" % time.month day_str = "%02d" % time.day hour_str = "%02d" % time.hour minute_str = "%02d" % time.minute second_str = "%02d" % time.second file_name_str = (data_path + 'cf_compliant_grid' + year_str + month_str + day_str + hour_str + minute_str + '.nc') radar = pyart.io.read_grid(file_name_str) return radar # Plot the radars from given time. times = get_dda_times(start_year, start_month, start_day, start_hour, start_minute, end_year, end_month, end_day, end_hour, end_minute, minute_interval=0) def dms_to_decimal(degrees, minutes, seconds): if(degrees > 0): return degrees+minutes/60+seconds/3600 else: return degrees-minutes/60-seconds/3600 def get_bca(grid): berr_origin = [-12960.1,-23091.1] x,y = np.meshgrid(grid.x['data'], grid.y['data']) a = np.sqrt(np.multiply(x,x)+np.multiply(y,y)) b = np.sqrt(pow(x-berr_origin[0],2)+pow(y-berr_origin[1],2)) c = np.sqrt(berr_origin[0]*berr_origin[0]+berr_origin[1]*berr_origin[1]) theta_1 = np.arccos(x/a) theta_2 = np.arccos((x-berr_origin[1])/b) return np.arccos((a*a+b*b-c*c)/(2*a*b)) # Gets beam crossing angle over 2D grid centered over Radar 1. # grid_x, grid_y are cartesian coordinates from pyproj.Proj (or basemap) def get_bca(rad1_lon, rad1_lat, rad2_lon, rad2_lat, grid_lon, grid_lat): # Beam crossing angle needs cartesian coordinates p = ccrs.PlateCarree() p = p.as_geocentric() rad1 = p.transform_points(ccrs.PlateCarree().as_geodetic(), np.array(rad1_lon), np.array(rad1_lat)) rad2 = p.transform_points(ccrs.PlateCarree().as_geodetic(), np.array(rad2_lon), np.array(rad2_lat)) grid_lon, grid_lat = np.meshgrid(grid_lon, grid_lat) grid = p.transform_points(ccrs.PlateCarree().as_geodetic(), grid_lon, grid_lat, np.zeros(grid_lon.shape)) # Create grid with Radar 1 in center x = grid[:,:,0]-rad1[0,0] y = grid[:,:,1]-rad1[0,1] rad2 = rad2 - rad1 a = np.sqrt(np.multiply(x,x)+np.multiply(y,y)) b = np.sqrt(pow(x-rad2[0,0],2)+pow(y-rad2[0,1],2)) c = np.sqrt(rad2[0,0]*rad2[0,0]+rad2[0,1]*rad2[0,1]) theta_1 = np.arccos(x/a) theta_2 = np.arccos((x-rad2[0,1])/b) return np.arccos((a*a+b*b-c*c)/(2*a*b))Grid plotting codeThis code creates plots from all of the Grids developed by multidop This loads the Grid files and creates the animation# Reflectivity plot callback functions def plot_dd_lobe(frame_number): plt.clf() pyart_grid = get_grid_from_dda(times[frame_number]) level = 6 CPOLGridDisplay = pyart.graph.GridMapDisplay(pyart_grid) CPOLGridDisplay.plot_basemap(min_lat=-12.8, max_lat=-12.0, min_lon=130.4, max_lon=131.6, auto_range=False, lat_lines=[-12.8, -12.6, -12.4, -12.2, -12.0], lon_lines=[130.4, 130.6, 130.8, 131.0, 131.2, 131.4, 131.6], resolution='h') CPOLGridDisplay.basemap.drawmapscale(131.2, -12.1, 130.6, -12.6, 20) CPOLGridDisplay.basemap.fillcontinents() lons, lats = pyart_grid.get_point_longitude_latitude(level=level) # Plot every second arrow lons = lons[0::2,0::2] lats = lats[0::2,0::2] bca = get_bca(pyart_grid) bca = bca[0::2, 0::2] CSa = CPOLGridDisplay.basemap.contour(lons, lats, bca, latlon='True', levels=[math.pi/6, 5*math.pi/6], linewidths=2) cpol_latitude = -12.249166 
cpol_longitude = 131.04445 berr_latitude = -12.456944 berr_longitude = 130.925 berr_x, berr_y = CPOLGridDisplay.basemap(berr_longitude, berr_latitude) plt.annotate('Berrima', xy=(berr_x, berr_y), xycoords='data', fontweight='bold', fontsize=20) cpol_x, cpol_y = CPOLGridDisplay.basemap(cpol_longitude, cpol_latitude) plt.annotate('CPOL', xy=(cpol_x, cpol_y), xycoords='data', fontweight='bold', fontsize=20) def scale_bar(ax, length, location=(0.5, 0.05), linewidth=3): """ ax is the axes to draw the scalebar on. location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot length is the length of the scalebar in km. linewidth is the thickness of the scalebar. """ #Projection in metres, need to change this to suit your own figure utm = ccrs.UTM(18) #Get the extent of the plotted area in coordinates in metres x0, x1, y0, y1 = ax.get_extent(utm) #Turn the specified scalebar location into coordinates in metres sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1] #Generate the x coordinate for the ends of the scalebar bar_xs = [sbcx - length * 500, sbcx + length * 500] #Plot the scalebar ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k', linewidth=linewidth) #Plot the scalebar label ax.text(sbcx, sbcy, str(length) + ' km', transform=utm, horizontalalignment='center', verticalalignment='bottom') def plot_dd_lobes(radar1_loc, radar2_loc, radar1_name, radar2_name): ax = plt.axes(projection=ccrs.PlateCarree()) # Amf locations grid_lon = np.arange(radar1_loc[0]-1.5, radar1_loc[0]+1.5, 0.01) grid_lat = np.arange(radar1_loc[1]-1.5, radar1_loc[1]+1.5, 0.01) i5 = [dms_to_decimal(-97, 35, 37.68), dms_to_decimal(36, 29, 29.4)] i4 = [dms_to_decimal(-91, 21, 49.32), dms_to_decimal(36, 34, 44.4)] bca = get_bca(radar1_loc[0], radar1_loc[1], radar2_loc[0], radar2_loc[1], grid_lon, grid_lat) lon_gridded, lat_gridded = np.meshgrid(grid_lon, grid_lat) # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') SOURCE = 'Natural Earth' LICENSE = 'public domain' ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.COASTLINE) ax.add_feature(states_provinces, edgecolor='gray') ax.set_xticks(grid_lon[::int(len(grid_lon)/5)], crs=ccrs.PlateCarree()) ax.set_yticks(grid_lat[::int(len(grid_lon)/5)], crs=ccrs.PlateCarree()) lon_formatter = LongitudeFormatter(zero_direction_label=True) lat_formatter = LatitudeFormatter() ax.xaxis.set_major_formatter(lon_formatter) ax.yaxis.set_major_formatter(lat_formatter) plt.contour(lon_gridded, lat_gridded, bca, levels=[math.pi/6, 5*math.pi/6], linewidths=2, transform=ccrs.PlateCarree()) plt.annotate('ENX', xy=(ENX[0]+0.02, ENX[1]+0.01), fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.annotate('KIND', xy=(KIND[0]+0.02, KIND[1]+0.01), fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.annotate('Albany', xy=[Albany_airport[0]+0.02, Albany_airport[1]+0.01], fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.annotate('Delmar', xy=[Delmar[0]+0.02, Delmar[1]+0.01], fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.annotate('VOOR', xy=[VOOR[0]+0.02, VOOR[1]+0.01], fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.annotate('MEDU', xy=[MEDU[0]+0.02, MEDU[1]+0.01], fontweight='bold', fontsize=8, transform=ccrs.PlateCarree()) plt.plot(ENX[0], ENX[1], marker='d', linewidth=1, color='k') plt.plot(KIND[0], KIND[1], marker='d', 
linewidth=1, color='k') plt.plot(Albany_airport[0], Albany_airport[1], linewidth=1, marker='d', color='k') plt.plot(Delmar[0], Delmar[1], marker='d', linewidth=1, color='k') plt.plot(VOOR[0], VOOR[1], marker='d', linewidth=1, color='k') plt.plot(MEDU[0], MEDU[1], marker='d', linewidth=1, color='k') scale_bar(ax, 20, location=(0.1, 0.9),) ax.coastlines(resolution='10m') ax.stock_img() plt.xlim((grid_lon[0], grid_lon[-1])) plt.ylim((grid_lat[0], grid_lat[-1]))Plot DD lobes for XSAPR i4 and i5ENX = [dms_to_decimal(-74.0, 3.0, 50.0), dms_to_decimal(42.0, 35.0, 11.0)] KIND = [dms_to_decimal(-73.0, 42.0, 56.0), dms_to_decimal(42.0, 24.0, 36.0)] # Amf locations i5 = [dms_to_decimal(-97, 35, 37.68), dms_to_decimal(36, 29, 29.4)] i4 = [dms_to_decimal(-91, 21, 49.32), dms_to_decimal(36, 34, 44.4)] plt.figure(figsize=(8,10)) plot_dd_lobes(i4, i5, 'i4', 'i5') plt.title('KIND') radar_file = '/home/rjackson/xsapr/raw_files/sgpxsaprI5.00.20170801.001102.mnt.XSW170731231005.RAWG2TS.maint' radar = pyart.io.read(radar_file) print(radar.fields.keys()) max_lat=37.0 min_lat=36.0 max_lon=-97.0 min_lon=-98.3 sweep=3 i5 = [dms_to_decimal(-97, 35, 37.68), dms_to_decimal(36, 29, 29.4)] i4 = [dms_to_decimal(-97, 21, 49.32), dms_to_decimal(36, 34, 44.4)] grid_lat = np.arange(min_lat, max_lat, 0.01) grid_lon = np.arange(min_lon, max_lon, 0.01) bca = get_bca(i4[0], i4[1], i5[0], i5[1], grid_lon, grid_lat) grid_lon, grid_lat = np.meshgrid(grid_lon, grid_lat) display = pyart.graph.RadarMapDisplayCartopy(radar) fig = plt.figure(figsize=[10, 8]) display.plot_ppi_map('reflectivity', sweep=sweep, vmin=-8, vmax=64, min_lon=min_lon, max_lon=max_lon, min_lat=min_lat, max_lat=max_lat, resolution='50m', cmap=pyart.graph.cm.NWSRef, lat_lines=np.arange(min_lat, max_lat+0.2, 0.2), lon_lines=np.arange(min_lon, max_lon+0.2, 0.2), projection=ccrs.PlateCarree()) plt.contour(grid_lon, grid_lat, bca, levels=[np.pi/6, 5*np.pi/6], linewidths=2, colors='k')dict_keys(['spectrum_width', 'velocity', 'specific_differential_phase', 'normalized_coherent_power', 'differential_phase', 'reflectivity', 'total_power', 'cross_correlation_ratio', 'differential_reflectivity'])Custom Scatter%matplotlib inline import plotly.offline as pyo import plotly.graph_objects as go import matplotlib.pyplot as plt import pandas as pd import numpy as np plt.style.use('ggplot') bitcoin = pd.read_csv('data/bitcoin-usd.csv', parse_dates=['date'], index_col='date') bitcoin.head() data = [ go.Scatter( x = bitcoin.index, y = bitcoin[col], name=col, mode='markers', marker=dict( size=15, color='rgb(88,188,255)', symbol = 100, line=dict( width=1 ) ) ) for col in bitcoin.columns if col == 'close' ] layout = go.Layout( title='Close', xaxis=dict(title='Date'), yaxis=dict(title='Close') ) fig = go.Figure(data=data, layout=layout) pyo.plot(fig)From the M/EEG reading on MNE-pythonimport mne$k$-Nearest NeighborsIn this notebook we'll introduce our first classification algorithm, $k$-nearest neighbors. 
What You'll Accomplish- We'll see our first classification example,- You'll work through a simple algorithm, $k$-nearest neighbors,- We'll introduce our first classification performance measure, accuracy.Let's go!## Load the packages we'll need ## to get the iris data from sklearn.datasets import load_iris ## for data handling import pandas as pd import numpy as np ## for plotting import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") ## Load the data iris = load_iris() iris_df = pd.DataFrame(iris['data'],columns = ['sepal_length','sepal_width','petal_length','petal_width']) iris_df['iris_class'] = iris['target'] ## This chunk of code is going to plot the data sns.lmplot(data = iris_df, x = 'sepal_length', y = 'sepal_width',hue = 'iris_class',fit_reg=False, height = 6.5,legend=False) plt.legend(title='Iris Class', loc='upper left', labels=['setosa', 'versicolor', 'virginica'], fontsize = 12) plt.xlabel("Sepal Length",fontsize = 16) plt.ylabel("Sepal Width",fontsize = 16) plt.show()Since our iris data has three distinct classes, we can imagine a world in which we'd want to build a model that takes in `petal_width`, `petal_length`, `sepal_width`, and `sepal_length` then predicts what kind of iris we have.People build entire business models around classification problems. For example, CoverMyMeds started by solving the problem "How can I predict whether or not my prescription will need a prior authorization form?". Upstart tries to predict whether or not someone will be a good candidate for their loans.These problems are ubiquitous in our every day lives. Now lets start learning how we can use supervised learning techniques to solve them. A Simple AlgorithmWe'll now work through a classification problem with one of the simplest classification algorithms, $k$-nearest neighbors. KNN$k$-nearest neighbors is quite straightforward. When you want to classify an unlabeled point, you find the $k$ closest other data points in the data space. Whichever class is most present among the $k$ neighbors is what the algorithm classifies the unlabeled point as. In the case of ties, the class is randomly assigned from the tied classes.Let's look at a picture to see what we mean.## Here is some random data ## to illustrate the knn concept np.random.seed(440) xs = np.random.randn(50,2) os = np.random.randn(50,2)-np.array([3,0]) unlabeled = [-1.2,0] ## We now plot that data plt.figure(figsize = (12,9)) plt.plot(xs[:,0],xs[:,1],'rx',label = "X",markersize=8) plt.plot(os[:,0],os[:,1],'bo',label = "O",markersize=8) plt.plot(unlabeled[0],unlabeled[1],'gv',label = "Unknown",markersize=12) plt.xlabel("Feature 1", fontsize = 16) plt.ylabel("Feature 2", fontsize = 16) plt.legend(fontsize = 14) plt.show()Looking at the plot above, how would would knn classify the unlabeled point for $k=1$, $k=5$, $k=10$? Stratified Train Test SplitsBefore returning to our iris data set let's take a brief aside on training test splits for classification problems. Consider the following phony data.X = np.random.randn(10,4) y = np.zeros(10) y[2] = 1 y[7] = 1Now let's perform a train test split like we did in our regression notebooks.# import train_test_split from sklearn.model_selection import train_test_split # First input the features, then the target # specify what fraction of your data you want to test # then set a random_state X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=111) print(y_train) print(y_test)[1. 0. 0. 
1.]Notice anything?This can be avoided by performing a stratified train-test split.X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=111, stratify=y) print(y_train) print(y_test)Stratified splits first separate the data according to class. Each class is then randomly split in two. Let's illustrate this with a picture.Our particular data set above was an extreme example; however, this sort of thing can be an issue in a number of classification problems of interest where the desired class is a rare occurrence. One such example would be detecting cases of credit card fraud. Building an iris classifierWe'll now demonstrate the flow of a classification problem by using knn to build an iris classifier.## We can turn the data into numpy arrays (for ease of data manipulation) ## like so X = iris_df[['sepal_length','sepal_width','petal_length','petal_width']].to_numpy() y = iris_df['iris_class'].to_numpy() ## Let's try coding along! ## You write code for the train test split here ## I'll wait for a minute before typing the answer X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state = 614, shuffle=True, stratify=y)Fitting the ModelNow we can fit the model to our training data; let's use $k=3$.## Just like with regression we'll import from ## sklearn ## import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifierThe docs for `KNeighborsClassifier`: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html## Make the model knn = KNeighborsClassifier(n_neighbors = 3) ## Fit the model knn.fit(X_train,y_train)Performance Measures - Accuracy One way to measure how well our model did is to calculate its accuracy. Accuracy is the number of correct predictions divided by the number of total predictions we made. Let's see how well our model does on our training set.## Make a prediction on our train set y_predict = knn.predict(X_train) ## We calculate the accuracy here ## sum a list of booleans and True gets cast as 1 ## False gets cast as 0 print("Our model has a ", np.round(sum(y_predict == y_train)/len(y_train)*100,2), "% accuracy on the training set")Our model has a 94.64 % accuracy on the training setThat's not too bad!What could we do to the $k$-nearest neighbors model to change our accuracy? Also, is there anything else we could do to get a better idea of the generalization error? Cross Validation for Model AssessmentJust as we did for some of our regression models, we can assess multiple models at once and compare their average accuracies to choose the best model. You CodeImplement CV with $5$ splits. Set a random state so you can recreate your split. Going from $1$ to $20$ neighbors, find the model that has the best CV accuracy.I'll get you set up with the cross validation using a new Python class, `StratifiedKFold`.Remember, finish as much as you can in our allotted time.## import StratifiedKFold ## THIS IS NEW!!!
## This allows you to do stratified k fold cross validation from sklearn.model_selection import StratifiedKFold from sklearn.base import clone ## Make a kfold model object, with a random_state ## Note that we use StratifiedKFold just like KFold kfold = StratifiedKFold(5,shuffle = True,random_state = 440) ## You'll use this function for calculating the accuracy ## Just input the model, the input data and the target data def get_acc(model,X,y): pred = model.predict(X) return np.sum(pred == y)/len(y) ## You run 5 fold cross validation here ## Make an empty array to hold your cv accuracies here ## Loop through all the possible neighbors from 1 to max_neighbors max_neighbors = 20 ## Perform the cross validation loop here # Plot how the accuracy changes plt.figure(figsize=(10,8)) ## Plot the number of neighbors on the x ## plot the avg cross validation accuracy on the y plt.plot() ## Use these as your axes labels plt.xlabel("Number of Neighbors", fontsize=16) plt.ylabel("Average CV Accuracy (%)", fontsize=16) plt.show()From the above plot it looks like the "best" model here is going to be which one? Let's go ahead and calculate accuracy on our test set using this best model. Do that below.# Write your code hereHigher order eigenfunctions# Testing initial condition function L1 = 100 L2 = 100 ds = 1 s1 = s2 = np.arange(0, 100, ds) gamma = 1. beta = 1. m = 2 n = 3 Y, X = np.meshgrid(s1, s2) Z = CmnNeumann(0, X, Y, L1, L2, gamma, beta, m, n).T h = plt.contourf(X,Y,Z) plt.ylabel('First MT head position \n $s_1$ (nm)') plt.xlabel('Second MT head position \n $s_2$ (nm)') plt.colorbar(h) plt.show() # Testing solver initialization pde_solver_mn = FPPassiveParaCNSolver('tests/CN_tests/para_tests/CNnm_tests/CN23/FP_passive_params.yaml') grid = CmnNeumannInit(pde_solver_mn, m,n) print(grid.todense()) s1 = np.asarray(pde_solver_mn.s1) #print(s1) s2 = np.asarray(pde_solver_mn.s2) cs = plt.contourf(s2, s1, grid.todense()) plt.ylabel('First MT head position \n $s_1$ (nm)') plt.xlabel('Second MT head position \n $s_2$ (nm)') plt.colorbar(cs) plt.show() pde_solver_mn.Save() # Read in h5py file h5_data = h5py.File('tests/CN_tests/para_tests/CNnm_tests/CN23/FP_pass_para_CN.h5', 'r') L1 = h5_data.attrs['L1'] L2 = h5_data.attrs['L2'] gamma = h5_data.attrs['gamma'] beta = h5_data.attrs['beta'] dt = h5_data.attrs['dt'] # Get time points time = np.asarray(h5_data['time']) nt = np.asarray(time.size) err_arr = np.zeros(nt-1) rel_err_arr = np.zeros(nt-1) # Get solution and space points xl_dist = np.asarray(h5_data['XL_data/XL_distr']) s1 = np.asarray(h5_data['MT_data/s1']) s2 = np.asarray(h5_data['MT_data/s2']) # Loop over time and get error for each step Y, X = np.meshgrid(s1, s2) for i in range(nt-1): Z = CmnNeumann(time[i], X, Y, L1, L2, gamma, beta,m,n) err = xl_dist[:,:,i] - Z #err_arr[i] = dt*np.sum(err) err_arr[i] = np.sqrt(dt*np.sum(np.square(err))) time_point = 10 err_grid = xl_dist[:,:,time_point] - CmnNeumann(time[time_point], X, Y, L1, L2, gamma, beta,m,n) #print(err_grid) #print(np.sqrt(dt*np.sum(np.square(err_grid)))) print("Point in time: ", time_point) print("Summed error at time: ", np.sqrt(dt*np.sum(np.square(err_grid)))) fig, axarr = plt.subplots(2,2, figsize=(14,10)) cs00 =axarr[0,0].contourf(X,Y,xl_dist[:,:,time_point]) cs01 = axarr[0,1].contourf(X,Y,CmnNeumann(time[time_point], X, Y, L1, L2, gamma, beta, m ,n)) cs10 = axarr[1,0].contourf(X,Y,err_grid) fig.colorbar(cs00, ax=axarr[0,0]) fig.colorbar(cs01, ax=axarr[0,1]) fig.colorbar(cs10, ax=axarr[1,0]) axarr[1,1].scatter(time[:-1], err_arr) 
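# (The scatter in the bottom-right panel shows err_arr, i.e. sqrt(dt * sum(err**2)) between the numerical crosslink distribution and the analytic CmnNeumann eigenfunction at each time step.)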
plt.tight_layout() # Plot number of crosslinks over time xl_num_arr = np.zeros(nt) n_tot = np.sum(xl_dist[:,:,0]) print("Total crosslink number: ", n_tot) for i in range(nt): xl_num_arr[i] = (np.sum(xl_dist[:,:,i])/n_tot)-1 plt.plot(time, xl_num_arr) plt.ticklabel_format(axis='y', style='sci', scilimits=(-2,2)) plt.ylabel("Relative crosslink \n number change") plt.xlabel("Time") plt.tight_layout() # Get the flux at the edge of tubules (Minus and plus ends of both microtubules) # This is a first order accurate calculation whereas the solver should be second order accurate deriv_ds = np.asarray([[-1./ds,1./ds]]) print(deriv_ds.shape) ## S1 minus end flux # Rearrange matrix slice to so you can perform matrix operations accurately S1m_flux = np.swapaxes(xl_dist[0:2,:, :],0,2) S1m_flux = np.swapaxes(S1m_flux,1,2) # Take first order derivative S1m_flux = np.squeeze(np.matmul(deriv_ds, S1m_flux)) # First index is time, second index is other tubule location ## S1 Plus end flux S1p_flux = np.swapaxes(xl_dist[-2:,:, :],0,2) S1p_flux = np.swapaxes(S1p_flux,1,2) S1p_flux = np.squeeze(np.matmul(deriv_ds, S1p_flux)) ## S2 minus end flux S2m_flux = np.swapaxes(xl_dist[:,0:2, :],0,2) S2m_flux = np.swapaxes(S2m_flux,1,2) S2m_flux = np.squeeze(np.matmul(S2m_flux, deriv_ds.T)) ## S2 Plus end flux S2p_flux = np.swapaxes(xl_dist[:,-2:, :],0,2) S2p_flux = np.swapaxes(S2p_flux,1,2) S2p_flux = np.squeeze(np.matmul(S2p_flux, deriv_ds.T)) # Graphing fig, axarr = plt.subplots(2,2, figsize=(14,10)) csm = axarr[0,0].contourf(time, s2, S1m_flux[:-1,:].T) fig.colorbar(csm, ax=axarr[0,0]) csp = axarr[0,1].contourf(time, s2, S1p_flux[:-1,:].T) fig.colorbar(csp, ax=axarr[0,1]) axarr[1,0].plot(time, np.sum(S1m_flux[:-1,:],axis=1)) axarr[1,1].plot(time, np.sum(S1p_flux[:-1,:],axis=1)) axarr[0,0].set_title("First MT, \n minus-end flux") axarr[0,1].set_title("First MT, \n plus-end flux") plt.tight_layout() fig2, axarr2 = plt.subplots(2,2, figsize=(14,10)) cs2m = axarr2[0,0].contourf(time, s1, S2m_flux[:-1,:].T) fig2.colorbar(cs2m, ax=axarr2[0,0]) cs2p = axarr2[0,1].contourf(time, s1, S2p_flux[:-1,:].T) fig2.colorbar(cs2p, ax=axarr2[0,1]) axarr2[1,0].plot(time, np.sum(S2m_flux[:-1,:],axis=1)) axarr2[1,1].plot(time, np.sum(S2p_flux[:-1,:],axis=1)) axarr2[0,0].set_title("Second MT, \n minus-end flux") axarr2[0,1].set_title("Second MT, \n plus-end flux") plt.tight_layout()(1, 2)Azure ML Hardware Accelerated Models Quickstart This tutorial will show you how to deploy an image recognition service based on the ResNet 50 classifier in just a few minutes using the Azure Machine Learning Accelerated AI service. Get more help from our [documentation](https://aka.ms/aml-real-time-ai) or [forum](https://aka.ms/aml-forum).We will use an accelerated ResNet50 featurizer running on an FPGA. This functionality is powered by Project Brainwave, which handles translating deep neural networks (DNN) into an FPGA program. Request Quota**IMPORTANT:** You must [request quota](https://aka.ms/aml-real-time-ai-request) and be approved before you can successfully run this notebook. Environment setup1. Download and install [Git](https://git-scm.com/downloads) 2.16 or later1. Open a Git prompt and clone this repo: `git clone https://github.com/Azure/aml-real-time-ai`1. Install conda (Python 3.6): https://conda.io/miniconda.html1. Open an Anaconda Prompt and run the rest of the commands in the prompt. On Windows the prompt will look like: `(base) C:\>`1. Create the environment: `conda env create -f aml-real-time-ai/environment.yml`1. 
Activate the environment: 1. Windows: `conda activate amlrealtimeai` 1. Mac/Linux: `source activate amlrealtimeai`1. Launch the Jupyter notebook browser: `jupyter notebook` 1. In the browser, open this notebook by navigating to notebooks/resnet50/00_QuickStart.ipynb. (If you're using Chrome, copy and paste the URL with the notebook token into the address bar).1. Run through each cell and enter the appropriate information as necessary (e.g. Azure subscription ID, resource group ID, Model Management Account, etc.) Importsimport os import tensorflow as tf import amlrealtimeai from amlrealtimeai import resnet50Image preprocessingWe'd like our service to accept JPEG images as input. However, the input to ResNet50 is a tensor. So we need code that decodes JPEG images and does the preprocessing required by ResNet50. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as strings) and produces a tensor that is ready to be featurized by ResNet50.# Input images as a two-dimensional tensor containing an arbitrary number of images represented as strings import amlrealtimeai.resnet50.utils in_images = tf.placeholder(tf.string) image_tensors = resnet50.utils.preprocess_array(in_images) print(image_tensors.shape)FeaturizerWe use ResNet50 as a featurizer. In this step we initialize the model. This downloads a TensorFlow checkpoint of the quantized ResNet50.from amlrealtimeai.resnet50.model import LocalQuantizedResNet50 model_path = os.path.expanduser('~/models') model = LocalQuantizedResNet50(model_path) print(model.version)ClassifierThe model we downloaded includes a classifier which takes the output of the ResNet50 and identifies an image. This classifier is trained on the ImageNet dataset. We are going to use this classifier for our service. The next [notebook](01_ModelBuild.ipynb) shows how to train a classifier for a different data set. The input to the classifier is a tensor matching the output of our ResNet50 featurizer.model.import_graph_def(include_featurizer=False) print(model.classifier_input.shape)Service DefinitionNow that we've defined the image preprocessing, featurizer, and classifier that we will execute on our service, we can create a service definition. The service definition is a set of files generated from the model that allow us to deploy to the FPGA service. The service definition consists of a pipeline. The pipeline is a series of stages that are executed in order. We support TensorFlow stages, Keras stages, and BrainWave stages. The stages will be executed in order on the service, with the output of each stage fed as input to the subsequent stage.To create a TensorFlow stage we specify a session containing the graph (in this case we are using the default graph) and the input and output tensors to this stage.
We use this information to save the graph so that we can execute it on the service.from amlrealtimeai.pipeline import ServiceDefinition, TensorflowStage, BrainWaveStage save_path = os.path.expanduser('~/models/save') service_def_path = os.path.join(save_path, 'service_def.zip') service_def = ServiceDefinition() service_def.pipeline.append(TensorflowStage(tf.Session(), in_images, image_tensors)) service_def.pipeline.append(BrainWaveStage(model)) service_def.pipeline.append(TensorflowStage(tf.Session(), model.classifier_input, model.classifier_output)) service_def.save(service_def_path) print(service_def_path)DeployTime to create a service from the service definition. You need a Model Management Account in the **East US 2** location. Go to our [GitHub repo](https://aka.ms/aml-real-time-ai) "docs" folder to learn how to create a Model Management Account and find the required information below.This code creates the deployment client that we will use to deploy the service. Follow the instructions in the output to sign in to your account.from amlrealtimeai import DeploymentClient subscription_id = "80defacd-509e-410c-9812-6e52ed6a0016" resource_group = "CMS_FPGA_Resources" model_management_account = "CMS_FPGA_1" model_name = "resnet50-model" service_name = "quickstart-service" deployment_client = DeploymentClient(subscription_id, resource_group, model_management_account)Upload the service definition to the model management service.model_id = deployment_client.register_model(model_name, service_def_path)Create a service from the model that we registered. If this is a new service then we create it. If you already have a service with this name then the existing service will be updated to use this model.service = deployment_client.get_service_by_name(service_name) if(service is None): service = deployment_client.create_service(service_name, model_id) else: service = deployment_client.update_service(service.id, model_id)ClientThe service supports gRPC and the TensorFlow Serving "predict" API. We provide a client that can call the service to get predictions.from amlrealtimeai import PredictionClient client = PredictionClient(service.ipAddress, service.port)To understand the results we need a mapping to the human readable imagenet classesimport requests classes_entries = requests.get("https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt").text.splitlines() !curl -O https://upload.wikimedia.org/wikipedia/commons/8/8d/American_bison_k5680-1.jpgWe can now send an image to the service and get the predictions. ![title](American_bison_k5680-1.jpg)Bison from Wikimediaimage_file = 'American_bison_k5680-1.jpg' results = client.score_image(image_file) # map results [class_id] => [confidence] results = enumerate(results) # sort results by confidence sorted_results = sorted(results, key=lambda x: x[1], reverse=True) # print top 5 results for top in sorted_results[:5]: print(classes_entries[top[0]], 'confidence:', top[1])CleanupRun the cell below to delete your service.services = deployment_client.list_services() for service in filter(lambda x: x.name == service_name, services): print(service.id) deployment_client.delete_service(service.id) models = deployment_client.list_models() for model in filter(lambda x: x.name == model_name, models): print(model.id) deployment_client.delete_model(model.id)1. 
Plot By Avg Number of Shifts/Player## Load Dataframes game_df = pd.read_csv('data/upd_game.csv') team_to_analyze = '' game_df = game_df[(game_df.home_team_name == team_to_analyze) |\ (game_df.away_team_name == team_to_analyze)] ## Get Stats For Winning Teams winning_df = game_df.apply(lambda x: get_winning_stats(x),axis = 1) ## Create Win Count win_cnt = winning_df.groupby('winner_avgNumShift')\ .agg({'winner':'count'}).reset_index() win_cnt.columns = ['avgNumShift','win_count'] ## Create Loss Count los_cnt = winning_df.groupby('loser_avgNumShift')\ .agg({'loser':'count'}).reset_index() los_cnt.columns = ['avgNumShift','loss_count'] ## Merge Counts winlos_cnt = win_cnt.merge(los_cnt) winlos_cnt['win_ratio'] = winlos_cnt['win_count'] /\ (winlos_cnt['win_count'] + winlos_cnt['loss_count']) # ## Remove Outliers winlos_cnt = winlos_cnt[winlos_cnt.avgNumShift >= 18] winlos_cnt = winlos_cnt[winlos_cnt.avgNumShift <= 24] %matplotlib inline winlos_cnt[['avgNumShift','win_ratio']].plot(x='avgNumShift',kind = 'bar')2. Plot by Avg Ice Time## Get Stats For Winning Teams winning_df = game_df.apply(lambda x: get_winning_stats(x),axis = 1) ## Create Win Count win_cnt = winning_df.groupby('winner_avgIceTime')\ .agg({'winner':'count'}).reset_index() win_cnt.columns = ['avg_IceTime','win_count'] ## Create Loss Count los_cnt = winning_df.groupby('loser_avgIceTime')\ .agg({'loser':'count'}).reset_index() los_cnt.columns = ['avg_IceTime','loss_count'] ## Merge Counts winlos_cnt = win_cnt.merge(los_cnt) ## Remove Outliers winlos_cnt = winlos_cnt[winlos_cnt.avg_IceTime >= 500] winlos_cnt = winlos_cnt[winlos_cnt.avg_IceTime <= 1060] ## Bin Data, Group, and Calc Winrate winlos_cnt['bins'] = pd.cut(winlos_cnt.avg_IceTime,bins=range(950,1070,10)) winlos_cnt = winlos_cnt.groupby('bins').agg({'win_count':'sum','loss_count':'sum'}) winlos_cnt['win_ratio'] = winlos_cnt['win_count'] /\ (winlos_cnt['win_count'] + winlos_cnt['loss_count']) winlos_cnt.reset_index(inplace= True) #Plot %matplotlib inline winlos_cnt[['bins','win_ratio']].plot(x = 'bins',kind = 'bar')Vectorize with count and tf-idfKeep words appearing in 10% to 70 % of the posts.from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import CountVectorizer # Posts to a matrix of token counts cntizer = CountVectorizer(analyzer="word", max_features=1500, tokenizer=None, preprocessor=None, stop_words=None, max_df=0.7, min_df=0.1) # Learn the vocabulary dictionary and return term-document matrix print("CountVectorizer...") X_train = cntizer.fit_transform(X_train) X_test = cntizer.transform(X_test) # Transform the count matrix to a normalized tf or tf-idf representation tfizer = TfidfTransformer() print("Tf-idf...") # Learn the idf vector (fit) and transform a count matrix to a tf-idf representation X_train = tfizer.fit_transform(X_train).toarray() X_test = tfizer.transform(X_test).toarray() print("MBTI 1st row: %s" % translate_back(y_train[0,:])) print("Y: Binarized MBTI 1st row: %s" % y_train[0,:]) feature_names = list(enumerate(cntizer.get_feature_names())) feature_names[:10]CountVectorizer... Tf-idf... 
MBTI 1st row: ISFP Y: Binarized MBTI 1st row: [0 1 0 1]Linear SVM - One vs Restfrom sklearn.svm import LinearSVC classif = OneVsRestClassifier(LinearSVC()) classif.fit(X_train, y_train) y = classif.predict(X_test) print('accuracy_score', accuracy_score(y_test, y), '\n', 'roc_auc_score', roc_auc_score(y_test, y), '\n', 'hamming_loss', hamming_loss(y_test, y))accuracy_score 0.3319884726224784 roc_auc_score 0.6007018964536487 hamming_loss 0.2404899135446686Chained Linear SVMfrom sklearn.svm import LinearSVC classif = ClassifierChain(LinearSVC()) classif.fit(X_train, y_train) y = classif.predict(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, y.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.3285302593659942 roc_auc_score 0.6273825065533728 hamming_loss 0.2489913544668588SVM, Gaussian kernel - One vs Restfrom sklearn.svm import SVC classif = OneVsRestClassifier(SVC(gamma='scale')) classif.fit(X_train, y_train) y = classif.predict(X_test) print('accuracy_score', accuracy_score(y_test, y), '\n', 'roc_auc_score', roc_auc_score(y_test, y), '\n', 'hamming_loss', hamming_loss(y_test, y))accuracy_score 0.2904899135446686 roc_auc_score 0.5665363295727185 hamming_loss 0.24927953890489912Chained SVM, Gaussian kernelfrom sklearn.svm import SVC classif = ClassifierChain(SVC(gamma='scale')) classif.fit(X_train, y_train) y = classif.predict(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, y.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.2881844380403458 roc_auc_score 0.5611140850486074 hamming_loss 0.2524495677233429MLKNNfrom skmultilearn.adapt import MLkNN # Note that this classifier can throw up errors when handling sparse matrices. 
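# Converting the inputs to plain dense numpy arrays below sidesteps those sparse-format issues (at the cost of extra memory).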
X_train_mlknn = lil_matrix(X_train).toarray() y_train_mlknn = lil_matrix(y_train).toarray() X_test_mlknn = lil_matrix(X_test).toarray() classif = MLkNN() classif.fit(X_train_mlknn, y_train_mlknn) y = classif.predict(X_test_mlknn) yp = classif.predict_proba(X_test_mlknn) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.2622478386167147 roc_auc_score 0.5658352239225045 hamming_loss 0.28285302593659944Chained GaussianNBfrom sklearn.naive_bayes import GaussianNB # initialize classifier chains multi-label classifier classif = ClassifierChain(GaussianNB()) classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.20922190201729107 roc_auc_score 0.6912461019993441 hamming_loss 0.33645533141210376Chained BernoulliNBfrom sklearn.naive_bayes import BernoulliNB classif = ClassifierChain(BernoulliNB(alpha=1.0, binarize=None, class_prior=None, fit_prior=True)) classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.2904899135446686 roc_auc_score 0.7198294564753879 hamming_loss 0.25994236311239194Chained BaggedBernoulliNBfrom sklearn.ensemble import BaggingClassifier classif = ClassifierChain(BaggingClassifier(base_estimator=BernoulliNB())) classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.2685878962536023 roc_auc_score 0.7004657857359968 hamming_loss 0.2889048991354467Chained Adaboost with Decision Treesfrom sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier classif = ClassifierChain(AdaBoostClassifier(base_estimator=DecisionTreeClassifier())) classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.1642651296829971 roc_auc_score 0.5412128136372868 hamming_loss 0.3612391930835735Chained LogisticRegressionfrom sklearn.linear_model import LogisticRegression classif = ClassifierChain(LogisticRegression(solver='lbfgs')) classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y.toarray()), '\n', 'roc_auc_score', roc_auc_score(y_test, yp.toarray()), '\n', 'hamming_loss', hamming_loss(y_test, y.toarray()))accuracy_score 0.3504322766570605 roc_auc_score 0.7446379886968562 hamming_loss 0.23631123919308358RandomForestfrom sklearn.ensemble import RandomForestClassifier classif = RandomForestClassifier(n_estimators=100, class_weight=None) classif.fit(X_train, y_train) y = classif.predict(X_test) print('accuracy_score', accuracy_score(y_test, y), '\n', 'roc_auc_score', roc_auc_score(y_test, y), '\n', 'hamming_loss', hamming_loss(y_test, 
y))accuracy_score 0.30086455331412104 roc_auc_score 0.6340715137209887 hamming_loss 0.2695965417867435FCNN (MLP)from sklearn.neural_network import MLPClassifier classif = MLPClassifier() classif.fit(X_train, y_train) y = classif.predict(X_test) yp = classif.predict_proba(X_test) print('accuracy_score', accuracy_score(y_test, y), '\n', 'roc_auc_score', roc_auc_score(y_test, yp), '\n', 'hamming_loss', hamming_loss(y_test, y))accuracy_score 0.30086455331412104 roc_auc_score 0.7124481967804894 hamming_loss 0.2695965417867435Solving an overdetermined system using the pseudo-inverseConsider the overdetermined system corresponding to cat-brain from Chapter 2.There are 15 training examples, each with inputs and the desired output specified.Our goal is to determine 3 unknowns (w0, w1, b).This can be cast as an over-determined system of equations$$A\vec{w} = \vec{y}$$where$$ A =\begin{bmatrix} 0.11 & 0.09 & 1.00 \\ 0.01 & 0.02 & 1.00 \\ 0.98 & 0.91 & 1.00 \\ 0.12 & 0.21 & 1.00 \\ 0.98 & 0.99 & 1.00 \\ 0.85 & 0.87 & 1.00 \\ 0.03 & 0.14 & 1.00 \\ 0.55 & 0.45 & 1.00 \\ 0.49 & 0.51 & 1.00 \\ 0.99 & 0.01 & 1.00 \\ 0.02 & 0.89 & 1.00 \\ 0.31 & 0.47 & 1.00 \\ 0.55 & 0.29 & 1.00 \\ 0.87 & 0.76 & 1.00 \\ 0.63 & 0.24 & 1.00\end{bmatrix}\;\;\;\;\;\;\;\vec{y} = \begin{bmatrix} -0.8 \\ -0.97 \\ 0.89 \\ -0.67 \\ 0.97 \\ 0.72 \\ -0.83 \\ 0.00 \\ 0.00 \\ 0.00 \\ -0.09 \\ -0.22 \\ -0.16 \\ 0.63 \\ 0.37\end{bmatrix}\;\;\;\;\;\;\;\vec{w} = \begin{bmatrix} w_{0}\\w_{1}\\b\end{bmatrix}$$We solve for $\vec{w}$ using the pseudo-inverse formula $\vec{w} = (A^T A)^{-1} A^T \vec{y}$import numpy as np # Let us revisit our cat brain data set # Notice that there are 15 training examples, with 3 # unknowns (w0, w1, b). # This is an over-determined system. # It can be easily seen that the solution is roughly # $w_{0} = 1, w_{1} = 1, b = -1$. # It has been deliberately chosen as such. # But the equations are not fully consistent (i.e., there is # no solution that satisfies all the equations). # We want to find the values that minimize the residual Aw - y. # This is what the pseudo-inverse does.
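# Derivation sketch: minimizing the squared error ||Aw - y||^2 leads to the normal equations
# A^T A w = A^T y, so w = (A^T A)^{-1} A^T y whenever A^T A is invertible.
# (np.linalg.pinv(A) @ y, or np.linalg.lstsq(A, y, rcond=None)[0], returns the same
# least-squares solution and is numerically more robust than forming the inverse explicitly.)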
def pseudo_inverse(A): return np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T) # The usual cat-brain input dataset X = np.array([[0.11, 0.09], [0.01, 0.02], [0.98, 0.91], [0.12, 0.21], [0.98, 0.99], [0.85, 0.87], [0.03, 0.14], [0.55, 0.45], [0.49, 0.51], [0.99, 0.01], [0.02, 0.89], [0.31, 0.47], [0.55, 0.29], [0.87, 0.76], [0.63, 0.24]]) # Output threat score modeled as a vector y = np.array([-0.8, -0.97, 0.89, -0.67, 0.97, 0.72, -0.83, 0.00, 0.00, 0.00, -0.09, -0.22, -0.16, 0.63, 0.37]) A = np.column_stack((X, np.ones(15))) # Column stack will add an additional column of 1s to the training # dataset to represent the coefficient of the bias w = np.matmul(pseudo_inverse(A), y) print("The solution is {}\n" "Note that this is almost equal to [1.0, 1.0, -1.0])".format(w))The solution is [ 1.07661761 0.89761672 -0.95816936] Note that this is almost equal to [1.0, 1.0, -1.0])Case Study - Sales Data# All imports import numpy as np import matplotlib.pyplot as plt import pandas as pdSales and Profit data is read in dataframe "sales"# Read file sales = pd.read_excel('sales.xlsx') sales # Read file and set 1st two columns as index sales = pd.read_excel('sales.xlsx', index_col = [0,1]) salesExample - 1 Display first 3 land last 3 rows of the sales dataframesales.head() # Default - returns top 5 rows sales.head(3) sales.tail() sales.tail(3)Example - 2 Display the information about the data stored in data framesales.info() MultiIndex: 23 entries, ('Africa', 'Western Africa') to ('USCA', 'Canada') Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 No_of_Orders 23 non-null int64 1 Profit 23 non-null float64 2 Sales 23 non-null float64 dtypes: float64(2), int64(1) memory usage: 1.6+ KBDisplay the statistical information about the data in dataframesales.describe() sales[["Sales", "Profit"]].plot(kind= "box", subplots= True) plt.show() sales["Profit"] df = pd.read_csv('rating.csv', index_col = [2,1]) df.head() df = pd.read_csv('forestfires.csv') print(df.describe()) print(df.columns) print(df.shape)X Y FFMC DMC DC ISI \ count 517.000000 517.000000 517.000000 517.000000 517.000000 517.000000 mean 4.669246 4.299807 90.644681 110.872340 547.940039 9.021663 std 2.313778 1.229900 5.520111 64.046482 248.066192 4.559477 min 1.000000 2.000000 18.700000 1.100000 7.900000 0.000000 25% 3.000000 4.000000 90.200000 68.600000 437.700000 6.500000 50% 4.000000 4.000000 91.600000 108.300000 664.200000 8.400000 75% 7.000000 5.000000 92.900000 142.400000 713.900000 10.800000 max 9.000000 9.000000 96.200000 291.300000 860.600000 56.100000 temp RH wind rain area count 517.000000 517.000000 517.000000 517.000000 517.000000 mean 18.889168 44.288201 4.017602 0.021663 12.847292 std 5.806625 16.317469 1.791653 0.295959 63.655[...]Load pickled dataset into memoryname_pickle = '../data/train/trainsh1.pickle' with open(name_pickle, 'rb') as f: print('Unpickling ' + name_pickle) load = pickle.load(f) dataset = load['data'] labels = load['labels'] del load print('dataset shape:', dataset.shape) print('labels shape:', labels.shape)Reformat data for training- Divide each file with 240000 samples into smaller batch_samples ~= size of receptive field of eegnet- Keep valid_dataset nr of samples intact for proper validationdef normalize_array(array): # Normalize mean=0 and sigma=0.25: axis=0 is along columns, vertical lines. 
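# (Note: after the mean is removed, dividing by 2*np.ptp below bounds each channel to roughly [-0.5, 0.5]; it rescales by the peak-to-peak range rather than enforcing a fixed standard deviation.)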
array -= np.mean(array, axis=0) array /= 2*np.ptp(array, axis=0) return array def clean_normalize_data_labels(data, labels, sigma=0.5): data_tmp = list() labels_tmp = list() for idx, d in enumerate(data): if (np.count_nonzero(d) < 10) or (np.any(np.std(d, axis=0) < sigma)): continue d = normalize_array(d) data_tmp.append(d) labels_tmp.append(labels[idx]) return np.asarray(data_tmp), np.asarray(labels_tmp) #Output size of the layer num_labels = 2 #60% for train and 40% for validation split_idx = int(dataset.shape[0]*0.8) #nr of splits nrOfSplits = 100 def format_data(data, labels, nr_splits): shape = data.shape # reshape [batch, samples, channels] into [batch * samples, channels] data = np.reshape(data, (shape[0]*shape[1], shape[2])) # Split 2D array into the desired smaller chuncks data = np.asarray(np.split(data, shape[0]*nr_splits, axis=0)) # labels are obtained by repeating original labels nr_splits times labels = np.repeat((np.arange(num_labels) == labels[:,None]).astype(np.float32), nr_splits, axis=0) # normalize and eliminate batches that only contain drop-outs data, labels = clean_normalize_data_labels(data, labels, 0.01) # data has to be 4D for tensorflow (insert an empty dimension) data = data[:,None,:,:] # shuffle data and labels mantaining relation between them. Important after the small batches. shuffle_idx = np.random.permutation(data.shape[0]) data = data[shuffle_idx,:,:,:] labels = labels[shuffle_idx] return data, labels # shuffle file data shuffle_idx = np.random.permutation(dataset.shape[0]) dataset = dataset[shuffle_idx,:,:] labels = labels[shuffle_idx] # format and split data into smaller chunks train_dataset, train_labels = format_data(dataset[:split_idx], labels[:split_idx], nrOfSplits) valid_dataset, valid_labels = format_data(dataset[split_idx:-1], labels[split_idx:-1], nrOfSplits) del dataset, labels valid_dataset = valid_dataset[:200] valid_labels = valid_labels[:200] print('train_dataset shape:', train_dataset.shape, 'train_labels shape:', train_labels.shape, 'mix:', float(np.count_nonzero(train_labels[:,1], axis=0))/train_labels.shape[0]) print('valid_dataset shape:', valid_dataset.shape, 'valid_labels shape:', valid_labels.shape, 'mix:', float(np.count_nonzero(valid_labels[:,1], axis=0))/valid_labels.shape[0])Plot some data to have an idea of how data looks likeplt.subplot(2, 1, 1) plt.plot(train_dataset[1,0,:,0]) plt.plot(train_dataset[3,0,:,0])EEGNET implementationPart of https://arxiv.org/pdf/1609.03499.pdf that most concerns classification:"As a last experiment we looked at speech recognition with WaveNets on the TIMIT (Garofolo et al., 1993) dataset. For this task we added a mean-pooling layer after the dilation convolutions that aggregated the activations to coarser frames spanning 10 milliseconds (160 x downsampling). The pooling layer was followed by a few non-causal convolutions. We trained WaveNet with two loss terms, one to predict the next sample and one to classify the frame, the model generalized better than with a single loss and achieved 18.8 PER on the test set, which is to our knowledge the best score obtained from a model trained directly on raw audio on TIMIT."Look into: http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43290.pdf"Input: This layer extracts 275 ms waveform segments from each of M input microphones. Successive inputs are hopped by 10ms. At the 16kHz sampling rate used in our experiments each segment contains M X 4401 dimensions."....#How many files are supplied per batch. 
batch_size=16 #Number of samples in each batch entry batch_samples=train_dataset.shape[2] #How many filters to learn for the input. input_channels=16 #How many filters to learn for the residual. residual_channels=2*input_channels # size after pooling layer pool_size = 2400 # convolution filters width filter_width=3 def network(batch_data, reuse=False, is_training=True): with tf.variable_scope('eegnet_network', reuse=reuse): with slim.arg_scope([slim.batch_norm], is_training=is_training): with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_initializer=slim.xavier_initializer(), normalizer_fn=slim.batch_norm): with tf.variable_scope('input_layer'): hidden = slim.conv2d(batch_data, residual_channels, [1, filter_width], stride=1, rate=1, activation_fn=None, scope='conv1') with tf.variable_scope('hidden'): with tf.variable_scope('layer1'): layer_input = hidden hidden = slim.conv2d(hidden, 2*residual_channels, [1, filter_width], stride=1, rate=2, activation_fn=None, scope='dilconv') filtr, gate = tf.split(3, 2, hidden) # split features in half hidden = tf.mul(tf.tanh(filtr), tf.sigmoid(gate), name='filterXgate') hidden = slim.conv2d(hidden, residual_channels, 1, activation_fn=None, scope='1x1skip') skip = hidden # skip conn hidden = tf.add(hidden, layer_input) # residual conn with tf.variable_scope('layer2'): layer_input = hidden hidden = slim.conv2d(hidden, 2*residual_channels, [1, filter_width], stride=1, rate=4, activation_fn=None, scope='dilconv') filtr, gate = tf.split(3, 2, hidden) # split features in half hidden = tf.mul(tf.tanh(filtr), tf.sigmoid(gate), name='filterXgate') hidden = slim.conv2d(hidden, residual_channels, 1, activation_fn=None, scope='1x1skip') skip = tf.add(skip, hidden) # skip conn hidden = tf.add(hidden, layer_input) # residual conn with tf.variable_scope('layer3'): hidden = slim.conv2d(hidden, 2*residual_channels, [1, filter_width], stride=1, rate=8, activation_fn=None, scope='dilconv') filtr, gate = tf.split(3, 2, hidden) # split features in half hidden = tf.mul(tf.tanh(filtr), tf.sigmoid(gate), name='filterXgate') hidden = slim.conv2d(hidden, residual_channels, 1, activation_fn=None, scope='1x1skip') skip = tf.add(skip, hidden) # skip conn with tf.variable_scope('skip_processing'): hidden = tf.nn.relu(skip) hidden = slim.avg_pool2d(hidden, [1, batch_samples*2//pool_size], [1, batch_samples//pool_size]) # 1 x 2400 x residual_channels hidden = slim.conv2d(hidden, 32, 1, activation_fn=tf.nn.relu, scope='1x1compress1') hidden = slim.conv2d(hidden, 16, [1, 8], stride=4, activation_fn=tf.nn.relu, scope='1x5reduce1') # 1 x 600 x 16 hidden = slim.conv2d(hidden, 8, 1, activation_fn=tf.nn.relu, scope='1x1compress2') hidden = slim.conv2d(hidden, 4, [1, 8], stride=4, activation_fn=tf.nn.relu, scope='1x5reduce2') # 1 x 150 x 4 hidden = slim.conv2d(hidden, 2, 1, activation_fn=tf.nn.relu, scope='1x1compress3') hidden = slim.conv2d(hidden, 2, [1, 6], stride=3, activation_fn=tf.nn.relu, scope='1x5reduce3') # 1 x 75 x 2 with tf.variable_scope('logits'): hidden = slim.dropout(hidden, 0.7, is_training=is_training) hidden = slim.flatten(hidden) logits = slim.fully_connected(hidden, num_labels, activation_fn=None, normalizer_fn=None, scope='fc1') return logits #number of steps after which learning rate is decayed decay_steps=500 #Construct computation graph graph = tf.Graph() with graph.as_default(): # Input data tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, 1, batch_samples, input_channels)) tf_train_labels = tf.placeholder(tf.int32, shape=(batch_size, 
num_labels)) tf_valid_dataset = tf.constant(valid_dataset, dtype=tf.float32) tf_valid_labels = tf.constant(valid_labels, dtype=tf.int32) with tf.name_scope('eegnet_handling'): logits = network(tf_train_dataset) loss = slim.losses.softmax_cross_entropy(logits, tf_train_labels, scope='loss') tf.scalar_summary('loss', loss) optimizer = tf.train.AdamOptimizer(learning_rate=1e-3, epsilon=1e-4).minimize(loss, var_list=tf.trainable_variables()) train_probabilities = tf.nn.softmax(logits) train_predictions = tf.one_hot(tf.argmax(train_probabilities, 1), num_labels, dtype=tf.int32) train_accuracy = slim.metrics.accuracy(train_predictions, tf_train_labels, 100.0) valid_probabilities = tf.nn.softmax(network(tf_valid_dataset, True, False)) valid_predictions = tf.one_hot(tf.argmax(valid_probabilities, 1), num_labels, dtype=tf.int32) valid_accuracy = slim.metrics.accuracy(valid_predictions, tf_valid_labels, 100.0) init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables()) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) # Add summaries for activations: NOT WORKING YET. TF ERROR. #slim.summarize_activations() #Merge all summaries and write to a folder merged_summs = tf.merge_all_summaries() results_writer = tf.train.SummaryWriter('../results', graph) # Add ops to save and restore all the variables. saver = tf.train.Saver() #tracing for timeline run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() print('computational graph created') num_steps = 5001 trace_file = open('../tracing/timeline.json', 'w') save_path = '../checkpoints/model.ckpt' best_loss = 99.0 val_accu = 0.0 best_val_accu = 0.0 t = 0 elapt = 0 with tf.Session(graph=graph) as session: ttotal = time.time() init_op.run() print('Initialized') for step in range(num_steps): t = time.time() offset = (step * batch_size) % (train_dataset.shape[0] - batch_size) batch_data = train_dataset[offset:(offset + batch_size), :, :, :] batch_labels = train_labels[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels} _, l, trprob, traccu, summary = session.run( [optimizer, loss, train_probabilities, train_accuracy, merged_summs], feed_dict=feed_dict) results_writer.add_summary(summary, step) elapt = time.time() if (step % 13 == 0): best_loss = l if l < best_loss else best_loss print('Minibatch total loss at step %d: %f' % (step, l), '| Best:', best_loss) print('Minibatch accuracy:', traccu) print('Predictions | Labels:\n', np.concatenate((trprob[:2], batch_labels[:2]), axis=1)) print('Last iter time:', elapt-t) if (step % 50 == 0): val_accu = valid_accuracy.eval() best_val_accu = val_accu if val_accu > best_val_accu else best_val_accu print('###-> Validation accuracy:', val_accu, '| Best:', best_val_accu) ettotal = time.time() print('Total time: %f hours' %((ettotal-ttotal)/3600.0)) # Save tracing into disl #trace = timeline.Timeline(step_stats=run_metadata.step_stats) #trace_file.write(trace.generate_chrome_trace_format(show_memory=True)) # Save the variables to disk. 
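# (The checkpoint written below can be reloaded later with saver.restore(session, save_path) to reuse the trained weights without retraining.)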
saver.save(session, save_path) print("Model saved in file: %s" % save_path) results_writer.flush() results_writer.close() print('Finished training')Comparing Stacking of K-NN, RF, and XGBoost with LightGBM and XGBoost with a Bayesian Optimizer Author : Preparation# import modules %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.cluster import DBSCAN from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.ensemble import VotingClassifier, RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from xgboost import XGBClassifier from vecstack import stacking import sklearn.metrics as metrics from sklearn.metrics import accuracy_score, roc_auc_score # import data bc = datasets.load_breast_cancer() df = pd.DataFrame(bc.data, columns = bc.feature_names) df['target'] = pd.Series(bc.target) # split data X = df.copy() del X['target'] y = df['target'] X_train, X_test, y_train, y_test = train_test_split(X , y, test_size=0.2 , random_state = 42)Building the Base Models: K-NN, RF, and XGBoostmodels = [ KNeighborsClassifier(n_neighbors=5, n_jobs=-1), RandomForestClassifier(random_state=0, n_jobs=-1, n_estimators=100, max_depth=3), XGBClassifier(random_state=0, n_jobs=-1, learning_rate=0.1, n_estimators=100, max_depth=3) ] S_train, S_test = stacking(models, X_train, y_train, X_test, regression=False, mode='oof_pred_bag', needs_proba=False, save_dir=None, metric=accuracy_score, n_folds=5, stratified=True, shuffle=True, random_state=0, verbose=2) model = XGBClassifier(random_state=0, n_jobs=-1, learning_rate=0.1, n_estimators=100, max_depth=3) model = model.fit(S_train, y_train) y_pred = model.predict(S_test) print('Final prediction score: [%.8f]' % roc_auc_score(y_test, y_pred))Final prediction score: [0.95103177]The AUC obtained for this stacking model is 0.951. Building the LightGBM Model with Bayesian Optimizationfrom bayes_opt import BayesianOptimization from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score, roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve import lightgbm as lgb bayesian_tr_idx = X_train.index bayesian_val_idx = X_test.index bounds_LGB = { 'num_leaves': (31, 500), 'min_data_in_leaf': (20, 200), 'bagging_fraction' : (0.1, 0.9), 'feature_fraction' : (0.1, 0.9), 'learning_rate': (0.01, 0.3), 'min_child_weight': (0.00001, 0.01), 'reg_alpha': (1, 2), 'reg_lambda': (1, 2), 'max_depth':(-1,50), } def LGB_bayesian( learning_rate, num_leaves, bagging_fraction, feature_fraction, min_child_weight, min_data_in_leaf, max_depth, reg_alpha, reg_lambda ): # LightGBM expects the next three parameters to be integers.
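# (BayesianOptimization proposes continuous values within the given bounds, so integer-valued hyperparameters are cast back to int here before being passed to LightGBM.)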
num_leaves = int(num_leaves) min_data_in_leaf = int(min_data_in_leaf) max_depth = int(max_depth) assert type(num_leaves) == int assert type(min_data_in_leaf) == int assert type(max_depth) == int param = { 'num_leaves': num_leaves, 'min_data_in_leaf': min_data_in_leaf, 'min_child_weight': min_child_weight, 'bagging_fraction' : bagging_fraction, 'feature_fraction' : feature_fraction, 'learning_rate' : learning_rate, 'max_depth': max_depth, 'reg_alpha': reg_alpha, 'reg_lambda': reg_lambda, 'objective': 'binary', 'save_binary': True, 'seed': 1337, 'feature_fraction_seed': 1337, 'bagging_seed': 1337, 'drop_seed': 1337, 'data_random_seed': 1337, 'boosting_type': 'gbdt', 'verbose': 1, 'is_unbalance': False, 'boost_from_average': True, 'metric':'auc'} oof = np.zeros(len(df)) trn_data= lgb.Dataset(df.iloc[bayesian_tr_idx][features].values, label=df.iloc[bayesian_tr_idx][target].values) val_data= lgb.Dataset(df.iloc[bayesian_val_idx][features].values, label=df.iloc[bayesian_val_idx][target].values) clf = lgb.train(param, trn_data, num_boost_round=50, valid_sets = [trn_data, val_data], verbose_eval=0, early_stopping_rounds = 50) oof[bayesian_val_idx] = clf.predict(df.iloc[bayesian_val_idx][features].values, num_iteration=clf.best_iteration) score = roc_auc_score(df.iloc[bayesian_val_idx][target].values, oof[bayesian_val_idx]) return score LGB_BO = BayesianOptimization(LGB_bayesian, bounds_LGB, random_state=42) init_points = 10 n_iter = 15 import warnings warnings.filterwarnings("ignore") features = list(df) features.remove('target') target = 'target' print('-' * 130) with warnings.catch_warnings(): warnings.filterwarnings('ignore') LGB_BO.maximize(init_points=init_points, n_iter=n_iter, acq='ucb', xi=0.0, alpha=1e-6) LGB_BO.max['target'] LGB_BO.max['params'] param_lgb = { 'min_data_in_leaf': int(LGB_BO.max['params']['min_data_in_leaf']), 'num_leaves': int(LGB_BO.max['params']['num_leaves']), 'learning_rate': LGB_BO.max['params']['learning_rate'], 'min_child_weight': LGB_BO.max['params']['min_child_weight'], 'bagging_fraction': LGB_BO.max['params']['bagging_fraction'], 'feature_fraction': LGB_BO.max['params']['feature_fraction'], 'reg_lambda': LGB_BO.max['params']['reg_lambda'], 'reg_alpha': LGB_BO.max['params']['reg_alpha'], 'max_depth': int(LGB_BO.max['params']['max_depth']), 'objective': 'binary', 'save_binary': True, 'seed': 1337, 'feature_fraction_seed': 1337, 'bagging_seed': 1337, 'drop_seed': 1337, 'data_random_seed': 1337, 'boosting_type': 'gbdt', 'verbose': 1, 'is_unbalance': False, 'boost_from_average': True, 'metric':'auc' } def plot_confusion_matrix(cm, classes, normalize = False, title = 'Confusion matrix"', cmap = plt.cm.Blues) : plt.imshow(cm, interpolation = 'nearest', cmap = cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation = 0) plt.yticks(tick_marks, classes) thresh = cm.max() / 2. 
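# Annotate each confusion-matrix cell with its value, using white text on dark cells for readability.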
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])) : plt.text(j, i, cm[i, j], horizontalalignment = 'center', color = 'white' if cm[i, j] > thresh else 'black') plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') from sklearn.model_selection import StratifiedKFold,KFold from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score, roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve from scipy import interp import itertools plt.rcParams["axes.grid"] = True nfold = 5 skf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=42) oof = np.zeros(len(df)) mean_fpr = np.linspace(0,1,100) cms= [] tprs = [] aucs = [] y_real = [] y_proba = [] recalls = [] roc_aucs = [] f1_scores = [] accuracies = [] precisions = [] predictions = np.zeros(len(X_test)) feature_importance_df = pd.DataFrame() i = 1 for train_idx, valid_idx in skf.split(df, df['target'].values): print("\nfold {}".format(i)) trn_data = lgb.Dataset(df.iloc[train_idx][features].values, label=df.iloc[train_idx][target].values ) val_data = lgb.Dataset(df.iloc[valid_idx][features].values, label=df.iloc[valid_idx][target].values ) clf = lgb.train(param_lgb, trn_data, num_boost_round = 500, valid_sets = [trn_data, val_data], verbose_eval = 100, early_stopping_rounds = 100) oof[valid_idx] = clf.predict(df.iloc[valid_idx][features].values) predictions += clf.predict(X_test[features]) / nfold # Scores roc_aucs.append(roc_auc_score(df.iloc[valid_idx][target].values, oof[valid_idx])) accuracies.append(accuracy_score(df.iloc[valid_idx][target].values, oof[valid_idx].round())) recalls.append(recall_score(df.iloc[valid_idx][target].values, oof[valid_idx].round())) precisions.append(precision_score(df.iloc[valid_idx][target].values ,oof[valid_idx].round())) f1_scores.append(f1_score(df.iloc[valid_idx][target].values, oof[valid_idx].round())) # Roc curve by folds f = plt.figure(1) fpr, tpr, t = roc_curve(df.iloc[valid_idx][target].values, oof[valid_idx]) tprs.append(interp(mean_fpr, fpr, tpr)) roc_auc = auc(fpr, tpr) aucs.append(roc_auc) plt.plot(fpr, tpr, lw=2, alpha=0.3, label='ROC fold %d (AUC = %0.4f)' % (i,roc_auc)) # Precion recall by folds g = plt.figure(2) precision, recall, _ = precision_recall_curve(df.iloc[valid_idx][target].values, oof[valid_idx]) y_real.append(df.iloc[valid_idx][target].values) y_proba.append(oof[valid_idx]) plt.plot(recall, precision, lw=2, alpha=0.3, label='P|R fold %d' % (i)) i= i+1 # Confusion matrix by folds cms.append(confusion_matrix(df.iloc[valid_idx][target].values, oof[valid_idx].round())) # Features imp fold_importance_df = pd.DataFrame() fold_importance_df["Feature"] = features fold_importance_df["importance"] = clf.feature_importance() fold_importance_df["fold"] = nfold + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) # Metrics print( '\nCV roc score : {0:.4f}, std: {1:.4f}.'.format(np.mean(roc_aucs), np.std(roc_aucs)), '\nCV accuracy score : {0:.4f}, std: {1:.4f}.'.format(np.mean(accuracies), np.std(accuracies)), '\nCV recall score : {0:.4f}, std: {1:.4f}.'.format(np.mean(recalls), np.std(recalls)), '\nCV precision score : {0:.4f}, std: {1:.4f}.'.format(np.mean(precisions), np.std(precisions)), '\nCV f1 score : {0:.4f}, std: {1:.4f}.'.format(np.mean(f1_scores), np.std(f1_scores)) ) #ROC f = plt.figure(1) plt.plot([0,1],[0,1],linestyle = '--',lw = 2,color = 'grey') mean_tpr = np.mean(tprs, axis=0) mean_auc = auc(mean_fpr, mean_tpr) plt.plot(mean_fpr, mean_tpr, color='blue', 
label=r'Mean ROC (AUC = %0.4f)' % (np.mean(roc_aucs)),lw=2, alpha=1) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('LGB ROC curve by folds') plt.legend(loc="lower right") # PR plt g = plt.figure(2) plt.plot([0,1],[1,0],linestyle = '--',lw = 2,color = 'grey') y_real = np.concatenate(y_real) y_proba = np.concatenate(y_proba) precision, recall, _ = precision_recall_curve(y_real, y_proba) plt.plot(recall, precision, color='blue', label=r'Mean P|R') plt.xlabel('Recall') plt.ylabel('Precision') plt.title('P|R curve by folds') plt.legend(loc="lower left") # Confusion maxtrix & metrics plt.rcParams["axes.grid"] = False cm = np.average(cms, axis=0) target_names = [0,1] plt.figure() plot_confusion_matrix(cm, classes=target_names, title= 'LGB Confusion matrix [averaged/folds]') plt.show()fold 1 Training until validation scores don't improve for 100 rounds [100] training's auc: 0.993439 valid_1's auc: 0.985788 [200] training's auc: 0.994747 valid_1's auc: 0.987726 [300] training's auc: 0.995204 valid_1's auc: 0.988695 Early stopping, best iteration is: [218] training's auc: 0.994893 valid_1's auc: 0.988695 fold 2 Training until validation scores don't improve for 100 rounds [100] training's auc: 0.992422 valid_1's auc: 0.991602 Early stopping, best iteration is: [21] training's auc: 0.986214 valid_1's auc: 0.993217 fold 3 Training until validation scores don't improve for 100 rounds [100] training's auc: 0.996349 valid_1's auc: 0.974178 Early stopping, best iteration is: [14] training's auc: 0.985407 valid_1's auc: 0.975687 fold 4 Training until validation scores don't improve for 100 rounds [100] training's auc: 0.993223 valid_1's auc: 0.990275 [200] training's auc: 0.994447 valid_1's auc: 0.991281 Early stopping, best iteration is: [132] training's auc: 0.99385 va[...]Pattern I# read the excel of the data for Pattern I data1 = os.path.join(DATA_PATH, "pattern1_data.xlsx") df1 = pd.read_excel(data1, engine="openpyxl") df1_edit = df1 df1_edit1.1 Polynomial Regressionfrom sklearn.decomposition import PCA # get feature data set and label data set X_all_11 = df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_11 = df1_edit["Fe3+/Fetot(Correction)"].copy() # do dimension reduction on the training feature data set pca_11 = PCA(n_components=7) # the original dimensions 9 will be transformed into 7 X_all_reduced_11 = pca_11.fit_transform(X_all_11) X_all_reduced_11.shape pd.DataFrame(X_all_reduced_11) from sklearn.preprocessing import PolynomialFeatures # based on the reduction data, do dimension augmentation with the degree of the polynomial features 3 poly_features_11 = PolynomialFeatures(degree=3, include_bias=False) X_all_augmented_11 = poly_features_11.fit_transform(X_all_reduced_11) X_all_augmented_11.shape # the columns name of the augmented data set columns_name_11 = poly_features_11.get_feature_names() # the augmentd data set, x0 refers to the first columns in the reduced data set X_all_augmented_11 = pd.DataFrame(X_all_augmented_11) X_all_augmented_11.columns = columns_name_11 X_all_augmented_11 from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_11 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_11.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_11 = X_all_augmented_11.loc[train_index] X_test_11 = 
X_all_augmented_11.loc[test_index] y_train_11 = y_all_11.loc[train_index] y_test_11 = y_all_11.loc[test_index] from sklearn.linear_model import ElasticNet # Based on regularized linear regression, bulid the polynomial regression with the augmented X polyreg_11 = ElasticNet(alpha=0.0001, l1_ratio=0.0001, max_iter=100000, tol=0.0005) # train the model and get the corresponding score y_train_predict_11, y_test_predict_11, train_rmse_11, test_rmse_11, train_r2_11, test_r2_11 = \ score(polyreg_11, 'polynomial regression', 'poly',X_train_11, X_test_11, y_train_11, y_test_11) # plot binary diagram binary_plot(y_train = y_train_11, y_train_label = y_train_predict_11, y_test = y_test_11, y_test_label = y_test_predict_11, train_rmse = train_rmse_11, test_rmse = test_rmse_11, train_r2 = train_r2_11, test_r2 = test_r2_11) save_fig("poly_reg_pattern1")----------polynomial regression---------- The RMSE on training set is 0.074 The RMSE on test set is 0.085 R2 score on training set is 0.891 R2 score on test set is 0.856 Successfully store the trained model in poly.pkl Saving figure poly_reg_pattern11.2 Artificial Neural Network# get feature data set and label data set X_all_12=df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_12 =df1_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_12 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_12.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_12 = X_all_12.loc[train_index] X_test_12 = X_all_12.loc[test_index] y_train_12 = y_all_12.loc[train_index] y_test_12 = y_all_12.loc[test_index] from sklearn.neural_network import MLPRegressor # build up Artificial Neural Network model mlp_12 = MLPRegressor(activation='relu', alpha=0.0001, batch_size='auto', early_stopping=False, hidden_layer_sizes=(20, 120, 20), max_iter=2000, solver='adam', tol=1e-05, random_state=9) # train the model and get the corresponding score y_train_predict_12, y_test_predict_12, train_rmse_12, test_rmse_12, train_r2_12, test_r2_12 = \ score(mlp_12, 'Artificial Neural Network', 'mlp', X_train_12, X_test_12, y_train_12, y_test_12) # plot binary diagram binary_plot(y_train = y_train_12, y_train_label = y_train_predict_12, y_test = y_test_12, y_test_label = y_test_predict_12, train_rmse = train_rmse_12, test_rmse = test_rmse_12, train_r2 = train_r2_12, test_r2 = test_r2_12) save_fig("ann_pattern1")----------Artificial Neural Network---------- The RMSE on training set is 0.074 The RMSE on test set is 0.081 R2 score on training set is 0.889 R2 score on test set is 0.871 Successfully store the trained model in mlp.pkl Saving figure ann_pattern11.3 Artificial Neural Network Ensemble# get feature data set and label data set X_all_13=df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_13 =df1_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_13 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_13.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_13 = X_all_13.loc[train_index] X_test_13 = X_all_13.loc[test_index] y_train_13 = 
y_all_13.loc[train_index] y_test_13 = y_all_13.loc[test_index] from sklearn.ensemble import BaggingRegressor # single ANN model with the same hyperparameter as 1.2 ANN mlp_13 = MLPRegressor(activation='relu', alpha=0.0001, batch_size='auto', early_stopping=False, hidden_layer_sizes=(20, 120, 20), max_iter=2000, solver='adam', tol=1e-05, random_state=9) # Use Bagging method to ensemble 500 ANN models mlp_bag_13 = BaggingRegressor(mlp_13, n_estimators=500, bootstrap=True, bootstrap_features=True, n_jobs=-1, oob_score=True, random_state=42) # train the model and get the corresponding score y_train_predict_13, y_test_predict_13, train_rmse_13, test_rmse_13, train_r2_13, test_r2_13 = \ score(mlp_bag_13, 'Artificial Neural Network Ensemble', 'mlp_bag', X_train_13, X_test_13, y_train_13, y_test_13) # plot binary diagram binary_plot(y_train = y_train_13, y_train_label = y_train_predict_13, y_test = y_test_13, y_test_label = y_test_predict_13, train_rmse = train_rmse_13, test_rmse = test_rmse_13, train_r2 = train_r2_13, test_r2 = test_r2_13) save_fig("ann_ensemble_pattern1")----------Artificial Neural Network Ensemble---------- The RMSE on training set is 0.077 The RMSE on test set is 0.086 R2 score on training set is 0.881 R2 score on test set is 0.853 Successfully store the trained model in mlp_bag.pkl Saving figure ann_ensemble_pattern11.4 Decision Tree# get feature data set and label data set, append a new feature column to X_all_14 X_all_14=df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_14['Na/Mg'] = X_all_14['Na'] / X_all_14['Mg'] y_all_14 =df1_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_14 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_14.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_14 = X_all_14.loc[train_index] X_test_14 = X_all_14.loc[test_index] y_train_14 = y_all_14.loc[train_index] y_test_14 = y_all_14.loc[test_index] from sklearn.tree import DecisionTreeRegressor # build up Decision Tree model dt_14 = DecisionTreeRegressor(ccp_alpha=0.0, criterion='mse', max_depth=None, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=12, min_samples_split=2, min_weight_fraction_leaf=0.0, presort='deprecated', random_state=42, splitter='best') # train the model and get the corresponding score y_train_predict_14, y_test_predict_14, train_rmse_14, test_rmse_14, train_r2_14, test_r2_14 = \ score(dt_14, 'Decision Tree', 'dt', X_train_14, X_test_14, y_train_14, y_test_14) # plot binary diagram binary_plot(y_train = y_train_14, y_train_label = y_train_predict_14, y_test = y_test_14, y_test_label = y_test_predict_14, train_rmse = train_rmse_14, test_rmse = test_rmse_14, train_r2 = train_r2_14, test_r2 = test_r2_14) save_fig("dt_pattern1")----------Decision Tree---------- The RMSE on training set is 0.07 The RMSE on test set is 0.08 R2 score on training set is 0.9 R2 score on test set is 0.873 Successfully store the trained model in dt.pkl Saving figure dt_pattern11.5 Extra Tree# get feature data set and label data set, append a new feature column to X_all_15 X_all_15=df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_15['Na/Mg'] = X_all_15['Na'] / X_all_15['Mg'] y_all_15 =df1_edit["Fe3+/Fetot(Correction)"].copy() from 
sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_15 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_15.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_15 = X_all_15.loc[train_index] X_test_15 = X_all_15.loc[test_index] y_train_15 = y_all_15.loc[train_index] y_test_15 = y_all_15.loc[test_index] from sklearn.ensemble import ExtraTreesRegressor # build up Extra Tree model et_15 = ExtraTreesRegressor(n_estimators=500,bootstrap=False, oob_score=False, max_leaf_nodes=20, random_state=42, n_jobs=-1) # train the model and get the corresponding score y_train_predict_15, y_test_predict_15, train_rmse_15, test_rmse_15, train_r2_15, test_r2_15 = \ score(et_15, 'Extra Tree', 'et', X_train_15, X_test_15, y_train_15, y_test_15) # plot binary diagram binary_plot(y_train = y_train_15, y_train_label = y_train_predict_15, y_test = y_test_15, y_test_label = y_test_predict_15, train_rmse = train_rmse_15, test_rmse = test_rmse_15, train_r2 = train_r2_15, test_r2 = test_r2_15) save_fig("et_pattern1")----------Extra Tree---------- The RMSE on training set is 0.058 The RMSE on test set is 0.078 R2 score on training set is 0.932 R2 score on test set is 0.878 Successfully store the trained model in et.pkl Saving figure et_pattern11.6 Random Forest# get feature data set and label data set, append a new feature column to X_all_16 X_all_16=df1_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_16['Na/Mg'] = X_all_16['Na'] / X_all_16['Mg'] y_all_16 =df1_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_16 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=13) # divide augmented X and y into training and testing data for train_index, test_index in split_16.split(df1_edit, df1_edit["clustering_label"]): # train : test = 4 : 1 X_train_16 = X_all_16.loc[train_index] X_test_16 = X_all_16.loc[test_index] y_train_16 = y_all_16.loc[train_index] y_test_16 = y_all_16.loc[test_index] from sklearn.ensemble import RandomForestRegressor # build up Random Forest model rf_16 = RandomForestRegressor(n_estimators=500, oob_score=True, max_leaf_nodes=15, n_jobs=-1, random_state=42) # train the model and get the corresponding score y_train_predict_16, y_test_predict_16, train_rmse_16, test_rmse_16, train_r2_16, test_r2_16 = \ score(rf_16, 'Random Forest', 'rf', X_train_16, X_test_16, y_train_16, y_test_16) # plot binary diagram binary_plot(y_train = y_train_16, y_train_label = y_train_predict_16, y_test = y_test_16, y_test_label = y_test_predict_16, train_rmse = train_rmse_16, test_rmse = test_rmse_16, train_r2 = train_r2_16, test_r2 = test_r2_16) save_fig("rf_pattern1")----------Random Forest---------- The RMSE on training set is 0.055 The RMSE on test set is 0.069 R2 score on training set is 0.938 R2 score on test set is 0.907 Successfully store the trained model in rf.pkl Saving figure rf_pattern1Pattern II# read the excel of the data for Pattern II data2 = os.path.join(DATA_PATH, "pattern2_data.xlsx") df2 = pd.read_excel(data2, engine="openpyxl") df2_edit = df2 df2_edit2.1 Linear Regression# get feature data set and label data set X_all_21 = df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_21 = 
df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_21 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_21.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_21 = X_all_21.loc[train_index] X_test_21 = X_all_21.loc[test_index] y_train_21 = y_all_21.loc[train_index] y_test_21 = y_all_21.loc[test_index] from sklearn.linear_model import LinearRegression # Based on linear regression, bulid the polynomial regression with the augmented X linreg_21 = LinearRegression() # train the model and get the corresponding score y_train_predict_21, y_test_predict_21, train_rmse_21, test_rmse_21, train_r2_21, test_r2_21 = \ score(linreg_21, 'linear regression', 'Linear_new',X_train_21, X_test_21, y_train_21, y_test_21) # plot binary diagram binary_plot(y_train = y_train_21, y_train_label = y_train_predict_21, y_test = y_test_21, y_test_label = y_test_predict_21, train_rmse = train_rmse_21, test_rmse = test_rmse_21, train_r2 = train_r2_21, test_r2 = test_r2_21) save_fig("lin_reg_pattern2")----------linear regression---------- The RMSE on training set is 0.068 The RMSE on test set is 0.078 R2 score on training set is 0.944 R2 score on test set is 0.918 Successfully store the trained model in Linear_new.pkl Saving figure lin_reg_pattern22.2 Polynomial Regressionfrom sklearn.decomposition import PCA # get feature data set and label data set X_all_22 = df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_22 = df2_edit["Fe3+/Fetot(Correction)"].copy() # do dimension reduction on the training feature data set pca_22 = PCA(n_components=6) # the original dimensions 9 will be transformed into 6 X_all_reduced_22 = pca_22.fit_transform(X_all_22) X_all_reduced_22.shape pd.DataFrame(X_all_reduced_22) from sklearn.preprocessing import PolynomialFeatures # based on the reduction data, do dimension augmentation with the degree of the polynomial features 3 poly_features_22 = PolynomialFeatures(degree=3, include_bias=False) X_all_augmented_22 = poly_features_22.fit_transform(X_all_reduced_22) X_all_augmented_22.shape # the columns name of the augmented data set columns_name_22 = poly_features_22.get_feature_names() # the augmentd data set, x0 refers to the first columns in the reduced data set X_all_augmented_22 = pd.DataFrame(X_all_augmented_22) X_all_augmented_22.columns = columns_name_22 X_all_augmented_22 from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_22 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_22.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_22 = X_all_augmented_22.loc[train_index] X_test_22 = X_all_augmented_22.loc[test_index] y_train_22 = y_all_22.loc[train_index] y_test_22 = y_all_22.loc[test_index] from sklearn.linear_model import LinearRegression # Based on linear regression, bulid the polynomial regression with the augmented X polyreg_22 = ElasticNet(alpha=0.0001, l1_ratio=0.0001, max_iter=100000, tol=0.0005) # train the model and get the corresponding score y_train_predict_22, y_test_predict_22, train_rmse_22, test_rmse_22, train_r2_22, test_r2_22 = \ score(polyreg_22, 'polynomial regression', 
'poly_new',X_train_22, X_test_22, y_train_22, y_test_22) # plot binary diagram binary_plot(y_train = y_train_22, y_train_label = y_train_predict_22, y_test = y_test_22, y_test_label = y_test_predict_22, train_rmse = train_rmse_22, test_rmse = test_rmse_22, train_r2 = train_r2_22, test_r2 = test_r2_22) save_fig("poly_reg_pattern2")----------polynomial regression---------- The RMSE on training set is 0.058 The RMSE on test set is 0.057 R2 score on training set is 0.96 R2 score on test set is 0.956 Successfully store the trained model in poly_new.pkl Saving figure poly_reg_pattern22.3 Artificial Neural Network# get feature data set and label data set X_all_23 = df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_23 = df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_23 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_23.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_23 = X_all_23.loc[train_index] X_test_23 = X_all_23.loc[test_index] y_train_23 = y_all_23.loc[train_index] y_test_23 = y_all_23.loc[test_index] from sklearn.neural_network import MLPRegressor # build up Artificial Neural Network model mlp_23 = MLPRegressor(activation='relu', alpha=0.0001, batch_size='auto', early_stopping=False, hidden_layer_sizes=(20, 40, 40, 20), max_iter=2000, solver='adam', tol=1e-05, random_state=9) # train the model and get the corresponding score y_train_predict_23, y_test_predict_23, train_rmse_23, test_rmse_23, train_r2_23, test_r2_23 = \ score(mlp_23, 'Artificial Neural Network', 'mlp_new', X_train_23, X_test_23, y_train_23, y_test_23) # plot binary diagram binary_plot(y_train = y_train_23, y_train_label = y_train_predict_23, y_test = y_test_23, y_test_label = y_test_predict_23, train_rmse = train_rmse_23, test_rmse = test_rmse_23, train_r2 = train_r2_23, test_r2 = test_r2_23) save_fig("ann_pattern2")----------Artificial Neural Network---------- The RMSE on training set is 0.076 The RMSE on test set is 0.076 R2 score on training set is 0.93 R2 score on test set is 0.921 Successfully store the trained model in mlp_new.pkl Saving figure ann_pattern22.4 Artificial Neural Network Ensemble# get feature data set and label data set X_all_24 = df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_24 = df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_24 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_24.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_24 = X_all_24.loc[train_index] X_test_24 = X_all_24.loc[test_index] y_train_24 = y_all_24.loc[train_index] y_test_24 = y_all_24.loc[test_index] from sklearn.ensemble import BaggingRegressor # single ANN model with the same hyperparameter as 2.3 ANN mlp_24 = MLPRegressor(activation='relu', alpha=0.0001, batch_size='auto', early_stopping=False, hidden_layer_sizes=(20, 40, 40, 20), max_iter=2000, solver='adam', tol=1e-05, random_state=9) # Use Bagging method to ensemble 500 ANN models mlp_bag_24 = BaggingRegressor(mlp_24, n_estimators=500, bootstrap=True, bootstrap_features=True, 
n_jobs=-1, oob_score=True, random_state=42) # train the model and get the corresponding score y_train_predict_24, y_test_predict_24, train_rmse_24, test_rmse_24, train_r2_24, test_r2_24 = \ score(mlp_bag_24, 'Artificial Neural Network Ensemble', 'mlp_bag_new', X_train_24, X_test_24, y_train_24, y_test_24) # plot binary diagram binary_plot(y_train = y_train_24, y_train_label = y_train_predict_24, y_test = y_test_24, y_test_label = y_test_predict_24, train_rmse = train_rmse_24, test_rmse = test_rmse_24, train_r2 = train_r2_24, test_r2 = test_r2_24) save_fig("ann_ensemble_pattern2")----------Artificial Neural Network Ensemble---------- The RMSE on training set is 0.072 The RMSE on test set is 0.074 R2 score on training set is 0.939 R2 score on test set is 0.926 Successfully store the trained model in mlp_bag_new.pkl Saving figure ann_ensemble_pattern22.5 Decision Tree# get feature data set and label data set, append a new feature column to X_all_25 X_all_25=df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_25['Na/Mg'] = X_all_25['Na'] / X_all_25['Mg'] y_all_25 =df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_25 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_25.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_25 = X_all_25.loc[train_index] X_test_25 = X_all_25.loc[test_index] y_train_25 = y_all_25.loc[train_index] y_test_25 = y_all_25.loc[test_index] from sklearn.tree import DecisionTreeRegressor # build up Decision Tree model dt_25 = DecisionTreeRegressor(ccp_alpha=0.0, criterion='mse', max_depth=5, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, random_state=42, splitter='best', presort='deprecated') # train the model and get the corresponding score y_train_predict_25, y_test_predict_25, train_rmse_25, test_rmse_25, train_r2_25, test_r2_25 = \ score(dt_25, 'Decision Tree', 'dt_new', X_train_25, X_test_25, y_train_25, y_test_25) # plot binary diagram binary_plot(y_train = y_train_25, y_train_label = y_train_predict_25, y_test = y_test_25, y_test_label = y_test_predict_25, train_rmse = train_rmse_25, test_rmse = test_rmse_25, train_r2 = train_r2_25, test_r2 = test_r2_25) save_fig("dt_pattern2")----------Decision Tree---------- The RMSE on training set is 0.046 The RMSE on test set is 0.068 R2 score on training set is 0.975 R2 score on test set is 0.936 Successfully store the trained model in dt_new.pkl Saving figure dt_pattern22.6 Extra Tree# get feature data set and label data set, append a new feature column to X_all_26 X_all_26=df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_26['Na/Mg'] = X_all_26['Na'] / X_all_26['Mg'] y_all_26 =df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_26 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_26.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_26 = X_all_26.loc[train_index] X_test_26 = X_all_26.loc[test_index] y_train_26 = y_all_26.loc[train_index] 
y_test_26 = y_all_26.loc[test_index] from sklearn.ensemble import ExtraTreesRegressor # build up Extra Tree model et_26 = ExtraTreesRegressor(n_estimators=500,bootstrap=False, oob_score=False, max_leaf_nodes=15, random_state=42, n_jobs=-1) # train the model and get the corresponding score y_train_predict_26, y_test_predict_26, train_rmse_26, test_rmse_26, train_r2_26, test_r2_26 = \ score(et_26, 'Extra Tree', 'et_new', X_train_26, X_test_26, y_train_26, y_test_26) # plot binary diagram binary_plot(y_train = y_train_26, y_train_label = y_train_predict_26, y_test = y_test_26, y_test_label = y_test_predict_26, train_rmse = train_rmse_26, test_rmse = test_rmse_26, train_r2 = train_r2_26, test_r2 = test_r2_26) save_fig("et_pattern2")----------Extra Tree---------- The RMSE on training set is 0.046 The RMSE on test set is 0.051 R2 score on training set is 0.974 R2 score on test set is 0.965 Successfully store the trained model in et_new.pkl Saving figure et_pattern22.7 Random Forest# get feature data set and label data set, append a new feature column to X_all_27 X_all_27 = df2_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) X_all_27['Na/Mg'] = X_all_27['Na'] / X_all_27['Mg'] y_all_27 = df2_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_27 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0) # divide augmented X and y into training and testing data for train_index, test_index in split_27.split(df2_edit, df2_edit["clustering_label"]): # train : test = 4 : 1 X_train_27 = X_all_27.loc[train_index] X_test_27 = X_all_27.loc[test_index] y_train_27 = y_all_27.loc[train_index] y_test_27 = y_all_27.loc[test_index] from sklearn.ensemble import RandomForestRegressor # build up Random Forest model rnd_27 = RandomForestRegressor(n_estimators=500, oob_score=True, max_leaf_nodes=15, n_jobs=-1, random_state=42) # train the model and get the corresponding score y_train_predict_27, y_test_predict_27, train_rmse_27, test_rmse_27, train_r2_27, test_r2_27 = \ score(rnd_27, 'Random Forest', 'rf_new', X_train_27, X_test_27, y_train_27, y_test_27) # plot binary diagram binary_plot(y_train = y_train_27, y_train_label = y_train_predict_27, y_test = y_test_27, y_test_label = y_test_predict_27, train_rmse = train_rmse_27, test_rmse = test_rmse_27, train_r2 = train_r2_27, test_r2 = test_r2_27) save_fig("rf_pattern2")----------Random Forest---------- The RMSE on training set is 0.039 The RMSE on test set is 0.055 R2 score on training set is 0.982 R2 score on test set is 0.959 Successfully store the trained model in rf_new.pkl Saving figure rf_pattern2Pattern III# read the excel of the data for Pattern III data3 = os.path.join(DATA_PATH, "pattern3_data.xlsx") df3 = pd.read_excel(data3, engine="openpyxl") df3_edit = df3 df3_edit3.1 Linear Regression# get feature data set and label data set X_all_31 = df3_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_31 = df3_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_31 = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=19) # divide augmented X and y into training and testing data for train_index, test_index in split_31.split(df3_edit, df3_edit["clustering_label"]): # train : test = 9 : 1 X_train_31 = X_all_31.loc[train_index] X_test_31 = X_all_31.loc[test_index] 
y_train_31 = y_all_31.loc[train_index] y_test_31 = y_all_31.loc[test_index] from sklearn.linear_model import LinearRegression # Based on linear regression, bulid the polynomial regression with the augmented X linreg_31 = LinearRegression() # train the model and get the corresponding score y_train_predict_31, y_test_predict_31, train_rmse_31, test_rmse_31, train_r2_31, test_r2_31 = \ score(linreg_31, 'linear regression', 'Linear_spl',X_train_31, X_test_31, y_train_31, y_test_31) # plot binary diagram binary_plot(y_train = y_train_31, y_train_label = y_train_predict_31, y_test = y_test_31, y_test_label = y_test_predict_31, train_rmse = train_rmse_31, test_rmse = test_rmse_31, train_r2 = train_r2_31, test_r2 = test_r2_31) save_fig("lin_reg_pattern3")----------linear regression---------- The RMSE on training set is 0.049 The RMSE on test set is 0.04 R2 score on training set is 0.385 R2 score on test set is 0.574 Successfully store the trained model in Linear_spl.pkl Saving figure lin_reg_pattern33.2 Extra Tree# get feature data set and label data set, append a new feature column to X_all_32 X_all_32=df3_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_32 =df3_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_32 = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=19) # divide augmented X and y into training and testing data for train_index, test_index in split_32.split(df3_edit, df3_edit["clustering_label"]): # train : test = 9 : 1 X_train_32 = X_all_32.loc[train_index] X_test_32 = X_all_32.loc[test_index] y_train_32 = y_all_32.loc[train_index] y_test_32 = y_all_32.loc[test_index] from sklearn.ensemble import ExtraTreesRegressor # build up Extra Tree model etr_clf_32 = ExtraTreesRegressor(n_estimators=500,bootstrap=False, oob_score=False, max_leaf_nodes=15, random_state=42, n_jobs=-1) # train the model and get the corresponding score y_train_predict_32, y_test_predict_32, train_rmse_32, test_rmse_32, train_r2_32, test_r2_32 = \ score(etr_clf_32, 'Extra Tree', 'et_spl', X_train_32, X_test_32, y_train_32, y_test_32) # plot binary diagram binary_plot(y_train = y_train_32, y_train_label = y_train_predict_32, y_test = y_test_32, y_test_label = y_test_predict_32, train_rmse = train_rmse_32, test_rmse = test_rmse_32, train_r2 = train_r2_32, test_r2 = test_r2_32) save_fig("et_pattern3")----------Extra Tree---------- The RMSE on training set is 0.031 The RMSE on test set is 0.034 R2 score on training set is 0.765 R2 score on test set is 0.697 Successfully store the trained model in et_spl.pkl Saving figure et_pattern33.3 Random Forest# get feature data set and label data set, append a new feature column to X_all_33 X_all_33 = df3_edit.drop(["Fe3+/Fetot(Correction)", "clustering_label"], axis=1) y_all_33 = df3_edit["Fe3+/Fetot(Correction)"].copy() from sklearn.model_selection import StratifiedShuffleSplit # stratified random sampling based on the results of KMeans Clustering split_33 = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=19) # divide augmented X and y into training and testing data for train_index, test_index in split_33.split(df3_edit, df3_edit["clustering_label"]): # train : test = 9 : 1 X_train_33 = X_all_33.loc[train_index] X_test_33 = X_all_33.loc[test_index] y_train_33 = y_all_33.loc[train_index] y_test_33 = y_all_33.loc[test_index] from sklearn.ensemble import RandomForestRegressor # build up Random Forest model 
rnd_clf_33 = RandomForestRegressor(n_estimators=500, oob_score=True, max_leaf_nodes=15, n_jobs=-1, random_state=42) # train the model and get the corresponding score y_train_predict_33, y_test_predict_33, train_rmse_33, test_rmse_33, train_r2_33, test_r2_33 = \ score(rnd_clf_33, 'Random Forest', 'rf_spl', X_train_33, X_test_33, y_train_33, y_test_33) # plot binary diagram binary_plot(y_train = y_train_33, y_train_label = y_train_predict_33, y_test = y_test_33, y_test_label = y_test_predict_33, train_rmse = train_rmse_33, test_rmse = test_rmse_33, train_r2 = train_r2_33, test_r2 = test_r2_33) save_fig("rf_pattern3")----------Random Forest---------- The RMSE on training set is 0.025 The RMSE on test set is 0.024 R2 score on training set is 0.84 R2 score on test set is 0.846 Successfully store the trained model in rf_spl.pkl Saving figure rf_pattern35C allele specific density plots2/16/2021create density plots from bam files for the allele specific atac density plotsimport pandas as pd import os, glob import numpy as np import seaborn as sns import matplotlib.pyplot as plt save_dir = 'vcf_files/atac/' # we know ATAC reads are ~76 bp long, so the save matrix width should be 200 bp just to be safe SAVE_WIDTH = 200 SNP_LOC_SAVE = 100 # 0 indexed glob.glob(os.path.join(save_dir, '*sam'))testing for PEMT_chr17_17942613.samdef read_sam_density(sam_file): read_starts, seqs = [], [] with open (sam_file, 'r') as f: lines = f.readlines() print(len(lines),'num lines') for line in lines: line = line.strip().split('\t') read_starts.append(int(line[3])) # 4th col: 1-based leftmost mapping POSition seqs.append(line[9]) # 10th col: seq return read_starts, seqs def process_read(read_start, seq, snp_pos, snp_loc_save = SNP_LOC_SAVE): """ need to get snp letter at location and start and end position return snp_letter ('A','C','T','G') int position of start int position of end (not inclusive) """ if read_start + len(seq) <= snp_pos: # doesn't cover snp location, too upstream return '',None,None elif read_start > snp_pos: # too downstream return '',None,None else: # print(snp_pos, read_start,snp_pos -read_start) # print(seq) snp_letter = seq[snp_pos -read_start] snp_loc_save_start = snp_loc_save- (snp_pos -read_start) return snp_letter, snp_loc_save_start, snp_loc_save_start + len(seq) # testing sam_file = 'vcf_files/atac/PEMT_H9D2_chr17_17942613.sam' gene,tissue,chrom, pos = os.path.basename(sam_file).split('.')[0].split('_') pos = int(pos) print(gene,chrom, pos) # read sam file read_starts, seqs = read_sam_density(sam_file) # initialize density storage matrices snp_to_info = {} for letter in ['A','T','C','G']: snp_to_info[letter] = np.zeros((len(read_starts),SAVE_WIDTH)) # get positions for idx,(read_start, seq) in enumerate(zip(read_starts, seqs )): snp_letter, start_pos, end_pos = process_read(read_start, seq, pos) if snp_letter != '': snp_to_info[snp_letter][idx,start_pos: end_pos] = 1 # make final data frame for plotting snp_to_density = {} for letter in ['A','T','C','G']: snp_to_density[letter]= snp_to_info[letter].sum(axis=0) sns.lineplot(data=pd.DataFrame.from_dict(snp_to_density)) sns_plot_data = pd.DataFrame.from_dict(snp_to_density) sns_plot_data[:5] sns.lineplot(data = sns_plot_data) ref = 'C' alt = 'T' plt.figure() plt.fill_between( sns_plot_data.index, sns_plot_data[ref].values, color="skyblue", alpha=0.8) plt.fill_between( sns_plot_data.index, sns_plot_data[alt].values, color="darkred", alpha=0.8) plt.show()
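A quick sanity check of the process_read helper defined above (the read and SNP coordinates below are hypothetical, chosen only to exercise the function, not taken from the real SAM files):

# Hypothetical toy read: a 10 bp read starting at position 95 covers a SNP at position 100,
# so the SNP letter is seq[100 - 95] = seq[5] ('C') and the read is written into the save
# matrix starting at column SNP_LOC_SAVE - (100 - 95) = 95.
letter, start, end = process_read(read_start=95, seq='ACGTACGTAC', snp_pos=100)
print(letter, start, end)  # expected: C 95 105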
2. Run through all sams# testing for sam_file in glob.glob(os.path.join(save_dir, '*sam')): gene,tissue,chrom, pos = os.path.basename(sam_file).split('.')[0].split('_') pos = int(pos) print(gene,tissue, chrom, pos) # read sam file read_starts, seqs = read_sam_density(sam_file) # initialize density storage matrices snp_to_info = {} for letter in ['A','T','C','G']: snp_to_info[letter] = np.zeros((len(read_starts),SAVE_WIDTH)) # get positions for idx,(read_start, seq) in enumerate(zip(read_starts, seqs )): snp_letter, start_pos, end_pos = process_read(read_start, seq, pos) if snp_letter != '': snp_to_info[snp_letter][idx,start_pos: end_pos] = 1 # make final data frame for getting ref/alt snp_to_density = {} for letter in ['A','T','C','G']: snp_to_density[letter]= snp_to_info[letter].sum(axis=0) # assume ref = most, alt = 2nd most letter_order = pd.DataFrame(snp_to_density).sum(axis=0).sort_values().index ref, alt = letter_order[3], letter_order[2] snp_to_density_refalt = { alt:snp_to_density[alt], ref:snp_to_density[ref] } sns_plot_data = pd.DataFrame.from_dict(snp_to_density_refalt) # plt.figure() # sns.lineplot(data=sns_plot_data) fig = plt.figure() plt.fill_between( sns_plot_data.index, sns_plot_data[ref].values, color="skyblue", alpha=0.8) plt.fill_between( sns_plot_data.index, sns_plot_data[alt].values, color="darkred", alpha=0.8) title = '_'.join([gene,tissue, chrom, str(pos),ref,alt]) fig.suptitle(title, fontsize=12) plt.savefig(os.path.join(save_dir,title+'.pdf')) # plt.show()CXCL5 SLC chr4 74864687 71 num lines PEMT SLC chr17 17942613 491 num lines PEMT H9D2 chr17 17942613 520 num lines NFATC3 H9D2 chr16 68344945 520 num lines PEMT H9D2 chr17 17897739 105 num lines BRINP1 H9D2 chr9 121358237 256 num lines BRINP1 SLC chr9 121358237 44 num lines NFATC3 SLC chr16 68344945 411 num linesClassifierfrom nltk.corpus import names names.words() def gender_features(word): return {'last_letter':word[-1]} gender_features("Obama") gender_features("Purushottam") print(len(names.words())) labeled_names = ([(name,'male') for name in names.words('male.txt')] + [(name,'female') for name in names.words('female.txt')]) import random random.shuffle(labeled_names) featuresets = [(gender_features(n),gender) for(n,gender) in labeled_names] train_set , test_set = featuresets[5000:],featuresets[:2000] import nltk classifier = nltk.NaiveBayesClassifier.train(train_set) classifier.show_most_informative_features() classifier.classify(gender_features("David")) print(nltk.classify.accuracy(classifier,test_set)) classifier.classify(gender_features("Purushottam")) classifier.classify(gender_features("Shweta")) classifier.classify(gender_features("Surya"))POS Taggerimport nltk from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize stop_words = set(stopwords.words('english')) txt = ("Sukanya, Rajib and Naba are my good friends. " "Sukanya is getting married next year. " "Marriage is a big step in one’s life."It is both exciting and frightening. " "But friendship is a sacred bond between people."It is a special kind of love between us. 
" "Many of you must have tried searching for a friend " "but never found the right one.") # sent_tokenize is one of instances of # PunktSentenceTokenizer from the nltk.tokenize.punkt module tokenized = sent_tokenize(txt) for i in tokenized: # Word tokenizers is used to find the words # and punctuation in a string wordsList = nltk.word_tokenize(i) # removing stop words from wordList wordsList = [w for w in wordsList if not w in stop_words] # Using a Tagger. Which is part-of-speech # tagger or POS-tagger. tagged = nltk.pos_tag(wordsList) print(tagged)[('Sukanya', 'NNP'), (',', ','), ('Rajib', 'NNP'), ('Naba', 'NNP'), ('good', 'JJ'), ('friends', 'NNS'), ('.', '.')] [('Sukanya', 'NNP'), ('getting', 'VBG'), ('married', 'VBN'), ('next', 'JJ'), ('year', 'NN'), ('.', '.')] [('Marriage', 'NN'), ('big', 'JJ'), ('step', 'NN'), ('one', 'CD'), ('’', 'NN'), ('life.It', 'NN'), ('exciting', 'VBG'), ('frightening', 'NN'), ('.', '.')] [('But', 'CC'), ('friendship', 'NN'), ('sacred', 'VBD'), ('bond', 'NN'), ('people.It', 'NN'), ('special', 'JJ'), ('kind', 'NN'), ('love', 'VB'), ('us', 'PRP'), ('.', '.')] [('Many', 'JJ'), ('must', 'MD'), ('tried', 'VB'), ('searching', 'VBG'), ('friend', 'NN'), ('never', 'RB'), ('found', 'VBD'), ('right', 'JJ'), ('one', 'CD'), ('.', '.')]Chapter 1# 1.1 b = np.matrix(np.arange(16)).reshape(4,4) # matrix left of B operates on rows, Right of B operates on cols one = np.matrix([[2,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]) two = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1/2,0],[0,0,0,1]]) three = np.matrix([[1,0,1,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]) three_2 = np.matrix([[1,0,0,0],[0,1,0,0],[1,0,1,0],[0,0,0,1]]) four = np.matrix([[0,0,0,1],[0,1,0,0],[0,0,1,0],[1,0,0,0]]) five = np.matrix([[1,-1,0,0],[0,1,0,0],[0,-1,1,0],[0,-1,0,1]]) six = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]]) seven = np.matrix([[0,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]) b*sevenSpeech sentiment analysis===================In this notebook we illustrate the power of pliers converters and extractors in a single pipeline. Specifically, we first run a state-of-the-art speech recognition API to transcribe the text of an audio clip. Then, we run a sentiment analysis API to extract the emotion ratings of the spoken words. The audio clip of this example is a short clip of an Obama administration press conference.Note: the analysis is not using any audio features to assist emotion extraction. 
It is simply only using the text transcribed from the audiofrom pliers.tests.utils import get_test_data_path from os.path import join from pliers.stimuli import AudioStim from pliers.graph import Graph # Configure our stimulus and extraction graph stim = AudioStim(join(get_test_data_path(), 'video', 'obama_speech.wav')) nodes = [ { 'transformer':'IBMSpeechAPIConverter', 'parameters':{'resolution':'phrases'}, 'children':[ { 'transformer':'IndicoAPITextExtractor', 'parameters':{'models':['emotion']} } ] } ] graph = Graph(nodes)**Parameters**:IBMSpeechAPIConverter - `resolution` specifies how we should chunk the text; using phrases provides better results for emotion analysis, as opposed to word-by-word analysisIndicoAPITextExtractor - `models` specifies which analysis models to run using the Indico API; 'emotion' will give back five emotion ratings (anger, joy, fear, sadness, surprise) of the textresults = graph.run(stim) resultsDay 13infile = './input/day13.txt' points = [] folds = [] numRows = 0 numCols = 0 with open(infile) as f: for line in f: if line[0].isdigit(): pt = tuple(map(int, line.strip().split(','))) numRows = max(numRows, pt[0]) numCols = max(numCols, pt[1]) points.append(pt) elif line[0] == 'f': a, b = line.strip().split(' ')[-1].split('=') folds.append((a, int(b))) ar = [[' ']*(1+numCols) for _ in range(numRows+1)] for i,j in points: ar[i][j] = '#' def makeFold(ar, fold): numRows = len(ar) numCols = len(ar[0]) xy, num = fold if xy == 'x': rowsAbove = num rowsBelow = numRows - num - 1 if rowsAbove > rowsBelow: arOut = ar[:num] for i in range(num): i2 = 2*num - i if i2 < numRows: for j in range(numCols): if ar[i2][j] == '#': arOut[i][j] = '#' else: arOut = ar[:num:-1] for i in range(rowsBelow): ''' let numRows = 20, num = 4 rowsAbove = 4, rowsBelow = 15 i=11, i2 = 0 i=12, i2 = 1 i=13, i2 = 2 i=14, i2 = 3 i2 = i - (rowsBelow-rowsAbove) i2 = i- (numRows - num - 1 - num) i2 = i - (numRows -2*num - 1) ''' i2 = i - (rowsBelow - rowsAbove) if i2 >= 0: for j in range(numCols): if ar[i2][j] == '#': arOut[i][j] = '#' else: colsLeft = num colsRight = numCols - num - 1 if colsLeft > colsRight: arOut = [row[:num] for row in ar] ''' let numCols = 20, num = 14 colsLeft = 14, colsRight = 5 j = 13, j2 = 15 j = 12, j2 = 16 j2 = -j + 2*num ''' for i in range(numRows): for j in range(num): j2 = -j + 2*num if j2 < numCols: if ar[i][j2] == '#': arOut[i][j] = '#' else: arOut = [row[:num:-1] for row in ar] ''' let numCols = 20, num = 4 colsLeft = 4, colsRight = 15 j = 14, j2 = 3 j = 13, j2 = 2 j2 = j - (colsRight-colsLeft) ''' for i in range(numRows): for j in range(colsRight): j2 = j - (colsRight-colsLeft) if j2 >= 0: if ar[i][j2] == '#': arOut[i][j] = '#' return arOut for i,fold in enumerate(folds): ar = makeFold(ar, fold) if i==0: dotCount = 0 for line in ar: for val in line: if val == '#': dotCount += 1 print(f'Part 1: {dotCount}') # for line in ar: # print(''.join(line)) arTrans = list(zip(*ar)) for line in arTrans: print(''.join(line))Day 14 Loopingfrom collections import Counter tpl, _, *rules = open('input/day14.txt').read().split('\n') rules = dict(r.split(' -> ') for r in rules) pairs = Counter(map(str.__add__, tpl, tpl[1:])) chars = Counter(tpl) for _ in range(40): for (a, b), c in pairs.copy().items(): newChar = rules[a+b] pairs[a+b] -= c pairs[a+newChar] += c pairs[newChar+b] += c chars[newChar] += c max(chars.values()) - min(chars.values())Recursivefrom collections import Counter from functools import lru_cache tpl, _, *rules = open('input/day14.txt').read().split('\n') rules = 
dict(r.split(' -> ') for r in rules) @lru_cache(maxsize=None) def f(a, b, depth=40): if depth == 0: return Counter('') x = rules[a+b] return Counter(x) + f(a, x, depth-1) + f(x, b, depth-1) c = sum(map(f, tpl, tpl[1:]), Counter(tpl)) max(c.values()) - min(c.values())Classifying Bangla Fake News with HuggingFace Transformers and Fastai- toc: true- branch: master- badges: true- comments: true- categories: [fastpages, jupyter]- image: images/some_folder/your_image.png- hide: false- search_exclude: true- metadata_key1: metadata_value1- metadata_key2: metadata_value2 ![](https://github.com/Tahsin-Mayeesha/tahsin_mayeesha/raw/master/images/fakenews/front.jpg) In this post we cover fine tuning a [multilingual BERT](https://huggingface.co/bert-base-multilingual-cased) model from Huggingface Transformers library on [**BanFakeNews**](https://aclanthology.org/2020.lrec-1.349/) dataset released in LREC 2020. While English Fake News Classification and fact checking tasks have many resources and competitions available such as [fake news challenge](http://www.fakenewschallenge.org/) and [hateful meme detection](https://ai.facebook.com/blog/hateful-memes-challenge-and-data-set/), similar efforts in Bangla has been almost non existent. BanFakeNews dataset contains 50k annotated articles from different news sources in Bangladesh, out of them around 1200 articles have been annotated as fake. As transformer architectures uses self attention to learn contextual embeddings they have been very popular in NLP research community for a while and many tools have been built around them.This post is reusing materials taught in Weights and Bias's study group of Fast ai with Huggingface([link](https://wandb.ai/wandb_fc/events/reports/W-B-Study-Group-Lectures-fast-ai-w-Hugging-Face--Vmlldzo4NDUzNDU?galleryTag=events)) where several recent(2021) libraries([blurr](https://github.com/ohmeow/blurr),[Fasthugs](https://github.com/morganmcg1/fasthugs) & [Adaptnlp](https://novetta.github.io/adaptnlp/)) that integrates components from popular deep learning frameworks Huggingface transformers and Fastai v2 are shown. My experience with using Transformers is fairly low, recently I participated in Huggingface's Flax/Jax week and there our team pretrained [Bengali GPT2](https://huggingface.co/flax-community/gpt2-bengali) and [T5](https://huggingface.co/flax-community/Bengali-t5/tree/main), so I was looking into Huggingface course and documentation to learn more about how to finetune transformers. Previously my thesis journal paper for undergraduate ["Deep Learning based Question Answering System in Bengali"](https://www.tandfonline.com/doi/full/10.1080/24751839.2020.1833136) worked on Bangla Question Answering with transformers, but I had stuck more to given scripts back then and focused more on synthetic dataset construction via translation and handling data corruption issues. 
So this post will focus more on the high level API of ```Blurr``` and the components of Huggingface and Fastai that are relevant for getting started quickly.!pip install -Uqq transformers datasets tqdm !pip install -Uqq ohmeow-blurr !pip install -Uqq wandb from google.colab import drive drive.mount('/content/drive')Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).Importsimport warnings warnings.filterwarnings('ignore') import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import wandb from transformers import * from fastai.text.all import * from fastai.callback.wandb import * from blurr.data.all import * from blurr.modeling.all import *We will use Weights and Biases for tracking experiments and runs. Project page : https://wandb.ai/tasmiah-tahsin/fake-news-blurrwandb.login()wandb: Currently logged in as: tasmiah-tahsin (use `wandb login --relogin` to force relogin)Load dataset This dataset has been downloaded from [Kaggle](https://www.kaggle.com/cryptexcode/banfakenews). Note that there are four files in the given dataset, but we use only two of them (Authentic-48K and Fake-1K) here, because the other files contain labelled fake and authentic news, labelled in the sense of what type of fake news it is: clickbait, satire, misleading or false context. For the current preliminary phase we stick to binary classification of whether a news article is fake or authentic. We also concatenate the headline and content of each news article during preprocessing and combine the fake and authentic news datasets before sending them to the Blurr dataloaders. This dataset is heavily imbalanced, so I'll take approximately half of the authentic news set, since with the full set each epoch takes around an hour at a batch size of four. ![](https://github.com/Tahsin-Mayeesha/tahsin_mayeesha/raw/master/images/fakenews/dataset_preview.png)fake = pd.read_csv("/content/drive/MyDrive/fake news/Fake-1K.csv") authentic = pd.read_csv("/content/drive/MyDrive/fake news/Authentic-48K.csv",engine='python',error_bad_lines=False,warn_bad_lines=True,nrows=15000) df = pd.concat([authentic[['headline','content','label']],fake[['headline','content','label']]]) df.reset_index(drop=True,inplace=True) print(authentic.shape, fake.shape) df['text'] = df['headline'] + df['content'] df = df.drop(['headline','content'],axis=1) df.head(1) df.label=df.label.map({1:"Authentic",0:"Fake"}) from sklearn.model_selection import train_test_split train, valid = train_test_split(df, test_size=0.2)We will use ```Blurr```'s high level API for sequence classification with the pandas dataframe, where the ```BlearnerForSequenceClassification.from_dataframe()``` method takes in a dataframe, splits it on the default ```is_valid``` column using Fastai's [```ColSplitter```](https://docs.fast.ai/data.transforms.htmlColSplitter) into train and validation splits, constructs the datablock and dataloaders and uses them for training. So we add an ```'is_valid'``` column to the dataframe. There are other ways of splitting the data available in Fastai, like ```RandomSubsetSplitter```, where we can randomize the data inside a dataframe. 
Since we used scikit-learns train test split to shuffle the dataframe for now we can go with Column based splitting.train['is_valid'] = False valid['is_valid'] = True final_df = pd.concat([train,valid],axis=0) final_df.shape final_df.head() final_df.label.value_counts()Model Training Multilingual BERT Since the original paper also used [multilingual cased bert](https://github.com/google-research/bert/blob/master/multilingual.md) released by Google this post can be considered as an attempt to reproduce the work of BanFakeNews. They trained mbert for 50 epochs with a learning rate of 0.00002 and optimizer Adam. The batch size was 32. The overall F1 score after training for 50 epochs on this dataset was .99 and f1 for fake class was 0.68. Multilingual bert has been pretrained on 104 languages including bengali with wordpiece tokenization. As bengali is already included it makes it a valid choice for current bangla text classification task. Information for this model are : 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters. As the size of the language corpora varied greatly for low resource languages exponential weighted smoothing was performed for weighting data during the pretraining stage, which results in undersampling high resource languages like english and oversampling low resource languages like Bengali. Mbert does not use any marker for input language to enable zero shot training.wandb_init_kwargs = { 'reinit': True, 'project': "fake-news-blurr", 'entity': "tasmiah-tahsin", 'notes': 'Finetuning banfakenews with multilingual bert via Blurr', 'tags': ['mbert', 'fake-news-classification', 'blurr'] } wandb.init(**wandb_init_kwargs)Since I'm fairly new in ```blurr``` I'm using the high level API, but the key ideas are following. ```blurr``` is integrating two frameworks. Here ```Fastai``` is providing the ```datablock```, ```learner```, ```learning rate finder``` functionalities with Leslie Smith's 1cycle policy components, while huggingface transformers is providing the ready to use transformer model configuration and architectures made publicly available from huggingface hub(in general terms, but huggingface also has its own datasets library which integrates well with blurr, and fastai also provides pretrained language models based on LSTM like [ULMFiT](https://docs.fast.ai/tutorial.text.html) and [MultiFiT](https://nlp.fast.ai/classification/2019/09/10/multifit.html). Fastai's datablock API works like a specification for quickly loading a data into a model. The blocks are specific steps which can be mixed/matched for training with its various transforms and splitting functions along with visualization capacities. Datablocks, Callbacks, and other fastai concepts are explained in the paper ["fastai: A Layered API for Deep Learning"](https://arxiv.org/abs/2002.04688). Under the hood ```blurr``` is providing wrappers for the huggingface transformers and for finetuning the parameters of the model with enabling discriminative learning rate like used in[ULMFiT](https://paperswithcode.com/method/ulmfit). Discriminative learning rate refers to using variable learning rates for different layers of a network while performing transfer learning.The low level API for ```blurr``` works with datablocks and dataloaders creating the mini batches which are combined with the huggingface model architecture, optimizer and loss function inside a ```learner```. 
The mid level API contains ```BLearner```, and the highest level API contains task specific learners like the one I'm using here, ```BlearnerForSequenceClassification```.from blurr.modeling.core import BlearnerForSequenceClassification pretrained_model_name = "bert-base-multilingual-cased" learn = BlearnerForSequenceClassification.from_dataframe(final_df, pretrained_model_name, dl_kwargs={ 'bs': 4}) learn.lr_find(suggest_funcs=[minimum, steep, valley, slide])I initially unfroze all the layers and set the maximum learning rate to 1e-2, but the results were pretty bad. So I reduced the learning rate, decided not to do full unfreezing, and retrained the model again. The original paper's model was trained for 50 epochs; here we are experimenting with only 5 epochs. The Fastai documentation recommends setting the learning rate about one order of magnitude lower than the minimum, so I went with 1e-3.#learn.unfreeze() learn.fit_one_cycle(5, lr_max=1e-3,cbs=[WandbCallback(log_preds=False, log_model=False)])Could not gather input dimensionsWe can see some of the model's results here. Our model trains on half of the dataset and achieves around 0.80 in overall f1. It is likely that if the model is trained longer it will achieve better performance. I might retrain it later on the full data.learn.show_results(learner=learn, max_n=4,trunc_at=200) wandb.finish()![](../images/fakenews/wandb.png) Model Sharing To upload our model to the Huggingface hub we can use the push_to_hub method available on the models. The details can be found [here](https://huggingface.co/transformers/model_sharing.html). We install git-lfs since the tokenizer and the model files are fairly large. After uploading the model to the Huggingface hub we will also use the ```pipeline``` functionality from transformers and combine it with the transformers-interpret library to see how the model weights each of the input tokens when making predictions in the section below.!sudo apt-get install git-lfs !transformers-cli login !git config --global user.email "" !git config --global user.name "Tahsin-Mayeesha" blurr_tfm = get_blurr_tfm(learn.dls.before_batch) blurr_tfm.hf_model.push_to_hub("bangla-fake-news-mbert",use_temp_dir=True) blurr_tfm.hf_tokenizer.push_to_hub("bangla-fake-news-mbert",use_temp_dir=True)Explaining predictions The transformers-interpret library shows weights for the input tokens after making predictions and produces some visualizations. The tokens are split into subwords as per the tokenizer.!pip install transformers-interpret from transformers import AutoModelForSequenceClassification, AutoTokenizer model_name = "Tahsin-Mayeesha/bangla-fake-news-mbert" model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) text = "অভিনেতা আফজাল শরীফকে ২০ লাখ টাকার অনুদান অসুস্থ অভিনেতা আফজাল শরীফকে চিকিৎসার জন্য ২০ লাখ টাকা অনুদান দিয়েছেন প্রধানমন্ত্রী শেখ হাসিনা।" # With both the model and tokenizer initialized we are now able to get explanations on an example text. 
from transformers_interpret import SequenceClassificationExplainer cls_explainer = SequenceClassificationExplainer( model, tokenizer) word_attributions = cls_explainer(text) word_attributions[0:10] cls_explainer.visualize()Model Evaluation - Classification Metrics - Part 2import numpy as np import pandas as pd from sklearn import datasets from sklearn import linear_model from sklearn import model_selection from sklearn import metrics from sklearn import preprocessing import matplotlib.pyplot as plt X, y = datasets.load_iris(return_X_y=True) X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, stratify=y, random_state=42) #model = linear_model.LogisticRegression(multi_class='auto', solver='lbfgs') model = linear_model.LogisticRegression(multi_class='auto', solver='lbfgs', max_iter=130) model.fit(X_train, y_train) y_pred = model.predict(X_test)Confusion Matrixmetrics.confusion_matrix(y_test, y_pred)Classification Reportprint(metrics.classification_report(y_test, y_pred))precision recall f1-score support 0 1.00 1.00 1.00 15 1 0.88 0.93 0.90 15 2 0.93 0.87 0.90 15 accuracy 0.93 45 macro avg 0.93 0.93 0.93 45 weighted avg 0.93 0.93 0.93 45Probability EstimatesThe probability of each sample for every class of the model.y_probs = model.predict_proba(X_test) for i in range(5): linha = y_probs[i, :] print(linha.round(4), linha.sum(), y_pred[i])[0. 0.033 0.967] 1.0 2 [0.0046 0.763 0.2324] 0.9999999999999999 1 [0.0009 0.506 0.4931] 1.0 1 [0.0052 0.661 0.3339] 1.0 1 [0.0017 0.4091 0.5892] 1.0 2Log Loss MetricLog Loss is the negative log of the conditional probability of the true labels given a classifier's predicted probabilities.- It takes the probability of the prediction into account based on how much it deviates from the actual label.- It uses the negative log to make it easier to compare the performance of different models.- It is widely used for multi-class classification.- It penalizes false classifications.- Values close to zero indicate good model performance.![image.png](attachment:image.png)**Log Loss** and **Cross-Entropy** are slightly different depending on the context, but in machine learning, when computing error rates between 0 and 1, they are equivalent (same results). MinMax RuleWhen computing Log Loss, predicted probability values of exactly 0 and 1 are undefined. To avoid this problem, Log Loss adjusts the predicted probabilities (p) using a small value (eps / epsilon) according to the following formula:$$ max( min(p, 1−10^{−15}), 10^{-15}) $$ Log LossFor 2 classes (binary classification):$$ - ( y \cdot \log{(p)} + (1 - y) \cdot \log{(1 - p)} ) $$For more than 2 classes:$$ - \sum_{c=1}^{M}{ y_{o,c} \log{(p_{o,c})} } $$metrics.log_loss([0, 1], [0.25, 0.75]) metrics.log_loss([1, 0], [0.75, 0.25]) metrics.log_loss([0, 1], [0.75, 0.25]) def logloss(y_true, y_prob, eps=1e-15): p = np.clip(y_prob, eps, 1 - eps) return -np.log(1 - p) if y_true == 0 else -np.log(p) logloss(0, 0.25) logloss(0, 0.75)Computing it for the iris datasetmetrics.log_loss(y_test, y_probs)
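To make the MinMax rule and the multi-class formula above concrete, here is a small manual cross-check (a sketch that assumes y_test holds integer class labels and y_probs is the predict_proba output from the cells above); the result should closely match metrics.log_loss(y_test, y_probs):

# Manual multi-class log loss: the mean negative log of the probability assigned to the
# true class, after clipping with a small epsilon (the MinMax rule) and renormalizing
# each row so the clipped probabilities still sum to 1.
eps = 1e-15
p = np.clip(y_probs, eps, 1 - eps)
p = p / p.sum(axis=1, keepdims=True)
manual_log_loss = -np.mean(np.log(p[np.arange(len(y_test)), y_test]))
print(manual_log_loss)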
ROC Curve Metric - Receiver Operating CharacteristicThe ROC curve is a way to visualize the performance of a binary classifier. ![image.png](attachment:image.png) AUC - Area Under the Curve ![image.png](attachment:image.png)np.unique(y_test) fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 0], pos_label=0) print('x', fpr.round(4)) print('y', tpr.round(4)) print('AUC', metrics.auc(fpr, tpr)) plt.plot([0, 1], [0, 1], '--') plt.plot(fpr, tpr) plt.xlabel('FPR') plt.ylabel('TPR') plt.title('ROC Curve') fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 1], pos_label=1) print('x', fpr.round(4)) print('y', tpr.round(4)) print('AUC', metrics.auc(fpr, tpr)) fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 2], pos_label=2) print('x', fpr.round(4)) print('y', tpr.round(4)) print('AUC', metrics.auc(fpr, tpr)) plt.plot([0, 1], [0, 1], '--') plt.plot(fpr, tpr) plt.xlabel('FPR') plt.ylabel('TPR') plt.title('ROC Curve')Another way to compute AUCy_test_binary_2 = np.where(y_test == 2, 1, 0) metrics.roc_auc_score(y_test_binary_2, y_probs[:, 2])# preamble to be able to run notebooks in Jupyter and Colab try: from google.colab import drive import sys drive.mount('/content/drive') notes_home = "/content/drive/My Drive/CSC310/notes/" user_home = "/content/drive/My Drive/" sys.path.insert(1,notes_home) # let the notebook access the notes folder except ModuleNotFoundError: notes_home = "" # running native Jupyter environment -- notes home is the same as the notebook user_home = "" # under Jupyter we assume the user directory is the same as the notebookClassification Confidence Intervals **Observation:** It does not matter how careful we are with our model evaluation techniques, there remains a fundamental uncertainty about the ability of our training data to effectively represent our (possibly infinite) data universe.This uncertainty is reflected in our model evaluation. If our training data is a poor representation of the data universe then the models we construct using it will generalize poorly to the rest of the data universe. If our training data is a good representation of the data universe then we can expect that our model will generalize well.Here we will deal with this uncertainty using *confidence intervals*. First, let us define confidence intervals formally. Given a model accuracy, *acc*, the confidence interval is defined as the probability *p* that our model accuracy *acc* lies between some lower bound *lb* and some upper bound *ub*, \begin{equation}Pr(lb ≤ acc ≤ ub) = p\end{equation} Paraphrasing this equation with *p = 95%*:>We are 95% sure that our model accuracy is not worse than *lb* and not better than *ub*.Ultimately we are interested in the lower and upper bounds of the 95% confidence interval. We can use the following formulas to compute the bounds: \begin{equation}ub = acc + 1.96\sqrt{\frac{acc(1-acc)}{n}}\end{equation} \begin{equation}lb = acc - 1.96\sqrt{\frac{acc(1-acc)}{n}}\end{equation} Here, *n* is the number of observations in the testing dataset used to estimate *acc*. The constant 1.96 is called the *z-score* and expresses the fact that we are computing the 95% confidence interval. 
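As a quick worked example (using the same numbers as the Python example that follows), for *acc* = 0.88 and *n* = 100 the half-width of the interval is \begin{equation}1.96\sqrt{\frac{0.88(1-0.88)}{100}} \approx 0.064\end{equation} so the 95% confidence interval is roughly (0.82, 0.94).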
Classification Confidence Intervals in Python# cross-validation Iris import pandas as pd import numpy as np np.set_printoptions(formatter={'float_kind':"{:3.2f}".format}) from sklearn import tree # grab cross validation code from sklearn.model_selection import cross_val_score # get data df = pd.read_csv(notes_home+"assets/iris.csv") X = df.drop(['id','Species'],axis=1) y = df['Species'] # set up the model model = tree.DecisionTreeClassifier(criterion='entropy', max_depth=2) # do the 5-fold cross validation scores = cross_val_score(model, X, y, cv=5) print("Fold Accuracies: {}".format(scores))Fold Accuracies: [0.93 0.97 0.90 0.87 1.00]Let's do a simple example using the function classification_confint.# compute 95% confidence intervals for classification and regression # problems def classification_confint(acc, n): ''' Compute the 95% confidence interval for a classification problem. acc -- classification accuracy n -- number of observations used to compute the accuracy Returns a tuple (lb,ub) ''' import math interval = 1.96*math.sqrt(acc*(1-acc)/n) lb = max(0, acc - interval) ub = min(1.0, acc + interval) return (lb,ub) def regression_confint(rs_score, n, k): ''' Compute the 95% confidence interval for a regression problem. rs_score -- R^2 score n -- number of observations used to compute the R^2 score k -- number of independent variables in dataset Returns a tuple (lb,ub) Reference: https://books.google.com/books?id=gkalyqTMXNEC&pg=PA88#v=onepage&q&f=false ''' import math interval = 2*math.sqrt((4*rs_score*(1-rs_score)**2*(n-k-1)**2)/((n**2 - 1)*(n+3))) lb = max(0, rs_score - interval) ub = min(1.0, rs_score + interval) return (lb,ub) observations = 100 acc = .88 lb,ub = classification_confint(acc,observations) print('Accuracy: {} ({:3.2f},{:3.2f})'.format(acc,lb, ub))Accuracy: 0.88 (0.82,0.94)Now, let's do an actual example using the Wisconsin breast cancer dataset. We want to print out the testing accuracy together with it's 95% confidence interval.import pandas as pd from sklearn import tree from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split # read the data df = pd.read_csv(notes_home+"assets/wdbc.csv") # set up the feature matrix and target vector X = df.drop(['ID','Diagnosis'],axis=1) y = df['Diagnosis'] # split the data X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=2) # set up the tree model object - limit the complexity to put us somewhere in the middle of the graph. model = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4, random_state=1) # fit the model on the training set of data model.fit(X_train, y_train) # Test results: evaluate the model on the testing set of data y_test_model = model.predict(X_test) acc = accuracy_score(y_test, y_test_model) observations = X_test.shape[0] lb,up = classification_confint(acc, observations) print("Accuracy: {:3.2f} ({:3.2f},{:3.2f})".format(acc,lb,ub))Accuracy: 0.91 (0.86,0.94)Regression Confidence Intervals When performing regression we use the $R^2$ score to examine the quality of our models. Given that we only use a small training dataset for fitting the model compared to the rest of the data universe it is only natural to ask what the 95% confidence interval for this score might be. 
We have a formula for that -- it is not as straightforward as the confidence interval for classification, \begin{equation}lb = R^2 - 2\sqrt{\frac{4R^2(1-R^2)^2(n-k-1)^2}{(n^2-1)(n+3)}}\end{equation} \begin{equation}ub = R^2 + 2\sqrt{\frac{4R^2(1-R^2)^2(n-k-1)^2}{(n^2-1)(n+3)}}\end{equation} Here, *n* is the number of observations in the validation/testing dataset and *k* is the number of independent variables.# from assets.confint import regression_confint rs_score = .75 observations = 100 variables = 4 # independent variables lb,ub = regression_confint(rs_score, observations, variables) print("R^2 Score: {:3.2f} ({:3.2f}, {:3.2f})".format(rs_score,lb,ub))R^2 Score: 0.75 (0.67, 0.83)Let's look at an actual regression problem and compute the $R^2$ score and its 95% confidence interval. We will use the cars problem from before.import numpy as np import pandas from sklearn.tree import DecisionTreeRegressor #from assets.confint import regression_confint # get our dataset cars_df = pandas.read_csv(notes_home+"assets/cars.csv") # build model object model = DecisionTreeRegressor(max_depth=None) # fit model # We have to reshape the values array to make 'fit' happy because # the array only has a single feature model.fit(cars_df['speed'].values.reshape(-1,1),cars_df['dist']) # R^2 score rs_score = model.score(cars_df['speed'].values.reshape(-1,1),cars_df['dist']) observations = cars_df.shape[0] variables = 1 lb,ub = regression_confint(rs_score, observations, variables) # print out R^2 score with its 95% confidence interval print("R^2 Score: {:3.2f} ({:3.2f}, {:3.2f})".format(rs_score,lb,ub))R^2 Score: 0.79 (0.69, 0.89)Statistical Significance Besides giving us an idea of the uncertainty of our model, the 95% confidence intervals also have something to say about the significance of scores of different models.
That is, if the confidence intervals overlap, then the difference in performance between two models on the same dataset is not statistically significant.Consider the following,#from assets.confint import classification_confint observations = 100 # first classifier acc1 = .88 lb1,ub1 = classification_confint(acc1,observations) print('Accuracy: {} ({:3.2f},{:3.2f})'.format(acc1,lb1, ub1)) # second classifier acc2 = .92 lb2,ub2 = classification_confint(acc2,observations) print('Accuracy: {} ({:3.2f},{:3.2f})'.format(acc2,lb2, ub2))Accuracy: 0.88 (0.82,0.94) Accuracy: 0.92 (0.87,0.97)Data Cleaning & FeaturesThis step covers the investigation of any remaining missing data, feature selection, and the creation of the tables that will be used in Tableau.INTERIM_DATA_PATH = Path('data/interim') PROCESSED_DATA_PATH = Path('data/processed') # Data Loading SPOTIFY_BILLBOARD_DATA_PATH = INTERIM_DATA_PATH / 'spotify_billboard_data.csv' # Datasets ARTISTS_DATA_FILEPATH = PROCESSED_DATA_PATH / 'artists.csv' LYRICS_DATA_FILEPATH = PROCESSED_DATA_PATH / 'lyrics.csv' TAGS_DATA_FILEPATH = PROCESSED_DATA_PATH / 'tags.csv' SONGS_DATA_FILEPATH = PROCESSED_DATA_PATH / 'songs.csv' PROCESSED_DATA_PATH.mkdir(exist_ok=True, parents=True)Data Loadingspotify_billboard_df = pd.read_csv(SPOTIFY_BILLBOARD_DATA_PATH, index_col=0) print(spotify_billboard_df.shape) print(spotify_billboard_df.columns) spotify_billboard_df.head()(4028, 40) Index(['index', 'name', 'artist', 'api_name', 'api_artists', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'type', 'uri', 'track_href', 'analysis_url', 'duration_ms', 'time_signature', 'year', 'lyrics', 'tags', 'num_syllables', 'pos', 'year.1', 'fog_index', 'flesch_index', 'num_words', 'num_lines', 'sentiment_neg', 'sentiment_neu', 'sentiment_pos', 'sentiment_compound', 'title', 'f_k_grade', 'difficult_words', 'num_dupes'], dtype='object')Missing ValuesLooking at the missing values, there is no major problem, so nothing needs to be done.msno.matrix(spotify_billboard_df, figsize=(20, 5))Feature SelectionBelow are the features that we are going to use for the exploration.FEATURE_COLUMNS = [ # Identification Features 'name', 'artist', 'api_artists', # Spotify Features 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature', # Billboard Features 'year', 'lyrics', 'tags', 'num_syllables', 'pos', 'fog_index', 'flesch_index', 'num_words', 'num_lines', 'sentiment_neg','sentiment_neu', 'sentiment_pos', 'sentiment_compound', 'f_k_grade', 'difficult_words', 'num_dupes' ] data_df = spotify_billboard_df[FEATURE_COLUMNS]Creating and splitting data into multiple tablesTo help the exploration, my approach here was to split the data into 4 main tables: (i) artists table, (ii) lyrics table, (iii) tags table and (iv) songs table. All these tables are indexed by a numerical index. Artists tableThis table contains all the artists credited on the song (not only the main artist).
This information was retrieved from Spotify API.artists_df = pd.DataFrame() artists_col = data_df.api_artists.dropna() for ix, value in tqdm(artists_col.iteritems(), total=len(artists_col)): if ix is not None and value is not None: artists = [(ix, artist) for artist in value.split(",")] artists_df = artists_df.append(artists) artists_df = artists_df.rename(columns = {0: "index", 1: "artist"}) artists_df = artists_df.set_index("index") artists_df.to_csv(ARTISTS_DATA_FILEPATH) artists_df.head()Lyrics TableTo understand better the lyrics of the songs, each row of this table represents a lemma found on the song lyrics. Other informations presented in the rows are if they are a stop word or not and the frequency of the lemma in the song. **NOTE:** For more information about lemmas, read [Lemmatisation](https://en.wikipedia.org/wiki/Lemmatisation) on Wikipedia.lyrics_list = [] for row in tqdm(data_df.itertuples(), total=len(data_df)): # Song & Lyrics Info song_name = row.name song_artist = row.artist song_api_index = row.Index doc = nlp(row.lyrics.lower()) # Tokens and Counts word_freq_non_stop = Counter([ token.lemma_ for token in doc if token.is_stop != True and token.is_punct != True and '\n' not in token.text ]) unique_words_non_stop = list(word_freq_non_stop.keys()) freq_non_stop = list(word_freq_non_stop.values()) word_freq_stop = Counter([ token.lemma_ for token in doc if token.is_stop == True and token.is_punct != True and '\n' not in token.text ]) unique_words_stop = list(word_freq_stop.keys()) freq_stop = list(word_freq_stop.values()) # Creating DataFrame number_of_words = len(unique_words_stop) + len(unique_words_non_stop) lyrics_song_df = pd.DataFrame({ "index": [song_api_index] * number_of_words, "song": [song_name] * number_of_words, "artist": [song_artist] * number_of_words, "lemma": unique_words_stop + unique_words_non_stop, "count": freq_stop + freq_non_stop, "is_stop": ([True] * len(unique_words_stop)) + ([False] * len(unique_words_non_stop)) }) lyrics_list.append(lyrics_song_df) lyrics_df = pd.concat(lyrics_list) lyrics_df = lyrics_df.set_index("index") lyrics_df.to_csv(LYRICS_DATA_FILEPATH) lyrics_df.head()Tags TableAgain here, each rows of the table is composed by a tag that the song has received from the billboard datasettags_df = pd.DataFrame() tags_col = data_df.tags.dropna() for ix, value in tqdm(tags_col.iteritems(), total=len(tags_col)): if ix is not None and value is not None: tags = [(ix, tag) for tag in ast.literal_eval(value)] tags_df = tags_df.append(tags) tags_df = tags_df.rename(columns = {0: "index", 1: "tag"}) tags_df = tags_df.set_index("index") tags_df.to_csv(TAGS_DATA_FILEPATH) tags_df.head()100%|██████████| 4028/4028 [00:08<00:00, 485.24it/s]Songs TableThe songs table is composed by any other information not included in the other tables.data_df = data_df.rename(columns = {'artist': 'main_artist'}) data_df = data_df.drop(columns=['api_artists', 'tags']) data_df.to_csv(SONGS_DATA_FILEPATH) data_df.head()**As it is written in the title, in this notebook we generate embedding for each node by training a Node2Vec model. 
Source : [Nodevectors Github](https://github.com/VHRanger/nodevectors)**!pip install nodevectors import networkx as nx import gzip import pickle import os from nodevectors import Node2Vec from tqdm import tqdm def save(object, filename, protocol = 0): """Saves a compressed object to disk """ file = gzip.GzipFile(filename, 'wb') file.write(pickle.dumps(object, protocol)) file.close() G = nx.read_edgelist('/content/drive/MyDrive/altegrad_datachallenge/collaboration_network.edgelist', delimiter=' ', nodetype=int) g2v = Node2Vec(n_components=128, walklen=60, threads=os.cpu_count(), w2vparams={'window': 10, 'negative':5, 'iter': 20, 'batch_words':128}) g2v.fit(G) # It took about 7 hours emb_per_nodes = {} for node in tqdm(list(G.nodes())): emb_per_nodes[node] = g2v.predict(node) save(emb_per_nodes, '/content/drive/MyDrive/altegrad_datachallenge/Node2Vec.txt') # Save the node embedding g2v.save('/content/drive/MyDrive/altegrad_datachallenge/node2vecmodel') # save the node embedding modelBaseline ModelsThe purpose of this notebook is to implement simple baseline text classification modelsfrom google.colab import drive drive.mount('/content/gdrive') %cd gdrive/MyDrive/Capstone/ import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt import spacy import nltk from nltk import word_tokenize from spacy_preprocessor import SpacyPreprocessor import string import re import sklearn from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.feature_extraction import stop_words from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.svm import LinearSVC from sklearn import metrics from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix from time import time # other from tqdm import tqdm, tqdm_notebook, tqdm_pandas tqdm.pandas() tqdm_notebook().pandas()/usr/local/lib/python3.7/dist-packages/tqdm/std.py:658: FutureWarning: The Panel class is removed from pandas. Accessing it from the top-level namespace will also be removed in the next version from pandas import Panel /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:36: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`Text Preprocessing# importing cleaned data data = pd.read_csv('data/timeless_total_cleaned_df.csv')ModelingX = data.pp_text y = data.target display(X.shape, y.shape) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) count_vect = CountVectorizer() X_train_dtm = count_vect.fit_transform(X_train) X_test_dtm = count_vect.transform(X_test) print(X_train_dtm.shape, X_test_dtm.shape)(7984, 537109) (2662, 537109)Multinomial Naive Bayes CountVectorizernb = MultinomialNB() %time nb.fit(X_train_dtm, y_train) y_pred_class = nb.predict(X_test_dtm) import itertools from sklearn.metrics import roc_auc_score def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
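Here, `cm` is the array returned by sklearn.metrics.confusion_matrix and `classes` holds the matching display labels used for the axis ticks.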
""" if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label',fontsize=15) plt.xlabel('Predicted label',fontsize=15) cnf_matrix = confusion_matrix(y_test, y_pred_class) plt.figure(figsize=(8,6)) plot_confusion_matrix(cnf_matrix, classes=['Not Suicidal','Suicidal'],normalize=True, title='Confusion matrix with all features') y_pred_prob = nb.predict_proba(X_test_dtm)[:, 1] print("Accuracy: ", accuracy_score(y_test, y_pred_class)) print("ROC_AOC_Score: ", roc_auc_score(y_test, y_pred_prob)) print("F1_Score: ", f1_score(y_test, y_pred_class)) print("Precision: ", precision_score(y_test, y_pred_class)) print("Recall: ", recall_score(y_test, y_pred_class))Accuracy: 0.628099173553719 ROC_AOC_Score: 0.6533997511730093 F1_Score: 0.630321135175504 Precision: 0.6298507462686567 Recall: 0.6307922272047832CountVectorizerRetrying with different number (lower) of max_features.vect = CountVectorizer(max_features=15_000, ngram_range=(1, 2)) X_train_dtm = vect.fit_transform(X_train) X_test_dtm = vect.transform(X_test) nb = MultinomialNB() %time nb.fit(X_train_dtm, y_train) y_pred_class = nb.predict(X_test_dtm) print("Accuracy: ", metrics.accuracy_score(y_test, y_pred_class)) print("ROC_AOC_Score: ", roc_auc_score(y_test, y_pred_prob)) print("F1 Score: ", f1_score(y_test, y_pred_class)) cnf_matrix = confusion_matrix(y_test, y_pred_class) plt.figure(figsize=(8,6)) plot_confusion_matrix(cnf_matrix, classes=['Not Suicidal','Suicidal'],normalize=True,\ title='Confusion matrix with max 5000 features')Logistic Regressiontfidf_vect = TfidfVectorizer(max_features=10_000, ngram_range=(1,2)) X_train_tfidf = tfidf_vect.fit_transform(X_train) X_test_tfidf = tfidf_vect.transform(X_test) print(X_train_tfidf.shape, X_test_tfidf.shape) logreg = LogisticRegression(class_weight="balanced") logreg.fit(X_train_tfidf, y_train) #Make predictions on test data y_pred_class = logreg.predict(X_test_tfidf) #calculate evaluation measures: print("Accuracy: ", accuracy_score(y_test, y_pred_class)) print("ROC_AOC_Score: ", roc_auc_score(y_test, y_pred_prob)) print("F1_Score: ", f1_score(y_test, y_pred_class)) print("Precision: ", precision_score(y_test, y_pred_class)) print("Recall: ", recall_score(y_test, y_pred_class)) cnf_matrix = confusion_matrix(y_test, y_pred_class) plt.figure(figsize=(8,6)) plot_confusion_matrix(cnf_matrix, classes=['Not Suicidal','Suicidal'],normalize=True, title='Confusion matrix with normalization')Accuracy: 0.6945905334335086 ROC_AOC_Score: 0.6533997511730093 F1_Score: 0.6974320803870487 Precision: 0.6945885841363973 Recall: 0.7002989536621823SVCfrom sklearn.svm import LinearSVC classifier = LinearSVC(class_weight='balanced') #instantiate a logistic regression model classifier.fit(X_train_dtm, y_train) #fit the model with training data #Make predictions on test data y_pred_class = classifier.predict(X_test_dtm) #calculate evaluation measures: print("Accuracy: ", accuracy_score(y_test, y_pred_class)) print("ROC_AOC_Score: ", roc_auc_score(y_test, y_pred_prob)) print("F1_Score: ", f1_score(y_test, y_pred_class)) 
print("Precision: ", precision_score(y_test, y_pred_class)) print("Recall: ", recall_score(y_test, y_pred_class)) cnf_matrix = confusion_matrix(y_test, y_pred_class) plt.figure(figsize=(8,6)) plot_confusion_matrix(cnf_matrix, classes=['Not Suicidal','Suicidal'],normalize=True, title='Confusion matrix with normalization')/usr/local/lib/python3.7/dist-packages/sklearn/svm/_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations. "the number of iterations.", ConvergenceWarning)IMPORTING DATAimport pandas as pd import numpy as np import matplotlib.pyplot as plt import random import seaborn as sns from fbprophet import Prophet chicago_df_1 = pd.read_csv('/content/Chicago_Crimes_2001_to_2004.csv', error_bad_lines=False) chicago_df_2 = pd.read_csv('/content/Chicago_Crimes_2005_to_2007.csv', error_bad_lines=False) chicago_df_3 = pd.read_csv('/content/Chicago_Crimes_2008_to_2011.csv', error_bad_lines=False) chicago_df_4 = pd.read_csv('/content/Chicago_Crimes_2012_to_2017.csv', error_bad_lines=False) chicago_df = pd.concat([chicago_df_1, chicago_df_2, chicago_df_3], ignore_index=False, axis=0)EXPLORE THE DATAchicago_df.head(10) chicago_df.tail(10) print(chicago_df_1.shape) print(chicago_df_2.shape) print(chicago_df_3.shape) print(chicago_df_4.shape) # Let's see how many null elements are contained in the data plt.figure(figsize=(10,10)) sns.heatmap(chicago_df.isnull(), cbar = False, cmap = 'YlGnBu') # ID Case Number Date Block IUCR Primary Type Description Location Description Arrest Domestic Beat District Ward Community Area FBI Code X Coordinate Y Coordinate Year Updated On Latitude Longitude Location chicago_df.drop(['Unnamed: 0', 'Case Number', 'Case Number', 'IUCR', 'X Coordinate', 'Y Coordinate','Updated On','Year', 'FBI Code', 'Beat','Ward','Community Area', 'Location', 'District', 'Latitude' , 'Longitude'], inplace=True, axis=1) #Assembling a datetime by rearranging the dataframe column "Date". chicago_df.Date = pd.to_datetime(chicago_df.Date, format='%m/%d/%Y %I:%M:%S %p') chicago_df # setting the index to be the date chicago_df.index = pd.DatetimeIndex(chicago_df.Date) chicago_df #how many crimes are done of each type chicago_df['Primary Type'].value_counts() chicago_df['Primary Type'].value_counts().iloc[:15] chicago_df['Primary Type'].value_counts().iloc[:15].index plt.figure(figsize = (15, 10)) sns.countplot(y= 'Primary Type', data = chicago_df, order = chicago_df['Primary Type'].value_counts().iloc[:15].index) #based on location description plt.figure(figsize = (15, 10)) sns.countplot(y= 'Location Description', data = chicago_df, order = chicago_df['Location Description'].value_counts().iloc[:15].index) #how many crime occous in aspecific year chicago_df.resample('Y').size() # Resample is a Convenience method for frequency conversion and resampling of time series. plt.plot(chicago_df.resample('Y').size()) plt.title('Crimes Count Per Year') plt.xlabel('Years') plt.ylabel('Number of Crimes') #based on month chicago_df.resample('M').size() # Resample is a Convenience method for frequency conversion and resampling of time series. plt.plot(chicago_df.resample('M').size()) plt.title('Crimes Count Per Month') plt.xlabel('Months') plt.ylabel('Number of Crimes') #quarter chicago_df.resample('Q').size() # Resample is a Convenience method for frequency conversion and resampling of time series. 
plt.plot(chicago_df.resample('Q').size()) plt.title('Crimes Count Per Quarter') plt.xlabel('Quarters') plt.ylabel('Number of Crimes')PREPARING THE DATAchicago_prophet = chicago_df.resample('M').size().reset_index() chicago_prophet chicago_prophet.columns = ['Date', 'Crime Count'] chicago_prophet chicago_prophet_df = pd.DataFrame(chicago_prophet) chicago_prophet_dfMAKE PREDICTIONS¶chicago_prophet_df.columns chicago_prophet_df_final = chicago_prophet_df.rename(columns={'Date':'ds', 'Crime Count':'y'}) chicago_prophet_df_final m = Prophet() m.fit(chicago_prophet_df_final) # Forcasting into the future future = m.make_future_dataframe(periods=365*2) forecast = m.predict(future) forecast figure = m.plot(forecast, xlabel='Date', ylabel='Crime Rate') figure3 = m.plot_components(forecast)Database with one row for each publication-country associationdf <- read_csv("../5_Final_databases/output/database_multi_rows_each_paper_one_per_country.csv") sprintf("%i x %i dataframe", nrow(df), ncol(df))Parsed with column specification: cols( .default = col_double(), Country = col_character(), title = col_character(), ISO_3 = col_character(), Region = col_character(), authors = col_character(), source = col_character(), doi = col_character(), abstract = col_character(), author_keywords = col_character(), model = col_character(), scopus_number = col_character(), WOS_number = col_character() ) See spec(...) for full column specifications.Get the list of topics ordered by descending topic representationtopics_table <- read_csv("../4_Manual_treatment_topic_table/ouput/Table_topics.csv") sprintf("%i x %i dataframe", nrow(topics_table), ncol(topics_table)) topics_table_ordered <- topics_table %>% arrange(desc(T0.02)) topics_list <- as.character(topics_table_ordered$Name)Function to create histogram per topic with the five countries most often associated to this topicplot_topic <- function(topic) { df_topic <- df %>% dplyr::select(Country,Region,topic)%>% dplyr::group_by(Country)%>% dplyr::summarise(count = sum(!!sym(topic)), Region=first(Region)) #dataframe with the five countries most often associated to the topic head_topic <- head(df_topic[order(-df_topic$count),],5) head_topic <- data.frame(head_topic) #aggregation of all other countries row_other <- c("Others",sum(tail(df_topic[order(-df_topic$count),],132)$count),NA) head_topic[6,] <- row_other #rank column for apparition order in the graph (always row Other at the end) head_topic[,4] <- c(1,2,3,4,5,6) names(head_topic)[4] <- "Rank" head_topic$count <- as.double(head_topic$count) head_topic$Region <- factor(head_topic$Region,levels = c("Asia","Africa","Europe","European Union","North America", "Latin America","Oceania")) plot <- ggplot(data=head_topic, aes(x=reorder(Country,-Rank), y=count,fill=Region))+ geom_bar(stat ="identity", width=0.9)+ scale_fill_manual(values=c('Asia'='darkorange', 'European Union'='#7CAE00', 'Europe'='seagreen4', 'North America'='dodgerblue', 'Latin America'='cadetblue3', 'Africa'='orchid', 'Oceania'='coral2'), na.value = 'dimgray', drop = FALSE, label=c('Asia', 'European Union', 'Europe', 'North America', 'Latin America', 'Africa', 'Oceania', 'Mix'))+ coord_flip() + xlab("")+ ylab("Nb of country-study asso")+ ggtitle(topic) + theme_minimal() + theme( title = element_text(size = 18), legend.title = element_text(size = 18), legend.text = element_text(size = 16), axis.text.x = element_text(size = 16), axis.text.y = element_text(size = 16,face ="bold"), axis.title.x = element_text(size = 16, hjust = 0.5,face ="bold"), axis.title.y = 
element_text(size = 16, hjust = 0.5,face ="bold") )+ geom_text(aes(label = count), size = 4, hjust = 1, vjust = 1) return(plot) } plot1 <- plot_topic(topics_list[1]) plot2 <- plot_topic(topics_list[2]) plot3 <- plot_topic(topics_list[3]) plot4 <- plot_topic(topics_list[4]) plot5 <- plot_topic(topics_list[5]) plot6 <- plot_topic(topics_list[6]) plot7 <- plot_topic(topics_list[7]) plot8 <- plot_topic(topics_list[8]) plot9 <- plot_topic(topics_list[9]) plot10 <- plot_topic(topics_list[10]) plot11 <- plot_topic(topics_list[11]) plot12 <- plot_topic(topics_list[12]) plot13 <- plot_topic(topics_list[13]) plot14 <- plot_topic(topics_list[14]) plot15 <- plot_topic(topics_list[15]) plot16 <- plot_topic(topics_list[16]) plot17 <- plot_topic(topics_list[17]) plot18 <- plot_topic(topics_list[18]) plot19 <- plot_topic(topics_list[19]) plot20 <- plot_topic(topics_list[20]) plot21 <- plot_topic(topics_list[21]) plot22 <- plot_topic(topics_list[22]) plot23 <- plot_topic(topics_list[23]) plot24 <- plot_topic(topics_list[24]) plot25 <- plot_topic(topics_list[25]) plot26 <- plot_topic(topics_list[26]) plot27 <- plot_topic(topics_list[27]) plot28 <- plot_topic(topics_list[28]) plot29 <- plot_topic(topics_list[29]) plot30 <- plot_topic(topics_list[30]) plot31 <- plot_topic(topics_list[31]) plot32 <- plot_topic(topics_list[32]) plot33 <- plot_topic(topics_list[33]) plot34 <- plot_topic(topics_list[34]) plot35 <- plot_topic(topics_list[35]) plot36 <- plot_topic(topics_list[36]) plot37 <- plot_topic(topics_list[37]) plot38 <- plot_topic(topics_list[38]) plot39 <- plot_topic(topics_list[39]) options(repr.plot.width=20, repr.plot.height=18) plot1 <- ggarrange(plot1,plot2,plot3,plot4, plot5,plot6,plot7,plot8, plot9,plot10,plot11,plot12, plot13,plot14,plot15,plot16, plot17,plot18,plot19,plot20, common.legend = TRUE, legend = "top", ncol=4, nrow = 5, align = "v" ) plot1 plot2 <- ggarrange(plot21,plot22,plot23,plot24, plot25,plot26,plot27,plot28, plot29,plot30,plot31,plot32, plot33,plot34,plot35,plot36, plot37,plot38,plot39, common.legend = TRUE, legend = "top", ncol=4, nrow = 5, align ="v" ) plot2 ggsave('./output/Fig_SI3_topics_countries1.png', height=20, width=18, plot=plot1) ggsave('./output/Fig_SI4_topics_countries2.png', height=20, width=18, plot=plot2)TVB simulations in nipype!# https://groups.google.com/forum/#!topic/tvb-users/ODsL9bkGLHQ import warnings warnings.filterwarnings('ignore') import os, sys, scipy.io, numpy as np from nipype import Node, Function, Workflow cwd = os.getcwd() # https://miykael.github.io/nipype_tutorial/notebooks/basic_workflow.html def make_model(model_name, parameters):# done import warnings, pickle, os warnings.filterwarnings('ignore') from tvb.simulator.lab import models import numpy as np mod = getattr(models, model_name) model_class = mod(**dict(parameters)) with open("model_class.p", "wb") as f: pickle.dump(model_class, f) model_class = os.path.abspath("model_class.p") return model_class def load_connectivity_mat(in_file, normalize=False): import scipy.io, pickle, os datamat = scipy.io.loadmat(in_file) sc_weights = datamat['sc_weights'] if normalize: sc_weights = sc_weights / sc_weights.max() tract_lengths = datamat['tract_lengths'] scipy.io.savemat('sc_weights.mat',{'sc_weights': sc_weights}) scipy.io.savemat('tract_lengths.mat',{'tract_lengths': tract_lengths}) sc_weights = os.path.abspath("sc_weights.mat") tract_lengths = os.path.abspath("tract_lengths.mat") return sc_weights, tract_lengths def make_connectivity(weights, lengths): import warnings, pickle, os, scipy.io 
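# Note: this helper, like the others in this workflow, saves its result to disk
# (pickle/.mat) and returns an absolute file path rather than the object itself --
# presumably so that only plain strings travel between the nipype workflow nodes.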
warnings.filterwarnings('ignore') weights_mat = scipy.io.loadmat(weights); weights = weights_mat['sc_weights'] lengths_mat = scipy.io.loadmat(lengths); lengths = lengths_mat['tract_lengths'] from tvb.simulator.lab import connectivity conn_class = connectivity.Connectivity(weights=weights, tract_lengths=lengths) with open("conn_class.p", "wb") as f: pickle.dump(conn_class, f) conn_class = os.path.abspath("conn_class.p") return conn_class def make_integrator(integrator_name, base_dt, noise_type, noise_val): import sys, numpy, warnings, pickle, os warnings.filterwarnings('ignore') sys.modules['mtrand'] = numpy.random.mtrand from tvb.simulator.lab import integrators, noise temp_integrator = getattr(integrators,integrator_name) temp_noise = getattr(noise, noise_type) noise = temp_noise(nsig = numpy.array([noise_val])) integrator_class = temp_integrator(dt = base_dt, noise = noise) #integrator_class = temp_integrator(dt = base_dt) with open("integrator_class.p", "wb") as f: pickle.dump(integrator_class, f) integrator_class = os.path.abspath("integrator_class.p") return integrator_class def make_monitors(monitor_types, periods): import warnings, sys, numpy, pickle, os warnings.filterwarnings('ignore') sys.modules['mtrand'] = numpy.random.mtrand from tvb.simulator.lab import monitors monitor_class = [] for i in range(len(monitor_types)): monitor_tmp = getattr(monitors,monitor_types[i]) monitor_tmp2 = monitor_tmp(period = periods[i]) monitor_class.append(monitor_tmp2) monitor_class = tuple(monitor_class) with open("monitor_class.p", "wb") as f: pickle.dump(monitor_class, f) monitor_class = os.path.abspath("monitor_class.p") return monitor_class def run_simulation(model_input, conn_input, integrator_input, monitor_input, global_coupling = 0.1, conduction_speed=3.0, simulation_length=10000.0): import warnings, sys, numpy, pickle, os, scipy.io warnings.filterwarnings('ignore') sys.modules['mtrand'] = numpy.random.mtrand with open(model_input, "rb") as f: model_input = pickle.load(f) with open(conn_input, "rb") as f: conn_input = pickle.load(f) with open(integrator_input, "rb") as f: integrator_input = pickle.load(f) with open(monitor_input, "rb") as f: monitor_input = pickle.load(f) from tvb.simulator.lab import * wm_coupling = coupling.Linear(a = global_coupling) sim = simulator.Simulator(model = model_input, connectivity = conn_input, coupling = wm_coupling, integrator = integrator_input, monitors = monitor_input, simulation_length = simulation_length, conduction_speed = conduction_speed) sim.configure() sim_output = sim.run() scipy.io.savemat('sim_output.mat',{'sim_output': sim_output}) abs_out_file = os.path.abspath("sim_output.mat") # fix this return abs_out_file ##### NIPYPE PORTION # https://miykael.github.io/nipype_tutorial/notebooks/basic_function_interface.html model = Node( Function( input_names=['model_name', 'parameters'], output_names=['model_class'], function=make_model ), name='create_model' ) sc_loader = Node( Function( input_names=['in_file', 'normalize'], output_names=['sc_weights', 'tract_lengths'], function=load_connectivity_mat ), name='load_sc_mat' ) sc = Node( Function( input_names=['weights', 'lengths'], output_names=['conn_class'], function=make_connectivity ), name='create_sc' ) integrator = Node( Function( input_names=['integrator_name','base_dt','noise_type','noise_val'], output_names=['integrator_class'], function=make_integrator ), name='create_integrator' ) monitors = Node( Function( input_names=['monitor_types','periods'], output_names=['monitor_class'], 
function=make_monitors ), name='create_monitors' ) simulate = Node( Function( input_names=['model_input', 'conn_input', 'integrator_input', 'monitor_input', 'global_coupling', 'conduction_speed', 'simulation_length'], output_names=['abs_out_file'], function=run_simulation ), name='create_simulation' ) # https://miykael.github.io/nipype_tutorial/notebooks/basic_workflow.html workflow = Workflow(name='tvb_demo', base_dir=os.getcwd()) workflow.connect([ (model, simulate, [("model_class", "model_input")]), (sc_loader, sc, [("sc_weights", "weights"), ("tract_lengths", "lengths")]), (sc, simulate, [("conn_class", "conn_input")]), (integrator, simulate, [("integrator_class", "integrator_input")]), (monitors, simulate, [("monitor_class", "monitor_input")]) ]) # NOW DEFINE YOUR INPUTS model.inputs.model_name = 'Generic2dOscillator' model.inputs.parameters = [('a',1), ('b',1)] sc_loader.inputs.in_file = os.path.join(cwd, 'input', 'sub-01_connectivity.mat') sc_loader.inputs.normalize = False integrator.inputs.integrator_name = 'HeunStochastic' integrator.inputs.base_dt = 0.1 integrator.inputs.noise_type = 'Additive' #integrator.inputs.noise_val = 0.0001 monitors.inputs.monitor_types = ['Bold', 'TemporalAverage'] monitors.inputs.periods = [2000.0, 10.0] #simulate.inputs.global_coupling = 0.1 #simulate.inputs.conduction_speed = 2.0 simulate.inputs.simulation_length = 10000.0 # ITERABLES integrator.iterables = ("noise_val", [0.0001, 0.001, 0.01]) #simulate.iterables = [('global_coupling', [0.1, 0.5, 1.0])] #sc_loader.iterables = [('in_file', [os.path.join(cwd, 'input', 'sub-01_connectivity.mat'), os.path.join(cwd, 'input', 'sub-02_connectivity.mat'), os.path.join(cwd, 'input', 'sub-03_connectivity.mat')])] simulate.iterables = [('global_coupling', np.arange(0.0, 5.1, 0.5)), ('conduction_speed', [1,2])] # ^ move constants to top node; have initial node with subject list # make datasink at the end to clean things up #def run_simulation(out_file, model_input, conn_input, integrator_input, monitor_input, global_coupling = 0.1, conduction_speed=2.0, simulation_length=1000.0): # Write graph of type orig workflow.write_graph(graph2use='exec', dotfilename='./graph_orig.dot') from IPython.display import Image Image(filename="graph_orig_detailed.png") #workflow.run() workflow.run('MultiProc', plugin_args={'n_procs': 8}) !tree tvb_demo/ -I '*txt|*pklz|_report|*.json|*js|*.dot|*.html'tvb_demo/ ├── _noise_val_0.0001 │   ├── _conduction_speed_1_global_coupling_0.0 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_0.5 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_1.0 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_1.5 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_2.0 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_2.5 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_3.0 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_3.5 │   │   └── create_simulation │   │   └── sim_output.mat │   ├── _conduction_speed_1_global_coupling_4.0 │   │   └── create_si[...]IMAGEurl = "https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html" from webdriver_manager.chrome import ChromeDriverManager def init_browser(): executable_path = {'executable_path': 
ChromeDriverManager().install()} return Browser('chrome', **executable_path, headless=False) browser= init_browser() url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html' browser.visit(url) #create HTMl Object html = browser.html soup = BeautifulSoup(html, "html.parser") print(soup.prettify()) body = soup.find_all("body") div = body[0].find("div", class_="floating_text_area") image = div.find("a") image pic_source = [] pic = image['href'] pic_source.append(pic) featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + pic featured_image_urlMARS FActsurl='https://space-facts.com/mars/' response = requests.get(url) soup = BeautifulSoup(response.text, 'html.parser') table = pd.read_html(url) facts_df = table[0] facts_df.rename(columns = { 0 :"facts", 1 : "values"}) facts_html = facts_df.to_html() facts_html = facts_html.replace("\n","") facts_htmlIMAGES OF THE HEMISPHEREurl = "https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced" response = requests.get(url) soup = BeautifulSoup(response.text, "html.parser") pic = soup.find_all( "div", class_ = "wide-image-wrapper") image = pic[0].find("li") image_url = image.find("a")['href'] hemisphere_title = soup.find("h2", class_="title").text print(hemisphere_title) print(image_url) url_2 = "https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced" response_2 = requests.get(url_2) soup_2 = BeautifulSoup(response_2.text, "html.parser") pic_2 = soup_2.find_all( "div", class_ = "wide-image-wrapper") image_2 = pic_2[0].find("li") image_url_2 = image_2.find("a")['href'] hemisphere_title_2 = soup_2.find("h2", class_="title").text print(hemisphere_title_2) print(image_url_2) url_3="https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced" response_3 = requests.get(url_3) soup_3 = BeautifulSoup(response_3.text, "html.parser") pic_3 = soup_3.find_all( "div", class_ = "wide-image-wrapper") image_3 = pic_3[0].find("li") image_url_3 = image_3.find("a")['href'] hemisphere_title_3 = soup_3.find("h2", class_="title").text print(hemisphere_title_3) print(image_url_3) url_4="https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced" response_4 = requests.get(url_4) soup_4 = BeautifulSoup(response_4.text, "html.parser") pic_4 = soup_4.find_all( "div", class_ = "wide-image-wrapper") image_4 = pic_4[0].find("li") image_url_4 = image_4.find("a")['href'] hemisphere_title_4 = soup_4.find("h2", class_="title").text print(hemisphere_title_4) print(image_url_4) hemisphere_dict= [ {"title":hemisphere_title, "image_url":image_url}, {"title":hemisphere_title_2, "image_url":image_url_2}, {"title":hemisphere_title_3, "image_url":image_url_3}, {"title":hemisphere_title_4, "image_url":image_url_4}, ] hemisphere_dictTesting the Bessel Basisimport dvr_1d %matplotlib inlineWith a Morse potentiald = dvr_1d.BesselDVR(npts=100, R=20, dim=3, lam=0) d.morse_test(xmax=20., ymax=0.5)Testing 1-D DVR with a Morse potential The first 5 energies are: [-1.75751018 -0.76051377 -0.21815647 -0.00674606 0.05559304]With a Woods-Saxon potentiald = dvr_1d.BesselDVR(npts=100, R=10, dim=3, lam=0) d.woods_saxon_test()Testing 1-D DVR with a Woods-Saxon potential The first 5 energies are: [-43.55090359 -36.58134219 -29.25606017 -21.92813792 -14.94875837]* RMSE is a measurment to measure the performance of a linear regression model. It's full name is Root Mean Square Error. The RMSE measured on the same scale with the same units as y. 
Since it is the way to measuer the difference between the value we predict and the actual value. Therefore, a better linear regression model should have the smallest RMSE. * The formula to define the RMSE is: $\large RMSE(\vec{X}, h_\vec{\theta}) = \sqrt{\frac{1}{m} \sum_{i=1}^m (\vec{\theta}^\top\vec{x}^i - y^i)^2}$* In the model above, the RMSE is 77257 which is not good. I should do more data preprocessing in the future. For example, remove the outliers.y.describe()Extracting protein sequences' features using ProtXLNet pretrained-model 1. Load necessry libraries including huggingface transformers!pip install -q transformers sentencepiece import torch from transformers import XLNetTokenizer, AutoModel, pipeline import re import numpy as np import os import requests from tqdm.auto import tqdm2. Load the vocabulary and ProtXLNet Modeltokenizer = XLNetTokenizer.from_pretrained("Rostlab/prot_xlnet", do_lower_case=False) model = AutoModel.from_pretrained("Rostlab/prot_xlnet")3. Load the model into the GPU if avilabilefe = pipeline('feature-extraction', model=model, tokenizer=tokenizer,device=0)4. Create or load sequences and map rarely occured amino acids (U,Z,O,B) to (unk)sequences_Example = ["A E T C Z A O","S K T Z P"] sequences_Example = [re.sub(r"[UZOB]", "", sequence) for sequence in sequences_Example]5. Extracting sequences' features and covert the output to numpy if neededembedding = fe(sequences_Example) embedding = np.array(embedding) print(embedding)[[[ 5.69581509e-01 -8.12228858e-01 1.51267886e+00 ... -3.47372681e-01 -1.97737586e+00 1.02282548e+00] [ 2.76618991e-02 -6.71196997e-01 9.98873472e-01 ... 7.27679655e-02 -1.62625980e+00 -8.44566710e-03] [ 2.20987082e-01 -5.26815534e-01 6.64871037e-01 ... 4.78142388e-02 -1.39787078e+00 3.08237135e-01] ... [-3.64926189e-01 -8.19321334e-01 4.81532872e-01 ... 2.35715955e-01 -6.73882365e-01 -1.06030309e+00] [ 4.51356888e-01 -8.96942139e-01 4.00962055e-01 ... -1.93732992e-01 -5.60827136e-01 -2.78552026e-01] [ 3.18278044e-01 -1.61192930e+00 4.94406074e-01 ... -2.51359522e-01 -1.32739976e-01 -1.23092830e-02]] [[ 1.91231534e-01 1.84455216e-02 -1.82765443e-03 ... -4.36504632e-01 2.18422841e-02 -1.59097195e-01] [ 2.63838232e-01 -6.02961145e-02 -1.12764817e-02 ... -2.28307739e-01 -3.21160257e-01 1.10596135e-01] [ 8.65126610e-01 -1.61868662e-01 -1.75777614e-01 ... 3.56551766e-01 -2.34120205e-01 4.93936874e-02] ... 
[ 4.84943122e-01 6.7[...]Optional: Remove padding ([PAD]) and special tokens ([CLS],[SEP]) that is added by ProtXLNet modelfeatures = [] for seq_num in range(len(embedding)): seq_len = len(sequences_Example[seq_num].replace(" ", "")) padded_seq_len = len(embedding[seq_num]) start_Idx = padded_seq_len-seq_len-2 end_Idx = padded_seq_len-2 seq_emd = embedding[seq_num][start_Idx:end_Idx] features.append(seq_emd) print(features)[array([[ 0.0276619 , -0.671197 , 0.99887347, ..., 0.07276797, -1.6262598 , -0.00844567], [ 0.22098708, -0.52681553, 0.66487104, ..., 0.04781424, -1.39787078, 0.30823714], [ 0.98757732, -1.03212166, 0.99680531, ..., -0.33856013, -1.51521778, 1.05237114], [ 0.70799673, -0.66436064, 0.85833871, ..., -0.02473333, -1.51670885, -0.21759868], [-0.14213681, -0.86483932, 0.81442791, ..., -0.32999074, -0.23385319, -1.7195524 ], [-0.36492619, -0.81932133, 0.48153287, ..., 0.23571596, -0.67388237, -1.06030309]]), array([], shape=(0, 1024), dtype=float64)]ISM Week 04 Test Demo - generate student - specific dataGet your prices by running the cell below and copy-paste them in the test notebook.# defines a function that will take student input and verify # Students, please press Shift + Enter to run this cell ONLY ONCE and then enter your B-number when prompted # if you get an error close the notebook and restart it # or better yer restart you borwser but always enter Noteable from the link in Learn # Otherwise ignore this code - not required for course def b_num_input(): b_num = input("Please enter your B-number: ") while True: if len(b_num) != 7: print("You have entered ", b_num, "This seems to not be 7 characters. If you are sure, type Y, otherwise type N to enter it again.") conf = input("Are you sure (Y/N): ") if conf == "Y": break else: b_num = input("Please enter your B-number: ") else: break aux = float(b_num[-2:]) if aux == 0: num = 10 else: num = aux num_list = [num, round(num*1.12,2), round(num*1.12*1.2,2), round(num*1.12*1.2*.7,2), round(num*1.12*1.2*.7*1.1,2)] print("Your closing prices to use below are: ") return num_list num_list = b_num_input() num_listImport packagesimport matplotlib.pyplot as plt from PIL import Image, ImageDraw import numpy as np import pandas as pd import os.path as osp import os import argparseGet home_pathWe get `home_path` on different machineshome_path = osp.expanduser('~') home_pathCompute the IoUWe define the function to compute the IoU of a predicted box with a list of ground truth boxes.A box is represented by a list `[xmin, ymin, xmax, ymax]`.A list ground truth boxes is a list of boxes `[box1, box2, box3]`.All are `np.array`def get_iou(pred_img_box, gt_img_boxes): # Compute the area of all boxes including the predicted box pred_box_area = (pred_img_box[2] - pred_img_box[0] + 1) # area = w pred_box_area *= (pred_img_box[3] - pred_img_box[1] + 1) # area = w * h gt_boxes_area = (gt_img_boxes[:, 2] - gt_img_boxes[:, 0] + 1) # area = w gt_boxes_area *= (gt_img_boxes[:, 3] - gt_img_boxes[:, 1] + 1) # area = w * h xx1 = np.maximum(pred_img_box[0], gt_img_boxes[:, 0]) yy1 = np.maximum(pred_img_box[1], gt_img_boxes[:, 1]) xx2 = np.minimum(pred_img_box[2], gt_img_boxes[:, 2]) yy2 = np.minimum(pred_img_box[3], gt_img_boxes[:, 3]) # Compute the area of intersection w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h # get IoU all boxes with the box of highest conf iou = inter / (pred_box_area + gt_boxes_area[:] - inter) return iouCheck whether a predict box is a ground truthdef pred_box_is_gt_box(pred_img_box, 
gt_img_boxes, threshold=0.5): iou = get_iou(pred_img_box, gt_img_boxes) return np.any(iou > threshold)Get the prediction from a file# args = argparse.ArgumentParser().parse_args() result_path = 'Dropbox/DeepFPNResnet/Resnet101/afw_Deepresnet101_val.txt' data_root = 'Datasets/AFW' def get_pred_data(): # file_name score boxes = {} scores = {} img_names = [] img_paths = [] with open(osp.join(home_path, result_path), "r") as f: lines = f.readlines() for line in lines: line = line.strip() elems = line.split(" ") img_name = elems[0] + '.jpg' score = float(elems[1]) box = elems[2:] box = [float(value) for value in box] if len(img_names) == 0 or img_name != img_names[-1]: img_names.append(img_name) img_paths.append(osp.join(home_path, data_root, img_name)) if boxes.get(img_name) is None: boxes[img_name] = [] scores[img_name] = [] boxes[img_name].append(box) scores[img_name].append(score) final_boxes, final_scores = [], [] for img_name in img_names: final_boxes.append(np.array(boxes[img_name])) final_scores.append(np.array(scores[img_name])) return img_names, img_paths, final_boxes, final_scores img_names, img_paths, final_boxes, final_scores = get_pred_data() img_names[:5] img_paths[:5] final_boxes[:5] final_scores[:5]Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:!pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0")1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. 
It will be saved locally as a file called `dolphin.obj`.!wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device)Visualize the source and target meshesdef plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh")3. Optimization loop# We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm_notebook(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(loss_chamfer) edge_losses.append(loss_edge) normal_losses.append(loss_normal) laplacian_losses.append(loss_laplacian) # Plot mesh 
if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step()4. Visualize the lossfig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16")5. Save the predicted mesh# Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces)Bike Sharing in Washington D.C. with DaskTwo datasets from [Bike Sharing in Washington D.C.](https://www.kaggle.com/marklvl/bike-sharing-dataset/home) containing information about the Bike Sharing service in Washington D.C. "Capital Bikeshare" are provided.One dataset contains hourly data and the other one has daily data from the years 2011 and 2012.The following variables are included in the data:* instant: Record index* dteday: Date* season: Season (1:springer, 2:summer, 3:fall, 4:winter)* yr: Year (0: 2011, 1:2012)* mnth: Month (1 to 12)* hr: Hour (0 to 23, only available in the hourly dataset)* holiday: whether day is holiday or not (extracted from Holiday Schedule)* weekday: Day of the week* workingday: If day is neither weekend nor holiday is 1, otherwise is 0.* weathersit: (extracted from Freemeteo) 1: Clear, Few clouds, Partly cloudy, Partly cloudy 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog* temp: Normalized temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-8, t_max=+39 (only in hourly scale)* atemp: Normalized feeling temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-16, t_max=+50 (only in hourly scale)* hum: Normalized humidity. The values are divided to 100 (max)* windspeed: Normalized wind speed. The values are divided to 67 (max)* casual: count of casual users* registered: count of registered users* cnt: count of total rental bikes including both casual and registered (Our target variable)We will build a predictive model that can determine how many people will use the service on an hourly basis. We will use the first 5 quarters of the data for our training dataset and the last quarter of 2012 will be the holdout against which we perform our validation. Since that data was not used for training, we are sure that the evaluation metric that we get for it (R2 score) is an objective measurement of its predictive power. OutlineWe separate the project in 3 steps:Data Loading and Exploratory Data Analysis: Load the data and analyze it to obtain an accurate picture of it, its features, its values (and whether they are incomplete or wrong), its data types among others. 
Also, the creation of different types of plots in order to help us understand the data and make the model creation easier.Feature Engineering / Pipeline and Hyperparameter Tuning: Once we have the data, we create some features and then create a pipeline with different transformers, we will hopefully produce a model that fits our expectations of performance. Once we have that model, a process of tuning it to the training data would be performed.Results and Conclusions: Finally, with our tuned model, we predict against the test set we decided to separate initially, then we review those results against their actual values to determine the performance of the model, and finally, outlining our conclusions.import seaborn as sns import dask.dataframe as dd from sklearn.base import clone import matplotlib.pyplot as plt from xgboost import XGBRegressor from dask.distributed import Client from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PowerTransformer from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import TimeSeriesSplit from dask_ml.model_selection import RandomizedSearchCV from dask_ml.preprocessing import OneHotEncoder, PolynomialFeatures from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_errorSetting Key ValuesThe following values are used throught the code, this cell gives a central source where they can be managed.PATH = "https://gist.githubusercontent.com/akoury/af59a34ac7d3eb52fd78d6ead200f224/raw/f034e9dc2d2f62d82bbc685e499e48968b9db064/Bicycles.csv" SPLITS = 4 METRIC = "r2" SEED = 1 TARGET = "cnt"The Dataset The necessary data was uploaded to a gist from where its read directly by Dask.**Optionally** (because it requires more setup) you may download the data directly from Kaggle, this is the optimal way since you are sure to obtain the latest files if there are any changes.In order to obtain the data from Kaggle using its API, you need to:1. Install the kaggle package ```pip install kaggle```2. [Create a Kaggle account](https://www.kaggle.com/account/login) if you do not have one3. Create a Kaggle API key in your account section4. 
Add the kaggle.json file that was created in step 3 to a folder called .kaggle located in your user folder# import kaggle # import zipfile # kaggle.api.authenticate() # kaggle.api.dataset_download_files('marklvl/bike-sharing-dataset', path='data', unzip=False)Zipfile is required to unzip the kaggle file (since the unzip parameter in the ```dataset_download_files``` method is not working [Issue 158](https://github.com/Kaggle/kaggle-api/issues/158)).# zipper = zipfile.ZipFile('data/bike-sharing-dataset.zip', 'r') # zipper.extractall('data/') # zipper.close() # PATH = 'data/hour.csv'Here we create the necessary Dask distributed client, you may click on the dashboard link to see the task stream and additional information.client = Client() clientData LoadingHere we load the necessary data, print its first rows and describe its contents.Since our dataset is small and we want to take advantage of the distributed capabilities of Dask, we set a blocksize of 300KB which distributes our code in 4 partitions.types = { "season": "category", "yr": "category", "mnth": "category", "holiday": "bool", "weekday": "category", "workingday": "bool", "weathersit": "category", } df = dd.read_csv(PATH, parse_dates=[1], dtype=types, blocksize="300KB") df.npartitionsPrecipitation Data and PreparationWe will add precipitation data obtained from the [National Climatic Data Center.](https://www.ncdc.noaa.gov/cdo-web/datasets)However, since most of the values are 0, we will convert them to a boolean that determines if rain was present or not at that specific hour.We also categorize the categorical features and set the date as the index.precipitation = dd.read_csv( "https://gist.githubusercontent.com/akoury/6fb1897e44aec81cced8843b920bad78/raw/b1161d2c8989d013d6812b224f028587a327c86d/precipitation.csv", parse_dates=[1], ) df = dd.merge(df, precipitation, how="left", on=["dteday", "hr"]) df["precipitation"] = ( df["precipitation"] .mask(df["precipitation"].isnull(), 0) .mask(df["precipitation"] > 0, 1) .astype(bool) ) df = df.set_index("dteday") df.head()**Note:** head() only grabs values from the first partition, so it is not an expensive operationSince we set the date as the index for the Dask Dataframe, each division will be sorted according to its datedf.divisionsData TypesWe review the data types for each column.df.dtypesExploratory Data AnalysisHere we will perform all of the necessary data analysis, with different plots that will help us understand the data and therefore, create a better model.Because we are working in a distributed way with a supposedly large dataset, we will take a random sample of 15% of the dataset to visualize it.sample = df.sample(frac=0.15, replace=True, random_state=SEED)Overall distribution of the target variableHere we see the distribution in the number of users in the sample per hourplt.figure(figsize=(16, 8)) sns.distplot(sample[TARGET])Usage per hour of the day by registered and casual usersHere we can see usage per hour differing between registered users and casual users.We can assume that most of the registered users use the service to get to work/school, therefore peak hours are in the morning and in the afternoon, meanwhile, casual users do not have any big peaks.plt.figure(figsize=(16, 8)) grouped = ( sample.groupby("hr") .agg({"registered": "mean", "casual": "mean"}) .reset_index() .compute() ) sns.lineplot(data=grouped, x="hr", y="registered", palette="husl", label="registered") sns.lineplot(data=grouped, x="hr", y="casual", palette="husl", label="casual") plt.xlabel("Hour") 
plt.ylabel("Users")Usage per month by registered and casual usersHere we can see usage per month, the colder months are the ones with least usage and the summer months have the most usage.plt.figure(figsize=(16, 8)) sample["mnth"] = sample["mnth"].astype("int") grouped = ( sample.groupby("mnth") .agg({"registered": "mean", "casual": "mean"}) .reset_index() .compute() ) sns.lineplot(data=grouped, x="mnth", y="registered", palette="husl", label="registered") sns.lineplot(data=grouped, x="mnth", y="casual", palette="husl", label="casual") plt.xlabel("Month") plt.ylabel("Users")Usage per day of the week by registered and casual usersHere we can see usage per day of the week.For registered users, usage goes down during the weekend days and up during the working days, while for casual users is the contraryplt.figure(figsize=(16, 8)) sample["weekday"] = sample["weekday"].astype("int") grouped = ( sample.groupby("weekday") .agg({"registered": "mean", "casual": "mean"}) .reset_index() .compute() ) sns.lineplot( data=grouped, x="weekday", y="registered", palette="husl", label="registered" ) sns.lineplot(data=grouped, x="weekday", y="casual", palette="husl", label="casual") plt.xlabel("Weekday") plt.ylabel("Users")Boxplot of Numerical VariablesWe review the distribution of numerical data through a boxplot for each variable.Some features have many outliers, therefore some sort of scaling and skewness fixing may be of use.plt.figure(figsize=(12, 10)) numeric = sample[ ["instant", "hum", "atemp", "temp", "windspeed", "casual", "registered", "cnt"] ].compute() numeric = (numeric - numeric.mean()) / numeric.std() sns.boxplot(data=numeric, orient="h")Data CorrelationNow we will analyze correlation in the data for all variables.From this we see that 'temp' and 'atemp' are highly correlated as well as 'season' and 'month' therefore we will remove one variable of each groupplt.figure(figsize=(13, 13)) sns.heatmap( sample.astype(float).corr(), cmap="coolwarm", center=0, square=True, annot=True, xticklabels=sample.columns, yticklabels=sample.columns, )Feature Engineering Is LateWe add a boolean to determine if it is late during the day or not, since we see from the visualizations that usage varies greatlydf["is_late"] = (df["hr"] > 20) | (df["hr"] < 6)Train/Test SplitNow we take the last quarter of the data as our testing set and the remaining rows as our training set.We also drop the columns dteday because we do not need the date, casual and registered since they make up the target variable and correlated columns.df = df.drop(["season", "atemp", "casual", "registered"], axis=1) df["hr"] = df["hr"].astype("category") df = df.categorize() train_df = df.loc[:"2012-09-30"] holdout = df.loc["2012-10-01":]Pipeline CreationWe create the pipeline that will be used for our data.Initially it standardizes the data and fixes its skewness, then it one-hot encodes the categorical variables and finally it runs it through a model.The models from the Dask library did not work in the pipeline see relevant issues for [logistic regression](https://github.com/dask/dask-ml/issues/84) and [xgboost](https://github.com/dask/dask-xgboost/issues/31) therefore only models from sklearn and xgboost directly were used. 
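As a quick sanity check on the skewness fixing performed by the numerical part of the pipeline below, the following cell is an added sketch (not part of the original notebook; it assumes the `sample` dataframe from the exploratory analysis is still in memory and uses the "windspeed" column as an example). It compares the skew of that column before and after the same Yeo-Johnson transform:

```python
import pandas as pd
from sklearn.preprocessing import PowerTransformer

# Bring one sampled numeric column into pandas (the sample is small)
windspeed = sample[["windspeed"]].compute()
print("Skew before:", windspeed["windspeed"].skew())

# Same transformer configuration as the numerical pipeline uses
pt = PowerTransformer(method="yeo-johnson", standardize=True)
transformed = pt.fit_transform(windspeed)
print("Skew after:", pd.Series(transformed.ravel()).skew())
```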
The RandomizedSearchCV, however, does come from Dask, which is the important part for the distribution of tasks.num_pipeline = Pipeline([("power_transformer", PowerTransformer(method="yeo-johnson", standardize=True))]) categorical_pipeline = Pipeline([("one_hot", OneHotEncoder())]) pipe = Pipeline([ ("column_transformer", ColumnTransformer([ ("numerical_pipeline", num_pipeline, ["instant", "hum", "temp", "windspeed"]), ("categorical_pipeline", categorical_pipeline, ["yr", "mnth", "hr", "weekday", "weathersit"]), ], remainder="passthrough")), ])Scoring with Hyperparameter TuningA model is added to the pipeline, which is then passed to a RandomizedSearchCV that cross-validates a grid of parameters with a time series split in order to choose the best ones.To begin, we split X and y.X = train_df.drop([TARGET], axis=1) y = train_df[TARGET]Fit functionThis function will be used by the different pipelines to find the best performing one; it accepts the data, the pipe, the final model and its griddef fit(X, y, pipe, model, grid): pipe = clone(pipe) pipe.steps.append(model) gridpipe = RandomizedSearchCV( pipe, grid, n_iter=100, cv=TimeSeriesSplit(n_splits=SPLITS), scoring=METRIC, random_state=SEED, ) gridpipe.fit(X, y) print("Model: " + str(model[0])) print("Best Parameters: " + str(gridpipe.best_params_)) print("Best Fold Score: " + str(gridpipe.best_score_)) return gridpipeLinear Regressionmodel = ("linear_reg", LinearRegression()) grid = { "linear_reg__normalize": [True, False], "linear_reg__fit_intercept": [True, False], } lr_pipe = fit(X, y, pipe, model, grid)/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:271: UserWarning: The total space of parameters 4 is smaller than n_iter=100. Running 4 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)XGBoostmodel = ("xgb", XGBRegressor(random_state=SEED)) grid = { "xgb__max_depth": [3, 5], "xgb__learning_rate": [0.1, 0.2], "xgb__n_estimators": [100, 200], } xgb_gridpipe = fit(X, y, pipe, model, grid)/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:271: UserWarning: The total space of parameters 8 is smaller than n_iter=100. Running 8 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)Random Forestmodel = ("random_forest", RandomForestRegressor(n_estimators=100, random_state=SEED)) grid = { "random_forest__max_depth": [80, 100], "random_forest__min_samples_leaf": [3, 5], "random_forest__min_samples_split": [5, 10], "random_forest__max_leaf_nodes": [None, 30], } rf_gridpipe = fit(X, y, pipe, model, grid)/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:271: UserWarning: The total space of parameters 16 is smaller than n_iter=100. Running 16 iterations. For exhaustive searches, use GridSearchCV. % (grid_size, self.n_iter, grid_size), UserWarning)
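Before looking at the results, it helps to put the three tuned searches side by side. The cell below is an added sketch (not part of the original notebook); it only assumes the fitted `lr_pipe`, `xgb_gridpipe` and `rf_gridpipe` objects from the cells above and the `METRIC` constant defined at the start.

```python
# Compare the best cross-validated scores of the three tuned searches
candidates = {
    "linear_reg": lr_pipe,
    "xgb": xgb_gridpipe,
    "random_forest": rf_gridpipe,
}

for name, search in candidates.items():
    print(f"{name:>13}: best {METRIC} = {search.best_score_:.4f}")

best_name = max(candidates, key=lambda name: candidates[name].best_score_)
print("Best model:", best_name)
```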
ResultsNow we select the best tuned model, which is XGBoost, and with it make a prediction on the holdout data and obtain its metrics.final_pipe = xgb_gridpipe X_test = holdout.drop([TARGET], axis=1) y_test = holdout[TARGET] predicted = final_pipe.predict(X_test) scores = {} scores["R2"] = r2_score(y_test, predicted) scores["MAE"] = mean_absolute_error(y_test, predicted) scores["MSE"] = mean_squared_error(y_test, predicted) scoresPlots of PredictionsHere we plot the different results obtained.For this scatter plot, the closer the points are to a straight diagonal line, the better the predictions, since they are closer to the actual values.y_test = y_test.compute() plt.figure(figsize=(11, 9)) plt.scatter(y_test, predicted, alpha=0.3) plt.ylabel("Predicted") plt.show()Entire daily predictions vs. reality plotWe use a dictionary comprehension to avoid adding numpy or pandas as dependenciesy_test = y_test.reset_index(drop=True) predicted = { i: predicted[24 * i : (24 * i) + 24].sum() for i in range(len(predicted) // 24) } plt.figure(figsize=(19, 9)) ax = sns.lineplot( data=y_test.groupby(y_test.index // 24).sum(), color="red", label="Actual" ) ax = sns.lineplot( list(predicted.keys()), list(predicted.values()), color="blue", label="Predicted" ) plt.xlabel("Day") plt.ylabel("Users")ConclusionsWe created a model that, based on certain parameters, determines bike usage on an hourly basis. With these results we can provide an estimate of usage, which can be of great importance for all of the involved parties.One of the key findings is that there is a great difference in usage between weekends and normal working days, and throughout the day between registered and casual users. The company needs to consider this in order to supply the correct number of bicycles depending on the day of the week and the time of day, since demand changes drastically. As might be guessed, temperature also plays a big role in usage, although its effect is more significant for casual users.After performing multiple data preparation steps and transformations and tuning different models, we chose the best performing one on the training data and obtained our predictions; from them we can see that the model follows many of the peaks and valleys of the real data.Many bike-sharing companies across the world could use this model to estimate bike usage, plan better for expected demand and even help their governments with transportation planning, since measuring the impact of new bike infrastructure on cycling traffic and behavior is top of mind for many planners and advocacy groups.As for Dask, we see great improvements in terms of speed, which makes working in a distributed fashion very easy.
Furthermore, it seems like the library is continually improving, currently there are multiple issues that need to be tackled in order to ensure correct funtionality across the board but its future is promising as a good distributed and familiar alternative to pandas, without having to tackle Spark.client.close()0 - Information 1 - Packages# Maths packages import numpy as np # Dataset packages import pandas as pd # Import os packages from os import listdir from os.path import isfile, join import re # Import progress bar from tqdm import tqdm2 - Read the Datadef readData(filename): """Return the data of the file filename and save them as a numpy array.""" # Resulting dct dct = {} with open(filename) as f: # Read the first line R, C, L, H = f.readline().split() # Append dct dct["R"] = R dct["C"] = C dct["L"] = L dct["H"] = H # Resulting dct dct["Grid"] = [] # Read the other lines and save them in for line in f: dct["Grid"].append(line.strip()) # Convert dct["Data"] as a numpy array dct["Grid"] = np.array(dct["Grid"]) return dct def readFiles(folder): """Read the data in folder and save them in a dict.""" # Resulting dict result_dct = {} # List of files in folder files = [f for f in listdir(folder) if isfile(join(folder, f))] # Loop over all the files in folder for file in files: # Read the data and save them in dct file_name = re.sub("\...", "", file) result_dct[file_name] = readData(join(folder, file)) return result_dct # Read the data data_dct = readFiles("Data") # Extract one example test_dct = {"test": data_dct["b"]}3 - Code for the optimisationdef prediction(dataset_dct): """Make prediction for the current dataset_dct.""" # Resulting array results = [] # Extract R, C, L, H, data R = int(dataset_dct["R"]) C = int(dataset_dct["C"]) L = int(dataset_dct["L"]) H = int(dataset_dct["H"]) grid = dataset_dct["Grid"] # Make the predictions for r in range(R): # Counters beg = 0 c = 0 mushroom_count = 0 tomato_count = 0 # Loop over all column while c < C: if grid[r][c] == "M": mushroom_count += 1 elif grid[r][c] == "T": tomato_count += 1 if c - beg > H: if grid[r][beg] == "M": mushroom_count -= 1 elif grid[r][beg] == "T": tomato_count -= 1 beg += 1 if (c - beg) < H and (mushroom_count >= L) and (tomato_count >= L): results.append(str(r) + " " + str(beg) + " " + str(r) + " " + str(c)) mushroom_count = 0 tomato_count = 0 beg = c + 1 c = c + 1 # Update of c c += 1 return results def predictionsDct(data_dct): """Extract the data of each file in dct and compute the predictions.""" # Resulting dct predictions_dct = {} # Loop over the different datasets for key in data_dct.keys(): # Compute the predictions for the current datasets predictions_dct[key] = prediction(data_dct[key]) # Return the predictions made return predictions_dct # Compute the predictions predictions_dct = predictionsDct(data_dct)4 - Save the predictionsdef writePredictions(predictions_dct, folder="Results/"): """Save the predictions.""" # Loop over all the predictions save in array_dct for key in predictions_dct.keys(): # Extract predictions for the given files slices = predictions_dct[key] # Count the number of slices nb_slices = len(slices) # Writes the result in a txt file f = open(folder + key + ".txt", "w") # Write number of slices f.write(str(nb_slices) + "\n") # Loop over each slices for i in slices: f.write(i + "\n") # Closing the file f.close() writePredictions(predictions_dct)Atividade 6import numpy as np e = 2.71828182845 pi = 3.1415 u0 = 4*pi/10**(7) make_complex = lambda radii, angles : radii * e**(1j*angles) print_complex = 
lambda zc : print("valor =", zc, "|valor| = ",abs(zc)," valor angle = ",np.angle(zc)*180/pi) s = 10*10**3 V1 = 2300 V2 = 230 f = 60 r1 = 5.92 xd1 = 11.95j r2 = 0.0592 xd2 = 0.1195j rc = 75500 xm = 70000j fp = 0.95 #indutivo w = 2*pi*f a = V1/V2a) B)r2_ = (a**2)*r2 xd2_ = (a**2)*xd2 i1 = s/V1 i2 = s/V2 i2_ = i2/a zc_abs = V2/i2 zc_angle = np.arccos(0.95) zc = make_complex(zc_abs,-zc_angle) zc_ = (a**2)*zc print("r1 =", r1) print("xd1 =", xd1) print("r2' =", r2_) print("xd2'=", xd2_) print("rc =", rc) print("xm =", xm) print("i1 =", i1) print("i2 =", i2) print("i2' =", i2_) print("zc =", zc, "|zc| = ",abs(zc)," zc angle = ",np.angle(zc)*180/pi) print("zc' =", zc_, "|zc'| = ",abs(zc_)," zc' angle =",np.angle(zc_)*180/pi)r1 = 5.92 xd1 = 11.95j r2' = 5.92 xd2'= 11.95j rc = 75500 xm = 70000j i1 = 4.3478260869565215 i2 = 43.47826086956522 i2' = 4.3478260869565215 zc = (5.025500000001745-1.6518019705710665j) |zc| = 5.29 zc angle = -18.19540896778089 zc' = (502.5500000001745-165.18019705710665j) |zc'| = 529.0 zc' angle = -18.19540896778089c) d)a_ = 1/a r1_ = (a_**2)*r1 xd1_= (a_**2)*xd1 rc_ = (a_**2)*rc xm_ = (a_**2)*xm V1_ = a_*V1 i1_ = a_*i1 print("V1' =", V1_) print("r1' =", r1_) print("xd1'=", xd1_) print("rc' =", rc_) print("xm' =", xm_) print("xd2 =", xd2) print("r2 =", r2) print("i1 =", i1) print("i1' =", i1_) print("i2 =", i2) print("zc =", zc, "|zc| = ",abs(zc)," zc angle = ",np.angle(zc)*180/pi)V1' = 230.0 r1' = 0.05920000000000001 xd1'= 0.11950000000000002j rc' = 755.0000000000001 xm' = 700.0000000000001j xd2 = 0.1195j r2 = 0.0592 i1 = 4.3478260869565215 i1' = 0.43478260869565216 i2 = 43.47826086956522 zc = (5.025500000001745-1.6518019705710665j) |zc| = 5.29 zc angle = -18.19540896778089e) f)Req = (r1+r2_)+(xd1+xd2_)+(zc_) i_alta = V1/Req print_complex(i_alta) ##gabarito """ ic = ia = 43.48 ic' = i2' = 43.48/a """valor = (4.157677755510172+1.141930310850185j) |valor| = 4.311645759278295 valor angle = 15.358367953752749g)V_enro = (r1+r2_)*i_alta S_enro = V_enro*i_alta print_complex(S_enro)valor = (189.2301890884963+112.42738900178645j) |valor| = 220.10902357746963 valor angle = 30.716735907505505h)z_eq_h = (rc*xm)/(rc+xm) i_eq_h = V1/z_eq_h print_complex(i_eq_h)valor = (0.030463576158940395-0.032857142857142856j) |valor| = 0.044806487355362454 valor angle = -47.16618020455231i)S_i = V1*i_eq_h print_complex(S_i)valor = (70.06622516556291-75.57142857142857j) |valor| = 103.05492091733365 valor angle = -47.16618020455231Vector Spaces and Its Operationsimport numpy as np A = np.array([4,3]) B = np.array([2,-5]) print('Vector A is', A) print('Vector B is', B) import numpy as np ball1 = np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) pool.shape import numpy as np ball1 = np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) print(pool.shape) print(pool.size) import numpy as np ball1 = np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) pool.shape pool.ndim U = np.array ([[1,2,3,],[4,5,6]]) U U = np.array ([[1,2,3,],[4,5,6]]) U.shape U = np.array ([[1,2,3,],[4,5,6]]) U.ndim U = np.array ([[1,2,3,],[4,5,6]]) U.size U = np.array ([[1,2,3,],[4,5,6]]) U U.sizeAddition of VectorsR = A+B print(R) R = np.add(A,B) RVector SubractionR = np.subtract(A,B) RVector ScalingA = np.array([1,5,8,9]) S = 5*A S A = np.array([1,5,8,9]) S = np.multiply(5,A) SVector Cross Product#initialize arrays A = np.array([2,3,4]) B = np.array([1,7,0]) #compute cross product output = np.cross(A,B) print(output) #compute the dot product of A and B 
output = np.dot(A,B) print(output)236.1import numpy as np import seaborn as sns import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d from mpl_toolkits.basemap import Basemap df=sns.load_dataset('diamonds') df.head() fig = plt.figure(figsize=(10,8)) ax = fig.add_subplot(111, projection='3d') ax.scatter(df[:99].depth,df[:99].table,df[:99].price) plt.show() fig = plt.figure() ax = fig.add_subplot(111, projection='3d') pnt3d=ax.scatter(df[:99].x,df[:99].y,df[:99].z,c=df[:99].z) cbar=plt.colorbar(pnt3d) cbar.set_label("z") plt.show()6.2fig = plt.figure() ax = fig.add_subplot(111, projection='3d') X, Y, Z = axes3d.get_test_data(0.05) ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) plt.show() def f(x, y): return np.sin(np.sqrt(x ** 2 + y ** 2)) x = np.linspace(-6, 6, 30) y = np.linspace(-6, 6, 30) X, Y = np.meshgrid(x, y) Z = f(X, Y) fig = plt.figure() ax = plt.axes(projection='3d') ax.contour3D(X, Y, Z, 50, cmap='binary') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z');6.3from mpl_toolkits.basemap import Basemap map = Basemap() map.drawcoastlines() plt.show() # Create a globe map map = Basemap(projection='ortho', lat_0=0, lon_0=0) #Fill the globe with a blue color map.drawmapboundary(fill_color='blue') #Fill the continents with the land color map.fillcontinents(color='green',lake_color='aqua') map.drawcoastlines() plt.show()6.4map = Basemap(projection='ortho', lat_0=0, lon_0=0) map.drawmapboundary(fill_color='aqua') map.fillcontinents(color='coral',lake_color='aqua') map.drawcoastlines() x, y = map(2, 41) x2, y2 = (-90, 10) plt.annotate('Barcelona', xy=(x, y), xycoords='data', xytext=(x2, y2), textcoords='offset points', color='r', arrowprops=dict(arrowstyle="fancy", color='g') ) x2, y2 = map(0, 0) plt.annotate('Barcelona', xy=(x, y), xycoords='data', xytext=(x2, y2), textcoords='data', arrowprops=dict(arrowstyle="->") ) plt.show()6.5# Map map = Basemap(projection='ortho',lat_0=45,lon_0=-100,resolution='l') map.drawcoastlines(linewidth=0.25) map.drawcountries(linewidth=0.25) map.fillcontinents(color='coral',lake_color='aqua') map.drawmapboundary(fill_color='aqua') map.drawmeridians(np.arange(0,360,30)) map.drawparallels(np.arange(-90,90,30)) # Data nlats = 73; nlons = 145; delta = 2.*np.pi/(nlons-1) lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:]) lons = (delta*np.indices((nlats,nlons))[1,:,:]) wave = 0.75*(np.sin(2.*lats)**8*np.cos(4.*lons)) mean = 0.5*np.cos(2.*lats)*((np.sin(2.*lats))**2 + 2.) 
# Plot x, y = map(lons*180./np.pi, lats*180./np.pi) cs = map.contour(x,y,wave+mean,15,linewidths=1.5) plt.title('Satellite View') plt.show()存在重复元素* Slug: contains-duplicate* Date: 2018-06-25* Category: LeetCode* Tags: 数组, 算法* Author: timking* Summary: LeetCode - 探索 - 初级算法 > [原文链接](https://leetcode-cn.com/problems/contains-duplicate/description/) 给定一个整数数组,判断是否存在重复元素。如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。**示例 1:**```输入: [1,2,3,1]输出: true```**示例 2:**```输入: [1,2,3,4]输出: false```**示例 3:**```输入: [1,1,1,3,3,4,3,2,4,2]输出: true``` 初步解答这道题比较简单。直接利用字典的特性(不存在重复的键)完成就好。关于字典具体可以看之前的文章, [«高性能python-选择合适的数据结构»](/high_performance_python_section2.html字典_1)class Solution: def containsDuplicate(self, nums): """ :type nums: List[int] :rtype: bool """ return len(set(nums)) != len(nums)Módulo datos_gov - LEILAEste libro de Jupyter contiene ejemplos con explicaciones de cómo utilizar el módulo que conecta a un usuario con el Portal de Datos Abiertos de Colombia Importar la clase DatosGov del módulo datos_govfrom leila.datos_gov import DatosGovSe importa la tabla de inventario de datos.gov.co. Esta tabla contiene todas las publicaciones del Portal (conjuntos de datos, enlaces externos, mapas, gráficos, etc.).inventario = DatosGov().tabla_inventario()Las columnas de la tabla de inventario son las siguientes:"**numero_api**": número API del conjunto de datos. Este es un carácter único de cada conjunto de datos del Portal que se usa como insumo para abrirlo desde código."**nombre**": nombre de la publicación"**descripcion**": descripción de la publicación"**dueno**": dueño de la publicación. "**base_publica**": indica con un "si" si la información del conjunto de datos es público y con un "no" de lo contrario"**tipo**": indica el tipo de la publicación, que puede ser uno de los siguientes: "conjunto de datos", "enlace externo", "mapa", "grafico", "vista filtrada", "archivo o documento", "historia", "visualizacion", "lente de datos", "formulario", "calendario"."**categoria**": tema general del que trata la información publicada"**terminos_clave**": términos clave relacionados con la publicación"**url**": enlace web de la publicación en el Portal de Datos Abiertos"**fecha_creacion**": fecha de creación de la publicación"**fecha_actualizacion**": última fecha de actualización de la publicación"**filas**": número de filas del conjunto de datos, si aplica"**columnas**": número de columnas del conjunto de datos, si aplica"**correo_contacto**": correo de contacto de la entidad dueña de los datos"**licencia**": nombre de la licencia los datos"**entidad**": nombre de la entidad dueña de los datos"**entidad_url**": enlace web de la entidad dueña de los datos"**entidad_sector**": sector de la entidad"**entidad_departamento**": departamento de la entidad"**entidad_orden**": especifica si publicación es de orden territorial, nacional, departamental o internacional"**entidad_dependencia**": dependencia de la entidad dueña de los datos"**entidad_municipio**": municipio donde opera la entidad"**actualizacion_frecuencia**": frecuencia de actualización de los datos. Puede ser anual, semestral, mensual, trimestral, trianual, diaria, quinquenal, semanal, entre otros. También puede no aplicar"**idioma**": idioma en el que se encuentra la información"**cobertura**": alcance de la información. Puede ser nacional, departamental, municipal, centro poblado o internacional Filtrar la tabla de inventario Es posible buscar información de interés dentro de la tabla de inventario. 
La búsqueda se hace a partir de términos o texto que puede ser buscado en las columnas de formato texto de la tabla de inventario, por un rango de fechas o por el número filas o columnas. Ejemplo: búsqueda por términos clavePara hacer la búsqueda por términos clave, se construye un diccionario de Python que contenga como llaves los nombres de las columnas de texto de la tabla de inventario sobre las cuales se desea hacer el filtro. Los valores de cada llave es una lista que contiene uno o más términos clave. Este diccionario se ingresa al método "tabla_inventario" de DatosGov dentro del parámetro "filtro".Los términos que se ingresan al diccionario no tienen que tener tildes o mayúsculas que se encuentran en la columna original de la tabla de inventario. Por ejemplo, los resultados serán los mismos si se buscan las palabras "Economía", "economía", "economia" o "ECONOMÍA".Abajo se encuentra un ejemplo donde se filtra la tabla de inventario por las columnas "nombre" y "tipo". Dentro de la columna "nombre" se busca si contiene los términos "economia" o "ambiente" y si la columna "tipo" contiene el término "conjunto de datos". Es decir, se están buscando conjuntos de datos de temas de economía o ambiente.# Se crea el diccionario con el filtro deseado filtro = { "nombre": ["economia", "ambiente"], "tipo": ["conjunto de datos"] } # Se abre la tabla de inventario con el filtro deseado inventario = DatosGov().tabla_inventario(filtro=filtro) # Se imprime la tabla de inventario con el filtro aplicado en la celda anterior inventarioEjemplo: búsqueda por rango de filas y columnasPara hacer el filtro de la tabla de inventario por el tamaño de un conjunto de datos, se tiene que incluir el nombre de las columnas "filas" y "columnas" en el diccionario. Los valores de estas llaves son listas con dos elementos cada una: el primer elemento es el valor mínimo de filas o columnas y el segundo el valor máximo. A continuación se muestra un ejemplo de filtro, donde se seleccionan los conjuntos de datos con mínimo 50 filas y máximo 60 y con mínimo 8 columnas y máximo 10# Se crea el diccionario con el filtro deseado filtro = { "filas": [50, 60], "columnas": [8, 10] } # Se abre la tabla de inventario con el filtro deseado inventario = DatosGov().tabla_inventario(filtro=filtro) # Imprimir las columnas del código API, nombre, descripción, filas y columnas de la tabla de inventario filtrada inventario[["numero_api", "nombre", "descripcion", "filas", "columnas"]]Ejemplo: búsqueda por fecha La tabla de inventario también puede filtrase por fecha. Para hacerlo, se ingresa el diccionario de filtro con una de las columnas de fecha y se especifican las fechas de inicio y de fin deseadas. El siguiente ejemplo muestra cómo obtener la tabla de inventario para publicaciones creadas entre el 1 de enero de 2020 y el 1 de febrero de 2020.# Se crea el diccionario con el filtro deseado filtro = { "fecha_creacion": ["2020-01-01", "2020-02-01"], } # Se abre la tabla de inventario con el filtro deseado inventario = DatosGov().tabla_inventario(filtro=filtro) # Se muestra la tabla filtrada por fecha inventarioAbrir un conjunto de datos del Portal de Datos Abiertos Para abrir un conjunto de datos.gov.co es necesario tener el código API de ese conjunto e ingresarlo al método "cargar_base" de la clase DatosGov. 
Con esta función se crea un objeto que contiene el dataframe y el diccionario de metadatos del conjunto, los cuales se pueden obtener con los métodos "to_dataframe" y "metadatos"Abajo está el código para cargar el conjunto de datos de "Pueblos indígenas a nivel Nacional 2020", el cual se encuentra en el último filtro de la tabla de inventario. Cargar conjunto de datos con número API# Se define la variable "numero_api", que contiene el número API del conjunto "Pueblos indígenas a nivel Nacional 2020" numero_api = "etwv-wj8f" # Se descarga la información del conjunto de datos en la variable "data" con el método "cargar_base". # Al parámetro "api_id" se asigna el número API y "limite_filas" especifica que únicamente se descargan 200 filas del conjunto data = DatosGov().cargar_base(api_id = numero_api, limite_filas=200)Obtener dataframe del conjunto de datos# Se obtiene el dataframe del conjunto de datos con el método "to_dataframe" datos = data.to_dataframe() # Se visualiza una versión reducida del dataframe datosObtener diccionario de metadatos del conjunto de datos# Los metadatos se obtienen con el método "metadatos" y se asignan a la variable "meta" meta = data.metadatos() # Se visualiza el diccionario de metadatos metaCOVID-19 World Charts, 2020""" LICENSE MIT 2020 Website : http://www.guillaumerozier.fr Mail : This file contains scripts that download data from CSSE (John Hopkins) Github Repository and then process it to build many graphes. I'm currently cleaning the code, please come back soon it will be easier to read and edit it! The charts are exported to 'charts/images/'. Data is download to/imported from 'data/'. """ import requests import random from tqdm import tqdm import json from datetime import date from datetime import datetime import numpy as np import math import sys import chart_studio import pandas as pd import plotly.graph_objects as go import plotly.express as px import plotly from plotly.subplots import make_subplots import chart_studio.plotly as py import sys import matplotlib.pyplot as plt from plotly.validators.scatter.marker import SymbolValidator colors = px.colors.qualitative.D3 + plotly.colors.DEFAULT_PLOTLY_COLORS + px.colors.qualitative.Plotly + px.colors.qualitative.Dark24 + px.colors.qualitative.Alphabet #If you want to uplaod charts to your Plotly account (and switch "upload" to True just below): #chart_studio.tools.set_credentials_file(username='', api_key='') PATH = "../../" today = datetime.now().strftime("%Y-%m-%d %H:%M") "build : " + todayIf you want to display charts here, please change "show" variable to True:upload = False show = False export = True if len(sys.argv) >= 2: if (sys.argv[1]).lower() == "true": upload = True if len(sys.argv) >= 3: if (sys.argv[2]).lower() == "true": show = True if len(sys.argv) >= 4: if (sys.argv[3]).lower() == "true": export = True "build : " + todayFunctionsdef compute_offset(df, col_of_reference, col_to_align, countries): diffs = [] for offset in range(len(df)-15): a = df[col_of_reference][1:].shift(offset, fill_value=0)/countries[col_of_reference]["pop"] b = df[col_to_align][1:]/countries[col_to_align]["pop"] if len(a) > len(b): a = a[:-2] m = min(len(a), len(b)) delta = ((a[offset:] - b[offset:])**2)**(1/2) diffs.append(abs(delta.sum())) xa = [i for i in range(offset, len(a))] xb = [i for i in range(offset, len(b))] ret = diffs.index(min(diffs)) if col_of_reference == col_to_align: return 0 return retDATA Download datadef download_data(): #url_confirmed = "https://cowid.netlify.com/data/total_cases.csv" #url_deaths 
= "https://cowid.netlify.com/data/total_deaths.csv" url_confirmed_csse = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" url_deaths_csse = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv" url_france_data = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv" #r_confirmed = requests.get(url_confirmed) #r_deaths = requests.get(url_deaths) r_confirmed_csse = requests.get(url_confirmed_csse) r_deaths_csse = requests.get(url_deaths_csse) r_france_data = requests.get(url_france_data) #with open('data/total_cases_who.csv', 'wb') as f: #f.write(r_confirmed.content) #with open('data/total_deaths_who.csv', 'wb') as f: #f.write(r_deaths.content) with open(PATH+'data/total_cases_csse.csv', 'wb') as f: f.write(r_confirmed_csse.content) with open(PATH+'data/total_deaths_csse.csv', 'wb') as f: f.write(r_deaths_csse.content) with open(PATH+'data/france_data.csv', 'wb') as f: f.write(r_france_data.content) print("> data downloaded") #"build : " + todayImport data and mergedef import_files(): # CSSE data df_confirmed_csse = pd.read_csv(PATH+'data/total_cases_csse.csv') df_deaths_csse = pd.read_csv(PATH+'data/total_deaths_csse.csv') # WHO data #df_confirmed_who = pd.read_csv('data/total_cases_who.csv') #df_deaths_who = pd.read_csv('data/total_deaths_who.csv') # Perso data df_confirmed_perso = pd.read_csv(PATH+'data/total_cases_perso.csv') df_deaths_perso = pd.read_csv(PATH+'data/total_deaths_perso.csv') df_france_data = pd.read_csv(PATH+'data/france_data.csv') print("> data imported") return df_confirmed_csse, df_deaths_csse, df_confirmed_perso, df_deaths_perso, df_france_data def data_prep_csse(df0): df = df0.drop('Lat', axis=1) df = df.drop('Long', axis=1) df = df.drop('Province/State', axis=1) #df_csse_new2 = df_csse_new.groupby(['Country/Region']) df = df.T.reset_index() df.columns = df.iloc[0] df = df.rename(columns={"Country/Region": "date"}) df = df.drop(df.index[0]) dates = df['date'].values df = df.groupby(by=df.columns, axis=1).sum(numeric_only=True) df['date'] = dates return df #"build : " + today def data_merge(data_confirmed, df_confirmed_perso, data_deaths, df_deaths_perso, df_france_data): data_confirmed = pd.merge(data_confirmed, df_confirmed_perso, how='outer').drop_duplicates(subset='date') data_deaths = pd.merge(data_deaths, df_deaths_perso, how='outer').drop_duplicates(subset='date') return data_confirmed, data_deaths # Compute rolling mean and patch missing values def rolling(df): df_r = df df_r[:len(df_r)-1].fillna(method='pad',inplace=True) df_r = df.rolling(5, win_type='gaussian', center=True).mean(std=2) df_r['date'] = df['date'].values df_r.iloc[len(df_r)-2] = df.iloc[-2] df_r.iloc[len(df_r)-1] = df.iloc[-1] df_r.loc[len(df_r)-3, df_r.columns != "date" ] = ((df.iloc[-4][:-1] + df.iloc[-2][:-1])/2 + df.iloc[-3][:-1])/2 df_r.loc[len(df_r)-3, "date"] = df.iloc[-3]["date"] df_r.loc[len(df_r)-2, df_r.columns != "date" ] = (df.iloc[-3][:-1] + (df.iloc[-3][:-1] - df.iloc[-4][:-1]) / 2 + df.iloc[-2][:-1])/2 df_r.loc[len(df_r)-2, "date"] = df.iloc[-2]["date"] df_r.loc[len(df_r)-1, df_r.columns != "date" ] = (df.iloc[-2][:-1] + (df.iloc[-1][:-1] - df.iloc[-3][:-1]) / 2 + df.iloc[-1][:-1])/2 df_r.loc[len(df_r)-1, "date"] = df.iloc[-1]["date"] return df_r def final_data_prep(data_confirmed, data_confirmed_rolling, data_deaths, data_deaths_rolling): # Date 
conversion data_confirmed['date'] = data_confirmed['date'].astype('datetime64[ns]') #data_confirmed_rolling['date'] = data_confirmed_rolling['date'].astype('datetime64[ns]') data_deaths['date'] = data_deaths['date'].astype('datetime64[ns]') #data_deaths_rolling['date'] = data_deaths_rolling['date'].astype('datetime64[ns]') date_int = [i for i in range(len(data_confirmed))] data_confirmed["date_int"] = date_int date_int = [i for i in range(len(data_deaths))] data_deaths["date_int"] = date_int return data_confirmed, data_confirmed_rolling, data_deaths, data_deaths_rolling #print(data_confirmed_rolling.tail)Informations on countries (population, offset)def offset_compute_export(data_confirmed, data_deaths): # Importing informations on countries with open(PATH+'data/info_countries.json', 'r') as f: countries = json.load(f) # Computing offset i = 0 for c in tqdm(countries): countries[c]['offset_confirmed'] = compute_offset(data_confirmed, 'Italy', c, countries) countries[c]['offset_deaths'] = compute_offset(data_deaths, 'Italy', c, countries) countries[c]['color'] = i i += 1 # Exporting informations on countries with open(PATH+'data/info_countries.json', 'w') as fp: json.dump(countries, fp) print("> pop data imported") "build : " + today def final_df_exports(data_confirmed, data_deaths): data_confirmed.to_csv(PATH+'data/data_confirmed.csv') data_deaths.to_csv(PATH+'data/data_deaths.csv') print("> dfs exported") def data_import(): with open(PATH+'data/info_countries.json', 'r') as f: countries = json.load(f) return pd.read_csv(PATH+'data/data_confirmed.csv'), pd.read_csv(PATH+'data/data_deaths.csv'), countries def update_data(): # Data update: download_data() df_confirmed_csse, df_deaths_csse, df_confirmed_perso, df_deaths_perso, df_france_data = import_files() df_confirmed_csse = data_prep_csse(df_confirmed_csse) df_deaths_csse = data_prep_csse(df_deaths_csse) data_confirmed, data_deaths = data_merge(df_confirmed_csse, df_confirmed_perso, df_deaths_csse, df_deaths_perso, df_france_data) data_confirmed, data_confirmed_rolling, data_deaths, data_deaths_rolling = final_data_prep(data_confirmed, "data_confirmed_rolling", data_deaths, "data_deaths_rolling") offset_compute_export(data_confirmed, data_deaths) final_df_exports(data_confirmed, data_deaths)Graphs FunctionThis fonction builds and export graphs.def chart(data, data_rolling, countries, by_million_inh = False, align_curves = False, last_d = 15,\ offset_name = 'offset_confirmed', type_ppl = "confirmed cases", name_fig = "", since = False, \ min_rate=0, log=False, new=""): today = datetime.now().strftime("%Y-%m-%d %H:%M") ### Symbols symbols = [] for i in range(35): symbols.append(SymbolValidator().values[i]) random.shuffle(symbols) ### fig = go.Figure() i = 0 j = 0 x_an=np.array([]) y_an=np.array([]) countries_last_val = [] countries_array = [] for c in countries: if by_million_inh: val = data[c][len(data) - 1]/countries[c]['pop'] else: val = data[c][len(data) - 1] countries_last_val.append(val) countries_array.append(c) ind = np.argsort(countries_last_val) countries_array = np.array(countries_array) countries_array = countries_array[ind][::-1] for c in countries_array: if align_curves: offset = countries[c][offset_name] offset2 = -offset else: offset = 0 if offset==0: offset2 = None if by_million_inh: pop = countries[c]['pop'] else: pop = 1 date = 'date' offset3=0 since_str = "" since_str_leg = "" if since: date = 'date_int' res = list(map(lambda i: i> min_rate, data[c+new].values / pop)) offset2 = 0 if True in res: ind = res.index(True) 
offset2 = -ind since_str_leg = " [since {} days]".format(len(data) - ind) offset3 = offset2 last_d = 0 offset = 0 since_str = " [since {}]".format(min_rate) #, type_ppl if by_million_inh: since_str = since_str[:-1] + "/1M inh.]" x = data[date][ -last_d - offset: offset2] y = data[c+new][-last_d - offset3:] / pop if offset != 0: name_legend = '{} [delayed by {} days]'.format(c, -offset) else: name_legend = '{} {}'.format(c, since_str_leg) txt=["" for i in range(len(data_rolling[c][-last_d - offset3:]))] txt[-1] = c fig.add_trace(go.Scatter(x = x, y = y, mode='markers', marker_color = colors[countries[c]['color']], legendgroup = c, marker_symbol = countries[c]['color'], marker_size=5, #marker_line_width=2, opacity=1, showlegend=True, name = name_legend)) fig.add_trace(go.Scatter(x = data_rolling[date][ -last_d - offset : offset2], y = data_rolling[c+new][-last_d - offset3:] / pop, mode='lines', marker_color = colors[countries[c]['color']], opacity = 1, legendgroup=c, showlegend=False, line=dict(width=1.7), name = name_legend)) i += 1 j += 1 if i >= len(colors): i = 0 if j >= 40: j = 0 if log and since and c=="Italy": date_start = data_rolling['date_int'].values[ -last_d - offset] x = data_rolling["date_int"][ -last_d - offset : offset2] max_values = 15 for (rate, rate_str) in [(2**(1/10), "x2 every 10 days"), (2**(1/7), "x2 every 7 days"), (2**(1/3), "x2 every 3 days"), (2**(1/2), "x2 every 2 days"), (2**(1/5), "x2 every 5 days")]: y = rate ** (data_rolling["date_int"][ -last_d - offset : offset2].values - date_start) * min_rate fig.add_trace(go.Scatter(x = x[:max_values+1], y = y[:max_values+1], mode='lines+text', marker_color="grey", opacity=1, #text = rate_str, textposition = "bottom right", legendgroup="Tendance", showlegend=False, line=dict( width=1, dash='dot' ), name = "Tendance")) fig.add_trace(go.Scatter(x = [data_rolling["date_int"][ -last_d - offset : offset2].values[max_values]], y = [(rate ** (data_rolling["date_int"][ -last_d - offset : offset2].values - date_start) * min_rate)[max_values]], mode='text', marker_color="grey", opacity=1, text = rate_str, textposition = "bottom right", legendgroup="Tendance", showlegend=False, name = "Tendance")) ### END LOOP ### align_str = "" if align_curves: align_str = " [aligned]" million_str = "" million_str_ax = "" if by_million_inh: million_str = " for 1M inhabitants" million_str_ax = "/ nb of inhabitants (million)" delayed="" if align_curves: delayed="— delayed for some countries" if since: delayed ="— since {} {} {}".format(min_rate, type_ppl, million_str) fig.update_annotations(dict( xref="x", yref="y", showarrow=True, arrowhead=7 )) log_str="linear" if log: log_str = "log" fig.update_layout( showlegend=True, title={ 'text': "COVID-19 {}{}{}{}".format(type_ppl, million_str, align_str, since_str), 'y':0.95, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="Day {} {}".format(delayed, ''), yaxis_type=log_str, yaxis_title="Total {} {}".format(type_ppl, million_str), titlefont = dict( size=28), annotations = [dict(xref='paper', yref='paper', x=0, y=1.05, showarrow=False, text ='Last update: {} ; Last data: {} ; Data: CSSE ; Author: @guillaumerozier'.format(today, str(data['date'].values[-1])[:10]))] ) #fig.update_xaxes(nticks = last_d) print("> graph built") if upload: py.plot(fig, filename = name_fig, auto_open=False) print("> graph uploaded") if show: fig.show() print("> graph showed") if export: path_log = "" if log: path_log = "log_yaxis/" fig.write_image(PATH+"images/charts/{}{}.jpeg".format(path_log, name_fig), scale=2, 
width=1100, height=700) #fig.write_image("images/charts_sd/{}{}.png".format(path_log, name_fig), scale=0.5) plotly.offline.plot(fig, filename = PATH+'images/html_exports/{}{}.html'.format(path_log, name_fig), auto_open=False) print("> graph exported\n") return fig ### Main update_data() data_confirmed, data_deaths, countries = data_import() data_confirmed_t = data_confirmed.T data_confirmed_t.columns = data_confirmed_t.iloc[len(data_confirmed_t)-2] data_confirmed_t = data_confirmed_t.drop(data_confirmed_t.index[-1]) data_confirmed_t = data_confirmed_t.drop(data_confirmed_t.index[-1]) data_confirmed_t = data_confirmed_t.drop(data_confirmed_t.index[0]) data_deaths_t = data_deaths.T data_deaths_t.columns = data_deaths_t.iloc[len(data_deaths_t)-2] data_deaths_t = data_deaths_t.drop(data_deaths_t.index[-1]) data_deaths_t = data_deaths_t.drop(data_deaths_t.index[-1]) data_deaths_t = data_deaths_t.drop(data_deaths_t.index[0]) for (data, name_var, same_scale) in [(data_deaths_t, "deaths", True), (data_deaths_t, "deaths", False), (data_confirmed_t, "confirmed", True), (data_confirmed_t, "confirmed", False)]: name_suffix="confirmed" type_ppl = "cas positifs" if "death" in name_var: name_suffix="deaths" type_ppl = "décès" ni, nj = 4, 5 i, j = 1, 1 dates = data.columns.values data = data.sort_values(by=[dates[-1]], ascending=False) data = data.diff(axis=1).rolling(axis=1, window=7).mean() countries_ordered = list(data.index.values[:20]) #countries_ordered[:11] + [""] + countries_ordered[11:14] + [""] + countries_ordered[14:] max_value = 0 fig = make_subplots(rows=ni, cols=nj, shared_yaxes = same_scale, subplot_titles = ["" + c + "" for c in countries_ordered], vertical_spacing = 0.06, horizontal_spacing = 0.02) sub = "par ordre décroissant du cumul total, les croix représentent les données quotidiennes brutes et les bâtons la moyenne mobile sur 14 j. 
• guillaumerozier.fr" max_value_diff = 0 for country in countries_ordered: datac = data.loc[country] max_value = max(max_value, datac.max()) max_value_diff = max(max_value_diff, datac.diff().max()*0.8) for country in countries_ordered: datac = data.loc[country] data_c_rolling = datac.rolling(window = 14, center=True).mean() fig.add_trace(go.Bar(x = datac.index, y = data_c_rolling, marker=dict(color = data_c_rolling.diff(), coloraxis="coloraxis"), ), i, j) fig.add_trace(go.Scatter(x = data.loc[country].index, y = datac, mode="markers", marker_size=6, marker_symbol="x-thin", marker_line_color="Black", marker_line_width=0.6, opacity=0.5), i, j) #max_value = max(max_value, datac.max()) #max_value_diff = max(max_value_diff, data_c_rolling.diff().max()) rangemin = "2020-02-02" fig.update_xaxes(title_text="", range=[rangemin, dates[-1]], gridcolor='white', ticks="inside", tickformat='%d/%m', tickangle=0, tickfont=dict(size=9), nticks=15, linewidth=1, linecolor='white', row=i, col=j) rge = None if same_scale: rge = [0, max_value] fig.update_yaxes(title_text="", range=rge, gridcolor='white', linewidth=1, linecolor='white', row=i, col=j) j+=1 if j == nj+1 : #or ((i >= 3) & (j == nj)) i+=1 j=1 for i in fig['layout']['annotations']: i['font'] = dict(size=25) i['y'] = i['y'] - 0.04 #for annotation in fig['layout']['annotations']: #annotation ['x'] = 0.5 by_million_title = "" by_million_legend = "" fig.update_layout( barmode="overlay", margin=dict( l=0, r=25, b=0, t=160, pad=0 ), bargap=0, paper_bgcolor='#fffdf5',#fcf8ed #faf9ed plot_bgcolor='#f5f0e4',#f5f0e4 fcf8ed f0e8d5 coloraxis=dict(colorscale=["green", "#ffc832", "#cf0000"], cmin=-max_value_diff/4, cmax=max_value_diff/4), coloraxis_colorbar=dict( title="Nombre
quotidien de {}
‍ ".format(type_ppl), thicknessmode="pixels", thickness=15, lenmode="pixels", len=600, yanchor="middle", y=0.5, xanchor="left", x=1.02, ticks="outside", tickprefix=" ", ticksuffix="", nticks=15, tickfont=dict(size=15), titlefont=dict(size=18)), showlegend=False, title={ 'text': ("COVID19 : nombre de {} quotidiens
"+sub).format(type_ppl), 'y':0.97, 'x':0.5, 'xref':"paper", 'yref':"container", 'xanchor': 'center', 'yanchor': 'middle'}, titlefont = dict( size=45, ) ) fig["layout"]["annotations"] += ( dict( x=0.9, y=0.015, xref='paper', yref='paper', xanchor='center', yanchor='top', text='Source :
Santé Publique France', showarrow = False, font=dict(size=12), opacity=0.5 ),) if same_scale: same_scale_str = "_samescale" else: same_scale_str = "" name_fig = "subplots_" + name_suffix + same_scale_str fig.write_image(PATH+"images/charts/{}.jpeg".format(name_fig), scale=1.5, width=3000, height=1650) fig["layout"]["annotations"] += ( dict( x=0.5, y=1, xref='paper', yref='paper', xanchor='center', text='Cliquez sur des éléments de légende pour les ajouter/supprimer', showarrow = False ), ) plotly.offline.plot(fig, filename = PATH+'images/html_exports/{}.html'.format(name_fig), auto_open=False) print("> " + name_fig) #fig.show()> subplots_deaths_samescale > subplots_deaths > subplots_confirmed_samescale > subplots_confirmedFunction callsThis block contains calls to above function for every chart.#update_data() data_confirmed, data_deaths, countries = data_import() last_d_default = math.trunc((datetime.now() - datetime.strptime("2020-03-05", "%Y-%m-%d")).total_seconds()/(3600*24)) for log in False, True: # Confirmed cases name = "cases" print(name) chart(countries=countries, data = data_confirmed, data_rolling = data_confirmed, by_million_inh = False, last_d = last_d_default, name_fig = name, log=log ) name = "cases_per_1m_inhabitant" print(name) chart(countries=countries, data = data_confirmed, data_rolling = data_confirmed, by_million_inh = True, last_d = last_d_default, name_fig = name, log=log ) """name = "cases_per_1m_inhabitant_aligned" print(name) chart(countries=countries, data = data_confirmed, data_rolling = data_confirmed, by_million_inh = True, last_d = 40, align_curves = True, offset_name = 'offset_confirmed', name_fig = name, log=log )""" name = "cases_per_1m_inhabitant_since" print(name) chart(countries=countries, data = data_confirmed, data_rolling = data_confirmed, by_million_inh = True, align_curves = False, since=True, name_fig = name, min_rate=20, log=log ) name = "cases_since" print(name) chart(countries=countries, data = data_confirmed, data_rolling = data_confirmed, by_million_inh = False, align_curves = False, since=True, name_fig = name, min_rate=1000, log=log ) # Deaths name = "deaths" print(name) chart(countries=countries, data = data_deaths, data_rolling = data_deaths, by_million_inh = False, last_d = last_d_default, type_ppl = "deaths", name_fig = name, log=log ) """name = "deaths_new_since" print(name) chart(countries = countries, new = "_new", data = data_deaths, data_rolling = data_deaths, by_million_inh = False, last_d = round(len(data_deaths)/2), type_ppl = "deaths", name_fig = name, since=True, min_rate=10, log=log )""" name = "deaths_per_1m_inhabitant" print(name) chart(countries=countries, data = data_deaths, data_rolling = data_deaths, by_million_inh = True, last_d = last_d_default, type_ppl = "deaths", name_fig = name, log=log ) """name = "deaths_per_1m_inhabitant_aligned" print(name) chart(countries=countries, data = data_deaths, data_rolling = data_deaths, by_million_inh = True, last_d = 35, align_curves = True, offset_name = 'offset_deaths', type_ppl = "deaths", name_fig = name, log=log )""" name = "deaths_per_1m_inhabitant_since" print(name) chart(countries=countries, data = data_deaths, data_rolling = data_deaths, by_million_inh = True, align_curves = False, type_ppl = "deaths", since=True, name_fig = name, min_rate=3, log=log ) name = "deaths_since" print(name) chart(countries=countries, data = data_deaths, data_rolling = data_deaths, by_million_inh = False, last_d = 20, align_curves = False, type_ppl = "deaths", since=True, name_fig = name, 
min_rate=100, log=log )cases > graph built > graph exported cases_per_1m_inhabitant > graph built > graph exported cases_per_1m_inhabitant_since > graph built > graph exported cases_since > graph built > graph exported deaths > graph built > graph exported deaths_per_1m_inhabitant > graph built > graph exported deaths_per_1m_inhabitant_since > graph built > graph exported deaths_since > graph built > graph exported cases > graph built > graph exported cases_per_1m_inhabitant > graph built > graph exported cases_per_1m_inhabitant_since > graph built > graph exported cases_since > graph built > graph exported deaths > graph built > graph exported deaths_per_1m_inhabitant > graph built > graph exported deaths_per_1m_inhabitant_since > graph built > graph exported deaths_since > graph built > graph exportedWorld chartsfrom datetime import timedelta #locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8') for a in range(1): print("hey") for (dataf, name_fig, title) in [(data_deaths_t, "deaths_world", 'deaths'), (data_confirmed_t, "cases_world", 'cases')]: data = dataf.sum() data_diff = dataf.sum().diff() fig = go.Figure() fig.add_trace(go.Bar(x=data_diff.index, y=data_diff.rolling(window=7, center=True).mean(), marker=dict(color = data_diff.diff().rolling(window=7, center=True).mean(), coloraxis="coloraxis"), )) fig.add_trace(go.Scatter(x=data_diff.index, y=data_diff, mode="markers",marker_size=6, marker_symbol="x-thin", marker_line_color="Black", marker_line_width=0.6, opacity=0.5)) fig.update_layout( margin=dict( l=0, r=150, b=0, t=90, pad=0 ), title={ 'text': "Daily {} due to Covid19".format(title), 'y':0.95, 'x':0.5, 'xanchor': 'center', 'yanchor': 'middle'}, titlefont = dict( size=20), bargap=0, coloraxis=dict(colorscale=["green", "#ffc832", "#cf0000"], cmin=-data_diff.rolling(window=14).mean().diff().max(), cmax=data_diff.rolling(window=14).mean().diff().max(), colorbar=dict( title="Daily variation
of the nb of new {}
‍ ".format(title), thicknessmode="pixels", thickness=15, lenmode="pixels", len=300, yanchor="middle", y=0.5, xanchor="left", x=1.05, ticks="outside", tickprefix=" ", ticksuffix=" pers.", nticks=15, tickfont=dict(size=8), titlefont=dict(size=10))), showlegend=False, ) date_plus_1 = (datetime.strptime(data_diff.index.max(), '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d') fig.update_yaxes(title_text="", gridcolor='white', range=[0, data_diff.max()*1.02], ticks="inside", tickangle=0, nticks=10, linewidth=1, linecolor='white', tickcolor="white") fig.update_xaxes(nticks=15, ticks='outside', range=[data_diff.index.min(), date_plus_1], tickformat='%d/%m') fig["layout"]["annotations"] += ( dict( x=0.5, y=0.5, xref='paper', yref='paper', xanchor='center', yanchor='middle', text='covidtracker.fr - {}'.format(datetime.strptime(max(data.index), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False, font=dict(size=15), opacity=0 ), dict( x=0.56, y=1.08, xref='paper', yref='paper', xanchor='center', text='colored bars are a rolling mean of 7 days, grey x are raw data - covidtracker.fr', font=dict(size=15), showarrow = False),) fig.add_layout_image( dict( source="https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/covidtracker_logo_text.jpeg", xref="paper", yref="paper", x=1.15, y=1.1, sizex=0.15, sizey=0.15, xanchor="right", yanchor="top", opacity=0.8 ) ) fig.write_image(PATH+"images/charts/{}.jpeg".format(name_fig), scale=2, width=1100, height=700) fig["layout"]["annotations"] += ( dict( x=0.5, y=1, xref='paper', yref='paper', xanchor='center', text='Cliquez sur des éléments de légende pour les ajouter/supprimer', showarrow = False ), ) plotly.offline.plot(fig, filename = PATH+'images/html_exports/{}.html'.format(name_fig), auto_open=False) print("> " + name_fig) #fig.show() #results = seir_model_with_soc_dist(init_vals, params, t)The Bernstein-Vazirani problem consists of finding $$\mathbf{a} \in Z_{2}^{n}$$ given one application of the black-box operator that maps $$\left\vert \mathbf{x} \right\rangle \left\vert b \right\rangle \rightarrow \left\vert \mathbf{x} \right\rangle \left\vert b \oplus \mathbf{x} \cdot \mathbf{a} \right\rangle$$The setup is similar to the Deutsch-Jozsa algorithm in that we still construct the same unitary operator to represent the black-box, but with $$f(\mathbf{x}) = \mathbf{x} \cdot \mathbf{a},$$ so that$$U_{f(\mathbf{x})} = \sum_{\mathbf{x} = 0}^{2^{n} - 1} \left\vert \mathbf{x} \right\rangle \left\langle \mathbf{x} \right\vert \otimes \left[ I + (\mathbf{x} \cdot \mathbf{a}) [X - I]\right]$$Starting out with the state $$\left\vert 0 \right\rangle^{\otimes n} \otimes \left\vert 0 \right\rangle,$$ we transform the first n qubits via a Hadamard transform, the (n-1)th qubit to (|0> - |1>)/((2)^(-1)), apply the black-box, then apply a final Hadamard to obtain the vector |a> over the first n qubits.def qubit_strings(n): qubit_strings = [] for q in itertools.product(['0', '1'], repeat=n): qubit_strings.append(''.join(q)) return qubit_strings def black_box_map(n, a): """ Black-box map, f(x) = x.a for all vectors x, given a """ qubs = qubit_strings(n) # calculate each dot product x.a and store in a dict d_blackbox = {} for q in qubs: dot_prod = 0 for i, xx in enumerate(q): dot_prod += a[i] * int(xx) d_blackbox[q] = dot_prod % 2 return d_blackbox def qubit_ket(qub_string): """ Form a basis ket out of n-bit string specified by the input 'qub_string', e.g. 
'001' -> |001> """ e0 = np.array([[1], [0]]) e1 = np.array([[0], [1]]) d_qubstring = {'0': e0, '1': e1} # initialize ket ket = d_qubstring[qub_string[0]] for i in range(1, len(qub_string)): ket = np.kron(ket, d_qubstring[qub_string[i]]) return ket def projection_op(qub_string): """ Creates a projection operator out of the basis element specified by 'qub_string', e.g. '101' -> |101> <101| """ ket = qubit_ket(qub_string) bra = np.transpose(ket) # all entries real, so no complex conjugation necessary proj = np.kron(ket, bra) return proj def black_box(n, a): """ Unitary representation of the black-box operator on (n+1)-qubits, given the vector a """ d_bb = black_box_map(n, a) # initialize unitary matrix N = 2**(n+1) unitary_rep = np.zeros(shape=(N, N)) # populate unitary matrix for k, v in d_bb.items(): unitary_rep += np.kron(projection_op(k), np.eye(2) + v*(-np.eye(2) + np.array([[0, 1], [1, 0]]))) return unitary_repBernstein-Vazirani algorithm using (n+1) qubitsp = Program() # pick numer of control qubits to be used n = 5 # pick a random value for the vector 'a' a = np.random.randint(low=0, high=2, size=n) print ("This is the (randomly chosen) value of a: ", a) # Define U_f p.defgate("U_f", black_box(n, a)) # Prepare the starting state |0>^(\otimes n) x (1/sqrt[2])*(|0> - |1>) for n_ in range(1, n+1): p.inst(I(n_)) p.inst(X(0)) p.inst(H(0)) # Apply H^(\otimes n) for n_ in range(1, n+1): p.inst(H(n_)) # Apply U_f p.inst(("U_f",) + tuple(range(n+1)[::-1])) # Apply final H^(\otimes n) for n_ in range(1, n+1): p.inst(H(n_)) # Final measurement classical_regs = list(range(n)) for i, n_ in enumerate(list(range(1, n+1))[::-1]): p.measure(n_, classical_regs[i]) qvm = api.QVMConnection() measure_n_qubits = qvm.run(p, classical_regs) # flatten out list measure_n_qubits = [item for sublist in measure_n_qubits for item in sublist] print ("This is the measured values of the first %s qubits at the end: " %n, measure_n_qubits)This is the (randomly chosen) value of a: [1 1 1 0 0] This is the measured values of the first 5 qubits at the end: [1, 1, 1, 0, 0]Understanding Embeddings on Texts# Based on # https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/6.2-understanding-recurrent-neural-networks.ipynb import warnings warnings.filterwarnings('ignore') %matplotlib inline %pylab inline import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) print(tf.__version__) # https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification max_features = 1000 # number of words to consider as features maxlen = 20 # cut texts after this number of words (among top max_features most common words) # each review is encoded as a sequence of word indexes # indexed by overall frequency in the dataset # output is 0 (negative) or 1 (positive) imdb = tf.keras.datasets.imdb.load_data(num_words=max_features) (raw_input_train, y_train), (raw_input_test, y_test) = imdb # tf.keras.datasets.imdb.load_data? y_train.min() y_train.max() # 25000 texts len(raw_input_train) # first text has 218 words len(raw_input_train[0]) raw_input_train[0] # tf.keras.preprocessing.sequence.pad_sequences? 
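# Added sketch (not from the original notebook): a tiny illustration of what
# pad_sequences does before it is applied to the real reviews below. By default,
# sequences shorter than maxlen are left-padded with zeros and longer ones are
# truncated from the front.
demo = tf.keras.preprocessing.sequence.pad_sequences([[1, 2, 3], [4, 5, 6, 7, 8]], maxlen=4)
print(demo)
# [[0 1 2 3]
#  [5 6 7 8]]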
# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences input_train = tf.keras.preprocessing.sequence.pad_sequences(raw_input_train, maxlen=maxlen) input_test = tf.keras.preprocessing.sequence.pad_sequences(raw_input_test, maxlen=maxlen) input_train.shape, input_test.shape, y_train.shape, y_test.shape # left padded with zeros # As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word. input_train[0]We can use a randomly initialized embedding without any training# tf.keras.layers.Embedding? embedding_dim = 3 random_model = tf.keras.Sequential() # Parameters: max_features * embedding_dim random_model.add(tf.keras.layers.Embedding(name='embedding',input_dim=max_features, output_dim=embedding_dim, input_length=maxlen)) random_model.summary() random_model.predict(input_train[:1])Training the embedding together with the whole model is more reasonableAlternative: use a pre-trained model, probably trained using skip-gramembedding_dim = 3 model = tf.keras.Sequential() # Parameters: max_features * embedding_dim model.add(tf.keras.layers.Embedding(name='embedding', input_dim=max_features, output_dim=embedding_dim, input_length=maxlen)) # Output: maxlen * embedding_dim (8) model.add(tf.keras.layers.Flatten(name='flatten')) # binary classifier model.add(tf.keras.layers.Dense(name='fc', units=32, activation='relu')) model.add(tf.keras.layers.Dense(name='classifier', units=1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary() batch_size = 128 %time history = model.fit(input_train, y_train, epochs=10, batch_size=batch_size, validation_split=0.2) train_loss, train_accuracy = model.evaluate(input_train, y_train, batch_size=batch_size) train_accuracy test_loss, test_accuracy = model.evaluate(input_test, y_test, batch_size=batch_size) test_accuracy # precition model.predict(input_test[0:5]) # ground truth y_test[0:5]How does the output of the trained embedding look like?embedding_layer = model.get_layer('embedding') model_stub= tf.keras.Model(inputs=model.input, outputs=embedding_layer.output) embedding_prediction = model_stub.predict(input_test[0:5]) # 5 sample reviews, 500 words per review, 8 dimensions per word embedding_prediction.shape # 8 embedding dimensions of first word of first sample review embedding_prediction[0][0]Comparing trained to untrained modelinput_train[0] model_stub.predict(input_train[:1]) random_model.predict(input_train[:1])Basic Histogramnp.random.seed(0) x_data = np.random.randn(100) x_sc = LinearScale() y_sc = LinearScale() hist = Hist(sample=x_data, scales={'sample': x_sc, 'count': y_sc}) ax_x = Axis(scale=x_sc, tick_format='0.2f') ax_y = Axis(scale=y_sc, orientation='vertical') Figure(marks=[hist], axes=[ax_x, ax_y], padding_y=0) hist.count # Changing the number of bins hist.bins = 20Properties of Histogram# normalizing the count x_sc = LinearScale() y_sc = LinearScale() hist = Hist(sample=x_data, scales={'sample': x_sc, 'count': y_sc}, normalized=True) ax_x = Axis(scale=x_sc, tick_format='0.2f') ax_y = Axis(scale=y_sc, orientation='vertical') Figure(marks=[hist], axes=[ax_x, ax_y], padding_y=0) # changing the color hist.colors=['orangered'] # stroke and opacity update hist.stroke = 'orange' hist.opacities = [0.5] * hist.binsRead-only properties of Histogramx_sc = LinearScale() y_sc = LinearScale() hist = Hist(sample=x_data, scales={'sample': x_sc, 'count': y_sc}) ax_x = Axis(scale=x_sc, tick_format='0.2f') ax_y = Axis(scale=y_sc, 
orientation='vertical') Figure(marks=[hist], axes=[ax_x, ax_y], padding_y=0) # count is the number of elements in each interval hist.count # mid points are the mid points of each interval hist.midpointsHands-on Supervised Machine Learning with PythonIn this example, we'll predict whether or not an email is spam. The DataYou can find the data at the [UCI repository](https://archive.ics.uci.edu/ml/datasets/Spambase). Files:1. [Spam data](https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data)2. [Metadata](https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.names)3. [Documentation](https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.DOCUMENTATION) Reference__Creators:__*, , , , 1501 Page Mill Rd., Palo Alto, CA 94304 *__Data Set Information:__*The "spam" concept is diverse: advertisements for products/web sites, make money fast schemes, chain letters, etc...**Our collection of spam e-mails came from our postmaster and individuals who had filed spam. Our collection of non-spam e-mails came from filed work and personal e-mails, and hence the word 'george' and the area code '650' are indicators of non-spam. These are useful when constructing a personalized spam filter. One would either have to blind such non-spam indicators or get a very wide collection of non-spam to generate a general purpose spam filter. *__For background on spam: __*., LaMacchia, . Spam! Communications of the ACM, 41(8):74-83, 1998. *from urllib.request import urlretrieve, ProxyHandler, build_opener, install_opener import requests import os pfx = "https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/" data_dir = "data" # We might need to set a proxy handler... try: proxies = {"http": os.environ['http_proxy'], "https": os.environ['https_proxy']} print("Found proxy settings") #create the proxy object, assign it to a variable proxy = ProxyHandler(proxies) # construct a new opener using your proxy settings opener = build_opener(proxy) # install the opener on the module-level install_opener(opener) except KeyError: pass # The following will download the data if you don't already have it... def get_data(link, where): # Append the prefix link = pfx + link # make the parent dir if its not there if not os.path.exists(data_dir): os.mkdir(data_dir) # download if necessary if os.path.exists(where): print("Already have %s; will not download" % where) return print("Downloading %s to %s" % (link, where)) urlretrieve(link, where) # Get the core spam data get_data(link="spambase.data", where=os.path.join(data_dir, "spam.csv"))Found proxy settings Already have data/spam.csv; will not downloadLoad and split our dataWe've now downloaded the data into the `data/` directory. 
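Before loading it, a quick structural sanity check of the raw file can catch a truncated or failed download early. This is a minimal sketch under the assumption that the file sits at `data/spam.csv` as in the download step above; the Spambase file should contain 4601 rows with 58 comma-separated values each (57 features plus the `is_spam` label).

```python
import os

# Hypothetical sanity check of the downloaded Spambase file (path from the download step above)
spam_path = os.path.join("data", "spam.csv")

with open(spam_path) as f:
    rows = f.read().splitlines()

print("File size (bytes):", os.path.getsize(spam_path))
print("Number of rows:", len(rows))                         # expected: 4601
print("Values in the first row:", len(rows[0].split(",")))  # expected: 58 = 57 features + label
```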
Let's load it into Pandas and do a bit of exploring...import pandas as pd names = ["word_freq_make", "word_freq_address", "word_freq_all", "word_freq_3d", "word_freq_our", "word_freq_over", "word_freq_remove", "word_freq_internet", "word_freq_order", "word_freq_mail", "word_freq_receive", "word_freq_will", "word_freq_people", "word_freq_report", "word_freq_addresses", "word_freq_free", "word_freq_business", "word_freq_email", "word_freq_you", "word_freq_credit", "word_freq_your", "word_freq_font", "word_freq_000", "word_freq_money", "word_freq_hp", "word_freq_hpl", "word_freq_george", "word_freq_650", "word_freq_lab", "word_freq_labs", "word_freq_telnet", "word_freq_857", "word_freq_data", "word_freq_415", "word_freq_85", "word_freq_technology", "word_freq_1999", "word_freq_parts", "word_freq_pm", "word_freq_direct", "word_freq_cs", "word_freq_meeting", "word_freq_original", "word_freq_project", "word_freq_re", "word_freq_edu", "word_freq_table", "word_freq_conference", "char_freq_;", "char_freq_(", "char_freq_[", "char_freq_!", "char_freq_$", "char_freq_#", "capital_run_length_average", "capital_run_length_longest", "capital_run_length_total", "is_spam"] df = pd.read_csv(os.path.join("data", "spam.csv"), header=None, names=names) # pop off the target y = df.pop("is_spam") df.head() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.2, random_state=42, stratify=y) print("Num training samples: %i" % X_train.shape[0]) print("Num test samples: %i" % X_test.shape[0])Num training samples: 3680 Num test samples: 921Fit several modelsWe're going to fit a series of models and look at how they perform. To evaluate our model, we'll examine learning curves. This will help us diagnose potentially high-variance or high-bias problems. Decision tree classifierfrom packtml.utils.plotting import plot_learning_curve from packtml.decision_tree import CARTClassifier from sklearn.metrics import accuracy_score import numpy as np import matplotlib.pyplot as plt %matplotlib inline # very basic decision tree plot_learning_curve( CARTClassifier, metric=accuracy_score, X=X_train, y=y_train, n_folds=3, seed=21, trace=True, train_sizes=(np.linspace(.25, .75, 4) * X_train.shape[0]).astype(int), max_depth=8, random_state=42)\ .show() # Let's fit that decision tree so we can use it later... decision_tree = CARTClassifier(X_train, y_train, random_state=42, max_depth=8)Simple logistic regressionfrom packtml.regression import SimpleLogisticRegression # simple logistic regression classifier plot_learning_curve( SimpleLogisticRegression, metric=accuracy_score, X=X_train, y=y_train, n_folds=3, seed=21, trace=True, train_sizes=(np.linspace(.25, .8, 4) * X_train.shape[0]).astype(int), n_steps=250, learning_rate=0.0025, loglik_interval=100)\ .show() # Let's fit that logistic regression so we can use it later... logistic_regression = SimpleLogisticRegression( X_train, y_train, n_steps=250, learning_rate=0.0025, loglik_interval=100)Can they classify emails we make up?Here is the documentation for the data from UCI: | SPAM E-MAIL DATABASE ATTRIBUTES (in .names format) | | 48 continuous real [0,100] attributes of type word_freq_WORD | = percentage of words in the e-mail that match WORD, | i.e. 100 * (number of times the WORD appears in the e-mail) / | total number of words in e-mail. A "word" in this case is any | string of alphanumeric characters bounded by non-alphanumeric | characters or end-of-string. 
| | 6 continuous real [0,100] attributes of type char_freq_CHAR | = percentage of characters in the e-mail that match CHAR, | i.e. 100 * (number of CHAR occurences) / total characters in e-mail | | 1 continuous real [1,...] attribute of type capital_run_length_average | = average length of uninterrupted sequences of capital letters | | 1 continuous integer [1,...] attribute of type capital_run_length_longest | = length of longest uninterrupted sequence of capital letters | | 1 continuous integer [1,...] attribute of type capital_run_length_total | = sum of length of uninterrupted sequences of capital letters | = total number of capital letters in the e-mail | | 1 nominal {0,1} class attribute of type spam | = denotes whether the e-mail was considered spam (1) or not (0), | i.e. unsolicited commercial e-mail. | | For more information, see file 'spambase.DOCUMENTATION' at the | UCI Machine Learning Repository: http://www.ics.uci.edu/~mlearn/MLRepository.html Let's make some transformers that can transform an email message into a vector of length 48, and then feed them into our algorithms to predict whether they're spam or not. The first email (obvious spam):spam_email = """ Dear small business owner, This email is to inform you that for $0 down, you can receive a FREE CREDIT REPORT!!! Your money is important; PROTECT YOUR CREDIT and reply direct to us for assistance! """ print(spam_email)Dear small business owner, This email is to inform you that for $0 down, you can receive a FREE CREDIT REPORT!!! Your money is important; PROTECT YOUR CREDIT and reply direct to us for assistance!The second email (not spam):not_spam = """ Hey George, Make sure to be careful when checking your HP email. There has been a phishing attempt recently advertising a credit report. This is a known scam, and should be ignored. Please feel free to let me know if you have any questions or concerns, but the IT guys told me to warn everyone. """ print(not_spam)Hey George, Make sure to be careful when checking your HP email. There has been a phishing attempt recently advertising a credit report. This is a known scam, and should be ignored. Please feel free to let me know if you have any questions or concerns, but the IT guys told me to warn everyone.Now we need to do a bit of finagling to get our emails into a consumable format.from collections import Counter import numpy as np def encode_email(email): # tokenize the email tokens = email.split() # easiest way to count characters will be to join everything # up and split them into chars, then use a counter to count them # all ONE time. chars = list("".join(tokens)) char_counts = Counter(chars) n_chars = len(chars) # we can do the same thing with "tokens" to get counts of words # (but we want them to be lowercase!) word_counts = Counter([t.lower() for t in tokens]) # Of the names above, the ones that start with "word" are # percentages of frequencies of words. Let's get the words # in question freq_words = [ name.split("_")[-1] for name in names if name.startswith("word") ] # compile the first 48 values using the words in question word_freq_encodings = [100. * (word_counts.get(t, 0) / len(tokens)) for t in freq_words] # The same applies to names that start with "char". These are the # characters that we're interested in counting: ;, (, [, !, $, # freq_chars = [ name.split("_")[-1] for name in names if name.startswith("char") ] char_freq_encodings = [100. 
* (char_counts.get(c, 0) / len(chars)) for c in freq_chars] # now we can compute the more nuanced features that look for # runs of capitalization, etc. We can find all the runs of capitals # in one single pass of O(N) capital_runs = [] # type: list[int] in_run = False # are we currently in a run? run_len = 0 # what's the current run len? for c in chars: capital = c.isupper() if capital: # if we are NOT in a run, we need to start one if not in_run: in_run = True # always increment run length run_len += 1 else: # if we were NOT in a run, we do not need to do anything. # if we WERE in a run, finalize it if in_run: in_run = False capital_runs.append(run_len) run_len = 0 # make a np array to compute the next few stats quickly capital_runs = np.asarray(capital_runs) capital_stats = [capital_runs.mean(), capital_runs.max(), capital_runs.sum()] # return them as a numpy array all concatenated return np.array(word_freq_encodings + char_freq_encodings + capital_stats) # get the email vectors fake_email = encode_email(spam_email) real_email = encode_email(not_spam) # this is what they look like: print("Spam email:") print(fake_email) print("\nReal email:") print(real_email)Spam email: [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 2.85714286 0. 0. 0. 0. 2.85714286 2.85714286 2.85714286 5.71428571 5.71428571 5.71428571 0. 0. 2.85714286 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 2.85714286 0. 0. 0. 0. 0. 0. 0. 0. 0.61728395 0. 0. 2.4691358 0.61728395 0. 7.2 17. 36. ] Real email: [ 1.81818182 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.81818182 0. 0. 1.81818182 1.81818182 1.81818182 0. [...]Let's see if our models are any good!predict = (lambda rec, mod: "SPAM!" if mod.predict([rec])[0] == 1 else "Not spam") print("Decision tree predictions:") print("Spam email prediction: %r" % predict(fake_email, decision_tree)) print("Real email prediction: %r" % predict(real_email, decision_tree)) print("\nLogistic regression predictions:") print("Spam email prediction: %r" % predict(fake_email, logistic_regression)) print("Real email prediction: %r" % predict(real_email, logistic_regression))Decision tree predictions: Spam email prediction: 'SPAM!' Real email prediction: 'Not spam' Logistic regression predictions: Spam email prediction: 'SPAM!' Real email prediction: 'Not spam' Asignment 1 - Data Curation Imports the necessary libraries to perform data gathering and analysisimport json import numpy as np import requests import pandas as pd from pandas.io.json import json_normalize import matplotlib.pyplot as plt %pylab inlinePopulating the interactive namespace from numpy and matplotlib1. Data Acquisition First, the url bases that we will use in our API calls are set. 
Then the parameters for the different API calls are chosen depending on the type of API and access type.# sets the url for the wikimedia api call for legacy pagecounts and pageviews endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}' endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}' # sets the parameters for legacy pagecounts for desktop and mobile access params_legacy_desktop = {"project" : "en.wikipedia.org", "access-site" : "desktop-site", "granularity" : "monthly", "start" : "2008010100", "end" : "2016080100" } params_legacy_mobile = {"project" : "en.wikipedia.org", "access-site" : "mobile-site", "granularity" : "monthly", "start" : "2008010100", "end" : "2016080100" } ## sets the parameters for pageviews for desktop, mobile-app and mobile-web access params_pageviews_desktop = {"project" : "en.wikipedia.org", "access" : "desktop", "agent" : "user", "granularity" : "monthly", "start" : "2015070100", "end" : '2019090100' } params_pageviews_mobile_app = {"project" : "en.wikipedia.org", "access" : "mobile-app", "agent" : "user", "granularity" : "monthly", "start" : "2015070100", "end" : '2019090100' } params_pageviews_mobile_web = {"project" : "en.wikipedia.org", "access" : "mobile-web", "agent" : "user", "granularity" : "monthly", "start" : "2015070100", "end" : '2019090100' } # Customize these with your own information headers = {'User-Agent': 'https://github.com/ablew', 'From': '' }Then, a function is defined for making API calls using the URL bases and the parameters that will be inserted into them.def api_call(endpoint, parameters): """ makes api calls using the endpoint url and the parameters params: endpoint str parameters dict """ call = requests.get(endpoint.format(**parameters), headers=headers) response = call.json() return responseThe api_call function is used to obtain the data in .json format.# obtains data from api for legacy pagecounts for both desktop and mobile access monthly_legacy_desktop = api_call(endpoint_legacy, params_legacy_desktop) monthly_legacy_mobile = api_call(endpoint_legacy, params_legacy_mobile) # obtains data from api for pageviews for desktop, mobile app and mobile web access monthly_pageviews_desktop = api_call(endpoint_pageviews, params_pageviews_desktop) monthly_pageviews_mobile_app = api_call(endpoint_pageviews, params_pageviews_mobile_app) monthly_pageviews_mobile_web = api_call(endpoint_pageviews, params_pageviews_mobile_web)The data is stored in .json files with the proper file names.# saves each dataset in its own .json file with open('pagecounts_desktop-site_200801-201608.json', 'w') as outfile: json.dump(monthly_legacy_desktop, outfile) with open('pagecounts_mobile-site_200801-201608.json', 'w') as outfile: json.dump(monthly_legacy_mobile, outfile) with open('pageviews_desktop_201507-201909.json', 'w') as outfile: json.dump(monthly_pageviews_desktop, outfile) with open('pageviews_mobile-app_201507-201909.json', 'w') as outfile: json.dump(monthly_pageviews_mobile_app, outfile) with open('pageviews_mobile-web_201507-201909.json', 'w') as outfile: json.dump(monthly_pageviews_mobile_web, outfile)2. 
Data Processing Pandas dataframes are created for each API/access-type using json_normalize().# creates dataframes for legacy desktop views and for legacy mobile pagecounts df_lg_desktop = json_normalize(monthly_legacy_desktop['items'])[['timestamp', 'count']] df_lg_mobile = json_normalize(monthly_legacy_mobile['items'])[['timestamp', 'count']] # creates dataframes for desktop, mobile-app and mobile-web pageviews df_pv_desktop = json_normalize(monthly_pageviews_desktop['items'])[['timestamp', 'views']] df_pv_mobile_app = json_normalize(monthly_pageviews_mobile_app['items'])[['timestamp', 'views']] df_pv_mobile_web = json_normalize(monthly_pageviews_mobile_web['items'])[['timestamp', 'views']]A new dataframe containing data for total mobile views is created by combining the number of views using the mobile app and the mobile web browser.# creates a new dataframe based on mobile-app and mobile-web to obtain total mobile views df_pv_total_mobile = pd.DataFrame() df_pv_total_mobile['timestamp'] = df_pv_mobile_app['timestamp'] df_pv_total_mobile['views'] = df_pv_mobile_app['views'] + df_pv_mobile_web['views'] df_pv_total_mobile.head()The dataframes are joined one by one on the timestamp column, and replaces the missing values with zeros.# joins all four dataframes on the timestamp column df_merged = pd.merge(df_lg_desktop, df_lg_mobile, on='timestamp', how='outer') df_merged = pd.merge(df_merged, df_pv_desktop, on='timestamp', how='outer') df_merged = pd.merge(df_merged, df_pv_total_mobile, on='timestamp', how='outer') # fills NaNs with 0s df_merged = df_merged.fillna(0) df_merged.head()The final dataframe is created, containing the necessary columns as stipulated by the assignment instructions.#creates the final dataframe with all the relevant data, slicing the timestamp column into year and month df_final = pd.DataFrame() df_final['year'] = df_merged['timestamp'].str.slice(0, 4) df_final['month'] = df_merged['timestamp'].str.slice(4, 6) df_final['pagecount_all_views'] = df_merged['count_x'] + df_merged['count_y'] df_final['pagecount_desktop_views'] = df_merged['count_x'] df_final['pagecount_mobile_views'] = df_merged['count_y'] df_final['pageview_all_views'] = df_merged['views_x'] + df_merged['views_y'] df_final['pageview_desktop_views'] = df_merged['views_x'] df_final['pageview_mobile_views'] = df_merged['views_y'] df_final.head()The final dataframe is saved as a .csv file.df_final.to_csv('en-wikipedia_traffic_200801-201809.csv', index=False)3. 
Data Analysis The zero values are replaced with NaNs to provide a clearer visualization and to show when each method for data collection starts and ends.The time series for each dataframe is plotted in a single visualization, and various graph elements are added and formatted, such the the title, the labels, the grid, and the legend.A .png image of the plot is also saved before showing the visualization.# replaces all 0's with Nans so that they don't appear in the visualization df_final.replace(0, np.nan, inplace=True) # creates a time series graph with the different types of apis and access types df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pagecount_desktop_views'].plot(color='green', linestyle='--', figsize=(12,5)) df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pagecount_mobile_views'].plot(color='blue', linestyle='--') df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pagecount_all_views'].plot(color='black', linestyle='--') df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pageview_desktop_views'].plot(color='green', linestyle='-') df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pageview_mobile_views'].plot(color='blue', linestyle='-') df_final.set_index(pd.to_datetime(df_final[['year','month']].assign(day=1)))['pageview_all_views'].plot(color='black', linestyle='-') # adds title, axis labels, yticks, grid, legend and footnote plt.title('Page Views on English Wikipedia (x 1,000,000)') plt.xlabel('Year') plt.ylabel('Page views') #yticks are converted so it shows numbers x 1,000,000 max_views = max(df_final['pagecount_all_views']) plt.yticks(ticks=np.arange(0, max_views + 2000000000, 2000000000), labels=np.arange(0, max_views/1000000 + 2000, 2000)) plt.grid() plt.legend(('main site', 'mobile site', 'total'), loc='upper left') plt.figtext(0.85, 0, 'May 2015: a new pageview definition took effect, which eliminated all crawler traffic. 
Solid lines mark new definition.', horizontalalignment='right', color='red') plt.savefig('plot.png') plt.show()CLASSIFIER USING THE PROBABILITIEScnn_predictions_dir = get_path(model_config.model_predictions_cnn_dir) merge_list = [] for weight, frozen_layers in zip([*repeat('imagenet', 6), 'random'], ['ALL', '0FT', '1FT', '2FT', '3FT', '4FT']): data = db[['PROCESSED_IMG', 'IMG_LABEL', 'TRAIN_VAL', *XGB_COLS[XGB_CONFIG]]].copy() data.loc[:, 'LABEL'] = data.IMG_LABEL.map({k: v for v, k in enumerate(sorted(data.IMG_LABEL.unique(), reverse=False))}) l = [] for file in search_files(get_path(cnn_predictions_dir, weight, frozen_layers, create=False), ext='csv'): l.append( pd.read_csv(file, sep=';')[['PROCESSED_IMG', 'PROBABILTY']].\ assign(WEIGHTS=weight, FT=frozen_layers, CNN=get_filename(file)) ) merge_list.append(pd.merge(left=data, right=pd.concat(l, ignore_index=True), on='PROCESSED_IMG', how='left')) all_data = pd.concat(merge_list, ignore_index=True) all_data.head() # The best model is chosen based on the validation AUC metrics cnn_selection = all_data.groupby(['CNN', 'FT', 'WEIGHTS', 'TRAIN_VAL'], as_index=False).apply( lambda x: pd.Series({ 'AUC': roc_auc_score(x.LABEL, x.PROBABILTY), 'ACCURACY': accuracy_score(x.LABEL, round(x.PROBABILTY)), 'RECALL': recall_score(x.LABEL, round(x.PROBABILTY)), 'PRECISION': precision_score(x.LABEL, round(x.PROBABILTY)), 'F1': f1_score(x.LABEL, round(x.PROBABILTY)) }) ) selected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('AUC', ascending=False).\ groupby('CNN', as_index=False).first() selected_cnns selected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('F1', ascending=False).\ groupby('CNN', as_index=False).first() selected_cnns selected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('RECALL', ascending=False).\ groupby('CNN', as_index=False).first() selected_cnns selected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('ACCURACY', ascending=False).\ groupby('CNN', as_index=False).first() selected_cnns selected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('PRECISION', ascending=False).\ groupby('CNN', as_index=False).first() selected_cnnsLooking at the results, the metric to optimize will be AUC, and the threshold is decided afterwardsselected_cnns = cnn_selection[cnn_selection.TRAIN_VAL == 'val'].sort_values('AUC', ascending=False).\ groupby('CNN', as_index=False).first() final_list = [] for _, row in selected_cnns.iterrows(): final_list.append( all_data[(all_data.CNN == row.CNN) & (all_data.FT == row.FT) & (all_data.WEIGHTS == row.WEIGHTS)] ) final_df = pd.concat(final_list, ignore_index=True).\ set_index(['PROCESSED_IMG', 'LABEL', 'TRAIN_VAL', *XGB_COLS[XGB_CONFIG], 'CNN'])['PROBABILTY'].unstack()\ .reset_index() final_df # generation of the training dataset for gradient boosting cols = [*XGB_COLS[XGB_CONFIG], *all_data.CNN.unique().tolist()] train_x = final_df.loc[final_df.TRAIN_VAL == 'train', cols].values train_y = final_df.loc[final_df.TRAIN_VAL == 'train', 'LABEL'] #### The model ensembling algorithms start here import warnings warnings.filterwarnings('ignore') from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV clf = GridSearchCV(RandomForestClassifier(random_state=81), param_grid={'n_estimators': [100, 150, 200, 250, 300], 'max_depth': np.arange(0, 10)}, scoring='roc_auc', cv=10) clf.fit(train_x, train_y) data_csv = final_df[['PROCESSED_IMG', 'TRAIN_VAL', 
'LABEL']].assign(PREDICTION=clf.predict(final_df[cols])) data_csv.groupby('TRAIN_VAL').apply(lambda x: pd.Series({ 'AUC': roc_auc_score(x.LABEL, x.PREDICTION), 'ACCURACY': accuracy_score(x.LABEL, round(x.PREDICTION)), 'RECALL': recall_score(x.LABEL, round(x.PREDICTION)), 'PRECISION': precision_score(x.LABEL, round(x.PREDICTION)), 'F1': f1_score(x.LABEL, round(x.PREDICTION)) })) from sklearn.ensemble import GradientBoostingClassifier clf = GridSearchCV(GradientBoostingClassifier(random_state=81), param_grid={'n_estimators': [100, 150, 200, 250, 300], 'max_depth': np.arange(0, 10), 'criterion': ['squared_error', 'friedman_mse']}, scoring='roc_auc', cv=10) clf.fit(train_x, train_y) data_csv = final_df[['PROCESSED_IMG', 'TRAIN_VAL', 'LABEL']].assign(PREDICTION=clf.predict(final_df[cols])) data_csv.groupby('TRAIN_VAL').apply(lambda x: pd.Series({ 'AUC': roc_auc_score(x.LABEL, x.PREDICTION), 'ACCURACY': accuracy_score(x.LABEL, round(x.PREDICTION)), 'RECALL': recall_score(x.LABEL, round(x.PREDICTION)), 'PRECISION': precision_score(x.LABEL, round(x.PREDICTION)), 'F1': f1_score(x.LABEL, round(x.PREDICTION)) })) from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV clf = GridSearchCV(RandomForestRegressor(random_state=81), param_grid={'n_estimators': [100, 150, 200, 250, 300], 'max_depth': np.arange(0, 10)}, scoring='roc_auc', cv=10) clf.fit(train_x, train_y) data_csv = final_df[['PROCESSED_IMG', 'TRAIN_VAL', 'LABEL']].assign(PREDICTION=clf.predict(final_df[cols])) data_csv.groupby('TRAIN_VAL').apply(lambda x: pd.Series({ 'AUC': roc_auc_score(x.LABEL, x.PREDICTION), 'ACCURACY': accuracy_score(x.LABEL, round(x.PREDICTION)), 'RECALL': recall_score(x.LABEL, round(x.PREDICTION)), 'PRECISION': precision_score(x.LABEL, round(x.PREDICTION)), 'F1': f1_score(x.LABEL, round(x.PREDICTION)) })) from sklearn.ensemble import GradientBoostingRegressor clf = GridSearchCV(GradientBoostingRegressor(random_state=81), param_grid={'n_estimators': [100, 150, 200, 250, 300], 'max_depth': np.arange(0, 10), 'criterion': ['squared_error', 'friedman_mse']}, scoring='roc_auc', cv=10) clf.fit(train_x, train_y) data_csv = final_df[['PROCESSED_IMG', 'TRAIN_VAL', 'LABEL']].assign(PREDICTION=clf.predict(final_df[cols])) data_csv.groupby('TRAIN_VAL').apply(lambda x: pd.Series({ 'AUC': roc_auc_score(x.LABEL, x.PREDICTION), 'ACCURACY': accuracy_score(x.LABEL, round(x.PREDICTION)), 'RECALL': recall_score(x.LABEL, round(x.PREDICTION)), 'PRECISION': precision_score(x.LABEL, round(x.PREDICTION)), 'F1': f1_score(x.LABEL, round(x.PREDICTION)) }))1. 
Data Collectiontrain = pd.read_csv('/kaggle/input/bengaliai-cv19/train.csv') test = pd.read_csv('/kaggle/input/bengaliai-cv19/test.csv') class_map = pd.read_csv('/kaggle/input/bengaliai-cv19/class_map.csv') sample_submission = pd.read_csv('/kaggle/input/bengaliai-cv19/sample_submission.csv') # later do for all parquet files train_0 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_0.parquet') # train_1 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_1.parquet') # train_2 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_2.parquet') # train_3 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_3.parquet') test_0 = pd.read_parquet('/kaggle/input/bengaliai-cv19/test_image_data_0.parquet') test_1 = pd.read_parquet('/kaggle/input/bengaliai-cv19/test_image_data_1.parquet') test_2 = pd.read_parquet('/kaggle/input/bengaliai-cv19/test_image_data_2.parquet') test_3 = pd.read_parquet('/kaggle/input/bengaliai-cv19/test_image_data_3.parquet') train.head() test.head() class_map.head() train_0.head() print('train shape', train.shape) print('test shape', test.shape) print('class_map shape', class_map.shape) print('train_0 shape', train_0.shape) print('test_0 shape', test_0.shape) def display_image_from_pixels(data, subplots_size=5, smaller_data_alert=False): plt.figure() fig, ax = plt.subplots(subplots_size, subplots_size, figsize=(12,12)) for i, index in enumerate(data.index): image_id = data.iloc[i]['image_id'] image = data.iloc[i].drop('image_id').values.astype(np.uint8) image = Image.fromarray(image.reshape(137, 236)) ax[i//subplots_size, i%subplots_size].imshow(image) ax[i//subplots_size, i%subplots_size].set_title(image_id) ax[i//subplots_size, i%subplots_size].axis('off') if smaller_data_alert: for empty_subplot in range(3, 25): ax[empty_subplot//subplots_size, empty_subplot%subplots_size].set_visible(False) display_image_from_pixels(train_0.sample(25)) display_image_from_pixels(test_0, smaller_data_alert=True)2. Exploratory Data Analysis (EDA) 2.1. Show all possible class map signsdef show_class_maps(): print('-----------------') print('grapheme_root') print(class_map.loc[class_map['component_type'] == 'grapheme_root']['component'].values) print('-----------------') print('map_vowel') print(class_map.loc[class_map['component_type'] == 'vowel_diacritic']['component'].values) print('-----------------') print('map_diacritic') print(class_map.loc[class_map['component_type'] == 'consonant_diacritic']['component'].values) show_class_maps()2.2. Get frequency of class map occurrences in train setdef plot_freq(column='grapheme'): col = train[column].value_counts().rename_axis(column).reset_index(name='count') fig = px.bar(col, y='count', x=column) fig.show() plot_freq('grapheme') plot_freq('grapheme_root') plot_freq('vowel_diacritic') plot_freq('consonant_diacritic')2.3. Plot feature occurrence dependencies (encoded)def features_heatmap(feature1, feature2, width, length): df = train.groupby([feature1, feature2])['grapheme'].count().reset_index() df = df.pivot(feature1, feature2, 'grapheme') plt.figure(figsize=(width, length)) sns.heatmap(df, annot=True, fmt='3.0f', linewidths=.5, cmap='Blues') features_heatmap('vowel_diacritic','consonant_diacritic' ,12 , 4) #%%javascript #IPython.OutputArea.auto_scroll_threshold = 9999; features_heatmap('grapheme_root','consonant_diacritic', 12, 40) features_heatmap('grapheme_root','vowel_diacritic', 18, 40)3. 
Feature Engineering (FE)class FE(object): __constraints__ = {'WIDTH': 137, 'HEIGHT': 236, 'END_SIZE': 128} @staticmethod def make_2d(dataset, vector): image = dataset.iloc[vector].drop('image_id').values.astype(np.uint8) image = image.reshape(FE.__constraints__['WIDTH'], FE.__constraints__['HEIGHT'])/1 return image def crop_top(self, image, threshold): idx = 0 for row in range(FE.__constraints__['WIDTH']): if np.sum(image[row]) / 255 > threshold: idx += 1 else: return idx def crop_bot(self, image, threshold): idx = 0 for row in reversed(range(FE.__constraints__['WIDTH'])): if np.sum(image[row]) / 255 > threshold: idx += 1 else: return FE.__constraints__['WIDTH'] - idx def crop_left(self, image, threshold): idx = 0 for col in range(FE.__constraints__['HEIGHT']): if np.sum(image[:, col]) / 255 > threshold-95: idx += 1 else: return idx def crop_right(self, image, threshold): idx = 0 for col in reversed(range(FE.__constraints__['HEIGHT'])): if np.sum(image[:, col]) / 255 > threshold-95: idx += 1 else: return FE.__constraints__['HEIGHT'] - idx def crop_resize_image(self, image, threshold=230): return cv2.resize(image[self.crop_top(image, threshold): self.crop_bot(image, threshold), self.crop_left(image, threshold): self.crop_right(image, threshold)], (FE.__constraints__['END_SIZE'], FE.__constraints__['END_SIZE']), interpolation = INTER_AREA) @staticmethod def random_aug_mix(image, prob=0.5): image = cvtColor(image.astype(np.uint8), COLOR_GRAY2RGB) if prob > random.random(): image = add_fog(image, fog_coef=0.5, alpha_coef=0, haze_list=[]) if prob > random.random(): image = add_snow(image, snow_point=1, brightness_coeff=0.2) if prob > random.random(): image = elastic_transform(image, alpha=8, sigma=1, alpha_affine=0.8, interpolation=1, border_mode=4, value=None, random_state=None, approximate=False) if prob > random.random(): image = iso_noise(image, color_shift=5, intensity=5) return np.moveaxis(image[:,:,:1], -1, 0)3.1. Cropping, Centering and Resizing imagestest = pd.concat([test_0, test_1, test_2, test_3]) # prepare datasets (change dims) images_test = np.zeros(((137, 236, 12))) for vector in tqdm(range(test.shape[0])): images_test[:,:,vector] = FE.make_2d(test, vector) # dims after resize resized_test = np.zeros(((128, 128, 12))) for vector in tqdm(range(test.shape[0])): resized_test[:,:,vector] = FE().crop_resize_image(images_test[:,:,vector], threshold=230) del images_test3.3. AugMix train set calibration using albumentations# for torch X_test = np.zeros(((12, 1, 128, 128))) # for keras # X_train = np.zeros(((50210, 128, 128, 1))) # X_test = np.zeros(((12, 128, 128, 1))) for vector in tqdm(range(test.shape[0])): X_test[vector,:,:,:] = FE.random_aug_mix(resized_test[:,:,vector], prob=0) del resized_test3.4. 
Merging previous steps into pipeline for future training sets preparationTRAIN_SIZE = 10000 # @cuda.autojit def train_preprocessing_pipeline(train_set, train_step): print('{} train step processing'.format(train_step)) images_train = np.zeros(((137, 236, TRAIN_SIZE))) for vector in tqdm(range(TRAIN_SIZE)): #range(train_0.shape[0]) images_train[:,:,vector] = FE.make_2d(train_set, vector + (TRAIN_SIZE * train_step)) resized_train = np.zeros(((128, 128, TRAIN_SIZE))) for vector in tqdm(range(TRAIN_SIZE)): #train_0.shape[0] resized_train[:,:,vector] = FE().crop_resize_image(images_train[:,:,vector], threshold=230) del images_train X_train = np.zeros(((TRAIN_SIZE, 1, 128, 128))) for vector in tqdm(range(TRAIN_SIZE)): X_train[vector,:,:,:] = FE.random_aug_mix(resized_train[:,:,vector], prob=0.3) del resized_train print('processed') return data.DataLoader( tensor(X_train), batch_size=BATCH_SIZE, num_workers=N_WORKERS, shuffle=False )4. Modelingdevice = "cuda:0" if torch.cuda.is_available() else "cpu" device4.1. NN Architectureclass ConvBlock(nn.Module): def __init__(self, in_channels, out_channels): super(ConvBlock, self).__init__() self.conv2d1 = nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=6, stride=2, padding=2) self.batch_norm = nn.BatchNorm2d(num_features=6) self.relu = nn.ReLU(True) self.max_pooling = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.conv2d2 = nn.Conv2d(in_channels=6, out_channels=out_channels, kernel_size=6, stride=2, padding=0) def forward(self, x): x = self.conv2d1(x) x = self.batch_norm(x) x = self.relu(x) x = self.max_pooling(x) x = self.conv2d2(x) return self.relu(x) class ResidualBlock(nn.Module): def __init__(self): # prob super(ResidualBlock, self).__init__() self.id_block = nn.Sequential( nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, padding=1), #nn.BatchNorm2d(num_features=8) nn.MaxPool2d(kernel_size=3, padding=1, stride=1) ) self.skip = nn.Sequential() def forward(self, x): residual = x x = self.id_block(x) x += self.skip(residual) return nn.ReLU(True)(x) class BottleNeck(nn.Module): def __init__(self, in_channels, out_channels): super(BottleNeck, self).__init__() self.bottle_neck = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): return self.bottle_neck(x) class ResNet(nn.Module): def __init__(self): super(ResNet, self).__init__() # self.backbone = resnet34(pretrained=False) self.conv_block = ConvBlock(in_channels=1, out_channels=16) self.bottle_neck = BottleNeck(in_channels=16, out_channels=8) self.res_blocks = ResidualBlock() self.average_pooling = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) self.flatten = nn.Flatten() self.norm = nn.BatchNorm2d(num_features=8) self.root = nn.Linear(392, 168) self.vowel = nn.Linear(392, 11) self.consonant = nn.Linear(392, 7) def forward(self, x): # x = self.forward_backbone(x) x = self.conv_block(x) x = self.bottle_neck(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.res_blocks(x) x = self.average_pooling(x) x = self.flatten(x) root = self.root(x) vowel = self.vowel(x) consonant = self.consonant(x) return root, vowel, consonant4.2. Constraints settingBATCH_SIZE = TRAIN_SIZE // 20 #32 N_WORKERS = 4 N_EPOCHS = 10 DEPLOYMENT_WEIGHTS = 'baseline_weights.pth'4.3. 
Model Templatemodel = ResNet().to(device) criterion = nn.BCEWithLogitsLoss() # KLDivLoss() optimizer = Adam(model.parameters(), lr=0.5) scheduler = ReduceLROnPlateau(optimizer, 'min', min_lr=0.1, verbose=True) # patience=1000, min_lr=0.1, epoch_losses = [] def train_nn(train_set, train_step, data_loader_train = []): for epoch in range(N_EPOCHS): print('Epoch {}/{}'.format(epoch, N_EPOCHS - 1)) print('-' * 10) model.train() tr_loss = 0 if epoch == 0: data_loader_train = train_preprocessing_pipeline(train_set, train_step) for step, batch in enumerate(tqdm(data_loader_train)): inputs = tensor(batch) l_graph = tensor(Y_train_root[(batch.shape[0]*step):(batch.shape[0]*(step+1))]) l_vowel = tensor(Y_train_vowel[(batch.shape[0]*step):(batch.shape[0]*(step+1))]) l_conso = tensor(Y_train_consonant[(batch.shape[0]*step):(batch.shape[0]*(step+1))]) inputs = inputs.to(device, dtype=torch.float) l_graph = l_graph.to(device, dtype=torch.float) l_vowel = l_vowel.to(device, dtype=torch.float) l_conso = l_conso.to(device, dtype=torch.float) out_graph, out_vowel, out_conso = model(inputs) loss_graph = criterion(out_graph, l_graph) loss_vowel = criterion(out_vowel, l_vowel) loss_conso = criterion(out_conso, l_conso) loss = loss_graph + loss_vowel + loss_conso scheduler.step(loss) loss.backward() tr_loss += loss.item() optimizer.step() optimizer.zero_grad() epoch_losses.append(tr_loss / len(data_loader_train)) print('Training Loss: {:.4f}'.format(epoch_losses[-1])) def make_labels(step): return tensor(pd.get_dummies(train['grapheme_root'][(TRAIN_SIZE * step):(TRAIN_SIZE * (step + 1))]).values),\ tensor(pd.get_dummies(train['vowel_diacritic'][(TRAIN_SIZE * step):(TRAIN_SIZE * (step + 1))]).values),\ tensor(pd.get_dummies(train['consonant_diacritic'][(TRAIN_SIZE * step):(TRAIN_SIZE * (step + 1))]).values)4.4. 
Trainingprint('train_0 is being processed') Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(0) train_nn(train_0, 0) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(1) train_nn(train_0, 1) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(2) train_nn(train_0, 2) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(3) train_nn(train_0, 3) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(4) train_nn(train_0, 4) del train_0 print('train_1 is being processed') train_1 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_1.parquet') Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(5) train_nn(train_1, 0) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(6) train_nn(train_1, 1) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(7) train_nn(train_1, 2) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(8) train_nn(train_1, 3) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(9) train_nn(train_1, 4) del train_1 print('train_2 is being processed') train_2 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_2.parquet') Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(10) train_nn(train_2, 0) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(11) train_nn(train_2, 1) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(12) train_nn(train_2, 2) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(13) train_nn(train_2, 3) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(14) train_nn(train_2, 4) del train_2 print('train_3 is being processed') train_3 = pd.read_parquet('/kaggle/input/bengaliai-cv19/train_image_data_3.parquet') Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(15) train_nn(train_3, 0) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(16) train_nn(train_3, 1) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(17) train_nn(train_3, 2) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(18) train_nn(train_3, 3) Y_train_root, Y_train_vowel, Y_train_consonant = make_labels(19) train_nn(train_3, 4) del train_3 torch.save(model.state_dict(), DEPLOYMENT_WEIGHTS)5. Evaluationdef plot_loss(epoch_losses): plt.style.use('seaborn-whitegrid') plt.figure() plt.plot(np.arange(0, N_EPOCHS * 20), epoch_losses) plt.title('Train Loss') plt.xlabel('Epoch #') plt.ylabel('Loss') plt.show() plot_loss(epoch_losses)6. 
Deployment#keras #prediction = model.predict(X_test) data_loader_test = data.DataLoader( X_test, batch_size=BATCH_SIZE, num_workers=N_WORKERS, shuffle=False ) model.load_state_dict(torch.load(DEPLOYMENT_WEIGHTS)) results_graph, results_vowel, results_conso = [], [], [] for epoch in range(N_EPOCHS): print('Epoch {}/{}'.format(epoch, N_EPOCHS - 1)) print('-' * 10) model.eval() for step, batch in enumerate(tqdm(data_loader_test)): inputs = batch.to(device, dtype=torch.float) out_graph, out_vowel, out_conso = model(inputs) out_graph = F.softmax(out_graph, dim=1).data.cpu().numpy().argmax(axis=1) out_vowel = F.softmax(out_vowel, dim=1).data.cpu().numpy().argmax(axis=1) out_conso = F.softmax(out_conso, dim=1).data.cpu().numpy().argmax(axis=1) results_graph.append(out_graph) results_vowel.append(out_vowel) results_conso.append(out_conso) results = [] results_graph = pd.DataFrame(results_graph) results_vowel = pd.DataFrame(results_vowel) results_conso = pd.DataFrame(results_conso) for arg in range(sample_submission.shape[0] // 3 ): results.append(np.argmax(np.bincount(results_conso.iloc[:, arg]))) results.append(np.argmax(np.bincount(results_graph.iloc[:, arg]))) results.append(np.argmax(np.bincount(results_vowel.iloc[:, arg]))) results7. Submission#torch submission = pd.concat([sample_submission.drop('target', axis=1), pd.Series(results)], names=['row_id', 'target'], axis=1) submission.rename(columns={0: 'target'}, inplace=True) submission # keras # for pred_index, value in enumerate(prediction): # for arg_index in range(3): # sample_submission['target'].iloc[pred_index+(3*arg_index)] = np.argmax(value, axis=1)[arg_index] submission.to_csv('submission.csv', index=False)Salary prediction, episode II: make it actually work (4 points)Your main task is to use some of the tricks you've learned on the network and analyze if you can improve __validation MAE__. Try __at least 3 options__ from the list below for a passing grade. Write a short report about what you have tried. More ideas = more bonus points. __Please be serious:__ " plot learning curves in MAE/epoch, compare models based on optimal performance, test one change at a time. You know the drill :)You can use either __pytorch__ or __tensorflow__ or any other framework (e.g. pure __keras__). Feel free to adapt the seminar code for your needs. 
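Since the report calls for learning curves in MAE/epoch and one-change-at-a-time comparisons, a small plotting helper keeps the experiments below consistent. This is a minimal sketch, not part of the original seminar code; it assumes each experiment records its per-epoch validation MAE in a plain list.

```python
import matplotlib.pyplot as plt

def plot_mae_curves(curves):
    """curves: dict mapping an experiment name to its list of per-epoch validation MAE values."""
    for name, maes in curves.items():
        plt.plot(range(1, len(maes) + 1), maes, marker='o', label=name)
    plt.xlabel('epoch')
    plt.ylabel('validation MAE')
    plt.legend()
    plt.grid(True)
    plt.show()

# Usage (the numbers here are purely illustrative):
# plot_mae_curves({'baseline CNN': [2.40, 2.20, 2.10, 2.06, 2.05],
#                  'GloVe-50, frozen': [2.30, 2.10, 2.00, 1.98, 1.97]})
```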
For tensorflow version, consider `seminar_tf2.ipynb` as a starting point.import numpy as np import matplotlib.pyplot as plt import pandas as pd %matplotlib inlinePart II: Experimentingdata = pd.read_csv('data/Train_rev1.csv', index_col = None) data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32') text_columns = ["Title", "FullDescription"] categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"] TARGET_COLUMN = "Log1pSalary" data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast missing values to string "NaN" data.sample(3)Train-test split To be completely rigorous, let's first separate data into train and validation parts before proceeding to tokenization.from sklearn.model_selection import train_test_split data_train, data_val = train_test_split(data, test_size=0.2, random_state=42) data_train.index = range(len(data_train)) data_val.index = range(len(data_val)) data_train = data_train.copy() data_val = data_val.copy() print("Train size = ", len(data_train)) print("Validation size = ", len(data_val))Train size = 195814 Validation size = 48954Preprocessing text dataJust like last week, applying NLP to a problem begins from tokenization: splitting raw text into sequences of tokens (words, punctuation, etc).__Your task__ is to lowercase and tokenize all texts under `Title` and `FullDescription` columns. Store the tokenized data as a __space-separated__ string of tokens for performance reasons.It's okay to use nltk tokenizers. Assertions were designed for WordPunctTokenizer, slight deviations are okay.print("Raw text:") print(data["FullDescription"][2::100000]) import nltk tokenizer = nltk.tokenize.WordPunctTokenizer() preprocess = lambda text: ' '.join(tokenizer.tokenize(text.lower())) data_train['Title'] = data_train['Title'].astype(str).apply(preprocess) data_val['Title'] = data_val['Title'].astype(str).apply(preprocess) data_train['FullDescription'] = data_train['FullDescription'].astype(str).apply(preprocess) data_val['FullDescription'] = data_val['FullDescription'].astype(str).apply(preprocess) print("Tokenized:") print(data_train["FullDescription"][2::100000])Tokenized: 2 the opportunity my client is currently seeking... 100002 a principal railways systems engineer is requi... Name: FullDescription, dtype: objectNot all words are equally useful. Some of them are typos or rare words that are only present a few times. 
Let's count how many times each word is present in the data so that we can build a "white list" of known words.from collections import Counter token_counts = Counter() for text in data_train['Title'].values: token_counts.update(text.split()) for text in data_train['FullDescription'].values: token_counts.update(text.split()) min_count = 10 # tokens from token_counts keys that had at least min_count occurrences throughout the dataset tokens = sorted(t for t, c in token_counts.items() if c >= min_count) # Add special tokens for unknown and empty words UNK, PAD = "UNK", "PAD" tokens = [UNK, PAD] + tokens print("Vocabulary size:", len(tokens)) assert type(tokens) == list assert 'me' in tokens assert UNK in tokens print("Correct!")Vocabulary size: 30715 Correct!__Task 1.2__ Build an inverse token index: a dictionary from token (string) to its index in `tokens` (int)token_to_id = {t: i for i, t in enumerate(tokens)} assert isinstance(token_to_id, dict) assert len(token_to_id) == len(tokens) for tok in tokens: assert tokens[token_to_id[tok]] == tok print("Correct!")Correct!And finally, let's use the vocabulary you've built to map text lines into neural network-digestible matrices.UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD]) def as_matrix(sequences, max_len=None): """ Convert a list of tokens into a matrix with padding """ if isinstance(sequences[0], str): sequences = list(map(str.split, sequences)) max_len = min(max(map(len, sequences)), max_len or float('inf')) matrix = np.full((len(sequences), max_len), np.int32(PAD_IX)) for i,seq in enumerate(sequences): row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]] matrix[i, :len(row_ix)] = row_ix return matrixNow let's encode the categorical data we have.As usual, we shall use one-hot encoding for simplicity. Kudos if you implement more advanced encodings: tf-idf, pseudo-time-series, etc.from sklearn.feature_extraction import DictVectorizer # we only consider top-1k most frequent companies to minimize memory usage top_companies, top_counts = zip(*Counter(data_train['Company']).most_common(1000)) recognized_companies = set(top_companies) data_train["Company"] = data_train["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other") categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False) categorical_vectorizer.fit(data_train[categorical_columns].apply(dict, axis=1))The deep learning partOnce we've learned to tokenize the data, let's design a machine learning experiment.import torch def to_tensors(batch, device): batch_tensors = dict() for key, arr in batch.items(): if key in ["FullDescription", "Title"]: batch_tensors[key] = torch.tensor(arr, device=device, dtype=torch.int64) else: batch_tensors[key] = torch.tensor(arr, device=device) return batch_tensors def make_batch(data, max_len=None, word_dropout=0, device=torch.device('cpu')): """ Creates a keras-friendly dict from the batch data. :param word_dropout: replaces token index with UNK_IX with this probability :returns: a dict with {'title' : int64[batch, title_max_len] """ batch = {} batch["Title"] = as_matrix(data["Title"].values, max_len) batch["FullDescription"] = as_matrix(data["FullDescription"].values, max_len) batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1)) if word_dropout != 0: batch["FullDescription"] = apply_word_dropout(batch["FullDescription"], 1. 
- word_dropout) if TARGET_COLUMN in data.columns: batch[TARGET_COLUMN] = data[TARGET_COLUMN].values return to_tensors(batch, device) def apply_word_dropout(matrix, keep_prop, replace_with=UNK_IX, pad_ix=PAD_IX,): dropout_mask = np.random.choice(2, np.shape(matrix), p=[keep_prop, 1 - keep_prop]) dropout_mask &= matrix != pad_ix return np.choose(dropout_mask, [matrix, np.full_like(matrix, replace_with)]) make_batch(data_train[:3], max_len=10) def iterate_minibatches(data, batch_size=256, shuffle=True, cycle=False, device=torch.device('cpu'), **kwargs): """ iterates minibatches of data in random order """ while True: indices = np.arange(len(data)) if shuffle: indices = np.random.permutation(indices) for start in range(0, len(indices), batch_size): batch = make_batch(data.iloc[indices[start : start + batch_size]], device=device, **kwargs) yield batch if not cycle: breakExperiments What's the benchmark?Before we proceed to experimenting, let's define what's the benchmark. Let's consider the architecture and the results obtained in `homework_part2.ipynb` as our benchmark. That is, after 5 epochs- Mean square error = 4.32724- Mean absolute error = 2.04903We'll see whether it will be possible to improve these results or not. A) CNN architectureIt is close to what we've done in the `homework_part2.ipynb`, but we will try some more stuff as suggested:All the tricks you know about dense and convolutional neural networks apply here as well.* Dropout. Nuff said.* Batch Norm. This time it's `nn.BatchNorm*`/`L.BatchNormalization`* Parallel convolution layers. The idea is that you apply several nn.Conv1d to the same embeddings and concatenate output channels.* More layers, more neurons, ya know...import torch import torch.nn as nn import torch.functional as F # We'll use a special helper module to squeeze dimensions class Squeezener(nn.Module): def forward(self, x): return x.squeeze() class SalaryPredictor(nn.Module): def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=64): super().__init__() self.embedder = nn.Embedding(n_tokens, embedding_dim=hid_size) self.title_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.description_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.categorical_encoder = nn.Sequential( nn.Linear(n_cat_features, hid_size * 2), nn.ReLU(), nn.Linear(hid_size * 2, hid_size * 2), nn.ReLU() ) self.final_predictor = nn.Sequential( nn.Linear(hid_size * 4, hid_size), nn.ReLU(), nn.Linear(hid_size, 1) ) def forward(self, batch): title_embeddings = self.embedder(batch['Title']).permute(0,2,1) # print(title_embeddings.shape) title_features = self.title_encoder(title_embeddings) # print(title_features.shape) description_embeddings = self.embedder(batch['FullDescription']).permute(0,2,1) # print(description_embeddings.shape) description_features = self.description_encoder(description_embeddings) # print(description_features.shape) categorical_features = self.categorical_encoder(batch['Categorical']) features = torch.cat([title_features, description_features, categorical_features], 1) return self.final_predictor(features).squeeze() import tqdm BATCH_SIZE = 512 EPOCHS = 5 DEVICE = torch.device('cuda') def print_metrics(model, data, 
batch_size=BATCH_SIZE, name="", device=torch.device('cpu'), **kw): squared_error = abs_error = num_samples = 0.0 model.eval() with torch.no_grad(): for batch in iterate_minibatches(data, batch_size=batch_size, device = device, shuffle=False, **kw): batch_pred = model(batch) squared_error += torch.sum(torch.square(batch_pred - batch[TARGET_COLUMN])) abs_error += torch.sum(torch.abs(batch_pred - batch[TARGET_COLUMN])) num_samples += len(batch_pred) mse = squared_error.detach().cpu().numpy() / num_samples mae = abs_error.detach().cpu().numpy() / num_samples print("%s results:" % (name or "")) print("Mean square error: %.5f" % mse) print("Mean absolute error: %.5f" % mae) return mse, mae model = SalaryPredictor().to(DEVICE) criterion = nn.MSELoss(reduction='sum') optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) for epoch in range(EPOCHS): print(f"epoch: {epoch}") model.train() for i, batch in tqdm.tqdm_notebook(enumerate( iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)), total=len(data_train) // BATCH_SIZE ): pred = model(batch) loss = criterion(pred, batch[TARGET_COLUMN]) optimizer.zero_grad() loss.backward() optimizer.step() print_metrics(model, data_val, device=DEVICE)C) Fun with wordsIt's not always a good idea to train embeddings from scratch. Here are a few tricks:* Use pre-trained embeddings from `gensim.downloader.load`. See last lecture.* Start with pre-trained embeddings, then fine-tune them with gradient descent. You may or may not download pre-trained embeddings from [here](http://nlp.stanford.edu/data/glove.6B.zip) and follow this [manual](https://keras.io/examples/nlp/pretrained_word_embeddings/) to initialize your Keras embedding layer with downloaded weights.* Use the same embedding matrix in title and desc vectorizer As suggested above, I will use `Wikipedia 2014 + Gigaword 5`. It was trained on a corpus of 6 billion tokens and contains a vocabulary of 400000 tokens.model = SalaryPredictor() batch = make_batch(data_train[:100]) criterion = nn.MSELoss() dummy_pred = model(batch) dummy_loss = criterion(dummy_pred, batch[TARGET_COLUMN]) assert dummy_pred.shape == torch.Size([100]) assert len(np.unique(dummy_pred.cpu().detach().numpy())) > 20, "model returns suspiciously few unique outputs. Check your initialization" assert dummy_loss.ndim == 0 and 0. <= dummy_loss <= 250., "make sure you minimize MSE" import torch import torch.nn as nn import torch.nn.functional as FTo make things comparable, let's make changes in steps:1. Vector embedding dimension will be 50 (64 in the non-pretrained setting)2. Let's see the results without fine-tuning the embeddings3. Results with fine-tuning4. Use embeddings of size 300# Code below is taken from # https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76 words = [] idx = 0 word2idx = {} vectors = [] with open('glovedata/glove.6B/glove.6B.50d.txt', 'rb') as f: for l in f: line = l.decode().split() word = line[0] words.append(word) word2idx[word] = idx idx += 1 vect = np.array(line[1:]).astype(np.float) vectors.append(vect) glovedct = {w : vectors[word2idx[w]] for w in words}We must build a matrix of weights that will be loaded into the PyTorch embedding layer. Its shape will be equal to:```(dataset’s vocabulary length, word vectors dimension).```For each word in the dataset’s vocabulary, we check whether it is in GloVe’s vocabulary. If it is, we load its pre-trained word vector. 
Otherwise, we initialize a random vector.matrix_len = len(tokens) weights_matrix = np.zeros((matrix_len, 50)) words_found = 0 for i, word in enumerate(tokens): try: weights_matrix[i] = glovedct[word] words_found += 1 except KeyError: weights_matrix[i] = np.random.normal(scale=0.6, size = (50,)) print(f'{100*words_found/len(tokens):.2f}% of tokens were found in Glove dataset')Architecture will be the same as in the baseline, the only difference is the use of pretrained word vectors with a different dimension.# We'll use a special helper module to squeeze dimensions class Squeezener(nn.Module): def forward(self, x): return x.squeeze() class SalaryPredictor(nn.Module): def __init__(self, weights_matrix, n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=50, freeze=True): super().__init__() self.embedder = nn.Embedding.from_pretrained( torch.FloatTensor(weights_matrix), freeze=freeze) self.title_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.description_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.categorical_encoder = nn.Sequential( nn.Linear(n_cat_features, hid_size * 2), nn.ReLU(), nn.Linear(hid_size * 2, hid_size * 2), nn.ReLU() ) self.final_predictor = nn.Sequential( nn.Linear(hid_size * 4, hid_size), nn.ReLU(), nn.Linear(hid_size, 1) ) def forward(self, batch): title_embeddings = self.embedder(batch['Title']).permute(0,2,1) # print(title_embeddings.shape) title_features = self.title_encoder(title_embeddings) # print(title_features.shape) description_embeddings = self.embedder(batch['FullDescription']).permute(0,2,1) # print(description_embeddings.shape) description_features = self.description_encoder(description_embeddings) # print(description_features.shape) categorical_features = self.categorical_encoder(batch['Categorical']) features = torch.cat([title_features, description_features, categorical_features], 1) return self.final_predictor(features).squeeze() model = SalaryPredictor(weights_matrix) batch = make_batch(data_train[:100]) criterion = nn.MSELoss() dummy_pred = model(batch) dummy_loss = criterion(dummy_pred, batch[TARGET_COLUMN]) assert dummy_pred.shape == torch.Size([100]) assert len(np.unique(dummy_pred.cpu().detach().numpy())) > 20, "model returns suspiciously few unique outputs. Check your initialization" assert dummy_loss.ndim == 0 and 0. 
<= dummy_loss <= 250., "make sure you minimize MSE" import tqdm BATCH_SIZE = 512 EPOCHS = 5 DEVICE = torch.device('cuda') def print_metrics(model, data, batch_size=BATCH_SIZE, name="", device=torch.device('cpu'), **kw): squared_error = abs_error = num_samples = 0.0 model.eval() with torch.no_grad(): for batch in iterate_minibatches(data, batch_size=batch_size, device = device, shuffle=False, **kw): batch_pred = model(batch) squared_error += torch.sum(torch.square(batch_pred - batch[TARGET_COLUMN])) abs_error += torch.sum(torch.abs(batch_pred - batch[TARGET_COLUMN])) num_samples += len(batch_pred) mse = squared_error.detach().cpu().numpy() / num_samples mae = abs_error.detach().cpu().numpy() / num_samples print("%s results:" % (name or "")) print("Mean square error: %.5f" % mse) print("Mean absolute error: %.5f" % mae) return mse, mae model = SalaryPredictor(weights_matrix, freeze=True).to(DEVICE) criterion = nn.MSELoss(reduction='sum') optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) for epoch in range(EPOCHS): print(f"epoch: {epoch}") model.train() for i, batch in tqdm.tqdm_notebook(enumerate( iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)), total=len(data_train) // BATCH_SIZE ): pred = model(batch) loss = criterion(pred, batch[TARGET_COLUMN]) optimizer.zero_grad() loss.backward() optimizer.step() print_metrics(model, data_val, device=DEVICE)Interesting! Notice that both the `mean squared error` and the `mean absolute error` are lower than the ones in the benchmark (`4.32724` and `2.04903`, respectively). The interesting aspect is that, by default, the `nn.Embedding()` layer in the benchmark is trainable, while `nn.Embedding.from_pretrained()` is not. This means that using pretrained vectors gives better results (at least when training for 5 epochs) even when we do not allow retraining.Let's see what happens when we allow retraining of the embeddings (setting `freeze=False`).model.embedder.weight.requires_grad model = SalaryPredictor(weights_matrix, freeze=False).to(DEVICE) criterion = nn.MSELoss(reduction='sum') optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) for epoch in range(EPOCHS): print(f"epoch: {epoch}") model.train() for i, batch in tqdm.tqdm_notebook(enumerate( iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)), total=len(data_train) // BATCH_SIZE ): pred = model(batch) loss = criterion(pred, batch[TARGET_COLUMN]) optimizer.zero_grad() loss.backward() optimizer.step() print_metrics(model, data_val, device=DEVICE) model.embedder.weight.requires_gradWe were able to get even better results when allowing the embeddings to be retrained. However, this result should be taken with caution: we only ran a few trials and trained for just 5 epochs. Still, we can conclude that using pretrained vectors improves the results. I am curious what would happen if, instead of 50-dimensional embeddings, we used 300-dimensional ones. 
Let's repeat the previous steps and see:# Code below is taken from # https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76 words = [] idx = 0 word2idx = {} vectors = [] with open('glovedata/glove.6B/glove.6B.300d.txt', 'rb') as f: for l in f: line = l.decode().split() word = line[0] words.append(word) word2idx[word] = idx idx += 1 vect = np.array(line[1:]).astype(np.float) vectors.append(vect) glovedct = {w : vectors[word2idx[w]] for w in words} matrix_len = len(tokens) weights_matrix = np.zeros((matrix_len, 300)) words_found = 0 for i, word in enumerate(tokens): try: weights_matrix[i] = glovedct[word] words_found += 1 except KeyError: weights_matrix[i] = np.random.normal(scale=0.6, size = (300,)) print(f'{100*words_found/len(tokens):.2f}% of tokens were found in Glove dataset') model = SalaryPredictor(weights_matrix, hid_size=300, freeze=False).to(DEVICE) criterion = nn.MSELoss(reduction='sum') optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) for epoch in range(EPOCHS): print(f"epoch: {epoch}") model.train() for i, batch in tqdm.tqdm_notebook(enumerate( iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)), total=len(data_train) // BATCH_SIZE ): pred = model(batch) loss = criterion(pred, batch[TARGET_COLUMN]) optimizer.zero_grad() loss.backward() optimizer.step() print_metrics(model, data_val, device=DEVICE)Quite unexpectedly, the results are worse. This might be because a larger network needs more training epochs and some hyperparameter tuning. E) Optimizing seriously* You don't necessarily need 100 epochs. Use early stopping. If you've never done this before, take a look at the [early stopping callback (keras)](https://keras.io/callbacks/earlystopping) or the one in [pytorch (lightning)](https://pytorch-lightning.readthedocs.io/en/latest/early_stopping.html). * In short, train until you notice that the validation error stops improving. * Maintain the best-on-validation snapshot via `model.save(file_name)` * Plotting learning curves is usually a good idea Good luck! And may the force be with you! 
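Before switching to PyTorch Lightning below, here is what early stopping with a best-on-validation snapshot could look like in plain PyTorch. This is only a minimal sketch: it reuses the `model`, `criterion`, `optimizer`, `iterate_minibatches` and `print_metrics` objects defined above, and `MAX_EPOCHS`, `patience` and the checkpoint file name are illustrative choices, not part of the original notebook.

```
import copy
import torch

MAX_EPOCHS = 100   # illustrative upper bound on training length
patience = 3       # illustrative: stop after 3 epochs without improvement

best_mae = float('inf')
best_state = None
epochs_without_improvement = 0

for epoch in range(MAX_EPOCHS):
    model.train()
    for batch in iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE):
        pred = model(batch)
        loss = criterion(pred, batch[TARGET_COLUMN])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # validation metrics from the helper defined earlier
    mse, mae = print_metrics(model, data_val, device=DEVICE, name=f"epoch {epoch}")

    if mae < best_mae:
        best_mae = mae
        best_state = copy.deepcopy(model.state_dict())   # best-on-validation snapshot
        torch.save(best_state, 'best_model.pt')          # illustrative file name
        epochs_without_improvement = 0
    else:
        epochs_without_improvement += 1
        if epochs_without_improvement >= patience:       # validation stalled
            print(f"Early stopping at epoch {epoch}")
            break

# restore the best snapshot before the final evaluation
model.load_state_dict(best_state)
```

Note that PyTorch modules have no Keras-style `model.save(file_name)`; the usual way to keep the snapshot is `torch.save(model.state_dict(), path)` followed by `model.load_state_dict(...)`.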
ok, everyone talks about PyTorch Lightning, so it is a good opportunity to do the first dive.def iterate_minibatches(data, batch_size=256, shuffle=True, cycle=False, device=torch.device('cpu'), **kwargs): """ iterates minibatches of data in random order """ while True: indices = np.arange(len(data)) if shuffle: indices = np.random.permutation(indices) for start in range(0, len(indices), batch_size): batch = make_batch(data.iloc[indices[start : start + batch_size]], device=device, **kwargs) yield batch if not cycle: break BATCH_SIZE = 512 EPOCHS = 5 DEVICE = torch.device('cuda') import torch from torch.nn import functional as F from torch import nn from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning import Trainer from pytorch_lightning.callbacks.early_stopping import EarlyStopping from pytorch_lightning.metrics import Metric # Code below is taken from # https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76 words = [] idx = 0 word2idx = {} vectors = [] with open('glovedata/glove.6B/glove.6B.50d.txt', 'rb') as f: for l in f: line = l.decode().split() word = line[0] words.append(word) word2idx[word] = idx idx += 1 vect = np.array(line[1:]).astype(np.float) vectors.append(vect) glovedct = {w : vectors[word2idx[w]] for w in words} matrix_len = len(tokens) weights_matrix = np.zeros((matrix_len, 50)) words_found = 0 for i, word in enumerate(tokens): try: weights_matrix[i] = glovedct[word] words_found += 1 except KeyError: weights_matrix[i] = np.random.normal(scale=0.6, size = (50,)) class MyMSE(Metric): def __init__(self, compute_on_step=False, dist_sync_on_step=False): super().__init__(compute_on_step=compute_on_step, dist_sync_on_step=dist_sync_on_step) self.add_state("squared_error", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") def update(self, preds: torch.Tensor, target: torch.Tensor): self.squared_error += torch.sum(torch.square(preds - target)) self.total += target.numel() def compute(self): return self.squared_error.float()/ self.total class MyMAE(Metric): def __init__(self, compute_on_step=False, dist_sync_on_step=False): super().__init__(compute_on_step=compute_on_step, dist_sync_on_step=dist_sync_on_step) self.add_state("abs_error", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") def update(self, preds: torch.Tensor, target: torch.Tensor): self.abs_error += torch.sum(torch.abs(preds - target)) self.total += target.numel() def compute(self): return self.abs_error.float() / self.total # We'll use a special helper module to squeeze dimensions class Squeezener(nn.Module): def forward(self, x): return x.squeeze() class SalaryPredictor(LightningModule): def __init__(self, weights_matrix, n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=50, freeze=True): super().__init__() self.val_mse = MyMSE() self.val_mae = MyMAE() self.embedder = nn.Embedding.from_pretrained( torch.FloatTensor(weights_matrix), freeze=freeze) self.title_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.description_encoder = nn.Sequential( nn.Conv1d(hid_size, hid_size, kernel_size=2), nn.Dropout(p=0.25), nn.ReLU(), nn.AdaptiveMaxPool1d(output_size=1), Squeezener(), nn.Linear(hid_size, hid_size), nn.ReLU() ) self.categorical_encoder = 
nn.Sequential( nn.Linear(n_cat_features, hid_size * 2), nn.ReLU(), nn.Linear(hid_size * 2, hid_size * 2), nn.ReLU() ) self.final_predictor = nn.Sequential( nn.Linear(hid_size * 4, hid_size), nn.ReLU(), nn.Linear(hid_size, 1) ) def forward(self, batch): title_embeddings = self.embedder(batch['Title']).permute(0,2,1) title_features = self.title_encoder(title_embeddings) description_embeddings = self.embedder(batch['FullDescription']).permute(0,2,1) description_features = self.description_encoder(description_embeddings) categorical_features = self.categorical_encoder(batch['Categorical']) features = torch.cat([title_features, description_features, categorical_features], 1) return self.final_predictor(features).squeeze() def training_step(self, batch, batch_idx): pred = self(batch) loss = criterion(pred, batch[TARGET_COLUMN]) self.log('loss', loss, prog_bar=False) return {'loss': loss} def validation_step(self, batch, batch_idx): pred = self(batch) loss = criterion(pred, batch[TARGET_COLUMN]) self.log('val_loss', loss, prog_bar=False) self.val_mse(pred, batch[TARGET_COLUMN]) self.val_mae(pred, batch[TARGET_COLUMN]) self.log('valid_mse', self.val_mse, on_step=False, on_epoch=True, prog_bar=True) self.log('valid_mae', self.val_mae, on_step=False, on_epoch=True, prog_bar=True) return { 'val_loss': loss, 'valid_mse': self.val_mse, 'valid_mae': self.val_mae} def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=1e-4) return optimizer def train_dataloader(self): iterate_minibatches = IterateMiniBatches(data_train) return iterate_minibatches def val_dataloader(self): iterate_minibatches = IterateMiniBatches(data_val) return iterate_minibatches class IterateMiniBatches: def __init__(self, data, batch_size=512, shuffle=True): self.data = data self.batch_size = batch_size self.shuffle = shuffle def __iter__(self): data = self.data batch_size = self.batch_size shuffle = self.shuffle indices = np.arange(len(data)) if shuffle: indices = np.random.permutation(indices) for start in range(0, len(indices), batch_size): batch = make_batch(data.iloc[indices[start : start + batch_size]]) yield batch model = SalaryPredictor(weights_matrix) criterion = nn.MSELoss(reduction='sum') trainer = Trainer(gpus=1, max_epochs=20, callbacks=[EarlyStopping(monitor='valid_mae')]) trainer.fit(model) %load_ext tensorboard %tensorboard --logdir lightning_logs/The tensorboard extension is already loaded. To reload it, use: %reload_ext tensorboardA) CNN architectureIt is close to what we've done in the `homework_part2.ipynb`, but we will try some more stuff as suggested:All the tricks you know about dense and convolutional neural networks apply here as well.* Dropout. Nuff said.* Batch Norm. This time it's `nn.BatchNorm*`/`L.BatchNormalization`* Parallel convolution layers. The idea is that you apply several nn.Conv1d to the same embeddings and concatenate output channels.* More layers, more neurons, ya know... 
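To make the parallel-convolutions trick from the list above concrete, here is a minimal sketch of an encoder that applies several `nn.Conv1d` branches with different kernel sizes to the same embeddings and concatenates their pooled outputs. The module name `ParallelConvEncoder` and the kernel sizes are illustrative (not part of the original notebook); it expects the same `(batch, embedding_dim, seq_len)` layout that the notebook produces with `.permute(0, 2, 1)`.

```
import torch
import torch.nn as nn

class ParallelConvEncoder(nn.Module):
    """Several Conv1d branches over the same embeddings, pooled and concatenated
    (hypothetical module; kernel sizes are illustrative)."""
    def __init__(self, emb_size=50, out_channels=64, kernel_sizes=(2, 3, 5)):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(emb_size, out_channels, kernel_size=k, padding=k // 2),
                nn.ReLU(),
                nn.AdaptiveMaxPool1d(output_size=1),
            )
            for k in kernel_sizes
        ])

    def forward(self, x):
        # x: (batch, emb_size, seq_len), the layout used after .permute(0, 2, 1)
        pooled = [branch(x).squeeze(-1) for branch in self.branches]  # each: (batch, out_channels)
        return torch.cat(pooled, dim=1)  # (batch, out_channels * num_branches)

# quick shape check on random data
encoder = ParallelConvEncoder()
dummy = torch.randn(8, 50, 40)   # 8 sequences, 50-dim embeddings, 40 tokens
print(encoder(dummy).shape)      # torch.Size([8, 192])
```

If a branch like this replaced the single-kernel `title_encoder` / `description_encoder`, the input size of the following linear layer would need to grow to `out_channels * len(kernel_sizes)`.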
ArchitectureOur basic model consists of three branches:* Title encoder* Description encoder* Categorical features encoderWe will then feed all 3 branches into one common network that predicts salary.![scheme](https://github.com/yandexdataschool/nlp_course/raw/master/resources/w2_conv_arch.png) Part II: Experiments# < A whole lot of your code > - models, charts, analysisUse regex to process the feedbackThis is the baseline for self-feeding chatbotimport re import os import pandas as pd from google.colab import drive drive.mount('/gdrive') DATASET='/gdrive/My Drive/ParlAI/data/self_feeding' os.chdir(DATASET) os.listdir()def processed_feedback(file_name, file_dir): file_path = os.path.join(file_dir, file_name) df = pd.read_json(file_path, lines=True) response_list = df['response'].to_list() processed_responses = [] for idx, sentence in enumerate(response_list): match_choice = re.search('yes or no', sentence) if match_choice: processed_responses.append("yes") continue match_greeting = re.search('greeting|hi |hello', sentence) if match_greeting: processed_responses.append("hello, how are you?") continue match_job = re.search('job|career|living', sentence) if match_job: processed_responses.append("I am a teacher, what about you?") continue match_mess = re.search('messed up',sentence) if match_mess: processed_responses.append("Sorry, I am not good at this topic. Do you want to talk about anything else?") continue match_country = re.search('country',sentence) if match_mess: processed_responses.append("I am a robot, I don't live in any country") continue match_filler = re.search('you could|you should|said|saying|say|tell|told|admit|ask|answer|talk|yes|no', sentence) if match_filler: # processed_sentence = sentence processed_sentence = re.sub("you could have|you should have|you could|you should", '', sentence).strip() processed_sentence = re.sub("^.*said|^.*saying|^.*say|^.*tell |^.*told |^.*admit |^.*asked |^.*ask |^.*answer |^.*answered |^.*talked |^.*talk ", '', processed_sentence).strip() processed_sentence = re.sub("^.*about|^me |^of ", '', processed_sentence).strip() piceses = processed_sentence.split(' or ') processed_sentence = piceses[0].strip() for piece in piceses: piece = piece.strip() if len(piece) > len(processed_sentence): processed_sentence = piece # remove processed_sentence = re.sub("^if |^whether |^not ", '', processed_sentence).strip() # replace subject processed_sentence = re.sub("you ", 'i ', processed_sentence).strip() processed_sentence = re.sub("you are |i are ", 'i am ', processed_sentence).strip() processed_sentence = re.sub("your ", 'my ', processed_sentence).strip() processed_sentence = re.sub("you\'ve ", 'i\'ve ', processed_sentence).strip() processed_sentence = re.sub("you were | i were", 'i was', processed_sentence).strip() processed_sentence = re.sub("you're ", 'i\'m ', processed_sentence).strip() # Remove starting space and comma processed_sentence = re.sub("\“|\”", '', processed_sentence).strip() processed_sentence = processed_sentence.lstrip(':|,|\"|\'|-|.| ') processed_sentence = processed_sentence.rstrip('\"|\'| ') if len(processed_sentence) > 0: processed_responses.append(processed_sentence) else: # print(processed_sentence,"<<<<<<", sentence) processed_responses.append(sentence) # print(processed_sentence,"<<<<<<", sentence) s = processed_sentence + "<<<<<<" + sentence +'\n' fd_out.writelines(s) else: processed_responses.append(sentence) df['response'] = processed_responses file_name='regex_'+file_name print(file_name) df.to_json(file_name, 
orient='records',lines=True) # Process all feedback files # file_list = ['train_fb.txt', 'train_fb_a.txt', 'train_fb_b.txt', 'test_fb.txt', 'valid_fb.txt'] # file_dir = os.path.abspath(os.path.join(os.getcwd(), '..', 'self_feeding_bak')) file_list = ['train_fb.txt'] file_dir = os.getcwd() with open('processed_feedback.txt', 'w') as fd_out: for file_name in file_list: processed_feedback(file_name, file_dir) !wc -l train_fb.txt !wc -l processed_feedback.txt df = pd.read_json("train_fb.txt", lines=True) response_list = df['response'].to_list() # with open("regex_test.txt", 'r') as f_in, open('processed_feedback.txt', 'w') as fd_out: # lines = f_in.readlines() # processed_responses = [] # for sentence in lines: # sentence = sentence.strip() # match_choice = re.search('yes or no', sentence) # if match_choice: # processed_responses.append("yes") # continue # match_greeting = re.search('greeting|hi |hello', sentence) # if match_greeting: # processed_responses.append("Hello, how are you?") # continue # match_job = re.search('job|career|living', sentence) # if match_job: # processed_responses.append("I am a teacher, what about you?") # continue # match_mess = re.search('messed up',sentence) # if match_mess: # processed_responses.append("Sorry, I am not good at this topic. Do you want to talk about anything else?") # continue # match_country = re.search('country',sentence) # if match_mess: # processed_responses.append("I am a robot, I don't live in any country") # continue # match_filler = re.search('you could|you should|said|saying|say|tell|told|admit|ask|answer|talk|yes|no', sentence) # if match_filler: # # processed_sentence = sentence # processed_sentence = re.sub("you could have|you should have|you could|you should", '', sentence).strip() # processed_sentence = re.sub("^.*said|^.*saying|^.*say|^.*tell |^.*told |^.*admit |^.*asked |^.*ask |^.*answer |^.*answered |^.*talked |^.*talk |^.*that ", '', processed_sentence).strip() # processed_sentence = re.sub("^.*about|^me|that", '', processed_sentence).strip() # piceses = processed_sentence.split(' or ') # processed_sentence = piceses[0].strip() # for piece in piceses: # piece = piece.strip() # if len(piece) > len(processed_sentence): # processed_sentence = piece # # remove # processed_sentence = re.sub("^if|^whether|^not", '', processed_sentence).strip() # # replace subject # processed_sentence = re.sub("you are ", 'i am ', processed_sentence).strip() # processed_sentence = re.sub("your ", 'my ', processed_sentence).strip() # processed_sentence = re.sub("you\'ve ", 'i\'ve ', processed_sentence).strip() # processed_sentence = re.sub("you were", 'i was', processed_sentence).strip() # processed_sentence = re.sub("you ", 'i ', processed_sentence).strip() # processed_sentence = re.sub("you're ", 'i\'m ', processed_sentence).strip() # # Remove starting space and comma # processed_sentence = re.sub("\“|\”", '', processed_sentence).strip() # processed_sentence = processed_sentence.lstrip(':|,|\"|\'|-|.| ') # processed_sentence = processed_sentence.rstrip('\"|\'| ') # if len(processed_sentence) > 0: # processed_responses.append(processed_sentence) # else: # processed_responses.append(sentence) # else: # processed_responses.append(sentence) # for res in processed_responses: # fd_out.write(res+'\n')Week 10 Assignment Task1 Firstly, I create a csv file includinggiven information. Then I upload the file to Github.#set up the basic package import numpy as np import pandas as pd #check the data set and loaded the required file from own repository. 
url="https://raw.githubusercontent.com/steinszzh/DAV-5400/master/week10/Tidying%20and%20Transforming%20Data.csv" flight = pd.read_csv(url) #general check for the data flight.head()Task2 Q:how would you define a “single observation” for the data shown in the graphic?; How many key values are associated with each data value?;How many columns should your long format structure contain based on the information provided in thegraphic shown above?; What would the column headings for the long structure be?; etc. Based on the requirement, I would like to get rid of these none values firstly. Then I will rename the category to get airline and Ontime or delay column.flight.dropna(how='all',inplace=True) #check the result flight # rename the columns flight.rename( columns={'Unnamed: 0':'Airline','Unnamed: 1':'Ontime or delay'}, inplace=True ) flightAfter renaming the table, melt function is used to change it to long format.# Melting from wide to long format, first 2 columns are the id ids = ["Airline","Ontime or delay"] # the rest are values values = flight.columns[2:] flight = pd.melt(frame=flight,id_vars=ids, value_vars=values,var_name='City', value_name='Value') #check data flight.head() #NaN value of delayed was filled by using on time value flight['Airline'].fillna(method='ffill', inplace = True) #check data flightAs the result shows, there is ordered dataset now. Task3 Using your reshaped/transformed data, perform analysis to compare the arrival delays for the two airlines. Some questions you might choose to answer: For each city, which airline had the best on time performance?; Which airline had the best overall on time performance?, etc. Firstly, I need to extract data from previous data set. To get on time performance, I would like to get on time flight of each airline and the total values of flight amount to get percentage.# Extract total number of flights for ontime flight_ontime = flight[flight['Ontime or delay']=='on time'][['Airline','City','Value']] flight_ontime # create a frame to contain the total number of flights flight_ontimetotal = flight.groupby(['Airline','City']).sum()['Value'].to_frame() flight_ontimetotalThen use merge function to combine both two data sets.#merge two d merged_flight = pd.merge(flight_ontime, flight_ontimetotal, on=['Airline','City'],suffixes=(' ontime',' total')) merged_flightAfter getting these two values, we can start to compute the percentage of ontime.merged_flight['ontime_percentage'] = ((merged_flight['Value ontime'] * 100) / merged_flight['Value total']) merged_flight.head()As the data showed, the percentage column is generated and the sequence just required the reorder process.# sorted by the city sorted_flight = merged_flight.sort_values('City',ascending=True) sorted_flightFor each city, ALASKA airline has better performance than AM WEST. To compare the overall performance, we need to use get total on time flight and total flight for each airlines. 
Then compute to get each airline's overall performance.#get total ontime number of flights for 'ALASKA' Alaska_ontime = merged_flight[merged_flight['Airline']=='ALASKA'].sum()['Value ontime'] #get total number of flights for 'ALASKA' Alaska_total = merged_flight[merged_flight['Airline']=='ALASKA'].sum()['Value total'] #get total ontime number of flights for 'Amwest' Amwest_ontime = merged_flight[merged_flight['Airline']=='AM WEST'].sum()['Value ontime'] #get total number of flights for 'Amwest' Amwest_total = merged_flight[merged_flight['Airline']=='AM WEST'].sum()['Value total'] #get overall percentage of ontime flights for 'ALASKA' Alaska_perf=Alaska_ontime/Alaska_total Alaska_perf #get overall percentage of ontime flights for 'AM WEST' Amwest_perf=Amwest_ontime/Amwest_total Amwest_perfInterestingly, AM WEST has a better overall ontime performance even though AM WEST lose on every individual city. Also, Am West is 2% more higher than ALASKA overall performance. Task4 Finally, given your “tidy” long format structure, consider what, if any, changes you would make to the visual presentation of the data if you were then asked to transform your “long” data back into a “wide” format: would you mimic the structure of the graphic shown above? If not, how might you transform your “long” data to “wide” format to make its “wide” presentation easier to understand and work with? Provide an example of your recommendation. Only when I want to analyze the performance of a specific city, I may change it back. I would like to use pivot function to transform 'long' to 'wide' format. Then use unstack function to make it look nice.# create a pivot table by mimic the origianl format flight_pivot= pd.pivot_table(flight,values='Value',index=['Airline','Ontime or delay'],columns=['City']) flight_pivot #use unstack function to unstack wide flight_wide = pd.DataFrame(flight_pivot.unstack()) #get wide flight_wideImport ERA-5 JJA dataera_5_jun_06 = np.load('../../ERA_5_monthly_TLS_maps/june_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_07 = np.load('../../ERA_5_monthly_TLS_maps/june_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_08 = np.load('../../ERA_5_monthly_TLS_maps/june_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_09 = np.load('../../ERA_5_monthly_TLS_maps/june_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_10 = np.load('../../ERA_5_monthly_TLS_maps/june_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_11 = np.load('../../ERA_5_monthly_TLS_maps/june_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_12 = np.load('../../ERA_5_monthly_TLS_maps/june_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_13 = np.load('../../ERA_5_monthly_TLS_maps/june_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_14 = np.load('../../ERA_5_monthly_TLS_maps/june_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_15 = np.load('../../ERA_5_monthly_TLS_maps/june_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_16 = np.load('../../ERA_5_monthly_TLS_maps/june_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_17 = np.load('../../ERA_5_monthly_TLS_maps/june_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_18 = np.load('../../ERA_5_monthly_TLS_maps/june_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) 
era_5_jun_19 = np.load('../../ERA_5_monthly_TLS_maps/june_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jun_20 = np.load('../../ERA_5_monthly_TLS_maps/june_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_06 = np.load('../../ERA_5_monthly_TLS_maps/july_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_07 = np.load('../../ERA_5_monthly_TLS_maps/july_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_08 = np.load('../../ERA_5_monthly_TLS_maps/july_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_09 = np.load('../../ERA_5_monthly_TLS_maps/july_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_10 = np.load('../../ERA_5_monthly_TLS_maps/july_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_11 = np.load('../../ERA_5_monthly_TLS_maps/july_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_12 = np.load('../../ERA_5_monthly_TLS_maps/july_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_13 = np.load('../../ERA_5_monthly_TLS_maps/july_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_14 = np.load('../../ERA_5_monthly_TLS_maps/july_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_15 = np.load('../../ERA_5_monthly_TLS_maps/july_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_16 = np.load('../../ERA_5_monthly_TLS_maps/july_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_17 = np.load('../../ERA_5_monthly_TLS_maps/july_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_18 = np.load('../../ERA_5_monthly_TLS_maps/july_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_19 = np.load('../../ERA_5_monthly_TLS_maps/july_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_jul_20 = np.load('../../ERA_5_monthly_TLS_maps/july_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_06 = np.load('../../ERA_5_monthly_TLS_maps/august_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_07 = np.load('../../ERA_5_monthly_TLS_maps/august_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_08 = np.load('../../ERA_5_monthly_TLS_maps/august_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_09 = np.load('../../ERA_5_monthly_TLS_maps/august_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_10 = np.load('../../ERA_5_monthly_TLS_maps/august_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_11 = np.load('../../ERA_5_monthly_TLS_maps/august_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_12 = np.load('../../ERA_5_monthly_TLS_maps/august_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_13 = np.load('../../ERA_5_monthly_TLS_maps/august_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_14 = np.load('../../ERA_5_monthly_TLS_maps/august_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_15 = np.load('../../ERA_5_monthly_TLS_maps/august_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_16 = np.load('../../ERA_5_monthly_TLS_maps/august_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_17 = 
np.load('../../ERA_5_monthly_TLS_maps/august_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_18 = np.load('../../ERA_5_monthly_TLS_maps/august_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_19 = np.load('../../ERA_5_monthly_TLS_maps/august_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_aug_20 = np.load('../../ERA_5_monthly_TLS_maps/august_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era5_jja = np.concatenate([era_5_jun_06, era_5_jun_07, era_5_jun_08, era_5_jun_09, era_5_jun_10, era_5_jun_11, era_5_jun_12, era_5_jun_13, era_5_jun_14, era_5_jun_15, era_5_jun_16, era_5_jun_17, era_5_jun_18, era_5_jun_19, era_5_jun_20, era_5_jul_06, era_5_jul_07, era_5_jul_08, era_5_jul_09, era_5_jul_10, era_5_jul_11, era_5_jul_12, era_5_jul_13, era_5_jul_14, era_5_jul_15, era_5_jul_16, era_5_jul_17, era_5_jul_18, era_5_jul_19, era_5_jul_20, era_5_aug_06, era_5_aug_07, era_5_aug_08, era_5_aug_09, era_5_aug_10, era_5_aug_11, era_5_aug_12, era_5_aug_13, era_5_aug_14, era_5_aug_15, era_5_aug_16, era_5_aug_17, era_5_aug_18, era_5_aug_19, era_5_aug_20]) era_5_jja_df = pd.DataFrame(era5_jja, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) daily_mean_removed = era5_tools.daily_mean_remover(era_5_jja_df) daily_mean_removed['hourbin'] = daily_mean_removed.Hour.map(era5_tools.to_bin_hour) sorted_dd = era5_tools.df_organizer(daily_mean_removed) cycles_in_boxes = era5_tools.diurnal_binner(sorted_dd) #np.save('JJA_ERA5_5x10_boxes_diurnal_cycles', cycles_in_boxes)A very hurried course in Python Derived from [SOCS2018-PYTHON-HANDSON](https://github.com/calde12/SOSC2018-PYTHON-HANDSON) by - [Classes definition](Classes) - [Class structure](Structure-of-a-class) - [Iterators and Iterables](Iterators,-Iterables) - [Generators](Generators)- [Working with dates](Working-with-dates) You remember this, right?l = [1, 2, 3] # a list d = {'z': 0, 'u': 1} # a dictionary s = {8, 9, 2, 5, 8, 9, 2, 5} # a set print(type(l), type(d), type(s))Classes **Classes** are objects that provide a means of bundling **data and functionality** together. Control flows, `with` statements, exception handling ect. is what *procedural programming* is made of.Classes are the foundation of what is called *Object Oriented Programming (OOP)*. This is a programming paradigm based on the concept of **"objects"**, which may contain **data, in the form of** fields, often known as **attributes**; and **code, in the form of** procedures, often known as **methods**. A class defines how the objects should be: their status and the actions that they can perform to create their status. To create a particular specimen of a certain class is said creating an **instance** of that class. Structure of a class Creating a new type of object```pythonclass ClassName(): def __init__(initial_arguments): ... def method_1(arguments): ...``` Creating an instance:`instance_name = ClassName(initial_arguments)` Let's see together how to create a classclass Student(): # creating the Student class # the __init__ method is 'special'. # In it we define the initial status that # new instances of this class will have. def __init__(self, name, age): # self is a protected keyword referring to the to-be-created instance. self.age = age self.name = name # age and name are ATTRIBUTES student = Student('', 33) # student is an instance of Student print(student.name, '---', student.age) # Adding new methods class Student(): # initial arguments can have DEFAULT values, as functions! 
def __init__(self, name, age=33): self.age = age self.name = name # by passing self, we give access to ALL the attributes of the instance! def upper_name(self): up_name = self.name.upper() return up_name student = Student("") big_name = student.upper_name() print(big_name)The `self` keyword is a placeholder for the class instance yet-to-be-created. That's why we add it in our methods, that will apply on an instance. When you create a class instance, and call the method with the syntax> `instance_name.method_name()` the class object fills the place required by the `self` argument. That's why in the example above, `upper_name()` was defined with a `self` argument but when we invoked it on *student*, we passed no arguments: *student* filled the `self`!# Methods that can CHANGE ATTRIBUTES class Student(): def __init__(self, name, age=33, passed_exams=0): self.age = age self.name = name self.passed_exams = passed_exams def upper_name(self): up_name = self.name.upper() return up_name def increase_exams(self, n): # this means we're adding the number n to the passed_exams attribute self.passed_exams += n print('Yeeeeah!') st = Student("") print(st.passed_exams) # note that we don't pass the self argument: # it is given by the instance itself! st.increase_exams(2) print(st.passed_exams) # TO DO: create a class Dog that accept this initial arguments: # name (string) and bark_length (int) with default value 1. # Add a method .barking that prints the string 'Wof!' with as many 'o' as the # number in bark_length. Add a method .increase_barking that accepts an int # and MODIFIES the bark_length. # Create an instance of dog and try your methods. # YOUR CODE HEREIterators, Iterables An **Iterator** is an object that represents a stream of data. More precisely, an objects that has the `__next__` method which returns the next item from the iterator or raises `StopIteration` exception if there are no further item. When you use a for loop, list comprehension or anything else that iterates over an object, in the background the `__next__` method is being called on an iterator.An **Iterable** is anything that is able to iterate. In practice, an object that has the `__iter__` method, which returns an iterator. To clarify, strings, lists, files, and dictionaries are all examples of iteables that return an iterator on themselves.Here an awesome article that explain the differences https://hackaday.com/2018/09/19/learn-to-loop-the-python-way-iterators-and-generators-explained/from math import gcd # define the iterator object class Multiple(): # as any class it supports the __init__ method def __init__(self, number, maximum=1000): self.number = number self.maximum = maximum self.counter = 0 # __iter__ method makes the class an iterable def __iter__(self): return self # __next__ method makes the class an iterator def __next__(self): self.counter += 1 value = self.number * self.counter if value > self.maximum: raise StopIteration return value # use it in a for loop for number in Multiple(79): print(number)**Why it is good to be able to write your own iterator?**Many programs have a need to iterate over a large list of generated data and iterators can work on the principle of **lazy evaluation**: as you loop over an iterator, values are generated as required. In many situations, the simple choice to use an iterator can markedly improve performance.# TO DO: create a class Dice whose __next__ method returns a random number # that ranges from 1 to 6. 
Then, create two instances of Dice and iter over # them until they draw the same number. # HINT: use randint (from the random library) to draw a random integer and # use zip to iter over the two iterables in a single for # YOUR CODE HEREGenerators You may have noticed that there is a fair amount of boilerplate code in the example above. **Generators** make it far easier to build your own iterators. There is no fussing aroung `__iter__` and `__next__`, and we don't have to keep track of an internal state or worry about raising exceptions.# generators are defined as a function def multiple_gen(number, maximum=1000): counter = 1 value = number * counter while value <= maximum: # yield is the keyword that makes the function a generator yield value counter += 1 value = number * counter # use it in a for loop for number in multiple_gen(79): print(number)`yield` keyword is similar to `return`, but instead of terminating the function, it simply pauses execution until another values is required.# TO DO: create a generator dice_gen that, in a for loop, has the same # behaviour of the Dice class. # YOUR CODE HEREI have created two very short functions. The first - frogJump - jumps the requisite number of lily pads (n)If it falls short it calls itself with the new target which is the previous target minus the pad reached. Otherwise it returns the number of attempts it took (i). Note that i must be set to 0 in the first call and is incremented by one in each subsequent call until the target is reached.Note that n is the number of the last lilypad, assuming the frog starts on pad 0The second function - simulateFrog - uses frogJump to calculate the average number of jumps to reach the target n. The average is calculated over s simulationsdef frogJump(n,i): r=randint(1,n) i+=1 if r==n: return i else: return frogJump(n-r,i) #This function returns the average number of jumps to reach target n in s attempts def simulateFrog(n,s): a=0 for i in range(s): a=a+frogJump(n,0) return a/sThe following code creates a list of results for average jumps to lily pads from 1 to rIn each case we are averaging over s simulationsr=10 s=1000000 tbFrog = [[0,0]]*r for pads in range(1,r): tbFrog[pads]=[pads,simulateFrog(pads,s)] tbFrogIt looks like this is the sum to n of the harmonic seriesThe harmonic series is 1 + 1/2 + 1/3 + 1/4 .... 1/nWe can check this for, say, n=9 (see below) and the result compares favourably with our first simulationn=9 s=0 for i in range(n): s=s+1/(i+1) print (s)2.8289682539682537Now we perform the same for 1 million pads. Below, the sum of the harmonic series is compared with the simulateFrog function (which is quick). Finally a formula for the sum of the harmonic series is used to find another answer for the sum to 1/1million. As you can see they are comparable.n=1000000 s=0 for i in range(n): s=s+1/(i+1) print (s) simulateFrog(1000000,1000000) #There is no simple sum(n) for the harmonic series. #The following formula is a good estimate. 
np.log is the natural log function from numpy n=1000000 np.log(n)+0.5772156649+1/(12*n)-1/(112*n*n)Helper Functions Getting the potential splitdef get_potential_splits(data,random_subspace): potential_splits = {} column_indices = list(range(data.shape[1]-1)) if random_subspace and random_subspace < data.shape[1]: column_indices = random.sample(population = column_indices,k = random_subspace) for column_index in column_indices : values =data[:,column_index] if FEATURE_TYPES[column_index] == 'Continious': unique_values = np.unique(values) potential_splits[column_index] = [] for i in range(len(unique_values)-1): current_value = unique_values[i] next_value = unique_values[i+1] potential_split = (current_value+next_value)/2 potential_splits[column_index].append(potential_split) else: potential_splits[column_index]=list(set(values)) return potential_splitsChecking type of featuresdef determine_type_of_feature(data): feature_types = [] threshold = 15 for column_index in range(data.shape[1]-1): unique_values = np.unique(data[:,column_index]) if(len(unique_values)<=threshold)or isinstance(unique_values[0],str): feature_types.append('Categorical') else: feature_types.append('Continious') return feature_typesSplit Functiondef split_data(data,split_column,split_value): values = data[:,split_column] type_of_feature = FEATURE_TYPES[split_column] if type_of_feature == 'Continious': data_above = data[values > split_value] data_below = data[values <= split_value] else: data_below = data[values == split_value] data_above = data[values != split_value] return data_below,data_aboveMetric Functions Gini Indexdef gini(data): label_column= data[:,-1] _,counts = np.unique(label_column,return_counts=True) p=counts/counts.sum() gini =1- np.dot(p,p) return giniEntropydef entropy(data): label_columns = data[:,-1] _,counts = np.unique(label_columns,return_counts= True) p = counts/counts.sum() entropy = sum(p*-np.log2(p)) return entropyOverall Metricdef overall_metric(data_below,data_above,metric_function): n=len(data_above)+len(data_below) p_data_below = len(data_below)/n p_data_above = len(data_above)/n overall_metric = p_data_above*metric_function(data_above) + p_data_below*metric_function(data_below) return overall_metricGetting the best splitdef get_best_split(data, potential_splits, metric_function = gini): first_iteration = True for column_index in potential_splits: for value in potential_splits[column_index]: data_below,data_above = split_data(data,split_column=column_index,split_value = value) current_metric = overall_metric(data_above,data_below,metric_function) if first_iteration: best_metric = current_metric first_iteration = False if current_metric <= best_metric : best_metric = current_metric best_column =column_index best_value = value return best_column,best_valueCheck Puritydef check_purity(data): label_columns = data[:,-1] if len(np.unique(label_columns))==1: return True else: return FalseCreating Leafdef create_leaf(data): label_columns = data[:,-1] unique_labels,counts = np.unique(label_columns,return_counts =True) index = counts.argmax() leaf = unique_labels[index] return leafTrain Test Splitdef train_test_split(df,split_ratio = 0.7,random_state=123): np.random.seed(random_state) indices = np.random.rand(len(df))Bootstrappingdef bootstrap(data,n_bootstrap): indices =np.random.randint(low=0,high=len(data),size=n_bootstrap) return data[indices]Decision Tree Algorithmdef decision_tree_algorithm(data,counter =0, max_depth =5,min_samples = 10,random_subspace=None,metric_function = gini): if counter == 0: 
global FEATURE_TYPES FEATURE_TYPES = determine_type_of_feature(data) if (check_purity(data)) or (counter == max_depth) or (len(data) < min_samples): return create_leaf(data) else: counter += 1 potential_splits = get_potential_splits(data, random_subspace) column_index,split_value = get_best_split(data, potential_splits, metric_function) data_below,data_above = split_data(data, column_index, split_value) if len(data_below)==0 or len(data_above)==0 : return create_leaf(data) type_of_feature = FEATURE_TYPES[column_index] #column_name = COLUMN_NAMES[column_index] if type_of_feature == 'Continious': question = "{} <= {}".format(column_index,split_value) else: question ="{} = {}".format(column_index,split_value) sub_tree={question:[]} yes_answer = decision_tree_algorithm(data_below, counter, max_depth, min_samples,random_subspace ,metric_function ) no_answer = decision_tree_algorithm(data_above, counter, max_depth, min_samples,random_subspace ,metric_function ) if yes_answer == no_answer: sub_tree =yes_answer else: sub_tree[question].append(yes_answer) sub_tree[question].append(no_answer) return sub_treeDecision Tree Classifierdef decision_tree_classifer(example,tree): question = list(tree.keys())[0] column_index,comparison_operator,value =question.split() column_index =int(column_index) if comparison_operator == "<=": if example[column_index] <= float(value): answer = tree[question][0] else: answer = tree[question][1] else: if str(example[column_index]) == value: answer = tree[question][0] else: answer = tree[question][1] if not isinstance(answer,dict): return answer else: residual_tree = answer return decision_tree_classifer(example, residual_tree)Random Forest Algorithmdef random_forest_algorithm(train_data, n_trees,max_depth = 5,min_samples =10,random_state = 123, n_features = 3, n_bootstrap=50,metric_function =gini): np.random.seed(random_state) forest = [] for i in range(n_trees): bootstrapped_data = bootstrap(train_data,n_bootstrap) tree = decision_tree_algorithm(data = bootstrapped_data, counter=0, random_subspace = n_features, max_depth = max_depth,metric_function=metric_function) forest.append(tree) return forestRandom Forest Classifierdef random_tree_classifier(example,forest): results =[] for index in range(len(forest)): result = decision_tree_classifer(example, forest[index] ) results.append(result) mode = max(set(results),key=results.count) return modeAccuracydef classify_data(test_df,forest): Predictions = test_df.apply(func = random_tree_classifier, axis = 1, raw=True,args=(forest,)) return Predictions def calculate_accuracy(labels,predictions): accuracy = np.array(labels == predictions).mean() return accuracyBenchmarking Importing the SKLearn Libraryfrom sklearn.ensemble import RandomForestClassifier from sklearn.datasets import make_classification from sklearn.metrics import accuracy_score from sklearn import ensembleLoading the datacolumns = ['variance', 'skewness', 'curtosis', 'entropy', 'class'] data = "https://archive.ics.uci.edu/ml/machine-learning-databases/00267/data_banknote_authentication.txt" banknote = pd.read_csv(data, names= columns) banknote.head()Train Test Splittrain_df ,test_df= train_test_split(banknote) print(train_df.shape) print(test_df.shape)(967, 5) (405, 5)Implementation of our model%%time forest=random_forest_algorithm(train_df.values,n_trees = 5,n_features=2,n_bootstrap=100,random_state =120) predictions = classify_data(test_df.iloc[:,:-1],forest) labels = test_df.iloc[:,-1] print("Accuracy is : 
{}".format(calculate_accuracy(predictions,labels)*100))Accuracy is : 94.32098765432099SKLearn ImplementationX_train = train_df.values[:,:-1] y_train = train_df.values[:,-1] X_test = test_df.values[:,:-1] y_test = test_df.values[:,-1] %%time clf_entropy = RandomForestClassifier( criterion = "entropy", random_state = 100, max_depth=3, min_samples_leaf=5) clf_entropy.fit(X_train,y_train) y_pred = clf_entropy.predict(X_test) print ("Accuracy is ", accuracy_score(y_test , y_pred)*100)Accuracy is 93.33333333333333OpenCV Tutorial Sample 8: ocv_dog_img[Sample 08](sample_08/ocv_dog_img.py) is a program that overlays a Digital On-Screen Graphic (DOG) or logo onto a still image. DOG is a form of digital watermarking routinely used on broadcast TV to show the TV channel logo. It can also be used on digital signage to watermark content. In previous samples, we have seen how to overlay text on images and video. This sample shows how to overlay and image on another image.The logo image (DOG) is usually a PNG file that is capable of preserving transparency information, in other words, the alpha channel.In the interactive tutorial, we will use matplotlib to display some of the intermediate results.First we start off with the usual initializations...#!/usr/bin/env python # Python 2/3 compatibility from __future__ import print_function # Allows use of print like a function in Python 2.x # Import OpenCV and other needed Python modules import numpy as np import cv2Next load the image to be watermarked. We will call this the source image. For illustrative purposes, we will display this image in a named window called "Source Image". Remember the window will remained grayed out until the event handler cv2.waitkey() is called.# Load the source image img = cv2.imread('Intel_Wall.jpg') # Create a named window to show the source image cv2.namedWindow('Source Image', cv2.WINDOW_NORMAL) # Display the source image cv2.imshow('Source Image',img)Next load the logo image with which the source image will be watermarked. A second named window called "Result Image" will help serve as a placeholder to handle intermediate outputs, resizing and the final image.# Load the logo image dog = cv2.imread('Intel_Logo.png') # Create a named window to handle intermediate outputs and resizing cv2.namedWindow('Result Image', cv2.WINDOW_NORMAL)The Logo image and source image are not te same size. So we need to first find the size of the logo. We do this using the numpy shape object.# To put logo on top-left corner, create a Region of Interest (ROI) rows,cols,channels = dog.shape roi = img[0:rows, 0:cols ] # Print out the dimensions of the logo... print(dog.shape)(270, 270, 3)Now convert the logo image to grayscale for faster processing... 
Only in the interactive tutorial, we will use matplotlib to display the result.# Convert the logo to grayscale dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY) # The code below in this cell is only to display the intermediate result and not in the script from matplotlib import pyplot as plt plt.imshow(dog_gray) plt.show()Next create a mask and inverse mask of the logo image ...# Create a mask of the logo and its inverse mask ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY) mask_inv = cv2.bitwise_not(mask) # The code below in this cell is only to display the intermediate result and not in the script plt.imshow(mask_inv) plt.show()Now we blackout the logo within the ROI so that we can extract it from its background.# Now blackout the area of logo in ROI img_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)Perform the extraction# Now just extract the logo dog_fg = cv2.bitwise_and(dog,dog,mask = mask) # The code below in this cell is only to display the intermediate result and not in the script plt.imshow(dog_fg) plt.show()Now we add the logo to the source image. We can use the OpenCV [cv2.add()](http://docs.opencv.org/3.0-last-rst/modules/core/doc/operations_on_arrays.htmlcv2.add) function.# Next add the logo to the source image dst = cv2.add(img_bg,dog_fg) img[0:rows, 0:cols ] = dstTime to display the result# Display the Result cv2.imshow('Result Image',img) # Wait until windows are dismissed cv2.waitKey(0)Now release all resources used# Release all resources used cv2.destroyAllWindows()Ranking AppStore - Bancos digitaisprint('Importando as bibliotecas necessárias...') # Importando as bibliotecas necessárias from bs4 import BeautifulSoup import urllib.request as urllib_request import pandas as pd import datetime import time from urllib.request import Request, urlopen from urllib.error import URLError, HTTPError import matplotlib.pyplot as plt # Setando os ids dos apps ids = {'Nubank': '814456780', 'Neon': '1127996388', 'C6': '1463463143', 'Inter': '839711154', 'Digio': '1128793569', 'Next': '1133682678', 'PAN': '1410400504'} # Coloque aqui o caminho do arquivo path = 'C:\\Users\\vinicius.oliveira\\Praticando_Web_Scrap\\data\\' df_rank = pd.read_csv(path + 'bank_rank_iOS.csv') # Definindo um dicionário vazio para armazenamento rank = {} # Setando a data e a hora now = datetime.datetime.now() ano = now.year mes = now.month dia = now.day horas = now.hour minutos = now.minute # Acertando alguns valores if len(str(mes)) == 1: mes = '0' + str(mes) if len(str(dia)) == 1: dia = '0' + str(dia) if len(str(horas)) == 1: horas = '0' + str(horas) if len(str(minutos)) == 1: minutos = '0' + str(minutos) data = f'{dia}/{mes}/{ano}' hora = f'{horas}:{minutos}' rank['Data'] = data rank['Hora'] = hora # Laço para baixar os dados do site da App Store for app in ids.keys(): # Definindo a url url = f'https://apps.apple.com/br/app/id{ids[app]}' headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'} # Fazendo a requisição try: req = Request(url, headers = headers) response = urlopen(req) print(f'{app} --> {response.getcode()}. 
Foi...') html = response.read() # Capturando o HTTPError caso ocorra except HTTPError as e: print('\nHTTPError\n') print(e.status, e.reason) # Capturando o URLError caso ocorra except URLError as e: print('\nURLError\n') print(e.reason) # Decodificando o html html = html.decode('utf-8') # Função para tratar o html def trata_html(input): return " ".join(input.split()).replace('> <', '><') # Tratando o html html = trata_html(html) # Instanciando um objeto da classe BeautifulSoup soup = BeautifulSoup(html, 'html.parser') # Armazenando o rank do app rank['Categoria'] = str(soup.findAll('li', {'class': 'inline-list__item'})[0].getText().split(' em ')[1].strip()) rank[app] = int(str(soup.findAll('li', {'class': 'inline-list__item'})[0]).split('Nº ')[1].split(' em ')[0]) time.sleep(3) ########### Exportando o .csv ######################################################################################### print('Exportando o .csv...') # Armazenando os dados novos novos = pd.DataFrame([rank]) df_rank = pd.concat([df_rank, novos], ignore_index=True) # Exportando o .csv df_rank.to_csv(path + 'bank_rank_iOS.csv', index=False) rank novos df_rank = pd.read_csv(path + 'bank_rank_iOS.csv') df_rank cores = {'Nubank': '#8A06BD', 'Neon': '#00D8D8', 'C6': '#242424', 'Inter': '#FF7A01', 'Digio': '#002850', 'Next': '#01FF5E', 'PAN': '#00AFFF'} # Configurando as figura plt.figure(figsize=(20, 10)) apps = df_rank.iloc[:-1, 3:].mean().sort_values().index # Iterando em cada app for app in apps: # Inicializando um contador cont = 0 # Plotando o gráfico para cada app plt.plot(df_rank['Data'], df_rank[app], color=cores[app], label=app, linewidth=5) # Iternando em cada par (data, ranking) para cada app for x,y in zip(df_rank['Data'], df_rank[f'{app}']): # Condição para mudar as posições das anotações do app da Nubank if app == 'Nubank': # Setando as posições das anotações do app da Nubank y_pos = -5 x_pos = -5 # Condição para mudar as posições das anotações do app da Neon elif app == 'Neon': # Setando as posições das anotações do app da Neon y_pos = -5 x_pos = -5 # Condição para mudar as posições das anotações do app da C6 elif app == 'C6': # Setando as posições das anotações do app da C6 y_pos = -15 x_pos = 10 # Condição para mudar as posições das anotações do app da Inter elif app == 'Inter': # Setando as posições das anotações do app da Inter y_pos = 5 x_pos = 10 # Condição para mudar as posições das anotações do app da Digio elif app == 'Digio': # Setando as posições das anotações do app da Digio y_pos = -15 x_pos = 10 # Condição para mudar as posições das anotações do app da Next elif app == 'Next': # Setando as posições das anotações do app da Next y_pos = -15 x_pos = 10 # Condição para mudar as posições das anotações do app da PAN elif app == 'PAN': # Setando as posições das anotações do app da PAN y_pos = 5 x_pos = 10 # Condição para mostrar não mostrar todas as anotações if cont % 1 == 0 or cont == df_rank.shape[0] - 1: # label -> anotação que vai aparecer anotacao = f'{y}' # Plotando as anotações plt.annotate(anotacao, (x,y), # coordenada da anotação textcoords="offset points", # como posicionar a anotação xytext=(x_pos, y_pos), # distância da anotação para o seu respectivo ponto (x,y) ha='center', # alinhamento horizontal da anotação. 
Pode ser 'left', 'right' or 'center' fontsize=20, # tamanho da fonte da anotação color=cores[app]) # cor da anotação # Incrementanndo o contador cont += 1 limite = df_rank.iloc[:, 3:].max().max() + 5 # Invertendo o eixo y plt.ylim(limite, -1) # Colocando grid plt.grid(True) # Definindo o título plt.title('Ranking AppStore - Bancos digitais', fontsize=30) # Definindo a legenda plt.legend(labels=apps, fontsize=18, loc=(0.01, 0.01)) # Definindo o label do eixo y plt.ylabel('Ranking', fontsize=25) # Definindo o label do eixo x plt.xlabel('Dia', fontsize=25) # Formatando os ticks plt.xticks(fontsize=16, rotation=20) plt.yticks(fontsize=16) # Salvando a figura plt.savefig('images\\Ranking_AppStore-Bancos_digitais.png') # Mostrando a figura plt.show()Fast-Free Time-Resolved Electrostatic Force Microscopy (FF-trEFM) , Ph.D. Department of Chemistry University of Washington Box 351700 Seattle, Washington, USA 98195 This notebook walks through the basics of processing FFtrEFM data as acquired from the Gage instrument and processing into a Pycroscopy-compatible H5 file. Note that you can, of course, skip Pycroscopy entirely should you wish, but the Pycroscopy package is more broadly compatible with future updates. It also is convenient in saving your data in a single file; the most time-consuming element of processing FFtrFEM data (or microscopy data in general) is often just loading the data. ------------ Imports. You may need to install pycroscopy and pyUSID. See the instructions here: https://pycroscopy.github.io/pycroscopy/about.html https://pycroscopy.github.io/USID/about.html The import will likely bring up a FutureWarning. This effect is from some outdated functions (VirtualDataGroup) in ffta. As of 7/18/2019 I have caught most of these and updated, but not all of them.import ffta import pycroscopy as px import pyUSID as usid import numpy as np from matplotlib import pyplot as plt------------ Source files to processThe .IBW file is your image file. Typically you have a multi-layered IBW with the topography+associated channels saved. The code converts those channels into Pycroscopy. As of now it's hard-coded to work on data from the Ginger lab, but that is easy enough to fix going forward. The FF folder is the folder path containing the raw deflection data. For those unfamiliar, the r' is to accomodate Windows paths.# input file paths manually here: # Here we will use some example data to better demonstrate the process ibw_file = r'E:/Data/20190107 - BAPI trEFM/FF2_128_7V.ibw' ff_folder = r'E:\Data\20190107 - BAPI trEFM\FF02_128x64_455nm_7V_400mA'------------ Start processing!This command will then convert your data into an HDF5 (.h5) file on your local disk. It will be autosaved in the folder with the IBW. This process takes awhile, probably about 5-10 minutes depending upon your computer. The actual process is:* convert the IBW file into a H5 file with separate folders for each image layer.* convert each invididual deflection trace into a Pycroscopy-compatible data file. Save in folder FF_Raw within the structure.* average the deflection traces based on number of signals acquired per pixel (typically 30-60), saved FF_Averaged. In future versions, the averaging will be done in the XOP file itself, so that step will be eliminated. Working off the averaged data is typically faster anyway. 
The net result is this H5 file has two sets of the data within it, in case some particular deflection traces are brokenIn principle, once this process has completed you no longer need the IBW files as the data are within the .H5 file. You should keep a backup on the associated storage of your choosing, but locally that is safe unless you need to reprocess the data.Some parameters to note:> "verbose = False" to avoid the forty million printed lines. > "average = True" averages the data based on number of samples per pixel, then stores that data. This process shrinks the H5 considerably, and it speeds up processing time. In a typically oscilloscope, this process is done prior to displaying data. > "mirror = True" (default). For conventional FF-trEFM, the data are acquired on the retrace, meaning as saved they are backwards from the topography. To make them sensibly line up, this mirror parameter is the default case.h5_path, parm_dict, h5_avg = ffta.hdf_utils.load_hdf.loadHDF5_ibw(ibw_file_path=ibw_file, ff_file_path=ff_folder, verbose=True, average=True)------------ Using PCA to clean up the dataHere's an example of why Pycroscopy is useful. Here we use SVD to visualize the principal components. We can then filter all the noisy components when reconstructing the data. The vast majority of data in FFtrEFM tends to be within the first ~10 components, give or take. This SVD is processing h5_avg, which is the "averaged" data set where each pixel is the average of the acquired signals. You rarely need to work off the raw data.Note that the visualization doesn't quite work in Jupyter notebooks, but this works in Spyder or equivalent. The second block below fixes that.# SVD h5_svd = ffta.analysis.svd.FF_SVD(h5_avg) # This block is only necessary in Jupyter notebooks. # Plot abundance maps correctly h5_U = h5_svd['U'] h5_V = h5_svd['V'] h5_S = h5_svd['S'] num_rows = parm_dict['num_rows'] num_cols = parm_dict['num_cols'] abun_maps = np.reshape(h5_U[:,:25], (num_rows, num_cols,-1)) eigen_vecs = h5_V[:16, :] num_abun_rows = 4 num_abun_cols = 4 fig, a = plt.subplots(nrows=num_abun_rows, ncols=num_abun_cols, figsize=(10,10)) tt = np.transpose(abun_maps, (2, 0, 1)) # The pycroscopy plot wrapper doesn't work in Jupyter for r in range(num_abun_rows): for c in range(num_abun_cols): a[r][c].imshow(tt[:][:][r*num_abun_rows+c], cmap='inferno') # Rebuild clean_components = [0,1,2,3,4] # change based on SVD h5_rb = ffta.analysis.svd.FF_SVD_filter(h5_avg, clean_components)Reconstructing in batches of 1134 positions. Batchs should be 346.0909652709961 Mb each. Completed reconstruction of data from SVD results. Writing to file. Done writing reconstructed data to file.------------ Actual FF processing!At this point we have: an H5 file with the raw and averaged data. a processed set of the deflection data from PCA So, we now use the ffta package to reconstruct the image. The live imaging below will not work in Jupyter. But, nonetheless this function is operating. It returns 4 new variables as images (in (N, M) shape, rather than Pycroscopy form of (N x M, 2): tfp : the time-to-first-peak data, in time. This is usually the image of interest shift : the frequency shift data, which is mostly equivalent to standard EFM inst_freq : the actual instantaneous frequency data at each pixel. h5_if : the Pycroscopy H5 reference to the processed image file.# The actual FF-trEFM processing. # Note that "live" plotting does not currently work in Jupyter... 
tfp_rb, shift_rb, inst_freq_rb, h5_if = ffta.hdf_utils.analyze_h5.process(h5_svd.file, ref=h5_rb.name) # Save to CSV files ffta.hdf_utils.analyze_h5.save_CSV_from_file(h5_if.file, h5_if.parent.name) # You should make sure to close the file explicitly to preserve the data h5_svd.file.close()def my_loss(y_pred,y_true,weights): if len(y_pred.shape)==len(y_true.shape): window=(y_true>=0).to(torch.float) loss = (F.binary_cross_entropy_with_logits(y_pred,y_true,reduction='none')*window*weights.expand_as(y_true)).mean()/(window.mean()+1e-7) else: window0=(y_true[...,0]>=0).to(torch.float) window1=(y_true[...,1]>=0).to(torch.float) loss0 = F.binary_cross_entropy_with_logits(y_pred,y_true[...,0],reduction='none')*window0*weights.expand_as(y_true[...,0])/(window0.mean()+1e-7) loss1 = F.binary_cross_entropy_with_logits(y_pred,y_true[...,1],reduction='none')*window1*weights.expand_as(y_true[...,1])/(window1.mean()+1e-7) loss = (y_true[...,2]*loss0+(1.0-y_true[...,2])*loss1).mean() return lossclass Metric(): def __init__(self,weights,k=0.03): self.weights=weights self.k=k self.zero() def zero(self): self.loss_sum=0. self.loss_count=0. self.lossf=0. def calc(self,y_pred,y_true,prefix=""): window=(y_true>=0).to(torch.float) loss = (F.binary_cross_entropy_with_logits(y_pred,y_true,reduction='none')*window*self.weights.expand_as(y_true)).mean()/(window.mean()+1e-5) self.lossf=self.lossf*(1-self.k)+loss*self.k self.loss_sum=self.loss_sum+loss*window.sum() self.loss_count=self.loss_count+window.sum() return({prefix+'mloss':self.lossf}) def calc_sums(self,prefix=""): return({prefix+'mloss_tot':self.loss_sum/self.loss_count}) #features=(features-features.mean())/features.std() class SimpleModel(nn.Module): def __init__(self,in_size): super(SimpleModel, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 128, (7,in_size), padding=(3,0)) self.bn0=torch.nn.BatchNorm1d(128) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(128, 128, 5, padding=2) self.bn1=torch.nn.BatchNorm1d(128) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 3, padding=1) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(64, 6, 3, padding=1) def forward(self, x): x = self.conv2d1(x.unsqueeze(1)).squeeze(-1) x = self.bn0(x) x = self.relu0(x) x = self.conv1d1(x) x = self.bn1(x) x = self.relu1(x) x = self.conv1d2(x) x = self.bn2(x) x = self.relu2(x) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class SimpleModel2(nn.Module): def __init__(self,in_size): super(SimpleModel2, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 128, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(128) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(128, 128, 7, padding=3) self.bn1=torch.nn.BatchNorm1d(128) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 5, padding=2) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(64, 6, 3, padding=1) def forward(self, x): x = self.conv2d1(x.unsqueeze(1)).squeeze(-1) x = self.bn0(x) x = self.relu0(x) x = self.conv1d1(x) x = self.bn1(x) x = self.relu1(x) x = self.conv1d2(x) x = self.bn2(x) x = self.relu2(x) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in 
self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class ClassModel(nn.Module): def __init__(self,in_size): super(ClassModel, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 128, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(128) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(128, 128, 7, padding=3) self.bn1=torch.nn.BatchNorm1d(128) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 5, padding=2) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(128, 6, 3, padding=1) self.conv2d1class=torch.nn.Conv2d(1, 128, (9,in_size), padding=(4,0)) self.bn0class=torch.nn.BatchNorm1d(128) self.maxpool1class=torch.nn.MaxPool1d(3) self.conv1d1class=torch.nn.Conv1d(128, 128, 3, padding=1) self.bn1class=torch.nn.BatchNorm1d(128) self.maxpool2class=torch.nn.MaxPool1d(3) self.conv1d2class=torch.nn.Conv1d(128, 64, 2, padding=1) self.bn2class=torch.nn.BatchNorm1d(64) def forward(self, x): z=x x = self.conv2d1(x.unsqueeze(1)).squeeze(-1) x = self.bn0(x) x = self.relu0(x) x = self.conv1d1(x) x = self.bn1(x) x = self.relu1(x) x = self.conv1d2(x) x = self.bn2(x) x = self.relu2(x) z=self.conv2d1class(z.unsqueeze(1)).squeeze(-1) z=self.bn0class(z) z=self.maxpool1class(z) z=self.conv1d1class(z) z=self.maxpool2class(z) z=self.conv1d2class(z) z=self.bn2class(z) z=F.max_pool1d(z,kernel_size=z.shape[-1]) z=z.expand_as(x) x=torch.cat([x,z],1) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class ResModel(nn.Module): def __init__(self,in_size): super(ResModel, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 64, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(64) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(64, 64, 7, padding=3) self.bn1=torch.nn.BatchNorm1d(64) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 5, padding=2) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(192, 6, 3, padding=1) def forward(self, x): x=x.unsqueeze(1) x = self.conv2d1(x).squeeze(-1) x = self.bn0(x) x0 = self.relu0(x) x = self.conv1d1(x0) x = self.bn1(x) x1 = self.relu1(x) x = torch.cat([x0,x1],1) x = self.conv1d2(x) x = self.bn2(x) x2 = self.relu2(x) x = torch.cat([x0,x1,x2],1) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class ResModelPool(nn.Module): def __init__(self,in_size): super(ResModelPool, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 64, (9,in_size),stride=(1,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(64) # self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(64, 64, 7, padding=3) self.bn1=torch.nn.BatchNorm1d(64) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 5, padding=2) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(192, 6, 3, padding=1) def forward(self, x): x=x.unsqueeze(1) x = self.conv2d1(x) x=F.max_pool2d(x,kernel_size=(1,x.shape[-1])).squeeze(-1) x0 = self.bn0(x) # x0 = self.relu0(x) x = self.conv1d1(x0) x = self.bn1(x) x1 = self.relu1(x) x = torch.cat([x0,x1],1) x = self.conv1d2(x) x = self.bn2(x) x2 = 
self.relu2(x) x = torch.cat([x0,x1,x2],1) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class ResDropModel(nn.Module): def __init__(self,in_size,dropout=0.2): super(ResDropModel, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 64, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(64) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(64, 64, 7, padding=3) self.bn1=torch.nn.BatchNorm1d(64) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 5, padding=2) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(192, 6, 3, padding=1) self.dropout=dropout def forward(self, x): x = self.conv2d1(x.unsqueeze(1)).squeeze(-1) x = self.bn0(x) x = F.dropout(x,self.dropout) x0 = self.relu0(x) x = self.conv1d1(x0) # x = self.bn1(x) x = F.dropout(x,self.dropout) x1 = self.relu1(x) x = torch.cat([x0,x1],1) x = self.conv1d2(x) # x = self.bn2(x) x = F.dropout(x,self.dropout) x2 = self.relu2(x) x = torch.cat([x0,x1,x2],1) x = F.dropout(x,self.dropout) out = self.conv1d3(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) fn = partial(torch.clamp,min=0,max=1) class GenReLU(nn.Module): def __init__(self,leak=0,add=0,clamp=None): super(GenReLU, self).__init__() self.leak,self.add=leak,add if isinstance(clamp,tuple): self.clamp = partial(torch.clamp,min=clamp[0],max=clamp[1]) elif clamp: self.clamp = partial(torch.clamp,min=-clamp,max=clamp) else: self.clamp=None def forward(self,x): x = F.leaky_relu(x,self.leak) if self.add: x=x+self.add if self.clamp: x = self.clamp(x) return x class ResModelIn(nn.Module): def __init__(self,in_size): super(ResModelIn, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 64, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(64) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(64, 64, 3, padding=1) self.bn1=torch.nn.BatchNorm1d(64) self.relu1=torch.nn.ReLU() self.conv1d2=torch.nn.Conv1d(128, 64, 3, padding=1) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=torch.nn.ReLU() self.conv1d3=torch.nn.Conv1d(192, 6, 3, padding=1) def forward(self, x): x = x.unsqueeze(1) x = self.conv2d1(x).squeeze(-1) x = self.bn0(x) x0 = self.relu0(x) x = self.conv1d1(x0) x = self.bn1(x) x1 = self.relu1(x) x = torch.cat([x0,x1],1) x = self.conv1d2(x) x = self.bn2(x) x2 = self.relu2(x) x = torch.cat([x0,x1,x2],1) x = self.conv1d3(x) out = x.transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class ResModelInR(nn.Module): def __init__(self,in_size): super(ResModelInR, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 64, (9,in_size), padding=(4,0)) self.bn0=torch.nn.BatchNorm1d(64) self.relu0= GenReLU(leak=0.01,add=-0.3,clamp=(-1,3)) self.conv1d1=torch.nn.Conv1d(64, 64, 3, padding=1) self.bn1=torch.nn.BatchNorm1d(64) self.relu1=GenReLU(leak=0.01,add=-0.3,clamp=(-1,3)) self.conv1d2=torch.nn.Conv1d(128, 64, 3, padding=1) self.bn2=torch.nn.BatchNorm1d(64) self.relu2=GenReLU(leak=0,add=0,clamp=(0,2)) self.conv1d3=torch.nn.Conv1d(192, 6, 3, 
padding=1) def forward(self, x): x = x.unsqueeze(1) x = self.conv2d1(x).squeeze(-1) x = self.bn0(x) x0 = self.relu0(x) x = self.conv1d1(x0) x = self.bn1(x) x1 = self.relu1(x) x = torch.cat([x0,x1],1) x = self.conv1d2(x) x = self.bn2(x) x2 = self.relu2(x) x = torch.cat([x0,x1,x2],1) x = self.conv1d3(x) out = x.transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) class BaseModel(nn.Module): def __init__(self): super(BaseModel, self).__init__() self.dont_do_grad=[] self.conv2d1=torch.nn.Conv2d(1, 128, (5,2208), padding=(2,0)) self.relu0=torch.nn.ReLU() self.conv1d1=torch.nn.Conv1d(128, 6, 1) def forward(self, x): x = self.conv2d1(x.unsqueeze(1)).squeeze(-1) x = self.relu0(x) out = self.conv1d1(x).transpose(-1,-2) return out def no_grad(self): for param in self.parameters(): param.requires_grad=False def do_grad(self): for n,p in self.named_parameters(): p.requires_grad= not any(nd in n for nd in self.dont_do_grad) %matplotlib nbagg for num_split in range(3): multi=3 model_name,version = 'se_resnet101' , 'classifier_splits' print (model_name,version,num_split) pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features.shape features=features.reshape(features.shape[0]//4,4,-1) features.shape split_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].SeriesI.unique() split_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique() np.random.seed(SEED+num_split) torch.manual_seed(SEED+num_split) torch.cuda.manual_seed(SEED+num_split) torch.backends.cudnn.deterministic = True batch_size=16 num_workers=18 num_epochs=24 klr=1 weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device) train_dataset=FullHeadDataset(train_df, split_train, features, 'SeriesI', 'ImagePositionZ', hemorrhage_types, multi=multi) validate_dataset=FullHeadDataset(train_df, split_validate, torch.cat([features[:,i,:] for i in range(4)],-1), 'SeriesI', 'ImagePositionZ', hemorrhage_types) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_{}'.format(multi) _=model.to(device) #mixup=Mixup(device=device) loss_func=my_loss #fig,ax = plt.subplots(figsize=(10,7)) #gr=loss_graph(fig,ax,num_epochs,len(train_dataset)//batch_size+1,limits=[0.02,0.06]) num_train_optimization_steps = num_epochs*(len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0)) sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=2,tau=1) optimizer = BertAdam(model.parameters(),lr=klr*1e-3,schedule=sched) history,best_model= model_train(model, optimizer, train_dataset, batch_size, num_epochs, loss_func, weights=weights, do_apex=False, validate_dataset=validate_dataset, param_schedualer=None, weights_data=None, metric=Metric(torch.tensor([1.,1.,1.,1.,1.,2.])), return_model=True, best_average=3, num_workers=num_workers, sampler=None, graph=None) torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version,num_split)) %matplotlib nbagg for num_split in range(3): multi=3 model_name,version = 'se_resnext101_32x4d' , 'classifier_splits' print (model_name,version,num_split) pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() 
features.shape features=features.reshape(features.shape[0]//4,4,-1) features.shape split_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].SeriesI.unique() split_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique() np.random.seed(SEED+num_split) torch.manual_seed(SEED+num_split) torch.cuda.manual_seed(SEED+num_split) torch.backends.cudnn.deterministic = True batch_size=16 num_workers=18 num_epochs=24 klr=1 weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device) train_dataset=FullHeadDataset(train_df, split_train, features, 'SeriesI', 'ImagePositionZ', hemorrhage_types, multi=multi) validate_dataset=FullHeadDataset(train_df, split_validate, torch.cat([features[:,i,:] for i in range(4)],-1), 'SeriesI', 'ImagePositionZ', hemorrhage_types) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_{}'.format(multi) _=model.to(device) #mixup=Mixup(device=device) loss_func=my_loss #fig,ax = plt.subplots(figsize=(10,7)) #gr=loss_graph(fig,ax,num_epochs,len(train_dataset)//batch_size+1,limits=[0.02,0.06]) num_train_optimization_steps = num_epochs*(len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0)) sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=2,tau=1) optimizer = BertAdam(model.parameters(),lr=klr*1e-3,schedule=sched) history,best_model= model_train(model, optimizer, train_dataset, batch_size, num_epochs, loss_func, weights=weights, do_apex=False, validate_dataset=validate_dataset, param_schedualer=None, weights_data=None, metric=Metric(torch.tensor([1.,1.,1.,1.,1.,2.])), return_model=True, best_average=3, num_workers=num_workers, sampler=None, graph=None) torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version,num_split)) %matplotlib nbagg for num_split in range(3): multi=3 model_name,version = 'Densenet161_3' , 'classifier_splits' print (model_name,version,num_split) pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta2',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features.shape features=features.reshape(features.shape[0]//4,4,-1) features.shape split_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].SeriesI.unique() split_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique() np.random.seed(SEED+num_split) torch.manual_seed(SEED+num_split) torch.cuda.manual_seed(SEED+num_split) torch.backends.cudnn.deterministic = True batch_size=16 num_workers=18 num_epochs=24 klr=1 weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device) train_dataset=FullHeadDataset(train_df, split_train, features, 'SeriesI', 'ImagePositionZ', hemorrhage_types, multi=multi) validate_dataset=FullHeadDataset(train_df, split_validate, torch.cat([features[:,i,:] for i in range(4)],-1), 'SeriesI', 'ImagePositionZ', hemorrhage_types) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_{}'.format(multi) _=model.to(device) #mixup=Mixup(device=device) loss_func=my_loss #fig,ax = plt.subplots(figsize=(10,7)) #gr=loss_graph(fig,ax,num_epochs,len(train_dataset)//batch_size+1,limits=[0.02,0.06]) num_train_optimization_steps = num_epochs*(len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0)) sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=2,tau=1) optimizer = BertAdam(model.parameters(),lr=klr*1e-3,schedule=sched) history,best_model= 
model_train(model, optimizer, train_dataset, batch_size, num_epochs, loss_func, weights=weights, do_apex=False, validate_dataset=validate_dataset, param_schedualer=None, weights_data=None, metric=Metric(torch.tensor([1.,1.,1.,1.,1.,2.])), return_model=True, best_average=3, num_workers=num_workers, sampler=None, graph=None) torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version,num_split)) %matplotlib nbagg for num_split in range(3): multi=3 model_name,version = 'Densenet169_3' , 'classifier_splits' print (model_name,version,num_split) pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta2',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features.shape features=features.reshape(features.shape[0]//4,4,-1) features.shape split_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].SeriesI.unique() split_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique() np.random.seed(SEED+num_split) torch.manual_seed(SEED+num_split) torch.cuda.manual_seed(SEED+num_split) torch.backends.cudnn.deterministic = True batch_size=16 num_workers=18 num_epochs=24 klr=1 weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device) train_dataset=FullHeadDataset(train_df, split_train, features, 'SeriesI', 'ImagePositionZ', hemorrhage_types, multi=multi) validate_dataset=FullHeadDataset(train_df, split_validate, torch.cat([features[:,i,:] for i in range(4)],-1), 'SeriesI', 'ImagePositionZ', hemorrhage_types) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_{}'.format(multi) _=model.to(device) #mixup=Mixup(device=device) loss_func=my_loss #fig,ax = plt.subplots(figsize=(10,7)) #gr=loss_graph(fig,ax,num_epochs,len(train_dataset)//batch_size+1,limits=[0.02,0.06]) num_train_optimization_steps = num_epochs*(len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0)) sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=2,tau=1) optimizer = BertAdam(model.parameters(),lr=klr*1e-3,schedule=sched) history,best_model= model_train(model, optimizer, train_dataset, batch_size, num_epochs, loss_func, weights=weights, do_apex=False, validate_dataset=validate_dataset, param_schedualer=None, weights_data=None, metric=Metric(torch.tensor([1.,1.,1.,1.,1.,2.])), return_model=True, best_average=3, num_workers=num_workers, sampler=None, graph=None) torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version,num_split)) %matplotlib nbagg for num_split in range(3): multi=3 model_name,version = 'Densenet201_3' , 'classifier_splits' print (model_name,version,num_split) pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features.shape features=features.reshape(features.shape[0]//4,4,-1) features.shape split_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].SeriesI.unique() split_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique() np.random.seed(SEED+num_split) torch.manual_seed(SEED+num_split) torch.cuda.manual_seed(SEED+num_split) torch.backends.cudnn.deterministic = True batch_size=16 num_workers=18 num_epochs=24 klr=1 weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device) train_dataset=FullHeadDataset(train_df, split_train, features, 'SeriesI', 'ImagePositionZ', hemorrhage_types, multi=multi) 
validate_dataset=FullHeadDataset(train_df, split_validate, torch.cat([features[:,i,:] for i in range(4)],-1), 'SeriesI', 'ImagePositionZ', hemorrhage_types) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_{}'.format(multi) _=model.to(device) #mixup=Mixup(device=device) loss_func=my_loss #fig,ax = plt.subplots(figsize=(10,7)) #gr=loss_graph(fig,ax,num_epochs,len(train_dataset)//batch_size+1,limits=[0.02,0.06]) num_train_optimization_steps = num_epochs*(len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0)) sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=2,tau=1) optimizer = BertAdam(model.parameters(),lr=klr*1e-3,schedule=sched) history,best_model= model_train(model, optimizer, train_dataset, batch_size, num_epochs, loss_func, weights=weights, do_apex=False, validate_dataset=validate_dataset, param_schedualer=None, weights_data=None, metric=Metric(torch.tensor([1.,1.,1.,1.,1.,2.])), return_model=True, best_average=3, num_workers=num_workers, sampler=None, graph=None) torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version,num_split))Densenet201_3 classifier_splits 0Mid Inference for sanitypred_list=[] pred_list_tmp=[] for num_split in tqdm_notebook(range(3)): model_name,version = 'Densenet169_3' , 'classifier_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) pred169=torch.cat(pred_list_tmp,-1).mean(-1) pred_list_tmp=[] for num_split in tqdm_notebook(range(3)): model_name,version = 'Densenet161_3' , 'classifier_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) pred161=torch.cat(pred_list_tmp,-1).mean(-1) pred_list_tmp=[] for num_split in tqdm_notebook(range(3)): model_name,version = 'Densenet201_3' , 'classifier_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' 
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) pred201=torch.cat(pred_list_tmp,-1).mean(-1) pred_list_tmp=[] for num_split in tqdm_notebook(range(3)): model_name,version = 'se_resnext101_32x4d' , 'classifier_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) predse=torch.cat(pred_list_tmp,-1).mean(-1) pred_list_tmp=[] for num_split in tqdm_notebook(range(5)): model_name,version = 'se_resnext101_32x4d' , 'new_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) predse=torch.cat(pred_list_tmp,-1).mean(-1) pred_list_tmp=[] for num_split in tqdm_notebook(range(3)): model_name,version = 'se_resnet101' , 'classifier_splits' pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'rb') features=pickle.load(pickle_file) pickle_file.close() features=features.reshape(features.shape[0]//8,8,-1) model=ResModelPool(features.shape[-1]) version=version+'_fullhead_resmodel_pool2_3' model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device))) test_dataset=train_dataset=FullHeadDataset(test_df, test_df.SeriesI.unique(), features, 'SeriesI', 'ImagePositionZ',multi=4) for i in tqdm_notebook(range(32),leave=False): pred_list.append(torch.sigmoid(model_run(model,test_dataset,do_apex=False,batch_size=128))[...,None]) pred_list_tmp.append(pred_list[-1]) predseres=torch.cat(pred_list_tmp,-1).mean(-1) pred=torch.cat(pred_list_tmp,-1).mean(-1) pred.shape len(pred_list) pred_list[0].shape pred=torch.cat(pred_list,-1).mean(-1) images_id_list=[] dummeys=[] image_arr=test_df.PatientID.values ref_arr=test_df.SeriesI.values order_arr=test_df.ImagePositionZ.values for s in tqdm_notebook(test_df.SeriesI.unique()): dumm=np.zeros(60) head_idx = np.where(ref_arr==s)[0] 
sorted_head_idx=head_idx[np.argsort(order_arr[head_idx])] images_id_list.append(image_arr[sorted_head_idx]) dumm[0:head_idx.shape[0]]=1 dummeys.append(dumm) image_ids=np.concatenate(images_id_list) preds=pred.reshape(pred.shape[0]*pred.shape[1],6).numpy()[np.concatenate(dummeys)==1] image_ids.shape preds.shape len(pred_list) images_id_list=[] dummeys=[] image_arr=test_df.PatientID.values ref_arr=test_df.SeriesI.values order_arr=test_df.ImagePositionZ.values for s in tqdm_notebook(test_df.SeriesI.unique()): dumm=np.zeros(60) head_idx = np.where(ref_arr==s)[0] sorted_head_idx=head_idx[np.argsort(order_arr[head_idx])] images_id_list.append(image_arr[sorted_head_idx]) dumm[0:head_idx.shape[0]]=1 dummeys.append(dumm) image_ids=np.concatenate(images_id_list) preds=pred.reshape(pred.shape[0]*pred.shape[1],6).numpy()[np.concatenate(dummeys)==1] image_ids.shape preds.shape submission_df=get_submission_ids(image_ids,torch.tensor(preds),do_sigmoid=False) submission_df.head(12) submission_df.shape sub_num=51 submission_df.to_csv('/media/hd/notebooks/data/RSNA/submissions/submission{}.csv'.format(sub_num), index=False, columns=['ID','Label']) #!/home/reina/anaconda3/bin/kaggle competitions submit rsna-intracranial-hemorrhage-detection -f /media/hd/notebooks/data/RSNA/submissions/submission51.csv -m "all models, se weight*2, with post pool_3 tta 64, mean after sigmoid"100%|███████████████████████████████████████| 16.9M/16.9M [01:52<00:00, 157kB/s] 100%|███████████████████████████████████████| 16.9M/16.9M [01:52<00:00, 157kB/s] Successfully submitted to RSNA Intracranial Hemorrhage DetectionSuccessfully submitted to RSNA Intracranial Hemorrhage DetectionThis notebook handles the processing of PA docket data that has been downloaded in JSON format and converted into a CSV with the following columns:* docket_no: Court docket number* status: Status of this docket* gender: Offender's gender* race: Offender's race* county: County of the court managing this docket* offender_id: Hashed value for the Offender* offense_age: Age computed from DOB* seq_no: Sequential numbering of charges* statute: Statute code in violation* grade: Grade of the crime* statute_description: Statute description* offense_date: Date of the offense* description: Most likely the same as statute description* offense_tracking_no: Tracking number for the offense for multiple offenders involved* disposition: Disposition of the charge* sentence_date: Sentencing date (if any)* sentence_start: Start of the sentence to be served (if any)* sentence_type: Type of the sentence meted (if any)* sentence_min_pd: Minimum sentence (if any)* sentence_max_pd: Maximum sentence (if any)import json import os import pandas as pd import hashlib from dateutil.relativedelta import relativedelta from tqdm import tqdm_notebook def get_bio(json_data): """ Retrieves the biographical information """ return dict( docket_no = json_data["docketNumber"], status = json_data["statusName"], gender = json_data["caseParticipants"][0]["gender"], dob = json_data["caseParticipants"][0]["primaryDateOfBirth"], race = json_data["caseParticipants"][0]["race"], first_name = json_data["caseParticipants"][0]["participantName"]["firstName"], middle_name = json_data["caseParticipants"][0]["participantName"]["middleName"], last_name = json_data["caseParticipants"][0]["participantName"]["lastName"], county = json_data["county"]["name"] ) def get_offenses(json_data): """ Retrieves the list of offenses """ offenses = map( lambda x: ( x["sequenceNumber"], x["statuteName"], x["grade"], 
x["statuteDescription"], x["offenseDate"], x["description"], x["otn"]), json_data["offenses"]) return pd.DataFrame( offenses, columns=['seq_no', 'statute', 'grade', 'statute_description', 'offense_date', 'description', 'offense_tracking_no']) def get_dispositions(json_data): """Retrieves the disposition (if applicable) of the offenses""" def process_sentencing(sentence_section): """Extracts sentencing as part of the disposition""" if len(sentence_section) == 0: return (None, None, None, None, None) else: latest_sentence = sentence_section[-1] return (latest_sentence["eventDate"], latest_sentence["sentenceTypes"][0]["startDateTime"], latest_sentence["sentenceTypes"][0]["sentenceType"], latest_sentence["sentenceTypes"][0]["minPeriod"], latest_sentence["sentenceTypes"][0]["maxPeriod"]) if len(json_data["dispositionEvents"]) > 0: disposition_section = json_data["dispositionEvents"][-1]["offenseDispositions"] dispositions = map(lambda x: ( x["sequenceNumber"], x["disposition"]) + process_sentencing(x["sentences"]), disposition_section) else: dispositions = None return pd.DataFrame( dispositions, columns=['seq_no', 'disposition', 'sentence_date', 'sentence_start', 'sentence_type', 'sentence_min_pd', 'sentence_max_pd'] ) def offense_age(row): """Computes the age of the offender at the time of the offense""" if row["offense_date"] is pd.NaT or row["dob"] is pd.NaT: # If the date is not valid return None return None else: # Else get the number of years between offense date and DOB return relativedelta(row["offense_date"].date(), row["dob"].date()).years def get_records(json_data): """Pieces together all relevant pieces from the docket""" # Retrieve components of the data bio = get_bio(json_data) # Biographical information off = get_offenses(json_data) # Charges disps = get_dispositions(json_data) # Disposition of the charges # Merge the data together merged = off.merge(disps, on="seq_no", how='left') # Federate out the biographical data so this is de-normalized for k, v in get_bio(json_data).items(): merged[k] = v # Convert date fields into datetime merged["dob"] = pd.to_datetime(merged["dob"], errors = 'coerce') merged["offense_date"] = pd.to_datetime(merged["offense_date"], errors = 'coerce') merged["sentence_date"] = pd.to_datetime(merged["sentence_date"], errors = 'coerce') # Construct a unique ID by hashing the names and DOB uid_str = "".join(filter(None, (bio["first_name"], bio["middle_name"], bio["last_name"], bio["dob"]))) merged["offender_id"] = hashlib.sha256(uid_str.encode("utf-8")).hexdigest()[:12] # Compute age at time of each offense merged["offense_age"] = merged.apply(offense_age, axis=1) # Drop sensitive columns merged = merged.drop(columns=["first_name", "middle_name", "last_name", "dob"]) # Re-order columns cols = merged.columns.tolist() cols = cols[len(cols)-7:] + cols[0:-7] return merged[cols] input_path = "data/pa_json/" output_path = "data/output/" appended_data = [] def process_file(json_file): with open(json_file) as f: try: data = json.load(f) appended_data.append(get_records(data)) except: print(json_file) raise for i, input_file in enumerate(tqdm_notebook(os.listdir(input_path))): if input_file.endswith(".json"): process_file(path + input_file) if i > 0 and i % 10000 == 0: df = pd.concat(appended_data) df.to_csv(f"data/output/pa_data_{i}.csv") appended_data = [] df = pd.concat(appended_data) df.to_csv(f"{output_path}pa_data_{i}.csv") appended_data = [] pa_data = pd.concat([pd.read_csv(f"{output_path}{x}", low_memory=False) for x in os.listdir(output_path)], axis=0) # 
Create mapping for salted/hashed docket id import os salt = os.urandom(32) hashed_docket_id = pa_data.apply(lambda row: hashlib.sha256(f"{salt}{row['docket_no']}".encode("utf-8")).hexdigest()[:12], axis=1) docket_map = pd.concat([hashed_docket_id, pa_data["docket_no"]], axis=1) docket_map.columns = ["hash_docket_no","real_docket_no"] docket_map.drop_duplicates().to_csv(f"{output_path}docket_mapping.csv") # Replace docket number pa_data["docket_no"] = hashed_docket_id pa_data = pa_data.drop(["Unnamed: 0"], axis=1) pa_data.to_csv(f"{output_path}pa_data_all.csv.gz", compression='gzip') len(pa_data)dinner_recipe = '''
<html><body><table>
<tr><th>amt</th><th>unit</th><th>item</th></tr>
<tr><td>24</td><td>slices</td><td>baguette</td></tr>
<tr><td>2+</td><td>tbsp</td><td>olive oil</td></tr>
<tr><td>1</td><td>cup</td><td>tomatoes</td></tr>
<tr><td>1</td><td>jar</td><td>pesto</td></tr>
</table></body></html>
''' import xml.etree.ElementTree as etree tree = etree.fromstring(dinner_recipe) pantry = set(['olive oil', 'pesto']) for ingredient in tree.getiterator('tr'): amt, unit, item = ingredient if item.tag == "td" and item.text not in pantry: print ("%s: %s %s" % (item.text, amt.text, unit.text))baguette: 24 slices tomatoes: 1 cupOriginal Graphic: https://twitter.com/jburnmurdoch/status/1319277057650556936/photo/1by (@jburnmurdoch)import matplotlib.pyplot as plt import matplotlib as mpl import numpy as np from highlight_text import HighlightText, ax_text, fig_text # you can also use the financial-times theme that themepy provides # import themepy # theme = themepy.Theme('financial-times-light') def identify_axes(ax_dict, fontsize=48): """ Helper to identify the Axes in the examples below. Draws the label in a large font in the center of the Axes. SOURCE: https://matplotlib.org/stable/tutorials/provisional/mosaic.html Parameters ---------- ax_dict : Dict[str, Axes] Mapping between the title / label and the Axes. fontsize : int, optional How big the label should be """ kw = dict(ha="center", va="center", fontsize=fontsize, color="darkgrey") for k, ax in ax_dict.items(): ax.text(0.5, 0.5, k, transform=ax.transAxes, **kw) # Here we use matplotlibs new subplot_mosaic feature to create a complex layout ft_layout = [12*['title'], [0, 0, 0, 0, 0, 'US', 'US', 0, 0, 0, 0, 0], ['AK', 0, 0, 0, 0, 'US', 'US', 0, 0, 0, 0, 'ME'], [0, 'WA', 'ID', 'MT', 'ND', 'MN', 'WI', 'MI', 0, 'VT', 'NH', 'MA'], [0, 'OR', 'NV', 'WY', 'SD', 'IA', 'IN', 'OH', 'PA', 'NY', 'CT', 'RI'], [0, 'CA', 'UT', 'CO', 'NE', 'MO', 'IL', 'KY', 'MD', 'DC', 'NJ', 'NYC'], [0, 0, 'AZ', 'NM', 'KS', 'AR', 'TN', 'WV', 'VA', 'NC', 'DE', 0], [0, 0, 0, 0, 'OK', 'LA', 'MS', 'AL', 'GA', 'SC', 0, 0], ['HI', 0, 0, 0, 'TX', 0, 0, 0, 0, 'FL', 0, 0], 12*['original graphic from @jburnmurdoch of the Financial Times']] # 0 defines empty spaces, subplot_mosaic takes in an "empty_sentinel" parameter fig, axd = plt.subplot_mosaic(ft_layout, constrained_layout=False, empty_sentinel=0, figsize=(24, 18)) plt.sca(axd['title']) title=\ ax_text(s='\n\n' 'Numbers of deaths per week from all causes, <2020 > vs recent years: < Shading > indicates ', x=0, y=0.4, va='center', highlight_textprops=[{"size": 20, "weight": "bold"}, {"color": "k"}, {"alpha": 0}, {"color": "k"}, {"color": "#cb181d"}], vsep=8, ax=axd['title'], fontsize = 14 ); axd['title'].axis('off'); axd['original graphic from @jburnmurdoch of the Financial Times'].axis('off'); ax_title = axd.pop('title') identify_axes(axd) # for ax in axd.values(): ax.tick_params(axis='both', colors='None') # Recreating @jburnmurdoch's plot, we specify for which of the text highlights # we want to create an axes inset on top of the highlighted TextAreas # Here we do that for the 2nd <2020>, 3rd and 4th text highlights # the HighlightText class offers a function to create axes on top of the bboxes of the highlights title.make_highlight_insets([False, True, True, True, False]) # returns a list of [None (for False), and Axes objects (for True)] # and are stored in `.highlight_axes` insets = title.highlight_axes # plot onto the inset axes # insets[0] and insets[4] hold None for no inset insets[1].plot(np.arange(0, 1, 0.1), [0.05, 0, 0, 0.05, 0.1, 0.05, 0.075, 0.1, 0.5, 0.85], color='#cb181d', lw=2.5) insets[2].plot(np.random.normal(loc=0, scale=0.5, size=(20, 3)).cumsum(axis=0), color='#d9d9d9', lw=1) insets[3].fill_between(np.arange(0, 1, 0.1), [0.05, 0.9, 1, 1, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7], color='#cb181d', alpha = 0.5) 
plt.show() #fig.savefig('../examples/example_financial-times_jburnmurdoch.png', dpi=200, bbox_inches='tight')COVID-19: Face Mask Recognition Submitted by: 961152147 318471208 Link to the github: https://github.com/Stayermax/Face_Mask_Recognitionimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import math from copy import deepcopy from collections import OrderedDict import os import matplotlib.image as mpimg import pickleExploratory Data Analysis Visualizationtrain_files = os.listdir('train/') test_files = os.listdir('test/') print(f"Train-set Size: {len(train_files)}") print(f"Test-set Size: {len(test_files)}")Train-set Size: 18259 Test-set Size: 6086Sample from the train data:images_num = 16 fig, axs = plt.subplots(4, 4) for i in range(images_num): image = mpimg.imread(f"train/{train_files[i]}") axs[int(i/4), i%4].imshow(image) plt.xlabel(f"{train_files[i]}") axs[int(i / 4), i % 4].title.set_text(f"{train_files[i]}") axs[int(i / 4), i % 4].axes.get_xaxis().set_visible(False) axs[int(i / 4), i % 4].axes.get_yaxis().set_visible(False) plt.subplots_adjust(left=None, bottom=None, right=None, top=1.5, wspace=None, hspace=None) plt.show()Sample from the test data:images_num = 16 fig, axs = plt.subplots(4, 4) for i in range(images_num): image = mpimg.imread(f"test/{test_files[i]}") axs[int(i/4), i%4].imshow(image) plt.xlabel(f"{train_files[i]}") axs[int(i / 4), i % 4].title.set_text(f"{train_files[i]}") axs[int(i / 4), i % 4].axes.get_xaxis().set_visible(False) axs[int(i / 4), i % 4].axes.get_yaxis().set_visible(False) plt.subplots_adjust(left=None, bottom=None, right=None, top=1.5, wspace=None, hspace=None) plt.show()Insights We can compare a number of masked and unmasked people in the dataset.train_masked_num = 0 train_unmasked_num = 0 for image_name in train_files: if("_1." in image_name): train_masked_num +=1 elif("_0." in image_name): train_unmasked_num +=1 else: image = mpimg.imread(f"train/{image_name}") plt.imshow(image) plt.show() print(f"The number of people that wear the mask properly in the train dataset is: {train_masked_num}") print(f"The number of people that wear the mask improperly in the train dataset is: {train_unmasked_num}") height = [train_masked_num, train_unmasked_num] bars = ('Masked', 'Unmasked') y_pos = np.arange(len(bars)) plt.bar(y_pos, height) plt.xticks(y_pos, bars) plt.ylabel("No. of pictures") plt.title("Train data") plt.show() test_masked_num = 0 test_unmasked_num = 0 for image_name in test_files: if("_1.jpg" in image_name): test_masked_num +=1 elif("_0.jpg" in image_name): test_unmasked_num +=1 else: image = mpimg.imread(f"train/{image_name}") plt.imshow(image) plt.show() print(f"The number of people that wear the mask properly in the test dataset is: {test_masked_num}") print(f"The number of people that wear the mask improperly in the test dataset is: {test_unmasked_num}") height = [test_masked_num, test_unmasked_num] bars = ('Masked', 'Unmasked') y_pos = np.arange(len(bars)) plt.bar(y_pos, height) plt.xticks(y_pos, bars) plt.ylabel("No. of pictures") plt.title("Test data") plt.show()We can see that the size of the classes in both datasets is relatively equal as well as proportioned between the classes. Hence we can assume that the train dataset can be a representative sample of the population. Experiments Model 1: CNN modelTo define our model, we subclass the LightningModule of PyTorch Lightning. The model consists of 4 convolution layers followed by 2 linear layers. 
We used ReLU as the activation function, and the MaxPool2d as the pooling layer. We then initialize the weights of these layers with xavier_uniform as this makes the network train better. Model architecturefrom report_copy import print_summary print_summary()/home/vitaly/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint8 = np.dtype([("qint8", np.int8, 1)]) /home/vitaly/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) /home/vitaly/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint16 = np.dtype([("qint16", np.int16, 1)]) /home/vitaly/.local/lib/python3.6/site-packages/tensorboard/compat/ten[...]Parameters number: 2,193,474 Data preprocessing classFor the first model we transformed all the images into 100 by 100 pixels squared images.import cv2 from torch import long, tensor from torch.utils.data.dataset import Dataset from torchvision.transforms import Compose, Resize, ToPILImage, ToTensor class MaskDataset(Dataset): """ Masked faces dataset 0 = 'not correct mask wearing' 1 = 'correct mask wearing' """ def __init__(self, dataFrame): self.dataFrame = dataFrame self.transformations = Compose([ ToPILImage(), Resize((100, 100)), ToTensor(), # [0, 1] ]) def __getitem__(self, key): if isinstance(key, slice): raise NotImplementedError('slicing is not supported') row = self.dataFrame.iloc[key] return { 'name': row['image'], 'image': self.transformations(cv2.imread(row['image'])), 'mask': tensor([row['mask']], dtype=long), } def __len__(self): return len(self.dataFrame.index)Loss FuntionWe used Cross Entropy function from pytorch library. OptimizerWe tried a few options (for example, SGD from torch.optim), but eventually ended up using the Adam optimizer from the pytorch library. We also tried several learning rates and settled on 0.00001 which yielded the best results. RegularizationWe tried using the batch normalization only to discover we obtained better results without applying it, thus we opted not to use regularization. Graphs for the first model:# Training and testing results were saved into csv files by epochs. 
def results_graphs(): train_df = pd.read_csv('graph_data/train_results.csv') test_df = pd.read_csv('graph_data/test_results.csv') # LOSS: sns.set_style("darkgrid", {"axes.facecolor": ".9"}) train_loss = sns.lineplot(x=range(len(train_df)), y='loss', data=train_df, label="Train") test_loss = sns.lineplot(x=range(len(test_df)), y='loss', data=test_df, label="Test") plt.show() # AUC: train_auc = sns.lineplot(x=range(len(train_df)), y='ROC_AUC', data=train_df, label="Train") test_auc = sns.lineplot(x=range(len(test_df)), y='ROC_AUC', data=test_df, label="Test") plt.show() # F1-score: train_auc = sns.lineplot(x=range(len(train_df)), y='f1_score', data=train_df, label="Train") test_auc = sns.lineplot(x=range(len(test_df)), y='f1_score', data=test_df, label="Test") plt.show() results_graphs()Model 1 results: Best F1-score acheived for the first model is 95.80 on the test set. Model 2: mobilenet_v2 We used default model from pytorch library, which shows good results on such tasks as image processing.This model is well know and described. Full model description can be found via the link (https://arxiv.org/pdf/1801.04381v4.pdf) Model architecturefrom report_copy import print_summary2 print_summary2()# ================================================================ Conv2d-1 [-1, 32, 112, 112] 864 BatchNorm2d-2 [-1, 32, 112, 112] 64 ReLU6-3 [-1, 32, 112, 112] 0 Conv2d-4 [-1, 32, 112, 112] 288 BatchNorm2d-5 [-1, 32, 112, 112] 64 ReLU6-6 [-1, 32, 112, 112] 0 Conv2d-7 [-1, 16, 112, 112] 512 BatchNorm2d-8 [-1, 16, 112, 112] 32 InvertedResidual-9 [-1, 16, 112, 112] 0 Conv2d-10 [-1, 96, 112, 112] 1,536 BatchNorm2d-11 [-1, 96, 112, 112] 192 ReLU6-12 [-1, 96, 112, 112] 0 Conv2d-13 [-1, 96, 56, 56] 864 BatchNorm2d-14 [-1, 96, 56, 56] 192 ReLU6-15 [-1, 9[...]Parameters number: 2,226,434 Data preprocessing We decided to work with bigger image sizes in the second model. To do so, we transformed every image into a 224x224 pixels squared image. We didn't create a separate preprocessing class for the second model, since we used the ImageFolder function from torchvision.datasets library and sorted negative and postitive training objects into two different folders. Loss FuntionWe used the negative log likelihood loss. function from pytorch library. OptimizerWe used Adam optimizer from pytorch library. We also tried several learning rates and settled on 0.003 which yielded the best results. RegularizationWe tried using the batch normalization for the seconf model as well and again, obtained better results without applying it, thus we opted not to use regularization. 
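For reference, here is a minimal sketch of the Model 2 training setup described above: torchvision's mobilenet_v2 with its classifier replaced by a two-class head ending in log_softmax (so the negative log likelihood loss applies), trained with Adam at a learning rate of 0.003 on 224x224 images loaded through ImageFolder. The folder path, batch size and pretrained flag are placeholders, and this is an illustration rather than the notebook's exact code.

import torch
import torch.nn as nn
from torchvision import datasets, models, transforms

transform = transforms.Compose([
    transforms.Resize((224, 224)),  # the second model works on 224x224 inputs
    transforms.ToTensor(),
])
# Assumes the training images were sorted into class subfolders, e.g. train_folder/masked and train_folder/unmasked
train_ds = datasets.ImageFolder('train_folder', transform=transform)
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32, shuffle=True)

model = models.mobilenet_v2(pretrained=True)
# Replace the 1000-class ImageNet head with a 2-class head producing log-probabilities
model.classifier[1] = nn.Sequential(nn.Linear(model.last_channel, 2), nn.LogSoftmax(dim=1))

criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

model.train()
for images, labels in train_loader:  # one epoch of training steps
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()

Pairing LogSoftmax with NLLLoss is equivalent to feeding raw logits to CrossEntropyLoss; the split form is kept here only to match the loss function named above.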
Graphs for the second model:def results_graphs_m2(): train_df = pd.read_csv('graph_data_m2/train_results.csv') test_df = pd.read_csv('graph_data_m2/test_results.csv') # LOSS: sns.set_style("darkgrid", {"axes.facecolor": ".9"}) train_loss = sns.lineplot(x=range(len(train_df)), y='loss', data=train_df, label="Train") test_loss = sns.lineplot(x=range(len(test_df)), y='loss', data=test_df, label="Test") plt.show() # AUC: train_auc = sns.lineplot(x=range(len(train_df)), y='ROC_AUC', data=train_df, label="Train") test_auc = sns.lineplot(x=range(len(test_df)), y='ROC_AUC', data=test_df, label="Test") plt.show() # F1-score: train_auc = sns.lineplot(x=range(len(train_df)), y='f1_score', data=train_df, label="Train") test_auc = sns.lineplot(x=range(len(test_df)), y='f1_score', data=test_df, label="Test") plt.show() results_graphs_m2()Gradiente Descendente - .O gradiente descendente é um algoritmo de otimização que usa as derivadas da função objetivopara encontrar o ponto com maior inclinação. No processo, as variáveis a otimizar são deslocadasem uma direção negativa o qual reduzirá o valor da função objetivo.Algoritmo geral para atualizar os pesos com gradiente descendente: Vamos implementar o algoritmo do Gradiente Descendente! Importando a bibliotecaimport numpy as npFunção do cáculo da sigmóidedef sigmoid(x): return 1/(1+np.exp(-x))Derivada da função sigmóidedef sigmoid_prime(x): return sigmoid(x) * (1 - sigmoid(x))Vetor dos valores de entrada e saídasx = np.array([1, 2, 3, 4]) y = np.array(0.5) b = 0.5Pesos iniciais das ligações sinápticas Nota: Inicializados aleatóriamentew = np.random.randn(4)/10 wTaxa de AprendizagemtaxaAp = 0.8Calcule um degrau de descida gradiente para cada peso# TODO: Calcule a combinação linear de entradas e pesos sinápticos h = np.dot(x, w)+b # TODO: Calcule a saída da Rede Neural saida = sigmoid(h) saida # TODO: Calcule o erro da Rede Neural erro = erro = y - saida erro # TODO: Calcule o termo de erro erro_termo = erro * sigmoid_prime(h) erro_termo # TODO: Calcule a variação do peso del_w = taxaAp * erro_termo * x del_wGroupby=======import xarray import climtas import dask.array import pandas import numpySay we have daily input data for several years, that we want to convert to a daily mean climatologytime = pandas.date_range('20010101', '20040101', freq='D', closed='left') data = dask.array.random.random((len(time),50,100), chunks=(90,25,25)) lat = numpy.linspace(-90, 90, data.shape[1]) lon = numpy.linspace(-180, 180, data.shape[2], endpoint=False) da = xarray.DataArray(data, coords=[('time', time), ('lat', lat), ('lon', lon)], name='temperature') daThe Xarray way is to use [xarray.DataArray.groupby](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.groupby.html), however that is an expensive function to run - we started with 104 tasks and 104 chunks in the Dask graph, and this has exploded to 23,464 tasks and 2920 chunks. For a large dataset this increase in chunk counts really bogs down Dask.The reason for this is that with `groupby` Xarray will create a new output chunk for each individual day - you can see the chunk size of the output is now `(1, 25, 25)`.da.groupby('time.dayofyear').mean()[climtas.blocked.blocked_groupby](api/blocked.rstclimtas.blocked.blocked_groupby) will as much as possible limit the number of chunks created/ It does this by reshaping the array, stacking individual years, then reducing over the new stacked axis rather than using Pandas indexing operations. 
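The reshaping idea is easy to sketch with plain numpy: stack the years of daily data along a new axis and reduce over that axis. This is only a toy illustration of the principle, not climtas internals, and it assumes the three non-leap years 2001-2003 used in the example above.

import numpy as np

daily = np.random.random((3 * 365, 50, 100))   # (time, lat, lon): three non-leap years of daily data
by_year = daily.reshape(3, 365, 50, 100)       # (year, dayofyear, lat, lon)
climatology = by_year.mean(axis=0)             # (dayofyear, lat, lon) daily-mean climatology

Because the reduction runs over whole chunks along the stacked year axis, the output keeps a chunk layout close to the input's instead of producing one chunk per day of year.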
It does however require the input data to be evenly spaced in time, which well-behaved datasets should be.climtas.blocked_groupby(da, time='dayofyear').mean()Prepare DataSetimport torch import torchvision n_epochs = 3 batch_size_train = 64 batch_size_test = 1000 learning_rate = 0.01 momentum = 0.5 log_interval = 10 random_seed = 1 torch.backends.cudnn.enabled = False torch.manual_seed(random_seed) train_loader = torch.utils.data.DataLoader( torchvision.datasets.MNIST('/files/', train=True, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])), batch_size=batch_size_train, shuffle=True) test_loader = torch.utils.data.DataLoader( torchvision.datasets.MNIST('/files/', train=False, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])), batch_size=batch_size_test, shuffle=True) examples = enumerate(test_loader) batch_idx, (example_data, example_targets) = next(examples) example_data.shape import matplotlib.pyplot as plt fig = plt.figure() for i in range(6): plt.subplot(2,3,i+1) plt.tight_layout() plt.imshow(example_data[i][0], cmap='gray', interpolation='none') plt.title("Ground Truth: {}".format(example_targets[i])) plt.xticks([]) plt.yticks([]) figNeural networkimport torch.nn as nn import torch.nn.functional as F import torch.optim as optim class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x) network = Net() optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)Trainingtrain_losses = [] train_counter = [] test_losses = [] test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)] def train(epoch): network.train() for batch_idx, (data, target) in enumerate(train_loader): optimizer.zero_grad() output = network(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) train_losses.append(loss.item()) train_counter.append( (batch_idx*64) + ((epoch-1)*len(train_loader.dataset))) def test(): network.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: output = network(data) test_loss += F.nll_loss(output, target, size_average=False).item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum() test_loss /= len(test_loader.dataset) test_losses.append(test_loss) print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) test() for epoch in range(1, n_epochs + 1): train(epoch) test()[Day 7](https://adventofcode.com/2020/day/7): Handy Haversacksimport numpy as np import re rule = re.compile(r"(.*) bags contain (.*)\.") bags = re.compile(r"(\d+) (.*?) 
bag") with open("../data/07.txt", "r") as f: rules = rule.findall(f.read()) enum = {bag: i for i, (bag, _) in enumerate(rules)} contents = np.zeros((len(enum), len(enum)), dtype=int) for bag, items in rules: for n, item in bags.findall(items): contents[enum[bag], enum[item]] = int(n)Part 1 Solution by recursionimport operator as op arithmetic = {np.dtype(bool): (op.or_, op.and_), # Part 1 np.dtype(int): (op.add, op.mul)} # Part 2 def expander(b): add, mul = arithmetic[b.dtype] sigma = lambda ys: np.sum(list(ys), axis=0, dtype=b.dtype) def expand(y): if (y == 0).all(): return y return add(y, sigma(mul(a, expand(z)) for a, z in zip(y, b) if a)) return expand shinygold = enum["shiny gold"] contains = contents.T.astype(bool) assert 222 == np.sum(expander(contains)(contains[shinygold]))Solution by iteration (linear algebra)Given a *nilpotent*, square $(0,1)$-matrix $B$, solve the equation $X=B+BX$. The solution is given by the geometric series $X=(1-B)^{-1}B=B+B^2+B^3+\cdots$, which is finite because $B$ is nilpotent. This also yields the solution over the boolean semiring, for which "$1-B$" would be undefined.from itertools import accumulate, repeat, takewhile from scipy.sparse import csr_matrix def geomseries(x): """Geometric series starting from x, assuming x is nilpotent (and sparse).""" x = csr_matrix(x) pows = accumulate(repeat(x), op.matmul, initial=x) return np.sum(list(takewhile(csr_matrix.count_nonzero, pows)), axis=0) assert 222 == np.sum(geomseries(contains)[shinygold])NB: Without sparsity the geometric series would be much slower to compute. Part 2 Solution by recursionassert 13264 == np.sum(expander(contents)(contents[shinygold]))Solution by iterationassert 13264 == np.sum(geomseries(contents)[shinygold])Steps in which i impliment thisstep1 : Import Datastep2 : Word Count - CountVectorizerstep3 : Term Frequency Inverse Document Frequencystep4 : Naive Bayes Classifierstep5 : Resultimport sklearn.datasets as skdStep1 : Importing Data offlinecategories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med'] news_train = skd.load_files('/home/manjeet/Videos/Documentation/ML-algo/Text-classification-using-Naive-Bayes/data/train',categories=categories,encoding='ISO-8859-1') news_test = skd.load_files('/home/manjeet/Videos/Documentation/ML-algo/Text-classification-using-Naive-Bayes/data/test',categories=categories,encoding='ISO-8859-1') #After loading the data, the variable news_test and news_train #stores as a dictionary news_train.keys() news_train['target'] #here just i'm accessing the target news_train['target_names'] news_train.target_namesStep : 2 Word Count - CountVectorizer See Section-1 of Rough work for Text classification .ipynb filefrom sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer() x_train_tf = count_vect.fit_transform(news_train.data) x_train_tf.shapeStep3 : Term Frequency Inverse Document Frequency See Section-2 of Rough work for Text classification .ipynb filefrom sklearn.feature_extraction.text import TfidfTransformer tfidf_transformer = TfidfTransformer() x_train_tfidf = tfidf_transformer.fit_transform(x_train_tf) x_train_tfidf.shapestep4 : Naive Bayes Classifier#here we are using Multinomial Naive Bayes Classifier #sklearn already has inbuilt Multinomial Naive Bayes Classifier package #Using this package we can directly train our model with the #matrix obtained from TfidTransformer from sklearn.naive_bayes import MultinomialNB clf = MultinomialNB().fit(x_train_tfidf,news_train.target) #parameter is data and it's 
label # see section-3 for the demo that how it is doing # also see my .pdf file of this part.step5 : Result see section 4 of Rough work for Text classification .ipynb filex_test_tf = count_vect.transform(news_test.data) x_test_tf.shape x_test_tfidf = tfidf_transformer.transform(x_test_tf) predicted = clf.predict(x_test_tfidf) from sklearn import metrics from sklearn.metrics import accuracy_score print("Accuracy : ", accuracy_score(news_test.target,predicted)) print(metrics.classification_report(news_test.target,predicted,target_names=news_test.target_names)) metrics.confusion_matrix(news_test.target,predicted)Accuracy : 0.8348868175765646 precision recall f1-score support alt.atheism 0.97 0.60 0.74 319 comp.graphics 0.96 0.89 0.92 389 sci.med 0.97 0.81 0.88 396 soc.religion.christian 0.65 0.99 0.78 398 micro avg 0.83 0.83 0.83 1502 macro avg 0.89 0.82 0.83 1502 weighted avg 0.88 0.83 0.84 1502Using Pipline Line conceptHere we are doing same thing which is done by us in the aboveimport sklearn.datasets as skd categories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med'] news_train = skd.load_files('/home/manjeet/Videos/Documentation/ML-algo/Text-classification-using-Naive-Bayes/data/train',categories=categories,encoding='ISO-8859-1') news_test = skd.load_files('/home/manjeet/Videos/Documentation/ML-algo/Text-classification-using-Naive-Bayes/data/test',categories=categories,encoding='ISO-8859-1') from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import MultinomialNB text_clf = Pipeline([('vect', TfidfVectorizer()), ('clf', MultinomialNB()) ]) # train the model text_clf.fit(news_train.data, news_train.target) # Predict the test cases predicted = text_clf.predict(news_test.data) from sklearn import metrics from sklearn.metrics import accuracy_score import numpy as np print('Accuracy achieved is ' + str(np.mean(predicted == news_test.target))) print(metrics.classification_report(news_test.target, predicted, target_names=news_test.target_names)), metrics.confusion_matrix(news_test.target, predicted)Input Redshift Cluster Endpoint and UserPlease input your Amazon Redshift Cluster endpoint and existing database userREDSHIFT_ENDPOINT = 'redshift-cluster.xxxxxxxxxx.us-east-1.redshift.amazonaws.com:5439/dev' REDSHIFT_USER="awsuser"Setup Run SQL function using Redshift Data API to get SQL query output directly into pandas dataframeIn this step, we are creating function run_sql, which we will use to get SQL query output directly into pandas dataframe. 
We will also use this function to run DDL statementsimport boto3 import time import pandas as pd import numpy as np import matplotlib.pyplot as plt session = boto3.session.Session() region = session.region_name def run_sql(sql_text): client = boto3.client("redshift-data") res = client.execute_statement(Database=REDSHIFT_ENDPOINT.split('/')[1], DbUser=REDSHIFT_USER, Sql=sql_text, ClusterIdentifier=REDSHIFT_ENDPOINT.split('.')[0]) query_id = res["Id"] while True: time.sleep(1) status_description = client.describe_statement(Id=query_id) status = status_description["Status"] if status == "FAILED": raise Exception('SQL query failed:' + query_id + ": " + status_description["Error"]) elif status == "FINISHED": if status_description['ResultRows']>0: results = client.get_statement_result(Id=query_id) column_labels = [] for i in range(len(results["ColumnMetadata"])): column_labels.append(results["ColumnMetadata"][i]['label']) records = [] for record in results.get('Records'): records.append([list(rec.values())[0] for rec in record]) df = pd.DataFrame(np.array(records), columns=column_labels) return df else: return query_idCreate User and Grant Permissions - OptionalAs the database adminstrator, you may skip the permissions section. Otherwise, you can create users and grant them permissions with the Principle of Least Privilege in mind. If demouser exists with privilege, please revoke before dropping the user:```sqlrevoke all on schema demo_ml from demouser;```permissions_one_sql = """ DROP USER IF EXISTS demouser; create user demouser with password ''; GRANT CREATE MODEL TO demouser; """ for sql_text in permissions_one_sql.split(";")[:-1]: run_sql(sql_text);Data Preparation Data preparation script to be ran on Amazon Redshift**Note**: Please change `` to your AWS Account Id down in the script belowsetup_script=""" DROP SCHEMA IF EXISTS DEMO_ML CASCADE; CREATE SCHEMA DEMO_ML; DROP TABLE IF EXISTS demo_ml.customer_activity; CREATE TABLE demo_ml.customer_activity ( state varchar(2), account_length int, area_code int, phone varchar(8), intl_plan varchar(3), vMail_plan varchar(3), vMail_message int, day_mins float, day_calls int, day_charge float, total_charge float, eve_mins float, eve_calls int, eve_charge float, night_mins float, night_calls int, night_charge float, intl_mins float, intl_calls int, intl_charge float, cust_serv_calls int, churn varchar(6), record_date date); COPY DEMO_ML.customer_activity FROM 's3://redshift-downloads/redshift-ml/customer_activity/' IAM_ROLE 'arn:aws:iam:::role/RedshiftML' IGNOREHEADER 1 CSV region 'us-east-1'; """Run data preparation script in Amazon Redshiftfor sql_text in setup_script.strip().split(";")[:-1]: run_sql(sql_text);Granting Permissions - OptionalCreate demo userGrant create model permissions to `demouser`permissions_two_sql = """ GRANT SELECT on demo_ml.customer_activity TO demouser; GRANT CREATE, USAGE ON SCHEMA demo_ml TO demouser; """ for sql_text in permissions_two_sql.split(";")[:-1]: run_sql(sql_text);Read SQL output from Pandas Dataframedf = run_sql("SELECT * FROM demo_ml.customer_activity;"); df.head(10) df.describe()Run Create Model statement to create a new ML model with Redshift MLPlease replace `` with your AWS account Idresp = run_sql(""" CREATE MODEL demo_ml.customer_churn_model FROM (SELECT state, area_code, total_charge/account_length AS average_daily_spend, cust_serv_calls/account_length AS average_daily_cases, churn FROM demo_ml.customer_activity WHERE record_date < '2020-01-01' ) TARGET churn FUNCTION predict_customer_churn IAM_ROLE 
'arn:aws:iam:::role/RedshiftML' SETTINGS ( S3_BUCKET 'redshiftml-' ) ; """) respCheck the status on your ML model You can check the status of your models by running the `SHOW MODEL` command from your SQL prompt.Continuously check `Model State` and once it has been set to `Ready`, continue to the next step.df = run_sql('SHOW MODEL demo_ml.customer_churn_model;') df.head(10)Evaluate your model performancedf = run_sql(""" WITH infer_data AS ( SELECT area_code || phone accountid, churn, demo_ml.predict_customer_churn( state, area_code, total_charge/account_length , cust_serv_calls/account_length ) AS predicted FROM demo_ml.customer_activity WHERE record_date < '2020-01-01' ) SELECT * FROM infer_data where churn!=predicted; """) dfEvaluationYou can see the F1 value for the example model customer_churn_model in the output of the `SHOW MODEL` command. The F1 amount signifies the statistical measure of the precision and recall of all the classes in the model. The value ranges between 0–1; the higher the score, the better the accuracy of the model.df = run_sql('SHOW MODEL demo_ml.customer_churn_model;') df.head(10)Invoke your ML model for inferenceYou can use your SQL function to apply the ML model to your data in queries, reports, and dashboards. For example, you can run the predict_customer_churn SQL function on new customer data in Amazon Redshift regularly to predict customers at risk of churning and feed this information to sales and marketing teams so they can take preemptive actions, such as sending these customers an offer designed to retain them.For example, you can run the following query to predict which customers in area code 408 might churn and the output shows the account ID and whether the account is predicted to remain active:df = run_sql(""" SELECT area_code || phone accountid, demo_ml.predict_customer_churn( state, area_code, total_charge/account_length , cust_serv_calls/account_length ) AS "predictedActive" FROM demo_ml.customer_activity WHERE area_code='408' and record_date > '2020-01-01'; """) dfGranting Permissions - OptionalThe following code grants the EXECUTE privilege to users such as your marketing_analyst_grpdf = run_sql('GRANT EXECUTE demo_ml.predict_customer_churn TO marketing_analyst_grp')Cost Control If the `SELECT` query of `CREATE MODEL` produces 10,000 records for training and each record has five columns, the number of cells in the training data is 50,000. 
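If you want to sanity-check that arithmetic against your own table before training, you can count the rows with the `run_sql` helper defined earlier; this is only an illustrative sketch (the column count of five matches the columns selected by the `CREATE MODEL` statement above):

```python
# Hedged sketch: estimate the number of training cells (rows x columns) before CREATE MODEL.
rows = int(run_sql(
    "SELECT COUNT(*) FROM demo_ml.customer_activity WHERE record_date < '2020-01-01';"
).iloc[0, 0])
columns = 5  # state, area_code, average_daily_spend, average_daily_cases, churn
print("Estimated training cells:", rows * columns)
```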
You can control the training cost by setting the `MAX_CELLS`.Please replace `` with your AWS account Iddf = run_sql(""" CREATE MODEL demo_ml.customer_churn_model FROM (SELECT state, area_code, total_charge/account_length AS average_daily_spend, cust_serv_calls/account_length AS average_daily_cases, churn FROM demo_ml.customer_activity WHERE account_length > 120 ) TARGET churn FUNCTION predict_customer_churn IAM_ROLE 'arn:aws:iam:::role/RedshiftML' SETTINGS ( S3_BUCKET 'redshiftml_', MAX_CELLS 10000 ) ; """)build networkdef build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ Builds a feedforward neural network arguments: input_placeholder: placeholder variable for the state (batch_size, input_size) output_size: size of the output layer scope: variable scope of the network n_layers: number of hidden layers size: dimension of the hidden layer activation: activation of the hidden layers output_activation: activation of the ouput layers returns: output placeholder of the network (the result of a forward pass) Hint: use tf.layers.dense """ # YOUR CODE HERE fwd = input_placeholder for _ in range(n_layers): fwd = tf.layers.dense(fwd, size, activation=activation) output_placeholder = tf.layers.dense(fwd, output_size, activation=output_activation) return output_placeholder sess = tf_reset() input_ph = tf.placeholder(dtype=tf.float32, shape = [None, 2]) output_ph = build_mlp(input_ph, 3, None, 2, 5, tf.nn.relu, tf.nn.softmax) sess.run(tf.global_variables_initializer()) output_run = sess.run(output_ph, feed_dict={input_ph: np.array([[2,3], [4,5]])}) print(output_run)[[0.17747754 0.5960405 0.22648197] [0.07801642 0.7966605 0.12532304]]matrix broadcastingsess = tf_reset() b = tf.constant([[1,1,1], [1,1,1]], dtype = tf.float32) a = tf.constant([[2],[3]], dtype = tf.float32) c = b * a print(sess.run(c))[[2. 2. 2.] [3. 3. 
3.]]random samplingsess = tf_reset() # logits = tf.constant([[0,0,10],[10,0,0]], dtype = tf.float32) logits = tf.constant([[0,0,10]], dtype = tf.float32) samples = tf.random.categorical(logits, 1) # samples = tf.reshape(samples, [1,-1]) print(sess.run(samples)) import tensorflow_probability as tfp labels = tf.constant([0, 0], dtype = tf.int32) dist = tfp.distributions.Categorical(logits) print(sess.run(dist.log_prob(labels))) print(sess.run(dist.prob(labels))) 1.0 / (np.exp(10)+2) sess = tf_reset() mu = tf.constant([[0, 0, 0], [1 ,2 ,3 ]], dtype = tf.float32) std = tf.constant([.1, .1, 100], dtype = tf.float32) samples = tf.random.normal(tf.shape(mu)) samples = mu + std * samples print(sess.run(samples)) import tensorflow_probability as tfp dist = tfp.distributions.MultivariateNormalDiag(loc = mu, scale_diag = std) log_prob = dist.log_prob(mu) print(sess.run(log_prob)) mult = log_prob * tf.constant([1,2], dtype = tf.float32) print(sess.run(mult)) mult1 = tf.multiply(log_prob, tf.constant([1,2], dtype = tf.float32)) print(sess.run(mult1)) sess = tf_reset() sy_logstd = tf.get_variable(name='log_std', shape=[5], initializer=tf.constant_initializer(1.0)) sess.run(tf.global_variables_initializer()) print(sess.run(sy_logstd)) dist = tfp.distributions.Normal(0, 1) print(sess.run(dist.log_prob(0))*3)-2.756815552711487reshapexx = tf.reshape(tf.constant([[3]], dtype=tf.float32), [-1]) print(sess.run(xx))[3.]numpyfrom itertools import accumulate import operator ar = np.array([2,3,4, 5]) list(accumulate(ar[::-1], lambda bal, x: bal * 0.5 + x))[::-1] np.array(list(accumulate(np.ones(10), lambda res, _: res * 0.5))) from functools import reduce reduce(lambda bal, x: bal * 0.5 + x, ar[::-1])Open AI gymimport gym env = gym.make('CartPole-v0') env.reset() for _ in range(1000): env.render() env.step(env.action_space.sample()) # take a random action env.close() import gym env = gym.make('CartPole-v0') print(env.action_space.sample()) print(env.reset()) env.step(1)[0] env.step(1) env = gym.make('LunarLanderContinuous-v2') env.action_space.sample() env.action_space.contains(np.array([ 1.3631266 , -0.2566176])) env.action_space.contains(np.array([0.9, -0.9])) env.action_spaceConvergence Resultsres2=z_top.groupby(["w_nc","trial"]).head(10) res2.to_csv("top_norms_10_result_exp5_w_nc.csv") #res2 z_top.groupby(["w_nc","trial"]).head(5).to_csv("top_norms_every_exp.csv") z_top.loc[z_top.w_nc==0.25].groupby(["w_nc","trial"]).head(2) import seaborn as sns fig, axs = plt.subplots(5,5,figsize=(7,10),dpi=200): z_top.loc[z_top.w_nc==0.0] sns.displot(grp.reindex(),x="trial", y=('log_posterior', 'mean'), hue="trial",\ kind="hist",palette=sns.color_palette("husl", 3),dpi=300) import seaborn as sns import matplotlib.pyplot as plt sns.displot(z_top.loc[z_top.w_nc.isin([0.25,0.05])].reindex(),x="w_nc", y=('log_posterior', 'mean'), hue="trial",\ kind="hist",palette=sns.color_palette("husl", 3)) sns_plot=sns.displot(data=z_top.loc[z_top.w_nc.isin([0.05,0.35])],\ y=('log_posterior', 'mean'),row="w_nc",col="trial", kind="kde") sns_plot.savefig("conv_arg.jpg",dpi=300) import seaborn as sns ax=sns.displot(z_top.loc[z_top.w_nc<0.25].reindex(),x="trial", y=('log_posterior', 'mean'), hue="trial",\ kind="hist",palette=sns.color_palette("husl", 3)) z_top.reindex().columns loc[z_top.w_nc==0.0]Collate convergence!touch "exp5_convergence.txt" for exp_path in exp_paths: #w_nc,trial=tuple(map(lambda x: float(x.split("=")[-1]),exp_path.split(","))) !echo {exp_path} >> exp5_convergence.txt !cat {base_path+exp_path+"/conv_test_nc.txt"} >> 
exp5_convergence.txt !echo -e "\n ---" >> exp5_convergence.txtCp cnc_movement imagesfor exp_path in exp_paths: !cp {base_path+exp_path+"/cnc_movement.jpg"} ../../ICJAI21/exp5/{exp_path+",cnc_movmt.jpg"}Calling a URL and Saving the Data in PythonFor this part of the tutorial, you will use the API url to get the json data and formatting it for a pandas dataframe. Pandas is a python library that manages data in a similar format to an excel sheet. Instead of the dictionary format that JSON files are stored as, this forms data into a table that can be actively queried in a fashion similar to SQL.# importing libraries import pandas as pd import matplotlib.pyplot as plt import requests # headers are often used to gain access to an otherwise locked API HEADERS = { 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36', 'Accept': 'application/json, text/plain, */*', 'x-nba-stats-token': 'true', 'Referer': 'http://stats.nba.com/player/', 'Connection': 'keep-alive', 'x-nba-stats-origin': 'stats' } # define function to be used def get_data(url): response = requests.get(url, headers=HEADERS) while response.status_code != 200: response = requests.get(url) # explore the response in developers tools to find the proper arrangement of your json response headers = response.json()['resultSets'][0]['headers'] data = response.json()['resultSets'][0]['rowSet'] data = pd.DataFrame(data, columns=headers) return data # define the url url = 'http://stats.nba.com/stats/playerdashboardbyyearoveryear?DateFrom=&DateTo=&GameSegment=&LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=PerGame&Period=0&PlayerID=203954&PlusMinus=N&Rank=N&Season=2017-18&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&Split=yoy&VsConference=&VsDivision=' # get the pandas data frame data = get_data(url) # print rows of information with column names # to only take the first five rows of a frame, use print(data.head(5)) print(data.head()) # once you have the data, you can save it simply to a csv # to remove the index from the frame, indicate so as an argument data.to_csv('player.csv', index=False)Unit 3 Chapter 3 --- Naive Bayes Classifiers ---Naive Bayes Classifiers are **Bayesian** classifiers that make an assumption that the features are **naive** i.e. the features(here words) are independent and have no relation between each other. For example, consider the sentence,'He is my friend. His name is John.'A naive classifier sees no relation between, **He**, **His**, & **John** (even though there is). --- Bag of words Before we work on any algorithm, we need to convert our text data to numbers.To convert text into numbers, we need a way to represent each word in our data.In the **bag of words** model, we use frequency as a way of representing text as numbers.There are new & better ways to convert our text data into numbers, like **word2vec**.We will first take a look at the ```bag of words``` model. --- A bag of words model, as the name suggests, stores words into an unordered **set**.We use **set** because duplicate words are not allowed, as we are storing their frequency as well. Bag of words in Python> [Code Courtesy](https://www.geeksforgeeks.org/bag-of-words-bow-model-in-nlp/) - Let's take a sample paragraph > Beans. I was trying to explain to somebody as we were flying in, that’s corn. That’s beans. 
And they were very impressed at my agricultural knowledge. Please give it up for Amaury once again for that outstanding introduction. I have a bunch of good friends here today, including somebody who I served with, who is one of the finest senators in the country, and we’re lucky to have him, your Senator, is here. I also noticed, by the way, former Governor Edgar here, who I haven’t seen in a long time, and somehow he has not aged and I have. And it’s great to see you, Governor. I want to thank President Killeen and everybody at the U of I System for making it possible for me to be here today. And I am deeply honored at the Paul Douglas Award that is being given to me. He is somebody who set the path for so much outstanding public service here in Illinois. Now, I want to start by addressing the elephant in the room. I know people are still wondering why I didn’t speak at the commencement.dataset = 'Beans. I was trying to explain to somebody as we were flying in, that’s corn. That’s beans. And they were very impressed at my agricultural knowledge. Please give it up for Amaury once again for that outstanding introduction. I have a bunch of good friends here today, including somebody who I served with, who is one of the finest senators in the country, and we’re lucky to have him, your Senator, is here. I also noticed, by the way, former Governor Edgar here, who I haven’t seen in a long time, and somehow he has not aged and I have. And it’s great to see you, Governor. I want to thank President Killeen and everybody at the U of I System for making it possible for me to be here today. And I am deeply honored at the Paul Douglas Award that is being given to me. He is somebody who set the path for so much outstanding public service here in Illinois. Now, I want to start by addressing the elephant in the room. I know people are still wondering why I didn’t speak at the commencement.' datasetBasic preprocessing > We will first break the sentences, then convert text to lower case, and remove punctuation and non wordsfrom nltk.tokenize import sent_tokenize import re sentences = sent_tokenize(dataset) sentences sentences[4] # Now the dataset has been broken into sentences.> Converting to lower case.But why is it neccessary to convert to lower case?- If we don't convert to lower case then, any two words like ```Going``` and ```going``` will be treated differently, resulting in a large feature space.lower_cased_data = [sentence.lower() for sentence in sentences] lower_cased_data[4]> Removing non-word characterscleaned_data = [re.sub(r'\W+',' ',sentence) for sentence in lower_cased_data] cleaned_data[4]> Removing punctuations if anycleaned_data = [re.sub(r'\s+',' ',sentence) for sentence in cleaned_data] cleaned_data[4]> Creating bag of wordsword2count = {} for data in cleaned_data: words = nltk.word_tokenize(data) for word in words: if word not in word2count.keys(): word2count[word] = 1 else: word2count[word] += 1Now we have a bag of words that has stored frequency with itword2countTime Series Analysis Time series is a collection of data points collected at constant time intervals, such as the tempeture of london city centre at 1pm everyday or the closing value of a stock. These are analysed to determine the long term trend so as to forecast the future. 
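Before loading the real dataset, here is a tiny, hedged illustration of the kind of object the rest of this tutorial manipulates: a pandas Series of observations indexed by dates (the values echo the first rows of the AirPassengers data used below):

```python
import pandas as pd

# Hedged toy example: a few monthly observations indexed by a DatetimeIndex.
toy = pd.Series([112, 118, 132],
                index=pd.to_datetime(['1949-01-01', '1949-02-01', '1949-03-01']))
print(toy)                 # values labelled by timestamps
print(toy['1949-02-01'])   # label-based indexing by date, as used later in the tutorial
```

With that picture in mind, the imports and the real data come next.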
In the cell below are the imports that will be required in the following tutorialimport pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from matplotlib.pylab import rcParams rcParams['figure.figsize'] = 15, 6 from statsmodels.tsa.stattools import adfuller from statsmodels.tsa.arima_model import ARIMA from datetime import datetimeWe will now read in the AirPassengers csv that has been provided in the folder and examine the head of the dataframe.data = pd.read_csv('AirPassengers.csv') print data.head() print data.dtypesMonth #Passengers 0 1949-01 112 1 1949-02 118 2 1949-03 132 3 1949-04 129 4 1949-05 121 Month object #Passengers int64 dtype: objectWe can see in the dataframe above that we have two columns, one represents the month of passengers flying and the second represents how many passengers flew during that month. When we look at the data type of the month column we see that it is being read in as an object. To read the month column as a time series we have to pass parameters that will format the column into a datetime datatypedateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m') data = pd.read_csv('AirPassengers.csv', parse_dates=['Month'], index_col='Month',date_parser=dateparse) print data.head()#Passengers Month 1949-01-01 112 1949-02-01 118 1949-03-01 132 1949-04-01 129 1949-05-01 121The parameters above have been broken down here and their purpose explained.1. parse_dates: This specifies the column which contains the date-time information. As we saw above, the column name is ‘Month’.2. index_col: A key idea behind using Pandas for TS data is that the index has to be the variable depicting date-time information. So this argument tells pandas to use the ‘Month’ column as the index.3. date_parser: This specifies a function which converts an input string into a datetime variable. By default Pandas reads data in the format ‘YYYY-MM-DD HH:MM:SS’. If the data is not in this format, the format has to be manually defined. Something similar to the dateparse function defined here can be used for this purpose. We will then convert the dataframe into a Series object to make it easier for us to index. This simply makes it into a one dimensional array instead of the 2D array we had with the dataframets = data["#Passengers"] ts.head(10)To get a value in the series object, this can be done in two ways: one by using the string constant of the index, and the second method is to import the datetime function from the datetime library.#subset by string constant of the index ts['1949-01-01'] #using the datetime function from datetime import datetime ts[datetime(1949,1,1)] #1. Specify the entire range: ts['1949-01-01':'1949-05-01'] #2. Use ':' if one of the indices is at ends: ts[:'1949-05-01']Stationarity of a Time Series A time series has stationarity if a shift in time doesn’t cause a change in the shape of the distribution. Basic properties of the distribution like the mean, variance and covariance are constant over time. It is important as most models make the assumption that the time series is stationary. The mean of the series should not be a function of time; rather it should be a constant. The image below has the left hand graph satisfying the condition whereas the graph in red has a time dependent mean. ![title](Mean_nonstationary.png) The variance of the series should not be a function of time. The following graph depicts what is and what is not a stationary series. 
![title](Var_nonstationary.png) The covariance of the i th term and the (i + m) th term should not be a function of time. In the following graph, you will notice the spread becomes closer as the time increases. Hence, the covariance is not constant with time for the ‘red series’. ![title](Cov_nonstationary.png) Testing Stationarity The first step in seeing whether are data is stationary is to visualize the data, since we had previously turned the datframe into a series this is very easy to do and we can simply plot the series.plt.plot(ts)From the graph above it is clear that there is an increasing trend, however in other datasets this may not be so clear to infer from the graph. We look at more formal methods of looking at testing stationarity which include: Plotting Rolling Statistic: we can plot the moving average or variance and see if it varies with time Dickey-Fuller Test: This is one of the statistical test of stationary. The results are composed of Test statistic and a critical value. If test statistic is less than critical value we can say that the time is stationary.from statsmodels.tsa.stattools import adfuller def test_stationarity(timeseries): #Determing rolling statistics rolmean = timeseries.rolling(window=12).mean() rolstd = timeseries.rolling(window=12).std() #Plot rolling statistics: orig = plt.plot(timeseries, color='blue',label='Original') mean = plt.plot(rolmean, color='red', label='Rolling Mean') std = plt.plot(rolstd, color='black', label = 'Rolling Std') plt.legend(loc='best') plt.title('Rolling Mean & Standard Deviation') plt.show(block=False) #Perform Dickey-Fuller test: print 'Results of Dickey-Fuller Test:' dftest = adfuller(timeseries, autolag='AIC') dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) for key,value in dftest[4].items(): dfoutput['Critical Value (%s)'%key] = value print dfoutput test_stationarity(ts)Making the Data Stationary In most real world situations the data is unlikely to be stationary from the outset, however there have been techniques to wrangle that data to be close to stationary. Factors that make a time series non-stationary are trend and seasonality.Trend: Varying mean over time. The price of Freddos increasing over the previous yearsSeasonality: A spike in retail close to holiday times such as christmas. To try and eliminate trend we will use transformation functions on the data the one that we will try first is a log transformation as it will penalise higher values.ts_log = np.log(ts) plt.plot(ts_log)Moving AverageIn this approach, we take average of ‘k’ consecutive values depending on the frequency of time series. Here we can take the average over the past 1 year, i.e. last 12 values. Pandas has specific functions defined for determining rolling statistics.moving_avg = ts_log.rolling(window=12).mean() plt.plot(ts_log) plt.plot(moving_avg, color='red')The red line shows the rolling mean. Lets subtract this from the original series. Note that since we are taking average of last 12 values, rolling mean is not defined for first 11 values. This can be observed as:ts_log_moving_avg_diff = ts_log - moving_avg ts_log_moving_avg_diff.head(12)The first 11 values can be dropped and then we will check the stationarityts_log_moving_avg_diff.dropna(inplace=True) test_stationarity(ts_log_moving_avg_diff)This looks like a much better series. The rolling values appear to be varying slightly but there is no specific trend. 
Also, the test statistic is smaller than the 5% critical values so we can say with 95% confidence that this is a stationary series. Differencing To reduce the seasonality,in this approach we take the differnce of an observation at a paticular instant with the instant before it(t - (t-1)).ts_log_diff = ts_log - ts_log.shift() plt.plot(ts_log_diff)We will now check the stationairty of the Residuals, which is again what is left after trend and sesonality have been modelled seperatelyts_log_diff.dropna(inplace=True) test_stationarity(ts_log_diff)We can see that the mean and std variations have small variations with time. Also, the Dickey-Fuller test statistic is less than the 10% critical value, thus the TS is stationary with 90% confidence Forecasting a Time Series We will be using an ARIMA model, which takes the parameters: timeseries, p,d and q, these are explained in the theory notebook as well as an explanation of what an ARIMA model is. To find the parameters p and q we perform the following methods: Autocorrelation function and a Partial Autocorrelation Function.#ACF and PACF plots: from statsmodels.tsa.stattools import acf, pacf lag_acf = acf(ts_log_diff, nlags=20) lag_pacf = pacf(ts_log_diff, nlags=20, method='ols') #Plot ACF: plt.subplot(121) plt.plot(lag_acf) plt.axhline(y=0,linestyle='--',color='gray') plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray') plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray') plt.title('Autocorrelation Function') #Plot PACF: plt.subplot(122) plt.plot(lag_pacf) plt.axhline(y=0,linestyle='--',color='gray') plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray') plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray') plt.title('Partial Autocorrelation Function') plt.tight_layout()The dotted lines on the graph represent the confidence interval, these are used to determine P and Q.q- We get from the Autocorrelation Function graph where the line crosses the upper confidence interval for the first time which in this case is 2.p- We get from the Partial Autocorrelation Function graph where it crosses the upper confidence interval for the first time which is also 2. Modelfrom statsmodels.tsa.arima_model import ARIMA model = ARIMA(ts_log, order=(2, 1, 2)) results_ARIMA = model.fit(disp=-1) plt.plot(ts_log_diff) plt.plot(results_ARIMA.fittedvalues, color='red') plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/statsmodels/tsa/base/tsa_model.py:171: ValueWarning: No frequency information was provided, so inferred frequency MS will be used. % freq, ValueWarning) /Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/statsmodels/tsa/base/tsa_model.py:171: ValueWarning: No frequency information was provided, so inferred frequency MS will be used. 
% freq, ValueWarning)Now that we have predicted results we will have to rescale them back to the original scale to compare to the original time series, as we previously transformed them using the logarithm function.predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True) print predictions_ARIMA_diff.head()Month 1949-02-01 0.009580 1949-03-01 0.017491 1949-04-01 0.027670 1949-05-01 -0.004521 1949-06-01 -0.023890 dtype: float64If you notice, the value for 1949-01-01 is missing; this is because we took a lag of one.The way to convert the differencing to log scale is to add these differences consecutively to the base number. An easy way to do it is to first determine the cumulative sum at each index and then add it to the base number.predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum() print predictions_ARIMA_diff_cumsum.head()Month 1949-02-01 0.009580 1949-03-01 0.027071 1949-04-01 0.054742 1949-05-01 0.050221 1949-06-01 0.026331 dtype: float64Here the first element is the base number itself and from there on the values are cumulatively added. The last step is to take the exponent and compare it with the original series.predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index) predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0) predictions_ARIMA_log.head()/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: .ix is deprecated. Please use .loc for label based indexing or .iloc for positional indexing See the documentation here: http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated """Entry point for launching an IPython kernel.We will now plot our predictions against the original time series in its original scale.predictions_ARIMA = np.exp(predictions_ARIMA_log) plt.plot(ts) plt.plot(predictions_ARIMA) plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))Using ticdat to convert data between different file formatsHere I'm showing how I convert between json format and xlsx format. The reverse conversion would be similar.This notebook needs `tts_netflow_b` in the Python path, and `test_tts_netflow_b` as a subdirectory of the current directory. If you run it directly from the `notebooks` directory cloned from GitHub it won't find `test_tts_netflow_b`. I prefer to use symbolic links for such things.import tts_netflow_bSince we are demonstrating `tts_netflow_b` as a dynamic work in progress, let's record the version.tts_netflow_b.__version__ dat = tts_netflow_b.input_schema.json.create_pan_dat("test_tts_netflow_b/data/sample_data.json") dat tts_netflow_b.input_schema.xls.write_file(dat, "netflow_flows_figure_5.xlsx")Robo-Geek GP Project Part 1![alt text](https://github.com/robogeekcanada/GP_self_driving_car/blob/master/Report%20images/RGlogo.jpg?raw=true \"Logo Title Text 1\") Project ObjectiveIn Part 1 the objective is to grab the inputs from the screen as images. The input images will then be converted to data that will be stored in a csv file. These inputs will be used to train the neural network.![alt text](https://github.com/robogeekcanada/GP_self_driving_car/blob/master/Report%20images/cockpit_panel1.JPG?raw=true \"Logo Title Text 1\")![alt text](https://github.com/robogeekcanada/GP_self_driving_car/blob/master/Report%20images/cockpit_panel2.JPG?raw=true \"Logo Title Text 1\") Once the image is captured, we need a strategy to convert the image into useful input. 
The table below shows potential strategy to make these conversions. As we see the impact to the processing speed (FPS), we may change these strategies. Also different members of the project can work on optimizing each function. ![alt text](https://github.com/robogeekcanada/GP_self_driving_car/blob/master/Report%20images/strategy_table.JPG?raw=true \"Logo Title Text 1\") Acknowledgment:The grabscreen function was developed by Sentdex, please refer to the github repository for more details:https://github.com/Sentdex/pygta5Robo-Geek is always grateful to work with the open-source community. This project will be posted in our repository as well.Standard OpenCV librariesimport numpy as np import cv2 import time from grabscreen import grab_screenWith a bit of math, we can use the following grid to calculate the position of all the desired controls. Each function was created for a specific ROI (region of interest).![alt text](https://github.com/robogeekcanada/GP_self_driving_car/blob/master/Report%20images/cockpit_grid.JPG?raw=true \"Logo Title Text 1\")def capture_full_screen(): captured_screen = grab_screen(region=(0,27,640,425)) rgb_full_screen = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_full_screen #drivingwindow def capture_drivingwindow(): captured_screen = grab_screen(region=(0,140,640,245)) rgb_drivingwindow = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_drivingwindow #race map def capture_map(): captured_screen = grab_screen(region=(0,27,170,100)) rgb_map = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_map #scoreboard def capture_scoreboard(): captured_screen = grab_screen(region=(490,31,630,88)) rgb_scoreboard = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_scoreboard #speedometer def capture_speedometer(): captured_screen = grab_screen(region=(205,250,255,275)) rgb_speedometer = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_speedometer #health meter def capture_healthmeter(): captured_screen = grab_screen(region=(268,250,435,270)) rgb_healthmeter = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_healthmeter #RPM gauge def capture_RPMgauge(): captured_screen = grab_screen(region=(255,295,385,395)) rgb_RPMgauge = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_RPMgauge #Steering wheel def capture_steeringwheel(): captured_screen = grab_screen(region=(180,270,460,420)) rgb_steeringwheel = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_steeringwheel #Gear box to shift gears def capture_gears(): captured_screen = grab_screen(region=(525,325,610,410)) rgb_gears = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_gears #Left rearview mirror def capture_leftRvm(): captured_screen = grab_screen(region=(0,250,130,300)) rgb_leftRvm = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_leftRvm #Right rearview mirror def capture_rightRvm(): captured_screen = grab_screen(region=(510,250,(510+130),300)) rgb_rightRvm = cv2.cvtColor(captured_screen, cv2.COLOR_BGR2RGB) return rgb_rightRvmFinally a simple continuous loop to read the images and show them to verify the ROI are captured properly. It's important to note that this programs reads the input from the top left corner of the screen assuming the DOS simulator running GP game is on.More information about running the simulator can be found in the introduction of the project. 
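Since every capture function above repeats the same grab-then-convert pattern, the whole family could in principle be expressed through a single parameterized helper; the sketch below is ours, not part of the original project code, and just illustrates that pattern:

```python
import cv2
from grabscreen import grab_screen

# Hedged sketch: one generic helper that the capture_* functions above all follow.
# Each ROI is a (left, top, right, bottom) pixel box derived from the cockpit grid.
def capture_region(region):
    captured = grab_screen(region=region)
    return cv2.cvtColor(captured, cv2.COLOR_BGR2RGB)

# Example: the speedometer ROI used by capture_speedometer() above.
# speedometer = capture_region((205, 250, 255, 275))
```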
Simply uncomment the function that needs to be tested.def test_capture_functions(): while True: #Capture game window and convert from BGR to RGB full_screen = capture_full_screen() drivingwindow = capture_drivingwindow() map_image = capture_map() scoreboard = capture_scoreboard() speedometer = capture_speedometer() healthmeter = capture_healthmeter() RPMgauge = capture_RPMgauge() steeringwheel = capture_steeringwheel() gears = capture_gears() leftRvm = capture_leftRvm() rightRvm = capture_rightRvm() #cv2.imshow('full screen',full_screen) cv2.imshow('driving window',drivingwindow) #cv2.imshow('map',map_image) #cv2.imshow('scoreboard',scoreboard) #cv2.imshow('speedometer',speedometer) #cv2.imshow('healthmeter',healthmeter) #cv2.imshow('RPMgauge',RPMgauge) cv2.imshow('steering wheel',steeringwheel) #cv2.imshow('gears',gears) #cv2.imshow('leftRvm',leftRvm) #cv2.imshow('rightRvm',rightRvm) if cv2.waitKey(25) & 0xFF == ord('q'): cv2.destroyAllWindows() break test_capture_functions()For more Landlab tutorials, click here: https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html **Application of the flow__distance utility on a Sicilian basin** This notebook illustrates how to run the flow**__**distance utility on a digital elevation model (DEM) that represents a real basin in Sicily. First, a watershed will be extracted from the input DEM by using the watershed utility. Then, the distances from each node to the watershed's outlet will be obtained with the flow**__**distance utility. Flow is routed using the D8 algorithm. First, import what we'll need:from landlab.io import read_esri_ascii from landlab.components import FlowAccumulator from landlab.plot import imshow_grid from matplotlib.pyplot import figure %matplotlib inline from landlab.utils import watershed import numpy as np from landlab.utils.flow__distance import calculate_flow__distanceImport a square DEM that includes the watershed:(mg, z) = read_esri_ascii('nocella_resampled.txt', name='topographic__elevation')Run the FlowAccumulator and the DepressionFinderAndRouter components to find depressions, to route the flow across them and to calculate flow direction and drainage area:fr = FlowAccumulator(mg, flow_director='D8', depression_finder='DepressionFinderAndRouter') fr.run_one_step()Set the id of the outlet. The value indicated here is the node id of the entire watershed's outlet:outlet_id = 15324Run the watershed utility and show the watershed mask:ws_mask = watershed.get_watershed_mask(mg, outlet_id) figure() imshow_grid(mg, ws_mask, allow_colorbar=False)Run the flow**__**distance utility:flow__distance = calculate_flow__distance(mg, add_to_grid=True, clobber=True)Mask the flow**__**distance to the watershed mask. This operation has to be done because the flow**__**distance utility is applied to the entire grid that contains other streams not connected with our stream network and, for this reason, not belonging to our watershed.flow_distance = np.zeros(mg.number_of_nodes) flow_distance[ws_mask] = flow__distance[ws_mask] - flow__distance[outlet_id]Add the flow**__**distance field to the grid and show the spatial distribution of the distances from each node to the watershed's outlet:mg.add_field('flow_distance', flow_distance, at='node', clobber=True) figure() imshow_grid(mg, mg.at_node['flow_distance'], colorbar_label='flow distance (m)')函数- 函数可以用来定义可重复代码,组织和简化- 一般来说一个函数在实际开发中为一个小功能- 一个类为一个大功能- 同样函数的长度不要超过一屏 定义一个函数def function_name(list of parameters): do something![](../Photo/69.png)- 以前使用的random 或者range 或者print.. 
其实都是函数或者类 调用一个函数- functionName()- "()" 就代表调用import os os.system('say 你是谁???') def fun_name(): print("还有谁???") fun_name() #()代表调用一个函数 def fun_jiou(): ''' 这是小小写的判断奇数还是偶数 ''' a = eval(input(">>>")) if a%2==0: print("是偶数") else: print("奇数") fun_jiou() def y(x): return x**2 y(100) def fun_sushu(): number = eval(input(">>")) if number > 1: for i in range(2,number): if (number % i)==0: print(str(number),"不是素数") break else: print(str(number),"是素数") else: print(str(number),"不是素数") fun_sushu()>>9999 9999 不是素数![](../Photo/70.png) 带返回值和不带返回值的函数- return 返回的内容- return 返回多个值- 一般情况下,在多个函数协同完成一个功能的时候,那么将会有返回值 ![](../Photo/71.png)- 当然也可以自定义返回None EP:![](../Photo/72.png) 类型和关键字参数- 普通参数- 多个参数- 默认值参数- 不定长参数def san(num): # num = eval(input(">>")) return num**3 def liang(num): # mun = eval(input(">>")) return num**2 def input_(): num = eval(input(">>")) res3 = san(num) res2 = liang(num) print(res3 - res2) input_()>>10 900普通参数acount = '1172304538' password = '' def login(acount_,password_): if acount == acount_ and password == password_: print("登陆成功") else: print("登录失败") login('1172304538','') def yanzheng(): print("是否七天内免登陆?y/n") res = input(">>") a = input("请输入账户:") b = input("请输入密码:") if res == 'y': login(a,b)多个参数def test(*agrs,**kwagrs): print(agrs) print(kwagrs) test(11,22,33,s=100,a=200)(11, 22, 33) {'s': 100, 'a': 200}默认值参数 强制命名 不定长参数- \*args> - 不定长,来多少装多少,不装也是可以的 - 返回的数据类型是元组 - args 名字是可以修改的,只是我们约定俗成的是args- \**kwargs > - 返回的字典 - 输入的一定要是表达式(键值对)- name,\*args,name2,\**kwargs 使用参数名 变量的作用域- 局部变量 local- 全局变量 global- globals 函数返回一个全局变量的字典,包括所有导入的变量- locals() 函数会以字典类型返回当前位置的全部局部变量。 注意:- global :在进行赋值操作的时候需要声明- 官方解释:This is because when you make an assignment to a variable in a scope, that variable becomes local to that scope and shadows any similarly named variable in the outer scope.- ![](../Photo/73.png) Homework- 1![](../Photo/74.png)def getPentagonalNumber(n): i=1 for n in range(1,n): if i <100: m=(n*(3*n-1))/2 i+=1 print(int(m),end=" ") if i%10==0: print("\n") else: break getPentagonalNumber(100)1 5 12 22 35 51 70 92 117 145 176 210 247 287 330 376 425 477 532 590 651 715 782 852 925 1001 1080 1162 1247 1335 1426 1520 1617 1717 1820 1926 2035 2147 2262 2380 2501 2625 2752 2882 3015 3151 3290 3432 3577 3725 3876 4030 4187 4347 4510 4676 4845 5017 5192 5370 5551 5735 5922 6112 6305 6501 6700 6902 7107 7315 7526 7740 7957 8177 8400 8626 8855 9087 9322 9560 9801 10045 10292 10542 10795 11051 11310 11572 11837 12105 12376 12650 12927 13207 13490 13776 14065 14357 14652- 2 ![](../Photo/75.png)def sumDigits(n): gewei = n%10 shiwei = (n//10)%10 baiwei = n//100 m = gewei + shiwei + baiwei print(str(m)+"(",str(baiwei)+"+"+str(shiwei)+"+"+str(gewei),")") sumDigits(234)9( 2+3+4 )- 3![](../Photo/76.png)def ddd(a,b,c): if a>b: a,b=b,a if b>c: b,c=c,b if a>b: a,b=b,a return a,b,c ddd(3,5,1) def displaySortedNumbers(): a,b,c=eval(input("Enter three numbers:")) d = [a,b,c] d.sort() print("The sorted numbers are:",d) displaySortedNumbers() displaySortedNumbers()Enter three numbers:3,2.4,5 The sorted numbers are: [2.4, 3, 5]- 4![](../Photo/77.png) - 5![](../Photo/78.png)def printChars(ch1,ch2,numberPerLine):- 6![](../Photo/79.png) - 7![](../Photo/80.png)def distance(x1,y1,x2,y2): import math d=math.sqrt(math.pow(x1-x2,2)+math.pow(y1-y2,2)) print(d) distance(3,5,2,1)4.123105625617661- 8![](../Photo/81.png)def msh(): if p<=31: for i in range(2,7):Ice Cream Sundae - Ordering Menu You are expected to build an interactive application to order ice cream Sundays in an ice-cream parlour.You are expected to use the concepts you learnt 
from the Object-Oriented programming session. The complete building process is divided into questions given below after you solve all the questions you will the complete ordering application.Let's get started!! Question Declare a class "ice_cream". It needs to have the following constants and instance methods defined. 1. Radius of a small scoop (r_small = 1.5)2. Radius of large scoop (r_large = 2.5)3. Value of pie (pi = 3.14)4. An instance method "flavour" - it should print enter your flavour.Given below is the example of a sample class and its methods and variables for reference:class chocolate: chocolate_length=10 chocolate_breadth=2 def area(self): print("Enter your favourite chocolate")# Declare class here: class ice_cream: # Declare class variables: r_small = 1.5 r_large = 2.5 pi = 3.14 # Declare instance methods: def flavour(self): print("Enter your flavour") # Call the class: order = ice_cream() order.flavour()Enter your flavourQuestionModify the class "ice_cream" to add a method which can calculate the cost of the Ice cream based on its size. Cost of ice cream is 0.5$ per unit volume. Take the input from the user about what size of ice cream scoops they want small or large. Based on that calculate the volume of the scoop and use the volume and the cost per volume to calculate the cost of ice cream. Hint: Declare a class similar to example above. Add new function that calculates the cost depending on the scope size. Use If-else statement to calculate ice-cream cost.class ice_cream: r_small = 1.75 r_large = 2.5 pi = 3.14 def flavour(self): print("Enter your flavour") size = input("Would you like a small scoop or a large scoop (enter s/l)") cost = self.cost_ice_cream(size) print ("The cost of ice cream is ", cost) def cost_ice_cream(self, size): if size=="s": vol = 4/3 *(self.pi)* (self.r_small**3) cost = vol * 0.5 return cost elif size=="l": vol = 4/3 *(self.pi)* (self.r_large**3) cost = vol * 0.5 return cost else: print("Please enter a valid size") # Call the function: order = ice_cream() order.flavour()Enter your flavour Would you like a small scoop or a large scoop (enter s/l)l The cost of ice cream is 10.416666666666666Question In the above function find a way to round up the cost to the next integer value. Hint: In the same class as above, add a additional element to the cost variable that rounds up the cost to the next nearest integer.For example: if the cost is 10.41, the output should be 11$ For rounding up an integer value you could use either the ceil() or round()import math class ice_cream: r_small = 1.75 r_large = 2.5 pi = 3.14 def flavour(self): print("Enter your flavour") size = input("Would you like a small scoop or a large scoop (enter s/l)") cost = self.cost_ice_cream(size) print ("The cost of ice cream is ", math.ceil(cost)) def cost_ice_cream(self, size): if size=="s": vol = 4/3 *(self.pi)* (self.r_small**3) cost = vol * 0.5 return cost elif size=="l": vol = 4/3 *(self.pi)* (self.r_large**3) cost = vol * 0.5 return cost else: print("Please enter a valid size") # Call the function: order = ice_cream() order.flavour()Enter your flavour Would you like a small scoop or a large scoop (enter s/l)l The cost of ice cream is 11Question Modify the flavour function to give the options of available flavours and take as input the choice of the customer. 
The available options are Vanilla, Chocolate, Butterscotch, Blue_berry.Hint: Add a new function that asks the user to input the choice of flavour.import math class ice_cream: r_small = 1.5 r_large = 2.5 pi = 3.14 def flavour(self): print ("Available flavours of ice cream are Vanilla, Chocolate, Butterscotch, Blue_berry") flv = input("Which flavour of ice cream would you like ") size = input("Would you like a small scoop or a large scoop (enter s/l)") i_cost = self.cost_ice_cream(size) print ("The cost of ice cream is ", math.ceil(i_cost)) def cost_ice_cream(self, size): if size=="s": vol = 4/3 *(self.pi)* (self.r_small**3) cost = vol * 0.5 elif size=="l": vol = 4/3 *(self.pi)* (self.r_large**3) cost = vol * 0.5 else: print("Please enter a valid size") return cost order = ice_cream() order.flavour()Available flavours of ice cream are Vanilla, Chocolate, Butterscotch, Blue_berry Which flavour of ice cream would you like Chocolate Would you like a small scoop or a large scoop (enter s/l)l The cost of ice cream is 11Question Build a new class called "toppings". It should have all the functionality of the ice_cream class.The toppings class will also have a method which will take as input the choice of toppings that the customer wants.The available choices of toppings are: Hot_fudge, Sprinkles, Caramel, Oreos, NutsHint: Create a new class that ask the user to choose one or more toppingsclass toppings(ice_cream): def sel_toppings(self): print ("Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts") top = input ("Enter any number of toppings of your choice separated by a comma: ") top_list = top.split(",") print ("The toppings you selected are : ",top_list) sundae = toppings() sundae.sel_toppings()Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts Enter any number of toppings of your choice separated by a comma: Hot_fudge, Sprinkles, Caramel, Oreos, Nuts The toppings you selected are : ['Hot_fudge', ' Sprinkles', ' Caramel', ' Oreos', ' Nuts']Question Add a method to calculate the cost of selected toppings, given the cost of each of the topping is 2$. Hint: Now in the class for toppings, add a function to calculate the cost per topping added.class toppings(ice_cream): def sel_toppings(self): print ("Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts") top = input ("Enter any number of toppings of your choice separated by a comma: ") top_list = top.split(",") t_cost = self.top_cost(top_list) print ("The cost for selected toppings is ",t_cost) def top_cost(self, top_list): cost = len(top_list) * 2 return cost sundae = toppings() sundae.sel_toppings()Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts Enter any number of toppings of your choice separated by a comma: Hot_fudge, Sprinkles, Caramel, Oreos, Nuts The cost for selected toppings is 10Question: Now you have all the functionality needed to create the ordering menu. 1. An order can be for simply Ice Cream or an Ice Cream sundae. 2. There can be multiple items in an order. 3. Calculate the cost of each order placed.Hint: Club both the class you have created above and finally create an Ice-cream ordereing machine that display a welcome message: "Welcome to Ice Cream parlour". Asks the user if he/she wants and ice cream or ice cream-sundae. Ask the choice of flavour and toppings and returns the total cost. 
Dont forget to ask if the user wants another item after he finishes ordering the first one!import math class ice_cream: r_small = 1.5 r_large = 2.5 pi = 3.14 def flavour(self): print ("Available flavours of ice cream are Vanilla, Chocolate, Butterscotch, Blue_berry") flv = input("Which flavour of ice cream would you like ") size = input("Would you like a small scoop or a large scoop (enter s/l)") i_cost = self.cost_ice_cream(size) if order_type == "i": print ("The cost of the ice Cream is ", math.ceil(i_cost)) return math.ceil(i_cost) def cost_ice_cream(self, size): if size=="s": vol = 4/3 *(self.pi)* (self.r_small**3) cost = vol * 0.5 elif size=="l": vol = 4/3 *(self.pi)* (self.r_large**3) cost = vol * 0.5 else: print("Please enter a valid size") return cost class toppings(ice_cream): def sel_toppings(self): print ("Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts") top = input ("Enter any number of toppings of your choice separated by a comma: ") top_list = top.split(",") i_cost = self.flavour() t_cost = self.top_cost(top_list) print ("The cost for sunday is ",t_cost+ i_cost) def top_cost(self, top_list): cost = len(top_list) * 2 return cost print ("Welcome to the upGrad Ice Cream parlour") while True: print ("Would like an ice cream(i) or a Sundae(s)?") order_type = input("Enter your response (i/s)") if order_type == "i": order = ice_cream() order.flavour() elif order_type == "s": sundae = toppings() sundae.sel_toppings() else: print ("Enter a valid choice") print ("Would like to order anything else") more = input ("Enter your response as (y/n)") if more == "n": breakWelcome to the upGrad Ice Cream parlour Would like an ice cream(i) or a Sundae(s)? Enter your response (i/s)s Available toppings are Hot_fudge, Sprinkles, Caramel, Oreos, Nuts Enter any number of toppings of your choice separated by a comma: Hot_fudge, Sprinkles, Caramel, Oreos, Nuts Available flavours of ice cream are Vanilla, Chocolate, Butterscotch, Blue_berry Which flavour of ice cream would you like Chocolate Would you like a small scoop or a large scoop (enter s/l)l The cost for sunday is 21 Would like to order anything else Enter your response as (y/n)n3b_FactorialSearch notebook, Vehciels clustering analysisProject: clustering-analysis-domain-agnostic-features-2018Authors: , License: BSD 3-ClauseCopyright (c) 2021 Alliance for Sustainable Energy LLC DescriptionThis notebook uses Spark to parallelize a hyperparameter search over the clustering analysis. It produces the violin plots in Figure 7 and the data in Table 4.This notbook must be run with a spark context. 
The easiet way to do this is to boot up Spark using start_spark_jupyter_notebook.sh%%time %reset -f %load_ext autoreload %autoreload 2 %matplotlib inline import itertools import pyspark from pyspark.sql import SparkSession, Row import json import pandas as pd import numpy as np from sklearn.cluster import KMeans, DBSCAN from sklearn.decomposition import PCA from sklearn.manifold import TSNE import math, random import pickle from sklearn.metrics import silhouette_score from matplotlib import pyplot as plt sc = SparkSession.builder.getOrCreate().sparkContext # Load feature dataframes from CSV features_traditional = pd.read_csv("./data/FleetDNAETL_CoDA_epaprime_traditional_nolimit.csv").set_index("vdir").sort_index() features_agnostic = pd.read_csv("./data/FleetDNAETL_CoDA_epaprime_agnostic_50klimit.csv").set_index("vdir").sort_index() cols = [x for x in features_agnostic.columns if x[0]!='g'] features_agnostic = features_agnostic[cols] # Load metadata from CSV meta = pd.DataFrame.from_csv("./data/vehicle_specifications_fdna.csv") m = meta.reset_index() m["vdir"] = "v_" + m["id"].astype(str) m = m.set_index("vdir").sort_index() X_agnostic = features_agnostic X_traditional = features_traditional # Preprocessing X_agnostic = X_agnostic.dropna(axis=1) X_traditional = X_traditional.dropna(axis=1) def normalize(df): return (df-df.min())/(df.max()-df.min()) X_agnostic = normalize(features_agnostic).dropna(axis=1) X_traditional = normalize(features_traditional).dropna(axis=1) X_agnostic = X_agnostic.reindex_axis(sorted(X_agnostic.columns), axis=1) corr_matrix = X_agnostic.corr().abs() upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool)) to_drop = [column for column in upper.columns if any(upper[column] > 0.90)] X_agnostic = X_agnostic.drop(to_drop, axis=1) corr_matrix = X_traditional.corr().abs() upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool)) to_drop = [column for column in upper.columns if any(upper[column] > 0.90)] X_traditional = X_traditional.drop(to_drop, axis=1) X_traditional = normalize(X_traditional) X_agnostic = normalize(X_agnostic) def clusters(alg, data, index): C = alg.fit_predict(data) return pd.DataFrame(C).set_index(index) def do_projection(p,f): proj = p.fit_transform(f) proj = pd.DataFrame(proj).set_index(f.index) proj = normalize(proj) # This helps guard against bias due to scale return proj ### Define VI def variation_of_information_direct(X, Y): """ Compute the variation of information between two cluster labelings Should be bounded by 0 <= vi <= ln(n) """ dict_X = {} for x in set(X): dict_X[x] = [] for i in range(len(X)): dict_X[X[i]].append(i) dict_Y = {} for y in set(Y): dict_Y[y] = [] for i in range(len(Y)): dict_Y[Y[i]].append(i) n = float(len(X)) VI = 0 for i in dict_X.keys(): for j in dict_Y.keys(): Xi = set(dict_X[i]) Yj = set(dict_Y[j]) pi = len(Xi) / n qj = len(Yj) / n rij = len(Xi.intersection(Yj)) / n if rij != 0: VI = VI + rij * (math.log(rij / pi, 2) + math.log(rij / qj, 2)) return -VI def vi(left, right): comp = left.merge(right, how='inner', right_index=True, left_index=True) return variation_of_information_direct(np.array(comp["0_x"]), np.array(comp["0_y"])) # Set up the parallel computation #_bcast_ground_truth = sc.broadcast(ground_truth) _bcast_features_agnostic = sc.broadcast(X_agnostic) _bcast_features_traditional = sc.broadcast(X_traditional) _bcast_metadata = sc.broadcast(m) def unpack_projection(dict_projection): if dict_projection[u"type"] == u"PCA": return 
PCA(n_components=dict_projection[u"n_components"]) elif dict_projection[u"type"] == u"TSNE": return TSNE(perplexity=dict_projection[u"perplexity"]) else: return dict_projection def unpack_clusteting(dict_clustering): if dict_clustering[u"type"] == u"KMeans": return KMeans(n_clusters=dict_clustering[u"n"]) elif dict_clustering[u"type"] == u"DBSCAN": return DBSCAN(eps=dict_clustering[u"eps"]) else: return dict_clustering def unpack_features(dict_features): if dict_features == u"traditional": return _bcast_features_traditional.value elif dict_features == u"agnostic": return _bcast_features_agnostic.value else: raise Exception("Features value was {}".format(dict_features)) def unpack_labels(cd): return cd["features"], cd["projection"][u"type"], cd["clustering"][u"type"] def unpack(cd): features = unpack_features(cd["features"]) projection = unpack_projection(cd["projection"]) clustering = unpack_clusteting(cd["clustering"]) return features, projection, clustering def constraints(c): if (c[0].value_counts() < 10).any(): return "some cluster has less than 10" if len(set(c[0].to_dict().values())) < 3: return "not at least three clusters" else: return False def do_computation(computation_descriptor): # Unpack argument for this computation from Pyspark cell cd = json.loads(computation_descriptor) features, projection, clustering = unpack(cd) features_label, projection_label, clustering_label = unpack_labels(cd) # Execute the computation p = do_projection(projection, features) c = clusters(clustering, p, features.index) # Enforce constraints if constraints(c): return None # Compute metrics _silhouette_score = silhouette_score(p, c) _vi = {} m = _bcast_metadata.value ground_truth_cols = ['class', 'vocation', 'vehicletype', 'providers'] c.columns = map(str, c.columns) merged = c.merge(m[ground_truth_cols], how='inner', right_index=True, left_index=True) for col_name in ground_truth_cols: ourMerge = merged[["0", col_name]].dropna() vvi = variation_of_information_direct(np.array(ourMerge["0"]), np.array(ourMerge[col_name])) _vi["vi_{}".format(col_name)] = vvi ans = {"features":features_label, "projection":projection_label, "clustering":clustering_label, "silhouette_score":_silhouette_score} ans.update(_vi) return ans # Create a collection of experiment configuration parameters random.seed(2018) feature_opts = ["traditional", "agnostic"] clustering_opts_dbscan = [{"type":"DBSCAN", "eps":n} for n in np.linspace(0.01,.1,10)] clustering_opts_kmeans = [{"type":"KMeans", "n":n} for n in range(2,5)] clusetring_opts = itertools.chain(clustering_opts_dbscan, clustering_opts_kmeans) projection_opts_PCA = [{"type":"PCA", "n_components":2}] projection_opts_TSNE = [{"type":"TSNE", "perplexity":p} for p in np.linspace(5,50,10)] projection_opts = itertools.chain(projection_opts_PCA, projection_opts_TSNE) product_tuple = itertools.product(clusetring_opts, projection_opts, feature_opts) configuration_dict = [{"clustering":_c0, "projection":_c1, "features":_c2} for _c0, _c1, _c2 in product_tuple] configuration_json = map(json.dumps, configuration_dict) # Run map over parallelized rdd configuration_rdd = sc.parallelize(configuration_json, len(configuration_json)) spark_result = configuration_rdd.map(do_computation)CPU times: user 2.3 s, sys: 2.04 s, total: 4.34 s Wall time: 2.76 sThe following cell will execute a potentially time consuming spark joblen(configuration_json) answer = spark_result.collect() df = pd.DataFrame([a for a in answer if a])Factor Analysis# Table of factor sensitivities metrics = ["silhouette_score", 
"vi_class", "vi_vehicletype", "vi_vocation"] # Clustering c = df.groupby("clustering").agg(['mean', 'std', 'min']) f = df.groupby("features").agg(['mean', 'std', 'min']) p = df.groupby("projection").agg(['mean', 'std', 'min']) p["type"] = "projection" c["type"] = "clustering" f["type"] = "features" final_df = p.append(c).append(f) final_df.index.name='subtype' final_df.set_index('type', append=True, inplace=True) final_df = final_df.reorder_levels(['type', 'subtype']) final_df.round(2) f = open("paper_figs/latex_table.txt", "w") f.write(final_df.to_latex()) f.close()With Silhouette Scoreimport seaborn as sns a = df[["clustering", "silhouette_score"]].rename(columns={"clustering":"bin"}) b = df[["features", "silhouette_score"]].rename(columns={"features":"bin"}) c = df[["projection", "silhouette_score"]].rename(columns={"projection":"bin"}) df_long = a.append(b).append(c) sns.boxplot(x="bin", y="silhouette_score", data=df_long[df_long["silhouette_score"]>0.1]); a = df[["clustering", "silhouette_score", "features"]].rename(columns={"clustering":"bin"}) b = df[["projection", "silhouette_score", "features"]].rename(columns={"projection":"bin"}) df_long_split = a.append(b) fig = plt.figure(figsize=(5,4)) h = sns.violinplot(x="bin", y="silhouette_score", hue="features", data=df_long_split, split=True, inner="stick", palette="Set3"); h.set(xlabel='Algorithm', ylabel='Silhouette Score') df_long_split.groupby(["bin","features"]).agg(['mean', 'std']) fig.savefig("./paper_figs/violin_plot_silhouette_score.eps") a = df[["clustering", "silhouette_score", "features"]].rename(columns={"clustering":"bin"}) b = df[["projection", "silhouette_score", "features"]].rename(columns={"projection":"bin"}) df_long_split_ss = a.append(b) sns.violinplot(x="features", y="silhouette_score", data=df_long_split, split=True, inner="stick", palette="Set3"); df_long_split_ss.groupby(["features","bin"]).agg(['mean', 'std'])With VIa = df[["clustering", "vi_class", "vi_vocation", "vi_vehicletype", "features"]].rename(columns={"clustering":"bin"}) b = df[["projection", "vi_class", "vi_vocation", "vi_vehicletype", "features"]].rename(columns={"projection":"bin"}) df_long_split = a.append(b) fig = plt.figure(figsize=(5,4)) h = sns.violinplot(x="bin", y="vi_vehicletype", hue="features", data=df_long_split, split=True, inner="stick", palette="Set3") h.set(xlabel='Algorithm', ylabel='Variation of Information to Vehicle Type') fig.savefig("./paper_figs/violin_plot_vi_type.eps") fig = plt.figure(figsize=(5,4)) h = sns.violinplot(x="bin", y="vi_vocation", hue="features", data=df_long_split, split=True, inner="stick", palette="Set3") h.set(xlabel='Algorithm', ylabel='Variation of Information to Vehicle Vocation') fig.savefig("./paper_figs/violin_plot_vi_vocation.eps") fig = plt.figure(figsize=(5,4)) h = sns.violinplot(x="bin", y="vi_class", hue="features", data=df_long_split, split=True, inner="stick", palette="Set3"); h.set(xlabel='Algorithm', ylabel='Variation of Information to Vehicle Class') fig.savefig("./paper_figs/violin_plot_vi_class.eps") df_long_split.groupby(["bin","features"]).agg(['mean', 'std']).round(2)![SegmentLocal](texture_single_pose_MV.gif "segment")canvas_opt, *_ = get_body_image_from_mesh(model.cur_mesh, body_estimation, renderer) fig = plt.figure(figsize=(30, 10)) ax = fig.add_subplot(1, 3, 1) ax.imshow(canvas_orig) ax.set_title("Start Pose") ax = fig.add_subplot(1, 3, 2) ax.imshow(canvas_opt) ax.set_title("optmized result") ax = fig.add_subplot(1, 3, 3) ax.imshow(canvas_target) ax.set_title("Target")Clipping 
input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).Collection of snippets for Pandasimport pandas as pdAny `pd.Series` can also be a `pd.DataFrame`my_list = [1, 2, 3] index = ["This", "That", "What!"] pd.DataFrame(my_list, index=index) pd.DataFrame(my_list, columns=['a_number'], index=index)Creating dataframes using dictionariesmy_dict = {'this': 1, 'that': 2, 'what!': 3}We know that each row is a `pd.Series`pd.Series(my_dict) pd.DataFrame(pd.Series(my_dict))Q: What would be the output of this?pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])Before moving on, I also want to talk about numpy vectorization again!np.array([1, 3, 4, 5, 5]) my_arr = np.array([1, 3, 4, 5, 5]) my_arr == 5Want to get the slice using filtermy_arr[my_arr == 5]You can do it similarly using pandas!weather_df.head()Q: You need to return rows that has 'Weather == Kya Chal raha hai' `df.query`>DataFrame.query: Query the columns of a frame with a boolean expression.# Using loc or iloc # NOTE: slice is `:5:2`, not 6 ! # df.loc[row number/slice, 'Column name/s'] ## iloc == numpy slicing! Damn! ## df.iloc[row number/slice, column number/slice]You can do a lot of things with the indices as well: `df.sort_index`weather_df2.sort_index(ascending=False).head()If you have sort values by index, then shouldn't there be sort_by_value?weather_df2.sort_values(by=['Temp (C)', 'Dew Point Temp (C)'], ascending=False)Add a few rows as listsdate_time = ['2012-13-01', '2013-01-01', '2013-01-02', '2012-01-02']Letting few of them to be NaNstemp = [np.nan, np.nan, 30, 12] dew_pt_temp = [-2, np.nan, np.nan, -1] relative_humidity = [np.nan, np.nan, np.nan, np.nan] wind_speed = [np.nan, np.nan, np.nan, 50] visibility = [np.nan, 10.0, np.nan, 12.1] stn_pressure = [104.1, np.nan, 101.2, 101.24] weather = ['Snow', None, None, 'Fog']Create a new dataframepd.DataFrame([date_time, temp, dew_pt_temp, relative_humidity, wind_speed, visibility, stn_pressure, weather]) weather_df3 = pd.DataFrame([date_time, temp, dew_pt_temp, relative_humidity, wind_speed, visibility, stn_pressure, weather]).T weather_df3.columns.tolist() weather_df3.columns = ['Date/Time', 'Temp (C)', 'Dew Point Temp (C)', 'Rel Hum (%)', 'Wind Spd (km/h)', 'Visibility (km)', 'Stn Press (kPa)', 'Weather'] weather_df3.columns.tolist()Display the dataframeweather_df3.head()Append the `weather_df2` and `weather_df3` by rowsweather_df_appended_1 = weather_df.append(weather_df3) weather_df_appended_1.reset_index(drop=True, inplace=True) weather_df_appended_1.tail(10) weather_df_appended2 = weather_df.append(weather_df3, ignore_index=True)Dealing with missing valuesweather_df.tail()How do you filter out the NaNs* Check the NaNs: `df.isna()`* Check the Null values: `df.isnull()` How do you treat those nulls or NaNs:* Drop the rows entirely!: `df.dropna()`* Fill the values with something: `df.fillna()` We want count of the values: `.sum()`weather_df_appended2.isnull().sum()What if we just want to check if there's null data or not?: `.any()`weather_df_appended2.isnull().any()Finding the count of nan and empty strings (yes, empty string "" or " " is not handled separately, occurs mostly in character and categorical variable list) Remember the numpy aggregate operations? 
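Since empty strings slip past `isnull()`, here is a small illustrative sketch of counting NaNs and blank strings in one pass; the dataframe and column name follow the example above, but this combined check is an addition, not part of the original notebook.
```python
# Count values that are either NaN or a blank/whitespace-only string (illustrative).
col = weather_df_appended2['Weather']
n_missing = (col.isna() | col.astype(str).str.strip().eq('')).sum()
print(n_missing)
```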
using df.mean to do fill operationsweather_df_appended2['Temp (C)'].mean() weather_df_appended2['Temp (C)'].fillna(value=weather_df_appended2['Temp (C)'].mean()).tail()Working with string, more examples!monte = pd.Series(['', '', '', '', '', ''])There are generic methods: `.str.` But there's one essential thing: using regexes```bashMethod Descriptionmatch() Call re.match() on each element, returning a boolean.extract() Call re.match() on each element, returning matched groups as strings.findall() Call re.findall() on each elementreplace() Replace occurrences of pattern with some other stringcontains() Call re.search() on each element, returning a booleancount() Count occurrences of patternsplit() Equivalent to str.split(), but accepts regexpsrsplit() Equivalent to str.rsplit(), but accepts regexps``` Using DateTimefrom datetime import datetime datetime.now()`strftime` formatting in python [One stop source](http://strftime.org/)from dateutil import parser date = parser.parse("4th of July, 2015") date date.strftime("%d") dates = pd.to_datetime([datetime(2015, 7, 3), '4th of July, 2015', '2015-Jul-6', '07-07-2015', '20150708', '2010/11/12', '2010.11.12']) datesFormatting is baked in `pd.to_datetime`pd.to_datetime('2010/11/12', format='%Y/%m/%d')We talking about this yesterday!pd.date_range(start, periods=1000, freq='M')Requested snippet: What if there's a date: '13-13-2018'dates = pd.to_datetime([datetime(2015, 7, 3), '4th of July, 2015', '2015-Jul-6', '07-07-2015', '20150708']) datesWorking with Categorical Variablesweather_df_appended2['Weather'].head() #pd.get_dummies(weather_df, columns=['Weather'])Few other tricks: * Reading directly from a simple html webpagetables = pd.read_html("http://www.basketball-reference.com/leagues/NBA_2016_games.html")Basic image segmentation using CamVid dataset representation%reload_ext autoreload %autoreload 2 %matplotlib inline from fastai.vision import * from fastai.callbacks.hooks import * from fastai.utils.mem import * path = Path("/storage_2/dataset_segmentation/camvid/") path.ls() path_img = path / "images" path_img path_lbl = path / "labels" path_lbl doc(SegmentationItemList)Datafnames = get_image_files(path_img) fnames[:3] lbl_names = get_image_files(path_lbl) lbl_names[:3] img_f = fnames[2] img = open_image(img_f) img.show(figsize=(5, 5)) import re get_y_fn = lambda x: Path(re.sub("/images/", "/labels/", str(x))) mask = open_mask(get_y_fn(str(img_f))) mask.show(figsize=(5, 5), alpha=1) src_size = np.array(mask.shape[1:]) src_size, mask.data codes = np.loadtxt(path / 'codes.txt', dtype='str'); codesDatasets# size = src_size // 2 size = src_size free = gpu_mem_get_free_no_cache() # the max size of bs depends on the available GPU RAM if free > 8200: bs=8 else: bs=4 print(f"using bs={bs}, have {free}MB of GPU RAM free") src = (SegmentationItemList.from_folder(path_img) .split_by_fname_file('../valid.txt') .label_from_func(get_y_fn, classes=codes)) data = (src.transform(get_transforms(), size=size, tfm_y=True) .databunch(bs=bs) .normalize(imagenet_stats)) data.show_batch(2, figsize=(10,7)) data.show_batch(2, figsize=(10,7), ds_type=DatasetType.Valid)Model# metrics = accuracy wd = 1e-2 # learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd) learn = unet_learner(data, models.resnet34, wd=wd) lr_find(learn) learn.recorder.plot() lr=3e-3 learn.fit_one_cycle(10, slice(lr), pct_start=0.9) learn.save('stage-1') learn.load('stage-1'); # learn.show_results(rows=3, figsize=(8, 9)) learn.show_results(rows=2)Basic Model Testingimport os import numpy 
as np import torch from torch import nn from torch.nn import functional as F import torch.utils.data as td import torchvision as tv import pandas as pd from PIL import Image from matplotlib import pyplot as plt #mod1 = torch.load('/datasets/home/27/827/ausant/ECE285 Project/MoDL_CenterNet/models/ctdet_coco_resdcn18.pth') #for name in mod1['state_dict']: # print(name)Loading the Datasetimport os import sys sys.path.append(sys.path[0]+'/../lib') # Add library folder #print(sys.path) from opts import opts from datasets.dataset_factory import get_dataset from datasets.dataset.coco import COCO from datasets.sample.ctdet import CTDetDataset Dataset = get_dataset('coco', 'ctdet')Create opt for passing to the constructor. \Also pass a string with the training valueopt = type('', (), {})() opt.data_dir = sys.path[0]+'/../../data/' opt.task = 'ctdet' split = 'train' dataset = Dataset(opt,split) all_Ids=dataset.coco.getImgIds() print(len(all_Ids)) import skimage.io as io img_dir='/datasets/home/30/230/psarangi/dataset_l/images/train2017/' N=5 kld=np.zeros(10) for iter in range(10): imgIds_perm=np.random.permutation(len(all_Ids)) tmp=imgIds_perm[0:N].astype(int) tmp2=[all_Ids[t] for t in tmp] dataset.images=tmp2 dataset.num_samples=len(dataset.images) sub_inst_cat=np.zeros(90) for j in range(N): sub_cat_lab=[] #print(dataset.images[j],all_Ids[imgIds_perm[j]]) img = dataset.coco.loadImgs(dataset.images[j])[0] #id_vec.append(img['id']) f_name=img_dir f_name+=img['file_name'] print(f_name) I = io.imread(f_name) #print(img['coco_url']) #plt.figure() #plt.imshow(I) annIds = dataset.coco.getAnnIds(imgIds=img['id']) anns = dataset.coco.loadAnns(annIds) sub_cat_lab=[k['category_id'] for k in anns] for jj in range(90): t=np.where(np.asarray(sub_cat_lab)==jj) sub_inst_cat[jj-1]+=t[0].shape[0] #print(sub_inst_cat/np.sum(sub_inst_cat),np.sum(sub_inst_cat)) prob_sub=(sub_inst_cat+1)/np.sum(sub_inst_cat+1) #print(np.log(prob1/(prob_sub+0.001))) #kld[iter]=np.sum(prob1*np.log(prob1/prob_sub)) plt.plot(sub_inst_cat/(np.sum(sub_inst_cat))) print(dataset.images) #plt.show() #plt.figure() #print(kld) #x=np.arange(90) #print(x.shape,prob1[0,:].shape) #plt.plot(x,prob1[0,:])/datasets/home/30/230/psarangi/dataset_l/images/train2017/000000443880.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000496402.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000530187.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000497878.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000394784.jpg [443880, 496402, 530187, 497878, 394784] /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000337390.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000191501.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000522660.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000094271.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000144896.jpg [337390, 191501, 522660, 94271, 144896] /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000578652.jpg /datasets/home/30/230/psarangi/dataset_l/images/train2017/000000493210.jpg /datasets/home/30/2[...]IPAN - ImageJ Processing Assistant Notebook 02 - Working with ImageJ pt.2**PlugIns and generalized macro functions** *** Import Libraries and packagesimport scyjava # {optional} Set memory pool from skimage import io # ImageJ opener, open the image on the background ImageJ from IPython.display import Image # Display only images import os # Used to manipulate and 
create directory with python. import tifffile # Save .tif files from numpy arrays*** Import ImageJBy default, the ImageJ2 gateway will not include the legacy layer for backwards compatibility with the original ImageJ. The legacy layer is necessary for macros and any ImageJ-based plugins. To use original ImageJ1 macros and plugins I must initiate the environment with **legacy supported** (second argument of the imagej initialization). If in doubt, you can check the ij.legacy().isActive() function to see if your initialization worked properly. With the following initialization, we can work with ImageJ1 plugins with a stable and fixed version of the software (first argument of the imagej initialization). In the alternative (shown below as text), we can import the desired list of plugins from a local repository.Run the imageJ initialization **only 1 time**, otherwise, the legacy state becomes inactive%%capture test ## Change the memory pool if required! #scyjava.config.add_options('-Xmx2g') # <--- Example: set 2G memory. #The initialization of ImageJ with the following arguments returns mulitple warnings massages #but they do not affect the functionality of the notebook. import imagej ij = imagej.init(['sc.fiji:fiji:2.0.0-pre-10', 'net.imagej:imagej-legacy']) print("The actual version of ImageJ is:", ij.getApp().getInfo(True))The actual version of ImageJ is: ImageJ 2.0.0-rc-71/1.52i; Java 1.8.0_302 [x86_64]; 51MB of 910MBALTERNATIVE initialization (make it a code chunk to run): Upload local pluginsplugins_dir = '/Applications/Fiji.app/plugins'scyjava.config.add_option(f'-Dplugins.dir={plugins_dir}')Initialize ImageJ with legacy for retrocompatibility with IMAGEJ1ij = imagej.init(['net.imagej:imagej', 'net.imagej:imagej-legacy']) With the forollowing command we can ask if the legagy for retrocompatibility is active. To run this notebook is essential that the legacy is ACTIVE.print("Legacy state is active:\t", ij.legacy.isActive())Legacy state is active: True*** Run a PLUG IN to process the image Filters Open the image within a macro and show it to screenij.py.run_macro("""run("Blobs (25K)");""") # Open the sample blob image blobs = ij.py.active_image_plus() # Assign it to a JN varible ij.py.show(blobs)Run a plugin mean (IJ1 plugin) and display the result to screenplugin = 'Mean' args = { 'block_radius_x': 10, 'block_radius_y': 10 } ij.py.run_plugin(plugin, args) filtered_blobs = ij.py.active_image_plus() ij.py.show(filtered_blobs)Perform the image processing filter on a local image previously opened within the macro#DEFINE INPUT AND OUTPUT cwd = os.getcwd() #Get current directory IMAGES_dir = cwd + "/IMAGES" RESULTS_dir = cwd + "/RESULTS" try: os.mkdir(RESULTS_dir) except FileExistsError: print("This directory already exist!") print("The images are imported from: ", IMAGES_dir) print("The results are exported in: ", RESULTS_dir) # FOR ALL THE FOLLOWING MACRO # The arguments input/ouput directories will be the same. # The arguments input/ouput filename will be defined before running the macro. 
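# Added note on the convention used from here on: each key of the args_IO
# dictionary passed to ij.py.run_macro() fills the matching ImageJ script
# parameter declared with "#@ String ..." at the top of the macro text, and
# values declared with "#@output String ..." are read back afterwards via
# result.getOutput("<name>").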
INPUT_filename = "input filename" # <--- THIS VARIABLE MUST BE RE-SET BEFORE CALLING THE MACRO OUTPUT_filename = "output filename" # <--- THIS VARIABLE MUST BE RE-SET BEFORE CALLING THE MACRO args_IO = { 'dir_in': f"{IMAGES_dir}", 'dir_out' : f"{RESULTS_dir}", 'filename_in' : f"{INPUT_filename}", 'filename_out' : f"{OUTPUT_filename}" } #OPEN THE IMAGE and compute the IMAGE PROFILE macro_OpenProfile = """ //DEFINE IO #@ String dir_in #@ String dir_out #@ String filename_in #@ String filename_out #@output String path_in #@output String path_out path_in = dir_in + "/" + filename_in path_out = dir_out + "/" + filename_out //OPEN IMAGE open(path_in) //MAKE AND SAVE PROFILE ON ORIGINAL IMAGE H = getHeight(); W = getWidth(); makeLine(0, 0, W, H); run("Plot Profile"); saveAs(".png", path_out); //SELECT IMAGE FOR NEXT STEP selectWindow(filename_in); """ args_IO['filename_in'] = "image3.tif" args_IO['filename_out'] = "image3_profile.png" result = ij.py.run_macro(macro_OpenProfile, args_IO) INPUT_path = result.getOutput("path_in") OUTPUT_path = result.getOutput("path_out") print("\nINPUT file:", INPUT_path) print("\nOUTPUT file:", OUTPUT_path)[INFO] script:macro.ijm = [[path_in, path_out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image3.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image3_profile.png]] INPUT file: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image3.tif OUTPUT file: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image3_profile.pngShow the original image and the profile computed on it. We can show the image just opened with the macroOpenProfile because of the last line of the macro *selectWindow(filename_in)* select the image of interest and allow to display it to screen by calling the *ij.py.active_image_plus()* command.analysed_image = ij.py.active_image_plus() print("\nORIGINAL IMAGE:") ij.py.show(analysed_image) print("\nORIGINAL IMAGE PROFILE:") Image(f"{OUTPUT_path}")ORIGINAL IMAGE:By running the plugin we process the active image on ImageJ. We can then compute the profile on the processed picture after save it and show it to screen.plugin = 'Mean' args_mean = { 'block_radius_x': 10, 'block_radius_y': 10 } ij.py.run_plugin(plugin, args_mean)After running the plugin, the filtered image is active. At this point, by running a macro we would modify this image. Also here we can see and display the image to screen by calling the *ij.py.active_image_plus()* command.filtered = ij.py.active_image_plus() ij.py.show(filtered)**HOW TO SAVE:** To save this file from the notebook we need to convert the resulted image in a NumPy array with *ij.py.from_java()* command and then write the file as .tif with the *tifffile.imwrite()* command. For this purpose, we need to specify the destination path.#Convert to numpy numpy_filtered = ij.py.from_java(filtered) print(type(numpy_filtered)) #Save with tifffile to IMAGES_dir so that we can use it for future processing tosave_path_file = IMAGES_dir + "/image3_filtered.tif" tifffile.imwrite(tosave_path_file, numpy_filtered, imagej=True) #The DEBUG messages do not affect the functionality of the code #Re-open the previous image as demonstration saved_image = ij.io().open(tosave_path_file) ij.py.show(saved_image)To compute the profile on the filtered picture we can use the previous macro (macro_OpenProfile) and just change the Input/Output arguments. 
With this example I showed how we can re-use a macro by **personalising the filename_in and filename_out**.args_IO['filename_in'] = "image3_filtered.tif" args_IO['filename_out'] = "image3_filtered_profile.png" result = ij.py.run_macro(macro_OpenProfile, args_IO) INPUT_path = result.getOutput("path_in") OUTPUT_path = result.getOutput("path_out") print("\nINPUT file:", INPUT_path) print("\nOUTPUT file:", OUTPUT_path) filtered_image = ij.io().open(f"{INPUT_path}") print("\nFILTERED IMAGE:") ij.py.show(filtered_image) print("\nFILTERED IMAGE PROFILE:") Image(f"{OUTPUT_path}")FILTERED IMAGE:*** Build general functions out of Macros and PyimageJIt is now possible to combine all the code shown above and make a general function that can do the job in the background and run ImageJ tasks by calling it. Below, I show an example of a function that opens an image and computes the profile plot along the diagonal (from upper-left to bottom_right).#MACRO OPEN AND SHOW IMAGE & PROFILE def OpenAndProfile(INPUT_filename = "", path_in = "", path_out = ""): #CREATE files/folders #FOR SPECIFIED dir_in dir_out if len(INPUT_filename) == 0: INPUT_filename = path_in.rsplit('/', 1)[1] path_in = path_in.rsplit('/', 1)[0] #FOR SPECIFIED INPUT_filename in IMAGE folder (located in the same directory of the notebook) cwd = os.getcwd() #Get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" if len(path_out) == 0: path_out = cwd + "/RESULTS" OUTPUT_filename = INPUT_filename.rsplit('.', 1)[0] + "_profile.png" try: os.mkdir(path_out) except FileExistsError: print("This output directory already exist.") print("\n") print("The images are imported from: ", path_in) print("The results are exported in: ", path_out) print("\n\n") #INITIALIZE arguments args_IO = { 'dir_in': f"{path_in}", 'dir_out' : f"{path_out}", 'filename_in' : f"{INPUT_filename}", 'filename_out' : f"{OUTPUT_filename}" } #DEFINE ImageJ macro commands macro_OpenProfile = """ //DEFINE IO #@ String dir_in #@ String dir_out #@ String filename_in #@ String filename_out #@output String in #@output String out in = dir_in + "/" + filename_in out = dir_out + "/" + filename_out //OPEN IMAGE open(in) //MAKE AND SAVE PROFILE ON ORIGINAL IMAGE H = getHeight(); W = getWidth(); makeLine(0, 0, W, H, 5); run("Plot Profile"); saveAs(".png", out); //SELECT IMAGE FOR NEXT STEP selectWindow(filename_in); """ #RUN the Macro macro_results = ij.py.run_macro(macro_OpenProfile, args_IO) #GET the results INPUT_path = macro_results.getOutput("in") OUTPUT_path = macro_results.getOutput("out") results = (INPUT_path, OUTPUT_path) #PRINT the results image = ij.py.active_image_plus() print("\nIMAGE:") ij.py.show(image, cmap = "gray") profile = io.imread(f"{OUTPUT_path}") print("\nIMAGE PROFILE:") ij.py.show(profile, cmap = "gray") #Return the input_file name. This is important because it will be needed to name and save the resulted images global filename filename = INPUT_filenameBy default, the function takes in input an image of user choise present in the IMAGE folder and output the result in the RESULT folder.#RUN THE FUNCTION OpenAndProfile("image1.tif")This output directory already exist. 
The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES The results are exported in: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image1.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image1_profile.png]] IMAGE:Unfortunatly, the profile plot cannot be displayed from the funtion with the command *Image()* and the *ij.py.show()* does not allow the user to modify the arguments (to increase the size of the image) of the implemented function *pyplot.show()* the documentation is available [here](https://github.com/imagej/pyimagej/blob/master/imagej/__init__.py:~:text=return%20coords-,def,-show(self%2C%20image) RUN OpenAndProfile() on any imagesIn the example shown below I show how to run this function on any image. The requested arguments are *path_in*: the path of the file I want to work on, and *path_out:* the path of directory where I want to save the result (in this case the profile plot).#EXAMPLE: OpenAndProfile(path_in = "/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image2.jpeg", path_out = "/Users/nicolascristini/Desktop/Results")This output directory already exist. The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES The results are exported in: /Users/nicolascristini/Desktop/Results [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image2.jpeg, /Users/nicolascristini/Desktop/Results/image2_profile.png]] IMAGE:At this point We can process our image with a sequence of python functions embedding different macro commands. All the Image Processing functions are included in separate module called IPAN.py. The image opened with the function OpenAndProfile() is still the active image and it can be easily called from the notebook. All the following functions will affect the active image and they will leave the resulted image as the last selected one. Before going to the following section it is a good practice to close all the images and windows open in the background. In this way, we are sure that only one image enters the processing pipeline.#This macro should close all images and windows ij.py.run_macro(""" close("*"); run("Close All"); """)*** Image Processing**By using multiple embedded python functions we can process and get results directly from the image. For example, I will perform filtering, processing, segmentation and analyse particles on a fluorescent DABI image. The purpose of the following pipeline is to identify the nuclei and to count them.** **OPEN IMAGE AND GET PROFILE** To simplify the use of ImageJ from the notebook it is possible to use pre-compiled functions that work like the one shown above. The set of functions is located in the ProcessingFunction.py module. 
The list of functions:* Open() * Profile()* Filter()* SubtractBackground()* Threshold()* Count() To show how the functions work, I will include the code in this notebook and proceed with the image analysis.def Open(INPUT_filename = "", path_in = ""): # make variables available to all functions and to name and save the following resulted images # 1 - CREATE path for input/outputt # FOR SPECIFIED INPUT_filename in IMAGE folder - located in the directory of the notebook - if len(INPUT_filename) == 0: INPUT_filename = path_in.rsplit('/', 1)[1] path_in = path_in.rsplit('/', 1)[0] + "/" #FOR SPECIFIED INPUT_filename in IMAGE folder (located in the same directory of the notebook) cwd = os.getcwd() #Get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES/" print("\n") print("The images are imported from: ", path_in) # 2 - ImageJ macro text # INITIALIZE dictionary with arguments args_IO = { 'dir_in' :f"{path_in}", 'filename_in' : f"{INPUT_filename}"} macro_Open = """ #@ String dir_in #@ String filename_in #@output String in // CREATE OUTPUT in = dir_in + filename_in // OPEN IMAGE open(in) """ # RUN the Macro macro_results = ij.py.run_macro(macro_Open, args_IO) # PRINT the results opened_image = ij.py.active_image_plus() ij.py.show(opened_image, cmap = "gray") print("\nIMAGE ⬆︎") print("\nImage path:", f"{path_in + INPUT_filename}") # MAKE variables available for following steps global filename filename = INPUT_filename Open("image4.tif") def Profile(path_in = "", path_out= ""): # 1 - Multiple working situations: #a) The image is left activated by the the Open() function, filename is a global variable. INPUT_filename = filename OUTPUT_filename = INPUT_filename.rsplit('.', 1)[0] + "_profile.png" # create OUTPUT_filename #b) The profile is computed on a non open image. 
INPUT_filename of path_in is specified # MISSING (WORK WITH CLASSES TO IMPLEMENT THIS OPTION) cwd = os.getcwd() #Get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" if len(path_out) == 0: path_out = cwd + "/RESULTS" try: os.mkdir(path_out) except FileExistsError: print("The output directory already exist.") # PRINT file/folder info print("---------------------------------------------------") print("\n") print("The images are imported from: ", path_in) print("INPUT_filename:", INPUT_filename) print("\n") print("The results are exported in: ", path_out) print("OUTPUT_filename:", OUTPUT_filename) print("\n") print("---------------------------------------------------") # 2 - ImageJ macro profile text # INITIALIZE dictionary with arguments args_IO = { 'dir_in' :f"{path_in}", 'dir_out' : f'{path_out}', 'filename_in' : f'{INPUT_filename}', 'filename_out' : f'{OUTPUT_filename}' } # ImageJ macro commands macro_Profile = """ //DEFINE IO #@ String dir_in #@ String dir_out #@ String filename_in #@ String filename_out #@output String in #@output String out title = getTitle(); in = dir_in + "/" + filename_in out = dir_out + "/" + filename_out //MAKE AND SAVE PROFILE ON ORIGINAL IMAGE H = getHeight(); // get image size W = getWidth(); makeLine(0, 0, W, H, 5); // make line with run("Plot Profile"); // make Profile saveAs(".png", out); run("Close"); // SELECT THE INPUT IMAGES AS LAST COMMAND FOR THE FOLLOWING STEPS selectWindow(title); """ #RUN the Macro macro_results = ij.py.run_macro(macro_Profile, args_IO) #GET the results INPUT_path = macro_results.getOutput("in") OUTPUT_path = macro_results.getOutput("out") results = (INPUT_path, OUTPUT_path) profile = io.imread(f"{OUTPUT_path}") ij.py.show(profile, cmap = "gray") print("IMAGE PROFILE ⬆︎") # Select the previous active image to reselect it at the end of the function for next steps! image = ij.py.active_image_plus() Profile()The output directory already exist. --------------------------------------------------- The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES INPUT_filename: image4.tif The results are exported in: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS OUTPUT_filename: image4_profile.png --------------------------------------------------- [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image4.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image4_profile.png]]From the profile shown above, we can see that in the image there is a lot of noise and background that must be deleted to perform a better analysis. **FILTER with MEAN**def Filter(path_in = "", path_out = ""): global filename, image # This functions apply the mean filter plug-in with default radius = 2 pixels. # the resulted filtered image will be shown to screen and saved to the IMAGES folder. # 1 - Multiple working situations: #a) The image is left activated by the previous function, filename is a global variable. 
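# Added note: the next line builds OUTPUT_filename by stripping the file
# extension and appending "_filtered.tif"; the later stages strip the previous
# stage's suffix instead, so for this example the chain of files is
# image4.tif -> image4_filtered.tif -> image4_subtracted.tif -> image4_thresholded.tif.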
INPUT_filename = filename OUTPUT_filename = INPUT_filename.rsplit('.', 1)[0] + "_filtered.tif" # get the IMAGES folder path to save the resulted image cwd = os.getcwd() # get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" path_image_out = path_in + "/" + OUTPUT_filename # PRINT file/folder info print("---------------------------------------------------") print("\n") print("The images are imported from: ", path_in) print("INPUT_filename:", INPUT_filename) print("\n") print("The results are exported in: ", path_image_out) print("OUTPUT_filename:", OUTPUT_filename) print("\n") print("---------------------------------------------------") # 2 - RUN the plugin plugin = 'Mean' args_mean = { 'block_radius_x': 4, 'block_radius_y': 4} ij.py.run_plugin(plugin, args_mean) # SAVE the resulted filtered image filtered = ij.py.active_image_plus() numpy_filtered = ij.py.from_java(filtered) tifffile.imwrite(path_image_out, numpy_filtered, imagej=True) # PRINT the results ij.py.show(filtered, cmap = "gray") print("\nIMAGE ⬆︎") # ACTIVATE resulted image filtered = ij.py.active_image_plus() # SAVE resulted filename filename = OUTPUT_filename Filter() Profile()The output directory already exist. --------------------------------------------------- The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES INPUT_filename: image4_filtered.tif The results are exported in: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS OUTPUT_filename: image4_filtered_profile.png --------------------------------------------------- [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image4_filtered.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image4_filtered_profile.png]]**SUBTRACT BACKGROUND**from documentation [here](https://imagej.net/plugins/rolling-ball-background-subtraction):This plugin tries to correct for unevenly illuminated background by using a “rolling ball” algorithm.A local background value is determined for every pixel by averaging over a very large ball around the pixel. This value is hereafter subtracted from the original image, hopefully removing large spatial variations of the background intensities. The radius should be set to at least the size of the largest object that is not part of the background.Subtract the background from a nuclei image. The profile plot can show us the amount of noise and background that can be resolved with the application of the right plugin.def SubtractBackground(path_in = "", path_out = ""): global filename # This functions apply a ImageJ macro to use the rolling subtract background plug in with default 50 pixel diameter. # the resulted subtracted image will be shown to screen and saved to the IMAGES folder. # 1 - Multiple working situations: #a) The image is left activated by the previous function, filename is a global variable. 
INPUT_filename = filename OUTPUT_filename = INPUT_filename.rsplit('_', 1)[0] + "_subtracted.tif" path_image_out = path_in + "/" + OUTPUT_filename # get the IMAGES folder path to save the resulted image cwd = os.getcwd() # get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" path_image_out = path_in + "/" + OUTPUT_filename # PRINT file/folder info print("---------------------------------------------------") print("\n") print("The images are imported from: ", path_in) print("INPUT_filename:", INPUT_filename) print("\n") print("The results are exported in: ", path_image_out) print("OUTPUT_filename:", OUTPUT_filename) print("\n") print("---------------------------------------------------") # 2 - ImageJ macro subtract background text # INITIALIZE dictionary with arguments macro_SubtractBackground = """ // macro commands run("Enhance Contrast...", "saturated=0.35"); // Run the default contract run("Subtract Background...", "rolling=50 disable"); // Run the default contract """ ij.py.run_macro(macro_SubtractBackground) #SAVE the resulted subtracted image subtracted = ij.py.active_image_plus() numpy_subtracted = ij.py.from_java(subtracted) tifffile.imwrite(path_image_out, numpy_subtracted, imagej=True) #PRINT the results ij.py.show(subtracted, cmap = "gray") print("\nIMAGE ⬆︎") # ACTIVATE resulted image subtracted = ij.py.active_image_plus() # SAVE resulted filename filename = OUTPUT_filename SubtractBackground() Profile()The output directory already exist. --------------------------------------------------- The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES INPUT_filename: image4_subtracted.tif The results are exported in: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS OUTPUT_filename: image4_subtracted_profile.png --------------------------------------------------- [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image4_subtracted.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image4_subtracted_profile.png]]As shown in the profile, now the pics of intensity are much more highlighted. We can move to the next step that consists of the application of the threshold.def Threshold(path_in = "", path_out = ""): global filename # This functions apply a ImageJ macro to applly the Threshold. Now setted as Li. # the resulted thresholded image will be shown to screen and saved to the IMAGES folder. # In the following macro, I had to save the image from the macro-text becuase the method used before gave an ERROR # 1 - Multiple working situations: #a) The image is left activated by the previous function, filename is a global variable. 
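# Added note: although the docstring above mentions Li, the macro further down
# applies setAutoThreshold("Default dark") followed by "Convert to Mask" and
# "Watershed"; to actually use Li, the method name inside setAutoThreshold()
# would need to be changed (e.g. "Li dark").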
INPUT_filename = filename OUTPUT_filename = INPUT_filename.rsplit('_', 1)[0] + "_thresholded.tif" path_image_out = path_in + "/" + OUTPUT_filename # get the IMAGES folder path to save the resulted image cwd = os.getcwd() # get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" path_image_out = path_in + "/" + OUTPUT_filename # PRINT file/folder info print("---------------------------------------------------") print("INPUT:") print("The images are imported from: ", path_in) print("INPUT_filename:", INPUT_filename) print("\n") print("OUTPUT:") print("The results are exported in: ", path_image_out) print("OUTPUT_filename:", OUTPUT_filename) print("---------------------------------------------------") args_IO = { 'path_out' : f'{path_image_out}', } macro_Threshold = """ //DEFINE IO #@ String path_out // RUN threshold setAutoThreshold("Default dark"); setOption("BlackBackground", true); run("Convert to Mask"); run("Watershed"); saveAs("tiff", path_out); """ ij.py.run_macro(macro_Threshold, args_IO) #SAVE the resulted subtracted image thresholded = ij.py.active_image_plus() numpy_thresholded = ij.py.from_java(thresholded) tifffile.imwrite(path_image_out, numpy_thresholded, imagej=True) #PRINT the results ij.py.show(thresholded, cmap = "gray") print("\nIMAGE ⬆︎") # SAVE resulted filename filename = OUTPUT_filename Threshold() Profile()The output directory already exist. --------------------------------------------------- The images are imported from: /Users/nicolascristini/IPAN-Project/IPAN/IMAGES INPUT_filename: image4_thresholded.tif The results are exported in: /Users/nicolascristini/IPAN-Project/IPAN/RESULTS OUTPUT_filename: image4_thresholded_profile.png --------------------------------------------------- [INFO] script:macro.ijm = [[in, out], [/Users/nicolascristini/IPAN-Project/IPAN/IMAGES/image4_thresholded.tif, /Users/nicolascristini/IPAN-Project/IPAN/RESULTS/image4_thresholded_profile.png]]**ANALYSE PARTICLES**At the end of this processing step, we obtain a binary image with only a white (pixel value = 255) object corresponding to our nuclei and a black (pixel value = 0) background.It is possible now to count the number of white objects by running the "Analysing Particles". The other measurements obtained from the analysis must be previously defined with the command "Set measurements"The macro will be incorporated in a function *Count()*def Count(path_in = "", path_out = ""): global filename # This functions use the *Analyse Particles* in a ImageJ macro to applly to count the number of objects in the picture. # the Result table will show the feature selected in *Set Measurements* In this case we are going to analysis: # area, circularity, AR ratio. The resulted table will be saved in the RESULTS folder. # Besides that, the table will be displayed by using pandas. # 1 - Multiple working situations: #a) The image is left activated by the previous function, filename is a global variable. 
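# Added note on the "Analyze Particles..." options used in the macro below:
# "size=20-Infinity pixel" keeps only objects of at least 20 pixels,
# "circularity=0.10-1.00" filters by shape, "exclude" drops particles touching
# the image border, and "clear summarize" resets the results table and adds a
# summary row; the measured columns come from the preceding "Set Measurements..." call.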
INPUT_filename = filename OUTPUT_filename = INPUT_filename.rsplit('_', 1)[0] + "_data.csv" # get the IMAGES folder path to save the resulted image cwd = os.getcwd() # get current directory if len(path_in) == 0: path_in = cwd + "/IMAGES" if len(path_out) == 0: path_out = cwd + "/RESULTS" path_data_out = path_out + "/" + OUTPUT_filename # PRINT file/folder info print("---------------------------------------------------") print("INPUT:") print("The images are imported from: ", path_in) print("INPUT_filename:", INPUT_filename) print("\n") print("OUTPUT:") print("The results are exported in: ", path_out) print("OUTPUT_filename:", OUTPUT_filename) print("---------------------------------------------------") args_IO = { 'path_out' : f'{path_data_out}', } macro_Count = """ // DEFINE IO #@ String path_out // RUN THE MEASUREMENTS title = getTitle() run("Set Measurements...", "area shape display redirect=None decimal=3"); run("Analyze Particles...", "size=20-Infinity pixel circularity=0.10-1.00 show=Outlines display exclude clear summarize in_situ"); saveAs("Results", path_out); """ ij.py.run_macro(macro_Count, args_IO) print("\nThe data are saved as:\n", path_data_out) #PRINT the results return path_data_out %%capture resulted_data = Count()Label Area Circ. AR Round Solidity 1 image4_thresholded.tif 188.013 0.797 1.127 0.887 0.918 2 image4_thresholded.tif 375.181 0.843 1.466 0.682 0.952 3 image4_thresholded.tif 401.376 0.863 1.458 0.686 0.953 4 image4_thresholded.tif 147.453 0.927 1.082 0.924 0.948 5 image4_thresholded.tif 175.338 0.861 1.446 0.692 0.939 6 image4_thresholded.tif 190.125 0.784 1.224 0.817 0.922 7 image4_thresholded.tif 166.043 0.724 1.300 0.769 0.921 8 image4_thresholded.tif 170.690 0.838 1.385 0.722 0.936 9 image4_thresholded.tif 300.821 0.846 1.546 0.647 0.951 10 image4_thresholded.tif 235.755 0.871 1.279 0.782 0.936 11 image4_thresholded.tif 183.365 0.816 1.518 0.659 0.918 12 image4_thresholded.tif 392.926 0.805 1.607 0.622 0.944 13 image4_thresholded.tif 141.960 0.594 1.189 0.841 0.846 14 image4_thresholded.tif 176.605 0.875 1.137 0.879 0.922 15 image4_thresholded.tif 345.183 0.805 1.647 0.607 0.940 16 image4_thresholded.tif 192.238 0.884 1.092 0.916 0.935 17 image4_thresholded.tif 205.335 0.741 1.72[...]The .csv file has been saved in the output folder and now we can finally use pandas to import the dataset and work on the results.#Import libraries for data management and visualization #import seaborn as sns import pandas as pd #Clean dataframe Data = pd.read_csv(resulted_data, header = 0, sep=',', encoding='latin-1', index_col=0) Data = Data.drop("Label", axis = 1) Data.rename(columns={'Circ.': 'Circularity'}, inplace=True) Data_summary = Data.describe() print(Data) print("\n", Data_summary) print("\nTotal number of nuclei:\t", Data.shape[0])Area Circularity AR Round Solidity 1 188.013 0.797 1.127 0.887 0.918 2 375.181 0.843 1.466 0.682 0.952 3 401.376 0.863 1.458 0.686 0.953 4 147.453 0.927 1.082 0.924 0.948 5 175.338 0.861 1.446 0.692 0.939 .. ... ... ... ... ... 
346 192.238 0.859 1.123 0.890 0.941 347 327.438 0.809 1.629 0.614 0.952 348 30.843 0.907 1.109 0.902 0.918 349 182.520 0.846 1.377 0.726 0.926 350 176.605 0.886 1.294 0.773 0.943 [350 rows x 5 columns] Area Circularity AR Round Solidity count 350.000000 350.000000 350.000000 350.00000 350.000000 mean 222.729077 0.809577 1.475269 0.69996 0.926463 std 84.786011 0.080956 0.278850 0.12062 0.024208 min 10.985000 0.518000 1.018000 0.36400 0.7[...]IPython Notebook In DepthToday we are going to dive into some of the interesting features of IPython and the IPython notebook, which are useful for a number of daily tasks in data-intensive science.We will work through these features "live"; feel free to type along with me as we go! Outline- IPython: command-line vs. notebook- Input/Output History- Tab Completion- Getting help and accessing documentation- Useful Keyboard Shortcuts- Magic Commands- Shell commands- Interactivity with ``ipywidgets`` IPython Command Line and Notebook Launching the IPython ShellIf you have installed IPython correctly, you should be able to type ``ipython`` in your command prompt and see something like this:```IPython 4.0.1 -- An enhanced Interactive Python.? -> Introduction and overview of IPython's features.%quickref -> Quick reference.help -> Python's own help system.object? -> Details about 'object', use 'object??' for extra details.In [1]:```With that, you're ready to follow along. Launching the IPython NotebookThe IPython notebook is a browser-based graphical interface to the IPython shell, and builds on it a rich set of dynamic display capabilities.As well as executing Python/IPython statements, the notebook allows the user to include formatted text, static and dynamic visualizations, mathematical equations, javascript widgets, and much more.Furthermore, these documents can be saved in a way that lets other people open them and execute the code on their own systems.Though the IPython notebook is viewed and edited through your web browser window, it must connect to a running Python process in order to execute code.This process (known as a "kernel") can be started by running the following command in your system shell:```$ ipython notebook```This command will launch a local web server which will be visible to your browser.It immediately spits out a log showing what it is doing; that log will look something like this:```$ ipython notebook[NotebookApp] Using existing profile dir: '/home/jake/.ipython/profile_default'[NotebookApp] Serving notebooks from local directory: /home/jake/notebooks/[NotebookApp] The IPython Notebook is running at: http://localhost:8888/[NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)```At the command, your default browser should automatically open and navigate to the listed local URL;the exact address will depend on your system.If the browser does not open automatically, you can open a window and copy this address (here ``http://localhost:8888/``) manually. Input/Output HistoryA useful feature of IPython is the storage of input and output history. 
Terminal OnlyThere are a few useful shortcuts that can be used only in the IPython terminal.We will demonstrate the following in the terminal:- up arrow for history- partial completion with up arrow- reverse search with ctrl-r Terminal and Notebook- Previous results can be obtained using underscores: number of underscores is the number of the previous command:1 + 1 _ * 100 _ + __ _ + __ ** ___Beyond three underscores, you can use underscore followed by a number to indicate the result of a particular line number:_3More useful often is the ``Out`` array, which stores all previous results:Out[3] OutSimilarly, you can access the ``In`` array to see the code history:In[2] InTo see all history at once, use the ``%history`` magic command (more on magic commands below):%history1 + 1 _ * 100 _ + __ _ + __ ** ___ _4 Out[3] Out _3 Out[3] Out In[2] In %historyTab CompletionThe feature of IPython that I use the most frequently is perhaps the tab completion functionality.Tab completion works for finishing built-in commands:```pythonIn [20]: import matp```will be completed to ```pythonIn [20]: import matplotlib``` It works for variables that you have defined:my_variable = 4```pythonIn [22]: my```will be completed to ```pythonIn [22]: my_variable``` This also works for strings which represent filenames, or pandas columns, etc. It works for importing packages: ```pythonIn [25]: import num```will be completed to ```pythonIn [25]: import numpy``` It works for finding attributes of packages and other objects: ```pythonIn [25]: numpy.ran```will be completed to ```pythonIn [25]: numpy.random``` Accessing Help and Documentation After tab completion, I think the next most useful feature of the notebook is the help functionality.One question mark after any valid object gives you access to its documentation string:numpy.random? def myfunc(x): return x ** 2 myfunc?Two question marks gives you access to its source code (if the object is implemented in Python):myfunc??In addition, you can use a single question mark with asterisks to do a wildcard match:numpy.*exp*?If you are curious about the call signature for a funciton, you can type ``shift tab`` within the open-closed parentheses to see its argument list: Hitting ``shift tab`` multiple times will give you progressively more information about the function: Using a combination of these, you can quickly remind yourself of how to use various funcitons without ever leaving the terminal/notebook. Useful Keyboard ShortcutsOne of the keys to working effectively with IPython is learning your way around the keyboard.Note: some of the shortcuts below will only work on Linux and Mac; many will work on windows as well Terminal shortcutsIf you are familiar with emacs, vim, and similar tools, many of the terminal-based keyboard shortcuts will feel very familiar to you: Navigation| Keystroke | Action ||-------------------------------|--------------------------------------------|| ``Ctrl-a`` | Move cursor to the beginning of the line || ``Ctrl-e`` | Move cursor to the end of the line || ``Ctrl-b`` or ``left-arrow`` | Move cursor back one character || ``Ctrl-f`` or ``right-arrow`` | Move cursor forward one character | Text Entry| Keystroke | Action ||-------------------------------|-------------------------------------------------|| ``backspace`` | Delete previous character in line || ``Ctrl-d`` | Delete next character in line || ``Ctrl-k`` | Cut text from cursor to end of line || ``Ctrl-u`` | Cut all text in line || ``Ctrl-y`` | Yank (i.e. 
Paste) text which was previously cut || ``Ctrl-t`` | Transpose (i.e. switch) previous two characters | Command History| Keystroke | Action ||-------------------------------|--------------------------------------------|| ``Ctrl-p`` or ``up-arrow`` | Access previous command in history || ``Ctrl-n`` or ``down-arrow`` | Access next command in history || ``Ctrl-r`` | Reverse-search through command history | Miscellaneous| Keystroke | Action ||-------------------------------|--------------------------------------------|| ``Ctrl-l`` | Clear terminal screen || ``Ctrl-c`` | Interrupt current Python command || ``Ctrl-d`` | Exit IPython session | Notebook ShortcutsDepending on your operating system and browser, many of the navigation and text-entry shortcuts will work in the notebook as well. In addition, the notebook has many of its own shortcuts.First, though, we must mention that the notebook has two "modes" of operation: command mode and edit mode.- In **command mode**, you are doing operations that affect entire cells. You can enable command mode by pressing the escape key (or pressing ``ctrl m``). For example, in command mode, the up and down arrows will navigate from cell to cell.- In **edit mode**, you can do operations that affect the contents of a single cell. You can enable edit mode by pressing enter from the command mode. For example, in edit mode, the up and down arrows will navigate lines within the cellTo get a listing of all available shortcuts, enter command mode and press "h" Magic CommandsIPython extends the functionality of Python with so-called "magic" commands: these are marked with a ``%`` sign.We saw one of these above; the ``%history`` command.Magic commands come in two flavors: *line magics* start with one percent sign, and *cell magics* start with two percent signs.We'll go through a few examples of magic commands here, but first, using what you've seen above, how do you think you might get a list of all available magic commands? How do you think you might get help on any particular command?%timeit?Profiling with ``timeit``For example, here's the ``%timeit``/``%%timeit`` magic, which can be very useful for quick profiling of your code:import numpy as np x = np.random.rand(1000000) %timeit x.sum() L = list(x) %timeit sum(L) %%timeit y = x + 1 z = y ** 2 q = z.sum()100 loops, best of 3: 6.87 ms per loopInterpreter: ``paste`` and ``cpaste``Try pasting this Python code into your IPython interpreter:```python>>> def donothing(x): return x```You'll likely get an error.Now try typing ``%paste`` in your interpreter: what happens?Next try typing ``%cpaste`` and then use cmd-v to paste your test: what happens? 
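As an aside (not part of the original tutorial), the quick measurements that ``%timeit`` gives you can also be reproduced in a plain Python script with the standard-library ``timeit`` module, which is useful outside IPython:
```python
# Plain-Python equivalent of the %timeit measurement above (illustrative).
import timeit

setup = "import numpy as np; x = np.random.rand(1000000)"
# Best of 5 repeats of 100 calls each, reported as time per call.
best = min(timeit.repeat("x.sum()", setup=setup, repeat=5, number=100)) / 100
print("{:.1f} us per call".format(best * 1e6))
```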
Creating a file with ``%%file``Sometimes it's useful to create a file programatically from within the notebook%%file myscript.py def foo(x): return x ** 2 z = foo(12) print(foo(14))Overwriting myscript.pyRunning a script with ``%run``%run myscript.py z foo(2)Controlling figures: ``%matplotlib``You can use the ``%matplotlib`` function to specify the matplotlib *backend* you would like to use.For example:- ``%matplotlib`` by itself uses the default system backend- ``%matplotlib inline`` creates inline, static figures (great for publication and/or sharing)- ``%matplotlib notebook`` creates inline, interactive figures (though in my experience it can be a bit unstable)%matplotlib inline import numpy as np import matplotlib.pyplot as plt plt.plot(np.random.rand(100));Help functions and more info- The ``%magic`` function will tell you all about magic commands- The ``%lsmagic`` function will list all available magic commands- Remember that the ``?`` can be used to get documentation!- Though we won't cover it here, it is possible to [create and activate your own magic commands](https://ipython.org/ipython-doc/stable/config/custommagics.html)%lsmagic %magic %debug?Shell CommandsIPython is meant to be an all-purpose scientific computing environment, and access to the shell is critical.Any command that starts with an exclamation point will be passed to the shell.Note that because windows has a different kind of shell than Linux/OSX, shell commands will be different from operating system to operating system.All the commands you have learned previously will work here:!ls !pwdYou can even seamlessly pass values to and from the Python interpreter.For example, we can store the result of a directory listing:contents = !ls contentsWe can inject Python variables into a shell command with ``{}``:!cat {contents[4]} for filename in contents: if filename.endswith('.py'): print(filename) !head -10 {filename}myscript.py def foo(x): return x ** 2 z = foo(12) print(foo(14))Please input your directory for the top level folderfolder name : SUBMISSION MODELdir_ = 'INPUT-PROJECT-DIRECTORY/submission_model/' # input only heresetting other directoryraw_data_dir = dir_+'2. data/' processed_data_dir = dir_+'2. data/processed/' log_dir = dir_+'4. logs/' model_dir = dir_+'5. models/' submission_dir = dir_+'6. submissions/' #################################################################################### ##################### 1-3. 
recursive model by store & dept ######################### #################################################################################### ver, KKK = 'priv', 0 STORES = ['CA_1', 'CA_2', 'CA_3', 'CA_4', 'TX_1', 'TX_2', 'TX_3', 'WI_1', 'WI_2', 'WI_3'] DEPTS = ['HOBBIES_1', 'HOBBIES_2', 'HOUSEHOLD_1', 'HOUSEHOLD_2', 'FOODS_1', 'FOODS_2', 'FOODS_3'] # General imports import numpy as np import pandas as pd import os, sys, gc, time, warnings, pickle, psutil, random # custom imports from multiprocessing import Pool warnings.filterwarnings('ignore') ########################### Helpers ################################################################################# ## Seeder # :seed to make all processes deterministic # type: int def seed_everything(seed=0): random.seed(seed) np.random.seed(seed) ## Multiprocess Runs def df_parallelize_run(func, t_split): num_cores = np.min([N_CORES,len(t_split)]) pool = Pool(num_cores) df = pd.concat(pool.map(func, t_split), axis=1) pool.close() pool.join() return df ########################### Helper to load data by store ID ################################################################################# # Read data def get_data_by_store(store, dept): # Read and contact basic feature df = pd.concat([pd.read_pickle(BASE), pd.read_pickle(PRICE).iloc[:,2:], pd.read_pickle(CALENDAR).iloc[:,2:]], axis=1) df = df[df['d']>=START_TRAIN] df = df[(df['store_id']==store) & (df['dept_id']==dept)] df2 = pd.read_pickle(MEAN_ENC)[mean_features] df2 = df2[df2.index.isin(df.index)] df3 = pd.read_pickle(LAGS).iloc[:,3:] df3 = df3[df3.index.isin(df.index)] df = pd.concat([df, df2], axis=1) del df2 df = pd.concat([df, df3], axis=1) del df3 features = [col for col in list(df) if col not in remove_features] df = df[['id','d',TARGET]+features] df = df.reset_index(drop=True) return df, features # Recombine Test set after training def get_base_test(): base_test = pd.DataFrame() for store_id in STORES: for state_id in DEPTS: temp_df = pd.read_pickle(processed_data_dir+'test_'+store_id+'_'+state_id+'.pkl') temp_df['store_id'] = store_id temp_df['dept_id'] = state_id base_test = pd.concat([base_test, temp_df]).reset_index(drop=True) return base_test ########################### Helper to make dynamic rolling lags ################################################################################# def make_lag(LAG_DAY): lag_df = base_test[['id','d',TARGET]] col_name = 'sales_lag_'+str(LAG_DAY) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(LAG_DAY)).astype(np.float16) return lag_df[[col_name]] def make_lag_roll(LAG_DAY): shift_day = LAG_DAY[0] roll_wind = LAG_DAY[1] lag_df = base_test[['id','d',TARGET]] col_name = 'rolling_mean_tmp_'+str(shift_day)+'_'+str(roll_wind) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(shift_day).rolling(roll_wind).mean()) return lag_df[[col_name]] ########################### Model params ################################################################################# import lightgbm as lgb lgb_params = { 'boosting_type': 'gbdt', 'objective': 'tweedie', 'tweedie_variance_power': 1.1, 'metric': 'rmse', 'subsample': 0.5, 'subsample_freq': 1, 'learning_rate': 0.015, 'num_leaves': 2**8-1, 'min_data_in_leaf': 2**8-1, 'feature_fraction': 0.5, 'max_bin': 100, 'n_estimators': 3000, 'boost_from_average': False, 'verbose': -1 } ########################### Vars ################################################################################# VER = 1 SEED = 42 seed_everything(SEED) lgb_params['seed'] = SEED 
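# NOTE: the pickled feature files referenced below (grid_part_*.pkl, lags_df_28.pkl,
# mean_encoding_df.pkl) must already exist in processed_data_dir; presumably they are
# produced by an earlier feature-engineering step that is not shown in this script.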
N_CORES = psutil.cpu_count() #LIMITS and const TARGET = 'sales' START_TRAIN = 700 END_TRAIN = 1941 - 28*KKK P_HORIZON = 28 USE_AUX = False remove_features = ['id','cat_id', 'state_id','store_id','dept_id', 'date','wm_yr_wk','d',TARGET] mean_features = ['enc_item_id_store_id_mean','enc_item_id_store_id_std'] ORIGINAL = raw_data_dir BASE = processed_data_dir+'grid_part_1.pkl' PRICE = processed_data_dir+'grid_part_2.pkl' CALENDAR = processed_data_dir+'grid_part_3.pkl' LAGS = processed_data_dir+'lags_df_28.pkl' MEAN_ENC = processed_data_dir+'mean_encoding_df.pkl' #SPLITS for lags creation SHIFT_DAY = 28 N_LAGS = 15 LAGS_SPLIT = [col for col in range(SHIFT_DAY,SHIFT_DAY+N_LAGS)] ROLS_SPLIT = [] for i in [1,7,14]: for j in [7,14,30,60]: ROLS_SPLIT.append([i,j]) _, MODEL_FEATURES = get_data_by_store(STORES[-1], DEPTS[-1]) del _; gc.collect() def pred_q(quantile): print(quantile) all_preds = pd.DataFrame() # Join back the Test dataset with # a small part of the training data # to make recursive features grid_df = base_test.copy() grid_df = pd.concat([grid_df, df_parallelize_run(make_lag_roll, ROLS_SPLIT)], axis=1) main_time = time.time() for PREDICT_DAY in range(1,29): print('Predict | Day:', PREDICT_DAY) start_time = time.time() for store_id in STORES: for state_id in DEPTS: model_path = model_dir+'lgb_model_'+store_id+'_'+state_id+'_v'+str(VER)+'.bin' if USE_AUX: model_path = AUX_MODELS + model_path estimator = pickle.load(open(model_path, 'rb')) day_mask = base_test['d']==(END_TRAIN+PREDICT_DAY) store_mask = base_test['store_id']==store_id state_mask = base_test['dept_id']==state_id mask = (day_mask)&(store_mask)&(state_mask) print('starting to predict') base_test[TARGET][mask] = estimator.predict(grid_df[mask][MODEL_FEATURES], float(quantile)) temp_df = base_test[day_mask][['id',TARGET]] temp_df.columns = ['id','F'+str(PREDICT_DAY)] if 'id' in list(all_preds): all_preds = all_preds.merge(temp_df, on=['id'], how='left') else: all_preds = temp_df.copy() print('#'*10, ' %0.2f min round |' % ((time.time() - start_time) / 60), ' %0.2f min total |' % ((time.time() - main_time) / 60), ' %0.2f day sales |' % (temp_df['F'+str(PREDICT_DAY)].sum())) del temp_df all_preds = all_preds.reset_index(drop=True) all_preds ########################### Export ################################################################################# submission = pd.read_csv(ORIGINAL+'sample_submission.csv')[['id']] submission = submission.merge(all_preds, on=['id'], how='left').fillna(0) submission.to_csv(submission_dir+f'before_ensemble/submission_kaggle_recursive_store_dept_{quantile}.csv', index=False) base_test = get_base_test() import concurrent.futures for quantile in ['0.005', '0.025', '0.165', '0.250', '0.500', '0.750', '0.835', '0.975', '0.995']: with concurrent.futures.ThreadPoolExecutor( ) as executor: executor.submit( pred_q, quantile )0.005 Predict | Day: 1 starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict 
starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predict starting to predi[...]Cell annotation - Dendritic cells and monocytes in human bloodWith this notebook, we show the example of annotation of cell data with cell type based on marker genes. We propose a fast annotation procedure using the Mann-Whitney U test for genes selection and the Hypergeometric test for cell-type annotation. This example demonstrates the annotation procedure on Dendritic cells and monocytes in human blood data [1]. For annotation, we use marker genes for cell types from PanglaoDB [2].The whole annotation can be made by just calling one function, but since we want to show the intermediate results we will make the annotation in four steps: (1) Data normalization, (2) Selecting overexpressed genes with Mann-Whitney U test, (3) assigning the annotations with the hypergeometric test, and (4) filtering scores that do not meet p-value threshold.%load_ext autoreload %autoreload 2 import numpy as np %matplotlib inline import matplotlib from matplotlib import pyplot as plt import pandas as pd from openTSNE import TSNE from collections import Counter from sklearn.decomposition import PCA from sklearn.cluster import DBSCAN import seaborn as sn from IPython.display import HTML, display import tabulate from cellannotation.annotate_samples import *Data Datasets are available [here](https://github.com/PrimozGodec/cell-annotation/releases/0.1.0) download them in data directory. If you are using unix based operating system it can be done with running:!wget -q -nc -P data/ https://github.com/PrimozGodec/cell-annotation/releases/download/0.1.0/panglao_gene_markers.csv.gz !wget -q -nc -P data/ https://github.com/PrimozGodec/cell-annotation/releases/download/0.1.0/DC_expMatrix_DCnMono.csv.gz # currently we need only gene expression data so we remove last column - cell id gene_expressions_df = pd.read_csv("data/DC_expMatrix_DCnMono.csv.gz", compression='gzip') gene_expressions_df = gene_expressions_df.loc[:, gene_expressions_df.columns != "Cell ID"]Data properties:print("Number of cells in dataset: " + str(gene_expressions_df.shape[0])) print("Number of genes in dataset: " + str(gene_expressions_df.shape[1]))Number of cells in dataset: 1140 Number of genes in dataset: 26594Since data are not normalized yet we will first nomralize them with log CPM normalization.gene_expressions_normalized_df = AnnotateSamples.log_cpm( gene_expressions_df.loc[:, gene_expressions_df.columns != "Sample ID"])Most expressed genes per cellWith this step, we retrieve the z values for each gene-cell pair with Mann-Whitney U test. Z-value let us know whether the gene for this cell is more expressed compared to the same gene for all other cells. In hypergeometric test later we will just consider genes that have a z-value above 1.%%time z_df = AnnotateSamples.mann_whitney_test(gene_expressions_normalized_df) z_threshold = 1Statistics: Expressed genes per cellThe histogram shows the number of genes with z-value above 1 per cell.expressions_per_cell = (z_df > z_threshold).sum(axis=1) plt.figure(figsize=(5,5)) plt.hist(expressions_per_cell, bins=50); plt.title("Selected genes per cell"); plt.xlabel("Genes per cell") plt.ylabel("Cell count");Loading marker genesWe use marker genes from PanglaoDB. 
We packed marker genes in `.csv` file for easier loading. Marker genes must have `Cell Type` and `Gene` column.marker_genes_df = pd.read_csv("data/panglao_gene_markers.csv.gz", compression="gzip") marker_genes_df = marker_genes_df.rename(columns={'Name': 'Gene'}) marker_genes_df = marker_genes_df[marker_genes_df["Organism"] == "Human"]Check the number of marker genes that overlap with genes in the dataset:len(set(gene_expressions_normalized_df.columns.values) & set(marker_genes_df.loc[:, 'Gene'].values))Annotate We use the hypergeometric test for annotation. It is made cell-vise. For each cell, it takes into account the number of genes by a cell type, number of genes selected for a cell by Mann-Whitney test, number of genes that overlap in the previous two sets and number of genes that the organism has. In our case, we take the number of genes discovered in the NCBI taxonomy database.After the annotation scores that have a corrected p-value (FDR) below the threshold 0.05 are filtered out together with cell types that do not annotate any cell. The result is a scoring matrix where rows represent cells and columns represents cell-types.%%time scores_df, fdrs_df = AnnotateSamples.assign_annotations( z_df, marker_genes_df, gene_expressions_normalized_df, 61244) # 61244 is number of genes in NCBI taxonomy database for human scores_df = AnnotateSamples.filter_annotations( scores_df, fdrs_df, return_nonzero_annotations=True, p_threshold=0.05)CPU times: user 3.89 s, sys: 226 ms, total: 4.11 s Wall time: 3.9 sFor each cell as a lable we select the cell type with the highest score value.labels = np.array(scores_df.columns.values) # get cell type names labels_cells = scores_df.idxmax(axis=1) # get the index of a maximal score per cellStatistics: 10 most common labelsHere we show what are the most common cell-tyepes in this dataset.counts = Counter(labels_cells).most_common() counts_show = [[k, "{:.03f}".format(v / len(labels_cells))] for k, v in counts[:10]] display(HTML("10 most common labels in the dataset and proportion of appearance")) display(HTML(tabulate.tabulate(counts_show, tablefmt='html')))Comparison with dataset annotationsTo check whether the annotation is relevant we plot a confusion matrix, where columns represent cell types which annotated by dataset authors and rows represent annotations by our technique.ids = [str(i) for i in list(gene_expressions_df.loc[:, "Sample ID"])] ids_unique = sorted(list(set(ids))) lables_no_nan = labels_cells.dropna() labels_unique = sorted(list(set(lables_no_nan))) confusion_matrix = np.zeros((len(labels_unique), len(ids_unique))) for l, idx in zip(lables_no_nan, ids): confusion_matrix[labels_unique.index(l), ids_unique.index(idx)] += 1 # subsample confusion matrix - take three most common cell types per column n_maxs = 3 sel_ind = [] for i in range(confusion_matrix.shape[1]): ind = confusion_matrix[:, i].argsort()[-n_maxs:] sel_ind += [i for i in ind if i not in sel_ind] conf_sub_smp = confusion_matrix[sel_ind] labels_unique_sub = np.array(labels_unique)[sel_ind] df_cm = pd.DataFrame(conf_sub_smp, index = labels_unique_sub, columns = ids_unique) plt.figure(figsize=(5,5)) sn.heatmap(df_cm, annot=True, fmt='g');Annotations in tSNE plotWe reduce dimensionality of cell-data with the tSNE and then assign most common cell-type to groups in the tSNE plot. The dimensionality reduction is performed by openTSNE [3] the fast tSNE implementation. 
Groups are identified by DBSCAN.pca = PCA(n_components=50) # using PCA first speed up the tSNE pca_data = pca.fit_transform(gene_expressions_normalized_df) tsne = TSNE(n_components=2, n_jobs=4) data_embedded = tsne.fit(pca_data) clustering = DBSCAN(eps=4, min_samples=2).fit(data_embedded) clusters = clustering.labels_ plt.figure(figsize=(12, 12)) for l in set(clusters) - {-1}: incl = clusters == l x = np.array(data_embedded)[incl, :] plt.scatter(x[:, 0], x[:, 1], label=l, alpha=0.2); center = x.mean(axis=0) labels_cl = labels_cells.loc[incl].dropna() counts = Counter(labels_cl) max_el = "\n".join("{0}\n{1:.1f} %".format( "\n".join(k.split()), v / len(x) * 100) for k, v in counts.most_common(1)) plt.annotate(max_el, center, horizontalalignment='center', verticalalignment='center', size=20, weight='bold'); # plot data from cluster -1 (unclustered data) incl = clusters == -1 x = np.array(data_embedded)[incl, :] plt.scatter(x[:, 0], x[:, 1], label=l, alpha=0.2, color="g");Cells-genes heatmapFor each cell, we select a few genes with the highest z-value and show their expressions. With this plot, we show that selected genes separate this cell type from other cell types. We show that the hypergeometric test selected the type that is described with shown genes.plt.figure(figsize=(12, 12)) n_genes_per_type = 3 # select genes with highest z-score for a cell in a group selected_cells = np.array([]).astype(int) selected_genes = np.array([]).astype(int) y_tick_labels = [] labels_unique = sorted(list(set(labels_cells.dropna()))) for cell_type in labels_unique_sub: z_values_type = z_df[labels_cells == cell_type] genes_idx = z_values_type.mean(axis=0).argsort()[-n_genes_per_type:] selected_genes = np.concatenate([selected_genes, genes_idx]) cells_idx1 = np.where(labels_cells == cell_type)[0] selected_cells = np.concatenate([selected_cells, cells_idx1]) y_tick_labels += [cell_type] + [""] * (len(cells_idx1) - 1) hm_array_df = z_df.iloc[selected_cells].iloc[:, selected_genes] # hm_array_df = pd.DataFrame(hm_array, columns=genes_names[selected_genes]) hm_array_df = hm_array_df.clip(-100, 10) sn.heatmap(hm_array_df, yticklabels=y_tick_labels);Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.SPDX-License-Identifier: Apache-2.0 HMEQ Dataset : Build and Import Trained Models into SAS Model Manager on SAS Viya 3.5 This notebook provides an example of how to build and train a Python model and then import the model into SAS Model Manager using the fleet maintenance data set. Lines of code that must be modified by the user, such as directory paths are noted with the comment "_Changes required by user._". _**Note:** If you download only this notebook and not the rest of the repository, you must also download the hmeq.csv file from the data folder in the examples directory. These files are used when executing this notebook example._ Here are the steps shown in this notebook: 1. Import and review data and preprocess for model training. 2. Build, train, and access an H2O.ai generalized linear estimator model. 3. Serialize the model into pickle or MOJO files. 4. Write the metadata JSON files needed for importing into SAS Model Manager. 4. Write a score code Python file for model scoring. 5. Zip the model, JSON, and score code files into an archive file. 6. Import the ZIP archive file to SAS Model Manager via the Session object and relevant function call. 
Python Package Imports# Dataframes for data manipulations import pandas as pd pd.options.mode.chained_assignment = None # default='warn' # Mathematical calculations and array handling import numpy as np # Pathing support from pathlib import Path # Import H2O and check the version import h2o from h2o.estimators.glm import H2OGeneralizedLinearEstimator # sasctl interface for importing models import sasctl.pzmm as pzmm from sasctl import Session h2o.__version__On SAS Viya 3.5, models created in H2O versions 3.24 and under are only compatible in the binary model format. For H2O versions 3.26+, models can be in the MOJO or binary model format. If using a binary model, the H2O version on the SAS Viya server must match the version of H2O used to create the model.h2o.init()Checking whether there is an H2O instance running at http://localhost:54321. connected. Warning: Your H2O cluster version is too old (2 years, 9 months and 7 days)! Please download and install the latest version from http://h2o.ai/download/Import and Review Data SethmeqData = h2o.import_file('data/hmeq.csv',sep= ',') hmeqData.shapeParse progress: |█████████████████████████████████████████████████████████| 100%Preprocess DatahmeqData['BAD'] = hmeqData['BAD'].asfactor() train, validation, test = hmeqData.split_frame(ratios=[.6, .2], seed=42) y = 'BAD' x = list(hmeqData.columns) x.remove(y)Create, Train, and Assess ModelglmFit = H2OGeneralizedLinearEstimator(family='binomial', model_id='glmfit', lambda_search=True) glmFit.train(x=x, y=y, training_frame=train, validation_frame=validation) # Check the model performance and print its accuracy glmPerf = glmFit.model_performance(test) print(glmPerf.accuracy())[[0.551348008992684, 0.8486897717666948]]Choice Point The next two cells represent the saving process for binary and MOJO H2O models respectively.h2o.save_model(glmFit, path='data/hmeqModels/H2OBinaryGLM') #glmFit.save_mojo(path='data/hmeqModels/H2OMOJOGLM')Register Model in SAS Model Manager with pzmm *Commented out lines are for MOJO models*modelPrefix = 'glmFit' zipFolder = Path.cwd() / 'data/hmeqModels/H2OBinaryGLM/' pzmm.PickleModel.pickleTrainedModel(_, glmFit, modelPrefix, zipFolder, isH2OModel=True, isBinaryModel=True) #pzmm.PickleModle.pickleTrainedModel(_, glmFit, modelPrefix, zipFolder, isH2OModel=True) trainDF = train.as_data_frame() J = pzmm.JSONFiles() # Write input variable mapping to a json file J.writeVarJSON(trainDF[x], isInput=True, jPath=zipFolder) # Set output variables and assign an event threshold, then write output variable mapping outputVar = pd.DataFrame(columns=['EM_EVENTPROBABILITY', 'EM_CLASSIFICATION']) outputVar['EM_CLASSIFICATION'] = trainDF[y].astype('category').cat.categories.astype('str') outputVar['EM_EVENTPROBABILITY'] = 0.5 # Event threshold J.writeVarJSON(outputVar, isInput=False, jPath=zipFolder) # Write model properties to a json file J.writeModelPropertiesJSON(modelName=modelPrefix, modelDesc='', targetVariable=y, modelType='', modelPredictors=x, targetEvent=1, numTargetCategories=1, eventProbVar='EM_EVENTPROBABILITY', jPath=zipFolder, modeler='sasdemo') # Write model metadata to a json file J.writeFileMetadataJSON(modelPrefix, jPath=zipFolder) import getpass username = getpass.getpass() password = get() host = 'sas.demo.com' sess = Session(host, username, password, protocol='http') pzmm.ImportModel.pzmmImportModel(zipFolder, modelPrefix, 'BinaryH2OExample', trainDF[x], trainDF[y], predictmethod=None, isH2OModel=True, force=True) #pzmm.ImportModel.pzmmImportModel(zipFolder, modelPrefix, 
'MOJOH2OExample', trainDF[x], trainDF[y], predictMethod='{}.predict({})', isH2OModel=True, force=True)All model files were zipped to c:\Users\sclind\Documents\Python Scripts\GitHub\sassoftware\python-sasctl\examples\data\hmeqModels\H2OBinaryGLM. Model was successfully imported into SAS Model Manager as glmFit with UUID: e34d30a4-66dd-4648-ad75-c6e92f0b01f1. Model score code was written successfully to c:\Users\sclind\Documents\Python Scripts\GitHub\sassoftware\python-sasctl\examples\data\hmeqModels\H2OBinaryGLM\glmFitScore.py and uploaded to SAS Model ManagerTest the full connected layer model Import necessary modules# python built-in libraries import datetime # choose the tensorflow log level import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} # import tensorflow import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers # ohter packages import numpy as np import tfhelper.core as tfhelperGlobal Variables# sampling FEATURES_FILE = "./data/tensors/features.npy" # path to the file of the feature tensor LABELS_FILE = "./data/tensors/labels.npy" # path to the file of the feature tensor FRACTIONS = (0.8, 0.1, 0.1) # train, validation, test BATCH_SIZE = 128 # size of the batch BUFFER_SIZE = BATCH_SIZE * 2 # size of the shuffle buffer # training LEARNING_RATE = 0.001 # starting learning rate BETA1 = 0.9 # decay 1 BETA2 = 0.999 # decay 2 EPOCHS = 100 # number of epochs # saving TIME_STAMP = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # time stamp SAVE_LOC = "./data/models/cnn_2_{}".format(TIME_STAMP) # path to the folder to save the model LOG_LOC = "./data/logs/fit/cnn_2_{}".format(TIME_STAMP) # path to the log, if you change this, you also need to change it in the run_tensorboardObtain dataThe data is a 209 data point PDF. The label is a one-hot 2 dim vector. `10` means major phase >= threshold, `01` means major phase <= threshold.def load_data(): # load features, labels = np.load(FEATURES_FILE), np.load(LABELS_FILE) # shuffle n = features.shape[0] shuffled_idx = np.random.permutation(n) features, labels = features[shuffled_idx], labels[shuffled_idx] # split f0, f1, f2 = FRACTIONS i, j, k = round(f0 * n), round((f0 + f1) * n), round((f0 + f1 + f2) * n) train_data = tf.data.Dataset.from_tensor_slices((features[:i], labels[:i])).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) valid_data = tf.data.Dataset.from_tensor_slices((features[i:j], labels[i:j])).batch(BATCH_SIZE) test_data = tf.data.Dataset.from_tensor_slices((features[j:k], labels[j:k])).batch(BATCH_SIZE) return train_data, valid_data, test_data train_data, valid_data, test_data = load_data()Create the modelWe use the logistric regression. 
It is a single layer with a softmax function.def create_model(): model = keras.Sequential() model.add(keras.Input(shape=(209,))) model.add(keras.layers.Reshape((209, 1))) model.add(keras.layers.Conv1D(32, 3, strides=1, activation='relu', padding="same")) model.add(keras.layers.Conv1D(32, 3, strides=1, activation='relu', padding="same")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.MaxPooling1D(2)) model.add(keras.layers.Conv1D(64, 3, strides=1, activation='relu', padding="same")) model.add(keras.layers.Conv1D(64, 3, strides=1, activation='relu', padding="same")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.MaxPooling1D(2)) model.add(keras.layers.Dropout(0.2)) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(512, activation='relu')) model.add(keras.layers.Dense(128, activation='relu')) model.add(keras.layers.Dense(32, activation='relu')) model.add(keras.layers.Dense(2, activation='softmax')) return model model = create_model() model.summary()Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= reshape_3 (Reshape) (None, 209, 1) 0 _________________________________________________________________ conv1d_12 (Conv1D) (None, 209, 32) 128 _________________________________________________________________ conv1d_13 (Conv1D) (None, 209, 32) 3104 _________________________________________________________________ batch_normalization_6 (Batch (None, 209, 32) 128 _________________________________________________________________ max_pooling1d_6 (MaxPooling1 (None, 104, 32) 0 _________________________________________________________________ conv1d_14 (Conv1D) (None, 104, 64) 6208 ______________________________________________________[...]Choose optimization methodmodel.compile( # Optimizer optimizer=keras.optimizers.Adam(LEARNING_RATE, BETA1, BETA2), # Loss function to minimize loss=keras.losses.CategoricalCrossentropy(), # List of metrics to monitor metrics=[ keras.metrics.CosineSimilarity() ], )Train the model# tensor board tensorboard_callback = keras.callbacks.TensorBoard( log_dir=LOG_LOC, histogram_freq=1 ) # early stopping to avoid over fitting earlystopping_callback = keras.callbacks.EarlyStopping( monitor='val_loss', patience=30, restore_best_weights=True ) history = model.fit( x=train_data, epochs=EPOCHS, callbacks=[ tensorboard_callback, earlystopping_callback ], validation_data=valid_data )Epoch 1/100 688/688 [==============================] - 13s 17ms/step - loss: 0.6642 - cosine_similarity: 0.7346 - val_loss: 0.6255 - val_cosine_similarity: 0.7509 Epoch 2/100 688/688 [==============================] - 11s 16ms/step - loss: 0.6134 - cosine_similarity: 0.7571 - val_loss: 0.6317 - val_cosine_similarity: 0.7483 Epoch 3/100 688/688 [==============================] - 11s 16ms/step - loss: 0.5746 - cosine_similarity: 0.7762 - val_loss: 0.6120 - val_cosine_similarity: 0.7588 Epoch 4/100 688/688 [==============================] - 11s 16ms/step - loss: 0.5311 - cosine_similarity: 0.7959 - val_loss: 0.5473 - val_cosine_similarity: 0.7902 Epoch 5/100 688/688 [==============================] - 11s 16ms/step - loss: 0.4929 - cosine_similarity: 0.8118 - val_loss: 0.5194 - val_cosine_similarity: 0.8032 Epoch 6/100 688/688 [==============================] - 11s 16ms/step - loss: 0.4644 - cosine_similarity: 0.8231 - val_loss: 0.5178 - val_cosine_similarity: 0.8056 Epoch 7/100 688/688 [=[...]Evaluate the modelresult = 
model.evaluate( x=test_data, return_dict=True ) for name, val in result.items(): print("{:20s} {:.4f}".format(name, val))loss 0.4237 cosine_similarity 0.8858Save the modelmodel.save(SAVE_LOC)INFO:tensorflow:Assets written to: ./data/models/cnn_2_20210914-172931/assetsTable of Contents1  Preprocessing2  Manual fit the word frequency count2.1  Translate the sparse matrix in actual word3  Automate the extraction4  Extract from all columnsimport pandas as pd import numpy as np from sklearn.feature_extraction.text import CountVectorizer df = pd.read_excel('interviews_20180710.xls') dfPreprocessingdf.columns = [ 'name', # Name 'smart_contract', # How do you handle smart contract verif & security? 'bounties', # Other bounties 'who_what', # Who are you and what are you working on? 'tooling', # What are the tools/libraries/frameworks you use? 'frustrations', # What are your biggest frustrations? 'testing', # How do you handle testing? 'missing_tools', # What tools don’t exist at the moment? 'domain_questions', # Other domain specific questions? 'hardest_part', # What was the hardest part to develop with Ethereum? 'excited_about', # What are you most excited about in the short term? 'easier_expected', # Was anything easier than expected? 'people_talk_to', # Who do you think we should talk to? 'best_resources', # What are the best educational resources? 'questions_to_ask' # Are there any other questions we should be asking? ] df.fillna('', inplace = True) topics = {'solidity', 'gas', 'protocol', 'transaction', 'event', 'network', 'documentation', 'blockchain', 'client', 'bounty', 'deployment', 'audit', 'contract', 'verification', 'token', 'governance', 'payment', 'consensus', 'NFT', 'compiler', 'signature', 'hash', 'testing', 'wallet', 'community', 'decentralized'} projects = {'Ethereum', 'Geth', 'Truffle', 'Infura', 'MetaMask', 'Parity', 'Remix', 'Mythril', 'Casper', 'Plasma', 'Status', 'Embark', 'Swarm', 'eWASM', 'Sharding', 'State channels', 'Gitcoin', 'Augur', 'Vyper', 'Bamboo'}Manual fit the word frequency countcounter = CountVectorizer(vocabulary=topics) smart_contract = counter.fit_transform(df['smart_contract']) print(smart_contract)(0, 1) 1 (0, 10) 2 (0, 24) 1 (2, 1) 1 (4, 8) 1 (4, 13) 1 (7, 24) 1 (8, 8) 1 (9, 1) 1 (16, 1) 1 (16, 3) 2 (16, 5) 2 (20, 1) 1 (20, 8) 1 (22, 20) 2 (22, 21) 1 (33, 2) 1 (33, 8) 3 (33, 9) 1 (33, 14) 1 (33, 24) 2 (35, 21) 1 (37, 5) 1 (37, 20) 1 (38, 16) 1 : : (53, 3) 2 (53, 24) 1 (55, 2) 2 (61, 10) 1 (61, 11) 1 (63, 1) 1 (63, 8) 1 (66, 24) 1 (73, 1) 1 (73, 20) 1 (73, 24) 2 (75, 6) 1 (75, 20) 1 (77, 1) 1 (77, 24) 1 (80, 1) 1 (80, 3) 1 (85, 1) 1 (85, 2) 1 (85, 5) 1 (85, 13) 1 (86, 1) 1 (103, 8) 2 (103, 18) 2 (103, 24) 1Translate the sparse matrix in actual wordfor row in smart_contract[0:3]: d = dict(zip(counter.get_feature_names(), row.toarray().ravel() )) print(d) df['smart_contract'][0] # Notice that audits (plural) is not taken into account # We need a proper lemmatizer # Cleanup del counter del smart_contractAutomate the extractioncount_topics = CountVectorizer(vocabulary=topics) count_projects = CountVectorizer(vocabulary=projects) def dictFreq(df, field): counts_t = count_topics.transform(df[field]) counts_p = count_projects.transform(df[field]) result = pd.Series( { 'topics': dict(zip(count_topics.get_feature_names(), counts_t[i].toarray().ravel())), 'projects': dict(zip(count_projects.get_feature_names(), counts_p[i].toarray().ravel())) } for i in range(df.index.__len__()) ) return result dictFreq(df, 'smart_contract')Extract from all columnscols = [ 
'smart_contract', # How do you handle smart contract verif & security? 'bounties', # Other bounties 'who_what', # Who are you and what are you working on? 'tooling', # What are the tools/libraries/frameworks you use? 'frustrations', # What are your biggest frustrations? 'testing', # How do you handle testing? 'missing_tools', # What tools don’t exist at the moment? 'domain_questions', # Other domain specific questions? 'hardest_part', # What was the hardest part to develop with Ethereum? 'excited_about', # What are you most excited about in the short term? 'easier_expected', # Was anything easier than expected? 'people_talk_to', # Who do you think we should talk to? 'best_resources', # What are the best educational resources? 'questions_to_ask' # Are there any other questions we should be asking? ] for col in cols: df[col + '_wordcount'] = dictFreq(df, col) df df.to_json('interviews_wordcount.json', orient = 'index')Digging into GRIB2 binary files (JMA landslide risk)*Important:* In this repository, we provide some scripts to handle the numerical landslide risk assessment figures prepared in 10-minute intervals by the Japan Meteorological Agency (JMA). This data is *not freely available data*, but must rather be purchased from the Japan Meteorological Business Support Center (JMBSC), which operates under the oversight of the JMA. Here are a couple relevant links:- Up to date visualized plot of landslide risk figures: https://www.jma.go.jp/jp/doshamesh/- JMBSC page for landslide risk data set: http://www.jmbsc.or.jp/jp/online/file/f-online60210.htmlThis data is only provided in a standard binary format used frequently in meteorology, called *FM92 GRIB edition 2*. The software in this repository assumes that the user has already acquired such data via the appropriate channels.__Contents:__- Opening and reading the GRIB file- Section 0: 指示節 (instructions)- Section 1: 識別節 (technical specifications)- Section 2: 地域使用節 (region specifications)- Section 3: 格子系定義節 (lattice definitions)- Section 4: プロダクト定義節 (product definitions)- Section 5: 資料表現節 (formatting)- Section 6: ビットマップ節 (bitmap info)- Section 7: 資料節 (data) - Aside: JMA run-length compression algorithm - Decompression of lattice data- Section 8: 終端節 (closing section)___ Opening and reading the GRIB fileTo begin, let's clarify two points: (a) the nature of the data we are looking at; (b) the format of this data.Regarding the nature of the data of interest, we are focusing exclusively on the landslide risk information computed and published by the JMA. The Japanese name is 土砂災害警戒判定メッシュ情報 (see links at the top of this page). The JMA assigns a numerical score to each point on a lattice, corresponding to the risk of landslides based on recent precipitation and accumulation of rainwater in the ground.Regarding the format, this is technical but very straightforward. The data format is "General Representation of fields In Binary" (GRIB), and the specific guidelines are given in detail in the World Meteorological Organization's Manual on Codes. The JMA data that we will be examining here follows the FM92 GRIB edition 2, available at the WMO website in English (https://www.wmo.int/pages/prog/www/WMOCodes.html), and in Japanese from the JMA website (http://www.jma.go.jp/jma/kishou/books/tsuhoshiki/tsuhoshiki.html). 
The Japanese standards are based directly on this FM92 GRIB edition 2, and go by the name 国際気象通報式FM92 GRIB 二進形式格子点資料気象通報式(第2版).There is really no choice but to go through the binary files one byte at a time to check everything out once; then everything can be easily automated. Useful resources when prying through these GRIB2 files are as follows:- JMA Technical Report 374 (配信資料に関する技術情報(気象編)第374号). Available at: http://www.data.jma.go.jp/add/suishin/jyouhou/pdf/374.pdf- Description of the run-length encoding used for compression (ランレングス符号化法の解説). Available at: http://www.jmbsc.or.jp/jp/online/c-onlineGsd.htmlWith these two resources in hand, we open the connection with an example file.import os toread = "demo_data/20130701/Z__C_RJTD_20130701000000_MET_INF_Jdosha_Ggis5km_ANAL_grib2.bin" f_bin = open(toread, mode="rb") print(f_bin)<_io.BufferedReader name='demo_data/20130701/Z__C_RJTD_20130701000000_MET_INF_Jdosha_Ggis5km_ANAL_grib2.bin'>The files are broken up into sections (節). We shall take one at a time. Section 0: 指示節This section spans the first sixteen octets.f_bin.seek(0) b = f_bin.read(4) # Should be "GRIB". print("bytes:", b) s0_grib = str(b, 'ascii') print(s0_grib) b = f_bin.read(2) # Should be missing. print("bytes:", b) s0_missing = b print(s0_missing)bytes: b'\xff\xff' b'\xff\xff'As regards the seventh byte, they refer to 符号表0.0, saying that it represents the meteorology field of interest.b = f_bin.read(1) # Should be 0. print("bytes:", b) s0_field = int.from_bytes(b, byteorder="big", signed=False) print(s0_field) b = f_bin.read(1) # Should be 2. print("bytes:", b) s0_version = int.from_bytes(b, byteorder="big", signed=False) print(s0_version)bytes: b'\x02' 2The final handful of bytes give the *length of the GRIB message* (GRIB報全体の長さ). This is apparently variable from file to file.b = f_bin.read(8) # Variable. print("bytes:", b) s0_griblen = int.from_bytes(b, byteorder="big", signed=False) print(s0_griblen) s0_seclen = 16 # hard-code for convenience.___ Section 1: 識別節This section spans a total of twenty-one bytes.f_bin.seek(s0_seclen) b = f_bin.read(4) # Should be 21. print("bytes:", b) s1_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s1_seclen) b = f_bin.read(1) # Should be 1. print("bytes:", b) s1_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s1_secnum)bytes: b'\x01' 1The next pair of octects are the "location where the data was created", I assume. They call it “作成中枢の識別”. This is identified via 共通符号表C-1. In any case, the value is supposed to be 34, which apparently corresponds to Tokyo.b = f_bin.read(2) # Should be 34. print("bytes:", b) s1_center = int.from_bytes(b, byteorder="big", signed=False) print(s1_center) b = f_bin.read(2) # Should be 0. print("bytes:", b) s1_subcenter = int.from_bytes(b, byteorder="big", signed=False) print(s1_subcenter)bytes: b'\x00\x00' 0The next three bytes are rather technical. First is the "version number of GRIB master table" (GRIBマスター表バージョン番号), second is "version number of GRIB region table" (GRIB地域表バージョン番号), and finally is the "meaning of the referred time (参照時刻の意味). Respectively, the meaning of these codes can be found in 符号表1.0, 1.1, and 1.2. Their expected values are 2, 1, and 0.b = f_bin.read(1) # Should be 2. print("bytes:", b) s1_vermaster = int.from_bytes(b, byteorder="big", signed=False) print(s1_vermaster) b = f_bin.read(1) # Should be 1. print("bytes:", b) s1_verregion = int.from_bytes(b, byteorder="big", signed=False) print(s1_verregion) b = f_bin.read(1) # Should be 0. 
print("bytes:", b) s1_timemeaning = int.from_bytes(b, byteorder="big", signed=False) print(s1_timemeaning)bytes: b'\x00' 0Next is some rather important metadata, namely the date/time, which has the meaning specified in the previous byte, which is apparently “解析”. The bytes are as follows:- 2 bytes: year- 1 byte: month- 1 byte: day- 1 byte: hour- 1 byte: minute- 1 byte: secondThe interpretation of these values is specified in 「※1 時刻表現」. Let's first read them out and take a look.b = f_bin.read(2) # Should be the year. print("bytes:", b) s1_year = int.from_bytes(b, byteorder="big", signed=False) print(s1_year) b = f_bin.read(1) # Should be the month. print("bytes:", b) s1_month = int.from_bytes(b, byteorder="big", signed=False) print(s1_month) b = f_bin.read(1) # Should be the day. print("bytes:", b) s1_day = int.from_bytes(b, byteorder="big", signed=False) print(s1_day) b = f_bin.read(1) # Should be the hour. print("bytes:", b) s1_hour = int.from_bytes(b, byteorder="big", signed=False) print(s1_hour) b = f_bin.read(1) # Should be the minute. print("bytes:", b) s1_minute = int.from_bytes(b, byteorder="big", signed=False) print(s1_minute) b = f_bin.read(1) # Should be the second. print("bytes:", b) s1_second = int.from_bytes(b, byteorder="big", signed=False) print(s1_second)bytes: b'\x00' 0Great. Very simple. This looks to match up directly with the file name (i.e., we could just parse the file name to get the same metadata). The next byte is the document's status, either a "product" (現業プロダクト) or "test product" (現業的試験プロジェクト). Details are in 符号表1.3.b = f_bin.read(1) # Should be 0 or 1. print("bytes:", b) s1_status = int.from_bytes(b, byteorder="big", signed=False) print(s1_status)bytes: b'\x00' 0Finally, the document type, detailed in 符号表1.4, is supposedly 2, representing “解析及び予報プロダクト”.b = f_bin.read(1) # Should be 2. print("bytes:", b) s1_type = int.from_bytes(b, byteorder="big", signed=False) print(s1_type)bytes: b'\x02' 2___ Section 2: 地域使用節This section is apparently not used. ___ Section 3: 格子系定義節This section is for lattice definitions, and spans seventy-two bytes.f_bin.seek((s0_seclen+s1_seclen)) b = f_bin.read(4) # Should be 72. print("bytes:", b) s3_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s3_seclen) b = f_bin.read(1) # Should be 3. print("bytes:", b) s3_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s3_secnum)bytes: b'\x03' 3Reference for the lattice definition, detailed in 符号表3.0. Should be zero, with a meaning of “符号表3.1参照”, rather humorously.b = f_bin.read(1) # Should be 0. print("bytes:", b) s3_defref = int.from_bytes(b, byteorder="big", signed=False) print(s3_defref)bytes: b'\x00' 0Number of points on the lattice.b = f_bin.read(4) # Should be 286720. print("bytes:", b) s3_numpoints = int.from_bytes(b, byteorder="big", signed=False) print(s3_numpoints)bytes: b'\x00\x04`\x00' 286720Number of bytes used for a list that defines the number of points on the lattice (convoluted...), followed by the description of this list. Both are just zero here.b = f_bin.read(1) # Should be 0. print("bytes:", b) s3_numpointsMem = int.from_bytes(b, byteorder="big", signed=False) print(s3_numpointsMem) b = f_bin.read(1) # Should be 0. print("bytes:", b) s3_numpointsExp = int.from_bytes(b, byteorder="big", signed=False) print(s3_numpointsExp)bytes: b'\x00' 0The next two bytes give the template number identifying the lattice definition type, details in 符号表3.1. It should be zero, meaning “緯度・経度格子”.b = f_bin.read(2) # Should be 0. 
print("bytes:", b) s3_template = int.from_bytes(b, byteorder="big", signed=False) print(s3_template)bytes: b'\x00\x00' 0Next is a key value: the identifier of the shape of the earth, details in 符号表3.2. This should be four, representing “GRS80回転楕円体”, recalling GRS80 is *Geodetic Reference System 1980*, a reference ellipsoid modelling the earth.b = f_bin.read(1) # Should be 4. print("bytes:", b) s3_earthshape = int.from_bytes(b, byteorder="big", signed=False) print(s3_earthshape)bytes: b'\x04' 4The next five are all missing.b = f_bin.read(5) # Should be missing, likely max values. print("bytes:", b)bytes: b'\xff\xff\xff\xff\xff'The next ten bytes are broken up as follows.- 1 byte: major axis unit factor (尺度因子)- 4 bytes: major axis length in given units (長軸の尺度付きの長さ)- 1 byte: minor axis unit factor (尺度因子)- 4 bytes: minor axis length in given units (短軸の尺度付きの長さ)b = f_bin.read(1) # Should be 1. print("bytes:", b) s3_factormaj = int.from_bytes(b, byteorder="big", signed=False) print(s3_factormaj) b = f_bin.read(4) # Should be 63781370. print("bytes:", b) s3_lenmaj = int.from_bytes(b, byteorder="big", signed=False) print(s3_lenmaj) b = f_bin.read(1) # Should be 1. print("bytes:", b) s3_factormin = int.from_bytes(b, byteorder="big", signed=False) print(s3_factormin) b = f_bin.read(4) # Should be 63567523. print("bytes:", b) s3_lenmin = int.from_bytes(b, byteorder="big", signed=False) print(s3_lenmin)bytes: b'\x03\xc9\xf6\xa3' 63567523Next, we have the absolute number of points on the lattice, following along the latitude and longitude lines. In Japanese, the former is “緯線に沿った格子点数”, the latter is “経線に沿った格子点数”.b = f_bin.read(4) # Should be 512. print("bytes:", b) s3_numlon = int.from_bytes(b, byteorder="big", signed=False) print(s3_numlon) b = f_bin.read(4) # Should be 560. print("bytes:", b) s3_numlat = int.from_bytes(b, byteorder="big", signed=False) print(s3_numlat)bytes: b'\x00\x00\x020' 560The interpretation as a grid is easy: we have a grid which is 512 cells wide, and 560 cells tall. As well, the number of cells computed here exactly equals the total number of points on the lattice we extracted earlier.print(s3_numpoints) print((s3_numlat*s3_numlon))286720 286720Next is some "base angle", should be zero.b = f_bin.read(4) # Should be 0. print("bytes:", b) s3_baseangle = int.from_bytes(b, byteorder="big", signed=False) print(s3_baseangle)bytes: b'\x00\x00\x00\x00' 0Next should be missing.b = f_bin.read(4) # Should be 0. print("bytes:", b)bytes: b'\xff\xff\xff\xff'Now are some numerical values we need to be careful with. The next eight bytes give the latitude (緯度) and longitude (経度) of the "first" point on the lattice. The units are in what they call “10-6度単位”, which is scientific notation for units re-scaled by $10^{-6}$. Let's see the values first, and then confirm we are handling what we are expecting.b = f_bin.read(4) # Should be 47975000. print("bytes:", b) s3_firstlat = int.from_bytes(b, byteorder="big", signed=False) print(s3_firstlat) b = f_bin.read(4) # Should be 118031250. print("bytes:", b) s3_firstlon = int.from_bytes(b, byteorder="big", signed=False) print(s3_firstlon)bytes: b'\x07\t\x03\x92' 118031250Note that if we multiply these values by $10^{-6}$, we have:print((s3_firstlat/10**6)) print((s3_firstlon/10**6))47.975 118.03125This certainly look a lot more like degrees, and of course that is precisely what they are. 
The detailed calculations given in the JMA materials are as follows:- First point's latitude: 48N - 3.0/60/2.- First point's longitude: 118E + 3.75/60/2.Of course, these calculations are easily confirmed:print((48 - (3/(2*60)))) print((118 + (3.75/(2*60))))47.975 118.03125Next is a slightly complicated pair of terms: 分解能及び成分フラグ, with details in フラグ表3.3. The value should be 0x30, though it comes up as zero. In any case, the values after this guy are fine, so it seems there is no big issue.b = f_bin.read(1) # Should be 0x30. print("bytes:", b)bytes: b'0'Next, just as before, we have eight bytes dedicated to the latitude and longitude of the final lattice point/b = f_bin.read(4) # Should be 20025000. print("bytes:", b) s3_lastlat = int.from_bytes(b, byteorder="big", signed=False) print(s3_lastlat) b = f_bin.read(4) # Should be 149968750. print("bytes:", b) s3_lastlon = int.from_bytes(b, byteorder="big", signed=False) print(s3_lastlon)bytes: b'\x08\xf0Wn' 149968750We can do analogous confirmations as before, although noting that the +/- signs in the shift are now opposite what they were for the first point.- Last point's latitude: 20N + 3.0/60/2.- Last point's longitude: 150E - 3.75/60/2.Confirming everything:print((s3_lastlat/10**6)) print((s3_lastlon/10**6)) print((20 + (3/(2*60)))) print((150 - (3.75/(2*60))))20.025 149.96875Finally, we have the step size in the $i$-direction and $j$-direction respectively (called the $i$方向の増分, $j$方向の増分), defined:- $i$-direction: 3.75/60- $j$-direction: 3.0/60The units, again, are $10^{-6}$ as before.b = f_bin.read(4) # Should be 62500. print("bytes:", b) s3_isize = int.from_bytes(b, byteorder="big", signed=False) print(s3_isize) b = f_bin.read(4) # Should be 50000. print("bytes:", b) s3_jsize = int.from_bytes(b, byteorder="big", signed=False) print(s3_jsize) print((s3_isize/10**6)) print((s3_jsize/10**6)) print(3.75/60) print(3.0/60)0.0625 0.05As we would expect, the degrees *increase uniformly* (by the $i$ and $j$ sizes) over the lat/long directions on our graph.tmp_val = s3_firstlon print(tmp_val) for i in range(s3_numlon-1): tmp_val += s3_isize print(tmp_val) print(s3_lastlon) tmp_val = s3_firstlat print(tmp_val) for j in range(s3_numlat-1): tmp_val -= s3_jsize print(tmp_val) print(s3_lastlat)47975000 20025000 20025000Finally, we have the “走査モード”, namely a scanning mode, details in フラグ表3.4, and it should have a value of 0x00, namely zero.b = f_bin.read(1) # Should be 0x00. print("bytes:", b)bytes: b'\x00'___ Section 4: プロダクト定義節This section is for so-called "product" definitions, and spans forty-two bytes.f_bin.seek((s0_seclen+s1_seclen+0+s3_seclen)) b = f_bin.read(4) # Should be 42. print("bytes:", b) s4_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s4_seclen) b = f_bin.read(1) # Should be 4. print("bytes:", b) s4_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s4_secnum)bytes: b'\x04' 4The number of coordinate values immediately after the template (テンプレート直後の座標値の数). Should be zero.b = f_bin.read(2) # Should be 0. print("bytes:", b) s4_coordnum = int.from_bytes(b, byteorder="big", signed=False) print(s4_coordnum)bytes: b'\x00\x00' 0Product definition template number. Should be 50000, apparently with the meaning that the "product" here is based on processing other existing "products". Details in 符号表4.0.b = f_bin.read(2) # Should be 50000. print("bytes:", b) s4_tempnum = int.from_bytes(b, byteorder="big", signed=False) print(s4_tempnum)bytes: b'\xc3P' 50000Parameter category. 
Should be one, which means humidity (湿度), details in 符号表4.1.b = f_bin.read(1) # Should be 1. print("bytes:", b) s4_paracat = int.from_bytes(b, byteorder="big", signed=False) print(s4_paracat)bytes: b'\x01' 1Parameter number. Should be 208, which means landslide risk warnings (土砂災害警戒判定値), details in 符号表4.2.b = f_bin.read(1) # Should be 208. print("bytes:", b) s4_paranum = int.from_bytes(b, byteorder="big", signed=False) print(s4_paranum)bytes: b'\xd0' 208Type of processing, details in 符号表4.3. Should be zero, meaning analysis/forecasting (解析及び予報).b = f_bin.read(1) # Should be 0. print("bytes:", b) s4_proctype = int.from_bytes(b, byteorder="big", signed=False) print(s4_proctype)bytes: b'\x00' 0Background processing method, details in 符号表JMA4.1, should be 160, meaning the specialized routine for landslide risk warnings (土砂災害警戒情報ルーチン).b = f_bin.read(1) # Should be 160. print("bytes:", b) s4_backproc = int.from_bytes(b, byteorder="big", signed=False) print(s4_backproc)bytes: b'\xa0' 160Next one is missing, the analysis method identifier.b = f_bin.read(1) # Should be missing. print("bytes:", b)bytes: b'\xff'Cut-off time measured from the reference time, in hours and minutes.b = f_bin.read(2) # Should be 0. print("bytes:", b) s4_cutoffHour = int.from_bytes(b, byteorder="big", signed=False) print(s4_cutoffHour) b = f_bin.read(1) # Should be 10. print("bytes:", b) s4_cutoffMin = int.from_bytes(b, byteorder="big", signed=False) print(s4_cutoffMin)bytes: b'\n' 10Type of units used for time period, details in 符号表4.4. Should be zero, meaning minutes.b = f_bin.read(1) # Should be 0. print("bytes:", b) s4_timeunit = int.from_bytes(b, byteorder="big", signed=False) print(s4_timeunit)bytes: b'\x00' 0Forecast time (予報時間). This is supposed to be zero.b = f_bin.read(4) # Should be 0. print("bytes:", b) s4_fctime = int.from_bytes(b, byteorder="big", signed=False) print(s4_fctime)bytes: b'\x00\x00\x00\x00' 0Type of "first fixed surface" (第一固定面の種類), with details in 符号表4.5. Should be one, meaning either land or water (地面または水面).b = f_bin.read(1) # Should be 1. print("bytes:", b) s4_surftype = int.from_bytes(b, byteorder="big", signed=False) print(s4_surftype)bytes: b'\x01' 1The next eleven bytes are all missing.b = f_bin.read(1) # Should be missing. print("bytes:", b) b = f_bin.read(4) # Should be missing. print("bytes:", b) b = f_bin.read(1) # Should be missing. print("bytes:", b) b = f_bin.read(1) # Should be missing. print("bytes:", b) b = f_bin.read(4) # Should be missing. print("bytes:", b)bytes: b'\xff\xff\xff\xff'Name of the first set of related materials used in making this data set, details in 符号表JMA4.5, should be 2, meaning 土壌雨量指数の予測値. Otherwise, if the values are missing, it will be 255.b = f_bin.read(1) # Should be 2 or 255. print("bytes:", b) s4_refsoil = int.from_bytes(b, byteorder="big", signed=False) print(s4_refsoil)bytes: b'\x02' 2Difference between the *analysis* time and the reference time in the *current* referenced materials, in hours and minutes. By "current", we mean the materials identified in the previous octet. After this, there will be another set of reference materials, and of course in principle this could go on indefinitely, but here just two reference materials in total.b = f_bin.read(2) # Should be variable. print("bytes:", b) s4_refsoilDiffHour = int.from_bytes(b, byteorder="big", signed=False) print(s4_refsoilDiffHour) b = f_bin.read(1) # Should be variable. 
print("bytes:", b) s4_refsoilDiffMin = int.from_bytes(b, byteorder="big", signed=False) print(s4_refsoilDiffMin)bytes: b'&' 38Name of the second set of related materials used in making this data set, details in 符号表JMA4.5, should be 4, meaning 1時間降水量の予測値. Otherwise, if the values are missing, it will be 255.b = f_bin.read(1) # Should be 4 or 255. print("bytes:", b) s4_refrain = int.from_bytes(b, byteorder="big", signed=False) print(s4_refrain)bytes: b'\x04' 4Once again, time differences, now for the second set of reference materials.b = f_bin.read(2) # Should be variable. print("bytes:", b) s4_refrainDiffHour = int.from_bytes(b, byteorder="big", signed=False) print(s4_refrainDiffHour) b = f_bin.read(1) # Should be variable. print("bytes:", b) s4_refrainDiffMin = int.from_bytes(b, byteorder="big", signed=False) print(s4_refrainDiffMin)bytes: b'\x00' 0For reference, from the tenth byte through to the final byte of this section above, they say that "template 4.50000" is used. ___ Section 5: 資料表現節This section has "representations" of the main "materials". This is not the actual data, but rather the means of interpreting the data values which appear in the seventh section, a bit later.f_bin.seek((s0_seclen+s1_seclen+0+s3_seclen+s4_seclen)) b = f_bin.read(4) # Should be variable. print("bytes:", b) s5_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s5_seclen) b = f_bin.read(1) # Should be 5. print("bytes:", b) s5_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s5_secnum)bytes: b'\x05' 5Total number of data instances, should be equal to the number of lattice points seen earlier.b = f_bin.read(4) # Should be 286720. print("bytes:", b) s5_numdata = int.from_bytes(b, byteorder="big", signed=False) print(s5_numdata)bytes: b'\x00\x04`\x00' 286720Next is the template number of these materials, details in 符号表5.0. Should be 200, with meaning of 格子点資料 with run-length compression (ランレングス圧縮).b = f_bin.read(2) # Should be 200. print("bytes:", b) s5_tempnum = int.from_bytes(b, byteorder="big", signed=False) print(s5_tempnum)bytes: b'\x00\xc8' 200Next is the number of bits per individual datum.b = f_bin.read(1) # Should be 8. print("bytes:", b) s5_databits = int.from_bytes(b, byteorder="big", signed=False) print(s5_databits)bytes: b'\x08' 8Maximum "level" used in compression this time, a potentially variable value. Defined $V$ for later algebraic formulations.b = f_bin.read(2) # Should be variable. print("bytes:", b) s5_V = int.from_bytes(b, byteorder="big", signed=False) print(s5_V)bytes: b'\x00\x04' 4Maximum level, should be ten, and defined as $M$ for later use. Also, should have $V \leq M$.b = f_bin.read(2) # Should be 10. print("bytes:", b) s5_M = int.from_bytes(b, byteorder="big", signed=False) print(s5_M)bytes: b'\x00\n' 10Representative data value unit factor, should be zero.b = f_bin.read(1) # Should be 0. print("bytes:", b) s5_datafactor = int.from_bytes(b, byteorder="big", signed=False) print(s5_datafactor)bytes: b'\x00' 0Now, ranging over $m=1,2,\ldots,M$, we look at the data values that correspond to level $m$ (レベル$m$に対応するデータ代表値). Each of these uses two bytes. The interpretation is as follows (via ※2 in documentation):- $m=0$. $R(m)$ is null. This is missing value.- $m=1$. $R(m)=-2$. This is ocean or non-JPN lattice point.- $m=2$. $R(m)=-1$. Lattice point on Japanese land, but not applicable for warnings.- $m=3$. $R(m)=0$. This is warning level 0.- $m=4$. $R(m)=1$. This is warning level 1.- $m=5$. $R(m)=2$. This is warning level 2.- $m=6$. $R(m)=3$. 
This is warning level 3.- $m=7$. $R(m)=4$. This is warning level 4.- $m=8$. $R(m)=5$. This is TBD.- $m=9$. $R(m)=6$. This is TBD.- $m=10$. $R(m)=7$. This is TBD.Let's take a look at the first few values, one byte at a time.b = f_bin.read(1) print("bytes:", b) b = f_bin.read(1) print("bytes:", b) b = f_bin.read(1) print("bytes:", b) b = f_bin.read(1) print("bytes:", b)bytes: b'\x80' bytes: b'\x02' bytes: b'\x80' bytes: b'\x01'These two-byte representative values are stored in sign-and-magnitude form: the leading byte 0x80 sets the sign bit to flag a negative value, so 0x80 0x02 encodes -2 and 0x80 0x01 encodes -1, exactly as expected. (This is not two's complement, which is why we read these two entries byte by byte rather than with a single signed int.from_bytes call.) Reading out the remaining, non-negative entries is straightforward.for m in range(3,(s5_M+1)): b = f_bin.read(2) print("bytes:", b) tmp_level = int.from_bytes(b, byteorder="big", signed=True) print("m =", m, "; R(m) =", tmp_level)bytes: b'\x00\x00' m = 3 ; R(m) = 0 bytes: b'\x00\x01' m = 4 ; R(m) = 1 bytes: b'\x00\x02' m = 5 ; R(m) = 2 bytes: b'\x00\x03' m = 6 ; R(m) = 3 bytes: b'\x00\x04' m = 7 ; R(m) = 4 bytes: b'\x00\x05' m = 8 ; R(m) = 5 bytes: b'\x00\x06' m = 9 ; R(m) = 6 bytes: b'\x00\x07' m = 10 ; R(m) = 7___ Section 6: ビットマップ節This section is basically empty, spanning just six bytes.f_bin.seek((s0_seclen+s1_seclen+0+s3_seclen+s4_seclen+s5_seclen)) b = f_bin.read(4) # Should be 6. print("bytes:", b) s6_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s6_seclen) b = f_bin.read(1) # Should be 6. print("bytes:", b) s6_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s6_secnum)bytes: b'\x06' 6Bitmap indicator, should be 255, meaning that a bitmap isn't used here.b = f_bin.read(1) # Should be 255. print("bytes:", b) s6_bitmap = int.from_bytes(b, byteorder="big", signed=False) print(s6_bitmap)bytes: b'\xff' 255___ Section 7: 資料節Finally, the data section.f_bin.seek((s0_seclen+s1_seclen+0+s3_seclen+s4_seclen+s5_seclen+s6_seclen)) b = f_bin.read(4) # Should be variable. print("bytes:", b) s7_seclen = int.from_bytes(b, byteorder="big", signed=False) print(s7_seclen) b = f_bin.read(1) # Should be 7. print("bytes:", b) s7_secnum = int.from_bytes(b, byteorder="big", signed=False) print(s7_secnum)bytes: b'\x07' 7Aside: JMA run-length compression algorithmNow comes a long series of data *values*, although these are "run-length compressed octets". The decompression is extremely straightforward once we know how the compression is done. Fortunately, this is explained in the second reference material linked at the start of this notebook, and we spell it out here for clarity and posterity.Run-length encoding is a very simple (lossless) compression technique. Given a sequence of scalar values $(d_{1},d_{2},\ldots,d_{k})$, if there are long stretches where values repeat, i.e. $d_{i}=d_{i+1}=\cdots=d_{i+m}$, then the basic idea is to just store (1) the value (here, $d_{i}$), and (2) the "run length" (here, $m+1$). With this information, we can naturally re-construct the original sequence.The JMA run-length encoding for domestic meteorological data stored in binary format is just one special case of run-length encoding, and we describe it in detail now.First, two key terms to get us started.- `MAXV` (here $V$): all data values $d$ are assumed to be integers $0 \leq d \leq V$.- `NBIT` (here $N$): the number of bits that makes up a single data point in the compressed data.Now let's talk about the content of these data points. During compression, data are organized into "sets" (contiguous sub-sequences). There are precisely two types of sets: - Sets with just one data point $(c)$, holding *value* information.
- Sets with two or more data points $(c,r_{0},r_{1},\ldots)$, holding *value* and *run length* information. Each of these data points is provided in the compressed data set using a base-2 (binary) representation, composed of `NBIT` binary digits. In both cases, $c$ represents the actual data value of interest, but the former case indicates that the value appeared just *once*, whereas in the latter case, the $r_{0},r_{1},\ldots$ provide us with all the information needed to determine the run length (by construction greater than $1$, details given below).Given compressed data $(c_{1},\ldots,c_{m})$, determining where one "set" ends and another begins is simple, using the following rules:- If $0 \leq c_{i} \leq V$, then $c_{i}$ is a *value*, not run length information.- Else, we have $c_{i} > V$ and $c_{i}$ is run length information for a previous value.To completely decode this data, we need not just $c$, but the actual run length; let us denote this by $R$. In the case of the trivial set $(c)$, we know $R=1$. In general, however, given a set $(c,r_{0},r_{1},\ldots)$, we need to deduce $R$ from the $r_{0},r_{1},\ldots$ values. We describe how to do this next.Since our compressed data has only `NBIT` bits for each datum, naturally each datum can represent at most $2^{N}$ different values. On the other hand, run lengths are at least $1$, but can assuredly be much larger than $2^{N}$. This means we will need more than one datum. Furthermore, for very long runs, if we were to simply sum the $r_{0},r_{1},\ldots$ to get the run length, this would require a lot of data, which is again very inefficient.We have the following requirements:- We would like to represent run length $R$ using as little data as possible.- Each $r_{i}$ must satisfy $r_{i}>V$ in order to distinguish run length information from value information.The technique employed is to use a base-$L$ (written `LNGU` in JMA materials and our code) representation of the run length, where $L=2^{N}-V-1$. Since the leading $c$ already accounts for one occurrence of the value, what gets encoded is $R-1$, the number of *additional* repetitions. Write $a_{p-1}a_{p-2} \cdots a_{0}$ to denote a $p$-digit base-$L$ representation of $R-1$, namely\begin{align*}R - 1 = a_{0}L^{0} + \cdots + a_{p-1}L^{p-1}.\end{align*}As each digit $a_{j}$ ranges between $0$ and $L-1 = 2^{N}-V-2$, we cannot simply use these $a_{j}$ values as our $r_{i}$ values, since they cannot be distinguished from data values in general. This problem is easily solved: pad the values by adding $V+1$, such that they range between $V+1$ and $(L-1)+(V+1)=2^{N}-1$. This upper bound is precisely the maximum value that we can represent using `NBIT` bits, and the lower bound is greater than `MAXV`, as required.Written out explicitly, we have $r_{i} = a_{i} + V + 1$. This is all the information we need to decode the run length.In practice, we start with the $c$ and $r_{i}$ values, so we have to work backwards:1. Given $V$ and $N$ (parameters of the compression procedure), we can compute $L$.2. Given a set $(c)$, decompress it as-is to $(c)$.3. Else, given a set $(c,r_{0},\ldots,r_{p-1})$, decompress to $(c,\ldots,c)$ repeated $R > 1$ times, with $R$ computed as follows: first compute $a_{i} = r_{i}-V-1$ for $i=0,\ldots,p-1$; then, given $L$ and $(a_{0},\ldots,a_{p-1})$, compute $R = 1 + a_{0}L^{0} + \cdots + a_{p-1}L^{p-1}$, i.e. one plus the base-$L$ number $a_{p-1} \cdots a_{0}$. This is exactly how the decompression code below reconstructs the run length.__On data orientation:__ For 2D lattice data stored in a one-dimensional array (a long vector), the assumption is that it has been read going from *west to east*, and *north to south*.
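To make these decoding rules concrete before walking through the notebook's own decoder in the next cell, here is a small self-contained sketch. The compressed sequence is made up purely for illustration; the parameters match the ones read above for this file (NBIT = 8 and MAXV = 4, hence LNGU = 251), and the helper name `rle_decode` is ours, not part of any library.

```python
# A minimal sketch of the decoding rules above (illustrative only).
# NBIT = 8 and MAXV = 4 match this file, so LNGU = 2**8 - 1 - 4 = 251.
def rle_decode(octets, maxv, nbit=8):
    lngu = 2**nbit - 1 - maxv
    out = []
    value = None
    power = 0
    for c in octets:
        if c <= maxv:
            # A data value: output it once and start a fresh run.
            value = c
            power = 0
            out.append(c)
        else:
            # A run-length digit: a_i = c - (maxv + 1) additional copies,
            # weighted by lngu**i for the i-th digit of the current run.
            out.extend([value] * (lngu**power * (c - (maxv + 1))))
            power += 1
    return out

# Hypothetical compressed stream: "3" repeated 3 times, "1" once,
# then "2" repeated 252 times (252 - 1 = 0*251**0 + 1*251**1).
example = [3, 7, 1, 2, 5, 6]
decoded = rle_decode(example, maxv=4)
print(len(decoded), decoded[:6])   # 256 [3, 3, 3, 1, 2, 2]
```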
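Related to the orientation note: once the data section has been decompressed into a flat vector, mapping it back onto the 2D grid is a single row-major reshape. A sketch, assuming the lattice dimensions `s3_numlat` and `s3_numlon` read back in Section 3 of this notebook and the `data_decompressed` list produced in the next cell:

```python
# Sketch: place the decompressed west-to-east, north-to-south vector onto the grid.
# Assumes s3_numlat / s3_numlon from Section 3 and data_decompressed from the cell below.
import numpy as np

grid = np.array(data_decompressed).reshape(s3_numlat, s3_numlon)
# grid[0, 0] is then the north-west corner and grid[-1, -1] the south-east corner.
```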
Decompression of lattice dataNow that we've covered the background of the compression algorithm used to create the data we have accessto, moving forward we read the key parameters, then deploy the simple decompression algorithm described above.bytes_left = s7_seclen - 5 # number of bytes left in this section. # Before doing anything, read out the compressed data sequence. data_compressed = [] bpd = s5_databits // 8 # bytes per data point. for i in range(bytes_left): b = f_bin.read(bpd) #print("bytes:", b) b_int = int.from_bytes(b, byteorder="big", signed=False) data_compressed += [b_int] # Key constants for decompression. NBIT = s5_databits MAXV = s5_V LNGU = 2**NBIT - 1 - MAXVThe `NBIT`, `MAXV`, and `LNGU` values for this particular data set are as follows.print(NBIT, MAXV, LNGU) import numpy as np # Next, decompress this data sequence. data_decompressed = [] dictpair = {"data": [], "runlen": []} for i in range(len(data_compressed)): tocheck = data_compressed[i] print("tocheck =", tocheck, "; dict =", dictpair) if len(dictpair["data"]) == 0: dictpair["data"] = [tocheck] else: # If data slot is populated, then we're either dealing # with runlength, or moving to a new singleton. if tocheck > MAXV: # In this case, it is runlength data to be added. dictpair["runlen"] += [tocheck] else: # In this case, it is data, signifying a new set. # Thus, we must enter the "processing" routine. numrl = len(dictpair["runlen"]) if numrl == 0: # If no runlength info, means it only appeared once. data_decompressed += dictpair["data"] #print("Adding to decompressed data:", dictpair["data"]) else: # If there is runlength info, then must process it. rlinfo = np.array(dictpair["runlen"]) rlexp = np.arange(numrl) rlvec = (LNGU**rlexp) * (rlinfo - (1+MAXV)) rl = np.sum(rlvec) + 1 data_decompressed += rl * dictpair["data"] #print("Adding to decompressed data:", rl * dictpair["data"]) # Reset, critically noting that we have a new data point to store. dictpair["data"] = [tocheck] dictpair["runlen"] = [] # Finally, note that there will ALWAYS be something left over. if tocheck <= MAXV: # If the last guy was small, then just add it. data_decompressed += dictpair["data"] #print("Adding to decompressed data:", dictpair["data"]) else: # If the last guy was large, we have non-trivial run-length. numrl = len(dictpair["runlen"]) rlinfo = np.array(dictpair["runlen"]) rlexp = np.arange(numrl) rlvec = (LNGU**rlexp) * (rlinfo - (1+MAXV)) rl = np.sum(rlvec) + 1 data_decompressed += rl * dictpair["data"] #print("Adding to decompressed data:", rl * dictpair["data"]) print(len(data_decompressed)) print(np.unique(np.array(data_decompressed))) #print(data_decompressed) print(s3_numpoints) #print(512*560) #print(s3_numlat) #print(s3_numlon) s3_numpoints-len(data_decompressed)If this value is zero, we can be essentially certain that we have successfully decoded the compressed data. ___ Section 8: 終端節Basically a trivial final section that lets us know we're finished.f_bin.seek((s0_seclen+s1_seclen+0+s3_seclen+s4_seclen+s5_seclen+s6_seclen+s7_seclen)) b = f_bin.read(4) # Should be "7777". print(b) s8_grib = str(b, 'ascii') print(s8_grib) b = f_bin.read(1) # Should be at the end... print(b)b''Introduction to Methods Using TurtleHere we introduce you to using Python methods by making a method for drawing a star, then repeatedly calling that method from the main program. You can copy and paste this code into your IDE to see the result.# This is a program that will draw stars with random values # all over the window. 
The stars are drawn using a method # with parameters. # Program Structure Note: # imports come first # methods come next # main program comes last import turtle as t import random # method to draw a star def draw_star(size, num_rays, color, x, y): # go to the right location t.goto(x,y) # set the proper color if( color % 4 == 0 ): t.color("red") if( color % 4 == 1 ): t.color("green") if( color % 4 == 2 ): t.color("yellow") if( color % 4 == 3 ): t.color("blue") # draw the star t.pendown() for i in range(num_rays): # loop num_rays number of times t.left( 360 / num_rays ) # num_rays lines in our star t.forward(size) t.backward(size) t.penup() # end of the draw_star method # main program t.reset() # clear the window for drawing t.shape("turtle") t.penup() t.pensize(3) num_stars = 20 for i in range(num_stars): # setup star values star_size = random.randrange(10,100) # ray size from 10 to 100 px star_rays = random.randrange(5,10) # num rays from 5 through 9 star_color = random.randrange(4) # there will be 4 possible color values star_x = random.randrange(-300, 300) # set the star's x location star_y = random.randrange(-300, 300) # set the star's y location draw_star(star_size, star_rays, star_color, star_x, star_y) # end of for loop t.hideturtle()
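Before relying on random values, it can help to call the method once with fixed arguments to confirm it behaves as expected. The call below is purely illustrative (any in-range values would do) and assumes the program above has already been run so that `draw_star` and the turtle module are available.

```python
# Illustrative only: one deterministic call to draw_star after running the
# program above (so that draw_star and turtle are already set up).
t.penup()
draw_star(80, 7, 0, 0, 0)   # size 80 px, 7 rays, color index 0 (red), at the origin
t.hideturtle()
```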
LRU Cache
Design a data structure that follows the constraints of a Least Recently Used (LRU) cache. Implement the LRUCache class:
- LRUCache(int capacity) Initialize the LRU cache with positive size capacity.
- int get(int key) Return the value of the key if the key exists, otherwise return -1.
- void put(int key, int value) Update the value of the key if the key exists. Otherwise, add the key-value pair to the cache. If the number of keys exceeds the capacity from this operation, evict the least recently used key.
The functions get and put must each run in O(1) average time complexity.
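The Python solution given after the example below leans on collections.OrderedDict, whose move_to_end and popitem(last=False) operations are what make the O(1) requirement attainable: the first marks a key as most recently used, the second evicts the least recently used one. A tiny standalone demonstration (the keys and values here are arbitrary):

```python
# Standalone illustration of the two OrderedDict operations used by the
# LRU solution below; keys and values are arbitrary.
from collections import OrderedDict

od = OrderedDict([(1, "a"), (2, "b"), (3, "c")])
od.move_to_end(1)                  # key 1 becomes the most recently used
print(list(od))                    # [2, 3, 1]
evicted = od.popitem(last=False)   # pop the least recently used entry
print(evicted, list(od))           # (2, 'b') [3, 1]
```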
Example 1:- Input: ["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"][[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]- Output: [null, null, null, 1, null, -1, null, -1, 3, 4]- Explanation - LRUCache lRUCache = new LRUCache(2); - lRUCache.put(1, 1); // cache is {1=1} - lRUCache.put(2, 2); // cache is {1=1, 2=2} - lRUCache.get(1); // return 1 - lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3} - lRUCache.get(2); // returns -1 (not found) - lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3} - lRUCache.get(1); // return -1 (not found) - lRUCache.get(3); // return 3 - lRUCache.get(4); // return 4 Constraints:- 1 <= capacity <= 3000- 0 <= key <= 104- 0 <= value <= 105- At most 2 * 105 calls will be made to get and put.''' IDEA: The idea is manage items in ordered HashMap. It can be immplemented by Dictionary and Doubly Linked List. The idea is that PUT will add a key-value pair to the HashMap and the key to the end of the Doubly Linked List. GET fetches the value from the HashMap by key and moves the value to the end of linked list When eviciting the existing value we just delete the key from the head of the doubled linked list and from HashMap ANALYSIS: All operations are performed in O(1) ''' from collections import OrderedDict class LRUCache: def __init__(self, capacity: int): self.cache = OrderedDict() self.capacity = capacity def get(self, key: int) -> int: if not key in self.cache: return -1 else: self.cache.move_to_end(key) return self.cache[key] def put(self, key: int, value: int) -> None: if key in self.cache: self.cache.move_to_end(key) self.cache[key] = value if self.capacity < len(self.cache): self.cache.popitem(last=False)Load datapath_perfect_sims_phot = "/sps/lsst/users/bbiswas/data/kilonova_datasets/RESSPECT/RESSPECT_PERFECT_LIGHTCURVE_WITH_KN.csv" path_perfect_sims_head = "/sps/lsst/users/bbiswas/data/kilonova_datasets/RESSPECT/RESSPECT_PERFECT_HEAD_WITH_KN.csv" data_ob = load_RESSPECT_data(phot_df_file_path=path_perfect_sims_phot, meta_df_file_path=path_perfect_sims_head) from src.Generate_PC_utils import get_event_distirb event_distrib = get_event_distirb(data_ob) print(event_distrib) from kndetect.utils import get_data_dir_path data_dir = get_data_dir_path()Filter objects with readings every 2 daysfrom src.Generate_PC_utils import get_ids_of_eqally_spaced_objects equally_spaced_ids = get_ids_of_eqally_spaced_objects(data_ob) event_distrib = get_event_distirb(data_ob, equally_spaced_ids) print(event_distrib) if save_results: np.save(os.path.join(data_dir,"equally_spaced_ids"), equally_spaced_ids) equally_spaced_ids = np.load(os.path.join(data_dir,"equally_spaced_ids.npy")) event_distrib = get_event_distirb(data_ob, equally_spaced_ids) print(event_distrib){'Ibc_V19': 584, 'IIn': 381, 'II_V19': 567, 'II-NMF': 621, 'Iax': 601, 'Ia': 849, '91bg': 558, 'Ibc-MOSFIT': 248, 'KN': 1000}Generating PCs Create train data with specific distribution of Datafrom src.Generate_PC_utils import get_ids_for_target_distrib target_distrib = {'91bg': 125, 'II-NMF': 125, 'II_V19': 125, 'IIn': 125, 'Ia': 125, 'Iax': 125, 'Ibc-MOSFIT': 125, 'Ibc_V19': 125, 'KN': 1000} target_ids = get_ids_for_target_distrib(data_ob, target_distrib, equally_spaced_ids) event_distrib = get_event_distirb(data_ob, target_ids) print(event_distrib){'II-NMF': 125, 'II_V19': 125, '91bg': 125, 'Iax': 125, 'Ibc_V19': 125, 'KN': 1000, 'Ia': 125, 'IIn': 125, 'Ibc-MOSFIT': 125}Generate PCSfrom src.Generate_PC_utils import stack_training_data, 
gen_components train_data = stack_training_data(data_ob, object_ids=target_ids, num_days_tolerance=2, plot_results=False) plt.close('all') if save_results: np.save(os.path.join(data_dir,"PC_generation_dataset_mixed.npy"), train_data) PC_dict, PC_var_ratio = gen_components(train_data) print(PC_var_ratio) band = 'all' PCs= PC_dict[band] markers = ['o','s','D','*','x'] band = 'all' print(band+ " band") PCs= PC_dict[band] fig = plt.figure(figsize=(10,5)) for i in range(3): PC = PCs[i] x = np.arange(0,102,2)-50 plt.plot(x,PC,marker=markers[i],label = "PC "+str(i+1)) plt.xlabel("days since maximum", fontsize = 25) plt.ylabel("PCs", fontsize=25) plt.rc('xtick', labelsize=17) plt.rc('ytick', labelsize=17) plt.rc('legend', fontsize=15) plt.legend() plt.tight_layout() plt.show() if save_results: np.save(os.path.join(data_dir, 'pc_var_ratio_mixed_pcs.npy'), PC_var_ratio) np.save(os.path.join(data_dir, "mixed_pcs"),PC_dict)Now interpolateimport matplotlib.pyplot as plt from src.io_utils import get_pcs num_pc_components=3 bands=['g','r'] pcs = get_pcs(num_pc_components, pcs_choice='non-interpolated', normalize_pcs=False) import numpy as np fig = plt.figure(figsize=(10,5)) for i in range(num_pc_components): PC = pcs[i] x = np.arange(0,102,2)-50 plt.scatter(x,PC,label = "PC "+str(i+1)) plt.legend() x = np.linspace(-50, 50, num=51, endpoint=True) xnew = np.linspace(-50, 50, num=401, endpoint=True) np.shape(x) from scipy.interpolate import interp1d PC_inter_func = [] PC_new=[] for i in range(3): func = interp1d(x, pcs[i], kind='quadratic') PC_inter_func.append(func) PC_new.append(func(xnew)) PC_new = np.array(PC_new) np.shape(PC_new) import numpy as np fig = plt.figure(figsize=(10,5)) pc_names = ["PC1", "PC2", "PC3"] colors = ['#F5622E', '#15284F', '#3C8DFF'] markers = ['o','s','D','*','x'] for i in range(num_pc_components): max_val = np.amax(np.abs(PC_new[i])) PC_new[i] = PC_new[i]/max_val PC = PC_new[i] plt.plot(xnew,PC,label = pc_names[i], marker=markers[i], ms=5, color=colors[i]) plt.xlabel("days since maximum", fontsize=25) plt.ylabel("normalized PCs", fontsize=25) ax = plt. gca() leg = ax.legend() plt.rc('xtick', labelsize=17) plt.rc('ytick', labelsize=17) plt.rc('legend', loc='lower right', fontsize=15) plt.legend() plt.tight_layout() #plt.savefig("results/PC_plots/interpolated_pcs.pdf") if save_results: np.save(os.path.join(data_dir, 'interpolated_mixed_pcs.npy'), PC_new)![image.png](attachment:image.png)# Consider an array of dimension (5,5,3), how to mulitply it by an array with dimensions (5,5)? A = np.ones((5,5,3)) B = 2*np.ones((5,5)) print (A) # Swap two rows of an array? A = np.arange(25).reshape(5,5) print (A) print (A[:2]) print (A[0]) x=A[0].copy() A[0]=A[1] A[1]=x print (A)[[ 0 1 2 3 4] [ 5 6 7 8 9] [10 11 12 13 14] [15 16 17 18 19] [20 21 22 23 24]] [[0 1 2 3 4] [5 6 7 8 9]] [0 1 2 3 4] [[ 5 6 7 8 9] [ 0 1 2 3 4] [10 11 12 13 14] [15 16 17 18 19] [20 21 22 23 24]]Special Data typesIn addition to dtype objects, NumPy introduces special numeric values: nan (not a number) and inf(infinity)What is the result of the following expression? 
0 * np.nan np.nan == np.nan np.inf > np.nan np.nan - np.nan 0.3 == 3 * 0.1 0 * np.nan np.nan == np.nan np.inf > np.nan np.nan - np.nan 0.3 == 3 * 0.1 3*0.10000000000000000 0.30000000000000004 == 3*0.1 import numpy as np arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) #3x3 matrix # Retireve rows(0,1) , column (1rst onward) # Sol 1: print(arr2d[:2, 1:]) # Sol 2: print(arr2d[ [0,1] , [1,2] ]) row = np.array([0,1]) col = np.array([1,2]) print("row=",row) print("col=",col) print("arr2d[row,col]=",arr2d[row,col]) print(arr2d[row[:, np.newaxis], col])[[2 3] [5 6]] [2 6] row= [0 1] col= [1 2] arr2d[row,col]= [2 6] [[2 3] [5 6]]Handwritten Digit RecognitionThis tutorial guides you through a classic computer vision application: identify hand written digits with neural networks.![MNIST-t-SNE](MNIST-T-SNE.png) Trains a simple deep NN (DNN) on the MNIST dataset using Keras (with TensorFlow backend)Gets to 98.40% test accuracy after 20 epochs(there is *a lot* of margin for parameter tuning).First let's start with importing the relevant symbols from Kerasfrom __future__ import print_function import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils from keras.callbacks import TensorBoardUsing TensorFlow backend.Setting up some hyper-parameters (try others yourself)Each of these hyper-parameters has a different impact on your task:**Batch Size** - if you set it too low, the training will take forever as most of your resource will be idle. but if you set it too high your process might fail for lack of memory, or start spilling to disk, which makes it slow again.**Number of epoch** - if you set it too low, the accuracy will not be high enough as you didn't train the network on enough data, but if you set it too high, you will waste a lot of GPU cycles and money, and also risk over-fitting.batch_size = 128 nb_classes = 10 nb_epoch = 20Load dataWe first fetch the MNIST dataset, which is a commonly used dataset for handwritten digit recognition. Each image in this dataset has been resized into 28x28 with grayscale value between 0 and 254. The following codes download and load the images and the according labels into numpy.# the data, shuffled and split between train and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data()Installing additional modules If you don't have Jupyther Themes installed, the following cell will fail. 
This is a good opportunity to learn how to add modules to your jupyter environment or to skip this cell for now (it is adding only small visual modifications).If you decided to install the module open a terminal from the home page of jupyther (where you opened this notebook from) using the "new" button.Run *pip install jupyterthemes*If the next cell is still failing you are probably using conda to switch between different environments.Run *source activate tensorflow_p27* (or any other environment you are using - see on the top right of your notebook)and then *pip install jupyterthemes* again.# import jtplot submodule from jupyterthemes from jupyterthemes import jtplot # currently installed theme will be used to # set plot style if no arguments provided jtplot.style()We plot the first 9 images and their classesimport matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (7,7) # Make the figures a bit bigger for i in range(9): plt.subplot(3,3,i+1) plt.imshow(X_train[i], cmap='gray', interpolation='none') plt.title("Class {}".format(y_train[i])) plt.axis('off') plt.show() X_train = X_train.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes)Build the neural networkBuild the neural-network. Here we'll do a simple 3 layer fully connected network.![DNN-Example](DNN-Example.png)model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_1 (Dense) (None, 512) 401920 _________________________________________________________________ activation_1 (Activation) (None, 512) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 512) 0 _________________________________________________________________ dense_2 (Dense) (None, 512) 262656 _________________________________________________________________ activation_2 (Activation) (None, 512) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 512) 0 _________________________________________________________________ dense_3 (D[...]Note how many parameters do we have in this small network. We will try to reduce this number (why?) in the next models. 
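The dense layers account for essentially all of those weights. A quick back-of-the-envelope check, using only the layer sizes already shown above, reproduces the counts in the summary:

```python
# Each Dense layer has (inputs * units) weights plus one bias per unit.
layers = [(784, 512), (512, 512), (512, 10)]
params = [n_in * n_out + n_out for n_in, n_out in layers]
print(params)       # [401920, 262656, 5130] -- the first two match the summary above
print(sum(params))  # 669706 trainable parameters in total
```

Most of the reduction in the later models comes from shrinking the flattened input that feeds the first dense layer, for example via pooling or striding.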
Which activation function is the best?![Activation Functions](activation_functions.png)model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])Connect to TensorBoardtensorbaord = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True) callbacks_list = [tensorbaord]Train the model!Now let the machines learn: you can feed the training data loaded in earlier into this model and it will learn to classify digitshistory = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=1, callbacks=callbacks_list, validation_data=(X_test, Y_test)) score = model.evaluate(X_test, Y_test, verbose=0) print('Test score:', score[0]) print('Test accuracy:', score[1]) assert score[1] > 0.98Test score: 0.109225015025 Test accuracy: 0.983Inspecting the outputIt's always a good idea to inspect the output and make sure everything looks sane. Here we'll look at some examples it gets right, and some examples it gets wrong.# The predict_classes function outputs the highest probability class # according to the trained classifier for each input example. predicted_classes = model.predict_classes(X_test) # Check which items we got right / wrong correct_indices = np.nonzero(predicted_classes == y_test)[0] incorrect_indices = np.nonzero(predicted_classes != y_test)[0] plt.figure() for i, correct in enumerate(correct_indices[:9]): plt.subplot(3,3,i+1) plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none') plt.title("Pred: {}, True: {}".format(predicted_classes[correct], y_test[correct])) plt.axis('off') plt.figure() for i, incorrect in enumerate(incorrect_indices[:9]): plt.subplot(3,3,i+1) plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none') plt.title("Pred: {}, True: {}".format(predicted_classes[incorrect], y_test[incorrect])) plt.axis('off') plt.show()Human level yet? Or can we do better? ConvolutionThe results that we got from the previous model seems good (Test accuracy: 0.9842), but when we look at the examples that were misclassified, we can see that the mistakes are pretty bad. 
It is easy for us to see the right class.This what leads us to use more advance symbols and the first one is the Convolution (Conv2D below) Let's start with importing the new symbols from Keras.from keras.layers import Conv2D,MaxPooling2D,Flatten from keras import backend as K # input image dimensions img_rows, img_cols = 28, 28 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_dim_ordering() == 'th' : x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # Convert 1-dimensional class arrays to 10-dimensional class matrices Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28,28,1))) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ conv2d_2 (Conv2D) (None, 24, 24, 32) 9248 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 12, 12, 32) 0 _________________________________________________________________ dropout_3 (Dropout) (None, 12, 12, 32) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 4608) 0 _________________________________________________________________ dense_4 (Dense) (None, 128) 589952 _________________________________________________________________ dropout_4 [...]Max (or Average) Pooling is very useful for Computer Vision problems Make sure that your network is not overfitting and being challenged.Dropout is one way to do it Compile the modelWe just need to compile the model and we'll be ready to train it. When we compile the model, we declare the loss function and the optimizer (SGD, Adam, etc.).model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])Fit model on training dataTo fit the model, all we have to do is declare the batch size and number of epochs to train for, then pass in our training data.model.fit(x_train, Y_train, batch_size=32, epochs=10, verbose=1, callbacks=callbacks_list) score = model.evaluate(x_test, Y_test, verbose=0) print('Test score:', score[0]) print('Test accuracy:', score[1]) # The predict_classes function outputs the highest probability class # according to the trained classifier for each input example. 
predicted_classes = model.predict_classes(x_test) # Check which items we got right / wrong correct_indices = np.nonzero(predicted_classes == y_test)[0] incorrect_indices = np.nonzero(predicted_classes != y_test)[0] plt.figure() for i, correct in enumerate(correct_indices[:9]): plt.subplot(3,3,i+1) plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none') plt.title("Pred:{}, True:{}".format(predicted_classes[correct], y_test[correct])) plt.axis('off') plt.figure() for i, incorrect in enumerate(incorrect_indices[:9]): plt.subplot(3,3,i+1) plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none') plt.title("Pred:{}, True:{}".format(predicted_classes[incorrect], y_test[incorrect])) plt.axis('off') plt.show()Confusion MatrixLet's install a module directly from the notebook cell (no need to switch to the terminal), and then use the confusion matrix from scikit-learn library to better visualize the accuracy of the model as a whole.!pip install -U scikit-learn import sklearn import itertools from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, predicted_classes) plt.matshow(cm) plt.title('Confusion matrix') fmt = 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] < thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') classes = range(10) # Labels are sorted tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes) plt.yticks(tick_marks, classes) plt.grid(False) plt.show()Requirement already up-to-date: scikit-learn in /home/ec2-user/anaconda3/envs/tensorflow_p27/lib/python2.7/site-packagesRNN - LSTM versionTo classify images using a recurrent neural network, we consider every imagerow as a sequence of pixels. 
Because MNIST image shape is 28*28px, we will thenhandle 28 sequences of 28 steps for every sample.![RNN](RNN.jpeg)(From http://karpathy.github.io/2015/05/21/rnn-effectiveness/ )This example will actually use TensorFlow Directly (and not thorugh Keras as a high level API).Let's start with library import and data loadingimport tensorflow as tf from tensorflow.contrib import rnn # Get MNIST dataset # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # Training Parameters learning_rate = 0.001 training_steps = 10000 batch_size = 128 display_step = 200 # LSTM Network Parameters num_input = 28 # MNIST data input (img shape: 28*28) timesteps = 28 # timesteps num_hidden = 128 # hidden layer num of features num_classes = 10 # MNIST total classes (0-9 digits)TensorFlow Training has a common stracture that starts with the definition of the Placeholder for input and the parameters that will used# tf Graph input X = tf.placeholder("float", [None, timesteps, num_input]) Y = tf.placeholder("float", [None, num_classes]) # Define weights weights = { 'out': tf.Variable(tf.random_normal([num_hidden, num_classes])) } biases = { 'out': tf.Variable(tf.random_normal([num_classes])) }Next we define the flow through the symbols of the (R)NNdef RNN(x, weights, biases): # Prepare data shape to match `rnn` function requirements # Current data input shape: (batch_size, timesteps, n_input) # Required shape: 'timesteps' tensors list of shape (batch_size, n_input) # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input) x = tf.unstack(x, timesteps, 1) # Define a lstm cell with tensorflow lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) # Get lstm cell output outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) # Linear activation, using rnn inner loop last output return tf.matmul(outputs[-1], weights['out']) + biases['out']Next we define the operators/functions that we will use in the TF training sessionlogits = RNN(X, weights, biases) prediction = tf.nn.softmax(logits) # Define loss and optimizer loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=Y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # Evaluate model (with test logits, for dropout to be disabled) correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))Next we initilize the variable of the session and running with sess.run# Initialize the variables (i.e. 
assign their default value) init = tf.global_variables_initializer() # Start training with tf.Session() as sess: # Run the initializer sess.run(init) for step in range(1, training_steps+1): batch_x, batch_y = mnist.train.next_batch(batch_size) # Reshape data to get 28 seq of 28 elements batch_x = batch_x.reshape((batch_size, timesteps, num_input)) # Run optimization op (backprop) sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) if step % display_step == 0 or step == 1: # Calculate batch loss and accuracy loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y}) print("Step " + str(step) + ", Minibatch Loss= " + \ "{:.4f}".format(loss) + ", Training Accuracy= " + \ "{:.3f}".format(acc)) print("Optimization Finished!") # Calculate accuracy for 128 mnist test images test_len = 10000 test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input)) test_label = mnist.test.labels[:test_len] test_predictions = prediction.eval(feed_dict = {X:test_data}) print("Testing Accuracy:", \ sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))Step 1, Minibatch Loss= 2.8837, Training Accuracy= 0.062 Step 200, Minibatch Loss= 2.0652, Training Accuracy= 0.289 Step 400, Minibatch Loss= 1.8812, Training Accuracy= 0.375 Step 600, Minibatch Loss= 1.7903, Training Accuracy= 0.336 Step 800, Minibatch Loss= 1.7268, Training Accuracy= 0.406 Step 1000, Minibatch Loss= 1.5313, Training Accuracy= 0.555 Step 1200, Minibatch Loss= 1.5474, Training Accuracy= 0.477 Step 1400, Minibatch Loss= 1.5273, Training Accuracy= 0.508 Step 1600, Minibatch Loss= 1.2532, Training Accuracy= 0.602 Step 1800, Minibatch Loss= 1.2172, Training Accuracy= 0.617 Step 2000, Minibatch Loss= 1.2535, Training Accuracy= 0.617 Step 2200, Minibatch Loss= 1.0845, Training Accuracy= 0.672 Step 2400, Minibatch Loss= 1.1594, Training Accuracy= 0.664 Step 2600, Minibatch Loss= 1.1249, Training Accuracy= 0.602 Step 2800, Minibatch Loss= 1.1148, Training Accuracy= 0.617 Step 3000, Minibatch Loss= 1.0111, Training Accuracy= 0.680 Step 3200, Minibatch Loss= 1.0350, Training Acc[...]We will calculate the confusion matrixtest_true = tf.argmax(test_label,1) test_pred = tf.argmax(test_predictions,1) confusion = tf.confusion_matrix(labels=test_true, predictions=test_pred)and extract the values as a matrix for plottingwith tf.Session(): cm = tf.Tensor.eval(confusion,feed_dict=None, session=None) plt.matshow(cm) plt.title('Confusion matrix') fmt = 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] < thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') classes = range(10) # Labels are sorted tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes) plt.yticks(tick_marks, classes) plt.grid(False) plt.show()Another way to raise an error is by using raise. In this exercise, you will add a raise statement to the shout_echo() function you defined before to raise an error message when the value supplied by the user to the echo argument is less than 0.The call to shout_echo() uses valid argument values. To test and see how the raise statement works, simply change the value for the echo argument to a negative value. Don't forget to change it back to valid values to move on to the next exercise! Complete the if statement by checking if the value of echo is less than 0. 
In the body of the if statement, add a raise statement that raises a ValueError with message 'echo must be greater than 0' when the value supplied by the user to echo is less than 0.# Define shout_echo def shout_echo(word1, echo=1): """Concatenate echo copies of word1 and three exclamation marks at the end of the string.""" # Raise an error with raise if echo<1: raise ValueError ('echo must be greater than 0') # Concatenate echo copies of word1 using *: echo_word echo_word = word1 * echo # Concatenate '!!!' to echo_word: shout_word shout_word = echo_word + '!!!' # Return shout_word return shout_word # Call shout_echo shout_echo("particle", echo=5) [Errors and Exceptions](https://docs.python.org/3/tutorial/errors)asyncio job_scheduleimport sys import importlib from pathlib import Path my_lib_path = str(Path('my_lib').resolve()) if my_lib_path not in sys.path: sys.path.append(my_lib_path) from asyncio_job_schedule import test importlib.reload(test) test.echo()echo, so nice nice 还可以了 托尔斯泰001Disable AdvancedHMC's NUTS loggingusing Logging using LoggingExtras function ignore_sampling_filter(log_args) !(occursin("sampling steps",log_args.message) || occursin("adapation steps",log_args.message)) end logger = ActiveFilteredLogger(ignore_sampling_filter, global_logger()) if !(@isdefined old_logger) #do this only once old_logger = global_logger(logger) end function my_nuts(trace, selection, n_postadapt_steps = 2, n_adapts = 1, initial_ϵ_reduce_fac = 10) n_NUTS_steps = n_postadapt_steps + n_adapts filtered_choices = get_selected(get_choices(trace), selection) cur_xy = to_array(filtered_choices, Float64) dimension = length(cur_xy) metric = DiagEuclideanMetric(dimension) retval_grad = nothing #accepts_output_grad(get_gen_fn(trace)) ? zero(get_retval(trace)) : nothing function update_xy(val) extra_constraints = from_array(filtered_choices, val) update(trace, (), (NoChange(),), extra_constraints) end function val_to_lp_plus_c(val) (new_trace, weight, discard, retdiff) = update_xy(val) weight end function val_to_grad(val) (new_trace, weight, discard, retdiff) = update_xy(val) (retval_grad_out, values_trie, gradient_trie) = choice_gradients(new_trace, selection, retval_grad) grad = [gradient_trie[:x], gradient_trie[:y]] (weight, grad) end # Define a Hamiltonian system, using metric defined globally above hamiltonian = Hamiltonian(metric, val_to_lp_plus_c, val_to_grad) # Define a leapfrog solver, with initial step size chosen heuristically initial_ϵ = find_good_stepsize(hamiltonian, cur_xy) ./ initial_ϵ_reduce_fac integrator = Leapfrog(initial_ϵ) # Define an HMC sampler, with the following components # - multinomial sampling scheme, # - generalised No-U-Turn criteria, and # - windowed adaption for step-size and diagonal mass matrix proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator) adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), StepSizeAdaptor(0.8, integrator)) # Run the sampler to draw samples from the specified Gaussian, where # - `samples` will store the samples # - `stats` will store diagnostic statistics for each sample samples, stats = sample(hamiltonian, proposal, cur_xy, n_NUTS_steps, adaptor, n_adapts; progress=false) #println(samples[3]) (new_trace, weight, discard, retdiff) = update_xy(samples[n_NUTS_steps]) new_trace end iters = 200 show = 5 ρ = .99 samps = mcmc_inference(ρ, iters, my_nuts, select(:x,:y)) samps[(iters-show+1):iters,:] println(cor(samps[1:iters-1,1],samps[2:iters,1])) #serial correlation; lower is better println(ρ^4) #for comparison, gibbs would be ρ² for each 
step; ρ⁴ for two steps positiveMoving Average FilterA moving average can help an analyst filter noise and create a smooth curve from an otherwise noisy curve. The most commonly used Moving Averages(MAs) are the **Simple**, **Cumulative** and **Exponential** moving average.![image.png](attachment:image.png)df = pd.DataFrame(transform, columns=['x', 'y', 'ang']) #Simple Moving Average df['x_MA'] = df.x.rolling(4, min_periods=1).mean() df['y_MA'] = df.y.rolling(4, min_periods=1).mean() df['ang_MA'] = df.ang.rolling(4, min_periods=1).mean() df.head() transform1 = df.to_numpy() #Cumulative sum of original & MAs values trajectory1 = np.cumsum(transform1, axis=0) #x original&MA plt.plot(trajectory1[:, 0], label='Original-x') plt.plot(trajectory1[:, 3], label='Smoothed-x') plt.legend(loc='lower right') #y original&MA plt.plot(trajectory1[:, 1], label='Original-y') plt.plot(trajectory1[:, 4], label='Smoothed-y') plt.legend(loc='lower right') #ang original&MA plt.plot(trajectory1[:, 2], label='Original-angular') plt.plot(trajectory1[:, 5], label='Smoothed-angular') plt.legend(loc='lower right') fig = plt.figure() ax = plt.subplot(111) plt.plot(trajectory1[:, 0], label='Original-x') plt.plot(trajectory1[:, 3], label='Smoothed-x') plt.plot(trajectory1[:, 1], label='Original-y') plt.plot(trajectory1[:, 4], label='Smoothed-y') plt.plot(trajectory1[:, 2], label='Original-angular') plt.plot(trajectory1[:, 5], label='Smoothed-angular') # Shrink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # Put a legend to the right of the current axis ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() diff = trajectory1[:, 3:6]-trajectory FTransform = transform+diff #Resetting video to 1st frame cap.set(cv2.CAP_PROP_POS_FRAMES, 0) w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) for i in range(frames-1): ret1, Frame = cap.read() if not ret1: break #Smoothed values of x, y and angle x = FTransform[i, 0] y = FTransform[i, 1] angle = FTransform[i, 2] #Smoothed transformation values matrix Final_rigidT = np.ones((2, 3), np.float32) Final_rigidT[0, 0] = np.cos(angle) Final_rigidT[0, 1] = -np.sin(angle) Final_rigidT[1, 0] = np.sin(angle) Final_rigidT[1, 1] = np.cos(angle) Final_rigidT[0, 2] = x Final_rigidT[1, 2] = y '''[cos(ang) -sin(ang) x sin(ang) cos(ang) y]''' #Applying smoothed transform to the input frame finalFrame = cv2.warpAffine(Frame, Final_rigidT, (w, h)) cv2.imshow("Input video", Frame) #Stabilized output cv2.imshow("Output video", finalFrame) if cv2.waitKey(50) == ord("q"): break cap.release() cv2.destroyAllWindows()Design Choices in Convolutional Neural Networks Importing packagesimport numpy as np import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D from keras import backend as K from keras.preprocessing import image from keras.applications.mobilenet import MobileNet from keras.applications.vgg16 import preprocess_input, decode_predictions from keras.models import Model import timeit import warnings warnings.filterwarnings('ignore')Preparing Datasetbatch_size = 128 num_classes = 10 epochs = 2 # input image dimensions img_rows, img_cols = 28, 28 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = 
x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes)Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz 11493376/11490434 [==============================] - 0s 0us/step x_train shape: (60000, 28, 28, 1) 60000 train samples 10000 test samplesPart 1: Influence of convolution size Try the models with different convolution sizes 5x5, 7x7 and 9x9 etc. Analyze the number of model parameters, accuracy and training time Model with (3 x 3) ConvolutionK.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 26, 26, 8) 80 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 16) 1168 _________________________________________________________________ flatten (Flatten) (None, 9216) 0 _________________________________________________________________ dense (Dense) (None, 32) 294944 _________________________________________________________________ dense_1 (Dense) (None, 10) 330 ================================================================= Total params: 296,522 Trainable params: 296,522 Non-trainable params: 0 __________________________________________________[...]Try models with different Convolution sizes# Write your code here. Use the same architecture as above. K.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (5,5), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. 
K.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(5,5), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (5,5), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. K.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(7,7), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") K.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(7,7), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (9,9), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") K.clear_session() start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(9,9), activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (9,9), activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 20, 20, 8) 656 _________________________________________________________________ conv2d_1 (Conv2D) (None, 12, 12, 16) 10384 _________________________________________________________________ flatten (Flatten) (None, 2304) 0 _________________________________________________________________ dense (Dense) (None, 32) 73760 _________________________________________________________________ dense_1 (Dense) (None, 10) 330 ================================================================= Total params: 85,130 Trainable params: 85,130 Non-trainable params: 0 ____________________________________________________[...]Write your findings about activations here? 1. 
The best performing model in terms of accuracy is the (3x3) convolution, followed by the (5x5). It is worth mentioning that we are using only 2 epochs, longer training may produce different results. It is unlikely, however that (7x7) or (9x9) will yield better results, given the poor performance in the second epoch. 2. In terms of training time, the larger the kernels in the model, the slower it is to train. 3. In terms of trainable parameters, larger kernels imply less parameters. Part 2: Influence of Striding Try the models with different stride sizes such as 2,3,4 etc. Analyze the number of model parameters, accuracy and training time Model with Convolution with 2 Stepsstart = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=2, activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=2, activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=1, activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=1, activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=3, activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=3, activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. 
start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=4, activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=4, activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=5, activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=5, activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential_5" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_10 (Conv2D) (None, 6, 6, 8) 80 _________________________________________________________________ conv2d_11 (Conv2D) (None, 1, 1, 16) 1168 _________________________________________________________________ flatten_5 (Flatten) (None, 16) 0 _________________________________________________________________ dense_10 (Dense) (None, 32) 544 _________________________________________________________________ dense_11 (Dense) (None, 10) 330 ================================================================= Total params: 2,122 Trainable params: 2,122 Non-trainable params: 0 ____________________________________________________[...]Write your findings about influence of striding here? 1. Not surprisingly, the larger the stride, the faster the training time is and the smaller the number of trainable parameters. 2. Also not surprising, the lower the stride, the better accuracy. This however is based on only two epochs, longer training my lead to a different conclusion. 3. Going further, I would propose to repeat this exercise using 20 epochs to see if my conclusions hold. Part 3: Influence of Padding Try the models with padding and without padding. Analyze the number of model parameters, accuracy and training time Model with (3 x 3) Convolution with Same Paddingstart = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=1, padding='same', activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=1, padding='same', activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. 
start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=1, padding='same', activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=1, padding='valid', activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here. Use the same architecture as above. start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), strides=1, padding='valid', activation='relu', input_shape=input_shape)) model.add(Conv2D(16, (3, 3), strides=1, padding='valid', activation='relu')) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential_8" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_16 (Conv2D) (None, 26, 26, 8) 80 _________________________________________________________________ conv2d_17 (Conv2D) (None, 24, 24, 16) 1168 _________________________________________________________________ flatten_8 (Flatten) (None, 9216) 0 _________________________________________________________________ dense_16 (Dense) (None, 32) 294944 _________________________________________________________________ dense_17 (Dense) (None, 10) 330 ================================================================= Total params: 296,522 Trainable params: 296,522 Non-trainable params: 0 ________________________________________________[...]Write your findings about influence of padding here? 1. 'valid' padding yield better accuracy than 'same'. This might be caused by the presence of important characteristics in the borders of the images. 2. Valid padding also was faster to train, which I must say did surprised me because I was expecting the opposite. This might be caused by the reduction in trainable parameters, which also came as a surprise to me. Part 4: Influence of Pooling Try the models with different pooling window sizes such as 2x2, 3x3, 4x4 etc. 
Analyze the number of model parameters, accuracy and training time Model with (3 x 3) Convolution with Pooling (2 x 2)start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(16, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential_9" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_18 (Conv2D) (None, 26, 26, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 13, 13, 8) 0 _________________________________________________________________ conv2d_19 (Conv2D) (None, 11, 11, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 5, 5, 16) 0 _________________________________________________________________ flatten_9 (Flatten) (None, 400) 0 _________________________________________________________________ dense_18 (Dense) (None, 32) 12832 ______________________________________________________[...]Model with (3 x 3) Convolution with Pooling (3 x 3)# Write your code here # Use the same model design from the above cell start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(3,3))) model.add(Conv2D(16, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(3,3))) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds") # Write your code here # Use the same model design from the above cell start = timeit.default_timer() model = Sequential() model.add(Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(4,4))) model.add(Conv2D(16, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(4,4))) model.add(Flatten()) model.add(Dense(32, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) end = timeit.default_timer() print("Time Taken to run the model:",end - start, "seconds")Model: "sequential_11" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_22 (Conv2D) (None, 26, 26, 8) 80 
_________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 6, 6, 8) 0 _________________________________________________________________ conv2d_23 (Conv2D) (None, 4, 4, 16) 1168 _________________________________________________________________ max_pooling2d_5 (MaxPooling2 (None, 1, 1, 16) 0 _________________________________________________________________ flatten_11 (Flatten) (None, 16) 0 _________________________________________________________________ dense_22 (Dense) (None, 32) 544 _____________________________________________________[...]NOTE:In the cell below you **MUST** use a batch size of 10 (`batch_size=10`) for the `train_generator` and the `validation_generator`. Using a batch size greater than 10 will exceed memory limits on the Coursera platform.TRAINING_DIR = '/tmp/cats-v-dogs/training' train_datagen = ImageDataGenerator(rescale=1 / 255) # NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE # TRAIN GENERATOR. train_generator = train_datagen.flow_from_directory( TRAINING_DIR, batch_size=64, class_mode='binary', target_size=(150, 150) ) VALIDATION_DIR = '/tmp/cats-v-dogs/testing' validation_datagen = ImageDataGenerator(rescale= 1/255) # NOTE: YOU MUST USE A BACTH SIZE OF 10 (batch_size=10) FOR THE # VALIDATION GENERATOR. validation_generator = validation_datagen.flow_from_directory( VALIDATION_DIR, batch_size=64, class_mode='binary', target_size=(150, 150) ) # Expected Output: # Found 2700 images belonging to 2 classes. # Found 300 images belonging to 2 classes. history = model.fit_generator(train_generator, epochs=2, verbose=1, validation_data=validation_generator) # PLOT LOSS AND ACCURACY %matplotlib inline import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['acc'] val_acc=history.history['val_acc'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch #------------------------------------------------ plt.plot(epochs, acc, 'r', "Training Accuracy") plt.plot(epochs, val_acc, 'b', "Validation Accuracy") plt.title('Training and validation accuracy') plt.figure() #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(epochs, loss, 'r', "Training Loss") plt.plot(epochs, val_loss, 'b', "Validation Loss") plt.title('Training and validation loss') # Desired output. Charts with training and validation metrics. No crash :)Submission Instructions# Now click the 'Submit Assignment' button above.When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.%%javascript IPython.notebook.save_checkpoint(); %%javascript IPython.notebook.session.delete(); window.onbeforeunload = null setTimeout(function() { window.close(); }, 1000); # Here's a codeblock just for fun. 
You should be able to upload an image here # and have it classified without crashing import numpy as np from google.colab import files from keras.preprocessing import image uploaded = files.upload() for fn in uploaded.keys(): # predicting images path = '/content/' + fn img = image.load_img(path, target_size=(# YOUR CODE HERE)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) images = np.vstack([x]) classes = model.predict(images, batch_size=10) print(classes[0]) if classes[0]>0.5: print(fn + " is a dog") else: print(fn + " is a cat")Comparing the old and new controllerfor algorithm in old_metrics['PlanningAlgorithm'].unique(): fig, axes = plt.subplots(1, 3, figsize=(40,10)) # Determine plot data and associated parameters label_name=['Goal Number: '+str(num) for num in old_metrics['goalNumber'].unique()] width = 0.7/2 x=old_metrics['goalNumber'].unique() metric = ['distanceTravelled','totalAngleTurned','timeForPath'] for i in range(3): axes[i].bar(x-width/2,old_metrics[old_metrics['PlanningAlgorithm']==algorithm][metric[i]],width,label='old') axes[i].bar(x+width/2,new_metrics[new_metrics['PlanningAlgorithm']==algorithm][metric[i]],width,label='new') # Add title, axis labels and tick labels axes[i].set_ylabel(metric[i],fontsize=22,fontweight='bold') axes[i].set_xlabel('Goal Numbers',fontsize=22,fontweight='bold') axes[i].set_title(algorithm+' - '+metric[i],fontsize=24,fontweight='bold') axes[i].set_xticks(x) label_name=[textwrap.fill(name,10) for name in label_name] axes[i].set_xticklabels(label_name,fontsize=16) axes[i].legend(fontsize=18) alg_name = ''.join(c for c in '_'.join(algorithm.split()) if c.isalnum() or c=='_') filename = '../report/images/compare_robot_'+alg_name +'.png' plt.savefig(filename) fig.tight_layout() plt.show()Merge retrospective entriesFinds and merges the retrospective items for the daily notes in a given week.My weeks start on Sunday.The code takes the first three retro items which are assumed to be 'WW', 'WDNW', 'WDD'.from roam_data.dates.dates import date_from, roam_format, start_of_week_containing, dates_for_wc day = '20211215' days = list(roam_format(day) for day in dates_for_wc(start_of_week_containing(date_from(day)))) days from roam_data.roam.graph import Graph, Page, Block graph= Graph.from_file('../data/20211215/test01.json') notes =[graph.page_titles[day] for day in days if day in graph.page_titles] len(notes) from roam_data.roam.filter_entries import retro_from list(retro_from(page) for page in notes) from roam_data.roam.filter_entries import retros_in rb = retros_in(notes) rb from roam_data.roam.merge import merge_retros rr = merge_retros(rb) from roam_data.roam.to_markdown import retro_blocks_to_markdown retro_blocks_to_markdown(rr)Data Analysis -- Measuring Coronavirus Word Choice and Messaging Focus -- Keyness Analysis The sentiment and KWIC analyses I conducted delved into and revealed an important difference between Fox's and CNN's coronavirus response: consistency. Another important difference between Fox's and CNN's coronanvirus response could be their word choice and vocabulary, and their overall messaging from a content perspective. Keyness analysis is one tool that can help provide insight into this. Keyness Analysis is a statistical tool that is often used in order to identify significant differences between 2 corpora. Essentially, Keyness Analysis compares the normalized frequencies of linguistic items in two corpora. 
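As the next sentence spells out, normalization simply scales a word's raw count by the size of its corpus so that corpora of different lengths can be compared fairly. A minimal sketch of that step follows; the token lists are illustrative, the notebook builds its counts from the broadcast transcripts, and its `calculate_keyness` helper is assumed to apply a keyness statistic (commonly log-likelihood) on top of frequencies like these.

```python
from collections import Counter

# Illustrative tokens -- the real notebook tokenizes the CNN and Fox transcripts.
cnn_tokens = ["virus", "cases", "virus", "testing", "hospital"]
fox_tokens = ["virus", "economy", "reopen", "economy"]

cnn_counts, fox_counts = Counter(cnn_tokens), Counter(fox_tokens)

def norm_freq(word, counts, per=1_000_000):
    """Raw frequency scaled by corpus size, expressed per `per` tokens."""
    return counts[word] / sum(counts.values()) * per

# Normalized frequency of "virus" in each corpus, now directly comparable.
print(norm_freq("virus", cnn_counts), norm_freq("virus", fox_counts))
```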
Keyness Analysis normalizes frequencies of linguistic items by scaling the raw frequency of an item in a text/corpus by the size of the text/corpus. In this project, I used Keyness Analysis to compare normalized word frequencies between Fox and CNN news broadcasts, comparing word frequencies in CNN news broadcasts to those in Fox news broadcasts and vice versa. This will help provide me with insights into the vocabulary, word choice, and messaging differences that exist between the two news outlets' coronavirus responses. It will help me focus on which words in particular differ between Fox and CNN, which vocabulary differences are meaningful, and how messaging differs between Fox and CNN from a content perspective as a result of this. This will also help to extend KWIC analysis, by drawing insights about each news outlet's overall messaging between January and April, as opposed to focusing more granularly on certain months, the differences between messaging within certain months and between them, and messaging consistency.%run data_processing.ipynb[nltk_data] Downloading package vader_lexicon to [nltk_data] /Commjhub/jupyterhub/comm318_fall2019/jdlish/nltk_data [nltk_data] ... [nltk_data] Package vader_lexicon is already up-to-date!Setting Up Keyness Analysisflattened_tokens_cnn = [] #flatten the list for i in range(len(data_cnn['targeted text'])): toks=tokenize(data_cnn['targeted text'][i],True,strip_chars=strip_chars) for y in toks: flattened_tokens_cnn.append(y) flattened_tokens_fox = [] #flatten the list for i in range(len(data_fox['targeted text'])): toks=tokenize(data_fox['targeted text'][i],True,strip_chars=strip_chars) for y in toks: flattened_tokens_fox.append(y) CNN_words=Counter(flattened_tokens_cnn) Fox_words=Counter(flattened_tokens_fox) import math cnn_vs_fox=calculate_keyness(CNN_words, Fox_words,print_table=False) cnn_vs_fox.columns=["CNN Word","CNN Word Frequency","Fox Word Frequency","Keyness CNN vs. Fox"] fox_vs_cnn=calculate_keyness(Fox_words, CNN_words,print_table=False) fox_vs_cnn.columns=["Fox Word","Fox Word Frequency","CNN Word Frequency","Keyness Fox vs. 
CNN"] combined_keyness=pd.concat([cnn_vs_fox.reset_index(drop=True), fox_vs_cnn.reset_index(drop=True)], axis=1) combined_keyness.iloc[1:25]COSC320 Third Milestone Rabin-Karp Function The following cell defines a function that implements Rabin-Karp on the given pattern and text, and returns a plagiarism percentage from 0% - 100%# Following program is the python implementation of # Rabin Karp Algorithm given in CLRS book # pat -> pattern # txt -> text # q -> A prime number def rabinKarpSearch(pat, txt): q = 101 # A prime number M = len(pat) N = len(txt) i = 0 j = 0 p = 0 # hash value for pattern t = 0 # hash value for txt h = 1 # d is the number of characters in the input alphabet d = 256 # The value of h would be "pow(d, M-1)% q" for i in range(M-1): h = (h * d)% q # Calculate the hash value of pattern and first window # of text for i in range(M): p = (d * p + ord(pat[i]))% q t = (d * t + ord(txt[i]))% q # Slide the pattern over text one by one for i in range(N-M + 1): # Check the hash values of current window of text and # pattern if the hash values match then only check # for characters on by one if p == t: # Check for characters one by one for j in range(M): if txt[i + j] != pat[j]: break j+= 1 # if p == t and pat[0...M-1] = txt[i, i + 1, ...i + M-1] if j == M: return M #get length of pattern that has a match # Calculate hash value for next window of text: Remove # leading digit, add trailing digit if i < N-M: t = (d*(t-ord(txt[i])*h) + ord(txt[i + M]))% q # We might get negative values of t, converting it to # positive if t < 0: t = t + q return 0 def rabinKarpFileMatch(file, cFile): corpusFileSize = len(cFile) totalFoundChar = 0 for line in file: totalFoundChar += rabinKarpSearch(line,cFile) return float(totalFoundChar/corpusFileSize) # value from 0-1 that represents the percentage of matches found between the two documentsKMP Function The following cell defines a function that implements KMP on the given pattern and text, and returns a plagiarism percentage from 0% - 100%def kmpHelper(file,cFile): totalFoundChar = 0 for i in file: totalFoundChar += KMPSearch(i,cFile) corpusFileSize = len(cFile) return float(totalFoundChar/corpusFileSize) # for each string in data, run a for loop to check against each string in the data2 string, so that can be done, and then to return a def KMPSearch(pat, txt): M = len(pat) N = len(txt) lps = [0]*M j = 0 computeLPSArray(pat, M, lps) i = 0 while i < N: if pat[j] == txt[i]: i += 1 j += 1 if j == M: return M j = lps[j-1] elif i < N and pat[j] != txt[i]: if j != 0: j = lps[j-1] else: i += 1 return 0 def computeLPSArray(pat, M, lps): len = 0 # length of the previous longest prefix suffix lps[0] # lps[0] is always 0 i = 1 # the loop calculates lps[i] for i = 1 to M-1 while i < M: if pat[i]== pat[len]: len += 1 lps[i] = len i += 1 else: # This is tricky. Consider the example. # AAACAAAA and i = 7. The idea is similar # to search step. 
if len != 0: len = lps[len-1] # Also, note that we do not increment i here else: lps[i] = 0 i += 1LCSS Function The following cell defines a function that implements LCSS on the given pattern and text, and returns a plagiarism percentage from 0% - 100%def lcs(file, cFile): # find the length of the strings m = len(file) n = len(cFile) # declaring the array for storing the dp values L = [[None]*(n + 1) for i in range(m + 1)] """Following steps build L[m + 1][n + 1] in bottom up fashion Note: L[i][j] contains length of LCS of file[0..i-1] and cFile[0..j-1]""" for i in range(m + 1): for j in range(n + 1): if i == 0 or j == 0 : L[i][j] = 0 elif file[i-1] == cFile[j-1]: L[i][j] = L[i-1][j-1]+1 else: L[i][j] = max(L[i-1][j], L[i][j-1]) # L[m][n] contains the length of LCS of file[0..n-1] & cFile[0..m-1] return L[m][n] # end of function lcs def lcsHelper(file,cFile): totalFoundChar = 0 for i in file: totalFoundChar += lcs(i,cFile) corpusFileSize = len(cFile) return float(totalFoundChar/corpusFileSize)Main Driver FunctionThe following function will take the actual input file and compare it against each file in the corpus and return a plagiarism percentage per file.import os def plagDetect(directory, dirSize, plagThresh, plagTestFile): ''' Function that will return a set of filenames from which plagTestFile might have plagiarized from. Parameters: directory : str - path to directory where data files are located dirSize : int - number of files other than plagTestFile that the algorithm will search from plagThresh : float - a floating-point number between 0 - 1, that defines the percentage threshold to classify a document as potentially plagiarized plagTestFile : str - name of the plagiarism test file with extension. No directory. Eg: "plagTestFile.txt" Returns: potentialPlagDocSet : list(str) - list of filenames in directory from which plagTestFile has plagiarized from. ''' corpus = [] corpusNames = [] plagF = [] plagFLength = -1 counter = 0 for file in os.listdir(directory): filename = os.fsdecode(file) if filename.endswith(".txt") and filename != plagTestFile and counter < dirSize: #Take in each corpus file as a big string where newlines are replaced by spaces cf = open(os.path.join(directory,file), "r", encoding="utf-8") content = cf.read() content_list = content.replace('\n',' ') corpus.append(content_list) corpusNames.append(filename) counter+=1 cf.close() if filename == plagTestFile: # For the plagirarism file, split up by line, in a list. with open(os.path.join(directory,file),'r', encoding="utf-8") as plagFile: plagFLength = len(plagFile.read()) plagF = [line.rstrip('\n') for line in plagFile] potentialPlagDocSet = [] for i in range(len(corpus)): cFile = corpus[i] cFileName = corpusNames[i] if(len(cFile) <= plagFLength): print(len(cFile)) simil = 0.5 * kmpHelper(plagF, cFile) + 0.2 * rabinKarpFileMatch(plagF, cFile) + 0.3 * lcsHelper(plagF, cFile) if simil > plagThresh: potentialPlagDocSet.append(cFileName) print(plagFLength) return potentialPlagDocSet import matplotlib.pyplot as plt import time from random import choice import os def tryItABunch(myFn, startN=1, endN=100, stepSize=1, numTrials=20): nValues = [] tValues = [] directory = "g17_corpusfinal/" plagThresh = 0.1 plagTestFile = "plagarism_test_file.txt" for n in range(startN, endN, stepSize): # run myFn several times and average to get a decent idea. 
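# For each corpus size n, time numTrials calls to the plagiarism detector and
# record the average wall-clock time in milliseconds.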
runtime = 0 for t in range(numTrials): lst = n # generate a random list of length n start = time.time() myFn(directory, n, plagThresh, plagTestFile) end = time.time() runtime += (end - start) * 1000 # measure in milliseconds runtime = runtime/numTrials nValues.append(n) tValues.append(runtime) return nValues, tValues start = 1 end = 100 step = 10 nFiles, tTimesofRuntime = tryItABunch( plagDetect, startN = start, endN = end, stepSize=step, numTrials=1) tTimesofRuntime = [x/1000 for x in tTimesofRuntime] tValues = [ x for x in range(start,end,step)] nValues = [ x for x in range(start,end,step)] plt.plot(nFiles, tTimesofRuntime, color="red", label="Actual Runtime") plt.plot(nValues, tValues, color="blue", label="Theoretical Runtime") plt.xlabel("n") plt.ylabel("Time(s)") plt.legend() plt.title("Naive algorithm theoretical runtime vs. actual runtime")Pandas Intro Coding Practice# import pandas package import numpy as npEnabling App Insights for Services in ProductionWith this notebook, you can learn how to enable App Insights for standard service monitoring, plus, we provide examples for doing custom logging within a scoring files in a model. What does Application Insights monitor?It monitors request rates, response times, failure rates, etc. For more information visit [App Insights docs.](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-overview) What is different compared to standard production deployment process?If you want to enable generic App Insights for a service run:```pythonaks_service= Webservice(ws, "aks-w-dc2")aks_service.update(enable_app_insights=True)```Where "aks-w-dc2" is your service name. You can also do this from the Azure Portal under your Workspace--> deployments--> Select deployment--> Edit--> Advanced Settings--> Select "Enable AppInsights diagnostics"If you want to log custom traces, you will follow the standard deplyment process for AKS and you will:1. Update scoring file.2. Update aks configuration.3. Build new image and deploy it. 1. Import your dependenciesfrom azureml.core import Workspace, Run from azureml.core.compute import AksCompute, ComputeTarget from azureml.core.webservice import Webservice, AksWebservice from azureml.core.image import Image from azureml.core.model import Model import azureml.core print(azureml.core.VERSION)2. Set up your configuration and create a workspacews = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')3. Register ModelRegister an existing trained model, add descirption and tags.#Register the model from azureml.core.model import Model model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as tags = {'area': "diabetes", 'type': "regression"}, description = "Ridge regression model to predict diabetes", workspace = ws) print(model.name, model.description, model.version)4. *Update your scoring file with custom print statements*Here is an example: a. In your init function add:```pythonprint ("model initialized" + time.strftime("%H:%M:%S"))``` b. 
In your run function add:```pythonprint ("Prediction created" + time.strftime("%H:%M:%S"))```%%writefile score.py import pickle import json import numpy from sklearn.externals import joblib from sklearn.linear_model import Ridge from azureml.core.model import Model import time def init(): global model #Print statement for appinsights custom traces: print ("model initialized" + time.strftime("%H:%M:%S")) # note here "sklearn_regression_model.pkl" is the name of the model registered under the workspace # this call should return the path to the model.pkl file on the local disk. model_path = Model.get_model_path(model_name = 'sklearn_regression_model.pkl') # deserialize the model file back into a sklearn model model = joblib.load(model_path) # note you can pass in multiple rows for scoring def run(raw_data): try: data = json.loads(raw_data)['data'] data = numpy.array(data) result = model.predict(data) print ("Prediction created" + time.strftime("%H:%M:%S")) # you can return any datatype as long as it is JSON-serializable return result.tolist() except Exception as e: error = str(e) print (error + time.strftime("%H:%M:%S")) return error5. *Create myenv.yml file*from azureml.core.conda_dependencies import CondaDependencies myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn']) with open("myenv.yml","w") as f: f.write(myenv.serialize_to_string())6. Create your new Imagefrom azureml.core.image import ContainerImage image_config = ContainerImage.image_configuration(execution_script = "score.py", runtime = "python", conda_file = "myenv.yml", description = "Image with ridge regression model", tags = {'area': "diabetes", 'type': "regression"} ) image = ContainerImage.create(name = "myimage1", # this is the model object models = [model], image_config = image_config, workspace = ws) image.wait_for_creation(show_output = True)Deploy to ACI (Optional)from azureml.core.webservice import AciWebservice aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1, tags = {'area': "diabetes", 'type': "regression"}, description = 'Predict diabetes using regression model', enable_app_insights = True) from azureml.core.webservice import Webservice aci_service_name = 'my-aci-service-4' print(aci_service_name) aci_service = Webservice.deploy_from_image(deployment_config = aciconfig, image = image, name = aci_service_name, workspace = ws) aci_service.wait_for_deployment(True) print(aci_service.state) %%time import json test_sample = json.dumps({'data': [ [1,28,13,45,54,6,57,8,8,10], [101,9,8,37,6,45,4,3,2,41] ]}) test_sample = bytes(test_sample,encoding='utf8') if aci_service.state == "Healthy": prediction = aci_service.run(input_data=test_sample) print(prediction) else: raise ValueError("Service deployment isn't healthy, can't call the service")7. 
Deploy to AKS service Create AKS compute if you haven't done so.# Use the default configuration (can also provide parameters to customize) prov_config = AksCompute.provisioning_configuration() aks_name = 'my-aks-test3' # Create the cluster aks_target = ComputeTarget.create(workspace = ws, name = aks_name, provisioning_configuration = prov_config) %%time aks_target.wait_for_completion(show_output = True) print(aks_target.provisioning_state) print(aks_target.provisioning_errors)If you already have a cluster you can attach the service to it: ```python %%timeresource_id = '/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/'create_name= 'myaks4'attach_config = AksCompute.attach_configuration(resource_id=resource_id)aks_target = ComputeTarget.attach(workspace = ws, name = create_name, attach_configuration=attach_config) Wait for the operation to completeaks_target.wait_for_provisioning(True)``` a. *Activate App Insights through updating AKS Webservice configuration*In order to enable App Insights in your service you will need to update your AKS configuration file:#Set the web service configuration aks_config = AksWebservice.deploy_configuration(enable_app_insights=True)b. Deploy your serviceif aks_target.provisioning_state== "Succeeded": aks_service_name ='aks-w-dc5' aks_service = Webservice.deploy_from_image(workspace = ws, name = aks_service_name, image = image, deployment_config = aks_config, deployment_target = aks_target ) aks_service.wait_for_deployment(show_output = True) print(aks_service.state) else: raise ValueError("AKS provisioning failed.")8. Test your service%%time import json test_sample = json.dumps({'data': [ [1,28,13,45,54,6,57,8,8,10], [101,9,8,37,6,45,4,3,2,41] ]}) test_sample = bytes(test_sample,encoding='utf8') if aks_service.state == "Healthy": prediction = aks_service.run(input_data=test_sample) print(prediction) else: raise ValueError("Service deployment isn't healthy, can't call the service")9. See your service telemetry in App Insights1. Go to the [Azure Portal](https://portal.azure.com/)2. All resources--> Select the subscription/resource group where you created your Workspace--> Select the App Insights type3. Click on the AppInsights resource. You'll see a highlevel dashboard with information on Requests, Server response time and availability.4. Click on the top banner "Analytics"5. In the "Schema" section select "traces" and run your query.6. Voila! All your custom traces should be there. 
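If the `traces` table looks empty at first, it usually just needs some traffic (and App Insights ingestion can lag by a few minutes). A minimal sketch that reuses the test payload from the cell above to send a few scoring requests; each request triggers the custom `print` statements in `score.py`, which is what surfaces as custom traces. It assumes the `aks_service` object from the deployment cells is still available and healthy.

```python
import json
import time

test_sample = json.dumps({'data': [[1,28,13,45,54,6,57,8,8,10]]})
test_sample = bytes(test_sample, encoding='utf8')

# Send a handful of requests so the "Prediction created ..." traces
# show up in the App Insights analytics view.
for _ in range(5):
    if aks_service.state == "Healthy":
        print(aks_service.run(input_data=test_sample))
    time.sleep(1)
```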
Disable App Insightsaks_service.update(enable_app_insights=False)Clean up%%time aks_service.delete() aci_service.delete() image.delete() model.delete()Synthetic Dataimport csv synthetic_data = tl.tensor(np.zeros([1000, 10, 20, 30], dtype='f')) for i in range(200): start = time.time() with open('../Data/synthetic_data/data_normal/data{}.tensor'.format(i)) as file: reader = csv.reader(file, delimiter='\t') for row in reader: indices = [[index] for index in np.int64(np.asarray(row[:-1]))-1] synthetic_data[tuple(indices)] = np.double(row[-1]) print('>> synthetic_data{} loaded '.format(i), time.time() - start)>> synthetic_data0 loaded 0.8036372661590576 >> synthetic_data1 loaded 0.7937493324279785 >> synthetic_data2 loaded 0.791825532913208 >> synthetic_data3 loaded 0.7583863735198975 >> synthetic_data4 loaded 0.8618288040161133 >> synthetic_data5 loaded 0.7982239723205566 >> synthetic_data6 loaded 0.8991434574127197 >> synthetic_data7 loaded 1.0349154472351074 >> synthetic_data8 loaded 0.8069455623626709 >> synthetic_data9 loaded 0.8092312812805176 >> synthetic_data10 loaded 0.9315512180328369 >> synthetic_data11 loaded 0.8319196701049805 >> synthetic_data12 loaded 0.7559518814086914 >> synthetic_data13 loaded 0.7368896007537842 >> synthetic_data14 loaded 1.001147747039795 >> synthetic_data15 loaded 1.3474411964416504 >> synthetic_data16 loaded 0.8665621280670166 >> synthetic_data17 loaded 0.7760329246520996 >> synthetic_data18 loaded 0.7544364929199219 >> synthetic_data19 loaded 0.7518587112426758 >> synthetic_data20 loaded 0.8361003398895264 >> synthetic_data21 loade[...]Sample Videoimport csv sample_video = tl.tensor(np.zeros([205, 240, 320, 3], dtype='d')) for i in range(41): start = time.time() with open('../Data/sample_video/data/video{}.tensor'.format(i)) as file: reader = csv.reader(file, delimiter='\t') for row in reader: indices = [[index] for index in np.int64(np.asarray(row[:-1]))-1] sample_video[tuple(indices)] = np.double(row[-1]) print('>> sample_video{} loaded '.format(i), time.time() - start)>> sample_video0 loaded 17.943934202194214 >> sample_video1 loaded 18.175222873687744 >> sample_video2 loaded 18.29034399986267 >> sample_video3 loaded 18.185331106185913 >> sample_video4 loaded 18.23938298225403 >> sample_video5 loaded 18.205886602401733 >> sample_video6 loaded 18.270495891571045 >> sample_video7 loaded 18.050713300704956 >> sample_video8 loaded 18.23133420944214 >> sample_video9 loaded 18.222721338272095 >> sample_video10 loaded 18.293696880340576 >> sample_video11 loaded 18.25161385536194 >> sample_video12 loaded 18.053239583969116 >> sample_video13 loaded 18.167654991149902 >> sample_video14 loaded 18.11659526824951 >> sample_video15 loaded 18.15689444541931 >> sample_video16 loaded 18.233609199523926 >> sample_video17 loaded 18.1990008354187 >> sample_video18 loaded 18.265703439712524 >> sample_video19 loaded 18.208263397216797 >> sample_video20 loaded 17.94605541229248 >> sample_video21 loaded 18.154611587524414 >> sample_video22 loaded 18.[...]Stockimport csv stock2_tensor = tl.tensor(np.zeros([3089, 140, 5], dtype=float)) start = time.time() with open('../Data/stock/KOSPI140/Stock.tensor') as file: reader = csv.reader(file, delimiter='\t') for row in reader: indices = np.asarray([index for index in np.int64(np.asarray(row[:-1]))])[[1, 0, 2]] stock2_tensor[tuple(indices)] = np.double(row[-1]) print('>> stock tensor loaded ', time.time() - start)>> stock tensor loaded 36.043638944625854Hall from OLSTEC* `hall1-200.mat`: 144 * 176 * 200 => 200 * 144 * 176* 
`hall_144x100_frame2900-3899_pan.mat`: 144 * 100 * 1000 => 1000 * 144 * 100from scipy.io import loadmat import h5py hall1 = loadmat('../Data/hall/hall1-200.mat')['XO'] hall1 = np.moveaxis(hall1, -1, 0) hall1 = hall1.reshape(200, 144, 176, order='F') with h5py.File('../Data/hall/hall_144x100_frame2900-3899_pan.mat', 'r') as f: hall2 = np.array(f['X0']) hall2 = hall2.reshape(1000, 144, 100, order='F') hall1_tensor = tl.tensor(hall1, dtype='f') hall2_tensor = tl.tensor(hall2, dtype='f') print(hall1_tensor.shape, hall2_tensor.shape) # make_video(hall1, 'hall1.avi', False) # make_video(hall2, 'hall2.avi', False)(200, 144, 176) (1000, 144, 100)Air Quality Tensor* ` (measurement)`* Beijing Air Quality * 2,454,305 out of 2,524,536 (35,063 * 12 * 6)* Korea Air Quality * 11,270,028 out of 18,368,364 (9,478 * 323 * 6)* Madrid Air Quality * 8,036,759 out of 21,587,328 (64,248 * 24 * 14)# beijing_df = pd.read_csv('../../Data/air_quality/BeijingAirQuality/beijing.tensor', delimiter='\t', header=None) korea_df = pd.read_csv('../../Data/air_quality/KoreaAirQuality/korea_airquality.tensor', delimiter='\t', header=None) # madrid_df = pd.read_csv('../../Data/air_quality/MadridAirQuality/1hour_madrid.tensor', delimiter='\t', header=None) def get_tensor(df): start = time.time() dims = df[[0,1,2]].max()+1 tensor = np.empty(dims) * np.nan tensor.shape for i, row in df.iterrows(): indices = [[index] for index in np.int64(np.asarray(row[:-1]))] tensor[tuple(indices)] = np.double(row[3]) avg = [] for i in range(tensor.shape[2]): avg.append(np.nanmean(tensor[:,:,i])) inds = np.where(np.isnan(tensor)) for ind in zip(inds[0], inds[1], inds[2]): tensor[ind] = avg[ind[-1]] print(time.time() - start) return tensor # beijing_tensor = get_tensor(beijing_df) korea_tensor = get_tensor(korea_df) # madrid_tensor = get_tensor(madrid_df) with open('results_0128.p', 'wb') as fp: pickle.dump(results, fp, protocol=pickle.HIGHEST_PROTOCOL)--- Experiment 1from online_tensor_decomposition_fcp_abridged06 import * # results = {} # synthetic (n_iter, ul, ll) = (1, 1.2, 1.1) tensor_stream = create_tensor_stream(synthetic_data, start_to_stream=10, batch_sizes=np.full((99), 10, dtype=int)) # results['synthetic-10'] = online_tensor_decomposition('synthetic', synthetic_data, tensor_stream, rank=10, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['synthetic-20'] = online_tensor_decomposition('synthetic', synthetic_data, tensor_stream, rank=20, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['synthetic-30'] = online_tensor_decomposition('synthetic', synthetic_data, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['synthetic-40'] = online_tensor_decomposition('synthetic', synthetic_data, tensor_stream, rank=40, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['synthetic-50'] = online_tensor_decomposition('synthetic', synthetic_data, tensor_stream, rank=50, n_iter=n_iter, ul=ul, ll=ll, verbose=False) datasets = ('synthetic-20', 'synthetic-30', 'synthetic-40') plot_global(datasets, 'E1_synthetic', 35) plot_local(datasets, 'E1_synthetic', 35) # video (n_iter, ul, ll) = (1, 6, 2) tensor_stream = create_tensor_stream(sample_video, start_to_stream=5, batch_sizes=np.full((40), 5, dtype=int)) # results['video-10'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=10, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['video-20'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=20, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['video-30'] = 
online_tensor_decomposition('video', sample_video, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['video-40'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=40, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['video-50'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=50, n_iter=n_iter, ul=ul, ll=ll, verbose=False) datasets = ('video-20', 'video-30', 'video-40') plot_global(datasets, 'E1_video', 35) plot_local(datasets, 'E1_video', 35) # stock (n_iter, ul, ll) = (1, 5, 3) tensor_stream = create_tensor_stream(stock2_tensor, start_to_stream=5, batch_sizes=np.full((1028), 3, dtype=int)) results['stock-25'] = online_tensor_decomposition('stock', stock2_tensor, tensor_stream, rank=25, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['stock-30'] = online_tensor_decomposition('stock', stock2_tensor, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['stock-35'] = online_tensor_decomposition('stock', stock2_tensor, tensor_stream, rank=35, n_iter=n_iter, ul=ul, ll=ll, verbose=False) datasets = ('stock-20', 'stock-22', 'stock-24') plot_global(datasets, 'E1_stock', 50) plot_local(datasets, 'E1_stock', 45) # hall (n_iter, ul, ll) = (1, 0.5, 0.1) tensor_stream = create_tensor_stream(hall1_tensor, start_to_stream=10, batch_sizes=np.full((19), 10, dtype=int)) # results['hall-10'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=15, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['hall-15'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=20, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['hall-20'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=20, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['hall-25'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=25, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['hall-30'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['hall-35'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=35, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['hall-40'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=40, n_iter=n_iter, ul=ul, ll=ll, verbose=False) #results['hall-45'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=45, n_iter=n_iter, ul=ul, ll=ll, verbose=False) datasets = ('hall-30', 'hall-35', 'hall-40') plot_global(datasets, 'E1_hall', 40) plot_local(datasets, 'E1_hall', 40) # korea air-quality (n_iter, ul, ll) = (1, 2, 1.3) tensor_stream = create_tensor_stream(korea_tensor, start_to_stream=79, batch_sizes=np.full((94), 100, dtype=int)) # results['korea-10'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=10, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['korea-15'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=15, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['korea-20'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, n_iter=n_iter, ul=ul, ll=ll, verbose=False) # results['korea-25'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=25, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['korea-30'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['korea-40'] = online_tensor_decomposition('korea', 
korea_tensor, tensor_stream, rank=40, n_iter=n_iter, ul=ul, ll=ll, verbose=False) results['korea-50'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=50, n_iter=n_iter, ul=ul, ll=ll, verbose=False) datasets = ('korea-30', 'korea-40', 'korea-50') plot_global(datasets, 'E1_korea', 35) plot_local(datasets, 'E1_korea', 35)--- Experiment 2from matplotlib import colors def make_rgb_transparent(color, alpha=0.6, bg_rgb=(1,1,1)): rgb = colors.colorConverter.to_rgb(color) return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)] def plot_mem(datasets, name): colors = ('dodgerblue','mediumseagreen', 'hotpink', '#fba84a') libs = ("dao", "dtd", "ocp", 'fcp') patterns = ( "" , "\\\\\\\\\\" , "////" , "xxxx") markers = ("o", "x", "s", "^", "4") index = np.arange(5) bar_width = 0.2 # create plot fig, ax1 = plt.subplots(figsize = (6, 4), dpi = 150) plt.xticks(index + bar_width*1.5, ('Synthetic', 'Video', 'Stock', 'Hall', 'Korea')) plt.rcParams['hatch.linewidth'] = 0.2 for i, (color, lib) in enumerate(zip(colors, libs)): mem_list = [results[dataset][lib][4] for dataset in datasets] rects1 = ax1.bar(index + bar_width*i, mem_list, bar_width, color=make_rgb_transparent(color, alpha=0.0), label=lib, edgecolor='black', hatch=patterns[i], linewidth=0.5) ax1.set_xlabel('Datasets') ax1.set_ylabel('Memory Usage (byte)') ax1.set_yscale('log') ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis for i, (color, lib) in enumerate(zip(colors, libs)): acc_list = [results[dataset][lib][0] for dataset in datasets] for j, acc in enumerate(acc_list): if j == 4: ax2.scatter(index[j] + bar_width*i, acc, 70, color=colors[i], marker=markers[j], linewidth=2) elif j == 1: ax2.scatter(index[j] + bar_width*i, acc, 50, color=colors[i], marker=markers[j], linewidth=2) else: ax2.scatter(index[j] + bar_width*i, acc, 50, color=colors[i], marker=markers[j], facecolors='none', linewidth=2) ax2.tick_params(axis='y') ax2.set_ylabel('Global Fitness', rotation=270, labelpad=15) fig.tight_layout() # otherwise the right y-label is slightly clipped plt.show() # plt.savefig(f'./plots/{name}_mem.pdf', bbox_inches = 'tight', pad_inches = 0) plot_mem(('synthetic-30', 'video-30', 'stock-20', 'hall-30', 'korea-40'), 'E2') from matplotlib import colors def make_rgb_transparent(color, alpha=0.6, bg_rgb=(1,1,1)): rgb = colors.colorConverter.to_rgb(color) return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)] def plot_mem(datasets, name): colors = ('dodgerblue','mediumseagreen', 'hotpink', '#fba84a') libs = ("dao", "dtd", "ocp", 'fcp') patterns = ( "" , "\\\\\\\\\\" , "////" , "xxxx") markers = ("o", "x", "s", "^", "4") index = np.arange(5) bar_width = 0.2 # create plot fig, ax1 = plt.subplots(figsize = (6, 4), dpi = 150) plt.xticks(index + bar_width*1.5, ('Synthetic', 'Video', 'Stock', 'Hall', 'Korea')) plt.rcParams['hatch.linewidth'] = 0.2 for i, (color, lib) in enumerate(zip(colors, libs)): mem_list = [results[dataset][lib][4] for dataset in datasets] print(mem_list) rects1 = ax1.bar(index + bar_width*i, mem_list, bar_width, color=make_rgb_transparent(color, alpha=0.7), label=lib, edgecolor='black', hatch=patterns[i], linewidth=0.5) ax1.set_xlabel('Datasets') ax1.set_ylabel('Memory Usage (byte)') ax1.set_yscale('log') ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis for i, dataset in enumerate(datasets): acc_list = [results[dataset][lib][0] for lib in libs] ax2.plot(i + bar_width*index[:4], acc_list, marker="o", color='black', zorder=1) for i, (color, 
lib) in enumerate(zip(colors, libs)): acc_list = [results[dataset][lib][0] for dataset in datasets] ax2.scatter(index + bar_width*i, acc_list, 30, color='black', marker="o", facecolor=colors[i], linewidth=1.3, zorder=2) ax2.tick_params(axis='y') ax2.set_ylabel('Global Fitness', rotation=270, labelpad=13) fig.tight_layout() # otherwise the right y-label is slightly clipped # plt.show() plt.savefig(f'./plots/{name}_mem.svg', bbox_inches = 'tight', pad_inches = 0) print([results[dataset]['fcp'][4]/results[dataset]['dao'][4] for dataset in datasets]) plot_mem(('synthetic-30', 'video-30', 'stock-20', 'hall-30', 'korea-40'), 'E2')[199424, 744488, 557472, 469496, 2024776] [270160, 320800, 541736, 202696, 3244936] [2717056, 94372576, 114616, 892536, 10744216] [24000232, 377856232, 17298616, 20275416, 146962632] [120.34776155327343, 507.53837805310496, 31.030466104127203, 43.18549252815785, 72.5821681015579]--- Experiment 5from online_tensor_decomposition_fcp01 import * results = {} (n_iter, ul, ll) = (1, 6, 2) tensor_stream = create_tensor_stream(sample_video, start_to_stream=5, batch_sizes=np.full((40), 5, dtype=int)) results['video'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=30, n_iter=n_iter, ul=ul, ll=ll, verbose=False) def plot_E5_error(dataset): markers = ("+", "x", "1", "2") markers = ("o", "x", "s", "^") colors = ('dodgerblue','mediumseagreen', 'hotpink', '#fba84a') libs = ("dao", "dtd", "ocp", "fcp") fig = plt.figure(figsize = (7, 3), dpi = 150,) plt.ylabel('Local Error Norm', fontsize=12) plt.xlabel('# of Stacked Slices', fontsize=12) # ax1.xaxis.set_label_position('top') split_points, refine_points = results[dataset]['dao'][6] for p in refine_points: plt.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='--') for p in split_points: plt.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='-') for color, marker, lib in zip(colors, markers, libs): verbose_list = results[dataset][lib][5] plt.plot(verbose_list[:,0], verbose_list[:,2], linewidth=1, marker=marker, color=color, markersize=4) plt.savefig('plots/E5_{}_error.pdf'.format(dataset), bbox_inches='tight', pad_inches=0) plot_E5_error('video') def plot_E5_rt(dataset): markers = ("+", "x", "1", "2") markers = ("o", "x", "s", "^") colors = ('dodgerblue','mediumseagreen', 'hotpink', '#fba84a') libs = ("dao", "dtd", "ocp", "fcp") plt.figure(figsize = (7, 3), dpi = 150,) plt.yscale('log') plt.ylabel('Local Running Time (s)', fontsize=12) plt.xlabel('# of Stacked Slices', fontsize=12) # ax1.xaxis.set_label_position('top') split_points, refine_points = results[dataset]['dao'][6] for p in refine_points: plt.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='--') for p in split_points: plt.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='-') for color, marker, lib in zip(colors, markers, libs): verbose_list = results[dataset][lib][5] plt.plot(verbose_list[:,0], verbose_list[:,1], linewidth=1, marker=marker, color=color, markersize=4) plt.savefig('plots/E5_{}_rt.pdf'.format(dataset), bbox_inches='tight', pad_inches=0) plot_E5_rt('video') from matplotlib import gridspec def plot_E5(dataset): markers = ("x", "1", "2", "+") markers = ("x", "s", "^", "o") colors = ('mediumseagreen', 'hotpink', '#fba84a', 'dodgerblue') libs = ("dtd", "ocp", "fcp", "dao") fig = plt.figure(figsize = (9, 6), dpi = 150) gs = gridspec.GridSpec(2, 1, height_ratios=[1.5, 1]) ax1 = plt.subplot(gs[0]) ax1.set_ylabel('Local Error Norm', 
fontsize=12) # ax1.set_xlabel('# of Stacked Slices', fontsize=12) # ax1.xaxis.set_label_position('top') split_points, refine_points = results[dataset]['dao'][6] for p in refine_points: ax1.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='--') for p in split_points: ax1.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='-') for color, marker, lib in zip(colors, markers, libs): verbose_list = results[dataset][lib][5] ax1.plot(verbose_list[:,0], verbose_list[:,2], linewidth=1, marker=marker, color=color, markersize=4) ax2 = plt.subplot(gs[1], sharex = ax1) ax2.set_yscale('log') ax2.set_ylabel('Local Running\nTime (s)', fontsize=12) ax2.set_xlabel('# of Stacked Slices', fontsize=12) # ax1.xaxis.set_label_position('top') split_points, refine_points = results[dataset]['dao'][6] for p in refine_points: ax2.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='--') for p in split_points: ax2.axvline(p, label='line: {}'.format(p), c='lightgray', linewidth=2, linestyle='-') for color, marker, lib in zip(colors, markers, libs): verbose_list = results[dataset][lib][5] ax2.plot(verbose_list[:,0], verbose_list[:,1], linewidth=1, marker=marker, color=color, markersize=4) plt.setp(ax1.get_xticklabels(), visible=False) plt.subplots_adjust(hspace=.0) # fig.tight_layout() # otherwise the right y-label is slightly clipped plt.savefig('plots/E5_{}.svg'.format(dataset), bbox_inches='tight', pad_inches=0) plt.show() plot_E5('video')--- Experiment 3tensor_stream = create_tensor_stream(korea_tensor, start_to_stream=79, batch_sizes=np.full((94), 100, dtype=int)) results['korea--'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=-1, ll=-1, verbose=False) # korea air-quality tensor_stream = create_tensor_stream(korea_tensor, start_to_stream=79, batch_sizes=np.full((94), 100, dtype=int)) results['korea-split-1.0'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=1.0, ll=-1, verbose=False) # 65 results['korea-split-1.2'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=1.2, ll=-1, verbose=False) # 57 results['korea-split-1.4'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=1.4, ll=-1, verbose=False) # 45 results['korea-split-1.6'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=1.6, ll=-1, verbose=False) # 36 results['korea-split-1.8'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=1.8, ll=-1, verbose=False) # 5 # korea air-quality tensor_stream = create_tensor_stream(korea_tensor, start_to_stream=79, batch_sizes=np.full((94), 100, dtype=int)) results['korea-refine-1.4'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=-1, ll=1.4, verbose=False) # 15 results['korea-refine-1.6'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=-1, ll=1.6, verbose=False) # 14 results['korea-refine-1.8'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=-1, ll=1.8, verbose=False) # 12 results['korea-refine-2.0'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, 
methods=['dao'], n_iter=n_iter, ul=-1, ll=2.0, verbose=False) # 8 results['korea-refine-2.2'] = online_tensor_decomposition('korea', korea_tensor, tensor_stream, rank=20, methods=['dao'], n_iter=n_iter, ul=-1, ll=2.2, verbose=False) # 6 for dataset in ('korea-split-1.0', 'korea-split-1.2', 'korea-split-1.4', 'korea-split-1.6', 'korea-split-1.8'): plot_E5(dataset) for dataset in ('korea-refine-1.4', 'korea-refine-1.6', 'korea-refine-1.8', 'korea-refine-2.0', 'korea-refine-2.2'): plot_E5(dataset) rows = [] for dataset in ('korea-split-1.0', 'korea-split-1.2', 'korea-split-1.4', 'korea-split-1.6', 'korea-split-1.8'): row = dataset.split('-') row.append('-') verbose_list = results[dataset]['dao'][5] split_points, refine_points = results[dataset]['dao'][6] row.append(len(split_points)) row += results[dataset]['dao'][:5] rows.append(row) for dataset in ('korea-refine-1.4', 'korea-refine-1.6', 'korea-refine-1.8', 'korea-refine-2.0', 'korea-refine-2.2'): row = dataset.split('-')[:-1] row.append('-') row.append(dataset.split('-')[-1]) verbose_list = results[dataset]['dao'][5] split_points, refine_points = results[dataset]['dao'][6] row.append(len(refine_points)) row += results[dataset]['dao'][:5] rows.append(row) dataset = 'korea--' row = dataset.split('-')[:1] row += ['-'] * 3 verbose_list = results[dataset]['dao'][5] split_points, refine_points = results[dataset]['dao'][6] row.append(len(refine_points)) row += results[dataset]['dao'][:5] rows.append(row) df = pd.DataFrame(rows, columns=['dataset', 'process', 'ul', 'll', 'points-#', 'global-fit', 'local-fit', 'global-rt', 'local-rt', 'mem']) df--- Experiment 4# hall (n_iter, ul, ll) = (1, 0.5, 0.1) tensor_stream = create_tensor_stream(hall1_tensor, start_to_stream=10, batch_sizes=np.full((19), 10, dtype=int)) results['hall'] = online_tensor_decomposition('hall', hall1_tensor, tensor_stream, rank=100, n_iter=n_iter, ul=ul, ll=ll, verbose=False) frame = results['hall']['dao'][6][0][10]*10 for lib in ("dao", "dtd", "ocp", 'fcp'): make_video(results['hall'][lib][-1], f'plots/E4-hall-{lib}.avi', False) imwrite(f'plots/E4-hall-{lib}-{frame}.jpg', results['hall'][lib][-1][frame]) make_video(hall1_tensor, 'plots/E4-hall-org.avi', False) imwrite(f'plots/E4-hall-org-{frame}.jpg', hall1_tensor[frame]) # video (n_iter, ul, ll) = (1, 6, 2) tensor_stream = create_tensor_stream(sample_video, start_to_stream=5, batch_sizes=np.full((40), 5, dtype=int)) results['video'] = online_tensor_decomposition('video', sample_video, tensor_stream, rank=100, n_iter=n_iter, ul=ul, ll=ll, verbose=False) frame = results['video']['dao'][6][0][0]*5 for lib in ("dao", "dtd", "ocp", 'fcp'): make_video(results['video'][lib][-1], f'plots/E4-video-{lib}.avi') imwrite(f'plots/E4-video-{lib}-{frame}.jpg', results['video'][lib][-1][frame]) make_video(sample_video, 'plots/E4-video-org.avi') imwrite(f'plots/E4-video-org-{frame}.jpg', sample_video[frame]) for frame in range(205): imwrite(f'plots/video/org-{frame}.jpg', sample_video[frame])Mnist classification pipeline using SagemakerThe `mnist-classification-pipeline.py` sample runs a pipeline to train a classficiation model using Kmeans with MNIST dataset on Sagemaker. 
We will have all required steps here and for other details like how to get source data, please check [documentation](https://github.com/kubeflow/pipelines/tree/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker).This sample is based on the [Train a Model with a Built-in Algorithm and Deploy it](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1.html).The sample trains and deploy a model based on the [MNIST dataset](http://www.deeplearning.net/tutorial/gettingstarted.html). Prerequisite 1. Create an S3 bucket to store pipeline data> Note: Be sure to change the HASH variable to random hash and change AWS_REGION before running next cell> Note: you use us-east-1, please use command `!aws s3 mb s3://$S3_BUCKET --region $AWS_REGION --endpoint-url https://s3.us-east-1.amazonaws.com`import random, string HASH = ''.join([random.choice(string.ascii_lowercase) for n in range(16)] + [random.choice(string.digits) for n in range(16)]) AWS_REGION = 'us-east-2' S3_BUCKET = '{}-kubeflow-pipeline-data'.format(HASH) !aws s3 mb s3://$S3_BUCKET --region $AWS_REGION !pip install sagemaker2. Copy dataset> Download and upload `data` and `valid_data.csv` into your S3 bucket.import pickle, gzip, numpy, urllib.request, json from urllib.parse import urlparse # Load the dataset urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz") with gzip.open('mnist.pkl.gz', 'rb') as f: train_set, valid_set, test_set = pickle.load(f, encoding='latin1') # Upload dataset to S3 from sagemaker.amazon.common import write_numpy_to_dense_tensor import io import boto3 ################################################################### # This is the only thing that you need to change to run this code # Give the name of your S3 bucket bucket = S3_BUCKET # If you are gonna use the default values of the pipeline then # give a bucket name which is in us-west-2 region ################################################################### train_data_key = 'mnist_kmeans_example/train_data' test_data_key = 'mnist_kmeans_example/test_data' train_data_location = 's3://{}/{}'.format(bucket, train_data_key) test_data_location = 's3://{}/{}'.format(bucket, test_data_key) print('training data will be uploaded to: {}'.format(train_data_location)) print('training data will be uploaded to: {}'.format(test_data_location)) # Convert the training data into the format required by the SageMaker KMeans algorithm buf = io.BytesIO() write_numpy_to_dense_tensor(buf, train_set[0], train_set[1]) buf.seek(0) boto3.resource('s3').Bucket(bucket).Object(train_data_key).upload_fileobj(buf) # Convert the test data into the format required by the SageMaker KMeans algorithm write_numpy_to_dense_tensor(buf, test_set[0], test_set[1]) buf.seek(0) boto3.resource('s3').Bucket(bucket).Object(test_data_key).upload_fileobj(buf) # Convert the valid data into the format required by the SageMaker KMeans algorithm numpy.savetxt('valid-data.csv', valid_set[0], delimiter=',', fmt='%g') s3_client = boto3.client('s3') input_key = "{}/valid_data.csv".format("mnist_kmeans_example/input") s3_client.upload_file('valid-data.csv', bucket, input_key)4. Install Kubeflow Pipelines SDK> You can skip this step if its already installed. You can validate if you have SDK installed by running `!pip show kfp`. The notebook has been tested for kfp v0.1.29 release!pip install https://storage.googleapis.com/ml-pipeline/release/0.1.29/kfp.tar.gz --upgrade !pip show kfpBuild pipeline 1. 
Run the following command to load Kubeflow Pipelines SDKimport kfp from kfp import components from kfp import dsl from kfp.aws import use_aws_secret2. Load reusable sagemaker components.sagemaker_train_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/aws/sagemaker/train/component.yaml') sagemaker_model_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/aws/sagemaker/model/component.yaml') sagemaker_deploy_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/aws/sagemaker/deploy/component.yaml') sagemaker_batch_transform_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/aws/sagemaker/batch_transform/component.yaml') sagemaker_hpo_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/aws/sagemaker/hyperparameter_tuning/component.yaml')3. Create pipeline. We will create Hyperparamater tuning job following by a training job first. Once training job is done, it will persist trained model to S3. Then a job will be kicked off to create a `Model` manifest in Sagemaker. With this model, batch transformation job can use it to predict on other datasets, prediction service can create an endpoint using it.> Note: remember to use pass your **role_arn** to successfully run the job.> Note: If you use a different region, please replace `us-west-2` with your region. > Note: ECR Images for k-means algorithm|Region| ECR Image||------|----------||us-west-1|632365934929.dkr.ecr.us-west-1.amazonaws.com||us-west-2|174872318107.dkr.ecr.us-west-2.amazonaws.com||us-east-1|382416733822.dkr.ecr.us-east-1.amazonaws.com||us-east-2|404615174143.dkr.ecr.us-east-2.amazonaws.com||us-gov-west-1|226302683700.dkr.ecr.us-gov-west-1.amazonaws.com||ap-east-1|286214385809.dkr.ecr.ap-east-1.amazonaws.com||ap-northeast-1|351501993468.dkr.ecr.ap-northeast-1.amazonaws.com||ap-northeast-2|835164637446.dkr.ecr.ap-northeast-2.amazonaws.com||ap-south-1|991648021394.dkr.ecr.ap-south-1.amazonaws.com||ap-southeast-1|475088953585.dkr.ecr.ap-southeast-1.amazonaws.com||ap-southeast-2|712309505854.dkr.ecr.ap-southeast-2.amazonaws.com||ca-central-1|469771592824.dkr.ecr.ca-central-1.amazonaws.com||eu-central-1|664544806723.dkr.ecr.eu-central-1.amazonaws.com||eu-north-1|669576153137.dkr.ecr.eu-north-1.amazonaws.com||eu-west-1|438346466558.dkr.ecr.eu-west-1.amazonaws.com||eu-west-2|644912444149.dkr.ecr.eu-west-2.amazonaws.com||eu-west-3|749696950732.dkr.ecr.eu-west-3.amazonaws.com||me-south-1|249704162688.dkr.ecr.me-south-1.amazonaws.com||sa-east-1|855470959533.dkr.ecr.sa-east-1.amazonaws.com|# Configure your s3 bucket. S3_BUCKET = '{}-kubeflow-pipeline-data'.format(HASH) S3_PIPELINE_PATH='s3://{}/mnist_kmeans_example'.format(S3_BUCKET) # Configure your Sagemaker execution role. 
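# The role must be a SageMaker execution role ARN. A hypothetical example of the
# expected format (replace the account ID and role name with your own):
# SAGEMAKER_ROLE_ARN = 'arn:aws:iam::123456789012:role/MySageMakerExecutionRole'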
SAGEMAKER_ROLE_ARN='' @dsl.pipeline( name='MNIST Classification pipeline', description='MNIST Classification using KMEANS in SageMaker' ) def mnist_classification(region='us-east-2', image='404615174143.dkr.ecr.us-east-2.amazonaws.com/kmeans:1', training_input_mode='File', hpo_strategy='Bayesian', hpo_metric_name='test:msd', hpo_metric_type='Minimize', hpo_early_stopping_type='Off', hpo_static_parameters='{"k": "10", "feature_dim": "784"}', hpo_integer_parameters='[{"Name": "mini_batch_size", "MinValue": "500", "MaxValue": "600"}, {"Name": "extra_center_factor", "MinValue": "10", "MaxValue": "20"}]', hpo_continuous_parameters='[]', hpo_categorical_parameters='[{"Name": "init_method", "Values": ["random", "kmeans++"]}]', hpo_channels='[{"ChannelName": "train", \ "DataSource": { \ "S3DataSource": { \ "S3Uri": "' + S3_PIPELINE_PATH + '/train_data", \ "S3DataType": "S3Prefix", \ "S3DataDistributionType": "FullyReplicated" \ } \ }, \ "ContentType": "", \ "CompressionType": "None", \ "RecordWrapperType": "None", \ "InputMode": "File"}, \ {"ChannelName": "test", \ "DataSource": { \ "S3DataSource": { \ "S3Uri": "' + S3_PIPELINE_PATH + '/test_data", \ "S3DataType": "S3Prefix", \ "S3DataDistributionType": "FullyReplicated" \ } \ }, \ "ContentType": "", \ "CompressionType": "None", \ "RecordWrapperType": "None", \ "InputMode": "File"}]', hpo_spot_instance='False', hpo_max_wait_time='3600', hpo_checkpoint_config='{}', output_location=S3_PIPELINE_PATH + '/output', output_encryption_key='', instance_type='ml.p3.2xlarge', instance_count='1', volume_size='50', hpo_max_num_jobs='9', hpo_max_parallel_jobs='2', max_run_time='3600', endpoint_url='', network_isolation='True', traffic_encryption='False', train_channels='[{"ChannelName": "train", \ "DataSource": { \ "S3DataSource": { \ "S3Uri": "' + S3_PIPELINE_PATH + '/train_data", \ "S3DataType": "S3Prefix", \ "S3DataDistributionType": "FullyReplicated" \ } \ }, \ "ContentType": "", \ "CompressionType": "None", \ "RecordWrapperType": "None", \ "InputMode": "File"}]', train_spot_instance='False', train_max_wait_time='3600', train_checkpoint_config='{}', batch_transform_instance_type='ml.m4.xlarge', batch_transform_input=S3_PIPELINE_PATH + '/input', batch_transform_data_type='S3Prefix', batch_transform_content_type='text/csv', batch_transform_compression_type='None', batch_transform_ouput=S3_PIPELINE_PATH + '/output', batch_transform_max_concurrent='4', batch_transform_max_payload='6', batch_strategy='MultiRecord', batch_transform_split_type='Line', role_arn=SAGEMAKER_ROLE_ARN ): hpo = sagemaker_hpo_op( region=region, endpoint_url=endpoint_url, image=image, training_input_mode=training_input_mode, strategy=hpo_strategy, metric_name=hpo_metric_name, metric_type=hpo_metric_type, early_stopping_type=hpo_early_stopping_type, static_parameters=hpo_static_parameters, integer_parameters=hpo_integer_parameters, continuous_parameters=hpo_continuous_parameters, categorical_parameters=hpo_categorical_parameters, channels=hpo_channels, output_location=output_location, output_encryption_key=output_encryption_key, instance_type=instance_type, instance_count=instance_count, volume_size=volume_size, max_num_jobs=hpo_max_num_jobs, max_parallel_jobs=hpo_max_parallel_jobs, max_run_time=max_run_time, network_isolation=network_isolation, traffic_encryption=traffic_encryption, spot_instance=hpo_spot_instance, max_wait_time=hpo_max_wait_time, checkpoint_config=hpo_checkpoint_config, role=role_arn, ) #.apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) 
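# The training step below consumes hpo.outputs['best_hyperparameters'], so
# Kubeflow Pipelines schedules it only after the tuning job above has finished.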
training = sagemaker_train_op( region=region, endpoint_url=endpoint_url, image=image, training_input_mode=training_input_mode, hyperparameters=hpo.outputs['best_hyperparameters'], channels=train_channels, instance_type=instance_type, instance_count=instance_count, volume_size=volume_size, max_run_time=max_run_time, model_artifact_path=output_location, output_encryption_key=output_encryption_key, network_isolation=network_isolation, traffic_encryption=traffic_encryption, spot_instance=train_spot_instance, max_wait_time=train_max_wait_time, checkpoint_config=train_checkpoint_config, role=role_arn, ) #.apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) create_model = sagemaker_model_op( region=region, endpoint_url=endpoint_url, model_name=training.outputs['job_name'], image=training.outputs['training_image'], model_artifact_url=training.outputs['model_artifact_url'], network_isolation=network_isolation, role=role_arn ) #.apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) prediction = sagemaker_deploy_op( region=region, endpoint_url=endpoint_url, model_name_1=create_model.output, ) #.apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) batch_transform = sagemaker_batch_transform_op( region=region, endpoint_url=endpoint_url, model_name=create_model.output, instance_type=batch_transform_instance_type, instance_count=instance_count, max_concurrent=batch_transform_max_concurrent, max_payload=batch_transform_max_payload, batch_strategy=batch_strategy, input_location=batch_transform_input, data_type=batch_transform_data_type, content_type=batch_transform_content_type, split_type=batch_transform_split_type, compression_type=batch_transform_compression_type, output_location=batch_transform_ouput ) #.apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))4. Compile your pipelinekfp.compiler.Compiler().compile(mnist_classification, 'mnist-classification-pipeline.zip')5. Deploy your pipelineclient = kfp.Client() aws_experiment = client.create_experiment(name='aws') my_run = client.run_pipeline(aws_experiment.id, 'mnist-classification-pipeline', 'mnist-classification-pipeline.zip')PredictionOpen Sagemaker console and find your endpoint name. Please check dataset section to get train_set.Once your pipeline is done, you can find sagemaker endpoint name and replace `ENDPOINT_NAME` value with your newly created endpoint name. > Note: make sure to attach `sagemaker:InvokeEndpoint` to the worker node nodegroup that is running this jupyter notebook.```json{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:InvokeEndpoint" ], "Resource": "*" } ]}```!pip install boto3 --userFind your Endpoint name in AWS ConsoleOpen AWS console and enter Sagemaker service, find the endpoint name as the following picture shows.![download-pipeline](./images/sm-endpoint.jpg)import pickle, gzip, numpy, urllib.request, json from urllib.parse import urlparse import json import io import boto3 # Replace the endpoint name with yours. 
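# The value below is the endpoint name from the original sample run; copy the
# name of your own endpoint from the SageMaker console (see the screenshot above).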
ENDPOINT_NAME='Endpoint-20190916223205-Y635' # Load the dataset urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz") with gzip.open('mnist.pkl.gz', 'rb') as f: train_set, valid_set, test_set = pickle.load(f, encoding='latin1') # Simple function to create a csv from our numpy array def np2csv(arr): csv = io.BytesIO() numpy.savetxt(csv, arr, delimiter=',', fmt='%g') return csv.getvalue().decode().rstrip() runtime = boto3.Session(region_name='us-east-2').client('sagemaker-runtime') payload = np2csv(train_set[0][30:31]) response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME, ContentType='text/csv', Body=payload) result = json.loads(response['Body'].read().decode()) print(result)Clean upGo to Sagemaker console and delete `endpoint`, `model`. Clean up S3 bucketDelete S3 bucket that was created for this exercise!aws s3 rb s3://$S3_BUCKET --forceRequired librariesThe requests library is used to fetch the HTML; BeautifulSoup is used to parse it.from bs4 import BeautifulSoup import requests import reFunction definitionsUse the requests library to get the HTML document, then parse it with BeautifulSoup to obtain a result object of class 'bs4.element.ResultSet'.def get_html(url): response = requests.get(url) html = response.text return html def parse_html(html): soup = BeautifulSoup(html, 'lxml') picUrls = soup.find_all('a','wrap-img') return picUrlsChoose the page to crawlOnly a single page is scraped here; I chose the **Jianshu (简书) homepage** as the target page and scrape the images on its front page.# crawler entry point (for a single page an explicit entry point hardly matters) url = 'https://www.jianshu.com/' html = get_html(url) imgs = parse_html(html) # list used to store the results urls = []Get the image links. **tag** and **ResultSet** are BeautifulSoup objects; see the [official documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.htmlfind-all) for details.for tag in imgs: # get each tag in the ResultSet img = tag.find_all('img') # get the img elements for src in img: # get the image url from the src attribute url = src['src'] urls.append('https:'+url)Here are a few of the urlsurls[:3]Save the imagesimport os os.makedirs('./img/', exist_ok=True) # use the os module to create an img folder; reuse it if it already exists for i in range(len(urls)): r = requests.get(urls[i]) # fetch the image with the requests library description = 'picture-{}'.format(i)+'.jpg' with open('./img/%s'%description,'wb') as f: f.write(r.content)Temporal-Difference MethodsIn this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.--- Part 0: Explore CliffWalkingEnvWe begin by importing the necessary packages.import sys import gym import numpy as np from collections import defaultdict, deque import matplotlib.pyplot as plt %matplotlib inline import check_test from plot_utils import plot_valuesUse the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.env = gym.make('CliffWalking-v0')The agent moves through a $4\times 12$ gridworld, with states numbered as follows:```[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]```At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.The agent has 4 potential actions:```UP = 0RIGHT = 1DOWN = 2LEFT = 3```Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.print(env.action_space) print(env.observation_space)Discrete(4) Discrete(48)In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below.
Please take the time now to make sure that you understand _why_ this is the optimal state-value function._**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._# define the optimal state-value function V_opt = np.zeros((4,12)) V_opt[0:13][0] = -np.arange(3, 15)[::-1] V_opt[0:13][1] = -np.arange(3, 15)[::-1] + 1 V_opt[0:13][2] = -np.arange(3, 15)[::-1] + 2 V_opt[3][0] = -13 plot_values(V_opt)Part 1: TD Control: SarsaIn this section, you will write your own implementation of the Sarsa control algorithm.Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `alpha`: This is the step-size parameter for the update step.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.Please complete the function in the code cell below.(_Feel free to define additional functions to help you to organize your code._)import random def sarsa(env, num_episodes, alpha, gamma=1.0): # initialize action-value function (empty dictionary of arrays) Q = defaultdict(lambda: np.zeros(env.nA)) # initialize performance monitor # loop over episodes for i_episode in range(1, num_episodes+1): epsilon = max(0.2, 1.0*(1-(i_episode/num_episodes))) # monitor progress if i_episode % 100 == 0: print("\rEpisode {}/{}, epsilon: {}".format(i_episode, num_episodes, epsilon), end="") sys.stdout.flush() ## TODO: complete the function' state = env.reset() done = False while not done: #calculate an action rand = random.uniform(0, 1) if rand < epsilon: action_0 = np.random.choice(np.arange(env.nA)) else: action_0 = np.argmax(Q[state]) state_0, reward_0, done, info = env.step(action_0) if done: break #print("next_state: {}, reward: {}, done: {}, info: {}".format(next_state, reward, done, info)) #take the next action (use greedy) action_1 = np.argmax(Q[state_0]) state_1, reward_1, done, info = env.step(action_1) #update the Q value of the previous(current) state current_state_action_reward = Q[state][action_0] Q[state][action_0] = \ (1-alpha)*current_state_action_reward + alpha*(reward_0 + reward_1) state = state_1 return Q Q = sarsa(env, 1000, .04) print("") keys = sorted(Q.keys()) for key in keys: print("state: {}, rewards: {}".format(key, Q[key])) # UP = 0 # RIGHT = 1 # DOWN = 2 # LEFT = 3Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. 
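As a point of reference while you experiment, the textbook one-step Sarsa update bootstraps from the action actually selected in the next state. The helper below is a sketch of that standard update rule (it is the canonical form of the algorithm, not a transcript of the cell above):

```python
# canonical one-step Sarsa update for a single transition (s, a, r, s', a')
def sarsa_update(Q, s, a, r, s_next, a_next, alpha, gamma, done):
    target = r if done else r + gamma * Q[s_next][a_next]  # bootstrap from the *chosen* next action
    Q[s][a] += alpha * (target - Q[s][a])                   # move the estimate toward the TD target
    return Q
```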
However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.# obtain the estimated optimal policy and corresponding action-value function Q_sarsa = sarsa(env, 5000, .01) # print the estimated optimal policy policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12) check_test.run_check('td_control_check', policy_sarsa) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_sarsa) # plot the estimated optimal state-value function V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)]) plot_values(V_sarsa)Episode 5000/5000, epsilon: 0.21999999999999997Part 2: TD Control: Q-learningIn this section, you will write your own implementation of the Q-learning control algorithm.Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `alpha`: This is the step-size parameter for the update step.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.Please complete the function in the code cell below.(_Feel free to define additional functions to help you to organize your code._)import random def q_learning(env, num_episodes, alpha, gamma=1.0): # initialize action-value function (empty dictionary of arrays) Q = defaultdict(lambda: np.zeros(env.nA)) # initialize performance monitor # loop over episodes for i_episode in range(1, num_episodes+1): epsilon = max(0.2, 1.0*(1-(i_episode/num_episodes))) # monitor progress if i_episode % 100 == 0: print("\rEpisode {}/{}, epsilon: {}".format(i_episode, num_episodes, epsilon), end="") sys.stdout.flush() ## TODO: complete the function' state = env.reset() done = False while not done: #calculate an action rand = random.uniform(0, 1) if rand < epsilon: action = np.random.choice(np.arange(env.nA)) else: action = np.argmax(Q[state]) next_state, reward, done, info = env.step(action) #print("next_state: {}, reward: {}, done: {}, info: {}".format(next_state, reward, done, info)) #get best reward possible from next state next_state_max_reward_action = np.argmax(Q[next_state]) next_state_max_reward = Q[next_state][next_state_max_reward_action] #update the Q value of the previous(current) state current_state_action_reward = Q[state][action] Q[state][action] = \ (1-alpha)*current_state_action_reward + alpha*(reward + next_state_max_reward) state = next_state return Q Q = q_learning(env, 10000, .04) print("") keys = sorted(Q.keys()) for key in keys: print("state: {}, rewards: {}".format(key, Q[key])) # UP = 0 # RIGHT = 1 # DOWN = 2 # LEFT = 3Episode 10000/10000, epsilon: 0.2999999999999996 state: 0, rewards: [-15. -14. -14. -15.] state: 1, rewards: [-14. -13. -13. -15.] state: 2, rewards: [-13. -12. -12. -14.] state: 3, rewards: [-12. -11. -11. -13.] state: 4, rewards: [-11. -10. -10. -12.] state: 5, rewards: [-10. -9. -9. -11.] state: 6, rewards: [ -9. -8. -8. -10.] state: 7, rewards: [-8. -7. -7. -9.] state: 8, rewards: [-7. -6. -6. -8.] state: 9, rewards: [-6. -5. -5. -7.] state: 10, rewards: [-5. -4. -4. -6.] state: 11, rewards: [-4. -4. -3. -5.] state: 12, rewards: [-15. -13. -13. -14.] state: 13, rewards: [-14. -12. 
-12. -14.] state: 14, rewards: [-13. -11. -11. -13.] state: 15, rewards: [-12. -10. -10. -12.] state: 16, rewards: [-11. -9. -9. -11.] state: 17, rewards: [-10. -8. -8. -10.] state: 18, rewards: [-9. -7. -7. -9.] state: 19, rewards: [-8. -6. -6. -8.] state: 20, rewards: [-7. -5. -5. -7.] state: 21, rewards: [-6. -4. -4. -6.] state: 22, rewards: [-5. -3. -3. -5.] state: 23, rewards: [-4. -3. -2. -4[...]Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.# obtain the estimated optimal policy and corresponding action-value function Q_sarsamax = q_learning(env, 5000, .02) # print the estimated optimal policy policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12)) check_test.run_check('td_control_check', policy_sarsamax) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_sarsamax) # plot the estimated optimal state-value function plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])Episode 5000/5000, epsilon: 0.21999999999999997Part 3: TD Control: Expected SarsaIn this section, you will write your own implementation of the Expected Sarsa control algorithm.Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `alpha`: This is the step-size parameter for the update step.- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.Please complete the function in the code cell below.(_Feel free to define additional functions to help you to organize your code._)import random def expected_sarsa(env, num_episodes, alpha, gamma=1.0): # initialize action-value function (empty dictionary of arrays) Q = defaultdict(lambda: np.zeros(env.nA)) # initialize performance monitor # loop over episodes for i_episode in range(1, num_episodes+1): epsilon = max(0.0, 1.0*(1.0-(i_episode/num_episodes))) # monitor progress if i_episode % 10 == 0: print("\rEpisode {}/{}, epsilon: {}".format(i_episode, num_episodes, epsilon), end="") sys.stdout.flush() ## TODO: complete the function' state = env.reset() done = False while not done: #calculate an action rand = random.uniform(0, 1) if rand < epsilon: action = np.random.choice(np.arange(env.nA)) else: action = np.argmax(Q[state]) next_state, reward, done, info = env.step(action) #print("next_state: {}, reward: {}, done: {}, info: {}".format(next_state, reward, done, info)) #get best reward possible from next state # next_state_max_reward_action = np.argmax(Q[next_state]) # next_state_max_reward = Q[next_state][next_state_max_reward_action] #generate next_state_max_reward based upon probabilities of next actions next_state_max_index = np.argmax(Q[next_state]) next_state_weighted_reward = 0 # rand = random.uniform(0, 1) # if rand < epsilon: # # for next_action_index in range(env.nA): # # next_state_weighted_reward += (Q[next_state][next_action_index] / env.nA)' # rand_action = np.random.choice(np.arange(env.nA)) # next_state_weighted_reward = Q[next_state][rand_action] # else: # next_state_weighted_reward = Q[next_state][next_state_max_index] rand_action = np.random.choice(np.arange(env.nA)) next_state_weighted_reward += epsilon * Q[next_state][rand_action] / env.nA next_state_weighted_reward += (1.0-epsilon) * Q[next_state][next_state_max_index] #next_state_weighted_reward = Q[next_state][next_state_max_index] #print("{} - {}".format(next_state_weighted_reward, Q[next_state])) #update the Q value of the previous(current) state current_state_action_reward = Q[state][action] Q[state][action] = \ (1-alpha)*current_state_action_reward + alpha*(reward + next_state_weighted_reward) state = next_state return Q Q = expected_sarsa(env, 10000, .04) print() keys = sorted(Q.keys()) for key in keys: print("state: {}, rewards: {}".format(key, Q[key])) # UP = 0 # RIGHT = 1 # DOWN = 2 # LEFT = 3Episode 10000/10000, epsilon: 0.010000000000000009 state: 0, rewards: [-11.19216464 -11.21070364 -11.22118442 -11.20211127] state: 1, rewards: [-10.75063005 -10.74129132 -10.78543601 -10.77020423] state: 2, rewards: [-10.12613844 -10.13409404 -10.15896506 -10.16337335] state: 3, rewards: [-9.4465631 -9.44784059 -9.47097314 -9.46094954] state: 4, rewards: [-8.72227666 -8.71103811 -8.73772988 -8.74104803] state: 5, rewards: [-7.93633347 -7.94113959 -7.96347065 -7.97060325] state: 6, rewards: [-7.16323951 -7.1529291 -7.15037649 -7.15992603] state: 7, rewards: [-6.33936928 -6.3427328 -6.34180433 -6.33760283] state: 8, rewards: [-5.51419137 -5.50969735 -5.50605364 -5.55132544] state: 9, rewards: [-4.6560232 -4.66054869 -4.66571893 -4.70245499] state: 10, rewards: [-3.80928165 -3.80439299 -3.80591884 -4.09354459] state: 11, rewards: [-3.1493888 -3.15461852 -2.93744656 -3.52809045] state: 
12, rewards: [-11.64306492 -11.65645568 -11.65352925 -11.66540771] state: 13, rewards: [-11.0361589 [...]Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.# obtain the estimated optimal policy and corresponding action-value function Q_expsarsa = expected_sarsa(env, 10000, 1) # print the estimated optimal policy policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12) check_test.run_check('td_control_check', policy_expsarsa) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_expsarsa) # plot the estimated optimal state-value function plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])Episode 10000/10000, epsilon: 0.0100000000000000091: [Diversity](diversity) 2: [Fairness](fairness) 3: [Consistency](consistency)import pandas as pd import numpy as np import locale locale.setlocale(locale.LC_ALL, 'en_US') from collections import Counter import matplotlib.pyplot as plt import string import random df = pd.read_csv('Data.csv') #Get total views videoKeys = list(df.columns) videoKeys.remove("Unnamed: 0") df['sumVideoViews'] = df[videoKeys].sum(axis=1) #Extract Country,Age and Gender df['country'] = df['Unnamed: 0'].str.split("_",expand=True)[0] df['age'] = df['Unnamed: 0'].str.split("_",expand=True)[1] df['gender'] = df['Unnamed: 0'].str.split("_",expand=True)[2] df = df.drop('Unnamed: 0',axis=1) print("Total Views",df[videoKeys].sum(axis=0).sum()) df.loc[df.age=="65-",'age'] = "65+" #Minor Fix df.groupby("age")["sumVideoViews"].sum().sort_values(ascending=False) #Top 10 countries by view df.groupby("country")["sumVideoViews"].sum().sort_values(ascending=False)[:10] dralgos = ["Clustering","PCA","NMF","LDA","SE","UMAP"] def fixAge(temp): #Change Age Format for i in range(temp.iloc[1,1:].shape[0]): if int(temp.iloc[1,1+i]) <= 17: temp.iloc[1,1+i] = "13-17" elif int(temp.iloc[1,1+i]) > 17 and int(temp.iloc[1,1+i]) <= 24: temp.iloc[1,1+i] = "18-24" elif int(temp.iloc[1,1+i]) > 24 and int(temp.iloc[1,1+i]) <= 34: temp.iloc[1,1+i] = "25-34" elif int(temp.iloc[1,1+i]) > 34 and int(temp.iloc[1,1+i]) <= 44: temp.iloc[1,1+i] = "35-44" elif int(temp.iloc[1,1+i]) > 44 and int(temp.iloc[1,1+i]) <= 54: temp.iloc[1,1+i] = "45-54" elif int(temp.iloc[1,1+i]) > 54 and int(temp.iloc[1,1+i]) <= 64: temp.iloc[1,1+i] = "55-64" elif int(temp.iloc[1,1+i]) > 64: temp.iloc[1,1+i] = "65+" return temp df["country"].nunique() ccdf = pd.read_csv("CountryCode.csv") #Read country code DF for i in range(df.shape[0]): #Change Country codes to full country names if df["country"].iloc[i] == "NA": #Pandas converted "NA" to "null", when we read the above csv file. So just fixing that. 
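# (side note, not in the original notebook: reading CountryCode.csv with pd.read_csv("CountryCode.csv", keep_default_na=False) should keep the ISO code "NA" as a string instead of parsing it as NaN, which would make this special case unnecessary)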
df["country"].iloc[i] = "Namibia" else: df["country"].iloc[i] = ccdf.loc[ccdf["Code"]==df["country"].iloc[i],'Name'].iloc[0] df["country"].nunique() totalViews = df["sumVideoViews"].sum() #Append all personas in a list dfs = [] for dralgo in dralgos: for i in range(3): temp = pd.read_excel("personas/"+dralgo+".xlsx",sheet_name=2-i) temp = fixAge(temp) dfs.append([temp,temp.shape[1]-1,dralgo]) #Create a CSV file to store the results outputDF = pd.DataFrame(columns=["Dimensionality Reduction Algorithm","Number of Personas","GroupName","Subject","Percentage in Original Data","Unique Values in Original Data","Count in Persona Set","Percentage in Persona set","Total Unique Values in Persona Set","Statistical Parity"])Diversity#Create a CSV file to store the results tempdf = pd.DataFrame(columns=["Dimensionality Reduction Algorithm","Number of Personas","GroupName","Unique Values in Original Data","Total Unique Values in Persona Set"]) for cdf in dfs: tempdf = tempdf.append(pd.DataFrame([[cdf[2],cdf[1],"Gender",df["gender"].nunique(),cdf[0].iloc[2,1:].nunique()]], columns=tempdf.columns)) for cdf in dfs: tempdf = tempdf.append(pd.DataFrame([[cdf[2],cdf[1],"Age",df["age"].nunique(),cdf[0].iloc[1,1:].nunique()]], columns=tempdf.columns)) for cdf in dfs: tempdf = tempdf.append(pd.DataFrame([[cdf[2],cdf[1],"Country",df["country"].nunique(),cdf[0].iloc[3,1:].nunique()]], columns=tempdf.columns)) diversitydf = tempdf.groupby(["Dimensionality Reduction Algorithm","Number of Personas","GroupName"], as_index=False)["Total Unique Values in Persona Set"].first() diversitydf["persona_group"] = "coverage"+"_"+diversitydf["GroupName"] + "_" + diversitydf["Number of Personas"].astype(str) diversitydf = diversitydf.drop(["Number of Personas","GroupName"],axis=1) diversitydf = diversitydf.pivot(index='persona_group', columns='Dimensionality Reduction Algorithm', values='Total Unique Values in Persona Set') diversitydf = diversitydf.reset_index().rename_axis(None, axis=1) diversitydf uniqueValues = tempdf.groupby(["GroupName"])["Unique Values in Original Data"].first() for group in ["Age","Country","Gender"]: for numPersonas in [5,10,15]: persona_group = "coverage_"+group+"_"+str(numPersonas) diversitydf = diversitydf.append(pd.DataFrame([[persona_group+"_percentage","","","","","",""]], columns=diversitydf.columns)) for algorithm in diversitydf.columns[1:]: uniqueNumInPersona = int(diversitydf.loc[diversitydf["persona_group"]==persona_group,algorithm].iloc[0]) uniqueNumInData = uniqueValues[group] diversitydf[algorithm].iloc[diversitydf.shape[0]-1] = uniqueNumInPersona/uniqueNumInData*100 TotalUniqueValues = df["country"].nunique()+df["age"].nunique()+df["gender"].nunique() TotalUniqueValues for numPersona in [5,10,15]: total = np.zeros((len(dralgos))) for group in ["Country","Age","Gender"]: personaGroup = "coverage_"+group+"_"+str(numPersona) total = diversitydf.loc[diversitydf["persona_group"]==personaGroup].iloc[0,1:].values + total diversitydf = diversitydf.append(pd.DataFrame([["coverage_All_"+str(numPersona)]+total.tolist() ], columns=diversitydf.columns)) total_perc = total/TotalUniqueValues diversitydf = diversitydf.append(pd.DataFrame([["coverage_All_"+str(numPersona)+"__percentage"]+total_perc.tolist() ], columns=diversitydf.columns)) diversitydf diversitydf.to_csv("diversitydf.csv",index=False)Fairness Calculating Fairness for age, gender and country.#Create a CSV file to store the results outputDF = pd.DataFrame(columns=["Dimensionality Reduction Algorithm","Number of 
Personas","GroupName","Subject","Percentage in Original Data","Unique Values in Original Data","Count in Persona Set","Percentage in Persona set","Total Unique Values in Persona Set","Statistical Parity"]) #Calculate fairness for gender totalMale = df.loc[df["gender"]=="male"].groupby("gender")["sumVideoViews"].sum().values[0] for cdf in dfs: cTotal = (cdf[0].iloc[2,1:].values=="male").sum() outputDF = outputDF.append(pd.DataFrame([[cdf[2],cdf[1],"Gender","Male",np.round(totalMale/totalViews*100,3),df["gender"].nunique(),cTotal,np.round(cTotal/cdf[1]*100,3),cdf[0].iloc[2,1:].nunique(),np.round(cTotal/cdf[1]-totalMale/totalViews,3)]], columns=outputDF.columns)) #Calculate fairness for Age for age in ["25-34","35-44","18-24","45-54","55-64","65+","13-17"]: totalAge = df.loc[df["age"]==age].groupby("age")["sumVideoViews"].sum().values[0] for cdf in dfs: cTotal = (cdf[0].iloc[1,1:].values==age).sum() outputDF = outputDF.append(pd.DataFrame([[cdf[2],cdf[1],"Age",age,np.round(totalAge/totalViews*100,3),df["age"].nunique(),cTotal,np.round(cTotal/cdf[1]*100,3),cdf[0].iloc[1,1:].nunique(),np.round(cTotal/cdf[1]-totalAge/totalViews,3)]], columns=outputDF.columns)) #Calculate fairness for Country df = df.loc[~df["country"].isnull()] for country in df.country.unique().tolist(): totalCountry = df.loc[df["country"]==country].groupby("country")["sumVideoViews"].sum().values[0] for cdf in dfs: cTotal = (cdf[0].iloc[3,1:].values==country).sum() outputDF = outputDF.append(pd.DataFrame([[cdf[2],cdf[1],"Country",country,np.round(totalCountry/totalViews*100,3),df["country"].nunique(),cTotal,np.round(cTotal/cdf[1]*100,3),cdf[0].iloc[3,1:].nunique(),np.round(cTotal/cdf[1]-totalCountry/totalViews,3)]], columns=outputDF.columns)) outputDF['Fairness'] = np.abs(outputDF['Statistical Parity']) outputDF.to_csv("stat_parity.csv",index=False) #Save country fairness data countryfairness = outputDF.loc[outputDF["GroupName"]=="Country"].groupby(['Dimensionality Reduction Algorithm','GroupName','Number of Personas'],as_index=False)['Fairness'].mean() countryfairness.to_csv('Country_Stat_Parity_Mean.csv',header=True) countryfairness #Save Age fairness data agefairness = outputDF.loc[outputDF["GroupName"]=="Age"].groupby(['Dimensionality Reduction Algorithm','GroupName','Number of Personas'],as_index=False)['Fairness'].mean() agefairness.to_csv('Age_Stat_Parity_Mean.csv',header=True) agefairness #Save Gender fairness data genderfairness = outputDF.loc[outputDF["GroupName"]=="Gender"].groupby(['Dimensionality Reduction Algorithm','GroupName','Number of Personas'],as_index=False)['Fairness'].mean() genderfairness.to_csv('Gender_Stat_Parity_Mean.csv',header=True) genderfairnessConsistencyconsistencyDF = pd.DataFrame(columns=["Algorithm","Consistency Score"]) def randomString(stringLength): #Generate a random string letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(stringLength)) def common(lst1, lst2): #This function finds the count of common values between two lists. #But we also do not want to use a single value from first list to match two values in the second list. #Example: #List1 = [1,1,2,3,4] #List2 = [1,1,1,2,2,3,4] #Score = 5. Because Two ones in list1 match two times. One two in list1 match one time. 3 and 4 match 1 time. 
So total 5 matches counter = 0 lst11,lst22= lst1.copy(),lst2.copy() #Copy them, so we don't overwrite the original for i in range(len(lst11)): for j in range(len(lst22)): if lst11[i]==lst22[j]: counter +=1 lst11[i] = randomString(10000) #We don't want to count it again. So we just assign a random value to it. lst22[j] = randomString(10000) break return counter for dralgo in dralgos: #Need to process files of all algorithms uniqueValues = [] for i in range(3): #Need to read the file 3 times with all 3 sheets. (5,10,15) temp = pd.read_excel("personas/"+dralgo+".xlsx",sheet_name=2-i) uniqueValues_tmp = [] for i in range(temp.shape[1]-1): #Join Age, Gender and country. value = str(temp.iloc[1,1+i:2+i].values[0])+temp.iloc[2,1+i:2+i].values[0]+temp.iloc[3,1+i:2+i].values[0] uniqueValues_tmp.append(value) uniqueValues.append(uniqueValues_tmp) # Find common between 5 persona set and 10 score = common(uniqueValues[0], uniqueValues[1])/5 # Find common between 5 persona set and 15 score += common(uniqueValues[0], uniqueValues[2])/5 # Find common between 10 persona set and 15 score += common(uniqueValues[1], uniqueValues[2])/10 score = score/3 consistencyDF = consistencyDF.append(pd.DataFrame([[dralgo,score]], columns=consistencyDF.columns)) consistencyDF consistencyDF.to_csv("consistencyDF.csv",index=False)DFC in one tabletotalFairness = pd.concat([agefairness,countryfairness,genderfairness],axis=0) totalFairness = totalFairness.groupby(["Dimensionality Reduction Algorithm","Number of Personas"],as_index=False)["Fairness"].mean() totalFairness["Consistency"] = 0 for i in range(totalFairness.shape[0]): totalFairness["Consistency"].iloc[i] = consistencyDF.loc[consistencyDF["Algorithm"]==totalFairness["Dimensionality Reduction Algorithm"].iloc[i]]["Consistency Score"].iloc[0] totalFairness["Diversity"] = 0 totalFairness for algo in dralgos: totalFairness.loc[(totalFairness["Dimensionality Reduction Algorithm"]==algo) & (totalFairness["Number of Personas"]==5),'Diversity'] = diversitydf.loc[diversitydf["persona_group"]=="coverage_All_5__percentage"][algo].iloc[0] totalFairness.loc[(totalFairness["Dimensionality Reduction Algorithm"]==algo) & (totalFairness["Number of Personas"]==10),'Diversity'] = diversitydf.loc[diversitydf["persona_group"]=="coverage_All_10__percentage"][algo].iloc[0] totalFairness.loc[(totalFairness["Dimensionality Reduction Algorithm"]==algo) & (totalFairness["Number of Personas"]==15),'Diversity'] = diversitydf.loc[diversitydf["persona_group"]=="coverage_All_15__percentage"][algo].iloc[0] totalFairness.to_csv("DFC.csv",index=False) totalFairnessCreating Model Documentation Using Jupyterbook and Intake-esmA common step to any project is documenting your data and your data workflow. Fortunately, open tools in the scientific python ecosystem make that much easier! In this example, we will cover creating your github repo, creating the catalog, visualizing the catalog, and generating a static webpage you can share with collaborators! Fair WarningThis week's post is quite detailed, so just a warning! 
If you would like to look at the finished product, check out the following* [Github repository with the content built here](https://github.com/mgrover1/cesm-test-data)* [Finished website with content](https://mgrover1.github.io/cesm-test-data/)By the end of this post, we will cover how to build a webpage that looks like this![CESM book page](../images/cesm_book_page.png) Create your Github RepositoryGo to [Github](https://github.com/) and select "New" in the top lefthand corner next to "Repositories" - this will pull up the following window. Once you are here, go ahead and name your repository!Be sure to add:* Repository name* Description* README* Gitignore (use the python template)* Choose a license ![screen_grab](../images/github_screen_grab.png) Clone your RepositoryAt this point, you can go ahead and clone your repository! You can either clone to your local machine, or to some Jupyterhub (such as the [NCAR Jupyterhub](https://jupyterhub.ucar.edu)), which will do in this case. Copy the link from GithubCopy the link from Github by clicking on the green "Code" button![Github Clone Link](../images/github_clone_link.png) Clone to your machine!We want to clone to the repository within the [Jupyterhub](https://jupyterhub.ucar.edu), so once logging on, we open a terminal and paste the link using the following syntax```bashgit clone https://github.com/mgrover1/cesm-test-data.git``` Create a docs directoryNow that you cloned the repository, move into it and create a `docs` directory using the following```bashcd cesm-test-datamkdir docs``` Build your CatalogOpen a new Jupyter Notebook called `model_documentation.ipynb` within the `docs` directory and select a development environment which includes the following:- jupyter-book- ecgtoolsIf you haven't installed these yet, you can use conda and pip (ecgtools is not yet on conda-forge)```bashconda install -c conda-forge jupyter-book intake-esm graphvizpip install ecgtools```In this case, follow the instructions in the [Building an Intake-esm catalog from CESM2 History Files](https://ncar.github.io/esds/posts/ecgtools-history-files-example/) post provides the instructions for building the data catalog Read the Catalog and Visualize the Components and FrequencyA couple weeks ago, we covered [Creating Visualizations of Intake-ESM Catalogs](https://ncar.github.io/esds/posts/graphviz_example/) which is helpful for understanding how [`Graphviz`](https://graphviz.readthedocs.io/en/stable/manual.html) works! 
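If Graphviz is new to you, the pattern the next cells rely on boils down to three calls: create a `Digraph`, add nodes identified by string ids, and connect them with `edge`. Here is a minimal sketch (the two labels are made up purely for illustration):

```python
from graphviz import Digraph

g = Digraph(graph_attr={'rankdir': 'LR'})  # lay the graph out left-to-right
g.node('1', label='case')                  # nodes are addressed by string ids
g.node('2', label='component')
g.edge('1', '2')                           # draw an arrow from node '1' to node '2'
g                                          # displaying the object in a notebook renders the graph
```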
Importsimport intake from graphviz import DigraphRead in the Test History Catalogcol = intake.open_esm_datastore('/glade/work/mgrover/cesm-hist-test.json')We will assign the dataframe from the catalog to its own variabledf = col.dfVisualize the CatalogUsing the `Diagraph` object from the [`Graphviz` library](https://graphviz.readthedocs.io/en/stable/manual.html), we setup a loop to create the visualization using the three categories* Case* Component* Frequency# Create Digraph object - use the left to right orientation instead of vertical dot = Digraph(graph_attr={'rankdir': 'LR'}) # Save the catalog as a pdf dot.format = 'pdf' # Start counting at one for node numbers num_node = 1 # Loop through the different cases for case in df.case.unique(): case_i = num_node dot.node(str(case_i), label=case) num_node += 1 # Loop through the different components in each case for component in df.loc[df.case == case].component.unique(): comp_i = num_node dot.node(str(comp_i), label=component) dot.edge(str(case_i), str(comp_i)) num_node += 1 # Loop through the frequency in each component within each experiment for frequency in df.loc[(df.case == case) & (df.component == component)].frequency.unique(): freq_i = num_node # Pull out the the stream information stream = df.loc[ (df.case == case) & (df.component == component) & (df.frequency == frequency) ].stream.values[0] # Add both stream and frequency information to these bubbles dot.node(str(freq_i), label=f'stream: {stream} \n frequency: {frequency}') dot.edge(str(comp_i), str(freq_i)) num_node += 1 comp_i += 1 case_i += 1Now visualize it in inline by running a cell with just the `dot` objectdotSave the VisualizationIn the block of code above, we specified `dot.format = 'pdf'` which will ensure that when we save the graph, it is in PDF format. 
Other options include (but not limited to) `svg` and `png`!The `Diagraph` method for saving is `.render()` with the filename in the argument (within the parentheses)dot.render('cesm_test_catalog')Set the arguments before we startargs_dict = { "prune_ratio": 0.4, "sparsity_reg": "none", "retrain_mode": "weight-rewinding", "model_path": "./saved_models/oneshot", "seed": 2020, "batch_size": 128, "init_lr": 0.001, "weight_decay": 1e-4, "train_epochs": 50, "retrain_epochs": 50, "rewind_epoch": 1.4, # 1.4 chosen as per the "The Lottery Ticket Hypothesis at Scale" paper "use_early_stop": True, "patience":10 } class Args(): def __init__(self): pass args = Args() for k,v in args_dict.items(): setattr(args, k, v)Get the DataLoaders and the Modeldataset = DATASETS['MNIST'] train_loader, val_loader, test_loader = dataset['loaders'](args, seed=2020) criterion = criterion = nn.CrossEntropyLoss().cuda() model = LeNet(dataset['num_classes'], criterion, device) model = model.to(device) optimizer = torch.optim.SGD(model.parameters(), args.init_lr, momentum=0.9, weight_decay=1e-4) lr_scheduler = NoneDefine the parameters to prunecheck the parameter list by running the following cell[name for name, _ in model.named_parameters()] def parameters_to_prune(m): return [ (m.conv1, 'weight'), (m.conv2, 'weight'), (m.fc1, 'weight'), (m.fc2, 'weight'), ]Lets get the Lottery Ticketreference: [Lottery Ticket Hypothesis](https://arxiv.org/abs/1803.03635)one_shot_pruning = OneShotPruning(model, parameters_to_prune, optimizer, lr_scheduler, train_loader, val_loader, compute_accuracy, device, args) _, ckpt_orig_path, ckpt_pruned_path, loss_history_orig, loss_history_pruned = one_shot_pruning.get_ticket()Sentiment Analysis Using RNN We use an Sequential LSTM to create a supervised learning approach for predicting the sentiment of an article. This notebook was adapted from https://www.kaggle.com/ngyptr/lstm-sentiment-analysis-keras. Data and Packages ImportingBelow, we import all the appropriate libraries and import the data of classified information. 
Currently we are using Elais' KMeans classification process as a feeder mechanism to train our RNN; however, our next would be to train the model using industry verified dataset and then predict our model approrpiately.As far as the data imported is concerned, out of all the articles processed by Elais, we only sample 10,000 articles from the file in order to expedite the RNN processing.import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical import re data = pd.read_csv("Classified Articles.csv") data = data[['c','marks']] data = data.sample(10000)Word Vector TokenizationThe Tokenizer below converts the key "buzzwords" from the input data to produce a vector for the RNN to process.for idx,row in data.iterrows(): row[0] = row[0].replace('rt',' ') max_fatures = 2000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data['c'].values) X = tokenizer.texts_to_sequences(data['c'].values) X = pad_sequences(X, maxlen = 100)Building the LSTM Modelembed_dim = 128 lstm_out = 196 model = Sequential() model.add(Embedding(max_fatures, embed_dim,input_length = X.shape[1])) model.add(SpatialDropout1D(0.4)) model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(2,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy']) print(model.summary())_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_5 (Embedding) (None, 100, 128) 256000 _________________________________________________________________ spatial_dropout1d_5 (Spatial (None, 100, 128) 0 _________________________________________________________________ lstm_5 (LSTM) (None, 196) 254800 _________________________________________________________________ dense_5 (Dense) (None, 2) 394 ================================================================= Total params: 511,194 Trainable params: 511,194 Non-trainable params: 0 _________________________________________________________________ NoneBuilding a Training and Test SetThe training set essentially a 67% random sample of the 10,000 samples from Elais' sentiment labeling.Y = pd.get_dummies(data['marks']).values X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42) print(X_train.shape,Y_train.shape) print(X_test.shape,Y_test.shape)(6700, 100) (6700, 2) (3300, 100) (3300, 2)Training the RNNThe method below trains the RNN with the training data. Ideally, we should have a higher epoch to better train the model, but for the sake of time, we have used 7.batch_size = 32 model.fit(X_train, Y_train, epochs = 7, batch_size=batch_size, verbose = 2)Epoch 1/7 - 40s - loss: 0.5934 - acc: 0.7242 Epoch 2/7 - 37s - loss: 0.5762 - acc: 0.7319 Epoch 3/7 - 37s - loss: 0.5640 - acc: 0.7342 Epoch 4/7 - 34s - loss: 0.5519 - acc: 0.7403 Epoch 5/7 - 36s - loss: 0.5360 - acc: 0.7507 Epoch 6/7 - 35s - loss: 0.5225 - acc: 0.7543 Epoch 7/7 - 33s - loss: 0.5110 - acc: 0.7649Evaluating the ModelBased on the above assumptions and sampling, our model calculates at 0.7 accuracy based on the data from Elais' model. 
The next steps of improvement for the models are as follows:- Finding better training data- Increasing Epoch for better accuracy- Allowing a larger maximum length of wordsThese next steps will allow us to train a better RNN model and subsequently make a stronger prediction of sentiment.validation_size = 1500 X_validate = X_test[-validation_size:] Y_validate = Y_test[-validation_size:] X_test = X_test[:-validation_size] Y_test = Y_test[:-validation_size] score,acc = model.evaluate(X_test, Y_test, verbose = 2, batch_size = batch_size) print("score: %.2f" % (score)) print("acc: %.2f" % (acc))score: 0.63 acc: 0.71Assign SentimentsGiven that the RNN produces a probability of the sentiment of an article, we attempt to normalize the value and create binary assignments before exporting it for the time-series analysis process.output_data = pd.read_csv("Classified Articles.csv") output_data = output_data.drop("marks",axis=1) max_fatures = 2000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(output_data['c'].values) X = tokenizer.texts_to_sequences(output_data['c'].values) X = pad_sequences(X, maxlen = 100) predictions = model.predict(X, batch_size=batch_size, verbose=2, steps=None) predictions_assigned = [] for i in predictions: if i[0] > i[1]: predictions_assigned.append(0) else: predictions_assigned.append(1) output_data["marks"] = predictions_assigned output_data.to_csv("Articles with Sentiment.csv")APPENDIX: Older Attempts to build RNNfrom keras.datasets import imdb # Import libraries and packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import re from datetime import datetime # Plotting import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import matplotlib.gridspec as gridspec # Plot styling sns.set(style='white', context='notebook', palette='deep') import nltk from nltk.cluster import KMeansClusterer from nltk.cluster import euclidean_distance from sklearn import cluster from sklearn import metrics from sklearn import cluster from sklearn.cluster import KMeans from sklearn import decomposition from sklearn.metrics import adjusted_rand_score from sklearn.metrics.pairwise import cosine_similarity from sklearn.pipeline import Pipeline from sklearn.ensemble import ExtraTreesClassifier from sklearn.feature_extraction.text import TfidfVectorizer from gensim.models import Word2Vec from gensim.models import word2vec from collections import defaultdict from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') eng_stopwords = nltk.corpus.stopwords.words('english') ## modified the code from open source website http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/ class MeanEmbeddingVectorizer(object): def __init__(self, word2vec): self.word2vec = word2vec # if a text is empty we should return a vector of zeros # with the same dimensionality as all the other vectors self.dim = len(next(iter(word2vec.values()))) def fit(self, X, y): return self def transform(self, X): return np.array([ np.mean([self.word2vec[w] for w in words if w in self.word2vec] or [np.zeros(self.dim)], axis=0) for words in X ]) class TfidfEmbeddingVectorizer(object): def __init__(self, word2vec): self.word2vec = word2vec self.word2weight = None self.dim = len(next(iter(word2vec.values()))) def fit(self, X): tfidf = TfidfVectorizer(analyzer=lambda x: x) tfidf.fit(X) # if a word was never seen - it must be at least as infrequent # as any of the known words - so the default idf is the max 
of # known idf's max_idf = max(tfidf.idf_) self.word2weight = defaultdict( lambda: max_idf, [(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()]) return self def transform(self, X): return np.array([ np.mean([self.word2vec[w] * self.word2weight[w] for w in words if w in self.word2vec] or [np.zeros(self.dim)], axis=0) for words in X ]) from keras.datasets import imdb df = pd.read_csv("Classified Articles.csv") df.head() # vocabulary_size = 5000 # (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size) # print('Loaded dataset with {} training samples, {} test samples'.format(len(X_train), len(X_test))) X_train, X_test, y_train, y_test = train_test_split(df["c"], df["marks"], test_size=0.25) vectorized_x_train = [] model = word2vec.Word2Vec(X_train, min_count=15) w2v = dict(zip(model.wv.index2word, model.wv.syn0)) t = TfidfEmbeddingVectorizer(w2v) t.fit(X_train) vectorized_x_train.append(t.transform(X_train)) vectorized_x_test = [] model = word2vec.Word2Vec(X_test, min_count=15) w2v = dict(zip(model.wv.index2word, model.wv.syn0)) t = TfidfEmbeddingVectorizer(w2v) t.fit(X_test) vectorized_x_test.append(t.transform(X_test)) X_train = vectorized_x_train X_test = vectorized_x_test from keras.preprocessing import sequence max_words = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_words) X_test = sequence.pad_sequences(X_test, maxlen=max_words) from keras import Sequential from keras.layers import Embedding, LSTM, Dense, Dropout embedding_size=100 model=Sequential() model.add(Embedding(5000, embedding_size, input_length=max_words)) model.add(LSTM(100)) model.add(Dense(1, activation='sigmoid')) print(model.summary()) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # Understanding Metrics print(max([len(x) for x in X_train])) batch_size = 64 num_epochs = 3 X_valid, y_valid = X_train[:batch_size], y_train[:batch_size] X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:] model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs) scores = model.evaluate(X_test, y_test, verbose=0) print('Test accuracy:', scores[1]) df = pd.read_csv("Articles with Sentiment.csv") dfEnvironment prepare#use mnist in keras from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) print ("Done......... !")Extracting /tmp/data/train-images-idx3-ubyte.gz Extracting /tmp/data/train-labels-idx1-ubyte.gz Extracting /tmp/data/t10k-images-idx3-ubyte.gz Extracting /tmp/data/t10k-labels-idx1-ubyte.gz Done......... !Sensitivity analysis============__Goal__: - run sensitivity analysis to show the impact of a given parameter on the SMRT output __Learning__: SMRT is able to iterate on several arguments when it is unambiguous. For instance, a sensor with multiple frequencies, angles or polarizations is automatically understood. The `result` contains all the values which can be accessed with arguments in TbV() and similar functions. E.g. TbV(frequency=37e9)This is similar when a list of snowpacks is given to `run`. The `result` contains all the computations. 
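To make the multi-frequency case concrete, here is a small sketch; it assumes a snowpack `sp` built with `make_snowpack` and uses the same `make_model`/`sensor_list.passive` calls as the cells below, so treat it as an illustration rather than a required step.

```python
# sketch: one run with a two-frequency sensor; the single Result can be queried per frequency
model = make_model("iba", "dort")
sensor = sensor_list.passive([19e9, 37e9], 55)
res = model.run(sensor, sp)                     # sp: any snowpack from make_snowpack
print(res.TbV(frequency=19e9), res.TbV(frequency=37e9))
```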
The 'snowpack' dimension is automatically added but we can also propose a custom name for this dimension.In the following, we show different approaches to conduct sensitivity studies that you can run and then apply to a study case of your choice: - take the Dome C snowpack and study the sensitivity of TbH 55° to superficial density - take any snowpack previously defined and investigated the sensivitiy to liquid_water - etcimport numpy as np import matplotlib.pyplot as plt %matplotlib notebook from smrt import make_model, make_snowpack, sensor_listBuild a list of snowpack--------------------------------The key idea is to build a list of snowpack. E.g. we want to test the sensitivity of TB's to the radius. We first build a list of snowpack with different radius.# prepare the snowpack density = 300.0 radius = np.arange(0.05, 0.5, 0.01) *1e-3 # from 0 to 0.5mm # the NAIVE APPROACH: snowpack = list() for x in radius: sp = make_snowpack([1000.0], "sticky_hard_spheres", density=density, temperature=265, radius=x, stickiness=0.15) snowpack.append(sp)In simple cases (as this one), it is easier to use "list comprehension", a nice python feature to create list.# with list comprehension snowpack = [make_snowpack([1000.0], "sticky_hard_spheres", density=density, temperature=265, radius=x, stickiness=0.15) for x in radius] # prepare the sensor and model model = make_model("iba", "dort") sensor = sensor_list.passive(37e9, 55) #run!Now we have a list of snowpacks, we want to call the model for each snowpack. We can use list comprehension again.results = [model.run(sensor, sp) for sp in snowpack]This return a list of results. To extract the TB V for each result can be done with another list comprehension. And then we plot the results.tbv = [res.TbV() for res in results] plt.figure() plt.plot(radius, tbv)Nice ? We can do much better because `Model` can directly run on a list of snowpacks. It does not return a list of results, but **a unique result with a new coordinate** which is much more convenient.results = model.run(sensor, snowpack, snowpack_dimension=('radius', radius)) print(type(results)) # look results is a Result, not a list print(results.coords) # look, we have several coordinates, one is call corr_legnThis is more compact and nicer, `results` explicitly show the radius dimension. Plotting is thus easier:plt.figure() plt.plot(results.radius, results.TbV())And it is easy to save the result to disk:results.save("radius-sensitivity.nc")Recap:---------snowpack = [make_snowpack([1000.0], "sticky_hard_spheres", density=density, temperature=265, radius=x, stickiness=0.15) for x in radius] model = make_model("iba", "dort") sensor = sensor_list.passive([19e9, 37e9], 55) results = model.run(sensor, snowpack, snowpack_dimension=('radius', radius)) plt.figure() plt.plot(results.radius, results.TbV(frequency=19e9), label="19 GHz") plt.plot(results.radius, results.TbV(frequency=37e9), label="37 GHz") plt.legend() results.TbV()**Contents:** 1. File selection 2. `.text` section extraction 3. Preliminary visualization 4. Sampling 5. Sample Visualization 6. Creating the Reference 7. Baseline Testing **Note:**Files with less than 10 kilobytes of machine code were excluded from both reference distribution construction and testing. This is because if a file contains less than 10k of code, most likely there will not be enough 1024-byte blocks to form a cluster that DBSCAN can detect. 
Small file size is a limitation of the Centrifuge toolkit.import os import sys import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [16, 9] import seaborn as sns sns.set_style("whitegrid") sys.path.append("../../pyelftools") # relative location of pyelftools repo cloned from github from elftools.elf.elffile import ELFFile from tqdm import tqdm from scipy import stats files = os.listdir("amd64") len(files) files = [file for file in files if "code" not in file] len(files) np.random.seed(1) sample_files = np.random.choice(files, size=1000, replace=False) code_dict = {} path = "amd64/" counter = 0 for file in sample_files: with open(path + file, "rb") as f: try: elffile = ELFFile(f) except: continue try: text_section_header = elffile.get_section_by_name(".text").header except: continue offset = text_section_header.sh_offset size = text_section_header.sh_size if size >= 10000: # minimum size cutoff: 10kb of machine code counter += 1 f.seek(offset) code_dict[file] = list(f.read(size)) if counter == 100: # number of files to select samples from break len(code_dict.keys()) for file, code in code_dict.items(): print("%s\t%s\t%s" %(file, str(len(code)), code[:10])) def plot_dists(dictionary): for file, code in dictionary.items(): sns.distplot(code, bins = 256, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':1, 'alpha':0.1}, kde_kws={'cumulative': True}, norm_hist=True, color="blue") plt.show() plot_dists(code_dict) def plot_sample(sample): sns.distplot(sample, norm_hist=True, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':0.1, 'alpha':0.1}, kde_kws={'cumulative': True}, # want CDF bins=256, color="blue") def create_code_samples(sample_size, n_rows, sample_source, plot=False): samples_df = pd.DataFrame(index=np.arange(0, n_rows), columns=[i for i in range(sample_size)]) for i in tqdm(np.arange(0, n_rows)): sample = np.random.choice(sample_source, size=sample_size, replace=True) samples_df.loc[i] = sorted(sample) samples_mean = samples_df.mean(axis = 0) if plot is True: sns.distplot(samples_mean, norm_hist=True, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':1, 'alpha':1},kde_kws={'cumulative': True},bins=256,color="red") plt.show() return samples_mean def create_reference_distribution(code_dictionary, sample_size, n_samples): # create data frame to hold mean sample of each file's code means_df = pd.DataFrame(index=list(code_dictionary.keys()), columns=[i for i in range(sample_size)]) # for each file's code, store mean of the samples in data frame for file, code in code_dictionary.items(): means_df.loc[file] = create_code_samples(sample_size, n_samples, code) # average all samples means together mean = means_df.mean(axis = 0) median = means_df.median(axis = 0) return mean, median mean, median = create_reference_distribution(code_dict, 1000, 1000) sns.distplot(mean, bins = 256, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':1, 'alpha':1}, kde_kws={'cumulative': True}, norm_hist=True, color="red") plt.title("Mean byte values of the means (red)") plot_dists(code_dict) sns.distplot(median, bins = 256, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':1, 'alpha':1}, kde_kws={'cumulative': True}, norm_hist=True, color="red") plt.title("Median byte values of the means") plot_dists(code_dict)The distribution of byte values of the **median of the means** looks to be more representative than the **mean of the means**.from 
collections import Counter rounded_medians = [round(i) for i in median] sorted_counts = sorted(Counter(rounded_medians).items(), key = lambda x: x[1], reverse=True) counts_df = pd.DataFrame(sorted_counts[:25]) counts_df = counts_df.rename(columns={0:"byte value", 1:"frequency"}) sns.barplot(x="byte value", y="frequency", data=counts_df, order=counts_df["byte value"]) plt.title("Top 25 most common byte values in AMD64 machine code reference distribution (1000 bytes)") plt.show() [hex(i) for i in list(dict(sorted_counts).keys())[:25]] # get pool of test files test_pool = [file for file in files if file not in sample_files] # select test files np.random.seed(1) test_files = np.random.choice(test_pool, size=3000, replace=False) # check for overlap between files that the reference distribution was built from and the files used for testing list(set(code_dict.keys()) & set(test_files)) #path = "amd64/" def extract_code(path, file_list, min_file_size=10000, n_files=1000): code_dict = {} counter = 0 for file in file_list: with open(path + file, "rb") as f: try: elffile = ELFFile(f) except: continue try: text_section_header = elffile.get_section_by_name(".text").header except: continue offset = text_section_header.sh_offset size = text_section_header.sh_size if size >= min_file_size: # minimum size cutoff: 10kb of machine code counter += 1 f.seek(offset) code_dict[file] = list(f.read(size)) if counter == n_files: # number of files to select samples from break else: continue return code_dict test_code_dict = extract_code("amd64/", test_files) for file, code in test_code_dict.items(): print("%s\t%s\t%s" % (file, str(len(code)), code[:10])) test_results_df = pd.DataFrame(index=test_code_dict.keys(), columns=["size", "wasserstein distance","energy distance", "mean", "median", "standard deviation"]) for file, code in test_code_dict.items(): test_results_df.loc[file]["size"] = len(code) test_results_df.loc[file]["wasserstein distance"] = stats.wasserstein_distance(median, code) test_results_df.loc[file]["energy distance"] = stats.energy_distance(median, code) test_results_df.loc[file]["mean"] = np.mean(code) test_results_df.loc[file]["median"] = np.median(code) test_results_df.loc[file]["standard deviation"] = np.std(code) print(test_results_df.shape) test_results_df test_results_df.mean() test_results_df.median() test_results_df["size"].sum() test_results_df["size"].sum() / 1000000 sns.boxplot(x="wasserstein distance", data=test_results_df) plt.show() sns.distplot(test_results_df["wasserstein distance"], bins=30) plt.show() plt.scatter(test_results_df["size"], test_results_df["wasserstein distance"]) plt.xlabel(".text section size (bytes)") plt.ylabel("Wasserstein distance from AMD64 reference distribution") plt.title(".text section size vs. Wasserstein distance from AMD64 reference distribution") plt.show() plt.scatter(test_results_df["size"], test_results_df["wasserstein distance"]) plt.xlabel(".text section size (bytes)") plt.ylabel("Wasserstein distance from AMD64 reference distribution") plt.title(".text section size vs. 
Wasserstein distance from AMD64 reference distribution (outlier not shown)") plt.xlim(0, 2000000) plt.show() plt.scatter(test_results_df["wasserstein distance"], test_results_df["energy distance"]) for code in test_code_dict.values(): plot_sample(code) sns.distplot(median, # reference distribution norm_hist=True, kde=False, hist_kws={'histtype':'step', 'cumulative': True, 'linewidth':1, 'alpha':1}, kde_kws={'cumulative': True}, bins=256, color="red") plt.title("1000 .text section CDFs with AMD64 reference CDF in red") plt.show() # export reference distribution with open("AMD64_reference", "wb") as f: pickle.dump(median, f) Research question/interests How does game categories affects the ratings of a game? A rare research insight on how gaming categories (i.e adventure, action) affects the ratings of a game. TASK 1 This is the summary (Range index, non-null counts, index Dtype, memory usage) of our dataframe/set:import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns import missingno %matplotlib inline df = pd.read_csv('../data/raw/android-games.csv') df.info() df.head() df['rank'] = df['rank'].astype('str') df['TR'] = df['total ratings'].astype('category') df['AV'] = df['average rating'].astype('category') df['5 star ratings'] = df['5 star ratings'].astype('category') df['4 star ratings'] = df['4 star ratings'].astype('category') df['3 star ratings'] = df['3 star ratings'].astype('category') df['2 star ratings'] = df['2 star ratings'].astype(str).str.strip().astype('category') df['1 star ratings'] = df['1 star ratings'].astype('category') df['CT'] = df['category'].astype('category') RangeIndex: 1730 entries, 0 to 1729 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 rank 1730 non-null int64 1 title 1730 non-null object 2 total ratings 1730 non-null int64 3 installs 1730 non-null object 4 average rating 1730 non-null int64 5 growth (30 days) 1730 non-null float64 6 growth (60 days) 1730 non-null float64 7 price 1730 non-null float64 8 category 1730 non-null object 9 5 star ratings 1730 non-null int64 10 4 star ratings 1730 non-null int64 11 3 star ratings 1730 non-null int64 12 2 star ratings 1730 non-null int64 13 1 star ratings 1730 non-null int64 14 paid 1730 non-null bool dtypes: bool(1), float64(3), int64(8), object(3) memory usage: 191.0+ KBThese are the types of datas in python language of our dataframeimport pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns import missingno %matplotlib inline df.info() RangeIndex: 1730 entries, 0 to 1729 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 rank 1730 non-null object 1 title 1730 non-null object 2 total ratings 1730 non-null int64 3 installs 1730 non-null object 4 average rating 1730 non-null int64 5 growth (30 days) 1730 non-null float64 6 growth (60 days) 1730 non-null float64 7 price 1730 non-null float64 8 category 1730 non-null object 9 5 star ratings 1730 non-null category 10 4 star ratings 1730 non-null category 11 3 star ratings 1730 non-null category 12 2 star ratings 1730 non-null category 13 1 star ratings 1730 non-null category 14 paid 1730 non-null bool 15 TR 1730 non-null category 16 AV 1730 n[...]These are the first five games in our dataframe:df.head()And these are the last five games in our dataframe:df.tail()These are the names of every columns in our dataframe:df.columnsThe next table describes the basic statistics of 
the numerical columns of our dataset. These include the average value of each column (for example, the average rating for all games on this list is about 3.9 stars, ranging from 1 to 5 stars). Here is a complete explanation of each column: Count = Total number of rows for a column. Mean = Average value of the column (the sum of the column divided by the number of data points in that column). Std = Standard deviation, which tells us how far data points are scattered around the mean value. A high standard deviation (e.g., the std for Total Ratings) means many data points lie far from its mean value of 1.064e+06. Min = The absolute minimum value in that column. Max = The absolute maximum value in that column. 25% = The value below which 25% of the data falls (the first quartile). 50% = The value below which 50% of the data falls (the median). 75% = The value below which 75% of the data falls (the third quartile).pd.set_option('precision', 3) df.describe(include = [np.number]).TThese are the total numbers of NaN entries in each column (NaN = unavailable data): for our dataset, there is no unavailable data.df.isnull().sum()And last but not least, the correlation heatmap. For my research question, we will focus on columns related to ratings, including the total ratings, the average rating, and the ratings of each star.df = pd.read_csv('../data/raw/android-games.csv') corrmap = df.corr() sns.heatmap(corrmap, xticklabels = corrmap.columns, yticklabels = corrmap.columns, annot=True, cmap=sns.diverging_palette(220, 20, as_cmap=True)) sns.set(rc = {'figure.figsize':(18,10)}, style = 'white')TASK 2 Load Datadf = pd.read_csv('../data/raw/android-games.csv') dfCleaning the Data *Mr. Clean cleans the data* *Mr. Clean wipes unnecessary stuff*df_clean = df.drop(["installs","growth (30 days)","growth (60 days)", "price", "paid"], axis=1) df_cleanData Wrangling We then wrangle the data by sorting its valuesdf_sort = df_clean.sort_values("category").reset_index(drop=True) df_sortData Processing Then we process the data by renaming its columns to provide a better output viewdf_rename = df_sort.rename(columns= {"total ratings": "Total amount of raters", "5 star ratings": "5 stars", "4 star ratings": "4 stars", "3 star ratings": "3 stars", "2 star ratings": "2 stars", "1 star ratings": "1 star", "rank": "Rank of game in Google Playstore"}) df_renameTASK 3 The first task is to make and test our method chain function.def load_and_process(url_or_path_to_csv_file): # Method Chain 1 (Load data and deal with missing data) df1 = ( pd.read_csv(url_or_path_to_csv_file) .dropna(how="any") .sort_values("category") .reset_index(drop=True) ) # Method Chain 2 (Create new columns, drop others, and do processing) df2 = ( df1 ) # Make sure to return the latest dataframe return df2 load_and_process('../data/raw/android-games.csv')Afterwards, we copy and paste the same function into a new .py file called project_function_3. We will then import that function module here.import project_function_3 df = project_function_3.load_and_process('../data/raw/android-games.csv') dfTASK 4 Here we will conduct our analysis and finally answer my research question (How do game categories affect the ratings of a game?) First of all, we will define three graphs based on the number of ratings and the game categories. The first one covers the five star ratings (the maximum number of stars you can give). 
TASK 4 Here we will conduct our analysis and finally answer my research question (How do game categories affect the ratings of a game?). First of all, we will define three graphs based on the number of ratings and the game categories. The first one will be the five-star ratings (the maximum number of stars you can give). NOTE: We are going to use the dataset that we wrangled and processed back in Task 2.# make seaborn barplot sns.barplot(x = '5 stars', y = 'category', data = df_rename) # Show the plot plt.show()From the first graph, we can conclude that Action games win the medal for the most 5-star ratings, followed by Casual, then Strategy/RTS. Now we're going to graph the three-star ratings (the midpoint of the rating scale) against each game category to see another perspective:# make seaborn barplot sns.barplot(x = '3 stars', y = 'category', data = df_rename) # Show the plot plt.show()It seems that the three-star ratings have the same trio of winners. Lots of people rated Action, Casual, and RTS games with both 5 stars and 3 stars, which suggests that none of these categories actually excels over the other types of games; they simply have many more people playing and rating them. Last but not least, we're going to graph the minimum number of stars you can give (1 star) against each game category:# make seaborn barplot sns.barplot(x = '1 star', y = 'category', data = df_rename) # Show the plot plt.show()From the previous three graphs, it seems that all three have similar shapes, with Action games having the most ratings and Educational games the least. Unfortunately, this means that while Action games have the highest number of 5-star ratings, they also have the highest number of 1-star ratings. This undermines the hypothesis that Action games are rated higher overall than other categories just because they lead the 5-star graph. To verify this, we will show the average rating for each game category:# make seaborn barplot sns.barplot(x = 'average rating', y = 'category', data = df_rename) # Show the plot plt.show()As we might have predicted, every game category has a similar average rating. There is at most a range of 0.16 stars between the highest category (Action) and the lowest (Music): quite a trivial difference. Now we take a look at how many people rate each game category:# make seaborn barplot sns.barplot(x = 'Total amount of raters', y = 'category', data = df_rename) # Show the plot plt.show()This graph shows that the trio (Action, Casual, Strategy) does indeed have the largest number of raters. This explains why Action games lead the 1-, 3-, and 5-star graphs while Educational and Music games have the lowest values in every graph. In other words, the count for each star level correlates with the total number of raters rather than with the game's category. This might be confusing, so let's take a sample scatter plot between the number of raters and the number of 3- and 5-star ratings:x = sns.regplot(x = '3 stars', y = 'Total amount of raters', data = df_rename) x = sns.regplot(x = '5 stars', y = 'Total amount of raters', data = df_rename)QUESTION 1import pandas as pd print(pd.__version__)1.0.5QUESTION 2import numpy as np ar=np.array(['good morning','how','are','you','?']) ser=pd.Series(ar) print(ser,"\n", type(ser))0 good morning 1 how 2 are 3 you 4 ? dtype: object QUESTION 3import numpy as np ar=np.array(['WHAT','IS','UP','GUYS','????']) ds=pd.Series(ar) df=pd.DataFrame(ds) df[1]=ds.index print(df)0 1 0 WHAT 0 1 IS 1 2 UP 2 3 GUYS 3 4 ???? 4QUESTION 4import seaborn as sns print(sns.get_dataset_names()) Ndata=sns.load_dataset('mpg') NdataC:\Users\Dell\anaconda3\lib\site-packages\seaborn\utils.py:384: GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("lxml"). 
This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently. The code that caused this warning is on line 384 of the file C:\Users\Dell\anaconda3\lib\site-packages\seaborn\utils.py. To get rid of this warning, pass the additional argument 'features="lxml"' to the BeautifulSoup constructor. gh_list = BeautifulSoup(http)QUESTION 5country=Ndata["origin"].unique() print("Origin Countries: ",country)Origin Countries: ['usa' 'japan' 'europe']QUESTION 6uscar=Ndata.loc[Ndata["origin"]=="usa"] print(uscar)mpg cylinders displacement horsepower weight acceleration \ 0 18.0 8 307.0 130.0 3504 12.0 1 15.0 8 350.0 165.0 3693 11.5 2 18.0 8 318.0 150.0 3436 11.0 3 16.0 8 304.0 150.0 3433 12.0 4 17.0 8 302.0 140.0 3449 10.5 .. ... ... ... ... ... ... 392 27.0 4 151.0 90.0 2950 17.3 393 27.0 4 140.0 86.0 2790 15.6 395 32.0 4 135.0 84.0 2295 11.6 396 28.0 4 120.0 79.0 2625 18.6 397 31.0 4 119.0 82.0 2720 19.4 model_year origin name 0 70 usa chevrolet chevelle malibu 1 70 usa [...]BCGNet Demo The following python toolbox trains a neural network intended for BCG artifact removal in EEG-fMRI datasets. More detail about our method can be found in the paper McIntosh et al. IEEE Trans Biomed Engi at https://ieeexplore.ieee.org/document/9124646# import commands import os from pathlib import Path from config import get_config from session import SessionPath setup The first step is to set up all the relevant path. Here for the purpose of the demo we will define all path here; however, for custom use it is recommended to set up all path in the yaml file. Option 1: In YAML File (Recommended) It is recommended for the user to set up the path in the yaml fileVariables that needs to be set up:| Variable Name | Type | Description ||---------------|------|-----------------------------------------------------------------------------------------------------------------------|| d_root | str | the absolute path to the root directory of this package, e.g. "/home/jsmith/BCGNet/" || d_data | str | the absolute path to the directory containing all raw datasets, e.g. "/home/jsmith/data/" || d_model | str | the absolute path to the directory to save trained models || d_output | str | the absolute path to the directory to save cleaned datasets || d_eval | str | (Optional) the absolute path to directory containing all the metric datasets used for comparing performance of BCGNet || str_eval | str | (Optional) must be specified if d_eval is given, the name of the alternative method used for comparison |Once the user has successfully set up all these variable in the yaml file, it's only needed to execute the following command# cfg = get_config(d_root / 'config' / 'default_config.yaml')Option 2: In Python For the purpose of this demo, we will set all the variables listed above in the Jupyter notebook. 
Additionally, here we will set them as pathlib objects instead of strings for convenience.# get the absolute path to the root directory of the package d_root = Path(os.getcwd()) # get the absolute path to the directory containing all data # all dataset should be in EEGLAB formats # here the structure of directory is presumed to be # d_data / subXX / input_file_naming_format # where input_file_naming_format is defined in the yaml file d_data = d_root / 'example_data' / 'raw_data' # get the absolute path to the directory to save all trained models # structure of the directory will be # d_model / model_type / subXX / {model_type}_{time_stamp} / {model_type}_{time_stamp}.index # (note: depending on TF version, either save in the new TF checkpoint format or old h5 format) d_model = d_root / 'trained_model' / 'non_cv_model' # get the absolute path to the directory to save all cleaned dataset # structure of the directory will be # d_output / subXX / output_file_naming_format d_output = d_root / 'cleaned_data' / 'non_cv_data' # (Optional) # if the users wish, a dataset used to compare the performance of # BCGNet can be provided, here a OBS-cleaned dataset is used # convention is same as the d_data and all dataset # should be in EEGLAB format # get the absolute path to the directory containing all data # cleaned by the alternative method # here the structure of the directory is also presumed to be # d_eval / subXX / eval_file_naming_format d_eval = d_root / 'example_data' / 'obs_cleaned_data' # (Optional - relevant only if d_eval is provided) # define the name of the alternative method str_eval = 'OBS' # generate a config (cfg) object from the yaml file # all hyperparameters are from the paper cfg = get_config(filename=d_root / 'config' / 'default_config.yaml') # change all the path (recommended to set these in the yaml file directory) cfg.d_root = d_root cfg.d_data = d_data cfg.d_model = d_model cfg.d_output = d_output cfg.d_eval = d_eval cfg.str_eval = str_evalAdditional note If the user wants to conduct a quick test, the following line can be used to set the maximum number of training iterations to be used.# If the user just want a quick test, can set the number of maximum epochs # to be few so training will be over quickly via the line below # cfg.num_epochs = 5Initialize training session All key hyperparamters relevant to preprocessing and training are set in the yaml file# provide the name of the subject str_sub = 'sub34' # provide the index of the runs to be used for training # if just a single run, then [1] or [2] # if multiple runs then [1, 2] # for a run from sub11 and run index 1 # filename is presumed to be # subXX_r0X_ vec_idx_run = [1, 2] # str_arch specifies the type of the model to be used # if str_arch is not provided then the default model (same as paper) # is used. If user wants to define their own model, example on how to do it # can be found in models/gru_arch_000.py, the only caveat is that # the name of the file and class name has to be same as the type of the model # e.g. 
gru_arch_000 # random_seed is set to ensure that the splitting of entire dataset into # training, validation and test sets is always the same, useful for model # selection # verbose sets the verbosity of Keras during model training # 0=silent, 1=progress bar, 2=one line per epoch # overwrite specifies whether or not to overwrite existing cleaned data # cv_mode specifies whether or not to use cross validation mode # more on this later s1 = Session(str_sub=str_sub, vec_idx_run=vec_idx_run, str_arch='default_rnn_model', random_seed=1997, verbose=2, overwrite=False, cv_mode=False, num_fold=5, cfg=cfg)Prepare for training# loads all dataset s1.load_all_dataset() # preform preprocessing of all dataset and initialize model s1.prepare_training()Model training and generating cleaned dataset# train the model s1.train() # generate cleaned dataset s1.clean() # plot the training history s1.plot_training_history()Evaluating the performance# Evaluate the performance of the model in terms of RMS and # ratio of band power of cleaned dataset in delta, theta # and alpha bands compared to the raw data # mode specifies which set to evaluate the performance on # mode='train' evaluates on training set # mode='valid' evaluates on validation set # mode='test' evaluates on test set s1.evaluate(mode='test') # Plot a random epoch from a specified channel and a set # str_ch_eeg should be set to standard EEG channel names, e.g. Pz, Fz, Oz etc. # mode='train' evaluates on training set # mode='valid' evaluates on validation set # mode='test' evaluates on test set s1.plot_random_epoch(str_ch_eeg='T8', mode='test') # Plot the power spectral density (PSD) from the mean/specified channel # mode='train' evaluates on training set # mode='valid' evaluates on validation set # mode='test' evaluates on test set # str_ch_eeg='avg' plots the mean PSD across all channels # str_ch_eeg could also be set to standard EEG channel names, e.g. Pz, Fz, Oz etc. s1.plot_psd(str_ch_eeg='avg', mode='test') s1.plot_psd(str_ch_eeg='T8', mode='test')Saving trained model and cleaned dataset# save trained model s1.save_model() # save cleaned data in .mat files # the saved .mat file has one field 'data' which contains the # n_channel by n_time_stamp matrix holding all cleaned data # note that the unit of the data saved in the mat file # is in Volts instead of in microVolts s1.save_data() # alternatively, save cleaned data in Neuromag .fif format # (note that EEEGLAB support for .fif format is limited) # s1.save_dataset()Cross validation mode Alternatively, if cross validation is deemed necessary, the users can set up a cross validation style session via the following command# first change the output and model directory d_model = d_root / 'trained_model' / 'cv_model' d_output = d_root / 'cleaned_data' / 'cv_data' cfg.d_model = d_model cfg.d_output = d_output # it is recommended for user to set the num_fold argument, # which specifies the number of cross validation folds # in which case, percentage of test set and validation set data # will be set to 1/num_fold and remaining data will be the training set # e.g. 
s2 = Session(str_sub=str_sub, vec_idx_run=vec_idx_run, str_arch='default_rnn_model', random_seed=1997, verbose=2, overwrite=True, cv_mode=True, num_fold=5, cfg=cfg) # otherwise the number of cross validation folds will be inferred from # percentage of test set data set in the config yaml file via 1/per_test # s2 = Session(str_sub=str_sub, vec_idx_run=vec_idx_run, str_arch='default_rnn_model', # random_seed=1997, verbose=2, overwrite=True, # cv_mode=True, cfg=cfg)Remaining commands are the sames2.load_all_dataset() s2.prepare_training() s2.train() s2.clean() s2.plot_training_history() s2.evaluate(mode='test') # Additionally, in the cross validation mode, the user can # specify the fold (0-indexing) from which the figures are # to be plotted # For the demo, plot using the 3rd fold (note the 0-indexing) idx_fold = 2 s2.plot_random_epoch(str_ch_eeg='T8', mode='test', idx_fold=idx_fold) s2.plot_psd(str_ch_eeg='avg', mode='test', idx_fold=idx_fold) s2.plot_psd(str_ch_eeg='T8', mode='test', idx_fold=idx_fold) s2.save_model() s2.save_data()Freedom of Information Law Opinions----- Table of Contents[Download the Data](Data)- [Data Preprocessing](Prepare-Data-Structures)[Non-Negative Matrix Factorization](Non-Negative-Matrix-Factorization)- [Parameter Tuning](Tuning-the-Parameters)- [Topic Generation](Topics)[Investigate Topics](Evaluating-the-Topic-Determinations)- [Data Visualization](Visualizing-Topics)- [Noise Reduction](Reducing-Noise)[Topic and Document Fingerprints](Topic-and-Document-Fingerprints)- [Finding Similar Documents](Find-Documents-with-Matching-Topic-Fingerprints) Topic ModelingWhile the Committee sorts their important Opinions into major content topics, like "Abstention from Voting" or "Tape Recorders, Use of," there are other topics contained in those documents—topics that could be useful to lawyers or citizens or politicians to understand and group together. An informed researcher pouring over the documents could come up with a dozen topics within each document. In this sense, we can think of documents as a recipe with topics as its ingredients:> **Advisory Opinion 8762 Recipe**> 1 cup describing the Audit Committee> 2 cups clarifying the Statute of Limitations> 1.5 tablespoons quoting §260-a of the Education Law> 1 teaspoon invoking *French v. Board of Education*But there really aren't that many experts on New York State Freedom of Information Law Advisory Committee Opinions. And unfortunately, none of them are available for hundreds of hours of document classification. Instead, we can computationally infer these topics by looking at the kinds of words used in an Opinion and then comparing those word choices to the words used in all of the Opinions. By doing this over and over again, and applying different statistics to the results, one can generate latent topics, see how similar two Opinions are to one another, or determine how likely a new Opinion is to fit into one of our topics.With enough data, we could determine how likely a given request is to get a favorable Opinion, or which court cases are most likely to help an appeal. But first, let's discover topics in the already available Opinions from the Committee. 
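Before touching the real corpus, here is a tiny, self-contained sketch of that idea using the same TfidfVectorizer + NMF recipe this notebook applies later. The four toy documents and the two topics are invented purely for illustration.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

toy_docs = [
    "meeting notice quorum vote board minutes",
    "records request access disclosure agency",
    "board meeting vote quorum executive session",
    "agency records access request appeal denial",
]
vec = TfidfVectorizer()
tfidf = vec.fit_transform(toy_docs)
nmf = NMF(n_components=2, init="nndsvd", random_state=0).fit(tfidf)
terms = vec.get_feature_names()  # newer scikit-learn versions: get_feature_names_out()
for idx, component in enumerate(nmf.components_):
    top = [terms[j] for j in component.argsort()[::-1][:3]]
    print("Toy topic {}: {}".format(idx, ", ".join(top)))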
------------If you want to follow along and didn't run the webscraping notebook to generate the data, you can get the data by running the following Data code cell: { [Table of Contents](Table-of-Contents) } Dataimport requests import pickle # Finding and defining your home directory home_dir = !echo $HOME home = home_dir[0] +'/' #downloading the pickled data and writing it to a file in your home directory url = 'http://cpanel.ischool.illinois.edu/~adryden3/FOILAdvisoryDecisiondataWithText.pickle' r = requests.get(url, allow_redirects=True) with open(home + 'FOILdata.pickle', 'wb') as f: f.write(r.content) #reading the data and unpickeling it into a variable with open(home + 'FOILdata.pickle', 'rb') as g: opinions = pickle.load(g) #remove the data_dict related to this data's provenance if 'data_dictionary' in opinions: opinions.pop('data_dictionary') #set up the notebook and required libraries % matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore")----- { [Table of Contents](Table-of-Contents) } Prepare Data StructuresThe pickled file includes a clean-text version of the Advisory Opinions from the web scraping notebook. From these, we generate a list of Opinions and two indexes to help us interpret the results. The indexes represent the keywords or topics that Advisory Committee members applied to the Opinions. -----#create indexes to use in dataframes later human_topics = [] human_topic_number = [] #generate a list of the plain-text opinions #skip any opinions where the full text isn't available opinions_list = [] number = -1 for key in opinions: number+=1 human_topics.append(key) if len(opinions[key][3])==0: number-=1 human_topics.pop() else: for i in range(len(opinions[key][3])): opinions_list.append(opinions[key][3][i]) human_topic_number.append(number)----- { [Table of Contents](Table-of-Contents) } Generate TF:IDF The first machine learning process we are going to use looks at documents as a collection of words, and we serve it that collection as a list of terms, or matrix, and their frequency. To account for the fact that some words are more common than others, the frequency statistic for a term in a document is increased the more times it is mentioned in the document, and it is decreased the more times it is mentioned in the corpus. This way, rare but meaningful terms aren't drowned out by more common ones.Words that appear in more than 90% of the documents aren't considered at all, and common or functional words that don't independently encode a lot of information ('the', 'it', 'for') are also removed. -----# Use TD-IDF on opinions corpus from sklearn.feature_extraction.text import TfidfVectorizer cv = TfidfVectorizer(stop_words = 'english', lowercase=True, min_df=1, max_df=0.9, max_features=5000) opinion_vector = cv.fit_transform(opinions_list)----- { [Table of Contents](Table-of-Contents) } Non-Negative Matrix Factorization Matrix factorization is like numerical factorization (the factors of 12 are the numbers that can be multiplied to equal 12: 2x6, 3x4). This algorithm takes the TF-IDF term matrices that we produced and yeilds the matrix factors, significantly reducing their dimensions in the process. This factoring has an inherent clustering effect because it computationally reduces documents to component relationships among their terms. Tuning the Parameters Unsupervised learning techniques can do a remarkable amount of work, but they can't always do everything. 
Here, we must indicate how many topics to find in the corpus. This is one of the most important aspects of this type of data analysis, and it usually requires some degree of domain knowledge and some experimentation. Different domains will inherently have different topic ranges, but you don't want to ask for so many topics that each document becomes its own topic, nor do you want topics so generic that they don't reveal anything meaningful about documents and clusters. For this domain, I felt 60 topics was appropriate because the corpus is made up of very specific requests, and therefore has a lot of granular detail that can be explored. In addition, the tool this supports privileges returning relevant information with little noise. -----# Compute topics by using NMF from sklearn.decomposition import NMF #select number of topics num_opinion_topics = 60 #fit the model to the TFIDF vectors nmf_opinion = NMF(n_components = num_opinion_topics, max_iter = 1000).fit(opinion_vector) # Function to compute and display the top topic terms def get_topics(cv, model): # Number of terms per topic to display max_topics = 10 # Number of terms per topic to retain max_labels = 5 topics = [] feature_names = cv.get_feature_names() # Iterate through the matrix components for idx, topic in enumerate(model.components_): # First we sort the terms in descending order ([::-1]) # And then retain only the top terms top_topics_idx = topic.argsort()[::-1][:max_topics] top_topics = [feature_names[jdx] for jdx in top_topics_idx] # Now extract out the terms themselves and display top_features = " ".join(top_topics) print('Topic {0:2d}: {1}'.format(idx, top_features)) topics.append(", ".join(top_topics[:max_labels])) return(topics){ [Table of Contents](Table-of-Contents) } Topics Topics are described by their representative terms. Here we have our 60 topics and their top 10 terms. Scanning through this list is a great way to understand how well the algorithm is performing and to gain insights into other processing that can improve results. For example, numbers and number-digit combinations likely represent references to laws or codes, which will inspire us to use Named Entity Recognition techniques to link documents that reference the same regulations or court cases, since these multi-word references get jumbled in the Bag-of-Words approach used by our topic model; a rough sketch of that linking idea follows. 
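As a stand-in for proper Named Entity Recognition, a simple regular expression can already pull section-style statute references (e.g. "§1709") out of each opinion so that documents citing the same provision can be linked. This is only a sketch that assumes the opinions_list built earlier is available; statute_re and by_statute are illustrative names.
import re
from collections import defaultdict

# crude pattern for section references such as "§1709" or "§260-a"
statute_re = re.compile(r"§\s?\d+[\w-]*")

by_statute = defaultdict(list)
for idx, text in enumerate(opinions_list):
    for ref in set(statute_re.findall(text)):
        by_statute[ref].append(idx)

# provisions cited by the largest number of opinions
print(sorted(by_statute.items(), key=lambda kv: -len(kv[1]))[:5])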
-----#print the topics and their top 10 terms nmf_opinion_topics = get_topics(cv, nmf_opinion)Topic 0: duly number quorum majority persons officers meeting duty total power Topic 1: notice posted meeting given media locations news time place emergency Topic 2: session particular person employment 105 corporation appointment history financial leading Topic 3: minutes clerk town verbatim accurate 30 consist account determine prepare Topic 4: task force 2d governmental consisting decisions 1989 function ad mayor Topic 5: political caucuses party caucus conferences exemption committees legislative 108 deliberations Topic 6: 518 474 dos coog website commerce 1927 plaza 12231 2518 Topic 7: county legislature maginn members subcommittee leader chairman hawkes supervisors quorum Topic 8: records agency freedom information access available record kept regulations produced Topic 9: client attorney privilege relationship legal confidential advice communications privileged session Topic 10: formal decision gathering intended official court gatherings legislature safeguard appeals[...]----- { [Table of Contents](Table-of-Contents) } Understanding Topic TermsLooking at a random selection of topics, we can quickly see that the algorithm has generated fairly coherent topics at a level of granularity appropriate for our purposes. >Topic 0: duly quorum number majority persons officers meeting duty total power>Topic 14: tape recording use devices recorders davidson unobtrusive detract deliberative court>Topic 51: clerk minutes town verbatim accurate 30 account determine consist prepareNow let's look at how the algorithm applied these topics to our documents and to the clusters tagged by humans. In order to do so, we need to normalize the distributions so that we can read them as percentages and preserve the data. We then combine and average the topic distributions for Opinions that had the same human applied topics. Quickly examining a few documents and their topics compared to the human-provided topic will help us understand if the application will be successful. 
To do so, let's look at a document that was classified by our model as including topic 14.-----from sklearn.preprocessing import normalize #transforma and normalize the data; #using the l1-norm the document topic probabilites sum to unity td_opinion = nmf_opinion.transform(opinion_vector) td_opinion_norm = normalize(td_opinion, norm='l1', axis=1) #generate a dataframe with the data and add a column with the index of the #human applied topic df_opinion = pd.DataFrame(td_opinion_norm, columns=nmf_opinion_topics).round(2) df_opinion.fillna(value=0, inplace=True) df_opinion['label'] = pd.Series(human_topic_number) #Now we group the human labeled opinions together and average their topic distributions df_opinion_labeled = df_opinion.groupby('label').mean().round(2) df_opinion_labeled['Human Label'] = pd.Series(human_topics, dtype='category') #summary of all of the records labeled Tape Recorder, Use of at Executive Session display_series = pd.Series(df_opinion_labeled.iloc[195]) print("Human Category: ", display_series[60]) print("\nNMF Distribution of Topics: ") #show the top 5 topics the model applied to the human topic display_series[:60].sort_values(ascending=False)[:5]Human Category: Tape Recorders, Use of at Executive Session NMF Distribution of Topics:--- { [Table of Contents](Table-of-Contents) } Evaluating the Topic DeterminationsSo, the topic model looked at the Opinions classified by humans as "Tape Recorders, Use of at Executive Session" and determined that their number one topic was "tape, recording, use, . . ". Which is about as close as we could ask for. However, the topic model also tells us that these Opinions are also about records, education and confidentiality. To get an idea of how accurate the model is, let's take a look at one of those documents and highlight some key terms.--- OML-AO-05384 December 27, 2013 The staff of the Committee on Open Government is authorized to issue advisory opinions. The ensuing staff advisory opinion is based solely upon the facts presented in your correspondence. Dear : This is in response to your request for an advisory opinion regarding application of the Open Meetings Law to executive sessions of the Oppenheim-Ephratah-St. Johnsville Central School District, and in particular, a rule that would prohibit a board member from tape recording discussions held in executive session. It is our opinion that a rule prohibiting one board member from recording discussions held in executive session without consent of the board would be reasonable. Initially, this will confirm that many believe recordings of executive session discussions are not desirable because copies of such records may be sought pursuant to FOIL, at which point the agency would have to determine whether the records were required to be made available, in whole or in part. As you correctly point out, it has long been the opinion of this office that notes taken and recordings made during executive sessions are “records” subject to the Freedom of Information Law, the contents of which would determine rights of access. Further, a tape recording of an executive session may be subject to subpoena or discovery in the context of litigation. Disclosure in that kind of situation may place a public body at a disadvantage should litigation arise relative to a topic that has been appropriately discussed behind closed doors. 
Although in our opinion they are not prohibited by statute, surreptitious recordings of executive sessions made by one school board member resulted in a decision from the Commissioner of Education essentially warning members that such behavior would result in removal from the board. See, Application of Nett and Raby (No. 15315, October 24, 2005). As indicated, you “began taping executive sessions to assure compliance with the laws governing executive session,” and it is your way of taking notes. In that regard, we are aware that on perhaps many occasions, discussions that are appropriate for executive session evolve into those that are not. This will confirm that there is no statute that deals directly with the taping of executive sessions. Several judicial decisions have dealt with the ability to use recording devices at open meetings, and although those decisions do not refer to the taping of executive sessions, they are likely pertinent to the matter. Perhaps the leading decision concerning the use of tape recorders at meetings, a unanimous decision of the Appellate Division, involved the invalidation of a resolution adopted by a board of education prohibiting the use of tape recorders at its meetings ( Mitchell v. Board of Education of Garden City School District , 113 AD 2d 924 [1985]). In so holding, the Court stated that: “While Education Law sec. 1709(1) authorizes a board of education to adopt by-laws and rules for its government and operations, this authority is not unbridled. Irrational and unreasonable rules will not be sanctioned. Moreover, Public Officers Law sec. 107(1) specifically provides that 'the court shall have the power, in its discretion, upon good cause shown, to declare any action *** taken in violation of [the Open Meetings Law], void in whole or in part.' Because we find that a prohibition against the use of unobtrusive recording goal of a fully informed citizenry, we accordingly affirm the judgement annulling the resolution of the respondent board of education” (id. at 925). Authority to tape record meetings in accordance with the above decision is now set forth in the Open Meetings Law; however, §103(d)(i) pertains only to those meetings that are open to the public. While there are no decisions that deal with the use of tape recorders during executive sessions, we believe that the principle in determining that issue is the same as that stated above, i.e., that a board may establish reasonable rules governing the use of tape recorders at executive sessions. Unlike an open meeting, when comments are conveyed with the public present, an executive session is generally held in order that the public cannot be aware of the details of the deliberative process. When an issue focuses upon a particular individual, the rationale for permitting the holding of an executive session generally involves an intent to protect personal privacy, coupled with an intent to enable the members of a public body to express their opinions freely. As previously mentioned, tape recording executive sessions may result in unforeseen and potentially damaging consequences. In short, we are suggesting that tape recording an executive session could potentially defeat the purpose of holding an executive session. 
Accordingly, it is our opinion that it would be reasonable for a board of education, based on its authority to adopt rules to govern its own proceedings conferred by §1709 of the Education Law, to prohibit a member from using a tape recorder at an executive session absent the consent of a majority of the board. Should you have further questions, please contact me directly. Sincerely, Assistant Director CSJ:mm c: Board of Education --- { [Table of Contents](Table-of-Contents) }It is easy to see at a glance that "education" and "records" are important contexts for this Opinion. And the algorithm accurately identifies the "confidentiality" topic despite there being few vocabulary words directly about confidentiality. So the algorithm seems to have reproduced the essential qualities of the human-determined topic while describing and quantifying other topics as well. But the model is still a little noisy. We only took the top 5 topics, but there were more, and those could weaken other signals and increase processing resources down the road, so we will clean them out later in the notebook. --- --- { [Table of Contents](Table-of-Contents) } Visualizing Topics We can use a heatmap to visually explore the whole topic space and look for tendencies. There are a total of 507 human labels, and for each one the model developed a topic distribution. In this heatmap, the dark spots indicate where the model found predominantly one single topic for a human label. Had the model and the humans come up with the exact same results, there would be 507 dark purple dots. Inspecting this can give us a rough idea of how diffuse the topics are. ---import seaborn as sns df = df_opinion.drop('label', axis=1) fig, ax = plt.subplots(1, 1, figsize = (20, 15)) hm = sns.heatmap(df_opinion_labeled.drop('Human Label', axis=1).transpose(), xticklabels=5) hm.axes.set_title('Topics per Human Label', fontsize=50) hm.axes.set_xlabel('Human Label Number', fontsize=40) hm.axes.set_ylabel('NMF Topics', fontsize=40) hm.axes.set_yticklabels(range(60)) sns.set(font_scale=1)---So far it looks like we have a good deal of agreement while still generating more nuance than a single label. Because the data was loaded in alphabetical order by human topic, we should be able to confirm this tendency by looking at the topic distributions of each individual document in a heatmap. ---df = df_opinion.drop('label', axis=1) fig, ax = plt.subplots(1, 1, figsize = (20, 15)) hm = sns.heatmap(df, yticklabels=10) hm.axes.set_title('Corpus Topic Map', fontsize=50) hm.axes.set_xlabel('Topic Words', fontsize=40) hm.axes.set_ylabel('Document Number', fontsize=40) sns.set(font_scale=1)
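As a quick numeric companion to these heatmaps (a sketch only, reusing the df_opinion_labeled frame built above), we can look at the weight of the single strongest NMF topic for each human label; values near 1 mean a label maps cleanly onto one latent topic, while low values mean the label is spread across several.
# weight of the dominant NMF topic for each human label
dominant_weight = df_opinion_labeled.drop('Human Label', axis=1).max(axis=1)
print(dominant_weight.describe())
# the human labels whose Opinions are most diffusely spread over topics
print(dominant_weight.sort_values().head(10))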
{ [Table of Contents](Table-of-Contents) } Reducing Noise---Topics that account for less than 6% of a document don't contribute very much to our understanding, and they may reduce the signal from more important topics, so we will eliminate any values below .06. Our applications can take advantage of algorithms that optimally handle sparse data, so this cleaning will also improve performance. We can also use it as an opportunity to measure the health of the model. If any documents lost more than 40% of their signal from this cleaning, it means that the model wasn't able to find a good latent topic for the Opinion. That could just be an anomalous Opinion, but if we see it happening a lot then it means we need to reevaluate the model.---#these are all of the opinions and their distribution over the latent topics df_graph = pd.DataFrame(td_opinion_norm).round(2) df_graph.fillna(value=0, inplace=True) #to eliminate noise from the data, we drop anything less than .06 from the topic distribution df_graph.mask(df_graph<.06, 0, inplace=True) #to make sure we didn't have a lot of data that was primarily made up of these noisy fragments #we sum each opinion's remaining values and filter out any that are below 0.6 (i.e. lost more than 40% of their signal) sum_series = df_graph.transpose().sum() sum_series.where(sum_series<.6).dropna()---Only 1 was below the threshold we set, and only 6 were close to it. At this point we can inspect those documents, but no matter what we should save these results in the data cleaning log before we renormalize the values. After we apply l1 normalization, topics per Opinion will again sum to 1 with the noise eliminated. Now we can evaluate the concentration of topics by sorting the Opinions by topic, starting with the topics with the highest cumulative score in the corpus. To the degree that the topics are concentrated, this will cause dark clumps to appear in the heatmap.---df_graph = pd.DataFrame(normalize(df_graph, norm='l1', axis=1).round(2)) fig, ax = plt.subplots(1, 1, figsize = (20, 15)) hm = sns.heatmap(df_graph.sort_values([i for i in df_graph.sum().sort_values(ascending=False).index], ascending=False), xticklabels=2, yticklabels=10) hm.axes.set_title('Opinions Ordered by Corpus Topic Weight', fontsize=40) hm.axes.set_xlabel('Topic Number', fontsize=40) hm.axes.set_ylabel('Document Number', fontsize=40) sns.set(font_scale=1)---We can further inspect a particular topic by sorting on that topic. This can help us see connections among different topics, and give us ideas about how to detect them automatically.---fig, ax = plt.subplots(1, 1, figsize = (20, 15)) hm = sns.heatmap(df_graph.sort_values([10], ascending=False)[:75], xticklabels=2) hm.axes.set_title('Top 75 Opinions for Topic 10', fontsize=30) hm.axes.set_xlabel('Topic Number', fontsize=20) hm.axes.set_ylabel('Document Number', fontsize=20) sns.set(font_scale=1)--- { [Table of Contents](Table-of-Contents) } Topic and Document FingerprintsWe can now use these topic and document distributions to represent the documents and topics within a database. This allows us to conduct powerful searches or document comparisons quickly using vector arithmetic; a small sketch of that arithmetic follows, and the notebook's find_similar function below does the same comparison with an explicit loop. 
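A minimal sketch of that vector arithmetic, assuming the df_graph DataFrame from the cells above: one Opinion's fingerprint is compared against every other Opinion at once via cosine similarity rather than looping document by document. Index 50 is simply the document used in the fingerprint plots below.
import numpy as np

fingerprints = df_graph.values               # one row of topic weights per Opinion
target = fingerprints[50]                    # an arbitrary Opinion to match against
norms = np.linalg.norm(fingerprints, axis=1) * np.linalg.norm(target)
sims = fingerprints @ target / np.where(norms == 0, 1, norms)
sims[50] = -1                                # exclude the self-match
print("Closest Opinion:", int(sims.argmax()), "cosine similarity:", round(float(sims.max()), 3))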
---fig, (ax1, ax2, ax3) = plt.subplots(3,1, figsize = (20, 9)) cbar_ax = fig.add_axes([.91,.1,.015,.75]) plt.subplots_adjust(hspace = 2) hm1 = sns.heatmap(df_graph.transpose()[:1], xticklabels=10, ax=ax1, cbar_ax=cbar_ax) hm1.axes.set_title('Topic Fingerprint (Distribution in Corpus)', fontsize=20) hm1.axes.set_xlabel('Documents', fontsize=20) hm1.axes.set_ylabel('Topic 0', fontsize=20) sns.set(font_scale=1) hm2 = sns.heatmap(df_graph.iloc[[50]], xticklabels=2, ax=ax2,cbar_ax=cbar_ax) hm2.axes.set_title('Document Fingerprint (Topic Distribution)', fontsize=20) hm2.axes.set_xlabel('Topics', fontsize=20) hm2.axes.set_ylabel('Document 50', fontsize=20) sns.set(font_scale=1) hm3 = sns.heatmap(df_graph.iloc[[50,930]], xticklabels=2, ax=ax3,cbar_ax=cbar_ax) hm3.axes.set_title('Compare Documents (Topic Distributions)', fontsize=20) hm3.axes.set_xlabel('Topics', fontsize=20) hm3.axes.set_ylabel('Document Number', fontsize=20) sns.set(font_scale=1)Find Documents with Matching Topic FingerprintsWe can compare fingerprints using a technique called Cosine Similarity. Essentially, we measure the distance between two topic distributions as if they were lines passing trough the topic space. This function takes an Opinion and finds the Opinion in the corpus with the most similar topic distribution and displays them both.from numpy import dot from numpy.linalg import norm def find_similar(df, index): top_score = 0 match = 0 a = df.iloc[index] for i in range(len(df)): if i == index: pass else: b = df.iloc[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) if cos_sim > top_score: match = i top_score = cos_sim fig, (ax1,ax2) = plt.subplots(2,1, figsize = (20,6)) cbar_ax = fig.add_axes([.91,.1,.015,.75]) plt.subplots_adjust(hspace = .1) hm1 = sns.heatmap(df.iloc[[index]], xticklabels = False, ax=ax1, cbar_ax = cbar_ax) hm1.axes.set_title('Target + Match', fontsize=25) hm1.axes.set_ylabel('Target Opinion', fontsize=15) hm2 = sns.heatmap(df.iloc[[match]], xticklabels = 2, ax=ax2, cbar_ax = cbar_ax) hm2.axes.set_xlabel('Topic Number', fontsize=20) hm2.axes.set_ylabel('Match Opinion', fontsize=15) find_similar(df_graph, 330)News Headlines SentimentUse the news api to pull the latest news articles for bitcoin and ethereum and create a DataFrame of sentiment scores for each coin. Use descriptive statistics to answer the following questions:1. Which coin had the highest mean positive score?2. Which coin had the highest negative score?3. 
Which coin had the highest positive score?# Initial imports import os import pandas as pd from dotenv import load_dotenv from nltk.sentiment.vader import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() %matplotlib inline load_dotenv() # Read your api key environment variable api_key = os.getenv("NEWS_API_KEY") # Create a newsapi client from newsapi import NewsApiClient newsapi = NewsApiClient(api_key=api_key) # Fetch the Bitcoin news articles btc_news_en = newsapi.get_everything( q="bitcoin", language="en" ) # Fetch the Ethereum news articles eth_news_en = newsapi.get_everything( q="ethereum AND ether", language="en" ) # Create the Bitcoin sentiment scores DataFrame btc_sentiments = [] for article in btc_news_en["articles"]: try: text = article["content"] date = article["publishedAt"][:10] sentiment = analyzer.polarity_scores(text) compound = sentiment["compound"] pos = sentiment["pos"] neu = sentiment["neu"] neg = sentiment["neg"] btc_sentiments.append({ "text": text, "compound": compound, "positive": pos, "negative": neg, "neutral": neu }) except AttributeError: pass # Create DataFrame btc_df = pd.DataFrame(btc_sentiments) btc_df.head() # Create the ethereum sentiment scores DataFrame eth_sentiments = [] for article in eth_news_en["articles"]: try: text = article["content"] date = article["publishedAt"][:10] sentiment = analyzer.polarity_scores(text) compound = sentiment["compound"] pos = sentiment["pos"] neu = sentiment["neu"] neg = sentiment["neg"] eth_sentiments.append({ "text": text, "compound": compound, "positive": pos, "negative": neg, "neutral": neu }) except AttributeError: pass # Create DataFrame eth_df = pd.DataFrame(eth_sentiments) eth_df.head() # Describe the Bitcoin Sentiment btc_df.describe() # Describe the Ethereum Sentiment eth_df.describe()Questions:Q: Which coin had the highest mean positive score?A: Q: Which coin had the highest compound score?A: Q. Which coin had the highest positive score?A: --- TokenizerIn this section, you will use NLTK and Python to tokenize the text for each coin. Be sure to:1. Lowercase each word2. Remove Punctuation3. Remove Stopwordsfrom nltk.tokenize import word_tokenize, sent_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer, PorterStemmer from string import punctuation import re import nltk lemmatizer = WordNetLemmatizer() # Expand the default stopwords list if necessary sentence_tokenized = [sent_tokenize(i) for i in btc_df["text"]] print(sentence_tokenized) # Complete the tokenizer function def tokenizer(text): """Tokenizes text.""" sw = set(stopwords.words('english')) regex = re.compile("[^a-zA-Z ]") #regex = re.compile("btc_df["text"]") re_clean = regex.sub('', text) words = word_tokenize(re_clean) lem = [lemmatizer.lemmatize(word) for word in words] output = [word.lower() for word in lem if word.lower() not in sw] # Create a list of the words # Convert the words to lowercase # Remove the punctuation # Remove the stop words # Lemmatize Words into root words return output # Create a new tokens column for bitcoin btc_df["btc_tokenized"] = btc_df["text"].apply(tokenizer) btc_df # Create a new tokens column for ethereum eth_df["eth_tokenized"] = eth_df["text"].apply(tokenizer) eth_df--- NGrams and Frequency AnalysisIn this section you will look at the ngrams and word frequency for each coin. 1. Use NLTK to produce the n-grams for N = 2. 2. 
List the top 10 words for each coin.from collections import Counter from nltk import ngrams # Generate the Bitcoin N-grams where N=2 #def bigram_counter(btc_tokenized): # Combine all articles in corpus into one large string big_string_btc = ''.join(btc_df.text) #processed = process_text(big_string) bigrams = ngrams(big_string_btc.split(), n=2) top_10 = dict(Counter(bigrams).most_common(10)) pd.DataFrame(list(top_10.items()), columns=['bigram', 'count']) # Generate the Ethereum N-grams where N=2 #def bigram_counter(btc_tokenized): # Combine all articles in corpus into one large string big_string_eth = ''.join(eth_df.text) #processed = process_text(big_string) bigrams = ngrams(big_string_eth.split(), n=2) top_10 = dict(Counter(bigrams).most_common(10)) pd.DataFrame(list(top_10.items()), columns=['bigram', 'count']) # Use the token_count function to generate the top 10 words from each coin def token_count(tokens, N=10): """Returns the top N tokens from the frequency count""" return Counter(tokens).most_common(N) # Get the top 10 words for Bitcoin token_count(tokenizer(big_string_btc)) # Get the top 10 words for Ethereum token_count(tokenizer(big_string_eth))Word CloudsIn this section, you will generate word clouds for each coin to summarize the news for each coinfrom wordcloud import WordCloud import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import matplotlib as mpl mpl.rcParams['figure.figsize'] = [20.0, 10.0] # Generate the Bitcoin word cloud wc = WordCloud().generate(" ".join(tokenizer(big_string_btc))) plt.imshow(wc) # Generate the Ethereum word cloud wc = WordCloud().generate(" ".join(tokenizer(big_string_eth))) plt.imshow(wc)Named Entity RecognitionIn this section, you will build a named entity recognition model for both coins and visualize the tags using SpaCy.import spacy from spacy import displacy # Optional - download a language model for SpaCy # !python -m spacy download en_core_web_sm # Load the spaCy model nlp = spacy.load('en_core_web_sm')Bitcoin NER# Concatenate all of the bitcoin text together print(big_string_btc) # Run the NER processor on all of the text doc = nlp(big_string_btc) doc.user_data["title"]= "BTC NER" displacy.render(doc, style='ent') # Add a title to the document #doc.user_data["title"]= "BTC NER" # Render the visualization # YOUR CODE HERE! # List all Entities for ent in doc.ents: print(ent.text, ent.label_)Microsoft ORG Ukraine GPE nine years DATE more than $10 million MONEY Microsoft ORG 2016 to 2018 DATE US GPE first ORDINAL US GPE October DATE days ago DATE about $1 billion MONEY bitcoin GPE the Silk Road FAC 2013 DATE one CARDINAL November 2017 DATE two-month DATE Bitcoin GPE 10,000 MONEY first ORDINAL the next 20 days DATE 19,665 MONEY the past few weeks DATE bitcoin GPE digital ORG hype GPE chars]2018 ORG 2020 DATE Tesla Tequila ORG April DATE Elon Musk ORG Teslaquila WORK_OF_ART U.S. GPE Individual X WORK_OF_ART chars]4 CARDINAL U.S. 
GPE November DATE today DATE 500 CARDINAL 10.8 percent PERCENT November DATE monthly DATE April DATE fourth ORDINAL Williams PERSON PERSON PERSON Eddard PERSON acolyte PERSON the Faceless Men WORK_OF_ART Monday DATE $1 billion MONEY Silk Road ORG Ross Ulbricht PERSON this week DATE chars]1 CARDINAL 1 CARDINAL Microsoft ORG Ukraine GPE nine years DATE more than $10 million MONEY Microsoft ORG chars]8 CARDINAL 7 CARDINAL Bitcoin [...]--- Ethereum NER# Concatenate all of the bitcoin text together print(big_string_eth) # Run the NER processor on all of the text doc = nlp(big_string_eth) doc.user_data["title"]= "ETH NER" displacy.render(doc, style='ent') # Add a title to the document # YOUR CODE HERE! # Render the visualization # YOUR CODE HERE! # List all Entities for ent in doc.ents: print(ent.text, ent.label_)2020 DATE U.S. GPE Wednesday DATE morning TIME Elon Musk WORK_OF_ART ETH ORG Ethereum 2.0s ORG Wednesday DATE U.S. GPE Two CARDINAL Nearly $1 billion MONEY BTC ORG recent weeks DATE 40% PERCENT the last month DATE 500 MONEY last week DATE first ORDINAL June DATE bitcoin GPE PERSON first ORDINAL ul>
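As an optional follow-up (a sketch; doc here is the spaCy document produced from the Ethereum text in the previous cell), the entity labels can be tallied to summarize which kinds of entities dominate the coverage.
from collections import Counter

# count how often each entity label (ORG, GPE, DATE, MONEY, ...) appears
label_counts = Counter(ent.label_ for ent in doc.ents)
print(label_counts.most_common())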
  • Analyzing the UncertaintyForest Class by Reproducing Conditional Entropy EstimatesThis set of four tutorials (`uncertaintyforest_running_example.ipynb`, `uncertaintyforest_posteriorestimates.ipynb`, `uncertaintyforest_conditionalentropyestimates.ipynb`, and `uncertaintyforest_mutualinformationestimates.ipynb`) will explain the UncertaintyForest class. After following these tutorials, you should have the ability to run UncertaintyForest on your own machine and generate Figures 1, 2, and 3 from [this paper](https://arxiv.org/pdf/1907.00325.pdf), which help you to visualize a comparison of the estimated posteriors and conditional entropy values for several different algorithms. If you haven't seen it already, take a look at other tutorials to setup and install the ProgLearn package: `installation_guide.ipynb`.*Goal: Run the UncertaintyForest class to produce a figure that compares estimated conditional entropy values for the UncertaintyForest, CART, and IRF algorithms, as in Figure 2 from [this paper](https://arxiv.org/pdf/1907.00325.pdf)* Import Required Packagesimport numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.calibration import CalibratedClassifierCV from proglearn.forest import UncertaintyForest from functions.unc_forest_tutorials_functions import plot_fig2Specify Parameters# The following are two sets of parameters. # The first are those that were actually used to produce figure 2. # These take a long time to actually run since there are up to 6000 data points. # Below those, you'll find some scaled-down parameters so that you can see the results more quickly. # Here are the paper reproduction parameters # mus = [i * 0.5 for i in range(1, 11)] # effect_size = 1 # d1 = 1 # d2 = 20 # n1 = 3000 # n2 = 6000 # num_trials = 20 # num_plotted_trials = 10 # sample_sizes_d1 = range(300, 1201, 90) # sample_sizes_d2 = range(500, 3001, 250) # Here are the scaled-down tutorial parameters mus = [ i * 0.5 for i in range(1, 3) ] # range of means of the data (x-axis in right column) effect_size = 1 # mu for left column d1 = 1 # data dimensions = 1 d2 = 3 # data dimensions = 1, noise dimensions = 19 n1 = 100 # number of data points for top row, right column (d1) n2 = 110 # number of data points for bottom row, right column (d2) num_trials = 2 # number of trials to run num_plotted_trials = 2 # the number of "fainter" lines to be displayed on the figure sample_sizes_d1 = range( 100, 120, 10 ) # range of data points for top row, left column (d1) sample_sizes_d2 = range( 100, 130, 10 ) # range of data points for bottom row, left column (d2)Specify LearnersNow, we'll specify which learners we'll compare (by label). Figure 2 uses three different learners, which are further specified in the function `estimate_ce`, which returns estimates of conditional entropy for a given dataset (X, y) and type of learner.# Algorithms used to produce Figure 2 algos = [ { "label": "CART", "title": "CART Forest", "color": "#1b9e77", }, { "label": "IRF", "title": "Isotonic Reg. Forest", "color": "#fdae61", }, { "label": "UF", "title": "Uncertainty Forest", "color": "#F41711", }, ] parallel = FalsePlot Figure 2Finally, we'll run the code to obtain and plot the estimated conditional entropy vs. 
means and sample sizes (4 subplots).plot_fig2( num_plotted_trials, d1, d2, n1, n2, effect_size, algos, num_trials, sample_sizes_d1, sample_sizes_d2, mus, parallel=parallel, )Convolutional Neural Network ExampleBuild a convolutional neural network with TensorFlow v2.This example is using a low-level approach to better understand all mechanics behind building convolutional neural networks and the training process.- Author: - Project: https://github.com/aymericdamien/TensorFlow-Examples/ CNN Overview![CNN](http://personal.ie.cuhk.edu.hk/~ccloy/project_target_code/images/fig3.png) MNIST Dataset OverviewThis example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 255. In this example, each image will be converted to float32 and normalized to [0, 1].![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)More info: http://yann.lecun.com/exdb/mnist/from __future__ import absolute_import, division, print_function import tensorflow as tf from tensorflow.keras import Model, layers import numpy as np # MNIST dataset parameters. num_classes = 10 # total classes (0-9 digits). # Training parameters. learning_rate = 0.001 training_steps = 200 batch_size = 128 display_step = 10 # Network parameters. conv1_filters = 32 # number of filters for 1st conv layer. conv2_filters = 64 # number of filters for 2nd conv layer. fc1_units = 1024 # number of neurons for 1st fully-connected layer. # Prepare MNIST data. from tensorflow.keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() # Convert to float32. x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32) # Normalize images value from [0, 255] to [0, 1]. x_train, x_test = x_train / 255., x_test / 255. # Use tf.data API to shuffle and batch data. train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1) # Create TF Model. class ConvNet(Model): # Set layers. def __init__(self): super(ConvNet, self).__init__() # Convolution Layer with 32 filters and a kernel size of 5. self.conv1 = layers.Conv2D(32, kernel_size=5, activation=tf.nn.relu) # Max Pooling (down-sampling) with kernel size of 2 and strides of 2. self.maxpool1 = layers.MaxPool2D(2, strides=2) # Convolution Layer with 64 filters and a kernel size of 3. self.conv2 = layers.Conv2D(64, kernel_size=3, activation=tf.nn.relu) # Max Pooling (down-sampling) with kernel size of 2 and strides of 2. self.maxpool2 = layers.MaxPool2D(2, strides=2) # Flatten the data to a 1-D vector for the fully connected layer. self.flatten = layers.Flatten() # Fully connected layer. self.fc1 = layers.Dense(1024) # Apply Dropout (if is_training is False, dropout is not applied). self.dropout = layers.Dropout(rate=0.5) # Output layer, class prediction. self.out = layers.Dense(num_classes) # Set forward pass. def call(self, x, is_training=False): x = tf.reshape(x, [-1, 28, 28, 1]) x = self.conv1(x) x = self.maxpool1(x) x = self.conv2(x) x = self.maxpool2(x) x = self.flatten(x) x = self.fc1(x) x = self.dropout(x, training=is_training) x = self.out(x) if not is_training: # tf cross entropy expect logits without softmax, so only # apply softmax when not training. x = tf.nn.softmax(x) return x # Build neural network model. conv_net = ConvNet() # Cross-Entropy Loss. 
# Note that this will apply 'softmax' to the logits. def cross_entropy_loss(x, y): # Convert labels to int 64 for tf cross-entropy function. y = tf.cast(y, tf.int64) # Apply softmax to logits and compute cross-entropy. loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x) # Average loss across the batch. return tf.reduce_mean(loss) # Accuracy metric. def accuracy(y_pred, y_true): # Predicted class is the index of highest score in prediction vector (i.e. argmax). correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1) # Adam optimizer. optimizer = tf.optimizers.Adam(learning_rate) # Optimization process. def run_optimization(x, y): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: # Forward pass. pred = conv_net(x, is_training=True) # Compute loss. loss = cross_entropy_loss(pred, y) # Variables to update, i.e. trainable variables. trainable_variables = conv_net.trainable_variables # Compute gradients. gradients = g.gradient(loss, trainable_variables) # Update W and b following gradients. optimizer.apply_gradients(zip(gradients, trainable_variables)) # Run training for the given number of steps. for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1): # Run the optimization to update W and b values. run_optimization(batch_x, batch_y) if step % display_step == 0: pred = conv_net(batch_x) loss = cross_entropy_loss(pred, batch_y) acc = accuracy(pred, batch_y) print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc)) # Test model on validation set. pred = conv_net(x_test) print("Test Accuracy: %f" % accuracy(pred, y_test)) # Visualize predictions. import matplotlib.pyplot as plt # Predict 5 images from validation set. n_images = 5 test_images = x_test[:n_images] predictions = conv_net(test_images) # Display image and model prediction. for i in range(n_images): plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') plt.show() print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))a.0) **Generate 10,000 numbers from the uniform distribution on the interval [-1, 1]. Plot their histogram with 100 bins. Compare the histogram with the plot of the density function.**# imports assumed from an earlier cell of this notebook import math import numpy as np import scipy.stats import matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib import cm from scipy.stats import multivariate_normal def TaskA(N): l, r = -1, 1 samples = np.random.uniform(l, r, N) count, bins, ignored = plt.hist(samples, bins=100, density=True) plt.plot(bins, np.ones_like(bins) / 2, linewidth=2, color='r') plt.show() TaskA(10000)a.1) **Repeat the computation for 100,000 numbers.**TaskA(100000)b.0) **Generate 10,000 numbers from the normal distribution with mean 5 and standard deviation 3. Plot their histogram with 100 bins. Compare the histogram with the plot of the density function.**def TaskB(N): mu, sigma = 5, 3 samples = np.random.normal(mu, sigma, N) count, bins, ignored = plt.hist(samples, bins=100, density=True) normal_d = multivariate_normal.pdf(bins, mean=mu, cov=sigma*sigma) plt.plot(bins, normal_d, linewidth=2, color='r') plt.show() TaskB(10000)b.1)**Repeat the computation for 100,000 numbers.**TaskB(100000)
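A side note, not part of the original exercise: for this one-dimensional case the density overlay in TaskB can equivalently be drawn with scipy.stats.norm, avoiding the multivariate machinery; the mean and standard deviation are the same as above.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

mu, sigma = 5, 3
samples = np.random.normal(mu, sigma, 100_000)
count, bins, _ = plt.hist(samples, bins=100, density=True)
# overlay the exact N(5, 3) density on the normalized histogram
plt.plot(bins, norm.pdf(bins, loc=mu, scale=sigma), linewidth=2, color='r')
plt.show()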
c.0) **Generate 10,000 points (x, y) whose x coordinate has the normal distribution N(2, 5) and whose y coordinate has the normal distribution N(3, 1). Plot these points. Compare the plot with the plot of the density function.**def TaskC(N): sigma_x, sigma_y = 5, 1 mean = (2, 3) cov = np.array([[sigma_x ** 2, 0], [0, sigma_y ** 2]]) bins_no = 100 X, Y = np.random.multivariate_normal(mean, cov, N).T # 2d histogram from random draw plt.title('2d histogram random draw') count, xedges, yedges, img_mesh = plt.hist2d(X, Y, bins=(bins_no, bins_no), density=True, norm=mcolors.PowerNorm(0.7), cmap=cm.viridis) xedges, yedges = xedges[:-1], yedges[:-1] xedges, yedges = np.meshgrid(xedges, yedges) F = scipy.stats.multivariate_normal(mean, cov) pos = np.empty(xedges.shape + (2,)) pos[:, :, 0] = xedges pos[:, :, 1] = yedges plt.contour(xedges, yedges, F.pdf(pos), cmap='hot') # 3d from random draw fig = plt.figure(figsize=(7, 6)) fig.suptitle('3d from random draw') ax = fig.gca(projection='3d') ax.plot_surface(xedges, yedges, count, rstride=3, cstride=3, linewidth=1, antialiased=True, cmap=cm.viridis) cset = ax.contourf(xedges, yedges, count, zdir='z', offset=-0.15, cmap=cm.viridis) ax.set_zlim(-0.15,0.2) ax.set_zticks(np.linspace(0,0.2,5)) ax.view_init(27, -21) plt.show() # 2d from PDF Z = F.pdf(pos) plt.imshow(Z, interpolation='none') plt.title('2d from PDF') # 3d from PDF fig = plt.figure(figsize=(7, 6)) fig.suptitle('3d from PDF') ax = fig.gca(projection='3d') ax.plot_surface(xedges, yedges, Z, rstride=3, cstride=3, linewidth=1, antialiased=True, cmap=cm.viridis) cset = ax.contourf(xedges, yedges, Z, zdir='z', offset=-0.15, cmap=cm.viridis) ax.set_zlim(-0.15,0.2) ax.set_zticks(np.linspace(0,0.2,5)) ax.view_init(27, -21) plt.show() TaskC(10000)c.1)**Repeat the computation for 100,000 points.**TaskC(100000)
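An optional sanity check, not part of the original exercise: the sample covariance of a large Task C draw should sit close to the diagonal covariance matrix that was specified.
import numpy as np

mean = (2, 3)
cov = np.array([[25, 0], [0, 1]])        # sigma_x**2 = 25, sigma_y**2 = 1
X, Y = np.random.multivariate_normal(mean, cov, 100_000).T
print(np.cov(X, Y))                      # expect a matrix close to [[25, 0], [0, 1]]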
Uzyskaną wartość porównaj z dokładnym prawdopodobieństwem takiego zdarzenia obliczonym w oparciu o rachunek prawdopodobieństwa i statystykę.**sigma_x, sigma_y = 5, 1 mean = (2, 3) cov = np.array([[sigma_x ** 2, 0], [0, sigma_y ** 2]]) def getXY(N): X, Y = np.random.multivariate_normal(mean, cov, N).T return (X, Y) N = 10000 X, Y = getXY(N) print(f'For N = {N1} P(X < Y) equals {np.sum(X < Y) / N * 100}%') N = 100000 X, Y = getXY(N) print(f'For N = {N} P(X < Y) equals {np.sum(X < Y) / N * 100}%') ''' P(X < Y) = P(X - Y < 0) Z = X - Y Z ~ N(mu_x - mu_y, sigma_x - sigma_y) = N(-1, sqrt(siqma_x ^ 2 + sigma_y ^2)) ''' print(f'Score by hand: {scipy.stats.norm.cdf(0, -1, math.sqrt(sigma_x ** 2 + sigma_y ** 2)) * 100}%')For N = 10000 P(X < Y) equals 56.730000000000004% For N = 100000 P(X < Y) equals 57.69800000000001% Score by hand: 57.774036626352995%Feature EngineeringSUFFIX_CAT = '__cat' for feature in df.columns: if isinstance(df[feature][0], list): continue factorized_values = df[feature].factorize()[0] if SUFFIX_CAT in feature: df[feature] = factorized_values else: df[feature + SUFFIX_CAT] = factorized_values cat_features = [x for x in df.columns if SUFFIX_CAT in x] cat_features = [x for x in cat_features if 'price' not in x] len(cat_features) def run_model(model, features): X = df[features].values y = df['price_value'].values scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error') return np.mean(scores), np.std(scores)Decision Treemodel = DecisionTreeRegressor(max_depth=5) run_model(model, cat_features)Random Forestmodel = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0) run_model(model, cat_features)XGBoostxgb_params = { 'max_depth': 5, 'n_estimators': 50, 'learning_rate': 0.1, 'seed': 0 } model = xgb.XGBRegressor(**xgb_params) run_model(model, cat_features) m = xgb.XGBRegressor(**xgb_params) m.fit(X, y) imp = PermutationImportance(m, random_state=0).fit(X, y) eli5.show_weights(imp, feature_names=cat_features) features = [ 'param_napęd__cat', 'param_rok-produkcji__cat', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model(model, features) df['param_rok-produkcji'].unique() df['param_moc'].unique() df['param_pojemność-skokowa'].unique() df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0])) df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ', ''))) num_features = ['param_rok-produkcji', 'param_moc', 'param_pojemność-skokowa'] for feature in num_features: features[features.index(feature + '__cat')] = feature run_model(model, features)Accessing Text Corpora and Lexical Resources Accessing Text Corpora Gutenberg Corpus Checking the files available in the Project Gutenberg corpus for nltk:from nltk.corpus import gutenberg gutenberg.fileids()Some information about each text in the corpus: average word length, average sentence length, and the 
average frequency of a token in the text.Note: `.raw()` outputs the contents of a given file without any processing. Thus, for example, `len(gutenberg.raw('austen-emma'.txt)` will output the count of characters in the text, including whitespaces. `.sent()` divides the text into sentences.for fileid in gutenberg.fileids(): num_chars = len(gutenberg.raw(fileid)) num_words = len(gutenberg.words(fileid)) num_sents = len(gutenberg.sents(fileid)) num_vocab = len(set(w.lower() for w in gutenberg.words(fileid))) print(round(num_chars/num_words), round(num_words/num_sents), round(num_words/num_vocab), fileid)5 25 26 austen-emma.txt 5 26 17 austen-persuasion.txt 5 28 22 austen-sense.txt 4 34 79 bible-kjv.txt 5 19 5 blake-poems.txt 4 19 14 bryant-stories.txt 4 18 12 burgess-busterbrown.txt 4 20 13 carroll-alice.txt 5 20 12 chesterton-ball.txt 5 23 11 chesterton-brown.txt 5 18 11 chesterton-thursday.txt 4 21 25 edgeworth-parents.txt 5 26 15 melville-moby_dick.txt 5 52 11 milton-paradise.txt 4 12 9 shakespeare-caesar.txt 4 12 8 shakespeare-hamlet.txt 4 12 7 shakespeare-macbeth.txt 5 36 12 whitman-leaves.txtLet's try to find the longest sentence in Macbeth.macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt') macbeth_sentences longest_len = max(len(s) for s in macbeth_sentences) print("Length of longest sentence: ", longest_len) print(([s for s in macbeth_sentences if len(s) == longest_len]))[['Doubtfull', 'it', 'stood', ',', 'As', 'two', 'spent', 'Swimmers', ',', 'that', 'doe', 'cling', 'together', ',', 'And', 'choake', 'their', 'Art', ':', 'The', 'mercilesse', 'Macdonwald', '(', 'Worthie', 'to', 'be', 'a', 'Rebell', ',', 'for', 'to', 'that', 'The', 'multiplying', 'Villanies', 'of', 'Nature', 'Doe', 'swarme', 'vpon', 'him', ')', 'from', 'the', 'Westerne', 'Isles', 'Of', 'Kernes', 'and', 'Gallowgrosses', 'is', 'supply', "'", 'd', ',', 'And', 'Fortune', 'on', 'his', 'damned', 'Quarry', 'smiling', ',', 'Shew', "'", 'd', 'like', 'a', 'Rebells', 'Whore', ':', 'but', 'all', "'", 's', 'too', 'weake', ':', 'For', 'braue', 'Macbeth', '(', 'well', 'hee', 'deserues', 'that', 'Name', ')', 'Disdayning', 'Fortune', ',', 'with', 'his', 'brandisht', 'Steele', ',', 'Which', 'smoak', "'", 'd', 'with', 'bloody', 'execution', '(', 'Like', 'Valours', 'Minion', ')', 'caru', "'", 'd', 'out', 'his', 'passage', ',', 'Till', 'hee', 'fac', "'", 'd', 'the', 'Slaue', ':', 'Which', 'neu', "'", 'r', 's[...]Web and Chat Text Project Gutenberg gives us a taste of text in established language. It is important to consider informal language as well for NLP tasks. For this, we can check out NLTK's webtext corpus.from nltk.corpus import webtext for fileid in webtext.fileids(): print(fileid, " = ", webtext.raw(fileid)[:50], '...')firefox.txt = Cookie Manager: "Don't allow sites that set remove ... grail.txt = SCENE 1: [wind] [clop clop clop] KING ARTHUR: Who ... overheard.txt = White guy: So, do you have any plans for this even ... pirates.txt = PIRATES OF THE CARRIBEAN: DEAD MAN'S CHEST, by Ted ... singles.txt = 25 SEXY MALE, seeks attrac older single lady, for ... wine.txt = Lovely delicate, fragrant Rhone wine. Polished lea ...Also available is a corpus of instant messaging chat sessions with anonymized data and text.from nltk.corpus import nps_chat chatroom = nps_chat.posts('10-19-20s_706posts.xml') print(chatroom[123])['i', 'do', "n't", 'want', 'hot', 'pics', 'of', 'a', 'female', ',', 'I', 'can', 'look', 'in', 'a', 'mirror', '.']Brown CorpusFrom the book... 
The Brown Corpus was the first million-word electronic corpus of English, created in 1961 at Brown University. This corpus contains text from 500 sources, and the sources have been categorized by genre, such as news, editorial, and so on. 1.1 gives an example of each genre (for a complete list, see http://icame.uib.no/brown/bcm-los.html).from nltk.corpus import brown print(brown.categories())['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction']Words and sentences in the brown corpus can be accessed through a variety of ways, including through fileids and categories.brown.words(categories='news') brown.words(fileids=['cg22']) brown.sents(categories=['news','editorial','reviews'])Brown Corpus is a convenient for studying systematic differences between genres. This study of linguistics is also known as **stylistics**.Let's compare the usage of modal verbs across genres. First, let's count the modal verbs for the *news* genre.news_text = brown.words(categories='news') fdist = nltk.FreqDist(w.lower() for w in news_text) modals = ['can', 'could', 'may', 'might', 'must', 'will'] for modal in modals: print(modal,':',fdist[modal], end=' | ')can : 94 | could : 87 | may : 93 | might : 38 | must : 53 | will : 389 |Now we'll obtain the above counts for a few genres using NLTK's conditional frequency distribution functionality.cfd = nltk.ConditionalFreqDist((genre,word) for genre in brown.categories() for word in brown.words(categories=genre)) genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor'] modals = ['can', 'could', 'may', 'might', 'must', 'will'] cfd.tabulate(conditions=genres, samples=modals)can could may might must will news 93 86 66 38 50 389 religion 82 59 78 12 54 71 hobbies 268 58 131 22 83 264 science_fiction 16 49 4 12 8 16 romance 74 193 11 51 45 43 humor 16 30 8 8 9 13This idea of using word counts to distinguish between genres is taken up again in a later chapter. Reuters Corpus The Reuters Corpus contains 10,788 news documents classified into 90 topics, totaling 1.3 million. The documents are further grouped into two sets called "training" and "test". This split can be used for training and testing algorithms that predict the topic of a document.from nltk.corpus import reuters reuters.fileids()[:5] reuters.categories()[:5]Note: Categories in the reuters corpus overlap with each other, as a news story can have more than one category/topic.reuters.categories('training/9865') reuters.categories(['training/9865', 'training/9880']) reuters.fileids('barley')[:5]We can similarly obtain words or sentences in terms of files and categories.reuters.words(categories=['barley','corn']) reuters.sents('training/9865')Inaugural Address Corpus The inaugural address corpus is a collection of texts of presidential addresses. 
The corpus also has a time dimension to it.from nltk.corpus import inaugural inaugural.fileids()[:5] [fileid[:4] for fileid in inaugural.fileids()][:5]Looking at how the words *America* and *Citizen* are used over time:from matplotlib import * %matplotlib inline pyplot.rcParams["figure.figsize"] = (20,5) cfd = nltk.ConditionalFreqDist((target, fileid[:4]) for fileid in inaugural.fileids() for w in inaugural.words(fileid) for target in ['america', 'citizen'] if w.lower().startswith(target)) cfd.plot()Corpora in Other LanguagesSome examples of NLTK corpora in languages other than English.nltk.download('cess_esp') nltk.corpus.cess_esp.words() nltk.download('indian') nltk.corpus.indian.words()[nltk_data] Downloading package indian to /home/sajal/nltk_data... [nltk_data] Unzipping corpora/indian.zip.Analysing the differences in word lengths in different languages, in a corpora of Universal Declaration of Human Rights in 300+ languages.from nltk.corpus import udhr languages = ['Chickasaw', 'English', 'German_Deutsch','Greenlandic_Inuktikut', 'Hungarian_Magyar', 'Ibibio_Efik'] cfd = nltk.ConditionalFreqDist((lang,len(word)) for lang in languages for word in udhr.words(lang + '-Latin1')) cfd.plot()Conditional Frequency Distributions We have already used conditional frequency distributions previously in this notebook. Let's now define it. A conditional frequency distribution is a collection of frequency distributions, each one for a different "condition". An example of this condition can be the category of a text. Using NLTK's `cfd` functionality, we can obtain a frequency distribution of words for different categories.A conditional frequency distribution needs to pair each event (example: an observed word) with a condition (example: a category).text = ['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', ...] pairs = [('news', 'The'), ('news', 'Fulton'), ('news', 'County'), ...]Counting Words by Genre Where `FreqDist` takes a simple list as input, `ConditionalFreqDist()` takes a list of pairs.cfd = nltk.ConditionalFreqDist((genre, word) for genre in brown.categories() for word in brown.words(categories=genre))If we just look at two genres, news and romance, the pairs could be calculated like this:genre_word = [(genre,word) for genre in ['news','romance'] for word in brown.words(categories=genre)] len(genre_word)Pairs at the beginning of the list genre_word will be of the form `('news', word)` while those at the end will be of the form `('romance', word)`.genre_word[:4] genre_word[-4:]We can now use the list of pairs and create a `ConditionalFreqDist`.cfd = nltk.ConditionalFreqDist(genre_word) cfd cfd.conditions() cfd['news'].most_common(10)Generating Random Text with Bigrams We can use conditional frequency distributions to create a table of bigrams, which can further be used to generate text.sent = ['In', 'the', 'beginning', 'God', 'created', 'the', 'heaven', 'and', 'the', 'earth', '.'] list(nltk.bigrams(sent)) def generate_model(cfdist, word, num=15): for i in range(num): print(word, end=' ') word = cfdist[word].max() text = nltk.corpus.genesis.words('english-kjv.txt') bigrams = nltk.bigrams(text) cfd = nltk.ConditionalFreqDist(bigrams)The function generate_model contains a loop to generate text. 
As its second parameter we supply it with a word as our initial context, and then once inside the loop, we print the current value of the variable word, and reset word to be the most likely token in that context (using max()); next time through the loop, we use that word as our new context.cfd['living'] generate_model(cfd, 'living')living creature that he said , and the land of the land of the landLexical Resources A lexicon, or lexical resource, is a collection of words and/or phrases along with associated information such as part of speech and sense definitions. Wordlist Corpora Let's build a function that returns uncommon or misspelt words but removing items from an existing wordlist.def unusual_words(text): text_vocab = set(w.lower() for w in text if w.isalpha()) english_vocab = set(w.lower() for w in nltk.corpus.words.words()) unusual = text_vocab - english_vocab return sorted(unusual) unusual_words(nltk.corpus.gutenberg.words('austen-sense.txt'))[:5] unusual_words(nltk.corpus.nps_chat.words())[:5]For those who are already familar with NLTK, it conatins a corpus of stopwords, that is, high-frequency words like *the*, *to* etc.from nltk.corpus import stopwords stopwords.words('english')[:5]Let's define a function to compute what fraction of words in a text are not in the stopwords list:def content_fraction(text): stop_words = stopwords.words('english') content = [w for w in text if w.lower() not in stop_words] return len(content)/len(text) content_fraction(nltk.corpus.reuters.words())Solving a word puzzleA wordlist is a useful tool for solving a wordpuzzle. Say we have a list of letters *egivrvonl*, and we want to see the set of possible words we can build, using an obligatory letter *r*.# Frequencies of letters in the word puzzle_letters = nltk.FreqDist('egivrvonl') puzzle_letters obligatory = 'r' wordlist = nltk.corpus.words.words()Now we can find a a list of words, above a length of 6 letters, using the above information[w for w in wordlist if len(w) >= 6 and obligatory in w and nltk.FreqDist(w) <= puzzle_letters]Names Corpusnames = nltk.corpus.names names.fileids() male_names = names.words('male.txt') female_names = names.words('female.txt')Finding male names that also appear as female names:[w for w in male_names if w in female_names][:10]Finding the most common last letters in male and female names.cfd = nltk.ConditionalFreqDist((fileid, name[-1]) for fileid in names.fileids() for name in names.words(fileid)) pyplot.rcParams["figure.figsize"] = (10,5) cfd.plot()A Pronouncing DictionaryTaking a look at the CMU Pronouncing Dictionary for US English, designed for use by speech synthesizers.entries = nltk.corpus.cmudict.entries() len(entries) for entry in entries[42371:42379]: print(entry)('fir', ['F', 'ER1']) ('fire', ['F', 'AY1', 'ER0']) ('fire', ['F', 'AY1', 'R']) ('firearm', ['F', 'AY1', 'ER0', 'AA2', 'R', 'M']) ('firearm', ['F', 'AY1', 'R', 'AA2', 'R', 'M']) ('firearms', ['F', 'AY1', 'ER0', 'AA2', 'R', 'M', 'Z']) ('firearms', ['F', 'AY1', 'R', 'AA2', 'R', 'M', 'Z']) ('fireball', ['F', 'AY1', 'ER0', 'B', 'AO2', 'L'])The lexicon provides a list of phonetic codes for each words. Some words have multiple pronunciations, like *Fire* in the above example. 
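Besides scanning the `entries` list, the CMU lexicon can also be loaded as a plain Python dictionary, which is convenient for looking up a single word. A small sketch, using *fire* from the example above:

```python
# Look up every recorded pronunciation of one word via the dict interface.
prondict = nltk.corpus.cmudict.dict()
prondict['fire']   # [['F', 'AY1', 'ER0'], ['F', 'AY1', 'R']]
```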
Let's try looking for words whose pronunciation consists of three phones, starting with P and ending with T.for word, pron in entries: if len(pron) == 3: ph1, ph2, ph3 = pron if ph1 == 'P' and ph3 == 'T': print(word, ph2, end=' ')pait EY1 pat AE1 pate EY1 patt AE1 peart ER1 peat IY1 peet IY1 peete IY1 pert ER1 pet EH1 pete IY1 pett EH1 piet IY1 piette IY1 pit IH1 pitt IH1 pot AA1 pote OW1 pott AA1 pout AW1 puett UW1 purt ER1 put UH1 putt AH1We can also use the lexicon to find all words whose pronunciation ends with a syllable sounding like nicks.syllable = ['N','IH0','K','S'] print([word for word, pron in entries if pron[-4:] == syllable])["atlantic's", 'audiotronics', 'avionics', 'beatniks', 'calisthenics', 'centronics', 'chamonix', 'chetniks', "clinic's", 'clinics', 'conics', 'conics', 'cryogenics', 'cynics', 'diasonics', "dominic's", 'ebonics', 'electronics', "electronics'", "endotronics'", 'endotronics', 'enix', 'environics', 'ethnics', 'eugenics', 'fibronics', 'flextronics', 'harmonics', 'hispanics', 'histrionics', 'identics', 'ionics', 'kibbutzniks', 'lasersonics', 'lumonics', 'mannix', 'mechanics', "mechanics'", 'microelectronics', 'minix', 'minnix', 'mnemonics', 'mnemonics', 'molonicks', 'mullenix', 'mullenix', 'mullinix', 'mulnix', "munich's", 'nucleonics', 'onyx', 'organics', "panic's", 'panics', 'penix', 'pennix', 'personics', 'phenix', "philharmonic's", 'phoenix', 'phonics', 'photronics', 'pinnix', 'plantronics', 'pyrotechnics', 'refuseniks', "resnick's", 'respironics', 'sconnix', 'siliconix', 'skolniks', 'sonics', 'sputniks', 'technics', 'tectonics', 'tektronix', 'telectronics', 'telephonics', 'tonics', 'un[...]We can notice above that one pronunciation can be spelt in several ways: nics, niks, nix and even ntic's with a silent t. Let's try to look at mismatches between pronunciation and writing by looking at words that end with the phonetic 'M', but end with 'N' in the written form.[w for w,pron in entries if pron[-1] == 'M' and w[-1] == 'n']Now, let's find out cases where a word is pronounced starting with the phonetic N, but doesn't start with the letter N in written form.sorted(set(w[:2] for w, pron in entries if pron[0] == 'N' and w[0] != 'n'))Phones also contain digits to represent primary stress (1), secondary stress (2) and no stress (0). In the below example we define a function to extract the stress digits and then scan our lexicon to find words having a particular stress pattern.def stress(pron): return [char for phone in pron for char in phone if char.isdigit()] print([w for w, pron in entries if stress(pron) == ['0','1','0','2','0']][:20]) print([w for w, pron in entries if stress(pron) == ['0','2','0','1','0']][:20])['abbreviation', 'abbreviations', 'abomination', 'abortifacient', 'abortifacients', 'academicians', 'accommodation', 'accommodations', 'accreditation', 'accreditations', 'accumulation', 'accumulations', 'acetylcholine', 'acetylcholine', 'adjudication', 'administration', "administration's", 'administrations', "administrations'", 'aduliadae']Conditional frequency distribution can help us here to find minimally contrasting set of words. 
In particular, we will look for p-words containing three sounds, and group them according to their first and last sounds.p3 = [(pron[0]+'-'+pron[2], word) for (word,pron) in entries if pron[0] == 'P' and len(pron) == 3] cfd = nltk.ConditionalFreqDist(p3) for template in sorted(cfd.conditions()): if len(cfd[template]) > 10: words = sorted(cfd[template]) wordstring = ' '.join(words) print(template, wordstring[:70] + "...")P-CH patch pautsch peach perch petsch petsche piche piech pietsch pitch pit... P-K pac pack paek paik pak pake paque peak peake pech peck peek perc perk ... P-L pahl pail paille pal pale pall paul paule paull peal peale pearl pearl... P-N paign pain paine pan pane pawn payne peine pen penh penn pin pine pinn... P-P paap paape pap pape papp paup peep pep pip pipe pipp poop pop pope pop... P-R paar pair par pare parr pear peer pier poor poore por pore porr pour... P-S pace pass pasts peace pearse pease perce pers perse pesce piece piss p... P-T pait pat pate patt peart peat peet peete pert pet pete pett piet piett... P-UW1 peru peugh pew plew plue prew pru prue prugh pshew pugh... P-Z p's p.'s p.s pais paiz pao's pas pause paws pays paz peas pease pei's ...Comparative Wordlists NLTK includes *Swadesh wordlists* which contain lists of common words in several languages.from nltk.corpus import swadesh print(swadesh.fileids()) print(swadesh.words('en'))['I', 'you (singular), thou', 'he', 'we', 'you (plural)', 'they', 'this', 'that', 'here', 'there', 'who', 'what', 'where', 'when', 'how', 'not', 'all', 'many', 'some', 'few', 'other', 'one', 'two', 'three', 'four', 'five', 'big', 'long', 'wide', 'thick', 'heavy', 'small', 'short', 'narrow', 'thin', 'woman', 'man (adult male)', 'man (human being)', 'child', 'wife', 'husband', 'mother', 'father', 'animal', 'fish', 'bird', 'dog', 'louse', 'snake', 'worm', 'tree', 'forest', 'stick', 'fruit', 'seed', 'leaf', 'root', 'bark (from tree)', 'flower', 'grass', 'rope', 'skin', 'meat', 'blood', 'bone', 'fat (noun)', 'egg', 'horn', 'tail', 'feather', 'hair', 'head', 'ear', 'eye', 'nose', 'mouth', 'tooth', 'tongue', 'fingernail', 'foot', 'leg', 'knee', 'hand', 'wing', 'belly', 'guts', 'neck', 'back', 'breast', 'heart', 'liver', 'drink', 'eat', 'bite', 'suck', 'spit', 'vomit', 'blow', 'breathe', 'laugh', 'see', 'hear', 'know (a fact)', 'think', 'smell', 'fear', 'sleep', 'live', 'die', 'kill', 'fight',[...]We can access cognate (having the same linguistic meaning in different languages) words using the entries method and specifying a list of languages.fr2en = swadesh.entries(['fr','en']) print(fr2en[:10])[('je', 'I'), ('tu, vous', 'you (singular), thou'), ('il', 'he'), ('nous', 'we'), ('vous', 'you (plural)'), ('ils, elles', 'they'), ('ceci', 'this'), ('cela', 'that'), ('ici', 'here'), ('là', 'there')]And convert it into a dictionary by:translate = dict(fr2en) translate['nous']A similar application is to compare words in various Germanic and Romance Languages:languages = ['en', 'de', 'nl', 'es', 'fr', 'pt', 'la'] for i in range(139,143): print(swadesh.entries(languages)[i])('say', 'sagen', 'zeggen', 'decir', 'dire', 'dizer', 'dicere') ('sing', 'singen', 'zingen', 'cantar', 'chanter', 'cantar', 'canere') ('play', 'spielen', 'spelen', 'jugar', 'jouer', 'jogar, brincar', 'ludere') ('float', 'schweben', 'zweven', 'flotar', 'flotter', 'flutuar, boiar', 'fluctuare')WordNetFrom the book: *WordNet is a semantically-oriented dictionary of English, similar to a traditional thesaurus but with a richer structure. 
NLTK includes the English WordNet, with 155,287 words and 117,659 synonym sets.* Senses and SynonymsEssentially, synonyms are words that have the same meaning. Let's use WordNet to explore these.from nltk.corpus import wordnet as wn wn.synsets('motorcar')The entity above, `car.n.01` is called a synset, or "synonym set", a collection of synonymous words, or lemmas.wn.synset('car.n.01').lemma_names()Synsets also come with a prose definition and example sentences:wn.synset('car.n.01').definition() wn.synset('car.n.01').examples()Let's look at some common operations involving synsets, starting with getting all the lemmas of a synset:wn.synset('car.n.01').lemmas()Looking up a particular lemma:wn.lemma('car.n.01.automobile')Getting the synset of a given lemma:wn.lemma('car.n.01.automobile').synset()And, getting the name of the lemma:wn.lemma('car.n.01.automobile').name()Unlike the word motorcar, which is unambiguous and has one synset, the word car is ambiguous, having five synsets:wn.synsets('car') for synset in wn.synsets('car'): print(synset.lemma_names()) # Conveniently access all lemmas involving the word 'car' wn.lemmas('car')WordNet HierarchyFrom the book: *WordNet synsets correspond to abstract concepts, and they don't always have corresponding words in English. These concepts are linked together in a hierarchy. Some concepts are very general, such as Entity, State, Event — these are called unique beginners or root synsets. Others, such as gas guzzler and hatchback, are much more specific. *Using WordNet, we can navigate between these concepts. As an example, let's look at concepts that are more specific, called the *hyponyms*.motorcar = wn.synset('car.n.01') types_of_motorcar = motorcar.hyponyms() types_of_motorcar[0] print(sorted(lemma.name() for synset in types_of_motorcar for lemma in synset.lemmas()))['Model_T', 'S.U.V.', 'SUV', 'Stanley_Steamer', 'ambulance', 'beach_waggon', 'beach_wagon', 'bus', 'cab', 'compact', 'compact_car', 'convertible', 'coupe', 'cruiser', 'electric', 'electric_automobile', 'electric_car', 'estate_car', 'gas_guzzler', 'hack', 'hardtop', 'hatchback', 'heap', 'horseless_carriage', 'hot-rod', 'hot_rod', 'jalopy', 'jeep', 'landrover', 'limo', 'limousine', 'loaner', 'minicar', 'minivan', 'pace_car', 'patrol_car', 'phaeton', 'police_car', 'police_cruiser', 'prowl_car', 'race_car', 'racer', 'racing_car', 'roadster', 'runabout', 'saloon', 'secondhand_car', 'sedan', 'sport_car', 'sport_utility', 'sport_utility_vehicle', 'sports_car', 'squad_car', 'station_waggon', 'station_wagon', 'stock_car', 'subcompact', 'subcompact_car', 'taxi', 'taxicab', 'tourer', 'touring_car', 'two-seater', 'used-car', 'waggon', 'wagon']We can also navigate up the hierarchy by visiting hypernyms. Some words have multiple paths upwards as they can be classified in more than one way.motorcar.hypernyms() paths = motorcar.hypernym_paths() len(paths) [synset.name() for synset in paths[0]] [synset.name() for synset in paths[1]] # Root Hypernym of a synset motorcar.root_hypernyms()Lexical RelationsHypernyms and hyponyms are called lexical relations because they relate one synset to another. These two relations navigate up and down the "is-a" hierarchy. Another important way to navigate the WordNet network is from items to their components (meronyms) or to the things they are contained in (holonyms). 
For example, the parts of a tree are its trunk, crown, and so on; the part_meronyms().wn.synset('tree.n.01').part_meronyms()The substance a tree is made of includes heartwood and sapwood; the substance_meronyms().wn.synset('tree.n.01').substance_meronyms()A collection of trees forms a forest; the member_holonyms().wn.synset('tree.n.01').member_holonyms()Let's take a look at the word *mint* which has several closely related senses.for synset in wn.synsets('mint', wn.NOUN): print(synset.name() + ':', synset.definition())batch.n.02: (often followed by `of') a large number or amount or extent mint.n.02: any north temperate plant of the genus Mentha with aromatic leaves and small mauve flowers mint.n.03: any member of the mint family of plants mint.n.04: the leaves of a mint plant used fresh or candied mint.n.05: a candy that is flavored with a mint oil mint.n.06: a plant where money is coined by authority of the governmentLooking at the part and substance holonyms, we can also see that mint.n.04 is part of mint.n.02 and the substance from which mint.n.05 is made.wn.synset('mint.n.04').part_holonyms() wn.synset('mint.n.04').substance_holonyms()Let's look at relationship between verbs now. The verb *walk* entails *step*. Some verbs can have multiple entailments.wn.synset('walk.v.01').entailments() wn.synset('eat.v.01').entailments()We can also use the wordnet to obtain antyonyms.wn.lemma('supply.n.02.supply').antonyms() wn.lemma('rush.v.01.rush').antonyms() wn.lemma('horizontal.a.01.horizontal').antonyms()Semantic SimilaritySynsets are linked by a complex network of lexical relations. For a particular synset, we can traverse the WordNet network to find synsets with related meanings. Each synset has one or more hypernym paths that link it to a root synonym. Two synsets linked to the same root may have common hypernyms. If two synsets share a very specific hypernym (i.e. one which lies low in the hypernym tree hierarchy) - they may be closely related.right = wn.synset('right_whale.n.01') orca = wn.synset('orca.n.01') minke = wn.synset('minke_whale.n.01') tortoise = wn.synset('tortoise.n.01') novel = wn.synset('novel.n.01') right.lowest_common_hypernyms(minke) right.lowest_common_hypernyms(orca) right.lowest_common_hypernyms(tortoise) right.lowest_common_hypernyms(novel)Note that whale is very specific (and baleen whale even more so), while vertebrate is more general and entity is completely general. We can quantify the concept of generality by looking at the depth of each synset:wn.synset('whale.n.02').min_depth()wn.synset('vertebrate.n.01').min_depth()wn.synset('entity.n.01').min_depth()Similarity measures defined in WordNet, based on the shortest path that connects concepts in the hypernym hierarchy, can be used as a way to quantify the similarity between two synsets.right.path_similarity(orca) right.path_similarity(tortoise)!pip install hebo !pip install humpday from hebo.design_space.design_space import DesignSpace from hebo.optimizers.hebo import HEBO import pandas as pd import numpy as np from humpday.objectives.classic import shekel_on_cubehttps://pypi.org/project/HEBO/First optimizer I've seen to use pandas in the interface. I refrain from comment. 
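To make that pandas-based interface concrete before the full benchmark wrapper below, here is a minimal, self-contained sketch on a made-up 1-D quadratic objective (the variable name, bounds, batch size and number of iterations are arbitrary choices for illustration):

```python
# A minimal HEBO suggest/observe loop on a toy objective (illustrative only).
import numpy as np
import pandas as pd
from hebo.design_space.design_space import DesignSpace
from hebo.optimizers.hebo import HEBO

space = DesignSpace().parse([{'name': 'x', 'type': 'num', 'lb': -3.0, 'ub': 3.0}])
opt = HEBO(space)

def toy_objective(params: pd.DataFrame) -> np.ndarray:
    # HEBO suggests candidates as a DataFrame and expects an array of scores back.
    return (params['x'].values ** 2).reshape(-1, 1)

for _ in range(10):
    rec = opt.suggest(n_suggestions=4)    # batch of suggestions as a DataFrame
    opt.observe(rec, toy_objective(rec))  # feed the observed values back

print('best value found:', opt.y.min())
```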
The main thing is it won NeurIPS 2020 BBO!import math def hebo_cube_factory(objective, n_trials, n_dim, with_count,n_suggestions=5): global feval_count feval_count = 0 variables = [{'name': 'u' + str(i), 'type': 'num', 'lb': 0., 'ub': 1.} for i in range(n_dim)] space = DesignSpace().parse(variables) opt = HEBO(space) def _objective(params: pd.DataFrame) -> np.ndarray: global feval_count feval_count += len(params.index) return np.array([ objective(ui) for ui in params.values ]) n_batches = int(math.floor(n_trials/n_suggestions)) n_remainder = n_trials - n_suggestions*n_batches for i in range(n_batches): rec = opt.suggest(n_suggestions=n_suggestions) # <-- don't change this opt.observe(rec, _objective(rec)) for i in range(n_remainder): rec = opt.suggest(n_suggestions=1) # <-- don't change this opt.observe(rec, _objective(rec)) best_val = opt.y.min() best_ndx = np.argmin([y[0] for y in opt.y]) # I mean seriously, why make the user do this? best_x = list(opt.X.values[best_ndx]) return (best_val, best_x, feval_count) if with_count else (best_val, best_x) hebo_cube_factory(shekel_on_cube,n_dim=2,n_trials=50,with_count=True, n_suggestions=15)kics.info()kics2 = pd.read_csv("keplerperiods.csv") kics2.info() kics.head() kics2.head() df_keplers = pd.read_csv("keplers.csv") df_keplers = df_keplers[['kepid','teff','logg','radius','mass','dens']] df_periods = pd.DataFrame() columns = ["KIC","Period"] for kic in kics2.KIC: try: df = get_kepler.read_csv(kic) per = df["period"] df_periods = df_periods.append(pd.DataFrame(data=[[kic,per]],columns=columns)) except Exception as e: print(e) df_periods = df_periods.append(pd.DataFrame(data=[[kic,0.0]],columns=columns)) df_periods = kics2.merge(df_keplers,left_on='KIC',right_on='kepid') df_periods = df_periods.drop(columns=['kepid']) df_periods.head() df_periods.to_csv("keplerperiods_pdm.csv") df_periods_2 = pd.read_csv("keplerperiods.csv") df_periods_2.head() df_valids = df_periods[(df_periods.Period > 0)&(df_periods.Period < 55)] df_valids.info() import seaborn as sns import matplotlib.pyplot as plt sns.set_palette(sns.cubehelix_palette(8, start=.5, rot=-.75)) plt.figure(1,dpi=500) sns.pairplot(df_periods.drop(columns=["KIC"])) sns.relplot(x='teff',y='Period', data=df_periods) 1 <= 2 import utils"English to Dravidian languages translation"> "Create a translation engine to translate from English to 4 Dravidian languages namely Tamil, Telugu, Kannada and Malayalam using Huggingface models."- toc: false- branch: master- badges: false- comments: true- categories: [translation, NLP]- image: images/375px-India_South_India_Locator_Map.png- hide: false- search_exclude: true- metadata_key1: metadata_value1- metadata_key2: metadata_value2 Intro - Hugging FaceHugging Face is an extremely popular python library which provides state of the art models for various NLP tasks like text classification, machine translation etc. Its enables us to quickly experiment with various NLP architecture using its modules, thereby helping us to focus more on research instead of focusing on the nitty-gritty stuff. One other big plus point is that it supports both Pytorch and Tensorflow frameworks. We can easily switch between the two. And we can also convert it into the ONNX frameword if need for inference.Hugging Face has released various translation models, which you can explore in this [link](https://huggingface.co/models?filter=translation). 
We would be using the MarianMT [model](https://huggingface.co/Helsinki-NLP/opus-mt-en-dra) which has already been trained on parallel texts involving english and the dravidian languages. MarianMT models main ideas are based out of the [MarianNMT project](https://marian-nmt.github.io/) which mainly used C++. All models the MarinMT models at hugging face are transformer encoder-decoders with 6 layers in each component. Intro - TranslationMachine Translation can be thought of a seq2seq generation task which contains encoder and decoder blocks. To train the model, the encoder receives the sentences in the source language and the decoder is made to predict the sentences in the target languages. You can check out this initial [paper](http://arxiv.org/abs/1609.08144) from Google for more information how it is done.Here in this article we would be using translation models trained on Transformer architecture and you can see how easy it is to create a translation pipleline using the hugging face. Code# Please install the reqired packages before proceeding !pip install transformers # Let's start by importing the packages from hugging face essential for this work. from transformers import MarianMTModel, MarianTokenizer # imports the MarianMT model architecture and the tokenizer model_name = 'Helsinki-NLP/opus-mt-en-dra' # This model has been trained on the parallel texts of english and the dravidian languages. tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) print(tokenizer.supported_language_codes)['>>tel<<', '>>kan<<', '>>mal<<', '>>tam<<']Once you run the above code block you can see that the required tokenizer and model is getting downloaded from the hugging face model repository. The print statement prints out the languages supported by the translation engine. Since we are translating from English to the Dravidian languages we can see the 4 language codes of the dravidian languages. All the 4 language codes which you see on the output cell are based out of the "ISO 639-2" which is a three letter language classification system. There is also a two letter language classification system which is commonly used called ISO 639-1. You can learn more the different language codes from this [wikipedia link](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes), which has a nice list of all the language codes in various standards.Now let's prepare some texts for the translation engine to translate.text_to_be_translated = ['>>tam<< How are you doing?', '>>kan<< How are you doing?', '>>tel<< How are you doing?', '>>mal<< How are you doing?']You can see that I am creating a list of same sentence for the model to translate but I am prepending the language codes of the Dravidian languages in the brackets. 
This addition of language codes at the beginning of the text is necessary because the translation model which has been trained to predict on mulitple target languages with the source language as English.# The below step creates a batch of text for inferencing after the sentences have tokenized batch_text = tokenizer.prepare_seq2seq_batch(text_to_be_translated) print(batch_text){'input_ids': tensor([[ 14, 129, 43, 24, 713, 15, 0], [ 12, 129, 43, 24, 713, 15, 0], [ 11, 129, 43, 24, 713, 15, 0], [ 13, 129, 43, 24, 713, 15, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]])}In the print statement above you can observe that only the token for the language codes are different after tokenization while the other tokens are same in the input ids for all the sentences.translated = model.generate(**batch_text) print(translated)tensor([[62951, 1796, 1381, 4547, 1629, 15, 0], [62951, 383, 13504, 9075, 15, 0, 62951], [62951, 934, 230, 6063, 15, 0, 62951], [62951, 6302, 11736, 15, 0, 62951, 62951]])This step is used to make the model generate the intermediate representations for the input vectors. The ids which you see in the tensor all have relevant mappings to tokens in the target language. The tokenization technique used here is based on Sentence piece tokenization which tokenizes word to subword and creates a maping dictionary. You can learn more on Sentencepiece tokenization technique in this [paper](https://arxiv.org/pdf/1808.06226.pdf). Now let's explore what do some of the ids in the intermediate representation tensor have as the associated word component for the sentence translated to Tamil.print("Word for id 62951:", tokenizer.decode(token_ids=[62951])) print("Word for id 1796:", tokenizer.decode(token_ids=[1796])) print("Word for id 1381:", tokenizer.decode(token_ids=[1381])) print("Word for id 4547:", tokenizer.decode(token_ids=[4547])) print("Word for id 1629:", tokenizer.decode(token_ids=[1629])) print("Word for id 15:", tokenizer.decode(token_ids=[15])) print("Word for id 0:", tokenizer.decode(token_ids=[0]))Word for id 62951: Word for id 1796: நீ Word for id 1381: எப்படி Word for id 4547: இருக்கிற Word for id 1629: ாய் Word for id 15: ? Word for id 0:We can observe that 62951, 15 and 0 are the token_ids for \, ? and "" respectively. And since the model has been trained on parallel text for all the 4 languages combined, the these ids have similar tokens irrespective of the target language.You would also have observed, if you know Tamil language that the token for ids 4547 and 1629 from a single word but are split into two subwords because of the sentencepiece tokenizer.Now let's decode the list of sentences in the tensor using the tokenizer.tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated] print(tgt_text)['நீ எப்படி இருக்கிறாய்?', 'ನೀವು ಹೇಗಿದ್ದೀರಿ?', 'ఎలా మీరు చేస్తున్న?', 'സുഖമാണോ?']So you have now created a setup of English to Dravidian languages translation in less than 10 steps using the hugging face package. You can also implement this translation activity using the pipeline feature of hugging face which abstracts the entire process. So let's take a look at how that works.from transformers import pipeline, MarianTokenizer, MarianMTModel model_name = 'Helsinki-NLP/opus-mt-en-dra' # This model has been trained on the parallel texts of english and the dravidian languages. 
tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translation_engine = pipeline("text2text-generation", model=model, tokenizer=tokenizer) text_to_translate = input(prompt='Please enter the English text to translate:\n') lang_select = input(prompt='Please enter one of the following languages: 1) Tamil, 2) Telugu, 3) Kannada and 4) Malayalam:\n') if lang_select == "Tamil": text_to_translate = ">>tam<<" + text_to_translate elif lang_select == "Kannada": text_to_translate = ">>kan<<" + text_to_translate elif lang_select == "Telugu": text_to_translate = ">>tel<<" + text_to_translate elif lang_select == "Malayalam": text_to_translate = ">>mal<<" + text_to_translate translated_text = translation_engine(text_to_translate) print("The translated text is: {}".format(translated_text[0]["generated_text"]))Please enter the English text to translate: hello, how are you doing? Please enter one of the following languages: 1) Tamil, 2) Telugu, 3) Kannada and 4) Malayalam: Tamil The translated text is: ஹலோ, நீ எப்படி இருக்கிறாய்?Desafio 3Neste desafio, iremos praticar nossos conhecimentos sobre distribuições de probabilidade. Para isso,dividiremos este desafio em duas partes: 1. A primeira parte contará com 3 questões sobre um *data set* artificial com dados de uma amostra normal e uma binomial.2. A segunda parte será sobre a análise da distribuição de uma variável do _data set_ [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2), contendo 2 questões.> Obs.: Por favor, não modifique o nome das funções de resposta. _Setup_ geralimport pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as sct import seaborn as sns from statsmodels.distributions.empirical_distribution import ECDF #%matplotlib inline from IPython.core.pylabtools import figsize figsize(12, 8) sns.set()Parte 1 _Setup_ da parte 1np.random.seed(42) dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000), "binomial": sct.binom.rvs(100, 0.2, size=10000)})Inicie sua análise a partir da parte 1 a partir daqui# Sua análise da parte 1 começa aqui. dataframe.head() dataframe.info() dataframe.describe()Questão 1Qual a diferença entre os quartis (Q1, Q2 e Q3) das variáveis `normal` e `binomial` de `dataframe`? Responda como uma tupla de três elementos arredondados para três casas decimais.Em outra palavras, sejam `q1_norm`, `q2_norm` e `q3_norm` os quantis da variável `normal` e `q1_binom`, `q2_binom` e `q3_binom` os quantis da variável `binom`, qual a diferença `(q1_norm - q1 binom, q2_norm - q2_binom, q3_norm - q3_binom)`?def q1(): quantiles = dataframe.quantile([.25, .5, .75]) quantiles_diff = quantiles['normal'] - quantiles['binomial'] return tuple(quantiles_diff.round(3).to_list()) q1()Para refletir:* Você esperava valores dessa magnitude?* Você é capaz de explicar como distribuições aparentemente tão diferentes (discreta e contínua, por exemplo) conseguem dar esses valores? Questão 2Considere o intervalo $[\bar{x} - s, \bar{x} + s]$, onde $\bar{x}$ é a média amostral e $s$ é o desvio padrão. Qual a probabilidade nesse intervalo, calculada pela função de distribuição acumulada empírica (CDF empírica) da variável `normal`? 
Responda como uma único escalar arredondado para três casas decimais.def q2(): inferior = dataframe.normal.mean() - dataframe.normal.std() superior = dataframe.normal.mean() + dataframe.normal.std() ecdf = ECDF(dataframe.normal) return np.float(round(ecdf(superior) - ecdf(inferior), 3)) q2()Para refletir:* Esse valor se aproxima do esperado teórico?* Experimente também para os intervalos $[\bar{x} - 2s, \bar{x} + 2s]$ e $[\bar{x} - 3s, \bar{x} + 3s]$. Questão 3Qual é a diferença entre as médias e as variâncias das variáveis `binomial` e `normal`? Responda como uma tupla de dois elementos arredondados para três casas decimais.Em outras palavras, sejam `m_binom` e `v_binom` a média e a variância da variável `binomial`, e `m_norm` e `v_norm` a média e a variância da variável `normal`. Quais as diferenças `(m_binom - m_norm, v_binom - v_norm)`?def q3(): mean_std = dataframe.describe()[1:3] mean_std.loc['std'] **= 2 mean_std_diff = mean_std['binomial'] - mean_std['normal'] return tuple(mean_std_diff.round(3).to_list()) q3()Para refletir:* Você esperava valore dessa magnitude?* Qual o efeito de aumentar ou diminuir $n$ (atualmente 100) na distribuição da variável `binomial`? Parte 2 _Setup_ da parte 2stars = pd.read_csv("pulsar_stars.csv") stars.rename({old_name: new_name for (old_name, new_name) in zip(stars.columns, ["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"]) }, axis=1, inplace=True) stars.loc[:, "target"] = stars.target.astype(bool)Inicie sua análise da parte 2 a partir daquistars.head() stars.info() stars.describe()Questão 4Considerando a variável `mean_profile` de `stars`:1. Filtre apenas os valores de `mean_profile` onde `target == 0` (ou seja, onde a estrela não é um pulsar).2. Padronize a variável `mean_profile` filtrada anteriormente para ter média 0 e variância 1.Chamaremos a variável resultante de `false_pulsar_mean_profile_standardized`.Encontre os quantis teóricos para uma distribuição normal de média 0 e variância 1 para 0.80, 0.90 e 0.95 através da função `norm.ppf()` disponível em `scipy.stats`.Quais as probabilidade associadas a esses quantis utilizando a CDF empírica da variável `false_pulsar_mean_profile_standardized`? Responda como uma tupla de três elementos arredondados para três casas decimais.def standardization(x): return (x - x.mean()) / x.std() def q4(): false_pulsar_mean_profile = stars.loc[stars['target'] == False]['mean_profile'] false_pulsar_mean_profile_standardized = standardization(false_pulsar_mean_profile) ecdf = ECDF(false_pulsar_mean_profile_standardized) ppf = pd.Series(ecdf(sct.norm.ppf([0.80, 0.90, 0.95])), [0.80, 0.90, 0.95]) return tuple(ppf.round(3).to_list()) q4()Para refletir:* Os valores encontrados fazem sentido?* O que isso pode dizer sobre a distribuição da variável `false_pulsar_mean_profile_standardized`? Questão 5Qual a diferença entre os quantis Q1, Q2 e Q3 de `false_pulsar_mean_profile_standardized` e os mesmos quantis teóricos de uma distribuição normal de média 0 e variância 1? 
Responda como uma tupla de três elementos arredondados para três casas decimais.def standardization(x): return (x - x.mean()) / x.std() def q5(): false_pulsar_mean_profile = stars.loc[stars['target'] == False]['mean_profile'] false_pulsar_mean_profile_standardized = standardization(false_pulsar_mean_profile) ppf = pd.Series(sct.norm.ppf([0.25, 0.50, 0.75]), [0.25, 0.50, 0.75]) quantiles = false_pulsar_mean_profile_standardized.quantile([0.25, 0.50, 0.75]) return tuple((quantiles - ppf).round(3).to_list()) q5()Variational Auto-Encoder - Vanilla Version Importing the required packagesimport torch from torch.autograd import Variable import numpy as np import torch.nn.functional as F import torchvision from torchvision import transforms import torch.optim as optim from torch import nn import matplotlib.pyplot as pltDefining the modelThe model that I have used is a simple fully connected one with one hidden layer in each of the parts( i.e. Encoder and Decoder).class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(28*28, 400) self.fc2_mu = nn.Linear(400, 20) self.fc2_sig = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self,x): a1 = F.relu(self.fc1(x)) a_mu = self.fc2_mu(a1) a_logvar = self.fc2_sig(a1) return a_mu, a_logvar def decode(self,z): a3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(a3)) def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def forward(self,x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvarGetting the Datasettrain_dataset = torchvision.datasets.MNIST(root='./data',train=True, transform=transforms.ToTensor(), download=True) test_dataset = torchvision.datasets.MNIST(root='./data',train=False, transform=transforms.ToTensor(),download = True) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=100, shuffle=False)Instantiating the model and Optimizermodel = VAE() optimizer = optim.Adam(model.parameters(), lr=1e-3)Defining the loss functionThe loss consists of two parts :-1. The cross entropy loss between the original and reconstructed image2. The KL Divergence between Q(z|x) and P(z|X). This loss makes the learned distribution on z to be similar to some assumed distribution(Gaussian in this case). For more on this, read this [tutorial](https://arxiv.org/pdf/1606.05908). 
Formula is derived on Appendix B of the [paper](https://arxiv.org/pdf/1312.6114).def loss_function(recon_x, x, mu, logvar): BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum') KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLDTrainingnum_epochs = 5 print_per = 100 model.train() loss_record = [] for epoch in range(num_epochs): train_loss = 0 print_loss = 0 for i, (images, _) in enumerate(train_loader): images = images.reshape(-1, 28*28) optimizer.zero_grad() recon_batch, mu, logvar = model(images) loss = loss_function(recon_batch, images, mu, logvar) loss.backward() if (epoch == 0): loss_record.append(loss.item()) train_loss += loss.item() print_loss += loss.item() optimizer.step() if (i%print_per == 0): print("Epoch : {} , Minibatch : {} Loss = {:.4f}".format(epoch+1, i, print_loss)) # loss_record.append(print_loss) print_loss = 0 print("Epoch {} : Loss = ({:.4f}) ".format(epoch+1, train_loss))Epoch : 1 , Minibatch : 0 Loss = 54789.6172 Epoch : 1 , Minibatch : 100 Loss = 2296336.5566 Epoch : 1 , Minibatch : 200 Loss = 1641241.8613 Epoch : 1 , Minibatch : 300 Loss = 1460765.7021 Epoch : 1 , Minibatch : 400 Loss = 1366460.5518 Epoch : 1 , Minibatch : 500 Loss = 1306802.2734 Epoch 1 : Loss = (9380086.2510) Epoch : 2 , Minibatch : 0 Loss = 11909.7256 Epoch : 2 , Minibatch : 100 Loss = 1230171.1699 Epoch : 2 , Minibatch : 200 Loss = 1209696.3896 Epoch : 2 , Minibatch : 300 Loss = 1185796.5020 Epoch : 2 , Minibatch : 400 Loss = 1173013.1475 Epoch : 2 , Minibatch : 500 Loss = 1170144.8301 Epoch 2 : Loss = (7125041.2344) Epoch : 3 , Minibatch : 0 Loss = 10819.3555 Epoch : 3 , Minibatch : 100 Loss = 1149355.5850 Epoch : 3 , Minibatch : 200 Loss = 1136657.8994 Epoch : 3 , Minibatch : 300 Loss = 1129757.7314 Epoch : 3 , Minibatch : 400 Loss = 1127398.5312 Epoch : 3 , Minibatch : 500 Loss = 1122224.3330 Epoch 3 : Loss = (6785680.2803) Epoch : 4 , Minibatch : 0 Loss = 11511.7969 Epoch[...]Loss recordAs we can clearly see, the loss is decreasing as we pass more and more number of batches i.e. as we perform more number of iterations. The losses here are only for the first epoch. The fluctuations are due to the use of mini batch gradient descent.plt.plot(loss_record)Reconstructing a random image from the training set :-print(model(images)[0].data[0].numpy().shape) image1 = images[8] print(image1.shape) plt.imshow(model(images)[0].data[8].numpy().reshape(28, 28), cmap='gray') plt.show(block=True)(784,) torch.Size([784])Reconstructing images from the test set :-test_loss = 0 print_per = 10 with torch.no_grad(): for i, (images, _) in enumerate(test_loader): images = images.reshape(-1, 28*28) recon_batch, mu, logvar = model(images) test_loss += loss_function(recon_batch, images, mu, logvar).item() if (i%print_per == 0): plt.imshow(model(images)[0].data[0].numpy().reshape(28, 28), cmap='gray') plt.show(block=True) image2 = images[1] print(image2.shape)torch.Size([784])Original image of some random training example :-plt.imshow(images[1].numpy().reshape(28, 28), cmap='gray') plt.show(block=True)Reconstructed image :-plt.imshow(model(images)[0].data[1].numpy().reshape(28, 28), cmap='gray') plt.show(block=True)ExperimentsFirst we will calculate mu, logvar fomr two images by passing them through the encoder of our network. We will use them later to perform some experimentswith torch.no_grad(): mu1, logvar1 = model.encode(image1) std1 = torch.exp(0.5*logvar1) mu2, logvar2 = model.encode(image2) std2 = torch.exp(0.5*logvar2)1. 
Sampling epsilon values at regular intervals and reconstructing the images We can see that the reconstructed images are not actually same, i.e. their values differ but the look very similar. This means that our network tries to maps many z values (following a particular gaussian) to a singe image.with torch.no_grad(): recon_images1 = [] for ctr in range(0, 100, 5): eps_val = torch.full_like(mu1, fill_value = ctr * 0.01 ) z_val1 = eps_val.mul(std1).add_(mu1) recon_image1 = model.decode(z_val1) recon_images1.append(recon_image1) print(recon_images1[0] - recon_images1[1]) fig=plt.figure(figsize=(28, 28)) columns = 4 rows = 5 for i in range(1, columns*rows +1): img = recon_images1[i-1].detach().numpy().reshape(28, 28) fig.add_subplot(rows, columns, i) plt.imshow(img, cmap="gray") plt.show()2. Changing just one dimension of the latent variable and reconstructing the imagesThis doesn't affect the constructed images a lot since we have 19 more dimensions which point to the same digit so changing one dimension doesn't create a visual effect.with torch.no_grad(): recon_images1 = [] eps_val = torch.randn_like(mu1) for ctr in range(0, 100, 5): eps_val[7] = ctr * 0.05 * std1[7] + mu1[7] z_val1 = eps_val.mul(std1).add_(mu1) recon_image1 = model.decode(z_val1) recon_images1.append(recon_image1) fig=plt.figure(figsize=(28, 28)) columns = 4 rows = 5 for i in range(1, columns*rows +1): img = recon_images1[i-1].detach().numpy().reshape(28, 28) fig.add_subplot(rows, columns, i) plt.imshow(img, cmap="gray") plt.show()3. Transitioning from one digit image to another digit image in the latent space and reconstructing images at regular intervalsIt is important to note that we have used the same value of epsilon to construct z from the images.eps_any = torch.randn_like(mu1) z1 = eps_any.mul(std1).add_(mu1) z2 = eps_any.mul(std2).add_(mu2) all_recons = [] for i in range(20): z_bet = z1 + torch.full_like(mu1, fill_value = 0.05*i).mul(z2 - z1) recon_image = model.decode(z_bet) all_recons.append(recon_image) fig=plt.figure(figsize=(28, 28)) columns = 4 rows = 5 for i in range(1, columns*rows +1): img = all_recons[i-1].detach().numpy().reshape(28, 28) fig.add_subplot(rows, columns, i) plt.imshow(img, cmap="gray") plt.show()Regression functions demo notebook If you have not already done so, run the following command to install the statsmodels package:`easy_install -U statsmodels`Run the following command to install scipy and scikit-learn:`conda install scipy``conda install scikit-learn` Use the data cleaning package to import a data set:from data_cleaning_utils import import_data dat = import_data('../Data/Test/pool82014-10-02cleaned_Subset.csv')Index(['FID', 'time', 'XCO2Dpp', 'XCH4Dpp', 'TempC', 'ChlAugL', 'TurbFNU', 'fDOMQSU', 'ODOsat', 'ODOmgL', 'pH', 'CH4uM', 'CH4Sat', 'CO2uM', 'CO2Sat'], dtype='object') datetime column name? timeThe following function runs a random model with a random independent variable y and four random covariates, using both the statsmodels and scikit-learn packages. The user can compare output from the two tools.from regression import compare_OLS compare_OLS(dat)The two models produce the same results. There is no standard regression table type output from sklearn. However, sklearn offers greater features for prediction, by incorporating machine learning functionality. For that reason, we will likely wish to use both packages, for different purposes. 
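As a quick check of that equivalence, here is a small self-contained sketch on synthetic data (not part of the pool data set) showing both libraries recovering the same coefficients:

```python
# Fit the same OLS model with statsmodels and scikit-learn on synthetic data.
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
y = 2.0 + 1.5 * X[:, 0] - 0.7 * X[:, 1] + rng.normal(scale=0.1, size=200)

ols_fit = sm.OLS(y, sm.add_constant(X)).fit()
lr_fit = LinearRegression().fit(X, y)

print(ols_fit.params)                   # [intercept, b1, b2] with full summary available
print(lr_fit.intercept_, lr_fit.coef_)  # same estimates, no regression table
```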
The `user_model` function prompts the user to input a model formula for an OLS regression, then runs the model in `statsmodel`, and outputs model results and a plot of y data vs. model fitted values.At the prompt, you may either input your own model formula, or copy and paste the following formula as an example:`CO2uM ~ pH + TempC + ChlAugL`%matplotlib inline from regression import user_model user_model(data=dat) %matplotlib inline import pandas as pd from regression import plot_pairs plot_pairs(data=dat[['XCO2Dpp', 'XCH4Dpp', 'TempC', 'ChlAugL', 'TurbFNU', 'fDOMQSU', 'ODOmgL', 'pH', 'CH4uM', 'CO2uM']], minCorr=0.1, maxCorr=0.95) dat.columns dat.shape[1]Datafactory Load local filesfrom kloppy import datafactory dataset = datafactory.load( event_data="../../kloppy/tests/files/datafactory_events.json", # Optional arguments coordinates="datafactory", event_types=["shot", "pass"] ) dataset.to_pandas().head()Data structuresFor a refresher on object-oriented programming, see [Object-oriented programming](https://github.com/parrt/msds501/blob/master/notes/OO.ipynb). A simple set implementationSets in Python can be specified with set notation:s = {1,3,2,9}Or with by creating a `set` object and assigning it to a variable then manually adding elements:s = set() s.add(1) s.add(3)We can build our own set object implementation by creating a class definition:class MySet: def __init__(self): self.elements = [] def add(self, x): if x not in self.elements: self.elements.append(x) s = MySet() s.add(3) # same as MySet.add(a,3) s.add(3) s.add(2) s.add('cat') s.elements from lolviz import * objviz(s)**Question**: How expensive is it to add an element to a set with this implementation? ExerciseAdd a method called `hasmember()` that returns true or false according to whether parameter `x` is a member of the set.class MySet: def __init__(self): self.elements = [] def add(self, x): if x not in self.elements: self.elements.append(x) def hasmember(self, x): return x in self.elements s = MySet() s.add(3) # same as MySet.add(a,3) s.add(3) s.add(2) s.add('cat') s.hasmember(3), s.hasmember(99)Linked lists -- the gateway drugWe've studied arrays/lists that are built into Python but they are not always the best kind of list to use. Sometimes, we are inserting and deleting things from the head or middle of the list. If we do this in lists made up of contiguous cells in memory, we have to move a lot of cells around to make room for a new element or to close a hole made by a deletion. Most importantly, linked lists are the degenerate form of a general object graph. So, it makes sense to start with the simple versions and move up to general graphs.Linked lists allow us to efficiently insert and remove things anywhere we want, at the cost of more memory.A linked list associates a `next` pointer with each `value`. We call these things *nodes* and here's a simple implementation for node objects:class LLNode: def __init__(self, value, next=None): self.value = value self.next = next head = LLNode('tombu') callsviz(varnames='head') head = LLNode('parrt', head) callsviz(varnames='head') head = LLNode("xue", head) callsviz(varnames='head')Walk listTo walk a list, we use the notion of a *cursor*, which we can think of as a finger that moves along a data structure from node to node. 
We initialize the cursor to point to the first node of the list, the head, and then walk the cursor through the list via the `next` fields:p = head while p is not None: print(p.value) p = p.nextxue parrt tombu**Question**: How fast can we walk the linked list? ExerciseModify the walking code so that it lives in a method of `LLNode` called `exists(self, x)` that looks for a node with value `x` starting at `self`. If we test with `head.exists('parrt')` then `self` would be our global `head` variable. Have the function return true if `x` exists in the list, else return false. You can test it with:```pythonhead = LLNode('tombu')head = LLNode('parrt', head)head = LLNode("xue", head)head.exists('parrt'), head.exists('part')```class LLNode: def __init__(self, value, next=None): self.value = value self.next = next def exists(self, x): p = self # start looking at this node while p is not None: if x==p.value: return True p = p.next return False head = LLNode('tombu') head = LLNode('parrt', head) head = LLNode("xue", head) head.exists('parrt'), head.exists('part')Insertion at headIf we want to insert an element at the front of a linked list, we create a node to hold the value and set its `next` pointer to point to the old `head`. Then we have the `head` variable point at the new node. Here is the sequence. **Create new node**x = LLNode('mary') callviz(varnames=['head','x'])**Make next field of new node point to head**x.next = head callviz(varnames=['head','x'])**Make head point at new node**head = x callviz(varnames=['head','x'])Deletion of node# to delete xue, make previous node skip over xue xue = head.next callviz(varnames=['head','x','xue']) head.next = xue.next callviz(varnames=['head','x'])Notice that `xue` still points at the node but we are going to ignore that variable from now on. Moving from the head of the list, we still cannot see the node with `'xue'` in it.head.next = xue.next callviz(varnames=['head','x','xue'])ExerciseGet a pointer to the node with value `tombu` and then delete it from the list using the same technique we just saw.before_tombu = head.next callviz(varnames=['head','x','before_tombu']) before_tombu.next = None callviz(varnames=['head','x','before_tombu'])Binary treesThe tree data structure is one of the most important in computer science and is extremely common in data science as well. Decision trees, which form the core of gradient boosting machines and random forests (machine learning algorithms), are naturally represented as trees in memory. When we process HTML and XML files, those are generally represented by trees. For example:```xml Everyday Italian Giada De Laurentiis 2005 30.00 Learning XML 2003 39.95 ```We're going to look at a simple kind of tree that has at most two children: a *binary tree*. A node that has no children is called a *leaf* and non-leaves are called *internal nodes*.In general, trees with $n$ nodes have $n-1$ edges. Each node has a single incoming edge and the root has none.Nodes have *parents* and *children* and *siblings* (at the same level).Sometimes nodes have links back to their parents for programming convenience reasons. 
That would make it a graph not a tree but we still consider it a tree.class Tree: def __init__(self, value, left=None, right=None): self.value = value self.left = left self.right = right root = Tree('parrt') root.left = Tree('mary') root.right = Tree('april') treeviz(root) root = Tree('parrt', Tree('mary'), Tree('april')) treeviz(root) root = Tree('parrt') mary = Tree('mary') april = Tree('april') jim = Tree('jim') sri = Tree('sri') mike = Tree('mike') root.left = mary root.right = april mary.left = jim mary.right = mike april.right = sri treeviz(root)Exercise Create a class definition for `NTree` that allows arbitrary numbers of children. (Use a list for field `children` rather than `left` and `right`.) The constructor should init an empty children list. Test your code using:```pythonfrom lolviz import objvizroot2 = NTree('parrt')mary = NTree('mary')april = NTree('april')jim = NTree('jim')sri = NTree('sri')mike = NTree('mike')root2.addchild(mary)root2.addchild(jim)root2.addchild(sri)sri.addchild(mike)sri.addchild(april)objviz(root2)``` Solutionclass NTree: def __init__(self, value): self.value = value self.children = [] def addchild(self, child): if isinstance(child, NTree): self.children.append(child) root2 = NTree('parrt') mary = NTree('mary') april = NTree('april') jim = NTree('jim') sri = NTree('sri') mike = NTree('mike') root2.addchild(mary) root2.addchild(jim) root2.addchild(sri) sri.addchild(mike) sri.addchild(april) objviz(root2)Walking treesWalking a tree is a matter of moving a cursor like we did with the linked lists above. The goal is to visit each node in the tree. We start out by having the cursor point at the root of the tree and then walk downwards until we hit leaves, and then we come back up and try other alternatives. A good physical analogy: imagine a person (cursor) from HR needing to speak (visit) each person in a company starting with the president/CEO. Here's a sample org chart: The general visit algorithm starting at node `p` is meet with `p` then visit each direct report. Then visit all of their direct reports, one level of the tree at a time. The node visitation sequence would be A,B,C,F,H,J,... This is a *breadth-first search* of the tree and easy to describe but a bit more work to implement that a *depth-first search*. Depth first means visiting a person then visit their first direct report and that person's direct report etc... until you reach a leaf node. Then back up a level and move to next direct report. That visitation sequence is A,B,C,D,E,F,G,H,I,J,K,L.If you'd like to start at node B, not A, what is the procedure? The same, of course. So visiting A means, say, printing `A` then visiting B. Visiting B means visiting C, and when that completes, visiting F, etc... The key is that the procedure for visiting a node is exactly the same regardless of which node you start with. This is generally true for any self-similar data structure like a tree. Another easy way to think about binary tree visitation in particular is positioning yourself in a room with a bunch of doors as choices. Each door leads to other rooms, which might also have doors leading to other rooms. We can think of a room as a node and doors as pointers to other nodes. Each room is identical and has 0, 1, or 2 doors (for a binary tree). At the root node we might see two choices and, to explore all nodes, we can visit each door in turn. 
Let's go left:After exploring all possible rooms by taking the left door, we come all the way back out to the root room and try the next alternative on the right: Algorithmically what were doing in each room is```procedure visit room: if left door exists, visit rooms accessible through left door if right door exists, visit rooms accessible through right door ```Or in code notation:```pythondef visit(room): if room.left: visit(room.left) if room.right: visit(room.right)```This mechanism works from any room. Imagine waking up and finding yourself in a room with two doors. You have no idea whether you are at the root or somewhere in the middle of a labyrinth (maze) of rooms.This approach is called *backtracking*.Let's code this up but make a regular function not a method of the tree class to keep things simple. Let's look at that tree again:treeviz(root) def walk(t): "Depth-first walk of binary tree" if t is None: return # if t.left is None: callsviz(varnames=['t']).view() print(t.value) # "visit" or process this node walk(t.left) # walk into the left door walk(t.right) # after visiting all those, enter right door walk(root)parrt mary jim mike april sriThat is a *recursive* function, meaning that `walk` calls itself. It's really no different than the recurrence relations we use in mathematics, such as the gradient descent recurrence:$x_{t+1} = x_t - \eta f'(x_t)$Variable $x$ is a function of previous incarnations of itself.def fact(n): print(f"fact({n})") if n==0: return 1 return n * fact(n-1) fact(10)fact(10) fact(9) fact(8) fact(7) fact(6) fact(5) fact(4) fact(3) fact(2) fact(1) fact(0)Don't let the recursion scare you, just pretend that you are calling a different function or that you are calling the same function except that it is known to be correct. We call that the "recursive leap of faith." (See [Fundamentals of Recursion](https://www2.cs.duke.edu/courses/cps006/spring03/forbes/inclass/recursion.pdf),Although that one is using C++ not Python.)As the old joke goes: "*To truly understand recursion, you must first understand recursion.*"The order in which we reach (enter/exit) each node during the search is always the same for a given search strategy, such as depth first search. Here is a visualization from Wikipedia:We always try to go as deep as possible before exploring siblings.Now, notice the black dots on the traversal. That signifies processing or "visiting" a node and in this case is done before visiting the children. When we process a node and then it's children, we call that a *preorder traversal*. If we process a node after walking the children, we call it a *post-order traversal*:In code, that just means switching the processing step two after the walk of the children:def walk(t): if t is None: return walk(t.left) walk(t.right) print(t.value) # process after visiting children walk(root)jim mike mary sri april parrtIn both cases we are performing a *depth-first walk* of the tree, which means that we are immediately seeking the leaves rather than siblings. A depth first walk scans down all of the left child fields of the nodes until it hits a leaf and then goes back up a level, looking for children at that level.In contrast, a *breadth-first walk* processes all children before looking at grandchildren. This is a less common walk but, for our tree, would be the sequence parrt, mary, april, jim, mike, sri. In a sense, breadth first processes one level of the tree at a time: ExerciseAlter the depth-first recursive tree walk above to sum the values in a binary tree. 
Have `walk()` return the sum of a node's value and all it childrens' values. Test with:```pythona = Tree(3)b = Tree(5)c = Tree(10)d = Tree(9)e = Tree(4)f = Tree(1)a.left = ba.right = cb.left = db.right = ee.right = ftreeviz(a)print(walk(a), walk(b), walk(c))```class Tree: def __init__(self, value, left=None, right=None): self.value = value self.left = left self.right = right def walk(t:Tree) -> int: if t is None: return 0 return t.value + walk(t.left) + walk(t.right) a = Tree(3) b = Tree(5) c = Tree(10) d = Tree(9) e = Tree(4) f = Tree(1) a.left = b a.right = c b.left = d b.right = e e.right = f treeviz(a) print(walk(a), walk(b), walk(c))32 19 10GraphsTrees are actually a subset of the class of directed, acyclic graphs. If we remove the acyclic restriction and the restriction that nodes have a single incoming edge, we get a general, directed graph. These are also extremely common in computer science and are used to represent graphs of users in a social network, locations on a map, or a graph of webpages, which is how Google does page ranking. graphvizYou might find it useful to display graphs visually and [graphviz](https://www.graphviz.org/) is an excellent way to do that. Here's an exampleimport graphviz as gv gv.Source(""" digraph G { node [shape=box penwidth="0.6" margin="0.0" fontname="Helvetica" fontsize=10] edge [arrowsize=.4 penwidth="0.6"] rankdir=LR; ranksep=.25; cat->dog dog->cat dog->horse dog->zebra horse->zebra zebra->llama } """)Once again, it's very convenient to represent a node in this graph as an object, which means we need a class definition:class GNode: def __init__(self, value): self.value = value self.edges = [] # outgoing edges def connect(self, other): self.edges.append(other) cat = GNode('cat') dog = GNode('dog') horse = GNode('horse') zebra = GNode('zebra') llama = GNode('llama') cat.connect(dog) dog.connect(cat) dog.connect(horse) dog.connect(zebra) horse.connect(zebra) zebra.connect(llama) objviz(cat)Walking graphsWalking a graph (depth-first) is just like walking a tree in that we use backtracking to try all possible branches out of every node until we have reached all reachable nodes. When we run into a dead end, we back up to the most recently available on visited path and try that. That's how you get from the entrance to the exit of a maze. The only difference between walking a tree and walking a graph is that we have to watch out for cycles when walking a graph, so that we don't get stuck in an infinite loop. We leave a trail of breadcrumbs or candies or string to help us keep track of where we have visited and where we have not. If we run into our trail, we have hit a *cycle* and must also backtrack to avoid an infinite loop. 
This is a [depth first search](https://en.wikipedia.org/wiki/Tree_traversalDepth-first_search).Here's a nice [visualization website for graph walking](http://algoanim.ide.sk/index.php?page=showanim&id=47).In code, here is how we perform a depth-frist search on a graph:def walk(g, visited): "Depth-first walk of a graph" if g is None or g in visited: return visited.add(g) # mark as visited print(g.value) # process before visiting outgoing edges for node in g.edges: walk(node, visited) # walk all outgoing edge targets walk(cat, set())cat dog horse zebra llamaWhere we start the walk of the graph matters:walk(llama, set()) walk(horse, set())horse zebra llamaOperator overloading (Note: We *overload* operators but *override* methods in a subclass definition)Python allows class definitions to implement functions that are called when standard operator symbols such as `+` and `/` are applied to objects of that type. This is extremely useful for mathematical libraries such as numpy, but is often abused. Note that you could redefine subtraction to be multiplication when someone used the `-` sign. (Yikes!)Here's an extension to `Point` that supports `+` for `Point` addition:import numpy as np class Point: def __init__(self, x, y): self.x = x self.y = y def distance(self, other): return np.sqrt( (self.x - other.x)**2 + (self.y - other.y)**2 ) def __add__(self,other): x = self.x + other.x y = self.y + other.y return Point(x,y) def __str__(self): return f"({self.x},{self.y})" p = Point(3,4) q = Point(5,6) print(p, q) print(p + q) # calls p.__add__(q) or Point.__add__(p,q) print(Point.__add__(p,q))(3,4) (5,6) (8,10) (8,10)ExerciseAdd a method to implement the `-` subtraction operator for `Point` so that the following code works:```pythonp = Point(5,4)q = Point(1,5)print(p, q)print(p - q)```import numpy as np class Point: def __init__(self, x, y): self.x = x self.y = y def distance(self, other): return np.sqrt( (self.x - other.x)**2 + (self.y - other.y)**2 ) def __add__(self,other): x = self.x + other.x y = self.y + other.y return Point(x,y) def __sub__(self,other): x = self.x - other.x y = self.y - other.y return Point(x,y) def __str__(self): return f"({self.x},{self.y})" p = Point(5,4) q = Point(1,5) print(p, q) print(p - q)(5,4) (1,5) (4,-1)The next cell will get a ~65 MB data file 'sequence.index', you only need to run the cell once!rm sequence.index 2>/dev/null !wget -nd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index -O sequence.index--2019-09-26 09:59:06-- ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index => ‘sequence.index’ Resolving ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)... 172.16.17.32 Connecting to ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)|172.16.17.32|:21... connected. Logging in as anonymous ... Logged in! ==> SYST ... done. ==> PWD ... done. ==> TYPE I ... done. ==> CWD (1) /vol1/ftp/historical_data/former_toplevel ... done. ==> SIZE sequence.index ... 67069489 ==> PASV ... done. ==> RETR sequence.index ... done. Length: 67069489 (64M) (unauthoritative) sequence.index 100%[===================>] 63.96M 2.07MB/s in 29s 2019-09-26 09:59:37 (2.24 MB/s) - ‘sequence.index’ saved [67069489]Interfacing with R# !conda install rpy2 !pip install rpy2 import os from IPython.display import Image import rpy2.robjects import rpy2.robjects as robjects ## import rpy2.robjects as robjects ## this doesnt work so rpy2.situation works! 
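## Note: rpy2.situation (imported just below) only reports information about the local R
## installation; the calls used in this cell (robjects.r, robjects.r.assign, ...) come from
## rpy2.robjects, which is already imported above. pandas.rpy.common, used further down,
## only exists in old pandas releases; on newer versions rpy2.robjects.pandas2ri plays that role.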
import rpy2.situation as robjects #import rpy2.robjects.lib.ggplot2 as ggplot2 from rpy2.robjects.functions import SignatureTranslatedFunction import pandas as pd import pandas.rpy.common as pd_common read_delim = robjects.r('read.delim') seq_data = read_delim('sequence.index', header=True, stringsAsFactors=False) #In R: # seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE) print('This data frame has %d columns and %d rows' % (seq_data.ncol, seq_data.nrow)) print(seq_data.colnames) #In R: # print(colnames(seq.data)) # print(nrow(seq.data)) # print(ncol(seq.data)) print('Columns in Python %d ' % robjects.r.ncol(seq_data)[0]) #access some functions as_integer = robjects.r('as.integer') match = robjects.r.match my_col = match('READ_COUNT', seq_data.colnames)[0] # Vector returned print('Type of read count before as.integer: %s' % seq_data[my_col - 1].rclass[0]) seq_data[my_col - 1] = as_integer(seq_data[my_col - 1]) print('Type of read count after as.integer: %s' % seq_data[my_col - 1].rclass[0]) my_col = match('BASE_COUNT', seq_data.colnames)[0] # Vector returned seq_data[my_col - 1] = as_integer(seq_data[my_col - 1]) my_col = match('CENTER_NAME', seq_data.colnames)[0] seq_data[my_col - 1] = robjects.r.toupper(seq_data[my_col - 1]) robjects.r.assign('seq.data', seq_data) robjects.r('print(c("Column names in R: ",colnames(seq.data)))') robjects.r('seq.data <- seq.data[seq.data$WITHDRAWN==0, ]') #Lets remove all withdrawn sequences robjects.r("seq.data <- seq.data[, c('STUDY_ID', 'STUDY_NAME', 'CENTER_NAME', 'SAMPLE_ID', 'SAMPLE_NAME', 'POPULATION', 'INSTRUMENT_PLATFORM', 'LIBRARY_LAYOUT', 'PAIRED_FASTQ', 'READ_COUNT', 'BASE_COUNT', 'ANALYSIS_GROUP')]") #Lets shorten the dataframe #Population as factor robjects.r('seq.data$POPULATION <- as.factor(seq.data$POPULATION)') ggplot2.theme = SignatureTranslatedFunction(ggplot2.theme, init_prm_translate = {'axis_text_x': 'axis.text.x'}) bar = ggplot2.ggplot(seq_data) + ggplot2.geom_bar() + ggplot2.aes_string(x='CENTER_NAME') + ggplot2.theme(axis_text_x=ggplot2.element_text(angle=90, hjust=1)) robjects.r.png('out.png') bar.plot() dev_off = robjects.r('dev.off') dev_off() Image(filename='out.png') #Get Yoruba and CEU robjects.r('yri_ceu <- seq.data[seq.data$POPULATION %in% c("YRI", "CEU") & seq.data$BASE_COUNT < 2E9 & seq.data$READ_COUNT < 3E7, ]') yri_ceu = robjects.r('yri_ceu') scatter = ggplot2.ggplot(yri_ceu) + ggplot2.aes_string(x='BASE_COUNT', y='READ_COUNT', shape='factor(POPULATION)', col='factor(ANALYSIS_GROUP)') + ggplot2.geom_point() robjects.r.png('out.png') scatter.plot() dev_off = robjects.r('dev.off') dev_off() Image(filename='out.png') pd_yri_ceu = pd_common.load_data('yri_ceu') print(type(pd_yri_ceu)) pd_yri_ceu del pd_yri_ceu['PAIRED_FASTQ'] no_paired = pd_common.convert_to_r_dataframe(pd_yri_ceu) robjects.r.assign('no.paired', no_paired) robjects.r("print(colnames(no.paired))")[1] "STUDY_ID" "STUDY_NAME" "CENTER_NAME" [4] "SAMPLE_ID" "SAMPLE_NAME" "POPULATION" [7] "INSTRUMENT_PLATFORM" "LIBRARY_LAYOUT" "READ_COUNT" [10] "BASE_COUNT" "ANALYSIS_GROUP"Interactive outputs rendered by nteract# render application/vnd.plotly.v1+json0.0 Download_data%load_ext autoreload %autoreload 2Librariesimport final_project.utils.paths as path import janitor import matplotlib.pyplot as plt import pandas as pd import seaborn as snsSpecify input and output filescovid_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" 
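# Note: the destination path below comes from the project's paths helper; the curl flag is
# written '- o' (with a space) instead of '-o', which is why the cell output reports
# "curl: option -: is unknown" and no file is downloaded.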
covid_file = path.data_raw_dir("time_series_covid19_confirmed_global.csv") !curl {covid_url} - o {covid_file}curl: option -: is unknown curl: try 'curl --help' or 'curl --manual' for more informationNGC Catalogのコンテナを実行する--PyTorch-Singularity---構築したOpenHPC環境でNGC Catalogの[PyTorchコンテナ](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)を実行します。 前提条件このNotebookを実行するための前提条件を満たしていることを確認します。 以下のことを前提条件とします。* 構築済のOpenHPC環境がある* OpenHPC環境のマスターノードに対してSSHでログインできる* Singularityがロードされる設定になっている* SlurmのGeneric Resource(GRES)としてGPUが登録されていること* GPUがCUDA Compute Capability 5.2以上であること> [AWS P2](https://aws.amazon.com/jp/ec2/instance-types/p2/)などのNVIDIA K80は[Compute Capability](https://developer.nvidia.com/cuda-gpus)が3.7なので、このNotebookで利用しているコンテナイメージを実行することができません。 マスターノードに対して SSH でログインできることを確認します。マスターノードのIPアドレスを指定してください。# (例) # master_address = '172.30.XXX.xxx' master_address =SSHでログインするユーザ名を指定してください。# (例) # user = 'vcp' # user = 'user00' user =必要であればSSHの秘密鍵を指定してください。# (例) # ssh_identity = '~/.ssh/id_rsa'SSHでログインする際の引数を、変数に格納しておきます。# ユーザ名とホスト名 target = f'{user}@{master_address}' print(target) # SSHのコマンドライン引数 ssh_opts = f'-i {ssh_identity}' if 'ssh_identity' in vars() else '' print(ssh_opts)マスターノードに対してSSHでログインしてコマンドを実行してみます。!ssh {ssh_opts} {target} hostnameSingularityがロードされていることを確認します。!ssh {ssh_opts} {target} module is-loaded singularity上のセルがエラーになった場合は Singularity をロードする設定が行われていません。「061-Singularityのロード.ipynb」を実行して Singularity がロードされるよう設定してください。 SlurmのGeneric Resource(GRES)としてGPUが登録されていることを確認します。!ssh {ssh_opts} {target} sinfo -N --Format=NodeHost,Gres | grep -w gpu上のセルがエラーになった場合はSlurmにGPUがGRESとして登録されていません。「032-設定ファイルの編集-GRESの登録.ipynb」を実行してGRESの設定をしてください。 PyTorchコンテナの実行PyTorchコンテナでMNISTを実行してみます。 コンテナイメージの取得 OpenHPC環境でコンテナを実行するにはSingularityを利用します。はじめにNGCカタログから[PyTorch](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)のコンテナイメージを取得します。> 作業領域として `/tmp` に 30GB程度の空き領域が必要となります。他のディレクトリを作業領域として利用する場合は環境変数`SINGULARITY_TMPDIR`を指定してください。詳細についてはSingularityのドキュメント [Build Customization](https://singularity.lbl.gov/build-environmentenvironment-variables) を参照してください。> > またイメージの取得、変換には30分以上かかります。ngc_version = '21.06' sif_file = f'pytorch_{ngc_version}-py3.sif' !ssh {ssh_opts} {target} bash -l -c \ "'test -f {sif_file} || singularity pull docker://nvcr.io/nvidia/pytorch:{ngc_version}-py3'"準備MNISTのデータやスクリプトを準備します。 まず、データやスクリプトを配置するディレクトリを作成します。work_dir = 'pytorch' !ssh {ssh_opts} {target} mkdir -p {work_dir}データをダウンロードするためのスクリプトを配置します。!scp {ssh_opts} template/pytorch/download_mnist.py {target}:{work_dir}スクリプトを実行し、データをダウンロードします。> NGCカタログ 21.06 の PyTorch のコンテナに含まれる torchvision では MNIST のデータセットをダウンロードに失敗する可能性があります。そこで、このNotebookでは`venv`で torchvision 0.10.0以降の新しいパッケージをインストールした環境を用意し、そこでデータセットのダウンロードを行います。PyTorchのコンテナとは別環境を `venv` で用意するのは、コンテナへの影響を避けるためです。out = !ssh {ssh_opts} {target} mktemp -d torchvision_work = out[0] print(torchvision_work) !ssh {ssh_opts} {target} python3 -m venv {torchvision_work} !ssh {ssh_opts} {target} {torchvision_work}/bin/pip install torchvision six !ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && {torchvision_work}/bin/python download_mnist.py'"データが取得できたことを確認します。!ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && ls -lR ../data'"`venv` で作った環境は不要なので削除します。!rm -rf {torchvision_work}GitHubの [pytorch/examples](https://github.com/pytorch/examples) からMNISTの[スクリプト](https://github.com/pytorch/examples/blob/master/mnist/main.py)を取得します。mnist_url = 'https://raw.githubusercontent.com/pytorch/examples/master/mnist/main.py' !ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && curl -L -o 
mnist_classify.py {mnist_url}'"取得したスクリプトの内容を先頭部分を表示してみます。mnist_url = 'https://raw.githubusercontent.com/pytorch/examples/master/mnist/main.py' !ssh {ssh_opts} {target} head {work_dir}/mnist_classify.pyコンテナからGPUが利用できることを確認するSingularityで実行したコンテナ環境からGPUを利用できることを確認します。 GPUを利用できるかをチェックするスクリプトを配置します。!scp {ssh_opts} template/pytorch/check_gpu.py {target}:{work_dir}GPU利用の可否をチェックするスクリプトを実行します。次のセルを実行してエラーとならないことを確認してください。!ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && srun -l -N 1 \ singularity exec --nv ~/{sif_file} python check_gpu.py'"ジョブの実行MNISTのスクリプトをSlurmのジョブとして実行します。 ジョブの実行スクリプトを作成します。from tempfile import TemporaryDirectory from pathlib import Path with TemporaryDirectory() as workdir: batch_file = Path(workdir) / 'pytorch_mnist.job' with batch_file.open(mode='w') as f: f.write(f'''#!/bin/bash #SBATCH -J pytorch-mnist # create a short name for your job #SBATCH -o pytorch-mnist.%j.out # Name of stdout output file (%j expands to jobId) #SBATCH -N 1 # Total number of nodes requested #SBATCH -n 1 # Total number of across all nodes #SBATCH --gres=gpu:1 # number of gpus per node #SBATCH -t 00:10:00 # Run time (hh:mm:ss) cd $HOME/{work_dir} singularity exec --nv $HOME/{sif_file} python3 mnist_classify.py --epochs=3 ''') !cat {batch_file} !scp {ssh_opts} {str(batch_file)} {target}:{work_dir}ジョブを実行する前のキューの状態を確認します。!ssh {ssh_opts} {target} squeueノードのGPU利用状況を確認します。!ssh {ssh_opts} {target} sinfo --Node --Format=NodeHost,Gres,GresUsedジョブを実行します。!ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && sbatch pytorch_mnist.job'"ジョブの実行状況を確認します。!ssh {ssh_opts} {target} squeueノードのGPU利用状況を確認します。`GRES_USED` の欄でノードのGPU利用状況を確認してください。!ssh {ssh_opts} {target} sinfo --Node --Format=NodeHost,Gres,GresUsedジョブが完了するまで数分かかります。ジョブ実行中のCPU, メモリ, GPUなどの利用状況は VCC の Grafana で確認することができます。 ジョブの完了後に次のセルを実行してください。ジョブの出力結果が確認できます。!ssh {ssh_opts} {target} bash -l -c \ "'cd {work_dir} && tail pytorch-mnist*.out'"100 numpy exercises with hintThis is a collection of exercises that have been collected in the numpy mailing list, on stack overflow and in the numpy documentation. The goal of this collection is to offer a quick reference for both old and new users but also to provide a set of exercises for those who teach.If you find an error or think you've a better way to solve some of them, feel free to open an issue at 1. Import the numpy package under the name `np` (★☆☆) (**hint**: import … as …)import numpy as np2. Print the numpy version and the configuration (★☆☆) (**hint**: np.\_\_version\_\_, np.show\_config)print(np.__version__) np.show_config1.16.43. Create a null vector of size 10 (★☆☆) (**hint**: np.zeros)a = np.zeros(10) print (a)[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]4. How to find the memory size of any array (★☆☆) (**hint**: size, itemsize)np.size(a)5. How to get the documentation of the numpy add function from the command line? (★☆☆) (**hint**: np.info)np.info(np.add)add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) Add arguments element-wise. Parameters ---------- x1, x2 : array_like The arrays to be added. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. 
where : array_like, optional Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. Returns ------- add : ndarray or scalar The[...]6. Create a null vector of size 10 but the fifth value which is 1 (★☆☆) (**hint**: array\[4\])b = np.zeros(10) b[4] = 1 print(b)[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]7. Create a vector with values ranging from 10 to 49 (★☆☆) (**hint**: np.arange)c = np.arange(10, 50) print(c)[10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49]8. Reverse a vector (first element becomes last) (★☆☆) (**hint**: array\[::-1\])c[::-1]9. Create a 3x3 matrix with values ranging from 0 to 8 (★☆☆) (**hint**: reshape)d = np.arange(0, 9) d = d.reshape(3,3) print(d)[[0 1 2] [3 4 5] [6 7 8]]10. Find indices of non-zero elements from \[1,2,0,0,4,0\] (★☆☆) (**hint**: np.nonzero)e = [1,2,0,0,4,0] np.nonzero(e)11. Create a 3x3 identity matrix (★☆☆) (**hint**: np.eye)f = np.identity(3) print(f)[[1. 0. 0.] [0. 1. 0.] [0. 0. 1.]]12. Create a 3x3x3 array with random values (★☆☆) (**hint**: np.random.random)g = np.random.random((3,3,3)) print(g)[[[0.91935742 0.36980996 0.63239118] [0.701471 0.34652984 0.45266535] [0.88779327 0.85467891 0.6511243 ]] [[0.48847737 0.75460451 0.84924678] [0.43308303 0.38578117 0.49390477] [0.79987427 0.84084545 0.63658596]] [[0.45571064 0.46747478 0.20050871] [0.50084413 0.74153154 0.61159307] [0.98453028 0.13942641 0.02752447]]]13. Create a 10x10 array with random values and find the minimum and maximum values (★☆☆) (**hint**: min, max)h = np.random.random((10,10)) the_min = h.min() the_max = h.max() print(the_min, the_max)0.03438449042130165 0.9968592149203914. Create a random vector of size 30 and find the mean value (★☆☆) (**hint**: mean)i = np.random.random(30) print(i) print(i.mean())[0.98229406 0.76319149 0.01521597 0.12270004 0.09709939 0.63163652 0.84934682 0.41812771 0.37042801 0.1704732 0.92295256 0.8451368 0.2300568 0.47114757 0.11324947 0.0866101 0.76099842 0.6187652 0.72896365 0.04658593 0.37410417 0.67037695 0.26682772 0.15384245 0.66111648 0.65789138 0.21297752 0.53450014 0.68700025 0.78932302] 0.475097992908262515. Create a 2d array with 1 on the border and 0 inside (★☆☆) (**hint**: array\[1:-1, 1:-1\])j = np.ones((7,7)) j[1:-1,1:-1] = 0 print(j)[[1. 1. 1. 1. 1. 1. 1.] [1. 0. 0. 0. 0. 0. 1.] [1. 0. 0. 0. 0. 0. 1.] [1. 0. 0. 0. 0. 0. 1.] [1. 0. 0. 0. 0. 0. 1.] [1. 0. 0. 0. 0. 0. 1.] [1. 1. 1. 1. 1. 1. 1.]]16. How to add a border (filled with 0's) around an existing array? (★☆☆) (**hint**: np.pad)np.pad(j)17. What is the result of the following expression? (★☆☆) (**hint**: NaN = not a number, inf = infinity) ```python0 * np.nannp.nan == np.nannp.inf > np.nannp.nan - np.nannp.nan in set([np.nan])0.3 == 3 * 0.1``` 18. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal (★☆☆) (**hint**: np.diag)k = np.diag(1+np.arange(4),k=-1) print(k)[[0 0 0 0 0] [1 0 0 0 0] [0 2 0 0 0] [0 0 3 0 0] [0 0 0 4 0]]19. Create a 8x8 matrix and fill it with a checkerboard pattern (★☆☆) (**hint**: array\[::2\])l = np.ones((8,8)) l[1::2, ::2] = 0 l[fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b, ::1] = 0 print(l)[[1. 1. 1. 1. 1. 1. 1. 1.] [0. 1. 0. 1. 0. 1. 0. 1.] [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]]20. 
Consider a (6,7,8) shape array, what is the index (x,y,z) of the 100th element? (**hint**: np.unravel_index)print(np.unravel_index(99,(6,7,8)))(1, 5, 3)21. Create a checkerboard 8x8 matrix using the tile function (★☆☆) (**hint**: np.tile) 22. Normalize a 5x5 random matrix (★☆☆) (**hint**: (x - mean) / std)m = np.random.random((5,5)) print(m) n = np.mean(m) print(n) (m - n) / np.std(m)[[0.29477212 0.6941612 0.45343631 0.69422264 0.42338902] [0.93600978 0.65719029 0.44480287 0.5678587 0.59252591] [0.99117736 0.85686279 0.89826968 0.58580198 0.20813812] [0.73534337 0.45985061 0.48890976 0.13004943 0.54380639] [0.41840839 0.17818111 0.77969998 0.10013925 0.22157688]] 0.534183358111702923. Create a custom dtype that describes a color as four unsigned bytes (RGBA) (★☆☆) (**hint**: np.dtype) 24. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product) (★☆☆) (**hint**: np.dot | @)a = np.random.random((5,3)) b = np.random.random((3,4)) a @ b25. Given a 1D array, negate all elements which are between 3 and 8, in place. (★☆☆) (**hint**: >, <=)c = np.arange(23) print(c) d = c[(c<3) | (c >= 8)] *= -1 print(d)Different ways to load an input graphWe recommend using the GML graph format to load a graph. You can also use the DOT format, which requires additional dependencies (either pydot or pygraphviz). DoWhy supports both loading a graph as a string, or as a file (with the extensions 'gml' or 'dot').Below is an example showing the different ways of loading the same graph.import os, sys import random sys.path.append(os.path.abspath("../../")) import numpy as np import pandas as pd import dowhy from dowhy.do_why import CausalModel from IPython.display import Image, displayI. Generating dummy dataWe generate some dummy data for three variables: X, Y and Z.z=[i for i in range(10)] random.shuffle(z) df = pd.DataFrame(data = {'Z': z, 'X': range(0,10), 'Y': range(0,100,10)}) dfII. 
Loading GML or DOT graphs GML format# With GML string model=CausalModel( data = df, treatment='X', outcome='Y', graph="""graph[directed 1 node[id "Z" label "Z"] node[id "X" label "X"] node[id "Y" label "Y"] edge[source "Z" target "X"] edge[source "Z" target "Y"] edge[source "X" target "Y"]]""" ) model.view_model() display(Image(filename="causal_model.png")) # With GML file model=CausalModel( data = df, treatment='X', outcome='Y', graph="example_graphs/simple_graph_example.gml" ) model.view_model() display(Image(filename="causal_model.png"))Model to find the causal effect of treatment X on outcome YDOT format# With DOT string model=CausalModel( data = df, treatment='X', outcome='Y', graph="digraph {Z -> X;Z -> Y;X -> Y;}" ) model.view_model() from IPython.display import Image, display display(Image(filename="causal_model.png")) # With DOT file model=CausalModel( data = df, treatment='X', outcome='Y', graph="example_graphs/simple_graph_example.dot" ) model.view_model() display(Image(filename="causal_model.png"))Model to find the causal effect of treatment X on outcome YData# Val df: val_p = [] val_t = [] path = './final_dataset_stone/val/stone/' for x in os.listdir(path): val_p.append(path+x) val_t.append(1) path = './final_dataset_stone/val/nostone/' for x in os.listdir(path): val_p.append(path+x) val_t.append(0) df_val = pd.DataFrame({'img_path': val_p, 'target': val_t}) print(df_val.shape, np.unique(df_val.target, return_counts=True)) df_val.sample(5) # Val df: train_p = [] train_t = [] path = './final_dataset_stone/train/stone/' for x in os.listdir(path): train_p.append(path+x) train_t.append(1) path = './final_dataset_stone/train/nostone/' for x in os.listdir(path): train_p.append(path+x) train_t.append(0) df_train = pd.DataFrame({'img_path': train_p, 'target': train_t}) print(df_train.shape, np.unique(df_train.target, return_counts=True)) df_train.sample(5)(5254, 2) (array([0, 1]), array([4139, 1115]))DataLoader# для аугментации картинки движком def convrelu(in_channels, out_channels, kernel, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel, padding=padding), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), ) class ResNetUNet_v2(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.dropout = nn.Dropout(0.5) self.conv_last = nn.Conv2d(64, n_class, 1) self.act_last = nn.Tanh() self.support_conv1 = nn.Conv2d(11, 512, 1) # (bath,10+1) --> (batch,512) def forward(self, inp): 
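        # inp is a 2-tuple: inp[0] is the image batch (N, 3, H, W); inp[1] is an 11-dim
        # conditioning vector per sample (8-way one-hot direction + speed + zoom + timestep,
        # built in neural_aug below). support_conv1 lifts that vector to 512 channels and it
        # is added to the ResNet bottleneck (layer4) by broadcasting before decoding.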
x_original = self.conv_original_size0(inp[0]) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(inp[0]) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) cond = self.support_conv1(torch.unsqueeze(torch.unsqueeze(inp[1], 2), 2)) # ([8, 8]) --> Size([8, 512, 1, 1]) layer4 = self.layer4_1x1(layer4+cond) x = self.upsample(layer4) layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) layer0 = self.layer0_1x1(layer0) x = torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x = torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) x = self.dropout(x) out = self.conv_last(x) out = self.act_last(out) return out neural_engine = ResNetUNet_v2(3) neural_engine.load_state_dict(torch.load('../neural_engine/best_models/resunet_v5.pth')) neural_engine = neural_engine.to(device) neural_engine.train(False); def apply_aug(p0, aug): if aug == 0: p = p0.copy() elif aug == 1: p = cv2.rotate(p0, cv2.ROTATE_90_CLOCKWISE) elif aug == 2: p = cv2.rotate(p0, cv2.ROTATE_180) elif aug == 3: p = cv2.rotate(p0, cv2.ROTATE_90_COUNTERCLOCKWISE) elif aug == 4: p = cv2.flip(p0, 1) elif aug == 5: p = cv2.rotate(p0, cv2.ROTATE_90_CLOCKWISE) p = cv2.flip(p, 1) elif aug == 6: p = cv2.rotate(p0, cv2.ROTATE_180) p = cv2.flip(p, 1) elif aug == 7: p = cv2.rotate(p0, cv2.ROTATE_90_COUNTERCLOCKWISE) p = cv2.flip(p, 1) return p def rotate_image(image, angle): image_center = tuple(np.array(image.shape[1::-1]) / 2) rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0) result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR) return result def neural_aug(model,p,d,sp,zoom,n): ''' model - in gpu in eval mode p - tensor of frame with variables in [-1,1] d - direction, one of {1,2,3,4,5,6,7,8} sp - sp in current frame, int/float zoom - zoom in current frame, one of {1} (обучал только для zoom=1) n - number of timestamps, one of {1,2,3,4,5,6,7,8,9,10,11,12,13,14} ''' p = torch.clone(p).to(device) d = F.one_hot(torch.tensor(d-1), num_classes=8) sp = torch.tensor(sp)/100 zoom = torch.tensor(zoom)/15 n = torch.tensor(n/14) dd2 = torch.cat([d, sp.unsqueeze(0), zoom.unsqueeze(0), n.unsqueeze(0)]).unsqueeze(0).float().to(device) with torch.no_grad(): p = model((p.unsqueeze(0),dd2))[0] return p.detach().cpu() #===================================================================================== class Stone_Dataset(Dataset): def __init__(self, df, augs=None): super().__init__() self.df = df self.augs = augs self.transform = A.Compose([A.Normalize(mean=(0.5,), std=(0.5,)), ToTensorV2(transpose_mask=False)]) self.transform_aug = A.Compose([A.augmentations.transforms.ChannelShuffle(p=0.1), A.RandomBrightnessContrast(p=0.1)]) self.d = {0: [1,5], 1: [2,6], 2: [3,7], 3: [4,8]} def __len__(self): return len(self.df) def __getitem__(self, idx): img = cv2.imread(self.df[idx][0])[:,:,::-1] target = self.df[idx][1] if self.augs: aug = np.random.choice(np.arange(8), p=np.array([0.125]*8)) img = apply_aug(img, aug) # neural-engine augmentation if np.random.randint(10)<5: img = self.transform(image=img)['image'] dd = np.random.randint(4) n = np.random.randint(8)+2 img = neural_aug(neural_engine,img,self.d[dd][0],0,1,n) img = 
neural_aug(neural_engine,img,self.d[dd][1],0,1,n) img = (img.permute(1,2,0)+1)/2 img = img.numpy()*255 random_angle = np.random.randint(90) img = rotate_image(img, random_angle) img = self.transform_aug(image=img)['image'] img = self.transform(image=img)['image'] return img, targetModelclass StoneClassifier(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 8, 3, 2, 1) self.conv2 = nn.Conv2d(8, 16, 3, 2, 1) self.conv3 = nn.Conv2d(16, 32, 3, 2, 1) self.fc1 = nn.Linear(32 * 3 * 3, 128) self.fc3 = nn.Linear(128, 2) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = torch.flatten(x, 1) x = F.relu(self.fc1(x)) x = self.fc3(x) x = F.softmax(x, dim=1) return xTraindef train_model(model, train_iterator, val_iterator, loss_func, optimizer, sheduler, params): best_metr = 0 reward_frame_transform = torchvision.transforms.Compose([torchvision.transforms.CenterCrop(24)]) for i in range(params['EPOCHS']): #===========TRAIN============================= time.sleep(0.2) model.train(True) train_loss = 0.0 y_pred = [] y_true = [] for x in enumerate(train_iterator): #tqdm(train_iterator) img = x[1][0].to(params['DEVICE']) target = x[1][1].to(params['DEVICE']) img = reward_frame_transform(img) optimizer.zero_grad() output = model(img) loss = loss_func(output, target) train_loss += loss.item() loss.backward() optimizer.step() y_pred.extend((output[:,1]>0.95).int().detach().cpu().tolist()) y_true.extend(target.detach().cpu().tolist()) f1_macro = round(f1_score(y_true, y_pred, average='macro'),4) train_loss = round(train_loss / len(train_iterator),4) trainloss_ts.append(train_loss) trainmetr_ts.append(f1_macro) current_lr = optimizer.param_groups[0]['lr'] sheduler.step() #===========VAL================================ time.sleep(0.2) model.train(False) val_loss = 0.0 y_pred = [] y_true = [] for x in enumerate(val_iterator): img = x[1][0].to(params['DEVICE']) target = x[1][1].to(params['DEVICE']) img = reward_frame_transform(img) with torch.no_grad(): output = model(img) loss = loss_func(output, target) val_loss += loss.item() y_pred.extend((output[:,1]>0.95).int().detach().cpu().tolist()) y_true.extend(target.detach().cpu().tolist()) f1_macro = round(f1_score(y_true, y_pred, average='macro'),4) val_loss = round(val_loss / len(val_iterator),4) valloss_ts.append(val_loss) valmetr_ts.append(f1_macro) if f1_macro>best_metr: best_metr = f1_macro torch.save(model.state_dict(), 'laggg_stone_classifier.pth') #==========PRINT=========================== print(f'{i+1}/{params["EPOCHS"]}', 'lr:',current_lr,'|', 'train_loss:',trainloss_ts[-1],'|', 'val_loss:',valloss_ts[-1],'|', 'train_metr:',trainmetr_ts[-1],'|', 'val_metr:',valmetr_ts[-1]) params = {'EPOCHS': 30, 'DEVICE': 'cuda:0', 'BATCH': 8} train_data = Stone_Dataset(df_train.values, True) train_dataloader = DataLoader(train_data, batch_size=params['BATCH'], shuffle=True) test_data = Stone_Dataset(df_val.values, False) test_dataloader = DataLoader(test_data, batch_size=params['BATCH'], shuffle=True) img, target = iter(train_dataloader).next() print(target) plot_samples_on_epoch((img+1)/2) model = StoneClassifier().to(params['DEVICE']) model.load_state_dict(torch.load('laggg_stone_classifier_v1.pth')) model = model.to(params['DEVICE']) criterion = nn.CrossEntropyLoss() opt = optim.Adam(model.parameters(), lr=1e-3) scheduler = optim.lr_scheduler.StepLR(optimizer=opt, step_size=10, gamma=0.1) trainloss_ts = [] trainmetr_ts = [] valloss_ts = [] valmetr_ts = [] train_model(model, train_dataloader, 
test_dataloader, criterion, opt, scheduler, params) plt.figure(figsize=(10,3)); plt.plot(trainloss_ts, label='train', linewidth=2) plt.plot(valloss_ts, label='val', linewidth=2) plt.title('CE_loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend() plt.grid() plt.show(); plt.figure(figsize=(10,3)); plt.plot(trainmetr_ts, label='train', linewidth=2) plt.plot(valmetr_ts, label='val', linewidth=2) plt.title('F1_macro') plt.ylabel('metric') plt.xlabel('epoch') plt.legend() plt.grid() plt.show();Validateparams = {'EPOCHS': 30, 'DEVICE': 'cuda:0', 'BATCH': 8} def validate(model, tresh=0.5, confm_normalize=True): y_pred = [] y_true = [] model.eval() reward_frame_transform = torchvision.transforms.Compose([torchvision.transforms.CenterCrop(24)]) for x in enumerate(test_dataloader): img = x[1][0].to(params['DEVICE']) target = x[1][1] y_true.extend(target.tolist()) img = reward_frame_transform(img) with torch.no_grad(): r = model(img)[:,1] r = (r>tresh).int().detach().cpu().tolist() y_pred.extend(r) val_acc = round(accuracy_score(y_true, y_pred),4) val_f1 = round(f1_score(y_true, y_pred, average='macro'),4) print('f1 macro:', val_f1) plot_confusion_matrix(y_true, y_pred, range(2), normalize=confm_normalize) model2 = StoneClassifier() model2.load_state_dict(torch.load('dfomin_stone_classifier_v2.pth')) model2 = model2.to(params['DEVICE']) model3 = StoneClassifier() model3.load_state_dict(torch.load('laggg_stone_classifier_v1.pth')) model3 = model3.to(params['DEVICE']) model1 = StoneClassifier() model1.load_state_dict(torch.load('laggg_stone_classifier_v2.pth')) model1 = model1.to(params['DEVICE']) validate(model2, tresh=0.95) validate(model3, tresh=0.95) validate(model1, tresh=0.95)f1 macro: 0.8442Capstone Project Notebook - Week 3 - Part 2*Author: *This notebook will be mainly used for the capstone projectimport numpy as np import pandas as pdUse the Notebook to build the code to scrape the following Wikipedia page, https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_MThe dataframe will consist of three columns: PostalCode, Borough, and Neighborhoodpostal_codes = pd.read_html('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M', attrs={'class': 'wikitable'}, header='infer')[0] postal_codes.columns = ['PostalCode', 'Borough', 'Neighborhood']Only process the cells that have an assigned borough. Ignore cells with a borough that is Not assigned.postal_codes = postal_codes[postal_codes.Borough != 'Not assigned'].reset_index(drop=True)More than one neighborhood can exist in one postal code area. For example, in the table on the Wikipedia page, you will notice that M5A is listed twice and has two neighborhoods: Harbourfront and Regent Park. These two rows will be combined into one row with the neighborhoods separated with a comma as shown in row 11 in the above table.postal_codes.Neighborhood += ', ' postal_codes = postal_codes.groupby(['PostalCode', 'Borough']).Neighborhood.sum().str.strip(', ').reset_index()If a cell has a borough but a Not assigned neighborhood, then the neighborhood will be the same as the borough. 
So for the 9th cell in the table on the Wikipedia page, the value of the Borough and the Neighborhood columns will be Queen's Park.postal_codes.Neighborhood = [n if n != 'Not Assigned' else b for b, n in postal_codes[['Borough', 'Neighborhood']].values]**Result:**postal_codesIn the last cell of your notebook, use the .shape method to print the number of rows of your dataframe.postal_codes.shapeNow that you have built a dataframe of the postal code of each neighborhood along with the borough name and neighborhood name, in order to utilize the Foursquare location data, we need to get the latitude and the longitude coordinates of each neighborhood....Given that this package can be very unreliable, in case you are not able to get the geographical coordinates of the neighborhoods using the Geocoder package, here is a link to a csv file that has the geographical coordinates of each postal code: http://cocl.us/Geospatial_datageospatial_coordinates = pd.read_csv('Geospatial_Coordinates.csv') postal_codes_geospatial = postal_codes.merge(geospatial_coordinates, left_on='PostalCode', right_on='Postal Code').drop('Postal Code', axis=1) postal_codes_geospatialAnalyzing with iNNvestigate **iNNvestigate** got created to make analyzing neural network's predictions easy! The library should help the user to focus on research and development by providing implemented analysis methods and facilitating rapid development of new methods. In this notebook we will show you how to use **iNNvestigate** and for a better understanding we recommend to read [iNNvestigate neural networks!](https://arxiv.org/abs/1808.04260) first! How to use **iNNvestigate** you can read in this notebook: [Developing with iNNvestigate](introduction_development.ipynb)-----**The intention behind iNNvestigate is to make it easy to use analysis methods, but it is not to explain the underlying concepts and assumptions. Please, read the according publication(s) when using a certain method and when publishing please cite the according paper(s) (as well as the [iNNvestigate paper](https://arxiv.org/abs/1808.04260)). Thank you!** You can find most related publication in [iNNvestigate neural networks!](https://arxiv.org/abs/1808.04260) and in the README file. Analysis methodsThe field of analyizing neural network's predictions is about gaining insights how and why a potentially complex network gave as output a certain value or choose a certain class over others. This is often called interpretability or explanation of neural networks. We just call it analyzing a neural network's prediction to be as neutral as possible and to leave any conclusions to the user.Most methods have in common that they analyze the input features w.r.t. a specific neuron's output. Which insights a method reveals about this output can be grouped into (see [Learning how to explain: PatternNet and PatternAttribution](https://arxiv.org/abs/1705.05598)):* **function:** analyzing the operations the network function uses to extract or compute the output. E.g., how would changing an input feature change the output.* **signal:** analyzing the components of the input that cause the output. E.g., which parts of an input image or which directions of an input are used to determine the output.* **attribution:** attributing the "importance" of input features for the output. 
E.g., how much would changing an input feature change the output.----In this notebook we will introduce methods for each of these categories and along show how to use different features of **iNNvestigate**, namely how to:* analyze a prediction.* train an analyzer.* analyze a prediction w.r.t to a specific output neuron.Let's dive right into it! Training a networkTo analyze a network, we need a network! As a base for **iNNvestigate** we chose the Keras deep learning library, because it is easy to use and allows to inspect build models.In this first piece of code we import all the necessary modules:import warnings warnings.simplefilter('ignore') %matplotlib inline import imp import matplotlib.pyplot as plot import numpy as np import os import keras import keras.backend import keras.layers import keras.models import keras.utils import innvestigate import innvestigate.utils as iutils # Use utility libraries to focus on relevant iNNvestigate routines. mnistutils = imp.load_source("utils_mnist", "../utils_mnist.py")Using TensorFlow backend.to load the data:# Load data # returns x_train, y_train, x_test, y_test as numpy.ndarray data_not_preprocessed = mnistutils.fetch_data() # Create preprocessing functions input_range = [-1, 1] preprocess, revert_preprocessing = mnistutils.create_preprocessing_f(data_not_preprocessed[0], input_range) # Preprocess data data = ( preprocess(data_not_preprocessed[0]), keras.utils.to_categorical(data_not_preprocessed[1], 10), preprocess(data_not_preprocessed[2]), keras.utils.to_categorical(data_not_preprocessed[3], 10), ) if keras.backend.image_data_format == "channels_first": input_shape = (1, 28, 28) else: input_shape = (28, 28, 1)and to now create and train a CNN model:model = keras.models.Sequential([ keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape), keras.layers.Conv2D(64, (3, 3), activation="relu"), keras.layers.MaxPooling2D((2, 2)), keras.layers.Flatten(), keras.layers.Dense(512, activation="relu"), keras.layers.Dense(10, activation="softmax"), ]) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(data[0], data[1], epochs=20, batch_size=128) scores = model.evaluate(data[2], data[3], batch_size=128) print("Scores on test set: loss=%s accuracy=%s" % tuple(scores))Epoch 1/20 60000/60000 [==============================] - 19s 313us/step - loss: 0.1519 - acc: 0.9543 Epoch 2/20 60000/60000 [==============================] - 10s 161us/step - loss: 0.0405 - acc: 0.9876 Epoch 3/20 60000/60000 [==============================] - 10s 162us/step - loss: 0.0230 - acc: 0.9924 Epoch 4/20 60000/60000 [==============================] - 10s 161us/step - loss: 0.0148 - acc: 0.9948 Epoch 5/20 60000/60000 [==============================] - 10s 161us/step - loss: 0.0114 - acc: 0.9960 Epoch 6/20 60000/60000 [==============================] - 10s 161us/step - loss: 0.0075 - acc: 0.9975 Epoch 7/20 60000/60000 [==============================] - 10s 163us/step - loss: 0.0058 - acc: 0.9981 Epoch 8/20 60000/60000 [==============================] - 9s 150us/step - loss: 0.0065 - acc: 0.9979 Epoch 9/20 60000/60000 [==============================] - 8s 129us/step - loss: 0.0055 - acc: 0.9983 Epoch 10/20 60000/60000 [==============================] - 9s 144us/step - loss: 0.0[...]Analyzing a predicitionLet's first choose an image to analyze:# Choosing a test image for the tutorial: image = data[2][7:8] plot.imshow(image.squeeze(), cmap='gray', interpolation='nearest') plot.show()In this first part we show how to create 
and use an analyzer. To do so we use an analyzer from *function* category, namely the gradient. The gradient shows how the linearized network function reacts on changes of a single feature.This is simply done by passing the model without a softmax to the analyzer class:# Stripping the softmax activation from the model model_wo_sm = iutils.keras.graph.model_wo_softmax(model) # Creating an analyzer gradient_analyzer = innvestigate.analyzer.Gradient(model_wo_sm) # Applying the analyzer analysis = gradient_analyzer.analyze(image) # Displaying the gradient plot.imshow(analysis.squeeze(), cmap='seismic', interpolation='nearest') plot.show()For convience there is a function that creates an analyzer for you. It passes all the parameter on to the class instantiation:# Creating an analyzer gradient_analyzer = innvestigate.create_analyzer("gradient", model_wo_sm) # Applying the analyzer analysis = gradient_analyzer.analyze(image) # Displaying the gradient plot.imshow(analysis.squeeze(), cmap='seismic', interpolation='nearest') plot.show()To emphasize different compontents of the analysis many people use instead of the "plain" gradient the absolute value or the square of it. With the gradient analyzer this can be done specifying additional parameters when creating the analyzer:# Creating a parameterized analyzer abs_gradient_analyzer = innvestigate.create_analyzer("gradient", model_wo_sm, postprocess="abs") square_gradient_analyzer = innvestigate.create_analyzer("gradient", model_wo_sm, postprocess="square")Similar other analyzers can be parameterized.Now we visualize the result by projecting the gradient into a gray-color-image:# Applying the analyzers abs_analysis = abs_gradient_analyzer.analyze(image) square_analysis = square_gradient_analyzer.analyze(image) # Displaying the analyses, use gray map as there no negative values anymore plot.imshow(abs_analysis.squeeze(), cmap='gray', interpolation='nearest') plot.show() plot.imshow(square_analysis.squeeze(), cmap='gray', interpolation='nearest') plot.show()Training an analyzerSome analyzers are data-dependent and need to be trained. In **iNNvestigate** this realized with a SKLearn-like interface. In the next piece of code we train the method PatternNet that analyzes the *signal*:# Creating an analyzer patternnet_analyzer = innvestigate.create_analyzer("pattern.net", model_wo_sm, pattern_type="relu") # Train (or adapt) the analyzer to the training data patternnet_analyzer.fit(data[0], verbose=True) # Applying the analyzer analysis = patternnet_analyzer.analyze(image)Epoch 1/1 1875/1875 [==============================] - 23s 12ms/step - loss: 4.0000 - broadcast_1_loss: 1.0000 - broadcast_2_loss: 1.0000 - broadcast_3_loss: 1.0000 - broadcast_4_loss: 1.0000And visualize it:# Displaying the signal (projected back into input space) plot.imshow(analysis.squeeze()/np.abs(analysis).max(), cmap="gray", interpolation="nearest") plot.show()Choosing the output neuronIn the previous examples we always analyzed the output of the neuron with the highest activation. In the next one we show how one can choose the neuron to analyze:# Creating an analyzer and set neuron_selection_mode to "index" inputXgradient_analyzer = innvestigate.create_analyzer("input_t_gradient", model_wo_sm, neuron_selection_mode="index")The gradient\*input analyzer is an example from the *attribution* category and we visualize it by means of a colored heatmap to highlight positive and negative attributions:for neuron_index in range(10): print("Analysis w.r.t. 
to neuron", neuron_index) # Applying the analyzer and pass that we want analysis = inputXgradient_analyzer.analyze(image, neuron_index) # Displaying the gradient plot.imshow(analysis.squeeze(), cmap='seismic', interpolation='nearest') plot.show()Analysis w.r.t. to neuron 0Demonstration of evaluation tools for Task 3Evaluate and analyze results for Task 3: "Detection of graticule lines intersections".# Those are the imports you may need. from icdar21_mapseg_eval.point_detection import ( eval_pt_detect, show_predictions, show_predictions_classified, plot_f_vs_dist_curve) # Extra imports for the demo import numpy as np import matplotlib.pyplot as plt %matplotlib inlineEvaluation parameters# This is arbitrary, for the competition `radius_limit` is set to 50 pixels. radius_limit = 118 # Beta for the F_beta score beta = 0.5Sample dataground_truth = np.float32([ [5710, 1170], # A [8080, 1170], # B [3330, 3530], # C [5710, 3550], # D [8085, 3540], # E [3327, 5922], # F [5715, 5940], # G [8085, 5942]]) # H predicted = np.float32([ # A [5710, 1170], # exact match # B [8080 + 2*radius_limit, 1170+2*radius_limit], # Match outside acceptable area # C [3330+10, 3530+10], # multiple acceptable matches [3330-10, 3530-10], [3330+10, 3530+0], [3330+10, 3530+30], # D [5710+10, 3550-10], # 1 good match # E [8085+radius_limit, 3540], # far match, on the edge (will be accepted) # F # Nothing, no match # G and H [(5715+8085)/2, (5940+5942)/2] # point on the perpendicular bisector of the two points ])1. Visualize raw predictionsshow_predictions(ground_truth, predicted, radius_limit)2. Evaluation metrics for a single image# debug=True displays matches # score is the AUC of f_beta vs distance # details is a dataframe containing match and score information for each predicted point # plt_data is the curve to plot score, details, plt_data = eval_pt_detect(ground_truth, predicted, radius_limit, beta=beta, debug=True, show_plot=True) # A different radius_limit gives a different result score, details, plt_data = eval_pt_detect(ground_truth, predicted, radius_limit/3, beta=beta, debug=False, show_plot=True) score # Matching details provide extra help for debugging: detailsCode for error types:- `0`: correct match (true positive)- `1`: extra match withing acceptable range (error)- `2`: too far -- out of acceptable range (error) 3. Curve used to compute the AUCplot_f_vs_dist_curve(plt_data, radius_limit, beta=0.5) # We can check we obtain the same results as the auc function from scikit-learn from sklearn.metrics import auc auc(plt_data["Normalized distance"], plt_data["F-beta"]) score plt_data4. Show predictions according to error type# We can show predictions with a different color according to whether they are correct or not show_predictions_classified(ground_truth, details, radius_limit) # And you can save the figure for further study show_predictions_classified(ground_truth, details, radius_limit, filename="out.pdf")97. 
More testsWe keep here some tests to show the implementation is correct.# Empty prediction: zero auc area, df, _ = eval_pt_detect(np.array([(0,0), (1,1)]), np.empty((0,2)), 10, beta=0.5, debug=True, show_plot=True) area, df # Empty prediction and ground truth area, df, _ = eval_pt_detect(np.empty((0,2)), np.empty((0,2)), 10, beta=0.5, debug=True, show_plot=True) area, df # Empty ground truth area, df, df_dbg = eval_pt_detect(np.empty((0,2)), np.array([(0,0), (1,1)]), 10, beta=0.5, debug=True, show_plot=True) area, df # 1 perfect match over 2 targets eval_pt_detect(np.array([(0,0), (1,1)]), np.array([(1,1)]), 10, beta=0.5, debug=True, show_plot=True) # same eval_pt_detect(np.array([(0,0), (1,1)]), np.array([(0,0)]), 10, beta=0.5, debug=True, show_plot=True) # 1 perfect match and 1 extra match eval_pt_detect(np.array([(0,0)]), np.array([(0,0), (1,1)]), 10, beta=0.5, debug=True, show_plot=True) # 1 match, 1 miss eval_pt_detect(np.array([(0,0), (10,10)]), np.array([(2,0)]), 4, beta=0.5, debug=True, show_plot=True) # 1 match on edge eval_pt_detect(np.array([(0,0)]), np.array([(10,0)]), 10, beta=0.5, debug=True, show_plot=True) # duplicate matches area, _, _ = eval_pt_detect(np.array([(0,0)]), np.array([(0,0), (0,0)]), 10, beta=0.5, debug=True, show_plot=True) areaConsidering predicted point 00: ( 0.0, 0.0) Matches gt point 00: ( 0.0, 0.0) @ 0.0 First match (good) tp: 1, fn: 0, fp: 1, |expt|:1, |pred|:2, x:0.00, y:0.56, area:0.000 Considering predicted point 01: ( 0.0, 0.0) Matches gt point 00: ( 0.0, 0.0) @ 0.0 EXTRA MATCH (noise) Propagating to right limit x:1.00, y:0.56, area:0.56 Score: 0.55698. Influence of the `radius_limit` parameterarea, _, _ = eval_pt_detect(ground_truth, predicted, radius_limit, beta=0.5, debug=False, show_plot=True) area area, _, _ = eval_pt_detect(ground_truth, predicted, 50, beta=0.5, debug=False, show_plot=True) area area, _, _ = eval_pt_detect(ground_truth, predicted, 1, beta=0.5, debug=False, show_plot=True) area area, _, _ = eval_pt_detect(ground_truth, predicted, 10, beta=0.5, debug=False, show_plot=True) area area, _, _ = eval_pt_detect(ground_truth, predicted, 100, beta=0.5, debug=False, show_plot=True) area area, _, _ = eval_pt_detect(ground_truth, predicted, 1000, beta=0.5, debug=False, show_plot=True) area99. Fuzzy testingWe also generated some random tests for extra safety.np.random.seed(42) targets = np.float32(np.random.randint(0,10000,(5,2))) from scipy.spatial import distance_matrix dmat = distance_matrix(targets, targets) dmat[np.diag_indices_from(dmat)] = np.inf radius_limit = np.min(dmat) / 3 radius_limit predictions = np.float32(np.random.randint(0,10000,(100,2))) score, details, _ = eval_pt_detect(targets, predictions, radius_limit, beta=0.5, debug=False, show_plot=True) score show_predictions_classified(targets, details, radius_limit) _ = eval_pt_detect(predictions, targets, radius_limit, beta=0.5, debug=False, show_plot=True) # Evaluation against self should give maximal score area, _, _ = eval_pt_detect(predictions, predictions, radius_limit, beta=0.5, debug=False, show_plot=True) areaSpecimen Directional Uncertainty by () & **. & (2020). Uncertainty propagation for paleomagnetic directions, *Journal of Geophysical Research*, 125, e2020JB019488.** https://doi.org/10.1029/2020JB019488import vMF_Specimens as smSpecimen parametersEnter your PCA estimated directions in the form below (one row per specimen):1. **Dec [deg.]** - PCA declination in degrees [value between 0$^\circ$ and 360$^\circ$]. 2. 
**Inc [deg.]** - PCA inclination in degrees [value between -90$^\circ$ and +90$^\circ$]. 3. **n** - Number of demagnetization points used in the PCA [value $>$ 1]. 4. **MAD [deg.]** - PCA maximum angular deviation in degrees [value between 0$^\circ$ and 90$^\circ$]. 5. **Anchored** - Check this option for PCA fits anchored to the origin. X = {} X = sm.input_frame(X) Process data Once your data form is complete, click the ```Process Data``` button.pe = sm.LoadedButton(description="Process Data", value=X) pe.on_click(sm.process) display(pe) X = pe.valueHaiti Health Data Analysis You are an analyst working at a health research company in Haiti. The government has asked your company to analyze data related to COVID-19. To that end, they have provided you with access to health data. Your goal as a Data Manager is to provide:* a relational database to manage all the data collected* SQL queries to answer some questions provided by the Board* a Power BI dashboard to illustrate all the COVID-19 activity Here are all the datasets used for this project:import pandas as pd import numpy as np covid_cases = pd.read_csv("./datasets/mspp_covid19_cases.csv") display(covid_cases.head(10)) covid_cases.info() spa = pd.read_csv("./datasets/spa.csv") display(spa.head()) display(spa.info()) map_dict = {1: 'public',2: 'private non profit',3: 'private with profit',4 : 'private and public'} map_dict spa.facdesc_1.value_counts() departement = pd.read_excel("./datasets/hti_adminboundaries_tabulardata.xlsx",sheet_name=1) commune = pd.read_excel("./datasets/hti_adminboundaries_tabulardata.xlsx",sheet_name=2) display(departement.head(5)) display(departement.info()) display(commune.head(5)) display(commune.info())Questions * Question 1: Create a relational database with this raw data set. * Question 2: Calculate the number of health facilities per commune. * Question 3: Calculate the number of health facilities by commune and by type of health facility. * Question 4: Calculate the number of health facilities by municipality and by department. * Question 5: Calculate the number of sites by type (mga) and by department. * Question 6: Calculate the number of sites with an ambulance by commune and by department (ambulance = 1.0). * Question 7: Calculate the number of hospitals per 10k inhabitants by department. * Question 8: Calculate the number of sites per 10k inhabitants per department. * Question 9: Calculate the number of beds per 1,000 inhabitants per department. * Question 10: How many communes have fewer dispensaries than hospitals? * Question 11: What is the lethality rate per month? * Question 12: What is the death rate per month? * Question 13: What is the prevalence per month? * Question 14: What is the prevalence by department? * Question 15: What is the variation of the prevalence per week? * Question 16:
Build a Power BI dashboard to understand the health structures in Hait your Goal as a Data Management is to Creation de tables Table datesmin_date = covid_cases.document_date.min() max_date = pd.to_datetime("2021-12-12") date_table=pd.DataFrame(pd.date_range(min_date, max_date),columns=["document_date"]) date_table["date_Id"]=date_table.document_date.apply( lambda x : int(str(x).replace("-","")[0:8])) date_table.tail() date_table["year"]=date_table["document_date"].dt.year date_table["month"]=date_table["document_date"].dt.month date_table["quarter"]=date_table["document_date"].dt.quarter date_table["quarter"]=date_table["quarter"].apply( lambda x: "Quarter" + str(x)) date_table["sem"]=date_table["document_date"].dt.isocalendar().week date_table.set_index("date_Id", inplace=True) date_table.head()Table mgamgaDF = [] for (x, y) in map_dict.items(): mgaDF.append([x, y]) mga_table = pd.DataFrame(mgaDF, columns=["index", "mga_name"]) mga_table.set_index(keys="index", inplace=True,) mga_tableData processing covid_cases Datasetcovid_cases.head() covid_cases.drop(columns=["Unnamed: 0",], inplace=True) covid_cases["date_Id"]=covid_cases.document_date.apply( lambda x : int(str(x).replace("-","")[0:8])) covid_cases.drop(columns="document_date",inplace=True) covid_cases.departement.unique() covid_cases=covid_cases[-(covid_cases.departement=="Grand Total ")] covid_cases=covid_cases[-(covid_cases.departement=="Grand Total")] covid_cases=covid_cases[-(covid_cases.departement=="Guest")] covid_cases1=pd.merge(covid_cases, departement,how="left", right_on="adm1_fr", left_on="departement") covid_cases1.head() covid_cases1 = covid_cases1[["cas_suspects","cas_confirmes","deces","taux_de_letalite","date_Id","adm1code"]]spa DataFramespa.head() spa["depart"] = spa.depart.apply(lambda x: "0"+str(x) if len(str(x)) == 1 else str(x)) spa["vilcom"] = spa.vilcom.apply(lambda x: "0"+str(x) if len(str(x)) == 1 else str(x)) spa["code"] = "HT" spa["commune_id"] = spa["code"] + spa["depart"] + spa["vilcom"] for_Commune_DF = spa[["vilcom", "vilcomn"]] spa.drop(columns=["Unnamed: 0","index", "facil", "vilcom", "depart", "code", "departn", "vilcomn", "facdesc"], inplace=True) spa.head() facdesc0L = list(spa.facdesc_1.unique()) facdesc0L facdesc0 = pd.DataFrame(facdesc0L, columns=["facdesc",]) facdesc0["index"] = facdesc0.index+1 facdesc0.set_index("index", inplace=True) facdesc_table = facdesc0 facdesc_table spa["facdesc_1"] = spa.facdesc_1.apply(lambda x: facdesc0L.index(x)+1) spa.head() spa.facdesc_1.value_counts() spa.tail()departement DataFramedepartement.head() departement["IHSI_UNFPA_Total"]=departement["IHSI_UNFPA_2019_female"]+departement["IHSI_UNFPA_2019_male"] departement.drop(inplace=True, columns=["adm0code", "adm0_en", "adm0_fr", "adm0_ht", "IHSI_UNFPA_2019_male", "IHSI_UNFPA_2019_female"]) departement.set_index("adm1code", inplace=True) departementcommune DataFramecommune.head() commune["IHSI_UNFPA_2019"]=commune["IHSI_UNFPA_2019_female"]+commune["IHSI_UNFPA_2019_male"] commune.drop(inplace=True, columns=["adm0code", "adm0_en", "adm0_fr", "adm0_ht", "adm1_en", "adm1_fr", "adm1_ht"]) commune.set_index(inplace=True, keys="adm2code") commune.head()Data Remotefrom sqlalchemy import create_engine from sqlalchemy import create_engine user="postgres" database="covid_19_final" password="" port="5432" driver="postgresql://" hostname="localhost" connect_string=f"{driver}{user}:{password}@{hostname}:{port}/{database}" connect_string con=create_engine(connect_string) con """ 
spa.to_sql(name="spa",con=con,if_exists="replace") commune.to_sql(name="commune",con=con,if_exists="replace") departement.to_sql(name="department",con=con,if_exists="replace") covid_cases.to_sql(name="covid_cases",con=con,if_exists="replace") date_table.to_sql(name="date_table",con=con,if_exists="replace") mga_table.to_sql(name="mga_table",con=con,if_exists="replace") facdesc_table.to_sql(name="facdesc_table", con=con,if_exists="replace") """ covid_cases.to_sql(name="covid_cases",con=con,if_exists="replace")QUESTIONS Question 1: Create a relational database with this raw data set.import psycopg2 %load_ext sql %sql postgresql://postgres:admin@localhost:5432/covid_19_finalQuestion 2. Calculate the number of health facilities per commune.# SQL statement to query from a PostgreSQL database table sqlQ2 = %sql SELECT c."adm2_fr", count(spa."facdesc_1") as Quantity From commune as c inner join spa on c."adm2code" = spa."commune_id" group by c."adm2_fr" order by Quantity Desc print(sqlQ2)* postgresql://postgres:***@localhost:5432/covid_19_final 127 rows affected. +--------------------------------+----------+ | adm2_fr | quantity | +--------------------------------+----------+ | Delmas | 77 | | Port-au-Prince | 63 | | Croix-Des-Bouquets | 41 | | Carrefour | 37 | | Léogâne | 28 | | Pétion-Ville | 26 | | Cap-Haïtien | 24 | | Jacmel | 21 | | Dessalines | 21 | | Port-de-Paix | 20 | | | 19 | | | 19 | | Saint-Marc | 19 | | | 16 | | Gonaïves | 16 | | | 15 | | Arcahaie | 15 | | [...]Question 3. Calculate the number of health facilities by commune and by type of health facility.sqlQ3 = %sql SELECT c."adm2_fr", facdesc_table."facdesc" as Type_Health_Facilities, count(spa."facdesc_1") as Quantity From commune as c Inner join spa on c."adm2code" = spa."commune_id" Inner join facdesc_table On facdesc_table."index"=spa."facdesc_1" Group by c."adm2_fr", Type_Health_Facilities Order by adm2_fr, Quantity DESC print(sqlQ3)* postgresql://postgres:***@localhost:5432/covid_19_final 322 rows affected. +--------------------------------+--------------------------+----------+ | adm2_fr | type_health_facilities | quantity | +--------------------------------+--------------------------+----------+ | Abricots | CENTRE DE SANTE SANS LIT | 2 | | | CENTRE DE SANTE SANS LIT | 4 | | | DISPENSAIRE | 3 | | | HOPITAL | 1 | | Anse-à-Foleur | DISPENSAIRE | 3 | | Anse-à-Foleur | CENTRE DE SANTE AVEC LIT | 1 | | Anse-à-Pître | CENTRE DE SANTE AVEC LIT | 2 | | Anse-à-Pître | DISPENSAIRE | 1 | | Anse à Galets | CENTRE DE SANTE SANS LIT | 8 | | Anse à Galets | DISPE[...]Question 4. Calculate the number of health facilities by municipality and by department.sqlQ4= %sql SELECT department."adm1_fr", c."adm2_fr", count(spa."facdesc_1") as Nbr_of_fascilities From commune as c Inner join spa on c."adm2code" = spa."commune_id" Inner join department On department."adm1code"=c."adm1code" Group by c."adm2_fr", department."adm1_fr" Order by adm1_fr, Nbr_of_fascilities DESC print(sqlQ4)* postgresql://postgres:***@localhost:5432/covid_19_final 127 rows affected. 
+-------------+--------------------------------+--------------------+ | adm1_fr | adm2_fr | nbr_of_fascilities | +-------------+--------------------------------+--------------------+ | Artibonite | Dessalines | 21 | | Artibonite | Saint-Marc | 19 | | Artibonite | Gonaïves | 16 | | Artibonite | | 15 | | Artibonite | l'Artibonite | 9 | | Artibonite | Verrettes | 9 | | Artibonite | Saint-'Attalaye | 8 | | Artibonite | Ennery | 7 | | Artibonite | | 7 | | Artibonite | L'Estère | 4 | | Artibonit[...]5-Calculate the number of sites by type (mga) and by department.sqlQ5 = %sql SELECT department."adm1_fr" as departement, mga_table."mga_name" as mga_type, count(spa."facdesc_1") as Number_of_Health_Facilities From department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join mga_table on mga_table."index"=spa."mga" group by departement, mga_type order by departement print(sqlQ5)* postgresql://postgres:***@localhost:5432/covid_19_final 36 rows affected. +-------------+---------------------+-----------------------------+ | departement | mga_type | number_of_health_facilities | +-------------+---------------------+-----------------------------+ | Artibonite | private and public | 10 | | Artibonite | private non profit | 17 | | Artibonite | public | 59 | | Artibonite | private with profit | 38 | | Centre | private with profit | 9 | | Centre | private and public | 15 | | Centre | public | 24 | | Centre | private non profit | 6 | | Grande'Anse | private with profit | 4 | | Grande'Anse | public | 24 | | Grande'Anse | private non profit | [...]6-Calculate the number of sites with an ambulance by commune and by department (ambulance = 1.0)sqlQ6 = %sql SELECT department."adm1_fr" as departement, commune."adm2_fr", count(distinct commune."adm2_fr") as Number_of_commune_with_an_ambulance From department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" where spa."ambulance" = 1.0 group by departement , adm2_fr order by departement print(sqlQ6)* postgresql://postgres:***@localhost:5432/covid_19_final 80 rows affected. +-------------+--------------------------------+-------------------------------------+ | departement | adm2_fr | number_of_commune_with_an_ambulance | +-------------+--------------------------------+-------------------------------------+ | Artibonite | | 1 | | Artibonite | Dessalines | 1 | | Artibonite | Ennery | 1 | | Artibonite | Gonaïves | 1 | | Artibonite | | 1 | | Artibonite | | 1 | | Artibonite | L'Estère | 1 | | Artibonite | 'Artibonite | [...]7-Calculate the number of hospitals per 10k inhabitants by department.sqlQ7=%sql SELECT department."adm1_fr" as departement, count ( facdesc_table."facdesc") as number_of_hospital, department."IHSI_UNFPA_Total" as total, round(round(count ( facdesc_table."facdesc")*10000,2)/department."IHSI_UNFPA_Total",2) From department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join facdesc_table on spa."facdesc_1"=facdesc_table."index" where facdesc_table."index" = 2 group by adm1_fr, total order by total desc print(sqlQ7)* postgresql://postgres:***@localhost:5432/covid_19_final (psycopg2.errors.UndefinedColumn) column department.ihsi_unfpa_total does not exist LINE 1: ...t ( facdesc_table.facdesc) as number_of_hospital, department... 
^ [SQL: SELECT department.adm1_fr as departement, count ( facdesc_table.facdesc) as number_of_hospital, department.IHSI_UNFPA_Total as total, round(round(count ( facdesc_table.facdesc)*10000,2)/department.IHSI_UNFPA_Total,2) From department Inner join commune on commune.adm1code = department.adm1code Inner join spa On spa.commune_id=commune.adm2code inner join facdesc_table on spa.facdesc_1=facdesc_table.index where facdesc_table.index = 2 group by adm1_fr, total order by total desc] (Background on this error at: https://sqlalche.me/e/14/f405) None8-Calculate the number of sites per 10k inhabitants per departmentsqlQ8 = %sql SELECT department."adm1_fr" as departement ,count ( facdesc_table."facdesc") as number_of_hospital, department."IHSI_UNFPA_Total" as total, round(round(count ( facdesc_table."facdesc")*10000,2)/department."IHSI_UNFPA_Total",2) From department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join facdesc_table on spa."facdesc_1"=facdesc_table."index" group by adm1_fr, total order by total desc print(sqlQ8)* postgresql://postgres:***@localhost:5432/covid_19_final (psycopg2.errors.UndefinedColumn) column department.ihsi_unfpa_total does not exist LINE 1: ...t ( facdesc_table.facdesc) as number_of_hospital, department... ^ [SQL: SELECT department.adm1_fr as departement ,count ( facdesc_table.facdesc) as number_of_hospital, department.IHSI_UNFPA_Total as total, round(round(count ( facdesc_table.facdesc)*10000,2)/department.IHSI_UNFPA_Total,2) From department Inner join commune on commune.adm1code = department.adm1code Inner join spa On spa.commune_id=commune.adm2code inner join facdesc_table on spa.facdesc_1=facdesc_table.index group by adm1_fr, total order by total desc] (Background on this error at: https://sqlalche.me/e/14/f405) None9-Calculate the number of beds per 1,000 inhabitants per departmentsqlQ9= %sql SELECT department."adm1_fr" as departement, count(spa."num_beds") as number_of_beds, department."IHSI_UNFPA_Total" as total, round(round(count ( spa."num_beds")*1000,2)/department."IHSI_UNFPA_Total",4) as number_of_beds_1000_inhabithants From department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join facdesc_table on spa."facdesc_1"=facdesc_table."index" group by adm1_fr, total order by total desc* postgresql://postgres:***@localhost:5432/covid_19_final (psycopg2.errors.UndefinedColumn) column department.ihsi_unfpa_total does not exist LINE 1: ...partement, count(spa.num_beds) as number_of_beds, department... 
^ [SQL: SELECT department.adm1_fr as departement, count(spa.num_beds) as number_of_beds, department.IHSI_UNFPA_Total as total, round(round(count ( spa.num_beds)*1000,2)/department.IHSI_UNFPA_Total,4) as number_of_beds_1000_inhabithants From department Inner join commune on commune.adm1code = department.adm1code Inner join spa On spa.commune_id=commune.adm2code inner join facdesc_table on spa.facdesc_1=facdesc_table.index group by adm1_fr, total order by total desc] (Background on this error at: https://sqlalche.me/e/14/f405) None10-How many communes have fewer dispensaries than hospitalssqlQ10= %sql WITH Hospital as (SELECT department."adm1_fr", commune."adm2_fr" as commune, count ( facdesc_table."facdesc") as hospital from department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join facdesc_table on spa."facdesc_1"=facdesc_table."index" where facdesc_table."index" = 2 group by adm1_fr, commune order by hospital desc), Dispensaire as (SELECT department."adm1_fr", commune."adm2_fr" as commune, count ( facdesc_table."facdesc") as dispensaire from department Inner join commune on commune."adm1code" = department."adm1code" Inner join spa On spa."commune_id"=commune."adm2code" inner join facdesc_table on spa."facdesc_1"=facdesc_table."index" where facdesc_table."index" = 1 group by adm1_fr, commune order by dispensaire desc) SELECT h."commune" as commune, Hospital, Dispensaire From Hospital as h inner join Dispensaire as d on h."commune"=d."commune" group by h."commune", Hospital, Dispensaire having Hospital > Dispensaire order by commune print(sqlQ10) sqlQ11= %sql SELECT round((sum(covid_cases."deces")/sum(covid_cases."cas_confirmes")),3) as taux_de_letalite, date_table."month" from covid_cases inner join date_table on covid_cases."date_Id"=date_table."date_Id" group by month sqlQ12= %sql SELECT round((sum(covid_cases."deces")/(sum(commune."IHSI_UNFPA_2019_male")+sum(commune."IHSI_UNFPA_2019_female"))),7) as taux_de_deces, date_table."month" from covid_cases inner join date_table on covid_cases."date_Id"=date_table."date_Id" inner join department on covid_cases."departement"=department."adm1_fr" inner join commune on department."adm1code"=commune."adm1code" group by month print(sqlQ12) sqlQ13= %sql SELECT round((sum(covid_cases."cas_suspects"+covid_cases."cas_confirmes")/(sum(commune."IHSI_UNFPA_2019_male")+sum(commune."IHSI_UNFPA_2019_female"))),8) as prevalence, date_table."month" from covid_cases inner join date_table on covid_cases."date_Id"=date_table."date_Id" inner join department on covid_cases."departement"=department."adm1_fr" inner join commune on department."adm1code"=commune."adm1code" group by month print(sqlQ13) sqlQ14= %sql SELECT department."adm1_fr", round((sum(covid_cases."cas_suspects"+covid_cases."cas_confirmes")/(sum(commune."IHSI_UNFPA_2019_male")+sum(commune."IHSI_UNFPA_2019_female"))),8) as Prevalence from covid_cases inner join date_table on covid_cases."date_Id"=date_table."date_Id" inner join department on covid_cases."departement"=department."adm1_fr" inner join commune on department."adm1code"=commune."adm1code" group by department."adm1_fr", month order by Prevalence Desc print(sqlQ14)Explore M-flow latent space for LHC dataset%matplotlib inline import sys import numpy as np import matplotlib from matplotlib import pyplot as plt import logging import torch import numpy as np logging.basicConfig( format="%(asctime)-5.5s %(name)-30.30s %(levelname)-7.7s %(message)s", datefmt="%H:%M", 
level=logging.INFO, ) sys.path.append("../../") from experiments.architectures.vector_transforms import create_vector_transform from manifold_flow.flows import ManifoldFlow, EncoderManifoldFlow from experiments.datasets import WBF40DLoader import plot_settings as ps ps.setup()Get datan = 1000 sim40d = WBF40DLoader() x0, _ = sim40d.load_dataset(train=False, dataset_dir="../data/samples/lhc40d", numpy=True, limit_samplesize=n, true_param_id=0) x1, _ = sim40d.load_dataset(train=False, dataset_dir="../data/samples/lhc40d", numpy=True, limit_samplesize=n, true_param_id=1) x2, _ = sim40d.load_dataset(train=False, dataset_dir="../data/samples/lhc40d", numpy=True, limit_samplesize=n, true_param_id=2) x_gen0 = np.load("../data/results/mf_14_lhc40d_june_samples.npy") x_gen1 = np.load("../data/results/mf_14_lhc40d_june_samples_trueparam1.npy") x_gen2 = np.load("../data/results/mf_14_lhc40d_june_samples_trueparam2.npy")Load modeldef load_model( filename, outerlayers=20, innerlayers=15, splinebins=11, splinerange=10.0, dropout=0.0, batchnorm=False, outertransform="rq-coupling", innertransform="rq-coupling", lineartransform="lu", pieepsilon=0.1, pieclip=None, ): outer_transform = create_vector_transform( 40, outerlayers, linear_transform_type=lineartransform, base_transform_type=outertransform, context_features=None, dropout_probability=dropout, tail_bound=splinerange, num_bins=splinebins, use_batch_norm=batchnorm, ) inner_transform = create_vector_transform( 14, innerlayers, linear_transform_type=lineartransform, base_transform_type=innertransform, context_features=2, dropout_probability=dropout, tail_bound=splinerange, num_bins=splinebins, use_batch_norm=batchnorm, ) model = ManifoldFlow( data_dim=40, latent_dim=14, outer_transform=outer_transform, inner_transform=inner_transform, apply_context_to_outer=False, pie_epsilon=pieepsilon, clip_pie=pieclip, ) model.load_state_dict( torch.load("../data/models/{}.pt".format(filename), map_location=torch.device("cpu")) ) _ = model.eval() return model mf = load_model("mf_14_lhc40d_june")Project test data into latent spacedef compute_uv(x, model=mf): model.eval() x_ = torch.tensor(x, dtype=torch.float) h, _ = model.outer_transform(x_, full_jacobian=False, context=None) u, v = model.projection(h) return u.detach().numpy(), v.detach().numpy() u0, v0 = compute_uv(x0) u1, v1 = compute_uv(x1) u2, v2 = compute_uv(x2) v0.shapeMarginals of latentsncols = 4 nrows = 4 fig = plt.figure(figsize=(3*ncols, 3*nrows)) for i in range(14): ax = plt.subplot(nrows, ncols, i+1) plt.hist( u0[:,i], range=(-1.5,1.5), bins=50, density=True, histtype="step", color=[ps.COLORS[1]], ls="-", lw=1.5 ) plt.hist( u1[:,i], range=(-1.5, 1.5), bins=50, density=True, histtype="step", color=[ps.COLORS[2]], ls="-", lw=1.5 ) plt.hist( u2[:,i], range=(-1.5, 1.5), bins=50, density=True, histtype="step", color=[ps.COLORS[3]], ls="-", lw=1.5 ) plt.xlabel("$u_{" + str(i) + "}$") plt.ylabel(f"Density") plt.tight_layout() plt.savefig("../figures/lhc_u_histos.pdf") ncols = 4 nrows = 4 fig = plt.figure(figsize=(3*ncols, 3*nrows)) for i in range(14): ax = plt.subplot(nrows, ncols, i+1) plt.hist( v0[:,i], range=(-0.2,0.2), bins=50, density=True, histtype="step", color=[ps.COLORS[1]], ls="-", lw=1.5 ) plt.hist( v1[:,i], range=(-0.2,0.2), bins=50, density=True, histtype="step", color=[ps.COLORS[2]], ls="-", lw=1.5 ) plt.hist( v2[:,i], range=(-0.2,0.2), bins=50, density=True, histtype="step", color=[ps.COLORS[3]], ls="-", lw=1.5 ) plt.xlabel(f"$v_{i}$") plt.ylabel(f"Density") plt.tight_layout() 
plt.savefig("../figures/lhc_v_histos.pdf")Scatter plotfeatures = list(range(14)) n = len(features) m = 250 fig = plt.figure(figsize=(2*(n-1), 2*(n-1))) for ip, i in enumerate(features[1:]): for jp in range(ip): j = features[jp] ax = plt.subplot(n-1, n-1, ip*(n-1) + jp + 1) plt.scatter(u0[:m,j], u0[:m,i], s=0.5, c=[ps.COLORS[1]], rasterized=True) plt.scatter(u1[:m,j], u1[:m,i], s=0.5, c=[ps.COLORS[2]], rasterized=True) plt.scatter(u2[:m,j], u2[:m,i], s=0.5, c=[ps.COLORS[3]], rasterized=True) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) plt.xlim(-1.5,1.5) plt.ylim(-1.5,1.5) if ip == n - 2: plt.xlabel(str(j)) if jp == 0: plt.ylabel(str(i)) plt.tight_layout() plt.savefig("../figures/lhc_u_scatter.pdf")Applying k-means on irisfrom sklearn import datasets iris = datasets.load_iris() x = iris.data y = iris.target k_means_iris = KMeans(n_clusters = 3) k_means_iris.fit(x) #k_means_iris.score(k_means_iris.labels_,y) #l = k_means_iris.labels_.reshape(-1,1) plt.scatter(x[:,0],x[:,1],c = k_means_iris.labels_) from sklearn.metrics import accuracy_score #accuracy_score(y,k_means_iris.labels_) yy = [] pred = k_means_iris.labels_ for i in range(len(pred)): if pred[i] == 1: yy.append(0) elif pred[i] == 2: yy.append(1) elif pred[i] == 0: yy.append(2) yy = np.array(yy) accuracy_score(y,yy)Load ksent vectors of random genome samples of 16kbdf = pd.read_pickle("../data/random-ksent.pkl")Look at the datadf.head() df.shape df.spicies.value_counts()Databunchvalid_idx = random.sample(range(df.shape[0]), int(np.floor(df.shape[0]* 0.2))) db = (ItemList.from_df(df,cols="ksent"). split_by_idx(valid_idx). label_from_df(cols="spicies"). databunch())Modeldef submodel(dims, bias=False): layer_dims = list(zip(dims[:-1],dims[1:])) fcl = [nn.Linear(*x, bias=bias) for x in layer_dims] [nn.init.xavier_uniform_(m.weight) for m in fcl] if bias: for l in fcl: l.bias.data.normal_(0, 1) relu = [nn.ReLU() for _ in range(len(fcl))] layers = np.asarray(list(zip(fcl, relu))).ravel()[:-1] return nn.Sequential(*layers) class Classifier (nn.Module): def __init__(self, encoder_dims, classifier_dims): super().__init__() self.encoder = submodel(encoder_dims,bias=True) self.classifier = submodel(classifier_dims,bias=True) def forward(self, x): x = self.encoder(x) return F.softmax(self.classifier(x), dim=1) def save_encoder(self,file:PathOrStr): torch.save(self.encoder.state_dict(), path) model = Classifier([100,50,3], [3,20,3]).double() modelLearnerlearn = Learner(db, model,metrics=[accuracy]) learn.loss_func learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(10,1e-2) learn.recorder.plot_metrics() interpretation = learn.interpret() interpretation.plot_confusion_matrix()Working with FITS-cubes Authors[ (DK)](http://www.astronomy.dk), [](http://www.astro.wisc.edu/our-people/post-doctoral-students/shetty-shravan/), [](http://www.astro.wisc.edu/our-people/graduate-students/gonzalez-casanova-diego/), [](http://www.astro.wisc.edu/our-people/scientists/hernandez-audra/), , , glas Learning Goals* Find and download data using `astroquery`* Read and plot slices across different dimensions of a data cube* Compare different data sets (2D and 3D) by overploting contours* Transform coordinate projections and match data resolutions with `reproject`* Create intensity moment maps / velocity maps with `spectral_cube` KeywordsFITS, image manipulation, data cubes, radio astronomy, WCS, astroquery, reproject, spectral cube, matplotlib, contour plots, colorbar SummaryIn this tutorial we will visualize 2D and 3D data sets in Galactic and 
equatorial coordinates. The tutorial will walk you through a visual analysis of the Small Magellanic Cloud (SMC) using HI 21cm emission and a Herschel 250 micron map. We will learn how to read in data from a file, query and download matching data from Herschel using astroquery, and plot the resulting images in a multitude of ways. The primary libraries we'll be using are: [astroquery](http://www.astropy.org/astroquery/), [spectral_cube](https://spectral-cube.readthedocs.io/en/latest/), [reproject](https://reproject.readthedocs.io/en/stable/), [matplotlib](https://matplotlib.org/). They can be installed using conda: ```
conda install -c astropy astroquery
conda install -c astropy spectral-cube
conda install -c astropy reproject
``` Alternatively, if you don't use conda, you can use pip.import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import astropy.units as u from astropy.utils.data import download_file from astropy.io import fits # We use fits to open the actual data file from astropy.utils import data data.conf.remote_timeout = 60 from spectral_cube import SpectralCube from astroquery.esasky import ESASky from astroquery.utils import TableList from astropy.wcs import WCS from reproject import reproject_interpDownload the HI Data We'll be using HI 21 cm emission data from the [HI4Pi survey](http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1610.06175). We want to look at neutral gas emission from the Magellanic Clouds and learn about the kinematics of the system and column densities. Using the VizieR catalog, we've found a relevant data cube to use that covers this region of the sky. You can also download an allsky data cube, but this is a very large file, so picking out sub-sections can be useful! For us, the [relevant file is available via ftp from CDS Strasbourg](http://cdsarc.u-strasbg.fr/vizier/ftp/cats/J/A+A/594/A116/CUBES/GAL/TAN/TAN_C14.fits). We have a reduced version of it which will be a FITS data cube in Galactic coordinates using the tangential sky projection. Sure, we could download this file directly, but why do that when we can load it up via one line of code and have it ready to use in our cache? Download the HI FITS Cube # Downloads the HI data in a fits file format hi_datafile = download_file( 'http://data.astropy.org/tutorials/FITS-cubes/reduced_TAN_C14.fits', cache=True, show_progress=True)Awesome, so now we have a copy of the data file (a FITS file). So how do we do anything with it? Luckily for us, the [spectral_cube](https://spectral-cube.readthedocs.io/en/latest/) package does a lot of the nitty-gritty work for us to manipulate this data and even quickly look through it. So let's open up our data file and read in the data as a SpectralCube! The variable `cube` has the data using SpectralCube and `hi_data` is the data cube from the FITS file without the special formatting from SpectralCube.hi_data = fits.open(hi_datafile) # Open the FITS file for reading cube = SpectralCube.read(hi_data) # Initiate a SpectralCube hi_data.close() # Close the FITS file - we already read it in and don't need it anymore!If you happen to already have the FITS file on your system, you can also skip the fits.open step and just directly read a FITS file with SpectralCube like this:`cube = SpectralCube.read('path_to_data_file/TAN_C14.fits')`So what does this SpectralCube object actually look like? Let's find out! The first check is to print out the cube.print(cube)Some things to pay attention to here:A data cube has three axes.
In this case, there is Galactic Longitude (x), Galactic Latitude (y), and a spectral axis in terms of a LSR Velocity (z - listed as s with `spectral_cube`).The data hidden in the cube lives as an ndarray with shape (n_s, n_y, n_x) so that axis 0 corresponds with the Spectral Axis, axis 1 corresponds with the Galactic Latitude Axis, and axis 2 corresponds with the Galactic Longitude Axis. When we `print(cube)` we can see the shape, size, and units of all axes as well as the data stored in the cube. With this cube, the units of the data in the cube are temperatures (K). The spatial axes are in degrees and the Spectral Axis is in (meters / second).The cube also contains information about the coordinates corresponding to the data in the form of a WCS (World Coordinate System) object. SpectralCube is clever and keeps all the data masked until you really need it so that you can work with large sets of data. So let's see what our data actually looks like!SpectralCube has a `quicklook()` method which can give a handy sneak-peek preview of the data. It's useful when you just need to glance at a slice or spectrum without knowing any other information (say, to make sure the data isn't corrupted or is looking at the right region.) To do this, we index our cube along one axis (for a slice) or two axes (for a spectrum):cube[300, :, :].quicklook() # Slice the cube along the spectral axis, and display a quick image cube[:, 75, 75].quicklook() # Extract a single spectrum through the data cubeTry messing around with slicing the cube along different axes, or picking out different spectra Make a smaller cube, focusing on the Magellanic CloudsThe HI data cube we downloaded is bigger than we actually need it to be. Let's try zooming in on just the part we need and make a new `sub_cube`. The easiest way to do this is to cut out part of the cube with indices or coordinates.We can extract the world coordinates from the cube using the `.world()` method. Warning: using .world() will extract coordinates from every position you ask for. This can be a TON of data if you don't slice through the cube. One work around is to slice along two axes and extract coordinates just along a single dimension. The output of `.world()` is an Astropy `Quantity` representing the pixel coordinates, which includes units. You can extract these Astropy `Quantity` objects by slicing the data._, b, _ = cube.world[0, :, 0] #extract latitude world coordinates from cube _, _, l = cube.world[0, 0, :] #extract longitude world coordinates from cubeYou can then extract a `sub_cube` in the spatial coordinates of the cube# Define desired latitude and longitude range lat_range = [-46, -40] * u.deg lon_range = [306, 295] * u.deg # Create a sub_cube cut to these coordinates sub_cube = cube.subcube(xlo=lon_range[0], xhi=lon_range[1], ylo=lat_range[0], yhi=lat_range[1]) print(sub_cube)Cut along the Spectral Axis:We don't really need data from such a large velocity range so let's just extract a little slab. We can do this in any units that we want using the `.spectral_slab()` method.sub_cube_slab = sub_cube.spectral_slab(-300. *u.km / u.s, 300. *u.km / u.s) print(sub_cube_slab)Moment MapsMoment maps are a useful analysis tool to study data cubes. In short, a moment is a weighted integral along an axis (typically the Spectral Axis) that can give information about the total Intensity (or column density), mean velocity, or velocity dispersion along lines of sight. SpectralCube makes this very simple with the `.moment()` method. 
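For orientation, the moments used below follow the standard definitions (a general convention, not something specific to this dataset): for brightness temperature $T(v)$ along the spectral axis, the zeroth moment is the integrated intensity $M_0 = \int T\,dv$, and the first moment is the intensity-weighted mean velocity $M_1 = \int v\,T\,dv \,/\, \int T\,dv$.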
We can convert to friendlier spectral units of km/s and these new 2D projections can be saved as new FITS files, complete with modified WCS information as well.moment_0 = sub_cube_slab.with_spectral_unit(u.km/u.s).moment(order=0) # Zero-th moment moment_1 = sub_cube_slab.with_spectral_unit(u.km/u.s).moment(order=1) # First moment # Write the moments as a FITS image # moment_0.write('hi_moment_0.fits') # moment_1.write('hi_moment_1.fits') print('Moment_0 has units of: ', moment_0.unit) print('Moment_1 has units of: ', moment_1.unit) # Convert Moment_0 to a Column Density assuming optically thin media hi_column_density = moment_0 * 1.82 * 10**18 / (u.cm * u.cm) * u.s / u.K / u.km Display the Moment Maps The [WCSAxes](http://docs.astropy.org/en/stable/visualization/wcsaxes/) framework in Astropy allows us to display images with different coordinate axes and projections. As long as we have a WCS object associated with the data, we can transfer that projection to a matplotlib axis. SpectralCube makes it possible to access just the WCS object associated with a cube object.print(moment_1.wcs) # Examine the WCS object associated with the moment mapAs expected, the first moment image we created only has two axes (Galactic Longitude and Galactic Latitude). We can pass this WCS object directly into a matplotlib axis instance.# Initiate a figure and axis object with WCS projection information fig = plt.figure(figsize=(18, 12)) ax = fig.add_subplot(111, projection=moment_1.wcs) # Display the moment map image im = ax.imshow(moment_1.hdu.data, cmap='RdBu_r', vmin=0, vmax=200) ax.invert_yaxis() # Flips the Y axis # Add axes labels ax.set_xlabel("Galactic Longitude (degrees)", fontsize=16) ax.set_ylabel("Galactic Latitude (degrees)", fontsize=16) # Add a colorbar cbar = plt.colorbar(im, pad=.07) cbar.set_label('Velocity (km/s)', size=16) # Overlay set of RA/Dec Axes overlay = ax.get_coords_overlay('fk5') overlay.grid(color='white', ls='dotted', lw=2) overlay[0].set_axislabel('Right Ascension (J2000)', fontsize=16) overlay[1].set_axislabel('Declination (J2000)', fontsize=16) # Overplot column density contours levels = (1e20, 5e20, 1e21, 3e21, 5e21, 7e21, 1e22) # Define contour levels to use ax.contour(hi_column_density.hdu.data, cmap='Greys_r', alpha=0.5, levels=levels)As you can see, the WCSAxes framework is very powerful and similar to making any matplotlib style plot. Display a Longitude-Velocity Slice The [WCSAxes](http://docs.astropy.org/en/stable/visualization/wcsaxes/) framework in Astropy also lets us slice the data across different dimensions. It is often useful to slice along a single latitude and display an image showing longitude and velocity information only (position-velocity or longitude-velocity diagram). This can be done by specifying the `slices` keyword and selecting the appropriate slice through the data. `slices` requires a 3D tuple containing the index to be sliced along and where we want the two axes to be displayed. This should be specified in the same order as the WCS object (longitude, latitude, velocity) as opposed to the order of the numpy array holding the data (velocity, latitude, longitude).
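If you are ever unsure which axis is which for your own cube, a quick check along these lines helps (a small sketch reusing `sub_cube_slab` from above; the exact axis names printed depend on the data):

```python
# WCS order is (lon, lat, spectral); the numpy array shape is reversed: (spectral, lat, lon)
print(sub_cube_slab.wcs.wcs.ctype)  # axis types in WCS order
print(sub_cube_slab.shape)          # array shape in numpy order
```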
We then select the appropriate data by indexing along the numpy array.lat_slice = 18 # Index of latitude dimension to slice along # Initiate a figure and axis object with WCS projection information fig = plt.figure(figsize=(18, 12)) ax = fig.add_subplot(111, projection=sub_cube_slab.wcs, slices=('y', lat_slice, 'x')) # Above, we have specified to plot the longitude along the y axis, pick only the lat_slice # indicated, and plot the velocity along the x axis # Display the slice im = ax.imshow(sub_cube_slab[:, lat_slice, :].transpose().data) # Display the image slice ax.invert_yaxis() # Flips the Y axis # Add axes labels ax.set_xlabel("LSR Velocity (m/s)", fontsize=16) ax.set_ylabel("Galactic Longitude (degrees)", fontsize=16) # Add a colorbar cbar = plt.colorbar(im, pad=.07, orientation='horizontal') cbar.set_label('Temperature (K)', size=16)As we can see, the SMC seems to be only along positive velocities. Try: Create a new spectral slab isolating just the SMC and slice along a different dimension to create a latitude-velocity diagram Find and Download a Herschel ImageThis is great, but we want to compare the HI emission data with Herschel 350 micron emission to trace some dust. This can be done with [astroquery](http://www.astropy.org/astroquery/). We can query for the data by mission, take a quick look at the table of results, and download data after selecting a specific wavelength or filter. Since we are looking for Herschel data from an ESA mission, we will use the [astroquery.ESASky](http://astroquery.readthedocs.io/en/latest/esasky/esasky.html) class.Specifically, the `ESASKY.query_region_maps()` method allows us to search for a specific region of the sky either using an Astropy SkyCoord object or a string specifying an object name. In this case, we can just search for the SMC. A radius to search around the object can also be specified.# Query for Herschel data in a 1 degree radius around the SMC result = ESASky.query_region_maps('SMC', radius=1*u.deg, missions='Herschel') print(result)Here, the result is a TableList which contains 24 Herschel data products that can be downloaded. We can see what information is available in this TableList by examining the keys in the Herschel Table.result['HERSCHEL'].keys()We want to find a 350 micron image, so we need to look closer at the filters used for these observations.result['HERSCHEL']['filter']Luckily for us, there is an observation made with three filters: 250, 350, and 500 microns. This is the object we will want to download. One way to do this is by making a boolean mask to select out the Table entry corresponding with the desired filter. Then, the `ESASky.get_maps()` method will download our data provided a TableList argument. Note that the below command requires an internet connection to download five quite large files. 
It could take several minutes to complete.filters = result['HERSCHEL']['filter'].astype(str) # Convert the list of filters from the query to a string # Construct a boolean mask, searching for only the desired filters mask = np.array(['250, 350, 500' == s for s in filters], dtype='bool') # Re-construct a new TableList object containing only our desired query entry target_obs = TableList({"HERSCHEL":result['HERSCHEL'][mask]}) # This will be passed into ESASky.get_maps() IR_images = ESASky.get_maps(target_obs) # Download the images IR_images['HERSCHEL'][0]['350'].info() # Display some information about the 350 micron imageSince we are just doing some qualitative analysis, we only need the image, but you can also access lots of other information from our downloaded object, such as errors. Let's go ahead and extract just the WCS information and image data from the 350 micron image.herschel_header = IR_images['HERSCHEL'][0]['350']['image'].header herschel_wcs = WCS(IR_images['HERSCHEL'][0]['350']['image']) # Extract WCS information herschel_imagehdu = IR_images['HERSCHEL'][0]['350']['image'] # Extract Image data print(herschel_wcs)With this, we can display this image using matplotlib with [WCSAxes](http://docs.astropy.org/en/stable/visualization/wcsaxes/index.html) and the `LogNorm()` object so we can log scale our image.# Set Nans to zero himage_nan_locs = np.isnan(herschel_imagehdu.data) herschel_data_nonans = herschel_imagehdu.data herschel_data_nonans[himage_nan_locs] = 0 # Initiate a figure and axis object with WCS projection information fig = plt.figure(figsize=(18, 12)) ax = fig.add_subplot(111, projection=herschel_wcs) # Display the moment map image im = ax.imshow(herschel_data_nonans, cmap='viridis', norm=LogNorm(), vmin=2, vmax=50) # ax.invert_yaxis() # Flips the Y axis # Add axes labels ax.set_xlabel("Right Ascension", fontsize = 16) ax.set_ylabel("Declination", fontsize = 16) ax.grid(color = 'white', ls = 'dotted', lw = 2) # Add a colorbar cbar = plt.colorbar(im, pad=.07) cbar.set_label(''.join(['Herschel 350'r'$\mu$m ','(', herschel_header['BUNIT'], ')']), size = 16) # Overlay set of Galactic Coordinate Axes overlay = ax.get_coords_overlay('galactic') overlay.grid(color='black', ls='dotted', lw=1) overlay[0].set_axislabel('Galactic Longitude', fontsize=14) overlay[1].set_axislabel('Galactic Latitude', fontsize=14)Overlay HI 21 cm Contours on the IR 30 micron ImageTo visually compare the neutral gas and dust as traced by HI 21 cm emission and IR 30 micron emission, we can use contours and colorscale images produced using the [WCSAxes](http://docs.astropy.org/en/stable/visualization/wcsaxes/index.html) framework and the `.get_transform()` method. The [WCSAxes.get_transform()](http://docs.astropy.org/en/stable/api/astropy.visualization.wcsaxes.WCSAxes.htmlastropy.visualization.wcsaxes.WCSAxes.get_transform) method returns a transformation from a specified frame to the pixel/data coordinates. 
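The general pattern looks like this (a minimal sketch rather than the full plotting cell below; `other_data` and `other_wcs` are placeholders for any image array and its matching WCS):

```python
# ax was created with projection=herschel_wcs, so it draws in that frame by default.
# get_transform() returns a matplotlib transform that reprojects on the fly.
tr = ax.get_transform(other_wcs)                # e.g. other_wcs = hi_column_density.wcs
ax.contour(other_data, levels=5, transform=tr)  # contours land in the right place on the Herschel image
```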
It accepts a string specifying the frame or a WCS object.# Initiate a figure and axis object with WCS projection information fig = plt.figure(figsize=(18, 12)) ax = fig.add_subplot(111, projection=herschel_wcs) # Display the moment map image im = ax.imshow(herschel_data_nonans, cmap='viridis', norm=LogNorm(), vmin=5, vmax=50, alpha=.8) # ax.invert_yaxis() # Flips the Y axis # Add axes labels ax.set_xlabel("Right Ascension", fontsize=16) ax.set_ylabel("Declination", fontsize=16) ax.grid(color = 'white', ls='dotted', lw=2) # Extract x and y coordinate limits x_lim = ax.get_xlim() y_lim = ax.get_ylim() # Add a colorbar cbar = plt.colorbar(im, fraction=0.046, pad=-0.1) cbar.set_label(''.join(['Herschel 350'r'$\mu$m ','(', herschel_header['BUNIT'], ')']), size=16) # Overlay set of RA/Dec Axes overlay = ax.get_coords_overlay('galactic') overlay.grid(color='black', ls='dotted', lw=1) overlay[0].set_axislabel('Galactic Longitude', fontsize=14) overlay[1].set_axislabel('Galactic Latitude', fontsize=14) hi_transform = ax.get_transform(hi_column_density.wcs) # extract axes Transform information for the HI data # Overplot column density contours levels = (2e21, 3e21, 5e21, 7e21, 8e21, 1e22) # Define contour levels to use ax.contour(hi_column_density.hdu.data, cmap='Greys_r', alpha=0.8, levels=levels, transform=hi_transform) # include the transform information with the keyword "transform" # Overplot velocity image so we can also see the Gas velocities im_hi = ax.imshow(moment_1.hdu.data, cmap='RdBu_r', vmin=0, vmax=200, alpha=0.5, transform=hi_transform) # Add a second colorbar for the HI Velocity information cbar_hi = plt.colorbar(im_hi, orientation='horizontal', fraction=0.046, pad=0.07) cbar_hi.set_label('HI 'r'$21$cm Mean Velocity (km/s)', size=16) # Apply original image x and y coordinate limits ax.set_xlim(x_lim) ax.set_ylim(y_lim)Using reproject to match image resolutionsThe [reproject](https://reproject.readthedocs.io/en/stable/) package is a powerful tool allowing for image data to be transformed into a variety of projections and resolutions. It's most powerful use is in fact to transform data from one map projection to another without losing any information and still properly conserving flux values within the data. It even has a method to perform a fast reprojection if you are not too concerned with the absolute accuracy of the data values. A simple use of the reproject package is to scale down (or up) resolutions of an image artificially. This could be a useful step if you are trying to get emission line ratios or directly compare the Intensity or Flux from a tracer to that of another tracer in the same physical point of the sky. From our previously made images, we can see that the IR Herschel Image has a higher spatial resolution than that of the HI data cube. We can look into this more by taking a better look at both header objects and using reproject to downscale the Herschel Image.print('IR Resolution (dx,dy) = ', herschel_header['cdelt1'], herschel_header['cdelt2']) print('HI Resolution (dx,dy) = ', hi_column_density.hdu.header['cdelt1'], hi_column_density.hdu.header['cdelt1'])Note: Different ways of accessing the header are shown above corresponding to the different object types (coming from SpectralCube vs astropy.io.fits) As we can see, the IR data has over 10 times higher spatial resolution. In order to create a new projection of an image, all we need to specifiy is a new header containing WCS information to transform into. 
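Concretely, the "new header" is nothing more exotic than a FITS header carrying WCS keywords; a tiny sketch using the objects defined above (this mirrors what the reprojection cell below does):

```python
# Borrow the HI column-density map's header as the target grid and projection.
target_header = hi_column_density.hdu.header
print(target_header['CTYPE1'], target_header['CTYPE2'])  # should report Galactic lon/lat axis types
```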
These can be created manually if you wanted to completely change something about the projection type (i.e. going from a Mercator map projection to a Tangential map projection). For us, since we want to match our resolutions, we can just "steal" the WCS object from the HI data. Specifically, we will be using the [reproject_interp()](https://reproject.readthedocs.io/en/stable/api/reproject.reproject_interp.htmlreproject.reproject_interp) function. This takes two arguments: an HDU object that you want to reproject, and a header containing WCS information to reproject onto.rescaled_herschel_data, _ = reproject_interp(herschel_imagehdu, # reproject the Herschal image to match the HI data hi_column_density.hdu.header) rescaled_herschel_imagehdu = fits.PrimaryHDU(data = rescaled_herschel_data, # wrap up our reprojection as a new fits HDU object header = hi_column_density.hdu.header)`rescaled_herschel_imagehdu` will now behave just like the other FITS images we have been working with, but now with a degraded resolution matching the HI data. This includes having its native coordinates in Galactic rather than RA and Dec.# Set Nans to zero image_nan_locs = np.isnan(rescaled_herschel_imagehdu.data) rescaled_herschel_data_nonans = rescaled_herschel_imagehdu.data rescaled_herschel_data_nonans[image_nan_locs] = 0 # Initiate a figure and axis object with WCS projection information fig = plt.figure(figsize = (18,12)) ax = fig.add_subplot(111,projection = WCS(rescaled_herschel_imagehdu)) # Display the moment map image im = ax.imshow(rescaled_herschel_data_nonans, cmap = 'viridis', norm = LogNorm(), vmin = 5, vmax = 50, alpha = .8) #im = ax.imshow(rescaled_herschel_imagehdu.data, cmap = 'viridis', # norm = LogNorm(), vmin = 5, vmax = 50, alpha = .8) ax.invert_yaxis() # Flips the Y axis # Add axes labels ax.set_xlabel("Galactic Longitude", fontsize = 16) ax.set_ylabel("Galactic Latitude", fontsize = 16) ax.grid(color = 'white', ls = 'dotted', lw = 2) # Extract x and y coordinate limits x_lim = ax.get_xlim() y_lim = ax.get_ylim() # Add a colorbar cbar = plt.colorbar(im, fraction=0.046, pad=-0.1) cbar.set_label(''.join(['Herschel 350'r'$\mu$m ','(', herschel_header['BUNIT'], ')']), size = 16) # Overlay set of RA/Dec Axes overlay = ax.get_coords_overlay('fk5') overlay.grid(color='black', ls='dotted', lw = 1) overlay[0].set_axislabel('Right Ascension', fontsize = 14) overlay[1].set_axislabel('Declination', fontsize = 14) hi_transform = ax.get_transform(hi_column_density.wcs) # extract axes Transform information for the HI data # Overplot column density contours levels = (2e21, 3e21, 5e21, 7e21, 8e21, 1e22) # Define contour levels to use ax.contour(hi_column_density.hdu.data, cmap = 'Greys_r', alpha = 0.8, levels = levels, transform = hi_transform) # include the transform information with the keyword "transform" # Overplot velocity image so we can also see the Gas velocities im_hi = ax.imshow(moment_1.hdu.data, cmap = 'RdBu_r', vmin = 0, vmax = 200, alpha = 0.5, transform = hi_transform) # Add a second colorbar for the HI Velocity information cbar_hi = plt.colorbar(im_hi, orientation = 'horizontal', fraction=0.046, pad=0.07) cbar_hi.set_label('HI 'r'$21$cm Mean Velocity (km/s)', size = 16) # Apply original image x and y coordinate limits ax.set_xlim(x_lim) ax.set_ylim(y_lim)Data uploadingimg_width, img_height, channels = 160, 160, 3 input_shape = (img_width, img_height, 3) batch_size = 64 merged_imgs_dir = '/data' # Images preprocessing imgs_datagen = ImageDataGenerator(rescale=1. 
/ 255, featurewise_center=True, featurewise_std_normalization=True, validation_split=0.08) train_generator = imgs_datagen.flow_from_directory( merged_imgs_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical', subset='training') validation_generator = imgs_datagen.flow_from_directory( merged_imgs_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical', subset='validation')Model structure def pre_activation_residual_unit(unit_input, n_input_ch=None, n_output_ch=None, stride=1): if n_output_ch is None: n_output_ch = unit_input.get_shape()[-1] if n_input_ch is None: n_input_ch = n_output_ch // 4 x = BatchNormalization()(unit_input) x = Activation('relu')(x) x = Conv2D(n_input_ch, (1, 1), padding='same', strides=stride)(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(n_input_ch, (3, 3), padding='same', strides=1)(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(n_output_ch, (1, 1), padding='same', strides=1)(x) if n_input_ch != n_output_ch or stride != 1: unit_input = Conv2D(n_output_ch, (1, 1), padding='same', strides=(stride, stride))(unit_input) x_added = Add()([x, unit_input]) return x_added def attention_module(input, encoder_depth=1): # Hyperparameters according to the article: # number of preprocessing Residual Units before splitting into trunk branch and mask branch p = 1 # number of Residual Units in the trunk branch t = 2 # number of Residual Units between adjacent pooling layers in the mask branch r = 1 n_input_ch = input.get_shape()[-1] n_output_ch = n_input_ch # First p Residual Units residual_output = input for _ in range(p): residual_output = pre_activation_residual_unit(residual_output) # ---------------------------- Trunk Branch part ---------------------------- trunk_output = residual_output for _ in range(t): trunk_output = pre_activation_residual_unit(trunk_output) # -------------------------- Soft Mask Branch part -------------------------- # First down sampling down_sampling_output = MaxPool2D(padding='same')(residual_output) # Apply r Residual Units after down sampling residual_output = down_sampling_output for _ in range(r): residual_output = pre_activation_residual_unit(residual_output) soft_mask_output = residual_output # Down sampling - up sampling part (with skip connections) skip_connections = [] # Down sampling part for _ in range(encoder_depth - 1): # create skip connection between bottom-up and top-down parts skip_connection_output = pre_activation_residual_unit(residual_output) # print('Skip connection shape:', skip_connection_output.shape) skip_connections.append(skip_connection_output) # apply down sampling down_sampling_output = MaxPool2D(padding='same')(residual_output) # apply r Residual Units residual_output = down_sampling_output for _ in range(r): residual_output = pre_activation_residual_unit(residual_output) # reverse skip connections list (we will add connections in reverse order) skip_connections = list(reversed(skip_connections)) # Up sampling part for i in range(encoder_depth - 1): # apply r Residual Units for _ in range(r): residual_output = pre_activation_residual_unit(residual_output) # apply up sampling up_sampling_output = UpSampling2D()(residual_output) # adding skip connections soft_mask_output = Add()([up_sampling_output, skip_connections[i]]) residual_output = soft_mask_output # Final r Residual Units for _ in range(r): residual_output = pre_activation_residual_unit(residual_output) # Final up sampling up_sampling_output =
UpSampling2D()(residual_output) conv_output = Conv2D(n_input_ch, (1, 1))(up_sampling_output) conv_output = Conv2D(n_input_ch, (1, 1))(conv_output) soft_mask_output = Activation('sigmoid')(conv_output) # ------------- Truck and Soft Mask Branches concatenation part -------------- output = Multiply()([trunk_output, soft_mask_output]) output = Add()([trunk_output, output]) # Final p Residual Units for _ in range(p): output = pre_activation_residual_unit(output) return output def att_resnet_56(shape=(160, 160, 3), n_channels=64, n_classes=9, l2_par=0.01): reg = l2(l2_par) model_input = Input(shape=shape) x = Conv2D(n_channels, (7, 7), strides=(2, 2), padding='same')(model_input) # shape after: 80x80 x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x) # shape after: 40x40 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 4) # shape after: 40x40 x = attention_module(x, encoder_depth=2) # shape after: 40x40 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 8, stride=2) # shape after: 20x20 x = attention_module(x, encoder_depth=2) # shape after: 20x20 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 16, stride=2) # shape after: 10x10 x = attention_module(x, encoder_depth=1) # shape after: 10x10 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 32, stride=2) # shape after: 5x5 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 32) # shape after: 5x5 x = pre_activation_residual_unit(x, n_output_ch=n_channels * 32) # shape after: 5x5 pool_size = (x.shape[1], x.shape[2]) x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x) # shape after: 1x1 x = Flatten()(x) model_output = Dense(n_classes, kernel_regularizer=reg, activation='softmax')(x) model = Model(model_input, model_output) return modelModel trainingfrom keras.optimizers import Adam from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint att_resnet_model = att_resnet_56(n_classes=9) att_resnet_model.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy']) reducer = ReduceLROnPlateau(monitor='val_accuracy', factor=0.2, patience=5, min_lr=10e-7, min_delta=0.001, verbose=1) stopper = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=1) filepath="/att_resnet_best_weights.{epoch:02d}-{val_accuracy:.4f}" checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max') model_callbacks= [reducer, stopper, checkpoint] batch_size = 128 att_resnet_model.fit_generator(train_generator, steps_per_epoch=train_generator.samples//batch_size, epochs=5, validation_data=validation_generator, validation_steps=validation_generator.samples//batch_size, callbacks=model_callbacks, initial_epoch=0)To set things up we'll need to stand up our model server. By default the server has a max refresh rate of `25` hertz. However for this example to be convincingly smooth we'll want to bump that up to about `60`. If you want to unlock the refresh rate set `refresh=None`. We'll then hook up a layout object to a model resource (see the introductory example or [read the docs](https://github.com/rmorshea/purlypurly) if this doesn't make sense).from example_utils import localhost # increase model server refresh cap to 60 hertz. 
To set things up we'll need to stand up our model server. By default the server has a max refresh rate of `25` hertz. However for this example to be convincingly smooth we'll want to bump that up to about `60`. If you want to unlock the refresh rate set `refresh=None`. We'll then hook up a layout object to a model resource (see the introductory example or [read the docs](https://github.com/rmorshea/purlypurly) if this doesn't make sense).from example_utils import localhost # increase model server refresh cap to 60 hertz. purly.state.Machine(refresh=60).daemon() # name the layout resource "simple-slider" and connect to the update stream websocket_url = localhost('ws', 8000) + '/model/simple-slider/stream' layout = purly.Layout(websocket_url)We'll then create a simple slider with values between 1 and 10 and add it to the layout before displaying.slider = layout.html('input', type='range', value="5", min=1, max=10, step=1) layout.children.append(slider) layout.sync() layoutYou should now see the slider in the output above!However when you try to move it, nothing happens, so we'll need to hook into some mouse events to make the display animate. To do that requires the `onChange` event, which can be captured via the `on` decorator of the `slider` element. The one detail that is significant is passing the string `'value'` as a second argument to the decorator. This will sync the slider's value between Python and the browser when the event occurs.Once we've done that we can simply print out the now-synced `slider.attributes['value']` as it changes.import sys @slider.on('Change', 'value') def printer(): sys.stdout.write('\r%02d' % int(slider.attributes['value'])) sys.stdout.flush() layout.serve()Memory referencesPython is often loosely described as a 'weakly typed' language with 'duck-typing' (more precisely, it is dynamically and strongly typed). These descriptions allude to aspects of the Python VM design, which we will go into in more detail here. The short answerIn Python, every time an object or variable appears, it isn't really an object or variable. Everything in Python is a __memory reference__. This means that the object or variable actually points to a piece of memory, and that the object or variable is actually at that memory location.This may seem like an academic observation, but it has important ramifications. Indeed, Python being a high level language deliberately abstracts these concepts away from the user, whilst also giving relevant tools to dig into those aspects and use them (in this sense, the language and VM are actually pretty well designed, although by no means perfect).Consider the following variables defined and manipulated:a = [1,2,3] # we define a list b = a # we assign a variable b to the variable a. This is an 'assignment by reference', not 'by value' b[1] = 10 # we manipulate b in some way print(b) # we see the effect on b print(a) # and we also see the effect on a[1, 10, 3] [1, 10, 3]We assigned b to a "by reference". This means that a and b both refer to the same piece of memory. That means that manipulations on the memory referred to by b will have an effect on the memory referred to by a. We can get round this by making an assignment onto a new piece of memory, and transferring the values in:import copy b = copy.copy(a) # under the hood, this makes a copy in memory, and transfers over the values b[1] = 100 # we manipulate b in some way print(b) # we see the effect on b print(a) # we see there is no effect on a[1, 100, 3] [1, 10, 3]Now, both a and b refer to different pieces of memory; so manipulations on the memory referred to by a have no effect on the memory referred to by b.
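As a brief aside (not in the original notes), `copy.copy` only makes a *shallow* copy: the outer object is new, but any nested objects are still shared between the original and the copy. A small sketch:

```python
# Aside (not from the original notes): copy.copy is a shallow copy, so any
# nested objects are still shared between the two lists.
import copy

a = [[1, 2], [3, 4]]
b = copy.copy(a)        # new outer list, but the inner lists are shared
b[0][0] = 99
print(a)                # [[99, 2], [3, 4]] -- the change is visible through a

c = copy.deepcopy(a)    # recursively copies the inner lists as well
c[0][0] = -1
print(a)                # [[99, 2], [3, 4]] -- a is unaffected this time
```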
This does raise the question of managing memory, which is usually abstracted away from the user. Each piece of memory has a 'reference count'c = [5,6,7] print(sys.getrefcount(c)) # this will count the actual ref, c, as well as a temp arg ref under the hood. d = c # assign another reference print(sys.getrefcount(c)) # the reference count increases by 1 # we can directly see the memory each variable refers to, and they are indeed the same print(hex(id(c))) print(hex(id(d))) # updating d to a new object means it now refers to a different piece of memory; # the memory pointed to by c has its reference count go down by 1 d = [8,9,10] print(hex(id(c))) # same as before print(hex(id(d))) # a new address print(sys.getrefcount(c)) # ref count has gone down by 1 0x7f3eb72b9300 0x7f3eb72af7c0 2Under the hood, Python keeps a reference count of all pieces of memory allocated within the Python VM. For any piece of memory where the reference count reaches 0, the __garbage collector__ will automatically free up the memory. These actions are abstracted away from the user.As a side note, when memory does not get released, due to reference counts not reaching zero (usually due to coding error, and keeping variables/objects/references around), the Python VM running the code experiences a __memory leak__. The involved answerPython tries to handle all the memory management for you. The other extreme is C++98. References, as opposed to variables or objects, are declared explicitly. Since everything is declared explicitly, and the code directly translates to the memory layout of the compiled program at runtime, C++98 is considered a statically (and strongly) typed language.An additional object, called a __pointer__, is also used, which points to a specific piece of memory. It gives a method of navigating around the memory, by incrementing or decrementing the pointer (pointers don't appear in Python, so we will not go into them here).%%script false --no-raise-error // C++ code; this will not run in Jupyter int i = 3; // this variable is defined on the stack int &ref = i; // A reference (or alias) for i int *ptr = &i; // A pointer to variable i (or stores address of i)In C++, memory can be allocated, and the memory will belong on the __heap__. In Python, since everything is a reference, implicitly all memory used is on the __heap__.%%script false --no-raise-error // C++ code; this will not run in Jupyter // These three lines are effectively what is happening in Python when running "ref = 3" int *ptr = new int; // initialise a pointer and allocate some memory to it int &ref = *ptr; // assign a reference to the pointer and allocated memory ref = 3; // assign the valueIn the same way that memory is allocated manually, so must the memory be freed, to avoid memory leaks%%script false --no-raise-error // C++ code; this will not run in Jupyter int *ptr = new int; // initialise a pointer and allocate some memory to it int &ref = *ptr; // assign a reference to the pointer and allocated memory delete ptr; // releases the memory. Assigning anything to ref or ptr now will trash memory (e.g. SEGFAULT)!!!This "free" command is what the garbage collector will be running during garbage collection for reference counts that reach zero. Whilst C++98 gives you the freedom to play directly with memory, it gives a lot of rope to hang yourself with, and yields an intense debugging experience working out how memory is being manipulated incorrectly. Common problems are references and pointers referring to incorrect or out-of-date memory, or dereferencing a NULL pointer (a default 'zero' value that does not point to any real memory).
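Back in Python, we can actually watch this reference-count-driven free happen. A small sketch (not from the original notes, and specific to CPython) uses a `weakref` callback, which fires when the object it watches is about to be deallocated without keeping that object alive itself:

```python
# Sketch (not from the original notes): in CPython an object is deallocated as
# soon as its reference count drops to zero. A weak reference does not increase
# that count, and its callback fires when the object is about to be freed.
import sys
import weakref

class Blob:
    pass

obj = Blob()
watcher = weakref.ref(obj, lambda ref: print("Blob instance was freed"))

print(sys.getrefcount(obj))  # 2: the name 'obj' plus the temporary argument reference
del obj                      # last strong reference removed -> callback fires here
print(watcher())             # None: the weak reference is now dead
```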
The broken answerJava is an intermediate example, and has more in common with Python. Java has a garbage collector, and everything in Java (apart from the primitive types) is a reference. Garbage collection in a JVM serves the same purpose as in the Python VM: memory that is no longer reachable is freed automatically (although the JVM typically uses tracing collectors rather than reference counting). The syntax for creating variables and memory is similar to C++98, and as such, Java is often considered a 'statically typed language'. The main difference in syntax is that Java, as a language, does not have pointers.%%script false --no-raise-error // Java code; this will not run in Jupyter Integer num1 = 10; Integer num2 = new Integer(10); Integer num3 = null;Random & Seedimport random dir(random) # the random module is a pseudo-random number generator; random.seed sets its starting state from random import randint,seed for i in range(10): # loop will execute 10 times. print(randint(0,10)) # after setting the seed, we will always get the same sequence of random numbers. seed(123) # set a certain starting point for i in range(10): # loop will execute 10 times. print(randint(0,10)) seed(12) for i in range(10): print(randint(0,10)) seed(0) for i in range(10): print(randint(0,10)) seed(1) for i in range(10): print(randint(0,10))2 9 1 4 1 7 7 7 10 6Rossmann Data preparation To create the feature-engineered train_clean and test_clean from the Kaggle competition data, run `rossman_data_clean.ipynb`. One important step that deals with time series is this:```pythonadd_datepart(train, "Date", drop=False)add_datepart(test, "Date", drop=False)```path = Path('../data/rossmann/') train_df = pd.read_pickle(path/'train_clean') train_df.head().T n = len(train_df); nPreparing full data settrain_df = pd.read_pickle(path/'train_clean') test_df = pd.read_pickle(path/'test_clean') len(train_df),len(test_df) procs=[FillMissing, Categorify, Normalize] cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen', 'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear', 'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw', 'SchoolHoliday_fw', 'SchoolHoliday_bw'] cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC', 'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h', 'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE', 'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday'] dep_var = 'Sales' df = train_df[cat_vars + cont_vars + [dep_var,'Date']].copy() test_df['Date'].min(), test_df['Date'].max() len(test_df) cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max() cut valid_idx = range(cut) val_df = df.iloc[valid_idx] #check validation dataframe for data not in train df def check_val_df(): print('checking val vs df classes') col_diffs={} for col in df: #new set with elements in val_df but not in df diffs = set(val_df[col]).difference(set(df[col])) print(f'col: {col}, diffs: {len(diffs)}') if len(diffs)>0: col_diffs[col]=diffs return col_diffs df[dep_var].head() def unique_deps(x:Series)->List: od = OrderedDict.fromkeys(x) res = list(OrderedDict.fromkeys(x).keys()) res.sort() return res, od classes, od =unique_deps(df[dep_var].values) #classes #data = TabularDataBunch.from_df(path, df=df, dep_var=dep_var, valid_idx=valid_idx, procs=procs, cat_names=cat_vars, cont_names=cont_vars, classes=classes, test_df=test_df) #error at predict #data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs, test_df=test_df) .split_by_idx(valid_idx) .label_from_df(cols=dep_var, label_cls=FloatList, log=True, classes=classes) .databunch()) #error on
predict #data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs, test_df=test_df) # .split_by_idx(valid_idx) # .label_from_df(cols=dep_var, label_cls=FloatList, log=True) # .databunch()) #original method data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs, test_df=test_df) .split_by_idx(valid_idx) .label_from_df(cols=dep_var, label_cls=FloatList, log=True) .databunch())--label_from_df() cols: Sales --label_from_df() cols: SalesModelmax_log_y = np.log(np.max(train_df['Sales'])*1.2) y_range = torch.tensor([0, max_log_y], device=defaults.device) learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04, y_range=y_range, metrics=exp_rmspe) #learn.model len(data.train_ds.cont_names) learn.lr_find() learn.recorder.plot() #minimum at 1.0 to 3.0 learn.fit_one_cycle(3, 3e-2, wd=0.2) learn.save('1_tabularlist') learn.recorder.plot_losses(last=-1) #learn.load('1'); #learn.fit_one_cycle(5, 3e-4) #learn.fit_one_cycle(5, 3e-4)(10th place in the competition was 0.108) Predictpreds, y = learn.get_preds(DatasetType.Test)Test 2 using:data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs, test_df=test_df) .split_by_idx(valid_idx) .label_from_df(cols=dep_var, label_cls=FloatList, log=True, classes=classes) .databunch())/mnt/963GB/Data/Python/Courses/fastai/fastai/fastai/basic_train.py in get_preds(model=TabularModel( (embeds): ModuleList( (0): E...in_features=500, out_features=1, bias=True) )), dl=None, pbar=None, cb_handler=CallbackHandler(callbacks=[], metrics=[], beta=0.98), activ=, loss_func=None, n_batch=None) 36 "Tuple of predictions and targets, and optional losses (if `loss_func`) using `dl`, max batches `n_batch`." 
37 res = [torch.cat(o).cpu() for o in---> 38 zip(*validate(model, dl, cb_handler=cb_handler, pbar=pbar, average=False, n_batch=n_batch))] global zip = undefined global validate = model = TabularModel( (embeds): ModuleList( (0): Embedding(1116, 81) (1): Embedding(8, 5) (2): Embedding(4, 3) (3): Embedding(13, 7) (4): Embedding(32, 11) (5): Embedding(3, 3) (6): Embedding(26, 10) (7): Embedding(27, 10) (8): Embedding(5, 4) (9): Embedding(4, 3) (10): Embedding(4, 3) (11): Embedding(24, 9) (12): Embedding(9, 5) (13): Embedding(13, 7) (14): Embedding(53, 15) (15): Embedding(22, 9) (16): Embedding(7, 5) (17): Embedding(7, 5) (18): Embedding(4, 3) (19): Embedding(4, 3) (20): Embedding(9, 5) (21): Embedding(9, 5) (22): Embedding(3, 3) (23): Embedding(3, 3) ) (emb_drop): Dropout(p=0.04) (bn_cont): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (layers): Sequential( (0): Linear(in_features=233, out_features=1000, bias=True) (1): ReLU(inplace) (2): BatchNorm1d(1000, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (3): Dropout(p=0.001) (4): Linear(in_features=1000, out_features=500, bias=True) (5): ReLU(inplace) (6): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (7): Dropout(p=0.01) (8): Linear(in_features=500, out_features=1, bias=True) )) dl = None cb_handler = CallbackHandler(callbacks=[], metrics=[], beta=0.98) pbar = None global average = undefined n_batch = None 39 if loss_func is not None: res.append(calc_loss(res[0], res[1], loss_func)) 40 if activ is not None: res[0] = activ(res[0])/mnt/963GB/Data/Python/Courses/fastai/fastai/fastai/basic_train.py in validate(model=TabularModel( (embeds): ModuleList( (0): E...in_features=500, out_features=1, bias=True) )), dl=None, loss_func=None, cb_handler=CallbackHandler(callbacks=[], metrics=[], beta=0.98), pbar=None, average=False, n_batch=None) 47 with torch.no_grad(): 48 val_losses,nums = [],[]---> 49 for xb,yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)): xb = undefined yb = undefined global progress_bar = dl = None global parent = undefined pbar = None global leave = undefined 50 if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False) 51 val_losses.append(loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler))~/miniconda3/envs/fastai-py3.7/lib/python3.7/site-packages/fastprogress/fastprogress.py in __init__(self=, gen=None, total=None, display=True, leave=False, parent=None, auto_update=True) 143 class NBProgressBar(ProgressBar): 144 def __init__(self, gen, total=None, display=True, leave=True, parent=None, auto_update=True):--> 145 self.progress = html_progress_bar(0, len(gen) if total is None else total, "") self.progress = undefined global html_progress_bar = global len = undefined gen = None total = None 146 super().__init__(gen, total, display, leave, parent, auto_update) 147 TypeError: object of type 'NoneType' has no len()ipdb> gen.shape*** AttributeError: 'NoneType' object has no attribute 'shape' Test N using: data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs, test_df=test_df) .split_by_idx(valid_idx) .label_from_df(cols=dep_var, label_cls=FloatList, log=True) .databunch()) ---------------------------------------------------------------------------TypeError Traceback (most recent call last) in ----> 1 preds, y = learn.get_preds(DatasetType.Test)/mnt/963GB/Data/Python/Courses/fastai/fastai/fastai/basic_train.py in get_preds(self, ds_type, with_loss, n_batch, pbar) 228 lf = 
self.loss_func if with_loss else None 229 return get_preds(self.model, self.dl(ds_type), cb_handler=CallbackHandler(self.callbacks),--> 230 activ=_loss_func2activ(self.loss_func), loss_func=lf, n_batch=n_batch, pbar=pbar) 231 232 def pred_batch(self, ds_type:DatasetType=DatasetType.Valid, batch:Tuple=None, reconstruct:bool=False) -> List[Tensor]:/mnt/963GB/Data/Python/Courses/fastai/fastai/fastai/basic_train.py in get_preds(model, dl, pbar, cb_handler, activ, loss_func, n_batch) 36 "Tuple of predictions and targets, and optional losses (if `loss_func`) using `dl`, max batches `n_batch`." 37 res = [torch.cat(o).cpu() for o in---> 38 zip(*validate(model, dl, cb_handler=cb_handler, pbar=pbar, average=False, n_batch=n_batch))] 39 if loss_func is not None: res.append(calc_loss(res[0], res[1], loss_func)) 40 if activ is not None: res[0] = activ(res[0])/mnt/963GB/Data/Python/Courses/fastai/fastai/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch) 47 with torch.no_grad(): 48 val_losses,nums = [],[]---> 49 for xb,yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)): 50 if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False) 51 val_losses.append(loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler))~/miniconda3/envs/fastai-py3.7/lib/python3.7/site-packages/fastprogress/fastprogress.py in __init__(self, gen, total, display, leave, parent, auto_update) 143 class NBProgressBar(ProgressBar): 144 def __init__(self, gen, total=None, display=True, leave=True, parent=None, auto_update=True):--> 145 self.progress = html_progress_bar(0, len(gen) if total is None else total, "") 146 super().__init__(gen, total, display, leave, parent, auto_update) 147 TypeError: object of type 'NoneType' has no len()%debug probs = np.exp(preds) #get classes d = {} p = {} i=0 for indx, prob in zip(indexes, probs): if i==0: print(f'prob: {prob}, dim: {prob.shape}, indx: {indx}') max_idx = np.argmax(prob) max_val = prob[max_idx].item() p[indx] = max_val prob_c = classes[max_idx] d[indx] = prob_c len(d) def pred_serial(): preds = {} for idx, row in test_df.iterrows(): pred = learn.predict(row) pred_str = pred[0].__str__() preds[idx]=pred_str if int(idx) % 10000 == 0: print(f'pred: {pred}, idx: {idx}') return preds preds_d = pred_serial()Jeu GRID WORLD - DQN 2Source https://cdancette.fr/2018/01/03/reinforcement-learning-part3/ Reinforcement learning en python sur un jeu simple grâce au Q-learning, Partie 3 Jeu dynamique, le terrain change à chaque partie : utilisation d’un réseau de neurones. le terrain est modifié à chaque partie. Nous n’allons pas pouvoir stocker et visiter tous les états pour entrainer l’agent. Le réseau de neurone apprendra alors a généraliser, pour obtenir une fonction de valeur Q convenable.> ![Texte alternatif…](https://cdancette.fr/assets/qlearning2/capture.gif) Principe :![Texte alternatif…](https://cdancette.fr/assets/game.png) Le jeu Une seule différence : nous allons regénérer un terrain à chaque nouvelle partie. 
Ainsi Le seul changement sera la manière d’encoder l’état du jeu.Ainsi, la fonction get_state sera réécrite de la manière suivante : def _get_state(self): x, y = self.position if self.alea: return np.reshape([self._get_grille(x, y) for (x, y) in [self.position, self.end, self.hole, self.block]], (1, 64)) return flatten(self._get_grille(x, y))Ici, au lieu de renvoyer un tableau de taille 16 contenant juste la position de notre agent marqué par un 1, nous devons également indiquer la position des éléments sur le terrain.Les éléments à indiquer sont : * notre position **self.position**, * la position de l’arrivée **self.end**, * la position du trou **self.hole**, * et la position du mur **self.block**.Au lieu d’un tableau de taille 16 contenant des 0, avec un 1 pour marquer la position de l’agent, nous avons maintenant 4 tableaux similaires. L’état du jeu sera la concaténation de ces 4 tableaux (obtenu avec np.reshape). L’état est donc un tableau de taille 4x16 = 64 cases, et il contiendra quatre 1, et les autres éléments seront des 0.Ici pour encoder les états, nous avons utilisé ce qui est appelé un One Hot encoder : Pour chaque élément, il y a 16 possibilités, et on encode cela en utilisant un tableau de 16 cases, en mettant un 1 pour représenter la position de l’élément, et 0 dans les autres cases. Ce n’est pas une représentation très compacte : les 16 cases possibles peuvent être encodées sur 4 bits, il nous suffirait donc de 4 cases dans notre tableau pour encoder la position de chaque élément. Mais le réseau devrait alors apprendre à décoder cette représentation, ce qui nécessiterait certainement un modèle plus complexe, donc un entrainement plus long, ou bien une architecture plus grosse. L’encodage que nous avons choisi ici est extrèmement simple (mais il n’est pas le plus compact possible, toutes les entrées possibles ne sont pas du tout utilisés), et sera utilisé très facilement par le réseau de neurone.import random flatten = lambda l: [item for sublist in l for item in sublist] class Game: ACTION_UP = 0 ACTION_LEFT = 1 ACTION_DOWN = 2 ACTION_RIGHT = 3 ACTIONS = [ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT, ACTION_UP] ACTION_NAMES = ["UP ", "LEFT ", "DOWN ", "RIGHT"] MOVEMENTS = { ACTION_UP: (1, 0), ACTION_RIGHT: (0, 1), ACTION_LEFT: (0, -1), ACTION_DOWN: (-1, 0) } num_actions = len(ACTIONS) def __init__(self, n, m, wrong_action_p=0.1, alea=False): self.n = n self.m = m self.wrong_action_p = wrong_action_p self.alea = alea self.generate_game() def _position_to_id(self, x, y): """Donne l'identifiant de la position entre 0 et 15""" return x + y * self.n def _id_to_position(self, id): """Réciproque de la fonction précédente""" return (id % self.n, id // self.n) def generate_game(self): cases = [(x, y) for x in range(self.n) for y in range(self.m)] hole = random.choice(cases) cases.remove(hole) start = random.choice(cases) cases.remove(start) end = random.choice(cases) cases.remove(end) block = random.choice(cases) cases.remove(block) self.position = start self.end = end self.hole = hole self.block = block self.counter = 0 if not self.alea: self.start = start return self._get_state() def reset(self): if not self.alea: self.position = self.start self.counter = 0 return self._get_state() else: return self.generate_game() def _get_grille(self, x, y): grille = [ [0] * self.n for i in range(self.m) ] grille[x][y] = 1 return grille def _get_state(self): x, y = self.position if self.alea: return np.reshape([self._get_grille(x, y) for (x, y) in [self.position, self.end, self.hole, self.block]], 
(1, 64)) return flatten(self._get_grille(x, y)) def get_random_action(self): return random.choice(self.ACTIONS) def move(self, action): """ takes an action parameter :param action : the id of an action :return ((state_id, end, hole, block), reward, is_final, actions) """ self.counter += 1 if action not in self.ACTIONS: raise Exception("Invalid action") # random actions sometimes (2 times over 10 default) choice = random.random() if choice < self.wrong_action_p: action = (action + 1) % 4 elif choice < 2 * self.wrong_action_p: action = (action - 1) % 4 d_x, d_y = self.MOVEMENTS[action] x, y = self.position new_x, new_y = x + d_x, y + d_y if self.block == (new_x, new_y): return self._get_state(), -1, False, self.ACTIONS elif self.hole == (new_x, new_y): self.position = new_x, new_y return self._get_state(), -10, True, None elif self.end == (new_x, new_y): self.position = new_x, new_y return self._get_state(), 10, True, self.ACTIONS elif new_x >= self.n or new_y >= self.m or new_x < 0 or new_y < 0: return self._get_state(), -1, False, self.ACTIONS elif self.counter > 200: self.position = new_x, new_y return self._get_state(), -10, True, self.ACTIONS else: self.position = new_x, new_y return self._get_state(), -1, False, self.ACTIONS def print(self): str = "" for i in range(self.n - 1, -1, -1): for j in range(self.m): if (i, j) == self.position: str += "x" elif (i, j) == self.block: str += "¤" elif (i, j) == self.hole: str += "o" elif (i, j) == self.end: str += "@" else: str += "." str += "\n" print(str)Un problème plus complexeOn peut voir que le problème est plus complexe que celui de la partie précédente : > au lieu de 4 états différents, encodés dans un tableau de taille 16,> on a 16x15x14x13 = 43680 états possibles. Il serait difficile d’appliquer la méthode de la première partie de ce tutoriel (stocker les Q-values dans un tableau). L’utilisation d’un réseau de neurone, comme nous l’avons vu dans la partie 2, nous sera alors très utile ici. Avec un réseau légèrement plus complexe, nous allons pouvoir résoudre ce problème. Néanmoins, l’entrainement est plus compliqué ici. Pour garantir la convergence de la méthode classique du Q-learning, l’agent devrait parcourir tous les états un grand nombre de fois. Or ici, notre espace d’état étant très grand, l’agent ne parcourera surement pas la totalité de ces états de nombreuses fois. C’est pour cela que nous attendons de notre réseau de neurone qu’il généralise, pour appliquer ses connaissances acquises à l’entrainement sur des états qu’il n’a jamais rencontré. Il aurait été impossible de généraliser avec la méthode de l’article 1, en utilisant un tableau.Nous allons évoquer plusieurs concepts très utilisés en machine learning et en reinforcement learning : **le principe du batch**, et celui de **l’experience replay**. BatchEn machine learning, pour entrainer nos réseaux de neurones, on utilise généralement des batch de données. C’est à dire qu’au lieu de ne donner qu’un seul exemple, avec son label, on lui donne à chaque fois un nombre fixe d’exemples (par exemple 10 samples). Cela permet à l’algorithme de gradient de choisir une direction qui ne dépendra pas que d’un seul exemple, qui pourrait être trop précis et ne pas améliorer le score global, mais plutôt une direction moyenne, qui sera certainement plus bénéfique au réseau de manière générale. ![Texte alternatif…](https://cdancette.fr/assets/qlearning3/batches.png)Le batching est également utilisé quand le dataset entier ne rentre pas dans la RAM / la mémoire du GPU. 
Il est alors nécéssaire de diviser le dataset en batches, que l’on va charger en mémoire pour entrainer le réseau, puis décharger. La contrainte est alors que la taille d’un batch ne dépasse pas la taille de la mémoire (c’est surtout un problème en traitement d’image, ou les données ont une taille importante).Le batching est utilisé avec l’algorithme de **stochastig gradient descent** ou descente de gradiant stochastique, en remplaçant un exemple par un petit nombre d’exemples (ou bien avec d’autres algorithmes dérivés tels que Adam).Le batching est très souvent utilisé en deep learning. Toutefois en reinforcement learning, cela paraît plus compliqué, puisque nous n’avons qu’un exemple à chaque action effectuée. Il est donc impossible à priori d’utiliser cette méthode. Nous allons voir que la méthode de **l’experience replay** permet de résoudre ce problème Experience ReplayL’experience replay est une méthode spécifique au reinforcement learning (contrairement au batching qui est utilisé très souvent en deep learning).Il nous permet en fait d’utiliser des batch pendant l’entrainement de notre agent, au lieu de l’entrainer à chaque mouvement sur les données qu’il vient de recevoir.Il s’agit de stocker à chaque mouvement les paramètres d’entrainement (état de départ, action, état d’arrivée, récompense, fin du jeu) dans une mémoire, au lieu d’entrainer notre réseau de neurone dessus. Et ensuite, régulièrement, on va piocher un batch dans cette mémoire (c’est à dire un certain nombre d’exemples), au hasard, et on va entrainer notre réseau sur ce batch.Cela permet d’éviter un trop grand va-et-vient des poids du réseau. En effet, le réseau oublie ce qu’il vient d’apprendre si on lui donne des exemples successifs qui ont des indications contraires (il n’arrive pas à généraliser, et va osciller). En lui donnant un batch en effet, la backpropagation va choisir une direction moyenne pour optimiser les poids du réseau afin de faire diminuer l’erreur.Cela va également nous permettre de voir plusieurs fois des situations passées. Et les exemples trop vieux seront vidés de la mémoire (on limite la taille de la mémoire en nombre d’exemples). ![Texte alternatif…](https://cdancette.fr/assets/qlearning3/experiencereplay.png)Il existe de nombreuses améliorations possibles. Par exempe, le *Prioritized experience replay (article [1] ou blog), ou on voit les situations les plus importantes en priorité, au lieu tirer des exemples au hasard dans la mémoire. 
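As a side note (not part of this tutorial's code), the prioritized experience replay idea mentioned above can be sketched roughly as follows, assuming each stored transition keeps a priority (for example its last TD error) and that sampling is proportional to that priority; the full algorithm also corrects the resulting bias with importance-sampling weights, which this sketch omits.

```python
# Rough sketch (not from the tutorial) of proportional prioritized replay:
# each transition gets a priority, and sampling probability is proportional to
# it, so "surprising" transitions are replayed more often than random ones.
import numpy as np

class PrioritizedMemory:
    def __init__(self, maxlen, alpha=0.6):
        self.maxlen, self.alpha = maxlen, alpha
        self.data, self.priorities = [], []

    def remember(self, transition, td_error=1.0):
        if len(self.data) >= self.maxlen:   # drop the oldest entry when full
            self.data.pop(0)
            self.priorities.pop(0)
        self.data.append(transition)
        self.priorities.append((abs(td_error) + 1e-6) ** self.alpha)

    def sample(self, batch_size):
        probs = np.array(self.priorities) / sum(self.priorities)
        idx = np.random.choice(len(self.data), size=batch_size, p=probs)
        return [self.data[i] for i in idx]
```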
Entrainement# defining the neural network import numpy as np from keras.models import Sequential, load_model from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop, Adam, sgd from keras.layers.advanced_activations import LeakyReLU import random import os from collections import deque class Trainer: def __init__(self, name=None, learning_rate=0.001, epsilon_decay=0.9999, batch_size=30, memory_size=3000): self.state_size = 64 self.action_size = 4 self.gamma = 0.9 self.epsilon = 1.0 self.epsilon_min = 0.01 self.epsilon_decay = epsilon_decay self.learning_rate = learning_rate self.memory = deque(maxlen=memory_size) self.batch_size = batch_size self.name = name if name is not None and os.path.isfile("model-" + name): model = load_model("model-" + name) else: model = Sequential() model.add(Dense(50, input_dim=self.state_size, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(self.action_size, activation='linear')) model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate)) self.model = model def decay_epsilon(self): self.epsilon *= self.epsilon_decay def get_best_action(self, state, rand=True): if rand and np.random.rand() <= self.epsilon: # The agent acts randomly return random.randrange(self.action_size) # Predict the reward value based on the given state act_values = self.model.predict(np.array(state)) # Pick the action based on the predicted reward action = np.argmax(act_values[0]) return action def replay(self, batch_size): batch_size = min(batch_size, len(self.memory)) minibatch = random.sample(self.memory, batch_size) inputs = np.zeros((batch_size, self.state_size)) outputs = np.zeros((batch_size, self.action_size)) for i, (state, action, reward, next_state, done) in enumerate(minibatch): target = self.model.predict(state)[0] if done: target[action] = reward else: target[action] = reward + self.gamma * np.max(self.model.predict(next_state)) inputs[i] = state outputs[i] = target return self.model.fit(inputs, outputs, epochs=1, verbose=0, batch_size=batch_size) def save(self, id=None, overwrite=False): name = 'model' if self.name: name += '-' + self.name else: name += '-' + str(time.time()) if id: name += '-' + id self.model.save(name, overwrite=overwrite) def remember(self, state, action, reward, next_state, done): self.memory.append([state, action, reward, next_state, done])Paramètres du trainerLes paramètres sont assez similaires à ceux de l’article p’écédent. On a juste ajouté une nouvelle couche à notre réseau (pour lui donner une meilleur force de représentation des données).La ligne qui change est celle-ci : self.memory = deque(maxlen=memory_size)**memory** est la structure de données qui va nous servir de mémoire pour stocker nos ensembles (state, action, new_state, reward). C’est grâce à cette mémoire que l’on peut faire de l’experience replay. A chaque action, on va remplir cette mémoire au lieu d’entrainer, puis on va régulièrement piocher aléatoirement des samples dans cette mémoire, pour lancer l’entrainement sur un batch de données. Pour stocker, on utilise la structure collections.deque de python. Il s’agit d’une queue qui peut avoir une taille limitée, qui va supprimer automatiquement les éléments ajoutés les premiers lorsque la taille limite est atteinte. 
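A quick illustration (not from the tutorial) of the `collections.deque` behaviour described above: once `maxlen` is reached, each append silently discards the oldest element, which is exactly what we want for a bounded replay memory.

```python
# Quick illustration (not from the tutorial): a bounded deque drops its oldest
# elements automatically once maxlen is reached.
from collections import deque

memory = deque(maxlen=3)
for transition in ["t1", "t2", "t3", "t4", "t5"]:
    memory.append(transition)
    print(list(memory))
# ['t1'] -> ['t1', 't2'] -> ['t1', 't2', 't3'] -> ['t2', 't3', 't4'] -> ['t3', 't4', 't5']
```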
ApprentissageNous allons remplacer la fonction train par une fonction remember Au lieu de lancer une étape de backpropagation, elle va tout simplement stocker ce que l’on vient de voir, dans une queue (une structure de données qui va supprimer les éléments entrés en premier).class Trainer: ... def remember(self, state, action, reward, next_state, done): self.memory.append([state, action, reward, next_state, done])Et enfin, il nous faut une fonction replay qui va piocher dans la mémoire, et donner ces données aux réseau de neurone.... def replay(self, batch_size): batch_size = min(batch_size, len(self.memory)) minibatch = random.sample(self.memory, batch_size) inputs = np.zeros((batch_size, self.state_size)) outputs = np.zeros((batch_size, self.action_size)) for i, (state, action, reward, next_state, done) in enumerate(minibatch): target = self.model.predict(state)[0] if done: target[action] = reward else: target[action] = reward + self.gamma * np.max(self.model.predict(next_state)) inputs[i] = state outputs[i] = targetAinsi, ici, on va utiliser random.sample pour piocher un certain nombres d’éléments aléatoirement dans la mémoire. On crée alors nos entrées et sorties dans le bon format pour le réseau de neurone, similairement à la fonction train de l’article précédent. La différence est qu’ici, on crée un batch de plusieurs samples, au lieu de n’en donner qu’un (on voit que la dimension des input et output est (batch_size, state_size), alors qu’elle n’avait qu’une dimension précedemment. Lancer l’entrainementLa fonction d’entrainement est un peu plus complexe, puisqu’on va executer une première partie ou l’on va remplir en partie la mémoire. Cela nous permettra de pouvoir créer des batch avec assez de données plus rapidement. Cette phase se déroule entre les lignes 13 et 25 du code ci-dessous.La deuxième phase est l’entrainement du réseau. On lance un entrainement à chaque 100 mouvements. On pourrait essayer d’en lancer plus ou moins souvent, l’apprentissage en serait surement impacté au niveau rapidité de convergence et qualité du minimum local. 
En général, lorsqu’un algorithme converge trop vite, le minimum local sera moins bon.import time from IPython.core.debugger import set_trace def train(episodes, trainer, wrong_action_p, alea, collecting=False, snapshot=5000): batch_size = 32 g = Game(4, 4, wrong_action_p, alea=alea) counter = 1 scores = [] global_counter = 0 losses = [0] epsilons = [] # we start with a sequence to collect information, without learning if collecting: collecting_steps = 10000 print("Collecting game without learning") steps = 0 while steps < collecting_steps: state = g.reset() done = False while not done: steps += 1 action = g.get_random_action() next_state, reward, done, _ = g.move(action) trainer.remember(state, action, reward, next_state, done) state = next_state print("Starting training") global_counter = 0 for e in range(episodes+1): state = g.generate_game() state = np.reshape(state, [1, 64]) score = 0 done = False steps = 0 while not done: steps += 1 global_counter += 1 action = trainer.get_best_action(state) trainer.decay_epsilon() next_state, reward, done, _ = g.move(action) next_state = np.reshape(next_state, [1, 64]) score += reward trainer.remember(state, action, reward, next_state, done) state = next_state if global_counter % 100 == 0: l = trainer.replay(batch_size) losses.append(l.history['loss'][0]) if done: scores.append(score) epsilons.append(trainer.epsilon) if steps > 200: break if e % 200 == 0: print("episode: {}/{}, moves: {}, score: {}, epsilon: {}, loss: {}" .format(e, episodes, steps, score, trainer.epsilon, losses[-1])) if e > 0 and e % snapshot == 0: trainer.save(id='iteration-%s' % e) return scores, losses, epsilonsOn peut alors lancer l’entrainementtrainer = Trainer(learning_rate=0.001, epsilon_decay=0.999995) scores, losses, epsilons = train(35000, trainer, 0.1, True, snapshot=2500)Starting training episode: 0/35000, moves: 16, score: -25, epsilon: 0.9999200029999294, loss: 0 episode: 200/35000, moves: 13, score: -22, epsilon: 0.9814001063725329, loss: 1.7394689321517944 episode: 400/35000, moves: 7, score: 4, epsilon: 0.9648862218983311, loss: 2.8518009185791016 episode: 600/35000, moves: 11, score: -20, epsilon: 0.9473988189758515, loss: 1.1752405166625977 episode: 800/35000, moves: 18, score: -7, epsilon: 0.9318204094956437, loss: 2.499586820602417 episode: 1000/35000, moves: 2, score: -11, epsilon: 0.9167181477463254, loss: 3.792001485824585 episode: 1200/35000, moves: 18, score: -27, epsilon: 0.9001982493573206, loss: 2.2735531330108643 episode: 1400/35000, moves: 15, score: -4, epsilon: 0.8835341723717306, loss: 0.05706949904561043 episode: 1600/35000, moves: 35, score: -44, epsilon: 0.8696361708712141, loss: 3.0634708404541016 episode: 1800/35000, moves: 37, score: -46, epsilon: 0.8517089646646874, loss: 5.344153881072998 episode: 2000/35000, moves: 7, sco[...]Puis affichons les courbes de lossimport matplotlib.pyplot as plt %matplotlib inline def smooth(vector, width=30): return np.convolve(vector, [1/width]*width, mode='valid') sc = smooth(scores, width=500) fig, ax1 = plt.subplots() ax1.plot(sc) ax2 = ax1.twinx() ax2.plot(epsilons, color='r') ax1.set_ylabel('Score') ax2.set_ylabel('Epsilon', color='r') ax2.tick_params('y', colors='r') plt.title("Score, and Epsilon over training") ax1.set_xlabel("Episodes") plt.figure()On voit très clairement que notre réseau a appris à jouer de manière satisfaisante. 
Le score moyen en fin d’apprentissage est légèrement au dessus de 0, ce qui veut dire que l’agent arrive à la fin en moyenne en moins de 10 coups.from IPython import display import time g = Game(4, 4, 0.1, alea=True) state = g.reset() state = g._get_state() print("state") print(" ") g.print() done = False time.sleep(5) while not done: time.sleep(1) display.clear_output(wait=True) print(trainer.model.predict(np.array(g._get_state()))) action = trainer.get_best_action(g._get_state(), rand=False) print(Game.ACTION_NAMES[action]) next_state, reward, done, _ = g.move(action) g.print() print(reward)[[3.7476544 5.4571967 6.7407804 6.145734 ]] DOWN .... .... o.¤. ...x 10PCA for Large Datasets In this lab, you will calculate the covariance matrix for datasets with a large number of samples; then, you will use PCA to transform the dataset. 1- Preparation Download eigenvectors and eigenvalues:! wget https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/meet_up/12.02.2020/eigenvectors.pt ! wget https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/meet_up/12.02.2020/eigenvalues.pt !pip install Pillow==6.2.2We'll need the following libraries:# Import the libraries we need for this lab # Use the following line of code to install the torchvision library # !conda install -y torchvision import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets import matplotlib.pylab as plt import numpy as npUse the following function to visualize data:# Display data def show_data(data_sample,y,raw_image=True): if raw_image: plt.imshow(data_sample[0].numpy().reshape(16, 16), cmap='gray') else: plt.imshow(data_sample[0].numpy().reshape(16, 16)) plt.title('y = ' + str(y.item()))PCA transform class.class transform(object): def __init__(self, eigenvalues,eigenvectors): self.eigenvalues=eigenvalues self.eigenvectors=eigenvectors #calculate the diagonal matrix of eigenvalues dim=eigenvalues[:,0].shape[0] self.diag=torch.eye(dim) for n,eigenvalue in enumerate(eigenvalues[:,0]): self.diag[n,n]=(eigenvalue+0.01)**(0.5) self.Qin=torch.inverse(self.diag) def PCA(self,X): X_hat=torch.mm(X[0].view(1,-1),self.eigenvectors) return X_hatThe image is a rectangular tensorIMAGE_SIZE = 16 composed = transforms.Compose([transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()])2- Load Data Load the training dataset by setting the parameter train to True and convert it to a tensor by placing a transform object in the argument transform.# Create and print the training dataset train_dataset = dsets.MNIST(root='./data', train=True, download=True, transform=composed ) validation_dataset = dsets.MNIST(root='./data', train=False, download=True, transform=composed)The image is a rectangular tensor. We can convert it to a 1D vector and perform PCA or ZCA. In this cell we will calculate the covariance matrix and save it. We do it in a way that does not require storing all the samples in memory at once. This takes some time, so you can load the results in the next cell instead.
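For reference, the two loops below implement the usual two-pass sample mean and covariance, with each image flattened to a row vector $x_n$ of length $16 \times 16 = 256$: $$\mu = \frac{1}{N}\sum_{n=1}^{N} x_n, \qquad C = \frac{1}{N}\sum_{n=1}^{N} (x_n - \mu)^{\top}(x_n - \mu),$$ so $C$ is a $256 \times 256$ matrix, and PCA then works with its eigendecomposition $C = V \Lambda V^{\top}$.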
dim=train_dataset[0][0].shape[1]*train_dataset[0][0].shape[2]mean=torch.zeros((1,dim))C=torch.zeros((dim,dim))N_samples=len(train_dataset)for n in range(N_samples): mean=mean+train_dataset[n][0].view(1,-1) mean=mean/N_samplesfor n in range(N_samples): x=train_dataset[n][0].view(1,-1) x=x-mean C+=torch.mm(torch.t(x),x) C=C/N_sampleseigenvalues,eigenvectors=torch.eig(C,True)torch.save(eigenvalues, 'eigenvalues.pt') torch.save(eigenvectors, 'eigenvectors.pt') Load eigenvalues and eigenvectors.eigenvalues=torch.load('eigenvalues.pt') eigenvectors=torch.load('eigenvectors.pt') show_data(train_dataset[0][0],train_dataset[0][1])PCA transform objecttransform=transform(eigenvalues,eigenvectors)We can calculate the PCA transform of the image. We can see that the transform for each digit looks similar. Try changing the variable select_number to change the digit.j=0 #select_number from 0 -9 select_number= 2 for i,(X,y) in enumerate(train_dataset): if y.item()==select_number: j+=1 Xhat=transform.PCA(X) show_data(torch.log(Xhat+1),y=y,raw_image=False) plt.show() if j==5: breakSimple popularity-based recommender based on Number of Viewsmovie_df1=pd.DataFrame(df.groupby(['movie_id','movie title'])['user_id'].count()) movie_df1.head() movie_df1['Number_Of_Views']=movie_df1['user_id'] movie_df1=movie_df1.drop('user_id',axis=1) movie_df1.head()Showing Popular Movies on the basis of Number of Viewspopular_movie1=movie_df1.sort_values(by='Number_Of_Views',ascending=False) popular_movie1['Rank']=range(1,len(popular_movie1)+1) popular_movie1.head(10) pd.merge(movie_df,popular_movie1,on="movie_id").sort_values(by='Number_Of_Views',ascending=False).head(15)Simple popularity-based recommender based on Ratingsmovie_df2=pd.DataFrame(df.groupby(['movie_id','movie title'])['rating'].mean()) movie_df2.head() movie_df2['Number_Of_Rating']=df.groupby(['movie_id','movie title'])['rating'].count() movie_df2.head() popular_movies2=movie_df2.sort_values(by='rating',ascending=False) popular_movies2.head(10)Showing Popular Movies on the basis of Ratings It is possible that a movie has only one rating and its value is 5; recommending such a movie would not be sound, so we have to decide on a threshold for the number of ratings.# suppose threshold = 100 popular_movies2_final=popular_movies2[popular_movies2['Number_Of_Rating']>=100] popular_movies2_final['Rank']=range(1,len(popular_movies2_final)+1) popular_movies2_final.head(10) pd.merge(movie_df,popular_movies2_final,on="movie_id").sort_values(by='rating',ascending=False).head(10)Build a Movie recommender with personalization Item Basedmovie_matrix2 = df.pivot_table(index='user_id', columns='movie_id', values='rating') movie_matrix2.head(10) # we are finding movies similar to a particular movie movie=20 movie_ratings=movie_matrix2[movie] movie_ratings.head(10) similar_to_the_movie=movie_matrix2.corrwith(movie_ratings) print(similar_to_the_movie.shape) similar_to_the_movie.head(10) movie_record=pd.DataFrame(similar_to_the_movie,columns=['Correlation']) movie_record.head(15) movie_record=movie_record.join(popular_movies2['Number_Of_Rating']) movie_record.head(15) movie_record.dropna(inplace=True) movie_record.head(10) suggested_movies=movie_record.sort_values(by='Correlation',ascending=False) suggested_movies.head(15) # suppose threshold=100 suggested_movies_final=suggested_movies[suggested_movies['Number_Of_Rating']>100] suggested_movies_final['Rank']=range(1,len(suggested_movies_final)+1) suggested_movies_final.head(20)
pd.merge(movie_df,suggested_movies_final,on="movie_id").sort_values(by='Correlation',ascending=False).head(10)User Basedmovie_matrix3 = df.pivot_table(index='movie_id', columns='user_id', values='rating') movie_matrix3.head(10) # we are finding user similar to a particular user user=20 user_ratings=movie_matrix3[user] user_ratings.head(10) similar_to_the_user=movie_matrix3.corrwith(user_ratings) print(similar_to_the_user.shape) similar_to_the_user.head(10) user_record=pd.DataFrame(similar_to_the_user,columns=['Correlation']) user_record.head(10) user_record.dropna(inplace=True) suggested_users=user_record.sort_values(by='Correlation',ascending=False) suggested_users.head(20) suggested_users=pd.merge(user_df,suggested_users,on="user_id").sort_values(by='Correlation',ascending=False) suggested_users.head(20) # List of users which are similar to the that particular user suggested_users['Rank']=range(1,len(suggested_users)+1) suggested_users.head(20)Now suppose if we want to recommend movies for that particular user then go to the movie list to the user with rank 1 and recommend the movies which are not seen by the particular user and then go for rank 2 and so on.......# particular user user # the userid of the user which is similar to the particular user suggested_users.iloc[0][0]Building Predictive modelsimport os import numpy as np import pandas as pdImport data# path of the processed data processed_data_path = os.path.join(os.path.pardir,'data','processed') train_data_path = os.path.join(processed_data_path,'train.csv') test_data_path = os.path.join(processed_data_path,'test.csv') train_df = pd.read_csv(train_data_path,index_col='PassengerId') test_df = pd.read_csv(test_data_path,index_col='PassengerId') train_df.info() test_df.info() Int64Index: 418 entries, 892 to 1309 Data columns (total 32 columns): Age 418 non-null float64 Fare 418 non-null float64 FamilySize 418 non-null int64 IsMother 418 non-null int64 IsMale 418 non-null int64 Deck_A 418 non-null int64 Deck_B 418 non-null int64 Deck_C 418 non-null int64 Deck_D 418 non-null int64 Deck_E 418 non-null int64 Deck_F 418 non-null int64 Deck_G 418 non-null int64 Deck_z 418 non-null int64 Pclass_1 418 non-null int64 Pclass_2 418 non-null int64 Pclass_3 418 non-null int64 Title_Lady 418 non-null int64 Title_Master 418 non-null int64 Title_Miss 418 non-null int64 Title_Mr [...]Data Preparation# creating the matrix # here we considered all the rows and all columns beginning from Age thus we did not consider Survived here # the value is then converted to float X = train_df.loc[:,'Age':].as_matrix().astype('float') y = train_df['Survived'].ravel()upper case letter for matrix and lower case letter for 1D arrayprint(X.shape,y.shape)(891, 32) (891,)Train test Splitfrom sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0) print(X_train.shape,y_train.shape) print(X_test.shape,y_test.shape) # survival in train and test print('mean survival in train: {0:.3f}'.format(np.mean(y_train))) print('mean survival in test: {0:.3f}'.format(np.mean(y_test)))mean survival in train: 0.383 mean survival in test: 0.385Check scikit versionimport sklearn sklearn.__version__Baseline Modelfrom sklearn.dummy import DummyClassifier # create model model_dummy = DummyClassifier(strategy='most_frequent',random_state=0) # train model model_dummy.fit(X_train,y_train) # obtaining the score print('score for baseline model: {0:.2f}'.format(model_dummy.score(X_test,y_test))) # performance metrics from 
sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score # accuracy score which is same as baseline model score print('accuracy for baseline model: {0:.2f}'.format(accuracy_score(y_test,model_dummy.predict(X_test)))) print('confusion matrix for baseline model: \n {0}'.format(confusion_matrix(y_test,model_dummy.predict(X_test)))) print('precision for baseline model: {0:.2f}'.format(precision_score(y_test,model_dummy.predict(X_test)))) print('Recall for baseline model: {0:.2f}'.format(recall_score(y_test,model_dummy.predict(X_test))))Recall for baseline model: 0.00First Kaggle submission# preparing the actual test data test_X = test_df.as_matrix().astype('float') predictions = model_dummy.predict(test_X) df_submission = pd.DataFrame({'PassengerId':test_df.index,'Survived' : predictions}) df_submission.head() submission_data_path = os.path.join(os.path.pardir,'data','external') submission_file_path = os.path.join(submission_data_path,'01_dummy.csv') df_submission.to_csv(submission_file_path,index=False) def get_submission_file(model,filename): test_X = test_df.as_matrix().astype('float') predictions = model.predict(test_X) df_submission = pd.DataFrame({'PassengerId': test_df.index,'Survived': predictions}) submission_data_path = os.path.join(os.path.pardir,'data','external') submission_file_path = os.path.join(submission_data_path,filename) df_submission.to_csv(submission_file_path,index=False) get_submission_file(model_dummy,'01_dummy.csv')Logistic Regressionfrom sklearn.linear_model import LogisticRegression model_lr_1 = LogisticRegression(random_state=0) model_lr_1.fit(X_train,y_train) print('score for logistic regression - version 1: {0:.2f}'.format(model_lr_1.score(X_test,y_test))) print('accuracy for logistic regression model: {0:.2f}'.format(accuracy_score(y_test,model_lr_1.predict(X_test)))) print('confusion matrix for logistic regression model: \n {0}'.format(confusion_matrix(y_test,model_lr_1.predict(X_test)))) print('precision for logistic regression model: {0:.2f}'.format(precision_score(y_test,model_lr_1.predict(X_test)))) print('Recall for logistic regression model: {0:.2f}'.format(recall_score(y_test,model_lr_1.predict(X_test)))) # model coefficients model_lr_1.coef_Second Kaggle submissionget_submission_file(model_lr_1,'02_lr.csv')Part 2 Hyperparameter Optimizationmodel_lr = LogisticRegression(random_state=0) from sklearn.model_selection import GridSearchCV parameters = {'C':[1.0,10.0,50.0,100.0,1000.0], 'penalty' : ['l1','l2']} clf = GridSearchCV(model_lr,param_grid=parameters,cv=3) # 3-fold cross-validation clf.fit(X_train,y_train) clf.best_params_ print('best score : {0:.2f}'.format(clf.best_score_)) print('score for logistic regression - version 2: {0:.2f}'.format(clf.score(X_test,y_test)))score for logistic regression - version 2: 0.83Third Kaggle Submissionget_submission_file(clf,'03_lr.csv')Feature normalization and standardization Feature Normalizationfrom sklearn.preprocessing import MinMaxScaler,StandardScaler scaler = MinMaxScaler() X_train_scaled = scaler.fit_transform(X_train) X_train_scaled[:,0].min(),X_train_scaled[:,0].max() X_test_scaled = scaler.transform(X_test) # normalize test dataFeature Standardizationscaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.transform(X_test)Create model after standardizationmodel_lr_2 = LogisticRegression() parameters = {'C':[0.5,1.0,10.0,50.0,100.0,1000.0],'penalty':['l1','l2']} clf = GridSearchCV(model_lr_2,param_grid=parameters,cv=3) 
clf.fit(X_train_scaled,y_train) clf.best_params_ clf.best_score_ print('score for standardized model - version 2 : {0:.2f}'.format(clf.score(X_test_scaled,y_test)))score for standardized model - version 2 : 0.84Model Persistenceimport pickle model_file_path = os.path.join(os.path.pardir,'models','lr_model.pkl') scaler_file_path = os.path.join(os.path.pardir,'models','lr_scaler.pkl') # opening the file in binary format model_file_pickle = open(model_file_path,'wb') scaler_file_pickle = open(scaler_file_path,'wb') pickle.dump(clf,model_file_pickle) pickle.dump(scaler,scaler_file_pickle) model_file_pickle.close() scaler_file_pickle.close()load the persisted filemodel_file_pickle = open(model_file_path,'rb') scaler_file_pickle = open(scaler_file_path,'rb') clf_loaded = pickle.load(model_file_pickle) scaler_loaded = pickle.load(scaler_file_pickle) model_file_pickle.close() scaler_file_pickle.close() clf_loaded scaler_loaded X_test_scaled = scaler_loaded.transform(X_test) print('score for persisted logistic regression: {0:.2f}'.format(clf_loaded.score(X_test_scaled,y_test)))score for persisted logistic regression: 0.84Criando um analisador de sentimentos com LSTMEste notebook é uma atividade de extensão que não está no livro do curso. Nesta atividade você aprenderá a desenvolver um analisador de sentimento que usa uma rede recorrente LSTM para processar um texto e classificar ....```Este Notebook é baseado no material disponibilizado por em: https://github.com/GarrettHoffman/lstm-oreilly ``` 1 - PacotesExecute o bloco abaixo para importar os pacotes necessarios. - [tensorflow](https://www.tensorflow.org/) um framework para machine learning- [numpy](www.numpy.org) pacote de bilbiotecas para computação científica.- [matplotlib](http://matplotlib.org) biblioteca para desenho de gráficos.- [pandas](https://pandas.pydata.org/) biblioteca para analise de dados.import tensorflow as tf import numpy as np import pandas as pd from collections import Counter #documento local com funções auxiliares import utils as utl/usr/local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. 
from ._conv import register_converters as _register_converters2 - DatasetO dataset é composto por aproximadamente 100 mil mensagens postadas em 2017, as mensagens são rotuladas com um $SPY, que indica o sentimento.bullish (otimista)bearish (pessimista)#lendo os dados do csv data = pd.read_csv("data/StockTwits_SPY_Sentiment_2017.gz", encoding="utf-8", compression="gzip", index_col=0) #obtendo a lista de mensagens e os rótulos messages = data.message.values labels = data.sentiment.values #Imprimindo as 10 primeiras mensagens for i in range(10): print("Mensagem:", messages[i], "| Rótulo:", labels[i]) #for i, message in enumerate(messages): # messages[i] = np.array([utl.preprocess_ST_message(message)]) #messages = np.array([messages]) messages = np.array([utl.preprocess_ST_message(message) for message in messages]) #Imprimindo as 10 primeiras mensagens for i in range(10): print("Mensagem:", messages[i]) print(messages.shape) full_lexicon = " ".join(messages).split() vocab_to_int, int_to_vocab = utl.create_lookup_tables(full_lexicon) print(len(vocab_to_int)) messages_lens = Counter([len(x) for x in messages]) print("Zero-length messages: {}".format(messages_lens[0])) print("Maximum message length: {}".format(max(messages_lens))) print("Average message length: {}".format(np.mean([len(x) for x in messages]))) # jogar fora as mensagens com tamanho zero messages, labels = utl.drop_empty_messages(messages, labels)Zero-length messages: 1 Maximum message length: 244 Average message length: 78.21856920395598Codificar mensagensmessages = utl.encode_ST_messages(messages, vocab_to_int) labels = utl.encode_ST_labels(labels)Paddingmessages = utl.zero_pad_messages(messages, seq_len=244)dataset splittrain_x, val_x, test_x, train_y, val_y, test_y = utl.train_val_test_split(messages, labels, split_frac=0.80) print("Data Set Size") print("Train set: \t\t{}".format(train_x.shape), "\nValidation set: \t{}".format(val_x.shape), "\nTest set: \t\t{}".format(test_x.shape))Data Set Size Train set: (77572, 244) Validation set: (9697, 244) Test set: (9697, 244)2 - Construindo a rede LSTMdef model_inputs(): """ Create the model inputs """ inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs') labels_ = tf.placeholder(tf.int32, [None, None], name='labels') keep_prob_ = tf.placeholder(tf.float32, name='keep_prob') return inputs_, labels_, keep_prob_ def build_embedding_layer(inputs_, vocab_size, embed_size): """ Create the embedding layer """ embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1)) embed = tf.nn.embedding_lookup(embedding, inputs_) return embed def build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size): """ Create the LSTM layers """ lstms = [tf.contrib.rnn.BasicLSTMCell(size) for size in lstm_sizes] # Add dropout to the cell drops = [tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob_) for lstm in lstms] # Stack up multiple LSTM layers, for deep learning cell = tf.contrib.rnn.MultiRNNCell(drops) # Getting an initial state of all zeros initial_state = cell.zero_state(batch_size, tf.float32) lstm_outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state) return initial_state, lstm_outputs, cell, final_state def build_cost_fn_and_opt(lstm_outputs, labels_, learning_rate): """ Create the Loss function and Optimizer """ predictions = tf.contrib.layers.fully_connected(lstm_outputs[:, -1], 1, activation_fn=tf.sigmoid) loss = tf.losses.mean_squared_error(labels_, predictions) optimzer = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss) 
return predictions, loss, optimzer def build_accuracy(predictions, labels_): """ Create accuracy """ correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) return accuracy def build_and_train_network(lstm_sizes, vocab_size, embed_size, epochs, batch_size, learning_rate, keep_prob, train_x, val_x, train_y, val_y): inputs_, labels_, keep_prob_ = model_inputs() embed = build_embedding_layer(inputs_, vocab_size, embed_size) initial_state, lstm_outputs, lstm_cell, final_state = build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size) predictions, loss, optimizer = build_cost_fn_and_opt(lstm_outputs, labels_, learning_rate) accuracy = build_accuracy(predictions, labels_) saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) n_batches = len(train_x)//batch_size for e in range(epochs): state = sess.run(initial_state) train_acc = [] for ii, (x, y) in enumerate(utl.get_batches(train_x, train_y, batch_size), 1): feed = {inputs_: x, labels_: y[:, None], keep_prob_: keep_prob, initial_state: state} loss_, state, _, batch_acc = sess.run([loss, final_state, optimizer, accuracy], feed_dict=feed) train_acc.append(batch_acc) if (ii + 1) % n_batches == 0: val_acc = [] val_state = sess.run(lstm_cell.zero_state(batch_size, tf.float32)) for xx, yy in utl.get_batches(val_x, val_y, batch_size): feed = {inputs_: xx, labels_: yy[:, None], keep_prob_: 1, initial_state: val_state} val_batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed) val_acc.append(val_batch_acc) print("Epoch: {}/{}...".format(e+1, epochs), "Batch: {}/{}...".format(ii+1, n_batches), "Train Loss: {:.3f}...".format(loss_), "Train Accruacy: {:.3f}...".format(np.mean(train_acc)), "Val Accuracy: {:.3f}".format(np.mean(val_acc))) saver.save(sess, "checkpoints/sentiment.ckpt") # Define Inputs and Hyperparameters lstm_sizes = [128, 64] vocab_size = len(vocab_to_int) + 1 #add one for padding embed_size = 300 epochs = 50 batch_size = 256 learning_rate = 0.1 keep_prob = 0.5 with tf.Graph().as_default(): build_and_train_network(lstm_sizes, vocab_size, embed_size, epochs, batch_size, learning_rate, keep_prob, train_x, val_x, train_y, val_y)Testando o modelodef test_network(model_dir, batch_size, test_x, test_y): inputs_, labels_, keep_prob_ = model_inputs() embed = build_embedding_layer(inputs_, vocab_size, embed_size) initial_state, lstm_outputs, lstm_cell, final_state = build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size) predictions, loss, optimizer = build_cost_fn_and_opt(lstm_outputs, labels_, learning_rate) accuracy = build_accuracy(predictions, labels_) saver = tf.train.Saver() test_acc = [] with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint(model_dir)) test_state = sess.run(lstm_cell.zero_state(batch_size, tf.float32)) for ii, (x, y) in enumerate(utl.get_batches(test_x, test_y, batch_size), 1): feed = {inputs_: x, labels_: y[:, None], keep_prob_: 1, initial_state: test_state} batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed) test_acc.append(batch_acc) print("Test Accuracy: {:.3f}".format(np.mean(test_acc))) with tf.Graph().as_default(): test_network('checkpoints', batch_size, test_x, test_y)A continuous time markov chain $X$ is defined by$$\mathbb{P}(X_{t + dt} = j | X_t = i) = \delta_{ij} + q_{ij}dt$$where $Q=(q_{ij})$ is an intensity matrix, that is$$q_{ij} \geq 0,\ i\neq j$$$$\sum_j q_{ij} = 0$$from stochastic_process.markov_chain import MarkovChain X 
= MarkovChain(1, 2, 5, 4, 6, 3) X.QAssuming $X$ is homogeneous in time, the matrix$$P(\delta_t) = I + Q \delta_t$$describe probabilities for $X$ to switch from one state to another within the $\delta_t$ time interval.X.P(delta_t=0.02)Note that $\delta_t$ must indeed be sufficiently small for $P(\delta_t)$ to be a probability matrix. For example $\delta_t = 0.2$ will be too large:X.P(delta_t=0.2) # Return a matrix P with negative coefficients.Enrich a DTA-TCF documentimport spacy from enrich.tfc import Tcf from enrich.spacy_utils.ner import fetch_ner_samples, format_iob_tag from enrich.custom_renderers import doc_to_tokenlist from enrich.custom_parsers import process_tokenlist file = "data/nn_nrhz001_1848.tcf.xml" # file = "http://www.deutschestextarchiv.de/book/download_fulltcf/31732" tcf = Tcf(file) words = tcf.create_tokenlist() nlp = spacy.load('de_core_news_sm') enriched_doc = process_tokenlist(nlp, words) for name, proc in nlp.pipeline[1:]: enriched_doc = proc(enriched_doc) TRAIN_SET = fetch_ner_samples(enriched_doc) TRAIN_SET[71] len(TRAIN_SET)_Lambda School Data Science_ Sequence Your Narrative - AssignmentToday we will create a sequence of visualizations inspired by ['s 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo).Using this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/):- [Income Per Person (GDP Per Capital, Inflation Adjusted) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv)- [Life Expectancy (in Years) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv)- [Population Totals, by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)- [Entities](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv)- [Concepts](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv) Objectives- sequence multiple visualizations- combine qualitative anecdotes with quantitative aggregatesLinks- [’s TED talks](https://www.ted.com/speakers/hans_rosling)- [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474)- "[The Pudding](https://pudding.cool/) explains ideas debated in culture with visual essays."- [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling ASSIGNMENT 1. Replicate the Lesson Code2. Take it further by using the same gapminder dataset to create a sequence of visualizations that combined tell a story of your choosing.Get creative! Use text annotations to call out specific countries, maybe: change how the points are colored, change the opacity of the points, change their sized, pick a specific time window. Maybe only work with a subset of countries, change fonts, change background colors, etc. 
make it your own!%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv') lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv') population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv') entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv') concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv') #This line neatly shows all the shapes of the pd's I imported print (income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape) print (income.head(),'\n\n', lifespan.head(),'\n\n', population.head(),'\n\n' , entities.head(),'\n\n', concepts.head()) #I need to print the last 2 csv into their own lines to see better pd.options.display.max_columns = 500 pd.options.display.max_rows = 500 entities.head() concepts.head() #I kept an extra 2 columns than the assignment to work on those df = income df = pd.merge(income, lifespan) df = pd.merge(df, population) df = pd.merge(df, entities[['country', 'name', 'world_4region', 'world_6region', 'landlocked', 'main_religion_2008']],left_on='geo', right_on='country') print(df.shape) df.head() #Here I rename the columns to make them more readable and less clunky df = df.rename(columns = { 'country': 'country_code', 'time': 'year', 'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income', 'life_expectancy_years': 'lifespan', 'population_total': 'population', 'name': 'country', 'world_6region': '6region', 'world_4region': '4region', 'main_religion_2008': 'religion' }) df.head() df.describe(exclude='number') #Here I look at the different religions to see what I will be working with. #There are only 3 unique df['religion'].unique() #Here I make the df I will work with. Only keeping the 3 religions. religion_eastern = df[df.religion == 'eastern_religions'] religion_eastern.head() religion_christian = df[df.religion == 'christian'] religion_christian.head() religion_muslim = df[df.religion == 'muslim'] religion_muslim.head() #I can make this inside the other function if I wanted to. def choses_years(df_data, years): """This function takes in a data frame and filters by the years you choose.""" df_data = df_data[df_data.year.isin(years)] return df_data #Here I separate the years I will work with years = [1818, 1868, 1918, 1968, 2018] muslim_years = choses_years(religion_muslim, years) christian_years = choses_years(religion_christian, years) eastern_years = choses_years(religion_eastern, years) #I wanted to demonstrate if a string would be called correctly muslim_years.iloc[1]['religion'] def graph_nations_religion_to_lifespan(df_data, years): """Takes in df_data and years to print graphs based on 'income', to 'lifespan' for the mean income of each region. 
Enjoy!""" for year in years: sns.relplot(x='income', y='lifespan', hue='6region', size='population', sizes=(30,400), data=df_data[df_data.year==year]) plt.xscale('log') plt.xlim((150, 150000)) plt.ylim((0, 90)) income_mean_year = df_data[df_data.year==year].income.mean() plt.title(str(df_data.iloc[1]['religion']) +' mean income for ' + str(year)+' is '+str(int(income_mean_year))) plt.axvline(x=df_data[df_data.year==year].income.mean(), color='grey') pass graph_nations_religion_to_lifespan(muslim_years, years) graph_nations_religion_to_lifespan(christian_years, years) graph_nations_religion_to_lifespan(eastern_years, years)STRETCH OPTIONS 1. Animate!- [How to Create Animated Graphs in Python](https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1)- Try using [Plotly](https://plot.ly/python/animations/)!- [The Ultimate Day of Chicago Bikeshare](https://chrisluedtke.github.io/divvy-data.html) (Lambda School Data Science student)- [Using Phoebe for animations in Google Colab](https://colab.research.google.com/github/phoebe-project/phoebe2-docs/blob/2.1/tutorials/animations.ipynb) 2. Study for the Sprint Challenge- Concatenate DataFrames- Merge DataFrames- Reshape data with `pivot_table()` and `.melt()`- Be able to reproduce a FiveThirtyEight graph using Matplotlib or Seaborn. 3. Work on anything related to your portfolio site / Data Storytelling Project# TODOsklearn is very slowkmeans = KMeans(n_clusters=135,random_state=0).fit(s) center = kmeans.cluster_centers_ np.save('./data/bert_center.npy',center)faiss, by contrast, is frighteningly fastn_center = 135 niter = 100 d=1024 fkmeans = faiss.Kmeans(d,n_center,niter=niter,verbose = True) %%time fkmeans.train(s) kaggle = pd.read_json('./kaggle_data/arxiv-metadata-oai-snapshot.json',lines=True) #kaggle.head() label = kaggle['categories'].apply(lambda x : set(x.split())) label = label.to_list() # Find the nearest center for each data point, i.e. the cluster it belongs to D, I = fkmeans.search(s,1) index = faiss.IndexFlatL2 (1024) index.add(s) D, I = index.search (fkmeans.centroids, 100) # If we only search for one neighbor, it should be the centroid itself D, I = index.search (fkmeans.centroids, 1) #np.save('./kaggle_data/tmp.npy',I) I = np.load('./kaggle_data/tmp.npy') I = list(I) I[0] label[int(I[i])] dic = defaultdict(list) for i in range(len(I)): dic[','.join(list(label[int(I[i])]))].append(int(I[i])) len(dic) kaggle.head() 'https://arxiv.org/abs/'+kaggle.loc[1676474,'id'] 'cs.AI' in dic dicExperiment results. What should the next experiment be? Do we need to split the dataset into one sample per record and run it again?ans = [0] * 135 for i in range(len(fcenters)): center = I[i][0] for j in range(1,100): index = I[i][j] if len(label[index] & label[center]) > 0: ans[i] += 1 dic = {} for i in range(len(fcenters)): center = I[i][0] for j in range(1,100): index = I[i][j] if len(label[index] & label[center]) > 0: dic[','.join(list(label[center]))] =dic.get(','.join(list(label[center])),0)+ 1 # Less than 135 because some centers share the same label, which may indicate the clustering is not good len(dic) dic label[1520554] data = kaggle.join(kaggle['categories'].str.split(' ', expand=True).stack().reset_index(level=1, drop=True).rename('label')) from collections import Counter num = Counter(data['label']) num['cond-mat.dis-nn'] numTopic "Supervised Learning", Task 2. Create a model named model using RandomForestRegressor from the sklearn.ensemble module. Set the n_estimators argument to 1000, max_depth to 12, and random_state to 42. Train the model on the training data in the same way you trained the LinearRegression model, but pass y_train.values[:, 0] to the fit method instead of the y_train dataframe, in order to get a one-dimensional NumPy array from the dataframe, since for the RandomForestRegressor class arrays are preferable to dataframes for the y argument of this method. Make predictions on the test data and compute R2. Compare with the result from the previous task. Write in the code comments which model works better in this case.import pandas as pd import numpy as nm from sklearn.datasets import load_boston BP = load_boston() X = pd.DataFrame(BP.data, columns=BP.feature_names) y = pd.DataFrame(BP.target, columns=['price']) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) from sklearn.ensemble import RandomForestRegressor # ?RandomForestRegressor model = RandomForestRegressor(n_estimators=1000, max_depth=12, random_state=42)>Train the model on the training data in the same way you trained the LinearRegression model, but pass y_train.values[:, 0] to the fit method instead of the y_train dataframe, in order to get a one-dimensional NumPy array from the dataframe, since for the RandomForestRegressor class arrays are preferable to dataframes for the y argument of this method.model.fit(X_train, y_train.values[:,0]) y_pred=model.predict(X_test)>Make predictions on the test data and compute R2. Compare with the result from the previous task. Write in the code comments which model works better in this case.from sklearn.metrics import r2_score r2_score(y_test, y_pred)Recognize named entities on news data with CNNIn this tutorial, you will use a convolutional neural network to solve the Named Entity Recognition (NER) problem. NER is a common task in natural language processing systems. It serves for extracting entities such as persons, organizations, and locations from the text. In this task you will experiment with recognizing named entities in news texts from the common CoNLL-2003 dataset.For example, we want to extract persons' and organizations' names from the text. Then for the input text: Yan Goodfellow works for Google Brain a NER model needs to provide the following sequence of tags: B-PER I-PER O O B-ORG I-ORGWhere *B-* and *I-* prefixes stand for the beginning and inside of the entity, while *O* stands for out of tag or no tag. Markup with the prefix scheme is called *BIO markup*. This markup is introduced to distinguish between consecutive entities of similar types.The solution will be based on neural networks, particularly Convolutional Neural Networks. DataThe following cell will download all data required for this assignment into the folder `/data`. The download util from the library is used to download and extract the archive.import deeppavlov from deeppavlov.core.data.utils import download_decompress download_decompress('http://files.deeppavlov.ai/deeppavlov_data/conll2003_v2.tar.gz', 'data/')/home/mikhail/env/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters Using TensorFlow backend. [nltk_data] Downloading package punkt to /home/mikhail/nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package stopwords to [nltk_data] /home/mikhail/nltk_data... [nltk_data] Package stopwords is already up-to-date! [nltk_data] Downloading package perluniprops to [nltk_data] /home/mikhail/nltk_data... [nltk_data] Package perluniprops is already up-to-date!
[nltk_data] Downloading package nonbreaking_prefixes to [nltk_data] /home/mikhail/nltk_data... [nltk_data] Package nonbreaking_prefixes is already up-to-date! 2018-06-27 12:30:29.760 DEBUG in 'gensim.models.doc2vec'['doc2vec'] at line 73: Fast ve[...]Load the CoNLL-2003 Named Entity Recognition corpusWe will work with a corpus, which contains twits with NE tags. Typical file with NER data contains lines with pairs of tokens (word/punctuation symbol) and tags, separated by a whitespace. In many cases additional information such as POS tags included between Different documents are separated by lines **started** with **-DOCSTART-** token. Different sentences are separated by an empty line. Example -DOCSTART- -X- -X- O EU NNP B-NP B-ORG rejects VBZ B-VP O German JJ B-NP B-MISC call NN I-NP O to TO B-VP O boycott VB I-VP O British JJ B-NP B-MISC lamb NN I-NP O . . O O Peter NNP B-NP B-PER Blackburn NNP I-NP I-PERIn this tutorial we will focus only on tokens and tags (first and last elements of the line) and drop POS information located in between.We start with using the *Conll2003DatasetReader* class that provides functionality for reading the dataset. It returns a dictionary with fields *train*, *test*, and *valid*. At each field a list of samples is stored. Each sample is a tuple of tokens and tags. Both tokens and tags are lists. The following example depicts the structure that should be returned by *read* method: {'train': [(['Mr.', 'Dwag', 'are', 'derping', 'around'], ['B-PER', 'I-PER', 'O', 'O', 'O']), ....], 'valid': [...], 'test': [...]}There are three separate parts of the dataset: - *train* data for training the model; - *validation* data for evaluation and hyperparameters tuning; - *test* data for final evaluation of the model. Each of these parts is stored in a separate txt file.We will use [Conll2003DatasetReader](https://github.com/deepmipt/DeepPavlov/blob/master/deeppavlov/dataset_readers/conll2003_reader.py) from the library to read the data from text files to the format described above.from deeppavlov.dataset_readers.conll2003_reader import Conll2003DatasetReader dataset = Conll2003DatasetReader().read('data/')You should always understand what kind of data you deal with. For this purpose, you can print the data running the following cell:for sample in dataset['train'][:4]: for token, tag in zip(*sample): print('%s\t%s' % (token, tag)) print()EU B-ORG rejects O German B-MISC call O to O boycott O British B-MISC lamb O . O Peter B-PER Blackburn I-PER BRUSSELS B-LOC 1996-08-22 O The O European B-ORG Commission I-ORG said O on O Thursday O it O disagreed O with O German B-MISC advice O to O consumers O to O shun O British B-MISC lamb O until O scientists O determine O whether O mad O cow O disease O can O be O transmitted O to O sheep O . OPrepare dictionariesTo train a neural network, we will use two mappings: - {token}$\to${token id}: address the row in embeddings matrix for the current token;- {tag}$\to${tag id}: one-hot ground truth probability distribution vectors for computing the loss at the output of the network.Token indices will be used to address the row in embeddings matrix. 
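Conceptually (a toy sketch only, not the library's `SimpleVocabulary` implementation that is used below), these two mappings are just dictionaries built from the training data:

```python
# Minimal illustration of {token -> id} and {tag -> id} mappings.
# '<UNK>' is a hypothetical special token for out-of-vocabulary words.
train_sentences = [(['EU', 'rejects', 'German', 'call'], ['B-ORG', 'O', 'B-MISC', 'O'])]

token2id = {'<UNK>': 0}
tag2id = {}
for tokens, tags in train_sentences:
    for token in tokens:
        token2id.setdefault(token, len(token2id))
    for tag in tags:
        tag2id.setdefault(tag, len(tag2id))

print(token2id)  # {'<UNK>': 0, 'EU': 1, 'rejects': 2, 'German': 3, 'call': 4}
print(tag2id)    # {'B-ORG': 0, 'O': 1, 'B-MISC': 2}
```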
The mapping for tags will be used to create one-hot ground truth probability distribution vectors to compute the loss at the output of the network.The [SimpleVocabulary](https://github.com/deepmipt/DeepPavlov/blob/master/deeppavlov/core/data/simple_vocab.py) implemented in the library will be used to perform those mappings.from deeppavlov.core.data.simple_vocab import SimpleVocabularyNow we need to build dictionaries for tokens and tags. Sometimes there are special tokens in vocabularies, for instance an unknown word token, which is used every time we encounter an out-of-vocabulary word. In our case the only special token will be `` for out-of-vocabulary words.special_tokens = [''] token_vocab = SimpleVocabulary(special_tokens, save_path='model/token.dict') tag_vocab = SimpleVocabulary(save_path='model/tag.dict')2018-06-27 13:41:29.316 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 53: No load path is set for SimpleVocabulary in 'infer' mode. Using save path instead 2018-06-27 13:41:29.317 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 53: No load path is set for SimpleVocabulary in 'infer' mode. Using save path insteadLet's fit the vocabularies on the train part of the data.all_tokens_by_sentences = [tokens for tokens, tags in dataset['train']] all_tags_by_sentences = [tags for tokens, tags in dataset['train']] token_vocab.fit(all_tokens_by_sentences) tag_vocab.fit(all_tags_by_sentences)Try to get the indices. Keep in mind that we are working with batches of the following structure: [['utt0_tok0', 'utt1_tok1', ...], ['utt1_tok0', 'utt1_tok1', ...], ...]token_vocab([['How', 'to', 'do', 'a', 'barrel', 'roll', '?']]) tag_vocab([['O', 'O', 'O'], ['B-ORG', 'I-ORG']])Now we will try conversion from indices to tokens.import numpy as np token_vocab([np.random.randint(0, 512, size=10)])Dataset IteratorNeural networks are usually trained with batches. This means that each weight update of the network is based on several sequences at once. The tricky part is that all sequences within a batch need to have the same length. So we will pad them with a special `` token. Likewise, the tags must be padded as well. It is also a good practice to provide the network with sequence lengths, so it can skip computations for the padding parts. We provide the batching function *batches_generator* readily available for you to save time. An important concept in batch generation is shuffling. Shuffling means taking samples from the dataset in random order. It is important to train on shuffled data because a large number of consecutive samples of the same class may result in poor quality of the model.from deeppavlov.core.data.data_learning_iterator import DataLearningIteratorCreate the dataset iterator from the loaded datasetdata_iterator = DataLearningIterator(dataset)Try it out:next(data_iterator.gen_batches(2, shuffle=True))Masking The last thing about generating training data is masking. We need to produce a binary mask which is one where tokens are present and zero elsewhere. This mask will stop backpropagation through the paddings. An example of such a mask: [[1, 1, 0, 0, 0], [1, 1, 1, 1, 1]] For the sentences in the batch: [['The', 'roof'], ['This', 'is', 'my', 'domain', '!']]The mask length must be equal to the maximum length of the sentence in the batch.from deeppavlov.models.preprocessors.mask import Mask get_mask = Mask()Try it out:get_mask([['Try', 'to', 'get', 'the', 'mask'], ['Check', 'paddings']])Build a convolutional neural networkThis is the most important part of the assignment.
Here we will specify the network architecture based on TensorFlow building blocks. It's as fun and easy as a Lego constructor! We will create a Convolutional Neural Network (CNN) which will produce a probability distribution over tags for each token in a sentence. To take into account both the right and left contexts of the token, we will use a CNN. A dense layer will be used on top to perform tag classification.import tensorflow as tf import numpy as np np.random.seed(42) tf.set_random_seed(42)An essential part of almost every network in the NLP domain is word embeddings. We pass the text to the network as a series of tokens. Each token is represented by its index. For every token (index) we have a vector. In total the vectors form an embedding matrix. This matrix can either be pretrained using some common algorithm like Skip-Gram or CBOW, or it can be initialized with random values and trained along with the other parameters of the network. In this tutorial we will follow the second alternative.We need to build a function that takes the tensor of token indices with shape [batch_size, num_tokens] and, for each index in this matrix, retrieves the vector from the embedding matrix corresponding to that index. That results in a new tensor with shape [batch_size, num_tokens, emb_dim].def get_embeddings(indices, vocabulary_size, emb_dim): # Initialize the random gaussian matrix with dimensions [vocabulary_size, embedding_dimension] # The **VARIANCE** of the random samples must be 1 / embedding_dimension emb_mat = np.random.randn(vocabulary_size, emb_dim).astype(np.float32) / np.sqrt(emb_dim) # YOUR CODE HERE emb_mat = tf.Variable(emb_mat, name='Embeddings', trainable=True) emb = tf.nn.embedding_lookup(emb_mat, indices) return embThe body of the network is the convolutional layers. The basic idea behind convolutions is to apply the same dense layer to every n consecutive samples (tokens in our case). A simplified case, where the numbers of input and output features are both equal to 1, is depicted below. Let's try it on a toy example:# Create a tensor with shape [batch_size, number_of_tokens, number_of_features] x = tf.random_normal(shape=[2, 10, 100]) y = tf.layers.conv1d(x, filters=200, kernel_size=8) print(y)Tensor("conv1d_6/BiasAdd:0", shape=(2, 3, 200), dtype=float32)As you can see, due to the absence of zero padding (zeros at the beginning and at the end of the input) the size of the resulting tensor along the token dimension is reduced. To use padding and preserve the dimensionality along the convolution dimension, pass the padding='same' parameter to the function.y_with_padding = tf.layers.conv1d(x, filters=200, kernel_size=8, padding='same') print(y_with_padding)Tensor("conv1d_7/BiasAdd:0", shape=(2, 10, 200), dtype=float32)Now stack a number of layers with the dimensionalities given in n_hidden_list:def conv_net(units, n_hidden_list, cnn_filter_width, activation=tf.nn.relu): # Use activation(units) to apply activation to units for n_hidden in n_hidden_list: units = tf.layers.conv1d(units, n_hidden, cnn_filter_width, padding='same') units = activation(units) return unitsA common loss for the classification task is cross-entropy. Why classification? Because for each token the network must decide which tag to predict. The cross-entropy has the following form:$$ H(P, Q) = -\mathbb{E}_{x \sim P} \log Q(x) $$It measures the dissimilarity between the ground truth distribution over the classes and the predicted distribution. In most cases the ground truth distribution is one-hot; a small numeric example is sketched below.
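For instance (a minimal NumPy illustration, assuming a made-up 3-tag problem), with a one-hot ground truth the cross-entropy reduces to the negative log-probability that the model assigns to the correct tag:

```python
import numpy as np

# Predicted distribution over 3 tags for one token, e.g. the output of a softmax
q = np.array([0.7, 0.2, 0.1])
# One-hot ground truth: the correct tag is class 0
p = np.array([1.0, 0.0, 0.0])

# H(P, Q) = -sum_x P(x) * log Q(x); with one-hot P this is just -log Q(true class)
cross_entropy = -np.sum(p * np.log(q))
print(cross_entropy)   # 0.3567...
print(-np.log(q[0]))   # identical value
```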
Luckily this loss is already [implemented](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits_v2) in TensorFlow.# The logits l = tf.random_normal([1, 4, 3]) # shape [batch_size, number_of_tokens, number of classes] indices = tf.placeholder(tf.int32, [1, 4]) # Make one-hot distribution from indices for 3 types of tag p = tf.one_hot(indices, depth=3) loss_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(labels=p, logits=l) print(loss_tensor)Tensor("softmax_cross_entropy_with_logits_3/Reshape_2:0", shape=(1, 4), dtype=float32)All sentences in the batch have the same length because we pad each sentence to the maximal length. So there are paddings at the end, and pushing the network to predict those paddings usually results in deteriorated quality. So we need to multiply the loss tensor by a binary mask to prevent gradient flow from the paddings.mask = tf.placeholder(tf.float32, shape=[1, 4]) loss_tensor *= maskThe last step is to compute the mean value of the loss tensor:loss = tf.reduce_mean(loss_tensor)Now define your own function that returns a scalar masked cross-entropy loss:def masked_cross_entropy(logits, label_indices, number_of_tags, mask): ground_truth_labels = tf.one_hot(label_indices, depth=number_of_tags) loss_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(labels=ground_truth_labels, logits=logits) loss_tensor *= mask loss = tf.reduce_mean(loss_tensor) return lossPut everything into a class:import numpy as np import tensorflow as tf class NerNetwork: def __init__(self, n_tokens, n_tags, token_emb_dim=100, n_hidden_list=(128,), cnn_filter_width=7, use_batch_norm=False, embeddings_dropout=False, top_dropout=False, **kwargs): # ================ Building inputs ================= self.learning_rate_ph = tf.placeholder(tf.float32, []) self.dropout_keep_ph = tf.placeholder(tf.float32, []) self.token_ph = tf.placeholder(tf.int32, [None, None], name='token_ind_ph') self.mask_ph = tf.placeholder(tf.float32, [None, None], name='Mask_ph') self.y_ph = tf.placeholder(tf.int32, [None, None], name='y_ph') # ================== Building the network ================== # Now embed the token indices with get_embeddings (embedding size token_emb_dim) ###################################### ########## YOUR CODE HERE ############ emb = get_embeddings(self.token_ph, n_tokens, token_emb_dim) ###################################### emb = tf.nn.dropout(emb, self.dropout_keep_ph, (tf.shape(emb)[0], 1, tf.shape(emb)[2])) # Build a multilayer CNN on top of the embeddings. # The number of units in each layer must match the # corresponding number from n_hidden_list. # Use ReLU activation ###################################### ########## YOUR CODE HERE ############ units = conv_net(emb, n_hidden_list, cnn_filter_width) ###################################### units = tf.nn.dropout(units, self.dropout_keep_ph, (tf.shape(units)[0], 1, tf.shape(units)[2])) logits = tf.layers.dense(units, n_tags, activation=None) self.predictions = tf.argmax(logits, 2) # ================= Loss and train ops ================= # Use cross-entropy loss. Check the tf.nn.softmax_cross_entropy_with_logits_v2 function ###################################### ########## YOUR CODE HERE ############ self.loss = masked_cross_entropy(logits, self.y_ph, n_tags, self.mask_ph) ###################################### # Create a training operation to update the network parameters. # We propose to use the Adam optimizer as it works fine in # most cases. Check tf.train to find an implementation.
# Put the train operation into the attribute self.train_op ###################################### ########## YOUR CODE HERE ############ optimizer = tf.train.AdamOptimizer(self.learning_rate_ph) self.train_op = optimizer.minimize(self.loss) ###################################### # ================= Initialize the session ================= self.sess = tf.Session() self.sess.run(tf.global_variables_initializer()) def __call__(self, tok_batch, mask_batch): feed_dict = {self.token_ph: tok_batch, self.mask_ph: mask_batch, self.dropout_keep_ph: 1.0} return self.sess.run(self.predictions, feed_dict) def train_on_batch(self, tok_batch, tag_batch, mask_batch, dropout_keep_prob, learning_rate): feed_dict = {self.token_ph: tok_batch, self.y_ph: tag_batch, self.mask_ph: mask_batch, self.dropout_keep_ph: dropout_keep_prob, self.learning_rate_ph: learning_rate} self.sess.run(self.train_op, feed_dict)Now create an instance of the NerNetwork class:nernet = NerNetwork(len(token_vocab), len(tag_vocab), n_hidden_list=[100, 100])We usually want to check the score on the validation part of the dataset every epoch. In most NER tasks the classes are imbalanced, and accuracy is not the best measure of performance. If 95% of the tags are 'O', then a trivial classifier that always predicts 'O' gets 95% accuracy. To tackle this issue the F1-score is used. The F1-score can be defined as:$$ F1 = \frac{2 P R}{P + R}$$ where P is precision and R is recall.Let's write the evaluation function. We need to get all predictions for the given part of the dataset and compute F1.from deeppavlov.models.ner.evaluation import precision_recall_f1 # The function precision_recall_f1 takes two lists: y_true and y_predicted # the tag sequences for all sentences should be merged into one big list from deeppavlov.core.data.utils import zero_pad # zero_pad takes a batch of lists of token indices, pads it with zeros to the # maximal length and converts it to a numpy matrix from itertools import chain def eval_valid(network, batch_generator): total_true = [] total_pred = [] for x, y_true in batch_generator: # Prepare token indices from tokens batch x_inds = token_vocab(x) # YOUR CODE HERE # Pad the indices batch with zeros x_batch = zero_pad(x_inds) # YOUR CODE HERE # Get the mask using get_mask mask = get_mask(x) # YOUR CODE HERE # We call the instance of the NerNetwork because we have defined the __call__ method y_inds = network(x_batch, mask) # For every sentence in the batch extract all tags up to paddings y_inds = [y_inds[n][:len(x[n])] for n, y in enumerate(y_inds)] # YOUR CODE HERE y_pred = tag_vocab(y_inds) # Add fresh predictions total_true.extend(chain(*y_true)) total_pred.extend(chain(*y_pred)) res = precision_recall_f1(total_true, total_pred, print_results=True)Set the hyperparameters. You might want to start with the following recommended values:- *batch_size*: 32;- *n_epochs*: 10;- starting value of *learning_rate*: 0.001;- *learning_rate_decay*: a square root of 2;- *dropout_keep_probability*: 0.7 for training (typical values for dropout probability range from 0.3 to 0.9).A very efficient technique for learning rate management is dropping the learning rate after convergence, as sketched below.
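For illustration only (the helper `fake_validation_f1` below is a hypothetical stand-in for the real training and evaluation loop, not part of the assignment code), the schedule can be sketched as follows: keep training with a fixed learning rate and divide it by a constant factor once the validation score stops improving.

```python
import numpy as np

# Sketch of dropping the learning rate after convergence (assumed setup).
rng = np.random.RandomState(42)

def fake_validation_f1(epoch):
    # A made-up score that improves early and then plateaus.
    return min(0.85, 0.5 + 0.1 * epoch) + rng.normal(scale=0.005)

learning_rate = 0.001
drop_factor = 2            # common choices are 2, 3, or 10
best_f1 = 0.0

for epoch in range(10):
    f1 = fake_validation_f1(epoch)        # in the real loop: train for one epoch, then evaluate
    if f1 <= best_f1:
        learning_rate /= drop_factor      # drop the learning rate once the score stops improving
    else:
        best_f1 = f1
    print("epoch {}: F1={:.3f}, next learning rate={:.6f}".format(epoch, f1, learning_rate))
```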
It is common to use dividers 2, 3, and 10 to drop the learning rate.batch_size = 16 # YOUR HYPERPARAMETER HERE n_epochs = 20 # YOUR HYPERPARAMETER HERE learning_rate = 0.001 # YOUR HYPERPARAMETER HERE dropout_keep_prob = 0.5 # YOUR HYPERPARAMETER HERENow we iterate through dataset batch by batch and pass the data to the train opfor epoch in range(n_epochs): for x, y in data_iterator.gen_batches(batch_size, 'train'): # Convert tokens to indices via Vocab x_inds = token_vocab(x) # YOUR CODE # Convert tags to indices via Vocab y_inds = tag_vocab(y) # YOUR CODE # Pad every sample with zeros to the maximal length x_batch = zero_pad(x_inds) y_batch = zero_pad(y_inds) mask = get_mask(x) nernet.train_on_batch(x_batch, y_batch, mask, dropout_keep_prob, learning_rate) print('Evaluating the model on valid part of the dataset') eval_valid(nernet, data_iterator.gen_batches(batch_size, 'valid'))Evaluating the model on valid part of the datasetEval the model on test part noweval_valid(nernet, data_iterator.gen_batches(batch_size, 'test'))2018-06-27 13:46:35.397 DEBUG in 'deeppavlov.models.ner.evaluation'['evaluation'] at line 213: processed 46435 tokens with 5648 phrases; found: 4561 phrases; correct: 3738. precision: 81.96%; recall: 66.18%; FB1: 73.23 LOC: precision: 84.02%; recall: 82.25%; F1: 83.13 1633 MISC: precision: 81.80%; recall: 71.08%; F1: 76.07 610 ORG: precision: 81.25%; recall: 60.26%; F1: 69.20 1232 PER: precision: 79.74%; recall: 53.56%; F1: 64.08 1086Lets try to infer the model on our sentence:sentence = ' my vodka' x = [sentence.split()] x_inds = token_vocab(x) x_batch = zero_pad(x_inds) mask = get_mask(x) y_inds = nernet(x_batch, mask) print(x[0]) print(tag_vocab(y_inds)[0])['Petr', 'stole', 'my', 'vodka'] ['B-PER', 'O', 'O', 'O']Load dataclassifier = cPickle.load(open("model/classifier.pickle")) rawtext = cPickle.load(open("model/textdata/rawtext.pickle")) BoW = cPickle.load(open("model/BoW.pickle")) BoW_transformer = cPickle.load(open("model/BoW_transformer.pickle")) labeled_texts = [[party, text] for (party, texts) in rawtext.items() for text in texts] df = pd.DataFrame(labeled_texts, columns=["party", "text"]) df import sklearn # find out how sklearn arranges feature vectors c = sklearn.feature_extraction.text.CountVectorizer() c.fit(['foo', 'bar']).transform(['foo', 'foo bar bar']).toarray() c.get_feature_names() df['tfidf'] = [v for (party, matrix) in BoW.items() for v in matrix.toarray()] dfSample (for quicker computations)rows = np.random.choice(df.index.values, 200) df_sample = df.ix[rows].copy() df_sample df_sample.groupby('party').count()['text'].plot(kind='bar')MDSclf = manifold.MDS(n_components=2, n_init=1, max_iter=100) X_mds = clf.fit_transform(df_sample['tfidf'].tolist()) x, y = np.transpose(X_mds) df_sample['mds_x'] = x df_sample['mds_y'] = y fig = None colors = dict(spd='red', cdu='black', gruene='green', linke='orange') for party, group in df_sample.groupby('party'): _fig = group.plot(kind='scatter', x='mds_x', y='mds_y', label=party, color=colors[party], ax=fig) fig = _figt-SNEtsne = manifold.TSNE(n_components=2, init='pca', random_state=0) X_tsne = clf.fit_transform(df_sample['tfidf'].tolist()) x, y = np.transpose(X_tsne) df_sample['tsne_x'] = x df_sample['tsne_y'] = y fig = None colors = dict(spd='red', cdu='black', gruene='green', linke='orange') for party, group in df_sample.groupby('party'): _fig = group.plot(kind='scatter', x='tsne_x', y='tsne_y', label=party, color=colors[party], ax=fig) fig = _figPCAX_pca = 
sklearn.decomposition.TruncatedSVD(n_components=2).fit_transform(df_sample['tfidf'].tolist()) x, y = np.transpose(X_pca) df_sample['pca_x'] = x df_sample['pca_y'] = y fig = None colors = dict(spd='red', cdu='black', gruene='green', linke='orange') for party, group in df_sample.groupby('party'): _fig = group.plot(kind='scatter', x='pca_x', y='pca_y', label=party, color=colors[party], ax=fig) fig = _figWith predictions...clf = classifier['classifier'] labels = classifier['labels'] df_sample['probas'] = map(list, clf.predict_proba(df_sample['tfidf'].tolist())) def predicted_label(probas): return labels[probas.index(max(probas))] df_sample['predicted_party'] = df_sample['probas'].map(predicted_label) clf = manifold.MDS(n_components=2, n_init=1, max_iter=100) X_mds = clf.fit_transform(df_sample['probas'].tolist()) x, y = np.transpose(X_mds) df_sample['probas_mds_x'] = x df_sample['probas_mds_y'] = y df_sample fig = None colors = dict(spd='red', cdu='black', gruene='green', linke='orange') # change to predicted class? for party, group in df_sample.groupby('party'): _fig = group.plot(kind='scatter', x='probas_mds_x', y='probas_mds_y', label=party, color=colors[party], ax=fig) fig = _fig fig = None colors = dict(spd='red', cdu='black', gruene='green', linke='orange') # change to predicted class? for party, group in df_sample.groupby('predicted_party'): _fig = group.plot(kind='scatter', x='probas_mds_x', y='probas_mds_y', label=party, color=colors[party], ax=fig) fig = _figImporting Libraries# import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = "E:\Malaria Dataset\Dataset\Train" valid_path = "E:\Malaria Dataset\Dataset\Test"Importing VGG-19 Library and model# Import the Vgg 19 library as shown below and add preprocessing layer to the front of VGG # Here we will be using imagenet weights vgg19 = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) # don't train existing weights for layer in vgg19.layers: layer.trainable = False # useful for getting number of output classes folders = glob("E:\Malaria Dataset\Dataset\Train\*") folders # our layers - you can add more if you want x = Flatten()(vgg19.output) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=vgg19.input, outputs=prediction) # view the structure of the model model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )Data Preprocessing of training set and testing set# Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory(train_path, target_size = (224, 224), batch_size = 
64, class_mode = 'categorical') test_set = test_datagen.flow_from_directory(valid_path, target_size = (224, 224), batch_size = 64, class_mode = 'categorical') # fit the model # Run the cell. It will take some time to execute his = model.fit_generator( training_set, validation_data=test_set, epochs=25, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) #plot the training and validation accuracy and loss at each epoch loss = his.history['loss'] val_loss = his.history['val_loss'] epochs = range(1, len(loss) + 1) plt.plot(epochs, loss, 'y', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() acc = his.history['accuracy'] val_acc = his.history['val_accuracy'] plt.plot(epochs, acc, 'y', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show()Image prediction# save it as a h5 file from tensorflow.keras.models import load_model model.save('model_VGG16_CNN_Malaria.h5') y_pred = model.predict(test_set) import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_VGG16_CNN_Malaria.h5') img_path = r"E:\Malaria Dataset\cell_images\Test\Parasitized\C39P4thinF_original_IMG_20150622_110115_cell_136.png" img=image.load_img(img_path,target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x=np.expand_dims(x,axis=0) img = np.reshape(x,[1,224,224,3]) img.shape model.predict(img) a=np.argmax(model.predict(img), axis=1) if(a==1): print("Uninfected") else: print("Infected")InfectedConfusion Matrixfrom mlxtend.plotting import plot_confusion_matrix from sklearn.metrics import confusion_matrix #rounded_predictions = model.predict(test_set, batch_size=128, verbose=0) import numpy as np #rounded_labels=np.argmax(test_set.classes, axis=1) from sklearn.metrics import confusion_matrix cm = confusion_matrix(test_set.classes, y_pred) plot_confusion_matrix(conf_mat=cm) plt.show()#Here are two columns, with 6 rows each, comprised of two different words (true and false) data = [["true", "false"],["true", "true"], ["false", "false"], ["false", "false"], ["false", "true"], ["true", "false"]] print('Print Unsorted Data:') print(data) # sort the words data.sort() print('Print Sorted Data:') print(data) #identify and count how many times the word false appears falseCnt = 0 for rowi in range(0,6): for coli in range(0,2): if len(data[rowi][coli]) > 4: falseCnt = falseCnt + 1 print('The word "false" occurs %g many times in the data' %(falseCnt))Gráficas de funciones de activación en Kerasimport numpy as np import matplotlib.pyplot as plt import tensorflow as tf import tensorflow.keras as keras def plot_activation(figure,nrows, ncols, index,x,y,nombre): axis = figure.add_subplot(nrows, ncols, index) axis.plot(x, y,color="#0056B8") axis.set_title(nombre, fontsize=20,pad=0,color="#003B80") axis.set_facecolor("#F0F7FF") axis.grid(b=True, which='major', axis='both',color="#FFFFFF",linewidth=3) axis.grid(b=True, which='minor', axis='both',color="#FFFFFF",linewidth=1) figure=plt.figure(figsize=(25,10)) figure.subplots_adjust(wspace=2, hspace=0.2) x=np.linspace(-10,10,50) y=tf.keras.activations.sigmoid(x).numpy() plot_activation(figure,2,16,(1,4),x,y,"sigmoid") x=np.linspace(-10,10,50) y=tf.keras.activations.tanh(x).numpy() 
plot_activation(figure,2,16,(5,8),x,y,"tanh") x=np.linspace(-10,10,50) y=tf.keras.activations.softsign(x).numpy() plot_activation(figure,2,16,(9,12),x,y,"softsign") x=np.linspace(-10,10,50) y=tf.keras.activations.relu(x,alpha=0.1).numpy() plot_activation(figure,2,16,(13,16),x,y,"relu α=0.1") x=np.linspace(-10,10,50) y=tf.keras.activations.softplus(x).numpy() plot_activation(figure,2,16,(17,20),x,y,"softplus") x=np.linspace(-10,10,50) y=tf.keras.activations.selu(x).numpy() plot_activation(figure,2,16,(21,24),x,y,"selu") x=np.linspace(-10,10,50) y=tf.keras.activations.elu(x,alpha=2).numpy() plot_activation(figure,2,16,(25,28),x,y,"elu α=2") x=np.linspace(-10,10,50) y=tf.keras.activations.exponential(x).numpy() plot_activation(figure,2,16,(29,33),x,y,"exponential") figure.savefig("funciones_activacion.png",facecolor="#FFFFFF",bbox_inches='tight')import tensorflow as tf class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('acc')>0.6): print("\nReached 60% accuracy so cancelling training!") self.model.stop_training = True mnist = tf.keras.datasets.fashion_mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 callbacks = myCallback() model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, epochs=10, callbacks=[callbacks]) # GRADED FUNCTION: train_mnist def train_mnist(): # Please write your code only where you are indicated. # please do not remove # model fitting inline comments. class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('acc')>=0.99): print("\nReached 99% accuracy so cancelling training!") self.model.stop_training = True mnist = tf.keras.datasets.mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 callbacks = myCallback() model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # model fitting history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks]) # model fitting return history.epoch, history.history['acc'][-1] train_mnist() # GRADED FUNCTION: train_mnist_conv def train_mnist_conv(): # Please write your code only where you are indicated. # please do not remove model fitting inline comments. 
class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('acc')>=0.998): print("\nReached 99.8% accuracy so cancelling training!") self.model.stop_training = True callbacks = myCallback() mnist = tf.keras.datasets.mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() # model fitting history = model.fit( training_images, training_labels, epochs=20, callbacks=[callbacks] ) # model fitting return history.epoch, history.history['acc'][-1] epoch, history_acc = train_mnist_conv() history_accClasses with Multiple Objectsclass Birds: def __init__(self,bird_name): self.bird_name = bird_name def flying_birds(self): print(f"{self.bird_name} files above the sky") def non_flying_birds(self): print(f"{self.bird_name} is the national bird of the Philippines") vulture = Birds("Griffon Vulture") crane = Birds("Common Crane") emu = Birds("Emu") vulture.flying_birds() crane.flying_birds() emu.non_flying_birds() class foo: def __init__(self,a,b): self.__a = a self.__b = b def add(self): return self.__a +self.__b #Private attributes number = foo(3,4) number.add() number.a = 7 #7,4 7+4 = 11 number.add()Encapsulation with Private Attributesclass Counter: def __init__(self): self.current = 0 def increment(self): self.current +=1 def value(self): return self.current def reset(self): self.current = 0 num = Counter() num.increment() #counter = counter + 1 num.increment() num.increment() num.value() class Counter: def __init__(self): self.__current = 0 def increment(self): self.__current +=1 def value(self): return self.__current def reset(self): self.__current = 0 num = Counter() num.increment() #counter = counter + 1 num.increment() num.increment() num.counter = 1 num.value()Inheritanceclass Person: def __init__(self, firstname, surname): self.firstname = firstname self.surname = surname def printname(self): print(self.firstname, self.surname) person = Person("Jea", "Gadil") person.printname() class Teacher(Person): pass person2 = Teacher("Maria", "Sayo") person2.printname() class Student(Person): pass person3 = Student("", "Umipig") person3.printname() Polymorphismclass RegularPolygon: def __init__(self,side): self.side = side class Square(RegularPolygon): def area(self): return self.side * self.side class EquilateralTriangle(RegularPolygon): def area(self): return self.side * self.side * 0.433 object = Square(4) print(object.area()) object2 = EquilateralTriangle(3) print(object2.area())16 3.897Application 11. Create a Python program that displays the name of three students (Student 1, Student 2, and Student 3) and their term grades2. Create a class name Person and attributes - std1, std2 std3, pre,mid,fin3. Compute the average of each term grade using Grade() method4. 
Information about student's grades must be hidden from othersclass Person: def __init__(self,std,pre,mid,fin): self.__std = std self.__pre = pre self.__mid = mid self.__fin = fin def Grade(self): return round((self.__pre + self.__mid + self.__fin)/3,2) class Student1(Person): pass std1 = str(input("Enter student name: ")) pre_1 = float(input("Enter Prelim Grade: ")) mid_1 = float(input("Enter Midterm Grade: ")) fin_1 = float(input("Enter Final Grade: ")) Student_1 = Person(std1, pre_1, mid_1, fin_1) print() class Student2(Person): pass std2 = str(input("Enter student name: ")) pre_2 = float(input("Enter Prelim Grade: ")) mid_2 = float(input("Enter Midterm Grade: ")) fin_2 = float(input("Enter Final Grade: ")) Student_2 = Person(std2, pre_2, mid_2, fin_2) print() class Student3(Person): pass std3 = str(input("Enter student name: ")) pre_3 = float(input("Enter Prelim Grade: ")) mid_3 = float(input("Enter Midterm Grade: ")) fin_3 = float(input("Enter Final Grade: ")) Student_3 = Person(std3, pre_3, mid_3, fin_3) print() student_name = str(input("Enter Student Name: ")) if student_name == (std1): print ("Name:", std1, "\nAverage:", Student_1.Grade()) else: if student_name == (std2): print("Name:", std2, "\nAverage:", Student_2.Grade()) else: if student_name == (std3): print("Name:", std3, "\nAverage:", Student_3.Grade()) else: print("That name was Not Register")Enter student name: Ian Enter Prelim Grade: 98 Enter Midterm Grade: 98 Enter Final Grade: 97 Enter student name: Rafael Enter Prelim Grade: 97 Enter Midterm Grade: 96 Enter Final Grade: 95 Enter student name: Umipig Enter Prelim Grade: 97 Enter Midterm Grade: 98 Enter Final Grade: 91 Enter Student Name: Umipig Name: Umipig Average: 95.33Write chunk array as GeoTIFFOr: **How to safe a single layer chunk array as georeferenced raster with MultiRasterIO?**import pandas as pd from pathlib import Path import rasterio from eobox.raster import MultiRasterIO from eobox import sampledata year = 2008 dataset = sampledata.get_dataset("lsts") layers_paths = [Path(p) for p in dataset["raster_files"]] layers_df = pd.Series([p.stem for p in layers_paths]).str.split("_", expand=True) \ .rename({0: "sceneid", 1:"band"}, axis=1) layers_df["date"] = pd.to_datetime(layers_df.sceneid.str[9:16], format="%Y%j") layers_df["uname"] = layers_df.sceneid.str[:3] + "_" + layers_df.date.dt.strftime("%Y-%m-%d") + "_" + layers_df.band.str[::] layers_df["path"] = layers_paths layers_df = layers_df.sort_values(["date", "band"]) layers_df = layers_df.reset_index(drop=True) df_layers = layers_df[(layers_df.date >= str(year)) & (layers_df.date < str(year+1))] df_layers = df_layers.reset_index(drop=True) df_layers.head() mrio = MultiRasterIO(df_layers.path.values) \ .windows_from_blocksize(2**5) n_chunks = len(mrio.windows) print("Number of chunks : ", n_chunks) write_to_disk = False with rasterio.open(mrio._get_template_for_given_resolution(mrio.dst_res, "path")) as src_layer: pass # later we need src_layer for src_layer.window_transform(win) for ji in range(n_chunks): dst_path = f"./xxx_uncontrolled_99_chunk_ji{ji:02}.tif" chunk_arrays_ji = mrio.get_arrays(ji) chunk_layer_ji = chunk_arrays_ji[:,:,[0]] print("shape of chunk_layer_ji : ", chunk_layer_ji.shape) win = mrio.windows[ji] kwargs = mrio._get_template_for_given_resolution( res=mrio.dst_res, return_="meta").copy() kwargs.update({"height": win.height, "width": win.width, "transform": src_layer.window_transform(win)}) kwargs["dtype"] = chunk_layer_ji.dtype with rasterio.open(dst_path, "w", **kwargs) as dst: 
dst.write(chunk_layer_ji[:,:,0], 1) print(f"Chunk written to path: {dst_path}")Number of chunks : 4 shape of chunk_layer_ji : (32, 32, 1) Chunk written to path: ./xxx_uncontrolled_99_chunk_ji00.tif shape of chunk_layer_ji : (32, 29, 1) Chunk written to path: ./xxx_uncontrolled_99_chunk_ji01.tif shape of chunk_layer_ji : (29, 32, 1) Chunk written to path: ./xxx_uncontrolled_99_chunk_ji02.tif shape of chunk_layer_ji : (29, 29, 1) Chunk written to path: ./xxx_uncontrolled_99_chunk_ji03.tifAssembling the full datasetdset = extract_dset(PATH_FULL + 'pairs.txt') construct_analogy_dset(dset, PATH_FULL + 'tetrads_sub_all.txt', PATH_FULL + 'tetrads_nonsub_all.txt')Assembling the dataset with OOV words removedmodel = KeyedVectors.load_word2vec_format('models/morphoseg/model.txt') # removing OOV words clean_dset(PATH_FULL + 'pairs.txt', PATH_OOV_REMOVED + 'pairs_cleaned.txt', model) dset = extract_dset(PATH_OOV_REMOVED + 'pairs_cleaned.txt') construct_analogy_dset(dset, PATH_OOV_REMOVED + 'tetrads_sub.txt', PATH_OOV_REMOVED + 'tetrads_nonsub.txt') # in nonsub # REMOVE AFTER TESTING filein = open('final_analogies/tetrads_nonsub.txt', 'r') l = filein.readline() cnt = 0 while l != '': cnt += 1 l = filein.readline() filein.close() print(cnt) # in sub # REMOVE AFTER TESTING filein = open('final_analogies/tetrads_sub.txt', 'r') l = filein.readline() cnt = 0 while l != '': cnt += 1 l = filein.readline() filein.close() print(cnt) 1576 + 794 cnts = [] for rel_id in dset: cnts.append(sum([len(dset[rel_id][k]) for k in dset[rel_id]])) sum([x * (x-1)/2 for x in cnts]) # train/test split sub = extract_analogy_dset(PATH_OOV_REMOVED + 'tetrads_sub.txt') nonsub = extract_analogy_dset(PATH_OOV_REMOVED + 'tetrads_nonsub.txt') train_test_split(sub, PATH_OOV_REMOVED + 'train_sub.txt', PATH_OOV_REMOVED + 'test_sub.txt', p=0.75) train_test_split(nonsub, PATH_OOV_REMOVED + 'train_nonsub.txt', PATH_OOV_REMOVED + 'test_nonsub.txt', p=0.75)Id 0 train 5 test 2 Id 1 train 265 test 89 Id 3 train 4 test 2 Id 4 train 39 test 13 Id 5 train 2 test 1 Id 9 train 3 test 2 Id 10 train 102 test 34 Id 11 train 128 test 43 Id 12 train 142 test 48 Id 13 train 263 test 88 Id 18 train 33 test 12 Id 21 train 90 test 30 Id 22 train 102 test 34 Id 0 train 168 test 56 Id 1 train 60 test 21 Id 2 train 21 test 7 Id 3 train 6 test 3 Id 4 train 134 test 45 Id 5 train 5 test 2 Id 6 train 21 test 7 Id 7 train 4 test 2 Id 8 train 4 test 2 Id 9 train 169 test 57Upward TraversalWalk up the graph to find what contains a given URI# Find what contains a plasmid uri = 'https://hub.sd2e.org/user/sd2e/design/YG_plasmid_018/1' #uri = 'https://hub.sd2e.org/user/sd2e/design/YG_plasmid_002/1' # Display immediate parent ModuleDefinitions print('Parents:') for parent in sorted(sbhp.parent_module_definitions(sbh_query, uri)): print(' ', parent) print('') # Display ancestors that have no parent Module, the top of the line print('Roots:') for root in sorted(sbhp.root_module_definitions(sbh_query, uri)): print(' ', root) # Find what contains a strain uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5783/1' # uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5992/1' # uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_7377/1' # uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_7299/1' # Display immediate parent ModuleDefinitions print('Parents:') for parent in sorted(sbhp.parent_module_definitions(sbh_query, uri)): print(' ', parent) print('') # Display ancestors that have no parent Module, the top of the line print('Roots:') for root in 
sorted(sbhp.root_module_definitions(sbh_query, uri)): print(' ', root)Downward traversalWalk down the graph to see what a URI contains# Find what a strain contains uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5783/1' # uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5992/1' # uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_7377/1' # Display immediate child modules print('Child modules of', uri) for child in sorted(sbhp.child_module_definitions(sbh_query, uri)): print(' ', child) # Display immediate child components print('') print('Child components of', uri) for child in sorted(sbhp.child_component_definitions(sbh_query, uri)): print(' ', child)Finding contained types# Find strains and reagents contained in a given URI # uri = 'https://hub.sd2e.org/user/sd2e/experiment_test/experiment0x2euw_biofab0x2e17026_group_2/1' uri = 'https://hub.sd2e.org/user/sd2e/experiment_test/experiment0x2euw_biofab0x2e18527_group_1/1' # Display contained strains print('Strains contained in', uri) for strain in sorted(sbhp.find_contained_strains(sbh_query, uri)): print(' ', strain) print('') # Display contained reagents print('Reagents contained in', uri) for reagent in sorted(sbhp.find_contained_reagents(sbh_query, uri)): print(' ', reagent) # Customize the search with a user-defined predicate uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5783/1' # Find all ComponentDefinitions and ModuleDefinitions print('Everything contained in', uri) def true_predicate(sbh_query, uri): """This predicate returns True for everything""" return True for item in sorted(sbhp.find_contained_items(sbh_query, uri, true_predicate)): print(' ', item) # Use a custom predicate to find only some contained items print('') print('Custom search in', uri) def is_my_plasmid(sbh_query, uri): """This predicate looks for YG_plasmid_01*""" return uri.startswith('https://hub.sd2e.org/user/sd2e/design/YG_plasmid_01') for item in sorted(sbhp.find_contained_items(sbh_query, uri, is_my_plasmid)): print(' ', item)Putting it together# Find strains used with a plasmid uri = 'https://hub.sd2e.org/user/sd2e/design/YG_plasmid_018/1' print('Finding root module definitions, this might take a while...') root_mds = sbhp.root_module_definitions(sbh_query, uri) print('Found %d root module definitions' % (len(root_mds))) # Search each root to find strains used with this URI all_strains = set() for root in sorted(root_mds): print('Strains in', root) for strain in sorted(sbhp.find_contained_strains(sbh_query, root)): print(' ', strain) all_strains.add(strain) print('') print('--------------------------------------------------') print(uri, 'is used with strains:') for strain in sorted(all_strains): print(' ', strain) # Find reagents used with the given strain uri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5783/1' roots = sbhp.root_module_definitions(sbh_query, uri) print('Found %d root module definitions' % (len(roots))) reagents = [] all_reagents = set() for root in roots: print('Reagents in', root) for reagent in sorted(sbhp.find_contained_reagents(sbh_query, root)): print(' ', reagent) all_reagents.add(reagent) print('') print('--------------------------------------------------') print(uri, 'is used with reagents:') for reagent in sorted(all_reagents): print(' ', reagent)Getting Info about a URIuri = 'https://hub.sd2e.org/user/sd2e/design/UWBF_5783/1' import pandas as pd pd.DataFrame(sorted(sbhp.subject_info(sbh_query, uri)))Debugging infoprint('Cache Info') print('----------') print('parent_module_definitions:', 
sbhp.parent_module_definitions.cache_info()) print('root_module_definitions:', sbhp.root_module_definitions.cache_info()) print('child_module_definitions:', sbhp.child_module_definitions.cache_info()) print('child_component_definitions:', sbhp.child_component_definitions.cache_info()) print('module_is_strain:', sbhp.module_is_strain.cache_info()) print('is_reagent:', sbhp.is_reagent.cache_info()) print('find_contained_reagents:', sbhp.find_contained_reagents.cache_info()) print('find_contained_strains:', sbhp.find_contained_strains.cache_info())Solutions to Lesson 6 ExercisesFor each exercise, the solutions below show one possible way of solving it, but you might have used a different approach, and that's great! There is almost always more than one way to solve any particular problem in Python. Initial Setup Since this notebook is in the `solutions` sub-folder, use the magic command `%cd` to go up one folder to the main project folder to keep the file paths the same as in the lessons:%cd ..C:\Users\jenfl\Projects\eoas-pythonImport libraries, display plots inline, initialize variables from lesson:import pandas import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cfeature %matplotlib inline world = pandas.read_csv('data/gapminder_world_data_2018.csv') world['area'] = world['population'] / world['pop_density'] cities = pandas.read_csv('data/country_capitals.csv')Exercise 6.1**a)** First, create the following variables:```pythonx_data = [0, 1, 2, 3, 4, 5, 6]y_linear = [0, 2, 4, 6, 8, 10, 12]y_squares = [0, 1, 4, 9, 16, 25, 36]```Plot `y_linear` vs. `x_data` and `y_squares` vs. `x_data` on a figure with the following customizations:- Figure size 5" wide by 6" tall- `y_linear` vs. `x_data` as a line plot with line width of 2 and color `'purple'`- `y_squares` vs. 
`x_data` as a line plot with line width of 4, line style `'dotted'`, and color `'forestgreen'`- Figure title `'Straight Line and 2nd Order Polynomial'`x_data = [0, 1, 2, 3, 4, 5, 6] y_linear = [0, 2, 4, 6, 8, 10, 12] y_squares = [0, 1, 4, 9, 16, 25, 36] fig, ax = plt.subplots(figsize=(5, 6)) ax.plot(x_data, y_linear, linewidth=2, color='purple') ax.plot(x_data, y_squares, linewidth=4, linestyle='dotted', color='forestgreen') ax.set_title('Straight Line and 2nd Order Polynomial');**b)** Save the figure in `.png` and `.pdf` formatsfig.savefig('linear-and-polynomial.png', dpi=300) fig.savefig('linear-and-polynomial.pdf')Bonus exercises**c)** Try out other customizations such as using other colors from the [named color list](https://matplotlib.org/examples/color/named_colors.html), different line styles, assigning x- and y-labels, changing the axis limits with `ax.set_xlim()` and `ax.set_ylim()`, splitting the figure into subplots with one above and one below (instead of side by side), and so on...# One example fig, ax_array = plt.subplots(2, 1, figsize=(4, 8)) ax1 = ax_array[0] ax2 = ax_array[1] ax1.plot(x_data, y_linear, linewidth=2, linestyle='dashed', color='maroon') ax1.set_title('Linear') ax1.set_xlim(0, 6.5) ax1.set_ylim(0, 12.5) ax2.plot(x_data, y_squares, linewidth=2, color='navy') ax2.set_title('2nd Order Polynomial') ax2.set_xlim(0, 6.5) ax2.set_ylim(0, 40) fig.tight_layout()Exercise 6.2**a)** Create a map with the following specifications:- 6" wide by 6" tall- `ccrs.Orthographic()` projection- Stock image background- Any other features or data you'd like to addfig = plt.figure(figsize=(6, 6)) ax = fig.add_subplot(1, 1, 1, projection=ccrs.Orthographic()) ax.stock_img() ax.scatter(data=cities, x='Longitude', y='Latitude', marker='.', color='black', transform=ccrs.PlateCarree());**b)** Try out different values of the `central_latitude` and `central_longitude` keyword arguments to `ccrs.Orthographic()` and see how the map changes.fig = plt.figure(figsize=(6, 6)) proj = ccrs.Orthographic(central_longitude=90) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.stock_img() ax.scatter(data=cities, x='Longitude', y='Latitude', marker='.', color='black', transform=ccrs.PlateCarree()); fig = plt.figure(figsize=(6, 6)) proj = ccrs.Orthographic(central_longitude=90, central_latitude=60) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.stock_img() ax.scatter(data=cities, x='Longitude', y='Latitude', marker='.', color='black', transform=ccrs.PlateCarree());Quantum ChessThis colab illustrates the difficulties of running on NISQ hardware using the example of Quantum Chess. 
This tutorial assumes basic knowledge of the following:* Basic quantum computing principles, specifically, what unitary matrices are, and what the $\text{iSWAP}$ and $\sqrt{\text{iSWAP}}$ gates are* The basic movement rules of [Chess](https://en.wikipedia.org/wiki/Chess#Movement) and the [Algebraic notation](https://en.wikipedia.org/wiki/Algebraic_notation_(chess)) method of naming chess squares and moves.![Algebraic notation](https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/SCD_algebraic_notation.svg/242px-SCD_algebraic_notation.svg.png)* The [Cirq](https://github.com/quantumlib/Cirq) framework for constructing quantum circuits.* While the tutorial will briefly cover the rules of quantum chess, those who are unfamiliar with this game may [try it out](https://quantumchess.net/play/) or read the exact rules in the [Quantum Chess paper](https://arxiv.org/abs/1906.05836).The goal of this tutorial is to illustrate some basic ideas of transforming a theoretical algorithm (such as Quantum Chess) into a real hardware algorithm, without getting lost in details of more complicated domain-specific problems. The tutorial is split into several parts:1. **Introduction to Quantum Chess**: how quantum chess maps to qubits and quantum operations2. **Quantum chess in Cirq**: how to execute these operations using the cirq framework3. **Handling partial measurement**: how to handle moves that include measurement that partially collapses the board state4. **Noise**: how to deal with some simple noise models5. **Decomposition**: how to decompose gates6. **Qubit Layout**: how to run circuits when connectivity matters.You will learn about the concepts of **circuit construction**, **post-selection**, **gate decomposition**, **noise**, **optimizers**, and **qubit mappings**. First, let's get started by installing cirq on our colab runtime:!pip install cirq --quiet import cirq from typing import Dict, Iterable, Optional, SetIntroduction to Quantum ChessQuantum chess is analogous to the classical game of [Chess](https://en.wikipedia.org/wiki/Chess). Special rules are added to the game in order to allow for superposition and entanglement of pieces. This tutorial will go over the basics of the rules in order to show how one might implement them in cirq and convert them to use a hardware device. The full rule set, including rules for more complicated situations (such as castling and en passant), can be found in the [Quantum Chess paper](https://arxiv.org/abs/1906.05836). A demo where you can try moves out and play around on a sandbox board and test your knowledge with puzzles is available at [quantumchess.net](https://quantumchess.net/play/).In order to introduce the game, we will first go over how the board is represented and then go over several of the moves. Quantum Chess Board RepresentationIn classical chess, each square can be represented by one bit determining whether the square is empty (0) or occupied (1) as well as a short integer or 'enum' to distinguish the color and type of piece (e.g. King, Queen, Knight, Bishop, Rook, or Pawn). Quantum chess will also use the same classical register to store the type of piece, since each square can only have one type of piece in it. Since the storage of the piece type is entirely classical, we will ignore it throughout the rest of this tutorial and focus on the quantum aspects of the game.Classical chess squares have only two states. Each square is either occupied or not. For a quantum version of the game, we will want to allow a continuous spectrum of possibilities of the square's occupancy, as represented by a quantum bit, or qubit.In quantum chess, each square is represented by a single qubit. If the qubit is in the |1> state, the square is occupied by a piece. If the qubit is in the |0> state, the square is empty.A board state will be a set of qubits that represent a set of squares. We will often consider a board that is smaller than the standard 8x8 board of chess so that we can more easily illustrate concepts.For instance, let's consider a 3x3 board. This board will have 9 squares (a1, a2, a3, b1, b2, b3, c1, c2, c3). We will represent this as a 'ket' string such as this, with spaces to denote row boundaries for ease of reading:```|a1a2a3 b1b2b3 c1c2c3 >```For instance, `|000 000 000>` would represent the empty 3x3 board. `|010 000 000>` would represent a piece on the 'a2' square. States can also represent boards with multiple pieces. `|000 000 110>` would represent a board with pieces on both 'c1' and 'c2'. Each of these three states is denoted as a **basis state**.A state can also be a superposition of board states. For instance, $\frac{1}{\sqrt{2}}$ `( |100 000 000> + |010 000 000> )` represents a piece being in a superposition of being on 'a1' and 'a2'. This colab will often drop the normalization factor such as $\frac{1}{\sqrt{2}}$ when it doesn't matter to the explanation.
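Before introducing the moves, it can help to see a couple of these board states concretely. Below is a minimal sketch of how one might build the `|010 000 000>` basis state and a two-square superposition in cirq; the qubit list, the circuit names, and the use of a $\sqrt{\text{iSWAP}}$ to create the superposition are purely illustrative here (the tutorial defines its own square qubits and move functions further down):
```
import cirq

# The nine squares of the 3x3 board, in the same order as the ket
# string |a1a2a3 b1b2b3 c1c2c3>.
squares = [cirq.NamedQubit(n) for n in
           ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3']]
a1_sq, a2_sq = squares[0], squares[1]

sim = cirq.Simulator()

# A single piece on 'a2': flip that square's qubit from |0> (empty)
# to |1> (occupied), giving the basis state |010 000 000>.
piece_on_a2 = cirq.Circuit(cirq.X(a2_sq))
print(sim.simulate(piece_on_a2, qubit_order=squares))

# A piece in equal superposition of 'a1' and 'a2': place it on 'a1',
# then apply a square root of iSWAP between 'a1' and 'a2' (this is
# exactly the first half of the split move described below).
superposed = cirq.Circuit(cirq.X(a1_sq), cirq.ISWAP(a1_sq, a2_sq) ** 0.5)
print(sim.simulate(superposed, qubit_order=squares))
```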
Additional qubitsNote that there are some exceptions that require extra qubits beyond the requirement of one qubit per square. These extra qubits (often called "ancilla" qubits) are needed to store captured pieces and to store extra information, such as whether a path is blocked by a piece. We will ignore these extra ancilla qubits for now in order to keep the tutorial focused. Quantum Chess MovesWe will be studying a subset of possible quantum chess moves in this tutorial.We will primarily consider the following moves to implement on the hardware: Basic jump moveThis maneuver will be applied when we wish to apply a standard chess move to a piece. This will be how kings and knights move. In classical chess, we would remove the piece from its original square and then add it to the target square we are moving it to. For quantum chess, we would like to do an analogous operation that preserves any quantum information. In order to do this, we will swap the two squares. We will do this by performing an **iSWAP** gate between the two qubits that represent those squares. Note that this operation will also add a phase of *i* onto the qubits in addition to swapping them, adding a uniquely quantum aspect to the move.![Standard Move](https://cirq.readthedocs.io/en/latest/_static/qc_standard_move.png)For example, if we attempt to move a King piece from the 'a1' square to the 'b1' square, we will perform an iSWAP gate between the qubits that represent the 'a1' and 'b1' squares.
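The phase of $i$ mentioned above can be seen directly from the gate's unitary, and it is also easy to check the fact (used later in the tutorial) that an $\text{iSWAP}$ is just two $\sqrt{\text{iSWAP}}$ gates in a row. The following is only a quick sanity check on throwaway line qubits, not part of the game logic:
```
import cirq

# iSWAP exchanges |01> and |10> and multiplies the swapped amplitudes
# by i -- the extra phase picked up by a quantum chess jump move.
print(cirq.unitary(cirq.ISWAP).round(3))

# Two square roots of iSWAP compose to a full iSWAP.
q0, q1 = cirq.LineQubit.range(2)
two_halves = cirq.Circuit(cirq.ISWAP(q0, q1) ** 0.5, cirq.ISWAP(q0, q1) ** 0.5)
print(cirq.allclose_up_to_global_phase(cirq.unitary(two_halves),
                                       cirq.unitary(cirq.ISWAP)))
```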
Split moveThe split move is unique to quantum chess. This move will be analogous to moving a piece to two different target squares simultaneously. If we were to then measure the quantum board, we should get a 50% probability of seeing the piece in either of the two target squares.This will be accomplished by doing a $\sqrt{\text{iSWAP}}$ between the source and the first target. This will create an equal superposition between the source and the first target square. We would finish the move by performing an iSWAP between the source square and the second target square. This will create an equal superposition between the two target squares.![Split Move](https://cirq.readthedocs.io/en/latest/_static/qc_split_move1.png)For example, suppose that we split move a King on 'a1' to the squares 'b1' and 'b2'. To do this, we would do a $\sqrt{\text{iSWAP}}$ between the qubits representing 'a1' and 'b1' followed by an **iSWAP** on the 'a1' and 'b2' qubits. A short-hand notation for this move is *Ks1^t1t2* for source square s1 and split targets t1 and t2. This example would be noted as *Ka1^b1b2*. Slide moveA slide move is how we will move bishops, queens, and rooks. These pieces move along a straight path, but cannot pass through intervening pieces. However, in quantum chess, there may be a piece that is in superposition and may or may not block the path of this move.In order to accomplish this move without measuring (and thereby destroying the quantum nature of the game), we will need to **entangle** the two pieces. The way we will do this is called a "controlled" gate. A [controlled gate](https://en.wikipedia.org/wiki/Quantum_logic_gate#Controlled_(cX_cY_cZ)_gates) is a gate that is only performed if the control qubit(s) are in the `|1>` state.This tutorial will only consider paths that are blocked by one possible piece. In this case, we will perform a **controlled-iSWAP** between the source square and target square, controlled by the square that is potentially blocking the path.![Slide Move](https://cirq.readthedocs.io/en/latest/_static/qc_slide_move.png)For example, suppose we are moving a rook from 'a1' to 'c1', but there is a split knight on 'b1'. To perform this slide move, we would do an iSWAP between the qubits that represent 'a1' and 'c1' controlled by the 'b1' qubit. This operation can result in two potential states. Either the rook is on 'c1' and the knight is not on 'b1' (path was open) or, alternatively, the rook is on 'a1' and the knight is on 'b1' (path was blocked). The quantum board state cannot be described without mentioning the relationship between the 'b1' and 'a1'/'c1' squares, so we can say that those squares are *entangled*. Exclusion moveIt was mentioned earlier that each square must have only one type of piece. For instance, a square cannot have 0.1 of a knight and 0.5 of a rook. This would not be allowed. This is called the 'no double occupancy' rule. We will use measurement as a tool to prevent this situation and disambiguate the type of the piece.This can happen in a variety of circumstances, but we will only consider the case where a piece attempts to make a legal, standard move into a square with a piece of its own color. ![Exclusion Move](https://cirq.readthedocs.io/en/latest/_static/qc_exclusion_move.png)For instance, suppose we attempt to move a king from 'a1' to 'a2' and there is a split knight on the 'a2' square. We would then need to measure the 'a2' square. If the measurement is 1, then the knight occupies the square and the king cannot move. If the measurement is 0, the knight is not on the square and the king moves successfully. In this case, we have a classical 'mixture' of states. Half the time, we would get the king on 'a1' and the knight on 'a2'. The other half of the time, the king would be on 'a2'. However, since we have measured and projected the state to one of these possibilities, there would be no superposition or entanglement, and this part of the board would be in a classical state (definitively in either one configuration or the other). 
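To make the slide move's entanglement claim a bit more concrete, here is a small toy circuit. It is only an illustration: the qubit names are made up, the path qubit follows the convention used by the slide_move function defined later (|1> means the path is open), and a Hadamard stands in for the split knight that would normally create the superposition:
```
import cirq

rook_a1 = cirq.NamedQubit('rook_a1')   # rook's source square
rook_c1 = cirq.NamedQubit('rook_c1')   # rook's target square
path = cirq.NamedQubit('path')         # |1> if the path is open

slide_demo = cirq.Circuit(
    cirq.X(rook_a1),                   # rook starts on a1
    cirq.H(path),                      # path open/blocked in superposition
    cirq.ISWAP(rook_a1, rook_c1).controlled_by(path),
    cirq.measure(rook_a1, rook_c1, path, key='m'),
)

# Every sample is either (rook still on a1, path blocked) or
# (rook on c1, path open): the rook's position and the path are
# now correlated, i.e. entangled.
print(cirq.Simulator().run(slide_demo, repetitions=10).measurements['m'])
```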
Other movesThese four type of moves are enough to illustrate the complexities of translating an abstract algorithm (quantum chess) onto a concrete hardware implementation. However, those who are curious can see the full rule set in the [Quantum Chess](https://arxiv.org/abs/1906.05836) paper on arXiv, including for capture, en passant, and castling.**Exercise for the reader**: This describes one possible rule set for translating the game of chess into the quantum world. How might the game change if you use SWAP gates instead of iSWAP gates? Can you devise ways to tell the two games apart? Can you think of other methods to create split moves or other interesting alternatives? Quantum Chess Simulation Now that we have introduced the moves of quantum chess and their corresponding circuits, let's look at how we might actually perform our quantum chess moves and circuits in hardware. We will first start by learning how to implement them in cirq and how to execute them using cirq's built-in simulator.If we consider that a chess board has 64 squares, then the simulation will need 64 qubits. Since any combination of these bits is a valid basis state of the system, this means that, in order to do a complete simulation, we will need to keep track of $2^{64}$ states, which is something around 10 quintillion states. This is obviously infeasible.However, most of these positions are unreachable by any common set of moves. For most reasonable positions, there is a much smaller amount of superposition, so we do not need to keep track of all of those states, since the probability of those states is zero and will remain zero after we make our moves.For example, the initial position is a classical position, so we only need to keep track of one possible state, where the initial squares are in the |1> state and the empty squares are in the |0> state. After we make a split move, now we have to keep track of two states. Each successive move on the board will need to tabulate its effect on both of these possible states.**Example**:Let's consider a 3x3 board as defined above with one King piece that starts in the square 'a1'. This initial state is:```|100 000 000>```Note that, since there are 9 squares, there are $2^9=512$ possible basis states and a position on this board could be a superposition of any of the 512 basis states. For instance, let's suppose that we now perform a split move from 'a1' to 'b1' and 'b2'. Then the state will be (ignoring the factor of $i/{\sqrt{2}}$): ```|000 010 000> + |000 100 000>```![Quantum king moves](https://cirq.readthedocs.io/en/latest/_static/qc_qking_moves.png)Now, suppose that we make two additional moves. First, we will split the "half-king" on 'b1' to 'c1' and 'c2'. Next, we will split the 'half king' on 'b2' to 'b3' and 'c3'.So, despite that we have made three split moves that involve almost all of the squares on the board, we still only have to worry about 4 of the 512 states on the board. The state is now (ignoring the constant factor):```|000 000 100> + |000 000 010> + |000 001 000> + |000 000 001>```Of course, if we continue splitting moves, we will eventually get to a large number of potential states. However, we will never need to consider all 512 states in this example, since some states can never be reached by a sequence of moves. For instance, the state `|110 000 000>` represents a king on a1 *and* a king on b1. 
There's no way where we can start the board with one king on the board and suddenly wind up with two kings (unless we have noise, more on that later).**Exercise to the reader**: How many basis states are possible to include in a superposition if we allow the player to make as many normal/split moves as they want? How many superpositions are possible?**Summary**Since the space of possible quantum chess positions is *much* smaller than the space of possible quantum states, it is very tractable to simulate quantum chess if we have a custom simulator tuned to consider only relevant states. Quantum Chess in CirqNext, let's define each of these moves in cirq. We will define a function for each of three possible moves we are considering. Each of these will use make use of a python structure called [generators](https://wiki.python.org/moin/Generators) which allows us to 'yield' one or more operations. This will allow us to use these functions very flexibly while we construct circuits.In addition, we will define a function for placing a piece on the board. Cirq simulators initialize into the zero state (an empty board), so we will use an 'X' gate to turn on a qubit and place a piece on the board.import cirq def normal_move(s: cirq.Qid, t: cirq.Qid): """ A normal move in quantum chess. This function takes two qubits and returns a generator that performs a normal move that moves a piece from the source square to the target square. Args: s: source qubit (square where piece starts) t: target qubit (square to move piece to) """ yield cirq.ISWAP(s,t) def split_move(s: cirq.Qid, t1: cirq.Qid, t2: cirq.Qid): """ A Split move in quantum chess. This function takes three qubits and returns a generator that performs a split move from the source qubit to the two target qubits. Args: s: source qubit (square where piece starts) t1: target qubit (first square to move piece to) t2: target qubit (second square to move piece to) """ yield cirq.ISWAP(s,t1)**0.5 yield cirq.ISWAP(s,t2) def slide_move(s: cirq.Qid, t: cirq.Qid, p: cirq.Qid): """A Slide move in quantum chess. This function takes three qubits and returns a generator that performs a slide move. This will move a piece from the source to the target controlled by the path qubit. The move will only occur if the path qubit is turned on (i.e. path qubit should be one if the path is unblocked). Args: s: source qubit (square where piece starts) t: target qubit (square to move piece to) p: path qubit that determines whether move should occur """ yield cirq.ISWAP(s,t).controlled_by(p) def place_piece(s: cirq.Qid): """Place a piece on the board. This is not actually a move. However, since qubits in Cirq default to starting in the |0> state, we will need to activate the qubit by applying an X gate to initialize the position. """ yield cirq.X(s)Now that the moves are defined as gates in Cirq, let's now define the qubits. Since we haven't figured out how we will represent this on the hardware, we will use Cirq's concept of `NamedQubit` in order to define the squares as logical qubits without defining a device topology. This may feel unusual to some, but it can be compared to "textbook" circuits that have lines for abstract qubits that are unnamed or have simple symbolic names. 
We will first try to define our algorithm in the abstract case, then move towards a physical representation at the end of this colab.a1 = cirq.NamedQubit('a1') a2 = cirq.NamedQubit('a2') a3 = cirq.NamedQubit('a3') b1 = cirq.NamedQubit('b1') b2 = cirq.NamedQubit('b2') b3 = cirq.NamedQubit('b3') c1 = cirq.NamedQubit('c1') c2 = cirq.NamedQubit('c2') c3 = cirq.NamedQubit('c3') all_squares = [a1, a2, a3, b1, b2, b3, c1, c2, c3]Now that we have a 3x3 'board' of qubits, let's try to perform some moves.Let's try to create the circuit mentioned in our simulator example that places a king on the 'a1' square, then splits to 'b1' and 'b2', then further splits to 'c1' and 'c2' and then to 'b3' and 'c3'.king_moves = cirq.Circuit( place_piece(a1), split_move(a1, b1, b2), split_move(b1, c1, c2), split_move(b2, b3, c3), ) # Let's print out the circuit to see what it does print(king_moves) # Let's also simulate it using cirq's built-in simulator sim = cirq.Simulator() sim.simulate(king_moves)┌──────────────┐ ┌──────────────┐ a1: ───X───iSwap────────iSwap──────────────────────────────────────── │ │ b1: ───────iSwap^0.5────┼────iSwap─────────iSwap───────────────────── │ │ │ b2: ────────────────────iSwap┼─────────────┼────iSwap────────iSwap─── │ │ │ │ b3: ─────────────────────────┼─────────────┼────iSwap^0.5────┼─────── │ │ │ c1: ─────────────────────────iSwap^0.5─────┼─────────────────┼─────── │ │ c2: ───────────────────────────────────────iSwap─────────────┼─────── │ c3: ─────────────────────────────────────────────────────────iSwap─── └──────────────┘ └──────────────┘Fantastic! We can clearly see that the result from the simulator shows a superposition of 4 states! So far, so good. Let's try a slightly more complicated example involving two pieces and an exclusion move that forces a measurement. Let's start with a king on 'a1' and a queen on 'a3'. We will split the king onto 'b1' and 'b2' as before. Next, we will split move the king on 'b1' to 'c1' and 'c2'. Lastly, we will try to move the queen from 'a3' to 'b2'.![Queen trying to move into a king superposition](https://cirq.readthedocs.io/en/latest/_static/qc_blocked_king.png)This will trigger a condition known as "double occupancy". We are not allowed to have two pieces of different types in the same square. If the king is in 'b2', the queen cannot move there. If the king is not in 'b2', then the move is legal. Thus, we will need to do a measurement. Let's see how that works.exclusion_moves = cirq.Circuit( place_piece(a1), place_piece(a3), split_move(a1, b1, b2), split_move(b1, c1, c2), ) exclusion_move_circuit = exclusion_moves + cirq.measure(b2) # Let's print out the circuit to see what it does print('Exclusion move circuit:') print(exclusion_move_circuit) # This time, we have a measurement in the circuit. # Let's simulate it several times to see what happens. 
sim = cirq.Simulator() print('Measurement Results:') for t in range(8): print(f'--- Outcome #{t} ---') print(sim.simulate(exclusion_move_circuit))Exclusion move circuit: ┌──────────────┐ a1: ───X───iSwap────────iSwap───────────────────────── │ │ a3: ───X───┼────────────┼───────────────────────────── │ │ b1: ───────iSwap^0.5────┼────iSwap────────iSwap─────── │ │ │ b2: ────────────────────iSwap┼────────────┼───────M─── │ │ c1: ─────────────────────────iSwap^0.5────┼─────────── │ c2: ──────────────────────────────────────iSwap─────── └──────────────┘ Measurement Results: --- Outcome #0 --- measurements: b2=1 output vector: 1j|010100⟩ --- Outcome #1 --- measurements: b2=0 output vector: -0.707|010001⟩ - 0.707|010010⟩ --- Outcome #2 --- measurements: b2=1 output vector: 1j|010100⟩ --- Outcome #3 --- measurements: b2=0 output vector: -0.707|010001⟩ - 0.707|010010⟩ --- Outcome #4 --- measurements: b2=1 output vector: 1j|010100⟩ -[...]In this configuration, you should see two different results. If we measured the king to be at 'b2', then the position is resolved to a single basis state, equivalent to a classical position (the king is at 'b2' and the queen is at 'c3').If the 'b2' square is measured to be empty, then the board is still in a state of superposition. Handling Partial Measurement So far, we have been simulating this using `cirq.simulate()` which shows us the waveform of the system and its internal state. In a quantum device, we do not have this information. All we have is the measurement data that we get out of the device. Let's try both of these circuits again with the simulator, but denying ourselves access to the underlying wave function, solely using measurement results. First, we will need to add measurements to our previous circuits so that we can access statistical data.# Define a measurement operation that measures all the squares measure_all_squares = cirq.measure(*all_squares, key='all') # Measure all squares at the end of the circuit king_moves_measured = king_moves + measure_all_squares exclusion_move_measured = exclusion_moves + measure_all_squares print('Split moving the king:') print(king_moves_measured) results = sim.run(king_moves_measured, repetitions=1) print(results) print('Attempting an exclusion move:') results = sim.run(exclusion_move_measured, repetitions=1) print(results)Split moving the king: ┌──────────────┐ ┌──────────────┐ a1: ───X───iSwap────────iSwap────────────────────────────────────────M('all')─── │ │ │ a2: ───────┼────────────┼────────────────────────────────────────────M────────── │ │ │ a3: ───────┼────────────┼────────────────────────────────────────────M────────── │ │ │ b1: ───────iSwap^0.5────┼────iSwap─────────iSwap─────────────────────M────────── │ │ │ │ b2: ────────────────────iSwap┼─────────────┼────iSwap────────iSwap───M────────── │ │ │ │ │ b3: ─────────────────────────┼─────────────┼────iSwap^0.5────┼───────M────────── │ │ │ │ c1: ──[...]You'll notice that, if you run the above block multiple times, you will get different results.We will need a way to compile these into a statistical measure. For this, we will need to increase the number of repetitions, and then combine them into a histogram. Let's write a simple function to aggregate the results from the simulator (which are returned in a numpy array) into a more convenient form for us.def histogram_of_squares(results): """Creates a histogram of measurement results per square. 
Returns: Dict[str, int] where the key is the name of the square and the value is the number of times the square was measured in the one state. """ sampling_frequency = {} for idx, sq in enumerate(all_squares): sampling_frequency[str(sq)]=results.measurements['all'][:, idx].sum() return sampling_frequency # Let's test out this histogram on the two circuits we've examined so far: print('Split moving the king:') split_results = sim.run(king_moves_measured, repetitions=1000) print(histogram_of_squares(split_results)) print('Attempting an exclusion move:') exclusion_results = sim.run(exclusion_move_measured, repetitions=1000) print(histogram_of_squares(exclusion_results))Split moving the king: {'a1': 0, 'a2': 0, 'a3': 0, 'b1': 0, 'b2': 0, 'b3': 266, 'c1': 241, 'c2': 249, 'c3': 244} Attempting an exclusion move: {'a1': 0, 'a2': 0, 'a3': 1000, 'b1': 0, 'b2': 493, 'b3': 0, 'c1': 265, 'c2': 242, 'c3': 0}We can now see the frequencies of different possibilities. For our first situation, we can see the king has four possibilities with approximately the same amount of occurences. Due to statistical variations, the exact number of occurences in each of the four squares is not exactly the same.Our second exclusion move example illustrates the king's possible position on the three different squares, but we have not yet figured out how to implement the queen's exclusion and measurement with the king.How are we going to do that?First, we need to pick a sample that will become our official "measurement" for the board. We will freeze the seed for the random number generator so that everyone running this colab gets the same measurement and measure the square we care about (b2).exclusion_move_measure_b2 = exclusion_moves + cirq.measure(b2) this_sim = cirq.Simulator(seed=1234) measurement_for_move = this_sim.run(exclusion_move_measure_b2, repetitions=1) print(measurement_for_move)b2=0When we measure the square 'b2', it is empty. (The above should consistently print out 'b2=0'. If it doesn't, you can change your seed so it does). Thus, we can now move the queen.Ok, we are now ready to make the next move. How do we figure out the statistics of the board and be able to compute the (quantum) state of the board so that we can continue the game?Let's try to move the queen and see what happens:# Calculate the state of the board after moving the queen after_exclusion_move = cirq.Circuit( place_piece(a1), place_piece(a3), split_move(a1, b1, b2), split_move(b1, c1, c2), normal_move(a3, b2), ) + measure_all_squares # Compute statistics for the pieces. after_results = this_sim.run(after_exclusion_move, repetitions=1000) print(histogram_of_squares(after_results)){'a1': 0, 'a2': 0, 'a3': 520, 'b1': 0, 'b2': 1000, 'b3': 0, 'c1': 232, 'c2': 248, 'c3': 0}Uh oh. This is not correct. The queen now has 100% probability of being on 'b2', but there is now a king at 'a3' with approximately 50% probability.The problem is that we are including both the possibility that the king was at 'b2' as well as the possibility that it was empty. Instead of initializing the king to 'a1', we could initialize the king to 'b1' and just do one split move from 'b1' to 'c1' and 'c2' instead of performing all operations since the beginning of the game.This has two issues. First is that we are cheating somewhat by using the simplicity of this toy example to only include paths that are possible. 
In a more complicated game, with more pieces and merges happening, it may be a lot more difficult to simplify the circuit in a way that replicates the results of a measurement. The second problem is that we have discarded information about the relative phases of the pieces. For instance, the different squares could have picked up phases of $i$, and we would not be able to tell by just looking at the measurement results. We may be destroying this information by simplifying the circuit.**Exercise to the reader**: Is the phase actually the same or different if we compute the shorter circuit?So then, how do we handle this? Post-selectionOne way of resolving this is to use the method of "post-selection". Both simulators and real hardware have the ability to repeat an experiment many times. We can use this ability to pick the samples that match our expectations of the results. In the example above, we have two independent possibilities for the measurement of the king. Half of the results will have the 'm0' outcome (the 'b2' square was empty, so the king was not there and the queen move was possible) and half will have the 'm1' outcome (the king was on the square, preventing the queen move). In our game, we know that 'm0' is what happened, so we can run the simulation multiple times and only take the results that are consistent with 'm0'.This procedure is only needed since we need to run the simulation from the beginning of the board each time. If we could "save" the state vector from the last move, we would not need to do this. We could just apply the next move on the last state vector. While this is possible in a classical simulator, quantum states in a real device only stay coherent for mere microseconds and won't wait around while we ponder our next quantum chess move.A quantum algorithm that uses the results of measurements to affect later steps is called a "feed-forward" algorithm. We are going to use post-selection to transform the current feed-forward algorithm of quantum chess into an algorithm that has terminal measurements only.We will transform our first algorithm from:* Make some non-measurement moves.* Make a move that requires measurement and gets an outcome of 'm0'.* Make a second set of non-measurement moves based on this outcome.Into the following new algorithm:* Make some non-measurement moves.* Make the exclusion move but don't measure yet.* Make the second set of moves.* Measure the relevant qubits. Discard results that do not match our expected result of 'm0'.Note that we have to be careful when we do this. First, we have to track the qubits to perform the correct measurement. In our example above, the queen moved to the 'b2' square, regardless of whether the king was there. Executing that move swapped the qubits of 'b2' and 'a3'. So, when we measure the king's position at the end of the circuit, we need to measure its new position within the 'a3' qubit.Second, we need to ensure that the two outcomes are truly independent. If further moves begin interacting with our 'a3' square that holds the position of the king, then we may get incorrect results. To do this correctly, we may want to swap the king's state into an ancillary qubit so that it doesn't interact with further moves. This is left as an exercise to the reader.The following code shows how we might do this for the example circuit:# Perform all the moves, including the exclusion move. # Afterwards, measure all qubits. 
after_exclusion_move = cirq.Circuit( place_piece(a1), place_piece(a3), split_move(a1, b1, b2), split_move(b1, c1, c2), normal_move(a3, b2), ) + measure_all_squares # Define a function to post select on the result. def histogram_post_select(results, square_idx, value): """Return a histogram of measurement results with post-selection. This will return a dictionary of str to int, with the key being the name of the square and the value being the number of times the square was measuremed as one. These results will exclude any results where the square represented by `square_idx` were not equal to `value`. Args: results: TrialResults returned by a sampler. square_idx: integer index of the measured qubit to post-select on. value: whether the qubit post-selected should be one or zero. """ sampling_frequency = {} all_results = results.measurements['all'] post_selected_results = all_results[all_results[:,square_idx]==value] for idx, sq in enumerate(all_squares): sampling_frequency[str(sq)]=post_selected_results[:, idx].sum() return sampling_frequency # Compute statistics for the pieces. after_results = this_sim.run(after_exclusion_move, repetitions=1000) # Find the index of 'a3' (it should be at index 2) post_select_idx = all_squares.index(cirq.NamedQubit('a3')) # Print out the post-selected histogram print(histogram_post_select(after_results, post_select_idx, 0)){'a1': 0, 'a2': 0, 'a3': 0, 'b1': 0, 'b2': 516, 'b3': 0, 'c1': 227, 'c2': 289, 'c3': 0}We now have about half the number of results, but they only include results with the relevant measurement outcomes. We now have replicated the correct state of the board after the exclusion board. To show that we can use this state to continue the quantum chess game, let's make a final move from the king from c2 to c3. This represents the board state with king on 'a1', queen on 'a3' after the moves *Ka1^b1b2, Kb1^c1c2, Qa3b2.m1, Kc2c3*:after_exclusion_without_measurement = cirq.Circuit( place_piece(a1), place_piece(a3), split_move(a1, b1, b2), split_move(b1, c1, c2), normal_move(a3, b2), normal_move(c2, c3), ) after_exclusion_move = after_exclusion_without_measurement + measure_all_squares after_results = this_sim.run(after_exclusion_move, repetitions=1000) post_select_idx = all_squares.index(cirq.NamedQubit('a3')) print(histogram_post_select(after_results, post_select_idx, 0)){'a1': 0, 'a2': 0, 'a3': 0, 'b1': 0, 'b2': 480, 'b3': 0, 'c1': 236, 'c2': 0, 'c3': 244}We can now see that the measurements that were on 'c2' have now moved to 'c3'. This illustrates how we can continue a quantum chess move sequence that contains a partial measurement part-way through the sequence, even on a device that only supports final measurement! Noise So far, we have dealt with a simulator that is completely noiseless. Even so, we have already seen statistical fluctuations that affect our probabilities. Real NISQ devices will have a lot of noise that will affect calculations in every way possible.There are many components to noise. Some possible (overlapping) sources of noise include:* **T1 Decay / relaxation**: Qubits in the excited state $\vert 1\rangle$ could lose energy and fall into the ground state $\vert 0\rangle$. This is typically qualified by a metric called T1 that specifies the rate of exponential decay (similar to a "half-life" of radioactive decay).* **Dephasing error**: Qubits could be "spinning" in relation to each other. 
For instance, a qubit in $\frac{1}{\sqrt{2}} (\vert0\rangle + \vert 1\rangle)$ could become $\frac{1}{\sqrt{2}} (\vert0\rangle + i\vert 1\rangle)$. This is sometimes simplified to a measure called T2 or T2*. However, since dephasing errors can "spin" at many different rates and directions, a full noise spectrum is needed to fully characterize these types of errors.* **Readout/measurement error**: If we accidentally detect a $\vert 0\rangle$ as a $\vert 1\rangle$ or vice versa when performing a measurement, this is often classified as a readout error. * **Control/Coherent error**: The device can drift out of calibration or can be too difficult to control effectively. If everytime that we do a $\text{iSWAP}$ gate, we consistently do a $\text{iSWAP}^{0.98}$, this is a coherent error.* **Cross talk**: Effects that involve specific pairs of (usually nearby) qubits could occur that can create correlations or entanglements. These effects are often more difficult to model and fix.Luckily, we don't need an actual hardware device to simulate (some of these) noisy effects. We can simulate them using cirq's simulator.Let's run the simulator with the moves Ka1^b1b2, Kb1^c1c2, Kb2^b3c3 that we were examining before, but this time using one of cirq's noise models called a [depolarizing channel](https://en.wikipedia.org/wiki/Quantum_depolarizing_channel) that randomly scrambles a qubit with a set probability.import cirq.contrib.noise_models as ccn noise_model = ccn.DepolarizingNoiseModel(depol_prob=0.01) noisy=cirq.DensityMatrixSimulator(noise=noise_model) king_moves_measured = cirq.Circuit( place_piece(a1), split_move(a1, b1, b2), split_move(b1, c1, c2), split_move(b2, b3, c3), ) + measure_all_squares print(histogram_of_squares(noisy.run(king_moves_measured, repetitions=1000))){'a1': 26, 'a2': 29, 'a3': 31, 'b1': 32, 'b2': 31, 'b3': 238, 'c1': 282, 'c2': 255, 'c3': 268}These results look a lot messier than what we were looking at before. First, all 9 squares have some probability of the king being there, even though there is no possibility that the king can be there. That's a problem.Also, the results no longer add up to 1000. Uh oh. How is that even possible? Have we made a mistake?If noise flips some of the qubits to |1>, we can now measure a state that records the king in two places. For instance `|100 000 001>` is a state that could be achieved with a single incorrect bit flip and represents the board state with two kings: one on 'a1' and one on 'c3'. This will give us two entries in the histogram for a single repetition of the circuit, causing the total to exceed 1000.We know that there is no way to create pieces in chess, so some of these results are non-sensical. One way to do error mitigation is to detect these non-sensical events and remove them. Let's recode this to use "post-selection" to exclude measurements that have more than one king (or that have zero kings), which we know are impossible positions to reach in this situation.def remove_multi_kings(results): """Removes samples from a set of results that do not have one measurement. Returns a histogram dictionary as defined above that only has results that have one square with a measurement outcome of one and all other squares have measurement outcomes of zero. Results that do not fit this pattern are discarded. """ sampling_frequency = {} all_results = results.measurements['all'] # Select only rows where the row sums to one. 
post_selected_results = all_results[all_results[:,:].sum(axis=1)==1] for idx, sq in enumerate(all_squares): sampling_frequency[str(sq)]=post_selected_results[:, idx].sum() return sampling_frequency results = noisy.run(king_moves_measured, repetitions=1000) print(remove_multi_kings(results)){'a1': 0, 'a2': 1, 'a3': 1, 'b1': 0, 'b2': 0, 'b3': 197, 'c1': 169, 'c2': 189, 'c3': 187}This data is much better! We have not eliminated all the noise, but, by taking into account the constraints and invariants of the problem we are trying to solve, we have eliminated much of the noise!Next, let's look at another source of noise. Coherent noiseLet's say that our device consistently performs incorrect operations, but that we do not know the exact nature of these errors. Is there a way to correct for these types of problems?Let's suppose that, instead of doing an iSWAP, our device does part of an iSWAP, but that amount differs from day to day and we don't know what the percentage of an iswap is going to be. One day it could be 50% of an iSWAP, and the next day it could be 80% of an iSWAP. We don't know what that percentage is, but we know that it will stay fairly stable throughout our calculation.How would that affect our results?Let's define a function called "lazy_move" that represents this partial iSWAP. Then, let's march the king from a1 to a2 to a3 to b3 to c3 using this under-rotated iSWAP and see what happens.import random # Define our secret under-rotation parameter once across multiple executions. try: _secret_lazy_value except: _secret_lazy_value = (random.random() + 2.0) / 4.0 def lazy_move(source, target): return cirq.ISWAP(source, target) ** _secret_lazy_value king_maybe_moves = cirq.Circuit( place_piece(a1), lazy_move(a1,a2), lazy_move(a2,a3), lazy_move(a3,b3), lazy_move(b3,c3) ) + measure_all_squares print(histogram_of_squares(sim.run(king_maybe_moves, repetitions=1000))){'a1': 204, 'a2': 167, 'a3': 132, 'b1': 0, 'b2': 0, 'b3': 79, 'c1': 0, 'c2': 0, 'c3': 418}As we run this circuit, we see that the king leaks across the path it takes and only some of the probability makes it to the final destination. However, this probability remains roughly the same across multiple invocations. Can we correct or "calibrate" this broken gate so that it is fixed for our use case?Let's suppose our device supports a rotation angle for our iSWAP gate. Can we over-rotate the gate on purpose to counter-act the device's error?Let's try it.over_rotation_amount = 1.46 king_probably_moves = cirq.Circuit( place_piece(a1), lazy_move(a1,a2) ** over_rotation_amount, lazy_move(a2,a3) ** over_rotation_amount, lazy_move(a3,b3) ** over_rotation_amount, lazy_move(b3,c3) ** over_rotation_amount ) + measure_all_squares print(histogram_of_squares(sim.run(king_probably_moves, repetitions=1000))){'a1': 2, 'a2': 2, 'a3': 1, 'b1': 0, 'b2': 0, 'b3': 2, 'c1': 0, 'c2': 0, 'c3': 993}This looks better, but still not perfect.In a real example, you may need to tune the operations slightly automaticallyto correct for coherent error. This is vastly simplified example of what is being performed when a quantum device is being calibrated. The device operator is attempting to perfect the physical operations (such as microwave pulses) to match the intended logical operation (such as iSWAP). 
Even as a user of a calibrated device, sometimes procedures like this are needed since the operations may not be uniform across the entire processor, and they may drift over time.Let's write a routine to try to optimize our over-rotation constant in order to automatically tune out the under-rotation. In this example, we will do a linear sweep using a parameter value to find the best value. In a real example, you may want to use a gradient optimizer such as those in scipy.import sympy # Our calibration will be to perform one swap # We will then measure how many times the king actually gets there. king_calibration = cirq.Circuit( place_piece(a1), lazy_move(a1,a2) ** sympy.Symbol('r'), cirq.measure(a1, a2, key='all') ) # We will try all rotation values from 1.0 to 3.0 at intervals of 0.01 rotation_parameter = cirq.Linspace('r', 1.0, 3.0, 200) results = sim.run_sweep(king_calibration, params=rotation_parameter, repetitions=1000) # Then we will keep track of the best value we found so far. best_over_rotation = 1.0 most_correct_swaps = 0 for result in results: num_correct_swaps = result.measurements['all'][:,1].sum() if num_correct_swaps > most_correct_swaps: most_correct_swaps = num_correct_swaps best_over_rotation = result.params['r'] print(f'Best over rotation value found: {best_over_rotation}') print(f'Implied secret swap value: {1/best_over_rotation}') print(f'Actual secret swap value: {_secret_lazy_value}')Best over rotation value found: 1.3819095477386936 Implied secret swap value: 0.7236363636363636 Actual secret swap value: 0.7108183208156281Our calibration found a rotation value that corrects for the under-rotation.**Exercise to the reader**: Is there a way to get closer to the correct result? Hint: it may cause the simulation to take longer to run. There are many other types of noise. It is inevitable that you will need to think about noise as you develop your own applications, and to mitigate the error to the largest extent possible if you want your application to be successful. DecompositionMost devices in the NISQ era will only support certain gate types, often referred to as a gate set.Let's suppose that our device only supports one two-qubit gate, the $\sqrt{\text{iSWAP}}$ gate `cirq.ISWAP ** 0.5`.How does that affect our quantum chess circuits?Luckily, most of the moves use either $\sqrt{\text{iSWAP}}$ gates or $\text{iSWAP}$ gates (which are merely two $\sqrt{\text{iSWAP}}$ gates in a row).However, the slide move (which we have ignored so far) is a $\text{controlled-iSWAP}$ gate. How will we perform this on hardware?We will need to transform it into gates we understand. This is often called "decomposing" the gate.First, let's explore some of cirq's capabilities in this area. Cirq has some decomposition routines for known gates. 
Let's see what happens when we decompose the Hadamard $\text{H}$ gate and the two-qubit $\text{CNOT}$ gate.import cirq print('Decomposing H gate') decomposed_h = cirq.decompose_once(cirq.H(a1)) print(decomposed_h) print(cirq.Circuit(decomposed_h)) print() print('Decomposing CNOT gate') decomposed_cnot = cirq.decompose_once(cirq.CNOT(a1, a2)) print(decomposed_cnot) print(cirq.Circuit(decomposed_cnot)) print()Decomposing H gate [(cirq.Y**0.5).on(cirq.NamedQubit('a1')), cirq.XPowGate(exponent=1.0, global_shift=-0.25).on(cirq.NamedQubit('a1'))] a1: ───Y^0.5───X─── Decomposing CNOT gate [(cirq.Y**-0.5).on(cirq.NamedQubit('a2')), cirq.CZ(cirq.NamedQubit('a1'), cirq.NamedQubit('a2')), (cirq.Y**0.5).on(cirq.NamedQubit('a2'))] a1: ────────────@─────────── │ a2: ───Y^-0.5───@───Y^0.5───cirq decomposes an H into a Y rotation and then a X rotation. It decomposes a $\text{CNOT}$ into a $\text{CZ}$ gate with some single-qubit gates. Neat, but it doesn't help us get closer to our goal of decomposing to a $\sqrt{\text{iSWAP}}$.Let's see what happens when we decompose the $\text{controlled-iSWAP}$ gate.cirq.decompose_once(cirq.ISWAP(a1,a3).controlled_by(a2))Oh, yikes. cirq is able to handle decomposing this, but it uses the decomposition of iSWAP with the control bit attached to all gates. The result is an expansion into several three qubit gates and two qubit gates. While correct, this looks to be going the wrong direction in terms of circuit complexity.We will need a more powerful tool. Let's use the concept of an optimizer. This is a cirq tool that can run through a circuit, making changes as it goes along. It can be used for compiling to a specific gate set, compressing circuits into less space, or other useful transformations.Luckily, one already exists for compiling to $\sqrt{\text{iSWAP}}$. `cirq.google.optimized_for_sycamore`. Let's first demonstrate this with the CNOT gate:print(cirq.google.optimized_for_sycamore(cirq.Circuit(cirq.CNOT(a1, a2))))a1: ───PhXZ(a=-0.5,x=0.5,z=0.5)───iSwap────────PhXZ(a=-1,x=1,z=0)───iSwap───────PhXZ(a=0,x=0.5,z=0)──────── │ │ a2: ───PhXZ(a=-1,x=0,z=0.5)───────iSwap^-0.5────────────────────────iSwap^0.5───PhXZ(a=0.5,x=0.5,z=-0.5)───Now we can see that, instead of decomposing into a CZ gate, it decomposes the CNOT into $\sqrt{\text{iSWAP}}$ gates in addition to some one-qubit phased XZ gates.Let's try it with the more complicated case of a **controlled-iSWAP**:c_iswap = cirq.Circuit(cirq.ISWAP(a1,a3).controlled_by(a2)) try: cirq.google.optimized_for_sycamore(c_iswap, optimizer_type='sqrt_iswap') except Exception as e: print('An error occured while attempting to optimize: ') print(e)An error occured while attempting to optimize: Don't know how to work with CY**-0.5(a2, a3). It isn't a native sqrt ISWAP operation, a 1 or 2 qubit gate with a known unitary, or composite.Yuck. It looks like it gets stuck. We will need to help it out a bit to get it into a form that it can understand.One common transformation that might help is that a controlled-SWAP (often called a Fredkin gate) does have a known transformation into a Toffoli (another three qubit gate that is a controlled-CNOT) and two CNOT gates. 
Let's double-check that.import numpy as np c_swap = cirq.Circuit(cirq.SWAP(a1, a3).controlled_by(a2)) c_swap_decomposed = cirq.Circuit(cirq.CNOT(a1, a3), cirq.TOFFOLI(a2, a3, a1), cirq.CNOT(a1, a3)) c_swap_unitary = cirq.unitary(c_swap) c_swap_decomposed_unitary = cirq.unitary(c_swap_decomposed) print('Unitary of controlled SWAP') print(c_swap_unitary) print('Unitary of decomposed circuit') print(c_swap_decomposed_unitary) print('Are they equal?') print(np.isclose(c_swap_unitary, c_swap_decomposed_unitary).all())Unitary of controlled SWAP [[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]] Unitary of decomposed circuit [[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j[...]With some leaps of intuition or mathematics, one might try to add an operation to the circuit to rotate the swap slightly to get an **iSWAP**.Let's add a CZ ** 0.5 gate in the middle to try to change this from a **SWAP** to an **iSWAP**.import numpy as np c_iswap = cirq.Circuit(cirq.ISWAP(a3, a2).controlled_by(a1)) c_iswap_decomposed = cirq.Circuit(cirq.CNOT(a3, a2), cirq.TOFFOLI(a1, a2, a3), cirq.CZ(a1, a2) ** 0.5, cirq.CNOT(a3, a2)) c_iswap_unitary = cirq.unitary(c_iswap) c_iswap_decomposed_unitary = cirq.unitary(c_iswap_decomposed) print('Unitary of controlled iSWAP') print(c_iswap_unitary) print('Unitary of decomposed circuit') print(c_iswap_decomposed_unitary) print('Are they equal?') print(np.isclose(c_iswap_unitary, c_iswap_decomposed_unitary).all())Unitary of controlled iSWAP [[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+1.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+1.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]] Unitary of decomposed circuit [[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+1.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+1.j 0.+0.j 0.+0.j] [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.[...]This is great. 
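Since this identity will come in handy again (for instance, in the IswapDecomposer exercise further below), one possible way to package it is as a small generator; the function name here is just illustrative:
```
def controlled_iswap_ops(c: cirq.Qid, x: cirq.Qid, y: cirq.Qid):
    """Yields a decomposition of cirq.ISWAP(x, y).controlled_by(c)."""
    yield cirq.CNOT(x, y)
    yield cirq.TOFFOLI(c, y, x)
    yield cirq.CZ(c, y) ** 0.5
    yield cirq.CNOT(x, y)

# Sanity check against the directly controlled gate:
print(np.allclose(
    cirq.unitary(cirq.Circuit(controlled_iswap_ops(a1, a3, a2))),
    cirq.unitary(cirq.Circuit(cirq.ISWAP(a3, a2).controlled_by(a1)))))
```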
Let's see if we can decompose this circuit using the built-in operations from cirq.decomposed_circuit = cirq.google.optimized_for_sycamore(c_iswap_decomposed, optimizer_type='sqrt_iswap') print(f'Circuit: {len(decomposed_circuit)} moments') print(decomposed_circuit)Circuit: 45 moments a1: ───PhXZ(a=-0.75,x=0.5,z=0.75)─────────────────────────────────────────────────────────────────────────────iSwap────────PhXZ(a=-1,x=1,z=0)───iSwap───────PhXZ(a=-0.5,x=0.5,z=-1)──────────────────────────────────────────────────────────────────────────────────iSwap────────PhXZ(a=-1,x=1,z=0)───iSwap───────PhXZ(a=-0.5,x=0.5,z=-1)─────────────────────────────────────────────────────────────────────────────iSwap────────PhXZ(a=-1,x=1,z=0)───iSwap───────PhXZ(a=-0.5,x=0.5,z=-1)─────────────────────────────────────────────────────────────────────────────iSwap────────PhXZ(a=-1,x=1,z=0)───iSwap───────PhXZ(a=-0.0991,x=0.595,z=0.364)──────────────────────────────────────────────────────────────────────────iSwap────────PhXZ(a=-1,x=0.364,z=0)───iSwap───────PhXZ(a=0,x=0.136,z=0)──────────────────────────────────────────────────────────────────────────── │ [...]So, a bit of good news and a bit of bad news. We are able to create a circuit that can run on hardware that only supports $\sqrt{\text{iSWAP}}$ and single-qubit gates, but the circuit is extremely long (45 moments).More advanced techniques and intuition can lead to better decompositions, but that is out-of-scope of this tutorial. We'll leave that as a (quite advanced) exercise for the reader. Qubit layout So far, we have been dealing with NamedQubits, since they match very logically to the squares of the quantum chess board. However, this is not how real devices work. We will need to map these abstract qubits to real qubits on a device. Let's consider a Google Sycamore device with 54 qubits arranged in a grid layout:print(cirq.google.Sycamore)(0, 5)───(0, 6) │ │ │ │ (1, 4)───(1, 5)───(1, 6)───(1, 7) │ │ │ │ │ │ │ │ (2, 3)───(2, 4)───(2, 5)───(2, 6)───(2, 7)───(2, 8) │ │ │ │ │ │ │ │ │ │ │ │ (3, 2)───(3, 3)───(3, 4)───(3, 5)───(3, 6)───(3, 7)───(3, 8)───(3, 9) │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ (4, 1)───(4, 2)───(4, 3)───(4, 4)───(4, 5)───(4, 6)───(4, 7)───(4, 8)───(4, 9) │ │ │ │ │ │ [...]We will need to map our algorithm onto real qubits. Let's examine the circuit for our exclusion queen move sequence again and look at the constraints.print(after_exclusion_without_measurement)┌──────────────┐ ┌──────────┐ a1: ───X───iSwap────────iSwap──────────────────────────────────── │ │ a3: ───X───┼────────────┼───────────────────────iSwap──────────── │ │ │ b1: ───────iSwap^0.5────┼────iSwap─────────iSwap┼──────────────── │ │ │ │ b2: ────────────────────iSwap┼─────────────┼────iSwap──────────── │ │ c1: ─────────────────────────iSwap^0.5─────┼───────────────────── │ c2: ───────────────────────────────────────iSwap─────────iSwap─── │ c3: ─────────────────────────────────────────────────────iSwap─── └──────────────┘ └──────────┘First, one note is that this circuit uses iSWAP gates. However, the sycamore device actually only supports $\sqrt{\text{iSWAP}}$ gates. Luckily, an iSWAP is just two $\sqrt{\text{iSWAP}}$ gates in a row. We will use cirq's concept of an optimzer in order to accomplish this.class IswapDecomposer(cirq.PointOptimizer): """Optimizer that decomposes iSWAPs into iSWAP ** 0.5 operations. 
""" def optimization_at(self, circuit: cirq.Circuit, index: int, op: cirq.Operation ) -> Optional[cirq.PointOptimizationSummary]: if op.gate == cirq.ISWAP: # If this operation is an iSWAP, transform new_ops = [cirq.ISWAP(*op.qubits) ** 0.5, cirq.ISWAP(*op.qubits) ** 0.5] # Clear_span = 1 signifies that only this op should be replaced. # clear_qubits replaces only operations with these qubits. # new_operations return cirq.PointOptimizationSummary(clear_span=1, clear_qubits=op.qubits, new_operations=new_ops) # Exercise to the reader: # Can you add an additional condition to this function to replace # controlled-iSWAP operations with the decomposition we found above? exclusion_with_sqrt_iswaps = after_exclusion_without_measurement.copy() IswapDecomposer().optimize_circuit(exclusion_with_sqrt_iswaps) print(exclusion_with_sqrt_iswaps)┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ a1: ───X───iSwap─────────────────iSwap────────iSwap───────────────────────────────────────────────────────────────────────────── │ │ │ a3: ───X───┼─────────────────────┼────────────┼─────────────────────iSwap──────────────────iSwap──────────────────────────────── │ │ │ │ │ b1: ───────iSwap^0.5────iSwap────┼────────────┼────────────iSwap────┼─────────────iSwap────┼──────────────────────────────────── │ │ │ │ │ │ │ b2: ────────────────────┼────────iSwap^0.5────iSwap^0.5────┼────────iSwap^0.5─────┼────────iSwap^0.5──────────────────────────── │ │ │ c1: ────────────────────iSwap^0.5──────────────────────────┼────[...]We now have the circuit with only gates that are supported by the hardware. Let's now focus our attention on mapping our logical qubits onto the Grid.By visualising the circuit in a diagram, it becomes obvious that there are several adjacency requirements, if we can only perform two-qubit operations that are next to each other:* a1 and b1* a1 and b2* b1 and c1* b1 and c2* b2 and a3* c2 and c3We can condense this somewhat into two chains, with only b1 in common:* a3 - b2 - a1 - b1 - c2 - c3* b1 - c1There are many ways to lay this out on a grid, but let's pick a central qubit (3,5) for b1 and lay out the qubits in a pattern that goes outward in threedirections from there.* a3 (6,5) - b2 (5,5) - a1 (4,5) - b1 (3,5) - c2 (2,5) - c3 (1,5)* b1 (3,5) => c1 (3,6)Great. Note that, for smaller devices or larger circuits, this may be a more complicated bending path. **Exercise to the reader:** What do we do if we cannot force the circuit into a grid? (Hint: can you make use of a SWAP gate?)Let's put together a simple dictionary and translate this to a circuit that can run on grid qubits. Note that we will need to do one other task as well: **iSWAP** is not a native operation to Sycamore, so we will need to change it into 2`ISWAP ** 0.5` gates. We will use the concept of a `Device`. This is a cirq construct that can verify that our operations are valid for the device. 
(For instance, try to change one of the qubits below to a qubit not on the device or not adjacent to its required neighbors).qubit_translation = { cirq.NamedQubit('a3') : cirq.GridQubit(6, 5), cirq.NamedQubit('b2') : cirq.GridQubit(5, 5), cirq.NamedQubit('a1') : cirq.GridQubit(4, 5), cirq.NamedQubit('b1') : cirq.GridQubit(3, 5), cirq.NamedQubit('c2') : cirq.GridQubit(2, 5), cirq.NamedQubit('c3') : cirq.GridQubit(1, 5), cirq.NamedQubit('c1') : cirq.GridQubit(3, 6), # Not used except for measurement cirq.NamedQubit('a2') : cirq.GridQubit(2, 4), cirq.NamedQubit('b3') : cirq.GridQubit(3, 4), } def qubit_transformer(namedQubit: cirq.Qid) -> cirq.Qid: """Transform a namedQubit into a GridQubit using the mapping.""" return qubit_translation[namedQubit] # First, modify the circuit so that ISWAP's become two ISWAP ** 0.5 exclusion_move_sycamore = exclusion_with_sqrt_iswaps.with_device( new_device=cirq.google.Sycamore, qubit_mapping=qubit_transformer) print(exclusion_move_sycamore)(1, 5): ───────────────────────────────────────────────────────────────────iSwap───────iSwap─────── │ │ (2, 5): ───────────────────────────────────────────iSwap───────iSwap───────iSwap^0.5───iSwap^0.5─── │ │ (3, 5): ───────iSwap───────iSwap───────────────────iSwap^0.5───iSwap^0.5─────────────────────────── │ │ (3, 6): ───────┼───────────iSwap^0.5─────────────────────────────────────────────────────────────── │ (4, 5): ───X───iSwap^0.5───iSwap───────iSwap─────────────────────────────────────────────────────── │ │ (5, 5): ───────────────────iSwap^0.5───iSwap^0.5───iSwap───────iSwap─────────────────────────────── │ │ (6, 5): ───X───────────────────────────────────────iSwap^0.5───iSwap^0.5──────────────────────[...]This new diagram shows how we can map the logical qubits representing 'squares' into a circuit with physical grid qubits that only uses adjacent qubits. This circuit could then be run on a hardware device!**Exercise to the reader:** Can you write the code to execute this circuit (in the simulator) and do the reverse translation back to logical named qubits to interpret the results as chess positions? Dynamic Qubit Mapping The first time, we mapped the qubits from `NamedQubit`s to `GridQubit`s by hand. Can we map qubits automatically without this hand-tuned mapping?We will use a depth-first search to try to automatically map the qubits. The following code snippet is a bit longer, but it will attempt to map a qubit, followed by mapping the qubits it is connected to. For example, we will first map the square 'b1' to `GridQubit` (3, 5). We then see that 'a1', 'c1', and 'c2' need to be adjacent to 'b1', so we then map them (and their adjacent squares, etc) next. We repeat this until we finish mapping or until we get ourselves into an impossible situation.def map_helper(cur_node: cirq.Qid, mapping: Dict[cirq.Qid, cirq.GridQubit], available_qubits: Set[cirq.GridQubit], graph: Dict[cirq.Qid, Iterable[cirq.Qid]]) -> bool: """Helper function to construct mapping. Traverses a graph and performs recursive depth-first search to construct a mapping one node at a time. On failure, raises an error and back-tracks until a suitable mapping can be found. Assumes all qubits in the graph are connected. Args: cur_node: node to examine. mapping: current mapping of named qubits to `GridQubits` available_qubits: current set of unassigned qubits graph: adjacency graph of connections between qubits, representing by a dictionary from qubit to adjacent qubits Returns: True if it was possible to map the qubits, False if not. 
""" # cur_node is the named qubit # cur_qubit is the currently assigned GridQubit cur_qubit = mapping[cur_node] # Determine the list of adjacent nodes that still need to be mapped nodes_to_map = [] for node in graph[cur_node]: if node not in mapping: # Unmapped node. nodes_to_map.append(node) else: # Mapped adjacent node. # Verify that the previous mapping is adjacent in the Grid. if not mapping[node].is_adjacent(cur_qubit): # Nodes previously mapped are not adjacent # This is an invalid mapping return False if not nodes_to_map: # All done with this node. return True # Find qubits that are adjacent in the grid valid_adjacent_qubits = [] for a in [(0, 1), (0, -1), (1, 0), (-1, 0)]: q = cur_qubit + a if q in available_qubits: valid_adjacent_qubits.append(q) # Not enough adjacent qubits to map all qubits if len(valid_adjacent_qubits) < len(nodes_to_map): return False # Only map one qubit at a time # This makes back-tracking easier. node_to_map = nodes_to_map[0] for node_to_try in valid_adjacent_qubits: # Add proposed qubit to the mapping # and remove it from available qubits mapping[node_to_map] = node_to_try available_qubits.remove(node_to_try) # Recurse # Move on to the qubit we just mapped. # Then, come back to this node and # map the rest of the adjacent nodes if not map_helper(node_to_map, mapping, available_qubits, graph): del mapping[node_to_map] available_qubits.add(node_to_try) continue if not map_helper(cur_node, mapping, available_qubits, graph): del mapping[node_to_map] available_qubits.add(node_to_try) continue # We have succeeded in mapping the qubits! return True # All available qubits were not valid. # Fail upwards and back-track if possible. return False def qubit_mapping(circuit: cirq.Circuit, device: cirq.Device, start_qubit: cirq.Qid, mapped_qubit: cirq.Qid) -> Dict[cirq.Qid, cirq.GridQubit]: """Create a mapping from NamedQubits to Grid Qubits This function analyzes the circuit to determine which qubits need to be adjacent, then maps to the grid of the device based on the generated mapping. """ # Build up an adjacency graph based on the circuits. 
# Two qubit gates will turn into edges in the graph g = {} for m in circuit: for op in m: if len(op.qubits) == 2: q1, q2 = op.qubits if q1 not in g: g[q1] = [] if q2 not in g: g[q2] = [] if q2 not in g[q1]: g[q1].append(q2) if q1 not in g[q2]: g[q2].append(q1) for q in g: if len(g[q]) > 4: raise ValueError( f'Qubit {q} needs more than 4 adjacent qubits!') # Initialize mappings and available qubits start_list = set(device.qubits) start_list.remove(mapped_qubit) mapping = {} mapping[start_qubit] = mapped_qubit map_helper(start_qubit, mapping, start_list, g) if len(mapping) != len(g): print('Warning: could not map all qubits!') return mapping dynamic_mapping = qubit_mapping(exclusion_with_sqrt_iswaps, cirq.google.Sycamore, cirq.NamedQubit('b1'), cirq.GridQubit(3, 5)) for q in dynamic_mapping: print(f'Qubit {q} maps to {dynamic_mapping[q]}') dynamically_mapped_circuit = exclusion_with_sqrt_iswaps.with_device( new_device=cirq.google.Sycamore, qubit_mapping=lambda q: dynamic_mapping[q]) print(dynamically_mapped_circuit)Qubit b1 maps to (3, 5) Qubit a1 maps to (3, 6) Qubit b2 maps to (3, 7) Qubit a3 maps to (3, 8) Qubit c1 maps to (3, 4) Qubit c2 maps to (4, 5) Qubit c3 maps to (4, 6) ┌──────────────────┐ ┌──────────────────┐ (3, 4): ───────────────────iSwap───────────────────────────────────────────────────────────────────────────────────────── │ (3, 5): ───────iSwap───────iSwap^0.5────────────────iSwap──────────────────iSwap───────────────────────────────────────── │ │ │ (3, 6): ───X───iSwap^0.5───iSwap───────iSwap────────┼──────────────────────┼───────────────────────────────────────────── │ │ │ │ (3, 7): ───────────────────iSwap^0.5───iSwap^0.5────┼────────iSwap─────────┼────────iSwap──────────────────────────────── │ │ [...]Homework 3 1. Implement L1 norm regularization as a custom loss functionimport torch def lasso_reg(params, l1_lambda): l1_penalty = torch.nn.L1Loss(size_average=False) reg_loss = 0 for param in params: reg_loss += l1_penalty(param) loss += l1_lambda * reg_loss return loss2. The third-to-last paragraph in the notebook is concerning early stopping, an "old" regularization technique which involves the stopping of training earlier than the number of epochs would suggest. Read the paragraph and download the paper from Prechelt et al. a. Implement early stopping in the $E_{opt}$ specification In the paper, the value $E_{opt}$ is defned to be the lowest validation set error obtained in epochs up to $t$: $$E_{opt}(t) = \min_{t \le t'} E_{va}(t')$$ where $E_{va}$ is the validation error, i.e. the corresponding error on the validation set. 
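Note that the minimum is taken over epochs $t' \le t$, so $E_{opt}$ is simply a running minimum of the validation error seen so far. A tiny sketch with made-up error values:

```python
# Running minimum of hypothetical per-epoch validation errors E_va(t).
val_errors = [0.20, 0.15, 0.17, 0.12]
E_opt = []
best = float('inf')
for e_va in val_errors:
    best = min(best, e_va)
    E_opt.append(best)
print(E_opt)  # [0.2, 0.15, 0.15, 0.12]
```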
As per instructions, I'm going to use the test data as validation.# import in Colab import sys sys.path.append('/content/mnist.py') sys.path.append('/content/train_utils.py') import mnist from train_utils import accuracy, AverageMeter from torch import nn class MLP(nn.Module): def __init__(self): super().__init__() self.flat = nn.Flatten() self.h1 = nn.Linear(28*28, 16) self.h2 = nn.Linear(16, 32) self.h3 = nn.Linear(32, 24) self.out = nn.Linear(24, 10) def forward(self, X, activ_hidden=nn.functional.relu): out = self.flat(X) out = activ_hidden(self.h1(out)) out = activ_hidden(self.h2(out)) out = activ_hidden(self.h3(out)) out = self.out(out) return out def train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance): for X, y in dataloader: optimizer.zero_grad() y_hat = model(X) loss = loss_fn(y_hat, y) loss.backward() optimizer.step() acc = performance(y_hat, y) loss_meter.update(val=loss.item(), n=X.shape[0]) performance_meter.update(val=acc, n=X.shape[0]) def train_model(model, dataloader1, dataloader2, loss_fn, optimizer, num_epochs, performance=accuracy): model.train() E = { "epoch": [],"training perf": [], "validation perf": [], "parameters": [], "optimizer": [] } for epoch in range(num_epochs): loss_meter = AverageMeter() performance_meter = AverageMeter() train_epoch(model, dataloader1, loss_fn, optimizer, loss_meter, performance_meter, performance) fin_loss, fin_perf = test_model(model, dataloader2, loss_fn=loss_fn) E["epoch"].append(epoch) E["training perf"].append(performance_meter) E["validation perf"].append(fin_perf) E["parameters"].append(model.state_dict()) E["optimizer"].append(optimizer.state_dict()) return loss_meter.sum, performance_meter.avg, E def test_model(model, dataloader, performance=accuracy, loss_fn=None): # create an AverageMeter for the loss if passed if loss_fn is not None: loss_meter = AverageMeter() performance_meter = AverageMeter() model.eval() with torch.no_grad(): for X, y in dataloader: y_hat = model(X) loss = loss_fn(y_hat, y) if loss_fn is not None else None acc = performance(y_hat, y) if loss_fn is not None: loss_meter.update(loss.item(), X.shape[0]) performance_meter.update(acc, X.shape[0]) # get final performances fin_loss = loss_meter.sum if loss_fn is not None else None fin_perf = performance_meter.avg return fin_loss, fin_perf minibatch_size_train = 256 minibatch_size_test = 512 trainloader, testloader, trainset, testset = mnist.get_data(batch_size_train=minibatch_size_test, batch_size_test=minibatch_size_test) learn_rate = 0.1 num_epochs = 30 model = MLP() loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate) train_loss, train_acc, E = train_model(model, trainloader, testloader, loss_fn, optimizer, num_epochs)Since `Validation_error = 1 - Validation_performance`, minimizing the error is equivalent to maximizing the performance.from matplotlib import pyplot as plt val_list = list(E["validation perf"]) maxval = max(E["validation perf"]) index = val_list.index(max(val_list)) + 1 plt.plot(E["epoch"], E["validation perf"] ) print(f"The best validation performance is {maxval}, obtained at epoch no. {index} out of {num_epochs}.")The best validation performance is 0.9701166666666666, obtained at epoch no. 29 out of 30.b$^*$. 
Implement early stopping in one of the additional specifications A stopping criterion described in the paper is based on the *generalization loss*: $$ GL (t) = 100 * \big( \frac{E_{va}(t)}{E_{opt}(t)} -1 \big)$$ that is, the validation error over the minimum so far in percent. We should stop as soon as this value exceeds a certain threshold $\alpha$.As reported in the paper, this criterion is used to maximize the probability to find a good solution, as opposed to maximizing the average quality of the solutions.alpha = 1 E_opt = 1 - val_list[0] for i in range(num_epochs): E_va = 1 - val_list[i] if E_va < E_opt: E_opt = E_va GL = 100 * (E_va/E_opt - 1) if GL > alpha: print(f"This stopping criterion halts the computation at epoch {i+1}") breakThis stopping criterion halts the computation at epoch 6As we can see, this criterion stops very early, at the first epoch with lower performance. A solution is to add momentum to SGD to minimize oscillations:optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate, momentum=0.9) num_epochs = 15 train_loss_m, train_acc_m, E_m = train_model(model, trainloader, testloader, loss_fn, optimizer, num_epochs) from matplotlib import pyplot as plt val_list = list(E_m["validation perf"]) maxval = max(E_m["validation perf"]) index = val_list.index(max(val_list)) + 1 plt.plot(E_m["epoch"], E_m["validation perf"] ) print(f"The best validation performance is {maxval}, obtained at epoch no. {index} out of {num_epochs}.") alpha = 2 E_opt = 1 - val_list[0] for i in range(num_epochs): E_va = 1 - val_list[i] if E_va < E_opt: E_opt = E_va GL = 100 * (E_va/E_opt - 1) if GL > alpha: print(f"This stopping criterion halts the computation at epoch {i+1}") breakThis stopping criterion halts the computation at epoch 4View the docs#@title Licensed under the Apache License, Version 2.0 (the "License") # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.FlatMaplocalStorage.setItem('language', 'language-py') Pydoc Applies a simple 1-to-many mapping function over each element in the collection.The many elements are flattened into the resulting collection. 
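The 1-to-many idea can be previewed with plain Python before building a pipeline (a small illustrative sketch; `split_words` here is just a made-up helper name):

```python
# Each input element may yield zero or more output elements; FlatMap then
# flattens all of them into one collection.
def split_words(line):
    return line.split()

lines = ['🍓Strawberry 🥕Carrot', '🍅Tomato', '']
flattened = [word for line in lines for word in split_words(line)]
print(flattened)  # ['🍓Strawberry', '🥕Carrot', '🍅Tomato']
```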
SetupTo run a code cell, you can click the **Run cell** button at the top left of the cell,or select it and press **`Shift+Enter`**.Try modifying a code cell and re-running it to see what happens.> To learn more about Colab, see> [Welcome to Colaboratory!](https://colab.sandbox.google.com/notebooks/welcome.ipynb).First, let's install the `apache-beam` module.!pip install --quiet -U apache-beamExamplesIn the following examples, we create a pipeline with a `PCollection` of produce with their icon, name, and duration.Then, we apply `FlatMap` in multiple ways to yield zero or more elements per each input element into the resulting `PCollection`.`FlatMap` accepts a function that returns an `iterable`,where each of the output `iterable`'s elements is an element of the resulting `PCollection`. Example 1: FlatMap with a predefined functionWe use the function `str.split` which takes a single `str` element and outputs a `list` of `str`s.This pipeline splits the input element using whitespaces, creating a list of zero or more elements.import apache_beam as beam with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry 🥕Carrot 🍆Eggplant', '🍅Tomato 🥔Potato', ]) | 'Split words' >> beam.FlatMap(str.split) | beam.Map(print) )View source code Example 2: FlatMap with a functionWe define a function `split_words` which splits an input `str` element using the delimiter `','` and outputs a `list` of `str`s.import apache_beam as beam def split_words(text): return text.split(',') with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap(split_words) | beam.Map(print) )View source code Example 3: FlatMap with a lambda functionFor this example, we want to flatten a `PCollection` of lists of `str`s into a `PCollection` of `str`s.Each input element is already an `iterable`, where each element is what we want in the resulting `PCollection`.We use a lambda function that returns the same input element it received.import apache_beam as beam with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'], ['🍅Tomato', '🥔Potato'], ]) | 'Flatten lists' >> beam.FlatMap(lambda elements: elements) | beam.Map(print) )View source code Example 4: FlatMap with a generatorFor this example, we want to flatten a `PCollection` of lists of `str`s into a `PCollection` of `str`s.We use a generator to iterate over the input list and yield each of the elements.Each yielded result in the generator is an element in the resulting `PCollection`.import apache_beam as beam def generate_elements(elements): for element in elements: yield element with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'], ['🍅Tomato', '🥔Potato'], ]) | 'Flatten lists' >> beam.FlatMap(generate_elements) | beam.Map(print) )View source code Example 5: FlatMapTuple for key-value pairsIf your `PCollection` consists of `(key, value)` pairs,you can use `FlatMapTuple` to unpack them into different function arguments.import apache_beam as beam def format_plant(icon, plant): if icon: yield '{}{}'.format(icon, plant) with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ('🍓', 'Strawberry'), ('🥕', 'Carrot'), ('🍆', 'Eggplant'), ('🍅', 'Tomato'), ('🥔', 'Potato'), (None, 'Invalid'), ]) | 'Format' >> beam.FlatMapTuple(format_plant) | 
beam.Map(print) )View source code Example 6: FlatMap with multiple argumentsYou can pass functions with multiple arguments to `FlatMap`.They are passed as additional positional arguments or keyword arguments to the function.In this example, `split_words` takes `text` and `delimiter` as arguments.import apache_beam as beam def split_words(text, delimiter=None): return text.split(delimiter) with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap(split_words, delimiter=',') | beam.Map(print) )View source code Example 7: FlatMap with side inputs as singletonsIf the `PCollection` has a single value, such as the average from another computation,passing the `PCollection` as a *singleton* accesses that value.In this example, we pass a `PCollection` the value `','` as a singleton.We then use that value as the delimiter for the `str.split` method.import apache_beam as beam with beam.Pipeline() as pipeline: delimiter = pipeline | 'Create delimiter' >> beam.Create([',']) plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap( lambda text, delimiter: text.split(delimiter), delimiter=beam.pvalue.AsSingleton(delimiter), ) | beam.Map(print) )View source code Example 8: FlatMap with side inputs as iteratorsIf the `PCollection` has multiple values, pass the `PCollection` as an *iterator*.This accesses elements lazily as they are needed,so it is possible to iterate over large `PCollection`s that won't fit into memory.import apache_beam as beam def normalize_and_validate_durations(plant, valid_durations): plant['duration'] = plant['duration'].lower() if plant['duration'] in valid_durations: yield plant with beam.Pipeline() as pipeline: valid_durations = pipeline | 'Valid durations' >> beam.Create([ 'annual', 'biennial', 'perennial', ]) valid_plants = ( pipeline | 'Gardening plants' >> beam.Create([ {'icon': '🍓', 'name': 'Strawberry', 'duration': 'Perennial'}, {'icon': '🥕', 'name': 'Carrot', 'duration': 'BIENNIAL'}, {'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'}, {'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'}, {'icon': '🥔', 'name': 'Potato', 'duration': 'unknown'}, ]) | 'Normalize and validate durations' >> beam.FlatMap( normalize_and_validate_durations, valid_durations=beam.pvalue.AsIter(valid_durations), ) | beam.Map(print) )View source code > **Note**: You can pass the `PCollection` as a *list* with `beam.pvalue.AsList(pcollection)`,> but this requires that all the elements fit into memory. 
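A short sketch of the `beam.pvalue.AsList` variant mentioned in the note above (the element values and step names here are made up for illustration):

```python
import apache_beam as beam

with beam.Pipeline() as pipeline:
    stop_words = pipeline | 'Stop words' >> beam.Create(['and', 'or'])
    words = (
        pipeline
        | 'Words' >> beam.Create(['🍓Strawberry', 'and', '🥕Carrot', 'or', '🍅Tomato'])
        | 'Drop stop words' >> beam.FlatMap(
            # The side input arrives as an in-memory Python list.
            lambda word, stops: [] if word in stops else [word],
            stops=beam.pvalue.AsList(stop_words),
        )
        | beam.Map(print)
    )
```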
Example 9: FlatMap with side inputs as dictionariesIf a `PCollection` is small enough to fit into memory, then that `PCollection` can be passed as a *dictionary*.Each element must be a `(key, value)` pair.Note that all the elements of the `PCollection` must fit into memory for this.If the `PCollection` won't fit into memory, use `beam.pvalue.AsIter(pcollection)` instead.import apache_beam as beam def replace_duration_if_valid(plant, durations): if plant['duration'] in durations: plant['duration'] = durations[plant['duration']] yield plant with beam.Pipeline() as pipeline: durations = pipeline | 'Durations dict' >> beam.Create([ (0, 'annual'), (1, 'biennial'), (2, 'perennial'), ]) valid_plants = ( pipeline | 'Gardening plants' >> beam.Create([ {'icon': '🍓', 'name': 'Strawberry', 'duration': 2}, {'icon': '🥕', 'name': 'Carrot', 'duration': 1}, {'icon': '🍆', 'name': 'Eggplant', 'duration': 2}, {'icon': '🍅', 'name': 'Tomato', 'duration': 0}, {'icon': '🥔', 'name': 'Potato', 'duration': -1}, ]) | 'Replace duration if valid' >> beam.FlatMap( replace_duration_if_valid, durations=beam.pvalue.AsDict(durations), ) | beam.Map(print) )Execicio 01print("Wellington jo´se dods santos")Execicio 02 Multiplicandoa = 5 * 2 b = 3 * 5 soma = a * b print("A soma dos valores é %d " %soma)A soma dos valores é 150Execicio 03 multiplicando 3 varivaeisa = 5 * 2 b = 3 * 5 c = 5 soma = a * b + c print("A soma dos valores é %d " %soma)A soma dos valores é 155Execicio 04 escolhar o operadorx = int(input("Digite o primeiro valor:")) n = int(input("Digite o segundo valor:" )) z = str(input("Digite o operador + - * /:" )) if z == "+": print(int(x + n)) elif z == "-": print(int(x - n)) elif z == "*": print(int(x * n)) elif z == "/": print(int(x // n))Digite o primeiro valor:10 Digite o segundo valor:2 Digite o operador + - * /:/ 5Execicio 05 contadorcontador = 1 while contador <=10: print(contador) contador = contador + 1 for x in range (10): print(x)0 1 2 3 4 5 6 7 8 9Execicio 06 programa para contar numeros par sem resolverc = 1 s = 0 i = 0 while c ["1"] : c = c + 1 if c % 2 == 0: c = s s = s + 1 else: c = i i = i + 1 print("A quantidade de numero pares é:", s) print(" A quantidade de numeros impares é:", i) lista = [i] for i in [1,2,3,4,5,6,7,8,9,]: res = i%2 if res == 0: print(i) lista.append(i) print(lista) sum(lista) -------------------------------------------------------------- print("\nb) Usando a instrução For e as funções Range e Sum:") contador04 = 1 os_pares = [0] os_impares = [0] for contador04 in range (1,11): if contador04 % 2 == 0: os_pares.append(contador04) continue else: os_impares.append(contador04) continue qtde_pares = len(os_pares) qtde_impares = len(os_impares) Soma_pares = sum(os_pares) Soma_impares = sum(os_impares) print("Quantidade de pares é: ", qtde_pares) print(os_pares) print("A soma dos pares é: ", Soma_pares) print("Quantidade de impares é: ", qtde_impares) print(os_impares) print("A soma dos impares é: ", Soma_impares)2 4 6 8 [0, 2, 4, 6, 8]Face landmark- This is an ipynb file created in Kaggle environment.- CNN Dataset- Face Images with Marked Landmark Points, https://www.kaggle.com/drgilermo/face-images-with-marked-landmark-points 0. 
Import Packagesimport matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import tensorflow as tf import pandas as pd import numpy as np from tqdm.keras import TqdmCallback from tensorflow.keras.models import Sequential, load_model from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout, LeakyReLU, BatchNormalization from tensorflow.keras.initializers import glorot_uniform from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras.optimizers import SGD, Adam1. Make dataset# let's deal with .npz type of image file img = np.load('/kaggle/input/face-images-with-marked-landmark-points/face_images.npz') # class face = img.get(img.files[0]) # image to array (96, 96, 7049) face = np.moveaxis(face,-1,0) # (7049, 96, 96) face = face.reshape(face.shape[0],face.shape[1],face.shape[1], 1) print(face.shape) key_pts = pd.read_csv('/kaggle/input/face-images-with-marked-landmark-points/facial_keypoints.csv') key_pts.head() print(key_pts.shape) key_pts = key_pts.fillna(0) key_pts.index.values features = face[key_pts.index.values, :, :, :] features = features/255 # normalize key_pts.reset_index(inplace=True,drop=True) features.shape # visualization ind = 10 plt.imshow(features[ind,:,:,0],cmap='gray') plt.scatter(key_pts.iloc[ind][0:-1:2], key_pts.iloc[ind][1::2], c='y') plt.axis('off') plt.show() x_train, x_test, y_train, y_test = train_test_split(features, key_pts, test_size = 0.2)2. Model (CNN)# params img_size = 96 batch_size = 32 epochs = 200 model = Sequential() model.add(Input(shape=(img_size, img_size, 1))) model.add(BatchNormalization()) model.add(Conv2D(32, (3,3), padding="same")) model.add(LeakyReLU(alpha = 0.1)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(BatchNormalization()) model.add(Conv2D(64, (3,3), padding="same")) model.add(LeakyReLU(alpha = 0.1)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(BatchNormalization()) model.add(Conv2D(128, (3,3), padding="same")) model.add(LeakyReLU(alpha = 0.1)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(256)) model.add(LeakyReLU(alpha = 0.1)) model.add(Dropout(0.5)) model.add(Dense(64)) model.add(LeakyReLU(alpha = 0.1)) model.add(Dense(30)) model.summary() model.compile(loss = 'mean_squared_error', optimizer = Adam(), metrics=['mean_squared_error'])3. Training%%time hist = model.fit(x_train, y_train,batch_size = batch_size, epochs = epochs, validation_data = (x_test, y_test), shuffle = True) # Save model model.save("/kaggle/working/landmark.h5") landmark_json = model.to_json() with open("/kaggle/working/landmark.json", "w") as json_file: json_file.write(landmark_json) model = load_model('/kaggle/working/landmark.h5') plt.plot(hist.history['loss'][3:50]) plt.plot(hist.history['val_loss'][3:50]) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show()4. 
Predictionpred = model.predict(x_test) index = 100 pred_pt = np.array(y_test)[index] plt.imshow(x_test[index,:,:,:], cmap='gray') plt.scatter(pred_pt[0:-1:2], pred_pt[1::2], c='r') plt.scatter(pred[index,:][0:-1:2],pred[index,:][1::2],c='b') plt.axis('off') plt.show()**Beginner Group Walkthrough**def input_integer(prompt, minimum=0, maximum=100): while True: try: result = input(prompt) result = int(result) if not minimum <= result <= maximum: raise ValueError return result except ValueError: print('Invalid input!') scores = [] while True: name = input('Name:') if not name: break score = input_integer('Score:') scores.append((score, name)) scores.sort() max_length = max([len(name) for score, name in scores]) place = 1 previous = None for score, name in reversed(scores): if score == previous: place_string = ' ' else: place_string = (str(place) + '.').ljust(4) place += 1 name = name.ljust(max_length) previous = score score = str(score).rjust(4) print(place_string, name, score, sep='')Copyright 2020 The TensorFlow Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.TF Lattice Custom Estimators View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook OverviewYou can use custom estimators to create arbitrarily monotonic models using TFL layers. This guide outlines the steps needed to create such estimators. Setup Installing TF Lattice package:#@test {"skip": true} !pip install tensorflow-latticeImporting required packages:import tensorflow as tf import logging import numpy as np import pandas as pd import sys import tensorflow_lattice as tfl from tensorflow import feature_column as fc from tensorflow_estimator.python.estimator.canned import optimizers from tensorflow_estimator.python.estimator.head import binary_class_head logging.disable(sys.maxsize)Downloading the UCI Statlog (Heart) dataset:csv_file = tf.keras.utils.get_file( 'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv') df = pd.read_csv(csv_file) target = df.pop('target') train_size = int(len(df) * 0.8) train_x = df[:train_size] train_y = target[:train_size] test_x = df[train_size:] test_y = target[train_size:] df.head()Setting the default values used for training in this guide:LEARNING_RATE = 0.1 BATCH_SIZE = 128 NUM_EPOCHS = 1000Feature ColumnsAs for any other TF estimator, data needs to be passed to the estimator, which is typically via an input_fn and parsed using [FeatureColumns](https://www.tensorflow.org/guide/feature_columns).# Feature columns. 
# - age # - sex # - ca number of major vessels (0-3) colored by flourosopy # - thal 3 = normal; 6 = fixed defect; 7 = reversable defect feature_columns = [ fc.numeric_column('age', default_value=-1), fc.categorical_column_with_vocabulary_list('sex', [0, 1]), fc.numeric_column('ca'), fc.categorical_column_with_vocabulary_list( 'thal', ['normal', 'fixed', 'reversible']), ]Note that categorical features do not need to be wrapped by a dense feature column, since `tfl.laysers.CategoricalCalibration` layer can directly consume category indices. Creating input_fnAs for any other estimator, you can use input_fn to feed data to the model for training and evaluation.train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=train_x, y=train_y, shuffle=True, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS, num_threads=1) test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=test_x, y=test_y, shuffle=False, batch_size=BATCH_SIZE, num_epochs=1, num_threads=1)Creating model_fnThere are several ways to create a custom estimator. Here we will construct a `model_fn` that calls a Keras model on the parsed input tensors. To parse the input features, you can use `tf.feature_column.input_layer`, `tf.keras.layers.DenseFeatures`, or `tfl.estimators.transform_features`. If you use the latter, you will not need to wrap categorical features with dense feature columns, and the resulting tensors will not be concatenated, which makes it easier to use the features in the calibration layers.To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. We then use the Keras model to create the custom estimator.def model_fn(features, labels, mode, config): """model_fn for the custom estimator.""" del config input_tensors = tfl.estimators.transform_features(features, feature_columns) inputs = { key: tf.keras.layers.Input(shape=(1,), name=key) for key in input_tensors } lattice_sizes = [3, 2, 2, 2] lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing'] lattice_input = tf.keras.layers.Concatenate(axis=1)([ tfl.layers.PWLCalibration( input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32), # The output range of the calibrator should be the input range of # the following lattice dimension. output_min=0.0, output_max=lattice_sizes[0] - 1.0, monotonicity='increasing', )(inputs['age']), tfl.layers.CategoricalCalibration( # Number of categories including any missing/default category. num_buckets=2, output_min=0.0, output_max=lattice_sizes[1] - 1.0, )(inputs['sex']), tfl.layers.PWLCalibration( input_keypoints=[0.0, 1.0, 2.0, 3.0], output_min=0.0, output_max=lattice_sizes[0] - 1.0, # You can specify TFL regularizers as tuple # ('regularizer name', l1, l2). kernel_regularizer=('hessian', 0.0, 1e-4), monotonicity='increasing', )(inputs['ca']), tfl.layers.CategoricalCalibration( num_buckets=3, output_min=0.0, output_max=lattice_sizes[1] - 1.0, # Categorical monotonicity can be partial order. # (i, j) indicates that we must have output(i) <= output(j). # Make sure to set the lattice monotonicity to 'increasing' for this # dimension. 
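# With the 'thal' vocabulary order ['normal', 'fixed', 'reversible'] declared
# in the feature columns above, (0, 1) and (0, 2) constrain the calibrated
# output for 'normal' to be <= the outputs for 'fixed' and 'reversible'.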
monotonicities=[(0, 1), (0, 2)], )(inputs['thal']), ]) output = tfl.layers.Lattice( lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)( lattice_input) training = (mode == tf.estimator.ModeKeys.TRAIN) model = tf.keras.Model(inputs=inputs, outputs=output) logits = model(input_tensors, training=training) if training: optimizer = optimizers.get_optimizer_instance_v2('Adagrad', LEARNING_RATE) else: optimizer = None head = binary_class_head.BinaryClassHead() return head.create_estimator_spec( features=features, mode=mode, labels=labels, optimizer=optimizer, logits=logits, trainable_variables=model.trainable_variables, update_ops=model.updates)Training and EstimatorUsing the `model_fn` we can create and train the estimator.estimator = tf.estimator.Estimator(model_fn=model_fn) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('AUC: {}'.format(results['auc']))Basic Imports--- Librariesimport os, sys from pathlib import Path from datetime import datetime, timedelta from google.colab import driveRequirement already up-to-date: yfinance in /usr/local/lib/python3.6/dist-packages (0.1.43) Requirement already satisfied, skipping upgrade: numpy>=1.15 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.16.4) Requirement already satisfied, skipping upgrade: multitasking>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.0.9) Requirement already satisfied, skipping upgrade: pandas>=0.24 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.24.2) Requirement already satisfied, skipping upgrade: requests>=2.20 in /usr/local/lib/python3.6/dist-packages (from yfinance) (2.21.0) Requirement already satisfied, skipping upgrade: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2018.9) Requirement already satisfied, skipping upgrade: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2.5.3) Requirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6[...]Log2/Z-score NotebookBy: ()Adapted from code written by Dr. and Last updated: 20200527 Import external libraries.import os import random import re import subprocess import pandas as pd import numpy as np import seaborn as sb import matplotlib.pyplot as plt import matplotlib.colors as mplc from scipy import signal from scipy.stats.stats import pearsonr import plotly.figure_factory as ff import plotly import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.express as px init_notebook_mode(connected = True) %matplotlib notebookImport functions written for this project.from cycif_modules import *Define function to change header names. Not encapsutated in `cycif_modules`, so that user can change on the fly as necessary.# This may change for each experiment, so I have not sequestered # this code in the my_modules.py file # This function takes in a dataframe, changes the names # of the column in various ways, and returns the dataframe. # For best accuracy and generalizability, the code uses # regular expressions (regex) to find strings for replacement. 
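# Note: newer pandas releases (2.0+) default Series.str.replace to
# regex=False, so the '^x' and '^ ' patterns used below should be passed with
# an explicit regex=True on those versions.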
def apply_header_changes(df): # remove lowercase x at beginning of name df.columns = df.columns.str.replace("^x","") # remove space at beginning of name df.columns = df.columns.str.replace("^ ","") # replace space with underscore df.columns = df.columns.str.replace(" ","_") # fix typos #df.columns = df.columns.str.replace("typo","correct_name") return dfBegin Workflow Get directories# Base directory for project base_dir = '' # Set name for of project # for use in directory creation project_name = 'ww' project_name = 'repro' project_name = 'gz_new' # Set string for current step, and for previous step # for use in file and direcotry naming step_suffix = 'zscore' previous_step_suffix_long = "_bs" # Initial input data directory input_data_dir = os.path.join(base_dir, project_name + previous_step_suffix_long) # log2/z-score directory output_data_dir = os.path.join(base_dir, project_name + "_" + step_suffix) # log2/z-score images subdirectory output_images_dir = os.path.join(output_data_dir,"images") # Metadata directories metadata_dir = os.path.join(base_dir, project_name + "_metadata") metadata_images_dir = os.path.join(metadata_dir,"images") # Create necessary directories for this step, if they don't already exist for d in [base_dir, input_data_dir, output_data_dir, output_images_dir, metadata_dir, metadata_images_dir]: if not os.path.exists(d): os.makedirs(d) # Change directory to location of input files os.chdir(input_data_dir)Create list of samples for use in this step of workflow. Do not include file extensions or steps labels.## Comment for final workflow ls_samples = ['TMA']Import all metadata we need from the QC/EDA chapter metadatafilename = "marker_intensity_metadata.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: " + filename) # Open, read in information metadata = pd.read_csv(filename) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, metadata.shape[0] + 1) print("Ran file length verification.") except: pass # Verify headers exp_cols = ['Round','Target','Channel','target_lower','full_column','marker','location'] compare_headers(exp_cols, metadata.columns.values, "Marker metadata file") # Show some of dataframe - FYI metadata.head()not_intensitiesfilename = "not_intensities.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: "+filename) # Open, read in information not_intensities = [] with open(filename, 'r') as fh: not_intensities = fh.read().strip().split("\n") # take str, strip whitespace, split on new line character # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, not_intensities.shape[0]) print("Ran file length verification.") except: pass # Print to console print("not_intensities = ") print(not_intensities)full_to_short_column namesfilename = "full_to_short_column_names.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: " + filename) # Open, read in information df = pd.read_csv(filename, header = 0) # Verify size # This part is wrapped in a try/except block because 
# it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary full_to_short_names = df.set_index('full_name').T.to_dict('records')[0] # Print information print('full_to_short_names =') print(full_to_short_names)short_to_full_column_namesfilename = "short_to_full_column_names.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: " + filename) # Open, read in information df = pd.read_csv(filename, header = 0) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary short_to_full_names = df.set_index('short_name').T.to_dict('records')[0] # Print information print('short_to_full_names =') print(short_to_full_names)color information Samplesfilename = "sample_color_data.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: " + filename) # Open, read in information df = pd.read_csv(filename, header = 0) df = df.drop(columns = ['hex']) # our tuple of float values for rgb, (r, g, b) was read in # as a string '(r, g, b)'. We need to extract the r-, g-, and b- # substrings and convert them back into floats df['rgb'] = df.apply(lambda row: rgb_tuple_from_str(row['rgb']), axis = 1) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary sample_color_dict = df.set_index('Sample_ID').T.to_dict('rgb')[0] # Print information print('sample_color_dict =') print(sample_color_dict)Channelsfilename = "channel_color_data.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: "+filename) # Open, read in information df = pd.read_csv(filename, header = 0) df = df.drop(columns = ['hex']) # our tuple of float values for rgb, (r, g, b) was read in # as a string '(r, g, b)'. We need to extract the r-, g-, and b- # substrings and convert them back into floats df['rgb'] = df.apply(lambda row: rgb_tuple_from_str(row['rgb']), axis = 1) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary channel_color_dict = df.set_index('Channel').T.to_dict('rgb')[0] # Print information print('channel_color_dict =') print(channel_color_dict)Roundfilename = "round_color_data.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: "+filename) # Open, read in information df = pd.read_csv(filename, header = 0) df = df.drop(columns = ['hex']) # our tuple of float values for rgb, (r, g, b) was read in # as a string '(r, g, b)'. 
We need to extract the r-, g-, and b- # substrings and convert them back into floats df['rgb'] = df.apply(lambda row: rgb_tuple_from_str(row['rgb']), axis = 1) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary round_color_dict = df.set_index('Round').T.to_dict('rgb')[0] # Print information print('round_color_dict =') print(round_color_dict)Cell Typefilename = "celltype_color_data.csv" filename = os.path.join(metadata_dir, filename) # Check file exists if not os.path.exists(filename): print("WARNING: Could not find desired file: "+filename) # Open, read in information df = pd.read_csv(filename, header = 0) df = df.drop(columns = ['hex']) # our tuple of float values for rgb, (r, g, b) was read in # as a string '(r, g, b)'. We need to extract the r-, g-, and b- # substrings and convert them back into floats df['rgb'] = df.apply(lambda row: rgb_tuple_from_str(row['rgb']), axis = 1) # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, df.shape[0] + 1) print("Ran file length verification.") except: pass # Turn into dictionary celltype_color_dict = df.set_index('cell_type').T.to_dict('rgb')[0] # Print information print('celltype_color_dict =') print(celltype_color_dict)Import data# Read in the first row of the file correpsonding to the first sample (index = 0) # in ls_samples # We do not need to specify a directory, since we earlier changed # the current working directory to be that containing these files filename = ls_samples[0] + previous_step_suffix_long + ".csv" # Read in only the first line df = pd.read_csv(filename, index_col = 0, nrows = 1) # Apply the changes to the headers as specified in above funciton df = apply_header_changes(df) # Set variable to hold default header values expected_headers = df.columns.values print("df index name is currently",df.index.name) df.head() print("Used " + ls_samples[0] + previous_step_suffix_long + ".csv to determine the expected, corrected headers for all files.") print("There headers are: \n" + ", ".join([h for h in expected_headers]) + ".") # Set dictionary to hold all individual sample data dfs = {} # iterate through each sample in our list of samples for sample in ls_samples: # Check for existence of file if not os.path.exists(sample+previous_step_suffix_long+".csv"): print("File " + sample+previous_step_suffix_long+".csv" + " does not exist. Removing from analysis...") # Remove from list if not found ls_samples.remove(sample) continue # open the file # set the index to be the first (0-based indexing, so 0th) # column in input file. df = pd.read_csv(sample + previous_step_suffix_long + ".csv", index_col = 0) #, nrows = 500) # use nrows to specify the number of rows you want # Check for empty df # if so, don't continue trying to process df if df.shape[0] == 0: print('Zero content lines detected in ' + sample + ' file.' 'Removing from analysis...') # Remove from list, so further steps won't be looking # for data on this sample. # Note that for lists, we do not need to re-assign # the list when removing an item, i.e., we do not say # 'ls_samples = ls_samples.remove(sample)', since this # operation does not return anything. 
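# Caveat: calling remove() on ls_samples while iterating over it can cause the
# loop to skip the next sample; iterating over a copy, e.g.
# for sample in list(ls_samples):, avoids that.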
ls_samples.remove(sample) continue # Verify that the loaded df are the right length # commenting out because this code did not work on all # machines during testing (failed one PC, succeeded with # one PC and one MacBook) try: verify_line_no(sample + ".csv", df.shape[0] + 1) except: pass # adding 1 because we expect the header was detected # during file import and not counted towards length of df # Manipulations necessary for concatenation df = apply_header_changes(df) # sort them alphanetically df = df[[x for x in sorted(df.columns.values)]] # Compare headers of new df against what is expected compare_headers(expected_headers, df.columns.values, sample) # For cases where we have samples called TMA1.1, TMA1.2, TMA1.3, etc. # Using regular expressions (regex) to extract the characters in the # sample name from TMA to the following digits, stopping at the period #if 'ROI_index' in df.columns.values: # df['ROI_slide'] = re.findall(r'(TMA\d+)',sample)[0] # Add to dictonary of dfs dfs[sample] = df #Merge dfs into one big df df = pd.concat(dfs.values(), ignore_index=False , sort = False) # remove dfs from memory, since its big (relatively) and we # don't need a data struture of all samples' data separated # individually when we can extract information from the big # df using the Sample_ID column del dfsLet's take a look at a few features to make sure our dataframe is as expecteddf.shape df.indexCheck for NaN entries (should not be any unless columns do not align), which can result from stitching together dfs with different values in their headers.# if there are any null values, then print names of columns containing # null values if df.isnull().any().any(): print(df.columns[df.isnull().any()]) #in 'if' statement, false means no NaN entries True means NaN entriesCheck that all expected files were imported into final dataframe by comparing our sample names to the unique values in the Sample_ID column.# Check that all expected files were imported into final dataframe if sorted(df.Sample_ID.unique()) == sorted(ls_samples): print("All expected filenames present in big df Sample_ID column.") else: compare_headers(['no samples'], df.Sample_ID.unique(), "big df Sample_ID column")Log2 transformation## no need to transpose df--non-intensity columns are present # in df but are not transformed by log2 nor by z-scoring # add 1 df.loc[:, ~df.columns.isin(not_intensities)] = \ df.loc[:,~df.columns.isin(not_intensities)].copy() + 1 # apply log2 df.loc[:,~df.columns.isin(not_intensities)] = \ np.log2(df.loc[:, ~df.columns.isin(not_intensities)]) print('log2 transform finished')Z-score transformations# Z-score the rows (apply() with axis = 1, only perform on intensity data) df.loc[:,~df.columns.isin(not_intensities)] = \ df.loc[:,~df.columns.isin(not_intensities)].apply( lambda row: (row - row.median())/(row.std(ddof=0)), axis = 1) df.dropna(how = 'all', inplace = True, axis = 1) print('zscore rows finished') # Z-score the columns (apply() with axis = 0, only perform on intensity data) df.loc[:,~df.columns.isin(not_intensities)] = \ df.loc[:,~df.columns.isin(not_intensities)].apply( lambda row: (row - row.median())/(row.std(ddof=0)), axis = 0) df.dropna(how = 'all', inplace = True, axis = 1) print('zscore columns finished')Visualizations Heatmap We will only be plotting ~10k cells in the interest of time/computing resources. We want these 10k lines in our original df to be sampled randomly, without replacement, with the caveat that the proportions of all samples in the data remains the same in this subset. 
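One way to express that kind of proportion-preserving draw with plain pandas is a grouped sample (a hedged sketch, not the project's `create_subset` helper; the fraction and random seed are arbitrary):

```python
# Sample every Sample_ID group at the same fraction so that the group
# proportions in the subset match those of the full dataframe.
frac = min(1.0, 10000 / len(df))
subset_sketch = df.groupby('Sample_ID').sample(frac=frac, random_state=0)
```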
If the size of the dataframe is > 10k rows, then we will proceed with the entire dataset.subset_row_count = 10000 subset_df = create_subset(df, 'Sample_ID', subset_row_count, 'equal')How many lines for each sample ID are in our subset df?subset_df['Sample_ID'].value_counts().sort_index()How do the proportions of cells in the original and subset dfs compare?df['Sample_ID'].value_counts().sort_index()/df.shape[0] subset_df['Sample_ID'].value_counts().sort_index()/subset_df.shape[0]Get data structures to map colors to columns and rows... Row colors For the row colors, we essentially just need to map the information in a given feature to the colors that correspond to that value in the right color dictionary. For example, it might be sample_3, sample_3, sample_4, , so we need the row colors to be (1, 1, 1), (1, 1, 1), (0, 0.25, 0.6). These are the initialy colors--if we are clustering rows or columns, the labels will still match the data with which they're associated.sample_row_colors = subset_df.Sample_ID.map(sample_color_dict) sample_row_colors[1:5] row_celltype_colors = subset_df.cell_type.map(celltype_color_dict) row_celltype_colors[1:5]Column rows For column rows, matching up the information in each column with the appropriate color is more difficult.# Here, we want to translate marker columns to their corresponding channel information, # and then match that up with the right color, as with row columns # First, we merge the (L) non-intensity column values, transformed into a dataframe, # with the metadata df (R), matching on the "0" column present in the L, # which is the only column in there, with the "full_column" (aka df header name) # column in the R, only including all cases where there is a match and any unmatched # L cases ('both' [?] would be only cases where ther is is a match, and 'right' would # be cases with a match and any unmatched R columns). column_channel_colors = pd.merge(pd.DataFrame(pd.Series( subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), metadata, how = 'left', left_on = 0, right_on = 'full_column')[[0,'Channel']]['Channel'].map(channel_color_dict) # Set the index to be the names of the colors. There is only one column, and that is the corresponding # colors column_channel_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values column_channel_colors.head() # Here, we want to translate marker columns to their corresponding round information, # and then match that up with the right color, as with row columns # First, we merge the (L) non-intensity column values, transformed into a dataframe, # with the metadata df (R), matching on the "0" column present in the L, # which is the only column in there, with the "full_column" (aka df header name) # column in the R, only including all cases where there is a match and any unmatched # L cases ('both' [?] would be only cases where ther is is a match, and 'right' would # be cases with a match and any unmatched R columns). column_round_colors = pd.merge(pd.DataFrame(pd.Series( subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), metadata, how = 'left', left_on = 0, right_on = 'full_column')[[0,'Round']]['Round'].map(round_color_dict) # Set the index to be the names of the colors. 
There is only one column, and that is the corresponding # colors column_round_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values column_round_colors.head()Annotations data structure# Create data structure to hold everything we need for row/column annotations # annotations is a dictionary ## IMPORTANT - if you use 'annotations', it MUST have both 'rows' and 'cols' ## objects inside. These can be empty lists, but they must be there! anns = {} # create a data structure to hold everything we need for only row annotations # row_annotations is a list, where each item therein is a dictioary corresponding # to all of the data pertaining to that particular annotation # Adding each item (e.g., Sample, then Cluster), one at a time to ensure ordering # is as anticipated on figure row_annotations = [] row_annotations.append({'label':'Sample','type':'row','mapping':sample_row_colors,'dict':sample_color_dict, 'location':'center left','bbox_to_anchor':(0, 0.5)}) #row_annotations.append({'label':'Cell type','type':'row','mapping':row_celltype_colors, # 'dict':celltype_color_dict, # 'location':'lower left','bbox_to_anchor':(0, 0.65)})# Add all row information into the annotations dictionary anns['rows'] = row_annotations # Now we repeat the process for column annotations col_annotations = [] col_annotations.append({'label':'Round','type':'column','mapping':column_round_colors,'dict':round_color_dict, 'location':'upper right','bbox_to_anchor':(1,0.50)}) col_annotations.append({'label':'Column','type':'column','mapping':column_channel_colors,'dict':channel_color_dict, 'location':'upper right','bbox_to_anchor':(1,0.75)}) anns['cols'] = col_annotationsActually plot the heatmapheatmap_function( data = subset_df.loc[:,~subset_df.columns.isin(not_intensities)], title = "Z-score heatmap", # define method, metric, and color map method = 'ward', metric = 'euclidean', cmap = 'coolwarm', # colorbar (legend coloring of main plot) cbar_kws = {'label':'Z-score Intens.'}, # xticklabels - want to have the nicknames instead of full names, # so we translate from full to short names; we also only want to include # non_intensity columns, to match the data we fed into under 'data' xticklabels = [full_to_short_names[name] for name in subset_df.loc[:, ~subset_df.columns.isin(not_intensities)].columns.values], # where to save the dataframe save_loc = output_images_dir, # Boolean values for clustering row_cluster = True, col_cluster = True, # provide annotations established aboved annotations = anns )Bar plot of log2/z-score data# Create sorted list of sample IDs samples = sorted(list(df.Sample_ID.copy().unique())) # Get counts for each Sample_ID, sorted by Sample_ID counts = pd.DataFrame(df.Sample_ID.value_counts()).sort_index() counts = counts.rename(columns = {'Sample_ID':'counts'}) counts['Sample_ID'] = counts.index counts['color'] = counts.apply(lambda row: sample_color_dict[row['Sample_ID']], axis = 1) counts.head()Bar plot - cell counts by Sample_IDfig = go.Figure() title = 'Cell counts by Sample_ID - log2 and z-score' for sample in ls_samples: fig.add_trace(go.Bar( x=counts.loc[counts['Sample_ID']==sample,'Sample_ID'], y = counts.loc[counts['Sample_ID']==sample,'counts'], text = counts.loc[counts['Sample_ID']==sample,'counts'], textposition='outside', marker=dict( color='rgb' + str(sample_color_dict[sample]) ), showlegend = False )) fig.update_layout(title = title, plot_bgcolor = 'white') fig.update_xaxes(title_text = 'Sample ID', linecolor = 'black') fig.update_yaxes(title_text = 'Cell 
count', linecolor = 'black') #plot(fig) fig.write_image(output_images_dir + "/" + title.replace(" ","_") + ".png")Distributions of log2/z-score data# per-sample, per-marker distribution plots # change column names for visualization df = df.rename(columns = full_to_short_names) for sample in ls_samples: for marker in [m for m in df.columns.values if m not in not_intensities]: make_distr_plot_per_sample( title = sample + " " + marker,# + " Z-score", dfs = [ df.loc[df['Sample_ID']==sample,:].copy()], df_names = [sample], colors = [sample_color_dict[sample]], x_label = "Z-score intensity", legend = False, markers = marker, location = output_images_dir) # reinstate column names df = df.rename(columns = short_to_full_names)Correlation plot Get Pearson correlations and P values for all marker values First, get we need to determine how many columns we will be evaulating. And prepare empty Numpy arrays to hold our data.n_corr_cols = len(df.columns[~df.columns.isin(not_intensities)]) print(n_corr_cols) pvalues = np.empty((n_corr_cols, n_corr_cols)) corrvalues = np.empty((n_corr_cols,n_corr_cols))Next, we create a dataframe of only the columns we will use for this analysis.for_corr = df.loc[:,~df.columns.isin(not_intensities)].copy() for_corr = for_corr.rename(columns = full_to_short_names) for_corr.shapeThen, we iterate through each pair of columns, calculate the Pearson correlation and associated p-value, and then store the values.for i in range(for_corr.shape[1]): for j in range(0,for_corr.shape[1]): col1 = for_corr[for_corr.columns.values[i]] col2 = for_corr[for_corr.columns.values[j]] corrvalues[i,j] = pearsonr(col1,col2)[0] pvalues[i,j] = pearsonr(col1,col2)[1]To prepare for visualization, we are rounding the values.corrvalues = pd.DataFrame(corrvalues).round(3) corrvalues.columns = for_corr.columns.values corrvalues.index = for_corr.columns.values pvalues = pd.DataFrame(pvalues) pvalues.columns = for_corr.columns.values pvalues.index = for_corr.columns.valuesOption 1 Option 1: no correlation value on plot, just put p value and have star is p<=0.05# function to append astetisk to row values meeting significance level def p_add_star(row): sig_leve = 0.05 # This is a lon glist comprehension that uses if/else to determine output m = [str('{:0.3e}'.format(m)) + "*" if m <= 0.05 \ else str('{:0.3e}'.format(m)) for m in row ] return pd.Series(m) # Create a copy of the original df p_w_star = pvalues.copy() # Append asterisk to significant values p_w_star = p_w_star.apply(lambda row: p_add_star(row), axis = 1) # Supply column names - they were erased during the previous step p_w_star.columns = for_corr.columns.values # FYI - display p_w_star.head() ### Correlation visualization sb.set() # Label axes x_axis_labels = for_corr.columns.values.tolist() y_axis_labels = for_corr.columns.values.tolist() # Plot data ax = sb.heatmap( # Data for plotting corrvalues, # Annotation - the display text on boxes annot=p_w_star, # Annotation keywords. "size" adjusts font sixe annot_kws={"size": 1.25}, # Format = string for annotations fmt='s', # Set labels fo x and y axes xticklabels=x_axis_labels, yticklabels=y_axis_labels, # Color bay heywords. 
Here we label it to represent what the color represents cbar_kws = {'label':'Pearson correlation'}, # Add black lines of designated width and color between boxes linecolor = 'black', linewidth = 0.5, # Colormap to use to color boxes cmap = 'coolwarm' ) # Make axis tick lengths 0 ax.tick_params(length=0) # Adjust y-axis labels plt.yticks(rotation=0, size = 5) # Adjust x-axis labels ax.xaxis.tick_top() # supply x-ticks along top of blot ax.xaxis.set_label_position('top') # move x-axis labels to top of plot, instead of along bottom plt.xticks(rotation=45, size = 5) plt.setp(ax.xaxis.get_majorticklabels(), ha='left') # align along left # Adjust title and general appearance ax.set_title(label = "Correlations option 1", fontsize = 20) plt.tight_layout() # Plot output filename = "correlations_option1.png" filename = os.path.join(output_images_dir, filename) plt.savefig(filename,dpi=500)Option 2 Option 2: include correlation and 1-3 stars depending on p-value. 1 star: p-value <= 0.05; 2 stars: p-value <= 0.01; 3 stars: p-value <= 0.001.# function to replace significance levels in rows with apprpriate number of stars based on threshold levels def p_to_star(row): output = [] thrd1 = 0.001 thrd2 = 0.01 thrd3 = 0.05 for item in row: # Determine appropirate number of stars if item <= thrd1: stars = 3 elif item <= thrd2: stars = 2 elif item <= thrd3: stars = 1 else: stars = 0 value = '' # Construct star string of appr. length for i in range(stars): value += '*' output.append(value) return pd.Series(output) # Create copy of original df p_as_stars = pvalues.copy() # Replace values with appropriate number of stars p_as_stars = p_as_stars.apply(lambda row: p_to_star(row), axis = 1) # Suuply headers - previous step erased them p_as_stars.columns = for_corr.columns.values # FYI - display p_as_stars.head() # View final df - join corr and stars corr_w_star = corrvalues.round(2).astype(str) + p_as_stars corr_w_star.head() corrvalues.shape corr_w_star.shape ### Correlation visualization sb.set() # Axes labels x_axis_labels = for_corr.columns.values.tolist() y_axis_labels = for_corr.columns.values.tolist() # Construct plot ax = sb.heatmap( # data to be plotted corrvalues, # Suuply annotations - the text to be displayed in each box annot=corr_w_star, # Annotation keywords - adjust font size annot_kws={"size": 1.25}, # Format of annotation is string fmt='s', # Supply axies labels xticklabels=x_axis_labels, yticklabels=y_axis_labels, # Colorbar keywords - label for what the color scale represents cbar_kws = {'label':'Pearson correlation'}, # Add lines of designated width and color between boxes linecolor = 'black', linewidth = 0.5, # Determine colormap to be used for coloring cmap = 'coolwarm' ) # Make axis tick lengths 0 ax.tick_params(length=0) # Adjust y-axis aesthetics plt.yticks(rotation=0, size = 5) # Adjust x-axis aesthetics ax.xaxis.tick_top() ax.xaxis.set_label_position('top') # x-axis labels on top plt.xticks(rotation=45, size = 5) plt.setp(ax.xaxis.get_majorticklabels(), ha='left') # align left # Adjust title and general appearance ax.set_title(label = "Correlations option 2", fontsize = 20) plt.tight_layout() # Plot output filename = "correlations_option2.png" filename = os.path.join(output_images_dir, filename) plt.savefig(filename,dpi=500)Save correlations and p-value datafilename = "zscore_pearson_correlations.csv" filename = os.path.join(output_data_dir, filename) corrvalues.to_csv(filename, index = True) filename = "zscore_pearson_p-values.csv" filename = os.path.join(output_data_dir, filename) 
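```python
# Optional cross-check (an added sketch, not part of the original notebook): pandas can
# compute the same Pearson correlation matrix in a single call, which is a handy way to
# verify the nested pearsonr loop above. `for_corr` and `corrvalues` are the objects
# defined earlier; p-values still require scipy.stats.pearsonr as in that loop.
corr_check = for_corr.corr(method='pearson').round(3)
print(np.allclose(corr_check.values, corrvalues.values, atol=1e-3))  # expected: True
```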
pvalues.to_csv(filename, index = True)Drop any other rows or columns we want to before saving data# Let's take a look df.columns.valuesFor the sake of example, I will operate on a copy of df, called df_copy# You MUST use df.copy() # 'df_copy = df' would essentially # give you two different names for the # SAME dataframe, so operating on one # would also operate on the other df_copy = df.copy()Operate on entire rows or columns# Drop columns my_cols = [] df_copy = df_copy.drop(columns = my_cols) # Keep only specific columns (explained below) my_cols = [] my_cols = df.columns.values df_copy = df_copy.loc[:,my_cols]Operate on rows and columns using filtering criteria# Keep only certain rows based on criteria # use df.loc[] to filter # df.loc[rows,columns] # df.loc[:,certain_cols] --> keep all rows ':', only certain cols # df.loc[certain_rows,:] --> keep only certain rows, all cols ':' # Say we only want certain values for Sample_ID print(df_copy.Sample_ID.unique()) keep = ['TMA1.1','TMA1.2','TMA1.3','TMA2.1','TMA2.2','TMA2.3'] df_copy = df_copy.loc[df_copy['Sample_ID'].isin(keep),:] print(df_copy.Sample_ID.unique()) # Filter on multiple criteria # '&' means 'and' # '|' means 'or' # you MUST have parentheses around each logic expression! df_copy = df_copy.loc[ (df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \ ## backslash above breaks the line for readability but tells Python to treat it as one line | (df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])),:] print(df_copy.Sample_ID.unique()) # Remove rows based on certain criteria # note the negating tilde '~'! df_copy = df_copy.loc[ (~df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \ ## backslash above breaks the line for readability but tells Python to treat it as one line & (~df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])),:] print(df_copy.Sample_ID.unique())Save the data by Sample_ID# Check for existence of output files first for sample in ls_samples: filename = sample + "_" + step_suffix + ".csv" filename = os.path.join(output_data_dir, filename) if os.path.exists(filename): print("File by name "+filename+" already exists.") # Save output files for sample in ls_samples: df_save = df.loc[df['Sample_ID'] == sample,:] filename = sample + "_" + step_suffix + ".csv" filename = os.path.join(output_data_dir, filename) df_save.to_csv(filename, index = True)IntroductionIf you are reading this document you have probably already installed `ingenialink`. If not, make sure you have Python installed (3.x recommended), then simply open a terminal and type:```shpip install ingenialink```Note that on some operating systems you may first need to install [some dependencies](https://github.com/ingeniamc/ingenialink-python).In order to use `ingenialink` you will first need to import it like this:import ingenialink as ilFurthermore, if you need to use pre-defined constants, you may also want to import the `const` module.from ingenialink import constFinally, in this notebook we will also plot some servo drive variables using `matplotlib`:import matplotlib.pyplot as plt %matplotlib inlineNetwork and servo drivesThe first step when using `ingenialink` is to set up the network and then look for attached servo drives. The network can be seen as the information transport medium from which servo drives hang. Multiple network protocols are supported by the library.In the case of the E-USB protocol, Ingenia servo drives show up as serial ports, so the network candidate devices are simply all the available serial ports.
You can obtain the available ones like this:il.devices(il.NET_PROT.EUSB)Once you know which one it is, simply create a network object:net = il.Network(il.NET_PROT.EUSB, '/dev/ttyACM0')Then you can perform a network scan to look for attached servo drives. In this example we will simply pick the first available:servo_ids = net.servos() first_id = servo_ids[0] print('Will use servo with id: {}'.format(first_id))Will use servo with id: 2Next step is to create the servo object. It needs to know the network you want to use and the servo id:servo = il.Servo(net, first_id)You can quickly obtain some servo drive information like this:servo.infoDictionariesIngenia's servo drives have hundreds of registers you can read/write from/to. They are defined in the servo drive dictionary, which may differ depending on the product and firmware version. IngeniaLink supports XML dictionaries in the `IngeniaDictionary` format. You can load a dictionary by using the `Dictionary` class, or you can even give a dictionary path when creating a servo so that you can directly access registers using identifiers (see the next section). The `Dictionary` class contains a registers dictionary (`regs` property) and a categories dictionary (`cats` property). Both operate as standard Python dictionaries (i.e. you can iterate, get, etc.).dct = il.Dictionary('dict_demo.xml') reg = dct.regs['VEL_ACT'] print(reg) print('Labels:') for lang, label in reg.labels.items(): print(lang, label) Labels: en_US Velocity actual ca_ES Velocitat actual es_ES Velocidad actualDictionaries also contain storage information, so that you can keep save the current state of all servo registers in a file and restore them back. Storage information is avaialable on each register (if available) via the `storage` property. Below it is shown how to update the storage from servo values and then store a new dictionary which contains them.# load a dictionary to the servo servo.dict_load('dict_demo.xml') # read all register values and add them to storage # NOTE: if the loaded dictionary contains storage information, # you can also use `dict_storage_write` to upload the dictionary # storage. 
servo.dict_storage_read() # store current dictionary (will now contain storage info) servo.dict.save('dict_demo_with_storage.xml') print(servo.dict.regs['VEL_TGT'].storage)0Dictionaries also contain register categorization, which can be helpful for displaying registers when building a GUI register explorer.# List ALL categories and subcategories print('Categories:') print('-' * 30) cats = dct.cats for cat_id in cats.cat_ids: print('ID:', cat_id) print('Labels:') for lang, label in cats.labels(cat_id).items(): print('\t', lang, label) print('Subcategories:') for scat_id in cats.scats(cat_id).scat_ids: print('\tID:', scat_id) print('\tLabels:') for lang, label in cats.scats(cat_id).labels(scat_id).items(): print('\t\t', lang, label) print('-' * 30) # Obtain specific register information print('Register:', reg) print('Category:', reg.cat_id) print('Subcategory:', reg.scat_id) print('Category labels:') for lang, label in dct.cats.labels(reg.cat_id).items(): print(lang, label) print('Subcategory labels:') for lang, label in dct.cats.scats(reg.cat_id).labels(reg.scat_id).items(): print(lang, label)Categories: ------------------------------ ID: MOTION Labels: en_US Motion Subcategories: ID: VELOCITY Labels: en_US Velocity ------------------------------ Register: Category: MOTION Subcategory: VELOCITY Category labels: en_US Motion Subcategory labels: en_US VelocityAccessing registersAs mentioned before, IngeniaLink supports loading a dictionary when creating a servo. If you do so, you will be able to read/write using each register unique ID as defined in the dictionary. In fact, the library accepts both string IDs or `Register` objects whenever a register is required. You can still operate without a dictionary as IngeniaLink provides abstraction for most common operations, so you will not use registers directly in most applications. Furthermore, you can also define the registers manually.You can read registers by using `read` and `raw_read` `Servo` functions. The first will take care of unit conversions if this applied to the requested register. Units are detailed in the next section. Similarly, for write you have `write` and `raw_write`.# load dictionary (after creation, could be done at servo creation) servo.dict_load('dict_demo.xml') # dictionary instance can be obtained (e.g. for querying or inspecting) print(servo.dict) # read (using IDs) print(servo.read('TORQUE_MAX')) print(servo.raw_read('FB_POS_SENSOR')) # write (using IDs and pre-defined Register) servo.write('VEL_TGT', 1000) CTL_WORD = il.Register(0x006040, il.REG_DTYPE.U16, il.REG_ACCESS.RW) servo.raw_write(CTL_WORD, 0x06) 0.0 0Motion controlWe all know you were waiting for this section! If you have a servo drive is to perform some kind of motion control! It is important to know at this point that **your servo drive needs to be properly tuned and configured**. This can be done using our configuration tool [MotionLab](http://ingeniamc.com/software). Operation modeIngenia servo drives can work in multiple operation modes: profile/cyclic position, profile/cycling velocity, homing, etc. Detailed information about all the available modes can be found [here](http://doc.ingeniamc.com/display/EMCL/Modes+of+operation). If you want to control position, you will likely use the profile position. When using the profile modes velocity and acceleration of your movements are controlled by a trapezoidal profiler. In cyclic modes the profiler needs to be implemented by yourself. While it gives more flexibility, it is a non-trivial task. 
In this notebook, only profile modes will be demonstrated.The operation mode can be set like this:# set the operation mode to profile position servo.mode = il.SERVO_MODE.PPOperation unitsIngenia servo drives operate internally using native units, that is, units relative to the available feedbacks. This means that the position is given in counts, velocity in counts per second, and so on. `ingenialink` offers the possibility to work using physical units, for example, in revolutions, degrees, rpm, etc. You can currently set the operating units of position, velocity, torque and acceleration.# we want to work in degrees and RPM servo.units_pos = il.SERVO_UNITS_POS.DEG servo.units_vel = il.SERVO_UNITS_VEL.RPMEnabling and disabling the power stageOnce you have your mode selected, you can proceed to enable the power stage of the servo drive. Unless the power stage is enabled, no actions can be performed.# enable power stage servo.enable(timeout=2.) # disable power stage servo.disable()HomingIf working in position mode, unless you have an absolute reference sensor you will need to perform a homing every time you turn the system on so that the servo drive *knows where it is*. Multiple homing modes are available (see MotionLab). In order to perform a homing, you simply need to set the homing mode, enable the motor and then trigger the homing operation as shown below.servo.mode = il.SERVO_MODE.HOMING servo.enable() servo.homing_start() servo.homing_wait(timeout=5.) servo.disable()Position controlServo position can be easily controlled as shown in the code below. Note that the system tolerances will need to be properly set in order to use `wait_reached`.servo.mode = il.SERVO_MODE.PP print('Current position is: {:.2f} deg'.format(servo.position)) print('Moving to 180 deg...') servo.enable() servo.position = 180. servo.wait_reached(timeout=1) print('New position is: {:.2f} deg'.format(servo.position)) servo.disable()Current position is: 0.00 deg Moving to 180 deg... New position is: 173.07 degVelocity controlServo velocity can be easily controlled as shown in the code below:servo.mode = il.SERVO_MODE.PV print('Current velocity is: {:.2f} rpm'.format(servo.velocity)) print('Setting velocity to 1000 rpm...') servo.enable() servo.velocity = 1000. servo.wait_reached(timeout=3) print('Current velocity is: {:.2f} rpm'.format(servo.velocity)) servo.disable()Current velocity is: 999.00 rpm Setting velocity to 1000 rpm... Current velocity is: 1000.50 rpmObserving servo drive parametersIt is always useful to obtain plots of servo operation variables. For this purpose we have two mechanisms: polling and monitoring. Below they are detailed together with some examples. PollingWhat polling does, as its name suggests, is to poll certain registers continuously from the PC. Usually polling is used for real-time plotting or when high accuracy is not required. When using polling, you can roughly sample one register at 500 samples/s continuously (decreasing as more registers are polled).
Below a sequence of position movements is polled.# create a poller and configure it poller = il.Poller(servo, n_ch=2) # set sample period of 5 ms and buffer of 2k samples poller.configure(t_s=5e-3, sz=2000) # configure channels to poll position and velocity poller.ch_configure(ch=0, reg='POS_ACT') poller.ch_configure(ch=1, reg='VEL_ACT') # perform homing servo.mode = il.SERVO_MODE.HOMING servo.enable() servo.homing_start() servo.homing_wait(timeout=5) servo.disable() # perform movements servo.mode = il.SERVO_MODE.PP servo.enable() poller.start() positions = [0, 90, 180, 90, 0] for position in positions: servo.position = position servo.wait_reached(timeout=1) servo.disable() poller.stop() # obtain results t, d, lost = poller.data if lost: print('Data was lost! Increase buffer size or sample time!') # plot results plt.figure() plt.subplot(211) plt.plot(t, d[0]) plt.xlabel('Time (s)') plt.ylabel('Position (deg)') plt.subplot(212) plt.plot(t, d[1]) plt.xlabel('Time (s)') plt.ylabel('Velocity (rpm)') plt.show()MonitoringMonitoring is done on the servo drive side and allows to sample up to 10000 samples/s but, it is single shot, the number of samples is limited and supports up to 4 channels. Furthermore, monitoring offers trigger capabilities. For more details about monitoring capabilities, see this [page](http://doc.ingeniamc.com/display/EMCL/Monitoring). Below you can find an example that captures the rising curve of velocity. This may be useful, for example, to see if the system is properly tuned.# target velocity (rpm) VEL_TGT_VAL = 1000 # create monitor and configure it monitor = il.Monitor(servo) # set sample period to 1 ms, maximum of 200 samples monitor.configure(t_s=1e-3, max_samples=200) # monitor velocity monitor.ch_configure(ch=0, reg='VEL_ACT') # configure trigger to 90% of the target velocity monitor.trigger_configure(il.MONITOR_TRIGGER.POS, source='VEL_ACT', th_pos=VEL_TGT_VAL * 0.9) # enable servo servo.mode = il.SERVO_MODE.PV servo.enable() monitor.start() servo.velocity = VEL_TGT_VAL monitor.wait(timeout=3) servo.disable() # plot results t, d = monitor.data plt.plot(t, d[0]) plt.xlabel('Time (s)') plt.ylabel('Velocity (rpm)') plt.ylim([800, 1200]) plt.show()---title: "GitHub Gists for GIS in Python: Loading a Zipped Local or Web-based Shapefile with One Function" author: twitter: linwoodc3summary: This post introduces a utility function that can automatically read web-based or local shapefiles in zip format into the Python ecosystem. It takes one line of code!excerpt: "In the world of data science, we embrace the concept of spatial awareness and knowing where the data are (or datum is). In the same way that geospatial grounding (i.e. georeferenced data) brings clarity to a lost traveler, spatial context can bring clarity to a data set. Moreover, this “where” does not always have to apply to a location on the earth’s surface . Spatial context (i.e. analytic geometry), or understanding data in the context of geometric space, is just as enlightening."--- GitHub Gists for GIS in Python: Loading a Zipped Local or Web-based Shapefile with One Function **Date:** {{ page.date | date_to_rfc822 }}There is nothing worse than not knowing where you are.We have all experienced it. It’s the panic that overtakes you when you don’t recognize your surroundings. The buildings and roads are unfamiliar. You don’t know where you are. Naturally, you focus on getting to a familiar landmark or location. Reaching that landmark brings a sense of relief. Comfort. Peace. 
Because you know where you are on a map, it’s easier to plot a course to your final destination.In the world of data science, we embrace the concept of spatial awareness and knowing where the data are (or datum is). In the same way that familiar surroundings (i. e. [geo-referenced data](https://en.wikipedia.org/wiki/Georeferencing)) brings clarity to a lost traveler, spatial context can bring clarity to a data set. This “where” does not always have to apply to a location on the earth’s surface. Spatial context (i.e. [analytic geometry](https://en.wikipedia.org/wiki/Analytic_geometry)), or understanding data in geometric space, is just as enlightening.[Ansecombe’s quartet](https://en.wikipedia.org/wiki/Anscombe%27s_quartet) is a great example. Despite having nearly same summary statistics, the plots are nowhere near same. This is a reminder to plot your data before drawing a conclusion. It can prevent costly errors. Python's `seaborn` library includes this data set, and we load it and compute the summary statistics. Each row is a data set, and it's clear that the numbers are nearly identical.import seaborn as sns df = sns.load_dataset('anscombe') ddf = df.groupby('dataset').describe().unstack() All = slice(None) test = ddf.T.loc[(All,slice('mean','std')),All] test import seaborn as sns import pandas as pd df = sns.load_dataset('anscombe') ddf = pd.concat([df.groupby('dataset').std(),df.groupby('dataset').mean()],axis=1) print(ddf) import pytablewriter writer = pytablewriter.MarkdownTableWriter() writer.from_dataframe(d) writer.write_table() # print(tabulate.tabulate(ddf)) import geopandas as gpd import numpy as np import requests import gdal import fiona import uuid import re # informaed by: https://gis.stackexchange.com/questions/225586/reading-raw-data-into-geopandas/225627 def shapefilereader(target): """Function to convert zipped shapefiles from the web or on disk into geopandas dataframes Parameters ---------- target : str string representing path to file on disk or url to download the zipped shapefile. Returns ------- Geopandas dataframe Pandas dataframe with geospatial features and operations. """ # Detect whether we are using a web-based shapefile or local disk r = re.compile('^(http|https)://',re.I) if r.search(target): download = True request = requests.get(target) target = '/vsimem/{}.zip'.format(uuid.uuid4().hex) #gdal/ogr requires a .zip extension gdal.FileFromMemBuffer(target,bytes(request.content)) else: download = False with fiona.Collection(target,vsi='zip') as f: return gpd.GeoDataFrame.from_features(f,crs=f.crs) def shaper(row): """ Parallel function to create shapely points from latitude/longitude pair in dataframe Parameters ---------- row : pandas or dask dataframe row Row containing latitude and longitude variables and data Returns ------- shapely point Shapely spatial object for geoprocessing in Python. 
""" geometry=Point(row['longitude'],row['latitude']) return geometry import dask.dataframe as dd gpd.pd.read_hdf() ds = dd.read_hdf('/Users/linwood/projects/Blogs/drafts/geolocated_social_transcends_political_barriers/data/tweetLanguages.h5','tweets') d = (ds[(ds.latitude >=40) & (ds.latitude <=45) & (ds.longitude >=-80) & (ds.longitude<=-70)]).compute() d.info(memory_usage='deep') ny = shapefilereader('https://data.cityofnewyork.us/download/i8iw-xf4u/application%2Fzip') ny= ny.to_crs({'init':"epsg:4326"}) %matplotlib inline import matplotlib.pyplot as plt f,ax = plt.subplots(figsize=(15,9)) nytweets.plot(color='red',ax=ax) ax.set_axis_off() plt.savefig('./assets/img/newyorktweets.png') f,ax = plt.subplots(figsize=(15,9)) ax.set_facecolor('lightblue') ny.plot(color='tan',ax=ax) ax.set_axis_off() plt.savefig('./assets/img/newyorkzips.png') # d.plot(kind='scatter',x='longitude',y='latitude',ax=ax) from IPython.display import Image Image('./assets/img/newyorktweets.png') from shapely.geometry import Point dg = gpd.GeoDataFrame(d.assign(geometry=d.apply(shaper,axis=1)),crs = {'init': 'epsg:4326'}) nytweets = gpd.sjoin(dg.reset_index(),ny[['ZIPCODE','geometry','CTY_FIPS']],op='intersects',how='inner') f,ax=plt.subplots(frameon=False,figsize=(15,9)) ny.plot(color='white',linewidth=0.2,ax=ax) nytweets.plot(color='red',ax=ax,alpha=0.1) ax.set_axis_off() plt.savefig('./assets/img/newyorkspatialjoin.png') import pytablewriter writer = pytablewriter.MarkdownTableWriter() writer.from_dataframe(test) writer.write_table() countedTweets = nytweets.groupby('ZIPCODE')['ZIPCODE']\ .size().sort_values(ascending=False)\ .reset_index().rename(columns={0:'tweetcount'}) final = ny[['ZIPCODE','geometry']].merge(countedTweets,on='ZIPCODE') f,ax = plt.subplots(figsize=(15,9)) final.plot(column='tweetcount', scheme='Fisher_Jenks', k=5, cmap='OrRd', linewidth=0.1, ax=ax,legend=True) ax.set_axis_off() plt.savefig('./assets/img/newyorkchoropleth.png')***01*** - Importing Dependenciesimport os import cv2 import numpy as np #import matplotlib.pylot as plt import pandas as pd import tensorflow as pd from keras.models import Sequential, Model from keras.applications.xception import Xception from keras.preprocessing.image import ImageDataGenerator from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten from keras.models import Sequential, Model from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GlobalMaxPooling2D, GlobalAveragePooling2D from keras.layers.normalization import BatchNormalization from keras.models import Sequential, Model from keras.applications.vgg16 import VGG16, preprocess_input from keras.preprocessing.image import ImageDataGenerator,load_img, img_to_array from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, SeparableConv2D,GlobalAveragePooling2D from keras.layers import GlobalMaxPooling2D from keras.layers.normalization import BatchNormalization from keras.layers.merge import Concatenate from keras.models import Model from keras import backend as K from keras.utils.vis_utils import plot_model from keras.utils import to_categorical !pip install livelossplot from livelossplot import PlotLossesKerasCollecting livelossplot Downloading https://files.pythonhosted.org/packages/07/9d/54f8a93d65eece0bcd475b191c4c9a3bff9dbf993db8d5e2d02b76c2d2c3/livelossplot-0.3.3-py3-none-any.whl Requirement already satisfied: matplotlib in 
/opt/conda/lib/python3.6/site-packages (from livelossplot) (3.0.3) Requirement already satisfied: notebook in /opt/conda/lib/python3.6/site-packages (from livelossplot) (5.5.0) Requirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->livelossplot) (1.0.1) Requirement already satisfied: python-dateutil>=2.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->livelossplot) (2.6.0) Requirement already satisfied: numpy>=1.10.0 in /opt/conda/lib/python3.6/site-packages (from matplotlib->livelossplot) (1.16.2) Requirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.6/site-packages (from matplotlib->livelossplot) (0.10.0) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /o[...]***02*** - Output Files / Parameters / Hyperparameters### OUTPUT FILES train_data = '../input/chest_xray/chest_xray/train/' val_data = '../input/chest_xray/chest_xray/val' test_data = '../input/chest_xray/chest_xray/test' normal_data_dir = '../input/chest_xray/chest_xray/train/NORMAL/' pneumonia_data_dir = '../input/chest_xray/chest_xray/train/PNEUMONIA/'***03*** - Helper Functionimport math # used by step_decay def step_decay(epoch): initial_lrate = 0.1 drop = 0.5 epochs_drop = 5.0 lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop)) return lrate def show_final_history(history): fig, ax = plt.subplots(1, 2, figsize=(15,5)) ax[0].set_title('loss') ax[0].plot(history.epoch, history.history["loss"], label="Train loss") ax[0].plot(history.epoch, history.history["val_loss"], label="Validation loss") ax[1].set_title('acc') ax[1].plot(history.epoch, history.history["acc"], label="Train acc") ax[1].plot(history.epoch, history.history["val_acc"], label="Validation acc") ax[0].legend() ax[1].legend()***04*** - Data Augmentation / Data Generatoraugs = ImageDataGenerator( rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) val_augs = ImageDataGenerator(rescale=1./255) train_gen = augs.flow_from_directory( train_data, target_size=(224, 224), batch_size=8, class_mode='binary') test_gen = val_augs.flow_from_directory( test_data, target_size=(224, 224), batch_size=8, class_mode='binary') val_gen = val_augs.flow_from_directory( val_data, target_size=(224, 224), batch_size=1, shuffle=False, class_mode='binary')Found 5216 images belonging to 2 classes. Found 624 images belonging to 2 classes. Found 16 images belonging to 2 classes.
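Before setting hyperparameters, it can be worth confirming how the class folders were mapped to labels and what a single batch looks like. The short check below is an added sketch, not part of the original notebook, and relies only on the generators defined above:

```python
# Sanity check (added sketch): inspect the class-to-label mapping and one batch.
print(train_gen.class_indices)        # e.g. {'NORMAL': 0, 'PNEUMONIA': 1}
x_batch, y_batch = next(train_gen)    # one augmented batch from the training generator
print(x_batch.shape, y_batch.shape)   # expected: (8, 224, 224, 3) (8,)
```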
***05*** - Parameters / HyperparametersIMG_HEIGHT = 224 IMG_WIDTH = 224 IMG_CHANNEL = 3 IMG_SIZE = IMG_HEIGHT, IMG_WIDTH IMG_SIZES = IMG_HEIGHT, IMG_WIDTH, IMG_CHANNEL NUM_EPOCH = 75 STEPS_PER_EPOCH = len(train_gen) * 2 #steps per epoch 216 VALIDATION_STEPS = len(val_gen) * 20 #validation steps 16***05*** - Callbacksfrom keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, CSVLogger, LearningRateScheduler, ReduceLROnPlateau model_checkpoint = ModelCheckpoint('./base.model', monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=False, period=1) earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=30, verbose=1, mode='auto') tensorboard = TensorBoard(log_dir = './logs', histogram_freq=0, batch_size=16, write_graph=True, write_grads=True, write_images=False) csv_logger = CSVLogger(filename= "training_csv.log", separator = ",", append = False) lrsched = LearningRateScheduler(step_decay,verbose=1) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=5, verbose=1, mode='auto', min_delta=0.0001, cooldown=1, min_lr=0.0001) callbacks = [model_checkpoint, earlystop, tensorboard, csv_logger, reduce_lr]***06*** - Model Architecture ***06.1*** - DenseNet169from keras.applications.densenet import DenseNet169 base_1 = DenseNet169(include_top=False, weights='imagenet', input_shape=IMG_SIZES) for layer in base_1.layers[:55]: layer.trainable = False '''for layer in base_1.layers: print(layer, layer.trainable)''' model_1 = Sequential() model_1.add(base_1) model_1.add(GlobalAveragePooling2D()) model_1.add(Dense(1024, activation='relu')) model_1.add(Dropout(0.5)) model_1.add(Dense(1024, activation='relu')) model_1.add(Dropout(0.5)) model_1.add(Dense(1, activation='sigmoid')) model_1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) history_1 = model_1.fit_generator(train_gen, epochs=NUM_EPOCH, steps_per_epoch=STEPS_PER_EPOCH, validation_data=test_gen, validation_steps=VALIDATION_STEPS, callbacks=callbacks, verbose=1)***06.2*** - InceptionV3from keras.applications.inception_v3 import InceptionV3 base_2 = InceptionV3(include_top=False, weights='imagenet', input_shape=IMG_SIZES) for layer in base_2.layers[:87]: layer.trainable = False '''for layer in base_2.layers: print(layer, layer.trainable)''' model_2 = Sequential() model_2.add(base_2) model_2.add(GlobalAveragePooling2D()) model_2.add(Dense(1, activation='sigmoid')) model_2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) history_2 = model_2.fit_generator(train_gen, epochs=75, steps_per_epoch=300, validation_data=test_gen, validation_steps=300, callbacks=callbacks, verbose=1)***06.3*** - NASNetLargefrom keras.applications.nasnet import NASNetLarge base_3 = NASNetLarge(include_top=False, weights='imagenet', input_shape=IMG_SIZES) for layer in base_3.layers[:87]: layer.trainable = False '''for layer in base_3.layers: print(layer, layer.trainable)''' model_3 = Sequential() model_3.add(base_3) model_3.add(GlobalAveragePooling2D()) model_3.add(Dense(1, activation='sigmoid')) model_3.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) history_3 = model_3.fit_generator(train_gen, epochs=75, steps_per_epoch=300, validation_data=test_gen, validation_steps=300, callbacks=callbacks, verbose=1)
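As an optional follow-up (not part of the original notebook), any of the fitted models can be scored on the held-out test generator. The call below assumes the Keras version in use still provides `evaluate_generator` (newer versions fold this into `evaluate`) and uses `model_1` and `test_gen` from above:

```python
# Added sketch: score one of the trained models on the test generator.
test_loss, test_acc = model_1.evaluate_generator(test_gen, steps=len(test_gen))
print('Test loss: {:.4f}  Test accuracy: {:.4f}'.format(test_loss, test_acc))
```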
***06.4*** - InceptionResNetV2'''from keras.applications.inception_resnet_v2 import InceptionResNetV2 base_6 = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=IMG_SIZES)''' '''model_6 = Sequential() model_6.add(base_6) model_6.add(GlobalAveragePooling2D()) model_6.add(Dense(1024, activation='relu')) model_6.add(Dropout(0.5)) model_6.add(Dense(1024, activation='relu')) model_6.add(Dropout(0.5)) model_6.add(Dense(1, activation='sigmoid'))''' '''model_6.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) history_6 = model_6.fit_generator(train_gen, epochs=75, steps_per_epoch=300, validation_data=test_gen, validation_steps=300, callbacks=callbacks, verbose=1)'''***07*** - Showing the Result / Plotting the Model / Evaluation#show_final_history(history_1)5.3 Implementing an LSTM that predicts time-series dataPrediction of time-series data using an LSTM# set to use CPU import os os.environ['CUDA_VISIBLE_DEVICES'] = '-1'5.3.1 Importing the library packages1. Import the libraries needed to implement the LSTM modelimport pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn import model_selection import seaborn as sns from keras import models, layers from keraspp import skeras5.3.2 Running the code and viewing the results2. Before looking at the detailed code, the part that creates and runs the machinedef main(): machine = Machine() machine.run(epochs=400)5.3.3 Training and evaluating3. The Machine class is the platform that trains and evaluates the time-series LSTMclass Machine(): def __init__(self): self.data = Dataset() shape = self.data.X.shape[1:] self.model = rnn_model(shape) def run(self, epochs=400): d = self.data X_train, X_test, y_train, y_test = d.X_train, d.X_test, d.y_train, d.y_test X, y = d.X, d.y m = self.model h = m.fit(X_train, y_train, epochs=epochs, validation_data=[X_test, y_test], verbose=0) skeras.plot_loss(h) plt.title('History of training') plt.show() yp = m.predict(X_test) print('Loss:', m.evaluate(X_test, y_test)) plt.plot(yp, label='Prediction') plt.plot(y_test, label='Original') plt.legend(loc=0) plt.title('Validation Results') plt.show() yp = m.predict(X_test).reshape(-1) print('Loss:', m.evaluate(X_test, y_test)) print(yp.shape, y_test.shape) df = pd.DataFrame() df['Sample'] = list(range(len(y_test))) * 2 df['Normalized #Passengers'] = np.concatenate([y_test, yp], axis=0) df['Type'] = ['Original'] * len(y_test) + ['Prediction'] * len(yp) plt.figure(figsize=(7, 5)) sns.barplot(x="Sample", y="Normalized #Passengers", hue="Type", data=df) plt.ylabel('Normalized #Passengers') plt.show() yp = m.predict(X) plt.plot(yp, label='Prediction') plt.plot(y, label='Original') plt.legend(loc=0) plt.title('All Results') plt.show()5.3.4 LSTM time-series regression modeling4. Constructing the LSTM model for regression on time-series datadef rnn_model(shape): m_x = layers.Input(shape=shape) #X.shape[1:] m_h = layers.LSTM(10)(m_x) m_y = layers.Dense(1)(m_h) m = models.Model(m_x, m_y) m.compile('adam', 'mean_squared_error') m.summary() return m
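As a quick optional check (not part of the original code), the model builder above can be called with the window shape used in this chapter to confirm its input and output dimensions:

```python
# Added sketch: D=12 time steps with one feature per step, as used in this chapter.
m_check = rnn_model((12, 1))
print(m_check.input_shape)   # expected: (None, 12, 1)
print(m_check.output_shape)  # expected: (None, 1)
```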
5.3.5 Loading the data 5. The data is loaded by building a Dataset class class Dataset: def __init__(self, fname='international-airline-passengers.csv', D=12): data_dn = load_data(fname=fname) self.X, self.y = get_Xy(data_dn, D=D) self.X_train, self.X_test, self.y_train, self.y_test = \ model_selection.train_test_split(self.X, self.y, test_size=0.2, random_state=42) def load_data(fname='international-airline-passengers.csv'): dataset = pd.read_csv(fname, usecols=[1], engine='python', skipfooter=3) data = dataset.values.reshape(-1) plt.plot(data) plt.xlabel('Time'); plt.ylabel('#Passengers') plt.title('Original Data') plt.show() # data normalize data_dn = (data - np.mean(data)) / np.std(data) / 5 plt.plot(data_dn) plt.xlabel('Time'); plt.ylabel('Normalized #Passengers') plt.title('Normalized data by $E[]$ and $5\sigma$') plt.show() return data_dn def get_Xy(data, D=12): # make X and y X_l = [] y_l = [] N = len(data) assert N > D, "N should be larger than D, where N is len(data)" for ii in range(N-D-1): X_l.append(data[ii:ii+D]) y_l.append(data[ii+D]) X = np.array(X_l) X = X.reshape(X.shape[0], X.shape[1], 1) y = np.array(y_l) print(X.shape, y.shape) return X, y main()--- Full code# %% import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn import model_selection from keras import models, layers import seaborn as sns from keraspp import skeras # %% def main(): machine = Machine() machine.run(epochs=400) class Machine(): def __init__(self): self.data = Dataset() shape = self.data.X.shape[1:] self.model = rnn_model(shape) def run(self, epochs=400): d = self.data X_train, X_test, y_train, y_test = d.X_train, d.X_test, d.y_train, d.y_test X, y = d.X, d.y m = self.model h = m.fit(X_train, y_train, epochs=epochs, validation_data=[X_test, y_test], verbose=0) skeras.plot_loss(h) plt.title('History of training') plt.show() yp = m.predict(X_test) print('Loss:', m.evaluate(X_test, y_test)) plt.plot(yp, label='Prediction') plt.plot(y_test, label='Original') plt.legend(loc=0) plt.title('Validation Results') plt.show() yp = m.predict(X_test).reshape(-1) print('Loss:', m.evaluate(X_test, y_test)) print(yp.shape, y_test.shape) df = pd.DataFrame() df['Sample'] = list(range(len(y_test))) * 2 df['Normalized #Passengers'] = np.concatenate([y_test, yp], axis=0) df['Type'] = ['Original'] * len(y_test) + ['Prediction'] * len(yp) plt.figure(figsize=(7, 5)) sns.barplot(x="Sample", y="Normalized #Passengers", hue="Type", data=df) plt.ylabel('Normalized #Passengers') plt.show() yp = m.predict(X) plt.plot(yp, label='Prediction') plt.plot(y, label='Original') plt.legend(loc=0) plt.title('All Results') plt.show() def rnn_model(shape): m_x = layers.Input(shape=shape) #X.shape[1:] m_h = layers.LSTM(10)(m_x) m_y = layers.Dense(1)(m_h) m = models.Model(m_x, m_y) m.compile('adam', 'mean_squared_error') m.summary() return m class Dataset: def __init__(self, fname='international-airline-passengers.csv', D=12): data_dn = load_data(fname=fname) self.X, self.y = get_Xy(data_dn, D=D) self.X_train, self.X_test, self.y_train, self.y_test = \ model_selection.train_test_split(self.X, self.y, test_size=0.2, random_state=42) def load_data(fname='international-airline-passengers.csv'): dataset = pd.read_csv(fname, usecols=[1], engine='python', skipfooter=3) data = dataset.values.reshape(-1) plt.plot(data) plt.xlabel('Time'); plt.ylabel('#Passengers') plt.title('Original Data') plt.show() # data normalize data_dn = (data - np.mean(data)) / np.std(data) / 5 plt.plot(data_dn) plt.xlabel('Time'); plt.ylabel('Normalized #Passengers') plt.title('Normalized data by
$E[]$ and $5\sigma$') plt.show() return data_dn def get_Xy(data, D=12): # make X and y X_l = [] y_l = [] N = len(data) assert N > D, "N should be larger than D, where N is len(data)" for ii in range(N-D-1): X_l.append(data[ii:ii+D]) y_l.append(data[ii+D]) X = np.array(X_l) X = X.reshape(X.shape[0], X.shape[1], 1) y = np.array(y_l) print(X.shape, y.shape) return X, y main()Introduction to the Lithology and LithoLayers objectsLithology and LithoLayers are two Landlab components meant to make it easier to work with spatially variable lithology that produces spatially variable parameter values (e.g. stream power erodability or diffusivity). This tutorial is meant for users who have some experience using Landlab components.In this tutorial we will explore the creation of spatially variable lithology and its impact on the evolution of topography. After an introductory example that will let you see how LithoLayers works, we will work through two more complicated examples. In the first example, we use the LithoLayers to erode either dipping layeres or an anticline. Then we will use Lithology to create inverted topography. We will use [xarray](https://xarray.pydata.org/en/stable/) to store and annotate our model output. While we won't extensively discuss the use of xarray, some background will be provided. To start, we will import the necessary modules. A note: this tutorial uses the [HoloViews package](http://holoviews.org) for visualization. This package is a great tool for dealing with multidimentional annotated data (e.g. an xarray dataset). If you get an error on import, consider updating dask (this is what the author needed to do in April 2018). You will also need to have the [Bokeh](https://bokeh.pydata.org/en/latest/) and [Matplotlib](https://matplotlib.org) packages installed.In testing we've seen some users have a warning raised related to the Matplotlib backend. In our testing it was OK to ignore these errors.import warnings warnings.filterwarnings('ignore') import os import numpy as np import xarray as xr import dask import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt %matplotlib inline import holoviews as hv hv.notebook_extension('matplotlib') from landlab import RasterModelGrid from landlab.components import FlowAccumulator, FastscapeEroder, LinearDiffuser, Lithology, LithoLayers from landlab.plot import imshow_gridPart 1: Creating layered rockFirst we will create an instance of a LithoLayers to learn how this component works. Both LithoLayers and Lithology work closely with a Landlab ModelGrid, storing information about rock type at each grid node. To create LithoLayers you need the following information:1. A model grid that has the field `'topographic__elevation'` already created. 2. A list of elevations, called `'layer_elevations'` that the bottom of your layers will go through at specified plan-view anchor point (default value for the anchor point is (x, y) = (0, 0)), and a list of rock type IDs that indicate the rock type of that layer. When `'layer_elevations'` is negative that means that the layer goes through the anchor point above the topographic surface. These layers will be created where they extend below the topographic surface.3. A dictionary of rock property attributes that maps a rock ID type to property values.4. A functional form in x and y that defines the shape of your surface. 
The use of this function form makes it possible for any function of x and y to be passed to LithoLayers.Both the Lithology and LithoLayers components then know the rock type ID of all the material in the 'block of rock' you have specified. This can be used to continuously know the value of specified rock properties at the topographic surface, even as the rock is eroded, uplifted, or new rock is deposited. In this tutorial we will first make an example to help build intuition and then do two more complex examples. Most of the functionality of Lithology and LithoLayers is shown in this tutorial, but if you want to read the full component documentation for LithoLayers, it can be found [here](https://landlab.readthedocs.io/en/release/landlab.components.lithology.html). Links to both components documentation can be found at the bottom of the tutorial.First, we create a small RasterModelGrid with topography.mg = RasterModelGrid((10, 15)) z = mg.add_zeros('topographic__elevation', at='node')Next we make our layer elevations. We will make 20 layers that are 5 meters thick. Note that here, as with most Landlab components, there are no default units. At the anchor point, half of the layers will be above the ground (`'layer_elevations'` will have negative values) and half will be below the ground (`'layer_elevations'` have positive values). We will make this with the [`np.arange`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html) function. We will also make the bottom layer really really thick so that we won't be able to erode through through it.layer_elevations = 5. * np.arange(-10, 10) # we create a bottom layer that is very thick. layer_elevations[-1] = layer_elevations[-2] + 100Next we create an array that represents our rock type ID values. We will create alternating layers of four types of rock by making an array with alternating `0`s `1`s `2`s and `3`s with the [np.tile](https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html) function.layer_ids = np.tile([0, 1, 2, 3], 5)Our dictionary containing rock property attributes has the following form:attrs = {'K_sp': {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}`'K_sp'` is the property that we want to track through the layered rock, `0`, `1`, `2`, `3` are the rock type IDs, and `0.0003` and `0.0001` are the values for `'K_sp'` for the rock types `0` and `1`. The rock type IDs are unique identifiers for each type of rock. A particular rock type may have many properties (e.g. `'K_sp'`, `'diffusivity'`, and more). You can either specify all the possible rock types and attributes when you instantiate the LithoLayers component, or you can add new ones with the [`lith.add_rock_type`](https://landlab.readthedocs.io/en/release/landlab.components.lithology.htmllandlab.components.lithology.lithology.Lithology.add_rock_type) or [`lith.add_property`](https://landlab.readthedocs.io/en/release/landlab.components.lithology.htmllandlab.components.lithology.lithology.Lithology.add_property) built in functions.Finally, we define our function. Here we will use a [lambda expression](https://docs.python.org/3/tutorial/controlflow.htmllambda-expressions) to create a small anonymous function. In this case we define a function of `x` and `y` that returns the value `x + (2. * y)`. 
The LithoLayers component will check that this function is a function of two variables and that when passed two arrays of size number-of-nodes it returns an array of size number-of-nodes.This means that planar rock layers will dip into the ground to the North-North-East. By changing this functional form, we can make more complicated rock layers.func = lambda x, y: x + (2. * y)Finally we construct our LithoLayers component by passing the correct arguments.lith = LithoLayers(mg, layer_elevations, layer_ids, function=func, attrs=attrs)LithoLayers will make sure that the model grid has at-node grid fields with the layer attribute names. In this case, this means that the model grid will now include a grid field called `'K_sp'` and a field called `'rock_type__id'`. We can plot these with the Landlab [imshow_grid](http://landlab.readthedocs.io/en/release/landlab.plot.htmllandlab.plot.imshow.imshow_grid) function.imshow_grid(mg, 'rock_type__id', cmap='viridis')As you can see, we have layers that strike East-South-East. Since we can only see the surface expression of the layers, we can't infer the dip direction or magnitude from the plot alone. If the topographic surface erodes, then you will want to update LithoLayers. Like most Landlab components, LithoLayers uses a `run_one_step` method to update. Next we will erode the topography by decrementing the variable `z`, which points to the topographic elevation of our model grid, by an amount 1. In a landscape evolution model, this would typically be done by running the `run_one_step` method for each of the process components in the model. If the rock mass is being advected up or down by an external force (e.g. tectonic rock uplift), then then advection must be specified. The `dz_advection` argument can be a single value or an array of size number-of-nodes.z -= 1. dz_ad = 0. lith.dz_advection=dz_ad lith.run_one_step()We can re-plot the value of `'K_sp'`. We will see that the location of the surface expression of the rock layers has changed. As we expect, the location has changed in a way that is consistent with layers dipping to the NNE.imshow_grid(mg, 'rock_type__id', cmap='viridis')Anytime material is added, LithoLayers or Lithology needs to know the type of rock that has been added. LithoLayers and Lithology do not assume to know the correct rock type ID and thus require that the user specify it with the `rock_id` keyword argument. In the `run_one_step` function, both components will check to see if any deposition has occured. If deposition occurs **and** this argument is not passed, then an error will be raised. For example here we add 1 m of topographic elevation and do not advect the block of rock up or down. When we run `lith.run_one_step` we specify that the type of rock has id `0`.z += 1. dz_ad = 0. lith.dz_advection=dz_ad lith.rock_id=0 lith.run_one_step()When we plot the value of the rock type ID at the surface, we find that it is now all purple, the color of rock type zero.imshow_grid(mg, 'rock_type__id', cmap='viridis', vmin=0, vmax=3)The value passed to the `rock_id` keyword argument can be either a single value (as in the second to last example) or an array of length number-of-nodes. This option permits a user to indicate that more than one type of rock is deposited in a single time step. Next we will add a 2 m thick layer that is type `1` for x values less than or equal to 6 and type `2` for all other locations.z += 2. dz_ad = 0. 
spatially_variable_rock_id = mg.ones('node') spatially_variable_rock_id[mg.x_of_node > 6] = 2 lith.dz_advection=dz_ad lith.rock_id=spatially_variable_rock_id lith.run_one_step() imshow_grid(mg, 'rock_type__id', cmap='viridis', vmin=0, vmax=3)As you can see this results in the value of rock type at the surface being about half rock type `1` and about half rock type `2`. Next we will create an xarray dataset that has 3D information about our Lithology to help visualize the layers in space. We will use the `rock_cube_to_xarray` method of the LithoLayers component. We will then convert this xarray dataset into a HoloViews dataset so we can visualize the result. As you can see the LithoLayers has a value of rock types `1` and `2` at the surface, then a layer of `0` below, and finally changes to alternating layers.ds = lith.rock_cube_to_xarray(np.arange(30)) hvds_rock = hv.Dataset(ds.rock_type__id) %opts Image style(cmap='viridis') plot[colorbar=True] hvds_rock.to(hv.Image, ['x', 'y'])The slider allows us to change the depth below the topographic surface.We can also plot the cube of rock created with LithoLayers as a cross section. In the cross section we can see the top two layers we made by depositing rock and then dipping layers of alternating rock types.%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True] hvds_rock.to(hv.Image, ['x', 'z'])Hopefuly this gives you a sense of how LithoLayers works. The next two blocks of code have all the steps we just worked through in one place. Try modifying the layer thicknesses, the size of the grid, the function used to create the form of the layers, the layers deposited and eroded, and the location of the anchor point to gain intuition for how you can use LithoLayers to create different types of layered rock.# Parameters that control the size and shape of the model grid number_of_rows = 50 number_of_columns = 50 dx = 1 # Parameters that control the LithoLayers # the layer shape function func = lambda x, y: (0.5 * x)**2 + (0.5 * y)**2 # the layer thicknesses layer_thickness = 50. # the location of the anchor point x0 = 25 y0 = 25 # the resolution at which you sample to create the plan view and cros-section view figures. sample_depths = np.arange(0, 30, 1) # create the model grid mg = RasterModelGrid((number_of_rows, number_of_columns), dx) z = mg.add_zeros('topographic__elevation', at='node') # set up LithoLayers inputs layer_ids = np.tile([0, 1, 2, 3], 5) layer_elevations = layer_thickness * np.arange(-10, 10) layer_elevations[-1] = layer_elevations[-2] + 100 attrs = {'K_sp': {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}} # create LithoLayers lith = LithoLayers(mg, layer_elevations, layer_ids, x0=x0, y0=y0, function=func, attrs=attrs) # deposity and erode dz_ad = 0. z -= 1. lith.dz_advection=dz_ad lith.run_one_step() z += 1. lith.dz_advection=dz_ad lith.rock_id=0 lith.run_one_step() z += 2. 
spatially_variable_rock_id = mg.ones('node') spatially_variable_rock_id[mg.x_of_node > 6] = 2 lith.dz_advection=dz_ad lith.rock_id=spatially_variable_rock_id lith.run_one_step() # get the rock-cube data structure and plot ds = lith.rock_cube_to_xarray(sample_depths) hvds_rock = hv.Dataset(ds.rock_type__id) # make a plan view image %opts Image style(cmap='viridis') plot[colorbar=True] hvds_rock.to(hv.Image, ['x', 'y'])You can also make a cross section of this new LithoLayers component.%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True] hvds_rock.to(hv.Image, ['x', 'z'])Part 2: Creation of a landscape evolution model with LithoLayersIn this next section, we will run LithoLayers with components used for a simple Landscape Evolution Model. We will start by creating the grid.mg = RasterModelGrid((50, 30), 400) z = mg.add_zeros('topographic__elevation', at='node') random_field = 0.01 * np.random.randn(mg.size('node')) z += random_field - random_field.min()Next we set all the parameters for LithoLayers. Here we have two types of rock with different erodabilities.attrs = {'K_sp': {0: 0.0003, 1: 0.0001}} z0s = 50 * np.arange(-20, 20) z0s[-1] = z0s[-2] + 10000 ids = np.tile([0, 1], 20)There are three functional forms that you can choose between. Here we define each of them.# Anticline anticline_func = lambda x, y: ((0.002 * x)**2 + (0.001 * y)**2) # Shallow dips shallow_func = lambda x, y: ((0.001 * x) + (0.003 * y)) # Steeper dips steep_func = lambda x, y: ((0.01 * x) + (0.01 * y))The default option is to make an anticline, but you can comment/uncomment lines to choose a different functional form.# Anticline lith = LithoLayers(mg, z0s, ids, x0=6000, y0=10000, function=anticline_func, attrs=attrs) # Shallow dips #lith = LithoLayers(mg, z0s, ids, function=shallow_func, attrs=attrs) # Steeper dips #lith = LithoLayers(mg, z0s, ids, function=steep_func, attrs=attrs)Now that we've created LithoLayers, model grid fields for each of the LithoLayers attributes exist and have been set to the values of the rock exposed at the surface. Here we plot the value of `'K_sp'` as a function of the model grid.imshow_grid(mg, 'K_sp')As you can see (in the default anticline option) we have concentric elipses of stronger and weaker rock. Next, lets instantiate a FlowAccumulator and a FastscapeEroder to create a simple landscape evolution model. We will point the FastscapeEroder to the model grid field `'K_sp'` so that it will respond to the spatially variable erodabilities created by LithoLayers.nts = 300 U = 0.001 dt = 1000 fa = FlowAccumulator(mg) sp = FastscapeEroder(mg, K_sp='K_sp')Before we run the model we will also instatiate an xarray dataset used to store the output of our model through time for visualization. The next block may look intimidating, but I'll try and walk you through what it does. [xarray](https://xarray.pydata.org/en/stable/) allows us to create a container for our data and label it with information like units, dimensions, short and long names, etc. xarray gives all the tools for dealing with N-dimentional data provided by python packages such as [numpy](http://www.numpy.org), the labeling and named indexing power of the [pandas](https://pandas.pydata.org) package, and the data-model of the [NetCDF file](https://www.unidata.ucar.edu/software/netcdf/).This means that we can use xarray to make a "self-referential" dataset that contains all of the variables and attributes that describe what each part is and how it was made. 
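Before building the full model dataset in the next cell, a minimal, hypothetical single-variable example of the same `(dims, data, attrs)` pattern may help; the variable name and values below are made up for illustration only:

```python
import numpy as np
import xarray as xr

# Added sketch: one 1-D data variable and one coordinate, each given as (dims, data, attrs).
tiny = xr.Dataset(
    data_vars={'temperature': (('time',),             # dims
                               np.array([1.0, 2.0]),  # data
                               {'units': 'degrees C'})},  # attrs
    coords={'time': (('time',), np.array([0.0, 1.0]), {'units': 'years'})})
print(tiny)
```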
In this application, we won't make a fully self-referential dataset, but if you are interested in this, check out the [NetCDF best practices](https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html). Important for our application is that later on we will use the [HoloViews package](http://holoviews.org) for visualization. This package is a great tool for dealing with multidimentional annotated data and will do things like automatically create nice axis labels with units. However, in order for it to work, we must first annotate our data to include this information.Here we create an xarray Dataset with two variables `'topographic__elevation'` and `'rock_type__id'` and three dimensions `'x'`, `'y'`, and `'time'`. We pass xarray two dictionaries, one with information about the data variabiables (`data_vars`) and one with information about the coordinate system (`coords`). For each data variable or coordinate, we pass a tuple of three items: `(dims, data, atts)`. The first element is a tuple of the name of the dimensions, the second element is the data, an the third is a dictionary of attributes.ds = xr.Dataset( data_vars={ 'topographic__elevation': ( ('time', 'y', 'x'), # tuple of dimensions np.empty((nts, mg.shape[0], mg.shape[1])), # n-d array of data { 'units': 'meters', # dictionary with data attributes 'long_name': 'Topographic Elevation' }), 'rock_type__id': (('time', 'y', 'x'), np.empty((nts, mg.shape[0], mg.shape[1])), { 'units': '-', 'long_name': 'Rock Type ID Code' }) }, coords={ 'x': ( ('x'), # tuple of dimensions mg.x_of_node.reshape( mg.shape)[0, :], # 1-d array of coordinate data { 'units': 'meters' }), # dictionary with data attributes 'y': (('y'), mg.y_of_node.reshape(mg.shape)[:, 1], { 'units': 'meters' }), 'time': (('time'), dt * np.arange(nts) / 1e6, { 'units': 'millions of years since model start', 'standard_name': 'time' }) })We can print the data set to get some basic information about it.print(ds)We can also print a single variable to get more detailed information about it. Since we initialized the datset with empty arrays for the two data variables, we just see zeros for the data values.ds.topographic__elevationNext, we run the model. In each time step we first run the FlowAccumulator to direct flow and accumulatate drainage area. Then the FastscapeEroder erodes the topography based on the stream power equation using the erodability value in the field `'K_sp'`. We create an uplift field that uplifts only the model grid's core nodes. After uplifting these core nodes, we update LithoLayers. Importantly, we must tell the LithoLayers how it has been advected upward by uplift using the `dz_advection` keyword argument. As we discussed in the introductory example, the built-in function [`lith.run_one_step`](https://landlab.readthedocs.io/en/release/landlab.components.litholayers.htmllandlab.components.lithology.litholayers.LithoLayers.run_one_step) has an optional keyword argument `rock_id` to use when some material may be deposited. The LithoLayers component needs to know what type of rock exists everywhere and it will raise an error if material is deposited **and** no rock type is specified. However, here we are using the FastscapeEroder which is fully detachment limited, and thus we know that no material will be deposited at any time. Thus we can ignore this keyword argument. Later in the tutorial we will use the LinearDiffuser which can deposit sediment and we will need to set this keyword argument correctly. 
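To make the two calling patterns concrete, here is a minimal, optional sketch of driving the component with and without a rock type for newly deposited material. It reuses `lith`, `mg`, and `dz_ad` from the cells above and uses the same property-style interface (`dz_advection`, `rock_id`) as the rest of this tutorial; it is illustrative rather than part of the model run below.

# erosion-only step (e.g., only the FastscapeEroder acts): nothing is deposited,
# so no rock type is needed
lith.dz_advection = dz_ad
lith.run_one_step()

# a step that may deposit material (e.g., the LinearDiffuser acts): a rock type
# must be supplied, either as a single id used everywhere ...
lith.rock_id = 0
lith.run_one_step()

# ... or as a number-of-nodes sized array giving an id at every grid node
new_ids = np.zeros(mg.size('node'), dtype=int)  # illustrative array of ids
new_ids[mg.x_of_node > 6] = 1
lith.rock_id = new_ids
lith.run_one_step()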
Within each timestep we save information about the model for plotting.out_fields = ['topographic__elevation', 'rock_type__id'] for i in range(nts): fa.run_one_step() sp.run_one_step(dt=dt) dz_ad = np.zeros(mg.size('node')) dz_ad[mg.core_nodes] = U * dt z += dz_ad lith.dz_advection=dz_ad lith.run_one_step() for of in out_fields: ds[of][i, :, :] = mg['node'][of].reshape(mg.shape)Now that the model has run, let's start by plotting the resulting topography.imshow_grid(mg, 'topographic__elevation', cmap='viridis')The layers of rock clearly influence the form of the topography. Next we will use HoloViews to visualize the topography and rock type together. To start, we create a HoloViews Dataset from our xarray data structure.hvds_topo = hv.Dataset(ds.topographic__elevation) hvds_rock = hv.Dataset(ds.rock_type__id) hvds_topoNext we specify that we want two images, one showing rock type and one showing topographic elevation. A slider bar shows us model time in millions of years. Be patient: running this next block may take a moment, because HoloViews renders an image of every time slice so you can see an animated slider. This is pretty magical (but not instantaneous).%opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True] topo = hvds_topo.to(hv.Image, ['x', 'y']) rock = hvds_rock.to(hv.Image, ['x', 'y']) topo + rockWe can see the form of the anticline advecting through the topography. Cool! Part 3: Creation of Inverted TopographyHere we will explore making inverted topography by eroding a Lithology with constant properties for half of the model evaluation time, and then filling the Lithology in with resistant material only where the drainage area is large. This is meant as a simple example of filling in valleys with volcanic material. All of the details of the options for creating a [Lithology](https://landlab.readthedocs.io/en/release/landlab.components.lithology.html) can be found in the documentation. In the next code block we make a new model and run it. There are a few important differences between this next example and the one we just worked through in Part 2. Here we will have two rock types: type `0` represents non-volcanic material and has a higher diffusivity and erodability than the volcanic material, which is type `1`. Recall that in Part 2 we did not specify a `rock_id` keyword argument to the `lith.run_one_step` method. This was because we used only the FastscapeEroder component, which is fully detachment limited and thus never deposits material. In this example we will also use the LinearDiffuser component, which may deposit material. The `Lithology` component needs to know the rock type everywhere, so we must indicate the rock type of the newly deposited rock. This is done by passing a single value or a number-of-nodes sized array of rock type values to the `run_one_step` method. 
We are also handling the model grid boundary conditions differently than in the last example, setting the boundaries on the top and bottom to closed.mg2 = RasterModelGrid((30, 30), 200) mg2.set_closed_boundaries_at_grid_edges(False, True, False, True) z2 = mg2.add_zeros('topographic__elevation', at='node') random_field = 0.01 * np.random.randn(mg2.size('node')) z2 += random_field - random_field.min() thicknesses2 = [10000] ids2 = [0] attrs2 = {'K_sp': {0: 0.0001, 1: 0.00001}, 'D': {0: 0.4, 1: 0.001}} lith2 = Lithology(mg2, thicknesses2, ids2, attrs=attrs2) fa2 = FlowAccumulator(mg2) sp2 = FastscapeEroder(mg2, K_sp='K_sp') ld2 = LinearDiffuser(mg2, linear_diffusivity='D') out_fields = ['topographic__elevation', 'rock_type__id'] nts = 200 U = 0.001 dt = 1000 ds2 = xr.Dataset(data_vars={ 'topographic__elevation': (('time', 'y', 'x'), np.empty((nts, mg2.shape[0], mg2.shape[1])), { 'units': 'meters', 'long_name': 'Topographic Elevation' }), 'rock_type__id': (('time', 'y', 'x'), np.empty((nts, mg2.shape[0], mg2.shape[1])), { 'units': '-', 'long_name': 'Rock Type ID Code' }) }, coords={ 'x': (('x'), mg2.x_of_node.reshape(mg2.shape)[0, :], { 'units': 'meters' }), 'y': (('y'), mg2.y_of_node.reshape(mg2.shape)[:, 1], { 'units': 'meters' }), 'time': (('time'), dt * np.arange(nts) / 1e6, { 'units': 'millions of years since model start', 'standard_name': 'time' }) }) half_nts = int(nts / 2) dz_ad2 = np.zeros(mg2.size('node')) dz_ad2[mg2.core_nodes] = U * dt lith2.dz_advection=dz_ad2 lith2.rock_id=0 for i in range(half_nts): fa2.run_one_step() sp2.run_one_step(dt=dt) ld2.run_one_step(dt=dt) z2 += dz_ad2 lith2.run_one_step() for of in out_fields: ds2[of][i, :, :] = mg2['node'][of].reshape(mg2.shape)After the first half of the run time, let's look at the topography.imshow_grid(mg2, 'topographic__elevation', cmap='viridis')We can see that we have developed ridges and valleys, as we'd expect from a model with stream power erosion and linear diffusion. 
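Because the output is stored in an xarray Dataset, you can also pull out any saved time slice and plot it statically without HoloViews; a small optional sketch, assuming the `ds2` and `half_nts` defined above:

# grab the last snapshot of the first half of the run and let xarray label the
# axes and colorbar from the attrs we attached when building ds2
ds2.topographic__elevation.isel(time=half_nts - 1).plot(cmap='viridis')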
Next we will create some volcanic deposits that fill the channels in our model.volcanic_deposits = np.zeros(mg2.size('node')) da_big_enough = mg2['node']['drainage_area'] > 5e4 topo_difference_from_top = mg2['node']['topographic__elevation'].max( ) - mg2['node']['topographic__elevation'] volcanic_deposits[ da_big_enough] = 0.25 * topo_difference_from_top[da_big_enough] volcanic_deposits[mg2.boundary_nodes] = 0.0 z2 += volcanic_deposits lith2.rock_id=1 lith2.run_one_step() imshow_grid(mg2, volcanic_deposits)We should expect that the locations of our valleys and ridges change as the river system encounters the much stronger volcanic rock.for i in range(half_nts, nts): fa2.run_one_step() sp2.run_one_step(dt=dt) ld2.run_one_step(dt=dt) dz_ad2 = np.zeros(mg2.size('node')) dz_ad2[mg2.core_nodes] = U * dt z2 += dz_ad2 lith2.dz_advection=dz_ad2 lith2.rock_id=0 lith2.run_one_step() for of in out_fields: ds2[of][i, :, :] = mg2['node'][of].reshape(mg2.shape)Now that the model has run, let's plot the final elevation.imshow_grid(mg2, 'topographic__elevation', cmap='viridis')And now a HoloViews plot that lets us explore the time evolution of the topography.hvds_topo2 = hv.Dataset(ds2.topographic__elevation) hvds_rock2 = hv.Dataset(ds2.rock_type__id) %opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True] topo2 = hvds_topo2.to(hv.Image, ['x', 'y']) rock2 = hvds_rock2.to(hv.Image, ['x', 'y']) topo2 + rock2 # if you want to output the data to visualize in something like ParaView, the following commands can be used #ds.to_netcdf('anticline.nc') #ds2.to_netcdf('inversion.nc')Import the necessary packagesimport math #%matplotlib qt5 import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np import pandas as pd import seaborn as sns import altair as alt # Display all floats rounded off to 1 decimal place pd.options.display.float_format = '{:,.1f}'.format # Plot inline in Jupyter notebook # Settings throughout the notebook sns.set() # Width = 16, Height = 6 DIMS=(16, 6) import os # multiprocessing libraries from itertools import repeat from multiprocessing import Pool, Manager from tqdm import tqdm import cProfile import random import re import functools import json # for travelling salesman import stringDefine the optimization test function here - Bukin Function N.6https://www.sfu.ca/~ssurjano/bukin6.html In order to apply the simulated annealing method to a specific problem, one must specify the following parameters: the boundary, the energy (goal) function E(), the candidate generator procedure neighbour(), the acceptance probability function P(), the annealing schedule temperature(), and the initial temperature.#!/usr/bin/python # -*- coding: utf-8 -*- # note: if you want to change the function, remember to change the boundaries at which the function is evaluated! 
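Before working through the `Annealer` class below, here is a minimal, self-contained sketch of those ingredients (an energy function, a neighbour generator, Metropolis acceptance, and a cooling schedule) on a toy one-dimensional problem. Every name in it is illustrative; it is not part of the class that follows.

import math
import random

def energy(x):                       # E(): toy objective with its minimum at x = 2
    return (x - 2.0) ** 2

def neighbour(x, step=0.5):          # neighbour(): random perturbation, clipped to the boundary [-10, 10]
    return min(10.0, max(-10.0, x + random.uniform(-step, step)))

def acceptance(old_e, new_e, temperature):   # P(): Metropolis criterion
    if new_e < old_e:
        return 1.0
    return math.exp(-(new_e - old_e) / temperature)

x = random.uniform(-10.0, 10.0)      # random start inside the boundary
e = energy(x)
T0, n_steps = 1.0, 1000              # initial temperature and schedule length
for k in range(n_steps):
    T = T0 * (1 - k / n_steps) + 1e-9    # linear ramp; a geometric schedule would instead do T *= alpha
    candidate = neighbour(x)
    candidate_e = energy(candidate)
    if acceptance(e, candidate_e, T) >= random.random():
        x, e = candidate, candidate_e
print(x, e)                          # should finish near x = 2, with e close to 0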
class Annealer(object): ''' Pass the max steps you want to take to the annealer function ''' def __init__( self, maxsteps=500, multiplier=1, control_t=1, acceptrate=0.5, explore=30, lams=1, i1=np.arange(-15.0, 10., 0.01), i2=np.arange(-10., 10.02, 0.01), ): ''' inputs: maxsteps - total number of temperature steps to anneal for (default = 500) multiplier - eometric multiplier for annealing schedule (default = 1 OFF) control_t - whether you want to turn on or off the geometric cooling schedule (default = 1 OFF) acceptrate - generic lam's acceptance rate (default = 0.5) explore - number of steps to explore at every iteration (default = 30 steps per iteration) lams - whether to turn on or off lam's annealing schedule (default = 1 OFF) Initialize parameters output: none ''' self.Tmax = maxsteps self.threshold = multiplier self.interval = list() self.over_count = 0 # self.states = {"x":list(), "y":list()} self.acceptrate = acceptrate self.control = control_t self.exploration_space = explore self.trig_lams = lams self.real_answer = -1.8013 self.lams = dict() self.accepts = dict() self.i1 = i1 self.i2 = i2 def get_range(self): ''' function to get range from the user ''' i1 = input('Please input desired x1 range in the form x1,y1: \n' ) i2 = input('Please input desired x1 range in the form x1,y1: \n' ) special_chars = r'[`\=~!@#$%^&*()_+\[\]{};\'\\:"|<,/<>?]' (i1, i2) = (re.split(special_chars, i1), re.split(special_chars, i2)) (i1, i2) = ([np.float(i) for i in i1], [np.float(i) for i in i1]) i1 = np.arange(min(i1), max(i1), 0.01) i2 = np.arange(min(i2), max(i2), 0.01) return (i1, i2) def random_start(self): """ input: none Randomly choose a random starting point within the boundary output: a pair of starting point coordinates (x1, x2) """ self.interval.append([random.uniform(self.i1[0], self.i1[-1]), random.uniform(self.i2[0], self.i2[-1])]) return self.interval def f(self, x): ''' input: x (a 2D array) Function that evaluates the cost of a given x1, x2 output: single cost ''' x1 = x[0] x2 = x[1] # function 1, levy function obj = np.sin(3 * np.pi * x[0]) ** 2 + (x[0] - 1) ** 2 * (1 + np.sin(3 * np.pi * x[1]) ** 2) + (x[1] - 1) ** 2 * (1 + np.sin(2 * np.pi * x[1]) ** 2) # self.i1 = np.arange(-10.0, 10., 0.01) # self.i2 = np.arange(-10.0, 10., 0.01) # obj = 100 * np.sqrt(abs(x[1] - 0.01*(-x[0])**2)) + 0.01 * abs(x[0] + 10) # self.i1 = np.arange(-15.0, 10., 0.01) # self.i2 = np.arange(-15.0, 10., 0.01) #obj = - ((np.sin(x[1])* (np.sin((x[1]**2) / (np.pi))**20 )) + (np.sin(x[1])*(np.sin(2*(x[1]**2) / (np.pi))**20 ))) # self.i1 = np.arange(0, np.pi, 0.01) # self.i2 = np.arange(0, np.pi, 0.01) return obj def random_neighbour(self, x): """ input: x (a 2D array) Move a little bit x1 and x2, from the left or the right and then check whether it's within the boundary. (normalized by the min and max) if it's within the boundary, return the new coordinates, otherwise find new ones. 
output: (newx, newy) """ # normalized deltax = random.uniform(self.i1[0], self.i1[-1]) deltay = random.uniform(self.i2[0], self.i2[-1]) newx = x[0] + deltax newy = x[1] + deltay return [newx, newy] def acceptance_probability( self, cost, new_cost, temperature, ): ''' inputs: old cost, new cost, current temperature calculate probability of acceptance and return it using the metropolis algorithm output: probability (0 to 1) ''' return np.exp(-(new_cost - cost) / temperature) def restart(self): ''' reinitializes at a random point ''' state = self.random_start()[0] cost = self.f(state) return (state, cost) def anneal(self): ''' inputs: none function performs annealing and calls random start to kickstart the annealing process. iteratively calculates the new cost. output: final cost, final state (list of x1 and x2), all costs (list of costs at every timestep) ''' best_cost = list() current_cost = list() deviation = list() T_list = list() acceptrate = self.acceptrate (states, costs) = self.restart() LamRate = 0 best_cost.append(costs) for temp_step in range(self.Tmax): fraction = temp_step / float(self.Tmax) # T = max((1-self.trig_lams) * max(fraction*(1-self.control), (1 - fraction) * self.control) * self.threshold, (1-fraction)*self.trig_lams) # if you want to trigger lam's, self.control == 1 if self.control == 0 & temp_step > 0: T = self.threshold * (1 - fraction) else: T = 1 - fraction T_list.append(T) for step in range(self.exploration_space): new_cost = costs new_state = states gen_new_state = self.random_neighbour(new_state) gen_new_cost = self.f(gen_new_state) if gen_new_cost < new_cost: new_state = self.random_neighbour(states) new_cost = self.f(new_state) current_cost.append(new_cost) if new_cost < costs or self.acceptance_probability(costs, new_cost, T) >= random.uniform(0, 1): states, costs = new_state, new_cost if self.trig_lams == 1: acceptrate = 1 / 500 * (499 * acceptrate + 1) else: if self.trig_lams == 1: acceptrate = 1 / 500 * (499 * acceptrate) # check conditions if fraction < 0.15: LamRate = 0.44 + 0.56 * 560 ** (-temp_step / (self.Tmax * 0.15)) elif fraction < 0.65: LamRate = 0.44 else: LamRate = 0.44 * 440 ** ((-temp_step / self.Tmax - 0.65) / 0.35) if LamRate < acceptrate: T *= 0.99 else: T *= 1 / 0.999 deviation.append(abs(costs - self.real_answer)) if best_cost[-1] > costs: best_cost.append(costs) else: best_cost.append(best_cost[-1]) if self.trig_lams == 1: if temp_step not in list(self.lams.keys()): self.lams[temp_step] = list() if temp_step not in list(self.accepts.keys()): self.accepts[temp_step] = list() self.lams[temp_step].append(LamRate) self.accepts[temp_step].append(acceptrate) return ( current_cost, best_cost[1:], deviation, self.accepts, self.lams, T_list, )Test with 10000 steps and profile the run times#@title cost = Annealer(1000) cProfile.run('cost.anneal()') #costExample 1: Using a Linear Ramp to adjust the Annealing Schedule (1-ti/Tmax)To use the linear ramp, you only need to: adjust the max number of steps to your preferencetries1 = {"run":list(), "temp":list(), "current_cost":list(), "best_cost":list(), "deviations":list()} for i in range(0, 100, 1): a = Annealer(maxsteps = 5000, i1 = np.arange(-10.0, 10., 0.01), i2 = np.arange(-10.0, 10., 0.01)) current_cost, best_cost, deviations, accepts, lams, T = a.anneal() cost_keys = len(list(current_cost)) for k in range(cost_keys): tries1['run'].append(i) tries1['temp'].append(T[k]) tries1['current_cost'].append(current_cost[k]) tries1['best_cost'].append(best_cost[k]) tries1['deviations'].append(deviations[k]) 
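# each run appends its results to the tries1 dict; the next cell converts the dict to a
# DataFrame and averages over the 100 runs by temperature before plotting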
''' converts the dictionary into a pandas dataframe for easy data manipulation''' df_case1 = pd.DataFrame.from_dict(tries1) df_case1.head(20) df_case1_group_mean = df_case1.groupby(['temp']).mean().reset_index() df_case1_group_mean.to_csv("case1_func3.csv") fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) #Add the legend plt.title("Temperature v. Cost (1 - Ti / Tmax)") plt.xlim(1.0, 0) #plt.ylim(0,100) plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['current_cost'].tolist(), label='current_cost') plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['best_cost'].tolist(), label='best_cost') plt.legend(fontsize=12) plt.savefig('case_1_costs.png') fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) #Add the legend plt.title("Temperature v. Cost (1 - Ti / Tmax): Log Scale") plt.xlim(0,-9) plt.plot(df_case1_group_mean['temp'].apply(np.log).tolist(), df_case1_group_mean['current_cost'].apply(np.log).tolist(), label='current_cost') plt.plot(df_case1_group_mean['temp'].apply(np.log).tolist(), df_case1_group_mean['best_cost'].apply(np.log).tolist(), label='best_cost') plt.legend(fontsize=12) plt.savefig('case_1_costs_log.png') fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Deviations", fontsize=12) #Add the legend plt.title("Temperature v. Deviation (1 - Ti / Tmax)") plt.xlim(1.0, 0) plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['deviations'].tolist(), label='mean') plt.savefig('case_1_deviations.png') fig, ax1 = plt.subplots(1, 1) #print(current_df) plt.xlabel("Temperature") plt.ylabel("Deviations", fontsize=12) #Add the legend plt.title("Temperature v. Deviation (1 - Ti / Tmax): Log Scale") plt.xlim(0.02, -1) plt.plot(df_case1_group_mean['temp'].apply(np.log).tolist(), df_case1_group_mean['deviations'].apply(np.log).tolist(), label='mean') plt.savefig('case_1_deviations_log.png') plt.legend(fontsize=12)Example 2: Using a Geometric factor to adjust the annealing schedule (ti * g)To use the geometric ramp, you need to pass 3 parameters: adjust the max number of steps to your preference, the probability j (you can adjust using geoms) - note it takes in a decimal, and control_t = 0tries2 = {"probability":list(), "run":list(), "temp":list(), "current_cost":list(), "best_cost":list(), "deviations":list()} geoms = [0.99, 0.98, 0.9, 0.8, 0.7, 0.6] for j in geoms: print(j) for i in range(0, 100, 1): current = dict() a = Annealer(maxsteps = 5000, multiplier = j, control_t = 0, i1 = np.arange(-10.0, 10., 0.01), i2 = np.arange(-10.0, 10., 0.01)) current_cost, best_cost, deviations, accepts, lams, T = a.anneal() cost_keys = len(list(current_cost)) for k in range(cost_keys): tries2['probability'].append(j) tries2['run'].append(i) tries2['temp'].append(T[k]) tries2['current_cost'].append(current_cost[k]) tries2['best_cost'].append(best_cost[k]) tries2['deviations'].append(deviations[k]) df_case2 = pd.DataFrame.from_dict(tries2) df_case2_group_mean = df_case2.groupby(['temp', 'probability']).mean().reset_index() df_case2_group_mean.to_csv('case2_func3.csv')0.99 0.98 0.9 0.8 0.7 0.6Example of Obtaining the Deviations of Varying geometric rates (plotting code)for i in geoms: df_case2_group_mean_use = df_case2_group_mean[df_case2_group_mean['probability']==i] #dataframes = [df_case2_group_mean, df_case2_group_max, df_case2_group_min, df_case2_group_std] #dataframe_name = ['mean', 'max', 'min', 'std'] #print(current_df) #fig, ax1 = plt.subplots(1, 1) 
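# no new figure is created inside this loop, so the log-scale deviation curve for each
# geometric rate is overlaid on the same axes (and the same file is re-saved each pass)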
plt.xlabel("Temperature") plt.ylabel("Deviation", fontsize=12) #Add the legend plt.title("Temperature v. Deviation (Log Geometric Rate: {}): Log Scale".format(i)) plt.xlim(0.02, -1) plt.plot(df_case2_group_mean_use['temp'].apply(np.log).tolist(), df_case2_group_mean_use['deviations'].apply(np.log).tolist(), label=i) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case2_geom_deviation_log.png'.format(i)) for i in geoms: df_case2_group_mean_use = df_case2_group_mean[df_case2_group_mean['probability']==i] #dataframes = [df_case2_group_mean, df_case2_group_max, df_case2_group_min, df_case2_group_std] #dataframe_name = ['mean', 'max', 'min', 'std'] #print(current_df) #fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Deviation", fontsize=12) #Add the legend plt.title("Temperature v. Deviation (Geometric Rate: {})".format(i)) plt.xlim(1.0, 0) # plt.ylim(0,30) plt.plot(df_case2_group_mean_use['temp'].tolist(), df_case2_group_mean_use['deviations'].tolist(), label=i) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case2_geom_deviation.png'.format(i))Example of Obtaining the Costs of Varying geometric ratesfor i in geoms: df_case2_group_mean_use = df_case2_group_mean[df_case2_group_mean['probability']==i] #dataframes = [df_case2_group_mean, df_case2_group_max, df_case2_group_min, df_case2_group_std] #dataframe_name = ['mean', 'max', 'min', 'std'] #print(current_df) fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) #Add the legend plt.title("Temperature v. Cost (Geometric Rate: {}): Log Scale".format(i)) plt.xlim(0,-8) plt.plot(df_case2_group_mean_use['temp'].apply(np.log).tolist(), df_case2_group_mean_use['current_cost'].apply(np.log).tolist(), label=i) # need to redefine the names plt.plot(df_case2_group_mean_use['temp'].apply(np.log).tolist(), df_case2_group_mean_use['best_cost'].apply(np.log).tolist(), label=i) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case2_geom_cost_log_{}.png'.format(i)) for i in geoms: df_case2_group_mean_use = df_case2_group_mean[df_case2_group_mean['probability']==i] fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) plt.title("Temperature v. Cost (Geometric Rate: {})".format(i)) plt.xlim(1.0, 0) # plt.ylim(0,100) plt.plot(df_case2_group_mean_use['temp'].tolist(), df_case2_group_mean_use['best_cost'].tolist(), label='best_cost') # need to redefine the names plt.plot(df_case2_group_mean_use['temp'].tolist(), df_case2_group_mean_use['current_cost'].tolist(), label='current_cost') # need to redefine the names plt.legend(fontsize=12) plt.savefig('case2_geom_cost_{}.png'.format(i)) for i in geoms: df_case2_group_mean_use = df_case2_group_mean[df_case2_group_mean['probability']==i] #fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Best Cost", fontsize=12) plt.title("Best Cost v. 
Temperature (Different Geometric Rates)".format(i)) plt.xlim(1.0, 0) # plt.ylim(0,100) plt.plot(df_case2_group_mean_use['temp'].tolist(), df_case2_group_mean_use['best_cost'].tolist(), label=i) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case2_geom_best_compiled.png')Calculate the average costs for each number of runs Example 3: Lam's Adaptive Learning Rate Plotting Lam's with varying accept_ratestries3 = {"accept":list(), "run":list(), "temp":list(), "current_cost":list(), "best_cost":list(), "deviations":list()} acceptrate = [i/10 for i in range(2, 8, 1)] #acceptrate = [0.5] for q in acceptrate: print(q) for i in range(0, 100, 1): a = Annealer(5000, 1, acceptrate=q, lams=1, i1 = np.arange(-10.0, 10., 0.01), i2 = np.arange(-10.0, 10., 0.01)) current_cost, best_cost, deviations, accepts, lams, T = a.anneal() cost_keys = len(list(current_cost)) for k in range(cost_keys): tries3['accept'].append(q) tries3['run'].append(i) tries3['temp'].append(T[k]) tries3['current_cost'].append(current_cost[k]) tries3['best_cost'].append(best_cost[k]) tries3['deviations'].append(deviations[k]) df_case3 = pd.DataFrame.from_dict(tries3) df_case3_group_mean = df_case3.groupby(['temp','accept']).mean().reset_index() df_case3_group_mean.to_csv("case3_func2.csv") for i in acceptrate: df_case3_group_mean_use = df_case3_group_mean[df_case3_group_mean['accept']==i] #fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Deviation", fontsize=12) plt.title("Temperature v. Deviation (Varying Acceptance Rate)") plt.xlim(1.0, 0) plt.plot(df_case3_group_mean_use['temp'].tolist(), df_case3_group_mean_use['deviations'].tolist(), label='{}'.format(i)) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case3_lams_deviation_accept_compiled.png'.format(i)) for i in acceptrate: df_case3_group_mean_use = df_case3_group_mean[df_case3_group_mean['accept']==i] #fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Deviation", fontsize=12) plt.title("Temperature v. Deviation (Varying Acceptance Rate): Log Scale") plt.xlim(0.02,-1.0) plt.plot(df_case3_group_mean_use['temp'].apply(np.log).tolist(), df_case3_group_mean_use['deviations'].apply(np.log).tolist(), label='{}'.format(i)) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case3_lams_deviation_log_accept_compiled.png'.format(i)) for i in acceptrate: df_case3_group_mean_use = df_case3_group_mean[df_case3_group_mean['accept']==i] fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) plt.title("Temperature v. Cost (Acceptance Rate: {})".format(i)) plt.xlim(1.0, 0) #plt.ylim(0, 10) plt.plot(df_case3_group_mean_use['temp'].tolist(), df_case3_group_mean_use['current_cost'].tolist(), label='current_cost') # need to redefine the names plt.plot(df_case3_group_mean_use['temp'].tolist(), df_case3_group_mean_use['best_cost'].tolist(), label='best_cost') # need to redefine the names plt.legend(fontsize=12) plt.savefig('case3_lams_cost_accept_{}.png'.format(i)) for i in acceptrate: df_case3_group_mean_use = df_case3_group_mean[df_case3_group_mean['accept']==i] fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Cost", fontsize=12) plt.title("Temperature v. 
Cost (Acceptance Rate: {}): Log Scale".format(i)) plt.xlim(1.0, 0) #plt.ylim(0, 10) plt.plot(df_case3_group_mean_use['temp'].apply(np.log).tolist(), df_case3_group_mean_use['current_cost'].apply(np.log).tolist(), label='current_cost') # need to redefine the names plt.plot(df_case3_group_mean_use['temp'].apply(np.log).tolist(), df_case3_group_mean_use['best_cost'].apply(np.log).tolist(), label='best_cost') # need to redefine the names plt.legend(fontsize=12) plt.savefig('case3_lams_cost_accept_{}.png'.format(i)) for i in acceptrate: df_case3_group_mean_use = df_case3_group_mean[df_case3_group_mean['accept']==i] #fig, ax1 = plt.subplots(1, 1) plt.xlabel("Temperature") plt.ylabel("Best Cost", fontsize=12) colormap = plt.cm.gist_ncar plt.title("Best Cost v. Temperature (Varying Acceptance Rates)") plt.xlim(1.0, 0) # plt.ylim(0,100) plt.plot(df_case3_group_mean_use['temp'].tolist(), df_case3_group_mean_use['best_cost'].tolist(), label=i) # need to redefine the names plt.legend(fontsize=12) plt.savefig('case3_geom_best_compiled.png')One-timers!nvidia-smi from google.colab import drive drive.mount('/content/drive', force_remount=True) !mkdir dataset !cp /content/drive/MyDrive/Research/triples/data/*.csv dataset !cp -r /content/drive/MyDrive/Research/triples/HuggingFace dataset !ls dataset !pip3 install -q transformers tensorboard_logger seqeval sentencepiece tokenizers sentence_transformersTue Jul 27 13:00:20 2021 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 470.42.01 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla P100-PCIE... 
Off | 00000000:00:04.0 Off | 0 | | N/A 35C P0 26W / 250W | 0MiB / 16280MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-------[...]Importsimport os os.environ['CUDA_LAUNCH_BLOCKING'] = "1" import time, torch, random, glob, re, gc, datetime, tokenizers, pdb import numpy as np import transformers import pandas as pd import torch.nn as nn import seaborn as sns import matplotlib.pyplot as plt from tokenizers import * from transformers import * from functools import partial from pathlib import Path from tqdm.notebook import tqdm from torch.nn import functional as F from itertools import cycle, chain from torch.utils.data import Dataset, DataLoader, IterableDataset, TensorDataset # from sklearn.model_selection import GroupKFold from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score from sklearn.model_selection import train_test_split, RepeatedKFold, KFold from ast import literal_eval as eval # from transformers import RobertaForSequenceClassification, RobertaConfig, RobertaTokenizer, RobertaForTokenClassification import sys DRIVE_DIR="/content/drive/My Drive/Research/triples/" sys.path.insert(0, DRIVE_DIR) from utils import seed_everything, count_paramsGlobals and Configclass Config: random_state=2021 k=5 #folds device="cuda" selected_folds=list(range(k)) # selected_folds=[3,4] seed = 2021 model="bert-base-cased" # checkpoints=[f"/content/drive/MyDrive/Research/triples/2021-07-24/NER_bert-base-cased_ fold - {i+1}_.pt" for i in range(k)] checkpoint=[] # if this list has any checkpoint, the model uses that checkpoint to be the starting point and then finetunes over it. pretrained=True lowercase = False num_labels=4 batch_size = 32 batch_size_val = int(batch_size * 1.5) weight_decay =0.001 epochs = 15 lr =5e-5 warmup_prop = 0.1 freeze_main=False #this parameters controls whether we want to freeze the main bert and train only the classifier (True) or train the whole model max_len=128 save_every_epoch=list(range(5, epochs, 3)) #list specifies which checkpoints to save model_names=["roberta", "bert", "albert", "transformer", "distilbert"] CP_DIR=Path("/content/drive/MyDrive/Research/triples") NUM_WORKERS = 2 TRANSFORMERS_DIR=Path("dataset/HuggingFace/") TRANSFORMERS={ "roberta-base":{ "model_config":(RobertaModel, RobertaConfig), "tokenizer":RobertaTokenizer, }, "bert-base-cased":{ "model_config":(BertModel, BertConfig), "tokenizer":BertWordPieceTokenizer, }, "bert-base-uncased":{ "model_config":(BertModel, BertConfig), "tokenizer":BertWordPieceTokenizer, }, "albert-base-v2":{ "model_config":(AlbertModel,AlbertConfig), "tokenizer":AlbertTokenizer, }, "gpt2":{ "model_config":(GPT2Model, GPT2Config), "tokenizer":GPT2Tokenizer, }, "distilbert-base-cased":{ "model_config":(DistilBertModel, DistilBertConfig), "tokenizer":DistilBertTokenizer, } }Function and Helpersdef get_checkpoint_dir(): today=str(datetime.date.today()) checkpoint_dir=CP_DIR/today if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) return checkpoint_dir def checkpoint_name(): return Config.task+"_"+Config.model def save_model_weights(model, filename, verbose=1, cp_folder=""): if verbose: print(f"\n -> Saving weights to {os.path.join(cp_folder, filename)}\n") torch.save(model.state_dict(), os.path.join(cp_folder, filename)) def save_log(list_, logdir): if os.path.exists(logdir): mode="a" else: mode="w" with open(logdir, mode) as f: f.writelines("\n".join(list_)) f.writelines("\n") # def get_scores(truths, preds): # 
f1=f1_score(truths, preds) # recall=recall_score(truths, preds) # precision=precision_score(truths, preds) # accuracy=accuracy_score(truths, preds) # return f1, precision, recall, accuracy def load(model, with_checkpoint=None): model=Transformer(model) if with_checkpoint: checkpoint=torch.load(with_checkpoint, map_location="cpu") model.load_state_dict(checkpoint) print("Checkpoint loaded!", end="\r") return model def modify_label(text, label_): "the modify relations function" label=label_.split(" ") loc1=text.find(label[0]) loc2=text.find(label[-1])+len(label[-1]) if loc1>loc2: return label_ return text[loc1:loc2] def locate_label_string(text, label_, fill_value=1): """ Finds the label in the text """ if not label_ in text: label=modify_label(text, label_) else: label=label_ len_label = len(label) - 1 candidates_idx = [i for i, e in enumerate(text) if e == label[1]] for idx in candidates_idx: if " " + text[idx: idx + len_label] == label: idx_start = idx idx_end = idx + len_label break assert ( text[idx_start:idx_end] == label[1:] ), f'"{text[idx_start: idx_end]}" instead of "{label}" in "{text}"' char_targets = np.zeros(len(text)) char_targets[idx_start:idx_end] = fill_value return idx_start, idx_end, char_targets def locate_label_tokens(offsets, char_targets): """ Finds the tokens corresponding to the found labels """ target_idx = [] for idx, (offset1, offset2) in enumerate(offsets): if sum(char_targets[offset1:offset2]) > 0: target_idx.append(idx) if not len(target_idx): for idx, (offset1, offset2) in enumerate(offsets): if sum(char_targets[offset1:offset2]) > 0: target_idx.append(idx) return target_idx[0], target_idx[-1] def preprocess(text, entities, tokenizer, tokens, max_len=128): """Preprocessing required for the input to transformer and to reconstruct output from it.""" text = " " + " ".join(str(text).split()) entities=[" "+i for i in entities] label_locations=[] for idx, l in enumerate(entities): label_locations.append(locate_label_string(text, l, fill_value=1)) tokenized = tokenizer.encode(text) input_ids_text = tokenized.ids[1:-1] offsets = tokenized.offsets[1:-1] label_location_in_tokens=[] start_=0 end_=0 for label_location in label_locations: target_start, target_end=locate_label_tokens(offsets, label_location[2]) start_=min(start_, target_start) end_=max(end_, target_end) label_location_in_tokens.append((target_start, target_end)) input_ids = ( [tokens["cls"]] + input_ids_text[:max_len - 2] + [tokens["sep"]]) text_offsets = [(0, 0)] + offsets[:max_len - 2] + [(0, 0)] label_location_in_tokens=[(i[0]+1, i[1]+1) for i in label_location_in_tokens] padding_length = max_len - len(input_ids) if padding_length > 0: input_ids = input_ids + ([tokens["pad"]] * padding_length) text_offsets = text_offsets + ([(0, 0)] * padding_length) return { "input_ids":input_ids, "label_location_in_tokens":label_location_in_tokens, "text":text, "label":entities, "offsets":text_offsets, }Datasetclass SentenceDataset(Dataset): def __init__(self, df, tokenizer, tokens, max_len=128): self.tokenizer = tokenizer self.tokens=tokens self.max_len = max_len self.texts = df["sentence"] self.labels = df["triple"].map(eval) def __len__(self): return len(self.texts) def __getitem__(self, idx): data=preprocess(self.texts.iloc[idx], self.labels.iloc[idx], tokenizer=self.tokenizer, tokens=self.tokens, max_len=self.max_len) label=[0]*len(data['input_ids']) for idx, location in enumerate(data["label_location_in_tokens"]): if location[0]==location[1]: label[location[0]]=idx+1 else: 
label[location[0]:location[1]]=[idx+1]*(location[1]-location[0]) return { "input_ids":torch.tensor(data["input_ids"], dtype=torch.long), "label":torch.nn.functional.one_hot(torch.tensor(label), num_classes=4).float(), "text":data['text'], "offsets":data['offsets'] }Modelclass Transformer(nn.Module): def __init__(self, model, maxlen=128): super().__init__() self.name = model model_type, config_type=TRANSFORMERS[model]['model_config'] if Config.pretrained: self.transformer=model_type.from_pretrained(model, output_hidden_states=True, num_labels=Config.num_labels) else: config_file=TRANSFORMERS[model]['config'] config=config_type.from_json_file(config_file) config.num_labels=Config.num_labels config.output_hidden_states=True self.transformer=model_type(config) self.nb_features = self.transformer.pooler.dense.out_features if "roberta" in self.name: self.pad_idx=1 else: self.pad_idx=0 self.logits = nn.Sequential( nn.Linear(self.nb_features, self.nb_features), nn.Tanh(), nn.Linear(self.nb_features, Config.num_labels), ) def forward(self, input_ids, attention_mask=None): hidden_states = self.transformer( input_ids, attention_mask=(input_ids != self.pad_idx).long(), )[-1] features = hidden_states[-1] logits = torch.sigmoid(self.logits(features)) return logitsFittingdef fit(model,train_dataset,val_dataset, fold, epochs,batch_size, weight_decay=0,warmup_prop=0.0,lr=5e-4): """Batchwise training and validation iterations.""" train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=NUM_WORKERS) val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=NUM_WORKERS) opt_params = [] no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] for n, p in model.named_parameters(): wd = 0 if any(nd in n for nd in no_decay) else weight_decay opt_params.append( {"params": [p], "weight_decay": wd, "lr": lr} ) optimizer = AdamW(opt_params, lr=lr, betas=(0.5, 0.999)) n_steps=epochs*len(train_loader) num_warmup_steps = int(warmup_prop * n_steps) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, n_steps) # scheduler=ReduceLROnPlateau(optimizer, mode='min', factor=0.03, patience=2, threshold=0.001, threshold_mode='rel', cooldown=0, min_lr=1e-6, eps=1e-08, verbose=False) total_steps = 0 epoch=0 loss_function=nn.BCELoss() save_log(["\n",str(datetime.datetime.now()).split(".")[0],"\n", checkpoint_name()+f"_fold_{fold+1}"], logdir=get_checkpoint_dir()/f"log_{CHECKPOINT_KEYWORD}.txt") with tqdm(total=epochs, desc="Epoch {}/{}".format(epoch + 1, epochs), unit="sections", position=0,leave=True) as pbar: for epoch in range(epochs): model.train() start_time = time.time() optimizer.zero_grad() avg_loss = 0 with tqdm(total=len(train_loader), desc="training iterations", unit="batch", position=1, leave=True) as pbar2: for step, data in enumerate(train_loader): total_steps+=1 input_ids=data['input_ids'] labels=data['label'] logits=model(input_ids=input_ids.to(Config.device)) loss=loss_function(logits, labels.to(Config.device)) avg_loss += loss.item() / len(train_loader) nn.utils.clip_grad_norm_(model.parameters(), 10.0) loss.backward() optimizer.step() scheduler.step() model.zero_grad() pbar2.update() model.eval() avg_val_loss = 0. 
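# validation pass: evaluate the held-out fold with gradients disabled, accumulating the mean BCE loss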
preds, truths = [], [] with torch.no_grad(): with tqdm(total=len(val_loader), desc="validation iterations", unit="batch", position=2, leave=True) as pbar3: for idx_val, data in enumerate(val_loader): input_ids=data['input_ids'] labels=data['label'] logits=model(input_ids=input_ids.to(Config.device)) loss=loss_function(logits, labels.to(Config.device)) avg_val_loss += loss.item() / len(val_loader) pbar3.update() dt = time.time() - start_time lr = scheduler.get_lr()[0] # lr = optimizer.param_groups[0]['lr'] # scheduler.step(avg_val_loss) if epoch+1 in Config.save_every_epoch: save_model_weights(model, f'{checkpoint_name()}_fold-{fold+1}_epoch-{epoch+1}_{CHECKPOINT_KEYWORD}.pt', cp_folder=get_checkpoint_dir()) log_lr=f"Epoch {epoch + 1}/{epochs} \t lr={lr:.1e} \t t={dt:.0f}s \t \n" print(log_lr) log_score=f"loss={avg_loss:.3f}\t val_loss={avg_val_loss:.3f} \n" print(log_score) save_log([log_lr, log_score], logdir=get_checkpoint_dir()/f"log_{CHECKPOINT_KEYWORD}.txt") pbar.update() del loss, data, avg_val_loss, avg_loss, train_loader, val_loader if Config.device != "cpu": torch.cuda.empty_cache() gc.collect() return predsK-folddef k_fold(df, save=True, config=None): """K-fold training""" kf=KFold(n_splits=config.k) X=list(range(len(df))) folds=list(kf.split(X=X)) tokenizer = BertWordPieceTokenizer( "dataset/HuggingFace/Bert/bert_base_uncased_vocab.txt", lowercase=False ) tokens = { 'cls': tokenizer.token_to_id('[CLS]'), 'sep': tokenizer.token_to_id('[SEP]'), 'pad': tokenizer.token_to_id('[PAD]'), } seed_everything(config.seed) for fold, (train_idx, val_idx) in enumerate(folds): if fold in config.selected_folds: score = 0 model=Transformer(Config.model).to(config.device) # if not model.transformer.config.pad_token_id: # model.transformer.config.pad_token_id=tokenizer.eos_token_id if Config.checkpoints: if len(Config.checkpoints)==1: print("Loading common checkpoint") checkpoint=torch.load(Config.checkpoints[0], map_location="cpu") elif len(Config.checkpoints)>1: print("Loading fold checkpoint") checkpoint=torch.load(Config.checkpoints[fold], map_location="cpu") model.load_state_dict(checkpoint) print("loaded checkpoint") else: print("No checkpoint provided..!") pass if Config.freeze_main: frozen=0 for name, param in model.named_parameters(): if any(model_name in name for model_name in Config.model_names): param.requires_grad=False frozen+=1 print(f"{frozen} layers frozen!") model.zero_grad() print(f"\n------------- Fold {fold + 1} / {len(folds)} -------------\n") train_dataset=SentenceDataset(df=df.iloc[train_idx], tokenizer=tokenizer, tokens=tokens, max_len=Config.max_len) val_dataset=SentenceDataset(df=df.iloc[val_idx], tokenizer=tokenizer, tokens=tokens, max_len=Config.max_len) n_parameters = count_params(model) print(f">>{len(train_dataset)} training texts<<") print(f">>{len(val_dataset)} validation texts<<") print(f">>{n_parameters} trainable parameters<<\n") preds = fit(model, train_dataset, val_dataset,epochs=config.epochs, batch_size=config.batch_size, weight_decay=config.weight_decay, lr=config.lr, warmup_prop=config.warmup_prop, fold=fold) if save: save_model_weights(model, f'{checkpoint_name()}_ fold - {fold+1}_{CHECKPOINT_KEYWORD}.pt', cp_folder=get_checkpoint_dir()) del model, train_dataset, val_dataset if Config.device!="cpu": torch.cuda.empty_cache() gc.collect()traindf=pd.read_csv("dataset/squad_train.csv") len(df) import pdb CHECKPOINT_KEYWORD="cls_only" folds=k_fold(df,save=True, config=Config)Ejericio 4 - Representación de 
Grafos![Ejericio%20Grafos.png](attachment:Ejericio%20Grafos.png) Grafo como Matriz de AdyacenciaadjMatrix = [[0,0,1,0,0,0], [0,0,0,1,0,0], [0,0,0,1,1,0], [0,0,0,0,0,1], [0,0,0,0,0,1], [0,0,0,0,0,0] ] adjMatrixGrafo como Lista de AdyacenciaadjList = [[2], [3], [3,4], [5], [5], [] ] adjListPyplot-stylex = np.linspace(0, 2, 100) x plt.plot(x, x, label='linear') plt.plot(x, x**2, label='quadratic') # etc. plt.plot(x, x**3, label='cubic') plt.xlabel('x label') plt.ylabel('y label') plt.title("Simple Plot") plt.grid(True,color='k') plt.legend() plt.plot(x, x,'g', label='linear',linewidth=9)#x,x,'colour,label='**',linewidth=number plt.plot(x, x**2, label='quadratic',linewidth=9) # etc. plt.plot(x, x**3, label='cubic',linewidth=9) plt.plot(x, x**4,'y', label='polynomial',linewidth=9) plt.xlabel('x label') plt.ylabel('y label') plt.title("Simple Plot") plt.grid(True,color='k') plt.legend()Tipos básicos# y esto es un entero e = 23 # podemos comprobarlo con la función type type(e) # 027 octal = 23 en base 10 entero = 0o27 type(entero) # 0×17 hexadecimal = 23 en base 10 entero = 0x17 type(entero) # numeros reales real = 0.2703 type(real) # numeros reales real = 0.1e-3 type(real) # numeros complejos complejo = 2.1 + 7.8j type(complejo)Operadores aritméticos# Suma r = 3 + 2 # r es 5 print (r) # Resta r = 4 - 7 # r es -3 print (r) # Negación r = -7 # r es -7 print (r) # Multiplicación r = 2 * 6 # r es 12 print (r) # Multiplicación r = 2 ** 6 # r es 64 print (r) # División r = 3.5 / 2 # r es 1.75 print (r) # División entera r = 3.5 // 2 # r es 1.0 print (r) # Módulo. El operador de módulo no hace otra cosa que devolvernos el resto de la división entre los dos operandos. r = 7 % 2 # r es 1 print (r)1Operadores a nivel de bit# Por ejemplo, si veis una operación como 3 & 2, lo que estais viendo es un and bit a bit entre los números binarios 11 y 10 (las representaciones en binario de 3 y 2). # and r = 3 & 2 # r es 2 print (r) # or r = 3 | 2 # r es 3 print (r) # xor r = 3 ^ 2 # r es 1 print (r) # not r = ~3 # r es -4 print (r) # Desplazamiento izq r = 3 << 1 # r es 6 print (r) # Desplazamiento der. r = 3 >> 1 # r es 1 print (r)1Cadenasprint("Las cadenas no son más que texto encerrado entre comillas simples ('cadena') o dobles (\"cadena\"). Dentro de las comillas se pueden añadir caracteres especiales escapándolos con \\, como \\n, el carácter de nueva línea, o \\t, el de tabulación.") print ("Una cadena puede estar precedida por el carácter u o el carácter r, los cuales indican, respectivamente, que se trata de una cadena que utiliza codificación Unicode y una cadena raw (del inglés, cruda). \n\nLas cadenas raw se distinguen de las normales en que los caracteres escapados mediante la barra invertida (\) no se sustituyen por sus contrapartidas. Esto es especialmente útil, por ejemplo, para las expresiones regulares, como veremos en el capítulo correspondiente.") unicode = u"äóè" raw = r"\n" print (unicode) print (raw) # Podremos escribir el texto en varias líneas, y al imprimir la cadena triple = """primera linea esto se vera en otra linea""" print (triple) print ("Las cadenas también admiten operadores como +, que funciona realizando una concatenación de las cadenas utilizadas como operandos y *, en la que se repite la cadena tantas veces como lo indique el número utilizado como segundo operando.") a = "uno" b = "dos" c = a + b # c es "unodos" print (c) c = a * 3 # c es "unounouno" print (c)unodos unounounoBooleanos# and ¿se cumple a y b? r = True and False # r es False print (r) # or ¿se cumple a o b? 
r = True or False # r es True print (r) # not No a r = not True # r es False print (r) print ("Los valores booleanos son además el resultado de expresiones que utilizan operadores relacionales (comparaciones entre valores):") # == ¿son iguales a y b? r = 5 == 3 # r es False print (r) # != ¿son distintos a y b? r = 5 != 3 # r es True print (r) # < ¿son distintos a y b? r = 5 < 3 # r es False print (r) # > ¿es a mayor que b? r = 5 > 3 # r es True print (r) # <= ¿es a menor o igual que b? r = 5 <= 5 # r es True print (r) # >= ¿es a mayor o igual que b? r = 5 >= 3 # r es True print (r)TrueWhat files do I need?You'll need train.csv, test.csv and sample_submission.csv.What should I expect the data format to be?Each sample in the train and test set has the following information: The text of a tweet A keyword from that tweet (although this may be blank!) The location the tweet was sent from (may also be blank)What am I predicting?You are predicting whether a given tweet is about a real disaster or not. If so, predict a 1. If not, predict a 0.Files train.csv - the training set test.csv - the test set sample_submission.csv - a sample submission file in the correct formatColumns id - a unique identifier for each tweet text - the text of the tweet location - the location the tweet was sent from (may be blank) keyword - a particular keyword from the tweet (may be blank) target - in train.csv only, this denotes whether a tweet is about a real disaster (1) or not (0)import re # for regular expressions import pandas as pd pd.set_option("display.max_colwidth", 200) import numpy as np import matplotlib.pyplot as plt import seaborn as sns import string import nltk # for text manipulation from nltk.stem.porter import * from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from tqdm import tqdm #from gensim.models.doc2vec import LabeledSentence #import gensim from sklearn.linear_model import LogisticRegression from scipy import stats from sklearn import metrics from sklearn.metrics import mean_squared_error,mean_absolute_error, make_scorer,classification_report,confusion_matrix,accuracy_score,roc_auc_score,roc_curve from sklearn.model_selection import train_test_split,cross_val_score,KFold from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score from sklearn.naive_bayes import BernoulliNB from sklearn import svm from sklearn.ensemble import RandomForestClassifier #from xgboost import XGBClassifier #import xgboost as xgb import warnings warnings.filterwarnings("ignore") %matplotlib inline import sys stdout = sys.stdout # some functions that mess up sys.stdout sys.stdout = stdout IS_CREDIBLE_DISASTER = 0 # init # a generator creates fake data # a discriminator compares that to real data # each is scored and improved until generator data is above acceptable threshhold import tqdm from tqdm import tqdm from keras.preprocessing.text import text_to_word_sequence # define the text text = train_data # tokenizing the text tokens = text_to_word_sequence(text) print(tokens[0]) # Load CSV (using python) import csv import numpy import pandas as pd from textblob import TextBlob filename = '/Users/grahamwaters/Documents/GitHub Main Repository/KAGGLE_COMPETITIONS/DISASTERS/test.csv' #reader = csv.reader(raw_data, delimiter=',', quoting=csv.QUOTE_NONE) df_test = pd.read_csv(filename) filename = '/Users/grahamwaters/Documents/GitHub Main Repository/KAGGLE_COMPETITIONS/DISASTERS/train.csv' #reader = csv.reader(raw_data, delimiter=',', quoting=csv.QUOTE_NONE) df_train = 
pd.read_csv(filename) df_train train_data = pd.array(df_train['text']) test_data = pd.array(df_test['text']) test_data # for the train data set tweets are under header: 'text' and there is another header: 'target' which shows legit or illegit #merge = train_data.append(test_data,ignore_index = True, sort = False) import pandas as pd import re from string import punctuation def preprocess_text(text): text = text.lower() # Lowercase text text = re.sub(f"[{re.escape(punctuation)}]", "", text) # Remove punctuation text = " ".join(text.split()) # Remove extra spaces, tabs, and new lines return text from textblob import Word # for lemmatization import nltk def blobber(df,text): text = preprocess_text(text) # pass an entry # and a tweet, it disects it and returns a df blob = TextBlob(text) # lemmatization wordlist = blob.words upd_wordlist = [] for w in wordlist: upd_wordlist.append(w.lemmatize()) #print(text[0:10]) #print('polarity_score:',blob.polarity) #print('sentiment_score:',blob.sentiment) #print('words_score:',blob.words) df[str(text)] = { 'tags': blob.tags, 'phrases':blob.noun_phrases, 'sentiment':blob.sentiment, 'words':blob.words, 'lemmatized_words':upd_wordlist, 'sentences':blob.sentences, # sentences 'tweet_length':len(str(text)) # length of tweet } return df import tqdm from tqdm import tqdm df_test = {} print("building test data ...") for tweet_text in tqdm(test_data): df_test = blobber(df_test,tweet_text) df_training = {} print("building training data ...") for tweet_text in tqdm(train_data): df_training = blobber(df_training,tweet_text) from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer bow_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english') bow = bow_vectorizer.fit_transform(df_training['text'])now build model based on the train set which has the answers already. 
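As a hedged sketch of that modeling step (the notebook's own cells continue under "Logistic Regression Modeling" below), one way to fit a baseline is to vectorize the raw `text` column of `df_train`, hold out a validation split, and score a logistic regression; the variable names here are illustrative and this is not the exact pipeline used later.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score

# bag-of-words features from the training tweets loaded above
vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
X = vectorizer.fit_transform(df_train['text'])
y = df_train['target']

# hold out 20% for validation and fit a simple baseline classifier
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
clf = LogisticRegression(max_iter=1000)
clf.fit(X_tr, y_tr)
print('validation F1:', f1_score(y_val, clf.predict(X_val)))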
Logistic Regression Modelingtrain_bow = bow[:]Compare accuraciesdef get_acc_with_std(preds, targets, n_samples, n=100): n = preds.shape[0] accuracies = [] for i in range(n): indices = np.random.choice(list(range(n)), size=n_samples, replace=False) acc = accuracy_score(targets, preds[indices].mean(axis=0).argmax(axis=1)) accuracies.append(acc) return np.mean(accuracies), np.std(accuracies), np.percentile(accuracies, 5), np.percentile(accuracies, 95)CIFAR10predictions, targets = get_preds('3rxjjlx1') # Ensemble CIFAR10 acc_single = [] acc_ensemble = [] for i in range(predictions.shape[0]): acc_single.append(accuracy_score(targets, predictions[-i-1].argmax(axis=1))) acc_ensemble.append(accuracy_score(targets, predictions[-i-1:].mean(axis=0).argmax(axis=1))) acc_swa = [] for i in range(2,26): preds, _ = get_preds('8mvqdjc1', f'_k{i}') # SWA CIFAR10 acc_swa.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) # SWAG acc_swag = [] k_swag = [3, 5, 8, 10, 16] preds, _ = get_preds('2sjbgi3y') # SWAG 256, k=3 acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag.append(acc) preds, _ = get_preds('3vkd6gg2') # SWAG 256, k=5 (also 3mgr2rnt, different seed) acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag.append(acc) preds, _ = get_preds('11t47era') # SWAG 256, k=8 acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag.append(acc) preds, _ = get_preds('1tc0el95') # SWAG 256, k=10 acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag.append(acc) preds, _ = get_preds('wu6eg434') # SWAG 128, k=16 acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag.append(acc) # plt.figure(figsize=(10,6)) plt.figure(figsize=(4.5,3)) k = np.arange(1, predictions.shape[0] + 1) plt.plot(k, acc_single, 'k--', label='single checkpoint') plt.plot(k[1:26], acc_ensemble[1:26], 'y', label='ensemble') plt.plot(k[1:25], acc_swa, 'c', label='swa') plt.plot(k_swag, acc_swag, 'k.:', label='swag N=64') plt.xlabel('K last checkpoints') plt.ylabel('accuracy (CIFAR10)') plt.xticks([0,5,10,15,20,25]) plt.legend() plt.tight_layout() preds1, targets = get_preds('3vkd6gg2') # SWAG 256, k=5 preds2, _ = get_preds('3mgr2rnt') # SWAG 256, k=5 (different seed) preds = np.concatenate([preds1, preds2], axis=0) del preds1 del preds2 samples = [] accuracies = [] stds = [] los = [] his = [] for i in tqdm([2,4,8,16,32,64,128,256,512]): acc, std, lo5, hi5 = get_acc_with_std(preds, targets, n_samples=i, n=200) accuracies.append(acc) los.append(lo5) his.append(hi5) samples.append(i) plt.figure(figsize=(4,3)) plt.plot(samples, accuracies, 'ko:',label='swag k=5') # omit last few because sampling without replacement from total of 512 # TODO: ask if this is ok? 
plt.plot(samples[:-3], los[:-3], 'k_', label='5th percentile') plt.plot(samples[:-3], his[:-3], 'k_', label='95th percentile') plt.xlabel('N samples') plt.ylabel('accuracy (CIFAR10)') plt.legend() plt.xscale('log') plt.xticks([2,4,8,16,32,64,128,256,512], [2,4,8,16,32,64,128,256,512]); plt.tight_layout()CIFAR100predictions, targets = get_preds('6rur0243') # Ensemble CIFAR100 acc_single = [] acc_ensemble = [] for i in range(predictions.shape[0]): acc_single.append(accuracy_score(targets, predictions[-i-1].argmax(axis=1))) acc_ensemble.append(accuracy_score(targets, predictions[-i-1:].mean(axis=0).argmax(axis=1))) acc_swa = [] for i in range(2,22): preds, _ = get_preds('373xmyi4', f'_k{i}') # SWA CIFAR100 acc_swa.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) swag_k = [2,3,4,5,6,7,8,9,10,16] acc_swag64 = [] for i in range(2,5): preds, _ = get_preds('3l03q84b', f'_k{i}') # SWAG CIFAR100 K = {2,3,4} acc_swag64.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) preds, _ = get_preds('1l1zic13', '_k5') # SWAG CIFAR100 K=5 acc_swag64.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) for i in range(6,10): preds, _ = get_preds('d6790168', f'_k{i}') # SWAG CIFAR100 K= {6 - 9} acc_swag64.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) preds, _ = get_preds('3nmg5cky') # SWAG, K=10 (128) print(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=64) acc_swag64.append(acc) preds, _ = get_preds('36ykfzm1') # SWAG, K=16 (64) acc_swag64.append(accuracy_score(targets, preds.mean(axis=0).argmax(axis=1))) acc, std, _ , _ = get_acc_with_std(preds, targets, n_samples=16) # plt.figure(figsize=(10,6)) plt.figure(figsize=(4.5,3)) k = np.arange(1, predictions.shape[0] + 1) plt.plot(k, acc_single, 'k--', label='single checkpoint') plt.plot(k, acc_ensemble, 'y', label='ensemble') plt.plot(k[1:21], acc_swa, 'c', label='swa') plt.plot(swag_k, acc_swag64, 'k.:', label='swag N=64') plt.xlabel('K last checkpoints') plt.ylabel('accuracy (CIFAR100)') plt.legend() plt.xticks([0,5,10,15,20]) preds1, targets = get_preds('f68xa8fk') # SWAG 256, k=5 preds2, _ = get_preds('65r3pymj') # SWAG 256, k=5 (different seed) preds = np.concatenate([preds1, preds2], axis=0) del preds1 del preds2 samples = [] accuracies = [] stds = [] los = [] his = [] for i in tqdm([2,4,8,16,32,64,128,256,512]): acc, std, lo5, hi5 = get_acc_with_std(preds, targets, n_samples=i, n=200) accuracies.append(acc) los.append(lo5) his.append(hi5) samples.append(i) plt.figure(figsize=(4,3)) plt.plot(samples, accuracies, 'ko:',label='swag k=5') # omit last few because sampling without replacement from total of 512 # TODO: ask if this is ok? 
plt.plot(samples[:-3], los[:-3], 'k_', label='5th percentile') plt.plot(samples[:-3], his[:-3], 'k_', label='95th percentile') plt.xlabel('N samples') plt.ylabel('accuracy (CIFAR100)') plt.legend() plt.xscale('log') plt.xticks([2,4,8,16,32,64,128,256,512], [2,4,8,16,32,64,128,256,512]);Plot calibration curves CIFAR10from sklearn.calibration import calibration_curve import matplotlib def plot_calibration_curve(probabilities, targets, label=None, line=':.'): max_probs = probabilities.max(axis=1) correct = probabilities.argmax(axis=1) == targets # scale the x axis to get nice spacing xscale_fn = lambda x: -np.log10(1-x*0.999) tick_labels = np.array([0.2, 0.7, 0.9, 0.97, 0.99, 0.996, 0.999]) # tick_labels = (1-np.power(10, - np.linspace(0.1,3,10)))/0.999 tick_placement = xscale_fn(tick_labels) # plt.xticks(tick_placement, np.round(tick_labels,3)) plt.xticks(tick_placement, tick_labels) # plot reference at 0 plt.plot(xscale_fn(np.array([0, 1])), [0, 0], "k--") # calibration curve prob_true, prob_pred = calibration_curve(correct, max_probs, n_bins=20, strategy='quantile') plt.plot(xscale_fn(prob_pred), prob_pred - prob_true, line, label=label) plt.ylabel('Confidence - Accuracy') plt.xlabel('Confidence') predictions, targets = get_preds('3rxjjlx1') # Ensemble CIFAR10 swa_20, _ = get_preds('8mvqdjc1', f'_k20') # SWA 20 # SWAG preds1, _ = get_preds('3vkd6gg2') # SWAG 256, k=5 preds2, _ = get_preds('3mgr2rnt') # SWAG 256, k=5 (different seed) swag_5 = np.concatenate([preds1, preds2], axis=0) del preds1 del preds2 swag_8, _ = get_preds('11t47era') # SWAG 128, k=8 swag_16, _ = get_preds('wu6eg434') # SWAG 128, k=16 single = predictions[-1] # ensemble_2 = predictions[-2:].mean(axis=0) ensemble_5 = predictions[-5:].mean(axis=0) ensemble_8 = predictions[-8:].mean(axis=0) ensemble_16 = predictions[-16:].mean(axis=0) # plt.figure(figsize=(12,12)) plt.figure(figsize=(6,4)) plot_calibration_curve(single, targets, label='SGD', line='k.--') plot_calibration_curve(swa_20[0], targets, label='SWA k=20', line='c:.') plot_calibration_curve(ensemble_5, targets, label='ensemble k=5', line='r:.') # plot_calibration_curve(ensemble_8, targets, label='ensemble k=8', line=':.') plot_calibration_curve(ensemble_16, targets, label='ensemble k=16', line='g:.') plot_calibration_curve(swag_5.mean(axis=0), targets, label='swag k=5 (512)', line='rd-') # plot_calibration_curve(swag_5[:32].mean(axis=0), targets, label='swag k=5 (32)', line='d-.') # plot_calibration_curve(swag_5[:128].mean(axis=0), targets, label='swag k=5 (128)', line='d-.') plot_calibration_curve(swag_16.mean(axis=0), targets, label='swag k=16 (128)', line='gd-') # plot_calibration_curve(swag_8.mean(axis=0), targets, label='swag k=8 (128)', line='d-.') plt.legend() plt.xlim((0.25, -np.log10(1-0.9991))) plt.title('Calibration curve (VGG16 on CIFAR10)')CIFAR100predictions, targets = get_preds('6rur0243') # Ensemble CIFAR100 swa_20, _ = get_preds('373xmyi4', f'_k20') # SWA 20 single = predictions[-1] ensemble_2 = predictions[-2:].mean(axis=0) ensemble_5 = predictions[-5:].mean(axis=0) ensemble_8 = predictions[-8:].mean(axis=0) ensemble_16 = predictions[-16:].mean(axis=0) ensemble_20 = predictions[-20:].mean(axis=0) # SWAG preds1, _ = get_preds('f68xa8fk') # SWAG 256, k=5 preds2, _ = get_preds('65r3pymj') # SWAG 256, k=5 (different seed) swag_5 = np.concatenate([preds1, preds2], axis=0) del preds1 del preds2 swag_8, _ = get_preds('d6790168', f'_k8') # SWAG 64, k=8 swag_16, _ = get_preds('36ykfzm1') # SWAG 128, k=16 # plt.figure(figsize=(12,12)) 
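# calibration curves for VGG16 on CIFAR100: single SGD checkpoint vs. SWA k=20, ensembles, and SWAG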
plt.figure(figsize=(6,4)) plot_calibration_curve(single, targets, label='SGD', line='k.--') plot_calibration_curve(swa_20[0], targets, label='SWA k=20', line='c:.') plot_calibration_curve(ensemble_5, targets, label='ensemble k=5', line='r:.') # plot_calibration_curve(ensemble_8, targets, label='ensemble k=8', line='b:.') plot_calibration_curve(ensemble_16, targets, label='ensemble k=16', line='g:.') plot_calibration_curve(swag_5.mean(axis=0), targets, label='swag k=5 (512)', line='rd-') # plot_calibration_curve(swag_5[:32].mean(axis=0), targets, label='swag k=5 (32)', line='rd-') # plot_calibration_curve(swag_5[:128].mean(axis=0), targets, label='swag k=5 (128)', line='d-.') plot_calibration_curve(swag_16.mean(axis=0), targets, label='swag k=16 (128)', line='gd-') # plot_calibration_curve(swag_8.mean(axis=0), targets, label='swag k=8 (128)', line='bd-') plt.legend() plt.xlim((0.25, -np.log10(1-0.9991))) plt.legend() plt.title('Calibration curve (VGG16 on CIFAR100)')Confidence on OOD samplesimport seaborn as sns def plot_prob_distributions(predictions, ax=None): for label, probs in predictions: sns.distplot(probs.max(axis=1), kde=False, norm_hist=True, label=label, bins=np.linspace(0,1, 50), ax=ax) # plt.legend() predictions, t100 = get_preds('6rur0243') # Ensemble CIFAR100 on CIFAR100 predictions_svhn, _ = get_preds('zo487s5s') # Ensemble CIFAR100 on SVHN predictions_n, _ = get_preds('16w8wx06') # Ensemble CIFAR100 on noise predictions10, t10 = get_preds('3rxjjlx1') # Ensemble CIFAR10 on CIFAR10 predictions10_svhn, _ = get_preds('vyoc1t1f') # Ensemble CIFAR10 on SVHN predictions10_n, _ = get_preds('3brh34y2') # Ensemble CIFAR10 on noise swag = get_preds('1v32yl0c')[0].mean(axis=0) # SWAG k=8 (128) CIFAR100 on CIFAR100 p1 = get_preds('2n2a361m')[0] # SWAG k=8 (64 + 64) CIFAR100 on SVHN p2 = get_preds('4q338z8o')[0] swag_svhn = np.concatenate([p1,p2], axis=0).mean(axis=0) swag_n = get_preds('1hxim8dr')[0].mean(axis=0) # SWAG k=8 (128) CIFAR100 on noise swag10 = get_preds('11t47era')[0].mean(axis=0) # SWAG k=8 (128) CIFAR10 on CIFAR10 swag10_svhn = get_preds('2tk9zcgt')[0].mean(axis=0) # SWAG k=8 (128) CIFAR10 on SVHN swag10_n = get_preds('yp7nmltk')[0].mean(axis=0) # SWAG k=8 (128) CIFAR10 on noise # CIFAR100 single = predictions[-1] ensemble_2 = predictions[-2:].mean(axis=0) ensemble_8 = predictions[-8:].mean(axis=0) ensemble_20 = predictions[-20:].mean(axis=0) single_svhn = predictions_svhn[-1] ensemble_2_svhn = predictions_svhn[-2:].mean(axis=0) ensemble_8_svhn = predictions_svhn[-8:].mean(axis=0) ensemble_20_svhn = predictions_svhn[-20:].mean(axis=0) single_n = predictions_n[-1] ensemble_2_n = predictions_n[-2:].mean(axis=0) ensemble_8_n = predictions_n[-8:].mean(axis=0) ensemble_20_n = predictions_n[-20:].mean(axis=0) # CIFAR10 single10 = predictions10[-1] ensemble10_2 = predictions10[-2:].mean(axis=0) ensemble10_8 = predictions10[-8:].mean(axis=0) ensemble10_20 = predictions10[-20:].mean(axis=0) single10_svhn = predictions10_svhn[-1] ensemble10_2_svhn = predictions10_svhn[-2:].mean(axis=0) ensemble10_8_svhn = predictions10_svhn[-8:].mean(axis=0) ensemble10_20_svhn = predictions10_svhn[-20:].mean(axis=0) single10_n = predictions10_n[-1] ensemble10_2_n = predictions10_n[-2:].mean(axis=0) ensemble10_8_n = predictions10_n[-8:].mean(axis=0) ensemble10_20_n = predictions10_n[-20:].mean(axis=0) single_mask = np.argmax(single, axis=1) == t100 ensemble_2_mask = np.argmax(ensemble_2, axis=1) == t100 ensemble_10_mask = np.argmax(ensemble_10, axis=1) == t100 ensemble_20_mask = 
np.argmax(ensemble_20, axis=1) == t100 swag_mask = np.argmax(swag, axis=1) == t100 mask = single_mask & ensemble_20_mask & swag_mask single10_mask = np.argmax(single10, axis=1) == t10 ensemble10_2_mask = np.argmax(ensemble10_2, axis=1) == t10 ensemble10_10_mask = np.argmax(ensemble10_10, axis=1) == t10 ensemble10_20_mask = np.argmax(ensemble10_20, axis=1) == t10 swag10_mask = np.argmax(swag10, axis=1) == t10 mask = single10_mask & ensemble10_20_mask & swag10_maskplot confidence distributions (for the maximum probability)plt.figure(figsize=(7,3)) fig, (ax1,ax2, ax3) = plt.subplots(ncols=3, sharey=True) # frameon=False removes frames # plt.subplot(1,3,1) ax1.set_title('single model') plot_prob_distributions([('CIFAR100', single), ('SVHN (OOD)', single_svhn), ('Gaussian (OOD)', single_n)], ax=ax1) # plt.ylim((0,26)) # plt.subplot(1,3,2) ax2.set_title('ensemble k=20') plot_prob_distributions([('CIFAR100', ensemble_20), ('SVHN (OOD)', ensemble_20_svhn), ('Gaussian (OOD)', ensemble_20_n)], ax=ax2) # plt.ylim((0,26)) # plt.subplot(1,3,3) ax3.set_title('swag k=8') plot_prob_distributions([('CIFAR100', swag), ('SVHN (OOD)', swag_svhn), ('Gaussian (OOD)', swag_n)], ax=ax3) ax3.legend() # plt.tight_layout() plt.subplots_adjust(wspace=.0) ax1.set_xticks([0,1]) ax1.set_xticks([0.5],True) ax2.set_xticks([0,1]) ax2.set_xticks([0.5],True) ax3.set_xticks([0,1]) ax3.set_xticks([0.5],True) # plt.ylim((0,26)) # plt.yscale('log') plt.figure(figsize=(10,5)) plt.subplot(1,3,1) plt.title('single model') plot_prob_distributions([('CIFAR10', single10), ('SVHN (OOD)', single10_svhn), ('Gaussian (OOD)', single10_n)]) plt.ylim((0,45)) plt.subplot(1,3,2) plt.title('ensemble k=20') plot_prob_distributions([('CIFAR10', ensemble10_20), ('SVHN (OOD)', ensemble10_20_svhn), ('Gaussian (OOD)', ensemble10_20_n)]) plt.ylim((0,45)) plt.subplot(1,3,3) plt.title('swag k=8') plot_prob_distributions([('CIFAR10', swag10), ('SVHN (OOD)', swag10_svhn), ('Gaussian (OOD)', swag10_n)]) plt.ylim((0,45))/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). 
warnings.warn(msg, FutureWarning)Entropy for in and out of domainfrom scipy.stats import entropy print('Entropy CIFAR100 VGG16') print('\nSingle model') print('CIFAR100:', entropy(single.T).mean()) # print('CIFAR100:', entropy(single[single_mask].T).mean()) # print('CIFAR100:', entropy(single[~single_mask].T).mean()) print('SVHN:', entropy(single_svhn.T).mean()) print('Gaussian:', entropy(single_n.T).mean()) print('\nEnsemble k=2') print('CIFAR100:', entropy(ensemble_2.T).mean()) print('SVHN:', entropy(ensemble_2_svhn.T).mean()) print('Gaussian:', entropy(ensemble_2_n.T).mean()) print('\nEnsemble k=20') print('CIFAR100:', entropy(ensemble_20.T).mean()) print('SVHN:', entropy(ensemble_20_svhn.T).mean()) print('Gaussian:', entropy(ensemble_20_n.T).mean()) print('\nSWAG k=8 (128)') print('CIFAR100:', entropy(swag.T).mean()) # print('CIFAR100:', entropy(swag[swag_mask].T).mean()) # print('CIFAR100:', entropy(swag[~swag_mask].T).mean()) print('SVHN:', entropy(swag_svhn.T).mean()) print('Gaussian:', entropy(swag_n.T).mean()) print('\nEntropy CIFAR10 VGG16') print('\nSingle model') print('CIFAR10:', entropy(single10.T).mean()) print('SVHN:', entropy(single10_svhn.T).mean()) print('Gaussian:', entropy(single10_n.T).mean()) print('\nEnsemble k=2') print('CIFAR10:', entropy(ensemble10_2.T).mean()) print('SVHN:', entropy(ensemble10_2_svhn.T).mean()) print('Gaussian:', entropy(ensemble10_2_n.T).mean()) print('\nEnsemble k=20') print('CIFAR10:', entropy(ensemble10_20.T).mean()) print('SVHN:', entropy(ensemble10_20_svhn.T).mean()) print('Gaussian:', entropy(ensemble10_20_n.T).mean()) print('\nSWAG k=8 (128)') print('CIFAR10:', entropy(swag10.T).mean()) print('SVHN:', entropy(swag10_svhn.T).mean()) print('Gaussian:', entropy(swag10_n.T).mean())Entropy CIFAR100 VGG16 Single model CIFAR100: 0.36590943 CIFAR100: 0.14601204 CIFAR100: 0.8030105 SVHN: 1.0064511 Gaussian: 0.70562893 Ensemble k=2 CIFAR100: 0.44262382 SVHN: 1.1445429 Gaussian: 0.606499 Ensemble k=20 CIFAR100: 0.6436284 SVHN: 1.4936976 Gaussian: 1.1913989 SWAG k=8 (128) CIFAR100: 0.5235151 CIFAR100: 0.25477436 CIFAR100: 1.0993398 SVHN: 1.2464372 Gaussian: 0.71285266 Entropy CIFAR10 VGG16 Single model CIFAR10: 0.070742406 SVHN: 0.4867465 Gaussian: 0.14269099 Ensemble k=2 CIFAR10: 0.090693675 SVHN: 0.50139767 Gaussian: 0.19741191 Ensemble k=20 CIFAR10: 0.127225 SVHN: 0.6670243 Gaussian: 0.23204009 SWAG k=8 (128) CIFAR10: 0.10531652 SVHN: 0.6022755 Gaussian: 0.1957414OOD detection AUCROC (with max confidence as in-domain score) TODO: only use correctly classified samples?from sklearn.metrics import roc_auc_score, roc_curve def get_ood_aucroc(in_domain, ood): y = np.concatenate([in_domain, ood]) t = np.concatenate([np.ones_like(in_domain), np.zeros_like(ood)]) return roc_auc_score(t, y) def get_ood_roc_curve(in_domain, ood): y = np.concatenate([in_domain, ood]) t = np.concatenate([np.ones_like(in_domain), np.zeros_like(ood)]) return roc_curve(t, y) ensemble10_8_svhn, ensemble10_20_svhn # print('\nCIFAR100 vs SVHN') print(f'Single: & {get_ood_aucroc(single.max(axis=1), single_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(single.max(axis=1), single_n.max(axis=1)):.6f}' f' & {get_ood_aucroc(single10.max(axis=1), single10_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(single10.max(axis=1), single10_n.max(axis=1)):.6f} \\\\') print(f'E k=8 {get_ood_aucroc(ensemble_8.max(axis=1), ensemble_8_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble_8.max(axis=1), ensemble_8_n.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble10_8.max(axis=1), 
ensemble10_8_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble10_8.max(axis=1), ensemble10_8_n.max(axis=1)):.6f} \\\\') print(f'E k=20 {get_ood_aucroc(ensemble_20.max(axis=1), ensemble_20_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble_20.max(axis=1), ensemble_20_n.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble10_20.max(axis=1), ensemble10_20_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(ensemble10_20.max(axis=1), ensemble10_20_n.max(axis=1)):.6f} \\\\') print(f'SWAG K=8 {get_ood_aucroc(swag.max(axis=1), swag_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(swag.max(axis=1), swag_n.max(axis=1)):.6f}' f' & {get_ood_aucroc(swag10.max(axis=1), swag10_svhn.max(axis=1)):.6f}' f' & {get_ood_aucroc(swag10.max(axis=1), swag10_n.max(axis=1)):.6f} \\\\') # print('E k=2 : ', get_ood_aucroc(ensemble_2.max(axis=1), ensemble_2_svhn.max(axis=1))) # # print('E k=10: ', get_ood_aucroc(ensemble_10.max(axis=1), ensemble_10_svhn.max(axis=1))) # # print('\nCIFAR100 vs Gaussian') # print('E k=2 : ', get_ood_aucroc(ensemble_2.max(axis=1), ensemble_2_n.max(axis=1))) # # print('E k=10: ', get_ood_aucroc(ensemble_10.max(axis=1), ensemble_10_n.max(axis=1))) # # print('\nCIFAR10 vs SVHN') # print('E k=2 : ', get_ood_aucroc(ensemble10_2.max(axis=1), ensemble10_2_svhn.max(axis=1))) # # print('E k=10: ', get_ood_aucroc(ensemble10_10.max(axis=1), ensemble10_10_svhn.max(axis=1))) # # print('\nCIFAR10 vs Gaussian') # print('E k=2 : ', get_ood_aucroc(ensemble10_2.max(axis=1), ensemble10_2_n.max(axis=1))) # # print('E k=10: ', get_ood_aucroc(ensemble10_10.max(axis=1), ensemble10_10_n.max(axis=1))) fpr, tpr, thresholds = get_ood_roc_curve(single.max(axis=1), single_svhn.max(axis=1)) plt.plot(fpr, tpr, label='single') fpr, tpr, thresholds = get_ood_roc_curve(ensemble_20.max(axis=1), ensemble_20_svhn.max(axis=1)) plt.plot(fpr, tpr, label='ensemble') plt.legend() fpr, tpr, thresholds = get_ood_roc_curve(single.max(axis=1), single_n.max(axis=1)) plt.plot(fpr, tpr, label='single') fpr, tpr, thresholds = get_ood_roc_curve(ensemble_20.max(axis=1), ensemble_20_n.max(axis=1)) plt.plot(fpr, tpr, label='ensemble') fpr, tpr, thresholds = get_ood_roc_curve(swag.max(axis=1), swag_n.max(axis=1)) plt.plot(fpr, tpr, label='swag') plt.legend()Weight space visualisationspredictions10, targets10 = get_preds('1eptvyat') # CIFAR10 interpolate predictions100, targets100 = get_preds('3ji5gbi5') # CIFAR100 interpolate n_samples = 16 locations = np.arange(-1/(n_samples-2), 1 + 2/(n_samples-2), 1/(n_samples-2))[:n_samples] accuracies10 = [] accuracies100 = [] for i in range(n_samples): accuracies10.append(accuracy_score(targets10, predictions10[-i-1].argmax(axis=1))) accuracies100.append(accuracy_score(targets100, predictions100[-i-1].argmax(axis=1))) plt.figure(figsize=(10,5)) plt.subplot(1,2,1) plt.title('CIFAR10') plt.plot(locations, accuracies10, 'k.:') plt.plot([0], [accuracies10[1]], 'rx') plt.plot([1], [accuracies10[-1]], 'rx') # plt.ylabel('accuracy') # plt.ylabel('relative location between checkpoints') plt.subplot(1,2,2) plt.title('CIFAR100') plt.plot(locations, accuracies100, 'k.:') plt.plot([0], [accuracies100[1]], 'rx') plt.plot([1], [accuracies100[-1]], 'rx') # plt.savefig() predictions10.shapeNavigation---In this notebook, you will learn how to use the Unity ML-Agents environment for the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893). 1. Start the EnvironmentWe begin by importing some necessary packages. 
If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).from agents.python.unityagents import UnityEnvironment import numpy as npNext, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.- **Mac**: `"path/to/Banana.app"`- **Windows** (x86): `"path/to/Banana_Windows_x86/Banana.exe"`- **Windows** (x86_64): `"path/to/Banana_Windows_x86_64/Banana.exe"`- **Linux** (x86): `"path/to/Banana_Linux/Banana.x86"`- **Linux** (x86_64): `"path/to/Banana_Linux/Banana.x86_64"`- **Linux** (x86, headless): `"path/to/Banana_Linux_NoVis/Banana.x86"`- **Linux** (x86_64, headless): `"path/to/Banana_Linux_NoVis/Banana.x86_64"`For instance, if you are using a Mac, then you downloaded `Banana.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:```env = UnityEnvironment(file_name="Banana.app")```env = UnityEnvironment(file_name="Banana.app")INFO:unityagents: 'Academy' started successfully! Unity Academy name: Academy Number of Brains: 1 Number of External Brains : 1 Lesson number : 0 Reset Parameters : Unity brain name: BananaBrain Number of Visual Observations (per agent): 0 Vector Observation space type: continuous Vector Observation space size (per agent): 37 Number of stacked Vector Observation: 1 Vector Action space type: discrete Vector Action space size (per agent): 4 Vector Action descriptions: , , ,Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.# get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name]2. Examine the State and Action SpacesThe simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal:- `0` - walk forward - `1` - walk backward- `2` - turn left- `3` - turn rightThe state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward of `-1` is provided for collecting a blue banana. Run the code cell below to print some information about the environment.# reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size)Number of agents: 1 Number of actions: 4 States look like: [1. 0. 0. 0. 0.84408134 0. 0. 1. 0. 0.0748472 0. 1. 0. 0. 0.25755 1. 0. 0. 0. 0.74177343 0. 1. 0. 0. 0.25854847 0. 0. 1. 0. 0.09355672 0. 1. 0. 0. 0.31969345 0. 0. ] States have length: 373. Take Random Actions in the EnvironmentIn the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.Once this cell is executed, you will watch the agent's performance, if it selects an action (uniformly) at random with each time step. 
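A minimal sketch of such a random-action cell, assuming the `env`, `brain_name`, and `action_size` objects created above and the same `UnityEnvironment` API used in the training loop further below (`train_mode=False` is an assumption made here so the simulation runs at a watchable speed; the training cells in this notebook use `train_mode=True`):

```python
# watch an untrained agent take uniformly random actions for one episode
env_info = env.reset(train_mode=False)[brain_name]  # reset the environment
state = env_info.vector_observations[0]             # get the current state
score = 0                                           # initialize the score
while True:
    action = np.random.randint(action_size)         # select an action uniformly at random
    env_info = env.step(action)[brain_name]         # send the action to the environment
    next_state = env_info.vector_observations[0]    # get the next state
    reward = env_info.rewards[0]                    # get the reward
    done = env_info.local_done[0]                   # see if episode has finished
    score += reward                                 # update the score
    state = next_state                              # roll over the state to next time step
    if done:                                        # exit loop if episode finished
        break
print("Score: {}".format(score))
```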
A window should pop up that allows you to observe the agent, as it moves through the environment. Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!import torch import torch.nn as nn import torch.nn.functional as F import random class DQN(nn.Module): def __init__(self): super().__init__() layer1 = nn.Linear(37, 1024) layer2 = nn.Linear(1024, 256) layer3 = nn.Linear(256, 64) self.output_layer = nn.Linear(64, 4) self.hidden_layers = nn.ModuleList([layer1, layer2, layer3]) # weight initialization for l in self.hidden_layers: nn.init.kaiming_normal_(l.weight, nonlinearity='relu') nn.init.constant_(l.bias, 0) nn.init.xavier_normal_(self.output_layer.weight) nn.init.constant_(self.output_layer.bias, 0) def forward(self, state): x = state for l in self.hidden_layers: x = F.relu(l(x)) out = self.output_layer(x) return out from collections import deque, namedtuple class ReplayBuffer(): def __init__(self, sample_size, buffer_size=int(1e4)): self.buffer = deque(maxlen=buffer_size) self.sample_size = sample_size self.experience = namedtuple('experience', field_names=('state', 'action', 'reward', 'next_state', 'done')) def add(self, state, action, reward, next_state, done): exp = self.experience(state, action, reward, next_state, done) self.buffer.append(exp) def sample(self): experiences = random.sample(self.buffer, self.sample_size) # divide into batches states = torch.from_numpy(np.vstack([e.state for e in experiences])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences])).long().to(device) rewards = torch.from_numpy(np.vstack([e.reward for e in experiences])).float().to(device) next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences])).float().to(device) dones = torch.from_numpy(np.vstack([e.done for e in experiences]).astype(np.uint8)).float().to(device) return states, actions, rewards, next_states, dones def __len__(self): return len(self.buffer) from copy import deepcopy from torch.torchvision.transforms to device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Agent(): def __init__(self, alpha=1e-3, gamma=0.99, epsilon=0.1, tau=1e-3, action_size=action_size, batch_size=32): self.Q = DQN().to(device) self.Q_target = deepcopy(self.Q).to(device) self.buffer = ReplayBuffer(sample_size=batch_size) self.t = 0 self.action_size = action_size self.alpha = alpha self.gamma = gamma self.epsilon = epsilon self.tau = tau self.optimizer = torch.optim.Adam(self.Q.parameters(), lr=self.alpha) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) self.Q.eval() with torch.no_grad(): action_values = self.Q(state) self.Q.train() if random.random() < self.epsilon: return random.choice(range(self.action_size)) else: return np.argmax(action_values.cpu().numpy()) def step(self, state, action, reward, next_state, done): self.buffer.add(state, action, reward, next_state, done) self.t += 1 if self.t > 100 + self.buffer.sample_size: experiences = self.buffer.sample() self._update_net(experiences) def _update_net(self, experiences): states, actions, rewards, next_states, dones = experiences targets = rewards + (1 - dones) * self.gamma * self.Q_target(next_states).detach().max(1)[0].reshape((-1, 1)) predictions = self.Q(states).gather(1, actions) loss = F.mse_loss(predictions, targets) self.optimizer.zero_grad() loss.backward() self.optimizer.step() self._moving_average() def 
_moving_average(self): for target_param, param in zip(self.Q_target.parameters(), self.Q.parameters()): target_param.data.copy_((1 - self.tau) * target_param.data + self.tau * param.data) agent = Agent() for i in range(int(1e3)): env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] score = 0 while True: action = agent.act(state) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Episode {} Score: {}".format(i, score))Episode 0 Score: -1.0 Episode 1 Score: 3.0 Episode 2 Score: 0.0 Episode 3 Score: 1.0 Episode 4 Score: 0.0 Episode 5 Score: 0.0 Episode 6 Score: -1.0 Episode 7 Score: -3.0 Episode 8 Score: -1.0 Episode 9 Score: 2.0 Episode 10 Score: -2.0 Episode 11 Score: -1.0 Episode 12 Score: -2.0 Episode 13 Score: 0.0 Episode 14 Score: -2.0 Episode 15 Score: 2.0 Episode 16 Score: -4.0 Episode 17 Score: -2.0 Episode 18 Score: -3.0 Episode 19 Score: -3.0 Episode 20 Score: 3.0 Episode 21 Score: -4.0 Episode 22 Score: -2.0 Episode 23 Score: 0.0 Episode 24 Score: 1.0 Episode 25 Score: -2.0 Episode 26 Score: 7.0 Episode 27 Score: -3.0 Episode 28 Score: -3.0 Episode 29 Score: 2.0 Episode 30 Score: -1.0 Episode 31 Score: 1.0 Episode 32 Score: -4.0 Episode 33 Score: 0.0 Episode 34 Score: -2.0 Episode 35 Score: 3.0 Episode 36 Score: 4.0 Episode 37 Score: 4.0 Episode 38 Score: 2.0 Episode 39 Score: -2.0 Episode 40 Score: 8.0 Episode 41 Score: 3.0 Episode 42 Score: 6.0 Episode 43 Score: 3.0 Episode 44 Score: -1.[...]When finished, you can close the environment.env.close()Cornstover biorefinery **In this example, uncertainty analysis is performed around reaction conversions in the cornstover biorefinery.** Cornstover biorefinery layout The cornstover biorefinery modeled in BioSTEAM is divided into 8 areas: feedstock handling, pretreatment and conditioning, enzyme production, recovery, wastewater treatment, storage, boiler and turbo-generator system, and utilities (Figure 4). In contrast to the original report by Humbird et. al., enzyme is not produced on-site. Instead, the cellulase mixture is bought at 0.507 USD⋅kg-1 at a protein concentration of 0.10 kg⋅liter-1. 
![Cornstover areas](Cornstover_areas.png "Cornstover areas") Begin by loading the biorefinery:import biosteam as bst from biosteam import find from biosteam.biorefineries.cornstover import systemBy convention, the complete system is the name of the flowsheet with "_sys" at the end:cornstover_sys = find.system.cornstover_sys cornstover_sys # Here is the TEA object cornstover_tea = cornstover_sys.TEA cornstover_teaNavigate by looking at the diagram:find.diagram('thorough') # Alternatively, you could save a report of the system to explore the biorefineryNote that all unit operations are numbered by their respective area, for example the pretreatment reactor `R201` is in area 200.R201 = find.unit.R201 R201The `reactions` attribute of the PretreatmentReactorSystem object is a ParallelReaction object with all stoichiometric reaction taking place:find.unit.R201.reactionsYou can access reaction items and change conversions:glucose_conversion_rxn = R201.reactions[0] glucose_conversion_rxn print(glucose_conversion_rxn.X) glucose_conversion_rxn.X = 0.10 print(glucose_conversion_rxn.X)We can create a Model object to begin our uncertainty analysis:# Define MESP function ethanol_density_kggal = 2.98668849 # kg/gal ethanol = find.stream.ethanol get_MESP = lambda: cornstover_tea.solve_price(ethanol) * ethanol_density_kggal # -> USD/gal # Define MESP metric metric_MESP = bst.Metric('MESP', get_MESP, 'USD/gal') # Initialize model cornstover_model = bst.Model(system=find.system.cornstover_sys, metrics=(metric_MESP,)) cornstover_model # The model begins with no parametersNow add parameters concerning reactions:from chaospy import distributions as D # Parameter distribution conversion = D.Triangle(0.05, 0.10, 0.15) # Parameter decorator param = cornstover_model.parameter # Create and add paramter @param(element=R201, kind='coupled', distribution=conversion) def set_conversion(glucose_conversion): glucose_conversion_rxn.X = glucose_conversion cornstover_model xylose_conversion_rxn = R201.reactions[8] @param(element=R201, kind='coupled', distribution=D.Triangle(0.85, 0.90, 0.95)) def set_conversion(xylose_conversion): xylose_conversion_rxn.X = xylose_conversion cornstover_modelCheck your distributions:dist_dict = cornstover_model.get_distribution_summary() dist_dict['Triangle']Once all parameters have been added, Monte Carlo can be performed:N_samples = 100 rule = 'L' # Latin hypercube sampling samples = cornstover_model.sample(N_samples, rule) cornstover_model.load_samples(samples) cornstover_model.evaluate() cornstover_model.table#import libraries import pandas as pd import numpy as np #load the csv file autolib_df = pd.read_csv('Autolib_dataset (2).csv') autolib_df.head(5) ##accessing the dataset info autolib_df.info() #check if there are any null values autolib_df.isnull().values.any() #finding the null values autolib_df.isnull().sum() #shape of the dataset autolib_df.shape #drop columns by dropping the columns that contain null values autolib_df_comment = autolib_df.drop(['Displayed comment'], axis=1, inplace =True) autolib_df_comment #drop 'Scheduled at' autolib_df_Scheduled = autolib_df.drop(['Scheduled at'], axis=1, inplace=True) autolib_df_Scheduled #confirming whether changes have been made autolib_df.isnull().sum() #accessing all the columns autolib_df.columns #changing the columns into lowercase autolib_df.rename(columns={'Address':'address', 'Cars':'cars', 'Bluecar counter':'bluecar counter', 'Utilib counter':'utilib counter', 'Utilib 1.4 counter':'utilib 1.4 counter', 'Charge Slots':'charge slots', 
'Charging Status':'charging status', 'City':'city', 'ID':'id', 'Kind':'kind', 'Geo point':'geo point', 'Postal code':'postal code', 'Public name':'public name', 'Rental status':'rental status', 'Slots':'slots', 'Station type':'station type', 'Status':'status', 'Subscription status':'subscription status', }, inplace = True) autolib_df.head() autolib_df.head() #find shape of the new dataset autolib_df.shape #add datetime column autolib_df['datetime'] = pd.to_datetime((autolib_df.year*10000+autolib_df.month*100+autolib_df.day).apply(str),format='%Y%m%d') datetime autolib_df.head() #most popular hour of the day for picking up a shared electric car (Bluecar) in the city of Paris over the month of April 2018 # city_hour = autolib_df['city'] == 'Paris' # autolib_df.loc[city_hour].head() result = autolib_df.groupby(['datetime', 'bluecar counter', city_hour])['hour'].value_counts() result #What is the most popular hour for returning cars popular_hour = autolib_df.groupby(['hour', 'cars'])['hour'].agg(['count']).sort_values(by='count', ascending=False) popular_hour.head(1) #What station is the most popular station #overall popular_station = autolib_df.groupby(['station type'])['station type'].agg(['count']).sort_values(by='count', ascending=False) popular_station.head(1) #What postal code is the most popular for picking up Blue cars popular_postal = autolib_df.groupby(['postal code', 'bluecar counter'])['postal code'].agg(['count']).sort_values(by='count') popular_postal.head(1) #most popular picking hour for the postal code code_hour = autolib_df.groupby(['hour', 'postal code'])['hour'].agg(['count']).sort_values(by='count', ascending=False) code_hour.head(1)--- Lecture 5.1 Single Neuron Logistic Regression[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RandyRDavila/Data_Science_and_Machine_Learning_Spring_2022/blob/main/Lecture_5/Lecture_5_1.ipynb)In this notebook we revisit machine learning classification problems. More specifically, we consider the *probabilistic binary classfication problem*. We model this problem as a single neuron model with the *binary cross entropy loss function*. As with single neuron linear regression, we will train our neuron with stochastic gradient descent. Why Probabilistic Neuron Output?In previous binary classification problems, such as when we applied the perceptron single neuron model, we were assuming that our data was linearly seperable. For example, consider the two figures generated by running the following code. 
---import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns # Import a nice function for plotting decision boudaries from mlxtend.plotting import plot_decision_regions # Set the Seaborn theme sns.set_theme() # Read the iris dataset into a pandas DataFrame object with seaborn df = sns.load_dataset("iris") setosa = df[df.species == "setosa"] versicolor = df[df.species == "versicolor"] virginica = df[df.species == "virginica"] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,10)) ax1.scatter(setosa.sepal_length, setosa.sepal_width, color = "red", label = "setosa") ax1.scatter(versicolor.sepal_length, versicolor.sepal_width, color = "magenta", label = "versicolor") ax1.set_xlabel("sepal length [cm]", fontsize = 15) ax1.set_ylabel("sepal width [cm]", fontsize = 15) ax1.legend(fontsize = 15, loc = "upper left") ax1.set_title("Linearly Seperable Data", fontsize = 18) ax2.scatter(versicolor.petal_length, versicolor.sepal_length, color = "magenta", label = "versicolor") ax2.scatter(virginica.petal_length, virginica.sepal_length, color = "lightseagreen", label = "virginica") ax2.set_xlabel("petal length [cm]", fontsize = 15) ax2.set_ylabel("sepal length [cm]", fontsize = 15) ax2.legend(fontsize = 15, loc = "upper left") ax2.set_title("Non-Linearly Seperable Data", fontsize = 18) plt.show()---As you can see in the left figure generated above, it is impossible to split the versicolor and virginica flowers by any linear function. The reason for this is that the two species of flowers on the left overlap in the same regions of space. ---**Question.** How will overlapping data effect the training process for a Perceptron single neuron?---However, notice that in the non-linearly seperable case, the probability of selecting a versicolor flower is higher than the probability of selecting a virginica flower whenever petal length is less that 4.75. Conversely, the probability of selecting a virginica flower is higher than the probability of selecting a versicolor flower whenever the petal length is higher than 4.75. Observing this, label each versicolor flower by 0 and each virginica flower by 1. Then, only using the petal length as our feature measurment observe the figure generated by running the following code in the cell below. ---plt.figure(figsize = (10, 8)) plt.xlim((2.75, 7 )) plt.scatter(versicolor.petal_length, np.zeros(50), color = "magenta", label = "versicolor") plt.scatter(virginica.petal_length, np.ones(50), color = "lightseagreen", label = "virginica") plt.vlines(4.75, 0.0, 1.0) plt.xlabel("petal length [cm]", fontsize = 15) plt.ylabel("label", fontsize = 15) plt.legend(fontsize = 15, loc = "upper left") plt.title("Non-Linearly Seperable Data", fontsize = 18) plt.show()--- Designing a Single Neuron to Predict ProbabilitiesInstead of creating a single neuron model for predicting a class deterministic label, we will next build a single neuron model that predicts a *class probability*. First, recall the general single neuron model depicted in the following figure. The Single Neuron Model--- --- The Sigmoid Activation FunctionAs before (both with the linear regression and Perceptron single neurons), we must first decide on an activation function before deciding on a cost/ loss function. For this purpose, we choose the *sigmoid* activation function:$$\sigma(z) = \frac{1}{1 + e^{-z}}$$This differentiable function has a range in $(0, 1)$, so it would seem suitable for a possible function to turn the pre-activation value into a value representing a probability. 
Moreover, the sigmoid function (sometimes called the *logistic function*) has a smooth "S"-shape that is perfect for probabilities values transitioning, either growing or shrinking, as the input feature changes. For example, run the following code in the cell below. ---def sigmoid(z): return 1.0/(1.0 + np.exp(-z)) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,10)) ax1.set_xlim((2.75, 7)) ax1.scatter(versicolor.petal_length, np.zeros(50), color = "magenta", label = "versicolor") ax1.scatter(virginica.petal_length, np.ones(50), color = "lightseagreen", label = "virginica") ax1.set_xlabel("petal length [cm]", fontsize = 15) ax1.set_ylabel("label", fontsize = 15) ax1.set_title("Non-Linearly Seperable Data", fontsize = 18) domain = np.linspace(-12.0, 12.0, 100) ax2.plot(domain, sigmoid(domain), color = "blue", label = "$\sigma$(z)") ax2.set_xlabel("z = wx + b", fontsize = 18) ax2.set_ylabel("$sigma(z)$", fontsize = 15) ax2.set_title("The Sigmoid Function", fontsize = 18) plt.show()---As can be seen by the two figures generated above, the petal pre-activation value might be able to map the petal length measurements to the correct inputs to the sigmoid function so that the post-activation values correctly describe the probability of observing a versicolor flower or a virginica flower. In order to test this hypothesis, we must next introduce a cost/loss function to our single neuron model. The Binary Cross Entropy Loss Function Currently we have two target values, 0 for versicolor and 1 for virginica. Moreover, we are wishing to predict that *probability of each of these labels given a single feature measurement*. Thus, we encounter the conditional probability function:$$P\Big(y^{(i)}\mid x^{(i)}\Big)=\begin{cases} \hat{y}^{(i)}, \quad & y^{(i)} = 1 \\ 1-\hat{y}^{(i)}, \quad & y^{(i)} = 0 \\ \end{cases}$$Notice that this conditional probability depends on the value of $\hat{y}^{(i)}$, which in-turn depends on the values of our weight and bias. Moreover, we wish to *maximize* this probability over all training examples since this quantity is largest when our predicted probabilities are close approximations to the true 0-1 labels. Thus, we seek to solve the following maximization problem:$$\max_{\mathbf{w}, b} \sum_{i=1}^{N}P\Big(y^{(i)}\mid x^{(i)}\Big).$$Before considering this optimization problem, we next recall the famous Bernoulli formula for binary probabilities:$$P\Big(y^{(i)}\mid x^{(i)}\Big) = [\hat{y}^{(i)}]^{y}[1 - \hat{y}^{(i)}]^{(1-y)}$$Taking the logorithm on both sides of this equation yields (dropping the index notation to avoid messy equations):$$\begin{align} \log P\Big(y^{(i)}\mid x^{(i)}\Big)&= \log \hat{y}^{y}(1 - \hat{y})^{(1-y)}\\ &= y\log \hat{y} + (1-y) \log (1 - \hat{y})\\ \end{align}$$Since the logorithmic function is an *increasing function*, maximimizing $P\Big(y^{(i)}\mid x^{(i)}\Big)$ is equivalent to maximizing $\log P\Big(y^{(i)}\mid x^{(i)}\Big)$. Equivalently, we could also considering minimizing this function. Thus, we arrive at our single neuron coss/loss function for a single entry of data, which implies a full loss function. Binary Cross Entropy Loss Function:$$L(\mathbf{w}, b) = -\frac{1}{N} \sum_{i=1}^{N} P\Big(y^{(i)}\mid x^{(i)}\Big) = \frac{1}{N}\sum_{i=1}^{N}\Big[ -y^{(i)}\log \hat{y}^{(i)} - (1-y^{(i)}) \log (1 - \hat{y}^{(i)})\Big ]$$Now that we have a plausible loss function, we have a complete single neuron model ready for training; see the figure below. 
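To see what this loss rewards and penalizes, consider a single training example with true label $y^{(i)} = 1$: a confident correct prediction $\hat{y}^{(i)} = 0.9$ contributes $-\log(0.9) \approx 0.105$ to the loss, while a confident wrong prediction $\hat{y}^{(i)} = 0.1$ contributes $-\log(0.1) \approx 2.303$. Confident mistakes are therefore penalized far more heavily than confident correct answers, which is exactly the pressure we want gradient descent to exert on the weight and bias.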
The Logistic Regression Single Neuron Model--- --- Calculuting the Gradient of Binary Cross Entropy Loss FunctionIn order to optimize the logistic regression single neuron model with stochastic gradient descent, we first need understand how to calculate the gradient. As before, we will consider the cost function on a single instance of data:$$C(w_1, b; x^{(i)},y^{(i)}) = -y^{(i)}\log \hat{y}^{(i)} - (1-y^{(i)}) \log (1 - \hat{y}^{(i)})$$When considering this equation it is important to remember that $\hat{y}^{(i)}$ really is a composite function. More specifically, we note$$\hat{y}^{(i)} = \sigma(z) = \sigma(w_1x^{(i)} + b).$$Next we note the particularly nice closed form of the derivative of the sigmoid function.$$\sigma'(z) = \sigma(z)(1 - \sigma(z))$$With these two equations, we are now ready to compute the partial derivatives of $C(w_1, b; x_{1}^{(i)},y^{(i)})$ with respect to $w_1$ and $b$. Note that this cost function contains two pieces, namely $-y^{(i)}\log \hat{y}^{(i)}$ and $- (1-y^{(i)}) \log (1 - \hat{y}^{(i)})$. Since the derivative is a linear map, we may calculate $\partial C/ \partial w_1$ by calculating the the derivative of each piece of this equation and then add them together. $$\begin{split}\frac{\partial}{\partial w_1}[-y^{(i)}\log \hat{y}^{(i)}] & = \frac{\partial}{\partial w_1}[-y^{(i)}\log \sigma(w_1 x^{(i)}+b)] \\ & = - \frac{y^{(i)}}{\sigma(w_1 x^{(i)}+b)}\frac{\partial}{\partial w_1} [\sigma(w_1 x^{(i)}+b)] \\ & = - \frac{y^{(i)}}{\sigma(w_1 x^{(i)}+b)}\sigma(w_1 x^{(i)}+b)(1 - \sigma(w_1 x^{(i)}+b))\frac{\partial}{\partial w_1}[w_1 x^{(i)}+b] \\ & = - y^{(i)}(1 - \sigma(w_1 x^{(i)}+b))x^{(i)} \\ & = - y^{(i)}(1 - \hat{y}^{(i)})x^{(i)} \end{split}$$$$\begin{split}\frac{\partial}{\partial w_1}[-(1-y^{(i)}) \log (1 - \hat{y}^{(i)})] & = \frac{\partial}{\partial w_1}[-(1-y^{(i)})\log (1 - \sigma(w_1 x^{(i)}+b))] \\ & = - \frac{(1 - y^{(i)})}{(1 - \sigma(w_1 x^{(i)}+b))}\frac{\partial}{\partial w_1} [1 - \sigma(w_1 x^{(i)}+b) ]\\ & = - \frac{(1 - y^{(i)})}{(1 - \sigma(w_1 x^{(i)}+b))} -\sigma(w_1 x^{(i)}+b)(1 - \sigma(w_1 x^{(i)}+b))\frac{\partial}{\partial w_1}[w_1 x^{(i)}+b] \\ & = (1 - y^{(i)})\sigma(w_1 x^{(i)}+b))x^{(i)} \\ & = (1 - y^{(i)})\hat{y}^{(i)}x^{(i)} \end{split}$$Now that we have calculated the derivative with respect to $w_1$ for each part of the binary cross entropy loss function, we next sum these derivatives:$$\begin{split}\frac{\partial C(w_1, b; x^{(i)},y^{(i)})}{\partial w_1} & = - y^{(i)}(1 - \hat{y}^{(i)})x^{(i)} + (1 - y^{(i)})\hat{y}^{(i)}x^{(i)} \\ & = [- y^{(i)}(1 - \hat{y}^{(i)}) + (1 - y^{(i)})\hat{y}^{(i)}]x^{(i)} \\ & = [- y^{(i)} + y^{(i)}\hat{y}^{(i)} + \hat{y}^{(i)} - y^{(i)}\hat{y}^{(i)}]x^{(i)} \\ & = (\hat{y}^{(i)} - y^{(i)}) x^{(i)}\end{split}$$A similar calculation also yields the partial derivative of our cost function with respect to the bias $b$:$$\frac{\partial C(w_1, b; x^{(i)},y^{(i)})}{\partial b} = (\hat{y}^{(i)} - y^{(i)})$$Notice how these partial derivatives precisely match the partial derivatives of the linear regression single neuron and the approximate partial derivatives of the Perceptron single neuron! Thus, we can train our logistic regression neuron in the exact same way as our previous models by implementing stochastic gradient descent. We now define our custom single neuron class for this purpose. ach feature vector. ---class SingleNeuron(object): """ A class used to represent a single artificial neuron. ... 
Attributes ---------- activation_function : function The activation function applied to the preactivation linear combination. cost_function : function The cost function used to measure model performance. w_ : numpy.ndarray The weights and bias of the single neuron. The last entry being the bias. This attribute is created when the train method is called. errors_: list A list containing the mean sqaured error computed after each iteration of stochastic gradient descent per epoch. Methods ------- train(self, X, y, alpha = 0.005, epochs = 50) Iterates the stochastic gradient descent algorithm through each sample a total of epochs number of times with learning rate alpha. The data used consists of feature vectors X and associated labels y. predict(self, X) Uses the weights and bias, the feature vectors in X, and the activation_function to make a y_hat prediction on each feature vector. """ def __init__(self, activation_function, cost_function): self.activation_function = activation_function self.cost_function = cost_function def train(self, X, y, alpha = 0.005, epochs = 50): self.w_ = np.random.rand(1 + X.shape[1]) self.errors_ = [] N = X.shape[0] for _ in range(epochs): errors = 0 for xi, target in zip(X, y): self.w_[:-1] -= alpha*(self.predict(xi) - target)*xi self.w_[-1] -= alpha*(self.predict(xi) - target) #errors += .5*((self.predict(xi) - target)**2) errors += self.cost_function(self.predict(xi), target) self.errors_.append(errors/N) return self def predict(self, X): preactivation = np.dot(X, self.w_[:-1]) + self.w_[-1] return self.activation_function(preactivation) def plot_cost_function(self): fig, axs = plt.subplots(figsize = (10, 8)) axs.plot(range(1, len(self.errors_) + 1), self.errors_, label = "Cost function") axs.set_xlabel("epochs", fontsize = 15) axs.set_ylabel("Cost", fontsize = 15) axs.legend(fontsize = 15) axs.set_title("Cost Calculated after Epoch During Training", fontsize = 18) plt.show() def plot_decision_boundary(self, X, y, xstring="x", ystring="y"): plt.figure(figsize = (10, 8)) plot_decision_regions(X, y, clf = self) plt.title("Logistic Regression Neuron Decision Boundary", fontsize = 18) plt.xlabel(xstring, fontsize = 15) plt.ylabel(ystring, fontsize = 15) plt.show()---Now that we have defined a custom ```SingleNeuron``` class we are one step closer to training our model on the data at hand. However, before instantiating an instance of the ```SingleNeuron``` class we must first write a function for the binary cross entropy loss. Afterwards we may then create an instantance of our ```SingleNeuron```. Once this is done we need to convert the versicolor and virginica petal length measurements to a ```numpy.ndarray``` and reshape it into a column vector representation (**recall that this is necessary whenever our feature measurements consist of a single measurement**). We then need to create a target $y$ ```numpy.ndarray``` which assigns the labels 0 and 1 to the versicolor and virginica species, respectively. Once we have our feature vector and target vector we can then pass these values into the ```SingleNeuron.train()``` method to train our logistic single neuron with stochastic gradient descent. 
All of this can be done by running the following code in the cell below.---def cross_entropy_loss(y_hat, y): return - y*np.log(y_hat) - (1 - y)*np.log(1 - y_hat) node = SingleNeuron(sigmoid, cross_entropy_loss) X = df.iloc[50:].petal_length.values X = X.reshape(-1, 1) y = np.where(df.iloc[50:].species == "versicolor", 0, 1) node.train(X, y, alpha = 0.01, epochs = 10_000)---Now that we have trained the variable ```node```, we can now visualize the binary cross entropy loss over each epoch by plotting the values in the ```SingleNeuron.errors_``` attribute. This can be done by running the following code in the cell below. ---node.plot_cost_function() plt.show()---This looks good! Visualizing the cost function over the epochs during training helps us verify that are neuron is indeed learning over time. We can next visualize our hypthesis function, or prediction function (recall in all supervised machine learning we are learning a function $h$ which approximates the true target function $f$), by plotting the ```SingleNeuron.predict()``` method over the scattered data points in question. This can be done by running the following code in the cell below.---plt.figure(figsize = (10, 8)) plt.xlim((2.75, 7 )) plt.scatter(versicolor.petal_length, np.zeros(50), color = "magenta", label = "versicolor") plt.scatter(virginica.petal_length, np.ones(50), color = "lightseagreen", label = "virginica") domain = np.linspace(2.75, 7, 100) plt.plot(domain, node.predict(domain.reshape(-1, 1))) plt.xlabel("petal length [cm]", fontsize = 15) plt.ylabel("label", fontsize = 15) plt.legend(fontsize = 15, loc = "upper left") plt.title("The Learned Logistic Curve", fontsize = 18) plt.show() node.predict(X)---When using the trained weights and bias of our logistic single neuron to predict a class on a given measurement, we will need to convert the predicted probabilities to zeros and ones. This can be done by passing the ```numpy.ndarray``` returned by the ```SingleNeuron.predict()``` method into the ```numpy.rint()``` method. Moreover, we can use the resulting ```numpy.ndarray``` to compute the classification error over our training data given by the formula$$E_c = \frac{1}{N}\sum_{i=1}^{N}[\hat{y}^{(i)} \neq y^{(i)}],$$where $[\hat{y}^{(i)} \neq y^{(i)}] = 1$ whenever $\hat{y}^{(i)} \neq y^{(i)}$, and zero otherwise. Run the following code in the cell below to view this classification error. ---np.rint(node.predict(X)) != y classification_error = (np.rint(node.predict(X)) != y).astype(int) print(f"Classification Error = {sum(classification_error)/ len(y)}")Classification Error = 0.07---This is a great classification error on our training data! We can now view our decision boundary implied by the trained weights and bias by running the following code in the cell below.---node.plot_decision_boundary(X, y) plt.show()--- Logistic Regression Single Neuron with Multiple InputsThus far we have only used a single feature measurement as input into our logistic regression single neuron model, but what happens when we use *multiple measurements*. For example, we could use petal length and petal width. With two inputs our model can be depicted by the figure below.The Logistic Single Neuron Model with Multiple Feature Inputs--- ---Let us next instantiate a ```SingleNeuron``` model with petal length and sepal length measurements as input, train this model with the same learning rate and number of epochs as the variable ```node```, and then compare the cost function over epochs between the two models. 
This can be done by running the following code in the cell below. ---# Instantiate a new single neuron. node_two = SingleNeuron(sigmoid, cross_entropy_loss) # Create a numpy.ndarray of petal length and sepal length values for # the versicolor and virginica flowers, respectively. X_two = df.iloc[50:][["petal_length", "sepal_length"]].values # Train the new single neuron model on the new feature vectors. node_two.train(X_two, y, alpha = 0.01, epochs = 10_000) plt.figure(figsize = (10, 8)) plt.plot(range(1, len(node.errors_) + 1), node.errors_, label = "node Cross Entropy Loss") plt.plot(range(1, len(node_two.errors_) + 1), node_two.errors_, label = " node_two Cross Entropy Loss") plt.xlabel("epochs", fontsize = 15) plt.ylabel("Cross Entropy Loss", fontsize = 15) plt.legend(fontsize = 15) plt.title("Model Cost Comparison During Training", fontsize = 18) plt.show()---As we can see by the figures generated above, using two features results in a lower cost function (in this particular instance) with the same learning rate and number of epochs. We encourage the reader to compare different combinations of features from the data and compare the loss function over time during training. Finally, let us visualize the decision boundary generated by the trained weights and bias of ```node_two``` by running the following code in the cell below. After running notice that even with the logistic regression single neuron, we are still learning a linearly seperating hyperplane. ---node_two.plot_decision_boundary(X_two, y) def sign(z): return np.sign(z) def MSE(y_hat, y): return .5*(y_hat - y)**2 # Instantiate a new single neuron. node_three = SingleNeuron(sign, MSE) # Create a numpy.ndarray of petal length and sepal length values for # the versicolor and virginica flowers, respectively. X_three = df.iloc[50:][["petal_length", "sepal_length"]].values y_three = np.where(df.iloc[50:]["species"] == "versicolor", -1, 1) # Train the new single neuron model on the new feature vectors. node_three.train(X_three, y_three, alpha = 0.001, epochs = 5_000) node_three.plot_decision_boundary(X_three, y_three) plt.show() node_three.plot_cost_function() def linear(z): return z # Instantiate a new single neuron. node_four = SingleNeuron(linear, MSE) # Create a numpy.ndarray of petal length and sepal length values for # the versicolor and virginica flowers, respectively. X_four = df.iloc[0:50]["sepal_length"].values X_four = X_four.reshape(-1, 1) y_four = df.iloc[0:50]["sepal_width"].values # Train the new single neuron model on the new feature vectors. 
node_four.train(X_four, y_four, alpha = 0.001, epochs = 5_000) plt.figure(figsize = (10, 8)) plt.scatter(X_four, y_four, label = "setosa", color = "magenta") domain = np.linspace(np.min(X_four), np.max(X_four), 50) plt.plot(domain, node_four.predict(domain.reshape(-1, 1)), label = "regression line") plt.show()通用学习模式import numpy as np # 导入数据集 from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier # 加载数据 iris = datasets.load_iris() # 获取花的属性信息 iris_x = iris.data # 获取花的标签信息 iris_y = iris.target iris_x[:2,:] iris_y # 把数据分为训练集和测试集,测试集数据占总数据的30% x_train, x_test, y_train, y_test = train_test_split(iris_x , iris_y , test_size=0.3) y_train # 定义模型 knn = KNeighborsClassifier() # 使用fit来训练 knn.fit(x_train, y_train) print(knn.predict(x_test)) print(y_test)[2 2 2 1 1 2 0 0 0 2 2 0 2 2 2 2 1 1 0 0 1 0 2 1 1 0 2 0 2 0 2 1 1 0 1 2 2 1 1 0 1 2 0 0 1] [2 2 2 2 2 2 0 0 0 2 2 0 2 2 2 2 1 1 0 0 1 0 2 1 1 0 2 0 2 0 2 1 1 0 1 2 2 1 1 0 1 2 0 0 1]Extra stuff for later notebooks 2. Once more about functions 1. Built-in functions2. Functions generated by you3. Functions imported from modules 1. Built-in functionsA = (5, -1, -15, 25, 2) sorted(A) sorted((5, -1, -15, 25, 2)) sorted(A, reverse= True) help(sorted) sorted(A, key=abs) sorted? print('Hello', 'Hello') print('Hello', 'Hello', sep='*****') help(print)Help on built-in function print in module builtins: print(...) print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False) Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: file: a file-like object (stream); defaults to the current sys.stdout. sep: string inserted between values, default a space. end: string appended after the last value, default a newline. flush: whether to forcibly flush the stream.2. Your own functions Please note that variable name format with _underscores_.This is the first use of `indentation` during our journey# defining the function def my_first_function(a, b): return a + b # calling the function my_first_function(10, 5) A = my_first_function(20, 100) AFunction with default parametersdef my_second_function(a, b = 100): return a + b my_second_function(5) my_second_function(5, 20)Not all functions have argumentsdef my_third_function(): return 10 my_third_function()Functions can do something else rather than returning a valuedef my_third_function(a): print(a) my_third_function('Hello world')Hello worldLet us see what happens if we assign the call to a variable_This is difficult to understand - do not be put off by this_A = my_third_function('Hello world') A3. Functions imported from modules 4. Reading in text dataf_handle = open('data/karamazov_brothers.txt', 'r') text = f_handle.read() f_handle.close() len(text) text[:10000] print(text[:10000]) print(text[5016:10000]) txt_list = text.split(' ') len(txt_list) txt_list[1000:1100] print(txt_list[1000:1100]) txt_list.count('an') txt_list.count('Fyodor') txt_list.count('Ivan') txt_list.count('enigmatic')4. Reading in and manipulating tabular data 5. Reading in images# importing matplotlib module import matplotlib.image as mpimg img = mpimg.imread('data/newborn_heart_image.jpg') img.ndim img.shape 369 * 636 * 3 img.flatten() len(img.flatten()) print(set(img.flatten())) img[:, :, 1] img[:, :, 2] img[1, :, :] # Output Image # importing matplotlib module import matplotlib.pyplot as plt plt.imshow(img)CorrelaçãoCorrelação é qualquer associação estatística entre um par de variáveis. 
Quanto mais correlacionadas estão duas variáveis, mais "alinhamento" há entre elas. Isto é, uma análise de correlação fornece um número que resume o grau de relacionamento linear entre duas variáveis. Introduziremos este assunto com alguns conceitos fundamentais. Associação entre variáveis e causalidadeA associação entre duas variáveis pode ocorrer de duas formas: - _correlacional_: neste caso, não há interferência alguma sobre as variáveis observadas. As variáveis são _aleatórias_ e seus comportamentos ocorrem conforme a "natureza" determina. Por exemplo, o exercício físico e a queima calórica possuem correlação positiva, pois quanto mais intensamente nos exercitamos, mais queimamos caloria. - _experimental_: neste caso, uma das variáveis é controlada e esta interfere sobre a outra. Isto é, uma variável A é a _causa_ (variável independente) e a outra, B, o _efeito_ (variável independente). Uma sentença lógica do tipo "se A, então B" estabelece a idéia de _causalidade_. Por exemplo: quando cientistas estudam a administração de fármacos em um organismo, eles analisam os efeitos dessa droga naquele organismo. Logo, a droga é a causa; a resposta orgânica, o efeito. Correlação e dependência linearPodemos interpretar a _correlação_ também pelo ponto de vista de "dependência linear". Duas variáveis perfeitamente correlacionadas são similares a dois vetores paralelos, ou seja, linearmente dependentes. Por outro lado, duas variáveis totalmente não correlacionadas são similares a dois vetores perpendiculares, ou seja, linearmente independentes. Escore padronizado- Quando as variáveis que queremos comparar não estão expressas na mesma unidade (Ex. correlacionar alturas em cm e pesos em kg).As duas soluções comuns para lidar com esses problemas são:1. Transformar todos os valores para um _escore padronizado_. 2. Transformar todos os valores para ranques baseados em percentis. Para converter uma série $X$ de valores $x_i$ em uma escala padronizada de escores, subtraímos a média dos dados e dividimos esta diferença pelo desvio padrão. Isto é: $$z_i = \dfrac{x_i - \mu}{\sigma},$$onde $\mu$ é a média e $\sigma$ o desvio padrão. Ao dividir os desvios (numerador) pelo desvio padrão, na verdade, estamos _normalizando_ o desvio, de modo que os valores $z_i$ da nova série $Z$ sejam adimensionais (sem unidades), possuam média 0 e variância 1. 
A série $Z$ herda a "forma" de $X$.import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy.stats as stsVejamos exemplos:# dataframe dfp = pd.DataFrame({'Idade': np.array([20,19,21,22,20]), 'Peso': np.array([55,80,62,67,73]), 'Altura': np.array([162,178,162,165,171]), 'IMC':np.array([20.96, 25.25, 23.62, 24.61, 24.96])}, index=['Ana','João','Maria','Pedro','Túlio']) dfpVamos calcular o _z-score_ para todas as _Series_ do _DataFrame_.def zScore(df,colname): s = df[colname] return (s - s.mean())/s.std(ddof=0) # ddof = 0 para dividir por N # cria novo dataframe de z-scores Z = {} for c in dfp.columns: Z[c + ':Z-score'] = zScore(dfp,c) dfpz = pd.DataFrame(Z) dfpzComentários: - Os _z-score_ ajudam a entender se uma observação específica é comum ou excepcional: - _z-score_ < 0 representam valores abaixo da média; - _z-score_ > 0 representam valores acima da média; - _z-score_ da média é 0, uma vez que ela é ponto médio; - A soma dos _z-score_ = 0; - _z-scores_ com valores positivos extremamente altos indicam uma distribuição com _assimetria à direita_ (mais sobre isso adiante); - _z-scores_ com valores negativos extremamente altos indicam uma distribuição com _assimetria à esquerda_ (mais sobre isso adiante); - se |_z-score_| > 2, a distribuição é incomum ou excepcional.# Z-Score das séries têm soma nula dfpz.sum(axis=0)Calculando o z-score por função predefinida.Z2 = {} for c in dfp.columns: Z2[c + ':Z-score'] = sts.zscore(dfp[c]) dfpz2 = pd.DataFrame(Z2,index=dfp.index) dfpz2 # ambos os métodos dão resultados idênticos (dfpz == dfpz2).all()Plot de _z-scores_A plotagem dos _z-scores_ pode ser feita diretamente com `plot` a partir da _Series_ $Z$ de interesse.dfpz['Peso:Z-score'].plot(marker='o',ls=''); dfpz['Peso:Z-score'].plot(marker='o',ls='',color='g');Comentários: - A partir desses plots, vemos claramente qual discente está "acima da", "abaixo da" ou "na" média perante a variável escolhida. CovariânciaQuando queremos compreender como duas variáveis variam juntas, aplicamos o conceito de _covariância_. Se $X$ e $Y$ são duas _Series_, a covariância entre ambas é dada por $$\textrm{cov}(X,Y) = \frac{1}{n}\sum_{i=1}^n(x_i - \mu_X)(y_i - \mu_Y),$$onde $n$ é o número de elementos na série (igual em ambas) e $\mu_X$ ($\mu_Y$) é a média de $X$($Y$). Notemos que a covariância é uma "média" do produto dos desvios. **Exemplo:** vamos criar uma função para o cálculo da variância.# covariância def cov(df,colname1,colname2): s1,s2 = df[colname1],df[colname2] return np.dot( s1 - s1.mean(), s2 - s2.mean() )/(len(s1)-1)Testemos a covariância entre as variáveis de nosso _DataFrame_ de estudo.cov(dfp,'Altura','Peso'), cov(dfp,'Idade','Peso'), cov(dfp,'Idade','Altura')Comentários:- Esses cálculos mostram que variações de _altura_ e _peso_ interferem consideravelmente uma na outra "na mesma direção".- Porém, não notamos o mesmo comportamento para _idade_ e _peso_ ou para _idade_ e _altura_. Podemos checar o cálculo de nossa função com a função `var` do _pandas_, sabendo que $\text{cov}(X,X) = \text{var}(X) = S^2(X), \, \forall X$.cov(dfp,'Altura','Altura'), dfp['Altura'].var()Outra forma de calcular a covariância é usar o método `cov` de uma `pandas.Series`.dfp['Altura'].cov(dfp['Peso'])Matriz de covariânciasPodemos usar a função `numpy.cov()` para computar a covariância entre duas _Series_ $X$ e $Y$. Para tanto, devemos passar a matriz $[X \ \ Y]$ como parâmetro para a função. 
A resposta é uma _matriz de covariâncias_ 2x2 cujas entradas são: $$\begin{bmatrix}\text{cov}(X,X) & \text{cov}(X,Y) \\\text{cov}(Y,X) & \text{cov}(Y,Y)\end{bmatrix}$$X, Y = dfp['Altura'], dfp['Peso'] np.cov(np.array([X,Y]))CorrelaçãoUma das dificuldades conhecidas da covariância é a sua interpretação. Uma vez que ela é dada pelo produto das unidades de suas entradas, muito frequentemente será inviável atribuir significado ao número. Por exemplo, se a unidade de $X$ for quilogramas e $Y$ for anos, a unidade da covariância seria quilogramas vezes anos. Um meio de solucionar este problema é dividir o produto da covariância pelo desvio padrão de cada série de dados, assim formando o conceito de _correlação_, dado por: $$\rho(X,Y) = \frac{1}{n}\sum_{i=1}^n\frac{(x_i - \mu_X)}{\sigma_X}\frac{(y_i - \mu_Y)}{\sigma_Y}.$$Em outras palavras, a correlação é a soma do produto de escores padronizados. Coeficiente de correlação de PearsonSe os desvios forem retirados da somatória, a expressão torna-se: $$\rho(X,Y) = \frac{1}{\sigma_X \sigma_Y}\frac{1}{n}\sum_{i=1}^n(x_i - \mu_X)(y_i - \mu_Y) = \frac{\textrm{cov}(X,Y)}{{\sigma_X \sigma_Y}}.$$O número $\rho$ é chamado de _coeficiente de correlação de Pearson_, ou simplesmente _correlação de Pearson_, e vale que $-1 \leq \rho \leq 1$. A magnitude de $\rho$ determina a _força de correlação_ entre as variáveis. Em geral, a seguinte interpretação é utilizada: - $\rho = 1$: as variáveis são perfeitamente correlacionadas.- $\rho = 0$: as variáveis são correlacionadas de alguma forma, mas **não** linearmente. Neste sentido, $\rho$ subestimará a força da dependência linear.- $\rho = -1$: idem, porém negativamente. **Exemplo:** A tabela a seguir contém dados coletados na administração de um zoológico para alguns dias do mês de abril de 2021. Nesta ordem, a tabela mostra o número de visitantes no zoológico, o número de tickets de estacionamento adquiridos e a temperatura média contabilizados por dia.| Visitantes | Tickets | Temperatura ||-------------:|------------------:|------------------:|| 1580 | 8 | 35 || 1230 | 6 | 38 || 1950 | 9 | 32 || 890 | 4 | 26 || 1140 | 6 | 31 || 1760 | 9 | 36 || 1650 | 10 | 38 || 1470 | 3 | 30 || 390 | 1 | 21 || 1460 | 9 | 34 || 1000 | 7 | 36 || 1030 | 6 | 32 || 740 | 2 | 25 || 1340 | 6 | 37 || 1150 | 7 | 34 |O arquivo com os dados está disponível [aqui]('../database/visitantes-zoo.csv'). Vamos buscar correlações nos dados. Carregando o arquivo:zoo = pd.read_csv('../database/visitantes-zoo.csv'); zooPara calcular a correlação de Pearson entre duas séries, podemos usar a função `pearsonr()` do módulo `scipy.stats`.corr1,_ = sts.pearsonr(zoo['Visitantes'],zoo['Tickets:Parking']); corr2,_ = sts.pearsonr(zoo['Visitantes'],zoo['Temperatura (C)']); corr1,corr2Comentários: - O coeficiente de Pearson mostra que há uma "força" de correlação não desprezível entre o número de visitantes e tickets vendidos.- Esta correlação é menor para a faixa de temperaturas médias. A correlação pode também ser calculada através do método `corr` de uma _Series_ do pandas.zoo['Visitantes'].corr(zoo['Tickets:Parking']) zoo['Visitantes'].corr(zoo['Temperatura (C)'])Correlações pareadasUsando o método `pandas.DataFrame.corrwith()` é possível calcular correlações pareadas entre colunas de um _DataFrame_ ou linhas de outra _Series_ ou _DataFrame_.No exemplo abaixo, passamos uma _Series_ como argumento. A resposta são os mesmos valores obtidos anteriormente, porém na forma de uma _Series_. 
O valor unitário é devido à correlação da variável com ela própria.zoo.corrwith(zoo['Visitantes'])Gráfico de dispersãoAntes de calcular cegamente o valor de $\rho$ para séries de dados, é interessante fazer um gráfico de _dispersão_ (_scatter plot_) entre as variáveis. Podemos fazer isto com o `matplotlib.pyplot.plot()` e tipo de marcador `o` ou com `matplotlib.pyplot.scatter()`.fig,ax = plt.subplots(1,2,figsize=(14,5)) # plot 1 ax[0].plot(zoo['Visitantes'],zoo['Tickets:Parking'],'o',label=f'corr={round(corr1,2)}') ax[0].set_xlabel('No. visitantes'); ax[0].set_ylabel('Tickets de estacionamento'); ax[0].legend() # plot 2 ax[1].plot(zoo['Visitantes'],zoo['Temperatura (C)'],'or',label=f'corr={round(corr2,2)}') ax[1].set_xlabel('No. visitantes'); ax[1].set_ylabel('Temperatura (C)'); ax[1].legend();Reproduzindo com `plt.scatter`:plt.scatter(zoo['Visitantes'],zoo['Tickets:Parking']);E Commerce Review Analysis Importing Librariesimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import warnings import timeWorking on dataset#Importing data from CVS data = pd.read_csv('Womens_Clothing_E_Commerce_Reviews.csv') #Leaving of first column as we have index data.drop('Unnamed: 0', axis=1,inplace=True) #Removing rows with nan data.dropna(subset=['Review Text','Division Name','Department Name','Class Name'],inplace=True) data.columns = ['Clothing_Id', 'Age', 'Title', 'Review', 'Rating', 'Recommended_IND', 'Positive_Feedback_Count','Division_Name', 'Department_Name', 'Class_Name'] data.head(3) # FInding number of unique things in each column print('Clothing_Id :' + str(len(list(set(data['Clothing_Id']))))) print('Age :' + str(len(list(set(data['Age']))))) print('Rating :' + str(len(list(set(data['Rating']))))) print('Recommended_IND :' + str(len(list(set(data['Recommended_IND']))))) print('Positive_Feedback_Count :' + str(len(list(set(data['Positive_Feedback_Count']))))) print('Division_Name :' + str(len(list(set(data['Division_Name'])))) + str(set(data['Division_Name']))) print('Department_Name :' + str(len(list(set(data['Department_Name'])))) + str(set(data['Department_Name']))) print('Class_Name :' + str(len(list(set(data['Class_Name'])))))Clothing_Id :1172 Age :77 Rating :5 Recommended_IND :2 Positive_Feedback_Count :82 Division_Name :3{'General', 'General Petite', 'Initmates'} Department_Name :6{'Trend', 'Intimate', 'Bottoms', 'Tops', 'Jackets', 'Dresses'} Class_Name :20Analysis 1: Department & Division v.s. 
Review Countimport numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import warnings warnings.filterwarnings('ignore') sns.set_style('darkgrid') f, axes = plt.subplots (1,2, figsize=(18,6)) # Histrogram x = ['Jackets', 'Bottoms', 'Dresses', 'Trend', 'Tops', 'Intimate'] y = [data.Department_Name[(data['Department_Name']=='Jackets')].count(), data.Department_Name[(data['Department_Name']=='Bottoms')].count(), data.Department_Name[(data['Department_Name']=='Dresses')].count(), data.Department_Name[(data['Department_Name']=='Trend')].count(), data.Department_Name[(data['Department_Name']=='Tops')].count(), data.Department_Name[(data['Department_Name']=='Intimate')].count(), ] vis1= sns.barplot(x,y,palette='rocket', ax=axes[0]) vis1.set(xlabel='Department Name',ylabel='Number of Reviews for all Divisiion Names') for p in vis1.patches: vis1.annotate("%.f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center', va='center', fontsize=11, color='black', xytext=(0, 20), textcoords='offset points') # Histrogram NG = [data.Division_Name[(data['Division_Name']=='General')].count(), data.Division_Name[(data['Division_Name']=='Initmates')].count(), data.Division_Name[(data['Division_Name']=='General Petite')].count() ] G = ['General', 'Initmates', 'General Petite'] plt.title('Division Name') plt.pie(NG, labels=G, startangle=90, autopct='%.1f%%') plt.ioff()Analysis 2: Department & Division v.s. Positive Feedback Countf, axes = plt.subplots (1,2, figsize=(18,6)) #stripplot Number of positive feedbacks given by people for different Division name giving different ratings vis1= sns.stripplot(y='Positive_Feedback_Count' , x='Division_Name', data=data,\ hue='Rating',dodge=True, size=3, ax=axes[0]) vis1.set(ylim=(0, 150)) vis1.set(xlabel='Division Name',ylabel='Positive Feedback Count') #stripplot Number of positive feedbacks given by people for different Department Name giving different ratings vis2= sns.stripplot(y='Positive_Feedback_Count' , x='Department_Name', data=data,\ hue='Rating',dodge=True, size=3, ax=axes[1]) plt.legend(title='User Rating(1-5)',bbox_to_anchor=(1.29, 1)) vis2.set(ylim=(0, 150)) vis2.set(xlabel='Department Name',ylabel='Positive Feedback Count') plt.ioff() data.head(4)Analysis 3: Positive Feedback Count v.s. Age#Number of ratings given by people of different age group plt.subplots(figsize=(20,10)) vis2 = sns.lineplot(x=data.Age, y=data.Positive_Feedback_Count , hue=data.Rating , err_style=None, marker='o') vis2.set(ylim=(0, 20)) vis2.set_title('Number of ratings given by people of different age group', fontsize=15) plt.legend(bbox_to_anchor=(1, 1)) plt.ioff()Analysis 4: Positive Feedback Count v.s. Recommendation# Rating given by people who recommend and do not recommend products to others plt.subplots (figsize=(6,6)) vis1= sns.stripplot(y='Positive_Feedback_Count' , x='Recommended_IND', data=data,\ hue='Rating',dodge=True, size=3) vis1.set(ylim=(0, 150)) plt.legend(title='User Rating(1-5)',bbox_to_anchor=(1.29, 1)) vis1.set(xlabel='Recommendation',ylabel='Positive Feedback Count') vis1.set_title('Rating given by people who recommend and \n do not recommend products to others') plt.show()From the above graph we come to know that the people who do not recommend the product to others tend to give low ratingsCompared to those who recommend the products to others Analysis 5: Recommendation Count v.s. 
Age# Finding out which age group people give the most recomendation to others and also who don't recommend plt.subplots(figsize=(20,8)) sns.set(style="darkgrid") ax = sns.countplot(x="Age",hue='Recommended_IND', data=data)From the above graph we can come to a conclusion that the age group between 33 to 53 are most active and recommend the most Creating Rating Classdata_class = data data_class['Class'] = 'Bad' data_class.loc[data_class.Rating>3,['Class']] = 'Good' data_class.drop(['Title', 'Age','Rating'], axis=1, inplace=True) data_class.head(3) x = data_class['Class'].unique() y = [data_class.Class[(data_class['Class'] == 'Good')].count(), data_class.Class[(data_class['Class'] == 'Bad')].count()] plt.subplots(figsize=(4,4)) graph = sns.barplot(x,y,palette='rocket') # vis1= sns.barplot(x,y,palette='rocket') graph.set(xlabel='Class Name',ylabel='Number of Class for all reviews') plt.title('Number of Good And Bad Reviews') plt.ioff()Analysis 6: Top 10 most reviewed productscloth_id = ((pd.DataFrame({'Number_of_reviews' : (data_class.groupby('Clothing_Id').count()['Class'])})).reset_index(level=0)).sort_values(by=['Number_of_reviews'], ascending=False)[:10] # """ .plot(x='Clothing_Id', y ='Number_of_reviews',kind='bar',title='Top 10 most reviewed products') """ graph = sns.barplot(x='Clothing_Id', y ='Number_of_reviews',data=cloth_id,order=list(cloth_id['Clothing_Id']),palette='Blues_r') graph.set_title('Top 10 most reviewed products') plt.ioff()Analysis 7: Good & Bad Reviews in top 10 products# Plot to show how many people like and dilike a product of top 10 liked products good_df = (((pd.DataFrame({'Number_of_good_reviews' : ((data_class.loc[data_class.Class == 'Good']).groupby('Clothing_Id').count()['Class'])})).reset_index(level=0)).sort_values(by=['Number_of_good_reviews'], ascending=False)) bad_df = (((pd.DataFrame({'Number_of_bad_reviews' : ((data_class.loc[data_class.Class == 'Bad']).groupby('Clothing_Id').count()['Class'])})).reset_index(level=0)).sort_values(by=['Number_of_bad_reviews'], ascending=False)) review_cloth_id = pd.merge(good_df, bad_df,on=['Clothing_Id'],how='outer').sort_values(by=['Number_of_good_reviews'],ascending=False)[:10] review_cloth_id.plot(x ='Clothing_Id', kind='bar', stacked=True,title='Good and Bad reviews in top 10 Good products') plt.ioff()Applying Natural Language Processing Cleaning data for Modelmodel_data = data_class model_data.drop(['Clothing_Id','Recommended_IND','Positive_Feedback_Count','Division_Name','Department_Name','Class_Name'], axis=1, inplace=True) model_data['Processed_review'] = np.nan model_data = model_data.reset_index() model_data.drop(['index'], axis=1,inplace=True) import nltk from nltk.tokenize import sent_tokenize, word_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer, SnowballStemmer import string ps = PorterStemmer() sno = SnowballStemmer('english') lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) # CLeaning and add it to the DataFrame def process(i): orginal_sent = model_data['Review'][i] tokenized_sent = word_tokenize(orginal_sent) stop_sent = [w for w in tokenized_sent if w not in stop_words and w not in string.punctuation] lemmatized_sent = [lemmatizer.lemmatize(w) for w in stop_sent] model_data['Processed_review'][i] = ' '.join(lemmatized_sent) for i in range(len(model_data['Review'])): try: process(i) except: passCreating Bag of Words Modelfrom sklearn.model_selection import train_test_split from 
sklearn.feature_extraction.text import TfidfVectorizer from sklearn import naive_bayes from sklearn.metrics import accuracy_score from sklearn.ensemble import RandomForestClassifier from sklearn import tree from sklearn import svm import sklearn # Vectorizing and defining x and y vectorize = TfidfVectorizer(use_idf = True, lowercase=True, strip_accents='ascii', stop_words=stop_words,max_features=4000) y = model_data.Class x = vectorize.fit_transform(model_data.Processed_review)Naive Bayes (BernoulliNB)def model_BernoulliNB(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421,test_size=0.2) # Training the model clf = naive_bayes.BernoulliNB() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return accuracy_score(y_test,y_pred) * 100, time.time() - start_timeNaive Bayes (MultinomialNB)def model_MultinomialNB(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = naive_bayes.MultinomialNB() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)Naive Bayes (ComplementNB)def model_ComplementNB(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = naive_bayes.ComplementNB() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)Random Forestdef model_RandomForestClassifier(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = RandomForestClassifier() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)Decision Treedef DecisionTreeClassifier(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = tree.DecisionTreeClassifier() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)SVM (linear kernel)def model_svm_linear(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = svm.SVC(kernel='linear') clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)SVM (poly kernel)def model_svm_poly(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = sklearn.svm.SVC(kernel='poly') clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)SVM (rbf kernel)def model_svm_rbf(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=421) # Training the model clf = sklearn.svm.SVC(kernel='rbf') clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)Logistic Regressiondef model_LogisticRegression(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y) # Training the model clf = 
sklearn.linear_model.LogisticRegression() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)K Nearest Neighborsdef model_KNeighborsClassifier(): start_time = time.time() # train test split x_train, x_test, y_train, y_test = train_test_split(x,y) # Training the model clf = sklearn.neighbors.KNeighborsClassifier() clf.fit(x_train, y_train) # Testing y_pred = clf.predict(x_test) return (accuracy_score(y_test,y_pred) * 100), (time.time() - start_time)Summary of ObservationsNote : The cell below takes a lot of time to execute, as all the models are actually applied heremodels = [model_BernoulliNB(), model_MultinomialNB(), model_ComplementNB(), model_RandomForestClassifier(), DecisionTreeClassifier(), model_svm_linear(), model_svm_poly(), model_svm_rbf(), model_LogisticRegression(), model_KNeighborsClassifier()] models_name = ['BernoulliNB', 'MultinomialNB', 'ComplementNB', 'RandomForestClassifier', 'DecisionTreeClassifier', 'svm_linear', 'svm_poly', 'svm_rbf', 'LogisticRegression', 'KNeighborsClassifier'] accuracy = [] timing = [] for m in models: acc, tim = m accuracy.append(acc) timing.append(tim) result = pd.DataFrame({'Models' : models_name, 'Accuracy' : accuracy, 'Timing' : timing}) result # plotting Accuracy and Timings of all models f, axes = plt.subplots (1,2, figsize=(18,6)) # Accuracy result.sort_values(by=['Accuracy'], ascending=False, inplace=True) result = result.reset_index() result.drop(['index'], axis=1,inplace=True) vis1= sns.barplot(y='Accuracy' , x='Models', data=result,dodge=True, ax=axes[0],palette='PuRd_r',) vis1.set(ylim=(75, 90)) vis1.set(xlabel='Models',ylabel='Accuracy') vis1.set_xticklabels(vis1.get_xticklabels(), rotation=45) vis1.set_title('Accuracy of different Models') for index, row in result.iterrows(): vis1.text(row.name,row.Accuracy, round(row.Accuracy,2), color='black', ha="center") # Timing result.sort_values(by=['Timing'], ascending=False, inplace=True) result = result.reset_index() result.drop(['index'], axis=1,inplace=True) vis2= sns.barplot(y='Timing' , x='Models', data=result,dodge=True, ax=axes[1],palette='Blues_r',) vis2.set(ylim=(0, 500)) vis2.set(xlabel='Models',ylabel='Runtime in seconds') vis2.set_xticklabels(vis2.get_xticklabels(), rotation=45) vis2.set_title('Runtime(seconds) of different Models') for index, row in result.iterrows(): vis2.text(row.name,row.Timing, round(row.Timing,2), color='black', ha="center") plt.ioff()Creating a SQL DatabaseFor this project I will be creating a lot of datasets, and I want to organize this into a SQL database that is easily accessible. There is a lot of information in this site: https://www.sqlitetutorial.net/sqlite-python/import sqlite3 from sqlite3 import ErrorIt is recommended to create a function that will connect to the SQLite database.def create_connection(db_file): """ create a database connection to a SQLite database """ conn = None try: conn = sqlite3.connect(db_file) print(sqlite3.version) except Error as e: print(e) finally: if conn: conn.close()Next, we will used the function we defined to create a SQL database called *biblesql*if __name__ == '__main__': create_connection(r"C:\Bible Research\SQL database\biblesql.db")2.6.0Sub-string divisibility The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9 in some order, but it also has a rather interesting sub-string divisibility property.Let d1 be the 1st digit, d2 be the 2nd digit, and so on. 
In this way, we note the following:d2d3d4=406 is divisible by 2d3d4d5=063 is divisible by 3d4d5d6=635 is divisible by 5d5d6d7=357 is divisible by 7d6d7d8=572 is divisible by 11d7d8d9=728 is divisible by 13d8d9d10=289 is divisible by 17Find the sum of all 0 to 9 pandigital numbers with this property. --- Idea At first glance, we get:$\begin{cases}d_4 \bmod 2 = 0 \\(d_3+d_4+d_5) \bmod 3 = 0 \\d_6 = 0 \ or \ 5\end{cases}$if $d_6=0$, then $d_7, d_8$ must be same if "$d_6d_7d_8$ is divisible by 11", which violates the rule of pandigital. So, $ d_6 =5 $, this is the key to this problem.Then start with "$d_6d_7d_8$ is divisible by 11", we can get $d_6d_7d_8$ must be in [506, 517, 528, 539, 561, 572, 583, 594]Then start with "$d_7d_8d_9$ is divisible by 13", we can get $d_7d_8d_9$ must be in [286, 390, 728, 832]Then start with "$d_8d_9d_{10}$ is divisible by 17", we can get $d_8d_9d_{10}$ must be in [867, 901, 289]Chain the above three conclusions, we get $d_6d_7d_8d_9d_{10}$ must be in [52867, 53901, 57289]Together with the first observation, we can reduce search space quite a lot. ---from math import ceil from itertools import permutations from functools import reduce # divisible by 11 [11 * i for i in range(ceil(500 / 11), ceil(600 / 11))] # divisible by 13 [13 * ceil((i*10) / 13) for i in [6, 17, 28, 39, 61, 72, 83, 94]] # divisible by 17 [17 * ceil((i % 100) * 10 / 17) for i in [286, 390, 728, 832]] def get_remain_digits_permuations(last_five_digits): remain = set(range(10)) - set(map(int, str(last_five_digits))) return permutations(remain) list(get_remain_digits_permuations(52867))[:10] def is_candidate(permutation): # divisible by 2 and 3 return permutation[3] % 2 == 0 and sum(permutation[2:5]) % 3 == 0 is_candidate((1,4,0,6,3)) def combine_digits(digits): return reduce(lambda n, d: n*10+d, digits, 0) combine_digits([2, 3, 4]) def is_substring_divisible(candidate): # divisible by 7 return combine_digits(candidate[4:7]) % 7 == 0 def solve(): s = 0 for last_five_digits in [52867, 53901, 57289]: for candidate in filter(is_candidate, get_remain_digits_permuations(last_five_digits)): if is_substring_divisible(candidate + tuple(map(int, str(last_five_digits)))): s += (combine_digits(candidate) * int(1e5) + last_five_digits) return s solve()Total AA_Meetups held in regions (2006 - 2019) AA_Meetups Regions published by The Hindu Newspaper- Chennai- Madurai- Coimbatore- Thiruvananthapuram- Tirunelveli- Engagements- Kozhikode- Hyderabad- Tirupur- Tiruchi- Kochi- Mangalore Python Package Installation- pip install plotnine- pip install pandas Import Packagesfrom plotnine import * import pandas as pd df = pd.read_csv('AA_Meets.csv') df.head() sources = df.groupby('Published_Region').agg('count') source_counts = sources.Articles.sort_values(ascending=False) source_data = pd.DataFrame(source_counts).reset_index() sources_plot = ggplot(source_data, aes(x='Published_Region', y= 'Articles', fill='Published_Region')) + geom_col() + coord_flip()\ + scale_fill_brewer(type='div', palette="Spectral") + theme_classic() + ggtitle('AA_Meetups Schedule held in different regions')\ + geom_text(aes(label='Articles'), size=8, ha='right', format_string='{}')\ display(sources_plot) ggsave(plot=sources_plot, filename='region.png', height=5, width=5, units = 'in', dpi=1000)C:\Users\GM\Anaconda3\lib\site-packages\plotnine\ggplot.py:706: UserWarning: Saving 5 x 5 in image. 
from_inches(height, units), units)) C:\Users\GM\Anaconda3\lib\site-packages\plotnine\ggplot.py:707: UserWarning: Filename: region.png warn('Filename: {}'.format(filename)) C:\Users\GM\Anaconda3\lib\site-packages\plotnine\scales\scale.py:93: MatplotlibDeprecationWarning: The iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead. if cbook.iterable(self.breaks) and cbook.iterable(self.labels): C:\Users\GM\Anaconda3\lib\site-packages\plotnine\scales\scale.py:93: MatplotlibDeprecationWarning: The iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead. if cbook.iterable(self.breaks) and cbook.iterable(self.labels): C:\Users\GM\Anaconda3\lib\site-packages\mizani\palettes.py:434: UserWarning: Warning message:Brewer palette Spectral has a maximum of 11 colorsReturning the palette you asked f[...]CDL Quantum Hackathon 2020---- PennyLane challenge>**Challenge description**: PennyLane contains the quantum-aware optimizers Rotosolve, QNG, and Rosalin. Rewrite them as PyTorch or TensorFlow native optimizers and provide a tutorial showing how they can be used to train a quantum model. Description of notebookThis notebook demonstrates that two of the specified quantum-aware optimizers; Rotosolve and Quantum Natural Gradient are implemented in a PyTorch generic way. They *do* require pennylane qNode circuits to provide some initial functionality, and they borrow the Fubini metric tensor calculation from PennyLanefrom quantum_aware_optims import * import pennylane as qml from torch.optim import SGD, Adam import matplotlib.pyplot as plt import numpy as np import seaborn as sns sns.set()The RotoSolve quantum optimizer----The example is from https://pennylane.ai/qml/demos/tutorial_rotoselect.htmln_wires = 2 dev = qml.device("default.qubit", analytic=True, wires=2) def ansatz(params): qml.RX(params[0], wires=0) qml.RY(params[1], wires=1) qml.CNOT(wires=[0, 1]) @qml.qnode(dev) def circuit(params): ansatz(params) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) @qml.qnode(dev) def circuit2(params): ansatz(params) return qml.expval(qml.PauliX(0))Hamiltonian definitionWe assume that the required expectation values that will be present in the final quantum circuit are evaluated from a list of quantum circuits, and that these are joined together corresponding to some mixing coefficients.Further, we specify some initial parametersqcircuits = [circuit, circuit2] proportions = [0.5, 1.2, -0.2] init_params = torch.tensor([0.1, 0.25])We iterate the rotosolve algorithm 20 timesn_steps = 20 optim = RotoSolve(init_params,qcircuits,proportions) """RotoSolve inherits from torch.optim.Optimizer, but relies heavily on native python operations. 
However, since it's non-gradient optimization can't flow the gradients into a classical layer anyway""" for i in range(n_steps): loss = optim.step() print(optim.final_params) # Plotting results plt.plot(list(range(n_steps+1)),optim.losses, 'o-') plt.xlabel("Iteration number") plt.ylabel("Cost") plt.title("Rotosolve takes one iteration to find the optimal parameters for this Hamiltonian") plt.show() %%timeit optim = RotoSolve(init_params,qcircuits,proportions) loss = optim.step() # Let's scale it up to a more complicated Hamiltonian n_wires = 5 dev = qml.device("default.qubit", analytic=True, wires=n_wires) def ansatz(params): qml.RX(params[0], wires=0) qml.RY(params[1], wires=1) qml.CNOT(wires=[0, 1]) qml.RX(params[2], wires=0) qml.RY(params[4], wires=1) qml.CNOT(wires=[0, 1]) qml.RX(params[2], wires=0) qml.RY(params[4], wires=1) @qml.qnode(dev) def circuit(params): ansatz(params) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)), qml.expval(qml.PauliZ(2)),qml.expval(qml.PauliY(4)) @qml.qnode(dev) def circuit2(params): ansatz(params) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliY(2)) qcircuits = [circuit, circuit2] proportions = [0.5, 1.2, -0.2, 0.3, -0.8, 0.9, -1.8] init_params = torch.randn(7) n_steps = 20 optim = RotoSolve(init_params,qcircuits,proportions) """RotoSolve inherits from torch.optim.Optimizer, but relies heavily on native python operations. However, since it's non-gradient optimization can't flow the gradients into a classical layer anyway""" for i in range(n_steps): loss = optim.step() print(optim.final_params) # Plotting results plt.plot(list(range(n_steps+1)),optim.losses, 'o-') plt.xlabel("Iteration number") plt.ylabel("Cost") plt.title("For more complicated Hamiltonians can take more iterations") plt.show() dev = qml.device("default.qubit", wires=3) @qml.qnode(dev) def circuit(inputs, params): # |psi_0>: state preparation qml.RY(np.pi / 4, wires=0) qml.RY(np.pi / 3, wires=1) qml.RY(np.pi / 7, wires=2) # V0(theta0, theta1): Parametrized layer 0 qml.RZ(params[0], wires=0) qml.RZ(params[1], wires=1) # W1: non-parametrized gates qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) # V_1(theta2, theta3): Parametrized layer 1 qml.RY(params[2], wires=1) qml.RX(params[3], wires=2) # W2: non-parametrized gates qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) return qml.expval(qml.PauliY(0)) qlayer = qml.qnn.TorchLayer(circuit, {"params": 4}) def loss_func(theta): loss = qlayer(theta) return loss params = np.array([0.5, -0.123, 0.543, 0.233]) qlayer.params.data = torch.tensor(params) optimQNG = QuantumNaturalGradientOptim([qlayer.params], circuit, lr=0.1) lossesQNG = [] for i in range(100): loss = loss_func(qlayer.params) lossesQNG.append(loss) loss.backward() optimQNG.step() optimQNG.zero_grad() dev = qml.device("default.qubit", wires=3) @qml.qnode(dev) def circuit(inputs, params): # |psi_0>: state preparation qml.RY(np.pi / 4, wires=0) qml.RY(np.pi / 3, wires=1) qml.RY(np.pi / 7, wires=2) # V0(theta0, theta1): Parametrized layer 0 qml.RZ(params[0], wires=0) qml.RZ(params[1], wires=1) # W1: non-parametrized gates qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) # V_1(theta2, theta3): Parametrized layer 1 qml.RY(params[2], wires=1) qml.RX(params[3], wires=2) # W2: non-parametrized gates qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) return qml.expval(qml.PauliY(0)) qlayer = qml.qnn.TorchLayer(circuit, {"params": 4}) def loss_func(theta): loss = qlayer(theta) return loss params = np.array([0.5, -0.123, 0.543, 0.233]) qlayer.params.data = 
torch.tensor(params) optim = SGD([qlayer.params], lr=0.1) losses = [] for i in range(100): loss = loss_func(qlayer.params) losses.append(loss) loss.backward() optim.step() optim.zero_grad() plt.plot(list(range(100)),losses, label='SGD Loss') plt.plot(list(range(100)),lossesQNG, label='QNG Loss') plt.legend() plt.title("The quantum natural gradient converges faster") plt.show()Perceptron Activation Network With a learning example and comparison_Overview:_ This is an implementation of the Perceptron Activation NetworkIn this notebook I present a model based on the biological brain, that combines Dropout, ReLU and Perceptron learning into a single learning algorithm - dubbed the Perceptron Activation Layer. It is used like a typical Linear layer, however internally it implements a teacher-student signal for interrupting the flow of weights that do not contribute to the learning task. A teacher network is responsible for computing a "teaching signal" that informs the "student network" which weights to activate and which to turn off.# Import libraries import matplotlib.pyplot as plt # Visualizations will be shown in the notebook. %matplotlib inline import random import os import glob import pickle import time import string from tqdm import tqdm from PIL import Image import numpy as np import json import torch as torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch import optim from torch.autograd import Variable from torchvision import datasets, transforms, models print("Using PyTorch version: " + str(torch.__version__)) use_gpu = torch.cuda.is_available() torch.manual_seed(7) print("Using GPU: {}".format(use_gpu))Using GPU: True**Setup the dataset**batch_size = 64 # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/train/', download=True, train=True, transform=transform) testset = datasets.MNIST('~/.pytorch/MNIST_data/valid/', download=True, train=False, transform=transform) train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False) test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)** Utility functions **# Accuracy and loss meter class AverageMeter(object): """ Computes and stores the average and current value. 
""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count # Visualise a layer activation def viz_layer(layer): fig = plt.figure(figsize=(16, 12)) n_filters = layer.size()[1] for i in range(n_filters): ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[]) # grab layer outputs ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray') ax.set_title('Output %s' % str(i+1)) plt.show()Build the networks **Build the LeNet5 network**class LeNet5(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16*5*5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) self.softmax = nn.LogSoftmax() def forward(self, x): # store last sample for visualisation self.lastx = x self.xc1 = self.conv1(x) self.xa1 = F.relu(self.xc1) x = F.max_pool2d(self.xa1, (2, 2)) self.xc2 = self.conv2(x) self.xa2 = F.relu(self.xc2) x = F.max_pool2d(self.xa2, (2, 2)) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) x = self.softmax(x) return x def num_flat_features(self, x): size = x.size()[1:] num_features = 1 for s in size: num_features *= s return num_features**Define the Perceptron module**class Perceptron(nn.Module): def __init__(self, inputs, outputs, minimum = 0.0): super(Perceptron, self).__init__() self.inputs = inputs self.outputs = outputs # student network self.Sb = Parameter(torch.randn(self.outputs, requires_grad=True)) self.SW = Parameter(torch.randn(self.outputs, self.inputs, requires_grad=True)) # teacher network self.S_o, self.S_i = self.SW.size() self.Tb = Parameter(torch.randn(self.S_o * self.S_i, requires_grad=True)) self.TW = Parameter(torch.randn(self.S_o * self.S_i, self.inputs, requires_grad=True)) self.min_val = torch.tensor(minimum).float().cuda() def forward(self, x): # teacher signal self.Tz = F.linear(x, self.TW, self.Tb) self.Th = torch.matmul(self.Tz.t(), torch.ones(x.size()[0]).cuda()) # perceptron gate self.o = torch.gt(self.Th, self.min_val).float() self.o = self.o.view(self.S_o, self.S_i) self.W = torch.mul(self.o, self.SW) # student signal x = F.linear(x, self.W, self.Sb) return x**Build the Perceptron Activation Network**class Tron(nn.Module): def __init__(self): super(Tron, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2) self.conv2 = nn.Conv2d(6, 16, 5) self.tron1 = Perceptron(16*5*5, 120) self.tron2 = Perceptron(120, 84) self.fc3 = nn.Linear(84, 10) self.softmax = nn.LogSoftmax() def forward(self, x): self.xc1 = self.conv1(x) self.xa1 = F.relu(self.xc1) x = F.max_pool2d(self.xa1, (2, 2)) self.xc2 = self.conv2(x) self.xa2 = F.relu(self.xc2) x = F.max_pool2d(self.xa2, (2, 2)) x = x.view(-1, self.num_flat_features(x)) x = self.tron1(x) x = self.tron2(x) x = self.fc3(x) x = self.softmax(x) return x def num_flat_features(self, x): size = x.size()[1:] num_features = 1 for s in size: num_features *= s return num_featuresTraining the network **Loss plotting functions**def plot_training(train_losses, valid_losses, title): plt.subplot(1, 2, 1) plt.plot(train_losses, label='Training ' + title) plt.plot(valid_losses, label='Validation ' + title) plt.xlabel('epochs') plt.legend(frameon=False) plt.title(title) plt.show()**Train the network**def train(model, epoch, criterion, optimizer): model.train() train_loss = 0 correct = 0 num_train = len(train_loader.dataset) 
batch_time = AverageMeter() losses = AverageMeter() accs = AverageMeter() tic = time.time() with tqdm(total=num_train) as pbar: for batch_idx, (data, target) in enumerate(train_loader): if use_gpu: data, target = data.cuda(), target.cuda() data, target = Variable(data), Variable(target) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() train_loss += loss.item() predicted = torch.max(output, 1)[1] true_count = (predicted == target).float().sum() optimizer.step() correct += true_count acc = 100 * (true_count / len(target)) # store losses.update(loss.item(), data.size()[0]) accs.update(acc.item(), data.size()[0]) # measure elapsed time toc = time.time() batch_time.update(toc-tic) pbar.set_description( ( "{:.1f}s - loss: {:.3f} - acc: {:.3f}".format( (toc-tic), loss.item(), acc.item() ) ) ) pbar.update(batch_size) print('\nTrain set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format( train_loss, correct, len(train_loader.dataset), 100. * correct / len(train_loader.dataset))) return train_loss, correct / len(train_loader.dataset) def test(model, criterion, optimizer): model.eval() test_loss = 0 correct = 0 for data, target in test_loader: if use_gpu: data, target = data.cuda(), target.cuda() data, target = Variable(data), Variable(target) output = model(data) loss = criterion(output, target) test_loss += loss.item() predicted = torch.max(output, 1)[1] correct += (predicted == target).float().sum() print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) return test_loss, correct / len(test_loader.dataset) # Train Model def train_model(model, epochs): if (use_gpu): model.cuda() criteria = nn.NLLLoss() optimise = optim.SGD(model.parameters(), lr=0.003) scheduler = optim.lr_scheduler.StepLR(optimise, step_size=10, gamma=0.5) train_losses, train_accs = [], [] valid_losses, valid_accs = [], [] for e in range(epochs): print("Epoch: {} ...".format(e)) scheduler.step() t_loss, t_acc = train(model, e, criteria, optimise) v_loss, v_acc = test(model, criteria, optimise) train_losses.append(t_loss) valid_losses.append(v_loss) train_accs.append(t_acc) valid_accs.append(v_acc) plot_training(train_losses, valid_losses, 'Loss') plot_training(train_accs, valid_accs, 'Accuracy') # Training Parameters epochs = 50**Train LeNet5**# Standard LeNet5 lenet5 = LeNet5() train_model(lenet5, epochs)Epoch: 0 ...**Train MLP Activation Network**# Model with MLP Activation tron = Tron() # Training Model 2 train_model(tron, epochs)Epoch: 0 ...Visualise the activations **Visualise the input image**plt.imshow(np.squeeze(lenet5.lastx.cpu()[0,0].data.numpy()), cmap='gray')**Visualise the LetNet layers**viz_layer(lenet5.xc1.cpu()) viz_layer(lenet5.xa1.cpu()) viz_layer(lenet5.xc2.cpu()) viz_layer(lenet5.xa2.cpu())**Visualise the PAN layers**viz_layer(tron.xc1.cpu()) viz_layer(tron.xa1.cpu()) viz_layer(tron.xc2.cpu()) viz_layer(tron.xa2.cpu())The resultResult slows that after training with data augmentation the DNN has a 100% validation accuracy and a 99.9% train accuracy, (which is without augmentation).model.load_weights('ASL-new-normal-weights.h5') y_test = np.load('ASL_Train.npz')['arr_1'] y_test = y_test.astype('int8') y_hat = model.predict_classes(trainX ) pd.crosstab(y_test,y_hat) test_wrong = [im for im in zip(trainX,y_hat,y_test) if im[1] != im[2]] print("Wrong Test Cases:",len(test_wrong)) plt.figure(figsize=(10, 10)) for ind, val in enumerate(test_wrong[:100]): 
plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.subplot(10, 10, ind + 1) im = 1 - val[0].reshape((100,100)) plt.axis("off") plt.title(str('Pred: '+str(val[1]))+'\n'+str('True: '+str(val[2])), fontsize=14, color='black') plt.imshow(im, cmap='gray')4800/4800 [==============================] - 12s Wrong Test Cases: 9Homework 3**For exercises in the week 11-16.12.19****Points: 7 + 1b**Please solve the problems at home and bring to class a [declaration form](http://ii.uni.wroc.pl/~jmi/Dydaktyka/misc/kupony-klasyczne.pdf) to indicate which problems you are willing to present on the blackboard.$\def\R{{\mathbb R}} \def\i{^{(i)}} \def\sjt{\mathrm{s.t. }\ }$ Problem 1 [2p]Let $X\in \R^{D\times N}$ be a data matrix contianing $N$ $D$-dimensional points. Let $Y\in\R^{1\times N}$ be the targets.We have seen that the least squares problem$$\min_{\Theta} \frac{1}{2}(\Theta^T X - Y)(\Theta^T X - Y)^T$$has a closed form solution$$\Theta^T{}^* = Y X^T(X X^T)^{-1}$$Where $X^+ = X^T(X X^T)^{-1}$ is the right [Moore-Penrose pseudoinverse](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) of $X$:$$\begin{split}\Theta^T X &\approx Y \\\Theta^T X X^+ &\approx Y X^{+} \\\Theta^T &= Y X^{+}\end{split}$$The pseudoinverse also has another form (called a left inverse):$$X^+ = (X^T X)^{-1}X^T$$ P1.1 [0.5p]Say under which conditions the left and right pseudoinverses exist (when $X$ is a rectangular matrix only one index exists). Give examples of machine learning problems that could be solved using each inverse. Ans:If A is m-by-n and n ≤ m, then A has a left inverse. \If m ≤ n, then it has a right inverse. \right -> lin reg P1.2 [1p]Derive the left inverse by solving the regularized least squares problem$$\min_\Theta \sum_i(\Theta^T x\i - y\i)^2 + \lambda\Theta^T\Theta$$with arificially introduced variables $\epsilon\i$ and constraints $\epsilon\i = \Theta^T x\i - y\i$, then see what happens when $\lambda\rightarrow 0$. P1.3 [0.5p]Show that the above dual formulation allows using Kernel functions with linear regression. Express the optimal solution using a weighed avegage of data samples. How many "support vectors" there are?NB: some authors call the kernelized linear regression the "Least-Squares SVM".import numpy as np from numpy.linalg import pinv, inv X = np.random.random((500, 3)) * 1000 left = lambda x: inv(x.T @ x) @ x.T right = lambda x: x.T @ inv(x @ x.T) (left(X) @ X).astype(int), (X @ right(X)).astype(int), X.astype(int)In this blog post, I will demonstrate webscraping using the scrapy package. We will be taking all of the actors of my favorite TV show, Narcos: Mexico, and attempt to recommend shows based on how many actors from the orginal show the new shows share. WebscrapingWebscraping is a powerful tool that makes it easy to 'scrape' information off of a website in which the data that you want is not in a format that is easy to recieve. I will demostrate my method of recieving the shows that all the actors of Narcos: Mexico have been on, and visualizing that in an attractive plot. The parse Function The parse function will utilize the starting url (in our case it is the home page for Narcos: Mexico on IMBD). It will then navigate to the Cast & Crew page so we can see every actor that appeared in this series. 
The comments in the function will walk through this in more detail.def parse_full_credits(self, response): ''' Redirect scraper to actor's main webpage of each actor selected @param self @param response: represents the website @yield: redirection to actor main webpage ''' #put actor name/ hyperlink in a list for all actors in the full cast and crew actLst = [a.attrib["href"] for a in response.css("td.primary_photo a")] #redirect to the actor page for each actor in the list for actor in actLst: actrPg = response.urljoin(actor) yield scrapy.Request(actrPg, callback = self.parse_actor_page)The parse_actor_page Function The parse_actor_page function will only work if it is called on the actor's main webpage. It will yield a dictionary of the actor's name and every show that they have appeared in. The comments in the function will walk through this in more detail.def parse_actor_page(self, response): ''' Add to dictionary of every actor and every work they have acted in @param self @param response: represents the website @yield: adds dictionary names and the works they have appeared in ''' #grab the actor's name actNam = response.css('title::text').get()[0:-7] #list of every work they have been a part of (producing, acting, etc...) 
flmLst = response.xpath("//*[@class='filmo-category-section']/div/b/a/text()").extract() #take the number of acting credits they have from 'Acting' tab #try with 'actor' tag, if that doesn't work, try with 'actress' #this depends on the individuals webpage try: actNum = int(response.xpath("//*[@id='filmo-head-actor']/text()").extract()[-1][2:-10]) except: actNum = int(response.xpath("//*[@id='filmo-head-actress']/text()").extract()[-1][2:-10]) #takes only the first actNum of works they been in, because this is the works they have acted in actLst = flmLst[:actNum] #yeild dictionary of the actors and a list of all their works yield {"actor" : actNam, "movie_or_TV_name" : actLst}Video Scene Detection based on Optimal Sequential GroupingSome examples on synthetic dataimport time from typing import Tuple, List from multiprocessing import Pool import numpy as np import matplotlib import matplotlib.pyplot as plt from tqdm import tqdm from h_add import get_optimal_sequence_add from h_nrm import get_optimal_sequence_nrm from estimate_scenes_count import estimate_scenes_count from evaluation import calculate_interval_metric %matplotlib inlineHelper functions:class data_linewidth_plot(): """ Draws lines that could scale along with figure size Source: https://stackoverflow.com/questions/19394505/matplotlib-expand-the-line-with-specified-width-in-data-unit/42972469#42972469 """ def __init__(self, x, y, **kwargs): self.ax = kwargs.pop("ax", plt.gca()) self.fig = self.ax.get_figure() self.lw_data = kwargs.pop("linewidth", 1) self.lw = 1 self.fig.canvas.draw() self.ppd = 72./self.fig.dpi self.trans = self.ax.transData.transform self.linehandle, = self.ax.plot([],[],**kwargs) if "label" in kwargs: kwargs.pop("label") self.line, = self.ax.plot(x, y, **kwargs) self.line.set_color(self.linehandle.get_color()) self._resize() self.cid = self.fig.canvas.mpl_connect('draw_event', self._resize) def _resize(self, event=None): lw = ((self.trans((1, self.lw_data))-self.trans((0, 0)))*self.ppd)[1] if lw != self.lw: self.line.set_linewidth(lw) self.lw = lw self._redraw_later() def _redraw_later(self): self.timer = self.fig.canvas.new_timer(interval=10) self.timer.single_shot = True self.timer.add_callback(lambda : self.fig.canvas.draw_idle()) self.timer.start() def plot_distances_chart(distances: np.ndarray, scene_borders: np.ndarray, ax: matplotlib.axes.Axes) -> None: """ Plot scene borders on top of the pairwise distances matrix :param distances: pairwise distances matrix :param scene_borders: """ ax.imshow(distances, cmap='gray') borders_from_zero = np.concatenate(([0], scene_borders)) for i in range(1, len(borders_from_zero)): data_linewidth_plot( x=[borders_from_zero[i-1], borders_from_zero[i-1]], y=[borders_from_zero[i-1], borders_from_zero[i]], ax=ax, linewidth=1, color='red', alpha=0.5 ) data_linewidth_plot( x=[borders_from_zero[i-1], borders_from_zero[i]], y=[borders_from_zero[i-1], borders_from_zero[i-1]], ax=ax, linewidth=1, color='red', alpha=0.5 ) data_linewidth_plot( x=[borders_from_zero[i-1], borders_from_zero[i]], y=[borders_from_zero[i], borders_from_zero[i]], ax=ax, linewidth=1, color='red', alpha=0.5 ) data_linewidth_plot( x=[borders_from_zero[i], borders_from_zero[i]], y=[borders_from_zero[i-1], borders_from_zero[i]], ax=ax, linewidth=1, color='red', alpha=0.5 ) def get_synth_example( features_count: int, scenes_count: int, random_seed: int = 42 ) -> Tuple[np.ndarray, np.ndarray]: """ Generates synthetic pairwise distance matrix for features_count shots and scenes_count scenes :param features_count: 
count of shots :param scenes_count: count of diagonal clusters represented scenes :param random_seed: value for random numbers generator initialization :return: pairwise distance matrix and scene borders """ synth_distances = np.random.uniform(size=(features_count, features_count)) np.random.seed(random_seed) random_t = np.random.choice(range(2, features_count - 1), size=scenes_count-1, replace=False) random_t.sort() for i, t in enumerate(random_t): if i == 0: synth_distances[0:t, 0:t] = np.clip(synth_distances[0:t, 0:t] - 0.4, 0., 1.) else: synth_distances[random_t[i - 1]:t, random_t[i - 1]:t] = \ np.clip(synth_distances[random_t[i - 1]:t, random_t[i - 1]:t] - 0.4, 0., 1.) synth_distances[random_t[-1]:features_count, random_t[-1]:features_count] = \ np.clip(synth_distances[random_t[-1]:features_count, random_t[-1]:features_count] - 0.4, 0., 1.) random_t = np.append(random_t, [features_count]) synth_distances = (synth_distances + synth_distances.T)/2 np.fill_diagonal(synth_distances, 0) return synth_distances, random_t - 1 def get_intervals_from_borders(borders: np.ndarray) -> List[List[Tuple[int, int]]]: """ Convert scene borders to intervals :param borders: list of borders :return: list of interval tuples where first value - beginning of an interval, the second - end of an interval """ intervals = [] prev_border = 0 for cur_border in borders: intervals.append((prev_border, cur_border)) prev_border = cur_border return intervalsAdditive Cost Function Let's generate random pairwise distances matrix, add some uniform noise and calculate optimal borders accordingly to Additive Cost Function.shots_count = 100 scenes_count = 10 distances, synth_scene_borders = get_synth_example(shots_count, scenes_count) optimal_scene_borders = get_optimal_sequence_add(distances, scenes_count) figs, axs = plt.subplots(1,2, figsize=(15,7)) figs.suptitle('Scene Borders Prediction Using Additive Cost Function', fontsize=14) plot_distances_chart(distances, synth_scene_borders, axs[0]) axs[0].set_title('Ground Truth Borders') plot_distances_chart(distances, optimal_scene_borders, axs[1]) axs[1].set_title('Predicted Borders')Normalized Cost Function Let's generate random pairwise distances matrix, add some uniform noise and calculate optimal borders accordingly to Normalized Cost Function.shots_count = 100 scenes_count = 10 distances, synth_scene_borders = get_synth_example(shots_count, scenes_count) optimal_scene_borders = get_optimal_sequence_nrm(distances, scenes_count) figs, axs = plt.subplots(1,2, figsize=(15,7)) figs.suptitle('Scene Borders Prediction Using Normalized Cost Function', fontsize=14) plot_distances_chart(distances, synth_scene_borders, axs[0]) axs[0].set_title('Ground Truth Borders') plot_distances_chart(distances, optimal_scene_borders, axs[1]) axs[1].set_title('Predicted Borders')As you can see, Additive Const Function works a little bit worse than Normalized Cost Function. But one example is not enough. 
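The cost that `get_optimal_sequence_add` minimises is imported from `h_add` and not shown in this notebook. As a rough, illustrative sketch only (assuming the convention used above that each scene border is the index of the last shot of that scene), the additive objective can be thought of as the total pairwise distance accumulated inside each diagonal block of the segmentation. The names `distances`, `synth_scene_borders` and `optimal_scene_borders` below refer to the objects created in the previous cells.

import numpy as np

def h_add_cost_sketch(distances: np.ndarray, borders: np.ndarray) -> float:
    # Sum of pairwise distances inside each diagonal block; borders are assumed
    # to be the (inclusive) index of the last shot of every scene.
    cost, start = 0.0, 0
    for end in borders:
        block = distances[start:end + 1, start:end + 1]
        cost += block.sum() / 2.0  # symmetric matrix, so count each pair once
        start = end + 1
    return cost

# A good segmentation encloses the dark (similar) blocks along the diagonal,
# which shows up as a lower additive cost.
print(h_add_cost_sketch(distances, synth_scene_borders),
      h_add_cost_sketch(distances, optimal_scene_borders))

A lower value means the chosen borders wrap tighter, more homogeneous blocks, which is what the dynamic-programming search behind `get_optimal_sequence_add` is looking for.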
Quality TestsLet's generate 999 synthetic examples and check both algorithm's quality on several metrics.test_examples = [] for N in range(12, 123): for K in range(2,11): test_examples.append(get_synth_example(N, K))Find optimal borders for each example with Hadd optimisation:predicted_examples_add = [] def predict_examples_add(args): distances, synth_scene_borders = args K = estimate_scenes_count(distances) return get_optimal_sequence_add(distances, K) with Pool(4) as pool: predicted_examples_add = list(tqdm(pool.imap(predict_examples_add, test_examples), total=len(test_examples)))100%|██████████| 999/999 [00:33<00:00, 30.17it/s]Find optimal borders for each example with Hnrm optimisation:predicted_examples_nrm = [] def predict_examples_nrm(args): distances, synth_scene_borders = args K = estimate_scenes_count(distances) return get_optimal_sequence_nrm(distances, K) with Pool(4) as pool: predicted_examples_nrm = list(tqdm(pool.imap(predict_examples_nrm, test_examples), total=len(test_examples)))100%|██████████| 999/999 [20:13<00:00, 1.21s/it]Convert scene borders to intervals:predicted_intervals_add = [] for example in predicted_examples_add: predicted_intervals_add.append(get_intervals_from_borders(example)) predicted_intervals_nrm = [] for example in predicted_examples_nrm: predicted_intervals_nrm.append(get_intervals_from_borders(example)) gt_intervals = [] for distances, synth_scene_borders in test_examples: gt_intervals.append(get_intervals_from_borders(synth_scene_borders))Get mean precision, recall, F1 and IoU for each result:precision_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'precision') recall_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'recall') f1_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'f1') iou_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'iou') precision_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'precision') recall_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'recall') f1_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'f1') iou_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'iou') print('Precision add: {} Precision nrm: {}'.format(precision_add, precision_nrm)) print('Recall add: {} Recall nrm: {}'.format(recall_add, recall_nrm)) print('F1 add: {} F1 nrm: {}'.format(f1_add, f1_nrm)) print('IoU add: {} IoU nrm: {}'.format(iou_add, iou_nrm))Precision add: 0.5006426351857141 Precision nrm: 0.5453283464041074 Recall add: 0.9632476338594912 Recall nrm: 0.986827773742529 F1 add: 0.5660577608586743 F1 nrm: 0.6165629629736262 IoU add: 0.47188725622059907 IoU nrm: 0.5369531996674883As we can see, optimisation of the Hnrm metrics works better than Hadd accordingly to each mertics. 
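To make the precision/recall/F1/IoU numbers above easier to interpret, here is a minimal sketch of an IoU-style score for a single example. This is not the implementation behind `evaluation.calculate_interval_metric`; it only illustrates the idea of matching each ground-truth scene to the predicted scene it overlaps most and scoring the match by intersection-over-union of the two shot ranges (`gt_intervals` and `predicted_intervals_nrm` are the lists built in the previous cells).

from typing import List, Tuple

def interval_iou(a: Tuple[int, int], b: Tuple[int, int]) -> float:
    # Intervals are (start, end) pairs as produced by get_intervals_from_borders.
    inter = max(0, min(a[1], b[1]) - max(a[0], b[0]))
    union = (a[1] - a[0]) + (b[1] - b[0]) - inter
    return inter / union if union > 0 else 0.0

def mean_best_iou(gt: List[Tuple[int, int]], pred: List[Tuple[int, int]]) -> float:
    # Average, over ground-truth scenes, of the best IoU with any predicted scene.
    return sum(max(interval_iou(g, p) for p in pred) for g in gt) / len(gt)

# e.g. for the first synthetic example:
print(mean_best_iou(gt_intervals[0], predicted_intervals_nrm[0]))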
Time Tests But what about speed?Let's fix scenes count K, and see, how optimization time depends on shots count N:test_data = [] K = 5 Ns = range(10, 100) for N in Ns: test_data.append(get_synth_example(N, K)) ns_add_times = [] ns_nrm_times = [] for distances, synth_scene_borders in test_data: start_time = time.time() optimal_scene_borders = get_optimal_sequence_add(distances, K) ns_add_times.append(time.time() - start_time) start_time = time.time() optimal_scene_borders = get_optimal_sequence_nrm(distances, K) ns_nrm_times.append(time.time() - start_time) plt.figure(figsize=(15,7)) ax = plt.gca() ax.plot(Ns, ns_add_times, label='H_add optimization time in seconds') ax.plot(Ns, ns_nrm_times, label='H_nrm optimization time in seconds') ax.set(title='Dependence of the Optimization Time from Shots Number', ylabel='Time (sec)', xlabel='Number of shots') ax.legend(loc='best')In one more test we'll fix count of shots N, and see how optimization time depends on scenes count K:test_data = [] N = 70 Ks = range(2, 12) for K in Ks: test_data.append(get_synth_example(N, K)) ks_add_times = [] ks_nrm_times = [] for distances, synth_scene_borders in test_data: start_time = time.time() optimal_scene_borders = get_optimal_sequence_add(distances, len(synth_scene_borders)) ks_add_times.append(time.time() - start_time) start_time = time.time() optimal_scene_borders = get_optimal_sequence_nrm(distances, len(synth_scene_borders)) ks_nrm_times.append(time.time() - start_time) plt.figure(figsize=(15,7)) ax = plt.gca() ax.plot(Ks, ks_add_times, label='H_add optimization time in seconds') ax.plot(Ks, ks_nrm_times, label='H_nrm optimization time in seconds') ax.set(title='Dependence of the Optimization Time from Scenes Number', ylabel='Time (sec)', xlabel='Number of scenes') ax.legend(loc='best')Chapter7.6 TensorFlowの準備# パッケージの読み込み import tensorflow as tf tf.__version__足し算を実行with tf.Graph().as_default(): # 定数の定義 x = tf.constant(10, name="x") y = tf.constant(32, name="y") # 足し算 op = tf.add(x, y) # 演算を実行 with tf.Session() as sess: result = sess.run(op) print(result) # -> 4242変数とプレースホルダwith tf.Graph().as_default(): # xは入力を受け付ける入れ物 x = tf.placeholder(tf.int32, name="x") y = tf.constant(32, name="y") # 足し算 op1 = tf.add(x, y) # 足し算結果を格納する変数 z = tf.Variable(0, name="z") # 足し算結果を変数に代入 op2 = tf.assign(z, op1) # 変数の初期化 init_op = tf.global_variables_initializer() # 演算を実行 with tf.Session() as sess: # 変数の初期化を実行 sess.run(init_op) # xに10をfeed_dictで入力し、結果を得る result = sess.run(op2, feed_dict={x: 10}) print(result) # -> 4242ロジスティック回帰 データセットを作成import pandas as pd import numpy as np from sklearn.datasets import make_blobs import matplotlib.pyplot as plt X_dataset, y_dataset = make_blobs(centers=[[-0.3, 0.5], [0.3, -0.2]], cluster_std=0.2, n_samples=100, center_box=(-1.0, 1.0), random_state=42) y_dataset = y_dataset.reshape(-1, 1) dataset = pd.DataFrame(X_dataset, columns=['x0', 'x1']) dataset['y'] = y_datasetプロット用関数を定義def plot_dataset(dataset): fig, ax = plt.subplots() ax.tick_params(direction='in') for key, g in dataset.groupby('y'): color = 'k' if key == 1 else 'w' g.plot(ax=ax, kind='scatter', x='x0', y='x1', label=key, color=color, s=40, edgecolor='black', linewidth='1', xticks=np.arange(-1, 1, 0.5), yticks=np.arange(-1, 1, 0.5), xlim=(-1, 1), ylim=(-1, 1), figsize=(5, 5)) plt.grid(which='major', color='grey', linestyle='--') return fig, ax def plot_boundary(w, b): x = np.arange(-1, 1, 0.1) y = eval('%f*x + %f' % ((-w[0] / (w[1] + 1e-6)), (-b / (w[1] + 1e-6)))) plt.plot(x, y, color='b', linewidth=2)TensorFlowでロジスティック回帰with 
tf.Graph().as_default(): # 特徴量とラベルの入力 x = tf.placeholder(tf.float32, shape=[None, 2], name="x") y = tf.placeholder(tf.float32, shape=[None, 1], name="y") # 重みとバイアスを変数で定義 w = tf.Variable(tf.zeros([2, 1]), name="w") b = tf.Variable(tf.zeros([1]), name="b") # 識別境界からの距離 score = tf.sigmoid(tf.matmul(x, w) + b) # 損失 loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=score, labels=y)) # 最急降下法で損失を最小化する train_step = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # train_step = tf.train.AdamOptimizer(1.0).minimize(loss) # 変数の初期化 init_op = tf.global_variables_initializer() # 演算を実行 cnt = 0 with tf.Session() as sess: # 変数の初期化を実行 sess.run(init_op) # 400回学習させる for i in range(400): _, _l, _w, _b = sess.run([train_step, loss, w, b], feed_dict={x: X_dataset, y: y_dataset}) # 100回に1回printする if i % 100 == 0: print(_l, _w, _b) plot_dataset(dataset) plot_boundary(_w, _b) cnt += 1(0.72407717, array([[ 0.03871037], [-0.04589954]], dtype=float32), array([-0.03061483], dtype=float32)) (0.57749766, array([[ 2.42943335], [-2.58021379]], dtype=float32), array([-0.10247607], dtype=float32)) (0.54964095, array([[ 3.59925342], [-3.69231558]], dtype=float32), array([ 0.19453107], dtype=float32)) (0.5382728, array([[ 4.37750673], [-4.39150095]], dtype=float32), array([ 0.35291547], dtype=float32))clean_email(): Cleaning and validation for email address Introduction - clean_email() function supports cleaning of messy email values- validate_email() function supports validation on email semantic type of single input value or an input column. When it returns True, the input value is a valid email address.Parameters for clean_email():- split: whether to split input into multiple columns, default False- inplace: whether to clean initial column in place, default False- pre_clean: whether to pre clean input text, default False- fix_domain: whether to fix common typos in domain, default False- report: whether to generate report, default True- errors: error handling types, default "coerce" - 'raise': raise an exception when there is broken value - 'coerce': set invalid value to NaN - 'ignore': just return the initial inputParameters for validate_email():- x: input value, can be Union of single value Example dirty datasetimport pandas as pd import numpy as np df = pd.DataFrame({"messy_email": ["","","y ","","H ","hello", np.nan, "NULL"] }) df1. Default clean_email() Under default setting, clean_email() will do the strict semantic type check and return report automatically. Broken values will be replaced by NaN.from dataprep.clean import clean_email clean_email(df, "messy_email")NumExpr defaulting to 8 threads.2. Split Parameter By setting split parameter to True, returned table will contain separate columns for domain and username of valid emails.clean_email(df, "messy_email", split = True)Email Cleaning Report: 3 values with bad format (37.5%) Result contains 3 (37.5%) values in the correct format and 5 null values (62.5%)3. Pre_clean Parameter when pre_clean parameter is set to True, the function will fix broken text in advance before do semantic type check.clean_email(df, "messy_email", pre_clean = True)Email Cleaning Report: 1 values with bad format (12.5%) Result contains 5 (62.5%) values in the correct format and 3 null values (37.5%)4. 
Fix_domain Parameter When fix_domain parameter is set to True, the function will do basic check to avoid common typos for popular domains.clean_email(df, "messy_email", fix_domain = True)Email Cleaning Report: 1 values with bad format (12.5%) Result contains 5 (62.5%) values in the correct format and 3 null values (37.5%)5. Error Parameterclean_email(df, "messy_email", errors = "raise") clean_email(df, "messy_email", errors = "ignore")Email Cleaning Report: 1 values with bad format (12.5%) Result contains 5 (62.5%) values in the correct format and 3 null values (37.5%)6. Examples for validate_email()from dataprep.clean import validate_email print(validate_email('Abc.example.com')) print(validate_email('')) print(validate_email('')) print(validate_email('this is"')) validate_email(df["messy_email"])1) Data Clean First of all, we want to drop thoese useless columns and to check whether there are any duplicate values in our dataset. If there is any, we will need to drop them.%%time # "Unname: 0" column looks like useless, drop it. del df['Unnamed: 0'] #"EASE-MENT" column is empty, we are going to drop it. del df['EASE-MENT'] del df['SALE DATE'] sum(df.duplicated(df.columns)) df = df.drop_duplicates(df.columns, keep='last') df.shapeNow, Let's look into details.df.info() Int64Index: 83190 entries, 0 to 84547 Data columns (total 19 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 BOROUGH 83190 non-null int64 1 NEIGHBORHOOD 83190 non-null object 2 BUILDING CLASS CATEGORY 83190 non-null object 3 TAX CLASS AT PRESENT 83190 non-null object 4 BLOCK 83190 non-null int64 5 LOT 83190 non-null int64 6 BUILDING CLASS AT PRESENT 83190 non-null object 7 ADDRESS 83190 non-null object 8 APARTMENT NUMBER 83190 non-null object 9 ZIP CODE 83190 non-null int64 10 RESIDENTIAL UNITS 83190 non-null int64 11 COMMERCIAL UNITS 83190 non-null int64 12 TOTAL UNITS 83190 non-null[...]After browsing the information of dataset, we can see they use "-" to represent the missing values. Before prediction, we should clean these rows in the data table. Some of them should not be counted as the sale of real estate. For example, in column "SALE PRICE", these properties with 0 value might be transferred as a gift. Before cleaning missing values, We need to convert some of the columns(for example, SALE PRICE is object, SALE DATE is object, etc) to appropriate datatype.df['SALE PRICE'] = pd.to_numeric(df['SALE PRICE'], errors='coerce') df['LAND SQUARE FEET'] = pd.to_numeric(df['LAND SQUARE FEET'], errors='coerce') df['GROSS SQUARE FEET']= pd.to_numeric(df['GROSS SQUARE FEET'], errors='coerce') df['TAX CLASS AT PRESENT'] = df['TAX CLASS AT PRESENT'].astype('category') df['TAX CLASS AT PRESENT'] = df['TAX CLASS AT PRESENT'].astype('category') df['BOROUGH'] = df['BOROUGH'].astype('category') df.shape df.info() variables = df.columns data = [] for variable in variables: l = df[variable].count() data.append(l) available_per = np.round(pd.Series(data)/len(df), 3) plt.figure(figsize=(8,6)) plt.barh(variables, available_per) plt.title("Percent of available data", fontsize=15) plt.show()As shown in the figure, SALE PRICE, GROSS SQUARE FEET and LAND SQUARE FEET have the lowest percent of available data. But again, SALE PRICE is the value we wanted to predict, and as the data set description states:*Many sales occur with a nonsensically small dollar amount: $0 most commonly. 
These sales are actually transfers of deeds between parties: for example, parents transferring ownership to their home to a child after moving out for retirement.* We may want to drop those rows.For SQUARE FEET, in this case, we will use mean values to fill them up.df=df[df['SALE PRICE']!=0] df['LAND SQUARE FEET']=df['LAND SQUARE FEET'].fillna(df['LAND SQUARE FEET'].mean()) df['GROSS SQUARE FEET']=df['GROSS SQUARE FEET'].fillna(df['GROSS SQUARE FEET'].mean()) df.shape # Splitting dataset test=df[df['SALE PRICE'].isna()] sale_house=df[~df['SALE PRICE'].isna()] test = test.drop(columns='SALE PRICE') print(test.shape) test.head()(13909, 18)Now Let's recap and revisit the details of the remaining dataset.print(sale_house.shape) sale_house.head(10) sale_house.describe()According to the above chart, we can observe that 1) The min value for ZIP CODE is 0, we don't have ZIP CODE that equals to 02) The Building Year of some properties are 0, this should be incorrect as well.3) Some properties have 0 SQUARE FEET...Let's fix them ZIP CODEplt.figure(figsize=(10,8)) plt.title(' ZIP CODE ') plt.xlim(0,12000) sale_house['ZIP CODE'].value_counts().sort_index().plot.line() #sale_house = sale_house[(sale_house['ZIP CODE'] != 0)] #find the most frequent value for zipcode from collections import Counter zip = Counter(sale_house['ZIP CODE'].tolist()).most_common(1) zip # so the most common zipcode is 10314 #zipcode=[] #for i in range(len(sale_house['ZIP CODE'])): # if sale_house['ZIP CODE'].tolist()[i] == 0: # zipcode.append(10314) # else: # zipcode.append(sale_house['ZIP CODE'].tolist()[i]) #sale_house['ZIP CODE'] = zipcode import numba as nb @nb.jit() def frequent(x,y): result=[] for i in range(len(x)): if x.tolist()[i] == 0: result.append(y) else: result.append(x.tolist()[i]) return result sale_house['ZIP CODE'] = frequent(sale_house['ZIP CODE'], 10314) sale_house['ZIP CODE'].value_counts().sort_index().plot.line()BUILDING YEARplt.figure(figsize=(10,8)) plt.title(' Year Built ') plt.xlim(1500,2020) sale_house['YEAR BUILT'].value_counts().sort_index().plot.line() #sale_house = sale_house[(sale_house['YEAR BUILT'] > 1875)] freyear = Counter(sale_house['YEAR BUILT'].tolist()).most_common(2) freyear #yearBuilt=[] #for i in range(len(sale_house['YEAR BUILT'])): # if sale_house['YEAR BUILT'].tolist()[i]==0: # yearBuilt.append(1920) # else: # yearBuilt.append(sale_house['YEAR BUILT'].tolist()[i]) #sale_house['YEAR BUILT']=yearBuilt sale_house['YEAR BUILT'] = frequent(sale_house['YEAR BUILT'], 1920) sale_house = sale_house[(sale_house['YEAR BUILT'] > 1875)] sale_house['YEAR BUILT'].value_counts().sort_index().plot.line()Finally, it's time check the outliers in our datasets, let's first look into the column sale price.SALE PRICEplt.figure(figsize=(10,5)) sns.boxplot(x='SALE PRICE', data=sale_house) plt.ticklabel_format(style='plain', axis='x') plt.title('SALE PRICE in USD') plt.show() sale_house = sale_house[(sale_house['SALE PRICE'] > 0) & (sale_house['SALE PRICE'] < 500000000)] sns.distplot(sale_house['SALE PRICE']) sale_house = sale_house[(sale_house['SALE PRICE'] > 0) & (sale_house['SALE PRICE'] < 5000000)] sns.distplot(sale_house['SALE PRICE'])SQUARE FEETsale_house = sale_house[sale_house['GROSS SQUARE FEET'] < 10000] sale_house = sale_house[sale_house['LAND SQUARE FEET'] < 10000] plt.figure(figsize=(10,6)) sns.regplot(x='GROSS SQUARE FEET', y='SALE PRICE', data=sale_house, fit_reg=False, scatter_kws={'alpha':0.3}) plt.figure(figsize=(10,6)) sns.regplot(x='LAND SQUARE FEET', y='SALE PRICE', data=sale_house, 
fit_reg=False, scatter_kws={'alpha':0.3})UNIT NUMBERsale_house[["TOTAL UNITS", "SALE PRICE"]].groupby(['TOTAL UNITS'], as_index=False).count().sort_values(by='SALE PRICE', ascending=False) sale_house = sale_house[(sale_house['TOTAL UNITS'] > 0) & (sale_house['TOTAL UNITS'] != 2261)] plt.figure(figsize=(10,6)) sns.boxplot(x='TOTAL UNITS', y='SALE PRICE', data=sale_house) plt.title('Total Units vs Sale Price') plt.show()2) Preparation#"Apartment Number" only has few values, so we are going to drop it. print("The percent of rows with null in apartment number:" +str(sum(df['APARTMENT NUMBER']==' ')/len(df))) del sale_house['APARTMENT NUMBER'] del sale_house['ADDRESS'] del sale_house['NEIGHBORHOOD'] del sale_house['BUILDING CLASS AT PRESENT'] del sale_house['BUILDING CLASS AT TIME OF SALE'] cor = sale_house.corr() fig, ax = plt.subplots(figsize=(12,12)) sns.heatmap(cor,annot=True, ax = ax)Removing highing correlated independent variable from datasets.# sale_house.drop(['RESIDENTIAL UNITS','GROSS SQUARE FEET',],inplace=True, axis=1) # sale_house.head() combine=[] combine = sale_house[['RESIDENTIAL UNITS','GROSS SQUARE FEET']] combine #sqr.append(sale_house['RESIDENTIAL UNITS']) from sklearn.decomposition import PCA pca = PCA(n_components = 1) pca.fit(combine) sale_house['COMBINE RU GSF'] = pca.transform(combine) del sale_house['RESIDENTIAL UNITS'] del sale_house['GROSS SQUARE FEET'] sale_house.head() #one hot encoded #https://machinelearningmastery.com/why-one-hot-encode-data-in-machine-learning/ one_hot_features = ['BOROUGH', 'BUILDING CLASS CATEGORY','TAX CLASS AT PRESENT','TAX CLASS AT TIME OF SALE'] # Convert categorical variables into dummy/indicator variables (i.e. one-hot encoding). one_hot_encoded = pd.get_dummies(sale_house[one_hot_features]) one_hot_encoded.info(verbose=True, memory_usage=True, null_counts=True) numeric_data=sale_house.select_dtypes(include=[np.number]) numeric_data.describe() df = sale_house scaler = StandardScaler() scaler.fit(df[numeric_data.columns]) scaled = scaler.transform(df[numeric_data.columns]) for i, col in enumerate(numeric_data.columns): df[col] = scaled[:,i] df.drop(one_hot_features,axis=1,inplace=True) df = pd.concat([df, one_hot_encoded] ,axis=1) df.head() # classifying data into independent and dependent variable X = df.drop(['SALE PRICE'],axis = 1).values y = df['SALE PRICE'].values # creating test and training set data, 70% train, 30% test X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)3) Predition 3.0)Helper Functiondef rmse(y_test,y_pred): return np.sqrt(mean_squared_error(y_test,y_pred))3.1) Linearlinear=LinearRegression() linear.fit(X_train, y_train) y_pred = linear.predict(X_test) result_linear = rmse(y_test, y_pred) result_linear3.2)Ridge, Lasso# fitting linear regression to training set regressor = Ridge(alpha=0.01, normalize=True) regressor.fit(X_train,y_train) y_pred = regressor.predict(X_test) result_ridge = rmse(y_test, y_pred) result_ridge #3.6)Lasso Lassoregressor = Lasso(alpha = 0.01, normalize =True) Lassoregressor.fit(X_train,y_train) y_predict = Lassoregressor.predict(X_test) result_Lasso = rmse(y_test, y_predict) result_Lasso3.3)Random Forestfrom sklearn.tree import DecisionTreeRegressor dtree = DecisionTreeRegressor() dtree.fit(X_train, y_train) y_pred = dtree.predict(X_test) result_dt = rmse(y_test, y_pred) result_dt dforest = RandomForestRegressor(n_estimators=100, criterion='mse', bootstrap=True, n_jobs=-1) dforest.fit(X_train, y_train) y_pred = dforest.predict(X_test) result_rf = 
rmse(y_test, y_pred) result_rf3.4)Gradient boostingfrom sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import GridSearchCV reg = GradientBoostingRegressor(alpha=0.9, ccp_alpha=0.0, criterion='friedman_mse', init=None, learning_rate=0.05, loss='ls', max_depth=7, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=500, n_iter_no_change=None, presort='deprecated', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False) reg.fit(X_train, y_train) pre_GBM = reg.predict(X_test) result_gl = rmse(y_test, pre_GBM) result_gl3.5)AdaBoostfrom sklearn.ensemble import AdaBoostRegressor from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV reg = AdaBoostRegressor(base_estimator=None, learning_rate=0.05, loss='linear', n_estimators=50, random_state=None) reg.fit(X_train, y_train) pre = reg.predict(X_test) result_gl = rmse(y_test, pre) result_gl3.6) Stackedfrom sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.kernel_ridge import KernelRidge from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone from sklearn.model_selection import KFold, cross_val_score, train_test_split from sklearn.metrics import mean_squared_error import xgboost as xgb import lightgbm as lgbThis time we add a cross validation approach.n_folds = 5 def rmsle_cv(model): kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(X_train) rmse= np.sqrt(-cross_val_score(model, X_train, y_train, scoring="neg_mean_squared_error", cv = kf)) return(rmse) linear1 = LinearRegression() score = rmsle_cv(linear1) print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1)) score = rmsle_cv(lasso) print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) ridge = make_pipeline(RobustScaler(), Ridge(alpha =0.01, random_state=1)) score = rmsle_cv(ridge) print("\nridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) elastic = make_pipeline(RobustScaler(), ElasticNet(alpha =0.01, random_state=1)) score = rmsle_cv(elastic) print("\nElastic score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) dforest = RandomForestRegressor(n_estimators=100, criterion='mse', bootstrap=True, n_jobs=-1) score = rmsle_cv(dforest) print("\nRandomF score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) model_gb = GradientBoostingRegressor(alpha=0.9, ccp_alpha=0.0, criterion='friedman_mse', init=None, learning_rate=0.05, loss='ls', max_depth=7, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=500, n_iter_no_change=None, presort='deprecated', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False) score = rmsle_cv(model_gb) print("GB score: {:.4f} ({:.4f})\n" .format(score.mean(), score.std())) ada = AdaBoostRegressor(base_estimator=None, learning_rate=0.05, loss='linear', n_estimators=50, random_state=None) score = rmsle_cv(ada) print("AdaBoost Ridge 
score: {:.4f} ({:.4f})\n".format(score.mean(), score.std())) # https://github.com/akshaykumarvikram/kaggle-advanced-regression-algos class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, base_models, meta_model, n_folds=5): self.base_models = base_models self.meta_model = meta_model self.n_folds = n_folds # Fit the data on clones of the original models def fit(self, X, y): self.base_models_ = [list() for x in self.base_models] self.meta_model_ = clone(self.meta_model) kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156) # Train cloned base models then create out-of-fold predictions # that are needed to train the cloned meta-model out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models))) for i, model in enumerate(self.base_models): for train_index, holdout_index in kfold.split(X, y): instance = clone(model) self.base_models_[i].append(instance) instance.fit(X[train_index], y[train_index]) y_pred = instance.predict(X[holdout_index]) out_of_fold_predictions[holdout_index, i] = y_pred # Now train the cloned meta-model using the out-of-fold predictions as new feature self.meta_model_.fit(out_of_fold_predictions, y) return self #Do the predictions of all base models on the test data and use the averaged predictions as #meta-features for the final prediction which is done by the meta-model def predict(self, X): meta_features = np.column_stack([ np.column_stack([model.predict(X) for model in base_models]).mean(axis=1) for base_models in self.base_models_ ]) return self.meta_model_.predict(meta_features) stacked_averaged_models = StackingAveragedModels(base_models = (lasso, ridge, elastic, ada, dforest), meta_model = model_gb) score = rmsle_cv(stacked_averaged_models) print("Stacking Averaged models score: {:.4f} ({:.4f})".format(score.mean(), score.std())) stacked_averaged_models = StackingAveragedModels(base_models = (model_gb, dforest), meta_model = lasso) stacked_averaged_models.fit(X_train, y_train) stacked_train_pred = stacked_averaged_models.predict(X_train) stacked_pred = stacked_averaged_models.predict(X_test) print(rmse(y_test, stacked_pred))3.7)Weighted Stacking (Our Contribution)from sklearn.model_selection import KFold X = X_train Y = y_train kf = KFold(n_splits=5) pre_linear = [] pred_Lasso = [] pred_Ridge = [] pred_RF = [] pred_GBM = [] pred_Ada = [] pred_ela = [] for train_index, test_index in kf.split(X): linear=LinearRegression() linear.fit(X[train_index], Y[train_index]) y_pred_linear = linear.predict(X[test_index]) pre_linear = pre_linear + y_pred_linear.tolist() ridge = Ridge(alpha=0.01, normalize=True) ridge.fit(X[train_index], Y[train_index]) y_pred_ridge = ridge.predict(X[test_index]) pred_Ridge = pred_Ridge + y_pred_ridge.tolist() lasso = Lasso(alpha = 0.0005, normalize =True) lasso.fit(X[train_index], Y[train_index]) y_pred_lasso = lasso.predict(X[test_index]) pred_Lasso = pred_Lasso + y_pred_lasso.tolist() rf = RandomForestRegressor(n_estimators=100, criterion='mse', bootstrap=True, n_jobs=-1) rf.fit(X[train_index], Y[train_index]) y_pred_rf = rf.predict(X[test_index]) pred_RF = pred_RF + y_pred_rf.tolist() gbm = GradientBoostingRegressor(alpha=0.9, ccp_alpha=0.0, criterion='friedman_mse', init=None, learning_rate=0.05, loss='ls', max_depth=7, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=500, n_iter_no_change=None, presort='deprecated', random_state=None, subsample=1.0, 
tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False) gbm.fit(X[train_index], Y[train_index]) y_pred_gbm = gbm.predict(X[test_index]) pred_GBM = pred_GBM + y_pred_gbm.tolist() ada = AdaBoostRegressor(base_estimator=None, learning_rate=0.05, loss='linear', n_estimators=50, random_state=None) ada.fit(X[train_index], Y[train_index]) y_pred_ada = ada.predict(X[test_index]) pred_Ada = pred_Ada + y_pred_ada.tolist() ela = ElasticNet(alpha =0.01, random_state=1) ela.fit(X[train_index], Y[train_index]) y_pred_ela = ela.predict(X[test_index]) pred_ela = pred_ela + y_pred_ela.tolist() combine = 0*np.array(pred_linear)+0*np.array(pred_Lasso)+0.04*np.array(pred_Ridge)+0*np.array(pred_RF)+0.96*np.array(pred_GBM)+0*np.array(pred_Ada) rmse(combine,y_train) from scipy.optimize import nnls fin_mat = np.zeros((26231,6)) fin_mat[:,0]=np.array(pre_linear) fin_mat[:,1]=np.array(pred_Lasso) fin_mat[:,2]=np.array(pred_Ridge) fin_mat[:,3]=np.array(pred_RF) fin_mat[:,4]=np.array(pred_GBM) fin_mat[:,5]=np.array(pred_Ada) fin_mat b=nnls(fin_mat, y_train) bimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import datetime df=pd.read_csv('FlightData.csv') #Importing the dataset #Watching the Dataset in DataFrame df.head() df.columns df.shape df['MONTH'].unique(),df['YEAR'].unique() df.info() #Finding the sum of null values in each column df.isnull().sum() #Dropping out the useless columns from the DataFrame df.drop(['Unnamed: 25'],axis=1,inplace=True) df.head() df.shape df.info() #Keep the columns which are uselful for the model df=df[['MONTH','DAY_OF_MONTH','DAY_OF_WEEK','ORIGIN','DEST','CRS_ARR_TIME','ARR_DEL15']] df.head() df.info() #checking out again the null values in our new dataframe df[df.isnull().values==True] #Fill out the null values with 0,1 in the ARR_DEL15 column #Note:I didn't try to remove the complete rows containing the null values as i didnt wanted to loose the data that maybe useful df.fillna(1,inplace=True) #visualizing ARR_DEL15 column fig, axs = plt.subplots(2) df2=df[df['ARR_DEL15']==0] Y=[len(df2[df2['MONTH']==i]) for i in df2['MONTH'].unique()] axs[0].bar(df['MONTH'].unique(),Y) axs[0].set(xlabel='MONTH', ylabel='No. of Flights late') df2=df[df['ARR_DEL15']==1] Y=[len(df2[df2['MONTH']==i]) for i in df2['MONTH'].unique()] axs[1].bar(df['MONTH'].unique(),Y) axs[1].set(xlabel='MONTH', ylabel='No. 
of Flights on time') df.info() df.head() #feature scaling df['CRS_ARR_TIME']=(df['CRS_ARR_TIME']/100).astype(int) #Feature Scaling is required for training our model to get more accurate results df.head() df['ORIGIN'].unique(),df['DEST'].unique() df=pd.get_dummies(df,columns=['ORIGIN','DEST']) df.head() df.shape #bulding machine learning model x=df.iloc[:,[0,1,2,3,5,6,7,8,9,10,11,12,13,14]].values y=df.iloc[:,[4]].values from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0) from sklearn.ensemble import RandomForestClassifier classifier=RandomForestClassifier(n_estimators=10,criterion='entropy',random_state=0) classifier.fit(x_train,y_train) x_train.shape y_train.shape,y_test.shape y_pred=classifier.predict(x_test) res=classifier.score(x_test,y_test) res #score is not best method to check the accuracy.sometimes it is not useful #confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test,y_pred) #using ROC AUC from sklearn.metrics import roc_auc_score probab=classifier.predict_proba(x_test) probab[:,1] roc_auc_score(y_test, probab[:, 1]) #using recall from sklearn.metrics import recall_score recall_score(y_test,y_pred) # It may be low because falsse negatives maybe high in number from sklearn.metrics import roc_curve fpr, tpr , _ = roc_curve(y_test, probab[:, 1]) plt.plot(fpr, tpr) plt.plot([0, 1], [0, 1], color='grey', linestyle='--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') df.columns def predict_delay(dep_date_time,origin,destination): from datetime import datetime try: dep_date_time_parsed = datetime.strptime(dep_date_time, '%m/%d/%Y %H:%M:%S') except ValueError as e: print('Error parsing date/time - {}'.format(e)) date=dep_date_time_parsed.day month=dep_date_time_parsed.month day_of_week=dep_date_time_parsed.isoweekday() hour=dep_date_time_parsed.hour origin=origin.upper() destination=destination.upper() input_1=[{'MONTH':month,'DAY_OF_MONTH': date,'DAY_OF_WEEK':day_of_week, 'CRS_DEP_TIME':hour, 'ORIGIN_ATL': 1 if origin=='ATL' else 0, 'ORIGIN_DTW': 1 if origin == 'DTW' else 0, 'ORIGIN_JFK': 1 if origin == 'JFK' else 0, 'ORIGIN_MSP': 1 if origin == 'MSP' else 0, 'ORIGIN_SEA': 1 if origin == 'SEA' else 0, 'DEST_ATL': 1 if destination == 'ATL' else 0, 'DEST_DTW': 1 if destination == 'DTW' else 0, 'DEST_JFK': 1 if destination == 'JFK' else 0, 'DEST_MSP': 1 if destination == 'MSP' else 0, 'DEST_SEA': 1 if destination == 'SEA' else 0 }] return classifier.predict_proba(pd.DataFrame(input_1))[0][0] #It returns the probability of on time flight arrival predict_delay('10/01/2018 21:45:00', 'JFK', 'ATL') from datetime import datetime dep_date_time='10/01/2018 21:45:00' dep_date_time_parsed = datetime.strptime(dep_date_time, '%m/%d/%Y %H:%M:%S') dep_date_time_parsed.date #visualization x_label=['aug 1','aug 2','aug 3','aug 4'] values=(predict_delay('10/01/2018 21:45:00', 'JFK', 'ATL'),predict_delay('10/02/2018 21:45:00', 'JFK', 'ATL'), predict_delay('10/03/2018 21:45:00', 'JFK', 'ATL'),predict_delay('10/04/2018 21:45:00', 'JFK', 'ATL')) plt.bar(x_label,values)ARIMA Model on Stock Market Dataset Microsoft Opening Share Value EstimationIn this notebook, we shall predict the Microsoft opening share value using ARIMA Model. The dataset is taken from [Kaggle](https://www.kaggle.com/ryanforbes/msftstockdata/).References for subsequent material can be found here: 1. 
[ARIMA Model – Complete Guide to Time Series Forecasting in Python](https://www.machinelearningplus.com/time-series/arima-model-time-series-forecasting-python/)2. [Stock Market Analysis Using ARIMA](https://towardsdatascience.com/stock-market-analysis-using-arima-8731ded2447a) Importing Librariesimport numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # import warnings from google.colab import drive from pandas import datetime from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.stattools import adfuller # for ADF Test from statsmodels.tsa.arima_model import ARIMA # Our ARIMA Model here! from sklearn.metrics import mean_squared_errorPreprocessingdrive.mount('/content/gdrive') dfull = pd.read_csv("/content/gdrive/My Drive/iPython Notebooks/daily_MSFT.csv").fillna(0) dfull.head() df = pd.read_csv("/content/gdrive/My Drive/iPython Notebooks/daily_MSFT.csv", usecols=["timestamp", "open"]).fillna(0) df.timestamp = df.timestamp.values[::-1] df.open = df.open.values[::-1] df.head() df.plot(x='timestamp', y='open', figsize=(10, 10)) plt.title("Microsoft") plt.show() final_index = len(df) - 1 print(final_index) df['timestamp'][final_index] # latest data => recent most5283ARIMA for Time Series Prediction Step 1: Make the Time-Series Stationaryadf_result = adfuller(df.open.dropna()) print('ADF Statistic: %f' % adf_result[0]) print('p-value: %f' % adf_result[1]) for key, value in adf_result[4].items(): print('Critial Values:') print(f' {key}, {value}') plt.rcParams.update({'figure.figsize':(9,7), 'figure.dpi':120}) # Original Series fig, axes = plt.subplots(5, 2, sharex=False) axes[0, 0].plot(df.open); axes[0, 0].set_title('Original Series') plot_acf(df.open, ax=axes[0, 1]) # 1st Differencing axes[1, 0].plot(df.open.diff()); axes[1, 0].set_title('1st Order Differencing') plot_acf(df.open.diff().dropna(), ax=axes[1, 1]) # 2nd Differencing axes[2, 0].plot(df.open.diff().diff()); axes[2, 0].set_title('2nd Order Differencing') plot_acf(df.open.diff().diff().dropna(), ax=axes[2, 1]) # 3rd Differencing axes[3, 0].plot(df.open.diff()); axes[3, 0].set_title('3rd Order Differencing') plot_acf(df.open.diff().diff().dropna(), ax=axes[3, 1]) # 4th Differencing axes[4, 0].plot(df.open.diff().diff()); axes[4, 0].set_title('4th Order Differencing') plot_acf(df.open.diff().diff().diff().diff().dropna(), ax=axes[4, 1]) plt.show() # 1st Differencing plot_acf(df.open.diff().dropna()) # 2nd Differencing plot_acf(df.open.diff().diff().dropna())We see that the plot for ACF on 2nd differencing reaches a negative value fairly quickly which indicates over-differencing. 
Therefore, we shall take the value of d as 1# difference data df_diff = df.open.diff() adf_result_diff = adfuller(df_diff.dropna()) print('ADF Statistic: %f' % adf_result_diff[0]) print('p-value: %f' % adf_result_diff[1]) for key, value in adf_result_diff[4].items(): print('Critial Values:') print(f' {key}, {value}')ADF Statistic: -42.279178 p-value: 0.000000 Critial Values: 1%, -3.4315891064424116 Critial Values: 5%, -2.862087557448897 Critial Values: 10%, -2.567061464395374Thus, we see that the series is now stationary as it satisfies the ADF Test.fig, axes = plt.subplots(1, 2, sharex=False) axes[0].plot(df_diff); axes[0].set_title('1st Differencing') axes[1].set(ylim=(0,5)) plot_pacf(df_diff.dropna(), ax=axes[1]) plt.show() # Just plot PACF for clarity plot_pacf(df_diff.dropna())We can see that only lag 1 has a significant value of PACF while higher lags barely cross the significance threshold (0.05). So, we choose p = 1fig, axes = plt.subplots(1, 2, sharex=True) axes[0].plot(df_diff); axes[0].set_title('1st Differencing') axes[1].set(ylim=(0,1.2)) plot_acf(df_diff.dropna(), ax=axes[1]) plt.show() # Just Plot ACF for clarity plot_acf(df_diff.dropna())Just like in the case of PACF, only lag 1 seems to be significant. Therefore, we choose q = 1 Step 3: Fitting on the ARIMA Model First let's split the dataset into: training set (80%) and test set (20%)train_data, test_data = df[0:int(len(df)*0.8)], df[int(len(df)*0.8):] plt.figure(figsize=(12,7)) plt.title('Microsoft Prices') plt.xlabel('Date') plt.ylabel('Prices') plt.plot(train_data['open'], 'blue', label='Training Data') plt.plot(test_data['open'], 'green', label='Testing Data') plt.xticks(np.arange(0,final_index, 1300), df['timestamp'][0:final_index:1300]) plt.legend()Let's define the Error Metric: SMAPE (Symmetric Mean Absolute Percentage Error)def smape_kun(y_true, y_pred): return np.mean((np.abs(y_pred - y_true) * 200/ (np.abs(y_pred) + np.abs(y_true))))New Section Training the Model and Error Check with MSE and SMAPE Here, we train the model on the train_set and test it on the test_set The model takes the ACTUAL value from the test_set and returns the predicted value for the next row and stores these predictions in the list 'predictions'. It is to be noted that SMAPE is a more reliable error metric in time series analysis than MSE.train_ar = train_data['open'].values test_ar = test_data['open'].values history = [x for x in train_ar] print(type(history)) predictions = list() for t in range(len(test_ar)): # training takes place here: # p = 1, d = 0, q = 1 => fails to converge when run on google colab # p = 1, d = 1, q = 1 => succeeds in 771.731s with MSE = 1.135, SMAPE=33.785 model = ARIMA(history, order=(1,1,1)) model_fit = model.fit(disp=0) output = model_fit.forecast() yhat = output[0] predictions.append(yhat) obs = test_ar[t] history.append(obs) #print('predicted=%f, expected=%f' % (yhat, obs)) error = mean_squared_error(test_ar, predictions) print('Testing Mean Squared Error: %.3f' % error) error2 = smape_kun(test_ar, predictions) print('Symmetric mean absolute percentage error: %.3f' % error2) # let's see what kind of model we achieved print(model_fit.summary())ARIMA Model Results ============================================================================== Dep. Variable: D.y No. Observations: 5282 Model: ARIMA(1, 1, 1) Log Likelihood -11323.493 Method: css-mle S.D. 
of innovations 2.064 Date: Tue, 25 Feb 2020 AIC 22654.986 Time: 15:16:23 BIC 22681.275 Sample: 1 HQIC 22664.174 ============================================================================== coef std err z P>|z| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0054 0.026 -0.207 0.8[...]Error Check Through Residuals# Plot residual errors residuals = pd.DataFrame(model_fit.resid) residuals.plot(title="Residuals") plt.show()Error Check Through Actual vs Predicted Plot# Actual vs Fitted model_fit.plot_predict(dynamic=False) # False => use in-sample lag values plt.show() plt.figure(figsize=(12,7)) plt.plot(df['open'], color='blue', label='Training Data') plt.plot(test_data.index, predictions, color='green', marker='o', linestyle='dashed', label='Predicted Price') plt.plot(test_data.index, test_data['open'], color='red', label='Actual Price') plt.title('Microsoft Prices Prediction') plt.xlabel('Dates') plt.ylabel('Prices') plt.xticks(np.arange(0, final_index, 1300), df['timestamp'][0:final_index:1300]) plt.legend() plt.figure(figsize=(12,7)) plt.plot(test_data.index, predictions, color='green', marker='o', linestyle='dashed', label='Predicted Price') plt.plot(test_data.index, test_data['open'], color='red', label='Actual Price') plt.title('Microsoft Prices Prediction') plt.xlabel('Dates') plt.ylabel('Prices') plt.xticks(np.arange(int(0.8*len(df)), final_index, 300), df['timestamp'][int(0.8*len(df)):final_index:300]) plt.legend()Here, we predict the values of the test_set based on the prior predictions. That is to say, the model takes the PREDICTIONS made by it to determine the next row in the test_set (instead of taking the ACTUAL test_set value as in the previous case).test_ar_orig = test_data['open'].values history_new = [x for x in train_ar] print(type(history_new)) predictions_new = list() for t in range(len(test_ar_orig)): # training takes place here: # p = 1, d = 1, q = 1 => fails to converge # p = 1, d = 1, q = 0 => succeeds in 49.511s; MSE = 2023.404,SMAPE = 66.610 # p = 2, d = 1, q = 0 => fails to converge # p = 2, d = 1, q = 1 => AR coeffs fail stationarity test # p = 2, d = 1, q = 2 => AR coeffs fail stationarity test # p = 3, d = 1, q = 0 => succeeds in 117.668s; MSE = 2024.363,SMAPE = 66.646 # p = 3, d = 1, q = 1 => AR coeffs fail stationarity test # p = 3, d = 1, q = 2 => something called HessianInversionWarning model_new = ARIMA(history_new, order=(1,1,0)) model_fit_new = model_new.fit(disp=0) output_new = model_fit_new.forecast() yhat_new = output_new[0] predictions_new.append(yhat_new) history_new.append(yhat_new) # append the latest prediction to history #print('predicted=%f, expected=%f' % (yhat, obs)) error = mean_squared_error(test_ar, predictions_new) print('Testing Mean Squared Error: %.3f' % error) error2 = smape_kun(test_ar, predictions_new) print('Symmetric mean absolute percentage error: %.3f' % error2) Testing Mean Squared Error: 2023.404 Symmetric mean absolute percentage error: 66.610Clearly, both the error metrics are very high. This is because the ARIMA model works well for one-step out-of-sample prediction but performs poorly for multi-step out-of-sample prediction. This goes to show that stock market prediction (at least the one implemented here) does not work for long-term future scenarios.# let's see what the new model is like print(model_fit_new.summary())ARIMA Model Results ============================================================================== Dep. Variable: D.y No.
Observations: 5282 Model: ARIMA(1, 1, 0) Log Likelihood -11180.575 Method: css-mle S.D. of innovations 2.009 Date: Tue, 25 Feb 2020 AIC 22367.150 Time: 16:47:57 BIC 22386.866 Sample: 1 HQIC 22374.041 ============================================================================== coef std err z P>|z| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0205 0.026 -0.789 0.4[...]Error Check Through Actual vs Predicted Plotplt.figure(figsize=(12,7)) plt.plot(df['open'], color='blue', label='Training Data') plt.plot(test_data.index, predictions_new, color='green', marker='o', linestyle='dashed', label='Predicted Price') plt.plot(test_data.index, test_data['open'], color='red', label='Actual Price') plt.title('Microsoft Prices Prediction') plt.xlabel('Dates') plt.ylabel('Prices') plt.xticks(np.arange(0, final_index, 1300), df['timestamp'][0:final_index:1300]) plt.legend()Data types A data type is an attribute of data that tells the computer what kind of data it will be working with.In the case of Python we have these data types:- **Integer (int)**: Whole numbers with no decimal part.- **Floating point (float)**: Real numbers with a decimal part, written with a decimal point.- **Character (chr)**: A single typographic symbol.- **String (str)**: A sequence of characters.- **Boolean (boolean)**: A variable with two states, true or false. Variables A variable is a space in the computer's memory that stores information of a given data type.To create a variable you must follow this syntax:1. Variable name: It should preferably be short and related to the information that will be stored in it. The name must not start with a number or contain symbols such as ñ or operators reserved by the language such as +,-,/,*2. The "=" operator: It indicates that the information on the right is assigned to the variable so it can be stored.3. Stored information: Which will correspond to a specific data type.Example:Variable = ""# We store a value in each variable, defining the data types Entero = 3 # Decimal number Flotante = 3.1416 # Alphanumeric typographic symbol Caracter = "a" # String of characters Cadena = "Cuatro" # Two-state variable: True and False Booleano = True # We print each variable to see how each one is stored print(Entero) print(Flotante) print(Caracter) print(Cadena) print(Booleano)3 3.1416 a Cuatro TrueUsage examples########################## Numeric operation ############################ Num1 = 34.23 Num2 = 27.56 Resultado = Num1 + Num2 print("El resultado de la suma es:",Resultado) ############################# Concatenation ############################## Saludo = "Hola, " Pregunta = "¿Como estas?"
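# Applying + to two str values concatenates them rather than adding numbers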
frase = Saludo + Pregunta print(frase)El resultado de la suma es: 61.78999999999999 Hola, ¿Como estas?Converting data types (cast)The different data types can be converted into one another through an operation known as a cast; below we will see when these cases occur: Integer - Decimal# Integer->Decimal # We convert the integer into a number with decimals print(float(Entero)) # Decimal->Integer # We convert the floating point number into an integer, losing the decimals print(int(Flotante))3.0 3Numeric - Text# Number as a character Num = "9" # Character->Decimal # We convert the character into a number with decimals print(float(Num)) # Decimal->String # We convert the floating point number into a string of characters print(str(Flotante))9.0 3.1416As we can see, the character "9" has been converted into a decimal number, while the decimal number was converted into a string of characters. To make sure of this, we will try to perform an addition.print(Flotante + 3.0) print(str(Flotante) + 3.0)6.1416In the error we can see that Python asks for the numeric value to be of type string, since the "+" symbol is interpreted as a concatenation.# Once the numeric value is converted into a text string we can perform the concatenation print(str(Flotante) + "3.0")3.14163.0StringsText strings are interpreted by Python as arrays of characters, so we can access each character that makes up a string through indices.These indices can be positive or negative.Variable = "Hola Mundo" print(Variable[0]) print(Variable[1]) print(Variable[2]) print(Variable[3]) print(Variable[4]) print(Variable[5]) print(Variable[6]) print(Variable[7]) print(Variable[8]) print(Variable[9]) print(Variable[-1]) print(Variable[-2]) print(Variable[-3]) print(Variable[-4]) print(Variable[-5])o d n u MIt is also possible to extract fragments of text strings by using the ":" symbol print(Variable[2:6]) print(Variable[:-2]) print(Variable[2:]) print(Variable[::-1])la M la Mundo odnuM aloHgridcvfrom sklearn.ensemble import GradientBoostingClassifier #GBM algorithm from sklearn import cross_validation, metrics #Additional sklearn functions from sklearn.grid_search import GridSearchCV #Performing grid search def modelfit(alg, dtrain, predictors, performCV=True, printFeatureImportance=True, cv_folds=5): #Fit the algorithm on the data alg.fit(dtrain[predictors], y) #Predict training set: dtrain_predictions = alg.predict(dtrain[predictors]) dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1] #Perform cross-validation: if performCV: cv_score = cross_validation.cross_val_score(alg, dtrain[predictors], y, cv=cv_folds, scoring='roc_auc') #Print model report: print ("\nModel Report") print ("Accuracy : %.4g" % metrics.accuracy_score(y , dtrain_predictions)) print ("AUC Score (Train): %f" % metrics.roc_auc_score(y , dtrain_predprob)) if performCV: print ("CV Score : Mean - %.7g | Std - %.7g | Min - %.7g | Max - %.7g" % (np.mean(cv_score),np.std(cv_score),np.min(cv_score),np.max(cv_score))) #Print Feature Importance: if printFeatureImportance: feat_imp = pd.Series(alg.feature_importances_, predictors).sort_values(ascending=False) plt.figure(figsize=(20,20)) feat_imp.plot(kind='bar', title='Feature Importances') plt.ylabel('Feature Importance Score') #Choose all predictors except target & IDcols predictors = df.columns gbm0 = GradientBoostingClassifier(random_state=10) modelfit(gbm0, df, predictors) param_test1 = {'n_estimators':[20, 30, 40, 50, 60, 70, 80, 90]}
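# The grid searches below tune the GBM stage by stage: param_test1 picks n_estimators at a
# fixed learning_rate of 0.05, param_test2 tunes the tree structure (max_depth and
# min_samples_split), param_test3 refines min_samples_split together with min_samples_leaf,
# param_test4 tunes max_features, and param_test5 tunes subsample before the final
# gbm_tuned_2 model is fit with a larger n_estimators.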
gsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, min_samples_split=500, min_samples_leaf=50,max_depth=8,max_features='sqrt',subsample=0.8,random_state=10), param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch1.fit(df[predictors], y) gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_ ## Test 2 param_test2 = {'max_depth':[5, 7, 9, 11, 13, 15] ,'min_samples_split': [200, 400, 600, 800, 1000]} gsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, n_estimators=90, max_features='sqrt', subsample=0.8, random_state=10), param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch2.fit(df[predictors], y) gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_ #test 3 param_test3 = {'min_samples_split': [800, 1000, 1200, 1400, 1600] , 'min_samples_leaf': [30, 40, 50, 60, 70]} gsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, n_estimators=90,\ max_depth=7,max_features='sqrt', subsample=0.8, random_state=10), param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch3.fit(df[predictors], y) gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_ modelfit(gsearch3.best_estimator_, df, predictors) #test 4 param_test4 = {'max_features': [7, 9, 11, 13, 15, 17, 19, 21]} gsearch4 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, min_samples_split = 1000, n_estimators=70,max_depth=7,\ max_features='sqrt', subsample=0.8, random_state=10,min_samples_leaf = 50), param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch4.fit(df[predictors], y ) gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_ #test 5 param_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]} gsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, min_samples_split = 1000, n_estimators=70,max_depth=7,\ subsample=0.8, \ random_state=10,min_samples_leaf = 50,max_features=17), param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch5.fit(df[predictors], y ) gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_ gbm_tuned_2 = GradientBoostingClassifier(learning_rate=0.05, min_samples_split = 1000, n_estimators=500,max_depth=10,\ subsample=0.8, random_state=10,min_samples_leaf = 50,max_features=17) modelfit(gbm_tuned_2, df, predictors) test.info() prediction_proba_2 = gbm_tuned_2.predict_proba(test) submit = make_submission(prediction_proba_2[:,1])CLUSTERING IN IRIS DATA WITH K-MEANS First we will find the K Value with the ELBOW MethodK Value calculation means finding the optimal count of clusters.clusters = range(1, 6) WCSS = [] for k in clusters: #we will create the KMeans model model = KMeans(n_clusters=k) #now we will fit the model to iris data model.fit(iris) #generate inertia and append those values to WCSS WCSS.append(model.inertia_) # Now we will plot our clusters and the inertia plt.plot(clusters, WCSS, "-o") plt.xlabel("No of Clusters") plt.ylabel("WCSS") plt.title("Elbow Method") plt.xticks(clusters) plt.show() # So based on above plot we should create 3 clusters for our model . 
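For clarity, model.inertia_ collected in the elbow loop above is the within-cluster sum of squared distances to the assigned centroids (WCSS); the short sketch below recomputes it by hand for k=3 on the iris DataFrame that is built in the following cell, purely as a sanity check and not as part of the original analysis.
import numpy as np
from sklearn.cluster import KMeans

def wcss(X, fitted_model):
    """Within-cluster sum of squares: squared distance of every point to its own centroid."""
    centers = fitted_model.cluster_centers_[fitted_model.labels_]
    return float(((np.asarray(X) - centers) ** 2).sum())

check_model = KMeans(n_clusters=3, random_state=0).fit(iris)
print(wcss(iris, check_model), check_model.inertia_)  # the two numbers should agree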
import numpy as np import matplotlib.pyplot as plt # For plotting from sklearn.cluster import KMeans # For Clustering import pandas as pd from sklearn.datasets import load_iris irisdata = load_iris() iris = pd.DataFrame(irisdata.data) iris.head() # Column Names irisdata.feature_names # Col Names into the IRIS Dataframe iris.columns = irisdata.feature_names iris.head() # Setosa, Versicolor and Virginica:: clusters 03 # dtree = DecisionTree() model = KMeans(n_clusters = 3) model # Fit the K Means Clustering on my IRIS DATA model.fit(iris) # Predict the Labels labels = model.predict(iris) labels # Plotting the IRIS Data and the Cluster Centroid too... # Lets Find out the Centroids centroid = model.cluster_centers_ centroid centroid_x = centroid[: , 0] centroid_y = centroid[:, 1] centroid_x xs = irisdata.data[: , 0] ys = irisdata.data[: , 1] #lets plot these together import matplotlib.patches as mpatches plt.figure(figsize=(20,10)) plt.scatter(xs , ys , c = labels , s=60 ) plt.scatter(centroid_x, centroid_y , marker = "D" , c="r" , s = 120) plt.xlabel("Sepal Length in cms" , size = 20) plt.ylabel("Sepal Width in cms" , size = 20) plt.title("Clustered IRIS data " , size = 30) one = mpatches.Patch(facecolor = 'purple', label='Setosa', linewidth = 1, edgecolor = 'black') two = mpatches.Patch(facecolor = 'teal', label = 'Versicolor', linewidth = 1, edgecolor = 'black') three = mpatches.Patch(facecolor = 'yellow', label = "Virginica", linewidth = 1, edgecolor = 'black') legend = plt.legend(handles=[one, two, three],loc = 4, fontsize = 'xx-large',fancybox= True,borderpad=2,labelspacing=2) plt.show()So we have 3 clusters corresponding to 3 different species of flowers , i.e, Setosa , Virginica and Versicolor.centroid_pl = centroid[:,2] centroid_pw = centroid[:,3] centroid_pl axis_pl = irisdata.data[:,2] axis_pw = irisdata.data[:,3] axis_pl import matplotlib.patches as mpatches plt.figure(figsize=(20,10)) plt.scatter(axis_pl , axis_pw ,c =labels , s=80) plt.scatter(centroid_pl , centroid_pw , marker = "D" , c = "r" , s=120) plt.xlabel("Petal Length in cms" , size = 20) plt.ylabel("Petal Width in cms" , size = 20) plt.title("Clustering 3 species of flowers in IRIS data " , size = 30) handle1 = mpatches.Patch(facecolor = "purple" , label ="Setosa" , linewidth = 1, edgecolor = 'black') handle2 = mpatches.Patch(facecolor = "teal" , label ="Versicolor", linewidth = 1, edgecolor = 'black') handle3 = mpatches.Patch(facecolor = "yellow" , label ="Virginica", linewidth = 1, edgecolor = 'black') legend = plt.legend(handles=[handle1, handle2, handle3],loc=4, fontsize = "xx-large" ,borderpad= 2 , labelspacing=2 ) plt.show()Data Loadseason_plays_df = pd.read_csv("./data/processed/plays_2015-2020.csv", index_col=False) train_df, test_df = split_dataset(season_plays_df) y_train = np.where(train_df.event_type_id=="GOAL", 1, 0) y_test = np.where(test_df.event_type_id=="GOAL", 1, 0)Preprocesspre_train_df = advanced_features(train_df) pre_train_df = pre_train_df.drop(columns=["empty_net"]) x_train = pre_train_df[["angle_from_net", "dist_from_net"]]XGBoostdef evaluation_plots(model, X, y, model_name=None): y_proba = model.predict_proba(X)[:, 1] plot_roc_auc(y, y_proba, model_name=model_name) plot_goal_rate(y, y_proba, model_name=model_name) plot_cumulative_proportion(y, y_proba, model_name=model_name) plot_calibration_curve(y, y_proba, model_name=model_name)Base modelbase_params={ "n_estimators": 100, "max_depth": 4, "learning_rate": 0.1, } def run_base_xgb(X, y, params, save_run=False): if save_run: experiment = 
Experiment(project_name="hockey-all-star-analytics", log_graph=True) experiment.log_dataset_hash(X) x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y) model = XGBClassifier(objective="binary:logistic", use_label_encoder=False, **params) model.fit(x_train, y_train, eval_set=[(x_val, y_val)], eval_metric=["logloss", "error", "auc"] ) evaluation_plots(model, x_val, y_val, model_name="XGBoost Baseline") if save_run: model.save_model("./models/base_xgb.json") experiment.log_model("base_xgb", "./models/") experiment.end() return model base_model = run_base_xgb(x_train, y_train, base_params, save_run=False)Tuned XGBoostdef objective(trial, x_df, y_df): hyperparams = { # structure "max_depth": trial.suggest_int("max_depth", 3, 8, step=1), # accuracy "learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3), "n_estimators": trial.suggest_categorical("n_estimators", [50]), # overfitting "reg_alpha": trial.suggest_int("lambda_l1", 0, 100, step=5), "reg_lambda": trial.suggest_int("lambda_l2", 0, 100, step=5), "min_child_weight": trial.suggest_float("min_gain_to_split", 0, 15), } kfold_cv = StratifiedKFold(n_splits=5, shuffle=True) cv_scores = [] for idx, (train_idx, test_idx) in enumerate(kfold_cv.split(x_df, y_df)): x_train, x_test = x_df.iloc[train_idx], x_df.iloc[test_idx] y_train, y_test = y_df.iloc[train_idx], y_df.iloc[test_idx] clf = XGBClassifier(use_label_encoder=False, **hyperparams) clf.fit( x_train, y_train, eval_set=[(x_test, y_test)], eval_metric=["logloss", "error", "auc"], verbose=False, ) best_score = clf.evals_result()["validation_0"]["logloss"] cv_scores.append(best_score) return np.mean(cv_scores) def run_tuned_xgb(x_df, y_df, save_run=False): if save_run: experiment = Experiment(project_name="hockey-all-star-analytics", log_code=True, log_graph=True) # optimize objective to tune XGBoost study = optuna.create_study(direction="minimize", study_name="tuned_xgboost") optimize = lambda trial: objective(trial, x_df, y_df) study.optimize(optimize, n_trials=20) # Train the model with the best parameters x_train, x_val, y_train, y_val = train_test_split(x_df.values, y_df.values, test_size=0.2, stratify=y_df.values) best_model = XGBClassifier(**study.best_params) best_model.fit(x_train, y_train, eval_set=[(x_val, y_val)], eval_metric=["logloss", "error", "auc"]) evaluation_plots(best_model, x_val, y_val, model_name="XGBoost Tuned") feature_imp = pd.DataFrame(best_model.feature_importances_.T, index=x_df.columns, columns=["Permutation Feature Importance"]) feature_imp.sort_values(by="Permutation Feature Importance", ascending=True).plot(kind="barh") if save_run: best_model.save_model("./models/best_xgb.json") experiment.log_model("best_xgb", "./models/") experiment.end() return best_model best_model = run_tuned_xgb(pre_train_df, pd.Series(y_train), save_run=True)Feature drop# best_model.get_params() best_params = { 'objective': 'binary:logistic', 'use_label_encoder': True, 'base_score': 0.5, 'booster': 'gbtree', 'colsample_bylevel': 1, 'colsample_bynode': 1, 'colsample_bytree': 1, 'enable_categorical': False, 'gamma': 0, 'gpu_id': -1, 'importance_type': None, 'interaction_constraints': '', 'learning_rate': 0.2990984444640177, 'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 1, 'monotone_constraints': '()', 'n_estimators': 50, 'n_jobs': 8, 'num_parallel_tree': 1, 'predictor': 'auto', 'random_state': 0, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'subsample': 1, 'tree_method': 'exact', 'validate_parameters': 1, 'verbosity': None, 
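# NOTE: 'lambda_l1', 'lambda_l2' and 'min_gain_to_split' below are the Optuna trial names used in objective(); there they were mapped onto reg_alpha, reg_lambda and min_child_weight, but study.best_params keeps the trial names, so the tuned values end up stored under these LightGBM-style keys instead of XGBoost's own reg_alpha/reg_lambda/min_child_weight parameters.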
'lambda_l1': 45, 'lambda_l2': 0, 'min_gain_to_split': 14.173685506149932} pre_train_df["SHOT_grouped"] = pre_train_df.BLOCKED_SHOT + pre_train_df.MISSED_SHOT + pre_train_df.SHOT subset_df = pre_train_df.drop(columns=["period_idx", "PENALTY", "x_coord", "y_coord", "BLOCKED_SHOT", "MISSED_SHOT", "SHOT", "PERIOD_START"]) reduced_model = XGBClassifier(**best_params) experiment = Experiment(project_name="hockey-all-star-analytics", log_code=True, log_graph=True) xx_df, y_df = subset_df, pd.Series(y_train) x_train, x_val, yy_train, y_val = train_test_split(xx_df.values, y_df.values, test_size=0.2, stratify=y_df.values) reduced_model = XGBClassifier(**best_params) reduced_model.fit(x_train, yy_train, eval_set=[(x_val, y_val)], eval_metric=["logloss", "error", "auc"]) y_proba = reduced_model.predict_proba(x_val)[:, 1] evaluation_plots(reduced_model, x_val, y_val, model_name="XGBoost reduced") feature_imp = pd.DataFrame(reduced_model.feature_importances_.T, index=xx_df.columns, columns=["Permutation Feature Importance"]) feature_imp.sort_values(by="Permutation Feature Importance", ascending=True).plot(kind="barh") reduced_model.save_model("./models/reduced_xgb.json") experiment.log_model("reduced_xgb", "./models/") experiment.end()Test settest_df_reg = test_df.loc[test_df.game_type=="R"].copy() pre_test_df_reg = advanced_features(test_df_reg) pre_test_df_reg["SHOT_grouped"] = pre_test_df_reg.BLOCKED_SHOT + pre_test_df_reg.MISSED_SHOT + pre_test_df_reg.SHOT test_subset_df_reg = pre_test_df_reg.drop(columns=["period_idx", "PENALTY", "x_coord", "y_coord", "BLOCKED_SHOT", "MISSED_SHOT", "SHOT", "PERIOD_START", "empty_net"]) y_test_reg = np.where(test_df_reg.loc[test_df.game_type=="R"].copy().event_type_id=="GOAL", 1, 0) test_df_poff = test_df.loc[test_df.game_type=="P"].copy() pre_test_df_poff = advanced_features(test_df_poff) pre_test_df_poff[['GOAL', 'OTHER', 'PENALTY', 'PERIOD_START']] = 0 pre_test_df_poff["SHOT_grouped"] = pre_test_df_poff.BLOCKED_SHOT + pre_test_df_poff.MISSED_SHOT + pre_test_df_poff.SHOT test_subset_df_poff = pre_test_df_poff.drop(columns=["period_idx", "PENALTY", "x_coord", "y_coord", "BLOCKED_SHOT", "MISSED_SHOT", "SHOT", "PERIOD_START", "empty_net"]) y_test_poff = np.where(test_df_poff.loc[test_df_poff.game_type=="P"].copy().event_type_id=="GOAL", 1, 0) x_test_reg = test_subset_df_reg.values x_test_poff = test_subset_df_poff.values y_proba_test_reg = reduced_model.predict_proba(x_test_reg)[:, 1] #evaluation_plots(reduced_model, x_test_reg, y_test_reg, model_name="XGBoost reduced - Test") y_proba_test_poff = reduced_model.predict_proba(x_test_poff)[:, 1] #evaluation_plots(reduced_model, x_test_poff, y_test_poff, model_name="XGBoost reduced - Test") np.save("./reports/milestone2/y_proba_test_reg.npy", y_proba_test_reg) np.save("./reports/milestone2/y_proba_test_poff.npy", y_proba_test_poff)Finals plots preparing playoffs datapoff_results = pd.read_csv("./reports/milestone2/play_off_prediction_logistic.csv") poff_results2 = pd.read_csv("./reports/milestone2/play_off_prediction.csv") poff_results["pred_proba_best"] = poff_results2["pred_proba"] poff_results["Prediction_best"] = poff_results2["Prediction"] poff_results["pred_proba_xgboost_reduced"] = y_proba_test_poff poff_results = poff_results.rename(columns= {"pred_proba_dist_only": "LogReg: dist", "pred_proba_angle_only": "LogReg: angle", "pred_proba_dist_angle": "LogReg: dist & angle", "pred_proba_best": "LGBM", "pred_proba_xgboost_reduced": "XGBoost reduced", } ) poff_results = 
poff_results.drop(columns=['Prediction_dist_only', 'Prediction_angle_only', 'Prediction_dist_angle', 'actual', 'Prediction_best'])preparing regular datareg_results = pd.read_csv("./reports/milestone2/Regular_prediction_logistic.csv") reg_results2 = pd.read_csv("./reports/milestone2/Regualar_prediction.csv") reg_results["pred_proba_best"] = reg_results2["pred_proba"] reg_results["Prediction_best"] = reg_results2["Prediction"] reg_results["pred_proba_xgboost_reduced"] = y_proba_test_reg reg_results = reg_results.rename(columns= {"pred_proba_dist_only": "LogReg: dist", "pred_proba_angle_only": "LogReg: angle", "pred_proba_dist_angle": "LogReg: dist & angle", "pred_proba_best": "LGBM", "pred_proba_xgboost_reduced": "XGBoost reduced", } ) reg_results = reg_results.drop(columns=['Prediction_dist_only', 'Prediction_angle_only', 'Prediction_dist_angle', 'actual', 'Prediction_best'])Plotsfrom sklearn.metrics import RocCurveDisplay from sklearn.calibration import CalibrationDisplay def random_baseline(y_test): baseline = np.zeros(y_test.shape) baseline[:int(len(baseline)/2)] = 1 np.random.shuffle(baseline) return baseline def all_roc_auc(y_true, y_results): fig, axs = plt.subplots(1, 1) for i in range(y_results.shape[1]): RocCurveDisplay.from_predictions(y_true, y_results.iloc[:, i], ax=axs, name=y_results.columns[i]) baseline = random_baseline(y_true) baseline_kwargs = { "dashes": (1, 2, 1) } RocCurveDisplay.from_predictions(y_true, baseline, ax=axs, name="Random baseline", **baseline_kwargs) plt.title("Responder-Operator Curve (ROC) - Regular") plt.show() all_roc_auc(y_test_reg, reg_results) def all_goal_rate(y_true, y_results): valid = pd.DataFrame() valid['true_labels'] = np.array(y_true) total_goal = np.sum(y_true) shot_prob_percentile = np.arange(0, 100, 5) for j in range(y_results.shape[1]): y_proba = y_results.iloc[:, j] percentile = [[np.percentile(y_proba, i), np.percentile(y_proba, i+5)] for i in range(0,100,5)] goal_rate = [] cum=0 for i in range(0, len(percentile)): goals = valid[ (y_proba<=percentile[i][1])&(y_proba>percentile[i][0]) & (valid['true_labels']==1)].shape[0] no_goals = valid[(y_proba<=percentile[i][1])&(y_proba>percentile[i][0]) & (valid['true_labels']==0)].shape[0] if goals==0: goal_rate.append(0) else: goal_rate.append((goals*100)/(goals+no_goals)) plt.plot(shot_prob_percentile, goal_rate, label=y_results.columns[j]) ##Plot of goal rate vs Shot probability percentile plt.xlim(100, 0) plt.ylim(0, 100) plt.title("Goal Rate - Regular") plt.xlabel('Shot probability model percentile', fontsize=14) plt.ylabel('Goals / (Shots + Goals)', fontsize=14) plt.legend() plt.show() all_goal_rate(y_test_reg, reg_results) def all_calibration_curve(y_true, y_results): fig, axs = plt.subplots(1, 1) for i in range(y_results.shape[1]): CalibrationDisplay.from_predictions(y_true, y_results.iloc[:, i], ax=axs, name=y_results.columns[i]) plt.title("Calibration Curve - Regular") plt.show() all_calibration_curve(y_test_reg, reg_results) def all_cumulative_proportion(y_true, y_results): valid = pd.DataFrame() valid['true_labels'] = np.array(y_true) total_goal = np.sum(y_true) for j in range(y_results.shape[1]): y_proba = y_results.iloc[:, j] percentile = [[np.percentile(y_proba, i), np.percentile(y_proba, i+5)] for i in range(0,100,5)] cumulative = [] for i in range(0, len(percentile)-1): goals = valid[(y_proba>=percentile[i][0]) & (y_true==1)].shape[0] cumulative.append(goals*100/total_goal) cumulative.append(0) shot_prob_percentile = np.arange(0, 100, 5) plt.plot(shot_prob_percentile, cumulative, 
label=y_results.columns[j]) plt.xlim(100, 0) plt.ylim(0, 100) plt.title("Cumulative % of goals - Regular") plt.xlabel('Shot probability model percentile', fontsize=14) plt.ylabel('Proportion', fontsize=14) plt.legend() plt.show() all_cumulative_proportion(y_test_reg, reg_results)Basic image transform (TPS/affine)==================================In this example we demonstrate how to employ the utility functions from``symjax.tensor.interpolation.affine_transform`` and``symjax.tensor.interpolation.thin_plate_spline``to transform/interpolate imagesimport matplotlib.pyplot as plt import symjax import symjax.tensor as T import numpy as np x = T.Placeholder((10, 1, 28, 28), "float32") points = T.Placeholder((10, 2 * 16), "float32") thetas = T.Placeholder((10, 6), "float32") affine = T.interpolation.affine_transform(x, thetas) tps = T.interpolation.thin_plate_spline(x, points) f = symjax.function(x, thetas, outputs=affine) g = symjax.function(x, points, outputs=tps) data = symjax.data.mnist()["train_set/images"][:10] plt.figure(figsize=(20, 6)) plt.subplot(2, 8, 1) plt.imshow(data[0][0]) plt.title("original") plt.ylabel("TPS") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 2) points = np.zeros((10, 2 * 16)) plt.imshow(g(data, points)[0][0]) plt.title("identity") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 3) points = np.zeros((10, 2 * 16)) points[:, :16] += 0.3 plt.imshow(g(data, points)[0][0]) plt.title("x translation") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 4) points = np.zeros((10, 2 * 16)) points[:, 16:] += 0.3 plt.imshow(g(data, points)[0][0]) plt.title("y translation") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 5) points = np.random.randn(10, 2 * 16) * 0.2 plt.imshow(g(data, points)[0][0]) plt.title("random") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 6) points = np.meshgrid(np.linspace(-1, 1, 4), np.linspace(-1, 1, 4)) points = np.concatenate([points[0].reshape(-1), points[1].reshape(-1)]) * 0.4 points = points[None] * np.ones((10, 1)) plt.imshow(g(data, points)[0][0]) plt.title("zoom") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 7) points = np.meshgrid(np.linspace(-1, 1, 4), np.linspace(-1, 1, 4)) points = np.concatenate([points[0].reshape(-1), points[1].reshape(-1)]) * -0.2 points = points[None] * np.ones((10, 1)) plt.imshow(g(data, points)[0][0]) plt.title("zoom") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 8) points = np.zeros((10, 2 * 16)) points[:, 1::2] -= 0.1 points[:, ::2] += 0.1 plt.imshow(g(data, points)[0][0]) plt.title("blob") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 9) plt.imshow(data[0][0]) plt.title("original") plt.ylabel("Affine") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 10) points = np.zeros((10, 6)) points[:, 0] = 1 points[:, 4] = 1 plt.imshow(f(data, points)[0][0]) plt.title("identity") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 11) points = np.zeros((10, 6)) points[:, 0] = 1 points[:, 4] = 1 points[:, 2] = 0.2 plt.imshow(f(data, points)[0][0]) plt.title("x translation") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 12) points = np.zeros((10, 6)) points[:, 0] = 1 points[:, 4] = 1 points[:, 5] = 0.2 plt.imshow(f(data, points)[0][0]) plt.title("y translation") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 13) points = np.zeros((10, 6)) points[:, 0] = 1 points[:, 4] = 1 points[:, 1] = 0.4 plt.imshow(f(data, points)[0][0]) plt.title("skewness x") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 14) points = np.zeros((10, 6)) points[:, 0] = 1.4 points[:, 4] = 1.4 plt.imshow(f(data, points)[0][0]) plt.title("zoom") 
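# Note on the 6 affine parameters used in this example (inferred from how they are set here):
# they appear to be the row-major entries [a, b, tx, c, d, ty] of the 2x3 matrix
# [[a, b, tx], [c, d, ty]], so indices 0 and 4 scale x/y (1 = identity),
# 2 and 5 translate along x/y, and 1 and 3 shear.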
plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 15) points = np.zeros((10, 6)) points[:, 0] = 1.4 points[:, 4] = 1.0 plt.imshow(f(data, points)[0][0]) plt.title("zoom x") plt.xticks([]) plt.yticks([]) plt.subplot(2, 8, 16) points = np.zeros((10, 6)) points[:, 0] = 1 points[:, 4] = 1 points[:, 3] = 0.4 plt.imshow(f(data, points)[0][0]) plt.title("skewness y") plt.xticks([]) plt.yticks([]) plt.tight_layout() plt.show()Regression Relationships- deterministic (such as Celius Fahrenheit) - these are exact- statistical (height & weight) - displays scatter or trend in graph Notation> Greek letters express variables in the population> $Y_i = \alpha + \beta(x_i - \bar{x}) + \epsilon_i$ > Latin characters express variables in the sample> $y_i = a + b(x_i - \bar{x}) + e_i$ > bar expresses the calculated mean> $\bar{x}$> hat expresses the expected value (best fit, i.e. regression line)> $\hat{x}$ Linear Regression Equation*line of best fit*$Y=a+bX$ where Y is the *dependent* (or response, outcome) variable, and X is the *independent* (or predictor/explanatory) variable.$\displaystyle a = \frac{ (\sum y)(\sum x^2) - (\sum x)(\sum xy) }{ n(\sum x^2) - (\sum x)^2}$$\displaystyle b = \frac{ n(\sum xy) - (\sum x)(\sum y) }{ n(\sum x^2) - (\sum x)^2}$ Derivation- let $x_i$ be the ith predictor value (x axis)- let $y_i$ be the ith observed value (y axis)- let $\hat{y}_i$ be the ith predicted value using a regression line (y value of the line of best fit) Find the least error*The error between observation and prediction needs to be a low as possible*- $e_i = y_i - \hat{y}_i$Squaring the sum of errors allows us to express the optimal value - the least squares criterion$\displaystyle\sum_{i=1}^{n} e_i^2$*This is better than using summing the absolute value, as squaring not only creates positive values, it is supported better with calculus**So, we need to find the best fitted line, a.k.a the values a and b, where $\hat{y}_i = mx_i + c$. We can express the sum of values in terms found in the aforementioned linear formula*$\displaystyle\begin{split}Q &= \sum_{i=1}^{n} e_i^2 \\ &= \sum_{i=1}^{n} (y_i - \hat{y}_i)^2 \\ &= \sum_{i=1}^{n} \big(y_i - (a + bx_i) \big)^2 \\ \end{split}$*Instead of finding 'a' the intercept, which isn't useful for many statistical analysis (i.e. height and weight won't ever become 0), we use the arithmetic mean of x and y as the origin.*$\hat{y}_i = a + b(x_i - \bar{x})$ Find the minimum interceptTo find the minimum with respects to a, the intercept, solve the derivative at 0$\displaystyle\begin{split}Q &= \sum_{i=1}^{n} \Big(y_i - \big(a + b(x_i - \bar{x}) \big) \Big)^2 \\ & \text{ using chain rule: } f = g \circ h (x) \implies f^\prime = g^\prime \circ h(x) \cdot h^\prime(x) \\& \text{ where } g(x) = \sum_{i=1}^n ( x_i^2 )\text{, and } h(x) = \Big(y_i - \big(a + b(x_i - \bar{x}) \big) \Big) \\\frac{dQ}{da} &= 2\sum_{i=1}^{n} \Big(y_i - \big(a + b(x_i - \bar{x}) \big) \Big) (-1) \equiv 0 \\&= - \sum_{i=1}^n y_i + \sum_{i=1}^n a + b \sum_{i=1}^n (x_i - \bar{x}) \equiv 0 \\& \text{as } \sum_{i=1}^n a = na \text{. 
and } \sum_{i=1}^n (x_i - \bar{x}) = 0 \\&= - \sum_{i=1}^n y_i + na \equiv 0 \\a &= \frac{1}{n} \sum\limits_{i=1}^n y_i \\ &= \bar{y} \\ & \text{ a is the mean of all y values at the minimum/best fit, when the mean of all x's is taken as the mid point} \\Q &= \sum_{i=1}^{n} \Big(y_i - \big( \bar{y} + b(x_i - \bar{x}) \big) \Big)^2 \\ \end{split}$ Find the minimum slopeTo find the minimum with respects to b, the slope, solve the derivative at 0$\displaystyle\begin{split}Q &= \sum_{i=1}^{n} \Big(y_i - \big( \bar{y} + b(x_i - \bar{x}) \big) \Big)^2 \\ & \text{using chain rule: } f = g \circ h (x) \implies f^\prime = g^\prime \circ h(x) \cdot h^\prime(x) \\& \text{where } g(x) = \sum_{i=1}^n ( x_i^2 )\text{, and } h(x) = \Big(y_i - \big( \bar{y} + b(x_i - \bar{x}) \big) \\\frac{dQ}{db} &= 2\sum_{i=1}^{n} \Big(y_i - \big( \bar{y} + b(x_i - \bar{x}) \big) \Big) \cdot - (x_i - \bar{x}) \equiv 0 \\ &= - \sum_{i=1}^{n} (y_i - \bar{y})(x_i - \bar{x}) + b \sum_{i=1}^{n} (x_i - \bar{x})(x_i - \bar{x}) \equiv 0 \\ & b \sum_{i=1}^{n} (x_i - \bar{x})^2 = \sum_{i=1}^{n} (y_i - \bar{y})(x_i - \bar{x}) \\ b &= \frac{ \sum_{i=1}^{n} (x_i - \bar{x})(y_i - \bar{y}) }{ \sum_{i=1}^{n} (x_i - \bar{x})^2 } \\\end{split}$# Regression Line SIZE = 1200 SAMPLE_SIZE = 120 # Generate random variables for our population M = 0.3 C = 3.0 x = np.random.uniform(size=SIZE) * 20 y = 10.0 + np.random.normal(scale=0.8, size=SIZE) y = y+(x*M-C) # get midpoint x_mean_pop = x.sum() / x.size y_mean_pop = y.sum() / y.size # Take a random sample for analysis sample = np.random.choice(range(SAMPLE_SIZE), 120) x_sample = x[sample] y_sample = y[sample] x_mean_sample = x_sample.sum() / x_sample.size y_mean_sample = y_sample.sum() / y_sample.size # Manual Way to get intercept and slope for our sample nom = np.fromfunction(lambda i: (x_sample[i] - x_mean_sample) * (y_sample[i] - y_mean_sample), shape = (SAMPLE_SIZE,), dtype=np.int ) denom = np.fromfunction(lambda i: (x_sample[i] - x_mean_sample) ** 2, shape = (SAMPLE_SIZE,), dtype=np.int ) slope_sample = nom.sum() / denom.sum() intercept_sample = y_mean - slope_sample*x_mean # The Numpy way for our population slope_pop, intercept_pop = np.polyfit(x, y, 1) # build ab line abline_x = np.linspace(0,20.0,20) abline_values_pop = [slope_pop * i + intercept_pop for i in abline_x] abline_values_sample = [slope_sample * i + intercept_sample for i in abline_x] plt.figure(figsize=(14,6)) plt.margins(0,0) plt.title('') plt.plot(x_mean_pop, y_mean_pop, color='indigo', lw=1, alpha=0.5, marker='o', label='Arithmetic mean of population x={:0.2f}, y={:0.2f}'.format(x_mean_pop, y_mean_pop)) plt.scatter(x, y, color='slateblue', s=1, lw=1, alpha=0.3, label='Distribution of population') plt.plot(abline_x, abline_values_pop, color='rebeccapurple', lw=1, alpha=1.0, dashes =(6,6), label=r'Regression Line (using np.polyfit) of population $\mu_Y = E(Y) = \alpha + \beta(x-\bar{x})$') plt.plot(x_mean_sample, y_mean_sample, color='maroon', lw=1, alpha=0.5, marker='o', label='Arithmetic mean of sample x={:0.2f}, y={:0.2f}'.format(x_mean_sample, y_mean_sample)) plt.scatter(x_sample, y_sample, color='red', s=1, lw=1, alpha=0.7, label='Distribution of sample') plt.plot(abline_x, abline_values_sample, color='orangered', lw=1, alpha=1.0, dashes =(6,6), label=r'Manual Regression Line of sample $\hat{y} = a + b(x-\bar{x})$') plt.xlim((0,20.0)) plt.ylim((0,15.0)) plt.legend() plt.show()Training and Validation Loss - After training, let's plot our training ACC and validation ACC using pandas, which, in turn, uses matplotlib for 
plotting (PS: you may want to check out [more advanced logger](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) later on, which take care of it for us):import matplotlib.pyplot as plt import pandas as pd metrics = pd.read_csv("../my-results/my-model/version_3/metrics.csv") aggreg_metrics = [] agg_col = "epoch" for i, dfg in metrics.groupby(agg_col): agg = dict(dfg.mean()) agg[agg_col] = i aggreg_metrics.append(agg) df_metrics = pd.DataFrame(aggreg_metrics) df_metrics[["train_loss", "valid_loss"]].plot( grid=True, legend=True, xlabel="Epoch", ylabel="Loss" ) plt.show()Training and Validation Accuracy - This only applies if the `main.py` code was run with `--log_accuracy true`df_metrics = pd.DataFrame(aggreg_metrics) df_metrics[["train_acc", "valid_acc"]].plot( grid=True, legend=True, xlabel="Epoch", ylabel="ACC" ) plt.show()Load Final Model & Check Test Accuracy - The checkpoint path of the best model (based on validation set performance) should be shown in the scripts output, we copy it below:path = "../my-results/my-model/version_3/checkpoints/epoch=9-step=870.ckpt" import pytorch_lightning as pl import torch from my_classifier_template.model import LightningClassifier pytorch_model = torch.hub.load( "pytorch/vision:v0.11.0", "mobilenet_v3_large", pretrained=False ) pytorch_model.classifier[-1] = torch.nn.Linear( in_features=1280, out_features=10 # as in original ) # number of class labels in Cifar-10) lightning_model = LightningClassifier.load_from_checkpoint(path, model=pytorch_model) lightning_model.eval(); from pytorch_lightning import Trainer from torchvision import transforms from my_classifier_template.dataset import Cifar10DataModule custom_test_transform = transforms.Compose( [ transforms.Resize((256, 256)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), ] ) data_module = Cifar10DataModule( batch_size=128, data_path="../data", num_workers=4, test_transform=custom_test_transform, ) data_module.prepare_data() data_module.setup() trainer = Trainer() trainer.test(model=lightning_model, dataloaders=data_module.test_dataloader())Files already downloaded and verifiedInspecting Failure Cases - In practice, it is often informative to look at failure cases like wrong predictions for particular training instances as it can give us some insights into the model behavior and dataset.- Inspecting failure cases can sometimes reveal interesting patterns and even highlight dataset and labeling issues.class_dict = { 0: "airplane", 1: "automobile", 2: "bird", 3: "cat", 4: "deer", 5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck", } from my_classifier_template.plotting import show_failures fig, axes = show_failures( model=lightning_model, data_loader=data_module.test_dataloader(), class_dict=class_dict, ) plt.tight_layout() plt.show()- In addition to inspecting failure cases visually, it is also informative to look at which classes the model confuses the most via a confusion matrix:from mlxtend.plotting import plot_confusion_matrix from torchmetrics import ConfusionMatrix cmat = ConfusionMatrix(num_classes=len(class_dict)) for x, y in data_module.test_dataloader(): with torch.no_grad(): pred = lightning_model(x) cmat(pred, y) cmat_tensor = cmat.compute() cmat = cmat_tensor.numpy() fig, ax = plot_confusion_matrix( conf_mat=cmat, class_names=class_dict.values(), ) plt.xticks(rotation=45, ha="right", rotation_mode="anchor") plt.show()Optional Save Pure PyTorch Model for Productionx, y = iter(data_module.test_dataloader()).next() lightning_model.to_onnx( 
file_path="model-for-production.onnx", input_sample=x, export_params=True )**Data Warehousing & Data Mining*** Introduction to Ra=3 a a=4; a class(a) a_string = "a random string" a_string a.string = 'another random string' a.string a_string class(a_string) c(1,2,3,5,9) sum(c(1,2,3,5,9)) a= c(5,6,7,8,9,10) b <- 25:30 b a+b c = 1:6 a+c 2:3 1:3 my.list = list(name="Gordon",age=500) my.list my.list[[1]] my.list[0] matrix(1:12,3,4) as.data.frame(matrix(1:12,ncol=4,nrow=3)) a a==5 g=4 if(g==4){ print("water is good") } if(g==5){ print("water is good") }else{ print("definitely not") } g=3 repeat { print(g) g = g-1 if(g==0){ break } } for( i in 1:3){ print(i) } add = function(a,b){ print(a+b) } result = add(b=4,a=5) resultNew to Plotly?Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by dowloading the client and [reading the primer](https://plot.ly/python/getting-started/).You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online).We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! ImportsThe tutorial below imports [NumPy](http://www.numpy.org/), [Pandas](https://plot.ly/pandas/intro-to-pandas-tutorial/), and [SciPy](https://www.scipy.org/).import plotly.plotly as py import plotly.graph_objs as go from plotly.tools import FigureFactory as FF import numpy as np import pandas as pd import scipyAverage of 2 Curves Given two curves defined by functions $f$ and $g$ on $\mathbb{R} \rightarrow \mathbb{R}$, **the average curve** $h$ of $f$ and $g$ is defined by $h = \frac{f(x) + g(x)}{2} $ for $x \in \mathbb{R}$.x = np.linspace(0, 2*np.pi, 100) f = np.sin(x) g = np.cos(x) h = [(f[j] + g[j])/2 for j in range(len(x))] trace1 = go.Scatter( x=x, y=f, mode='lines', name='f(x)', marker=dict( color='rgb(220, 20, 60)' ) ) trace2 = go.Scatter( x=x, y=g, mode='lines', name='g(x)', marker=dict( color='rgb(100, 149, 237)' ) ) trace3 = go.Scatter( x=x, y=h, mode='markers+lines', name='Average of f and g', marker=dict( color='rgb(128, 0, 128)', symbol='diamond-open', ) ) data = [trace1, trace2, trace3] py.iplot(data, filename='2-curves') from IPython.display import display, HTML display(HTML('')) display(HTML('')) ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'python_Average_Multiple_Curves.ipynb', 'python/average_multiple_curves/', 'Average Multiple Curves | plotly', 'Learn how to average the values of multiple curves with Python.', title='Average Multiple Curves in Python | plotly', name='Average Multiple Curves', language='python', page_type='example_index', has_thumbnail='false', display_as='mathematics', order=9, ipynb= '~notebook_demo/107')Tweets on 2016 US Presidential Election: Exploring Relational Data and Visualization Author: Click [here](http://www.hexingren.com/practical-data-science) to go back.import csv import sqlite3 import pandas as pd import math # Use svg backend for better quality import matplotlib # AUTOLAB_IGNORE_START matplotlib.use("svg") # AUTOLAB_IGNORE_STOP import matplotlib.pyplot as plt # AUTOLAB_IGNORE_START %matplotlib inline plt.style.use('ggplot') matplotlib.rcParams['figure.figsize'] = (10.0, 5.0) # adjust this to fit your screen # AUTOLAB_IGNORE_STOPIn this problem, we will be analyzing the Twitter data we extracted using [this](https://dev.twitter.com/overview/api) api. The data consists of Twitter users (with unique handles) and their attributes (e.g., number of followers), some recent tweets posted by them with attributes (e.g., time stamp, number of retweets), and the follow relationship between the users. These are available in the three CSV files:- users.csv - users, user attributes- edges.csv - follow edges (directed, an edge from A to B means A follows B or B is a friend of A)- tweets.csv - tweets posted by the users along with its attributes Q1. Relational DataThis question will guide us through loading Twitter data into an in-memory SQLite database and running some basic queries on it. Q1. Task A: Load Twitter data into SQLite databaseThe task is to use the csv and sqlite3 python packages to load the three csv files as relations (or tables) into an SQLite in-memory database.Loading the data from csv file into the database involves the following steps:1. Identify the schema of the table (for this problem, TEXT and INTEGER attribute types)2. Create a table with the identified schema3. Load the contents of csv in memory4. Insert every row of csv file as a record in the tableWe can refer to [sqlite3 documentation](https://docs.python.org/2/library/sqlite3.html) for steps 2 and 4. For step 3, refer to the [csv documentation](https://docs.python.org/2/library/csv.html). Be sure to name the tables `users`, `edges`, and `tweets`. Make sure to commit (the equivalent of Ctrl+S for databases) any changes made to the database. [This](https://www.techopedia.com/definition/16/commit) page should give us an idea about why commit is essential.def load_twitter_data_sqlite3(conn, users_filepath, edges_filepath, tweets_filepath) : """ Load twitter data in the three files as tables into an in-memory SQLite database Input: conn (sqlite3.Connection) : Connection object corresponding to the database; used to perform SQL commands. 
users_filepath (str) : absolute/relative path to users.csv file edges_filepath (str) : absolute/relative path to edges.csv file tweets_filepath (str) : absolute/relative path to tweets.csv file Output: None """ c = conn.cursor() c.execute('''CREATE TABLE users (name text, screen_name text, location text, created_at text, friends_count integer, followers_count integer, statuses_count integer, favourites_count integer)''') c.execute('''CREATE TABLE edges (screen_name text, friend text)''') c.execute('''CREATE TABLE tweets (screen_name text, created_at text, retweet_count integer, favorite_count integer, text text)''') list_users = [ ] list_edges = [ ] list_tweets = [ ] with open(users_filepath, 'rb') as users: reader_users = csv.reader(users) for row_users in reader_users: list_users.append(tuple(row_users)) list_users.pop(0) #print len(list_users) c.executemany("INSERT INTO users VALUES (?,?,?,?,?,?,?,?)", list_users) with open(edges_filepath, 'rb') as edges: reader_edges = csv.reader(edges) for row_edges in reader_edges: list_edges.append(tuple(row_edges)) list_edges.pop(0) #print len(list_edges) c.executemany("INSERT INTO edges VALUES (?,?)", list_edges) with open(tweets_filepath, 'rb') as tweets: reader_tweets = csv.reader(tweets) for row_tweets in reader_tweets: list_tweets.append(tuple(row_tweets)) list_tweets.pop(0) #print len(list_tweets) c.executemany("INSERT INTO tweets VALUES (?,?,?,?,?)", list_tweets) conn.commit()The function will be called as in the cell below. The cell also contains some test code to display all tables in the database. Tests for the individual tables are added to verify that the data has been loaded properly. (e.g., number of tuples in each table)# AUTOLAB_IGNORE_START # connect to an in memory database conn = sqlite3.connect(":memory:") conn.text_factory = str # call to your function load_twitter_data_sqlite3(conn, 'users.csv', 'edges.csv', 'tweets.csv') # make sure to change the path to csv files appropriately cursor = conn.cursor() # prints all tables in the database for row in cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table';"): print row # AUTOLAB_IGNORE_STOP('users',) ('edges',) ('tweets',)Q1. Task B: Trending tweets in a topicTwitter is regarded as an invaluable source of valuable information. Hence, one of the favorite tasks of data miners is the analyse the trending tweets in a given topic.This task requires us to retrieve the top N most trending tweets (in descending order of trending_score) about a given topic (which is a list of keywords). The following information may be useful:- A tweet is said to be about a given topic if it contains any of the given topical phrases/keywords.- We will use the following simple trending_score: retweet_count + favorite_count. Tweets with higher trending_score must be ranked before the ones with lower trending_score.- Your result must contain unique tweets. If a tweet text occurs multiple times, display it only once with its highest trending_score.- Break ties by sorting the tweets in alphabetical order.The output schema should be as follows:|tweet (TEXT)| trending_score (INTEGER) || :--- |:--- || | |def trending_tweets(cursor, topical_phrases=['Hillary', 'Clinton'], N=5): """ Retrieves the top N trending tweets containing one or more of the given topical phrases. Input: cursor (sqlite3.Cursor): Cursor object to query the database. topical_phrases (list of strings): A list of keywords identifying a topic. 
N: Number of trending tweets to retrieve Output: results (sqlite3.Cursor): Cursor object which can be used to iterate over the retrieved records/tuples. """ query = '''SELECT text, max(retweet_count + favorite_count) AS trending_score FROM tweets WHERE text LIKE ? ''' for i in range(len(topical_phrases)-1): query = query + "OR ? " query = query + '''GROUP BY text ORDER BY (retweet_count + favorite_count) DESC, text ASC LIMIT ?''' for i in range(len(topical_phrases)): topical_phrases[i] = '%' + str(topical_phrases[i]) + '%' topical_phrases.append(N) results = cursor.execute(query, topical_phrases) return resultsThe default inputs to the function will retrieve 5 trending tweets about topic Hillary Clinton. We can view the output of the query using the following code.# AUTOLAB_IGNORE_START results = trending_tweets(conn.cursor()) #results = trending_tweets(conn.cursor(), ['Donald', 'Trump'], 7) for row in results: print row # AUTOLAB_IGNORE_STOP("Hillary just gave a disastrous news conference on the tarmac to make up for poor performance last night. She's being decimated by the media!", 37903) ('"A rough night for " ABC News.', 34418) (' answered email questions differently last night than she has in the past. She is totally confused. Unfit to serve as #POTUS.', 32843) ("' leads by 19 points among military, veteran voters: poll' #AmericaFirst #MAGA\nhttps://t.co/5FmxGtLkwt", 29538) ("It wasn't that hurt Hillary last night. It was her very dumb answer about emails & the veteran who said she should be in jail.", 24414)Q1. Task C: Tweet recommendationHow does Twitter go about populating the feed for a user? While Twitter may use a comple models to do this, in this task, we will use a Simple Tweet Recommender (STR), which recommends a user's tweets to all users who follow him/her (without checking for possible duplicates; i.e., STR may recommend the same tweet twice if two of a user's friends have posted it).In this task, we will write a query to determine the number of tweets recommended to each user. Use only the snapshot of edges and tweets provided to do the recommendation. Report the results on the users present in the users table. (Hint: The number of records in the output should match that in the "users" table.) The order of results does not matter.The output schema should be:|screen_name (TEXT)| num_tweets (INTEGER) || :--- |:--- || | | |def num_tweets_in_feed(cursor): """ Retrieves the number of tweets STR recommends to each Twitter user. Input: cursor (sqlite3.Cursor): Cursor object to query the database. Output: results (sqlite3.Cursor): Cursor object which can be used to iterate over the retrieved records/tuples. """ query = ''' WITH tweetsNum (screen_name, tweet_num) AS ( SELECT screen_name, COUNT(*) FROM tweets GROUP BY screen_name), partialUserTweets (screen_name, rmd_cnt) AS ( SELECT edges.screen_name, SUM(tweetsNum.tweet_num) FROM edges INNER JOIN tweetsNum ON edges.friend = tweetsNum.screen_name GROUP BY edges.screen_name) SELECT users.screen_name, IFNULL(partialUserTweets.rmd_cnt, 0) AS num_tweets FROM users LEFT OUTER JOIN partialUserTweets ON users.screen_name = partialUserTweets.screen_name ORDER BY num_tweets DESC; ''' # your query here return cursor.execute(query) # AUTOLAB_IGNORE_START results = num_tweets_in_feed(conn.cursor()) # for row in results: # print row # AUTOLAB_IGNORE_STOPQ2. VisualizationIn this question, we will load all data into pandas dataframes and analyse (and visualize!) some interesting trends using [matplotlib](http://matplotlib.org) python package. 
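As a bridge between the two parts, the STR feed count from Task C can also be reproduced (and cross-checked) directly in pandas once the three dataframes from Task A below are available. The following is a rough sketch of that equivalence, assuming dataframes named users_df, edges_df and tweets_df with the same columns as the CSV files (the function name num_tweets_in_feed_pandas is just illustrative):

import pandas as pd

def num_tweets_in_feed_pandas(users_df, edges_df, tweets_df):
    # number of tweets posted by each author
    per_author = tweets_df.groupby("screen_name").size().rename("tweet_num")
    # attach each friend's tweet count to the follow edge, then sum per follower
    per_follower = (edges_df.merge(per_author, left_on="friend", right_index=True, how="inner")
                            .groupby("screen_name")["tweet_num"].sum())
    # report on every user in users; users whose friends posted nothing get 0
    out = users_df[["screen_name"]].copy()
    out["num_tweets"] = out["screen_name"].map(per_follower).fillna(0).astype(int)
    return out

Up to row ordering, the result should agree with the SQL query above.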
Q2. Task A: Load Twitter data using pandasFill in the following method stub and return the data frames for users, edges and tweets.Pandas will treat missing values as NaNs by default. However, for this task, we treat missing values (i.e., empty strings in the csv files) as empty strings.import numpy as np def load_twitter_data_pandas(users_filepath, edges_filepath, tweets_filepath): """ Loads the Twitter data from the csv files into Pandas dataframes Input: users_filepath (str) : absolute/relative path to users.csv file edges_filepath (str) : absolute/relative path to edges.csv file tweets_filepath (str) : absolute/relative path to tweets.csv file Output: (pd.DataFrame, pd.DataFrame, pd.DataFrame) : A tuple of three dataframes, the first one for users, the second for edges and the third for tweets. """ df_users = pd.read_csv(users_filepath) dfu = df_users.replace(np.nan, '', regex=True) df_edges = pd.read_csv(edges_filepath) dfe = df_edges.replace(np.nan, '', regex=True) df_tweets = pd.read_csv(tweets_filepath) dft = df_tweets.replace(np.nan, '', regex=True) return (dfu, dfe, dft)We can test the function using the following code.# AUTOLAB_IGNORE_START (users_df, edges_df, tweets_df) = load_twitter_data_pandas('users.csv', 'edges.csv', 'tweets.csv') # make sure to change the path to csv files appropriately print users_df.head() print edges_df.head() print tweets_df.head() # AUTOLAB_IGNORE_STOPname screen_name location \ 0 realDonaldTrump New York, NY 1 Trump Organization Trump New York, NY 2 TrumpGolf 3 Tiffany Trump TiffanyATrump 4 IngrahamAngle DC created_at friends_count followers_count \ 0 Wed Mar 18 13:46:38 +0000 2009 42 11397769 1 Wed Apr 13 16:51:54 +0000 2016 35 9954 2 Mon Feb 03 13:46:03 +0000 2014 200 8797 3 Tue Feb 01 20:59:30 +0000 2011 79 63138 4 Thu Jun 25 21:03:25 +0000 2009 289 851876 statuses_count favourites_count 0 33136 38 1 43 125 2 758 251 3 573 28 4 26523 71 screen_name [...]Q2. Task B: CorrelationStatisticians and data analysts usually like to study about correlation between different observed variables. This helps uncover interesting patterns in the data such as causal relationships (e.g., snow on the road leads to increase in number of accidents). Correlation studies are important for multiple reasons:- While [correlation does not imply causation](https://en.wikipedia.org/wiki/Correlation_does_not_imply_causation), a lack of correlation implies a lack of causation. This can be used to rule out many causal relationships.- Correlation helps with prediction. The more closely related two variables are, the easier it is to predict one from the other.In this task, we will plot the friends_count (on y-axis) vs the followers_count (on x-axis) using the matplotlib package. [Here](http://matplotlib.org/examples/shapes_and_collections/scatter_demo.html) is an example to get started with scatter plots.import numpy as np def plot_friends_vs_followers(users_df): """ Plots the friends_count (on y-axis) against the followers_count (on x-axis). 
Input: users_df (pd.DataFrame) : Dataframe containing Twitter user attributes, as returned by load_twitter_data_pandas() Output: (matplotlib.collections.PathCollection) : The object returned by the scatter plot function """ followers_cnt = users_df.loc[:, 'followers_count'].tolist() friends_cnt = users_df.loc[:, 'friends_count'].tolist() ff = plt.scatter(followers_cnt, friends_cnt) print type(ff) plt.xlabel('Count of Followers') plt.ylabel('Count of Friends') return ff # AUTOLAB_IGNORE_START p = plot_friends_vs_followers(users_df) plt.show() # AUTOLAB_IGNORE_STOPDo you see a correlation between these two variables from your scatter plot? Let's measure this quantitatively using the [Pearson's correlation coefficient](https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient). For a set of observations $(X,Y) = [(x_1,y_1), (x_2,y_2), ... , (x_n,y_n)]$, the Pearson's correlation coefficient is a measure of the linear dependence between two variables $X$ and $Y$, giving a value between +1 and −1 inclusive, where 1 is total positive correlation, 0 is no correlation, and −1 is total negative correlation.$r=r_{xy}={\frac {n\sum x_{i}y_{i}-\sum x_{i}\sum y_{i}}{{\sqrt {n\sum x_{i}^{2}-(\sum x_{i})^{2}}}~{\sqrt {n\sum y_{i}^{2}-(\sum y_{i})^{2}}}}}$Now, fill in the following function to compute the Pearson's correlation coefficient between friends_count and followers_count.def correlation_coefficient(users_df): """ Plots the friends_count (on y-axis) against the followers_count (on x-axis). Input: users_df (pd.DataFrame) : Dataframe containing Twitter user attributes, as returned by load_twitter_data_pandas() Output: (double) : correlation coefficient between friends_count and followers_count """ n = len(users_df.index) xy_sum = 0 x_sum = 0 y_sum = 0 xx_sum = 0 yy_sum = 0 for i in range(n): xi = users_df.loc[i, 'followers_count'] yi = users_df.loc[i, 'friends_count'] xy_sum = xy_sum + xi * yi x_sum = x_sum + xi y_sum = y_sum + yi xx_sum = xx_sum + (xi ** 2) yy_sum = yy_sum + (yi ** 2) x_sum = float(x_sum) y_sum = float(y_sum) xx_sum = float(xx_sum) yy_sum = float(yy_sum) xy_sum = float(xy_sum) r = (n * xy_sum - x_sum * y_sum) / (math.sqrt(n * xx_sum - x_sum ** 2) * math.sqrt(n * yy_sum - y_sum ** 2)) return r # AUTOLAB_IGNORE_START print correlation_coefficient(users_df) # AUTOLAB_IGNORE_STOPQ2. Task C: Degree distributionIn graph theory, the degree of a node is the number of connections it has to other nodes. A common statistic to look out for in the case of real world graphs is the degree distribution. Literature says degrees of nodes in real world graphs follow a [power law distribution](https://en.wikipedia.org/wiki/Power_law). The implication is that a scatter plot of num_users versus k (as we will define below) yields an almost straight line. In this task, we shall verify whether the given crawl of Twitter network satisfies this property.Let us call the number of friends a Twitter user has as his/her degree. The degree distribution is a histogram of the number of friends. Our task is to visualize this histogram. Use the default number of bins.def degree_distribution(edges_df): """ Plots the distribution of . Input: edges_df (pd.DataFrame) : Dataframe containing Twitter edges, as returned by load_twitter_data_pandas() Output: (array, array, list of Patch objects) : Tuple of the values of the histogram bins, the edges of the bins and the silent list of individual patches used to create the histogram. 
""" cnt_df = edges_df.groupby(['screen_name']).size() friends_num = cnt_df.tolist() (n, bins, patches) = plt.hist(friends_num) return (n, bins, patches) # AUTOLAB_IGNORE_START degree_distribution(edges_df) # AUTOLAB_IGNORE_STOPExample w/out unetim = imgs[0] im = im[ :, :, [2,1,0] ] # BGR -> RGB p = PaperCornerPredictor() print('trained shape:', p.train_shape) pts = p.predict_corners(tensor(im), shape='train') pts #export def points_on_mask(mask, o): # o - (8,) shape arr w/ points x,y coords mask = mask.copy() for i, c in zip(range(0,8,2), [(255,0,0), (0,255,0), (0,0,255), (255,255,0)]): mask = cv2.circle(mask, (o[i], o[i+1]), radius=15, color=c, thickness=50) return mask def show_points_on_mask(mask, o): # o - (8,) shape arr w/ points x,y coords return plot(points_on_mask(mask, o)) show_points_on_mask(im, pts) pts = p.predict_corners(tensor(im), shape=(200,200)) show_points_on_mask(im, pts)Example w/ unetim = imgs[0] im = im[ :, :, [2,1,0] ] # BGR -> RGB p = PaperCornerPredictor(use_unet=True, use_gpu=False) pts = p.predict_corners(tensor(im), shape='train') pts show_points_on_mask(im, pts) pts = p.predict_corners(tensor(im), shape=(200,200)) show_points_on_mask(im, pts)Timesfrom ocr.corner_detection_training import * corner_predictor_gpu = PaperCornerPredictor(model_name='unet_paper_mask', use_gpu=True, use_unet=True) corner_predictor_cpu = PaperCornerPredictor(model_name='unet_paper_mask', use_gpu=False, use_unet=True) %%timeit pts = corner_predictor_cpu.predict_corners(tensor(im), shape='train') %%timeit pts = corner_predictor_cpu.predict_corners(tensor(im), shape=(1080,1920)) %%timeit pts = corner_predictor_gpu.predict_corners(tensor(im), shape='train') %%timeit pts = corner_predictor_gpu.predict_corners(tensor(im), shape=(1080,1920))28.4 ms ± 423 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each)Perspective transformplot(im[240:280 , 720:950]) transformed_im = p.transform_image(im, pts, out_shape=[1080,720]) %matplotlib notebook plot(transformed_im[70:120 , 200:600])print(10>9) print(10<9) print(10==9) a=10 b=9 print(a>b) print(aTrue False False**BOOL FUNCTION**print(bool(5)) print(bool("Maria")) print(bool(0)) print(bool(1)) print(bool(None)) print(bool([]))True True False True False False**FUNCTIONS CAN RETURN A BOOLEAN**def myFunction(): return False print(myFunction()) if myFunction(): print("YES!") else: print("NO!")NO!**APPLICATION 1**print(10>9) a=6 b=7 print(a==b) print(a!=a)True False False**PYTHON OPERATORS**print(10+9) print(10-9) print(10*2) print(10/2) print(10**2)19 1 20 5.0 100**PYTHON BITWISE OPERATORS**a = 60 a = 13 print(a&b) print(a|b)5 15**PYTHON ASSIGNMENT OPERATORS**a+=3 #The same as a = a + 3 print(a)19**LOGICAL OPEATORS**a = True b = False print(a and b) print(not(a or b))False False**IDENTITY OPERATORS**a is b a is not bVariables and Data Structures BIOINF 575 - Fall 2020 Interactive python apps commands  python – open a plain python console in current terminal   http://jupyter.org/ - The evolved form of iPython   ipython or jupyter-console – open an iPython console in the current terminal   jupyter-qtconsole – open an iPython session in a qt console (console uses qt GUI library, more functional window)   jupyter notebook – start a notebook session and open in in a web browser   Jupyter-lab – start a jupyter lab session, the future of the notebook   spyder – launch the Spyder IDE    https://www.spyder-ide.org/   anaconda-navigator – launch the Anaconda Navigator https://www.dataquest.io/blog/jupyter-notebook-tutorial/ A markup language is a system for annotating a document where the text does not read very well but the result makes the content easier to understand Example: using color, different font styles, different font size, indentation, itemizing, numberingThe idea started from the "marking up" of paper manuscripts.Markdown is a tool to convert text to HTML.Markdown is lightweight markup language with plain text formatting syntax. Its design allows it to be converted to many output formats, but the original tool by the same name only supports HTML.Markdown is often used to format readme files, for writing messages in online discussion forums, and to create rich text using a plain text editor.https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet The Zen of Python, by import thisThe Zen of Python, by Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. 
Namespaces are one honking great idea -- let's do more of those!Variables Naming conventions* underscore separated - variable_name - preference* Camel case - VariableNameThere are many others:* https://en.wikipedia.org/wiki/Naming_convention_(programming)Use meaningful names, your future self will thank you! Assign a value to a variable. The assignment operator is: "="message = "Let's get started!"Check the value of a variablemessage print(message) a = 2 b = 5 print(a) print(b)2 5EXERCISE Assign the value 10 to a variable.Assign the value True to a variable.Assign the value 100 to the variable attendance_percentage.Assign the value of the variable attendance_percentage to the variable quiz_percentage.Multiple assignment: assign the value 20 to two variables at the same time. Then check the value of the two variables.dog_weight = 10 is_Tuesday = True attendance_percentage = 90 print(dog_weight) # Multiple assignment x = y = 20 print(x) print(y) print(z) x,y = 25,25 print(x) print(y) cat_weight = 10 print(cat_weight) is_Wednesday = False print(is_Wednesday) attendance_percentage = 100 print(attendance_percentage) quiz_percentage = attendance_percentage print(quiz_percentage) attendance_percentage = 90 print(attendance_percentage)10 False 100 100 90Remove the value from a variable using None.message message = None message print(message)NoneRemove a variable from the environment using the function del().del(message)Now let's try to see what value variable message hasWhat do we expect?messagepython complained! http://greenlining.org/wp-content/uploads/2013/10/COMPLAIN.jpg List the type of a variable with the function type().message = "Let's test the type." message type(message) message = 10 message type(message)Typing is dynamic but strict in python (objects have type).The principal built-in types are numerics, sequences, mappings, classes, instances and exceptions.Numeric Types — int (subtype: bool), float, complex.Sequence Types — string, list, tuple, range.There are two major categories: mutable and immutable types.Immutable variables cannot be changed, while Mutable variables can be changed.Integer, Floats, Strings, Tuples, Boolean are immutable.List are mutable types.https://docs.python.org/3/library/stdtypes.html List functions and attributes in strings using the dir(str) function.dir(str) type("test") "test".capitalize()Functions preceeded by \_\_ (e.g. \_\_lt\_\_ - less than) are special internal funtions used by python funtions and operators EXERCISE Test more of the string functions: upper(), strip(), ..."test".upper() " test text ".strip() "test".index("es")Get more information regarding strings using the help(str) function.help(str)Help on class str in module builtins: class str(object) | str(object='') -> str | str(bytes_or_buffer[, encoding[, errors]]) -> str | | Create a new string object from the given object. If encoding or | errors is specified, then the object must expose a data buffer | that will be decoded using the given encoding and error handler. | Otherwise, returns the result of object.__str__() (if defined) | or repr(object). | encoding defaults to sys.getdefaultencoding(). | errors defaults to 'strict'. | | Methods defined here: | | __add__(self, value, /) | Return self+value. | | __contains__(self, key, /) | Return key in self. | | __eq__(self, value, /) | Return self==value. | | __format__(self, format_spec, /) | Return a formatted version of the string as described by format_spec. | | __ge__(self, value, /) | Return self>=value. 
| | __getattribute__(self, name, /) | Return getattr(self, name). | | [...]NumericBasic math operations: int (natural numbers), float (decimal numbers), complexInteger, arbitrarily long, convert from floats or strings with int()Floats, convert from str or int types with float()Convert to string with str() No automatic coercion of types, explicit conversion onlyException: print() - converts to strings where possibledir(int)Computing the area of a rectangle.length = 6 width = 4 area = length * width area type(4.0)Float divisiontype(4/2) 4/2 type(4//2) 4//2 4/2Explicit conversionfloat(4) float("4.5") type("4.5") float("DNA sequence") int("4") repr(4) str(4) # remainder/modulo 5%2 # exponent 5**2 message = "test" number = 10 message * numberBooleanBoolean is a subtype of integerTrue, False values1 == True, 0 == Falseempty structure == Falsetype(True) dir(bool) cond_result = True cond_result cond_result1 = False cond_result1Logical operators not and ornot cond_result cond_result and cond_result1 cond_result or cond_result1Comparison operators strictly less than less than or equal\> strictly greater than>= greater than or equal== equal!= not equalis object identityis not negated object identity not cond_resultcond_result = 2>=3 cond_result int(True)EXERCISE Assign a numeric value to a variable.Write a composite condition to check if the variable is in the range 10 to 100.Write a composite condition to check if the variable is lower than -10 or higher than 10.Write a composite condition to check if the variable is lower than -10 or higher than 10 and if it can be divided by 5 or 7.class_size = 15 print(class_size > 10 and class_size < 100) print(class_size < -10 or class_size > 10) print(abs(class_size) > 10) print((class_size < -10 or class_size > 10) and (class_size%5 == 0 or class_size%7 == 0))Sequence types: String, List, and Tuple - are iterable StringSequence of characters - immutable Concatenation"Complex " + 'text'Simple, double or triple quotes:"Don't worry about apostrophes" 'She said: "Good day!"' test_message = """Trying the triple quotes""" test_message ''' I am testing the triple quotes - a comment ''' # this is a COMMENT test_message = '''Trying the simple triple quotes''' test_messageCommon Sequence operationsOperation Description:x in sTrue if an item of s is equal to x, else Falsex not in sFalse if an item of s is equal to x, else Trues + tthe concatenation of s and ts * n or n * sequivalent to adding s to itself n timess[i]ith item of s, origin 0s[i:j]slice of s from i to js[i:j:k]slice of s from i to j with step klen(s)length of smin(s)smallest item of smax(s)largest item of ss.index(x[, i[, j]])index of the first occurrence of x in s (at or after index i and before index j)s.count(x)total number of occurrences of x in s"ll" in "collection" "test this, string." in "Collection" "rep"*4 "collection"[0] "collection"[1:3] "collection"[1:3] "testing the subsetting"[3:16:3] "testing the subsetting"[3:1000] test_string = "testing the subsetting" test_string[:len(test_string)] len(test_string)Subsetting s[i:j:k]If i or j is negative, the index is relative to the end of sequence s: len(s) + i or len(s) + j is substituted. But note that -0 is still 0.The slice of s from i to j is defined as the sequence of items with index k such that i If i or j is greater than len(s), use len(s). If i is omitted or None, use 0. If j is omitted or None, use len(s). 
If i is greater than or equal to j, the slice is empty.The slice of s from i to j with step k is defined as the sequence of items with index x = i + n\*k such that 0 In other words, the indices are i, i+k, i+2\*k, i+3\*k and so on, stopping when j is reached (but never including j). When k is positive, i and j are reduced to len(s) if they are greater. When k is negative, i and j are reduced to len(s) - 1 if they are greater. If i or j are omitted or None, they become “end” values (which end depends on the sign of k). Note, k cannot be zero. If k is None, it is treated like 1.https://docs.python.org/3/library/stdtypes.htmllen("collection") "collection".index("ll") min("collection") "A" < "a" ord("a") ord("A") max("collecTion") "collection".count("l") cooper = 20 Cooper s = "collection" s s.index("e")Exercise Create the sequence "blablabla" by using the operator *. Find the position of the second o in "collection".Retrieve the word "other" from "immunotherapy" using subsetting."bla"*3 s = "another test" subs = "t" s.index(subs, s.index(subs)+1)String formatting - much more in a future session.Python uses C-style string formatting to create new, formatted strings. The "%" operator is used to format a set of variables enclosed in a "tuple" (a fixed size list), together with a format string, which contains normal text together with "argument specifiers", special symbols like "%s" and "%d".%s - String (or any object with a string representation, like numbers) %d - Integers%f - Floating point numbers%.<n>f - Floating point numbers with a fixed (n) amount of digits to the right of the dot.name = "John" print("Hello, %s!" % name)Range - an immutable sequence of numbers It is commonly used for looping a specific number of times. range(stop)range(start, stop[, step])dir(range) list(range(10)) range(10) list(range(5, 31, 5))List - a collection of elements, allows duplicates, is orderred, and is mutable (changeable) A list may be constructed in several ways:Using a pair of square brackets to denote the empty list: []Using square brackets, separating items with commas: [a], [a, b, c]Using a list comprehension: [x for x in iterable]Using the type constructor: list() or list(iterable)dir(list) patient_BMIs = [25, 18, 30, 16, 22, 32, 28] patient_BMIsSize/length of a listlen(patient_BMIs)Save the list in a different variablepatient_BMIs_followup = patient_BMIs patient_BMIs_followupRetrieve elements from a list subsetting/slicingpatient_BMIs_followup[2:4]Negative indexing - retrieving elements from the end of the listpatient_BMIs[-4:-1]Change a value in a listpatient_BMIs_followup[1] = 20 patient_BMIs_followup patient_BMIspatient_BMIs_followup and patient_BMIs are references to the same objectWhen the referred object changes the values display for both referring variables change. To make a copy of a list: copy_list = initial_list[:] Similarly we can do copy_list = list(initial_list)patient_BMIs_followup = patient_BMIs[:] # patient_BMIs_followup = list(patient_BMIs) patient_BMIs_followup[2] = 27 patient_BMIs_followup patient_BMIsTo copy a list through subsetting works ... until it doesn't.When the list contains sublists.The copy module has a deepcopy funtion. 
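A quick illustration of the difference (using a small made-up nested list, not one of the BMI lists above): a sliced copy is only a shallow copy, so the outer list is new but any sublists are still shared, while copy.deepcopy() copies the nested structure as well.

import copy

nested = [25, 18, [30, 16]]
shallow = nested[:]           # new outer list, but the inner list is shared
deep = copy.deepcopy(nested)  # fully independent copy

nested[2][0] = 99
print(shallow[2][0])   # 99 - the shallow copy sees the change
print(deep[2][0])      # 30 - the deep copy does not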
Add elements to a listpatient_BMIs.append(26) patient_BMIs patient_BMIs.sort() patient_BMIs patient_BMIs.append("test") patient_BMIs patient_BMIs.append([35,"text"]) patient_BMIs patient_BMIs.extend([22,26,33]) patient_BMIs """insert an element at a certain position insert(position, element)""" patient_BMIs.insert(3,29) patient_BMIsChange list elementspatient_BMIs[3:5] = [25,25] patient_BMIs patient_BMIs[3:6] = [20,20,30,31] patient_BMIs patient_BMIs.sort() patient_BMIs patient_BMIs.index(20) patient_BMIs.count(20) 20 in patient_BMIsRemove list elements remove() removes the first matching value, not a specific index.patient_BMIs.remove(22) patient_BMIsdel removes the item at a specific index.del patient_BMIs[4] patient_BMIspop() removes the item at a specific index and returns it.patient_BMIs.pop(4) patient_BMIsclear() removes all the elements of a listpatient_BMIs.clear() patient_BMIsString join()",".join(["Make","a","sentence."])String split()"Get the, words".split(",")Data Science and Machine Learning Capstone Project Question 1. Which type of complaint should the Department of Housing Preservation and Development of New York City focus on first? Table of Contents1. [Data preparation](1)2. [Visualizing and get some insight from data](2)3. [Conclusion](3) ***Data preparation*** First, lets import the necessary libraries and data to answer this.import pandas as pd import numpy as np %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as pltImport 311 NYC datasetcolumns_used = ['borough', 'city', 'complaint_type', 'created_date', 'incident_address', 'incident_zip', 'latitude', 'location_type', 'longitude','street_name', 'unique_key'] df_311= pd.read_csv('fhrw-4uyv.csv', parse_dates = ['created_date'], usecols = columns_used) df_311.head()Preliminary checking for columns type in dataframe.df_311.dtypesAll type of features seems all right. Next, let's check the missing valuesmissing_data_311= df_311.isnull() missing_data_311.head() for column in missing_data_311.columns.values.tolist(): print(column) #Menghitung jumlah masing-masing nilai unik pada kolom data print (missing_data_311[column].value_counts()) print("")borough False 5619790 Name: borough, dtype: int64 city False 5535050 True 84740 Name: city, dtype: int64 complaint_type False 5619790 Name: complaint_type, dtype: int64 created_date False 5619790 Name: created_date, dtype: int64 incident_address False 5566873 True 52917 Name: incident_address, dtype: int64 incident_zip False 5534704 True 85086 Name: incident_zip, dtype: int64 latitude False 5534730 True 85060 Name: latitude, dtype: int64 location_type False 5566874 True 52916 Name: location_type, dtype: int64 longitude False 5534730 True 85060 Name: longitude, dtype: int64 street_name False 5566873 True 52917 Name: street_name, dtype: int64 unique_key False 5619790 Name: unique_key, dtype: int64There is no missing values for **complaint_type** feature, so let's keep all row in this dataframe as it is for now. 
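As an aside, the same per-column missing-value counts produced by the loop above can be obtained in a single call; a minimal equivalent using standard pandas:

df_311.isnull().sum().sort_values(ascending=False)

This returns one count per column and confirms that **complaint_type** has zero missing values.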
Next, Let's check the number of each value of **complaint_type** column.df_311['complaint_type'].value_counts()***Visualizing and get some insight from data*** Create barh plot to visualize the proportion of the complaints.# Set colors colors = ['C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C7', 'C3'] #Set the series df_311_complaint_type = df_311['complaint_type'].value_counts() df_311_complaint_type.sort_values(ascending = True, inplace =True) Total_complaint = df_311_complaint_type.sum() # Create the barh plot df_311_complaint_type.plot(kind = 'barh', figsize = (12,12), color = colors) # Set text label to show the percentage of each complaint for index, value in enumerate(df_311_complaint_type): label = '{}%'.format(round((value/Total_complaint)*100, 2)) plt.annotate(label, xy=(value + 10000, index-0.1), color='blue') plt.show()from google.colab import drive drive.mount('/content/drive') # Ignore the warnings import warnings warnings.filterwarnings('ignore') # System related and data input controls import os # Data manipulation, visualization and useful functions import pandas as pd import numpy as np from itertools import product # iterative combinations from tqdm import tqdm import matplotlib.pyplot as plt import seaborn as sns # Modeling algorithms # General(Statistics/Econometrics) from sklearn import preprocessing import statsmodels.api as sm import statsmodels.tsa.api as smt import statsmodels.formula.api as smf from statsmodels.stats.outliers_influence import variance_inflation_factor from scipy import stats # Regression from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.kernel_ridge import KernelRidge from sklearn.neighbors import KNeighborsRegressor from sklearn.svm import SVR from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, BaggingRegressor, GradientBoostingRegressor, AdaBoostRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor # Classification from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # Model selection from sklearn.model_selection import train_test_split,cross_validate from sklearn.model_selection import KFold from sklearn.model_selection import GridSearchCV # Evaluation metrics # for regression from sklearn.metrics import mean_squared_log_error, mean_squared_error, r2_score, mean_absolute_error # for classification from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score import pandas as pd from keras.models import Sequential, Model, load_model from keras.layers import Input, Dense, Activation, Flatten, Dropout from keras.layers import SimpleRNN, LSTM, GRU item_categories = pd.read_csv('/content/drive/MyDrive/data/item_categories.csv') items = pd.read_csv('/content/drive/MyDrive/data/items.csv') #sales_train = pd.read_csv('/content/drive/MyDrive/data/sales_train.csv') #학습데이터 sample_submission = pd.read_csv('/content/drive/MyDrive/data/sample_submission.csv') shops = pd.read_csv('/content/drive/MyDrive/data/shops.csv') test = pd.read_csv('/content/drive/MyDrive/data/test.csv')#테스트 데이터 raw_data = pd.read_csv('/content/drive/MyDrive/data/sales_train.csv' ) #학습데이터 raw_data df = raw_data.copy() 
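# English gloss of the Korean comments in this cell (rough translation):
# "학습데이터" = training data, "테스트 데이터" = test data
# 1. drop rows of train that are duplicated across every column
#    (24 duplicates removed: 2935849 - 2935825)
# 2. (commented out) keep only shop_id / item_id pairs that also appear in test,
#    which would shrink train from 2935825 to 1224429 rows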
#1. Remove duplicates from train (drop rows where every column value is identical) # drop_duplicates subset= ['date','date_block_num','shop_id','item_id','item_cnt_day'] print(df.duplicated(subset=subset).value_counts()) df.drop_duplicates(subset=subset, inplace=True) # 2935849 - 2935825 = 24 duplicate rows removed #2. Since only the sales in test need to be predicted, drop items from train that do not appear in test #test_shops = test.shop_id.unique() #test_items = test.item_id.unique() #df = df[df.shop_id.isin(test_shops)] #df = df[df.item_id.isin(test_items)] #df.shape # drops more than half the data, from 2935825 rows to 1224429 df2 = df df2 = df.rename_axis('ID2').reset_index() df2 df2= df2[(df2['item_cnt_day'] < df2['item_cnt_day'].quantile(0.95))] df2['date'] = pd.to_datetime(df2['date']) df2 df2 = df2.drop('ID2',axis=1) df2['date'] = df2['date'].sort_values() df2 = df2.reset_index() df2 = df2.drop('index', axis=1) df2 = df2.rename_axis('ID').reset_index() df2 df2.hist() df2['item_cnt_day'].hist() df2['item_cnt_day'].quantile(0.95) df2.head() df2.info() df2['item_cnt_day'].hist() df2 df2 = df2.drop(['date_block_num'],axis=1) df2 = df2.drop(['item_price'],axis=1) df2 df3 = df2.set_index('date') df3.head() df4 = df3.groupby(by=['shop_id', 'item_id'])['item_cnt_day'].resample('M').sum() df5 = pd.DataFrame(df4) df5 = df5.reset_index() df5 = df5.drop('date',axis=1) df5 = df5.rename_axis('ID').reset_index() df5 items.head() df6 = pd.merge(df5, items, how='left', left_on='item_id',right_on='item_id') df6 = df6.drop('item_name',axis=1) y = df6.loc[:,'item_cnt_day'] X = df6.drop(['item_cnt_day'], axis=1) X #df_test_orig = df3.loc[:, ['shop_id', 'item_id', 'item_cnt_day']] y from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) # split into training and test data #split = "2015-01-01" #df_train = df3[:split] #df_test = df3[split:] #df_train_y = df_train.loc[:,'item_cnt_day'] #df_train_x = df_train.drop('item_cnt_day', axis=1) #df_test_y = df_test.loc[:,'item_cnt_day'] #df_test_x = df_test.drop('item_cnt_day', axis=1) # keep a copy of the test data to compare against the predictions later #df_test_orig = df_test.loc[:, ['shop_id', 'item_id', 'item_cnt_day']] XG_model_month = XGBRegressor() XG_model_month.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10,verbose=False) from sklearn.metrics import mean_absolute_error, mean_squared_error from math import sqrt pred = XG_model_month.predict(X_test) print(mean_absolute_error(y_test, pred)) print(sqrt(mean_squared_error(y_test, pred))) # check which features matter most from xgboost import plot_importance import matplotlib.pyplot as plt %matplotlib inline plot_importance(XG_model_month, height=0.9) #xgboost = XG_model_month.predict(df3) # put the actual and predicted values into one DataFrame and visualize #result=pd.concat([df_test_orig[(df_test_orig.store==1)&(df_test_orig.item==2)].reset_index(), pd.DataFrame(xgboost, columns=['xgboost'])], axis=1, ignore_index=False) #result = result.set_index('date') #result = result.loc[:, ['item_cnt_day', 'xgboost']] #result.plot() test2=test.copy() items.head() test2 = pd.merge(test2,items, how='left', left_on='item_id',right_on='item_id') test2=test2.drop('item_name', axis=1) test2.info() test2.astype(float) pred = XG_model_month.predict(test2) submission = pd.DataFrame(pred) submission_copy = submission.rename_axis('ID').reset_index() submission_copy submission_copy.columns = ['ID', 'item_cnt_month'] submission_copy submission_copy.mean() #submission_copy['item_cnt_month'] = submission_copy['item_cnt_month'].astype(float) submission_copy.to_csv('XG_submission_final.csv',index=False)Load Packages and Settings!pip
install lightgbm import numpy as np import pandas as pd import psutil import os import pickle from collections import Counter import datetime as datetime from scipy.stats.mstats import gmean import random import gc import gzip import bz2 import matplotlib.pyplot as plt from pylab import rcParams rcParams['figure.figsize'] = (17,5.5) rcParams['figure.max_open_warning'] = 0 # %config InlineBackend.figure_format='retina' import seaborn as sns pd.options.display.max_rows = 150 start = datetime.datetime.now() if TIME_SEED: np.random.seed(datetime.datetime.now().microsecond) import sys def sizeof_fmt(num, suffix='B'): ''' by , https://stackoverflow.com/a/1094933/1870254, modified''' for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']: if abs(num) < 1024.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f %s%s" % (num, 'Yi', suffix) def memCheck(): for name, size in sorted(((name, sys.getsizeof(value)) for name, value in globals().items()), key= lambda x: -x[1])[:10]: print("{:>30}: {:>8}".format(name, sizeof_fmt(size))) def ramCheck(): print("{:.1f} GB used".format(psutil.virtual_memory().used/1e9 - 0.7)) path = '/kaggle/input/m5-forecasting-uncertainty/' ramCheck()0.3 GB usedLoad and Aggregate Training DataLEVELS = [(12, ['item_id', 'store_id']), (11, ['state_id', 'item_id']), (10, ['item_id']), (9, ['store_id', 'dept_id']), (8, ['store_id', 'cat_id']), (7, ['state_id', 'dept_id']), (6, ['state_id', 'cat_id']), (5, ['dept_id']), (4, ['cat_id']), (3, ['store_id']), (2, ['state_id']), (1, []) ] DOWNSTREAM = {'item_id': ['dept_id', 'cat_id'], 'dept_id': ['cat_id'], 'store_id': ['state_id']} def aggTrain(train): tcd = dict([(col, 'first') for col in train.columns[1:6]]) tcd.update( dict([(col, 'sum') for col in train.columns[6:]])) tadds =[]; tadd_levels= [ [12 for i in range(0, len(train))] ] for idx, lvl in enumerate(LEVELS[1:]): level = lvl[0] lvls = lvl[1] if len(lvls) is 0: # group all if no list provided lvls = [1 for i in range(0, len(train))] tadd = train.groupby(lvls).agg(tcd) # name it if len(lvls) == 2: tadd.index = ['_'.join(map(str,i)) for i in tadd.index.tolist()] elif len(lvls) == 1: tadd.index = tadd.index + '_X' else: tadd.index = ['Total_X'] tadd.index.name = 'id' # fill in categorical features tadd.reset_index(inplace=True) for col in [c for c in train.columns[1:6] if c not in lvls and not any(c in z for z in[DOWNSTREAM[lvl] for lvl in lvls if lvl in DOWNSTREAM])]: tadd[col] = 'All' tadds.append(tadd) #levels tadd_levels.append([level for i in range(0, len(tadd))]) train = pd.concat((train,*tadds), sort=False, ignore_index=True); del tadds, tadd levels = pd.Series(data = [x for sub_list in tadd_levels for x in sub_list], index = train.index); del tadd_levels for col in train.columns[1:6]: train[col] = train[col].astype('category') return train, levels def loadTrain(): train_cols = pd.read_csv(path+ '/' + 'sales_train_evaluation.csv', nrows=1) c_dict = {} for col in [c for c in train_cols if 'd_' in c]: c_dict[col] = np.float32 train = pd.read_csv(path+ '/' + 'sales_train_evaluation.csv', dtype=c_dict)#.astype(np.int16, errors='ignore') train.id = train.id.str.split('_').str[:-1].str.join('_') train.sort_values('id', inplace=True) return train.reset_index(drop=True) def getPricePivot(): prices = pd.read_csv(path+ '/' + 'sell_prices.csv', dtype = {'wm_yr_wk': np.int16, 'sell_price': np.float32}) prices['id'] = prices.item_id + "_" + prices.store_id price_pivot = prices.pivot(columns = 'id' , index='wm_yr_wk', values = 'sell_price') price_pivot = 
price_pivot.reindex(sorted(price_pivot.columns), axis=1) return price_pivot def getCal(): return pd.read_csv(path+ '/' + 'calendar.csv').set_index('d') cal = getCal() cal.date = pd.to_datetime(cal.date) day_to_cal_index = dict([(col, idx) for idx, col in enumerate(cal.index)]) cal_index_to_day = dict([(idx, col) for idx, col in enumerate(cal.index)]) cal_index_to_wm_yr_wk = dict([(idx, col) for idx, col in enumerate(cal.wm_yr_wk)]) day_to_wm_yr_wk = dict([(idx, col) for idx, col in cal.wm_yr_wk.iteritems()]) # Load train = loadTrain() price_pivot = getPricePivot() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # combine assert (train.id == price_pivot.columns).all() daily_sales = pd.concat((train.iloc[:, :6], train.iloc[:, 6:] * price_pivot.loc[train.columns[6:].fillna(0)\ .map(day_to_wm_yr_wk)].transpose().values ), axis = 'columns') # Aggregate train, levels = aggTrain(train) # id_to_level = dict(zip(train.id, levels)) # level_to_ids = dict([(level[0], list(train.id[levels == level[0]])) for idx, level in enumerate(LEVELS)]) daily_sales = aggTrain(daily_sales)[0] print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # Rescale each level to avoid hitting np.half ceiling and keep similar ranges level_multiplier = dict([ (c, (levels==c).sum() / (levels==12).sum()) for c in sorted(levels.unique())]) # split up level 12 for row in LEVEL_SPLITS: level_multiplier[row[0]] = level_multiplier[12] levels.loc[(levels == 12) & (train.cat_id == row[1])] = row[0] Counter(levels) # Rescale by number of series at each level train = pd.concat((train.iloc[:, :6], train.iloc[:, 6:].multiply( levels.map(level_multiplier), axis = 'index').astype(np.float32) ), axis = 'columns') daily_sales = pd.concat((daily_sales.iloc[:, :6], daily_sales.iloc[:, 6:].multiply( levels.map(level_multiplier), axis = 'index').astype(np.float32) ), axis = 'columns') def loadSampleSub(): return pd.read_csv(path+ '/' + 'sample_submission.csv').astype(np.int8, errors = 'ignore') sample_sub = loadSampleSub() assert set(train.id) == set(sample_sub.id.str.split('_').str[:-2].str.join('_')) print(len(train)) ramCheck() # memCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') train_filter = ( ( ( MAX_LEVEL is not None ) & (levels <= MAX_LEVEL) ) | ( ( MAX_LEVEL is None ) & (levels == LEVEL) ) ) train = train[train_filter].reset_index(drop=True) daily_sales = daily_sales[train_filter].reset_index(drop=True) levels = levels[train_filter].reset_index(drop=True).astype(np.int8) Counter(levels) train.head() print(len(train)) train_head = train.iloc[:, :6] train_head.head() ramCheck() # replace leading zeros with nan train['d_1'].replace(0, np.nan, inplace=True) for i in range(train.columns.get_loc('d_1') + 1, train.shape[1]): train.loc[:, train.columns[i]].where( ~ ((train.iloc[:,i]==0) & (train.iloc[:,i-1].isnull())), np.nan, inplace=True) print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') train.head(5) train_flipped = train.set_index('id', drop = True).iloc[:, 5:].transpose() train_flipped.dtypes train_flipped.head() train_flipped.max().sort_values(ascending=False)[::3000] # memCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') ramCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 76 sItem-Store Featuresfeatures = [] # basic moving averages if not CACHED_FEATURES: for window in [3, 7, 15, 30, 100]: if REDUCED_FEATURES and window < 15: 
continue; features.append(('qs_{}d_ewm'.format(window), train_flipped.ewm(span=window, min_periods = int(np.ceil(window ** 0.8)) ).mean().astype(np.half))) store_avg_qs = train_flipped[train_flipped.columns[levels >= 12]].transpose()\ .groupby(train_head.iloc[(levels >= 12).values].store_id.values).mean().fillna(1) store_dept_avg_qs = train_flipped[train_flipped.columns[levels >= 12]].transpose()\ .groupby( ( train_head.iloc[(levels >= 12).values].store_id.astype(str) + '_' + train_head.iloc[(levels >= 12).values].dept_id.astype(str)).values ).mean().fillna(1) store_avg_qs # basic moving averages, after removing any store trends scaled_sales = train_flipped / (store_avg_qs.loc[train.store_id].transpose().values); # if levels.min() == 12: # # get overall store and store-dept sales matched to this id; # store_avg_qs_matched = store_avg_qs.loc[train.store_id].transpose() # store_dept_avg_qs_matched = store_dept_avg_qs.loc[train.store_id.astype(str) + '_' # + train.dept_id.astype(str) # ].transpose() # store_avg_qs_matched.columns = train_flipped.columns # store_dept_avg_qs_matched.columns = train_flipped.columns # ratio = (store_avg_qs_matched.rolling(28).mean() / store_avg_qs_matched.rolling(56).mean() ) .fillna(1) - 1 # ratio = ratio.clip ( ratio.stack().quantile(0.01), ratio.stack().quantile(0.99)) # # features.append(('store_28d_58d_ratio', ratio.astype(np.half))) # ratio = (store_dept_avg_qs_matched.rolling(28).mean() / store_dept_avg_qs_matched.rolling(56).mean() ) .fillna(1) - 1 # ratio = ratio.clip ( ratio.stack().quantile(0.003), ratio.stack().quantile(0.997)) # # features.append(('store_dept_28d_58d_ratio', ratio.astype(np.half))) # del store_avg_qs_matched, store_dept_avg_qs_matched, ratio del store_avg_qs, store_dept_avg_qs, # moving average after store-level detrending if not CACHED_FEATURES: for window in [3, 7, 15, 30, 100]: if REDUCED_FEATURES: continue; features.append(('qs_divbystore_{}d_ewm'.format(window), scaled_sales.ewm(span=window, min_periods = int(np.ceil(window ** 0.8)) ).mean().astype(np.half))) print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # EWM % NONZERO DAYS if not CACHED_FEATURES: tff0ne0 = train_flipped.fillna(0).ne(0) for window in [7, 14, 28, 28*2, 28*4, ]: if REDUCED_FEATURES and window != 28: continue; features.append( ('pct_nonzero_days_{}d'.format(window), tff0ne0.rolling(window).mean().astype(np.half) ) ) del tff0ne0 print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 139 sFeatures for Both Sales and Scaled Salesarrs = [train_flipped, scaled_sales, ] # sales_over_all] labels = ['qs', 'qs_divbystore', ] #'qs_divbyall'] if REDUCED_FEATURES: arrs = arrs[0:1] # basic lag features if not CACHED_FEATURES: for lag in range(1, 10+1): if REDUCED_FEATURES: continue; features.append( ('qs_lag_{}d'.format(lag), train_flipped.shift(lag).fillna(0).astype(np.half) ) ) # means and medians -- by week to avoid day of week effects if not CACHED_FEATURES: for idx in range(0, len(arrs)): arr = arrs[idx] label = labels[idx] for window in [7, 14, 21, 28, 28*2, 28*4, ]: ## ** mean and median if REDUCED_FEATURES and window != 28: continue; features.append( ('{}_mean_{}d'.format(label, window), arr.rolling(window).mean().astype(np.half) ) ) features.append( ('{}_median_{}d'.format(label, window), arr.rolling(window).median().astype(np.half) ) ) print('{}: {}'.format(label,window)) del arr print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # stdev, skewness, and kurtosis # 
ideally kurtosis and skewness should NOT be labeled qs_ as they are scale-invariant if not CACHED_FEATURES: for idx in range(0, len(arrs)): arr = arrs[idx] label = labels[idx] for window in [7, 14, 28, 28*3, 28*6]: if REDUCED_FEATURES and window != 28: continue; print('{}: {}'.format(label,window)) features.append( ('{}_stdev_{}d'.format(label, window), arr.rolling(window).std().astype(np.half) ) ) if window >= 10: if REDUCED_FEATURES: continue; features.append( ('{}_skew_{}d'.format(label, window), arr.rolling(window).skew().astype(np.half) ) ) features.append( ('{}_kurt_{}d'.format(label, window), arr.rolling(window).kurt().astype(np.half) ) ) del arr; print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # high and low quantiles (adding more seemed to hurt performance) if not CACHED_FEATURES: for idx in range(0, len(arrs)): arr = arrs[idx] label = labels[idx] for window in [14, 28, 56]: if REDUCED_FEATURES and window != 28: continue; features.append( ('{}_qtile10_{}d'.format(label, window), arr.rolling(window).quantile(0.1).astype(np.half) ) ) features.append( ('{}_qtile90_{}d'.format(label, window), arr.rolling(window).quantile(0.9).astype(np.half) ) ) print('{}: {}'.format(label,window)) del arr print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') del arrs; del scaled_sales ramCheck()3.6 GB usedData Cleaning# start after one year, remove anything with proximity to holiday months (given mid-year LB targets) # also saves a lot of RAM/processing time def clean_df(fr): early_rows = cal[cal.year == cal.year.min()].index.to_list() holiday_rows = cal[cal.month.isin([10, 11, 12, 1])].index.to_list() delete_rows = early_rows + holiday_rows MIN_DAY = 'd_{}'.format(300) if 'd' in fr.columns: # d, series stack: fr = fr[fr.d >= day_to_cal_index[MIN_DAY]] fr = fr[~fr.d.isin([ day_to_cal_index[d] for d in delete_rows])] else: # pivot table if MIN_DAY in fr.index: fr = fr.iloc[ fr.index.get_loc(MIN_DAY):, :] if len(delete_rows) > 0: fr = fr[~fr.index.isin(delete_rows)] return fr; def clean_features(features): for idx, feat_row in enumerate(features): fr = feat_row[1] fr = clean_df(fr) if len(fr) < len(feat_row[1]): features[idx] = (features[idx][0], fr) ramCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 266 sCache Loaderpickle_dir = '/kaggle/input/m5-e300/' if CACHED_FEATURES: if 'features.pbz2' in os.listdir(pickle_dir): with bz2.BZ2File(pickle_dir + 'features.pbz2', 'r') as handle: features = pickle.load(handle) elif 'features.pgz' in os.listdir(pickle_dir): with gzip.GzipFile(pickle_dir + 'features.pgz', 'r') as handle: features = pickle.load(handle) ramCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 266 sClean Featuresclean_features(features) # clean_features(item_features) print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') ramCheck()2.9 GB usedSave Cachesif CACHE_FEATURES: with gzip.GzipFile('features.pgz', 'w') as handle: pickle.dump(features, handle, protocol=pickle.HIGHEST_PROTOCOL) os.path.getsize('features.pgz') / 1e9 print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 269 sCalendar Featurescal_features = pd.DataFrame() cal_features['dayofweek'] = cal.date.dt.dayofweek.astype(np.int8) cal_features['dayofmonth'] = cal.date.dt.day.astype(np.int8) cal_features['season'] = 
cal.date.dt.month.astype(np.half)State Calendar Featuresstate_cal_features = [] snap_cols = [c for c in cal.columns if 'snap' in c] state_cal_features.append( ( 'snap_day' , cal[snap_cols].astype(np.int8) ) ) state_cal_features.append( ( 'snap_day_lag_1' , cal[snap_cols].shift(1).fillna(0).astype(np.int8) ) ) state_cal_features.append( ( 'snap_day_lag_2' , cal[snap_cols].shift(2).fillna(0).astype(np.int8) ) ) state_cal_features.append( ( 'nth_snap_day', (cal[snap_cols].rolling(15, min_periods = 1).sum() * cal[snap_cols] ).astype(np.int8) ) ) for window in [2, 5, 10, 30, 60]: state_cal_features.append( ('snap_{}d_ewm'.format(window), cal[snap_cols].ewm(span = window, adjust=False).mean().astype(np.half) ) ) # strip columns to match state_id def snapRename(x): return x.replace('snap_', '') for f in range(0, len(state_cal_features)): state_cal_features[f] = (state_cal_features[f][0], state_cal_features[f][1].rename(snapRename, axis = 'columns')) # pd.merge( pd.Series(np.sum(train_flipped, axis = 1), name='total_sales'), cal, # left_index=True, right_index=True).groupby('event_name_2').mean()\ # .sort_values('total_sales', ascending=False)Holidaysfor etype in [c for c in cal.event_type_1.dropna().unique()]: cal[etype.lower() + '_holiday'] = np.where(cal.event_type_1 == etype, cal.event_name_1, np.where(cal.event_type_2 == etype, cal.event_name_2, 'None')) for etype in [c for c in cal.event_type_1.dropna().unique()]: cal[etype.lower() + '_holiday'] = cal[etype.lower() + '_holiday'].astype('category')Price Featuresdef getPricePivot(): prices = pd.read_csv(path+ '/' + 'sell_prices.csv', dtype = {'wm_yr_wk': np.int16, 'sell_price': np.float32}) prices['id'] = prices.item_id + "_" + prices.store_id price_pivot = prices.pivot(columns = 'id' , index='wm_yr_wk', values = 'sell_price') return price_pivot price_pivot = getPricePivot() ramCheck() # memCheck()Assemble Series-Features Matrix Dictsseries_to_series_id = dict([(col, idx) for idx, col in enumerate(train_flipped.columns)]) series_id_to_series = dict([(idx, col) for idx, col in enumerate(train_flipped.columns)]) series_id_level = dict([(idx, col) for idx, col in enumerate(levels)]) series_level = dict(zip(train_flipped.columns, levels)) series_to_item_id = dict([(x[1].id, x[1].item_id) for x in train_head[['id', 'item_id']].iterrows()])Featuresfor feature in features: assert feature[1].shape == features[0][1].shape fstack = features[0][1].stack(dropna = False) series_features = pd.DataFrame({'d': fstack.index.get_level_values(0) \ .map(day_to_cal_index).values.astype(np.int16), 'series': fstack.index.get_level_values(1) \ .map(series_to_series_id).values.astype(np.int16) }) del fstack for idx, feature in enumerate(features): if feature is not None: series_features[feature[0]] = feature[1].stack(dropna=False).values del features ramCheck()3.2 GB usedState Cal Featuresfor feature in state_cal_features: assert feature[1].shape == state_cal_features[0][1].shape fstack = state_cal_features[0][1].stack(dropna = False) state_cal_series_features = pd.DataFrame({'d': fstack.index.get_level_values(0) \ .map(day_to_cal_index).values.astype(np.int16), 'state': fstack.index.get_level_values(1) }) del fstack for idx, feature in enumerate(state_cal_features): if feature is not None: state_cal_series_features[feature[0]] = feature[1].stack(dropna=False).valuesClean Up NAseries_features.isnull().sum().sum() series_features.fillna(-10, inplace=True)Add CategoricalsCATEGORICALS = ['dept_id', 'cat_id', 'store_id', 'state_id', ] # 'item_id'] # never item_id; wrecks 
higher layers; for col in CATEGORICALS: series_features[col] = series_features.series.map(series_id_to_series).map( train_head.set_index('id')[col]) #.astype('category') ramCheck() # memCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 360 sMetrics and Scalingdef addSuffix(c): return c + '_validation' trailing_28d_sales = daily_sales.iloc[:,6:].transpose().rolling(28, min_periods = 1).sum().astype(np.float32) fstack = train_flipped.stack(dropna = False) weight_stack = pd.DataFrame({'d': fstack.index.get_level_values(0) \ .map(day_to_cal_index).values.astype(np.int16), 'series': fstack.index.get_level_values(1) \ .map(series_to_series_id).values.astype(np.int16), 'days_since_first': (~train_flipped.isnull()).expanding().sum().stack(dropna = False).values\ .astype(np.int16), 'trailing_vol': ( (train_flipped.diff().abs()).expanding().mean() ).astype(np.float16)\ .stack(dropna = False).values, 'weights': (trailing_28d_sales / trailing_28d_sales.transpose().groupby(levels).sum().loc[levels].transpose().values) .astype(np.float16)\ .stack(dropna = False).values, }) del fstack del trailing_28d_sales; weight_stack.dtypes new_items = weight_stack.days_since_first < 30 weight_stack[new_items].weights.sum() / weight_stack[weight_stack.days_since_first >= 0].weights.sum() weight_stack.loc[new_items, 'weights'] = 0 ramCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 413 sMerge Weight and Y into Main Dfweight_stack = clean_df(weight_stack) assert len(weight_stack) == len(series_features) assert (weight_stack.d.values == series_features.d).all() assert (weight_stack.series.values == series_features.series).all() series_features = pd.concat( (series_features, weight_stack.reset_index(drop=True).iloc[:, -2:]), axis = 1,) weight_stack = weight_stack.iloc[:10, :] fstack = train_flipped.stack(dropna = False) y_full = pd.DataFrame({'d': fstack.index.get_level_values(0) \ .map(day_to_cal_index).values.astype(np.int16), 'series': fstack.index.get_level_values(1) \ .map(series_to_series_id).values.astype(np.int16), 'y': fstack.values}) del fstack ramCheck() # memCheck()Feature Merges to Build X/Y/etc.def addMAcrosses(X): EWMS = [c for c in X.columns if 'ewm' in c and 'qs_' in c and len(c) < 12] for idx1, col1 in enumerate(EWMS): for idx2, col2 in enumerate(EWMS): if not idx1 < idx2: continue; X['qs_{}_{}_ewm_diff'.format(col1.split('_')[1], col2.split('_')[1])] = X[col1] - X[col2] X['qs_{}_{}_ewm_ratio'.format(col1.split('_')[1], col2.split('_')[1])] = X[col1] / X[col2] return X def addCalFeatures(X): # large block of code; easy; # day of week, month, season of year X['dayofweek'] = ( X.d + X.days_fwd).map(cal_index_to_day).map(cal_features.dayofweek) X['dayofmonth'] = ( X.d + X.days_fwd).map(cal_index_to_day).map(cal_features.dayofmonth) X['basedayofweek'] = X.d.map(cal_index_to_day).map(cal_features.dayofweek) X['dayofweekchg'] = (X.days_fwd % 7).astype(np.int8) X['basedayofmonth'] = X.d.map(cal_index_to_day).map(cal_features.dayofmonth) X['season'] = ( ( X.d + X.days_fwd).map(cal_index_to_day).map(cal_features.season) \ + np.random.normal( 0, 1, len(X)) ).astype(np.half) # with a full month SD of noise to not overfit to specific days; # holidays holiday_cols = [c for c in cal.columns if '_holiday' in c] for col in holiday_cols: X['base_' + col] = X.d.map(cal_index_to_day).map(cal[col]) X[col] = ( X.d + X.days_fwd).map(cal_index_to_day).map(cal[col]) return X # 'dayofweek' def 
convertToLinearFeatures(X): X = X.copy() for s in X.dayofweek.unique(): X['dayofweek_{}'.format(s)] = (X.dayofweek == s).astype(np.int8) X.drop( columns = X.columns[X.dtypes == 'category'], inplace=True) X['daysfwd_sqrt'] = (X.days_fwd ** 0.5).astype(np.half) return X def addStateCalFeatures(X): if (X.state_id == 'All').mean() > 0: print('No State Ids') return X; def rename_scf(c, name = 'basedate'): return c if (c=='d' or c == 'state') else name + '_' + c X['future_d'] = ( X.d + X.days_fwd) X['state'] = X.state_id.astype('object') nX = X.merge(state_cal_series_features[['state', 'd', 'snap_day', 'nth_snap_day']] .rename(rename_scf, axis = 'columns'), on = ['d', 'state'], validate='m:1', how = 'inner', suffixes = (False, False)) nX = nX.merge(state_cal_series_features[['state', 'd', 'snap_day', 'nth_snap_day']] .rename(columns = {'d': 'future_d'}), on = ['future_d', 'state'], validate='m:1', how = 'inner', suffixes = (False, False)) nX.drop(columns = ['state', 'future_d'], inplace=True) assert len(nX) == len(X) return nX def add_item_features(X): return X VALIDATION = -1; # 2016 # pure holdout from train and prediction sets; def getXYG(X, scale_range = None, oos = False): start_time = datetime.datetime.now(); # ensure it's in the train set, and days_forward is actually *forward* X.drop( X.index[ (X.days_fwd < 1) | ( ~oos & ( X.d + X.days_fwd > cal.index.get_loc(train_flipped.index[-1]) ) ) ], inplace=True) g = gc.collect() X = addMAcrosses(X) X = addCalFeatures(X) X = addStateCalFeatures(X) # noise to time-static features for col in [c for c in X.columns if 'store' in c and 'ratio' in c]: X[col] = X[col] + np.random.normal(0, 0.1, len(X)) print('adding noise to {}'.format(col)) # match with Y if 'y' not in X.columns: st = datetime.datetime.now(); X['future_d'] = X.d + X.days_fwd if oos: X = X.merge(y_full.rename(columns = {'d': 'future_d'}), on = ['future_d', 'series'], how = 'left') X.y = X.y.fillna(-1) else: X = X.merge(y_full.rename(columns = {'d': 'future_d'}), on = ['future_d', 'series'], )# suffixes = (None, None), validate = 'm:1') # X['yo'] = X.y.copy() g = gc.collect() scaler_columns = [c for c in X.columns if c in weight_stack.columns[2:]] scalers = X[scaler_columns].copy() y = X.y groups = pd.Series(cal.iloc[(X.d + X.days_fwd)].year.values, X.index).astype(np.int16) # feature drops if REDUCED_FEATURES: feat_drops = [c for c in X.columns if c not in (sparse_features + ['d', 'series', 'days_fwd'])] elif len(FEATURE_DROPS) > 0: feat_drops = [c for c in X.columns if any(z in c for z in FEATURE_DROPS )] print('dropping {} features; anything containing {}'.format(len(feat_drops), FEATURE_DROPS)) print(' -- {}'.format(feat_drops)) else: feat_drops = [] # final drops X.drop(columns = scaler_columns + (['future_d'] if 'future_d' in X.columns else []) + ['y'] + feat_drops , inplace=True) scalers['scaler'] = scalers.trailing_vol.copy() # randomize scaling if scale_range > 0: scalers.scaler = scalers.scaler * np.exp( scale_range * ( np.random.normal(0, 0.5, len(X))) ) # scalers.scaler = scalers.scaler * np.exp( scale_range * ( np.random.rand(len(X)) - 0.5) ) # now rescale y and 'scaled variable' in X by its vol for col in [c for c in X.columns if 'qs_' in c and 'ratio' not in c]: X[col] = np.where( X[col] == -10, X[col], (X[col] / scalers.scaler).astype(np.half)) y = y / scalers.scaler yn = (oos == False) & (y.isnull() | (groups==VALIDATION)) print("\nXYG Pull Time: {}".format(str(datetime.datetime.now() - start_time).split('.', 2)[0] )) return (X[~yn], y[~yn], groups[~yn], scalers[~yn]) 
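One detail in `getXYG` above worth isolating is the randomized scaling: when `scale_range > 0`, each row's scaler is multiplied by a log-normal jitter so the model sees the same series at slightly different magnitudes. A minimal sketch of that augmentation idea, using made-up numbers rather than the competition data:

```python
import numpy as np

# Made-up trailing-volatility scalers for three rows
base_scaler = np.array([2.0, 5.0, 7.5])

scale_range = 0.1
rng = np.random.default_rng(0)

# exp(scale_range * N(0, 0.5)) is always positive and centred (in log-space) on 1,
# so the jittered scalers stay close to the originals but vary from run to run
jitter = np.exp(scale_range * rng.normal(0, 0.5, size=base_scaler.shape))
print(base_scaler * jitter)
```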
[(k, v) for k, v in series_id_level.items() if v == 1] def getSubsample(frac, level = 12, scale_range = 0.1, n_repeats = 1, drops = True, post_process_X = None): start_time = datetime.datetime.now(); wtg_mean = series_features.weights[(series_features.series.map(series_id_level) == level)].mean() ss = series_features.weights / wtg_mean * frac print(ss) X = series_features[ (ss > np.random.rand(len(ss)) ) & (series_features.series.map(series_id_level) == level) ] ss = X.weights / wtg_mean * frac print(X.shape) print('{} series that seek oversampling'.format( (ss > 1). sum() ) ) print( ss[ss>1].sort_values()[-5:]) extras = [] while ss.max() > 1: ss = ss - 1 extras.append( X[ ss > np.random.rand(len(ss))] ) if len(extras) > 0: print(' scaled EWMS of extras:') print( ( extras[-1].qs_30d_ewm / extras[-1].trailing_vol)[-5:] ) if len(extras) > 0: X = pd.concat((X, *extras)) else: X = X.copy() X['days_fwd'] = (np.random.randint(0, 28, size = len(X)) + 1).astype(np.int8) if n_repeats > 1: X = pd.concat([X] * n_repeats) g = gc.collect() print(X.shape) X, y, groups, scalers = getXYG(X, scale_range) ramCheck() g = gc.collect() if drops: X.drop(columns = ['d', 'series'], inplace=True) if post_process_X is not None: X = post_process_X(X) print(X.shape) print("\nSubsample Time: {}\n".format(str(datetime.datetime.now() - start_time).split('.', 2)[0] )) return X, y, groups, scalers print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 451 sModelingfrom sklearn.model_selection import RandomizedSearchCV, GroupKFold, LeaveOneGroupOut from sklearn.model_selection import ParameterSampler from sklearn.metrics import make_scorer import lightgbm as lgb def quantile_loss(true, pred, quantile = 0.5): loss = np.where(true >= pred, quantile*(true-pred), (1-quantile)*(pred - true) ) return np.mean(loss) def quantile_scorer(quantile = 0.5): return make_scorer(quantile_loss, False, quantile = quantile) lgb_quantile_params = { # fairly well tuned, with high runtimes 'max_depth': [10, 20], 'n_estimators': [ 200, 300, 350, 400, ], 'min_split_gain': [0, 0, 0, 0, 1e-4, 1e-3, 1e-2, 0.1], 'min_child_samples': [ 2, 4, 7, 10, 14, 20, 30, 40, 60, 80, 100, 130, 170, 200, 300, 500, 700, 1000 ], 'min_child_weight': [0, 0, 0, 0, 1e-4, 1e-3, 1e-3, 1e-3, 5e-3, 2e-2, 0.1 ], 'num_leaves': [ 20, 30, 30, 30, 50, 70, 90, ], 'learning_rate': [ 0.02, 0.03, 0.04, 0.04, 0.05, 0.05, 0.07, ], 'colsample_bytree': [0.3, 0.5, 0.7, 0.8, 0.9, 0.9, 0.9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'colsample_bynode':[0.1, 0.15, 0.2, 0.2, 0.2, 0.25, 0.3, 0.5, 0.65, 0.8, 0.9, 1], 'reg_lambda': [0, 0, 0, 0, 1e-5, 1e-5, 1e-5, 1e-5, 3e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 10, 100 ], 'reg_alpha': [0, 1e-5, 3e-5, 1e-4, 1e-4, 1e-3, 3e-3, 1e-2, 0.1, 1, 1, 10, 10, 100, 1000,], 'subsample': [ 0.9, 1], 'subsample_freq': [1], 'cat_smooth': [0.1, 0.2, 0.5, 1, 2, 5, 7, 10], } if SPEED or SUPER_SPEED or REDUCED_FEATURES: lgb_quantile_params = { # fairly well tuned, with high runtimes 'max_depth': [10, 20], 'n_estimators': [ 150, 200, 200], # 300, 350, 400, ], 'min_split_gain': [0, 0, 0, 0, 1e-4, 1e-3, 1e-2, 0.1], 'min_child_samples': [ 2, 4, 7, 10, 14, 20, 30, 40, 60, 80, 100, 100, 100, 130, 170, 200, 300, 500, 700, 1000 ], 'min_child_weight': [0, 0, 0, 0, 1e-4, 1e-3, 1e-3, 1e-3, 5e-3, 2e-2, 0.1 ], 'num_leaves': [ 20, 30, 50, 50 ], # 50, 70, 90, ], 'learning_rate': [ 0.04, 0.05, 0.07, 0.07, 0.07, 0.1, 0.1, 0.1 ], # 0.02, 0.03, 'colsample_bytree': [0.3, 0.5, 0.7, 0.8, 0.9, 0.9, 0.9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'colsample_bynode':[0.1, 0.15, 0.2, 
0.2, 0.2, 0.25, 0.3, 0.5, 0.65, 0.8, 0.9, 1], 'reg_lambda': [0, 0, 0, 0, 1e-5, 1e-5, 1e-5, 1e-5, 3e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 10, 100 ], 'reg_alpha': [0, 1e-5, 3e-5, 1e-4, 1e-4, 1e-3, 3e-3, 1e-2, 0.1, 1, 1, 10, 10, 100, 1000,], 'subsample': [ 0.9, 1], 'subsample_freq': [1], 'cat_smooth': [0.1, 0.2, 0.5, 1, 2, 5, 7, 10], } def trainLGBquantile(x, y, groups, cv = 0, n_jobs = -1, alpha = 0.5, **kwargs): clfargs = kwargs.copy(); clfargs.pop('n_iter', None) clf = lgb.LGBMRegressor(verbosity=-1, hist_pool_size = 1000, objective = 'quantile', alpha = alpha, importance_type = 'gain', seed = datetime.datetime.now().microsecond if TIME_SEED else None, **clfargs, ) print('\n\n Running Quantile Regression for \u03BC={}\n'.format(alpha)) params = lgb_quantile_params return trainModel(x, y, groups, clf, params, quantile_scorer(alpha), n_jobs, **kwargs) def trainModel(x, y, groups, clf, params, cv = 0, n_jobs = None, verbose=0, splits=None, **kwargs): if n_jobs is None: n_jobs = -1 folds = LeaveOneGroupOut() clf = RandomizedSearchCV(clf, params, cv= folds, n_iter= ( kwargs['n_iter'] if len(kwargs) > 0 and 'n_iter' in kwargs else 4), verbose = 0, n_jobs = n_jobs, scoring = cv) f = clf.fit(x, y, groups) print(pd.DataFrame(clf.cv_results_['mean_test_score'])); print(); best = clf.best_estimator_; print(best) print("\nBest In-Sample CV: {}\n".format(np.round(clf.best_score_,4))) return best def runQBags(n_bags = 3, model_type = trainLGBquantile, data = None, quantiles = [0.5], **kwargs): start_time = datetime.datetime.now(); clf_set = []; loss_set = [] for bag in range(0, n_bags): print('\n\n Running Bag {} of {}\n\n'.format(bag+1, n_bags)) if data is None: X, y, groups, scalers = getSubsample() else: X, y, groups, scalers = data group_list = [*dict.fromkeys(groups)] group_list.sort() print("Groups: {}".format(group_list)) clfs = []; preds = []; ys=[]; datestack = []; losses = pd.DataFrame(index=QUANTILES) if SINGLE_FOLD: group_list = group_list[-1:] for group in group_list: print('\n\n Running Models with {} Out-of-Fold\n\n'.format(group)) x_holdout = X[groups == group] y_holdout = y[groups == group] ramCheck() model = model_type q_clfs = []; q_losses = [] for quantile in quantiles: set_filter = (groups != group) \ & (np.random.rand(len(groups)) < quantile_wts[quantile] ** (0.35 if LEVEL >=11 else 0.25) ) clf = model(X[set_filter], y[set_filter], groups[set_filter], alpha = quantile, **kwargs) q_clfs.append(clf) predicted = clf.predict(x_holdout) q_losses.append((quantile, quantile_loss(y_holdout, predicted, quantile))) print(u"{} \u03BC={:.3f}: {:.4f}".format(group, quantile, q_losses[-1][1] ) ) preds.append(predicted) ys.append(y_holdout) clfs.append(q_clfs) print("\nLevel {} OOS Losses for Bag {} in {}:".format(level, bag+1, group)) print(np.round(pd.DataFrame(q_losses).set_index(0)[1], 4)) losses[group] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values print("\nElapsed Time So Far This Bag: {}\n".format(str(datetime.datetime.now() - start_time).split('.', 2)[0] )) clf_set.append(clfs) print("\nLevel {} Year-by-Year OOS Losses for Bag {}:".format(level, bag, group)) print(losses) loss_set.append(losses) print("\nModel Bag Time: {}\n".format(str(datetime.datetime.now() - start_time).split('.', 2)[0] )) return clf_set, loss_set level_os = dict([(idx, 1/val) for (idx,val) in level_multiplier.items()]) # these are to use less processing time on edge quantiles QUANTILE_LEVELS = [0.005, 0.025, 0.165, 0.25, 0.5, 0.75, 0.835, 0.975, 0.995] QUANTILE_WTS = [0.1, 0.2, 0.6, 0.8, 1, 0.9, 0.7, 0.2, 0.1,] 
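Before these edge-quantile weights are zipped into a lookup in the next statement, it may help to sanity-check what the pinball loss implemented in `quantile_loss` above rewards; a small hand-checked example with made-up numbers:

```python
import numpy as np

# At quantile 0.9, under-prediction is penalised 9x more than over-prediction,
# which pushes the fitted value toward the upper tail of the target distribution.
true = np.array([10.0, 10.0])
low, high = np.array([8.0, 8.0]), np.array([12.0, 12.0])
q = 0.9

def pinball(t, p, q):
    return np.mean(np.where(t >= p, q * (t - p), (1 - q) * (p - t)))

print(pinball(true, low, q), pinball(true, high, q))  # 1.8 vs 0.2
```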
quantile_wts = dict(zip(QUANTILE_LEVELS, QUANTILE_WTS)) print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Total Time Elapsed: 451 sActually Run Modelif not IMPORT: clf_set = {}; loss_set = {}; LEVEL_QUANTILES = {}; for level in sorted(levels.unique()): print("\n\n\nRunning Models for Level {}\n\n\n".format(level)) SS_FRAC, SCALE_RANGE = P_DICT[level] # if level < 12 else ID_FILTER]; SS_FRAC = SS_FRAC * SS_SS print('{}/{}'.format(SS_FRAC, SCALE_RANGE)) # much higher iteration counts for low levels clf_set[level], loss_set[level] = runQBags(n_bags = int(BAGS * level_os[level] ** BAGS_PWR), model_type = trainLGBquantile, data = getSubsample(SS_FRAC * level_os[level] ** SS_PWR, level, SCALE_RANGE), n_iter = int( (2.2 if level <= 9 else 1.66) * (16 - (level if level <=12 else 12) ) * (1/4 if SUPER_SPEED else (1/2 if SPEED else 1)) ) , quantiles = QUANTILES, n_jobs = N_JOBS) LEVEL_QUANTILES[level] = QUANTILES getSubsample(SS_FRAC * level_os[level] ** SS_PWR, level, SCALE_RANGE), print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Import Classifiersif IMPORT: clf_sets = [] # *** path = '/kaggle/input/m5clfs/' # if LEVEL != 12: files = [f for f in os.listdir(path) if '.pkl' in f] if LEVEL == 13 and MAX_LEVEL is None: files = [f for f in files if '13_' in f or 'hobbies' in f] if LEVEL == 14 and MAX_LEVEL is None: files = [f for f in files if '14_' in f or 'household' in f] if LEVEL == 15 and MAX_LEVEL is None: files = [f for f in files if '15_' in f or 'foods' in f] # else: # files = [f for f in os.listdir(path) if '.pkl' in f and ID_FILTER.lower() in f] for file in files: clf_sets.append(pickle.load(open(path + file,'rb'))) clf_df = []; pairs = [] for clf_set in clf_sets: for level, level_clfs in clf_set.items(): for clf_bag_idx, clf_bag in enumerate(level_clfs): for group_idx, clf_group in enumerate(clf_bag): for quantile_idx, clf in enumerate(clf_group): clf_df.append((level, clf.alpha, group_idx, clf)) clf_df = pd.DataFrame(clf_df, columns = ['level', 'alpha', 'group', 'clf']) if LEVEL > 12 and MAX_LEVEL == None: clf_df.loc[clf_df.level==12, 'level'] = LEVEL # clf_df LEVEL_QUANTILES = {}; clf_set = {} for level in sorted(clf_df.level.unique()): level_df = clf_df[clf_df.level == level] level_list = [] for group in sorted(level_df.group.unique()): group_df = level_df[level_df.group == group].sort_values('alpha') if level in LEVEL_QUANTILES: assert LEVEL_QUANTILES[level] == list(group_df.alpha) else: LEVEL_QUANTILES[level] = list(group_df.alpha) level_list.append(list(group_df.clf)) if len(level_df.group.unique()) > 1: SINGLE_FOLD = False clf_set[level] = [level_list] print(level, ": ", LEVEL_QUANTILES[level]); # LEVELDisplayfor level in sorted(clf_set.keys()): print("Level {}:".format(level)) for idx, q in enumerate(LEVEL_QUANTILES[level]): print(u'\n\n Regressors for \u03BC={}:\n'.format(q)) for clf in [q_clfs[idx] for clfs in clf_set[level] for q_clfs in clfs]: print(clf) print(); print() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') # save classifiers clf_file = ('clf_set.pkl' if IMPORT else ('lvl_{}_clfs.pkl'.format(LEVEL) if MAX_LEVEL == None else 'lvls_lt_{}_clfs.pkl'.format(MAX_LEVEL))) with open(clf_file, 'wb') as handle: pickle.dump(clf_set, handle, protocol=pickle.HIGHEST_PROTOCOL) ramCheck()Feature Importancedef show_FI(model, featNames, featCount): # show_FI_plot(model.feature_importances_, featNames, featCount) fis = model.feature_importances_ fig, ax = plt.subplots(figsize=(6, 5)) indices = 
np.argsort(fis)[::-1][:featCount] g = sns.barplot(y=featNames[indices][:featCount], x = fis[indices][:featCount] , orient='h' ) g.set_xlabel("Relative importance") g.set_ylabel("Features") g.tick_params(labelsize=12) g.set_title( " feature importance") def avg_FI(all_clfs, featNames, featCount, title = "Feature Importances"): # 1. Sum clfs = [] for clf_set in all_clfs: for clf in clf_set: clfs.append(clf); fi = np.zeros( (len(clfs), len(clfs[0].feature_importances_)) ) for idx, clf in enumerate(clfs): fi[idx, :] = clf.feature_importances_ avg_fi = np.mean(fi, axis = 0) # 2. Plot fis = avg_fi fig, ax = plt.subplots(figsize=(6, 5)) indices = np.argsort(fis)[::-1]#[:featCount] #print(indices) g = sns.barplot(y=featNames[indices][:featCount], x = fis[indices][:featCount] , orient='h' ) g.set_xlabel("Relative importance") g.set_ylabel("Features") g.tick_params(labelsize=12) g.set_title(title + ' - {} classifiers'.format(len(clfs))) return pd.Series(fis[indices], featNames[indices]) def linear_FI_plot(fi, featNames, featCount): # show_FI_plot(model.feature_importances_, featNames, featCount) fig, ax = plt.subplots(figsize=(6, 5)) indices = np.argsort(np.absolute(fi))[::-1]#[:featCount] g = sns.barplot(y=featNames[indices][:featCount], x = fi[indices][:featCount] , orient='h' ) g.set_xlabel("Relative importance") g.set_ylabel("Features") g.tick_params(labelsize=12) g.set_title( " feature importance") return pd.Series(fi[indices], featNames[indices]) for level in sorted(clf_set.keys()): X = getSubsample(0.0001, level, 0.1)[0] print("Level {}:".format(level)) for idx, q in enumerate(LEVEL_QUANTILES[level]): f = avg_FI([[q_clfs[idx] for clfs in clf_set[level] for q_clfs in clfs]], X.columns, 25, title = "Level {} \u03BC={} Feature Importances".format(level, q)) print(); print() ramCheck() print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')Predictdef avg(arr, axis = 0): return np.median(arr, axis = axis) def predictSet(X, y, groups, scalers, clf_set): start_time = datetime.datetime.now(); group_list = [*dict.fromkeys(groups)] group_list.sort() # print(group_list) y_unscaled = y * scalers.scaler all_preds = []; ys=[]; gs = []; xs = []; scaler_stack = [] if SINGLE_FOLD: group_list = group_list[-1:] for group_idx, group in enumerate(group_list): g = gc.collect() x_holdout = X[groups == group] y_holdout = y_unscaled[groups == group] scalers_holdout = scalers[groups == group] groups_holdout = groups[groups == group] preds = np.zeros( (len(QUANTILES), len(y_holdout)), dtype=np.half) for q_idx, quantile in enumerate(QUANTILES): q_preds = np.zeros( ( len(clf_set), len(y_holdout) ) ) for bag_idx, clf in enumerate(clf_set): x_clean = x_holdout.drop(columns = [c for c in x_holdout.columns if c=='d' or c=='series']) if group_idx >= len(clf_set[bag_idx]): # if out of sample year, blend all years qs_preds = np.zeros( (group_idx, len(x_clean)) ) for gidx in range(group_idx): qs_preds[gidx, :] = clf_set[bag_idx][gidx][q_idx].predict(x_clean) q_preds[bag_idx, :] = np.mean(qs_preds, axis = 0) else: q_preds[bag_idx, :] = clf_set[bag_idx][group_idx][q_idx].predict(x_clean) q_preds = avg(q_preds) * scalers_holdout.scaler preds[q_idx, :] = q_preds # print(u"{} \u03BC={:.3f}: {:.4f}".format(group, quantile, quantile_loss(y_holdout, q_preds, quantile) ) ) all_preds.append(preds) xs.append(x_holdout) ys.append(y_holdout) gs.append(groups_holdout) scaler_stack.append(scalers_holdout) print() y_pred = np.hstack(all_preds) scaler_stack = pd.concat(scaler_stack) y_true = pd.concat(ys) groups = 
pd.concat(gs) X = pd.concat(xs) end_time = datetime.datetime.now(); print("Bag Prediction Time: {}".format(str(end_time - start_time).split('.', 2)[0] )) return y_pred, y_true, groups, scaler_stack, X def predictOOS(X, scalers, clf_set, QUANTILES, validation = False): start_time = datetime.datetime.now(); group_list = [1 + i for i in range(0, len(clf_set[0]))] if validation: group_list = np.zeros(len(clf_set[0])) group_list[-1] = 1 divisor = sum(group_list) print(np.round([g / divisor for g in group_list], 3)); print() x_holdout = X scalers_holdout = scalers preds = np.zeros( (len(clf_set[0][0]), len(x_holdout)), dtype=np.float32) for q_idx in range( len(clf_set[0][0])): # loop over quantiles print(u'Predicting for \u03BC={}'.format( QUANTILES[q_idx]) ) q_preds = np.zeros( ( len(clf_set), len(x_holdout) ), dtype = np.float32 ) for bag_idx, clf in enumerate(clf_set): x_clean = x_holdout # .drop(columns = [c for c in x_holdout.columns if c=='d' or c=='series']) qs_preds = np.zeros( (len(group_list), len(x_clean)), dtype = np.float32 ) if SINGLE_FOLD: group_list = group_list[-1:] for gidx in range(len(group_list)): if group_list[gidx] > 0: qs_preds[gidx, :] = clf_set[bag_idx][gidx][q_idx].predict(x_clean) * group_list[gidx] / divisor q_preds[bag_idx, :] = np.sum(qs_preds, axis = 0) q_preds = np.mean(q_preds, axis = 0) * scalers_holdout.scaler preds[q_idx, :] = q_preds end_time = datetime.datetime.now(); print("Bag Prediction Time: {}".format(str(end_time - start_time).split('.', 2)[0] )) return preds def wspl(true, pred, weights, trailing_vol, quantile = 0.5): loss = weights * np.where(true >= pred, quantile*(true-pred), (1-quantile)*(pred - true) ) / trailing_vol return np.mean(loss) / np.mean(weights)Random Sample ScoringVALIDATION = -1 RSEED = 11 # number of samples for each data point; N_REPEATS = 20 #if LE <15 else 10 # clf_set qls = {}; all_predictions = {} for level in sorted(set(clf_set.keys()) & set(levels)): print("\n\n\nLevel {}\n\n\n".format(level)) QUANTILES = LEVEL_QUANTILES[level] SS_FRAC, SCALE_RANGE = P_DICT[level] # if level < 12 else ID_FILTER]; SS_FRAC = SS_FRAC * SS_SS EVAL_FRAC = SS_FRAC * (1 if level < 11 else 1/2) EVAL_PWR = 0.6 SCALE_RANGE_TEST = SCALE_RANGE np.random.seed(RSEED) X, y, groups, scalers = getSubsample(EVAL_FRAC * level_os[level] ** EVAL_PWR, level, SCALE_RANGE_TEST, n_repeats = N_REPEATS if level < 15 else N_REPEATS//2, drops=False) if len(X) == 0: print("No Data for Level {}".format(level)) continue; y_pred, y_true, groups, scaler_stack, X = predictSet(X, y, groups, scalers, clf_set[level]); # assert (y_true == y.values * scalers.trailing_vol).all() predictions = pd.DataFrame(y_pred.T, index=y_true.index, columns = QUANTILES) predictions['y_true'] = y_true.values predictions = pd.concat((predictions, scaler_stack), axis = 'columns') predictions['group'] = groups.values predictions['series'] = X.series predictions['d'] = X.d predictions['days_fwd'] = X.days_fwd losses = pd.DataFrame(index=QUANTILES) for group in groups.unique(): subpred = predictions[predictions.group == group] q_losses = [] for quantile in QUANTILES: q_losses.append((quantile, wspl(subpred.y_true, subpred[quantile], 1, subpred.trailing_vol, quantile))) losses[group] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values qls[level] = [losses] ramCheck() # now combine them predictions = predictions.groupby(['series', 'd', 'days_fwd']).agg( dict([(col, 'mean') for col in predictions.columns if col not in ['series', 'd', 'days_fwd']]\ + [('days_fwd', 'count')]) )\ .rename(columns = 
{'days_fwd': 'ct'}).reset_index() predictions.head() predictions.sort_values('ct', ascending = False).head(5) print(len(predictions)) all_predictions[level] = predictions for level in sorted(all_predictions.keys()): predictions = all_predictions[level] losses = pd.DataFrame(index=LEVEL_QUANTILES[level]) for group in groups.unique(): subpred = predictions[predictions.group == group] q_losses = [] for quantile in QUANTILES: q_losses.append((quantile, wspl(subpred.y_true, subpred[quantile], subpred.ct, subpred.trailing_vol, quantile))) losses[group] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values qls[level] = [losses] print("\n\n\nLevel {} Year-by-Year OOS Losses for Evaluation Bag {}:".format(level, 1)) print(losses); #print(); print() # print(BAGS) # print(SS_FRAC) # print(X.shape); #del X # print(SCALE_RANGE_TEST) # print(N_REPEATS) # all_predictions[1][all_predictions[1].d == 1912].drop(columns = ['series', 'd', 'group', 'ct'])\ # .set_index('days_fwd').plot() # X.dayofweek for level in sorted(all_predictions.keys()): # print("\nLevel {}:".format(level)) predictions = all_predictions[level] predictions['future_d'] = predictions.d + predictions.days_fwd for quantile in QUANTILES: true = predictions.y_true pred = predictions[quantile] trailing_vol= predictions.trailing_vol predictions['loss_{}'.format(quantile)] = \ np.where(true >= pred, quantile*(true-pred), (1-quantile)*(pred - true) ) / trailing_vol predictions['loss'] = predictions[[c for c in predictions.columns if 'loss_' in str(c)]].sum(axis = 1) predictions['wtg_loss'] = predictions.loss * predictions.ct / predictions.ct.mean() # predictions.groupby('series').loss.sum() # predictions.groupby('series').wtg_loss.sum() # predictions.groupby('series').wtg_loss.sum().sum() # predictions.groupby(['series', 'd']).wtg_loss.sum().reset_index().pivot('d', 'series', values='wtg_loss').plot() # predictions.groupby(['series', 'd']).wtg_loss.sum().reset_index().pivot('d', 'series', values='wtg_loss')\ # .ewm(span = 7).mean().plot(); # (predictions.groupby(['series', 'future_d']).wtg_loss.sum().reset_index()\ # .pivot('future_d', 'series', values='wtg_loss').ewm(span = 7).mean() \ # ).plot(); # predictions.groupby(['series', 'future_d']).wtg_loss.sum().sort_values(ascending = False) #.ewm(span = 7).mean() \ # ).plot(); # predictions.groupby(['series', 'future_d']).wtg_loss.sum().sum() # predictions[(predictions.series == 0) & (predictions.days_fwd < 7 )].groupby('future_d').mean()\ # [[c for c in predictions.columns if '.' 
in str(c) and 'loss' not in str(c)]]\ # .loc[1550:1700].plot(linewidth = 0.4) # train_flipped.iloc[:, 1].reset_index(drop=True).loc[1550:1700].plot( linewidth = 1); # train_flipped.iloc[active_days, 1].iloc[1000:].plot(); print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's') ramCheck() # memCheck()Make SubmissionMEM_CAPACITY = 3e6 MAX_RUNS = 2500 * (1/10 if SPEED or SUPER_SPEED else 1) MIN_RUNS = 20 * (1/20 if SPEED or SUPER_SPEED else 1) all_predictions = {} for level in sorted(list(set(levels.unique()) & set(clf_set.keys()))): print('\n\nCreating Out-of-Sample Predictions for Level {}\n'.format(level)) final_base = FINAL_BASE assert (final_base in ['d_1941', 'd_1913']) if final_base == 'd_1941': suffix = 'evaluation' elif final_base == 'd_1913': suffix = 'validation' print(' predicting 28 days forward from {}'.format(final_base)) final_features = series_features[( series_features.d.map(cal_index_to_day) == final_base) & (series_features.series.map(series_id_level) == level) ] print(' for {} series'.format(len(final_features))) SS_FRAC, SCALE_RANGE = P_DICT[level] # if level < 12 else ID_FILTER]; SS_FRAC = SS_FRAC * 0.8 print(' scale range of {}'.format(SCALE_RANGE)) if level <= 9 or SPEED: X = [] for df in range(0,28): Xi = final_features.copy() Xi['days_fwd'] = df + 1 X.append(Xi) X = pd.concat(X, ignore_index = True); del Xi; del final_features; Xn = np.power(X.weights, 2) Xn = (Xn * MEM_CAPACITY / Xn.sum()).clip(MIN_RUNS, MAX_RUNS) Xn = (Xn * MEM_CAPACITY / Xn.sum()).clip(MIN_RUNS, MAX_RUNS) print(' average repeats: {:.0f}'.format(Xn.mean())) print(' median repeats: {:.0f}'.format(Xn.median())) print(' max repeats: {:.0f}'.format(Xn.max())) X = X.loc[np.repeat(Xn.index, Xn)] X, y, groups, scalers = getXYG(X, scale_range = SCALE_RANGE, oos = True) Xd = X.d; Xseries = X.series X.drop(columns=['d', 'series'], inplace = True) print(X.shape) y_pred = predictOOS(X, scalers, clf_set[level], LEVEL_QUANTILES[level], suffix == 'validation'); print() predictions = pd.DataFrame(y_pred.T, index=X.index, columns = LEVEL_QUANTILES[level]) predictions = pd.concat((predictions, scalers), axis = 'columns') predictions['series'] = Xseries predictions['d'] = Xd predictions['days_fwd'] = X.days_fwd.astype(np.int8) predictions['y_true'] = y * scalers.scaler # break; ramCheck() predictions = predictions.groupby(['series', 'd', 'days_fwd']).agg( dict([(col, 'mean') for col in predictions.columns if col not in ['series', 'd', 'days_fwd']]\ + [('days_fwd', 'count')]) )\ .rename(columns = {'days_fwd': 'ct'}).reset_index() predictions.days_fwd = predictions.days_fwd.astype(np.int8) else: # levels 10, 11, 12 predictions_full = [] for df in range(0,28): print( '\n Predicting {} days forward from {}'.format(df + 1, final_base)) X = final_features.copy() X['days_fwd'] = df + 1 Xn = np.power(X.weights, 1.5) Xn = (Xn * MEM_CAPACITY / Xn.sum()).clip(MIN_RUNS, MAX_RUNS) Xn = (Xn * MEM_CAPACITY / Xn.sum()).clip(MIN_RUNS, MAX_RUNS) print(' average repeats: {:.0f}'.format(Xn.mean())) print(' median repeats: {:.0f}'.format(Xn.median())) print(' max repeats: {:.0f}'.format(Xn.max())) X = X.loc[np.repeat(Xn.index, Xn)] X, y, groups, scalers = getXYG(X, scale_range = SCALE_RANGE, oos = True) Xd = X.d; Xseries = X.series X.drop(columns=['d', 'series'], inplace = True) print(X.shape) y_pred = predictOOS(X, scalers, clf_set[level], LEVEL_QUANTILES[level], suffix == 'validation'); print() predictions = pd.DataFrame(y_pred.T, index=X.index, columns = LEVEL_QUANTILES[level]) predictions = 
pd.concat((predictions, scalers), axis = 'columns') predictions['series'] = Xseries predictions['d'] = Xd predictions['days_fwd'] = X.days_fwd.astype(np.int8) predictions['y_true'] = y * scalers.scaler ramCheck() predictions = predictions.groupby(['series', 'd', 'days_fwd']).agg( dict([(col, 'mean') for col in predictions.columns if col not in ['series', 'd', 'days_fwd']]\ + [('days_fwd', 'count')]) )\ .rename(columns = {'days_fwd': 'ct'}).reset_index() predictions.days_fwd = predictions.days_fwd.astype(np.int8) predictions_full.append(predictions) predictions = pd.concat(predictions_full); del predictions_full all_predictions[level] = predictions; del predictions with open('all_predictions_raw.pkl', 'wb') as handle: pickle.dump(all_predictions, handle, protocol=pickle.HIGHEST_PROTOCOL) # all_predictions = pickle.load(open('../input/m5-submissions/all_predictions_valid_19.pkl', 'rb')) losses = pd.DataFrame(index=LEVEL_QUANTILES[levels.min()]) for level in sorted(all_predictions.keys()): predictions = all_predictions[level] subpred = predictions q_losses = [] for quantile in LEVEL_QUANTILES[level]: q_losses.append((quantile, wspl(subpred.y_true, subpred[quantile], subpred.weights, subpred.trailing_vol, quantile))) # print(np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values) losses[level] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values # print("\n\n\nLevel {} Year-by-Year OOS Losses for Evaluation Bag {}:".format(level, 1)) print(losses); print(); print() print(losses.mean()) print(losses.mean().mean())Level Harmonizera = pd.DataFrame(index = range(1, 29)) for level in sorted(all_predictions.keys()): if level > 9: continue; a[level] = all_predictions[level].groupby('days_fwd')[0.5].sum() / level_multiplier[level] try: a.plot() except: pass; # all_predictions[level][quantile] # all_predictions[level][quantile] * all_predictions[level].days_fwd.map(a.mean(axis=1) / a[level] ) ADJUSTMENT_FACTOR = 1 if SPEED or SUPER_SPEED else 0.7 # probably better as 1.0, but used 0.7 to be safe; for level in sorted(all_predictions.keys()): if level > 9: continue; for quantile in LEVEL_QUANTILES[level]: all_predictions[level][quantile] = all_predictions[level][quantile] \ * ( (1 - ADJUSTMENT_FACTOR) + ADJUSTMENT_FACTOR * all_predictions[level].days_fwd.map( a.mean(axis=1) / a[level] ) ) a = pd.DataFrame(index = range(1, 29)) for level in sorted(all_predictions.keys()): if level > 9: continue; a[level] = all_predictions[level].groupby('days_fwd')[0.5].sum() / level_multiplier[level] try: a.plot() except: pass; losses = pd.DataFrame(index=LEVEL_QUANTILES[level]) for level in sorted(all_predictions.keys()): predictions = all_predictions[level] subpred = predictions q_losses = [] for quantile in LEVEL_QUANTILES[level]: q_losses.append((quantile, wspl(subpred.y_true, subpred[quantile], subpred.weights, subpred.trailing_vol, quantile))) # print(np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values) losses[level] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values # print("\n\n\nLevel {} Year-by-Year OOS Losses for Evaluation Bag {}:".format(level, 1)) print(losses); print(); print() print(losses.mean()) print(losses.mean().mean()) if suffix == 'validation': losses = pd.DataFrame(index=LEVEL_QUANTILES[level]) for level in sorted(all_predictions.keys()): predictions = all_predictions[level] subpred = predictions q_losses = [] for quantile in LEVEL_QUANTILES[level]: q_losses.append((quantile, wspl(subpred.y_true, subpred[quantile], subpred.weights, subpred.trailing_vol, quantile))) 
losses[level] = np.round(pd.DataFrame(q_losses).set_index(0)[1], 4).values # print("\n\n\nLevel {} Year-by-Year OOS Losses for Evaluation Bag {}:".format(level, 1)) print(losses); print(); print() print(losses.mean()) if suffix == 'validation': losses.plot() for level in sorted(all_predictions.keys()): predictions = all_predictions[level] (predictions.groupby('days_fwd')[0.5].sum() / level_multiplier[level]).plot(legend = True, label = level, linewidth = 0.5) if suffix=='validation': ( predictions.groupby('days_fwd').y_true.sum() / level_multiplier[level]) .plot(linewidth = 1.5) train_flipped.shapeGraphs# (series_features[( series_features.d.map(cal_index_to_day) == final_base) & # (series_features.series.map(series_id_level) == level) ]\ # .sort_values('weights', ascending = False).reset_index().weights.astype(np.float32) ** 1.5).cumsum().plot() for level in sorted(all_predictions.keys()): predictions = all_predictions[level] if level <= 9: series_list = predictions.series.unique()[:5] else: series_list = series_features[( series_features.d.map(cal_index_to_day) == final_base) & (series_features.series.map(series_id_level) == level) ]\ .sort_values('weights', ascending = False).series.to_list()\ [:len(predictions.series.unique())//20 : len(predictions.series.unique()) // 500] for series in series_list: DAYS_BACK = 60 if suffix == 'evaluation': prior = train_flipped.iloc[-DAYS_BACK:, series] prior.index = range(-DAYS_BACK + 1, 1 ) else: prior = train_flipped.iloc[-DAYS_BACK:, series] prior.index = range(-DAYS_BACK + 28 + 1, 28 + 1 ) f = prior.plot( linewidth = 1.5); f = predictions[predictions.series == series].set_index('days_fwd')\ [[c for c in predictions.columns if c in LEVEL_QUANTILES[level]]].plot( title = ("Level {} - {}".format(level, series_id_to_series[series]) + ("" if level <=9 else " - weight of {:.2%}".format( predictions[predictions.series == series].weights.mean() ))) , linewidth = 0.5, ax = f); f = plt.figure(); # break; output_rows = [] for level in sorted(all_predictions.keys()): predictions = all_predictions[level] df = predictions[ ['series', 'days_fwd'] + list(LEVEL_QUANTILES[level])].copy() df.series = df.series.map(series_id_to_series) df = df.melt(['series', 'days_fwd'], var_name = 'q' ) df.value = df.value / level_multiplier[level] df['name'] = df.series + '_' + df.q.apply(lambda x: '{0:.3f}'.format(x)) + '_' + suffix # df.days_fwd = 'F' + df.days_fwd.astype(str) for q in df.q.unique(): qdf = df[df.q==q].pivot('name', 'days_fwd', 'value') qdf.columns = ['F{}'.format(c) for c in qdf.columns] qdf.index.name = 'id' output_rows.append(qdf) output = pd.concat(output_rows) output.tail() sample_sub.head() assert len(set(output.index) - set(sample_sub.id)) == 0 assert len(set(sample_sub.id) & set(output.index)) == len(output) output_file = ('submission_{}_lvl_{}.csv'.format(suffix, LEVEL) if MAX_LEVEL == None else 'submission_{}_lt_{}.csv'.format(suffix, MAX_LEVEL)) output.round(3).to_csv(output_file) print(len(output) ) output print('Total Time Elapsed: ', (datetime.datetime.now() - start).seconds, 's')import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import nltk nltk.download('stopwords') from nltk.corpus import stopwords import string from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report from sklearn.metrics import accuracy_score from sklearn.neural_network import MLPClassifier %matplotlib inline 
%config InlineBackend.figure_format='retina' yelp_df = pd.read_csv('https://raw.githubusercontent.com/rabin1323/Data_Set/main/yelp_review_csv1.csv') yelp_df**EXPLORING DATASET**yelp_df.info() yelp_df.describe() #to check if we have any null values sns.heatmap(yelp_df.isnull(), yticklabels=False, cbar= False, cmap="Blues") #if there is any dot on the plot that means we have null values yelp_df=yelp_df.drop(['useful','funny','cool','review_id','user_id','business_id','date'], axis=1) yelp_df #data cleaning def preprocess(review_text): remove_pctn=[char for char in review_text if char not in string.punctuation] remove_pctn= ''.join(remove_pctn) lwr = [word.lower() for word in remove_pctn.split()] final_word = [word for word in lwr if word not in stopwords.words('english')] return final_wordFor the sentiment analsysis we want to consider only two types of stars here i.e, one star for negative reviews and fives stars for positive reviews.We will also use count vectorizer to make a model which will be used to understand the review text. After that we will transform the vectorized text and assign to variable x. Lastly, we will split the entire data to train and test model using train_test_split()#Filtering Data filtered_data = yelp_df[(yelp_df['stars']==1) | (yelp_df['stars']==5)] x = filtered_data['text'] #assigning review text to variable x y=filtered_data['stars'] #assigning stars to variable y vectorizer=CountVectorizer(analyzer=preprocess).fit(x) x=vectorizer.transform(x) #transforming the vectorized text X_train, X_test, y_train, y_test= train_test_split(x, y, random_state=42) sns.countplot(filtered_data['stars']) model= MLPClassifier() model.fit(X_train, y_train) y_predict = model.predict(X_test) #plotting the reviews using confusion matrix def conf_matrix(y, y_predict, reviews, title= 'Confusion_Matrix'): c_matrix = confusion_matrix(y, y_predict) clsfn_report = classification_report(y, y_predict) ticks = np.arange(len(reviews)) score = accuracy_score(y, y_predict) score=round(score*100,2) print("Accuracy_score:", score) print('classification_report', clsfn_report) sns.heatmap(c_matrix, cmap= 'PuBu', annot= True, fmt='g', annot_kws={'size':20}) plt.xticks(ticks, reviews) plt.yticks(ticks, reviews) plt.xlabel('predicted', fontsize=20) plt.ylabel('actual', fontsize=20) plt.title(title, fontsize=20) plt.show conf_matrix(y_test, y_predict, reviews=['negative(1)', 'positive(5'])Accuracy_score: 93.86 classification_report precision recall f1-score support 1 0.94 0.76 0.84 234 5 0.94 0.99 0.96 873 accuracy 0.94 1107 macro avg 0.94 0.87 0.90 1107 weighted avg 0.94 0.94 0.94 1107Movie Recommender system![alt text](https://github.com/lerekoqholosha/unsupervised-predict-streamlit-template/raw/master/resources/imgs/Image_header.png) Table of contents [1. Introduction](introduction)1.1. Project overview1.2. Problem statement [2. Start Comet experiment](scomet) [3. Package and module imports](pack) [4. Load data](load) [5. Description of the data](dod) [6. Data pre-processing](cleaning) [7. Exploratory Data Analysis](eda) [8. Feature engineering](pp) [9. Modelling](mod) [10. Model evaluation](me) [11. Conclusion](conc) [12. End Comet experiment](ecomet) > 1. Introduction 1.1. Project overviewMachine Learning (ML) is a subset of Artificial Intelligence (AI), dating as far back as 1959, where computers are trained to pick up patterns and make decisions with little to no human interference. There are two main types, supervised and unsupervised learning. 
Unsupervised ML algorithms are far more flexible, as the datasets used do not provide label values; the computer uses trends in the data to compute results. They can be used to build recommender systems. A recommender system is an engine or platform that can predict certain choices based on responses provided by users. A great example would be a system on a streaming platform that recommends a movie or show for a user to watch based on their previous viewings or the viewings of other users that have watching habits similar to theirs. With the increasing use of web services such as Netflix, Showmax and YouTube amongst many, there is an unfathomable amount of content available. It would be a tedious task for a user to search through it all for things that they would enjoy watching. Recommender systems are also used in other services such as online shopping stores and networking spaces like LinkedIn. A recommender system enhances a user's experience, as the luxury of recommendations saves the user the time and effort of having to search through a large catalogue. It also exposes the user to new content, creating an opportunity for further streaming because they interact with content that is meaningful and desirable to them. In fact, most companies make the bulk of their revenue from recommendations. The rating functionality also assists in collecting data that can help the streaming platform establish trends and gather insights from what their users are consuming. This can assist in better content selection and marketing. 1.2. Problem statementBuild a recommendation algorithm that will use a user's historical preferences to accurately predict the rating that they will give a movie that they have not watched. 2. Start Comet Experiment Comet is a powerful, cloud-based version-control platform that is essential for project management. It allows teams to save all their models and the associated (hyper)parameters used during a project. All of the models are logged as Comet experiments for safekeeping, reproducibility and comparison.# Install Comet #!pip install comet_ml # Import Experiment Class #from comet_ml import Experiment # Start experiment #experiment = Experiment(api_key="", #project_name="unsupervised-predict", workspace="lerekoqholosha") # Set a new experiment name for each run #experiment.set_name('xxxx')3. Package and module importsimport numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import datetime import scipy as sp from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import TfidfVectorizer import operator import heapq from surprise import Dataset from surprise import Reader from surprise import accuracy from surprise.model_selection import train_test_split from surprise import SVD, BaselineOnly, CoClustering, NMF import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))/kaggle/input/edsa-recommender-system-predict/movies.csv /kaggle/input/edsa-recommender-system-predict/imdb_data.csv /kaggle/input/edsa-recommender-system-predict/genome_scores.csv /kaggle/input/edsa-recommender-system-predict/sample_submission.csv /kaggle/input/edsa-recommender-system-predict/tags.csv /kaggle/input/edsa-recommender-system-predict/test.csv /kaggle/input/edsa-recommender-system-predict/links.csv /kaggle/input/edsa-recommender-system-predict/genome_tags.csv /kaggle/input/edsa-recommender-system-predict/train.csv4. 
Load datagen_scores = pd.read_csv('../input/edsa-recommender-system-predict/genome_scores.csv') gen_tgs = pd.read_csv('../input/edsa-recommender-system-predict/genome_tags.csv') imd = pd.read_csv('../input/edsa-recommender-system-predict/imdb_data.csv') links = pd.read_csv('../input/edsa-recommender-system-predict/links.csv') mvs = pd.read_csv('../input/edsa-recommender-system-predict/movies.csv') ss = pd.read_csv('../input/edsa-recommender-system-predict/sample_submission.csv') tgs = pd.read_csv('../input/edsa-recommender-system-predict/tags.csv') test = pd.read_csv('../input/edsa-recommender-system-predict/test.csv') train = pd.read_csv('../input/edsa-recommender-system-predict/train.csv')5. Description of the data The dataset is derived from an online movie recommendation service called MovieLens. It consists of millions of 5-star ratings provided by MovieLens users. Additional data has been scraped from IMDB and added to enhance the dataset. The supplied dataset comprises the following:1. genome_scores.csv - A score mapping the strength between movies and tag-related properties2. genome_tags.csv - User assigned tags for genome-related scores3. imdb_data.csv - Additional movie metadata scraped from IMDB using the links.csv file4. links.csv - File providing a mapping between a MovieLens ID and associated IMDB and TMDB IDs5. sample_submission.csv - Sample of the submission format for the hackathon6. tags.csv - User assigned tags for the movies within the dataset7. test.csv - The test split of the dataset. Contains user and movie IDs with no rating data8. train.csv - The training split of the dataset. Contains user and movie IDs with associated rating data 6. Data pre-processing Data preprocessing is the process of transforming raw data into a format that makes it useful for analysis and modelling. Techniques are used to identify errors and to modify or remove parts of the data in order to improve its overall quality. The first five rows of all the dataframes will be displayed and their information probed to assess the contents of the tables.ss.head()The expected output for the exercise is a dataframe with two columns, Id and the predicted rating.train.head() train.info() links.head() links.info() mvs.head() mvs.info() tgs.head() tgs.info() gen_scores.head() gen_scores.info() gen_tgs.head() gen_tgs.info() imd.head() imd.info() RangeIndex: 27278 entries, 0 to 27277 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 movieId 27278 non-null int64 1 title_cast 17210 non-null object 2 director 17404 non-null object 3 runtime 15189 non-null float64 4 budget 7906 non-null object 5 plot_keywords 16200 non-null object dtypes: float64(1), int64(1), object(4) memory usage: 1.2+ MBNull values Null values are common in datasets. They can indicate that something went wrong during the collection of the data, or simply that the information does not exist. It is imperative to check for null values and deal with them because they can cause complications whilst modelling.train.isnull().sum() links.isnull().sum() tgs.isnull().sum() gen_scores.isnull().sum() gen_tgs.isnull().sum() imd.isnull().sum()There is a noticeable number of null values in each dataframe. Each will be handled on a case-by-case basis depending on how the dataframe will be utilised. The null values can either be removed or imputed. The type of imputation will be dictated by the data type of the feature and its perceived relevance. 
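As an illustration of those two options, here is a minimal sketch; the specific imputation choices below are illustrative assumptions, not the decisions made later in this notebook.
# numeric feature: impute missing runtimes with the median (illustrative choice)
imd['runtime'] = imd['runtime'].fillna(imd['runtime'].median())
# features considered essential: drop rows that lack them (illustrative choice)
imd_complete_cast = imd.dropna(subset=['title_cast', 'director'])
# low-relevance feature: keep the rows but flag the gap explicitly (illustrative choice)
imd['budget'] = imd['budget'].fillna('unknown')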
Merge datasets The train, movies and imdb dataframes will be merged as it is easier to work with one dataframe.combined_df = pd.merge(train, mvs, how = 'left',on = 'movieId') combined_df = pd.merge(combined_df,imd, how = 'left', on = 'movieId') combined_df.head()Concatenated data and special charactersThe genres in the genre column are concatenated using a pipe. In order to maximise the value of this information, they must be separated.combined_df['genres'] = combined_df['genres'].str.split('|').str.join(',') combined_df.head()Datetime formatting The timestamp column is currently represented in a unix time format. The timestamp is changed from a unix timestamp to a readable and interpretable format, which we will use in our EDA to see how the ratings of the movies vary as the years progress.combined_df['timestamp'] = combined_df['timestamp'].apply(lambda x: datetime.datetime.fromtimestamp(x)) timestamp = combined_df['timestamp'].dt.year combined_df['rating_year'] = timestamp combined_df.drop('timestamp', axis = 1, inplace = True) combined_df.head()7. Exploratory Data Analysis Exploratory Data Analysis (EDA) is great to carry out because it allows for the analysis of the data to extract any information that may be useful in modelling or pick up any trends that the models can miss. It is also used to summarise the main characteristics of the data in order to make insights and recommendations.train1 = train.copy() train1.drop('timestamp', axis = 1, inplace = True) ratings_per_user = train1.groupby('userId')['rating'].count().reset_index().sort_values('rating', ascending=False) ratings_per_user.head(10)The distribution of the number of ratings would be a great visualisation in order to see how the movies are ranked.sns.countplot(x='rating', data=train) plt.xlabel('Rating') plt.ylabel('Count') plt.ticklabel_format(style='plain', axis='y') plt.show()The distribution of the ratings indicates that most movies have a rating of at least 3 and that a rating of 4 is the most popular. The mean rating is around 3.5. This means that most of the movies in the dataset have a fairly good score. It also means that the ratings are skewed to the right, which is supported by the mean and median calculation below. Further, based on the movies which are rated below 3, it can be inferred that 15.4% of the movies in the dataset are "bad" movies.print(train['rating'].mean()) print(train['rating'].median())3.5333951730983424 3.5The movies can be ranked according to their average ratings.ranked_movies = pd.DataFrame(combined_df.groupby('title')['rating'].mean(). sort_values(ascending=False)) ranked_movies.head()This table is not an accurate reflection of the best movies, as some of the movies have only a few ratings. This can influence their average rating. For example, movie A might have only one rating of 5 and thus an average rating of 5. Contrastingly, movie B, which has an average rating of 4.5, may have 100 ratings. One could easily think that movie A is better than movie B, but that is not true; based on the number of ratings, movie B is actually better. 
Therefore, it would be better to determine the best movie best on both the number of ratings it has and also its average rating as shown below.ranked_movies['No_of_ratings'] = combined_df.groupby('title')['rating'].count() ranked_movies.sort_values(by=['No_of_ratings', 'rating'], ascending=False).head()As indicated by the table,the best movies are those that have both average high ratings and also a high number of ratings as they have been rated highly by large quantities of people. The average rating decreases with the decreasing number of ratings, i.e. they are positively correlated.sns.scatterplot(x='rating', y='No_of_ratings', data=ranked_movies) plt.xlabel('Rating') plt.ylabel('Number of ratings') plt.show()As expected, the higher the average ratings, the higher the number of ratings the movie has.best_director = pd.DataFrame(combined_df.groupby('director')['rating'].mean(). sort_values(ascending=False)) best_director['No_of_movies'] = combined_df.groupby('director')['rating'].count() best_director.sort_values(by=['No_of_movies', 'rating'], ascending=False).head()The directors can also be ranked in terms of the average rating that their movies have achieved and also the number of movies they have played a role in producing. It can be said that a director has built a reputation for him/herself.sns.scatterplot(x = 'rating', y = 'No_of_movies', data = best_director) plt.xlabel('Rating') plt.ylabel('Number of ratings') plt.show()As expected, most directors with a high number of movies have average to good movie ratings. It would be great to determine if the ratings are affected by the year at which the ratings were made i.e. do the ratings decrease as the movie gets older? With changes in technology, audio and visuals are constantly improving. Due to this, people might give an older moviewith a low score as years progress as they would be comparing it with the current movies with better graphics.yearly_rating = pd.DataFrame(combined_df.groupby(['title','rating_year'])['rating'].mean()) yearly_rating.reset_index(inplace = True) count_ratings = pd.DataFrame(yearly_rating.groupby('title')['rating_year'].count().sort_values(ascending = False).head(10)) count_ratings.reset_index(inplace = True) sns.lineplot(x="rating_year", y="rating", hue="title", data=yearly_rating[yearly_rating['title']. isin(count_ratings['title'])]).set_title('Change in movie ratings per year') plt.xlabel('Year') plt.ylabel('Rating') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)From the plot above, there is a slight drop from 1995 to 2020 in annual moving ratings howver it is not significant, at all. It seems as if the ratings given to a movie each year are not hugely affected by the year that the rating was given (there is no pattern in the ratings). From this, it can be concluded that the year of the rating has no significance in predicting the ratings of the movies. Movie duration can also be an important feature to consider when making a rating or when recommending a movie to someone as there might be viewers who do not like watching long movies or vice versa.sns.violinplot(x = 'runtime', data = combined_df,).set_title('Distribution of Movie Duration') plt.show() print('runtime mean: ', combined_df['runtime'].mean()) print('runtime standard deviation: ', combined_df['runtime'].std())Most of the runtimes are centered around the mean as shown by the violin plot above and the variation between the length of the movies is quite small, as shown by the standard deviation. There are also a few outliers. 
Also, realistically speaking, runtime is not really important in determining if a person will watch a movie because if they really do not like watching long movies, they can simply watch it in intervals. Each movie is associated with a set of genres.list_list = list(combined_df['genres'].str.split('|')) genres_list = [] for i in list_list: for j in i: genres_list.append(j) print(genres_list[0:11]) print(len(genres_list)) genres_list = pd.DataFrame(genres_list) genres_list.rename(columns = {0:'genres'}, inplace = True) genres_counted = pd.DataFrame(genres_list['genres'].value_counts()) genres_counted.reset_index(inplace = True) genres_counted.rename(columns = {'index':'genres','genres':'count'}, inplace = True) genres_counted.head(50)The table shows the number of times that a genre appears through all the movies in the dataset. There are 10 651 movies which do not have a genre provided, as seen in row 19. Row 14 also indicates that IMAX is considered a genre.sns.set(style="darkgrid") sns.barplot(x='count', y=genres_counted['genres'].head(20), data=genres_counted, palette='Set1').set_title('Distribution of movie genres') plt.xlabel('Count') plt.ylabel('Genre') plt.ticklabel_format(style='plain', axis='x') plt.show()Drama is the most common genre throughout the movies. Genres are important as they show the preferences of viewers, and based on the genre that a viewer likes, movies of the same genre can be recommended.Movies in the dataset are associated with tags. The relevance of these tags is also provided.genome_df = pd.merge(gen_scores,gen_tgs, how = 'left',on = 'tagId') genome_df.head()The relevance values range from 0, or near 0, to 1. A small value (closer to 0) indicates that a tag is irrelevant/less relevant to the movie and vice versa. The most important thing to do is to determine the most relevant tags and how many of them can potentially be used in a good recommender system.genre_tag_df = genome_df.merge(mvs, on = 'movieId', how = 'outer') genre_tag_df.head() genre_tag_df.dropna(axis = 0, inplace = True)##dropping movies with null tags genre_tag_df['tag_rank'] = genre_tag_df.groupby("movieId")["relevance"].rank(method = "first", ascending = False).astype('int64') genre_tag_df[genre_tag_df.title == 'We Were Soldiers'][['movieId','title','tag','relevance', 'tag_rank']].sort_values(by = 'relevance' , ascending = False).head(10)The tags have been ranked per movie in descending order, with the most relevant at the top. The relevance scores seem quite accurate, as they all describe what the movie entails, what it is based on and the message it delivers. The next step is to determine the number of tags which can give sufficient information to accurately predict the rating that a user will give and to accurately recommend movies to the user.genome_rank_agg = genre_tag_df.groupby('tag_rank')['relevance'].median().reset_index(name = 'relevance_median').head(100) ##calculating the percentage change in the median of the ranks. we will look for a point where the change in the median becomes small. genome_rank_agg['relevance_median_%_chg'] = genome_rank_agg['relevance_median'].pct_change() sns.lineplot(x="tag_rank", y="relevance_median_%_chg", data = genome_rank_agg).set_title('Change in relevant scores') plt.xlabel('Tag rank') plt.ylabel('Relevance (change in median %)') plt.show()The relevance scores stabilize at around rank 60; therefore, the top 60 tags, ranked according to their relevance scores, would be sufficient. 
The above figure was adopted from https://towardsdatascience.com/how-to-build-a-simple-movie-recommender-system-with-tags-b9ab5cb3b616. However, a collaborative based approach will be undertaken therefore these tags will not be used. 8. Feature Engineering A colaborative recommender system approach will be undertaken for this project. The ratings of the unseen movies will be predicted using a library called Suprise. Surprise (Simple Python RecommendatIon System Engine) is a SciPy Toolkit that uses explicit rating data to create and evaluate colaborative recommender systems.Feature engineering is the process of using data techniques to select, extract and transform features from the data in order to accurately represent the underlying structure in a form that is easily interpreted by algorithms. It is done to improve the performance of ML algorithms. Surprise requires a reader object to be defined in order for it to be able to parse through the dataframe. The rating parameter has to be specified and the data must be loaded in a raw format. The dataframe must have three columns, corresponding to the 'user ids', the 'item ids' and the ratings in this order.# Load reader and data in surprise friendly format reader = Reader(rating_scale=(0.5, 5)) train2 = Dataset.load_from_df(train1[['userId', 'movieId', 'rating']], reader)9. Modelling Surprise contains a vast catalogue of algorithms. For this exercise, the Baseline Only, Singular Value Decomposition (SVD) and CoClustering algorithms were explored. Root Mean Square Error (RMSE) was chosen as the performance metric. It measures the average magnitude of the error in the model by computing the differences between the predicted values and the actual values.The Baseline Only algorithm is a basic algorithm that predicts the baseline estimate for the given users and items. The algorithm is very simple and does not do any 'heavy' work but it is still useful for comparing accuracies with other models.# Baseline Only model algo_b = BaselineOnly() trainset, testset = train_test_split(train2, test_size = 0.1) # train and test algorithm. algo_b.fit(trainset) predictions_b = algo_b.test(testset) # Compute and print Root Mean Squared Error accuracy.rmse(predictions_b, verbose=True)SVD is a matrix factorization-based filtering algorithm that is equivalent to Probabilistic Matrix Factorization.# SVD model algo_svd = SVD() trainset, testset = train_test_split(train2, test_size = 0.1) # train and test algorithm. algo_svd.fit(trainset) predictions_svd = algo_svd.test(testset) # Compute and print Root Mean Squared Error accuracy.rmse(predictions_svd, verbose=True)Co-clustering is another matrix factorization-based filtering algorithm built on the principle of co-clustering. It produces row and column clusters through simultaneously grouping objects and features in a matrix.# CoClustering model algo_cc = CoClustering() trainset, testset = train_test_split(train2, test_size = 0.1) # train and test algorithm. algo_cc.fit(trainset) predictions_cc = algo_cc.test(testset) # Compute and print Root Mean Squared Error accuracy.rmse(predictions_cc, verbose=True)NMF is a matrix factorization-based filtering algorithm based on Non-negative Matrix Factorization. It is similar to SVD.algo_nmf = NMF() trainset, testset = train_test_split(train2, test_size = 0.1) # train and test algorithm. algo_nmf.fit(trainset) predictions_nmf = algo_nmf.test(testset) # Compute and print Root Mean Squared Error accuracy.rmse(predictions_nmf, verbose=True)10. 
Model evaluation To evaluate the performances of the selected algorithms, they can be visualised.fig,axis = plt.subplots(figsize=(10, 6)) x1 = ['Baseline Only', 'SVD', 'Co-Clustering', 'NMF'] y1 = [0.8659, 0.8266, 0.8954 ,0.8791] g = sns.barplot(x= x1, y= y1) plt.title('RMSE Values',fontsize=15) plt.ylabel('RMSE') plt.xticks() #for p in g.patches: #g.text(p.get_x() + p.get_width()/2., p.get_height(), '%d' % int(p.get_height()), fontsize=12, ha='center', va='bottom') plt.show()Discussion of graph. These models were built on the default parameters of the algorithms and can be optimised through hyperparameter tuning. This will only be done for the SVD model as it is the best performing model.Hyperparameter tuning is a method used to improve the performances of models by selecting a set of hyperparameters to tune. Hyperparameters are parameters whose values can be controlled during the process of training a model. From previous work studied and knowledge of how the SVD model works, the hyperparameters n_factors, n_epochs, lr_all and reg_all were chosen for tuning.algo_svd_hyp = SVD(n_factors= 150, n_epochs=75, lr_all=0.01, reg_all= 0.1) trainset, testset = train_test_split(train1, test_size = 0.1) algo_svd_hyp.fit(trainset) predictions_hyp = algo_svd_hyp.test(testset) accuracy.rmse(predictions_hyp)The default parameters for the SVD algorithm are very close to optimal. Hyperparameter tuning has improved the RMSE from 0.8266 to 0.8202, making a 0.77% difference. This hypertuned model will be used to make predictions on the unseen data.# Predict ratings on test data using best model SVD ratings = [] for index, row in test.iterrows(): ratings.append(algo_svd_hyp.predict(row.userId, row.movieId)[3]) #print(rat) df = pd.DataFrame(ratings,columns=['rating']) results = pd.DataFrame({"user":test['userId'],"rating": df['rating']}) ss = ss.copy() ss['rating'] = results['rating'] ss.to_csv("predictions", index=False) # Dictionary of data to be logged # params = {"n_factors": 150, #"n_epochs":"75", #"lr_all":"0.01", #"reg_all":"0.1"} # metric = {"RMSE": xxxx} #Log parameters and results # experiment.log_paramaters(params) # experiemnt.log_metrics(metrics)11. Conclusion 12. End Comet experiment#experiment.end() #experiment.display()Support Vector Machine (SVM)(Maximal margin classifiers)Support Vector Machines (SVM) separates classes of data by maximizing the "space" (margin) between pairs of these groups. Classification for multiple classes is then supported by a one-vs-all method (just like we previously did for Logistic Regression for Multi-class classification). Introduction to Support Vector MachinesA Support Vector Machine (SVM) is a discriminative classifier formally defined by a separating hyperplane. In other words, given labeled training data (supervised learning), the algorithm outputs an optimal hyperplane which categorizes new examples.In which sense is the hyperplane obtained optimal? Let’s consider the following simple problem:We'll start by imagining a situation in which we want to seperate a training set with two classes. We have two classes in our set, blue and red. We plot them out in the feature space and we try to place a green line that seperates both classes.from IPython.display import Image Image(url="http://docs.opencv.org/2.4/_images/separating-lines.png")In the above picture you can see that there exists multiple lines that offer a solution to the problem. Is any of them better than the others? 
We can intuitively define a criterion to estimate the worth of the lines:A line is bad if it passes too close to the points because it will be noise sensitive and it will not generalize correctly. Therefore, our goal should be to find the line passing as far as possible from all points.Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest minimum distance to the training examples. Twice, this distance receives the important name of margin within SVM’s theory. Therefore, the optimal separating hyperplane maximizes the margin of the training data.Image(url="http://docs.opencv.org/2.4/_images/optimal-hyperplane.png")In machine learning, support vector machines (SVMs) are supervised learning models with associated learning algorithms that analyze data and recognize patterns, used for classification and regression analysis. Given a set of training examples, each marked for belonging to one of two categories, an SVM training algorithm builds a model that assigns new examples into one category or the other, making it a non-probabilistic binary linear classifier. An SVM model is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall on.The advantages of support vector machines are:* Effective in high dimensional spaces.* Still effective in cases where number of dimensions is greater than the number of samples.* Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.* Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.The disadvantages of support vector machines include:* If the number of features is much greater than the number of samples, the method is likely to give poor performances.* SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation (see Scores and probabilities, below). So how do we actually mathematically compute that optimal hyperplane? 
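In brief, and stated here only as the standard textbook formulation: for training points $x_i$ with labels $y_i \in \{-1, +1\}$, the maximal-margin hyperplane $w \cdot x + b = 0$ is the solution of $$\min_{w,\, b} \; \frac{1}{2}\lVert w \rVert^2 \quad \text{subject to} \quad y_i\,(w \cdot x_i + b) \ge 1 \;\; \text{for all } i,$$ and the resulting margin is $2/\lVert w \rVert$.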
A full explanation can be found in [Wikipedia](http://en.wikipedia.org/wiki/Support_vector_machine) SVM with Sci-Kit LearnNow we are ready to jump into some Python code and Sci-Kit Learn, we'll start with some basic imports and we will import Sci Kit Learn along the way while we use it.#Imports import numpy as np import matplotlib.pyplot as plt %matplotlib inlineFirst we'll start by importing the Data set we are already very familiar with the Iris Data Set from last lecture:from sklearn import datasets # load the iris datasets iris = datasets.load_iris() # Grab features (X) and the Target (Y) X = iris.data Y = iris.target # Show the Built-in Data Description print iris.DESCRIris Plants Database Notes ----- Data Set Characteristics: :Number of Instances: 150 (50 in each of three classes) :Number of Attributes: 4 numeric, predictive attributes and the class :Attribute Information: - sepal length in cm - sepal width in cm - petal length in cm - petal width in cm - class: - Iris-Setosa - Iris-Versicolour - Iris-Virginica :Summary Statistics: ============== ==== ==== ======= ===== ==================== Min Max Mean SD Class Correlation ============== ==== ==== ======= ===== ==================== sepal length: 4.3 7.9 5.84 0.83 0.7826 sepal width: 2.0 4.4 3.05 0.43 -0.4194 petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) ============== ==== ==== ======= ===== ==================== :Missing Attribute Values: None [...]Now we will import the [SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) (Support Vector Classification) from the [SVM library of Sci Kit Learn](http://scikit-learn.org/stable/modules/svm.html), I encourage you to check out the other types of SVM options in the Sci Kit Learn Documentation!# Support Vector Machine Imports from sklearn.svm import SVC # Fit a SVM model to the data model = SVC()Now we will split the data into a training set and a testing set and then train our model.from sklearn.cross_validation import train_test_split # Split the data into Trainging and Testing sets X_train, X_test, Y_train, Y_test = train_test_split(X, Y) # Fit the model model.fit(X_train,Y_train)Now we'll go ahead and see how well our model did!from sklearn import metrics # Get predictions predicted = model.predict(X_test) expected = Y_test # Compare results print metrics.accuracy_score(expected,predicted)1.0Looks like we have achieved a 100% accuracy with Support Vector Classification!Now that we've gone through a basic implementation of SVM lets go ahead and quickly explore the various kernel types we can use for classification. We can do this by plotting out the boundaries created by each kernel type! We'll start with some imports and by setting up the data.If we want to do non-linear classification we can employ the [kernel trick](http://en.wikipedia.org/wiki/Kernel_method). Using the kernel trick we can "slice" the feature space with a Hyperplane. 
For a quick illustraion of what this looks like, check out both the image and the video below!# Kernel Trick for the Feature Space from IPython.display import Image url='http://i.imgur.com/WuxyO.png' Image(url) # Kernel Trick Visualization from IPython.display import YouTubeVideo YouTubeVideo('3liCbRZPrZA')The four methods we will explore are two linear models, a Gaussian [Radial Basis Function](http://en.wikipedia.org/wiki/Radial_basis_function),and a SVC with a polynomial (3rd Degree) kernel.The linear models LinearSVC() and SVC(kernel='linear') yield slightly different decision boundaries. This can be a consequence of the following differences:* LinearSVC minimizes the squared hinge loss while SVC minimizes the regular hinge loss.* LinearSVC uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while SVC uses the One-vs-One multiclass reduction.# Import all SVM from sklearn import svm # We'll use all the data and not bother with a split between training and testing. We'll also only use two features. X = iris.data[:,:2] Y = iris.target # SVM regularization parameter C = 1.0 # SVC with a Linear Kernel (our original example) svc = svm.SVC(kernel='linear', C=C).fit(X, Y) # Gaussian Radial Bassis Function rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y) # SVC with 3rd degree poynomial poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y) # SVC Linear lin_svc = svm.LinearSVC(C=C).fit(X,Y)Now that we have fitted the four models, we will go ahead and begin the process of setting up the visual plots. Note: This example is taken from the Sci-Kit Learn Documentation.First we define a mesh to plot in. We define the max and min of the plot for the y and x axis by the smallest and larget features in the data set. We can use numpy's built in [mesh grid](http://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html) method to construct our plot.# Set the step size h = 0.02 # X axis min and max x_min=X[:, 0].min() - 1 x_max =X[:, 0].max() + 1 # Y axis min and max y_min = X[:, 1].min() - 1 y_max = X[:, 1].max() + 1 # Finally, numpy can create a meshgrid xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))Now the plot titles# title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel']Finally we will go through each model, set its position as a subplot, then scatter the data points and draw a countour of the decision boundaries.# Use enumerate for a count for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. 
plt.figure(figsize=(15,15)) # Set the subplot position (Size = 2 by 2, position deifined by i count plt.subplot(2, 2, i + 1) # SUbplot spacing plt.subplots_adjust(wspace=0.4, hspace=0.4) # Define Z as the prediction, not the use of ravel to format the arrays Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) # Contour plot (filled with contourf) plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, alpha=0.5) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Dark2) # Labels and Titles plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()**Setting Up**%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import os from glob import glob import seaborn as sns from PIL import Image np.random.seed(123) from sklearn.preprocessing import label_binarize from sklearn.metrics import confusion_matrix import itertools import keras from keras.utils.np_utils import to_categorical # used for converting labels to one-hot-encoding from keras.models import Sequential from keras.layers import Activation, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, MaxPool2D from keras import backend as K import itertools from keras.layers.normalization import BatchNormalization #from keras.utils.np_utils import to_categorical # convert to one-hot-encoding from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import seaborn from keras.utils.data_utils import Sequence from imblearn.over_sampling import RandomOverSampler from sklearn.utils import resample import cv2 skin_df = pd.read_pickle('DATA.pkl') skin_df.head()**Balancing the Dataset**df_0 = skin_df[skin_df['ReactBin'] == 0] df_1 = skin_df[skin_df['ReactBin'] == 1] print('There are',df_0.shape[0],'patches with NO REACTION') print('there are',df_1.shape[0],'patches with REACTION') df_0_shuffled = shuffle(df_0, random_state=123) df_1_shuffled = shuffle(df_1, random_state=123) print('df_0 and df_1 are now shuffled') df_0_train_orig, df_0_test = train_test_split(df_0_shuffled, test_size=0.2, random_state = 123) df_0_train, df_0_validate = train_test_split(df_0_train_orig, test_size=0.25, random_state = 123) df_1_train_orig, df_1_test = train_test_split(df_1_shuffled, test_size=0.2, random_state = 123) df_1_train, df_1_validate = train_test_split(df_1_train_orig, test_size=0.25, random_state = 123) print('df_0_train has',df_0_train.shape[0],'patches') print('df_0_validate has',df_0_validate.shape[0],'patches') print('df_0_test has',df_0_test.shape[0],'patches') print('In total, df_0 has',df_0_train.shape[0]+df_0_validate.shape[0]+df_0_test.shape[0], 'negative patches') print('\n') print('df_1_train has',df_1_train.shape[0],'patches') print('df_1_validate has',df_1_validate.shape[0],'patches') print('df_1_test has',df_1_test.shape[0],'patches') print('In total, df_1 has',df_1_train.shape[0]+df_1_validate.shape[0]+df_1_test.shape[0], 'positive patches') df_1_train_resampled = resample(df_1_train, replace=True, n_samples=df_0_train.shape[0], random_state=123) df_1_validate_resampled = resample(df_1_validate, replace=True, n_samples=df_0_validate.shape[0], random_state=123) df_1_test_resampled = resample(df_1_test, replace=True, n_samples=df_0_test.shape[0], random_state=123) print('We 
have now resampled the positive patch data to equal that of the negative patch data') print('df_1_train_resampled has',df_1_train_resampled.shape[0],'positive patches') print('df_1_validate_resampled has',df_1_validate_resampled.shape[0],'positive patches') print('df_1_test_resampled has',df_1_test_resampled.shape[0],'positive patches') df_train_balanced = pd.concat([df_0_train, df_1_train_resampled]) df_validate_balanced = pd.concat([df_0_validate, df_1_validate_resampled]) df_test_balanced = pd.concat([df_0_test, df_1_test]) print('We have now concatenated the train, validate, and test datasets!')We have now concatenated the train, validate, and test datasets!**Normalizing Images**datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180) zoom_range = 0.1, # Randomly zoom image width_shift_range=0.2, # randomly shift images horizontally (fraction of total width) height_shift_range=0.2, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=True) # randomly flip images X_train =df_train_balanced.drop(columns=['ReactBin'],axis=1) X_validate =df_validate_balanced.drop(columns=['ReactBin'],axis=1) X_test =df_test_balanced.drop(columns=['ReactBin'],axis=1) y_train =df_train_balanced['ReactBin'] y_validate =df_validate_balanced['ReactBin'] y_test =df_test_balanced['ReactBin'] X_train = np.asarray(X_train['image'].tolist()) X_validate = np.asarray(X_validate['image'].tolist()) X_test = np.asarray(X_test['image'].tolist()) X_train = X_train/255 X_validate = X_validate/255 X_test = X_test/255 X_train = X_train.reshape(X_train.shape[0], *(100, 100, 3)) X_test = X_test.reshape(X_test.shape[0], *(100, 100, 3)) X_validate = X_validate.reshape(X_validate.shape[0], *(100, 100, 3)) datagen.fit(X_train) datagen.fit(X_validate) datagen.fit(X_test)**Building the Model**input_shape = (100, 100, 3) num_classes = 2 model = Sequential() model.add(Conv2D(32, (3, 3), input_shape=input_shape)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, (3, 3), kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(64)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) model.summary() optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model.compile(optimizer = optimizer , loss = "binary_crossentropy", metrics=["binary_accuracy"]) epochs = 200 batch_size = 64 history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size), epochs = epochs, validation_data = (X_validate,y_validate), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size ) # Uncomment when you want to save! 
# model.save('saved_model.h5')**Visualizing the Results**model = keras.models.load_model('/content/drive/My Drive/Patch_NN/TestPatch_20210525_1.h5') def plot_model_history(model_history): fig, axs = plt.subplots(1,2,figsize=(15,5)) # summarize history for accuracy axs[0].plot(range(1,len(model_history.history['binary_accuracy'])+1),model_history.history['binary_accuracy']) axs[0].plot(range(1,len(model_history.history['val_binary_accuracy'])+1),model_history.history['val_binary_accuracy']) axs[0].set_title('Model Accuracy') axs[0].set_ylabel('Accuracy') axs[0].set_xlabel('Epoch') axs[0].set_xticks(np.arange(1,len(model_history.history['binary_accuracy'])+1),len(model_history.history['binary_accuracy'])/10) axs[0].legend(['train', 'val'], loc='best') # summarize history for loss axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss']) axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss']) axs[1].set_title('Model Loss') axs[1].set_ylabel('Loss') axs[1].set_xlabel('Epoch') axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10) axs[1].legend(['train', 'val'], loc='best') plt.show() plot_model_history(history) # Run the model on the test dataset model.evaluate(X_test, y_test) def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 2. 
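# cells whose count exceeds half of the maximum (thresh) are annotated in white in the loop below, the rest in black, for contrast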
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') mythreshold=0.089938596 from sklearn.metrics import confusion_matrix y_pred = (model.predict(X_test)>= mythreshold).astype(int) cm=confusion_matrix(y_test, y_pred) plot_confusion_matrix(cm, classes = range(2)) from sklearn.metrics import roc_curve y_preds = model.predict(X_test).ravel() fpr, tpr, thresholds = roc_curve(y_test, y_preds) plt.figure(1) plt.plot([0, 1], [0, 1], 'y--') plt.plot(fpr, tpr, marker='.') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.show() import pandas as pd i = np.arange(len(tpr)) roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'thresholds' : pd.Series(thresholds, index=i)}) ideal_roc_thresh = roc.iloc[(roc.tf-0).abs().argsort()[:1]] #Locate the point where the value is close to 0 print("Ideal threshold is: ", ideal_roc_thresh['thresholds']) from sklearn.metrics import auc auc_value = auc(fpr, tpr) print("Area under curve, AUC = ", auc_value) def Find_Optimal_Cutoff(target, predicted): fpr, tpr, threshold = roc_curve(target, predicted) i = np.arange(len(tpr)) roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)}) roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]] return list(roc_t['threshold']) threshold = Find_Optimal_Cutoff(y_test, y_preds) print(threshold) from sklearn.metrics import precision_score from sklearn.metrics import accuracy_score from sklearn.metrics import recall_score Accuracy = accuracy_score(y_test,y_pred) Precision = precision_score(y_test,y_pred) Recall = recall_score(y_test,y_pred) print('Accuracy:' + str(Accuracy)) print('Precision (PPV):' + str(Precision)) print('Recall (Sensitivity):' + str(Recall)) print('') print('') from sklearn.metrics import classification_report print('Classification report') print('') print(classification_report(y_test,y_pred)) num= 1 #Select the index of image to be loaded for testing img = X_test[num] plt.imshow(img) input_img = np.expand_dims(img, axis=0) #Expand dims so the input is (num images, x, y, c) print("The prediction for this image is: ", model.predict(input_img)) print("This was labeled as: ",(model.predict(input_img)>= mythreshold).astype(int)) print("The actual label for this image is: ", np.array(y_test)[num])$$entropy=-\sum_{i=1}^NP_i\cdot\log_2P_i$$def entropy(arr): length = sum(arr) probs = [i / length for i in arr] entropy = sum([-i * log(i) for i in probs]) return entropy entropy([8, 0.1e-100]) x = np.linspace(0.1e-10, 1 - 0.1e-10, 100) data = - x * np.log(x) - (1 - x) * np.log(1 - x) plt.plot(x, data, "g-") mask = np.argmax(data) plt.plot(x[mask], data[mask], "*r") plt.grid() plt.ylabel("entropy") plt.xlabel("proba") plt.title("entropy in binary case") plt.tight_layout()$$gini = 1 - \sum_{i=1}^N\cdot P_i^2$$def gini(arr): length = sum(arr) probs = [i / length for i in arr] gini = 1 - sum([i ** 2 for i in probs]) return gini gini([19, 0, 0]) x = np.linspace(0.1e-10, 1 - 0.1e-10, 100) data = 1 - ((1 - x) ** 2 + x ** 2) mask = np.argmax(data) plt.plot(x, data, "g-") plt.plot(x[mask], data[mask], "*r") plt.grid() plt.ylabel("gini") plt.xlabel("proba") plt.title("gini in binary case") plt.tight_layout() data = [ {"level":"Senior", "lang":"Java", "Instagram":"Yes", "status":"True" }, {"level":"Senior", "lang":"Java", "Instagram":"No", 
"status":"False"}, {"level":"Mid", "lang":"Python", "Instagram":"No", "status":"True" }, {"level":"Junior", "lang":"Python", "Instagram":"No", "status":"False"}, {"level":"Junior", "lang":"R", "Instagram":"No", "status":"False"}, {"level":"Junior", "lang":"R", "Instagram":"Yes", "status":"True" }, {"level":"Mid", "lang":"R", "Instagram":"Yes", "status":"True" }, {"level":"Senior", "lang":"Python", "Instagram":"No", "status":"False"}, {"level":"Senior", "lang":"R", "Instagram":"No", "status":"False"}, {"level":"Junior", "lang":"Python", "Instagram":"Yes", "status":"False"}, {"level":"Senior", "lang":"Python", "Instagram":"Yes", "status":"True" }, {"level":"Mid", "lang":"Python", "Instagram":"No", "status":"False"}, ] df = pd.DataFrame(data) df # split by Instagram df.sort_values("Instagram") yes_true = len(df[(df["Instagram"] == "Yes") & (df["status"] == "True")]) yes_false = len(df[(df["Instagram"] == "Yes") & (df["status"] == "False")]) no_true = len(df[(df["Instagram"] == "No") & (df["status"] == "True")]) no_false = len(df[(df["Instagram"] == "No") & (df["status"] == "False")]) print(f"yes_true = {yes_true}, yes_false = {yes_false}, no_true = {no_true}, no_false = {no_false}") print(f"entropy/yes = {entropy([yes_true, yes_false])}, entropy/no = {entropy([no_true, no_false])}") print(f"gini/yes = {gini([yes_true, yes_false])}, gini/no = {gini([no_true, no_false])}") total_entropy = entropy([yes_true, yes_false]) * (yes_true + yes_false) / (yes_true + yes_false + no_true + no_false) total_entropy += entropy([no_true, no_false]) * (no_true + no_false) / (yes_true + yes_false + no_true + no_false) print(f"total entropy = {total_entropy}") # split by lang df.sort_values("lang") python_true = len(df[(df["lang"] == "Python") & (df["status"] == "True")]) python_false = len(df[(df["lang"] == "Python") & (df["status"] == "False")]) r_true = len(df[(df["lang"] == "R") & (df["status"] == "True")]) r_false = len(df[(df["lang"] == "R") & (df["status"] == "False")]) java_true = len(df[(df["lang"] == "Java") & (df["status"] == "True")]) java_false = len(df[(df["lang"] == "Java") & (df["status"] == "False")]) print(f"python_true = {python_true}, python_false = {python_false}, r_true = {r_true}, r_false = {r_false}, java_true = {java_true}, java_false = {java_false}") print(f"entropy/python = {entropy([python_true, python_false])}, entropy/r = {entropy([r_true, r_false])}, entropy/java = {entropy([java_true, java_false])}") print(f"gini/python = {gini([python_true, python_false])}, gini/r = {gini([r_true, r_false])}, gini/java = {gini([java_true, java_false])}") total_entropy = entropy([python_true, python_false]) * (python_true + python_false) / (python_true + python_false + r_true + r_false + java_true + java_false) total_entropy += entropy([r_true, r_false]) * (r_true + r_false) / (python_true + python_false + r_true + r_false + java_true + java_false) total_entropy += entropy([java_true, java_false]) * (java_true + java_false) / (python_true + python_false + r_true + r_false + java_true + java_false) print(f"total entropy = {total_entropy}") # split by level df.sort_values("level") junior_true = len(df[(df["level"] == "Junior") & (df["status"] == "True")]) junior_false = len(df[(df["level"] == "Junior") & (df["status"] == "False")]) mid_true = len(df[(df["level"] == "Mid") & (df["status"] == "True")]) mid_false = len(df[(df["level"] == "Mid") & (df["status"] == "False")]) senior_true = len(df[(df["level"] == "Senior") & (df["status"] == "True")]) senior_false = len(df[(df["level"] == "Senior") & 
(df["status"] == "False")]) print(f"junior_true = {junior_true}, junior_false = {junior_false}, mid_true = {mid_true}, mid_false = {mid_false}, senior_true = {senior_true}, senior_false = {senior_false}") print(f"entropy/junior = {entropy([junior_true, junior_false])}, entropy/mid = {entropy([mid_true, mid_false])}, entropy/senior = {entropy([senior_true, senior_false])}") print(f"gini/junior = {gini([junior_true, junior_false])}, gini/mid = {gini([mid_true, mid_false])}, gini/senior = {gini([senior_true, senior_false])}") total_entropy = entropy([junior_true, junior_false]) * (junior_true + junior_false) / (junior_true + junior_false + mid_true + mid_false + senior_true + senior_false) total_entropy += entropy([mid_true, mid_false]) * (mid_true + mid_false) / (junior_true + junior_false + mid_true + mid_false + senior_true + senior_false) total_entropy += entropy([senior_true, senior_false]) * (senior_true + senior_false) / (junior_true + junior_false + mid_true + mid_false + senior_true + senior_false) print(f"total entropy = {total_entropy}") import pydotplus from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn.datasets import load_iris from IPython.display import Image iris = load_iris() data = iris.data target = iris.target model = DecisionTreeClassifier(criterion="entropy") model.fit(data, target); dot_data = export_graphviz(model, feature_names=iris.feature_names, class_names=iris.target_names) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png()) # https://graphviz.gitlab.io/_pages/Download/Download_windows.html # then add this in the path enviroment variable df['Instagram'] = df['Instagram'].replace(["Yes", "No"], [1, 0]) df["lang"] = df["lang"].replace(["Java", "Python", "R"], [1, 2, 3]) df["level"] = df["level"].replace(["Senior", "Mid", "Junior"], [3, 2, 1]) # HW !! 
df["status"] = df["status"].replace(["True", "False"], [1, 0]) df model = DecisionTreeClassifier() model.fit(df.iloc[:, :3], df.iloc[:,3]); dot_data = export_graphviz(model, feature_names=df.columns[:-1], class_names=["reject", "accept"]) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png()) plt.bar(range(4), model.feature_importances_, tick_label=iris.feature_names) plt.grid() plt.tight_layout()---from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split digit = load_digits() data = digit.data target = digit.target x_train, x_test, y_train, y_test = train_test_split(data, target) model = DecisionTreeClassifier(max_depth=7).fit(x_train, y_train) model.score(x_test, y_test) from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators=5).fit(load_iris().data, load_iris().target) # model.score(x_test, y_test) plt.bar(range(4), model.feature_importances_, tick_label=iris.feature_names) plt.grid() plt.tight_layout()---from sklearn.preprocessing import PolynomialFeatures from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.datasets import load_digits data = load_digits() features = data.data target = data.target x_train, x_test, y_train, y_test = train_test_split(features, target) poly_x_train = PolynomialFeatures().fit_transform(x_train) model1 = RandomForestClassifier().fit(x_train, y_train) model2 = RandomForestClassifier().fit(poly_x_train, y_train) print(model1.score(x_test, y_test)) poly_x_test = PolynomialFeatures().fit_transform(x_test) print(model2.score(poly_x_test, y_test)) from sklearn.model_selection import cross_val_score cross_val_score(RandomForestClassifier(n_estimators=40, max_depth=31), features, target, cv=5).mean() x_train.shape poly_x_train.shape---from sklearn.preprocessing import StandardScaler, MinMaxScaler data = np.random.randint(0, 5, 10).reshape((-1,1)) ss = StandardScaler().fit(data) print(ss.mean_) print(ss.scale_) ss.transform(data) data from sklearn.model_selection import GridSearchCV grid = GridSearchCV(estimator=RandomForestClassifier(), verbose=1, param_grid={"n_estimators":range(5, 50, 5), "max_depth":range(5, 50, 2)}) data = PolynomialFeatures().fit_transform(features) grid.fit(data, target) grid.best_params_ grid.best_score_ 1000 200 200 200 200 200---from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import PolynomialFeatures data, label = make_classification(n_samples=300, n_features=10, n_informative=2, n_classes=2) model = RandomForestClassifier() param = { "n_estimators":range(5, 50, 5), "max_depth":range(5, 50, 2) } grid = GridSearchCV(estimator=model, param_grid=param) grid.fit(data, label); data4bar = grid.best_estimator_.feature_importances_ plt.bar(range(len(data4bar)), data4bar) plt.grid() plt.tight_layout() grid.best_params_ grid.best_score_---extended_data = PolynomialFeatures().fit()Project descriptionThe current project aims to predict the genre of movie given the overview text that describes the movie. 
For example, the overview for *The Matrix* is as follows:>Set in the 22nd century, The Matrix tells the story of a computer hacker who joins a group of underground insurgents fighting the vast and powerful computers who now rule the earth.From the above text, we would like to predict that the movie belongs to the "Action" and "Science Fiction" genres. Business object in contextWe are an internet-based movie distributing company, _NetFlux_. For new movies and original content movies, we want to make sure our staff writes overviews that will represent the correct genre of the movie. This will make our recommender system work better and ultimately provide more insight for our users to what movies they want to see.from IPython.display import Markdown as md import os from datetime import datetime import pickle movies_with_overviews_path = '../data/processed/movies_with_overviews.pkl' date_refreshed_unix = os.path.getmtime(movies_with_overviews_path) date_refreshed = datetime.utcfromtimestamp(date_refreshed_unix).strftime('%Y-%m-%d %H:%M:%S') now = datetime.now().strftime('%Y-%m-%d %H:%M:%S') with open('../data/processed/movies_with_overviews.pkl','rb') as f: movies_with_overviews=pickle.load(f) with open('../data/processed/Genredict.pkl','rb') as f: Genre_ID_to_name=pickle.load(f) genre_list=sorted(list(Genre_ID_to_name.keys())) num_movies = len(movies_with_overviews) display(md('''# Data Movie overviews and genres are scraped from TMDB. Our dataset was last refreshed at **{date_refreshed}**. Report was generated **{now}**. The data have **{num_movies}** movie overviews. '''.format(date_refreshed=date_refreshed, num_movies=num_movies, now=now)))The distribution of the genres in these movies is shown in the chart below:%matplotlib inline import pandas as pd from matplotlib import pyplot as plt from collections import Counter mwo = pd.DataFrame(movies_with_overviews) genre_ids_series = mwo['genre_ids'] flat_genre_ids = [st for row in genre_ids_series for st in row] flat_genre_names = [Genre_ID_to_name[id] for id in flat_genre_ids] genre_counts = Counter(flat_genre_names) df = pd.DataFrame.from_dict(genre_counts, orient='index') ax = df.plot(kind='bar') ax.set_ylabel('Counts of each genre') ax.legend().set_visible(False)The top 10 movies in our dataset by popularity are listed below:a=[print(x) for x in mwo.sort_values(by='popularity', ascending=False)['original_title'].head(10)]Models and FeaturesWe are currently using the following models to train against the dataset with the associated feature engineering:1. C-SVM - The overviews are using a **bag of words** model and have been vectorized and transformed using **TF_IDF**.2. Naive Bayes - The overviews are using a **bag of words** model and have been vectorized with a **Count Vectorizer**.3. Simple neural network (not deep) - The overviews were tokenized with a **white space tokenizer**. Stop words were removed. Overviews were treated as **bag of words**, which each word being converted to a vector, using the GoogleNews-vectors-negative300.bin model. The **arithmetic mean** of the words represented the overview. Taking the top 3 genres predicted for each movie. 
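To make the first two feature pipelines concrete, here is a minimal sketch of how such features could be built with scikit-learn. The variable names are hypothetical, the report's actual pre-trained models are simply loaded from ../models below, and it is assumed that each record in movies_with_overviews carries an 'overview' field from TMDB.
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB

overview_texts = [m['overview'] for m in movies_with_overviews]  # assumed TMDB field
# 1. C-SVM: bag of words weighted by TF-IDF, one binary SVM per genre
tfidf_features = TfidfVectorizer(stop_words='english').fit_transform(overview_texts)
svm_sketch = OneVsRestClassifier(SVC(kernel='linear'))
# 2. Naive Bayes: raw bag-of-words counts
count_features = CountVectorizer(stop_words='english').fit_transform(overview_texts)
nb_sketch = OneVsRestClassifier(MultinomialNB())
# e.g. svm_sketch.fit(tfidf_features, Y) with Y a binary genre-indicator matrix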
C-SVM Metrics for each genrewith open('../models/classifier_svc.pkl','rb') as f: classif=pickle.load(f) with open('../data/processed/X_tfidf.pkl','rb') as f: X=pickle.load(f) with open('../data/processed/Y.pkl','rb') as f: Y=pickle.load(f) from src.utils.eval_metrics import * from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import numpy as np indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) predstfidf=classif.predict(X_test) print (classification_report(Y_test, predstfidf, target_names=genre_names))Precision and Recall for the overall modelpredictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) md('''Precision: {prec_mean} Recall: {rec_mean} '''.format(prec_mean=prec_mean, rec_mean=rec_mean))Example predictions for a small samplepredictions=[] actuals = [] for i in range(X_test.shape[0]): pred_genres=[] actual_genres=[] movie_label_scores=predstfidf[i] actual_scores = Y_test[i] # print movie_label_scores for j in range(len(movie_label_scores)): #print j if movie_label_scores[j]!=0: genre=Genre_ID_to_name[genre_list[j]] pred_genres.append(genre) if actual_scores[j]!=0: genre=Genre_ID_to_name[genre_list[j]] actual_genres.append(genre) predictions.append(pred_genres) actuals.append(actual_genres) for i in range(X_test.shape[0]): if i%50==0 and i!=0: print ('MOVIE: ',movies_with_overviews[test_movies[i]]['title'], '\nPREDICTION: ',','.join(predictions[i]), '\nActual: ', ','.join(actuals[i]), '\n')Naive Bayes Metrics for each genrewith open('../models/classifier_nb.pkl','rb') as f: classif=pickle.load(f) with open('../data/processed/X.pkl','rb') as f: X=pickle.load(f) with open('../data/processed/Y.pkl','rb') as f: Y=pickle.load(f) from src.utils.eval_metrics import * from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import numpy as np indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) preds=classif.predict(X_test) print (classification_report(Y_test, preds, target_names=genre_names))Precision and Recall for the overall modelpredictions = generate_predictions(Genre_ID_to_name, X_test, preds) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) md('''Precision: {prec_mean} Recall: {rec_mean} '''.format(prec_mean=prec_mean, rec_mean=rec_mean))Example predictions for a small samplepredictions=[] actuals = [] for i in range(X_test.shape[0]): pred_genres=[] actual_genres=[] movie_label_scores=preds[i] actual_scores = Y_test[i] # print movie_label_scores for j in range(len(movie_label_scores)): #print j if movie_label_scores[j]!=0: genre=Genre_ID_to_name[genre_list[j]] pred_genres.append(genre) if actual_scores[j]!=0: genre=Genre_ID_to_name[genre_list[j]] actual_genres.append(genre) predictions.append(pred_genres) actuals.append(actual_genres) for i in range(X_test.shape[0]): if i%50==0 and i!=0: print ('MOVIE: ',movies_with_overviews[test_movies[i]]['title'], 
'\nPREDICTION: ',','.join(predictions[i]), '\nActual: ', ','.join(actuals[i]), '\n') print(movies_with_overviews[test_movies[100]]) print(Genre_ID_to_name[35]) print(Genre_ID_to_name[10751]) test_movies[100] classif.predict(X_test[100])Simple Neural Network with Word2Vec features Metrics for each genrefrom keras.models import load_model from sklearn.preprocessing import MultiLabelBinarizer with open('../data/processed/textual_features.pkl','rb') as f: (X,Y)=pickle.load(f) model_textual = load_model('../models/overview_nn.h5') indecies = range(len(movies_with_overviews)) X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42) genre_names=list(Genre_ID_to_name.values()) Y_preds=model_textual.predict(X_test) Y_preds_binary = [] for row in Y_preds: predicted = np.argsort(row)[::-1][:3] predicted_genre_Y = [1 if k in predicted else 0 for k in range(len(row)) ] Y_preds_binary.append(predicted_genre_Y) print (classification_report(Y_test, np.array(Y_preds_binary), target_names=genre_names))Precision and Recall for the overall modelpredictions = generate_predictions(Genre_ID_to_name, X_test, Y_preds_binary) precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions) prec_mean = np.mean(np.asarray(precs)) rec_mean = np.mean(np.asarray(recs)) md('''Precision: {prec_mean} Recall: {rec_mean} '''.format(prec_mean=prec_mean, rec_mean=rec_mean))Example predictions for a small samplefor i in range(X_test.shape[0]): if i%50==0 and i!=0: print ('MOVIE: ',movies_with_overviews[test_movies[i]]['title'], '\nPREDICTION: ',','.join(predictions[i]), '\nActual: ', ','.join(actuals[i]), '\n') from IPython.core.display import HTML def css_styling(): styles = open("../notebooks/static/custom.css", "r").read() return HTML(styles) css_styling()Simple pendulum We consider here the simple pendulum system, with no friction and with an arbitrary initial condition on the angle at time $t=0$. However, we impose a zero initial velocity. As generalized coordinate we choose the angle between the vertical and the rope. The Lagrangian of the system reads $$\mathcal{L} = \frac{1}{2} m \ell^2 \dot{\theta}^2 + m g \ell \cos\theta,$$ and the corresponding equation of motion, from which we generated the data $\theta(t)$, is $$\ddot{\theta} = -(g/\ell) \sin\theta.$$import numpy as np from scipy.integrate import odeint import matplotlib.pyplot as pltWe want to use odeint, which means we have to reduce the second-order differential equation to two first-order differential equations$$\ddot{\theta} = -(g/\ell) \sin\theta $$Let $z = \dot{\theta}$ and then we get$$ \dot{z} = -(g/\ell) \sin\theta $$def simple_pend(theta,t,g,l,m): theta1=theta[0] theta2=theta[1] # first differential equation dtheta1dt=theta2 # second differential equation dtheta2dt=-g/l*np.sin(theta1) dthetadt=[dtheta1dt,dtheta2dt] return dthetadtSet the initial conditions for the rate of change of the angle $\dot{\theta}$ and the angle $\theta$, and the values of the length of the pendulum and its mass.
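As a quick sanity check on the odeint integration in the next cell, the same first-order system can be integrated with scipy.integrate.solve_ivp while monitoring the total energy, which should stay constant for a frictionless pendulum (a sketch; the values g = l = m = 1 and theta(0) = pi/6 mirror those used below, and the variable names here are ours):

import numpy as np
from scipy.integrate import solve_ivp

def pend_rhs(t, y, g, l):
    # y[0] = theta, y[1] = dtheta/dt
    return [y[1], -g / l * np.sin(y[0])]

g, l, m = 1.0, 1.0, 1.0
sol = solve_ivp(pend_rhs, (0, 20), [np.pi / 6, 0.0], args=(g, l), t_eval=np.linspace(0, 20, 400))
theta_chk, omega_chk = sol.y
# The energy drift quantifies the integration error; it should be small.
energy = 0.5 * m * l**2 * omega_chk**2 - m * g * l * np.cos(theta_chk)
print('max energy drift:', energy.max() - energy.min())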
We set the time one-dimensional gridPi=3.1415926; g=1; l=1; m=1; theta_0=[Pi/6,0]; t = np.linspace(0,20,400) theta=odeint(simple_pend,theta_0,t,args=(g,l,m)) plt.plot(t,theta[:,0],'r--') #plt.plot(t,theta[:,1],'b-') plt.xlabel('time') plt.ylabel('theta(t)') fig, ax = plt.subplots() ax.plot(theta[:,0], theta[:, 1]) ax.set_aspect(1)Display the content of theta as a vector [$\theta, \dot{\theta}$]theta1 for toxic 0 for normaltoxic = data[data["Label"]==1] normal = data[data["Label"]==0] len(toxic),len(normal) print("Percentage of normal comments = {}".format(len(normal)/len(data)*100)) print("Percentage of toxic comments = {}".format(len(toxic)/len(data)*100)) sns.set() sns.countplot(data["Label"]) plt.show() data['char_length'] = data['Review'].apply(lambda x: len(str(x))) sns.set() data['char_length'].hist() plt.show() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(data['Review'], data['Label'], test_size=0.2, random_state=42) print(X_train.shape, X_test.shape) print(y_train.shape, y_test.shape) import pickle from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer vect = TfidfVectorizer(max_features = 10000, stop_words='english') #vect = TfidfVectorizer(stop_words='english') print(vect) pickle.dump(vect, open("./saved_models/vectorizer.pkl", "wb")) %%time X_train_dtm = vect.fit_transform(X_train.apply(lambda x: np.str_(x))) X_train_dtm pickle.dump(vect, open("./saved_models/vectorizer.pkl", "wb")) vect = pickle.load(open("./saved_models/vectorizer.pkl", 'rb')) X_train_dtm.shape %%time X_test_dtm = vect.transform(X_test.apply(lambda x: np.str_(x))) X_test_dtm X_test_dtm.shape from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix logreg = LogisticRegression(C=1, max_iter = 2000) import pickle # train the model using X_train_dtm & y_train logreg.fit(X_train_dtm, y_train) filename = "./saved_models/lr_model.pkl" pickle.dump(logreg, open(filename, 'wb')) # compute the training accuracy y_pred_train = logreg.predict(X_train_dtm) print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train))) # compute the predicted probabilities for X_test_dtm y_pred_test = logreg.predict(X_test_dtm) print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test))) print(confusion_matrix(y_test,y_pred_test))Training accuracy is 0.9597241715994386 Test accuracy is 0.9542730058853842 [[29304 256] [ 1430 5881]]Testing trained modelimport pickle model = pickle.load(open("./saved_models/lr_model.pkl", 'rb')) vect = pickle.load(open("./saved_models/vectorizer.pkl", 'rb')) sen = ["...uck"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) sen = ["Hey girl...you are beautiful"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) sen = ["Hey girl...you are nice but I think you like anal"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) sen = ["When you want to show your parents a video, and Big Black cock porn from last night is still open"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) sen = ["you get what you fuckin deserve"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) 
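The transform-and-predict pattern repeated in the cells above and below can be wrapped in a small helper (a sketch reusing the vect and model objects already loaded from the pickles; the helper name is ours):

def classify_comment(text, vectorizer=vect, classifier=model):
    # Vectorize a single comment and map the 0/1 prediction to a label.
    label = classifier.predict(vectorizer.transform([text]))[0]
    return ["allowed", "toxic"][label]

print(classify_comment("have a nice day"))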
sen = ["When you accidently message your teacher 'FRIENDS is my favrite show..You are a fuckin Hoe'"] sen_trans = vect.transform(sen) p = model.predict(sen_trans)[0] validity = ["allowed","toxic"] print(validity[p]) ! pip install -q pandas-ml %matplotlib inline from pandas_ml import ConfusionMatrix confusion_matrix = ConfusionMatrix(y_true, y_pred) confusion_matrix.plot(normalized=True)Random Selectiondataset = read.csv('Ads_CTR_Optimisation.csv') head(dataset) # Maximum clicked Ad column table(dataset['Ad.5'])So here we have Dataset which already has data of ads clicked by our customers. But we want to predict early on which ad shall be most clicked. We want to optimize the click through rates of our customers. Does any ad is actually most liked than all the rest? As its a small dataset we can see that Ad 5 gets most clicked. Now we shall consider that people clicked ads randomly and try to reward the model as it guesses correctly.#Implementing Random Selection N= 1000 d= 10 ads_selected = integer(0) total_reward = 0 for (n in 1:N){ ad = sample(1:d, 1) ads_selected = append(ads_selected, ad) reward = dataset[n, ad] total_reward = total_reward + reward } #Visualising the results hist( ads_selected, col= 'blue', main = 'Histogram of ads selection', xlab = 'ads', ylab = 'no. of times each ad was selected')We can see here from histogram of Ads clicked, It selects column 1. That's not a good model. Upper Confidence Bound Now here instead of guessing randomly. We implement Upper Confidence bound algorithm to go through dataset one row at a time in three steps:* At each round n (one row at a time), we consider two numbers for each Ad i : Number of times the ad i was selected upto round n, sum of rewards of the ad i upto round n* From these two numbers we compute : the average reward of ad i upto round n, upper bound of confidence interval upto round n * We select the Ad i that has max ucb# Implementing UCB N = 10000 d = 10 ads_selected = integer(0) numbers_of_selections = integer(d) sums_of_rewards = integer(d) total_reward = 0 for (n in 1:N) { ad = 0 max_upper_bound = 0 for (i in 1:d) { if (numbers_of_selections[i] > 0) { average_reward = sums_of_rewards[i] / numbers_of_selections[i] delta_i = sqrt(3/2 * log(n) / numbers_of_selections[i]) upper_bound = average_reward + delta_i } else { upper_bound = 1e400 } if (upper_bound > max_upper_bound) { max_upper_bound = upper_bound ad = i } } ads_selected = append(ads_selected, ad) numbers_of_selections[ad] = numbers_of_selections[ad] + 1 reward = dataset[n, ad] sums_of_rewards[ad] = sums_of_rewards[ad] + reward total_reward = total_reward + reward } #Visualising the results hist( ads_selected, col= 'blue', main = 'Histogram of ads selection', xlab = 'ads', ylab = 'no. 
of times each ad was selected')Characterize sex-bias in FAERS%load_ext autoreload %autoreload 2 import os import numpy as np import pandas as pd import feather from scipy import stats from scipy.sparse import hstack, coo_matrix, save_npz, load_npz from database import Database from utils import Utils db = Database('Mimir from Munnin') u = Utils() np.random.seed(u.RANDOM_STATE) df_patients = u.load_df('df_patients') df_patients = df_patients.sort_values(by='PID')Agedf = df_patients.groupby(['Sex','Age']).count().reset_index() f = df.query('Sex=="F"').get('PID').values m = df.query('Sex=="M"').get('PID').values t, p = stats.ttest_rel(f,m) t,pCo-medicationdrug_features = [] for i in range(1458): f = u.load_feature(str(i)) drug_features.append(f) num_drugs_feature = coo_matrix(hstack(drug_features).sum(1)) df_patients['comedication'] = num_drugs_feature.todense() df = df_patients.groupby(['Sex','comedication']).count().reset_index() f, m = [], [] for i in range(110): if not df.query('Sex=="F" and comedication==@i').empty: f.append(df.query('Sex=="F" and comedication==@i').get('PID').values[0]) else: f.append(0) if not df.query('Sex=="M" and comedication==@i').empty: m.append(df.query('Sex=="M" and comedication==@i').get('PID').values[0]) else: m.append(0) t, p = stats.mannwhitneyu(f,m) t,pPRR before/after applying AwareDX# load data df_patients = u.load_df('df_patients') sex_adr = db.get_list('select meddra_pt_id from gender_terms') drugs = db.get_list('select atc_5_id from atc_5_name') zero = [(drug,adr) for drug,adr in u.load_np('prr_zero')] test = [(drug,adr) for drug,adr in u.load_np('prr_test')] df_prior = u.load_df('df_prr_prior') done = [] for drugID in drugs: filename = 'Post_PRR/'+str(drugID) pth = u.DATA_PATH+filename+'.feather' if os.path.exists(pth): x = u.load_df(filename) if x.empty: x = [] for drug,adr in ((drug,adr) for drug,adr in test if drug==drugID): info = {'drug':drug, 'adr':adr, 'a_post':0, 'c_post':0, 'sex':'M'} x.append(info) info = {'drug':drug, 'adr':adr, 'a_post':0, 'c_post':0, 'sex':'F'} x.append(info) x = pd.DataFrame(x) done.append(x) df_post = pd.concat(done,sort=False) # calculate prr df_prr = pd.merge(df_post, df_prior,'inner') df_prr['a_post'] = df_prr.get(['a_post']).astype(int) df_prr['c_post'] = df_prr.get(['c_post']).astype(int) df_prr = df_prr.eval('ac = a+c').eval('bd = b+d').eval('ac_post = a_post+c_post') df_prr = df_prr.eval('p1 = a/ac').eval('p2 = b/bd').eval('p1_post = a_post/ac_post') df_prr = df_prr.eval('prior = p1/p2').eval('post = p1_post/p2').replace(float('inf'),float('nan')).dropna() df_prr = df_prr.get(['adr','drug','sex','prior','post']) # add zero prrs data = [] for drug, adr in zero: rec = {'drug':drug, 'adr':adr, 'sex':'M', 'prior':0, 'post':0} data.append(rec) rec = {'drug':drug, 'adr':adr, 'sex':'F', 'prior':0, 'post':0} data.append(rec) df_prr = pd.concat([pd.DataFrame(data), df_prr], sort=False) # type conversion df_prr['adr'] = np.array(df_prr.get('adr').values, dtype=str) df_prr['prior'] = np.array(df_prr.get('prior').values, dtype=float) df_prr['post'] = np.array(df_prr.get('post').values, dtype=float) # average over all drugs df_prr_mean = df_prr.groupby(['adr','sex']).mean().get(['prior','post']).reset_index() drop = df_prr_mean.groupby('adr').count().query('sex!=2').index.values df_prr_mean = df_prr_mean.query('adr not in @drop') # performance df_prr_mean.eval('diff = prior-post',inplace=True) improve = df_prr_mean.query('diff>=0').shape[0]/df_prr_mean.shape[0] print('PRR improves for '+str(round(improve*100,2))+'% of adrs') 
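For reference, the chained DataFrame.eval calls above compute a proportional reporting ratio of the form $$\mathrm{PRR} = \frac{a/(a+c)}{b/(b+d)},$$ where, matching the column names in df_prior (the exact contingency-table construction is not shown in this notebook, so the interpretation is an assumption), a and c count reports with and without the ADR in the exposed group and b and d the corresponding counts in the comparator group; the 'post' value recomputes the numerator from a_post and c_post after AwareDX is applied.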
# ADRs with most biased def assign_adr_name(df): q = 'select meddra_pt_id,meddra_pt_name from gender_terms where meddra_pt_id in (' for x in df.get('adr').values: q+=str(x)+', ' q = q[:-2] +')' dct = dict(db.run(q)) df.loc[:,'adr_name'] = [dct[int(x)] for x in df.get('adr').values] return df def diff(df): f = df.query('sex=="F"').get('prior').values m = df.query('sex=="M"').get('prior').values diff = abs(f-m) df.loc[:,'diff']=diff return df df_prior_bias = df_prr_mean.get(['adr','sex','prior']).groupby(['adr']).apply(diff).get(['adr','diff']).drop_duplicates() assign_adr_name(df_prior_bias.sort_values(by='diff')[-5:])Figurefrom IPython.display import display import matplotlib import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline %config InlineBackend.figure_format = 'retina' def set_style(): plt.style.use(['seaborn-white', 'seaborn-paper']) matplotlib.rc("font", family="Arial") set_style() custom_palette = ['skyblue','palevioletred'] sns.set_palette(custom_palette) sns.palplot(custom_palette) plot_adrs = df_prr_mean.sort_values(by='diff').get('adr').values[-16:] df = df_prr_mean.query('adr in @plot_adrs') df = assign_adr_name(df) df = df.sort_values(by='prior') df = df.melt(id_vars=['adr_name','sex'], value_vars=['prior','post'], var_name='type', value_name='PRR') order order = list(np.unique(df.get('adr_name').values)) for name in ['Prostatic acid phosphatase increased', 'Biopsy ovary', 'Vaginal pessary insertion', 'Balanitis candida', 'Benign prostatic hyperplasia', 'Gestational diabetes', 'Vulval haematoma', 'Ovarian failure postoperative', 'Penis carcinoma recurrent', 'Uterine mass']: order.remove(name) order.append(name) col_wrap=4 g = sns.catplot( x='type', y='PRR', hue='sex', hue_order = ['M','F'], data=df, col='adr_name', col_wrap=col_wrap, col_order = order, kind='bar', height=8, sharex=True, sharey=False, legend_out=False ) fs=30 fs2=25 g.set_titles('{col_name}',fontsize=fs) g.set_axis_labels('','PRR') g.set_ylabels(fontsize=fs) g.set_xticklabels(['Before','After'],fontsize=fs) x_lim = (-1,2) for ax in g.axes: ax.hlines(1, x_lim[0], x_lim[1], 'k','dashed', alpha=0.5) ax.set_xlim(x_lim) ylims = [2, 3, 4, 15] for i, (start, end) in enumerate(zip(range(0,16,col_wrap), range(col_wrap,20,col_wrap))): for j, ax in enumerate(g.axes[start:end]): ax.set_ylim((0,ylims[i])) if ax.get_legend(): ax.get_legend().set_visible(False) ax.set_title(ax.get_title(), fontsize=fs, pad=10) ax.tick_params('y',labelsize=fs2) if j!=0: ax.yaxis.set_visible(False) else: ax.set_ylabel('PRR',labelpad=20) for ax in g.axes[-4:]: ax.tick_params('x',pad=20) plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=2.0) plt.show()Ensemble ModelEn este notebook se prepara el ensamble de modelos usado en la última submission. El valor **ncdg** en el **leaderboard** público fue **0.31293** consiguiendo la segunda posición. SummaryEl ensamble consiste en tomar las predicciones de los 2 modelos de factorización de matrices (MF) entrenados (notebook 02-AlternatingLeastSquaresModel). Se tomaron hasta las 50 mejores predicciones promediando sus scores.La segunda parte consiste en scorear a los productos visitados en la sesión (considerando los pesos por posición). Este simple enfoque que consigue buenos resultados, permite complementar a los modelos MF, sobre todo en los casos de visitas en ítems que no se usaron en el entrenamiento (limitación de cold start). 
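A condensed sketch (function name ours) of the position-weighted session scoring described above; it uses the same 1 / log10(position + 1) discount that appears in the evaluation cells further below, with positions counted from the most recent view and the resulting scores normalized:

import numpy as np

def session_view_scores(item_views):
    # Most recent view gets the largest weight; scores are then normalized to sum to 1.
    scores = {}
    for pos, item in enumerate(reversed(item_views), 1):
        scores[item] = scores.get(item, 0.0) + 1.0 / np.log10(pos + 1)
    total = sum(scores.values())
    return {item: s / total for item, s in scores.items()} if total else {}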
Se normalizan los scores entre 0 y 1 para ponerlos en la misma escala que las predicciones de MF.Se combinan las predicciones usando una suma de scores por pesos, dándole más importancia a la predicciones MF. Y finalmente si las primeras 3 predicciones tienen un score alto, solo se seleccionan ítems de esos dominios en la predicción. ResultadosEl valor **ncdg** en test fue **0.3180** y en validación **0.3186**. Tomando el dominio del primer producto recomendado, se logra un accuracy de **0.42** sobre el dominio del producto comprado.import pandas as pd import numpy as np import json from collections import Counter, defaultdict import heapq import pickleLee del catatálogo los dominios de los productos (se usa en la evalaución)ITEM_TO_DOMAIN = {} with open("./data/item_data.jl", "rt") as fd: for line in fd: data = json.loads(line) ITEM_TO_DOMAIN[data["item_id"]] = data["domain_id"] ITEM_TO_COUNTRY = {} with open("./data/item_data.jl", "rt") as fd: for line in fd: data = json.loads(line) ITEM_TO_COUNTRY[data["item_id"]] =data["category_id"][2] IDCG = np.sum([(1 if i != 1 else 12) / np.log2(1 + i) for i in range(1, 11)]) def dcg(rec, y_item_id, n=10): y_domain = ITEM_TO_DOMAIN[y_item_id] return np.sum([(1 if yhat_item_id != y_item_id else 12) / np.log2(1 + i)\ for i, yhat_item_id in enumerate(rec[:n], 1)\ if (ITEM_TO_DOMAIN[yhat_item_id] == y_domain)])Los productos sin precio o dominio se excluyen de las recomendacionesBLACK_LIST = set() with open("./data/item_data.jl", "rt") as fd: for line in fd: data = json.loads(line) if (data["domain_id"]== None): BLACK_LIST.add(data["item_id"]) len(BLACK_LIST)Lee recomendaciones de modelos de factorización de matriceswith open("data/models/implicit_test_reco_scores_model1.pkl", "rb") as fd: test_mf_vars = pickle.load(fd) test_mf_scores = test_mf_vars["test_reco_scores"] val_mf_scores = test_mf_vars["val_reco_scores"] with open("data/models/implicit_test_reco_scores_model2.pkl", "rb") as fd: test_mf_vars = pickle.load(fd) test_mf_scores_2 = test_mf_vars["test_reco_scores"] val_mf_scores_2 = test_mf_vars["val_reco_scores"] with open("data/models/implicit_matrix_variables.pkl", "rb") as fd: ITEM_TO_IDX = pickle.load(fd)["ITEM_TO_IDX"]Recomendaciones por popularidadSe usan como relleno (cold-start).Se toman productos más visitados, y más visitados por dominio.most_viewed_items = Counter() most_viewed_by_domain = {} line_idx = 0 with open("./data/train_dataset.jl", "rt") as fd: for line in fd: line_idx += 1 data = json.loads(line) view = [event["event_info"] for event in data["user_history"] if event["event_type"] == "view"] views_counter.update(view) for item_id in set(view): domain = ITEM_TO_DOMAIN[item_id] if not domain in most_viewed_by_domain: most_viewed_by_domain[domain] = Counter() most_viewed_by_domain[domain][item_id] +=1Más visitados por paísmost_viewed_items_br =[item_id for item_id, _ in Counter({item_id: count for item_id, count\ in most_viewed_items.items() if ITEM_TO_COUNTRY[item_id] == "B" }).most_common(10)] most_viewed_items_mx =[item_id for item_id, _ in Counter({item_id: count for item_id, count\ in most_viewed_items.items() if ITEM_TO_COUNTRY[item_id] == "M" }).most_common(10)] views_counter = most_viewed_items most_viewed_items = [item for item, _ in most_viewed_items.most_common(10)] for domain, counter in most_viewed_by_domain.items(): most_viewed_by_domain[domain] = [item for item, _ in counter.most_common(10)]Función para rellenar recomendaciones:def fill_rec(rec, fill, n=10 ): assert len(fill) >= n fill_index = 0 while len(rec) < 
n: if fill[fill_index] not in rec: rec.append(fill[fill_index] ) fill_index += 1 return recEnsamble de modelos Evaluación en testW0 = 1.0 # peso usado para predicciones por visitas WI = 1.5 # peso usado para predicciones de MF n_recs = 0 y_test = [] model_sum_dcg = 0.0 tp_domain = 0 with open("./data/train_dataset-test_split.jl", "rt") as fd: for line in fd: data = json.loads(line) item_bought = data["item_bought"] items_views = [event["event_info"] for event in data["user_history"] if event["event_type"] == "view"] y_test.append(item_bought) # promedia predicciones por modelos MF model_rec_scores = {i:s for i, s in test_mf_scores[n_recs]} model_rec_scores_2 = {i:s for i, s in test_mf_scores_2[n_recs]} model_rec_scores = {i: (model_rec_scores.get(i, 0) *0.5 +\ model_rec_scores_2.get(i, 0) * 0.5) for i in (model_rec_scores.keys() | model_rec_scores_2.keys())} # Ranking de items visitados items_pv_count = {} items_views = items_views[::-1] for pos, item_view in enumerate(items_views, 1): items_pv_count[item_view] = items_pv_count.get(item_view,0) + 1 / np.log10(pos + 1) rec_scores = defaultdict(dict) # scores por visitas for item_view, pv_count in items_pv_count.items(): # Asigna un score por item visitado rec_scores[item_view] = rec_scores.get(item_view, 0) + pv_count # normaliza scores por visitas sum_scores = sum([s for s in rec_scores.values()]) if sum_scores: c = ITEM_TO_COUNTRY[items_views[0]] rec_scores = {i: s / sum_scores for i, s in rec_scores.items() } # excluye recomendaciones de bajo score rec_scores = {i: s for i, s in rec_scores.items() if s > 0.05} # Suma ambos scores usando pesos. Si el item_id no se uso en el entrenamiento modelo de MF, suma una constante. rec_scores = {i: model_rec_scores.get(i, 0) * WI + rec_scores.get(i, 0) * W0 + 0 if i in ITEM_TO_IDX else 0.2\ for i in (rec_scores.keys() | model_rec_scores.keys()) if not i in BLACK_LIST } # ordena por score rec = [item for item, score in heapq.nlargest(50, rec_scores.items(), key=lambda item: item[1])] # selecciona los dominios de los top 3 productos si es que cumplen con un threshold domains = set([ITEM_TO_DOMAIN[rec[i]] for i in range(3) if rec_scores[rec[i]] >= 1]) rec_fill = [r for r in rec if rec_scores[r]] # se se seleccionan dominios, utiliza este filtro en la recomendaciones if domains: rec = [r for r in rec if ITEM_TO_DOMAIN[r] in domains] rec_fill = [r for r in rec_fill if r not in rec] #pass rec = rec[:10] # rellena en caso de no tener recomendaciones if len(rec) < 10: if len(rec): # rellena con más visitados de los dominios de selección fill_scores = {r: views_counter[r] for domain_i in domains for r in most_viewed_by_domain.get(domain_i, [])} fill = [item for item, score in heapq.nlargest(10, fill_scores.items(), key=lambda item: item[1])] if len(fill) < 10: # si no alcanza, agrega los descartados en la selección de dominios fill += rec_fill else: fill = most_viewed_items rec = fill_rec(rec, fill) assert len(rec) == 10 # evaluación model_sum_dcg += dcg(rec, item_bought) rec_dom = ITEM_TO_DOMAIN[rec[0]] tp_domain += ITEM_TO_DOMAIN[item_bought] in rec_dom n_recs += 1 print(f"NDCG: {model_sum_dcg / (IDCG * n_recs): .4f} ({n_recs} recomendaciones)") print(f"Accuracy (domain): {tp_domain / n_recs: .4f} ({n_recs} recomendaciones)")Accuracy (domain): 0.4180 (20000 recomendaciones)Evaluación en validaciónW0 = 1.0 WI = 1.5 n_recs = 0 y_val = [] model_sum_dcg = 0.0 tp_domain = 0 with open("./data/train_dataset-val_split.jl", "rt") as fd: for line in fd: try: data = json.loads(line) except: continue item_bought = 
data["item_bought"] items_views = [event["event_info"] for event in data["user_history"] if event["event_type"] == "view"] y_val.append(item_bought) # promedia predicciones por modelos MF model_rec_scores = {i:s for i, s in val_mf_scores[n_recs]} model_rec_scores_2 = {i:s for i, s in val_mf_scores_2[n_recs]} model_rec_scores = {i: (model_rec_scores.get(i, 0) *0.5 +\ model_rec_scores_2.get(i, 0) * 0.5) for i in (model_rec_scores.keys() | model_rec_scores_2.keys())} # Ranking de items visitados items_pv_count = {} items_views = items_views[::-1] for pos, item_view in enumerate(items_views, 1): items_pv_count[item_view] = items_pv_count.get(item_view,0) + 1 / np.log10(pos + 1) rec_scores = defaultdict(dict) # scores por visitas for item_view, pv_count in items_pv_count.items(): # Asigna un score por item visitado rec_scores[item_view] = rec_scores.get(item_view, 0) + pv_count # normaliza scores por visitas sum_scores = sum([s for s in rec_scores.values()]) if sum_scores: c = ITEM_TO_COUNTRY[items_views[0]] rec_scores = {i: s / sum_scores for i, s in rec_scores.items() } rec_scores = {i: s for i, s in rec_scores.items() if s > 0.05} # suma ambos scores rec_scores = {i: model_rec_scores.get(i, 0) * WI + rec_scores.get(i, 0) * W0 + 0 if i in ITEM_TO_IDX else 0.2\ for i in (rec_scores.keys() | model_rec_scores.keys()) if not i in BLACK_LIST } rec = [item for item, score in heapq.nlargest(50, rec_scores.items(), key=lambda item: item[1])] domains = set([ITEM_TO_DOMAIN[rec[i]] for i in range(3) if rec_scores[rec[i]] >= 1]) rec_fill = [r for r in rec if rec_scores[r]] if domains: rec = [r for r in rec if ITEM_TO_DOMAIN[r] in domains] rec_fill = [r for r in rec_fill if r not in rec] #pass rec = rec[:10] # rellena en caso de no tener recomendaciones if len(rec) < 10: if len(rec): fill_scores = {r: views_counter[r] for domain_i in domains for r in most_viewed_by_domain.get(domain_i, [])} fill = [item for item, score in heapq.nlargest(10, fill_scores.items(), key=lambda item: item[1])] if len(fill) < 10: fill += rec_fill else: fill = most_viewed_items rec = fill_rec(rec, fill) assert len(rec) == 10 # evaluación model_sum_dcg += dcg(rec, item_bought) rec_dom = ITEM_TO_DOMAIN[rec[0]] tp_domain += ITEM_TO_DOMAIN[item_bought] in rec_dom n_recs += 1 print(f"NDCG: {model_sum_dcg / (IDCG * n_recs): .4f} ({n_recs} recomendaciones)") print(f"Accuracy (domain): {tp_domain / n_recs: .4f} ({n_recs} recomendaciones)")Accuracy (domain): 0.4176 (19998 recomendaciones)实验七 线性方程组的迭代解法雅可比(Jacobi)迭代法import numpy as np def jacobi_iter(A, b, x0=None, eps=1e-5, max_steps=5000, verbose=False): """雅可比(Jacobi)迭代法求解线性方程组: A @ x = b Args: A: np_array_like, 系数矩阵 b: np_array_like, 右端常数 x0: np_array_like, 迭代初值 default x0=None means using random values. eps: float, 精度要求 max_steps: int, 最大迭代次数 verbose: bool, 如果计算成功,打印出结果及迭代次数 Returns: x: 方程组的解 Raises: ValueError: A 和 b 形状不符合要求 Expection: 达到最大迭代次数,仍不满足精度 """ A = np.array(A) b = np.array(b) n, m = A.shape if n != m or n != len(b): raise ValueError(f"Not match: A({n, m}) and b({len(b)},)") if not x0: x0 = np.random.random(n) # A = D - L - U D = np.diag(np.diag(A)) L = - np.tril(A, -1) U = - np.triu(A, 1) inv_D = np.linalg.pinv(D) B = inv_D @ (L + U) f = inv_D @ b x = x0 i = 0 for i in range(int(max_steps)): x_prev = np.array(x) # deep copy x = B @ x + f if np.all(np.abs(x - x_prev) <= eps): # 达到精度要求 break else: raise Exception(f"cannot reach eps ({eps}) after max_steps ({max_steps}). 
The last result: x = {x}") if verbose: print(f"jacobi_iter get result x = {x} after {i} iterations.") return x jacobi_iter([[9,-1,-1], [-1,10,-1],[-1,-1,15]], [7,8,13], verbose=True) # jacobi_iter([[9,-1,-1], [-1,10,-1],[-1,-1,15]], [7,8,13], x0=[99999, 99999, 99999], verbose=True)jacobi_iter get result x = [0.99999906 0.99999911 0.99999933] after 7 iterations.高斯-赛德尔迭代def gauss_seidel_iter(A, b, x0=None, eps=1e-5, max_steps=5000, verbose=False): """高斯-赛德尔迭代(Gauss–Seidel method)求解线性方程组: A @ x = b Args: A: np_array_like, 系数矩阵 b: np_array_like, 右端常数 x0: np_array_like, 迭代初值 default x0=None means using random values. eps: float, 精度要求 max_steps: int, 最大迭代次数 verbose: bool, 如果计算成功,打印出结果及迭代次数 Returns: x: 方程组的解 Raises: ValueError: A 和 b 形状不符合要求 Expection: 达到最大迭代次数,仍不满足精度 """ A = np.array(A) b = np.array(b) n, m = A.shape if n != m or n != len(b): raise ValueError(f"Not match: A({n, m}) and b({len(b)},)") if not x0: x0 = np.random.random(n) # A = D - L - U D = np.diag(np.diag(A)) L = - np.tril(A, -1) U = - np.triu(A, 1) inv_DsL = np.linalg.pinv(D - L) B = inv_DsL @ U f = inv_DsL @ b x = x0 i = 0 for i in range(int(max_steps)): x_prev = np.array(x) # deep copy inv_DsL = np.linalg.pinv(D - L) x = B @ x + f if np.all(np.abs(x - x_prev) <= eps): # 达到精度要求 break else: raise Exception(f"cannot reach eps ({eps}) after max_steps ({max_steps}). The last result: x = {x}") if verbose: print(f"gauss_seidel_iter get result x = {x} after {i} iterations.") return x gauss_seidel_iter([[9,-1,-1], [-1,10,-1],[-1,-1,15]], [7,8,13], eps=1e-12, verbose=True)gauss_seidel_iter get result x = [1. 1. 1.] after 10 iterations.SOR 迭代法def sor(A, b, x0=None, w=1, eps=1e-5, max_steps=5000, verbose=False): """逐次超松驰法(Successive over-relaxation, SOR)求解线性方程组: A @ x = b Args: A: np_array_like, 系数矩阵 b: np_array_like, 右端常数 x0: np_array_like, 迭代初值 default x0=None means using random values. w: float, 松弛因子 w > 0 default w=1,即 Gauss–Seidel 迭代 eps: float, 精度要求 max_steps: int, 最大迭代次数 verbose: bool, 如果计算成功,打印出结果及迭代次数 Returns: x: 方程组的解 Raises: ValueError: w 小于等于 0 ValueError: A 和 b 形状不符合要求 Expection: 达到最大迭代次数,仍不满足精度 """ if w <= 0: raise ValueError(f"unexpected w = {w} < 0") A = np.array(A) b = np.array(b) n, m = A.shape if n != m or n != len(b): raise ValueError(f"Not match: A({n, m}) and b({len(b)},)") if not x0: x0 = np.random.random(n) # A = D - L - U D = np.diag(np.diag(A)) L = - np.tril(A, -1) U = - np.triu(A, 1) _inv_DsWL = np.linalg.pinv(D - w * L) B = _inv_DsWL @ ((1-w) * D + w * U) f = w * _inv_DsWL @ b x = x0 i = 0 for i in range(int(max_steps)): x_prev = np.array(x) # deep copy x = B @ x + f if np.all(np.abs(x - x_prev) <= eps): # 达到精度要求 break else: raise Exception(f"cannot reach eps ({eps}) after max_steps ({max_steps}). The last result: x = {x}") if verbose: print(f"sor (w={w}) get result x = {x} after {i} iterations.") return x sor([[9,-1,-1], [-1,10,-1],[-1,-1,15]], [7,8,13], w=1.1, eps=1e-12, verbose=True) A = [[-4, 1, 1, 1], [1, -4, 1, 1], [1, 1, -4, 1], [1, 1, 1, -4]] b = [1, 1, 1, 1] x0 = [0, 0, 0, 0] jacobi_iter(A, b, x0=x0, eps=1e-9, verbose=True) gauss_seidel_iter(A, b, x0=x0, eps=1e-9, verbose=True) sor(A, b, x0=x0, w=1.3, eps=1e-9, verbose=True)jacobi_iter get result x = [-1. -1. -1. -1.] after 68 iterations. gauss_seidel_iter get result x = [-1. -1. -1. -1.] after 36 iterations. sor (w=1.3) get result x = [-1. -1. -1. -1.] after 20 iterations.重复的代码有点多,面向对象封装一下:class SimpleIteration(object): """求解线性方程组的迭代法: A @ x = b 调用 SimpleIteration 子类实例得到解, e.g. 
JacobiIteration(A, b)() """ def __init__(self, A, b): """ A: np_array_like, 系数矩阵 b: np_array_like, 右端常数 """ self.A = np.array(A) self.b = np.array(b) n, m = self.A.shape if n != m or n != len(self.b): raise ValueError(f"Not match: A({n, m}) and b({len(b)},)") @staticmethod def _dlu(A): """分裂 A: A = D - L - U Args: A: np.array Returns: (D, L, U) """ D = np.diag(np.diag(A)) L = - np.tril(A, -1) U = - np.triu(A, 1) return D, L, U def _B_f(self): """计算迭代 x = B @ x + f 的 B 和 f """ raise NotImplementedError('_B_f') def __call__(self, x0=None, eps=1e-5, max_steps=5000, verbose=False): """线性方程组「简单迭代法」的迭代过程 x = B @ x + f 其中 B, f 调用 self._B_f() 得到 Args: x0: np_array_like, 迭代初值 default x0=None means using random values. eps: float, 精度要求 max_steps: int, 最大迭代次数 verbose: bool, 如果计算成功,打印出结果及迭代次数 Returns: x: 方程组的解 Raises: ValueError: x0 形状不符合问题 Expection: 达到最大迭代次数,仍不满足精度 """ if not x0: x0 = np.random.random(self.A.shape[0]) x0_shape = np.shape(x0) if x0_shape[0] != self.A.shape[0]: raise ValueError(f"Not match: A({self.A.shape}) and x0({x0_shape})") B, f = self._B_f() x = x0 i = 0 for i in range(int(max_steps)): x_prev = np.array(x) # deep copy x = B @ x + f if np.all(np.abs(x - x_prev) <= eps): # 达到精度要求 break else: raise Exception(f"{self.method_name()} cannot reach eps ({eps}) after max_steps ({max_steps}). The last result: x = {x}") if verbose: print(f"{self.method_name()} get result x = {x} after {i} iterations.") def method_name(self): return self.__class__.__name__ class JacobiIteration(SimpleIteration): """雅可比(Jacobi)迭代法求解线性方程组: A @ x = b """ def _B_f(self): D, L, U = self._dlu(self.A) inv_D = np.linalg.pinv(D) B = inv_D @ (L + U) f = inv_D @ self.b return B, f class GaussSeidel(SimpleIteration): """高斯-赛德尔迭代(Gauss–Seidel method)求解线性方程组: A @ x = b """ def _B_f(self): D, L, U = self._dlu(self.A) inv_DsL = np.linalg.pinv(D - L) B = inv_DsL @ U f = inv_DsL @ self.b return B, f class SOR(SimpleIteration): """逐次超松驰法(Successive over-relaxation, SOR)求解线性方程组: A @ x = b """ def __init__(self, A, b, w=1): """ w: float, 松弛因子 w > 0 default w=1,即 Gauss–Seidel 迭代 """ super().__init__(A, b) self.w = w def _B_f(self): D, L, U = self._dlu(self.A) w = self.w _inv_DsWL = np.linalg.pinv(D - w * L) B = _inv_DsWL @ ((1-w) * D + w * U) f = w * _inv_DsWL @ self.b return B, f def method_name(self): return super().method_name() + f' (w={self.w})' A = [[-4, 1, 1, 1], [1, -4, 1, 1], [1, 1, -4, 1], [1, 1, 1, -4]] b = [1, 1, 1, 1] x0 = [0, 0, 0, 0] for method in [JacobiIteration(A, b), GaussSeidel(A, b), SOR(A, b, 0.9), SOR(A, b, 1), SOR(A, b, 1.1)]: method(x0=x0, eps=1e-9, verbose=True)JacobiIteration get result x = [-1. -1. -1. -1.] after 68 iterations. GaussSeidel get result x = [-1. -1. -1. -1.] after 36 iterations. SOR (w=0.9) get result x = [-1. -1. -1. -1.] after 46 iterations. SOR (w=1) get result x = [-1. -1. -1. -1.] after 36 iterations. SOR (w=1.1) get result x = [-1. -1. -1. -1.] after 28 iterations.寻找最好的 w:A = [[-4, 1, 1, 1], [1, -4, 1, 1], [1, 1, -4, 1], [1, 1, 1, -4]] b = [1, 1, 1, 1] for w in np.linspace(0.1, 1.9, 100): rs = SOR(A, b, w)(x0=[0, 0, 0, 0], eps=1e-9, verbose=True)SOR (w=0.1) get result x = [-0.99999996 -0.99999996 -0.99999996 -0.99999996] after 649 iterations. SOR (w=0.1181818181818182) get result x = [-0.99999997 -0.99999997 -0.99999997 -0.99999997] after 550 iterations. SOR (w=0.13636363636363635) get result x = [-0.99999997 -0.99999997 -0.99999997 -0.99999997] after 476 iterations. SOR (w=0.15454545454545454) get result x = [-0.99999998 -0.99999998 -0.99999998 -0.99999998] after 419 iterations. 
SOR (w=0.17272727272727273) get result x = [-0.99999998 -0.99999998 -0.99999998 -0.99999998] after 374 iterations. SOR (w=0.19090909090909092) get result x = [-0.99999998 -0.99999998 -0.99999998 -0.99999998] after 337 iterations. SOR (w=0.2090909090909091) get result x = [-0.99999998 -0.99999998 -0.99999998 -0.99999998] after 306 iterations. SOR (w=0.22727272727272727) get result x = [-0.99999999 -0.99999999 -0.99999999 -0.99999999] after 280 iterations. SOR (w=0.24545454545454545) get result x = [-0.99999999 -0.99999999 -0.99999999 -0.99999999] after[...]Write the output above to a file 'sorres.txt', then read w and the iteration counts back from it:import re pattern = r'SOR \(w=(.*?)\) get result .*? after (\d*?) iterations.' ws = [] iters = [] with open('sorres.txt') as f: for line in f: w, i = list(map(lambda x: float(x), re.findall(pattern, line)[0])) ws.append(w) iters.append(i)Plot the results for analysis:import matplotlib.pyplot as plt plt.plot(ws, iters, 'bo-') plt.xlabel('w') plt.ylabel('iterations') plt.show()Find the w that minimizes the number of iterations:w_best,iters_min = min(zip(ws, iters), key=lambda x: x[1]) print(f'best w: {w_best}\niterations: {int(iters_min)}')best w: 1.2272727272727273 iterations: 17An example that cannot be solved directly:A = [[1, -3, -6], [2, 8, -3], [5, 2, 1]] b = [1, 21, 8] np.linalg.solve(A, b) gauss_seidel_iter(A, b, eps=1e-8, verbose=True)/usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:48: RuntimeWarning: overflow encountered in matmul /usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:48: RuntimeWarning: invalid value encountered in matmul /usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:50: RuntimeWarning: invalid value encountered in less_equalAfter rearranging the rows (which makes the matrix diagonally dominant), it can be solved:A0 = [[5, 2, 1], [2, 8, -3], [1, -3, -6]] b0 = [8, 21, 1] gauss_seidel_iter(A0, b0, eps=1e-8, verbose=True)gauss_seidel_iter get result x = [ 1. 2. -1.] after 12 iterations.Compare the convergence of Jacobi and Gauss-Seidel:A1 = [[1, 2, -2], [1, 1, 1], [2, 2, 1]] A2 = [[2, -1, 1], [2, 2, 2], [-1, -1, 2]] b = [1, 1, 1] np.linalg.solve(A1, b) np.linalg.solve(A2, b) print('jacobi: ', end='') j = jacobi_iter(A1, b, verbose=True) print('gauss_seidel: ', end='') g = gauss_seidel_iter(A1, b, verbose=True) print('gauss_seidel: ', end='') # g = gauss_seidel_iter(A2, b, verbose=True) print('jacobi: ', end='') j = jacobi_iter(A2, b, verbose=True)gauss_seidel: gauss_seidel_iter get result x = [ 0.16666863 -0.16666896 0.49999983] after 19 iterations.
jacobi:출처: https://github.com/lovit/huggingface_konlpy/blob/master/tutorials/01_huggingface_konlpy_usage.ipynb KoNLPy as pre-tokenizerfrom huggingface_konlpy.tokenizers_konlpy import KoNLPyPreTokenizer from konlpy.tag import Komoran sent_ko = '신종 코로나바이러스 감염증(코로나19) 사태가 심각합니다' komoran_pretok = KoNLPyPreTokenizer(Komoran()) print(komoran_pretok(sent_ko)) !mkdir -p ./model/KomoranBertWordPieceTokenizer/ from huggingface_konlpy.tokenizers_konlpy import KoNLPyPretokBertWordPieceTokenizer from huggingface_konlpy.transformers_konlpy import KoNLPyPretokBertTokenizer komoran_bertwordpiece_tokenizer = KoNLPyPretokBertWordPieceTokenizer( konlpy_pretok = komoran_pretok) komoran_bertwordpiece_tokenizer.train( files = ['./data/2020-07-29_covid_news_sents.txt'], vocab_size = 3000) komoran_bertwordpiece_tokenizer.save_model( directory='./model/KomoranBertWordPieceTokenizer/', name='covid') komoran_pretok_berttokenizer = KoNLPyPretokBertTokenizer( konlpy_pretok = komoran_pretok, vocab_file = './model/KomoranBertWordPieceTokenizer/covid-vocab.txt') from huggingface_konlpy import compose indices = komoran_pretok_berttokenizer.encode(sent_ko) tokens = [komoran_pretok_berttokenizer.ids_to_tokens[ids] for ids in indices] print(' '.join(compose(tokens)))[CLS] 신종 코로나바이러스 감염증 ( 코로나 19 ) 사태 가 심 ##각 하 [UNK] [SEP]KoNLPy WordPiece Tokenizer with tagfrom huggingface_konlpy.tokenizers_konlpy import KoNLPyWordPieceTokenizer from konlpy.tag import Mecab mecab_wordpiece_notag = KoNLPyWordPieceTokenizer(Mecab(), use_tag=False) print(' '.join(mecab_wordpiece_notag.tokenize(sent_ko))) mecab_wordpiece_usetag = KoNLPyWordPieceTokenizer(Mecab(), use_tag=True) print(' '.join(mecab_wordpiece_usetag.tokenize(sent_ko))) from huggingface_konlpy.tokenizers_konlpy import KoNLPyBertWordPieceTrainer mecab_wordpiece_notag_trainer = KoNLPyBertWordPieceTrainer(Mecab(), use_tag=False) mecab_wordpiece_notag_trainer.train(files=['./data/2020-07-29_covid_news_sents.txt']) mecab_wordpiece_notag_trainer.save_model('./model/BertStyleMecab/', 'notag') from huggingface_konlpy.transformers_konlpy import KoNLPyBertTokenizer konlpy_bert_notag = KoNLPyBertTokenizer( konlpy_wordpiece = KoNLPyWordPieceTokenizer(Mecab(), use_tag=False), vocab_file = './model/BertStyleMecab/notag-vocab.txt' ) print(' '.join(konlpy_bert_notag.tokenize(sent_ko))) mecab_wordpiece_usetag_trainer = KoNLPyBertWordPieceTrainer(Mecab(), use_tag=True) mecab_wordpiece_usetag_trainer.train(files=['./data/2020-07-29_covid_news_sents.txt']) mecab_wordpiece_usetag_trainer.save_model('./model/BertStyleMecab/', 'usetag') konlpy_bert_usetag = KoNLPyBertTokenizer( konlpy_wordpiece = KoNLPyWordPieceTokenizer(Mecab(), use_tag=True), vocab_file = './model/BertStyleMecab/usetag-vocab.txt' ) indices = konlpy_bert_usetag.encode(sent_ko) tokens = [konlpy_bert_usetag.ids_to_tokens[ids] for ids in indices] print(' '.join(compose(tokens)))Initialize alphabet 1/1: 100%|██████████| 70964/70964 [00:00<00:00, 105826.48it/s] Train vocab 1/1: 100%|██████████| 70964/70964 [00:10<00:00, 6705.41it/s]Database connectionconnection = psycopg2.connect(user = "chembl", password = "", host = "127.0.0.1", port = "5432", database = "chembl_25") cursor = connection.cursor()Types of activitiescursor.execute('select min(AC.standard_type) AS TYPE, count(AC.standard_type) AS COUNT from ACTIVITIES AC GROUP BY standard_type') activities = pd.DataFrame(cursor.fetchall(), columns = ['type', 'count']) activities[activities['count'] > 100000]Main querycolumns = ['molregno', 'canonical_smiles', 'activity_id', 'standard_value', 
'standard_units', 'standard_flag', 'standard_type', 'activity_comment', 'alogp', 'hba', 'hbd', 'psa', 'rtb', 'ro3_pass', 'num_ros_violations', 'molecular_species', 'full_mwt', 'aromatic_rings', 'heavy_atoms', 'qed_weighted'] cursor.execute("select CS.molregno, \ CS.canonical_smiles, \ AC.activity_id, \ AC.standard_value, \ AC.standard_units, \ AC.standard_flag, \ AC.standard_type, \ AC.activity_comment, \ CP.ALOGP, \ CP.HBA, \ CP.HBD, \ CP.PSA, \ CP.RTB, \ CP.RO3_PASS, \ CP.NUM_RO5_VIOLATIONS, \ CP.MOLECULAR_SPECIES, \ CP.FULL_MWT, \ CP.AROMATIC_RINGS, \ CP.HEAVY_ATOMS, \ CP.QED_WEIGHTED \ from COMPOUND_STRUCTURES CS \ inner join ACTIVITIES AC on CS.molregno = AC.molregno \ inner join COMPOUND_PROPERTIES CP on CS.molregno = CP.MOLREGNO \ and (AC.standard_type = 'IC50' or AC.standard_type = 'GI50' or AC.standard_type = 'Potency') \ and (AC.standard_value IS NOT NULL)") molData = pd.DataFrame(cursor.fetchall(), columns = columns) connection.close() molData.shape molData.head(10).iloc[1,:]Data conversionfloatDescriptors = ['standard_value', 'alogp', 'psa', 'full_mwt', 'qed_weighted'] for moldesc in floatDescriptors: molData[moldesc] = molData[moldesc].astype(float) intDescriptors = ['molregno']#, 'hba', 'hbd', 'rtb', 'aromatic_rings', 'heavy_atoms'] for moldesc in intDescriptors: molData[moldesc] = molData[moldesc].astype(int)What units do we have?# mess in units molData.groupby('standard_units').agg('count') # take only the entries expressed in nM molData = molData[molData['standard_units']=='nM'].reset_index() molData.groupby('standard_units').agg('count') molData.head(5)Data aggregationaggFunctions = { 'molregno': ['min', 'count'], 'canonical_smiles': 'min', 'standard_value': ['min', 'max'], 'standard_type': 'min', 'alogp': ['min', 'max'], 'hba': ['min', 'max'], 'hbd': ['min', 'max'], 'psa': ['min', 'max'], 'rtb': ['min', 'max'], 'ro3_pass': 'min', 'num_ros_violations': 'min', 'molecular_species': 'min', 'full_mwt': ['min', 'max'], 'aromatic_rings': 'min', 'heavy_atoms': 'min', 'qed_weighted': ['min', 'max'] } grouped = molData.groupby('molregno') molDataGrouped = grouped.agg(aggFunctions).reset_index() molDataGrouped.head(5)Activity type distribution# THE GI50/IC50 distribution molDataGrouped['standard_type'].groupby('min').agg({'min': ['count']})Pickling of the dataimport pickle pcklFile = 'molDataGrouped.pckl' with open(pcklFile, 'wb') as file: pickle.dump(molDataGrouped, file) with open(pcklFile, 'rb') as file: molDataGrouped = pickle.load(file) molDataGrouped.columns molDataGrouped.loc[:, ['canonical_smiles', 'min']].values molDataGrouped.head(10)Smiles analysisfrom rdkit import Chem from rdkit.Chem import AllChem from rdkit import Chem from rdkit.Chem import Draw def provideMoleculeStatistics(smiles): #print(smiles) mol = Chem.MolFromSmiles(smiles) newSmiles = Chem.MolToSmiles(mol, canonical = True, isomericSmiles = False) negativeCharged = sum([ item.GetFormalCharge() if item.GetFormalCharge() < 0 else 0 for item in mol.GetAtoms() ]) positiveCharged = sum([ item.GetFormalCharge() if item.GetFormalCharge() > 0 else 0 for item in mol.GetAtoms() ]) #anyCharged = any([item1 or item2 for item1, item2 in zip(negativelyCharged, positivelyCharged)]) elementsList = list(set([atom.GetSymbol() for atom in mol.GetAtoms()])) numberOfRings = mol.GetRingInfo().NumRings() return(newSmiles, negativeCharged, positiveCharged, elementsList, numberOfRings) import codecs encodeToUTF8 = False def canonicalizeSmilesAndProvideDescriptor(smiles): #rdkitMol = Chem.MolFromSmiles(molecule) 
#Chem.SanitizeMol(rdkitMol) try: #newSmiles = Chem.MolToSmiles(rdkitMol, canonical = True, isomericSmiles = False) newSmiles, negativeCharged, positiveCharged, elementsList, numberOfRings = provideMoleculeStatistics(smiles) #smilesDescription = checkVariousSmilesProperties(newSmiles) #elementsSet = provideElementsList(newSmiles) except: newSmiles, negativeCharged, positiveCharged, elementsList, numberOfRings = (None, None, None, None, None) # There was a trouble in catching the ArgumentError exception (originatefd most likely in Boost.Python # therefore any exceptio s caught here) print('Exception!!! :', smiles) if (encodeToUTF8): return((codecs.encode(newSmiles, 'utf-8'), negativeCharged, positiveCharged, elementsList, numberOfRings)) else: return((newSmiles, negativeCharged, positiveCharged, elementsList, numberOfRings)) print(pd.__version__) def printRow(row): print('in') print(row) mol = Chem.MolFromSmiles(row) molDataGrouped.loc[:2, ('canonical_smiles', 'min')].apply(printRow) #.apply(canonicalizeSmilesAndProvideDescriptor) len(molDataGrouped) sourceColumn = ('canonical_smiles', 'min') nTotal = len(molDataGrouped) nStart = 0 nSize = 10000 nBatch = np.ceil((nTotal - nStart)/nSize).astype(int) for iii in range(nBatch): iBeg = nStart + iii * nSize if (iii == nBatch - 1): iEnd = nTotal else: iEnd = nStart + (iii + 1) * nSize print(iii) result = molDataGrouped.loc[iBeg:iEnd, sourceColumn].apply(canonicalizeSmilesAndProvideDescriptor) molDataGrouped.loc[iBeg:iEnd, 'canonicalSmiles'] = [item[0] for item in result] molDataGrouped.loc[iBeg:iEnd, 'negativeCharged'] = [item[1] for item in result] molDataGrouped.loc[iBeg:iEnd, 'positiveCharged'] = [item[2] for item in result] molDataGrouped.loc[iBeg:iEnd, 'elementsSet'] = [item[3] for item in result] molDataGrouped.loc[iBeg:iEnd, 'numberOfRings'] = [item[4] for item in result] molDataGrouped.head(4) molDataGrouped[molDataGrouped['elementsSet'] == None] with open('molDataGroupedDesc.pckl', 'wb') as file: pickle.dump(molDataGrouped, file) with open('molDataGroupedDesc.pckl', 'rb') as file: molDataGrouped = pickle.load(file) molDataGrouped = molDataGrouped[~molDataGrouped['elementsSet'].isnull()] organicChemistryList = ['B', 'C', 'N', 'O', 'P', 'S', 'F', 'Cl', 'Br', 'I'] organicChemistrySet = set(organicChemistryList) testSet = set(['N', 'C', 'Cl']) testSet < organicChemistrySet molDataGrouped['organicChemistryElementsOnly'] = molDataGrouped['elementsSet'].apply(lambda x: set(x) < organicChemistrySet) molDataGrouped.groupby('organicChemistryElementsOnly').count() molDataGrouped['canonicalSmilesLength'] = molDataGrouped['canonicalSmiles'].apply(lambda x: len(x)) plt.hist(molDataGrouped['canonicalSmilesLength'], bins = 50) limitSmilesLength = 100 molDataGroupedChosen = molDataGrouped[(molDataGrouped['canonicalSmilesLength'] < limitSmilesLength) & \ (molDataGrouped['negativeCharged'] == 0) & \ (molDataGrouped['positiveCharged'] == 0) & \ (molDataGrouped['numberOfRings'] <= 5) & \ (molDataGrouped['organicChemistryElementsOnly'])].reset_index() replacementDict = {'Br': 'G', 'Cl': 'U', '[nH]': 'W'} molDataGroupedChosen['encodedSmiles'] = molDataGroupedChosen['canonicalSmiles'].replace(replacementDict, regex=True) molDataGroupedChosen.head(5) molDataGroupedChosen.shape # molDataGroupedFinal_100.pckl: smiles length < 100 with open('molDataGroupedFinal_100.pckl', 'wb') as file: pickle.dump(molDataGroupedChosen, file) with open('molDataGroupedFinal.pckl', 'rb') as file: molDataGroupedChosen = pickle.load(file) molDataGroupedChosen.head() nSmilesCodes = 50000 
mask = random.randint(0, molDataGroupedChosen.shape[0], size=nSmilesCodes) staticFeatures = pd.DataFrame() toBeAveraged = ['standard_value', 'alogp', 'hba', 'hbd', 'psa', 'rtb', 'full_mwt', 'qed_weighted'] for quantity in toBeAveraged: staticFeatures.loc[:, quantity] = (molDataGroupedChosen.loc[mask, (quantity, 'min')] + molDataGroupedChosen.loc[mask, (quantity, 'max')])/2 staticFeatures.loc[:, quantity].astype(float) toBeTaken = ['aromatic_rings', 'heavy_atoms'] for quantity in toBeTaken: staticFeatures.loc[:, quantity] = molDataGroupedChosen.loc[mask, (quantity, 'min')] staticFeatures.loc[:, quantity].astype(float) staticFeatures.loc[:, 'number_of_rings'] = molDataGroupedChosen.loc[mask, 'numberOfRings'].astype(float) staticFeatures.loc[:, 'number_of_rings'] = staticFeatures.loc[:, 'number_of_rings'].astype(float) print(staticFeatures.head(2)) #staticFeatures = staticFeatures.values staticFeatures['full_mwt'] = staticFeatures.full_mwt.astype(float) staticFeatures['qed_weighted'] = staticFeatures.qed_weighted.astype(float) staticFeatures['aromatic_rings'] = staticFeatures.qed_weighted.astype(float) thres = 100000 staticFeatures[staticFeatures['standard_value'] < thres].shape[0] / staticFeatures['standard_value'].shape[0] staticFeatures = staticFeatures[staticFeatures['standard_value'] < thres] staticFeatures.shape allDescriptors = ['standard_value', 'alogp', 'hba', 'hbd', 'psa', 'rtb', 'full_mwt', 'qed_weighted', 'aromatic_rings', 'heavy_atoms', 'number_of_rings'] #allDescriptors = ['standard_value'] #quantity = 'alogp' plotIdx = 1 nRows = np.ceil(len(allDescriptors) / 2) fig = plt.figure(figsize=(16, 16)) for quantity in allDescriptors: print(quantity) plt.subplot(nRows, 2, plotIdx) plt.hist(staticFeatures[~staticFeatures[quantity].isnull()][quantity], bins = 10) plt.title(quantity) plotIdx += 1Dynamic featuressmilesCodes = molDataGroupedChosen.loc[staticFeatures.index, 'encodedSmiles'] smilesCodes maxlen = -1 for code in smilesCodes: if len(code) > maxlen: maxlen = len(code) maxlen minlen = 1e6 for code in smilesCodes: if len(code) < minlen: minlen = len(code) minlen def pad_smile(string, max_len, padding='right'): if len(string) <= max_len: if padding == 'right': return string + " " * (max_len - len(string)) elif padding == 'left': return " " * (max_len - len(string)) + string elif padding == 'none': return string smilesCodes = smilesCodes.apply(lambda x: pad_smile(x, max_len=maxlen, padding='right')) chars = sorted(list(set(smilesCodes.str.cat(sep='')))) print('total chars:', len(chars)) char2indices = dict((c, i) for i, c in enumerate(chars)) indices2char = dict((i, c) for i, c in enumerate(chars)) dynamicFeatures = np.zeros((len(smilesCodes), maxlen, len(chars)), dtype=np.float) dynamicFeatures.shape for codeidx, code in enumerate(smilesCodes): for charidx, char in enumerate(code): dynamicFeatures[codeidx, charidx, char2indices[char]] = 1 sums = [] for idx in range(dynamicFeatures.shape[0]): sums.append(np.sum(dynamicFeatures[idx, :, :])) plt.hist(sums) staticChosen = ['alogp', 'full_mwt'] scaler = StandardScaler() scaler.fit(staticFeatures[staticChosen]) staticFeaturesStandard = scaler.transform(staticFeatures[staticChosen])Autoencoder architecturefrom keras.layers import LSTM, TimeDistributed, concatenate, Input, Dense, RepeatVector, Lambda from keras.models import Model from keras.activations import relu, sigmoid, tanh from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.optimizers import Adam, RMSprop import keras.backend as K from keras.utils import plot_model 
from keras import losses import numpy.random as rnd def sampling(args): z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] # by default, random_normal has mean=0 and std=1.0 epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon def prepare_model(static, dynamic, k, window, charsetLen, lr, lossFunction, showArch): input_dynamic = Input(shape=(window, charsetLen), name="input_dynamic") input_static = Input(shape=(static,), name="input_static") latent = Dense(k[0], activation=relu)(input_static) dense_h = Dense(k[0])(latent) dense_c = Dense(k[0])(latent) lstm_layer, state_h, state_c = LSTM(k[0], return_sequences=True, return_state=True)(input_dynamic, initial_state=[dense_h, dense_c]) for x in k[1:-1]: concat_h = concatenate([dense_h, state_h]) dense_h = Dense(x)(concat_h) concat_c = concatenate([dense_c, state_c]) dense_c = Dense(x)(concat_c) lstm_layer, state_h, state_c = LSTM(x, return_sequences=True, return_state=True)(lstm_layer, initial_state=[dense_h, dense_c]) x = k[-1] concat_h = concatenate([dense_h, state_h]) dense_h = Dense(x)(concat_h) concat_c = concatenate([dense_c, state_c]) dense_c = Dense(x)(concat_c) lstm_layer, state_h, state_c = LSTM(x, return_state=True)(lstm_layer, initial_state=[dense_h, dense_c]) concat = concatenate([lstm_layer, latent]) # autoencoder z_mean = Dense(x, name='z_mean')(concat) z_log_var = Dense(x, name='z_log_var')(concat) z = Lambda(sampling, output_shape=(x,), name='z')([z_mean, z_log_var]) state_h = Dense(k[-2], activation=relu)(z) dense_h = Dense(k[-2], activation=relu)(z) state_c = Dense(k[-2], activation=relu)(z) dense_c = Dense(k[-2], activation=relu)(z) lstm_layer = RepeatVector(window)(z) for x in np.flip(k[:-1]): concat_h = concatenate([dense_h, state_h]) dense_h = Dense(x)(concat_h) concat_c = concatenate([dense_c, state_c]) dense_c = Dense(x)(concat_c) lstm_layer, state_h, state_c = LSTM(x, return_sequences=True, return_state=True)(lstm_layer, initial_state=[dense_h, dense_c]) #result_series = TimeDistributed(Dense(charsetLen))(lstm_layer) result_series = LSTM(charsetLen, return_sequences=True, activation='softmax')(lstm_layer) concat = concatenate([state_h, state_c]) #result_sigmoid = Dense(static-3, activation=sigmoid)(concat) result_relu = Dense(static, activation=sigmoid)(concat) #model = Model(inputs=[input_dynamic, input_static], outputs=[result_series, result_sigmoid, result_relu]) model = Model(inputs=[input_dynamic, input_static], outputs=[result_series, result_relu]) optimizer = RMSprop(lr=lr) model.compile(optimizer=optimizer, loss=lossFunction, metrics=['binary_crossentropy', 'mean_absolute_error']) if (showArch): print(model.summary()) return model def fit(staticFeatures, dynamicFeatures, model, step=1): #dynamic_data = np.empty((0, window, 1), np.float) #helper = [] #for d in dynamic: # new_data = rolling_window(d, window, step) # helper.append(len(new_data)) # dynamic_data = np.append(dynamic_data, new_data, axis=0) #print(len(helper)) #static_data = np.repeat(static, helper, axis=0) order = rnd.permutation(len(staticFeatures)) early_stopping = EarlyStopping(monitor='val_loss', patience=5) bst_model_path = 'autoencoder.h5' checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True, monitor='val_loss') size = int(staticFeatures.shape[0] * 0.9) training_dynamic, training_static = dynamicFeatures[order[:size]], staticFeatures[order[:size]] testing_dynamic, testing_static = dynamicFeatures[order[size:]], 
staticFeatures[order[size:]] print(training_dynamic.shape, training_static.shape) print(testing_dynamic.shape, testing_static.shape) model.fit([training_dynamic, training_static], [training_dynamic, training_static], epochs=10, batch_size=64, callbacks=[early_stopping, checkpoint], validation_data=([testing_dynamic, testing_static], [testing_dynamic, testing_static])) lr = 0.001 model = prepare_model(staticFeaturesStandard.shape[1], 1, [64,64], dynamicFeatures.shape[1], dynamicFeatures.shape[2], lr, ['binary_crossentropy', 'mean_absolute_error'], True) fit(staticFeaturesStandard, dynamicFeatures, model) dynamicFeatures[0,:,:].reshape(-1, dynamicFeatures.shape[1], dynamicFeatures.shape[2]) staticFeaturesStandard[0,:].reshape(-1, staticFeaturesStandard.shape[1]).shape prediction = model.predict([dynamicFeatures[0,:,:].reshape(-1, dynamicFeatures.shape[1], dynamicFeatures.shape[2]), staticFeaturesStandard[0,:].reshape(-1, staticFeaturesStandard.shape[1]) ]) prediction[0] import SmilesEnumeratorImporting Librariesimport pandas as pd import numpy as np import sklearn # Visualization Libraies import matplotlib.pyplot as plt import seaborn as sns from pandas.plotting import scatter_matrix # to split the dataset into test and train from sklearn.model_selection import train_test_split # For Custom Transformer from sklearn.base import BaseEstimator, TransformerMixin # Transformers for the Pipeline from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import StandardScaler import category_encoders from category_encoders import OrdinalEncoder, OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer # Evaluation metrics from sklearn.metrics import accuracy_score # Cross Validation libraries from sklearn.model_selection import cross_val_score # Predictors from sklearn.ensemble import VotingClassifier from sklearn.linear_model import SGDClassifier from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.linear_model import LogisticRegression # For deployment import joblib # pickle the pipeline from joblib import dumpLoading in the Dataset source: https://www.kaggle.com/kemical/kickstarter-projects/notebooks?sortBy=dateRun&group=upvoted&pageSize=20&datasetId=4104# To view all the columns pd.set_option('display.max_columns', None) df = pd.read_csv('kickstarter.csv', parse_dates=['deadline', 'launched']) df.head(5)Ideas:- delete ID- feature engineering (deadline and launched)- Consider top five countries- Only consider failed or successful and make it a binary classifictaion- Will keep all the 15 main categories- For category we could pass it to the ordinal encoding EDAtotal_nan = df.isna().sum().sort_values(ascending=False) percentage_nan = (total_nan / df.shape[0]) * 100 tabel = pd.concat([total_nan, percentage_nan], axis=1, keys=['Total NAN', 'Percentage of NAN']) tabel df.state.value_counts() # filtering out the dataset for binary target variable - failed / successful df = df.loc[(df['state'] == 'failed') | (df['state'] == 'successful')] df.state.value_counts() # Imbalance classification but not worrisome successful = 131490/(192871+131490) successful # changing the target variable to 0 and 1 df['state'] = df['state'].map({'failed':0, 'successful':1}) df['state'].value_counts()Checking for leakagedf.loc[df['usd_pledged_real'] < df['usd_goal_real']]['state'].value_counts() # checking for backers df.loc[df['backers'] > 70]['state'].value_counts() df.loc[df['usd_pledged_real'] > 
25000]['state'].value_counts() df.loc[df['usd_pledged_real'] > 25000].shapeThere is clearly a data leakage here. Deleting verticals that we don't need - Dropping USD pledged and pledged as usd_pledged_real has the same information- Dropping country as currency and country are highly correlated- Dropping goal as we have goal converted in USD as a vertical- Dropping backers anddf = df.drop(['usd pledged', 'pledged', 'country', 'goal', 'ID', 'usd_pledged_real', 'backers'], axis=1) df.shape # dropping 4 nan values in name df.dropna(axis=0, inplace=True) df.info() Int64Index: 331672 entries, 0 to 378660 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 name 331672 non-null object 1 category 331672 non-null object 2 main_category 331672 non-null object 3 currency 331672 non-null object 4 deadline 331672 non-null datetime64[ns] 5 launched 331672 non-null datetime64[ns] 6 state 331672 non-null int64 7 usd_goal_real 331672 non-null float64 dtypes: datetime64[ns](2), float64(1), int64(1), object(4) memory usage: 22.8+ MBChecking high cardinalitycols = ['category', 'main_category', 'currency'] for col in cols: print(f"{col} has {df[col].nunique()} unique values") # Either we can delete category vertical or ordinal encode it! df.category.value_counts() # Lets consider top 5 currencies and delete the rest df.currency.value_counts() # Filtering out top 5 currencies df = df.loc[(df['currency'] == 'USD') | (df['currency'] == 'GBP') | (df['currency'] == 'EUR') \ | (df['currency'] == 'CAD') | (df['currency'] == 'AUD')] # changing the currency names into countries so it will be easier to ask a user for input df['currency'] = df['currency'].map({ 'USD':'USA', 'GBP':'UK', 'EUR':'Europe', 'CAD':'Canada', 'AUD': 'Australia'}) df.currency.value_counts() # reset index as we deleted a few rows when we filtered df.reset_index(drop=True, inplace=True)Feature Engineering - calculating the length of the campaign from deadline and launched - calculating the total number of words in name# making a new column length_days df['length_days'] = (df['deadline'] - df['launched']).dt.days + 1 # deleting deadline and launched cols df = df.drop(['deadline', 'launched'], axis=1) # Total number of words in each row in name df['name'] = df['name'].str.split().str.len()Baseline Model# we need our model to beat this score baseline = 1 - successful baselineSplit Dataset# Feature Matrix and Target Variable X = df.drop('state', axis=1) y = df['state'] # Splitting the dataset into train and test dataset X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=105) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) X_train.head()Pipeline# looking at numerical attributes for simple imputer with median num_attribs = X_train.select_dtypes(exclude='object') num_attribs.columns # looking at categorical attributes for simple imputer with 'most_frequent' cat_attribs = X_train.select_dtypes(include='object') cat_attribs.columns # making seperate list of cols for ohe and ordinal encoding cat_attribs_ohe = X_train[['main_category', 'currency']] cat_attribs_ohe.columns cat_attribs_ord = X_train[['category']] cat_attribs_ord.columns """Building a custom Transformer that will give the output in a dataframe after applying the simple imputer so we could pass it to the categorical_encoders which does not accept np.array""" class ImputerDF(BaseEstimator, TransformerMixin): def __init__(self): self.imputer = SimpleImputer(strategy='most_frequent') 
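# self.cols will remember the incoming column names: SimpleImputer returns a plain
# numpy array, so transform() uses the stored names to rebuild a DataFrame that the
# category_encoders transformers further down the pipeline can accept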
self.cols = [] def fit(self, X, y=None): self.imputer.fit(X) self.cols = list(X.columns) return self def transform(self, X): X_t = self.imputer.transform(X) return pd.DataFrame(X_t, columns=self.cols) # Using median as the strategy for Simple Imputer to predict NaN values num_pipeline = make_pipeline( SimpleImputer(strategy='median'), StandardScaler() ) """ Adding the Custom Transformer to impute using 'most_frequent' strategy and giving out an output as a dataframe instead of an array """ cat_pipeline = make_pipeline( ImputerDF(), OrdinalEncoder(cols = cat_attribs_ord), OneHotEncoder(cols = cat_attribs_ohe) ) # generating a list of categorical and numerical columns to pass it in the column transformer cat_attributes = list(cat_attribs) num_attributes = list(num_attribs) # putting two pipelines together using ColumnTransformer final_pipeline = ColumnTransformer([ ('num_pipeline', num_pipeline, num_attributes), ('cat_pipeline', cat_pipeline, cat_attributes) ]) # Fit and trasnform on X_train X_train_transformed = final_pipeline.fit_transform(X_train) print(X_train_transformed.shape)(259486, 24)Model Selection- Ran a few simple models and picked the most generalized model- Using **Accuracy** as the evaluation metrics as the dataset is balanced. Random Forest Classifierforest_clf = RandomForestClassifier(n_jobs=-1, random_state=105, n_estimators=80, max_depth=11) forest_clf.fit(X_train_transformed,y_train) y_pred_train = forest_clf.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_forest = cross_val_score(forest_clf, X_train_transformed, y_train, scoring='accuracy', cv=10, n_jobs=-1) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_forest.mean()} """)Accuracy score on training set: 0.6768110803665708 Accuracy score on validation set: 0.6694195546845952Logistic Regressionlr = LogisticRegression(max_iter=10000) lr.fit(X_train_transformed, y_train) y_pred_train = lr.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_lr = cross_val_score(lr, X_train_transformed, y_train, scoring='accuracy', cv=10, n_jobs=-1) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_lr.mean()} """)Accuracy score on training set: 0.6494647110056034 Accuracy score on validation set: 0.6493375460133903Gradient Boost Classifiergb = GradientBoostingClassifier() gb.fit(X_train_transformed, y_train) y_pred_train = gb.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_gb = cross_val_score(gb, X_train_transformed, y_train, scoring='accuracy', cv=10) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_gb.mean()} """)Accuracy score on training set: 0.6755085052758145 Accuracy score on validation set: 0.6754044641751807SGD Classifiersgd = SGDClassifier(n_jobs=-1) sgd.fit(X_train_transformed, y_train) y_pred_train = sgd.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_sgd = cross_val_score(sgd, X_train_transformed, y_train, scoring='accuracy', cv=10) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_sgd.mean()} """)Accuracy score on training set: 0.6419267320780312 Accuracy score on validation set: 0.6177520792748281Voting Classifiers using soft voting.lr = LogisticRegression(max_iter=10000) forest_clf = RandomForestClassifier(n_jobs=-1, random_state=105, n_estimators=60, max_depth=10) sgd = SGDClassifier(loss="modified_huber", 
n_jobs=-1) gb = GradientBoostingClassifier() voting_clf = VotingClassifier( estimators=[("lr",lr), ("rf",forest_clf), ("sgd",sgd), ("gb", gb)], voting='soft', n_jobs=-1) voting_clf.fit(X_train_transformed, y_train) y_pred_train = voting_clf.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_voting_soft = cross_val_score(voting_clf, X_train_transformed, y_train, scoring='accuracy', cv=10, n_jobs=-1) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_voting_soft.mean()} """)Accuracy score on training set: 0.6534649268168611 Accuracy score on validation set: 0.6525014962618656Voting Classifier using hard voting.lr = LogisticRegression(max_iter=10000) forest_clf = RandomForestClassifier(n_jobs=-1, random_state=105, max_depth=15) sgd = SGDClassifier(n_jobs=-1) gb = GradientBoostingClassifier() voting_clf_hard = VotingClassifier( estimators=[("lr",lr), ("rf",forest_clf), ("sgd",sgd), ("gb", gb)], voting='hard', n_jobs=-1) voting_clf_hard.fit(X_train_transformed, y_train) y_pred_train = voting_clf_hard.predict(X_train_transformed) score = accuracy_score(y_train, y_pred_train) score_voting_hard = cross_val_score(voting_clf_hard, X_train_transformed, y_train, scoring='accuracy', cv=10, n_jobs=-1) print(f""" Accuracy score on training set: {score}\n\n Accuracy score on validation set: {score_voting_hard.mean()} """)Accuracy score on training set: 0.6660166637121078 Accuracy score on validation set: 0.6634885523246815Selecting the most generalized modelfig = plt.figure(figsize=(10, 4), facecolor='#dadada') plt.plot([1] * 10, score_forest, ".") plt.plot([2] * 10, score_lr, ".") plt.plot([3] * 10, score_gb, ".") plt.plot([4] * 10, score_sgd, ".") plt.plot([5] * 10, score_voting_soft, ".") plt.plot([6] * 10, score_voting_hard, ".") plt.boxplot( [score_forest, score_lr, score_gb, score_sgd, score_voting_soft, score_voting_hard], labels=("Random Forest", "Logistic Reg", 'Grad Boost', 'Stoch Grad Descent', 'Soft Voting Clf', 'Hard Voting Clf')) plt.ylabel("Accuracy Score", fontsize=14) plt.title('Algorithm Comparison') # Remove the splines plt.gca().spines["top"].set_visible(False) plt.gca().spines["bottom"].set_visible(False) plt.gca().spines["right"].set_visible(False) plt.gca().spines["left"].set_visible(False) plt.tight_layout() # Makes it better looking on laptops plt.show()Evaluating our model on Test set I picked the gradient boost classifer as it is the most generalized model.# transforming X_test X_test_transformed = final_pipeline.transform(X_test) y_test_pred =gb.predict(X_test_transformed) score = accuracy_score(y_test, y_test_pred) print(f""" Accuracy score on the TRAINING set using gb Classifier: {score_gb.mean()}\n\n Accuracy score on TEST set using gb Classifier: {score} """)Accuracy score on the TRAINING set using gb Classifier: 0.6754044641751807 Accuracy score on TEST set using gb Classifier: 0.6757306696263411Final Pipeline for Deployementdeploy_pipeline = make_pipeline( final_pipeline, gb) # Final Pick # Fitting the Pipeline deploy_pipeline.fit(X_train, y_train) # Pickle the Pipeline using joblib dump(deploy_pipeline, 'pipeline.joblib', compress=True)Get version of the packages used in the pipeline so we could then install all those packages in virtual envprint(f'joblib=={joblib.__version__}') print(f'scikit-learn=={sklearn.__version__}') print(f'category_encoders=={category_encoders.__version__}')joblib==0.16.0 scikit-learn==0.23.1 category_encoders==2.2.2              Solutions for Coin Flip: A Probabilistic 
Bit Task 1: Simulating FairCoin in PythonFlip a fair coin 100 times. Calculate the total number of heads and tails, and then check the ratio of the number of heads and the number of tails.Do the same experiment 1000 times.Do the same experiment 10,000 times.Do the same experiment 100,000 times.Do your results get close to the ideal case (the numbers of heads and tails are equal)? Solutionfrom random import randrange for experiment in [100,1000,10000,100000]: heads = tails = 0 for i in range(experiment): if randrange(2) == 0: heads = heads + 1 else: tails = tails + 1 print("experiment:",experiment) print("heads =",heads," tails = ",tails) print("the ratio of #heads/#tails is",(round(heads/tails,4))) print() # empty lineTask 2: Simulating BiasedCoin in PythonFlip the following biased coin 100 times. Calcuate the total numbers of heads and tails, and then check the ratio of the number of heads and the number of tails.$BiasedCoin = \begin{array}{c|cc} \hookleftarrow & \mathbf{Head} & \mathbf{Tail} \\ \hline \mathbf{Head} & 0.6 & 0.6 \\ \mathbf{Tail} & 0.4 & 0.4 \end{array}$Do the same experiment 1000 times.Do the same experiment 10,000 times.Do the same experiment 100,000 times.Do your results get close to the ideal case $ \mypar{ \dfrac{ \mbox{ of heads} }{ \mbox{ of tails} } = \dfrac{0.6}{0.4} = 1.50000000 } $? Solutionfrom random import randrange # let's pick a random number between {0,1,...,99} # it is expected to be less than 60 with probability 0.6 # and greater than or equal to 60 with probability 0.4 for experiment in [100,1000,10000,100000]: heads = tails = 0 for i in range(experiment): if randrange(100) <60: heads = heads + 1 # with probability 0.6 else: tails = tails + 1 # with probability 0.4 print("experiment:",experiment) print("heads =",heads," tails = ",tails) print("the ratio of #heads/#tails is",(round(heads/tails,4))) print() # empty lineTask 3 Write a function to implement the described biased coin,The inputs are integers $N>0$ and $ B \in \{0,\ldots,N\} $.The output is either "Heads" or "Tails". Solutiondef biased_coin(N,B): from random import randrange random_number = randrange(N) if random_number < B: return "Heads" else: return "Tails"Task 4We use the biased coin described in Task 3. (You may use the function given in the solution.)We pick $ N $ as 101.Our task is to determine the value of $ B $ experimentially without checking its value directly.Flip the (same) biased coin 500 times, collect the statistics, and then guess the bias.Compare your guess with the actual bias by calculating the relative error in percentage (the absolute value of the difference divided by the real bias). 
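Written out, the quantity the task asks for is, with $\hat{p}$ denoting the experimental estimate, $\hat{p} = \dfrac{\text{number of heads}}{500}$ and $\text{error}(\%) = \dfrac{|\hat{p} - B/N|}{B/N} \times 100$.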
Solutiondef biased_coin(N,B): from random import randrange random_number = randrange(N) if random_number < B: return "Heads" else: return "Tails" from random import randrange N = 101 B = randrange(N+1) total_tosses = 500 the_number_of_heads = 0 for i in range(total_tosses): if biased_coin(N,B) == "Heads": the_number_of_heads = the_number_of_heads + 1 my_guess = the_number_of_heads/total_tosses real_bias = B/N error = abs(my_guess-real_bias)/real_bias*100 print("my guess is",my_guess) print("real bias is",real_bias) print("error (%) is",error)Kernelized Regressionnp.random.seed(1) x = np.random.uniform(0, 10, size=20) y = np.random.normal(np.sin(x), 0.2) plt.plot(x, y, 'o') plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0); def gauss_kernel(x, n_knots): """ Simple Gaussian radial kernel """ knots = np.linspace(x.min(), x.max(), n_knots) w = 2 return np.array([np.exp(-(x-k)**2/w) for k in knots]) n_knots = 5 with pm.Model() as kernel_model: gamma = pm.Cauchy('gamma', alpha=0, beta=1, shape=n_knots) sd = pm.Uniform('sd',0, 10) mu = pm.math.dot(gamma, gauss_kernel(x, n_knots)) yl = pm.Normal('yl', mu=mu, sd=sd, observed=y) kernel_trace = pm.sample(10000, chains=1,njobs=1) chain = kernel_trace[5000:] pm.traceplot(chain); pm.summary(chain) ppc = pm.sample_posterior_predictive(chain, model=kernel_model, samples=100) plt.plot(x, ppc['yl'].T, 'ro', alpha=0.1); plt.plot(x, y, 'bo'); plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0); new_x = np.linspace(x.min(), x.max(), 100) k = gauss_kernel(new_x, n_knots) gamma_pred = chain['gamma'] for i in range(100): idx = np.random.randint(0, len(gamma_pred)) # grap a random set of gammas from the MCMC chain # e.g. gamma_pred[3642]=[-0.04 0.93 -0.97 0.32 0.05] # to get an idea of the uncertainty y_pred = np.dot(gamma_pred[idx], k) plt.plot(new_x, y_pred, 'r-', alpha=0.1) plt.plot(x, y, 'bo') plt.xlabel('$x$', fontsize=16) plt.ylabel('$f(x)$', fontsize=16, rotation=0);Gaussian Processessquared_distance = lambda x, y: np.array([[(x[i] - y[j])**2 for i in range(len(x))] for j in range(len(y))]) np.random.seed(1) test_points = np.linspace(0, 10, 100) cov = np.exp(-squared_distance(test_points, test_points)) plt.plot(test_points, stats.multivariate_normal.rvs(cov=cov, size=6).T); plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0); np.random.seed(1) eta = 1 rho = 0.5 sigma = 0.03 D = squared_distance(test_points, test_points) cov = eta * np.exp(-rho * D) diag = eta * sigma np.fill_diagonal(cov, diag) for i in range(6): plt.plot(test_points, stats.multivariate_normal.rvs(cov=cov)) plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0); np.random.seed(1) # K_{**} K_oo = eta * np.exp(-rho * D) D_x = squared_distance(x, x) # K K = eta * np.exp(-rho * D_x) diag_x = eta + sigma np.fill_diagonal(K, diag_x) D_off_diag = squared_distance(x, test_points) # K_{*} K_o = eta * np.exp(-rho * D_off_diag) # Posterior mean mu_post = np.dot(np.dot(K_o, np.linalg.inv(K)), y) # Posterior covariance SIGMA_post = K_oo - np.dot(np.dot(K_o, np.linalg.inv(K)), K_o.T) for i in range(100): fx = stats.multivariate_normal.rvs(mean=mu_post, cov=SIGMA_post) plt.plot(test_points, fx, 'r-', alpha=0.1) plt.plot(x, y, 'o') plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0);Posterior of GP model using Cholesky decompositionnp.random.seed(1) eta = 1 rho = 0.5 sigma = 0.03 # This is the true unknown function we are trying to approximate f = lambda x: np.sin(x).flatten() # 
Define the kernel def kernel(a, b): """ GP squared exponential kernel """ D = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T) return eta * np.exp(- rho * D) N = 20 # number of training points. n = 100 # number of test points. # Sample some input points and noisy versions of the function evaluated at # these points. X = np.random.uniform(0, 10, size=(N,1)) y = f(X) + sigma * np.random.randn(N) K = kernel(X, X) L = np.linalg.cholesky(K + sigma * np.eye(N)) # points we're going to make predictions at. Xtest = np.linspace(0, 10, n).reshape(-1,1) # compute the mean at our test points. Lk = np.linalg.solve(L, kernel(X, Xtest)) mu = np.dot(Lk.T, np.linalg.solve(L, y)) # compute the variance at our test points. K_ = kernel(Xtest, Xtest) sd_pred = (np.diag(K_) - np.sum(Lk**2, axis=0))**0.5 plt.fill_between(Xtest.flat, mu - 2 * sd_pred, mu + 2 * sd_pred, color="r", alpha=0.2) plt.plot(Xtest, mu, 'r', lw=2) plt.plot(x, y, 'o') plt.xlabel('$x$', fontsize=16) plt.ylabel('$f(x)$', fontsize=16, rotation=0); with pm.Model() as GP: mu = np.zeros(N) eta = pm.HalfCauchy('eta', 5) rho = pm.HalfCauchy('rho', 5) sigma = pm.HalfCauchy('sigma', 5) D = squared_distance(x, x) K = tt.fill_diagonal(eta * pm.math.exp(-rho * D), eta + sigma) obs = pm.MvNormal('obs', mu, cov=K, observed=y) test_points = np.linspace(0, 10, 100) D_pred = squared_distance(test_points, test_points) D_off_diag = squared_distance(x, test_points) K_oo = eta * pm.math.exp(-rho * D_pred) K_o = eta * pm.math.exp(-rho * D_off_diag) mu_post = pm.Deterministic('mu_post', pm.math.dot(pm.math.dot(K_o, tt.nlinalg.matrix_inverse(K)), y)) SIGMA_post = pm.Deterministic('SIGMA_post', K_oo - pm.math.dot(pm.math.dot(K_o, tt.nlinalg.matrix_inverse(K)), K_o.T)) #start = pm.find_MAP() trace = pm.sample(1000, chains=1,njobs=1) varnames = ['eta', 'rho', 'sigma'] chain = trace[100:] pm.traceplot(chain, varnames); pm.summary(chain, varnames).round(4) y_pred = [np.random.multivariate_normal(m, S) for m,S in zip(chain['mu_post'][::5], chain['SIGMA_post'][::5])] for yp in y_pred: plt.plot(test_points, yp, 'r-', alpha=0.1) plt.plot(x, y, 'bo'); plt.xlabel('$x$', fontsize=16); plt.ylabel('$f(x)$', fontsize=16, rotation=0);Periodic Kernelperiodic = lambda x, y: np.array([[np.sin((x[i] - y[j])/2)**2 for i in range(len(x))] for j in range(len(y))]) with pm.Model() as GP_periodic: mu = np.zeros(N) eta = pm.HalfCauchy('eta', 5) rho = pm.HalfCauchy('rho', 5) sigma = pm.HalfCauchy('sigma', 5) P = periodic(x, x) K = tt.fill_diagonal(eta * pm.math.exp(-rho * P), eta + sigma) obs = pm.MvNormal('obs', mu, cov=K, observed=y) test_points = np.linspace(0, 10, 100) D_pred = periodic(test_points, test_points) D_off_diag = periodic(x, test_points) K_oo = eta * pm.math.exp(-rho * D_pred) K_o = eta * pm.math.exp(-rho * D_off_diag) mu_post = pm.Deterministic('mu_post', pm.math.dot(pm.math.dot(K_o, tt.nlinalg.matrix_inverse(K)), y)) SIGMA_post = pm.Deterministic('SIGMA_post', K_oo - pm.math.dot(pm.math.dot(K_o, tt.nlinalg.matrix_inverse(K)), K_o.T)) start = pm.find_MAP() trace = pm.sample(1000, start=start,chains=1,njobs=1) varnames = ['eta', 'rho', 'sigma'] chain = trace[100:] pm.traceplot(chain, varnames); y_pred = [np.random.multivariate_normal(m, S) for m,S in zip(chain['mu_post'][::5], chain['SIGMA_post'][::5])] for yp in y_pred: plt.plot(test_points, yp, 'r-', alpha=0.1) plt.plot(x, y, 'bo') plt.xlabel('$x$', fontsize=16) plt.ylabel('$f(x)$', fontsize=16, rotation=0); import sys, IPython, scipy, matplotlib, platform print("This notebook was created on a computer %s 
running %s and using:\nPython %s\nIPython %s\nPyMC3 %s\nNumPy %s\nSciPy %s\nPandas %s\nMatplotlib %s\nSeaborn %s\n" % (platform.machine(), ' '.join(platform.linux_distribution()[:2]), sys.version[:5], IPython.__version__, pm.__version__, np.__version__, scipy.__version__, pd.__version__, matplotlib.__version__, sns.__version__))This notebook was created on a computer x86_64 running debian buster/sid and using: Python 3.7.2 IPython 7.2.0 PyMC3 3.6 NumPy 1.16.0 SciPy 1.2.0 Pandas 0.23.4 Matplotlib 3.0.2 Seaborn 0.9.0Volumetrics: HCIP calculationWe'll implement the volumetric equation:$$ V = A \times T \times G \times \phi \times N\!\!:\!\!G \times S_\mathrm{O} \times \frac{1}{B_\mathrm{O}} $$ Gross rock volume $$ \mathrm{GRV} = A \times T $$thick = 80 # metres area = 20000 * 30000 # metres grv = thick * area grvWouldn't it be cool if we could carry units around with our calculations? With [`pint`](https://pint.readthedocs.io/en/latest/index.html), we can!import pint unit = pint.UnitRegistry() thick = 80 * unit.m area = 20000 * unit.m * 30000 * unit.m grv = thick * area grvMake a function that computes the GRV.def calculate_grv(thickness, area): return thickness * areaNow we can just call this function, instead of remembering the equation. (Admittedly, the equation is rather easy to remember in this case!)calculate_grv(thick, area)It works!Now we need to compensate for the prospect not being a flat slab of rock — using the geometric factor. We will implement the equations implied by this diagram:top = input("What shape is the prospect? ") height = 100 * unit.m ratio = thick / height if top == 'round': g = -0.6 * ratio + 1 if top == 'flat': g = -0.3 * ratio + 1 if top == 'slab': g = 1 gExerciseTurn this one into a function.def geometric_factor(thick, height, top='slab'): # Your code here return g def geometric_factor(thick, height, top='slab'): ratio = thick / height if top == 'round': g = -0.6 * ratio + 1 elif top == 'flat': g = -0.3 * ratio + 1 else: g = 1 return g geometric_factor(thick, height=100*unit.m, top='round') grv *= gWhat if we have multiple prospects?import numpy as np thicknesses = np.array([10, 25, 15, 5, 100]) * unit.m heights = np.array([75, 100, 20, 100, 200]) * unit.m geometric_factor(thicknesses, heights, top='round')It works! HC pore volumeWe need:- net:gross — the ratio of reservoir-quality rock thickness to the total thickness of the interval.- porosity- $S_\mathrm{O}$ — the oil saturation, or proportion of oil to total pore fluid.netg = 0.5 # fraction por = 0.24 # fraction s_o = 0.8 # fraction hcpv = netg * por * s_o hcpvWe'll leave that as a fraction for now. EXERCISE- Turn this into a function by rearranging the following lines of code:"""A function to compute the hydrocarbon pore volume.""" return hcpv hcpv = netg * por * s_o def calculate_hcpv(netg, por, s_o): # Put your code here: def calculate_hcpv(netg, por, s_o): """A function to compute the hydrocarbon pore volume.""" hcpv = netg * por * s_o return hcpvFormation volume factorOil shrinks when we produce it, especially if it has high GOR. The FVF, or $B_O$, is the ratio of a reservoir barrel to a stock-tank barrel (25 deg C and 1 atm). Typically the FVF is between 1 (heavy oil) and 1.7 (high GOR).fvf = 1.1EXERCISEFor gas, $B_\mathrm{G}$ is $0.35 Z T / P$, where $Z$ is the correction factor, or gas compressibility factor. $T$ should be in kelvin and $P$ in kPa. 
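As a quick sanity check on the formula: with $Z = 1$, $T = 293\ \mathrm{K}$ and $P = 1000\ \mathrm{kPa}$, $B_\mathrm{G} = 0.35 \times 1 \times 293 / 1000 \approx 0.10$.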
$Z$ is usually between 0.8 and 1.2, but it can be as low as 0.3 and as high as 2.0.Can you write a function to calculate $B_\mathrm{G}$?def calculate_Bg( ): # Add the arguments. """Write a docstring.""" return # Don't forget to return something! def calculate_Bg(Z=1, T=273.15, P=101.325): """ Compute B_G from correction factor Z, temperature T (K), and pressure P (kPa). """ Bg = 0.35 * Z * T / P return Bg calculate_Bg(T=293*unit.K, P=1000*unit.kPa)Put it all togetherNow we have the components of the volumetric equation:hcip = grv * hcpv / fvf hcipPint can convert to other units, e.g. Imperial barrels, for us.hcip.to('imperial_barrel')An Imperial barrel is 43 gallons ([Wikipedia](https://en.wikipedia.org/wiki/Barrel_(unit)), whereas an oil barrel is only 42 gallons. [For more on conversion to bbl, BOE, etc.](https://en.wikipedia.org/wiki/Barrel_of_oil_equivalent).So let's define a custom unit:unit.define('oil_barrel = 42 gallon = bbl') hcip.to('bbl')EXERCISECan you write a function to compute the volume (i.e. the HCIP), given all the inputs?Try to use the functions you have already written.# Put your code here. # Solution. def calculate_hcip(thickness, area, height, top, netg, por, s_o, fvf): grv = calculate_grv(thickness, area) g = geometric_factor(thickness, height, top) grv *= g hcpv = calculate_hcpv(netg, por, s_o) return grv * hcpv / fvf calculate_hcip(thick, area, height, top, netg, por, s_o, fvf)Monte Carlo simulationWe can easily draw randomly from distributions of properties:- Normal: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html- Uniform: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html- Lognormal: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.lognormal.htmlimport numpy as np netg = np.random.normal(loc=0.5, scale=0.1, size=100) por = np.random.normal(loc=0.15, scale=0.025, size=100) import matplotlib.pyplot as plt _ = plt.hist(por) import seaborn as sns sns.distplot(por, rug=True) hcpv = calculate_hcpv(netg, por, s_o) hcpv _ = plt.hist(hcpv, bins=20) hcpv.mean()The histogram looks a bit ragged, but this is probably because of the relatively small number of samples. EXERCISE1. Compute HCIP with these distributions. Make a histogram of the result in millions of barrels.1. How does the histogram look if you take 1000 samples instead of 100?1. Make distributions for some of the other properties, like thickness and FVF.1. Maybe our functions should check that we don't get unreasonable values, like negative numbers, or decimal fractions over 1.0 Try to implement this if you have time.hcip = calculate_hcip(thick, area, height, top, netg, por, s_o, fvf) _ = plt.hist(hcip.to('Mbbl'), bins=20)/Users/matt/opt/miniconda3/envs/geocomp/lib/python3.8/site-packages/numpy/core/_asarray.py:136: UnitStrippedWarning: The unit of the quantity is stripped when downcasting to ndarray. return array(a, dtype, copy=False, order=order, subok=True)Using scipy's distributions`scipy` has some distribution functions too. While they're a bit more complicated to use than NumPy's, they are more rigorous. 
For example, they allow us to fit to data.Following [this post](https://mikulskibartosz.name/monte-carlo-simulation-in-python-d63f0cfcdf6f)from scipy.stats import norm mean = 3 standard_deviation = 2 normal_distribution = norm(loc=mean, scale=standard_deviation) x = np.linspace(-6, 12, 200) _, ax = plt.subplots() ax.plot(x, normal_distribution.pdf(x), '-', lw=2) plt.title(f'Normal distribution with mean {mean} and standard deviation {standard_deviation}') plt.show() number_of_simulations = 20 normal_distribution.rvs(number_of_simulations)Let's fit a log-normal distribution to some porosity data.import pandas as pd uid = "1QcSw_xRAYgJzD9HsIXNjmS7o4Zb6qkRBgIWhmp4f2mI" url = f"https://docs.google.com/spreadsheets/d/{uid}/export?format=csv" df = pd.read_csv(url) df.head() for name, group in df.groupby('Gross environment'): plt.scatter(group.Porosity, np.log10(group.Permeability), label=name) plt.legend() from scipy.stats import lognorm deltaic = df.loc[df['Gross environment']=='Deltaic'] s, loc, scale = lognorm.fit(deltaic.Porosity) s, loc, scale x = np.linspace(0, 40, 200) _, ax = plt.subplots() plt.hist(df.Porosity, bins=80, range=(0, 80)) ax2 = ax.twinx() ax2.plot(x, lognorm.pdf(x, loc=loc, scale=scale, s=s), 'r') plt.show()Now we can use this in our calculation:hcip = calculate_hcip(thick, area, height, top, netg, por, s_o, fvf).magnitude sns.distplot(hcip) plt.title('100 realizations') plt.xlabel('$\mathrm{HCIP\ in\ billion\ m^3}$') plt.show() print(f"P10 is {np.percentile(hcip, 10):.2e} Sm³") print(f"P50 is {np.median(hcip):.2e} Sm³") print(f"mean is {np.mean(hcip):.2e} Sm³") print(f"P90 is {np.percentile(hcip, 90):.2e} Sm³")P10 is 1.73e+09 Sm³ P50 is 2.45e+09 Sm³ mean is 2.49e+09 Sm³ P90 is 3.30e+09 Sm³Compute on a DataFrameSuppose we have a spreadsheet of prospect data:import pandas as pd uid = "1P2JxXG_jLZ0vx8BlFvm0hD6sBBZH2zU8tk9T-SI27mE" url = f"https://docs.google.com/spreadsheets/d/{uid}/export?format=csv" df = pd.read_csv(url) df.head()We have to map the row contents to our HCIP function:names = { 'thickness': 'Thick [m]', 'area': 'Area [km2]', 'netg': 'N:G', 'por': 'phi', 's_o': 'So', 'fvf': 'Bo' } def hcip_row(row): params = {k: row[v] for k, v in names.items()} params['height'] = 1e6 params['top'] = 'slab' hcip = calculate_hcip(**params) * row['GeomFactor'] return hcip df['HCIP'] = df.apply(hcip_row, axis=1) df.head()To mmtf.MMTFDecoderfrom molsysmt.tools import file_mmtf #file_mmtf.to_mmtf_MMTFDecoder(item)scikit learn for Prediction scikit learn for Subsurface Modeling in Python , Associate Professor, University of Texas at Austin [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy) PGE 383 Exercise: scikit learn for Subsurface Modeling in Python Here's a simple workflow, demonstration of scikit learn for subsurface modeling workflows. This should help you get started with building subsurface models that data analytics and machine learning. Here's some basic details about scikit learn. 
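As a quick preview of the pattern every model below follows (instantiate, fit, predict, cross validate), here is a minimal, self-contained sketch on throwaway random data; the array names and sizes are placeholders and are unrelated to the unconventional dataset used later.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
rng = np.random.default_rng(0)
X_demo = rng.uniform(0, 1, size=(50, 2))                         # placeholder predictors
y_demo = X_demo @ np.array([2.0, -1.0]) + 0.1 * rng.normal(size=50)  # placeholder response
demo_model = LinearRegression()                                  # 1. instantiate
demo_model.fit(X_demo, y_demo)                                   # 2. fit
y_hat = demo_model.predict(X_demo)                               # 3. predict
scores = cross_val_score(demo_model, X_demo, y_demo, cv=5)       # 4. cross validation
print(y_hat[:3], scores.mean())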
scikit learnDemonstration of scikit learn for machine learning.In this workflow we demonstrate the plug and play nature of scikit learn machine learning models. For an unconventional dataset we demonstrate the following steps: 1. instantiation 2. fitting 3. prediction 4. cross validationWe will work with the following regression methods:* linear regression* multilinear regression* decision tree regression* support vector machine regression Getting StartedHere's the steps to get setup in Python with the GeostatsPy package:1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). 2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. 3. In the terminal type: pip install geostatspy. 4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. There are examples below with these functions. You can go here to see a list of the available functions, https://git.io/fh4eX, other example workflows and source code. Import Required PackagesLet's import the GeostatsPy package.import os # to set current working directory import math # basic calculations like square root from sklearn.model_selection import train_test_split # train and test split from sklearn import svm # support vector machine methods from sklearn import tree # tree program from scikit learn (package for machine learning) from sklearn.metrics import mean_squared_error, r2_score # specific measures to check our models import pandas as pd # DataFrames and plotting import pandas.plotting as pd_plot import numpy as np # arrays and matrix math import matplotlib.pyplot as plt # plotting from intake import cat # data catalogueDeclare functionsLet's define a couple of functions to streamline plotting correlation matrices and visualization of a machine learning regression model responce over the 2 predictor features.def plot_corr(dataframe,size=10): # plots a graphical correlation matrix corr = dataframe.corr() fig, ax = plt.subplots(figsize=(size, size)) im = ax.matshow(corr,vmin = -1.0, vmax = 1.0) plt.xticks(range(len(corr.columns)), corr.columns); plt.yticks(range(len(corr.columns)), corr.columns); plt.colorbar(im, orientation = 'vertical') plt.title('Correlation Matrix') def visualize_model(model,xfeature,yfeature,response,title,):# plots the data points and the decision tree prediction n_classes = 10 cmap = plt.cm.RdYlBu plot_step = 0.02 x_min, x_max = min(xfeature) - 1, max(xfeature) + 1 y_min, y_max = min(yfeature) - 1, max(yfeature) + 1 resp_min = round(min(response)); resp_max = round(max(response)); xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) z_min = round(min(response)); z_max = round(max(response)) Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap,vmin=z_min, vmax=z_max) im = plt.scatter(xfeature,yfeature,s=None, c=response, marker=None, cmap=cmap, norm=None, vmin=z_min, vmax=z_max, alpha=0.8, linewidths=0.3, verts=None, edgecolors="black") plt.title(title) plt.xlabel(xfeature.name) plt.ylabel(yfeature.name) cbar = plt.colorbar(im, orientation = 'vertical') cbar.set_label(response.name, rotation=270, labelpad=20) return(plt)Read the data tablemy_data = cat.unconv_MV().read() # load the comma delimited data fileLet's visualize the first several rows of our data stored in a DataFrame so we can make sure we successfully loaded the 
data file.my_data.head() # preview the first 5 rows of the dataframeLet's remove the well index and check the summary summary statistics.my_data = my_data.iloc[:,1:8] # copy all rows and columns 1 through 8, note 0 column is removed my_data.describe().transpose() # calculate summary statistics for the dataIt is good that we checked the summary statistics, because we have some negative values for brittleness and total organic carbon. The is physically imposible. The values must be in error. We know the lowest possible values are 0.0, so we will truncate on 0.0. We use the *get_numerical_data()* DataFrame member function to get a shallow copy of the data from the DataFrame. Since it is a shallow copy, any changes we make to the copy are made to the data in the original DataFrame. This allows us to apply this simple conditional statement to all the data values in the DataFrame all at once.num = my_data._get_numeric_data() # get the numerical values num[num < 0] = 0 # truncate negative values to 0.0 my_data.describe().transpose() # calculate summary statistics for the dataThis dataset has variables from 1,000 unconventional wells including well average porosity, log transform of permeability (to linearize the relationships with other variables), accoustic impedance (kg/m2s*10^6), brittness ratio (%), total organic carbon (%), vitrinite reflectance (%), and initial production 90 day average (MCFPD). Note, the dataset is synthetic. Calculate the correlation matrix For multivariate analysis it is a good idea to check the correlation matrix. We can calculate it and view it in the console with these commands.corr_matrix = np.corrcoef(my_data, rowvar = False) print(np.around(corr_matrix,2)) # print the correlation matrix to 2 decimals[[ 1. 0.81 -0.51 -0.25 0.71 0.08 0.69] [ 0.81 1. -0.32 -0.15 0.51 0.05 0.57] [-0.51 -0.32 1. 0.17 -0.55 0.49 -0.33] [-0.25 -0.15 0.17 1. -0.24 0.3 -0.07] [ 0.71 0.51 -0.55 -0.24 1. 0.31 0.5 ] [ 0.08 0.05 0.49 0.3 0.31 1. 0.14] [ 0.69 0.57 -0.33 -0.07 0.5 0.14 1. ]]Note the 1.0 diagonal resulting from the correlation of each variable with themselves. Let's use our function declared above to make a graphical correlation matrix visualization. This may inprove our ability to spot features. It relies on the built in correlation matrix method with Numpy DataFrames and MatPlotLib for plotting.plot_corr(my_data,10) # using our correlation matrix visualization function plt.show()This looks good. There is a mix of correlation magnitudes. Of course, correlation coeffficients are limited to degree of linear correlations. For more complete information, let's look at the matrix scatter plot from the Pandas package.pd_plot.scatter_matrix(my_data, alpha = 0.1, # pandas matrix scatter plot figsize=(10, 10),color = 'black', hist_kwds={'color':['grey']}) plt.show()Working with Only Two Predictor FeaturesLet's simplify the problem to 2 features, Porosity and Brittleness, to predict one response feature, Production rate. We will also reduce the number of wells from 1,000 to 500. 
By working with only 2 predictor features, it is very easy to visualize the segmentation of the feature space (it is only 2D and the model can be interogated exhaustively on a single plot).X = my_data.iloc[0:500,[0,3]] # extract porosity and brittleness 500 samples to a predictor array y = my_data.iloc[0:500,[6]] # extract production 500 samples to a response arrayLet's check the summary statistics of the predictor and response features.X.describe().transpose() # calculate summary statistics for the data y.describe().transpose() # calculate summary statistics for the dataNow let's withhold 100 samples as testing data and retain the remaining 400 as training data.X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=73073) n_train = len(X_train) n_test = len(X_test) print('Number of training ' + str(n_train) + ', number of test ' + str(n_test))Number of training 400, number of test 100Let's compare the univariate statistics of Porosity, Brittleness and Producton training and testing datasets. * let's check for bias and extrapolation.X_train.describe().transpose() # calculate summary statistics for the data X_test.describe().transpose() # calculate summary statistics for the data y_train.describe().transpose() # calculate summary statistics for the data y_test.describe().transpose() # calculate summary statistics for the dataNow let's plot the training and testing dataset distributions to check coverage and extrapolation.plt.subplot(231) plt.hist(X_train["Por"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Porosity Train Dataset (%)') plt.subplot(232) plt.hist(X_train["Brittle"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Britteness Train Dataset (%)') plt.subplot(233) plt.hist(y_train["Production"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Production Train Dataset (MCFPD)') plt.subplot(234) plt.hist(X_test["Por"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Porosity Test Dataset (%)') plt.subplot(235) plt.hist(X_test["Brittle"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Britteness Test Dataset (%)') plt.subplot(236) plt.hist(y_test["Production"], alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Production Test Dataset (MCFPD)') plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.3) plt.show()The distributions are well behaved, we cannot observe obvious gaps nor truncations. Let's look at a scatter plot of Porosity vs. Brittleness with points colored by Production. * Let's plot the training and testing datasets to check coverage and extrapolation in the features space.plt.subplot(121) im = plt.scatter(X_train["Por"],X_train["Brittle"],s=None, c=y_train["Production"], marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=0.8, linewidths=0.3, verts=None, edgecolors="black") plt.title('Training Data: Production vs. Brittleness and Porosity'); plt.xlabel('Porosity (%)'); plt.ylabel('Brittleness (%)') cbar = plt.colorbar(im, orientation = 'vertical'); plt.xlim(5,25); plt.ylim(0,100) cbar.set_label("Production", rotation=270, labelpad=20) plt.subplot(122) im = plt.scatter(X_test["Por"],X_test["Brittle"],s=None, c=y_test["Production"], marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=0.8, linewidths=0.3, verts=None, edgecolors="black") plt.title('Testing Data: Production vs. 
Brittleness and Porosity'); plt.xlabel('Porosity (%)'); plt.ylabel('Brittleness (%)') cbar = plt.colorbar(im, orientation = 'vertical'); plt.xlim(5,25); plt.ylim(0,100) cbar.set_label("Production", rotation=270, labelpad=20) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.2) plt.show()Building a Linear Regression ModelLet's build our first machine learning model with scikit learn. We will start with linear regression. For this model we will pick one predictor feature and one response feature.# Linear Regression Model with scikit learn from sklearn import linear_model # Step 1. Instantiate the Model linear_reg = linear_model.LinearRegression() # Step 2: Fit the Data on Training Data linear_reg.fit(X_train["Por"].values.reshape(n_train,1), y_train["Production"]) # fit model porosity_model = np.linspace(0.0,30.0,10) # Print the model parameters production_model = linear_reg.predict(porosity_model.reshape(10,1)) # predict with the fit model print('Coefficients: ', str(round(linear_reg.coef_[0],3)) + ', Intercept: ', str(round(linear_reg.intercept_,3))) # Plot model fit plt.figure(figsize=(8,6)) plt.scatter(X_train["Por"].values, y_train["Production"], color='black', s = 20, alpha = 0.3) plt.plot(porosity_model,production_model, color='red', linewidth=1) plt.title('Linear Regression Production from Porosity on Training'); plt.xlabel('Porosity (%)'); plt.ylabel('Production (MCFPD)') plt.xlim(5,25)#; plt.ylim(0,1500000) # Step 3: - Make predictions using the testing dataset y_pred = linear_reg.predict(X_test['Por'].values.reshape(n_test,1)) # Report the goodness of fit print('Variance explained: %.2f' % r2_score(y_test, y_pred)) # Plot testing diagnostics plt.subplot(121) plt.scatter(X_test['Por'].values, y_test['Production'].values, color='black', s = 20, alpha = 0.3) plt.scatter(X_test['Por'], y_pred, color='blue', s = 20, alpha = 0.3) plt.title('Linear Regression Model Testing - Production from Porosity'); plt.xlabel('Porosity (%)'); plt.ylabel('Production (MCFPD)') plt.xlim(5,25)#; plt.ylim(0,1500000) y_res = y_pred - y_test['Production'].values plt.subplot(122) plt.hist(y_res, alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Linear Regression Model Prediction Error - Production from Porosity'); plt.xlabel('Production Estimation Error (MCFPD)'); plt.ylabel('Frequency') plt.xlim(-4000,4000)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.2) plt.show()Variance explained: 0.46Building a Multilinear Regression ModelLet's build our second machine learning model with scikit learn. We will work with multilinear regression! We will use both predictor features, porosity and brittleness, and one response feature, production.# Linear Regression Model with scikit learn from sklearn import linear_model # Step 1. 
Instantiate the Model multilinear_reg = linear_model.LinearRegression() # Step 2: Fit the Data on Training Data multilinear_reg.fit(X_train.values.reshape(n_train,2), y_train["Production"]) # fit model # Print the model parameters print('Porosity Coef: ', str(round(multilinear_reg.coef_[0],3)) + ', Brittleness Coef: ', str(round(multilinear_reg.coef_[1],3)) + ', Intercept: ', str(round(multilinear_reg.intercept_,3))) # Plot model fit plt.subplot(111) plt = visualize_model(multilinear_reg,X_test["Por"],X_test["Brittle"],y_test["Production"],'Training Data and Multilinear Regression Model') plt.subplots_adjust(left=0.0, bottom=0.0, right=1.2, top=1.2, wspace=0.3, hspace=0.2) plt.show() # Step 3: - Make predictions using the testing dataset y_pred = multilinear_reg.predict(X_test.values.reshape(n_test,2)) # Report the goodness of fit print('Variance explained: %.2f' % r2_score(y_test, y_pred)) y_res = y_pred - y_test['Production'].values plt.subplot(111) plt.hist(y_res, alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Linear Regression Model Prediction Error - Production from Porosity and Brittleness'); plt.xlabel('Production Estimation Error (MCFPD)'); plt.ylabel('Frequency') plt.xlim(-4000,4000)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.2) plt.show()Variance explained: 0.47Including brittleness only resulted in a slight improvement. * due to the nonlinear nature of brittleness Building a Decision Tree Regression ModelLet's build our third machine learning model with scikit learn. We will work with a decision tree. We will use both predictor features, porosity and brittleness, and one response feature, production.# Decison Tree Model with scikit learn from sklearn.tree import tree # for accessing tree information # Step 1. Instantiate the Model decision_tree_reg = tree.DecisionTreeRegressor(min_samples_leaf=5, max_depth = 5) # Step 2: Fit the Data on Training Data decision_tree_reg.fit(X_train.values.reshape(n_train,2), y_train["Production"]) # fit model # Plot model fit plt.subplot(111) plt = visualize_model(decision_tree_reg,X_test["Por"],X_test["Brittle"],y_test["Production"],'Training Data and Decision Tree Model') plt.subplots_adjust(left=0.0, bottom=0.0, right=1.2, top=1.2, wspace=0.3, hspace=0.2) plt.show() # Step 3: - Make predictions using the testing dataset y_pred = decision_tree_reg.predict(X_test.values.reshape(n_test,2)) # Report the goodness of fit print('Variance explained: %.2f' % r2_score(y_test, y_pred)) y_res = y_pred - y_test['Production'].values plt.subplot(111) plt.hist(y_res, alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Decision Tree Model Prediction Error - Production from Porosity and Brittleness'); plt.xlabel('Production Estimation Error (MCFPD)'); plt.ylabel('Frequency') plt.xlim(-4000,4000)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.2) plt.show()Variance explained: 0.88Building a Support Vector Machine Regression ModelLet's build our fourth machine learning model with scikit learn. We will work with a support vector machine! We will use both predictor features, porosity and brittleness, and one response feature, production.# Support Vector Regression Model with scikit learn from sklearn import linear_model # Step 1. 
Instantiate the Model support_vector_reg = svm.SVR(kernel='poly', C=0.01, gamma='auto', degree=2, epsilon=.01,coef0=1,max_iter=1000) # Step 2: Fit the Data on Training Data support_vector_reg.fit(X_train.values.reshape(n_train,2), y_train["Production"]) # fit model # Plot model fit plt.subplot(111) plt = visualize_model(support_vector_reg,X_test["Por"],X_test["Brittle"],y_test["Production"],'Training Data and Support Vector Model') plt.subplots_adjust(left=0.0, bottom=0.0, right=1.2, top=1.2, wspace=0.3, hspace=0.2) plt.show() # Step 3: - Make predictions using the testing dataset y_pred = support_vector_reg.predict(X_test.values.reshape(n_test,2)) # Report the goodness of fit print('Variance explained: %.2f' % r2_score(y_test, y_pred)) y_res = y_pred - y_test['Production'].values plt.subplot(111) plt.hist(y_res, alpha = 0.2, color = 'red', edgecolor = 'black', bins=20) plt.title('Support Vector Machine Model Prediction Error - Production from Porosity and Brittleness'); plt.xlabel('Production Estimation Error (MCFPD)'); plt.ylabel('Frequency') plt.xlim(-4000,4000)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.3, hspace=0.2) plt.show()Variance explained: 0.65How to Decode T3R RecordsThis Notebook shows how to decode raw T3R records.Author(s): ** Required Packagesimport numpy as np pi = np.pi import matplotlib.pyplot as plt %matplotlib inline import importlib import pqreader.pqreader as pq # import package from local directory importlib.reload(pq) plt.rcParams.update({'font.size' : 14, 'axes.labelpad' : 10, 'xtick.major.pad': 8, 'xtick.major.size': 6, 'xtick.major.width': 1, 'ytick.major.size': 6, 'ytick.major.width': 1, 'xtick.minor.size': 3, 'xtick.minor.width': 1, 'ytick.minor.size': 3, 'ytick.minor.width': 1, 'axes.linewidth': 1, 'xtick.top' : True, 'xtick.direction' : 'in', 'ytick.right' : True, 'ytick.direction' : 'in', 'lines.linewidth' : 1.5,}) t3records, _ = pq.t3r_records('Data/Nile_Blue_Glycerol.t3r') t3records[0] np.binary_repr(t3records[0], 32) valid_bits = 1 route_bits = 2 data_bits = 12 timetag_bits = 16 np.bitwise_and(np.right_shift(t3records[0], timetag_bits + data_bits + route_bits), 2**valid_bits - 1) np.binary_repr(np.bitwise_and(np.right_shift(t3records[0], 16), 2**data_bits-1), data_bits) np.binary_repr(np.bitwise_and(t3records[0], 2**timetag_bits-1), timetag_bits) valid = np.bitwise_and(np.right_shift(t3records, timetag_bits + data_bits + route_bits), 2**valid_bits - 1).astype('uint8') route = np.bitwise_and(np.right_shift(t3records, timetag_bits + data_bits), 2**route_bits - 1).astype('uint8') data = np.bitwise_and(np.right_shift(t3records, timetag_bits), 2**data_bits - 1).astype('uint16') timetags = np.bitwise_and(t3records, 2**timetag_bits - 1).astype('uint64') fig, ax = plt.subplots(1, 1, figsize=(7,5)) ax.plot(route[:100], 'ko') fig, ax = plt.subplots(1, 1, figsize=(7,5)) ax.plot(timetags[:30000], 'ko') def correct_overflow(timetags, valid, overflow): overflow_idx = np.where(valid==0)[0] for i, (idx1, idx2) in enumerate(zip(overflow_idx[:-1], overflow_idx[1:])): timetags[idx1:idx2] += (i + 1)*overflow timetags[idx2:] += (i + 2)*overflow return np.delete(timetags, np.where(valid==0)[0]) overflow = 2**timetag_bits timetags = correct_overflow(timetags, valid, overflow) fig, ax = plt.subplots(1, 1, figsize=(7,5)) ax.plot(timetags[14510870:], 'ko')ФИО, группа Лабораторная работа №5. 
Распознавание рукописных цифр при помощи однослойного персептрона В данной лабораторной работе вам предстоит обучить однослойный персептрон распознавать рукописные цифры из набора данных MNIST.import numpy as np import matplotlib.pyplot as plt import seaborn as sns from keras.datasets import mnist from sklearn.metrics import confusion_matrix def plot_images(images, titles, columns=5, rows=1, fontsize=20): fig=plt.figure(figsize=(20, 10)) for i, img in enumerate(images[:columns*rows]): fig.add_subplot(rows, columns, i + 1) plt.axis('off') plt.title(titles[i], fontsize=fontsize) plt.imshow(img, cmap='gray') plt.show() (X_train_original, y_train), (X_test_original, y_test) = mnist.load_data() print(X_train_original.shape, X_test_original.shape)Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz 11493376/11490434 [==============================] - 2s 0us/step (60000, 28, 28) (10000, 28, 28)Выведите первые 5 изображений из обучающей выборки с помощью функции `plot_images`.plot_images(X_train_original[:5], range(1,6))Закодируйте метки классов в виде векторов с помощью one-hot encoding.*Подсказка: для решения этой задачи удобно использовать функцию `np.eye`.*y_train_one_hot = np.zeros((y_train.size, y_train.max()+1)) y_train_rows = np.arange(y_train.size) y_train_one_hot[y_train_rows, y_train] = 1 y_test_one_hot = np.zeros((y_test.size, y_test.max()+1)) y_test_rows = np.arange(y_test.size) y_test_one_hot[y_test_rows, y_test] = 1Выполните предобработку данных изображений, чтобы значение каждого пикселя принадлежало множеству $\{0, 1\}$.X_train = (X_train_original/255).round() X_test = (X_test_original/255).round()Выведите первые 5 изображений из обучающей выборки после процедуры предобработки.plot_images(X_train[:5], range(1,6))Ниже представлена заготовка класса персептрона. Реализуйте недостающие части.class Perceptron: def __init__(self, m, k, learning_rate=0.001, num_epochs=20, verbose=False): self.m = m # number of features self.k = k # number of classes (neurons) self.learning_rate = learning_rate self.num_epochs = num_epochs self.verbose = verbose self._validation_freq = 1 self._init_params() # Initialize weights and biases def _init_params(self): self.b = np.ones(self.k) self.w = np.vstack((np.random.normal(0.0, 0.1, (self.m, self.k)), self.b)) # self.w = np.random.normal(0.0, 0.1, (self.m, self.k)) # Heaviside step function def _activate(self, x): return np.vectorize(lambda x: 1 if x > 0 else 0)(x) # Forward pass def predict(self, X): return self._activate(np.dot(X, self.w)) # Use delta rule to update parameters def _update_params(self, x, y, y_pred): self.w += self.learning_rate * np.dot(x.reshape((-1, 1)), (y - y_pred).reshape((1, -1))) def fit(self, X, y, X_test=None, y_test=None): for i in range(self.num_epochs): for j in range(X.shape[0]): # Compute prediction for X[j] y_pred = self.predict(X[j]) # Update params self._update_params(X[j], y[j], y_pred) if X_test is not None and y_test is not None and self.verbose and (i + 1) % self._validation_freq == 0: # Compute predictions for test set and compute accuracy total_true = 0 for k in range(X_test.shape[0]): y_test_pred = self.predict(X_test[k]) e_i = y_test[k] - y_test_pred if not any(e_i): total_true += 1 accuracy = total_true / X_test.shape[0] print("Accuracy on {}-th epoch is {:.04f}".format(i+1, accuracy))Обучите персептрон. 
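The update implemented in `_update_params` above is the standard delta rule, $W \leftarrow W + \eta\, x\,(y - \hat{y})^{\top}$, applied as an outer product between the input vector $x$ and the error $y - \hat{y}$, with $\eta$ the learning rate.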
Перед подачей изображений на вход персептрону, преобразуйте их в вектора размерности 784.*Подсказка: для изменения формы массива можно воспользоваться методом `reshape`.*perceptron = Perceptron(784, 10) ones = np.ones((X_train.shape[0], 1)) X_train_stacked = np.hstack((X_train.reshape((-1, 784)), ones)) ones = np.ones((X_test.shape[0], 1)) X_test_stacked = np.hstack((X_test.reshape((-1, 784)), ones)) perceptron.fit(X_train_stacked, y_train_one_hot, X_test_stacked, y_test_one_hot) print(np.argmax(perceptron.predict(X_train_stacked[0])))5Вычислите предсказания для тестовой выборки.*Подсказка: чтобы из one-hot encoding вернуться к десятичным цифрам, можно воспользоваться методом `argmax`.*y_test_pred = [np.argmax(perceptron.predict(i)) for i in X_test_stacked]Выведите первые пять изображений из тестовой выборки. В качестве заголовка к каждому изображению выведите ожидаемую (англ. expected) и предсказанную (англ. predicted) метки класса.plot_images(X_test[:5], y_test_pred[:5])Выведите несколько изображений из тестовой выборки, в которых персептрон допустил ошибку. В качестве заголовка к каждому изображению выведите ожидаемую (англ. expected) и предсказанную (англ. predicted) метки класса. Как вы считаете, есть ли в допущенных ошибках какая-то закономерность?err = [] for x, pred, corr in zip(X_test, y_test_pred, y_test): if pred != corr: err.append({'img':x, 'pred':pred, 'corr':corr}) plot_images([i['img'] for i in err[:5]], ['Prediction: {pred}\nCorrect: {corr}'.format(**i) for i in err[:5]])**Ответ:** Изобразите веса нейронов в виде изображений. У вас должно получиться 10 изображений. Можно ли по полученным изображениям сделать вывод, за распознавание какой цифры отвечает каждый из нейронов?*Подсказка: можете посмотреть [пример](https://ml4a.github.io/ml4a/looking_inside_neural_nets/).*imgs = perceptron.w[:-1, :].reshape((28, 28, 10)) fig = plt.figure(figsize=(20, 10)) for i in range(2 * 5): fig.add_subplot(2, 5, i + 1) plt.axis('off') plt.title(i, fontsize=20) plt.imshow(imgs[:, :, i], cmap='gray') plt.show()**Ответ:** нет, силуэты цифр трудно различимы, так как используется лишь 1 уровень нейронов. Постройте матрицу ошибок (англ. confusion matrix) для полученной нейронной сети. Подсказка: можно воспользоваться функцией [`sklearn.metrics.confusion_matrix`](https://scikit-learn.org/0.16/modules/generated/sklearn.metrics.confusion_matrix.html). [Пример матрицы ошибок.](https://ml4a.github.io/demos/confusion_mnist/)cm = confusion_matrix(y_test, y_test_pred) plt.figure(dpi=200) sns.set(font_scale=0.6) g = sns.heatmap(cm, annot=True, annot_kws={"size": 5}, cmap="YlGnBu") plt.show()Twitter Sentiment Analysis: Self-driving carsDATA 512 - Human Centered Data Science Fall 2019 MotivationA Self-driving car or an autonomous car is a vehicle that is capable of sensing it's environment and navigating safely in a given space with little or no human input [1]. Although self-driving as an early concept dates back to the 1920s,the first self-sufficient and truly autonomous cars appeared in the 1980s. Carnegie Mellon University's Navlab and ALV projects in 1984, and Mercedes-Benz and Bundeswehr University Munich's Eureka Prometheus Project in 1987 marked the beginning of a new era. But it wasn't until a decade ago that self-driving cars moved from the hands of government funded projects and challenges to private car manufacturers such as General Motors, Ford, BMW, Volkswagen, , Audi, Nissan, Toyota, and Volvo. By 2015, self-driving cars had become the topic of debate in households. 
Today, several breakthroughs and advancements later, we hear about how self-driving cars are going to be part and parcel of the very near future. Many companies, large and small, are racing to claim the crown of having solved level 5 autonomy. Autonomous driving start-ups are rampant in Silicon Valley and around the US at large. It's never been a better time for the field, yet the human aspect of self-driving cars is largely understudied. This project is an attempt to understand and analyse the general vibe around self-driving as expressed on Twitter from May 15, 2015 to May 31, 2015. This was around the time when news was flying about Google's self-driving test initiative - [Google to begin testing purpose-built self-driving cars on public roads](https://www.theguardian.com/technology/2015/may/15/google-testing-purpose-built-self-driving-cars-public-roads). Also around the same time, there were articles about accidents being caused by self-driving cars - [Self-driving car accidents revealed in California](https://www.bbc.com/news/technology-32691887). ReproducibilityAll the quantitative analyses shown in this notebook are easily reproducible with the right python packages and data provided in this repository. However, the qualitative analyses are a result of my personal research and intuition (and possible biases thereby). I have attached the relevant sources that support my claims and hypotheses to the best of my ability to help others audit and review my work and form independent conclusions. Research Directions- What is the general sentiment of people around self-driving cars?- What prompted these opinions?- What are people talking about? Did the happenings around the world sway people in one direction or another?- Study of the type of words used to express positive and negative sentiment. Thick Data Approach Here are some statements I gathered from top news agencies that released news articles a week before the start date of the tweets (May 15, 2015) present in our dataset. The release dates of these articles are noteworthy - - May 11, 2015, [BBC News](https://www.bbc.com/news/technology-32691887): "Four out of the 48 self-driving cars on public roads in California have been involved in accidents in the last eight months, according to the state's Department of Motor Vehicles."- May 12, 2015, [The Economist](https://www.economist.com/the-economist-explains/2015/05/12/how-does-a-self-driving-car-work): "Cars that can drive themselves, a staple of science-fiction, have started to appear on roads in real life."- May 15, 2015, [The Guardian](https://www.theguardian.com/technology/2015/may/15/google-testing-purpose-built-self-driving-cars-public-roads): "Google’s prototype self-driving car pods will take to public roads for the first time around its headquarters in Mountain View, California this summer." Following are some cherry-picked examples of tweets from the dataset. Several of them seem to be reactions to happenings around the world. Highlighted in bold are the ones that seem to closely resemble news from the above 3 articles. - **"There is an acceptable level of risk with all new technology, but the reality that people will die in autonomous car is hard to accept."** 😢- "If Google maps can't keep up with road construction, how am I supposed to trust a driverless car to get around here?"
😟- "Audi gets first permit to test self-driving cars in California: Think twice next time you tailgate that new Audi…" 😐- "Google unveils driver-less car...can you see yourself buying an autonomous car?" 😐- **"Just saw Google self-driving car on I-34. It was painted green and blue."** 😐- "Google should buy Tesla and Uber, mix a little autonomous car into the mix and solve all transportation problems. sentFromTraffic" 😀- **"Awesome! Google driverless cars will help the blind travel more often."** 😁- "Autonomous vehicles could reduce traffic fatalities by 90%...I'm in!" 😁 Legend: 😢: 1 😟: 2 😐: 3 😀: 4 😁: 5 On digging a little more into articles on Waymo, Google's self-driving car division, during this time period, I found the following on [Wikipedia](https://en.wikipedia.org/wiki/Waymo) -"In 2015, Google provided "the world's first fully driverless ride on public roads" to a legally blind friend of principal engineer . The ride was taken by , former CEO of the Santa Clara Valley Blind Center, in Austin, Texas. It was the first driverless ride that was on a public road and was not accompanied by a test driver or police escort. The car had no steering wheel or floor pedals."The tweet - _"Awesome! Google driverless cars will help the blind travel more often."_ seems to be directly correlated with the above incident. Quantitative Approach Getting started Let's begin by first importing all the necessary python libraries ...# Helper modules import pandas as pd import numpy as np from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import re import string import codecs # Plotting modules import seaborn as sns from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt %matplotlib inline import inflect p = inflect.engine() # Loading raw twitter sentiment file data_path = './Data/RawData/Twitter-sentiment-self-drive-DFE.csv' data = pd.read_csv(data_path, encoding = "ISO-8859-1") # Opening with codecs since some special characters and emojis might be present in the data with codecs.open(data_path, 'r', encoding='utf-8', errors='ignore') as fdata: data = pd.read_csv(fdata) data.head()We will only use 2 columns for this analysis - _sentiment_ and _text_# Extracting relevant columns from the raw data data = data[['sentiment', 'text']] data['sentiment'].unique() # Sentiment contains non-integer values # Removing rows which have sentiment value as 'not_relevant' data = data[data['sentiment']!='not_relevant'] data['sentiment'] = data['sentiment'].astype('int') data = data.reset_index(); data = data.drop(['index'], axis=1) data.head(10)Let's create a new column that contains: - -1 indicating negative polarity of sentiment (i.e. sentiment value of 1 or 2) - +1 indicating positive polarity of sentiment (i.e. sentiment value of 4 or 5) or - 0 indicating neutral polarity of sentiment (i.e. 
sentiment value of 3)def assign_polarity(row): if row['sentiment'] < 3: return -1 elif row['sentiment'] > 3: return 1 else: return 0 # Assign positive, negative, neutral polarity to each tweet data['polarity'] = data.apply(lambda row: assign_polarity(row), axis=1) data.head() # Saving cleaned data to file data.to_csv('./Data/cleaned_data.csv', index=False)The preprocessing code is derived from code made available by in this [post](https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1).def preprocess_tweet(text): # Check characters to see if they are in punctuation nopunc = [char for char in text if char not in string.punctuation] # Join the characters again to form the string. nopunc = ''.join(nopunc) # convert text to lower-case nopunc = nopunc.lower() # remove URLs nopunc = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', nopunc) nopunc = re.sub(r'http\S+', '', nopunc) # remove usernames nopunc = re.sub('@[^\s]+', '', nopunc) nopunc = re.sub('̢[^\s]+', '', nopunc) # remove the # in #hashtag nopunc = re.sub(r'#([^\s]+)', r'\1', nopunc) # remove repeated characters nopunc = word_tokenize(nopunc) # remove stopwords from final word list #nopunc = ' '.join(word for word in nopunc if word not in stopwords.words('english')) output = '' for word in nopunc: if word not in stopwords.words('english'): if p.singular_noun(word): output += ' ' + p.singular_noun(word) else: output += ' ' + word return output data['text'] = data['text'].apply(preprocess_tweet) data.head()Plot of distribution of sentiment on a scale of 1-5ax = sns.countplot(x='sentiment', data=data, color='salmon') ax.set_title('Distribution of sentiment')Plot of distribution of polarity(+1, 0, -1)ax = sns.countplot(x='polarity', data=data, color='salmon') ax.set_title('Distribution of polarity')From the above plots, we see that the sentiment of most of the tweets is 3 indicating neutral sentiment. The overall polarity is also mostly neutral with a slightly higher positive polarity count as compared to negative polarity. 
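As a quick numeric complement to the two count plots above, the same distributions can be printed as proportions. This is a minimal sketch, assuming the cleaned `data` DataFrame built earlier (with its `sentiment` and `polarity` columns) is still in scope.
```
# Share of tweets per sentiment score (1-5) and per polarity class (-1, 0, +1)
print(data['sentiment'].value_counts(normalize=True).sort_index().round(3))
print(data['polarity'].value_counts(normalize=True).sort_index().round(3))
```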
WORD CLOUDSdf_positive = data[data['polarity']==1] df_negative = data[data['polarity']==-1] df_neutral = data[data['polarity']==0]Positive word cloudstop_words = set(STOPWORDS) pos = (' '.join(df_positive['text'])) wordcloud = WordCloud(width = 1000, height = 500, stopwords=stop_words).generate(pos) plt.figure(figsize=(15,5)) plt.imshow(wordcloud) plt.axis('off');Negative word cloudstop_words = set(STOPWORDS) neg = (' '.join(df_negative['text'])) wordcloud = WordCloud(width = 1000, height = 500, stopwords=stop_words).generate(neg) plt.figure(figsize=(15,5)) plt.imshow(wordcloud) plt.axis('off');Neutral word cloudstop_words = set(STOPWORDS) neut = (' '.join(df_neutral['text'])) wordcloud = WordCloud(width = 1000, height = 500, stopwords=stop_words).generate(neut) plt.figure(figsize=(15,5)) plt.imshow(wordcloud) plt.axis('off');Top 20 word-frequenciesfrom collections import Counter def plot_top_k_words(k, df, title): #Get words tokens = [] for index, rows in df.iterrows(): # Create list for the current row tokens.extend(rows.text.split()) counter = Counter(tokens) #top-k frequency words most_occurring = counter.most_common(k) print(most_occurring) plt.figure(figsize=(14,7)) top_k = pd.DataFrame(most_occurring, columns=['word','frequency']) ax = sns.barplot(x="word", y='frequency', data=top_k) ax.set_title(title) loc, labels = plt.xticks() ax.set_xticklabels(labels, rotation=30); k = 20 plot_top_k_words(k, data, 'Top 20 word frequencies across all tweets') plot_top_k_words(k, df_positive, 'Top 20 word frequencies for positive tweets') plot_top_k_words(k, df_negative, 'Top 20 word frequencies for negative tweets') plot_top_k_words(k, df_neutral, 'Top 20 word frequencies for neutral tweets')[('car', 4089), ('google', 1954), ('selfdriving', 1541), ('driverles', 1227), ('driving', 905), ('self', 787), ('autonomou', 265), ('vehicle', 263), ('future', 191), ('uber', 190), ('saw', 177), ('road', 171), ('get', 154), ('new', 151), ('california', 148), ('like', 143), ('via', 141), ('drive', 135), ('rt', 129), ('one', 128)]Assignment 4 ![](https://github.com/rpi-techfundamentals/hm-01-starter/blob/master/notsaved.png?raw=1) Before you start working on this assignment please click File -> Save a Copy in Drive. Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel → Restart) and then run all cells (in the menubar, select Cell → Run All). You can speak with others regarding the assignment but all work must be your own. This is a 30 point assignment. Run the cells below to install a required package.files = "https://github.com/rpi-techfundamentals/introml_website_fall_2020/raw/master/files/assignment1.zip" !pip install otter-grader && wget $files && unzip -o assignment1.zip #Run this. It initiates autograding. import otter grader = otter.Notebook()**Spring Garden Tools Case** Please see the textbook or box share for the question. Name your Pulp model `spring`#enter your answer here #Run this cell to see if you passed the test. If it fails, you probably didn't run the above required code. grader.check('q01')**Question 2.** Packages are a really important component of most programming languages. In the overview, you learned about tab completion as a way to explore python objects. This can be really useful. Let's use it to find the formula for the factorial of 15. Assign the result to the variable `m`.#we have to first import the math function to use tab completion. import math #Assign the result to the variable m.
Press tab after the period to show available functions m = math. m #Run this cell to see if you passed the test. If it fails, you probably didn't run the above required code. grader.check('q02')**Question 3.** Markdown is a useful aspect of Jupyter Notebooks. - [Guide to Markdown](https://colab.research.google.com/notebooks/markdown_guide.ipynb)Use what you learned about markdown to adjust the text below to create the following:![](https://github.com/rpi-techfundamentals/hm-01-starter/blob/master/md_result.png?raw=1)**Double click on cell below to open it for markdown editing. There is no test for this question.** HeaderFor the above header, make it an h1 tag using markdown. Sub-HeaderFor the above sub-header, make it an h5 tag using markdown. BoldItalicshttps://tw.rpi.edu//images/rpi-logo-red.jpg (Embed this image) **Question 4.** Installing Packages Python packages are an important part of data science and critical to leveraging the broader Python ecosystem. You typically have two options when installing a package. You can install it with [Conda](http://anaconda.org) or [pip](https://pypi.org). The `!` in a jupyter notebook means that the line is being processed on the `command line` and not by the Python interpreter. ```!conda install -c conda-forge !pip install ```If you try to import something and get an error, it is usually a tell that you need to install a package. Install the `fastparquet` Package to be able to work with Parquet Files- CSV (comma delimited) files are great for humans to read and understand. - For "big data" though, it isn't a great long term storage option (inefficient/slow).- Parquet is a type of columnar storage format. It makes dealing with lots of columns fast. - [fastparquet](https://fastparquet.readthedocs.io) is a Python package for dealing with Parquet files. - Apache Spark also natively reads Parquet Files. - Look [here](https://pypi.org/project/fastparquet/) for instructions on installing the fastparquet package.#Install package for fastparquet here. Please comment it out after installing. #Run this to try to load the name.parq. It won't work unless you downloaded the file and installed the package. from fastparquet import ParquetFile #This imports the package. import pandas as pd #pandas is usually imported as pd pf = ParquetFile('./data/Demographic_Statistics_By_Zip_Code.parq') dfparq = pf.to_pandas() #This changes the Parquet File object to a pandas dataframe. We will learn more about that later. dfparq.head() #Just listing the value prints it out.Show All Columns in a Pandas DataframeNotice there is a `...` which indicates you are only seeing some of the columns, and the output has been truncated. Read [this article](https://towardsdatascience.com/how-to-show-all-columns-rows-of-a-pandas-dataframe-c49d4507fcf) and find how to show all the columns of a pandas dataframe.#Set the display options to show all columns. #This will print out the notebook. dfparq.head() #View the dataframe and set the following values to the numbers you see for row 0. Don't put in quotes. #COUNT PARTICIPANTS row_0_count_participants= #enter what you see above. row_0_count_hispanic_latino= #enter what you see above. #Run this cell to see if you passed the test. grader.check('q04')**Question 5.** Importing CSV into a Pandas Dataframe- Comma delimited files are a common way of transmitting data. - Data for different columns is separated by a comma.- It is possible to open a csv in different ways, but Pandas is the easiest.
- Data structured like CSV's are extremely common and known as tabular data.- Pandas will give access to many useful methods for working with data. - `pandas` is often imported as the abbreviated `pd`.- You can also get help by using a `?` after the method call. For example, to find the doc string for the read csv function you could execute:`pd.read_csv?` or `help(pd.read_csv)`# Adjust the code below so that you load only the first 100 rows of a dataframe and assign to df_smalle df_small= df_small.shapeGet CSVs from the Web/Github. You can also get a CSV directly from a web url. View this file in your web browser. You won't be able to load this into pandas. [https://github.com/rpi-techfundamentals/introml_website_fall_2020/blob/master/files/webfile.csv](https://github.com/rpi-techfundamentals/introml_website_fall_2020/blob/master/files/webfile.csv)To get the link you can load, you need to click on the `raw` button. That should lead to this url:`https://raw.githubusercontent.com/rpi-techfundamentals/introml_website_fall_2020/master/files/webfile.csv`# Load the web url and set it equal to df_web df_web = df_web.head() #Run this cell to see if you passed the test. grader.check('q05')Post-APA calling: Imputation and QC AimThis notebook is designed to impute the missing values in the PDUI matrix, and perform quantile normailization for the impute values. Input* raw PDUI matrix (row as gene, columns as sample id)* covariate file Output* PDUI matrix without missingness - The missing value is calculated using `impute` package Minimum working examplesos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/pipeline/molecular_phenotypes/QC/apa_impute.ipynb APAimpute \ --cwd /mnt/mfs/statgen/ls3751/MWE_dapars2/Output \ --cov /data/example.cov.txt --chrlist chr1 \ --container /mnt/mfs/statgen/ls3751/container/dapars2.sifWorkflow implementation[global] parameter: walltime = '40h' parameter: mem = '32G' parameter: ncore = 16 parameter: cwd = path parameter: thread = 8 parameter: job_size = 1 parameter: container = '' [APAimpute] parameter: cov = path parameter: chrlist = list input: [f'{cwd}/apa_{x}/Dapars_result_result_temp.{x}.txt' for x in chrlist], group_by = 1 output: [f'{cwd}/Dapars_result_clean_{x}.txt' for x in chrlist], group_by = 1 R: expand= "${ }", container = container .libPaths( c('/usr/local/lib/R/site-library' , '/usr/lib/R/site-library', '/usr/lib/R/library', .libPaths())) suppressPackageStartupMessages(require(dplyr)) suppressPackageStartupMessages(require(tidyr)) suppressPackageStartupMessages(require(impute)) # Read the data input_dir <- ${_input:r} dapars_result = data.table::fread(input_dir) ## much faster than read.table tmp = dapars_result[,1:4] dapars_result = dapars_result[,-c(2:4)] rownames(dapars_result) = dapars_result[,1] dapars_result = dapars_result[,-1] dapars_result = dapars_result[,colMeans(is.na(dapars_result)) <= 0.8] dapars_result = dapars_result[rowMeans(is.na(dapars_result)) < 0.5,] tmp_vec = rownames(dapars_result) class(dapars_result) = "numeric" covs = data.table::fread(${cov}) dapars_impute = dapars_result[,colnames(dapars_result) %in% colnames(covs)] dapars_impute = impute.knn(dapars_result) df = as.data.frame(dapars_impute$data) for (gene in 1:nrow(df)) { mat = apply(df[gene,], 1, rank, ties.method = "average") mat = qnorm(mat/ (ncol(df) + 1)) df[gene, ] = mat } df$Gene = tmp_vec final_data <- inner_join(df, tmp) write.table(final_data, file = ${_output:r}, quote = F)Data Science em Produção =-=- ROSSMANN - STORE SALES PREDICTION -=-= 0. 
Importsimport inflection # helper function import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt0.1. Helper Functions# Notebook Setups sns.set_style('darkgrid') sns.set_context('talk') sns.set_palette('Set2') # Functions def snakecase(list_of_names): """Returns a list of names in snake case, which refers to the style of writing in which each space is replaced by an underscore (_) character.""" new_list = list(map(inflection.underscore, list_of_names)) return new_list0.2. Loading Data# loading historical data - including Sales df_sales_raw = pd.read_csv('../raw_data/train.csv', low_memory=False) # loading information about the stores df_store_raw = pd.read_csv('../raw_data/store.csv', low_memory=False) # merging dataframes df_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store')Data Fields **Most of the fields are self-explanatory. The following are descriptions for those that aren't.**- **Id** - an Id that represents a (Store, Date) duple within the test set;- **Store** - a unique Id for each store;- **Sales** - the turnover for any given day (this is what you are predicting);- **Customers** - the number of customers on a given day;- **Open** - an indicator for whether the store was open: 0 = closed, 1 = open;- **StateHoliday** - indicates a state holiday. Normally all stores, with few exceptions, are closed on state holidays. Note that all schools are closed on public holidays and weekends. a = public holiday, b = Easter holiday, c = Christmas, 0 = None;- **SchoolHoliday** - indicates if the (Store, Date) was affected by the closure of public schools;- **StoreType** - differentiates between 4 different store models: a, b, c, d;- **Assortment** - describes an assortment level: a = basic, b = extra, c = extended;- **CompetitionDistance** - distance in meters to the nearest competitor store;- **CompetitionOpenSince[Month/Year]** - gives the approximate year and month of the time the nearest competitor was opened;- **Promo** - indicates whether a store is running a promo on that day;- **Promo2** - Promo2 is a continuing and consecutive promotion for some stores: 0 = store is not participating, 1 = store is participating;- **Promo2Since[Year/Week]** - describes the year and calendar week when the store started participating in Promo2;- **PromoInterval** - describes the consecutive intervals Promo2 is started, naming the months the promotion is started anew. E.g. "Feb,May,Aug,Nov" means each round starts in February, May, August, November of any given year for that store. 1. Descriptive Data Analysis 1.0. Dataframe in Progress Backupdf1 = df_raw.copy()1.1. Column Renaming# renaming df1 column names df1.columns = snakecase(df1.columns)1.2. Data Dimensionprint(f'Store Dataframe - Number of Rows: {df1.shape[0]}. \nStore Dataframe - Number of Columns: {df1.shape[1]}.')Store Dataframe - Number of Rows: 1017209. Store Dataframe - Number of Columns: 18.1.3. Data Types# dataframe data types df1.dtypes # setting date column as datetime type df1['date'] = pd.to_datetime(df1['date'])1.4. 
NA Check# checking NA - All NA values came from store.csv df1.isna().sum() # checking NA using info() df1.info() Int64Index: 1017209 entries, 0 to 1017208 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 store 1017209 non-null int64 1 day_of_week 1017209 non-null int64 2 date 1017209 non-null datetime64[ns] 3 sales 1017209 non-null int64 4 customers 1017209 non-null int64 5 open 1017209 non-null int64 6 promo 1017209 non-null int64 7 state_holiday 1017209 non-null object 8 school_holiday 1017209 non-null int64 9 store_type 1017209 non-null object 10 assortment 1017209 non-null object 11[...]1.5. Filling in Missing/Null Values **Number of NA Values** competition_distance 2642 competition_open_since_month 323348 competition_open_since_year 323348 promo2_since_week 508031 promo2_since_year 508031 promo_interval 508031# competition_distance # maximun distance x 2 max_dist_x_2 = df1['competition_distance'].max() * 2 # assuming competitors are twice as far away as the greatest distance found df1['competition_distance'] = df1['competition_distance'].apply(lambda x: max_dist_x_2 if np.isnan(x) else x) # competition_open_since_year # frequency per year of existing competition_open_since_year data frequency = df1['competition_open_since_year'].value_counts( normalize=True).reset_index().rename( columns={'index': 'year', 'competition_open_since_year': 'percent'}) # True/False missing/Null Series missing = df1['competition_open_since_year'].isna() # Using Numpy's random.choice to fill out missing data based on the frequency of existing info df1.loc[missing,'competition_open_since_year'] = np.random.choice(frequency.year, size=len(df1[missing]), p=frequency.percent) # competition_open_since_month # frequency per month of existing competition_open_since_month data frequency = df1['competition_open_since_month'].value_counts( normalize=True).reset_index().rename( columns={'index': 'month', 'competition_open_since_month': 'percent'}) # True/False missing/Null Series missing = df1['competition_open_since_month'].isna() # Using Numpy's random.choice to fill out missing data based on the frequency of existing info df1.loc[missing,'competition_open_since_month'] = np.random.choice(frequency.month, size=len(df1[missing]), p=frequency.percent) # promo2_since_week AND promo2_since_year # the same date of sale will be used as a reference to fill in the NA values # then a new timedelta column will be created (promo2_duration) #promo2_since_week df1['promo2_since_week'] = df1[['date', 'promo2_since_week']].apply(lambda x: x['date'].week if np.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1) # promo2_since_year df1['promo2_since_year'] = df1[['date', 'promo2_since_year']].apply(lambda x: x['date'].year if np.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1) # promo_interval # filling in NA with 'none' df1['promo_interval'].fillna(value='none', inplace=True) # creating a column with current month df1['curr_month'] = df1['date'].dt.strftime('%b') # creating a column to indicate whether promo2 is active df1['promo2_active'] = df1[['curr_month', 'promo_interval']].apply(lambda x: 1 if x['curr_month'] in x['promo_interval'].split(',') else 0, axis=1)1.6. 
Changing Data Typesdf1.dtypes # Changing DTypes from float to integer df1['competition_distance'] = df1['competition_distance'].astype(int) df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int) df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int) df1['promo2_since_week'] = df1['promo2_since_week'].astype(int) df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)1.7. Descriptive Statistics 1.7.0. Numeric vs Categorical - Attributes Splitdf_numeric = df1.select_dtypes(include=['int64', 'float64']) df_categorical = df1.select_dtypes(exclude=['int64', 'float64', 'datetime64[ns]'])1.7.1. Numeric Attributes# using DF describe() method df1.describe().T # central tendency metrics - mean, median ct_mean = df_numeric.apply(np.mean) ct_median = df_numeric.apply(np.median) # dispersion metrics - std, min, max, range, skew, kurtosis d_std = df_numeric.apply(np.std) d_min = df_numeric.apply(min) d_max = df_numeric.apply(max) d_range = df_numeric.apply(lambda x: x.max() - x.min()) d_skew = df_numeric.apply(lambda x: x.skew()) d_kurtosis = df_numeric.apply(lambda x: x.kurtosis()) metrics = pd.DataFrame({ 'min': d_min, 'max': d_max, 'range': d_range, 'mean': ct_mean, 'median': ct_median, 'std': d_std, 'skew': d_skew, 'kurtosis': d_kurtosis }) metrics**competition_distance** - Skew: highly skewed data, high positive value means that the right-hand tail is much longer than the left-hand tail. - Kurtosis: increases as the tails become heavier, the high positive value indicates a very peaked curve.**competition_open_since_year** - Skew: highly skewed data, high negative value means that the left-hand tail is longer than the right-hand tail. - Kurtosis: increases as the tails become heavier, the high positive value indicates a very peaked curve.**sales** - Skewness is close to zero, indicating that the data is not too skewed# sales histogram - not considering when sales is zero ax = sns.histplot(data=df_numeric[df_numeric['sales'] > 0], x='sales', stat='proportion', bins=100) ax.figure.set_size_inches(14, 7) ax.set_title('Sales Histogram', fontsize=20, pad=10) median = np.median(df_numeric['sales']) ax.vlines(x=5744, ymin=0, ymax=0.07, linestyles='dashed', label='median', colors='firebrick') ax.annotate(f'median = {median}', xy=(7000, 0.061), fontsize=14, color='firebrick') ax;1.7.2. 
Categorical Attributes# verifying unique valuesfor each categorical attribute df_categorical.apply(lambda x: len(x.unique()))**BOXPLOT OF CATEGORICAL ATTRIBUTES**# Boxplot -- state_holiday # not considering when: sales = 0 aux = df1[df1['sales'] > 0] plt.figure(figsize=(24,10)) plt.subplot(1, 3, 1) ax1 = sns.boxplot(x='state_holiday', y='sales', data=aux) ax1.set_title('Boxplot - state_holiday', fontsize=18, pad=10) ax1.set_xticklabels(labels=['None', 'Public', 'Easter', 'Christmas']) plt.subplot(1, 3, 2) ax2 = sns.boxplot(x='store_type', y='sales', data=aux) ax2.set_title('Boxplot - store_type', fontsize=18, pad=10) plt.subplot(1, 3, 3) ax3 = sns.boxplot(x='assortment', y='sales', data=aux) ax3.set_title('Boxplot - assortment', fontsize=18, pad=10) plt.show()Introduction to Seabornimport seaborn as sns datasets = sns.get_dataset_names() datasets df = sns.load_dataset('car_crashes') type(df) df.head(5) df.shape import matplotlib.pyplot as plt import numpy as np x = np.linspace(0, 2, 100) plt.plot(x, x, label='linear') plt.plot(x, x**2, label='quadratic') plt.plot(x, x**3, label='cubic') plt.show() #sns.set() resets seaborn sns.set() x = np.linspace(0, 2, 100) plt.plot(x, x, label='linear') plt.plot(x, x**2, label='quadratic') plt.plot(x, x**3, label='cubic') plt.show() #sns.set() resets seaborn sns.set_style('whitegrid') x = np.linspace(0, 2, 100) plt.plot(x, x, label='linear') plt.plot(x, x**2, label='quadratic') plt.plot(x, x**3, label='cubic') plt.show() x = np.linspace(0, 2, 100) plt.plot(x, x, label='linear') plt.plot(x, x**2, label='quadratic') plt.plot(x, x**3, label='cubic') sns.despine() current_pallet = sns.color_palette() sns.palplot(current_pallet)Data Visualization with Seabornimport pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('../../resources/datasets/BR_eleitorado_2016_municipio.csv', delimiter=";") df.head()Boxplotdf_target = df.loc[df['total_eleitores'] < 100000].loc[df['total_eleitores'] > 20000] plt.figure(figsize=(20,10)) g = sns.boxplot(x="uf", y="total_eleitores",data=df_target) plt.show()Barplot# Count Plot (a.k.a. Bar Plot) sns.countplot(x='uf', data=df) # Rotate x-labels plt.xticks(rotation=-90) df_target = df.nlargest(5, 'total_eleitores') plt.figure(figsize=(15,15)) g = sns.catplot(x="nome_municipio", y="total_eleitores", data=df_target, height=5, kind="bar",hue = 'uf') g.set_xticklabels(rotation=90) plt.show()Scatterplot + Regression Modeldf = pd.read_csv('../../resources/datasets/Pokemon.csv', index_col=0) df.head() sns.lmplot(x='Attack', y='Defense', data=df, fit_reg=False) sns.lmplot(x='Attack', y='Defense', data=df) # 3a variável (hue) sns.lmplot(x='Attack', y='Defense', data=df, hue='Legendary')Box plotplt.figure(figsize=(10,10)) sns.boxplot(data=df) stats_df = df.drop(['Total', 'Stage', 'Legendary'], axis=1) stats_df.head(5) # New boxplot using stats_df sns.boxplot(data=stats_df)Violin Plot# Set theme sns.set_style('whitegrid') sns.violinplot(x='Type 1', y='Attack', data=df.head(30))Heatmapcorr = stats_df.corr() # Heatmap sns.heatmap(corr)Seaborn Tools for Data Explorationsns.get_dataset_names() iris = sns.load_dataset("iris") sns.pairplot(iris) sns.pairplot(stats_df) tips = sns.load_dataset("tips") tips.head() sns.lmplot(x="total_bill", y="tip", col="time", hue="smoker",data=tips);Use case of machine learning modelnormative = """ SECRETARÍA DE GOBIERNO DE TRABAJO Y EMPLEO Decreto 17/2019 DECTO-2019-17-APN-PTE - Desígnase Secretario de Coordinación Administrativa. 
Ciudad de Buenos Aires, 04/01/2019 VISTO el Expediente N° EX-2018-66308087-APN-DGARRHHMP#MPYT, y CONSIDERANDO: Que el Doctor SEVERRI (M.I. Nº 20.346.804) ha presentado su renuncia a partir del 18 de diciembre de 2018, al cargo de Secretario de Coordinación Administrativa, actualmente dependiente de la Secretaría de Gobierno de Trabajo y Empleo del MINISTERIO DE PRODUCCIÓN Y TRABAJO, para el que fuera designado por el Decreto N° 193 de fecha 8 de marzo de 2018. Que en atención a lo expuesto precedentemente, resulta pertinente proceder a la aceptación de la referida renuncia. Que en virtud de específicas razones de servicio de la SECRETARÍA DE COORDINACIÓN ADMINISTRATIVA de la citada Secretaría de Gobierno, se considera imprescindible la cobertura del cargo vacante. Que, en tal sentido, se propicia la designación del Doctor (M.I. N° 25.055.401) a partir del 18 de diciembre de 2018, en el cargo de Secretario de Coordinación Administrativa de la Secretaría de Gobierno de Trabajo y Empleo del MINISTERIO DE PRODUCCIÓN Y TRABAJO. Que la DIRECCIÓN GENERAL DE ASUNTOS JURÍDICOS del MINISTERIO DE PRODUCCIÓN Y TRABAJO ha tomado la intervención de su competencia. Que la presente medida se dicta en ejercicio de las atribuciones conferidas por el artículo 99, inciso 7, de la CONSTITUCIÓN NACIONAL. Por ello, EL PRESIDENTE DE LA NACIÓN ARGENTINA DECRETA: ARTÍCULO 1º.- Acéptase, a partir del 18 de diciembre de 2018, la renuncia presentada por el Doctor ESEVERRI (M.I. Nº 20.346.804) al cargo de Secretario de Coordinación Administrativa, actualmente dependiente de la Secretaría de Gobierno de Trabajo y Empleo del MINISTERIO DE PRODUCCIÓN Y TRABAJO, para el que fuera designado por el Decreto N° 193 de fecha 8 de marzo de 2018. ARTÍCULO 2º.- Agradécense al funcionario citado en el artículo 1° los valiosos servicios prestados en el desempeño de dicho cargo. ARTÍCULO 3°.- Desígnase, a partir del 18 de diciembre de 2018, al Doctor (M.I. N° 25.055.401) en el cargo de Secretario de Coordinación Administrativa dependiente de la Secretaría de Gobierno de Trabajo y Empleo del MINISTERIO DE PRODUCCIÓN Y TRABAJO. ARTÍCULO 4º.- Comuníquese, publíquese, dése a la DIRECCIÓN NACIONAL DEL REGISTRO OFICIAL y archívese. MACRI - Dante Sica e. 07/01/2019 N° 763/19 v. 
07/01/2019 Fecha de publicación 07/01/2019 """ predict(normative)Load and Merge Dataonedrive_path = "C:/Users/cfowle/The Estée Lauder Companies Inc/TeamAnis - General/" reviews = pd.read_csv(onedrive_path + "Data/Ratings and Reviews/reviews_demand_subcat.csv") cc = pd.read_csv(onedrive_path + "Data/Consumer Care/cc_product_month.csv") products = pd.read_csv(onedrive_path + "Data/Product/product_codes.csv") print(len(reviews)) reviews = reviews.drop("item_description", axis = 1).drop_duplicates() print(len(reviews)) cc = cc.rename(columns = {"Date Month": "date", "P4": "itemid_4", "Brand Clean": "elc_brand"}) products = products.rename(columns = {"P4": "itemid_4", "brand": "elc_brand", "SubCategory":"sub_category"}) products = products[["elc_brand", "itemid_4", 'Major Category', 'Application', 'Category', 'sub_category']].drop_duplicates(["elc_brand", "itemid_4"]) cc_product = cc.merge(products, how = "left", on = ["itemid_4", "elc_brand"]) cc_reviews_product = reviews.merge(cc_product, how = "left").fillna(0) cc_reviews_product = cc_reviews_product.groupby(["elc_brand", "date", "sub_category"]).sum().reset_index()Feature Engineering Target Encodingsubcat_map = cc_reviews_product.groupby("sub_category")["demand"].mean().reset_index() brand_map = cc_reviews_product.groupby("elc_brand")["demand"].mean().reset_index() cc_reviews_product["month"] = [x[-2:] for x in cc_reviews_product["date"].values] cc_reviews_product["year"] = [x[:4] for x in cc_reviews_product["date"].values] month_map = cc_reviews_product.groupby("month")["demand"].mean().reset_index() year_map = cc_reviews_product.groupby("year")["demand"].mean().reset_index() subcat_map = subcat_map.rename(columns = {"demand":"subcat_avg_demand"}) month_map = month_map.rename(columns = {"demand":"month_avg_demand"}) year_map = year_map.rename(columns = {"demand":"year_avg_demand"}) brand_map = brand_map.rename(columns = {"demand":"brand_avg_demand"}) cc_reviews_product = cc_reviews_product.merge(subcat_map).merge(month_map).merge(year_map).merge(brand_map) cc_reviews_product = cc_reviews_product.sort_values("date")Create dataset without customer datanon_customer_data = cc_reviews_product[["date", "demand_F1", "demand", "demand_P2", "demand_P1", "subcat_avg_demand", "month_avg_demand", "year_avg_demand", "brand_avg_demand", "month", "year"]]Create Dataset with only Relative Measuresrelative_data = cp.deepcopy(non_customer_data) subcat = cc_reviews_product ##just here so I can copy some old code easily relative_data["percent_1"] = subcat["rating_1"]/subcat["nb_reviews"] relative_data["percent_2"] = subcat["rating_2"]/subcat["nb_reviews"] relative_data["percent_3"] = subcat["rating_3"]/subcat["nb_reviews"] relative_data["percent_4"] = subcat["rating_4"]/subcat["nb_reviews"] relative_data["percent_5"] = subcat["rating_5"]/subcat["nb_reviews"] relative_data["percent_negative"] = subcat['sentiment_negative']/subcat["nb_reviews"] relative_data["percent_neutral"] = subcat['sentiment_neutral']/subcat["nb_reviews"] relative_data["percent_positive"] = subcat['sentiment_positive']/subcat["nb_reviews"] relative_data = relative_data.merge(subcat_map).merge(month_map).merge(year_map).merge(brand_map) relative_data = relative_data.sort_values("date").reset_index(drop=True) relative_data = relative_data.fillna(0) full_relative_data = cp.deepcopy(relative_data) subcat["cc_tot"] = subcat['Complaints'] + subcat['Compliments'] + subcat['Questions'] + subcat['Suggestions'] cc_counts = cc.drop(['Product Code', 'itemid_4', 'elc_brand', "date"], axis = 1).columns 
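# The loop below converts each consumer-care count column kept in cc_counts
# (Complaints, Compliments, Questions, Suggestions, ...) into its share of total
# consumer-care contacts (cc_tot), so the model sees relative rather than absolute volumes;
# rows where cc_tot is 0 produce NaN (0/0) and are zeroed by the fillna(0) that follows.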
for col in cc_counts: full_relative_data["percent_" + col] = subcat[col]/subcat["cc_tot"] full_relative_data = full_relative_data.fillna(0) full_relative_dataBuild Model with Customer DataX = relative_data.drop(["demand_F1", "date", "sub_category", "elc_brand"], axis = 1) y = relative_data[["demand_F1"]] X.columns X = StandardScaler().fit_transform(X) pca = PCA().fit(X) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance') pca = PCA(n_components=10) principalComponents = pca.fit_transform(X) principalDf = pd.DataFrame(data = principalComponents) #principalDf.to_csv(onedrive_path + "Output/2020_06_22_to_rama/pca.csv") X_train, X_test, y_train, y_test = train_test_split(principalDf, y, shuffle = False) tree = DecisionTreeRegressor() tree.fit(X_train, y_train) tree_param = {'max_depth': [2, 3, 5, 7], 'min_samples_split': [2, 4, 18, 16, 32, 64], 'min_samples_leaf': [2, 4, 18, 16, 32, 64]} clf = GridSearchCV(tree, tree_param, cv=5) clf.fit(X_train, y_train) clf.best_score_ pred = clf.best_estimator_.predict(X_test) metrics.r2_score(pred, y_test)What if we remove Demand Lagsno_lags = relative_data[['subcat_avg_demand', 'month_avg_demand', 'year_avg_demand', 'brand_avg_demand', 'month', 'year', 'percent_1', 'percent_2', 'percent_3', 'percent_4', 'percent_5', 'percent_negative', 'percent_neutral', 'percent_positive']] X = StandardScaler().fit_transform(no_lags) pca = PCA().fit(X) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance') pca = PCA(n_components=10) principalComponents = pca.fit_transform(X) principalDf = pd.DataFrame(data = principalComponents) X_train, X_test, y_train, y_test = train_test_split(principalDf, y, shuffle = False) tree = DecisionTreeRegressor() tree.fit(X_train, y_train) tree_param = {'max_depth': [2, 3, 5, 7], 'min_samples_split': [2, 4, 18, 16, 32, 64], 'min_samples_leaf': [2, 4, 18, 16, 32, 64]} clf = GridSearchCV(tree, tree_param, cv=5) clf.fit(X_train, y_train) clf.best_score_ pred = clf.best_estimator_.predict(X_test) metrics.r2_score(pred, y_test)What if I didn't do PCA?X_train, X_test, y_train, y_test = train_test_split(no_lags, y, shuffle = False) tree = DecisionTreeRegressor() tree.fit(X_train, y_train) tree_param = {'max_depth': [2, 3, 4], 'min_samples_split': [2, 4, 18, 16, 32, 64], 'min_samples_leaf': [2, 4, 18, 16, 32, 64]} clf = GridSearchCV(tree, tree_param, cv=5) clf.fit(X_train, y_train) clf.best_score_ pred = clf.best_estimator_.predict(X_test) metrics.r2_score(pred, y_test) fig = plt.figure(figsize=(25,20)) _ = plot_tree(clf.best_estimator_, feature_names=no_lags.columns, filled=True)What if we removed all demand datano_demand = relative_data[['month','year', 'percent_1', 'percent_2', 'percent_3', 'percent_4', 'percent_5', 'percent_negative', 'percent_neutral', 'percent_positive']] X = StandardScaler().fit_transform(no_demand) pca = PCA().fit(X) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance') pca = PCA(n_components=7) principalComponents = pca.fit_transform(X) principalDf = pd.DataFrame(data = principalComponents) X_train, X_test, y_train, y_test = train_test_split(principalDf, y, shuffle = False) tree = DecisionTreeRegressor() tree.fit(X_train, y_train) tree_param = {'max_depth': [2, 3, 5, 7], 'min_samples_split': [2, 4, 18, 16, 32, 64], 'min_samples_leaf': [2, 4, 18, 16, 32, 64]} clf = GridSearchCV(tree, tree_param, cv=5) 
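# GridSearchCV runs 5-fold cross-validation over every combination of max_depth,
# min_samples_split and min_samples_leaf; best_score_ is the mean cross-validated score
# (R^2 by default for a regressor) of the best combination, and best_estimator_ (refit on
# the full training split) is then evaluated on the chronologically later, unshuffled test split.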
clf.fit(X_train, y_train) clf.best_score_ pred = clf.best_estimator_.predict(X_test) metrics.r2_score(pred, y_test)Assignment 2 1. What are the two values of the Boolean data type? How do you write them?**Ans**: The Boolean data type has two values: `True` and `False`. We write them as belowtrue_val=True false_val=False print(true_val,type(true_val)) print(false_val,type(false_val))True False 2. What are the three different types of Boolean operators?**Ans**: `and`, `or`, and `not` are the three Boolean operators in Python. We write them as belownum1=500 num2=800 print(num1>300 and num2>900) # Example of boolean and print(num1>200 or num2>700) # Example of boolean or print(not(num1>100)) # Example of boolean notFalse True False3. Make a list of each Boolean operator's truth tables (i.e. every possible combination of Boolean values for the operator and what it evaluates to).**Ans:** The truth tables for the Boolean operators are as follows:

| # | A | B | A and B |
|---:|:------|:------|:--------|
| 1 | True | False | False |
| 2 | True | True | True |
| 3 | False | True | False |
| 4 | False | False | False |

| # | A | B | A or B |
|---:|:------|:------|:-------|
| 1 | True | False | True |
| 2 | True | True | True |
| 3 | False | True | True |
| 4 | False | False | False |

| # | A | not A |
|---:|:------|:------|
| 1 | True | False |
| 2 | False | True |

4. What are the values of the following expressions?* (5 > 4) and (3 == 5) * not (5 > 4) * (5 > 4) or (3 == 5) * not ((5 > 4) or (3 == 5)) * (True and True) and (True == False) * (not False) or (not True)print((5>4)and(3==5)) # False print(not(5>4)) # False print((5>4)or(3==5)) # True print(not((5>4)or(3==5))) # False print((True and True)and(True==False)) # False print((not False)or(not True)) # TrueFalse False True False False True5. What are the six comparison operators?**Ans:** The six comparison operators available in Python are: `==`, `!=`, `<`, `>`, `<=`, `>=` 6. How do you tell the difference between the equal to and assignment operators? Describe a condition and when you would use one. **Ans:** `==` is the equal to operator that compares two values and evaluates to a Boolean, while `=` is the assignment operator that stores a value in a variable.a=3 # Assignment operator that stores the value 3 in the variable a if a==3: # comparing the value of the variable a with 3 print(a==3)True7. Identify the three blocks in this code:spam = 0 if spam == 10: print('eggs') if spam > 5: print('bacon') else: print('ham') print('spam') print('spam')**Ans**: In Python, a code block refers to a group of statements at the same indentation level. Code blocks are most commonly found in classes, functions, and loops.spam = 0 if spam == 10: print('eggs') # block #1 if spam > 5: print('bacon') # block #2 else: print('ham') # block #3 print('spam') print('spam')ham spam spam8. Write code that prints Hello if 1 is stored in spam, prints Howdy if 2 is stored in spam, and prints Greetings! if anything else is stored in spam.def spamCode(spam): if spam==1: print('Hello') elif spam==2: print('Howdy') else: print('Greetings') spamCode(1) spamCode(2) spamCode(3)Hello Howdy Greetings9. If your program is stuck in an endless loop, what keys will you press?**Ans:** Press `Ctrl-C` to stop a program stuck in an infinite loop. 10. How can you tell the difference between break and continue?**Ans:** The `break` statement ends the loop immediately and moves execution to the code after the loop.
Whereas the `continue` statement will move the execution to the start of the loop. 11. In a for loop, what is the difference between range(10), range(0, 10), and range(0, 10, 1)?**Ans:** The Differences are as follows: 1. The _**range(10)**_ call range from 0 to 9 (but not include 10) 2. The _**range (0,10)**_ explicitly tells the loop to start at 0 3. The _**range(0,10,1)**_ explicitly tells the loop to increase the variable by 1 on each iteration 12. Write a short program that prints the numbers 1 to 10 using a for loop. Then write an equivalent program that prints the numbers 1 to 10 using a while loop ?print('-'*10,'Using For Loop','-'*10) for i in range(1,11): print(i, end=" ") print('\n') print('-'*10,'Using While Loop','-'*10) i=1 while i<=10: print(i, end=" ") i+=1---------- Using For Loop ---------- 1 2 3 4 5 6 7 8 9 10 ---------- Using While Loop ---------- 1 2 3 4 5 6 7 8 9 100.0 IMPORTSimport pandas as pd import inflection import math import numpy as np import seaborn as sns import datetime from matplotlib import pyplot as plt from IPython.core.display import HTML from IPython.display import Image0.1. Helper Functions# Função para ampliar area de edição do jupyter notebook def jupyter_settings(): %matplotlib inline %pylab inline plt.style.use( 'bmh' ) plt.rcParams['figure.figsize'] = [25, 12] plt.rcParams['font.size'] =24 display( HTML( '') ) pd.options.display.max_columns = None pd.options.display.max_rows = None pd.set_option( 'display.expand_frame_repr', False ) sns.set() jupyter_settings()Populating the interactive namespace from numpy and matplotlib0.2. Loading data# leitura dos dados fornecidos df_sales_raw = pd.read_csv('../data/train.csv', low_memory=False) df_store_raw = pd.read_csv('../data/store.csv', low_memory=False) # merge de datasets df_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store') # teste de leitura simples df_raw.sample()1.0. PASSO 01 - DESCRICAO DOS DADOS# fazer uma cópia do dataset quando muda de seção, somente para manter os dados , caso seja necessário recomeçar df1 = df_raw.copy() df1.columns1.1. Rename Columns# renomenado as colunas para facilitar analise dos dados cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'] snakecase = lambda x: inflection.underscore( x) cols_new = list( map(snakecase, cols_old)) #rename df1.columns = cols_new #visualizando as colunas renomeadas df1.columns1.2. Data Dimensions# leitura de colunas/linhas do dataset para dimensionar os dados print('Number of Rows: {}'.format(df1.shape[0])) print('Number of Cols: {}'.format(df1.shape[1]))Number of Rows: 1017209 Number of Cols: 181.3. Data Types# leitura do tipos de dados de cada coluna df1['date'] = pd.to_datetime(df1['date']) df1.dtypes1.4. Ckeck NA# Verificando colunas com registros vazios df1.isna().sum()1.5. 
Fillout NA#competition_distance --> 2642 registros vazios # Verificando qual a maior distancia de um concorrente -> 75860.0 # SOLUÇÃO para popular registros vazios-> Vou aplicar uma distancia maxima = 200000.0 para os registros NAN desta coluna df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x) #=============================================================================================================== #competition_open_since_month --> 323348 registros vazios # mes que o concorrente mais proximo foi aberto. Pq este campo esta vazio? a loja ja estava aberta quando instalou a nossa loja ou ninguem resgistrou esta informação # SOLUÇÃO para popular registros vazios-> APLICAR A DATA (mes) DE VENDA NESTE CAMPO, PARA DEPOIS TESTAR USANDO CRISP E AVALIAR O ALGORITMO df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan( x['competition_open_since_month']) else x['competition_open_since_month'], axis=1) #=============================================================================================================== #competition_open_since_year --> 323348 registros vazios # IDEM solução do item anterior # SOLUÇÃO para popular registros vazios-> APLICAR A DATA (ano) DE VENDA NESTE CAMPO, PARA DEPOIS TESTAR USANDO CRISP E AVALIAR O ALGORITMO df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan( x['competition_open_since_year']) else x['competition_open_since_year'], axis=1) #=============================================================================================================== #promo2_since_week --> 508031 registros vazios # SOLUÇÃO para popular registros vazios-> APLICAR A DATA (semana) DE VENDA NESTE CAMPO, PARA DEPOIS TESTAR USANDO CRISP E AVALIAR O ALGORITMO df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan( x['promo2_since_week']) else x['promo2_since_week'], axis=1) #=============================================================================================================== #promo2_since_year --> 508031 registros vazios # SOLUÇÃO para popular registros vazios-> APLICAR A DATA (ano) DE VENDA NESTE CAMPO, PARA DEPOIS TESTAR USANDO CRISP E AVALIAR O ALGORITMO df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan( x['promo2_since_year']) else x['promo2_since_year'], axis=1) #=============================================================================================================== #promo_interval --> 508031 registros vazios #criando um mapa de mês month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'} # Colocando 0 nos registros que possui a coluna promo_interval = 0 df1['promo_interval'].fillna( 0, inplace=True ) # Criei uma coluna month_map onde será gravado o mes da coluna 'date' do registro, já convertido de acordo com a biblioteca criada df1['month_map'] = df1['date'].dt.month.map( month_map ) # Criei uma nova coluna que vai registrar 1 para quem tem promoção no mes de venda e 0 data de venda fora da promoçao df1['is_promo'] = df1[['promo_interval', 'month_map']].apply( lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split( ',' ) else 0, axis=1 ) # releitura para conferir se ainda temos registros vazios df1.isna().sum()1.6. 
Change types# competiton df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int) df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int) # promo2 df1['promo2_since_week'] = df1['promo2_since_week'].astype(int) df1['promo2_since_year'] = df1['promo2_since_year'].astype(int) # releitura dos tipos de dados para conferencia df1.dtypes1.7. Descriptive Statistical# Criando dataframes de acordo com o typo da coluna num_attributes = df1.select_dtypes( include=['int64', 'int32', 'float64']) cat_attributes = df1.select_dtypes( exclude=['int64', 'int32', 'float64', 'datetime64[ns]'])1.7.1 Numerical Attributes# Dividindo o datafame em dados numéricos e categóricos # Realizar calculos basicos para cada coluna, para ter uma noção dos dados # Central Tendency - mean, median ct1 = pd.DataFrame( num_attributes.apply( np.mean ) ).T ct2 = pd.DataFrame( num_attributes.apply( np.median ) ).T #Dispersion - std, min, max, range, skew, kurtosis d1 = pd.DataFrame( num_attributes.apply( np.std ) ).T d2 = pd.DataFrame( num_attributes.apply( min ) ).T d3 = pd.DataFrame( num_attributes.apply( max ) ).T d4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() ) ).T d5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() ) ).T d6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T # Concatenate m = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index() #Rename columns m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis'] m sns.distplot( df1['competition_distance'], kde=False )/home/leandro/.local/lib/python3.9/site-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)1.7.2 Caterigal Attributescat_attributes.apply( lambda x: x.unique().shape[0] ) aux1 = df1[(df1['state_holiday'] != '0' ) & (df1['sales'] > 0)] plt.subplot( 1, 3, 1) sns.boxplot( x='state_holiday', y='sales', data=aux1 ) plt.subplot( 1, 3, 2) sns.boxplot( x='store_type', y='sales', data=aux1 ) plt.subplot( 1, 3, 3) sns.boxplot( x='assortment', y='sales', data=aux1 )2.0. PASSO 02 - FEATURE ENGINEETING# fazer uma cópia do dataset ao ir para um próximo passo ou seção, somente para manter os dados , caso seja necessário recomeçar df2 = df1.copy()2.1. Mapa Mental de Hipóteses# Feito Feature Engineering para criar listas de hipóteses e validar dados Image('../img/DAILY_STORE_SALES.png')2.1. Criação das Hipóteses 2.1.1. Hipóteses Loja **1.** Lojas com número maior de funcionários deveriam vender mais.**2.** Lojas com maior capacidade de estoque deveriam vender mais.**3.** Lojas com maior porte deveriam vender mais.**4.** Lojas com maior sortimentos deveriam vender mais.**5.** Lojas com competidores mais próximos deveriam vender menos.**6.** Lojas com competidores a mais tempo deveriam vender mais. 2.1.2. 
2.2.2. Product Hypotheses **1.** Stores that invest more in Marketing should sell more.**2.** Stores with greater product exposure should sell more.**3.** Stores with lower-priced products should sell more.**4.** Stores with more aggressive promotions (bigger discounts) should sell more.**5.** Stores with promotions active for longer should sell more.**6.** Stores with more promotion days should sell more.**7.** Stores with more consecutive promotions should sell more. 2.2.3. Time Hypotheses **1.** Stores open during the Christmas holiday should sell more.**2.** Stores should sell more over the years.**3.** Stores should sell more in the second half of the year.**4.** Stores should sell more after the 10th of each month.**5.** Stores should sell less on weekends.**6.** Stores should sell less during school holidays 2.3. Final List of Hypotheses **1.** Stores with a larger assortment should sell more.**2.** Stores with closer competitors should sell less.**3.** Stores with longer-established competitors should sell more. **4.** Stores with promotions active for longer should sell more.**5.** Stores with more promotion days should sell more.**6.** Stores with more consecutive promotions should sell more. **7.** Stores open during the Christmas holiday should sell more.**8.** Stores should sell more over the years.**9.** Stores should sell more in the second half of the year.**10.** Stores should sell more after the 10th of each month.**11.** Stores should sell less on weekends.**12.** Stores should sell less during school holidays 2.4. Feature Engineering# Creating new features from the existing dataset columns # Year df2['year'] = df2['date'].dt.year # Month df2['month'] = df2['date'].dt.month # Day df2['day'] = df2['date'].dt.day # Week of Year #df2['week_of_year'] = df2['date'].dt.weekofyear df2['week_of_year'] = df2['date'].dt.isocalendar().week # Year Week df2['year_week'] = df2['date'].dt.strftime('%Y-%W') # competition since df2['competition_since'] = df2.apply(lambda x: datetime.datetime(year=x['competition_open_since_year'], month=x['competition_open_since_month'], day=1), axis=1) df2['competition_time_month'] = (( df2['date'] - df2['competition_since'])/30 ).apply(lambda x: x.days).astype(int) # Promo since df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str) df2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime( x + '-1', '%Y-%W-%w') - datetime.timedelta( days=7)) df2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7 ).apply(lambda x: x.days).astype(int) # ASSORTMENT df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended') # State holiday df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day' ) df2.head().T3.0. STEP 03 - VARIABLE FILTERINGdf3 = df2.copy()3.1. Row Filtering# creating a new dataset with only open stores that recorded sales df3 = df3[(df3['open'] != 0) & (df3['sales'] > 0)]
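As a quick sanity check on the `'%Y-%W-%w'` pattern used to build `promo_since` in the feature engineering step above, the week-based parsing can be exercised on a literal value in isolation. This is a minimal sketch, independent of the dataframe:

import datetime

# '2015-31' encodes year 2015, week 31; appending '-1' pins the weekday to Monday (%w: 1 = Monday)
promo_since = datetime.datetime.strptime('2015-31' + '-1', '%Y-%W-%w')
print(promo_since)                                # Monday of week 31 of 2015 (2015-08-03)
print(promo_since - datetime.timedelta(days=7))   # shifted one week back, as done for the feature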
3.2. Column Selection# dropping columns that are not needed for the analysis, to make processing faster cols_drop = ['customers', 'open', 'promo_interval', 'month_map'] df3 = df3.drop(cols_drop, axis=1) df3.columnsDeep Learning=============Assignment 1------------The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.This notebook uses the [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) dataset, to be used with Python experiments. This dataset is designed to look like the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.# These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import imageio import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile from IPython.display import display, Image from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle # Config the matplotlib backend as plotting inline in IPython %matplotlib inlineFirst, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k labeled examples and the test set about 19,000. Given these sizes, it should be possible to train models quickly on any machine.url = 'https://commondatastorage.googleapis.com/books1000/' last_percent_reported = None data_root = '.' # Change me to store data elsewhere def download_progress_hook(count, blockSize, totalSize): """A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 5% change in download progress. """ global last_percent_reported percent = int(count * blockSize * 100 / totalSize) if last_percent_reported != percent: if percent % 5 == 0: sys.stdout.write("%s%%" % percent) sys.stdout.flush() else: sys.stdout.write(".") sys.stdout.flush() last_percent_reported = percent def maybe_download(filename, expected_bytes, force=False): """Download a file if not present, and make sure it's the right size.""" dest_filename = os.path.join(data_root, filename) if force or not os.path.exists(dest_filename): print('Attempting to download:', filename) filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook) print('\nDownload Complete!') statinfo = os.stat(dest_filename) if statinfo.st_size == expected_bytes: print('Found and verified', dest_filename) else: raise Exception( 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?') return dest_filename train_filename = maybe_download('notMNIST_large.tar.gz', 247336696) test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)Found and verified ./notMNIST_large.tar.gz Found and verified ./notMNIST_small.tar.gzExtract the dataset from the compressed .tar.gz file.This should give you a set of directories, labeled A through J.num_classes = 10 np.random.seed(133) def maybe_extract(filename, force=False): root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz if os.path.isdir(root) and not force: # You may override by setting force=True. print('%s already present - Skipping extraction of %s.'
% (root, filename)) else: print('Extracting data for %s. This may take a while. Please wait.' % root) tar = tarfile.open(filename) sys.stdout.flush() tar.extractall(data_root) tar.close() data_folders = [ os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))] if len(data_folders) != num_classes: raise Exception( 'Expected %d folders, one per class. Found %d instead.' % ( num_classes, len(data_folders))) print(data_folders) return data_folders train_folders = maybe_extract(train_filename) test_folders = maybe_extract(test_filename)./notMNIST_large already present - Skipping extraction of ./notMNIST_large.tar.gz. ['./notMNIST_large/A', './notMNIST_large/B', './notMNIST_large/C', './notMNIST_large/D', './notMNIST_large/E', './notMNIST_large/F', './notMNIST_large/G', './notMNIST_large/H', './notMNIST_large/I', './notMNIST_large/J'] ./notMNIST_small already present - Skipping extraction of ./notMNIST_small.tar.gz. ['./notMNIST_small/A', './notMNIST_small/B', './notMNIST_small/C', './notMNIST_small/D', './notMNIST_small/E', './notMNIST_small/F', './notMNIST_small/G', './notMNIST_small/H', './notMNIST_small/I', './notMNIST_small/J']---Problem 1---------Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.---import random #Image(filename='test.png') #Taken from https://stackoverflow.com/questions/36006136/how-to-display-images-in-a-row-with-ipython-display/38556650 from matplotlib.pyplot import figure, imshow, axis from matplotlib.image import imread def showImagesHorizontally(list_of_files): fig = figure() number_of_files = len(list_of_files) for i in range(number_of_files): a=fig.add_subplot(1,number_of_files,i+1) # nrows, ncols, index. So, add a subplot at that position. image = imread(list_of_files[i]) # apparently you can read a list of files as one image? imshow(image,cmap='Greys_r') # show image as grey # imshow(image) # yup, looks odd if you don't use the cmap axis('off') # turn off axis lines and labels. #END COPIED CODE FROM https://stackoverflow.com/questions/36006136/how-to-display-images-in-a-row-with-ipython-display/38556650 exemplars_per_folder = 10 #note that the images are scaled to exemplars_by_folder = [] for folder in test_folders: filenames = os.listdir(folder) folder_exemplars = [] for i in range(0, exemplars_per_folder): file_choice = random.choice(filenames) path_to_chosen_file = os.path.join(folder, file_choice) folder_exemplars.append(path_to_chosen_file) exemplars_by_folder.append(folder_exemplars) for folder in exemplars_by_folder: # using code from https://stackoverflow.com/questions/36006136/how-to-display-images-in-a-row-with-ipython-display/38556650 showImagesHorizontally(folder) # the way I did it at first: # for exemplar in folder: # display(Image(exemplar))Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. 
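Concretely, the normalisation maps each raw pixel value p in [0, 255] to (p - 255/2) / 255, i.e. into the range [-0.5, 0.5]. A tiny sketch on a dummy row of pixel values (illustrative only, not part of the assignment code):

import numpy as np

pixel_depth = 255.0
raw = np.array([0.0, 128.0, 255.0])                  # dummy pixel values
normalized = (raw - pixel_depth / 2) / pixel_depth   # same formula used in load_letter below
print(normalized)                                    # -> values -0.5, ~0.002 and 0.5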
A few images might not be readable, we'll just skip them.image_size = 28 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. def load_letter(folder, min_num_images): """Load the data for a single letter label.""" image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32) print(folder) num_images = 0 for image in image_files: image_file = os.path.join(folder, image) try: image_data = (imageio.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth if image_data.shape != (image_size, image_size): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except (IOError, ValueError) as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] for folder in data_folders: set_filename = folder + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # You may override by setting force=True. print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' % set_filename) dataset = load_letter(folder, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folders, 45000) test_datasets = maybe_pickle(test_folders, 1800)./notMNIST_large/A.pickle already present - Skipping pickling. ./notMNIST_large/B.pickle already present - Skipping pickling. ./notMNIST_large/C.pickle already present - Skipping pickling. ./notMNIST_large/D.pickle already present - Skipping pickling. ./notMNIST_large/E.pickle already present - Skipping pickling. ./notMNIST_large/F.pickle already present - Skipping pickling. ./notMNIST_large/G.pickle already present - Skipping pickling. ./notMNIST_large/H.pickle already present - Skipping pickling. ./notMNIST_large/I.pickle already present - Skipping pickling. ./notMNIST_large/J.pickle already present - Skipping pickling. ./notMNIST_small/A.pickle already present - Skipping pickling. ./notMNIST_small/B.pickle already present - Skipping pickling. ./notMNIST_small/C.pickle already present - Skipping pickling. ./notMNIST_small/D.pickle already present - Skipping pickling. ./notMNIST_small/E.pickle already present - Skipping pickling. ./notMNIST_small/F.pickle already present - Skipping pi[...]---Problem 2---------Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.---#'s solution to Problem 2. 
# unpickling takes forever so I'm breaking it out into its own cell def load_pickled_dataset(name_of_pickle_file): pkl_file = open(name_of_pickle_file, 'rb') data = pickle.load(pkl_file) return data def unpickle_datasets(datasets): unpickled_datasets = [] for dataset in datasets: print("unpickling dataset {}...".format(dataset)) data = load_pickled_dataset(dataset) unpickled_datasets.append(data) return unpickled_datasets unpickled_train = unpickle_datasets(train_datasets) unpickled_test = unpickle_datasets(test_datasets) #Problem 2 solution, part 2: pick at random from unpickled data, and display. def show_images(list_of_images): fig = figure() number_of_images = len(list_of_images) for i in range(number_of_images): a=fig.add_subplot(1,number_of_images,i+1) # nrows, ncols, index. So, add a subplot at that position. image = list_of_images[i] imshow(image,cmap='Greys_r') # show image as grey axis('off') # turn off axis lines and labels. plt.show() def get_one_sample_from_each_dataset(datasets): samples = [] for dataset in datasets: pic = random.choice(dataset) samples.append(pic) return samples train_samples = get_one_sample_from_each_dataset(unpickled_train) test_samples = get_one_sample_from_each_dataset(unpickled_train) # print(train_samples) show_images(train_samples) show_images(test_samples)---Problem 3---------Another check: we expect the data to be balanced across classes. Verify that.---# how the heck do I do that. def get_some_exemplars(unpickled_class_dataset, number=5): exemplars = [] for i in range(0,number): exemplars.append(random.choice(unpickled_class_dataset)) return exemplars def get_dataset_stats(unpickled_datasets, name): for index, class_dataset in enumerate(unpickled_datasets): class_exemplars = get_some_exemplars(class_dataset) print("Getting examples for {0}, class #{1}".format(name, index)) print("Some examples of this class:") show_images(class_exemplars) print("for this class we have {0} data items\n\n".format(len(class_dataset))) get_dataset_stats(unpickled_train, "train data") get_dataset_stats(unpickled_test, "test data")Getting examples for train data, class #0 Some examples of this class:Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. 
The labels will be stored into a separate array of integers 0 through 9.Also create a validation dataset for hyperparameter tuning.def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 200000 valid_size = 10000 test_size = 10000 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) print('Testing:', test_dataset.shape, test_labels.shape)Training: (200000, 28, 28) (200000,) Validation: (10000, 28, 28) (10000,) Testing: (10000, 28, 28) (10000,)Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)---Problem 4---------Convince yourself that the data is still good after shuffling!---#problem 4 solution: label_to_char = {0:'a',1:'b',2:'c',3:'d',4:'e',5:'f',6:'g',7:'h',8:'i',9:'j'} def labels_to_chars(labels): label_chars =[] for label in labels: label_char = label_to_char[label] label_chars.append(label_char) return label_chars def get_matching_items_randomly_from_two_lists(first, second, num_tuples=5): #assuming the length is the same... 
max_index = len(first)-1 #solution adapted from https://stackoverflow.com/questions/19485641/python-random-sample-of-two-arrays-but-matching-indices idx = np.random.choice(np.arange(len(first)), num_tuples, replace=False) # print("picking {0} random items using idx {1}".format(num_tuples, idx)) # print(type(idx)) first_samples = first[idx] second_samples = second[idx] return first_samples, second_samples def check_after_shuffle(dataset_name, dataset_to_check, labels_to_check, num_samples=15): imgs, labels = get_matching_items_randomly_from_two_lists(dataset_to_check, labels_to_check,num_samples) label_chars = labels_to_chars(labels) print("sample {0} labels:{1}".format(dataset_name,label_chars)) show_images(imgs) num_samples = 12 check_after_shuffle("train", train_dataset, train_labels, num_samples) check_after_shuffle("val", valid_dataset, valid_labels, num_samples) check_after_shuffle("test", test_dataset, test_labels, num_samples)sample train labels:['i', 'd', 'i', 'd', 'i', 'i', 'f', 'd', 'b', 'g', 'c', 'i']Finally, let's save the data for later reuse:pickle_file = os.path.join(data_root, 'notMNIST.pickle') try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size)Compressed pickle size: 690800441---Problem 5---------By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it.Measure how much overlap there is between training, validation and test samples.Optional questions:- What about near duplicates between datasets? (images that are almost identical)- Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.---# train_dataset = test_dataset # valid_dataset = test_dataset # Checking overlap: how the heck would I do this? Check filenames? Hashing? 
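# Note: the helper below makes the arrays read-only and relies on Python's built-in hash() of
# the underlying buffer. An alternative (a sketch only, not used in this solution) is to hash
# the raw bytes of each image explicitly; 'digest_images' is a hypothetical helper added purely
# for illustration.
import hashlib

def digest_images(images):
    """Return one hex digest per image in a stack of 28x28 arrays."""
    return [hashlib.sha1(img.tobytes()).hexdigest() for img in images]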
def generate_hashes(list_of_numpy_arrays): list_of_numpy_arrays.flags.writeable=False hashes=[hash(item.data) for item in list_of_numpy_arrays] return hashes def check_within_dataset(hashes, hashes_set, name): hashes_len = len(hashes) set_len = len(hashes_set) diff = hashes_len-set_len print("within {0} dataset, there are {1} items, but only {2} unique items, which works out to {3} items that are repeats".format(name, hashes_len, set_len, diff)) return diff def check_intersections(set1, set2, name1, name2): intersections=set1.intersection(set2) print("between {0} and {1} there are {2} unique items that are in both at least once".format(name1,name2,len(intersections))) return intersections train_hashes = generate_hashes(train_dataset) valid_hashes = generate_hashes(valid_dataset) test_hashes = generate_hashes(test_dataset) # train_hashes = [1, 2, 3] # valid_hashes = [1, 2, 3] # test_hashes = [1, 2, 3, 3] train_hashes_set=set(train_hashes) train_repeats = check_within_dataset(train_hashes,train_hashes_set, "train") valid_hashes_set = set(valid_hashes) valid_repeats = check_within_dataset(valid_hashes,valid_hashes_set, "valid") test_hashes_set=set(test_hashes) test_repeats = check_within_dataset(test_hashes,test_hashes_set, "test") repeats_within_datasets = train_repeats + valid_repeats + test_repeats print("Total repeats within datasets: {}".format(repeats_within_datasets)) train_val_intersections = check_intersections(train_hashes_set, valid_hashes_set, "train", "valid") train_test_intersections = check_intersections(train_hashes_set, test_hashes_set, "train", "valid") val_test_intersections = check_intersections(valid_hashes_set, test_hashes_set, "valid", "test") intersected_train_items = train_val_intersections.union(train_test_intersections) intersected_valid_items = train_val_intersections.union(val_test_intersections) intersected_test_items = train_test_intersections.union(val_test_intersections) print("Number of unique items in {} that can be found in other sets: {}".format("train",len(intersected_train_items))) print("Number of unique items in {} that can be found in other sets: {}".format("valid",len(intersected_valid_items))) print("Number of unique items in {} that can be found in other sets: {}".format("test",len(intersected_test_items))) all_hashes = train_hashes+valid_hashes+test_hashes all_hashes_set = set(all_hashes) all_repeats = check_within_dataset(all_hashes, all_hashes_set, "all") repeats_percent = float(all_repeats)/float(len(all_hashes)) * 100 items_only_in_train = train_hashes_set - valid_hashes_set - test_hashes_set items_only_in_valid = valid_hashes_set - train_hashes_set - test_hashes_set items_only_in_test = test_hashes_set - train_hashes_set - valid_hashes_set print("There are {} items that only exist in train".format(len(items_only_in_train))) print("There are {} items that only exist in valid".format(len(items_only_in_valid))) print("There are {} items that only exist in test".format(len(items_only_in_test))) set([1, 2, 3]) - set([2]) - set([3]) print("Total percentage of repeat items in all datasets is about {:.2f} %".format(repeats_percent)) print("Total number of repeat items in all datasets is {}".format(all_repeats)) repeats_due_to_overlap = all_repeats - repeats_within_datasets overlap_percent = float(repeats_due_to_overlap)/float(len(all_hashes)) *100 print("We previously found that repeats within datasets totaled to {}".format(repeats_within_datasets)) print("Repeats due to overlap is therefore {0}. 
\nOut of {1} total items, that gives an overlap percentage of {2:.2f}%".format(repeats_due_to_overlap, len(all_hashes), overlap_percent))within train dataset, there are 200000 items, but only 187350 unique items, which works out to 12650 items that are repeats within valid dataset, there are 10000 items, but only 9863 unique items, which works out to 137 items that are repeats within test dataset, there are 10000 items, but only 9802 unique items, which works out to 198 items that are repeats Total repeats within datasets: 12985 between train and valid there are 1003 unique items that are in both at least once between train and valid there are 1174 unique items that are in both at least once between valid and test there are 72 unique items that are in both at least once Number of unique items in train that can be found in other sets: 2153 Number of unique items in valid that can be found in other sets: 1051 Number of unique items in test that can be found in other sets: 1222 within all dataset, there are 220000 items, but only 204790 unique items, which works out to 15210 items that are repeats There are 185197 items th[...]---Problem 6---------Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model.Optional question: train an off-the-shelf model on all the data!---#Training time. def reshape_to_sklearn_format(dataset): num_items, nx, ny = dataset.shape return dataset.reshape(num_items, nx*ny) default_settings_classifier = LogisticRegression() num_to_train_with = 500 #num_to_train_with = len(train_dataset) #sample_data_for_training, sample_labels_for_training = train_dataset[:num_to_train_with], train_labels[:num_to_train_with] sample_data_for_training, sample_labels_for_training = get_matching_items_randomly_from_two_lists(train_dataset, train_labels, num_to_train_with) num_to_test_with = 1010 sample_data_for_testing, sample_labels_for_testing = get_matching_items_randomly_from_two_lists(train_dataset, train_labels, num_to_test_with) # Gotta reshape, according to https://stackoverflow.com/questions/34972142/sklearn-logistic-regression-valueerror-found-array-with-dim-3-estimator-expec # Basically we want the 28x28 images flattened out. sample_data_for_training = reshape_to_sklearn_format(sample_data_for_training) sample_data_for_testing = reshape_to_sklearn_format(sample_data_for_testing) default_settings_classifier.fit(sample_data_for_training, sample_labels_for_training) df_score = default_settings_classifier.score(sample_data_for_testing, sample_labels_for_testing) # Settings lifted from # http://scikit-learn.org/stable/auto_examples/linear_model/plot_sparse_logistic_regression_mnist.html # without understanding fancy_settings_classifier = LogisticRegression(C=50. / num_to_train_with, multi_class='multinomial', penalty='l1', solver='saga', tol=0.1) fancy_settings_classifier.fit(sample_data_for_training, sample_labels_for_training) fs_score = fancy_settings_classifier.score(sample_data_for_testing, sample_labels_for_testing) fancy_settings_classifier_l2 = LogisticRegression(C=50. 
/ num_to_train_with, multi_class='multinomial', penalty='l2', solver='saga', tol=0.1) fancy_settings_classifier_l2.fit(sample_data_for_training, sample_labels_for_training) fs_l2_score = fancy_settings_classifier_l2.score(sample_data_for_testing, sample_labels_for_testing) print("Score for classifier with default settings: {}".format(df_score)) print("Score for classifier with fancy settings and l1: {}".format(fs_score)) print("Score for classifier with fancy settings and l2: {}".format(fs_l2_score)) solver_list = ['newton-cg', 'lbfgs', 'sag', 'saga'] for choice in solver_list: print("trying solver {}".format(choice)) classifier = LogisticRegression(solver=choice, penalty='l2') classifier.fit(sample_data_for_training, sample_labels_for_training) solver_score = classifier.score(sample_data_for_testing, sample_labels_for_testing) print("Score for classifier using solver {0}: {1}".format(choice, solver_score))Score for classifier with default settings: 0.774257425743 Score for classifier with fancy settings: 0.70396039604 Score for classifier with fancy settings and l2: 0.805940594059 Score for classifier using solver newton-cg: 0.774257425743 Score for classifier using solver lbfgs: 0.774257425743 Score for classifier using solver sag: 0.774257425743 Score for classifier using solver saga: 0.774257425743Lab 2 - Logistic Regression (LR) with MNISTThis lab corresponds to Module 2 of the "Deep Learning Explained" course. We assume that you have successfully completed Lab 1 (Downloading the MNIST data).In this lab we will build and train a Multiclass Logistic Regression model using the MNIST data. Introduction**Problem**:Optical Character Recognition (OCR) is a hot research area and there is a great demand for automation. The MNIST data is comprised of hand-written digits with little background noise making it a nice dataset to create, experiment and learn deep learning models with reasonably small computing resources. **Goal**:Our goal is to train a classifier that will identify the digits in the MNIST dataset. **Approach**:There are 4 stages in this lab: - **Data reading**: We will use the CNTK Text reader. - **Data preprocessing**: Covered in part A (suggested extension section). - **Model creation**: Multiclass Logistic Regression model.- **Train-Test-Predict**: This is the same workflow introduced in the lectures Logistic Regression[Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression) (LR) is a fundamental machine learning technique that uses a linear weighted combination of features and generates probability-based predictions of different classes. There are two basic forms of LR: **Binary LR** (with a single output that can predict two classes) and **multiclass LR** (with multiple outputs, each of which is used to predict a single class). ![LR-forms](http://www.cntk.ai/jup/cntk103b_TwoFormsOfLR-v3.png) In **Binary Logistic Regression** (see top of figure above), the input features are each scaled by an associated weight and summed together. The sum is passed through a squashing (aka activation) function and generates an output in [0,1]. This output value is then compared with a threshold (such as 0.5) to produce a binary label (0 or 1), predicting 1 of 2 classes. This technique supports only classification problems with two output classes, hence the name binary LR.
In the binary LR example shown above, the [sigmoid][] function is used as the squashing function.[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function In **Multiclass Logistic Regression** (see bottom of figure above), 2 or more output nodes are used, one for each output class to be predicted. Each summation node uses its own set of weights to scale the input features and sum them together. Instead of passing the summed output of the weighted input features through a sigmoid squashing function, the output is often passed through a [softmax][] function (which, in addition to squashing like the sigmoid, normalizes each node's output value using the sum of all unnormalized nodes). (Details in the context of the MNIST images to follow.)We will use multiclass LR for classifying the MNIST digits (0-9) using 10 output nodes (1 for each of our output classes). In our approach, we will move the softmax function out of the model and into our Loss function used in training (details to follow).[softmax]: https://en.wikipedia.org/wiki/Softmax_function# Import the relevant components from IPython.display import Image from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter) import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import sys import os import cntk as C %matplotlib inlineIn the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).# Select the right target device when this notebook is being tested: if 'TEST_DEVICE' in os.environ: if os.environ['TEST_DEVICE'] == 'cpu': C.device.try_set_default_device(C.device.cpu()) else: C.device.try_set_default_device(C.device.gpu(0)) # Test for CNTK version #if not C.__version__ == "2.0": # raise Exception("this lab is designed to work with 2.0. Current Version: " + C.__version__)Initialization# Ensure we always get the same amount of randomness np.random.seed(0) C.cntk_py.set_fixed_random_seed(1) C.cntk_py.force_deterministic_algorithms() # Define the data dimensions input_dim = 256 #784 num_output_classes = 11 #10Data readingThere are different ways one can read data into CNTK. The easiest way is to load the data in memory using NumPy / SciPy / Pandas readers. However, this can be done only for small data sets. Since deep learning requires large amounts of data, we have chosen in this course to show how to leverage built-in distributed readers that can scale to terabytes of data with little extra effort. We are using the MNIST data you have downloaded using the Lab 1 DataLoader notebook. The dataset has 60,000 training images and 10,000 test images with each image being 28 x 28 pixels. Thus the number of features is equal to 784 (= 28 x 28 pixels), 1 per pixel. The variable `num_output_classes` is set to 10 corresponding to the number of digits (0-9) in the dataset.In Lab 1, the data was downloaded and written to 2 CTF (CNTK Text Format) files, 1 for training, and 1 for testing. Each line of these text files takes the form: |labels 0 0 0 1 0 0 0 0 0 0 |features 0 0 0 0 ... (784 integers each representing a pixel) We are going to use the image pixels corresponding to the integer stream named "features".
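To make the CTF layout concrete, here is a minimal sketch (not part of the lab code) that builds one sample line in that format, with a one-hot `|labels` stream and a flattened `|features` stream:

import numpy as np

digit = 3
features = np.zeros(784, dtype=int)   # a dummy all-zero 28x28 image, flattened
labels = np.zeros(10, dtype=int)
labels[digit] = 1                     # one-hot encoding of the label

ctf_line = "|labels {} |features {}".format(
    " ".join(map(str, labels)), " ".join(map(str, features)))
print(ctf_line[:48] + " ...")         # e.g. |labels 0 0 0 1 0 0 0 0 0 0 |features 0 0 0 ...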
We define a `create_reader` function to read the training and test data using the [CTF deserializer](https://cntk.ai/pythondocs/cntk.io.html?highlight=ctfdeserializercntk.io.CTFDeserializer). The labels are [1-hot encoded](https://en.wikipedia.org/wiki/One-hot). Refer to Lab 1 for data format visualizations.# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file def create_reader(path, is_training, input_dim, num_label_classes): labelStream = C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False) featureStream = C.io.StreamDef(field='features', shape=input_dim, is_sparse=False) deserailizer = C.io.CTFDeserializer(path, C.io.StreamDefs(labels = labelStream, features = featureStream)) return C.io.MinibatchSource(deserailizer, randomize = is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1) # Ensure the training and test data is generated and available for this lab. # We search in two locations in the toolkit for the cached MNIST data set. data_found = False for data_dir in [os.path.join(".", "PLAID")]: train_file = os.path.join(data_dir, "train_log.txt") test_file = os.path.join(data_dir, "test_log.txt") if os.path.isfile(train_file) and os.path.isfile(test_file): data_found = True break if not data_found: raise ValueError("Please generate the data by completing Lab1_MNIST_DataLoader") print("Data directory is {0}".format(data_dir))Data directory is ./PLAIDModel CreationA multiclass logistic regression (LR) network is a simple building block that has been effectively powering many ML applications in the past decade. The figure below summarizes the model in the context of the MNIST data.![mnist-LR](https://www.cntk.ai/jup/cntk103b_MNIST_LR.png)LR is a simple linear model that takes as input, a vector of numbers describing the properties of what we are classifying (also known as a feature vector, $\bf \vec{x}$, the pixels in the input MNIST digit image) and emits the *evidence* ($z$). For each of the 10 digits, there is a vector of weights corresponding to the input pixels as show in the figure. These 10 weight vectors define the weight matrix ($\bf {W}$) with dimension of 10 x 784. Each feature in the input layer is connected with a summation node by a corresponding weight $w$ (individual weight values from the $\bf{W}$ matrix). Note there are 10 such nodes, 1 corresponding to each digit to be classified. The first step is to compute the evidence for an observation. $$\vec{z} = \textbf{W} \bf \vec{x}^T + \vec{b}$$ where $\bf{W}$ is the weight matrix of dimension 10 x 784 and $\vec{b}$ is known as the *bias* vector with lenght 10, one for each digit. The evidence ($\vec{z}$) is not squashed (hence no activation). Instead the output is normalized using a [softmax](https://en.wikipedia.org/wiki/Softmax_function) function such that all the outputs add up to a value of 1, thus lending a probabilistic iterpretation to the prediction. In CNTK, we use the softmax operation combined with the cross entropy error as our Loss Function for training.#2nd submission, we use 2, 400 respectively #3rd submission, we use 4, 100 respectively #4rd submission, we use 4, 200 respectively num_hidden_layers = 8 hidden_layers_dim = 400Network input and output: - **input** variable (a key CNTK concept): >An **input** variable is a container in which we fill different observations, in this case image pixels, during model learning (a.k.a.training) and model evaluation (a.k.a. testing). 
Thus, the shape of the `input` must match the shape of the data that will be provided. For example, when data are images each of height 10 pixels and width 5 pixels, the input feature dimension will be 50 (representing the total number of image pixels).**Knowledge Check:** What is the input dimension of your chosen model? This is fundamental to our understanding of variables in a network or model representation in CNTK.input = C.input_variable(input_dim) label = C.input_variable(num_output_classes)Logistic Regression network setupThe CNTK Layers module provides a Dense function that creates a fully connected layer which performs the above operations of weighted input summing and bias addition.def create_model(features): with C.layers.default_options(init = C.layers.glorot_uniform(), activation = C.ops.relu): h = features for _ in range(num_hidden_layers): h = C.layers.Dense(hidden_layers_dim)(h) r = C.layers.Dense(num_output_classes, activation = None)(h) return r`z` will be used to represent the output of a network.# Scale the input to 0-1 range by dividing each pixel by 255. #input_s = input/255 z = create_model(input)TrainingBelow, we define the **Loss** function, which is used to guide weight changes during training. As explained in the lectures, we use the `softmax` function to map the accumulated evidences or activations to a probability distribution over the classes (Details of the [softmax function][] and other [activation][] functions).[softmax function]: http://cntk.ai/pythondocs/cntk.ops.htmlcntk.ops.softmax[activation]: https://github.com/Microsoft/CNTK/wiki/Activation-FunctionsWe minimize the cross-entropy between the label and predicted probability by the network.loss = C.cross_entropy_with_softmax(z, label)EvaluationBelow, we define the **Evaluation** (or metric) function that is used to report a measurement of how well our model is performing.For this problem, we choose the **classification_error()** function as our metric, which returns the average error over the associated samples (treating a match as "1", where the model's prediction matches the "ground truth" label, and a non-match as "0").label_error = C.classification_error(z, label)Configure trainingThe trainer strives to reduce the `loss` function by different optimization approaches, [Stochastic Gradient Descent][] (`sgd`) being one of the most popular. Typically, one would start with random initialization of the model parameters. The `sgd` optimizer would calculate the `loss` or error between the predicted label against the corresponding ground-truth label and using [gradient-decent][] generate a new set model parameters in a single iteration. The aforementioned model parameter update using a single observation at a time is attractive since it does not require the entire data set (all observation) to be loaded in memory and also requires gradient computation over fewer datapoints, thus allowing for training on large data sets. However, the updates generated using a single observation sample at a time can vary wildly between iterations. An intermediate ground is to load a small set of observations and use an average of the `loss` or error from that set to update the model parameters. This subset is called a *minibatch*.With minibatches, we sample observations from the larger training dataset. We repeat the process of model parameters update using different combination of training samples and over a period of time minimize the `loss` (and the error metric). 
When the incremental error rates are no longer changing significantly or after a preset number of maximum minibatches to train, we claim that our model is trained.One of the key optimization parameters is called the `learning_rate`. For now, we can think of it as a scaling factor that modulates how much we change the parameters in any iteration.With this information, we are ready to create our trainer. [optimization]: https://en.wikipedia.org/wiki/Category:Convex_optimization[Stochastic Gradient Descent]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent[gradient-decent]: http://www.statisticsviews.com/details/feature/5722691/Getting-to-the-Bottom-of-Regression-with-Gradient-Descent.html# Instantiate the trainer object to drive the model training learning_rate = 0.2 lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch) learner = C.sgd(z.parameters, lr_schedule) trainer = C.Trainer(z, (loss, label_error), [learner])First let us create some helper functions that will be needed to visualize different functions associated with training.# Define a utility function to compute the moving average sum. # A more efficient implementation is possible with np.cumsum() function def moving_average(a, w=5): if len(a) < w: return a[:] # Need to send a copy of the array return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)] # Defines a utility that prints the training progress def print_training_progress(trainer, mb, frequency, verbose=1): training_loss = "NA" eval_error = "NA" if mb%frequency == 0: training_loss = trainer.previous_minibatch_loss_average eval_error = trainer.previous_minibatch_evaluation_average if verbose: print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error*100)) return mb, training_loss, eval_errorRun the trainerWe are now ready to train our fully connected neural net. We want to decide what data we need to feed into the training engine.In this example, each iteration of the optimizer will work on `minibatch_size` sized samples. We would like to train on all 60000 observations. Additionally we will make multiple passes through the data specified by the variable `num_sweeps_to_train_with`. With these parameters we can proceed with training our simple feed forward network.# Initialize the parameters for the trainer minibatch_size = 64 num_samples_per_sweep = 60000 num_sweeps_to_train_with = 10 num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size # Create the reader to training data set reader_train = create_reader(train_file, True, input_dim, num_output_classes) # Map the data streams to the input and labels. 
input_map = { label : reader_train.streams.labels, input : reader_train.streams.features } # Run the trainer on and perform model training training_progress_output_freq = 500 plotdata = {"batchsize":[], "loss":[], "error":[]} for i in range(0, int(num_minibatches_to_train)): # Read a mini batch from the training data file data = reader_train.next_minibatch(minibatch_size, input_map = input_map) trainer.train_minibatch(data) batchsize, loss, error = print_training_progress(trainer, i, training_progress_output_freq, verbose=1) if not (loss == "NA" or error =="NA"): plotdata["batchsize"].append(batchsize) plotdata["loss"].append(loss) plotdata["error"].append(error)Minibatch: 0, Loss: 2.4005, Error: 98.44% Minibatch: 500, Loss: 1.8469, Error: 67.19% Minibatch: 1000, Loss: 1.1192, Error: 39.06% Minibatch: 1500, Loss: 0.8105, Error: 29.69% Minibatch: 2000, Loss: 0.8246, Error: 31.25% Minibatch: 2500, Loss: 0.5015, Error: 18.75% Minibatch: 3000, Loss: 0.5727, Error: 25.00% Minibatch: 3500, Loss: 0.4437, Error: 17.19% Minibatch: 4000, Loss: 0.0690, Error: 1.56% Minibatch: 4500, Loss: 0.3484, Error: 10.94% Minibatch: 5000, Loss: 0.1216, Error: 1.56% Minibatch: 5500, Loss: 0.0283, Error: 1.56% Minibatch: 6000, Loss: 0.1454, Error: 4.69% Minibatch: 6500, Loss: 0.0140, Error: 0.00% Minibatch: 7000, Loss: 0.0305, Error: 1.56% Minibatch: 7500, Loss: 0.0108, Error: 0.00% Minibatch: 8000, Loss: 0.0221, Error: 1.56% Minibatch: 8500, Loss: 0.0171, Error: 0.00% Minibatch: 9000, Loss: 0.0160, Error: 1.56%Let us plot the errors over the different training minibatches. Note that as we progress in our training, the loss decreases though we do see some intermediate bumps.# Compute the moving average loss to smooth out the noise in SGD plotdata["avgloss"] = moving_average(plotdata["loss"]) plotdata["avgerror"] = moving_average(plotdata["error"]) # Plot the training loss and the training error import matplotlib.pyplot as plt plt.figure(1) plt.subplot(211) plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--') plt.xlabel('Minibatch number') plt.ylabel('Loss') plt.title('Minibatch run vs. Training loss') plt.show() plt.subplot(212) plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--') plt.xlabel('Minibatch number') plt.ylabel('Label Prediction Error') plt.title('Minibatch run vs. Label Prediction Error') plt.show()Evaluation / Testing Now that we have trained the network, let us evaluate the trained network on the test data. 
This is done using `trainer.test_minibatch`.out = C.softmax(z) # Read the data for evaluation reader_eval = create_reader(test_file, False, input_dim, num_output_classes) eval_minibatch_size = 1 eval_input_map = {input: reader_eval.streams.features} num_samples = 659 num_minibatches_to_test = num_samples // eval_minibatch_size test_result = 0.0 results=[] for i in range(num_minibatches_to_test): data = reader_eval.next_minibatch(eval_minibatch_size, input_map = eval_input_map) #img_label = data[label].asarray() img_data = data[input].asarray() predicted_label_prob = [out.eval(img_data[i]) for i in range(len(img_data))] pred = [np.argmax(predicted_label_prob[i]) for i in range(len(predicted_label_prob))] #print(predicted_label_prob) results.extend(pred ) #print(results) np.savetxt(str(num_hidden_layers)+"x"+str(hidden_layers_dim)+"_log.csv", np.array(results).astype(int), fmt='%i', delimiter=",")Mars Weather Twitter Section Below#now to process the Mars weather: weather_url = 'https://twitter.com/marswxreport?lang=en' browser.visit(weather_url) html_weather = browser.html soup_weather = BeautifulSoup(html_weather, 'html.parser') weather_p = soup_weather.find_all('p', class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text") mars_weather = weather_p[0].text print(mars_weather)Sol 2224 (2018-11-08), high 0C/32F, low -72C/-97F, pressure at 8.65 hPa, daylight 06:19-18:36Mars facts section below (get the table)facts_url = 'http://space-facts.com/mars' mars_table = pd.read_html(facts_url, flavor = 'html5lib') mars_table mars_df = mars_table[0] mars_df.columns = ['Description', 'Value'] mars_df mars_html = mars_df.to_html(index=False) mars_htmlMars Hemispheres section belowhemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(hemisphere_url) html_hemis = browser.html soup_hemis = BeautifulSoup(html_hemis, 'html.parser') hemis_links = soup_hemis.find_all('a', class_="itemLink product-item") print(hemis_links) #hemisphere = {} #test_output = browser.find_by_css('a.product-item')[1].click() #testagain = browser.find_link_by_text('Sample').first #print(testagain) #hemisphere['img_url'] = testagain['href'] #hemisphere #hemisphere['title'] = browser.find_by_css('h2.title').text #hemisphere #test_output2 = browser.find_by_css('a.itemLink')[3].click() links = browser.find_by_css("a.product-item") number = len(links) number hemisphere_image_urls = [] for i in range (number): hemisphere = {} i = i + 1 print(i) try: browser.find_by_css('a.product-item')[i].click() except: continue hemi_href = browser.find_link_by_text('Sample').first hemisphere['img_url'] = hemi_href['href'] hemisphere['title'] = browser.find_by_css('h2.title').text hemisphere_image_urls.append(hemisphere) print(i) browser.back() hemisphere_image_urlsSubpixel waterbodies * [**Sign up to the DEA Sandbox**](https://docs.dea.ga.gov.au/setup/sandbox.html) to run this notebook interactively from a browser* **Compatibility:** Notebook currently compatible with both the `NCI` and `DEA Sandbox` environments* **Products used:**[wofs_albers](https://explorer.sandbox.dea.ga.gov.au/wofs_albers),[DEA Waterbodies](https://www.ga.gov.au/dea/products/dea-waterbodies)* **Special requirements:** Download the [DEA Waterbodies shapefile](https://ecat.ga.gov.au/geonetwork/srv/eng/catalog.search/metadata/132814). DescriptionDEA Waterbodies uses WOfS, a classification of Landsat pixels into wet and dry, to identify and track waterbodies throughout the Australian continent. 
One limitation is the size of the Landsat pixels, 25 m x 25 m, which can be quite large compared to the size of some waterbodies. Can we identify the maximum extent of a waterbody better than a whole pixel approach, as in e.g. Bishop-Taylor et al. (2019; https://doi.org/10.3390/rs11242984) or Sall et al. (2020; https://doi.org/10.1002/rse2.172)? How would this affect our time series?This notebook takes an existing DEA Waterbodies polygon, refines it using subpixel extents, and recalculates the time series using the subpixel extents for comparison.*** Getting startedSpecify the geohash of the polygon you want to refine in the "Configuration" section, and run all cells to do the analysis. Load packages%matplotlib inline import sys import datacube import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr import geopandas as gpd import shapely from tqdm.notebook import tqdm from affine import Affine from odc.ui import with_ui_cbk sys.path.append("../Scripts") from dea_spatialtools import xr_vectorize, subpixel_contoursConfigurationSpecify the geohash of the waterbody to evaluate:geohash = "r3dn23tun" # Point Hut PondConnect to the datacubeConnect to the datacube so we can access DEA data.The `app` parameter is a unique name for the analysis which is based on the notebook file name.dc = datacube.Datacube(app="Subpixel-waterbodies")Load the DEA Waterbodies polygonsThese are hosted [here](https://ecat.ga.gov.au/geonetwork/srv/eng/catalog.search/metadata/132814). Download them and unzip into the same directory as this notebook.dea_wbs = gpd.read_file("DigitalEarthAustraliaWaterbodies.shp")Then we can load the polygon associated with the waterbody we selected.wb = dea_wbs.set_index("UID").loc[geohash] wb = gpd.GeoDataFrame([wb], crs=dea_wbs.crs)Load the WOfS summaryThe WOfS summary is used to define the waterbody maximum extent.wofs = dc.load( "wofs_summary", geopolygon=datacube.utils.geometry.Geometry( wb.geometry.iloc[0].buffer(100), crs=dea_wbs.crs ), ).isel(time=0) fig, ax = plt.subplots(1, 1) wofs.frequency.plot(ax=ax)Reproduce DEA WaterbodiesFind the 5% and 10% polygons, discard any under 5 px, and then do a spatial join.pc_10 = xr_vectorize(wofs.frequency >= 0.10, crs=wofs.crs) pc_05 = xr_vectorize(wofs.frequency >= 0.05, crs=wofs.crs) pc_10 = pc_10[pc_10.attribute == 1] pc_05 = pc_05[pc_05.attribute == 1] # Discard polygons with under 5 px. pc_05 = pc_05[pc_05.area >= 25 ** 2] pc_10 = pc_10[pc_10.area >= 25 ** 2] # Then join. joined = gpd.sjoin(pc_05, pc_10, lsuffix="05", rsuffix="10", how="right") # Allow a 5% polygon as long as it intersects with a 10% polygon. ok_05 = set(joined.index_05) pc_05 = pc_05.loc[ok_05] wb_pixels = pc_05.reset_index(drop=True) fig, axs = plt.subplots(1, 2) wb_pixels.plot(ax=axs[1], edgecolor="k", facecolor="None") wb.plot(ax=axs[0], edgecolor="k", facecolor="None") axs[0].set_title("DEA Waterbodies") axs[1].set_title("This notebook") plt.tight_layout()Subpixel extentUsing `subpixel_contours`, find the subpixel extents of the waterbody. We can set the contour levels to 5% and 10% as above, but we can also change them to other values. 
This modifies the eventual error in the boundary quite dramatically for steep-edged lakes.contours = subpixel_contours(wofs.frequency, z_values=[0.05, 0.10]).set_index( "z_value", drop=True ) pc_05 = gpd.GeoDataFrame( geometry=[shapely.geometry.Polygon(ls) for ls in contours.iloc[0].geometry] ) pc_10 = gpd.GeoDataFrame( geometry=[shapely.geometry.Polygon(ls) for ls in contours.iloc[1].geometry] ) # Determine whether each polygon is an "inner" polygon or not. pc_10_agg = pc_10.geometry[0] for g in pc_10.geometry[1:]: pc_10_agg = pc_10_agg.symmetric_difference(g) pc_05_agg = pc_05.geometry[0] for g in pc_05.geometry[1:]: pc_05_agg = pc_05_agg.symmetric_difference(g) pc_05 = gpd.GeoDataFrame(geometry=list(pc_05_agg)) pc_10 = gpd.GeoDataFrame(geometry=list(pc_10_agg)) # Discard anything under 5 px. pc_05 = pc_05[pc_05.area >= 25 ** 2] pc_10 = pc_10[pc_10.area >= 25 ** 2] # Spatial join to find 5% extents for 10% polygons. joined = gpd.sjoin(pc_05, pc_10, lsuffix="05", rsuffix="10", how="right") pc_05 = pc_05.loc[joined.index_05] wb_subpixel = pc_05.set_crs("EPSG:3577")Operating in multiple z-value, single array modeCombine both sets with a spatial join.wb_joined = gpd.sjoin(wb_pixels, wb_subpixel, how="left") # Add the geometry back in. wb_joined = wb_joined.join(wb_pixels, rsuffix="_pixel").join( wb_subpixel, on="index_right", rsuffix="_subpixel" )Finally, filter to only include the polygon that maximally overlaps with the DEA Waterbodies polygon.biggest_overlap = np.argmax( [wb.geometry[0].intersection(g).area for g in wb_joined.geometry_pixel] ) wb_joined = gpd.GeoDataFrame([wb_joined.iloc[biggest_overlap]])Shape comparison Let's look at the shape of the waterbody.fig, ax = plt.subplots(figsize=(10, 10)) gpd.GeoDataFrame(geometry=wb_joined.geometry_pixel).plot( ax=ax, edgecolor="blue", facecolor="None" ) gpd.GeoDataFrame(geometry=wb_joined.geometry_subpixel).plot( ax=ax, edgecolor="red", facecolor="None" )Exporting the subpixel polygonWe can export the subpixel polygon as a GeoJSON file for later review, e.g. for loading into QGIS.gpd.GeoDataFrame(geometry=wb_joined.geometry_subpixel, crs='EPSG:3577').to_file(f'{geohash}_subpixel.geojson', driver='GeoJSON')Extracting time seriesNow we will generate a surface area time series using both extents. Load WOfSLoad WOfS for all available times from 2015 to 2020.wofs_daily = dc.load( "wofs_albers", geopolygon=datacube.utils.geometry.Geometry( wb.geometry.iloc[0].buffer(100), crs=dea_wbs.crs ), progress_cbk=with_ui_cbk(), time=("2015-01", "2020-01"), )Rasterise the waterbodiesRasterise the pixel and subpixel waterbodies. 
To rasterise the subpixel waterbodies while maintaining their subpixel nature, we'll use a [prototype of a partial-pixel rasterising algorithm](https://gist.github.com/perrygeo/721040f8545272832a42file-rasterize-py) that was suggested to be added to rasterio, but wasn't ever added.def _rasterize_geom(geom, shape, affinetrans, all_touched): from rasterio import features indata = [(geom, 1)] rv_array = features.rasterize( indata, out_shape=shape, transform=affinetrans, fill=0, all_touched=all_touched ) return rv_array def rasterize_pctcover(geom, atrans, shape): import rasterio import fiona import numpy as np from shapely.geometry import box alltouched = _rasterize_geom(geom, shape, atrans, all_touched=True) exterior = _rasterize_geom(geom.exterior, shape, atrans, all_touched=True) # Create percent cover grid as the difference between them # at this point all cells are known 100% coverage, # we'll update this array for exterior points pctcover = 100 * (alltouched - exterior) # loop through indicies of all exterior cells for r, c in zip(*np.where(exterior == 1)): # Find cell bounds, from rasterio DatasetReader.window_bounds window = ((r, r + 1), (c, c + 1)) ((row_min, row_max), (col_min, col_max)) = window x_min, y_min = atrans * (col_min, row_max) x_max, y_max = atrans * (col_max, row_min) bounds = (x_min, y_min, x_max, y_max) # Construct shapely geometry of cell cell = box(*bounds) # Intersect with original shape cell_overlap = cell.intersection(geom) # update pctcover with percentage based on area proportion coverage = cell_overlap.area / cell.area pctcover[r, c] = int(coverage * 100) return pctcover raster_pixel = ( rasterize_pctcover( wb_joined.iloc[0].geometry_pixel, wofs.geobox.transform, wofs.frequency.shape ) / 100 ) raster_subpixel = ( rasterize_pctcover( wb_joined.iloc[0].geometry_subpixel, wofs.geobox.transform, wofs.frequency.shape ) / 100 )Count wet pixelsFor each WOfS observation, multiply the wet pixel mask by the raster mask and sum the result to obtain the wet surface area.px_wet_pixel = ((wofs_daily == 128) * raster_pixel).sum(axis=(1, 2)).water px_wet_subpixel = ((wofs_daily == 128) * raster_subpixel).sum(axis=(1, 2)).waterInvalid days are those with >10% missing observations.missing = (wofs_daily != 128) & (wofs_daily != 0) px_missing_pixel = (missing * raster_pixel).sum(axis=(1, 2)).water px_missing_subpixel = (missing * raster_subpixel).sum(axis=(1, 2)).water invalid_pixel = px_missing_pixel > 0.1 * raster_pixel.sum() invalid_subpixel = px_missing_subpixel > 0.1 * raster_subpixel.sum()Plot the time seriestime_series = pd.DataFrame( { "pixel": px_wet_pixel, "subpixel": px_wet_subpixel, "invalid_pixel": invalid_pixel, "invalid_subpixel": invalid_subpixel, }, index=pd.DatetimeIndex(wofs_daily.time.values), ) time_series.pixel[~time_series.invalid_pixel].plot(legend=True) time_series.subpixel[~time_series.invalid_subpixel].plot(legend=True) (time_series.pixel - time_series.subpixel)[ ~(time_series.invalid_pixel | time_series.invalid_subpixel) ].plot(label="residual", legend=True)Of course, there is not much difference in the time series for any waterbody of more than a few pixels. *** Additional information**License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). 
Digital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license.**Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks).**Last modified:** October 2020**Compatible datacube version:**print(datacube.__version__)1.8.3TagsBrowse all available tags on the DEA User Guide's [Tags Index](https://docs.dea.ga.gov.au/genindex.html)**Tags**: :index:`NCI compatible`, :index:`sandbox compatible`, :index:`time series`, :index:`water`, :index:`waterbodies`os.listdir() The os.listdir() method returns a list of the names of the files and folders contained in the specified folder. The list is in alphabetical order and does not include '.' and '..', even though they are present in the folder. It is only supported on Unix and Windows.!pwd !ls import os print(os.listdir("."))['.ipynb_checkpoints', '01-Introspection.ipynb', '02-sys module.ipynb', '03-Dates and Time Module.ipynb', '04-logging.ipynb', '05-configparser.ipynb', '06-email.ipynb', '07-collections.ipynb', '08-hashlib.ipynb', '09-Random Module.ipynb', '10-Math Module.ipynb', '11-re-module.ipynb', '12-os-module.ipynb', 'new_snake.log', 'otherMod2.py', 'settings.ini', 'test.log', '__pycache__']from google.colab import drive drive.mount('./gdrive') !pip install kaggle !pip install dash==1.18.1 !pip install chart_studio from google.colab import files files.upload() ls -1ha kaggle.json !mkdir -p ~/.kaggle !cp kaggle.json ~/.kaggle/ !chmod 600 ~/.kaggle/kaggle.json New section !kaggle datasets download -d kimjihoo/coronavirusdataset !ls !unzip coronavirusdataset.zip import io import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns import missingno as msno import statsmodels.api as sm import statsmodels.formula.api as smf import math import chart_studio import plotly.graph_objects as go import plotly.express as px import dash import dash_core_components as doc import dash_html_components as html from sklearn import datasets from sklearn import svm from sklearn import tree from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import MinMaxScaler from plotly.offline import iplot, init_notebook_mode Case=pd.read_csv("Case.csv") Patient=pd.read_csv("PatientInfo.csv") Policy=pd.read_csv("Policy.csv") SearchTrend=pd.read_csv("SearchTrend.csv") SeoulFloating=pd.read_csv("SeoulFloating.csv") Time=pd.read_csv("Time.csv") TimeAge=pd.read_csv("TimeAge.csv") TimeGender=pd.read_csv("TimeGender.csv") TimeProvince=pd.read_csv("TimeProvince.csv") Weather=pd.read_csv("Weather.csv") Time.info() Time['date']=pd.to_datetime(Time['date']) Time.info() msno.bar(Time) Time.describe(include='all') timegraph=pd.DataFrame(dict(time=Time['date'], value=Time['confirmed'])) g=sns.relplot(x="time", y="value", kind="line", data=timegraph) g.fig.autofmt_xdate() # Let's work it out with B (Beta) treated as a constant.
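The next cell estimates the transmission parameter B (Beta) by assuming logistic growth of the infected fraction i(t) = confirmed/N: under that assumption ln(i/(1-i)) is linear in time with slope Beta, which is the identity the loop below inverts. A minimal sketch of that relationship (the helper names here are illustrative, not part of the notebook):

```python
import numpy as np

def logistic_fraction(t, i0, beta):
    # Logistic growth: i(t) = i0*exp(beta*t) / (1 - i0 + i0*exp(beta*t)),
    # hence ln(i/(1-i)) = ln(i0/(1-i0)) + beta*t, i.e. linear in t with slope beta.
    growth = np.exp(beta * t)
    return i0 * growth / ((1 - i0) + i0 * growth)

def beta_from_fraction(t, i_t, i0):
    # Invert the identity above to recover beta from an observed fraction at time t.
    return (np.log(i_t / (1 - i_t)) - np.log(i0 / (1 - i0))) / t
```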
index=list(range(0,len(Time['date']))) N=51640000 first=1 confirmed=Time['confirmed'] i_value=[] for i in index: i_value.append(confirmed[i]/N) i_value=np.array(i_value) i_initial=i_value[0] ln_i_value=[] for i in index: ln_i_value.append(math.log(i_value[i]/(1-i_value[i]))) ln_first_value=ln_i_value[0] Beta=[] Beta.append(0.0) for i in range(1,len(i_value)): Beta.append((ln_i_value[i]-ln_first_value)/index[i]) print(Beta) len(Beta) Beta=np.array(Beta) # Up to points 1-4 the slope is 0; after that the slope increases. #plt.scatter(index,confirmed,label="real") # Find the min/max of Beta => get their indices => try attaching labels plt.plot(index, Beta, c="r", label='reg') plt.legend() plt.xlabel("index") plt.ylabel("B") plt.rcParams["figure.figsize"] = (4, 4) fig2=go.Figure(data=go.Bar(name='confirmed',x=Time['date'], y=Time['confirmed'])) fig2.update_layout(title_text='confirmed') fig2.show() app=dash.Dash() app.layout= html.Div([doc.Graph(figure=fig2)]) app.run_server(debug=True) fig=go.Figure(data=go.Bar(x=Time['date'],y=Beta)) fig.update_layout(title_text='Beta') fig.show() # 01-27 Alert level raised from "Caution" to "Alert" # 01-31 Plan announced to block community transmission # 02-04 Entry restricted for all foreign nationals who visited Hubei Province # 02-05 Ban and crackdown on hoarding of medical masks and hand sanitiser # 02-19 Cluster outbreak in Daegu # 02-20 Revised guidelines for responding to community transmission take effect # 02-23 Infectious disease alert raised to the 'Severe' level # 02-24 Start of the new school term postponed for kindergartens and elementary, middle and high schools # 02-26 First round of 'public safe hospitals' designated / mask export restrictions and mandatory supply through public outlets # 02-27 Daycare centres closed (until 3.8) # 03-01 Treatment system for regional COVID-19 spread restructured => confirmed patients triaged by severity # 03-04 Operating guidelines prepared for drive-through screening centres # 03-15 Daegu, Gyeongbuk, Gyeongsan, Cheongdo and Bonghwa declared special disaster zones # 03-22 15 days of intensive social distancing begin # 03-29 Two-week self-quarantine required for arrivals from all countries # 04-06 'Enhanced social distancing' extended (until 4.19) # 07-01 Electronic entry logs made mandatory for high-risk facilities new_val=[] for i in index: new_val.append(math.exp(Beta[i]*index[i])) i_formula=[] for i in index: i_formula.append(i_initial*new_val[i]/((1-i_initial)+(i_initial*new_val[i]))) #print(i_formula) i_formula=np.array(i_formula) predict_confirm=(i_formula*N) #print(predict_confirm) # Case where Beta is not a fixed value plt.scatter(index,confirmed,label="real") plt.plot(index, predict_confirm, c="r", label='reg') plt.legend() plt.xlabel("index") plt.ylabel("confirmed") #axes = plt.axes() #axes.set_xlim([0,100]) #axes.set_ylim(0,10000) plt.show() # i values when Beta is not fixed plt.plot(index, i_value, c="r", label='reg') mean_Beta=np.mean(Beta) print(mean_Beta-0.06) new_val=[] for i in index: new_val.append(math.exp((mean_Beta-0.06)*index[i])) i_formula=[] for i in index: i_formula.append(i_initial*new_val[i]/((1-i_initial)+(i_initial*new_val[i]))) #print(i_formula) i_formula=np.array(i_formula) predict_confirm=(i_formula*N) #print(predict_confirm) # Beta fixed at mean_Beta - 0.06 plt.scatter(index,confirmed,label="real") plt.plot(index, predict_confirm, c="r", label='reg') plt.legend() plt.xlabel("index") plt.ylabel("confirmed") #axes = plt.axes() #axes.set_xlim([0,100]) #axes.set_ylim(0,10000) plt.show() # mean_Beta is about 0.116 new_val=[] for i in index: new_val.append(math.exp((0.15)*index[i])) i_formula=[] for i in index: i_formula.append(i_initial*new_val[i]/((1-i_initial)+(i_initial*new_val[i]))) #print(i_formula) i_formula=np.array(i_formula) predict_confirm=(i_formula*N) #print(predict_confirm) # Beta fixed at 0.15 #plt.scatter(index,confirmed,label="real") plt.plot(index, predict_confirm, c="r", label='reg') plt.legend() plt.xlabel("index") plt.ylabel("confirmed") #axes = plt.axes() #axes.set_xlim([0,100]) #axes.set_ylim(0,10000) plt.show() # For a logistic-shaped curve to appear, Beta is estimated to need to be at least about 0.15 new_val=[] for i in index: new_val.append(math.exp((0.6)*index[i])) i_formula=[] for i in index: i_formula.append(i_initial*new_val[i]/((1-i_initial)+(i_initial*new_val[i]))) #print(i_formula)
i_formula=np.array(i_formula) predict_confirm=(i_formula*N) #print(predict_confirm) # Beta fixed at 0.6 #plt.scatter(index,confirmed,label="real") plt.plot(index, predict_confirm, c="r", label='reg') plt.legend() plt.xlabel("index") plt.ylabel("confirmed") #axes = plt.axes() #axes.set_xlim([0,100]) #axes.set_ylim(0,10000) plt.show() # The larger Beta is, the more sharply the curve rises at the start # With Beta around 0.05 the fit is close to the actual values, but it is clearly not a logistic curve # Beta needs to be at least about 0.15 to produce a logistic shape, but then there is a large gap between the predicted and actual confirmed counts # The non-constant Beta case matches the actual values best, but that contradicts the assumption that Beta is a constant. (1-i_initial)/i_initial import calendar c = calendar.TextCalendar(calendar.MONDAY) c.prmonth(2020, 9) import calendar import pprint pprint.pprint(calendar.monthcalendar(2020, 9)) import calendar # Show every month for month in range(1, 13): # Compute the dates for each week that overlaps the month c = calendar.monthcalendar(2020, month) first_week = c[0] second_week = c[1] third_week = c[2] # If there is a Thursday in the first week, the second Thursday # is in the second week. Otherwise the second Thursday must # be in the third week. if first_week[calendar.THURSDAY]: meeting_date = second_week[calendar.THURSDAY] else: meeting_date = third_week[calendar.THURSDAY] print('%3s: %2s' % (month, meeting_date)) import calendar import pprint # yeardays2calendar(year, width) returns data for the given year, organised with `width` months per row. pprint.pprint(calendar.Calendar(calendar.MONDAY).yeardays2calendar(2020, 1)) # Python code to print Calendar # Without use of Calendar module mm = 9 yy = 2020 month ={1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'} # code below for calculation of odd days day =(yy-1)% 400 day = (day//100)*5 + ((day % 100) - (day % 100)//4) + ((day % 100)//4)*2 day = day % 7 nly =[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] ly =[31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] s = 0 if yy % 4 == 0: for i in range(mm-1): s+= ly[i] else: for i in range(mm-1): s+= nly[i] day += s % 7 day = day % 7 # variable used for white space filling # where date not present space ='' space = space.rjust(2, ' ') # code below is to print the calendar print(month[mm], yy) print('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa') if mm == 9 or mm == 4 or mm == 6 or mm == 11: for i in range(31 + day): if i<= day: print(space, end =' ') else: print("{:02d}".format(i-day), end =' ') if (i + 1)% 7 == 0: print() elif mm == 2: if yy % 4 == 0: p = 30 else: p = 29 for i in range(p + day): if i<= day: print(space, end =' ') else: print("{:02d}".format(i-day), end =' ') if (i + 1)% 7 == 0: print() else: for i in range(32 + day): if i<= day: print(space, end =' ') else: print("{:02d}".format(i-day), end =' ') if (i + 1)% 7 == 0: print() # Python program to print calendar for given year # importing calendar library import calendar def printcalendar(year): # printing calendar print(calendar.calendar(year)) # driver program to test above function year = 2020 printcalendar(year) year = 2021 printcalendar(year) # Python program to demonstrate working # of itermonthdays() method # importing calendar module import calendar year = 2020 mnth = 9 obj = calendar.Calendar() # iterating with itermonthdays for day in obj.itermonthdays(year, mnth): print(day) # Python program to demonstrate working # of monthdayscalendar() method # importing calendar module import calendar obj = calendar.Calendar() year = 2020 mnth = 9 # printing with monthdayscalendar print(obj.monthdayscalendar(year, mnth)) #
Python program to demonstrate working # of yeardayscalendar() method # importing calendar module import calendar import pprint obj = calendar.Calendar() year = 2020 # default value of width is 3 # printing with yeardayscalendar pprint.pprint(obj.yeardayscalendar(year))setfirstweekday() sets the day of the week (0 is Monday, 6 is Sunday) on which each week starts. The values MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY and SUNDAY are provided for convenience.# Python program to demonstrate working of # monthdays2calendar() method # importing calendar module import calendar obj = calendar.Calendar() # iterating with monthdays2calendar for day in obj.monthdays2calendar(2020, 9): print(day) # Python code to demonstrate the working of # setfirstweekday() with prmonth() method # importing calendar module for calendar operations import calendar # using prmonth() to print the calendar of September 2020 print ("The 9th month of 2020 is : ") calendar.prmonth(2020, 9, 2, 1) # using setfirstweekday() to set first week day number calendar.setfirstweekday(1) print ("\r") # using firstweekday() to check the changed day print ("The new week day number is : ", end ="") print (calendar.firstweekday()) documents = { 1: "a donut on a glass plate", 2: "only the donut", 3: "listen to the drum machine", } index = { "a": [1], "donut": [1, 2], "on": [1], "glass": [1], "plate": [1], "only": [2], "the": [2, 3], "listen": [3], "to": [3], "drum": [3], "machine": [3], } %timeit print(index) https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/234239-faites-de-la-programmation-systeme !pip install num2words Collecting num2words Downloading https://files.pythonhosted.org/packages/eb/a2/ea800689730732e27711c41beed4b2a129b34974435bdc450377ec407738/num2words-0.5.10-py3-none-any.whl (101kB) Requirement already satisfied: docopt>=0.6.2 in /usr/local/lib/python3.6/dist-packages (from num2words) (0.6.2) Installing collected[...]Besides the numerical argument, there are two main optional arguments.**to**: The converter to use. Supported values are:* cardinal (default)* ordinal* ordinal_num* year* currencyfrom num2words import num2words print(num2words(42)) print(num2words(42, to='ordinal')) print(num2words(90, lang='fr')) print(num2words(90, lang='fr_CH')) print(num2words(90, lang='fr_CH', to='ordinal')) import signal import sys def fermer_programme(signal, frame): """Function called when it is time to shut the program down""" print("Time to shut down!") sys.exit(0) # Connect the signal to our function signal.signal(signal.SIGINT, fermer_programme) # Our program... print("The program is going to loop...") while True: continue The program is going to loop... Time to shut down! Tacotron 2 inference code Edit the variables **checkpoint_path** and **text** to match yours and run the entire code to generate plots of mel outputs, alignments and audio synthesis from the generated mel-spectrogram using Griffin-Lim.
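The repository's own `griffin_lim` function (imported in the next cell from `audio_processing`) handles the spectrogram inversion; the sketch below is not that implementation, only a generic illustration of the Griffin-Lim idea — iteratively re-estimating phase from a fixed magnitude spectrogram — written with standard librosa calls and assumed STFT parameters:

```python
import numpy as np
import librosa

def griffin_lim_sketch(magnitude, n_iter=60, n_fft=1024, hop_length=256):
    # Start from random phase, then alternate between time and frequency domains,
    # keeping the known magnitude and updating only the phase estimate.
    angles = np.exp(2j * np.pi * np.random.rand(*magnitude.shape))
    complex_spec = magnitude * angles
    for _ in range(n_iter):
        audio = librosa.istft(complex_spec, hop_length=hop_length)
        rebuilt = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length)
        angles = np.exp(1j * np.angle(rebuilt))
        complex_spec = magnitude * angles
    return librosa.istft(complex_spec, hop_length=hop_length)
```

librosa also ships a ready-made `librosa.griffinlim` that implements the same idea.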
Import libraries and setup matplotlibimport matplotlib import matplotlib.pylab as plt import IPython.display as ipd import sys sys.path.append('waveglow/') import numpy as np import torch from hparams import create_hparams from model import Tacotron2 from layers import TacotronSTFT, STFT from audio_processing import griffin_lim from train import load_model from text import text_to_sequence from denoiser import Denoiser def plot_data(data, figsize=(16, 4)): fig, axes = plt.subplots(1, len(data), figsize=figsize) for i in range(len(data)): axes[i].imshow(data[i], aspect='auto', origin='lower', interpolation='none') plt.show()Setup hparamsfrom hydra import compose, initialize from omegaconf import OmegaConf initialize(config_path="conf", job_name="test_app") cfg = compose(config_name="config")Load model from checkpointcheckpoint_path = "outputs/2022-03-24/00-58-40/output_dir/checkpoint_36000" model = load_model(cfg) model.load_state_dict(torch.load(checkpoint_path)['state_dict']) _ = model.cuda().eval().half()Load WaveGlow for mel2audio synthesis and denoiserwaveglow_path = 'waveglow/waveglow_256channels_universal_v5.pt' waveglow = torch.load(waveglow_path)['model'] waveglow.cuda().eval().half() for k in waveglow.convinv: k.float() denoiser = Denoiser(waveglow)/home/nakata/.local/share/virtualenvs/tacotron2-YLMXJfpO/lib/python3.9/site-packages/torch/serialization.py:786: SourceChangeWarning: source code of class 'torch.nn.modules.conv.ConvTranspose1d' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes. warnings.warn(msg, SourceChangeWarning) /home/nakata/.local/share/virtualenvs/tacotron2-YLMXJfpO/lib/python3.9/site-packages/torch/serialization.py:786: SourceChangeWarning: source code of class 'torch.nn.modules.container.ModuleList' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes. warnings.warn(msg, SourceChangeWarning) /home/nakata/.local/share/virtualenvs/tacotron2-YLMXJfpO/lib/python3.9/site-packages/torch/serialization.py:786: SourceChangeWarning: source code of[...]Prepare text inputtext = "東京特許許可局" sequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :] sequence = torch.autograd.Variable( torch.from_numpy(sequence)).cuda().long()Decode text input and plot resultsmel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence) plot_data((mel_outputs.float().data.cpu().numpy()[0], mel_outputs_postnet.float().data.cpu().numpy()[0], alignments.float().data.cpu().numpy()[0].T))/tmp/ipykernel_29961/3574190110.py:6: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure. plt.show()Synthesize audio from spectrogram using WaveGlowwith torch.no_grad(): audio = waveglow.infer(mel_outputs_postnet, sigma=0.666) ipd.Audio(audio[0].data.cpu().numpy(), rate=cfg.sampling_rate)(Optional) Remove WaveGlow biasaudio_denoised = denoiser(audio, strength=0.01)[:, 0] ipd.Audio(audio_denoised.cpu().numpy(), rate=cfg.sampling_rate) import soundfile as sf sf.write('test.wav', audio_denoised.cpu().numpy().T, cfg.sampling_rate) audio_denoised.cpu().numpy().shape0. Importing PySparkfrom pyspark import SparkContext from pyspark.sql import SparkSession sc = SparkContext() spark = SparkSession(sc) # run this cell only once1. 
Loading the datasetimport csv rdd1 = sc.textFile("digikala_comments.csv")\ .mapPartitions(lambda line: csv.reader(line, delimiter=',', quotechar='"'))\ .filter(lambda line: len(line) >= 2 and line[0] != 'product_id') rdd1.take(1)2. Most popular itemrdd1.map(lambda l: (l[0], int(l[4]) - int(l[5])))\ .reduceByKey(lambda v1, v2: v1 + v2)\ .max(key = lambda x: x[1])3. Percentage of unverified commentstotal_count = rdd1.count() not_verified_count = rdd1.filter(lambda l: l[6] != 'verified').count() print((not_verified_count / total_count ) * 100)1.76009644364074744. The largest word in the comment sectionrdd1.map(lambda l: l[9].split())\ .flatMap(lambda x: x)\ .map(lambda x: (x, len(x)))\ .reduce(lambda w1, w2: w1 if w1[1] > w2[1] else w2)5. Top 10 words in advantages and disadvantagesimport ast persian_chars=["آ", "ا", "ب", "پ", "ت", "ث", "ج", "چ", "ح", "خ", "د", "ذ", "ر", "ز", "ژ", "س", "ش", "ص", "ض", "ط", "ظ", "ع", "غ", "ف", "ق", "ک" ,"گ", "ل", "م", "ن", "و" ,"ه", "ی"] def preprocess_words(x): for c in x: if c not in persian_chars: x = x.replace(c, " ") return x def get_top_10_words(column_num): return rdd1.filter(lambda l: l[column_num] != '')\ .map(lambda l: l[column_num])\ .map(preprocess_words)\ .map(lambda x: x.split())\ .flatMap(lambda x: x)\ .map(lambda x: (x, 1))\ .reduceByKey(lambda x, y: x + y)\ .takeOrdered(10, key=lambda x: -x[1])Advantages column:get_top_10_words(10)Disadvantages column:get_top_10_words(11)6. Most popular character in product's titlerdd1.map(lambda l: l[1])\ .flatMap(lambda x: x)\ .filter(lambda x: x in persian_chars)\ .map(lambda x: (x, 1))\ .reduceByKey(lambda x, y: x + y)\ .takeOrdered(1, key=lambda x: -x[1])Principal Component Analysis(PCA)In this notebook we will implement PCA algorithm from scratch# This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) from sklearn import datasets from sklearn.datasets import make_blobs import matplotlib.pyplot as plt # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session class PCA(): def __init__(self, n_components= 3): self.n_components = n_components def fit(self,X): X_mean = np.mean(X,axis=0) X = X - X_mean cov_mat = np.cov(X.T) # needs samples as columns eigen_values,eigen_vectors = np.linalg.eig(cov_mat) # eigen vector-> eigen_vector[:,i] eigen_vectors = eigen_vectors.T # transpose to make eigen_vector-> eigen_vector[i] indices = np.argsort(eigen_values)[::-1] eigen_vectors = eigen_vectors[indices] eigen_values = eigen_values[indices] self.components = eigen_vectors[0:self.n_components] def transform(self,X): X = X - X.mean(axis=0) projection = np.dot(X,self.components.T) return projection iris = datasets.load_iris() data = iris.data pca = PCA(n_components=2) pca.fit(data) project = pca.transform(data) plt.scatter(project[:,0],project[:,1],s=40,cmap=plt.cm.Spectral) np.random.seed(22) num_clusters = 8 num_samples = 1000 cluster_std = 1.0 X, c = make_blobs(n_samples=num_samples, n_features=4, centers=num_clusters, cluster_std=cluster_std) pca = PCA(n_components=2) pca.fit(X) project = pca.transform(X) plt.scatter(project[:,0],project[:,1], c=c, s=40, cmap=plt.cm.Spectral)DAT257x: Reinforcement Learning Explained Lab 5: Temporal Difference Learning Exercise 5.2: SARSA Agentimport numpy as np import sys if "../" not in sys.path: sys.path.append("../") from lib.envs.simple_rooms import SimpleRoomsEnv from lib.envs.windy_gridworld import WindyGridworldEnv from lib.envs.cliff_walking import CliffWalkingEnv from lib.simulation import Experiment class Agent(object): def __init__(self, actions): self.actions = actions self.num_actions = len(actions) def act(self, state): raise NotImplementedError class SarsaAgent(Agent): def __init__(self, actions, epsilon=0.01, alpha=0.5, gamma=1): super(SarsaAgent, self).__init__(actions) ## TODO 1 ## Initialize empty dictionary here ## In addition, initialize the value of epsilon, alpha and gamma self.Q = {} self.eps = epsilon self.alpha = alpha self.gamma = gamma def stateToString(self, state): mystring = "" if np.isscalar(state): mystring = str(state) else: for digit in state: mystring += str(digit) return mystring def act(self, state): stateStr = self.stateToString(state) if stateStr not in self.Q: self.Q[stateStr] = {action:0 for action in self.actions} action = np.random.randint(0, self.num_actions) choice = np.random.binomial(1, self.eps) sum_reward = sum([v for k,v in self.Q[stateStr].items()]) if choice != 1 and sum_reward>0: current_action = max(self.Q[stateStr], key=self.Q[stateStr].get) return current_action ## TODO 2 ## Implement epsilon greedy policy here return action def learn(self, state1, action1, reward, state2, action2): state1Str = self.stateToString(state1) state2Str = self.stateToString(state2) ## TODO 3 ## Implement the sarsa update here self.Q[state1Str][action1] = self.Q[state1Str][action1] + \ self.alpha*(reward + 
self.gamma*(self.Q[state2Str][action2] - self.Q[state1Str][action1])) """ SARSA Update Q(s,a) <- Q(s,a) + alpha * (reward + gamma * Q(s',a') - Q(s,a)) or Q(s,a) <- Q(s,a) + alpha * (td_target - Q(s,a)) or Q(s,a) <- Q(s,a) + alpha * td_delta """ interactive = True %matplotlib nbagg env = SimpleRoomsEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(10, interactive) interactive = False %matplotlib inline env = SimpleRoomsEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(50, interactive) interactive = True %matplotlib nbagg env = CliffWalkingEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(10, interactive) interactive = False %matplotlib inline env = CliffWalkingEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(100, interactive) interactive = False %matplotlib inline env = WindyGridworldEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(50, interactive) interactive = False %matplotlib inline env = CliffWalkingEnv() agent = SarsaAgent(range(env.action_space.n)) experiment = Experiment(env, agent) experiment.run_sarsa(1000, interactive)This IPython Notebook is for performing a fit and generating a figure of the spectrum of sample VG12, in the mesh region with 49+/-6 nm gap. This version is modified to fit for two gaps.The filename of the figure is **[TBD].pdf**.Author: , ``Date: January 25, 2015%pylab inline import emcee import triangle import pandas as pd import seaborn as sns from astroML.decorators import pickle_results sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5}) sns.set(style="ticks")Read in the data. We want "VG12"df = pd.read_csv('../data/cln_20130916_cary5000.csv', index_col=0) df = df[df.index > 1250.0] plt.plot(df.index[::4], df.run11[::4]/100.0, label='On-mesh') plt.plot(df.index, df.run10/100.0, label='Off-mesh') plt.plot(df.index, df.run12/100.0, label='Shard2') plt.plot(df.index, df.run9/100.0, label='DSP') plt.plot(df.index, df.run15/100.0, label='VG08') plt.plot(df.index, df.run17/100.0, label='VG08 alt') #plt.plot(x, T_gap_Si_withFF_fast(x, 65.0, 0.5, n1)/T_DSP, label='Model') plt.legend(loc='best') plt.ylim(0.80, 1.05)Import all the local models, saved locally as `etalon.py`. See the paper for derivations of these equations.from etalon import * np.random.seed(78704) # Introduce the Real data, decimate the data. x = df.index.values[::4] N = len(x) # Define T_DSP for the model T_DSP = T_gap_Si(x, 0.0) n1 = sellmeier_Si(x) # Define uncertainty yerr = 0.0004*np.ones(N) iid_cov = np.diag(yerr ** 2) # Select the spectrum of interest # Normalize the spectrum by measured DSP Si wafer. y = df.run11.values[::4]/100.0Define the likelihood. In this case we are using two different gap sizes, but fixed fill factor. \begin{equation} T_{mix} = 0.5 \times T_{e}(d_M + \epsilon) + 0.5 \times T_{e}(\epsilon)\end{equation}def lnlike(dM, eps, lna, lns): a, s = np.exp(lna), np.exp(lns) off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2) C = iid_cov + off_diag_terms sgn, logdet = np.linalg.slogdet(C) if sgn <= 0: return -np.inf T_mix = 0.5 * (T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1) + T_gap_Si_withFF_fast(x, eps, 1.0, n1))/T_DSP r = y - T_mix return -0.5 * (np.dot(r, np.linalg.solve(C, r)) + logdet)Define the prior. 
We want to put a Normal prior on $d_M$:$d_M \sim \mathcal{N}(\hat{d_M}, \sigma_{d_M})$def lnprior(dM, eps, lna, lns): prior = -0.5 * ((49.0-dM)/6.0)**2.0 if not (31.0 < dM < 67 and 0.0 < eps < 60.0 and -12 < lna < -2 and 0 < lns < 10): return -np.inf return priorCombine likelihood and prior to obtain the posterior.def lnprob(p): lp = lnprior(*p) if not np.isfinite(lp): return -np.inf return lp + lnlike(*p)Set up `emcee`.@pickle_results('SiGaps_12_VG12_twoGaps-sampler.pkl') def hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials): # Initialize the walkers p0 = np.array([dM_Guess, eps_Guess, np.log(a_Guess), np.log(s_Guess)]) pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)] sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob) pos, lp, state = sampler.run_mcmc(pos, nburnins) sampler.reset() pos, lp, state = sampler.run_mcmc(pos, ntrials) return samplerSet up the initial conditionsnp.random.seed(78704) ndim, nwalkers = 4, 32 dM_Guess = 49.0 eps_Guess = 15.0 a_Guess = 0.0016 s_Guess = 25.0 nburnins = 200 ntrials = 700Run the burn-in phase. Run the full MCMC. Pickle the results.sampler = hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials)@pickle_results: computing results and saving to 'SiGaps_12_VG12_twoGaps-sampler.pkl' warning: cache file 'SiGaps_12_VG12_twoGaps-sampler.pkl' exists - args match: False - kwargs match: TrueLinearize $a$ and $s$ for easy inspection of the values.chain = sampler.chain samples_lin = copy(sampler.flatchain) samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])Inspect the chain.fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True) fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98, wspace=0.0, hspace=0.05) [a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5) for i, a in enumerate(axes)] [a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d_M", "\epsilon", "\ln a", "\ln s"])] axes[-1].set_xlim(0, chain.shape[1]) axes[-1].set_xlabel("iteration");Linearize $a$ and $s$ for graphical purposes. 
Make a triangle corner plot.fig = triangle.corner(samples_lin, labels=map("${0}$".format, ["d_M", "\epsilon", "a", "s"]), quantiles=[0.16, 0.84]) fig = triangle.corner(samples_lin[:,0:2], labels=map("${0}$".format, ["d_M", "\epsilon"]), quantiles=[0.16, 0.84]) plt.savefig("VG12_twoGaps_cornerb.pdf")Quantiles: [(0.16, 44.697874299365516), (0.84, 55.787511870582371)] Quantiles: [(0.16, 7.8593931709714386), (0.84, 16.827157263301732)]Calculate confidence intervals.dM_mcmc, eps_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), zip(*np.percentile(samples_lin, [16, 50, 84], axis=0))) dM_mcmc, eps_mcmc, a_mcmc, s_mcmc print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*dM_mcmc) print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*eps_mcmc)51^{+5}_{-6} 12^{+5}_{-4}Overlay draws from the Gaussian Process.plt.figure(figsize=(6,3)) for dM, eps, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]: off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2) C = iid_cov + off_diag_terms fit = 0.5*(T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1)+T_gap_Si_withFF_fast(x, eps, 1.0, n1))/T_DSP vec = np.random.multivariate_normal(fit, C) plt.plot(x, vec,"-b", alpha=0.06) plt.step(x, y,color="k", label='Measurement') fit = 0.5*(T_gap_Si_withFF_fast(x, dM_mcmc[0]+eps_mcmc[0], 1, n1)+T_gap_Si_withFF_fast(x, eps_mcmc[0], 1, n1))/T_DSP fit_label = 'Model with $d_M={:.0f}$ nm, $\epsilon={:.0f}$'.format(dM_mcmc[0], eps_mcmc[0]) plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label) fit1 = T_gap_Si_withFF_fast(x, 43, 0.5, n1)/T_DSP fit2 = T_gap_Si_withFF_fast(x, 55, 0.5, n1)/T_DSP fit2_label = 'Model with $d_M={:.0f}\pm{:.0f}$ nm, $\epsilon={:.0f}$'.format(49, 6, 0) plt.fill_between(x, fit1, fit2, alpha=0.6, color=sns.xkcd_rgb["green apple"]) plt.plot([-10, -9], [-10, -9],"-", alpha=0.85, color=sns.xkcd_rgb["green apple"], label=fit2_label) plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP') plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5) plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff') plt.xlabel('$\lambda$ (nm)'); plt.ylabel('$T_{gap}$'); plt.xlim(1200, 2501); plt.ylim(0.9, 1.019); plt.legend(loc='lower right') plt.savefig("VG12_twoGapsb.pdf", bbox_inches='tight')Support Vector Classifier Model on Cough Dataimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.utils import shuffle from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score, classification_report from sklearn.svm import SVC import itertools import os import pickle audio_type = 'cough' data_path = os.path.join('..', '..', 'data_struc', f'data_{audio_type}')Loading datasetstrain = pd.read_csv(os.path.join(data_path, 'train.csv')) test = pd.read_csv(os.path.join(data_path, 'test.csv')) val = pd.read_csv(os.path.join(data_path, 'valid.csv')) train = train.loc[:, ~train.columns.str.contains('^Unnamed')] test = test.loc[:, ~test.columns.str.contains('^Unnamed')] val = val.loc[:, ~val.columns.str.contains('^Unnamed')] train['target'] = train['target'].apply(lambda x: 1 if x == 'covid' else 0) test['target'] = test['target'].apply(lambda x: 1 if x == 'covid' else 0) val['target'] = val['target'].apply(lambda x: 1 if x == 'covid' else 0) train.dropna(inplace=True) test.dropna(inplace=True) val.dropna(inplace=True) # Appending validation to 
test, as we are using cross-validation anyway. test = test.append(val, ignore_index=True) train = shuffle(train, random_state=1) covid, normal = 0, 0 for idx, row in train.iterrows(): if(row['target'] == 1): covid += 1 else: normal += 1 print(covid) print(normal) X_train = train.drop('target', axis=1) y_train = train['target'] X_test = test.drop('target', axis=1) y_test = test['target'] X_val = val.drop('target', axis=1) y_val = val['target'] sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test)Applying PCApca = PCA() X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test) explained_variance = pca.explained_variance_ratio_ explained_variance = [i * 100 for i in explained_variance] cumulative_variance = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100) plt.ylabel('Cumulative variance') plt.plot(cumulative_variance, color='b'); pca = PCA(n_components=20) X_train = pca.fit_transform(X_train) X_test = pca.transform(X_test)Training and Making Predictions%%script echo "Comment line with %%script echo to run this cell." # Hyperparameters chosen after repeatedly narrowing the range of values. param_grid = { 'C': [5, 6, 7, 8], 'gamma': [0.019, 0.02, 0.021] } grid = GridSearchCV( SVC(probability=True), param_grid, refit = True, cv=10, n_jobs=-1, verbose=5 ) grid.fit(X_train, y_train) %%script echo "Comment line with %%script echo to run this cell." # print best parameter after tuning print(grid.best_params_) # print how our model looks after hyper-parameter tuning print(grid.best_estimator_) # Printing best cross-validation accuracy -- use this value and NOT test metrics to tune hyperparameters. print(grid.best_score_) %%script echo "Comment line with %%script echo to run this cell." with open('svc_cough.pickle', 'wb') as f: pickle.dump(grid, f)Comment line with %%script echo to run this cell.Don't touch this section until hyperparameters have been tuned and finalizedDon't use test metrics as feedback to change hyperparameter, to avoid leakage from test set.with open('svc_cough.pickle', 'rb') as f: model = pickle.load(f) y_pred = model.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) sns.heatmap(cm, cmap = 'plasma', annot = True, fmt = ".1f") print('Accuracy: ', accuracy_score(y_test, y_pred)) print(classification_report(y_test,y_pred))[[149 7] [ 9 27]] Accuracy: 0.9166666666666666 precision recall f1-score support 0 0.94 0.96 0.95 156 1 0.79 0.75 0.77 36 accuracy 0.92 192 macro avg 0.87 0.85 0.86 192 weighted avg 0.92 0.92 0.92 192Assessing Models In data science, a "model" is a set of assumptions about data. Often, models include assumptions about chance processes used to generate data. Sometimes, data scientists have to decide whether or not their models are good. In this section we will discuss two examples of making such decisions. In later sections we will use the methods developed here as the building blocks of a general framework for testing hypotheses. U.S. Supreme Court, 1965: Swain vs. Alabama In the early 1960's, in Talladega County in Alabama, a black man called was convicted of raping a white woman and was sentenced to death. He appealed his sentence, citing among other factors the all-white jury. At the time, only men aged 21 or older were allowed to serve on juries in Talladega County. In the county, 26% of the eligible jurors were black, but there were only 8 black men among the 100 selected for the jury panel in Swain's trial. 
No black man was selected for the trial jury.In 1965, the Supreme Court of the United States denied Swain's appeal. In its ruling, the Court wrote "... the overall percentage disparity has been small and reflects no studied attempt to include or exclude a specified number of Negroes."Jury panels are supposed to be selected at random from the eligible population. Because 26% of the eligible population was black, 8 black men on a panel of 100 might seem low. A Model But one view of the data – a model, in other words – is that the panel was selected at random and ended up with a small number of black men just due to chance. This model is consistent with what the Supreme Court wrote in its ruling.The model specifies the details of a chance process. It says the data are like a random sample from a population in which 26% of the people are black. We are in a good position to assess this model, because:- We can simulate data based on the model. That is, we can simulate drawing at random from a population of whom 26% are black. - Our simulation will show what a panel *would* look like *if* it were selected at random.- We can then compare the results of the simulation with the composition of 's panel. - If the results of our simulation are not consistent with the composition of Swain's panel, that will be evidence against the model of random selection.Let's go through the process in detail. The Statistic First, we have to choose a statistic to simulate. The statistic has to be able to help us decide between the model and alternative views about the data. The model says the panel was drawn at random. The alternative viewpoint, suggested by appeal, is that the panel was not drawn at random because it contained too few black men. A natural statistic, then, is the number of black men in our simulated sample of 100 men representing the panel. Small values of the statistic will favor the alternative viewpoint. Predicting the Statistic Under the Model If the model were true, how big would the statistic typically be? To answer that, we have to start by working out the details of the simulation. Generating One Value of the Statistic First let's figure out how to simulate one value of the statistic. For this, we have to sample 100 times at random from the population of eligible jurors and count the number of black men we get.One way is to set up a table representing the eligible population and use `sample` as we did in the previous chapter. But there is also a quicker way, using a `datascience` function tailored for sampling at random from categorical distributions. We will use it several times in this chapter.The `sample_proportions` function in the `datascience` library takes two arguments:- the sample size- the distribution of the categories in the population, as a list or array of proportions that add up to 1It returns an array containing the distribution of the categories in a random sample of the given size taken from the population. That's an array consisting of the sample proportions in all the different categories.To see how to use this, remember that according to our model, the panel is selected at random from a population of men among whom 26% were black and 74% were not. Thus the distribution of the two categories can be represented as the list `[0.26, 0.74]`, which we have assigned to the name `eligible_population`. 
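If the `datascience` library is not available, the behaviour of `sample_proportions` can be imitated with a plain multinomial draw in NumPy. This is only an illustrative stand-in (the function name here is made up), not the library's own code:

```python
import numpy as np

def sample_proportions_sketch(sample_size, distribution):
    # Draw `sample_size` individuals at random from the categorical distribution
    # and return the proportion that falls in each category.
    counts = np.random.multinomial(sample_size, distribution)
    return counts / sample_size

sample_proportions_sketch(100, [0.26, 0.74])  # e.g. array([0.21, 0.79])
```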
Now let's sample at random 100 times from this distribution, and see what proportions of the two categories we get in our sample.eligible_population = [0.26, 0.74] sample_proportions(100, eligible_population)That was easy! The proportion of black men in the random sample is `item(0)` of the output array.Because there are 100 men in the sample, the number of men in each category is 100 times the proportion. So we can just as easily simulate counts instead of proportions, and access the count of black men only. Run the cell a few times to see how the output varies.# count of black men in a simulated panel (100 * sample_proportions(100, eligible_population)).item(0)Running the Simulation To get a sense of the variability without running the cell over and over, let's generate 10,000 simulated values of the count. The code follows the same steps that we have used in every simulation. First, we define a function to simulate one value of the count, using the code we wrote above.def one_simulated_count(): return (100 * sample_proportions(100, eligible_population)).item(0)Next, we create an array of 10,000 simulated counts by using a `for` loop.counts = make_array() repetitions = 10000 for i in np.arange(repetitions): counts = np.append(counts, one_simulated_count())The Prediction To interpret the results of our simulation, we start as usual by visualizing the results by an empirical histogram.Table().with_column( 'Count in a Random Sample', counts ).hist(bins = np.arange(5.5, 46.6, 1))The histogram tells us what the model of random selection predicts about our statistic, the count of black men in the sample.To generate each simulated count, we drew at 100 times at random from a population in which 26% were black. So, as you would expect, most of the simulated counts are around 26. They are not exactly 26: there is some variation. The counts range from about 10 to about 45. Comparing the Prediction and the Data Though the simulated counts are quite varied, very few of them came out to be eight or less. The value eight is far out in the left hand tail of the histogram. It's the red dot on the horizontal axis of the histogram.Table().with_column( 'Count in a Random Sample', counts ).hist(bins = np.arange(5.5, 46.6, 1)) plots.scatter(8, 0, color='red', s=30);The simulation shows that if we select a panel of 100 jurors at random from the eligible population, we are very unlikely to get counts of black men as low as the eight that were in Swain's jury panel. This is evidence that the model of random selection of the jurors in the panel is not consistent with the data from the panel. When the data and a model are inconsistent, the model is hard to justify. After all, the data are real. The model is just a set of assumptions. When assumptions are at odds with reality, we have to question those assumptions.While it is *possible* that a panel like could have been generated by chance, our simulation demonstrates that it is very unlikely. Thus our assessment is that the model of random draws is not supported by the evidence. Swain's jury panel does not look like the result of random sampling from the population of eligible jurors. This method of assessing models is very general. Here is an example in which we use it to assess a model in a completely different setting. Mendel's Pea Flowers [](https://en.wikipedia.org/wiki/Gregor_Mendel) (1822-1884) was an Austrian monk who is widely recognized as the founder of the modern field of genetics. 
Mendel performed careful and large-scale experiments on plants to come up with fundamental laws of genetics. Many of his experiments were on varieties of pea plants. He formulated sets of assumptions about each variety; these were his models. He then tested the validity of his models by growing the plants and gathering data.Let's analyze the data from one such experiment to see if Mendel's model was good.In a particular variety, each plant has either purple flowers or white. The color in each plant is unaffected by the colors in other plants. Mendel hypothesized that the plants should bear purple or white flowers at random, in the ratio 3:1. Mendel's Model For every plant, there is a 75% chance that it will have purple flowers, and a 25% chance that the flowers will be white, regardless of the colors in all the other plants. Approach to Assessment To go about assessing Mendel's model, we can simulate plants under the assumptions of the model and see what it predicts. Then we will be able to compare the predictions with the data that Mendel recorded. The Statistic Our goal is to see whether or not Mendel's model is good. We need to simulate a statistic that will help us make this decision. If the model is good, the percent of purple-flowering plants in the sample should be close to 75%. If the model is not good, the percent purple-flowering will be away from 75%. It may be higher, or lower; the direction doesn't matter.The key for us is the *distance* between 75% and the percent of purple-flowering plants in the sample. Big distances are evidence that the model isn't good.Our statistic, therefore, is the **distance between the sample percent and 75%**:$$\big{\vert} \text{sample percent of purple-flowering plants} - 75 \big{\vert}$$ Predicting the Statistic Under the Model To see how big the distance would be if Mendel's model were true, we can use `sample_proportions` to simulate the distance under the assumptions of the model.First, we have to figure out how many times to sample. To do this, remember that we are going to compare our simulation with Mendel's plants. So we should simulate the same number of plants that he had.Mendel grew a lot of plants. There were 929 plants of the variety corresponding to this model. So we have to sample 929 times. Generating One Value of the Statistic The steps in the calculation:- Sample 929 times at random from the distribution specified by the model and find the sample proportion in the purple-flowering category. - Multiply the proportion by 100 to get a pecent.- Subtract 75 and take the absolute value of the difference.That's the statistic: the distance between the sample percent and 75. We will start by defining a function that takes a proportion and returns the absolute difference between the corresponding percent and 75.def distance_from_75(p): return abs(100*p - 75)To simulate one value of the distance between the sample percent of purple-flowering plants and 75%, under the assumptions of Mendel's model, we have to first simulate the proportion of purple-flowering plants among 929 plants under the assumption of the model, and then calculate the discrepancy from 75%.model_proportions = [0.75, 0.25] proportion_purple_in_sample = sample_proportions(929, model_proportions).item(0) distance_from_75(proportion_purple_in_sample)That's one simulated value of the distance between the sample percent of purple-flowering plants and 75% as predicted by Mendel's model. 
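Since there are only two colours, the same single value can also be obtained with a binomial draw; here is a compact equivalent sketch in plain NumPy rather than the `datascience` helpers:

```python
import numpy as np

# One simulated distance: grow 929 plants, each purple-flowering with chance 0.75,
# convert the purple count to a percent and measure how far it falls from 75.
purple_count = np.random.binomial(929, 0.75)
abs(100 * purple_count / 929 - 75)
```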
Running the Simulation To get a sense of how variable the distance could be, we have to simulate it many more times.We will generate 10,000 values of the distance. As before, we will first use the code we developed above to define a function that returns one simulated value Mendel's hypothesis.def one_simulated_distance(): proportion_purple_in_sample = sample_proportions(929, model_proportions).item(0) return distance_from_75(proportion_purple_in_sample)Next, we will use a `for` loop to create 10,000 such simulated distances.distances = make_array() repetitions = 10000 for i in np.arange(repetitions): distances = np.append(distances, one_simulated_distance())The Prediction The empirical histogram of the simulated values shows the distribution of the distance as predicted by Mendel's model.Table().with_column( 'Distance between Sample % and 75%', distances ).hist()Look on the horizontal axis to see the typical values of the distance, as predicted by the model. They are rather small. For example, a high proportion of the distances are in the range 0 to 1, meaning that for a high proportion of the samples, the percent of purple-flowering plants is within 1% of 75%, that is, the sample percent is in the range 74% to 76%. Comparing the Prediction and the Data To assess the model, we have to compare this prediction with the data. Mendel recorded the number of purple and white flowering plants. Among the 929 plants that he grew, 705 were purple flowering. That's just about 75.89%.705 / 929So the observed value of our statistic – the distance between Mendel's sample percent and 75 – is about 0.89:observed_statistic = distance_from_75(705/929) observed_statisticJust by eye, locate roughly where 0.89 is on the horizontal axis of the histogram. You will see that it is clearly in the heart of the distribution predicted by Mendel's model.The cell below redraws the histogram with the observed value plotted on the horizontal axis.Table().with_column( 'Distance between Sample % and 75%', distances ).hist() plots.scatter(observed_statistic, 0, color='red', s=30);![](https://autonexa.com/wp-content/uploads/2018/08/INS-1-780x405.jpg) Problem Statement Our client is an Insurance company that has provided Health Insurance to its customers now they need your help in building a model to predict whether the policyholders (customers) from past year will also be interested in Vehicle Insurance provided by the company.An insurance policy is an arrangement by which a company undertakes to provide a guarantee of compensation for specified loss, damage, illness, or death in return for the payment of a specified premium. A premium is a sum of money that the customer needs to pay regularly to an insurance company for this guarantee.For example, you may pay a premium of Rs. 5000 each year for a health insurance cover of Rs. 200,000/- so that if, God forbid, you fall ill and need to be hospitalised in that year, the insurance provider company will bear the cost of hospitalisation etc. for upto Rs. 200,000. Now if you are wondering how can company bear such high hospitalisation cost when it charges a premium of only Rs. 5000/-, that is where the concept of probabilities comes in picture. For example, like you, there may be 100 customers who would be paying a premium of Rs. 5000 every year, but only a few of them (say 2-3) would get hospitalised that year and not everyone. 
This way everyone shares the risk of everyone else.Just like medical insurance, there is vehicle insurance where every year customer needs to pay a premium of certain amount to insurance provider company so that in case of unfortunate accident by the vehicle, the insurance provider company will provide a compensation (called ‘sum assured’) to the customer.Building a model to predict whether a customer would be interested in Vehicle Insurance is extremely helpful for the company because it can then accordingly plan its communication strategy to reach out to those customers and optimise its business model and revenue.Now, in order to predict, whether the customer would be interested in Vehicle insurance, you have information about demographics (gender, age, region code type), Vehicles (Vehicle Age, Damage), Policy (Premium, sourcing channel) etc. Bussiness Goal Building a model to predict whether a customer would be interested in Vehicle Insurance is extremely helpful for the company because it can then accordingly plan its communication strategy to reach out to those customers and optimise its business model and revenue.Now, in order to predict, whether the customer would be interested in Vehicle insurance, you have information about demographics (gender, age, region code type), Vehicles (Vehicle Age, Damage), Policy (Premium, sourcing channel) etc. Objective:To predict if an insurance policy holder would be interested to buy a vehicle insurance as well. Building a model to predict whether a customer would be interested in Vehicle Insurance is extremely helpful for the company because it can then accordingly plan its communication strategy to reach out to those customers and optimize its business model and revenue.The aim of this project is to leverage the machine learning algorithms such as Logistic Regression and Random Forest to create a predictive model using statistically significant variables from the given data set.Model accuracy will be assessed using different techniques such as ROC (Receiver operating characteristic), AUC (Area under the ROC curve) and Confusion Matrix. This Notebook will cover - 1. Exploratory Data Analysis 2. Data Modelling and Evaluation Step 1: Perform data preparation & cleaning- Load the dataset into a data frame using Pandas- Explore the number of rows & columns, ranges of values etc.- Handle missing, incorrect and invalid data- Perform any additional steps (parsing dates, creating additional columns, merging multiple dataset etc.) Step 2: Perform exploratory analysis & visualization- Compute the mean, sum, range and other interesting statistics for numeric columns- Explore distributions of numeric columns using histograms etc.- Explore relationship between columns using scatter plots, bar charts etc.- Make a note of interesting insights from the exploratory analysis Step 3: Ask & answer questions about the data- Ask at least 4 interesting questions about your dataset- Answer the questions either by computing the results using Numpy/Pandas or by plotting graphs using Matplotlib/Seaborn- Create new columns, merge multiple dataset and perform grouping/aggregation wherever necessary- Wherever you're using a library function from Pandas/Numpy/Matplotlib etc. 
explain briefly what it does Step 4: Summarize your inferences & write a conclusion- Write a summary of what you've learned from the analysis- Include interesting insights and graphs from previous sections- Share ideas for future work on the same topic using other relevant datasets- Share links to resources you found useful during your analysis ---- The Structure of this notebook :* Initial Library imports * Exploring File : EdStatsCountries.csv * Reading data and exploring which columns are necessary * Geographic and Economic Special- Macro- country codes * Grouping countries by Geographic and Economic groups ( Size-Heatmap )* Exploring File : EdStatsSeriies.csv * Reading data and exploring which columns are necessary * Understanding the indicator series : * What about what topics are these indicators * How many indicators per topic * Utility Function : get_indicator_details() : * This function return the details of an indicator as dictionary , * useful for retrieving definition and name during plots* Exploring File : EdStatsData.csv * Reading data and exploring which columns are necessary * Removing Columns for Future Years , * Removing Rows which are Empty * Understanding the availability of Data for each Year * Both before and after cleaning with Bar Plots* Utility Function : Comparative analysis * get_pivot_similiarity(): * this function takes two countries as an input and calculates how similar they are based on all available indicators , from 1970 to 2015 * the calculation is based on formula :``` ( indicator_value[country1] - indicator_value[country2] ) / ( indicator_value[country1] + indicator_value[country2] )```* Utility Function for full Report Generation * generate_report(): * This function takes a list of countries and one indicator as Input * It generates multiple different plot so that we can instantly get an idea of what is going on for that indicator * the plots generated include : * Line graph of values of that indicator for all countries in the list passed , over time from 1970 to 2015 * Pie Chart Plot for those counties for one particular year passed as input * Similarity Heat Map - for visualizing which countries are more similar to each other , for each country pair inside the list of countries * this function is based on the Utility function for Comparative analysis called : get_pivot_similiarity * the output generated is similar to that of a correlation plot .* World Map Visualization : * Dash Plotly library used for making a world map chloropleth plot * Indicators such as Population , Population Growth and GDP(PPP) are visualized in this section* Case Study : BRICS Countries : * BRICS Countries : Brazil Russia India China South-Africa * Study of Population Growth * Study of How the expenditure on education impacts the education outcomes * Study of correlation between Educated, GDP and Mortality * Case Study : Comparison between Education in India and America * Study of Primary and Primary and Tertiary Education Outcomes and Expenditures * Comparative study of Drop-Out rate **Attribute Information** 1. id : Unique ID for the customer2. Gender : Gender of the customer3. Age : Age of the customer4. Driving_License 0 : Customer does not have DL, 1 : Customer already has DL5. Region_Code : Unique code for the region of the customer6. Previously_Insured : 1 : Customer already has Vehicle Insurance, 0 : Customer doesn't have Vehicle Insurance7. Vehicle_Age : Age of the Vehicle8. Vehicle_Damage :1 : Customer got his/her vehicle damaged in the past. 
0 : Customer didn't get his/her vehicle damaged in the past.9. Annual_Premium : The amount customer needs to pay as premium in the year10. PolicySalesChannel : Anonymized Code for the channel of outreaching to the customer ie. Different Agents, Over Mail, Over Phone, In Person, etc.11. Vintage : Number of Days, Customer has been associated with the company12. Response : 1 : Customer is interested, 0 : Customer is not interested Import Libraries --- Installing and Importing Libraries Setting up Google Drive Folder where Data is storedFor Graphing : * Matplotib* Seaborn * heatmapz* Dash-PlotlyDatawrangling : * Numpy* Pandas* JsonOutput Formatting :* pprint - Pretty Printerimport pandas as pd import numpy as np import seaborn as sns import matplotlib import matplotlib.pyplot as plt %matplotlib inline sns.set_style('darkgrid') matplotlib.rcParams['font.size'] = 14 matplotlib.rcParams['figure.figsize'] = (9, 5) matplotlib.rcParams['figure.facecolor'] = '#00000000' import warnings warnings.filterwarnings('ignore')Import Datasetfrom google.colab import drive drive.mount('/content/drive') filepath = '/content/drive/MyDrive/AlmaBetter/Capstone2_Supervised_Classification/Code/Data/main_data.csv' data = pd.read_csv(filepath) dataset=data.copy() dataset['id'].nunique()/(len(dataset)) dataset.set_index('id', inplace= True) dataset.head() dataset.info() dataset.isnull().sum()* There is no missing value in the data* By looking at the info of the dataset we can get a rough idea on the numeric and the string columns * By looking at the summary of the data we can infer the mean,standard deviation, min and max of the * We will be able to get a idea on the outliers here by the percentiles ( In the Annual_Premium the 99th percentile is 72963 and the max is 540165 this represents the outliers in this column)dataset.describe(percentiles = [.25,.50,.75,.95,.99]).T numerical_features = ['Age','Vintage','Annual_Premium', 'Vehicle_Age'] binary_features = ['Gender', 'Driving_License', 'Previously_Insured','Vehicle_Damage'] categorical_features =['Region_Code','Policy_Sales_Channel'] target = ['Response']Data Preparation and Cleaningdataset['Annual_Premium'] = dataset['Annual_Premium'].astype('int64') dataset['Policy_Sales_Channel'] = dataset['Policy_Sales_Channel'].astype('int64') dataset['Region_Code'] = dataset['Region_Code'].astype('int64') for column in categorical_features: dataset[column] = dataset[column].astype('str') dataset.info() dataset.head(10)Exploratory Data Analysis Light Data Exploration 1) For numeric data * Made histograms to understand distributions * Corrplot * Pivot table comparing survival rate across numeric variables 2) For Categorical Data * Made bar charts to understand balance of classes * Made pivot tables to understand relationship with survivalpd.pivot_table( dataset, index='Response', values=numerical_features , aggfunc=['mean','median'] ) #Edit pd.pivot_table( data, index='Response', values=categorical_features , aggfunc=['median'] ) dataset.groupby('Response').apply(lambda x : x[numerical_features].describe() ) dataset.groupby('Response').apply(lambda x : x[categorical_features].describe() ) pd.pivot_table( dataset, index='Response', values=binary_features , aggfunc='mean' )Variable wise EDA Most of the vehicles of customers with response 1 are between the ages of 1-2 and their vehicles are damaged. 
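The claim above about vehicle age and vehicle damage can be checked directly with a cross-tabulation; a quick sketch using the columns already present in `dataset` (the exact percentages depend on the data and are not reproduced here):

```python
import pandas as pd

# Share of interested customers (Response == 1) within each Vehicle_Age band
# and within each Vehicle_Damage group.
print(pd.crosstab(dataset['Vehicle_Age'], dataset['Response'], normalize='index'))
print(pd.crosstab(dataset['Vehicle_Damage'], dataset['Response'], normalize='index'))
```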
Target Variable (Response)count_1 = dataset[dataset["Response"] == 1].value_counts().sum() totalResponse = dataset["Response"].value_counts().sum() print("The percentage of positive response in data is :", round(count_1*100/totalResponse),"%") plt.subplot(1, 2, 1) ax1=sns.countplot(dataset['Response'], palette="cool") plt.title("Count of response (target variable)") total=float(len(dataset)) plt.subplot(1,2,2) count = dataset['Response'].value_counts() count.plot.pie(autopct = '%1.1f%%',colors=['c','orange'], figsize = (10,7),explode = [0,0.1],title = "pie chart of Percentage of target class") print( "Percentage of target class\n") print(dataset['Response'].value_counts()/len(dataset)*100) for p in ax1.patches: height = p.get_height() ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center")Percentage of target class 0 87.743664 1 12.256336 Name: Response, dtype: float641. From the plot we can say that this is an imbalanced binary classification problem2. Only about 12% of individuals are interested, compared with roughly 88% who are not. Age variableplt.figure(figsize = (15,3)) sns.countplot(dataset['Age'], palette = 'hsv') plt.title('Count of Age') plt.show() f,ax = plt.subplots(nrows=1,ncols=2,figsize=(20,5)) axx = ax.flatten() sns.distplot(dataset['Age'] ,ax = axx[0],color='Blue') sns.boxplot(dataset['Age'],ax = axx[1],color='Orange') sns.FacetGrid(dataset ,hue='Response',size=5) \ .map(sns.distplot,"Age") \ .add_legend(); plt.show()1. Individuals aged 24 are the most common in the dataset2. The Age variable looks right skewed3. From the boxplot we observe that there are no serious outliers in the data * Young people below 30 are not interested in vehicle insurance. Reasons could be lack of driving experience, lower maturity, and not yet owning expensive vehicles.* People aged between 30-60 are more likely to be interested.* From the boxplot we can see that there are no outliers in the data. Annual_Premium * From the distribution plot we can infer that the Annual_Premium variable is right skewed* From the boxplot we can observe a lot of outliers in this variableplt.figure(figsize=(13,7)) plt.subplot(2,1,1) sns.distplot(dataset['Annual_Premium'], color='green') plt.title("Distribution of Annual premium") plt.show() plt.figure(figsize=(13,7)) plt.subplot(2,1,2) sns.boxplot(dataset['Annual_Premium']) plt.title("boxplot of Annual premium") plt.show() # plt.figure(figsize = (13,5)) # plt.subplot(1,2,1) # sns.countplot(dataset['Annual_Premium']) # plt.title("count of male and female") # plt.subplot(1,2,2) # sns.countplot(dataset['Annual_Premium'] , hue = dataset['Response'],palette="rocket_r") # plt.title("Response in Male and female category") # plt.show()Gender variableplt.figure(figsize = (13,5)) plt.subplot(1,2,1) plt.subplots_adjust(hspace = 5) ax1 = sns.countplot(dataset['Gender'],palette="rocket_r") plt.title("count of male and female") total=float(len(dataset)) total_h=[] plt.subplot(1,2,2) ax2 = sns.countplot(dataset['Gender'], hue = dataset['Response'],palette="rocket_r") plt.title("Response in Male and female category") for i,p in enumerate(ax1.patches): height = p.get_height() total_h+=[height] ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center") total_h*=2 for i,p in enumerate(ax2.patches): height = p.get_height() ax2.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total_h[i]*100), ha="center") plt.tight_layout() plt.show()1.
The gender variable in the dataset is almost equally distributed2. Male category is slightly greater than that of female and chances of buying the insurance is also little high. Driving Licenseprint("Percentage of Driving_License feature\n ") print(dataset['Driving_License'].value_counts()/len(dataset)*100) f,ax = plt.subplots(nrows=1,ncols=2,figsize=(12,6)) axx = ax.flatten() total_h=[] plt.title("Count plot of Driving_License vs Response") ax1 = sns.countplot(dataset['Driving_License'],ax = axx[0],palette = 'rocket') ax2 = sns.countplot('Driving_License', hue = 'Response',ax =axx[1],data = dataset,palette="rocket_r") for i,p in enumerate(ax1.patches): height = p.get_height() total_h+=[height] ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center") total_h*=2 for i,p in enumerate(ax2.patches): height = p.get_height() ax2.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total_h[i]*100), ha="center") plt.tight_layout()Percentage of Driving_License feature 1 99.786938 0 0.213062 Name: Driving_License, dtype: float641. Customers who have the DL are 99% 2. Customers who are interested in Vehicle Insurance almost all have driving license Previously Insuredprint("Percentage ofPreviously_Insured feature\n ") print(dataset['Previously_Insured'].value_counts()/len(dataset)*100) f,ax = plt.subplots(nrows=1,ncols=2,figsize=(12,5)) axx = ax.flatten() ax1 = sns.countplot(dataset['Previously_Insured'],ax = axx[0],palette="rocket_r") ax2 = sns.countplot('Previously_Insured', hue = 'Response',ax =axx[1],data = dataset,palette="rocket_r") total_h=[] for i,p in enumerate(ax1.patches): height = p.get_height() total_h+=[height] ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center") total_h*=2 for i,p in enumerate(ax2.patches): height = p.get_height() ax2.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total_h[i]*100), ha="center") plt.tight_layout()Percentage ofPreviously_Insured feature 0 54.178988 1 45.821012 Name: Previously_Insured, dtype: float641. The variable perviosly insured almost has equal count 2. Customer who are not perviosly insured are likely to be inetrested Customers who were previously insured tend not to be interested. We can think that the reason for this is that their previous insurance agreement has not expired yet. 
Vehicle Ageprint("Percentage of vechicle age feature\n ") print(dataset['Vehicle_Age'].value_counts()/len(dataset)*100) plt.figure(figsize = (13,5)) plt.subplot(1,2,1) ax1 = sns.countplot(dataset['Vehicle_Age'],palette="rocket_r") plt.title("Count plot of vechicle age") plt.subplot(1,2,2) plt.title("Plot of vechicle age vs response") ax2 = sns.countplot('Vehicle_Age', hue = 'Response',data = dataset ,palette="rocket_r") total_h=[] for i,p in enumerate(ax1.patches): height = p.get_height() print(f'{height}\n') total_h+=[height] ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center") total_h*=2 print(f'{total_h}\n') for i,p in enumerate(ax2.patches): height = p.get_height() print(height) ax2.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total_h[i]*100), ha="center") plt.tight_layout() dataset.groupby(['Vehicle_Age']).apply( lambda x : x['Response'].value_counts(normalize= True)*100)* There is very less number of customers with vechicle age less than 2 years* Customers with vechicle age 1-2 years are more likely to interested as compared to the other two * Customers with with Vehicle_Age <1 years have very less chance of buying Insurance#sns.countplot('Response', hue = 'Vehicle_Age',data = dataset ,palette="cool")Vechicle damage Customers having damaged vehicleprint("Percentage of vechicle damage feature\n ") print(dataset['Vehicle_Damage'].value_counts()/len(dataset)*100) plt.figure(figsize = (13,5)) plt.subplot(1,2,1) ax1 = sns.countplot(dataset['Vehicle_Damage'],palette="rocket_r") plt.title("Count plot of Vehicle_Damage") plt.subplot(1,2,2) plt.title("Plot of vechicle damage vs response") ax2 = sns.countplot('Vehicle_Damage', hue = 'Response',data = dataset,palette="cool") total_h=[] for i,p in enumerate(ax1.patches): height = p.get_height() total_h+=[height] ax1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total*100), ha="center") total_h*=2 for i,p in enumerate(ax2.patches): height = p.get_height() ax2.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total_h[i]*100), ha="center") plt.tight_layout()Percentage of vechicle damage feature Yes 50.487656 No 49.512344 Name: Vehicle_Damage, dtype: float64* Customers with vechicle damage(Yes and NO) are equally distributed with (50.48 % , 49.51 %) * Customers with vechicle damage are more interested in Vehicle Insurance VintageNumber of Days, Customer has been associated with the companyf,ax = plt.subplots(nrows=1,ncols=2,figsize=(25,5)) axx = ax.flatten() sns.distplot(dataset['Vintage'],ax=axx[0], color='c') sns.boxplot (dataset['Vintage'],ax=axx[1],color='c')Region Codeplt.figure(figsize = (20,10)) plt.subplot(3,1,1) sns.countplot(dataset['Region_Code'], palette = 'hsv') plt.title('Count of Region code')* The indivisuals with region code 28 the highest as compared to the other ones* From the box plot it looks like there is no outliers in the data - Further we can analyze which region has highest intrested customers Policy Sales Channelplt.figure(figsize = (20,10)) plt.subplot(3,1,2) sns.distplot(dataset['Policy_Sales_Channel']) plt.title('Distribution of Region code') sns.FacetGrid(dataset,hue='Response',size=5) \ .map(sns.distplot,"Policy_Sales_Channel") \ .add_legend(); plt.show()The most used sales channels are 152, 26 and 124. 
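These channel figures can be verified with a short aggregation; the sketch below is illustrative only and assumes the `dataset` frame and column names defined earlier in this notebook:
```python
# Most used policy sales channels by count, together with each channel's
# share of interested customers (Response == 1).
channel_stats = (
    dataset.groupby('Policy_Sales_Channel')['Response']
    .agg(count='size', response_rate='mean')
    .sort_values('count', ascending=False)
)
print(channel_stats.head(10))
```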
The best channel that results in customer interest is 152.plt.figure(figsize=(5,5)) plt.title("Correlation plot") sns.heatmap(dataset[numerical_features].corr().abs(),linewidths=5, annot=True, square=True,annot_kws={'size': 10},cmap='YlGnBu', fmt=".3f") #cmap = 'Blues'We can see that the most influencing factors for Response are Vehicle_Damage and Previously_Insured, followed by Vehicle_Age Data Preprocessing Annual_Premium has the worst outliersQ1 = np.percentile(dataset['Annual_Premium'], 25, interpolation = 'midpoint') Q2 = np.percentile(dataset['Annual_Premium'], 50, interpolation = 'midpoint') Q3 = np.percentile(dataset['Annual_Premium'], 75, interpolation = 'midpoint') IQR = Q3 - Q1 low_lim = Q1 - 1.5 * IQR up_lim = Q3 + 1.5 * IQR outlier = [] for x in dataset['Annual_Premium']: if ((x > up_lim) or (x < low_lim)): outlier.append(x)* From the distribution plot we can infer that the Annual_Premium variable is right skewed* From the boxplot we can observe a lot of outliers in this variable#dataset['Annual_Premium']=dataset['Annual_Premium'].replace(outlier,up_lim) #import seaborn as sns #sns.boxplot(dataset['Annual_Premium'])Checking duplicate rowsduplicate=dataset[dataset.duplicated()].sort_values(['Vintage','Age']) print(duplicate)* There are no duplicated rows in the dataset; the summary above shows the observations that share the same response and the same features Label Encoding In machine learning, we usually deal with datasets that contain labels in one or more columns. These labels can be words or numbers. To keep the data human readable, the training data is often labelled with words. Label encoding refers to converting these labels into numeric, machine-readable form. We replaced some values in the dataset with numerical values, as follows:**Vehicle Age ->*** "< 1 Year" = 0.5* "1-2 Year" = 1.5* "> 2 Years" = 2.5**Gender ->*** "Female" = 0* "Male" = 1**Vehicle Damage ->*** "No" = 0* "Yes" = 1dataset.head(5) dataset['Vehicle_Age']=dataset['Vehicle_Age'].replace({'< 1 Year':0.5,'1-2 Year':1.5,'> 2 Years':2.5}) dataset['Gender']=dataset['Gender'].replace({'Male':1,'Female':0}) dataset['Vehicle_Damage']=dataset['Vehicle_Damage'].replace({'Yes':1,'No':0}) dataset.head(5)Feature Engineering :Handling Categorical values in Policy_Sales_Channel and Region_Codefrom sklearn.model_selection import train_test_split dataset , dataset_test = train_test_split(dataset , test_size=0.2 ) dataset.shape , dataset_test.shapetarget mean encodingMake this a function, so that it can also be applied to incoming data; create a data-prep pipeline so this transform can be carried out on new incoming data as well (a sketch of such a helper follows below)Mean_encoding_Policy_Sales_Channel = dataset.groupby(['Policy_Sales_Channel'])['Response'].mean().to_dict() dataset['Policy_Sales_Channel_Target_encoded'] = dataset['Policy_Sales_Channel'].map(Mean_encoding_Policy_Sales_Channel) #dataset_test['Policy_Sales_Channel_Target_encoded'] = dataset_test['Policy_Sales_Channel'].map(Mean_encoding_Policy_Sales_Channel) Mean_encoding_Region_Code = dataset.groupby(['Region_Code'])['Response'].mean().to_dict() dataset['Region_Code_Target_encoded'] = dataset['Region_Code'].map(Mean_encoding_Region_Code) #dataset_test['Region_Code_Target_encoded'] = dataset_test['Region_Code'].map(Mean_encoding_Region_Code)
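The target mean encoding above can be wrapped in a small helper so the mapping learned on the training split is reusable on the test split or on any new incoming data. This is a minimal sketch, not the notebook's original code; the function names are hypothetical and it assumes the `dataset` / `dataset_test` split created above:
```python
def fit_target_mean_encoding(train_df, column, target='Response'):
    """Learn a category -> mean(target) mapping from the training data only."""
    return train_df.groupby(column)[target].mean().to_dict()

def apply_target_mean_encoding(df, column, mapping, default=None):
    """Apply a previously learned mapping; unseen categories fall back to `default`."""
    if default is None:
        default = sum(mapping.values()) / len(mapping)  # fallback: average of the learned means
    return df[column].map(mapping).fillna(default)

for col in ['Policy_Sales_Channel', 'Region_Code']:
    enc = fit_target_mean_encoding(dataset, col)
    dataset[col + '_Target_encoded'] = apply_target_mean_encoding(dataset, col, enc)
    dataset_test[col + '_Target_encoded'] = apply_target_mean_encoding(dataset_test, col, enc)
```
Learning the mapping only on the training split (and merely applying it to the test split) avoids leaking test-set targets into the features.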
Cleaned Data Exportingdataset.head() dataset.info() train_filepath = '/content/drive/MyDrive/AlmaBetter/Capstone2_Supervised_Classification/Code/Data/processed_data.csv' test_filepath = '/content/drive/MyDrive/AlmaBetter/Capstone2_Supervised_Classification/Code/Data/processed_data_test.csv' dataset.to_csv(train_filepath) dataset_test.to_csv(test_filepath)Inferences and ConclusionWe've drawn many interesting inferences from the health_insurance-cross-sell-prediction data; here's a summary of a few of them:* Customers aged between 30 and 60 are more likely to buy insurance.* Customers with a Driving License have a higher chance of buying insurance.* Customers with Vehicle_Damage are likely to buy insurance.* Variables such as Age, Previously_Insured and Annual_Premium affect the target variable the most.* Comparing ROC curves, the Random Forest model performs better: curves closer to the top-left corner indicate better performance. What Worked?* Hyperparameter tuning* Feature engineering such as concatenation, aggregation, binning* Data cleaning steps, the most pertinent of which were removing duplicates [more reliable CV] and confusing rows* Treating Annual_Premium and Vintage as categorical features in CatBoost What didn't Work?* Class balancing via oversampling, undersampling, TomekLinks etc., and via hard-coding the class-weights parameter in individual algorithms References and Future Work**TODO** - Write some explanation here: ideas for future projects using this dataset, and links to resources you found useful. Hypothesis test1. Chi-square test: it helps to figure out the relation between features and the label with **"pvalue <= 0.05"** (a sketch extending this test to the other categorical features follows below)ct = pd.crosstab(dataset['Vintage'],dataset['Response']) from scipy.stats import chi2_contingency stat,pvalue,dof,expected_R = chi2_contingency(ct) print("pvalue : ",pvalue) if pvalue <= 0.05 : print("Alternate Hypothesis passed. Vintage and Response have a relationship") else: print("Null hypothesis passed. Vintage and Response do not have a relationship")
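The same test can be run over the other categorical and binary features in one loop; a small illustrative sketch (not part of the original notebook) assuming `dataset` and the feature lists defined earlier are still in scope:
```python
from scipy.stats import chi2_contingency

def chi2_vs_response(df, features, target='Response', alpha=0.05):
    """Chi-square test of independence between each feature and the target."""
    for col in features:
        ct = pd.crosstab(df[col], df[target])
        _, pvalue, _, _ = chi2_contingency(ct)
        verdict = 'related to' if pvalue <= alpha else 'not related to'
        print(f"{col}: p-value = {pvalue:.4g} -> {verdict} {target}")

chi2_vs_response(dataset, binary_features + categorical_features)
```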
Speed Up Your Data Science Project Using Persistent Caching Tools from Pythagoras Package Introductory TutorialWorking with large dataframes can be slow. This notebook demonstrates how Pythagoras can help you win extra seconds and minutes (sometimes - hours) every time you need to load a large csv file or to execute a complex data-processing function.import numpy as np import pandas as pd import sys import time import logging np.random.seed(42)How To Cache Function Outputs A slow function Let's assume we have an important function that takes some unpleasantly long time to run:# Let's create a sample DataFrame to experiment with a_dataframe = pd.DataFrame( data = { 'COL_1': [1.1, 2.2, 3.3] ,'COL_2': [4.4, 5.5, None] ,'COL_3': [7.7, 8.8, 9.9] ,'COL_4': [None, 11.11, 12.12] }) a_dataframe # Now, let's create a slow function. # In real life, such a slow function could be part of a # feature-engineering pipeline def slowly_process_dataframe(df:pd.core.frame.DataFrame, a:float): result = df + a time.sleep(3) return result %%time demo_result = slowly_process_dataframe(a_dataframe,3.14) demo_resultA slow function + Pythagoras It took 3+ seconds to execute **slowly_process_dataframe()** in the cell above.Let's see how Pythagoras helps speed this up:import pythagoras # this is the library which will provide us with the # advanced caching tools demo_cache_obj = pythagoras.PickleCache( cache_dir = "./cache_files" # Here Pythagoras will store cached data, # if/when it needs to. ,input_dir = "." # From here Pythagoras will read .csv files, # if/when asked. ) print(demo_cache_obj) # print the status of the cache # This time we add a decorator while creating our function # everything else is exactly the same as above @demo_cache_obj def slowly_process_dataframe(df:pd.core.frame.DataFrame, a:float): result = df + a time.sleep(3) return result %%time demo_result = slowly_process_dataframe(a_dataframe,3.14) demo_resultThe first call above took 3+ seconds to execute. Let's call the function again with exactly the same parameters:%%time demo_result = slowly_process_dataframe(a_dataframe,3.14) demo_resultThe second and all subsequent calls to **slowly_process_dataframe()** are now much faster: we went down from 3 seconds to just 11 milliseconds. Calling the same function with different parametersa_dataframe a_dataframe.iat[1,1] = -100 a_dataframe %%time demo_result = slowly_process_dataframe(a_dataframe,3.14) demo_result %%time demo_result = slowly_process_dataframe(a_dataframe,3.14) demo_resultThe first time we called the function with slightly different parameters (one value in the input dataframe was changed), it again took 3 seconds to execute. However, the second attempt to call the function with the same new parameters was way faster.print(demo_cache_obj) # print the status of the cachePickleCache in directory <./cache_files> contains 2 files, with total size 2 Kb. There are 275 Gb of free space available in the directory. Cache files are expected to have <.pkl> extension. Input files should be located in <.> folder, which contains 220 files, with total size 942 Kb. Cache READER is ACTIVE: cached versions of objects are loaded from disk if they are available there. Cache WRITER is ACTIVE: new objects get saved to disk as they are created. Names of cache files can not be longer than 250 characters. The following KEY-VALUE pairs will determine caching behaviour if they are present among function parameters or attributes of the parameters on any nested level: {'random_state': None}; a presence of an attribute or a parameter with the KEY name will disable caching, unless the parameter/attribute is set to VALUE. Logger pythagoras.PickleCache.140652536762672.demo_cache_obj (WARNING)How does it work?The first time we ran **slowly_process_dataframe()**, Pythagoras cached the output of the function.The next time the function was called, Pythagoras re-used that output, without actually executing the original function.
The output is stored as a pickle file, so it will save you time even when you run your notebook again next month.If we pass some other arguments to the functions, the same process will repeat.For each combination of different values, passed as function arguments, Pythagoras will create a new cache file.If you have enough disk space, it will save you a lot of time.> Pythagoras works not only with simple types of function arguments > (such as int or str), but also with many others,> including DataFrames, dicts, lists, sets, > and all their possible combinations.> **Most of other persistent caching libraries can not do it today, > they only work with limited group of basic argument types.**> If some existing or new (created by you) datatype> is not supported by Pythagoras out-of-the-box,> it provides a simple extensibility mechanism that allows you to > add support for any new type with just a few lines of code.> **Other persistent caching libraries can not easily recognize new types today.**> How can Pythagoras offer such a powerful flexibility > while almost all the other caching libraries only support > a limited number of datatypes for function arguments?> Most of the existing caching libraries in Python use> easy-to-implement algorithms that only allow to work > with immutable values. Pythagoras uses different approach > that can work with both mutable and immutable values.> This allows Pythagoras to cache functions whose parameters > can be of virtually any existing type/class.So, the next time you need to do complex feature engineeringby transforming a large dataframe into another, even larger dataframe,put your feature engineering code into a function and decorate it with PickleCache object. Important noteThis approach only works with functions that create their output using exclusively the input argumets, without accessing any outside data.If a function reads from global variables, or files, or Internet, or uses current time, etc.,such function is not compatible with PickleCache What is under the hood? For every function, which we modified with PickleCach decorator, Pythagoras creates a sub-folder within the main cache_dir folder.The name of sub-folder consists of a keyword "Func" plus the function's name plus its digital fingerprint. For example, for our slow function above, the subfolder was named **Func\_\_slowly\_process\_dataframe\_\_bdca2**!ls cache_files/Func__slowly_process_dataframe__59c21Inside each subfolder, Pythagoras puts .pkl files with different versions of function output.The name of each file consists of function's digital signature, plus slim human-readable summary representation of function arguments,plus digital fingerprint of function arguments.For example, for our slow function the subfolder now includes 2 files:!ls cache_files/Func__slowly_process_dataframe__bdca2ls: cache_files/Func__slowly_process_dataframe__bdca2: No such file or directorySlim human-readable parameter representations in the file-names allow to visually inpect the caching directory and help you better understan behavior of your PickleCache-enabled code. Digital fingerprints in the file-names allow PickleCache to uniquely identify and distinguish different parameter values, passed to the function when it was called. 
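The general mechanism described above (fingerprint the arguments, store one pickle file per distinct combination, reuse it on later calls) can be illustrated with a tiny generic decorator. This is only a conceptual sketch of persistent memoization under simplifying assumptions, not Pythagoras' actual implementation, and the helper name is made up:
```python
import hashlib
import os
import pickle

def simple_pickle_cache(cache_dir="./simple_cache"):
    """Sketch: cache a function's output on disk, keyed by a fingerprint of its
    pickled arguments. It only handles picklable inputs and, unlike Pythagoras,
    does no readable file naming, type-specific hashing, or cache-disabling keys."""
    os.makedirs(cache_dir, exist_ok=True)
    def decorator(func):
        def wrapper(*args, **kwargs):
            fingerprint = hashlib.md5(pickle.dumps((args, sorted(kwargs.items())))).hexdigest()
            path = os.path.join(cache_dir, f"{func.__name__}__{fingerprint}.pkl")
            if os.path.exists(path):              # cache hit: load the stored output
                with open(path, "rb") as f:
                    return pickle.load(f)
            result = func(*args, **kwargs)        # cache miss: run the function once
            with open(path, "wb") as f:
                pickle.dump(result, f)
            return result
        return wrapper
    return decorator
```
As noted above, an approach like this is only safe for functions whose output depends exclusively on their arguments.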
A fast function + Pythagoras But what if a function is fast and an extra step of saving its output into a .pkl file actually slows down the process insted of speeding it up?Pythagoras will still do what we told it to do, but it will give us a warning:@demo_cache_obj def fast_function(x,y): return x+" "+y %%time result = fast_function("Message for","Bob") %%time result = fast_function("Message for","Bob") print(demo_cache_obj) # print the status of the cachePickleCache in directory <./cache_files> contains 3 files, with total size 2 Kb. There are 275 Gb of free space available in the directory. Cache files are expected to have <.pkl> extension. Input files should be located in <.> folder, which contains 221 files, with total size 942 Kb. Cache READER is ACTIVE: cached versions of objects are loaded from disk if they are available there. Cache WRITER is ACTIVE: new objects get saved to disk as they are created. Names of cache files can not be longer than 250 characters. The following KEY-VALUE pairs will determine caching behaviour if they are present among function parameters or attributes of the parameters on any nested level: {'random_state': None}; a presence of an attribute or a parameter with the KEY name will disable caching, unless the parameter/attribute is set to VALUE. Logger pythagoras.PickleCache.140652536762672.demo_cache_obj (WARNING)How To Cache .read_csv() Reading *.csv* files can be slow. Let's create one and play with it:large_dataframe = pd.DataFrame(data = 1000*np.random.rand(7000,7000)) large_dataframe.to_csv("example.csv", index=False) %%time new_dataframe = pd.read_csv("example.csv") new_dataframe.shapeNow, let's try a version of **.read_csv()** offered by Pythagoras:%%time new_dataframe = demo_cache_obj.read_csv("example.csv") %%time new_dataframe = demo_cache_obj.read_csv("example.csv") new_dataframe.shapeThe second and all subsequent calls to Pythagoras' **.read_csv()** are much faster: we went down from 10.4 seconds to 96 milliseconds - it's 108-x faster. For larger .csv files the difference is even more drastic. How does it work? The first time we ran **PickleCache.read_csv()**, Pythagoras cached the output of the function.The next time **PickleCache.read_csv()** was called with the same filename as an argument,Pythagoras re-used that output, without actually reading the original **.csv**.The cached output is stored as a pickle file.If we modify the **.csv** file outside of our notebook, or if you add more argumentsto **PickleCache.read_csv()** call, the same process will repeat.For each new version of the **.csv** file, and for each combination of additional parameters,Pythagoras will create a new cache file.If you have enough disk space, it will save you a lot of your time.Pickle can be loaded much faster than .csv,this approach saves substantial time while working with large .csv files.**PickleCache.read_csv()** accepts all *keyword arguments*which **pandas.read_csv()** accepts,such as *sep*, *names*, *index_col*, *dtype*, *na_values*, etc.You can use them to fine-tune behaviour of **read_csv()** What is under the hood? For every csv file, which we read with **PickleCach.read_csv()** function ,Pythagoras creates a sub-folder within the main cache_dir folder. 
The name of sub-folder consists of a keyword "Data" plus the original file's name plus a digital fingerprint of the filename.For instance, for our file **example.csv** the subfolder was named **Data\_\_example.csv\_\_44e43**!ls cache_filesData__example.csv__37f98 Func__slowly_process_dataframe__59c21 Func__fast_function__08fdaInside each subfolder Pythagoras puts .pkl files with different versions of data from the original .csv file.The name of each .pkl file consists of digital fingerprint of the original .csv filename, plus the size of the original .csv file, plus datetime of its modification, plus encoded information about additional arguments, that were passed to the function.For instance, for our **example.csv** file, the subfolder should include only one file now:!ls cache_files/Data__example.csv__44e43ls: cache_files/Data__example.csv__44e43: No such file or directoryBut let's call **read_csv()** with an extra argument, and then look into our subfolder again:new_dataframe = demo_cache_obj.read_csv( "example.csv", usecols=[2,3,4,5,6,7,8,9,10,11,12,12,14,15,16,17,18]) new_dataframe.shape !ls cache_files/Data__example.csv__44e43ls: cache_files/Data__example.csv__44e43: No such file or directoryAssume $y = 0.5 \cdot x + 1 + \epsilon$, where $\epsilon \sim N(0, 0.5^2)$. We generate $n$ training examples by sampling $x$ uniformly from $[1, 6)$.n = 200 x = 5 * np.random.rand(n, 1) + 1 a = 0.5 b = 1 sigma = 0.5 y = a * x + b + sigma * np.random.randn(n, 1)We sample the regularization parameter $\lambda$ from $\left[10^{-4}, 10^3\right]$. For each $\lambda$, we fit a ridge regression model.m = 200 lambdas = np.logspace(-4, 3, m) w = np.zeros((2, m)) for i in range(m): alpha = lambdas[i] * n rr = linear_model.Ridge(alpha=alpha, fit_intercept=True) rr.fit(x, y) w[0, i] = rr.intercept_[0] w[1, i] = rr.coef_[0][0]We plot the model coefficients, including the bias $w_0$, as $\lambda$ increases.plt.figure(figsize=(9, 6)) plt.tick_params(labelsize=14) plt.semilogx(lambdas, w[0], lambdas, w[1]) plt.legend([r'$w_0$', r'$w_1$'], loc='upper left', fontsize=14) plt.xlabel(r'$\lambda$', fontsize=14) plt.title('ridge regression coefficients', fontsize=14) plt.show()Below we plot the training set along with the ridge regression model fitted with $\lambda = 10^{-3}$.rr = linear_model.Ridge(alpha=1e-3 * n, fit_intercept=True) rr.fit(x, y) w0 = rr.intercept_[0] w1 = rr.coef_[0][0] x1 = np.array([0.7, 6.3]) y1 = w1 * x1 + w0 plt.figure(figsize=(9, 6)) plt.tick_params(labelsize=14) plt.scatter(x, y) plt.plot(x1, y1, 'indianred', linewidth=3) plt.legend([r'$\lambda=10^{-3}$', 'training example'], loc='upper left', fontsize=14) plt.xlabel('x', fontsize=14) plt.ylabel('y', fontsize=14) plt.show()Introduction Linear regression excels at extrapolating trends, but can't learn interactions. XGBoost excels at learning interactions, but can't extrapolate trends. In this lesson, we'll learn how to create "hybrid" forecasters that combine complementary learning algorithms and let the strengths of one make up for the weakness of the other. Components and Residuals So that we can design effective hybrids, we need a better understanding of how time series are constructed. We've studied up to now three patterns of dependence: trend, seasons, and cycles. 
Many time series can be closely described by an additive model of just these three components plus some essentially unpredictable, entirely random *error*:```series = trend + seasons + cycles + error```Each of the terms in this model we would then call a **component** of the time series.The **residuals** of a model are the difference between the target the model was trained on and the predictions the model makes -- the difference between the actual curve and the fitted curve, in other words. Plot the residuals against a feature, and you get the "left over" part of the target, or what the model failed to learn about the target from that feature.The difference between the target series and the predictions (blue) gives the series of residuals.On the left of the figure above is a portion of the *Tunnel Traffic* series and the trend-seasonal curve from Lesson 3. Subtracting out the fitted curve leaves the residuals, on the right. The residuals contain everything from *Tunnel Traffic* the trend-seasonal model didn't learn.We could imagine learning the components of a time series as an iterative process: first learn the trend and subtract it out from the series, then learn the seasonality from the detrended residuals and subtract the seasons out, then learn the cycles and subtract the cycles out, and finally only the unpredictable error remains.Learning the components of Mauna Loa CO2 step by step. Subtract the fitted curve (blue) from its series to get the series in the next step.Add together all the components we learned and we get the complete model. This is essentially what linear regression would do if you trained it on a complete set of features modeling trend, seasons, and cycles.Add the learned components to get a complete model. Hybrid Forecasting with Residuals In previous lessons, we used a single algorithm (linear regression) to learn all the components at once. But it's also possible to use one algorithm for some of the components and another algorithm for the rest. This way we can always choose the best algorithm for each component. To do this, we use one algorithm to fit the original series and then the second algorithm to fit the residual series.In detail, the process is this:
```
# 1. Train and predict with first model
model_1.fit(X_train_1, y_train)
y_pred_1 = model_1.predict(X_train_1)

# 2. Train and predict with second model on residuals
model_2.fit(X_train_2, y_train - y_pred_1)
y_pred_2 = model_2.predict(X_train_2)

# 3. Add to get overall predictions
y_pred = y_pred_1 + y_pred_2
```
We'll usually want to use different feature sets (`X_train_1` and `X_train_2` above) depending on what we want each model to learn. If we use the first model to learn the trend, we generally wouldn't need a trend feature for the second model, for example.While it's possible to use more than two models, in practice it doesn't seem to be especially helpful. In fact, the most common strategy for constructing hybrids is the one we've just described: a simple (usually linear) learning algorithm followed by a complex, non-linear learner like GBDTs or a deep neural net, the simple model typically designed as a "helper" for the powerful algorithm that follows. Designing HybridsThere are many ways you could combine machine learning models besides the way we've outlined in this lesson.
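One convenient way to package the fit-on-residuals recipe above is a small wrapper class around two scikit-learn-style estimators. The sketch below is only an illustration; the `BoostedHybrid` name and interface are assumptions, not code from this lesson:
```python
class BoostedHybrid:
    """Two-stage hybrid: model_1 fits the target, model_2 fits model_1's residuals."""
    def __init__(self, model_1, model_2):
        self.model_1 = model_1
        self.model_2 = model_2

    def fit(self, X_1, X_2, y):
        self.model_1.fit(X_1, y)
        y_fit = self.model_1.predict(X_1)
        self.model_2.fit(X_2, y - y_fit)   # second model learns what the first missed
        return self

    def predict(self, X_1, X_2):
        return self.model_1.predict(X_1) + self.model_2.predict(X_2)
```
For example, `BoostedHybrid(LinearRegression(), XGBRegressor())` could be fit with trend features as `X_1` and seasonal or categorical features as `X_2`.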
Successfully combining models, though, requires that we dig a bit deeper into how these algorithms operate.There are generally two ways a regression algorithm can make predictions: either by transforming the *features* or by transforming the *target*. Feature-transforming algorithms learn some mathematical function that takes features as an input and then combines and transforms them to produce an output that matches the target values in the training set. Linear regression and neural nets are of this kind.Target-transforming algorithms use the features to group the target values in the training set and make predictions by averaging values in a group; a set of feature just indicates which group to average. Decision trees and nearest neighbors are of this kind.The important thing is this: feature transformers generally can **extrapolate** target values beyond the training set given appropriate features as inputs, but the predictions of target transformers will always be bound within the range of the training set. If the time dummy continues counting time steps, linear regression continues drawing the trend line. Given the same time dummy, a decision tree will predict the trend indicated by the last step of the training data into the future forever. *Decision trees cannot extrapolate trends.* Random forests and gradient boosted decision trees (like XGBoost) are ensembles of decision trees, so they also cannot extrapolate trends.A decision tree will fail to extrapolate a trend beyond the training set.This difference is what motivates the hybrid design in this lesson: use linear regression to extrapolate the trend, transform the *target* to remove the trend, and apply XGBoost to the detrended residuals. To hybridize a neural net (a feature transformer), you could instead include the predictions of another model as a feature, which the neural net would then include as part of its own predictions. The method of fitting to residuals is actually the same method the gradient boosting algorithm uses, so we will call these **boosted** hybrids; the method of using predictions as features is known as "stacking", so we will call these **stacked** hybrids.Winning Hybrids from Kaggle Competitions For inspiration, here are a few top scoring solutions from past competitions: STL boosted with exponential smoothing - Walmart Recruiting - Store Sales Forecasting ARIMA and exponential smoothing boosted with GBDT - Rossmann Store Sales An ensemble of stacked and boosted hybrids - Web Traffic Time Series Forecasting Exponential smoothing stacked with LSTM neural net - M4 (non-Kaggle) Example - US Retail Sales The [*US Retail Sales*](https://www.census.gov/retail/index.html) dataset contains monthly sales data for various retail industries from 1992 to 2019, as collected by the US Census Bureau. Our goal will be to forecast sales in the years 2016-2019 given sales in the earlier years. 
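The extrapolation point made above is easy to check on synthetic data. The following short sketch is an illustration added for clarity (it is not part of the lesson and assumes scikit-learn is available); it fits both kinds of model to a simple linear trend and predicts past the end of the training window:
```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor

steps = np.arange(100).reshape(-1, 1)            # time dummy t = 0..99
trend = 2.0 * steps.ravel()                      # a clean upward trend: y = 2 * t
future_steps = np.arange(100, 110).reshape(-1, 1)  # steps beyond the training window

linear = LinearRegression().fit(steps, trend)
tree = DecisionTreeRegressor().fit(steps, trend)

print(linear.predict(future_steps).round(1))  # keeps climbing: 200, 202, 204, ...
print(tree.predict(future_steps).round(1))    # flat at the last training value: 198
```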
In addition to creating a linear regression + XGBoost hybrid, we'll also see how to set up a time series dataset for use with XGBoost.from pathlib import Path from warnings import simplefilter import matplotlib.pyplot as plt import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from statsmodels.tsa.deterministic import CalendarFourier, DeterministicProcess from xgboost import XGBRegressor simplefilter("ignore") # Set Matplotlib defaults plt.style.use("seaborn-whitegrid") plt.rc( "figure", autolayout=True, figsize=(11, 4), titlesize=18, titleweight='bold', ) plt.rc( "axes", labelweight="bold", labelsize="large", titleweight="bold", titlesize=16, titlepad=10, ) plot_params = dict( color="0.75", style=".-", markeredgecolor="0.25", markerfacecolor="0.25", ) data_dir = Path("../input/ts-course-data/") industries = ["BuildingMaterials", "FoodAndBeverage"] retail = pd.read_csv( data_dir / "us-retail-sales.csv", usecols=['Month'] + industries, parse_dates=['Month'], index_col='Month', ).to_period('D').reindex(columns=industries) retail = pd.concat({'Sales': retail}, names=[None, 'Industries'], axis=1) retail.head()First let's use a linear regression model to learn the trend in each series. For demonstration, we'll use a quadratic (order 2) trend. (The code here is basically the same as that in previous lessons.) Though the fit isn't perfect, it will be enough for our needs.y = retail.copy() # Create trend features dp = DeterministicProcess( index=y.index, # dates from the training data constant=True, # the intercept order=2, # quadratic trend drop=True, # drop terms to avoid collinearity ) X = dp.in_sample() # features for the training data # Test on the years 2016-2019. It will be easier for us later if we # split the date index instead of the dataframe directly. idx_train, idx_test = train_test_split( y.index, test_size=12 * 4, shuffle=False, ) X_train, X_test = X.loc[idx_train, :], X.loc[idx_test, :] y_train, y_test = y.loc[idx_train], y.loc[idx_test] # Fit trend model model = LinearRegression(fit_intercept=False) model.fit(X_train, y_train) # Make predictions y_fit = pd.DataFrame( model.predict(X_train), index=y_train.index, columns=y_train.columns, ) y_pred = pd.DataFrame( model.predict(X_test), index=y_test.index, columns=y_test.columns, ) # Plot axs = y_train.plot(color='0.25', subplots=True, sharex=True) axs = y_test.plot(color='0.25', subplots=True, sharex=True, ax=axs) axs = y_fit.plot(color='C0', subplots=True, sharex=True, ax=axs) axs = y_pred.plot(color='C3', subplots=True, sharex=True, ax=axs) for ax in axs: ax.legend([]) _ = plt.suptitle("Trends")While the linear regression algorithm is capable of multi-output regression, the XGBoost algorithm is not. To predict multiple series at once with XGBoost, we'll instead convert these series from *wide* format, with one time series per column, to *long* format, with series indexed by categories along rows.# The `stack` method converts column labels to row labels, pivoting from wide format to long X = retail.stack() # pivot dataset wide to long display(X.head()) y = X.pop('Sales') # grab target seriesSo that XGBoost can learn to distinguish our two time series, we'll turn the row labels for `'Industries'` into a categorical feature with a label encoding. 
We'll also create a feature for annual seasonality by pulling the month numbers out of the time index.# Turn row labels into categorical feature columns with a label encoding X = X.reset_index('Industries') # Label encoding for 'Industries' feature for colname in X.select_dtypes(["object", "category"]): X[colname], _ = X[colname].factorize() # Label encoding for annual seasonality X["Month"] = X.index.month # values are 1, 2, ..., 12 # Create splits X_train, X_test = X.loc[idx_train, :], X.loc[idx_test, :] y_train, y_test = y.loc[idx_train], y.loc[idx_test]Now we'll convert the trend predictions made earlier to long format and then subtract them from the original series. That will give us detrended (residual) series that XGBoost can learn.# Pivot wide to long (stack) and convert DataFrame to Series (squeeze) y_fit = y_fit.stack().squeeze() # trend from training set y_pred = y_pred.stack().squeeze() # trend from test set # Create residuals (the collection of detrended series) from the training set y_resid = y_train - y_fit # Train XGBoost on the residuals xgb = XGBRegressor() xgb.fit(X_train, y_resid) # Add the predicted residuals onto the predicted trends y_fit_boosted = xgb.predict(X_train) + y_fit y_pred_boosted = xgb.predict(X_test) + y_predThe fit appears quite good, though we can see how the trend learned by XGBoost is only as good as the trend learned by the linear regression -- in particular, XGBoost wasn't able to compensate for the poorly fit trend in the `'BuildingMaterials'` series.axs = y_train.unstack(['Industries']).plot( color='0.25', figsize=(11, 5), subplots=True, sharex=True, title=['BuildingMaterials', 'FoodAndBeverage'], ) axs = y_test.unstack(['Industries']).plot( color='0.25', subplots=True, sharex=True, ax=axs, ) axs = y_fit_boosted.unstack(['Industries']).plot( color='C0', subplots=True, sharex=True, ax=axs, ) axs = y_pred_boosted.unstack(['Industries']).plot( color='C3', subplots=True, sharex=True, ax=axs, ) for ax in axs: ax.legend([])Slides for 2018-06-22 Meetingimport os import sys from pathlib import Path from IPython.display import display, HTML, Markdown import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns # Project level imports from larval_gonad.notebook import Nb from larval_gonad.scRNAseq import fe_tsne, raw_data, norm_data from larval_gonad.normalization import tpm, rpkm # Setup notebook nbconfig = Nb.setup_notebook(seurat_dir='../output/scrnaseq-wf/scrnaseq_combine_force/') gs = plt.GridSpec(2, 3, width_ratios=[2, 1, 1]) axDia = plt.subplot(gs[0, 0]) axTsne = plt.subplot(gs[1, 0]) axAll = plt.subplot(gs[:, 1]) axDiff = plt.subplot(gs[:, 2]) fe_tsne(seurat_dir=nbconfig.seurat_dir)Build large data table.writer = pd.ExcelWriter('../output/2018-06-22_biomarkers_and_cnts_tables.xlsx') cell_format = writer.book.add_format({'valign': 'top'}) cell_format.set_text_wrap() sheet = writer.book.add_worksheet('README') writer.sheets['README'] = sheet comment = """\ Each cluster is on a separate worksheet. Genes with missing counts were not captured by the experiment. 
""" sheet.set_column(0, 0, width=30, cell_format=cell_format) sheet.set_column(1, 1, width=50, cell_format=cell_format) sheet.set_row(0, height=50, cell_format=cell_format) sheet.merge_range('A1:C1', comment); col_desc = { 'FBgn': 'FlyBase Gene ID', 'gene_symbol': 'Gene Symbol', 'chrom': 'Chromosomal Arm', 'p_val': 'Seurat FindMarkers p-value', 'p_val_adj': 'FDR Adjusted p-value', 'avg_logFC': 'Average log Fold Change', 'Prop Cells in Cluster 0': 'Proportion of Cells in cluster 0 with gene expressed.', 'Prop Cells in Other Clusters': 'Proportion of Cells in all other clusters with gene expressed.', 'Total Read Cnts': 'Sum of raw read counts across cells', 'Median Scaled Read Cnts': 'Sum of raw read counts across cells scaled by median total read count across genes', 'TPM Read Cnts': 'TPM normalized read count', 'RPKM Read Cnts': 'RPKM normalized read count', } _desc = pd.Series(col_desc).to_frame() _desc.index.name = 'Column Name' _desc.columns = ['Column Description'] _desc.to_excel(writer, sheet_name='README', startrow=1) # Gene annotation with symbols gene_annot = pd.Series(nbconfig.fbgn2symbol) gene_annot.name = 'gene_symbol' gene_annot = gene_annot.to_frame().join(nbconfig.fbgn2chrom) # Biomarkers biomarker = nbconfig.seurat.get_biomarkers('res.0.6') biomarker.drop('gene_symbol', axis=1, inplace=True) for g, dd in biomarker.groupby('cluster'): raw = raw_data(nbconfig.seurat_dir, cluster=g) total_cnts = raw.sum(axis=1) total_cnts.name = 'Total Read Cnts' scaled_cnts = total_cnts / total_cnts.median() scaled_cnts.name = 'Median Scaled Read Cnts' gene_lens = pd.read_csv('../output/gene_ts_lengths.tsv', sep='\t', index_col='FBgn').gene_ts_length tpm_cnts = tpm(total_cnts.to_frame(), gene_lens).dropna().iloc[:, 0] tpm_cnts.name = 'TPM Read Cnts' rpkm_cnts = rpkm(total_cnts.to_frame(), gene_lens).dropna().iloc[:, 0] rpkm_cnts.name = 'RPKM Read Cnts' bio = dd.drop('cluster', axis=1)[['p_val', 'p_val_adj', 'avg_logFC', 'pct.1', 'pct.2']] bio.rename({'pct.1': f'Per Cells in Cluster {g}', 'pct.2': 'Per Cells in Other Clusters'}, axis=1, inplace=True) _dat = gene_annot.join(bio, how='left').join([total_cnts, scaled_cnts, tpm_cnts, rpkm_cnts], how='left') _dat.index.name = 'FBgn' _dat.set_index(['gene_symbol', 'chrom'], append=True, inplace=True) _dat.to_excel(writer, sheet_name=f'{nbconfig.CLUSTER_ANNOT[g]}') writer.save()EWMA: Exponential Weighted Moving Averageimport pandas as pd import numpy as np %matplotlib inline airline = pd.read_csv('../Data/airline_passengers.csv',index_col='Month',parse_dates=True) airline['6-month-SMA'] = airline['Thousands of Passengers'].rolling(window=6).mean() airline['12-month-SMA'] = airline['Thousands of Passengers'].rolling(window=12).mean() airline.plot();EWMA is better than basic SMA since: SMA's smaller windows leads to greater noise SMA can never reach full peak or actual value SMA can not inform about the possible future, but just describes trends SMA can highly be skewed In contrast, EWMA pays more weight, i.e. 
significance to the recent values The formula for EWMA is: $y_t = \frac{\sum\limits_{i=0}^t w_i x_{t-i}}{\sum\limits_{i=0}^t w_i}$ In EWMA α refers to smoothing factor\begin{split}\alpha = \begin{cases} \frac{2}{s + 1}, & \text{for span}\ s \geq 1\\ \frac{1}{1 + c}, & \text{for center of mass}\ c \geq 0\\ 1 - \exp^{\frac{\log 0.5}{h}}, & \text{for half-life}\ h > 0 \end{cases}\end{split} * Span corresponds to what is commonly called an “N-day EW moving average”.* Center of mass has a more physical interpretation and can be thought of in terms of span: $c=(s−1)/2$* Half-life is the period of time for the exponential weight to reduce to one half.* Alpha specifies the smoothing factor directly.We have to pass precisely one of the above into the .ewm() function. For our data we'll use span=12.airline['EWMA12'] = airline['Thousands of Passengers'].ewm(span=12,adjust=False).mean() airline[['Thousands of Passengers','EWMA12']].plot();Comparing SMA to EWMAairline[['Thousands of Passengers','EWMA12','12-month-SMA']].plot(figsize=(12,8)).autoscale(axis='x',tight=True);Fashion MINST datasetfrom torchvision.datasets import FashionMNIST # Getting the train and test parts of the dataset data_train = FashionMNIST("FashionMNIST/", download=True, train=True) data_test = FashionMNIST("FashionMNIST/", download=True, train=False) # In fact, it's already stored as torch tensor, but we'll need # to work with the numpy representation, so let's do the convertion: X_train = data_train.train_data.numpy() y_train = data_train.train_labels.numpy() X_test = data_test.test_data.numpy() y_test = data_test.test_labels.numpy()/usr/local/lib/python3.6/dist-packages/torchvision/datasets/mnist.py:53: UserWarning: train_data has been renamed data warnings.warn("train_data has been renamed data") /usr/local/lib/python3.6/dist-packages/torchvision/datasets/mnist.py:43: UserWarning: train_labels has been renamed targets warnings.warn("train_labels has been renamed targets") /usr/local/lib/python3.6/dist-packages/torchvision/datasets/mnist.py:58: UserWarning: test_data has been renamed data warnings.warn("test_data has been renamed data") /usr/local/lib/python3.6/dist-packages/torchvision/datasets/mnist.py:48: UserWarning: test_labels has been renamed targets warnings.warn("test_labels has been renamed targets")The datasets consists of images belonging to one out of 10 classes:| Label | Description | Label | Description || --- | --- | --- |--- || 0 | T-shirt/top | 5 | Sandal || 1 | Trouser | 6 | Shirt || 2 | Pullover | 7 | Sneaker || 3 | Dress | 8 | Bag || 4 | Coat | 9 | Ankle boot |categories = [ X_train[y_train == i] for i in range(10) ] ten_of_each = np.array([c[:10] for c in categories]) ten_of_each = np.transpose(ten_of_each, (0, 2, 1, 3)).reshape(280, 280) plt.figure(figsize=(10, 10)) plt.imshow(ten_of_each, cmap='hot') plt.axis('off');Input preprocessing So far our data is held as numpy arrays of unsigned byte type, i.e. it lies within a range from 0 to 255. Also, the shape of our input is 3-dimensional (num_images, height, width), while our `model` takes 2-dimensional "arrays of 1-dimensional images" (num_images, height * width). We have to convert that to `torch` tensors and reshape the input. Also, it's a good idea to normalize your image data to lie within a $[0, 1]$ interval. 
Let's write a function that does all these things:# Write a function to convert X and y to torch tensors while # rescaling X to fit into [0, 1] interval and reshaping it properly # Hint: make sure your input tensor dtype is same as the # parameters of the model (should be torch.float) def preprocess_data(X, y): X_preprocessed = torch.reshape(torch.from_numpy(X/ 255.).type(torch.float32), (-1, 784)) y_preprocessed = torch.from_numpy(y) return X_preprocessed, y_preprocessedSome utilities# Batch generator # (here's a very brief description of what python generators are: # https://stackoverflow.com/a/231855/3801744) def get_batches(X, y, batch_size, shuffle=False): if shuffle: shuffle_ids = np.random.permutation(len(X)) X = X[shuffle_ids].copy() y = y[shuffle_ids].copy() for i_picture in range(0, len(X), batch_size): # Get batch and preprocess it: batch_X = X[i_picture:i_picture + batch_size] batch_y = y[i_picture:i_picture + batch_size] # 'return' the batch (see the link above to # better understand what 'yield' does) yield preprocess_data(batch_X, batch_y) def get_test_predictions(model, batch_size=100): predictions_test = np.concatenate([ model(batch_X).to('cpu').detach().numpy() for batch_X, batch_y in get_batches(X_test, y_test, batch_size) ], axis=0) return np.argmax(predictions_test, axis=1)Main training loopdef train_and_log(experiment, *args, **kwargs): experiment.log_parameters(kwargs) accuracy, loss = train_model(*args, **kwargs) experiment.log_metrics({'accuracy': accuracy, 'loss': loss}) return accuracy def train_model(n_epochs, batch_size=100, learning_rate=0.001, hidden_size=100): # Defining the model input_size = 28 * 28 # number of pixels per image output_size = 10 # number of classes model = torch.nn.Sequential( torch.nn.Linear(input_size, hidden_size), torch.nn.ELU(), torch.nn.Linear(hidden_size, output_size), ) # Defining the loss function: loss_function = torch.nn.CrossEntropyLoss() # Setting up the optimizer optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) # Train / eval loop for i_epoch in range(n_epochs): for batch_X, batch_y in get_batches(X_train, y_train, batch_size=batch_size, shuffle=True): # Compute the loss, zero the gradients, and make an optimization step predictions = model(batch_X) # compute the predictions loss = loss_function(predictions, batch_y) # compute the loss model.zero_grad() # zero the gradients loss.backward() # compute new gradients optimizer.step() # do an optimization step for batch_X, batch_y in get_batches(X_test, y_test, batch_size=batch_size): # Compute the loss predictions = model(batch_X) # compute the predictions loss = loss_function(predictions, batch_y) # compute the loss accuracy = accuracy_score(get_test_predictions(model), y_test) return accuracy, loss.item() from getpass import getpass api_key = getpass("Key: ") experiment = Experiment(api_key=api_key, project_name="comet FMNIST", workspace="anaderi-demo") n_epochs = 6 learning_rate = 0.0005 hidden_size = 150 score = train_and_log(experiment, n_epochs=n_epochs, learning_rate=learning_rate, hidden_size=hidden_size) print ("Score: {}".format(score)) experiment.end()Score: 0.8626Let's examine how it looks at comet.mlexperiment.display()Comet.ml logging capabilitiesprint ("\n".join([s for s in experiment.__dir__() if s.startswith("log_")]))log_code log_graph log_env_details log_git_metadata log_git_patch log_env_gpu log_env_cpu log_env_host log_other log_others log_dependency log_system_info log_html log_html_url log_epoch_end log_metric log_parameter log_figure 
log_asset_data log_asset_folder log_asset log_audio log_histogram_3d log_image log_current_epoch log_parameters log_metrics log_dataset_info log_dataset_hashLet's see how does the score depend on the number of hidden size parameterfor hidden_size in [50, 150, 200]: experiment = Experiment(api_key=api_key, project_name="comet FMNIST") score = train_and_log(experiment, n_epochs=n_epochs, learning_rate=learning_rate, hidden_size=hidden_size) experiment.end()COMET INFO: Experiment is live on comet.ml https://www.comet.ml/anaderi-demo/comet-fmnist/88ef4616caa7465d96cfcf89e5811aca COMET INFO: ---------------------------- COMET INFO: Comet.ml Experiment Summary: COMET INFO: Data: COMET INFO: url: https://www.comet.ml/anaderi-demo/comet-fmnist/88ef4616caa7465d96cfcf89e5811aca COMET INFO: Metrics [count] (min, max): COMET INFO: accuracy : (0.8541, 0.8541) COMET INFO: loss : (0.4697524607181549, 0.4697524607181549) COMET INFO: sys.cpu.percent.01 : (8.7, 8.7) COMET INFO: sys.cpu.percent.02 : (7.8, 7.8) COMET INFO: sys.cpu.percent.avg : (8.25, 8.25) COMET INFO: sys.gpu.0.free_memory : (11985420288.0, 11985420288.0) COMET INFO: sys.gpu.0.gpu_utilization: (0.0, 0.0) COMET INFO: sys.gpu.0.total_memory : (11996954624.0, 11996954624.0) COMET INFO: sys.gpu.0.used_memory : (11534336.0, 11534336.0) COMET INFO: sys.ram.total : (136552284[...]Using neural nets to solve an optimal control problem: A simple example by [](https://sites.google.com/site/mahdiebrahimikahou/)Here I create an animation that shows the time evolution of the neural network during the optimization.If you want a full description of the problem and algorithm see: [The other file](https://github.com/Mekahou/Fun-Stuff/blob/main/codes/linear%20quadratic%20DP%20DNN/3.%20LQ_DP_DNN_Training_Main.ipynb)import quantecon as qe import quantecon.lqcontrol as LQ import numpy as np import matplotlib.pyplot as plt class Exact_sol: def __init__(self, beta = 0.95, alpha_0 = 1.0, alpha_1 = 2.0, gamma = 90.0, h_0 = 0.03, h_1 = 0.94, time = 63, y_0 = 0.1, Y_0 = 0.2 ): self.beta = beta self.alpha_0 = alpha_0 self.alpha_1 = alpha_1 self.gamma = gamma self.h_0 = h_0 self.h_1= h_1 self.time = time self.y_0 = y_0 self.Y_0 = Y_0 self.R = np.matrix([[0.0,-self.alpha_0/2 , 0.0],[-self.alpha_0/2, 0.0, self.alpha_1/2],[0.0,self.alpha_1/2, 0.0]]) self.Q = self.gamma/2 self.A = np.matrix([[1.0,0.0,0.0],[0.0,1.0,0.0],[self.h_0,0.0, self.h_1]]) self.B = np.matrix([[0.0],[1.0],[0.0]]) self.lq = LQ.LQ(self.Q, self.R, self.A, self.B, beta=self.beta) self.P, self.F, self.d = self.lq.stationary_values() self.x_0 = np.matrix([[1.0],[self.y_0],[self.Y_0]]) self.dynamics = self.lq.compute_sequence(self.x_0,self.time) self.ones = self.dynamics[0][0].reshape([self.time+1,1]) self.y_t = self.dynamics[0][1].reshape([self.time+1,1]) self.Y_t = self.dynamics[0][2].reshape([self.time+1,1]) self.x_t = np.concatenate((self.ones,self.y_t,self.Y_t), axis=1).T self.u_t = -np.dot(self.F,self.x_t).T import torch import torch.nn as nn import torch.nn.functional as F class NN(nn.Module): def __init__(self, dim_hidden = 128, layers = 4, hidden_bias = True): super().__init__() self.dim_hidden= dim_hidden self.layers = layers self.hidden_bias = hidden_bias torch.manual_seed(1) module = [] module.append(nn.Linear(1,self.dim_hidden, bias = self.hidden_bias)) module.append(nn.ReLU()) for i in range(self.layers-1): module.append(nn.Linear(self.dim_hidden,self.dim_hidden, bias = self.hidden_bias)) module.append(nn.ReLU()) module.append(nn.Linear(self.dim_hidden,1)) self.u = nn.Sequential(*module) def 
forward(self, x): u_out = self.u(x) return u_out class Data: def __init__(self, beta = 0.95, alpha_0 = 1.0, alpha_1 = 2.0, gamma = 90.0, h_0 = 0.03, h_1 = 0.94, time = 64, Y_0 = 0.2, batch_size = 4 ): self.beta = beta self.alpha_0 = alpha_0 self.alpha_1 = alpha_1 self.gamma = gamma self.h_0 = h_0 self.h_1= h_1 self.time = time self.Y_0 = Y_0 self.batch_size = batch_size self.Y_t = torch.zeros([self.time]) self.Y_t[0] = self.Y_0 for t in range(self.time-1): self.Y_t[t+1] = self.h_0 + self.h_1*self.Y_t[t] self.Y_prime_t = self.h_0 + self.h_1*self.Y_t self.train_data = torch.stack((self.Y_t,self.Y_prime_t),1) from torch.utils.data import Dataset, DataLoader class Data_loader(Dataset): def __init__(self,data): self.data = data self.Y = self.data[:,[0]] self.Y_prime = self.data[:,1:] self.n_samples = self.data.shape[0] def __getitem__(self,index): return self.Y[index], self.Y_prime[index] # order: Y first, then Y_prime def __len__(self): return self.n_samples data_set = Data().train_data data_label = Data_loader(data = data_set) batch_size = Data().batch_size train = DataLoader(dataset = data_label, batch_size = batch_size, shuffle = True) α_0 = Data().alpha_0 α_1 = Data().alpha_1 γ = Data().gamma β = Data().beta max_epochs = 50 u_hat = NN() learning_rate = 1e-2 optimizer = torch.optim.Adam(u_hat.parameters(), lr=learning_rate, weight_decay=0.0) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5) #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor = 0.1, patience = 20, threshold = 0.0 , cooldown = 1 ) u_hat.eval() Y_t = Data().train_data[:,[0]] approx_sol = u_hat(Y_t).detach() exact_sol = Exact_sol().u_t plt.plot(exact_sol, "--", color='r', label= r"Exact Solution") plt.plot(approx_sol,label= r"Approximate Solution" ) plt.ylabel(r"$u$") plt.xlabel(r"Time(t)") plt.tight_layout() plt.legend(loc='best') plt.show() result = torch.zeros([Data().time,200]) #The evolution of the neural net during the optimization will be saved here i = 0 Y_t = Data().train_data[:,[0]] for epoch in range(max_epochs): for index, (Y,Y_prime) in enumerate(train): euler_res = γ*u_hat(Y) - β*( γ*u_hat(Y_prime) + α_0 - α_1* Y_prime ) loss = euler_res.pow(2).mean() if index % 5 == 0: result[:,i] = u_hat(Y_t).detach().squeeze() i = i+1 optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() #if epoch % 1 == 0: # print("epoch:",epoch, ",","MSE Euler Residuals:","{:.2e}".format(loss.item())) u_hat.eval() Y_t = Data().train_data[:,[0]] approx_sol = u_hat(Y_t).detach() exact_sol = Exact_sol().u_t plt.plot(exact_sol, "--", color='r', label= r"Exact Solution") plt.plot(approx_sol,label= r"Approximate Solution" ) plt.ylabel(r"$u$") plt.xlabel(r"Time(t)") plt.tight_layout() plt.legend(loc='best') plt.show()Creating the animationimport os import numpy as np import matplotlib.pyplot as plt import imageio fontsize= 16 ticksize = 14 figsize = (6, 3.5) params = {"text.usetex": True, 'font.family':'serif', "figure.figsize":figsize, 'figure.dpi': 80, 'figure.edgecolor': 'k', 'font.size': fontsize, 'axes.labelsize': fontsize, 'axes.titlesize': fontsize, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize } plt.rcParams.update(params) filenames = [] for i in range(result.shape[1]): # plot the line chart plt.rcParams.update(params) plt.plot(result[:,i],label= r"Approximate Solution" ) plt.plot(Exact_sol().u_t,"--",color='r', label = r"Exact Solution") plt.ylabel(r"$u(t)$") plt.xlabel(r"Time(t)") plt.legend(loc='upper right') plt.ylim(0.0,0.06) plt.xlim(0,64) 
plt.tight_layout() # create file name and append it to a list filename = f'{i}.png' filenames.append(filename) # save frame plt.savefig(filename) plt.close()# build gif with imageio.get_writer('u_hat.gif', mode='I') as writer: for filename in filenames: image = imageio.imread(filename) writer.append_data(image) # Remove files for filename in set(filenames): os.remove(filename)Important prerequisite1. The package doesn't work with the newest version of Jupyter Notebook, run the following commands in your terminal before initiating the Notebook2. pip install notebook==5.7.53. pip install tornado==4.5.3 Install dependencies Don't worry about the error messages during installation, you will be fine.# uncomment the following line if you haven't installed bte_schema # !pip install git+https://github.com/kevinxin90/bte_schema#egg=bte_schema # uncomment the following line if you haven't installed biothings_schema #pip install git+https://github.com/biothings/biothings_schema.py#egg=biothings_schema.pyInitiating the package# import the query module from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher # import the hint module (suggest hits based on your input) from biothings_explorer.hint import Hint # import the registry module from biothings_explorer.registry import Registry reg = Registry() ht = Hint()Feature 1: Single Hop QueryQuery from a specific biological entity (e.g. entrezgene 1017) to other semantic types (e.g. ChemicalSubstance) The example below will query all ChemicalSubstances related to Entrezgene 1017 Step1: Get your input# use the hint the module to let BioThings Explorer suggest the inputs for you a = ht.query('1019') # the output of the hint module is grouped by semantic types a # select your input object input_obj = a['Gene'][0] input_objStep2: Make Single Hop Query A couple required parameters in order to perform the query:1. input_obj: This is the object which serves as the start point of your query.2. output_cls: This is the output class which serves as the end point of your query. If you don't specify this parameter, this tool will automatically search all possible output classes for you.3. output_id: This is optional. The identifier type you want the output to be.4. pred: This is also optional. The predicate linking your input and output. If you don't specify, this tool will automatically search all possible linkages.seqd = SingleEdgeQueryDispatcher(input_obj=input_obj, output_cls='ChemicalSubstance', output_id='bts:chembl', registry=reg) seqd.query()Step3: Understand the results The result of the query would be a networkx MultiDiGraph connecting from your input to all possible outputs. Show results in JSON format nodes: All nodes in the graphlinks: All edges in the graphseqd.to_json()List all output idsseqd.output_idsDig deep into the network# List all nodes seqd.show_all_nodes() # list all edges seqd.show_all_edges() # see details of a specific edge seqd.display_edge_info('1019', 'CHEMBL448') # see details of a specific node seqd.display_node_info('CHEMBL448')(Optional) Do it the hard way You can still use this tool without the hint module. In this case, specify your parameters as the following:1. input_cls: required, The semantic type of your input2. input_id: required, The identifier type of your input3. values: required, The input value4. output_cls: required, The semantic type of your output5. 
output_id: optional, the identifier type of your outputseqd = SingleEdgeQueryDispatcher(input_cls="Gene", input_id="bts:entrez", values="1019", output_cls="ChemicalSubstance", output_id="bts:chembl", registry=reg) seqd.query() seqd.G.nodes()Feature 2: Multi Hop Query Query from a specific biological entity (e.g. entrezgene 1017) to other semantic types (e.g. ChemicalSubstance) through multiple hops, e.g. ChemicalSubstance -> Gene -> Disease# Initiate the multi hop module from biothings_explorer.user_query_dispatcher import MultiEdgeQueryDispatcherThe tutorial below create edges from Riluzole(ChemicalSubstance) to Gene, then to Disease, which is to first find genes related to riluzole, and then find diseases related to genes Step1: Decide on your inputa = ht.query('riluzole') a input_obj = a['ChemicalSubstance'][0] input_objStep2: Construct your edges the format of edges should be like [(subject1, pred1, object1), (subject2, pred2, object2), ...]The object of an edge should always be the same as the subject of its proceeding edge Note: You can leave pred as None. In this case, the tool will search for all potential edgesedges = [('ChemicalSubstance', None, 'Gene'), ('Gene', None, 'DiseaseOrPhenotypicFeature')]Step3: Execute the querymeqd = MultiEdgeQueryDispatcher(input_obj=input_obj, edges=edges, registry=reg) meqd.query()start to query for associations between ChemicalSubstance and Gene... finished! Find 23 hits. start to query for associations between Gene and DiseaseOrPhenotypicFeature... finished! Find 741 hits.Step4: Explore the results# show all nodes in the graph meqd.show_all_nodes() # Display the path connecting two nodes in the graph meqd.show_path('CHEMBL744', 'MONDO:0011376') # Display detailed edge information meqd.display_edge_info('CHEMBL744', '6331') # display detailed edge information meqd.display_edge_info('6331', 'MONDO:0011376') meqd.display_node_info('MONDO:0011376')Feature3: Discover connections between two bio-entitiesFind connections between two bio-entities through one or more intermediate nodes# initialize the connect module from biothings_explorer.user_query_dispatcher import ConnectStep 1: Decide on your input and output# search for riluzole a = ht.query("riluzole") a # select the input object from the hint results input_obj = a['ChemicalSubstance'][0] input_obj # search for "Amyotrophic Lateral Sclerosis" b = ht.query("Amyotrophic Lateral Sclerosis") b # select the output object from the hint results output_obj = b['DiseaseOrPhenotypicFeature'][0] output_objStep2: Find connections between your input and outputcc = Connect(input_obj=input_obj, output_obj=output_obj, registry=reg) cc.connect()processing step 1 ... processing step 2 ... 
query completed Find connectionStep3: Find how your input and output is connected# find the path connecting from your input to output cc.show_path() # show detailed edge information cc.display_edge_info(start_node='CHEMBL744', end_node="6332") # show detailed edge information cc.display_edge_info(start_node="6332", end_node="MONDO:0004976") # return the graph connectin from input to output in JSON cc.to_json()!pip install transformers --quiet from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch import pandas as pd tokenizer =AutoTokenizer.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') model = AutoModelForSequenceClassification.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') Foodpanda_reviews = pd.read_csv('Foodpanda_reviews.csv',error_bad_lines=False) Foodpanda_reviews.head() def sentiment_score(review): tokens = tokenizer.encode(review, return_tensors='pt') result = model(tokens) return int(torch.argmax(result.logits))+1 Foodpanda_reviews['sentiment'] = Foodpanda_reviews['review'].apply(lambda x: sentiment_score(x[:512])) Foodpanda_reviews.head(50)1. Load data# Load csv file df_train = pd.read_csv('../data_csv/aug_train.csv') df_test = pd.read_csv('../data_csv/aug_test.csv') test_target = np.load('../data_csv/jobchange_test_target_values.npy') test_target = pd.DataFrame(test_target,columns=['target']) test_target['enrollee_id'] = df_test['enrollee_id'] test_target = test_target[['enrollee_id','target']] test_target.to_csv('../data_csv/test_target.csv') # test_target = pd.read_csv('../data_csv/test_target.csv') test_target # df # Check each column # terminal install: conda install -c conda-forge pandas-profiling from pandas_profiling import ProfileReport as pr profile = pr(df_train, minimal=True).to_notebook_iframe()2. 
Examine and impute missing valuesdf_train.info() # Pairplot sns.pairplot(df_train, corner=True, height=1.5, plot_kws={'size': 3}, hue='target'); # Examine data df_train['company_type'].value_counts() df_train['enrolled_university'].value_counts() df_train['education_level'].value_counts() df_train['experience'].value_counts() df_train['company_size'].value_counts() df_train['company_type'].value_counts() df_train['last_new_job'].value_counts() # Replace string with float/int df_train['experience'] = df_train['experience'].replace('>20','25') df_train['experience'] = df_train['experience'].replace('<1','0.5') df_train['experience'] = df_train['experience'].astype('float') df_train['last_new_job'] = df_train['last_new_job'].replace('>4','5') df_train['last_new_job'] = df_train['last_new_job'].replace('never','0') # Impute/fill NaN df_train['gender'] = df_train['gender'].replace(np.nan, 'unknown') df_train['enrolled_university'] = df_train['enrolled_university'].replace(np.nan, 'unknown') df_train['education_level'] = df_train['education_level'].replace(np.nan, 'unknown') df_train['major_discipline'] = df_train['major_discipline'].replace(np.nan, 'unknown') df_train['education_level'] = df_train['education_level'].replace(np.nan, 'unknown') df_train['experience'] = df_train['experience'].fillna(value = df_train['experience'].median()) df_train['company_size'] = df_train['company_size'].fillna(value = df_train['company_size'].value_counts().index[0]) df_train['company_type'] = df_train['company_type'].replace(np.nan, 'unknown') df_train['last_new_job'] = df_train['last_new_job'].fillna(value = df_train['last_new_job'].median()).astype('int') df_train['target'] = df_train['target'].astype('int') df_train.info() RangeIndex: 19158 entries, 0 to 19157 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 enrollee_id 19158 non-null int64 1 city 19158 non-null object 2 city_development_index 19158 non-null float64 3 gender 19158 non-null object 4 relevent_experience 19158 non-null object 5 enrolled_university 19158 non-null object 6 education_level 19158 non-null object 7 major_discipline 19158 non-null object 8 experience 19158 non-null float64 9 company_size 19158 non-null object 10 company_type 19158 non-null object 11 last_new_job 19158 non-null int64 12 training_hours 19158 non-null int64 13 target 19158 non-null int64 dtypes: float64(2), int64(4), object(8) mem[...]3. Pickledf_train.to_pickle('../dump/df_train.csv')4. 
Repeat for test set Examine and impute missing valuesdf_test['target'] = test_target df_test.info() # Pairplot sns.pairplot(df_test, corner=True, height=1.5, plot_kws={'size': 3}, hue='target'); # Examine data df_train['company_type'].value_counts() df_train['enrolled_university'].value_counts() df_train['education_level'].value_counts() df_train['experience'].value_counts() df_train['company_size'].value_counts() df_train['company_type'].value_counts() df_train['last_new_job'].value_counts() # Replace string with float/int df_test['experience'] = df_test['experience'].replace('>20','25') df_test['experience'] = df_test['experience'].replace('<1','0.5') df_test['experience'] = df_test['experience'].astype('float') df_test['last_new_job'] = df_test['last_new_job'].replace('>4','5') df_test['last_new_job'] = df_test['last_new_job'].replace('never','0') # Impute/fill NaN df_test['gender'] = df_test['gender'].replace(np.nan, 'unknown') df_test['enrolled_university'] = df_test['enrolled_university'].replace(np.nan, 'unknown') df_test['education_level'] = df_test['education_level'].replace(np.nan, 'unknown') df_test['major_discipline'] = df_test['major_discipline'].replace(np.nan, 'unknown') df_test['education_level'] = df_test['education_level'].replace(np.nan, 'unknown') df_test['experience'] = df_test['experience'].fillna(value = df_test['experience'].median()) df_test['company_size'] = df_test['company_size'].fillna(value = df_test['company_size'].value_counts().index[0]) df_test['company_type'] = df_test['company_type'].replace(np.nan, 'unknown') df_test['last_new_job'] = df_test['last_new_job'].fillna(value = df_test['last_new_job'].median()).astype('int') df_test['target'] = df_test['target'].astype('int') df_test.info() RangeIndex: 2129 entries, 0 to 2128 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 enrollee_id 2129 non-null int64 1 city 2129 non-null object 2 city_development_index 2129 non-null float64 3 gender 2129 non-null object 4 relevent_experience 2129 non-null object 5 enrolled_university 2129 non-null object 6 education_level 2129 non-null object 7 major_discipline 2129 non-null object 8 experience 2129 non-null float64 9 company_size 2129 non-null object 10 company_type 2129 non-null object 11 last_new_job 2129 non-null int64 12 training_hours 2129 non-null int64 13 target 2129 non-null int64 dtypes: float64(2), int64(4), object(8) memor[...]3. Pickledf_test.to_pickle('../dump/df_test.csv')---- Getting started with Intel Quantum Simulator----Tutorial on the basic use of Intel QS through its Python interface.**NOTE:**Currently, the Python implementation only allows for single-core execution and does not take advantages of the MPI protocol.However the user can familiarize with the same functionalities available in the distributed implementation (only C++ at the moment) and the transition should be relatively straighforward since all methods maintain name and effect. Import Intel QS libraryLet's start by importing the Python library with the class and methods defined in the C++ implementation.# Import the Python library with the C++ class and methods of Intel Quantum Simulator. # If the library is not contained in the same folder of this notebook, its path has to be added. import sys sys.path.insert(0, '../build/lib') import intelqs_py as simulator # import numPy import numpy as np # Import graphical library for plots. 
import matplotlib.pyplot as pltInitialize the state of the quantum registerIQS stores a full representation of the quantum state in the computational basis.In practice, the quantum state of $N$ qubits is represented as a complex vector with $2^N$ components.Each component corresponds to the probability amplitude of a specific computational basis state:$$\psi(k) = \langle k | \psi \rangle$$with the index $k$ corresponding to the $N$-bit integer in decimal representation, and $k\in\{0,1,2,\dots,2^N-1\}$.----- First of all, one needs to allocate the memory to contain the state representation.- Then the quantum register has to be initialized, either to a specific computational basis state (using the keyword "base") or to a random state (using the keyword "rand").----NOTE: the random number generator is able to generate three different kinds of random numbers:- *local* --> different for each pool rank- *state* --> common to all ranks of the same state- *pool* --> common to all ranks of the pool# Number of qubits. num_qubits = 2; # Index of the computational basis state corresponding to |00>. index = 0; # Allocate memory for the quantum register's state and initialize it to |00>. psi = simulator.QubitRegister(num_qubits, "base", index, 0); # To initialize the state to a random vector, one first need a random number generator. # Create the random number generator, set its seed and then associate it to the IQS object 'psi'. rng = simulator.RandomNumberGenerator(); rng_seed = 7777; rng.SetSeedStreamPtrs( rng_seed ); psi.SetRngPtr(rng); # Initialize the state to a random state, this can be achieved with the codeword "rand" followed by 0 # if we desire to use *local* random numbers (this speed up the process of generating the random numbers). psi.Initialize("rand", 0);Display the quantum stateIt is important to be able to access and visualize the quantum state.IQS allows to access the single components of the state or to print a comprehensive description.What index is associated to state $|1011\rangle$?In decimal representation one has:$$1011 \rightarrow 1\times2^0 + 0\times2^1 + 1\times2^2 + 1\times2^3 = 1+4+8 = 13$$**NOTE:** contrary to what is adopted in decimal notation, our binary representation must be read from left to right (from least significant to most significant bit).# Initialize the state to |10>. # The index of |10> in decimal representation is 1. index = 1; psi.Initialize("base", index); # There are for amplitudes, corresponding to |00>, |10>, |01>, |11>. for index in range(0,2**num_qubits): amplitude = psi[index] print("psi({}) = <{}|psi> = {}".format(index,index,amplitude)) # A complete description of the state is provided by the method Print(). print("----") label = "Computational basis state |10>" psi.Print(label)psi(0) = <0|psi> = 0j psi(1) = <1|psi> = (1+0j) psi(2) = <2|psi> = 0j psi(3) = <3|psi> = 0j ---- <>One-qubit gatesIn the gate-model of quantum computation, one manipulates the quantum state by means of unitary transformations acting on one or two qubits. Let us apply a few of the standard one-qubit gates.# State was |10>. Let us re-prepare it: psi = simulator.QubitRegister(2, "base", 1, 0); # Flip the qubit 1 by applying the Pauli X gate: |10> ==> |11> qubit = 1; psi.ApplyPauliX(qubit); # Display all amplitudes. 
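# Note: IQS indexes amplitudes with qubit 0 as the least-significant bit, so after the Pauli-X on
# qubit 1 the register holds |11>, i.e. index k = 1*2^0 + 1*2^1 = 3; only psi(3) is expected to be
# nonzero in the printout below.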
print("Currently, |psi>=|11>:"); for index in range(0,2**num_qubits): print(" psi({}) = <{}|psi> = {}".format(index,index,psi[index])) print("----") # Apply the Hadamard gate on qubit 0: |11> ==> |-1> ~ |01>-|11> qubit = 0; psi.ApplyHadamard(qubit); # Display all amplitudes. print("Currently, |psi>=|-1>:"); for index in range(0,2**num_qubits): print(" psi({}) = <{}|psi> = {}".format(index,index,psi[index])) # Apply Pauli Z gate on qubit 1: |-1> ==> -|-1> psi.ApplyPauliZ(1); # Apply Pauli X gate on qubit 0: -|-1> ==> |-1> psi.ApplyPauliX(0);Currently, |psi>=|11>: psi(0) = <0|psi> = 0j psi(1) = <1|psi> = 0j psi(2) = <2|psi> = 0j psi(3) = <3|psi> = (1+0j) ---- Currently, |psi>=|-1>: psi(0) = <0|psi> = 0j psi(1) = <1|psi> = 0j psi(2) = <2|psi> = (0.7071067811865475+0j) psi(3) = <3|psi> = (-0.7071067811865475+0j)Two-qubit gatesTo achieve universal quantum computation, it is enought to implement one-qubit gates and a single type of two-qubit gate.The essential requirement is that such two-qubit gate is able to generate entanglement. Usually the controlled-not gate (CNOT in the following) is the operation of choice.IQS provides built-in methods to implement a much broader variety of two-qubit gates.# Currently, state is |-1>. # Apply a CNOT(1,0): flip qubit 0 conditioned on the state of qubit 1. # |-1> ==> -|-1> control = 1; target = 0; psi.ApplyCPauliX(control, target); # Display all amplitudes. print("Currently, |psi>=-|-1>:"); for index in range(0,2**num_qubits): print(" psi({}) = <{}|psi> = {}".format(index,index,psi[index])) print("----") # The application of the previous CNOT did not create any entanglement. # This is achieved by exchanging the role of control and target qubits. # Apply a CNOT(0,1): flip qubit 1 conditioned on the state of qubit 0. # -|-1> ~ -|01>+|11> ==> -|01>+|10> control = 0; target = 1; psi.ApplyCPauliX(control, target); # Display all amplitudes. print("Currently, |psi>=(|10>-|01>)/sqrt(2):"); for index in range(0,2**num_qubits): print(" psi({}) = <{}|psi> = {}".format(index,index,psi[index]))Currently, |psi>=-|-1>: psi(0) = <0|psi> = 0j psi(1) = <1|psi> = 0j psi(2) = <2|psi> = (-0.7071067811865475+0j) psi(3) = <3|psi> = (0.7071067811865475+0j) ---- Currently, |psi>=(|10>-|01>)/sqrt(2): psi(0) = <0|psi> = 0j psi(1) = <1|psi> = (0.7071067811865475+0j) psi(2) = <2|psi> = (-0.7071067811865475+0j) psi(3) = <3|psi> = 0jCustom gatesIf IQS does not provide the gates needed in your circuit, it is possible to implement custom one-qubit gates and controlled gates.# Define an arbitrary single qubit gate. # The quantum gate G is given by a 2x2 unitary matrix, here using a bi-dimensional NumPy array. G = np.zeros((2,2),dtype=np.complex_); G[0,0] = 0.592056606032915 + 0.459533060553574j; G[0,1] = -0.314948020757856 - 0.582328159830658j; G[1,0] = 0.658235557641767 + 0.070882241549507j; G[1,1] = 0.649564427121402 + 0.373855203932477j; # To verify that G is unitary, we will compute the norm of psi before and after the application of G. initial_norm = psi.ComputeNorm(); if initial_norm != 1: print("Even before the application of G, state psi had normalization {}".format(initial_norm)); # Apply the custom gate G to qubit 0. 
qubit = 0; psi.Apply1QubitGate(qubit,G); final_norm = psi.ComputeNorm(); if initial_norm != final_norm: print("The application of G changed the norm of state psi: from {} to {}".format(initial_norm,final_norm)); else: print("Sanity check: norm was unchanged by G."); # It is also possible to apply the arbitrary gate specified by G conditioned on the state of another qubit. # G is applied only when the control qubit is in |1>. control = 1; target = 0; psi.ApplyControlled1QubitGate( control, target, G); # Notice that this output is directed to the terminal and not re-directed to the iPython notebook. psi.Print("State of the quantum register after all gates.") print() # To display the amplitudes in the iPython notebook: for index in range(0,2**num_qubits): print("psi({}) = <{}|psi> = {}".format(index,index,psi[index]))<> psi(0) = <0|psi> = (-0.22270188119916148-0.41176819069214193j) psi(1) = <1|psi> = (0.4593114112350983+0.2643555498825341j) psi(2) = <2|psi> = (0.018860567095173378-0.09793842271642633j) psi(3) = <3|psi> = (-0.5361330884127283-0.45012626658938304j)Single-qubit measurementsTo extract information from the quantum register, one can obtain the probability of measuring a certain qubit in the computational basis and obtaining the outcome "1" (i.e. the state is in $|1\rangle$).Once the probability is known, one can draw a random number to simulate the stochastic outcome of the measurement and collapse the wavefunction accordingly.**NOTE:**Computing the probability of a certain outcome does not collapse automatically the wavefunction. This is helpful when the probabilities of multiple measurements have to be computed without re-executing the quantum simulation.# Compute the probability of qubit 1 being in state |1>. measured_qubit = 1; prob = psi.GetProbability( measured_qubit ); print("Probability that qubit {} is in state |1> is {}\n".format(measured_qubit, prob)); # Draw random number in [0,1) r = np.random.rand() if r < prob: # Collapse the wavefunction according to qubit 1 being in |1>. print("Simulated outcome is 1. Collapse the function accordingly.") psi.CollapseQubit(measured_qubit,True); else: # Collapse the wavefunction according to qubit 1 being in |0> print("Simulated outcome is 0. Collapse the function accordingly.") psi.CollapseQubit(measured_qubit,False); # In both cases one needs to re-normalize the wavefunction: psi.Normalize();Probability that qubit 1 is in state |1> is 0.4999999999999997 Simulated outcome is 0. Collapse the function accordingly.Expectation value of products of Pauli matricesTo extract information from the quantum register, one can obtain the expectation value of Pauli strings.For example, consider the Pauli string given by: $$X_0 \otimes id_1 \otimes Z_2 \otimes Z_3$$Such observable is defined by:- the position of the non-trivial Pauli matrices, in this case {0,2,3}- the corresponding Pauli matrices ($X$=1, $Y$=2, $Z$=3).To facilitate the verification of the expectation value, we reinitialize the quantum state to $|+-01\rangle$.We also consider the Pauli string $$X_0 \otimes id_1 \otimes Z_2 \otimes Y_3$$.# Prepare the state |+-01> num_qubits = 4; index = 0; psi = simulator.QubitRegister(num_qubits, "base", index, 0); psi.ApplyPauliX(1); psi.ApplyPauliX(3); psi.ApplyHadamard(0); psi.ApplyHadamard(1); print("psi is in state |+-01>\n"); # The Pauli string given by: X_0 . id_1 . Z_2 . 
Z_3 # Such observable is defined by the position of the non-trivial Pauli matrices: qubits_to_be_measured = [0,2,3] # And by the corresponding Pauli matrices (X=1, Y=2, Z=3) observables = [1,3,3] # The expectation value is obtained via: average = psi.ExpectationValue(qubits_to_be_measured, observables, 1.); print("Expectation value = {} <== it should be -1\n".format(average)); # For the second Pauli string (X_0 . id_1 . Z_2 . Y_3) the expectation value is obtained via: observables = [1,3,2] average = psi.ExpectationValue(qubits_to_be_measured, observables, 1.); print("Expectation value = {} <== it should be 0\n".format(average));psi is in state |+-01> Expectation value = -0.9999999999999996 <== it should be -1 Expectation value = 0.0 <== it should be 0Examples of state preparationLet us prepare the state $|+-01\rangle$.# Method A: # Prepare the state |0000>, flip qubits {1,3}, change basis to qubits {0,1}. num_qubits = 4; index = 0; psi = simulator.QubitRegister(num_qubits, "base", index, 0); psi.ApplyPauliX(1); psi.ApplyPauliX(3); psi.ApplyHadamard(0); psi.ApplyHadamard(1); # Method B: # Prepare the state |0000>, change basis to qubits {0,1}, flip qubit {3}, flip in X qubit {1}. index = 0; psi.Initialize("base", index); psi.ApplyHadamard(0); psi.ApplyHadamard(1); psi.ApplyPauliZ(1); psi.ApplyPauliX(3); # Method C: # Prepare the computational state |0101>, change basis to qubits {0,1}. index = 2+8 ; psi.Initialize("base", index); # Notice that GetProbability() does not change the state. print("Verify that the state is now |0101>.\n") for qubit in range(0,num_qubits): prob = psi.GetProbability( qubit ); print("Probability that qubit {}, if measured, is in state |1> = {}".format(qubit, prob)); psi.ApplyHadamard(0); psi.ApplyHadamard(1); print("\nNow the state is |+-01>.\n") # The expectation value is obtained via: qubits_to_be_measured = [0,1,2,3] observables = [1,1,3,3] average = psi.ExpectationValue(qubits_to_be_measured, observables, 1.); print("Expectation value = {} <== it should be +1\n".format(average));Verify that the state is now |0101>. Probability that qubit 0, if measured, is in state |1> = 0.0 Probability that qubit 1, if measured, is in state |1> = 1.0 Probability that qubit 2, if measured, is in state |1> = 0.0 Probability that qubit 3, if measured, is in state |1> = 1.0 Now the state is |+-01>. Expectation value = 0.9999999999999993 <== it should be +1The Monte Carlo method: estimating $\pi$In this notebook we introduce the Monte Carlo method and use it to estimate the value of $\pi$.The Monte Carlo method is a computational technique for estimating quantities from the analysis of a random sample of data.For example, consider the red circle in the figure below. The circle has radius 1, so we know its area is $\pi$, that is, about 3.14. However, the value of $\pi$ is hard to compute directly. We can estimate it as follows: take $N$ points with random coordinates inside the square of side 2 circumscribed about the circle. Since the area of the square is 4 and the area of the circle is $\pi$, the fraction of points falling inside the circle (that is, whose distance from the center is less than 1) will be $f = \pi/4 \approx 0.7854$.This lets us estimate $\pi$ by computing that fraction. The figure shows an example with $N=10000$, which gives $f = 0.7807$ and $\pi \approx 4\cdot f = 3.1228$. Clearly, as $N$ grows, the estimate of $\pi$ becomes more precise.![image.png](attachment:image.png)![image-2.png](attachment:image-2.png)
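To make the recipe concrete, here is a minimal sketch of the estimate described above (added as an illustration; the exercises below ask you to write your own version, and the hints suggest an explicit `for` loop, whereas this sketch uses vectorized NumPy instead): draw $N$ random points in the square, count those within distance 1 of the center $(1,1)$, and multiply the resulting fraction by 4.

import numpy as np

N = 10000
x = np.random.uniform(0, 2, N)
y = np.random.uniform(0, 2, N)

# A point is inside the circle if its distance from the center (1,1) is less than 1.
inside = (x - 1)**2 + (y - 1)**2 < 1

f = inside.sum() / N       # fraction of points inside the circle, close to pi/4
pi_estimate = 4 * f        # Monte Carlo estimate of pi
print(f, pi_estimate)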
We can use `numpy` to generate the random coordinates of the points. For example, the following code generates the $(x,y)$ coordinates of 5 random points in a square of side 2.import numpy as np x = np.random.uniform(0, 2, 5) y = np.random.uniform(0, 2, 5) print(x) print(y)We can plot the points.import numpy as np from matplotlib import pyplot as plt x = np.random.uniform(0, 2, 100) y = np.random.uniform(0, 2, 100) plt.plot(x,y, "o") plt.axis("square") plt.axis([0, 2, 0, 2]) plt.show()Exercise 1Add a title to the figure and labels to the axes. Exercise 2Write a program that uses the Monte Carlo method to estimate the value of $\pi$. HintsIf needed, you can use the following hints:* after generating the coordinates of the points, as in the code above, run a `for` loop to identify and count, one at a time, the points inside the circle;* you can decide whether a point is inside the circle with an `if` condition, checking whether its distance from the center is less than 1:$$(x-1)^2+(y-1)^2 < 1$$* to count the points inside the circle, initialize a variable to 0 (for example, `n_in = 0`) and then increase it by 1 for every interior point found (`n_in += 1`). Plotting the pointsWith a `for` loop it is easy to build separate lists with the coordinates of the points inside the circle and the coordinates of the points outside it.import numpy as np from matplotlib import pyplot as plt N = 100 # Coordinates of all the points x = np.random.uniform(0, 2, N) y = np.random.uniform(0, 2, N) # Lists with the coordinates of the points inside the circle x_in = [] y_in = [] # Lists with the coordinates of the points outside the circle x_out = [] y_out = [] for i in range(N): # The point is inside if its distance from the center (1,1) # is less than the radius R = 1 if (x[i]-1)**2 + (y[i]-1)**2 < 1: x_in.append(x[i]) y_in.append(y[i]) else: x_out.append(x[i]) y_out.append(y[i])This notebook is the final submission for the hackathon https://www.machinehack.com/hackathons/predicting_food_delivery_time_hackathon_by_ims_proschool/dataNote: this is an older notebook, so the preprocessing and model code are not cleanly structured.import pandas as pd from sklearn.preprocessing import MultiLabelBinarizer import numpy as np from sklearn.model_selection import train_test_split train_data = pd.read_excel("Data_Train.xlsx") test_data = pd.read_excel("Data_Test.xlsx") train_data.head(5) test_data.head(5) test_data.shape train_data.shape print(set(test_data.Location.unique())- set(train_data.Location.unique())) print(set(train_data.Location.unique())- set(test_data.Location.unique())) test_data.Location.nunique() train_data.Location.nunique() train_data.Restaurant.nunique() test_data.Restaurant.nunique() print(len(set(train_data.Restaurant.unique())-set(test_data.Restaurant.unique()))) print(len(set(test_data.Restaurant.unique())-set(train_data.Restaurant.unique()))) test_data.info() train_data.info() train_data.loc[train_data['Reviews'].str.contains("-")] test_data.loc[test_data['Reviews'].str.contains("-")] all_data = train_data.drop("Delivery_Time",axis=1).append(test_data,ignore_index=True) all_data[:11094].tail(5) train_data.tail(5) all_data[11094:].head() test_data.head(5) all_data['Cuisines'] = all_data['Cuisines'].apply(lambda x:x.split(',')) all_data['Cuisines']=
all_data['Cuisines'].apply(lambda x :[i.strip() for i in x] ) location = all_data['Location'].apply(lambda x:x.split(',')).apply(lambda x :[i.strip() for i in x]) mlb = MultiLabelBinarizer() s= all_data['Cuisines'] cuisines = pd.DataFrame(mlb.fit_transform(s),columns=mlb.classes_) mlb1 = MultiLabelBinarizer() location_encoded = pd.DataFrame(mlb1.fit_transform(location),columns=mlb1.classes_) cuisines['Chinese'].sum() pd.get_dummies(pd.DataFrame(all_data.Cuisines.values.tolist()),prefix_sep='',prefix='').sum(level=0,axis=1)['Chinese'].sum() train_data[train_data['Cuisines'].str.contains('Chinese')].count() location_encoded.head() all_data.info() all_data.loc[~all_data['Average_Cost'].str.contains('₹'),'Average_Cost']='₹50' all_data.loc[~all_data['Minimum_Order'].str.contains('₹')] all_data['Average_Cost'] = all_data['Average_Cost'].str.replace('₹','').str.replace(',','').astype(int) all_data['Minimum_Order'] = all_data['Minimum_Order'].str.replace('₹','').str.replace(',','').astype(int) all_data.Reviews.value_counts() all_data.Reviews = all_data.Reviews.str.replace('-','0').astype(int) all_data.Votes = all_data.Votes.str.replace('-','0').astype(int) all_data.info() all_data.Rating.value_counts() all_data.Rating = all_data.Rating.str.replace('-','0.0').str.replace('Opening Soon','0.0' ).str.replace('Temporarily Closed','0.0' ).str.replace('NEW','0.0') all_data.Rating = all_data.Rating.astype(float) all_data.info() all_data_encoded = pd.concat([pd.concat([all_data,location_encoded],axis=1),cuisines],axis=1) all_data_encoded.info() all_data_restraunt_encoded = pd.concat([all_data_encoded,pd.get_dummies(all_data_encoded.Restaurant)],axis=1) all_data_encoded = all_data_encoded.drop(['Restaurant','Location','Cuisines'],axis=1) all_data_restraunt_encoded.info() all_data_restraunt_encoded = all_data_restraunt_encoded.drop(['Restaurant','Location','Cuisines'],axis=1) pd.read_excel("Sample_Submission.xlsx").head(5) train_data['Delivery_Time'].value_counts() #target = train_data['Delivery_Time'] factor = pd.factorize(train_data['Delivery_Time']) definitions = factor[1] target = pd.Series(factor[0]) all_data_encoded['cuisines_count']=all_data.Cuisines.apply(lambda x:len(x)) all_data_encoded.to_csv('All_data_encoded.csv',index=False)Model Buildingfrom sklearn.model_selection import KFold,cross_val_score from sklearn.ensemble import RandomForestClassifier X_train, X_test, y_train, y_test = train_test_split(all_data_encoded[0:11094], target, test_size = 0.25, random_state = 21) classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 42) classifier.fit(X_train, y_train) y_submission = classifier.predict(all_data_encoded[11094:]) pd.DataFrame({'Delivery_Time':np.vectorize(reversefactor.get)(y_submission)}).to_excel( "Submissions/First_submission_rfc.xlsx",index=False) def plot_confusion_matrix(classifier,X,y): X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.25, random_state = 21) classifier.fit(X_train, y_train) # Predicting the Test set results y_pred = classifier.predict(X_test) #Reverse factorize (converting y_pred from 0s,1s and 2s to Iris-setosa, Iris-versicolor and Iris-virginica reversefactor = dict(zip(range(7),definitions)) y_test = np.vectorize(reversefactor.get)(y_test) y_pred = np.vectorize(reversefactor.get)(y_pred) # Making the Confusion Matrix print(pd.crosstab(y_test, y_pred, rownames=['Actual duration'], colnames=['Predicted duration'])) # Predicting the Test set results y_pred = classifier.predict(X_test) #Reverse factorize 
(converting y_pred from 0s,1s and 2s to Iris-setosa, Iris-versicolor and Iris-virginica reversefactor = dict(zip(range(7),definitions)) y_test = np.vectorize(reversefactor.get)(y_test) y_pred = np.vectorize(reversefactor.get)(y_pred) # Making the Confusion Matrix print(pd.crosstab(y_test, y_pred, rownames=['Actual duration'], colnames=['Predicted duration'])) import lightgbm as lgbm from sklearn.ensemble import AdaBoostClassifier,ExtraTreesClassifier,BaggingClassifier,GradientBoostingClassifier gbc = GradientBoostingClassifier(n_estimators=500) plot_confusion_matrix(gbc,all_data_encoded[0:11094], target) lgbc = lgbm.LGBMClassifier(num_leaves=100, learning_rate=0.15, n_estimators=900) #great plot_confusion_matrix(lgbc,all_data_encoded[0:11094], target) #okayish plot_confusion_matrix(bgc,all_data_encoded[0:11094], target) pd.DataFrame({'Delivery_Time':np.vectorize(reversefactor.get)(lgbc.predict(all_data_encoded[11094:]))}).to_excel( "Submissions/Second_submission_lgbc_est_900_lr_15.xlsx",index=False) abc = AdaBoostClassifier() etc = ExtraTreesClassifier() bgc = BaggingClassifier() plot_confusion_matrix(abc,all_data_encoded[0:11094], target) plot_confusion_matrix(etc,all_data_encoded[0:11094], target) pd.DataFrame({'Delivery_Time':np.vectorize(reversefactor.get)(lgbc.predict(all_data_encoded[11094:]))}).to_excel( "Submissions/Second_submission_rfc.xlsx",index=False)prepared by () | November 07, 2018 I have some macros here. If there is a problem with displaying mathematical formulas, please run me to load these macros.$ \newcommand{\bra}[1]{\langle 1|} $$ \newcommand{\ket}[1]{|1\rangle} $$ \newcommand{\braket}[2]{\langle 1|2\rangle} $$ \newcommand{\inner}[2]{\langle 1,2\rangle} $$ \newcommand{\biginner}[2]{\left\langle 1,2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{1} 2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{1}} $$ \newcommand{\mypar}[1]{\left( 1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( 1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{1}2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert 1 \right\rVert } $ SuperpositionThere is no classical counterpart of the concept "superposition".But, we can still use a classical analogy that might help us to give some intuitions. 
Probability distribution Suppose that Asja starts in $ \myvector{1\\0} $ and secretly applies the probabilistic operator $ \mymatrix{cc}{ 0.3 & 0.6 \\ 0.7 & 0.4 } $.Because she applies her operator secretly, our information about her state is probabilistic, and it is calculated as$$ \myvector{0.3 \\ 0.7} = \mymatrix{cc}{ 0.3 & 0.6 \\ 0.7 & 0.4 } \myvector{1\\0}.$$Asja is either in state 0 or in state 1.However, from our point of view, Asja is in state 0 and state 1 with probabilities $ 0.3 $ and $ 0.7 $, respectively.We can say that Asja is in a probability distribution over states 0 and 1, being in both states at the same time.On the other hand, if we observe Asja's state, then our information about Asja becomes deterministic: either $ \myvector{1 \\ 0} $ or $ \myvector{0 \\ 1} $.We can say that after measurement the probabilistic state $ \myvector{0.3 \\ 0.7} $ collapses to either $ \myvector{1 \\ 0} $ or $ \myvector{0 \\ 1} $. The third experiment Now, we can explain the following experiment. The initial step The photon is in state $ \ket{v_0} = \vzero $. The first step Hadamard is applied:$ \ket{v_1} = \hadamard \vzero = \vhadamardzero $.At this point, the photon is in a superposition of state $ \ket{0} $ and state $ \ket{1} $, being in both states with amplitude $ \frac{1}{\sqrt{2}} $ each.The state of the photon is $ \ket{v_1} = \vhadamardzero $, and we can also represent it as follows:$ \ket{v_1} = \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} $. The second step Hadamard is applied again.We write the effect of Hadamard on the states $ \ket{0} $ and $ \ket{1} $ as follows:$ H \ket{0} = \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} $$ H \ket{1} = \frac{1}{\sqrt{2}} \ket{0} - \frac{1}{\sqrt{2}} \ket{1} $This representation helps us to see clearly why the state $ \ket{1} $ disappears.Now, let's see the effect of Hadamard on the quantum state $ \ket{v_1} = \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} $:$ \ket{v_2} = H \ket{v_1} = H \mybigpar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } = \frac{1}{\sqrt{2}} H \ket{0} + \frac{1}{\sqrt{2}} H \ket{1} $We can replace $ H\ket{0} $ and $ H\ket{1} $ as described above.$ \ket{v_2} $ is formed by the summation of the following terms:$ \frac{1}{\sqrt{2}} H \ket{0} = \frac{1}{2} \ket{0} + \frac{1}{2} \ket{1} $$ \frac{1}{\sqrt{2}} H \ket{1} = \frac{1}{2} \ket{0} - \frac{1}{2} \ket{1} $The amplitude of $ \ket{0} $ becomes 1, but the amplitude of $ \ket{1} $ becomes 0 because of cancellation.$ \ket{v_2} = 1 \cdot \ket{0} + 0 \cdot \ket{1} = \ket{0} $.The photon was in both states at the same time with certain amplitudes.After the second Hadamard, the resulting contributions interfere with each other.The interference can be constructive or destructive.In our example, the contributions to $ \ket{0} $ interfere constructively, while the contributions to $ \ket{1} $ interfere destructively. Observations Probabilistic systems: If there is a nonzero transition to a state, then it contributes to the probability of this state positively. Quantum systems: If there is a nonzero transition to a state, then we cannot make such an interpretation without knowing the other transitions to this state.If it is the only transition, then it contributes to the amplitude (and probability) of the state, and it does not matter whether the sign of the transition is positive or negative.If there is more than one transition, then, depending on the summation of all transitions, we can determine whether a specific transition contributes or not.As a simple rule, if the final amplitude of the state and the nonzero transition have the same sign, then it is a positive contribution; if they have opposite signs, then it is a negative contribution.
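The cancellation described in the second step is easy to check numerically. The following short sketch (an illustration added here, not part of the original workbook; it uses plain NumPy rather than a quantum SDK) applies the Hadamard matrix twice to the state [1, 0] and, for comparison, applies a classical doubly stochastic matrix twice to the distribution [1, 0]: the two contributions to $ \ket{1} $ cancel in the quantum case, while classical probabilities only ever add up.

import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)   # Hadamard matrix
v0 = np.array([1.0, 0.0])                      # quantum state |0>
v1 = H @ v0   # [0.707, 0.707]: superposition of |0> and |1>
v2 = H @ v1   # [1, 0]: the contributions to |1> interfere destructively
print(v1, v2)

C = np.array([[0.5, 0.5], [0.5, 0.5]])         # classical "fair switch" operator
p0 = np.array([1.0, 0.0])                      # classical distribution: state 0 with certainty
p1 = C @ p0   # [0.5, 0.5]
p2 = C @ p1   # still [0.5, 0.5]: probabilities never cancel
print(p1, p2)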
Task 1 [on paper]Start in state $ \ket{u_0} = \ket{1} $.Apply the Hadamard operator to $ \ket{u_0} $, i.e., find $ \ket{u_1} = H \ket{u_0} $.Apply the Hadamard operator to $\ket{u_1}$, i.e., find $ \ket{u_2} = H \ket{u_1} $.Observe the constructive and destructive interference when calculating $ \ket{u_2} $. Being in a superposition A quantum system can be in more than one state with nonzero amplitudes.Then, we say that our system is in a superposition of these states.When evolving from a superposition, the resulting transitions may affect each other constructively or destructively. This can happen only because amplitudes can be negative as well as positive. Otherwise, all nonzero transitions would simply add up, as in probabilistic systems. Measurement We can measure a quantum system, and then the system is observed in one of its states.This is the most basic measurement in quantum computing. (There are more generic measurement operators, but we will not cover them.)The probability of observing the system in a specified state is the square of its amplitude. If the amplitude of a state is zero, then this state cannot be observed. If the amplitude of a state is nonzero, then this state can be observed. For example, if the system is in quantum state $$ \myrvector{ -\frac{\sqrt{2}}{\sqrt{3}} \\ \frac{1}{\sqrt{3}} },$$then, after a measurement, we can observe the system in state $\ket{0} $ with probability $ \frac{2}{3} $ and in state $\ket{1}$ with probability $ \frac{1}{3} $. Collapsing After the measurement, the system collapses to the observed state, and so the system is no longer in a superposition.Thus, the information kept in a superposition is lost. In the above example, when the system is observed in state $\ket{0}$, the new state becomes $ \myvector{1 \\ 0} $. If it is observed in state $\ket{1}$, the new state becomes $ \myvector{0 \\ 1} $. Task 2 We have a quantum system with four states: $\ket{00}$, $ \ket{01} $, $\ket{10}$, and $ \ket{11} $.We can also say that our system has two qubits.Suppose that the system is in the following state:$ \myrvector{ \dfrac{ 1 }{ \sqrt{3} - \sqrt{ 5 + 2\sqrt{6}} } \\ \\ \dfrac{1}{ \sqrt{3} - \sqrt{ 7 + 2\sqrt{12} } } \\ \\ \dfrac{ 1 }{ \sqrt{5} - \sqrt{ 13 + 2\sqrt{40} } } \\ \\ \dfrac{1}{ \sqrt{ 7 } - \sqrt{ 15 + 2 \sqrt{56} } } }. $ Find the probability of observing the system in state $\ket{00}$, $ \ket{01} $, $\ket{10}$, or $ \ket{11} $. You may write a function that calculates the denominator of each fraction automatically, where its value is determined by three numbers $a$, $b$, and $c$ through the form $ \sqrt{a} - \sqrt{b + 2 \sqrt{c} } $. 
Verify that the total probability is 1 (or almost 1).# # your solution is here #click for our solution Z-gate (operator) The indentity operator $ I = \mymatrix{cc}{1 & 0 \\ 0 & 1} $ does not affect the computation.What about the following operator?$ Z = \Z $.It is very similar to the identity operator.Consider the quantum state $ \ket{u} = \myvector{ \frac{3}{5} \\ \frac{4}{5} } $.Let's calculate the new quantum state after appying $ Z $ to $ \ket{u} $:$ \ket{u'} = Z \ket{u} = \Z \myvector{ \frac{3}{5} \\ \frac{4}{5} } = \myrvector{ \frac{3}{5} \\ -\frac{4}{5} } $. The quantum states $ \ket{u} $ and $ \ket{u'} $ look similar. The probabilities of observing the state 0 and state 1 are the same when the system is in $ \ket{u} $ or $ \ket{u'} $.On the other hand, they are far away from each other as shown below: For example, by applying Hadamard to each of them, the probability of observing the state 0 and state 1 may change (?)To observe this, we can do a simple experiment. Task 3 Create a quantum ciruit with 5 qubits.Apply h-gate (Hadamard operator) to each qubit.Apply z-gate ($Z$ operator) to randomly picked qubits. (e.g., $ mycircuit.z(qreg[i]) $)Apply h-gate to each qubit Measure each qubit.Execute your program 1000 times.Compare the outcomes of the qubits affected by z-gates, and the outcomes of the qubits not affected by z-gates.Does z-gate change the outcome?Why?# let's import all necessary objects and methods for quantum circuits from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer # let's import randrange for random choices from random import randrange # # your code is here ## Load the Drive helper and mount from google.colab import drive # This will prompt for authorization. 
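# Note: the cell below assigns file_name twice; only the second assignment, path + 'chestXRay.zip'
# under '/content/drive/My Drive/', is actually passed to unZip, so the archive is expected at that location.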
drive.mount('/content/drive') import fastai from fastai.vision import * import os import matplotlib.pyplot as plt import matplotlib.image as mpimg from zipfile import ZipFile def unZip(file_name): with ZipFile(file_name, 'r') as zip: zip.extractall() print('Done!') !rm -rf chest_xray !rm -rf dataset1 file_name = '/content/drive/chestXRay.zip' path="/content/drive/My Drive/" file_name=path+'chestXRay.zip' unZip(file_name) !ls chest_xray !ls unZip('chest_xray.zip') !ls chest_xray import glob import cv2 import numpy as np %matplotlib inline filesPos= sorted(glob.glob('chest_xray/train/NORMAL/*.jpeg')) filesNeg=sorted(glob.glob('chest_xray/train/PNEUMONIA/*.jpeg')) print(len(filesPos),len(filesNeg)) from fastai import * import shutil def copyfiles(filesName,dest): c=0 for file in filesName: shutil.copy(file, dest) c+=1 print("Copied "+str(c)) !mkdir dataset1 !mkdir dataset1/train !ls dataset1 !mkdir dataset1/train/PNEUMONIA !mkdir dataset1/train/NORMAL !ls dataset1/train/ filesNeg=filesNeg[:1340] import random random.seed(3000) random.shuffle(filesNeg) random.shuffle(filesPos) !ls chest_xray copyfiles(filesPos,'dataset1/train/NORMAL') copyfiles(filesNeg,'dataset1/train/PNEUMONIA') !mkdir dataset1/valid !mkdir dataset1/test !cp -r chest_xray/val dataset1/ !cp -r chest_xray/test dataset1/ !rm -rf dataset1/valid !mv dataset1/val dataset1/valid np.random.seed(1234) path='dataset1' data = ImageDataBunch.from_folder(path, ds_tfms=get_transforms(do_flip=True,max_lighting=0.1), seed=1234, valid_pct=0.2, size=224, num_workers=8, bs=32, test="test") data data.classes data.show_batch(rows=3, figsize=(10,6), hide_axis=False) from sklearn.metrics import roc_auc_score,f1_score def f1_score_a(y_pred,y_true,tens=True): # score=roc_auc_score(y_true,torch.sigmoid(y_pred)[:,1]) f1a_score=f1_score(y_true, np.round(torch.sigmoid(y_pred)[:,1]), average='macro') if tens: # score=tensor(score) f1a_score=tensor(f1a_score) else: f1a_score=f1a_score return f1a_score # from sklearn.metrics import f1_score # def f1_out(y_ture,y_pred,tens=True): # f1a_score=f1_score(y_true, torch.sigmoid(y_pred)[:,1], average='macro') # if tens: # f1a_score=tensor(f1a_score) # else: # f1a_score=f1a_score # return f1a_score import torch import torchvision densenet121 = torchvision.models.densenet121(pretrained=True) densenet121num_ftrs = densenet121.classifier.in_features densenet121.classifier=nn.Sequential( nn.Dropout(0.5), nn.Linear(num_ftrs, 128), nn.Dropout(0.3), nn.Linear(128, 2), ) densenet121=densenet121.cuda() auc=[] loss=[] aucc=[] learn = Learner(data, densenet121, metrics=[error_rate, accuracy,f1_score_a]) learn from fastai.callbacks import * learn.fit_one_cycle(10,0.0003,callbacks=[SaveModelCallback(learn, every='imrpovement', monitor='f1_score_a')]) lossArray=[] lossArray.append(learn.recorder.losses) auc=[] auc.append(learn.recorder.metrics) auc learn.recorder.plot_losses() learn.recorder.plot(show_momentum=True) learn.lr_find() learn.recorder.plot() learn.load('bestmodel') learn.unfreeze() learn.fit_one_cycle(5,callbacks=[SaveModelCallback(learn, every='imrpovement', monitor='f1_score_a')]) learn.load('bestmodel') interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() # learn.load('Res') # learn.lr_find() # # learn.recorder.plot() # learn.fit_one_cycle(20,max_lr=(1e-07),wd=0.25) # interp = ClassificationInterpretation.from_learner(learn) # interp.plot_confusion_matrix() # learn.lr_find() # learn.recorder.plot() conf=interp.confusion_matrix() TrueNagitive=conf[0][0] FalseNegative=conf[0][1] 
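# Note (assuming fastai's convention of rows = actual, columns = predicted, with classes ordered
# [NORMAL, PNEUMONIA]): conf[0][1] counts false positives and conf[1][0] false negatives, so the two
# variable names here are effectively swapped and the printed precision/recall trade places; the F1
# score computed below is unaffected because it is symmetric in precision and recall.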
TruePositive=conf[1][1] FalsePositive=conf[1][0] recal=TruePositive/(TruePositive+FalseNegative) precision=TruePositive/(TruePositive+FalsePositive) print("Precision of Model =",precision,"Recall of Model ", recal) f1=2*((precision*recal)/(precision+recal)) print('F1 Score of Model =',f1)RobotManager Testsservice_client_get_map = rospy.ServiceProxy('/static_map', GetMap) map_response = service_client_get_map() models_folder_path = rospkg.RosPack().get_path('simulator_setup') robot_yaml = os.path.join(models_folder_path, 'robot', "myrobot.model.yaml") from task_generator.robot_manager import RobotManager from rl_agent.utils.observation_collector import ObservationCollector # namespace convention: *simulation ns*/*robot name*/*data topic* # robot1 namespace: '/sim_1/test1/...' e.g. '/sim_1/test1/goal', '/sim_1/test1/odom' etc. # robot1 = RobotManager(ns='sim_1', map_=map_response.map, robot_yaml_path=robot_yaml, robot_name='test1') # robot2 = RobotManager(ns='sim_1', map_=map_response.map, robot_yaml_path=robot_yaml, robot_name='test2') robot_list = [RobotManager(ns='eval_sim', map_=map_response.map, robot_yaml_path=robot_yaml, robot_name=f'robot{i+1}') for i in range(12)] from task_generator.marl_tasks import get_MARL_task # number of static and dynamic obstacles still hardcoded for this task mode task_manager = get_MARL_task( "eval_sim", "random", robot_names=[f'robot{i+1}' for i in range(12)], PATHS={} ) for robot in robot_list: robot.set_start_pos_goal_pos() # sets random start and goal position and returns the poses start_pos_1, goal_pos_1 = robot1.set_start_pos_goal_pos() start_pos_2, goal_pos_2 = robot2.set_start_pos_goal_pos() print(f"{goal_pos_1}\n{goal_pos_2:}")x: 8.391000373288989 y: -1.9499998949468136 theta: 1.1753860034784536 x: 3.6660002507269382 y: 2.7750002276152372 theta: 0.7659632563102519TASK MANAGER TESTSfrom task_generator.marl_tasks import get_MARL_task # number of static and dynamic obstacles still hardcoded for this task mode task_manager = get_MARL_task( "sim_1", "random", robot_names=["test1", "test2"] ) # generates random dynamic and static obstacle configurations as well as # random start and goal pose task_manager.reset()DRL AGENT CLASS TESTSDEFAULT_HYPERPARAMETER = os.path.join( rospkg.RosPack().get_path("arena_local_planner_drl"), "configs", "hyperparameters", "default.json", ) from rl_agent.training_agent_wrapper import TrainingDRLAgent agent1 = TrainingDRLAgent( ns="sim_1", robot_name="test1", hyperparameter_path=DEFAULT_HYPERPARAMETER, ) agent2 =TrainingDRLAgent( ns="sim_1", robot_name="test2", hyperparameter_path=DEFAULT_HYPERPARAMETER, ) # in order to retrieve the first observation, manually call the StepWorld service # as follows: rosservice call /sim_1/step_world "0.1" agent1.get_observations() agent_list = [agent1, agent2] from rl_agent.envs.pettingzoo_env import FlatlandPettingZooEnv env = FlatlandPettingZooEnv(ns="sim_1", agent_list=agent_list) import numpy as np env.reset() merged_obs, rewards, dones, infos = env.step({"test1": np.array([0.3, 2.0]), "test2": np.array([0.3, 2.0])})generate statistical data of gatk & vardict> tools: >> gatk VariantEval>> picard CollectVariantCallingMetrics some bugs with vardict vcf, may be compartibility reason%%bash picard -Xmx2048m CollectVariantCallingMetrics \ I=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/HY19020837.vardict.vcf.gz \ O=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/HY19020837.vardict.metrics \ DBSNP=/home/jgs/database/gatk_bundle/GRCh37/ftp.broadinstitute.org/bundle/b37/dbsnp_138.b37.vcf \ 
TI=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/target.interval_list picard -Xmx2048m CollectVariantCallingMetrics \ I=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/HY19020837.vcf.gz \ O=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/HY19020837.gatk.metrics \ DBSNP=/home/jgs/database/gatk_bundle/GRCh37/ftp.broadinstitute.org/bundle/b37/dbsnp_138.b37.vcf \ TI=/mnt/data_sata_2t/vsftpd/wxq/ftp/files/0306/target.interval_list %%bash gatk VariantEval \ --eval HY19020837.vcf.gz \ --eval HY19020837.vcf.gz \ -R ~/database/gatk_bundle/b37/human_g1k_v37.fasta \ -O ./compare.tabtrain test split datawinData[train].head() X_train, X_test, y_train, y_test = train_test_split(winData[train], np.ravel(winData[target]), random_state=42, test_size=0.3, stratify=winData.win) # design model model_lr = LogisticRegression(solver="lbfgs") model_cat = CatBoostClassifier(iterations=3000, random_state=42, logging_level="Silent") # fit model model_lr.fit(X_train,y_train) model_cat.fit(X_train,y_train, plot=True) # predict with model y_pred_lr = model_lr.predict(X_test) y_pred_cat = model_cat.predict(X_test) # predict with probabilites y_pred_proba_lr = model_lr.predict_proba(X_test) y_pred_proba_cat = model_cat.predict_proba(X_test) plt.plot(y_pred_proba_lr[:,1]) plt.plot(y_pred_proba_cat[:,1])Eval modelfrom sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve from sklearn.metrics import aucAccuracyprint("lr: ", accuracy_score(y_pred_lr, y_test)) print("cat: ", accuracy_score(y_pred_cat, y_test)) #print("pred LR mean: ", np.mean(y_pred_lr))lr: 0.9813486370157819 cat: 0.9813486370157819Confusion matrixconfusion_matrix(y_pred_lr,y_test)ROC curvedef rocPlot(y_test, y_pred_list): plt.style.use('fivethirtyeight') plt.figure(figsize=[8,8]) lw=5 for y_pred in y_pred_list: fpr, tpr, _ = roc_curve(y_test, y_pred[:,1]) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, lw=lw, label="ROC curve (area = %0.2f)" % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") pass return(0) rocPlot(y_test,[y_pred_proba_lr, y_pred_proba_cat])Find the optimal thresholddef Find_Optimal_Cutoff(target, predicted): """ Find the optimal probability cutoff point for a classification model related to event rate Parameters ---------- target : Matrix with dependent or target data, where rows are observations predicted : Matrix with predicted data, where rows are observations Returns ------- list type, with optimal cutoff value """ fpr, tpr, threshold = roc_curve(target, predicted) optimal_idx = np.argmin(np.abs(tpr - fpr)) # Edit: Change to argmin! 
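# Note: argmin of |tpr - fpr| selects the threshold where the ROC curve meets the chance diagonal,
# which is rarely the intended optimum (Youden's J would use argmax(tpr - fpr)); the value this
# function actually returns below comes from the |tpr - (1 - fpr)| criterion, i.e. the threshold
# where sensitivity and specificity are approximately equal.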
optimal_threshold = threshold[optimal_idx] i = np.arange(len(tpr)) roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)}) roc_t = roc.iloc[np.abs(roc.tf-0).argsort()[:1]] return list(roc_t['threshold']) threshold_lr = Find_Optimal_Cutoff(y_test, y_pred_proba_lr[:,1]) print("lr: ", threshold_lr) threshold_cat = Find_Optimal_Cutoff(y_test, y_pred_proba_cat[:,1]) print("cat:",threshold_cat) data = pd.DataFrame({"target": y_test, "pred_proba_lr": y_pred_proba_lr[:,1], "pred_proba_cat": y_pred_proba_lr[:,1]}) # Find prediction to the dataframe applying threshold data['pred_lr'] = data['pred_proba_lr'].map(lambda x: 1 if x > threshold_lr[0] else 0) data['pred_cat'] = data['pred_proba_cat'].map(lambda x: 1 if x > threshold_cat[0] else 0) # Print confusion Matrix print("lr: \n",confusion_matrix(data['target'], data['pred_lr'])) print("\ncat: \n",confusion_matrix(data['target'], data['pred_cat'])) plt.plot(data["pred_lr"],'r*', alpha=0.5) plt.plot(data["pred_cat"],'b^', alpha=0.5) plt.plot(data["target"],'g.')How do we perform on year 2018?# get from 2018 win2018 = winData_all.loc[winData_all.year==2018,:] X_2018 = win2018[train] y_2018 = win2018[target] pred_2018 = model_lr.predict_proba(X_2018)[:,1] X_2018["country_id"] = win2018.country_id X_2018["country_name"] = countries.iloc[win2018.country_id].get_values() X_2018["target"] = y_2018 X_2018["pred"] = pred_2018 X_2018.head() pred10 = X_2018.sort_values("pred", ascending=False)["country_name"].head(10).get_values() pred10 placement_df = pd.read_csv("../DATA/placement_df.csv") result2018 = placement_df.iloc[-1,] res10 = result2018.sort_values().head(10).index.get_values() res10What is the overlap between predicted top 10 and actual top 10?np.isin(pred10,res10).mean()Logistic regression implementation from scratch Logistic regression is the first of its kind classification technique that can easily classify into multiple categories and that too by usig the same linear model techniques. After getting the output using the linear model, we run it through a sigmoid function and get are class labels.%load_ext autoreload %autoreload 2 %matplotlib inline import numpy as np import pandas as pd import math from sklearn.datasets import load_breast_cancer from sklearn.linear_model import LogisticRegression as LogR from sklearn.metrics import r2_score from scipy.special import expit as sigmoidBasic StructureAssume 0's to be weights for the linear model.for each iteration: find the output of the linear model using existing weights find the delta wrt the true values. update the weights using a learning rate. 
for predictions: simply use sigmoid of the output using the linear modelclass LogisticRegression(): def __init__(self, x, y, n_iter=1500, lr=0.01): self.w = np.zeros((x.shape[1], 1)) self.lr = lr/x.shape[0] self.n_iter = n_iter self.x, self.y = x, y def fit(self): for i in range(self.n_iter): predictions = self.predict(self.x) delta = self.y - predictions self.w += (self.lr * (self.x.T @ delta)) def predict(self, x): l = x @ self.w return np.round(sigmoid(l)) data = load_breast_cancer() d = data.data X_train = pd.DataFrame(d, columns=data.feature_names) y_train = data.target[:, None] X_train = (X_train - X_train.mean())/X_train.std() lr = LogisticRegression(X_train, y_train) lr.fit() r2_score(y_train, lr.predict(X_train)) sklearn_lr = LogR() sklearn_lr.fit(X_train, data.target) sklearn_lr.score(X_train, data.target)**CSCE 5218 / CSCE 4930 Deep Learning** **HW1a The Perceptron** (20 pt) # Get the datasets !curl.exe --output train.dat http://huang.eng.unt.edu/CSCE-5218/train.dat !curl.exe --output test.dat http://huang.eng.unt.edu/CSCE-5218/test.dat # Look at the datasets %cd C:\Users\richa\Documents\SPR22\csce5218\deep_learning with open('test.dat') as f: for line in f: print(line) # Look at the datasets with open('train.dat') as f: for line in f: print(line)A1 A2 A3 A4 A5 A6 A7 A8 A9 A10 A11 A12 A13 1 1 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1 0 1 1 0 0 0 0 0 1 0 0 1 0 1 1 0 1 0 1 1 1 0 1 1 0 0 1 0 0 1 0 1 0 1 1 1 1 0 0 1 0 0 0 0 0 1 1 1 1 1 1 0 0 1 1 1 0 0 0 1 0 1 1 0 1 1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 1 0 1 0 1 0 1 1 1 0 0 0 1 1 1 0 1 1 0 1 1 0 1 0 1 1 1 1 1 1 1 1 0 0 1 1 0 0 1 0 0 1 1 1 1 0 1 0 1 1 0 0 1 1 0 1 0 0 1 1 0 0 1 1 0 0 0 0 1 0 1 1 1 0 1 0 0 0 0 0 1 0 1 1 0 1 1 0 0 1 1 1 1 1 0 0 1 0 1 0 1 1 1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 0 0 0 1 1 0 1 1 1 1 1 0 1 0 0 1 0 1 0 1 1 0 0 1 1 0 0 0 1 1 0 1 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 1 1 1 0 1 1 1 0 1 1 1 1 1 0 0 0 1 0 1 1 0 0 0 1 1 0 1 0 1 0 1 1 1 1 0 1 0 0 1 0 1 1 1 1 0 0 1 1 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 1 0 1 0 0 0 1 1 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 1 0 1 1 0 1 0 0 0 1 1 1 0 0 1 0 1 1 0 1 0 0 0 0 0 1 1 1 1 1 0 1 0 0 0 1 1 0 1 0 1 0 0 1 0 1 0 0 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0 0 0 1 1 1 1 0 0 0 0 1 0 0 1 1 0 1 1 1 1 0 0 0 1 1 0[...]Build the Perceptron ModelYou will need to complete some of the function definitions below. 
DO NOT import any other libraries to complete this.import math import itertools import re # Corpus reader, all columns but the last one are coordinates; # the last column is the label def read_data(file_name): f = open(file_name, 'r') data = [] # Discard header line f.readline() for instance in f.readlines(): if not re.search('\t', instance): continue instance = list(map(int, instance.strip().split('\t'))) # Add a dummy input so that w0 becomes the bias instance = [[-1] + instance] data += instance return data def dot_product(array1, array2): # Return dot product of array 1 and array 2 return sum([i*j for (i, j) in zip(array1, array2)]) def sigmoid(x): # Return output of sigmoid function on x return math.exp(x)/(math.exp(x)+1) # The output of the model, which for the perceptron is # the sigmoid function applied to the dot product of # the instance and the weights def output(weights, instance): # return the output of the model return sigmoid(dot_product(weights, instance)) # Predict the label of an instance; this is the definition of the perceptron # you should output 1 if the output is >= 0.5 else output 0 def predict(weights, instance): # return the prediction of the model output = sigmoid(dot_product(weights, instance)) return 1 if output >= 0.5 else 0 # Accuracy = percent of correct predictions def get_accuracy(weights, instances): # You do not have to write code like this, but get used to it correct = sum([1 if predict(weights, instance) == instance[-1] else 0 for instance in instances]) return correct * 100 / len(instances) # Train a perceptron with instances and hyperparameters: # lr (learning rate) # epochs # The implementation comes from the definition of the perceptron # # Training consists on fitting the parameters which are the weights # that's the only thing training is responsible to fit # (recall that w0 is the bias, and w1..wn are the weights for each coordinate) # # Hyperparameters (lr and epochs) are given to the training algorithm # We are updating weights in the opposite direction of the gradient of the error, # so with a "decent" lr we are guaranteed to reduce the error after each iteration. def train_perceptron(instances, lr, epochs): #step: initializing weights weights = [0] * (len(instances[0])-1) for _ in range(epochs): for instance in instances: #steps: error calculation by comparing output vs. true value in_value = dot_product(weights, instance) output = sigmoid(in_value) error = instance[-1] - output #steps: updating weights by minimizing error for i in range(0, len(weights)): weights[i] += lr * error * output * (1-output) * instance[i] return weightsRun itinstances_tr = read_data("train.dat") instances_te = read_data("test.dat") lr = 0.005 epochs = 5 weights = train_perceptron(instances_tr, lr, epochs) accuracy = get_accuracy(weights, instances_te) print(f"#tr: {len(instances_tr):3}, epochs: {epochs:3}, learning rate: {lr:.3f}; " f"Accuracy (test, {len(instances_te)} instances): {accuracy:.1f}")#tr: 400, epochs: 5, learning rate: 0.005; Accuracy (test, 100 instances): 68.0QuestionsAnswer the following questions. Include your implementation and the output for each question. Question 1In `train_perceptron(instances, lr, epochs)`, we have the following code:```in_value = dot_product(weights, instance)output = sigmoid(in_value)error = instance[-1] - output```Why don't we have the following code snippet instead?```output = predict(weights, instance)error = instance[-1] - output``` TODO Add your answer here (text only). 
* We can obtain a more accurate error rate in the first code snippet vs. the second. This is because in the second one, we are using the predict function which returns our output of either 1 or 0, and if we run that against the actual data (instance[-1]), we would only obtain the values: -1, 0, and 1. Conversely, the sigmoid function will return continuous values depending on our dot product which is helpful in minimizing error rates since we can determine better accuracy between values that seem close, where our predict function wouldn't give us the granular detail that we need. This helps us better calculate our gradient function for weight updates. Question 2Train the perceptron with the following hyperparameters and calculate the accuracy with the test dataset.```tr_percent = [5, 10, 25, 50, 75, 100] percent of the training dataset to train withnum_epochs = [5, 10, 20, 50, 100] number of epochslr = [0.005, 0.01, 0.05] learning rate```TODO: Write your code below and include the output at the end of each training loop (NOT AFTER EACH EPOCH)of your code.The output should look like the following:``` tr: 20, epochs: 5, learning rate: 0.005; Accuracy (test, 100 instances): 68.0 tr: 20, epochs: 10, learning rate: 0.005; Accuracy (test, 100 instances): 68.0 tr: 20, epochs: 20, learning rate: 0.005; Accuracy (test, 100 instances): 68.0[and so on for all the combinations]```You will get different results with different hyperparameters. TODO Add your answer here (code and output in the format above)instances_tr = read_data("train.dat") instances_te = read_data("test.dat") tr_percent = [5, 10, 25, 50, 75, 100] # percent of the training dataset to train with num_epochs = [5, 10, 20, 50, 100] # number of epochs lr_array = [0.005, 0.01, 0.05] # learning rate for lr in lr_array: for tr_size in tr_percent: for epochs in num_epochs: size = round(len(instances_tr)*tr_size/100) pre_instances = instances_tr[0:size] weights = train_perceptron(pre_instances, lr, epochs) accuracy = get_accuracy(weights, instances_te) print(f"#tr: {len(pre_instances):0}, epochs: {epochs:3}, learning rate: {lr:.3f}; " f"Accuracy (test, {len(instances_te)} instances): {accuracy:.1f}")#tr: 20, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 68.0 #tr: 40, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 68.0 #tr: 100, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 68.0 #tr: 200, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 74.0 #tr: 300, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 78.0 #tr: 400, epochs: 100, learning rate: 0.005; Accuracy (test, 100 instances): 77.0 #tr: 20, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 68.0 #tr: 40, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 68.0 #tr: 100, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 71.0 #tr: 200, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 78.0 #tr: 300, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 400, epochs: 100, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 20, epochs: 100[...]Question 3Write a couple paragraphs interpreting the results with all the combinations of hyperparameters. Drawing a plot will probably help you make a point. In particular, answer the following:- A. Do you need to train with all of the training dataset to get the highest accuracy with the test dataset?- B. 
How do you justify that training the second run obtains worse accuracy than the first one (despite the second one using more training data)? ```tr: 100, epochs: 20, learning rate: 0.050; Accuracy (test, 100 instances): 71.0tr: 200, epochs: 20, learning rate: 0.005; Accuracy (test, 100 instances): 68.0```- C. Can you get higher accuracy with additional hyperparameters (higher than `80.0`)?- D. Is it always worth training for more epochs (while keeping all other hyperparameters fixed)? TODO: Add your answer here (code and text)# plot data from Question 2 import matplotlib.pyplot as plt tr=[20,40,100,200,300,400,20,40,100,200,300,400,20,40,100,200,300,400] acc=[68,68,68,74,78,77,68,68,71,78,80,80,64,69,77,76,77,80] lr=[0.005,0.005,0.005,0.005,0.005,0.005,0.01,0.01,0.01,0.01,0.01,0.01,0.05,0.05,0.05,0.05,0.05,0.05] plt.scatter(tr,acc,c='teal') plt.xlabel('Training Instances, #') plt.ylabel('Accuracy, %') plt.title('Training Instances vs. Accuracy') plt.show() plt.scatter(lr,acc,c='m') plt.xlabel('Learning Rate, %') plt.ylabel('Accuracy, %') plt.title('Learning Rate vs. Accuracy') plt.show()* A: No, there is one case with an 80% accuracy (highest), but the perceptron was trained only on 75% on the training dataset (tr = 300, epochs = 100, lr = 0.01, tested with 100 instances). This was the 11th run. * B: It just so happens that the test cases had closer outcomes when you included only 25% (tr = 100) of the training dataset vs. 50% (tr = 200). In this example, only 20 epochs occurred, but if one increased the number of epochs, as a general rule (for the most part) one will obtain a higher accuracy when including more training data (but not necessarily so).# Can you get higher accuracy with additional hyperparameters (higher than 80.0)? instances_tr = read_data("train.dat") instances_te = read_data("test.dat") tr_percent = [75,85,90,95, 100] # percent of the training dataset to train with num_epochs = [20,50,100,300,500] # number of epochs lr_array = [0.01, 0.05, 0.1, 0.2, 0.5] # learning rate for lr in lr_array: for tr_size in tr_percent: for epochs in num_epochs: size = round(len(instances_tr)*tr_size/100) pre_instances = instances_tr[0:size] weights = train_perceptron(pre_instances, lr, epochs) accuracy = get_accuracy(weights, instances_te) print(f"#tr: {len(pre_instances):0}, epochs: {epochs:3}, learning rate: {lr:.3f}; " f"Accuracy (test, {len(instances_te)} instances): {accuracy:.1f}")#tr: 300, epochs: 500, learning rate: 0.010; Accuracy (test, 100 instances): 77.0 #tr: 340, epochs: 500, learning rate: 0.010; Accuracy (test, 100 instances): 79.0 #tr: 360, epochs: 500, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 380, epochs: 500, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 400, epochs: 500, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 300, epochs: 500, learning rate: 0.050; Accuracy (test, 100 instances): 76.0 #tr: 340, epochs: 500, learning rate: 0.050; Accuracy (test, 100 instances): 79.0 #tr: 360, epochs: 500, learning rate: 0.050; Accuracy (test, 100 instances): 79.0 #tr: 380, epochs: 500, learning rate: 0.050; Accuracy (test, 100 instances): 79.0 #tr: 400, epochs: 500, learning rate: 0.050; Accuracy (test, 100 instances): 79.0 #tr: 300, epochs: 500, learning rate: 0.100; Accuracy (test, 100 instances): 76.0 #tr: 340, epochs: 500, learning rate: 0.100; Accuracy (test, 100 instances): 79.0 #tr: 360, epochs[...]* C: With respect to the above code cell, on the 23rd run, an accuracy of 81% was achieved. 
Here are the respective hyperparameters: tr: 360, epochs: 500, learning rate: 0.500; Accuracy (test, 100 instances): 81.0. It is not easy achieving a higher accuracy as many adjustments were made.# Is it always worth training for more epochs (while keeping all other hyperparameters fixed)? instances_tr = read_data("train.dat") instances_te = read_data("test.dat") tr_percent = [5, 10, 25, 50, 75, 100] # percent of the training dataset to train with num_epochs = [100, 150, 200, 250, 300] # number of epochs lr_array = [0.005, 0.01, 0.05] # learning rate for lr in lr_array: for tr_size in tr_percent: for epochs in num_epochs: size = round(len(instances_tr)*tr_size/100) pre_instances = instances_tr[0:size] weights = train_perceptron(pre_instances, lr, epochs) accuracy = get_accuracy(weights, instances_te) print(f"#tr: {len(pre_instances):0}, epochs: {epochs:3}, learning rate: {lr:.3f}; " f"Accuracy (test, {len(instances_te)} instances): {accuracy:.1f}")#tr: 20, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 70.0 #tr: 40, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 69.0 #tr: 100, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 73.0 #tr: 200, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 78.0 #tr: 300, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 80.0 #tr: 400, epochs: 300, learning rate: 0.005; Accuracy (test, 100 instances): 79.0 #tr: 20, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 67.0 #tr: 40, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 72.0 #tr: 100, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 74.0 #tr: 200, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 78.0 #tr: 300, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 79.0 #tr: 400, epochs: 300, learning rate: 0.010; Accuracy (test, 100 instances): 80.0 #tr: 20, epochs: 300[...]Dimensionality Reduction Term Clustering Cluster columns instead of rows in order to find term clustering and use those cluster centroids as features to reduce dimensions.csrL2Normalized.T rowLabels = train_model.bisecting_kmeans(csrL2Normalized.T.toarray(), 3, 3)LDA (Latent Dirichlet Allocation)from sklearn.decomposition import LatentDirichletAllocation lda = LatentDirichletAllocation(n_components=1000, random_state=0) csrL2Normalized_lda = lda.fit_transform(csrL2Normalized) csrL2Normalized_lda.shape type(csrL2Normalized_lda) pd.DataFrame(csrL2Normalized_lda).head() csrL2Normalized_lda[:,1:2].shape calinski_harabaz_score(denseMatrix, csrL2Normalized_lda[:,1:2]) labels = train_model.bisecting_kmeans(csrL2Normalized_lda, 3, 3) calinski_harabaz_score(denseMatrix, labels)PCAfrom sklearn.decomposition import TruncatedSVD pca = TruncatedSVD(10000) csrL2Normalized_pca = pca.fit_transform(csrL2Normalized)Outlier DetectiondfDense = pd.DataFrame(denseMatrix) dfDense[2].head() sns.distplot(dfDense[2]) from sklearn.ensemble import IsolationForest clf = IsolationForest(behaviour='new', random_state=10, contamination='auto', n_jobs=-1) outl = clf.fit_predict(csrL2Normalized.T) pd.DataFrame(outl)[0].value_counts() dfDense.shape colsToBeRemoved = list() for idx,out in enumerate(outl): if out == -1: colsToBeRemoved.append(idx) denseMatrixWithoutOutliers = np.delete(denseMatrix, colsToBeRemoved, 1) denseMatrixWithoutOutliers.shapeScikit KMeansfrom sklearn.cluster import KMeans def calcSSE(csr, cluster): matrix = csr.toarray() members = matrix[cluster,:] return 
np.sum(np.square(members - np.mean(members))) def bisect(csr, k=7, max_iter=10, random_state=10, n_jobs=-1): clusters = list() initial_clusters = list() for i in range(csr.shape[0]): initial_clusters.append(i) clusters.append(initial_clusters) km = KMeans(n_clusters=2, init='k-means++', max_iter=max_iter, random_state=random_state, n_jobs=n_jobs) parentSSE = list() parentSSE.append(calcSSE(csr, initial_clusters)) while len(clusters) <= k : minClusterASSE = float("inf") minClusterBSSE = float("inf") minClusterA = list() minClusterB = list() minIndex = -1 for idx, cluster in enumerate(clusters): totalParentSSE = parentSSE[idx] km.fit(csr) clusterA = list() clusterB = list() for index,clu in enumerate(km.labels_): if clu==0: clusterA.append(index) else: clusterB.append(index) clusterASSE = calcSSE(csr,clusterA) clusterBSSE = calcSSE(csr,clusterB) if clusterASSE + clusterBSSE < minClusterASSE + minClusterBSSE: minClusterASSE = clusterASSE minClusterBSSE = clusterBSSE minIndex = idx minClusterA = clusterA minClusterB = clusterB del clusters[minIndex] del parentSSE[minIndex] clusters.append(minClusterA) clusters.append(minClusterB) parentSSE.append(minClusterASSE) parentSSE.append(minClusterBSSE) labels = np.zeros(csr.shape[0], dtype=np.int) for index,cluster in enumerate(clusters): for idx in cluster: labels[idx] = index+1 return labels labels = bisect(csrL2Normalized, 3, 3) calinski_harabaz_score(csrL2Normalized.toarray(), labels) labels = bisect(csrL2Normalized, max_iter=10) calinski_harabaz_score(csrL2Normalized.toarray(), labels) read_transform.write_predictions(labels, '../models/predictions/1.1-am-my-bisect.dat')Raw KMeanskm = KMeans(n_clusters=7, init='k-means++', max_iter=100, random_state=10, n_jobs=-1) km.fit(csrL2Normalized) pd.DataFrame(km.labels_)[0].value_counts() km.inertia_ calinski_harabaz_score(csrL2Normalized.toarray(), km.labels_) read_transform.write_predictions(km.labels_ + 1, '../models/predictions/1.1-am-kmeans-sci.dat')Raw kmeans without outlierscsr_without_outliers = csr_matrix(denseMatrixWithoutOutliers) csr_without_outliers.shape km.fit(csr_without_outliers) pd.DataFrame(km.labels_)[0].value_counts() km.inertia_ calinski_harabaz_score(csrL2Normalized.toarray(), km.labels_)Raw Kmeans with LDAfrom sklearn.decomposition import LatentDirichletAllocation lda = LatentDirichletAllocation(n_components=10, random_state=0) csrL2Normalized_lda = lda.fit_transform(csrL2Normalized) csrL2Normalized_lda.shape km.fit(csrL2Normalized_lda) calinski_harabaz_score(csrL2Normalized_lda, km.labels_) pd.DataFrame(km.labels_)[0].value_counts()Raw Kmeans with PCAfrom sklearn.decomposition import TruncatedSVD pca = TruncatedSVD(10) csrL2Normalized_pca = pca.fit_transform(csrL2Normalized) csrL2Normalized_pca.shape km.fit(csrL2Normalized_pca) calinski_harabaz_score(csrL2Normalized_pca, km.labels_) pd.DataFrame(km.labels_)[0].value_counts() read_transform.write_predictions(km.labels_ + 1, '../models/predictions/1.1-am-raw-kmeans-pca-10.dat')Bisect KMeans without outlierslabels = train_model.bisecting_kmeans(denseMatrixWithoutOutliers, 7, 10) calinski_harabaz_score(denseMatrixWithoutOutliers, labels) read_transform.write_predictions(labels, '../models/predictions/1.1-am-bisect-without-outliers.dat')Bisect Kmeans with PCApca = TruncatedSVD(n_components=100, n_iter=10, random_state=10, algorithm='arpack') csrL2Normalized_pca = pca.fit_transform(csrL2Normalized) labels = train_model.bisecting_kmeans(csrL2Normalized_pca, 7, 10) calinski_harabaz_score(csrL2Normalized_pca, labels) 
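# Reminder: the Calinski-Harabasz score is the ratio of between-cluster dispersion to within-cluster dispersion (each normalized by its degrees of freedom), so higher values indicate more compact, better-separated clusters; it is used throughout this section to compare the clustering variants.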
read_transform.write_predictions(labels, '../models/predictions/1.1-am-bisect-with-pca.dat') pd.DataFrame(labels)[0].value_counts() chscores = list() ks = list() for k in range(3, 21, 2): labels = train_model.bisecting_kmeans(csrL2Normalized_pca, k, 10) ks.append(k) chscore = calinski_harabaz_score(csrL2Normalized_pca, labels) chscores.append(chscore) print ("K= %d CH Score = %f" %(k, chscore)) plt.plot(ks, chscores)Bisect Kmeans with LDAcsrL2Normalized_lda = hstack((csrL2Normalized, csrL2Normalized_lda)).tocsr() csrL2Normalized_lda pca = TruncatedSVD(100) csrL2Normalized_lda_pca = pca.fit_transform(csrL2Normalized_lda) csrL2Normalized_lda_pca.shape labels = train_model.bisecting_kmeans(csrL2Normalized_lda_pca, 7, 10) pd.DataFrame(labels)[0].value_counts()Loading Data and Important Librariesimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline data = pd.read_csv("diabetes.csv") data.head() y = data.iloc[::,-1] x = data.iloc[::, 0:-1] x.head() def get_na(data): flag = False na_cols = dict(data.isna().sum()) for x in na_cols.keys(): if(na_cols[x] > 0): flag = True print(x,na_cols[x]) if(flag == False): print("No Columns having NA values :)") get_na(data) x.columnsEDAsns.distplot(x.Age).set_title('Age') sns.distplot(x.BloodPressure).set_title('BloodPressure Level') sns.distplot(x.SkinThickness).set_title('SkinThickness Level') sns.distplot(x.Insulin).set_title('Insulin Level') sns.distplot(x.BMI).set_title('BMI Level') sns.distplot(x.DiabetesPedigreeFunction).set_title('DiabetesPedigreeFunction Level') sns.distplot(x.Glucose).set_title('Glucose Level')Building Modelfrom sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)Logistic Regressionglm = LogisticRegression(solver='liblinear') model = glm.fit(x_train,y_train) predicted = model.predict(x_test) conf_mat = confusion_matrix(y_test, predicted) conf_matRandom Forestfrom sklearn.ensemble import RandomForestClassifier rf_obj = RandomForestClassifier(n_estimators=1000, criterion='gini', min_samples_split=2, min_samples_leaf=5, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, n_jobs=1000, random_state=None, verbose=0) model_rf = rf_obj.fit(x_train, y_train) predicted_rf = model.predict(x_test) # Fitting Random Forest Classification to the Training set from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0) classifier.fit(x_train,y_train) # Predicting the Test set results y_pred = classifier.predict(x_test) conf_mat_R1 = confusion_matrix(y_test, y_pred) conf_mat conf_mat = confusion_matrix(y_test, predicted_rf) conf_matStandardizationfrom sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit(x) scale_data = pd.DataFrame(scaler.transform(x), columns= x.columns) scale_data.head() x_scaled_train, x_scaled_test, y_scaled_train, y_scaled_test = train_test_split(scale_data, y, test_size=0.33, random_state=42)For scaled Data Logistic Regression and Random Forest Classifier# For scaled Data Logistic Regression glm_scaled = LogisticRegression(solver='liblinear') logistic_model_scaled = glm_scaled.fit(x_scaled_train,y_scaled_train) logistic_predicted_scaled = logistic_model_scaled.predict(x_scaled_test) conf_mat = 
confusion_matrix(y_scaled_test, logistic_predicted_scaled) conf_mat rf_obj = RandomForestClassifier(n_estimators=1000, criterion='gini', min_samples_split=2, min_samples_leaf=5, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, n_jobs=1000, random_state=None, verbose=0) rf_model_rf_scaled = rf_obj.fit(x_scaled_train,y_scaled_train) rf_predicted_rf_scaled = model.predict(x_scaled_test) conf_mat = confusion_matrix(y_scaled_test, logistic_predicted_scaled) conf_matBatch preformance[link](https://docs.sqlalchemy.org/en/13/faq/performance.html)import time import sqlite3 from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import scoped_session, sessionmaker Base = declarative_base() DBSession = scoped_session(sessionmaker()) engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def test_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() for i in range(n): customer = Customer() customer.name = 'NAME ' + str(i) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print( "SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_pk_given(n=100000): init_sqlalchemy() t0 = time.time() for i in range(n): customer = Customer(id=i + 1, name="NAME " + str(i)) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print( "SQLAlchemy ORM pk given: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_bulk_save_objects(n=100000): init_sqlalchemy() t0 = time.time() for chunk in range(0, n, 10000): DBSession.bulk_save_objects( [ Customer(name="NAME " + str(i)) for i in range(chunk, min(chunk + 10000, n)) ] ) DBSession.commit() print( "SQLAlchemy ORM bulk_save_objects(): Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_bulk_insert(n=100000): init_sqlalchemy() t0 = time.time() for chunk in range(0, n, 10000): DBSession.bulk_insert_mappings( Customer, [ dict(name="NAME " + str(i)) for i in range(chunk, min(chunk + 10000, n)) ] ) DBSession.commit() print( "SQLAlchemy ORM bulk_insert_mappings(): Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() engine.execute( Customer.__table__.insert(), [{"name": 'NAME ' + str(i)} for i in range(n)] ) print( "SQLAlchemy Core: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def init_sqlite3(dbname): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute("DROP TABLE IF EXISTS customer") c.execute( "CREATE TABLE customer (id INTEGER NOT NULL, " "name VARCHAR(255), PRIMARY KEY(id))") conn.commit() return conn def test_sqlite3(n=100000, dbname='sqlite3.db'): conn = init_sqlite3(dbname) c = conn.cursor() t0 = time.time() for i in range(n): row = ('NAME ' + str(i),) c.execute("INSERT INTO customer (name) VALUES (?)", row) conn.commit() print( "sqlite3: Total time for " + str(n) + " records " + str(time.time() - t0) + " sec") if __name__ == 
'__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) test_sqlalchemy_orm_bulk_save_objects(100000) test_sqlalchemy_orm_bulk_insert(100000) test_sqlalchemy_core(100000) test_sqlite3(100000)TM1py:Reading DataGoing through all the different ways to get data into your Python scripts Part 1: Reading data from a CSV fileIntroduction to [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)#import pandas to get data from csv file import pandas as pd # pd.read_csv will store the information into a pandas dataframe called df df = pd.read_csv('reading_data.csv') #A pandas dataframe has lots of cool pre-built functions such as: # print the result df.head() #write data to csv df.to_csv('my_new_filePyPal.csv') # Find all unique values for one column df.Country.unique()Part 2: Reading data from TM1 Going through all TM1py options to load data from TM1 into our Jupyter notebook Setting up connection to TM1#import TM1py services from TM1py.Services import TM1Service from TM1py.Utils import Utils #TM1 credentials ADDRESS = "localhost" PORT = 8009 USER = "admin" PWD = "" SSL = True #Connect to the TM1 instance tm1 = TM1Service(address=ADDRESS, port=PORT, user=USER, password=, ssl=SSL) # Cube view used in this notbook cube_name = 'Bike Shares' view_name = '2014 to 2017 Counts by Day'Getting the view as map of coordinates and values- **Use Case:** Get the values with all intersections# query first 5 cells from the cube view as coordinate-cell dictionary cells = tm1.cubes.cells.execute_view(cube_name=cube_name, view_name=view_name, private=False, top=5) cells # print first entries from coordinates-cell dictionary instead of [Version].[Version].[Actual] returns Actual for element_unique_names, cell in cells.items(): # extract element names from unique-element-names element_names = Utils.element_names_from_element_unique_names( element_unique_names=element_unique_names) # take value from cell value = cell["Value"] print(element_names, value)('Actual', '2014-01-01', 'NYC', 'Count') 6059 ('Actual', '2014-01-01', 'Chicago', 'Count') 123 ('Actual', '2014-01-01', 'Washington', 'Count') 3011 ('Actual', '2014-01-02', 'NYC', 'Count') 8600 ('Actual', '2014-01-02', 'Chicago', 'Count') 112Getting the number of cells- **Use Case:** Check how many cells are you going to work with- **Note**: Very fast#tm1.cubes.cells.execute_view_csv or tm1.cubes.cells.execute_mdx_csv %time df_cellcount= tm1.cubes.cells.execute_view_cellcount(cube_name=cube_name, view_name=view_name, private=False) df_cellcountGetting data as CSV fomat- **Use Case**: Get your data as CSV format- **Note**: Very fast#tm1.cubes.cells.execute_view_csv or tm1.cubes.cells.execute_mdx_csv %time csv = tm1.cubes.cells.execute_view_csv(cube_name=cube_name, view_name=view_name, private=False) #diplay the result as CSV format csv[0:200] #diplay first 10 lines of the result for line in csv.split("\r\n")[0:10]: print(line) #diplay last 20 lines of the result for line in csv.split("\r\n")[-10:]: print(line)2017-12-29,NYC,13759 2017-12-29,Chicago,1076 2017-12-29,Washington,3088 2017-12-30,NYC,5956 2017-12-30,Chicago,548 2017-12-30,Washington,1876 2017-12-31,NYC,6569 2017-12-31,Chicago,651 2017-12-31,Washington,1437Getting data as (pandas) dataframe- **Use Case**: Get your data as a pandas dataframe- **Note**: useful for further data analysis in python%time df = tm1.cubes.cells.execute_view_dataframe(cube_name=cube_name, view_name=view_name, private=False) df.head() df.to_csv(view_name+"Pypal.csv")Getting data as 
(pandas) pivot dataframe - **Use Case**: Get your data as a pandas dataframe following your view structure- **Note**: useful for further data analysis in python%time df_pivot = tm1.cubes.cells.execute_view_dataframe_pivot(cube_name=cube_name, view_name=view_name, private=False) # print first 5 records df_pivot.head() # print last 5 records df_pivot.tail()Getting data in custom JSON format- **Use Case**: Query additional information, such as: - Cell is RuleDerived - Cell is Consolidated - Member properties - Attribute Values- **Note**: very flexible. Not fast.%time raw_json = tm1.cubes.cells.execute_view_raw( cube_name=cube_name, view_name=view_name, private=False, elem_properties=["Type"], cell_properties=["RuleDerived", "Value"]) # print full response raw_json # Extract cube name from response raw_json['Cube']Getting cell values- **Use Case**: sometimes you are only interested in the cell values. Skipping the elements in the response increases performance- **Note**: Fast and light%time values = tm1.cubes.cells.execute_view_values(cube_name=cube_name, view_name=view_name, private=False) # extract first ten values first_ten = list(values)[0:10] # print first ten values print(first_ten)[]Getting row elements and cell values only- **Use Case**: sometimes elements in columns and titles are irrelevant. Skipping these elements in the response increases performance- **Note**: Faster than querying everythingrows_and_values = tm1.cubes.cells.execute_view_rows_and_values( cube_name=cube_name, view_name=view_name, private=False, element_unique_names=False) for row_elements, values_by_row in rows_and_values.items(): print(row_elements, values_by_row)Getting data with attributes valuesTo get attributes values, you will need to get the data from an MDX querymdx = """ WITH MEMBER [Bike Shares Measure].[City Alias] AS [}ElementAttributes_City].([}ElementAttributes_City].[City Alias]) SELECT NON EMPTY {[Date].Members}*{TM1SubsetAll([City])} ON ROWS, NON EMPTY {[Bike Shares Measure].[Count], [Bike Shares Measure].[City Alias] } ON COLUMNS FROM [Bike Shares] WHERE ([Version].[Actual],[Bike Shares Measure].[Count])""" # get table'ish dataframe data = tm1.cubes.cells.execute_mdx(mdx) #Build pandas dataframe df = Utils.build_pandas_dataframe_from_cellset(data, multiindex=False) print(df) # get pivot dataframe pivot = tm1.cubes.cells.execute_mdx_dataframe_pivot(mdx) print(pivot)Values Bike Shares Measure City Alias Count Date City 2014 Chicago 2454634 NYC New York City 8081216 Total City 13449000 Washington 2913150 2014-01 Chicago 25076 NYC New York City 300400 Total City 438173 Washington 112697 2014-01-01 Chicago 123 NYC New York City 6059 Total City 9193 Washington 3011 2014-01-02 Chicago 112 NYC New York City 8600 Total City 12028 Washington 3316 2014-01-03 Chicago 6 NYC New York City 1144 To[...]Part 3: Reading from APIs Getting data from a web service, introduction to a JSON fileThe code below has been extracted from this article: [Upload weather data from web service](https://code.cubewise.com/tm1py-help-content/upload-weather-data-from-web-services-into-planning-analytics)#library for HTTP / REST Request against Webservices import requests #standard library for JSON parsing, manipulation import json # Define constants STATION = 'GHCND:USW00014732' FROM, TO = '2017-01-01', '2017-01-04' HEADERS = {"token": ''} url = 'https://www.ncdc.noaa.gov/cdo-web/api/v2/data?' 
\ 'datasetid=GHCND&' \ 'startdate=' + FROM + '&' \ 'enddate=' + TO + '&' \ 'limit=1000&' \ 'datatypeid=TMIN&' \ 'datatypeid=TAVG&' \ 'datatypeid=TMAX&' \ 'stationid=' + STATION print(url) #Execute the URL against the NOAA API to get the results #Prettyprint first three items from result-set response = requests.get(url, headers=HEADERS).json() results = response["results"] print(json.dumps(results[0:3], indent=2)) #Rearrange the data cells = dict() for record in results: value = record['value'] / 10 coordinates = ("Actual", record['date'][0:10], "NYC", record['datatype']) cells[coordinates] = value for coordinate, value in cells.items(): print(coordinate, value) # Write values back to TM1 tm1.cubes.cells.write_values("Weather Data", cells)Activity 01import pandas as pd import matplotlib.pyplot as plt import numpy as np data = pd.read_csv("wholesale_customers_data.csv") data.isnull().sum() outliers = {} for i in range(data.shape[1]): min_t = data[data.columns[i]].mean() - (3 * data[data.columns[i]].std()) max_t = data[data.columns[i]].mean() + (3 * data[data.columns[i]].std()) count = 0 for j in data[data.columns[i]]: if j < min_t or j > max_t: count += 1 outliers[data.columns[i]] = [count,data.shape[0]-count] print(outliers) plt.hist(data["Fresh"]) plt.show() plt.figure(figsize=(8,8)) plt.pie(outliers["Detergents_Paper"],autopct="%.2f") plt.show() data_standardized = (data - data.mean())/data.std() data_standardized.head()converting jsonl files of the metadata and pdf_pases to data framesmetadata_path = '20200705v1/sample/metadata/sample.jsonl' df=pd.read_json(metadata_path, lines=True) df.keys() pdf_parses_path = '20200705v1/sample/pdf_parses/sample.jsonl' df_pdf = pd.read_json(pdf_parses_path, lines=True) df_pdf.keys()removing all columns except 'paper_id', 'abstract' and 'mag_field_of_study' and droping NaN value containging rowsdf_abstract = df.filter(['paper_id', 'abstract', 'mag_field_of_study']) df_abstract.keys() df_abstract.isna().sum() df_abstract.dropna(inplace=True) df_abstract.isna().sum() df_abstract.columns df_abstract.shapeselecting rows with 'mag_field_of_study' columns values being only CS, Phy, Med, Bio, Chem and Matdf_abstract.mag_field_of_study.head(3) df_abstract.mag_field_of_study=df_abstract.mag_field_of_study.str[0] df_abstract.mag_field_of_study.head(3) df_abstract.mag_field_of_study.unique() df_abst_pmbccm = df_abstract.loc[df_abstract.mag_field_of_study.isin(['Computer Science', 'Physics', 'Medicine', 'Biology', 'Chemistry', 'Mathematics'])] df_abst_pmbccm.mag_field_of_study.unique()merge metadata and pdf_parsesdf_merged = df_abst_pmbccm.merge(df_pdf, on='paper_id', how='left') df_merged.columns ### droping columns of no relavance here df_merged.drop(columns=['_pdf_hash', 'bib_entries', 'ref_entries', 'abstract_y'], inplace=True) df_merged.columns df_merged.isna().sum() df_merged.head(10)creating a function to filter body_text column to have only text key valuesdf_merged.body_text.isnull()[0] df_merged.body_text[1]!=df_merged.body_text[1] df_merged.body_text[95]!=df_merged.body_text[95] def get_text(y): if y!=y: return np.nan else: text = '' for dict_ in y: text = text+' '+dict_['text'] return text df_merged.body_text = df_merged['body_text'].apply(get_text)merged file is converted to a feather filedf_merged.to_feather('20200705v1/sample/df_merged.feather') ls 20200705v1/sample/df_merged.feather metadata/ pdf_parses/Automatic differentiation with `autograd` We train models to get better and better as a function of experience. 
Usually, getting better means minimizing a loss function. To achieve this goal, we often iteratively compute the gradient of the loss with respect to the weights and then update the weights accordingly. While the gradient calculations are straightforward through the chain rule, working them out by hand can be a pain for complex models.Before diving deep into the model training, let's go through how MXNet's `autograd` package expedites this work by automatically calculating derivatives. Basic usage Let's first import the `autograd` package.import mxnet as mx from mxnet import nd from mxnet import autogradAs a toy example, let's say that we are interested in differentiating a function $f(x) = 2 x^2$ with respect to parameter $x$. We can start by assigning an initial value of $x$. differentiate $f(x) = 2 x^2$ with respect to parameter $x$.x = nd.array([[1, 2], [3, 4]]) xOnce we compute the gradient of $f(x)$ with respect to $x$, we'll need a place to store it. In MXNet, we can tell an NDArray that we plan to store a gradient by invoking its `attach_grad` method.x.attach_grad()Now we're going to define the function $y=f(x)$. To let MXNet store $y$, so that we can compute gradients later, we need to put the definition inside an `autograd.record()` scope. $y=f(x)$def f(x): return 2 * x**2 with autograd.record(): y = f(x) x, yLet's invoke back propagation (backprop) by calling `y.backward()`. When $y$ has more than one entry, `y.backward()` is equivalent to `y.sum().backward()`. Backward propagation of yy.backward()Now, let's see if this is the expected output. Note that $y=2x^2$ and $\frac{dy}{dx} = 4x$, which should be `[[4, 8],[12, 16]]`. Let's check the automatically computed results: $y=2x^2$ $\frac{dy}{dx} = 4x$x, x.gradUsing Python control flows Sometimes we want to write dynamic programs where the execution depends on some real-time values. MXNet will record the execution trace and compute the gradient as well.Consider the following function `f`: it doubles the input until its norm reaches 1000, then it selects one element depending on the sum of its elements. $Y=f(X)$- Take a vector `X` of two random numbers in [-1, 1]- `X` is multiplied by `2` until its norm reaches `1000`- If `X`'s sum is positive, return the 1st element, otherwise the 2nddef f(x): x = x * 2 while x.norm().asscalar() < 1000: x = x * 2 # If sum positive # pick 1st if x.sum().asscalar() >= 0: y = x[0] # else pick 2nd else: y = x[1] return yWe record the trace and feed in a random value:x = nd.random.uniform(-1, 1, shape=2) x x.attach_grad() with autograd.record(): y = f(x) y.backward()We know that `y` is a linear function of `x`, and `y` is chosen from `x`. The gradient with respect to `x` will then be either `[y/x[0], 0]` or `[0, y/x[1]]`, depending on which element from `x` we picked. 
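As a quick sanity check, here is a minimal sketch (reusing `x`, `y`, and the recorded gradient from above): since `y = k * x[i]` for whichever entry `i` was picked, exactly one entry of `x.grad` should be non-zero, and the elementwise product of `x.grad` with `x`, summed, should reproduce `y`.
# Minimal sketch: verify the recorded gradient is consistent with y = k * x[i]
recovered = (x.grad * x).sum().asscalar()  # grad is [k, 0] or [0, k], so this equals k * x[i] = y
print(recovered, y.asscalar())             # the two numbers should match up to floating-point error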
Let's find the results: $y=k.x[0]$ or $y=k.x[1]$, hence $\frac{dy}{dx} = \begin{vmatrix} 0 \\ k \end{vmatrix} $ or $ \begin{vmatrix} k \\ 0 \end{vmatrix}$with $k = 2^n$ where n is the number of times $x$ was multiplied by 2x x.gradImports!pip install fastFM !pip install surprise !pip install lightfm !pip install hyperopt import pandas as pd import math import numpy as np import random from numpy.linalg import inv from numpy.linalg import multi_dot import matplotlib.pyplot as plt import itertools import warnings from math import sqrt from sklearn.feature_extraction import DictVectorizer from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from fastFM import als from sklearn.metrics import mean_squared_error from lightfm.datasets import fetch_movielens from lightfm import LightFM from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials warnings.filterwarnings("ignore") from scipy import sparse from lightfm import LightFM from sklearn.metrics.pairwise import cosine_similarityGet sampled data from location in drivedef get_data(): !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # Authenticate and create the PyDrive client. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) id = "1FFYnZRIuzQLeBkDUuJpK_oRtUmdxMd9O" downloaded = drive.CreateFile({'id':id}) downloaded.GetContentFile('final_dataset.csv') data = pd.read_csv('final_dataset.csv') return data data = get_data()Create train-test datasetsdef train_test_split(data): user_freq=data.groupby(['userId']).size().reset_index(name='counts') users_lt3=user_freq[user_freq['counts']<3][['userId']] users_ge3=user_freq[user_freq['counts']>=3][['userId']] train1=pd.merge(data, users_lt3, on=['userId'],how='inner') data1=pd.merge(data, users_ge3, on=['userId'],how='inner') data1.sort_values(['userId', 'timestamp'], ascending=[True, False],inplace=True) test=data1.groupby('userId').sample(frac=.3, random_state=2) test_idx=data1.index.isin(test.index.to_list()) train=train1.append(data1[~test_idx]) return train, test, user_freq train, test, user_freq = train_test_split(data)Create data for giving inputs to modeldef create_interaction_matrix(df,user_col, item_col, rating_col): interactions = df.groupby([user_col, item_col])[rating_col] \ .sum().unstack().reset_index(). 
\ fillna(0).set_index(user_col) return interactions def create_user_dict(interactions): user_id = list(interactions.index) user_dict = {} counter = 0 for i in user_id: user_dict[i] = counter counter += 1 return user_dict def create_item_dict(df,id_col,name_col): item_dict ={} for i in range(df.shape[0]): item_dict[(df.loc[i,id_col])] = df.loc[i,name_col] return item_dict def create_data_fm(train, test, user_freq): train=train[['userId', 'movieId', 'rating']] test=test[['userId', 'movieId', 'rating']] train.rename(columns = {'rating':'0_rating'}, inplace = True) test.rename(columns = {'rating':'0_rating'}, inplace = True) missing_test_users = user_freq[user_freq['counts']<3]['userId'].values interactions = create_interaction_matrix(df = train, user_col = 'userId', item_col = 'movieId', rating_col = '0_rating') test_int=create_interaction_matrix(df = test, user_col = 'userId', item_col = 'movieId', rating_col = '0_rating') a = np.zeros(shape=(len(missing_test_users),test_int.shape[1])) missing = pd.DataFrame(a,columns= test_int.columns.values) missing = missing.set_index(missing_test_users) test_int = pd.concat([test_int,missing]) return interactions, test_int interactions, test_int = create_data_fm(train,test, user_freq)Bayesian hyperparameter tuningfrom lightfm.evaluation import precision_at_k from hyperopt import tpe, fmin, hp, Trials, STATUS_OK,space_eval def fm_fn(params): x = sparse.csr_matrix(interactions.values) test_intr = sparse.csr_matrix(test_int.values) fm = LightFM(loss='warp',no_components = params['n_components'],k=params['k']) fm.fit(x,epochs = params['epoch'],num_threads=3) # results = cross_validate(model, train, measures = ['rmse'],cv=5, verbose=True) res = precision_at_k(fm,test_interactions = test_intr, k=params['k']).mean() return -res space = { 'n_components': hp.choice('n_components',[10,15,20,30]), 'epoch': hp.choice('epoch', [50,30,100]), 'k': hp.choice('k', [20,10,30]) } trials = Trials() best = fmin(algo = tpe.suggest, fn = fm_fn, trials = trials, max_evals = 20, space = space) space_eval(space, best) n_comps = [10,15,20,30] epochs = [50,30,100] k = [20,10,30] n_comps[trials.trials[0]['misc']['vals']['n_components'][0]] [epochs[x['misc']['vals']['epoch'][0]] for x in trials.trials] tpe_results=np.array([[x['result']['loss'],epochs[x['misc']['vals']['epoch'][0]], k[x['misc']['vals']['k'][0]], n_comps[x['misc']['vals']['n_components'][0]] ] for x in trials.trials]) tpe_results_df=pd.DataFrame(tpe_results, columns=['score', 'learning_rate', 'max_depth', 'n_estimators']) tpe_results_df.plot(subplots=True,figsize=(10, 10))We find the best parameters are 'epoch': 30, 'k': 10, 'n_components': 15 OPTIMIZED MODELdef runFM(interactions, n_components, k, epoch ,loss='warp', n_jobs = 4): x = sparse.csr_matrix(interactions.values) model = LightFM(no_components= n_components, loss=loss,k=k) model.fit(x,epochs=epoch,num_threads = n_jobs) return model def train_fm_model(train, test, user_freq): interactions, test_int = create_data(train,test, user_freq) mf_model = runFM(interactions,10,10,30) return mf_model mf_model = train_fm_model(train, test, user_freq) def get_fm_predictions(fm_model): user_dict = create_user_dict(interactions=interactions) movies_dict = create_item_dict(df = movies, id_col = 'movieId', name_col = 'title') u_list=list(user_dict.values()) n_users, n_items = interactions.shape preds=pd.DataFrame(columns=['userId','movieId','prediction']) def predictions(users): u_vec=[users]*n_items i_vec=list(np.arange(n_items)) 
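# score every candidate item index for this user with the trained LightFM model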
r_vec=list(mf_model.predict(users,np.arange(n_items))) data = pd.DataFrame({'userId': u_vec, 'movieId': i_vec,'prediction':r_vec}) return(data) epc = list(map(predictions,u_list)) df = pd.concat(epc) m_ls=list(interactions.columns) m_id=list(np.arange(len(m_ls))) movie_df = pd.DataFrame({'m_id': m_ls, 'movieId': m_id}) u_ls=list(interactions.index) u_id=list(np.arange(len(u_ls))) user_df = pd.DataFrame({'u_id': u_ls, 'userId': u_id}) df=pd.merge(pd.merge(df,user_df,on=['userId'],how='inner'),movie_df,on=['movieId'],how='inner') df.drop(['movieId','userId'],axis=1,inplace=True) df.rename({'u_id': 'userId', 'm_id': 'movieId'}, axis=1, inplace=True) merged = pd.merge(train, df, on=["userId", "movieId"], how="outer") all_predictions = merged[merged['0_rating'].isnull()].drop('0_rating', axis=1) all_predictions = all_predictions.rename(columns={'prediction':'predictions'}) return all_predictions fm_predictions = get_fm_predictions(mf_model) from google.colab import drive drive.mount('/content/drive') all_predictions.to_csv('/content/drive/MyDrive/main_data/fm_jay_warp.csv',index = None) train['userId'].nunique()COVID 19 Basic Visualizationimport os import pandas as pd from ipywidgets import interact, Layout import ipywidgets as widgets import matplotlib.dates as mdates import plotly.graph_objects as go from plotly.subplots import make_subplots import datetime as dt import calmap # df_confirmed_o = pd.read_csv(r'data/time_series_covid19_confirmed_global.csv') df_confirmed_o = pd.read_csv('https://coviddata.github.io/coviddata/v1/countries/cases.csv') df_confirmed = df_confirmed_o.groupby(['Country'], as_index=False).sum() # df_confirmed.head() df_deaths_o = pd.read_csv('https://coviddata.github.io/coviddata/v1/countries/deaths.csv') df_deaths = df_deaths_o.groupby(['Country'], as_index=False).sum() # df_deaths.head() df_recovered_o = pd.read_csv('https://coviddata.github.io/coviddata/v1/countries/recoveries.csv') df_recovered = df_recovered_o.groupby(['Country'], as_index=False).sum() # df_recovered.head() def f2(x): dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] st_val = dates.index(date_sel2.value[0]) + 1 end_val = dates.index(date_sel2.value[1]) + 2 x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[st_val:end_val])] y1 = df_confirmed[df_confirmed['Country'] == country_sel2.value].iloc[0][st_val:end_val] y1.index = pd.to_datetime(y1.index) y2 = df_deaths[df_deaths['Country'] == country_sel2.value].iloc[0][st_val:end_val] y2.index = pd.to_datetime(y1.index) y3 = df_recovered[df_recovered['Country'] == country_sel2.value].iloc[0][st_val:end_val] y3.index = pd.to_datetime(y1.index) fig5.data[0].x = y1.index fig5.data[0].y = y1 fig5.data[1].x = y2.index fig5.data[1].y = y2 fig5.data[2].x = y3.index fig5.data[2].y = y3 if uniform_sel.value: h = max([y1.max(), y2.max(), y3.max()]) fig5.update_yaxes(range=[0, h]) else: fig5.update_yaxes(range=[0, y1.max()], row=1, col=1) fig5.update_yaxes(range=[0, y2.max()], row=1, col=2) fig5.update_yaxes(range=[0, y3.max()], row=1, col=3) country_sel2 = widgets.Dropdown( options=list(df_confirmed['Country'].unique()), description='Country:', value='India', disabled=False, ) dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] dates_i = range(len(dates)) options = [(i.strftime(' %d %b %y '), i) for i in dates] date_sel2 = widgets.SelectionRangeSlider( options=options, index=(0, len(dates)-1), description='Months', disabled=False, 
continuous_update=False, layout=Layout(width='70%', height='80px') ) uniform_sel = widgets.Checkbox( value=True, description='Scale uniformly', disabled=False ) country_sel2.observe(f2, names='value') date_sel2.observe(f2, names='value') uniform_sel.observe(f2, names='value') x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] y1 = df_confirmed[df_confirmed['Country'] == 'India'].iloc[0][1:] y1.index = pd.to_datetime(y1.index) y2 = df_deaths[df_deaths['Country'] == 'India'].iloc[0][1:] y2.index = pd.to_datetime(y2.index) y3 = df_recovered[df_recovered['Country'] == 'India'].iloc[0][1:] y3.index = pd.to_datetime(y3.index) fig5 = make_subplots(rows=1, cols=3, subplot_titles=("Confirmed", "Deaths", "Recovered")) fig5.add_trace( go.Scatter( x=y1.index, y=y1, line=dict(color='blue', width=2), name="Confirmed" ), row=1, col=1 ) fig5.add_trace( go.Scatter( x=y2.index, y=y2, line=dict(color='red', width=2), name="Deaths" ), row=1, col=2 ) fig5.add_trace( go.Scatter( x=y3.index, y=y3, line=dict(color='green', width=2), name="Recovered" ), row=1, col=3 ) fig5 = go.FigureWidget(fig5) fig5.update_layout(title_text="Cumulative Data on seperate graphs", showlegend=False) for i in range(3): fig5.update_xaxes(title_text="Time", row=1, col=i+1) fig5.update_yaxes(title_text="Cases", row=1, col=1) # fig5.update_yaxes(rangemode="nonnegative") h = max([y1.max(), y2.max(), y3.max()]) fig5.update_yaxes(range=[0, h]) container4 = widgets.HBox([country_sel2, uniform_sel]) cum_sep = widgets.VBox([container4, date_sel2, fig5]) cum_sep def f(x): dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] st_val = dates.index(date_sel.value[0]) + 1 end_val = dates.index(date_sel.value[1]) + 2 x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[st_val:end_val])] y1 = df_confirmed[df_confirmed['Country'] == country_sel.value].iloc[0][st_val:end_val] y1.index = pd.to_datetime(y1.index) y2 = df_deaths[df_deaths['Country'] == country_sel.value].iloc[0][st_val:end_val] y2.index = pd.to_datetime(y1.index) y3 = df_recovered[df_recovered['Country'] == country_sel.value].iloc[0][st_val:end_val] y3.index = pd.to_datetime(y1.index) if confirmed_sel.value: fig4.data[0].x = y1.index fig4.data[0].y = y1 else: fig4.data[0].x = None fig4.data[0].y = None if death_sel.value: fig4.data[1].x = y2.index fig4.data[1].y = y2 else: fig4.data[1].x = None fig4.data[1].y = None if recover_sel.value: fig4.data[2].x = y3.index fig4.data[2].y = y3 else: fig4.data[2].x = None fig4.data[2].y = None country_sel = widgets.Dropdown( options=list(df_confirmed['Country'].unique()), description='Country:', value='India', disabled=False, ) confirmed_sel = widgets.Checkbox( value=True, description='Confirmed', disabled=False ) death_sel = widgets.Checkbox( value=True, description='Death', disabled=False ) recover_sel = widgets.Checkbox( value=True, description='Recovered', disabled=False ) dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] dates_i = range(len(dates)) options = [(i.strftime(' %d %b %y '), i) for i in dates] date_sel = widgets.SelectionRangeSlider( options=options, index=(0, len(dates)-1), description='Months', disabled=False, continuous_update=False, layout=Layout(width='70%', height='80px') ) country_sel.observe(f, names='value') confirmed_sel.observe(f, names='value') death_sel.observe(f, names='value') recover_sel.observe(f, names='value') date_sel.observe(f, names='value') x = 
[dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[1:])] y1 = df_confirmed[df_confirmed['Country'] == 'India'].iloc[0][1:] y1.index = pd.to_datetime(y1.index) y2 = df_deaths[df_deaths['Country'] == 'India'].iloc[0][1:] y2.index = pd.to_datetime(y2.index) y3 = df_recovered[df_recovered['Country'] == 'India'].iloc[0][1:] y3.index = pd.to_datetime(y3.index) fig4 = go.FigureWidget(data=[ go.Scatter( x=y1.index, y=y1, line=dict(color='blue', width=2), name="Confirmed" ), go.Scatter( x=y2.index, y=y2, line=dict(color='red', width=2), name="Deaths" ), go.Scatter( x=y3.index, y=y3, line=dict(color='green', width=2), name="Recovered" ) ]) fig4.update_layout( title='Cumulative data on single graph', yaxis_title="Cases", xaxis_title="Time", legend=dict(x=.8, y=-.5), # margin={"r":0,"t":30,"l":20,"b":20} ) fig4.update_yaxes(rangemode="nonnegative") container2 = widgets.HBox([country_sel, confirmed_sel, death_sel, recover_sel]) cum_sin = widgets.VBox([container2, date_sel, fig4]) cum_sin df_confirmed_n = df_confirmed.copy() for i in range(len(df_confirmed_n)): for j in range(4, len(df_confirmed_n.loc[0])): df_confirmed_n.iat[i, j] = df_confirmed_n.iat[i, j] - df_confirmed.iat[i, j-1] # df_confirmed_n.head() df_deaths_n = df_deaths.copy() for i in range(len(df_confirmed_n)): for j in range(4, len(df_confirmed_n.loc[0])): df_deaths_n.iat[i, j] = df_deaths_n.iat[i, j] - df_deaths.iat[i, j-1] # df_deaths_n.head() df_recovered_n = df_recovered.copy() for i in range(len(df_confirmed_n)): for j in range(4, len(df_confirmed_n.loc[0])): df_recovered_n.iat[i, j] = df_recovered_n.iat[i, j] - df_recovered.iat[i, j-1] # df_recovered_n.head() def f3(x): dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed_n.columns[1:])] st_val = dates.index(date_sel3.value[0]) + 1 end_val = dates.index(date_sel3.value[1]) + 2 x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed_n.columns[st_val:end_val])] y1 = df_confirmed_n[df_confirmed_n['Country'] == country_sel3.value].iloc[0][st_val:end_val] y1.index = pd.to_datetime(y1.index) y2 = df_deaths_n[df_deaths_n['Country'] == country_sel3.value].iloc[0][st_val:end_val] y2.index = pd.to_datetime(y1.index) y3 = df_recovered_n[df_recovered_n['Country'] == country_sel3.value].iloc[0][st_val:end_val] y3.index = pd.to_datetime(y1.index) fig6.data[0].x = y1.index fig6.data[0].y = y1 fig6.data[1].x = y2.index fig6.data[1].y = y2 fig6.data[2].x = y3.index fig6.data[2].y = y3 country_sel3 = widgets.Dropdown( options=list(df_confirmed['Country'].unique()), description='Country:', value='India', disabled=False, ) dates = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[3:])] dates_i = range(len(dates)) options = [(i.strftime(' %d %b %y '), i) for i in dates] date_sel3 = widgets.SelectionRangeSlider( options=options, index=(0, len(dates)-1), description='Months', disabled=False, continuous_update=False, layout=Layout(width='70%', height='80px') ) country_sel3.observe(f3, names='value') date_sel3.observe(f3, names='value') x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in list(df_confirmed.columns[3:])] y1 = df_confirmed_n[df_confirmed['Country'] == 'India'].iloc[0][3:] y1.index = pd.to_datetime(y1.index) y2 = df_deaths_n[df_deaths['Country'] == 'India'].iloc[0][3:] y2.index = pd.to_datetime(y2.index) y3 = df_recovered_n[df_recovered['Country'] == 'India'].iloc[0][3:] y3.index = pd.to_datetime(y3.index) fig6 = make_subplots(rows=1, cols=3, subplot_titles=("Confirmed", "Deaths", "Recovered")) 
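# fig6 visualizes the daily (day-over-day) series computed above by differencing the cumulative frames (df_confirmed_n, df_deaths_n, df_recovered_n), with one trace added per subplot column below.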
fig6.add_trace( go.Scatter( x=y1.index, y=y1, line=dict(color='blue', width=2), name="Confirmed" ), row=1, col=1 ) fig6.add_trace( go.Scatter( x=y2.index, y=y2, line=dict(color='red', width=2), name="Deaths" ), row=1, col=2 ) fig6.add_trace( go.Scatter( x=y3.index, y=y3, line=dict(color='green', width=2), name="Recovered" ), row=1, col=3 ) fig6 = go.FigureWidget(fig6) fig6.update_layout(title_text="Daily data on seperate graphs", showlegend=False) for i in range(3): fig6.update_xaxes(title_text="Time", row=1, col=i+1) fig6.update_yaxes(title_text="Cases", row=1, col=1) fig6.update_yaxes(rangemode="nonnegative") container5 = widgets.HBox([country_sel3]) dai_sep = widgets.VBox([container5, date_sel3, fig6]) dai_sepMap Plotsdef f4(x): if map_sel.value == 'Total Confirmed': map_fig['data'][0].z = df_confirmed.iloc[:, -1] map_fig['data'][0].colorscale = "Blues" elif map_sel.value == 'Deadth-to-Confrimed Ratio': map_fig['data'][0].z = df_deaths.iloc[:, -1]/df_confirmed.iloc[:, -1] map_fig['data'][0].colorscale = "Reds" elif map_sel.value == 'Recovered-to-Confrimed Ratio': map_fig['data'][0].z = df_recovered.iloc[:, -1]/df_confirmed.iloc[:, -1] map_fig['data'][0].colorscale = "Greens" map_fig.update_layout(title_text = map_sel.value, margin={"r":0,"t":40,"l":0,"b":0}, height=500) map_sel = widgets.Dropdown( options=['Total Confirmed', 'Deadth-to-Confrimed Ratio', 'Recovered-to-Confrimed Ratio'], description='Select Plot:', value='Total Confirmed', disabled=False, ) map_sel.observe(f4, names='value') map_fig = go.Figure(go.Choropleth(locationmode = 'country names', locations=list(df_confirmed['Country']), z=df_confirmed.iloc[:, -1], colorscale="Blues", name='Confirmed' )) map_fig = go.FigureWidget(map_fig) map_fig.update_layout(title_text = 'Total Confirmed', margin={"r":0,"t":40,"l":0,"b":0}, height=500) maps_w = widgets.VBox([map_sel, map_fig]) maps_w**Setup**import itertools import os import matplotlib.pylab as plt import tensorflow as tf from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense import numpy as np**Load DataSet**data_dir = tf.keras.utils.get_file( 'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', untar=True)Downloading data from https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz 228818944/228813984 [==============================] - 4s 0us/step**Pre-processing Data**IMAGE_SIZE = (224, 224) BATCH_SIZE = 32 datagen_kwargs = dict(rescale=1./255, validation_split=.20) dataflow_kwargs = dict(target_size=IMAGE_SIZE, batch_size=BATCH_SIZE, interpolation="bilinear") valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator( **datagen_kwargs) valid_generator = valid_datagen.flow_from_directory( data_dir, subset="validation", shuffle=False, **dataflow_kwargs) do_data_augmentation = True if do_data_augmentation: train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range=40, horizontal_flip=True, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, **datagen_kwargs) else: train_datagen = valid_datagen train_generator = train_datagen.flow_from_directory( data_dir, subset="training", shuffle=True, **dataflow_kwargs)Found 731 images belonging to 5 classes. 
Found 2939 images belonging to 5 classes.**Transfer Learning to Build Model**model = Sequential() model.add(MobileNetV2(include_top = False, weights="imagenet", input_shape=(224, 224, 3))) model.add(tf.keras.layers.GlobalAveragePooling2D()) model.add(Dense(1, activation = 'softmax')) model.layers[0].trainable = FalseDownloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5 9412608/9406464 [==============================] - 0s 0us/step**Summary of Updated Model**model.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= mobilenetv2_1.00_224 (Functi (None, 7, 7, 1280) 2257984 _________________________________________________________________ global_average_pooling2d (Gl (None, 1280) 0 _________________________________________________________________ dense (Dense) (None, 1) 1281 ================================================================= Total params: 2,259,265 Trainable params: 1,281 Non-trainable params: 2,257,984 _________________________________________________________________**Compile the Model**from tensorflow.keras.optimizers import RMSprop from tensorflow.keras import optimizers model.compile(optimizer=RMSprop(lr=0.005), loss = 'categorical_crossentropy', metrics = 'accuracy')**Train the Model**steps_per_epoch = train_generator.samples // train_generator.batch_size validation_steps = valid_generator.samples // valid_generator.batch_size hist = model.fit( train_generator, epochs=5, steps_per_epoch=steps_per_epoch, validation_data=valid_generator, validation_steps=validation_steps).historyEpoch 1/5 91/91 [==============================] - 83s 565ms/step - loss: 1.2431 - accuracy: 0.8342 - val_loss: 0.6834 - val_accuracy: 0.9148 Epoch 2/5 91/91 [==============================] - 51s 559ms/step - loss: 0.5575 - accuracy: 0.9297 - val_loss: 0.6170 - val_accuracy: 0.9202 Epoch 3/5 91/91 [==============================] - 51s 561ms/step - loss: 0.4656 - accuracy: 0.9378 - val_loss: 0.5212 - val_accuracy: 0.9480 Epoch 4/5 91/91 [==============================] - 51s 559ms/step - loss: 0.4775 - accuracy: 0.9405 - val_loss: 0.5768 - val_accuracy: 0.9369 Epoch 5/5 91/91 [==============================] - 51s 559ms/step - loss: 0.4205 - accuracy: 0.9474 - val_loss: 0.5294 - val_accuracy: 0.9466**Test Accuracy of Model on the validation data**model.evaluate(valid_generator)23/23 [==============================] - 4s 179ms/step - loss: 0.5225 - accuracy: 0.9475**Prediction using Model**def get_class_string_from_index(index): for class_string, class_index in valid_generator.class_indices.items(): if class_index == index: return class_string x, y = next(valid_generator) image = x[0, :, :, :] true_index = np.argmax(y[0]) plt.imshow(image) plt.axis('off') plt.show() # Expand the validation image to (1, 224, 224, 3) before predicting the label prediction_scores = model.predict(np.expand_dims(image, axis=0)) predicted_index = np.argmax(prediction_scores) print("True label: " + get_class_string_from_index(true_index)) print("Predicted label: " + get_class_string_from_index(predicted_index))**Save the Model**saved_model_path = f"/content" tf.saved_model.save(model, saved_model_path)INFO:tensorflow:Assets written to: /content/assetsnote_seq = ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4', 'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 
'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4', 'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4'] note_seq[0:5], note_seq[1:6], note_seq[2:7] code2idx = {'c4':0, 'd4':1, 'e4':2, 'f4':3, 'g4':4, 'a4':5, 'b4':6, 'c8':7, 'd8':8, 'e8':9, 'f8':10, 'g8':11, 'a8':12, 'b8':13} len(note_seq), range(len(note_seq)-5) dataset = list() for i in range(len(note_seq)-4): # print(note_seq[i:i+5]) subset = note_seq[i:i+5] items = list() for item in subset: code2idx[item] items.append(code2idx[item]) # print(items) dataset.append(items) print(dataset) import numpy as np datasets = np.array(dataset) x_train = datasets[:,:4] x_train.shape, x_train y_train = datasets[:,4] y_train.shape, y_train x_train = x_train / 13 x_train[2] X_train = np.reshape(x_train, (50, 4, 1)) X_train.shape, X_train[2] import tensorflow as tf model = tf.keras.models.Sequential() model.add(tf.keras.Input(shape=(4, 1))) model.add(tf.keras.layers.LSTM(128)) model.add(tf.keras.layers.Dense(14, activation='softmax')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc']) model.fit(X_train, y_train, epochs=1000, batch_size=10)Epoch 1/1000 5/5 [==============================] - 2s 6ms/step - loss: 2.6242 - acc: 0.2200 Epoch 2/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.5727 - acc: 0.3400 Epoch 3/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.5099 - acc: 0.3400 Epoch 4/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.4410 - acc: 0.3400 Epoch 5/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.3591 - acc: 0.3400 Epoch 6/1000 5/5 [==============================] - 0s 6ms/step - loss: 2.2395 - acc: 0.3400 Epoch 7/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.1212 - acc: 0.3400 Epoch 8/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.0121 - acc: 0.3400 Epoch 9/1000 5/5 [==============================] - 0s 5ms/step - loss: 2.0164 - acc: 0.3400 Epoch 10/1000 5/5 [==============================] - 0s 7ms/step - loss: 1.9728 - acc: 0.3400 Epoch 11/1000 5/5 [==============================] - 0s 5ms/step - lo[...]evaluationmodel.evaluate(X_train, y_train) X_train[0:1] pred = model.predict(X_train[0:1]) np.argmax(pred) pred1 = model.predict(X_train[1:2]) np.argmax(pred1) pred2 = model.predict(X_train[2:3]) np.argmax(pred2) from sklearn.metrics import classification_report, confusion_matrix y_pred = model.predict(X_train) y_pred.shape, y_pred[5] y_pred_argmax = np.argmax(y_pred, axis=1) y_pred_argmax.shape, y_pred_argmax[5] y_train.shape, y_train[5] print(classification_report(y_train, y_pred_argmax)) y_train confusion_matrix(y_train, y_pred_argmax) import seaborn as sns sns.heatmap(confusion_matrix(y_train, y_pred_argmax), annot=True)Import#region import matplotlib.pyplot as plt import math from sympy import * import matplotlib.pyplot as plt from numpy import linspace import numpy as np #endregion t = symbols('t') f = symbols('f', cls=Function)Input#read input #region def ReadArray(f): line = f.readline() result = list(map(lambda x: float(N(x)), line.split())) return result def ReadMatrix(f): listCoef = [] line = f.readline() while(line.strip() != ''): coef = list(map(lambda x: float(N(x)), line.split())) listCoef.append(coef) line = f.readline() #print('listCoef: ') #print(listCoef) return listCoef def RandN(listCoef): # R & N R = listCoef[0][0] N = math.inf for coef in listCoef: if(R > coef[0]): R = 
coef[0] coef.pop(0) if(N > len(coef)): N = len(coef) if R <= 0: raise ValueError("invalid input: radius <= 0") return (R,N) #endregionMain functiondef calculate(initial, listCoef, N): result = initial # result array c_i k=len(listCoef)-1 # array of coefficient arrays a_i and f for n in range(0,N-k): c=0 offset = 1 for i in range(n+1,n+k+1): offset *= i #start calculating c_{n+k} for m in range(0,n+1): mult = 1 for i in range(0,k): c += listCoef[i][n-m] * result[m+i] * mult mult *= m+i+1 c= (listCoef[k][n]-c)/offset # -1*n! / (n+k)! result.append(c) return result #Program def Polynomial(inputPath): f = open(inputPath,"r") initial = ReadArray(f) listCoef = ReadMatrix(f) f.close() R,N = RandN(listCoef) result = calculate(initial, listCoef, N) return (R, result) def Restore(array): 3Plot and save#region def Save(result, outputPath, mode): f = open(outputPath, mode) f.write("Radius of convergence = " + str(result[0]) + ", Result: \n") f.write(str(result[1])) f.close() def Plotf(f, interval): t_vals = linspace(interval[0], interval[1], 1000) lam_x = lambdify(t, f, modules = ['numpy']) x_vals = lam_x(t_vals) plt.plot(t_vals, x_vals) def Plot(result, start, end, g = None): f = 0 power = 0 for i in result: f += i * (t ** power) power += 1 Plotf(f, (start, end)) if g is not None: Plotf(g, (start, end)) return f #endregion #FrobeniusTesttest1 = 'example1.txt' test2 = 'example2.txt' output = 'outputPath_1.txt' R, array = Polynomial(test1) print("Radius of convergence = " ,str(R), ", Result:") np.set_printoptions(precision=1) print(np.array(array)) f = Plot(array, -2 , 2, g = sin(3*t)) print(f.evalf(2)) Save((R,array),output,"w") R, array = Polynomial(test2) print("Radius of convergence = " + str(R) + ", Result: \n") print(array) Plot(array, -1 , 1, g = sin(t)) Save((R,array),output,"w") Plot([1,2], -2 , 2, g = sin(3*t))Longest Palindromic Substring[link](https://www.algoexpert.io/questions/Longest%20Palindromic%20Substring) My Solution# O(n^2) time | O(n) space def longestPalindromicSubstring(string): # Write your code here. res = [0, 1] for centerIdx in range(1, len(string)): odd = findLongestPalindrome(string, centerIdx - 1, centerIdx + 1) even = findLongestPalindrome(string, centerIdx - 1, centerIdx) res = max(odd, even, res, key=lambda x: x[1] - x[0]) return string[res[0]:res[1]] def findLongestPalindrome(string, left, right): while left >= 0 and right < len(string): if string[left] != string[right]: break left -= 1 right += 1 return [left + 1, right] # Manacher's Algorithm # O(n) time | O(n) space def longestPalindromicSubstring(string): # Write your code here. 
newString = "#" + "#".join(string) + "#" radiuses = [0 for x in newString] # not including the center longestCenter = 0 left = 0 right = 0 for i in range(1, len(newString)): if i <= right: j = left + right - i radiuses[i] = min(j - left, radiuses[j]) k = radiuses[i] while i - k - 1 >= 0 and i + k + 1 < len(newString): if newString[i - k - 1] == newString[i + k + 1]: k += 1 else: break radiuses[i] = k potentialRight = i + k if potentialRight > right: right = potentialRight left = i - k if radiuses[i] > radiuses[longestCenter]: longestCenter = i resLeft = (longestCenter - radiuses[longestCenter]) // 2 resRight = (longestCenter + radiuses[longestCenter] - 1) // 2 + 1 return string[resLeft:resRight] longestPalindromicSubstring('ababababababa')Expert Solution# O(n^3) time | O(n) space def longestPalindromicSubstring(string): longest = "" for i in range(len(string)): for j in range(i, len(string)): substring = string[i : j + 1] if len(substring) > len(longest) and isPalindrome(substring): longest = substring return longest def isPalindrome(string): leftIdx = 0 rightIdx = len(string) - 1 while leftIdx < rightIdx: if string[leftIdx] != string[rightIdx]: return False leftIdx += 1 rightIdx -= 1 return True # O(n^2) time | O(n) space def longestPalindromicSubstring(string): currentLongest = [0, 1] for i in range(1, len(string)): odd = getLongestPalindromeFrom(string, i - 1, i + 1) even = getLongestPalindromeFrom(string, i - 1, i) longest = max(odd, even, key=lambda x: x[1] - x[0]) currentLongest = max(longest, currentLongest, key=lambda x: x[1] - x[0]) return string[currentLongest[0] : currentLongest[1]] def getLongestPalindromeFrom(string, leftIdx, rightIdx): while leftIdx >= 0 and rightIdx < len(string): if string[leftIdx] != string[rightIdx]: break leftIdx -= 1 rightIdx += 1 return [leftIdx + 1, rightIdx]pytti: python text to image---This is a closed beta. Leak it if you must, information wants to be free.[pytti is made possible by supporters like you.](https://www.patreon.com/sportsracer48) [Thank you.](https://www.youtube.com/watch?v=TexDW6nEhgU)# @title Licensed under the MIT License # Copyleft (c) 2021 # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.Instructions `scenes:` Descriptions of scenes you want generated, separated by `||`. 
Each scene can contain multiple prompts, separated by `|`.*Example:* `Winter sunrise | icy landscape || Winter day | snowy skyline || Winter sunset | chilly air || Winter night | clear sky` would go through several winter scenes.**Advanced:** weight prompts with `description:weight`. Higher `weight` values will be prioritized by the optimizer, and negative `weight` values will remove the description from the image. The default weight is $1$. Weights can also be functions of $t$ to change over the course of an animation.*Example scene:* `blue sky:10|martian landscape|red sky:-1` would try to turn the martian sky blue.**Advanced:** stop prompts once the image matches them sufficiently with `description:weight:stop`. `stop` should be between $0$ and $1$ for positive prompts, or between $-1$ and $0$ for negative prompts. Lower `stop` values will have more effect on the image (remember that $-1<-0.5<0$). A prompt with a negative `weight` will often go haywire without a stop. Stops can also be functions of $t$ to change over the course of an animation.*Example scene:* `Feathered dinosaurs|birds:1:0.87|scales:-1:-.9|text:-1:-.9` Would try to make feathered dinosaurs, lightly like birds, without scales or text, but without making 'anti-scales' or 'anti-text.'**NEW:****Advanced:** Use `description:weight_mask description` with a text prompt as `mask`. The prompt will only be applied to areas of the image that match `mask description` according to CLIP.*Example scene:* `Khaleesi Daenerys Targaryen | mother of dragons | dragon:3_baby` would only apply the weight `dragon` to parts of the image that match `baby`, thus turning the babies that `mother` tends to make into dragons (hopefully).**Advanced:** Use `description:weight_[mask]` with a URL or path to an image, or a path to a .mp4 video to use as a `mask`. The prompt will only be applied to the masked (white) areas of the mask image. Use `description:weight_[-mask]` to apply the prompt to the black areas instead.*Example scene:* `sunlight:3_[mask.mp4]|midnight:3_[-mask.mp4]` Would apply `sunlight` in the white areas of `mask.mp4`, and `midnight` in the black areas.**Legacy:** Directional weights will still work as before, but they aren't as good as masks.**Advanced:** Use `[path or url]` as a prompt to add a semantic image prompt. This will be read by CLIP and understood as a near perfect text description of the image.*Example scene:* `[artist signature.png]:-1:-.95|[https://i.redd.it/ewpeykozy7e71.png]:3|fractal clouds|hole in the sky`---`scene_prefix:` text prepended to the beginning of each scene.*Example:* `Trending on Arstation|``scene_suffix:` text appended to the end of each scene.*Example:* ` by urney``interpolation_steps:` number of steps to spend smoothly transitioning from the last scene at the start of each scene. $200$ is a good default. Set to $0$ to disable.`steps_per_scene:` total number of steps to spend rendering each scene. Should be at least `interpolation_steps`. This will indirectly control the total length of an animation.---**NEW**: `direct_image_prompts:` paths or urls of images that you want your image to look like in a literal sense, along with `weight_mask` and `stop` values, separated by `|`.Apply masks to direct image prompts with `path or url of image:weight_path or url of mask` For video masks it must be a path to an mp4 file.**Legacy** latent image prompts are no more. They are now rolled into direct image prompts.---`init_image:` path or url of start image. 
Works well for creating a central focus.`direct_init_weight:` Defaults to $0$. Use the initial image as a direct image prompt. Equivalent to adding `init_image:direct_init_weight` as a `direct_image_prompt`. Supports weights, masks, and stops.`semantic_init_weight:` Defaults to $0$. Defaults to $0$. Use the initial image as a semantic image prompt. Equivalent to adding `[init_image]:direct_init_weight` as a prompt to each scene in `scenes`. Supports weights, masks, and stops. **IMPORTANT** since this is a semantic prompt, you still need to put the mask in `[` `]` to denote it as a path or url, otherwise it will be read as text instead of a file.---`width`, `height:` image size. Set one of these $-1$ to derive it from the aspect ratio of the init image.`pixel_size:` integer image scale factor. Makes the image bigger. Set to $1$ for VQGAN or face VRAM issues.`smoothing_weight:` makes the image smoother. Defaults to $0$ (no smoothing). Can also be negative for that deep fried look.`image_model:` select how your image will be represented.`vqgan_model:` select your VQGAN version (only for `image_model: VQGAN`)`random_initial_palette:` if checked, palettes will start out with random colors. Otherwise they will start out as grayscale. (only for `image_model: Limited Palette`)`palette_size:` number of colors in each palette. (only for `image_model: Limited Palette`)`palettes:` total number of palettes. The image will have `palette_size*palettes` colors total. (only for `image_model: Limited Palette`)`gamma:` relative gamma value. Higher values make the image darker and higher contrast, lower values make the image lighter and lower contrast. (only for `image_model: Limited Palette`). $1$ is a good default.`hdr_weight:` how strongly the optimizer will maintain the `gamma`. Set to $0$ to disable. (only for `image_model: Limited Palette`)`palette_normalization_weight:` how strongly the optimizer will maintain the palettes' presence in the image. Prevents the image from losing palettes. (only for `image_model: Limited Palette`)`show_palette:` check this box to see the palette each time the image is displayed. (only for `image_model: Limited Palette`)`target_pallete:` path or url of an image which the model will use to make the palette it uses.`lock_pallete:` force the model to use the initial palette (most useful from restore, but will force a grayscale image or a wonky palette otherwise).---`animation_mode:` select animation mode or disable animation.`sampling_mode:` how pixels are sampled during animation. `nearest` will keep the image sharp, but may look bad. `bilinear` will smooth the image out, and `bicubic` is untested :)`infill_mode:` select how new pixels should be filled if they come in from the edge.* mirror: reflect image over boundary* wrap: pull pixels from opposite side* black: fill with black * smear: sample closest pixel in image`pre_animation_steps:` number of steps to run before animation starts, to begin with a stable image. $250$ is a good default.`steps_per_frame:` number of steps between each image move. $50$ is a good default.`frames_per_second:` number of frames to render each second. Controls how $t$ is scaled.`direct_stabilization_weight: ` keeps the current frame as a direct image prompt. For `Video Source` this will use the current frame of the video as a direct image prompt. For `2D` and `3D` this will use the shifted version of the previous frame. Also supports masks: `weight_mask.mp4`.`semantic_stabilization_weight: ` keeps the current frame as a semantic image prompt. 
For `Video Source` this will use the current frame of the video as a direct image prompt. For `2D` and `3D` this will use the shifted version of the previous frame. Also supports masks: `weight_[mask.mp4]` or `weight_mask phrase`.`depth_stabilization_weight: ` keeps the depth model output somewhat consistent at a *VERY* steep performance cost. For `Video Source` this will use the current frame of the video as a semantic image prompt. For `2D` and `3D` this will use the shifted version of the previous frame. Also supports masks: `weight_mask.mp4`.`edge_stabilization_weight: ` keeps the images contours somewhat consistent at very little performance cost. For `Video Source` this will use the current frame of the video as a direct image prompt with a sobel filter. For `2D` and `3D` this will use the shifted version of the previous frame. Also supports masks: `weight_mask.mp4`.`flow_stabilization_weight: ` used for `animation_mode: 3D` and `Video Source` to prevent flickering. Comes with a slight performance cost for `Video Source`, and a great one for `3D`, due to implementation differences. Also supports masks: `weight_mask.mp4`. For video source, the mask should select the part of the frame you want to move, and the rest will be treated as a still background.---`video_path: ` path to mp4 file for `Video Source``frame_stride` advance this many frames in the video for each output frame. This is surprisingly useful. Set to $1$ to render each frame. Video masks will also step at this rate.`reencode_each_frame: ` check this box to use each video frame as an `init_image` instead of warping each output frame into the init for the next. Cuts will still be detected and trigger a reencode.`flow_long_term_samples: ` Sample multiple frames into the past for consistent interpolation even with disocclusion, as described by [, , and (2016)](https://arxiv.org/abs/1604.08610). Each sample is twice as far back in the past as the last, so the earliest sampled frame is $2^{\text{long_term_flow_samples}}$ frames in the past. Set to $0$ to disable.---`translate_x:` horizontal image motion as a function of time $t$ in seconds.`translate_y:` vertical image motion as a function of time $t$ in seconds.`translate_z_3d:` forward image motion as a function of time $t$ in seconds. (only for `animation_mode:3D`)`rotate_3d:` image rotation as a quaternion $\left[r,x,y,z\right]$ as a function of time $t$ in seconds. (only for `animation_mode:3D`)`rotate_2d:` image rotation in degrees as a function of time $t$ in seconds. (only for `animation_mode:2D`)`zoom_x_2d:` horizontal image zoom as a function of time $t$ in seconds. (only for `animation_mode:2D`)`zoom_y_2d:` vertical image zoom as a function of time $t$ in seconds. (only for `animation_mode:2D`)`lock_camera:` check this box to prevent all scrolling or drifting. Makes for more stable 3D rotations. (only for `animation_mode:3D`)`field_of_view:` vertical field of view in degrees. (only for `animation_mode:3D`)`near_plane:` closest depth distance in pixels. (only for `animation_mode:3D`)`far_plane:` farthest depth distance in pixels. (only for `animation_mode:3D`)---`file_namespace:` output directory name.`allow_overwrite:` check to overwrite existing files in `file_namespace`.`display_every:` how many steps between each time the image is displayed in the notebook.`clear_every:` how many steps between each time notebook console is cleared.`display_scale:` image display scale in notebook. $1$ will show the image at full size. 
Does not affect saved images.`save_every:` how many steps between each time the image is saved. Set to `steps_per_frame` for consistent animation.`backups:` number of backups to keep (only the oldest backups are deleted). Large images make very large backups, so be warned. Set to `all` to save all backups. These are used for the `flow_long_term_samples` so be sure that this is at least $2^{\text{flow_long_term_samples}}+1$ for `Video Source` mode.`show_graphs:` check this to see graphs of the loss values each time the image is displayed. Disable this for local runtimes.`approximate_vram_usage:` currently broken. Don't believe its lies.---`ViTB32, ViTB16, RN50, RN50x4:` select your CLIP models. These take a lot of VRAM.`learning_rate:` how quickly the image changes.`reset_lr_each_frame:` the optimizer will adaptively change the learning rate, so this will thwart it.`seed:` pseudorandom seed.---`cutouts:` number of cutouts. Reduce this to use less VRAM at the cost of quality and speed.`cut_pow:` should be positive. Large values shrink cutouts, making the image more detailed, small values expand the cutouts, making it more coherent. $1$ is a good default. $3$ or higher can cause crashes.`cutout_border:` should be between $0$ and $1$. Allows cutouts to poke out over the edges of the image by this fraction of the image size, allowing better detail around the edges of the image. Set to $0$ to disable. $0.25$ is a good default.`border_mode:` how to fill cutouts that stick out over the edge of the image. Match with `infill_mode` for consistent infill.* clamp: move cutouts back onto image* mirror: reflect image over boundary* wrap: pull pixels from opposite side* black: fill with black * smear: sample closest pixel in image Step 1: SetupRun the cells in this section once for each runtime, or after a factory reset.#@title 1.1 Mount google drive (optional) #@markdown Mounting your drive is optional but recommended. You can even restore from google randomly #@markdown kicking you out if you mount your drive. from google.colab import drive drive._mount('/content/drive') !mkdir -p /content/drive/MyDrive/pytti_test %cd /content/drive/MyDrive/pytti_test #@title 1.2 NVIDIA-SMI (optional) #@markdown View information about your runtime GPU. #@markdown Google will connect you to an industrial strength GPU, which is needed to run #@markdown this notebook. You can also disable error checking on your GPU to get some #@markdown more VRAM, at a marginal cost to stability. You will have to restart the runtime after #@markdown disabling it. enable_error_checking = False#@param {type:"boolean"} if enable_error_checking: !nvidia-smi else: !nvidia-smi !nvidia-smi -i 0 -e 0 #@title 1.3 Install everything else #@markdown Run this cell on a fresh runtime to install the libraries and modules. 
from os.path import exists as path_exists if path_exists('/content/drive/MyDrive/pytti_test'): %cd /content/drive/MyDrive/pytti_test try: from adjustText import adjust_text import pytti, torch everything_installed = True except ModuleNotFoundError: everything_installed = False def install_everything(): !pip install tensorflow==1.15.2 !pip install transformers &> /dev/null !pip install PyGLM &> /dev/null !pip install ftfy regex tqdm omegaconf pytorch-lightning &> /dev/null !pip install kornia &> /dev/null !pip install einops &> /dev/null !pip install imageio-ffmpeg &> /dev/null !pip install adjustText exrex bunch &> /dev/null !pip install matplotlib-label-lines &> /dev/null !git clone https://github.com/openai/CLIP.git &> /dev/null !git clone https://github.com/CompVis/taming-transformers.git &> /dev/null if not path_exists('./pytti'): !git clone --branch p5 https://github.com/sportsracer48/pytti.git &> /dev/null else: !rm -r pytti !git clone --branch p5 https://github.com/sportsracer48/pytti.git !git clone https://github.com/shariqfarooq123/AdaBins.git &> /dev/null !git clone https://github.com/zacjiang/GMA.git &> /dev/null !mkdir -p AdaBins/pretrained if not path_exists('AdaBins/pretrained/AdaBins_nyu.pt'): !gdown https://drive.google.com/uc?id=1lvyZZbC9NLcS8a__YPcUP7rDiIpbRpoF if not path_exists('AdaBins_nyu.pt'): !gdown https://drive.google.com/uc?id=1zgGJrkFkJbRouqMaWArXE4WF_rhj-pxW !mv AdaBins_nyu.pt AdaBins/pretrained/AdaBins_nyu.pt from pytti.Notebook import change_tqdm_color change_tqdm_color() !mkdir -p images_out !mkdir -p videos force_install = True #@param{type:"boolean"} if not everything_installed or force_install: install_everything() elif everything_installed: from pytti.Notebook import change_tqdm_color change_tqdm_color()Step 2: Run it!Edit the parameters, or load saved parameters, then run the model.#@title #2.1 Parameters: #@markdown --- from os.path import exists as path_exists if path_exists('/content/drive/MyDrive/pytti_test'): %cd /content/drive/MyDrive/pytti_test drive_mounted = True else: drive_mounted = False try: from pytti.Notebook import change_tqdm_color, get_last_file except ModuleNotFoundError: if drive_mounted: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('ERROR: please run setup (step 1.3).') else: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).') change_tqdm_color() import glob, json, random, re, math try: from bunch import Bunch except ModuleNotFoundError: if drive_mounted: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. raise RuntimeError('ERROR: please run setup (step 1.3).') else: #THIS IS NOT AN ERROR. This is the code that would #make an error if something were wrong. 
raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).') #these are used to make the defaults look pretty model_default = None random_seed = None all = math.inf derive_from_init_aspect_ratio = -1 def define_parameters(): locals_before = locals().copy() #@markdown ###Prompts: scenes = "a sunless garden when the flowers are dead"#@param{type:"string"} scene_prefix = ""#@param{type:"string"} scene_suffix = ""#@param{type:"string"} interpolation_steps = 0#@param{type:"number"} steps_per_scene = 60100#@param{type:"raw"} #@markdown --- #@markdown ###Image Prompts: direct_image_prompts = ""#@param{type:"string"} #@markdown --- #@markdown ###Initial image: init_image = ""#@param{type:"string"} direct_init_weight = ""#@param{type:"string"} semantic_init_weight = ""#@param{type:"string"} #@markdown --- #@markdown ###Image: #@markdown Use `image_model` to select how the model will encode the image image_model = "VQGAN" #@param ["VQGAN", "Limited Palette", "Unlimited Palette"] #@markdown image_model | description | strengths | weaknesses #@markdown --- | -- | -- | -- #@markdown VQGAN | classic VQGAN image | smooth images | limited datasets, slow, VRAM intesnsive #@markdown Limited Palette | pytti differentiable palette | fast, VRAM scales with `palettes` | pixel images #@markdown Unlimited Palette | simple RGB optimization | fast, VRAM efficient | pixel images #@markdown The output image resolution will be `width` $\times$ `pixel_size` by height $\times$ `pixel_size` pixels. #@markdown The easiest way to run out of VRAM is to select `image_model` VQGAN without reducing #@markdown `pixel_size` to $1$. #@markdown For `animation_mode: 3D` the minimum resoultion is about 450 by 400 pixels. width = 180#@param {type:"raw"} height = 112#@param {type:"raw"} pixel_size = 4#@param{type:"number"} smoothing_weight = 0.02#@param{type:"number"} #@markdown `VQGAN` specific settings: vqgan_model = "imagenet" #@param ["imagenet", "coco", "wikiart", "sflckr", "openimages"] #@markdown `Limited Palette` specific settings: random_initial_palette = False#@param{type:"boolean"} palette_size = 6#@param{type:"number"} palettes = 9#@param{type:"number"} gamma = 1#@param{type:"number"} hdr_weight = 0.01#@param{type:"number"} palette_normalization_weight = 0.2#@param{type:"number"} show_palette = False #@param{type:"boolean"} target_palette = ""#@param{type:"string"} lock_palette = False #@param{type:"boolean"} #@markdown --- #@markdown ###Animation: animation_mode = "3D" #@param ["off","2D", "3D", "Video Source"] sampling_mode = "bicubic" #@param ["bilinear","nearest","bicubic"] infill_mode = "wrap" #@param ["mirror","wrap","black","smear"] pre_animation_steps = 100#@param{type:"number"} steps_per_frame = 50#@param{type:"number"} frames_per_second = 12#@param{type:"number"} #@markdown --- #@markdown ###Stabilization Weights: direct_stabilization_weight = ""#@param{type:"string"} semantic_stabilization_weight = ""#@param{type:"string"} depth_stabilization_weight = ""#@param{type:"string"} edge_stabilization_weight = ""#@param{type:"string"} #@markdown `flow_stabilization_weight` is used for `animation_mode: 3D` and `Video Source` flow_stabilization_weight = ""#@param{type:"string"} #@markdown --- #@markdown ###Video Tracking: #@markdown Only for `animation_mode: Video Source`. 
video_path = ""#@param{type:"string"} frame_stride = 1#@param{type:"number"} reencode_each_frame = True #@param{type:"boolean"} flow_long_term_samples = 1#@param{type:"number"} #@markdown --- #@markdown ###Image Motion: translate_x = "-1700*sin(radians(1.5))" #@param{type:"string"} translate_y = "0" #@param{type:"string"} #@markdown `..._3d` is only used in 3D mode. translate_z_3d = "(50+10*t)*sin(t/10*pi)**2" #@param{type:"string"} #@markdown `rotate_3d` *must* be a `[w,x,y,z]` rotation (unit) quaternion. Use `rotate_3d: [1,0,0,0]` for no rotation. #@markdown [Learn more about rotation quaternions here](https://eater.net/quaternions). rotate_3d = "[cos(radians(1.5)), 0, -sin(radians(1.5))/sqrt(2), sin(radians(1.5))/sqrt(2)]"#@param{type:"string"} #@markdown `..._2d` is only used in 2D mode. rotate_2d = "5" #@param{type:"string"} zoom_x_2d = "0" #@param{type:"string"} zoom_y_2d = "0" #@param{type:"string"} #@markdown 3D camera (only used in 3D mode): lock_camera = True#@param{type:"boolean"} field_of_view = 60#@param{type:"number"} near_plane = 1#@param{type:"number"} far_plane = 10000#@param{type:"number"} #@markdown --- #@markdown ###Output: file_namespace = "wilde1"#@param{type:"string"} if file_namespace == '': file_namespace = 'out' allow_overwrite = False#@param{type:"boolean"} base_name = file_namespace if not allow_overwrite and path_exists(f'images_out/{file_namespace}'): _, i = get_last_file(f'images_out/{file_namespace}', f'^(?P<pre>{re.escape(file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$')
        if i == 0:
          print(f"WARNING: file_namespace {file_namespace} already has images from run 0")
        elif i is not None:
          print(f"WARNING: file_namespace {file_namespace} already has images from runs 0 through {i}")
      elif glob.glob(f'images_out/{file_namespace}/{base_name}_*.png'):
        print(f"WARNING: file_namespace {file_namespace} has images which will be overwritten")
      try:
        del i
        del _
      except NameError:
        pass
      del base_name
      display_every = steps_per_frame #@param{type:"raw"}
      clear_every = 0 #@param{type:"raw"}
      display_scale = 1#@param{type:"number"}
      save_every = steps_per_frame #@param{type:"raw"}
      backups =  2**(flow_long_term_samples+1)+1#this is used for video transfer, so don't lower it if that's what you're doing#@param {type:"raw"}
      show_graphs = False #@param{type:"boolean"}
      approximate_vram_usage = False#@param{type:"boolean"}
    
      #@markdown ---
      #@markdown ###Model:
      #@markdown Quality settings from Dribnet's CLIPIT (https://github.com/dribnet/clipit).
      #@markdown Selecting too many will use up all your VRAM and slow down the model.
      #@markdown I usually use ViTB32, ViTB16, and RN50 if I get an A100; otherwise I just use ViTB32.
    
      #@markdown quality | CLIP models
      #@markdown --- | --
      #@markdown  draft | ViTB32 
      #@markdown  normal | ViTB32, ViTB16 
      #@markdown  high | ViTB32, ViTB16, RN50
      #@markdown  best | ViTB32, ViTB16, RN50x4
      ViTB32 = True #@param{type:"boolean"}
      ViTB16 = True #@param{type:"boolean"}
      RN50 = False #@param{type:"boolean"}
      RN50x4 = False #@param{type:"boolean"}
      #@markdown the default learning rate is `0.1` for all the VQGAN models
      #@markdown except openimages, which is `0.15`. For the palette modes the
      #@markdown default is `0.02`. 
      learning_rate =  model_default#@param{type:"raw"}
      reset_lr_each_frame = True#@param{type:"boolean"}
      seed = random_seed #@param{type:"raw"}
      #@markdown **Cutouts**:
    
      #@markdown [Cutouts are how CLIP sees the image.](https://twitter.com/remi_durant/status/1460607677801897990)
      cutouts =  40#@param{type:"number"}
      cut_pow =  2#@param {type:"number"}
      cutout_border =  .25#@param {type:"number"}
      #@markdown NOTE: prompt masks (`prompt:weight_[mask.png]`) will not work right on '`wrap`' or '`mirror`' mode.
      border_mode = "clamp" #@param ["clamp","mirror","wrap","black","smear"]
      
      if seed is None:
        seed = random.randint(-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff)
      locals_after = locals().copy()
      for k in locals_before.keys():
        del locals_after[k]
      del locals_after['locals_before']
      return locals_after
    
    params = Bunch(define_parameters())
    print("SETTINGS:")
    print(json.dumps(params))
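    # Illustrative sanity check (assumes the defaults above; not a pytti API): the
    # rendered image is width*pixel_size by height*pixel_size pixels, and each scene
    # spans roughly steps_per_scene/(steps_per_frame*frames_per_second) seconds of t.
    print("output resolution:",
          params.width*params.pixel_size, "x", params.height*params.pixel_size)
    print("approx. seconds of t per scene:",
          params.steps_per_scene/(params.steps_per_frame*params.frames_per_second))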
    #@title 2.2 Load settings (optional)
    #@markdown copy the `SETTINGS:` output from the **Parameters** cell (triple click to select the whole
    #@markdown line from `{'scenes'...` to `}`) and paste them in a note to save them for later.
    
    #@markdown Paste them here in the future to load those settings again. Running this cell with blank settings won't do anything.
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
      drive_mounted = True
    else:
      drive_mounted = False
    try:
      from pytti.Notebook import *
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    change_tqdm_color()
      
    import json, random
    try:
      from bunch import Bunch
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    
    settings = ""#@param{type:"string"}
    #@markdown Check `random_seed` to overwrite the seed from the settings with a random one for some variation.
    random_seed = False #@param{type:"boolean"}
    
    if settings != '':
      params = load_settings(settings, random_seed)
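    # Illustrative sketch of the round trip between cells 2.1 and 2.2: cell 2.1 prints
    # json.dumps(params) after "SETTINGS:", and pasting that string into `settings`
    # above hands it to load_settings, which rebuilds the parameter Bunch (and, when
    # random_seed is checked, swaps in a fresh seed). For example:
    #   saved = json.dumps(params)                  # copy this from the 2.1 output
    #   params = load_settings(saved, random_seed)  # paste it here to restore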
    #@title 2.3 Run it!
    #@markdown pytti is 1000% better code than VQLIPSE, so have a look at the code. 
    #@markdown You just might understand what's going on.
    import torch
    
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
      drive_mounted = True
    else:
      drive_mounted = False
    try:
      from pytti.Notebook import *
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    change_tqdm_color()
    import sys
    sys.path.append('./AdaBins')
    
    try:
      from pytti import Perceptor
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    print("Loading pytti...")
    from pytti.Image import PixelImage, RGBImage, VQGANImage
    from pytti.ImageGuide import DirectImageGuide
    from pytti.Perceptor.Embedder import HDMultiClipEmbedder
    from pytti.Perceptor.Prompt import parse_prompt
    from pytti.LossAug import TVLoss, HSVLoss, OpticalFlowLoss, TargetFlowLoss
    from pytti.Transforms import zoom_2d, zoom_3d, apply_flow
    from pytti import *
    from pytti.LossAug.DepthLoss import init_AdaBins
    print("pytti loaded.")
    
    import torch, gc, glob, subprocess, warnings, re, math, json
    import numpy as np
    from IPython import display
    from PIL import Image, ImageEnhance
    
    from torchvision.transforms import functional as TF
    
    #display settings, because usability counts
    #warnings.filterwarnings("error", category=UserWarning)
    %matplotlib inline 
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set()
    import pandas as pd
    plt.style.use('bmh')
    pd.options.display.max_columns = None
    pd.options.display.width = 175
    
    latest = -1
    #@markdown check `batch_mode` to run batch settings
    batch_mode = False #@param{type:"boolean"}
    if batch_mode:
      try:
        batch_list
      except NameError:
        raise RuntimeError("ERROR: no batch settings. Please run 'batch settings' cell at the bottom of the page to use batch mode.")
    else:
      try:
        params
      except NameError:
        raise RuntimeError("ERROR: no parameters. Please run parameters (step 2.1).")
    #@markdown check `restore` to restore from a previous run
    restore = False#@param{type:"boolean"}
    #@markdown check `reencode` if you are restoring with a modified image or modified image settings
    reencode = False#@param{type:"boolean"}
    #@markdown which run to restore
    restore_run = latest #@param{type:"raw"}
    if restore and restore_run == latest:
      _, restore_run = get_last_file(f'backup/{params.file_namespace}', 
                             f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_\\d+\\.bak)$')
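    # File numbering that get_last_file/get_next_file parse with patterns like the one
    # above (group names reconstructed here): the first run of a namespace writes
    # images_out/<ns>/<ns>_1.png, <ns>_2.png, ..., later runs write <ns>(1)_1.png,
    # <ns>(2)_1.png, ..., and backups follow the same scheme under backup/<ns>/ with
    # a .bak suffix, e.g. re.match(pattern, 'wilde1(3)_7.bak').group('index') == '3'.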
    
    def do_run():
      clear_rotoscopers()#what a silly name
      vram_profiling(params.approximate_vram_usage)
      reset_vram_usage()
      global CLIP_MODEL_NAMES
      #@markdown which frame to restore from
      restore_frame =  latest#@param{type:"raw"}
    
      #set up seed for deterministic RNG
      if params.seed is not None:
        torch.manual_seed(params.seed)
    
      #load CLIP
      load_clip(params)
      embedder = HDMultiClipEmbedder(cutn=params.cutouts, 
                                     cut_pow = params.cut_pow, 
                                     padding = params.cutout_border,
                                     border_mode = params.border_mode)
      
      #load scenes
      with vram_usage_mode('Text Prompts'):
        print('Loading prompts...')
        prompts = [[parse_prompt(embedder, p.strip()) 
                  for p in (params.scene_prefix + stage + params.scene_suffix).strip().split('|') if p.strip()]
                  for stage in params.scenes.split('||') if stage]
        print('Prompts loaded.')
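        # e.g. scenes = "Winter sunrise | icy landscape || Winter night" with
        # scene_suffix = " by urney" parses into two stages; prefix/suffix are glued
        # onto the stage string *before* the '|' split, so they attach to the first
        # and last prompt of each stage:
        #   stage 1 -> "Winter sunrise", "icy landscape by urney"
        #   stage 2 -> "Winter night by urney"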
    
      #load init image
      if params.init_image != '':
        init_image_pil = Image.open(fetch(params.init_image)).convert('RGB')
        init_size = init_image_pil.size
        #automatic aspect ratio matching
        if params.width == -1:
          params.width = int(params.height*init_size[0]/init_size[1])
        if params.height == -1:
          params.height = int(params.width*init_size[1]/init_size[0])
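        # e.g. height = 112 with a 1920x1080 init image resolves width = -1 to
        # int(112*1920/1080) = 199, preserving the init image's aspect ratio.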
      else:
        init_image_pil = None
    
      #video source
      if params.animation_mode == "Video Source":
        print(f'loading {params.video_path}...')
        video_frames = get_frames(params.video_path)
        params.pre_animation_steps = max(params.steps_per_frame, params.pre_animation_steps)
        if init_image_pil is None:
          init_image_pil = Image.fromarray(video_frames.get_data(0)).convert('RGB')
          #enhancer = ImageEnhance.Contrast(init_image_pil)
          #init_image_pil = enhancer.enhance(2)
          init_size = init_image_pil.size
          if params.width == -1:
            params.width = int(params.height*init_size[0]/init_size[1])
          if params.height == -1:
            params.height = int(params.width*init_size[1]/init_size[0])
    
      #set up image
      if params.image_model == "Limited Palette":
        img = PixelImage(*format_params(params,
                         'width', 'height', 'pixel_size', 
                         'palette_size', 'palettes', 'gamma', 
                         'hdr_weight', 'palette_normalization_weight'))
        img.encode_random(random_pallet = params.random_initial_palette)
        if params.target_palette.strip() != '':
          img.set_pallet_target(Image.open(fetch(params.target_palette)).convert('RGB'))
        else:
          img.lock_pallet(params.lock_palette)
      elif params.image_model == "Unlimited Palette":
        img = RGBImage(params.width, params.height, params.pixel_size)
        img.encode_random()
      elif params.image_model == "VQGAN":
        VQGANImage.init_vqgan(params.vqgan_model)
        img = VQGANImage(params.width, params.height, params.pixel_size)
        img.encode_random()
    
      loss_augs = []
    
      if init_image_pil is not None:
        if not restore:
          print("Encoding image...")
          img.encode_image(init_image_pil)
          print("Encoded Image:")
          display.display(img.decode_image())
        #set up init image prompt
        init_augs = ['direct_init_weight']
        init_augs = [build_loss(x,params[x],f'init image ({params.init_image})', img, init_image_pil) 
                      for x in init_augs if params[x] not in ['','0']]
        loss_augs.extend(init_augs)
        if params.semantic_init_weight not in ['','0']:
          semantic_init_prompt = parse_prompt(embedder, 
                                        f"init image [{params.init_image}]:{params.semantic_init_weight}", 
                                        init_image_pil)
          prompts[0].append(semantic_init_prompt)
        else:
          semantic_init_prompt = None
      else:
        init_augs, semantic_init_prompt = [], None
    
      #other image prompts
    
      loss_augs.extend(type(img).get_preferred_loss().TargetImage(p.strip(), img.image_shape, is_path = True) 
                       for p in params.direct_image_prompts.split('|') if p.strip())
    
      #stabilization
    
      stabilization_augs = ['direct_stabilization_weight',
                            'depth_stabilization_weight',
                            'edge_stabilization_weight']
      stabilization_augs = [build_loss(x,params[x],'stabilization',
                                       img, init_image_pil) 
                            for x in stabilization_augs if params[x] not in ['','0']]
      loss_augs.extend(stabilization_augs)
      
      if params.semantic_stabilization_weight not in ['0','']:
        last_frame_semantic = parse_prompt(embedder, 
                                           f"stabilization:{params.semantic_stabilization_weight}", 
                                           init_image_pil if init_image_pil else img.decode_image())
        last_frame_semantic.set_enabled(init_image_pil is not None)
        for scene in prompts:
          scene.append(last_frame_semantic)
      else:
        last_frame_semantic = None
    
      #optical flow
      if params.animation_mode == 'Video Source':
        if params.flow_stabilization_weight == '':
          params.flow_stabilization_weight = '0'
        optical_flows = [OpticalFlowLoss.TargetImage(f"optical flow stabilization (frame {-2**i}):{params.flow_stabilization_weight}", 
                                                     img.image_shape) 
                         for i in range(params.flow_long_term_samples + 1)]
        for optical_flow in optical_flows:
          optical_flow.set_enabled(False)
        loss_augs.extend(optical_flows)
      elif params.animation_mode == '3D' and params.flow_stabilization_weight not in ['0','']:
        optical_flows = [TargetFlowLoss.TargetImage(f"optical flow stabilization:{params.flow_stabilization_weight}", 
                                                    img.image_shape)]
        for optical_flow in optical_flows:
          optical_flow.set_enabled(False)
        loss_augs.extend(optical_flows)
      else:
        optical_flows = []
      #other loss augs
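      # smoothing_weight turns into a TVLoss (a total-variation style penalty):
      # positive values smooth the image, negative values push toward the
      # over-sharpened "deep fried" look described in the instructions.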
      if params.smoothing_weight != 0:
        loss_augs.append(TVLoss(weight = params.smoothing_weight))
      
      #set up filespace
      subprocess.run(['mkdir','-p',f'images_out/{params.file_namespace}'])
      subprocess.run(['mkdir','-p',f'backup/{params.file_namespace}'])
      if restore:
        base_name = params.file_namespace if restore_run == 0 else f'{params.file_namespace}({restore_run})'
      elif not params.allow_overwrite:
        #finds the next available base_name to save files with. Why did I do this with regex? 
        _, i = get_next_file(f'images_out/{params.file_namespace}', 
                         f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$',
                             [f"{params.file_namespace}_1.png",f"{params.file_namespace}(1)_1.png"])
        base_name = params.file_namespace if i == 0 else f'{params.file_namespace}({i})'
      else:
        base_name = params.file_namespace
    
      #restore
      if restore:
        if not reencode:
          if restore_frame == latest:
            filename, restore_frame = get_last_file(f'backup/{params.file_namespace}', 
                                                f'^(?P<pre>{re.escape(base_name)}_)(?P<index>\\d*)(?P<post>\\.bak)$')
          else: 
            filename = f'{base_name}_{restore_frame}.bak'
          print("restoring from", filename)
          img.load_state_dict(torch.load(f'backup/{params.file_namespace}/{filename}'))
        else:#reencode
          if restore_frame == latest:
            filename, restore_frame = get_last_file(f'images_out/{params.file_namespace}', 
                                                f'^(?P<pre>{re.escape(base_name)}_)(?P<index>\\d*)(?P<post>\\.png)$')
          else: 
            filename = f'{base_name}_{restore_frame}.png'
          print("restoring from", filename)
          img.encode_image(Image.open(f'images_out/{params.file_namespace}/{filename}').convert('RGB'))
        i = restore_frame*params.save_every
      else:
        i = 0
    
      #graphs
      if params.show_graphs:
        fig, axs = plt.subplots(4, 1, figsize=(21,13))
        axs  = np.asarray(axs).flatten()
        #fig.facecolor = (0,0,0)
      else:
        fig, axs = None, None
    
      #make the main model object
      model = DirectImageGuide(img, embedder, lr = params.learning_rate)
    
      #Update is called each step.
      def update(i, stage_i):
        #display
        if params.clear_every > 0 and i > 0 and i % params.clear_every == 0:
          display.clear_output()
        if params.display_every > 0 and i % params.display_every == 0:
          print(f"Step {i} losses:")
          if model.dataframe:
            print(model.dataframe[0].iloc[-1])
          if params.approximate_vram_usage:
            print("VRAM Usage:")
            print_vram_usage()
          display_width = int(img.image_shape[0]*params.display_scale)
          display_height = int(img.image_shape[1]*params.display_scale)
          if stage_i > 0 and params.show_graphs:
            model.plot_losses(axs)
            im = img.decode_image()
            sidebyside = make_hbox(im.resize((display_width, display_height), Image.LANCZOS), fig)
            display.display(sidebyside)
          else:
            im = img.decode_image()
            display.display(im.resize((display_width, display_height), Image.LANCZOS))
          if params.show_palette and isinstance(img, PixelImage):
            print('Palette:')
            display.display(img.render_pallet())
        #save
        if i > 0 and params.save_every > 0 and i % params.save_every == 0:
          try:
            im
          except NameError:
            im = img.decode_image()
          n = i//params.save_every
          filename = f"images_out/{params.file_namespace}/{base_name}_{n}.png"
          im.save(filename)
          if params.backups > 0:
            filename = f"backup/{params.file_namespace}/{base_name}_{n}.bak"
            torch.save(img.state_dict(), filename)
            if n > params.backups:
              subprocess.run(['rm', f"backup/{params.file_namespace}/{base_name}_{n-params.backups}.bak"])
        #animate
        t = (i - params.pre_animation_steps)/(params.steps_per_frame*params.frames_per_second)
        set_t(t)
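        # With the defaults (pre_animation_steps=100, steps_per_frame=50,
        # frames_per_second=12): t is negative during pre-animation, hits 0 at
        # step 100, then advances 1/12 s every 50 steps,
        # e.g. t = (150-100)/(50*12) = 1/12 at step 150.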
        if i >= params.pre_animation_steps:
          if (i - params.pre_animation_steps) % params.steps_per_frame == 0:
            print(f"Time: {t:.4f} seconds")
            update_rotoscopers(((i - params.pre_animation_steps)//params.steps_per_frame+1)*params.frame_stride)
            if params.reset_lr_each_frame:
              model.set_optim(None)
            if params.animation_mode == "2D":
              tx, ty = parametric_eval(params.translate_x), parametric_eval(params.translate_y)
              theta = parametric_eval(params.rotate_2d)
              zx, zy = parametric_eval(params.zoom_x_2d), parametric_eval(params.zoom_y_2d)
              next_step_pil = zoom_2d(img, 
                                      (tx,ty), (zx,zy), theta, 
                                      border_mode = params.infill_mode, sampling_mode = params.sampling_mode)
            elif params.animation_mode == "3D":
              try:
                im
              except NameError:
                im = img.decode_image()
              with vram_usage_mode('Optical Flow Loss'):
                flow, next_step_pil = zoom_3d(img, 
                                            (params.translate_x,params.translate_y,params.translate_z_3d), params.rotate_3d, 
                                            params.field_of_view, params.near_plane, params.far_plane,
                                            border_mode = params.infill_mode, sampling_mode = params.sampling_mode,
                                            stabilize = params.lock_camera)
                freeze_vram_usage()
                
              for optical_flow in optical_flows:
                optical_flow.set_last_step(im)
                optical_flow.set_target_flow(flow)
                optical_flow.set_enabled(True)
            elif params.animation_mode == "Video Source":
              frame_n = min((i - params.pre_animation_steps)*params.frame_stride//params.steps_per_frame, len(video_frames) - 1)
              next_frame_n = min(frame_n + params.frame_stride, len(video_frames) - 1)
              next_step_pil = Image.fromarray(video_frames.get_data(next_frame_n)).convert('RGB').resize(img.image_shape, Image.LANCZOS)
              for j, optical_flow in enumerate(optical_flows):
                old_frame_n = frame_n - (2**j - 1)*params.frame_stride
                save_n = i//params.save_every - (2**j - 1)
                if old_frame_n < 0 or save_n < 1:
                  break
                current_step_pil = Image.fromarray(video_frames.get_data(old_frame_n)).convert('RGB').resize(img.image_shape, Image.LANCZOS)
                filename = f"backup/{params.file_namespace}/{base_name}_{save_n}.bak"
                filename = None if j == 0 else filename
                flow_im, mask_tensor = optical_flow.set_flow(current_step_pil, next_step_pil, 
                                                            img, filename, 
                                                            params.infill_mode, params.sampling_mode)
                optical_flow.set_enabled(True)
                #first flow is previous frame
                if j == 0:
                  mask_accum = mask_tensor.detach()
                  valid = mask_tensor.mean()
                  print("valid pixels:", valid)
                  if params.reencode_each_frame or valid < .03:
                    if isinstance(img, PixelImage) and valid >= .03:
                      img.lock_pallet()
                      img.encode_image(next_step_pil, smart_encode = False)
                      img.lock_pallet(params.lock_palette)
                    else:
                      img.encode_image(next_step_pil)
                    reencoded = True
                  else:
                    reencoded = False
                else:
                  with torch.no_grad():
                    optical_flow.set_mask((mask_tensor - mask_accum).clamp(0,1))
                    mask_accum.add_(mask_tensor)
            if params.animation_mode != 'off':
              for aug in stabilization_augs:
                aug.set_comp(next_step_pil)
                aug.set_enabled(True)
              if last_frame_semantic is not None:
                last_frame_semantic.set_image(embedder, next_step_pil)
                last_frame_semantic.set_enabled(True)
              for aug in init_augs:
                aug.set_enabled(False)
              if semantic_init_prompt is not None:
                semantic_init_prompt.set_enabled(False)
                
          
      model.update = update
      
      print(f"Settings saved to images_out/{params.file_namespace}/{base_name}_settings.txt")
      save_settings(params, f"images_out/{params.file_namespace}/{base_name}_settings.txt")
    
      skip_prompts = i // params.steps_per_scene
      skip_steps   = i %  params.steps_per_scene
      last_scene = prompts[0] if skip_prompts == 0 else prompts[skip_prompts - 1]
      for scene in prompts[skip_prompts:]:
        print("Running prompt:", ' | '.join(map(str,scene)))
        i += model.run_steps(params.steps_per_scene-skip_steps, 
                             scene, last_scene, loss_augs, 
                             interp_steps = params.interpolation_steps,
                             i_offset = i, skipped_steps = skip_steps)
        skip_steps = 0
        model.clear_dataframe()
        last_scene = scene
      if fig:
        del fig, axs
    
    #if __name__ == '__main__':
    try:
      gc.collect()
      torch.cuda.empty_cache()
      if batch_mode:
        if restore:
          settings_list = batch_list[restore_run:]
        else:
          settings_list = batch_list
          namespace = batch_list[0]['file_namespace']
          subprocess.run(['mkdir','-p',f'images_out/{namespace}'])
          save_batch(batch_list, f'images_out/{namespace}/{namespace}_batch settings.txt')
          print(f"Batch settings saved to images_out/{namespace}/{namespace}_batch settings.txt")
        for settings in settings_list:
          setting_string = json.dumps(settings)
          print("SETTINGS:")
          print(setting_string)
          params = load_settings(setting_string)
          if params.animation_mode == '3D':
            init_AdaBins()
          params.allow_overwrite = False
          do_run()
          restore = False
          reencode = False
          gc.collect()
          torch.cuda.empty_cache()
      else:
        if params.animation_mode == '3D':
          pass
          #init_AdaBins()
        do_run()
        print("Complete.")
        gc.collect()
        torch.cuda.empty_cache()
    except KeyboardInterrupt:
      pass
    except RuntimeError:
      print_vram_usage()
      raise
          
    #print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))Step 3: Render videoYou can download from the notebook, but it's faster to download from your drive.#@title 3.1 Render video
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
      drive_mounted = True
    else:
      drive_mounted = False
    try:
      from pytti.Notebook import change_tqdm_color
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    change_tqdm_color()
      
    from tqdm.notebook import tqdm
    import numpy as np
    from os.path import exists as path_exists
    from subprocess import Popen, PIPE
    from PIL import Image, ImageFile
    from os.path import splitext as split_file
    import glob
    from pytti.Notebook import get_last_file
    
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    
    try:
      params
    except NameError:
      raise RuntimeError("ERROR: no parameters. Please run parameters (step 2.1).")
    
    if not path_exists(f"images_out/{params.file_namespace}"):
      if path_exists(f"/content/drive/MyDrive"):
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"ERROR: file_namespace: {params.file_namespace} does not exist.")
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: file_namespace: {params.file_namespace} does not exist.")
    
    #@markdown The first run executed in `file_namespace` is number $0$, the second is number $1$, etc.
    
    latest = -1
    run_number = latest#@param{type:"raw"}
    if run_number == -1:
      _, i = get_last_file(f'images_out/{params.file_namespace}', 
                           f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$')
      run_number = i
    base_name = params.file_namespace if run_number == 0 else (params.file_namespace+f"({run_number})")
    tqdm.write(f'Generating video from {params.file_namespace}/{base_name}_*.png')
    
    all_frames = glob.glob(f'images_out/{params.file_namespace}/{base_name}_*.png')
    all_frames.sort(key = lambda s: int(split_file(s)[0].split('_')[-1]))
    print(f'found {len(all_frames)} frames matching images_out/{params.file_namespace}/{base_name}_*.png')
    
    start_frame = 0#@param{type:"number"}
    all_frames = all_frames[start_frame:]
    
    fps =  params.frames_per_second#@param{type:"raw"}
    
    total_frames = len(all_frames)
    
    if total_frames == 0:
      #THIS IS NOT AN ERROR. This is the code that would
      #make an error if something were wrong.
      raise RuntimeError(f"ERROR: no frames to render in images_out/{params.file_namespace}")
    
    frames = []
    
    for filename in tqdm(all_frames):
      frames.append(Image.open(filename))
    
    p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', str(fps), '-i', '-', '-vcodec', 'libx264', '-r', str(fps), '-pix_fmt', 'yuv420p', '-crf', '1', '-preset', 'veryslow', f"videos/{base_name}.mp4"], stdin=PIPE)
    for im in tqdm(frames):
      im.save(p.stdin, 'PNG')
    p.stdin.close()
    
    print("Encoding video...")
    p.wait()
    print("Video complete.")
    #@title 3.1 Render video (concatenate all runs)
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
      drive_mounted = True
    else:
      drive_mounted = False
    try:
      from pytti.Notebook import change_tqdm_color
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    change_tqdm_color()
      
    from tqdm.notebook import tqdm
    import numpy as np
    from os.path import exists as path_exists
    from subprocess import Popen, PIPE
    from PIL import Image, ImageFile
    from os.path import splitext as split_file
    import glob
    from pytti.Notebook import get_last_file
    
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    
    try:
      params
    except NameError:
      raise RuntimeError("ERROR: no parameters. Please run parameters (step 2.1).")
    
    if not path_exists(f"images_out/{params.file_namespace}"):
      if path_exists(f"/content/drive/MyDrive"):
        raise RuntimeError(f"ERROR: file_namespace: {params.file_namespace} does not exist.")
      else:
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: file_namespace: {params.file_namespace} does not exist.")
    
    #@markdown The first run executed in `file_namespace` is number $0$, the second is number $1$, etc.
    
    latest = -1
    run_number = latest
    if run_number == -1:
      _, i = get_last_file(f'images_out/{params.file_namespace}', 
                           f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?_1\\.png)$')
      run_number = i
    
    all_frames = []
    for i in range(run_number+1):
      base_name = params.file_namespace if i == 0 else (params.file_namespace+f"({i})")
      frames = glob.glob(f'images_out/{params.file_namespace}/{base_name}_*.png')
      frames.sort(key = lambda s: int(split_file(s)[0].split('_')[-1]))
      all_frames.extend(frames)
    
    start_frame = 0#@param{type:"number"}
    all_frames = all_frames[start_frame:]
    
    fps =  params.frames_per_second#@param{type:"raw"}
    
    total_frames = len(all_frames)
    
    if total_frames == 0:
      #THIS IS NOT AN ERROR. This is the code that would
      #make an error if something were wrong.
      raise RuntimeError(f"ERROR: no frames to render in images_out/{params.file_namespace}")
    
    frames = []
    
    for filename in tqdm(all_frames):
      frames.append(Image.open(filename))
    
    p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', str(fps), '-i', '-', '-vcodec', 'libx264', '-r', str(fps), '-pix_fmt', 'yuv420p', '-crf', '1', '-preset', 'veryslow', f"videos/{base_name}.mp4"], stdin=PIPE)
    for im in tqdm(frames):
      im.save(p.stdin, 'PNG')
    p.stdin.close()
    
    print("Encoding video...")
    p.wait()
    print("Video complete.")
    #@title 3.2 Download the last exported video
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
    
    try:
      from pytti.Notebook import get_last_file
    except ModuleNotFoundError:
      if drive_mounted:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('ERROR: please run setup (step 1.3).')
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1.3).')
    
    try:
      params
    except NameError:
      #THIS IS NOT AN ERROR. This is the code that would
      #make an error if something were wrong.
      raise RuntimeError("ERROR: please run parameters (step 2.1).")
    
    from google.colab import files
    try:
      base_name = params.file_namespace if run_number == 0 else (params.file_namespace+f"({run_number})")
      filename = f'{base_name}.mp4'
    except NameError:
      filename, i = get_last_file(f'videos', 
                           f'^(?P<pre>{re.escape(params.file_namespace)}\\(?)(?P<index>\\d*)(?P<post>\\)?\\.mp4)$')
    
    if path_exists(f'videos/{filename}'):
      files.download(f"videos/{filename}")
    else:
      if path_exists(f"/content/drive/MyDrive"):
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"ERROR: video videos/{filename} does not exist.")
      else:
        #THIS IS NOT AN ERROR. This is the code that would
        #make an error if something were wrong.
        raise RuntimeError(f"WARNING: Drive is not mounted.\nERROR: video videos/{filename} does not exist.")Batch SettingsWARNING: If you use Google Colab (even with Pro and Pro+) GPUs for long enough, Google will throttle your account. Be careful with batch runs if you don't want to get kicked.#@title batch settings
    from os.path import exists as path_exists
    if path_exists('/content/drive/MyDrive/pytti_test'):
      %cd /content/drive/MyDrive/pytti_test
      drive_mounted = True
    else:
      drive_mounted = False
    try:
      from pytti.Notebook import change_tqdm_color, save_batch
    except ModuleNotFoundError:
      if drive_mounted:
        raise RuntimeError('ERROR: please run setup (step 1).')
      else:
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1).')
    change_tqdm_color()
    
    try:
      import exrex, random, glob
    except ModuleNotFoundError:
      if drive_mounted:
        raise RuntimeError('ERROR: please run setup (step 1).')
      else:
        raise RuntimeError('WARNING: drive is not mounted.\nERROR: please run setup (step 1).')
    from numpy import arange
    import itertools
    
    def all_matches(s):
      return list(exrex.generate(s))
    
    def dict_product(dictionary):
      return [dict(zip(dictionary, x)) for x in itertools.product(*dictionary.values())]
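    For clarity, here is a small hypothetical illustration (not from the original notebook) of what dict_product does: every batch parameter below is given as a list, and dict_product expands those lists into one settings dict per combination.
    # Hypothetical example: 2 widths x 2 palette sizes -> 4 settings dicts
    # dict_product({'width': [180, 360], 'palette_size': [6, 9]})
    # == [{'width': 180, 'palette_size': 6}, {'width': 180, 'palette_size': 9},
    #     {'width': 360, 'palette_size': 6}, {'width': 360, 'palette_size': 9}]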
    
    #these are used to make the defaults look pretty
    model_default = None
    random_seed = None
    
    def define_parameters():
      locals_before = locals().copy()
      scenes = ["list","your","runs"] #@param{type:"raw"}
      scene_prefix = ["all "," permutations "," are run "] #@param{type:"raw"}
      scene_suffix = [" that", " makes", " 27" ] #@param{type:"raw"}
      interpolation_steps = [0] #@param{type:"raw"}
      steps_per_scene = [300] #@param{type:"raw"}
      direct_image_prompts = [""] #@param{type:"raw"}
      init_image = [""] #@param{type:"raw"}
      direct_init_weight = [""] #@param{type:"raw"}
      semantic_init_weight = [""] #@param{type:"raw"}
      image_model = ["Limited Palette"] #@param{type:"raw"}
      width = [180] #@param{type:"raw"}
      height = [112] #@param{type:"raw"}
      pixel_size = [4] #@param{type:"raw"}
      smoothing_weight = [0.05] #@param{type:"raw"}
      vqgan_model = ["sflckr"] #@param{type:"raw"}
      random_initial_palette = [False] #@param{type:"raw"}
      palette_size = [9] #@param{type:"raw"}
      palettes = [8] #@param{type:"raw"}
      gamma = [1] #@param{type:"raw"}
      hdr_weight = [1.0] #@param{type:"raw"}
      palette_normalization_weight = [1.0] #@param{type:"raw"}
      show_palette = [False] #@param{type:"raw"}
      target_palette = [""] #@param{type:"raw"}
      lock_palette = [False] #@param{type:"raw"}
      animation_mode = ["off"] #@param{type:"raw"}
      sampling_mode = ["bicubic"] #@param{type:"raw"}
      infill_mode = ["wrap"] #@param{type:"raw"}
      pre_animation_steps = [100] #@param{type:"raw"}
      steps_per_frame = [50] #@param{type:"raw"}
      frames_per_second = [12] #@param{type:"raw"}
      direct_stabilization_weight = [""] #@param{type:"raw"}
      semantic_stabilization_weight = [""] #@param{type:"raw"}
      depth_stabilization_weight = [""] #@param{type:"raw"}
      edge_stabilization_weight = [""] #@param{type:"raw"}
      flow_stabilization_weight = [""] #@param{type:"raw"}
      video_path = [""] #@param{type:"raw"}
      frame_stride = [1] #@param{type:"raw"}
      reencode_each_frame = [True] #@param{type:"raw"}
      flow_long_term_samples = [0] #@param{type:"raw"}
      translate_x = ["0"] #@param{type:"raw"}
      translate_y = ["0"] #@param{type:"raw"}
      translate_z_3d = ["0"] #@param{type:"raw"}
      rotate_3d = ["[1,0,0,0]"] #@param{type:"raw"}
      rotate_2d = ["0"] #@param{type:"raw"}
      zoom_x_2d = ["0"] #@param{type:"raw"}
      zoom_y_2d = ["0"] #@param{type:"raw"}
      lock_camera = [True] #@param{type:"raw"}
      field_of_view = [60] #@param{type:"raw"}
      near_plane = [1] #@param{type:"raw"}
      far_plane = [10000] #@param{type:"raw"}
      file_namespace = ["Basic Batch"] #@param{type:"raw"}
      allow_overwrite = [False]
      display_every = [50] #@param{type:"raw"}
      clear_every = [0] #@param{type:"raw"}
      display_scale = [1] #@param{type:"raw"}
      save_every = [50] #@param{type:"raw"}
      backups = [2] #@param{type:"raw"}
      show_graphs = [False] #@param{type:"raw"}
      approximate_vram_usage = [False] #@param{type:"raw"}
      ViTB32 = [True] #@param{type:"raw"}
      ViTB16 = [False] #@param{type:"raw"}
      RN50 = [False] #@param{type:"raw"}
      RN50x4 = [False] #@param{type:"raw"}
      learning_rate = [None] #@param{type:"raw"}
      reset_lr_each_frame = [True] #@param{type:"raw"}
      seed = [None] #@param{type:"raw"}
      cutouts = [40] #@param{type:"raw"}
      cut_pow = [2] #@param{type:"raw"}
      cutout_border = [0.25] #@param{type:"raw"}
      border_mode = ["clamp"] #@param{type:"raw"}
      locals_after = locals().copy()
      for k in locals_before.keys():
        del locals_after[k]
      del locals_after['locals_before']
      return locals_after
    
    param_dict = define_parameters()
    batch_list = dict_product(param_dict)
    namespace = batch_list[0]['file_namespace']
    if glob.glob(f'images_out/{namespace}/*.png'):
      print(f"WARNING: images_out/{namespace} contains images. Batch indices may not match filenames unless restoring.")Loading a mock map Here, we describe the script to load a mock map from the kappaTNG suite.  First, we load some modules.import numpy as np
    import matplotlib.pyplot as plt
    %matplotlib inline 
    ng = 1024  # number of grids
    theta = 5.0  # opening angle in deg
    fname = "sample_kappa.dat"
    
    pix = theta/ng  # pixel size
    theta = pix*np.arange(ng)
    
    
    with open(fname, 'rb') as f:
        dummy = np.fromfile(f, dtype="int32", count=1)
        kappa = np.fromfile(f, dtype="float", count=ng*ng)
        dummy = np.fromfile(f, dtype="int32", count=1)
    
    kappa = kappa.reshape((ng, ng))Now, the array `kappa` contains the convergence field. Let's plot it as the 2D color map.X, Y = np.meshgrid(theta, theta)
    
    plt.pcolormesh(X, Y, kappa, vmin=-0.05, vmax=0.07, cmap="Blues")
    cbar = plt.colorbar()
    cbar.set_label(r'$\kappa$', fontsize=20)Calcite dissolution as a function of CO2 pressurehttp://hydrochemistry.eu/exmpls/calcite.html%pylab inline
    import phreeqpython
    pp = phreeqpython.PhreeqPython(database='phreeqc.dat')Populating the interactive namespace from numpy and matplotlibPhreeqPython Calculation# add solutions
    solution0 = pp.add_solution({}) # empty solution
    solution1 = pp.add_solution({}).equalize(['Calcite', 'CO2(g)'], [0, -1.7])
    solution2 = pp.add_solution({}).equalize(['Calcite', 'CO2(g)'], [0, -3.5])
    # create a mixture of solution 1 and 2
    solution3 = solution1*0.5 + solution2*0.5
    
    x = []
    y = []
    
    for i in range(30):
        solution0.add('CO2', 3.5/30)
        solution0.saturate('Calcite')    
        x.append(solution0.sr('CO2(g)')*100)
        y.append(solution0.total_element('Ca'))Plotting the resultsplt.figure(figsize=[7,7])
    plt.plot(x,y, 'rs-', label='equilibrium')
    plt.plot([solution1.sr('CO2(g)')*1e2, solution2.sr('CO2(g)')*1e2], [solution1.total_element('Ca'),solution2.total_element('Ca')], '-gx', label='mixing_line')
    plt.plot(solution3.sr('CO2(g)')*1e2, solution3.total_element('Ca'), '-b^', label='1:1')
    plt.xlim([0,3])
    plt.ylim([0,3])
    plt.grid()
    plt.legend()
    plt.title('Calcite Equilibrium')Split data into days and visualise this data# Subset data into 2 years for ease of processing
    solar.datetime = pd.to_datetime(solar.datetime)
    solar_subset = solar[solar.datetime > "2010"]
    solar_subset.head()
    # Have each day on its own row
    solar_subset['date'] = solar_subset['datetime'].dt.date
    solar_subset['hour'] = solar_subset['datetime'].dt.hour
    solar_subset.head()
    each_day = solar_subset.pivot(index='date', columns='hour', values='capacity_factor')
    each_day = each_day.dropna()
    each_day.head()
    # Visualise as PCA
    from sklearn.decomposition import PCA  # PCA import, in case it was not already imported earlier in the notebook
    pca = PCA(n_components=4)
    
    pca_results = pca.fit_transform(each_day)
    
    principal_df = pd.DataFrame(data = pca_results, columns = ['principal component 1', 'principal component 2','principal component 3', 'principal component 4'])
    principal_df.plot()
    plt.scatter(principal_df['principal component 1'],principal_df['principal component 3'])t-SNE
    from sklearn.manifold import TSNE  # t-SNE import, in case it was not already imported earlier in the notebook
    tsne = TSNE(n_components=2, random_state=0, n_iter=2000, early_exaggeration=4)
    each_day_2d = tsne.fit_transform(each_day)
    
    plt.scatter(each_day_2d[:,0], each_day_2d[:,1])
    plt.show()
    Predict Future Sales
    We are provided with daily historical sales data. Our task is to analyse the data and highlight interesting features.
    **File descriptions**
    - sales_train.csv - the training set. Daily historical data from January 2013 to October 2015.
    - test.csv - the test set. You need to forecast the sales for these shops and products for November 2015.
    - sample_submission.csv - a sample submission file in the correct format.
    - items.csv - supplemental information about the items/products.
    - item_categories.csv - supplemental information about the item categories.
    - shops.csv - supplemental information about the shops.
    **Data fields**
    - ID - an Id that represents a (Shop, Item) tuple within the test set
    - shop_id - unique identifier of a shop
    - item_id - unique identifier of a product
    - item_category_id - unique identifier of item category
    - item_cnt_day - number of products sold. You are predicting a monthly amount of this measure
    - item_price - current price of an item
    - date - date in format dd/mm/yyyy
    - date_block_num - a consecutive month number, used for convenience. January 2013 is 0, February 2013 is 1, ..., October 2015 is 33
    - item_name - name of item
    - shop_name - name of shop
    - item_category_name - name of item category
    import numpy as np # linear algebra
    import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
    import re
    pd.set_option('display.max_columns', None)
    pd.set_option('max_colwidth', 120)
    pd.set_option('display.width', 500)
    
    import plotly.express as px
    import plotly.graph_objects as go
    import plotly
    plotly.offline.init_notebook_mode(connected = True)
    
    import matplotlib.pyplot as plt
    
    import seaborn as sns
    sns.set()
    
    from wordcloud import WordCloud
    from wordcloud import STOPWORDS
    ITEMS_CAT_PATH = '../input/competitive-data-science-predict-future-sales/item_categories.csv'
    ITEMS_PATH = '../input/competitive-data-science-predict-future-sales/items.csv'
    SALES_TRAIN_PATH = '../input/competitive-data-science-predict-future-sales/sales_train.csv'
    SAMPLE_SUBMISSION_PATH = '../input/competitive-data-science-predict-future-sales/sample_submission.csv'
    SHOPS_PATH = '../input/competitive-data-science-predict-future-sales/shops.csv'
    TEST_PATH = '../input/competitive-data-science-predict-future-sales/test.csv'
    items_cats = pd.read_csv(ITEMS_CAT_PATH)
    items = pd.read_csv(ITEMS_PATH)
    sales_train = pd.read_csv(SALES_TRAIN_PATH)
    sample_submission = pd.read_csv(SAMPLE_SUBMISSION_PATH)
    shops = pd.read_csv(SHOPS_PATH)
    test = pd.read_csv(TEST_PATH)Downcast the numeric columns to smaller data types to reduce memory usage.items_cats['item_category_id'] = pd.to_numeric(items_cats['item_category_id'], downcast='signed')
    items[['item_id', 'item_category_id']] = items[['item_id', 'item_category_id']].apply(pd.to_numeric, downcast='signed')
    sales_train[['date_block_num', 'shop_id', 'item_id']] = sales_train[['date_block_num', 'shop_id', 'item_id']].apply(pd.to_numeric, downcast='signed')
    sales_train[['item_price', 'item_cnt_day']] = sales_train[['item_price', 'item_cnt_day']].apply(pd.to_numeric, downcast='float')
    sample_submission['ID'] = pd.to_numeric(sample_submission['ID'], downcast='signed')
    sample_submission['item_cnt_month'] = pd.to_numeric(sample_submission['item_cnt_month'], downcast='float')
    shops['shop_id'] = pd.to_numeric(shops['shop_id'], downcast='signed')
    test[['ID', 'shop_id', 'item_id']] = test[['ID', 'shop_id', 'item_id']].apply(pd.to_numeric, downcast='signed')Merge several tables into one for easy reference.sales_item = sales_train.merge(items, on='item_id', how='left')
    sales_item_shops = sales_item.merge(shops, on='shop_id', how='left')
    df = sales_item_shops.merge(items_cats, on='item_category_id', how='left')
    df.sample(10)
    df.info()
    Int64Index: 2935849 entries, 0 to 2935848
    Data columns (total 10 columns):
     #   Column              Dtype  
    ---  ------              -----  
     0   date                object 
     1   date_block_num      int8   
     2   shop_id             int8   
     3   item_id             int16  
     4   item_price          float32
     5   item_cnt_day        float32
     6   item_name           object 
     7   item_category_id    int8   
     8   shop_name           object 
     9   item_category_name  object 
    dtypes: float32(2), int16(1), int8(3), object(4)
    memory usage: 148.4+ MBLowercase the columns.columns = ['item_name', 'shop_name', 'item_category_name']
    for column in columns: df[column] = df[column].str.lower()
    print('Count rows: ', df.shape[0])
    print('Count columns: ', df.shape[1])
    df.describe().T
    Here we can see anomalies in the data: negative values for the item price and for the quantity of goods sold per day.
    print('Count of rows with abnormal price: {:.0f}'.format(len(df[df['item_price'] <= 0])))
    print('Count of rows with abnormal quantity of goods sold per day: {:.0f}'.format(len(df[df['item_cnt_day'] < 0])))Count of rows with abnormal price: 1
    Count of rows with abnormal quantity of goods sold per day: 7356Remove all rows with anomalies.df = df[~(df['item_price'] <= 0) & ~(df['item_cnt_day'] < 0)]
    df.describe(include = ['object']).T
    print(f'Count duplicates: {df.duplicated().sum()}')
    df = df.drop_duplicates()
    df.info()
    df['date'] = pd.to_datetime(df['date'], dayfirst=True)  # dates are given day-first (dd/mm/yyyy)
    print('Min date:', df['date'].min().date())
    print('Max date', df['date'].max().date())
    df_cnt_item = df.groupby('date_block_num').agg({'item_cnt_day':'sum'}).reset_index().rename(columns={'date_block_num':'number_of_month', 'item_cnt_day':'item_cnt'})
    fig = px.line(df_cnt_item, x="number_of_month", y="item_cnt")
    fig.update_layout(
        title='The number of goods sold on a monthly basis',
        xaxis_title='Time',
        yaxis_title='Sales')
    fig.show()In the graph we see two sales peaks: in December 2013 (183 thousand) and December 2014 (169 thousand). This is the time when people make the most purchases for New Year's Eve gifts.df['sum_price'] = df.item_price * df.item_cnt_day
    df_sales_sum = df.groupby('date_block_num').agg({'sum_price':'sum'}).reset_index().rename(columns={'date_block_num':'number_of_month', 'sum_price':'total_sales'})
    fig = px.line(df_sales_sum, x='number_of_month', y='total_sales')
    fig.update_layout(
        title='Total Sales',
        xaxis_title='Time',
        yaxis_title='Total price')
    fig.show()In December 2014, there were fewer purchases than in December 2013, but they were more expensive. There is a clear seasonality in the data.top_categories = df.groupby('item_category_id').agg({'item_id':'nunique'}).reset_index().rename(columns={'item_id':'count_items'}).sort_values(by='count_items', ascending=False).head(10)
    plt.figure(figsize=(25,8))
    plt.title('Top-10 selling categories', size=20)
    ax = sns.barplot(y=top_categories['count_items'], 
                     x=top_categories['item_category_id'])
    for p in ax.patches:
            ax.annotate (str(int(p.get_height())), (p.get_x() + p.get_width() / 2., p.get_height()),
                 ha='center', va='center', rotation=0, xytext=(0, 20), textcoords='offset points')
    plt.xlabel('Category id', size=15)
    plt.ylabel('Count selling categories', size=15)
    plt.ylim(None,6000)
    plt.show()Most goods were sold from category 40: 4964 items during the whole period of the survey. In second place is category 55 with 2327 items, and in third place is category 37 with 1777 items sold.distribution_categories = df[df['item_category_id'].isin(top_categories['item_category_id'])]
    plt.figure(figsize=(25,8))
    plt.title('Distribution of Top-10 selling categories depending on item_id', size=20)
    ax = sns.boxplot(x="item_category_id", y="item_id", data=distribution_categories)
    plt.xlabel('Category id', size=15)
    plt.ylabel('Count selling items', size=15)
    plt.show()The greatest variation in values is observed for items in categories 37, 40 and 55. However, categories 40 and 55 have no outliers. Goods in categories 19, 23 and 72 have the most outlying values.plt.rcParams['figure.figsize'] = (12, 8)
    stopwords = set(STOPWORDS)
    wordcloud = WordCloud(background_color = 'pink',
                          max_words = 200, 
                          stopwords = stopwords,
                         width = 1000,
                         height = 500,
                         random_state = 42).generate(str(items_cats['item_category_name']))
    
    
    plt.title('Wordcloud for Item Category Names', fontsize = 20)
    plt.axis('off')
    plt.imshow(wordcloud, interpolation = 'bilinear')
    plt.show()
    items_per_shop = df.groupby('shop_name').agg({'item_cnt_day':'sum'}).reset_index().rename(columns={'item_cnt_day':'total_cnt_items'}).sort_values(by='total_cnt_items', ascending=False)
    plt.figure(figsize=(15,18), dpi=80)
    plt.title('Sales per shop', size=20)
    ax = sns.barplot(x=items_per_shop['total_cnt_items'], y=items_per_shop['shop_name'])
    plt.xlabel('Count of items', size=15)
    plt.ylabel('Shop name', size=15)
    plt.show()Most goods were sold from the shop in Москва ТЦ "Семеновский" during the entire period.cnt_items_shop_monthly = df.groupby(['shop_id', 'date_block_num']).agg({'item_cnt_day':'sum'}).reset_index().sort_values(by='item_cnt_day', ascending=False)
    plt.figure(figsize=(30,10))
    plt.title('Average number of items sold per month in each shop', size=20)
    ax = sns.barplot(y=cnt_items_shop_monthly['item_cnt_day'], 
                     x=cnt_items_shop_monthly['shop_id'])
    for p in ax.patches:
            ax.annotate (str(int(p.get_height())), (p.get_x() + p.get_width() / 2., p.get_height()),
                 ha='center', va='center', rotation=0, xytext=(0, 30), textcoords='offset points')
    plt.xlabel('Shop id', size=15)
    plt.ylabel('the average number of items sold', size=15)
    plt.show()-------- This code is a raw version; an improved version will be uploaded soon --------import pandas as pd
    import matplotlib.pyplot as plt
    
    import numpy as np
    # Check which ISCO codes appear in the Netherlands document but not in the official document
    def compare_df(df1,df2,compare_name):
        df_in = df1
        
        np1 = df1[compare_name].unique()
    
        np2 = df2[compare_name].unique()
    
        df_output = df_in.iloc[0:0]
        
        xyz = False
        number_missing = 0
    
        for i in np1:
            xyz = False
            for j in np2:
                if i == j:
                    xyz = True
                
            if xyz == False:
                x = df_in[df_in[compare_name] == i]
                df_output = pd.concat([df_output,x])
    
        df_output = df_output.reset_index()
        df_output = df_output.drop(['index'],axis=1)
        return df_output
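    As a side note, the same rows can be selected with a vectorized filter instead of the nested loops; a minimal sketch (the function name is hypothetical, and the result matches compare_df up to row order):
    def compare_df_fast(df1, df2, compare_name):
        # keep the rows of df1 whose compare_name value never appears in df2
        missing = df1[~df1[compare_name].isin(df2[compare_name].unique())]
        return missing.reset_index(drop=True)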
    df_avg = pd.read_csv('avg_data.csv')
    df_min = pd.read_csv('min_data.csv')
    df_max = pd.read_csv('max_data.csv')
    df_avg = df_avg.drop('Unnamed: 0', axis = 1)
    df_min = df_min.drop('Unnamed: 0', axis = 1)
    df_max = df_max.drop('Unnamed: 0', axis = 1)
    df_avg = df_avg.rename(columns={"brc_beroep":'brc_code'})
    df_min = df_min.rename(columns={"brc_beroep":'brc_code'})
    df_max = df_max.rename(columns={"brc_beroep":'brc_code'})
    df_avg = df_avg.drop('jobs_lost_low', axis = 1)
    df_avg = df_avg.drop('jobs_lost_high', axis = 1)
    df_avg = df_avg.rename(columns={"jobs_lost_avg":'jobs_lost_ET'})
    
    df_min = df_min.drop('jobs_lost_avg', axis = 1)
    df_min = df_min.drop('jobs_lost_high', axis = 1)
    df_min = df_min.rename(columns={"jobs_lost_low":'jobs_lost_ET'})
    
    df_max = df_max.drop('jobs_lost_low', axis = 1)
    df_max = df_max.drop('jobs_lost_avg', axis = 1)
    df_max = df_max.rename(columns={"jobs_lost_high":'jobs_lost_ET'})
    
    df = df_avg
    df.head()
    
    df[df['brc_code'] == 711]
    ham = df[df['jobs_lost_ET'] == 0]
    ham.num_working.sum()
    len(df)
    len(df['brc_code'].unique())
    df_occ = pd.read_excel('Keys_Occupation_data.xlsx')
    df_occ['brc_code'] = df_occ['Title']
    df_occ['brc_name'] = df_occ['Title']
    
    #x = df_occ.iloc[0]['Title']
    #x.split(' ', maxsplit = 1)
    
    for i in range(0, len(df_occ)):
        x = df_occ.iloc[i]['Title']
        y = df_occ.iloc[i]['brc_code']
    
        a,b = x.split(" ", maxsplit = 1)
        
        df_occ['brc_code'].replace(y, value=a, inplace=True)
    
    
        
    for i in range(0, len(df_occ)):
        x = df_occ.iloc[i]['brc_name']
        
        a,b = x.split(" ", maxsplit = 1)
        
        df_occ['brc_name'].replace(x, value =b, inplace=True)
    
    df_occ = df_occ[['occ_code','brc_code', 'brc_name']]    
    
    df_occ["brc_code"] = pd.to_numeric(df_occ["brc_code"])
    df_occ.head()
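    The two loops above can also be written without iterating row by row; a minimal sketch (the helper name is hypothetical, and it assumes a frame that still has the original 'Title' column in 'code name' format):
    def split_title(df_with_title):
        # split 'Title' once on the first space: left part is the code, right part is the name
        parts = df_with_title['Title'].str.split(' ', n=1, expand=True)
        out = df_with_title.copy()
        out['brc_code'] = pd.to_numeric(parts[0])
        out['brc_name'] = parts[1]
        return out[['occ_code', 'brc_code', 'brc_name']]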
    compare_df(df_occ,df,'brc_code')
    df_ed = pd.read_excel('Keys_Education_data.xlsx')
    df_ed
    df_pol = pd.read_csv('Policies_data.csv', sep = ';')
    df_pol.head()
    len(df_pol['occ_code'].unique())
    df_pol.head()
    df_pol[df_pol['occ_code'] == "A000163"].head()
    df_pol[df_pol['occ_code'] == "A000238"].tail()
    df_pol[df_pol['occ_code'] == "A000328"].tail()
    df_occ[df_occ['brc_code'] == 111]
    #df_pol[df_pol['occ_code'] == "A000230"]
    expl1 = df_occ[['occ_code','brc_code']]
    
    df1 = pd.merge(df_pol,expl1, on=['occ_code'],how ='inner')
    
    df1.head()
    df1[df1['brc_code'] == 1041].head()
    print(df1['num_working'].sum())
    len(df1['occ_code'].unique())
    #Transition to one BRC code of formed database
    df_ET = df_occ[['brc_code', 'brc_name']]
    df_ET['num_working'] = 0
    df_ET['jobs_lost_ET'] = 0
    df_ET.head()
    for i in range(0,len(df_ET)):
        #i = i - 1
        brc_code = df_occ.iloc[i]['brc_code']
        
        new_df = df[df['brc_code'] == brc_code]
        a = new_df['num_working'].sum()
        b = new_df['jobs_lost_ET'].sum()
        
        #x = df_ET.iloc[i]['num_working']
        #y = df_ET.iloc[i]['jobs_lost_ET']
        
        #df_ET['num_working'].replace(x, value=a, inplace=True)
        #df_ET['jobs_lost_ET'].replace(y, value=b, inplace=True)
        
        df_ET.loc[i,'num_working'] = a
        df_ET.loc[i,'jobs_lost_ET'] = b
        
        #i = i + 1
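    The aggregation loop above can also be expressed with a single groupby; a minimal sketch (the function name is hypothetical, and it assumes df holds 'brc_code', 'num_working' and 'jobs_lost_ET'):
    def aggregate_per_brc(df_keys, df_data):
        # sum the workforce and the expected job losses per brc_code, keeping 0 for codes without data
        sums = df_data.groupby('brc_code')[['num_working', 'jobs_lost_ET']].sum()
        return df_keys[['brc_code', 'brc_name']].join(sums, on='brc_code').fillna(0)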
    df_2 = df_ET
    #df_2 = df_2[0:114]
    df_2["brc_code"] = pd.to_numeric(df_2["brc_code"], downcast='integer')
    
    pd.options.display.max_colwidth = 100
    
    df_2
    poi = df_2[df_2['brc_code'] == 111]
    poi
    kaas = df_2[df_2['jobs_lost_ET']== 0]
    kaas.head()
    kaas.num_working.sum()
    expl7 = df_2[df_2['jobs_lost_ET']== 0]
    expl7 = expl7[['brc_code']]
    expl7 = expl7.reset_index(drop=True)
    expl7
    # In the original df_pol the overall (aggregate) categories are still included, so we lose that data here. But we see
    # that there are still 114 occupations left
    expl2 = df_ed[['ed_code','education_level']]
    
    df2 = pd.merge(df1,expl2,on=['ed_code'],how ='inner')
    
    df2.head()
    df3 = df2[['ID','brc_code','periods','education_level','num_working']]
    df3.head()
    # Percentage growth per brc_code
    expl2003 = df3[df3['periods'] == ('2003JJ00')]
    
    expl2018 = df3[df3['periods'] == ('2018JJ00')]
    expl8 = expl7.copy()
    expl8['jc_03'] = 0
    expl8['jc_18'] = 0
    expl8['job_decrease'] = 0
    expl8['job_growth'] = 0
    #expl8 = expl8.reset_index(drop=True)
    expl8
    for i in range(0,len(expl8)):
        brc_code = expl8.iloc[i]['brc_code']
        
        df1 = expl2003[expl2003['brc_code'] == brc_code]
        df2 = expl2018[expl2018['brc_code'] == brc_code]
        
        x = df1['num_working'].sum()
        y = df2['num_working'].sum()
        
        expl8.loc[i,'jc_03'] = x
        expl8.loc[i,'jc_18'] = y
        
        z = ((y-x)/15)  # average absolute change per year over the 15 years from 2003 to 2018
        
        v = z/x         # average relative change per year
        
        if (z>0): 
            expl8.loc[i,'job_growth'] = (x*v) * 12 # growth per year times 12 for the duration until 2030
        else:
            expl8.loc[i,'job_decrease'] = (-x*v) * 12
    
    
    expl8.sum()
    expl9 = expl8[['brc_code','job_decrease','job_growth']]
    df12 = pd.merge(df_2,expl9,on=['brc_code'],how ='left')
    df12 = df12.fillna(value = 0)
    df12
    df12['jobs_lost'] = df12['jobs_lost_ET'] + df12['job_decrease']
    # Incorporate sectors
    df_beroep = pd.read_excel("brc_sector.xlsx")
    df_beroep = df_beroep.drop_duplicates()
    df_beroep = df_beroep.reset_index()
    df_beroep = df_beroep.drop(labels="index", axis = 1)
    df_beroep = df_beroep.rename(columns={"brc_beroep":'brc_code'})
    df_beroep.head()
    df13 = pd.merge(df12,df_beroep, on=['brc_code'], how='inner')
    df13 = df13[['brc_code','brc_sector','brc_name','num_working','job_growth','jobs_lost']]
    df13.head()
    df13.sum()
    ## Education level
    expl22 = df3[df3['education_level']==0]
    expl22.head()
    expl22 = df3[df3['education_level']==0]
    
    expl22['num_working'].sum()
    expl22 = df3[df3['education_level']==1]
    
    expl22['num_working'].sum()
    expl22 = df3[df3['education_level']==2]
    
    expl22['num_working'].sum()
    expl22 = df3[df3['education_level']==3]
    
    expl22['num_working'].sum()
    # Check if there are brc code missing
    
    compare_df(df, df3,'brc_code')
    # Reset all education levels that are 0 to 1, since we assume an education level of at least 1 is reached
    df4 = df3
    for i in range(0, len(df4)):
        x = df4.iloc[i]['education_level']
        
        if (x == 0):
            df4["education_level"].replace(x, value=1, inplace=True)
    df4[df4['education_level']==0]
    df4[df4['education_level']==2].head()
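    The reset loop above is equivalent to a single vectorized replace; a minimal sketch of the one-liner:
    # df4['education_level'] = df4['education_level'].replace(0, 1)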
    # Check per occupation what the education level is
    expl3 = df4[df4['brc_code'] == 111]
    
    X1 = expl3[expl3['education_level']==1]
    X2 = expl3[expl3['education_level']==2]
    X3 = expl3[expl3['education_level']==3]
    print (X3)
    Y1 = X1['num_working'].sum()
    Y2 = X2['num_working'].sum()
    Y3 = X3['num_working'].sum()
    print(Y1,Y2,Y3)
    
    if (Y2 > Y3 ) & ( Y2 > Y1):
        print ('hi')
    else:
        print('no')
    # df  with      brc_code and education level
    expl4 = df_occ['brc_code']
    expl4 = pd.DataFrame(expl4)
    expl5 = expl4[0:0]
    
    #expl5['education_level_1'] = 1
    #expl5['education_level_2'] = 1
    #expl5['education_level_3'] = 1
    expl5['percentage_ed_1'] = 1
    expl5['percentage_ed_2'] = 1
    expl5['percentage_ed_3'] = 1
    
    for i in range(0, len(expl4)):
        x = expl4.iloc[i]['brc_code']
        
        df = df4[df4['brc_code'] == x]
        
        X1 = df[df['education_level']==1]
        X2 = df[df['education_level']==2]
        X3 = df[df['education_level']==3]
        
        Y1 = X1['num_working'].sum()
        Y2 = X2['num_working'].sum()
        Y3 = X3['num_working'].sum()
        
        
        print(Y1,Y2,Y3) 
    
    
    for i in range(0, len(expl4)):
        x = expl4.iloc[i]['brc_code']
        
        df = df4[df4['brc_code'] == x]
        
        X1 = df[df['education_level']==1]
        X2 = df[df['education_level']==2]
        X3 = df[df['education_level']==3]
        
        Y1 = X1['num_working'].sum()
        Y2 = X2['num_working'].sum()
        Y3 = X3['num_working'].sum()
        
        YY = Y1 + Y2 + Y3
        
        Z1 = Y1/YY
        Z2 = Y2/YY
        Z3 = Y3/YY
        
        expl5 = expl5.append({'brc_code':x,'percentage_ed_1': Z1, 'percentage_ed_2': Z2, 'percentage_ed_3': Z3}, ignore_index=True)
        
        expl5["brc_code"] = pd.to_numeric(expl5["brc_code"], downcast='integer')
        #expl5 = expl5.to_numeric('brc_code',downcast='integer') 
        #pd.to_numeric(s, downcast='integer')
        #df.append({'Rank': new[0],'Probability': new[1],'soc_code': new[2],'Occupation': new[3]}, ignore_index=True)
        
        #if (Y3 > Y2 ) & ( Y3 > Y1):
            #expl4[''] df4["education_level"].replace(x, value=1, inplace=True)
    expl5.head()
    expl5.sum()/114
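    The two share-computation loops above can be condensed into one groupby; a minimal sketch (the function name is hypothetical, and it covers the brc codes that are present in df4):
    def education_shares(df_levels):
        # workforce per (brc_code, education_level), reshaped to one column per level, then row-normalized
        counts = df_levels.groupby(['brc_code', 'education_level'])['num_working'].sum().unstack(fill_value=0)
        shares = counts.div(counts.sum(axis=1), axis=0)
        shares.columns = [f'percentage_ed_{int(c)}' for c in shares.columns]
        return shares.reset_index()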
    df4 = pd.merge(df13,expl5,on=['brc_code'], how = 'inner')
    df4
    df4['job_growth1'] = df4['job_growth'] * df4['percentage_ed_1']
    df4['job_growth2'] = df4['job_growth'] * df4['percentage_ed_2']
    df4['job_growth3'] = df4['job_growth'] * df4['percentage_ed_3']
    df4['jobs_lost1'] = df4['jobs_lost'] * df4['percentage_ed_1']
    df4['jobs_lost2'] = df4['jobs_lost'] * df4['percentage_ed_2']
    df4['jobs_lost3'] = df4['jobs_lost'] * df4['percentage_ed_3']
    df5 = df4[['brc_code','brc_sector','brc_name','num_working','job_growth1','job_growth2','job_growth3','jobs_lost1','jobs_lost2','jobs_lost3']]
    df5.tail()
    df_class = pd.read_excel("class_label.xlsx")
    df_class
    df6 = df_class
    df6['num_working'] = 0
    df6['job_growth1'] = 0
    df6['job_growth2'] = 0
    df6['job_growth3'] = 0
    df6['jobs_lost1'] = 0
    df6['jobs_lost2'] = 0 
    df6['jobs_lost3'] = 0
    df6
    for i in range(0,len(df_class)):
        brc_sector = df_class.iloc[i]['brc_sector']
        
        df1 = df5[df5['brc_sector'] == brc_sector]
        
        x1 = df1['num_working'].sum()
        x2 = df1['job_growth1'].sum()
        x3 = df1['job_growth2'].sum()
        x4 = df1['job_growth3'].sum()
        x5 = df1['jobs_lost1'].sum()
        x6 = df1['jobs_lost2'].sum()
        x7 = df1['jobs_lost3'].sum()
        
        df6.loc[i,'num_working'] = x1
        df6.loc[i,'job_growth1'] = x2
        df6.loc[i,'job_growth2'] = x3
        df6.loc[i,'job_growth3'] = x4
        df6.loc[i,'jobs_lost1'] = x5
        df6.loc[i,'jobs_lost2'] = x6
        df6.loc[i,'jobs_lost3'] = x7
        
    df6.sum()
    df7 = df6
    df7
    expl10 = df6
    expl10['job_growth'] = expl10['job_growth1'] + expl10['job_growth2'] + expl10['job_growth3']
    expl10['jobs_lost'] = expl10['jobs_lost1'] + expl10['jobs_lost2'] + expl10['jobs_lost3']
    expl10 = expl10[[ 'brc_sector','jobs_lost','job_growth']]
    expl10
    expl10.plot.bar(x ='brc_sector')
    plt.show()
    
    #incorporated are job growth and extra job loss through historical information
    # Replace workers with workers from the same sector
    df7 = df7.round(decimals = 3)
    df7 = df7[['brc_sector','job_growth1','job_growth2','job_growth3','jobs_lost1','jobs_lost2','jobs_lost3']]
    df7
    df7.sum()
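    # Net job growth against job losses within each education level (per sector), keeping only the surplus on one side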
    
    for i in range(0,len(df7)):
        
        x1 = df7.iloc[i]['jobs_lost3']
        x2 = df7.iloc[i]['job_growth3']
        
        if (x1 < x2):
            df7.loc[i,'jobs_lost3'] = 0
            df7.loc[i,'job_growth3'] = (x2 - x1)
        else:
            df7.loc[i,'jobs_lost3'] = (x1 - x2)
            df7.loc[i,'job_growth3'] = 0
            
        x3 = df7.iloc[i]['jobs_lost2']
        x4 = df7.iloc[i]['job_growth2']
        
        if (x3 < x4):
            df7.loc[i,'jobs_lost2'] = 0
            df7.loc[i,'job_growth2'] = (x4 - x3)
        else:
            df7.loc[i,'jobs_lost2'] = (x3 - x4)
            df7.loc[i,'job_growth2'] = 0
            
        x5 = df7.iloc[i]['jobs_lost1']
        x6 = df7.iloc[i]['job_growth1']
        
        if (x5 < x6):
            df7.loc[i,'jobs_lost1'] = 0
            df7.loc[i,'job_growth1'] = (x6 - x5)
        else:
            df7.loc[i,'jobs_lost1'] = (x5 - x6)
            df7.loc[i,'job_growth1'] = 0
    
    df7
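    # Let surplus growth at a higher education level absorb the remaining losses one level below:
    # level 3 growth offsets level 2 losses, level 2 growth offsets level 1 losses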
    for i in range(0,len(df7)):
        
        x1 = df7.iloc[i]['jobs_lost3']
        x2 = df7.iloc[i]['job_growth3']
        
        x3 = df7.iloc[i]['jobs_lost2']
        x4 = df7.iloc[i]['job_growth2']
        
        x5 = df7.iloc[i]['jobs_lost1']
        x6 = df7.iloc[i]['job_growth1']
        
          
        if (x3 < x2):
            df7.loc[i,'jobs_lost2'] = 0
            df7.loc[i,'job_growth3'] = (x2 - x3)
        else:
            df7.loc[i,'jobs_lost2'] = (x3 - x2)
            df7.loc[i,'job_growth3'] = 0
            
    
        
        if (x5 < x4):
            df7.loc[i,'jobs_lost1'] = 0
            df7.loc[i,'job_growth2'] = (x4 - x5)
        else:
            df7.loc[i,'jobs_lost1'] = (x5 - x4)
            df7.loc[i,'job_growth2'] = 0
            
        
     
    df7
    df7.sum()
    df8 = df7[['brc_sector','jobs_lost1','jobs_lost2', 'jobs_lost3']]
    df8
    
    # What is the distribution of workers that is available
    df8.plot.bar(x ='brc_sector')
    plt.show()
    z1 = int(df8['jobs_lost1'].sum()*1000)
    z2 = int(df8['jobs_lost2'].sum()*1000)
    z3 = int(df8['jobs_lost3'].sum()*1000)
    
    print('Number of people lost current job educational level 1')
    print(z1)
    print('Number of people lost current job educational level 2')
    print(z2)
    print('Number of people lost current job educational level 3')
    print(z3)
    
    print('Total number of current jobs lost')
    print(z1+z2+z3)Number of people lost current job educational level 1
    992005
    Number of people lost current job educational level 2
    1325076
    Number of people lost current job educational level 3
    408372
    Total number of current jobs lost
    2725453**Task-2**: Apply algorithm on digits dataset - One hot encoding of features and train test division 65% - 35%from sklearn import datasets
    from sklearn.tree import DecisionTreeClassifier
    import pandas as pd
    import numpy as np
    #Load dataset
    digits = datasets.load_digits()
    print(digits)
    print(digits.data.shape)
    print(digits.target.shape)
    #import the necessary module
    from sklearn.model_selection import train_test_split
    
    X=digits.data
    Y=digits.target
    #split data set into train and test sets
    X_train, X_test, y_train, y_test = train_test_split( 
        X, Y, test_size = 0.35, random_state = 46)
    #Create a Decision Tree Classifier (using Gini)
    clf_gini = DecisionTreeClassifier(criterion = "gini",
                random_state = 46,max_depth=7, min_samples_leaf=46)
    
    clf_gini.fit(X_train, y_train)
    # Predict the classes of test data
    y_pred = clf_gini.predict(X_test)
    print("Predicted values:")
    print(y_pred)
    # Model Accuracy
    from sklearn import metrics
    print("Confusion Matrix: ",
            metrics.confusion_matrix(y_test, y_pred))
    print ("Accuracy : ",
        metrics.accuracy_score(y_test,y_pred)*100)
    print("Report : ",
        metrics.classification_report(y_test, y_pred))Confusion Matrix:  [[42  0  0  0  0  0  0  0  0  2]
     [ 0 37  1  1  8  0  6  3  1  9]
     [ 1 20 37  5  1  0  5  1  2  1]
     [ 0  7  0 49  0  0  0  4  1  5]
     [ 4  3  1  0 36  0  8  9  1  0]
     [ 4  5  0  0  1 43  2  2  0 12]
     [ 0  3  0  0  0  0 49  0  0  0]
     [ 0  0  7  0  1  0  0 55  1  3]
     [ 0  8  2  5  0  0  0  4 26 12]
     [ 0  0  1  4  0  0  0  7  1 60]]
    Accuracy :  68.99841017488076
    Report :                precision    recall  f1-score   support
    
               0       0.82      0.95      0.88        44
               1       0.45      0.56      0.50        66
               2       0.76      0.51      0.61        73
               3       0.77      0.74      0.75        66
               4       0.77      0.58      0.66        62
               5       1.00      0.62      0.77        69
               6       0.70      0.94      0.80        52
               7       0.65      0.82      0.72        67
               8       0.79      0.46      0.58        57
               9       0.58      0.82      0.68        73
    
        accuracy  [...]__DATA PREPROCESSING__# Separate hotels by country
    china_hotels = hotels[hotels['city_id'] == 31497].reset_index()
    greece_hotels = hotels[hotels['city_id'] == 14121].reset_index()
    usa_hotels = hotels[hotels['city_id'] == 14257].reset_index()
    neth_hotels = hotels[hotels['city_id'] == 27561].reset_index()
    # Clear one misleading city name
    pois['country'] = pois['country'].replace("Hong Kong", "China")
    pois.head()
    # Get unique poi types
    poi_types = pois['poi_types'].tolist()
    poi_str = ", ".join(poi_type for poi_type in poi_types)
    poi_str_list = poi_str.split(",")
    poi_list = list(set(poi_str_list))
    
    for i in range(len(poi_list)):
        poi_list[i] = poi_list[i].lstrip(' ')
    
    poi_list
    # Manipulate pois dataset to get only nightlife features that are involved
    elim_index = []
    for index, row in pois.iterrows():
        if "Bar / Pub" in row['poi_types'] or "Disco / Nightclub" in row['poi_types'] or "Casino" in row['poi_types']:
            elim_index.append(index)
            
    poi_elim = pois[pois.index.isin(elim_index)].reset_index()
    poi_elim.head()
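    The nightlife filter above can also be written without iterrows; a minimal sketch of the equivalent boolean mask (assuming 'poi_types' has no missing values):
    # nightlife_mask = pois['poi_types'].str.contains('Bar / Pub|Disco / Nightclub|Casino', regex=True)
    # poi_elim = pois[nightlife_mask].reset_index()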
    # Separate pois by country
    china_poi = poi_elim[poi_elim['city_id'] == 31497].reset_index()
    greece_poi = poi_elim[poi_elim['city_id'] == 14121].reset_index()
    usa_poi = poi_elim[poi_elim['city_id'] == 14257].reset_index()
    neth_poi = poi_elim[poi_elim['city_id'] == 27561].reset_index()
    __ALGORITHM EXPLANATION__
    After exploring the data and obtaining insights, I start to implement my algorithm here. The basic idea of this algorithm is to assign nightlife scores to the given hotels according to their distance to "Disco / Nightclub", "Bar / Pub", and "Casino" locations. I also take the "distance_to_center" feature into account because it is closely related to the scoring structure; I dig into it further below. I begin by calculating each hotel's distance to all selected POI locations, using longitude and latitude, for each city separately. Here I start with the hotels in China and their distances to the selected POI locations, and repeat the process for each city. While calculating the distances, I take the __mean__ of the list of distances for each hotel to get that hotel's average distance to nightlife locations. __Note:__ I also placed the __"club_club_hotel"__ and __"party_people"__ features into the dataset, filling unknown values with __0.0__. Nevertheless, I decided not to use these features because, first, they contain a lot of unknown values and, second, a 0.0 value would read as negative for the nightlife aspect when there is actually no information.
    china_distances = []
    for i in range(len(china_hotels)):
        per_bar_distance = []
        hotel_long = china_hotels.iloc[i][6]
        hotel_lat = china_hotels.iloc[i][7]
        hotel_coord = (hotel_lat, hotel_long)
        for j in range(len(china_poi)):
            poi_long = china_poi.iloc[j][5]
            poi_lat = china_poi.iloc[j][6]
            poi_coord = (poi_lat, poi_long)
            
            # Calculate the distance by using given longitude and latitude in "meters"
            hotel_poi_distance = geopy.distance.geodesic(hotel_coord, poi_coord).m
            per_bar_distance.append(hotel_poi_distance)
        china_distances.append(per_bar_distance)
    
    china_hotels['nightlife_distance'] = china_distances
    china_hotels['mean_distance'] = [statistics.mean(distance_list) for distance_list in china_distances]
    china_hotels['median_distance'] = [statistics.median(distance_list) for distance_list in china_distances]
    china_train = china_hotels[["hotel_id", "city_id", "distance_to_center", "club_club_hotel", "party_people", "nightlife_distance", "mean_distance", "median_distance", "overall_rating"]]
    china_train = china_train.fillna(0.0)
    china_train
    - I used a scatter plot to visualize the distribution of the __"mean_distance"__ and __"distance_to_center"__ features. The plot also shows that these two features are roughly proportional to each other, following a linear trend.
    - One other takeaway from this plot is that the points are densest where the hotel's distance to the city center and its mean distance to the nightlife locations both take their lowest values. On the other hand, in the China data as in the other cities, there is an outlier for which both features take their highest values; that data point sits far away from the rest of the sample points on the plot.
    plt.figure(figsize=(15, 15))
    plt.scatter(china_train.distance_to_center, china_train.mean_distance)
    plt.title("China Hotels", fontsize=18)
    plt.xlabel('distance_to_center', fontsize=18)
    plt.ylabel('mean_distance', fontsize=18)
    plt.show()
    greece_distances = []
    for i in range(len(greece_hotels)):
        per_bar_distance = []
        hotel_long = greece_hotels.iloc[i][6]
        hotel_lat = greece_hotels.iloc[i][7]
        hotel_coord = (hotel_lat, hotel_long)
        for j in range(len(greece_poi)):
            poi_long = greece_poi.iloc[j][5]
            poi_lat = greece_poi.iloc[j][6]
            poi_coord = (poi_lat, poi_long)
            hotel_poi_distance = geopy.distance.geodesic(hotel_coord, poi_coord).m
            per_bar_distance.append(hotel_poi_distance)
        greece_distances.append(per_bar_distance)
    
    greece_hotels['nightlife_distance'] = greece_distances
    greece_hotels['mean_distance'] = [statistics.mean(distance_list) for distance_list in greece_distances]
    greece_hotels['median_distance'] = [statistics.median(distance_list) for distance_list in greece_distances]
    greece_train = greece_hotels[["hotel_id", "city_id", "distance_to_center", "club_club_hotel", "party_people", "nightlife_distance", "mean_distance", "median_distance", "overall_rating"]]
    greece_train = greece_train.fillna(0.0)
    greece_train
    plt.figure(figsize=(15, 15))
    plt.scatter(greece_train.distance_to_center, greece_train.mean_distance)
    plt.title("Greece Hotels", fontsize=18)
    plt.xlabel('distance_to_center', fontsize=18)
    plt.ylabel('mean_distance', fontsize=18)
    plt.show()
    usa_distances = []
    for i in range(len(usa_hotels)):
        per_bar_distance = []
        hotel_long = usa_hotels.iloc[i][6]
        hotel_lat = usa_hotels.iloc[i][7]
        hotel_coord = (hotel_lat, hotel_long)
        for j in range(len(usa_poi)):
            poi_long = usa_poi.iloc[j][5]
            poi_lat = usa_poi.iloc[j][6]
            poi_coord = (poi_lat, poi_long)
            hotel_poi_distance = geopy.distance.geodesic(hotel_coord, poi_coord).m
            per_bar_distance.append(hotel_poi_distance)
        usa_distances.append(per_bar_distance)
    
    usa_hotels['nightlife_distance'] = usa_distances
    usa_hotels['mean_distance'] = [statistics.mean(distance_list) for distance_list in usa_distances]
    usa_hotels['median_distance'] = [statistics.median(distance_list) for distance_list in usa_distances]
    usa_train = usa_hotels[["hotel_id", "city_id", "distance_to_center", "club_club_hotel", "party_people", "nightlife_distance", "mean_distance", "median_distance", "overall_rating"]]
    usa_train = usa_train.fillna(0.0)
    usa_train
    plt.figure(figsize=(15, 15))
    plt.scatter(usa_train.distance_to_center, usa_train.mean_distance)
    plt.title("USA Hotels", fontsize=18)
    plt.xlabel('distance_to_center', fontsize=18)
    plt.ylabel('mean_distance', fontsize=18)
    plt.show()
    neth_distances = []
    for i in range(len(neth_hotels)):
        per_bar_distance = []
        hotel_long = neth_hotels.iloc[i][6]
        hotel_lat = neth_hotels.iloc[i][7]
        hotel_coord = (hotel_lat, hotel_long)
        for j in range(len(neth_poi)):
            poi_long = neth_poi.iloc[j][5]
            poi_lat = neth_poi.iloc[j][6]
            poi_coord = (poi_lat, poi_long)
            hotel_poi_distance = geopy.distance.geodesic(hotel_coord, poi_coord).m
            per_bar_distance.append(hotel_poi_distance)
        neth_distances.append(per_bar_distance)
    
    neth_hotels['nightlife_distance'] = neth_distances
    neth_hotels['mean_distance'] = [statistics.mean(distance_list) for distance_list in neth_distances]
    neth_hotels['median_distance'] = [statistics.median(distance_list) for distance_list in neth_distances]
    neth_train = neth_hotels[["hotel_id", "city_id", "distance_to_center", "club_club_hotel", "party_people", "nightlife_distance", "mean_distance", "median_distance", "overall_rating"]]
    neth_train = neth_train.fillna(0.0)
    neth_train
    plt.figure(figsize=(15, 15))
    plt.scatter(neth_train.distance_to_center, neth_train.mean_distance)
    plt.title("Netherlands Hotels", fontsize=18)
    plt.xlabel('distance_to_center', fontsize=18)
    plt.ylabel('mean_distance', fontsize=18)
plt.show()__MERGE 4 CITY HOTELS DATA__  - After calculating each hotel's mean distance to the nightlife locations inside its own city, I can finally merge all of these data to feed them into the process of assigning nightlife scores. Note that I only compute distances within a hotel's own city, never to a nightlife location in another city, so the scores are based purely on the __"distance"__ metric.# CONCAT ALL 4 CITY HOTELS DATA
    frames = [china_train, greece_train, usa_train, neth_train]
    data = pd.concat(frames).reset_index()
    del data['index']
    data
    plt.figure(figsize=(15, 15))
    plt.scatter(data.mean_distance, data.distance_to_center)
    plt.title("400 Hotels Distribution", fontsize=18)
    plt.xlabel('mean_distance', fontsize=18)
    plt.ylabel('distance_to_center', fontsize=18)
plt.show()I sorted the data giving first priority to the __"mean_distance"__ feature and second priority to the __"distance_to_center"__ feature. The reason for including the distance to the center when scoring hotels, without making it the priority feature, is that a hotel closer to the city center may also be closer to nightlife locations; this hypothesis is partly supported by the linear trend between the two features, but since it remains an assumption I use it only as a supporting feature.data_sorted = data.sort_values(["mean_distance", "distance_to_center"], ascending=[True, True])
data_sortedAs I mentioned above, the correlation matrix also shows that the two features are correlated; however, given the description of the __distance_to_center__ feature, this does not prove that a hotel closer to the city center is necessarily closer to nightlife locations.data_to_corr = data_sorted[["distance_to_center", "mean_distance"]]
    f = plt.figure(figsize=(19, 15))
    plt.matshow(data_to_corr.corr(), fignum=f.number)
    plt.xticks(range(data_to_corr.shape[1]), data_to_corr.columns, fontsize=14, rotation=45)
    plt.yticks(range(data_to_corr.shape[1]), data_to_corr.columns, fontsize=14)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=14)
    plt.title('Correlation Matrix', fontsize=16);
plt.savefig('correlation_matrix.png')__HANDLING OUTLIERS__  __WINSORIZING__  To handle outliers and reduce their effect on the dataset I use the __winsorizing__ method. __Winsorizing__ limits the extreme values in the dataset by clipping them to a specified percentile, which reduces the influence of outliers.lower_bound = 0.1
    upper_bound = 0.95
    res = data_sorted[["distance_to_center", "mean_distance"]].quantile([lower_bound, upper_bound])
    res
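As a quick illustration of the effect before applying it to the scaled features (a toy sketch, assuming scipy is available; not part of the pipeline):
import numpy as np
from scipy.stats.mstats import winsorize

toy = np.array([1., 2., 3., 4., 5., 100.])
# clip nothing at the low end and the top 20% at the high end:
# the extreme 100. is replaced by the next-largest value, 5.
print(winsorize(toy, limits=[0.0, 0.2]))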
    # WINSORIZING DATA
    X = StandardScaler().fit_transform(data_sorted[["mean_distance", "distance_to_center"]])
    X = winsorize(X, limits=[0.01, 0.05])
len(X)After winsorizing (handling the outliers), the data points are more evenly scattered instead of being extremely dense in one region or lying far outside the mean.plt.figure(figsize=(15, 15))
    plt.scatter(X[:, 0], X[:, 1])
    plt.title("400 Hotels Distribution", fontsize=18)
    plt.xlabel('mean_distance', fontsize=18)
    plt.ylabel('distance_to_center', fontsize=18)
plt.show()__CLUSTERING__  - The elbow method is used to determine a reasonable number of clusters. It tries a range of cluster counts to see up to which point the sum of squared distances keeps decreasing significantly; usually the value at the elbow of the curve is selected. - From this plot I can see that the elbow of the curve is around k=40 (k = number of k-means clusters).# ELBOW METHOD FOR #CLUSTERS
    sum_of_squared_distances = []
    K = range(10,100)
    for k in K:
        k_means = KMeans(n_clusters=k)
        model = k_means.fit(X)
        sum_of_squared_distances.append(k_means.inertia_)
    
    plt.figure(figsize=(15,15))
    plt.plot(K, sum_of_squared_distances, 'bx-')
    plt.xlabel('k')
    plt.ylabel('sum_of_squared_distances')
    plt.title('elbow method for optimal k')
plt.show()From the elbow method I can see that the elbow of the curve is around 40. Nevertheless, I chose 100 clusters so that hotels can be assigned scores from 1 to 100 according to their mean distance to nightlife locations and their distance to the city center. In addition, I use the __silhouette score__ and __Calinski-Harabasz score__ to validate the number of clusters, and they give a higher score for 100 clusters. The reason is that the data is spread out enough to be clustered into 100 groups.# K-MEANS CLUSTERING
    k_means = KMeans(n_clusters=40)
    model = k_means.fit(X)
    
    y_hat = k_means.predict(X)
    labels = k_means.labels_
    sill_score = metrics.silhouette_score(X, labels, metric='euclidean')
    cal_score = metrics.calinski_harabasz_score(X, labels)
    print("Sillhouette Score: ", sill_score)
    print("Calinski Score: ", cal_score)
    
    plt.figure(figsize=(15, 15))
    plt.scatter(X[:, 0], X[:, 1], s=1)
    plt.scatter(k_means.cluster_centers_[:,0] ,k_means.cluster_centers_[:,1], color='black', s=5)
    plt.xlabel("mean_distance", fontsize=18)
    plt.ylabel("distance_to_center", fontsize=18)
    plt.show()
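A compact way to compare the two validation scores across several candidate cluster counts (a sketch, assuming X, KMeans and metrics are in scope as above; results vary slightly with the random initialization):
for k in (40, 60, 80, 100):
    labels_k = KMeans(n_clusters=k).fit_predict(X)
    print(k,
          round(metrics.silhouette_score(X, labels_k, metric='euclidean'), 3),
          round(metrics.calinski_harabasz_score(X, labels_k), 1))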
    # K-MEANS CLUSTERING
    k_means = KMeans(n_clusters=100)
    model = k_means.fit(X)
    
    y_hat = k_means.predict(X)
    labels = k_means.labels_
    sill_score = metrics.silhouette_score(X, labels, metric='euclidean')
    cal_score = metrics.calinski_harabasz_score(X, labels)
    print("Sillhouette Score: ", sill_score)
    print("Calinski Score: ", cal_score)
    
    plt.figure(figsize=(15, 15))
    plt.scatter(X[:, 0], X[:, 1], s=1)
    plt.scatter(k_means.cluster_centers_[:,0] ,k_means.cluster_centers_[:,1], color='black', s=5)
    plt.xlabel("mean_distance", fontsize=18)
    plt.ylabel("distance_to_center", fontsize=18)
    plt.show()Sillhouette Score:  0.47214150099452956
Calinski Score:  10535.495935993727Now that I have 100 clusters for the data points, I calculate the magnitude of each data point's assigned cluster centroid and sort the clusters by it, because the lower the distances a hotel has for these features (lower-left corner of the plot), the higher the score it should take. This gives me a way of assigning distinct scores among the hotels.def sorted_cluster(x, model=None):
        if model == None:
            model = KMeans()
        model = sorted_cluster_centers_(model, x)
        model = sorted_labels_(model, x)
        return model
    
    def sorted_cluster_centers_(model, x):
        new_centroids = []
        magnitude = []
        for center in model.cluster_centers_:
            magnitude.append(np.sqrt(center.dot(center)))
        idx_argsort = np.argsort(magnitude)
        model.cluster_centers_ = model.cluster_centers_[idx_argsort]
        return model
    
    def sorted_labels_(sorted_model, x):
        sorted_model.labels_ = sorted_model.predict(x)
        return sorted_model
    k_means = sorted_cluster(X, k_means)
    print(k_means.cluster_centers_)
    print([score+1 for score in k_means.labels_])
    data_sorted["score"] = [score+1 for score in k_means.labels_]
    data_sorted
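A quick sanity check of how the assigned scores relate to the two underlying distance features (a sketch, assuming data_sorted still holds them):
print(data_sorted[["score", "mean_distance", "distance_to_center"]].corr())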
    final_data = data_sorted[["hotel_id", "city_id", "score"]]
    final_data
    final_data.to_csv('result_data.csv')Adversarial Search in GamesThe following code is based on the code provided by the book [Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu/) in http://aima.cs.berkeley.edu/python/readme.html.  Abstract class for modeling a gameclass Game:
        """A game is similar to a problem, but it has a utility for each
        state and a terminal test instead of a path cost and a goal
        test. To create a game, subclass this class and implement
        legal_moves, make_move, utility, and terminal_test. You may
        override display and successors or you can inherit their default
        methods. You will also need to set the .initial attribute to the
        initial state; this can be done in the constructor."""
    
        def legal_moves(self, state):
            "Return a list of the allowable moves at this point."
        raise NotImplementedError
    
        def make_move(self, move, state):
            "Return the state that results from making a move from a state."
        raise NotImplementedError
    
        def utility(self, state, player):
            "Return the value of this final state to player."
        raise NotImplementedError
    
        def terminal_test(self, state):
            "Return True if this is a final state for the game."
            return not self.legal_moves(state)
    
        def to_move(self, state):
            "Return the player whose move it is in this state."
            return state.to_move
    
        def display(self, state):
            "Print or otherwise display the state."
            print state
    
        def successors(self, state):
            "Return a list of legal (move, state) pairs."
            return [(move, self.make_move(move, state))
                    for move in self.legal_moves(state)]
    
        def __repr__(self):
            return '<%s>' % self.__class__.__name__Minimax with alpha-beta pruning implementationSome auxiliary functionsdef argmin(seq, fn):
        """Return an element with lowest fn(seq[i]) score; tie goes to first one.
        >>> argmin(['one', 'to', 'three'], len)
        'to'
        """
        best = seq[0]; best_score = fn(best)
        for x in seq:
            x_score = fn(x)
            if x_score < best_score:
                best, best_score = x, x_score
        return best
    
    def argmax(seq, fn):
        """Return an element with highest fn(seq[i]) score; tie goes to first one.
        >>> argmax(['one', 'to', 'three'], len)
        'three'
        """
        return argmin(seq, lambda x: -fn(x))Minimax search functiondef alphabeta_search(state, game, d=float('inf'), cutoff_test=None, eval_fn=None):
        """Search game to determine best action; use alpha-beta pruning.
        This version cuts off search and uses an evaluation function."""
    
        player = game.to_move(state)
    
        def max_value(state, alpha, beta, depth):
            if cutoff_test(state, depth):
                return eval_fn(state, player)
            v = -float('inf')
            for (a, s) in game.successors(state):
                v = max(v, min_value(s, alpha, beta, depth+1))
                if v >= beta:
                    return v
                alpha = max(alpha, v)
            return v
    
        def min_value(state, alpha, beta, depth):
            if cutoff_test(state, depth):
                return eval_fn(state, player)
            v = float('inf')
            for (a, s) in game.successors(state):
                v = min(v, max_value(s, alpha, beta, depth+1))
                if v <= alpha:
                    return v
                beta = min(beta, v)
            return v
    
        # Body of alphabeta_search starts here:
        # The default test cuts off at depth d or at a terminal state
        cutoff_test = (cutoff_test or
                       (lambda state,depth: depth>d or game.terminal_test(state)))
        eval_fn = eval_fn or (lambda state, player: game.utility(state, player))
        action, state = argmax(game.successors(state),
                               lambda ((a, s)): min_value(s, -float('inf'), float('inf'), 0))
        return actionGeneric playing agentsAuxiliary functionsdef num_or_str(x):
        """The argument is a string; convert to a number if possible, or strip it.
        >>> num_or_str('42')
        42
        >>> num_or_str(' 42x ')
        '42x'
        """
        if isnumber(x): return x
        try:
            return int(x)
        except ValueError:
            try:
                return float(x)
            except ValueError:
                    return str(x).strip()
    
    def isnumber(x):
        "Is x a number? We say it is if it has a __int__ method."
        return hasattr(x, '__int__')A player that makes a query for each movedef query_player(game, state):
        "Make a move by querying standard input."
        game.display(state)
        return num_or_str(raw_input('Your move? '))A player that chooses a move at randomimport random
    
    def random_player(game, state):
        "A player that chooses a legal move at random."
        return random.choice(game.legal_moves(state))A player that uses minimimax alpha-beta searchdef alphabeta_player(game, state):
        return alphabeta_search(state, game)A function that receives a list of players and call each player alternativelydef play_game(game, *players):
        "Play an n-person, move-alternating game."
        state = game.initial
        while True:
            for player in players:
                move = player(game, state)
                state = game.make_move(move, state)
                if game.terminal_test(state):
return game.utility(state, 0)The last-stone gameThe game is played with a heap of stones. Each player alternately takes a number $n$ of stones ($1 \le n \le 3$). The player that takes the last stone wins.An auxiliary class to define light-weight objectsclass Struct:
        """Create an instance with argument=value slots.
        This is for making a lightweight object whose class doesn't matter."""
        def __init__(self, **entries):
            self.__dict__.update(entries)
    
        def __cmp__(self, other):
            if isinstance(other, Struct):
                return cmp(self.__dict__, other.__dict__)
            else:
                return cmp(self.__dict__, other)
    
        def __repr__(self):
            args = ['%s=%s' % (k, repr(v)) for (k, v) in vars(self).items()]
            return 'Struct(%s)' % ', '.join(args)The following class models the last-stone game:class LastStone(Game):
        def __init__(self, stones):
            self.initial = Struct(to_move=0, heap = stones)
    
        def legal_moves(self, state):
            "Return a list of the allowable moves at this point."
            return range(1, min(3, state.heap) + 1)
    
        def make_move(self, move, state):
            "Return the state that results from making a move from a state."
            return Struct(to_move = 1 - state.to_move,
                          heap = state.heap - move)
            
        def utility(self, state, player):
            "Return the value of this final state to player."
            if state.to_move == player:
                return -1
            else:
                return 1
    
        def terminal_test(self, state):
            "Return True if this is a final state for the game."
            return not self.legal_moves(state)
    
        def to_move(self, state):
            "Return the player whose move it is in this state."
            return state.to_move
    
        def display(self, state):
            "Print or otherwise display the state."
            print state
    
        def successors(self, state):
            "Return a list of legal (move, state) pairs."
            return [(move, self.make_move(move, state))
                    for move in self.legal_moves(state)]An interactive game against the computer, can you win?play_game(LastStone(10), query_player, alphabeta_player)Struct(to_move=0, heap=10)1. Design an evaluation function for the last-stone game and test itdef eval_fn(state, player):
        ### Your code here ###
        return 0
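    # One possible heuristic for this exercise (a hedged sketch, not the notebook's
    # answer): a heap whose size is a multiple of 4 is lost for the side to move, so
    #     mover_loses = (state.heap % 4 == 0)
    #     return 1 if mover_loses != (state.to_move == player) else -1
    # would reward states that are winning for `player` under perfect play.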
        
    def smart_player(game, state):
        return alphabeta_search(state, game, d = 2, eval_fn = eval_fn)
    
    
    result = play_game(LastStone(10), smart_player, alphabeta_player)
    if result == 1:
        print "Smart player wins"
    else:
        print "Smart player loses"Smart player losesThe 3-heaps last-stone gameIn this version of the game, there are three heaps instead of 1. In each turn, a player takes $n$ stones ($1 \le n \le k$) from one of the heaps. The player that takes the last stone wins. 2. Define a class that models the 3-heaps last-stone gameclass LastStone3Heaps(Game):
        def __init__(self, k, heap1, heap2, heap3):
            pass
    
        def legal_moves(self, state):
            "Return a list of the allowable moves at this point."
            pass
        
        def make_move(self, move, state):
            "Return the state that results from making a move from a state."
            pass
        
        def utility(self, state, player):
            "Return the value of this final state to player."
            pass
    
        def terminal_test(self, state):
            "Return True if this is a final state for the game."
            pass
        
        def to_move(self, state):
            "Return the player whose move it is in this state."
            pass
        
        def display(self, state):
            "Print or otherwise display the state."
            pass
        
        def successors(self, state):
            "Return a list of legal (move, state) pairs."
passscikit-plotabs package1.confusion plot2.roc-curve3.mlxtendfpr1, tpr1, thresholds1 = roc_curve(y, model1.decision_function(X))
    fpr2, tpr2, thresholds1 = roc_curve(y, model2.decision_function(X))
    
    plt.plot(fpr1, tpr1, 'o-', ms=2, label="Logistic Regression")
    plt.plot(fpr2, tpr2, 'o-', ms=2, label="Kernel SVM")
    plt.legend()
    plt.plot([0, 1], [0, 1], 'k--', label="random guess")
    plt.show()
    from sklearn.datasets import load_iris
    iris = load_iris()
    iris
    X = iris.data
    y = iris.target
    dfX = pd.DataFrame(X, columns = iris.feature_names)
    dfy = pd.DataFrame(y, columns = ["species"])
    df = pd.concat([dfX,dfy], axis = 1)
    df = df[["sepal length (cm)", "species"]]
    df = df[df.species.isin([0,1])]
    df = df.rename(columns = {"sepal length (cm)": "sepal_length"})
    df
    import statsmodels.api as sm
    model = sm.Logit.from_formula("species ~ sepal_length", data = df)
    result = model.fit()
    print(result.summary())
    y_pred = result.predict(df.sepal_length) >= 0.5
    plt.plot(y_pred, "o")
    
    from sklearn.metrics import confusion_matrix, classification_report
    
    confusion_matrix(df.species, y_pred)
    from sklearn.metrics import classification_report
    
    print(classification_report(df.species, y_pred))
    from sklearn.metrics import roc_curve
    
    fpr, tpr, thresholds = roc_curve(df.species, result.predict(df.sepal_length) )
    plt.plot(fpr, tpr)
    plt.show()
    from sklearn.metrics import auc
    auc(fpr, tpr)
    plt.show()
    iris
    X = iris.data
    y = iris.target
    dfX = pd.DataFrame(X, columns = iris.feature_names)
    dfy = pd.DataFrame(y, columns = ["species"])
    df = pd.concat([dfX,dfy], axis = 1)
    #df = df[["sepal length (cm)", "species"]]
    df = df[df.species.isin([1,2])]
    df["species"] -= 1
    df = df.rename(columns = {"sepal length (cm)": "sepal_length", "sepal width (cm)":"sepal_width", "petal length (cm)":"petal_length", "petal width (cm)":"petal_width"})
    df
    import statsmodels.api as sm
    model = sm.Logit.from_formula("species ~ sepal_length + sepal_width + petal_length + petal_width", data = df)
    result = model.fit()
    print(result.summary())
    df.columns
    y_pred = result.predict(df) >= 0.5
    plt.plot(y_pred, "o")
    
    from sklearn.metrics import confusion_matrix, classification_report
    
    confusion_matrix(df.species, y_pred)
    from sklearn.metrics import classification_report
    
    print(classification_report(df.species, y_pred))
    from sklearn.metrics import roc_curve
    
    fpr, tpr, thresholds = roc_curve(df.species, result.predict(df) )
    plt.plot(fpr, tpr)
    plt.show()Investor - Flow of Funds - US  Introduction:Special thanks to: https://github.com/rgrp for sharing the dataset. Step 1. Import the necessary librariesimport pandas as pd
    import numpy as npStep 2. Import the dataset from this [address](https://raw.githubusercontent.com/datasets/investor-flow-of-funds-us/master/data/weekly.csv).   Step 3. Assign it to a variable calledurl = 'https://raw.githubusercontent.com/datasets/investor-flow-of-funds-us/master/data/weekly.csv'
    df = pd.read_csv(url)
    df.head()Step 4.  What is the frequency of the dataset?_ = pd.to_datetime(df['Date'])
    pd.Series([(_.iat[i+1]-_.iat[i]).days for i in range(df.shape[0]-1)]).value_counts()
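# An alternative quick check (a sketch): pandas can try to infer a frequency string
# directly; it returns None when the spacing is not perfectly regular, as seems to
# be the case here.
# pd.infer_freq(pd.to_datetime(df['Date']).sort_values())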
# frequency seems to be weekly, with a few exceptionsStep 5. Set the column Date as the index.df.set_index(df['Date'], drop=True, inplace=True)
    df.drop(columns=['Date'], inplace=True)
    df.head()Step 6. What is the type of the index?df.index.dtype, type(df.index)
    # -> type 'object'Step 7. Set the index to a DatetimeIndex typedf.index = pd.to_datetime(df.index)
    df.index.dtype, type(df.index)Step 8.  Change the frequency to monthly, sum the values and assign it to monthly.# df.resample('M').agg(np.sum)
    monthly = df.resample('M').sum()
monthlyStep 9. You will notice that it filled the DataFrame with NaN for the months that don't have any data. Let's drop these rows.monthly = monthly[monthly.sum(axis=1)!=0]
    monthlyStep 10. Good, now we have the monthly data. Now change the frequency to year.monthly.resample('AS-JAN').sum()Linear Regression in Python for Engineers, Data Scientists and Geoscientists  , Associate Professor, University of Texas at Austin  Contacts: [Twitter/@GeostatsGuy](https://twitter.com/geostatsguy) | [GitHub/GeostatsGuy](https://github.com/GeostatsGuy) | [www.michaelpyrcz.com](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446)This is a tutorial / demonstration of **Linear Regression**.  In $Python$, the $SciPy$ package, specifically the $Stats$ functions (https://docs.scipy.org/doc/scipy/reference/stats.html) provide excellent tools for efficient use of statistics.  I have previously provided this example in R and posted it on GitHub:1. R https://github.com/GeostatsGuy/geostatsr/blob/master/linear_regression_demo_v2.R2. Rmd with docs https://github.com/GeostatsGuy/geostatsr/blob/master/linear_regression_demo_v2.Rmd 3. knit as an HTML document(https://github.com/GeostatsGuy/geostatsr/blob/master/linear_regression_demo_v2.html) In all cases, I use the same dataset available as a comma delimited file (https://git.io/fxMql).    This tutorial includes basic, calculation of a linear regression model (only 1 predictor and 1 response), testing the significance of the parameters, calculation the parameter confidence intervals and the conditional prediction interval. CaveatsI have not included all the details, specifically the test assumptions in this document.  These are included in the accompanying course notes, Lec09_Bivariate_QQ_Regres.pdf. Project Goal0. Introduction to Python in Jupyter including setting a working directory, loading data into a Pandas DataFrame.1. Learn the basics for working with linear regresion in Python.  2. Demonstrate the efficiency of using Python and SciPy package for statistical analysis. Load the required librariesThe following code loads the required libraries.import os                                                   # to set current working directory 
    import numpy as np                                          # arrays and matrix math
    import scipy.stats as st                                    # statistical methods
    import pandas as pd                                         # DataFrames
    import matplotlib.pyplot as plt                             # for plotting
    import math                                                 # for square rootIf you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs.    Set the working directoryI always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time).  Also, in this case make sure to place the required (see below) data file in this working directory.#os.chdir("C:\PGE337")                                  # set the working directoryLoading DataLet's load the provided dataset. 'Density_Por_data.csv' is available at https://github.com/GeostatsGuy/GeoDataSets. It is a comma delimited file with 20 porosity measures from 2 rock units from the subsurface, porosity (as a fraction). We load it with the pandas 'read_csv' function into a data frame we called 'df' and then preview it by printing a slice and by utilizing the 'head' DataFrame member function (with a nice and clean format, see below).#df = pd.read_csv("Density_Por_data.csv")                    # read a .csv file in as a DataFrame
    df = pd.read_csv(r"https://raw.githubusercontent.com/GeostatsGuy/GeoDataSets/master/Density_Por_data.csv") # load data from Dr. Pyrcz's GitHub repository
    df.head()                                                   # we could also use this command for a table previewIt is useful to review the summary statistics of our loaded DataFrame.  That can be accomplished with the 'describe' DataFrame member function.  We transpose to switch the axes for ease of visualization.df.describe().transpose()Here we extract the X1 and X2 unit porosity samples from the DataFrame into separate arrays called 'X1' and 'X2' for convenience.por = df['Porosity']
    den = df['Density']
    denv = np.linspace(1.0,2.4,100)Linear Regression ModelLet's first calculate the linear regression modellinear = st.linregress(den,por)
    
    print('The model parameters are, slope (b1) = ' + str(round(linear.slope,2)) + ', and the intercept (b0) = ' + str(round(linear.intercept,2)))
    plt.scatter(den, por, color = 'red',edgecolor='black',alpha=0.2,label='sample data')
    plt.plot(denv, linear.intercept + linear.slope*denv, 'black', label='linear regression model')
    plt.title('Sample Data and Model'); plt.xlabel('Density (g/cm3)'); plt.ylabel('Porosity (%)')
    plt.legend(); plt.grid()
plt.show()The model parameters are, slope (b1) = -9.1, and the intercept (b0) = 28.35Model Confidence IntervalsLet's calculate the 95% confidence interval for the slope parameter, $b_1$, of our linear regression model.* we first need the $t_{critical}$ value, given $\alpha = 0.05$ and $df = n-2$.alpha = 0.05
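# The interval computed below is the standard simple linear regression result
# (stated here for reference, matching the code):
#     b1 +/- t_{alpha/2, n-2} * SE(b1)
# where SE(b1) is the slope standard error reported by st.linregress as linear.stderr.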
    tstat = st.t.ppf([alpha/2,1-alpha/2], len(por)-2)
    slope_lower,slope_upper = linear.slope + tstat*linear.stderr
    #intercept_lower,intercept_upper = linear.intercept + tstat*linear.intercept_stderr
    
    print('The model parameters confidence intervals at a ' + str(1-alpha) + ' significance level are:')
    print('Slope: ' + str(round(slope_lower,2)) + ' , ' + str(round(slope_upper,2)))
    #print('Intercept: ' + str(round(intercept_lower,2)) + ' , ' + str(round(intercept_upper,2)))The model parameters confidence intervals at a 0.95 significance level are:
    Slope: -10.26 , -7.94Model Prediction IntervalsLet's calculate the prediction intervals.new_x = 2.05
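# The conditional prediction interval computed below follows the standard
# simple linear regression form (stated here for reference):
#     y_hat(x0) +/- t_{alpha/2, n-2} * sqrt( MSE * (1 + 1/n + (x0 - x_bar)^2 / sum_i (x_i - x_bar)^2) )
# which is exactly what MSE, est_stderr and tstat assemble in the lines below.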
alpha = 0.05
    
    por_hat = linear.intercept + linear.slope*den
    MSE = np.sum(np.power(por-por_hat,2))/(len(por)-2)
    est_stderr = math.sqrt(1 + 1/len(por) + np.power(new_x - np.average(den),2)/np.sum(np.power(den-np.average(den),2)))
    tstat = st.t.ppf([alpha/2,1-alpha/2], len(por)-2)
    
    y_pred_lower, y_pred_upper = linear.intercept + linear.slope*new_x + tstat*math.sqrt(MSE)*est_stderr
    
    plt.scatter(den, por, color = 'red',edgecolor='black',alpha=0.2,label='sample data',zorder=1)
    plt.plot(denv, linear.intercept + linear.slope*denv, 'black', label='linear regression model',zorder=1)
plt.scatter(new_x, linear.intercept + linear.slope*new_x, color='yellow',edgecolor='black',label='prediction',zorder=2)
    plt.plot([new_x,new_x],[y_pred_lower,y_pred_upper],color='black',linestyle='dashed',zorder=1,label='prediction interval')
    plt.title('Sample Data, Model and a Prediction Interval'); plt.xlabel('Density (g/cm3)'); plt.ylabel('Porosity (%)')
    plt.legend(); plt.grid()
plt.show()Model CheckingLet's test the slope with the following hypothesis test:\begin{equation}H_0: b_{1} = 0.0\end{equation}\begin{equation}H_1: b_{1} \ne 0.0\end{equation}and see if we can reject this null hypothesis, $H_{0}$, that the slope parameter is equal to 0.0.  If we reject this null hypothesis, we show that the slope is meaningful and there is a linear relationship between density and porosity that we can use.Fortunately, the $linregress$ function from the $stats$ package provides us with the two-sided p-value for this test.print('The linear regression model slope parameter p-value is ' + str(round(linear.pvalue,3)) + '.')The linear regression model slope parameter p-value is 0.0.We reject the null hypothesis and adopt the alternative hypothesis, $H_1$, that the slope is not equal to 0.0. Correlation Coefficient and $r^2$ ValuesWe can also observe the correlation coefficient, $\rho = r$, and the $r^2$ value that indicates the proportion of variance that is described by our model. * valid for our linear modelprint('The correlation coefficient is = ' + str(round(linear.rvalue,2)) + ' and the r-squared value = ', str(round(linear.rvalue**2,2)))The correlation coefficient is = -0.84 and the r-squared value =  0.7Model Cross ValidationLet's use this model to make a prediction at all the data locations.  * now plot a standard model cross validation plot, actual vs. predicted values for the response feature* note, we are only looking at data used to build the model, known as training data.por_hat = linear.slope * den + linear.intercept
    plt.scatter(por_hat,por,color='red',alpha=0.2,edgecolor='black')
    plt.plot([0,30],[0,30],color='black',linewidth=1)
    plt.ylabel('Actual Porosity (%)'); plt.xlabel('Estimated Porosity (%)'); plt.title('Training Data Cross Validation Plot')
    plt.xlim(5,20); plt.ylim(5,20)
    plt.grid(); plt.show()Now let's look at the distribution of estimates.plt.hist(por_hat,color='red',alpha=0.2,edgecolor='black')
    plt.title("Porosity Predictions with Linear Model")
    plt.xlabel('Porosity (%)'); plt.ylabel('Frequency')
    plt.show()It is useful to plot the predictions of porosity and porosity data vs. the density data. From this plot we can observe the linear limitation of our model and get a sense of the unexplained variance $\frac{\sum_{i=1}^{n}(y_i - \hat{y}_i)^2} {n-1}$plt.scatter(den, por,color='red',alpha=0.2,edgecolor='black',label='sample data')
    plt.scatter(den, por_hat,color='blue',alpha=0.2,edgecolor='black',label='model')
    plt.title('Sample Data and Model'); plt.xlabel('Density (g/cm3)'); plt.ylabel('Porosity (%)')
    plt.legend()
    plt.show()Next let's calculate the residual and check their distribution. * residuals are the true values at the data locations minus the estimates at the data locations, $y_i - \hat{y}_i$.  We want to make sure the average is close to 0.0 (unbiased estimates) and to observe the shape and spread of the residual distribution.residual = por - por_hat
    plt.hist(residual,color='red',alpha=0.2,edgecolor='black')
    plt.title("Residual")
    plt.xlabel('Porosity True - Estimate (%)')
    print('The average of the residuals is ' + str(round(np.mean(residual),2)))The average of the residuals is -0.0Next we will check the residual vs. the fitted value.  * we want to see if the errors are consistent over the range of fitted values.  * for example, we could use this plot to identify higher error or systematic under- or overestimation over a specific range of fitted values.plt.scatter(por_hat,residual,color='red',alpha=0.2,edgecolor='black')
    plt.title('Residual vs. Fitted Value')
    plt.xlabel('Porosity Estimate (%)')
    plt.ylabel('Porosity Residual (%)')
    plt.plot([5,20], [0,0],'black')
plt.show()SimBench Converter This tutorial introduces the SimBench csv format and the converter connecting pandapower with the SimBench csv format:    The SimBench csv format is, similar to the pandapower format, an element-based model which stores the parameters of each element in a table. Widespread element models are used.Two functions, one for each direction, connect the csv files where the SimBench csv format data is stored with the pandapower net structure: csv2pp() and pp2csv(). These functions use the internal functions csv_data2pp() and pp2csv_data(). The internal functions do the full conversion but do not read or write csv files themselves.import pandapower.networks as nw
    import simbench as sb
    import os
    
    # let's have a look at the SimBench csv format appearance
    net = nw.mv_oberrhein()
    csv_data = sb.pp2csv_data(net)
    list(csv_data.keys())hp.pandapower.toolbox - INFO: set 0 of 0 unsupplied buses out of service
    hp.pandapower.toolbox - INFO: dropped 0 lines with 0 line switches
    hp.pandapower.toolbox - INFO: dropped 0 trafo elements with 0 switches
    hp.pandapower.toolbox - INFO: dropped 0 trafo3w elements with 0 switches
    hp.pandapower.toolbox - INFO: dropped 0 switches
    hp.converter.simbench.csv_pp_converter - WARNING: In elements ['sgen', 'load']'scaling' differs from 1, which is not converted.
    C:\Users\e2n011\Documents\GIT\pandapower-pro\converter\simbench\csv_pp_converter.py:777: FutureWarning:
    
    Sorting because non-concatenation axis is not aligned. A future version
    of pandas will change to not sort by default.
    
    To accept the future behavior, pass 'sort=False'.
    
    To retain the current behavior and silence the warning, pass 'sort=True'.
    
    
    hp.converter.simbench.csv_pp_converter - INFO: There are 2 ExternalNet without subnet data.
    hp.converter.simbench.csv_pp_converter - INFO: There are 181 Line without subnet data.
hp.conver[...]The list of csv_data keys shows which element tables exist in the SimBench csv format. The keys can be divided into classes such as "elements", "profiles", "types", "study cases" and "results", as done by csv_tablenames(). Now let's do a full conversion from csv files to a pandapower net, and the same the other way around.# determine relevant paths
    test_network_path = os.path.join(sb.sb_dir, "test", "converter", "test_network")
    test_output_folder_path = os.path.join(simbench_converter_path(), "test_network_output_folder")
    
    # get the pandapower net from test network csv files
    net = sb.csv2pp(test_network_path)
    
    # convert pp net to csv files
    sb.pp2csv(net, test_output_folder_path, export_pp_std_types=False)C:\Users\e2n011\Documents\GIT\pandapower-pro\converter\simbench\csv_pp_converter.py:777: FutureWarning:
    
    Sorting because non-concatenation axis is not aligned. A future version
    of pandas will change to not sort by default.
    
    To accept the future behavior, pass 'sort=False'.
    
    To retain the current behavior and silence the warning, pass 'sort=True'.
    
    
    hp.pandapower.toolbox - INFO: set 0 of 0 unsupplied buses out of service
    hp.pandapower.toolbox - INFO: dropped 0 lines with 0 line switches
    hp.pandapower.toolbox - INFO: dropped 0 trafo elements with 0 switches
    hp.pandapower.toolbox - INFO: dropped 0 trafo3w elements with 0 switches
    hp.pandapower.toolbox - INFO: dropped 0 switches
    hp.converter.simbench.csv_pp_converter - INFO: There are 2 Bay without subnet data.
    hp.converter.simbench.csv_pp_converter - INFO: There are 5 ExternalNet without subnet data.
    hp.converter.simbench.csv_pp_converter - INFO: There are 10 Line without subnet data.
    hp.converter.simbench.csv_pp_converter - INFO: There a[...]Atlas Scientific pH  pH Sensor for measuring acidity (hydrogen ion concentration) of liquids  Manufacturer Link: https://www.atlas-scientific.com/ph.htmlimport matplotlib.pyplot as plt
    
    from meerkat.base import time
    from meerkat import atlas, parser
    # instance device and set output format to .csv (which is default)
    dev = atlas.pH(bus_n=1, output='csv')Configuration# device information: device type, firmware version
    time.sleep(0.5)
    dev.info()
    # status of device power: restart code, input voltage Vcc
    time.sleep(0.5)
    dev.status()
    # set current temperature for compensation
    time.sleep(0.5)
    dev.temp_set(24.4)
    time.sleep(0.5)
    dev.temp_get()Calibration# Three point calibration
    dev.cal_set_mid(7.00)
    time.sleep(1)
    dev.cal_set_low(3.00)
    time.sleep(1)
    dev.cal_set_high(10.09)
    time.sleep(1)
    dev.cal_get()
    # clear calibration
    time.sleep(1)
    dev.cal_clear()
    time.sleep(1)
    dev.cal_get()
    # response breakdown of calibration
    time.sleep(0.5)
    dev.cal_slope()Measurement# single pH measurement
    time.sleep(0.5)
    dev.measure()
    # get one sample without a description
    dev.get('test_1')
    # get 5 samples with a description
    dev.get('test_2', n=5)JSON Data Publishing# set the metadata publishing interval to every third sample
    dev.json_writer.metadata_interval = 3
    dev.publish(description='test_3', n=5)CSV Writer Output# write 5 samples to .csv file with description
    dev.write(description='test_4', n=5)
    # name of file written
    dev.csv_writer.path
    # load .csv data written with dev.write
    m, df = parser.csv_resource(dev.csv_writer.path)
    df
    df.datetime64_ns = df.datetime64_ns.dt.tz_localize('UTC')  # Pandas 0.24.1 hack
    df[["datetime64_ns", "pH"]].plot(x="datetime64_ns", style='-x');JSON Writer Outputdev.writer_output = "json"
    # get 7 samples with a description
    dev.write(description='test_5', n=7)
    # name of file written
    dev.json_writer.path
    !head $dev.json_writer.path{"description": "test_5", "sample_n": 0, "pH": 0.0, "std_time_ms": "2020-12-10 19:47:48.004082"}
    {"description": "test_5", "sample_n": 1, "pH": 0.0, "std_time_ms": "2020-12-10 19:47:50.458519"}
    {"description": "test_5", "sample_n": 2, "pH": 0.0, "std_time_ms": "2020-12-10 19:47:52.913051", "encoding": "utf-8", "format": "text/json", "standard": "RFC 8259", "line_terminator": "\n", "quote_char": "\"", "double_quote": true, "escape_char": "\\", "null_sequence": "NA", "comment": "#", "metadata": {"name": "atlas_ph", "urls": "www.atlas-scientific.com/ph.html", "manufacturer": "Atlas Scientific", "header": ["description", "sample_n", "pH"], "dtype": ["str", "int", "float"], "units": [null, "count", "pH units"], "accuracy": [null, 1, "+/-0.002"], "precision": [null, 1, 0.001], "bus_n": 1, "bus_addr": 99, "description": "Atlas pH"}, "path": "2020_12_10_19_47_48_atlas_ph.jsontxt", "time_format": "std_time_ms", "strfmtime": "%Y-%m-%d %H:%M:%S.%f", "metadata_interval": 3}
    {"description": "test_5[...]Using `numba.jit` to speedup the computation of the Cityblock distance matrix In this notebook we implement a function to compute the Cityblock distance matrix using Numba's *just-it-time* compilation decorator. We compare it's performance to that of corresponding non-decorated NumPy function.We will use two Numba functions here. The decorator ` @numba.jit` and `numba.prange`.import numpy as np
    import numba
    def cityblock_python(x, y):
        """Naive python implementation."""
    
        num_samples, num_feat = x.shape
        dist_matrix = np.empty((num_samples, num_samples))
        for i in range(num_samples):
            for j in range(num_samples):
                r = 0.0
                for k in range(num_feat):
                    r += np.abs(x[i][k] - y[j][k])
                dist_matrix[i][j] = r
    
        return dist_matrix
    
    
    @numba.jit(nopython=True)
    def cityblock_numba1(x, y):
        """Implementation with numba."""
    
        num_samples, num_feat = x.shape
        dist_matrix = np.empty((num_samples, num_samples))
        for i in range(num_samples):
            for j in range(num_samples):
                r = 0.0
                for k in numba.prange(num_feat):
                    r += np.abs(x[i][k] - y[j][k])
                dist_matrix[i][j] = r
    
        return dist_matrix
    
    
    @numba.jit(nopython=True)
    def cityblock_numba2(x, y):
        """Implementation with numba and numpy."""
    
        num_samples, num_feat = x.shape
        dist_matrix = np.empty((num_samples, num_samples))
        for i in range(num_samples):
            for j in numba.prange(num_samples):
                dist_matrix[i][j] = np.linalg.norm(x[i] - y[j], 1)
    
return dist_matrixNoteObserve that the inner loop, which is a reduction, is written with `numba.prange`. When a function is compiled with `parallel=True`, `numba.prange` automatically takes care of data privatization and reductions; without that option (as in the `nopython=True`-only decorators above) it simply behaves like `range`.# Let's check that they all give the same result
    a = 10. * np.random.random([100, 10])
    
    print(np.abs(cityblock_python(a, a) - cityblock_numba1(a, a)).max())
    print(np.abs(cityblock_python(a, a) - cityblock_numba2(a, a)).max())
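For comparison, a variant compiled with parallel=True, where numba.prange can actually distribute a loop across threads, might look like the following sketch (the function name cityblock_numba_parallel is new here; timings depend on the machine):
@numba.jit(nopython=True, parallel=True)
def cityblock_numba_parallel(x, y):
    """Parallel implementation: the outer loop is distributed across threads."""
    num_samples, num_feat = x.shape
    dist_matrix = np.empty((num_samples, num_samples))
    for i in numba.prange(num_samples):
        for j in range(num_samples):
            r = 0.0
            for k in range(num_feat):
                r += np.abs(x[i, k] - y[j, k])
            dist_matrix[i, j] = r
    return dist_matrix

# print(np.abs(cityblock_python(a, a) - cityblock_numba_parallel(a, a)).max())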
    nsamples = 200
    nfeat = 25
    
    x = 10. * np.random.random([nsamples, nfeat])
    
    %timeit cityblock_python(x,x)
    %timeit cityblock_numba1(x, x)
%timeit cityblock_numba2(x, x)Data loading- train.csv for training- val.csv for validationtrain = pd.read_csv("train.csv")
    val = pd.read_csv("val.csv")Entrenamiento por mes  Agrupacion de datos con:- Mes- Año- Clustertrain_mes = train.groupby(['MES','ANIO','CLUSTER']).sum().reset_index()
    train_mes = train_mes[['MES','ANIO','CLUSTER','atropello','caida_ocupante','choque','otro','volcamiento']]
    
    val_mes = val.groupby(['MES','ANIO','CLUSTER']).sum().reset_index()
val_mes = val_mes[['MES','ANIO','CLUSTER','atropello','caida_ocupante','choque','otro','volcamiento']]Training data#
    X_train = train_mes[['MES','ANIO','CLUSTER']].values
y_train = train_mes[['atropello','caida_ocupante','choque','otro','volcamiento']].valuesNormalization- Scales the features to the (0,1) range#StandardScaler()
    #RobustScaler()
    min_max_scaler = preprocessing.MinMaxScaler()
    min_max_scaler = min_max_scaler.fit(X_train)
    X_train = min_max_scaler.transform(X_train)
joblib.dump(min_max_scaler,"scaler_mes.pkl")SVM trainingsvm =  MultiOutputRegressor(sklearn.svm.SVR(),n_jobs=-1)
    scores = cross_val_score(svm, X_train, y_train, cv=5,scoring = 'neg_mean_squared_error')
    print('R2 promedio:',np.mean(scores))
    svm = svm.fit(X_train,y_train)
joblib.dump(svm, 'svm_mes.pkl')R2 promedio: -64572.18580367493Predictiony_pred = svm.predict(X_train)
    rmse(y_train, y_pred)
    #'choque_atropello','incendio',
    X_val = val_mes[['MES','ANIO','CLUSTER']].values
    y_val = val_mes[['atropello','caida_ocupante','choque','otro','volcamiento']].values
X_val = min_max_scaler.transform(X_val)Validationy_pred = svm.predict(X_val)
    print("RMSE  VALIDACION:",rmse(y_val, y_pred))
    y_val = pd.DataFrame(y_val)
    y_pred = pd.DataFrame(y_pred)
    
    plt.plot([i for i in range(30)],list(y_pred[0])[0:30])
plt.plot([i for i in range(30)],list(y_val[0])[0:30])Training with RandomForestRegressorbosque =  RandomForestRegressor()
    scores = cross_val_score(bosque, X_train, y_train, cv=5,scoring = 'neg_mean_squared_error')
    print('R2 promedio:',np.mean(scores))
    bosque = bosque.fit(X_train,y_train)
joblib.dump(bosque, 'bosque_mes.pkl',compress=9)R2 promedio: -1326.4431222266007Predictiony_pred = bosque.predict(X_train)
rmse(y_train, y_pred)Training with parameter tuningclf =  RandomForestRegressor()
    tuned_params = { 
                #"criterion": ['entropy','gini'],
                "max_features": [1,2,4,8,16,32],
                "n_estimators" :[512,1024,2048,4096],
                "min_samples_split" : [2,4,8,16,32,64,128],
                "bootstrap" : [False,True],
                "max_features": ['auto','sqrt'],
                "max_depth": [2,4,8,16,32,64,128,256]
                }
    
    
    clf = GridSearchCV(clf, tuned_params,cv=5,scoring='neg_mean_squared_error',n_jobs = -1)
    clf.fit(X_train, y_train)
    print("Mejores Parametros Encontrados")
    print(clf.best_params_)
    
    ##
## Train the classifier
    ##
    clf =  RandomForestRegressor(
        #criterion  = clf.best_params_['criterion'],
        max_features  = clf.best_params_['max_features'],
        n_estimators  = clf.best_params_['n_estimators'],
        min_samples_split  = clf.best_params_['min_samples_split'],
        bootstrap  = clf.best_params_['bootstrap'],
        max_depth  = clf.best_params_['max_depth'],
    )
    
    
    scores = cross_val_score(clf, X_train, y_train, cv=5,scoring = 'neg_mean_squared_error')
    print('RMSE CV entrenamiento promedio:',np.mean(scores))
    
### Save the model
    
    bosque = clf.fit(X_train,y_train)
    joblib.dump(bosque, 'bosque_tuning_mes.pkl',compress=9)
    
    
    
    y_pred = bosque.predict(X_train)
    print("RMSE entrenamiento:",rmse(y_train, y_pred))Mejores Parametros Encontrados
    {'bootstrap': True, 'max_depth': 128, 'max_features': 'auto', 'min_samples_split': 16, 'n_estimators': 1024}
    RMSE CV entrenamiento promedio: -1198.305688477314
RMSE entrenamiento: 546.5580975358841Prediction with the tuned modely_pred = bosque.predict(X_val)
    print("Error de validación:",rmse(y_val, y_pred))
    y_val = pd.DataFrame(y_val)
    y_pred = pd.DataFrame(y_pred)
    plt.plot([i for i in range(30)],list(y_pred[1])[0:30])
plt.plot([i for i in range(30)],list(y_val[1])[0:30])Predictive model by daytrain_dia = train.groupby(['DIA','MES','ANIO','CLUSTER']).sum().reset_index()
    train_dia = train_dia[['DIA','ESPECIAL','MES','ANIO','CLUSTER','atropello','caida_ocupante','choque','otro','volcamiento','incendio','choque_atropello']]
    
    val_dia = val.groupby(['DIA','MES','ANIO','CLUSTER']).sum().reset_index()
    val_dia = val_dia[['DIA','ESPECIAL','MES','ANIO','CLUSTER','atropello','caida_ocupante','choque','otro','volcamiento','incendio','choque_atropello']]
    
    X_train = train_dia[['DIA','MES','ANIO','CLUSTER','ESPECIAL']].values
    y_train = train_dia[['DIA','ESPECIAL','atropello','caida_ocupante','choque','choque_atropello','incendio','otro','volcamiento']].values
    
    min_max_scaler = preprocessing.MinMaxScaler()
    min_max_scaler = min_max_scaler.fit(X_train)
    X_train = min_max_scaler.transform(X_train)
    joblib.dump(min_max_scaler,"scaler_dia.pkl")
    clf =  RandomForestRegressor()
    tuned_params = { 
                #"criterion": ['entropy','gini'],
                "max_features": [1,2,4,8,16,32],
                "n_estimators" :[512,1024,2048,4096],
                "min_samples_split" : [2,4,8,16,32,64,128],
                "bootstrap" : [False,True],
                "max_features": ['auto','sqrt'],
                "max_depth": [2,4,8,16,32,64,128,256]
                }
    
    
    clf = GridSearchCV(clf, tuned_params,cv=5,scoring='neg_mean_squared_error',n_jobs = -1)
    clf.fit(X_train, y_train)
    print("Mejores Parametros Encontrados")
    print(clf.best_params_)
    
    ##
## Train the classifier
    ##
    clf =  RandomForestRegressor(
        #criterion  = clf.best_params_['criterion'],
        max_features  = clf.best_params_['max_features'],
        n_estimators  = clf.best_params_['n_estimators'],
        min_samples_split  = clf.best_params_['min_samples_split'],
        bootstrap  = clf.best_params_['bootstrap'],
        max_depth  = clf.best_params_['max_depth'],
    )
    
    scores = cross_val_score(clf, X_train, y_train, cv=5,scoring = 'neg_mean_squared_error')
    print('RMSE CV entrenamiento promedio:',np.mean(scores))
    
### Save the model
    
    bosque = clf.fit(X_train,y_train)
    joblib.dump(bosque, 'bosque_tuning_dia.pkl',compress=9)
    
    
    
    y_pred = bosque.predict(X_train)
    print("RMSE entrenamiento:",rmse(y_train, y_pred))
    y_pred = bosque.predict(X_val)
    print("Error de validación:",rmse(y_val, y_pred))
    y_val = pd.DataFrame(y_val)
    y_pred = pd.DataFrame(y_pred)
    #plt.plot([i for i in range(30)],list(y_pred[1])[0:30])
    #plt.plot([i for i in range(30)],list(y_val[1])[0:30])
    plt.plot(y_pred)
    plt.plot(y_val)Controlled Z gatesfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
    
    from math import pi
    
    qreg1 =  QuantumRegister(2)
    creg1 = ClassicalRegister(2)
    
    # define our quantum circuit
    mycircuit1 = QuantumCircuit(qreg1,creg1)
    
    def controlled_anti_z(circuit, q0, q1):
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
    
    def controlled_z(circuit, q0, q1):
        circuit.h(q1)
        circuit.cx(q0,q1)
        circuit.h(q1)
    
    controlled_z(mycircuit1, qreg1[0],qreg1[1])
    # mycircuit1.cz(qreg1[0],qreg1[1])
    # mycircuit1.cx(qreg1[0],qreg1[1])
    # mycircuit1.cz(qreg1[0],qreg1[1])
    # mycircuit1.cx(qreg1[0],qreg1[1])
    # mycircuit1.cz(qreg1[0],qreg1[1])
    
    job = execute(mycircuit1,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit1,decimals=3)
    print(u)[[ 1.+0.j  0.+0.j  0.+0.j  0.+0.j]
     [ 0.+0.j  1.+0.j  0.+0.j  0.+0.j]
     [ 0.+0.j  0.+0.j  1.+0.j  0.+0.j]
     [ 0.+0.j  0.+0.j  0.+0.j -1.+0.j]]Oracle query matrixqreg2 =  QuantumRegister(8)
    creg2 = ClassicalRegister(8)
    
    # define our quantum circuit
    mycircuit2 = QuantumCircuit(qreg2,creg2)
    
    mycircuit2.ccx(qreg2[0],qreg2[1],qreg2[4])
    mycircuit2.ccx(qreg2[2],qreg2[3],qreg2[5])
    mycircuit2.ccx(qreg2[4],qreg2[5],qreg2[6])
    controlled_anti_z(mycircuit2,qreg2[6],qreg2[7])
    
    job = execute(mycircuit2,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit2,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<8): val  = " "+val
            s = s + val
        print(s)
    qreg3 =  QuantumRegister(4)
    creg3 = ClassicalRegister(4)
    
    # define our quantum circuit
    mycircuit3 = QuantumCircuit(qreg3,creg3)
    
    def oracle(number):
        if(number%4 < 2):
            mycircuit3.x(qreg3[1])
        if(number%8 < 4):
            mycircuit3.x(qreg3[2])
        mycircuit3.ccx(qreg3[2],qreg3[1],qreg3[3])
        if(number%2 == 0):
            controlled_anti_z(mycircuit3,qreg3[3],qreg3[0])
        else:
            controlled_z(mycircuit3,qreg3[3],qreg3[0])
        mycircuit3.ccx(qreg3[2],qreg3[1],qreg3[3])
        if(number%8 < 4):
            mycircuit3.x(qreg3[2])
        if(number%4 < 2):
            mycircuit3.x(qreg3[1])
    
    oracle(1)
    
    job = execute(mycircuit3,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit3,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<5): val  = " "+val
            s = s + val
        print(s)
    mycircuit3.draw(output='mpl')
    qreg4 =  QuantumRegister(3)
    creg4 = ClassicalRegister(3)
    
    # define our quantum circuit
    mycircuit4 = QuantumCircuit(qreg4,creg4)
    
    #controlled_anti_z(mycircuit4,qreg4[1],qreg4[0])
    
    mycircuit4.z(qreg4[0])
    mycircuit4.x(qreg4[0])
    mycircuit4.z(qreg4[0])
    mycircuit4.x(qreg4[0])
    mycircuit4.z(qreg4[0])
    
    job = execute(mycircuit4,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit4,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<5): val  = " "+val
            s = s + val
        print(s)
    qreg5 =  QuantumRegister(2)
    creg5 = ClassicalRegister(2)
    
    # define our quantum circuit
    mycircuit5 = QuantumCircuit(qreg5,creg5)
    
    #mycircuit5.x(qreg5[1])
    controlled_z(mycircuit5,qreg5[1],qreg5[0])
    #mycircuit5.x(qreg5[1])
    
    job = execute(mycircuit5,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit5,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<5): val  = " "+val
            s = s + val
        print(s)1.0  0.0  0.0  0.0
      0.0  1.0  0.0  0.0
      0.0  0.0  1.0  0.0
      0.0  0.0  0.0 -1.0Diffusion matrixCheck for the details of the implementation: https://stackoverflow.com/questions/51190773/how-to-realize-the-grovers-diffusion-operator-in-qqreg6 =  QuantumRegister(3)
    creg6 = ClassicalRegister(3)
    
    # define our quantum circuit
    mycircuit6 = QuantumCircuit(qreg6,creg6)
    
    mycircuit6.x(qreg6[2])
    mycircuit6.h(qreg6[2])
    
    mycircuit6.h(qreg6[1])
    mycircuit6.h(qreg6[0])
    mycircuit6.x(qreg6[1])
    mycircuit6.x(qreg6[0])
    
    mycircuit6.ccx(qreg6[1],qreg6[0],qreg6[2])
    
    mycircuit6.x(qreg6[1])
    mycircuit6.x(qreg6[0])
    mycircuit6.h(qreg6[1])
    mycircuit6.h(qreg6[0])
    
    mycircuit6.h(qreg6[2])
    mycircuit6.x(qreg6[2])
    
    job = execute(mycircuit6,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit6,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<5): val  = " "+val
            s = s + val
        print(s)
    qreg7 =  QuantumRegister(5)
    creg7 = ClassicalRegister(5)
    
    # define our quantum circuit
    mycircuit7 = QuantumCircuit(qreg7,creg7)
    
    mycircuit7.x(qreg7[4])
    mycircuit7.h(qreg7[4])
    
    for i in range(3):
        mycircuit7.h(qreg7[i])
        mycircuit7.x(qreg7[i])
    
    mycircuit7.ccx(qreg7[1],qreg7[0],qreg7[3])
    mycircuit7.ccx(qreg7[2],qreg7[3],qreg7[4])
    mycircuit7.ccx(qreg7[1],qreg7[0],qreg7[3])
    
    for i in range(3):
        mycircuit7.x(qreg7[i])
        mycircuit7.h(qreg7[i])
    
    mycircuit7.h(qreg7[4])
    mycircuit7.x(qreg7[4])
    
    job = execute(mycircuit7,Aer.get_backend('unitary_simulator'))
    u=job.result().get_unitary(mycircuit7,decimals=3)
    for i in range(len(u)):
        s=""
        for j in range(len(u)):
            val = str(u[i][j].real)
            while(len(val)<5): val  = " "+val
            s = s + val
        print(s)
    0.75-0.25-0.25-0.25-0.25-0.25-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25 0.75-0.25-0.25-0.25-0.25-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25-0.25 0.75-0.25-0.25-0.25-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25-0.25-0.25 0.75-0.25-0.25-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25-0.25-0.25-0.25 0.75-0.25-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25-0.25-0.25-0.25-0.25 0.75-0.25-0.25  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0
    -0.25-0.25-0.25-0.25-0.25-0.25 0.7[...]
Testing Grover on 2 qubits.
    qreg8 =  QuantumRegister(3)
    creg8 = ClassicalRegister(3)
    
    # define our quantum circuit
    mycircuit8 = QuantumCircuit(qreg8,creg8)
    
    def diffusion():
        mycircuit8.x(qreg8[2])
        mycircuit8.h(qreg8[2])
        
        mycircuit8.h(qreg8[1])
        mycircuit8.h(qreg8[0])
        mycircuit8.x(qreg8[1])
        mycircuit8.x(qreg8[0])
    
        mycircuit8.ccx(qreg8[1],qreg8[0],qreg8[2])
    
        mycircuit8.x(qreg8[1])
        mycircuit8.x(qreg8[0])
        mycircuit8.h(qreg8[1])
        mycircuit8.h(qreg8[0])
    
        mycircuit8.h(qreg8[2])
        mycircuit8.x(qreg8[2])
        
    def oracle(number):
        if(number%4 < 2):
            mycircuit8.x(qreg8[1])
        if(number%2 == 0):
            controlled_anti_z(mycircuit8,qreg8[1],qreg8[0])
        else:
            controlled_z(mycircuit8,qreg8[1],qreg8[0])
        if(number%4 < 2):
            mycircuit8.x(qreg8[1])
    
    #Grover itself
    for i in range(2):
        mycircuit8.h(qreg8[i])
    mycircuit8.barrier()
    oracle(0)
    mycircuit8.barrier()
    diffusion()
    
    mycircuit8.measure(qreg8,creg8)
    
    job = execute(mycircuit8,Aer.get_backend('qasm_simulator'),shots=10000)
    counts1 = job.result().get_counts(mycircuit8)
    print(counts1) # print the outcomes
    
    mycircuit8.draw(output='mpl')
    {'000': 10000}
Testing Grover on 3 qubits.
    qreg9 =  QuantumRegister(5)
    creg9 = ClassicalRegister(5)
    
    # define our quantum circuit
    mycircuit9 = QuantumCircuit(qreg9,creg9)
    
    def oracle(number):
        if(number%4 < 2):
            mycircuit9.x(qreg9[1])
        if(number%8 < 4):
            mycircuit9.x(qreg9[2])
        mycircuit9.ccx(qreg9[2],qreg9[1],qreg9[4])
        if(number%2 == 0):
            controlled_anti_z(mycircuit9,qreg9[4],qreg9[0])
        else:
            controlled_z(mycircuit9,qreg9[4],qreg9[0])
        mycircuit9.ccx(qreg9[2],qreg9[1],qreg9[4])
        if(number%8 < 4):
            mycircuit9.x(qreg9[2])
        if(number%4 < 2):
            mycircuit9.x(qreg9[1])
    
    def diffusion():
        mycircuit9.x(qreg9[4])
        mycircuit9.h(qreg9[4])
    
        for i in range(3):
            mycircuit9.h(qreg9[i])
            mycircuit9.x(qreg9[i])
    
        mycircuit9.ccx(qreg9[1],qreg9[0],qreg9[3])
        mycircuit9.ccx(qreg9[2],qreg9[3],qreg9[4])
        mycircuit9.ccx(qreg9[1],qreg9[0],qreg9[3])
    
        for i in range(3):
            mycircuit9.x(qreg9[i])
            mycircuit9.h(qreg9[i])
    
        mycircuit9.h(qreg9[4])
        mycircuit9.x(qreg9[4])
            
    #Grover itself
    for i in range(3):
        mycircuit9.h(qreg9[i])
    mycircuit9.barrier()
    #Try 1,2,6,12 iterations of Grover
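    # (Added note, hedged) For n search qubits there are N = 2**n basis states; with a single
    # marked element the standard estimate for the best number of Grover iterations is about
    # floor(pi/4 * sqrt(N)). For the 3 search qubits here that is floor(pi/4 * sqrt(8)) = 2;
    # running more iterations makes the success probability oscillate rather than keep improving.
    import math
    grover_optimal_iterations = math.floor(math.pi / 4 * math.sqrt(2**3))  # == 2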
    for i in range(6):
        oracle(0)
        #oracle(1)
        mycircuit9.barrier()
        diffusion()
        mycircuit9.barrier()
    
    mycircuit9.measure(qreg9,creg9)
    
    job = execute(mycircuit9,Aer.get_backend('qasm_simulator'),shots=10000)
    counts1 = job.result().get_counts(mycircuit9)
    print(counts1) # print the outcomes
    
    #mycircuit9.draw(output='mpl')
    {'00000': 9999, '00001': 1}
Classification of the artificial two-moons dataset * *15 min* | Last modified: , 2019. The following is an artificial dataset with a complex decision boundary.
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    %matplotlib inline
    ##
    ## Generación de los datos
    ##
    from sklearn.datasets import make_moons
    
    NPOINTS = 140
    
    X, y = make_moons(
        n_samples=NPOINTS, 
        shuffle=False, 
        noise=0.1, 
        random_state=12345)
    ##
    ## Graficación
    ##
    plt.scatter(X[:70,0], X[:70,1], color='red')
    plt.scatter(X[70:,0], X[70:,1], color='blue')
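    # (Added note, hedged) because the two half-moons interleave, a linear classifier cannot
    # separate them; a nonlinear model (e.g. an RBF-kernel SVM or a small neural network) is
    # the usual choice for this dataset.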
    plt.gca().set_aspect('equal', adjustable='box')
Define a function for which we'd like to find the roots
    def function_for_roots(x):
        a = 1.1
        b = -3.04
        c = 2.07
        return a*x**2 + b*x + c #get the roots of ax^2 + bx + c
We need a function to check whether our initial values are valid
    def check_initial_values(f,x_min,x_max,tol):
        
        #check our initial guesses
        y_min = f(x_min)
        y_max = f(x_max)
        
        #check that x_min and x_max contain a zero crossing
        if(y_min*y_max>=0.0):
            print("No zero crossing found in the range = ",x_min,x_max)
            # CONTINUE WRITING PROGRAM HERE
Schema Alignment Example
    import keras
    import urllib2
    
    import pandas as pd
    
    from hashlib import md5
    from pprint import pprint
    from bs4 import BeautifulSoup
    from sklearn.cluster import DBSCAN
    
    import sys
    sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
    from wit import *Using Theano backend.TaskWe attempt to approach the problem of "schema alignment": given two datasets that contain roughly the same kinds of data, merge them into a single dataset such that columns are appropriately aligned.  This is a generic problem that comes up when we're trying to merge data, but in this example we explore it's application to data attained via web scraping. Specifically, we'll try to do this with data scraped from message board forums, which contains data types such as    username    post body    signature    date    ...So the task is to align the username field in the first dataset with the username field in the second dataset, etc.  The usual approach is to write/configure a parser for each schema, which includes a manual mapping of data fields from the HTML page to  DataWe'll use data scraped by  from     remingtonsociety.com    marauderairrifle.comA post in the first set looks like:```html                    Noel                         Joined: Tue Jun 26, 2007 8:48 pm                  Posts:  112                                                                                             Remington 12A wanted                            Looking for a 12A with condition. Bore must be very good plus, external at 80% or better please.                            ```A post in the second set looks like:```html                                                                    Re: Is this what we need for 22 M-rod accuracy?                                                                                            by                                                             RayK                                                     » Tue May 28, 2013 9:18 pm                                         Thanks for the new photos!  Those are great and clearly show the idea.Mine currently look just like that... Ray                                        25 W-rod | 8 shots @ 70 FPE -2% | 3100 fill25 M-rod                                                        RayK                                    Posts: 5311            Joined: Sat Jun 05, 2010 3:19 pm                                            Top                                ```They have somewhat similar structure, and actually look like maybe they were generated by a different configuration of the same forum building software.  However, they're sufficiently different that they cannot be parsed with the same CSS selectors or regular expressions. ApproachThere are a few stages to this problem:    a) Finding 'atomic data elements' in the raw HTML page.          - i.e. separating the posts on a page that has multiple posts        b) Finding the 'data fields' within an atomic data element        - i.e. finding the locations in the DOM that contain data points we're interested in capturing.        c) Merging 'data fields' across schemaa) and b) are difficult in their own right, and we implement simple, non-optimal solutions.  For a), we totally punt and just do it manually.  For b):    - find all nodes that contain text in the corpus of posts    - find CSS paths of the format            tag1[class1, ...] > tag2[class2, ...] > ... > tag_n[class_m, ...]          to each of these nodes.  From here on, when we say "CSS path", we mean CSS path w/ classes, w/o indices.    - assume data in nodes with the same CSS path are "equivalent".  
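As a hedged illustration of step b) only (the wit package used below presumably implements something more robust), one way to compute a "CSS path with classes, without indices" for every text-bearing node with BeautifulSoup and group nodes by it:
    from bs4 import BeautifulSoup
    
    def css_path_sketch(tag):
        # build tag[class1,...] > ... from the root down to this tag, ignoring sibling indices
        parts = []
        while tag is not None and tag.name is not None and tag.name != '[document]':
            parts.append('%s[%s]' % (tag.name, ','.join(tag.get('class', []))))
            tag = tag.parent
        return ' > '.join(reversed(parts))
    
    _html = "<div class='post'><span class='author'>Noel</span><p>Remington 12A wanted</p></div>"
    _soup = BeautifulSoup(_html, 'html.parser')
    _groups = {}
    for _text in _soup.find_all(text=True):
        if _text.strip():
            _groups.setdefault(css_path_sketch(_text.parent), []).append(_text.strip())
    print(_groups)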
The last step is an assumption that could be wrong in both directions: data elements with the same CSS path could be different latent variables, and data elements with different CSS paths could be the same latent variables. These issues could be mitigated by applying more advanced wrapper induction techniques, but they do not turn out to be too damaging to the current approach. In fact, the process of merging two schemas also merges classes within the same schema (for better or for worse).
Set parameters
    num_features = 75  # in characters
    max_len      = 350 # in characters
    formatter    = KerasFormatter(num_features, max_len)
Load data from two forums with different schemas.
```
origin : domain that record comes from
hash   : md5 hash of the path to the elements in the DOM (including classes)
id     : record number within origin + md5 hash of origin field
obj    : text content of DOM node
src    : corresponding raw HTML markup
```
    df = pd.read_csv('data/simple-forum-dataset.csv')
    df.tail()
Make training set, which will consist of triplets of points (anchor, positive, negative), where
- anchor and positive are of the same class
- anchor and negative are of different classes
We'll minimize the loss function
    max(0, distance(anchor, positive) - distance(anchor, negative) + margin)
In this case, we use
    distance(x, y) = 1 - cosine_similarity(x, y)
but
    distance(x, y) = euclidean_distance(x, y)
also seems to work in general.
    train     = make_triplet_train(df, N = 600)
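    # (Added illustration, hedged) the triplet hinge loss described above, evaluated once on toy
    # vectors with the cosine distance d(x, y) = 1 - cos_sim(x, y) and margin 0.2:
    import numpy as np
    def _cos_dist(a, b):
        return 1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    _anchor, _positive, _negative = np.array([1.0, 0.0]), np.array([0.9, 0.1]), np.array([0.0, 1.0])
    print(max(0.0, _cos_dist(_anchor, _positive) - _cos_dist(_anchor, _negative) + 0.2))
    # a value of 0.0 means the negative is already at least margin farther from the anchor than the positive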
    trn, levs = formatter.format(train, ['obj'], 'hash')
      + 459fb-1706c
      + 520fa-1706c
      + 9ead9-1706c
      + 34883-1706c
      + 2e98d-1706c
      + 2bb6e-1706c
      + bf07b-1706c
      + 8fbdb-1706c
      + a1c8c-48774
      + e5316-48774
      + 3122c-48774
      + df7d2-48774
      + 5fd24-48774
      + 58faa-48774
      + 0dba3-48774
      + 9acc3-48774
      + 6d664-48774
      + 20a50-48774
      + b9976-48774
Define and train the model.  This should take several minutes on the current dataset.
    classifier = TripletClassifier(trn, levs)
    classifier.fit(batch_size = 250, nb_epoch = 3)
Create set of all unique records (without duplicates), then project them using the model.  The projection should take tens of seconds.
    unq = df.copy()
    del unq['id']
    unq = unq.drop_duplicates()
    
    awl, _ = formatter.format(unq, ['obj'], 'hash')
    preds  = classifier.predict(awl['x'][0], verbose = True)
    14314/14314 [==============================] - 13s
Cluster the projections using DBSCAN, then interpret the clusters as the aligned data types.  Though the string similarity was performed on just the HTML text content, we show the classes with HTML markup so we can easily verify the cluster assignments.  Notice that some of the clusters include hashes from both schemas, while others only include hashes from a single schema -- this is desired behavior, as there is not a 1-to-1 mapping between the two schemas.
    db = DBSCAN(eps = .1, min_samples = 50).fit(preds)
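    # (Added note) with eps=.1 and min_samples=50, a projected record only joins a cluster when it
    # is density-reachable from a core point that has at least 50 neighbours within distance 0.1;
    # everything else gets the noise label -1, which is filtered out below.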
    
    # Observation cluster assignments by hash
    res         = unq.hash.groupby(db.labels_).apply(lambda x: x.value_counts()).reset_index()
    res.columns = ('cluster', 'hash', 'cnt')
    
    # Discard small clusters, or points that were assigned to None cluster
    good_res = res[(res.cnt > 100) & (res.cluster > -1)]
    eqv = list(good_res.groupby('cluster').hash.apply(lambda x: list(x)))
    eqv = map(eval, np.unique(map(str, eqv)))
    
    print '\n-----------------------'
    print 'clusters, without markup'
    print_eqv(eqv, df, path = 'obj')
    
    print '\n--------------------------------------'
    print 'clusters, with markup for verification'
    print_eqv(eqv, df, path = 'src')
    height has been deprecated.
    
    
    -----------------------
    clusters, without markup
    
     --- 
    
    ['3122c-48774', '9acc3-48774', '34883-1706c', '8fbdb-1706c']
    
    
    3122c-48774	(1428 rows)
    10253    ShawnHu wrote:How about a hammer tapping servi...
    14293    I am totally against this, and I think you are...
    21204    My bolt broken last night. I wanted to know if...
    17490    This line of scopes have alot of potential and...
    18197    HI guys, this morning I decided to try and mak...
    Name: obj, dtype: object
    9acc3-48774	(1209 rows)
    13566    The trigger screws do not affect the hammer tr...
    17055    thanks for the response man I have been using ...
    7474     All my misses are @ close range. So I guess I ...
    6539     bstaley wrote:Got a chance to try the heat shr...
    15720            Shawnhu,PM just sent with my contact info
    Name: obj, dtype: object
    34883-1706c	(975 rows)
    838     i have one too without markings on barrel . th...
    5784    Hello, looks like a nice board to whi[...]
E2E Model
    model_path = '../logs/e2e_opt:adam_all/'
    predictor = Predictor(model_path, E2ELSTM)
    train_cnfm, y, y_hat = conf_mat(train, predictor)
    dev_cnfm, y_dev, y_hat_dev = conf_mat(dev, predictor)
    print accuracy_score(y, y_hat)
    print accuracy_score(y_dev, y_hat_dev)
    print accuracy_score(y, y_hat)
    print accuracy_score(y_dev, y_hat_dev)
    y_hat = np.array(y_hat)
    y = np.array(y)
    
    y_hat_dev = np.array(y_hat_dev)
    y_dev = np.array(y_dev)
    print 'groups detection train\t', float(((y < 6) == (y_hat < 6)).sum()) / len(y)
    print 'groups detection dev\t', float(((y_dev < 6) == (y_hat_dev < 6)).sum()) / len(y_dev)
    print 'groups detection train\t', float(((y < 6) == (y_hat < 6)).sum()) / len(y)
    print 'groups detection dev\t', float(((y_dev < 6) == (y_hat_dev < 6)).sum()) / len(y_dev)
    Counter(y_hat)
    Counter(y_hat_dev)
    plot_confusion_matrix(train_cnfm, labels)
    plot_confusion_matrix(dev_cnfm, labels)
Discriminate first
    model_path = '../logs/dist_opt:mom_3/'
    predictor = Predictor(model_path, DistLSTM)
    train_cnfm, y, y_hat = conf_mat(train, predictor)
    dev_cnfm, y_dev, y_hat_dev = conf_mat(dev, predictor)
    print accuracy_score(y, y_hat)
    print accuracy_score(y_dev, y_hat_dev)
    y_hat = np.array(y_hat)
    y = np.array(y)
    
    y_hat_dev = np.array(y_hat_dev)
    y_dev = np.array(y_dev)
    print 'groups detection train\t', float(((y < 6) == (y_hat < 6)).sum()) / len(y)
    print 'groups detection dev\t', float(((y_dev < 6) == (y_hat_dev < 6)).sum()) / len(y_dev)
    float(((y_dev < 6) == (y_hat_dev < 6)).sum()) / len(y_dev)
    plot_confusion_matrix(train_cnfm, labels)
    plot_confusion_matrix(dev_cnfm, labels)
    import torch
    import torch.nn as nn
    import torchvision
    import numpy as np
    import torch.optim as optim
    import matplotlib.pyplot as plt
    import time
    !git clone https://github.com/Shakil-1501/TSAI.git
    from TSAI.S11 import CustomResNet
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Device : ",device)
    model = CustomResNet.CustomResNet().to(device)
    print("Model Loaded Successfully ")
    from TSAI.S11 import albumentationstransform
    albumentationstransform_train_transforms = albumentationstransform.train_transforms()
    from TSAI.S11 import dataloader
    trainloader , testloader = dataloader.datasetloader(albumentationstransform_train_transforms , batchsize = 512 , numwork = 4 )
    classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr = 0.00001 , momentum = 0.9 )
    from TSAI.S11 import LRScheduler
    lrfinder = LRScheduler.LRFinder(model, optimizer , criterion , device = "cuda")
    lrfinder.range_test(trainloader , end_lr =  0.1 , num_iter = 100 , step_mode = "linear" )
    lrfinder.plot()
    lrfinder.reset()
    from TSAI.S11 import traindataset
    print("Devive : ", device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr = 0.003 , momentum = 0.95 , weight_decay = 0.01 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer , max_lr = 0.01 , div_factor = 10 , pct_start  = 5/24 ,
                                                     cycle_momentum = True , epochs = 24 , steps_per_epoch = len(trainloader),
                                                     final_div_factor = 1 , anneal_strategy = 'linear' )
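    # (Added note) with epochs=24 and pct_start=5/24, OneCycleLR ramps the learning rate up for
    # roughly the first 5 epochs to max_lr=0.01 and then anneals it linearly for the remaining
    # 19 epochs; div_factor=10 makes the starting learning rate max_lr/10 = 0.001.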
    
    start_time = time.time()
    traindataset.traindataset(range_ = 24 , model = model , device = device , 
                              trainloader = trainloader , optimizer = optimizer  , criterion_ = criterion ,
                              batchsize = 512 , scheduler_ = scheduler )
    end_time = time.time()
    print("Training Time : ",end_time-start_time)
    from TSAI.S11 import testdataset
    testdataset.testdataset(model = model , device = device , testloader = testloader )
    /content/TSAI/S11/CustomResNet.py:57: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
      return F.log_softmax(x)
1. Obtain course titles
    course_blocks = page_soup.findAll("ul", {"class": "program-set"})
    len(course_blocks)
    course_blocks_eng = course_blocks[:-2]
    len(course_blocks_eng)
    #each container contains code, name, and desc for a course
    containers = [container for course_block in course_blocks_eng for container in course_block.findAll("li", {"class": "program-course"})]
    len(containers)
    import re
    course_titles = [re.split(" \([0-9]+ credit[s]*\)", container.a.text.strip())[0] for container in containers]
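    # (Added example, hypothetical title) a string like "MECH 262 Statistics and Measurement Laboratory (3 credits)"
    # is split before " (3 credits)", leaving just the course code and name.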
    len(course_titles)
    course_titles
    course_titles = [title.replace("\r", "") for title in course_titles]
    course_titles
2. Obtain course codes and course names from course titles
    course_codes = [" ".join(title.split()[:2]) for title in course_titles]
    course_codes
    course_names = [" ".join(title.split()[2:]) for title in course_titles]
    course_names
3. Obtain course descriptions
    course_descs = [container.find("div", {"class": "content"}).p.text.strip() for container in containers]
    len(course_descs)
    course_descs
4. Write to CSV
    import pandas as pd
    
    df = pd.DataFrame({
        
        "Course Number": course_codes,
        "Course Name": course_names,
        "Course Description": course_descs    
        
    })
    
    df
    df.to_csv('McGill_MechEng_Core_and_Electives_Courses.csv', index = False)
Question 1 (20 points) Create a 4 x 4 matrix whose diagonal elements are all ones (1's). Name it matrix "C". Show your solutions using Python code and do not forget to label them in the Text Cell.
    import numpy as np 
    #Matrix c
    a = np.array([1,1,1,1])
    b = np.diag(a)
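    # (Added note) np.eye(4, dtype=int) builds the same 4 x 4 identity matrix in one call.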
    print(b)
    [[1 0 0 0]
     [0 1 0 0]
     [0 0 1 0]
     [0 0 0 1]]
Question 2. (20 points) In relation to Question 1, show a solution that doubles the value of each element. Show your solutions using Python code and do not forget to label them in the Text Cell.
    a = np.array([1,1,1,1])
    b = np.diag(a)
    print(b*2)
    [[2 0 0 0]
     [0 2 0 0]
     [0 0 2 0]
     [0 0 0 2]]
Question 3. (10 points) Find the cross product of the vectors A = [2,7,4] and B = [3,9,8]. Show your solutions using Python code and do not forget to label them in the Text Cell.
    a = np.array ([2,7,4])
    b = np.array ([3,9,8])
    output = np.cross(a,b)
    print(output)
    [20 -4 -3]
List
    workshop = ["Moringa", "School", "Ngong-Lane", "Learning"]
    print(workshop)
    workshop[0]
    workshop[0:2]
    workshop.append('People')
    print(workshop)
    workshop.remove('Moringa')
    print(workshop)
    
    
    age = (100, 200, 300)
    type(age)
Dict: Has keys and values
    contact = {"name":"Elly",
               "phone":"0700501255",
               "email":""}
    
    print(contact)
    {'name': 'Elly', 'phone': '0700501255', 'email': ''}
Using the "get" function
    contact.get('email')
Using the del function in a dict (it can't be used this way on a list): the syntax is del followed by the variable name and then the key.
    print(contact)
    {'name': 'Elly', 'phone': '0700501255', 'email': ''}
Set: eliminating duplicates
    num = [12,14, 14, 15, 16, 17, 22]
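    # (Added example) the del statement described above: del contact['email'] removes that key from the dict.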
    new_num = set(num)
    print(new_num)
Create a dict with keys 1-5 and values equal to the squares of the keys.
    power_dict ={"1":1,"2": 4, 
                 "3": 9,
                 "4": 16,
                 "5": 25}
    print(power_dict)
    power_dict["5"] = 5**5
    print(power_dict)
    {'1': 1, '2': 4, '3': 9, '4': 16, '5': 25}
    {'1': 1, '2': 4, '3': 9, '4': 16, '5': 3125}
Modules: Pre-written code that is imported to perform certain functions. To access any function in a module, you call the module plus the function to perform.
    import math
    number = 100
    sqr = math.sqrt(100)
    #print(sqr)
    squares = {1:(int(math.pow(1,2))),2:(int(math.pow(2,2))),3:(int(math.pow(3,2))),4:(int(math.pow(4,2))),5:(int(math.pow(5,2)))}
    print(squares)
    {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
    #### 1)Mean/Median/Mode Imputation
    #It is easy to implement and the fastest way to obtain a complete dataset.
    #But it impacts the correlation and also distorts the original variance.
    import pandas as pd
    from google.colab import files
    uploaded = files.upload()
    df=pd.read_csv("titanic.csv", usecols=['Age', 'Fare', 'Survived'])
    df.head()
    df.isnull().mean()
    def impute_nan(df,variable,median):
        df[variable+"_median"]=df[variable].fillna(median)
    median = df.Age.median()
    median
    impute_nan(df, 'Age', median)
    df.head()
    print(df['Age'].std())
    print(df['Age_median'].std())
    import matplotlib.pyplot as plt
    %matplotlib inline
    fig = plt.figure()
    ax = fig.add_subplot(111)
    df['Age'].plot(kind='kde', ax=ax)
    df.Age_median.plot(kind='kde', ax=ax, color='red')
    lines, labels = ax.get_legend_handles_labels()
    ax.legend(lines, labels, loc='best');
    #### 2)Random Sample Imputation
    #It consists of taking random observations from the dataset and using them to replace the NaN values.
    #There is less distortion of the variance with Random Sample Imputation, but it cannot be used everywhere.
    #It assumes that the data is missing completely at random (MCAR).
    import pandas as pd
    df=pd.read_csv('titanic.csv', usecols=['Age','Fare','Survived'])
    df.head()
    df.isnull().mean()
    df.isnull().sum()
    df['Age'].dropna().sample(df['Age'].isnull().sum(), random_state=0)
    def impute_nan(df,variable,median):
        df[variable+"_median"]=df[variable].fillna(median)
        df[variable+"random"]=df[variable]
        random_sample = df[variable].dropna().sample(df[variable].isnull().sum(), random_state=0)
        random_sample.index=df[df[variable].isnull()].index
        df.loc[df[variable].isnull(),variable+'_random']=random_sample
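        # (Added note) setting random_sample.index to the index of the NaN rows above is what lets
        # this .loc assignment align each sampled value with a specific missing row.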
    median = df.Age.median()
    median
    impute_nan(df,"Age",median)
    df.head()
    import matplotlib.pyplot as plt
    %matplotlib inline
    fig = plt.figure()
    ax = fig.add_subplot(111)
    df['Age'].plot(kind='kde', ax=ax)
    df.Age_median.plot(kind='kde', ax=ax, color='red')
    df.Age_random.plot(kind='kde', ax=ax, color='green')
    lines, labels = ax.get_legend_handles_labels()
    ax.legend(lines, labels, loc='best')
    #### 3)Capturing NAN values with a new feature
    #Works when the data is not missing completely at random.
    #Creates additional features, which can lead to the curse of dimensionality.
    df=pd.read_csv('titanic.csv', usecols=['Age','Fare','Survived'])
    df.head()
    import numpy as np
    df['Age_NAN']=np.where(df['Age'].isnull(),1,0)
    df.head()
    df.Age.median()
    df['Age'].fillna(df.Age.median(),inplace=True)
    #### 4)End of Distribution Imputation
    #MCAR
    df=pd.read_csv('titanic.csv', usecols=['Age','Fare','Survived'])
    df.head()
    df.Age.hist(bins=50);
    extreme=df.Age.mean()+3*df.Age.std()
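    # (Added note) mean + 3*std is taken as the "end of distribution" value: for a roughly normal
    # Age distribution almost all real observations lie below it, so imputed values are pushed into
    # the far right tail and remain distinguishable from genuine ages.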
    import seaborn as sns
    sns.boxplot('Age',data=df);
    def impute_nan(df,variable,median,extreme):
        df[variable+"_end_distribution"]=df[variable].fillna(extreme)
        df[variable].fillna(median,inplace=True)
    impute_nan(df,'Age',df.Age.median(),extreme)
    df.head()
    df['Age'].hist(bins=50);
    df['Age_end_distribution'].hist(bins=50);
    sns.boxplot('Age_end_distribution',data=df);
    #### 5)Arbitrary Value Imputation 
     #It is used to find out the importance of the missing values. Rarely used.
    import pandas as pd
    df=pd.read_csv("titanic.csv", usecols=["Age","Fare","Survived"])
    df.head()
    def impute_nan(df,variable):
        df[variable+'_zero']=df[variable].fillna(0)
        df[variable+'_hundred']=df[variable].fillna(100)
    df['Age'].hist(bins=50);
*Analytical Information Systems* Worksheet 5 - Big Data and Streaming
Chair of Information Systems and Information Management, SS 2020
MapReduce
[MapReduce](https://en.wikipedia.org/wiki/MapReduce) is a programming model and an associated implementation for processing and generating big data sets with a parallel, distributed algorithm on a cluster. Let's have a look at the word count example from the lecture again:
1. __Input__
1. __Splitting__: Prepare the Map() input
1. __Mapping__: Run the user-provided Map() code. Each worker node applies the map function to the local data, and writes the output to a temporary storage.
1. __Shuffling__: "Shuffle" the Map output to the Reduce processors.
1. __Reduce__: Run the user-provided Reduce() code. The Reduce processors process each group of output data, per key, in parallel.
1. __Final result__: Produce the final output – the MapReduce system collects and sorts all the Reduce output.
MapReduce Libraries
MapReduce libraries have been written in many programming languages, with different levels of optimization.
- A popular open-source implementation that has support for distributed shuffles is part of Apache Hadoop.
- [RHadoop](https://github.com/RevolutionAnalytics/RHadoop/wiki) is a collection of five R packages that allow users to manage and analyze data with Apache Hadoop.
    - Using RHadoop requires a Java and Hadoop installation, the Hadoop Distributed File System, etc.
Thus, we will only exemplify the MapReduce algorithm using basic R and the `tidyverse`:
Exemplary R MapReduce Word Count Implementation
__Defining the map function__ The map function breaks the line into words and outputs a key/value pair for each word.
    library(tidyverse)
    count_words <- function(line){
        line %>%
                str_split(" ",simplify=FALSE) %>%
                unlist() %>%
                tibble(key=., value=1)
    }
__Defining the reduce function__ In the word count example, the Reduce function sums the word counts and generates a single output of the word and the final sum.
    reduce_count <- function(df){
        df %>%
            summarise(key=key[1],
                      count=sum(value))
    }
__Going through the MapReduce steps__
__1. Input__
    Input =  "\n\n"
    Input
__2. Splitting__ We will split the input by line ('\n' indicates a new line)
    Input %>%
        str_split("\n",simplify=FALSE) %>% unlist2. MappingInput %>%
        str_split("\n",simplify=FALSE) %>% unlist %>%
        map(count_words)
4. Shuffling
    Input %>%
        str_split("\n",simplify=FALSE) %>% unlist %>%
        map(count_words) %>% 
        map_df(rbind) %>% group_split(key)
5. Reducing
    Input %>%
        str_split("\n",simplify=FALSE) %>% unlist %>%
        map(count_words) %>% 
        map_df(rbind) %>% group_split(key) %>%
        map(reduce_count)
6. Merge and sort
    Input %>%
        str_split("\n",simplify=FALSE) %>% unlist %>%
        map(count_words) %>% 
        map_df(rbind) %>% group_split(key) %>%
        map(reduce_count) %>%
        map_df(cbind) %>% arrange(desc(count))
__Doing it the undistributed tidyverse way__
    Input %>%
        str_replace_all("\n", " ") %>%
        str_split(" ",simplify=FALSE) %>% unlist %>% 
        tibble(key=.) %>%
        group_by(key) %>%
        summarize(count=n()) %>%  arrange(desc(count))Stream Processing __Credits__- , Stanford University, http://web.stanford.edu/class/cs246/slides/15-streams1.pdf- , Princeton University, https://www.cs.princeton.edu/courses/archive/fall16/cos418/docs/L22-stream-processing.pdf  Data Streams - In many data mining situations, we do not know the entire data set in advance- We can think of the data as infinite and non-stationary (the distribution changes over time)- Stream Management is important when the input rate is controlled externally:    - Google queries    - Twitter or Facebook status updates __The Stream Model__ - Input elements enter at a rapid rate, at one or more input ports (i.e., streams)    - We call elements of the stream tuples- The system cannot store the entire stream accessibly    How do you make critical calculations about the stream using a limited amount of (secondary) memory?  Basic Stream Operators __Stateless conversion__ - Convert Celsius temperature to Fahrenheit: __emit__ (input * 9 / 5) + 3 __Stateless filtering__Function can filter inputs: –if(input>threshold) {__emit__ input} __Stateful conversion__Compute EWMA of Fahrenheit temperature:- new_temp = ⍺ * ( CtoF(input) ) + (1- ⍺) * last_temp- last_temp = new_temp – emit new_temp- emit new_temp __Aggregation (stateful)__E.g.,Average value per window- Window can be  elements (10) or time (1s)- Windows can be disjoint (every 5s)- Windows can be “tumbling” (5s window every 1s) __Stream processing as chain__ __Stream processing as directed graph__   The challenge of stream processing for BIG DATALarge amounts of data to process in realtime__Examples__:- Social network trends (trending)- Intrusion detection systems (networks, datacenters)- Sensors: Detect earthquakes by correlating vibrations of millions of smartphones- Fraud detection    - Visa: 2000 txn / sec on average, peak ~47,000 / sec __Stateless operations: trivially parallelized__ __State complicates parallelization__- Need to join results across parallel computations __Parallelization complicates fault-tolerance__ __We can parallelize joins__- using partitioned hash joins- but agian, complicates fault-tolerance  Stream Processing frameworksDifferent frameworks handle these challenges differently- Record acknowledgement (Storm)- Micro-batches (Spark Streaming, Storm Trident) - Transactional updates (GoogleClouddataflow) - Distributed snapshots (Flink)  Streaming data with R __The `sparklyr` interface for Spark Streaming__from the official [Website](https://spark.rstudio.com/guides/streaming/):Spark Streaming makes it easy to build scalable fault-tolerant streaming applications. Because is part of the Spark API, it is possible to re-use query code that queries the current state of the stream, as well as joining the streaming data with historical data. 
Please see Spark’s official documentation for a deeper look into Spark Streaming.The sparklyr interface provides the following:- Ability to run dplyr, SQL, spark_apply(), and PipelineModels against a stream- Read in multiple formats: CSV, text, JSON, parquet, Kafka, JDBC, and orc- Write stream results to Spark memory and the following file formats: CSV, text, JSON, parquet, Kafka, JDBC, and orc- An out-of-the box graph visualization to monitor the stream- A new reactiveSpark() function, that allows Shiny apps to poll the contents of the stream create Shiny apps that are able to read the contents of the stream  Interacting with a streamA good way of looking at the way how Spark streams update is as a three stage operation:1. __Input__ - Spark reads the data inside a given folder. The folder is expected to contain multiple data files, with new files being created containing the most current stream data.1. __Processing__ - Spark applies the desired operations on top of the data. These operations could be data manipulations (dplyr, SQL), data transformations (sdf operations, PipelineModel predictions), or native R manipulations (spark_apply()).1. __Output__ - The results of processing the input files are saved in a different folder.  `sparklyr` Example __Install requirements__This can take a few minutes...system("apt-get install openjdk-8-jdk-headless -qq > /dev/null")
    Sys.setenv(JAVA_HOME = "/usr/lib/jvm/java-8-openjdk-amd64")
    install.packages(c("sparklyr", "future"))
    library(sparklyr)
    spark_install()
    library(future)
    library(tidyverse)
1. Open the Spark connection
    sc <- spark_connect(master = "local")
Optional step. This resets the input and output folders. It makes it easier to run the code multiple times in a clean manner.
    if(file.exists("source")) unlink("source", TRUE)
    if(file.exists("source-out")) unlink("source-out", TRUE)2. Produce a single test file inside the “source” folder. This allows the “read” function to infer CSV file definition.stream_generate_test(iterations = 1)
    list.files("source")3. Point the stream reader to the folder where the streaming files will be placed.read_folder <- stream_read_csv(sc, "source")4. Process stream function: The processing starts with the read_folder variable that contains the input stream. It coerces the integer field x, into a type double. This is because the next function, ft_binarizer() does not accept integers. The binarizer determines if x is over 400 or not. This is a good illustration of how dplyr can help simplify the manipulation needed during the processing stage.process_stream <- read_folder %>%
      mutate(x = as.double(x)) %>%
      ft_binarizer(
        input_col = "x",
        output_col = "over",
        threshold = 400
      )4. The output writer is what starts the streaming job. It will start monitoring the input folder, and then write the new results in the “source-out” folder.write_output <- stream_write_csv(process_stream, "source-out")5. The test generation function will run 100 files every 0.2 seconds. To run the tests “out-of-sync” with the current R session, the future package is used.invisible(future(stream_generate_test(interval = 0.2, iterations = 100)))6. The “source-out” folder can be treated as a if it was a single table within Spark. Using spark_read_csv(), the data can be mapped, but not brought into memory (memory = FALSE). This allows the current results to be further analyzed using regular dplyr commands.spark_read_csv(sc, "stream", "source-out", memory = FALSE)Exercises  1. MapReduce __Sales analysis__You need to run a company-wide sales analysis. Your company uses a MapReduce system to handle the massive transaction data.We will have a look at the data first:sales <- read_csv('https://raw.githubusercontent.com/wi3jmu/AIS2020/master/notebooks/data/sales.csv')
    sales %>% head(10)
    Parsed with column specification:
    cols(
      date = col_date(format = ""),
      customerID = col_double(),
      productID = col_double(),
      payment = col_character(),
      amount = col_double(),
      price = col_double(),
      cost = col_double(),
      category = col_character()
    )
Define the corresponding Map and Reduce functions:
__Map__: Calculates the total profit for each product id within each subset
    calculate_profit <- function(df){
        df %>%
            # Write your code here 
    }
__Reduce__: Adds up the profit for each different product id
    reduce_profit <- function(df){
        df %>%
            # Write your code here 
    }
    sales %>% #Input
        split(sample(rep(1:5, 1000))) %>% #Splitting
        map(calculate_profit) %>% #Mapping
        map_df(rbind) %>% group_split(productID)%>% #Shuffling
        map(reduce_profit) %>% #Reduce
        map_df(cbind) %>% arrange(desc(total_profit)) %>%  #Merge and Sort
        head(10) #Display only top 10
2. Stream Processing
1. Why do stateful operations complicate parallelization?
    # Write your answer here
2. Why do parallelization operations complicate fault-tolerance?
    # Write your answer here
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/visualization/NLU_visualizations_tutorial.ipynb)
With NLU and [Spark-NLP-Display](https://github.com/JohnSnowLabs/spark-nlp-display) you can visualize the outputs of various NLP models.
Available visualizations:
- ner
- dep
- resolution
- relation
- assertion
Install NLU
    !wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
    import nlu
    --2022-04-15 03:40:22--  https://setup.johnsnowlabs.com/nlu/colab.sh
    Resolving setup.johnsnowlabs.com (setup.johnsnowlabs.com)... 192.168.127.12
    Connecting to setup.johnsnowlabs.com (setup.johnsnowlabs.com)|192.168.127.12|:443... connected.
    HTTP request sent, awaiting response... 302 Moved Temporarily
    Location: https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh [following]
    --2022-04-15 03:40:23--  https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh
    Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 172.16.31.10, 192.168.127.12, 172.16.31.10, ...
    Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|172.16.31.10|:443... connected.
    HTTP request sent, awaiting response... 200 OK
    Length: 1665 (1.6K) [text/plain]
    Saving to: ‘STDOUT’
    
    -                   100%[===================>]   1.63K  --.-KB/s    in 0s      
    
    2022-04-15 03:40:23 (34.4 MB/s) - written to stdout [1665/1665]
    
    Installing  NLU [...]
Visualize Named Entity Recognizer (NER) results
Applicable to any of the [100+ NER models! See here for an overview](https://nlp.johnsnowlabs.com/models?task=Named+Entity+Recognition)
    nlu.load('ner').viz(" from America and  from Germany don't share many oppinions.")
    onto_recognize_entities_sm download started this may take some time.
    Approx size to download 160.1 MB
    [OK!]
Visualize Dependency tree
Visualizes the structure of the labeled dependency tree and part-of-speech tags
    nlu.load('dep.typed').viz("Billy went to the mall")
    #Bigger Example
    nlu.load('dep.typed').viz(" from America and  from Germany don't share many oppinions but they both love  Labs software")dependency_typed_conllu download started this may take some time.
    Approximate size to download 2.3 MB
    [OK!]
    pos_anc download started this may take some time.
    Approximate size to download 3.9 MB
    [OK!]
    dependency_conllu download started this may take some time.
    Approximate size to download 16.7 MB
    [OK!]
Licensed visualizations
The following cells showcase visualizations with licensed models. Make sure to upload your `spark_nlp_for_healthcare.json` or follow [the instructions here to get access to licensed features](https://nlu.johnsnowlabs.com/docs/en/examples_hcautomatically-authorize-google-colab-via-json-file). If you do not have any credentials yet, you can [grab some here](https://www.johnsnowlabs.com/spark-nlp-try-free/).
Visualize resolution for entities
Applicable to any of the [100+ Resolver models! See here for an overview](https://nlp.johnsnowlabs.com/models?task=Entity+Resolution)
    nlu.load('med_ner.jsl.wip.clinical resolve_chunk.rxnorm.in').viz("He took 2 pills of Aspirin daily")
    # bigger example
    data = "This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD , gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction , subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU ."
    nlu.load('med_ner.jsl.wip.clinical resolve_chunk.rxnorm.in').viz(data)ner_wikiner_glove_840B_300 download started this may take some time.
    Approximate size to download 14.8 MB
    [OK!]
Visualize resolution sentences
Applicable to any of the [100+ Resolver models! See here for an overview](https://nlp.johnsnowlabs.com/models?task=Entity+Resolution)
    nlu.load('med_ner.jsl.wip.clinical resolve.icd10cm').viz('She was diagnosed with a respiratory congestion')
    # bigger example
    data = 'The patient is a 5-month-old infant who presented initially on Monday with a cold, cough, and runny nose for 2 days. Mom states she had no fever. Her appetite was good but she was spitting up a lot. She had no difficulty breathing and her cough was described as dry and hacky. At that time, physical exam showed a right TM, which was red. Left TM was okay. She was fairly congested but looked happy and playful. She was started on Amoxil and Aldex and we told to recheck in 2 weeks to recheck her ear. Mom returned to clinic again today because she got much worse overnight. She was having difficulty breathing. She was much more congested and her appetite had decreased significantly today. She also spiked a temperature yesterday of 102.6 and always having trouble sleeping secondary to congestion'
    nlu.load('med_ner.jsl.wip.clinical resolve.icd10cm').viz(data)
Visualize assertion status of entities
Applicable to any of the [10 + Assertion models! See here for an overview](https://nlp.johnsnowlabs.com/models?task=Assertion+Status)
    nlu.load('med_ner.clinical assert').viz("The MRI scan showed no signs of cancer in the left lung")
    #bigger example
    data ='This is the case of a very pleasant 46-year-old Caucasian female, seen in clinic on 12/11/07 during which time MRI of the left shoulder showed no evidence of rotator cuff tear. She did have a previous MRI of the cervical spine that did show an osteophyte on the left C6-C7 level. Based on this, negative MRI of the shoulder, the patient was recommended to have anterior cervical discectomy with anterior interbody fusion at C6-C7 level. Operation, expected outcome, risks, and benefits were discussed with her. Risks include, but not exclusive of bleeding and infection, bleeding could be soft tissue bleeding, which may compromise airway and may result in return to the operating room emergently for evacuation of said hematoma. There is also the possibility of bleeding into the epidural space, which can compress the spinal cord and result in weakness and numbness of all four extremities as well as impairment of bowel and bladder function. However, the patient may develop deeper-seated infection, which may require return to the operating room. Should the infection be in the area of the spinal instrumentation, this will cause a dilemma since there might be a need to remove the spinal instrumentation and/or allograft. There is also the possibility of potential injury to the esophageus, the trachea, and the carotid artery. There is also the risks of stroke on the right cerebral circulation should an undiagnosed plaque be propelled from the right carotid. She understood all of these risks and agreed to have the procedure performed.'
    nlu.load('med_ner.clinical assert').viz(data)
Visualize relationships between entities
Applicable to any of the [20 + Relation Extractor models! See here for an overview](https://nlp.johnsnowlabs.com/models?task=Relation+Extraction)
    nlu.load('med_ner.jsl.wip.clinical relation.temporal_events').viz('He developed cancer after a mercury poisoning in 1999 ') 
    # bigger example
    data = 'This is the case of a very pleasant 46-year-old Caucasian female, seen in clinic on 12/11/07 during which time MRI of the left shoulder showed no evidence of rotator cuff tear. She did have a previous MRI of the cervical spine that did show an osteophyte on the left C6-C7 level. Based on this, negative MRI of the shoulder, the patient was recommended to have anterior cervical discectomy with anterior interbody fusion at C6-C7 level. Operation, expected outcome, risks, and benefits were discussed with her. Risks include, but not exclusive of bleeding and infection, bleeding could be soft tissue bleeding, which may compromise airway and may result in return to the operating room emergently for evacuation of said hematoma. There is also the possibility of bleeding into the epidural space, which can compress the spinal cord and result in weakness and numbness of all four extremities as well as impairment of bowel and bladder function. However, the patient may develop deeper-seated infection, which may require return to the operating room. Should the infection be in the area of the spinal instrumentation, this will cause a dilemma since there might be a need to remove the spinal instrumentation and/or allograft. There is also the possibility of potential injury to the esophageus, the trachea, and the carotid artery. There is also the risks of stroke on the right cerebral circulation should an undiagnosed plaque be propelled from the right carotid. She understood all of these risks and agreed to have the procedure performed'
    pipe = nlu.load('med_ner.jsl.wip.clinical relation.clinical').viz(data)
Configuring visualizations
- `labels_to_viz` : Defines a subset of NER labels to viz, i.e. ['PER']; by default=[], which will display all labels. Applicable only for NER viz.
- `viz_colors` : Applicable for [ner, resolution, assert]; key = label, value = hex color, i.e. viz_colors={'TREATMENT':'008080', 'problem':'800080'}
    data = 'Dr.  suggested that Fritz takes 5mg penicilin for his cough'
    # Define custom colors for labels
    viz_colors={'STRENGTH':'#800080', 'DRUG_BRANDNAME':'#77b5fe', 'GENDER':'#ebde34'}
    nlu.load('med_ner.jsl.wip.clinical').viz(data,viz_colors =viz_colors)
    data = 'Dr.  suggested that Fritz takes 5mg penicilin for his cough'
    # Filter wich NER label to viz
    labels_to_viz=['SYMPTOM']
    nlu.load('med_ner.jsl.wip.clinical').viz(data,viz_colors=viz_colors,labels_to_viz=labels_to_viz)
Specify visualization type manually
NLU tries to automatically infer a viz type if none is specified. You can manually specify which component to viz by setting `viz_type=type` for one type out of `ner,dep,resolution,relation`
    data = " from America and  from Germany don't share many oppinions, but they both love John Snow Labs software!"
    nlu.load('ner').viz(data,viz_type='ner')
Viz Dependency
    import nlu
    data = " from America and  from Germany don't share many oppinions, but they both love John Snow Labs software!"
    viz = nlu.load('dep.typed').viz(data,viz_type='dep')
Viz Medical NER
    import nlu
    data = " and  from Germany don't share many oppinions, but they both fear cancer!"
    nlu.load('med_ner.jsl.wip.clinical').viz(data,viz_type='ner')
Viz Resolution Chunks
    import nlu
    nlu_ref = 'med_ner.jsl.wip.clinical en.resolve_chunk.icd10cm.neoplasms'
    data = """The patient is a 5-month-old infant who presented initially on Monday with a cold, cough, and runny nose for 2 days. Mom states she had no fever. Her appetite was good but she was spitting up a lot. She had no difficulty breathing and her cough was described as dry and hacky. At that time, physical exam showed a right TM, which was red. Left TM was okay. She was fairly congested but looked happy and playful. She was started on Amoxil and Aldex and we told to recheck in 2 weeks to recheck her ear. Mom returned to clinic again today because she got much worse overnight. She was having difficulty breathing. She was much more congested and her appetite had decreased significantly today. She also spiked a temperature yesterday of 102.6 and always having trouble sleeping secondary to congestion."""
    pipe = nlu.load(nlu_ref)
    viz = pipe.viz(data,viz_type='resolution')
Viz Resolution Sentence
    import nlu 
    data = ["""He has a starvation ketosis but nothing found for significant for dry oral mucosa"""]
    nlu.load('med_ner.jsl.wip.clinical resolve.icd10pcs').viz(data,viz_type='resolution' )
Viz Assert
    nlu_ref = 'med_ner.jsl.wip.clinical assert'
    data = "The patient was tested for cancer, but none was detected, he is free of cancer."
    nlu.load(nlu_ref).viz(data,viz_type='assert')
Viz Relation
    nlu_ref = 'med_ner.jsl.wip.clinical relation.temporal_events'
    data = "He was advised chest X-ray or CT scan after checking his SpO2 which was <= 93%"
    pipe = nlu.load(nlu_ref).viz(data,viz_type='relation')
Fine-Tuning DialoGPT3 on your telegram chat
Here is ready-to-run code for fine-tuning a RuDialoGPT3 model using HuggingFace and PyTorch on **your telegram chat**. I used RuDialoGPT-3 trained on forums as the base for fine-tuning. It was trained by [@Grossmend](https://github.com/Grossmend) on Russian forums. The training process took 12 days using 4x RTX 2080 Ti (2 epochs on a 32GB text corpus). The training procedure of GPT-3 for dialogue is described in Grossmend's [blogpost](https://habr.com/ru/company/icl_services/blog/548244/) (in Russian). I have created a simple pipeline and fine-tuned that model on my own exported telegram chat (~30mb json). It is in fact very easy to get the data from Telegram and fine-tune a model. Therefore, I made this notebook! If you just want to try / talk to my fine-tuned model, then go **straight to the Inference section**.
Uploading your data for fine-tuning
    # installing huggingface datasets and accelerate 
    ! pip install datasets transformers[sentencepiece]
    ! pip install accelerate
    
    # [optional] Login to google drive to save models
    from google.colab import drive
    drive.mount('/content/drive')
    
    # [optional] Login to wandb to track model's behaviour
    '''! pip install wandb
    ! wandb login
    wandb.init(project="fine tune RuDialoGPT2 on KirArChat")'''
    #@title Imports
    import sys
    import re
    import json
    
    from sklearn.model_selection import train_test_split
    from tqdm import tqdm
    
    import torch
    from transformers import TextDataset, DataCollatorForLanguageModeling
    from torch.utils.data import DataLoader
    
    from accelerate import Accelerator
    from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler
The next cell downloads the model and tokenizer using HuggingFace. You can start with my version or @Grossmend's: "Grossmend/rudialogpt3_medium_based_on_gpt2". Moreover, you can even start with any different DialoGPT trained on your language (with the notation of |x|y|text).
    from transformers import AutoModelForCausalLM, AutoTokenizer
    
    checkpoint = "Kirili4ik/ruDialoGpt3-medium-finetuned-telegram"   
    tokenizer =  AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    #@title Utility functions
    def get_length_param(text: str, tokenizer) -> str:
        """Maps text to 1 of 4 buckets based on length after encoding.
    
        Parameters
        ----------
        text: str
            The text to be given 1 of 4 length parameters.
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer used to compute the length of the text after encoding.
            For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
    
        Returns
        -------
        len_param: str
            One of four buckets: 
            '1' for short, '2' for medium, '3' for long texts and '-' for all others. 
        """
        tokens_count = len(tokenizer.encode(text))
        if tokens_count <= 15:
            len_param = '1'
        elif tokens_count <= 50:
            len_param = '2'
        elif tokens_count <= 256:
            len_param = '3'
        else:
            len_param = '-'
        return len_param
    
    
    def get_user_param(text: dict, machine_name_in_chat: str) -> str:
        """Maps text by 1/0 for it to be the person or the machine in the dialog
    
        Parameters
        ----------
        text: Dict[..., 'from', ...]
            Dict containing field 'from' with the name of the user who sent the message
    
        machine_name_in_chat: str
            Str with the name of the machine - it will be predicted
        """
        if text['from'] == machine_name_in_chat:
            return '1'  # machine
        else:
            return '0'  # human
    
    
    def build_text_file(data_json: dict, dest_path: str, 
                        tokenizer, machine_name_in_chat='Кирилл Гельван'):
        """Create a text file for training in special format for ruDialoGPT-3.
    
        Parameters
        ----------
        data_json: dict
            Dict containing 'text' (message) and 'from' (user who sent the message)
            
        dest_path: str
            String containing path to write data there
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer used to compute the length of the text after encoding.
            For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
        """
        f = open(dest_path, 'w')
        new_data = ''
        for i in range(len(data_json) - 1):
            message, next_message = data_json[i], data_json[i+1]
            if message['text'] == '' or type(message['text']) != str:
                continue
            if next_message['text'] == '' or type(next_message['text']) != str:
                continue
    
            user   = get_user_param(message, machine_name_in_chat=machine_name_in_chat)
            length = get_length_param(data_json[i+1]['text'], tokenizer)
            message_text = re.sub(r"\n", ". ", message['text'])
            new_data += f"|{user}|{length}|{message_text}{tokenizer.eos_token}" + "\n"
    
        f.write(new_data)
    
    
    def load_dataset(train_path, test_path, tokenizer):
        """Creates train and test PyTorch datasets and collate_fn using HuggingFace.
    
        Parameters
        ----------
        train_path: str
            String containing path to train data
            
        test_path: str
            String containing path to test data
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer used to compute the length of the text after encoding.
            For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
        """
        train_dataset = TextDataset(
              tokenizer  = tokenizer,
              file_path  = train_path,
              block_size = 256)
         
        test_dataset = TextDataset(
              tokenizer  = tokenizer,
              file_path  = test_path,
              block_size = 256)   
        
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=False
        )
        return train_dataset, test_dataset, data_collator
1) Export your telegram chat
![](https://raw.githubusercontent.com/Kirili4ik/ruDialoGpt3-finetune-colab/main/how-to-export-chat.jpg)
2) Upload it to colab
![](https://raw.githubusercontent.com/Kirili4ik/ruDialoGpt3-finetune-colab/main/how-to-upload-json.jpg)
3) Next cell creates train and test set from it
4) :tada:
    #@markdown Your telegram chat json path 'ChatExport.../YourChatName.json':
    path_to_telegram_chat_json = 'example: /content/drive/MyDrive/char27.json' #@param {type : "string"}
    #@markdown Name of the user to predict by GPT-3:
    machine_name_in_chat = 'example: ' #@param {type : "string"}
    
    
    with open(path_to_telegram_chat_json) as f: data = json.load(f)['messages']
    
    # test data is first 10% of chat, train - last 90%
    train, test = data[int(len(data)*0.1):], data[:int(len(data)*0.1)]
    
    build_text_file(train, 'train_dataset.txt', tokenizer)
    build_text_file(test,  'test_dataset.txt', tokenizer)
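For reference, a single formatted line can be built by hand with the helpers above; a minimal sketch (the sample message is made up, and the machine name is the notebook's default):
    # illustrative only: shows the |user|length|text<eos> format written by build_text_file
    sample = {'from': 'Кирилл Гельван', 'text': 'Привет! Как дела?'}        # made-up example message
    user   = get_user_param(sample, machine_name_in_chat='Кирилл Гельван')  # '1' -> machine
    length = get_length_param(sample['text'], tokenizer)                    # short text, so likely '1'
    print(f"|{user}|{length}|{sample['text']}{tokenizer.eos_token}")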
    
    print("Train dataset length: " + str(len(train)) + "samples")
    print("Test dataset length: "  + str(len(test)) + "samples")
    # let's look at our data
    ! head -n 10 train_dataset.txtHere the first number is the speaker number - '1' for GPT and '0' for the person. The second number is the length of the expected answer: '1' for short, '2' for medium, '3' for long texts and '-' for all others.# Create PyTorch Datasets
    train_dataset, test_dataset, data_collator = load_dataset('train_dataset.txt', 'test_dataset.txt', tokenizer)
    
    # Create PyTorch Dataloaders
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=data_collator)
    test_loader = DataLoader(test_dataset, batch_size=2, collate_fn=data_collator)
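    # note: with mlm=False the collator builds the labels directly from input_ids (causal-LM objective)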
    # this cell checks 1 forward pass
    try:
        for batch in train_loader:
            break
        {k: v.shape for k, v in batch.items()}
    
        outputs = model(**batch)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raiseFine-tuning#@title Fine-tuning params
    num_epochs = 3 #@param {type:"integer"}
    optimizer = AdamW(model.parameters(), lr=3e-5) #@param
    save_checkpoint_path = 'example: drive/MyDrive/GPT2_checkpoint-more-data-2ep.pt' #@param {type:"string"}
    
    
    num_training_steps = num_epochs * len(train_dataset)
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=num_training_steps
    )
    
    accelerator = Accelerator()
    train_dl, test_dl, model, optimizer = accelerator.prepare(
        train_loader, test_loader, model, optimizer
    )
    # wandb.watch(model, log="all")
    progress_bar = tqdm(range(num_training_steps))
    
    for epoch in range(num_epochs):
        
        ### TRAIN EPOCH
        model.train()
        for batch in train_dl:
            optimizer.zero_grad()
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            
            # wandb.log({'train_loss':loss.item()})
            optimizer.step()
            lr_scheduler.step()
            progress_bar.update(1)
    
        ### SAVE
        torch.save({
                'model_state_dict': model.state_dict(),
        }, save_checkpoint_path)
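    # note: only the model weights are saved here; optimizer and lr-scheduler state are not checkpointed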
        
        ### VALIDATE ONCE
        cum_loss = 0
        model.eval()
        with torch.inference_mode():
            for batch in test_dl:
                outputs = model(**batch)
                cum_loss += float(outputs.loss.item())
        
        print(cum_loss/len(test_loader))
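    # note: this is the mean LM cross-entropy over the test batches; its exponential gives an approximate validation perplexity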
        # wandb.log({'val_mean_loss':cum_loss/len(test_loader)})Inference#@title Installs and Utility functions
    
    %%capture
    # installing huggingface datasets and accelerate 
    ! pip install datasets transformers[sentencepiece]
    ! pip install accelerate
    
    def get_length_param(text: str, tokenizer) -> str:
        """Maps text to 1 of 4 buckets based on length after encoding.
    
        Parameters
        ----------
        text: str
            The text to be given 1 of 4 length parameters.
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer that is used to compute the length of the text after encoding.
        For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
    
        Returns
        -------
        len_param: str
            One of four buckets: 
            '1' for short, '2' for medium, '3' for long texts and '-' for all others. 
        """
        tokens_count = len(tokenizer.encode(text))
        if tokens_count <= 15:
            len_param = '1'
        elif tokens_count <= 50:
            len_param = '2'
        elif tokens_count <= 256:
            len_param = '3'
        else:
            len_param = '-'
        return len_param
    
    
    def get_user_param(text: dict, machine_name_in_chat: str) -> str:
        """Maps text by 1/0 for it to be the person or the machine in the dialogue
    
        Parameters
        ----------
        text: Dict[..., 'from', ...]
            Dict containing field 'from' with the name of the user who sent the message
    
        machine_name_in_chat: str
            Str with the name of the machine - it will be predicted
        """
        if text['from'] == machine_name_in_chat:
            return '1'  # machine
        else:
            return '0'  # human
    
    
    def build_text_file(data_json: dict, dest_path: str, 
                        tokenizer, machine_name_in_chat=''):
        """Create a text file for training in special format for ruDialoGPT-3.
    
        Parameters
        ----------
        data_json: dict
            Dict containing 'text' (message) and 'from' (user who sent the message)
            
        dest_path: str
            String containing path to write data there
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer that is used to compute the length of the text after encoding.
        For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
        """
        f = open(dest_path, 'w')
        new_data = ''
        for i in range(len(data_json) - 1):
            message, next_message = data_json[i], data_json[i+1]
            if message['text'] == '' or type(message['text']) != str:
                continue
            if next_message['text'] == '' or type(next_message['text']) != str:
                continue
    
            user   = get_user_param(message, machine_name_in_chat=machine_name_in_chat)
            length = get_length_param(data_json[i+1]['text'], tokenizer)
            message_text = re.sub(r"\n", ". ", message['text'])
            new_data += f"|{user}|{length}|{message_text}{tokenizer.eos_token}" + "\n"
    
    f.write(new_data)
    f.close()
    
    
    def load_dataset(train_path, test_path, tokenizer):
        """Creates train and test PyTorch datasets and collate_fn using HuggingFace.
    
        Parameters
        ----------
        train_path: str
            String containing path to train data
            
        test_path: str
            String containing path to test data
    
        tokenizer: HuggingFace tokenizer 
        Tokenizer that is used to compute the length of the text after encoding.
        For more info see https://huggingface.co/transformers/main_classes/tokenizer.html
        """
        train_dataset = TextDataset(
              tokenizer  = tokenizer,
              file_path  = train_path,
              block_size = 256)
         
        test_dataset = TextDataset(
              tokenizer  = tokenizer,
              file_path  = test_path,
              block_size = 256)   
        
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=False
        )
        return train_dataset, test_dataset, data_collator
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    
    # Download checkpoint:
    checkpoint = "Kirili4ik/ruDialoGpt3-medium-finetuned-telegram"   
    tokenizer =  AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    
    # [optional] Insert your checkpoint if needed:
    '''from google.colab import drive
    drive.mount('/content/drive')
    checkpoint = torch.load('drive/MyDrive/GPT2_checkpoint.pt', map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])'''
    
    model = model.to('cpu')
    model.eval()
    print()
    ### INFERENCE
    
    chat_history_ids = torch.zeros((1, 0), dtype=torch.int)
    
    while True:
        
        next_who = input("Who's phrase?\t")  #input("H / G?")     # Human or GPT
    
        # In case Human
        if next_who == "H":
            input_user = input("===> Human: ")
            
            # encode the new user input, add parameters and return a tensor in Pytorch
            new_user_input_ids = tokenizer.encode(f"|0|{get_length_param(input_user, tokenizer)}|" \
                                                  + input_user + tokenizer.eos_token, return_tensors="pt")
            # append the new user input tokens to the chat history
            chat_history_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
    
        if next_who == "G":
    
            next_len = input("Phrase len? 1/2/3/-\t")  #input("Exp. len?(-/1/2/3): ")
            # encode the new user input, add parameters and return a tensor in Pytorch
            new_user_input_ids = tokenizer.encode(f"|1|{next_len}|", return_tensors="pt")
            # append the new user input tokens to the chat history
            chat_history_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
            
            # print(tokenizer.decode(chat_history_ids[-1])) # uncomment to see full gpt input
            
            # save previous len
            input_len = chat_history_ids.shape[-1]
            # generated a response; PS you can read about the parameters at hf.co/blog/how-to-generate
            chat_history_ids = model.generate(
                chat_history_ids,
                num_return_sequences=1,                     # use for more variants, but have to print [i]
                max_length=512,
                no_repeat_ngram_size=3,
                do_sample=True,
                top_k=50,
                top_p=0.9,
                temperature = 0.6,                          # 0 for greedy
                mask_token_id=tokenizer.mask_token_id,
                eos_token_id=tokenizer.eos_token_id,
                unk_token_id=tokenizer.unk_token_id,
                pad_token_id=tokenizer.pad_token_id,
                device='cpu'
            )
            
        # pretty print last output tokens from bot
            print(f"===> GPT-3:  {tokenizer.decode(chat_history_ids[:, input_len:][0], skip_special_tokens=True)}")Bank Marketing Campaign- Predicting Term Deposit (CRISP-DM)  Problem Definition:  One of the most common marketing strategy In Banking sector is direct marketing campaigns through phone calls ,it is a form of advertising that allows organizations to communicate directly with customers to offer their services based on the client’s existing bank profile .Here we will consider term deposit as a banking service .Direct marketing has significant challenges:* The increasing number of marketing campaigns over time has reduced their effects on the general public.* According to study positive response rates of direct marketing tend to be around 1-3% * Startegy like Telemarketing may have high financial and resource costs, but less ROI * Many people find direct marketing annoying and intrusive.* If we reach a consumer who isn't interested in services, they are likely to find it irritating and this can create a negative brand association.  Business Goal : to build a list of target customers who are likey to subscribe a term deposite. The more targeted our campaigns, the more successful they are likely to be.  Project Objective : We will convert this problem into a machine learning classification problem .We will build a model to predict whether a client will subscribe a term deposit or not so that the banks can arrange a better management of available resources by focusing on the potential customers “predicted” by the classifier .This will help them to design a more efficient and precise campaign strategy to reduce the costs,improve the profits and customer satisfaction . Data Mining Technique to be used : Classification   Data Set Information:The data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to access if the product (bank term deposit) would be ('yes') or not ('no') subscribed.source : uci machine learning repository , link: http://archive.ics.uci.edu/ml/datasets/Bank+Marketing   Attribute Information:**Bank client data:*** Age (numeric)* Job : type of job (categorical: 'admin.', 'blue-collar', 'entrepreneur', 'housemaid', 'management', 'retired', 'self-employed', 'services', 'student', 'technician', 'unemployed', 'unknown')* Marital : marital status (categorical: 'divorced', 'married', 'single', 'unknown' ; note: 'divorced' means divorced or widowed)* Education (categorical: 'basic.4y', 'basic.6y', 'basic.9y', 'high.school', 'illiterate', 'professional.course', 'university.degree', 'unknown')* Default: has credit in default? (categorical: 'no', 'yes', 'unknown')* Housing: has housing loan? (categorical: 'no', 'yes', 'unknown')* Loan: has personal loan? (categorical: 'no', 'yes', 'unknown')**Related with the last contact of the current campaign:*** Contact: contact communication type (categorical:  'cellular','telephone')* Month: last contact month of year (categorical: 'jan', 'feb', 'mar',   …, 'nov', 'dec')* Dayofweek: last contact day of the week (categorical:  'mon','tue','wed','thu','fri')* Duration: last contact duration, in seconds (numeric). Important  note: this attribute highly affects the output target (e.g., if  duration=0 then y='no'). Yet, the duration is not known before a call  is performed. Also, after the end of the call y is obviously known.  
Thus, this input should only be included for benchmark purposes and  should be discarded if the intention is to have a realistic  predictive model.**Other attributes:***  Campaign: number of contacts performed during this campaign and for    this client (numeric, includes last contact)*  Pdays: number of days that passed by after the client was last   contacted from a previous campaign (numeric; 999 means client was not   previously contacted)*  Previous: number of contacts performed before this campaign and for   this client (numeric)* Poutcome: outcome of the previous marketing campaign (categorical:  'failure','nonexistent','success')**Social and economic context attributes*** Emp.var.rate: employment variation rate - quarterly indicator, it defines as a measure of the extent to which available labour resources (people available to work) are being used.  (numeric)* Cons.price.idx: consumer price index - monthly indicator (numeric), it expresses the change in the current prices of the market basket in terms of the prices during the same month in the previous year. * Cons.conf.idx: consumer confidence index - monthly indicator , CCI is a survey administered by The Conference Board, that measures how optimistic or pessimistic consumers are regarding their expected financial situation  (numeric)* Euribor3m: euribor 3 month rate - daily indicator (numeric), it is the interest rate at which a selection of European banks lend one another funds denominated in euros whereby the loans have a maturity of 3 months  Nr.employed: number of employees - quarterly indicator (numeric)  **Output variable (desired target):*** y - has the client subscribed a term deposit? (binary: 'yes', 'no')  Preliminary Data Analysisimport pandas as pd    
    import seaborn as sns     
    import numpy as np        
    import matplotlib.pyplot as plt     
    from scipy import stats       
    df=pd.read_csv(r'D:\Linear Regression\bank-additional_bank-additional-full.csv')
    df.head()
    df.shape
    df.info()
    df.isnull().sum()
    # no null value present in the data set 
    df.nunique()
    df.describe()
    df.describe(include='object')
    # renameing 'y' as 'target'
    df=df.rename(columns={'y':'target'})
    df
    df.target.value_counts()Exploratory Data Analysis   Visualizing Categorical Variablessns.set_style('whitegrid')
    plt.figure(figsize=(6,4))
    sns.countplot(x='target',data=df)
    plt.figure(figsize=(6,4))
    sns.countplot(x='marital',data=df)
    plt.figure(figsize=(10,6))
    sns.countplot(x='education',data=df)
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.figure(figsize=(12,6))
    sns.countplot(x="job",data=df)
    plt.xticks(rotation=45, horizontalalignment='right')
    fig,(ax1,ax2,ax3) =plt.subplots(nrows = 1, ncols = 3,figsize=(14,6))
    sns.countplot(x="loan",data=df,ax=ax1)
    ax1.set_xlabel('personal loan')
    sns.countplot(x="housing",data=df,ax=ax2)
    ax2.set_xlabel('house loan')
    sns.countplot(x="default",data=df,ax=ax3)
    ax3.set_xlabel('default')
    fig,ax=plt.subplots()
    sns.countplot(x="poutcome",data=df)
    ax.set_xlabel('previous outcome')
    fig,ax=plt.subplots()
    sns.countplot(x="contact",data=df)
    ax.set_xlabel("contcat type")
    plt.figure(figsize=(10,6))
    sns.countplot(x="month",data=df)
    plt.figure(figsize=(8,4))
    sns.countplot(x="day_of_week",data=df)Visualizing Numerical Variableplt.figure(figsize=(8,6))
    sns.distplot(df['age'],bins=40)
    df.age.describe()
    plt.figure(figsize=(8,6))
    fig = sns.boxplot(x='age', data=df,color='royalblue')
    plt.figure(figsize=(8,6))
    sns.distplot(df['duration'],bins=40)
    df.duration.describe()
    plt.figure(figsize=(8,6))
    fig = sns.boxplot(x='duration', data=df,color='royalblue')
    #converting target variable in 0 for no and 1 for yes
    df['target']=df['target'].replace({'no':0,'yes':1})
    df.target.value_counts()Bivariate analysisplt.figure(figsize=(10,6))
    sns.barplot(x='job', y='target', data=df)
    plt.title('job vs term deposit(likey to subscribe)',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    df1 = df.groupby('job').agg({'target':'sum'})
    plt.figure(figsize=(10,6))
    sns.barplot(x = df1.index, y = df1.target, data = df1)
    plt.title('job vs term deposit (total count)',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    df.job.value_counts()
    plt.figure(figsize=(10,6))
    sns.barplot(x='marital', y='target', data=df)
    plt.title('marital vs term deposit(likey to subscribe)',fontsize=16)
    df1 = df.groupby('marital').agg({'target':'sum'})
    plt.figure(figsize=(10,6))
    sns.barplot(x = df1.index, y = df1.target, data = df1)
    plt.title('marital vs term deposit (total count)',fontsize=16)
    df.marital.value_counts()
    plt.figure(figsize=(10,6))
    sns.barplot(x='education', y='target', data=df)
    plt.title('education vs term deposit(likey to subscribe)',fontsize=16)
    df1 = df.groupby('education').agg({'target':'sum'})
    plt.figure(figsize=(10,6))
    sns.barplot(x = df1.index, y = df1.target, data = df1)
    plt.title('education vs term deposit',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    df.education.value_counts()
    plt.figure(figsize=(10,6))
    sns.barplot(x='job', y='target', hue='marital',data=df)
    plt.title('job vs marital vs term deposit(likey to subscribe)',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.figure(figsize=(10,6))
    sns.barplot(x='job', y='target',hue='housing', data=df)
    plt.title('job vs housing vs term deposit(likey to subscribe)',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.figure(figsize=(10,6))
    sns.barplot(x='job', y='target',hue='loan', data=df)
    plt.title('job vs loan vs term deposit(likey to subscribe)',fontsize=16)
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.figure(figsize=(10,6))
    sns.barplot(x='month', y='target', data=df)
    plt.title('month vs term deposit(likey to subscribe)',fontsize=16)
    df1 = df.groupby('month').agg({'target':'sum'})
    plt.figure(figsize=(10,6))
    sns.barplot(x = df1.index, y = df1.target, data = df1)
    plt.title('month vs term deposit (total)',fontsize=16)
    df.month.value_counts()
    plt.figure(figsize=(10,6))
    sns.barplot(x='day_of_week', y='target', data=df)
    plt.title('day_of_week vs term deposit(likey to subscribe)',fontsize=16)
    df1 = df.groupby('day_of_week').agg({'target':'sum'})
    plt.figure(figsize=(10,6))
    sns.barplot(x = df1.index, y = df1.target, data = df1)
    plt.title('day_of_week vs term deposit (total)',fontsize=16)
    df.info()
    corrmat = df.corr()
    fig, ax = plt.subplots(figsize=(12, 9))
    sns.heatmap(corrmat,annot=True,)
    plt.title('correlation matrix for numerical variables',fontsize=18)Observations: 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', and 'nr.employed' are highly correlated; we cannot drop any of them without advice from a domain expert. Maybe we can apply PCA to these variables at a later stage.   Data Preparation We have to drop the 'duration' column because the duration is unknown before a call is performed, so we cannot keep it in the training dataset; moreover, after the call the outcome is known anyway.df=df.drop('duration',axis=1)
    df.shape
    df
    df.columns
    df.pdays.value_counts()
    # in pdays, 39673 values are 999, which means the client was not previously contacted; the remaining values
    # range from 3 to 27 and their count is significantly low. We cannot keep the column like this (it would add
    # bias to our model), so we convert it into a categorical column
    df['pdays_cat'] = ['not_cont' if each == 999 else 'cont' for each in df.pdays]
    df.head()
    df.pdays_cat.value_counts()
    df=df.drop(['pdays'], axis=1)
    df.shapeseparating categorical variablecat_columns_df=df.select_dtypes(include=['object'])
    cat_columns_dfseparating numerical valuenum_columns_df=df.select_dtypes(exclude=['object'])
    num_columns_df
    num_columns_df = num_columns_df.drop('target',axis=1)
    num_columns_df.shape
    num_columns_df.columns
    # creating the target variable (as boolean) for the train/test split
    df_test=df.target.astype(dtype='bool')
    df_test.shape
    df_test.dtypecreating dummy variables from categorical variablesdummy_columns_df=pd.get_dummies(cat_columns_df,drop_first=True)
    dummy_columns_df.shapescaling numerical variablesfrom sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    num_columns_scaled_df=pd.DataFrame(scaler.fit_transform(num_columns_df),columns=num_columns_df.columns)
    num_columns_scaled_dfCreating Train Data setdf_train = pd.concat([num_columns_scaled_df, dummy_columns_df], axis=1,sort=False)
    df_train
    df_train.shapeTrain Test Splittingfrom sklearn.model_selection import train_test_split
    X_train,X_test,y_train,y_test=train_test_split(df_train,df_test,test_size=0.2,random_state=101)
    print(X_train.shape)
    print(X_test.shape)
    print(y_train.shape)
    print(y_test.shape)(32950, 52)
    (8238, 52)
    (32950,)
    (8238,)Handling Imbalanced Datafrom collections import Counter
    print(Counter(y_train))Counter({False: 29269, True: 3681})No : 88.83 %Yes : 11.17 %  We will use the Random Oversampling technique to balance the data: this method randomly selects examples from the minority class, with replacement, and adds them to the training dataset.from imblearn.over_sampling import RandomOverSampler
    RO = RandomOverSampler(sampling_strategy='minority')
    X_train_os,y_train_os = RO.fit_resample(X_train,y_train)
    X_train_os.shape,y_train_os.shape
    from collections import Counter
    print(Counter(y_train))
    print(Counter(y_train_os))Counter({False: 29269, True: 3681})
    Counter({False: 29269, True: 29269})Model and Evaluation   Models used: * Logistic Regression * K-Nearest Neighbors * Decision Tree * Random Forest  Evaluation metrics: * Accuracy Score * F1 Score * Precision Score * Recall Score * ROC AUC Score  Evaluation method: * As this is a classification problem we will mainly consider the F1 and ROC AUC scores * Satisfactory Recall Score: our objective is to reduce the False Negative count, since we cannot afford to miss potential customers * Satisfactory Precision Score: we have to keep the False Positive count low so that we avoid wasting money and effort.from sklearn import metrics 
    from sklearn.metrics import confusion_matrix,classification_report
    from sklearn.metrics import roc_curve,accuracy_score,precision_score, recall_score
    def classification_metrics(x,y) :
        accuracy = metrics.accuracy_score(x,y)
        F1_weighted= metrics.f1_score(x,y,average='weighted')
        F1_macro= metrics.f1_score(x,y,average='macro')
        precision = metrics.precision_score(x,y)
        recall=metrics.recall_score(x,y)
        cm= metrics.confusion_matrix(x,y)
        
        print('accuracy score is :',accuracy )
        print('F1 weighted score is :',F1_weighted )
        print('F1 macro score is :',F1_macro )
        print('precision score is :',precision )
        print('recall score is :',recall )
        sns.heatmap(cm,annot=True,cmap='Blues',fmt='g')
        plt.title('Confusion Matrix',fontsize=16)Logistic Regressionfrom sklearn.linear_model import LogisticRegression
    LR=LogisticRegression()
    LR.fit(X_train_os,y_train_os)
    y_predLR=LR.predict(X_test)
    classification_metrics(y_test,y_predLR)
    LR_probs = LR.predict_proba(X_test)[:,1]
    NS_probs = [0 for _ in range(len(y_test))]
    lr_fpr, lr_tpr, lr_threshold = roc_curve(y_test, LR_probs)
    ns_fpr, ns_tpr, ns_threshold = roc_curve(y_test, NS_probs)
    plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic')
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve',fontsize=16)
    plt.legend()
    plt.show()
    
    print('AUC ROC score is :' ,metrics.roc_auc_score(y_test,y_predLR))Knn Classifierfrom  sklearn.neighbors import KNeighborsClassifier
    scores = []
    recall=[]
    for k in range(1,25,2) :
        knn=KNeighborsClassifier(n_neighbors=k)
        knn.fit(X_train_os,y_train_os)
        y_pred=knn.predict(X_test)
        scores.append(metrics.recall_score(y_test,y_pred))
        recall.append(metrics.recall_score(y_test,y_pred))
    plt.plot(range(1,25,2), scores)
    plt.xlabel('Value of K for KNN')
    plt.ylabel('Recall Score on test set')
    # will consider the optimum k value as 14 
    knn=KNeighborsClassifier(n_neighbors=14)
    knn.fit(X_train_os,y_train_os)
    y_predKNN=knn.predict(X_test)
    classification_metrics(y_test,y_predKNN)
    KNN_probs = knn.predict_proba(X_test)[:,1]
    NS_probs = [0 for _ in range(len(y_test))]
    knn_fpr, knn_tpr, _ = roc_curve(y_test, KNN_probs)
    ns_fpr, ns_tpr, ns_threshold = roc_curve(y_test, NS_probs)
    plt.plot(knn_fpr, knn_tpr, marker='.', label='KNN')
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve',fontsize=16)
    plt.legend()
    plt.show()
    print('AUC ROC score is :' ,metrics.roc_auc_score(y_test,y_predKNN))Decision Tree Classifierfrom sklearn.tree import DecisionTreeClassifier
    DT=DecisionTreeClassifier()
    DT.fit(X_train_os,y_train_os)
    y_predDT=DT.predict(X_test)
    classification_metrics(y_test,y_predDT)
    DT_probs = DT.predict_proba(X_test)[:,1]
    NS_probs = [0 for _ in range(len(y_test))]
    dt_fpr, dt_tpr, _ = roc_curve(y_test, DT_probs)
    ns_fpr, ns_tpr, ns_threshold = roc_curve(y_test, NS_probs)
    plt.plot(dt_fpr, dt_tpr, marker='.', label='Decision Tree')
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve',fontsize=16)
    plt.legend()
    plt.show()
    print('AUC ROC score is :' ,metrics.roc_auc_score(y_test,y_predDT))Random Forest Classifierfrom sklearn.ensemble import RandomForestClassifier
    RF=RandomForestClassifier()
    RF.fit(X_train_os,y_train_os)
    y_predRF=RF.predict(X_test)
    classification_metrics(y_test,y_predRF)
    RF_probs = RF.predict_proba(X_test)[:,1]
    NS_probs = [0 for _ in range(len(y_test))]
    rf_fpr, rf_tpr, _ = roc_curve(y_test, RF_probs)
    ns_fpr, ns_tpr, ns_threshold = roc_curve(y_test, NS_probs)
    plt.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest')
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve',fontsize=16)
    plt.legend()
    plt.show()
    print('AUC ROC score is :' ,metrics.roc_auc_score(y_test,y_predRF))The False Negative count for Logistic Regression is the least, also the Recall, F1, AUC ROC  scores are comparatively better for this model, we will try to manipulate the threshold value so that we can improve the Recall value .LR_probs = LR.predict_proba(X_test)[:,1]
    LR_probs
    lr_threshold
    recall_sc = []
    for thres in lr_threshold:
        y_predLR = np.where(LR_probs>thres,1,0)
        recall_sc.append(metrics.recall_score(y_test, y_predLR))
        
    df_score = pd.concat([pd.Series(lr_threshold), pd.Series(recall_sc)],axis=1)
    df_score.columns = ['thresholds', 'recall_sc']
    df_score.head()
    # we will try to push the recall value up while keeping it below 0.80
    df_score = df_score[df_score['recall_sc']< 0.80]
    df_score.sort_values(by='recall_sc', ascending=False, inplace=True)
    df_score
    # pick the threshold giving the highest recall in that range (0.33 from the sorted table above)
    thres=0.33
    y_predLR_final = np.where(LR_probs>thres,1,0)
    classification_metrics(y_test,y_predLR_final)
    print('AUC ROC score is :' ,metrics.roc_auc_score(y_test,y_predLR_final))AUC ROC score is : 0.6868271045837147Though we were able to improve the recall value and decrease the FN count, the FP count also increased heavily, so there is a trade-off; considering the business expectations we can take a decision about this and tune the model. ### Later on we will try to hyper-tune the KNN, decision tree and random forest models and check the results.Concatenating a DataFrame or Series vertically or horizontallyimport FinanceDataReader as fdr
    samsung_df = fdr.DataReader('005390', '2009-01-01', '2017-12-31')
    kodex_df = fdr.DataReader('069500', '2016-01-01', '2017-12-31')
    samsung_df.head(2)
    kodex_df.head(2)
    # default axis=0: concatenate vertically
    pd.concat(
        [samsung_df, kodex_df]
    ).head(2)- Column and index alignment still applies as usual!pd.concat([samsung_df, kodex_df[['Open', 'High']]]).tail()
     pd.concat(
         [samsung_df, kodex_df],
         keys=['삼성', 'KODEX200'],
         names=['종목명', '날짜']
     )
     pd.concat(
         [samsung_df, kodex_df],
         axis=1
     ).head()
     pd.concat(
         [samsung_df, kodex_df],
         keys=['삼성', 'KODEX200'],
         axis=1
     )
     pd.concat(
         [samsung_df, kodex_df],
         axis=1,
         names=['종목명'],
         join='inner',
     ).head()
    pd.concat([samsung_df.head(), kodex_df['Close'].head()], join='inner')
    samsung_diff_col_df = samsung_df.copy()
    samsung_diff_col_df.columns = ['1_' + col for col in samsung_df.columns]
    samsung_diff_col_df.head()
    samsung_df.head()
    pd.concat([samsung_df, samsung_diff_col_df])Customer Segmentation using Clustering***This mini-project is based on [this blog post](http://blog.yhat.com/posts/customer-segmentation-using-python.html) by yhat. Please feel free to refer to the post for additional information, and solutions.%matplotlib inline
    import pandas as pd
    import sklearn
    import matplotlib.pyplot as plt
    import seaborn as sns
    
    # Setup Seaborn
    sns.set_style("whitegrid")
    sns.set_context("poster")DataThe dataset contains information on marketing newsletters/e-mail campaigns (e-mail offers sent to customers) and transaction level data from customers. The transactional data shows which offer customers responded to, and what the customer ended up buying. The data is presented as an Excel workbook containing two worksheets. Each worksheet contains a different dataset.df_offers = pd.read_excel("./WineKMC.xlsx", sheetname=0)
    df_offers.columns = ["offer_id", "campaign", "varietal", "min_qty", "discount", "origin", "past_peak"]
    df_offers.head()We see that the first dataset contains information about each offer such as the month it is in effect and several attributes about the wine that the offer refers to: the variety, minimum quantity, discount, country of origin and whether or not it is past peak. The second dataset in the second worksheet contains transactional data -- which offer each customer responded to.df_transactions = pd.read_excel("./WineKMC.xlsx", sheet_name=1)
    df_transactions.columns = ["customer_name", "offer_id"]
    df_transactions['n'] = 1
    df_transactions.head()Data wrangling We're trying to learn more about how our customers behave, so we can use their behavior (whether or not they purchased something based on an offer) as a way to group similar minded customers together. We can then study those groups to look for patterns and trends which can help us formulate future offers.The first thing we need is a way to compare customers. To do this, we're going to create a matrix that contains each customer and a 0/1 indicator for whether or not they responded to a given offer.  Checkup Exercise Set IExercise: Create a data frame where each row has the following columns (Use the pandas [`merge`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html) and [`pivot_table`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html) functions for this purpose): customer_name One column for each offer, with a 1 if the customer responded to the offerMake sure you also deal with any weird values such as `NaN`. Read the documentation to develop your solution.#your turn
    df_merge = pd.merge(left = df_offers, right = df_transactions, on = 'offer_id', how = 'left')
    df_pivot= df_merge.pivot_table(index = 'customer_name', columns = 'offer_id', values = 'n', aggfunc= 'sum', fill_value = 0)
    df_pivot= df_pivot.reset_index('customer_name')
    x_col = df_pivot.drop('customer_name', axis = 1)
    df_pivot.head()
    x_col.head()K-Means ClusteringRecall that in K-Means Clustering we want to *maximize* the distance between centroids and *minimize* the distance between data points and the respective centroid for the cluster they are in. True evaluation for unsupervised learning would require labeled data; however, we can use a variety of intuitive metrics to try to pick the number of clusters K. We will introduce two methods: the Elbow method, the Silhouette method and the gap statistic.  Choosing K: The Elbow Sum-of-Squares MethodThe first method looks at the sum-of-squares error in each cluster against $K$. We compute the distance from each data point to the center of the cluster (centroid) to which the data point was assigned. $$SS = \sum_k \sum_{x_i \in C_k} \sum_{x_j \in C_k} \left( x_i - x_j \right)^2 = \sum_k \sum_{x_i \in C_k} \left( x_i - \mu_k \right)^2$$where $x_i$ is a point, $C_k$ represents cluster $k$ and $\mu_k$ is the centroid for cluster $k$. We can plot SS vs. $K$ and choose the *elbow point* in the plot as the best value for $K$. The elbow point is the point at which the plot starts descending much more slowly.  Checkup Exercise Set IIExercise:  What values of $SS$ do you believe represent better clusterings? Why? Create a numpy matrix `x_cols` with only the columns representing the offers (i.e. the 0/1 colums)  Write code that applies the [`KMeans`](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) clustering method from scikit-learn to this matrix.  Construct a plot showing $SS$ for each $K$ and pick $K$ using this plot. For simplicity, test $2 \le K \le 10$. Make a bar chart showing the number of points in each cluster for k-means under the best $K$. What challenges did you experience using the Elbow method to pick $K$?# your turn
    
    #Import Package:
    from sklearn.cluster import KMeans
    
    #Calculate Sum of Squared Errors for various values of k: 
    SOS = []
    for k in range(2,11):
        km = KMeans(n_clusters = k, random_state = 12)     #Initiate kmeans
        model = km.fit(x_col)           #Fit model
        SOS.append(model.inertia_)      #Calculate error values and append to SOS
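        # note: inertia_ is the within-cluster sum of squared distances to the closest centroid, i.e. the SS quantity defined above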
    
    #Transform to DataFrame:
    df_SOS=  pd.DataFrame({'SOS': SOS, 'K': range(2,11)}) 
    
    #Plot SOS Vs k:
    df_SOS.plot(x= 'K', y = 'SOS', figsize= (15,8), color = 'red', marker = 'o')
    _ = plt.xlabel('k-values')
    _ = plt.ylabel('Sum of Squared Errors')
    _ = plt.title('Elbow Method')
    _ = plt.annotate("k= 4", xy =(4,218))
    # Bar graph using best k:
    import numpy as np
    
    km_best = KMeans(n_clusters = 4, random_state = 12)
    km_best.fit_predict(x_col)
    
    unique,count = np.unique(km_best.labels_, return_counts = True)
    print(unique, count)
    
    _ = plt.figure(figsize= (15,8))
    _ = plt.bar(unique, count, align = 'center', alpha = 0.8 )
    _ = plt.xlabel('k-clusters')
    _ = plt.ylabel('Count of cluster labels')
    _ = plt.title('Frequency of cluster labels in the dataset')[0 1 2 3] [27 15 23 35]Observation: 1. Optimal value of k = 4 because after this point sum of squared errors reduce marginally.  2. Difficulty with elbow method is that it becomes difficult to pin down optimal value of k from the graph.   Choosing K: The Silhouette MethodThere exists another method that measures how well each datapoint $x_i$ "fits" its assigned cluster *and also* how poorly it fits into other clusters. This is a different way of looking at the same objective. Denote $a_{x_i}$ as the *average* distance from $x_i$ to all other points within its own cluster $k$. The lower the value, the better. On the other hand $b_{x_i}$ is the minimum average distance from $x_i$ to points in a different cluster, minimized over clusters. That is, compute separately for each cluster the average distance from $x_i$ to the points within that cluster, and then take the minimum. The silhouette $s(x_i)$ is defined as$$s(x_i) = \frac{b_{x_i} - a_{x_i}}{\max{\left( a_{x_i}, b_{x_i}\right)}}$$The silhouette score is computed on *every datapoint in every cluster*. The silhouette score ranges from -1 (a poor clustering) to +1 (a very dense clustering) with 0 denoting the situation where clusters overlap. Some criteria for the silhouette coefficient is provided in the table below. | Range       | Interpretation                                ||-------------|-----------------------------------------------|| 0.71 - 1.0  | A strong structure has been found.            || 0.51 - 0.7  | A reasonable structure has been found.        || 0.26 - 0.5  | The structure is weak and could be artificial.|| < 0.25      | No substantial structure has been found.      |Source: http://www.stat.berkeley.edu/~spector/s133/Clus.html Fortunately, scikit-learn provides a function to compute this for us (phew!) called [`sklearn.metrics.silhouette_score`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html). Take a look at [this article](http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html) on picking $K$ in scikit-learn, as it will help you in the next exercise set. Checkup Exercise Set IIIExercise: Using the documentation for the `silhouette_score` function above, construct a series of silhouette plots like the ones in the article linked above.Exercise: Compute the average silhouette score for each $K$ and plot it. What $K$ does the plot suggest we should choose? Does it differ from what we found using the Elbow method?# Your turn.
    from sklearn.metrics import silhouette_samples, silhouette_score
    
    silhouette = []
    
    for k in range (2,11):
        km_best_2 = KMeans(n_clusters= k, random_state = 12)
        labels=km_best_2.fit_predict(x_col)
        silhouette_score(x_col,labels)
        silhouette.append(silhouette_score(x_col,labels))
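        # note: silhouette_score returns the mean of the per-sample s(x_i) values defined above (range -1 to +1)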
    print(silhouette)
        
    #Plot silhouette score:
    plt.figure(figsize = (15,8))
    _ = plt.plot(range(2,11),silhouette, color = 'black', linestyle = '--')
    _ = plt.xlabel('k-values')
    _ = plt.ylabel('Silhouette Score')[0.09007566678796264, 0.11983176694269196, 0.134214593236668, 0.12669880588460608, 0.14923833749716883, 0.14998961923527868, 0.09681014391899324, 0.1002309461682194, 0.1067213573938179]Observation:1. Since silhouette average score for each k-value is <0.25, there is no substantial cluster that has been found in our dataset. 2. However, as per elbow method as well as silhouette score k = 4, 6 and 7 will provide better clustering than other k-values.   Choosing $K$: The Gap StatisticThere is one last method worth covering for picking $K$, the so-called Gap statistic. The computation for the gap statistic builds on the sum-of-squares established in the Elbow method discussion, and compares it to the sum-of-squares of a "null distribution," that is, a random set of points with no clustering. The estimate for the optimal number of clusters $K$ is the value for which $\log{SS}$ falls the farthest below that of the reference distribution:$$G_k = E_n^*\{\log SS_k\} - \log SS_k$$In other words a good clustering yields a much larger difference between the reference distribution and the clustered data. The reference distribution is a Monte Carlo (randomization) procedure that constructs $B$ random distributions of points within the bounding box (limits) of the original data and then applies K-means to this synthetic distribution of data points.. $E_n^*\{\log SS_k\}$ is just the average $SS_k$ over all $B$ replicates. We then compute the standard deviation $\sigma_{SS}$ of the values of $SS_k$ computed from the $B$ replicates of the reference distribution and compute$$s_k = \sqrt{1+1/B}\sigma_{SS}$$Finally, we choose $K=k$ such that $G_k \geq G_{k+1} - s_{k+1}$.  Aside: Choosing $K$ when we Have LabelsUnsupervised learning expects that we do not have the labels. In some situations, we may wish to cluster data that is labeled. Computing the optimal number of clusters is much easier if we have access to labels. There are several methods available. We will not go into the math or details since it is rare to have access to the labels, but we provide the names and references of these measures.* Adjusted Rand Index* Mutual Information* V-Measure* Fowlkes–Mallows indexSee [this article](http://scikit-learn.org/stable/modules/clustering.html) for more information about these metrics.  Visualizing Clusters using PCAHow do we visualize clusters? If we only had two features, we could likely plot the data as is. But we have 100 data points each containing 32 features (dimensions). Principal Component Analysis (PCA) will help us reduce the dimensionality of our data from 32 to something lower. For a visualization on the coordinate plane, we will use 2 dimensions. In this exercise, we're going to use it to transform our multi-dimensional dataset into a 2 dimensional dataset.This is only one use of PCA for dimension reduction. We can also use PCA when we want to perform regression but we have a set of highly correlated variables. PCA untangles these correlations into a smaller number of features/predictors all of which are orthogonal (not correlated). PCA is also used to reduce a large set of variables into a much smaller one. 
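The gap statistic described above is not implemented in the exercises; a minimal sketch, assuming the `x_col` offer matrix from earlier and drawing the B reference datasets uniformly inside the bounding box of the features (B and k_max are illustrative choices):
    import numpy as np
    from sklearn.cluster import KMeans

    def gap_statistic(X, k_max=10, B=10, random_state=12):
        X = np.asarray(X, dtype=float)
        rng = np.random.RandomState(random_state)
        mins, maxs = X.min(axis=0), X.max(axis=0)
        gaps, s_k = [], []
        for k in range(1, k_max + 1):
            log_ss = np.log(KMeans(n_clusters=k, random_state=random_state).fit(X).inertia_)
            ref_log_ss = []
            for _ in range(B):
                X_ref = rng.uniform(mins, maxs, size=X.shape)   # null (reference) distribution
                ref_log_ss.append(np.log(KMeans(n_clusters=k, random_state=random_state).fit(X_ref).inertia_))
            gaps.append(np.mean(ref_log_ss) - log_ss)              # G_k
            s_k.append(np.sqrt(1 + 1 / B) * np.std(ref_log_ss))    # s_k
        return gaps, s_k

    # choose the smallest k with gaps[k-1] >= gaps[k] - s_k[k], e.g.:
    # gaps, s_k = gap_statistic(x_col)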
Checkup Exercise Set IVExercise: Use PCA to plot your clusters: Use scikit-learn's [`PCA`](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) function to reduce the dimensionality of your clustering data to 2 components Create a data frame with the following fields:     customer name   cluster id the customer belongs to   the two PCA components (label them `x` and `y`)   Plot a scatterplot of the `x` vs `y` columns Color-code points differently based on cluster ID How do the clusters look?  Based on what you see, what seems to be the best value for $K$? Moreover, which method of choosing $K$ seems to have produced the optimal result visually?Exercise: Now look at both the original raw data about the offers and transactions and look at the fitted clusters. Tell a story about the clusters in context of the original data. For example, do the clusters correspond to wine variants or something else interesting?#your turn
    from sklearn.decomposition import PCA
    
    pca = PCA(n_components = 2, random_state = 2)
    
    pca_predict=pca.fit_transform(x_col)
    
    df_pca = pd.DataFrame(pca_predict, columns=['x', 'y'])
    
    df_pca.head()
    df_merge_2 = pd.merge(left = df_pca, right = df_pivot, left_index=True, right_index=True)
    df_merge_2 = df_merge_2.drop(range(1,33), axis = 1)
    df_merge_2 = df_merge_2.assign(label= km_best.labels_)
    df_merge_2.head()
    #Plot PCA components: 
    _ = plt.figure(figsize = (15,8))
    _ = plt.scatter(df_merge_2.x, df_merge_2.y, alpha= 0.6, cmap='Set1', marker = 'o', edgecolors='black', c = df_merge_2.label)
    _ = plt.xlabel('1st pca feature')
    _ = plt.ylabel('2nd pca feature')
    _ = plt.title('Scatter plot of PCA Variables')Observation: 1. From the scatter plot above, we observe 4 cluster labels, showing some overlap between the pink and brown clusters. 2. Let's apply k-means to find the optimal k-value and see if we can reduce some of these clusters.#Calculate optimal k-value for PCA variables:
    
    #Sum of squared errors:
    error = []
    
    #Apply k-means
    for i in range (2,11):
        km_pca = KMeans(n_clusters= i)
        km_pca.fit_predict(pca_predict)
        error.append(km_pca.inertia_)
        
    #Convert array to dataframe:
    df_pca_k= pd.DataFrame({'SOS': error, 'k_values': range(2,11)})
    
    #Plot sum of squared errors Vs k-values: 
    
    df_pca_k.plot(y = 'SOS', x = 'k_values', linestyle= "--", color = 'green', figsize = (15,8), marker = 'o')
    _ = plt.annotate('k=3', xy=(3, 21), fontsize = 15)
    _ = plt.xlabel('k-values')
    _ = plt.ylabel('Sum of Squared Errors for PCA variables')
    _ = plt.title('Elbow Method')
    #Apply k-means using optimal k-value: 
    km_pca_best = KMeans(n_clusters = 3, random_state = 12)
    
    ##Predict labels:
    pca_lables=km_pca_best.fit_predict(pca_predict)
    #Plot PCA components with optimized cluster id's: 
    _ = plt.figure(figsize = (15,8))
    _ = plt.scatter(df_pca.x, df_pca.y, alpha= 0.6, cmap='Set1', marker = 'o', edgecolors='black', c = pca_lables)
    _ = plt.xlabel('1st pca feature')
    _ = plt.ylabel('2nd pca feature')
    #Silhouette Score: 
    score = silhouette_score(pca_predict, pca_lables)
    print(f"Silhouette Score is {score}")Silhouette Score is 0.5303468047950125Observation: 1. From above scatter plot, we can clearly see that after applying pca, we were able to reduce k-value from 4 to 3.2. Silhouette Score has improved from 0.13 to 0.53, which means a reasonable cluster structure had been formed after applying PCA before K-Means.#Let's visualize clusters with best k-value: 
    df_merge_2 = pd.merge(left = df_pca, right = df_pivot, left_index=True, right_index=True)
    df_merge_2 = df_merge_2.drop(range(1,33), axis = 1)
    df_merge_3 = df_merge_2.assign(pca_label= pca_lables)
    df_merge_3.head()
    df_final=pd.merge(left = df_merge, right = df_merge_3, on= 'customer_name', how = 'inner').drop(['n'], axis = 1) 
    df_final.head()
    #Cluster Summary Table:
    df_clusters=df_final.groupby('pca_label').agg({'min_qty': ['mean'],
                                  'discount': ['mean']}).round().stack()
    
    df_clusters=df_clusters.rename(columns={'x': 'pca_x', 'y': 'pca_y'}).reset_index()
    df_clusters.drop('level_1', axis= 1)
    #Melt Table:
    df_clusters_melt = pd.melt(df_clusters,id_vars='pca_label',
                               value_vars=['min_qty','discount'],
                               var_name='criteria', value_name='mean')
    #Bar plot:
    _ = plt.figure(figsize=(15,8))
    sns.barplot(x= 'pca_label', y = 'mean', data = df_clusters_melt, hue = 'criteria', palette='Set1')
    
    _ = plt.legend(title = 'Criteria', loc ='upper right', bbox_to_anchor=(1.5,1))Observation: 1. Cluster 0: Customers get a good discount even if they don't buy as much.  What we've done is we've taken those columns of 0/1 indicator variables, and we've transformed them into a 2-D dataset. We took one column and arbitrarily called it `x` and then called the other `y`. Now we can throw each point into a scatterplot. We color coded each point based on it's cluster so it's easier to see them. Exercise Set VAs we saw earlier, PCA has a lot of other uses. Since we wanted to visualize our data in 2 dimensions, restricted the number of dimensions to 2 in PCA. But what is the true optimal number of dimensions?Exercise: Using a new PCA object shown in the next cell, plot the `explained_variance_` field and look for the elbow point, the point where the curve's rate of descent seems to slow sharply. This value is one possible value for the optimal number of dimensions. What is it?#your turn
    # Initialize a new PCA model with a default number of components.
    import sklearn.decomposition 
    pca = sklearn.decomposition.PCA()
    pca.fit(x_col)
    print(pca.explained_variance_.shape)
    # Do the rest on your own :)
    
    plt.figure(figsize = (15,8))
    _ = plt.plot(range(1, 33), pca.explained_variance_, color='red', marker = 'o') 
    _ = plt.xlabel('Optimal Dimensions for the dataset')
    _ = plt.ylabel('Explained Variance')
    _ = plt.title('PCA Components Explained Variance')(32,)Observation: 1. From Elbow Curve above, we can see that explained variance decreases sharply after point 3, hence should be the optimal dimension for this dataset.  Other Clustering Algorithmsk-means is only one of a ton of clustering algorithms. Below is a brief description of several clustering algorithms, and the table provides references to the other clustering algorithms in scikit-learn. * **Affinity Propagation** does not require the number of clusters $K$ to be known in advance! AP uses a "message passing" paradigm to cluster points based on their similarity. * **Spectral Clustering** uses the eigenvalues of a similarity matrix to reduce the dimensionality of the data before clustering in a lower dimensional space. This is tangentially similar to what we did to visualize k-means clusters using PCA. The number of clusters must be known a priori.* **Ward's Method** applies to hierarchical clustering. Hierarchical clustering algorithms take a set of data and successively divide the observations into more and more clusters at each layer of the hierarchy. Ward's method is used to determine when two clusters in the hierarchy should be combined into one. It is basically an extension of hierarchical clustering. Hierarchical clustering is *divisive*, that is, all observations are part of the same cluster at first, and at each successive iteration, the clusters are made smaller and smaller. With hierarchical clustering, a hierarchy is constructed, and there is not really the concept of "number of clusters." The number of clusters simply determines how low or how high in the hierarchy we reference and can be determined empirically or by looking at the [dendogram](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.cluster.hierarchy.dendrogram.html).* **Agglomerative Clustering** is similar to hierarchical clustering but but is not divisive, it is *agglomerative*. That is, every observation is placed into its own cluster and at each iteration or level or the hierarchy, observations are merged into fewer and fewer clusters until convergence. Similar to hierarchical clustering, the constructed hierarchy contains all possible numbers of clusters and it is up to the analyst to pick the number by reviewing statistics or the dendogram.* **DBSCAN** is based on point density rather than distance. It groups together points with many nearby neighbors. DBSCAN is one of the most cited algorithms in the literature. It does not require knowing the number of clusters a priori, but does require specifying the neighborhood size.  Clustering Algorithms in Scikit-learnMethod nameParametersScalabilityUse CaseGeometry (metric used)K-Meansnumber of clustersVery largen_samples, medium n_clusters withMiniBatch codeGeneral-purpose, even cluster size, flat geometry, not too many clustersDistances between pointsAffinity propagationdamping, sample preferenceNot scalable with n_samplesMany clusters, uneven cluster size, non-flat geometryGraph distance (e.g. nearest-neighbor graph)Mean-shiftbandwidthNot scalable with n_samplesMany clusters, uneven cluster size, non-flat geometryDistances between pointsSpectral clusteringnumber of clustersMedium n_samples, small n_clustersFew clusters, even cluster size, non-flat geometryGraph distance (e.g. 
nearest-neighbor graph)Ward hierarchical clusteringnumber of clustersLarge n_samples and n_clustersMany clusters, possibly connectivity constraintsDistances between pointsAgglomerative clusteringnumber of clusters, linkage type, distanceLarge n_samples and n_clustersMany clusters, possibly connectivity constraints, non EuclideandistancesAny pairwise distanceDBSCANneighborhood sizeVery large n_samples, medium n_clustersNon-flat geometry, uneven cluster sizesDistances between nearest pointsGaussian mixturesmanyNot scalableFlat geometry, good for density estimationMahalanobis distances to  centersBirchbranching factor, threshold, optional global clusterer.Large n_clusters and n_samplesLarge dataset, outlier removal, data reduction.Euclidean distance between pointsSource: http://scikit-learn.org/stable/modules/clustering.html Exercise Set VIExercise: Try clustering using the following algorithms. Affinity propagationSpectral clusteringAgglomerative clusteringDBSCANHow do their results compare? Which performs the best? Tell a story why you think it performs the best.# Your turn
    from sklearn.cluster import AffinityPropagation, AgglomerativeClustering, SpectralClustering, DBSCAN, MeanShift
    #from sklearn.pipeline import make_pipeline
    
    #Initiate models:
    ap = AffinityPropagation()
    ac = AgglomerativeClustering(n_clusters=3)
    sc = SpectralClustering(n_clusters=3)
    db = DBSCAN(eps = 1)
    ms = MeanShift()
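    # note: eps=1 sets the DBSCAN neighbourhood radius; n_clusters=3 for the agglomerative and
    # spectral models follows the k found above with PCA + k-means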
    #AffinityPropagation
    aff_labels= ap.fit_predict(x_col)
    print('Silhouette Score AffinityPropagation:', silhouette_score(x_col, aff_labels))
    
    #AgglomerativeClustering
    agg_lables=ac.fit_predict(x_col)
    print('Silhouette Score AgglomerativeClustering:',silhouette_score(x_col, agg_lables))
    
    # SpectralClustering
    sc_labels = sc.fit_predict(x_col)
    print('Silhouette Score SpectralClustering:',silhouette_score(x_col, sc_labels))
    
    #DBSCAN
    db_labels = db.fit_predict(x_col)
    print('Silhouette Score DBSCAN:',silhouette_score(x_col, db_labels))
    
    #Scatter Plot of clustering models on Principal Components:
    _ =plt.figure(figsize = (20,10))
    _ =plt.subplot(2,2,1)
    _ =plt.scatter(df_merge_2.x, df_merge_2.y, c = aff_labels, cmap = 'Set1')
    _ = plt.title("Affinity Propagation")
    
    _ =plt.subplot(2,2,2)
    _ =plt.scatter(df_merge_2.x, df_merge_2.y, c = agg_lables, cmap ='Set2')
    _ = plt.title("Agglomerative Clustering")
    
    _ =plt.subplot(2,2,3)
    _ =plt.scatter(df_merge_2.x, df_merge_2.y, c = sc_labels, cmap ='viridis')
    _ = plt.title("Spectral Clustering")
    
    _ =plt.subplot(2,2,4)
    _ =plt.scatter(df_merge_2.x, df_merge_2.y, c = db_labels, cmap= 'plasma')
    _ = plt.title("DBSCAN")Observation: 1. From above, Affinity Propogation performed better as observed from its silhouette score. However, there are multiple clusters of small sizes, which makes it difficult to    2. Because Affinity Propogation can accept many clusters of uneven cluster size. 3. So, how does it compare with mean shift model, which can also accept many clusters of uneven sizes.#Mean Shift (optional) 
    ms_labels = ms.fit_predict(x_col)
    print('Silhouette Score Mean Shift:',silhouette_score(x_col, ms_labels))
    plt.figure(figsize= (10,8))
    _ = plt.scatter(df_merge_2.x, df_merge_2.y, c = ms_labels)
    _ = plt.title("Mean Shift")Silhouette Score Mean Shift: 0.16468837239678863VQE and Quantum Graph Neural Networksimport numpy
    import math
    import random
    import numpy as np
    import scipy
    from matplotlib import pyplot as plt
    from tqdm import tqdm
    from scipy.optimize import minimize
    import networkx as nx
    import cirqIn order to begin thinking about the quantum graph neural network, we must prepare some training data. We perform VQE in order to find the ground state of a given Ising model Hamiltonian:# Initialize the necessary qubits
    
    qubit_number = 4
    data_register = [cirq.GridQubit(0, i) for i in range(0, qubit_number)]
    network_register = [cirq.GridQubit(1, i) for i in range(0, qubit_number)]
    
    # Creates the graph structure of the quantum system
    
    ising_graph = nx.Graph()
    ising_graph.add_nodes_from(range(0, qubit_number))
    ising_graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0)])
    
    nx.draw(ising_graph)
    plt.show() 
    
    # Creates parameters
    
    matrix_params = [[random.randint(10, 100)/10 for i in range(0, 4)] for j in range(0, 2)]
    print(matrix_params)
    # Defines the rz gate:
    
    def rz(control, target, param):
        yield cirq.CX.on(control, target)
        yield cirq.rz(param).on(target)
        yield cirq.CX.on(control, target)
    
    # Method that initializes qubits in even superposition
    
    def even_superposition(qubits):
        
        for i in qubits:
            yield cirq.H.on(i)
    
    # Method that prepares the QAOA ansatz layers
    
    def qaoa_layer(param1, param2, qubits, ising_graph):
        
        # Applies another layer of coupling gates
        for count, i in enumerate(ising_graph.edges):
            yield rz(qubits[i[0]], qubits[i[1]], 2*param1[count])
        
        # Applies the final layer of RX gates on the qubits
        
        for i in range(0, len(qubits)):
            yield cirq.rx(2*param2[i]).on(qubits[i])
        
    # Method that prepares the decoupled layers
    
    def decoupled_layer(param1, param2, qubits):
        
        for i in range(0, len(qubits)):
            yield cirq.ZPowGate(exponent=param1[i]).on(qubits[i])
        yield cirq.XPowGate(exponent=param2[i]).on(qubits[i])
    
    # Method that prepares the VQE circuit that will be used to approximate the ground state
    
    def vqe_circuit(parameters, qubits, ising_graph):
        
        yield decoupled_layer(parameters[0], parameters[1], qubits) 
        yield decoupled_layer(parameters[2], parameters[3], qubits)
    
    test_circuit = cirq.Circuit()
    test_circuit.append(vqe_circuit([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], data_register, ising_graph))
    print(test_circuit)
    # Creates the Hamiltonian that we are attempting to learn
    
    def create_hamiltonian_matrix(n, graph):
        
        # Defines Pauli matrices
    
        pauli_x = np.array([[0, 1], [1, 0]])
        pauli_y = np.array([[0, -1j], [1j, 0]])
        pauli_z = np.array([[1, 0], [0, -1]])
        identity = np.array([[1, 0], [0, 1]])
    
        matrix = np.zeros((2**n, 2**n))
        
        # Creates the interaction component of the Hamiltonian
    
        for count, i in enumerate(graph.edges):
            m = 1
            for j in range(0, n):
                if (i[0] == j or i[1] == j):
                    m = np.kron(m, pauli_z)
                else:
                    m = np.kron(m, identity)
            matrix = np.add(matrix, matrix_params[0][count]*m)
        
        # Creates the "bias" component of the matrix
        
        for i in range(0, n):
            m = 1
            for j in range(0, n):
                if (j == i):
                    m = np.kron(m, pauli_x)
                else:
                    m = np.kron(m, identity)
            matrix = np.add(matrix, matrix_params[1][i]*m)
    
        return matrix
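    # In other words, the matrix assembled above is the Ising Hamiltonian
    #     H = sum_{(i,j) in E} J_ij * Z_i Z_j + sum_i b_i * X_i,
    # with couplings J_ij taken from matrix_params[0] (one per graph edge)
    # and transverse-field biases b_i taken from matrix_params[1].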
    
    print(create_hamiltonian_matrix(qubit_number, ising_graph))
    def create_density_matrix(arr):
        
        array = np.array(arr)
        plt.matshow(array)
        plt.colorbar()
        plt.show()
    
    # Finds the eigenvector corresponding to the lowest energy state
        
    val, vec = np.linalg.eig(create_hamiltonian_matrix(qubit_number, ising_graph))
    m = []
    
    min_ind = list(val).index(min(val))
    print(val[min_ind])
    for i in range(0, 2**qubit_number):
        m.append(vec[i][min_ind])
    
    '''
    def apply(n):
        return float(n*np.conj(n))
    
    func_vec = np.vectorize(apply)
    new = func_vec(np.outer(m, m))
    '''
    
    create_density_matrix(np.real(np.outer(m, np.conj(m))))
    # Creates the VQE method that we will optimize
    
    def create_circuit(parameters, qubits):
        
        # Prepares the circuit
        
        circuit = cirq.Circuit()
        circuit.append(even_superposition(qubits))
        circuit.append(vqe_circuit(parameters, qubits, ising_graph))
        
        # Creates the simulation
        
        simulator = cirq.Simulator()
        results = simulator.simulate(circuit)
        state_vector = results.final_state
        
        return state_vector
    
    # Creates the cost function
    
    iterations = 0
    
    def cost_function(parameters, qubits):
        
        global iterations
        
        hamiltonian = create_hamiltonian_matrix(qubit_number, ising_graph)
        vector = create_circuit(parameters, qubits)
        
        first = np.matmul(hamiltonian, vector)
        cost = np.inner(np.conj(vector), first)
        
        if (iterations%50 == 0):
            print("Cost at Step "+str(iterations)+"= "+str(np.real(cost)))
        
        iterations += 1
        
        return np.real(cost)
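    # Note: this cost is the energy expectation <psi(theta)|H|psi(theta)> of the ansatz state;
    # its minimum over all states is the ground-state energy found by the exact
    # diagonalization above, which is what the optimizer below tries to approach.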
    
    # Creates the optimizer for our variational circuit
    
    qubit_register = network_register
    
    def optimizer_cost(params):
        
        parameters = [
            params[0:4], 
            params[4:8],
            params[8:12],
            params[12:16]
        ]
        
        return cost_function(parameters, qubit_register)
    
    # Creates the optimizer
    
    init = [random.randint(0, 20)/10 for i in range(0, 16)]
    out = minimize(optimizer_cost, x0=init, method="COBYLA", options={'maxiter':1000, 'tol':1e-10})
    g = out['x']
    print(out)
    
    # Prepares the optimal state and visualizes it
    
    optimal_params = [
            g[0:4], 
            g[4:8],
            g[8:12],
            g[12:16]
        ]
    
    optimal = create_circuit(optimal_params, qubit_register)
    result = np.real(np.outer(optimal, np.conj(optimal)))
    create_density_matrix(result)
    
    v = [ 3.21629331,  0.54890376,  2.02976445,  0.7818173 ,  1.3213677 ,
            1.48080682,  1.67054856,  1.44101918,  1.20196752,  0.56441884,
           -0.31570509,  0.15785939,  1.69543663,  0.72541886,  0.02910459,
           -0.52821689]
    # Attempts to evolve the prepared ground state forward in time, with the time evolution circuit
    
    def le_state_evolve(depth, time, qubits, ising_graph, params):
        
        yield even_superposition(qubits)
        yield vqe_circuit(params, qubits, ising_graph)
        yield time_evolution(depth, time, qubits, ising_graph)
    
    # Creates the circuit
    
    def create_time_circuit(depth, time, qubits, ising_graph, params):
        
        circuit = cirq.Circuit()
        circuit.append(le_state_evolve(depth, time, qubits, ising_graph, params))
        
        simulator = cirq.Simulator()
        results = simulator.simulate(circuit)
        state_vector = results.final_state
        
        return state_vector
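    # NOTE (assumption): the `time_evolution` layer used by le_state_evolve above is not
    # defined anywhere in this excerpt. A minimal Trotterized sketch, consistent with the
    # rz/rx conventions of qaoa_layer and reusing the matrix_params weights, could look
    # like the following. It approximates exp(-iHt) with `depth` alternating layers of
    # ZZ couplings and RX rotations; it is an illustration, not necessarily the author's
    # original cell.
    def time_evolution(depth, time, qubits, ising_graph):
        for _ in range(0, depth):
            for count, edge in enumerate(ising_graph.edges):
                yield rz(qubits[edge[0]], qubits[edge[1]], 2 * matrix_params[0][count] * time / depth)
            for i in range(0, len(qubits)):
                yield cirq.rx(2 * matrix_params[1][i] * time / depth).on(qubits[i])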
    
    vector = create_time_circuit(800, 10, data_register, ising_graph, optimal_params)
    create_density_matrix(np.real(np.outer(vector, np.conj(vector))))
    # Creates the numerical simulation to test our time-evolution circuit
    
    hbar = 1  # assumed: hbar is not defined elsewhere in this excerpt (natural units)
    
    def time_evolution_test(time, vec):
        
        new_matrix = scipy.linalg.expm(complex(0,-1)*create_hamiltonian_matrix(qubit_number, ising_graph)*time / hbar)
        return np.matmul(new_matrix, vec)
    
    vec = time_evolution_test(10, optimal)
    create_density_matrix(np.real(np.outer(vec, np.conj(vec))))
    
    print("Fidelity: "+str(np.inner(np.conj(vec), vector)*np.inner(np.conj(vector), vec)))
    # Creates the initial "guess" graph of interactions, and assigns parameters to each of the edges
    
    initial_graph = nx.complete_graph(qubit_number)
    
    # Creates the SWAP test between two registers of qubits
    
    def swap_test(control, index1, index2):
        
        yield cirq.H.on(control)
        for i in range(0, len(index1)):
            yield cirq.CSWAP(control, index1[i], index2[i])
        yield cirq.H.on(control)
    
    # Creates the QGRNN ansatz
    
    def qgrnn_ansatz(initial_graph, parameters, opt_params, depth, qubits, time):
        
        yield even_superposition(qubits)
        yield vqe_circuit(opt_params, qubits, ising_graph)
        for i in range(0, depth):
            yield qaoa_layer([i*time/depth for i in parameters[0]], [i*time/depth for i in parameters[1]], qubits, initial_graph)
    
    def find_infidelity_time(depth, time, index1, index2, control, params, opt_params, ising_graph, initial_graph):
        
        circuit = cirq.Circuit()
        circuit.append(le_state_evolve(depth, time, index1, ising_graph, opt_params))
        circuit.append(qgrnn_ansatz(initial_graph, params, opt_params, depth, index2, time))
        circuit.append(swap_test(control, index1, index2))
        circuit.append(cirq.measure(control, key="q"))
        
        simulator = cirq.Simulator()
        results = simulator.run(circuit, repetitions=100)
        
        new_res = list(str(results)[2:])
        return sum([int(i) for i in new_res])
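    # For the standard SWAP test, the control qubit is measured as 1 with probability
    # (1 - |<psi|phi>|^2) / 2, so the count of 1-outcomes returned above (out of 100
    # repetitions) shrinks as the time-evolved state and the QGRNN ansatz state become
    # more similar -- which is what makes it usable as an infidelity-style cost.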
    
    control = cirq.GridQubit(2, 0)
    # Now, we define the cost function that is used in the optimization method
    
    time_range = range(0, 10)
    iterations = 0
    
    def cost_function(params):
        
        global iterations
        
        params = [params[0:6], params[6:10]]
        
        total_cost = 0
        for i in time_range:
            res = find_infidelity_time(1, i, data_register, network_register, control, params, optimal_params, ising_graph, initial_graph)
            total_cost += res
            
        print("Cost at Step "+str(iterations)+": "+str(total_cost / len(time_range)))
        
        iterations += 1
        return total_cost / len(time_range)
    init = [random.randint(10, 100)/10 for i in range(0, 10)]
    # init = [7.9, 2.7, 7.1, 3.9, 3.7, 9.9, 4.5, 6.4]  # leftover 8-value override; the cost function expects 10 parameters
    out = minimize(cost_function, x0=init, method="COBYLA", options={'maxiter':500, 'tol':1e-10})
    g = out['x']
    print(out)
Extended Kalman Filter algorithm
Loan Sarazin &
import numpy as np
    import numpy.linalg as la
    from math import *
    import matplotlib.pyplot as plt
    
    import pandas as pd
    # Import the data
    data = pd.read_excel("donnee.xlsx")
    print(data.head())
    signalReel = np.array(data.loc[:, ('signalReel')])
    signalBruite = np.array(data.loc[:, ('signalBruite')])
    
    Temps = np.array(data.loc[:, ('Temps')])
    N = signalReel.shape[0]
    X = Temps
    
    plt.figure(figsize = (15, 8))
    plt.plot(X, signalBruite, '.', label = "Noisy signal")
    plt.plot(X, signalReel, '.', label = "Real signal")
    plt.legend()
    plt.show()
    # restrict to the first 300 values
    plt.figure(figsize = (15, 8))
    plt.plot(X[:300], signalBruite[:300], label = "Noisy signal", linestyle = "dashed")
    plt.plot(X[:300], signalReel[:300], label = "Real signal")
    plt.legend()
    plt.xlabel("Time")
    plt.ylabel("Signal values")
    plt.title("Plot of both the noisy signal and the real signal through the time evolution")
    
    plt.show()
    # Implementation of the extended Kalman filter
    
    def KalmanF_extended(Z, Q, R, A):
        # Initialize x0 and P00
        x0 = np.random.normal(size = 2)
        P00 = np.identity(2)
        
        k=0
        x_evol = []
        
        nu0 = 12
        Te = 1/193.28
        
        x = x0
        P = P00
        N = Z.shape[0]
        
        # Estimation/prediction loop
        while (k < N ):
            new_x = x.copy().reshape((2, 1))
        
            newP =  P + Q
            
            H = np.array([sin(2*pi*nu0*Te*(k+1) + new_x[1, 0]), 
                          new_x[0, 0]*cos(2*pi*nu0*Te*(k+1) + new_x[1, 0])]).reshape((2, 1))
            S = np.array(H.T@newP@H + R)
            K = (newP@H /S).reshape((2, 1))
            
            epsilon = Z[k] - new_x[0]*sin(2*pi*nu0*(k+1)*Te + new_x[1])
            x = new_x + epsilon*K
            P = newP - K@H.T@newP
            
            x_evol.append(x)
            k += 1
        return np.array(x_evol).reshape((len(x_evol), 2))
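For reference, reading the update equations above off the code: the state is $x = (\alpha, \varphi)$, assumed constant between steps up to process noise with covariance $Q$, and the scalar measurement model is
$$z_k = \alpha \sin\big(2\pi \nu_0 T_e (k+1) + \varphi\big) + v_k, \qquad v_k \sim \mathcal{N}(0, R),$$
so `H` is the Jacobian $\partial z_k / \partial x = \big[\sin(2\pi\nu_0 T_e (k+1) + \varphi),\ \alpha\cos(2\pi\nu_0 T_e (k+1) + \varphi)\big]^\top$ evaluated at the predicted state, and `epsilon` is the innovation $z_k - \hat z_k$.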
    Q = np.array([2*10**(-5), 0, 0, 2*10**(-1)])
    Q = Q.reshape((2, 2))
    
    R = 3
    
    A = np.eye(2)
    
    Q, R, A
    
    x_estim = KalmanF_extended(signalBruite, Q, R, A)
    plt.figure(figsize = (8, 5))
    iter = np.arange(0, 20000)
    plt.plot(iter, x_estim[:, 0], 'b.')
    plt.xlabel("Iterations of the EKF algorithm")
    plt.ylabel("Value of alpha")
    plt.title("Plot of amplitude alpha along the iterations")
    plt.show()
    plt.figure(figsize = (8, 5))
    iter = np.arange(0, 20000)
    plt.plot(iter, x_estim[:, 1], 'b.')
    plt.xlabel("Iterations of the EKF algorithm")
    plt.ylabel("Value of $\phi$")
    plt.title("Plot of the phase $\phi$ along the iterations")
    plt.show()
    nu0 = 12
    Te = 1/193.28
    
    nmin = 19850
    nmax = nmin + 150
    
    plt.figure(figsize = (10, 8))
    iter = np.arange(nmin, nmax)
    plt.plot(iter, x_estim[nmin:nmax, 0]*np.sin(2*pi*nu0*(iter)*Te + x_estim[nmin:nmax, 1]), label = "Estimated signal")
    plt.plot(iter, signalReel[nmin:nmax], label = "Real signal")
    plt.plot(iter, signalBruite[nmin:nmax], 'r.', label = "Noised signal")
    plt.xlabel("Time")
    plt.ylabel("Value of the signal")
    plt.title("Real and estimated signals (19 000, 20 000)")
    plt.legend()
    plt.show()
We can see that we obtain a fairly accurate estimate of the signal. The amplitude alpha converges to a value close to 5, and the phase $\phi$ is also close to the real phase. Note that this plot shows the last periods; as shown below, the estimated signal is less accurate at the beginning of the time period.
nmin = 0
    nmax = nmin + 150
    
    plt.figure(figsize = (10, 8))
    iter = np.arange(nmin, nmax)
    plt.plot(iter, x_estim[nmin:nmax, 0]*np.sin(2*pi*nu0*(iter)*Te + x_estim[nmin:nmax, 1]), label = "Estimated signal")
    plt.plot(iter, signalReel[nmin:nmax], label = "Real signal")
    plt.plot(iter, signalBruite[nmin:nmax], 'r.', label = "Noised signal")
    plt.xlabel("Time")
    plt.ylabel("Value of the signal")
    plt.title("Real and estimated signals (0, 300)")
    plt.legend()
    plt.show()
jupyter notebook
- mode
    - command mode (esc): used to operate on cells themselves
    - edit mode (enter): used to edit the contents of a cell
- style
    - markdown (command mode + m): used to write explanations in a cell
    - code (command mode + y): used to write Python code
- shortcuts
    - run cell: shift + enter
    - delete cell: (command mode) x
    - undo: (command mode) z
    - create cell: (command mode) a (above), b (below)
2 + 3
    1 + 2
Magic Command
- commands that behave in a special way inside a cell
- %: runs a single-line magic command
- %%: runs a cell-level magic command
- main magic commands
    - pwd: path of the current Jupyter notebook file
    - ls: list of files in the current directory
    - whos: print the currently declared variables
    - reset: delete the currently declared variables
%pwd
    a = 1
    %whos
    %reset
    %whos
Interactive namespace is empty.
Shell Command
- use shell commands from the environment in which the Jupyter notebook is running
- run a command by prefixing it with !
- main commands
    - ls, cat, echo, ...
!echo python
    !ls
01_jupyter notebook.ipynb
    01_jupyter_notebook_2.ipynb
    02_basic_syntax.ipynb
    03_condition_loop.ipynb
    04_function.ipynb
RFM
df.info()
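The RFM cells below also use `pd`, `plt`, `timedelta`, and `squarify`, none of which are imported in this excerpt; they may be imported earlier in the notebook, but a minimal assumed set of imports would be:

    import pandas as pd
    import matplotlib.pyplot as plt
    import squarify                 # treemap plot used for the RFM segments below
    from datetime import timedelta  # used to build the reference date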
    df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])
    df.info()
    df.shape
    min(df['InvoiceDate'])
    max(df['InvoiceDate'])
    df = df.dropna()
    len(df)
    df['total_price'] = df['Quantity'] * df['UnitPrice']
    reference_date = max(df['InvoiceDate']) + timedelta(days=1)
    reference_date
    df_processed = df.groupby(['CustomerID']).agg({'InvoiceDate': lambda x: (reference_date - x.max()).days,
                                                  'InvoiceNo': 'count',
                                                  'total_price': 'sum'})
    df_processed
    df_processed.columns = ['Recency', 'Frequency', 'Monetary']
    df_processed.head()
    import seaborn as sns
    
    sns.distplot(df_processed['Recency'])
    sns.distplot(df_processed['Frequency'])
    sns.distplot(df_processed['Monetary'])
    r = range(4, 0, -1)
    f = range(1, 5)
    m = range(1, 5)
    r
    for i in f:
        print(i)
    r_g = pd.qcut(df_processed['Recency'], q=4, labels=r)
    f_g = pd.qcut(df_processed['Frequency'], q=4, labels=f)
    m_g = pd.qcut(df_processed['Monetary'], q=4, labels=m)
    final_df = df_processed.assign(R = r_g.values, F = f_g.values, M = m_g.values)
    final_df
    def combine_rfm(x): return str(x['R']) + str(x['F']) + str(x['M'])
    final_df['combined'] = final_df.apply(combine_rfm, axis=1)
    rfm = final_df
    rfm.head()
    rfm['sum_val'] = rfm[['R', 'F', 'M']].sum(axis=1)
    rfm
    rfm['combined'].nunique()
    def rfm_level(df):
        if df['sum_val'] >= 9:
            return 'Can\'t Lose Them'
        elif ((df['sum_val'] >= 8) and (df['sum_val'] < 9)):
            return 'Champions'
        elif ((df['sum_val'] >= 7) and (df['sum_val'] < 8)):
            return 'Loyal'
        elif ((df['sum_val'] >= 6) and (df['sum_val'] < 7)):
            return 'Potential'
        elif ((df['sum_val'] >= 5) and (df['sum_val'] < 6)):
            return 'Promising'
        elif ((df['sum_val'] >= 4) and (df['sum_val'] < 5)):
            return 'Needs Attention'
        else:
            return 'Require Activation'
    rfm['rfm_level'] = rfm.apply(rfm_level, axis=1)
    rfm
    rfm_level_agg = rfm.groupby('rfm_level').agg({
        'Recency': 'mean',
        'Frequency': 'mean',
        'Monetary': ['mean', 'count']
    }).round(1)
    rfm_level_agg
    rfm_level_agg.columns = rfm_level_agg.columns.droplevel()
    rfm_level_agg.columns = ['Recency_Mean', 'Frequency_Mean', 'Monetary_Mean', 'Count']
    rfm_level_agg
    fig = plt.gcf()
    ax = fig.add_subplot()
    fig.set_size_inches(16, 9)
    squarify.plot(sizes=rfm_level_agg['Count'], 
                  label=['Can\'t Lose Them',
                         'Champions',
                         'Loyal',
                         'Needs Attention',
                         'Potential', 
                         'Promising', 
                         'Require Activation'], alpha=.6 )
    plt.title('RFM Segments')
    plt.axis('off')
    plt.show()"Set Matrix Zeroes"> "[[Leetcode]](https://leetcode.com/problems/set-matrix-zeroes/)[Arrays][Matrix]"- toc: true - badges: true- comments: true- categories: [Problem Solving,Leetcode]- comments: true- author:   Problem StatementGiven a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place. Example 1:```Input: [  [1,1,1],  [1,0,1],  [1,1,1]]Output: [  [1,0,1],  [0,0,0],  [1,0,1]]``` Example 2:```Input: [  [0,1,2,0],  [3,4,5,2],  [1,3,1,5]]Output: [  [0,0,0,0],  [0,4,5,0],  [0,3,1,0]]``` Follow up:- A straight forward solution using O(mn) space is probably a bad idea.- A simple improvement uses O(m + n) space, but still not the best solution.- Could you devise a constant space solution?[[URL]](https://leetcode.com/problems/set-matrix-zeroes/)  Approach 1#collapse-hide
    from typing import List
    
    class Solution:
        def setZeroes(self, matrix: List[List[int]]) -> None:
            """
            Do not return anything, modify matrix in-place instead.
            """
            temp = []
            matrixCopy = []
            
            for row in matrix:
                for element in row:
                    temp.append(element)
                matrixCopy.append(temp)
                temp = []
                
            for i in range(len(matrix)):
                for j in range(len(matrix[0])):
                    if matrixCopy[i][j] == 0:
                        for k in range(len(matrix[0])):
                            matrix[i][k] = 0
                        for k in range(len(matrix)):
                            matrix[k][j] = 0
            
            return matrix
    sol = Solution()
    sol.setZeroes([
      [1,1,1],
      [1,0,1],
      [1,1,1]
    ])
![](Images/Problem_solving/setZeroes/Approach1_sub.png)
**Worst case performance in Time:** $O(m*n)$
**Worst case performance in Space:** $O(m*n)$
**Is Inplace?**: ```False```
Approach 2
1. Traverse the original matrix and look for 0 entries.
2. If one is found, record its i, j values in auxiliary variables.
3. Using sets to record the i, j values is beneficial because it removes duplicate row and column indices up front.
4. Finally, re-iterate over the original matrix; for every cell check `if i in rows or j in columns` and, if so, update the value to 0.
#collapse-hide
    
    class Solution:
        def setZeroes(self, matrix: List[List[int]]) -> None:
            """
            Do not return anything, modify matrix in-place instead.
            """
            rows = set()
            columns = set()
            for i in range(len(matrix)):
                for j in range(len(matrix[0])):
                    if matrix[i][j] == 0:
                        rows.add(i)
                        columns.add(j)
            
            for i in range(len(matrix)):
                for j in range(len(matrix[0])):
                    if i in rows or j in columns:
                        matrix[i][j] = 0
                        
            return matrix
    sol = Solution()
    sol.setZeroes([
      [1,1,1],
      [1,0,1],
      [1,1,1]
    ])
![](Images/Problem_solving/setZeroes/Approach2_sub.png)
**Worst case performance in Time:** $O(m*n)$
**Worst case performance in Space:** $O(m+n)$
**Is Inplace?**: ```False```
Approach 3
#collapse-hide
    
    class Solution:
        def setZeroes(self, matrix: List[List[int]]) -> None:
            """
            Do not return anything, modify matrix in-place instead.
            """
            rowFlag, colFlag = False, False
            
            for i in range(len(matrix)):
                for j in range(len(matrix[0])):
                    if i == 0 and matrix[i][j] == 0:
                        rowFlag = True
                    if j ==0 and matrix[i][j] == 0:
                        colFlag = True
                    if matrix[i][j] == 0:
                        matrix[0][j] = 0
                        matrix[i][0] = 0
                        
            for i in range(1, len(matrix)):
                    for j in range(1, len(matrix[0])):
                        if matrix[i][0] == 0 or matrix[0][j] == 0:
                            matrix[i][j] = 0
            
            if rowFlag == True:
                for i in range(len(matrix[0])):
                    matrix[0][i] = 0
            if colFlag == True:
                for i in range(len(matrix)):
                    matrix[i][0] = 0
            
            return matrix
    sol = Solution()
    sol.setZeroes([
      [1,1,1],
      [1,0,1],
      [1,1,1]
    ])
testing part
%%time
    ## testing results
    encode_vector = encoder_one_hot_action(0)
    encoder_decode_action(encode_vector)
    
    encoded, encoded_hot = encoder_one_hot_state(obs)
    encoder_decode_state(encoded_hot)
    
    ## compute result
    for i in range(10):
        list_obs, actions = compute_random_suffle(0)
CPU times: user 227 ms, sys: 50.3 ms, total: 278 ms
Wall time: 275 ms
Generation part
def flattenx(x):
        return jnp.ravel(x)
    
    flatten_all = vmap(flattenx)
    
    def generate_dataset(nb_iter_main_loop=100):
    
        actions_all = []
        obs_all = []
    
        for _ in range(nb_iter_main_loop):
    
            # train on main loop
            list_obs, actions = compute_random_suffle(0)
    
            # process for training
            actions_all.append(actions)
            obs_all.append(list_obs)
            
        obs_all = jnp.concatenate(obs_all)
        actions_all = jnp.concatenate(actions_all)
        
        obs_all, second = encoder_one_hot_state_vector(obs_all)
        actions_all = encoder_one_hot_action_action(actions_all)
            
        return obs_all, actions_all
    %%time
    obs_all, actions_all = generate_dataset()
    
    dataset = TensorDataset(torch.from_numpy(np.array(obs_all)), torch.from_numpy(np.array(actions_all)))
    dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
    
    optax.__version__
Optax optimization
%%time
    
    from flax.training import train_state
    
    loss_hist = []
    
    optimizer = optax.adam(0.001)
    
    nb_epoch = 1
    nb_round_generation = 50
    size_generation = 10000
    
    # label smoothing
    smooth_vec = vmap(optax.smooth_labels, in_axes=(0, None))
    
    state = train_state.TrainState.create(
          apply_fn=model_rubik.apply, params=params, tx=optimizer)
    
    
    @jax.jit
    def train_step(state, batch):
        """Train for a single step."""
        def loss_fn(params):
            logits = model_rubik.apply({'params': params}, batch['rubik'])
            actions_ = smooth_vec(batch['action'], 0.1)
            loss = jnp.mean(
                optax.softmax_cross_entropy(
                    logits=logits, labels=actions_))
            return loss, logits
        grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
        (_, logits), grads = grad_fn(state.params)
        state = state.apply_gradients(grads=grads)
        metrics = compute_metrics(logits=logits, labels=batch['action'])
        return state, metrics
    
    def compute_metrics(logits, labels):
        loss = jnp.mean(
          optax.softmax_cross_entropy(
              logits=logits, labels=labels))
        accuracy = jnp.mean(jnp.argmax(logits, -1) == jnp.argmax(labels, -1))
        metrics = {
          'loss': loss,
          'accuracy': accuracy,
          }
        return metrics
    
    %%time
    
    loss_hist = []
    i = 0
    
    for q in range(nb_round_generation):
    
        #print("New generation")
        
        obs_all, actions_all = generate_dataset(size_generation)
    
        dataset = TensorDataset(torch.from_numpy(np.array(obs_all)), torch.from_numpy(np.array(actions_all)))
        dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
    
        for p in range(nb_epoch):
            
            print("epoch")
             
            for obs, actions in dataloader:
                
                obs = jnp.array(obs)
                actions = jnp.array(actions)
                
                batch = {}
                batch['action'] = actions
                batch['rubik'] = obs
                
                # optimizer application
                state, metrics = train_step(state, batch)
                
                loss_hist.append(metrics['loss'].item())
                #params = jax.lax.stop_gradient(params)
                i += 1
                
                if (i % 100) == 0:
                    #print(i)
                    xla._xla_callable.cache_clear()
                    
            #jax.profiler.save_device_memory_profile(f"memory{p}_{q}.prof")
    
    
    plt.plot(loss_hist)
    for obs, actions in dataloader:
        obs = jnp.array(obs)
        actions = jnp.array(actions)
    loss_hist_conv = np.convolve(loss_hist, np.ones(50)/50)
    plt.plot(loss_hist_conv)
    loss_hist_conv = np.convolve(loss_hist, np.ones(50)/50)
    plt.plot(loss_hist_conv)
Testing performance for solving a Rubik's cube!!!!
env.reset()
    
    state_goal = env.state.copy()
    ## test to solve a rubiks
    obs, action = compute_random_suffle(0, nb_iter=10)
    begin_state = obs[-1, :, :, :]
    env.reset()
    env.set_init(np.array(begin_state))
    state_obs = begin_state
    flat_obs, noflat_obs = encoder_one_hot_state(state_obs)
    
    
    count = 0
    while not np.all(state_goal == np.array(state_obs)) and count < 50:
        
        action = jnp.argmax(model_rubik.apply({'params': state.params}, flat_obs)).item()
        print(action)
        state_obs, _, _, _= env.step(action)
        flat_obs, noflat_obs = encoder_one_hot_state(state_obs)
        count += 1
        
        
        
    state_obs
    begin_state
    model_rubik.apply({'params': state.params}, flat_obs)
    env.reset()
    episode_rew = 0
    
    ## action list
    #actions = jrandom.randint(key1, shape=(nb_iter,1), minval=0, maxval=12)
    actions = np.random.randint(0, 12, size=(nb_iter,1))
    
    ## state list
    obs_list = []
    
    for q in range(nb_iter):
    
        print(actions[q, 0])
        obs, rew, done, _ = env.step(actions[q, 0])
        obs_list.append(obs.copy())
        print(obs)
    
        ## we should also compute the max of the model
    
    ### we compute the reverse action
    np.array(state_obs)
    obs
    
    pickle.dump(state.params, open( "save_state.p", "wb" ) )
    params = pickle.load(open( "save_state.p", "rb" ))
    params
Aifont
> Using Biologically-Informed Artificial Neural Networks to Automatically Optimise Font Parameters for Legibility.
This file will become your README and also the index of your documentation.
Install Dependencies:
* [fastai](https://docs.fast.ai/Installing)
  * Use -c fastchan
  * pytorch 1.10.0
  * torchvision 0.11.1
* [diffvg](https://github.com/BachiLi/diffvg)
  * pytorch torchvision -c pytorch
  * numpy
  * scikit-image
  * -c anaconda cmake
  * -c conda-forge ffmpeg
* [Google Fonts](https://github.com/google/fonts)
  * `ofl` folder from the repo needed for `FontSampler`
  * If the annotation database is updated, a Google Fonts API key is also needed. See: https://erraticgenerator.com/blog/use-google-fonts-for-machine-learning-part1/
Not available yet!
`pip install your_project_name`
How to use
TBA
say_hello("Kaljarv")
Prediction Evaluation
Print model names
print_filelist(m_flist_tt)
Predict on models
#part_x_M027, pos_ids = get_pos_IDs(part['NFCHOA_M027'],ID_ref)
    #part_x_R006, _ = get_pos_IDs(part['NFCHOA_R006'],ID_ref)
    
    params_t = params.copy()
    params_t['batch_size'] = 1000
    
    params_t_nsc = params_nsc.copy()
    params_t_nsc['batch_size'] = 1000
    
    params_t_nic = create_test_params(feat[cn[:64]], targ, par, batch_size=1000, shuffle=False)
    
    def model_pred_pos(model, part, params, ID_ref):
        """Returns list of ndarrays of predictions on model based on test partition per position."""
        part_x, pos_ids = get_pos_IDs(part,ID_ref)
        b_gen = []
        pred = []
        y = []
        for j in range(len(part_x)):
            b_gen.append(DataGenerator_raw(part_x[j], **params))
            pred.append(model.predict_generator(b_gen[j], verbose=0, use_multiprocessing=True, workers=4))
            y.append(get_y_gen(b_gen[j]))
        return pred, y
    pred_R006 = {}
    pred_M027 = {}
    y_R006 = {}
    y_M027 = {}
    for i,md in enumerate(m_flist_tt):
        if 'no-ic' in md:
            params_tmp = params_t_nic
        elif 'nsc' in md:
            params_tmp = params_t_nsc
        else:
            params_tmp = params_t    
        
        pred_R006[md], y_R006[md] = model_pred_pos(m_tt[i] , part['NFCHOA_R006'], params_tmp, ID_ref)
        pred_M027[md], y_M027[md] = model_pred_pos(m_tt[i] , part['NFCHOA_M027'], params_tmp, ID_ref)
        #print(i)
    k = list(pred_M027.keys())
Generate Plots
# generate plots
    %matplotlib
    # all positions overview
    for name in k[11:12]:
        model_name, loss_name, tdata, special, bs, _ = get_model_info(name)
        f1 = plot_locaz_all(pred_R006[name],
                            l=True,
                            title='Model: {}{} | Loss: {} | Test-Data: {}'.format(model_name, special, loss_name, 'NFCHOA_R006'))
        #f2 = plot_locaz_all(pred_M027[name],
        #                    l=True,
        #                    title='Model: {}{} | Loss: {} | Test-Data: {}'.format(model_name, special, loss_name, 'NFCHOA_M027'))
    # individual positions incl. gt comparison
    %matplotlib
    
    m_idx = 12
    pos_idx = 0
    
    #model_name, loss_name, tdata, special, bs, _ = get_model_info(k[m_idx])
    
    # manually wrap angles
    def wrap(a):
        for i in range(len(a)):
            if a[i] > 180:
                a[i] = a[i]-360
            elif a[i] < -180:
                a[i] = a[i]+360
        return a
    
    #title = 'Model: {}{} | Loss: {} | Test-Data: {}\n Position: {}'.format(model_name, special, loss_name, 'NFCHOA_M027', pos_idx)
    f1, ax_p, ax_y = plot_locaz(wrap(pred_M027[k[m_idx]][pos_idx]), y_M027[k[m_idx]][pos_idx], l=True)
    #f.suptitle(title, fontsize='x-large')
    lines = (ax_p.get_children()[:1][0],ax_y.get_children()[:1][0],ax_p.get_children()[:][1],ax_p.get_children()[:][2],ax_p.get_children()[:][4])
    #f.legend(lines, ('predictions', 'human / gt', 'mean over subjects/repetitions at 0° head rotation', '± 180°','± 180°'))
    
    #title = 'Model: {}{} | Loss: {} | Test-Data: {}\n Position: {}'.format(model_name, special, loss_name, 'NFCHOA_R006', pos_idx)
    f2, ax_p, ax_y = plot_locaz(wrap(pred_R006[k[m_idx]][pos_idx]), y_R006[k[m_idx]][pos_idx], l=True)
    #f.suptitle(title, fontsize='x-large')
    lines = (ax_p.get_children()[:1][0],ax_y.get_children()[:1][0],ax_p.get_children()[:][1],ax_p.get_children()[:][2],ax_p.get_children()[:][4])
    #f.legend(lines, ('predictions', 'human / gt', 'mean over subjects/repetitions at 0° head rotation', '± 180°','± 180°'))
    f1.set_size_inches(10.5, 5)
    f1.savefig('f1.png', dpi=200)
    f2.set_size_inches(10.5, 5)
    f2.savefig('f2.png', dpi=200)
Importing libraries
import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn as sns
    %matplotlib inline
Importing Data
url = 'https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv'
    data=pd.read_csv(url)
    data.head()
    data.shape
    data.describe()
    sns.regplot(x='Hours',y='Scores',data=data)
    plt.show()
Predictive Modelling - Linear Regression
data_train = data.iloc[:20]
    data_test=data.iloc[20:]
    x_train=data_train.drop(['Scores'],axis=1)
    y_train=data_train['Scores']
    x_test=data_test.drop(['Scores'],axis=1)
    from sklearn.linear_model import LinearRegression
    lreg=LinearRegression()
    lreg.fit(x_train,y_train)
    lreg.score(x_train,y_train)
    pred=lreg.predict(x_test)
    df1=pd.DataFrame({'Actual':data_test['Scores'],'Predicted':pred})
    df1
Final Prediction
dict1 = {'Hours': [9.25]}
    df=pd.DataFrame(dict1)
    df['Scores']=lreg.predict(df)
    df
    print('Hence, the score if the student studies for 9.25 hours is: {}'.format(list(df['Scores'])[0]))
Hence, the score if the student studies for 9.25 hours is: 91.09485441683266
Evaluating the model
def rmse(y, y1):
        add=(y-y1)**2
        final=np.sqrt(np.mean(add))
        return final
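    # Optional cross-check of the hand-rolled rmse() above using scikit-learn
    # (assumes `data_test` and `pred` from the earlier cells are still in scope).
    from sklearn.metrics import mean_squared_error
    print('sklearn RMSE check:', np.sqrt(mean_squared_error(data_test['Scores'], pred)))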
    print('Therefore, the root mean squared error is {}'.format(rmse(data_test['Scores'], pred)))
Therefore, the root mean squared error is 5.931635159442725
# Python ≥3.5 is required
    import sys
    assert sys.version_info >= (3, 5)
    
    # Scikit-Learn ≥0.20 is required
    import sklearn
    assert sklearn.__version__ >= "0.20"
    
    try:
        # %tensorflow_version only exists in Colab.
        %tensorflow_version 2.x
        IS_COLAB = True
    except Exception:
        IS_COLAB = False
    
    # TensorFlow ≥2.0 is required
    import tensorflow as tf
    from tensorflow import keras
    assert tf.__version__ >= "2.0"
    
    if not tf.config.list_physical_devices('GPU'):
        print("No GPU was detected. CNNs can be very slow without a GPU.")
        if IS_COLAB:
            print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
    
    # Common imports
    import numpy as np
    import os
    
    # to make this notebook's output stable across runs
    np.random.seed(42)
    tf.random.set_seed(42)
    
    # To plot pretty figures
    %matplotlib inline
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    mpl.rc('axes', labelsize=14)
    mpl.rc('xtick', labelsize=12)
    mpl.rc('ytick', labelsize=12)
    
    # Where to save the figures
    PROJECT_ROOT_DIR = "."
    CHAPTER_ID = "cnn"
    IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
    os.makedirs(IMAGES_PATH, exist_ok=True)
    
    def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
        path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
        print("Saving figure", fig_id)
        if tight_layout:
            plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)
9. High Accuracy CNN for MNIST
_Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST._
The following model uses 2 convolutional layers, followed by 1 pooling layer, then dropout 25%, then a dense layer, another dropout layer but with 50% dropout, and finally the output layer. It reaches about 99.2% accuracy on the test set. This places this model roughly in the top 20% in the [MNIST Kaggle competition](https://www.kaggle.com/c/digit-recognizer/) (if we ignore the models with an accuracy greater than 99.79%, which were most likely trained on the test set, as explained in [this post](https://www.kaggle.com/c/digit-recognizer/discussion/61480)). Can you do better? To reach 99.5 to 99.7% accuracy on the test set, you need to add image augmentation, batch norm, use a learning schedule such as 1-cycle, and possibly create an ensemble.
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
    X_train_full = X_train_full / 255.
    X_test = X_test / 255.
    X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
    y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
    
    X_train = X_train[..., np.newaxis]
    X_valid = X_valid[..., np.newaxis]
    X_test = X_test[..., np.newaxis]
    X_train.shape
    X_train[0].shape
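The exercise above mentions image augmentation as one ingredient for pushing accuracy toward 99.5–99.7%. A minimal sketch using Keras' `ImageDataGenerator` with the `X_train`/`y_train` arrays prepared above; this is an illustration, not part of the notebook's model below:

    # Sketch only: small random shifts/rotations/zooms as data augmentation.
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    
    datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
                                 height_shift_range=0.1, zoom_range=0.1)
    # model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs=10,
    #           validation_data=(X_valid, y_valid))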
    keras.backend.clear_session()
    tf.random.set_seed(42)
    np.random.seed(42)
    
    model = keras.models.Sequential([
        keras.layers.Conv2D(32, kernel_size=3, padding="same", activation="relu"),
        keras.layers.Conv2D(64, kernel_size=3, padding="same", activation="relu"),
        keras.layers.MaxPool2D(),
        keras.layers.Flatten(),
        keras.layers.Dropout(0.25),
        keras.layers.Dense(128, activation="relu"),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(10, activation="softmax")
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam",
                  metrics=["accuracy"])
    
    model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
    model.evaluate(X_test, y_test)
Epoch 1/10
    1719/1719 [==============================] - 16s 5ms/step - loss: 0.3717 - accuracy: 0.8833 - val_loss: 0.0489 - val_accuracy: 0.9854
    Epoch 2/10
    1719/1719 [==============================] - 8s 5ms/step - loss: 0.0822 - accuracy: 0.9754 - val_loss: 0.0422 - val_accuracy: 0.9884
    Epoch 3/10
    1719/1719 [==============================] - 8s 5ms/step - loss: 0.0566 - accuracy: 0.9825 - val_loss: 0.0366 - val_accuracy: 0.9906
    Epoch 4/10
    1719/1719 [==============================] - 8s 5ms/step - loss: 0.0503 - accuracy: 0.9843 - val_loss: 0.0365 - val_accuracy: 0.9906
    Epoch 5/10
    1719/1719 [==============================] - 9s 5ms/step - loss: 0.0413 - accuracy: 0.9864 - val_loss: 0.0321 - val_accuracy: 0.9920
    Epoch 6/10
    1719/1719 [==============================] - 8s 5ms/step - loss: 0.0347 - accuracy: 0.9889 - val_loss: 0.0379 - val_accuracy: 0.9906
    Epoch 7/10
    1719/1719 [==============================] - 9s 5ms/step - loss: 0.0323 - accuracy: 0.9895 - val_loss: 0.0381 - val_accuracy[...]
- Layer 1 (Conv2D(32)):
  * Output size: (28, 28, 32) — padding="same" with stride 1 keeps the 28x28 spatial size, and there are 32 filters
  * Number of parameters: (3 * 3 * 1 + 1) * 32 = 320
- Layer 2 (Conv2D(64)):
  * Output size: (28, 28, 64) — padding="same" keeps 28x28, and there are 64 filters
  * Number of parameters: (3 * 3 * 32 + 1) * 64 = 18496
- Layer 3 (MaxPool2D):
  * Output size: (14, 14, 64) — the spatial dimensions are halved by pooling
  * Number of parameters: 0
- Layer 4 (Flatten):
  * Output size: 14 * 14 * 64 = 12544
  * Number of parameters: 0
- Layer 5 (Dropout(0.25)):
  * Output size: 12544, same as the previous layer
  * Number of parameters: 0
- Layer 6 (Dense(128)):
  * Output size: 128
  * Number of parameters: 12544 * 128 + 128 = 1605760
- Layer 7 (Dropout(0.5)):
  * Output size: 128, same as the previous layer
  * Number of parameters: 0
- Layer 8 (Dense(10)):
  * Output size: 10, the desired output size since there are 10 classes
  * Number of parameters: 128 * 10 + 10 = 1290
Piecewise Hazard Model
> How to estimate the Piecewise Hazard estimator.
In this module we assume that during a given time period the hazard is constant.
# export
    import matplotlib.pyplot as plt
    import numpy as np
    import torch
    import torch.nn as nn
    from sklearn.preprocessing import MaxAbsScaler
    
    torch.Tensor.ndim = property(lambda x: x.dim())
    # hide
    %load_ext autoreload
    %autoreload 2
    %matplotlib inline
    # export
    class PieceWiseHazard(nn.Module):
        """
        Piecewise Hazard where the hazard is constant between breakpoints.
        parameters:
        - breakpoints: time points where hazard would change (must include 0 and max possible time)
        """
        def __init__(self, breakpoints:np.array, t_scaler:MaxAbsScaler, **kwargs):
            super().__init__()
            self.t_scaler = t_scaler
            if len(breakpoints.shape) == 1:
                breakpoints = self.t_scaler.transform(breakpoints[:,None])
            else:
                breakpoints = self.t_scaler.transform(breakpoints)
            self.logλ = nn.Parameter(torch.randn(len(breakpoints)-1, 1))
            self.register_buffer('breakpoints', torch.Tensor(breakpoints[:-1]))
            self.register_buffer('widths', torch.Tensor(np.diff(breakpoints, axis=0)))
            self.prepend_zero = nn.ConstantPad2d((0,0,1,0), 0)
            
        def cumulative_hazard(self, t, t_section):
            """
            Integral of hazard wrt time.
            """
            λ = torch.exp(self.logλ)
    
            # cumulative hazard 
            cum_hazard = λ * self.widths
            cum_hazard = cum_hazard.cumsum(0)
            cum_hazard = self.prepend_zero(cum_hazard)
            cum_hazard_sec = cum_hazard[t_section]
            
            δ_t = t - self.breakpoints[t_section]
            
            return cum_hazard_sec + λ[t_section] * δ_t
            
        def forward(self, t, t_section, *args):
            return self.logλ[t_section], self.cumulative_hazard(t, t_section)
        
        def survival_function(self, t:np.array):
            """
            parameters:
            - t: time (do not scale to be between 0 and 1)
            """
            if len(t.shape) == 1:
                t = t[:,None]
            t = self.t_scaler.transform(t)
                
            with torch.no_grad():
                # get the times and time sections for survival function
                breakpoints = self.breakpoints[1:].cpu().numpy()
                t_sec_query = np.searchsorted(breakpoints.squeeze(), t.squeeze())
                # convert to pytorch tensors
                t_query = torch.Tensor(t)
                t_sec_query = torch.LongTensor(t_sec_query)
    
                # calculate cumulative hazard according to above
                Λ = self.cumulative_hazard(t_query, t_sec_query)
                return torch.exp(-Λ)
            
        def hazard(self):
            with torch.no_grad():
                width = self.widths
                breakpoints = self.breakpoints
                λ = torch.exp(self.logλ)
                return (self.t_scaler.inverse_transform(breakpoints).squeeze(), 
                        self.t_scaler.inverse_transform(width).squeeze(), 
                        λ.squeeze())
                
        def plot_survival_function(self, t):
            s = self.survival_function(t)
            # plot
            plt.figure(figsize=(12,5))
            plt.plot(t, s)
            plt.xlabel('Time')
            plt.ylabel('Survival Probability')
            plt.show()
            
        def plot_hazard(self):
            """
            Plot base hazard
            """
            breakpoints, width, λ = self.hazard()
            # plot
            plt.figure(figsize=(12,5))
            plt.bar(breakpoints, λ, width, align='edge')
            plt.ylabel('λ')
            plt.xlabel('t')
            plt.show()
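For reference, with breakpoints $b_0 < b_1 < \dots$ and a constant hazard $\lambda_j$ on each interval $[b_j, b_{j+1})$, the quantities computed by `cumulative_hazard` and `survival_function` above are
$$\Lambda(t) = \sum_{j < k} \lambda_j\,(b_{j+1} - b_j) + \lambda_k\,(t - b_k) \quad \text{for } t \in [b_k, b_{k+1}), \qquad S(t) = \exp(-\Lambda(t)).$$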
    # hide
    from torchlife.data import create_db
    import pandas as pd
    
    url = "https://raw.githubusercontent.com/CamDavidsonPilon/lifelines/master/lifelines/datasets/rossi.csv"
    df = pd.read_csv(url)
    df.head()
    # hide
    from fastai.basics import Learner
    from torchlife.losses import hazard_loss
    from torchlife.data import get_breakpoints, create_db
    
    df.rename(columns={'week':'t', 'arrest':'e'}, inplace=True)
    
    # event_times = df.loc[df['e']==1, 't'].values
    # breakpoints = np.percentile(event_times, [20, 40, 60, 80])
    # print(breakpoints)
    breakpoints = get_breakpoints(df)
    db, t_scaler, _ = create_db(df, breakpoints)
    
    model = PieceWiseHazard(breakpoints, t_scaler)
    learner = Learner(db, model, loss_func=hazard_loss)
    learner.lr_find()
    learner.recorder.plot()
    # hide
    epochs = 20
    learner.fit(epochs, lr=0.5)
Plot Hazard Functions
learner.model.plot_hazard()
    learner.model.plot_survival_function(np.arange(df['t'].max()))
    # hide
    from nbdev.export import *
    notebook2script()
Converted 00_index.ipynb.
    Converted 10_SAT.ipynb.
    Converted 20_KaplanMeier.ipynb.
    Converted 50_hazard.ipynb.
    Converted 55_hazard.PiecewiseHazard.ipynb.
    Converted 59_hazard.Cox.ipynb.
    Converted 60_AFT_models.ipynb.
    Converted 65_AFT_error_distributions.ipynb.
    Converted 80_data.ipynb.
    Converted 90_model.ipynb.
Converted 95_Losses.ipynb.
Project: **Finding Lane Lines on the Road**
***
Develop a pipeline to identify lane lines on the road. You must apply it to a series of individual images, provided in the *test_images* folder. Once you have a result that looks roughly like the image *line-segments-example* in the examples folder (also shown below), you'll need to try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines.
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny edge detection, and Hough transform line detection. You are also free to explore and try other techniques that were not presented. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below).**
---
Your output should look something like this (above) after detecting line segments using the helper functions below.
Your goal is to connect/average/extrapolate line segments to get output like this.
Import Packages
#importing some useful packages
    import matplotlib.pyplot as plt
    import numpy as np
    import cv2
    %matplotlib inlineRead in an Image#reading in an image
    image = cv2.imread('test_images/solidWhiteRight.jpg')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    
    #printing out some stats and plotting
    print('This image is:', type(image), 'with dimensions:', image.shape)
    plt.imshow(image)  # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)
Ideas for Lane Detection Pipeline
**Some OpenCV functions that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for region selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
Helper Functions
Below are some helper functions to help get you started.
import math
    
    def grayscale(img):
        """Applies the Grayscale transform
        This will return an image with only one color channel
        but NOTE: to see the returned image as grayscale
        (assuming your grayscaled image is called 'gray')
        you should call plt.imshow(gray, cmap='gray')"""
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # Or use BGR2GRAY if you read an image with cv2.imread()
        # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
    def canny(img, low_threshold, high_threshold):
        """Applies the Canny transform"""
        return cv2.Canny(img, low_threshold, high_threshold)
    
    def gaussian_blur(img, kernel_size):
        """Applies a Gaussian Noise kernel"""
        return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    
    def region_of_interest(img, vertices):
        """
        Applies an image mask.
        
        Only keeps the region of the image defined by the polygon
        formed from `vertices`. The rest of the image is set to black.
        `vertices` should be a numpy array of integer points.
        """
        #defining a blank mask to start with
        mask = np.zeros_like(img)   
        
        #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
        if len(img.shape) > 2:
            channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
            ignore_mask_color = (255,) * channel_count
        else:
            ignore_mask_color = 255
            
        #filling pixels inside the polygon defined by "vertices" with the fill color    
        cv2.fillPoly(mask, vertices, ignore_mask_color)
        
        #returning the image only where mask pixels are nonzero
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image
    
    
    def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
        """
        This function draws `lines` with `color` and `thickness`.    
        Lines are drawn on the image inplace (mutates the image).
        If you want to make the lines semi-transparent, think about combining
        this function with the weighted_img() function below
        """
        for line in lines:
            for x1,y1,x2,y2 in line:
                cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    
    def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
        """
        `img` should be the output of a Canny transform.
            
        Returns an image with hough lines drawn, along with the raw Hough line segments.
        """
        lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
        line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
        draw_lines(line_img, lines)
        return line_img, lines
    
    def weighted_img(img, initial_img, alpha=0.8, beta=1., gamma=0.):
        """
        `img` is the output of the hough_lines(), An image with lines drawn on it.
        Should be a blank image (all black) with lines drawn on it.
        
        `initial_img` should be the image before any processing.
        
        The result image is computed as follows:
        
        initial_img * α + img * β + γ
        NOTE: initial_img and img must be the same shape!
        """
        return cv2.addWeighted(initial_img, alpha, img, beta, gamma)
    
    def getKey(item):
        return item[0]
Test Images
Build your pipeline to work on the images in the directory "test_images".
import os
    
    path = "test_images/"
    files = os.listdir(path)
    images = []
    
    f, plots = plt.subplots((len(files)+3-1)//3, 3, figsize=(20,10))
    plots = [plot for sublist in plots for plot in sublist]
    
    for file, plot in zip(files, plots):
        image = cv2.cvtColor(cv2.imread(os.path.join(path, file)), cv2.COLOR_BGR2RGB)
        plot.set_title(file)
        plot.imshow(image)
        images.append((image, file))
Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# TODO: Build your pipeline that will draw lane line segments on the test_images
    
    # apply grayscale to all the images
    images_copy = []
    gray_scale_images = []
    for element in images:
        gray_scale_images.append(grayscale(element[0]))
        images_copy.append(element[0])
    #plt.imshow(gray_scale_images[0], cmap='gray')
    
    # noise elimination with Gaussian blur
    blured_images = []
    kernel_size = 7
    for element in gray_scale_images:
        blured_images.append(gaussian_blur(element, kernel_size))
    #plt.imshow(blured_images[0], cmap='gray')
    
    #canny function for borders
    lower_canny = 200
    upper_canny = 240
    canny_images = []
    for element in blured_images:
        canny_images.append(canny(element, lower_canny, upper_canny))
    #plt.imshow(canny_images[0], cmap='gray')
        
    #region of interest
    vertices = np.array([[[150,540], [400,330], [600,330], [850,540]]])
    masked_images = []
    for element in canny_images:
        masked_images.append(region_of_interest(element, vertices))
    #plt.imshow(masked_images[0], cmap='gray')
    
    #draw lines using hough transform
    rho = 3
    theta = np.pi/270
    threshold = 10
    min_line_len = 1
    max_line_gap = 20
    lines = []
         
    hough_images = []
    for element in masked_images:
        #get the lines coordinades and the drawn image
        temp_img, img_lines = hough_lines(element, rho, theta, threshold, min_line_len, max_line_gap)
        hough_images.append(temp_img)
        lines.append(img_lines)
    #plt.imshow(hough_images[0])
    
    # draw the detected lane boundaries
    weighted_images = []
    for original, hough in zip(images_copy, hough_images):
        weighted_images.append(weighted_img(hough, original))
    plt.imshow(weighted_images[0])
Improve the draw_lines() function
**At this point, you should have the Hough line segments drawn onto the road. Extend your code to define a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. Try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. The output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# TODO: Build your pipeline that will draw complete lane lines on the test_images
    import operator
    
    coordinates_per_image = []
    images_left = [] #left coordinates
    images_right = [] #right coordinates
    for i in range(len(lines)):
        images_right.append([])
        images_left.append([])
    
    #separate left and right coordinates
    index = 0
    for element in lines:
        coordinates_per_image.append(element)
        for e in element:
            for x1,y1,x2,y2 in e:
                if x1 < 500:
                    images_left[index].append((x1,y1))
                else:
                    images_right[index].append((x1,y1))
                if x2 < 500:
                    images_left[index].append((x2,y2))
                else:
                    images_right[index].append((x2,y2))
        index += 1
    
    #order the list
    print(type(images_left[0]))
    print(images_left[0][0])
    
    #images_left.sort(key=itemgetter(1))
    #images_right.sort(key = sortSecond)
    images_left.sort(key = operator.itemgetter(0))
    print(len(images_left[0]))
    print(images_left[0])
    
    
    """
    for index in range(len(images_left[0])):
        if index < len(images_left[0]):
            full_line = cv2.line(images_copy[0], (images_left[0][index][0],images_left[0][index][1]),
                                    (images_left[0][index+1][0], images_left[0][index+1][1]),  color=[255, 0, 0], thickness=5)
            
    """
    (293, 462)
    60
[(165, 535), (284, 447), (181, 537), (396, 373), (298, 437), (391, 372), (393, 369), (432, 342), (369, 386), (389, 373), (282, 450), (287, 445), (166, 534), (204, 505), (250, 484), (255, 479), (294, 440), (300, 437), (283, 458), (305, 442), (256, 469), (271, 457), (343, 412), (352, 406), (276, 464), (292, 442), (401, 364), (412, 357), (357, 402), (365, 389), (307, 432), (310, 429), (326, 425), (330, 423), (334, 419), (390, 377), (289, 445), (293, 441), (262, 474), (279, 451), (290, 443), (296, 440), (392, 370), (400, 365), (263, 474), (267, 470), (161, 537), (177, 525), (259, 477), (277, 453), (211, 500), (235, 484), (190, 531), (237, 493), (289, 444), (293, 442), (335, 412), (362, 391), (279, 461), (326, 426)]
Test rechunked CONUS404
import fsspec
    import xarray as xr
    import hvplot.xarray
    import intake
    import os
    import warnings
    warnings.filterwarnings('ignore')

Open dataset from Intake Catalog

* Automatically select the on-prem dataset from /caldera if running on prem (Denali/Tallgrass)
* Automatically select the cloud data on S3 if not running on prem

To test whether we are on prem, we check whether SLURM_CLUSTER_NAME is defined. If SLURM_CLUSTER_NAME is not defined, the user is either not on Denali/Tallgrass, or is on the main node, which they should not be on.

    url = 'https://raw.githubusercontent.com/nhm-usgs/data-pipeline-helpers/main/conus404/conus404_intake.yml'
    cat = intake.open_catalog(url)
    list(cat)
    if 'SLURM_CLUSTER_NAME' in os.environ:
        ds = cat['conus404-2017-onprem'].to_dask()
    else:
        ds = cat['conus404-2017-cloud'].to_dask()
    ds
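
Before plotting, it can be helpful to confirm that the store is opened lazily and to look at the dask chunking of one variable (a quick sketch, not part of the original workflow):

    # Inspect the chunk layout and the uncompressed size of one variable without loading it.
    print(ds.SNOW.chunks)
    print(ds.SNOW.nbytes / 1e9, "GB if fully loaded")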
    ds.SNOW

Load the full domain at a specific time step

    %%time
    da = ds.SNOW.sel(time='2017-03-01 00:00').load()
    da.hvplot.quadmesh(x='lon', y='lat', rasterize=True, 
                                 geo=True, tiles='OSM', alpha=0.7, cmap='turbo')

Load the full time series at a specific grid cell

    %%time
    da = ds.T2.isel(south_north=600,west_east=600).load()
    da.hvplot(x='time', grid=True)

pomegranate / hmmlearn comparison

hmmlearn is a Python module for hidden Markov models with a scikit-learn-like API. It was originally part of scikit-learn until its removal, because structural learning did not mesh well with the API of many other classical machine learning algorithms. Here is a table highlighting some of the similarities and differences between the two packages.

| Feature | pomegranate | hmmlearn |
| --- | --- | --- |
| **Graph Structure** | | |
| Silent States | ✓ | |
| Optional Explicit End State | ✓ | |
| Sparse Implementation | ✓ | |
| Arbitrary Emissions Allowed on States | ✓ | |
| Discrete/Gaussian/GMM Emissions | ✓ | ✓ |
| Large Library of Other Emissions | ✓ | |
| Build Model from Matrices | ✓ | ✓ |
| Build Model Node-by-Node | ✓ | |
| Serialize to JSON | ✓ | |
| Serialize using Pickle/Joblib | | ✓ |
| **Algorithms** | | |
| Priors | | ✓ |
| Sampling | ✓ | ✓ |
| Log Probability Scoring | ✓ | ✓ |
| Forward-Backward Emissions | ✓ | ✓ |
| Forward-Backward Transitions | ✓ | |
| Viterbi Decoding | ✓ | ✓ |
| MAP Decoding | ✓ | ✓ |
| Baum-Welch Training | ✓ | ✓ |
| Viterbi Training | ✓ | |
| Labeled Training | ✓ | |
| Tied Emissions | ✓ | |
| Tied Transitions | ✓ | |
| Emission Inertia | ✓ | |
| Transition Inertia | ✓ | |
| Emission Freezing | ✓ | ✓ |
| Transition Freezing | ✓ | ✓ |
| Multi-threaded Training | ✓ | Coming Soon |

Just because two features are both implemented doesn't say anything about how fast they are. Below we investigate how fast the two packages are in the different settings that both have implemented.

Fully Connected Graphs with Multivariate Gaussian Emissions

Let's look at the sample scoring method, Viterbi, and Baum-Welch training for fully connected graphs with multivariate Gaussian emissions. A fully connected graph is one where all states have connections to all other states. This is a case in which pomegranate is expected to do poorly due to its sparse implementation, while hmmlearn should shine due to its vectorized implementation.

    %pylab inline
    import hmmlearn, pomegranate, time, seaborn
    from hmmlearn.hmm import *
    from pomegranate import *
    seaborn.set_style('whitegrid')

Populating the interactive namespace from numpy and matplotlib

Both hmmlearn and pomegranate are under active development. Here are the current versions of the two packages.

    print "hmmlearn version {}".format(hmmlearn.__version__)
    print "pomegranate version {}".format(pomegranate.__version__)hmmlearn version 0.2.0
    pomegranate version 0.4.0We first should have a function which will randomly generate transition matrices and emissions for the hidden markov model, and randomly generate sequences which fit the model.def initialize_components(n_components, n_dims, n_seqs):
        """
        Initialize a transition matrix for a model with a fixed number of components,
        for Gaussian emissions with a certain number of dimensions, and a data set
        with a certain number of sequences.
        """
        
        transmat = numpy.abs(numpy.random.randn(n_components, n_components))
        transmat = (transmat.T / transmat.sum( axis=1 )).T
    
        start_probs = numpy.abs( numpy.random.randn(n_components) )
        start_probs /= start_probs.sum()
    
        means = numpy.random.randn(n_components, n_dims)
        covars = numpy.ones((n_components, n_dims))
        
        seqs = numpy.zeros((n_seqs, n_components, n_dims))
        for i in range(n_seqs):
            seqs[i] = means + numpy.random.randn(n_components, n_dims)
            
        return transmat, start_probs, means, covars, seqs

Let's create the model in hmmlearn. It's fairly straightforward: only some attributes need to be overridden with the known structure and emissions.

    def hmmlearn_model(transmat, start_probs, means, covars):
        """Return a hmmlearn model."""
    
        model = GaussianHMM(n_components=transmat.shape[0], covariance_type='diag', n_iter=1, tol=1e-8)
        model.startprob_ = start_probs
        model.transmat_ = transmat
        model.means_ = means
        model._covars_ = covars
        return model

Now let's create the model in pomegranate. This is also fairly straightforward. The biggest difference is creating explicit distribution objects rather than passing in vectors, and passing everything into a function instead of overriding attributes. This is done because each state in the graph can be a different distribution, and many distributions are supported.

    def pomegranate_model(transmat, start_probs, means, covars):
        """Return a pomegranate model."""
        
        states = [ MultivariateGaussianDistribution( means[i], numpy.eye(means.shape[1]) ) for i in range(transmat.shape[0]) ]
        model = HiddenMarkovModel.from_matrix(transmat, states, start_probs, merge='None')
        return model

Let's now compare some algorithm times.

    def evaluate_models(n_dims, n_seqs):
        hllp, plp = [], []
        hlv, pv = [], []
        hlm, pm = [], []
        hls, ps = [], []
        hlt, pt = [], []
    
        for i in range(10, 112, 10):
            transmat, start_probs, means, covars, seqs = initialize_components(i, n_dims, n_seqs)
            model = hmmlearn_model(transmat, start_probs, means, covars)
    
            tic = time.time()
            for seq in seqs:
                model.score(seq)
            hllp.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict(seq)
            hlv.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict_proba(seq)
            hlm.append( time.time() - tic )    
            
            tic = time.time()
            model.fit(seqs.reshape(n_seqs*i, n_dims), lengths=[i]*n_seqs)
            hlt.append( time.time() - tic )
    
            model = pomegranate_model(transmat, start_probs, means, covars)
    
            tic = time.time()
            for seq in seqs:
                model.log_probability(seq)
            plp.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict(seq)
            pv.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict_proba(seq)
            pm.append( time.time() - tic )    
            
            tic = time.time()
            model.fit(seqs, max_iterations=1, verbose=False)
            pt.append( time.time() - tic )
    
        plt.figure( figsize=(12, 8))
        plt.xlabel("# Components", fontsize=12 )
        plt.ylabel("pomegranate is x times faster", fontsize=12 )
        plt.plot( numpy.array(hllp) / numpy.array(plp), label="Log Probability")
        plt.plot( numpy.array(hlv) / numpy.array(pv), label="Viterbi")
        plt.plot( numpy.array(hlm) / numpy.array(pm), label="Maximum A Posteriori")
        plt.plot( numpy.array(hlt) / numpy.array(pt), label="Training")
        plt.xticks( xrange(11), xrange(10, 112, 10), fontsize=12 )
        plt.yticks( fontsize=12 )
        plt.legend( fontsize=12 )
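
The repeated tic/toc bookkeeping above could also be wrapped in a small helper; a minimal sketch (the `time_call` helper is not part of the original benchmark):

    import time

    def time_call(fn, *args, **kwargs):
        # Return the wall-clock seconds taken by a single call to fn.
        tic = time.time()
        fn(*args, **kwargs)
        return time.time() - tic

    # e.g. hllp.append(sum(time_call(model.score, seq) for seq in seqs))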
    evaluate_models(10, 50)

It looks like in this case pomegranate and hmmlearn are approximately the same for large (>30 components) dense graphs for the forward algorithm (log probability), MAP, and training. However, hmmlearn is significantly faster at calculating the Viterbi path, while pomegranate is faster for smaller (<30 components) graphs.

Sparse Graphs with Multivariate Gaussian Emissions

pomegranate is built on a sparse implementation and so excels on graphs that are sparse. Let's try a model architecture where each hidden state only has transitions to itself and the next state, running the same algorithms as last time.

    def initialize_components(n_components, n_dims, n_seqs):
        """
        Initialize a transition matrix for a model with a fixed number of components,
        for Gaussian emissions with a certain number of dimensions, and a data set
        with a certain number of sequences.
        """
        
        transmat = numpy.zeros((n_components, n_components))
        transmat[-1, -1] = 1
        for i in range(n_components-1):
            transmat[i, i] = 1
            transmat[i, i+1] = 1
        transmat[ transmat < 0 ] = 0
        transmat = (transmat.T / transmat.sum( axis=1 )).T
    
        start_probs = numpy.abs( numpy.random.randn(n_components) )
        start_probs /= start_probs.sum()
    
        means = numpy.random.randn(n_components, n_dims)
        covars = numpy.ones((n_components, n_dims))
        
        seqs = numpy.zeros((n_seqs, n_components, n_dims))
        for i in range(n_seqs):
            seqs[i] = means + numpy.random.randn(n_components, n_dims)
            
        return transmat, start_probs, means, covars, seqs
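
To see the band-diagonal structure this produces, here is a quick check (not part of the original benchmark):

    # Each row of a tiny 4-state model should only put mass on the self- and next-state transitions.
    transmat, _, _, _, _ = initialize_components(4, 2, 1)
    print(transmat)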
    evaluate_models(10, 50)

Sparse Graph with Discrete Emissions

Let's also compare MultinomialHMM to a pomegranate HMM with discrete emissions, for completeness.

    def initialize_components(n_components, n_seqs):
        """
        Initialize a transition matrix for a model with a fixed number of components,
        with discrete emissions over a four-letter alphabet, and a data set
        with a certain number of sequences.
        """
        
        transmat = numpy.zeros((n_components, n_components))
        transmat[-1, -1] = 1
        for i in range(n_components-1):
            transmat[i, i] = 1
            transmat[i, i+1] = 1
        transmat[ transmat < 0 ] = 0
        transmat = (transmat.T / transmat.sum( axis=1 )).T
        
        start_probs = numpy.abs( numpy.random.randn(n_components) )
        start_probs /= start_probs.sum()
    
        dists = numpy.abs(numpy.random.randn(n_components, 4))
        dists = (dists.T / dists.T.sum(axis=0)).T
        
        seqs = numpy.random.randint(0, 4, (n_seqs, n_components*2, 1))
        return transmat, start_probs, dists, seqs
    
    def hmmlearn_model(transmat, start_probs, dists):
        """Return a hmmlearn model."""
    
        model = MultinomialHMM(n_components=transmat.shape[0], n_iter=1, tol=1e-8)
        model.startprob_ = start_probs
        model.transmat_ = transmat
        model.emissionprob_ = dists
        return model
    
    def pomegranate_model(transmat, start_probs, dists):
        """Return a pomegranate model."""
        
        states = [ DiscreteDistribution({ 'A': d[0],
                                          'C': d[1],
                                          'G': d[2], 
                                          'T': d[3] }) for d in dists ]
        model = HiddenMarkovModel.from_matrix(transmat, states, start_probs, merge='None')
        return model
    
    def evaluate_models(n_seqs):
        hllp, plp = [], []
        hlv, pv = [], []
        hlm, pm = [], []
        hls, ps = [], []
        hlt, pt = [], []
    
        dna = 'ACGT'
        
        for i in range(10, 112, 10):
            transmat, start_probs, dists, seqs = initialize_components(i, n_seqs)
            model = hmmlearn_model(transmat, start_probs, dists)
    
            tic = time.time()
            for seq in seqs:
                model.score(seq)
            hllp.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict(seq)
            hlv.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict_proba(seq)
            hlm.append( time.time() - tic )    
            
            tic = time.time()
            model.fit(seqs.reshape(n_seqs*i*2, 1), lengths=[i*2]*n_seqs)
            hlt.append( time.time() - tic )
    
            model = pomegranate_model(transmat, start_probs, dists)
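            # Convert the integer observations to DNA characters for pomegranate; note that each
            # element of seq is a length-1 array, which is what triggers the DeprecationWarning
            # shown in the output below.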
            seqs = [[dna[i] for i in seq] for seq in seqs]
    
            tic = time.time()
            for seq in seqs:
                model.log_probability(seq)
            plp.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict(seq)
            pv.append( time.time() - tic )
    
            tic = time.time()
            for seq in seqs:
                model.predict_proba(seq)
            pm.append( time.time() - tic )    
            
            tic = time.time()
            model.fit(seqs, max_iterations=1, verbose=False)
            pt.append( time.time() - tic )
    
        plt.figure( figsize=(12, 8))
        plt.xlabel("# Components", fontsize=12 )
        plt.ylabel("pomegranate is x times faster", fontsize=12 )
        plt.plot( numpy.array(hllp) / numpy.array(plp), label="Log Probability")
        plt.plot( numpy.array(hlv) / numpy.array(pv), label="Viterbi")
        plt.plot( numpy.array(hlm) / numpy.array(pm), label="Maximum A Posteriori")
        plt.plot( numpy.array(hlt) / numpy.array(pt), label="Training")
        plt.xticks( xrange(11), xrange(10, 112, 10), fontsize=12 )
        plt.yticks( fontsize=12 )
        plt.legend( fontsize=12 )
    evaluate_models(50)

/home/jmschr/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:77: DeprecationWarning: converting an array with ndim > 0 to an index will result in an error in the future

IPHAS DR2

    !mkdir /_tmp/iphas/
    !cd /_tmp/iphas/
    # light version (8 GB):
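    # wget flags: -c resume, -r recursive, -np don't ascend to the parent directory, -nH no host directory,
    # --cut-dirs=3 strip the leading path components, --accept=fits.gz keep only the FITS files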
    !wget -c -r -np -nH --cut-dirs=3 --accept=fits.gz http://www.iphas.org/data/dr2/light/
    # full version (48 GB):
    #!wget -c -r -np -nH --cut-dirs=3 --accept=fits.gz http://www.iphas.org/data/dr2/light/
    !gunzip *.gz
    !python /app/dev/ingest_iphas.py

2MASS

    !python /app/dev/fetch_2mass.py
    !cd /_tmp/twomass
    !gunzip *.gz
    !python /app/dev/ingest_2mass.py

Gaia DR2

    !python /app/dev/fetch_gaia_dr2.py
    !python /app/dev/ingest_gaia_dr2.py

AllWISE

    !mkdir /_tmp/allwise
    !cd /_tmp/allwise
    !wget https://irsa.ipac.caltech.edu/data/download/wise-allwise/wget_bz2.script
    !chmod 755 wget_bz2.script
    !./wget_bz2.script
    !lbunzip2 /_tmp/allwise/irsa.ipac.caltech.edu/data/download/wise-allwise/*.bz2
    !python /app/dev/ingest_allwise.py

CatWISE

    # fixme:
    !wget from https://portal.nersc.gov/project/cosmo/data/CatWISE/

RFC 2019d

    !cd /_tmp
    !wget http://astrogeo.org/vlbi/solutions/rfc_2019d/rfc_2019d_cat.txt
    !python /app/dev/ingest_rfc.py

LAMOST DR4_v2 and DR5_v3

    !wget http://dr4.lamost.org/v2/sas/catalog/dr4_v2.csv.gz -O lamost_dr4_v2.csv.gz
    !wget http://dr5.lamost.org/v3/sas/catalog/dr5_v3.csv.gz -O lamost_dr5_v3.csv.gz
    !gunzip lamost_dr5_v3.csv.gz
    !gunzip lamost_dr4_v2.csv.gz

Build a movie recommender

    # Write a program that recommends movies from data that you enter.
    
    # Step 1
    # Each movie consists of a name, a genre, a rating (1 to 5) and the person who reviewed the movie.
    
    # Ideas
    
    # Create a DataFrame
    # Create a database (text file)
    # Create a user interface
    
    
    # Later 
    
    # Collect data from the internet

Movie

    import pandas as pd
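
As a first pass at Step 1, a minimal sketch of the kind of table the later cells expect (the column names match the code below; the sample rows are made-up placeholders):

    # Hypothetical example rows with the columns used later: Name, Genre, Rating, Reviewer.
    sample = pd.DataFrame([
        {"Name": "Movie A", "Genre": "Drama", "Rating": 4, "Reviewer": "Paul"},
        {"Name": "Movie B", "Genre": "Comedy", "Rating": 3, "Reviewer": "Anna"},
    ])
    sample.sort_values("Rating", ascending=False).head(1)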
    
    path = "/Users/Pippo/Desktop/python_examples/Movie_recomender/"
    
    
    df = pd.read_excel('movie_recommendations.xlsx')
    df
    
    df.sort_values('Rating', ascending=False).head(1)
    df.sort_values('Rating', ascending=False).head(5)
    
    df.sort_values('Rating', ascending=False, inplace=True)
    print(df.head(5).sample(1))
    
    df.head(3)
    df.head(10)
    # produce clean unique index
    df.sort_values(by=['Name', 'Genre', 'Reviewer'], inplace=True)
    clean = df.drop_duplicates().dropna()
    df
    long = clean.set_index(['Name', 'Genre', 'Reviewer'])
    long
    
    # mean rating per genre
    print(long.unstack(1).mean())
    
    print('Enter a genre (or leave it empty): ', end='')
    genre = input()
    
    if genre:
        g = df[df['Genre'] == genre]
    else: 
        g = df
        
    result = g.sort_values('Rating', ascending=False)
    print(result.head(3).sample(1))
    print("Enter a rating number 1 - 5 (or leave it empty): ", end="")
    rating = float(input())
    
    if rating:
        r = df[df['Rating'] == rating]
    else:
        r = df
        
    print("Enter a reviewer (or leave it empty): ", end="")
    reviewer = input()
    
    if reviewer:
        re = r[r['Reviewer'] == reviewer]
    else:
        re = r
    
    print("Enter a genre (or leave it empty): ", end="")
    genre = input()
    
    if genre:
        g = re[re['Genre'] == genre]
    else:
        g = re
    
    
    if g.shape[0] > 0:
        result = g.sort_values('Rating', ascending=False)
        print(result.head(3).sample(1))
    else:
        print("Sorry, no results!")Enter a rating number 1 - 5 (or leave it empty): 1
    Enter a reviewer (or leave it empty): Drama
    Enter a genre (or leave it empty): Paul
    Sorry, no results!

Define path and load files

Across the analysis the following names are used for the 3 screens:
- ova == Ovariole Number screen
- fec == Hippo RNAi EggLaying screen
- xRNAi == EggLaying screen

    data = "../Data"
    resultpath = '../Results'
    PPI_ID = "2018_08"
    G = nx.read_graphml(os.path.join(data, 'PPIs', 'PPI_{}.graphml'.format(PPI_ID)))
    hipo_ova = pd.read_csv(os.path.join(data,'Screen/hipo_ova_clean.csv'))
    xRNAi_fec = pd.read_csv(os.path.join(data,'Screen/xRNAi_fec_clean.csv'))
    hipo_fec = pd.read_csv(os.path.join(data,'Screen/hipo_fec_clean.csv'))
    signaling = pd.read_csv(os.path.join(data,'signaling.csv'))
    connectors= pd.read_csv(os.path.join(resultpath,"ConnectorGeneList_{}.csv".format(PPI_ID)))
    mean_ova_gene = hipo_ova.groupby('FbID', as_index=False).mean()
    # We only consider the sum of eggs laid over 5 days, so we restrict the data to the "Sum"
    mean_fec_gene = hipo_fec[hipo_fec['Condition'] == 'Sum'].reset_index(drop=True).groupby('FbID', as_index=False).mean()
    mean_xRNAi_gene = xRNAi_fec[xRNAi_fec['Condition'] == 'Sum'].reset_index(drop=True).groupby('FbID', as_index=False).mean()
    # Modules computed in the notebook file: Seed-Connector
    ova_module_G = nx.read_graphml(os.path.join(resultpath,'Ova_module_{}.graphml'.format(PPI_ID)))
    fec_module_G = nx.read_graphml(os.path.join(resultpath,'Hpo_EggL_module_{}.graphml'.format(PPI_ID)))
    xRNAi_module_G = nx.read_graphml(os.path.join(resultpath,'EggL_module_{}.graphml'.format(PPI_ID)))
    core_module_G = nx.read_graphml(os.path.join(resultpath,'Core_module_{}.graphml'.format(PPI_ID)))
    ova_module = ova_module_G.nodes()
    fec_module = fec_module_G.nodes()
    xRNAi_module = xRNAi_module_G.nodes()
    core_module = core_module_G.nodes()
    # We redefine our threshold of 2 and 5 for Z score cutoff
    ova_threshold = 2
    eggl_threshold = 5

Scatter plots of the 3 screens against each other

    commongenes = set.intersection(set(mean_ova_gene['FbID'].unique()), set(mean_fec_gene['FbID'].unique()))
    gray = "#AAAAAA"
    green = "#77cc77"Screen scatter plots V2# Make a temporary dataframe to plot with
    tmpdata = mean_fec_gene[mean_fec_gene['FbID'].isin(commongenes)]
    tmpdata = tmpdata.merge(mean_ova_gene[mean_ova_gene['FbID'].isin(commongenes)], on='FbID')
    tmpdata = tmpdata.merge(mean_xRNAi_gene[mean_xRNAi_gene['FbID'].isin(commongenes)], on='FbID')
    
    tmpdata = tmpdata.rename(columns={"Z_x":"Z_fec", "Z_y":"Z_ova", "Z":"Z_xRNAi"})
    X = "Z_fec"
    Y = "Z_ova"
    tmpdata['Color'] = (tmpdata[Y].abs() > ova_threshold) | (tmpdata[X].abs() > eggl_threshold)
    tmpdata['Type'] = np.where(tmpdata['Color'], "Postive",  "Negative")
    tmpdata['Color'] = np.where(tmpdata['Color'], green,  gray)
    g = sns.JointGrid(x=tmpdata[tmpdata['Type'] == "Negative"][X], y=tmpdata[tmpdata['Type'] == "Negative"][Y], xlim=(-17,10), ylim=(-17,10))
    g = g.plot_joint(sns.scatterplot, color="#AAAAAA")
    g.x = tmpdata[tmpdata['Type'] == "Postive"][X]
    g.y = tmpdata[tmpdata['Type'] == "Postive"][Y]
    g = g.plot_joint(sns.scatterplot, color="#77cc77")
    g.x = tmpdata[X]
    g.y = tmpdata[Y]
    g = g.plot_marginals(sns.distplot, kde=True, color=".5", bins=np.arange(-15,15,1))
    g.savefig(os.path.join(resultpath,"Figures/Scatter_Ova_VS_Fec_new.svg"))
    X = "Z_xRNAi"
    Y = "Z_ova"
    tmpdata['Color'] = (tmpdata[Y].abs() > ova_threshold) | (tmpdata[X].abs() > eggl_threshold)
    tmpdata['Type'] = np.where(tmpdata['Color'], "Postive",  "Negative")
    tmpdata['Color'] = np.where(tmpdata['Color'], green,  gray)
    g = sns.JointGrid(x=tmpdata[tmpdata['Type'] == "Negative"][X], y=tmpdata[tmpdata['Type'] == "Negative"][Y], xlim=(-17,10), ylim=(-17,10))
    g = g.plot_joint(sns.scatterplot, color="#AAAAAA")
    g.x = tmpdata[tmpdata['Type'] == "Postive"][X]
    g.y = tmpdata[tmpdata['Type'] == "Postive"][Y]
    g = g.plot_joint(sns.scatterplot, color="#77cc77")
    g.x = tmpdata[X]
    g.y = tmpdata[Y]
    g = g.plot_marginals(sns.distplot, kde=True, color=".5", bins=np.arange(-15,15,1))
    g.savefig(os.path.join(resultpath,"Figures/Scatter_Ova_VS_xRNAi_new.svg"))
    X = "Z_fec"
    Y = "Z_xRNAi"
    tmpdata['Color'] = (tmpdata[Y].abs() > eggl_threshold) | (tmpdata[X].abs() > eggl_threshold)  # both axes are egg laying screens, so both use the egg laying threshold
    tmpdata['Type'] = np.where(tmpdata['Color'], "Postive",  "Negative")
    tmpdata['Color'] = np.where(tmpdata['Color'], green,  gray)
    g = sns.JointGrid(x=tmpdata[tmpdata['Type'] == "Negative"][X], y=tmpdata[tmpdata['Type'] == "Negative"][Y], xlim=(-17,10), ylim=(-17,10))
    g = g.plot_joint(sns.scatterplot, color="#AAAAAA")
    g.x = tmpdata[tmpdata['Type'] == "Postive"][X]
    g.y = tmpdata[tmpdata['Type'] == "Postive"][Y]
    g = g.plot_joint(sns.scatterplot, color="#77cc77")
    g.x = tmpdata[X]
    g.y = tmpdata[Y]
    g = g.plot_marginals(sns.distplot, kde=True, color=".5", bins=np.arange(-15,15,1))
    g.savefig(os.path.join(resultpath,"Figures/Scatter_Fec_VS_xRNAi_new.svg"))Ovariole nb against Egg Laying (Hippo RNAi for both)# # # Set X as Ovariole Number
    X = mean_fec_gene[mean_fec_gene['FbID'].isin(commongenes)]['Z'].values
    # # # Set Y as Hippo RNAi EggLaying
    Y = mean_ova_gene[mean_ova_gene['FbID'].isin(commongenes)]['Z'].values
    # Let's create the color list: gray if under threshold, green if over threshold
    c = []
    for i in range(len(X)):
        tmpc = gray
        if np.abs(X[i]) >= eggl_threshold:
            tmpc = green
        if np.abs(Y[i]) >= ova_threshold:
            tmpc = green
        c.append(tmpc)
    fig = plt.figure(figsize=(5,5))
    plt.scatter(X, Y, c=c, s=15)
    plt.plot([-15,15],[2,2],'--', color='red', alpha=0.3)
    plt.plot([-15,15],[-2,-2], '--' ,color='red', alpha=0.3)
    plt.plot([5,5],[-15,15], '--' ,color='darkred', alpha=0.3)
    plt.plot([-5,-5],[-15,15], '--' ,color='darkred', alpha=0.3)
    plt.xlim(-15,15)
    plt.ylim(-15,15)
    plt.xlabel('Hippo RNAi Egg Laying Zscore')
    plt.ylabel('Ovariole Nb Zscore')
    plt.title('Hippo RNAi Egg Laying VS Ovariole number')
    fig.savefig(os.path.join(resultpath,"Figures/Scatter_Ova_VS_Fec.svg"))Hippo RNAi Ovariole nb VS Egg Laying# Set X as Ovariole Number
    X = mean_xRNAi_gene[mean_xRNAi_gene['FbID'].isin(commongenes)]['Z'].values
    # Set Y as Hippo RNAi EggLaying
    Y = mean_ova_gene[mean_ova_gene['FbID'].isin(commongenes)]['Z'].values
    # Let's create the color list: gray if under threshold, green if over threshold
    c = []
    for i in range(len(X)):
        tmpc = gray
        if np.abs(X[i]) >= eggl_threshold:
            tmpc = green
        if np.abs(Y[i]) >= ova_threshold:
            tmpc = green
        c.append(tmpc)
    fig = plt.figure(figsize=(5,5))
    plt.scatter(X, Y, c=c, s=15)
    plt.plot([-15,15],[2,2],'--', color='red', alpha=0.3)
    plt.plot([-15,15],[-2,-2], '--' ,color='red', alpha=0.3)
    plt.plot([5,5],[-15,15], '--' ,color='darkred', alpha=0.3)
    plt.plot([-5,-5],[-15,15], '--' ,color='darkred', alpha=0.3)
    plt.xlim(-15,15)
    plt.ylim(-15,15)
    plt.xlabel('Egg Laying Zscore')
    plt.ylabel('Ovariole Nb Zscore')
    plt.title('Egg Laying VS Ovariole number')
    fig.savefig(os.path.join(resultpath,"Figures/Scatter_Ova_VS_xRNAi.svg"))Hippo RNAi Egg Laying VS Egg Laying# Set X as Ovariole Number
    X = mean_fec_gene[mean_fec_gene['FbID'].isin(commongenes)]['Z'].values
    # Set Y as Hippo RNAi EggLaying
    Y = mean_xRNAi_gene[mean_xRNAi_gene['FbID'].isin(commongenes)]['Z'].values
    # Let's create the color list: gray if under threshold, green if over threshold
    c = []
    for i in range(len(X)):
        tmpc = gray
        if np.abs(X[i]) >= eggl_threshold:
            tmpc = green
        if np.abs(Y[i]) >= eggl_threshold:
            tmpc = green
        c.append(tmpc)
    fig = plt.figure(figsize=(5,5))
    plt.scatter(X, Y, c=c, s=15)
    plt.plot([-15,15],[5,5], '--', color='darkred', alpha=0.3)
    plt.plot([-15,15],[-5,-5], '--', color='darkred', alpha=0.3)
    plt.plot([5,5],[-15,15], '--', color='darkred', alpha=0.3)
    plt.plot([-5,-5],[-15,15], '--', color='darkred', alpha=0.3)
    plt.xlim(-15,15)
    plt.ylim(-15,15)
    plt.xlabel('Hippo RNAi Egg Laying Zscore')
    plt.ylabel('Egg Laying Zscore')
    plt.title('Hippo RNAi Egg Laying VS Egg Laying')
    fig.savefig(os.path.join(resultpath,"Figures/Scatter_Fec_VS_xRNAi.svg"))Correlation of Ovariole nb and Egg Layingg = sns.jointplot(x="Count_x", y="OvarioleNb", color=gray, data=tmpdata[(tmpdata['Count_x'] > 0) & (tmpdata['OvarioleNb'] > 0)], kind="reg")
    g = g.annotate(stats.pearsonr)
    # sns.regplot(X,Y)
    g.savefig(os.path.join(resultpath,"Figures/Regression_Ova_VS_Fec.svg"))
    g = sns.jointplot(x="Count_x", y="OvarioleNb", color=gray, data=tmpdata[(tmpdata['Count_x'] > 0) & (tmpdata['OvarioleNb'] > 0)], kind="reg")
    g = g.annotate(stats.pearsonr)
    # sns.regplot(X,Y)
    g.savefig(os.path.join(resultpath,"Figures/Regression_Ova_VS_Fec.svg"))/home/lblondel/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
      return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
    /home/lblondel/anaconda3/lib/python3.7/site-packages/seaborn/axisgrid.py:1847: UserWarning: JointGrid annotation is deprecated and will be removed in a future release.
      warnings.warn(UserWarning(msg))

Looking at the effects of the module genes on each phenotype

    all_modules = [list(core_module), list(ova_module), list(fec_module), list(xRNAi_module)]
    all_modules_G = [core_module_G, ova_module_G, fec_module_G, xRNAi_module_G]
    module_name = ['Core Module', 'HpoRNAi Ova', 'HpoRNAi EggL', 'EggL']
    data = pd.DataFrame()
    
    for module, G, name in zip(all_modules, all_modules_G, module_name):
        tmp = mean_fec_gene[mean_fec_gene['FbID'].isin(module)][['FbID','Z']]
        tmp = tmp.append(mean_xRNAi_gene[mean_xRNAi_gene['FbID'].isin(module)][['FbID','Z']])
        tmp = tmp.append(mean_ova_gene[mean_ova_gene['FbID'].isin(module)][['FbID','Z']])
        
        inscreen = []
        inscreen += ["Hippo RNAi EggLaying"]*len(mean_fec_gene[mean_fec_gene['FbID'].isin(module)])
        inscreen += ["EggLaying"]*len(mean_xRNAi_gene[mean_xRNAi_gene['FbID'].isin(module)])
        inscreen += ["Ovariole nb"]*len(mean_ova_gene[mean_ova_gene['FbID'].isin(module)])
        tmp['Screen'] = inscreen
        
        tmp['Seed'] = tmp['FbID'].apply(lambda node: 'Seed' if 'Seed' in G.nodes[node]['Seed'] else 'Connector')
        
        tmp['Module'] = [name]*len(tmp)
        data = data.append(tmp)
        
    data = data.reset_index(drop=True)

Drawing the different modules' Z score values

    fig = plt.figure(figsize=(10,12))
    i = 1
    for module in module_name:
        ax = fig.add_subplot(4,1,i)
        sns.swarmplot(data=data[(data['Module'] == module)], y='Screen', x='Z', hue='Seed')    
        plt.plot([-2,-2],[-10,10], 'k--', alpha=0.3)
        plt.plot([-5,-5],[-10,10], 'k--', alpha=0.3)
        plt.plot([2,2],[-10,10], 'k--', alpha=0.3)
        plt.plot([5,5],[-10,10], 'k--', alpha=0.3)
        plt.ylabel("")
        plt.xlabel("Zscore of phenotype")
        plt.title("Effect of {} genes on the different screens".format(module))
        plt.xlim(-17,8)
        i += 1
    fig.savefig(os.path.join(resultpath, "Figures/Zscores_allscreens.svg"))General Screen number reportingsns.set()
    ova_negative = mean_ova_gene[mean_ova_gene['Z'] <= - ova_threshold].shape[0]
    ova_positive = mean_ova_gene[mean_ova_gene['Z'] >= ova_threshold].shape[0]
    ova_noeffect = mean_ova_gene.shape[0] - ova_negative - ova_positive
    assert(ova_noeffect + ova_negative + ova_positive == len(mean_ova_gene))
    fec_negative = mean_fec_gene[mean_fec_gene['Z'] <= - eggl_threshold].shape[0]
    fec_positive = mean_fec_gene[mean_fec_gene['Z'] >= eggl_threshold].shape[0]
    fec_noeffect = mean_fec_gene.shape[0] - fec_negative - fec_positive
    fec_notfound = 12 # Genes not tested because RNAi line not existing
    assert(fec_noeffect + fec_negative + fec_positive == len(mean_fec_gene))
    xRNAi_negative = mean_xRNAi_gene[mean_xRNAi_gene['Z'] <= - eggl_threshold].shape[0]
    xRNAi_positive = mean_xRNAi_gene[mean_xRNAi_gene['Z'] >= eggl_threshold].shape[0]
    xRNAi_noeffect = mean_xRNAi_gene.shape[0] - xRNAi_negative - xRNAi_positive
    assert(xRNAi_noeffect + xRNAi_negative + xRNAi_positive == len(mean_xRNAi_gene))
    neg = np.array([fec_negative, xRNAi_negative, ova_negative])
    pos = np.array([fec_positive, xRNAi_positive, ova_positive])
    noef = np.array([fec_noeffect, xRNAi_noeffect, ova_noeffect])
    notf = np.array([fec_notfound, 0, 0])
    fig = plt.figure(figsize=(4,2))
    width = 0.7
    p1 = plt.bar(np.arange(3), notf, width, color=(0.6,0.6,0.6))
    p2 = plt.bar(np.arange(3), noef, width, color=(0.5,0.5,0.9), bottom=notf)
    p3 = plt.bar(np.arange(3), neg, width,  color=(0.9,0.5,0.5), bottom=notf+noef)
    p4 = plt.bar(np.arange(3), pos, width,  color=green, bottom=notf+noef+neg)
    
    plt.ylabel('Number of genes')
    plt.title('Screens number of genes breakdown')
    plt.xticks(np.arange(3), ('Hippo RNAi Egg Laying', 'Egg Laying', 'Hippo RNAi Ovariole nb'))
    # plt.yticks(np.arange(0, 81, 10))
    plt.legend((p1[0], p2[0], p3[0], p4[0]), ('Not found', 'No effect', 'Negative effect', 'Positive effect'))
    fig.savefig(os.path.join(resultpath, "Figures/ScreenNumber_Breakdown.svg"))Make Tabletable = np.array([notf, noef-1, neg, pos, neg+pos+noef+notf-1])
    rows = ['Not found', 'No effect', 'Negative effect', 'Positive effect', 'Total']
    columns = ['Hippo RNAi Egg Laying', 'Egg Laying', 'Hippo RNAi Ovariole nb']
    colors = [(0.6,0.6,0.6), (0.5,0.5,0.9), (0.9,0.5,0.5), green, (1,1,1)]
    fig = plt.figure(figsize=(9,3))
    ax = fig.add_subplot(111)
    ax.axis('tight')
    ax.axis('off')
    ax.table(cellText=table,
                          rowLabels=rows,
                          rowColours=colors,
                          colLabels=columns,
                          loc='center')
    fig.savefig(os.path.join(resultpath, "Figures/Table_ScreenNumber_Breakdown.svg"))Hippo RNAi Effectfig = plt.figure(figsize=(6,2))
    ax = fig.add_subplot(121)
    sns.boxplot(data=hipo_fec[(hipo_fec['Condition'] == 'Sum')], x='Count', y="Gene", order=['OR', 'Tj>HpoRNAi', 'Tj>HpoRNAi,HpoRNAi'], color='lightgray')
    plt.title("Hippo RNAi Egg Laying")
    # fig.savefig(os.path.join(resultpath, 'Figures/HippoRNAiEffect_hpoEggL.svg'))
    
    ax = fig.add_subplot(122)
    # fig = plt.figure(figsize=(5,3))
    sns.boxplot(data=hipo_ova, x='OvarioleNb', y="Gene", order=['OR', 'Tj>HpoRNAi', 'Tj>HpoRNAi,HpoRNAi'], color='lightgray')
    plt.title("Hippo RNAi Ovariole nb")
    fig.savefig(os.path.join(resultpath, 'Figures/HippoRNAiEffect.svg'))

Batch effects

    fig = plt.figure(figsize=(5,3))
    sns.violinplot(data=hipo_fec[(hipo_fec['Condition'] == 'Sum') & (hipo_fec['Gene'] == 'Tj>HpoRNAi')], x='Batch', y="Count", color="lightgray")
    plt.title("Hippo RNAi Egg Laying")
    fig.savefig(os.path.join(resultpath, 'Figures/BatchEffect_hpoEggL.svg'))
    
    fig = plt.figure(figsize=(5,3))
    sns.violinplot(data=xRNAi_fec[(xRNAi_fec['Condition'] == 'Sum') & (xRNAi_fec['Gene'] == 'Tj>')], x='Batch', y="Count", color="lightgray")
    plt.title("Egg Laying")
    fig.savefig(os.path.join(resultpath, 'Figures/BatchEffect_EggL.svg'))
    
    fig = plt.figure(figsize=(5,3))
    sns.violinplot(data=hipo_ova[(hipo_ova['Gene'] == 'Tj>HpoRNAi')], x='Batch', y="OvarioleNb", color="lightgray")
    plt.title("Hippo RNAi Ovariole number")
    fig.savefig(os.path.join(resultpath, 'Figures/BatchEffect_hpoova.svg'))/home/lblondel/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
      return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval

VacationPy
----

Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.

    # Dependencies and Setup
    import matplotlib.pyplot as plt
    import pandas as pd
    import numpy as np
    import requests
    import gmaps
    import os
    import json
    from pprint import pprint
    from census import Census
    import time
    from us import states
    from scipy.stats import linregress
    from matplotlib import pyplot as plt
    
    # Import API key
    from api_keys import g_key

Store Part I results into DataFrame

* Load the csv exported in Part I to a DataFrame

    # Load CSV file generated from WeatherPy Folder
    cities_data_to_load = "../VacationPy/cities.csv"
    cities_data = pd.read_csv(cities_data_to_load)
    cities_data
    
    dropna_cities_data = cities_data.dropna()
    dropna_cities_data.head(20)

Humidity Heatmap

* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.

    # Configure gmaps
    gmaps.configure(api_key=g_key)
    
    # Locations
    locations = dropna_cities_data[["Lat", "Lng"]]
    
    humidity = dropna_cities_data["Humidity"].astype(float)
    # Plot Heatmap
    fig = gmaps.figure()
    # Create heat layer
    heat_layer = gmaps.heatmap_layer(locations, weights=humidity, 
                                     dissipating=False, max_intensity=100,
                                     point_radius=2)
    
    # Add layer
    fig.add_layer(heat_layer)
    
    # Display figure
    fig

Create new DataFrame fitting weather criteria

* Narrow down the cities to fit weather conditions.
* Drop any rows with null values.

    # Select data with perfect weather
    perfect_weather_df=cities_data.loc[(cities_data["Max Temp"] > 70)  & (cities_data["Max Temp"] < 80) & (cities_data["Cloudiness"] == 0) & (cities_data["Wind Speed"] <15),:]
    
    perfect_weather_df

Hotel Map

* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels within 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.

    # Create a hotel_df
    hotel_df = perfect_weather_df.loc[:,["City","Country", "Lat", "Lng"]]
    
    # Add a "Hotel Name" column to the DataFrame
    hotel_df["Hotel Name"] = ""
    
    # Display the result
    hotel_df.head()
    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    
    params = {"type" : "hotel",
              "keyword" : "",
              "radius" : 5000,
              "key" : g_key}
    for index, row in hotel_df.iterrows():
        # get city name, lat, lnt from df
        lat = row["Lat"]
        lng = row["Lng"]
        city_name = row["City"]
        
        # add the city's coordinates to the params dict
        params["location"] = f"{lat},{lng}"
    
        # assemble url and make API request
        print(f"Retrieving Results for Index {index}: {city_name}.")
        response = requests.get(base_url, params=params).json()
        
        # extract results
        results = response['results']
        
        # save the hotel name to dataframe
        try:
            print(f"Closest hotel in {city_name} is {results[0]['name']}.")
            hotel_df.loc[index, "Hotel Name"] = results[0]['name']
    
        # if there is no hotel available, show missing field
        except (KeyError, IndexError):
            print("Missing field/result... skipping.")
            
        print("------------")
        
        # Wait 1 sec to make another api request to avoid SSL Error
        time.sleep(1)
    
    # Print end of search once searching is completed
    print("-------End of Search-------")
    # drop all the rows in which any of the column contains null value.
    hotel_df = hotel_df.dropna(how="any")
    
    # Display the hotel dataframe
    hotel_df
    # NOTE: Do not change any of the code in this cell
    
    # Using the template add the hotel marks to the heatmap
    info_box_template = """
    
    Name
    {Hotel Name}
    City
    {City}
    Country
    {Country}
    """ # Store the DataFrame Row # NOTE: be sure to update with your DataFrame name hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()] locations = hotel_df[["Lat", "Lng"]] # Add marker layer and info box content ontop of heat map markers = gmaps.marker_layer(locations, info_box_content = hotel_info) # Add the layer to the map fig.add_layer(markers) # Display Map figRate limit checksIt seems that a substantial number of tweets is missing from our collection. One possible reason for this is that the crawler runs into rate limits and that at such moments Twitter does not return the tweets which would make the total number of tweets exceed the allowed number within the current time. Twitter's manual states that this behavior is possible but the exact limit is not mentioned: https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filterWe check the number of tweets collected on Thursday 16 April 2020 by twiqs.nl. This crawler runs four crawlers: track (Dutch tweets), follow (Dutch people), locations (tweets from Dutch-speaking locations) and dialect (tweets written in Dutch dialects). It is possible that the rate limit only affects the total number of tweets collected, so we also examine the sum of the number of collected tweets per time unit (total).In the logs of the track crawler we found ten mentions of the crawler hitting the date limit.from datetime import datetime RATELIMITHITS = ["1586990812990","1587049573673","1587049595569","1587049601794","1587049610358", "1587049620416","1587049624909","1587049632389","1587049646012","1587049659564"] for timestamp in RATELIMITHITS: print(datetime.fromtimestamp(int(timestamp[0:10]))) TRACK = "track" FOLLOW = "follow" LOCATIONS = "locations" DIALECT = "dialect" TOTAL = "total" RATELIMITHIT = "rate limit hit" STREAMS = [TRACK,FOLLOW,LOCATIONS,DIALECT] counts = {} for stream in STREAMS: inFileName = "20200416-"+stream+".txt" inFile = open(inFileName,"r") counts[stream] = {} for line in inFile: count,time = line.strip().split() counts[stream][time] = int(count) inFile.close() counts[TOTAL] = {} for minute in counts[TRACK].keys(): counts[TOTAL][minute] = counts[TRACK][minute] counts[TOTAL][minute] += counts[FOLLOW][minute] if minute in counts[FOLLOW] else 0 counts[TOTAL][minute] += counts[LOCATIONS][minute] if minute in counts[LOCATIONS] else 0 counts[TOTAL][minute] += counts[DIALECT][minute] if minute in counts[DIALECT] else 0 from library import stringArrayToDates import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as mdates font = {"size":12} matplotlib.rc("font",**font) myFmt = mdates.DateFormatter('%H:%M') DATEFORMAT = "%Y%m%d%H%M" plt.figure(figsize=(9,6)) ax1 = plt.subplot(111) ax1.xaxis.set_major_formatter(myFmt) ax1.plot_date(stringArrayToDates(counts[TRACK].keys(),dateFormat=DATEFORMAT),list(counts[TRACK].values()),xdate=True,fmt="-",label=TRACK) ax1.plot_date(stringArrayToDates(counts[FOLLOW].keys(),dateFormat=DATEFORMAT),list(counts[FOLLOW].values()),xdate=True,fmt="-",label=FOLLOW) ax1.plot_date(stringArrayToDates(counts[LOCATIONS].keys(),dateFormat=DATEFORMAT),list(counts[LOCATIONS].values()),xdate=True,fmt="-",label=LOCATIONS) ax1.plot_date(stringArrayToDates(counts[DIALECT].keys(),dateFormat=DATEFORMAT),list(counts[DIALECT].values()),xdate=True,fmt="-",label=DIALECT) ax1.plot_date(stringArrayToDates(counts[TOTAL].keys(),dateFormat=DATEFORMAT),list(counts[TOTAL].values()),xdate=True,fmt="-",label=TOTAL) ax1.legend() for i in 
range(0,len(RATELIMITHITS)): plt.vlines(datetime.fromtimestamp(int(RATELIMITHITS[i][0:10])),0,max(counts[TOTAL].values()),label=RATELIMITHIT if i == 0 else "") plt.title("number of tweets per minute on Thursday 16 April") ax1.legend() plt.show() def aggregate(countsIn,number): countsOut = {} sortedKeys = sorted(countsIn.keys()) for i in range(0,len(sortedKeys)): countsOut[sortedKeys[i]] = countsIn[sortedKeys[i]] for j in range(i+1,i+number): try: countsOut[sortedKeys[i]] += countsIn[sortedKeys[j]] except: pass return(countsOut) aggregate({"a":1,"b":2,"c":3,"d":4},2) == {'a': 3,'b': 5,'c': 7,'d': 4} MINS = 15 plt.figure(figsize=(9,6)) ax2 = plt.subplot(111) ax2.xaxis.set_major_formatter(myFmt) ax2.plot_date(stringArrayToDates(counts[TRACK].keys(),dateFormat=DATEFORMAT),list(aggregate(counts[TRACK],MINS).values()),xdate=True,fmt="-",label=TRACK) ax2.plot_date(stringArrayToDates(counts[FOLLOW].keys(),dateFormat=DATEFORMAT),list(aggregate(counts[FOLLOW],MINS).values()),xdate=True,fmt="-",label=FOLLOW) ax2.plot_date(stringArrayToDates(counts[LOCATIONS].keys(),dateFormat=DATEFORMAT),list(aggregate(counts[LOCATIONS],MINS).values()),xdate=True,fmt="-",label=LOCATIONS) ax2.plot_date(stringArrayToDates(counts[DIALECT].keys(),dateFormat=DATEFORMAT),list(aggregate(counts[DIALECT],MINS).values()),xdate=True,fmt="-",label=DIALECT) ax2.plot_date(stringArrayToDates(counts[TOTAL].keys(),dateFormat=DATEFORMAT),list(aggregate(counts[TOTAL],MINS).values()),xdate=True,fmt="-",label=TOTAL) for i in range(0,len(RATELIMITHITS)): plt.vlines(datetime.fromtimestamp(int(RATELIMITHITS[i][0:10])),0,max(list(aggregate(counts[TOTAL],MINS).values())),label=RATELIMITHIT if i == 0 else "") plt.title("number of tweets per "+str(MINS)+" minutes on Thursday 16 April") ax2.legend() plt.show()Tutorial II: Linear Trotter steps of diagonal Coulomb operators Electronic structure Hamiltonians with diagonal Coulomb operatorsWhen expressed in an arbitrary basis the molecular electronic structure Hamiltonian takes the form$$H = \sum_{pq} h_{pq} a^\dagger_p a_q + \sum_{pqrs} h_{pqrs} a^\dagger_p a^\dagger_q a_r a_s$$where the coefficients $h_{pq}$ and $h_{pqrs}$ are determined by integrals taken over the basis functions. Note that this Hamiltonian has $O(N^4)$ terms which tends to make its simulation challenging on near-term devices.However, as discussed in [Phys. Rev. X 8, 011044](https://journals.aps.org/prx/abstract/10.1103/PhysRevX.8.011044), by carefully selection of basis function it is possible to obtain a representation that diagonalizes the Coulomb operator, leading to a much simpler Hamiltonian with $O(N^2)$ terms that can be written as$$H = \sum_{pq} T_{pq} a^\dagger_p a_q + \sum_{pq} V_{pq} a^\dagger_p a_p a^\dagger_q a_q$$This form is derived in [Phys. Rev. X 8, 011044](https://journals.aps.org/prx/abstract/10.1103/PhysRevX.8.011044) by using basis functions that are related to a unitary rotation of plane waves. However, plane waves are not the only basis with this property; e.g., see [JCP 147, 244102](https://aip.scitation.org/doi/10.1063/1.5007066) for a basis that provides the diagonal form and high accuracy representation of single-molecules.Being a periodic basis, plane waves are particularly well suited to simulating periodic materials (e.g. solid state LiH instead of single molecule LiH in vacuum). 
One can use plane waves to also simulate single-molecules with a basis set discretization error that is asymptotically equivalent to Gaussian molecular orbitals; however, in practice for simulating single-molecules one often needs a constant factor more plane waves than Gaussians, and sometimes that constant factor is prohibitive for NISQ applications. In Tutorial III, we discuss how a combination of techniques from Tutorial I and this tutorial enable simulation of arbitrary basis electronic structure in low depth. However, this tutorial will focus on representations of the Hamiltonian with a diagonal Coulomb operator. The techniques discussed in this notebook are applicable to any molecular system, whether periodic or not. However, for simplicity this notebook will focus on the simulation of the uniform electron gas, aka "jellium". Jellium has the same Hamiltonian as an arbitrary molecule but without an external potential (i.e. $T_{pp}$ is uniform for all $p$). Generation of a dual basis jellium HamiltonianWe begin by generating a small two-dimensional jellium model in the "plane wave dual basis" as in [Phys. Rev. X 8, 011044](https://journals.aps.org/prx/abstract/10.1103/PhysRevX.8.011044). Such two-dimensional jellium systems are often studied in the context of the fractional quantum Hall effect.import openfermion # Set parameters of jellium model. wigner_seitz_radius = 5. # Radius per electron in Bohr radii. n_dimensions = 2 # Number of spatial dimensions. grid_length = 2 # Number of grid points in each dimension. spinless = True # Whether to include spin degree of freedom or not. n_electrons = 2 # Number of electrons. # Figure out length scale based on Wigner-Seitz radius and construct a basis grid. length_scale = openfermion.wigner_seitz_length_scale( wigner_seitz_radius, n_electrons, n_dimensions) grid = openfermion.Grid(n_dimensions, grid_length, length_scale) # Initialize the model and print out. fermion_hamiltonian = openfermion.jellium_model(grid, spinless=spinless, plane_wave=False) print(fermion_hamiltonian) # Convert to DiagonalCoulombHamiltonian type. hamiltonian = openfermion.get_diagonal_coulomb_hamiltonian(fermion_hamiltonian)0.1256637061435917 [0^ 0] + -0.07957747154594769 [0^ 0 1^ 1] + -0.07957747154594769 [0^ 0 2^ 2] + -0.23873241463784306 [0^ 0 3^ 3] + -0.06283185307179587 [0^ 1] + -0.06283185307179585 [0^ 2] + -0.06283185307179587 [1^ 0] + 0.1256637061435917 [1^ 1] + -0.07957747154594769 [1^ 1 0^ 0] + -0.23873241463784306 [1^ 1 2^ 2] + -0.07957747154594769 [1^ 1 3^ 3] + -0.06283185307179585 [1^ 3] + -0.06283185307179585 [2^ 0] + 0.1256637061435917 [2^ 2] + -0.07957747154594769 [2^ 2 0^ 0] + -0.23873241463784306 [2^ 2 1^ 1] + -0.07957747154594769 [2^ 2 3^ 3] + -0.06283185307179587 [2^ 3] + -0.06283185307179585 [3^ 1] + -0.06283185307179587 [3^ 2] + 0.1256637061435917 [3^ 3] + -0.23873241463784306 [3^ 3 0^ 0] + -0.07957747154594769 [3^ 3 1^ 1] + -0.07957747154594769 [3^ 3 2^ 2]In the last line above we converted the FermionOperator to a class called DiagonalCoulombHamiltonian which is a special data structure in OpenFermion for representing operators that take the form$$H = \sum_{pq} T_{pq} a^\dagger_p a_q + \sum_{pq} V_{pq} a^\dagger_p a_p a^\dagger_q a_q.$$OpenFermion-Cirq has implemented Hamiltonian simulation algorithms that are optimized specifically for Hamiltonians of this form. They take as input the OpenFermion data structure DiagonalCoulombHamiltonian, which represents such a Hamiltonian in terms of matrices storing $T_{pq}$ and $V_{pq}$. 
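
Since a DiagonalCoulombHamiltonian just stores the $T_{pq}$ and $V_{pq}$ coefficient matrices, one can inspect them directly; a small sketch (the `one_body` and `two_body` attributes are the same ones used later in this tutorial):

    import numpy

    # T_pq and V_pq are held as plain numpy arrays on the DiagonalCoulombHamiltonian.
    print(numpy.round(hamiltonian.one_body, 3))   # one-body coefficients T_pq
    print(numpy.round(hamiltonian.two_body, 3))   # diagonal Coulomb coefficients V_pq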
Initializing the mean-field state of jelliumOften one would like to begin a simulation of electronic structure in the mean-field state. To do this while keeping operators in the dual basis one needs to apply a rotation of single particle basis functions (see Tutorial I). For arbitrary molecules this would necessitate first computing the canonical orbitals using a Hartree-Fock calculation, perhaps by using [OpenFermion-PySCF](https://github.com/quantumlib/OpenFermion-PySCF). However, since jellium has no external potential the mean-field state is an eigenstate of the one-body term $\sum_{p, q} T_{pq} a^\dagger_p a_q$. This term is a quadratic Hamiltonian, so its eigenstates can be prepared by applying a Bogoliubov transformation to a computational basis state. The Bogoliubov transformation changes the basis to one in which the quadratic Hamiltonian has the diagonal form $\sum_{p} \varepsilon_p b^\dagger_p b_p$, where the $b^\dagger_p$ are the creation operators for a new set of orbitals. We'll set the number of electrons to be half the total number of orbitals.We'll use the OpenFermion class QuadraticHamiltonian to obtain the Bogoliubov transformation matrix. Then, we'll initialize some qubits and create a circuit that applies the transformation to these qubits. Since our algorithms work with linear qubit connectivity, we'll use the `LineQubit` class. We won't bother compiling to Xmon gates for now to keep the circuits simple, but this can be done automatically using the appropriate Cirq methods. We will specify the initial state by passing in a list of the occupied orbitals (which in this case are just the first `n_electron` orbitals).import cirq import openfermioncirq # Obtain the Bogoliubov transformation matrix. quadratic_hamiltonian = openfermion.QuadraticHamiltonian(hamiltonian.one_body) _, transformation_matrix, _ = quadratic_hamiltonian.diagonalizing_bogoliubov_transform() # Create a circuit that prepares the mean-field state occupied_orbitals = range(n_electrons) n_qubits = openfermion.count_qubits(quadratic_hamiltonian) qubits = cirq.LineQubit.range(n_qubits) state_preparation_circuit = cirq.Circuit( openfermioncirq.bogoliubov_transform( qubits, transformation_matrix, initial_state=occupied_orbitals)) # Print circuit. cirq.DropNegligible().optimize_circuit(state_preparation_circuit) print(state_preparation_circuit)0: ──────────────YXXY─────────────────────────────────── │ 1: ───YXXY───────#2^-0.502───────────────YXXY─────────── │ │ 2: ───#2^0.995───────────────YXXY────────#2^0.005─────── │ 3: ──────────────────────────#2^-0.498──────────────────Hamiltonian simulation via a Trotter-Suzuki product formulaThe goal of Hamiltonian time evolution simulation is to apply the unitary operator $\exp(-i H t)$ for some time t. A simulation via a product formula proceeds by dividing the total evolution time $t$ into a finite number of steps $r$ and performing an approximate simulation of $\exp(-i H t/r)$ $r$ times. Each simulation of $\exp(-i H t/r)$ is called a Trotter step. The unitary $\exp(-i H t/r)$ is approximated by interleaving simulations of the terms $H_j$ of a decomposition $H = \sum_{j=1}^L H_j$. For example, the first-order symmetric, commonly known as the second-order, Trotter formula is$$\exp(-i H t) \approx \prod_{j=1}^L \exp(-i H_j t/2) \prod_{j=L}^1 \exp(-i H_j t/2).$$Higher-order product formulas are obtained from this one via a recursive construction. 
There is also a zeroth-order formula, which corresponds to$$\exp(-i H t) \approx \prod_{j=1}^L \exp(-i H_j t).$$In our case, the $H_j$ have the form $T_{pq} a^\dagger_p a_q + T_{pq}^* a^\dagger_q a_p$ or $V_{pq} a^\dagger_p a_p a^\dagger_q a_q$.To construct a circuit for performing time evolution via a product formula, we need to specify the total evolution time, the number of steps to use, and the order of the formula to use. For a fixed evolution time, increasing the number of steps and increasing the order of the formula both yield a more accurate simulation at the cost of increasing the gate count of the circuit. We could also specify an asymmetric Trotter step, or a controlled version, but we won't do that here. We will need to specify what algorithm will be used to compile the Trotter step. There are several options appropriate for DiagonalCoulombHamiltonians.A key result of [Phys. Rev. Lett. 120, 110501](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.110501) was to introduce a linear connectivity swap network which cycles through configurations in which all qubits are adjacent in at most $N$ parallel layers of swaps. As shown in that paper, by using fermionc swap gates instead of regular swap gates, one can use this swap network to simulate Trotter steps of an entire DiagonalCoulombHamiltonian. This effectively simulates $N / 2$ different pairs of terms $V_{pq} a^\dagger_p a_p a^\dagger_q a_q$ and $T_{pq} a^\dagger_p a_q + T^*_{pq} a^\dagger_q a_p$ in each layer of gates. By repeating the swap network for $N$ layers, all terms are simulated exactly once. This is referred to as the `LINEAR_SWAP_NETWORK` Trotter step in OpenFermion-Cirq.Another approach to implementing the Trotter step involves simulating all $\sum_{pq} V_{pq} a^\dagger_p a_p a^\dagger_q a_q$ terms (which commute and thus, this involves no Trotter error) by using the linear swap networks (with normal swap gates), and then performing a basis transformation which diagonalizes the one-body terms so that all of the $\sum_{pq} T_{pq} a^\dagger_p a_q$ terms can be simulated at once, and then performing another basis transformation to restore the original basis. This method involves more gates in each Trotter step but has less Trotter error, and thus might require fewer Trotter steps to achive some target accuracy. This is referred to as the `SPLIT_OPERATOR` Trotter step in OpenFermion-Cirq.Thus, there are currently two options for simulating DiagonalCoulombHamiltonians, `LINEAR_SWAP_NETWORK` and `SPLIT_OPERATOR`, and they correspond to different orderings of the terms $H_j$ in the product formula. Different orderings give different results because the $H_j$ do not all commute. Let's construct a circuit with the `LINEAR_SWAP_NETWORK` method using just one first order Trotter step. We'll insert operations into the circuit using the strategy `EARLIEST` so the printed output will be most compact. Still, the circuit will be longer than the width of this notebook, so we'll print it out transposed.from openfermioncirq import trotter # Set algorithm parameters. time = 1.0 n_steps = 1 order = 1 # Construct circuit swap_network_trotter_step = cirq.Circuit( openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps, order, algorithm=trotter.LINEAR_SWAP_NETWORK), strategy=cirq.InsertStrategy.EARLIEST) # Print circuit. 
cirq.DropNegligible().optimize_circuit(swap_network_trotter_step) print(swap_network_trotter_step.to_text_diagram(transpose=True))0 1 2 3 │ │ │ │ XXYY───────XXYY^-0.02 XXYY───────XXYY^-0.02 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.025 @──────────@^0.025 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^(1/13) │ │ │ │ │ │ ×ᶠ─────────×ᶠ │ │ │ │ │ XXYY───────XXYY^-0.02 XXYY───────XXYY^-0.02 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.025 @──────────@^0.025 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ Rz(-0.04π) │ │ Rz(-0.04π) │ │ [...]Now let's do the same, but using the `SPLIT_OPERATOR` method.split_operator_trotter_step = cirq.Circuit( openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps, order, algorithm=trotter.SPLIT_OPERATOR), strategy=cirq.InsertStrategy.EARLIEST) cirq.DropNegligible().optimize_circuit(split_operator_trotter_step) print(split_operator_trotter_step.to_text_diagram(transpose=True))0 1 2 3 │ │ │ │ │ │ │ │ │ │ │ │ │ │ YXXY────────#2^0.5 │ │ │ │ │ │ │ │ │ │ │ │ │ YXXY────────#2^0.608 │ │ │ │ │ │ │ YXXY────────#2^-0.004 │ │ │ │ YXXY───────#2^(2/3) │ │ │ │ │ │ Rz(-π) YXXY────────#2^0.612 │ │ │ │ │ │ │ YXXY────────#2^-0.498 │ │ │ │ Rz(π) Rz(-0.02π) │ │ │ │ │ │ │ │ Rz(-0.02π) Rz(-0.04π) │ │ │ │ │ │ │ │ │ │ │ │ │ │ YXXY────────#2^0.498 │ │ │ │ │[...]Let's run these circuits on the simulator that comes with Cirq and compute the energy of the resulting states.# Initialize Cirq simulator. simulator = cirq.Simulator() # Convert the Hamiltonian to a sparse matrix. hamiltonian_sparse = openfermion.get_sparse_operator(hamiltonian) # Obtain initial state vector as integer. initial_state = sum(2 ** (n_qubits - 1 - i) for i in occupied_orbitals) # Construct and simulate circuit using the swap network method. circuit = state_preparation_circuit + swap_network_trotter_step result = simulator.simulate(circuit, initial_state=initial_state) final_state = result.final_state print('Energy of state obtained with swap network method: {}'.format( openfermion.expectation(hamiltonian_sparse, final_state).real)) # Construct and simulate circuit using the split-operator method. circuit = state_preparation_circuit + split_operator_trotter_step result = simulator.simulate(circuit, initial_state=initial_state) final_state = result.final_state print('Energy of state obtained with split-operator method: {}'.format( openfermion.expectation(hamiltonian_sparse, final_state).real))Energy of state obtained with swap network method: -0.1925748549537973 Energy of state obtained with split-operator method: -0.1925762474271256Increasing the number of Trotter steps will cause both methods to converge to the same operation, corresponding to an exact simulation. You can play around with the number of Trotter steps to confirm. Note that for NISQ applications one will often be interested in using the zeroth-order Trotter step, also known as the first-order asymmetric Trotter step. We can implement these Trotter steps by setting the order to zero, as we do below.# Set algorithm parameters. 
time = 1.0 n_steps = 1 order = 0 # Construct circuit swap_network_trotter_step = cirq.Circuit( openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps, order, algorithm=trotter.LINEAR_SWAP_NETWORK), strategy=cirq.InsertStrategy.EARLIEST) cirq.DropNegligible().optimize_circuit(swap_network_trotter_step) print(swap_network_trotter_step.to_text_diagram(transpose=True))0 1 2 3 │ │ │ │ XXYY───────XXYY^-0.04 XXYY───────XXYY^-0.04 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.051 @──────────@^0.051 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.152 │ │ │ │ │ │ ×ᶠ─────────×ᶠ │ │ │ │ │ XXYY───────XXYY^-0.04 XXYY───────XXYY^-0.04 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.051 @──────────@^0.051 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ Rz(-0.04π) │ │ Rz(-0.04π) │ │ [...]Note the unusual pattern of fermionic swap networks towards the end. What is happening there is that in the zeroth order step of a `LINEAR_SWAP_NETWORK` style Trotter step, the qubit order is reversed upon output. To avoid this one needs to set an optional called `omit_final_swaps`, e.g.swap_network_trotter_step = cirq.Circuit( openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps, order, algorithm=trotter.LINEAR_SWAP_NETWORK, omit_final_swaps=True), strategy=cirq.InsertStrategy.EARLIEST) cirq.DropNegligible().optimize_circuit(swap_network_trotter_step) print(swap_network_trotter_step.to_text_diagram(transpose=True))0 1 2 3 │ │ │ │ XXYY───────XXYY^-0.04 XXYY───────XXYY^-0.04 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.051 @──────────@^0.051 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.152 │ │ │ │ │ │ ×ᶠ─────────×ᶠ │ │ │ │ │ XXYY───────XXYY^-0.04 XXYY───────XXYY^-0.04 │ │ │ │ │ │ │ │ │ │ │ │ @──────────@^0.051 @──────────@^0.051 │ │ │ │ ×ᶠ─────────×ᶠ ×ᶠ─────────×ᶠ │ │ │ │ Rz(-0.04π) │ │ Rz(-0.04π) │ │ [...]One can also have fun compiling arbitrary high-order formulas. Here's the third-order symmetric formula:order=3 n_steps=1 swap_network_trotter_step = cirq.Circuit( openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps, order, algorithm=trotter.LINEAR_SWAP_NETWORK), strategy=cirq.InsertStrategy.EARLIEST) cirq.DropNegligible().optimize_circuit(swap_network_trotter_step) print(swap_network_trotter_step.to_text_diagram(transpose=True))0 1 2 3 │ │ │ │ XXYY────────XXYY^-0.003 XXYY────────XXYY^-0.003 │ │ │ │ │ │ │ │ │ │ │ │ @───────────@^0.004 @───────────@^0.004 │ │ │ │ ×ᶠ──────────×ᶠ ×ᶠ──────────×ᶠ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @───────────@^0.012 │ │ │ │ │ │ ×ᶠ──────────×ᶠ │ │ │ │ │ XXYY────────XXYY^-0.003 XXYY────────XXYY^-0.003 │ │ │ │ │ │ │ │ │ │ │ │ @───────────@^0.004 @───────────@^0.004 │ │ │ │ ×ᶠ──────────×ᶠ ×ᶠ──────────×ᶠ │ │ [...]Application to phase estimationPhase estimation is a procedure that, given access to a controlled unitary and one of its eigenvectors, estimates the phase of the eigenvalue corresponding to that eigenvector. In the context of quantum simulation, this unitary is usually the time evolution operator $e^{-iHt}$. Thus if $H\lvert n\rangle = E_n \lvert n \rangle$, and we initialize the system in state $\lvert n \rangle$, phase estimation would estimate the value $E_n t / (2\pi)$. To avoid aliasing of phases, $t$ should be chosen to be smaller than $2\pi / \lvert E_n \rvert$. The simplest phase estimation circuit measures one bit of the phase in four steps:1. Perform a Hadamard transform on the control qubit.2. Apply the controlled unitary.3. 
Perform a Hadamard transform on the control qubit.4. Measure the control qubit.Below, we demonstrate the construction of this circuit where the controlled unitary is a controlled Trotter step of our jellium Hamiltonian. This circuit can be used as a building block of a larger phase estimation circuit.# Define a phase estimation circuit. def measure_bit_of_phase(system_qubits, control_qubit, controlled_unitary): yield cirq.H(control_qubit) yield controlled_unitary yield cirq.H(control_qubit) yield cirq.measure(control_qubit) # Get an upper bound on the Hamiltonian norm. import numpy bound = numpy.sum(numpy.abs(hamiltonian.one_body)) + numpy.sum(numpy.abs(hamiltonian.two_body)) # Construct phase estimation circuit. time = 2 * numpy.pi / bound control = cirq.LineQubit(-1) controlled_unitary = openfermioncirq.simulate_trotter( qubits, hamiltonian, time, n_steps=1, order=1, algorithm=trotter.LINEAR_SWAP_NETWORK, control_qubit=control) circuit = cirq.Circuit( measure_bit_of_phase( qubits, control, controlled_unitary)) # Print the circuit. cirq.DropNegligible().optimize_circuit(circuit) print(circuit.to_text_diagram(transpose=True))-1 0 1 2 3 │ │ │ │ │ H │ │ │ │ │ │ │ │ │ @──XXYY─────XXYY^-0.048 │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──@────────@^0.061 │ │ │ │ │ │ │ ┌╴│ │ │ │ │ ╶┐ │ │ ×ᶠ───────×ᶠ │ │ │ │ @──┼────────┼───────────XXYY─────XXYY^-0.048 │ └╴│ │ │ │ │ ╶┘ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──┼────────┼───────────@────────@^0.061 │ │ │ │ │ │ │ │ ×ᶠ───────×ᶠ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ @──┼────────@─────[...]Part 1import numpy as np import scipy.signal as signal %matplotlib notebook import matplotlib.pyplot as plt R_1 = 20000 R_2 = 10000 C = 20e-12 def diode (x): v_t = 25.9 / 1000 I_s = 0.2 # if (x > 1000000): # return 1000000 return I_s * (np.exp (x / v_t) - 1) def diode_dot (x): v_t = 25.9 / 1000 I_s = 0.2 return I_s / v_t * (np.exp (x / v_t)) n = np.linspace (-.1, .1, 1000) diode_out = np.zeros(1000) for i in range(1000): diode_out[i] = diode (i) plt.figure() plt.plot (n, diode (n)) def deriv (x, x_1, x_1_dot, T): return (x - x_1) * (2/T) - x_1_dot def integral (x_1, x_dot, x_1_dot, T): return x_1 + (T/2) * (x_dot + x_1_dot) fs = 44100 N = 10000 freq = 200 n = np.arange (N) x = np.zeros (N) x[:N] = np.cos (2 * np.pi * n[:N] * freq / fs) # / 10000 plt.figure() plt.plot (x) v_i = 0 # v_i_dot = 0 v_b = 0 v_b_dot = 0 v_c = 0 v_c_dot = 0 y = np.zeros (N) for n in range (N): # print ("{}, {}, {}".format (v_c - v_b, v_c, v_b)) # if (v_c - v_b > 10000): # break # v_c = x[n] * R_2 / R_1 v_b_dot_1 = v_b_dot v_b_dot = (v_c / R_1 / C) + v_c_dot - (1 / C) * diode (v_c - v_b) # + v_c_dot # ((diode (v_b) - diode (v_c - v_b)) / C) + v_c_dot v_b_1 = v_b v_b = integral (v_b_1, v_b_dot, v_b_dot_1, 1.0/fs) # # v_i_1 = v_i v_i = x[n] # # v_i_dot_1 = v_i_dot # # v_i_dot = deriv (v_i, v_i_1, v_i_dot_1, 1.0/fs) v_c_1 = v_c # # v_c = R_2 * (diode (v_b) - v_i/R_1) v_c = R_1 * diode (v_b) - v_i v_c_dot_1 = v_c_dot v_c_dot = deriv (v_c, v_c_1, v_c_dot_1, 1.0/fs) y[n] = v_c # v_c test[n] = (v_b - v_c) plt.figure() plt.plot (y) # plt.figure() # plt.plot (test) # plt.xlim ((0, 2))C:\Users\jatin\Anaconda3\lib\site-packages\ipykernel_launcher.py:20: RuntimeWarning: invalid value encountered in double_scalarsPart 2R_3 = 20000 # input filter b1_i = -R_3 * C b0_i = 0 a1_i = R_3 a0_i = R_3 * R_3 w, H_i = signal.freqs ([b1_i, b0_i], [a1_i, a0_i]) plt.figure() plt.semilogx (w / (2 * np.pi), 20 * np.log10 (np.abs (H_i))) plt.xlim ((20, 20000)) # control filter b1_c = -R_3 * C b0_c = 0 a1_c = R_2 a0_c = R_2 * 
R_3 w, H_c = signal.freqs ([b1_c, b0_c], [a1_c, a0_c]) plt.figure() plt.semilogx (w / (2 * np.pi), 20 * np.log10 (np.abs (H_c))) plt.xlim ((20, 20000)) b_z_i, a_z_i = signal.bilinear ([b1_i, b0_i], [a1_i, a0_i], fs=fs) b_z_c, a_z_c = signal.bilinear ([b1_c, b0_c], [a1_c, a0_c], fs=fs) z_i = 0.0 z_c = 0.0 y2 = np.zeros(N) for n in range (N): y_i = z_i + y[n] * b_z_i[0] z_i = y[n] * b_z_i[1] - y_i * a_z_i[1] y_c = z_c + y[n] * b_z_c[0] z_c = y[n] * b_z_c[1] - y_c * a_z_c[1] y2[n] = y_i + y_c plt.figure() plt.plot (y2[100:])Stellar ClassificationA classifier which classifies a set of parameters into either of Stars, Galaxies and Quasars. From Sloan Digital Sky Survey DR17Dataset used: Stellar Classification Dataset - SDSS17https://www.kaggle.com/fedesoriano/stellar-classification-dataset-sdss17 ContextIn astronomy, stellar classification is the classification of stars based on their spectral characteristics. The classification scheme of galaxies, quasars, and stars is one of the most fundamental in astronomy. The early cataloguing of stars and their distribution in the sky has led to the understanding that they make up our own galaxy and, following the distinction that Andromeda was a separate galaxy to our own, numerous galaxies began to be surveyed as more powerful telescopes were built. This datasat aims to classificate stars, galaxies, and quasars based on their spectral characteristics. Content of dataThe data consists of 100,000 observations of space taken by the SDSS (Sloan Digital Sky Survey). Every observation is described by 17 feature columns and 1 class column which identifies it to be either a star, galaxy or quasar.1. obj_ID = Object Identifier, the unique value that identifies the object in the image catalog used by the CAS2. alpha = Right Ascension angle (at J2000 epoch)3. delta = Declination angle (at J2000 epoch)4. u = Ultraviolet filter in the photometric system5. g = Green filter in the photometric system6. r = Red filter in the photometric system7. i = Near Infrared filter in the photometric system8. z = Infrared filter in the photometric system9. run_ID = Run Number used to identify the specific scan10. rereun_ID = Rerun Number to specify how the image was processed11. cam_col = Camera column to identify the scanline within the run12. field_ID = Field number to identify each field13. spec_obj_ID = Unique ID used for optical spectroscopic objects (this means that 2 different observations with the same spec_obj_ID must share the output class)14. class = object class (galaxy, star or quasar object)15. redshift = redshift value based on the increase in wavelength16. plate = plate ID, identifies each plate in SDSS17. MJD = Modified Julian Date, used to indicate when a given piece of SDSS data was taken18. fiber_ID = fiber ID that identifies the fiber that pointed the light at the focal plane in each observation Referenceshttps://www.kaggle.com/prasadchaskar/steller-classification-with-97-accuracyhttps://www.kaggle.com/beyzanks/stellar-classification-98-4-acc-100-auc Notebook Step 1Import libraries and the dataset from the csv(comma separated values) file.# Importing standard libraries import pandas as pd import numpy as np import seaborn as sns df = pd.read_csv("star_classification.csv")Step 2Exploring data for the values we have and if we any problems associated with the data.df.head() # The columns in our data df.columns df.info() # Checking if we got any missing values in the data. df.isna().sum() # Based on data only certain columns are significant. 
Removed columns are the ones displaying IDs, # which are of little to no importance in out process of making a model. stellar_df = df[['alpha', 'delta', 'u', 'g', 'r', 'i', 'z', 'cam_col', 'class', 'redshift', 'plate', 'MJD']] stellar_df.shape stellar_df.describe()Step 3Visualising the data for understanding how it is spread out by using differnt plots using the seaborn library.sns.scatterplot(data=stellar_df, x="alpha", y="redshift", color="orange") sns.histplot(data=stellar_df, x="delta", color="black") sns.histplot(data=stellar_df, x="redshift", color="r")Step 4Making the training and testing data split based on the 'class' column which is to be predicted.stellar_df["class"].value_counts() # Modifying the class column to be a value rather than a string for the model stellar_df["class"] = [0 if i=="STAR" else 1 if i=="QSO" else 2 for i in stellar_df["class"]] # X is the set of features based on which y (the class) is to be predicted/classified. X = stellar_df.drop('class',axis=1) y = stellar_df['class'] print(X.shape) print(y.shape) # Making the test size to 25% of total dataset and setting random state to # a fixed value will guarantee that same sequence of random numbers are generated each time we run the code. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 7)Step 5Preprocess the data and try different models (Random forest & Support Vector Machine here) while aiming for getting the highest accuracy.from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # Random forest classifier from sklearn.ensemble import RandomForestClassifier r_forest = RandomForestClassifier() r_forest.fit(X_train,y_train) predicted = r_forest.predict(X_test) score = r_forest.score(X_test, y_test) rf_score = np.mean(score) print(f"Accuracy Achieved by Random forest classifier: {rf_score*100} %") # SVM - Support Vector Machine Classifier from sklearn.svm import SVC from sklearn.pipeline import make_pipeline svem = make_pipeline(StandardScaler(), SVC(gamma='auto')) svem.fit(X_train,y_train) predicted = svem.predict(X_test) score = svem.score(X_test, y_test) svem_score = np.mean(score) print(f"Accuracy Achieved in SVM: {svem_score*100} %") # Neural Network from sklearn.neural_network import MLPClassifier nn = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, max_iter=1000) nn.fit(X_train, y_train) score = nn.score(X_test, y_test) nn_score = np.mean(score) print(f"Accuracy Achieved in Neural Network: {nn_score*100} %") # Neural Network with different hidden layer size from sklearn.neural_network import MLPClassifier nn2 = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(6, 3), random_state=1, max_iter=1000) nn2.fit(X_train, y_train) score = nn2.score(X_test, y_test) nn2_score = np.mean(score) print(nn2_score*100) model_scores=[rf_score, nn2_score, svem_score] for i in model_scores: print(f"Score: {i}")Score: 0.97916 Score: 0.97156 Score: 0.95876ResultWe find that our Random forest classifier performs the best that is 97.92%print(X_train[19]) class_dict = { 0: "STAR", 1: "QSO", 2: "GALAXY" } prediction = r_forest.predict([X_train[1]]) print(f"Predicted class: {class_dict[prediction[0]]}") # Taking above example and changing values exp_data = [0.5284327 , -1.12519075, 0.19588647, 0.0443259 , 0.28241633, 0.14658281, 0.04755141, 0.40811594, 2.92790288, -0.27501299, 0.11645269] 
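# (Added comments, not in the original notebook:) exp_data above is a single,
# already-standardized feature vector in the same column order as X_train
# ('alpha', 'delta', 'u', 'g', 'r', 'i', 'z', 'cam_col', 'redshift', 'plate', 'MJD');
# predict() expects a 2-D array of shape (n_samples, n_features), hence the
# wrapping in a list below.
# (Aside, a minimal sketch assuming the fitted r_forest and the scaled X_test/y_test
# from above:) accuracy alone can hide per-class behaviour, so a classification
# report is a useful sanity check.
from sklearn.metrics import classification_report
print(classification_report(y_test, r_forest.predict(X_test),
                            target_names=["STAR", "QSO", "GALAXY"]))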
prediction = r_forest.predict([exp_data]) print(f"Predicted class: {class_dict[prediction[0]]}")Predicted class: QSOSave and load modelimport pickle filename = 'rf_model.pkl' with open(filename, 'wb') as file: pickle.dump(r_forest, file) loaded_model = pickle.load(open(filename, 'rb')) result = loaded_model.score(X_test, y_test) print(f"Resultant Accuracy from saved model: {result*100} %") y_pred=loaded_model.predict(X_test) y_predResult VisualizationBy Building a confusion Matrix.from sklearn.metrics import confusion_matrix #Generate the confusion matrix cf_matrix = confusion_matrix(y_test, y_pred) print(cf_matrix) ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues') ax.set_title('Confusion Matrix')CAPSTONE PROJECT:3 - RETAIL Importing Neccessary Python Packages and Libraries.import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import datetime as dt # importing required libraries for clustering import sklearn from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.metrics import silhouette_scoreReading and Understanding Datax={1,2,3,4,4} x.update([4,5,6,7]) print(x) print(bool('False')) print(bool) i=0 while i<3: print(i) i+=1 else: print(0) # Reading the data from excel to pandas dataframe on which analysis needs to be done. retail = pd.read_excel('Online_Retail.xlsx') retail.head()Performing a preliminary data inspection and data cleaning.# shape of df retail.shape #showing no. of rows and columns in the dataset. # df info retail.info() #showing no. of rows with non null entries and datatype of each variable with name. # df description retail.describe() #This gives statistical summary of numerical variable by default.Describe function has revealed strange insight with negative values in Quantity variable and UnitPrice variable, which needs further investigation.retail[retail.Quantity<0],retail[retail.Quantity>0]Filtering and comparing data for both negative and non negative values of Quantity, and correlating with information provided we can infer that negative value in Quantity is associated with cancelled order.retail[retail.UnitPrice<0]From above filtering and description it's not clear as to what is 'adjustment of bad debt' and needs consultation from client or domain expertise for it's treatment. Checking for missing data and formulating an apt strategy to treat them.# Calculating the Missing Values % contribution in DF df_null = round(100*(retail.isnull().sum())/len(retail), 5) df_null # Droping rows having missing values retail = retail.dropna() retail.shapeDealing with duplicate records.retail[retail.duplicated()].shape retail.loc[retail.duplicated(keep=False)] retail[['StockCode','Description']][retail[['StockCode','Description']].duplicated(keep=False)] # Dropping duplicate records. retail.drop_duplicates(inplace= True) retail.shapeDescriptive analysis on dataset.retail.describe() #This is giving the statistical summary such as count, mean, min, max and quartiles values. retail[retail.Quantity<0].shape # This shows that 8872 no. of products has been returned or cancelled. retail.CustomerID.nunique() #This shows the total no. of unique customers who are contributing to overall sales. retail.Country.unique(),retail.Country.nunique() #This shows that there are 37 countries with their names contributing to sales. retail.Country.value_counts() # This shows the total no. of product bought for each country for that period, with UK being the highest contributor in sales. 
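# (Added note, not in the original notebook:) value_counts() here counts invoice
# line items per country rather than distinct customers or revenue, so it is only a
# rough proxy for each country's share of sales; the bar plot below visualizes the
# same counts.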
retail.Country.value_counts().plot(kind= 'bar') retail.InvoiceNo.nunique() # this shows the total no. of unique transactions for the given period. len(retail.InvoiceNo.value_counts())# this shows the total no. of unique transactions for the given period. len(retail.Description.unique()),retail.Description.nunique() # This shows the no. of distinct products bought by the customer during the given period. sns.boxplot(retail.UnitPrice) retail.UnitPrice.quantile([0,.25,.50,.75,.8,.9,.99,1])#This shows the relative prices of products where price of maximum products lie below 15 sterling #per unit, with max price being 38970. retail.UnitPrice[retail.UnitPrice<25].plot(kind='hist') retail.Description.mode() #Highest selling product. retail.Description.value_counts().head(25) # This is a list of top 25 selling products.Cohort analysis : As per requirement we will build a cohort on the basis of the 1st invoice date of available data in a month and then check for retention metric. Cohort size will be 1 month and we will consider the whole data as range under analysis.retail.info() #Converting Customer ID to object datatype. retail1=retail.copy() retail1['CustomerID']=retail1['CustomerID'].astype(str) retail1.info() # for this analysis we are going to drop negative Quantity orders if not done so it would be calculated in ordered more than once. retail1.drop(retail1[retail1.Quantity<1].index, inplace = True) retail1[retail1.Quantity<1] n_orders = retail1.groupby(['CustomerID'])['InvoiceNo'].nunique() mult_orders_perc = np.sum(n_orders > 1) / retail1['CustomerID'].nunique() print(f'{100 * mult_orders_perc:.2f}% of customers ordered more than once.') # This show that 65% out of all data provided to us in this given period ordered more than once. # We can have a look at the distribution of the number of orders per customer. ax = sns.distplot(n_orders, kde=False, hist=True) ax.set(title='Distribution of number of orders per customer', xlabel='no. of orders', ylabel='no. of customers');We keep only the relevant columns and drop duplicated values — one order (indicated by InvoiceNo) can contain multiple items (indicated by StockCode) for analysisretail1 = retail1[['CustomerID', 'InvoiceNo', 'InvoiceDate']].drop_duplicates() retail1Creating the cohort and order_month variables. The first one indicates the monthly cohort based on the first purchase date (calculated per customer).The latter one is the truncated month of the purchase date.retail1['order_month'] = retail1['InvoiceDate'].dt.to_period('M') retail1['cohort'] = retail1.groupby('CustomerID')['InvoiceDate'] \ .transform('min') \ .dt.to_period('M') retail1.head(25)Aggregating the data per cohort and order_month and count the number of unique customers in each group. 
Additionally, we add the period_number, which indicates the number of periods between the cohort month and the month of the purchase.df_cohort = retail1.groupby(['cohort', 'order_month']) \ .agg(n_customers=('CustomerID', 'nunique')) \ .reset_index(drop=False) from operator import attrgetter df_cohort['period_number'] = (df_cohort.order_month - df_cohort.cohort).apply(attrgetter('n')) df_cohort.head(14)Now we will pivot the df_cohort table in a way that each row contains information about a given cohort and each column contains values for certain period.cohort_pivot = df_cohort.pivot_table(index = 'cohort', columns = 'period_number', values = 'n_customers')To obtain the retention matrix, we need to divide the values each row by the row's first value, which is actually the cohort size — all customers who made their first purchase in the given month.cohort_size = cohort_pivot.iloc[:,0] retention_matrix = cohort_pivot.divide(cohort_size, axis = 0)We plot the retention matrix as a heatmap. Additionally, we wanted to include extra information regarding the cohort size. That is why we in fact created two heatmaps, where the one indicating the cohort size is using a white only colormap — no coloring at all.import matplotlib.colors as mcolors with sns.axes_style("white"): fig, ax = plt.subplots(1, 2, figsize=(12, 8), sharey=True, gridspec_kw={'width_ratios': [1, 11]}) # retention matrix sns.heatmap(retention_matrix, mask=retention_matrix.isnull(), annot=True, fmt='.0%', cmap='RdYlGn', ax=ax[1]) ax[1].set_title('Monthly Cohorts: User Retention', fontsize=16) ax[1].set(xlabel='no. of periods', ylabel='') # cohort size cohort_size_df = pd.DataFrame(cohort_size).rename(columns={0: 'cohort_size'}) white_cmap = mcolors.ListedColormap(['white']) sns.heatmap(cohort_size_df, annot=True, cbar=False, fmt='g', cmap=white_cmap, ax=ax[0]) fig.tight_layout()After going through the heatmap it can be inferred that the Cohort in 2010-12 had maximum retention with 27% for the 12th period and an average of around 34% throughout the year. Whereas there is sharp decrease in retention rate for the 2nd group at the end period, average retention rate is around 26% throughout the period. Horizontally it shows gradual increase except for final sharp decline. All in all we can say- the cohort in proceeding months are observed to show a gradual decline in retention rate. But the average retention rate for the end period for all cohort is seen to be around 10%, except for the 1st cohort. Data Preparation for RFM analysis. We are going to analyse the Customers based on below 3 factors:- R (Recency): Number of days since last purchase- F (Frequency): Number of transactions- M (Monetary): Total amount of transactions (revenue contributed)# New Attribute : Monetary retail['Amount'] = retail['Quantity']*retail['UnitPrice'] rfm_m = retail.groupby('CustomerID')['Amount'].sum() rfm_m = rfm_m.reset_index() rfm_m.shape rfm_m.head() # New Attribute : Frequency rfm_f = retail.groupby('CustomerID')['InvoiceNo'].count() rfm_f = rfm_f.reset_index() rfm_f.columns = ['CustomerID', 'Frequency'] rfm_f.head() # Merging the two dfs rfm = pd.merge(rfm_m, rfm_f, on='CustomerID', how='inner') rfm.head() # New Attribute : Recency # Convert 'InvoiceDate' to datetime datatype. 
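# (Added note, not in the original notebook:) Recency is computed below as the
# number of days between each customer's most recent invoice and the latest
# invoice date in the dataset (max_date), so smaller values mean more recent
# activity.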
retail['InvoiceDate'] = pd.to_datetime(retail['InvoiceDate'],format='%d-%m-%Y %H:%M') retail.info() # Compute the maximum date to know the last transaction date max_date = max(retail['InvoiceDate']) max_date # Compute the difference between max date and transaction date retail['Diff'] = max_date - retail['InvoiceDate'] retail # Compute last transaction date to get the recency of customers rfm_p = retail.groupby('CustomerID')['Diff'].min() rfm_p = rfm_p.reset_index() rfm_p.head() # Extract number of days only rfm_p['Diff'] = rfm_p['Diff'].dt.days rfm_p.head() # Merging tha dataframes to get the final RFM dataframe rfm = pd.merge(rfm, rfm_p, on='CustomerID', how='inner') rfm.columns = ['CustomerID', 'Amount', 'Frequency', 'Recency'] rfm.head() x=[] for i in rfm.Amount: if i<=rfm.Amount.quantile(.25): x.append(1) elif i<=rfm.Amount.quantile(0.50): x.append(2) elif i<=rfm.Amount.quantile(0.75): x.append(3) elif i<=rfm.Amount.quantile(1.0): x.append(4) rfm["Amount_star"]=x rfm y=[] for i in rfm.Frequency: if i<=rfm.Frequency.quantile(.25): y.append(1) elif i<=rfm.Frequency.quantile(0.50): y.append(2) elif i<=rfm.Frequency.quantile(0.75): y.append(3) elif i<=rfm.Frequency.quantile(1.0): y.append(4) rfm["Frequency_star"]=y rfm z=[] for i in rfm.Recency: if i<=rfm.Recency.quantile(.25): z.append(4) elif i<=rfm.Recency.quantile(0.50): z.append(3) elif i<=rfm.Recency.quantile(0.75): z.append(2) elif i<=rfm.Recency.quantile(1.0): z.append(1) rfm['Recency_star']=z rfm rfm['rfm_score']=rfm.Amount_star+rfm.Frequency_star+rfm.Recency_star rfm rfm['rfm_score%']=round(((rfm.rfm_score/12)*100),2) #this I have created to have better clarity while analysing. rfm rfm['rfm_segment']=rfm.Amount_star.astype(str) + "-" + rfm.Frequency_star.astype(str) + "-" + rfm.Recency_star.astype(str) rfm rfm['rfm_segment1']=rfm.Amount_star.astype(str) + rfm.Frequency_star.astype(str) + rfm.Recency_star.astype(str) rfm rfm.rfm_segment.value_counts().head(60) # this shows all the unique combination of star ratings. #((rfm.Amount_star== 4).sum()/retail.CustomerID.nunique())*100Inferences from RFM segmentation:->From above rfm_segment we can easily filter and make inferences that any value starting with 4 or 3 (or say at hundreds place) will be a high revenue generating customer, thus they should be made to feel valued customer.->Any customer with rfm_score% of 75 or higher can be considered a high value customer even if they have not made more purchases because they can be approached to gain insight as to what offer lures them to buy more, and thus creating proper strategy to target those group to give offers or discounts.-> Best Customers – This group consists of those customers who are found in M-4, F-4, and R-4 meaning that they transacted recently, do so often and spend more than other customers. A shortened notation for this segment is 4-4-4;->High-spending New Customers – This group consists of those customers in 4-1-1 and 4-2-1. These are customers who transacted only once, but very recently and they spent a lot.->Lowest-Spending Active Loyal Customers – This group consists of those customers in segments 1-1-3 and 1-1-4 (they transacted recently and do so often, but spend the least).->Churned Best Customers – This segment consists of those customers in groups 4-1-1, 4-1-2, 4-2-1 and 4-2-2 (they transacted frequently and spent a lot, but it’s been a long time since they’ve transacted)->Best Customers – Communications with this group should make them feel valued and appreciated. 
These customers likely generate a disproportionately high percentage of overall revenues and thus focusing on keeping them happy should be a top priority. Further analyzing their individual preferences and affinities will provide additional information.->High-spending New Customers – It is always a good idea to carefully incubate all new customers, but because these new customers spent a lot on their first purchase, it’s even more important. Like with the Best Customers group, it’s important to make them feel valued and appreciated – and to give them terrific incentives to continue interacting with the client.->Lowest-Spending Active Loyal Customers – These repeat customers are active and loyal, but they are low spenders. Marketers should create campaigns for this group that make them feel valued, and incentivize them to increase their spend levels. As loyal customers, it often also pays to reward them with special offers if they spread the word about the brand to their friends, e.g., via social networks.->Churned Best Customers – These are valuable customers who stopped transacting a long time ago. While it’s often challenging to re-engage churned customers, the high value of these customers makes it worthwhile trying. Like with the Best Customers group, it’s important to communicate with them on the basis of their specific preferences, as known from earlier transaction data. Data Preparation for Algorithm. There are 2 types of outliers and we will treat outliers as it can skew our dataset.- Statistical- Domain specific# Outlier Analysis of Amount Frequency and Recency attributes = ['Amount','Frequency','Recency'] plt.rcParams['figure.figsize'] = [10,8] sns.boxplot(data = rfm[attributes], orient="v", palette="Set2" ,whis=1.5,saturation=1, width=0.7) plt.title("Outliers Variable Distribution", fontsize = 14, fontweight = 'bold') plt.ylabel("Range", fontweight = 'bold') plt.xlabel("Attributes", fontweight = 'bold') # Removing (statistical) outliers for Amount Q1 = rfm.Amount.quantile(0.05) Q3 = rfm.Amount.quantile(0.95) IQR = Q3 - Q1 rfm = rfm[(rfm.Amount >= Q1 - 1.5*IQR) & (rfm.Amount <= Q3 + 1.5*IQR)] # Removing (statistical) outliers for Recency Q1 = rfm.Recency.quantile(0.05) Q3 = rfm.Recency.quantile(0.95) IQR = Q3 - Q1 rfm = rfm[(rfm.Recency >= Q1 - 1.5*IQR) & (rfm.Recency <= Q3 + 1.5*IQR)] # Removing (statistical) outliers for Frequency Q1 = rfm.Frequency.quantile(0.05) Q3 = rfm.Frequency.quantile(0.95) IQR = Q3 - Q1 rfm = rfm[(rfm.Frequency >= Q1 - 1.5*IQR) & (rfm.Frequency <= Q3 + 1.5*IQR)]Standardizing the data.It is extremely important to rescale the variables so that they have a comparable scale.|There are two common ways of rescaling:1. Min-Max scaling 2. 
Standardisation (mean-0, sigma-1)

Here, we will use Standardisation scaling.

# Rescaling the attributes
rfm_df = rfm[['Amount', 'Frequency', 'Recency']]
# Instantiate
scaler = StandardScaler()
# fit_transform
rfm_df_scaled = scaler.fit_transform(rfm_df)
rfm_df_scaled.shape
rfm_df_scaled = pd.DataFrame(rfm_df_scaled)
rfm_df_scaled.columns = ['Amount', 'Frequency', 'Recency']
rfm_df_scaled.head()

Building the Model

K-Means Clustering

K-means clustering is one of the simplest and most popular unsupervised machine learning algorithms. The algorithm works as follows:
- First, we initialize k points, called means, randomly.
- We assign each item to its closest mean and update that mean's coordinates, which are the averages of the items assigned to it so far.
- We repeat the process for a given number of iterations and, at the end, we have our clusters.

# k-means with some arbitrary k
kmeans = KMeans(n_clusters=3, max_iter=50)
kmeans.fit(rfm_df_scaled)
rfm_df_scaled
kmeans.labels_

Deciding the optimum number of clusters to be formed. Using the Elbow Curve to get the right number of clusters. A fundamental step for any unsupervised algorithm is to determine the optimal number of clusters into which the data may be grouped. The Elbow Method is one of the most popular methods to determine this optimal value of k.

# Elbow curve / SSD
ssd = []
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
    kmeans = KMeans(n_clusters=num_clusters, max_iter=50)
    kmeans.fit(rfm_df_scaled)
    ssd.append(kmeans.inertia_)
# plot the SSD for each n_clusters
plt.plot(range_n_clusters, ssd)

Exporting the dataframe to Excel for data visualization in Tableau

import openpyxl
import xlsxwriter
error_cluster = pd.DataFrame(list(zip(range_n_clusters, ssd)), columns=["no.of clusters", "Error"])
error_cluster.to_excel("error_cluster.xlsx")

Silhouette Analysis

$$\text{silhouette score}=\frac{p-q}{\max(p,q)}$$

$p$ is the mean distance to the points in the nearest cluster that the data point is not a part of.
$q$ is the mean intra-cluster distance to all the points in its own cluster.
* The value of the silhouette score lies between -1 and 1.
* A score closer to 1 indicates that the data point is very similar to other data points in the cluster, * A score closer to -1 indicates that the data point is not similar to the data points in its cluster.# Silhouette analysis range_n_clusters = [2, 3, 4, 5, 6, 7, 8] for num_clusters in range_n_clusters: # intialise kmeans kmeans = KMeans(n_clusters=num_clusters, max_iter=50) kmeans.fit(rfm_df_scaled) cluster_labels = kmeans.labels_ # silhouette score silhouette_avg = silhouette_score(rfm_df_scaled, cluster_labels) print("For n_clusters={0}, the silhouette score is {1}".format(num_clusters, silhouette_avg)) # Final model with k=3 kmeans = KMeans(n_clusters=3, max_iter=50) kmeans.fit(rfm_df_scaled) kmeans.labels_ # assign the label rfm['Cluster_Id'] = kmeans.labels_ rfm.head(60) # Box plot to visualize Cluster Id vs Amount sns.boxplot(x='Cluster_Id', y='Amount', data=rfm) # Box plot to visualize Cluster Id vs Frequency sns.boxplot(x='Cluster_Id', y='Frequency', data=rfm) # Box plot to visualize Cluster Id vs Recency sns.boxplot(x='Cluster_Id', y='Recency', data=rfm)Final Analysis Inferences:K-Means Clustering with 3 Cluster Ids- Customers with Cluster Id 1 are frequent buyers, recent buyers and high spender as well, should be considered as the best customer group.- Customers with Cluster Id 0 are the customers with comparatively good monetary value, frequency and receny for all transactions under observation. Thus these customer group can be a target for conversion into high spending cohort.- Customers with Cluster Id 2 are not recent buyers, frequency is also low and hence least of importance from business point of view.rfmExporting files from dataframe to excel for visualization and dashboard building in Tableau.import openpyxl import xlsxwriter rfm.to_excel("rfm.xlsx")XLNet training for Personality Detection Neuroticism* Based on: https://medium.com/swlh/using-xlnet-for-sentiment-classification-cfa948e65e85 Configuración de Bibliotecasimport transformers from transformers import XLNetTokenizer, XLNetModel, AdamW, get_linear_schedule_with_warmup import torch import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from matplotlib import rc from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report, accuracy_score from collections import defaultdict from textwrap import wrap from pylab import rcParams from torch import nn, optim from keras.preprocessing.sequence import pad_sequences from torch.utils.data import TensorDataset,RandomSampler,SequentialSampler from torch.utils.data import Dataset, DataLoader import torch.nn.functional as F import re import json import randomObtener los datoswith open("../jsons/twitter/guide.json", "r") as guidejson: json_dic = json.load(guidejson) not_neu_count = json_dic["0"] neu_count = json_dic["1"] zero_neu = [] one_neu = [] for i in range(1,not_neu_count + 1): with open(f'../jsons/twitter/{i}-0.json', "r") as temp_not_neu: temp_json = json.load(temp_not_neu) temp_json["y"] = 0 zero_neu.append(temp_json) for i in range(1,neu_count + 1): with open(f'../jsons/twitter/{i}-1.json', "r") as temp_neu: temp_json = json.load(temp_neu) temp_json["y"] = 1 one_neu.append(temp_json) print(len(zero_neu), len(one_neu))6200 3717Preparar los datos para la XLNetimport random random.seed(1) random.shuffle(zero_neu) random.shuffle(one_neu) train_data = [] test_data = [] train_seg = 0.7 for i in zero_neu[:int(len(zero_neu) * train_seg)]: train_data.append(i) for i in 
one_neu[:int(len(one_neu) * train_seg)]: train_data.append(i) for i in zero_neu[int(len(zero_neu) * train_seg):]: test_data.append(i) for i in one_neu[int(len(one_neu) * train_seg):]: test_data.append(i) random.shuffle(train_data) random.shuffle(test_data)Load Modelfrom transformers import XLNetForSequenceClassification model = XLNetForSequenceClassification.from_pretrained('xlnet-base-cased', num_labels = 2)Some weights of the model checkpoint at xlnet-base-cased were not used when initializing XLNetForSequenceClassification: ['lm_loss.weight', 'lm_loss.bias'] - This IS expected if you are initializing XLNetForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing XLNetForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of XLNetForSequenceClassification were not initialized from the model checkpoint at xlnet-base-cased and are newly initialized: ['sequence_summary.summary.weight', 'sequence_summary.summary.bias', 'logits_proj.weight', 'logits_proj.bias'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions a[...]Required functionsdef data_segmenter(elem): input_ids = [] attention_mask = [] input_ids = elem['input_ids'] attention_mask = elem['attention_mask'] y = torch.Tensor([elem["y"]]).long() input_ids = torch.Tensor(input_ids).long() attention_mask = torch.Tensor(attention_mask).long() return input_ids, attention_mask, y ids, am, y = data_segmenter(train_data[0]) print(ids.shape) from sklearn import metrics def train_twitter(input_ids, attention_mask, targets, optimizer, scheduler): acc = 0 outputs = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels = targets) loss = outputs[0] logits = outputs[1] # preds = preds.cpu().detach().numpy() _, prediction = torch.max(outputs[1], dim=1) # print(prediction) targets = targets.cpu().detach().numpy() prediction = prediction.cpu().detach().numpy() accuracy = metrics.accuracy_score(targets, prediction) acc = accuracy loss.backward() nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) optimizer.step() scheduler.step() optimizer.zero_grad() return acc, loss.item() def eval_model(input_ids, attention_mask, targets): loss = 0 acc = 0 with torch.no_grad(): outputs = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels = targets) loss = outputs[0] logits = outputs[1] _, prediction = torch.max(outputs[1], dim=1) targets = targets.cpu().detach().numpy() prediction = prediction.cpu().detach().numpy() accuracy = metrics.accuracy_score(targets, prediction) acc += accuracy return acc, loss.item()Global VariablesEPOCHS = 10 # BATCH_SIZE = 2 # DATA_SIZE = 512 param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay':0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=3e-5) total_steps = len(train_data) * EPOCHS scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=total_steps ) import 
timeTrainhistory_train = defaultdict(list) best_accuracy = 0 start_time = time.time() for epoch in range(EPOCHS): train_acc = 0 train_loss = [] it = 0 model = model.train() while it < len(train_data): input_ids, attention_mask, targets = data_segmenter(train_data[it]) temp_acc, temp_loss = train_twitter( input_ids, attention_mask, targets, optimizer, scheduler ) train_acc += temp_acc train_loss.append(temp_loss) it += 1 print(f"[EPOCH {epoch + 1}, BATCH {it} of {len(train_data)}] {targets[0]} ->ACC: {train_acc/it}") with open(f'Results/ACC/acum_acc_train_epoch_{epoch+1}.txt', 'w') as acc_file: acc_file.write(f"[EPOCH {epoch + 1}, BATCH {it} of {len(train_data)}] ->ACC: {train_acc/it}") history_train['train_acc'].append(train_acc / it) history_train['train_loss'].append(np.mean(train_loss)) with open('history_train.json', 'w') as hist_json: json.dump(history_train, hist_json) model.save_pretrained(f'Results/Models/Model-{epoch+1}') if best_accuracy < history_train["train_acc"][-1]: best_accuracy = history_train["train_acc"][-1] print(f'Train loss {history_train["train_loss"][-1]} Train accuracy {history_train["train_acc"][-1]}') print() print("Losses:") print(history_train['train_loss']) print("Accuracy:") print(history_train['train_acc']) print("Best Accuracy:", best_accuracy) total_time = time.time() - start_time with open('train_time.txt', 'w') as time_file: time_file.write(f'Total_time: {total_time}')[EPOCH 1, BATCH 1 of 6941] 1 ->ACC: 1.0 [EPOCH 1, BATCH 2 of 6941] 0 ->ACC: 0.5 [EPOCH 1, BATCH 3 of 6941] 0 ->ACC: 0.3333333333333333 [EPOCH 1, BATCH 4 of 6941] 0 ->ACC: 0.5 [EPOCH 1, BATCH 5 of 6941] 0 ->ACC: 0.4 [EPOCH 1, BATCH 6 of 6941] 0 ->ACC: 0.5 [EPOCH 1, BATCH 7 of 6941] 0 ->ACC: 0.5714285714285714 [EPOCH 1, BATCH 8 of 6941] 0 ->ACC: 0.625 [EPOCH 1, BATCH 9 of 6941] 0 ->ACC: 0.6666666666666666 [EPOCH 1, BATCH 10 of 6941] 1 ->ACC: 0.6 [EPOCH 1, BATCH 11 of 6941] 0 ->ACC: 0.6363636363636364 [EPOCH 1, BATCH 12 of 6941] 1 ->ACC: 0.5833333333333334 [EPOCH 1, BATCH 13 of 6941] 1 ->ACC: 0.5384615384615384 [EPOCH 1, BATCH 14 of 6941] 0 ->ACC: 0.5714285714285714 [EPOCH 1, BATCH 15 of 6941] 0 ->ACC: 0.6 [EPOCH 1, BATCH 16 of 6941] 1 ->ACC: 0.5625 [EPOCH 1, BATCH 17 of 6941] 0 ->ACC: 0.5882352941176471 [EPOCH 1, BATCH 18 of 6941] 0 ->ACC: 0.6111111111111112 [EPOCH 1, BATCH 19 of 6941] 0 ->ACC: 0.631578947368421 [EPOCH 1, BATCH 20 of 6941] 0 ->ACC: 0.65 [EPOCH 1, BATCH 21 of 6941] 0 ->AC[...]Testhistory_test = defaultdict(list) best_accuracy = 0 train_acc = 0 train_loss = [] it = 0 start_time = time.time() for epoch in range(EPOCHS): test_acc = 0 test_loss = [] it = 0 model = XLNetForSequenceClassification.from_pretrained(f'Model-{epoch+1}', num_labels = 2) model = model.eval() while it < len(test_data): input_ids, attention_mask, targets = data_segmenter(test_data[it]) temp_acc, temp_loss = eval_model( model, input_ids, attention_mask, targets ) test_acc += temp_acc test_loss.append(temp_loss) it += 1 print(f"[EPOCH {epoch+1}, BATCH {it} of {len(test_data)}] {targets[0]} ->AC: {test_acc/it}") with open(f'Results/ACC/acum_acc_test_epoch_{epoch+1}.txt', 'w') as acc_file: acc_file.write(f"[EPOCH {epoch + 1}, BATCH {it} of {len(test_data)}] ->ACC: {train_acc/it}") history_test['test_acc'].append(test_acc / it) history_test['test_loss'].append(np.mean(test_loss)) print(f'Test loss {history_test["test_loss"][-1]} Test accuracy {history_test["test_acc"][-1]}') print() if best_accuracy < history_test["test_acc"][-1]: best_accuracy = history_test["test_acc"][-1] with open('history_test.json', 'w') as 
hist_json: json.dump(history_test, hist_json) print("Losses:") print(history_test['test_loss']) print("Accuracy:") print(history_test['test_acc']) print("Best Accuracy:", best_accuracy) total_time = time.time() - start_time with open('test_time.txt', 'w') as time_file: time_file.write(f'Total_time: {total_time}')Download keras-yolo3 projectGithub: https://github.com/sleepless-se/keras-yolo3(本家)自分のgit : https://github.com/tsuna-can/yolo-test.git Clone sleepless-se/keras-yolo3.git#自分のやつtinyに変更済み !git clone https://github.com/tsuna-can/yolo-test.git %cd yolo-testInstall requirementsインストールした後にランタイムを再起動するのを忘れずに!pip install -r requirements.txtUpload VoTT export file and directory (.zip)Please upload your Archive.zip%cd VOCDevkit/VOC2007 %cd /content/yolo-test/VOCDevkit/VOC2007 from google.colab import files uploaded = files.upload() !lsUnzip and make train *files*!unzip Archive %cd /content/yolo-test/ !python make_train_files.pyConvert annotations for YOLOPlease set your *classes*フラグでクラスを指定する!python voc_annotation.py tree tree_whiteTrain model!python train.pyDownload trainde weights and classes fileダウンロードがブロックされることがあるので注意#weight trained = 'logs/000/trained_weights_final.h5' files.download(trained) #クラス名 classes = "model_data/voc_classes.txt" files.download(classes) #train train_imgs = "model_data/2007_train.txt" files.download(train_imgs) #val val_imgs = "model_data/2007_val.txt" files.download(val_imgs) #test test_imgs = "model_data/2007_test.txt" files.download(test_imgs)Predict by new model結果はresult.jpgとして保存されるようにしてありますカーネルを再起動した時はディレクトリを移動し、voc_classes.txtを書き換えて、logs/000/にweightファイルをアップロードする。weightファイルが破損する可能性があるので、必ずアップロードボタンからアップロードする!python tiny_yolo_video.py --imageAuthor: Place: Helsinki, Finland Driving Behaviour Analysis Segmentation: goals and challengesSegmentation (or technically knowns as clustering) is a machine learning problem where the goal is to learn a generative model (function and its parameters) in a unsupervised setting such that given a set of users having multi-dimensional features, predict for each user its cluster or segment membership. The development of robust and efficient segementation models is a two-fold data science approach: (1) data exploration, (2) machine learning model selection and evaluation.As a first step, exploratory analysis of the data is essential to gain insights into the problem as well as into the data and to identify the underlying modeling challenges. Moreover, data exploration also helps to formulate the appropriate modeling assumptions and experimental design while considering the resources at hand. As a second step, selection of the optimal model is crucial such that it generalizes to the new data. 
The optimal model can be selected based on the performance metric obtained through rigorous evaluation of the modeling assumptions using a principled experimental design.#import libraries necessary for this project import numpy as np import pandas as pd import copy import itertools from IPython.display import display # Allows the use of display() for DataFrames # pretty inline display for jupyter notebooks import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns %matplotlib inline # import libraries that support data science tasks from scipy import linalg from sklearn.decomposition import PCA from sklearn import mixture from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV np.random.seed(1606) #some auxiliary functions to support visualization and analysis task def visualizePCAResults(good_data, pca): # Dimension indexing dimensions = dimensions = ['Component {}'.format(i) for i in range(1,len(pca.components_)+1)] # PCA components components = pd.DataFrame(np.round(pca.components_, 4), columns = list(good_data.keys())) components.index = dimensions # PCA explained variance ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1) variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance']) variance_ratios.index = dimensions # Create a bar plot visualization fig, ax = plt.subplots(figsize = (14,8)) # Plot the feature weights as a function of the components components.plot(ax = ax, kind = 'bar'); ax.set_ylabel("Feature Weights") ax.set_xticklabels(dimensions, rotation=0) # Display the explained variance ratios for i, ev in enumerate(pca.explained_variance_ratio_): ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev)) # Return a concatenated DataFrame return pd.concat([variance_ratios, components], axis = 1) def visualizeBiPlot(good_data, reduced_data, pca): fig, ax = plt.subplots(figsize = (14,8)) # scatterplot of the pca data ax.scatter(x=reduced_data.loc[:, 'Component 1'], y=reduced_data.loc[:, 'Component 2'], facecolors='b', edgecolors='b', s=70, alpha=0.5) feature_vectors = pca.components_.T # we use scaling factors to make the arrows easier to see arrow_size, text_pos = 7.0, 8.0, # projections of the original features for i, v in enumerate(feature_vectors): ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1], head_width=0.2, head_length=0.2, linewidth=2, color='red') ax.text(v[0]*text_pos, v[1]*text_pos, good_data.columns[i], color='black', ha='center', va='center', fontsize=18) ax.set_xlabel("Component 1", fontsize=14) ax.set_ylabel("Component 2", fontsize=14) ax.set_title("PC plane with original feature projections.", fontsize=16); return ax def visualizeSegments(X, Y_, means, covariances, index, title): plt.figure(figsize=(10,8)) splot = plt.subplot(2, 1, 1 + index) for i, (mean, covar, color) in enumerate(zip( means, covariances, color_iter)): #print(covar) #covar=np.diag(covar) v, w = linalg.eigh(covar) v = 2. * np.sqrt(2.) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180. * angle / np.pi # convert to degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. 
+ angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) plt.xlim(-3., 5.) plt.ylim(-3., 5.) plt.xticks(()) plt.yticks(()) plt.title(title) #the above function has been adopted/ taken from sklearn code examples.Exploratory Analysis of the DataUnderstanding data through exploration is the key to every data science problem. Exploration helps to develop hypothesis on the data generation process and to formulate approriate assumptions for the modelling task. 1. I started looking at the data dimensionalities and noticed that the data contains 165 samples and 5 features in addition to one meta-data feature namely CarID. 2. Since there are very few features, the visual inspection already showed that the data ranges are quite different. Some features have values in very high range while other features have values in lower ranges (Remark: need some standardization to remove the feature induced biases or artifacts!)3. Another important finding form the data exploration is about the particular values of the features. Certainly the negative or zero values would not make any sense (e.g., fuel consumption or distance or Speed cannot be negative) in this problem and should be assumed as measurement errors and should be discarded from the data. Hence, I removed samples with negative or zero values in addition to missing or null values. 4. To further understand the structure or potential relationhsips that could exist in the data, i. I explored the pairwise distribution of features across the dataset to get hints on features which are correlated or have some type of associations. ii. I also studied the PCA analysis of the dataset to further investigate the joint variations of features. Remark: in practice all the data exploration should be done on training data only, however, for the sake of this assignment I was willing to welcome some over-fitting!# load the car data dataset try: drivingData = pd.read_csv("Car_data.txt", delimiter="\t",decimal=",",encoding='utf-8',na_values=[' ']) print("Car dataset has {} samples with {} features each.".format(*drivingData.shape)) except: print("Dataset could not be loaded. Is the dataset missing?") #quick visual analysis of the few samples drivingData.head(10) drivingData['Average fuel consumption'] = [str(x).replace(',', '.') for x in drivingData['Average fuel consumption']] drivingData['Average fuel consumption'] = drivingData['Average fuel consumption'].astype(float) #set index to CarID column drivingData.set_index(['CarID'], inplace=True) drivingData.head(5) # display a description of the dataset display(drivingData.describe()) #check for NaN or missing values drivingData.isnull().sum() #drop rows having NaN or missing values drivingData.dropna(inplace=True) print(drivingData.shape) drivingData.head(5) #drop duplicated rows drivingData.drop_duplicates(inplace=True) print(drivingData.shape) drivingData.head(5) drivingData=drivingData[(drivingData > 0).all(1)] drivingData.shape drivingData.head(5) # produce a feature-wise correlation heatmap of the dataset sns.heatmap(drivingData.corr(), annot=True)A key observation from the feature-wise scatter plot is that there exist outliers in features: Max Speed and Distance. 
Usually the clustering models are sensitive to outliers, probably good to remove these outlier samples.# produce a scatter plot matrix for each pair of features in the data pd.plotting.scatter_matrix(drivingData, alpha=1,figsize = (16,10), diagonal = 'kde');What are the modeling challenges(?):In the proposed problem, exploratory analysis of the data identifies three fundamental modeling challenges:1. Small sample-size, diverse feature-level distributions2. Clear structure or pattern is not obvious from the observed data3. No ground truth is available on the true number of segments What is my solution(?):To solve the challenges mentioned above, my solution combines three machine learning principles:1. Data Cleaning, Normalization and Standardization,2. Data transformation to lower-dimensional space to reveal the hidden structure and pattern,3. Automatic model complexity selection using Bayesian GMM 1. Data Cleaning, Normalization and StandardizationAs a normalization step, element-wise natural log is taken of the complete dataset, where the goal was to make the feature values comparable such that the variation in higher values do not tend the models to be biased towards these values. Next as a standardization step, to balance the feature-wise scales, z-transformation was performed. After this standardization each features has mean=0 and standard deviation=1 meaning all features have equal scales/ranges. In other words, while doing this standardization I aprior assume that all features are equally important. Though the data exploration steps revealed potential relevant features, I intended to adopt a purely data-driven approach and letting the models choose the best features. On the contrary, datasets/problems comprising of a large number of features and very small sample sizes, I would make use of the data exploration results and any prior knowledge (available from the expert) to be included in the model in a more systematic way.Additionally, as data cleaning steps, I considered samples whose feature values are 2.5 times greater than the interquartile range (comprising of 25th and 75th percentile of the specific feature values) as outliers and were removed from the collection. 2. Data transformation to lower-dimensional space to reveal the hidden structure and pattern Principal Component Analysis (PCA):In the realm of small sample-sized datasets, it is statistically challenging to infer model parameters accurately and more advanced machine learning models can be developed that favor sparse solutions (imposing regularization that shuts down the unnecessary features). Alternatively, dimensionality reduction is a more straightforward and ready-to-implement solution. To this end, matrix factorization has evolved as the state-of- art tool for dimensionality reduction and data visualization. Essentially Principal Component Analysis (PCA) is a matrix factorization approach that decomposes the observed data into multiple low-dimensional latent factors (also known as “components”). The underlying assumption is that the combination of multiple latent factors has generated the observed data, corrupted with some noise. However, each combination has generated some parts of the data. The machine learning aim is then to learn these components thereby capturing the strongest variation patterns for individual features in a principled way. In this way, the latent factors can then be seen as “noise-free” low-dimensional representation of the data and the replacement for noisy features. 
Moreover, studying the latent representation can also help reveal useful patterns hidden in the data.

3. Automatic model complexity selection using Bayesian GMM

Bayesian Gaussian Mixture Models: A Gaussian mixture model (GMM) is one of the most widely used generative models in machine learning. Essentially, a GMM is a latent variable model that assumes the observed data points are generated from a mixture of latent Gaussian distributions with unknown parameters (means and variances). The latent variables or components are then inferred using an Expectation-Maximization (EM) algorithm. The Bayesian variant of the GMM allows placing priors on the unknown mean and variance parameters of each component and, instead of point estimates, yields full posterior distributions over the latent variables.

In a generative machine learning model (for instance a GMM), the latent variables or components represent our prior beliefs about the model structure and the underlying data generation process. Since we have little knowledge about the real-world process and the degree of noise that has corrupted the data, it is difficult to choose suitable components a priori (in other words, to fix the true model complexity in advance). We have to resort to optimization methods to supplement our beliefs about the potentially appropriate choices of components. The performance and generalization properties of the model depend heavily on the chosen model complexity, and as the number of features grows or shrinks, model complexity selection becomes more challenging.

A classical approach to estimating the number of components is cross-validation (CV). Here, I used CV as a comparison approach to automatic component selection. I randomly partitioned the given data into a training set (90%) and a held-out set (10%, to be used for validation). In practice, I performed 10-fold cross-validation on the training data, where in each fold of the CV run 1/10th of the data was held out as a validation set at random while the model was trained on the remaining 9/10. Lastly, predictive scores (here, the per-test-sample average log-likelihood) were computed across the predictions obtained from a complete round of the CV experiment, and the number of components that maximized the predictive score was selected.

In contrast to the classical CV approach, Bayesian machinery provides a framework for automatic component selection. In practice, we assume a suitable prior distribution on the latent variables or components and estimate the posterior distribution using the data likelihood guided by the prior. Specifically, sparse priors (e.g., a Dirichlet process prior, Laplace, or Cauchy) guide the inference procedure to extract the relevant structure from the data and restrict the number of components that are truly needed to capture the variation in the data. In other words, excessive components are automatically shut down (pushed to zero) during inference. In this way, even if we choose a higher number of components, the model will learn the true components in a data-driven manner.

1. 
Data Cleaning, Normalization and StandardizationnormalizedDrivingData = copy.deepcopy(drivingData) normalizedDrivingData = normalizedDrivingData.applymap(np.log) normalizedDrivingData = (normalizedDrivingData-normalizedDrivingData.mean())/normalizedDrivingData.std() normalizedDrivingData.head(5) normalizedDrivingData.std() # produce a feature-wise correlation heatmap of the dataset sns.heatmap(normalizedDrivingData.corr(), annot=True) # produce a scatter plot matrix for each pair of features in the data pp = pd.plotting.scatter_matrix(normalizedDrivingData, alpha=1,figsize = (16,10), diagonal = 'kde') # Select the indices for data points you wish to remove outliers_lst = [] # For each feature find the data points with extreme high or low values for feature in normalizedDrivingData.columns: # TODO: Calculate Q1 (25th percentile of the data) for the given feature Q1 = np.percentile(normalizedDrivingData.loc[:, feature], 20) # TODO: Calculate Q3 (75th percentile of the data) for the given feature Q3 = np.percentile(normalizedDrivingData.loc[:, feature], 80) # TODO: Use the interquartile range to calculate an outlier step (2.5 times the interquartile range) step = 1.5 * (Q3 - Q1) # Display the outliers print("Data points considered outliers for the feature '{}':".format(feature)) # The tilde sign ~ means not # So here, we're finding any points outside of Q1 - step and Q3 + step outliers_rows = normalizedDrivingData.loc[~((normalizedDrivingData[feature] >= Q1 - step) & (normalizedDrivingData[feature] <= Q3 + step)), :] display(outliers_rows) outliers_lst.append(list(outliers_rows.index)) outliers = list(itertools.chain.from_iterable(outliers_lst)) # List of unique outliers # We use set() # Sets are lists with no duplicate entries uniq_outliers = list(set(outliers)) # List of duplicate outliers dup_outliers = list(set([x for x in outliers if outliers.count(x) > 1])) print('Outliers list:\n', uniq_outliers) print('Length of outliers list:\n', len(uniq_outliers)) print('Duplicate list:\n', dup_outliers) print('Length of duplicates list:\n', len(dup_outliers)) # Remove duplicate outliers processedDrivingData = copy.deepcopy(normalizedDrivingData) processedDrivingData.drop(uniq_outliers, inplace=True) # Original Data print('Original shape of data:\n', normalizedDrivingData.shape) # Processed Data print('New shape of data:\n', processedDrivingData.shape) #processedDrivingData.index.names = ['CarID'] processedDrivingData.head(5) # produce a feature-wise correlation heatmap of the dataset sns.heatmap(processedDrivingData.corr(), annot=True) # produce a scatter plot matrix for each pair of features in the data pd.plotting.scatter_matrix(processedDrivingData, alpha=1,figsize = (16,10), diagonal = 'kde');2. Data transformation to lower-dimensional space to reveal the hidden structure and pattern# TODO: Apply PCA by fitting the processed driving data with the same number of dimensions as features # Instantiate pca = PCA(n_components=processedDrivingData.shape[1]) # Fit pca.fit(processedDrivingData) # Generate PCA results plot pcaResults = visualizePCAResults(processedDrivingData, pca)The PCA analysis helped to identify some key relationships between features. The knowledge on feature relationship can help formulate the modelling assumptions and test relevant hypothesese. For instance, PCA component captured the strongest variation across four features namely Average Speed, Average fuel consumption, Max Speed and Distance. 
It is fairly plausible to assume that these features are correlated in majority of the samples, specially Average Speed and Fuel Consumption, Max Speed and Distance. It is also practically relevant since speed is proportional to fuel consumption as well as more fuel burnt could imply more distance covered. The second component captured samples showing the second largest variation in Battery Charging Voltage. Altogether the first two components already explained more than 88% of the variation in the data. Whereas remaining three components capture smallest variation for individual features. More robust and reliable conclusions can be drawn by selecting the number of components in a principled way (for example using cross validation or Bayesian PCA)# DataFrame of results display(pcaResults) # DataFrame display(type(pcaResults)) # Cumulative explained variance should add to 1 display(pcaResults['Explained Variance'].cumsum()) pca = PCA(n_components=2) # Fit pca.fit(processedDrivingData) pcaTransformedData = pca.transform(processedDrivingData) pcaTransformedData = pd.DataFrame(pcaTransformedData, columns = ['Component 1', 'Component 2']) visualizeBiPlot(processedDrivingData, pcaTransformedData, pca) pcaTransformedData.index=processedDrivingData.index pcaTransformedData.head(5)Exploratory analysis using PCA revealed interesting pattern hidden in the data. The projections or transformation of the raw data on to the first two compoments already give hints on the underlying segments. For instance, the above figure shows the that may be three segments in the data set. First segment or cluster group samples based on Average fuel consumption and Average Speed, while the second segment clusters samples based on Max Speed and Distance and the third segment comprise of sample based on Battery Cahrging Voltage. Since this is an exploratory analysis, we cannot make any generalizable claims. Next, I adopted a generative machine learning model to infer unbiased data-driven predictions. 3. Automatic model complexity selection using Bayesian GMM# Fit a Gaussian mixture with EM using ten components on complete dataset gmm = mixture.GaussianMixture(n_components=10, covariance_type='full', max_iter=500,n_init=5, random_state=1111).fit(np.array(pcaTransformedData)) # Fit a Dirichlet process Bayesian Gaussian mixture using ten components on complete dataset bayesianGMM = mixture.BayesianGaussianMixture(n_components=10, covariance_type='full',max_iter=500,n_init=5, init_params='random', weight_concentration_prior_type='dirichlet_distribution', random_state=1111).fit(np.array(pcaTransformedData)) color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold', 'darkorange', 'red', 'magenta', 'steelblue', 'pink','green']) visualizeSegments(np.array(pcaTransformedData), gmm.predict(pcaTransformedData), gmm.means_, gmm.covariances_, 0, 'Model Selection with Gaussian Mixture Model') visualizeSegments(np.array(pcaTransformedData), bayesianGMM.predict(pcaTransformedData), bayesianGMM.means_, bayesianGMM.covariances_, 1, 'Model Selection with Bayesian Gaussian Mixture Model') plt.show()The above figure demonstrates the usefulness of the Bayesian GMM over traditional GMM. Both models have access to ten components with which to fit the data. The standard GMM model unnecessarily used all ten components while the Bayesian GMM model effectively only used as many as are needed to fully explain the variation is the data. 
Here we can notice that the GMM model splits some components arbitrarily, because it is trying to fit too many components, while the Dirichlet Process prior based Bayesian GMM model adapts model complexity automatically.This figure doesn’t show it, as we’re in a low-dimensional space, but another advantage of the Bayesian GMM model is that it can fit full covariance matrices effectively even when there are less examples per segment than there are dimensions in the data, due to regularization properties of the model.The above analysis was performed to show the proof-of-concept difference between the standard and Bayesian GMM model. I next performed a cross-validation procedure to obtain more robust and generalizable estimates on the true number of model components.def computeCVLogLikelihood(X,nComponents): ####Quite time consuming, use other covariance_type to 'diag', 'tied' or spherical for speed-up #to run the CV process quickly, set max_iter to 5000 and n_init=10 #for quick runs, set set max_iter to 1000 and n_init=1 gmm = mixture.GaussianMixture(covariance_type='full', max_iter=500,n_init=5, random_state=1111) bayesianGMM = mixture.BayesianGaussianMixture(covariance_type='full', max_iter=500,n_init=5, init_params='random', weight_concentration_prior_type='dirichlet_distribution', random_state=1111) gmm_LL, bayesianGMM_LL = [], [] for n in nComponents: gmm.n_components = n bayesianGMM.n_components = n bayesianGMM_LL.append(np.mean(cross_val_score(bayesianGMM, X, cv=10))) gmm_LL.append(np.mean(cross_val_score(gmm, X, cv=10))) return gmm_LL, bayesianGMM_LL nComponents = range(1,11) gmm_LL, bayesianGMM_LL = computeCVLogLikelihood(pcaTransformedData,nComponents) n_components_gmm = nComponents[np.argmax(gmm_LL)] n_components_bgmm = nComponents[np.argmax(bayesianGMM_LL)] print("best n_components by GMM CV = %d" % n_components_gmm) print("best n_components by Bayesian GMM CV = %d" % n_components_bgmm) plt.figure() plt.plot(nComponents, gmm_LL, 'b', label='GMM scores') plt.plot(nComponents, bayesianGMM_LL, 'r', label='Bayesian GMM scores') plt.axvline(n_components_gmm, color='b', label='GMM CV: %d' % n_components_gmm, linestyle='--') plt.axvline(n_components_bgmm, color='r', label='Bayesian GMM CV: %d' % n_components_bgmm, linestyle='--') plt.xlabel('nb of components') plt.ylabel('CV scores') plt.legend(loc='best') plt.title('Model Selection') plt.show()best n_components by GMM CV = 2 best n_components by Bayesian GMM CV = 3The CV experiments clearly demosntrated the usefulness of Bayesian GMM to automatically select the number of components. Since, the standard GMM uses all the components to fit the data, resultantly starts to overfit with increasing number of components. 
Whereas Bayesian GMM only uses the components needed to model the data and shut-down excessive components without compromising the accuracy.# Fit a Dirichlet process Bayesian Gaussian mixture on complete dataset bayesianGMM = mixture.BayesianGaussianMixture(n_components=n_components_bgmm, covariance_type='full', max_iter=500,n_init=5, init_params='random', weight_concentration_prior_type='dirichlet_distribution', random_state=1111).fit(np.array(pcaTransformedData)) prediction = pd.DataFrame(bayesianGMM.predict(pcaTransformedData)+1, columns=['Segments ID']) prediction.index = pcaTransformedData.index prediction #adding the outlier samples to complete the predictions for all 165 samples finalPredictions = pd.DataFrame(index=pd.Series(np.arange(1,166)), columns=['Segments ID']) finalPredictions.index.names = ['CarID'] finalPredictions finalPredictions.loc[prediction.index] = prediction finalPredictions finalPredictions.to_csv(path_or_buf='Segementation.csv',na_rep='Outlier',header=True,sep='\t')Importing the datasetdataset = pd.read_csv('../../data/all_records_northeastern.csv')Dropping the column, Unnamed as it is not necessarydataset.drop(columns=['Unnamed: 0'],inplace=True) dataset.head()Count of accept and reject in datasetdataset.status.value_counts()As we see from above stats, our data is baised so we need to resample the data, in order to balanced datasetbalanced_data=resample(dataset[dataset.status=='accept'],replace=True,n_samples=1000,random_state=123) balanced_data=balanced_data.append(dataset[dataset.status=='reject']) balanced_data.status.value_counts() encoded_dataset=balanced_data encoded_dataset.head()Defining labels-X and Tragets-YX=encoded_dataset[['gre_score_quant','gre_score_verbal','test_score_toefl','undergraduation_score','work_ex','papers_published']].copy() Y=encoded_dataset[['status']].copy()Splitting the dataset_encoded into training and testing datasetX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2,random_state=1)Training the modeldef modeltraining(model,X_train,X_test,Y_train,Y_test): sc = StandardScaler() sc.fit(X_train) X_train = sc.transform(X_train) X_test = sc.transform(X_test) print(X_test) model.fit(X_train,Y_train) predicted_labels_test=model.predict(X_test) predicted_labels_train=model.predict(X_train) accuracy_test=accuracy_score(Y_test,predicted_labels_test) accuracy_train=accuracy_score(Y_train,predicted_labels_train) return model,predicted_labels_test,predicted_labels_train,accuracy_test,accuracy_train,scCalling RandomForest modelmodel=RandomForestClassifier() model,predicted_labels_test,predicted_labels_train,accuracy_test,accuracy_train,sc=modeltraining(model,X_train,X_test,Y_train,Y_test)[[-0.1189225 0.73491402 0.39076816 -0.36262002 -0.62058721 1.89123818] [ 0.78723151 -0.2849141 0.43531487 0.59052431 0.41333471 1.89123818] [ 0.10761601 -0.48887972 0.12348788 -0.03768445 0.2308779 -0.59699686] ... 
[ 1.24030852 1.5507765 0.70259515 0.87213514 -1.04631976 -0.59699686] [ 1.01377002 1.5507765 -4.37573011 -2.09560971 1.14316194 -0.59699686] [ 0.33415451 2.16267337 0.5244083 -0.01602208 0.83906726 -0.59699686]]Accuracy of Test dataaccuracy_testAccuracy of Train dataaccuracy_train model print(classification_report(Y_test,predicted_labels_test)) print(classification_report(Y_train,predicted_labels_train))precision recall f1-score support accept 0.98 1.00 0.99 801 reject 1.00 0.98 0.99 862 micro avg 0.99 0.99 0.99 1663 macro avg 0.99 0.99 0.99 1663 weighted avg 0.99 0.99 0.99 1663Hypertuning the parameters using grid search, tune the max_depth and number of estimatorskf = KFold(n_splits=5) kf.get_n_splits(X) param_grid = {"n_estimators": [10,15,20,30], "criterion": ['gini'], "max_depth": [10,15,20,25], "bootstrap": [True], "min_samples_leaf": [0.5,1,2] } # run grid search grid_search = GridSearchCV(model, param_grid, cv=5,return_train_score=True) model,predicted_labels_test,predicted_labels_train,accuracy_test,accuracy_train,sc=modeltraining(grid_search,X_train,X_test,Y_train,Y_test)[[-0.1189225 0.73491402 0.39076816 -0.36262002 -0.62058721 1.89123818] [ 0.78723151 -0.2849141 0.43531487 0.59052431 0.41333471 1.89123818] [ 0.10761601 -0.48887972 0.12348788 -0.03768445 0.2308779 -0.59699686] ... [ 1.24030852 1.5507765 0.70259515 0.87213514 -1.04631976 -0.59699686] [ 1.01377002 1.5507765 -4.37573011 -2.09560971 1.14316194 -0.59699686] [ 0.33415451 2.16267337 0.5244083 -0.01602208 0.83906726 -0.59699686]]Accuracy of test data after grid searchaccuracy_test accuracy_train sc grid_search.best_estimator_ grid_search.best_params_Confusion matrix:print(confusion_matrix(Y_test,predicted_labels_test))[[182 17] [ 47 170]]Saving the pickle file of model and scaler# Dump the trained decision tree classifier with Pickle rf_classifier_pkl_filename = r'..\..\model\university_random_forest_predict.pickel' standard_scaler_filename = r'..\..\model\UniversityRFstandardScaler_rf_model.pickel' random_forest_classifier_model_pkl = open(rf_classifier_pkl_filename, 'wb') pickle.dump(model, random_forest_classifier_model_pkl) random_forest_classifier_model_pkl.close() sc_rf_classifier_scaler_pkl = open(standard_scaler_filename, 'wb') pickle.dump(sc, sc_rf_classifier_scaler_pkl) sc_rf_classifier_scaler_pkl.close() random_forest_classifier_model_pkl = open(rf_classifier_pkl_filename, 'rb') random_forest_classifier_model= pickle.load(random_forest_classifier_model_pkl) random_forest_classifier_model_pkl.close() sc_rf_classifier_scaler_pkl = open(standard_scaler_filename, 'rb') standard_scaler_rf_classifier= pickle.load(sc_rf_classifier_scaler_pkl) sc_rf_classifier_scaler_pkl.close()Title of Workbook This template is intended to provide a base to work from in Data Science and Analytics. Some simple functions and imports are included for example. 
Setup importsExample and common Data Science packages are included for the importimport os, os.path import pandas as pd import numpy as np from PIL import Image import matplotlib.pyplot as plt from pathlib import Path import daskSet Variablesx = 1 #placeholderCreate Functions Test if a file existsdef testFileExisits(locationPath): print(locationPath) assert os.path.exists(locationPath)Test if a path exists or createdef createLocationPath(locationPath): print(locationPath) path = Path(locationPath) path.mkdir(parents=True, exist_ok=True) assert os.path.exists(locationPath)Check which OS is running the workbookdef detectOS(): if (os.name == "nt"): #Windows print("This is Windows") elif (os.name == "posix"): #Linux print("This is Linux") else: print("Unknown OS")Create Objectsclass exampleClass(object): def __init__(self, inputString): self.savedString = inputString def printInput(self, inputString): print(inputString) def printSavedInput(self): print(self.savedString)Instantiate ObjectstheObject = exampleClass("Hello World")Run StuffdetectOS() theObject.printSavedInput() theObject.printInput("Hello Universe")Getting Started===============Installation------------You can install the package with pip from [PyPI](https://pypi.org/project/timeatlas/) pip install timeatlasHandle TimeSeries-----------------TimeAtlas is a library to handle time series of any kind. Let's create aTimeSeries object.from timeatlas import TimeSeries from pandas import DataFrame, DatetimeIndex index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04','2019-01-05', '2019-01-06', '2019-01-07', '2019-01-08','2019-01-09', '2019-01-10', '2019-01-11', '2019-01-12']) my_series = DataFrame([0.4, 1.0, 0.7, 0.6, 0.4, 1.0, 0.7, 0.6, 0.4, 1.0, 0.7, 0.6], index=index) ts = TimeSeries(my_series) tsLike in Pandas, you can check its main characteristics with `TimeSeries.describe()`ts.describe()You can visualize it with the `TimeSeries.plot()` functionts.plot()What about Metadata ?---------------------TimeAtlas includes a Metadata object allowing you to add some typed metadataobject. 
For instance :from timeatlas import Metadata, types my_unit = types.Unit("power", "W", "float") my_sensor = types.Sensor(2902, "HB/floor2/22-23C/Prises_Power_Tot") my_coords = types.Coords(46.796611, 7.147563) # You can also use Python dictionaries my_location = { "building" : "Blue Factory", "floor" : "12", "room" : "22C" } my_dict = { "unit": my_unit, "sensor": my_sensor, "location": my_location, "coordinates": my_coords } # Create the Metadata object my_meta = Metadata(my_dict) my_metaAnd we can create a `TimeSeries` object with its associated metadata.ts_meta = TimeSeries(my_series, my_meta) ts_meta.plot()Preprocessingimport numpy as np import spacy import gensim import os import re from gensim import corpora from nltk.tokenize import sent_tokenize import pandas as pd nlp = spacy.load("en_core_web_md") from bs4 import BeautifulSoup from os import mkdir import requests #Dicitionary used to access full book text in HTML format urls = {'foucault_madness_and_civ':'https://archive.org/stream/Michel_Foucault_Madness_And_Civilization/Michel%20Foucault%2C%20Richard%20Howard%20%28transl.%29%20-%20Madness%20and%20Civilization_%20A%20History%20of%20Insanity%20in%20the%20Age%20of%20Reason%20%282013%2C%20Vintage%29_djvu.txt', 'foucault_discipline_and_punish': 'https://archive.org/stream/MichelFoucaultDisciplineAndPunish/Michel%20Foucault%20-%20Discipline%20and%20Punish_djvu.txt', 'foucault_history_of_sexuality':'https://archive.org/stream/TheHistoryOfSexualityVol13/The-History-Of-Sexuality-Vol-1-3_djvu.txt', 'chomsky_media_control': 'https://archive.org/stream/media_Noam_Chomsky-Media_Control/Noam_Chomsky-Media_Control_djvu.txt', 'chomsky_american_power': 'https://archive.org/stream/AmericanPowerAndTheNewMandarins_201805/American%20Power%20And%20The%20New%20Mandarins_djvu.txt', 'chomsky_manufacturing_consent': 'https://archive.org/stream/revhosatx14/%5BEdward_S._Herman%2C_Noam_Chomsky%5D_Manufacturing_Con%28b-ok.org%29_djvu.txt'} def file_books(title, link): '''Create directories for book from title and link''' #Access HTML webpage on Internet Archive r = requests.get(link) data = r.text soup = BeautifulSoup(data) #Full text is in
     part of HTML doc
        book = soup.pre.string
        #Write book as text file, save file
        with open(f'./data/{title}.txt', 'w', encoding='utf-8') as book_file:
            book_file.write(book)
            #Make a folder for each book
            mkdir(f'./data/{title}_extracts')
    def split_book(title, n_lines=5):
        '''Split a text file based on a number of lines, book title'''
        #Find file path based on title
        filepath = f'./data/{title}.txt'
        #Extract directory and filename from file path
        path, filename = os.path.split(filepath)
        #Change path to book's directory
        path += f'/{title}_extracts'
        # filename.split('.') would not work for filenames with more than one .
        basename, ext = os.path.splitext(filename)
        #open input file
        with open(filepath, 'r', encoding='utf-8') as book_file:
            try:
                #open the first output(extract) file
                extract_file = open(os.path.join(path, '{}_{}{}'.format(basename, 0, ext)), 'w', encoding='utf-8')
                #Loop over all lines of input file, number them
                for i, sentence in enumerate(sent_tokenize(book_file.read())):
                    #Close extract file and open a new one
                    #When the line number % desired n_lines is 0
                    if i % n_lines == 0:
                        extract_file.close()
                        #Open the next output file to write the next extract
                        extract_file = open(os.path.join(path, '{}_{}{}'.format(basename, i/100, ext)), 'w', encoding='utf-8')
                    #write the line to extract file
                    extract_file.write(sentence)
            finally:
                #close last output file
                extract_file.close()
    if not os.path.isdir('./data'):  
        os.mkdir('./data')
    for title, link in urls.items():
        if not os.path.isdir(f'./data/{title}_extracts'):
            file_books(title, link)
            split_book(title)
    def gather_data(path_to_data):
        path = f'./data/{path_to_data}'
        data = {'extracts': [], 'author': []}
        #For file at the given path
        for file in os.listdir(path):
            #If the directory is not a folder
            if os.path.isdir(file) == False:
                #If the file type is .txt
                if file[-3:] == 'txt':
                    #Open each text file at the path provided
                    with open(os.path.join(path, file), encoding='utf-8') as t:
                        #Read and strip new line signal
                        text = t.read().replace('\n', ' ')
                        data['extracts'].append(str(text))
                        data['author'].append(path_to_data.split('_')[0])
        return data
    extracts_dirs = [folder for folder in os.listdir('./data') if (os.path.isdir(f'./data/{folder}') == True)]
    df_final = pd.DataFrame({'extracts': [], 'author': []})
    for directory in extracts_dirs:
        extracts = gather_data(directory)
        df = pd.DataFrame(extracts, columns = extracts.keys())
        df_final = pd.concat([df_final, df], axis=0)
    print(df_final.shape)
    df_final = df_final.reset_index()
    df_final = df_final[df_final['author'] != '.ipynb']
    df_final['author'].value_counts()
    df_final.shape
    import pickle
    file = open(f'./data/dataframe.pkl', 'wb')
    pickle.dump(df_final, file)
    file.close()Model  Train/Test Splitfrom sklearn.model_selection import train_test_split
    X = df_final['extracts']
    y = df_final['author']
    
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, shuffle=True)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, shuffle=True)Randomized Search w/ Naive Bayesfrom imblearn.ensemble import BalancedRandomForestClassifier
    from imblearn.pipeline import Pipeline
    from imblearn.over_sampling import SMOTE
    from imblearn.under_sampling import RandomUnderSampler
    from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import RandomizedSearchCV
    from sklearn.naive_bayes import MultinomialNB
    from gensim.parsing.preprocessing import preprocess_string
    # from sklearn.pipeline import Pipeline
    counter = TfidfVectorizer(
                              stop_words='english',
                              tokenizer=preprocess_string)
    
    rf = RandomForestClassifier()
    nb = MultinomialNB()
    rusampler = RandomUnderSampler()
    pipeline = Pipeline([('counter', counter),
                         ('sampler', rusampler),
                         ('bayes', nb)])
    parameters = {
        'counter__max_df': [i/100 for i in range(75, 100)],
        'counter__min_df': range(0, 10),
        'counter__ngram_range': [(1,2), (1, 3), (2,3), (3, 6), (2, 7)],
        'counter__analyzer': ['word', 'char', 'char_wb']
    }
    
    rand_search = RandomizedSearchCV(pipeline, parameters, cv=2, n_iter=8, n_jobs=1, verbose=2)
    best = rand_search.fit(X_train, y_train)
    best.best_estimator_.score(X_val, y_val)
    best.best_estimator_.score(X_test, y_test)
    best.best_estimator_.predict_proba([''])
    import pickle 
    filename = './models/nb_model.pkl'
    file = open(filename, 'wb')
    pickle.dump(best, file)
    file.close()
    file = open('./models/nb_model.pkl', 'rb')
    nb = pickle.load(file)
    file.close()Word Embeddings with Spacydef get_doc_vectors(words):
        # converts a list of words into their word vectors
        return nlp(words).vector
    get_doc_vectors('')
    vectors = df_final['extracts'].apply(get_doc_vectors).tolist()
    from sklearn.decomposition import PCA
    import pickle
    pca = PCA(2)
    word_vecs_2d = pca.fit_transform(vectors)
    word_vecs_2d.shape
    word_vecs_train = X_train.apply(get_doc_vectors).tolist()
    word_vecs_val = X_val.apply(get_doc_vectors).tolist()
    word_vecs_test = X_test.apply(get_doc_vectors).tolist()
    #Pickle 2D word vectors for use in dash app
    file = open(f'./models/word_vectors.pkl', 'wb')
    pickle.dump(word_vecs_2d, file)
    file.close()
    get_doc_vectors('no')
    pipeline.steps[1][1].kneighbors()[1]
    from imblearn.pipeline import make_pipeline
    from sklearn.neighbors import KNeighborsClassifier
    for k in range(1,50):
        pca = PCA(2)
        knn = KNeighborsClassifier(algorithm='kd_tree', n_neighbors = k, weights='distance', metric='euclidean')
        pipeline = make_pipeline(pca, rusampler, knn)
        pipeline.fit(word_vecs_train, y_train)
        new_obs_2d = pipeline.named_steps['pca'].transform(get_doc_vectors('').reshape(1, -1))
        print(k, pipeline.score(word_vecs_test, y_test), 
    #           pipeline.steps[1][1].kneighbors(new_obs_2d),
              pipeline.predict_proba(get_doc_vectors('').reshape(1, -1)))
    pca = PCA(2)
    k=2
    knn = KNeighborsClassifier(algorithm='kd_tree', n_neighbors = k, weights='distance', metric='euclidean')
    pipeline = make_pipeline(pca, knn)
    pipeline.fit(word_vecs_train, y_train)
    file = open(f'./models/model_k{k}.pkl', 'wb') # 2 is most balanced prediction probabilities
    pickle.dump(pipeline, file)
    file.close()
    pipeline.named_steps
    def display_results(text):
        file = open(f'./models/model_k{29}.pkl', 'rb')
        model = pickle.load(file)
        file.close()
        vector = get_doc_vectors(text)
        new_obs = vector.reshape(1, -1)
        print(new_obs.shape)
        new_obs_2d = model.named_steps['pca'].transform(new_obs)
        index = model.named_steps['kneighborsclassifier'].kneighbors(new_obs_2d)[1][0][0]
        text = df_final['extracts'][index]
        return f'The author most likely to have written your sample, "{text}", is {index}.'
    
    display_results(' was a great author')
    plt.scatter(x=word_vecs_2d[:, 0],y=word_vecs_2d[:,1])
    import seaborn as sns
    import matplotlib.pyplot as plt
    plt.figure(figsize=(10, 10))
    sns.scatterplot(word_vecs_2d[:,0], word_vecs_2d[:,1], hue=df_final['author'])
    plt.title('')Word Embeddings w/ Gensimfrom gensim.test.utils import common_texts
    from gensim.utils import  simple_preprocess, lemmatize
    preprocess_string(df_final['extracts'][3])
    import gensim
    from gensim import corpora
    from gensim.utils import simple_preprocess
    def read_corpus(series, tokens_only=False):
        for i, doc in enumerate(series):
            tokens = gensim.utils.simple_preprocess(doc)
            print(tokens)
            if tokens_only:
                yield tokens
            else:
                # For training data, add tags
                yield gensim.models.doc2vec.TaggedDocument(tokens, [i])
    next(read_corpus(df_final['extracts']))['rl', 'if', 'kjii', 'jp', 'kwsg', 'ffc', 'fffjil', 'ri', 'hscif', 'piini', 'pqb', 'ffi', 'ipil', 'igij', 'ssbhlil', 'ps', 'im', 'iilb', 'ii', 'msjr', 'tashi', 'nst', 'liii', 'sf', 'ic', 'tta', 'ksp', 'mr', 'izlv', 'siirhki', 'hhh', 'fjfi', 'imimmiumnmum', 'mm', 'ar', 'kttaskni', 'ks', 'ssi', 'sr', 'ziubuss', 'fmhlll', 'fiiblii', 'hi', 'hr', 'hhi', 'rwm', 'tret', 'sks', 'nh', 'ii']Extinction Efficiency FactorFigure 6.5 from Chapter 6 of *Interstellar and Intergalactic Medium* by , 2021, Cambridge University Press.Plot the efficiency factor Q$_{ext}$ for two values of the real index of refraction, $n_r=1.5$ (glass) and $n_r=1.33$ (water ice).Uses van de Hulst's method to compute Mie scattering in the astrophysically interesting limit that thespherical scatterer is large compared to the wavelength of light ($2\pi a >> \lambda$) and only moderatelyrefractive and absorptive at wavelengths of interest ($|n-1|<1$).  In this limit, the solution for the purescattering case yeilds an efficiency factor\begin{equation}   Q_{ext} = Q_{sca}\approx 2-\frac{4}{\varrho}\sin\varrho + \frac{4}{\varrho^2}(1-\cos\varrho)\end{equation}where\begin{equation}   \varrho = 2\left(\frac{2\pi a}{\lambda}\right)|n_r-1|\end{equation}in the short-wavelength limit this function approaches the limiting value of $Q_{ext}=2.0$.%matplotlib inline
    
    import os
    import sys
    import math
    import numpy as np
    import matplotlib
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MultipleLocator, LogLocator, NullFormatter
    
    import warnings
    warnings.filterwarnings('ignore',category=UserWarning, append=True)Standard Plot FormatSetup the standard plotting format and make the plot. Fonts and resolution adopted follow CUP style.figName = 'Fig6_5' 
    
    # graphic aspect ratio = width/height
    
    aspect = 4.0/3.0 # 4:3
    
    # Text width in inches - don't change, this is defined by the print layout
    
    textWidth = 6.0 # inches
    
    # output format and resolution
    
    figFmt = 'png'
    dpi = 600
    
    # Graphic dimensions 
    
    plotWidth = dpi*textWidth
    plotHeight = plotWidth/aspect
    axisFontSize = 10
    labelFontSize = 6
    lwidth = 0.5
    axisPad = 5
    wInches = textWidth 
    hInches = wInches/aspect
    
    # Plot filename
    
    plotFile = f'{figName}.{figFmt}'
    
    # LaTeX is used throughout for markup of symbols, Times-Roman serif font
    
    plt.rc('text', usetex=True)
    plt.rc('font', **{'family':'serif','serif':['Times-Roman'],'weight':'bold','size':'16'})
    
    # Font and line weight defaults for axes
    
    matplotlib.rc('axes',linewidth=lwidth)
    matplotlib.rcParams.update({'font.size':axisFontSize})
    
    # axis and label padding
    
    plt.rcParams['xtick.major.pad'] = f'{axisPad}'
    plt.rcParams['ytick.major.pad'] = f'{axisPad}'
    plt.rcParams['axes.labelpad'] = f'{axisPad}'Scattering Efficiency FactorDo the computation for real two indices of refraction * $n_r=1.5$ - typical of glass (fused silica) at visible wavelengths * $n_r=1.33$ - typical of pure water ice at visible wavelenths parameterize using $x=2\pi a/\lambda$.# Range of x
    
    xMin = 0.01
    xMax = 14.0
    x = np.linspace(xMin,xMax,501)
    
    yMin = 0.0
    yMax = 4.0
    
    # glass (fused silica)
    
    nr = 1.5
    rho = 2.0*x*(nr-1.0)
    Qext1 = 2.0 - (4.0/rho)*np.sin(rho)+(4.0/(rho*rho))*(1-np.cos(rho))
    
    # pure water ice 
    
    nr = 1.33
    rho = 2.0*x*(nr-1.0)
    Qext2 = 2.0 - (4.0/rho)*np.sin(rho)+(4.0/(rho*rho))*(1-np.cos(rho))Make the plotPlot $n_r$=1.5 (glass) as a solid line, $n_r$=1.33 (ice) as a dotted line, with $Q_{ext}$=2.0 for reference(see text).fig,ax = plt.subplots()
    
    fig.set_dpi(dpi)
    fig.set_size_inches(wInches,hInches,forward=True)
    
    ax.tick_params('both',length=6,width=lwidth,which='major',direction='in',top='on',right='on')
    ax.tick_params('both',length=3,width=lwidth,which='minor',direction='in',top='on',right='on')
    
    # Limits
    
    plt.xlim(xMin,xMax)
    ax.xaxis.set_major_locator(MultipleLocator(2))
    ax.xaxis.set_minor_locator(MultipleLocator(1))
    plt.xlabel(r'$x=2\pi a/\lambda$',fontsize=axisFontSize)
    
    plt.ylim(yMin,yMax)
    ax.yaxis.set_major_locator(MultipleLocator(1))
    ax.yaxis.set_minor_locator(MultipleLocator(0.5))
    plt.ylabel(r'Q$_{\rm ext}$',fontsize=axisFontSize)
    
    # glass (nr=1.5)
    
    plt.plot(x,Qext1,'-',color='black',lw=1.0,zorder=10)
    plt.text(4.0,3.2,r'$n_{r}$=1.5',fontsize=axisFontSize,ha='right',color='black')
    
    # ice (nr=1.33)
    
    plt.plot(x,Qext2,':',color='black',lw=1.0,zorder=10)
    plt.text(7.5,3.0,r'$n_{r}$=1.33',fontsize=axisFontSize,ha='left',color='black')
    
    # large x asymptote at Qext=2
    
    plt.hlines(2.0,xMin,xMax,ls='--',color='black',lw=0.5,zorder=8)
    
    plt.plot()
    plt.savefig(plotFile,bbox_inches='tight',facecolor='white')Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Distributed Tensorflow with HorovodIn this tutorial, you will train a word2vec model in TensorFlow using distributed training via [Horovod](https://github.com/uber/horovod).  Prerequisites* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)* Go through the [configuration notebook](../../../configuration.ipynb) to:    * install the AML SDK    * create a workspace and its configuration file (`config.json`)* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK# Check core SDK version number
    import azureml.core
    
    print("SDK version:", azureml.core.VERSION)DiagnosticsOpt-in diagnostics for better experience, quality, and security of future releases.from azureml.telemetry import set_diagnostics_collection
    
    set_diagnostics_collection(send_diagnostics=True)Initialize workspaceInitialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.from azureml.core.workspace import Workspace
    
    ws = Workspace.from_config()
    print('Workspace name: ' + ws.name, 
          'Azure region: ' + ws.location, 
          'Subscription id: ' + ws.subscription_id, 
          'Resource group: ' + ws.resource_group, sep='\n')Create or Attach existing AmlComputeYou will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecturecompute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.from azureml.core.compute import ComputeTarget, AmlCompute
    from azureml.core.compute_target import ComputeTargetException
    
    # choose a name for your cluster
    cluster_name = "gpucluster"
    
    try:
        compute_target = ComputeTarget(workspace=ws, name=cluster_name)
        print('Found existing compute target')
    except ComputeTargetException:
        print('Creating a new compute target...')
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', 
                                                               max_nodes=4)
    
        # create the cluster
        compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    
        compute_target.wait_for_completion(show_output=True)
    
    # use get_status() to get a detailed status for the current cluster. 
    print(compute_target.get_status().serialize())The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`.  Upload data to datastoreTo make data accessible for remote training, AML provides a convenient way to do so via a [Datastore](https://docs.microsoft.com/azure/machine-learning/service/how-to-access-data). The datastore provides a mechanism for you to upload/download data to Azure Storage, and interact with it from your remote compute targets. If your data is already stored in Azure, or you download the data as part of your training script, you will not need to do this step. For this tutorial, although you can download the data in your training script, we will demonstrate how to upload the training data to a datastore and access it during training to illustrate the datastore functionality. First, download the training data from [here](http://mattmahoney.net/dc/text8.zip) to your local machine:import os
    import urllib
    
    os.makedirs('./data', exist_ok=True)
    download_url = 'http://mattmahoney.net/dc/text8.zip'
    urllib.request.urlretrieve(download_url, filename='./data/text8.zip')Each workspace is associated with a default datastore. In this tutorial, we will upload the training data to this default datastore.ds = ws.get_default_datastore()
    print(ds.datastore_type, ds.account_name, ds.container_name)Upload the contents of the data directory to the path `./data` on the default datastore.ds.upload(src_dir='data', target_path='data', overwrite=True, show_progress=True)For convenience, let's get a reference to the path on the datastore with the zip file of training data. We can do so using the `path` method. In the next section, we can then pass this reference to our training script's `--input_data` argument.path_on_datastore = 'data/text8.zip'
    ds_data = ds.path(path_on_datastore)
    print(ds_data)Train model on the remote compute  Create a project directoryCreate a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on.project_folder = './tf-distr-hvd'
    os.makedirs(project_folder, exist_ok=True)Copy the training script `tf_horovod_word2vec.py` into this project directory.import shutil
    
    shutil.copy('tf_horovod_word2vec.py', project_folder)Create an experimentCreate an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment) to track all the runs in your workspace for this distributed TensorFlow tutorial.from azureml.core import Experiment
    
    experiment_name = 'tf-distr-hvd'
    experiment = Experiment(ws, name=experiment_name)Create a TensorFlow estimatorThe AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow).from azureml.train.dnn import TensorFlow
    
    script_params={
        '--input_data': ds_data
    }
    
    estimator= TensorFlow(source_directory=project_folder,
                          compute_target=compute_target,
                          script_params=script_params,
                          entry_script='tf_horovod_word2vec.py',
                          node_count=2,
                          process_count_per_node=1,
                          distributed_backend='mpi',
                          use_gpu=True)The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, TensorFlow, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters.Note that we passed our training data reference `ds_data` to our script's `--input_data` argument. This will 1) mount our datastore on the remote compute and 2) provide the path to the data zip file on our datastore.  Submit jobRun your experiment by submitting your estimator object. Note that this call is asynchronous.run = experiment.submit(estimator)
    print(run)Monitor your runYou can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.from azureml.widgets import RunDetails
    RunDetails(run).show()Alternatively, you can block until the script has completed training before running more code.run.wait_for_completion(show_output=True)"Hello world" in Python===When programmers are learning a new language, we tend to write a one-line program that prints some version of the message "Hello world!" this is a simple program that shows whether your computer is properly set up to run Python programs.print('Hello Python world!')Hello Python world!Note: If you are using Python 2.7, this would be:print "Hello Python world!"Hello Python world!Задание №1 15-го января планируется взять кредит в банке на некоторый срок (целое число месяцев). Условие его выплаты таковы:− 1-го числа k-ого месяца долг возрастёт на 1% по сравнению с концом предыдущего месяца;− со 2-го по 14-е число k-того месяца необходимо выплатить часть долга;− 15-го числа k-того месяца долг должен быть на одну и ту же сумму меньше долга на 15-е число предыдущего месяца.На сколько месяцев планируется взять кредит, если известно, что общая сумма выплат после полного погашения кредита на 20% больше суммы, взятой в кредит?  Решение По формуле выплаты кредита S дифференцированными платежами: П = (n+1)/200*r*S, где n = искомое число месяцев, r - величина платежной ставки в процентах.S = 1
    r = 1
    n = 0.2*S*200/S*r - 1
    print(f"Клиент выплатит кредит за {n} месяцев")Клиент выплатит кредит за 39.0 месяцевЗадание №2 Рабочий удерживает за один конец доску массой 50 кг. С горизонтальной поверхностью доска образует угол 30°. С какой силой удерживает рабочий доску, если эта сила направлена перпендикулярно доске?  ![image.png](attachment:image.png)  Решение На доску действуют три силы: сила тяжести mg, сила F и сила реакции N в точке O (про которую забывают все школьники). Чтобы найти искомую силу F, запишем правило моментов относительно точки О. В таком случае плечо силы реакции N равно нулю, а значит и момент этой силы равен нулю.import math
    m = 50
    a = 30
    g = 10
    F = (m * g) * math.cos(a) / 2 
    print(f"Численное значение искомой силы F равно: {round(F)}H")Численное значение искомой силы F равно: 39HЗадание №3 Определите время жизни  нестабильной частицы, движущейся со скоростью u=0,85c, которая пролетела от места своего рождения до точки распада расстояние l=650 м.  Решение#Дано:
    u = 0.85
    l = 650
    c = 3 * 10**-6 #значение скорости света в вакууме
    # t = ?Задача связана с эффектом релятивистского замедления времени. Тогда для решения нам нужно воспользоваться соответствующей формулой: \begin{equation}    \bigtriangleup{t} = \frac{r}{\sqrt{1 - \frac{u^2}{c^2}}}\end{equation} Поиск времени, за которое тело преодолело соответствующее расстояние. Считая, что движение равномерное, получим: \begin{equation}    l = u\bigtriangleup{t} 	\Rightarrow \bigtriangleup{t} = \frac{l}{u}\end{equation} Совместим две формулы и получим искомую величину.import cmath as m
    t = (l / u) * (m.sqrt(1 - (u**2/c**2)))
    print(f"Время жизни нестабильной частицы: {t} секунд")Время жизни нестабильной частицы: 216666666.66531715j секундimagenet_data = torchvision.datasets.('temp',download=True, transform=_transforms)
    data_loader = torch.utils.data.DataLoader(imagenet_data,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=4)
    
    resnet18 = torchvision.models.resnet18(pretrained=True)Downloading: "https://download.pytorch.org/models/resnet18-5c106cde.pth" to /root/.cache/torch/checkpoints/resnet18-5c106cde.pth
    100%|██████████| 44.7M/44.7M [00:01<00:00, 36.1MB/s]https://stackoverflow.com/questions/52548174/how-to-remove-the-last-fc-layer-from-a-resnet-model-in-pytorchclass CustomHead(nn.Module):
        def __init__(self, n, out):
            super().__init__()
            
            self.lin = nn.Linear(n, out)
            self.act = nn.Softmax(0)
        
        def forward(x):
            x = self.lin(x)
            return self.act(x)
    model = nn.Sequential(
        OrderedDict([
            ('pre-trained', nn.Sequential(*list(resnet18.children())[:-1])),
            ('head', CustomHead(512,10))
        ])
    )
    for i in data_loader:
        print(i)
        X, y = i
        out = model(X)
        breakNetwork TrainingHaving implemented and tested all the components of the final networks in steps 1-3, we are now ready to train the network on a large dataset (ImageNet). Notes* The author used the full ImageNet dataset. I did not want to use the full dataset so I decided to use a subset.    * so I used this project: https://github.com/mf1024/ImageNet-datasets-downloader to create a subset of the ImageNet dataset. I used this to create a small subset. The purpose of the small subset is to test the remainder of the notebook without having to wait for the extended amount of time it would take to use the full ImageNet dataset. I know ther results won't be good, but it will help me flesh out this JupyterNotebook.    * The code from `ImageNet-data-set-downloader` does not split the images into the required `train`, `test`, `validation` directories. That is done with other scripts outside this notebook. This notebook assumes that the data is available with that split. It will not work otherwise. * If we use the full ImageNet dataset, splitting the data should not be needed. I believe that the ImageNet dataset comes already split into the appropriate directory types.* The full ImagNet set can be downloaded via Kaggle. Join competition. https://www.kaggle.com/c/imagenet-object-localization-challenge/data. Be aware that this is 155G of data!* I modified some of the set up code to make it fit my environment. This will have to be modified if this notebook is run elsewehre. All of the paths and pointers to the training data are in the cell labeled *Training Dataset* bellow.* Split cells into more functionally isolated cells. i.e separated package imports  TO DO * Train with a larger Imagenet dataset* Complete the training on the full ImageNet. Download this from Kaggle or use the max options with the current `Image-data-set-downloader`* The Examples section at the end of the notebook generate the results to disk. Add a set of cells that can be optionally run that would instead display the images: damaged, predicted and original images.* Train on the CelebA dataset. This is really what we will need for the mobile use case.* Refactor some of the cells - there is too much being done in a single cell. Split. It makes it easier to read and debug.* Sort out the 'fine tuning' aspects of this work*import os
    import gc
    import datetime
    import numpy as np
    import pandas as pd
    import cv2
    
    from copy import deepcopy
    from tqdm import tqdm
    
    from keras.preprocessing.image import ImageDataGenerator
    from keras.callbacks import TensorBoard, ModelCheckpoint, LambdaCallback
    from keras import backend as K
    from keras.utils import Sequence
    from keras_tqdm import TQDMNotebookCallback
    
    import matplotlib.pyplot as plt
    from matplotlib.ticker import NullFormatter
    from IPython.display import clear_output
    
    # Change to the project root path
    if os.path.basename(os.getcwd()) != 'PConv-Keras':
        os.chdir('..')
        
    from libs.pconv_model import PConvUnet
    from libs.util import MaskGenerator
    %load_ext autoreload
    %autoreload 2
    plt.ioff()Training DatasetTRAIN_DIR = r"/home/edm/work/mldata/small-imagenet_1000/imagenet_images_1000_811/train/"
    VAL_DIR = r"/home/edm/work/mldata/small-imagenet_1000/imagenet_images_1000_811//"
    TEST_DIR = r"/home/edm/work/mldata/small-imagenet_1000/imagenet_images_1000_811/test/"
    
    BATCH_SIZE = 4Creating train & test data generatorclass AugmentingDataGenerator(ImageDataGenerator):
        def flow_from_directory(self, directory, mask_generator, *args, **kwargs):
            generator = super().flow_from_directory(directory, class_mode=None, *args, **kwargs)        
            seed = None if 'seed' not in kwargs else kwargs['seed']
            while True:
                
                # Get augmentend image samples
                ori = next(generator)
    
                # Get masks for each image sample            
                mask = np.stack([
                    mask_generator.sample(seed)
                    for _ in range(ori.shape[0])], axis=0
                )
    
                # Apply masks to all image sample
                masked = deepcopy(ori)
                masked[mask==0] = 1
    
                # Yield ([ori, masl],  ori) training batches
                # print(masked.shape, ori.shape)
                gc.collect()
                yield [masked, mask], ori
    
    ## Create Training Generator
    
    train_datagen = AugmentingDataGenerator(  
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        rescale=1./255,
        horizontal_flip=True
    )
    train_generator = train_datagen.flow_from_directory(
        TRAIN_DIR, 
        MaskGenerator(512, 512, 3),
        target_size=(512, 512), 
        batch_size=BATCH_SIZE
    )
    
    ## Create Validation Generator
    
    val_datagen = AugmentingDataGenerator(rescale=1./255)
    val_generator = val_datagen.flow_from_directory(
        VAL_DIR, 
        MaskGenerator(512, 512, 3), 
        target_size=(512, 512), 
        batch_size=BATCH_SIZE, 
        classes=['val'], 
        seed=42
    )
    
    ## Create Testing Generator
    
    test_datagen = AugmentingDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        TEST_DIR, 
        MaskGenerator(512, 512, 3), 
        target_size=(512, 512), 
        batch_size=BATCH_SIZE, 
        seed=42
    )Take a look at images and how they would looktest_data = next(train_generator)
    (masked, mask), ori = test_data
    
    # Show side by side
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i,:,:,:])
        axes[1].imshow(mask[i,:,:,:] * 1.)
        axes[2].imshow(ori[i,:,:,:])
        plt.show()Found 177429 images belonging to 222 classes.Training on ImageNetdef plot_callback(model):
        """Called at the end of each epoch, displaying our previous test images,
        as well as their masked predictions and saving them to disk"""
        
        # Get samples & Display them        
        pred_img = model.predict([masked, mask])
        pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    
        # Clear current output and display test images
        for i in range(len(ori)):
            _, axes = plt.subplots(1, 3, figsize=(20, 5))
            axes[0].imshow(masked[i,:,:,:])
            axes[1].imshow(pred_img[i,:,:,:] * 1.)
            axes[2].imshow(ori[i,:,:,:])
            axes[0].set_title('Masked Image')
            axes[1].set_title('Predicted Image')
            axes[2].set_title('Original Image')
                    
            plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
            plt.close()Phase 1 - with batch normalization# Instantiate the model
    model = PConvUnet(vgg_weights="/home/edm/work/dev/PConv-Keras/data/logs/pytorch_to_keras_vgg16.h5")
    #model.load(r"C:\Users\\Documents\GitHub\PConv-Keras\data\logs\single_image_test\weights.10-0.89.h5")
    model.load(r"/home/edm/work/dev/PConv-Keras/data/logs/imagenet_phase1_paperMasksweights.43-4.33.h5")
    FOLDER = './data/logs/imagenet_phase1_paperMasks'
    
    # Run training for certain amount of epochs
    model.fit_generator(
        train_generator, 
        #steps_per_epoch=10000,
        steps_per_epoch=100,
        validation_data=val_generator,
        #validation_steps=1000,
        validation_steps=100,
        #epochs=50,  
        epochs=50,
        verbose=0,
        callbacks=[
            TensorBoard(
                log_dir=FOLDER,
                write_graph=False
            ),
            ModelCheckpoint(
                FOLDER+'weights.{epoch:02d}-{loss:.2f}.h5',
                monitor='val_loss', 
                save_best_only=True, 
                save_weights_only=True
            ),
            LambdaCallback(
                on_epoch_end=lambda epoch, logs: plot_callback(model)
            ),
            TQDMNotebookCallback()
        ]
    )Phase 2 - without batch normalization# Load weights from previous run
    #model = PConvUnet(vgg_weights='./data/logs/pytorch_vgg16.h5')
    model = PConvUnet(vgg_weights='./data/logs/pytorch_to_keras_vgg16.h5')
    
    model.load(
        r"C:\Users\\Documents\GitHub\PConv-Keras\data\logs\imagenet_phase1\weights.23-1.18.h5",
        train_bn=False,
        lr=0.00005
    )
    # Run training for certain amount of epochs
    model.fit_generator(
        train_generator, 
        #steps_per_epoch=10000,
        steps_per_epoch=10,
        validation_data=val_generator,
        #validation_steps=1000,
        validation_steps=10,
        #epochs=50,  
        epochs=5, 
        verbose=0,
        callbacks=[
            TensorBoard(
                log_dir='./data/logs/imagenet_phase2',
                write_graph=False
            ),
            ModelCheckpoint(
                './data/logs/imagenet_phase2/weights.{epoch:02d}-{loss:.2f}.h5',
                monitor='val_loss', 
                save_best_only=True, 
                save_weights_only=True
            ),
            LambdaCallback(
                on_epoch_end=lambda epoch, logs: plot_callback(model)
            ),
            TQDMNotebookCallback()
        ]
    )Phase 3 - Generating samplesLet us use the fine-tuned network to get some sample. We will save results in `data/test_samples` folder# Load weights from previous run
    model = PConvUnet()
    model.load(
        #r"C:\Users\\Documents\GitHub\PConv-Keras\data\logs\imagenet_phase2\weights.26-1.07.h5",
        #"/home/edm/work/dev/PConv-Keras/data/logs/imagenet_phase1_paperMasksweights.04-8.92.h5",
        #r"/home/edm/work/dev/PConv-Keras/data/logs/imagenet_phase1_paperMasksweights.43-4.33.h5",
        r"/home/edm/work/dev/PConv-Keras/data/logs/imagenet_phase1_paperMasksweights.43-4.33.h5",
        train_bn=False,
        lr=0.00005
    )
    n = 0
    for (masked, mask), ori in tqdm(test_generator):
        
        # Run predictions for this batch of images
        pred_img = model.predict([masked, mask])
        pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        
        # Clear current output and display test images
        for i in range(len(ori)):
            _, axes = plt.subplots(1, 2, figsize=(10, 5))
            axes[0].imshow(masked[i,:,:,:])
            axes[1].imshow(pred_img[i,:,:,:] * 1.)
            axes[0].set_title('Masked Image')
            axes[1].set_title('Predicted Image')
            axes[0].xaxis.set_major_formatter(NullFormatter())
            axes[0].yaxis.set_major_formatter(NullFormatter())
            axes[1].xaxis.set_major_formatter(NullFormatter())
            axes[1].yaxis.set_major_formatter(NullFormatter())
                    
            plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
            plt.close()
            n += 1
            
        # Only create predictions for about 100 images
        #if n > 100:
        if n > 100:
            break25it [00:23,  1.12it/s]Section Below not needed for now  Performance EvaluationTo evaluate the performance of the network, in this notebook I'll try loading the test masks used in the original paper, and see which PSNR scores we get on imagenet# Store data
    ratios = []
    psnrs = []
    
    # Loop through test masks released with paper
    test_masks = os.listdir('./data/masks/test')
    for filename in tqdm(test_masks):
        
        # Load mask from paper
        filepath = os.path.join('./data/masks/test', filename)
        mask = cv2.imread(filepath) / 255
        ratios.append(mask[:,:,0].sum() / (512 * 512))
        mask = np.array([1-mask for _ in range(BATCH_SIZE)])
        
        # Pick out image from test generator
        test_data = next(val_generator)
        (_, _), ori = test_data
        
        masked = deepcopy(ori)
        masked[mask==0] = 1
        
        # Run prediction on image & mask
        pred = model.predict([ori, mask])
        
        # Calculate PSNR
        psnrs.append(-10.0 * np.log10(np.mean(np.square(pred - ori))))
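    The PSNR appended above is the standard definition specialized to images scaled to [0, 1]: PSNR = 20*log10(MAX) - 10*log10(MSE), and with MAX = 1 the first term drops out. A small helper makes the formula explicit (a sketch; the name `psnr` is not part of the original notebook):
    def psnr(pred, target, max_val=1.0):
        """Peak signal-to-noise ratio for arrays scaled to [0, max_val]."""
        mse = np.mean(np.square(pred - target))
        return 20.0 * np.log10(max_val) - 10.0 * np.log10(mse)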
    df = pd.DataFrame({'ratios': ratios[:2408], 'psnrs': psnrs})
    
    means, stds = [], []
    idx1 = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
    idx2 = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    
    for mi, ma in zip(idx1, idx2):
        means.append(df[(df.ratios >= mi) & (df.ratios <= ma)].mean())
        stds.append(df[(df.ratios >= mi) & (df.ratios <= ma)].std())
        
    pd.DataFrame(means, index=['{}-{}'.format(a, b) for a, b in zip(idx1, idx2)])运行下列代码import numpy as np
    import tensorflow as tf
    from keras.callbacks import TensorBoard
    from keras.layers import Input, Dense
    from keras.models import Model
    
    
    def write_log(callback, names, logs, batch_no):
        for name, value in zip(names, logs):
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value
            summary_value.tag = name
            callback.writer.add_summary(summary, batch_no)
            callback.writer.flush()
        
    net_in = Input(shape=(3,))
    net_out = Dense(1)(net_in)
    model = Model(net_in, net_out)
    model.compile(loss='mse', optimizer='sgd', metrics=['mae'])
    
    log_path = './graph'
    callback = TensorBoard(log_path)
    callback.set_model(model)
    train_names = ['train_loss', 'train_mae']
    val_names = ['val_loss', 'val_mae']
    for batch_no in range(100):
        X_train, Y_train = np.random.rand(32, 3), np.random.rand(32, 1)
        logs = model.train_on_batch(X_train, Y_train)
        write_log(callback, train_names, logs, batch_no)
        
        if batch_no % 10 == 0:
            X_val, Y_val = np.random.rand(32, 3), np.random.rand(32, 1)
            logs = model.train_on_batch(X_val, Y_val)
            write_log(callback, val_names, logs, batch_no//10)Using TensorFlow backend.Accessing BioData Catalyst Harmonized Variables Using Python PIC-SURE API This tutorial notebook will demonstrate how to query and work with the BioData Catalyst cross-studies harmonized variables using python PIC-SURE API. For a more step-by-step introduction to the Python PIC-SURE API, see the `PICSURE-API_101.ipynb` notebook. **Before running this notebook, please be sure to get a user-specific security token. For more information on how to proceed, see the `get_your_token.ipynb` notebook**  -------     Environment set-up  System Requirements- Python 3.6 or Later- PIP & Bash Interpreter  Installation of External Dependenciesimport sys
    !{sys.executable} -m pip install -r requirements.txt
    !{sys.executable} -m pip install --upgrade --force-reinstall git+https://github.com/hms-dbmi/pic-sure-python-adapter-hpds.git
    !{sys.executable} -m pip install --upgrade --force-reinstall git+https://github.com/hms-dbmi/pic-sure-python-client.git
    import json
    from pprint import pprint
    
    import pandas as pd
    import numpy as np 
    import matplotlib.pyplot as plt
    from scipy import stats
    
    import PicSureHpdsLib
    import PicSureClient
    
    from python_lib.utils import get_multiIndex_variablesDict, joining_variablesDict_onCol
    print("NB: This Jupyter Notebook has been written using PIC-SURE API following versions:\n- PicSureHpdsLib: 1.1.0\n- PicSureClient: 1.1.0")
    print("The installed PIC-SURE API libraries versions:\n- PicSureHpdsLib: {0}\n- PicSureClient: {1}".format(PicSureHpdsLib.__version__, PicSureClient.__version__))Connecting to a PIC-SURE NetworkPICSURE_network_URL = "https://picsure.biodatacatalyst.nhlbi.nih.gov/picsure"
    resource_id = "02e23f52-f354-4e8b-"
    token_file = "token.txt"
    with open(token_file, "r") as f:
        my_token = f.read()
    client = PicSureClient.Client()
    connection = client.connect(PICSURE_network_URL, my_token)
    adapter = PicSureHpdsLib.Adapter(connection)
    resource = adapter.useResource(resource_id)Harmonized Variables The goal of Data Harmonization is to "produce a high quality, lasting resource of publicly available and thoroughly documented harmonized phenotype variables". The TOPMed Data Coordinating Center collaborates with Working Group members, study and phenotype experts on this endeavour. So far, 44 harmonized variables are accessible through PIC-SURE (as well as, for each variable, the age at which the variable value has been collected for a given subject).Which phenotype characteristics are included in the harmonized variables?- Key NHLBI phenotypes    - Blood cell counts    - VTE    - Atherosclerosis-related phenotypes    - Lipids    - Blood pressure- Common covariates    - Height    - Weight    - BMI    - Smoking status    - Race/ethnicityMore information about the variable harmonization process is available at: - https://www.nhlbiwgs.org/sites/default/files/pheno_harmonization_guidelines.pdf  1. Retrieving Variables Dictionary from HPDS Database Here we retrieve the harmonized variables information by querying the "Harmonized" keyword.harmonized_dic = resource.dictionary().find("Harmonized").DataFrame()
    pd.set_option("display.max.rows", 50)
    multiIndexdic = get_multiIndex_variablesDict(harmonized_dic)
    import warnings
    warnings.filterwarnings("ignore", 'This pattern has match groups')
    multiIndexdic_sub = multiIndexdic.loc[~ multiIndexdic["simplified_name"].str.contains("(^[Aa]ge)|(SUBJECT_ID)", regex=True),:]
    multiIndexdic_sub.shapeOverall, there are 82 harmonized variables. After discarding "subject ID" and the variables that only indicate the age of the subject at which a given harmonized variable was measured, there are 43 left.multiIndexdic_sub2. Selecting Variables and Retrieving Data from the Database Let's say we are interested in the subset of harmonized variables pertaining to demographics. We subset to keep only the phenotypical variables plus the "affection status", which will be used as the dependent variable for this illustrative use case.mask_demo = multiIndexdic_sub.index.get_level_values(1) == '01 - Demographics'
    variablesDict = multiIndexdic_sub.loc[mask_demo,:]
    selected_vars = variablesDict.loc[:, "name"].tolist()
    pprint(selected_vars)Retrieving the data:query = resource.query()
    query.select().add(selected_vars)
    facts = query.getResultsDataFrame()
    
    facts = facts.set_index("Patient ID")\
    .dropna(axis=0, how="all")
    facts.columns = variablesDict["simplified_name"].values
    sex_varname = "Subject sex  as recorded by the study."
    study_varname = "A distinct subgroup within a study generally indicating subjects who share similar characteristics due to study design. Subjects may belong to only one subcohort."
    race_varname = "Harmonized race category of participant."
    facts.shape
    facts.head()
    facts = facts.astype("category")Studying the Sex Distribution Across Studiesimport matplotlib.patches as mpatches
    from matplotlib import cm
    from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage,
                                      AnnotationBbox)
    plt.rcParams["figure.figsize"] = (14,8)
    font = {'weight' : 'bold',
            'size'   : 12}
    plt.rc('font', **font)
    subset_facts = facts.loc[pd.notnull(facts[sex_varname]),:]
    ratio_df = subset_facts.groupby(study_varname)[sex_varname]\
    .apply(lambda x: pd.value_counts(x)/(np.sum(pd.notnull(x))))\
    .unstack(1)
    annotation_x_position = ratio_df.apply(np.max, axis=1)
    number_subjects = subset_facts.groupby(study_varname)[sex_varname].apply(lambda x: x.notnull().sum())
    annotation_gen = list(zip(number_subjects, annotation_x_position))
    
    fig = ratio_df.plot.barh(title="Subjects sex-ratio across studies", figsize=(10, 12))
    fig.legend(bbox_to_anchor=(1, 0.5))
    fig.set_xlim(0, 1.15)
    fig.set_ylabel(None)
    
    for n, p in enumerate(fig.patches[:27]):
        nb_subject, x_position = annotation_gen[n]
        fig.annotate(nb_subject, (x_position + 0.03, p.get_y()+0.1), bbox=dict(facecolor='none',
                                                                           edgecolor='black',
                                                                           boxstyle='round'))
    
    handles, labels = fig.get_legend_handles_labels()
    red_patch = mpatches.Patch(label='Study nb subjects', edgecolor="black", facecolor="white")
    handles.append(red_patch)
    fig.legend(handles=handles)Applying Gradient Descent Algorithm from scratch for Univariate Linear Regression#gradient Descent Algorithm
    
    theta_zero = 0
    theta_one  = 0
    
    learningRate = 0.001
    iterations = 100
    n = len(X)
    for i in range(iterations):  # run the update for the chosen number of iterations (n is the sample size used in the gradients)
        pred_Y = theta_one*X + theta_zero
        D_theta_zero = (-2/n) * sum(Y - pred_Y)
        D_theta_one  = (-2/n) * sum( X * (Y - pred_Y))
        theta_zero   = theta_zero - learningRate * D_theta_zero
        theta_one    = theta_one  - learningRate * D_theta_one
    
    
    print(theta_zero, theta_one)
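    # Optional cross-check (not in the original notebook): the closed-form least-squares line from
    # np.polyfit should be close to the gradient-descent estimates, assuming NumPy is available as np
    # and X, Y are the 1-D arrays used above.
    slope_ls, intercept_ls = np.polyfit(X, Y, 1)
    print('least-squares check:', intercept_ls, slope_ls)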
    predicted_Y = theta_zero + theta_one * X
    plt.scatter(X,Y)
    plt.plot([min(X),max(X)] ,[min(predicted_Y), max(predicted_Y)], color='Green')
    plt.xlabel('Population of City in 10,000s')
    plt.ylabel('Profit in $10,000s')
    plt.show()Group A12 submission : HW - 2   Section 2 - Solutionimport pandas as pd
    import numpy as np
    import datetime as dt
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')
    import seaborn as sns
    import statsmodels.formula.api as smf
    import numpy 
    path_to_data_file = r'proshares_analysis_data.xlsx'   # Assuming that the file is in the root/home directory
    
    df_sp = pd.read_excel(path_to_data_file,sheet_name = 2)
    
    df_sp.set_index('date',inplace =True)
    
    S_and_P_data = df_sp['SPY US Equity']
    
    df_hedge_fund = pd.read_excel(path_to_data_file, sheet_name = 1)
    
    df_hedge_fund.set_index('date',inplace = True)
    
    periods = 12Question 1  1.  For the series in the “hedgefundseries” tab, report the following summary statistics:1(a)  mean (b)  volatility (c)  Sharpe ratioAnnualize these statistics.df_hfs = pd.read_excel('proshares_analysis_data.xlsx',sheet_name = 1).set_index('date')
    periods = 12
    df_hfs_annualized = df_hfs*periods
    mean = df_hfs_annualized.mean()
    std = df_hfs_annualized.std()/np.sqrt(periods)
    sharpe = mean/std
    df_summary = pd.DataFrame({'Mean(%)':mean*100,"Volatility":std,"Sharpe":sharpe})
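    # Optional cross-check (not part of the original homework): annualizing directly from the monthly
    # series (mean x 12, standard deviation x sqrt(12)) gives the same figures as above.
    mean_check = df_hfs.mean() * periods
    vol_check = df_hfs.std() * np.sqrt(periods)
    sharpe_check = mean_check / vol_check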
    df_summaryQuestion 2  2.  For the series in the “hedgefundseries” tab, calculate the following statistics related to tail-risk.(a)  Skewness(b)  Excess Kurtosis (in excess of 3)(c)  VaR (.05) - the fifth quantile of historic returns(d)  CVaR (.05) - the mean of the returns at or below the fifth quantile(e)  Maximum drawdown - include the dates of the max/min/recovery within the max drawdown period.df_hfs_2 = df_hfs
    
    skew =df_hfs_2.skew(axis = 0)
    
    # Calculating kurtosis
    from scipy.stats import kurtosis
    
    kurtosis = pd.Series(index = df_hfs_2.skew(axis = 0).index, data = kurtosis(df_hfs_2, fisher=True, axis = 0))
    
    # Calculating VaR(0.05)
    VaR = pd.Series(index = df_hfs_2.skew(axis = 0).index, data = np.percentile(df_hfs, 5, axis = 0))  # np.percentile expects percentages in [0, 100], so the 5th percentile is 5, not 0.05
    
    
    
    # Calculating CVaR(0.05) - - the mean of the returns at or below the fifth quantile
    CVar = df_hfs_2[df_hfs_2 <= VaR].mean()
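    # Optional cross-check (not in the original homework): pandas' quantile reproduces the 5% VaR,
    # and CVaR is again the mean of the returns at or below that quantile.
    VaR_check = df_hfs_2.quantile(0.05)
    CVaR_check = df_hfs_2[df_hfs_2 <= VaR_check].mean()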
    
    # Maximum drawdown
    cum_return = (1+ df_hfs).cumprod()
    rolling_max = cum_return.cummax()
    drawdown = (cum_return - rolling_max) / rolling_max
    maxDrawdown = drawdown.min()
    
    bottom = pd.Series(pd.to_datetime([drawdown[col].idxmin() for col in drawdown]),index=df_hfs.columns)
    peak = pd.Series(pd.to_datetime([(cum_return[col][:bottom[col]].idxmax()) for col in cum_return]),index=df_hfs.columns)
        
    peakLevels = pd.Series([cum_return[col].loc[peak.loc[col]] for col in cum_return],index=df_hfs.columns)
    recovered = []
    for col in cum_return:
        for lev in cum_return[col][bottom[col]:]:
            if lev >= peakLevels[col]:
                recovered.append(cum_return.index[cum_return[col] == lev][0])
                break
        else:
            # if a series never recovers within the sample, record a missing date so the summary table still lines up
            recovered.append(pd.NaT)
    pd.DataFrame({'Skewness':skew,'Excess Kurtosis':kurtosis,'5% VaR':VaR,'5% CVaR':CVar,'Max Drawdown':maxDrawdown,'Peak':peak,'Bottom':bottom,'Recovered':recovered})
    df_hfs_2.loc['max drawdown'] = drawdown.min()
    drawdown.plot(figsize = (15,8),title = 'Drawdown')Question 3   3. For the series in the "hedge_fund_series" tab, run a regression of each against SPY. Include an intercept. Report the following regression-based statistics:     a. Market Beta     b. Treynor Ratio     c. Information Ratio Annualize the three stats as appropriate.## function to clean the data and replace missing values with 0
    def clean(data):
        if data.isnull().values.any():
            
            return data.fillna(0)
        else:
            return data
    def market_beta(model, x,y, market = False):
        
        beta = model.params.to_frame('Parameters').loc[x.columns[1],'Parameters']
       
        return beta
    def Treynor_Ratio(model,x,y,periods):
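        # Treynor ratio as computed here: annualized fitted (predicted) return divided by the market beta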
        trey = []
        pred = model.predict(x).mean()*periods
        beta = model.params.to_frame('Parameters').loc[x.columns[1],'Parameters']
        trey = pred/beta
        return trey
    def Information_Ratio(model,x,y,periods):
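        # Information ratio as computed here: regression alpha divided by the residual volatility scaled by sqrt(periods)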
        
        alpha = model.params.to_frame('Parameters').loc[x.columns[0],'Parameters']
        sigma = (y-model.predict(x)).std()
        info = alpha/(sigma*(periods**0.5))
        return info
    def regression(predicted_var, predicting_var, periods = 12, intercept = False):
        ## for the regression model
        y = predicted_var
        y = clean(y)
        
        
        
        
        x = predicting_var
        x = clean(x)
        if intercept == True:
            x = sm.add_constant(x)
        
        
        model  =sm.OLS(y,x,missing = 'drop').fit()
        
         
        meanr = y.mean()*periods
        s = meanr/(y.std()*(periods)**0.5)
        m = market_beta(model,x,y,market=True)
        t = Treynor_Ratio(model,x,y,periods)
        i = Information_Ratio(model,x,y,periods)
        r = model.rsquared
        
        
        return meanr,s,m,t, i,r
    
    
    
    mean_returns = {}
    sharpe = {}
    beta = {}
    trey = {}
    info = {}
    r_sq = {}
    for c in df_hedge_fund.columns:
        mean_returns[c],sharpe[c],beta[c], trey[c], info[c],r_sq[c] = regression(df_hedge_fund[c],S_and_P_data,intercept = True)
    
    df_summary = pd.DataFrame([mean_returns,sharpe,r_sq,beta,trey,info])
    
    df_summary = df_summary.T.rename(columns = {0:'Mean Returns (%)',1:'Sharpe Ratio',2:'R-squared',3:'Market Beta',4:'Treynor Ratio',5:'Information Ratio'})
    
    df_summary['Mean Returns (%)'] = df_summary['Mean Returns (%)']*100
    
    df_summary.loc[S_and_P_data.name] = [S_and_P_data.mean()*100,(S_and_P_data.mean()/S_and_P_data.std())*(periods)**0.5,1,1,'NA','NA']
    
    df_summaryQuestion 4  Discuss the previous statistics, and what they tell us about... (a) the differences between SPY and the hedge-fund series? (b) which performs better between HDG and QAI. (c) whether HDG and the ML series capture the most notable properties of HFRI.  a. All the hedge funds got better mean returns while having a higher Sharpe ratio. For a risk-averse investor, the S&P could have been a better bet. All the hedge funds had a low information ratio but a high R-squared: they took on a bit more risk but did not deviate too far from the S&P, which explains much of the variance of the hedge funds. For the hedge funds with Sharpe ratios close to the S&P's Sharpe ratio (HFRIFWI and MLEIFCTR), the fund managers were able to mean-variance optimize the portfolio well.   b. HDG has a slightly higher mean return but lower Sharpe and information ratios compared to QAI. This means that the extra performance came at the cost of higher volatility and probably involved a bit of luck compared to QAI. Overall it is close, but QAI has slightly better statistics.   c. HFRI has a very high correlation with the ML series and HDG. All four series also have very similar betas with the S&P, which probably implies similar average volatilities. Overall, on average, HDG and ML do capture the most notable features of HFRI, but they will probably provide lower mean returns along with lower volatility and less risk of a major loss, as HFRI's tails are fatter. This, we believe, is a case of average returns and average volatility masking the risky tail events.  Question 5 5.  Report the correlation matrix for these assets.(a)  Show the correlations as a heat map.(b)  Which series have the highest and lowest correlations?path_to_data_file = 'proshares_analysis_data.xlsx'
    df = pd.read_excel(path_to_data_file,sheet_name='hedge_fund_series')
    df
    df.corr()
    sns.heatmap(df.corr())HDG US Equity and QAI US Equity have the lowest correlation; MLEIFCTR Index and MLEIFCTX Index have the highest correlation.  Question 6  Replicate HFRI with the six factors listed on the “merrillfactors” tab. Include a constant, and run the unrestricted regression,      \begin{align}  r^{hfri}_{t} = \alpha^{merr} + x^{merr}_{t} \beta^{merr} + \epsilon^{merr}_{t} \tag{1}  \end{align}  \begin{align}  \hat r^{hfri}_{t} = \hat\alpha^{merr} + x^{merr}_{t} \hat\beta^{merr} \tag{2}  \end{align}     (a)  Report the intercept and betas.  (b)  Are the betas realistic position sizes, or do they require huge long-short positions?  (c)  Report the R-squared.  (d)  Report the volatility of $\epsilon^{merr}$ (the tracking error).from dataclasses import dataclass
    import warnings
    # regression function
    @dataclass
    class RegressionsOutput:
        excess_ret_stats: pd.DataFrame
        params: pd.DataFrame
        residuals: pd.DataFrame
        tstats: pd.DataFrame
        other: pd.DataFrame
        df: pd.DataFrame
    
    def lfm_time_series_regression(df, portfolio_names, factors, annualize_factor=12):
        excess_ret_stats = pd.DataFrame(index=factors, columns=['average', 'std'], dtype=float)
        for factor in factors:
            excess_ret_stats.loc[factor, 'average'] = annualize_factor * df[factor].mean()
            excess_ret_stats.loc[factor, 'std'] = np.sqrt(annualize_factor) * df[factor].std()
            excess_ret_stats.loc[factor, 'sharpe_ratio'] = \
                excess_ret_stats.loc[factor, 'average'] / excess_ret_stats.loc[factor, 'std']
            # Here I'll just report the unscaled skewness
            excess_ret_stats.loc[factor, 'skewness'] = df[factor].skew()
            # excess_ret_stats.loc[factor, 'skewness'] = annualize_factor * df[factor].skew()
    
        _temp_excess_ret_stats = excess_ret_stats.copy()
        _temp_excess_ret_stats.loc['const', :] = 0
    
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            rhs = sm.add_constant(df[factors])
        df_params = pd.DataFrame(columns=portfolio_names)
        df_other = pd.DataFrame(columns=portfolio_names)
        df_residuals = pd.DataFrame(columns=portfolio_names)
        df_tstats = pd.DataFrame(columns=portfolio_names)
        for portfolio in portfolio_names:
            lhs = df[portfolio]
            res = sm.OLS(lhs, rhs, missing='drop').fit()
            df_params[portfolio] = res.params
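            # the intercept is annualized (monthly alpha x 12) so it can be read as an annual alpha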
            df_params.loc['const', portfolio] = annualize_factor * res.params['const']
            
            df_other.loc['r_squared', portfolio] = res.rsquared
            df_other.loc['model_implied_excess_ret', portfolio] = df_params[portfolio] @ _temp_excess_ret_stats['average']
            df_other.loc['ave_excess_ret', portfolio] = \
                annualize_factor * df[portfolio].mean()
            df_other.loc['std_excess_ret', portfolio] = \
                np.sqrt(annualize_factor) * df[portfolio].std()
            df_other.loc['skewness_excess_ret', portfolio] = \
                annualize_factor * df[portfolio].skew()
            df_other.loc['sharpe_ratio', portfolio] = \
                df_other.loc['ave_excess_ret', portfolio] / df_other.loc['std_excess_ret', portfolio]
            df_residuals[portfolio] = res.resid
            df_tstats[portfolio] = res.tvalues
    
        regression_outputs = RegressionsOutput(
            excess_ret_stats.T,
            df_params.T,
            df_residuals,
            df_tstats.T,
            df_other.T,
            df)
    
    
        return regression_outputs
    df_hfs = pd.read_excel('proshares_analysis_data.xlsx', sheet_name = 'hedge_fund_series').set_index('date')
    df_mf = pd.read_excel('proshares_analysis_data.xlsx', sheet_name = 'merrill_factors').set_index('date')
    
    merrill_factors = ['SPY US Equity', 'USGG3M Index', 'EEM US Equity', 'EFA US Equity', 'EUO US Equity', 'IWM US Equity']
    
    df_mf['HFRIFWI Index'] = df_hfs['HFRIFWI Index']
    
    prob6_regs = lfm_time_series_regression(df=df_mf, portfolio_names=['HFRIFWI Index'], factors=merrill_factors)(a) Report the intercept and betas.display(prob6_regs.params)(b) Are the betas realistic position sizes, or do they require huge long-short positions?These betas are realistic position sizes. Only the USGG3M is short and that might not be allowed  (c) Report the R-squared. (d) Report the volatility of  E(merr), (the tracking error.)display(prob6_regs.other)
    
    print('\nR-squared = {:.5f}'.format(np.array(prob6_regs.other['r_squared'])[0]))
    print('Tracking error = {:.5f}'.format(np.array(prob6_regs.residuals.std() * np.sqrt(12))[0]))7. Let's examine the replication out-of-sample.date_range = df_mf.iloc[60:, :].index
    oos_fitted = pd.Series(index = date_range, name = 'OOS_fit', dtype='float64')
    for i in range(60, len(df_mf)):
        date = df_mf.iloc[i:i+1, :].index
        # date_month_prior = pd.DatetimeIndex([date]).shift(periods = -1, freq = 'M')[0]
        df_subset = df_mf.iloc[i-60:i, :]
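        # estimate the replication on the trailing 60 months only, then predict month i (which is kept out of the estimation window)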
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            rhs = sm.add_constant(df_subset[merrill_factors])
        lhs = df_subset['HFRIFWI Index']
        res = sm.OLS(lhs, rhs, missing='drop').fit()  # the statsmodels keyword is missing='drop' (drop rows with NaNs before fitting)
        alpha = res.params['const']
        beta = res.params.drop(index='const')
        x_t = df_mf.loc[date, merrill_factors]
        predicted_next_value = alpha + x_t @ beta
        oos_fitted[date] = predicted_next_value
    
    oos_fitted.plot(figsize=(14,3))
    df_mf.iloc[60:,:]['HFRIFWI Index'].plot()
    plt.legend()
    plt.show()
    None
    
    display((pd.DataFrame([oos_fitted, df_mf.iloc[60:,:]['HFRIFWI Index']])).T.corr())The OOS results perform well, showing almost the same level of replicability - 94.5% correlation between the replicating portfolio and the HFRI.  8. (a) regression betafrom dataclasses import dataclass
    import warnings
    df_hfs = pd.read_excel('proshares_analysis_data.xlsx', sheet_name = 'hedge_fund_series').set_index('date')
    df_mf = pd.read_excel('proshares_analysis_data.xlsx', sheet_name = 'merrill_factors').set_index('date')
    # regression function
    @dataclass
    class RegressionsOutput:
        excess_ret_stats: pd.DataFrame
        params: pd.DataFrame
        residuals: pd.DataFrame
        tstats: pd.DataFrame
        other: pd.DataFrame
        df: pd.DataFrame
    
    def lfm_time_series_regression(df, portfolio_names, factors, annualize_factor=12):
        excess_ret_stats = pd.DataFrame(index=factors, columns=['average', 'std'], dtype=float)
        for factor in factors:
            excess_ret_stats.loc[factor, 'average'] = annualize_factor * df[factor].mean()
            excess_ret_stats.loc[factor, 'std'] = np.sqrt(annualize_factor) * df[factor].std()
            excess_ret_stats.loc[factor, 'sharpe_ratio'] = \
                excess_ret_stats.loc[factor, 'average'] / excess_ret_stats.loc[factor, 'std']
            # Here I'll just report the unscaled skewness
            excess_ret_stats.loc[factor, 'skewness'] = df[factor].skew()
            # excess_ret_stats.loc[factor, 'skewness'] = annualize_factor * df[factor].skew()
    
        _temp_excess_ret_stats = excess_ret_stats.copy()
        _temp_excess_ret_stats.loc['const', :] = 0
    
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            rhs = sm.add_constant(df[factors])
        df_params = pd.DataFrame(columns=portfolio_names)
        df_other = pd.DataFrame(columns=portfolio_names)
        df_residuals = pd.DataFrame(columns=portfolio_names)
        df_tstats = pd.DataFrame(columns=portfolio_names)
        for portfolio in portfolio_names:
            lhs = df[portfolio]
            res = sm.OLS(lhs, rhs, missing='drop').fit()
            df_params[portfolio] = res.params
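            # unlike the Question 6 version above, the intercept is stored as zero so the fitted values below use the factor betas only (no alpha)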
            df_params.loc['const', portfolio] = 0
            
            df_other.loc['r_squared', portfolio] = res.rsquared
            df_other.loc['model_implied_excess_ret', portfolio] = df_params[portfolio] @ _temp_excess_ret_stats['average']
            df_other.loc['ave_excess_ret', portfolio] = \
                annualize_factor * df[portfolio].mean()
            df_other.loc['std_excess_ret', portfolio] = \
                np.sqrt(annualize_factor) * df[portfolio].std()
            df_other.loc['skewness_excess_ret', portfolio] = \
                annualize_factor * df[portfolio].skew()
            df_other.loc['sharpe_ratio', portfolio] = \
                df_other.loc['ave_excess_ret', portfolio] / df_other.loc['std_excess_ret', portfolio]
            df_residuals[portfolio] = res.resid
            df_tstats[portfolio] = res.tvalues
    
        regression_outputs = RegressionsOutput(
            excess_ret_stats.T,
            df_params.T,
            df_residuals,
            df_tstats.T,
            df_other.T,
            df)
    
    
        return regression_outputs
    merrill_factors = ['SPY US Equity', 'USGG3M Index', 'EEM US Equity', 'EFA US Equity', 'EUO US Equity', 'IWM US Equity']
    
    df_mf['HFRIFWI Index'] = df_hfs['HFRIFWI Index']
    
    prob8_regs = lfm_time_series_regression(df=df_mf, portfolio_names=['HFRIFWI Index'], factors=merrill_factors)
    display(prob8_regs.params)8(b) mean of fitted value  the mean of HFRI isprint(df_hfs['HFRIFWI Index'].mean()*12)
    
    a=df_mf['SPY US Equity'].mean()
    b=df_mf['USGG3M Index'].mean()
    c=df_mf['EEM US Equity'].mean()
    d=df_mf['EFA US Equity'].mean()
    e=df_mf['EUO US Equity'].mean()
    f=df_mf['IWM US Equity'].mean()
    mean_fitted=(0.072022*a-0.400591*b+0.072159*c+0.106318*d+0.022431*e+0.130892*f)*12The mean of the fitted value isprint(mean_fitted)0.037023779152482235so we can see that, compared to the mean of HFRI, the mean of the fitted value is much lower.  8(c) The correlation  Parameters without interceptprob8_regs.paramsParameters with interceptprob6_regs.params
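    # Optional cross-check (the names below are not in the original notebook): pull the betas straight
    # from the stored regression output instead of hard-coding the decimals above.
    betas_no_int = prob8_regs.params.loc['HFRIFWI Index', merrill_factors]
    fitted_no_int = df_mf[merrill_factors] @ betas_no_int
    print(12 * fitted_no_int.mean())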
    
    
    a=df_mf['SPY US Equity']
    b=df_mf['USGG3M Index']
    c=df_mf['EEM US Equity']
    d=df_mf['EFA US Equity']
    e=df_mf['EUO US Equity']
    f=df_mf['IWM US Equity']
    k=[]
    g=[]
    t=[]
    
    for i in range(len(a)):
        fitted_no_intece=0.072022*a[i]-0.400591*b[i]+0.072159*c[i]+0.106318*d[i]+0.022431*e[i]+0.130892*f[i]
        k.append(fitted_no_intece)
        fitted_intece=0.01376+0.072022*a[i]-0.400591*b[i]+0.072159*c[i]+0.106318*d[i]+0.022431*e[i]+0.130892*f[i]
        g.append(fitted_intece)
        t.append(df_hfs['HFRIFWI Index'][i])
    df_corr = pd.DataFrame({'HFRIFWI Index':df_hfs['HFRIFWI Index'],'Predicted with intercept':g,'Predicted without intercept':k})
    
    df_corr.corr()[['HFRIFWI Index']]Using Employment and Employer-Level Measures to Understand Indiana's Labor Market  IntroductionWhile in the [Data Exploration](Data_Exploration.ipynb) notebook we focused primarily on understanding our cohort's earnings, here we will first look at two measures of stable employment before switching gears to the demand side of employment: the employers. For the second part of this notebook, we will analyze some employer-level measures created in a supplementary [notebook](Create_Employer_Characteristics.ipynb) to get a better sense of Indiana's labor market and how employers of individuals in our cohort fit into the overall labor market.  Learning ObjectivesWe will cover two prominent analyses:1. Different measures of stable employment1. Labor market interactionsThese two sections will have two different units of analysis: the first will focus directly on the individuals in our cohort, and then will switch to their employers. Before we start looking at their employers, a logical prelude would be taking a deeper dive into our cohort's employment. Here, we will walk through two different measures of stable employment within a cohort and see if their earnings differed significantly from those without stable employment. From there, we will load in our employer-level measures file and look at the differences in employers of members in our cohort who experienced different levels of employment.We would like to find out if there are any distinguishing factors between the overall labor market in Indiana and the employers that hired members of our 2016Q4 cohort. Ultimately, we want to gain a better understanding of the demand side when it comes to employment opportunities for our TANF leavers.Similar to the [Data Exploration](Data_Exploration.ipynb) notebook, we will pose a few direct questions we will use to answer our ultimate question: **How can we use labor market interactions to help explain employment outcomes of TANF leavers?**Before we do so, we need to load our external R packages and connect to the database.  R Setup#database interaction imports
    library(DBI)
    library(RPostgreSQL)
    
    # for data manipulation/visualization
    library(tidyverse)
    
    # scaling data
    library(scales)
    # create an RPostgreSQL driver
    drv <- dbDriver("PostgreSQL")
    
    # connect to the database
    con <- dbConnect(drv,dbname = "postgresql://stuffed.adrf.info/appliedda")Stable Employment MeasuresAs discussed above, we will spend some time in this section taking a look at our 2016Q4 cohort's employment outcomes. We will examine two different definitions of stable employment and see how average quarterly earnings differ for individuals who satisfy these definitions of stable employment. We have listed the two questions we will seek to answer in this section below:1. How many leavers found stable employment? What percentage is this of our total cohort?1. What were the average quarterly earnings within these stable jobs?Let's first load our table matching our 2016Q4 cohort to their employment outcomes into R.# read table into R
    qry = "
    select *
    from ada_tdc_2020.cohort_2016_earnings
    "
    df_2016_wages = dbGetQuery(con, qry)
    # take a look at df_2016_wages
    glimpse(df_2016_wages)Now, we're ready to start answering our first guiding question for this section. Question 1: How many leavers found stable employment? What percentage is this of our total cohort?  How would you define stable employment? In fact, it is quite a subjective measure. Here are the two definitions of stable employment we will look at: 1. Those with positive earnings all four quarters after exit with the same employer2. Those that experienced full-quarter employment. By full-quarter employment, an individual had earnings in quarters t-1, t, and t+1 from the same employer.> These are not the only two, but just two common measures of stable employment. If you choose to analyze stable employment within a specific cohort (highly recommended), make sure you clearly state your definition of stable employment.  Stable Employment Measure 1: Positive earnings all four quarters with the same employerThis calculation is relatively simple given that we have to just manipulate `df_2016_wages`. We will approach this calculation by counting the number of quarters each individual (`ssn`) received wages from each employer (`uiacct`), and then filter for just those `ssn`/`uiacct` combinations that appear in all four quarters in 2017.# see if we can calculate stable employment measure #1
    df_2016_wages %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(quarter)
        ) %>%
        ungroup() %>%
        filter(n_quarters==4) %>%
        head()From here, we can add one line of code `summarize(n_distinct(ssn))` to calculate the number of individuals in our cohort that experienced this measure of stable employment.# calculate number of individuals in our cohort that experienced stable employment measure #1
    df_2016_wages %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(quarter)
        ) %>%
        ungroup() %>%
        filter(n_quarters==4) %>%
        summarize(n_distinct(ssn))If you are curious about the number of members of our cohort who found stable employment (according to this definition) with multiple employers, you can find that with a few more lines of code.# see if we can calculate stable employment measure #1
    df_2016_wages %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(quarter)
        ) %>%
        ungroup() %>%
        filter(n_quarters==4) %>%
        group_by(ssn) %>%
        summarize(n=n()) %>%
        ungroup() %>%
        filter(n>1) %>%
        summarize(num=n())Anyways, we can calculate the percentage of our cohort that experienced stable employment within this time frame pretty easily now--we just need to load our original cohort into R as a frame of reference.# 2016Q4 cohort with most recent case information
    qry <- "
    SELECT *
    FROM ada_tdc_2020.cohort_2016
    "
    
    #read into R as df
    df_2016 <- dbGetQuery(con,qry)
    # save to calculate stable employment percentage
    stable <- df_2016_wages %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(quarter)
        ) %>%
        ungroup() %>%
        filter(n_quarters==4) %>%
        summarize(num = n_distinct(ssn))
    # percentage employed all four quarters
    percent((stable$num/n_distinct(df_2016$ssn)), .01)Now, let's see how the percentage changes when we use our second definition of stable employment.  Stable Employment Measure 2: Full-Quarter EmploymentFinding full-quarter employment is a bit more complicated. Instead of using R, we will venture back into SQL, since we will need to find earnings for our cohort from 2016Q4 through 2018Q1 to calculate if an individual experienced full-quarter employment some time in 2017. We have already created this table, named `full_q_wages_2016` in the `ada_tdc_2020` schema for you using the code below:> To satisfy full-quarter employment in 2017Q1, an individual needed to have earnings from the same employer in 2016Q4, 2017Q1, and 2017Q2. Therefore, if we want to see all full-quarter employment from 2017Q1 to 2017Q4, we would need all earnings data from 2016Q4 to 2018Q1.     create table ada_tdc_2020.full_q_wages_2016 as    select a.ssn, a.tanf_spell_months, a.tanf_total_months, a.county,    b.year, b.quarter, b.uiacct, b.wages, b.naics_3_digit, b.cnty,     format('%s-%s-1', b.year, b.quarter*3-2)::date as job_yr_q    from ada_tdc_2020.cohort_2016 a    left join in_dwd.wage_by_employer b    on a.ssn = b.ssn    where b.year = 2017 or (b.year = 2016 and b.quarter = 4) or (b.year=2018 and b.quarter=1)# get earnings for our cohort from 2016Q4-2018Q1
    qry = '
    select *
    from ada_tdc_2020.full_q_wages_2016
    limit 5
    '
    dbGetQuery(con, qry)Now that we have earnings for our cohort from 2016Q4-2018Q1, we can calculate full-quarter employment. To do so, we will use three copies of the same table, and then use a `WHERE` clause to make sure we are identifying the same individual and employer combination across three consecutive quarters.The `\'3 month\'::interval` code can be used when working with dates (`job_yr_q` in this case), as it will match to exactly three months from the original date. Before or after the original date can be indicated with `+` or `-` signs.# see if we can calculate full-quarter employment
    qry = '
    select a.ssn, a.uiacct, a.job_yr_q, a.wages
    from ada_tdc_2020.full_q_wages_2016 a, ada_tdc_2020.full_q_wages_2016 b, ada_tdc_2020.full_q_wages_2016 c
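    -- b is the following quarter (t+1) and c is the preceding quarter (t-1) for the same ssn/uiacct, so a row from a marks a full quarter of employment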
    where a.ssn = b.ssn and a.uiacct=b.uiacct and
    a.ssn = c.ssn and a.uiacct = c.uiacct and a.job_yr_q = (b.job_yr_q - \'3 month\'::interval)::date and 
    a.job_yr_q = (c.job_yr_q + \'3 month\'::interval)::date
    order by a.ssn, a.job_yr_q
    limit 5
    '
    dbGetQuery(con, qry)The query above will only select earnings for quarters where an individual experienced full-quarter employment with an employer, and due to the `WHERE` clause, it will only select full-quarter employment in 2017, and won't include those who experienced full quarter employment in 2016Q4 or 2018Q1.# read full-quarter employment into r as cohort_2016_full
    qry = '
    select a.ssn, a.uiacct, a.job_yr_q, a.wages
    from ada_tdc_2020.full_q_wages_2016 a, ada_tdc_2020.full_q_wages_2016 b, ada_tdc_2020.full_q_wages_2016 c
    where a.ssn = b.ssn and a.uiacct=b.uiacct and
    a.ssn = c.ssn and a.uiacct = c.uiacct and a.job_yr_q = (b.job_yr_q - \'3 month\'::interval)::date and 
    a.job_yr_q = (c.job_yr_q + \'3 month\'::interval)::date
    order by a.ssn, a.job_yr_q
    '
    cohort_2016_full <- dbGetQuery(con, qry)Now that we have all records of full-quarter employment, along with their earnings in the quarter, we can easily calculate the number of individuals in our cohort who experienced our second measure of stable employment in at least one quarter.# calculate number of individuals in our cohort that experienced full-quarter employment
    cohort_2016_full %>%
        summarize(n=n_distinct(ssn))
    # save number of individuals in our cohort that experienced full-quarter employment
    full_n <- cohort_2016_full %>%
        summarize(n=n_distinct(ssn))
    # calculate proportion of people in our cohort that experienced full-quarter employment
    percent((full_n$n/n_distinct(df_2016$ssn)), .01)We can also calculate the percentage of individuals in our cohort that experienced full quarter employment with the same employer in all four quarters.cohort_2016_full %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(job_yr_q)) %>%
        ungroup() %>%
        filter(n_quarters == 4) %>%
        summarize(n=n_distinct(ssn))And then we can calculate this percentage.# save as full_4
    full_4 <- cohort_2016_full %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(job_yr_q)) %>%
        ungroup() %>%
        filter(n_quarters == 4) %>%
        summarize(n=n_distinct(ssn))
    percent((full_4$n/n_distinct(df_2016$ssn)), .01)If you're curious, we can see if anyone experienced full quarter employment all four quarters with multiple employers as well.# save as full_4
    cohort_2016_full %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(job_yr_q)) %>%
        ungroup() %>%
        filter(n_quarters == 4) %>%
        group_by(ssn) %>%
        summarize(n_emps = n_distinct(uiacct)) %>%
        filter(n_emps > 1) %>%
        summarize(n=n_distinct(ssn))Are you surprised at the difference in percentages for our two measures of stable employment?  Checkpoint 1: Recreate for 2009Q1  Find the percentage of our 2009Q1 cohort that experienced stable employment using these two metrics. How do they compare? Does this surprise you?# How many individuals satisfy stable employment measure #1?
    
    # What percentage of our cohort satisfies stable employment measure #1?
    
    # How many individuals satisfy stable employment measure #2?
    
    # Use table "ada_tdc_2020.full_q_wages_2009"
    
    # What percentage of our cohort satisfies stable employment measure #2 for at least one quarter?Question 2: What were the average quarterly earnings within these stable jobs? Let's see if earnings differed for our cohort when comparing our two measures of stable employment.   Stable Employment Measure 1: Average Quarterly EarningsWe'll start with our first measure of those that had earnings with the same employer for all four quarters within our time frame. First, we will isolate all `ssn`/`uiacct` combinations that satisfied this stable employment measure, and then filter our original earnings data frame `df_2016_wages` to just include wages for these combinations.# all ssn and uiacct values from stable employment measure #1 and save to stable_emp_1
    stable_emp_1 <- df_2016_wages %>%
        group_by(ssn, uiacct) %>%
        summarize(n_quarters = n_distinct(quarter)
        ) %>%
        ungroup() %>%
        filter(n_quarters==4) %>%
        select(-n_quarters)> The code used to create `stable_emp_1` is copied from the code used earlier to isolate those who had earnings with the same employer for all four quarters within our time frame, with the addition of the last line so we don't store the number of quarters for which they were employed (which is always four in this case anyways).# see stable_emp_1
    head(stable_emp_1)Now, we just need to `inner_join` rows in `df_2016_wages` for those with the same `uiacct` and `ssn` combinations as in `stable_emp_1`, and then we can find the average quarterly earnings.# find average quarterly earnings for these individuals
    df_2016_wages %>%
        inner_join(stable_emp_1, by = c('uiacct', 'ssn')) %>%
        summarize(mean_wages = mean(wages))Stable Employment Measure 2: Average Quarterly EarningsFor our second stable employment measure, we have already identified `ssn`/`uiacct`/`job_yr_q` combinations for full-quarter employment. We will use a similar strategy in joining `df_2016_wages` before finding the average quarterly earnings for quarters in which members of our cohort experienced full-quarter employment.# see cohort_2016_full
    head(cohort_2016_full)
    # find average quarterly earnings for stable employment measure 2
    df_2016_wages %>%
        inner_join(cohort_2016_full, by = c('uiacct', 'ssn', 'job_yr_q')) %>%
        summarize(mean_wages = mean(wages))Checkpoint 2: Wages in Stable Employment for the 2009Q1 Cohort Find the average quarterly wages for those in our 2009Q1 cohort that experienced stable employment using the two definitions above.# average quarterly wages under stable employment measure #1
    
    
    # average quarterly wages under stable employment measure #2Indiana's EmployersIn this section, we'll look at the characteristics of Indiana's employers. First, let's load in and take a quick look at our employer-level characteristics file `employers_2017` (located in the `ada_tdc_2020` schema), which covers all employers in each quarter of 2017.  Load the dataset Before we get started answering these questions, let's load and then take a look at this file.# look at employer-level characteristics table
    qry <- "
    select *
    from ada_tdc_2020.employers_2017
    limit 5
    "
    dbGetQuery(con, qry)
    # read into R
    qry <- "
    select *
    from ada_tdc_2020.employers_2017
    "
    employers <- dbGetQuery(con, qry)Let's see how many rows are in `employers`.# number of rows
    nrow(employers)Let's also see how many employers we have on file per quarter in 2017.# number of employers by quarter
    employers %>%
        count(quarter)Indiana's EmployersNow that the `employers` data frame is ready for use, as in the [Data Exploration](Data_Exploration.ipynb) notebook, we will try to answer some broad questions about Indiana's labor market through some more direct questions:- What is the total number of jobs per quarter? What about total number of full quarter jobs?- What are the most popular industries by number of employees? What about by number of employers?- What is the distribution of both total and full-quarter employment of employers per quarter?- What is the distribution of total and average annual earnings by quarter of these employers?- Did average employment, hiring, and separation rates across all employers vary by quarter in 2017? Question 1: What is the total number of jobs per quarter? What about total number of full quarter jobs? There are two columns in `employers` we will focus on to answer this set of questions: `num_employed`, which is the number of employees, and `full_num_employed`, which is the number of full-quarter employees.# find number of employees and full-quarter employees
    employers %>%
        summarize(total_jobs = sum(num_employed),
                 total_full_quarter_jobs = sum(full_num_employed, na.rm=T))Question 2: What are the most popular industries by number of employees? What about by number of employers? Again, we will leverage the `num_employed` variable in `employers`, and this time, we will group by `naics_3_digit`.# 10 most popular industries
    employers %>%
        group_by(naics_3_digit) %>%
        summarize(num_employed = sum(num_employed)) %>%
        arrange(desc(num_employed)) %>%
        head(10)Let's use our industry crosswalk to put some names to these NAICS codes. Like in the [Data Exploration](Data_Exploration.ipynb) notebook, we can use the `naics_2017` table in the `public` schema to act as a crosswalk.# read naics_2017 table into R as naics
    qry = '
    select *
    from public.naics_2017
    '
    naics <- dbGetQuery(con, qry)
    # save 10 most popular industries
    pop_naics <- employers %>%
        group_by(naics_3_digit) %>%
        summarize(num_employed = sum(num_employed)) %>%
        arrange(desc(num_employed)) %>%
        # make naics_3_digit character type instead of numeric
        mutate(naics_3_digit = as.character(naics_3_digit)) %>%
        head(10)Now that we have stored `pop_naics` as a data frame, we can `left_join()` it to `naics` to find the industries associated with each 3-digit NAICS code.# get industry names of most popular naics
    pop_naics %>% 
        left_join(naics, by=c('naics_3_digit' = 'naics_us_code')) %>%
        # don't include the other columns
        select(-c(seq_no,naics_3_digit)) %>%
        # sort order of columns
        select(naics_us_title, num_employed)Do any of these industries surprise you? Now, let's move on to our most common industries by number of employers.> In the following code, `n_distinct()` is used to calculate the number of unique employers in 2017, whereas `n()` calculates the number of total employers for all four quarters in 2017.# calculate number of distinct and total number of employers in all four quarters of 2017
    employers %>%
        group_by(naics_3_digit) %>%
        summarize(distinct_emp = n_distinct(uiacct),
                 num_emps = n()) %>%
        arrange(desc(distinct_emp)) %>%
        filter(!is.na(naics_3_digit)) %>%
        head(10)Again, we can find the associated industry names with a quick join after saving the resulting data frame above.# calculate number of distinct and total number of employers in all four quarters of 2017
    # save to pop_naics_emps
    pop_naics_emps <- employers %>%
        group_by(naics_3_digit) %>%
        summarize(distinct_emp = n_distinct(uiacct),
                 num_emps = n()) %>%
        arrange(desc(distinct_emp)) %>%
        filter(!is.na(naics_3_digit)) %>%
        # again make naics_3_digit character type
        mutate(naics_3_digit = as.character(naics_3_digit)) %>%
        head(10)
    # get industry names of most popular naics
    pop_naics_emps %>% 
        left_join(naics, by=c('naics_3_digit' = 'naics_us_code')) %>%
        # don't include the other columns
        select(-c(seq_no,naics_3_digit)) %>%
        # sort order of columns
        select(naics_us_title, distinct_emp, num_emps)How does this list compare to the one of the most popular industries by number of total employees? Question 3: What is the distribution of both total and full-quarter employment of employers per quarter? Now, instead of aggregating `num_employed` by quarter, we will simply look at the distribution of `num_employed` within each quarter. We will find the 1st, 10th, 25th, 50th, 75th, 90th and 99th percentiles.# find distribution of total employees by employer and quarter
    employers %>%
        summarize('.01' = quantile(num_employed, .01, na.rm=TRUE),
                  '.1' = quantile(num_employed, .1, na.rm=TRUE),
                  '.25' = quantile(num_employed, .25, na.rm=TRUE),
                  '.5' = quantile(num_employed, .5, na.rm=TRUE),
                  '.75' = quantile(num_employed, .75, na.rm=TRUE),
                  '.9' = quantile(num_employed, .9, na.rm=TRUE),
                  '.99' = quantile(num_employed, .99, na.rm=TRUE),
                 )
    # find distribution of full-quarter employees by employer and quarter
    employers %>%
        summarize('.01' = quantile(full_num_employed, .01, na.rm=TRUE),
                  '.1' = quantile(full_num_employed, .1, na.rm=TRUE),
                  '.25' = quantile(full_num_employed, .25, na.rm=TRUE),
                  '.5' = quantile(full_num_employed, .5, na.rm=TRUE),
                  '.75' = quantile(full_num_employed, .75, na.rm=TRUE),
                  '.9' = quantile(full_num_employed, .9, na.rm=TRUE),
                  '.99' = quantile(full_num_employed, .99, na.rm=TRUE),
                 )What does this tell you about the relative size of employers in Indiana? Question 4: What is the distribution of total and average annual earnings by quarter of these employers?# find distribution of total earnings by employer and quarter
    employers %>%
        summarize('.01' = quantile(total_earnings, .01, na.rm=TRUE),
                  '.1' = quantile(total_earnings, .1, na.rm=TRUE),
                  '.25' = quantile(total_earnings, .25, na.rm=TRUE),
                  '.5' = quantile(total_earnings, .5, na.rm=TRUE),
                  '.75' = quantile(total_earnings, .75, na.rm=TRUE),
                  '.9' = quantile(total_earnings, .9, na.rm=TRUE),
                  '.99' = quantile(total_earnings, .99, na.rm=TRUE),
                 )
    # find distribution of average annual earnings by employer and quarter
    employers %>%
        summarize('.1' = quantile(avg_earnings, .1, na.rm=TRUE),
                  '.25' = quantile(avg_earnings, .25, na.rm=TRUE),
                  '.5' = quantile(avg_earnings, .5, na.rm=TRUE),
                  '.75' = quantile(avg_earnings, .75, na.rm=TRUE),
                  '.9' = quantile(avg_earnings, .9, na.rm=TRUE),
                  '.99' = quantile(avg_earnings, .99, na.rm=TRUE),
                 )Is this what you were expecting to see? How do overall average earnings by employees compare to average earnings within our cohort? Question 5: Did average employment, hiring, and separation rates across all employers vary by quarter in 2017? Here, we will go back to using `group_by` and `summarize` to find our answers.# find mean and standard deviation of employment rates by quarter
    employers %>%
        group_by(quarter) %>%
        summarize(mean = mean(emp_rate, na.rm=TRUE),
                 sd = sd(emp_rate, na.rm=TRUE))
    # find mean and standard deviation of hiring rates by quarter
    employers %>%
        group_by(quarter) %>%
        summarize(mean = mean(hire_rate, na.rm=TRUE),
                 sd = sd(hire_rate, na.rm=T))
    # find mean and standard deviation of separation rates by quarter
    employers %>%
        group_by(quarter) %>%
        summarize(mean = mean(sep_rate, na.rm=T),
                 sd = sd(sep_rate, na.rm=T))Based on your knowledge of employment patterns in 2017, are these results consistent with the overall trends in the United States at the time?  Checkpoint 3: Understanding Our Cohort within Labor Market  Optimally, we would like to get a better sense of who is employing our 2016 cohort - are they larger employers with lots of turnover? Do they tend to pay their employees better? Please find the answers to the questions posed in "Indiana's Employers" for employers that employed members of our cohort. Filter the `employers` data frame based on the `uiacct` and `quarter`.# guiding question 1
    
    
    # guiding question 2
    
    
    # guiding question 3
    
    
    # guiding question 4
    
    
    # guiding question 5Python Function TestingThis notebook is for testing all the Python functions. Each cell is a data source class; feel free to experiment to your heart's content.import augur
    
    # import everything that githubapi.py imports so we can just copy and paste our function later
    augur_app = augur.Application('../augur.config.json')
    import pandas as pd
    import sqlalchemy as s
    import numpy as np
    import re
    from augur import logger
    from augur.util import annotate
    
    ghtorrent = augur_app.ghtorrent()
    owner='rails'
    repo='rails'
    
    # ghtorrent.closed_issues(owner, repo)
    # ghtorrent.code_commits(owner, repo)
    # ghtorrent.code_review_iteration(owner, repo)
    # ghtorrent.contribution_acceptance(owner, repo)
    # ghtorrent.contributing_github_organizations(owner, repo)
    # ghtorrent.first_response_to_issue_duration(owner, repo)
    # ghtorrent.forks(owner, repo)
    # ghtorrent.maintainer_response_to_merge_request_duration(owner, repo)
    # ghtorrent.new_contributing_github_organizations(owner, repo)
    # ghtorrent.open_issues(owner, repo)
    # ghtorrent.pull_request_comments(owner, repo)
    # ghtorrent.pull_requests_open(owner, repo)
    # ghtorrent.issue_comments(owner, repo)
    # ghtorrent.watchers(owner, repo)
    # ghtorrent.commits100(owner, repo)
    # ghtorrent.commit_comments(owner, repo)
    # ghtorrent.committer_locations(owner, repo)
    # ghtorrent.total_committers(owner, repo)
    # ghtorrent.issue_activity(owner, repo)
    # ghtorrent.pull_request_acceptance_rate(owner, repo)
    # ghtorrent.community_age(owner, repo)
    # ghtorrent.community_engagement(owner, repo)
    # ghtorrent.contributors(owner, repo)
    # ghtorrent.contributions(owner, repo)
    # ghtorrent.classify_contributors(owner, repo)
    # ghtorrent.project_age(owner, repo)
    # ghtorrent.fakes(owner, repo)
    # ghtorrent.ghtorrent_range(owner, repo)
    import pandas as pd
    import sqlalchemy as s
    import numpy as np
    import re
    from augur import logger
    
    ghtorrentplus = augur_app.ghtorrentplus()
    owner='rails'
    repo='rails'
    
    # ghtorrentplus.closed_issue_resolution_duration(owner, repo)
    import sys
    import pandas as pd
    if sys.version_info > (3, 0):
        import urllib.parse as url
    else:
        import urllib as url
    
    publicwww = augur_app.publicwww()
    owner='rails'
    repo='rails'
    
    # publicwww.linking_websites(owner, repo)
    import os
    import shutil
    import re
    import json
    import datetime
    import pandas as pd
    import git
    from lockfile import LockFile, AlreadyLocked
    from augur.util import logger, get_cache
    
    git = augur_app.git()
    
    # git.get_repo("https://github.com/rails/rails")
    # git.update()
    # git.downloaded_repos()
    # git.lines_changed_minus_whitespace("https://github.com/rails/rails")
    # git.changes_by_author("https://github.com/rails/rails")
    from augur.localcsv import LocalCSV
    import json
    import re
    from dateutil.parser import parse
    import pandas as pd
    import github
    import numpy as np
    import datetime
    import requests
    from augur import logger
    
    github = augur_app.githubapi()
    owner='rails'
    repo='rails'
    
    github.lines_of_code_changed(owner, repo)
    # github.bus_factor(owner, repo)
    # github.major_tags(owner, repo)
    # github.tags(owner, repo)
    # github.contributors_gender(owner, repo)
    import requests
    import pandas as pd
    import numpy as np
    from bs4 import BeautifulSoup
    from augur import logger
    
    librariesio = augur_app.librariesio()
    owner='rails'
    repo='rails'
    
    # librariesio.dependencies(owner, repo)
    # librariesio.dependency_stats(owner, repo)
    # librariesio.dependents(owner, repo)
    import json
    import pandas as pd
    import requests
    import datetime
    import base64
    from augur import logger
    
    downloads = augur_app.downloads()
    owner='rails'
    repo='rails'
    
    # downloads.downloads(owner, repo)
    # downloads.ruby_downloads(owner)
    # downloads.npm_downloads(owner, repo)
    import pandas as pd
    import tldextract
    from urllib.parse import urlparse
    from .util import get_data_path
    
    localcsv = augur_app.localcsv()
    owner='rails'
    repo='rails'
    
    localcsv.classify_emails(self, email_series)Lambda School Data Science*Unit 2, Sprint 1, Module 1*---  Regression 1 AssignmentYou'll use another **New York City** real estate dataset. But now you'll **predict how much it costs to rent an apartment**, instead of how much it costs to buy a condo.The data comes from renthop.com, an apartment listing website.- [ ] Look at the data. Choose a feature, and plot its relationship with the target.- [ ] Use scikit-learn for linear regression with one feature. You can follow the [5-step process from ](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API).- [ ] Define a function to make new predictions and explain the model coefficient.- [ ] Organize and comment your code.> [Do Not Copy-Paste.](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) You must type each of these exercises in, manually. If you copy and paste, you might as well not even do them. The point of these exercises is to train your hands, your brain, and your mind in how to read, write, and see code. If you copy-paste, you are cheating yourself out of the effectiveness of the lessons.If your **Plotly** visualizations aren't working:- You must have JavaScript enabled in your browser- You probably want to use Chrome or Firefox- You may need to turn off ad blockers- [If you're using Jupyter Lab locally, you need to install some "extensions"](https://plot.ly/python/getting-started/jupyterlab-support-python-35) Stretch Goals- [ ] Do linear regression with two or more features.- [ ] Read [The Discovery of Statistical Regression](https://priceonomics.com/the-discovery-of-statistical-regression/)- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 2.1: What Is Statistical Learning?import sys
    
    # If you're on Colab:
    if 'google.colab' in sys.modules:
        DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    
    # If you're working locally:
    else:
        DATA_PATH = '../data/'
        
    # Ignore this Numpy warning when using Plotly Express:
    # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
    import warnings
    warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
    # Read New York City apartment rental listing data
    import pandas as pd
    df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv', 
                     parse_dates=['created'], 
                     index_col='created')
    
    # assert df.shape == (49352, 34)
    
    
    # dtype_dict = {'ZIP_CODE': 'object',
    #               'YEAR_BUILT': int}
    
    # df = pd.read_csv(DATA_PATH+'condos/tribeca.csv', 
    #                  dtype=dtype_dict, 
    #                  parse_dates=['SALE_DATE'],
    #                  index_col='SALE_DATE')
    # Remove outliers: 
    # the most extreme 1% prices,
    # the most extreme .1% latitudes, &
    # the most extreme .1% longitudes
    df = df[(df['price'] >= 1375) & (df['price'] <= 15500) & 
            (df['latitude'] >=40.57) & (df['latitude'] < 40.99) &
            (df['longitude'] >= -74.1) & (df['longitude'] <= -73.38)]
    df.head()
    df.info()
    # plotting histogram of the target variable 
    df['price'].plot(kind='hist')
    # the data is positively skewed 
    df['elevator'].value_counts()
    df['bedrooms'].value_counts()
    import matplotlib.pyplot as plt
    
    # style
    plt.style.use('seaborn')
    
    plt.scatter(df['bedrooms'], df['price']);
    
    plt.xlabel('Number of Bedrooms')
    plt.ylabel('Price')
    
    plt.show()
    # style
    plt.style.use('seaborn')
    
    plt.scatter(df['latitude'], df['price']);
    
    plt.xlabel('Latitude')
    plt.ylabel('Price')
    
    plt.show()
    plt.style.use('seaborn')
    
    plt.scatter(df['longitude'], df['price']);
    
    plt.xlabel('longitude')
    plt.ylabel('Price')
    
    plt.show()
    # working with total bedrooms and price
    X = df[['bedrooms']]
    y = df['price']
    # # convert the created from an object to datetime so we can split our dataset using created 
    # df["created"] = pd.to_datetime(df["created"])
    # df["date_created"] = df["created"].dt.date
    # df["date_created"]
    # # make the new column date_created as the index
    # df = df.set_index('created')
    # df.head()
# date created ranges from April to June; we will use the month of June as our test set
    # now we split the dataset in train and test 
    
    cutoff = '2016-06-01'
    
    # applying the filter 
    filt = X.index < cutoff
    X_train, y_train = X.loc[filt], y.loc[filt]
    X_test, y_test = X.loc[~filt], y.loc[~filt]
    # baseline guess 
    
    plt.hist(y_train);
    baseline_guess = y_train.mean()
    
moe = abs(baseline_guess - y_train).mean()  # mean absolute error of the baseline guess

print(f'prediction of a baseline model: ${round(baseline_guess,2)}, with a mean absolute error of ${round(moe,2)}')
    # need to make a model that is more accurate than the baseline model above 
    from sklearn.linear_model import LinearRegression
    from sklearn.metrics import mean_squared_error
    import numpy as np
    
    lin_reg = LinearRegression()
lin_reg.fit(X, y);  # note: fitted on the full dataset (X, y) here, not just the training split
    lin_reg.coef_[0]
    lin_reg.intercept_we could write the line equation to solve for the rent as follows:rent = 853.25 * (total_bedrooms) + 2267.97X_model = np.linspace(0, X_train['bedrooms'].max(), 50).reshape(-1,1)
    # Note how we use the .predict() method with our model
    rent_pred = lin_reg.predict(X_model)
    
    # Plot our data
    plt.scatter(X_train, y_train)
    
    # Plot the regression line
    plt.plot(X_model, rent_pred , color='red', label='Our Model')
    
    plt.xlabel('Bedroom')
    plt.ylabel('Rent Price')
    plt.title('linear Regression')
    plt.legend()
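The assignment also asks us to define a function that makes new predictions and to explain the model coefficient. A minimal sketch, assuming `lin_reg` is the LinearRegression fitted above (coefficient ≈ 853.25, intercept ≈ 2267.97); the helper name `predict_rent` is ours, purely for illustration:

def predict_rent(bedrooms):
    """Predict the monthly rent for a listing with the given number of bedrooms (illustrative helper)."""
    estimate = lin_reg.predict([[bedrooms]])[0]
    print(f'Predicted rent for {bedrooms} bedroom(s): ${estimate:,.2f}')
    # Interpretation: each additional bedroom adds about $853 (the coefficient) to the
    # predicted rent, on top of the ~$2268 intercept (the prediction for 0 bedrooms).
    return estimate

predict_rent(2)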
    # Calculating RMSE score 
    rent_predictions = lin_reg.predict(X)
    rmse_scores = np.sqrt(mean_squared_error(rent_predictions, y))
    rmse_scores
    # validating the score using cross_val 
    from sklearn.model_selection import cross_val_score
    
    score = cross_val_score(lin_reg, X, y, scoring= 'neg_mean_squared_error', cv=10)
    rmse = np.sqrt(-score)
    std_rmse = np.std(rmse)
    
    print(rmse)
    print(rmse.mean())
    std_rmse
# Scores on the test and validation sets are almost the same: this model is low on bias and low on variance, so it generalizes well.We should be able to improve the score by adding polynomial features, since the relation between bedrooms and price is not exactly linear.from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import PolynomialFeatures
    
    poly_reg  = make_pipeline(PolynomialFeatures(degree=9), LinearRegression())
    poly_reg.fit(X,y);
    
    X_model = np.linspace(0, X_train['bedrooms'].max(), 50).reshape(-1,1)
    # Note how we use the .predict() method with our model
    rent_pred = poly_reg.predict(X_model)
    
    # Plot our data
    plt.scatter(X_train, y_train)
    
    # Plot the regression line
    plt.plot(X_model, rent_pred , color='red', label='Our Model')
    
    plt.xlabel('Bedroom')
    plt.ylabel('Rent Price')
    plt.title('using Polynomial Regression')
    plt.legend()
    plt.show()
    # Calculating RMSE score 
    
    rent_pred1 = poly_reg.predict(X)
    rmse_scores = np.sqrt(mean_squared_error(rent_pred1, y))
    rmse_scores
    # validating the score using cross_val 
    
    score = cross_val_score(poly_reg, X, y, scoring= 'neg_mean_squared_error', cv=10)
    rmse = np.sqrt(-score)
    std_rmse = np.std(rmse)
    
    print(rmse)
    print(rmse.mean())
    std_rmse
# we got the best model possible with the polynomial feature degree set to 9Let's do it using Ridge Regression and see what we get. Don't expect better results, because the model was not overfitting.from sklearn.linear_model import Ridge
    # use Random Search to find the best value of alpha for ridge regression 
    from sklearn.model_selection import RandomizedSearchCV
    
    ridge = Ridge(normalize=True, random_state=42)
    parameters = {'alpha':[1e-15, 1e-10, 1e-8, 1e-5, 1e-2, 1, 5,10, 20,30,40, 50, 60, 100, 110]}
    
    ridge_reg = RandomizedSearchCV(ridge, parameters, scoring= 'neg_mean_squared_error', cv=10, random_state=42)
    ridge_reg.fit(X,y)
    
    print(ridge_reg.best_params_)
    print(ridge_reg.best_estimator_)
    # using the best value of alpha for ridge lets calculate the rmse and see if we get any better results 
    
    ridge = ridge_reg.best_estimator_
    
    # Calculating RMSE score 
    
    rent_pred2 = ridge.predict(X)
    rmse_scores = np.sqrt(mean_squared_error(rent_pred2, y))
    rmse_scores
    X_model = np.linspace(0, X_train['bedrooms'].max(), 50).reshape(-1,1)
    # Note how we use the .predict() method with our model
    rent_pred = ridge.predict(X_model)
    
    # Plot our data
    plt.scatter(X_train, y_train)
    
    # Plot the regression line
    plt.plot(X_model, rent_pred , color='red', label='Our Model')
    
    plt.xlabel('Bedroom')
    plt.ylabel('Rent Price')
    plt.title('Using Ridge Regression')
    plt.legend()
    plt.show()
    # validating the score using cross_val 
    
    score = cross_val_score(ridge, X, y, scoring= 'neg_mean_squared_error', cv=10)
    rmse = np.sqrt(-score)
    std_rmse = np.std(rmse)
    
    print(rmse)
    print(rmse.mean())
    std_rmse[1513.61744289 1442.79760998 1499.81967906 1467.77577537 1472.22972372
     1532.00247255 1516.93963049 1505.28687873 1454.30489411 1477.66023195]
1488.243433883667Let's try using Stochastic Gradient Descent with Ridge (L2) regularizationfrom sklearn.linear_model import SGDRegressor
    
    sgd_reg = SGDRegressor(random_state=42)
# let's start by finding the best value of the learning rate
    
    parameters1 = {'eta0': [0.001, 0.005, 0.01, 0.03, 0.06, 0.09, 1, 1.05]}
    
    sgd_regressor = RandomizedSearchCV(sgd_reg, parameters1, cv=10, scoring = 'neg_mean_squared_error', random_state=42)
    sgd_regressor.fit(X,y)
    
    print(sgd_regressor.best_params_)
    print(sgd_regressor.best_estimator_)
    sgd_reg = sgd_regressor.best_estimator_
    
    rent_pred4 = sgd_reg.predict(X)
    rmse = np.sqrt(mean_squared_error(rent_pred4, y))
    rmse
    X_model = np.linspace(0, X_train['bedrooms'].max(), 50).reshape(-1,1)
    # Note how we use the .predict() method with our model
    rent_pred5 = sgd_reg.predict(X_model)
    
    # Plot our data
    plt.scatter(X_train, y_train)
    
    # Plot the regression line
    plt.plot(X_model, rent_pred5 , color='red', label='Our Model')
    
    plt.xlabel('Bedroom')
    plt.ylabel('Rent Price')
    plt.title('Using SGD regression with Ridge')
    plt.legend()
    plt.show()
    # validating the score using cross_val 
    # shouldn't be any different than ridge regression
    
    score = cross_val_score(sgd_reg, X, y, scoring= 'neg_mean_squared_error', cv=10)
    rmse = np.sqrt(-score)
    std_rmse = np.std(rmse)
    
    print(rmse)
    print(rmse.mean())
    std_rmse[1513.57503353 1443.298727   1499.67209299 1466.98882474 1472.31476895
     1532.83228657 1516.84217954 1504.98170739 1455.4029412  1477.19573594]
    1488.310429785467**Code Signal Solution for Interview Challenges** **Problem Statement 1:**'''Given an array a that contains only numbers in the range from 1 to a.length, find the first duplicate number for which the second occurrence has the minimal index. In other words, if there are more than 1 duplicated numbers, return the number for which the second occurrence has a smaller index than the second occurrence of the other number does. If there are no such elements, return -1.ExampleFor a = [2, 1, 3, 5, 3, 2], the output should be firstDuplicate(a) = 3.There are 2 duplicates: numbers 2 and 3. The second occurrence of 3 has a smaller index than the second occurrence of 2 does, so the answer is 3.For a = [2, 2], the output should be firstDuplicate(a) = 2;For a = [2, 4, 3, 5, 1], the output should be firstDuplicate(a) = -1.Input/Output[execution time limit] 4 seconds (py3)[input] array.integer aGuaranteed constraints:1 ≤ a.length ≤ 105,1 ≤ a[i] ≤ a.length.[output] integerThe element in a that occurs in the array more than once and has the minimal index for its second occurrence. If there are no such elements, return -1.'''def firstDuplicate(a):
        seen = set()
        for i in a:
            if i in seen:
                return i
            seen.add(i)
        return -1
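    # The constraint 1 <= a[i] <= len(a) also allows an O(1)-extra-space variant (a sketch,
    # not part of the original solution; the name is illustrative): mark a value as seen by
    # negating the element at index abs(v) - 1; a second visit finds a negative entry.
    # Note that it mutates the input list.
def firstDuplicate_inplace(a):
    for v in a:
        idx = abs(v) - 1
        if a[idx] < 0:
            return abs(v)
        a[idx] = -a[idx]
    return -1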
    
    a = [2, 1, 3, 5, 3, 2]
print(firstDuplicate(a))
    3**Problem Statement 2:**Given a string s consisting of small English letters, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'.ExampleFor s = "abacabad", the output should befirstNotRepeatingCharacter(s) = 'c'.There are 2 non-repeating characters in the string: 'c' and 'd'. Return c since it appears in the string first.For s = "abacabaabacaba", the output should befirstNotRepeatingCharacter(s) = '_'.There are no characters in this string that do not repeat.Input/Output[execution time limit] 4 seconds (py3)[input] string sA string that contains only lowercase English letters.Guaranteed constraints:1 ≤ s.length ≤ 105.[output] charThe first non-repeating character in s, or '_' if there are no characters that do not repeat.def firstNotRepeatingCharacter(s):
      for i in s:
        if s.index(i) == s.rindex(i):
          return i
      return '_'  
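# An alternative O(n) sketch (not part of the original solution; the name is illustrative)
# using collections.Counter; the index/rindex scan above can be O(n^2) in the worst case,
# which may matter for the 10^5-length constraint:
from collections import Counter

def firstNotRepeatingCharacter_counter(s):
  counts = Counter(s)
  for ch in s:
    if counts[ch] == 1:
      return ch
  return '_'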
    
    
    s = "abacabad"
    # s = "abacabaabacaba"
    print(firstNotRepeatingCharacter(s))c**Problem Statement 3:**Note: Try to solve this task in-place (with O(1) additional memory), since this is what you'll be asked to do during an interview.You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees (clockwise).ExampleFora = [[1, 2, 3],     [4, 5, 6],     [7, 8, 9]]the output should berotateImage(a) =    [[7, 4, 1],     [8, 5, 2],     [9, 6, 3]]Input/Output[execution time limit] 4 seconds (py3)[input] array.array.integer aGuaranteed constraints:1 ≤ a.length ≤ 100,a[i].length = a.length,1 ≤ a[i][j] ≤ 104.[output] array.array.integerdef rotateImage(a):
  return [list(row) for row in zip(*a[::-1])]  # convert the zipped tuples to lists so the result is a list of lists
    
    
    a = [[1, 2, 3], 
         [4, 5, 6], 
         [7, 8, 9]] 
    # print(len(a))
    # print(a[-2][-1])
    rotateImage(a)
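# The prompt asks for an in-place, O(1) extra-memory rotation; the zip-based one-liner above
# builds a new structure instead. A hedged sketch of the standard layer-by-layer four-way swap
# (illustrative name; assumes a square matrix and mutates it in place):
def rotateImageInPlace(a):
  n = len(a)
  for layer in range(n // 2):
    first, last = layer, n - 1 - layer
    for i in range(first, last):
      offset = i - first
      top = a[first][i]                                 # save top
      a[first][i] = a[last - offset][first]             # left   -> top
      a[last - offset][first] = a[last][last - offset]  # bottom -> left
      a[last][last - offset] = a[i][last]               # right  -> bottom
      a[i][last] = top                                  # top    -> right
  return a

rotateImageInPlace([[1, 2, 3], [4, 5, 6], [7, 8, 9]])  # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]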
a[::-1]ImageNet Handlingimport torch
import torchvision
from torchvision import transforms  # these imports are needed for the cells below

transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.Resize((224, 224)),
        transforms.ToTensor()
    ])
    
    imagenet_data = torchvision.datasets.ImageNet('../../seri/datasets/imagenet', transform=transform)
    data_loader = torch.utils.data.DataLoader(imagenet_data,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=16)Constantslayer1 = get_layer(4, 2, 5)
    layer2 = get_layer(4, 1, 6)
    channel, stdBucketWidth, stdActLower, numBuckets, minExamples, maxExamples, numImages = 1052, .5, -16, 64, 3, 100, 32Get Sorted ImagesactsAndImgs = acts_and_imgs(data_loader, layer, channel, numImages)
    sortedImages = sort_acts_and_imgs(actsAndImgs, data_loader, layer, channel, stdBucketWidth, stdActLower, numBuckets, minExamples, maxExamples)Save Sorted Images# with open('filename.sortedImages', 'wb') as handle:
    #     pickle.dump(sortedImages, handle, protocol=pickle.HIGHEST_PROTOCOL)
    
    # with open('filename.actsAndImgs', 'wb') as handle:
    #     pickle.dump(actsAndImgs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    
    # Load saved object as "b"
    # with open('filename.sortedImages', 'rb') as handle:
    #     b = pickle.load(handle)Random Human Sort (for testing)'''
    img_3_5_6_5 = Image.open('datasets/test_images/3_5_6_5.png')
    img_4_2_6_2 = Image.open('datasets/test_images/4_2_6_2.png')
    img_4_2_6_202 = Image.open('datasets/test_images/4_2_6_202.png')
    img_2_1_1_9 = Image.open('datasets/test_images/2_1_1_9.png')
    img_4_2_1_13 = Image.open('datasets/test_images/4_2_1_13.png')
    img_4_2_6_1642 = Image.open('datasets/test_images/4_2_6_1642.png')
    img_4_2_6_11 = Image.open('datasets/test_images/4_2_6_11.png')
    img_4_2_5_1138 = Image.open('datasets/test_images/4_2_5_1138.png')
    
    
    sortedImages = {1.0:[img_3_5_6_5, img_4_2_6_2, img_4_2_6_202], 2.0: [img_2_1_1_9, img_4_2_1_13, img_4_2_6_1642]}
    humanSort = {"one":[img_3_5_6_5, img_4_2_6_2, img_4_2_6_202], "two": [img_2_1_1_9, img_4_2_1_13, img_4_2_6_1642]}
    '''
    
    images = [i for (a, i) in actsAndImgs]
    random.shuffle(images)
    numLabels = 5
    humanSort = {}
    catSize = int(len(images) / numLabels)
    for i in range(numLabels):
        humanSort.update({i:images[i*catSize:(i+1)*catSize]})Conditional Probability Plotstart = time.time()
    distribution = act_distribution(sortedImages, humanSort)
    labels = range(numLabels)
    activations = []
    for (a, l) in distribution.keys():
        if a not in activations:
            activations.append(a)
    activations = sorted(activations)
    probabilities = {} #dictionary from label to list of probabilities (indexed by activation)
    
    for l in labels:
        probabilities.update({l:[]})
        
    for a in activations:
        for l in labels:
            list1 = probabilities[l]
            p = 0
            if (a, l) in distribution.keys():
                p = len(distribution[a, l]) / len(sortedImages[a])
            list1.append(p)
            probabilities.update({l:list1})
    
    #print(actsAndImgs)
    #print("******")
    print(probabilities)
    #print('*******')
    #print(sortedImages)
    #print('*******')
    #print(humanSort)
    #print("******")
    print(distribution)
    #print("******")
    #print(activations)
    
    
    fig, ax = plt.subplots()
    ax.stackplot(activations, probabilities.values(),
                 labels=probabilities.keys())
    ax.legend(loc='upper left')
    ax.set_title('CLIP vs Human Categorization')
    ax.set_xlabel('Activation')
    ax.set_ylabel('Probability')
    end = time.time()
    print(end - start)
    
    plt.show()
print(sortedImages)
[output truncated: dictionary mapping activation buckets (-15.75 to -0.25) to image lists, mostly empty]
Array  An array is a data structure that stores values of the same data type. In Python, this is the main difference between arrays and lists. While Python lists can contain values of different data types, arrays in Python can only contain values of the same data type. In this tutorial, we will understand Python arrays with a few examples. If you are new to Python, get started with the Python Introduction article. To use arrays in Python, you need to import the standard array module, because array is not a fundamental data type like strings and integers. Here is how you can import the array module in Python: from array import * Once you have imported the array module, you can declare an array. Here is how you do it: arrayIdentifierName = array(typecode, [Initializers])my_array = array('i', [1,2,3,4,5])
    print(my_array[1])
    
    print(my_array[2])
    
    print(my_array[0])
    from array import *
    my_array = array('i', [1,2,3,4,5])
    for i in my_array:
        print(i)1
    2
    3
    4
    5* Append any value using append() methodmy_array = array('i', [1,2,3,4,5])
    my_array.append(6)
    print(my_array)array('i', [1, 2, 3, 4, 5, 6])* Insert value using insert() methodmy_array = array('i', [1,2,3,4,5])
    my_array.insert(0,0)
    print(my_array)array('i', [0, 1, 2, 3, 4, 5])* Extend value using extend() methodmy_array = array('i', [1,2,3,4,5])
    my_extnd_array = array('i', [7,8,9,10])
    my_array.extend(my_extnd_array)
    print(my_array)array('i', [1, 2, 3, 4, 5, 7, 8, 9, 10])* Add items from list into array using fromlist()my_array = array('i', [1,2,3,4,5])
    c=[11,12,13]
    my_array.fromlist(c)
    print(my_array)array('i', [1, 2, 3, 4, 5, 11, 12, 13])* Remove any array element using remove() methodmy_array = array('i', [1,2,3,4,5])
    my_array.remove(4)
    print(my_array)array('i', [1, 2, 3, 5])* Remove last array element using pop() methodmy_array = array('i', [1,2,3,4,5])
    my_array.pop()
    print(my_array)array('i', [1, 2, 3, 4])* Fetch any element through its index using index()methodmy_array = array('i', [1,2,3,4,5])
    print(my_array.index(5))
    
    my_array = array('i', [1,2,3,3,5])
    print(my_array.index(3))4
    2* Reverse a python array using reverse() methodmy_array = array('i', [1,2,3,4,5])
    my_array.reverse()
my_array* Get array buffer information through the buffer_info() methodmy_array = array('i', [1,2,3,4,5])
    my_array.buffer_info()* Check for number of occurrences of an elementusing count() methodmy_array = array('i', [1,2,3,3,5])
    my_array.count(3)* Convert array to a python list with sameelements using tolist() methodmy_array = array('i', [1,2,3,4,5])
    c = my_array.tolist()
    cCreate a Python Program that displays the grades of the students#@title Students' Grade in Object-oriented Programming
    Student_Name = ""#@param {type:"string"}
    prelim = 90 #@param {type:"number"}
    midterm = 90 #@param {type:"number"}
    final = 90 #@param {type:"number"}
    semestral = (prelim*0.30+midterm*0.30+final*0.40)
    
    print("Student_Name: "+Student_Name)
    print("Semestral grade of the student: "+str(semestral))
    #@title Gender
    
    Male = False #@param {type:"boolean"}
    Female = True #@param {type:"boolean"}
    Birthdate = "2002-08-02" #@param {type:"date"}
    
    print("My birthdate is: "+Birthdate)
    
    #@title Gender
    
    Gender = "Female" #@param["Male","Female"]
    
    print("My gender is: "+Gender)My gender is: Femaleveri1 = [[1,'a',['cat'],2],[[[3]],'dog'],4,5]
    
    
    
    def flattenf(List1):
        newList = []
        for i in List1:
            if type(i) != type([]):
                newList.append(i)
            else:
                newList.extend(flattenf(i))
        return newList
    flattenf(veri1)
# question 2
    veri2= [[1, 2], [3, 4], [5, 6, 7]]
def rever(List2):
    # reverse the outer list in place and return it
    List2.reverse()
    return List2
rever(veri2)How to import a custom module from a cloned git repository> This code is an example of how to import Mask_RCNN from the cloned git repository and use the Config module from itimport os
    import sys
    !git clone https://www.github.com/matterport/Mask_RCNN.git 
    os.chdir("Mask_RCNN")
    ROOT_DIR = "/content"
    sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN'))
    from mrcnn.config import Config #here is your importPrepare Hotel Data# import needed libraries
    import requests
    from bs4 import BeautifulSoup
    import time
    import pandas as pdWeb scraping www.booking.com to extract hotels data. Disclaimers: - All of the data extracted are not my own and are properties of www.booking.com and data will not be used for commercial purposes- Scraped data are not used for commercial purposes and purely for personal education purposes- HTML, format, tags, parameters, and other website script used as reference are working as of this writing and may be changed anytime by the website owner/administrators which may impact this code- The code blocks may take 0-2mins depending on hardware/software/network capabilities. If you want to rerun, patience is appreciated- Imported time module to avoid overloading the site and get blocked# Web Scraping: scrape details of each hotel from search results in www.booking.com 
    headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'GET',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '3600',
            "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36"
        }
    # get html text and create BeautifulSoup object 
    url_5k_10k = 'https://www.booking.com/searchresults.html?label=gen173nr-1DCAEoggI46AdIM1gEaLQBiAEBmAExuAEXyAEM2AED6AEB-AECiAIBqAIDuAK-vIuQBsACAdICJDNiYTIzMGMwLTgyYjUtNGFiYS04MjlkLTk1ZjhkMDZjZGM4YdgCBOACAQ&sid=c181dd9af9df88eff960c65192ff13a9&aid=304142&src=searchresults&error_url=https%3A%2F%2Fwww.booking.com%2Fsearchresults.html%3Flabel%3Dgen173nr-1DCAEoggI46AdIM1gEaLQBiAEBmAExuAEXyAEM2AED6AEB-AECiAIBqAIDuAK-vIuQBsACAdICJDNiYTIzMGMwLTgyYjUtNGFiYS04MjlkLTk1ZjhkMDZjZGM4YdgCBOACAQ%3Bsid%3Dc181dd9af9df88eff960c65192ff13a9%3Btmpl%3Dsearchresults%3Bcheckin_monthday%3D01%3Bcheckin_year_month%3D2022-04%3Bcheckout_monthday%3D02%3Bcheckout_year_month%3D2022-04%3Bclass_interval%3D1%3Bdest_id%3D-2437894%3Bdest_type%3Dcity%3Bdtdisc%3D0%3Bfrom_history%3D1%3Bgroup_adults%3D2%3Bgroup_children%3D0%3Binac%3D0%3Bindex_postcard%3D0%3Blabel_click%3Dundef%3Bno_rooms%3D1%3Boffset%3D0%3Bpostcard%3D0%3Braw_dest_type%3Dcity%3Broom1%3DA%252CA%3Bsb_price_type%3Dtotal%3Bsb_travel_purpose%3Dleisure%3Bsh_position%3D1%3Bshw_aparth%3D1%3Bsi%3Dai%3Bsi%3Dci%3Bsi%3Dco%3Bsi%3Ddi%3Bsi%3Dla%3Bsi%3Dre%3Bslp_r_match%3D0%3Bsrpvid%3Db42595ebe16f00d7%3Bss_all%3D0%3Bssb%3Dempty%3Bsshis%3D0%26%3B&ss=Manila&is_ski_area=0&ssne=Manila&ssne_untouched=Manila&city=-2437894&checkin_year=2022&checkin_month=9&checkin_monthday=1&checkout_year=2022&checkout_month=9&checkout_monthday=2&group_adults=2&group_children=0&no_rooms=1&from_sf=1&nflt=price%3DPHP-5000-10000-1%3Bht_id%3D204&shw_aparth=0'
    search_html_1 = requests.get(url_5k_10k, headers = headers)    
    search_soup_1 = BeautifulSoup(search_html_1.text, 'html.parser')
    # find all links in the soup object based on class attribute
    hotel_link_class = {'class' :'_4310f7077 _45807dae0 _89c03ca2c _f7538b398'} 
    hotels_1 = search_soup_1.find_all('a', hotel_link_class) 
    
    count = 0 # for counting iterations
    hotels_dict = {} # main dictionary of hotels data  
    for hotel in hotels_1 : # iteration to put hotel details in a dict for each hotel
        hotel_details_dict = {} # dictionary of hotel data per hotel
        # extract hotel name
        hotel_name = hotel.get('href').strip()[:-6].split('/')[-1].split('.')[0]
        title_hotel_name = hotel_name.replace('-',' ').title()
        hotel_details_dict['hotel_name'] = title_hotel_name
        # price range category based on website search
        hotel_details_dict['hotel_min_price_range_php'] = '5k to 10k' 
        # extract reviews
        review_url = 'https://www.booking.com/reviews/ph/hotel/' + hotel_name + '.en-gb.html'
        review_html = requests.get(review_url, headers = headers)
        review_soup = BeautifulSoup(review_html.text, 'html.parser')
        reviewer_name = review_soup.find_all('p', {'class' : 'reviewer_name' } )
        review_content = review_soup.find_all('div', {'class' : 'review_item_review_content' } )
        zipped_reviews = dict(zip(reviewer_name, review_content) )
        review_count = 0
        for k, v in zipped_reviews.items() : # iterate to extract reviews with format
            review = 'Reviewer Name: {}\nReview: {}'.format(k.get_text(), v.get_text().strip().replace('\n', ' ') )
            review_count += 1
            if review_count == 3 : break
        hotel_details_dict['hotel_reviews'] = review 
        #extract hotel link
        hotel_details_dict['hotel_link'] = hotel.get('href').strip()[:-6]
        hotels_dict[hotel_name] = hotel_details_dict
        count += 1
        if count == 20 : break
    
    # 2nd set of urls for different price range based on the website search
    url_10k_up = 'https://www.booking.com/searchresults.html?label=gen173nr-1DCAEoggI46AdIM1gEaLQBiAEBmAExuAEXyAEM2AED6AEB-AECiAIBqAIDuAK-vIuQBsACAdICJDNiYTIzMGMwLTgyYjUtNGFiYS04MjlkLTk1ZjhkMDZjZGM4YdgCBOACAQ&sid=c181dd9af9df88eff960c65192ff13a9&aid=304142&src=searchresults&error_url=https%3A%2F%2Fwww.booking.com%2Fsearchresults.html%3Flabel%3Dgen173nr-1DCAEoggI46AdIM1gEaLQBiAEBmAExuAEXyAEM2AED6AEB-AECiAIBqAIDuAK-vIuQBsACAdICJDNiYTIzMGMwLTgyYjUtNGFiYS04MjlkLTk1ZjhkMDZjZGM4YdgCBOACAQ%3Bsid%3Dc181dd9af9df88eff960c65192ff13a9%3Btmpl%3Dsearchresults%3Bcheckin_monthday%3D01%3Bcheckin_year_month%3D2022-04%3Bcheckout_monthday%3D02%3Bcheckout_year_month%3D2022-04%3Bclass_interval%3D1%3Bdest_id%3D-2437894%3Bdest_type%3Dcity%3Bdtdisc%3D0%3Bfrom_history%3D1%3Bgroup_adults%3D2%3Bgroup_children%3D0%3Binac%3D0%3Bindex_postcard%3D0%3Blabel_click%3Dundef%3Bno_rooms%3D1%3Boffset%3D0%3Bpostcard%3D0%3Braw_dest_type%3Dcity%3Broom1%3DA%252CA%3Bsb_price_type%3Dtotal%3Bsb_travel_purpose%3Dleisure%3Bsh_position%3D1%3Bshw_aparth%3D1%3Bsi%3Dai%3Bsi%3Dci%3Bsi%3Dco%3Bsi%3Ddi%3Bsi%3Dla%3Bsi%3Dre%3Bslp_r_match%3D0%3Bsrpvid%3Db42595ebe16f00d7%3Bss_all%3D0%3Bssb%3Dempty%3Bsshis%3D0%26%3B&ss=Manila&is_ski_area=0&ssne=Manila&ssne_untouched=Manila&city=-2437894&checkin_year=2022&checkin_month=9&checkin_monthday=1&checkout_year=2022&checkout_month=9&checkout_monthday=2&group_adults=2&group_children=0&no_rooms=1&from_sf=1&nflt=price%3DPHP-10000-max-1%3Bht_id%3D204&shw_aparth=0%27'
    search_html_2 = requests.get(url_10k_up, headers = headers)
    search_soup_2 = BeautifulSoup(search_html_2.text, 'html.parser')
    hotels_2 = search_soup_2.find_all('a', hotel_link_class)
    
    for hotel in hotels_2 : # iteration to put hotel details in a dict for each hotel
        hotel_details_dict = {}
        # extract hotel name
        hotel_name = hotel.get('href').strip()[:-6].split('/')[-1].split('.')[0]
        title_hotel_name = hotel_name.replace('-',' ').title()
        hotel_details_dict['hotel_name'] = title_hotel_name
        # price range category based on website search
        hotel_details_dict['hotel_min_price_range_php'] = '10k and up'
        # extract reviews
        review_url = 'https://www.booking.com/reviews/ph/hotel/' + hotel_name + '.en-gb.html'
        review_html = requests.get(review_url, headers = headers)
        review_soup = BeautifulSoup(review_html.text, 'html.parser')
        reviewer_name = review_soup.find_all('p', {'class' : 'reviewer_name' } )
        review_content = review_soup.find_all('div', {'class' : 'review_item_review_content' } )
        zipped_reviews = dict(zip(reviewer_name, review_content) )
        review_count = 0
        for k, v in zipped_reviews.items() : # iterate to extract reviews with format
            review = 'Reviewer Name: {}\nReview: {}'.format(k.get_text(), v.get_text().strip().replace('\n', ' ') )
            review_count += 1
            if review_count == 3 : break
        hotel_details_dict['hotel_reviews'] = review 
        #extract hotel link
        hotel_details_dict['hotel_link'] = hotel.get('href').strip()[:-6]
        hotels_dict[hotel_name] = hotel_details_dict
        count += 1
        if count == 30 : break
    
    # count number of hotels and print sample link
    print('Scraping Complete!')
    print('Count of Hotels in the List:', len(hotels_dict) )
    print('Sample Hotel data:')
    print(hotels_dict[hotel_name])Scraping Complete!
    Count of Hotels in the List: 25
    Sample Hotel data:
    {'hotel_name': '', 'hotel_min_price_range_php': '10k and up', 'hotel_reviews': 'Reviewer Name: \nJanet\n\nReview: No buffet yet and limited use of pool  Facilities is ok like indoor pool  Stayed in December 2021', 'hotel_link': 'https://www.booking.com/hotel/ph/marco-polo-ortigas-manila.en-gb.html?label=gen173nr-1DCAEoggI46AdIM1gEaLQBiAEBmAExuAEXyAEM2AED6AEB-AECiAIBqAIDuAK-vIuQBsACAdICJDNiYTIzMGMwLTgyYjUtNGFiYS04MjlkLTk1ZjhkMDZjZGM4YdgCBOACAQ&sid=c181dd9af9df88eff960c65192ff13a9&aid=304142&ucfs=1&arphpl=1&checkin=2022-09-01&checkout=2022-09-02&dest_id=-2437894&dest_type=city&group_adults=2&req_adults=2&no_rooms=1&group_children=0&req_children=0&hpos=10&hapos=10&sr_order=popularity&nflt=price%3DPHP-10000-max-1%3Bht_id%3D204&srpvid=98a795aeeaa001c4&srepoch=1645391838&all_sr_blocks=78158504_187848439_0_41_0&highlighted_blocks=78158504_187848439_0_41_0&matching_block_id=78158504_187848439_0_41_0&[...]Web scrape other hotel details from each hotel link and put in the hotels_dict (dictionary)# scrape hotel other details from each hotel link in the dict and put in the hotels_dict (dictionary)
    counter = 0
    for k, v in hotels_dict.items() :
        hotel_link = v['hotel_link']
        # start with empty hotel details dict
        hotel_details_dict = {}
        
        # create soup object per hotel link
        hotel_html = requests.get(hotel_link, headers = headers)
        hotel_soup = BeautifulSoup(hotel_html.text, 'html.parser')
        
        # scrape and clean hotel location
        location_tag = hotel_soup.find('span',{'class' : 'hp_address_subtitle'} )
        hotel_loc = location_tag.contents[0].strip()
        hotels_dict[k]['hotel_location'] = hotel_loc 
        
        # scrape and clean amenities
        amenities_tag = hotel_soup.find_all('div', {'class' : 'important_facility'} )
        amenities_lst = []
        amenities = ''
        for amenity in amenities_tag:
            amenities_lst.append(amenity.get_text().strip() )
        
        amenities_lst = list(dict.fromkeys(amenities_lst) )
        for item in amenities_lst :
            if len(amenities) > 0 :
                amenities = amenities + ', ' + item
            else : 
                amenities = amenities + item
        
        hotels_dict[k]['hotel_amenities'] = amenities
    
        # scrape and clean no. of rooms
        room_types = hotel_soup.find_all('span', {'class' : 'hprt-roomtype-icon-link'} )
        count_room_avail = hotel_soup.find_all('span', {'class' : 'only_x_left urgency_message_red'} )
        room_types_lst = []
        avail_count_lst = []
    
        for room_type in room_types :
            room_types_lst.append((room_type.get_text().strip()) )
            
        if count_room_avail == [] :
            for count_room in range(len(room_types)) :
                avail_count = 'More than 7 available for this room type.'
                avail_count_lst.append(avail_count)
        else : 
            for count_room in count_room_avail :
                if len(count_room) > 0 :
                    avail_count = count_room.get_text().strip()
                    avail_count_lst.append(avail_count)
                else : 
                    avail_count = 'More than 7 available for this room type.'
                    avail_count_lst.append(avail_count)
    
        room_details = dict(zip(room_types_lst, avail_count_lst) )
        hotels_dict[k]['hotel_avail_rooms'] = room_details
    
        # scrape and clean other info
        other_tags = hotel_soup.find_all('div', {'id' : 'property_description_content'})
        for tag in other_tags :
            other_info = tag.get_text().lstrip().split('\n')
            other_info = other_info[0] + ' ' + other_info[1]
        
        hotels_dict[k]['hotel_other_info'] = other_info
        
        counter += 1
        # if counter == 1 : break # this is for testing purposes
        time.sleep(1) 
    
    print('Scraping Complete!')
    print('No. of iterations: ', counter)Scraping Complete!
    No. of iterations:  25Create Pandas dataframe, clean, then save as Hotel.csvdf = pd.DataFrame.from_dict(hotels_dict, orient = 'index') 
    df.info()
    df.head()
    df['hotel_location'] = df['hotel_location'].apply(lambda x: x.split(',')[-3].strip() + ' City')
df.loc[df['hotel_location'] == '1588 Pedro Gil St. cor MH Del Pilar Malate City', 'hotel_location'] = 'Manila City'
df.loc[df['hotel_location'] == 'Muntinlupa City City', 'hotel_location'] = 'Muntinlupa City'
df.loc[df['hotel_location'] == 'Quezon City City', 'hotel_location'] = 'Quezon City'
    df
df = df.drop('hotel_link', axis = 1)  # assign the result; drop() is not in-place by default
    df = df[['hotel_name', 'hotel_location', 'hotel_amenities', 'hotel_min_price_range_php', 
            'hotel_avail_rooms', 'hotel_other_info', 'hotel_reviews']]
    df.to_csv('Hotel.csv', index = False)
    dfImportsimport tensorflow as tf
    import tensorflow_hub as hub
    import tensorflow_addons as tfa
    import tensorflow_datasets as tfds
    
    from tensorflow import keras
    
    tfds.disable_progress_bar()
    tf.keras.utils.set_random_seed(42)
    import sys
    
    sys.path.append("..")
    
    from vit.deit_models import ViTDistilled
    from vit.model_configs import base_configConstantsMODEL_TYPE = "deit_distilled_tiny_patch16_224"
    
    BATCH_SIZE = 256
    NUM_EPOCHS = 20
    BASE_LR = 0.0005
    WEIGHT_DECAY = 0.0001
    
    AUTO = tf.data.AUTOTUNE
    NB_CLASSES = 5Initialize model configdeit_tiny_config = base_config.get_config(drop_path_rate=0.1, model_name=MODEL_TYPE)
    with deit_tiny_config.unlocked():
        deit_tiny_config.num_classes = NB_CLASSES
    
    deit_tiny_config.to_dict()Data preprocessing and loadingSZ = deit_tiny_config.image_size
    
    
    def preprocess_dataset(is_training=True):
        def _pp(image, label):
            if is_training:
                # Resize to a bigger spatial resolution and take the random
                # crops.
                image = tf.image.resize(image, (SZ + 20, SZ + 20))
                image = tf.image.random_crop(image, (SZ, SZ, 3))
                image = tf.image.random_flip_left_right(image)
            else:
                image = tf.image.resize(image, (SZ, SZ))
            label = tf.one_hot(label, depth=NB_CLASSES)
            return image, label
    
        return _pp
    
    
    def prepare_dataset(dataset, is_training=True):
        if is_training:
            dataset = dataset.shuffle(BATCH_SIZE * 10)
        dataset = dataset.map(preprocess_dataset(is_training), num_parallel_calls=AUTO)
        return dataset.batch(BATCH_SIZE).prefetch(AUTO)
    train_dataset, val_dataset = tfds.load(
        "tf_flowers", split=["train[:90%]", "train[90%:]"], as_supervised=True
    )
    num_train = train_dataset.cardinality()
    num_val = val_dataset.cardinality()
    print(f"Number of training examples: {num_train}")
    print(f"Number of validation examples: {num_val}")
    
    train_dataset = prepare_dataset(train_dataset, is_training=True)
    val_dataset = prepare_dataset(val_dataset, is_training=False)Initialize student and teacher modelsdeit_tiny = ViTDistilled(deit_tiny_config)
    
    resolution = deit_tiny_config.image_size
    dummy_inputs = tf.ones((2, resolution, resolution, 3))
    _ = deit_tiny(dummy_inputs)
    print(f"Number of parameters (millions): {deit_tiny.count_params() / 1e6}.")
    # 98.37% on the validation set.
    # To know how this was trained refer to `./bit-teacher.ipynb`.
    bit_teacher_flowers = keras.models.load_model("bit_teacher_flowers")
    print(f"Number of parameters (millions): {bit_teacher_flowers.count_params() / 1e6}.")Here we can see that the teacher model has got orders of magnitude more parameters than the student model.  Wrap the training logic of DeiT**Note** that here we are just following the core principles of the distillation process laid out in the [original paper](https://arxiv.org/abs/2012.12877). The authors use more data augmentation and regularization which have been purposefully discarded to keep the workflow simple to follow.class DeiT(keras.Model):
        # Reference:
        # https://keras.io/examples/vision/knowledge_distillation/
        def __init__(self, student, teacher, **kwargs):
            super().__init__(**kwargs)
            self.student = student
            self.teacher = teacher
    
        def compile(
            self,
            optimizer,
            metrics,
            student_loss_fn,
            distillation_loss_fn,
        ):
            super().compile(optimizer=optimizer, metrics=metrics)
            self.student_loss_fn = student_loss_fn
            self.distillation_loss_fn = distillation_loss_fn
    
        def train_step(self, data):
            # Unpack data.
            x, y = data
    
            # Forward pass of teacher
            teacher_predictions = tf.nn.softmax(self.teacher(x, training=False), -1)
            teacher_predictions = tf.argmax(teacher_predictions, -1)
    
            with tf.GradientTape() as tape:
                # Forward pass of student.
                cls_predictions, dist_predictions, _ = self.student(
                    x / 255.0, training=True
                )
    
                # Compute losses.
                student_loss = self.student_loss_fn(y, cls_predictions)
                distillation_loss = self.distillation_loss_fn(
                    teacher_predictions, dist_predictions
                )
                loss = (student_loss + distillation_loss) / 2
    
            # Compute gradients.
            trainable_vars = self.student.trainable_variables
            gradients = tape.gradient(loss, trainable_vars)
    
            # Update weights.
            self.optimizer.apply_gradients(zip(gradients, trainable_vars))
    
            # Update the metrics configured in `compile()`.
            student_predictions = (cls_predictions + dist_predictions) / 2
            self.compiled_metrics.update_state(y, student_predictions)
    
            # Return a dict of performance.
            results = {m.name: m.result() for m in self.metrics}
            results.update(
                {"student_loss": student_loss, "distillation_loss": distillation_loss}
            )
            return results
    
        def test_step(self, data):
            # Unpack the data.
            x, y = data
    
            # Compute predictions.
            y_prediction, _ = self.student(x / 255.0, training=False)
    
            # Calculate the loss.
            student_loss = self.student_loss_fn(y, y_prediction)
    
            # Update the metrics.
            self.compiled_metrics.update_state(y, y_prediction)
    
            # Return a dict of performance.
            results = {m.name: m.result() for m in self.metrics}
            results.update({"student_loss": student_loss})
            return results
    
        def call(self, inputs):
            return self.student(inputs / 255.0, training=False)Distill the teacher model into the student modeldeit_distiller = DeiT(student=deit_tiny, teacher=bit_teacher_flowers)
    
    lr_scaled = (BASE_LR / 512) * BATCH_SIZE
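# The line above scales the base learning rate linearly with the batch size
# (lr = BASE_LR * BATCH_SIZE / 512), following the scaling rule used in the DeiT training recipe.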
    deit_distiller.compile(
        optimizer=tfa.optimizers.AdamW(weight_decay=WEIGHT_DECAY, learning_rate=lr_scaled),
        metrics=["accuracy"],
        student_loss_fn=keras.losses.CategoricalCrossentropy(
            from_logits=True, label_smoothing=0.1
        ),
        distillation_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    )
    _ = deit_distiller.fit(train_dataset, validation_data=val_dataset, epochs=NUM_EPOCHS)SVM with queue imbalancedf_res = {}
    for s in kernels:
        df_res_temp = pd.read_csv('../svm_queue_imbalance/res_svm/svm_linear_{}_len{}.csv'.format(
            stocks[0], data_length))
        df_res_temp = df_res_temp.append(
             pd.read_csv('../svm_queue_imbalance/res_svm/svm_sigmoid_{}_len{}.csv'.format(
                 stocks[0], data_length)))
        df_res_temp = df_res_temp.append(
            pd.read_csv('../svm_queue_imbalance/res_svm/svm_rbf_{}_len{}.csv'.format(
                stocks[0], data_length)))
        df_res[s] = df_res_temp
        df_res[s].index = list(range(len(df_res[s])))
    
    df_best_svm = pd.DataFrame()
    
    idx_max = df_res[s].sort_values(by='matthews', ascending=False).groupby(
        'kernel')['matthews'].idxmax()
    df_best_svm = df_best_svm.append(df_res[s].loc[idx_max])
    df_best_svm
from sklearn import utils
from sklearn import metrics   # used in functions_to_run below
from sklearn.svm import SVC   # used in fit_best_svm_classifier below
    
    def get_classes_weights(y_train):
        classes = np.unique(y_train)
        class_weight_list = utils.class_weight.compute_class_weight(class_weight='balanced', classes=classes, y=y_train)
        class_weights = {classes[0]: class_weight_list[0], classes[1]: class_weight_list[1]}
        return class_weights
    
    def fit_best_svm_classifier(df_best_svm, df, kernel=None):    
        gamma = df_best_svm[df_best_svm['kernel'] == kernel]['gamma'].values[0]
        coef0 = df_best_svm[df_best_svm['kernel'] == kernel]['coef0'].values[0]
        c = df_best_svm[df_best_svm['kernel'] == kernel]['C'].values[0]
    
        X = df['queue_imbalance'].values.reshape(-1, 1)
        y = df['mid_price_indicator']
       
        weights = get_classes_weights(y)
        clf = SVC(gamma=gamma, C=c, coef0=coef0, kernel=kernel, random_state=23131, class_weight=weights)
        clf.fit(X, y)
        return clf
        
    def get_scores_dict_for_data(functions_to_run, dfs, log_clf, stock):
        scores = {'stock': stock}
        for func_name, func in functions_to_run.items():
            for df_name, df in dfs.items():
                pred = log_clf.predict(df['queue_imbalance'].values.reshape(-1, 1))
            df['pred'] = pred
                scores['{}_{}'.format(df_name, func_name)] = func(df['mid_price_indicator'], pred)
        return scores
                
    functions_to_run = {'precision': metrics.precision_score, 'roc_auc': metrics.roc_auc_score,
                       'f1_score': metrics.f1_score, 'recall': metrics.recall_score, 
                       'matthews': metrics.matthews_corrcoef, 'kappa': metrics.cohen_kappa_score}
    scores = []
    
    for kernel in df_best_svm['kernel'].unique():
        stock = stocks[0]
        log_clf = fit_best_svm_classifier(df_best_svm, d_stocks[kernel], kernel=kernel)
        dfs = {'train': d_stocks[kernel], 'test': d_test_stocks[kernel], }
        res_validation = model.validate_model(
            log_clf, 
            d_stocks[kernel][['queue_imbalance']], d_stocks[kernel]['mid_price_indicator'])
        res = get_scores_dict_for_data(functions_to_run, dfs, log_clf, stock)
        res = {**res, **res_validation}
        scores.append(res)
    df_scores = pd.DataFrame(scores)
    def convert_scores(df, column):
        scores = []
        for i, row in df.iterrows():
            scores.append(np.mean(row[column]))
        return scores
    scores_columns = ['f1', 'kappa', 'matthews', 'precision', 'recall', 'roc_auc', 'train_f1', 'train_kappa',
           'train_matthews', 'train_precision', 'train_recall', 'train_roc_auc']
    
    for col in scores_columns:
        df_scores[col] = convert_scores(df_scores, col)
    df_scores
    print('Pivot values')
    for i in range(len(kernels)):
        df = d_stocks[kernels[i]]
        print(np.mean([np.min(df[df['pred'] == 1]['queue_imbalance']), 
                      np.max(df[df['pred'] == 0]['queue_imbalance'])]))
    f, ax = plt.subplots(3, 1, figsize=(35, 15), sharex=True)
    i = 0
    
    for i in range(len(kernels)):
        s = kernels[i]
        df = d_stocks[s]
        X = d_stocks[s][['queue_imbalance']].values
    y = d_stocks[s]['mid_price_indicator'].values.astype(int)
    
        clf = fit_best_svm_classifier(df_best_svm, d_stocks[s], kernel=s)
        plot_decision_regions(X[0:900], y[0:900], clf=clf,ax=ax[i], colors=','.join(['orange', 'blue']))
        ax[i].set_xlabel('Queue Imbalance')
        ax[i].set_title('SVM Decision Regions for {} on training data'.format(s))
        ax[i].set_xlim(-1.01, 1.01)
    plt.tight_layout()
    if should_save_fig:
        print('Saving')
plt.savefig('svm_decision_region_4549.png')SavingIntroduction to PandasThe latest version of this notebook is available at [https://github.com/Asterics2020-Obelics](https://github.com/Asterics2020-Obelics/School2019/tree/master/pandas)%matplotlib inline
    import pandas as pd
import matplotlib as ml
import numpy as np  # used below for np.cos and np.random
import sys
plt = ml.pyplot  # works because %matplotlib inline has already imported matplotlib.pyplot
    ml.rcParams['figure.figsize'] = (10.0, 5.0)
    
    print("Python version: {0}\n"
          "Pandas version: {1}\n"
          "Matplotlib version: {2}"
          .format(sys.version, pd.__version__, ml.__version__))
    from IPython.core.magic import register_line_magic
    
    @register_line_magic
    def shorterr(line):
        """Show only the exception message if one is raised."""
        try:
            output = eval(line)
        except Exception as e:
            print("\x1b[31m\x1b[1m{e.__class__.__name__}: {e}\x1b[0m".format(e=e))
        else:
            return output
        
    del shorterrThe basic data structures in Pandas  `DataFrame`data = {'a': [1, 2, 3],
            'b': [4.1, 5.2, 6.3],
            'c': ['foo', 'bar', 'baz'],
            'd': 42}
    df = pd.DataFrame(data)
    df
    type(df)`Series`df['a']
    type(df['a'])  
    df['a'] * 23
    np.cos(df['a'])
    s = pd.Series(np.random.randint(0, 10, 5))
    s
    s.sort_values()  # indices are kept!
    s * s.sort_values()  # and are used to match elements
    s * s.sort_values().reset_index(drop=True)Examining a `DataFrame`df
    df.dtypes
    df.columns
    df.shapeLooking into the datadf.head(2)
    df.tail(2)
    df.describe()Indexing and SlicingThere are different ways to index/slice data in pandas, which is a bit confusing at first.df.loc
    df.ilocUsing `.loc[]`This one treats the input as label or "row-name".df.loc[2]
    df['b'].loc[2]
    %shorterr df.loc[-1]KeyError: -1Accessing multiple rows/columnsdf.loc[[1, 2], ['b', 'd']]
    df.loc[1:3, ['a']]Using `.iloc[]`df.iloc[2]
    df.iloc[-1]Grouped operationsdf = pd.DataFrame({'location' : ['Italy', 'France', 'Italy', 'Italy',
                                     'France', 'Italy', 'France', 'France'],
                       'detector' : ['ARCA_DU1', 'ORCA_DU2', 'ARCA_DU3', 'ARCA_DU4',
                                     'ANTARES', 'NEMO', 'ORCA_DU1', 'ORCA_DU5'],
                       'x' : np.random.randn(8),
                       'y' : np.random.randn(8)})
    df
    df.groupby(['location']).groups
    df.groupby(['location']).get_group('France')
    df = pd.DataFrame({
        'event_id': [1, 1, 1, 2, 2, 3, 3, 3, 3],
        'n_hits': [23, 13, 14, 44, 23, 45, 1, 2, 4]
    })
    df
    df.groupby('event_id')
    for name, group_data in df.groupby('event_id'):
        print("Group name: {0}".format(name))
        print(group_data, "\n")
    df.groupby('event_id').aggregate([np.mean, np.median, sum, min, max])Plottingdf = pd.DataFrame({'foo': np.cumsum(np.random.random(1000) - 0.5),
                       'bar': np.cumsum(np.random.random(1000) - 0.5),
                       'narf': np.cumsum(np.random.random(1000) - 0.5)})
    df.plot();
    df.hist();
    plt.scatter(df['foo'], df['bar']);GET Print Jobs# apiclient.get_print_jobs()GET Print Shipping Optionsapiclient.get_print_shipping_options(iso_country_code='ca')Create print jobbook = {
       "external_id": "test-line-item",
       "title": "My Book",
       "cover_source_url": "https://www.dropbox.com/s/7bv6mg2tj0h3l0r/lulu_trade_perfect_template.pdf?dl=1&raw=1",
       "interior_source_url": "https://www.dropbox.com/s/r20orb8umqjzav9/lulu_trade_interior_template-32.pdf?dl=1&raw=1",
       "pod_package_id": "0600X0900BWSTDPB060UW444MXX",
       "quantity": 1,
    }
    books = [book]  # a print job can include multiple books
    
    address = {
       "name": "",
       "street1": "Street address 1",
       "street2": "(optional) street address second line",
       "city": "L\u00fcbeck",
       "postcode": "H1A 2A1",
       "state_code": "QC",
       "country_code": "CA",
       "phone_number": "844-212-0689",
    }
    
    
    apiclient.create_print_job(address, books, shipping_level="GROUND", external_id="test-print-job")
    
    
    data = apiclient.get_print_jobs()
    import pprint
    pprint.pprint(data)[{'child_job_ids': [],
      'contact_email': '',
      'costs': {'currency': None,
                'line_item_costs': None,
                'shipping_cost': None,
                'total_cost_excl_tax': None,
                'total_cost_incl_tax': None,
                'total_tax': None},
      'date_created': '2021-03-08T20:03:50.808926Z',
      'date_modified': '2021-03-08T20:03:57.019644Z',
      'dropship_profile_id': '7e914864-78e4-4fae-bf0c-a10574143885',
      'estimated_shipping_dates': None,
      'external_id': 'test-print-job',
      'id': 19740,
      'line_items': [{'external_id': 'test-line-item',
                      'id': 34904,
                      'order_line_item_id': None,
                      'pod_package_id': '0550X0850BWSTDPB060UW444GXX',
                      'printable_id': None,
                      'printable_normalization': {'cover': {'job_id': 34090,
                                                            'normalized_file': None,
                                                            'page_count': None,
[...]KNN ON AMAZON FINE FOOD REVIEWS DATASET Data Source **[https://www.kaggle.com/snap/amazon-fine-food-reviews](https://www.kaggle.com/snap/amazon-fine-food-reviews)** The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon. It consists of data collected over many years and contains approximately 550k reviews.  ![Knn Model on Amazon Review Dataset ](http://houseofbots.com/images/news/3197/cover.png)   SNIPPET1. Converted the reviews using NLP techniques i.e. BOW, tf-IDF, Word2Vec and tf-IDF Word2Vec.2. Applied Knn on the dataset with both techniques i.e. KD-Tree and Bruteforce. 3. Calculated Train Error, CV Error and Test Error to determine the performance and to ensure the best fit.4. Compared performance of each model using accuracy.5. Made a confusion matrix between predicted and tested data.6. Conclusion based on the obtained results.  DATA INFORMATION* Number of reviews: 568,454* Number of users: 256,059* Number of products: 74,258* Timespan: Oct 1999 - Oct 2012* Number of Attributes/Columns in data: 10   ATTRIBUTE INFORMATION1.  Id2.  ProductId - unique identifier for the product3.  UserId - unique identifier for the user4.  ProfileName5.  HelpfulnessNumerator - number of users who found the review helpful6.  HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not7.  Score - rating between 1 and 58.  Time - timestamp for the review9.  Summary - brief summary of the review10. Text - text of the review  OBJECTIVEPredict the polarity of the review using Knn and compare all models to find the best accuracy and ensure that the model is neither overfitting nor underfitting.     LOADINGimport time
    import sqlite3 
    import pandas as pd
    conn=sqlite3.connect('./final.sqlite') # making a connection with sqlite
    data=pd.read_sql_query("""SELECT * FROM Reviews""",conn)
    data.head(3)MAPPING# function to map the polarity as 0 or 1
    def sign(x):
        if x=='positive':
            return 1
        else:
            return 0
    
    data['Score']=data['Score'].map(sign)
    # Dimension
    print(data.shape)
    # Frequency of data.
    data['Score'].value_counts()SAMPLING# Taking a Random Sample of 20k points.
    Data=data.sample(20000)
    Data['Score'].value_counts()SORTING# Sorting the data according to Time.
    Data.sort_values('Time',inplace=True)IMPORTINGimport re
    import gensim
    import pickle
    import numpy as np
    import seaborn as sns
    from scipy import sparse
    from prettytable import PrettyTable
    from sklearn.metrics import accuracy_score
    import statistics as s
    from sklearn.model_selection import train_test_split
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.feature_extraction.text import TfidfTransformer
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn import preprocessing
    from sklearn.model_selection import TimeSeriesSplit
    from sklearn.model_selection import GridSearchCV
    from sklearn.neighbors import KNeighborsClassifier
    import matplotlib.pyplot as plt
    from gensim.models import Word2Vec
    from gensim.models import KeyedVectors
from sklearn.metrics import confusion_matrix

FUNCTIONS

1. SPLIT FUNCTION
'''
This function splits the data into train and test sets.
It uses train_test_split with a 70-30 ratio.
It does not shuffle, so the data keeps its sequential (time) order.
'''
    
    def Split(d1,d2):
        a,b,c,d= train_test_split(d1,d2,test_size=0.3,shuffle=False) # Splitting it in 70-30 without shuffling.
    return a,b,c,d

2. KNN FUNCTION
'''
This function takes the training data and the algorithm as input and returns the execution time, the accuracy and the optimal value of k
on that data.
It uses TimeSeriesSplit cross-validation.
It also calculates accuracy on the training folds and the CV folds.
'''
    
    def KNN(x_train,y_train,algo):
        start = time.time()
        cv_acc=[]
        train_acc=[]
    tscv = TimeSeriesSplit(n_splits=5)         # Using 5 cross-validation splits.
        for n in range(1,30,2):
            l1=[]
            l2=[]
            for train,cv in tscv.split(x_train):
                knn = KNeighborsClassifier(n_neighbors=n,algorithm=algo,n_jobs=-1)
                knn.fit(x_train[train],y_train[train])
                pred_cv = knn.predict(x_train[cv])
                pred_train = knn.predict(x_train[train])
                acc_cv = accuracy_score(y_train[cv],pred_cv, normalize=True) * float(100)
                acc_train = accuracy_score(y_train[train],pred_train, normalize=True) * float(100)
                l1.append(acc_cv)
                l2.append(acc_train)
            cv_acc.append(s.mean(l1))
            train_acc.append(s.mean(l2))
        end = time.time()
        t=end-start
        neigh=list(np.arange(1,30,2))
        opt=neigh[cv_acc.index(max(cv_acc))]
    return cv_acc,train_acc,t,opt
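As a quick illustration of how TimeSeriesSplit behaves inside the loop above, the following minimal sketch (added here, not part of the original notebook) prints the expanding-window folds for a toy array of 10 samples: every fold trains on an initial segment and validates on the segment that follows it, so later data never leaks into earlier training folds.

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X_toy = np.arange(10).reshape(-1, 1)   # 10 time-ordered samples
tscv_demo = TimeSeriesSplit(n_splits=4)
for fold, (train_idx, cv_idx) in enumerate(tscv_demo.split(X_toy)):
    # the train indices always precede the CV indices
    print(f"fold {fold}: train={train_idx.tolist()} cv={cv_idx.tolist()}")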
3. K vs ACCURACY PLOT
'''
This function takes the accuracy values and plots accuracy vs k.
'''
    
    def Accplot(acu,nlp,algo):
        sns.set_style("darkgrid")
        plt.plot(np.arange(1,30,2),acu,'b--')
        plt.xlabel("K Nearest Neighbours",fontsize=15, color='black')
        plt.ylabel("Accuracy",fontsize=15, color='black')
        plt.title("Accuracy -" + nlp + "- KNN - " + algo,fontsize=15, color='black')
        plt.show()
    return plt.show()

4. K vs ERROR PLOT
'''
    This function takes the CV accuracy and Training accuracy.
    Output is train error and CV error.
    It also plots the graph between K vs Errors.
    '''
    
    def Trainplot(cv_acc,train_acc,algo):
        a = [100 - x for x in cv_acc]
        b = [100 - x for x in train_acc]
        k=np.arange(1,30,2)
        plt.plot(k, a, '-b', label='CV Error')
        plt.plot(k, b, '-r', label='Train Error')
        plt.legend(loc='lower right')
        plt.xlabel("K Nearest Neighbours",fontsize=15, color='black')
        plt.ylabel("Train Error & Cv Error",fontsize=15, color='black')
        plt.title("Train Error vs Cv Error on " + algo,fontsize=15, color='black')
        #plt.plot(k, a, 'r--', k, b, 'b--')
        plt.show()
        print("The Train Error is -: ",round(s.mean(b),3),"%\n")
        print("The CV Error is -: ",round(s.mean(a),3),"%\n")5. PREDICT FUNCTION'''
    It runs the desired algorithm on the optimal value of k we get from training part.
    It also returns accuracy and test error.
    '''
    
    def Test(x_train,y_train,x_test,y_test,opt,algo):
        knn = KNeighborsClassifier(n_neighbors=opt,algorithm=algo)
        knn.fit(x_train,y_train)
        pred = knn.predict(x_test)
        acc = accuracy_score(y_test,pred, normalize=True) * float(100)
        test_err=100-acc
        print("The Accuracy is -: ",round(acc,3),"%\n")
        print("The Test Error is -: ",round(test_err,3),"%\n")
    return pred

6. CONFUSION MATRIX
'''
This function builds a confusion matrix between the actual and predicted labels and plots it as a heatmap.
'''
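As a reading aid (added here, not from the original notebook): with scikit-learn's confusion_matrix, rows correspond to the actual class and columns to the predicted class, so for the 0/1 labels used in this notebook cf[0,0] counts true negatives, cf[0,1] false positives, cf[1,0] false negatives and cf[1,1] true positives. A minimal sanity check with made-up labels:

from sklearn.metrics import confusion_matrix

y_true_demo = [1, 0, 1, 1, 0, 1]
y_pred_demo = [1, 0, 0, 1, 1, 1]
print(confusion_matrix(y_true_demo, y_pred_demo))
# [[1 1]
#  [1 3]]  -> 1 TN, 1 FP, 1 FN, 3 TP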
    
    def conf(test,pre):
        cf = confusion_matrix(test,pre)
        df =pd.DataFrame(cf,index=[0,1],columns=[0,1])
        sns.set(font_scale=1.5)
        sns.heatmap(df,annot=True,annot_kws={"size" :20},fmt='g')
    return plt.show()

Knn Model on BOW ( Bag Of Words )
![](https://cdn-images-1.medium.com/max/1600/0*JpqZhCNsQ_OGaRkB.jpg)

SPLITTING INTO TRAIN AND TEST
x_train, x_test, y_train, y_test = Split(Data['CleanedText'].values,Data['Score'].values)
    print("-----------------------TRAIN DATA------------------------------------")
    print(x_train.shape)
    print(y_train.shape)
    print("---------------------------------------------------------------------")
    print("\n-----------------------TEST DATA-------------------------------------")
    print(x_test.shape)
    print(y_test.shape)-----------------------TRAIN DATA------------------------------------
    (14000,)
    (14000,)
    ---------------------------------------------------------------------
    
    -----------------------TEST DATA-------------------------------------
    (6000,)
(6000,)

CONVERTING REVIEWS INTO VECTORS USING BOW
'''
Here we fit the vectorizer on the training data only and then transform the test data with that vocabulary, so the test data
is never seen during the fitting phase and the evaluation measures generalization.
'''
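To make the vocabulary-sharing point concrete, here is a minimal sketch (added here, with made-up toy sentences) showing that transform() simply ignores test words that were never seen during fit_transform():

from sklearn.feature_extraction.text import CountVectorizer

demo_vect = CountVectorizer()
demo_train = ["good tasty food", "bad stale food"]
demo_test = ["tasty but overpriced food"]     # "but" and "overpriced" are unseen
Xtr = demo_vect.fit_transform(demo_train)     # vocabulary is built here only
Xte = demo_vect.transform(demo_test)          # unseen words are dropped
print(sorted(demo_vect.vocabulary_))          # ['bad', 'food', 'good', 'stale', 'tasty']
print(Xte.toarray())                          # counts only for known words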
    count = CountVectorizer() 
    x_train = count.fit_transform(x_train)
    x_test = count.transform(x_test)
    print("Train Dataset Shape -: ",x_train.shape)
    print("Test Dataset Shape -: ",x_test.shape)Train Dataset Shape -:  (14000, 15029)
    Test Dataset Shape -:  (6000, 15029)NORMALIZING THE DATAx_train = preprocessing.normalize(x_train)
    x_test = preprocessing.normalize(x_test)CALLING Knn FUNCTION WITH BRUTEFORCE ALGORITHMcv,train,t,opt=KNN(x_train,y_train,'brute')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")  # Accuracy on CV dataset.Time taken to complete -:  284.2675817012787 sec
    
    Optimal_k -:  11 
    
Accuracy -:  85.521 %

K VS ACCURACY PLOT
Accplot(cv,'BOW','Bruteforce')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'BOW')

From the plot we can see that the optimal k is 11 and the model tends towards overfitting, although we cannot be certain since k was not taken out to 50 or 100; a value in the range 13-17 would probably be a safer choice for the optimal k.

PREDICTING ON OPTIMAL K
pred = Test(x_train,y_train,x_test,y_test,opt,'brute')
The Accuracy is -:  83.8 %
    
    The Test Error is -:  16.2 %CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELSconf(y_test,pred)CONVERTING FROM SPARSE TO DENSE MATRIXd_train = x_train.todense(order=None, out=None)
    d_test = x_test.todense(order=None, out=None)
    print(d_train.shape)
    print(d_test.shape)(14000, 15029)
    (6000, 15029)CALLING Knn FUNCTION WITH KD-Tree ALGORITHMcv,train,t,opt=KNN(d_train,y_train,'kd_tree')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  15291.153882265091 sec
    
    Optimal_k -:  11 
    
Accuracy -:  85.512 %

K VS ACCURACY PLOT
Accplot(cv,'BOW','KD-Tree')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'BOW')

This case is similar to the previous one: the gap between the Train Error and the CV/Test Error is essentially the same.

PREDICTING ON OPTIMAL K
pred = Test(d_train,y_train,d_test,y_test,opt,'kd_tree')
The Accuracy is -:  83.75 %
    
The Test Error is -:  16.25 %

CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELS
conf(y_test,pred)

Knn Model on tf-IDF
![](https://lphinternetservices.com/images/TF-IDF-define.jpg)

SPLITTING INTO TRAIN AND TEST
x_train, x_test, y_train, y_test = Split(Data['CleanedText'].values,Data['Score'].values)

CONVERTING REVIEWS INTO VECTORS USING tf-IDF
tf_idf_vect = TfidfVectorizer(ngram_range=(1,1)) 
    x_train = tf_idf_vect.fit_transform(x_train)
    x_test = tf_idf_vect.transform(x_test)
    print("Train Dataset Shape -: ",x_train.shape)
    print("Test Dataset Shape -: ",x_test.shape)Train Dataset Shape -:  (14000, 15029)
    Test Dataset Shape -:  (6000, 15029)NORMALIZING THE DATAx_train = preprocessing.normalize(x_train)
    x_test = preprocessing.normalize(x_test)CALLING Knn FUNCTION WITH BRUTEFORCE ALGORITHMcv,train,t,opt=KNN(x_train,y_train,'brute')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  285.0983760356903 sec
    
    Optimal_k -:  9 
    
Accuracy -:  85.632 %

K VS ACCURACY PLOT
Accplot(cv,'tf-IDF','Bruteforce')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'tf-IDF')

In this case the difference between the Train and Test Error, i.e. 4.189 %, is smaller than for BOW, but analysing the plot we can still infer that the optimal k tends towards overfitting.

PREDICTING ON OPTIMAL K
pred = Test(x_train,y_train,x_test,y_test,opt,'brute')
The Accuracy is -:  83.6 %
    
    The Test Error is -:  16.4 %CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELSconf(y_test,pred)CONVERTING FROM SPARSE TO DENSE MATRIXd_train = x_train.todense(order=None, out=None)
    d_test = x_test.todense(order=None, out=None)CALLING Knn FUNCTION WITH KD-TREE ALGORITHMcv,train,t,opt=KNN(d_train,y_train,'kd_tree')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  16632.4118578434 sec
    
    Optimal_k -:  9 
    
Accuracy -:  85.461 %

K VS ACCURACY PLOT
Accplot(cv,'tf-IDF','KD-Tree')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'tf-IDF')

This is similar to the previous case, both in the gap between test and train error and in the optimal k tending to overfit.

PREDICTING ON OPTIMAL K
pred = Test(d_train,y_train,d_test,y_test,opt,'kd_tree')
The Accuracy is -:  84.15 %
    
The Test Error is -:  15.85 %

CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELS
conf(y_test,pred)

Knn on Avg Word2Vec
![](https://adriancolyer.files.wordpress.com/2016/04/word2vec-gender-relation.png?w=596)

SPLIT DATA INTO TRAIN AND TEST
'''
Here we build two lists: the first 14,000 reviews (Train) go into l4 and the remaining 6,000 (Test) go into l5.
'''
    
    l4=[]
    for i in range(14000):
        l1=Data['CleanedText'].values[i]
    l2=str(l1)  # note: str() on a bytes review gives "b'...'", which leaks a leading 'b' into the first token (see the printed output below)
        l4.append(l2)
    
    l5=[]
    for i in range(14000,20000,1):
        l1=Data['CleanedText'].values[i]
        l2=str(l1)
        l5.append(l2)MAKING LIST OF WORDSdef cleanpunc(sentence): #function to clean the word of any punctuation or special characters
        cleaned = re.sub(r'[?|!|\'|"|#]',r'',sentence)
        cleaned = re.sub(r'[.|,|)|(|\|/]',r' ',cleaned)
        return  cleaned
    
    #------------------------------------------ TRAIN DATASET ---------------------------------------------------------------------
    # making a list of words for each review.
    i=0
    list_of_sent=[] # list to store all the lists.
    for sent in l4:
        filtered_sentence=[] # list to store each review.
        for w in sent.split():
            for cleaned_words in cleanpunc(w).split():
                if(cleaned_words.isalpha()):    
                    filtered_sentence.append(cleaned_words.lower())
                else:
                    continue 
        list_of_sent.append(filtered_sentence)
    
    # ------------------------------------------- TEST DATASET ---------------------------------------------------------------------    
        
    # making a list of words for each review.
    i=0
    list_of_sent1=[] # list to store all the lists.
    for sent in l5:
        filtered_sentence=[] # list to store each review.
        for w in sent.split():
            for cleaned_words in cleanpunc(w).split():
                if(cleaned_words.isalpha()):    
                    filtered_sentence.append(cleaned_words.lower())
                else:
                    continue 
        list_of_sent1.append(filtered_sentence)
    print("____________________________________ TRAINING DATA ___________________________________________________")
    print(Data['CleanedText'].values[3])
    
    print("\n" + "********************Converted these sentences into a list of words each***********************\n ")
    
    # First Review with breaking into words. 
    print(list_of_sent[3])
    
    #-------------------------------------------------------------------------------------------------------------------
    print("--------------------------------------------------------------------------------------------------------------")
    
    print("____________________________________ TEST DATA ___________________________________________________")
    
    print(Data['CleanedText'].values[-1])
    
    print("\n" + "********************Converted these sentences into a list of words each***********************\n ")
    
    # First Review with breaking into words. 
    print(list_of_sent1[-1])____________________________________ TRAINING DATA ___________________________________________________
    b'love sauc put everyth'
    
    ********************Converted these sentences into a list of words each***********************
     
    ['blove', 'sauc', 'put', 'everyth']
    --------------------------------------------------------------------------------------------------------------
    ____________________________________ TEST DATA ___________________________________________________
    b'like product price point flavor veri strong overpow perhap figur name'
    
    ********************Converted these sentences into a list of words each***********************
     
['blike', 'product', 'price', 'point', 'flavor', 'veri', 'strong', 'overpow', 'perhap', 'figur', 'name']

TRAINING THE MODEL ON TRAIN DATA
'''
We train our own Word2Vec model on the training data only, with a feature size (dimension) of 100 and min_count = 2, which means
a word is only kept in the vocabulary if it appears at least 2 times; rarer words are ignored.
'''
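As a minimal, self-contained sketch (toy sentences invented here, using the gensim 3.x style API with size= that this notebook uses) of what min_count=2 implies for the averaging loop below: words seen fewer than two times are absent from the model's vocabulary, so looking them up raises a KeyError, which is exactly what the try/except in the conversion step silently skips.

import gensim

toy_sents = [['good', 'tea'], ['good', 'coffee'], ['good', 'tea', 'cheap']]
toy_model = gensim.models.Word2Vec(toy_sents, min_count=2, size=10, workers=1)
print('good' in toy_model.wv.vocab, 'tea' in toy_model.wv.vocab)  # True True
print('cheap' in toy_model.wv.vocab)                              # False (seen only once)
try:
    toy_model.wv['cheap']
except KeyError:
    print('rare word skipped, as in the averaging loop below')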
w2v_model=gensim.models.Word2Vec(list_of_sent,min_count=2,size=100, workers=4)

CONVERTING REVIEWS INTO VECTORS USING AVG WORD2VEC
'''
    Converting the reviews into vectors by using the above trained model.
    '''
    #-------------------------------------- TRAIN DATASET ------------------------------------------------------------------
    sent_vectors = []; # the avg-w2v for each sentence/review is stored in this list
    for sent in list_of_sent: # for each review/sentence
        sent_vec = np.zeros(100) # as word vectors are of zero length
        cnt_words =0; # num of words with a valid vector in the sentence/review
        for word in sent: # for each word in a review/sentence
            try:
                vec = w2v_model.wv[word]
                sent_vec += vec
                cnt_words += 1
            except:
                pass
        sent_vec /= cnt_words
        sent_vectors.append(sent_vec)
    
    #------------------------------------- TEST DATASET --------------------------------------------------------------------    
    '''
Here we convert the test reviews using the model trained on the training data only, so that the evaluation genuinely measures
generalization.
    '''
    sent_vectors1 = []; # the avg-w2v for each sentence/review is stored in this list
    for sent in list_of_sent1: # for each review/sentence
        sent_vec = np.zeros(100) # as word vectors are of zero length
        cnt_words =0; # num of words with a valid vector in the sentence/review
        for word in sent: # for each word in a review/sentence
            try:
                vec = w2v_model.wv[word]
                sent_vec += vec
                cnt_words += 1
            except:
                pass
        sent_vec /= cnt_words
    sent_vectors1.append(sent_vec)

CHECKING THE Nan VALUE
'''
Here we check for NaN values: if a review contains no in-vocabulary words, cnt_words stays 0 and the division above becomes 0/0,
which produces NaN entries that would break KNN later on.
'''
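A small defensive variant (a sketch added here, not the notebook's original approach; safe_avg_vector is a hypothetical helper name) that avoids the 0/0 case altogether by only dividing when at least one in-vocabulary word was found:

import numpy as np

def safe_avg_vector(words, model, dim=100):
    vec = np.zeros(dim)
    cnt = 0
    for w in words:
        if w in model.wv:          # skip out-of-vocabulary words
            vec += model.wv[w]
            cnt += 1
    return vec / cnt if cnt else vec   # keep the zero vector instead of dividing by 0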
    sent_vectors=np.array(sent_vectors)
    print(np.isnan(sent_vectors).any())
    sent_vectors1=np.array(sent_vectors1)
    print(np.isnan(sent_vectors1).any())
    print(sent_vectors.shape)
    print(sent_vectors1.shape)(14000, 100)
    (6000, 100)NORMALIZING THE DATAsent_vectors=preprocessing.normalize(sent_vectors)
    sent_vectors1=preprocessing.normalize(sent_vectors1)CALLING Knn FUNCTION WITH BRUTEFORCE ALGORITHMx_train, x_test, y_train, y_test = Split(Data['CleanedText'].values,Data['Score'].values)
    cv,train,t,opt=KNN(sent_vectors, y_train, 'brute')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  126.74465680122375 sec
    
    Optimal_k -:  13 
    
Accuracy -:  85.315 %

K VS ACCURACY PLOT
Accplot(cv,'Avg Word2Vec','Bruteforce')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'Avg Word2Vec')

From the plot the model appears to be neither overfitting nor underfitting, with the optimal k sitting in between at 13. We cannot be fully sure, though, since the plot was made on a sample rather than the whole dataset and k was not extended to 50 or 100, which might make the picture clearer.

PREDICTING ON OPTIMAL K
pred = Test(sent_vectors, y_train, sent_vectors1, y_test, opt, 'brute')
The Accuracy is -:  84.283 %
    
    The Test Error is -:  15.717 %CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELSconf(y_test,pred)CONVERTING FROM SPARSE TO DENSE MATRIXb1=sparse.csr_matrix(sent_vectors)
    b2=sparse.csr_matrix(sent_vectors1)
    d_train = b1.todense(order=None, out=None)
    d_test = b2.todense(order=None, out=None)CALLING Knn FUNCTION WITH KD-TREE ALGORITHMcv,train,t,opt=KNN(d_train, y_train, 'kd_tree')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  992.0894522666931 sec
    
    Optimal_k -:  13 
    
Accuracy -:  85.315 %

The KD-Tree gives the same results as brute force but takes much longer to compute, since the data handed to the algorithm is dense.

K VS ACCURACY PLOT
Accplot(cv,'Avg Word2Vec','KD-Tree')

K VS TRAIN & CV ERROR PLOT
Trainplot(cv,train,'Avg Word2Vec')

PREDICTING ON OPTIMAL K
pred = Test(d_train, y_train, d_test, y_test, opt, 'kd_tree')
The Accuracy is -:  84.283 %
    
The Test Error is -:  15.717 %

CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELS
conf(y_test,pred)

Knn Model On tf-IDF Word2Vec

NOTE: I forgot to save this part, so it has to be done again here.
x_train, x_test, y_train, y_test = Split(Data['CleanedText'].values,Data['Score'].values)
    tf_idf_vect = TfidfVectorizer(ngram_range=(1,1)) 
    x_train = tf_idf_vect.fit_transform(x_train)
x_test = tf_idf_vect.transform(x_test)

TRAINING OWN MODEL ON TRAIN DATASET
model=gensim.models.Word2Vec(list_of_sent,min_count=2,size=100, workers=4)

CONVERTING REVIEWS INTO VECTORS USING tf-IDF WORD2VEC
#--------------------------------------- TRAIN DATASET ---------------------------------------------------------------- 
    
    tfidf_feat = tf_idf_vect.get_feature_names()
    
    tfidf_sent_vectors_train = []; # the tfidf-w2v for each sentence/review is stored in this list
    row=0;
    for sent in list_of_sent: # for each review/sentence
        sent_vec = np.zeros(100) # as word vectors are of zero length
        weight_sum =0; # num of words with a valid vector in the sentence/review
        for word in sent: # for each word in a review/sentence
            try:
                vec = model.wv[word]
                # obtain the tf_idfidf of a word in a sentence/review
                tf_idf = x_train[row, tfidf_feat.index(word)]
                sent_vec += (vec * tf_idf)
                weight_sum += tf_idf
            except:
                pass
        sent_vec /= weight_sum
        tfidf_sent_vectors_train.append(sent_vec)
        row += 1
    
        
    #----------------------------------------- TEST DATASET ---------------------------------------------------------------
    
    tfidf_sent_vectors_test = []; # the tfidf-w2v for each sentence/review is stored in this list
    row=0;
    for sent in list_of_sent1: # for each review/sentence
        sent_vec = np.zeros(100) # as word vectors are of zero length
        weight_sum =0; # num of words with a valid vector in the sentence/review
        for word in sent: # for each word in a review/sentence
            try:
                vec = model.wv[word]
                # obtain the tf_idfidf of a word in a sentence/review
                tf_idf = x_test[row, tfidf_feat.index(word)]
                sent_vec += (vec * tf_idf)
                weight_sum += tf_idf
            except:
                pass
        sent_vec /= weight_sum
        tfidf_sent_vectors_test.append(sent_vec)
        row += 1CHECKING THE Nan VALUEtrain = np.array(tfidf_sent_vectors_train)
    test = np.array(tfidf_sent_vectors_test)
    print(np.isnan(train).any())
    print(np.isnan(test).any())False
    FalseNORMALIZING THE DATAtrain_ = preprocessing.normalize(train)
    test_ = preprocessing.normalize(test)CALLING Knn FUNCTION WITH BRUTEFORCE ALGORITHMcv,train,t,opt=KNN(train_, y_train, 'brute')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  113.40949702262878 sec
    
    Optimal_k -:  11 
    
    Accuracy -:  85.152 %K  VS ACCURACY PLOTAccplot(cv,'tf-IDF Word2Vec','BruteForce')K VS TRAIN & CV ERROR PLOTTrainplot(cv,train,'tf-IDF Word2Vec')PREDICTING ON OPTIMAL Kpred = Test(train_, y_train, test_, y_test, opt, 'brute')The Accuracy is -:  83.0 %
    
    The Test Error is -:  17.0 %CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELSconf(y_test,pred)CONVERTING FROM SPARSE TO DENSE MATRIXb1=sparse.csr_matrix(train_)
    b2=sparse.csr_matrix(test_)
    training = b1.todense()
    testing = b2.todense()CALLING Knn FUNCTION WITH KD-TREE ALGORITHMcv,train,t,opt=KNN(training, y_train, 'kd_tree')
    print("Time taken to complete -: ",t,"sec\n")
    print("Optimal_k -: ",opt,"\n")
    print("Accuracy -: ",round(max(cv),3),"%")Time taken to complete -:  978.9853284358978 sec
    
    Optimal_k -:  11 
    
    Accuracy -:  85.152 %K  VS ACCURACY PLOTAccplot(cv,'tf-IDF Word2Vec','KD-Tree')K VS TRAIN & CV ERROR PLOTTrainplot(cv,train,'tf-IDF Word2Vec')PREDICTING ON OPTIMAL Kpred = Test(train_, y_train, test_, y_test, opt, 'kd_tree')The Accuracy is -:  83.0 %
    
The Test Error is -:  17.0 %

CONFUSION MATRIX BETWEEN ACTUAL AND PREDICTED CLASS LABELS
conf(y_test,pred)

CONCLUSION
x = PrettyTable()
    
    x.field_names = ["NLP Technique", "Algorithm", "Accuracy(%)", "Hyperparameter", "Train Error(%)", "Test Error(%)", "Time(in sec)"]
    
    x.add_row(["BOW", "BruteForce", 83.80, 11, 10.675, 16.20, 284])
    x.add_row(["BOW", "KD-Tree", 83.75, 11, 10.683, 16.25, 15291])
    x.add_row(["tf-IDF", "BruteForce", 83.60, 9, 10.901, 16.40, 285])
    x.add_row(["tf-IDF", "KD-Tree", 84.15, 9, 11.059, 15.85, 16632])
    x.add_row(["Avg Word2Vec", "BruteForce", 84.283, 13, 10.615, 15.717, 126])
    x.add_row(["Avg Word2Vec", "KD-TREE", 84.283, 13, 10.615, 15.717, 992])
    x.add_row(["tf-IDF Word2Vec", "BruteForce", 83.0, 11, 11.031, 17.0, 113])
    x.add_row(["tf-IDF Word2Vec", "KD-TREE", 83.0, 11, 11.019, 17.0, 978])
    
    print(x)+-----------------+------------+-------------+----------------+----------------+---------------+--------------+
    |  NLP Technique  | Algorithm  | Accuracy(%) | Hyperparameter | Train Error(%) | Test Error(%) | Time(in sec) |
    +-----------------+------------+-------------+----------------+----------------+---------------+--------------+
    |       BOW       | BruteForce |     83.8    |       11       |     10.675     |      16.2     |     284      |
    |       BOW       |  KD-Tree   |    83.75    |       11       |     10.683     |     16.25     |    15291     |
    |      tf-IDF     | BruteForce |     83.6    |       9        |     10.901     |      16.4     |     285      |
    |      tf-IDF     |  KD-Tree   |    84.15    |       9        |     11.059     |     15.85     |    16632     |
    |   Avg Word2Vec  | BruteForce |    84.283   |       13       |     10.615     |     15.717    |     126      |
|   Avg Word2Vec  |  KD-TREE   |    84.283   |       13       |     10.615     |     15.717    |     992[...]

Load Dataset
from datasets import load_dataset
    
    split = "train"
    cache_dir = "./data_cache_doc2vec_solution_spans_notebook"
    
    document_dataset = load_dataset(
        "doc2dial",
        name="document_domain",
        split=split,
        ignore_verifications=True,
        cache_dir=cache_dir,
    )
    
    rc_dataset = load_dataset(
        "doc2dial",
        name="doc2dial_rc",
        split=split,
        ignore_verifications=True,
        cache_dir=cache_dir,
)

Train a model for each document

Preprocessing
def span_dict_for_row(spans):
        spans_dict = {}
        for span in spans:
            spans_dict[span['id_sp']] = span['text_sp']
    
        return spans_dict
    
    from gensim.utils import simple_preprocess
    import pandas as pd
    
# Build a dict of spans per grounding document. Terminology note: in gensim terms each span is a "document",
# and the list of all spans of one grounding document is the "corpus" that its Doc2Vec model is trained on.
    
    document_full_df = pd.DataFrame(data=document_dataset)
    raw_training_docs_per_doc = {}
    for index, row in document_full_df.iterrows():
        raw_training_docs_per_doc[row['doc_id']] = span_dict_for_row(row['spans'])
    
    tokenized_training_docs = {}
    for doc_id in raw_training_docs_per_doc:
        spans_dic = raw_training_docs_per_doc[doc_id]
        preprocessed_spans = {}
        for key, value in spans_dic.items():
        #simplest preprocessing from gensim (lowercasing, tokenizing, de-accenting)
            preprocessed_spans[key] = simple_preprocess(value, deacc=True)
        tokenized_training_docs[doc_id] = preprocessed_spans
    
list(tokenized_training_docs.items())[0][1]

Training
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
    
    models_for_doc = {}
    for doc_id in tokenized_training_docs:
        tokenized_span_dic = tokenized_training_docs[doc_id]
        training_docs = [TaggedDocument(span, [key]) for key, span in tokenized_span_dic.items()]
        # Check how to fine tune the model https://radimrehurek.com/gensim/models/doc2vec.html
        model = Doc2Vec(training_docs, vector_size=10, window=4, min_count=1, workers=4, epochs=30)
        models_for_doc[doc_id] = model
    
len(models_for_doc)

Predict rc questions
predictions = []
    references = []
    for example in rc_dataset:
        question_ = example["question"]
        doc_id = example['title']
    # it does better if the 'user:' and 'agent:' prefixes are left in the question
        # question_ = question_.replace('user:', '')
        # question_ = question_.replace('agent:', '')
    
        #preprocess question in the same way
        test_doc = simple_preprocess(question_, deacc=True)
        #calculate vector using model for that document
        model = models_for_doc[doc_id]
        vector = model.infer_vector(test_doc)
        #find the most similar document (spans)
        sims = model.dv.most_similar([vector], topn=1)
    
        spans_dic = raw_training_docs_per_doc[doc_id]
        tag_for_most_likely_answer = sims[0][0]
        most_likely_answer = spans_dic[tag_for_most_likely_answer]
    
        # print(f'Question: {question_}\n')
        # print(f'Predicted Answer tag {tag_for_answer}: {most_likely_answer}\n')
        # print(f'Correct answer: {example["answers"]}\n')
    
        id_ = example["id"]
        predictions.append(
            {'id': id_,
             'prediction_text':
                 most_likely_answer,
             'no_answer_probability': 0.0
             }
        )
    
        #just using their answers
        references.append(
            {
                "id": id_,
                "answers": example["answers"],
            }
        )
    
predictions[:5]

Evaluate
from datasets import load_metric
    
print(f'Number of questions in train dataset {rc_dataset.shape[0]}')
    metric = load_metric("squad_v2")
    metric.add_batch(predictions=predictions, references=references)
    final_score = metric.compute()
final_score
Number of questions in train dataset 20431

How does this compare to guessing at random?
import random
    
    random_predictions = []
    random_references = []
    for example in rc_dataset:
        question_ = example["question"]
        doc_id = example['title']
    
        #pick a random text from the document
        spans = raw_training_docs_per_doc[doc_id]
        most_likely_answer = spans[str(random.randint(1, len(spans)))]
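    # Added note (not in the original): the indexing above assumes the span ids are exactly the strings "1".."N";
    # a key-agnostic alternative would be:
    # most_likely_answer = random.choice(list(spans.values()))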
    
        id_ = example["id"]
        random_predictions.append(
            {'id': id_,
             'prediction_text':
                 most_likely_answer,
             'no_answer_probability': 0.0
             }
        )
    
        #just using their answers
        random_references.append(
            {
                "id": id_,
                "answers": example["answers"],
            }
        )
    
    random_predictions[:3]
    print(f'Results for random text predictions for each question')
    metric = load_metric("squad_v2")
    metric.add_batch(predictions=random_predictions, references=random_references)
    final_score = metric.compute()
final_score
Results for random text predictions for each question

Results for validation part of rc dataset
rc_validation_dataset = load_dataset(
        "doc2dial",
        name="doc2dial_rc",
        split="validation",
        ignore_verifications=True,
        cache_dir=cache_dir,
    )
    
    
    validation_predictions = []
    validation_references = []
print(f'Number of questions in validation dataset {rc_validation_dataset.shape[0]}')
    
    for example in rc_validation_dataset:
        question_ = example["question"]
        doc_id = example['title']
    # it does better if the 'user:' and 'agent:' prefixes are left in the question
        # question_ = question_.replace('user:', '')
        # question_ = question_.replace('agent:', '')
    
        #preprocess question in the same way
        test_doc = simple_preprocess(question_, deacc=True)
        #calculate vector using model for that document
        model = models_for_doc[doc_id]
        vector = model.infer_vector(test_doc)
        #find the most similar document (spans)
        sims = model.dv.most_similar([vector], topn=1)
    
        #find what that relates to in the original document
        spans_dic = raw_training_docs_per_doc[doc_id]
        tag_for_most_likely_answer = sims[0][0]
        most_likely_answer = spans_dic[tag_for_most_likely_answer]
    
        id_ = example["id"]
        validation_predictions.append(
            {'id': id_,
             'prediction_text':
                 most_likely_answer,
             'no_answer_probability': 0.0
             }
        )
    
        #just using their answers
        validation_references.append(
            {
                "id": id_,
                "answers": example["answers"],
            }
        )
    
    validation_predictions[:5]
    print(f'Results for validation dataset')
    metric = load_metric("squad_v2")
    metric.add_batch(predictions=validation_predictions, references=validation_references)
    final_score = metric.compute()
final_score
Results for validation dataset

Interactive exploration of AEM log data

Adapted from [voila-gpx-viewer](https://github.com/jtpio/voila-gpx-viewer), found in the [Voila Gallery](https://voila-gallery.org/services/gallery/)

Preparations:
```bash
conda activate ELA
conda install -c conda-forge bqplot ipyleaflet
jupyter labextension install jupyter-leaflet bqplot
jupyter labextension list
cd /path/to/parent/of/thisfile
jupyter-lab .
```
import os
    import sys
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import rasterio
    from rasterio.plot import show
    import geopandas as gpd
    
    import datetime
    import json
    
    from io import StringIO
    from statistics import mean
    
    from bqplot import Axis, Figure, Lines, LinearScale, LogScale
    from bqplot.interacts import IndexSelector
    from ipyleaflet import basemaps, FullScreenControl, LayerGroup, Map, MeasureControl, Polyline, Marker, CircleMarker, WidgetControl
    from ipywidgets import Button, HTML, HBox, VBox, Checkbox, FileUpload, Label, Output, IntSlider, Layout, Image, link
    # Only set to True for co-dev of ela from this use case:
    ela_from_source = False
    ela_from_source = True
    if ela_from_source:
        if ('ELA_SRC' in os.environ):
            root_src_dir = os.environ['ELA_SRC']
        elif sys.platform == 'win32':
            root_src_dir = r'C:\src\github_jm\pyela'
        else:
            username = os.environ['USER']
            root_src_dir = os.path.join('/home', username, 'src/ela/pyela')
        pkg_src_dir = root_src_dir
        sys.path.insert(0, pkg_src_dir)
    
    from ela.textproc import *
    from ela.utils import *
    from ela.classification import *
    from ela.visual import *
from ela.spatial import SliceOperation

Importing data

There are two main sets of information we need: the borehole lithology logs, and the spatial information in the surface elevation (DEM) and geolocation of a subset of bores around Bungendore.
data_path = None
You probably want to explicitly set `data_path` to the location where you put the folder(s), e.g.:
#data_path = '/home/myusername/data' # On Linux, if you now have the folder /home/myusername/data/Bungendore
#data_path = r'C:\data\Lithology'  # windows, if you have C:\data\Lithology\Bungendore
Otherwise a fallback for the pyela developer(s)
if data_path is None:
        if ('ELA_DATA' in os.environ):
            data_path = os.environ['ELA_DATA']
        elif sys.platform == 'win32':
            data_path = r'C:\data\Lithology'
        else:
            username = os.environ['USER']
            data_path = os.path.join('/home', username, 'data')
    data_path
    data_path = '/home/per202/data/Lithology'
    aem_datadir = os.path.join(data_path, 'AEM')
    swan_datadir = os.path.join(data_path, 'swan_coastal')
    scp_datadir = os.path.join(aem_datadir, 'Swan_coastal_plains')
    scp_grids_datadir = os.path.join(scp_datadir, 'grids')
    ngis_datadir = os.path.join(data_path, 'NGIS')
scp_shp_datadir = os.path.join(data_path, 'NGIS/swan_coastal')

DEM

Let's have a look at the DEM provided as part of the AEM package.
dem = rasterio.open(os.path.join(swan_datadir,'Swan_DEM/CLIP.tif'))
    cnd_slice_dir = os.path.join(scp_grids_datadir,'cnd')
    
    cnd_000_005 = rasterio.open(os.path.join(cnd_slice_dir,'Swan_Coastal_Plain_CND_000m_to_005m_Final.ers'))
bore_locations_raw = gpd.read_file(os.path.join(scp_shp_datadir, 'scp.shp'))

The DEM raster and the bore location shapefile do not use the same projection (coordinate reference system), so we reproject one of them. We choose the raster's UTM.
bore_locations = bore_locations_raw.to_crs(dem.crs)
    import aseg_gdf2
    gdf = aseg_gdf2.read( os.path.join(scp_datadir, 'located_data/Swan_Coastal_Plain_Final_CND'))
    gdf
    gdf.field_names()
    df = gdf.df()
df.head()

Viewer

Derived from [voila-gpx-viewer](https://github.com/jtpio/voila-gpx-viewer)
# create the output widget to place the results
    out = Output()
    def plot_map(points):
        """
        Plot the GPS trace on a map
        """
        mean_lat = mean(p.Latitude for p in points)
        mean_lng = mean(p.Longitude for p in points)
    
        # create the map
        m = Map(center=(mean_lat, mean_lng), zoom=12, basemap=basemaps.Stamen.Terrain)
    
        # show trace
        line = Polyline(locations=[[[p.Latitude, p.Longitude] for p in points],],
                        color = "red", fill=False)
        m.add_layer(line)
        m.add_control(FullScreenControl())
        return m
    ind = range(len(df))
    points = [df.iloc[i] for i in ind if (i % 10 == 0)]
    len(points)
    gdf.get_field_definition('Date'), gdf.get_field_definition('Time')
    p = points[123]
    p.Date, p.Time
    p = points[len(points) - 123]
    p.Date, p.Time
    set(p.Date for p in points)
    points = [p for p in points if (p.Date == 20130512.0)]
    points = sorted(points, key=lambda p: p.Time) 
def plot_elevation(points):
    # plot CND_011 against time for the selected points
        px = [p.Time for p in points]
        py = [p.CND_011 for p in points]
    
        x_scale, y_scale = LinearScale(), LogScale()
        x_scale.allow_padding = False
        x_ax = Axis(label='Time (s)', scale=x_scale)
        y_ax = Axis(label='CND 011(?)', scale=y_scale, orientation='vertical')
    
        lines = Lines(x=px, y=py, scales={'x': x_scale, 'y': y_scale})
    
        elevation = Figure(title='CND 011 Chart', axes=[x_ax, y_ax], marks=[lines])
        elevation.layout.width = 'auto'
        elevation.layout.height = 'auto'
        elevation.layout.min_height = '500px'
    
        elevation.interaction = IndexSelector(scale=x_scale)
    
        return elevation
    
    def link_trace_elevation(trace, elevation, points):
        """
    Link the trace to the elevation graph.
        Changing the selection on the elevation will update the
        marker on the map
        """
        times = np.asarray([p.Time for p in points])
    
        def find_point(time):
            """
            Find a point given the time
            """
            dist_1 = abs(times - time)
            pos = np.argmin(dist_1)
            return points[pos]
        
        # add a checkbox to auto center
        autocenter = Checkbox(value=False, description='Auto Center')
        autocenter_control = WidgetControl(widget=autocenter, position='bottomright')
        trace.add_control(autocenter_control)
        # mark the current position on the map
        start = points[0]
        marker = CircleMarker(visible=False, location=(start.Latitude, start.Longitude),
                              radius=10, color="green", fill_color="green")
        trace.add_layer(marker)
        brushintsel = elevation.interaction
        def update_range(change):
            """
            Update the position on the map when the elevation
            graph selector changes
            """
            if brushintsel.selected.shape != (1,):
                return
            marker.visible = True
            selected = brushintsel.selected # time stamp in seconds for a day
            point = find_point(selected)
            marker.location = (point.Latitude, point.Longitude)
            if autocenter.value:
                trace.center = marker.location
            #position = max(0, int((selected / distance_from_start) * len(points)))
        brushintsel.observe(update_range, 'selected')
    
    def plot_gpx(points):
        trace = plot_map(points)
        elevation = plot_elevation(points)
        debug = Label(value='')
        display(trace)
        display(elevation)
        display(debug)
        link_trace_elevation(trace, elevation, points)
plot_gpx(points)

**VARIABLES AND VALUES**

value --> the basic thing a program works with.
  - different types: integers (whole numbers), floats (decimals), strings
variable --> a name that refers to a value; in n = 17, n is the variable
operator --> symbols that represent a computation, such as + or *
expression --> a combination of values, variables and operators
statement --> a unit of code that a Python interpreter can execute, e.g. print("hello world") is a statement and my_salary = 250 is an assignment statement

print("hello world")
hello world

exercise
# this is a comment
    message = "Python version 3 is so much better than version 2"
    # message is a string type
    n = 17
    pi = 3.14159
    #run code and see what happened
    # nothing happened because we did not ask for an output
    
    print(message)
    print(n)
    print(pi)
    #now it will print the variables created above
    
    type(message)
    #tells what the data type of the variables is
    type(n)
    type(pi)
    
    
    
    n = 4
    o = 2
    p = n+ o 
    print(p)
    n = "4"
    #this makes it a string
    o = "2"
    type(o)
    
    print(p)
#should this concatenate them? No - p was computed before n and o were reassigned to strings, so it still holds 6
    
    
    t = "4"
    r = "2"
    c = t+r 
    print(c)
#did it correctly here
42

**1) Normal Normal**

***1.1a) Scipy normal PDF function***

$KL(P \| Q) = \sum_{x \in X} P(x) \log\frac{P(x)}{Q(x)}$

Source: [MLM](https://machinelearningmastery.com/divergence-between-probability-distributions/), [Cory TDS](https://towardsdatascience.com/kl-divergence-python-example-b87069e4b810), and [Swayson GitHub](https://gist.github.com/swayson/86c296aa354a555536e6765bbe726ff7)

# p, q are arrays
    def _kl_divergence(p, q):
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
        return np.sum(np.where(p != 0, p * np.log(p / q), 0))
        # return sum(p[i] * np.log(p[i]/q[i]) for i in range(len(p)))
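# Quick sanity check of _kl_divergence on two hand-made discrete distributions
# (an illustration added here, not part of the original notebook). It also shows
# that KL is not symmetric: KL(P||Q) != KL(Q||P) in general.
P_demo = np.array([0.6, 0.3, 0.1])
Q_demo = np.array([0.2, 0.5, 0.3])
print(_kl_divergence(P_demo, Q_demo), _kl_divergence(Q_demo, P_demo))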
    p = norm.pdf(x, loc=p_loc, scale=p_scale)
    q = norm.pdf(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();
sum(p), sum(q)

***1.1b) Scipy normal PDF function (rewritten here)***

$p(x ; \mu, \sigma) = \frac{1}{\sigma\sqrt{2\pi}}\exp\left(-\frac{1}{2}\frac{(x - \mu)^2}{\sigma^2}\right)$

Source: [Scipy norm pdf](https://github.com/scipy/scipy/blob/v1.5.4/scipy/stats/_continuous_distns.py#L242-L243)

def _norm_pdf(x, loc=0., scale=1.):
        return np.exp(-((x-loc)**2 / scale**2) / 2.0) / \
               (scale*np.sqrt(2*np.pi))
    p = _norm_pdf(x, loc=p_loc, scale=p_scale)
    q = _norm_pdf(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***1.1c) Scipy normal log PDF function***log_p = norm.logpdf(x, loc=p_loc, scale=p_scale)
    log_q = norm.logpdf(x, loc=q_loc, scale=q_scale)
    
# Note: log_p and log_q cannot be fed to _kl_divergence directly, since it expects densities, not log-densities;
# the title below therefore still uses the p and q computed earlier.
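# Added aside (not in the original notebook): if only log-densities are available,
# the same quantity can be computed as sum(exp(log_p) * (log_p - log_q)),
# which agrees with _kl_divergence(p, q) up to floating-point error.
kl_from_logs = np.sum(np.exp(log_p) * (log_p - log_q))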
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, log_p)
    plt.plot(x, log_q, c='red')
plt.show();

***1.1d) Scipy normal log PDF function (rewritten here)***

Source: [Scipy norm log pdf](https://github.com/scipy/scipy/blob/v1.5.4/scipy/stats/_continuous_distns.py#L246-L247)

def _norm_logpdf(x, loc=0., scale=1.):
        return (-((x-loc)**2 / scale**2) / 2.0) - \
               np.log(scale*np.sqrt(2*np.pi))
    log_p = _norm_logpdf(x, loc=p_loc, scale=p_scale)
    log_q = _norm_logpdf(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, log_p)
    plt.plot(x, log_q, c='red')
    plt.show();***1.2a) TFP normal PDF function***p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = p_dist.prob(x)
    q = q_dist.prob(x)
    
    plt.title('KL(P||Q) = %1.3f' % p_dist.kl_divergence(q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***1.2b) TFP normal PDF and KL functions (rewritten here)***# TFP only has _log_prob written, thus _prob is the exp of that
    def _log_prob(x, loc=0., scale=1.):
        scale = tf.convert_to_tensor(scale)
        log_unnormalized = -0.5 * tf.math.squared_difference(
            x / scale, loc / scale)
        log_normalization = tf.constant(
            0.5 * np.log(2. * np.pi), dtype='float32') + tf.math.log(scale)
        return log_unnormalized - log_normalization
    
    def _prob(x, loc=0., scale=1.):
        return tf.exp(_log_prob(x, loc, scale))
    # a, b are distributions.normal.Normal
    def _kl_normal_normal(a, b, name=None):
      """Calculate the batched KL divergence KL(a || b) with a and b Normal.
      Args:
        a: instance of a Normal distribution object.
        b: instance of a Normal distribution object.
        name: Name to use for created operations.
          Default value: `None` (i.e., `'kl_normal_normal'`).
      Returns:
        kl_div: Batchwise KL(a || b)
      """
      with tf.name_scope(name or 'kl_normal_normal'):
        b_scale = tf.convert_to_tensor(b.scale)  # We'll read it thrice.
        diff_log_scale = tf.math.log(a.scale) - tf.math.log(b_scale)
        return (
            0.5 * tf.math.squared_difference(a.loc / b_scale, b.loc / b_scale) +
            0.5 * tf.math.expm1(2. * diff_log_scale) -
            diff_log_scale)
    p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = _prob(x, loc=p_loc, scale=p_scale)
    q = _prob(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_normal_normal(p_dist, q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();
    type(p_dist), type(q_dist)***1.2c) TFP normal log PDF function***p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = p_dist.log_prob(x)
    q = q_dist.log_prob(x)
    
    plt.title('KL(P||Q) = %1.3f' % p_dist.kl_divergence(q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***1.2d) TFP normal log PDF(rewritten here)***p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = _log_prob(x, loc=p_loc, scale=p_scale)
    q = _log_prob(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_normal_normal(p_dist, q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
plt.show();

**2) Student-t Student-t**

***2.1a) Scipy Student-t PDF function***

Currently this reuses the same KL divergence function as for the Normal case; it is not certain that this is correct.
def _kl_divergence(p, q):
        return sum(p[i] * np.log(p[i]/q[i]) for i in range(len(p)))
        # return np.sum(np.where(p != 0, p * np.log(p / q), 0))
    p = t.pdf(x, df=p_df, loc=p_loc, scale=p_scale)
    q = t.pdf(x, df=q_df, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
plt.show();

***2.1b) Scipy Student-t PDF function (rewritten here)***

$f(t) = \frac{\Gamma[(v + 1)/2]}{\sqrt{v\pi}\,\Gamma[v/2]}\left(1 + t^2/v\right)^{-(v+1)/2}$

where $t = \frac{x - \mu}{\sigma}$, $v$ = degrees of freedom, $\Gamma$ = Gamma function, $d = 1$ (one-dimensional) and $\Sigma = 1$.

Source: [Wikipedia](https://en.wikipedia.org/wiki/Multivariate_t-distribution), [Scipy t pdf](https://github.com/scipy/scipy/blob/v1.5.4/scipy/stats/_continuous_distns.py#L5924-L5931)

#                                gamma((df+1)/2)
    # t.pdf(x, df) = ---------------------------------------------------
    #                sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
    def _t_pdf(x, df=10., loc=0., scale=1.):
        t = np.asarray(((x-loc)/scale)*1.0)
        v = np.asarray(df*1.0)
        Px = np.exp(sc.gammaln((v+1)/2) - sc.gammaln(v/2))
        Px /= np.sqrt(v*np.pi) * (1+(t**2)/v)**((v+1)/2)
        return Px
    p = _t_pdf(x, df=p_df, loc=p_loc, scale=p_scale)
    q = _t_pdf(x, df=q_df, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***2.1c) Scipy Student-t log PDF function***log_p = t.logpdf(x, df=p_df, loc=p_loc, scale=p_scale)
    log_q = t.logpdf(x, df=q_df, loc=q_loc, scale=q_scale)
    
# Note: as before, log_p and log_q cannot be fed to _kl_divergence directly; the title reuses the densities p and q from above.
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, log_p)
    plt.plot(x, log_q, c='red')
plt.show();

***2.1d) Scipy Student-t log PDF function (rewritten here)***

Source: [Scipy t log pdf](https://github.com/scipy/scipy/blob/v1.5.4/scipy/stats/_continuous_distns.py#L5933-L5937)

def _t_logpdf(x, df=10., loc=0., scale=1.):
        t = ((x-loc)/scale)*1.0
        v = df*1.0
        lPx = sc.gammaln((v+1)/2)-sc.gammaln(v/2)
        lPx -= 0.5*np.log(v*np.pi) + (v+1)/2*np.log(1+(t**2)/v)
        return lPx
log_p = _t_logpdf(x, df=p_df, loc=p_loc, scale=p_scale)
log_q = _t_logpdf(x, df=q_df, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_divergence(p, q))
    plt.plot(x, log_p)
    plt.plot(x, log_q, c='red')
plt.show();

***2.2a) TFP Student-t PDF function***

TFP currently does not provide an analytic KL divergence between two Student-t distributions, so p_dist.kl_divergence(q_dist) is unavailable and we have to write something ourselves...
p_dist = tfd.StudentT(df=p_df, loc=p_loc, scale=p_scale)
    q_dist = tfd.StudentT(df=q_df, loc=q_loc, scale=q_scale)
    p = p_dist.prob(x)
    q = q_dist.prob(x)
    
    # plt.title('KL(P||Q) = %1.3f' % p_dist.kl_divergence(q_dist))
    plt.title('No KL(P||Q)')
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***2.2b) TFP Student-t PDF and KL functions (rewritten here)***def _t_log_prob(x, df, loc, scale):
      """
      Compute log probability of Student T distribution.
      Note that scale can be negative.
      Args:
        x: Floating-point `Tensor`. Where to compute the log probabilities.
        df: Floating-point `Tensor`. The degrees of freedom of the
          distribution(s). `df` must contain only positive values.
        loc: Floating-point `Tensor`; the location(s) of the distribution(s).
        scale: Floating-point `Tensor`; the scale(s) of the distribution(s).
      Returns:
        A `Tensor` with shape broadcast according to the arguments.
      """
      # Writing `y` this way reduces XLA mem copies.
      y = (x - loc) * (tf.math.rsqrt(df) / scale)
      log_unnormalized_prob = -0.5 * (df + 1.) * log1psquare(y)
      log_normalization = (
          tf.math.log(tf.abs(scale)) + 0.5 * tf.math.log(df) +
          0.5 * np.log(np.pi) + tfp_math.log_gamma_difference(0.5, 0.5 * df))
      return log_unnormalized_prob - log_normalization
    
    def _t_prob(x, df, loc, scale):
        return tf.exp(_t_log_prob(x, df, loc, scale))
    
    '''
    # TFP only has _log_prob written, thus _prob is the exp of that
    def _log_prob(x, loc=0., scale=1.):
        scale = tf.convert_to_tensor(scale)
        log_unnormalized = -0.5 * tf.math.squared_difference(
            x / scale, loc / scale)
        log_normalization = tf.constant(
            0.5 * np.log(2. * np.pi), dtype='float32') + tf.math.log(scale)
        return log_unnormalized - log_normalization
    
    def _prob(x, loc=0., scale=1.):
        return tf.exp(_log_prob(x, loc, scale))
    '''
def _kl_student_student(a, b, name=None):
    # Note: this reuses the Normal-Normal closed form above as a rough stand-in;
    # it is not the exact KL divergence between two Student-t distributions.
    with tf.name_scope(name or 'kl_student_student'):
            b_scale = tf.convert_to_tensor(b.scale)  # We'll read it thrice.
            diff_log_scale = tf.math.log(a.scale) - tf.math.log(b_scale)
            return (
                0.5 * tf.math.squared_difference(a.loc / b_scale, b.loc / b_scale) +
                0.5 * tf.math.expm1(2. * diff_log_scale) -
                diff_log_scale)
    
    '''
    def _kl_normal_normal(a, b, name=None):
      """Calculate the batched KL divergence KL(a || b) with a and b Normal.
      Args:
        a: instance of a Normal distribution object.
        b: instance of a Normal distribution object.
        name: Name to use for created operations.
          Default value: `None` (i.e., `'kl_normal_normal'`).
      Returns:
        kl_div: Batchwise KL(a || b)
      """
      with tf.name_scope(name or 'kl_normal_normal'):
        b_scale = tf.convert_to_tensor(b.scale)  # We'll read it thrice.
        diff_log_scale = tf.math.log(a.scale) - tf.math.log(b_scale)
        return (
            0.5 * tf.math.squared_difference(a.loc / b_scale, b.loc / b_scale) +
            0.5 * tf.math.expm1(2. * diff_log_scale) -
            diff_log_scale)
    '''
    p_dist = tfd.StudentT(df=p_df, loc=p_loc, scale=p_scale)
    q_dist = tfd.StudentT(df=q_df, loc=q_loc, scale=q_scale)
    p = _t_prob(x, df=p_df, loc=p_loc, scale=p_scale)
    q = _t_prob(x, df=p_df, loc=q_loc, scale=q_scale)
    
    # plt.title('KL(P||Q) = %1.3f' % p_dist.kl_divergence(q_dist))
    plt.title('KL(P||Q) = %1.3f' % _kl_student_student(p_dist, q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();
    
    '''
    p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = _prob(x, loc=p_loc, scale=p_scale)
    q = _prob(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_normal_normal(p_dist, q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();
    '''***2.2c) TFP Student-t log PDF function***p_dist = tfd.StudentT(df=p_df, loc=p_loc, scale=p_scale)
    q_dist = tfd.StudentT(df=q_df, loc=q_loc, scale=q_scale)
    p = p_dist.log_prob(x)
    q = q_dist.log_prob(x)
    
    # plt.title('KL(P||Q) = %1.3f' % p_dist.kl_divergence(q_dist))
    plt.title('No KL(P||Q)')
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();***2.2d) TFP Student-t log PDF (rewritten here)***'''
    p_dist = tfd.Normal(loc=p_loc, scale=p_scale)
    q_dist = tfd.Normal(loc=q_loc, scale=q_scale)
    p = _log_prob(x, loc=p_loc, scale=p_scale)
    q = _log_prob(x, loc=q_loc, scale=q_scale)
    
    plt.title('KL(P||Q) = %1.3f' % _kl_normal_normal(p_dist, q_dist))
    plt.plot(x, p)
    plt.plot(x, q, c='red')
    plt.show();
'''

**3) CoupledNormal CoupledNormal**
# TO-DO

**4) MultivariateCoupledNormal MultivariateCoupledNormal**
# TO-DO

Analysis of transcriptomic changes during Mz19 development: GO terms (Figure 6)
from __future__ import division
    import sys
    import random
    import copy
    import math
    import json
    
    import numpy as np
    import pandas as pd
    import scipy
    
    %matplotlib inline
    from matplotlib import pyplot as plt
    import matplotlib as mpl
    
    import seaborn as sns
    
    sys.path.append("../resources/")
    import sct
    reload(sct)
    
    sns.set_style("ticks")
    sns.set_context("talk")
    
    output_dir = "out/"
    output_suffix = ""
    output_formats = [".png", ".pdf"]
    
    def save_figure(fig, name):
        for output_format in output_formats:
            fig.savefig(output_dir + "/" + name + output_suffix + output_format)
        return None
    
    mpl.rc('savefig', dpi=300)
    
pd.options.mode.chained_assignment = None  # default='warn'

Load GO result
def load_Flymine(infile):
        terms = []
        pvalues = []
        num_genes = []
        with open(infile) as f:
            for line in f:
                term = line.rstrip().split("\t")[0].decode('utf-8')
                pvalue = float(line.rstrip().split("\t")[1])
                n = len(line.rstrip().split("\t")[2].split(","))
                terms.append(term)
                pvalues.append(pvalue)
                num_genes.append(n)
        df = pd.DataFrame({"term":terms, "pvalue": pvalues, "num_genes": num_genes})
        df.sort_values("pvalue", inplace=True, ascending=True)
        return df
    GO_24hAPF = load_Flymine("../data/GO_Flymine_genes_DE_24hAPF_adult_24hAPFHigh.tsv")
    GO_adult = load_Flymine("../data/GO_Flymine_genes_DE_24hAPF_adult_adultHigh.tsv")
    # hack to fix unicode issue
    GO_24hAPF.term.loc[0] = "cell development"
    GO_adult.term.loc[0] = "ATP metabolic process"
    # Check number of terms with P value < threshold
GO_24hAPF.loc[GO_24hAPF["pvalue"] < 1e-5].shape

Plot P values and number of genes of top hits
# Note that these plots have not been pruned using a tool to cut the GO ontology at a particular level
    # Thus, the plots are different than those in the paper.
    num_hits = 30
    # P values, 24h
    myData = GO_24hAPF.head(n=num_hits)[::-1]
    x = np.array(range(myData.shape[0]))
    y = -np.log10(myData["pvalue"])
    xticklabels = list(myData["term"])
    
    fig, ax = plt.subplots(1, 1, figsize=(3,8))
    ax.barh(x, y)
    ax.set_yticks(x+0.5)
    ax.set_yticklabels(xticklabels)
    ax.set_xlabel("-log10(P value)")
    # P values, adult
    myData = GO_adult.head(n=num_hits)[::-1]
    x = np.array(range(myData.shape[0]))
    y = -np.log10(myData["pvalue"])
    xticklabels = list(myData["term"])
    
    fig, ax = plt.subplots(1, 1, figsize=(3,8))
    ax.barh(x, y)
    ax.set_yticks(x+0.5)
    ax.set_yticklabels(xticklabels)
    ax.set_xlabel("-log10(P value)")
    # number of genes, 24h
    myData = GO_24hAPF.head(n=num_hits)[::-1]
    x = np.array(range(myData.shape[0]))
    y = myData["num_genes"]
    xticklabels = list(myData["term"])
    
    fig, ax = plt.subplots(1, 1, figsize=(3,8))
    ax.barh(x, y)
    ax.set_yticks(x+0.5)
    ax.set_yticklabels(xticklabels)
    ax.set_xlabel("Genes")
    # number of genes, adult
    myData = GO_adult.head(n=num_hits)[::-1]
    x = np.array(range(myData.shape[0]))
    y = myData["num_genes"]
    xticklabels = list(myData["term"])
    
    fig, ax = plt.subplots(1, 1, figsize=(3,8))
    ax.barh(x, y)
    ax.set_yticks(x+0.5)
    ax.set_yticklabels(xticklabels)
    ax.set_xlabel("Genes")패키지 불러오기import tensorflow as tf
    import numpy as np
    import os
    import json  # needed below to load the DATA_CONFIGS json file
    
    from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
    import matplotlib.pyplot as plt
    
    from preprocess import *Visualization functiondef plot_graphs(history, string):
        plt.plot(history.history[string])
        plt.plot(history.history['val_'+string], '')
        plt.xlabel("Epochs")
        plt.ylabel(string)
        plt.legend([string, 'val_'+string])
        plt.show()Define the training data pathsDATA_IN_PATH = './data_in/'
    DATA_OUT_PATH = './data_out/'
    TRAIN_INPUTS = 'train_inputs.npy'
    TRAIN_OUTPUTS = 'train_outputs.npy'
    TRAIN_TARGETS = 'train_targets.npy'
    DATA_CONFIGS = 'data_configs.json'랜덤 시드 고정SEED_NUM = 1234
    tf.random.set_seed(SEED_NUM)파일 로드index_inputs = np.load(open(DATA_IN_PATH + TRAIN_INPUTS, 'rb'))
    index_outputs = np.load(open(DATA_IN_PATH + TRAIN_OUTPUTS , 'rb'))
    index_targets = np.load(open(DATA_IN_PATH + TRAIN_TARGETS , 'rb'))
    prepro_configs = json.load(open(DATA_IN_PATH + DATA_CONFIGS, 'r'))
    # Show length
    print(len(index_inputs),  len(index_outputs), len(index_targets))20 20 20모델 만들기에 필요한 값 선언MODEL_NAME = 'seq2seq_kor'
    BATCH_SIZE = 2
    MAX_SEQUENCE = 25
    EPOCH = 30
    UNITS = 1024
    EMBEDDING_DIM = 256
    VALIDATION_SPLIT = 0.1 
    
    char2idx = prepro_configs['char2idx']
    idx2char = prepro_configs['idx2char']
    std_index = prepro_configs['std_symbol']
    end_index = prepro_configs['end_symbol']
    vocab_size = prepro_configs['vocab_size']모델   인코더class Encoder(tf.keras.layers.Layer):
        def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
            super(Encoder, self).__init__()
            self.batch_sz = batch_sz
            self.enc_units = enc_units
            self.vocab_size = vocab_size 
            self.embedding_dim = embedding_dim          
            
            self.embedding = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim)
            self.gru = tf.keras.layers.GRU(self.enc_units,
                                           return_sequences=True,
                                           return_state=True,
                                           recurrent_initializer='glorot_uniform')
    
        def call(self, x, hidden):
            x = self.embedding(x)
            output, state = self.gru(x, initial_state = hidden)
            return output, state
    
        def initialize_hidden_state(self, inp):
            return tf.zeros((tf.shape(inp)[0], self.enc_units))어텐션class BahdanauAttention(tf.keras.layers.Layer):
        def __init__(self, units):
            super(BahdanauAttention, self).__init__()
            self.W1 = tf.keras.layers.Dense(units)
            self.W2 = tf.keras.layers.Dense(units)
            self.V = tf.keras.layers.Dense(1)
    
        def call(self, query, values):
            hidden_with_time_axis = tf.expand_dims(query, 1)
    
            score = self.V(tf.nn.tanh(
                self.W1(values) + self.W2(hidden_with_time_axis)))
    
            attention_weights = tf.nn.softmax(score, axis=1)
    
            context_vector = attention_weights * values
            context_vector = tf.reduce_sum(context_vector, axis=1)
    
            return context_vector, attention_weights디코더class Decoder(tf.keras.layers.Layer):
        def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
            super(Decoder, self).__init__()
            
            self.batch_sz = batch_sz
            self.dec_units = dec_units
            self.vocab_size = vocab_size 
            self.embedding_dim = embedding_dim  
            
            self.embedding = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim)
            self.gru = tf.keras.layers.GRU(self.dec_units,
                                           return_sequences=True,
                                           return_state=True,
                                           recurrent_initializer='glorot_uniform')
            self.fc = tf.keras.layers.Dense(self.vocab_size)
    
            self.attention = BahdanauAttention(self.dec_units)
            
        def call(self, x, hidden, enc_output):
            context_vector, attention_weights = self.attention(hidden, enc_output)
    
            x = self.embedding(x)
    
            x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
    
            output, state = self.gru(x)
            output = tf.reshape(output, (-1, output.shape[2]))
                
            x = self.fc(output)
            
            return x, state, attention_weights
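    A quick shape check of the layers above with dummy tensors (a sketch, not part of the original training flow; the batch size, sequence length and vocabulary size are arbitrary):
    # smoke test: 4 sequences of length 10, vocabulary of 50 tokens, 16 GRU units
    _enc = Encoder(vocab_size=50, embedding_dim=8, enc_units=16, batch_sz=4)
    _dec = Decoder(vocab_size=50, embedding_dim=8, dec_units=16, batch_sz=4)
    _inp = tf.random.uniform((4, 10), minval=0, maxval=50, dtype=tf.int32)
    _enc_out, _enc_hidden = _enc(_inp, _enc.initialize_hidden_state(_inp))
    print(_enc_out.shape)     # (4, 10, 16): one GRU output per time step
    print(_enc_hidden.shape)  # (4, 16): final encoder hidden state
    _dec_in = tf.ones((4, 1))                     # one decoder input token per sequence
    _logits, _dec_hidden, _attn = _dec(_dec_in, _enc_hidden, _enc_out)
    print(_logits.shape)      # (4, 50): vocabulary logits for the next token
    print(_attn.shape)        # (4, 10, 1): one attention weight per encoder step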
    optimizer = tf.keras.optimizers.Adam()
    
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
    
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')
    
    def loss(real, pred):
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        loss_ = loss_object(real, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        return tf.reduce_mean(loss_)
    
    def accuracy(real, pred):
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        mask = tf.expand_dims(tf.cast(mask, dtype=pred.dtype), axis=-1)
        pred *= mask    
        acc = train_accuracy(real, pred)
    
        return tf.reduce_mean(acc)Sequence-to-sequence modelclass seq2seq(tf.keras.Model):
        def __init__(self, vocab_size, embedding_dim, enc_units, dec_units, batch_sz, end_token_idx=2):    
            super(seq2seq, self).__init__()
            self.end_token_idx = end_token_idx
            self.encoder = Encoder(vocab_size, embedding_dim, enc_units, batch_sz) 
            self.decoder = Decoder(vocab_size, embedding_dim, dec_units, batch_sz) 
    
        def call(self, x):
            inp, tar = x
            
            enc_hidden = self.encoder.initialize_hidden_state(inp)
            enc_output, enc_hidden = self.encoder(inp, enc_hidden)
    
            dec_hidden = enc_hidden
    
            predict_tokens = list()
            for t in range(0, tar.shape[1]):
                dec_input = tf.dtypes.cast(tf.expand_dims(tar[:, t], 1), tf.float32) 
                predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
                predict_tokens.append(tf.dtypes.cast(predictions, tf.float32))   
            return tf.stack(predict_tokens, axis=1)
        
        def inference(self, x):
            inp  = x
    
            enc_hidden = self.encoder.initialize_hidden_state(inp)
            enc_output, enc_hidden = self.encoder(inp, enc_hidden)
    
            dec_hidden = enc_hidden
            
            dec_input = tf.expand_dims([char2idx[std_index]], 1)
            
            predict_tokens = list()
            for t in range(0, MAX_SEQUENCE):
                predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
                predict_token = tf.argmax(predictions[0])
                
                if predict_token == self.end_token_idx:
                    break
                
                predict_tokens.append(predict_token)
                dec_input = tf.dtypes.cast(tf.expand_dims([predict_token], 0), tf.float32)   
                
            return tf.stack(predict_tokens, axis=0).numpy()
    model = seq2seq(vocab_size, EMBEDDING_DIM, UNITS, UNITS, BATCH_SIZE, char2idx[end_index])
    model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(1e-3), metrics=[accuracy])
    #model.run_eagerly = TrueRun trainingPATH = DATA_OUT_PATH + MODEL_NAME
    if not(os.path.isdir(PATH)):
            os.makedirs(os.path.join(PATH))
            
    checkpoint_path = DATA_OUT_PATH + MODEL_NAME + '/weights.h5'
        
    cp_callback = ModelCheckpoint(
        checkpoint_path, monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True)
    
    earlystop_callback = EarlyStopping(monitor='val_accuracy', min_delta=0.0001, patience=10)
    
    history = model.fit([index_inputs, index_outputs], index_targets,
                        batch_size=BATCH_SIZE, epochs=EPOCH,
                        validation_split=VALIDATION_SPLIT, callbacks=[earlystop_callback, cp_callback])Epoch 1/30
    9/9 [==============================] - ETA: 0s - loss: 0.8365 - accuracy: 0.8360
    Epoch 00001: val_accuracy improved from -inf to 0.85400, saving model to ./data_out/seq2seq_kor/weights.h5
    9/9 [==============================] - 5s 599ms/step - loss: 0.8365 - accuracy: 0.8360 - val_loss: 0.6929 - val_accuracy: 0.8540
    Epoch 2/30
    9/9 [==============================] - ETA: 0s - loss: 0.7576 - accuracy: 0.8527
    Epoch 00002: val_accuracy improved from 0.85400 to 0.85600, saving model to ./data_out/seq2seq_kor/weights.h5
    9/9 [==============================] - 1s 91ms/step - loss: 0.7576 - accuracy: 0.8527 - val_loss: 0.6273 - val_accuracy: 0.8560
    Epoch 3/30
    9/9 [==============================] - ETA: 0s - loss: 0.6806 - accuracy: 0.8572
    Epoch 00003: val_accuracy improved from 0.85600 to 0.85667, saving model to ./data_out/seq2seq_kor/weights.h5
    9/9 [==============================] - 1s 70ms/step - loss: 0.6806 - accuracy: 0.8572 - val_loss: 0.5928 - val_accuracy: 0.8567
    Epoch 4/30
    9[...]Plot the resultsplot_graphs(history, 'accuracy')
    plot_graphs(history, 'loss')Check the resultsSAVE_FILE_NM = "weights.h5"
    model.load_weights(os.path.join(DATA_OUT_PATH, MODEL_NAME, SAVE_FILE_NM))
    query = "남자친구 승진 선물로 뭐가 좋을까?"
    
    test_index_inputs, _ = enc_processing([query], char2idx)    
    predict_tokens = model.inference(test_index_inputs)
    print(predict_tokens)
    
    print(' '.join([idx2char[str(t)] for t in predict_tokens]))[83 79 98 97 21 56]
    평소에 필요했던 게 좋을 것 생각해보세요Mission to Mars - web scrapingfrom splinter import Browser
    from bs4 import BeautifulSoup
    import pandas as pd
    import requests
    import time
    # Connect to the Chrome browser
    
    executable_path = {"executable_path": "C:/Users/cindy/Downloads/chromedriver_win32/chromedriver.exe"}
    browser = Browser("chrome", **executable_path, headless=False)NASA Mars Newsurl = "https://mars.nasa.gov/news/"
    browser.visit(url)
    html = browser.html
    
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.prettify())
    
    # find most recent news title from mars.nasa.gov
    
    news_title = soup.find_all('div', class_='content_title')[1].text
    print(news_title)
    # find the paragraph for the most recent news title from mars.nasa.gov
    
    news_p = soup.find('div', class_='article_teaser_body').text    
    print(news_p)Like much of the rest of the world, the Mars rover team is pushing forward with its mission-critical work while putting the health and safety of their colleagues and community first.JPL Mars Space Images - Featured Imageurl = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(url)
    html = browser.html
    
    # click the button to get to the page with the full image, wait 5 seconds for the page to load, then click the 'more info' link
    
    browser.click_link_by_partial_text('FULL IMAGE')
    time.sleep(5)
    
    browser.click_link_by_partial_text('more info')
    html = browser.html
    soup_jpl = BeautifulSoup(html, "html.parser")
    # print(soup_jpl.prettify())
    featured_url_image1 = soup_jpl.find('figure', class_='lede')
    # print(featured_url_image1)
    featured_url_image2 = featured_url_image1.find('a')['href']
    
    featured_image_url = f'https://www.jpl.nasa.gov{featured_url_image2}'
    print(featured_image_url)https://www.jpl.nasa.gov/spaceimages/images/largesize/PIA20465_hires.jpgMars Weather twitter account#  Added in a sleep timer to allow all the data to load
    
    url = "https://twitter.com/marswxreport?lang=en"
    browser.visit(url)
    
    time.sleep(10)
    
    html = browser.html
    soup_twitter = BeautifulSoup(html, "html.parser")
    # print(soup_twitter.prettify())
    # re is Python's regular-expression module, a small matching language embedded in Python.
    import re
    pattern = re.compile(r'sol')
    
    mars_weather = soup_twitter.find('span', text=pattern).text
    print(mars_weather)InSight sol 501 (2020-04-24) low -93.5ºC (-136.2ºF) high -4.3ºC (24.3ºF)
    winds from the SW at 5.0 m/s (11.3 mph) gusting to 15.8 m/s (35.3 mph)
    pressure at 6.70 hPaMars Facts# This will read HTML tables into a list of dataframe objects
    
    url = "https://space-facts.com/mars/"
    mars_list = pd.read_html(url)
    mars_list2 = mars_list[2]
    mars_list2
    # Rename the columns
    mars_list2.columns = ['Feature', 'Value']
    print(mars_list2)
    # put the data back into html format with html tags
    mars_list2 = mars_list2.set_index('Feature')
    mars_facts_html = mars_list2.to_html(classes='table table-bordered')
    mars_facts_htmlMars Hemispheres#  This will scrape the four Mars hemispheres and put each name/image-URL pair into a dictionary
    
    url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(url)
    
    time.sleep(10)
    
    html = browser.html
    soup_hemi = BeautifulSoup(html, "html.parser")
    #print(soup_hemi.prettify())
    hemi_list = soup_hemi.find('div', class_='collapsible results')
    #print(hemi_list.prettify)
    hemi_list2 = hemi_list.find_all('div', class_='item')
    # print(hemi_list2)
    # loop through the HTML to find the 4 hemispheres and the URL for each image, and collect them as a list of dictionaries.
    
    hemi_dict = []
    
    for x in hemi_list2:
        title = x.find("h3").text
        print (title)
        title = title.replace("Enhanced", "")
        link = x.find("a")["href"]
        img_link = "https://astrogeology.usgs.gov" + link
        print (img_link)
        browser.visit(img_link)
        time.sleep(5)
        html = browser.html
        soup = BeautifulSoup(html, "html.parser")
        image = soup.find('div', class_='downloads')
        image2 = image.find("a")["href"]
        hemi_dict.append({"title": title, "img url": image2})
    
    hemi_dictNumPy, SciPy and dynamic relaxation IntroductionIn today's session we will focus on more advanced uses of scientific computing libraries in Python. The main exercise will allow you to deform a mesh into a relaxed position, given an inverted gravitational force.  Acknowledgements*Dynamic relaxation* exercise was originally written by  and , *NumPy/SciPy introduction* by . Last edit on 09/2020Thanks to Ir.  for introducing the following reference:* ., , , and  (2014). Shell Structures for Architecture: Form Finding and Optimization. New York: Routledge. * Chapter 2: Review of Dynamic Relaxation with an extension to six degrees of freedom theory by .  NumPy[Numpy](https://numpy.org/doc/1.17/reference/index.html) ([the user guide](https://numpy.org/doc/1.17/user/index.htmluser)) is the core library for scientific computing in Python. It provides very fast code for linear algebra operations, effectively replacing Matlab. Numpy is a **core Python library** upon which many others are built (e.g. Pandas for data engineering, SciKit-learn for machine learning, SciPy for advanced linear algebra). So knowing the basics of Numpy will help you understand the higher-level libraries faster and adopt existing solutions (look up Stack Overflow: as long as you know how to Google, the solution will be there...)![image.png](attachment:image.png) How does Numpy work?The base component in Numpy is an array. In short it works like an algebraic matrix: it can have one or more dimensions. A numpy array is a grid of values, **all of the same type**. It stores its data contiguously in one place in the computer memory, unlike other Python array-like objects, whose elements are scattered in memory and only referenced by the container; those are easy to use, but performance suffers a lot. SciPy : TODOSciPy is an open-source Python library used for scientific and technical computing. It has several sub-packages, such as ones covering constants, spatial algorithms (e.g. distance and nearest-neighbor functions), linear algebra methods and many more. It is built on top of NumPy, making it not only a useful, but also a powerful tool. More about it [here](https://www.tutorialspoint.com/scipy/index.htm).This library will not be used in the following exercise, but it will prove very useful if you need more complex routines, such as numerical integration. Let's create a NumPy arrayimport numpy as np
    
    # Create a rank 1 array (1D matrix)
    a = np.array([1, 2, 3])   
    
    print(a)        
    print(type(a))            
    print(a.shape)            
    print(a[0], a[1], a[2]) 
    a[0] = 5
    print(a)Arrays can be made in many other ways and remembering this functionality (or printing out a cheat sheet) will save you precious coding time. Also, you are not limited to 2 dimensions: all examples below can be extended by 1 dimension by adding an additional value after the comma.# Creates an array of all zeros
           
                          
    # Creates an array of all ones
                 
    
    # Creates an array of all of a specific number (5 in this case)
          
    # Creates an identity matrix of a specified size (3 in this case)
                
                          
    # Creates a 2x3 array filled with random values in [0,5)Exercise 1Create an identity matrix of size 3, then change all the ones to a different value each.array = np.???Performance exampleTo compare the performance of pure Python and Numpy, follow the pseudocode instructions in the next cells.my_list = []
    # Populate my_list, such that it contains 10**6 lists of [2,2,2]
    ???
    def python_test(my_list):
        # TODO: multiply each number in my_list by 3
    
    # this is called a magic command, check out: 
    # https://ipython.readthedocs.io/en/stable/interactive/magics.html
    %time python_test(my_list)        
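    For reference, one possible way this comparison can come out (a sketch using the hypothetical names python_test_example/numpy_test_example, kept separate from the exercise functions you are asked to write yourself):
    # illustrative comparison: a pure-Python nested loop vs. a vectorised NumPy multiply
    py_data = [[2, 2, 2] for _ in range(10**6)]
    def python_test_example(data):
        return [[x * 3 for x in row] for row in data]
    np_data = np.full((10**6, 3), 2)
    def numpy_test_example(arr):
        return arr * 3
    %time _ = python_test_example(py_data)
    %time _ = numpy_test_example(np_data)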
    # TODO: create an array of 3x10**6 containing only 2s
    array = ???
    def numpy_test(array):
        # TODO: multiply each number in the array by 3
       
        
    %time numpy_test(array)Wall time: 12 msDynamic relaxationThis exercise consists of 3 main steps:1. Reading in the data and defining the initial state of the mesh2. Relaxing the mesh3. Plotting the results# For step 2
    from numpy import array, zeros, float64, append, sqrt, sign
    
    # For step 3
    import matplotlib
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    
    # necessary only for the Jupyter notebook
    %matplotlib notebook1. Reading in the data and defining the initial state of the meshDuring this step we need to read in the data, parse it into something we can manipulate, pre-process the geometries and pass the results / initial state of the mesh to the next process. The expected outputs of this step are:* points : List of points* edges : List of edges* vertices : List of vertices* xyzIS : Initial state of the system* bbmax, bbmin : Bounding box of the mesh-------The only input for the first step of the exercise is the location of the mesh file (.obj) you received together with this notebook.To begin with, let's locate the file and open it:filename = 'BasicRhinoOutput.obj' # TODO: fill in the name of the file and potentially the location
    #filename=filename.replace('\','//')
    # opening the file
    
    def open_obj_file(filename):
        with open(filename, 'r') as myfile:
            return myfile.read().split('\n')What is inside of data? (for a more elaborate description of the format, check out this [link](http://paulbourke.net/dataformats/obj/))data = open_obj_file(filename)
    dataHow do we make this readable?def process_obj_file(data):
        # initiating the output
        xyz = []  # xyz coordinates
        points = []  # references to vertices
        lines = []  # pairs of references to vertices
        
        # parsing the obj file, filling the data lists
        for line in data:
            # split the string line by ' '
            parts = line.split(' ')
    
            # TODO: this seems unnecessary
            # checking if it contains data 
            if not parts:
                continue
    
            # assigning the 1st element to head and the rest to tail 
            head = parts[0]
            tail = parts[1:]
    
            # appending vertex coordinates to (xyz) and assigning an index to them in (points)
            if head == 'v':
                ftail = [float(x) for x in tail]
                xyz.append(ftail)
                points.append(len(xyz)-1)
            # iterating through the edges of the faces and appending them in (lines)
            elif head == 'f':
                ftail = [float(x) for x in tail]
                for i in range(len(ftail)):
                    sp = ftail[i%len(ftail)]
                    ep = ftail[(i+1)%len(ftail)]
                    lines.append((int(sp)-1, int(ep)-1))
        return xyz, points, lines
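    To see what the parser returns, it can be fed a tiny hand-written OBJ fragment (illustrative data only):
    # a minimal OBJ fragment: three vertices and one triangular face
    sample = ["v 0.0 0.0 0.0", "v 1.0 0.0 0.0", "v 0.0 1.0 0.0", "f 1 2 3"]
    print(process_obj_file(sample))
    # -> ([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [0, 1, 2], [(0, 1), (1, 2), (2, 0)])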
    xyz, points, lines = process_obj_file(data)
    xyzHow do we preprocess the data?def mesh_welding(xyz):
        
        # This is how we store the welded points
        vertices = []
        
        # initiating the bounding box max and min point
        bbmax = [0,0,0]
        bbmin = [0,0,0]
    
        # points to vertices (welding similar vertices)
        x2v = {}
        tol = 0.001 ** 2
    
        for i, x in enumerate(xyz):
            found = False
            for j, v in enumerate(vertices):
                if (x[0] - v[0]) ** 2 < tol \
                        and (x[1] - v[1]) ** 2 < tol \
                        and (x[2] - v[2]) ** 2 < tol:
                    found = True
                    x2v[i] = j
                    break
            if not found:
                x2v[i] = len(vertices)
                vertices.append(x)
    
            #finding the bounding box
            for i in range(3):
                if x[i] > bbmax[i]: 
                    bbmax[i] = x[i]
                if x[i] < bbmin[i]: 
                    bbmin[i] = x[i]
        return bbmin, bbmax, vertices, x2v
    bbmin, bbmax, vertices, x2v = mesh_welding(xyz)
    verticesAnything we forgot?# redefining the indices: remapping the points to the new, welded locations
    points[:] = [x2v[index] for index in points]
    
    # redefining the edges by the refined indexes
    edges = [(x2v[u[0]], x2v[u[1]]) for u in lines]
    
    # preserving the Initial State of the system
    import copy
    xyzIS = copy.deepcopy(vertices)What does the initial state look like?xyzIS == verticesDynamic relaxation Input data:* vertices: a list of unique xyz coordinates* edges: a list of pairs of vertex indices* points: index list of fixed vertices* bbmax, bbmin : Bounding box coordinate list Output data:* xyzFS : Final state of the system# fixed points here are set to be the boundary of the rectangle
    fixed = []
    for i, pnt in enumerate(iter(xyz)):
        # check whether the point lies within 0.01 of the bounding box in x or y (i.e. on the boundary)
        if abs(pnt[0] - bbmin[0]) < 0.01  \
            or abs(pnt[0] - bbmax[0]) < 0.01 \
            or abs(pnt[1] - bbmin[1]) < 0.01 \
            or abs(pnt[1] - bbmax[1]) < 0.01:
            fixed.append(i)
    no_vert = len(vertices)
    no_ed = len(edges)
    
    freePoints = list(set(range(no_vert)) - set(fixed))
    
    xyz = array(vertices)
    
    # p : Applied Load Component, which in our case is zero
    p = ??? # create a NumPy array of zeros of shape no_vert x 3 and dtype float64
    
    # W : Gravity Force
    W = ??? # create a NumPy array that represents the gravity force
    
    # v : Velocity
    v = zeros((no_vert, 3), dtype=float64)
    
    
    # restlength : Rest Length of the springs
    diff = array([(xyz[edges[i][1]] - xyz[edges[i][0]]) for i in range(no_ed)])
    
    restlength = array([sqrt(diff[i,0]*diff[i,0] + diff[i,1]*diff[i,1] + diff[i,2]*diff[i,2]) for i in range(no_ed)])We need to make some assumptions, while making this code...# adamp : Acceleration damp controls the stability of the system
    adamp = 10
    IterationMax = 100
    
    # K : Elasticity Constant in Hooke's law
    K = 20
    
    # dt : Time intervals
    interval = 0.1
    mass = 1
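    One possible reading of the ??? placeholders above and inside the loop below, written as comments so the exercise cells stay as they are (the direction and magnitude of the inverted gravity load are assumptions):
    # p = zeros((no_vert, 3), dtype=float64)    # no applied load
    # W = zeros((no_vert, 3), dtype=float64)    # gravity force per vertex ...
    # W[:, 2] = mass * 10.0                     # ... assumed inverted, i.e. acting along +z
    # S = zeros((no_vert, 3), dtype=float64)    # stiffness force, reset every iteration
    # R = zeros((no_vert, 3), dtype=float64)    # resultant force, reset every iteration
    # EdgeForce = K * (dist - restlength)       # Hooke's law: K times the length difference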
    # this is a loop, in which we progress through time
    for k in range(IterationMax):
        
        # S : Stiffness Force
        S = ??? # TODO: create a NumPy array of zeros of shape no_vert x 3 and dtype float64
        # R : Final Forces
        R = ??? # TODO: create a NumPy array of zeros of shape no_vert x 3 and dtype float64
            
        # xyz0 : Initial Position
        xyz0 = xyz.copy()
        # vp : Initial Velocity
        vp = v.copy()
        
        # calculating the length of the edges
        di = array([(xyz[edges[i][1]] - xyz[edges[i][0]]) for i in range(no_ed)])
        dist = array([sqrt(di[i,0]*di[i,0] + di[i,1]*di[i,1] + di[i,2]*di[i,2]) for i in range(no_ed)])
    
        # Hooke's law : Force = length difference * Elasticity Constant (K)
        EdgeForce = ??? # TODO: implement the formula above
        # Edge Force : Decomposing the existing force in each edge into the 3 dimensions 
        EdgeForceS = zeros((no_ed, 3), dtype=float64)
    
        # iterating throughout the edges
        for i in range(no_ed):
            for j in range(3):
                # Separating the forces into X, Y, Z dimensions
                EdgeForceS[i,j] = (di[i,j] / dist[i]) * EdgeForce[i]
    
            # Adding the force of each edge to the corresponding nodes
            S[edges[i][0]] -= EdgeForceS[i]
            S[edges[i][1]] += EdgeForceS[i]
        
        R[freePoints] = p[freePoints] + W[freePoints] - S[freePoints]
        
        A = 1/(1 + adamp * interval * 0.5)
        B = (1 - adamp * interval * 0.5 ) / (1 + adamp * interval * 0.5)
        
        # updating the velocity
        v[freePoints] = A * interval * R[freePoints] / mass + B * vp[freePoints]
        # updating the position
        xyz[freePoints] = xyz0[freePoints] + interval * v[freePoints]
    
    
    xyzFS = xyz3. PlotterThe last step of the process uses the Matplotlib library from the previous workshop. In this step we create a visualization of the initial and/or final state of the system. Our inputs are:* xyzIS : initial state of the system* xyzFS : final state of the system* *Optional* bbmax, bbmin : Bounding box coordinate listcoordinates = xyzFS
    
    # initiating the plotter
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    
    # plotting edges
    for u, v in edges:
        ax.plot([coordinates[u][0], coordinates[v][0]],
                [coordinates[u][1], coordinates[v][1]],
                [coordinates[u][2], coordinates[v][2]], color='k')
    
    # plotting points
    for i in points:
        ax.scatter(coordinates[i][0], coordinates[i][1], coordinates[i][2],
                   color='r', s=4)
        
    # This command changes the scale of the axes
    ax.scatter([bbmin[0], bbmax[0]], [bbmin[1], bbmax[1]], [bbmin[2], bbmax[2]+10], color='w', s=1)
    
    # These set fixed axis limits for the plot (currently disabled)
    # ax.set_xlim([bbmin[0], bbmax[0]])
    # ax.set_ylim([bbmin[1], bbmax[1]])
    # ax.set_zlim([bbmin[2], bbmax[2]+10])
    
    plt.show()A library that works a bit faster ...[Plotly](https://plot.ly/python/) is another open source library for Python, that works significantly faster than Matplotlib on Jupyter notebooks. Also, it's a bit easier to use: see for yourself below.import plotly.graph_objects as go
    import numpy as np
    
    # Here we extract the mesh coordinates from the final state NumPy array
    
    # Initial state data is still in list format, so we first convert it to a NumPy array
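    One way these stubs (including the lines-and-dots plot mentioned in the next cell) could be filled in; a sketch assuming xyzFS, xyzIS and edges from the relaxation step above:
    # final state as a NumPy array; the initial state list converts the same way via np.asarray(xyzIS)
    fs = np.asarray(xyzFS)
    edge_x, edge_y, edge_z = [], [], []
    for a, b in edges:
        # None breaks the polyline between consecutive edges
        edge_x += [fs[a, 0], fs[b, 0], None]
        edge_y += [fs[a, 1], fs[b, 1], None]
        edge_z += [fs[a, 2], fs[b, 2], None]
    fig = go.Figure(data=[
        go.Scatter3d(x=edge_x, y=edge_y, z=edge_z, mode='lines', name='edges'),
        go.Scatter3d(x=fs[:, 0], y=fs[:, 1], z=fs[:, 2], mode='markers',
                     marker=dict(size=2), name='vertices'),
    ])
    fig.show()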
    
    # More complex example of Plotly plot: lines and dotsNaver blog crawler using Seleniumimport platform
    print(platform.architecture())
    !python --version
    pwd
    # After reading in a search term, search on Naver, open the Blog menu,
    # click the search-options button on the right,
    # and set the sort order and the date range
    
    #Step 0. Load the required modules and libraries and read in the search term.
    import sys
    import os
    import pandas as pd
    import numpy as np
    import math
    
    from bs4 import BeautifulSoup
    import requests
    import urllib.request as req
    
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
    import time
    import tqdm
    from tqdm.notebook import tqdm
    query_txt = '성심당여행대전'
    start_date= "20190101"
    end_date= "20210501"
    os.getenv('HOME')
    webdriver.__version__
    #Step 1. Launch the Chrome web browser
    path = os.getenv('HOME')+ '/chromedriver'
    driver = webdriver.Chrome(path)
    # the site address is Naver
    driver.get('https://blog.naver.com/')
    time.sleep(1)
    #Step 2. Type the search term into the Naver search box
    element = driver.find_element_by_name("sectionBlogQuery").send_keys(query_txt)
    # element.submit()
    time.sleep(2)
    driver.find_element_by_class_name("button.button_blog").click() # 관련도순 xpath
    # element.find_element_by_css_selector("#header > div.header_common > div > div.area_search > form > fieldset > a.button.button_blog").click() # 관련도순 xpath
    # element.clear()
    # element.send_keys(query_txt)  # query_txt는 위에서 입력한 '이재용'
    # element.submit()
    #Step 1. Launch the Chrome web browser
    path = os.getenv('HOME')+ '/chromedriver'
    
    driver = webdriver.Chrome(path)
    # the site address is Naver
    driver.get('https://blog.naver.com/')
    time.sleep(0.1)
    
    #Step 2. Type the search term into the Naver search box
    element = driver.find_element_by_name("sectionBlogQuery").send_keys(query_txt)
    # element.submit()
    time.sleep(0.1)
    
    driver.find_element_by_class_name("button.button_blog").click() # 관련도순 xpath
    time.sleep(0.1)
    
    
    #Step 4. Click the search-options button on the right
    driver.find_element_by_class_name("present_selected").click()
    
    
    #Step 6. Enter the start and end dates
    driver.find_element_by_id("search_start_date").send_keys(start_date)
    driver.find_element_by_id("search_end_date").send_keys(end_date)
    time.sleep(0.1)
    
    driver.find_element_by_id("periodSearch").click()
    time.sleep(0.1)
    
    searched_post_num = driver.find_element_by_class_name('search_number').text
    print(searched_post_num)
    
    url_list = []
    title_list = []
    
    total_page = 600 
    # total_page = math.ceil(int(searched_post_num.replace(',', '').strip('건')) / 7)
    print('total_page :', total_page)
    
    for i in tqdm(range(0, total_page)):  # page number
        url = f'https://section.blog.naver.com/Search/Post.naver?pageNo={i}&rangeType=sim&orderBy=recentdate&startDate={start_date}&endDate={end_date}&keyword={query_txt}'
        driver.get(url)
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'html.parser')
        print(soup)
    #     time.sleep(0.5)
    #     area = soup.findAll('div', {'class' : 'list_search_post'}) #.find_all('a', {'class' : 'url'})
    #     print(area)
        
        # start crawling the URLs
    #     titles = "a.sh_blog_title._sp_each_url._sp_each_title" # #content
    # #     article_raw = driver.find_elements_by_class_name('area_list_search')
    # #     article_raw = driver.find_elements_by_css_selector('#content > section > div.area_list_search > div:nth-child(1)')
        
    #     article_raw = driver.find_elements_by_xpath(f'//*[@id="content"]/section/div[2]/div[{i}]')
        
    #     print(article_raw)
    
    #     # start crawling the URLs    # 7 per page
    #     for article in article_raw:
    #         url = article.get_attribute('href')   
    #         print(url)
    #         url_list.append(url)
        
    #     # start crawling the titles
    #     for article in article_raw:
    #         title = article.get_attribute('title')   
    #         title_list.append(title)
        
    #         print(title)
        
    # print('url갯수: ', len(url_list))
    # print('url갯수: ', len(title_list))
    
    # df = pd.DataFrame({'url':url_list, 'title':title_list})
    
    # # save to CSV
    # df.to_csv("./blog_url.csv")
    li = [2, 3, 4, 4, 5, 6, 7, 8]
    len(li)
    for i in range(0, 8, 2):
        print(i)
    new = []
    for i in range(0, len(li)-1, 2):
        new.append([li[i], li[i+1]])
    new
    article_raw = driver.find_elements_by_xpath('//*[@id="content"]/section/div[2]/div[1]')
    # article_raw.get_attribute('href')
    for i in article_raw:
        print(i.get_attribute('href'))
    //*[@id="content"]/section/div[2]
    //*[@id="content"]/section/div[2]
    //*[@id="content"]/section/div[2]
    
    //*[@id="content"]/section/div[2]/div[1]
    //*[@id="content"]/section/div[2]/div[2]
    //*[@id="content"]/section/div[2]/div[3]
    ...
    //*[@id="content"]/section/div[2]/div[7]1 page = 7 posts72 page searchsample = https://section.blog.naver.com/Search/Post.naver?pageNo=1&rangeType=PERIOD&orderBy=sim&startDate=2019-01-01&endDate=2021-05-01&keyword=%EC%84%B1%EC%8B%AC%EB%8B%B9%EC%97%AC%ED%96%89%EB%8C%80%EC%A0%84## 제목 눌러서 블로그 페이지 열기
    driver.find_element_by_class_name('title').click()
    time.sleep(1)
    type(searched_post_num), searched_post_num
    import re
    re.sub('^[0-9]', '', searched_post_num)
    searched_post_num
    searched_post_num.replace(',', '').replace('건', '')
    total_page = math.ceil(int(searched_post_num.replace(',', '').strip('건')) / 7)
    total_pageLecture 10: Computations in Discrete Mathematics Please note: This lecture will be recorded and made available for viewing online. If you do not wish to be recorded, please adjust your camera settings accordingly.   Reminders/Announcements:- Assignment 3 is due Thursday at 8pm- Grades for Quiz 1, Assignments 1 & 2, and Participation checks for Lectures 4-8 will be available to you by Friday (which is some sort of drop deadline)- This lecture has participation checks again.- Small typo on Assignment 3  Participation UpdateCoCalc allows me to export your "file use times." It gives me a JSON file with entries like this (this is anonymized):![](usetimes.png)It should look familiar! It's a dictionary of dictionaries! The edit times take timestamps on when the file was edited (the unit here is "milliseconds since the epoch"). A Python script then scrapes this file to make sure the files were interacted with.If you missed a few points, don't worry! 5 Participation scores will be dropped for each student.  Combinatorics and The Online Encyclopedia of Integer Sequences (OEIS)Combinatorics is the art of *counting*. - How many ways can you pick k objects out of a set of n objects (the *binomial coefficients*)- How many ways can you order a list of n distinct objects (*the factorial*)- How many ways can you "partition" a group of n people (*the Bell numbers*)- ...Often a general answer is not easy to come by. A *very very very useful* resource is the OEIS: https://oeis.org/ . It is a huge database of integer sequences, together with the things that they count.Sample of how your research project might go:- Step 1: Enumerate the first few cases of your counting problem by hand (or better, with Sage)- Step 2: Plug in your results into OEIS to make a conjecture regarding the general answer- Step 3: Prove your conjecture using insights from OEISExample: For a positive integer $n$, define $[n] = \{1,2,\dots,n\}$. How many subsets of $[n]$ have no *consecutive elements* in them? I.e. how many subsets $S\subset[n]$ such that $\{i,i+1\}\not\subset S$ for all $i$.def count(n):
        tally = 0
        subsets = Subsets(n)
        for subset in subsets:
            if any([(i in subset and i+1 in subset) for i in range(1,n+1)]):
                pass
            else:
                tally+=1
        return(tally)
    for i in range(0,10):
        print(count(i))
    oeis([1,2,3,5,8,13,21,34,55,89])
    entry = oeis('A000045')
    print(entry.name())
    #print(entry.comments())Theorem: Let $S_n$ be the number of subsets of $[n]$ with no consecutive elements. Then $S_n$ is a Fibonacci number.Proof: Let $S$ be a subset of $[n]$. If $n$ is not in $S$, then in fact $S$ is a subset of $[n-1]$ with no consecutive elements. If $n$ is in $S$, then $S\setminus\{n\}$ is a subset of $[n-2]$ with no consecutive elements. This establishes a recursion $$S_n = S_{n-1}+S_{n-2}$$for $n\geq 2$. The initial conditions $S_0 = 1$, $S_1=2$ show that we have essentially a "reindexed" Fibonacci sequence.S = Subsets(3)
    S
    for subset in S:
        print(subset, len(subset))***** Participation Check ***************************Write a function `evenMinusOdd` which takes a positive integer $n$ as input and returns the difference between the number of even element subsets of $[n]$ and the number of odd element subsets of $[n]$,$$\ \{S:S\subset [n], |S| \text{ is even }\}-\ \{S:S\subset [n], |S| \text{ is odd }\}.$$Make a conjecture for what numbers count this sequence by running your function on $n=1,2,3,4,5,6,7$.def evenMinusOdd(n):
        #Your code here
    
    
    
    #your code here********************************************************* Sage also has nice set constructors, so you can easily play poker with all of your friends:suits = ['Diamonds','Clubs','Hearts','Spades']
    values = ['Ace','King','Queen','Jack']+[i for i in range(10,1,-1)]
    deckOfCards = list(cartesian_product([suits,values]))
    deckOfCards[0:13]
    shuffle(deckOfCards)
    deckOfCards[0:5]
    possibleHands = Subsets(deckOfCards, 5)
    len(possibleHands)Binomial CoefficientsThe binomial coefficient $\binom{n}{k}$ counts the number of ways of choosing a $k$ element subset from an $n$ element set. You may have seen the formula $$\binom{n}{k} = \frac{n!}{k!(n-k)!},$$or the *Binomial Theorem* $$(1+x)^n = \sum_{k=0}^n \binom{n}{k}x^k.$$for i in range(6):
        print(binomial(5,i))
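    The same numbers can be checked in plain Python as well (a small sketch; math.comb needs Python 3.8+):
    import math
    for n in range(1, 8):
        for k in range(n + 1):
            # closed formula n!/(k!(n-k)!) and the Pascal recurrence
            assert math.comb(n, k) == math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
            if 0 < k < n:
                assert math.comb(n, k) == math.comb(n - 1, k) + math.comb(n - 1, k - 1)
    print("binomial formula and Pascal recurrence agree for n < 8")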
    show(expand((1+x)^5))The binomial theorem actually gives a proof of your participation check;$$0 = 0^n = (1-1)^n = \sum_{k=0}^n\binom{n}{k}(-1)^k = \left(\sum_{k \text{ even }}\binom{n}{k}\right) - \left(\sum_{k \text{ odd }}\binom{n}{k}\right)$$There are many so called *binomial identities*. For example, the basic recurrence relation$$\binom{n}{k} = \binom{n-1}{k}+\binom{n-1}{k-1},$$or the so called *hockey stick identity*$$\sum_{k=r}^n\binom{k}{r} = \binom{n+1}{r+1}$$for n in range(10):
        print([binomial(n,k) for k in range(n+1)])You can get subsets of a given size in Sage by adding in a second parameter:L = ['Dog','Cat','Cow','Pig','Duck']
    for subset in Subsets(L,3):
        print(subset)Permutations in SageA *permutation* of length $n$ is a reordering of the list $[1,2,\dots,n]$. We will usually write a permutation as $$\pi = \pi_1\pi_2\dots\pi_n$$Be careful! Combinatorial indexing is different than Python list indexing...P = Permutations(4)
    for perm in P:
        print(perm)
    print(P.cardinality())Permutations are counted by the *factorial* $n! = n\cdot(n-1)\dots 3\cdot 2\cdot 1.$for i in range(7):
        print(factorial(i))Often one applies *statistics* to permutations. For example: an index $k$ is a *descent* if $\pi_k > \pi_{k+1}$. Descents in permutations are closely related to the algebraic and geometric properties of the symmetric group.for perm in Permutations(3):
        print(perm,perm.descents())Here's a cool fact. Let's count the so called *Grassmanian permutations*, which are those who have a descent set contained in $\{k\}$. I.e. either they have no descents or they have a single descent at a specified index.def grass(n,k):
        tally = 0
        for p in Permutations(n):
            if p.descents()==[k] or not p.descents():
                tally+=1
        return(tally)
    for n in range(9):
    print([grass(n,k) for k in range(n+1)])You can actually use this to give alternative proofs to many binomial identities!There is a *vast* literature on permutation statistics. You may have heard of cycle type, inversions, the Major index, left-right maxima, right-left maxima, peaks, pinnacles, excedances, ..... We will not go into more during this lecture, but may explore some in Assignment 4.   Set PartitionsA *set partition* of a set $S$ is a collection of nonempty blocks $\{B_1,\dots,B_k\}$ with $S = \cup B_i$ and $B_i\cap B_j = \emptyset$.Think of this as a way of assigning $n$ individuals into teams such that:- no team is empty- every player is on exactly one teamsetParts = SetPartitions(3)
    for partition in setParts:
        print(partition)Set partitions are counted by the *Bell numbers*.for k in range(0,10):
        print(len(list(SetPartitions(k))))
    oeis([1,1,2,5,15,52,203,877,4140,21147])You may have seen the Bell numbers from the power series expansion below:$$e^{e^{x}-1} = \sum_{n=0}^\infty \frac{B_n}{n!}x^n.$$f(x) = exp(exp(x)-1)
    f
    show(f.series(x,10))
    bellNums = [1,1,2,5,15,52,203,877,4140,21147]
    show([bellNums[i]/factorial(i) for i in range(10)])From this one can derive *Dobinski's formula*, which gives the explicit black magic$$B_n = \frac{1}{e}\sum_{k = 0}^\infty \frac{k^n}{k!}.$$var('k')
    for i in range(1,10):
        print(bell_number(i))
        print(float(sum(k^i / factorial(k), k, 1, 15)/e))
        print('*****************')You *do not* have to memorize or really care about Dobinski's formula for this class, but it is way cool! Combinatorial identities like these give us the power to actually compute something like the 50th Bell number, without actually listing things out and keeping a running tally. It takes *forever* to simply count all of the set partitions of even something like $\{1,2,3,\dots,12\}$...tally = 0
    S = SetPartitions(12)
    for partition in S:
        tally+=1
    print(tally)
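    Dobinski's formula can also be sanity-checked directly with a truncated sum in plain Python (a sketch; 30 terms is far more than enough at this size):
    import math
    approx_B12 = sum(j**12 / math.factorial(j) for j in range(30)) / math.e
    print(round(approx_B12))  # should match the brute-force tally above (and bell_number(12) below)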
    bell_number(12,'dobinski')
    bell_number(35,'dobinski')
    bell_number(35,'dobinski').ndigits()The number we just computed is on the order of $10^{30}$...For comparison...the universe is ~$14$ billion years old, which as we all know is a hair over $10^{17}$ seconds...modern processors are operating at the level of gigahertz, which is about $10^9$ clock ticks a second...so if you parallelized a cluster of computers and had them running since the big bang, you *might* have finished the calculation by now. But $B_{35}$ is only the beginning...bell_number(5000,'dobinski')
    bell_number(5000,'dobinski').ndigits()  
    bell_number??You can gain many beautiful combinatorial identities by restricting the number of blocks in your set partition; this leads to the *Stirling numbers of the second kind*. We will not go into them too much here, other than the following: ***** Participation Check ***************************In the code cell below, use Sage to compute the number of set partitions of $\{1,2,\dots,n\}$ which have $n-1$ blocks. Do this for $n=2,3,\dots,8$. In the second code cell, feed the sequence into OEIS to make a conjecture about the number of such partitions in general.#Your code here
    #Your OEIS call here*********************************************************  Integer PartitionsGiven a positive integer $n$, an *integer partition* of $n$ is a weakly decreasing sequence of positive integers $\lambda_1\geq \lambda_2\geq \dots \geq \lambda_k$ with $$n=\lambda_1+\dots+\lambda_k.$$Partitions are of great interest in both combinatorics *and* number theory *and* representation theory *and* probably other stuff!P = Partitions(5)
    for p in P:
        print(p)
    for i in range(10):
        print(len(list(Partitions(i))))
    oeis([1,1,2,3,5,7,11,15,22,30])There is a wealth of topics we could study regarding integer partitions, and we don't have time to get to all of them. Here's an example of the "odd equals distinct" phenomenon. The `parts_in` tag restricts the size of the numbers you are allowed to use in the partition (remember this for your next homework!) and the `max_slope` parameter tells you how big $\lambda_{i+1}$ can be compared to $\lambda_i$:for partition in Partitions(9,parts_in=[1,3,5,7,9]):  #This gives partitions using only odd numbers
        print(partition)
    for partition in Partitions(9,max_slope=-1):  #This gives partitions with no repetitions allowed (all the parts are distinct)
        print(partition)There is a beautiful proof of this dating back to Euler (because of course it was him), which we don't have time to get into. Finally, here is an example of the "restricted part size equals restricted number of parts" phenomenon:for partition in Partitions(7,max_part = 4):  #This gives partitions using parts <= 4
        print(partition)
    for partition in Partitions(7,max_length = 4):  #This gives partitions using at most 4 parts
        print(partition)Author: [](https://www.linkedin.com/in/yuntingchiu/)import cv2
    import matplotlib.pyplot as plt
    import numpy as np
    import time
    import pandas as pd
    #wd
    %cd /content/drive/MyDrive/American_University/2021_Fall/DATA-793-001_Data Science Practicum/data
    !pwd/content/drive/MyDrive/American_University/2021_Fall/DATA-793-001_Data Science Practicum/data
    /content/drive/MyDrive/American_University/2021_Fall/DATA-793-001_Data Science Practicum/dataExploratory Data AnalysisRead the data (`.npz` file)"""
    data_zipped = np.load("np_data_all.npz", allow_pickle=True)
    
    for item in data_zipped.files:
        print(item)
        print(data_zipped[item])
        
    print(data_zipped[item].shape)
    data = data_zipped[item]
    """arr_0
    [[array([134, 131, 116, ...,  68,  60,  71], dtype=uint8) 'fake']
     [array([133, 130, 115, ...,  71,  59,  71], dtype=uint8) 'fake']
     [array([117, 113, 112, ...,  43,  31,  45], dtype=uint8) 'fake']
     ...
     [array([ 33,  20,  66, ..., 188, 155, 172], dtype=uint8) 'real']
     [array([ 51,  28,  46, ..., 116,  52,  50], dtype=uint8) 'real']
     [array([174, 140, 102, ...,  23,  39,  98], dtype=uint8) 'real']]
    (13984, 2)Read the data (`.npy` file)data = np.load("np_data_one.npy", allow_pickle=True)Check the length of $X$ and $y$X = []
    y = []
    for i in data:
      X.append(i[0])
      y.append(i[1])
    print(len(X))
    print(len(y))
    print("The length should be " + str((6984+7000)))
    print(X)
    print(y)
    print("data dimension:",data.shape)[array([133, 130, 115, ...,  71,  59,  71], dtype=uint8), array([ 49,  40,  25, ..., 111, 105,  73], dtype=uint8), array([28, 21, 15, ..., 49, 44, 40], dtype=uint8), array([ 61,  61,  53, ..., 244, 246, 243], dtype=uint8), array([15, 14, 19, ..., 88, 66, 53], dtype=uint8), array([ 1,  1,  1, ..., 18, 14, 11], dtype=uint8), array([91, 61, 63, ..., 29, 20, 67], dtype=uint8), array([250, 187, 143, ..., 100,  99, 104], dtype=uint8), array([26, 25, 31, ..., 14, 15, 20], dtype=uint8), array([36, 32, 20, ..., 38, 30, 17], dtype=uint8), array([ 51,  37,  34, ..., 179, 158, 131], dtype=uint8), array([163, 146, 164, ..., 109,  55, 104], dtype=uint8), array([ 63,  58,  65, ..., 255, 247, 253], dtype=uint8), array([ 53,  41,  29, ..., 164, 180, 196], dtype=uint8), array([110,  32,  32, ..., 188, 149, 180], dtype=uint8), array([29, 18, 52, ..., 13, 14,  8], dtype=uint8), array([ 78,  59,  65, ..., 194, 207, 239], dtype=uint8), array([101,  77,  73, ...,  34,  32,  46], dtype=uint8), array([ 83,  77[...]Visualizationfake_cnt = 0
    real_cnt = 0
    for i in data:
      if i[1] == "fake":
        fake_cnt += 1
      else:
        real_cnt += 1
    
    #print(fake_cnt)
    #print(real_cnt)
    df = [['fake', fake_cnt], ['real', real_cnt]]
    df = pd.DataFrame(df, columns=['image_type', 'count'])
    #ax = df.plot.bar(x='video_type', y='count', rot=0)
    #fig = plt.figure()
    plt.bar(df['image_type'], df['count'])
    plt.xlabel("Image Type")
    plt.ylabel("Count")
    plt.savefig('count_type.png')Machine Learning Taskfrom sklearn.model_selection import train_test_split
    from sklearn.svm import SVC
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler # standardize features by removing the mean and scaling to unit variance.
    from sklearn.metrics import confusion_matrix
    #from sklearn.metrics import plot_confusion_matrix
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import classification_report
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import roc_curve
    from sklearn.metrics import roc_auc_scoreSupport Vector Machinestart_time = time.time()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # 80% for training, 20% for testing
    svm_clf = make_pipeline(StandardScaler(), SVC(gamma='scale', C = 1)) # clf = classifier
    svm_clf.fit(X_train, y_train)
    y_pred = svm_clf.predict(X_test)
    
    print("--- %s seconds ---" % (time.time() - start_time))
    print(confusion_matrix(y_test, y_pred))SVM Confusion Matrix#plot_confusion_matrix(svm_clf, X_test, y_test, values_format = '.0f') 
    #plt.figure(figsize=(12,8))
    #plt.show()
    conf_matrix = confusion_matrix(y_true = y_test, y_pred = y_pred)
    # Print the confusion matrix using Matplotlib
    
    fig, ax = plt.subplots(figsize=(7.5, 7.5))
    ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
    for i in range(conf_matrix.shape[0]):
        for j in range(conf_matrix.shape[1]):
            ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
     
    plt.xlabel('Predictions', fontsize=18)
    plt.ylabel('Actuals', fontsize=18)
    plt.title('Confusion Matrix', fontsize=18)
    plt.savefig('Confusion_Matrix.png')  # save before plt.show() so the rendered figure is written to disk
    plt.show()ROC curves- ROC Curves summarize the trade-off between the true positive rate and false positive rate for a predictive model using different probability thresholds.- Precision-Recall curves summarize the trade-off between the true positive rate and the positive predictive value for a predictive model using different probability thresholds.- ROC curves are appropriate when the observations are balanced between each class, whereas precision-recall curves are appropriate for imbalanced datasets."""
    # generate a no skill prediction (majority class)
    ns_probs = [0 for _ in range(len(y_test))]
    lr_probs = svm_clf.predict_proba(X_test)
    # keep probabilities for the positive outcome only
    lr_probs = lr_probs[:, 1]
    # calculate scores
    ns_auc = roc_auc_score(y_test, ns_probs)
    lr_auc = roc_auc_score(y_test, lr_probs)
    # summarize scores
    print('No Skill: ROC AUC=%.3f' % (ns_auc))
    print('Logistic: ROC AUC=%.3f' % (lr_auc))
    # calculate roc curves
    ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
    lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
    # plot the roc curve for the model
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic')
    # axis labels
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    # show the legend
    plt.legend()
    # show the plot
    plt.show()
    plt.savefig('ROC_AUC_Plot.png')
    """SVM Accuracy Scoreprint("----------Accuracy Score----------------")
    print(accuracy_score(y_test, y_pred))
    
    target_names = ['fake', 'real']
    print(classification_report(y_test, y_pred, target_names=target_names))----------Accuracy Score----------------
    0.6475
                  precision    recall  f1-score   support
    
            fake       0.68      0.60      0.64       207
            real       0.62      0.70      0.66       193
    
        accuracy                           0.65       400
       macro avg       0.65      0.65      0.65       400
    weighted avg       0.65      0.65      0.65       400Random Forest Classifierstart_time = time.time()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # 80% for training, 20% for testing
    #rf_clf = RandomForestClassifier(n_estimators=100, random_state=42, bootstrap=True)
    #rf_clf.fit(X_train, y_train)
    #y_pred = rf_clf.predict(X_test)
    
    #print("--- %s seconds ---" % (time.time() - start_time))
    #print(confusion_matrix(y_test, y_pred))Random Forest Accuracy Scoreprint(accuracy_score(y_test, y_pred))0.7257142857142858Logistic Regressionstart_time = time.time()
    lg_clf = LogisticRegression(random_state=42, C=1)
    lg_clf.fit(X_train, y_train)
    y_pred = lg_clf.predict(X_test)
    
    print("--- %s seconds ---" % (time.time() - start_time))
    print(confusion_matrix(y_test, y_pred))/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):
    STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
    
    Increase the number of iterations (max_iter) or scale the data as shown in:
        https://scikit-learn.org/stable/modules/preprocessing.html
    Please also refer to the documentation for alternative solver options:
        https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
      extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)Logistic Regression Accuracy Scoreprint(accuracy_score(y_test, y_pred))0.6528571428571428Nested Cross-Validation (Testing Zone)from sklearn.datasets import make_classification
    from sklearn.model_selection import KFold
    from sklearn.model_selection import GridSearchCV
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score
    # manual nested cross-validation for random forest on a classification dataset
    from numpy import mean
    from numpy import std
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_val_score
    from sklearn.model_selection import KFold
    from sklearn.model_selection import GridSearchCV
    from sklearn.ensemble import RandomForestClassifier
    # create dataset
    
    #X, y = make_classification(n_samples=1000, n_features=20, random_state=1, n_informative=10, n_redundant=10)
    #print(X.shape)
    #print(y.shape)
    
    # configure the cross-validation procedure
    cv_inner = KFold(n_splits=3, shuffle=True, random_state=1)
    # define the model
    model = RandomForestClassifier(random_state=42)
    # define search space
    space = dict()
    space['n_estimators'] = [10, 100, 500]
    #space['max_features'] = [2, 4, 6]
    # define search
    search = GridSearchCV(model, space, scoring='accuracy', n_jobs=1, cv=cv_inner, refit=True)
    # configure the cross-validation procedure
    cv_outer = KFold(n_splits=10, shuffle=True, random_state=1)
    # execute the nested cross-validation
    scores = cross_val_score(search, X_train, y_train, scoring='accuracy', cv=cv_outer, n_jobs=-1)
    # report performance
    print('Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
    
    
    result = search.fit(X_train, y_train)
    # get the best performing model fit on the whole training set
    best_model = result.best_estimator_
    # evaluate model on the hold out dataset
    yhat = best_model.predict(X_test)
    space = {}
    space['n_estimators'] = list(range(1, 1001))
    print(space)Animated plotsTimeseries plots can be animated by simply passing `animate=True` to the geo (or when calling hyp.plot).# Code source: 
    # License: MIT
    
    # import
    import hypertools as hyp
    
    # load example data
    geo = hyp.load('weights_avg')
    
    # plot
    geo.plot(animate=True, legend=['first', 'second'])*************************************************************************************************Written By:-   Date:-   Topic:- Language Modeling with Ngrams  ➢ What are we trying to solve here?  Using probabilistic models called N-grams to predict the next word from the previous n-1 words.  Note that this problem is corpus (data set of the sentences) specific.  ➢ Grams refers to the number of words taken into consideration. e.g. unigram means occurrence of a single word in the corpus. Bigrams means predicting the next word based on 1 previous word. Trigrams mean predicting the next word based on 2 previous words and so on.➢ This method is count based. A simple approach uses counts.  Count all the unigrams, e.g. the count of every word in the corpus. ➢ We’ll call a statistical model that can assess this probability a Language Model  *************************************************************************************************➢ Consider the following corpus  "The lake is very big. Its water is so transparent that you can see your face clearly. Its water is so transparent that the moon appears even bigger due to reflection"  How to estimate the probability of the word "the" given the previous words "its water is so transparent that"?  We calculate    Count(its water is so transparent that the) = 1 and  Count(its water is so transparent that) = 2Hence  P(the | its water is so transparent that) = Count(its water is so transparent that the) / Count(its water is so transparent that)   *************************************************************************************************Unfortunately, for most sequences and for most text collections we won’t get good estimates from this method.  ➢ What we’re likely to get is 0. Or worse, 0/0.  ➢ Let’s use the chain rule of probability  ➢ And a particularly useful independence assumption.   *************************************************************************************************Chain rule of Probability.P(A,B,C,D) = P(A)P(B|A)P(C|A,B)P(D|A,B,C)  Independence Assumption (Markov Assumption):  the probability in question is independent of its earlier history. *************************************************************************************************How to approach this problem  1) Decide the grams, e.g. bigrams (2-word counts), unigrams (1-word counts), etc.  2) P(wn | wn-1) = Count(wn-1, wn) / Count(wn-1)  Coding:  consider the following corpus  "Sales of the company to return to normalcy. The new products and services contributed to increase revenue."  We have to find the bigram estimates of sentence probabilities in the corpus.  Strategy:  0) preprocess the sentences (optional)  1) Count the unigram counts  2) count the bigram counts  3) find out bigram_count / unigram_count  *************************************************************************************************import re
    
    corpus = "Sales of the company to return to normalcy.\n Sales of the new products and services contributed to increase revenue."
    def _preprocess(corpus):
        
        #split the corpus into sentences
        sentences = corpus.split(".")
    
        #collect the processed sentences
        data = []
        
        for sentence in sentences:
            #skip empty strings produced by the final split
            if sentence != "":
                
                #keep words and digits only
                sentence = re.sub(r"[^a-zA-Z\d]", " ", sentence)
                
                # lower case words
                sentence = sentence.lower()
                
                #pad with a leading and trailing space so empty-string tokens mark sentence boundaries
                sentence = " " + sentence.strip() + " "
                
                #append processed data
                data.append(sentence)
        
        return data
        
    data = _preprocess(corpus)
    print(data)
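    # (Note added for clarity, not in the original) `data` now holds the two lower-cased
    # sentences, each padded with a leading and trailing space, e.g.
    # ' sales of the company to return to normalcy '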
    def _unigrams(data):
        #unigrams is a dict storing each unigram as a key and its occurrence count as the value
        unigrams = {}
        
        for sentences in data:
            
            #generate words from sentences
            words = sentences.split(" ")
            
            #process individual words
            for word in words:
                
                #if the key already exists in the dict, increment the count
                try:
                    if unigrams[word] >= 1:
                        unigrams[word] += 1
                #otherwise, store the key in the dict with count 1
                except KeyError:
                    unigrams[word] = 1
        return unigrams
    
    unigrams = _unigrams(data)
    print(unigrams)
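    # (Aside, not in the original) collections.Counter gives the same unigram counts more
    # compactly and avoids the manual try/except bookkeeping:
    # from collections import Counter
    # unigrams = Counter(word for sentence in data for word in sentence.split(" "))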
    def _bigrams(data):
        #bigrams is a dict storing each bigram tuple as a key and its occurrence count as the value
        bigrams = {}
        
        bigram_key_set = []
        
        for sentences in data:
            #generate words from sentences
            words = sentences.split(" ")
            
            #append bigrams of the words using zip function
            bigram_key_set.extend(list(zip(words, words[1:])))
            #print(bigram_key_set)
                    
            
        for key in bigram_key_set:
            #increment the count if the bigram was seen before, otherwise start at 1
            try:
                if bigrams[key] >= 1:
                    bigrams[key] += 1
            except KeyError:
                bigrams[key] = 1
        return bigrams
    
    bigrams = _bigrams(data)
    print(bigrams)
    def _probability(unigrams,bigrams):
        
        probability = {}
        
        for key in bigrams.keys():
            word1 = key[0]
            
            try:
                #bigram probability = bigram count / unigram count of the first word
                probability[key] = round(bigrams[key] / unigrams[word1], 2)
            #default to 0 if the first word was never counted
            except (KeyError, ZeroDivisionError):
                probability[key] = 0
        return probability
    
    probability = _probability(unigrams,bigrams)
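    # (Illustrative sketch, not part of the original notebook) The markdown above asks for
    # bigram estimates of sentence probabilities; under the Markov assumption, a sentence
    # score is the chain-rule product of its bigram probabilities.
    def _sentence_probability(sentence, probability):
        words = sentence.split(" ")
        p = 1.0
        for pair in zip(words, words[1:]):
            # unseen bigrams get probability 0 here; smoothing is out of scope
            p *= probability.get(pair, 0)
        return p

    # example: score the first preprocessed sentence
    print(_sentence_probability(data[0], probability))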
    print(probability){('', 'sales'): 1.0, ('sales', 'of'): 1.0, ('of', 'the'): 1.0, ('the', 'company'): 0.5, ('company', 'to'): 1.0, ('to', 'return'): 0.33, ('return', 'to'): 1.0, ('to', 'normalcy'): 0.33, ('normalcy', ''): 1.0, ('the', 'new'): 0.5, ('new', 'products'): 1.0, ('products', 'and'): 1.0, ('and', 'services'): 1.0, ('services', 'contributed'): 1.0, ('contributed', 'to'): 1.0, ('to', 'increase'): 0.33, ('increase', 'revenue'): 1.0, ('revenue', ''): 1.0}Binary ClassificationThis notebook contains the code to experiment with the embeddings created by a fine-tuned S-BERT model for **binary** classification. Includes loading data, loading the model, and running different models for the classification! pip install \
      scprep\
      spacy==2.3.2 \
      sentence_transformers==0.4.0 \
      phate==1.0.4 && \
      python -m spacy download es_core_news_lg
    WARNING! Once you have installed the packages in the previous cell, you must restart your runtime and then import the library and load the model.
    import spacy
    if spacy.prefer_gpu():
        print("Using the GPU")
    else:
        print("Using the CPU")
    es_nlp = spacy.load('es_core_news_lg')Using the GPUFor development work, in case you want to update the files in your GitHub branch by rerunning the clone, you first have to empty the folder.!rm -rf policy-data-analyzer/
    # Define branch to clone
    ! branch_name='master' && \
      git clone --branch $branch_name https://github.com/wri-dssg/policy-data-analyzer.git
    import os
    from sklearn.model_selection import train_test_split
    from sklearn import svm
    from sklearn.model_selection import cross_val_score
    from sklearn.metrics import classification_report
    from sklearn.ensemble import RandomForestClassifier
    from sentence_transformers import SentenceTransformer
    import time
    import cupy as cp
    import json
    
    os.chdir("policy-data-analyzer") #If you run this cell more than once, comment out this line because you are already in this folder and you will get an error
    from tasks.data_loading.src.utils import *
    from tasks.model_evaluation.src.model_evaluator import *
    from tasks.data_visualization.src.plotting import *
    from tasks.data_augmentation.src.zero_shot_classification.latent_embeddings_classifier import *
    
    from google.colab import drive
    drive.mount('/content/drive')Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).Run binary class classification experiments  Data Loadingexperiment = "EXP30"
    classifier = "Binary"
    
    # This first one is the one used by David and Daniel
    base_path = "/content/drive/MyDrive/WRI-LatinAmerica-Talent"
    
    # This one is the one used by Jordi
    # base_path = "/content/drive/MyDrive/Official Folder of WRI Latin America Project/WRI-LatinAmerica-Talent"
    
    data_path = f"{base_path}/Cristina_Policy_Files/Tagged_sentence_lists/datasets/{classifier}/"
      
    train_sents, train_labels, test_sents, test_labels = load_dataset(data_path, experiment)
    label_names = unique_labels(train_labels)
    numeric_train_labels = labels2numeric(train_labels, label_names)
    plot_data_distribution(numeric_train_labels, label_names)
    print("Train Sentence:", train_sents[2], "\nTrain Label:", train_labels[2])
    print("Test Sentence:", test_sents[2], "\nTest Label:", test_labels[2])Load best model from trainingmodel_name = "paraphrase-xlm-r-multilingual-v1"
    test_perc = 0.25
    num_epochs = 2
    model_deets = f"model={model_name}_test-perc={test_perc}_n-epoch={num_epochs}"
    
    saved_model_path = f"{base_path}/Modeling/{classifier}ClassificationExperiments/TESTS/{experiment}"
    bin_model = SentenceTransformer(saved_model_path)Encode Sentences First, we will check how good the fine-tuned embeddings are without the projection matrix additionall_sent_embs = encode_all_sents(test_sents, bin_model)
    visualize_embeddings_2D(np.vstack(all_sent_embs), test_labels, tsne_perplexity=50)[t-SNE] Computing 151 nearest neighbors...
    [t-SNE] Indexed 203 samples in 0.001s...
    [t-SNE] Computed neighbors for 203 samples in 0.011s...
    [t-SNE] Computed conditional probabilities for sample 203 / 203
    [t-SNE] Mean sigma: 1.657047
    [t-SNE] KL divergence after 250 iterations with early exaggeration: 56.095413
    [t-SNE] KL divergence after 1000 iterations: 0.254581Ok, doesn't look that bad, but not perfect either... The incentives are scattered too widely in the space, and the line between non-incentives and incentives is not clearly defined. ***For now, it doesn't matter - but we should experiment more with fine-tuning.***Now, let's check whether the projection matrix helps:proj_matrix = cp.asnumpy(calc_proj_matrix(train_sents, 50, es_nlp, bin_model, 0.01))
    all_sent_embs = encode_all_sents(test_sents, bin_model, proj_matrix)
    visualize_embeddings_2D(np.vstack(all_sent_embs), test_labels, tsne_perplexity=50)[t-SNE] Computing 151 nearest neighbors...
    [t-SNE] Indexed 203 samples in 0.000s...
    [t-SNE] Computed neighbors for 203 samples in 0.006s...
    [t-SNE] Computed conditional probabilities for sample 203 / 203
    [t-SNE] Mean sigma: 11.356366
    [t-SNE] KL divergence after 250 iterations with early exaggeration: 58.398811
    [t-SNE] KL divergence after 1000 iterations: 0.431297Actually, the projection matrix makes things worse. ***Let's NOT use it for now!!!!!***# Simple embeddings, no projection matrix added
    all_sent_embs = encode_all_sents(train_sents, bin_model)
    all_test_embs = encode_all_sents(test_sents, bin_model)100%|██████████| 306/306 [00:05<00:00, 54.31it/s]Train classifiersfrom sklearn.model_selection import cross_val_score
    from sklearn.metrics import classification_report1. Let's start with Random Forests!from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=69420)
    clf.fit(np.vstack(all_sent_embs), train_labels)
    clf_preds = [clf.predict(sent_emb)[0] for sent_emb in all_test_embs]
    print(classification_report(test_labels, clf_preds))
    numeric_preds = labels2numeric(clf_preds, label_names)
    numeric_test_labels = labels2numeric(test_labels, label_names)
    evaluator = ModelEvaluator(label_names, y_true=numeric_test_labels, y_pred=numeric_preds)
    evaluator.plot_confusion_matrix(color_map='Blues')Honestly, without Grid Search and 5-fold Cross Validation, these are not bad results... We should add those though! 2. Now, we're gonna try Support Vector Machinesfrom sklearn import svm
    clf = svm.SVC(gamma=0.001, C=100.)
    clf.fit(np.vstack(all_sent_embs), train_labels)
    clf_preds = [clf.predict(sent_emb)[0] for sent_emb in all_test_embs]
    numeric_preds = labels2numeric(clf_preds, label_names)
    numeric_test_labels = labels2numeric(test_labels, label_names)
    evaluator = ModelEvaluator(label_names, y_true=numeric_test_labels, y_pred=numeric_preds)
    print(classification_report(test_labels, clf_preds))
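    # (Hedged sketch, not part of the original notebook) The "next steps" note below suggests
    # adding Grid Search with 5-fold cross validation; a minimal version over the SVC
    # hyperparameters, assuming the training embeddings and labels defined above:
    from sklearn.model_selection import GridSearchCV
    param_grid = {'C': [1, 10, 100], 'gamma': [0.001, 0.01, 'scale']}
    grid = GridSearchCV(svm.SVC(), param_grid, cv=5, scoring='f1_macro')
    grid.fit(np.vstack(all_sent_embs), train_labels)
    print(grid.best_params_, grid.best_score_)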
    evaluator.plot_confusion_matrix(color_map='Blues')Ok, so SVMs are slightly better than Random Forests at ***differentiating*** text! There's a 1-2% decrease in performance for the incentive class, but a 6% gain in non-incentives. If these results remain when doing cross validation and grid search, then I'd recommend going for the SVMs.**Next steps:**- Add Grid Search Cross Validation from sklearnWhat about... Beto?I downloaded the weights and placed them in the folder below:!pip install transformers
    beto_path = f"{base_path}/Modeling/BETO/pytorch/"
    from transformers import BertTokenizer, BertForSequenceClassification***IMPORTANT:*** I was not able to figure out a way of using the fine-tuning results from the models above, so I'm gonna use BETO out of the box for both encoding and classification and see how it goes.The following cells are a demo of how the model should be put to use - once you understand it, feel free to skip this part!tokenizer = BertTokenizer.from_pretrained(beto_path)
    import torch  # needed below for torch.softmax
    model = BertForSequenceClassification.from_pretrained(beto_path)
    classes = ["no es parafrasis", "es parafrasis"]
    
    sequence_0 = "La compañia Hugging esta basada en Nueva York"
    sequence_1 = "Las manzanas son malas para la salud"
    sequence_2 = "La sede principal de Hugging esta en Manhattan"
    
    paraphrase = tokenizer(sequence_0, sequence_2, return_tensors="pt")
    not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors="pt")
    
    paraphrase_classification_logits = model(**paraphrase).logits
    not_paraphrase_classification_logits = model(**not_paraphrase).logits
    
    paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
    not_paraphrase_results = torch.softmax(not_paraphrase_classification_logits, dim=1).tolist()[0]
    print(">>> Deberia de ser parafrasis:")
    for i in range(len(classes)):
      print(f"{classes[i]}: {int(round(paraphrase_results[i] * 100))}%")
    
    print("\n>>> NO deberia de ser parafrasis:")
    for i in range(len(classes)):
      print(f"{classes[i]}: {int(round(not_paraphrase_results[i] * 100))}%")>>> Deberia de ser parafrasis:
    no es parafrasis: 41%
    es parafrasis: 59%
    
    >>> NO deberia de ser parafrasis:
    no es parafrasis: 61%
    es parafrasis: 39%OK! Now it's time to apply it to our data. We will try it out with our test set, just to have a fair comparisontokenized_sents = tokenizer(test_sents, padding=True, return_tensors="pt")
    clf_logits = model(**tokenized_sents).logits
    clf_results = torch.softmax(clf_logits, dim=1).tolist()
    # This stores the index of the highest score - in other words, our label
    clf_preds = [np.argmax(logits) for logits in clf_results]
    print(classification_report(numeric_test_labels, clf_preds))
    evaluator = ModelEvaluator(label_names, y_true=numeric_test_labels, y_pred=clf_preds)
    evaluator.plot_confusion_matrix(color_map='Blues')Exercises# Reverse the list:
    # [1,5,2,6,7,1,6] -> [6,1,7,6,2,5,1]
    # Remove the duplicates from the list:
    # [2,4,7,1,6,8,1,2,4] -> [1,2,4,6,7,8]
    # Split the list into a list of three-element tuples:
    # [4,2,4,7,1,2,4,6,8] -> [(4,2,4),(7,1,2),(4,6,8),(9,)]
    # Count the characters in the sentence: "Dzień dobry wszystkim!"
    # Replace the character "z" in the sentence: "Dzień dobry wszystkim!"
    # with the character "Z": "DZień dobry wsZystkim!"
    # Remove the extra spaces in the text:
    # "Wyszedłem  rano na   zawnątrz i sypał   śnieg." ->
    # "Wyszedłem rano na zawnątrz i sypał śnieg."
    # Write a loop that:
    # - stops when it reaches the number '24' in the list
    # - skips the divide-by-2 operation
    #   for odd numbers
    # List: [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    #        20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
    # Pro-tip: range(10,30)
    # Write a loop that:
    # - multiplies every number by 3
    # - divides even numbers by 2
    # List: [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    #        20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
    # Pro-tip: range(10,30)NSF COA author/affiliation toolInspired by [this awesome tool](https://github.com/ejfertig/NSFBiosketch) from Dr. , but unable to get it to run in time due to a Java install problem with the xlsx package in my perpetually infuriating R environment, I whipped up something similar for the Pythonistas. This tool will take a list of PMIDs and return the list of authors and affiliations, along with the most recent authorship date.import pandas as pd
    from pymed import PubMed
    from time import sleepImport papersImport a list of your publication PMIDs, one per line in a plaintext filepmids = []
    with open('PMID-export.txt', 'r') as f:
        for line in f:
            pmids.append(line.strip())
    pmidsWe'll sort them in chronological order, to ensure we get the most recent conflict dates per authorpmids.sort()
    # Create a PubMed object that GraphQL can use to query
    # Note that the parameters are not required but kindly requested by PubMed Central
    # https://www.ncbi.nlm.nih.gov/pmc/tools/developers/
    
    pubmed = PubMed(tool="BioSketchify", email="my")Retrieve and parse PubMed entriesQuery PubMed one publication at a time, and parse the author and affiliation list.Due to API limits, we have to limit the rate at which we query.authors = {}
    
    for pmid in pmids:
        results = pubmed.query(pmid, max_results=1)
        for article in results:
            for author in article.authors:
                name = '%s, %s' % (author['lastname'], author['firstname'])
                year = article.publication_date.year
                affiliation = author['affiliation']
                authors[name] = (year, affiliation)
        print(article.title)
        sleep(1)Cephaloticoccus gen. nov., a new genus of 'Verrucomicrobia' containing two novel species isolated from Cephalotes ant guts.
    Dissecting host-associated communities with DNA barcodes.
    Gut microbiota of dung beetles correspond to dietary specializations of adults and larvae.
    By their own devices: invasive Argentine ants have shifted diet without clear aid from symbiotic microbes.
    Unraveling the processes shaping mammalian gut microbiomes over evolutionary time.
    Corrigendum: Cephaloticoccus gen. nov., a new genus of 'Verrucomicrobia' containing two novel species isolated from Cephalotes ant guts.
    The structured diversity of specialized gut symbionts of the New World army ants.
    Ant-plant mutualism: a dietary by-product of a tropical ant's macronutrient requirements.
    Dramatic Differences in Gut Bacterial Densities Correlate with Diet and Habitat in Rainforest Ants.
    A communal catalogue reveals Earth's multiscale microbial diversity.
    The human microbiome in evolution.
    Improving saliva shotgun[...]Make an author dataframe, with blank columns for "Organization" and "Department"author_df = pd.DataFrame.from_dict(authors, orient='index', columns=['year','affiliation'])
    author_df['Organization'] = ''
    author_df['Department'] = ''
    
    author_df.head()Split affiliation into department and organizationThis might be optional, but PubMed stores affiliation in a single column, and NSF requests 'Organization' be in its own column. This function will loop over the author dataframe, and present each comma-separated element of the 'affiliation' value to you and prompt for input. Press 1 to store that chunk to the 'Department' column, 2 to store that chunk to the 'Organization' column, and any other key to move to the next author.It will only parse authors that have no entry for the required 'Organization' column, so if you miss that and re-run this cell it will pick up where you left off.print("Enter 1 for Department, 2 for Organization, or nothing to skip rest")
    
    for i, author in author_df.iterrows():
        if author['Organization'] != '':
            continue
        try:
            for bit in author['affiliation'].split(','):
    
                print(bit)
                choice = input("Input:")
                if choice == '1':
                    author_df.loc[i, 'Department'] = author_df.loc[i, 'Department'] + bit
                elif choice == '2':
                    author_df.loc[i, 'Organization'] = author_df.loc[i, 'Organization'] + bit
                else:
                    break
        #some authors have no affiliation string; skip them
        except (AttributeError, TypeError):
            continue
        
    author_df.head()Export author dataframe to CSV fileYou can now open this in your favorite spreadsheet column to clean it up and add to the NSF workbook.author_df.to_csv('authors_with_affiliations.csv')14 - Beginner Exercises*   While Loop*   Break & Continue   🍦🍦🍦  1.Create a Python program that finds the number and sum of all integers between 7801 and 8853 that are divisible by 13.# Write your own code in this cell🍦🍦 2.Create a program that takes a number from the user and multiplies it by the power of eight, then repeats the process until the user enters the number zero.# Write your own code in this cell🍦3.Write a function that append the first 50 prime numbers in a list, then print that list.# Write your own code in this cell🍦🍦4.Write code that produces the following output:(use **break**)```* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *  ```# Write your own code in this cell🍦🍦5.Write code that produces the following output:(use continue)```* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ```# Write your own code in this cellProblem 39 Integer right trianglesIf $p$ is the perimeter of a right angle triangle with integral length sides, $\{a,b,c\}$, there are exactly three solutions for $p = 120$.$$\{20,48,52\}, \{24,45,51\}, \{30,40,50\}$$For which value of $p \le 1000$, is the number of solutions maximised? Solutionfrom math import isqrt, sqrt
    def compute(n: int) -> int:
        perimeters = {}
        for a in range(3, n // 3):
            for b in range(a, n // 2):
            k = a * a + b * b
            c = isqrt(k)
            # integer perfect-square check avoids float precision issues with sqrt()
            if c * c == k:
                p = a + b + c
                    if p in perimeters:
                        perimeters[p] += 1
                    else:
                        perimeters[p] = 1
        return max(perimeters, key=perimeters.get)
    compute(1_000)
    %timeit -n 100 -r 1 -p 6 compute(1_000)24.1138 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 100 loops each)MobileNetfrom keras.applications import VGG16
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
    from keras.layers.normalization import BatchNormalization
    from keras.models import Model
    from keras.preprocessing.image import ImageDataGenerator
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    from keras.optimizers import Adam
    from keras.models import load_model
    import keras
    
    import numpy as np
    from sklearn.metrics import confusion_matrix,classification_report
    import matplotlib.pyplot as plt
    #--------------#
    # custom metrics
    #--------------#
    # for custom metrics
    import keras.backend as K
    from keras.applications.inception_v3 import InceptionV3
    from keras.applications.mobilenet import MobileNet
    from keras.layers import Input
    # load MobileNet
    img_rows, img_cols = 224, 224 
    img_width, img_height = 224,224
    weights='weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
    
    
    
    
    # fix data format issues
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)
    # Re-loads the MobileNet model without the top or FC layers
    
    mobile = keras.applications.mobilenet.MobileNet(input_shape=input_shape,
                                                       alpha=1.0, 
                                                       depth_multiplier=1, 
                                                       dropout=1e-3, 
                                                       include_top=False, 
                                                       weights='imagenet')
    
    
    
    # Here we freeze all layers of the base model
    # (layers are trainable by default)
    for layer in mobile.layers:
        layer.trainable = False
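    # (Sketch, not in the original) A common transfer-learning variant keeps the last few
    # layers trainable instead of freezing everything, e.g. to leave the last 4 unfrozen:
    # for layer in mobile.layers[:-4]:
    #     layer.trainable = False
    # for layer in mobile.layers[-4:]:
    #     layer.trainable = True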
    import pandas as pd
    pd.set_option('max_colwidth', -1)
    layers = [(layer, layer.name, layer.trainable) for layer in mobile.layers]
    pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']) 
    base_model = keras.applications.InceptionV3(weights='imagenet',
                                                include_top=False, # remove top fully connected layers
                                                input_shape=(img_width, img_height, 3))
    # build a classifier model to put on top of the convolutional model
    model_top = Sequential()
    model_top.add(GlobalAveragePooling2D(
        input_shape=base_model.output_shape[1:],
        data_format=None))
    model_top.add(Dense(256, activation='relu'))
    model_top.add(Dropout(0.5))
    model_top.add(Dense(4, activation='softmax'))
    model = Model(inputs=base_model.input,
                  outputs=model_top(base_model.output))
    # compile the model using the Adam optimizer and categorical cross entropy loss;
    # use a low learning rate for transfer learning
    model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    
    # note that it is necessary to start with a fully-trained
    # classifier, including the top classifier,
    # in order to successfully do fine-tuning
    #top_model.load_weights(top_model_weights_path)
    
    
    # define directories and parameters
    train_data_dir = 'data/train'
    test_data_dir = 'data/test'
    validation_data_dir = 'data/validation'
    
    train_datagen = ImageDataGenerator(
          rescale=1./255,
          rotation_range=20,
          width_shift_range=0.2,
          height_shift_range=0.2,
          horizontal_flip=True,
          fill_mode='nearest')
     
    validation_datagen = ImageDataGenerator(rescale=1./255)
    # generator/ load data
    batch_size=16
     
    train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='categorical')
     
    validation_generator = validation_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='categorical')
    #--------------#
    # train top layers
    #--------------#
    checkpoint = ModelCheckpoint("retinal_mobile1.h5",
                                 monitor="val_loss",
                                 mode="min",
                                 save_best_only = True,
                                 verbose=1)
    
    earlystop = EarlyStopping(monitor = 'val_loss', 
                              min_delta = 0, 
                              patience = 3,
                              verbose = 1,
                              restore_best_weights = True)
    
    callbacks = [earlystop, checkpoint]
    mobile.compile(loss = 'categorical_crossentropy',
                  optimizer = Adam(lr = 0.001),
                  metrics = ['accuracy'])
    nb_train_samples = 83484
    nb_validation_samples = 968
    model = mobile
    epochs = 10
    batch_size = 16
    history = model.fit_generator(
        train_generator,
        steps_per_epoch = nb_train_samples // batch_size,
        epochs = epochs,
        callbacks = callbacks,
        validation_data = validation_generator,
        validation_steps = nb_validation_samples // batch_size)
    scores = model.evaluate_generator(validation_generator,steps=nb_validation_samples // batch_size+1, verbose=1)
    print('\nTest result: %.3f loss: %.3f' %(scores[1]*100,scores[0]))
    model.save("retinal_mobile.h5")
    validation_generator = validation_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='categorical',
            shuffle=False)
    class_labels = validation_generator.class_indices
    class_labels = {v: k for k, v in class_labels.items()}
    classes = list(class_labels.values())
    y_pred = model.predict_generator(validation_generator, nb_validation_samples // batch_size+1)
    y_pred_label = np.argmax(y_pred, axis=1)
    validation_generator.classes.shape,y_pred_label.shape
    # Confusion Matrix and Classification Report
    print('Confusion Matrix')
    print(confusion_matrix(validation_generator.classes, y_pred_label))
    print('Classification Report')
    print(classification_report(validation_generator.classes, y_pred_label, target_names=classes))
    plt.figure(figsize=(8,8))
    cnf_matrix = confusion_matrix(validation_generator.classes, y_pred_label)
    
    plt.imshow(cnf_matrix, interpolation='nearest')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    _ = plt.xticks(tick_marks, classes, rotation=90)
    _ = plt.yticks(tick_marks, classes)
    plt.show();
    #--------------#
    # plot acc and loss two plot
    #--------------#
    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    
    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    # load classifier
    from keras.applications import MobileNet
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D,GlobalAveragePooling2D
    from keras.layers.normalization import BatchNormalization
    from keras.models import Model
    from keras.preprocessing.image import ImageDataGenerator
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    from keras.optimizers import Adam
    from keras.models import load_model
    
    import numpy as np
    from sklearn.metrics import confusion_matrix,classification_report
    import matplotlib.pyplot as plt
    
    classifier = load_model("retinal_mobile.h5")IntroductionThis notebook aims to give a quick presentation of the `hsbalance` package. `hsbalance` is a Python-based package meant to find an optimized solution to the rotor balancing problem.  A. Independent Systems  Creating Model: for independent systems, where the number of balancing planes equals the number of measuring points, we do not need an optimization process since the number of equations equals the number of unknowns. 1. Enter the initial vibration column vector `A`:- each row represents the vibration at a certain measuring plane.- vibration is represented in the form ('amplitude' @ 'phase(degrees)')- Enter the slow-roll vibration column `A0`A_math = [['170@112'],
         ['53@78']]
    
    A0_math = [['12@30'],
          ['12@30']]
    2. Enter the trial mass effect matrix `B`: B = [['B00', 'B01'], ['B10', 'B11']] where:
    - B00: vibration at measuring point 1 when the trial mass is at balancing plane 1
    - B01: vibration at measuring point 1 when the trial mass is at balancing plane 2
    - B10: vibration at measuring point 2 when the trial mass is at balancing plane 1
    - B11: vibration at measuring point 2 when the trial mass is at balancing plane 2
    As a general rule in this notebook, columns are for balancing planes and rows are for measuring points.
    B_math = [['235@94', '189@115'],
         ['58@68', '77@104']]3. Enter the trial mass amounts in row vector `U`:U_math = ['1.15@0', '1.15@0']4. Transform matrices to cartesian (complex number) form:A = hs.convert_matrix_to_cart(A_math)
    A0 = hs.convert_matrix_to_cart(A0_math)
    B = hs.convert_matrix_to_cart(B_math)
    U = hs.convert_matrix_to_cart(U_math)
    print('A=\n{}\n\nA0=\n{}\n\nB=\n{}\n\nU = {}'.format(A, A0, B, U))A=
    [[-63.68312088+157.62125528j]
     [ 11.01931961 +51.84182284j]]
    
    A0=
    [[10.39230485+6.j]
     [10.39230485+6.j]]
    
    B=
    [[-16.39277133+234.42755181j -79.87485147+171.29217175j]
     [ 21.72718242 +53.77666356j -18.62798596 +74.71277092j]]
    
    U = [1.15+0.j 1.15+0.j]As in this example, this is an independent system where the number of measuring points (M) is equal to the number of balancing planes (N). We thus expect an exact solution for the balancing weights `W`, which can be calculated by first finding the influence coefficient matrix `ALPHA`:\begin{align}  \tag {1}  \alpha = \frac{(B - A)}{U} \label{eq:test1}\end{align}\begin{align}    \tag {2}    W = - \alpha^{-1}(A - A_{0})\end{align}Alpha_CI = (B - A)/U
    W = -np.linalg.inv(Alpha_CI) @ (A - A0)
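    # (Sanity-check sketch, not in the original) With the exact solution, the predicted
    # residual vibration A + Alpha_CI @ W should reduce to the slow-roll vector A0:
    print(np.allclose(A + Alpha_CI @ W, A0))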
    hs.convert_matrix_to_math(Alpha_CI)
    WTransform back to mathematical expression formALPHA_math = hs.convert_matrix_to_math(Alpha_CI)
    W_math = hs.convert_matrix_to_math(W)
    print('ALPHA=\n{}\n\nW=\n{}'.format(ALPHA_math, W_math))ALPHA=
    [['78.433 @ 58.4' '18.427 @ 139.8']
     ['9.462 @ 10.2' '32.56 @ 142.4']]
    
    W=
    [['1.952 @ 239.5']
     ['0.894 @ 139.1']]>This means we need to put about 1.95 grams at an angle of 239.5 degrees on balancing plane 1, and about 0.89 grams at 139.1 degrees on plane 2. - Let's try out the same independent system problem using our optimization modeling code:  - first we will create a model of the system parameters  - we will be using the least squares model, solving the problem with an optimization technique that minimizes the sum of squared errors:alpha = hs.Alpha()  # create an instance of alpha class
    alpha.add(A=A, B=B, U=U)  # calculate alpha from parameters
    hs.convert_matrix_to_math(alpha.value)
    print(alpha)
    my_model = hs.LeastSquares(A-A0, alpha)
    W = my_model.solve()
    hs.convert_matrix_to_math(W)
    my_model.rmse()Which is exactly the same as the exact solution. In order to summarize the model:print(my_model.info())++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL TYPE
    ==================================================
    LeastSquares
    ==================================================
    End of MODEL TYPE
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    INFLUENCE COEFFICIENT MATRIX
    ==================================================
    
    ++++++++++++++++++++++++++++++++++++++++
    Influence Coefficient Matrix
    ++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++
    Coefficient Values
    ==============================
                    Plane 1         Plane 2
    Sensor 1  78.433 @ 58.4  18.427 @ 139.8
    Sensor 2   9.462 @ 10.2   32.56 @ 142.4
    ==============================
    End of Coefficient Values
    ++++++++++++++++++++++++++++++++++++++++
    
                   [...]A. Dependent Systems IntroductionIn dependent systems, the number of measuring points is less than the number of balancing planes. This leads to a problem with an infinite number of solutions, since the number of unknowns exceeds the number of equations. We can use optimization techniques here effectively to reduce the error, and we can apply constraints to our model. We will use the example in [[1]](1), which presents a 1150 MW nuclear power turbine-generator balancing problem. The system consists of 11 measuring points and 5 balancing planes. In practice, plane 4 was not accessible.  [1] , , and . Balancing a 1150 MW turbine-generator. United Kingdom: N. p., 2000. Web.  ParametersALPHA_math=[
                                ['9.8@117', '17@124', '7.2@114', '38.5@77'],
                                ['2.7@43', '14.3@317', '4.5@213', '14.3@270'],
                                ['12.5@323', '25@261', '15.2@158', '30@238'],
                                ['22.4@92', '32.6@45', '23.3@315', '27.8@210'], 
                                ['26@94', '40.3@9', '25@330', '34@213'],
                                ['40.3@355', '43@144', '29.6@61', '65.4@322'],
                                ['20.6@339', '32.3@152', '36.7@41', '61.8@322'],
                                ['12.6@226', '37.6@52', '18.8@153', '26@176'],
                                ['13.4@209', '26.9@76', '47.5@98', '71.7@312'],
                                ['13.4@154', '22.4@307', '52@299', '102@165'],
                                ['5.4@24', '7.2@199', '22.4@2', '27.8@99']]
    
    A_math=[
                                ['55@259'], 
                                ['45@118'],
                                ['124@21'],
                                ['138@349'],
                                ['107@349'],
                                ['90@280'],
                                ['58@354'],
                                ['108@201'],
                                ['88@190'],
                                ['56@48'],
                                ['73@158']]Convert to complex numbers (cartesian) formA = hs.convert_matrix_to_cart(A_math)
    ALPHA = hs.convert_matrix_to_cart(ALPHA_math)
    # A, ALPHAAdding ALPHAalpha = hs.Alpha()
    alpha.add(direct_matrix=ALPHA)
    alpha.check()Not a square matrix --> no exact solution.
    
    No ill conditioned planes --> okSolving with Least squares:model_LeastSquares = hs.LeastSquares(A, alpha, name='Least_squares') # Instantiate least square model
    W_LeastSquares = model_LeastSquares.solve() #solve
    hs.convert_matrix_to_math(W_LeastSquares)
    residuals_LeastSquares = model_LeastSquares.expected_residual_vibration()
    hs.convert_matrix_to_math(residuals_LeastSquares) # Expected residual vibrationsRoot mean square error:rmse_LeastSquares = model_LeastSquares.rmse()
    rmse_LeastSquares
    print(model_LeastSquares.info())++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL TYPE
    ==================================================
    LeastSquares
    ==================================================
    End of MODEL TYPE
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL NAME
    ==================================================
    Least_squares
    ==================================================
    End of MODEL NAME
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    INFLUENCE COEFFICIENT MATRIX
    ==================================================
    
    ++++++++++++++++++++++++++++++++++++++++
    Influence Coefficient Matrix
    ++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++
    [...]DiscussionLeast squares iterates to minimize the sum of squared errors (hence the lowest `RMSE`). It does so blindly, so we can see that it adds a huge amount of weight at plane 1 (3.8 kg!), while the vibration at bearing 3 is expected to be 106 $\mu$, which is probably an alarm value!  Solving with MinMax:model_MinMax = hs.Min_max(A, alpha, name='MinMax') # Instantiate MinMax model
    W_MinMax = model_MinMax.solve() #solve
    hs.convert_matrix_to_math(W_MinMax)
    residuals_MinMax = model_MinMax.expected_residual_vibration()
    hs.convert_matrix_to_math(residuals_MinMax) # Expected residual vibrationsRoot mean square error:rmse_MinMax = model_MinMax.rmse()
    rmse_MinMax
    print(model_MinMax.info())++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL TYPE
    ==================================================
    Min_max
    ==================================================
    End of MODEL TYPE
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL NAME
    ==================================================
    MinMax
    ==================================================
    End of MODEL NAME
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    INFLUENCE COEFFICIENT MATRIX
    ==================================================
    
    ++++++++++++++++++++++++++++++++++++++++
    Influence Coefficient Matrix
    ++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++
    Coefficient [...]Discussion`MinMax` is a great optimization tool that, unlike the unbiased `Least Squares`, levels the residuals to minimize the maximum. Here we see a great improvement in the residual vibrations (max 70$\mu$). The downside is that it puts more weight into the correction (4.4 kg in plane 1 now!) and ends up with a higher RMSE. In order to constrain the weight to a certain limit we can use a `Constrained MinMax` modelweight_const ={0 : 3.402, 1 : 3.402, 2 : 3.402, 3 : 3.402} # limit weight to 120 oz
    model_MinMax_const = hs.Min_max(A, alpha, weight_const=weight_const, name='MinMax_const') # Instantiate MinMax model
    W_MinMax_const = model_MinMax_const.solve() #solve
    hs.convert_matrix_to_math(W_MinMax_const)
    residuals_MinMax_const = model_MinMax_const.expected_residual_vibration()
    hs.convert_matrix_to_math(residuals_MinMax_const) # Expected residual vibrationsRoot mean square error:rmse_MinMax_const = model_MinMax_const.rmse()
    rmse_MinMax_constDiscussion`Constrained MinMax` has done its job in limiting the weights to 3.402 kg (120 oz).  The downside is that we get a higher maximum residual vibration (73$\mu$)  Solving with Linear Matrix Inequality (LMI) In certain situations, instead of being unbiased ---> `Least Squares` or leveled ---> `MinMax`, we actually want to be BIASED towards certain planes. In other words, we want the optimizer to do its best to decrease certain planes (`critical planes`) and keep the others under a `lazy constraint`, just below a certain vibration level.weight_const ={0 : 3.402, 1 : 3.402, 2 : 3.402, 3 : 3.402} # limit weight to 120 oz
    critical_planes = {1, 9} #  setting the critical planes to be 2, 10 (note python start counting at 0)
    V_max = 76 # max vibration for non-critical planes
    model_LMI = hs.LMI(A, alpha, weight_const=weight_const, critical_planes=critical_planes, V_max=V_max
                         , name='LMI') # Instantiate LMI model
    W_LMI = model_LMI.solve() #solve
    hs.convert_matrix_to_math(W_LMI)
    residuals_LMI = model_LMI.expected_residual_vibration()
    hs.convert_matrix_to_math(residuals_LMI) # Expected residual vibrationsRoot mean square error:rmse_LMI = model_LMI.rmse()
    rmse_LMI
    print(model_LMI.info())++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL TYPE
    ==================================================
    LMI
    ==================================================
    End of MODEL TYPE
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    MODEL NAME
    ==================================================
    LMI
    ==================================================
    End of MODEL NAME
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    
                       
    ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    INFLUENCE COEFFICIENT MATRIX
    ==================================================
    
    ++++++++++++++++++++++++++++++++++++++++
    Influence Coefficient Matrix
    ++++++++++++++++++++++++++++++++++++++++
    
    ++++++++++++++++++++++++++++++++++++++++
    Coefficient Values
    [...]DiscussionThe LMI model has been biased towards plane 2, with a slight enhancement, and towards plane 10, which greatly decreased from 69.7$\mu$ to 45.1$\mu$ (a 35% decrease), but that came at the cost of raising the non-critical planes up to the limit we assigned (76$\mu$)  Plottingmodels = [model_LeastSquares, model_MinMax, model_MinMax_const, model_LMI]
    def plot_models(models):
        residule_vibration = {model.name:abs(model.expected_residual_vibration().ravel()) for model in models}
        rmse = {model.name:model.rmse() for model in models}
        fig, (ax0, ax1) = plt.subplots(2, 1)
        ax0.bar(rmse.keys(), rmse.values())
        plt.xlabel('Models')
        plt.ylabel('Vibration');
        models_number = len(residule_vibration.values())
        measuring_points = max((len(array) for array in residule_vibration.values()))
    
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,models_number)))
    
        step = 0
        for array in residule_vibration.values():
            ax1.bar(np.arange(len(array)) + step, array, color = next(colors), width = 1/models_number)
            step += 1 / (models_number+1)
        ax1.legend([model.name for model in models])
        ax1.set_xticks(range(measuring_points), ['M.P '+ str(point) for point in range(1, 1+measuring_points)],
                      rotation =45);
    plot_models(models)Combining Pre-Built & Custom AI ServicesIn this notebook, you will integrate with the Computer Vision API and the Text Analytics API to augment the claims processing capabilities. In the end, you will integrate the API calls to the summarizer and classifier services that you deployed and produce a finished claim report that shows all of the processing applied to the claim text and claim image.  Task 1 - Caption & Tag with the Computer Vision API In the cell below, provide the key to your Computer Vision API and run the cell.subscription_key = '' #""
    assert subscription_keyConstruct the endpoint to the Computer Vision API by running the following cell. Notice the last path segment is analyze, which indicates you will use the analyze feature.Be sure to update the value in vision_endpoint below so it matches the Endpoint value you copied from the Azure Portal for your instance of the Computer Vision service. Be sure your value ends in a slash (/)vision_endpoint = '' #""
    vision_base_url = vision_endpoint + "vision/v1.0/"
    vision_analyze_url = vision_base_url + "analyze"The following cell contains a list of sample images found after performing a simple web search. Feel free to substitute in URLs to the image of your choice.fender_bender = "https://www.washingtonpost.com/blogs/innovations/files/2015/02/Stolen_Car_Crash-00aef.jpg"
    damaged_house = "https://c2.staticflickr.com/8/7342/10983313185_0589b74946_z.jpg"
    police_car = "https://localtvwnep.files.wordpress.com/2015/11/fender-bender.jpeg"
    car_with_text = "https://static.buildasign.com/cmsimages/bas-vinyl-lettering-splash-01.png"
    car_tow = 'https://i.ytimg.com/vi/wmxJ2FrzTWo/maxresdefault.jpg'From the list of images above, select one and assign it to image_url for further processing:image_url = car_towRun the following cell to preview the image you have selected.from IPython.display import Image, display
    display(Image(image_url))The following cell builds the HTTP request to make against the Computer Vision API.Run the following cell to retrieve the caption and tags:import requests
    headers  = {'Ocp-Apim-Subscription-Key': subscription_key }
    params   = {'visualFeatures': 'Categories,Description,Tags,Color'}
    data     = {'url': image_url}
    response = requests.post(vision_analyze_url, headers=headers, params=params, json=data)
    response.raise_for_status()
    analysis = response.json()
    analysisAs you can see in the above output, the result is a nested document structure. Run the following cells to pull out the caption and top 3 tag results:caption = analysis["description"]["captions"][0]["text"].capitalize()
    caption
    topTags = analysis["description"]["tags"][0:3]
    topTagsTask 2 - Performing OCR In order to perform OCR with the Computer Vision service, you need to target the OCR endpoint.Run the following cell to construct the right URL:vision_ocr_url = vision_base_url + "ocr"Next, invoke the OCR endpoint with the following code and examine the result:headers  = {'Ocp-Apim-Subscription-Key': subscription_key }
    params   = {}
    data     = {'url': image_url}
    response = requests.post(vision_ocr_url, headers=headers, params=params, json=data)
    response.raise_for_status()
    ocr_analysis = response.json()
    ocr_analysisWe have provided the following code for you to extract the text as a flat array from the results.Run the following cell to extract the text items from the results document:import itertools
    flatten = lambda x: list(itertools.chain.from_iterable(x))
    words_list = [[ [w['text'] for w in line['words']]  for line in d['lines']] for d in ocr_analysis['regions']]
    words_list = flatten(flatten(words_list))
    print(list(words_list))Task 3 - Performing Sentiment Analysis Sentiment Analysis is performed using the Text Analytics API.Update the following cell with the key to your instance of the Text Analytics API and run the cell:text_analytics_subscription_key = '' #""
    assert text_analytics_subscription_keyUpdate the following cell with the correct base URL for your deployed instance of the Text Analytics API and run the cell:#""
    text_analytics_base_url = ''
    sentiment_api_url = text_analytics_base_url + "/text/analytics/v2.1/sentiment"The following cell has a set of example claims you can use to test the sentiment measurement. Run the cell:neg_sent = """We are just devastated and emotionally drained. 
    The roof was torn off of our car, and to make matters
    worse my daughter's favorite teddy bear was impaled on the street lamp."""
    pos_sent = """We are just happy the damaage was mininmal and that everyone is safe. 
    We are thankful for your support."""
    neutral_sent = """I crashed my car."""
    long_claim = """
    I was driving down El Camino and stopped at a red light.
    It was about 3pm in the afternoon. The sun was bright and shining just behind the stoplight.
    This made it hard to see the lights. There was a car on my left in the left turn lane.
    A few moments later another car, a black sedan pulled up behind me. 
    When the left turn light changed green, the black sedan hit me thinking 
    that the light had changed for us, but I had not moved because the light 
    was still red. After hitting my car, the black sedan backed up and then sped past me.
    I did manage to catch its license plate. The license plate of the black sedan was ABC123. 
    """From the above list of claims, select one and assign its variable to claim_text to be used in the call to the Text Analytics API.claim_text = long_claimThe API requires you to submit a document of the following form.Run the cell to build the request document:documents = {'documents' : [
        {'id': '1', 'language': 'en', 'text': claim_text}
    ]}Now invoke the Text Analytics API and observe the result.headers   = {"Ocp-Apim-Subscription-Key": text_analytics_subscription_key}
    response  = requests.post(sentiment_api_url, headers=headers, json=documents)
    sentiments = response.json()
    sentimentsTo parse out the sentiment score from the response, run the following cell:score = sentiments['documents'][0]['score']
    scoreYou can provide a human-friendly interpretation on this score by running the following cell:score_interpretation = "neutral"
    if (score < 0.45): 
        score_interpretation = "negative"
    elif (score >= 0.55):
        score_interpretation = "positive"
    score_interpretationTask 4 - Invoking the Azure ML Deployed Services Run the following cell to define a method that will be used to invoke your classifier and summarizer methods deployed using Azure Machine Learning service to Azure Container Instances:def invoke_service(ml_service_key, ml_service_scoring_endpoint, ml_service_input):
        headers   = {"Authorization": "Bearer " + ml_service_key}
        response  = requests.post(ml_service_scoring_endpoint, headers=headers, json=ml_service_input)
        result = response.json()
        return resultConfigure the classifier invocation with the key and endpoint as appropriate to your deployed instance:classifier_service_key = "" #leave this value empty if the service does not have authentication enabled
    #""
    classifier_service_scoring_endpoint = ''
    classifier_service_input = [claim_text]Invoke the classifier and observe the result:classifier_result = invoke_service(classifier_service_key, 
                                       classifier_service_scoring_endpoint, classifier_service_input)
    classifier_result
    # Interpret the classifier result
    classification = 'Auto Insurance Claim' if classifier_result == 1 else 'Home Insurance Claim' 
    classificationSimilarly, configure the key and scoring endpoint as appropriate to your summarizer service:summarizer_service_key = "" #leave this value empty if the service does not have authentication enabled
    #""
    summarizer_service_scoring_endpoint = ''
    summarizer_service_input = claim_textInvoke the summarizer service and observe the result:summarizer_result = invoke_service(summarizer_service_key, summarizer_service_scoring_endpoint, 
                                       summarizer_service_input)
    formatted_result =  summarizer_result[0].replace("\\n", " ").strip() if len(summarizer_result) > 0 else "N/A"
    formatted_resultTask 5 - Summarizing the ResultsIn this final task, you pull together all of the pieces to display the results of your AI based processing.Run the following cell and examine the result.from IPython.core.display import HTML
    
    displayTemplate = """
    
    Claim Summary
    Classification: {}
    Caption: {}
    Tags: {}
    Text in Image: {}
    Sentiment: {}
    Summary:
    {} 
     
    Claim:
    {}
    """ displayTemplate = displayTemplate.format(classification, caption, ' '.join(topTags), ' '.join(words_list), score_interpretation, image_url, formatted_result, claim_text) display(HTML(displayTemplate))Load pickled df_lgbtwith open('pickles/df_lgbt.pickle', 'rb') as f: df_lgbt = pickle.load(f)Load pickled df_map (test)with open('pickles/df_map_test.pickle', 'rb') as f: df_map = pickle.load(f) translator = google_translator() translate_text = translator.translate('Ich mag dich', lang_src='de', lang_tgt='en') translate_text df_lgbt['Story'].indexPickle translations_dict# pickle translations_dict with open('pickles/translations_dict.pickle', 'wb' ) as f: pickle.dump(translations, f) # pickle translations_dict with open('pickles/translations_header.pickle', 'wb' ) as f: pickle.dump(translations_header, f)Pickle df_lgbt_translated# pickle df_lgbt_translated with open('pickles/df_lgbt_translated.pickle', 'wb' ) as f: pickle.dump(df_lgbt_translated, f) with open('pickles/df_map_translated.pickle', 'wb' ) as f: pickle.dump(df_map_translated, f) df_map_translated = pd.DataFrame(df_map) len(df_map_translated) df_map_translated.drop(['Header', 'Story'], axis=1, inplace=True) df_map_translated.rename(columns={'Story_EN':'Story', 'Header_EN':'Header'}, inplace=True) df_lgbt_translated df_map_translated['Story_EN'] = pd.Series(translations) df_map_translated['Header_EN'] = pd.Series(translations_header) df_lgbt_translated.drop(['Header', 'Story'], axis=1, inplace=True) df_lgbt_translated.rename(columns={'Story_EN':'Story', 'Header_EN':'Header'}, inplace=True) df_lgbt_translated = pd.DataFrame(df_lgbt) df_lgbt['Header_EN'] = pd.Series(translations_header) df_lgbt['Story_EN'] = pd.Series(translations) translations_header = {} for de_text, text_index in zip(df_lgbt['Header'], df_lgbt['Header'].index): en_text = translator.translate(de_text, lang_tgt='en') translations_header[text_index] = en_text translations = {} for de_text, text_index in zip(df_lgbt['Story'], df_lgbt['Story'].index): en_text = translator.translate(de_text, lang_tgt='en') translations[text_index] = en_text df_lgbt[] translator.translate('Hallo, ich heiße Filipe und das ist ein Test', dest='en') [df_map.loc[1707, 'Story']] df_mapExport ONNXexample = next(iter(dataloader)) example_torch = example_convert_to_torch(example, float_dtype) batch_size = example["anchors"].shape[0] example_tuple = list(example_torch.values()) pillar_x = example_tuple[0][:,:,0].unsqueeze(0).unsqueeze(0) pillar_y = example_tuple[0][:,:,1].unsqueeze(0).unsqueeze(0) pillar_z = example_tuple[0][:,:,2].unsqueeze(0).unsqueeze(0) # pillar_i = example_tuple[0][:,:,3].unsqueeze(0).unsqueeze(0) pillar_i = torch.ones(pillar_x.shape,dtype=torch.float32, device=pillar_x.device ) num_points_per_pillar = example_tuple[1].float().unsqueeze(0) # Find distance of x, y, and z from pillar center # assuming xyres_16.proto coors_x = example_tuple[2][:, 3].float() coors_y = example_tuple[2][:, 2].float() vx, vy = voxel_generator.voxel_size[0], voxel_generator.voxel_size[1] x_offset = vx/2 + voxel_generator.point_cloud_range[0] y_offset = vy/2 + voxel_generator.point_cloud_range[1] # self.x_offset = self.vx / 2 + pc_range[0] # self.y_offset = self.vy / 2 + pc_range[1] # this assumes xyres 20 # x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1 # y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9 # here assumes xyres 16 x_sub = coors_x.unsqueeze(1)*vx + x_offset y_sub = coors_y.unsqueeze(1)*vy + y_offset # x_sub = coors_x.unsqueeze(1)*0.28 + 0.14 # y_sub = coors_y.unsqueeze(1)*0.28 - 20.0 ones = 
torch.ones([1,voxel_generator._max_num_points], dtype = torch.float32, device = pillar_x.device) x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0) y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0) num_points_for_a_pillar = pillar_x.size()[3] mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0) mask = mask.permute(0, 2, 1) mask = mask.unsqueeze(1) mask = mask.type_as(pillar_x) coors = example_tuple[2] print("pillar_x.size()",pillar_x.size()) print("pillar_y.size()",pillar_y.size()) print("pillar_z.size()",pillar_z.size()) print("pillar_i.size()",pillar_i.size()) print("num_points_per_pillar.size()",num_points_per_pillar.size()) print("x_sub_shaped.size()",x_sub_shaped.size()) print("y_sub_shaped.size()",y_sub_shaped.size()) print("mask.size()",mask.size()) pillar_x = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) pillar_y = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) pillar_z = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) pillar_i = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) num_points_per_pillar = torch.ones([1, 70000], dtype=torch.float32, device=pillar_x.device) x_sub_shaped = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) y_sub_shaped = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) mask = torch.ones([1, 1, 70000, 100], dtype=torch.float32, device=pillar_x.device) example1 = [pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask] input_names = ['pillar_x', 'pillar_y', 'pillar_z', 'pillar_i', 'num_points_per_pillar', 'x_sub_shaped', 'y_sub_shaped', 'mask'] torch.onnx.export(net, example1, "pfe.onnx", verbose=False, input_names=input_names) rpn_input = torch.ones([1, 64, 400, 600], dtype=torch.float32, device=pillar_x.device) torch.onnx.export(net.rpn, rpn_input, "rpn.onnx", verbose=False)Plot eval oncenet.eval() example = next(iter(dataloader)) example_torch = example_convert_to_torch(example, float_dtype) batch_size = example["anchors"].shape[0] example_tuple = list(example_torch.values()) pillar_x = example_tuple[0][:,:,0].unsqueeze(0).unsqueeze(0) pillar_y = example_tuple[0][:,:,1].unsqueeze(0).unsqueeze(0) pillar_z = example_tuple[0][:,:,2].unsqueeze(0).unsqueeze(0) # pillar_i = example_tuple[0][:,:,3].unsqueeze(0).unsqueeze(0) pillar_i = torch.ones(pillar_x.shape,dtype=torch.float32, device=pillar_x.device ) num_points_per_pillar = example_tuple[1].float().unsqueeze(0) # Find distance of x, y, and z from pillar center # assuming xyres_16.proto coors_x = example_tuple[2][:, 3].float() coors_y = example_tuple[2][:, 2].float() vx, vy = voxel_generator.voxel_size[0], voxel_generator.voxel_size[1] x_offset = vx/2 + voxel_generator.point_cloud_range[0] y_offset = vy/2 + voxel_generator.point_cloud_range[1] # self.x_offset = self.vx / 2 + pc_range[0] # self.y_offset = self.vy / 2 + pc_range[1] # this assumes xyres 20 # x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1 # y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9 # here assumes xyres 16 x_sub = coors_x.unsqueeze(1)*vx + x_offset y_sub = coors_y.unsqueeze(1)*vy + y_offset # x_sub = coors_x.unsqueeze(1)*0.28 + 0.14 # y_sub = coors_y.unsqueeze(1)*0.28 - 20.0 ones = torch.ones([1,voxel_generator._max_num_points], dtype = torch.float32, device = pillar_x.device) x_sub_shaped = torch.mm(x_sub, ones) y_sub_shaped = torch.mm(y_sub, ones) num_points_for_a_pillar = 
pillar_x.size()[3] mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0) mask = mask.permute(0, 2, 1) mask = mask.unsqueeze(1) mask = mask.type_as(pillar_x) coors = example_tuple[2] anchors = example_tuple[3] labels = example_tuple[4] reg_targets = example_tuple[5] inputs = [pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask, coors, anchors, labels, reg_targets] ret_dict = net(inputs) ret_dict # example_token = example["metadata"][0]['token'] # filtered_sample_tokens = dataset.dataset.filtered_sample_tokens # index = filtered_sample_tokens.index(example["metadata"][0]['token']) # gt_example = dataset.dataset.get_sensor_data(index) points = example['points'][0] pc_range = model_cfg.voxel_generator.point_cloud_range points = np.array( [p for p in points if (pc_range[0] < p[0] < pc_range[3]) & (pc_range[1] < p[1] < pc_range[4]) & ( pc_range[2] < p[2] < pc_range[5])]) gt_boxes = example['gt_boxes'] gt_labels = example['gt_names'][0] c = np.zeros(points[:, 3].shape[0]).reshape(-1, 1) c = np.concatenate([c, c, c], axis=1) points = points[:, 0:3] pc = o3d.geometry.PointCloud() pc.points = o3d.utility.Vector3dVector(points) pc.colors = o3d.utility.Vector3dVector(c) mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[-0, -0, -0]) geo = [pc, mesh_frame] detection = [{'box3d_lidar': ret_dict[0][0], "label_preds": ret_dict[0][2], 'scores': ret_dict[0][1]}] geo = add_prediction_per_class(dataset.dataset.nusc, detection, gt_boxes, gt_labels, ["traffic_cone"], geo) o3d.visualization.draw_geometries(geo) import pandas as pd pd.set_option('display.max_rows', 5000) import matplotlib.pyplot as plt plt.plot(np.array(range(len(mean_losses)))*50,mean_losses,"r.") mean_lossesEvalfrom second.data.preprocess import merge_second_batch, prep_pointcloud # pc_file_name = "/media/starlet/LdTho/data/sets/1614053547.551486.pcd" # pc_file_name = "/media/starlet/LdTho/data/sets/1612769433360603.pcd" pc_file_name = "/media/starlet/LdTho/data/sets/baraja_lidar/1612769435143246.pcd" pcd = o3d.io.read_point_cloud(pc_file_name) points = np.asarray(pcd.points) pc_range = model_cfg.voxel_generator.point_cloud_range points = np.array([p for p in points if (pc_range[0] < p[0] < pc_range[3]) & (pc_range[1] < p[1] < pc_range[4]) & (pc_range[2] < p[2] < pc_range[5])]) points = np.concatenate([points.transpose(), np.array([np.ones(points.shape[0])]),],axis = 0).transpose() points = points[~np.isnan(points).any(axis=1)] points input_dict = { 'lidar': { 'type': 'lidar', 'points': points }, 'metadata': { 'token': pc_file_name } } out_size_factor = model_cfg.rpn.layer_strides[0] // model_cfg.rpn.upsample_strides[0] example = prep_pointcloud(input_dict=input_dict, root_path= None , voxel_generator= voxel_generator, target_assigner= target_assigner, max_voxels= 70000, training= False, create_targets=False, shuffle_points=False, num_point_features=model_cfg.num_point_features, remove_outside_points=False, anchor_cache=None, anchor_area_threshold=-1, out_size_factor=out_size_factor, out_dtype=np.float32 ) # example["points"] = points example["metadata"] = input_dict["metadata"] example = [example] print(example) eval_dataloader = torch.utils.data.DataLoader( example, batch_size=input_cfg.batch_size, shuffle=True, num_workers=4, pin_memory=False, collate_fn=merge_second_batch) net.eval() example = next(iter(eval_dataloader)) example_torch = example_convert_to_torch(example, float_dtype) batch_size = example["anchors"].shape[0] example_tuple 
= list(example_torch.values()) pillar_x = example_tuple[0][:,:,0].unsqueeze(0).unsqueeze(0) pillar_y = example_tuple[0][:,:,1].unsqueeze(0).unsqueeze(0) pillar_z = example_tuple[0][:,:,2].unsqueeze(0).unsqueeze(0) # pillar_i = example_tuple[0][:,:,3].unsqueeze(0).unsqueeze(0) pillar_i = torch.ones(pillar_x.shape,dtype=torch.float32, device=pillar_x.device ) num_points_per_pillar = example_tuple[1].float().unsqueeze(0) # Find distance of x, y, and z from pillar center # assuming xyres_16.proto coors_x = example_tuple[2][:, 3].float() coors_y = example_tuple[2][:, 2].float() vx, vy = voxel_generator.voxel_size[0], voxel_generator.voxel_size[1] x_offset = vx/2 + voxel_generator.point_cloud_range[0] y_offset = vy/2 + voxel_generator.point_cloud_range[1] # self.x_offset = self.vx / 2 + pc_range[0] # self.y_offset = self.vy / 2 + pc_range[1] # this assumes xyres 20 # x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1 # y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9 # here assumes xyres 16 x_sub = coors_x.unsqueeze(1)*vx + x_offset y_sub = coors_y.unsqueeze(1)*vy + y_offset # x_sub = coors_x.unsqueeze(1)*0.28 + 0.14 # y_sub = coors_y.unsqueeze(1)*0.28 - 20.0 ones = torch.ones([1,voxel_generator._max_num_points], dtype = torch.float32, device = pillar_x.device) x_sub_shaped = torch.mm(x_sub, ones) y_sub_shaped = torch.mm(y_sub, ones) num_points_for_a_pillar = pillar_x.size()[3] mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0) mask = mask.permute(0, 2, 1) mask = mask.unsqueeze(1) mask = mask.type_as(pillar_x) coors = example_tuple[2] anchors = example_tuple[3] labels = example_tuple[4] # reg_targets = example_tuple[5] inputs = [pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask, coors, anchors, labels] ret_dict = net(inputs) ret_dict points = np.asarray(pcd.points) points = np.array( [p for p in points if (pc_range[0] < p[0] < pc_range[3]) & (pc_range[1] < p[1] < pc_range[4]) & ( pc_range[2] < p[2] < pc_range[5])]) points = np.concatenate([points.transpose(), np.array([np.repeat(0.0, points.shape[0])]), np.array([np.repeat(0.0, points.shape[0])])],axis = 0).transpose() points = points[~np.isnan(points).any(axis=1)] c = points[:, 3].reshape(-1, 1) c = np.concatenate([c, c, c], axis=1) points = points[:, 0:3] pc = o3d.geometry.PointCloud() pc.points = o3d.utility.Vector3dVector(points) pc.colors = o3d.utility.Vector3dVector(c) mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[-0, -0, -0]) geo = [pc, mesh_frame] detection = [{'box3d_lidar': ret_dict[0][0], "label_preds": ret_dict[0][2], 'scores': ret_dict[0][1]}] det_boxes = detection[0]['box3d_lidar'].cpu().detach().numpy() det_labels = detection[0]['label_preds'].cpu().detach().numpy() det_scores = detection[0]['scores'].cpu().detach().numpy() class_names = ['traffic_cone'] color = { "traffic_cone": (1,0,0), "gt_traffic_cone": (0,1,0), "pedestrian": (1,1,0), "gt_pedestrian": (0,0,1) } for i, class_name in enumerate(class_names): mask = np.logical_and(det_labels == i, det_scores > 0.21) class_det_boxes = det_boxes[mask] class_det_scores = det_scores[mask] class_det_labels = det_labels[mask] print(len(class_det_boxes),len(class_det_scores),len(class_det_labels)) print(class_det_scores) rbbox_corners = box_np_ops.center_to_corner_box3d(class_det_boxes[:, :3], class_det_boxes[:, 3:6], class_det_boxes[:, 6], origin=(0.5, 0.5, 0.5), axis=2) for j in range(len(rbbox_corners)): geo.append(buildBBox(rbbox_corners[j], color=color[class_name])) 
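`buildBBox` is called in the loop above but is not defined anywhere in this excerpt. Below is a minimal sketch of what such a helper can look like: it wraps an `(8, 3)` array of box corners (as returned by `box_np_ops.center_to_corner_box3d`) into an Open3D `LineSet` so the detection can be rendered together with the point cloud. The edge indices assume a conventional corner ordering; the actual helper used in this notebook may differ.

```python
import numpy as np
import open3d as o3d

def buildBBox(corners, color=(1, 0, 0)):
    """Sketch: build an o3d.geometry.LineSet for a 3D box given its (8, 3) corners."""
    # 12 edges of a box: 4 bottom, 4 top, 4 vertical (assumed corner ordering)
    edges = [[0, 1], [1, 2], [2, 3], [3, 0],
             [4, 5], [5, 6], [6, 7], [7, 4],
             [0, 4], [1, 5], [2, 6], [3, 7]]
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(np.asarray(corners))
    line_set.lines = o3d.utility.Vector2iVector(edges)
    line_set.colors = o3d.utility.Vector3dVector([color] * len(edges))
    return line_set
```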
o3d.visualization.draw_geometries(geo) pillar_z v_path = "/media/starlet/LdTho/data/sets/KITTI/training/velodyne/000000.bin" points = np.fromfile( str(v_path), dtype=np.float32, count=-1).reshape([-1, 4]) pointsCreate a Basic Convolutional Neural Network for Image Classification 1. Import Library and Data#@title Code for loading the data # import libraries import tensorflow as tf import tensorflow.keras as keras import numpy as np import matplotlib.pyplot as plt # utilities library import cv2 import os import random random.seed(0) np.random.seed(0) tf.random.set_seed(0) os.environ['PYTHONHASHSEED'] = '0' # !rm -rf jaffe # !rm -rf __MACOSX # !rm jaffe.zip !wget https://github.com/iqbalbasyar/kickstart_gan/raw/main/jaffe.zip -q !unzip -q jaffe.zip data_path = 'jaffe' data_dir_list = os.listdir(data_path) img_data_list=[] img_label = [] label_map = {} i = 0 for dataset in data_dir_list: img_list=os.listdir(data_path+'/'+ dataset) label_map[i] = str(dataset) # print ('Load the images of dataset-'+'{}\n'.format(dataset)) for img in img_list: img_label.append(i) input_img=cv2.imread(data_path + '/'+ dataset + '/'+ img ) input_img_resize=cv2.resize(input_img,(128,128)) img_data_list.append(input_img_resize) i += 1 img_data = np.array(img_data_list) img_data = img_data.astype('float') img_data = img_data/255 #normalization img_label = np.array(img_label) # num_classes = 6 # num_of_samples = img_data.shape[0] # img_label = np.ones((num_of_samples,),dtype='int') # img_label[0:29]=0 #30 # img_label[30:58]=1 #29 # img_label[59:90]=2 #32 # img_label[91:121]=3 #31 # img_label[122:152]=4 #31 # img_label[153:]=5 #30 # names = ['ANGRY','DISGUST','FEAR','HAPPY','SAD','SURPRISE'] def getlabel(id): return label_map[id]**About this Data**: We will use the Japanese Female Facial Expression (JAFFE) dataset which has 183 images of 10 different female models posing for 6 emotions. The data will be normalized so its value will ranging from 0 to 1. The label will be denoted as numbers, which follow this mapping : | | Expression ||---:|:-------------|| 0 | HAPPY || 1 | SURPRISE || 2 | FEAR || 3 | SAD || 4 | ANGRY || 5 | DISGUST | Cite the author, , , . Coding Facial Expressions with Gabor Wavelets, 3rd IEEE International Conference on Automatic Face and Gesture Recognition, pp. 200-205 (1998). http://doi.org/10.1109/AFGR.1998.670949 Open access content available at: https://zenodo.org/record/3430156 split the data into train and test set so that we can perform **cross validation** laterfrom sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(img_data, img_label, test_size=0.1, random_state=0)___ 2. Create the Model In previous two notebook, we already created a similar model using keras. Here's the code if you wanted to remember.# simple model in 1st Notebook model_simple = keras.Sequential() model_simple.add(keras.layers.Dense(units=1, input_shape=[1])) model_simple.compile(optimizer='SGD', loss='mean_squared_error') # Neural Network in 2nd Notebook model_nn = keras.Sequential() model_nn.add(keras.layers.Flatten(input_shape=(128,128,3)) ) # flatten layer model_nn.add(keras.layers.Dense(units = 128,activation='relu') ) #input layer model_nn.add(keras.layers.Dense(units = 64, activation='relu') ) # hidden layer model_nn.add(keras.layers.Dense(units = 6, activation='softmax') ) # output layer model_nn.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])We are now trying to add a convolutional part to our model. 
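As a quick sanity check on the layer sizes reported by `model_cnn.summary()` further below, recall that a `Conv2D` layer has `kernel_h * kernel_w * in_channels * filters + filters` parameters. The short snippet below (a worked check, not part of the original notebook) reproduces the 1216 and 4640 figures that appear in the summary.

```python
# Worked check of the Conv2D parameter counts shown in model_cnn.summary()
def conv2d_params(kernel_h, kernel_w, in_channels, filters):
    # one (kernel_h x kernel_w x in_channels) kernel per filter, plus one bias per filter
    return kernel_h * kernel_w * in_channels * filters + filters

print(conv2d_params(5, 5, 3, 16))   # 1216 -> first Conv2D on the 128x128x3 input
print(conv2d_params(3, 3, 16, 32))  # 4640 -> second Conv2D on the 16-channel feature map
```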
Mind the changes compared to previous basic neural networkmodel_cnn = keras.Sequential() model_cnn.add(keras.layers.Conv2D(filters=16, kernel_size=(5,5), input_shape=(128,128,3), padding='same', activation="relu")) model_cnn.add(keras.layers.Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu")) model_cnn.add(keras.layers.MaxPooling2D()) model_cnn.add(keras.layers.Flatten()) # flatten layer model_cnn.add(keras.layers.Dense(units = 128,activation='relu') ) #input layer model_cnn.add(keras.layers.Dense(units = 64, activation='relu') ) # hidden layer model_cnn.add(keras.layers.Dense(units = 6, activation='softmax') ) # output layer model_cnn.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # summarize the model to see what happened inside model_cnn.summary()Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 128, 128, 16) 1216 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 32) 4640 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 64, 64, 32) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 131072) 0 _________________________________________________________________ dense_4 (Dense) (None, 128) 16777344 _________________________________________________________________ dense_5 (Dense) (None, 64) 8256 ______________________________________________________[...]3. Train the Modelmodel_cnn.fit(x_train, y_train, epochs=20);Epoch 1/20 2/6 [=========>....................] - ETA: 0s - loss: 7.9531 - accuracy: 0.1875WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0178s vs `on_train_batch_end` time: 0.0295s). Check your callbacks. 6/6 [==============================] - 0s 55ms/step - loss: 7.0799 - accuracy: 0.1890 Epoch 2/20 6/6 [==============================] - 0s 33ms/step - loss: 1.9150 - accuracy: 0.2073 Epoch 3/20 6/6 [==============================] - 0s 33ms/step - loss: 1.8054 - accuracy: 0.1829 Epoch 4/20 6/6 [==============================] - 0s 33ms/step - loss: 1.7506 - accuracy: 0.2256 Epoch 5/20 6/6 [==============================] - 0s 32ms/step - loss: 1.7592 - accuracy: 0.2622 Epoch 6/20 6/6 [==============================] - 0s 33ms/step - loss: 1.6657 - accuracy: 0.3354 Epoch 7/20 6/6 [==============================] - 0s 32ms/step - loss: 1.5612 - accuracy: 0.4817 Epoch 8/20 6/6 [==============================] - 0s 33ms/step - l[...]4. Test the Modelscore_accuracy = model_cnn.evaluate(x_test, y_test)1/1 [==============================] - 0s 2ms/step - loss: 1.0605 - accuracy: 0.8421Comparing Neural Network with CNN# train the basic neural network we previously have defined model_nn.fit(x_train, y_train, epochs=40) score_accuracy = model_nn.evaluate(x_test, y_test)INF-285 Tarea 2: SVD y PCA Instrucciones* La tarea es individual.* Las consultas sobre las tareas se deben realizar por medio de la plataforma Aula.* La tarea debe ser realizada en Jupyter Notebook (Python 3).* Se evaluará la correcta utilización de librerias ```NumPy```, ```SciPy```, entre otras, así como la correcta implementación de algoritmos de forma vectorizada.* El archivo de entrega debe denominarse **ROL-tarea-numero.ipynb**. 
De no respetarse este formato existirá un descuento de 50 puntos* La fecha de entrega es el viernes 29 de Mayo a las 18:00 hrs. Se aceptarán entregas hasta las 19:00 hrs sin descuento en caso de existir algun problema, posteriormente existirá un descuento lineal hasta las 20:00 hrs del mismo día.* Las tareas que sean entregadas antes del jueves a mediodía recibirán una bonificación de 10 puntos.* Se limitará el uso de librerias a solo las que estan agregadas en el Notebook (No se permite usar sklearn)* Debe seguir la firma de las funciones que se indican en la tarea, en caso contrario se considerará incorrecta* Debe citar cualquier código ajeno utilizado (incluso si proviene de los Jupyter Notebooks del curso). IntroducciónLa compresión de Imágenes utilizando *SVD* se basa en que la matriz $\Sigma$ representa los valores singulares de la matriz original, entonces se puede obtener una aproximación de la imagen original minimizando el rango de la matriz al eliminar los valores singulares de menor valor, ya que estos representan una "menor información" de la imagen. De este forma, por ejemplo si $\Sigma$ es de tamaño $n\times n$, se pueden omitir los $\sigma$ menos significativos obteniendo $\tilde{\Sigma}$ de tamaño $m\times m$, $mimport numpy as np import matplotlib.pyplot as plt from PIL import Image, ImageSequence def plotAnimation(animation): """ Parameters ---------- animimation : (frames, rows, cols) array GIF array Returns ------- Animation plots : None """ for frame in animation: plt.imshow(frame, cmap=plt.cm.gray) plt.axis('off') plt.show() def gifToArray(gif_file): """ Parameters ---------- gif_file : string GIF path Returns ------- data : (frames, rows, cols) array NumPy array with GIF pixel values """ im = Image.open(gif_file) data = list() for frame in ImageSequence.Iterator(im): tmp = np.array(im.convert('L')) data.append(tmp) data = np.array(data) return dataPodemos considerar un *GIF* como una colección de $p$ *frames*, donde un *frame* es una martriz $F\in\mathbb{R}^{r\times c}$ con $r$ el número de filas y $c$ en número de columnas de esta imagen. Ahora, si $(f_k)_{i,j}$ corresponde al elemento $i,j$ del $k$-ésimo *frame*, vamos a definir $\mathbf{f}_{i,j}=\langle (f_1)_{i,j}, (f_2)_{i,j},\dots,(f_p)_{i,j}\rangle$,es decir, este vector corresponde a los valores de los $p$ frames de la coordenada $(i,j)$ del *GIF*.Finalmente, para trabajar con los algoritmos, vamos a construir la matriz $G \in \mathbb{R}^{q\times p}$, donde $q=r\times c$ de cada *frame*, y que se define como:\begin{equation} G = \left[ \begin{array}{c} \mathbf{f}_{1,1} \\ \hline \mathbf{f}_{1,2} \\ \hline \dots \\ \hline \mathbf{f}_{r,c} \end{array} \right]\end{equation} ---- Funciones a Implementar 1. Crear la función ```createG(data)``` que recibe ```data``` el arreglo ```NumPy``` con la información del GIF, y retorna el arreglo $G$ definido anteriormente. (10 puntos)def createG(data): """ Parameters ---------- data : (frames, rows, cols) array NumPy array with GIF pixel values G : (q, p) array G matrix """ return G2. Crear la función ```restoreGIF(data)``` que recibe los datos procesados ```data``` y ```shape``` que contiene la tupla ```(frames, rows, cols)```, la dimensión original del *GIF*. Esta función retorna la reconstrucción del GIF. (10 puntos)def restoreGIF(data, shape): """ Parameters ---------- data : (q, p) array G matrix shape : tuple (frames, rows, cols) Returns ------- reshaped_data : (frames, rows, cols) array NumPy array with GIF pixel values """ return reshaped_dataSVD3. 
Implementar la función ```G_SVD(G, m)``` que reciba la matriz $G$ y los $m$ componentes que se utilizarán para comprimir el *GIF* utilizando *SVD*. La función debe retornar $U$, $\textrm{diag}(\Sigma)$ y $V^T$. Además, implementar la función ```SVD_G(U, s, Vt)``` que recibe las matrices generadas por el *SVD* y retorne la reconstrucción de la matriz $G$. (30 puntos)# G to SVD def G_SVD(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- U : (q, m)-array SVD U matrix s : m-array Singular values Vt : (m, p)-array SVD V^T matrix """ # Apply SVD return U, s, Vt # SVD to 'compressed' G def SVD_G(U, s, Vt): """ Parameters ---------- U : (q, m)-array SVD U matrix s : m-array Singular values Vt : (m, q)-array SVD V^T matrix Returns ------- B : (p, q)-array "Compressed" G """ return BPCA4. Implementar la función ```G_PCA(G, m)``` que reciba la matriz $G$ y los $m$ componentes que se utilizarán para comprimir el *GIF* utilizando *PCA*. La función debe retornar $PC$, $Y$ y $\mu$. Además, implementar la función ```PCA_G(PC, Y, mu)``` que recibe las matrices generadas por *PCA* y retorne la reconstrucción de la matriz $G$. Para esto debe utilizar la funcion de SVD implementada en el punto anterior. (35 puntos)def G_PCA(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- PC : (p, m)-array first m principal components Y : (q,m)-array PC Scores mu : (p)-array Average per column """ return PC, Y, mu def PCA_G(PC, Y, mu): """ Parameters ---------- PC : (p, m)-array first m principal components Y : (q,m)-array PC Scores mu : (p)-array Average per column Returns ------- B : (q, p)-array "Compressed" G """ return BPreguntasPara responder las siguientes preguntas, debe implementar las funciones propuestas 1. ¿Cuál sería el costo de almacenamiento en MB usando $m$ vectores singulares? (5 puntos)def SVD_size(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- size : Float total size of SVD return """ return size def PCA_size(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- size : Float total size of PCA return """ return size2. ¿Cuál sería el *gif* resultante con $m$ componentes? (5 puntos)def print_animation_SVD(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- La funcion no debe retornar nada, solo mostrar las imagenes de los frames reconstruidos """ return def print_animation_PCA(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- La funcion no debe retornar nada, solo mostrar las imagenes de los frames reconstruidos """ return3. ¿Cual sería el error en función de $m$? 
(Calcule el error utilizando la norma-2) (5 puntos)Considere calcular el error de la siguiente manera: $||G-B_m||_2$, donde $G$ corresponde a la matriz definida anteriormente y $B_m$ a la matriz "comprimida" utilizando los métodos correspondientes para un $m$ particular.def compression_error_SVD(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- error : Float total size of PCA return """ return error def compression_error_PCA(G, m): """ Parameters ---------- G : (q, p)-array G matrix m : int Number of components Returns ------- error : Float total size of PCA return """ return error1) Pandas read_csv na_values 2) Pandas read_csv keep_default_na 3) Pandas read_csv na_filterimport pandas as pd df=pd.read_csv('csv_files/Fortune_10.csv') df # NaN stand for Not a Number # #na null is also consider NaN import pandas as pd df=pd.read_csv('csv_files/Fortune_10.csv',na_values=['not available','no values']) df df=pd.read_csv('csv_files/Fortune_10.csv',na_values={'Companies':'no values','Growth':'not available'}) df df=pd.read_csv('csv_files/Fortune_10.csv',keep_default_na=True) df # to improve speed of pandas to not look for empty cell use na_filter=False df=pd.read_csv('csv_files/Fortune_10.csv',na_filter=False) dfTask 1import pandas as pd import matplotlib.pyplot as plt data = pd.read_csv('Bike-Sharing-Dataset/hour.csv')We are intersted in the following plots:1. total count vs time2. scatter plot of a registered vs total count3. density map of a registered vs total countplt.plot(data["dteday"], data["cnt"]) plt.scatter(data["registered"], data["cnt"]) plt.hist2d(data["registered"], data["cnt"], bins=(10, 10), cmap=plt.cm.jet);Task 2 We implmenented the following pipeline:1. Loading : loading csv file2. Clean Data by dropping column instant which we don't need.3. Process 4. Wrangling 1. Loadingdata = pd.read_csv('Bike-Sharing-Dataset/hour.csv') print(data.head(10))instant dteday season yr mnth hr holiday weekday workingday \ 0 1 2011-01-01 1 0 1 0 0 6 0 1 2 2011-01-01 1 0 1 1 0 6 0 2 3 2011-01-01 1 0 1 2 0 6 0 3 4 2011-01-01 1 0 1 3 0 6 0 4 5 2011-01-01 1 0 1 4 0 6 0 5 6 2011-01-01 1 0 1 5 0 6 0 6 7 2011-01-01 1 0 1 6 0 6 0 7 8 2011-01-01 1 0 1 7 0 6 0 8 9 2011-01-01 1 0 1 8 0 6 0 9 10 2011-01-01 1 0 1 9 0 6 0 weathersit temp atemp hum windspeed casual registered cnt 0 1 0.24 0.2879 0.81 0.0000 3 13 16[...]2. Clean Datadata.drop('instant', axis=1, inplace=True)Let's confirm if column instant has been removeddata.info() RangeIndex: 17379 entries, 0 to 17378 Data columns (total 17 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 instant 17379 non-null int64 1 dteday 17379 non-null object 2 season 17379 non-null int64 3 yr 17379 non-null int64 4 mnth 17379 non-null int64 5 hr 17379 non-null int64 6 holiday 17379 non-null int64 7 weekday 17379 non-null int64 8 workingday 17379 non-null int64 9 weathersit 17379 non-null int64 10 temp 17379 non-null float64 11 atemp 17379 non-null float64 12 hum 17379 non-null float64 13 windspeed 17379 non-null float64 14 casual 17379 non-null int64 15 registered 17379 non-null int64 16 cnt 17379 non-null int64 dtypes: float64(4), int64(12), object(1) memory usage: 2.3+ MB3. Process Datadata['total'] = data['registered'] + data['casual'] data.head()4. 
Wrangle Data Check if the column total (sum ofcount = data['cnt'] == data['total'] assert count.sum() == len(data)Task 3 Step 1#data = pd.read_csv('Bike-Sharing-Dataset/hour.csv') # Method chaining begins import numpy as np df = pd.read_csv('Bike-Sharing-Dataset/hour.csv').drop('instant', axis=1).assign(registered_ratio=data['registered']/ data['cnt']).assign(casual_ratio=data['casual']/ data['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012)) df def load_and_process(url_or_path_to_csv_file): df = pd.read_csv(url_or_path_to_csv_file).drop('instant', axis=1).assign(registered_ratio=lambda x : x['registered']/ x['cnt']).assign(casual_ratio=lambda x :x['casual']/ x['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012)) return df load_and_process('Bike-Sharing-Dataset/hour.csv') import project_functions df = project_functions.load_and_process('Bike-Sharing-Dataset/hour.csv') dfSEICHE vs DWT ANALYSISimport xarray as xr import numpy as np import matplotlib.pyplot as plt import os from matplotlib import gridspec from matplotlib.colors import ListedColormap, LinearSegmentedColormap os.chdir (r'C:\Users\lcag075\Dropbox\MAJURO-teslakit') data_path=r'C:\Users\lcag075\Dropbox\MAJURO-teslakit\teslakit\DATA\sites\MAJURO' data=xr.open_dataset(os.path.join(data_path,'Seiche','hm0_daily_60secfrequency.nc')) data['time']=data.time.dt.floor('d') kma = xr.open_dataset(os.path.join(r'C:\Users\lcag075\Dropbox\Culebras-uoa\MAJURO\DATA\DWTs', "kma.nc")) kma = xr.Dataset( { 'bmus':(('time'),kma.sorted_bmus.values), },coords = {'time': kma.time.values}) kma=kma.sel(time = slice(data.time[0],data.time[-1])) data['bmus']=kma.bmus num_clusters=36 l_colors_dwt = np.array([ (1.0000, 0.1344, 0.0021), (1.0000, 0.2669, 0.0022), (1.0000, 0.5317, 0.0024), (1.0000, 0.6641, 0.0025), (1.0000, 0.9287, 0.0028), (0.9430, 1.0000, 0.0029),(0.6785, 1.0000, 0.0031), (0.5463, 1.0000, 0.0032),(0.2821, 1.0000, 0.0035),(0.1500, 1.0000, 0.0036),(0.0038, 1.0000, 0.1217), (0.0039, 1.0000, 0.2539),(0.0039, 1.0000, 0.4901),(0.0039, 1.0000, 0.6082),(0.0039, 1.0000, 0.8444), (0.0039, 1.0000, 0.9625), (0.0039, 0.8052, 1.0000), (0.0039, 0.6872, 1.0000),(0.0040, 0.4510, 1.0000),(0.0040, 0.3329, 1.0000),(0.0040, 0.0967, 1.0000),(0.1474, 0.0040, 1.0000),(0.2655, 0.0040, 1.0000), (0.5017, 0.0040, 1.0000),(0.6198, 0.0040, 1.0000),(0.7965, 0.0040, 1.0000),(0.8848, 0.0040, 1.0000),(1.0000, 0.0040, 0.9424),(1.0000, 0.0040, 0.8541), (1.0000, 0.0040, 0.6774),(1.0000, 0.0040, 0.5890),(1.0000, 0.0040, 0.4124),(1.0000, 0.0040, 0.3240),(1.0000, 0.0040, 0.1473),(0.9190, 0.1564, 0.2476), (0.7529, 0.3782, 0.4051),(0.6699, 0.4477, 0.4584),(0.5200, 0.5200, 0.5200),(0.4595, 0.4595, 0.4595),(0.4100, 0.4100, 0.4100),(0.3706, 0.3706, 0.3706), (0.2000, 0.2000, 0.2000),( 0, 0, 0)]) # get first N colors np_colors_base = np.array(l_colors_dwt) np_colors_rgb = np_colors_base[:num_clusters] newcmp = ListedColormap(np_colors_rgb) fig = plt.figure(figsize=[18.5,9]) gs1=gridspec.GridSpec(4,1) ax1=fig.add_subplot(gs1[0]) ax2=fig.add_subplot(gs1[1],sharex=ax1) ax3=fig.add_subplot(gs1[2],sharex=ax1) ax4=fig.add_subplot(gs1[3],sharex=ax1) ax1.plot(data.time,data.hm0_41320,'k:',linewidth=0.8) ax1.scatter(data.time,data.hm0_41320,15,data.bmus+1,cmap=newcmp) ax1.set_ylabel('Hm0 (m)',fontsize=12) ax1.text(.5,.9,'41320', horizontalalignment='center', transform=ax1.transAxes,fontsize=13,fontweight='bold') ax4.plot(data.time,data.hm0_41323,'k:',linewidth=0.8) cs=ax4.scatter(data.time,data.hm0_41323,15,data.bmus+1,cmap=newcmp) ax4.set_ylabel('Hm0 (m)',fontsize=13) 
ax4.text(.5,.9,'41323', horizontalalignment='center', transform=ax4.transAxes,fontsize=13,fontweight='bold') ax2.plot(data.time,data.hm0_41321,'k:',linewidth=0.8) ax2.scatter(data.time,data.hm0_41321,15,data.bmus+1,cmap=newcmp) ax2.set_ylabel('Hm0 (m)',fontsize=13) ax2.text(.5,.9,'41321', horizontalalignment='center', transform=ax2.transAxes,fontsize=13,fontweight='bold') ax3.plot(data.time,data.hm0_41322,'k:',linewidth=0.8) ax3.scatter(data.time,data.hm0_41322,15,data.bmus+1,cmap=newcmp) ax3.set_ylabel('Hm0 (m)',fontsize=13) ax3.text(.5,.9,'41322', horizontalalignment='center', transform=ax3.transAxes,fontsize=13,fontweight='bold') ax1.set_xlim(data.time[0],data.time[-1]) gs1.tight_layout(fig, rect=[0.05, [], 0.93, []]) gs2=gridspec.GridSpec(1,1) ax1=fig.add_subplot(gs2[0]) plt.colorbar(cs,cax=ax1) ax1.set_ylabel('DWT') gs2.tight_layout(fig, rect=[0.94, 0.05, 0.995, 0.95])C:\Users\lcag075\AppData\Local\Continuum\anaconda2\envs\teslakit\lib\site-packages\ipykernel_launcher.py:34: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.Hm0 Daily (360s frequency) - TG locationdata=xr.open_dataset(os.path.join(data_path,'Seiche','hm0_daily_360secfrequency.nc')) data['time']=data.time.dt.floor('d') kma = xr.open_dataset(os.path.join(r'C:\Users\lcag075\Dropbox\Culebras-uoa\MAJURO\DATA\DWTs', "kma.nc")) kma = xr.Dataset( { 'bmus':(('time'),kma.sorted_bmus.values), },coords = {'time': kma.time.values}) ini=max(kma.time[0],data.time[0]).values fin=min(kma.time[-1],data.time[-1]).values print('Time beginning: ' + str(ini)) print('Time end: ' + str(fin)) kma=kma.sel(time = slice(ini,fin)) data=data.sel(time = slice(ini,fin)) data['bmus']=kma.bmus hm0corr=data.hm0.values-0.01; hm0corr[np.where(hm0corr<0)]=np.nan data['hm0']=(['time'], hm0corr) print(data) fig = plt.figure(figsize=[18,5]) gs1=gridspec.GridSpec(1,1) ax1=fig.add_subplot(gs1[0]) ax1.plot(data.time,data.hm0,'k:',linewidth=0.8) ax1.scatter(data.time,data.hm0,15,data.bmus+1,cmap=newcmp) ax1.set_ylabel('Hm0 (m)',fontsize=12) ax1.set_xlim(data.time[0],data.time[-1]) gs1.tight_layout(fig, rect=[0.05, [], 0.90, []]) gs2=gridspec.GridSpec(1,1) ax1=fig.add_subplot(gs2[0]) plt.colorbar(cs,cax=ax1) ax1.set_ylabel('DWT') gs2.tight_layout(fig, rect=[0.91, 0.1, 0.97, 0.95]) mean_hm0=np.full([len(np.unique(data.bmus))],np.nan) perc_hm0=np.full([len(np.unique(data.bmus))],np.nan) max_hm0=np.full([len(np.unique(data.bmus))],np.nan) for a in range(len(np.unique(data.bmus))): mean_hm0[a]=np.nanmean(data.hm0[np.where(data.bmus==a)[0]]) max_hm0[a]=np.nanmax(data.hm0[np.where(data.bmus==a)[0]]) perc_hm0[a]=np.nanpercentile(data.hm0[np.where(data.bmus==a)[0]],95) perc_hm0 def Plot_Hm0(var,newcmp): x=np.reshape(var,[6,6]); x=np.flipud(np.rot90((x.T))) x1=np.full([np.shape(x)[0], np.shape(x)[1]+1],np.nan) x1[:,:-1]=x; x1[:,-1]=x[:,-1] fig = plt.figure(figsize=[14.5,9.5]) gs2=gridspec.GridSpec(1,1) ax2=fig.add_subplot(gs2[0]) cs=ax2.pcolormesh((np.arange(1,8)),np.flip(np.arange(1,8)),x1,vmin=np.nanmin(x1)*0.9, vmax=np.nanmax(x1)*1.1,cmap='gnuplot2_r') ax2.set_yticks([]); ax2.set_xticks([]) X,Y=np.meshgrid(np.arange(1,7),np.flip(np.arange(1,7))) X=X.T; Y=Y.T X=np.flipud(np.rot90((X))); Y=np.flipud(np.rot90((Y))) cs1=plt.scatter(X.flatten()+0.5,Y.flatten()+0.5,120,np.arange(0,36,1)+1,cmap=newcmp) c=plt.colorbar(cs1,pad=-0.02) c.set_label('DWT',fontsize=16) c1=plt.colorbar(cs,pad=0.05) c1.set_label('Hm0 (m)',fontsize=16) Plot_Hm0(mean_hm0,newcmp) plt.title('Mean', fontsize=20) Plot_Hm0(perc_hm0,newcmp) 
plt.title('95% Percentile', fontsize=20) Plot_Hm0(max_hm0,newcmp) plt.title('Maximum', fontsize=20) fig = plt.figure(figsize=[17,17]) # order=[0,6,12,18,24,30,1,7,13,19,25,31,2,8,14,20,26,32,3,9,15,21,27,33,4,10,16,22,28,34,5,11,17,23,29,35] lim=150 gs1=gridspec.GridSpec(6,6) label='Hm0 (m)' for a in range(len(np.unique(data.bmus))): ax=fig.add_subplot(gs1[a],alpha=0.1, facecolor=l_colors_dwt[a,:]) ax.patch.set_alpha(0.2) ax.set_xlim(np.nanmin(data.hm0),np.nanmax(data.hm0)) ax.set_ylim(0,lim) ax.grid(True,color='grey',linestyle=':',alpha=0.7) # ax.text(np.nanmin(data.hm0)+(np.nanmax(data.hm0)-np.nanmin(data.hm0))/9,0.85*lim,'DWT'+str(a+1),fontsize=11,fontweight='bold') if a>=30: ax.set_xlabel(label,fontsize=12) ax.xaxis.set_tick_params(labelsize=12) else: ax.set_xticklabels([]) if (a==0) | (a==6) | (a==12) | (a==18) | (a==24) | (a==30): ax.set_ylabel('Probability',fontsize=12) ax.yaxis.set_tick_params(labelsize=12) else: ax.set_yticklabels([]) ax.hist(data.hm0[np.where(data.bmus==a)[0]],density=True, bins=np.linspace(np.nanmin(data.hm0),np.nanmax(data.hm0),30), alpha=0.8, color='lightcoral', ec='darkred')Grays Harbor TopographyModify topography from the Astoria 1/3" DEM, which was referenced to MHW, by an approximate adjustment to Mean Tide Level (MTL). Around Grays Harbor, this adjustment is approximately linear in longitude, as verified separately using the [VDatum software](https://vdatum.noaa.gov/vdatumweb/). We use the datums at Westport and Aberdeen to estimate the linear function.%matplotlib inline from pylab import * from clawpack.geoclaw import topotools server = 'https://www.ngdc.noaa.gov/thredds/dodsC/regional/' url_astoria = server + 'astoria_13_mhw_2012.nc' extent = [-124.2, -123.65, 46.8, 47.15] GH_13sec_mhw = topotools.read_netcdf(url_astoria, extent=extent) figure(figsize=(13,7)) ax = axes() GH_13sec_mhw.crop(coarsen=3).plot(axes=ax) title('Astoria MHW');Approximate conversion from MHW to MTLDatums at Westport and Aberdeen tide gauges:Datum | Westport | Aberdeen------|----------|------------ MHW | 2.561 | 2.869 diff | 1.068 | 1.21 MTL | 1.493 | 1.659 diff | 1.068 | 1.21 MLW | 0.425 | 0.449 Topo referenced to MHW should be increased by 1.068m at Westport and by 1.21m at Aberdeen to reference to MTL. 
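Before computing the adjusted topography, a quick check that the longitude-linear offset used in the next cell reproduces these two values. The longitudes -124.105 and -123.85 are the ones hard-coded in that cell, presumably approximating the Westport and Aberdeen gauge locations; this small verification is not part of the original notebook.

```python
# Sanity check of the linear MHW -> MTL offset used in the next cell
def mhw_to_mtl_offset(lon):
    return 1.068 + (1.21 - 1.068) * (lon + 124.105) / (-123.85 + 124.105)

print(mhw_to_mtl_offset(-124.105))  # 1.068 m (Westport)
print(mhw_to_mtl_offset(-123.85))   # 1.21 m (Aberdeen)
```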
We use a linear function based on longitude:Zmtl = GH_13sec_mhw.Z + (1.068 + (1.21-1.068)*(GH_13sec_mhw.X+124.105)/(-123.85+124.105)) GH_13sec_mtl = topotools.Topography() GH_13sec_mtl.set_xyZ(GH_13sec_mhw.X, GH_13sec_mhw.Y, Zmtl) extent = [-123.9,-123.7,46.93,46.99] figure(figsize=(13,13)) ax = subplot(211) GH_13sec_mhw.crop(extent).plot(axes=ax,limits=(-10,10), add_colorbar=True, cb_kwargs={'shrink':0.7}) title('Topo relative to MHW near Aberdeen') ax = subplot(212) GH_13sec_mtl.crop(extent).plot(axes=ax,limits=(-10,10), add_colorbar=True, cb_kwargs={'shrink':0.7}) title('Topo relative to MTL near Aberdeen');Create topo file:GH_13sec_mtl.write('GH_13sec_mtl.asc', topo_type=3, header_style='asc', Z_format='%10.3f')Web Service Endpoint を使った推論import requests import json import numpy as npデータをロードして1レコード抽出しますnpz = np.load('docdata1.npz') x = npz['arr_0'] y = npz['arr_1'] print(x.shape) print(y.shape) x_data = x[6500].tolist()Web Service Endpoint を指定します - 前のステップでコピーしておいた URI を使用しますscoring_uri = ''推論を実行しますheaders = {'Content-Type':'application/json'} data = {"data": x_data} response = requests.post(scoring_uri, data=json.dumps(data), headers=headers) print(response.status_code) print(response.elapsed) data_retd = np.array(response.json(), dtype=np.float32) print(data_retd)Reflect Tables into SQLAlchemy ORM# Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func from sqlalchemy.orm import create_session from sqlalchemy.schema import Table, MetaData from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, Float from sqlalchemy import create_engine, inspect, func engine = create_engine("sqlite:///hawaii2.sqlite") conn = engine.connect() # reflect an existing database into a new model Base = automap_base() Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine) # Use the Inspector to explore the database and print the table names inspector = inspect(engine) inspector.get_table_names() measurement_col = [] columns = inspector.get_columns('measurement') for c in columns: measurement_col.append(c['name']) print(c['name'], c["type"]) measurement_col # top 5 measurements engine.execute('SELECT * FROM Measurement LIMIT 5').fetchall()Exploratory Climate Analysis# last date session.query(Measurement.date).order_by((Measurement.date.desc())).first() #today date print(dt.date.today()) print(dt.date(2017, 8 ,23)) # Design a query to retrieve the last 12 months of precipitation data and plot the results session.query(Measurement.date).order_by(Measurement.date.desc()).first # Calculate the date 1 year ago from the last data point in the database one_year = dt.date(2017,8,23) -dt.timedelta(days=365) print(one_year) #Perform a query to retrieve the data and precipitation scores results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= one_year).all() precipitation_last_12 = session.query(Measurement.date,Measurement.prcp).\ filter(func.datetime(Measurement.date) >= one_year).\ order_by(Measurement.date).all() precipitation_last_12 #Save the query results as a Pandas DataFrame and set the index to the date column df = pd.DataFrame(results, columns=['date', 'precipitation']) 
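# The date arithmetic in the cells above relies on `dt`, whose import is not shown in this
# excerpt; presumably `import datetime as dt` was executed earlier in the notebook. For
# reference, a self-contained version of the "one year ago" computation, using the last
# measurement date 2017-08-23 returned by the query above:
import datetime as dt
one_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
print(one_year)  # 2016-08-23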
df.set_index(df['date'], inplace=True) df.tail() precipitation_pd = pd.DataFrame(precipitation_last_12) precipitation_pd= precipitation_pd.dropna() #Sort the dataframe by date precipitation_pd.describe() # date and pcp top 5 precipitation_pd.head() #count of date and Precipitation precipitation_pd.count() # Create Date vs. Prcp Plot scatter plot #precipitation_pd.date.set_index('date',inplace=True,sort_columns=True,use_index=True, legend=True,grid=True, color='g') precipitation_pd=precipitation_pd.sort_values('date') precipitation_pd.plot(x='date',y='prcp',rot=90) # Set title/ plt.title("Date vs Precipitation") # Set x axis label #FIX DATES plt.xlabel("Date") # Set y axis label plt.ylabel("Precipitation") # Set grid line plt.grid(linestyle='-', linewidth=1, alpha = 1.5) plt.savefig('date_precip_12.png') # Design a query to show how many stations are available in this dataset? session.query(Measurement.station, func.sum(Measurement.station))\ .group_by(Measurement.station).all() stationcount = session.query(Measurement).distinct(Measurement.station).group_by(Measurement.station).count() stationcount # What are the most active stations? (i.e. what stations have the most rows)? #print('Total Station Number:',Measurement.station) sel = [Measurement.station, func.count(Measurement.tobs)] query = session.query(*sel).\ group_by(Measurement.station).\ order_by(func.count(Measurement.tobs).desc()) q_df = pd.DataFrame(query, columns=['station_id','total_count']).set_index('station_id') store_station=q_df.index[0] q_df plt.savefig('total_count_df.png') # store min,max and avg for the active station sel= [func.min(Measurement.tobs), func.max(Measurement.tobs),func.avg(Measurement.tobs)] query = session.query(*sel).\ filter(Measurement.station==store_station).all() q_df=pd.DataFrame(query,columns=['low','hgh','avg']) q_df #histogram of Tobs sel=[Measurement.date, Measurement.tobs] query = session.query(*sel).\ filter(Measurement.station==store_station).\ filter(func.strftime(Measurement.date)>=one_year).\ group_by(Measurement.date).\ order_by(func.count(Measurement.tobs).desc()) q_df=pd.DataFrame(query[:][:],columns=['date', 'tobs']) q_df.plot.hist(bins=12, color='green') plt.tight_layout() plt.show() plt.savefig('stat_temp.png')Bonus Challenge Assignment# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() # function usage example print(calc_temps('2012-02-28', '2012-03-05')) # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. prev_year_start = dt.date(2018, 1, 1) - dt.timedelta(days=365) prev_year_end = dt.date(2018, 1, 7) - dt.timedelta(days=365) tmin, tavg, tmax = calc_temps(prev_year_start.strftime("%Y-%m-%d"), prev_year_end.strftime("%Y-%m-%d"))[0] print(tmin, tavg, tmax) # Plot the results from your previous query as a bar chart. 
# Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) fig, ax = plt.subplots(figsize=plt.figaspect(2.)) xpos = 1 yerr = tmax-tmin bar = ax.bar(xpos, tmax, yerr=yerr, alpha=0.5, color='coral', align="center") ax.set(xticks=range(xpos), xticklabels="a", title="Trip Avg Temp", ylabel="Temp (F)") ax.margins(.2, .2) # fig.autofmt_xdate() fig.tight_layout() fig.show() plt.savefig('trip_avg_temp.png') # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation start_date = '2012-01-01' end_date = '2012-01-07' sel = [Measurement.station, station.name, station.latitude, station.longitude, station.elevation, func.sum(Measurement.prcp)] results = session.query(*sel).\ filter(Measurement.station == station.station, func.strftime(Measurement.date) >= start_date, func.strftime(Measurement.date) <= end_date).\ group_by(Measurement.station).order_by(func.sum(Measurement.prcp).desc()) q_df=pd.DataFrame(results,columns=['station_id','station_name','total_precip','lat_station','Lng_station','elevation_station']).set_index('station_id') q_df # Create a query that will calculate the daily normals #disp# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. Args: date (str): A date string in the format '%m-%d' Returns: A list of tuples containing the daily normals, tmin, tavg, and tmax """ sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)] return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all() daily_normals("01-01") # calculate the daily normals for your trip # push each tuple of calculations into a list called `normals` # Set the start and end date of the trip trip_start = '2018-01-01' trip_end = '2018-01-07' # Use the start and end date to create a range of dates trip_dates = pd.date_range(trip_start, trip_end, freq='D') # Stip off the year and save a list of %m-%d strings trip_month_day = trip_dates.strftime('%m-%d') # Loop through the list of %m-%d strings and calculate the normals for each date normals = [] for date in trip_month_day: normals.append(*daily_normals(date)) normals # Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index df_trip = pd.DataFrame(normals, columns=['tmin', 'tavg', 'tmax']) df_trip['date'] = trip_dates df_trip.set_index(['date'],inplace=True) df_trip.head() # Plot the daily normals as an area plot with `stacked=False` df_trip.plot(kind='area', stacked=False, x_compat=True, alpha=.2) plt.tight_layout() plt.savefig('area_count_df.png')RL Assignment - Design a DQN Agent for Moutain Car Game Environmentenv = gym.make('MountainCar-v0') for e in range(20): observation = env.reset() for t in range(100): env.render() #print(observation) action = env.action_space.sample() observation, reward, done, other_info = env.step(action) if done: #Game Over print("Game Episode :{}/{}, High Score:{},Exploration Rate:{:.2}".format(e,20,t,1.0)) break env.close() print("Game Over!")Importando Banco de Dadosdf = pd.read_csv('train.csv', index_col='id') df.head() df = df.dropna() for c in df.columns[df.dtypes == object]: df[c] = df[c].astype('category') df.education.cat.reorder_categories(['illiterate', 'basic.4y', 
'basic.6y', 'basic.9y', 'high.school', 'professional.course', 'university.degree'], ordered=True, inplace=True)
for c in df.columns[df.dtypes == 'category']: df[c] = df[c].cat.codes
df.head()
#previsores = df.drop(['default','contact', 'duration', 'previous', 'campaign', 'pdays', 'y'], axis=1)
previsores = df.drop(['y'], axis=1)
previsores.head()
classe = df.filter(['y'])
classe.head()
sum(classe['y'])
#from sklearn.preprocessing import StandardScaler
#scaler = StandardScaler()
#previsores = scaler.fit_transform(previsores)
#classe = scaler.fit_transform(classe)
from sklearn.model_selection import train_test_split
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=42, shuffle=True, stratify=classe)
previsores_treinamento.shape, previsores_teste.shape, classe_treinamento.shape, classe_teste.shape
Bayesian analysis
from sklearn.naive_bayes import GaussianNB
regressor = GaussianNB()
regressor.fit(previsores_treinamento, classe_treinamento)
regressor.score(previsores_treinamento, classe_treinamento)
y_pred = regressor.predict(previsores_teste)
from sklearn import metrics
print(metrics.classification_report(classe_teste, y_pred))
precision recall f1-score support 0 0.94 0.85 0.89 3589 1 0.38 0.62 0.47 520 accuracy 0.82 4109 macro avg 0.66 0.73 0.68 4109 weighted avg 0.87 0.82 0.84 4109
K-Nearest Neighbors analysis
from sklearn.neighbors import KNeighborsClassifier
knn_class = KNeighborsClassifier(algorithm='auto', metric='minkowski', leaf_size=30, n_neighbors=5)
knn_class.fit(previsores_treinamento, classe_treinamento)
knn_class.score(previsores_treinamento, classe_treinamento)
y_pred = knn_class.predict(previsores_teste)
from sklearn import metrics
print(metrics.classification_report(classe_teste, y_pred))
sum(y_pred)
RandomForest
from sklearn.ensemble import RandomForestClassifier
regressor_rf = RandomForestClassifier(n_estimators=10000, criterion='entropy', random_state=42, max_depth=1000)
regressor_rf.fit(previsores_treinamento, classe_treinamento)
regressor_rf.score(previsores_treinamento, classe_treinamento)
y_pred = regressor_rf.predict(previsores_teste)
print(metrics.classification_report(classe_teste, y_pred))
precision recall f1-score support 0 0.93 0.96 0.95 3589 1 0.66 0.54 0.59 520 accuracy 0.91 4109 macro avg 0.80 0.75 0.77 4109 weighted avg 0.90 0.91 0.90 4109
SVM analysis
from sklearn.svm import SVC
regressor = SVC(kernel='rbf', random_state=1, C=2)
regressor.fit(previsores_treinamento, classe_treinamento)
regressor.score(previsores_treinamento, classe_treinamento)
y_pred = regressor.predict(previsores_teste)
print(metrics.classification_report(classe_teste, y_pred))
#!pip install tensorflow
#!pip install keras
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
classificador = Sequential()
classificador.add(Dense(units=40, activation='relu', input_dim=20))
classificador.add(Dense(units=40, activation='relu'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units=40, activation='relu'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units=40, activation='relu'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units=40, activation='relu'))
classificador.add(Dense(units=1, activation='sigmoid'))
classificador.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classificador.summary()
classificador.fit(previsores_treinamento, classe_treinamento, batch_size=10, epochs=100)
y_pred = classificador.predict(previsores_teste)
#lista = lis
#for x in y_pred:
#    print(x)
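# classificador.predict returns sigmoid probabilities in [0, 1]; the next line binarizes
# them at 0.5. If that cut-off needs tuning (e.g. to trade precision for recall on the
# minority class), a simple sweep over candidate thresholds could look like the sketch
# below -- this is an optional addition, not part of the original notebook:
from sklearn.metrics import f1_score
probs = classificador.predict(previsores_teste).ravel()
for thr in [0.3, 0.4, 0.5, 0.6, 0.7]:
    print(thr, round(f1_score(classe_teste, probs > thr), 3))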
y_pred = (y_pred > 0.5) print(metrics.classification_report(classe_teste, y_pred))precision recall f1-score support 0 0.92 0.96 0.94 3589 1 0.62 0.46 0.53 520 accuracy 0.90 4109 macro avg 0.77 0.71 0.73 4109 weighted avg 0.89 0.90 0.89 4109Kadleaval = pd.read_csv('test.csv', index_col='id') sub = pd.read_csv('sample.csv', index_col='id') #aval.head() for c in aval.columns[aval.dtypes == object]: aval[c] = aval[c].astype('category') aval.education.cat.reorder_categories(['illiterate', 'basic.4y', 'basic.6y', 'basic.9y', 'high.school', 'professional.course', 'university.degree'], ordered=True, inplace=True) for c in aval.columns[aval.dtypes == 'category']: aval[c] = aval[c].cat.codes #aval.head() #previsores = aval.drop(['contact', 'duration', 'previous'], axis=1) #aval = scaler.fit_transform(aval) y_aval = regressor_rf.predict(aval) #y_aval = (y_aval > 0.1) y_aval sub.y = pd.Series(y_aval) sub.head() sub.to_csv('Resultado_lucas_rf.csv')**Standard Imports**%config InlineBackend.figure_format = 'retina' %matplotlib inline import numpy as np import scipy as sp import matplotlib.pyplot as plt from scipy import signal import seaborn as sns sns.set_style('white')1. Simulate sharp waveformdef periodic_gaussians(T, period, gauss_std, Fs = 1000, delta_jitter = None, delta_prob = None, amplitude_envelope = False, amplitude_envelope_filt_kwargs = {}): """Simulate a signal that is periodic gaussians Parameters ---------- T : float length of time period gauss_std Fs delta_jitter amplitude_envelope : bool if True, the gaussian periodic gaussian is modulated by an amplitude envelope. This amplitude envelope is obtained by bandpass-filtering white noise amplitude_envelope_filt_kwargs : dict Returns ------- t lfp """ # Process input dt = 1/float(Fs) t = np.arange(0,T,dt) N_samples = len(t) # Generate delta train delta_train = periodic_delta(N_samples, int(period*Fs), delta_jitter = delta_jitter, delta_prob = delta_prob) # Generate Gaussian gauss_len_time_half = gauss_std*3 gauss_t = np.arange(-gauss_len_time_half,gauss_len_time_half+dt,dt) gauss_curve = gaussian(gauss_t, 0, gauss_std) # Convolve Gaussian with delta train lfp = np.convolve(delta_train, gauss_curve, mode='same') # Make minimum -1 and max 1. Then subtract mean Ntaps = len(gauss_t) lfp = (lfp - np.min(lfp[Ntaps:-Ntaps]))/(np.max(lfp[Ntaps:-Ntaps])-np.min(lfp[Ntaps:-Ntaps]))*2 - 1 # Subtract mean lfp -= np.mean(lfp) return t, lfp def gaussian(x, mu, sig): return np.exp(-np.power(x - mu, 2.) 
/ (2 * np.power(sig, 2.))) def periodic_delta(N_samples, period, delta_jitter = None, delta_prob = None): """Simulate an oscillatory point process (1 event every period) noise is the standard deviation of the distribution of inter-spike-intervals (in samples)""" if delta_prob is None: delta_prob = 1 x = np.zeros(N_samples) spike_time = period-1 while spike_time < N_samples: if np.random.rand(1)[0] < delta_prob: x[spike_time] = 1 if delta_jitter is not None: spike_time += period + int(np.random.randn()*delta_jitter) else: spike_time += period return x def simbrown(N): """Simulate a brown noise signal (power law distribution 1/f^2) with N samples""" wn = np.random.randn(N) return np.cumsum(wn) def simfiltonef(T, f_range, Fs, N, samp_buffer = 10000): """ Simulate a band-pass filtered signal with 1/f^2 Input suggestions: f_range=(2,None), Fs=1000, N=1000 Parameters ---------- T : float length of time of simulated oscillation Fs : float oscillation sampling rate f_range : 2-element array (lo,hi) frequency range of simulated data if None: do not filter N : int order of filter """ if f_range is None: # Do not filter # Generate 1/f^2 noise brownN = simbrown(int(T*Fs)) return brownN elif f_range[1] is None: # High pass filter # Generate 1/f^2 noise brownN = simbrown(int(T*Fs+N*2)) # Filter nyq = Fs / 2. if N % 2 == 0: print('NOTE: Increased high-pass filter order by 1 in order to be odd') N += 1 taps = sp.signal.firwin(N, f_range[0] / nyq, pass_zero=False) brownNf = sp.signal.filtfilt(taps, [1], brownN) return brownNf[N:-N] else: # Bandpass filter # Generate 1/f^2 noise brownN = simbrown(int(T*Fs+N*2)) # Filter nyq = Fs / 2. taps = sp.signal.firwin(N, np.array(f_range) / nyq, pass_zero=False) brownNf = sp.signal.filtfilt(taps, [1], brownN) return brownNf[N:-N] def norm01(x): return (x - np.min(x))/(np.max(x)-np.min(x))Simulate nonsinusoidal oscillation# SImulation parameters Fs = 1000 delta_jitter = 6 delta_prob = .6 T = 30 f1 = 10 f1bw = 2 f1_range = (f1-f1bw,f1+f1bw) period = 1/float(f1) gauss_std_1 = .01 gauss_std_2 = .02 np.random.seed(1) t, x_gauss_1 = periodic_gaussians(T, period, gauss_std_1, Fs = Fs, delta_jitter = delta_jitter, delta_prob = delta_prob) t, x_gauss_2 = periodic_gaussians(T, period, gauss_std_2, Fs = Fs, delta_jitter = delta_jitter, delta_prob = delta_prob)Simulate 1/f noiseNtaps = 500 randseed = 0 brown_bandpass = (2,200) x_brown = simfiltonef(T, brown_bandpass, Fs, Ntaps) x_brown = norm01(x_brown)Viz symmetric and asymmetric oscillation# Oscillation and noise is neural signal x_gauss_weight = .3 x1 = x_gauss_1*x_gauss_weight + x_brown x2 = x_gauss_2*x_gauss_weight + x_brown plt.figure(figsize=(9,3)) plt.plot(t,x1, 'k-') # plt.plot(t,x_gauss_1*x_gauss_weight, 'b-', alpha=.5) # plt.plot(t,x_brown, 'r-', alpha=.5) plt.xlim((0,1)) plt.xlabel('Time (s)', size=15) plt.ylabel('Voltage (a.u.)', size=15)Compute PSDfrom neurodsp import spectral f, psd = spectral.psd(x1, Fs, nperseg = Fs*2) flim = (0,50) fidxs = np.logical_and(f >= flim[0], f <= flim[1]) f = f[fidxs] psd = psd[fidxs] plt.figure(figsize=(5,5)) plt.semilogy(f, psd, 'k') plt.xlabel('Frequency (Hz)', size=15) plt.ylabel('Power (a.u.)', size=15)Display of the performance results for PGW and the PW initialization In this notebook we run the code implemented in 'partial_gw.py' which is adapted from Chapel et al.We compute PGW in the setting of domain adaptation with the same of different features, and with a prior set to either 10 or 20%. 
We also compute the initialization Partial-Wasserstein when the feature types are the same.import os import utils import numpy as np from IPython.display import HTML, display, Markdown import pandas as pd from sklearn.metrics import accuracy_score, recall_score, precision_score import ot import partial_gw as pgw import matplotlib.pyplot as plt name_path = 'results_pgw' path = os.getcwd() + "/saved_plans" if not os.path.isdir(path): os.mkdir(path) path = path + "/" + name_path if not os.path.isdir(path): os.mkdir(path)Caltech dataset - PU on different domains Prior is set to 10%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.1 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] for d in domain_s: pgw.compute_perf_pgw('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] for d in domain_d: pgw.compute_perf_pgw('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] np.set_printoptions(precision=5) datasets = [('surf_Caltech', d) for d in domain_s] + [('decaf_caltech', d) for d in domain_d] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_plan_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'surf_Caltech', 0.1): 84.89999999999999 $\pm$ 1.9467922333931804 Accuracy for dataset ('surf_Caltech', 'surf_amazon', 0.1): 82.2 $\pm$ 1.8867962264113187 Accuracy for dataset ('surf_Caltech', 'surf_webcam', 0.1): 81.30000000000001 $\pm$ 1.8193405398660225 Accuracy for dataset ('surf_Caltech', 'surf_dslr', 0.1): 81.4 $\pm$ 1.6852299546352687 Accuracy for dataset ('decaf_caltech', 'decaf_caltech', 0.1): 83.0 $\pm$ 2.2360679774997885 Accuracy for dataset ('decaf_caltech', 'decaf_amazon', 0.1): 81.4 $\pm$ 1.9078784028338882 Accuracy for dataset ('decaf_caltech', 'decaf_webcam', 0.1): 82.70000000000002 $\pm$ 2.5514701644346127 Accuracy for dataset ('decaf_caltech', 'decaf_dslr', 0.1): 83.79999999999998 $\pm$ 1.5362291495737233Prior is set to 20%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.2 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam'] n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.2 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] for d in domain_s: pgw.compute_perf_pgw('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 
'decaf_webcam'] for d in domain_d: pgw.compute_perf_pgw('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) np.set_printoptions(precision=5) datasets = [('surf_Caltech', d) for d in domain_s] + [('decaf_caltech', d) for d in domain_d] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_plan_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'surf_Caltech', 0.2): 75.7 $\pm$ 4.659399102888697 Accuracy for dataset ('surf_Caltech', 'surf_amazon', 0.2): 65.99999999999999 $\pm$ 3.286335345030996 Accuracy for dataset ('surf_Caltech', 'surf_webcam', 0.2): 64.30000000000001 $\pm$ 2.123676058159532 Accuracy for dataset ('decaf_caltech', 'decaf_caltech', 0.2): 76.70000000000002 $\pm$ 3.593048844644337 Accuracy for dataset ('decaf_caltech', 'decaf_amazon', 0.2): 68.7 $\pm$ 4.394314508543966 Accuracy for dataset ('decaf_caltech', 'decaf_webcam', 0.2): 75.9 $\pm$ 3.6041642581880216Caltech dataset - PU on different feature spaces Prior is set tio 10%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.1 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.1 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] for d in domain_s: pgw.compute_perf_pgw('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] for d in domain_d: pgw.compute_perf_pgw('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) np.set_printoptions(precision=5) datasets = [('surf_Caltech', d) for d in domain_d] + [('decaf_caltech', d) for d in domain_s] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_plan_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'decaf_caltech', 0.1): 85.1 $\pm$ 2.643860813280458 Accuracy for dataset ('surf_Caltech', 'decaf_amazon', 0.1): 87.10000000000001 $\pm$ 
6.49538297562199 Accuracy for dataset ('surf_Caltech', 'decaf_webcam', 0.1): 88.6 $\pm$ 5.624944444170091 Accuracy for dataset ('surf_Caltech', 'decaf_dslr', 0.1): 91.10000000000001 $\pm$ 8.086408349817608 Accuracy for dataset ('decaf_caltech', 'surf_Caltech', 0.1): 81.00000000000001 $\pm$ 1.6124515496597067 Accuracy for dataset ('decaf_caltech', 'surf_amazon', 0.1): 81.2 $\pm$ 1.9390719429665286 Accuracy for dataset ('decaf_caltech', 'surf_webcam', 0.1): 81.30000000000001 $\pm$ 2.0273134932713264 Accuracy for dataset ('decaf_caltech', 'surf_dslr', 0.1): 80.80000000000001 $\pm$ 1.599999999999997Prior is set to 20%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.2 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam'] n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.2 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] for d in domain_s: pgw.compute_perf_pgw('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam'] for d in domain_d: pgw.compute_perf_pgw('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) np.set_printoptions(precision=5) datasets = [('surf_Caltech', d) for d in domain_d] + [('decaf_caltech', d) for d in domain_s] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_plan_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'decaf_caltech', 0.2): 75.6 $\pm$ 3.322649545167231 Accuracy for dataset ('surf_Caltech', 'decaf_amazon', 0.2): 87.89999999999999 $\pm$ 6.617401302626283 Accuracy for dataset ('surf_Caltech', 'decaf_webcam', 0.2): 88.4 $\pm$ 5.817215828899594 Accuracy for dataset ('decaf_caltech', 'surf_Caltech', 0.2): 63.69999999999999 $\pm$ 2.917190429162966 Accuracy for dataset ('decaf_caltech', 'surf_amazon', 0.2): 62.39999999999999 $\pm$ 4.454211490264018 Accuracy for dataset ('decaf_caltech', 'surf_webcam', 0.2): 61.4 $\pm$ 3.231098884280704Compute performance of initialization - Partial WassersteinWe focus in this section on the computation of Partial-Wasserstein (PW), to check if PGW improves the performance.We only focus on this section on PU learning performed over similar features. 
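To make the decision rule in the cells above easier to follow, here is a minimal, self-contained sketch of the dummy-point construction that partial optimal transport uses for PU learning when positives and unlabeled points live in the same feature space. It is an illustration only: it pads the cost matrix with zero-cost dummy source rows and solves the resulting balanced problem with plain `ot.emd`; the helper name `pu_scores_partial_w` and the padding details are assumptions for this sketch, not the actual code behind `pgw.compute_perf_init`.

```python
# Hedged sketch of partial-Wasserstein PU scoring via dummy source points.
import numpy as np
import ot  # POT: Python Optimal Transport


def pu_scores_partial_w(P, U, prior, nb_dummies=10):
    """Score unlabeled points by the mass they receive from dummy source points."""
    n_pos, n_unl = len(P), len(U)
    M = ot.dist(P, U)                                       # pairwise squared Euclidean costs
    M /= M.max()                                            # normalize the cost matrix
    M_ext = np.vstack([M, np.zeros((nb_dummies, n_unl))])   # zero-cost dummy rows
    a = np.concatenate([np.full(n_pos, prior / n_pos),                   # positives carry mass = prior
                        np.full(nb_dummies, (1 - prior) / nb_dummies)])  # dummies carry the rest
    b = np.full(n_unl, 1.0 / n_unl)                         # uniform mass on unlabeled points
    plan = ot.emd(a, b, M_ext)                              # exact OT on the padded problem
    return plan[-nb_dummies:].sum(axis=0)                   # dummy mass received per unlabeled point


# Usage on hypothetical feature arrays P_feats, U_feats:
# scores = pu_scores_partial_w(P_feats, U_feats, prior=0.1)
# y_hat = np.ones(len(scores)); y_hat[np.argsort(scores)[n_expected_pos:]] = 0
```

Unlabeled points that receive little dummy mass are the ones best explained by the positive samples, so they are predicted positive, mirroring the evaluation loops above.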
Prior is set to 10%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.1 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr'] for d in domain_s: pgw.compute_perf_init('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam', 'decaf_dslr'] for d in domain_d: pgw.compute_perf_init('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) datasets = [('surf_Caltech', d) for d in domain_s] + [('decaf_caltech', d) for d in domain_d] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_init_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'surf_Caltech', 0.1): 89.9 $\pm$ 2.046948949045873 Accuracy for dataset ('surf_Caltech', 'surf_amazon', 0.1): 81.80000000000001 $\pm$ 1.2489995996796768 Accuracy for dataset ('surf_Caltech', 'surf_webcam', 0.1): 81.89999999999999 $\pm$ 1.3379088160259627 Accuracy for dataset ('surf_Caltech', 'surf_dslr', 0.1): 80.00000000000001 $\pm$ 1.1102230246251565e-14 Accuracy for dataset ('decaf_caltech', 'decaf_caltech', 0.1): 93.9 $\pm$ 1.6093476939431057 Accuracy for dataset ('decaf_caltech', 'decaf_amazon', 0.1): 80.10000000000002 $\pm$ 0.43588989435406533 Accuracy for dataset ('decaf_caltech', 'decaf_webcam', 0.1): 80.10000000000002 $\pm$ 0.43588989435406533 Accuracy for dataset ('decaf_caltech', 'decaf_dslr', 0.1): 80.60000000000002 $\pm$ 0.9165151389911639Prior is set to 20%n_unl = 100 n_pos = 100 nb_reps = 20 nb_dummies = 10 prior = 0.2 domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam'] domain_s = ['surf_Caltech', 'surf_amazon', 'surf_webcam'] for d in domain_s: pgw.compute_perf_init('surf_Caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) domain_d = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam'] for d in domain_d: pgw.compute_perf_init('decaf_caltech', d, n_pos, n_unl, prior, nb_reps, 'results_pgw', nb_dummies) datasets = [('surf_Caltech', d) for d in domain_s] + [('decaf_caltech', d) for d in domain_d] for (d_p, d_u) in datasets: acc, rec, prec = [], [], [] # Aggregate and compute accuracies for i in range(nb_reps): P, U, y_u = utils.draw_p_u_dataset_scar(d_p, d_u, n_pos, n_unl, prior, i) nb_unl_pos = int(np.sum(y_u)) plan = np.load(path + f'/partial_gw_init_{d_p}_{n_pos}_{d_u}_{n_unl}_prior{prior}_reps{i}.npy', allow_pickle=True) marg = np.sum(plan[-nb_dummies:], axis=0) y_hat = np.ones(len(y_u)) y_hat[np.argsort(marg)[nb_unl_pos:]] = 0 acc.append(accuracy_score(y_u, y_hat)) rec.append(recall_score(y_u, y_hat)) prec.append(precision_score(y_u, 
y_hat)) # Compute mean print(f'Accuracy for dataset {(d_p, d_u, prior)}: {100 * np.mean(acc)} $\pm$ {100 * np.std(acc)}') # print(f'Recall for dataset {(d_p, d_u)}: {np.mean(rec)} $\pm$ {np.std(rec)}') # print(f'Precision for dataset {(d_p, d_u)}: {np.mean(prec)} $\pm$ {np.std(prec)}\n')Accuracy for dataset ('surf_Caltech', 'surf_Caltech', 0.2): 79.7 $\pm$ 2.9849623113198587 Accuracy for dataset ('surf_Caltech', 'surf_amazon', 0.2): 65.6 $\pm$ 2.870540018881464 Accuracy for dataset ('surf_Caltech', 'surf_webcam', 0.2): 65.10000000000001 $\pm$ 1.946792233393179 Accuracy for dataset ('decaf_caltech', 'decaf_caltech', 0.2): 90.60000000000002 $\pm$ 2.5377155080899043 Accuracy for dataset ('decaf_caltech', 'decaf_amazon', 0.2): 62.499999999999986 $\pm$ 2.085665361461421 Accuracy for dataset ('decaf_caltech', 'decaf_webcam', 0.2): 65.7 $\pm$ 1.926136028425824Talktorial 9 Ligand-based pharmacophores Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin , and **Note**: Please run this notebook cell by cell. Running all cells in one is possible also, however, a few PyMol images might not turn out as intended. Aim of this talktorialIn this talktorial, we use known EGFR ligands, which were selected and aligned in the previous talktorial, to identify donor, acceptor, and hydrophobic pharmacophoric features for each ligand. Those features are then clustered to define an ensemble pharmacophore, which represents the properties of the set of known EGFR ligands and can be used to search for novel EGFR ligands via virtual screening. Learning goals Theory* Pharmacophore modeling * Structure- and ligand-based pharmacophore modeling* Virtual screening with pharmacophores* Clustering: k means Practical* Get pre-aligned ligands from previous talktorial* Start PyMoL* Show ligands with PyMol* Extract pharmacophore features* Show the pharmacophore features of all ligands * Hydrogen bond donors * Hydrogen bond acceptors * Hydrophobic contacts* Collect coordinates of features per feature type* Generate ensemble pharmacophores * Set static parameters for k-means clustering * Set static parameters for cluster selection * Define k-means clustering and cluster selection functions * Cluster features * Select relevant clusters * Get selected cluster coordinates* Show clusters * Hydrogen bond donors * Hydrogen bond acceptors * Hydrophobic contacts* Show ensemble pharmacophore References* IUPAC pharmacophore definition ([Pure & Appl. Chem (1998), 70, 1129-43](https://iupac.org/publications/pac/70/5/1129/))* 3D pharmacophores in LigandScout ([J. Chem. Inf. Model. (2005), 45, 160-9](http://pubs.acs.org/doi/pdf/10.1021/ci049885e))* Book chapter: Pharmacophore Perception and Applications ([Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 259-82](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6f))* Book chapter: Structure-Based Virtual Screening ([Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 313-31](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6h)).* and the origin of the pharmacophore concept ([Internet Electron. J. Mol. Des. 
(2007), 6, 271-9](http://biochempress.com/Files/iejmd_2007_6_0271.pdf))* PyMol integration with RDKit ([rdkit.Chem.PyMol documentation](http://rdkit.org/docs/source/rdkit.Chem.PyMol.html))* 's demonstration of pharmacophore modeling with RDKit ([RDKit UGM 2016 on GitHub](https://github.com/rdkit/UGM_2016/blob/master/Notebooks/Stiefl_RDKitPh4FullPublication.ipynb)) Theory PharmacophoresIn computer-aided drug design, the description of drug-target interactions with pharmacophores is a well-established method. The term pharmacophore was defined in 1998 by a IUPAC working party:"A pharmacophore is the ensemble of steric and electronic features that is necessary to ensure the optimal supramolecular interactions with a specific biological target structure and to trigger (or to block) its biological response." ([Pure & Appl. Chem. (1998), 70, 1129-43](https://iupac.org/publications/pac/70/5/1129/))In other words, a *pharmacophore* consists of several *pharmacophoric features*, which describe important steric and physico-chemical properties of a ligand observed to bind a target under investigation. Such *physico-chemical properties* (also called feature types) can be hydrogen bond donors/acceptors, hydrophobic/aromatic interactions, or positively/negatively charged groups, and the *steric properties* are defined by the 3D arrangement of these features. Structure- and ligand-based pharmacophore modelingIn pharmacophore modeling, two main approaches are used, depending on the biological question and available data sources, i.e. structure- and ligand-based pharmacophore modeling.*Structure-based pharmacophore models* are derived from protein-ligand complexes. Features are defined by observed interactions between the protein and ligand, ensuring that only those ligand moieties are used for virtual screening that have already been shown to be involved in ligand binding. However, structures of protein-ligand complexes are not available for all targets. In this case, either complex structures can be generated by modeling the ligand into the target binding site, e.g. via molecular docking, or pharmacophore modeling methods can be invoked that only use the target binding site to detect potential protein-ligand interaction sites.*Ligand-based pharmacophore models* are based on a set of ligands known to bind the target under investigation. The common chemical features of these ligands build the pharmacophore model. This method is used for targets with multiple known ligands and in case of missing protein-ligand complex structures. In this talktorial, we will use ligand-based pharmacophore modeling using a set of known EGFR ligands.For more information on pharmacophore modeling, we recommend ([Pharmacophore Perception and Applications: Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 259-82](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6f)) and ([J. Chem. Inf. Model. (2005), 45, 160-9](http://pubs.acs.org/doi/pdf/10.1021/ci049885e)). Figure 1: Structure-based pharmacophore representing protein-ligand interactions (figure by ). Virtual screening with pharmacophoresAs described earlier in **talktorial 4**, virtual screening (VS) describes the screening of a query (e.g. here in **talktorial 9** a pharmacophore model or in **talktorial 4** a query compound) against a large library of compounds, in order to identify those small molecules (in the library) that are most likely to bind a target under investigation (represented by the query). 
In pharmacophore-based virtual screening, the compound library is matched compound-by-compound into a pharmacophore model and ranked by the best matching results ([Structure-Based Virtual Screening: Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 313-31](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6h)). Clustering: k meansIn this talktorial, we will generate an ensemble pharmacophore by clustering the feature points of several ligand-based pharmacophores. The clustering algorithm used is the k means clustering, which aims to cluster a data set into k clusters:1. k different centroids are selected and each point of the data set is assigned to its closest centroids.2. New centroids are calculated based on the current clusters and each point of the data set is newly assigned to its closest centroids.3. This procedure is repeated until the centroids are stable.([K means wikipedia](https://de.wikipedia.org/wiki/K-Means-Algorithmus)) Practicalimport os, glob # RDKit from rdkit import RDConfig, Chem, Geometry, DistanceGeometry from rdkit.Chem import ChemicalFeatures, rdDistGeom, Draw, rdMolTransforms, AllChem from rdkit.Chem.Draw import IPythonConsole, DrawingOptions from rdkit.Chem.Pharm3D import Pharmacophore, EmbedLib from rdkit.Numerics import rdAlignment IPythonConsole.ipython_useSVG=True # PyMOL related from rdkit.Chem import PyMol from pymol import * import time # Needed for waiting a second from PIL import Image # For export the image to disk import collections import pandas as pd import math from sklearn import datasets, cluster import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from collections import Counter # For handling the labels import operatorGet pre-aligned ligands from previous talktorialWe retrieve all ligands that were aligned in the previous talktorial.First, we get the file paths to all ligand PDB files.mol_files = [] for file in glob.glob("../data/T8/*_lig.pdb"): mol_files.append(file) mol_files pdb_ids = [i.split("/")[-1].split("_")[0] for i in mol_files] pdb_idsSecond, we read all ligands from these PDB files using RDKit.mols = [] for mol_file in mol_files: mol = Chem.MolFromPDBFile(mol_file, removeHs=False) if mol is None: print(mol_file, 'could not be read') else: Chem.SanitizeMol(mol) print(Chem.MolToSmiles(mol)) mols.append(mol) rangeMols = range(1, len(mols)+1) print('Number of molecules: ', len(mols)) Draw.MolsToGridImage(mols, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])We encounter a problem here: When loading ligands from a PDB file, RDKit does not assign e.g. aromatic rings to the ligand. We use the RDKit function `AssignBondOrdersFromTemplate`, which assigns bonds to a molecule based on a reference molecule, e.g. in our case based on the SMILES pattern of the molecule. 
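As a minimal, self-contained illustration of that call pattern (a toy benzoic-acid round trip through a PDB block, not one of the talktorial's EGFR ligands; the behavior sketched in the comments is the expected one, not output from this notebook):

```python
from rdkit import Chem
from rdkit.Chem import AllChem

# Template with correct bond orders (from SMILES) and a 3D copy round-tripped
# through PDB format, which usually degrades bond-order information.
template = Chem.MolFromSmiles("c1ccccc1C(=O)O")          # toy molecule: benzoic acid
mol3d = Chem.Mol(template)
AllChem.EmbedMolecule(mol3d, randomSeed=42)              # generate 3D coordinates
degraded = Chem.MolFromPDBBlock(Chem.MolToPDBBlock(mol3d))
print(Chem.MolToSmiles(degraded))                        # may show missing aromaticity
fixed = AllChem.AssignBondOrdersFromTemplate(template, degraded)
print(Chem.MolToSmiles(fixed))                           # bond orders restored from the template
```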
Check for further information: ([RDKit discussion on "Aromaticity of non-protein molecules in PDB not detected"](https://github.com/rdkit/rdkit/issues/1031)) and ([RDKit documentation on `AssignBondOrdersFromTemplate`](http://rdkit.org/docs/source/rdkit.Chem.AllChem.htmlAssignBondOrdersFromTemplate)).# Load SMILES for PDB ligand structures ligs = pd.read_csv("../data/T8/PDB_top_ligands.csv", sep="\t") # Get SMILES in the same order as in pdb_ids ligs_smiles = [ligs[ligs["@structureId"]==pdb_id]["smiles"].values[0] for pdb_id in pdb_ids] # Generate RDKit Mol object from SMILES refmols = [Chem.MolFromSmiles(smiles) for smiles in ligs_smiles] # Assign bond orders to molecules (mols) based on SMILES patterns (refmols) mols = [AllChem.AssignBondOrdersFromTemplate(refmol, mol) for refmol, mol in zip(refmols, mols)] Draw.MolsToGridImage(mols, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])We can also have a look at the molecules in 2D (we copy the molecules for this example to keep the original coordinates).mols_2D = [] for mol in mols: tmp=Chem.Mol(mol) AllChem.Compute2DCoords(tmp) mols_2D.append(tmp) Draw.MolsToGridImage(mols_2D, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])Start PyMolWe start PyMol within the terminal.# Open PyMol in shell os.popen('pymol -R')Note: If no separate PyMol window opens at this stage, something may be wrong with your PyMol installation and needs to be fixed first! You can also try to manually start PyMol by typing `pymol -R` in your shell. We need to wait until PyMol is launched completely. Then we can link PyMol to the Jupyter notebook via RDKit: `objPMV = PyMol.MolViewer()`. Check general functionalities of PyMol integration with RDKit on ([rdkit.Chem.PyMol documentation](http://rdkit.org/docs/source/rdkit.Chem.PyMol.html)).# Error handling: wait until PyMol is loaded nrTry = 0 # Number of current attempt ttw = 10 # Time to wait in seconds while nrTry < ttw: # Try until PyMol is loaded and the object can be saved nrTry += 1 try: objPMV = PyMol.MolViewer() # Save the PyMol object break # Stop the loop when PyMol is loaded except ConnectionRefusedError: # Exception handling if PyMol is not loaded yet time.sleep(1) # Wait... if nrTry == ttw: # After ttw trys: print error message print("Error: PyMol did not start correctly.\n" + "Try again and/or check if PyMol is installed completely.")This is the RDKit PyMol object, which we will be using in the following in order to control PyMol from within the Jupyter notebook:objPMVThe two most important commands of the RDKit PyMol integration are: * `objPMV.ShowMol(object, name, showOnly)` to load the object in PyMol* `objPMV.GetPNG(h=height)` to show the figure in the jupyter notebook Show ligands with PyMolWe show all ligands (pre-aligned in previous talktorial) with PyMol. We load each molecule individually into PyMol and set a unique PyMol name (m1, m2, ...).for mol, i in zip(mols, rangeMols): objPMV.ShowMol(mol, name='m%d'%i, showOnly=False) i += 1All ligands should be visible now in the PyMol window. We display a picture of the PyMol window in this talktorial via retrieving a *png* picture.objPMV.GetPNG(h=300)Next, we define a function from the steps described above. We add some styling PyMol commands. You can pass PyMol commands from RDKit to PyMol using:`objPMV.server.do("any_pymol_command")`def visualize_ligands(objPMV, molecules): ''' This function shows all input molecules in PyMol (within the Jupyter notebook). 
''' # Initialize PyMol in order to remove all previous objects objPMV.server.do("reinitialize") # Load ligands rangeMols = range(1, len(molecules)+1) for mol, i in zip(molecules, rangeMols): objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False) toStickCmd='cmd.show("sticks","mol_'+str(i)+'")' objPMV.server.do(toStickCmd) i += 1 # Turn camera objPMV.server.do("turn x, -40") # Set background to white objPMV.server.do("bg_color white") # Zoom in on ligands objPMV.server.do("zoom") # Turn on ray tracing for better image quality objPMV.server.do("ray 1800, 1000") # Export as PNG file outputPNG = objPMV.GetPNG(w=1800, h=1000) outputPNG.save("../data/T9/ligands.png", ) # Display in Jupyter notebook return objPMV.GetPNG(h=300) visualize_ligands(objPMV, mols)Extract pharmacophore featuresAs described above, the aim of this talktorial is to generate a ligand-based ensemble pharmacophore from a set of ligands. First, we need to extract pharmacophore features per ligand.Therefore, we load a feature factory (with the default feature definitions).See also [rdkit docu on chemical features and pharmacophores](https://rdkit.readthedocs.io/en/latest/GettingStartedInPython.htmlchemical-features-and-pharmacophores).ffact = AllChem.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'))We take a look at the pharmacophore features that are implemented in RDKit:list(ffact.GetFeatureDefs().keys())As an example, we get all feature for an example molecule.m1 = mols[0] feats = ffact.GetFeaturesForMol(m1) print('Number of features found:',len(feats))Number of features found: 14The type (in RDKit called family) of a feature can be retrieved with `GetFamily()`.feats[0].GetFamily()We get the frequency of features types for our example molecule.feats_freq = collections.Counter([x.GetFamily() for x in feats]) feats_freqWe apply the functions shown above to all molecules in our ligand set. We display the frequency of feature types per molecule as DataFrame.# Get feature type frequency per molecule mols_feats_freq = [] for i in mols: feats = [x.GetFamily() for x in ffact.GetFeaturesForMol(i)] feats_freq = collections.Counter(feats) mols_feats_freq.append(feats_freq) # Show data as DataFrame p = pd.DataFrame(mols_feats_freq, index=["m"+str(i) for i in range(1, len(mols)+1)]).fillna(0).astype(int) p.transpose()Furtheron, we concentrate in this talktorial only on the following feature types: hydrogen bond acceptors (acceptors), hydrogen bond donors (donors), and hydrophobic contacts (hydrophobics).We retrieve the feature RDKit objects per feature type and per molecule.acceptors = [] donors = [] hydrophobics = [] for i in mols: acceptors.append(ffact.GetFeaturesForMol(i, includeOnly='Acceptor')) donors.append(ffact.GetFeaturesForMol(i, includeOnly='Donor')) hydrophobics.append(ffact.GetFeaturesForMol(i, includeOnly='Hydrophobe')) features = {"donors": donors, "acceptors": acceptors, "hydrophobics": hydrophobics}Show the pharmacophore features of all ligandsPharmacophore feature types usually are displayed in defined colors, e.g. 
usually hydrogen bond donors, hydrogen bond acceptors, and hydrophobic contacts are colored green, red, and yellow, respectively.feature_colors = {"donors": (0,0.9,0), # Green "acceptors": (0.9,0,0), # Red "hydrophobics": (1,0.9,0)} # YellowRDKit's PyMol integration allows us to draw spheres (representing pharmacophore features) in PyMoL using the following command:`objPMV.server.sphere(loc, sphereRad, colors[i], label, 1)`def visualize_features(objPMV, molecules, feature_type, features, feature_color): ''' This function displays all input molecules and all input features as spheres in PyMOL. A png picture from the PyMOL window is loaded into the Jupyter Notebook and saved as file to disc. At the end, the PyMOL session is cleaned from all objects. ''' # Initialize PyMol in order to remove all previous objects objPMV.server.do("reinitialize") print("Number of " + feature_type + " in all ligands: " + str(sum([len(i) for i in features]))) # Load ligands rangeMols = range(1, len(molecules)+1) for mol, i in zip(molecules, rangeMols): objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False) toStickCmd='cmd.show("sticks","mol_'+str(i)+'")' objPMV.server.do(toStickCmd) i += 1 # Load features for i in range(len(features)): for feature in features[i]: loc = list(feature.GetPos()) sphere_radius = 0.5 label = feature_type + '_%d'%(i+1) objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1) # show the sphere (pharmacophore feature) # Turn camera objPMV.server.do("turn x, -40") # Set background to white objPMV.server.do("bg_color white") # Zoom in on ligands objPMV.server.do("zoom") # Turn on ray tracing for better image quality objPMV.server.do("ray 1800, 1000") # Export as PNG file outputPNG = objPMV.GetPNG(w=1800, h=1000) outputPNG.save("../data/T9/ligands_features_"+feature_type+".png", ) # Display in Jupyter notebook return objPMV.GetPNG(h=300)We use this function to visualize the features for the feature types under consideration. Hydrogen bond donorsfeature_type = "donors" visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])Number of donors in all ligands: 11Hydrogen bond acceptorsfeature_type = "acceptors" visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])Number of acceptors in all ligands: 23Hydrophobic contactsfeature_type = "hydrophobics" visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])Number of hydrophobics in all ligands: 7Collect coordinates of features per feature typeSince we want to cluster features (per feature type), we now collect all coordinates of features (per feature type).features_coord = {"donors": [list(item.GetPos()) for sublist in features["donors"] for item in sublist], "acceptors": [list(item.GetPos()) for sublist in features["acceptors"] for item in sublist], "hydrophobics": [list(item.GetPos()) for sublist in features["hydrophobics"] for item in sublist]}Now, we have the positions of e.g. all acceptor features:features_coord["acceptors"]Generate ensemble pharmacophoresIn order to generate ensemble pharmacophores, we use k-means clustering to cluster features per feature type. Set static parameters for k-means clustering`kq`: With this paramter, we determine the number of clusters `k` per feature type depending on the number of feature points, i.e. 
per feature type:`k` = number_of_features / `kq`# k quotient (kq) used to determine k in k-means: k = number of feature points / kq # kq should be selected so that k (feature clusters) is for all clusters at least 1 and not larger than 4-5 clusters kq = 7Set static parameters for cluster selection`min_cluster_size`: We only want to retain clusters that potentially contain features from most molecules in our ligand ensemble. Therefore, we set this variable to 75% of the number of molecules in our ligand ensemble.`top_cluster_number`: With this parameter, we select only the largest cluster.# Threshold for clustering: number = percentage of threshold value min_cluster_size = int(len(mols) * 0.75) # Show only top features top_cluster_number = 4Define k-means clustering and cluster selection functionsWe define a function that calculates the centers of clusters, which are derived from k-means clustering.def clustering(feature_coord, kd): ''' This functions computes the k-means clustering of input feature coordinates. ''' # Define parameter k as feature number divided by "k quotient" k = math.ceil(len(feature_coord) / kq) k = 2 if k == 1 else k # Tailor-made adaption of k for hydrophobics in for the example in this talktorial print('Clustering: \nVariable k in k-means: %d of %d points\n'%(k, len(feature_coord))) # Initialize of k-means k_means = cluster.KMeans(n_clusters=k) # Compute the k-means clustering k_means.fit(feature_coord) # Return the clusters return k_meansWe define a function that sorts the clusters by size and outputs a list of indices of the largest clusters.def get_clusters(k_means, min_cluster_size, top_cluster_number): ''' This function retrieves information on a input k-means clustering: * gets cluster label for each feature * counts cluster sizes and sort cluster indices by cluster size * selects clusters based on size * returns selected cluster indices ''' # Sort clusters by size and only show largest feature_labels = k_means.labels_ print('Cluster labels for all features: \n%s\n'% feature_labels) feature_labels_count = Counter(feature_labels) print('Cluster label counter: \n%s\n'% feature_labels_count) feature_labels_count = sorted(feature_labels_count.items(), key=operator.itemgetter(1), reverse=True) print('Sorted cluster label counters: \n%s\n'% feature_labels_count) # Get number of the largest clusters, which are larger then the threshold (selected clusters) cluster_indices_sel = [] for cluster_index, cluster_size in feature_labels_count: # feature_labels_count = list of (cluster_index, cluster_size) if cluster_size >= min_cluster_size and top_cluster_number > 0: cluster_indices_sel.append(cluster_index) top_cluster_number -= 1 print('Cluster indices of selected clusters: \n%s\n'% cluster_indices_sel) return cluster_indices_selCluster featuresFor each feature type, we perform the k-means clustering with our defined `clustering` function.k_means = {"donors": clustering(features_coord["donors"], kq), "acceptors": clustering(features_coord["acceptors"], kq), "hydrophobics": clustering(features_coord["hydrophobics"], kq)}Clustering: Variable k in k-means: 2 of 11 points Clustering: Variable k in k-means: 4 of 23 points Clustering: Variable k in k-means: 2 of 7 pointsSelect relevant clustersFor each feature type, we select relevant clusters with our defined `get_clusters` function.print("Hydrogen bond donors\n") cluster_indices_sel_don = get_clusters(k_means["donors"], min_cluster_size, top_cluster_number) print("Hydrogen bond acceptors\n") cluster_indices_sel_acc = 
get_clusters(k_means["acceptors"], min_cluster_size, top_cluster_number) print("Hydrophobic contacts\n") cluster_indices_sel_h = get_clusters(k_means["hydrophobics"], min_cluster_size, top_cluster_number) cluster_indices_sel = {"donors": cluster_indices_sel_don, "acceptors": cluster_indices_sel_acc, "hydrophobics": cluster_indices_sel_h}Get selected cluster coordinatesdef get_selected_cluster_center_coords(k_means, cluster_indices_sel, feature_type): ''' This function retrieves cluster center coordinates for selected clusters (by their indices). ''' # Get cluster centers for a certain feature type cluster_centers = k_means[feature_type].cluster_centers_ # Cast to list and then to pandas Series (for element selection by indices) cluster_centers = pd.Series(cluster_centers.tolist()) # Select cluster centers by indices of selected clusters cluster_centers_sel = cluster_centers[cluster_indices_sel[feature_type]] # Cast to list and return return list(cluster_centers_sel) cluster_centers_sel = {"donors": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "donors"), "acceptors": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "acceptors"), "hydrophobics": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "hydrophobics")} cluster_centers_sel["acceptors"]Show clustersPer feature type, we visualize cluster centers alongside with all molecules and all feature points.def visualize_clusters(objPMV, molecules, feature_type, features, cluster_centers_sel, feature_color): ''' This function displays * all input molecules, * all input features as spheres, and * the resulting cluster centers in PyMoL. A png picture from the PyMoL window is loaded into the Jupyter notebook and saved as file to disc. ''' # Initialize PyMol in order to remove all previous objects objPMV.server.do("reinitialize") print("Number of " + feature_type + " in all ligands: " + str(sum([len(i) for i in features]))) # Load ligands rangeMols = range(1, len(molecules)+1) for mol, i in zip(molecules, rangeMols): objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False) toStickCmd='cmd.show("sticks","mol_'+str(i)+'")' objPMV.server.do(toStickCmd) i += 1 # Load features for i in range(len(features)): for feature in features[i]: loc = list(feature.GetPos()) sphere_radius = 0.5 label = feature_type + '_%d'%(i+1) objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1) # show the sphere (pharmacophore feature) # Load clusters for i in range(len(cluster_centers_sel)): loc = cluster_centers_sel[i] sphere_radius = 1 label = feature_type + '_c%d'%(i+1) objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1) # Turn camera objPMV.server.do("turn x, -40") # Set PyMol styling objPMV.server.do("bg_color white") objPMV.server.do("zoom") objPMV.server.do("ray 1800, 1000") # Export as PNG file outputPNG = objPMV.GetPNG(w=1800, h=1000) outputPNG.save("../data/T9/ligands_features_clusters_"+feature_type+".png") # Display in Jupyter notebook return objPMV.GetPNG(h=300)Hydrogen bond donorsfeature_type = "donors" visualize_clusters(objPMV, mols, feature_type, features[feature_type], cluster_centers_sel[feature_type], feature_colors[feature_type])Number of donors in all ligands: 11Hydrogen bond acceptorfeature_type = "acceptors" visualize_clusters(objPMV, mols, feature_type, features[feature_type], cluster_centers_sel[feature_type], feature_colors[feature_type])Number of acceptors in all ligands: 23Hydrophobic contactsfeature_type = "hydrophobics" visualize_clusters(objPMV, mols, feature_type, 
features[feature_type], cluster_centers_sel[feature_type], feature_colors[feature_type])Number of hydrophobics in all ligands: 7Show ensemble pharmacophoreIn this last step, we combine the clustered pharmacophoric features (i.e. hydrogen bond donors and acceptors as well as hydrophobic contacts), to one ensemble pharmacophore, representing the pharmacophoric properties of the four selected ligands.# Initialize PyMol in order to remove all previous objects objPMV.server.do("reinitialize") # Load ligands rangeMols = range(1, len(mols)+1) for mol, i in zip(mols, rangeMols): objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False) toStickCmd='cmd.show("sticks","mol_'+str(i)+'")' objPMV.server.do(toStickCmd) i += 1 # Load clusters for feature_type in cluster_indices_sel.keys(): centers = cluster_centers_sel[feature_type] for i in range(len(centers)): loc = centers[i] sphere_radius = 1 feature_color = feature_colors[feature_type] label = feature_type + '_c%d'%(i+1) objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1) # Turn camera objPMV.server.do("turn x, -40") # Set PyMol styling objPMV.server.do("bg_color white") objPMV.server.do("zoom") objPMV.server.do("ray 1800, 1000") # Export as PNG file outputPNG = objPMV.GetPNG(w=1800, h=1000) outputPNG.save("../data/T9/ligands_ensemble_ph4.png") # Display in Jupyter notebook objPMV.GetPNG(h=300)autoencoderimport keras from keras.layers import BatchNormalization, Activation, Input from keras.layers import Reshape, UpSampling2D, Conv2D from keras.models import Model from keras.datasets import cifar10 from keras.optimizers import Adam import numpy as np import matplotlib.pyplot as plt #from keras.initializers import he_normal def build_encoder(x): input_ = Input(x.shape[1:]) c = Conv2D(32, (3, 3), padding="same", strides=2)(input_) c = BatchNormalization()(c) c = Activation("relu")(c) c = Conv2D(64, (3, 3), padding="same", strides=2)(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = Conv2D(128, (3, 3), padding="same", strides=2)(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = Conv2D(256, (3, 3), padding="same", strides=2)(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = Conv2D(512, (3, 3), padding="same", strides=2)(c) c = BatchNormalization()(c) c = Activation("relu")(c) return Model(inputs=input_, outputs=c) def build_decoder(x): input_ = Input([int(x.shape[1]/16), int(x.shape[2]/16), 512]) c = Conv2D(512, (3, 3), padding="same")(input_) c = BatchNormalization()(c) c = Activation("relu")(c) c = UpSampling2D((2,2))(c)#4*4 c = Conv2D(256, (3, 3), padding="same")(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = UpSampling2D((2,2))(c)#8*8 c = Conv2D(128, (3, 3), padding="same")(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = UpSampling2D((2,2))(c)#16*16 c = Conv2D(64, (3, 3), padding="same")(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = UpSampling2D((2,2))(c)#32*32 c = Conv2D(32, (3, 3), padding="same")(c) c = BatchNormalization()(c) c = Activation("relu")(c) c = UpSampling2D((2,2))(c)#32*32 c = Conv2D(3, (1, 1), padding="same")(c) c = BatchNormalization()(c) c = Activation("sigmoid")(c) return Model(inputs=input_, outputs=c) def cifar(): # dataset (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 return x_train, x_test, y_train, y_test def train_autoencoder(x, epoch=50): encoder = build_encoder(x) decoder = build_decoder(x) input_ = Input(x.shape[1:]) ae = Model(inputs = input_, 
outputs = decoder(encoder(input_))) ae.compile(loss='mse', optimizer=Adam()) hist = ae.fit(x, x, batch_size=128, epochs=epoch, verbose = False) return ae, encoder x_train, x_test, y_train, y_test = cifar()Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 170500096/170498071 [==============================] - 6s 0us/stepMobileNetV2from keras.layers import GlobalAveragePooling2D, Dense from keras.applications import MobileNetV2 def train_mobile(x_train, y_train): classes = 10 y_train = to_categorical(y_train) Y_test = to_categorical(y_test) model = MobileNetV2(include_top=False, input_shape=x_train.shape[1:], weights=None, alpha=0.5) # 全結合層を付ける layer = GlobalAveragePooling2D()(model.output) out = Dense(classes, activation='softmax')(layer) model = Model(inputs=model.input, outputs=out) model.compile(loss='categorical_crossentropy', optimizer = "sgd", metrics=['accuracy']) #cnnの学習 hist = model.fit(x_train, y_train, validation_data=(x_test, Y_test), verbose=False, batch_size=128, epochs=200) return np.max(hist.history['val_acc'])Resnet50from keras.applications import ResNet50 def train_resnet(x_train, y_train): classes = 10 y_train = to_categorical(y_train) Y_test = to_categorical(y_test) model = ResNet50(include_top=False, input_shape=x_train.shape[1:], weights=None) # 全結合層を付ける layer = GlobalAveragePooling2D()(model.output) out = Dense(classes, activation='softmax')(layer) model = Model(inputs=model.input, outputs=out) model.compile(loss='categorical_crossentropy', optimizer = "sgd", metrics=['accuracy']) #cnnの学習 hist = model.fit(x_train, y_train, validation_data=(x_test, Y_test), verbose=False, batch_size=128, epochs=200) return np.max(hist.history['val_acc'])knn data compressionfrom sklearn.manifold import TSNE from sklearn.neighbors import KNeighborsClassifier from keras.utils.np_utils import to_categorical import seaborn as sns def random_choice(x, label, M, k=10): result_x, result_y = [], [] for i in range(k): result = [] for j in range(len(label)): if label[j] == i: result.append(j) number = np.random.choice(np.arange(0, len(result)), M, replace=False) for o in number: result_x.append(x[result[o]]) result_y.append(i) return np.array(result_x), np.array(result_y) def t_sne(x, label, encoder, fig=True): embed = encoder.predict(x) test_s = embed.reshape((len(embed),-1)) X_reduced = TSNE(n_components=2).fit_transform(test_s) if fig == True: plt.figure(figsize=(12,10)) plt.scatter(X_reduced[:, 0], X_reduced[:, 1],c=label.reshape(-1), cmap="jet") plt.colorbar() plt.show() return X_reduced def data_complexity(embed, label, k=10): result = np.zeros(len(label)) for i in range(k): for j in range(k): if not i == j: class_i, class_j = [], [] for o in range(len(label)): if label[o] == i: class_i.append(o) elif label[o] == j: class_j.append(o) knn = train_knn(embed[class_i], embed[class_j]) for n in class_i: result[n] += knn.predict_proba(embed[n].reshape(1,-1))[:,1] return result def train_knn(xi, xj, M=100, V=1): neigh = KNeighborsClassifier(n_neighbors=3) X = np.vstack((xi,xj)) y = np.zeros(len(xi)+len(xj)) y[len(xi):] = 1 neigh.fit(X, y) return neigh def knn_choice(x, label, score, M, k=10): result_x, result_y = [], [] for i in range(k): temp_x = [] temp_score = [] for j in range(len(label)): if label[j] == i: temp_x.append(x[j]) temp_score.append(score[j]) small_to_big = np.array(temp_score).argsort() for o in range(M): result_x.append(temp_x[small_to_big[o]]) result_y.append(i) return np.array(result_x), np.array(result_y)evaluatedata_size = [500, 1500, 2500, 3500, 
4500, 5000] for i in range(10): print("Try:",i+1) print("encoder training...") model, encoder = train_autoencoder(x_train) print("t-sne training...") X_tsne = t_sne(x_train, y_train, encoder, fig=False) score = data_complexity(X_tsne, y_train) for size in range(6): #random choice print("random choice training...") x_train_small, y_train_small = random_choice(x_train, y_train, M=data_size[size]) random_mobile = train_mobile(x_train_small, y_train_small) random_resnet = train_resnet(x_train_small, y_train_small) #knn choice print("knn choice training...") x_train_small, y_train_small = knn_choice(x_train, y_train, score, M=data_size[size]) knn_mobile = train_mobile(x_train_small, y_train_small) knn_resnet = train_resnet(x_train_small, y_train_small) #result print("Data_size/class is ",data_size[size],", MobileNetV2 accuracy, random:", random_mobile,", knn:",knn_mobile) print("Data_size/class is ",data_size[size],", Resnet50 accuracy, random:", random_resnet,", knn:",knn_resnet)Try: 1 encoder training... WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. 
WARN[...]graphrandom_mobile_500 = [0.3062, 0.3556, 0.3117, 0.324, 0.3211] knn_mobile_500 = [0.3926, 0.4024, 0.3958, 0.3856, 0.3921] random_resnet_500 = [0.4392, 0.4174, 0.4362, 0.4467, 0.4472] knn_resnet_500 = [0.4645, 0.4663, 0.461, 0.4726, 0.462] random_mobile_1500 = [0.434, 0.4408, 0.4311, 0.4522, 0.4392] knn_mobile_1500 = [0.4776, 0.4513, 0.4569, 0.4247, 0.4687] random_resnet_1500 = [0.5052, 0.5028, 0.4974, 0.5023, 0.4932] knn_resnet_1500 = [0.5082, 0.5089, 0.5103, 0.5015, 0.4979] random_mobile_2500 = [0.4866, 0.463, 0.4754, 0.4862, 0.4866] knn_mobile_2500 = [0.4996, 0.5039, 0.4974, 0.4905, 0.5053] random_resnet_2500 = [0.5393, 0.5404, 0.5265, 0.5428, 0.5238] knn_resnet_2500 = [0.5382, 0.5333, 0.5343, 0.5443, 0.5409] mobile_5000 = [0.5672, 0.5654, 0.5814, 0.5716, 0.5809] resnet_5000 = [0.5962, 0.6005, 0.6059, 0.592, 0.5981] import matplotlib.pyplot as plt import numpy as np x = [500, 1500, 2500, 5000] random_boundary_upper = [np.max(random_mobile_500), np.max(random_mobile_1500), np.max(random_mobile_2500), np.max(mobile_5000)] random_boundary_lower = [np.min(random_mobile_500), np.min(random_mobile_1500), np.min(random_mobile_2500), np.min(mobile_5000)] knn_boundary_upper = [np.max(knn_mobile_500), np.max(knn_mobile_1500), np.max(knn_mobile_2500), np.max(mobile_5000)] knn_boundary_lower = [np.min(knn_mobile_500), np.min(knn_mobile_1500), np.min(knn_mobile_2500), np.min(mobile_5000)] plt.figure(figsize=(8,5)) plt.title("MobileNetV2", fontsize=16) plt.xlabel("Data size / 1 class", fontsize=16) plt.ylabel("Accuracy", fontsize=16) plt.xlim(0,5500) plt.plot(x, [np.mean(knn_mobile_500), np.mean(knn_mobile_1500), np.mean(knn_mobile_2500), np.mean(mobile_5000)], "-o", label="Knn", c="r") plt.fill_between(x, knn_boundary_upper, knn_boundary_lower, facecolor='r',alpha=0.3) plt.plot(x, [np.mean(random_mobile_500), np.mean(random_mobile_1500), np.mean(random_mobile_2500), np.mean(mobile_5000)], "-o", label="Random") plt.fill_between(x, random_boundary_upper, random_boundary_lower, facecolor='b',alpha=0.3) plt.legend(fontsize=16) plt.show() import matplotlib.pyplot as plt import numpy as np x = [500, 1500, 2500, 5000] random_boundary_upper = [np.max(random_resnet_500), np.max(random_resnet_1500), np.max(random_resnet_2500), np.max(resnet_5000)] random_boundary_lower = [np.min(random_resnet_500), np.min(random_resnet_1500), np.min(random_resnet_2500), np.min(resnet_5000)] knn_boundary_upper = [np.max(knn_resnet_500), np.max(knn_resnet_1500), np.max(knn_resnet_2500), np.max(resnet_5000)] knn_boundary_lower = [np.min(knn_resnet_500), np.min(knn_resnet_1500), np.min(knn_resnet_2500), np.min(resnet_5000)] plt.figure(figsize=(8,5)) plt.title("ResNet50", fontsize=16) plt.xlabel("Data size / 1 class", fontsize=16) plt.ylabel("Accuracy", fontsize=16) plt.xlim(0,5500) plt.plot(x, [np.mean(knn_resnet_500), np.mean(knn_resnet_1500), np.mean(knn_resnet_2500), np.mean(resnet_5000)], "-o", label="Knn", c="r") plt.fill_between(x, knn_boundary_upper, knn_boundary_lower, facecolor='r',alpha=0.3) plt.plot(x, [np.mean(random_resnet_500), np.mean(random_resnet_1500), np.mean(random_resnet_2500), np.mean(resnet_5000)], "-o", label="Random") plt.fill_between(x, random_boundary_upper, random_boundary_lower, facecolor='b',alpha=0.3) plt.legend(fontsize=16) plt.show()Python for Finance (2nd ed.)**Mastering Data-Driven Finance**© Dr. | The Python Quants GmbH Object Oriented Programming* Class: An abstract definition of a class of objects. For example, a human being.类* Object: An instance of a class. 
For example, Sandra.* Attribute: A feature of the class (class attribute) or of an instance of the class (instance attribute). For example, being a mammal, being male or female, or color of the eyes.* Method: An operation that the class can implement. For example, walking.* Parameters: Input taken by a method to influence its behavior. For example, three steps.* Instantiation: The process of creating a specific object based on an abstract class. Introduction# A simple class implementing the example of a human being might look as follows. class HumanBeing(object): def __init__(self, first_name, eye_color): self.first_name = first_name self.eye_color = eye_color self.position = 0 def walk_steps(self, steps): self.position += steps HumanBeing. Sandra = HumanBeing('Sandra', 'blue') ## instantiation of the class Sandra.first_name Sandra.position Sandra.walk_steps(5) Sandra.position Sandra.walk_steps(-5) Sandra.positionA Brief Look at Standard Objects intn = 5 n.numerator? type(n) n.numerator n.bit_length() n + n 2 * n n.__sizeof__()listl = [1, 2, 3, 4] type(l) l[0] l.append(10) l + l 2 * l sum(l) l.__sizeof__()ndarrayimport numpy as np a = np.arange(16).reshape((4, 4)) a type(a) a.nbytes a.sum() a.cumsum(axis=0) a + a 2 * a sum(a) np.sum(a) a.__sizeof__()DataFrameimport pandas as pd df = pd.DataFrame(a, columns=list('abcd')) type(df) df.columns df.sum() df.cumsum() df + df 2 * df np.sum(df) df.__sizeof__()Basics of Python Classesclass FinancialInstrument(object): pass fi = FinancialInstrument() type(fi) fi fi.__str__() fi.price = 100 fi.price class FinancialInstrument(object): author = '' ## class-level attribute, inherited by every instance below def __init__(self, symbol, price): ## instance attributes set at instantiation self.symbol = symbol self.price = price FinancialInstrument.author aapl = FinancialInstrument('AAPL', 100) aapl.symbol aapl.author aapl.price aapl.price = 105 ## instance attributes can be changed freely aapl.price class FinancialInstrument(FinancialInstrument): ## the new subclass inherits all attributes and methods of the parent class def get_price(self): return self.price def set_price(self, price): self.price = price fi = FinancialInstrument('AAPL', 100) fi. fi.get_price() fi.set_price(105) fi.get_price() fi.price class FinancialInstrument(object): def __init__(self, symbol, price): self.symbol = symbol self.__price = price ## Price is defined as a private instance attribute def get_price(self): return self.__price def set_price(self, price): self.__price = price fi = FinancialInstrument('AAPL', 100) fi.get_price() # causes intentional error fi.__price ## private attribute, cannot be accessed directly fi.
## If the class name is prepended with a single leading underscore, direct access and manipulation are still possible. fi._FinancialInstrument__price ## accessing the private attribute via its mangled name fi._FinancialInstrument__price = 105 fi.get_price() fi.set_price(100) class PortfolioPosition(object): def __init__(self, financial_instrument, position_size): self.position = financial_instrument self.__position_size = position_size def get_position_size(self): return self.__position_size def update_position_size(self, position_size): self.__position_size = position_size def get_position_value(self): return self.__position_size * \ self.position.get_price() pp = PortfolioPosition(fi, 10) pp.get_position_size() pp.get_position_value() pp.position.get_price() pp.position.set_price(105) pp.get_position_value() pp.position.Python Data Model* The Python data model allows you to design classes that consistently interact with basic language constructs of Python.class Vector(object): def __init__(self, x=0, y=0, z=0): self.x = x self.y = y self.z = z v = Vector(1, 2, 3) v.z v ## The default string representation ## The special method __repr__ allows the definition of custom string representations. class Vector(Vector): def __repr__(self): return 'Vector(%r, %r, %r)' % (self.x, self.y, self.z) v = Vector(1, 2, 3) v print(v) ## The behavior of abs() and bool() on the Vector class ## can be defined via the special methods __abs__ and __bool__. class Vector(Vector): def __abs__(self): return (self.x ** 2 + self.y ** 2 + self.z ** 2) ** 0.5 def __bool__(self): return bool(abs(self)) v = Vector(1, 2, -1) abs(v) bool(v) v = Vector() v abs(v) bool(v) # The behavior of + and * can be defined through the special methods __add__ and __mul__. class Vector(Vector): def __add__(self, other): x = self.x + other.x y = self.y + other.y z = self.z + other.z return Vector(x, y, z) def __mul__(self, scalar): return Vector(self.x * scalar, self.y * scalar, self.z * scalar) v = Vector(1, 2, 3) v + Vector(2, 3, 4) v * 2 # The special method __len__ gives the length of an object in number of elements. # The special method __getitem__ makes indexing via the square bracket notation possible. class Vector(Vector): def __len__(self): return 3 def __getitem__(self, i): if i in [0, -3]: return self.x elif i in [1, -2]: return self.y elif i in [2, -1]: return self.z else: raise IndexError('Index out of range.') v = Vector(1, 2, 3) len(v) v[0] v[-2] # causes intentional error v[3] # The special method __iter__ defines the behavior during iterations over elements of an object. class Vector(Vector): def __iter__(self): for i in range(len(self)): yield self[i] v = Vector(1, 2, 3) for i in range(3): print(v[i]) # Indirect iteration using index values (via __getitem__). for coordinate in v: print(coordinate) # Direct iteration over the class instance (using __iter__).
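Because the successive subclasses have accumulated `__len__`, `__getitem__`, and `__iter__`, generic Python constructs now work on `Vector` instances without any extra code. A small illustrative check (not from the book, assuming the `Vector` chain defined in the cells above):

```python
v = Vector(1, 2, 3)
print(list(v))        # [1, 2, 3] -- materialized via __iter__
print(v[0] + v[-1])   # 4 -- indexing handled by __getitem__
print(len(v))         # 3 -- reported by __len__
```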
# Summary class Vector(object): def __init__(self, x=0, y=0, z=0): self.x = x self.y = y self.z = z def __repr__(self): return 'Vector(%r, %r, %r)' % (self.x, self.y, self.z) def __abs__(self): return (self.x ** 2 + self.y ** 2 + self.z ** 2) ** 0.5 def __bool__(self): return bool(abs(self)) def __add__(self, other): x = self.x + other.x y = self.y + other.y z = self.z + other.z return Vector(x, y, z) def __mul__(self, scalar): return Vector(self.x * scalar, self.y * scalar, self.z * scalar) def __len__(self): return 3 def __getitem__(self, i): if i in [0, -3]: return self.x elif i in [1, -2]: return self.y elif i in [2, -1]: return self.z else: raise IndexError('Index out of range.') def __iter__(self): for i in range(len(self)): yield self[i]Hands-On Session No. 3 (Training CNNs on MNIST/CIFAR10)# connect to google drive from google.colab import drive drive.mount('/content/gdrive') # this should print all folders in your google drive main folder !ls "/content/gdrive/My Drive/" # change "your_data_folder" to your data_dir name data_dir = "/content/gdrive/My Drive/colab_data" import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as datasets from torchvision.utils import save_image, make_grid import matplotlib.pyplot as plt import numpy as np from IPython import display import os device = 'cuda' if torch.cuda.is_available() else 'cpu' # device = 'cpu' def mnist_loaders(batch_size, data_dir): transform = transforms.ToTensor() mnist_train = datasets.MNIST(data_dir, train=True, download=True, transform=transform) mnist_test = datasets.MNIST(data_dir, train=False, download=True, transform=transform) train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, pin_memory=True) test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, pin_memory=True) return train_loader, test_loader def cifar_loaders(batch_size, data_dir): normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.225, 0.225, 0.225]) train = datasets.CIFAR10(data_dir, train=True, download=True, transform=transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), normalize, ])) test = datasets.CIFAR10(data_dir, train=False, transform=transforms.Compose([transforms.ToTensor(), normalize])) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False, pin_memory=True) return train_loader, test_loader class AverageValueMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0.0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.countInstructions for excercise Q11a. Implement a linear model.1b. Think, what do you think will happen if weights (and bias..) are initialized to zero? Now run it and check.1c. Did the model managed to learn? Why?1d. Train your model end to end on MNIST(you might want to check out momentum method)**GPU**:Try to move to computing on GPU. For that you'll need to "move" your model and all the data to be in the GPU. You can do it uding ".cuda()" at the end of your model and all input tensors (e.g., samples and targets and any other tensor that you created yourself) Q22a. Implement a two-layer network.2b. 
Think, what do you think will happen if weights (and bias..) are initialized to zero (using the two layer net)? Now run it and check. Why this has happened?2c. Did the model managed to learn? Why?2d. Train your model end to end on MNIST. Q33a. Add a convolution layer to the beginning of the network. (Check Conv2d)(don't forget the non-linearity..)3b. Try to achieve ~1% error on MNIST**Trying other hyperparameters**While you're trying, try different optimizers, different initializations, l2 regularization (try it all after you feel that you can't improve the current model) Q4Try training a model on CIFAR10. (Try to achieve above 65% accuracy, you can try go for 80% but you'll need to go deeper)batch_size = 0 # <<<<------------ Fill this train_loader, test_loader = mnist_loaders(batch_size, data_dir) # Implement the "Flatten" module class Flatten(nn.Module): def forward(self, x): out = 0 # <---------------------- Fill this correctly return out model = nn.Sequential( Flatten(), #nn.Linear(?????, ???????), # <----------------- Put correct dimensions for linear layer ) # next lines check if your model runs correctly X, y = next(iter(test_loader)) pred = model(X) err = pred.view(pred.shape[0], -1).max(dim=1)[1].ne(y).float().mean() def weights_init(m): if isinstance(m, (nn.Conv2d, nn.Linear)): m.weight.data.zero_() ## <--------- Note the weights are inititalized to zero if m.bias is not None: m.bias.data.zero_() lr = 0 # <----------------- Choose learning rate n_epochs = 0 # <------------- choose number of epochs model.train() # weights initizlization model.apply(weights_init) # loss function lossf = torch.nn.CrossEntropyLoss() # optimizer optimizer = torch.optim.SGD(model.parameters(), lr=lr) ######### this is for statistics (no need to touch) ######### trn_losss = [] trn_errs = [] val_errs = [] plt.ion() fig, axes = plt.subplots(1, 2, figsize=(10,5)) ############################################################## # computing the validation error def compute_error(loader, model): run_err = AverageValueMeter() for idx, (samples, targets) in enumerate(train_loader): outputs = model(samples) # compute stats err = outputs.max(1)[1].ne(targets).float().mean().item() run_err.update(err) #break ## <-------------- you might consider uncommenting this for debugging return run_err.avg # iterate on epochs for t in range(n_epochs): trn_err = AverageValueMeter() trn_loss = AverageValueMeter() # iterate on batches for idx, (samples, targets) in enumerate(train_loader): ################## Change these lines #################################### # run the model on the samples and put into the loss function ################## Finish changing here ################################## optimizer.zero_grad() loss.backward() optimizer.step() #################### compute stats (no need to change below here) ############# err = outputs.max(1)[1].ne(targets).float().mean().item() trn_err.update(err) trn_loss.update(loss.item()) if idx % 100 == 0: print('epoch:', t, 'batch:', idx, '/', len(train_loader), 'train-error:', trn_err.avg) break # computing stats val_err = compute_error(test_loader, model) val_errs.append(val_err) trn_losss.append(trn_loss.avg) trn_errs.append(trn_err.avg) display.clear_output(wait=True) axes[0].plot(range(len(trn_losss)), trn_losss, 'k')[0] axes[1].plot(range(len(trn_errs)), trn_errs, 'b')[0] axes[1].plot(range(len(val_errs)), val_errs, 'r')[0] display.display(plt.gcf())Fitting Gaussian Process Models in Python A common applied statistics task involves building regression models to characterize 
non-linear relationships between variables. It is possible to fit such models by assuming a particular non-linear structure, such as a sinusoidal, exponential, or polynomial function, to describe the response of one variable to another. Unless this relationship is obvious from the outset, however, it involves possibly extensive model selection procedures to ensure the most appropriate model is retained. Alternatively, a non-parametric approach can be adopted by defining a set of knots across the variable space and using a spline or kernel regression to describe arbitrary non-linear relationships. However, knot layout procedures are somewhat *ad hoc* and can also involve variable selection. A third alternative is to adopt a **Bayesian non-parametric** strategy, and directly model the unknown underlying function. For this, we can employ Gaussian process models.Use of the term "non-parametric" in the context of Bayesian analysis is something of a misnomer. This is because the fundamental first step in Bayesian modeling is to specify a *full probability model* for the problem at hand, assigning probability densities to all unknown quantities of interest. So, it is difficult to explicitly state a full probability model without the use of probability functions, which are parametric! It turns out that Bayesian non-parametric methods do not imply that there are no parameters, but rather that the number of parameters grows with the size of the dataset. In fact, Bayesian non-parametric models are *infinitely* parametric. Building models with GaussiansWhat if we chose to use Gaussian distributions to model our data? $$p(x \mid \mu, \Sigma) = (2\pi)^{-k/2}|\Sigma|^{-1/2} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}\Sigma^{-1}(x-\mu) \right\}$$There would not seem to be an advantage to doing this, because normal distributions are not particularly flexible distributions in and of themselves. However, adopting a set of Gaussians (a multivariate normal vector) confers a number of advantages. First, the marginal distribution of any subset of elements from a multivariate normal distribution is also normal:$$p(x,y) = \mathcal{N}\left(\left[{\begin{array}{c} {\mu_x} \\ {\mu_y} \end{array}}\right], \left[{\begin{array}{cc} {\Sigma_x} & {\Sigma_{xy}} \\ {\Sigma_{xy}^T} & {\Sigma_y} \end{array}}\right]\right)$$$$p(x) = \int p(x,y) dy = \mathcal{N}(\mu_x, \Sigma_x)$$Also, conditional distributions of a subset of a multivariate normal distribution (conditional on the remaining elements) are normal too:$$p(x|y) = \mathcal{N}(\mu_x + \Sigma_{xy}\Sigma_y^{-1}(y-\mu_y), \Sigma_x-\Sigma_{xy}\Sigma_y^{-1}\Sigma_{xy}^T)$$A Gaussian process generalizes the multivariate normal to infinite dimension. It is defined as an infinite collection of random variables, any finite subset of which have a Gaussian distribution. Thus, the marginalization property is explicit in its definition. Another way of thinking about an infinite vector is as a *function*. When we write a function that takes continuous values as inputs, we are essentially specifying an infinite vector that only returns values (indexed by the inputs) when the function is called upon to do so. By the same token, this notion of an infinite-dimensional Gaussian as a function allows us to work with them computationally: we are never required to store all the elements of the Gaussian process, only to calculate them on demand.So, we can describe a Gaussian process as a ***distribution over functions***.
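As a quick aside (not part of the original walkthrough), the marginalization and conditioning identities above are easy to verify numerically. The following is a minimal NumPy sketch using a toy two-dimensional Gaussian; the means, covariances, and observed value are made-up numbers chosen only for illustration:

```python
import numpy as np

# Toy 2-D Gaussian over (x, y) with assumed parameters
mu = np.array([0.0, 1.0])            # [mu_x, mu_y]
Sigma = np.array([[1.0, 0.8],
                  [0.8, 2.0]])       # [[Sigma_x, Sigma_xy], [Sigma_xy, Sigma_y]]

# Marginal of x: read off the corresponding block
mu_x, Sigma_x = mu[0], Sigma[0, 0]

# Conditional of x given an observed y, using the formula above
y_obs = 2.0
mu_cond = mu[0] + Sigma[0, 1] / Sigma[1, 1] * (y_obs - mu[1])
Sigma_cond = Sigma[0, 0] - Sigma[0, 1] / Sigma[1, 1] * Sigma[1, 0]

print(mu_x, Sigma_x)        # marginal mean and variance of x
print(mu_cond, Sigma_cond)  # conditional mean and variance of x | y
```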
Just as a multivariate normal distribution is completely specified by a mean vector and covariance matrix, a GP is fully specified by a mean *function* and a covariance *function*:$$p(x) \sim \mathcal{GP}(m(x), k(x,x^{\prime}))$$It is the marginalization property that makes working with a Gaussian process feasible: we can marginalize over the infinitely-many variables that we are not interested in, or have not observed. For example, one specification of a GP might be as follows:$$\begin{aligned}m(x) &=0 \\k(x,x^{\prime}) &= \theta_1\exp\left(-\frac{\theta_2}{2}(x-x^{\prime})^2\right)\end{aligned}$$Here, the covariance function is a **squared exponential**, for which values of $x$ and $x^{\prime}$ that are close together result in values of $k$ closer to 1 and those that are far apart return values closer to zero. It may seem odd to simply adopt the zero function to represent the mean function of the Gaussian process -- surely we can do better than that! It turns out that most of the learning in the GP involves the covariance function and its parameters, so very little is gained in specifying a complicated mean function.For a finite number of points, the GP becomes a multivariate normal, with the mean and covariance as the mean function and covariance function evaluated at those points. Sampling from a Gaussian ProcessTo make this notion of a "distribution over functions" more concrete, let's quickly demonstrate how we obtain realizations from a Gaussian process, which result in an evaluation of a function over a set of points. All we will do here is sample from the *prior* Gaussian process, so before any data have been introduced. What we need first is our covariance function, which will be the squared exponential, and a function to evaluate the covariance at given points (resulting in a covariance matrix).%matplotlib inline import numpy as np import pandas as pd import seaborn as sns import matplotlib.pylab as plt np.random.seed(42) def exponential_cov(x, y, params): return params[0] * np.exp( -0.5 * params[1] * np.subtract.outer(x, y)**2)We are going to generate realizations sequentially, point by point, using the lovely conditioning property of multivariate Gaussian distributions. Here is that conditional:$$p(x|y) = \mathcal{N}(\mu_x + \Sigma_{xy}\Sigma_y^{-1}(y-\mu_y), \Sigma_x-\Sigma_{xy}\Sigma_y^{-1}\Sigma_{xy}^T)$$And this is the function that implements it:def conditional(x_new, x, y, params): B = exponential_cov(x_new, x, params) C = exponential_cov(x, x, params) A = exponential_cov(x_new, x_new, params) mu = np.linalg.inv(C).dot(B.T).T.dot(y) sigma = A - B.dot(np.linalg.inv(C).dot(B.T)) return(mu.squeeze(), sigma.squeeze())We will start with a Gaussian process prior with hyperparameters $\theta_0=1, \theta_1=10$. We will also assume a zero function as the mean, so we can plot a band that represents one standard deviation from the mean.θ = [1, 10] σ_0 = exponential_cov(0, 0, θ) xpts = np.arange(-3, 3, step=0.01) plt.errorbar(xpts, np.zeros(len(xpts)), yerr=σ_0, capsize=0) plt.ylim(-3, 3);Let's select an arbitrary starting point to sample, say $x=1$. Since there are no previous points, we can sample from an unconditional Gaussian:x = [1.]
y = [np.random.normal(scale=σ_0)] yWe can now update our confidence band, given the point that we just sampled, using the covariance function to generate new point-wise intervals, conditional on the value $[x_0, y_0]$.σ_1 = exponential_cov(x, x, θ) def predict(x, data, kernel, params, sigma, t): k = [kernel(x, y, params) for y in data] Sinv = np.linalg.inv(sigma) y_pred = np.dot(k, Sinv).dot(t) sigma_new = kernel(x, x, params) - np.dot(k, Sinv).dot(k) return y_pred, sigma_new x_pred = np.linspace(-3, 3, 1000) predictions = [predict(i, x, exponential_cov, θ, σ_1, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.xlim(-3, 3); plt.ylim(-3, 3);So conditional on this point, and the covariance structure we have specified, we have essentially constrained the probable location of additional points. Let's now sample another:m, s = conditional([-0.7], x, y, θ) y2 = np.random.normal(m, s) y2This point is added to the realization, and can be used to further update the location of the next point.x.append(-0.7) y.append(y2) σ_2 = exponential_cov(x, x, θ) predictions = [predict(i, x, exponential_cov, θ, σ_2, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.xlim(-3, 3); plt.ylim(-3, 3);Of course, sampling sequentially is just a heuristic to demonstrate how the covariance structure works. We can just as easily sample several points at once:x_more = [-2.1, -1.5, 0.3, 1.8, 2.5] mu, s = conditional(x_more, x, y, θ) y_more = np.random.multivariate_normal(mu, s) y_more x += x_more y += y_more.tolist() σ_new = exponential_cov(x, x, θ) predictions = [predict(i, x, exponential_cov, θ, σ_new, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.ylim(-3, 3);So as the density of points becomes high, the result will be one realization (function) from the prior GP. Fitting Gaussian Processes in PythonThough it's entirely possible to extend the code above to introduce data and fit a Gaussian processes by hand, there are a number of libraries available for specifying and fitting GP models in a more automated way. I will demonstrate and compare three packages that include classes and functions specifically tailored for GP modeling:- [scikit-learn](http://scikit-learn.org/stable/modules/gaussian_process.html)- [GPflow](http://gpflow.readthedocs.io/en/latest/intro.html)- [PyMC3](https://pymc-devs.github.io/pymc3/)In particular, each of these packages include covariance functions that can be flexibly combined to adequately describe the patterns of non-linearity in the data, along with methods for fitting the parameters of the GP. We will use some simulated data as a test case for comparing the performance of each package. I don't actually recall where I found this data, so I have no details regarding how it was generated . However, it clearly shows some type of non-linear process, corrupted by a certain amount of observation or measurement error so it should be a reasonable task for a Gaussian process approach.%run get_data.py sns.regplot(x, y, fit_reg=False)scikit-learn`scikit-learn` is the premier machine learning package for Python. 
It provides a comprehensive set of supervised and unsupervised learning algorithms, implemented under a consistent API that makes your entire modeling pipeline (from data preparation through output summarization) as frictionless as possible. Included among its library of tools is a Gaussian process module, which recently underwent a complete revision (as of version 0.18). Consistent with the implementation of other machine learning methods in `scikit-learn`, the appropriate interface for using GPs depends on the type of task to which they are being applied. For regression tasks, where we are predicting a continuous response variable, a `GaussianProcessRegressor` is applied by specifying an appropriate covariance function, or **kernel**. Fitting proceeds by maximizing the log of the marginal likelihood, a convenient approach for Gaussian processes that avoids the computationally-intensive cross-validation strategy that is usually employed in choosing optimal hyperparameters for the model. The `GaussianProcessRegressor` does not allow for the specification of the mean function, always assuming it to be the zero function, highlighting the diminished role of the mean function in calculating the posterior.For classification tasks, where the output variable is categorical (or binary), the `GaussianProcessClassifier` is used. This may seem incongruous, using normal distributions to fit categorical data, but it is accommodated by using a latent Gaussian response variable and then transforming it to the unit interval (or, more generally for more than two outcome classes, a simplex). The result of this is a soft, probabilistic classification rather than the hard classification that is common in machine learning algorithms. Similar to the regression setting, the user chooses an appropriate kernel to describe the type of covariance expected in the dataset. Since the posterior of this GP is non-normal, a Laplace approximation is used to obtain a solution, rather than maximizing the marginal likelihood.from sklearn import gaussian_process from sklearn.gaussian_process.kernels import Matern, WhiteKernel, ConstantKernel`scikit-learn` offers a library of about a dozen covariance functions, which they call *kernels*, to choose from. A flexible choice to start with is the Matérn covariance. $$k_{M}(x) = \frac{\sigma^2}{\Gamma(\nu)2^{\nu-1}} \left(\frac{\sqrt{2 \nu} x}{l}\right)^{\nu} K_{\nu}\left(\frac{\sqrt{2 \nu} x}{l}\right)$$where $\Gamma$ is the gamma function and $K$ is a modified Bessel function. The form of covariance matrices sampled from this function is governed by three parameters, each of which controls a property of the covariance.* **amplitude** ($\sigma$) controls the scaling of the output along the y-axis. This parameter is just a scalar multiplier, and is therefore usually left out of implementations of the Matérn function (*i.e.* set to one).* **lengthscale** ($l$) complements the amplitude by scaling realizations on the x-axis. Larger values make points appear closer together.* **roughness** ($\nu$) controls the sharpness of ridges in the covariance function, which ultimately affects the roughness (smoothness) of realizations.Though in general all the parameters are non-negative real-valued, when $\nu = p + 1/2$ for integer-valued $p$, the function can be expressed partly as a polynomial function of order $p$ and generates realizations that are $p$-times differentiable, so values $\nu \in \{3/2, 5/2\}$ are extremely common.
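To get a feel for how the roughness parameter changes the character of realizations, here is a small sketch (not part of the original text) that draws prior samples from Matérn kernels with different values of $\nu$ using `scikit-learn`'s kernel objects; the input grid, fixed lengthscale, number of draws, and random seed are arbitrary choices:

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process.kernels import Matern

X = np.linspace(0, 5, 200).reshape(-1, 1)   # arbitrary input grid
rng = np.random.RandomState(42)

for nu in [0.5, 1.5, 2.5]:
    # prior covariance matrix for this roughness value (lengthscale fixed at 1)
    K = Matern(length_scale=1.0, nu=nu)(X)
    K += 1e-10 * np.eye(len(X))             # small jitter for numerical stability
    draws = rng.multivariate_normal(np.zeros(len(X)), K, size=3)
    plt.plot(X.ravel(), draws.T, lw=1)
plt.title("Prior GP draws for different Matern roughness values");
```

Smaller values of $\nu$ should produce visibly rougher curves, while $\nu = 5/2$ gives smooth, twice-differentiable realizations.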
A GP kernel can be specified as the sum of additive components in `scikit-learn` simply by using the sum operator, so we can include a Mat&232;rn component (`Matern`), an amplitude factor (`ConstantKernel`), as well as an observation noise (`WhiteKernel`):kernel = ConstantKernel() + Matern(length_scale=2, nu=3/2) + WhiteKernel(noise_level=1)As mentioned, the `scikit-learn` API is very consistent across learning methods, and as a result, all functions expect a tabular set of input variables, either as a 2-dimensional NumPy `array` or a pandas `DataFrame`. Since we have only a single input variable here, we can add a second dimension using the `reshape` method:X = x.reshape(-1, 1) X.shapeFinally, we instantiate a `GaussianProcessRegressor` object with our custom kernel, and call its `fit` method, passing the input (`X`) and output (`y`) arrays.gp = gaussian_process.GaussianProcessRegressor(kernel=kernel) gp.fit(X, y)Conveniently, `scikit-learn` displays the configuration that is used for the fitting algorithm each time one of its classes is instantiated. This is useful because it reveals hidden settings that are given default values if not specified by the user; these settings can often influence the resulting fit, so its important that we understand what `fit` has assumed on our behalf. Here, for example, we see that the L-BFGS-B algorithm has been used to optimized the hyperparameters (`optimizer='fmin_l_bfgs_b'`) and that the output variable has not been normalized (`normalize_y=False`). When there is a fear of finding a local, rather than a global, maximum in the marginal likelihood, a non-zero value can be specified for `n_restarts_optimizer`, which will run the optimization algorithm as many times as specified, using randomly-chosen starting coordinates, in the hope that a globally-competitive value can be discovered.The `fit` method endows the returned model object with attributes associated with the fitting procedure; these attributes will all have an underscore (`_`) appended to their names. For example, the `kernel_` attribute will return the kernel used to parameterize the GP, along with their corresponding optimal hyperparameter values:gp.kernel_Along with the `fit` method, each supervised learning class retains a `predict` method that generates predicted outcomes ($y^*$) given a new set of predictors ($X^*$) distinct from those used to fit the model. For a Gaussian process, this is fulfulled by the *posterior predictive distribution*, which is the Gaussian process with the mean and covariance functions updated to their posterior forms, after having been fit. $$p(y^*|y, x, x^*) = \mathcal{GP}(m^*(x^*), k^*(x^*))$$where the posterior mean and covariance functions are calculated as:$$\begin{aligned}m^*(x^*) &= k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}y \\k^*(x^*) &= k(x^*,x^*)+\sigma^2 - k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}k(x^*,x)\end{aligned}$$x_pred = np.linspace(-6, 6).reshape(-1,1) y_pred, sigma = gp.predict(x_pred, return_std=True)Notice that we can calculate a prediction for arbitrary inputs $X^*$. To get a sense of the form of the posterior over a range of likely inputs, we can pass it a linear space as we have done above. 
`predict` optionally returns posterior standard deviations along with the expected value, so we can use this to plot a confidence region around the expected function:plt.figure(figsize=(10,8)) sns.regplot(x, y, fit_reg=False, label='Data') plt.plot(x_pred, y_pred, color='grey', label='Prediction') plt.fill(np.concatenate([x_pred, x_pred[::-1]]), np.concatenate([y_pred - 2*sigma, (y_pred + 2*sigma)[::-1]]), alpha=.5, fc='grey', ec='None', label='95% CI') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.xlim(-6, 6) plt.ylim(-3, 3) plt.legend(loc='lower left');GPflowOne of the early projects to provide a standalone package for fitting Gaussian processes in Python was [GPy](http://sheffieldml.github.io/GPy/) by the Sheffield machine learning group. Much like `scikit-learn`'s `gaussian_process` module, GPy provides a set of classes for specifying and fitting Gaussian processes, with a large library of kernels that can be combined as needed. GPflow is a re-implementation of the GPy library, using Google's popular [TensorFlow](https://www.tensorflow.org) library as its computational backend. The main advantage of this change for most users is that it allows the use of more modern methods for fitting larger GP models, namely variational inference and Markov chain Monte Carlo.Let's demonstrate GPflow usage by fitting our simulated dataset. The API is slightly more general than `scikit-learns`, as it expects tabular inputs for both the predictors (features) and outcomes. Hence, we must reshape `y` to a tabular format:Y = y.reshape(-1,1)To mirror the `scikit-learn` model, we will again specify a Matèrn covariance function. GPflow has two user-facing subclasses, one which fixes the roughness parameter to 3/2 (`Matern32`) and another to 5/2 (`Matern52`). Amplitude is an included parameter (`variance`), so we do not need to include a separate constant kernel.import GPflow k = GPflow.kernels.Matern32(1, variance=1, lengthscales=1.2)There are six different GP classes, chosen according to the covariance structure (full vs. sparse approximation) and the likelihood of the model (Gaussian vs. non-Gaussian). The main innovation of GPflow is that non-conjugate models (*i.e.* those with a non-normal likelihood) can be fit either using Markov chain Monte Carlo or an approximation via variational inference.Since our model involves a straightforward conjugate Gaussian likelihood, we can use the `GPR` (Gaussian process regression) class.m = GPflow.gpr.GPR(X, Y, kern=k)We can access the parameter values simply by printing the regression model object.print(m)model.kern.lengthscales transform:+ve prior:None [ 1.2] model.kern.variance transform:+ve prior:None [ 1.] model.likelihood.variance transform:+ve prior:None [ 1.]Notice that, in addition to the hyperparameters of the Matèrn kernel, there is an additional variance parameter that is associated with the normal likelihood. 
We can set it to non-default values by direct assignment.m.likelihood.variance = 0.01This model is fit using the `optimize` method, which runs a gradient ascent algorithm on the model likelhiood (it uses the `minimize` function from SciPy as a default optimizer).m.optimize() mThe model object includes a `predict_y` attribute, which we can use to obtain expected values and variances on an arbitrary grid of input values.plt.figure(figsize=(10,8)) xx = np.linspace(-6, 6).reshape(-1,1) def plot(m): mean, var = m.predict_y(xx) plt.plot(X, Y, 'kx', mew=2) plt.plot(xx, mean, 'b', lw=2) plt.fill_between(xx[:,0], mean[:,0] - 2*np.sqrt(var[:,0]), mean[:,0] + 2*np.sqrt(var[:,0]), color='blue', alpha=0.2) plt.xlim(-6, 6) plt.ylim(-3, 3) plot(m)You might have noticed that there is nothing particularly Bayesian about what we have done here. No priors have been specified, and we have just performed maximum likelihood to obtain a solution.m.kern.variance.prior = GPflow.priors.Gamma(1,0.1) m.kern.lengthscales.prior = GPflow.priors.Gamma(1,0.1)In addition to specifying priors on the hyperparameters, we can also fix values if we have information to justify doing so. For example, we may know the measurement error of our data-collecting instrument, so we can assign that error value as a constant.m.likelihood.variance = 0.1 m.likelihood.variance.fixed = True m.optimize()Though we may feel satisfied that we have a proper Bayesian model, the end result is very much the same. All we have done is added the log-probabilities of the priors to the model, and performed optimization again. This time, the result is a maximum *a posteriori* (MAP) estimate.print(m) plt.figure(figsize=(10,8)) def plot(m): mean, var = m.predict_y(xx) plt.plot(X, Y, 'kx', mew=2) plt.plot(xx, mean, 'b', lw=2) plt.fill_between(xx[:,0], mean[:,0] - 2*np.sqrt(var[:,0]), mean[:,0] + 2*np.sqrt(var[:,0]), color='blue', alpha=0.2) plt.xlim(-6, 6) plt.ylim(-3, 3) plot(m)To perform a "Fully Bayesian" analysis, we can use the more general `GPMC` class, which jointly samples over the parameters and the functions. For this, we need to specify a likelihood as well as priors for the kernel parameters. Let's change the model slightly and use a Student's T likelihood, which will be more robust to the influence of extreme values.l = GPflow.likelihoods.StudentT() m = GPflow.gpmc.GPMC(X, Y, kern=k, likelihood=l) m.kern.variance.prior = GPflow.priors.Gamma(1,1) m.kern.lengthscales.prior = GPflow.priors.Gamma(1,1)Rather than `optimize`, we fit the `GPMC` model using the `sample` method. This will employ [Hamiltonian Monte Carlo](https://arxiv.org/pdf/1206.1901.pdf) (HMC), an efficient form of Markov chain Monte Carlo that takes advantage of gradient information to improve posterior sampling. The TensorFlow library provides automatic differentiation functions that allow the gradient to be calculated for arbitrary models. 
The HMC algorithm requires the specification of hyperparameter values that determine the behavior of the sampling procedure; these parameters can be tuned.trace = m.sample(1000, verbose=True, epsilon=0.03, Lmax=15)Iteration: 100 Acc Rate: 95.0 % Iteration: 200 Acc Rate: 92.0 % Iteration: 300 Acc Rate: 98.0 % Iteration: 400 Acc Rate: 94.0 % Iteration: 500 Acc Rate: 97.0 % Iteration: 600 Acc Rate: 98.0 % Iteration: 700 Acc Rate: 95.0 % Iteration: 800 Acc Rate: 99.0 % Iteration: 900 Acc Rate: 96.0 % Iteration: 1000 Acc Rate: 95.0 %We end up with a trace containing sampled values from the kernel parameters, which can be plotted to get an idea about the posterior uncertainty in their values, after being informed by the data.parameter_samples = m.get_samples_df(trace) for col in parameter_samples.columns.sort_values()[1:]: parameter_samples[col].hist(label=col.split('.')[-1], alpha=0.4, bins=15) plt.legend()We can then go back and generate predictions from the posterior GP, and plot several of them to get an idea of the predicted underlying function.realizations = [] for sample in trace[-100:]: m.set_state(sample) realizations.append(m.predict_f_samples(xx, 1).squeeze()) realizations = np.vstack(realizations) plt.figure(figsize=(12, 6)) line, = plt.plot(xx.squeeze(), np.mean(realizations, 0), lw=2) for draw in realizations: plt.plot(xx.squeeze(), draw, color=line.get_color(), alpha = 0.1) plt.plot(X, Y, 'kx', mew=2) plt.xlim(-6, 6) plt.ylim(-3, 3);Thus, it may benefit users with models that have unusual likelihood functions or models that are difficult to fit using gradient ascent optimization methods to use GPflow in place of `scikit-learn`. Moreover, if inference regarding the GP hyperparameters are of interest, or if prior information exists that would be useful in obtaining more accurate estimates, then a fully Bayesian approach such as that offered by GPflow's model classes is necessary. PyMC3The PyMC project is a very general Python package for probabilistic programming that can be used to fit nearly any Bayesian model (*disclosure*: I have been a developer of PyMC since its creation). Similarly to GPflow, the current version (PyMC3) has been re-engineered from earlier versions to rely on a modern computational backend. Rather than TensorFlow, PyMC3 is build on top of [Theano](http://deeplearning.net/software/theano/), an engine for evaluating expressions defined in terms of operations on tensors. It works in much the same way as TensorFlow, at least superficially, providing automatic differentiation, parallel computation, and dynamic generation of efficient, compiled code.import pymc3 as pm import theano.tensor as ttPyMC3 is a Bayesian modeling *toolkit*, providing mean functions, covariance functions and probability distributions that can be combined as needed to construct a Gaussian process model. Models are specified by declaring variables and functions of variables to specify a fully-Bayesian model. Declarations are made inside of a `Model` context, which automatically adds them to the model in preparation for fitting. 
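As a minimal illustration of the `Model` context idea (a toy example with assumed data and priors, separate from the GP model built next), every variable declared inside the `with` block is registered with the model automatically:

```python
import numpy as np
import pymc3 as pm

data = np.random.randn(100)  # assumed toy data

with pm.Model() as toy_model:
    # declared inside the context, so both priors and the likelihood
    # are added to toy_model without any explicit registration step
    mu = pm.Normal('mu', mu=0, sd=10)
    sigma = pm.HalfNormal('sigma', sd=1)
    obs = pm.Normal('obs', mu=mu, sd=sigma, observed=data)
    trace = pm.sample(1000)
```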
Let's start out by instantiating a model, and adding a Matèrn covariance function and its hyperparameters:with pm.Model() as gp_fit: ρ = pm.Gamma('ρ', 1, 1) η = pm.Gamma('η', 1, 1) K = η * pm.gp.cov.Matern32(1, ρ)We can continue to build upon our model by speficying a mean function (this is redundant here, since a zero function is assumed when not specified) and an observation noise variable, which we will give a half-Cauchy prior:with gp_fit: M = pm.gp.mean.Zero() σ = pm.HalfCauchy('σ', 2.5)The Gaussian process model is encapsulated within the `GP` class, parameterized by the mean function, covariance function, and observation error specified above. Since the outcomes of the GP have been observed, we provide that data to the instance of `GP` in the `observed` argument as a dictionary. These are fed to the underlying multivariate normal likelihood.with gp_fit: y_obs = pm.gp.GP('y_obs', mean_func=M, cov_func=K, sigma=σ, observed={'X':X, 'Y':y})The `sample` function called inside the `Model` context fits the model using MCMC sampling. By default, PyMC3 uses an auto-tuning version of HMC called the [No U-turn Sampler](https://arxiv.org/abs/1111.4246) (NUTS) that picks appropriate values for the path length and step size parameters that we saw in GPflow's `sample` calls. Additionally, to initialize the sampler to reasonable starting parameter values, a variational inference algorithm is run before NUTS, to yield approximate posterior mean values for all the parametes.with gp_fit: trace = pm.sample(2000, n_init=20000) pm.traceplot(trace[-1000:], varnames=['ρ', 'σ', 'η']);In addition to fitting the model, we would like to be able to generate predictions. This implies sampling from the posterior predictive distribution, which if you recall is just some linear algebra:$$\begin{aligned}m^*(x^*) &= k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}y \\k^*(x^*) &= k(x^*,x^*)+\sigma^2 - k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}k(x^*,x)\end{aligned}$$PyMC3 allows for predictive sampling after the model is fit, using the recorded values of the model parameters to generate samples. The `sample_gp` function implements the predictive GP above, called with the sample trace, the GP variable and a grid of points over which to generate realizations:Z = np.linspace(-6, 6, 100).reshape(-1, 1) with gp_fit: gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50) fig, ax = plt.subplots(figsize=(14,5)) [ax.plot(Z, x, color=line.get_color(), alpha=0.3) for x in gp_samples] # overlay the observed data ax.plot(X, y, 'o', color="k", ms=10); ax.set_xlabel("x"); ax.set_ylabel("f(x)"); ax.set_title("Posterior predictive distribution");The Python language has many similarities to Perl, C, and Java. However, there are some definite differences between the languages. First Python ProgramLet us execute programs in different modes of programming.> Interactive Mode Programming Invoking the interpreter without passing a script file as a parameter brings up the following prompt −print ("Hello, Python!")If you are running new version of Python, then you would need to use print statement with parenthesis as in print ("Hello, Python!");. However in Python version 2.4.3, this produces the following result −```Hello, Python!```# Press ctrl + return print ("Hello, Python!")> Script Mode ProgrammingInvoking the interpreter with a script parameter begins execution of the script and continues until the script is finished. When the script is finished, the interpreter is no longer active.Let us write a simple Python program in a script. 
Python files have extension .py. Type the following source code in a test.py file −```print ("Hello, Python!")```We assume that you have Python interpreter set in PATH variable. Now, try to run this program as follows −```$ python test.py```Let us try another way to execute a Python script. Here is the modified test.py file −```!/usr/bin/pythonprint ("Hello, Python!")```We assume that you have Python interpreter available in /usr/bin directory. Now, try to run this program as follows −```$ chmod +x test.py This is to make file executable$./test.py```This produces the following result −```Hello, Python!``` Python IdentifiersA Python identifier is a name used to identify a variable, function, class, module or other object. An identifier starts with a letter A to Z or a to z or an underscore (_) followed by zero or more letters, underscores and digits (0 to 9).Python does not allow punctuation characters such as @, $, and % within identifiers. Python is a case sensitive programming language. Thus, **Manpower** and **manpower** are two different identifiers in Python.Here are naming conventions for Python identifiers −- Class names start with an uppercase letter. All other identifiers start with a lowercase letter.- Starting an identifier with a single leading underscore indicates that the identifier is private.- Starting an identifier with two leading underscores indicates a strongly private identifier.- If the identifier also ends with two trailing underscores, the identifier is a language-defined special name. | | | ||---------- |--------- |-------- || and | exec | not || assert | finally | or || break | for | pass || class | from | print || continue | global | raise || def | if | return || del | import | try || elif | in | while || else | is | with || except | lambda | yield | Lines and IndentationPython provides no braces to indicate blocks of code for class and function definitions or flow control. Blocks of code are denoted by line indentation, which is rigidly enforced.The number of spaces in the indentation is variable, but all statements within the block must be indented the same amount. For example −if True: print ("True") else: print ("False")However, the following block generates an error −if True: print ("Answer") print ("True") else: print ("Answer") print ("False")Thus, in Python all the continuous lines indented with same number of spaces would form a block. The following example has various statement blocks −**Note** − Do not try to understand the logic at this point of time. Just make sure you understood various blocks even if they are without braces.#!/usr/bin/python import sys try: # open file stream file = open(file_name, "w") except IOError: print ("There was an error writing to"), file_name sys.exit() print ("Enter '"), file_finish, print ("' When finished") while file_text != file_finish: file_text = raw_input("Enter text: ") if file_text == file_finish: # close the file file.close break file.write(file_text) file.write("\n") file.close() file_name = raw_input("Enter filename: ") if len(file_name) == 0: print ("Next time please enter something") sys.exit() try: file = open(file_name, "r") except IOError: print ("There was an error reading file") sys.exit() file_text = file.read() file.close() print (file_text)Multi-Line StatementsStatements in Python typically end with a new line. Python does, however, allow the use of the line continuation character (\) to denote that the line should continue. 
For example −total = item_one + \ item_two + \ item_threeStatements contained within the [], {}, or () brackets do not need to use the line continuation character. For example −days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']Quotation in PythonPython accepts single ('), double (") and triple (''' or """) quotes to denote string literals, as long as the same type of quote starts and ends the string.The triple quotes are used to span the string across multiple lines. For example, all the following are legal −word = 'word' sentence = "This is a sentence." paragraph = """This is a paragraph. It is made up of multiple lines and sentences."""Comments in PythonA hash sign () that is not inside a string literal begins a comment. All characters after the and up to the end of the physical line are part of the comment and the Python interpreter ignores them.#!/usr/bin/python # First comment print ("Hello, Python!") # second commentThis produces the following result −```Hello, Python!``` You can type a comment on the same line after a statement or expression −name = "Madisetti" # This is again commentYou can comment multiple lines as follows −# This is a comment. # This is a comment, too. # This is a comment, too. # I said that already.Following triple-quoted string is also ignored by Python interpreter and can be used as a multiline comments:''' This is a multiline comment. '''Using Blank LinesA line containing only whitespace, possibly with a comment, is known as a blank line and Python totally ignores it.In an interactive interpreter session, you must enter an empty physical line to terminate a multiline statement. Waiting for the UserThe following line of the program displays the prompt, the statement saying “Press the enter key to exit”, and waits for the user to take action −#!/usr/bin/python raw_input("\n\nPress the enter key to exit.")Here, "\n\n" is used to create two new lines before displaying the actual line. Once the user presses the key, the program ends. This is a nice trick to keep a console window open until the user is done with an application. Multiple Statements on a Single LineThe semicolon ( ; ) allows multiple statements on the single line given that neither statement starts a new code block. Here is a sample snip using the semicolon −import sys; x = 'foo'; sys.stdout.write(x + '\n')Multiple Statement Groups as SuitesA group of individual statements, which make a single code block are called suites in Python. Compound or complex statements, such as if, while, def, and class require a header line and a suite.Header lines begin the statement (with the keyword) and terminate with a colon ( : ) and are followed by one or more lines which make up the suite. 
For example −if expression : suite elif expression : suite else : suitePandas Interoperability# Install dependencies for google colab import sys if 'google.colab' in sys.modules: %pip install -r https://raw.githubusercontent.com/thomasjpfan/ml-workshop-intermediate-1-of-2/master/requirements.txt import sklearn assert sklearn.__version__.startswith("1.0"), "Plese install scikit-learn 1.0" import numpy as np sklearn.set_config(display='diagram')Categorical Dataimport pandas as pd df_train = pd.DataFrame({ "pet": ["snake", "dog", "cat", "cow"], })OridinalEncoderfrom sklearn.preprocessing import OrdinalEncoder ord_encoder = OrdinalEncoder() ord_encoder.fit_transform(df_train) ord_encoder.categories_ df_test = pd.DataFrame({ "pet": ["cow", "cat"] }) df_test ord_encoder.transform(df_test)Categories that are unknown during `fit`df_test_unknown = pd.DataFrame({ "pet": ["bear"] }) try: ord_encoder.transform(df_test_unknown) except ValueError as err: print(err)How to handle unknown categories in OridinalEncoder? Provide all the categories in the constructordf_train ord_encoder = OrdinalEncoder( categories=[['snake', 'dog', 'cat', 'cow', 'bear']]) ord_encoder.fit_transform(df_train) df_test_unknown ord_encoder.transform(df_test_unknown)Setting a value for unknown values directlyord_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1) ord_encoder.fit_transform(df_train) df_test_unknown ord_encoder.transform(df_test_unknown)OneHotEncoderfrom sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder() X_trans = ohe.fit_transform(df_train) X_transBy default it is sparse!X_trans.toarray()Switch to denseohe = OneHotEncoder(sparse=False) ohe.fit_transform(df_train)Unknown categories during transform?df_test_unknown # this will fail try: ohe.transform(df_test_unknown) except ValueError as exc: print(exc)OHE can handle unknownsohe = OneHotEncoder(sparse=False, handle_unknown='ignore') ohe.fit(df_train) ohe.transform(df_test_unknown) ohe.categories_Two categorical featuresdf_train = pd.DataFrame({ "pet": ["cat", "dog", "snake"], "city": ["New York", "London", "London"] }) ohe.fit(df_train) ohe.categories_ ohe.transform(df_train)Column Transformer!import pandas as pd X_df = pd.DataFrame({ 'age': [10, 20, 15, 5, 20, 14], 'height': [5, 7, 6.5, 4.1, 5.4, 5.4], 'pet': ['dog', 'snake', 'cat', 'dog', 'cat', 'cat'] }) X_dfWith OridinalEncoderfrom sklearn.compose import ColumnTransformer from sklearn.preprocessing import StandardScaler ct = ColumnTransformer([ ('numerical', StandardScaler(), ['age', 'height']), ('categorical', OrdinalEncoder(), ['pet']) ]) ct.fit_transform(X_df)With OneHotEncoderct = ColumnTransformer([ ('numerical', StandardScaler(), ['age', 'height']), ('categorical', OneHotEncoder(), ['pet']) ]) ct.fit_transform(X_df)Titanic datasetfrom sklearn.datasets import fetch_openml titanic = fetch_openml(data_id=40945, as_frame=True) X, y = titanic.data, titanic.target y X.head()Are three categories already encoded in the dataset?X.dtypesAre there missing values in the dataset?missing_values = pd.concat({"na_cnt": X.isna().sum(), "dtypes": X.dtypes}, axis='columns') missing_valuesSplit data into training and test setfrom sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, stratify=y, random_state=42)ColumnTransformermissing_valuesNumerical preprocessingnumerical_features = ['age', 'sibsp', 'parch', 'fare', 'body'] from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer num_prep = Pipeline([ 
('imputer', SimpleImputer()), ('scaler', StandardScaler()) ]) num_prepRunning only on numerical featuresnum_trans = num_prep.fit_transform(X_train[numerical_features]) num_trans num_trans.shapeCategorical preprocessingcategorical_features = ['sex', 'embarked'] cat_prep = Pipeline([ ('imputer', SimpleImputer(strategy='constant', fill_value='sk_missing')), ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False)) ]) cat_prepRunning only on the categorical featurescat_trans = cat_prep.fit_transform(X_train[categorical_features]) cat_trans cat_trans.shapeColumnTransformer!ct = ColumnTransformer([ ('numerical', num_prep, numerical_features), ('categorical', cat_prep, categorical_features) ]) ct X_trans = ct.fit_transform(X_train) X_trans[:, :5] X_trans[:, 5:] X_trans.shapeLinear modelfrom sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression log_reg = Pipeline([ ('preprocess', ct), ('log_reg', LogisticRegression(random_state=42)) ]) log_reg log_reg.fit(X_train, y_train) log_reg.score(X_train, y_train)Random Forestfrom sklearn.ensemble import RandomForestClassifier rf = Pipeline([ ('preprocess', ct), ('log_reg', RandomForestClassifier(random_state=42)) ]) rf rf.fit(X_train, y_train) rf.score(X_train, y_train)Exercise 11. Load the ames housing dataset using `sklearn.datasets.fetch_openml` with `data_id=41211` and `as_frame=True`. - - **Hint**: You may ignore the version warning1. How many samples and features are there?1. Find and save the categorical and numerical feature names. - **Hint**: You can use `X.select_dtypes(include='category').columns` and `X.select_dtypes(include='number').columns`1. What are the categorical feature names? What are the numerical feature names?1. Split the data into training and test dataset.1. Build pipeline using a `ColumnTransformer`, `OrdinalEncoder`, and `sklearn.ensemble.HistGradientBoostingRegressor` and fit on the train dataset. - **Hint**: Use `'passthrough'` option for numerical columns. - **Hint**: Use `OrdinalEncoder` with `handle_unknown='use_encoded_value'` and `unknown_value=-1`.1. Evaluate the model on the test set.1. **Extra**: Use `sklearn.compose.make_column_selector` instead of passing the feature names directly.from sklearn.datasets import fetch_openml from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.compose import make_column_selector**If you are running locally**, you can uncomment the following cell to load the solution into the cell. 
On **Google Colab**, [see solution here](https://github.com/thomasjpfan/ml-workshop-intermediate-1-of-2/blob/master/notebooks/solutions/04-ex01-solutions.py).# %load solutions/04-ex01-solutions.pyData Preprocessing Zonedf_t.drop(['Loan_ID'],inplace=True,axis=1) # Checking number of the columns len((df_t)) # Shape of the dataset df_t.shape # Checking the name of the columns df_t.columns # checking the null values df_t.isnull() # Checking the sum of the null vlaues df_t.isnull().sum() # Visualize the null values using heatmap sns.heatmap(df_t.isnull(),yticklabels=False,cbar=False) # Data Observation df_t.info() RangeIndex: 367 entries, 0 to 366 Data columns (total 11 columns): Gender 356 non-null object Married 367 non-null object Dependents 357 non-null object Education 367 non-null object Self_Employed 344 non-null object ApplicantIncome 367 non-null int64 CoapplicantIncome 367 non-null int64 LoanAmount 362 non-null float64 Loan_Amount_Term 361 non-null float64 Credit_History 338 non-null float64 Property_Area 367 non-null object dtypes: float64(3), int64(2), object(6) memory usage: 31.6+ KBHandling the Null / Missing Values in Numerical Values# Using fillna methods to fillup the maean of the features df_t['LoanAmount'] = df_t['LoanAmount'].fillna(df_t['LoanAmount'].mean()) df_t['Loan_Amount_Term'] = df_t['Loan_Amount_Term'].fillna(df_t['Loan_Amount_Term'].mean()) df_t['Credit_History'] = df_t['Credit_History'].fillna(df_t['Credit_History'].mean()) df_t.isnull().sum() # After handling the Numerical missing valeu sns.heatmap(df_t.isnull())Handling the Null / Missing Values in Gategorical Values# Knowing the value counts df_t['Gender'].value_counts() df_t['Dependents'].value_counts() df_t['Self_Employed'].value_counts() # Converting the numerical values using get_dummies Gender = pd.get_dummies(df_t['Gender'],drop_first=True) Dependents = pd.get_dummies(df_t['Dependents'],drop_first=True) Self_Employed = pd.get_dummies(df_t['Self_Employed'],drop_first=True) # Displays the Converted features Gender.head() Dependents.head() Self_Employed.head() df_t.drop(['Gender','Dependents','Self_Employed'],axis=1,inplace=True) df_t.head() # Concating the existing Features with new one df_t = pd.concat([df_t,Gender,Dependents,Self_Employed],axis=1) df_t.head() # Converting Other Gategorical Features # Counts values df_t['Married'].value_counts().head() df_t['Education'].value_counts().head() df_t['Property_Area'].value_counts().head() Married = pd.get_dummies(df_t['Married'],drop_first=True) Education = pd.get_dummies(df_t['Education'],drop_first=True) Property_Area = pd.get_dummies(df_t['Property_Area'],drop_first=True) df_t.drop(['Married','Education','Property_Area'],axis=1,inplace=True) # Concating the existing Features with new one df_t = pd.concat([df_t,Married,Education,Property_Area],axis=1) df_t.head() df_t.to_csv('formulatedtest.csv',index=False) sns.countplot(y = 'Male',hue='y',data=df_t)Prioritizing Variants In Patient Genome Data, CCBB, UCSD () Table of Contents* [Background](background)* [Decision Tree Development](decision-tree-development) * [Version One](version-one) * [Version Two](version-two) * [Version Three](version-three) * [Version Four](version-four)* [Next Steps](next-steps) BackgroundI have been asked to help develop an in-house pipeline for DNA variant ranking. The request was:> Simply put, we need to develop a consistent ranking strategy for DNA variants - perhaps one that mirrors the protocol at [ http://www.semel.ucla.edu/research/variant-filtering-ranking-pipeline ]. 
For the most part, Guorong has the pipeline built already for generating fully annotated variant tables ... [snip /] ... It’s a matter of coming up with the filtering rules on the columns of data. I read the link, which describes a four-tier filtering pipeline in which tier 1 is disruptive variants in coding exons (stop-gain, splice-site, short indels), tier 2 is damaging coding variants (predicted mis-sense), and tier 3 and 4 are variants conserved or accelerated in multiple lineages or just one lineage, respectively. I've been asked to produce a decision tree, akin to this one shown in figure 10.20 of Exploring Personal Genomics (2013) by and : ![](images/exploring_personal_genomics_by_dudley_fig_10-20.png) Information for making the scoring decisions is already being produced by our existing pipeline using the following call to annotate the .vcf variant file using ANNOVAR (see http://annovar.openbioinformatics.org/en/latest/ ):perl table_annovar.pl /mnt/data/workspace/data/normal_blood_wgs_hli_B1000000012_S1.final.vqsr.vcf.avinput humandb/ -buildver hg19 -out /mnt/data/workspace/data/normal_blood_wgs_hli_B1000000012_S1.vqsr_annotated -remove -protocol knownGene,tfbsConsSites,cytoBand,targetScanS,genomicSuperDups,gwasCatalog,esp6500siv2_all,1000g2014oct_all,snp138,ljb26_all,cg46,cg69,popfreq_all_20150413,clinvar_20150330,cosmic70,nci60 -operation g,r,r,r,r,r,f,f,f,f,f,f,f,f,f,f -nastring .Unzipping Data### Unzipping Dataset train_file = 'train' test_file = 'test' with zipfile.ZipFile("/CV_projects/catsDog_pytorch/train.zip","r") as train: train.extractall("") with zipfile.ZipFile("/CV_projects/catsDog_pytorch/test.zip","r") as test: test.extractall("") train_list = glob.glob(os.path.join(train_file,'*.jpg')) # Get list of all jpg files/dirs in data folder trian_dir test_list = glob.glob(os.path.join(test_file, '*.jpg')) print(f"Train Data: {len(train_list)}") print(f"Test Data: {len(test_list)}") print(os.listdir(train_file)[:5])['cat.0.jpg', 'cat.1.jpg', 'cat.10.jpg', 'cat.100.jpg', 'cat.1000.jpg']Data visualizationfrom PIL import Image random_idx = np.random.randint(1,25000,size=10) fig = plt.figure() i=1 for idx in random_idx: ax = fig.add_subplot(2,5,i) img = Image.open(train_list[idx]) plt.imshow(img) i+=1 plt.axis('off') plt.show() device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')) print(f"Training on device : {device}.") label = [path.split('\\')[-1].split('.')[0] for path in train_list] print(type(label)) train_list[6071].split('\\')[-1].split('.')[0]Data splitfrom sklearn.model_selection import train_test_split train_list, val_list = train_test_split(train_list, test_size=0.2) print(len(train_list)) print(len(val_list))20000 5000Data Augumentationtrain_transforms = transforms.Compose([ transforms.Resize((224, 224)), transforms.RandomCrop(224), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(p=0.7), transforms.ToTensor(), ]) val_transforms = transforms.Compose([ transforms.Resize((224, 224)), transforms.RandomCrop(224), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(p=0.7), transforms.ToTensor(), ]) test_transforms = transforms.Compose([ transforms.Resize((224, 224)), transforms.RandomCrop(224), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(p=0.7), transforms.ToTensor() ])Datasets and Dataloadersclass dataset(torch.utils.data.Dataset): def __init__(self,file_list,transform=None): self.file_list = file_list self.transform = transform #dataset length def __len__(self): self.filelength = 
len(self.file_list) return self.filelength #load images def __getitem__(self,idx): img_path = self.file_list[idx] img = Image.open(img_path) img_transformed = self.transform(img) label = img_path.split('\\')[-1].split('.')[0] if label == 'dog': label=1 elif label == 'cat': label=0 return img_transformed,label train_data = dataset(train_list, transform=train_transforms) test_data = dataset(test_list, transform=test_transforms) val_data = dataset(val_list, transform=test_transforms) trainloader = torch.utils.data.DataLoader(dataset = train_data, batch_size=8, shuffle=True ) testloader = torch.utils.data.DataLoader(dataset = test_data, batch_size=18, shuffle=False) validloader = torch.utils.data.DataLoader(dataset = val_data, batch_size=8, shuffle=False)Data visualizationfor images, labels in trainloader: fig, ax = plt.subplots(figsize = (10, 10)) ax.set_xticks([]) ax.set_yticks([]) ax.imshow(make_grid(images, 4).permute(1,2,0)) break print(len(validloader)) print(len(trainloader)) print(len(testloader)) img = next(iter(trainloader))[0][0] plt.imshow(transforms.ToPILImage()(img)) print(f"Train data: {len(train_data)}, Train loader: {len(trainloader)}") print(f"Val data: {len(val_data)}, Val loader: {len(validloader)}") #check our images shape train_data[0][0].shapeTrainingLoopimport datetime def TrainingLoop (epoch, model, loss_fn, optimizer, trainloader ): for epoch in range(1, epoch+1): epoch_loss = 0 train_correct = 0 train_total = 0 for data, label in trainloader: data = data.to(device) label = label.to(device) output = model(data) loss = loss_fn(output, label) optimizer.zero_grad() loss.backward() optimizer.step() epoch_loss += loss.item() _,tpredicted = torch.max(output, dim=1) train_total += label.shape[0] train_correct += int((tpredicted == label).sum()) if epoch == 1 or epoch %10 == 0: print(f"Loss in epoch {epoch} : {epoch_loss/len(trainloader)} : date and time : {datetime.datetime.now()}: Got {train_correct} / {train_total} with train accuracy of :{float(train_correct) / float(train_total) * 100:.2f}") with torch.no_grad(): valid_loss = 0 correct = 0 total = 0 for data, label in validloader: data = data.to(device=device) label = label.to(device=device) ## Forward Pass val_output = model(data) val_loss = loss_fn(val_output, label) valid_loss += val_loss.item() _, predicted = torch.max(val_output, dim=1) total += label.shape[0] correct += int((predicted == label).sum()) if epoch == 1 or epoch %10 == 0: print( f"Loss in Valid epoch {epoch} : {valid_loss/len(validloader)} : Got {correct} / {total} with valid accuracy of :{float(correct) / float(total) * 100:.2f}")Transfer learning Resnet50from torchvision.models import resnet50 Net = resnet50(pretrained = True) # Modifying Head of the classifier Net.fc = nn.Sequential( nn.Dropout(0.3), #dropout incoming last convnet's few features nn.Linear(2048, 2) ) #os.environ["CUDA_LAUNCH_BLOCKING"] = "1" model =Net.to(device=device) model.train() optimizer = optim.SGD(params = model.parameters(), lr=1e-3) loss_fn = nn.CrossEntropyLoss() TrainingLoop( epoch=10, model=model, loss_fn=loss_fn, optimizer=optimizer, trainloader=trainloader, )Loss in epoch 1 : 0.2243860801840201 : date and time : 2022-04-20 18:46:24.904942: Got 18168 / 20000 with train accuracy of :90.84 Loss in Valid epoch 1 : 0.16517693873271347 : Got 4679 / 5000 with valid accuracy of :93.58 Loss in epoch 10 : 0.08322408119685715 : date and time : 2022-04-20 19:32:00.444477: Got 19380 / 20000 with train accuracy of :96.90 Loss in Valid epoch 10 : 0.08550401275092735 : Got 4844 / 5000 
with valid accuracy of :96.88Save and Load the modeltorch.save(model.state_dict(), "catsdog_resnet50.pt") model = Net.to(device="cpu") model.load_state_dict(torch.load("/CV_projects/catsDogs/catsdog_resnet50.pt")) model.eval()Test data evaluationdog_probs = [] model.eval() with torch.no_grad(): for data, fileid in testloader: data = data.to(device="cpu") preds = model(data) preds_list = F.softmax(preds, dim=1)[:, 1].tolist() dog_probs += list(zip(list(fileid), preds_list)) dog_probs.sort(key = lambda x : int(x[0])) dog_probs idx = list(map(lambda x: x[0],dog_probs)) prob = list(map(lambda x: x[1],dog_probs)) submission = pd.DataFrame({'id':idx,'label':prob}) submissionBlack-Scholes Model General Objective - Use the Black-Scholes mathematical model for financial decision making. Specific Objectives - Determine the price of a call option on a stock using the Black-Scholes model Model representing the problem $C = S \cdot N(d_1) - X e^{-r t} N(d_2)$ Assumptions>- There are no transaction costs or taxes.>- The risk-free interest rate is constant for all maturities.>- The stock pays no dividends.>- Volatility remains constant.>- Short selling is allowed.>- There are no risk-free arbitrage opportunities.>- It assumes that the probability distribution of returns is a normal distribution. Simulationsimport numpy as np import scipy.stats as ss import matplotlib.pyplot as plt %matplotlib inline import time def d1(S0, K, r, sigma, T): return (np.log(S0/K) + (r + sigma**2 / 2) * T)/(sigma * np.sqrt(T)) def d2(S0, K, r, sigma, T): return (np.log(S0 / K) + (r - sigma**2 / 2) * T) / (sigma * np.sqrt(T)) S0 = 50 K = 40 r=0.1 sigma = 0.30 T = 0.25 Otype='C' print ("S0 = Stock price at time 0:", S0) print ("K = Strike price:", K) print ("r = Risk-free rate:", r) print ("sigma = Price volatility:", sigma) print ("T = Time to maturity:", T) d1(S0 , K , r , sigma , T) d2(S0 , K , r , sigma , T) def BlackScholes(type,S0, K, r, sigma, T): if type=="C": # option type 'C' is a European call option return S0 * ss.norm.cdf(d1(S0, K, r, sigma, T)) - K * np.exp(-r * T) * ss.norm.cdf(d2(S0, K, r, sigma, T)) else: return K * np.exp(-r * T) * ss.norm.cdf(-d2(S0, K, r, sigma, T)) - S0 * ss.norm.cdf(-d1(S0, K, r, sigma, T)) S0 = 50 K = 40 r=0.1 sigma = 0.30 T = 0.25 Otype='C' print ("S0 = Stock price at time 0:", S0) print ("K = Strike price:", K) print ("r = Risk-free rate:", r) print ("sigma = Price volatility:", sigma) print ("T = Time to maturity:", T) t=time.time() BS = BlackScholes(Otype,S0, K, r, sigma, T) elasticidad=time.time()-t print ("Black-Scholes price:",BS) print("Elapsed time" , elasticidad) plt.scatter(BS,elasticidad, c = "m") plt.scatter(S0,T, c = "g")Word prediction using QuadgramThis program reads the corpus line by line, loading one line at a time into memory. Time Complexity for word prediction : O(1) Time Complexity for word prediction with rank 'r': O(r) Import corpusfrom nltk.util import ngrams from collections import defaultdict from collections import OrderedDict import string import time import gc start_time = time.time()Do preprocessing: Remove the punctuation and lowercase the tokens#returns: string #arg: string #remove punctuation and make the string lowercase def removePunctuations(sen): #split the string into word tokens temp_l = sen.split() i = 0 #change each word to lowercase and remove punctuation from it for word in temp_l : for l in word : if l in
string.punctuation: word = word.replace(l," ") temp_l[i] = word.lower() i=i+1 #spliting is being don here beacause in sentences line here---so after punctuation removal it should #become "here so" content = " ".join(temp_l) return contentTokenize and load the corpus data#returns : void #arg: string,dict,dict,dict,dict #loads the corpus for the dataset and makes the frequency count of quadgram and trigram strings def loadCorpus(file_path,bi_dict,tri_dict,quad_dict,vocab_dict): w1 = '' #for storing the 3rd last word to be used for next token set w2 = '' #for storing the 2nd last word to be used for next token set w3 = '' #for storing the last word to be used for next token set token = [] word_len = 0 #open the corpus file and read it line by line with open(file_path,'r') as file: for line in file: #split the line into tokens token = line.split() i = 0 #for each word in the token list ,remove pucntuations and change to lowercase for word in token : for l in word : if l in string.punctuation: word = word.replace(l," ") token[i] = word.lower() i=i+1 #make the token list into a string content = " ".join(token) token = content.split() word_len = word_len + len(token) if not token: continue #add the last word from previous line if w3!= '': token.insert(0,w3) temp0 = list(ngrams(token,2)) #since we are reading line by line some combinations of word might get missed for pairing #for trigram #first add the previous words if w2!= '': token.insert(0,w2) #tokens for trigrams temp1 = list(ngrams(token,3)) #insert the 3rd last word from previous line for quadgram pairing if w1!= '': token.insert(0,w1) #add new unique words to the vocaulary set if available for word in token: if word not in vocab_dict: vocab_dict[word] = 1 else: vocab_dict[word]+= 1 #tokens for quadgrams temp2 = list(ngrams(token,4)) #count the frequency of the bigram sentences for t in temp0: sen = ' '.join(t) bi_dict[sen] += 1 #count the frequency of the trigram sentences for t in temp1: sen = ' '.join(t) tri_dict[sen] += 1 #count the frequency of the quadgram sentences for t in temp2: sen = ' '.join(t) quad_dict[sen] += 1 #then take out the last 3 words n = len(token) #store the last few words for the next sentence pairing w1 = token[n -3] w2 = token[n -2] w3 = token[n -1] return word_lenCreate a Hash Table for Probable words for Trigram sentences#returns: void #arg: dict,dict,dict,dict,dict,int #creates dict for storing probable words with their probabilities for a trigram sentence def createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len): for quad_sen in quad_dict: prob = 0.0 quad_token = quad_sen.split() tri_sen = ' '.join(quad_token[:3]) tri_count = tri_dict[tri_sen] if tri_count != 0: prob = interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict, l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25) if tri_sen not in prob_dict: prob_dict[tri_sen] = [] prob_dict[tri_sen].append([prob,quad_token[-1]]) else: prob_dict[tri_sen].append([prob,quad_token[-1]]) prob = None tri_count = None quad_token = None tri_sen = NoneSort the probable words#returns: void #arg: dict #for sorting the probable word acc. 
to their probabilities def sortProbWordDict(prob_dict): for key in prob_dict: if len(prob_dict[key])>1: sorted(prob_dict[key],reverse = True)Driver function for doing the prediction#returns: string #arg: string,dict,int #does prediction for the the sentence def doPrediction(sen,prob_dict,rank = 1): if sen in prob_dict: if rank <= len(prob_dict[sen]): return prob_dict[sen][rank-1][1] else: return prob_dict[sen][0][1] else: return "Can't predict"For Computing Interpolated Probability#returns: float #arg: float,float,float,float,list,list,dict,dict,dict,dict #for calculating the interpolated probablity def interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict, l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25): sen = ' '.join(quad_token) prob =( l1*(quad_dict[sen] / tri_dict[' '.join(quad_token[0:3])]) + l2*(tri_dict[' '.join(quad_token[1:4])] / bi_dict[' '.join(quad_token[1:3])]) + l3*(bi_dict[' '.join(quad_token[2:4])] / vocab_dict[quad_token[2]]) + l4*(vocab_dict[quad_token[3]] / token_len) ) return probFor Taking input from the User#returns: string #arg: void #for taking input from user def takeInput(): cond = False #take input while(cond == False): sen = input('Enter the string\n') sen = removePunctuations(sen) temp = sen.split() if len(temp) < 3: print("Please enter atleast 3 words !") else: cond = True temp = temp[-3:] sen = " ".join(temp) return senmain function""" def main(): #variable declaration tri_dict = defaultdict(int) #for keeping count of sentences of three words quad_dict = defaultdict(int) #for keeping count of sentences of three words vocab_dict = defaultdict(int) #for storing the different words with their frequencies prob_dict = OrderedDict() #for storing the probabilities of probable words for a sentence bi_dict = defaultdict(int) #load the corpus for the dataset token_len = loadCorpus('corpusfile.txt',bi_dict,tri_dict,quad_dict,vocab_dict) print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time)) start_time1 = time.time() #creates a dictionary of probable words createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len) #sort the dictionary of probable words sortProbWordDict(prob_dict) # writeProbWords(prob_dict) gc.collect() print("---Preprocessing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1)) """" """ if __name__ == '__main__': main() """For Debugging Purpose OnlyUncomment the above two cells and ignore running the cells below if not debugging#variable declaration tri_dict = defaultdict(int) #for keeping count of sentences of three words quad_dict = defaultdict(int) #for keeping count of sentences of three words vocab_dict = defaultdict(int) #for storing the different words with their frequencies prob_dict = OrderedDict() #for storing the probabilities of probable words for a sentence bi_dict = defaultdict(int) #load the corpus for the dataset token_len = loadCorpus('corpusfile.txt',bi_dict,tri_dict,quad_dict,vocab_dict) print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time)) start_time1 = time.time() #creates a dictionary of probable words createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len) #sort the dictionary of probable words sortProbWordDict(prob_dict) # writeProbWords(prob_dict) gc.collect() print("---Preprocessing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1)) sen = takeInput() start_time2 = time.time() prediction = 
doPrediction(sen,prob_dict) print("Word Prediction:",prediction) print("---Time for Prediction Operation: %s seconds ---" % (time.time() - start_time2))Enter the string emma by jane Word Prediction: austen ---Time for Prediction Operation: 0.0007853507995605469 seconds ---Final projectThe Allen–Cahn equation (after and ) is a reaction–diffusion equation of mathematical physics which describes the process of phase separation in multi-component alloy systems, including order-disorder transitions.The equation describes the time evolution of a scalar-valued state variable $\eta$ on a domain $\Omega=[0,1]$ during a time interval $[0,T]$, and is given (in one dimension) by:$$\frac{\partial \eta}{\partial t} - \varepsilon^2 \eta'' + f'(\eta) = 0, \qquad \eta'(0, t) = \eta'(1, t) = 0,\qquad\eta(x,0) = \eta_0(x)$$where $f$ is a double-well potential, $\eta_0$ is the initial condition, and $\varepsilon$ is the characteristic width of the phase transition.This equation is the L2 gradient flow of the Ginzburg–Landau free energy functional, and it is closely related to the Cahn–Hilliard equation.A typical example of double well potential is given by the following function$$f(\eta) = \eta^2(\eta-1)^2$$which has two minima in $0$ and $1$ (the two wells, where its value is zero), one local maximum in $0.5$, and it is always greater or equal than zero.The two minima above behave like "attractors" for the phase $\eta$. Think of a solid-liquid phase transition (say water+ice) occupying the region $[0,1]$. When $\eta = 0$, then the material is liquid, while when $\eta = 1$ the material is solid (or viceversa).Any other value for $\eta$ is *unstable*, and the equation will pull that region towards either $0$ or $1$.Discretisation of this problem can be done by finite difference in time. For example, a fully explicity discretisation in time would lead to the following algorithm.We split the interval $[0,T]$ in `n_steps` intervals, of dimension `dt = T/n_steps`. Given the solution at time `t[k] = k*dt`, it i possible to compute the next solution at time `t[k+1]` as$$\eta_{k+1} = \eta_{k} + \Delta t \varepsilon^2 \eta_k'' - \Delta t f'(\eta_k)$$Such a solution will not be stable. A possible remedy that improves the stability of the problem, is to treat the linear term $\Delta t \varepsilon^2 \eta_k''$ implicitly, and keep the term $-f'(\eta_k)$ explicit, that is:$$\eta_{k+1} - \Delta t \varepsilon^2 \eta_k'' = \eta_{k} - \Delta t f'(\eta_k)$$Grouping together the terms on the right hand side, this problem is identical to the one we solved in the python notebook number 9, with the exception of the constant $\Delta t \varepsilon^2$ in front the stiffness matrix.In particular, given a set of basis functions $v_i$, representing $\eta = \eta^j v_j$ (sum is implied), we can solve the problem using finite elements by computing$$\big((v_i, v_j) + \Delta t \varepsilon^2 (v_i', v_j')\big) \eta^j_{k+1} = \big((v_i, v_j) \eta^j_{k} - \Delta t (v_i, f'(\eta_k)\big)$$where a sum is implied over $j$ on both the left hand side and the right hand side. Let us remark that while writing this last version of the equation we moved from a forward Euler scheme to a backward Euler scheme for the second spatial derivative term: that is, we used $\eta^j_{k+1}$ instead of $\eta^j_{k}$. 
This results in a linear system$$A x = b$$where $$A_{ij} = M_{ij}+ \Delta t \varepsilon^2 K_{ij} = \big((v_i, v_j) + \Delta t \varepsilon^2 (v_i', v_j')\big) $$and $$b_i = M_{ij} \big(\eta_k^j - \Delta t f'(\eta_k^j)\big)$$where we simplified the integration on the right hand side, by computing the integral of the interpolation of $f'(\eta)$. Step 1Write a finite element solver, to solve one step of the problem above, given the solution at the previous time step, using the same techniques used in notebook number 9.In particular:1. Write a function that takes in input a vector representing $\eta$, an returns a vector containing $f'(\eta)$. Call this function `F`.2. Write a function that takes in input a vector of support points of dimension `ndofs` and the degree `degree` of the polynomial basis, and returns a list of basis functions (piecewise polynomial objects of type `PPoly`) of dimension `ndofs`, representing the interpolatory spline basis of degree `degree`3. Write a function that, given a piecewise polynomial object of type `PPoly` and a number `n_gauss_quadrature_points`, computes the vector of global_quadrature_points and global_quadrature_weights, that contains replicas of a Gauss quadrature formula with `n_gauss_quadrature_points` on each of the intervals defined by `unique(PPoly.x)`4. Write a function that, given the basis and the quadrature points and weights, returns the two matrices $M$ and $K$ Step 2Solve the Allen-Cahan equation on the interval $[0,1]$, from time $t=0$ and time $t=1$, given a time step `dt`, a number of degrees of freedom `ndofs`, and a polynomial degree `k`.1. Write a function that takes the initial value of $\eta_0$ as a function, eps, dt, ndofs, and degree, and returns a matrix of dimension `(int(T/dt), ndofs)` containing all the coefficients $\eta_k^i$ representing the solution, and the set of basis functions used to compute the solution2. Write a function that takes all the solutions `eta`, the basis functions, a stride number `s`, and a resolution `res`, and plots on a single plot the solutions $\eta_0$, $\eta_s$, $\eta_{2s}$, computed on `res` equispaced points between zero and one Step 3Solve the problem for all combinations of1. eps = [01, .001]2. ndofs = [16, 32, 64, 128]3. degree = [1, 2, 3]3. dt = [.25, .125, .0625, .03125, .015625]with $\eta_0 = \sin(2 \pi x)+1$.Plot the final solution at $t=1$ in all cases. What do you observe? What happens when you increase ndofs and keep dt constant? Step 4 (Optional)Instead of solving the problem explicitly, solve it implicitly, by using backward euler method also for the non linear term. This requires the solution of a Nonlinear problem at every step. Use scipy and numpy methods to solve the non linear iteration. Project implementationFirst we import the needed python libraries%pylab inline import sympy as sym import scipy from scipy.interpolate import * from scipy.integrate import * %matplotlib inline from matplotlib import cm import matplotlib.pyplot from IPython.display import Image from IPython.display import display, clear_output import timePopulating the interactive namespace from numpy and matplotlib*The Allen–Cahn equation $\frac{\partial \eta}{\partial t} - \varepsilon^2 \eta'' + f'(\eta) = 0$ is a reaction–diffusion equation which describes the process of phase separation in multi-component alloy systems, including order-disorder transitions.**The equation describes the time evolution of a scalar-valued state variable $\eta$ on a domain $\Omega=[0,1]$ during a time interval $[0,T]$. 
* *The double well potential is given by the following function*$$f(\eta) = \eta^2(\eta-1)^2$$Before to start, let's represent graphically this function over it's interval of definitiondef f(eta): # This is the double well potential function return eta**2*(eta-1)**2To represent f over it's whole interval of definition, we split the interval $\Omega=[0,1]$ in `n_space` intervals, of dimension `dl = T/n_space`.n_space=500 Omega=linspace(0,1,n_space) dl=1/n_space _ = plot(Omega,f(Omega)) _ = plt.ylabel(r"$f ( \eta )$", fontsize = 12) _ = plt.xlabel(r"$\eta \in \Omega$", fontsize = 12)As announced, $f$ "*has two minima in $0$ and $1$ (the two wells, where its value is zero), one local maximum in $0.5$, and it is always greater or equal than zero.*" In preparation of the exercise to be done, we define and split the interval $[0,T]$ in `n_steps` intervals, of dimension `dt = T/n_steps`.T=1 n_steps=100 dt=T/n_steps Time=linspace(0,T,n_steps)Step 1*Write a finite element solver, to solve one step of the problem above, given the solution at the previous time step* 1.1 Double potential derivative*Write a function that takes in input a vector representing $\eta$, an returns a vector containing $f'(\eta)$. Call this function `F`.*Given the definition of the double well potential $f(\eta) = \eta^2(\eta-1)^2$, it's derivative is the following function:$$f'(\eta) = F(\eta)= \frac{d(\eta^2)} {d \eta} \times(\eta-1)^2+ \eta^2\times\frac{d (\eta-1)^2}{d\eta}\\=2\eta\times(\eta-1)^2 + \eta^2\times2(\eta-1)$$def F(eta): # derivative of the double well potential function return 2*eta*(eta-1)**2+eta**2*2*(eta-1) _ = plot(Omega,F(Omega)) _ = plt.ylabel(r"$F(\eta) = f'(\eta) $", fontsize = 12) _ = plt.xlabel(r"$\eta \in \Omega$", fontsize = 12)1.2. Basis functions*Write a function that takes in input a vector of support points of dimension `ndofs` and the degree `degree` of the polynomial basis, and returns a list of basis functions (piecewise polynomial objects of type `PPoly`) of dimension `ndofs`, representing the interpolatory spline basis of degree `degree`.*def compute_basis_functions(support_points, degree): # Insert here what was in notebook 9 basis = [] dbasis = [] for i in range(len(support_points)): c = support_points*0 # c has same shape as support_points but is null c[i] = 1 # c is null everywhere except in one of the support_points bi = PPoly.from_spline(splrep(support_points,c,k=degree)) basis.append(bi)# append base basis function to basis matrix return basis*The basis functions are constructed from the spline interpolation by computing the piecewise interpolation of a function that has value one in one of the `support_points` and zero in all other support points.*There are as many basis functions as support points, that is, `ndofs`. 
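As a quick sanity check (a minimal sketch, assuming the `compute_basis_functions` defined just above; the helper names below are only for this illustration), each basis function should equal one at its own support point and zero at every other support point, so evaluating the whole basis at the support points should give the identity matrix:
import numpy as np
check_points = np.linspace(0, 1, 8)                       # a small set of support points
check_basis = compute_basis_functions(check_points, 3)    # cubic interpolatory spline basis
vals = np.array([b(check_points) for b in check_basis])   # one row per basis function
assert np.allclose(vals, np.eye(len(check_points)), atol=1e-8)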
They define a basis for a piecewise polynomial space of dimension `ndofs`.ndofs=16 degree=2Lets visualize this basis functions and use them to plot a piecewise polynomial interpolation of $f$ in $\Omega$support_points=linspace(0,1,ndofs) BasisFunctions=compute_basis_functions(support_points,degree) OmegaBasisMat=zeros((n_space,ndofs)) for i in range(len(support_points)): OmegaBasisMat[:,i]=BasisFunctions[i](Omega) fig, ax = plt.subplots(nrows=1, ncols=2,figsize=(10,4)) ylim(-0.01,max(f(Omega))) for i in range(ndofs): BasisFunc=BasisFunctions[i] ax[0].plot(Omega,BasisFunc(Omega),color=cm.gist_rainbow(i/ndofs)) # plot this basis function ax[1].plot(Omega,OmegaBasisMat[:,i:i+1].dot(f(support_points[i:i+1])),color=cm.gist_rainbow(i/ndofs)) # plot this basis function ax[1].scatter(support_points[i],f(support_points)[i:i+1],color=cm.gist_rainbow(i/ndofs)) ax[1].plot(support_points[:i+1],f(support_points)[:i+1],'k',dashes=[2,2,2,2]) clear_output() display(fig) time.sleep(2.0/float(ndofs)) clear_output() display(fig) time.sleep(0.5) _ = ax[1].plot(Omega,f(Omega),'k',alpha=0.8) clear_output() display(fig) clear_output()....... 1.3. Global quadrature*Write a function that, given a piecewise polynomial object of type `PPoly` and a number `n_gauss_quadrature_points`, computes the vector of global_quadrature_points and global_quadrature_weights, that contains replicas of a Gauss quadrature formula with `n_gauss_quadrature_points` on each of the intervals defined by `unique(PPoly.x)`* Gauss quadrature uses the function values evaluated at the `n_gauss_quadrature_points` and corresponding weights to approximate the integral by a weighted sum. Gauss quadrature deals with integration for $x \in [-1,1]$, consequently we have to rescale the points and weights to work from zero to one.# Step 1.3 def compute_global_quadrature(basis, n_gauss_quadrature_points): # Create a Gauss quadrature formula with n_gauss_quadrature_points, # extract the intervals from basis (i.e., unique(basis.x)), and # create len(x)-1 shifted # and scaled Gauss quadrature formulas # that can be used to integrate on each interval. # Put all of these # together, and return the result # The intervals are stored as `x` (with some repeated entries) # in the `PPoly` object. 
Thats why we use unique() to make sure # that every interval border is taken only once intervals = unique(basis[0].x) # and make sure we can integrate exactly the product # of two basis functions qp, w = numpy.polynomial.legendre.leggauss(n_gauss_quadrature_points) # Rescale the points and weights to work from zero to one qp = (qp+1)/2 w /= 2 # Now replicate these points and weights in all the intervals h = diff(intervals) # 1st order discrete difference Q = array([intervals[i]+h[i]*qp for i in range(len(h))]).reshape((-1,)) W = array([w*h[i] for i in range(len(h))]).reshape((-1,)) # return global_quadrature, global_weights return Q,W Q,W= compute_global_quadrature(BasisFunctions,degree +1) plot(Q,W,'k',dashes=[2,2,2,2],alpha=0.3) plot(Q,W,'xk') xlabel("global quadrature points") ylabel("global weights")1.4 Compute M and K*Write a function that, given the basis and the quadrature points and weights, returns the two matrices $M$ and $K$ *# Step 1.4 def compute_system_matrices(basis, global_quadrature, global_weights): # Compute the matrices M_ij = (v_i, v_j) and K_ij = (v_i', v_j') and return them #compute the 1st derivative of the basis functions dbasis = [] nB=len(basis) for i in range(nB): dbasis.append( basis[i].derivative(1) ) Bq = array([basis[i](global_quadrature) for i in range(nB)]).T dBq = array([dbasis[i](global_quadrature) for i in range(nB)]).T M = einsum('qi, q, qj', Bq, global_weights, Bq) K = einsum('qi, q, qj', dBq, global_weights, dBq) # return M, Kn-th order discrete difference return M,K M,K=compute_system_matrices(BasisFunctions, Q,W) shape(support_points) fig,ax=plt.subplots(nrows=1,ncols=2,figsize=(10,4)) ax[0].imshow(M,cm.jet,vmin=0,vmax=0.02) ax[0].set_title("M matrix") ax[1].imshow(K,cm.jet,vmin=0,vmax=2) ax[1].set_title("K matrix")Step 2Solve the Allen-Cahan equation on the interval $[0,1]$, from time $t=0$ and time $t=1$, given a time step `dt`, a number of degrees of freedom `ndofs`, and a polynomial degree `k`.1. Write a function that takes the initial value of $\eta_0$ as a function, eps, dt, ndofs, and degree, and returns a matrix of dimension `(int(T/dt), ndofs)` containing all the coefficients $\eta_k^i$ representing the solution, and the set of basis functions used to compute the solution# Step 2.1 def solve_allen_cahan(eta_0_function, eps, dt, ndofs, degree): # put together all the above functions, loop over time, and produce # the result matrix eta, containing the solution at all points support_points=linspace(0,1,ndofs) basis=compute_basis_functions(support_points, degree) Q,W= compute_global_quadrature(basis,degree +1) M,K=compute_system_matrices(basis, Q,W) #A_{ij} = M_{ij}+ \Delta t \varepsilon^2 K_{ij} A = M + dt * eps**2 * K ntime=int(1.0/dt) eta=zeros((ntime, ndofs),dtype=float) # store initial values in eta eta[0,:]=eta_0_function(support_points) for k in range(ntime-1): # b_i = M_{ij} (eta_k^j - Delta_t f'(eta_k^j) ) b = M.dot( eta[k] - dt *F(eta[k]) ) # A.eta_k+1 = b eta[k+1,:]=np.linalg.solve(A,b) return eta, basis2.2. 
plot the solutions $\eta_0$, $\eta_s$, $\eta_{2s}$Write a function that takes all the solutions `eta`, the basis functions, a stride number `s`, and a resolution `res`, and plots on a single plot the solutions $\eta_0$, $\eta_s$, $\eta_{2s}$, computed on `res` equispaced points between zero and one# Step 2.2 def plot_solution(eta, basis, stride, resolution): x = linspace(0,1,resolution) BasisMatrix=np.zeros((resolution,len(basis))) for i in range(len(basis)): BasisMatrix[:,i]=basis[i](x) fig,ax=plt.subplots(figsize=(8,4)) for k in range(0,len(eta)):#,stride): label="t="+str(k/len(eta)) if( (k/len(eta))%0.1!=0):label='' ax.plot(x , eta[k,:].dot(BasisMatrix.T),color=cm.jet(k/len(eta)),label=label) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) clear_output() display(fig) time.sleep(2.0*1.0/float(len(eta))) ax.plot(x , eta[-1,:].dot(BasisMatrix.T),color='k',label="t=1",dashes=[2,2,2,2]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) display(fig) clear_output()Step 3*Solve the problem for all combinations of**1. eps = [.01, .001]**2. ndofs = [16, 32, 64, 128]**3. degree = [1, 2, 3]**4. dt = [.25, .125, .0625, .03125, .015625]**with $\eta_0 = \sin(2 \pi x)+1$.**Plot the final solution at $t=1$ in all cases. What do you observe? What happens when you increase ndofs and keep dt constant? *def eta_0(x): return sin(2*pi*x)+1 eps=.001 degree=2 ndofs=64 dt=0.05 eta,basis=solve_allen_cahan(eta_0, eps, dt, ndofs, degree) stride=4 plot_solution(eta,basis,stride,200) plt.title("Degree="+str(degree)+" ndofs="+str(ndofs)+" Small eps = "+str(eps)+" and small dt= "+str(dt)) eps=.01 degree=3 ndofs=128 dt=0.25 eta,basis=solve_allen_cahan(eta_0, eps, dt, ndofs, degree) stride=1 plot_solution(eta,basis,stride,ndofs) plt.title("Degree="+str(degree)+" ndofs="+str(ndofs)+" Larger eps = "+str(eps)+" and Larger dt = "+str(dt)) The plots below show the influence of the different parameters within the requested ranges. We can see that the approximated solution converges better as the number of degrees of freedom increases and the timestep decreases. More precisely:- the **degree** is represented by the different dash styles on the plot below: - degree **1** corresponds to the curves with short dashes, - degree **2** to the long dashes, - degree **3** to the continuous lines. The continuous and dashed lines are almost always superposed, indicating that the degree has almost no influence on the result at t=1. A very slight effect is visible only for the smallest numbers of degrees of freedom **ndofs** or for large timesteps **dt**. - the smaller the characteristic width of the phase transition **eps**, the smaller the **dt** required to observe convergence of the approximated solution towards the exact solution. In fact, for the smallest values of the phase transition width $\varepsilon$, convergence in this exercise requires $dt \le 0.0625$, while for the largest values of $\varepsilon$ a timestep $dt\le 0.125$ is almost sufficient. If **dt** is chosen too large, the solution experiences oscillations.- increasing the number of degrees of freedom **ndofs** while keeping **dt** constant leads to smoother functions, removing some "local" variations of the solution with respect to the converged solution (especially for the smallest values of $\varepsilon$, which induce a sharper solution).
But this doesn't change the global shape of the result, except for very large **dt** but for the largest **dt** the approximation is anyway far to be good.def plot_t1(eta, basis, stride, resolution,ax,color,dashes): x = linspace(0,1,resolution) BasisMatrix=np.zeros((resolution,len(basis))) for i in range(len(basis)): BasisMatrix[:,i]=basis[i](x) if(dashes!=[1,0,1,0]):label='' ax.plot(x , eta[-1,:].dot(BasisMatrix.T),color=color,dashes=dashes) #ax.legend(loc='lower right') #ax.set_title(title) ax.set_ylim(-1,1.5) epslist=[0.1,0.04,0.01,.001] deglist=[1,2,3] ndofslist=[16, 32, 64, 128] dtlist=[.25, .125, .0625, .03125, .015625] fig,ax=plt.subplots(nrows=5,ncols=4,figsize=(20,20)) for col,ndofs in enumerate(ndofslist): for row,dt in enumerate(dtlist): ax[row,col].text(.5,.9," nodfs="+str(ndofs)+" dt="+str(dt), horizontalalignment='center', transform=ax[row,col].transAxes,fontsize=14) for eps,color in zip(epslist,cm.jet([0.1,.30,0.45,0.9])): ax[row,col].plot(0,0,color=color,label=" eps="+str(eps)) ax[row,col].legend(loc='lower right') for degree,dash in zip(deglist,([3,3,3,3],[6,4,6,4],[1,0,1,0])): eta,basis=solve_allen_cahan(eta_0, eps, dt, ndofs, degree) plot_t1(eta,basis,stride,200,ax[row,col],color,dash)Course: Computational Thinking for Governance Analytics Prof. , PhD * Visiting Professor of Computational Policy at Evans School of Public Policy and Governance, and eScience Institute Senior Data Science Fellow, University of Washington.* Professor of Government and Political Methodology, Pontificia Universidad Católica del Perú. _____ Data Preprocessing in Python: Data Integration and Reshaping By now, we know how to collect data from different places, and clean/format them as needed. While the cleaning and formatting is done for each data source, we finally need to integrate all the sources into one to start the real analytical work. Let's get the data on [democracy index](https://en.wikipedia.org/wiki/Democracy_Index) by _The Economist_:# importing pandas for the notebook import pandas as pd #link for the tables demoLink="https://en.wikipedia.org/wiki/Democracy_Index" # fetching the tables demodata=pd.read_html(demoLink,header=0,flavor="bs4",attrs={'class':"wikitable sortable"})You should remember by now that **demodata** is a _list_ of all the sortable tables from that URL. Let me recover the one we want:demoVars=demodata[4].copy()In this last session on Python, we want to prepare data on the world countries and their level of democracy. I will add some variables from other tables to this one later. Take a look at what you have now:# first rows demoVars.head()Let's start by getting rid of the columns we will not use:#column positions to drop whichToDrop=[0,1,5] #dropping and updating the data frame demoVars.drop(labels=demoVars.columns[whichToDrop],axis=1,inplace=True)Let's take a look at the columns names:# these are: demoVars.columns #demoVars.drop(labels=demoVars.columns[0],axis=1, inplace=True) #demoVars.drop(labels=demoVars.columns[0],axis=1, inplace=True) #demoVars.drop(labels=demoVars.columns[3],axis=1, inplace=True)The previous result shows nothing relevant, but the pandas data frame shows _unneeded dashes_ in the column names. Let's **check the presence of special characters**:# this is a column name with dashes: import re re.sub("\s","",demoVars.columns[5])The result shows you some **hidden characters**. 
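Before replacing anything, it helps to see exactly which characters are hiding in a name. A minimal sketch (assuming the demoVars frame loaded above): print the code point and Unicode name of every character in the suspicious label, which makes soft hyphens (U+00AD) and other invisible characters obvious:
import unicodedata
suspect = demoVars.columns[5]   # the same column name inspected above
for ch in suspect:
    print(repr(ch), hex(ord(ch)), unicodedata.name(ch, 'UNKNOWN'))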
Let's use that info with the pandas replace:# using pandas own replace function demoVars.columns=demoVars.columns.str.replace("\s|\xad","",regex=True) # current column names demoVars.columnsLet's clean the data contents. Notice that the website includes some labels that are not actual data. Let's check the frequency table of "Regime Type" to identify the wrong labels that are polluting the data frame, so we can delete those *rows*:# frequency table using "value_counts" demoVars['Regimetype'].value_counts() # these are the wrong ones: demoVars['Regimetype'].value_counts().index[4:] #saving the wrong ones: byeValues=demoVars['Regimetype'].value_counts().index[4:]Now that we know which ones are not needed, we can **filter** the data frame **rows** using pandas' **isin**:# use "~" to request the "opposite" rows demoVars = demoVars[~demoVars['Regimetype'].isin(byeValues)] # data frame after demoVarsNotice that the index **0** has disappeared, and that even though you have 167 countries now, the last one has index **170**. When you filter rows that will happen; so it is better to **reset the indexes of the data frame**:# new indexes will appear demoVars.reset_index(drop=True,inplace=True)Let's save the file in CSV format:demoVars.to_csv("demoVars.csv",index=False)Exercise 1:- Go to this link: [https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes).- Scrape the table of countries, and keep country names, **ISO codes** and **Internet codes**.- Prepare a CSV file with the clean and formatted data, and upload that file into GitHub.- Read the file from GitHub and add the ISO codes and internet codes to the table of democracy index. Integrating Data Frames Integrating data sets requires the following considerations:* Merging is done on two data frames.* You need a column in each data frame that shares the same exact and unique values. The column names or titles need not be the same.* The merged table shows by default the mutual coincidences; but you can also request the values not matched, which will help you detect possible extra cleaning.* Pandas differentiates the **left** from the **right** data frames. Let me bring the new data frame with ISO codes and internet codes:# link to the data in CSV format linkDataIso='https://github.com/EvansDataScience/CTforGA_integrating/raw/main/isodata.csv' # using 'read_csv' with a link DataIso=pd.read_csv(linkDataIso) #Check the current names: DataIso.columns* **Option one** (default): merge only the coincidences:# notice the key columns are country names, but the column names are different: allData=demoVars.merge(DataIso,left_on="Country",right_on="Countryname")Once the merge has taken place, check the differences:# number of rows (countries) before merge demoVars.shape # number of rows after merge allData.shapeThe previous merge has **lost several rows** (countries). * **Option two**: you want to keep all the rows, matched and unmatched, from **both** data frames:# merge 'outer' keeps all matched and unmatched # merge 'indicator' will tell you if the key was found in 'both' or only in one of them allData=demoVars.merge(DataIso,left_on="Country",right_on="Countryname",how='outer',indicator='True')The _allData_ data frame now has a column named 'True', the **indicator**:allData.columns # explore matched and unmatched counts: allData['True'].value_counts()Our main task is to add columns to the Democracy Index data.
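Before looking at the unmatched rows, here is a tiny self-contained illustration (toy frames invented for this example, not the democracy data) of what the indicator reports after an outer merge. Note that passing indicator=True names the new column '_merge', whereas the cell above passes the string 'True', which names the column 'True' instead:
import pandas as pd
toy_left = pd.DataFrame({'Country': ['Peru', 'Chile', 'Bolivia']})
toy_right = pd.DataFrame({'Countryname': ['Peru', 'Chile', 'Ecuador']})
toy = toy_left.merge(toy_right, left_on='Country', right_on='Countryname',
                     how='outer', indicator=True)
print(toy)
# 'both'       -> the key was found in the two frames
# 'left_only'  -> only in the left frame (Bolivia here)
# 'right_only' -> only in the right frame (Ecuador here)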
So these are the countries from The Economist that did not find a match:allData[allData['True']=='left_only'].CountryThose names should be here, but written differently:allData[allData['True']=='right_only'].CountrynameThe next steps might seem a little laborious, combining computational thinking and finally some brute force. The first tool we need is another library that allows fuzzy matching: [TheFuzz](https://github.com/seatgeek/thefuzz).The library has two functions of interest: _extractOne_ and _extract_. Let me show you how they work:# The countries unmatched UnmatchedLeft=allData[allData['True']=='left_only'].Country.to_list() UnmatchedRight=allData[allData['True']=='right_only'].Countryname.to_list()This is a country name from The Economist that did not get a match in the ISO data:UnmatchedLeft[0]Let's find the best match from the ISO data:from thefuzz import process process.extractOne(UnmatchedLeft[0], UnmatchedRight) !pip install thefuzzCollecting thefuzz Downloading thefuzz-0.19.0-py2.py3-none-any.whl (17 kB) Installing collected packages: thefuzz Successfully installed thefuzz-0.19.0Let's find the three best matches from the ISO data:import thefuzz process.extract(UnmatchedLeft[0], UnmatchedRight,limit=3)These operations will not be done blindly. We will need to check which matches were made:UnmatchedLeft=allData[allData['True']=='left_only'].Country.to_list() UnmatchedLeft [(left, process.extractOne(left, UnmatchedRight)) for left in sorted(UnmatchedLeft)]These are the totally wrong matches:# this is a list of tuples: TotallyWrong=[('Czech Republic', ('Central African Republic (the)', 86)), ('Laos', ('Barbados', 68)), ('Republic of the Congo', ('Bahamas (the)', 86)), ('South Korea', ("Korea (the Democratic People's Republic of)", 86)), ('Sudan', ('South Sudan', 90)), ('United States', ('United States Minor Outlying Islands (the)', 90))]Let me make a list with all the democracy index country names that must be omitted for now:omitLeft=[leftName for (leftName,rightFuzzy) in TotallyWrong] #parenthesis not needed omitLeftCreate a dictionary with the changes that will be done:# process.extractOne(left, UnmatchedRight)[0] IS JUST THE COUNTRY NAME TO THE RIGHT # [0] just took first element in tuple. # left for left in UnmatchedLeft IS JUST EVERY COUNTRY NAME TO THE LEFT # if left not in omitLeft IS A CONDITION {process.extractOne(left, UnmatchedRight)[0]:left for left in UnmatchedLeft if left not in omitLeft}Let's save the dict above and use it to replace:changesRight={process.extractOne(left, UnmatchedRight)[0]:left for left in UnmatchedLeft if left not in omitLeft} DataIso.Countryname.replace(changesRight,inplace=True)DataIso has new country names.
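An optional refinement (a sketch, assuming the UnmatchedLeft and UnmatchedRight lists built above): extractOne also accepts a score_cutoff argument and returns None when no candidate reaches it, so clearly weak matches can be set aside for manual review automatically. The threshold of 90 below is an arbitrary choice for illustration, and, as the TotallyWrong list shows, even high-scoring matches still need to be checked by eye:
from thefuzz import process
auto_changes, needs_manual = {}, []
for left in UnmatchedLeft:
    best = process.extractOne(left, UnmatchedRight, score_cutoff=90)
    if best is None:
        needs_manual.append(left)        # no candidate scored at least 90
    else:
        auto_changes[best[0]] = left     # ISO name -> Economist name, as in changesRight
print(len(auto_changes), 'candidate matches;', len(needs_manual), 'names left for manual mapping')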
Let's prepare manually the remaining changes:DataIso.Countryname.to_list() # dict of manual changes bruteForceChanges={'Korea (the Republic of)':'South Korea', 'United States of America (the)':'United States', 'Czechia':'Czech Republic', 'Congo (the)':'Republic of the Congo', 'Sudan (the)':'Sudan', "Lao People's Democratic Republic (the)":'Laos'} # replacing DataIso.Countryname.replace(bruteForceChanges,inplace=True)Let's redo the merging with full coincidences (option 1):# redoing merge allData=demoVars.merge(DataIso,left_on="Country",right_on="Countryname") # current dimension allData.shapeHere, we could drop the columns that are not needed:allData.drop(["Countryname"],axis=1,inplace=True) allDataLet's save this merged data frame in CSV format:allData.to_csv('allData.csv',index=False)Exercise 2:- Go to this link: [https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population](https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population).- Scrap the table of countries, and keep country names, and region.- Make sure the country names match exactly the ones from the table of democracy index.- Merge this table with the allData data frame (just add the region)- Prepare a CSV file with the clean, formatted and merged data, and upload that file into GitHub.#link for the tables ex2Link="https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population" # fetching the tables ex2data=pd.read_html(ex2Link,header=0,flavor="bs4",attrs={'class':"wikitable sortable"}) ex2vars = ex2data[0].copy() ex2vars.head() #column positions to drop ex2WhichToDrop = [0,3,4,5,6,7] #Dropping unnecessary columns and updating df ex2vars.drop(labels=ex2vars.columns[ex2WhichToDrop], axis=1, inplace=True) ex2vars.columns #Cleaning Column Names import re #re.sub("\s", "", ex2vars.columns[0]) ex2vars.columns=ex2vars.columns.str.replace("\s|/","",regex=True) ex2vars.columns #cleaning data contents ex2vars['Region'].value_counts() #should we drop world? removeVals= ex2vars['Region'].value_counts().index[5] removeVals removeValsList=[removeVals] removeValsList # use "~" to request the "opposite" rows ex2vars = ex2vars[~ex2vars['Region'].isin(removeValsList)] # data frame after ex2vars ex2vars.reset_index(drop=True,inplace=True) ex2vars.shape ex2vars.to_csv("ex2vars.csv", index=False)Now to Merge with AllData Frame RESHAPING Wide and Long formatLet's open **allDataFull**, whose structure has the **WIDE** format. 
In this format, the variables are in every column, the most traditional one for spreadsheet users.allDataFull=pd.read_csv('https://github.com/EvansDataScience/CTforGA_integrating/raw/main/allDataFull.csv') allDataFullThis is the final dataframe, let's check the data types, in case they need formatting:allDataFull.info() RangeIndex: 167 entries, 0 to 166 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 167 non-null object 1 Regimetype 167 non-null object 2 Overallscore 167 non-null float64 3 Electoralprocessandpluralism 167 non-null float64 4 Functioningofgovernment 167 non-null float64 5 Politicalparticipation 167 non-null float64 6 Politicalculture 167 non-null float64 7 Civilliberties 167 non-null float64 8 InternetccTLD 167 non-null object 9 iso2 166 non-null object 10 iso3 167 non-null object 11 Region 167 non-null object dtypes: float64(6), object(6) memory usage: 15.8+ KBWe should turn the texts of the regime type column into ordinal levels:from pandas.api.types import CategoricalDtype # create data type - notice ascending order of levels levelsDemo = CategoricalDtype(categories=['Authoritarian', 'Hybrid regime', 'Flawed democracy', 'Full democracy'], ordered=True) # make the change: allDataFull.Regimetype=allDataFull.Regimetype.astype(levelsDemo) # see result: allDataFull.RegimetypeOnce you have the right data types, you can request stats:allDataFull.describe(include='all') # try? # allDataFull.describe()Let's see turn it into a **LONG** format:allDataFull_LONG=allDataFull.melt(id_vars=['Country','Region',"Regimetype",'iso3','iso2','InternetccTLD']) allDataFull_LONGThe amount of of rows multiplies, but **some** codes are easier to implement with long format (or can only be implemented in that format), like plotting:import plotnine as p9 base=p9.ggplot(data=allDataFull_LONG) base + p9.geom_boxplot(p9.aes(x='variable',y='value')) + p9.coord_flip()Using the original format (wide), you could only ploy one bar using the previous *grammar*:base=p9.ggplot(data=allDataFull) base + p9.geom_boxplot(p9.aes(x=1,y='Overallscore')) + p9.coord_flip()Nonetheless, there are packages in Python that use the wide format for plotting (i.e. _seaborn_):import seaborn as sns whatVars= ['Overallscore', 'Electoralprocessandpluralism', 'Functioningofgovernment', 'Politicalparticipation', 'Politicalculture', 'Civilliberties'] sns.boxplot(data=allDataFull[whatVars], orient="h",color='white')AggregatingSometimes you need to aggregate your original data frame, that is, aggregate using a particular function some columns by a particular group . 
For example, if we wanted the average of the overall score of democracy by region, and get the result as a data frame:allDataFull.groupby("Region")[['Overallscore']].agg('mean').reset_index()What if we wanted the average of the overall score of democracy by region and by regime type, and get the result as a data frame:allDataFull.groupby(["Region","Regimetype"])[['Overallscore']].agg('mean').reset_index()Notice the if you do not use _reset_index()_, you will not get the groups as columns, but as row indexes:allDataFull.groupby(["Region","Regimetype"])[['Overallscore']].agg('mean')You can also request more aggregative functions:allDataFull.groupby(["Region","Regimetype"])[['Overallscore']].agg(['min','max']).reset_index()And of course, more variables to aggregate:severalVars=['Electoralprocessandpluralism','Functioningofgovernment', 'Politicalparticipation'] allDataFull.groupby(["Region","Regimetype"])[severalVars].agg(['min','max']).reset_index()The previous column names look more complex:multi=allDataFull.groupby(["Region","Regimetype"])[severalVars].agg(['min','max']).reset_index() multi.columnsThe aggregation process can produce **multiIndex**. Most of the time you want to flatten the multiIndex. We could use some basic string operations for that:# concatenating: [(col[0]+'_'+col[1]) for col in multi.columns ] # concatenating if secind element is not '': [(col[0]+'_'+col[1]) if col[1]!='' else col[0] for col in multi.columns ] # same as before, but using 'join' function: ['_'.join(col).strip() if col[1]!='' else col[0] for col in multi.columns ]I will save the new column names:multi.columns= ['_'.join(col).strip() if col[1]!='' else col[0] for col in multi.columns ]This is the current look:multiThe current result can also be used when melting to a long format:multi.melt(id_vars=['Region','Regimetype'])____ SAVING FILEThe current _allDataFull_ data frame is clean and formatted. We have used CSV files so far:allDataFull.to_csv("allDataFull_OK.csv",index=False)When we read back the file you may notice something wrong:allDataFull_OK=pd.read_csv("allDataFull_OK.csv") allDataFull_OK.info() RangeIndex: 167 entries, 0 to 166 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 167 non-null object 1 Regimetype 167 non-null object 2 Overallscore 167 non-null float64 3 Electoralprocessandpluralism 167 non-null float64 4 Functioningofgovernment 167 non-null float64 5 Politicalparticipation 167 non-null float64 6 Politicalculture 167 non-null float64 7 Civilliberties 167 non-null float64 8 InternetccTLD 167 non-null object 9 iso2 166 non-null object 10 iso3 167 non-null object 11 Region 167 non-null object dtypes: float64(6), object(6) memory usage: 15.8+ KBWe need a file that avoids we have to reformat the columns again and again. 
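One workaround (a minimal sketch, assuming the levelsDemo categorical type defined earlier in this notebook) is to re-apply the ordered categorical type right after reading the CSV; it works, but every consumer of the file has to remember to repeat it, which is what the formats below avoid:
allDataFull_OK = pd.read_csv("allDataFull_OK.csv")
allDataFull_OK['Regimetype'] = allDataFull_OK['Regimetype'].astype(levelsDemo)
allDataFull_OK.Regimetype.dtype   # ordered categorical again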
Let's see the choices: For future use in Python:allDataFull.to_pickle("allDataFull_OK.pkl") # you will need: DF=pd.read_pickle("allDataFull.pkl") # or: # from urllib.request import urlopen # DF=pd.read_pickle(urlopen("https://..../allDataFull.pkl"),compression=None) allDataFull_OK=pd.read_pickle("allDataFull_OK.pkl") allDataFull_OK.info() allDataFull_OK.RegimetypeFor future use in R:from rpy2.robjects import pandas2ri pandas2ri.activate() from rpy2.robjects.packages import importr base = importr('base') base.saveRDS(allDataFull,file="allDataFull_OK.RDS") #In R, you call it with: DF = readRDS("allDataFull.RDS") #or, if iyou read from cloud: DF = readRDS(url("https://..../allDataFull.RDS")/Users/JoseManuel/anaconda3/envs/Evans_GovAnalytics/lib/python3.7/site-packages/rpy2/robjects/vectors.py:980: UserWarning: R object inheriting from "POSIXct" but without attribute "tzone".This notebook shows how to fit a Bayesian Gaussian mixture model using stochastic variational inference, with TensorFlow Probability (TFP) and TensorFlow 2.0.Let's load the packages we'll use. Note that you need the [TFP nightly build](https://www.tensorflow.org/probability/installnightly_builds) to use TFP with TF 2.0 (as of 2019-06-12).# Install packages !pip install tensorflow==2.0.0-beta0 -q !pip install --upgrade tfp-nightly -q # Imports import numpy as np import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions # Plot settings %config InlineBackend.figure_format = 'svg' # Random seed np.random.seed(12345) tf.random.set_seed(12345)DataLet's generate some points in 2D space, which form 3 clusters.# Generate some data N = 3000 X = np.random.randn(N, 2).astype('float32') X[:1000, :] += [2, 0] X[1000:2000, :] -= [2, 4] X[2000:, :] += [-2, 4] # Plot the data plt.plot(X[:, 0], X[:, 1], '.') plt.axis('equal') plt.show()The recommended way to feed data through a model in TF 2.0 is to use [the tf.data API](https://www.tensorflow.org/guide/datasets) (as opposed to the old `feed_dict` mechanism). So, let's create a TF Dataset which contains the data points from above.# Make a TensorFlow Dataset from that data batch_size = 500 dataset = tf.data.Dataset.from_tensor_slices( (X)).shuffle(10000).batch(batch_size)WARNING: Logging before flag parsing goes to stderr. W0613 04:56:29.335068 139962006239104 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/util/random_seed.py:58: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.whereMathLet's model the data-generating distribution with a Bayesian Gaussian mixture model. The model has $k \in \{1, ..., K\}$ mixture components - we'll use multivariate normal distributions. To match the data we generated, we'll use $K=3$ mixture components in $D=2$ dimensions.We'll use [stochastic variational inference](http://www.jmlr.org/papers/v14/hoffman13a.html) to fit the mixture model. Briefly, this means that to go from the non-Bayesian model to a variational Bayesian model, we'll replace each point parameter of our model with a probability distribution, called the "variational posterior". Then we'll optimize the variables of those variational posterior distributions to be as close as possible to the true posteriors. 
[See here](https://probflow.readthedocs.io/en/latest/math.html) for more info.Each of the 3 normal distributions has a mean ($\mu$). To probabilistically model the means, we'll use a normal distribution as the variational posterior for the mean of each component ($k$) in each dimension ($d$):$$\mu_{k,d} \sim \text{Normal} ( l_{k,d}, ~ s_{k,d} )$$where $l$ and $s$ are two variables which we will be trying to fit, corresponding to the mean and standard deviation of the component means' variational posteriors, respectively.Each of the 3 normal distributions also has a standard deviation ($\sigma$). We'll use a square-root inverse [Gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution) as the variational posterior for the standard deviation of each component in each dimension ([why?](https://probflow.readthedocs.io/en/latest/parameters.htmlscale-parameters)):$$\sigma_{k,d}^{-2} \sim \text{Gamma} ( \alpha_{k,d}, ~ \beta_{k,d} )$$where again $\alpha$ and $\beta$ are two variables we'll be trying to fit, corresponding to the shape and rate parameters of the Gamma variational posterior.Each of the 3 mixture components also has a weight, $\theta$, such that all 3 weights sum to 1. To model these weights, we'll use a [Dirichlet distribution](https://en.wikipedia.org/wiki/Dirichlet_distribution) as the variational posterior:$$\theta_k \sim \text{Dirichlet} ( \mathbf{c} )$$where $\mathbf{c}$ is a $K$-length vector of variables we'll be trying to fit, corresponding to the concentration parameters of the Dirichlet variational posterior. These concentration parameters determine how heavily to weight each mixture component.Finally, the probability of a data point being generated by any of the mixture components is modeled with a categorical distribution with observation probabilities according to $\theta$:$$Z_i \sim \text{Categorical} (\theta)$$and then the likelihood of a data point is determined by that mixture component:$$Y_i \sim \text{Normal} ( \mu_k, ~ \sigma_k )$$We'll put some priors on the parameters to regularize things a bit:$$\mu_{k,d} \sim \text{Normal} ( 0, 1 ) \\\sigma_{k,d}^{-2} \sim \text{Gamma} ( 5, 5 ) \\\theta_k \sim \text{Dirichlet} ([2, 2, 2])$$ ModelTo build this model in TensorFlow, we'll use the [model subclassing API](https://www.tensorflow.org/guide/kerasmodel_subclassing). This way we can build a class to represent the model, and it makes it easier to fit the model and access its variables.In the constructor (`__init__`), we'll create all the variables ($l, s, \alpha, \beta\,$ and $\mathbf{c}$) and define the priors.In the `call` method, we'll use TensorFlow to set up two computations. First, the log likelihood of each datapoint in the batch, given the model and the current value of the variational posterior's variables. And second, the sum of the Kullback–Leibler divergence between the variational posteriors and their priors.class GaussianMixtureModel(tf.keras.Model): """A Bayesian Gaussian mixture model. Assumes Gaussians' variances in each dimension are independent. Parameters ---------- Nc : int > 0 Number of mixture components. Nd : int > 0 Number of dimensions. 
""" def __init__(self, Nc, Nd): # Initialize super(GaussianMixtureModel, self).__init__() self.Nc = Nc self.Nd = Nd # Variational distribution variables for means self.locs = tf.Variable(tf.random.normal((Nc, Nd))) self.scales = tf.Variable(tf.pow(tf.random.gamma((Nc, Nd), 5, 5), -0.5)) # Variational distribution variables for standard deviations self.alpha = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.)) self.beta = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.)) # Variational distribution variables for component weights self.counts = tf.Variable(2*tf.ones((Nc,))) # Prior distributions for the means self.mu_prior = tfd.Normal(tf.zeros((Nc, Nd)), tf.ones((Nc, Nd))) # Prior distributions for the standard deviations self.sigma_prior = tfd.Gamma(5*tf.ones((Nc, Nd)), 5*tf.ones((Nc, Nd))) # Prior distributions for the component weights self.theta_prior = tfd.Dirichlet(2*tf.ones((Nc,))) def call(self, x, sampling=True, independent=True): """Compute losses given a batch of data. Parameters ---------- x : tf.Tensor A batch of data sampling : bool Whether to sample from the variational posterior distributions (if True, the default), or just use the mean of the variational distributions (if False). Returns ------- log_likelihoods : tf.Tensor Log likelihood for each sample kl_sum : tf.Tensor Sum of the KL divergences between the variational distributions and their priors """ # The variational distributions mu = tfd.Normal(self.locs, self.scales) sigma = tfd.Gamma(self.alpha, self.beta) theta = tfd.Dirichlet(self.counts) # Sample from the variational distributions if sampling: Nb = x.shape[0] #number of samples in the batch mu_sample = mu.sample(Nb) sigma_sample = tf.pow(sigma.sample(Nb), -0.5) theta_sample = theta.sample(Nb) else: mu_sample = tf.reshape(mu.mean(), (1, self.Nc, self.Nd)) sigma_sample = tf.pow(tf.reshape(sigma.mean(), (1, self.Nc, self.Nd)), -0.5) theta_sample = tf.reshape(theta.mean(), (1, self.Nc)) # The mixture density density = tfd.Mixture( cat=tfd.Categorical(probs=theta_sample), components=[ tfd.MultivariateNormalDiag(loc=mu_sample[:, i, :], scale_diag=sigma_sample[:, i, :]) for i in range(self.Nc)]) # Compute the mean log likelihood log_likelihoods = density.log_prob(x) # Compute the KL divergence sum mu_div = tf.reduce_sum(tfd.kl_divergence(mu, self.mu_prior)) sigma_div = tf.reduce_sum(tfd.kl_divergence(sigma, self.sigma_prior)) theta_div = tf.reduce_sum(tfd.kl_divergence(theta, self.theta_prior)) kl_sum = mu_div + sigma_div + theta_div # Return both losses return log_likelihoods, kl_sumNow, we can instantiate this model with $K=3$ mixture components in $D=2$ dimensions.# A GMM with 3 components in 2 dimensions model = GaussianMixtureModel(3, 2)We'll use the Adam optimizer:# Use the Adam optimizer optimizer = tf.keras.optimizers.Adam(lr=1e-3)Next we'll define a function which performs a training step: compute the gradients of the variables controlling the variational posterior distributions with respect to the loss, and use the optimizer to update those variables. The loss that we're using is the negative evidence lower bound (ELBO). Two factors contribute to the ELBO loss: the log likelihood of the data given the model with the current parameter values, and the KL divergence between the variational posterior distributions and their priors.Note that because we're using minibatches, we have to ensure the loss from these two sources are on the same scale! 
To get both terms on the same scale, we'll take the average log likelihood, but divide the sum of the divergences by the total number of data points in the dataset (*not* by the number of samples in the minibatch).@tf.function def train_step(data): with tf.GradientTape() as tape: log_likelihoods, kl_sum = model(data) elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods) gradients = tape.gradient(elbo_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables))Fitting the ModelNow that we've set everything up, we can fit the model. Using the `train_step` function we defined above, and having defined the TF Dataset, this is pretty simple:# Fit the model EPOCHS = 1000 for epoch in range(EPOCHS): for data in dataset: train_step(data)W0613 04:56:30.323180 139962006239104 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow_probability/python/distributions/mixture.py:154: Categorical.event_size (from tensorflow_probability.python.distributions.categorical) is deprecated and will be removed after 2019-05-19. Instructions for updating: The `event_size` property is deprecated. Use `num_categories` instead. They have the same value, but `event_size` is misnamed.Inspecting the ModelWe can look at the fit mixture density, which matches the distribution of the data pretty well:# Compute log likelihood at each point on a grid Np = 100 #number of grid points Xp, Yp = np.meshgrid(np.linspace(-6, 6, Np), np.linspace(-6, 6, Np)) Pp = np.column_stack([Xp.flatten(), Yp.flatten()]).astype('float32') Z, _ = model(Pp, sampling=False) Z = np.reshape(Z, (Np, Np)) # Show the fit mixture density plt.imshow(np.exp(Z), extent=(-6, 6, -6, 6), origin='lower') cbar = plt.colorbar() cbar.ax.set_ylabel('Likelihood')We can also access the individual variables of the variational posterior distributions:model.locsAnd, because we used the model subclassing API, we can easily get a list of all the variables in the model:model.trainable_variablesWe can view the variational posterior distributions. 
For example, the standard deviation of the first mixture component in the first dimension:# Sample from the std deviation variational posterior stds = tf.pow(tfd.Gamma(model.alpha, model.beta).sample(10000), -0.5) # Plot the samples sns.distplot(stds[:, 0, 0])And also the variational joint distributions of the means of the first mixture component:# Sample from the mean variational posterior means = tfd.Normal(model.locs, model.scales).sample(10000) # Plot the mean samples for a single sns.kdeplot(means[:, 0, 0].numpy(), means[:, 0, 1].numpy(), n_levels=10)Selenium Web Scraping and Browsing Automationimport IPython from IPython.display import IFrame, Javascript, display # selenium-python docs url_selenium_python_docs = IFrame( src='https://selenium-python.readthedocs.io/', width='100%', height='600' ) display(url_selenium_python_docs) import selenium.webdriver dir(selenium.webdriver) # !which chromedriver geckodriver safaridriver operadriver msedgedriver #### Note: In your machine, locate where your drivers were installed from selenium.webdriver import Chrome, Firefox, Safari, Opera, Edge import time from selenium import webdriver def get_chrome(): return Chrome(executable_path='/usr/local/bin/chromedriver') def get_firefox(headless=False): if headless: options = webdriver.FirefoxOptions() options.add_argument('--headless') return Firefox(executable_path='/usr/local/opt/ruby/bin/geckodriver', options=options) return Firefox(executable_path='/usr/local/opt/ruby/bin/geckodriver') def get_safari(): return Safari(executable_path='/usr/bin/safaridriver') def get_opera(): return Opera(executable_path='/usr/local/bin/operadriver') def get_msedge(): return Edge(executable_path='/Users/rodelarenas/Desktop/DEV/edgedriver_mac64/msedgedriver') # Basic Navigation BASE_URL = 'https://google.com' # open a webdriver firefox = get_firefox() # maximize, minimize firefox.maximize_window() time.sleep(1) # browsing firefox.get(BASE_URL) time.sleep(2) firefox.get('https://www.gmanmi.com') time.sleep(2) # F / B firefox.back() time.sleep(2) firefox.forward() time.sleep(5) # firefox.close() firefox.quit() # Basis Interaction from selenium.webdriver.common.keys import Keys dir(Keys) BASE_URL = 'https://trends.google.com/trends/trendingsearches/daily?geo=PH' firefox = get_firefox() firefox.get(BASE_URL) time.sleep(.2) firefox.maximize_window() dataset = [] for _ in range(5): try: # pause time.sleep(2) # element should be visible from the screen before you apply your action/s firefox.execute_script('window.scrollBy(0, document.body.scrollHeight)', '') # need to scroll the page to find the element # locate the element load_more_class = 'feed-load-more-button' load_more_button = firefox.find_element_by_class_name(load_more_class) # do the interaction load_more_button.click() # get the feed wrapper feed_wrapper_class = 'feed-list-wrapper' feeds = firefox.find_elements_by_class_name(feed_wrapper_class) for fw in feeds: # dates dates_class = 'content-header-title' dates = fw.find_element_by_class_name(dates_class) feed_date = dates.text # titles titles_class = 'title' titles = fw.find_elements_by_class_name(titles_class) txt_titles = [t.text for t in titles] # summary texts summary_text_class = 'summary-text' summary_text = fw.find_elements_by_class_name(summary_text_class) txt_summaries = [s.text for s in summary_text] # sources source_time_class = 'source-and-time' sources = fw.find_elements_by_class_name(source_time_class) txt_sources = [sp.text for sp in sources] for idx, _ in enumerate(txt_titles): title = 
txt_titles[idx] summary_text = txt_summaries[idx] sources_text = txt_sources[idx] sources_text = sources_text.split() source_page = sources_text[0] posted_date_time = sources_text[2] + ' ' + sources_text[3] data = { 'date': feed_date, 'title': title, 'summary_text': summary_text, 'source_page': source_page, 'posted_date_time': posted_date_time } dataset.append(data) except: pass import pandas as pd df = pd.DataFrame(dataset) df df.to_csv('scraped_data.csv', index=False)StableNet® WeatherMap Handling IntroductionThis script gives a simple example on how to Load a WeatherMap from StableNet® using the REST API Enhance the WeatherMap with advanced REST API calls to obtain alarms filtered for the selected element and add them as statistics "Re-Add" the WeatherMap to the server as enhanced version using the REST API once more.Import necessary python modulesimport warnings import requests from requests.auth import HTTPBasicAuth import getpass from xml.etree import ElementTreeEnter Server Credentials & WeatherMapID to be used as baseweathermapid = "1058" #Credentials server_ip = '10.20.20.113' server_port = '5443' username = 'infosim'Get Weather Map from Server and save XML to variablewarnings.filterwarnings("ignore") resp = requests.get("https://"+server_ip+":"+server_port+"/rest/weathermaps/get/" + weathermapid, verify=False, auth=HTTPBasicAuth(username, getpass.getpass('Enter password:'))) tree = ElementTree.fromstring(resp.content)Enter password: ··········Adding Alarms as Node Statistics to all Weather Map nodespw=getpass.getpass('Enter password:'); for child in tree.findall('weathermapnodes/weathermapnode'): erefID=child.find('elementreference').get('obid') erefDOMAIN=child.find('elementreference').get('domain') stats=child.find('statistics') filter = '' if erefDOMAIN=="device": filter='' if erefDOMAIN=="measurement": filter='' if erefDOMAIN=="link": filter='' filter=''+filter+'' print('[Adding alarms for '+erefDOMAIN+' element with ID ' + erefID+']', end='') resp=requests.post("https://"+server_ip+":"+server_port+"/rest/events/liveopenalarms", verify=False, auth=HTTPBasicAuth(username, pw), data=filter, headers={'Content-Type': 'application/xml'}) alarms = ElementTree.fromstring(resp.content) for openalarm in alarms: alarminfo = openalarm.find('rootcause').get('info') monitorid = openalarm.find('rootcause').get('monitorid') print('R', end='') # Create statistic entry newentry=ElementTree.SubElement(stats,'statistic',{'showaslabel': 'false', 'type': 'monitorvalue', 'title': '[ROOT CAUSE] '+alarminfo}) ElementTree.SubElement(newentry,'reference', {'obid': monitorid, 'domain': 'monitor'}) ElementTree.SubElement(newentry,'time', {'multiplier': '1440', 'type': 'lastminutes', 'timezone': 'Europe/Berlin', 'average': '60000'}) for symptom in openalarm.findall('symptoms/symptom'): alarminfo = symptom.get('info') monitorid = symptom.get('monitorid') print('S', end='') # Create statistic entry newentry=ElementTree.SubElement(stats,'statistic',{'showaslabel': 'false', 'type': 'monitorvalue', 'title': '[SYMPTOM] '+alarminfo}) ElementTree.SubElement(newentry,'reference', {'obid': monitorid, 'domain': 'monitor'}) ElementTree.SubElement(newentry,'time', {'multiplier': '1440', 'type': 'lastminutes', 'timezone': 'Europe/Berlin', 'average': '60000'}) print('') tree.set('name',tree.get('name')+' (Alarm Statistics)') finalMap = ElementTree.tostring(tree)Enter password: ··········Adding Extended Weather Map to server as new Weather Mapwarnings.filterwarnings("ignore") 
resp=requests.post("https://"+server_ip+":"+server_port+"/rest/weathermaps/add/", verify=False, auth=HTTPBasicAuth(username, getpass.getpass('Enter password:')), data=finalMap, headers={'Content-Type': 'application/xml'})Enter password: ··········Logistic Regression Logistic Regression configuration Parameters:* penalty{‘l1’, ‘l2’, ‘elasticnet’, ‘none’}, default=’l2’ - Used to specify the norm used in the penalization. The ‘newton-cg’, ‘sag’ and ‘lbfgs’ solvers support only l2 penalties. ‘elasticnet’ is only supported by the ‘saga’ solver. If ‘none’ (not supported by the liblinear solver), no regularization is applied. - New in version 0.19: l1 penalty with SAGA solver (allowing ‘multinomial’ + L1)* dualbool, default=False - Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. * tolfloat, default=1e-4 - Tolerance for stopping criteria. * Cfloat, default=1.0 - Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization.* fit_intercept bool, default=True - Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function. * intercept_scaling float, default=1 - Useful only when the solver ‘liblinear’ is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a “synthetic” feature with constant value equal to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic_feature_weight. - Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. * class_weight dict or ‘balanced’, default=None - Weights associated with classes in the form {class_label: weight}. If not given, all classes are supposed to have weight one. - The “balanced” mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)). - Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. - New in version 0.17: class_weight=’balanced’ * random_state int, RandomState instance, default=None - Used when solver == ‘sag’, ‘saga’ or ‘liblinear’ to shuffle the data. See Glossary for details. * solver{‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}, default=’lbfgs’ - Algorithm to use in the optimization problem. - For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones. - For multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss; ‘liblinear’ is limited to one-versus-rest schemes. - ‘newton-cg’, ‘lbfgs’, ‘sag’ and ‘saga’ handle L2 or no penalty - ‘liblinear’ and ‘saga’ also handle L1 penalty - ‘saga’ also supports ‘elasticnet’ penalty - ‘liblinear’ does not support setting penalty='none' - Note that ‘sag’ and ‘saga’ fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. - New in version 0.17: Stochastic Average Gradient descent solver. - New in version 0.19: SAGA solver. - Changed in version 0.22: The default solver changed from ‘liblinear’ to ‘lbfgs’ in 0.22. 
- max_iter int, default=100 - Maximum number of iterations taken for the solvers to converge. - multi_class{‘auto’, ‘ovr’, ‘multinomial’}, default=’auto’ - If the option chosen is ‘ovr’, then a binary problem is fit for each label. For ‘multinomial’ the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. ‘multinomial’ is unavailable when solver=’liblinear’. ‘auto’ selects ‘ovr’ if the data is binary, or if solver=’liblinear’, and otherwise selects ‘multinomial’. - New in version 0.18: Stochastic Average Gradient descent solver for ‘multinomial’ case. - Changed in version 0.22: Default changed from ‘ovr’ to ‘auto’ in 0.22. - verbose int, default=0 - For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. - warm_start bool, default=False - When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Useless for liblinear solver. See the Glossary. - New in version 0.17: warm_start to support lbfgs, newton-cg, sag, saga solvers. - n_jobs int, default=None - Number of CPU cores used when parallelizing over classes if multi_class=’ovr’”. This parameter is ignored when the solver is set to ‘liblinear’ regardless of whether ‘multi_class’ is specified or not. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details. - l1_ratio float, default=None - The Elastic-Net mixing parameter, with 0 <= l1_ratio <= 1. Only used if penalty='elasticnet'. Setting l1_ratio=0 is equivalent to using penalty='l2', while setting l1_ratio=1 is equivalent to using penalty='l1'. For 0 < l1_ratio <1, the penalty is a combination of L1 and L2.# Pandas for data loading and processing import pandas as pd #Data Analysis from sklearn.linear_model import LogisticRegression #Data splitting from sklearn.model_selection import train_test_split #Numpy for diverse math functions import numpy as np #Model validation from sklearn.metrics import matthews_corrcoef from sklearn.metrics import accuracy_score from sklearn.metrics import roc_curve,roc_auc_score from sklearn.metrics import f1_score #visualization libraries import matplotlib.pyplot as plt import seaborn as sns #Upsampling from sklearn.utils import resample # Reading data from schwartau schwartau_daily = pd.read_csv('data/summary/schwartau_daily_filtered.csv') wurzburg_daily = pd.read_csv('data/summary/wurzburg_daily_filtered.csv') #describe our data wurzburg_daily[wurzburg_daily.select_dtypes(exclude='object').columns].describe().\ style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True)) # Display new class counts wurzburg_daily.flow_processed.value_counts() #describe our data schwartau_daily[schwartau_daily.select_dtypes(exclude='object').columns].describe().\ style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True)) # Display new class counts schwartau_daily.flow_processed.value_counts() # split into test and train # Separate X and Y (Characteristics and Labels) labels_schwartau = schwartau_daily['flow_processed'] data_schwartau = schwartau_daily.drop(['flow_processed','timestamp'], axis=1) x_train_schwartau, x_test_schwartau, y_train_schwartau, y_test_schwartau = train_test_split(data_schwartau, labels_schwartau, test_size=0.20, random_state = 1019124610) # split into test and train # Separate X and Y (Characteristics and Labels) # Upsample # Separate majority and minority classes df_majority = 
wurzburg_daily[wurzburg_daily.flow_processed==0] df_minority = wurzburg_daily[wurzburg_daily.flow_processed==1] # Upsample minority class df_minority_upsampled = resample(df_minority, replace=True, # sample with replacement n_samples=621, # to match majority class random_state=123) # reproducible results # Combine majority class with upsampled minority class df_upsampled = pd.concat([df_majority, df_minority_upsampled]) labels_wurzburg = df_upsampled['flow_processed'] data_wurzburg = df_upsampled.drop(['flow_processed','timestamp'], axis=1) x_train_wurzburg, x_test_wurzburg, y_train_wurzburg, y_test_wurzburg = train_test_split(data_wurzburg, labels_wurzburg, test_size=0.20, stratify=labels_wurzburg, random_state = 1019124610) #Model preparation (parameters) penalty_values = ['l1', 'l2', 'elasticnet', 'none'] dual_value = False # Can be True or False tol_value = 1e-4 C_value = 1 fit_intercept_value = True # Can be True or False intercept_scaling_value = 1 class_weight_value = None random_state_value = 1019124610 solver_values = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'] max_iter_value = 10000 multi_class_values = ['auto', 'ovr', 'multinomial'] verbose_value = 0 n_jobs_value = None l1_ratio_values = np.arange(0.0,1.1,0.1) # LR = LogisticRegression(penalty='none', dual=False, # tol=0.0001, C=1.0, fit_intercept=True, # intercept_scaling=1, class_weight=None, # random_state=None, solver='lbfgs', max_iter=100, # multi_class='auto', verbose=0, warm_start=False, # n_jobs=None, l1_ratio=None); def modelo_schwartau(LR): LR.fit(x_train_schwartau,y_train_schwartau) y_test_predicted = LR.predict(x_test_schwartau) y_test_scores = LR.decision_function(x_test_schwartau) MCC = matthews_corrcoef(y_test_schwartau, y_test_predicted) # print("matthews_corrcoef", MCC) ACC = accuracy_score(y_test_schwartau, y_test_predicted) # print("Accuracy", ACC) roc_auc=roc_auc_score(y_test_schwartau, y_test_scores) # print("ROC", roc_auc) f1=f1_score(y_test_schwartau, y_test_predicted, average='weighted') return [MCC,ACC,roc_auc,f1] complete_results_LR_schwartau = [] for penalty in penalty_values: for solver in solver_values: if solver == 'lbfgs': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_schwartau.append(results) if solver == 'newton-cg': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_schwartau.append(results) if solver == 'liblinear': if (penalty == 'none' or penalty == 'elasticnet') : continue else: # 
print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_schwartau.append(results) if solver == 'sag': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_schwartau.append(results) if solver == 'saga': ## Generar modelo if penalty != 'elasticnet': # print("-----------------------") LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_schwartau.append(results) else: for l1_ratio in l1_ratio_values: # print("-----------------------") LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=l1_ratio); # print("Params: ",solver,"-",penalty,"-",l1_ratio) results=modelo_schwartau(LR) str_label = "LR-"+solver+"-"+penalty+"-"+str(l1_ratio) results.append(str_label) complete_results_LR_schwartau.append(results) # print("-----------------------") def modelo_wurzburg(LR): LR.fit(x_train_wurzburg,y_train_wurzburg) y_test_predicted = LR.predict(x_test_wurzburg) y_test_scores = LR.decision_function(x_test_wurzburg) MCC = matthews_corrcoef(y_test_wurzburg, y_test_predicted) # print("matthews_corrcoef", MCC) ACC = accuracy_score(y_test_wurzburg, y_test_predicted) # print("Accuracy", ACC) roc_auc=roc_auc_score(y_test_wurzburg, y_test_scores) # print("ROC", roc_auc) f1=f1_score(y_test_wurzburg, y_test_predicted, average='weighted') return [MCC,ACC,roc_auc,f1] complete_results_LR_wurzburg = [] for penalty in penalty_values: for solver in solver_values: if solver == 'lbfgs': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, 
warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_wurzburg.append(results) if solver == 'newton-cg': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_wurzburg.append(results) if solver == 'liblinear': if (penalty == 'none' or penalty == 'elasticnet') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_wurzburg.append(results) if solver == 'sag': if (penalty != 'l2'or penalty != 'none') : continue else: # print("-----------------------") ## Generar modelo LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_wurzburg.append(results) if solver == 'saga': ## Generar modelo if penalty != 'elasticnet': # print("-----------------------") LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=None); # print("Params: ",solver,"-",penalty) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty results.append(str_label) complete_results_LR_wurzburg.append(results) else: for l1_ratio in l1_ratio_values: # print("-----------------------") LR = LogisticRegression(penalty=penalty, dual=dual_value, tol=tol_value, C=C_value, fit_intercept=fit_intercept_value, intercept_scaling=1, class_weight=class_weight_value, random_state=random_state_value, solver=solver, max_iter=max_iter_value, multi_class='auto', verbose=verbose_value, warm_start=False, n_jobs=n_jobs_value, l1_ratio=l1_ratio); # print("Params: ",solver,"-",penalty,"-",l1_ratio) results=modelo_wurzburg(LR) str_label = "LR-"+solver+"-"+penalty+"-"+str(l1_ratio) results.append(str_label) complete_results_LR_wurzburg.append(results) # print("-----------------------") #Transforming results into dataframes and adding an id column 
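# Note on the solver/penalty loops above: the guard `if (penalty != 'l2' or penalty != 'none')`
# is always True (no value can equal both strings at once), so the lbfgs, newton-cg and sag
# branches always hit `continue` and never fit a model; only the liblinear and saga
# configurations reach the results lists, which is consistent with the IDs shown in the
# selected-model output below. A sketch of the compatibility check that was presumably
# intended (an assumption about the intended logic, left commented out):
#   valid_penalties = {'lbfgs': {'l2', 'none'}, 'newton-cg': {'l2', 'none'},
#                      'sag': {'l2', 'none'}, 'liblinear': {'l1', 'l2'},
#                      'saga': {'l1', 'l2', 'elasticnet', 'none'}}
#   if penalty not in valid_penalties[solver]:
#       continue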
LR__schwartau = pd.DataFrame(complete_results_LR_schwartau, columns = ['matthews_corrcoef','Accuracy','ROC','F1','Label']) LR__schwartau['ID'] = range(1, 1+len(LR__schwartau)) LR__schwartau_sort=LR__schwartau.sort_values(by=['matthews_corrcoef','F1','ROC','Accuracy'], ascending=[False,False,False,False]) LR__wurzburg = pd.DataFrame(complete_results_LR_wurzburg, columns = ['matthews_corrcoef','Accuracy','ROC','F1','Label']) LR__wurzburg['ID'] = range(1, 1+len(LR__wurzburg)) LR__wurzburg_sort=LR__wurzburg.sort_values(by=['matthews_corrcoef','F1','ROC','Accuracy'], ascending=[False,False,False,False]) # Graph to display results of Logistic Regression ax = LR__schwartau_sort[['matthews_corrcoef','F1','ROC','Accuracy']].plot(kind='bar',figsize=(30,15),title='Results Logistic Regression',fontsize=20) ax.legend(prop=dict(size=20),loc='center left', bbox_to_anchor=(1.0, 0.5)) for p in ax.patches: text = ax.annotate("%.3f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points') text.set_fontsize(15) plt.rc('figure', titlesize=50) plt.show() # Graph to display results of Logistic Regression ax = LR__wurzburg_sort[['matthews_corrcoef','F1','ROC','Accuracy']].plot(kind='bar',figsize=(30,15),title='Results Logistic Regression',fontsize=20) ax.legend(prop=dict(size=20),loc='center left', bbox_to_anchor=(1.0, 0.5)) for p in ax.patches: text = ax.annotate("%.3f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points') text.set_fontsize(15) plt.rc('figure', titlesize=50) plt.show() # Selected Model # schwartau print(LR__schwartau_sort.head(1)) print('------------------------------------------------------------------------') # wurzburg print(LR__wurzburg_sort.head(1))matthews_corrcoef Accuracy ROC F1 Label ID 15 0.398218 0.704545 0.727391 0.700126 LR-saga-none 16 ------------------------------------------------------------------------ matthews_corrcoef Accuracy ROC F1 Label ID 2 0.212497 0.60241 0.602581 0.594157 LR-liblinear-l2 3Assignments for Task 0 Libraries to learn:1. Jupyter Notebook2. Python3. Numpy and Matplotlib4. Pandas5. Sklearn 0. Activating Python Virtual EnvironmentOnce you are done with software installation, you can activate your virtual environment by typing in your Terminal or Anaconda Prompt:`conda activate HC9999_stage1`You should see your terminal prompt change to HC9999_stage1. All the tasks below should be performed inside this environment. 1. Jupyter Notebook 1.1. Quick introduction to Jupyter NotebookJupyter Notebook has features like auto-completion, inline documentation, etc. which come handy when you are using lots of libraries in your code.You will learn how to use it on the way. But for starters:**1. Writing code**You can write code in the cells. To execute the code, press ```CTRL + ENTER```. To add a cell after executing current cell, use ```SHIFT + ENTER```.**2. Markdown in Jupyter notebook**You can use Markdown in Jupyter Notebook. Markdown is also written in cells but for that you need to convert code cell to Markdown cell. To do that go on a cell and press ```ESC``` key till the cell turns **BLUE** from **GREEN**. Now press ```M``` key and try to type anything and press ```SHIFT + ENTER```. To convert a cell back to code cell, use ```ESC``` till it turns **BLUE** and press ```Y``` key. **3. Auto-completion**Use ```TAB``` key to show suggestions for auto-complete.**4. 
Documentation**Use ```Shift + TAB``` keys to see documentation. The **more times you press TAB, you get to see more elaborate documentation**. 1.2. Resources 1. http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/master/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb2. http://cs231n.github.io/ipython-tutorial/3. https://medium.com/codingthesmartway-com-blog/getting-started-with-jupyter-notebook-for-python-4e7082bd5d46 2. Python**Python is a programming language** that we will use for all higher level tasks. It is very easy to learn and use. If you are familiar with any programming languages like C or Java you wouldn't have any difficulty picking it up. For Python, follow this tutorial by Stanford University: http://cs231n.github.io/python-numpy-tutorial/Also see [official documentation](https://docs.python.org/3.6/) or google search for specific python stuff. Assigment 1.1Create a function **sigmoid**. This should accept a list and return a list by evaluating sigmoid function on each element of the list.import math def sigmoid(x_list): ''' Returns a list of sigmoids Sigmoid is defined as f(x) = 1/(1+e^-x) ''' y_list = [] for i in x_list: y_list.append(1.0/(1.0+math.exp(-i))) return y_list x_list = [1,2,3,4,-4] sigmoid(x_list)3. NumPy and Matplotlib**NumPy** is a numerical computing library for Python. You can use NumPy to do linear algebra operations easily in Python. It has a similar interface to other numerical computing softwares like Octave, MATLAB, etc.**Matplotlib** is a **plotting package for Python**. Soon enough in ML, we will have lots of columns of numbers and we might not be able to make sense of them by just looking at them. That is when Matplotlib is going to come handy. Matplotlib allows us to plot charts, histograms, images, etc. In simple words it is used for **data visualization**.The same tutorial we used for Python has enough of NumPy guide for us to start. Later we will use functions as needed and required. It also has basic Matplotlib usage guide.For NumPy and Matplotlib follow this tutorial by Stanford University to get started: http://cs231n.github.io/python-numpy-tutorial/ Assignment 1.2Complete functions for **forward pass** and **MSE**. Forward pass for linear regression and MSE is mean squared error calculated on predicted and actual output.# NumPy Task # Complete the following functions # GIVEN: # X and y_actual # TODO: # forward_pass function: Forward pass of linear regression # mse function: calculates L2 loss between actual and predicted import numpy as np np.random.seed(seed=1) # TODO: Complete the function def forward_pass(W, x, b): ''' Returns predicted y y_pred = W*x + b ''' return (W*x) + b # TODO: Complete the function def mse(y_pred, y_actual): ''' Returns MSE = (1/N)*(Summation(i = 1 to N)[(y_pred - y_actual)^2]) ''' return np.mean(np.square(y_pred-y_actual)) # Generate data # Lets assume input is a table of size 2000 * 2 where 2000 indicates number of samples and 2 are attributes, # one dependent and one independent. 
x = np.random.randn(2000, 1) # independent y_actual = 2*x + 5 + np.random.randn(2000, 1) # dependent # TODO: # Divide dataset: 1500 samples in training set and 500 samples in test set x_train = x[:1500] y_train = y_actual[:1500] x_test = x[1500:] y_test = y_actual[1500:] # Initialize random weight W = np.random.randn(1) b = np.random.randn(1) # Compute Forward Pass for x_train y_pred_train = forward_pass(W, x_train, b) # Compute MSE between 'y_train' and 'y_pred_train' print('Mean Squared Error for y_train and y_pred_train:', mse(y_train, y_pred_train)) # Compute Forward Pass for x_test y_pred_test = forward_pass(W, x_test, b) # Compute MSE between 'y_test' and 'y_pred_test' print('Mean Squared Error for y_test and y_pred_test:', mse(y_test, y_pred_test))Mean Squared Error for y_train and y_pred_train: 29.999152323334894 Mean Squared Error for y_test and y_pred_test: 29.114060058145174Visualization with Matplotlibimport matplotlib.pyplot as plt # Visualize predicted values # make a scatter plot with x_train and y_pred_train plt.scatter(x_train, y_pred_train) # put a title plt.title('Predicted') # generate and show a graph # unless we use show(), the graph is saved only in memory plt.show() # Visualize actual values plt.scatter(x_train, y_train) plt.title('Actual') plt.show()4. Pandas**Pandas** is used for data analytics. With Pandas, you can load datasets as tables. You can then perform operations on the data now present as a table.You can do operations with Pandas that you can do in Numpy or Python lists and dictionaries, like slicing, indexing, etc. Resources1. ml-coding-tutorial.pdf is very brief and to the point for completing tasks2. Very good for overall understanding of pandas https://github.com/jvns/pandas-cookbook3. Tutorials from pandas official website http://pandas.pydata.org/pandas-docs/stable/tutorials.htmlimport pandas as pd # we will read and check our data iris = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv') # pandas can load data from URIs like URLs, Filesystems, etc # try downloading some CSV files and load it with pandas iris.head() # This function will show simple stats for loaded dataset iris.describe() # Checking data types of individual columns iris.dtypesYou can use indexing like in numpy and dictionaries in pandas too. There are also iloc, loc and ix for indexing.# You can use loc, iloc and ix to select single or multiple rows print('Single row\n------------\n' + str(iris.iloc[0])) print('\nMultiple rows\n---------------\n' + str(iris.iloc[[0, 5]])) # Pandas has dataframes and series as major data structures # This is a Series type(iris.loc[1]) # single row # This is a DataFrame type(iris.loc[[1,2]]) # multiple rows iris['sepal_length'] # single column iris[['sepal_length', 'sepal_width']] # multiple columns iris['species'].unique()5. Sklearn**Scikit learn** is a library for machine learning. It has several model architectures which can learn data that you provide. Even if you are writing your own model architectures or algorithms, sklearn comes in handy for data pre-processing operations like normalization and calculating errors and accuracies, etc. Resources1. ml-coding-tutorial.pdf is very brief and to the point for completing tasks2. 
Tutorials from sklearn official website http://scikit-learn.org/stable/tutorial/index.htmlfrom sklearn.linear_model import LinearRegressionWe will now fit the above **"x"** and **"y_actual"** values and again evaluate the loss after fitting unlike before where we never trained the algorithm.# import the model and create an object of it model = LinearRegression() # every model or estimator as sklearn likes to call it will have a fit method # use it with train data model.fit(x_train, y_train) # similarly the model will have a predict function to make predictions on new data # evaluate model accuracy with train and test data y_pred = model.predict(x_test) print('Mean Squared Error:', mse(y_pred, y_test)) # notice the difference in values plt.scatter(x_test, y_pred) plt.title('Predicted') plt.show() plt.scatter(x_test, y_test) plt.title('Actual') plt.show()Assignment 1.3Use pandas to load iris dataset. Iris dataset has 3 types flowers in it, you can see from above. Your task is to figure out name of the flower given other parameters. Tasks:1. Load iris dataset as pandas dataframe.2. Shuffle the dataset.3. Seperate the dataframe into independent X and dependent y parameters. 4. Split X and y into train and test set. Use train_test_split from sklearn.model_selection.5. Use SVC classifier from sklearn. Checkout [sklearn docs](http://scikit-learn.org/stable/documentation.html) or just google how to use SVC. The interface is similar to LinearRegression.6. Fit the train data on it.7. Predict y on X_test.8. Find loss.# 1. load pandas library and load iris data set as a dataframe import pandas as pd data = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv') # 2. Data-preprocessing: shuffle the dataframe using sample() function pre-defined on iris dataframe # You can also google search how to shuffle dataframe # Additionally comment on why do you need to shuffle the dataframe data = data.sample(frac=1).reset_index(drop=True) # 3. split iris dataframe columns into two set: # - dependent parameters X (four columns) # - independent parameters y which is name of the flower (1 column) # select all rows leaving only flower name column using iloc # input variables; other than flower information # output variable; only flower information x = data.iloc[:,:4] y = data.iloc[:,-1] # 4. divide X and y into train and test set # use train_test_split from sklearn.model_selection from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3) # 5. import the model # 6. Fit the model # use SVC from sklearn.svm from sklearn.svm import SVC model = SVC() model.fit(x_train, y_train) # 7. Predict the test examples pred = model.predict(x_test) # 8. 
Find the accuracy on test samples using score() function model.score(x_test, y_test) # For a manual check you can store predicted values in a numpy array # and then use np.column_stack to view them side by side import numpy as np y_pred_test = model.predict(x_test) np.column_stack([pred, y_test])Exercise 4 - MINST Convolutional Autoencoder (Instructor Version)In this exercise we will construct a convolutional autoencoder for the sample of the MNISTdataset.Import pickle, numpy, matplotlib as well as the *Model* class from **keras.models** and *Input* and *Conv2D*, *MaxPooling2D* and *UpSampling2D* from **keras.layers**.import pickle import numpy as np import matplotlib.pyplot as plt from keras.models import Model from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2DUsing TensorFlow backend.Load the datawith open('mnist.pkl', 'rb') as f: images = pickle.load(f)['images']Rescale the images to have values between 0 and 1.images = images / 255.We do need to reshape the images to add a single depth channel for use with convolutional stages. Reshape the images to have a shape of 28 x 28 x 1.images = images.reshape((-1, 28, 28, 1))Define an input layer. We will use the same shape input as an image.input_layer = Input(shape=(28, 28, 1,))Add a convolutional stage, with 16 layers or filters, a 3 x 3 weight matrix, a ReLU activation function and using **same** padding which means the output has the same length as the input image.hidden_encoding = Conv2D( 16, # Number of layers or filters in the weight matrix (3, 3), # Shape of the weight matrix activation='relu', padding='same', # How to apply the weights to the images )(input_layer)WARNING:tensorflow:From /home/ben/envs/packt/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. 
Instructions for updating: Colocations handled automatically by placer.Add a max pooling layer to the encoder with a 2 x 2 kernel.encoded = MaxPooling2D((2, 2))(hidden_encoding)Add a decoding convolutional layerhidden_decoding = Conv2D( 16, # Number of layers or filters in the weight matrix (3, 3), # Shape of the weight matrix activation='relu', padding='same', # How to apply the weights to the images )(encoded)Add an upsampling layer.upsample_decoding = UpSampling2D((2, 2))(hidden_decoding)Add the final convolutional stage, using 1 layer as per the initial image depthdecoded = Conv2D( 1, # Number of layers or filters in the weight matrix (3, 3), # Shape of the weight matrix activation='sigmoid', padding='same', # How to apply the weights to the images )(upsample_decoding)Construct the model by passing the first and last layers of the network to the Model class.autoencoder = Model(input_layer, decoded)Display the structure of the modelautoencoder.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) (None, 28, 28, 1) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 28, 28, 16) 160 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 14, 14, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 14, 14, 16) 2320 _________________________________________________________________ up_sampling2d_1 (UpSampling2 (None, 28, 28, 16) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 28, 28, 1) 145 ================================================================= Total para[...]Compile the autoencoder using a binary cross entropy loss function and adadelta gradient descent.autoencoder.compile(loss='binary_crossentropy', optimizer='adadelta')Now let's fit the model, again we pass the images as the training data and as the desired output. Train for 20 epochs as convolutional networks take a lot longer to compute.autoencoder.fit(images, images, epochs=20)WARNING:tensorflow:From /home/ben/envs/packt/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. 
Epoch 1/20 10000/10000 [==============================] - 7s 746us/step - loss: 0.1165 Epoch 2/20 10000/10000 [==============================] - 7s 724us/step - loss: 0.0706 Epoch 3/20 10000/10000 [==============================] - 8s 786us/step - loss: 0.0685 Epoch 4/20 10000/10000 [==============================] - 8s 776us/step - loss: 0.0675 Epoch 5/20 10000/10000 [==============================] - 7s 728us/step - loss: 0.0669 Epoch 6/20 10000/10000 [==============================] - 8s 830us/step - loss: 0.0663 Epoch 7/20 10000/10000 [==============================] - 9s 880us/step - loss: 0.0659 Epoch 8/20 10000/10000 [==============================] - 9s 939us/step - loss: 0.0656 Epoch 9/20 10000/10000 [=================[...]Calculate and store the output of the encoding stage for the first 5 samples.encoder_output = Model(input_layer, encoded).predict(images[:5])Reshape the encoder output for visualisation.encoder_output = encoder_output.reshape((-1, 14 * 14, 16))Get the output of the decoder for the 5 imagesdecoder_output = autoencoder.predict(images[:5])Reshape the decoder output to be 28 x 28 in sizedecoder_output = decoder_output.reshape((-1, 28, 28))Reshape the original images back to be 28 x 28 in size.images = images.reshape((-1, 28, 28))Plot the original image, the mean encoder output and the decoder.plt.figure(figsize=(10, 7)) for i in range(5): plt.subplot(3, 5, i + 1) plt.imshow(images[i], cmap='gray') plt.axis('off') plt.subplot(3, 5, i + 6) plt.imshow(encoder_output[i], cmap='gray') plt.axis('off') plt.subplot(3, 5, i + 11) plt.imshow(decoder_output[i], cmap='gray') plt.axis('off')#crie um programa que tenha uma tupla com varias palavras (nao usar acentos) #depois disso vc deve mostrar para cada palavras, quais sao suas vogais. 
lista = ('aprender', 'programar', 'curso', 'video', 'python', 'computador', 'internet', 'google') for p in lista: print(f'\nna palavra {p.upper()} temos ', end=' ') for letra in p: if letra.lower() in 'aeiou': print(letra, end=' ')Data Download!git clone https://github.com/rois-codh/kmnist !python kmnist/download_data.py !tar xf kkanji.tarVisualizePATH = Path('kkanji2') !ls {PATH}/U+4E71/ imdata = plt.imread(str(PATH/'U+4E71/0d0cd162e69ced42.png')) imdata = io.imread(str(PATH/'U+4E71/0d0cd162e69ced42.png')) imdata/256 imdata.shape np.expand_dims(imdata, axis=-1).shape plt.imshow(imdata)Scan filenamesfilenames = [] labels = [] for p in tqdm(PATH.glob('*/*.png')): fname = str(p) filenames.append(fname) label = fname.split('/')[1] labels.append(label) size = len(filenames) classes = list(set(labels)) ctoi = dict((c, i) for i, c in enumerate(classes)) itoc = dict((i, c) for i, c in enumerate(classes)) class_size = len(classes) itoc[1], ctoi['U+6D17'] labels = [ctoi[lbl] for lbl in labels] def ceildiv(a, b): return -(-a // b) def plots_from_files(imspaths, figsize=(10,5), rows=1, titles=None, maintitle=None): """Plot the images in a grid""" f = plt.figure(figsize=figsize) if maintitle is not None: plt.suptitle(maintitle, fontsize=10) for i in range(len(imspaths)): sp = f.add_subplot(rows, ceildiv(len(imspaths), rows), i+1) sp.axis('Off') if titles is not None: sp.set_title(titles[i], fontsize=16) img = plt.imread(imspaths[i]) plt.imshow(img, cmap='gray') # cmap='binary' idxs = [np.random.randint(0, size-1) for i in range(9)] imspaths = [filenames[i] for i in idxs] titles = [labels[i] for i in idxs] plots_from_files(imspaths, rows=3, titles=titles, figsize=(10, 10))DataLoaderbs = 128 data = np.zeros((len(filenames), 1, 64, 64), dtype=np.float) for index, fname in tqdm(enumerate(filenames)): imdata = io.imread(fname) # normalize the data normalized = imdata / 256 data[index] = np.expand_dims(normalized, axis=0) # shape (1, 64, 64) data.shape140424it [01:50, 1269.52it/s]normalize the data to be between 0 and 1np.save(PATH/'data.pkl', data) np.save(PATH/'labels.pkl', labels) data = np.load(PATH/'data.pkl') labels = np.load(PATH/'labels.pkl') data_cuda = torch.FloatTensor(data).cuda() labels_cuda = torch.LongTensor(labels).cuda() valid_offset = int(data_cuda.shape[0] * 0.9) X_train, y_train = data_cuda[: valid_offset], labels_cuda[: valid_offset] X_valid, y_valid = data_cuda[valid_offset: ], labels_cuda[valid_offset: ] X_train.shape, len(y_train), X_valid.shape, len(y_valid) train_ds = torch.utils.data.TensorDataset(X_train, y_train) train_dl = torch.utils.data.DataLoader(train_ds, batch_size=bs, shuffle=True) valid_ds = torch.utils.data.TensorDataset(X_valid, y_valid) valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=bs, shuffle=True)Modeldef conv2d(ni, nf, k=3, s=2, p=1): return nn.Conv2d(ni, nf, kernel_size=k, stride=s, padding=p) def upsample(ni, nf, k=3, s=2, p=1, op=0): return nn.ConvTranspose2d(ni, nf, kernel_size=k, stride=s, padding=p, output_padding=op) class Flatten(nn.Module): def __init__(self): super().__init__() def forward(self, x): x = x.view(x.size()[0], -1) return x class ResnetBlock(nn.Module): def __init__(self, nf): super().__init__() self.conv1 = conv2d(nf, nf, s=1) self.batchnorm = nn.BatchNorm2d(nf) self.relu = nn.ReLU(True) self.conv2 = conv2d(nf, nf, s=1) def forward(self, x): out = self.conv1(x) out = self.batchnorm(out) out = self.relu(out) out = self.conv2(out) return x + out def conv_res(ni, nf): return nn.Sequential(conv2d(ni, nf), ResnetBlock(nf)) def 
up_res(ni, nf): return nn.Sequential(upsample(ni, nf, op=1), ResnetBlock(nf)) def create_encoder(image_size=64, latent_dim=4): channels = [1, 4, 8, 16, 32] layers = [] layers.append(conv_res(channels[0], channels[1])) # (bs, 32, 32) layers.append(conv_res(channels[1], channels[2])) # (bs, 16, 16) layers.append(conv_res(channels[2], channels[3])) # (bs, 8, 8) layers.append(conv_res(channels[3], channels[4])) # (bs, 4, 4) return nn.Sequential(*layers) def create_decoder(image_size=64, latent_dim=4): channels = [32, 16, 8, 4, 1] layers = [] # use upsampling layers.append(up_res(channels[0], channels[1])) # (bs, 16, 16) layers.append(up_res(channels[1], channels[2])) # (bs, 8, 8) layers.append(up_res(channels[2], channels[3])) # (bs, 4, 4) layers.append(up_res(channels[3], channels[4])) # (bs, 1, 1) return nn.Sequential(*layers) enc = create_encoder() dec = create_decoder() enc decClassification Modelclass Classifier(nn.Module): def __init__(self, enc, class_size): super().__init__() self.enc = enc self.flatten = Flatten() # (bs, 16 * 32) self.relu = nn.ReLU(True) self.drop = nn.Dropout() self.lin = nn.Linear(in_features= 512, out_features=class_size) def forward(self, X): y = self.enc(X) y = self.flatten(y) y = self.relu(y) y = self.drop(y) return self.lin(y) model = Classifier(enc, class_size).cuda() lr=2e-2 loss_func = nn.CrossEntropyLoss() def update(x,y,lr): opt = optim.Adam(model.parameters(), lr) y_hat = model(x) loss = loss_func(y_hat, y) loss.backward() opt.step() opt.zero_grad() return loss.item() losses = [update(x,y,1e-3) for x,y in train_dl] plt.plot(losses);AutoEncoder Modelclass AutoEncoder(nn.Module): def __init__(self, encoder, decoder): super().__init__() self.enc = encoder self.dec = decoder def forward(self, X): y = self.enc(X) y = self.dec(y) return y enc = create_encoder() dec = create_decoder() model = AutoEncoder(enc, dec).cuda() lr=2e-2 loss_func = nn.L1Loss() def update(x,y,lr): opt = optim.Adam(model.parameters(), lr) y_hat = model(x) loss = loss_func(y_hat, y) loss.backward() opt.step() opt.zero_grad() return loss.item() train_ds = torch.utils.data.TensorDataset(X_train, X_train) train_dl = torch.utils.data.DataLoader(train_ds, batch_size=bs, shuffle=True) epochs = 10 valid_losses = [] for epoch in tqdm(range(epochs)): losses = [update(x,y,lr) for x,y in train_dl] y_hat = model(X_valid) valid_loss = loss_func(y_hat, X_valid) valid_losses.append(valid_loss.item()) plt.plot(losses)AutoEncoder#model = nn.Sequential(enc, dec).cuda()K Nearest Neighbor A movie-ing target Friend inventory Use handout![bullseye3](img/bullseye2.png) Friend Inventory What would each person choose?![movies](img/movies.png) Friend Inventory Decision majority by ringIf you just polled the inner ring of people, what movie would you end up seeing?- How about if you polled the first *and* second ring?- The first three rings?- All of it?Share with your neighbor what movie you'd end up seeing. Friend Inventory What did you end up seeing?Who's movie choices changed based on how many people you polled?![movies](img/movies.png) Friend Inventory What's the "algorithm" we used for this process, in normal words? How does this relate to K nearest neighbor(knn)? ![annotate](img/bullseye-annotate.png) Starting OffWe are going to revisit the pima native Americans diabetes dataset. Can we use knn to classify people correctly and then predict if someone will have diabetes? 
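Before the step-by-step walkthrough that follows, here is a minimal end-to-end sketch of the idea, assuming a local `diabetes.csv` with the standard Pima columns and an `Outcome` label (the file path and column name are assumptions, not taken from the lesson code):

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score

diabetes = pd.read_csv('diabetes.csv')   # assumed local copy of the Pima dataset
X = diabetes.drop('Outcome', axis=1)     # features
y = diabetes['Outcome']                  # 1 = diabetes, 0 = no diabetes

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

# KNN is distance-based, so put all features on the same scale first
scaler = StandardScaler().fit(X_train)
X_train, X_test = scaler.transform(X_train), scaler.transform(X_test)

knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
print('F1:', f1_score(y_test, knn.predict(X_test)))
```

The sections below break these same steps apart: loading the data, scaling, the sklearn fit/predict pattern, and choosing K.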
Implementing a K-Nearest Neighbors Model Objective: - To be able to implement a KNN classification model, and utilize a visualization to identify the best value of K. Agenda- What is the **K-nearest neighbors** classification model?- How do you use the sklearn grid search function? K-nearest neighbors (KNN) classification **Lazy learning** or **Instance-based (IB)** learning methods simply store the training examples and postpone the generalization (building a model) until a new instance must be classified or prediction made. **Non-parametric models** assume that the data distribution cannot be defined interms of such a finite set of parameters. How does the KNN algorithm work? What should the grey point be? KNN has the following basic steps: VotingHow to break ties:1. When doing a binary classification, often use a odd K to avoid ties.2. Multiple approaches for Multiclass problems: - Reduce the K by 1 to see who wins. - Weight the votes based on the distance of the neighbors Example training dataThis example uses a multi-class problem and each color represents a different class. KNN classification map (K=1)![1NN classification map](img/04_1nn_map.png) KNN classification map (K=5)![5NN classification map](img/04_5nn_map.png) A bit more math Euclidean Distance**Euclidean distance** refers to the distance between two points. These points can be in different dimensional space and are represented by different forms of coordinates. In one-dimensional space, the points are just on a straight number line. A bit more math Measuring distance in a 2-d SpaceIn two-dimensional space, the coordinates are given as points on the x- and y-axes![alt text](img/euclidean-distance.png) A bit more math Measuring distance in a 3-d SpaceIn three-dimensional space, x-, y- and z-axes are used. $$\sqrt{(x_1-x_2)^2 + (y_1-y_2)^2 + (z_1-z_2)^2}$$![alt text](img/vectorgraph.jpg) A bit more math Euclidean Distance Equation![alt text](img/euclidean-equation.png)The source of this formula is in the Pythagorean theorem. 
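To make the distance and voting steps concrete, here is a small NumPy sketch on toy 2-d points (the data are made up for illustration): it computes the Euclidean distance from a query point to every training point, takes the k closest, and lets them vote.

```python
import numpy as np
from collections import Counter

# toy 2-d training points and their class labels
X_train = np.array([[1.0, 2.0], [2.0, 1.0], [6.0, 5.0], [7.0, 6.0]])
y_train = np.array(['red', 'red', 'blue', 'blue'])
query = np.array([2.0, 2.0])

# Euclidean distance: square root of the sum of squared coordinate differences
distances = np.sqrt(((X_train - query) ** 2).sum(axis=1))

# take the k nearest neighbours and let them vote (an odd k avoids binary ties)
k = 3
nearest = y_train[np.argsort(distances)[:k]]
prediction = Counter(nearest).most_common(1)[0][0]
print(nearest, '->', prediction)   # ['red' 'red' 'blue'] -> red
```

sklearn's `KNeighborsClassifier` does essentially this, with faster neighbour search and optional distance-weighted voting.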
Implementing the KNN Classifier with SKlearn Reviewing the Pima dataset Loading the data[following this analysis for part of this section](https://www.kaggle.com/shrutimechlearn/step-by-step-diabetes-classification-knn-detailed)The Question creeping out of this summary- Can minimum value of below listed columns be zero (0)?- On these columns, a value of zero does not make sense and thus indicates missing value.Following columns or variables have an invalid zero value:- Glucose- BloodPressure- SkinThickness- Insulin- BMIimport pandas as pd diabetes = pd.read_csv('diabetes.csv') # print the shapes of X and y print(X.shape) print(y.shape) # Use x and y variables to split the training data into train and test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)Importance of ScalingCompare how the different the data looks when it is scaled versus non-scaled Should we use a Standard Scaler or Min-Max Scaler?https://sebastianraschka.com/Articles/2014_about_feature_scaling.htmlhttp://datareality.blogspot.com/2016/11/scaling-normalizing-standardizing-which.htmlfrom sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test)/Users/swilson5/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/data.py:625: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler. return self.partial_fit(X, y) /Users/swilson5/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:7: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler. import sys /Users/swilson5/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:8: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.scikit-learn 4-step modeling pattern ![steps](img/sklearnsteps.png) **Step 1:** Import the class you plan to usefrom sklearn.neighbors import KNeighborsClassifier**Step 2:** "Instantiate" the "estimator"- "Estimator" is scikit-learn's term for model- "Instantiate" means "make an instance of"knn = KNeighborsClassifier(n_neighbors=1)**Class specifications**- Name of the object does not matter- Can specify tuning parameters (aka "hyperparameters") during this step- All parameters not specified are set to their defaultsprint(knn)KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=1, p=2, weights='uniform')**Step 3:** Fit the model with data (aka "model training")- Model is learning the relationship between X and y- Occurs in-placeknn.fit(X_train, y_train)**Step 4:** Predict the response for a new observation- New observations are called "out-of-sample" data- Uses the information it learned during the model training process# make class predictions for the testing set y_pred_class = knn.predict(X_test) # calculate accuracy from sklearn import metrics print('Accuracy:' + str(metrics.accuracy_score(y_test, y_pred_class))) print('F1: ' + str(metrics.f1_score(y_test, y_pred_class))) import matplotlib.pyplot as plt import itertools import numpy as np %matplotlib inline import seaborn as sns import warnings warnings.filterwarnings('ignore') plt.rcParams["figure.figsize"] = [10,5] def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints 
and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion Matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test,y_pred_class) classes = ['Perished', 'Survived'] plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues)Using a different value for K# instantiate the model (using the value K=5) knn = KNeighborsClassifier(n_neighbors=5) # fit the model with data knn.fit(X_train, y_train) # make class predictions for the testing set y_pred_class = knn.predict(X_test) print('Accuracy:' + str(metrics.accuracy_score(y_test, y_pred_class))) print('F1: ' + str(metrics.f1_score(y_test, y_pred_class))) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred_class)) cm = confusion_matrix(y_test,y_pred_class) classes = ['Perished', 'Survived'] plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues)Search for an optimal value of K for KNNk_range = list(range(1, 4)) k_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train, y_train) y_predict = knn.predict(X_test) score = metrics.f1_score(y_test, y_predict, average='weighted') k_scores.append( score) print(k_scores)Visual comparison of different $K$sThis is not an ROC curveimport matplotlib.pyplot as plt plt.figure(figsize=(12, 6)) plt.plot(k_range, k_scores, color='red', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=10) plt.title('F1 score by K Value') plt.xlabel('K Value') plt.ylabel('F1 Score') plt.show()Pandasimport pandas as pd from urllib import request import tempfile from pathlib import Path with tempfile.TemporaryDirectory() as tmpdirname: # Define the remote file to retrieve remote_url = 'http://data.insideairbnb.com/greece/attica/athens/2021-12-23/visualisations/listings.csv' # Define the local filename to save data local_file = Path(tmpdirname) / 'listings.csv' # Download remote and save locally request.urlretrieve(remote_url, local_file) print('created temporary directory', tmpdirname) # read the airbnb NYC listings csv file airbnb = pd.read_csv(local_file) airbnb airbnb.head(10) airbnb.tail() # Results for a single column airbnb['name'] # results for multiple columns hosts = airbnb[['host_id', 'host_name']] hosts.head() # Show the data types for each column airbnb.dtypes airbnb['last_review'] = pd.to_datetime(airbnb['last_review']) airbnb.dtypes # extract the year from a datetime series airbnb['year'] = airbnb['last_review'].dt.year # to integer airbnb['year'] = airbnb['year'].astype('UInt16') airbnb['year'].head() # Strip leading and trailing spaces from a string series airbnb['name'] = airbnb['name'].str.strip() airbnb['name'].head() airbnb['name_lower'] = airbnb['name'].str.lower() airbnb['name_lower'].head() # calculate using two columns 
airbnb['min_revenue'] = airbnb['minimum_nights'] * airbnb['price'] airbnb[['minimum_nights', 'price', 'min_revenue']].head() # get the mean price airbnb['price'].mean() # get the median price airbnb['price'].median() # get the mean grouped by type of room airbnb[['room_type', 'price']].groupby('room_type', as_index=False).mean() # get the median grouped by type of room airbnb[['room_type', 'price']].groupby('room_type', as_index=False).median() # get the median grouped by type of room and year airbnb[['room_type', 'year', 'price']].groupby(['room_type', 'year'], as_index=False).median() # get all rows with price < 1000 airbnb_under_1000 = airbnb[airbnb['price'] < 1000] airbnb_under_1000.head() # get all rows with price < 1000 and year equal to 2020 airbnb_2019_under_1000 = airbnb[(airbnb['price'] < 1000) & (airbnb['year'] == 2020)] airbnb_2019_under_1000.head() # distribution of prices under $1000 ax = airbnb_under_1000['price'].plot.hist(bins=40)sales_train.csv - the training set. Daily historical data from January 2013 to October 2015. test.csv - the test set. You need to forecast the sales for these shops and products for November 2015. sample_submission.csv - a sample submission file in the correct format. items.csv - supplemental information about the items/products. item_categories.csv - supplemental information about the items categories. shops.csv- supplemental information about the shops.教训,将数据类型改为int8会导致负数的出现code, kbd, pre, samp { font-family:'consolas', Lucida Console, SimSun, Fira Code, Monaco !important; font-size: 11pt !important;}div.output_area pre { font-family: 'consolas', Lucida Console, SimSun, Fira Code, Monaco !important; font-size: 10pt !important;}div.output_area img, div.output_area svg { background-color: FFFFFF !important;}import numpy as np import pandas as pd import time #pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 100) from sklearn.preprocessing import LabelEncoder import gc from time_series_pipeline import * from tqdm import tqdm from scipy import stats import time_series_pipeline from itertools import product items, shops, cats, train, test_indicate = load_data() # item_id字段无负值 #train['id'] = train['shop_id'].astype(str) + '_' + train['item_id'].astype(str) test_indicate['id'] = test_indicate['shop_id'].astype(str) + '_' + test_indicate['item_id'].astype(str) #train = train[train['id'].isin(test_indicate['id'])] shops.loc[shops['shop_name'] == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"' shops['city'] = shops['shop_name'].str.split(' ').transform(lambda x: x[0]) shops.loc[shops['city'] == '!Якутск', 'city'] = 'Якутск' shops['city_code'] = LabelEncoder().fit_transform(shops['city']) shops = shops[['shop_id','city_code']] cats['split'] = cats['item_category_name'].str.split('-') cats['type'] = cats['split'].transform(lambda x: x[0].strip()) cats['type_code'] = LabelEncoder().fit_transform(cats['type']) # 类型 cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip()) cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype']) # 子类型 cats = cats[['item_category_id','type_code', 'subtype_code']] items.drop(['item_name'], axis = 1, inplace = True) ##################### 数据增强 matrix = [] cols = ['date_block_num','shop_id','item_id'] for i in range(34): sales = train[train.date_block_num==i] matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16')) matrix = pd.DataFrame(np.vstack(matrix), columns=cols) 
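# Note: the block above (the "数据增强" / data-augmentation step) builds, for each month,
# the cartesian product of the shops and items that appear in that month, so shop/item
# pairs with no sales still get a row; their item_cnt_month is filled with 0 after the
# merge further down. A toy illustration of itertools.product with hypothetical IDs:
#   list(product([0], [2, 3], [10, 11]))
#   -> [(0, 2, 10), (0, 2, 11), (0, 3, 10), (0, 3, 11)]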
matrix.sort_values(cols,inplace=True) matrix['id'] = matrix['shop_id'].astype(str) + '_' + matrix['item_id'].astype(str) ###########加入测试集 test_indicate['date_block_num'] = 34 test_indicate['date_block_num'] = test_indicate['date_block_num'].astype(np.int8) test_indicate['shop_id'] = test_indicate['shop_id'].astype(np.int8) test_indicate['item_id'] = test_indicate['item_id'].astype(np.int16) test_indicate matrix = pd.concat([matrix, test_indicate], ignore_index = True, sort = False) #matrix.fillna(0, inplace = True) # 将日数据汇总为月数据 start = time.time() df = pd.DataFrame() grouped = train.groupby(['date_block_num','shop_id','item_id']) #df['item_price'] = grouped['item_price'].mean() df['item_cnt_month'] = grouped['item_cnt_day'].sum() df.reset_index(inplace = True) #matrix['item_category_id'] = grouped['item_category_id'].agg(lambda x:stats.mode(x)[0][0]) #matrix['type_code'] = grouped['type_code'].agg(lambda x:stats.mode(x)[0][0]) #matrix['subtype_code'] = grouped['subtype_code'].agg(lambda x:stats.mode(x)[0][0]) #计算众数 print('data has {} rows and {} columns'.format(df.shape[0], df.shape[1])) print('The program costs %.2f seconds'%(time.time() - start)) matrix = pd.merge(matrix, df, on = cols, how = 'left') matrix['item_cnt_month'] = (matrix['item_cnt_month'] .fillna(0) .clip(0,20) # NB clip target here .astype(np.float16)) #matrix['item_category_id'] = grouped['item_category_id'].agg(lambda x:stats.mode(x)[0][0]) #matrix['type_code'] = grouped['type_code'].agg(lambda x:stats.mode(x)[0][0]) #matrix['subtype_code'] = grouped['subtype_code'].agg(lambda x:stats.mode(x)[0][0]) #计算众数 print('data has {} rows and {} columns'.format(df.shape[0], df.shape[1])) print('The program costs %.2f seconds'%(time.time() - start)) matrix train_shops = pd.merge(matrix, shops, on = 'shop_id', how = 'left') print('data has {} rows and {} columns'.format(train_shops.shape[0], train_shops.shape[1])) items_cats = pd.merge(items, cats, on = 'item_category_id') print('data has {} rows and {} columns'.format(items_cats.shape[0], items_cats.shape[1])) df = pd.merge(train_shops, items_cats, on = 'item_id', how = 'left') print('data has {} rows and {} columns'.format(df.shape[0], df.shape[1])) #%whos del cats, grouped, items, items_cats, matrix, sales, shops, test_indicate, train, train_shops gc.collect() df.sort_values(by = ['date_block_num','id'], inplace = True) df #df.reset_index(inplace=True) def groupby_shift(df, col, groupcol, shift_n, fill_na = np.nan): ''' apply fast groupby shift df: data col: column need to be shift shift: n fill_na: na filled value ''' rowno = list(df.groupby(groupcol).size().cumsum()) # 获取每分组第一个元素的index lagged_col = df[col].shift(shift_n) # 不分组滚动 na_rows = [i for i in range(shift_n)] # 初始化为缺失值的index for i in rowno: if i == rowno[len(rowno)-1]: # 最后一个index直接跳过不然会超出最大index continue else: new = [i + j for j in range(shift_n)] # 将每组最开始的shift_n个值变成nan na_rows.extend(new) # 加入列表 na_rows = list(set(na_rows)) # 去除重复值 na_rows = [i for i in na_rows if i <= len(lagged_col) - 1] # 防止超出最大index lagged_col.iloc[na_rows] = fill_na # 变成nan return lagged_col start = time.time() df['shift_1'] = groupby_shift(df, 'item_cnt_month', 'id', 1) df['shift_2'] = groupby_shift(df, 'item_cnt_month', 'id', 2) df['shift_3'] = groupby_shift(df, 'item_cnt_month', 'id', 3) df['shift_6'] = groupby_shift(df, 'item_cnt_month', 'id', 6) df['shift_12'] = groupby_shift(df, 'item_cnt_month', 'id', 12) df['shift_3_roll_avg_3'] = df['shift_3'].rolling(3).mean().astype(np.float32) df['shift_3_roll_avg_6'] = 
df['shift_3'].rolling(6).mean().astype(np.float32) df['shift_12_roll_avg_6'] = df['shift_12'].rolling(6).mean().astype(np.float32) ''' df['mon_avg_item_cnt'] = groupby_shift(df, 'item_cnt_month', 'date_block_num', 1) df['mon_avg_item_cnt'] = df.groupby(['date_block_num'])['mon_avg_item_cnt'].transform(lambda x: x.mean()) group = ['date_block_num', 'item_id'] df['mon_item_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_item_avg_1'] = df.groupby(group)['mon_item_avg_1'].transform(lambda x: x.mean()) df['mon_item_avg_2'] = groupby_shift(df, 'item_cnt_month', group, 2) df['mon_item_avg_2'] = df.groupby(group)['mon_item_avg_2'].transform(lambda x: x.mean()) df['mon_item_avg_6'] = groupby_shift(df, 'item_cnt_month', group, 6) df['mon_item_avg_6'] = df.groupby(group)['mon_item_avg_6'].transform(lambda x: x.mean()) group = ['date_block_num', 'shop_id'] df['mon_shop_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_shop_1'] = df.groupby(group)['mon_shop_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'shop_id', 'item_category_id'] df['mon_shop_item_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_shop_item_1'] = df.groupby(group)['mon_shop_item_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'shop_id', 'subtype_code'] df['mon_shop_sub_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_shop_sub_1'] = df.groupby(group)['mon_shop_sub_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'city_code'] df['mon_city_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_city_avg_1'] = df.groupby(group)['mon_city_avg_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'item_id', 'city_code'] df['mon_item_city_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_item_city_avg_1'] = df.groupby(group)['mon_item_city_avg_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'type_code'] df['mon_type_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_type_avg_1'] = df.groupby(group)['mon_type_avg_1'].transform(lambda x: x.mean()) group = ['date_block_num', 'subtype_code'] df['mon_subtype_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1) df['mon_subtype_avg_1'] = df.groupby(group)['mon_subtype_avg_1'].transform(lambda x: x.mean()) ''' #df['shift6_rolling6_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(6).rolling(6).mean()) #df['shift2_rolling2_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(2).rolling(2).mean()) #df['shift3_rolling1_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(3).rolling(1).mean()) #df['shift3_rolling2_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(3).rolling(2).mean()) #df['shift6_rolling6_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(6).rolling(6).mean()) print('The program costs %.2f seconds'%(time.time() - start)) df ''' df['price_shift_1'] = groupby_shift(df, 'item_price', 'id', 1) df['price_shift_2'] = groupby_shift(df, 'item_price', 'id', 2) df['price_shift_3'] = groupby_shift(df, 'item_price', 'id', 3) df['price_shift_6'] = groupby_shift(df, 'item_price', 'id', 6) df['price_shift_12'] = groupby_shift(df, 'item_price', 'id', 12) ''' def recode_na(df, cols): '''recode na value by grouped average ''' for i in tqdm(cols): df[i] = df[i].transform(lambda x: x.fillna(x.median())) return df colz = ['shift_1', 'shift_3', 'shift_12', 'price_shift_1', 'price_shift_3', 'price_shift_6', 'price_shift_12', 'shift_3_roll_avg_3', 
'shift_3_roll_avg_6', 'shift_12_roll_avg_6', 'mon_avg_item_cnt', 'mon_item_avg_1', 'mon_item_avg_2', 'mon_item_avg_6', 'mon_shop_1', 'mon_shop_item_1', 'mon_shop_sub_1', 'mon_city_avg_1', 'mon_item_city_avg_1', 'mon_type_avg_1', 'mon_subtype_avg_1'] df = recode_na(df, colz) df['month'] = df['date_block_num'] % 12 df = df[] df['id'].isna().value_counts() df[(df['id'] == '59_22088') & (df['date_block_num'] > 11)] df.columns features = ['date_block_num', 'month', 'shop_id', 'item_id', 'city_code', 'item_category_id', 'type_code', 'subtype_code', 'shift_1', 'shift_3', 'shift_12', 'price_shift_1', 'price_shift_3', 'price_shift_6', 'price_shift_12', 'shift_3_roll_avg_3', 'shift_3_roll_avg_6', 'shift_12_roll_avg_6', 'mon_avg_item_cnt', 'mon_item_avg_1', 'mon_item_avg_2', 'mon_item_avg_6', 'mon_shop_1', 'mon_shop_item_1', 'mon_shop_sub_1', 'mon_city_avg_1', 'mon_item_city_avg_1', 'mon_type_avg_1', 'mon_subtype_avg_1'] cat_features = ['month', 'shop_id','item_id','city_code', 'item_category_id', 'type_code', 'subtype_code'] from catboost import CatBoostRegressor from sklearn import preprocessing, metrics from sklearn.model_selection import TimeSeriesSplit df.sort_values(['date_block_num','id'],inplace = True) #df #data = data.sort_values('date_block_num') x_train = df[df['date_block_num'] < 34] y_train = x_train['item_cnt_month'] test = df[df['date_block_num'] == 34] #need_to_remove = ['item_cnt_day','city_code','item_category_id', # 'type_code','subtype_code', 'shop_id', 'item_id', 'id'] #features = [i for i in list(df.columns) if i not in need_to_remove] #n_fold = 3 #3 for timely purpose of the kernel folds = TimeSeriesSplit(n_splits = 3) # use TimeSeriesSplit cv splits = folds.split(x_train, y_train) val_pred = np.zeros(len(x_train)) test_pred = np.zeros(len(test)) for fold, (trn_idx, val_idx) in enumerate(splits): print(f'Training fold {fold + 1}') train_set = x_train.iloc[trn_idx][features] y_tra = y_train.iloc[trn_idx] val_set = x_train.iloc[val_idx][features] y_val = y_train.iloc[val_idx] model = CatBoostRegressor(iterations = 500, learning_rate = 0.05, depth = 6, eval_metric = 'RMSE', random_seed = 42, bagging_temperature = 0.2, od_type = 'Iter', metric_period = 50, od_wait = 20) model.fit(train_set, y_tra, eval_set = (val_set, y_val), use_best_model = True, cat_features = cat_features, verbose = 50) val_pred[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction test_pred += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models print('-' * 50) print('\n') val_rmse = np.sqrt(metrics.mean_squared_error(y_train, val_pred)) print('Our out of folds rmse is {:.4f}'.format(val_rmse)) len(val_pred) len(test_pred) pd.Series(test_pred.clip(0,20)).describe() test_indicate test_indicate['item_cnt_month'] = test_pred.clip(0,20) #test_indicate.to_csv('xgb_submission.csv', index=False) test_indicate submission = pd.DataFrame({'ID': test_indicate.index,'item_cnt_month': test_pred.clip(0,20)}) submission submission.to_csv('../output/cat_submission.csv', index=False) submission def run_lgb(data): '''cross validation ''' start = time.time() data = data.sort_values('date_block_num') x_train = data[data['date_block_num'] <= 27] y_train = x_train['item_cnt_day'] test = data[data['date_block_num'] > 27] del data gc.collect() params = { 'boosting_type': 'gbdt', 'metric': 'rmse', 'objective': 'poisson', # loss function 'seed': 225, 'learning_rate': 0.02, 'lambda': 0.4, # l2 regularization 'reg_alpha': 0.4, # l1 regularization 'max_depth': 5, # max depth of decision trees
'num_leaves': 64, # number of leaves 'bagging_fraction': 0.7, # bootstrap sampling 'bagging_freq' : 1, 'colsample_bytree': 0.7 # feature sampling } oof = np.zeros(len(x_train)) preds = np.zeros(len(test)) n_fold = 3 #3 for timely purpose of the kernel folds = TimeSeriesSplit(n_splits=n_fold) # use TimeSeriesSplit cv splits = folds.split(x_train, y_train) #feature_importances = pd.DataFrame() #feature_importances['feature'] = features for fold, (trn_idx, val_idx) in enumerate(splits): print(f'Training fold {fold + 1}') train_set = lgb.Dataset(x_train.iloc[trn_idx][features], y_train.iloc[trn_idx], categorical_feature = cat) val_set = lgb.Dataset(x_train.iloc[val_idx][features], y_train.iloc[val_idx], categorical_feature = cat) model = lgb.train(params, train_set, num_boost_round = 2400, early_stopping_rounds = 50, valid_sets = [val_set], verbose_eval = 50) #lgb.plot_importance(model, importance_type = 'gain', precision = 0, # height = 0.5, figsize = (6, 10), title = '') #feature_importances[f'fold_{fold + 1}'] = model.feature_importance() oof[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction preds += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models print('-' * 50) print('\n') model.save_model('model.lgb') # save model del x_train print('3 folds cross-validation costs %7.2f seconds'%(time.time() - start)) oof_rmse = np.sqrt(metrics.mean_squared_error(y_train, oof)) print(f'Our out of folds rmse is {oof_rmse}') del y_train test = test[['id', 'date', 'demand']] test['demand'] = preds gc.collect() return test df[(df['shop_id'] == 59) & (df['item_id'] == 126)]
```
Index               67512848
date                67512848
date_block_num       8439106
shop_id              8439106
item_id             16878212
item_price          33756424
item_cnt_day         8439106
city_code            8439106
item_category_id     8439106
type_code            8439106
subtype_code         8439106
lag_1               67512848
dtype: int64
```
df['lag_t1'] = df.groupby(['shop_id','item_id'])['item_cnt_day'].transform(lambda x: x.shift(30)) date = df.groupby(['shop_id','item_id'])['date'] date = pd.DataFrame(date) date df['lag_t1_rolling'] = df.groupby(['shop_id','item_id'])['item_cnt_day'].transform(lambda x: x.shift(30).rolling(30).mean()) df['lag_3'].isna().value_counts() df['lag_t7'] = df.groupby(['shop_id', 'item_id'])['item_cnt_day'].transform(lambda x: x.shift(7)) df df['lag_t7'].isna().value_counts() The test set is the product of certain shops and certain items for month 34. There are 5100 items * 42 shops = 214200 pairs. Compared with the training set, 363 of the items are new. Therefore, for most items in the test set the target value should be zero. On the other hand, the training set only contains pairs that were actually sold or returned in the past. The main idea is to compute monthly sales and, within each month, extend every unique pair that had no sales with an explicit zero. This way the training data becomes similar to the test data. df data = pd.read_pickle('../data/data.pkl') data data.columns features = [ 'date_block_num', 'shop_id', 'item_id', #'item_cnt_month', 'city_code', 'item_category_id', 'type_code', 'subtype_code', 'item_cnt_month_lag_1', 'item_cnt_month_lag_2', 'item_cnt_month_lag_3', 'item_cnt_month_lag_6', 'item_cnt_month_lag_12', 'date_avg_item_cnt_lag_1', 'date_item_avg_item_cnt_lag_1', 'date_item_avg_item_cnt_lag_2', 'date_item_avg_item_cnt_lag_3', 'date_item_avg_item_cnt_lag_6', 'date_item_avg_item_cnt_lag_12', #'date_shop_avg_item_cnt_lag_1', #'date_shop_avg_item_cnt_lag_2', #'date_shop_avg_item_cnt_lag_3', #'date_shop_avg_item_cnt_lag_6', #'date_shop_avg_item_cnt_lag_12', #'date_cat_avg_item_cnt_lag_1', #'date_shop_cat_avg_item_cnt_lag_1', #'date_shop_type_avg_item_cnt_lag_1', #'date_shop_subtype_avg_item_cnt_lag_1', #'date_city_avg_item_cnt_lag_1', #'date_item_city_avg_item_cnt_lag_1', #'date_type_avg_item_cnt_lag_1', #'date_subtype_avg_item_cnt_lag_1', #'delta_price_lag', 'month', #'days', #'item_shop_last_sale', #'item_last_sale',
#'item_shop_first_sale', #'item_first_sale', ] cat_features = ['date_block_num', 'month', 'shop_id', 'item_id', 'city_code', 'item_category_id', 'type_code', 'subtype_code'] #data data['id'] = data['shop_id'].astype(str) + '_' + data['item_id'].astype(str) data.sort_values(['date_block_num','id'],inplace = True) x_train = data[data['date_block_num'] < 34] y_train = x_train['item_cnt_month'].astype(np.float32) test = data[data['date_block_num'] == 34] #need_to_remove = ['item_cnt_day','city_code','item_category_id', # 'type_code','subtype_code', 'shop_id', 'item_id', 'id'] #features = [i for i in list(df.columns) if i not in need_to_remove] #n_fold = 3 #3 for timely purpose of the kernel folds = TimeSeriesSplit(n_splits = 3) # use TimeSeriesSplit cv splits = folds.split(x_train, y_train) val_pred = np.zeros(len(x_train)) test_pred = np.zeros(len(test)) for fold, (trn_idx, val_idx) in enumerate(splits): print(f'Training fold {fold + 1}') train_set = x_train.iloc[trn_idx][features] y_tra = y_train.iloc[trn_idx] val_set = x_train.iloc[val_idx][features] y_val = y_train.iloc[val_idx] model = CatBoostRegressor(iterations = 500, learning_rate = 0.05, depth = 6, eval_metric = 'RMSE', random_seed = 42, bagging_temperature = 0.2, od_type = 'Iter', metric_period = 50, od_wait = 20) model.fit(train_set, y_tra, eval_set = (val_set, y_val), use_best_model = True, cat_features = cat_features, verbose = 50) val_pred[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction test_pred += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models print('-' * 50) print('\n') val_rmse = np.sqrt(metrics.mean_squared_error(y_train, val_pred)) print('Our out of folds rmse is {:.4f}'.format(val_rmse)) from itertools import product product[1]Simple Evolutionary Exploration WalkthroughThis notebook contains instructions on how to use the SEE module, along with several examples. These instructions will cover the following parts: * [Import Image Files](Import_Image_Files)* [Manual Search](Manual_Search)* [Genetic Algorithm Search](Genetic_Algorithm_Search)* [Reading the Results](Reading_the_Results) ---- Import Image Files First import the following packages:import matplotlib.pylab as plt %matplotlib inline import imageioNext, read in the image to be segmented, and the ground truth segmentation mask of the image:img = imageio.imread('Image_data/KOMATSUNA/rgbd_plant/rgb_04_009_05.png') gmask = imageio.imread('Image_data/KOMATSUNA/rgbd_label/label_04_009_05.png')The ground truth mask should only contain one channel, and should only contain as many unique values as there are segments. If these conditions are not met, consider relabeling the image, converting the image to grayscale, or indexing to only use one channel (see below).import numpy as np gmask = np.sum(gmask, axis=2) > 0The following can now be used to see the imported images:plt.figure(figsize= (10, 5)) plt.subplot(121) plt.imshow(img) plt.title("Original Image") plt.axis("off") plt.subplot(122) plt.imshow(gmask) plt.title("Ground Truth Segmentation Mask") plt.axis("off") plt.tight_layout() plt.show()---- Manual Search First import image files, as well as the following packages:from see import JupyterGUI, SegmentorsManual searching of parameters can easily be done using the provided GUI. Pre-established parameters can be put into the widget, or the parameter values can be changed using the sliders. To change the algorithm, simply change the `alg` input.
For a list of available inputs, print `Segmentors.algorithmspace`Segmentors.algorithmspace ### Example of input for params # params = ['FB', 7563, 0.13, 2060, 0.01, 4342, 850, 10, 0.57, 1863, 1543, 134, 3, 1, 0.35, (1, 1), 8.1, 'checkerboard', 'checkerboard', 3, 7625, -35, 0.0, 0.0, 0.0] JupyterGUI.segmentwidget(img, gmask, params = None, alg = 'CT')---- Genetic Algorithm Search First import image files, as well as the following packages:from see import GeneticSearch, SegmentorsTo run the genetic algorithm, we need to initialize an instance of an evolver. The original image and ground truth segmentation image are inputs to it, along with an integer value for population size. This value sets how many individuals are in our population. For this example, we'll set this number to be equal to 10.my_evolver = GeneticSearch.Evolver(img, gmask, pop_size=10)Now that the evolver has been initialized, we can run the genetic algorithm for a specified number of generations (or iterations). Here we will set this number equal to 5.# warnings may appear when this runs population = my_evolver.run(ngen=5)generation 1 of population size 10---- Reading the Results After the genetic algorithm is complete, we can retrieve the individuals that resulted in the lowest (best) fitness values by printing `my_evolver.hof`. These individuals are sorted according to fitness value, so to get the overall best individual, we can simply look at the first individual in the list.print('Best Individual:\n', my_evolver.hof[0])Best Individual: ['FB', 8635, 0.363, 3796, 0.07, 6866, 1544, 10, 0.81, 2559, 3929, 4, 8, 0.01, 0.34, (1, 1), 4.5, 'checkerboard', 'checkerboard', 5, 889, 39, 0.0, 0.0, 0.0]We can see the mask this individual generates by evaluating it, then plotting the result:seg = Segmentors.algoFromParams(my_evolver.hof[0]) mask = seg.evaluate(img) plt.figure(figsize=(10, 5)) plt.subplot(121) plt.imshow(img) plt.title("Original Image") plt.axis('off') plt.subplot(122) plt.imshow(mask) plt.title("Segmentation") plt.axis('off') plt.tight_layout() plt.show()We can also use `FitnessFunction` to calculate the final fitness value for this algorithm:print('Fitness Value: ', Segmentors.FitnessFunction(mask, gmask)[0])Fitness Value: 0.026894541600423955If this value is satisfactory, we can then get usable code to run this algorithm anywhere, including outside this notebook.
The `print_best_algorithm_code` function does this using the given individual:GeneticSearch.print_best_algorithm_code(my_evolver.hof[0])multichannel = False if len(img.shape) > 2: multichannel = True output = skimage.segmentation.felzenszwalb( img, 3796, 0.07, 6866, multichannel=multichannel, )DREAM5 - SGBM, denormalized variable importanceimport os import sys sys.path.append('../../') from arboreto.core import * from arboreto.utils import * import matplotlib.pyplot as pltData pathswd = os.getcwd().split('arboreto')[0] + 'arboreto/resources/dream5/' net1_ex_path = wd + 'net1/net1_expression_data.tsv' net1_tf_path = wd + 'net1/net1_transcription_factors.tsv' net3_ex_path = wd + 'net3/net3_expression_data.tsv' net3_tf_path = wd + 'net3/net3_transcription_factors.tsv' net4_ex_path = wd + 'net4/net4_expression_data.tsv' net4_tf_path = wd + 'net4/net4_transcription_factors.tsv'Dask clientfrom dask.distributed import Client, LocalCluster client = Client(LocalCluster()) clientUse port forwarding to view the dashboard `$ ssh -L 8000:localhost:8787 nostromo`client.shutdown()Infer networks (with meta)def infer_network_and_meta(ex_path, tf_path, client): ex_matrix = load_expression_matrix(ex_path) gene_names = load_gene_names(ex_path) tf_names = load_tf_names(tf_path, gene_names) network_graph, meta_graph = create_graph(ex_matrix, gene_names, tf_names, "GBM", SGBM_KWARGS, 'all', # [200, 201, 202, 203, 204], early_stop_window_length=25, include_meta=True) result = client.compute([network_graph, meta_graph], sync=True) network_df = result[0] meta_df = result[1] return network_df, meta_df %%time net1, meta1 = infer_network_and_meta(net1_ex_path, net1_tf_path, client) len(net1) meta1.head() meta1.hist(bins=50) plt.show()Correlate `n_estimators` with max `importance`# net1['max_importance'] = net1.groupby(['target'])['importance'].transform(max) # max_by_target_df = net1.groupby(['target'])['importance'].max().reset_index() # top_3_by_target_df = net1.groupby(['target'])['importance'].nlargest(3).reset_index() # net1.groupby(['target'])['importance'].nlargest(5).reset_index() # net1_max_importance_by_n_rounds = meta1.merge(net1[['target', 'max_importance']].drop_duplicates(), how='left', on=['target']) # net1_max_importance_by_n_rounds = meta1.merge(max_by_target_df, how='left', on=['target']) n = 1 meta1.merge(net1.groupby(['target'])['importance'].nlargest(n).reset_index(), how='left', on=['target']).plot.scatter(x='n_estimators', y='importance', figsize=(16, 9)) plt.show() # cmap values: jet, coolwarm # https://matplotlib.org/examples/color/colormaps_reference.html n = 10 meta1.merge(net1.groupby(['target'])['importance'].nlargest(n).reset_index(), how='left', on=['target']).plot.hexbin(x='n_estimators', y='importance', bins='log', cmap=plt.get_cmap("inferno"), figsize=(16, 9)) plt.show()DAFUQ?* what the hell does this mean? 
--> probably that the normalization doesn't make a lot of sensenet1_max_importance_by_n_rounds.plot.scatter(x='n_estimators', y='max_importance', figsize=(10, 7)) plt.show() net1_max_importance_by_n_rounds.plot.scatter(x='value', y='max_importance', figsize=(10, 7)) plt.show() meta1.hist(bins=50, figsize=(10,7)) plt.show()#bnsreenu(2021) Source Code: python_for_microscopists, 090a-autoencoder_colorize_V0.2.py #Available at: https://github.com/bnsreenu/python_for_microscopists/blob/master/090a-autoencoder_colorize_V0.2.py #Last Accessed: 29 April 2021 #Importing Libraries from keras.layers import Conv2D, UpSampling2D from keras.models import Sequential from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img from skimage.color import rgb2lab, lab2rgb from skimage.transform import resize from skimage.io import imsave import numpy as np import tensorflow as tf #Mount google drive from google.colab import drive drive.mount('/content/drive') root_path = 'gdrive/My Drive/your_project_folder/' #change dir to your project folder #Upload kaggle.json file (Config file obtained by: #To use the Kaggle API, sign up for a Kaggle account at https://www.kaggle.com. #Then go to the 'Account' tab of your user profile (https://www.kaggle.com//account) and select 'Create API Token'. #This will trigger the download of kaggle.json, a file containing your API credentials.) #https://github.com/Kaggle/kaggle-api from google.colab import files files.upload() #this will prompt you to upload the kaggle.json #Installs kaggle API from json !pip install -q kaggle !mkdir -p ~/.kaggle !cp kaggle.json ~/.kaggle/ !ls ~/.kaggle !chmod 600 /root/.kaggle/kaggle.json # set permission #download natural dataset # (2018) Natural Images. Kaggle Dataset. Available at: https://www.kaggle.com/prasunroy/natural-images. 
Last Accessed: 03 May 2021 !kaggle datasets download -d prasunroy/natural-images -p /content/gdrive/My\ Drive/kaggle/natural #Creating a folder for dataset to unzip to import os os.chdir('/content/gdrive/My Drive/kaggle/natural') #change dir !unzip -q natural-images.zip -d natural-images/ #unzip data in natural-images/ #folder for all images if not os.path.exists('/content/gdrive/My Drive/kaggle/natural/natural-images/allImages'): os.makedirs('/content/gdrive/My Drive/kaggle/natural/natural-images/allImages') #put all images into one folder import shutil # -------------------------------------------------------- reorg_dir = "/content/gdrive/My Drive/kaggle/natural/natural-images/natural_images" target_dir = "/content/gdrive/My Drive/kaggle/natural/natural-images/allImages" # --------------------------------------------------------- for root, dirs, files in os.walk(reorg_dir): for name in files: subject = root+"/"+name n = 1; name_orig = name while os.path.exists(target_dir+"/"+name): name = "duplicate_"+str(n)+"_"+name_orig; n = n+1 newfile = target_dir+"/"+name; shutil.copy(subject, newfile) #shuffling image names import os from random import shuffle dirname = r'/content/gdrive/My Drive/kaggle/natural/natural-images/allImages' paths = [ os.path.join(root, filename) for root, dirs, files in os.walk(dirname) for filename in files if filename.endswith('.jpg') ] shuffle(paths) randomData= paths[:6890] #creating directory to contain shuffled images (train_datagen works with a nested directory) if not os.path.exists('/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed'): os.makedirs('/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed') if not os.path.exists('/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed/resized'): os.makedirs('/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed/resized') #save shuffled images to folder from PIL import Image count = 0 for imagename in randomData: image = Image.open(randomData[count]) image.save('/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed/resized/image_'+str(count)+'.jpg') count+=1 path = '/content/gdrive/My Drive/kaggle/natural/natural-images/allImagesProcessed/' #Normalize images - divide by 255 train_datagen = ImageDataGenerator(rescale=1. / 255) #Resize images, if needed train = train_datagen.flow_from_directory(path, target_size=(256, 256), batch_size=340, class_mode=None) #convert images to greyscale for training and splitting into training and validation sets X =[] Y =[] for img in train[0]: try: lab = rgb2lab(img) X.append(lab[:,:,0]) Y.append(lab[:,:,1:] / 128) #A and B values range from -127 to 128, #so we divide the values by 128 to restrict values to between -1 and 1. 
except: print('error') X = np.array(X) Y = np.array(Y) X = X.reshape(X.shape+(1,)) #dimensions to be the same for X and Y print(X.shape) print(Y.shape) #model definitions #Encoder model = Sequential() model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2, input_shape=(256, 256, 1))) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(Conv2D(128, (3,3), activation='relu', padding='same', strides=2)) model.add(Conv2D(256, (3,3), activation='relu', padding='same')) model.add(Conv2D(256, (3,3), activation='relu', padding='same', strides=2)) model.add(Conv2D(512, (3,3), activation='relu', padding='same')) model.add(Conv2D(512, (3,3), activation='relu', padding='same')) model.add(Conv2D(256, (3,3), activation='relu', padding='same')) #Decoder #Decoder #Note: For the last layer we use tanh instead of Relu. #This is because we are colorizing the image in this layer using 2 filters, A and B. #A and B values range between -1 and 1 so tanh (or hyperbolic tangent) is used #as it also has the range between -1 and 1. #Other functions go from 0 to 1. model.add(Conv2D(128, (3,3), activation='relu', padding='same')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(64, (3,3), activation='relu', padding='same')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(32, (3,3), activation='relu', padding='same')) model.add(Conv2D(16, (3,3), activation='relu', padding='same')) model.add(Conv2D(2, (3, 3), activation='tanh', padding='same')) model.add(UpSampling2D((2, 2))) model.compile(optimizer='adam', loss='mse' , metrics=['accuracy']) model.summary() #create folder for saving model files if not os.path.exists('/content/gdrive/My Drive/kaggle/natural/models'): os.makedirs('/content/gdrive/My Drive/kaggle/natural/models') #train model model.fit(X,Y,validation_split=0.3, epochs=350, batch_size=48) #save model model_json = model.to_json() with open('/content/gdrive/My Drive/kaggle/natural/models/model.json', "w") as json_file: json_file.write(model_json) model.save_weights("/content/gdrive/My Drive/kaggle/natural/models/model.h5") model.save('/content/gdrive/My Drive/kaggle/natural/models/colorize_autoencoder.model') #test model tf.keras.models.load_model( '/content/gdrive/My Drive/kaggle/natural/models/colorize_autoencoder.model', custom_objects=None, compile=True) img1_color=[] img1=img_to_array(load_img("/content/gdrive/My Drive/kaggle/natural/natural-images/allImages/car_0186.jpg")) img1 = resize(img1 ,(256,256)) imsave("previous.png", img1) #saves original image img1_color.append(img1) img1_color = np.array(img1_color, dtype=float) img1_color = rgb2lab(1.0/255*img1_color)[:,:,:,0] img1_color = img1_color.reshape(img1_color.shape+(1,)) output1 = model.predict(img1_color) output1 = output1*128 result = np.zeros((256, 256, 3)) result[:,:,0] = img1_color[0][:,:,0] result[:,:,1:] = output1[0] imsave("result.png", lab2rgb(result)) #saves colourised test imageWARNING:root:Lossy conversion from float32 to uint8. Range [4.4609375, 254.90478515625]. Convert image to uint8 prior to saving to suppress this warning. WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.Quiz 1import numpy as np # Subway ridership for 5 stations on 10 different days ridership = np.array([ [ 0, 0, 2, 5, 0], [1478, 3877, 3674, 2328, 2539], [1613, 4088, 3991, 6461, 2691], [1560, 3392, 3826, 4787, 2613], [1608, 4802, 3932, 4477, 2705], [1576, 3933, 3909, 4979, 2685], [ 95, 229, 255, 496, 201], [ 2, 0, 1, 27, 0], [1438, 3785, 3589, 4174, 2215], [1342, 4043, 4009, 4665, 3033] ]) # Change False to True for each block of code to see what it does # Accessing elements if False: print ridership[1, 3] print ridership[1:3, 3:5] print ridership[1, :] # Vectorized operations on rows or columns if False: print ridership[0, :] + ridership[1, :] print ridership[:, 0] + ridership[:, 1] # Vectorized operations on entire arrays if False: a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) print a + b def mean_riders_for_max_station(ridership): ''' Fill in this function to find the station with the maximum riders on the first day, then return the mean riders per day for that station. Also return the mean ridership overall for comparsion. Hint: NumPy's argmax() function might be useful: http://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html ''' max_col = ridership[0,:].argmax() mean_for_max = ridership[:,max_col].mean() overall_mean = ridership.mean() return (overall_mean, mean_for_max) mean_riders_for_max_station(ridership)Quiz 2import numpy as np # Change False to True for this block of code to see what it does # NumPy axis argument if False: a = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) print a.sum() print a.sum(axis=0) print a.sum(axis=1) # Subway ridership for 5 stations on 10 different days ridership = np.array([ [ 0, 0, 2, 5, 0], [1478, 3877, 3674, 2328, 2539], [1613, 4088, 3991, 6461, 2691], [1560, 3392, 3826, 4787, 2613], [1608, 4802, 3932, 4477, 2705], [1576, 3933, 3909, 4979, 2685], [ 95, 229, 255, 496, 201], [ 2, 0, 1, 27, 0], [1438, 3785, 3589, 4174, 2215], [1342, 4043, 4009, 4665, 3033] ]) def min_and_max_riders_per_day(ridership): ''' Fill in this function. First, for each subway station, calculate the mean ridership per day. Then, out of all the subway stations, return the maximum and minimum of these values. That is, find the maximum mean-ridership-per-day and the minimum mean-ridership-per-day for any subway station. 
''' mean_per_station = ridership.mean(axis = 0) # aggregate along columns max_daily_ridership = mean_per_station.max() # Replace this with your code min_daily_ridership = mean_per_station.min() # Replace this with your code return (max_daily_ridership, min_daily_ridership) min_and_max_riders_per_day(ridership)Quiz 3import pandas as pd # Subway ridership for 5 stations on 10 different days ridership_df = pd.DataFrame( data=[[ 0, 0, 2, 5, 0], [1478, 3877, 3674, 2328, 2539], [1613, 4088, 3991, 6461, 2691], [1560, 3392, 3826, 4787, 2613], [1608, 4802, 3932, 4477, 2705], [1576, 3933, 3909, 4979, 2685], [ 95, 229, 255, 496, 201], [ 2, 0, 1, 27, 0], [1438, 3785, 3589, 4174, 2215], [1342, 4043, 4009, 4665, 3033]], index=['05-01-11', '05-02-11', '05-03-11', '05-04-11', '05-05-11', '05-06-11', '05-07-11', '05-08-11', '05-09-11', '05-10-11'], columns=['R003', 'R004', 'R005', 'R006', 'R007'] ) # Change False to True for each block of code to see what it does # DataFrame creation if False: # You can create a DataFrame out of a dictionary mapping column names to values df_1 = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]}) print df_1 # You can also use a list of lists or a 2D NumPy array df_2 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=['A', 'B', 'C']) print df_2 # Accessing elements if False: print ridership_df.iloc[0] print ridership_df.loc['05-05-11'] print ridership_df['R003'] print ridership_df.iloc[1, 3] # Accessing multiple rows if False: print ridership_df.iloc[1:4] # Accessing multiple columns if False: print ridership_df[['R003', 'R005']] # Pandas axis if False: df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]}) print df.sum() print df.sum(axis=1) print df.values.sum() def mean_riders_for_max_station(ridership): ''' Fill in this function to find the station with the maximum riders on the first day, then return the mean riders per day for that station. Also return the mean ridership overall for comparsion. This is the same as a previous exercise, but this time the input is a Pandas DataFrame rather than a 2D NumPy array. ''' max_col_idx = ridership.iloc[0].argmax() overall_mean = ridership.values.mean() # Replace this with your code mean_for_max = ridership[max_col_idx].mean() # Replace this with your code return (overall_mean, mean_for_max) mean_riders_for_max_station(ridership_df)Quiz 4import pandas as pd filename = 'nyc_subway_weather.csv' subway_df = pd.read_csv(filename) def correlation(x, y): ''' Fill in this function to compute the correlation between the two input variables. Each input is either a NumPy array or a Pandas Series. correlation = average of (x in standard units) times (y in standard units) Remember to pass the argument "ddof=0" to the Pandas std() function! 
''' x_stddev = x.std(ddof = 0) y_stddev = y.std(ddof = 0) x_mean = x.mean() y_mean = y.mean() x_std = (x - x_mean) / x_stddev y_std = (y - y_mean) / y_stddev return (x_std * y_std).mean() entries = subway_df['ENTRIESn_hourly'] cum_entries = subway_df['ENTRIESn'] rain = subway_df['meanprecipi'] temp = subway_df['meantempi'] print correlation(entries, rain) print correlation(entries, temp) print correlation(rain, temp) print correlation(entries, cum_entries)0.0356485157722 -0.0266933483216 -0.229034323408 0.585895470766Quiz 5import pandas as pd # Examples of vectorized operations on DataFrames: # Change False to True for each block of code to see what it does # Adding DataFrames with the column names if True: df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}) df2 = pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60], 'c': [70, 80, 90]}) print df1 + df2 # Adding DataFrames with overlapping column names if True: df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}) df2 = pd.DataFrame({'d': [10, 20, 30], 'c': [40, 50, 60], 'b': [70, 80, 90]}) print df1 + df2 # Adding DataFrames with overlapping row indexes if True: df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=['row1', 'row2', 'row3']) df2 = pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60], 'c': [70, 80, 90]}, index=['row4', 'row3', 'row2']) print df1 + df2 # --- Quiz --- # Cumulative entries and exits for one station for a few hours. entries_and_exits = pd.DataFrame({ 'ENTRIESn': [3144312, 3144335, 3144353, 3144424, 3144594, 3144808, 3144895, 3144905, 3144941, 3145094], 'EXITSn': [1088151, 1088159, 1088177, 1088231, 1088275, 1088317, 1088328, 1088331, 1088420, 1088753] }) def get_hourly_entries_and_exits(entries_and_exits): ''' Fill in this function to take a DataFrame with cumulative entries and exits (entries in the first column, exits in the second) and return a DataFrame with hourly entries and exits (entries in the first column, exits in the second). ''' shifted = entries_and_exits.shift(1) return (entries_and_exits - shifted) get_hourly_entries_and_exits(entries_and_exits) entries_and_exits.diff()a b c 0 11 44 77 1 22 55 88 2 33 66 99 a b c d 0 NaN 74 47 NaN 1 NaN 85 58 NaN 2 NaN 96 69 NaN a b c row1 NaN NaN NaN row2 32.0 65.0 98.0 row3 23.0 56.0 89.0 row4 NaN NaN NaNQuiz 6import pandas as pd # Change False to True for this block of code to see what it does # DataFrame applymap() if False: df = pd.DataFrame({ 'a': [1, 2, 3], 'b': [10, 20, 30], 'c': [5, 10, 15] }) def add_one(x): return x + 1 print df.applymap(add_one) grades_df = pd.DataFrame( data={'exam1': [43, 81, 78, 75, 89, 70, 91, 65, 98, 87], 'exam2': [24, 63, 56, 56, 67, 51, 79, 46, 72, 60]}, index=['Andre', 'Barry', 'Chris', 'Dan', 'Emilio', 'Fred', 'Greta', 'Humbert', 'Ivan', 'James'] ) grade_table = { 10: 'A', 9: 'A', 8: 'B', 7: 'C', 6: 'D' } def convert_grade(grade): numeric_grade = int(grade) / 10 return grade_table.get(numeric_grade, 'F') def convert_grades(grades): ''' Fill in this function to convert the given DataFrame of numerical grades to letter grades. Return a new DataFrame with the converted grade. 
The conversion rule is: 90-100 -> A 80-89 -> B 70-79 -> C 60-69 -> D 0-59 -> F ''' return grades.applymap(convert_grade) convert_grades(grades_df)Quiz 7import pandas as pd grades_df = pd.DataFrame( data={'exam1': [43, 81, 78, 75, 89, 70, 91, 65, 98, 87], 'exam2': [24, 63, 56, 56, 67, 51, 79, 46, 72, 60]}, index=['Andre', 'Barry', 'Chris', 'Dan', 'Emilio', 'Fred', 'Greta', 'Humbert', 'Ivan', 'James'] ) # Change False to True for this block of code to see what it does # DataFrame apply() if True: def convert_grades_curve(exam_grades): # Pandas has a bult-in function that will perform this calculation # This will give the bottom 0% to 10% of students the grade 'F', # 10% to 20% the grade 'D', and so on. You can read more about # the qcut() function here: # http://pandas.pydata.org/pandas-docs/stable/generated/pandas.qcut.html return pd.qcut(exam_grades, [0, 0.1, 0.2, 0.5, 0.8, 1], labels=['F', 'D', 'C', 'B', 'A']) # qcut() operates on a list, array, or Series. This is the # result of running the function on a single column of the # DataFrame. print convert_grades_curve(grades_df['exam1']) # qcut() does not work on DataFrames, but we can use apply() # to call the function on each column separately print grades_df.apply(convert_grades_curve) def standardize(df): ''' Fill in this function to standardize each column of the given DataFrame. To standardize a variable, convert each value to the number of standard deviations it is above or below the mean. ''' def standardize_column(column): mean = column.mean() std_dev = column.std(ddof = 0) return (column - mean) / std_dev return df.apply(standardize_column) standardize(grades_df)Andre F Barry B Chris C Dan C Emilio B Fred C Greta A Humbert D Ivan A James B Name: exam1, dtype: category Categories (5, object): [F < D < C < B < A] exam1 exam2 Andre F F Barry B B Chris C C Dan C C Emilio B B Fred C C Greta A A Humbert D D Ivan A A James B BQuiz 8import numpy as np import pandas as pd df = pd.DataFrame({ 'a': [4, 5, 3, 1, 2], 'b': [20, 10, 40, 50, 30], 'c': [25, 20, 5, 15, 10] }) # Change False to True for this block of code to see what it does # DataFrame apply() - use case 2 if False: print df.apply(np.mean) print df.apply(np.max) def second_largest(df): ''' Fill in this function to return the second-largest value of each column of the input DataFrame. 
''' def second_largest_of_column(column): max = None max_idx = None second_max = None second_max_idx = None for i in range(len(column)): value = column[i] if max == None: max = value max_idx = i elif value > max: second_max = max second_max_idx = max_idx max = value max_idx = i elif value > second_max: second_max = value second_max_idx = i return second_max return df.apply(second_largest_of_column) second_largest(df)Quiz 9import pandas as pd # Change False to True for each block of code to see what it does # Adding a Series to a square DataFrame if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({ 0: [10, 20, 30, 40], 1: [50, 60, 70, 80], 2: [90, 100, 110, 120], 3: [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df + s # Adding a Series to a one-row DataFrame if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({0: [10], 1: [20], 2: [30], 3: [40]}) print df print '' # Create a blank line between outputs print df + s # Adding a Series to a one-column DataFrame if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({0: [10, 20, 30, 40]}) print df print '' # Create a blank line between outputs print df + s # Adding when DataFrame column names match Series index if False: s = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) df = pd.DataFrame({ 'a': [10, 20, 30, 40], 'b': [50, 60, 70, 80], 'c': [90, 100, 110, 120], 'd': [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df + s # Adding when DataFrame column names don't match Series index if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({ 'a': [10, 20, 30, 40], 'b': [50, 60, 70, 80], 'c': [90, 100, 110, 120], 'd': [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df + sa b c d 0 10 50 90 130 1 20 60 100 140 2 30 70 110 150 3 40 80 120 160 0 1 2 3 a b c d 0 NaN NaN NaN NaN NaN NaN NaN NaN 1 NaN NaN NaN NaN NaN NaN NaN NaN 2 NaN NaN NaN NaN NaN NaN NaN NaN 3 NaN NaN NaN NaN NaN NaN NaN NaNQuiz 10import pandas as pd from IPython.display import display, HTML import numpy as np from collections import defaultdict import matplotlib.pyplot as plt import random import seaborn as sns # Adding using + if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({ 0: [10, 20, 30, 40], 1: [50, 60, 70, 80], 2: [90, 100, 110, 120], 3: [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df + s # Adding with axis='index' if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({ 0: [10, 20, 30, 40], 1: [50, 60, 70, 80], 2: [90, 100, 110, 120], 3: [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df.add(s, axis='index') # The functions sub(), mul(), and div() work similarly to add() # Adding with axis='columns' if False: s = pd.Series([1, 2, 3, 4]) df = pd.DataFrame({ 0: [10, 20, 30, 40], 1: [50, 60, 70, 80], 2: [90, 100, 110, 120], 3: [130, 140, 150, 160] }) print df print '' # Create a blank line between outputs print df.add(s, axis='columns') # The functions sub(), mul(), and div() work similarly to add() grades_df = pd.DataFrame( data={'exam1': [43, 81, 78, 75, 89, 70, 91, 65, 98, 87], 'exam2': [24, 63, 56, 56, 67, 51, 79, 46, 72, 60]}, index=['Andre', 'Barry', 'Chris', 'Dan', 'Emilio', 'Fred', 'Greta', 'Humbert', 'Ivan', 'James'] ) def standardize(df): ''' Fill in this function to standardize each column of the given DataFrame. To standardize a variable, convert each value to the number of standard deviations it is above or below the mean. 
This time, try to use vectorized operations instead of apply(). You should get the same results as you did before. ''' means = df.mean() std_devs = df.std(ddof = 0) return (df - means) / std_devs def standardize_rows(df): ''' Optional: Fill in this function to standardize each row of the given DataFrame. Again, try not to use apply(). This one is more challenging than standardizing each column! ''' means = df.mean(axis = 'columns') std_devs = df.std(ddof = 0, axis = 'columns') return df.subtract(means, axis = 'index').divide(std_devs, axis = 'index') standardize(grades_df) standardize_rows(grades_df)Quiz 11 groupbyimport matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns values = np.array([1, 3, 2, 4, 1, 6, 4]) example_df = pd.DataFrame({ 'value': values, 'even': values % 2 == 0, 'above_three': values > 3 }, index=['a', 'b', 'c', 'd', 'e', 'f', 'g']) # Change False to True for each block of code to see what it does # Examine DataFrame if False: print example_df # Examine groups if False: grouped_data = example_df.groupby('even') # The groups attribute is a dictionary mapping keys to lists of row indexes print grouped_data.groups # Group by multiple columns if False: grouped_data = example_df.groupby(['even', 'above_three']) print grouped_data.groups # Get sum of each group if False: grouped_data = example_df.groupby('even') print grouped_data.sum() # Limit columns in result if False: grouped_data = example_df.groupby('even') # You can take one or more columns from the result DataFrame print grouped_data.sum()['value'] print '\n' # Blank line to separate results # You can also take a subset of columns from the grouped data before # collapsing to a DataFrame. In this case, the result is the same. print grouped_data['value'].sum() filename = 'nyc_subway_weather.csv' subway_df = pd.read_csv(filename) ### Write code here to group the subway data by a variable of your choice, then ### either print out the mean ridership within each group or create a plot. max_days = subway_df.groupby(['DATEn','day_week'])[u'ENTRIESn_hourly'].max() print subway_df.head() print max_days %matplotlib inline max_days.plot()UNIT DATEn TIMEn ENTRIESn EXITSn ENTRIESn_hourly \ 0 R003 05-01-11 00:00:00 4388333 2911002 0.0 1 R003 05-01-11 04:00:00 4388333 2911002 0.0 2 R003 05-01-11 12:00:00 4388333 2911002 0.0 3 R003 05-01-11 16:00:00 4388333 2911002 0.0 4 R003 05-01-11 20:00:00 4388333 2911002 0.0 EXITSn_hourly datetime hour day_week ... pressurei \ 0 0.0 2011-05-01 00:00:00 0 6 ... 30.22 1 0.0 2011-05-01 04:00:00 4 6 ... 30.25 2 0.0 2011-05-01 12:00:00 12 6 ... 30.28 3 0.0 2011-05-01 16:00:00 16 6 ... 30.26 4 0.0 2011-05-01 20:00:00 20 6 ... 
30.28 rain tempi wspdi meanprecipi meanpressurei meantempi meanwspdi \ 0 0 55.9 3.5 0.0 [...]Quiz 12import numpy as np import pandas as pd values = np.array([1, 3, 2, 4, 1, 6, 4]) example_df = pd.DataFrame({ 'value': values, 'even': values % 2 == 0, 'above_three': values > 3 }, index=['a', 'b', 'c', 'd', 'e', 'f', 'g']) # Change False to True for each block of code to see what it does # Standardize each group if False: def standardize(xs): return (xs - xs.mean()) / xs.std() grouped_data = example_df.groupby('even') print grouped_data['value'].apply(standardize) # Find second largest value in each group if False: def second_largest(xs): sorted_xs = xs.sort(inplace=False, ascending=False) return sorted_xs.iloc[1] grouped_data = example_df.groupby('even') print grouped_data['value'].apply(second_largest) # --- Quiz --- # DataFrame with cumulative entries and exits for multiple stations ridership_df = pd.DataFrame({ 'UNIT': ['R051', 'R079', 'R051', 'R079', 'R051', 'R079', 'R051', 'R079', 'R051'], 'TIMEn': ['00:00:00', '02:00:00', '04:00:00', '06:00:00', '08:00:00', '10:00:00', '12:00:00', '14:00:00', '16:00:00'], 'ENTRIESn': [3144312, 8936644, 3144335, 8936658, 3144353, 8936687, 3144424, 8936819, 3144594], 'EXITSn': [1088151, 13755385, 1088159, 13755393, 1088177, 13755598, 1088231, 13756191, 1088275] }) def test_item(entry): print entry return entry def get_hourly_entries_and_exits_inner(entries_and_exits): ''' Fill in this function to take a DataFrame with cumulative entries and exits (entries in the first column, exits in the second) and return a DataFrame with hourly entries and exits (entries in the first column, exits in the second). ''' shifted = entries_and_exits.shift(1) return entries_and_exits - shifted def calculate_hourly(entries_and_exits): entries_and_exits['ENTRIESn'] = get_hourly_entries_and_exits_inner(entries_and_exits['ENTRIESn']) entries_and_exits['EXITSn'] = get_hourly_entries_and_exits_inner(entries_and_exits['EXITSn']) return entries_and_exits def get_hourly_entries_and_exits(entries_and_exits): ''' Fill in this function to take a DataFrame with cumulative entries and exits and return a DataFrame with hourly entries and exits. The hourly entries and exits should be calculated separately for each station (the 'UNIT' column). Hint: Take a look at the `get_hourly_entries_and_exits()` function you wrote in a previous quiz, DataFrame Vectorized Operations. If you copy it here and rename it, you can use it and the `.apply()` function to help solve this problem. 
''' return entries_and_exits.groupby('UNIT')[['ENTRIESn','EXITSn']].apply(calculate_hourly) get_hourly_entries_and_exits(ridership_df)Quiz 13 - merge subway and weather dataimport pandas as pd subway_df = pd.DataFrame({ 'UNIT': ['R003', 'R003', 'R003', 'R003', 'R003', 'R004', 'R004', 'R004', 'R004', 'R004'], 'DATEn': ['05-01-11', '05-02-11', '05-03-11', '05-04-11', '05-05-11', '05-01-11', '05-02-11', '05-03-11', '05-04-11', '05-05-11'], 'hour': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'ENTRIESn': [ 4388333, 4388348, 4389885, 4391507, 4393043, 14656120, 14656174, 14660126, 14664247, 14668301], 'EXITSn': [ 2911002, 2911036, 2912127, 2913223, 2914284, 14451774, 14451851, 14454734, 14457780, 14460818], 'latitude': [ 40.689945, 40.689945, 40.689945, 40.689945, 40.689945, 40.69132 , 40.69132 , 40.69132 , 40.69132 , 40.69132 ], 'longitude': [-73.872564, -73.872564, -73.872564, -73.872564, -73.872564, -73.867135, -73.867135, -73.867135, -73.867135, -73.867135] }) weather_df = pd.DataFrame({ 'DATEn': ['05-01-11', '05-01-11', '05-02-11', '05-02-11', '05-03-11', '05-03-11', '05-04-11', '05-04-11', '05-05-11', '05-05-11'], 'hour': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'latitude': [ 40.689945, 40.69132 , 40.689945, 40.69132 , 40.689945, 40.69132 , 40.689945, 40.69132 , 40.689945, 40.69132 ], 'longitude': [-73.872564, -73.867135, -73.872564, -73.867135, -73.872564, -73.867135, -73.872564, -73.867135, -73.872564, -73.867135], 'pressurei': [ 30.24, 30.24, 30.32, 30.32, 30.14, 30.14, 29.98, 29.98, 30.01, 30.01], 'fog': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'rain': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'tempi': [ 52. , 52. , 48.9, 48.9, 54. , 54. , 57.2, 57.2, 48.9, 48.9], 'wspdi': [ 8.1, 8.1, 6.9, 6.9, 3.5, 3.5, 15. , 15. , 15. , 15. ] }) def combine_dfs(subway_df, weather_df): ''' Fill in this function to take 2 DataFrames, one with subway data and one with weather data, and return a single dataframe with one row for each date, hour, and location. Only include times and locations that have both subway data and weather data available. ''' return subway_df.merge(weather_df, on=['DATEn','hour','latitude','longitude'], how='inner') combine_dfs(subway_df, weather_df)Quiz 14 - plot dataframesimport matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns values = np.array([1, 3, 2, 4, 1, 6, 4]) example_df = pd.DataFrame({ 'value': values, 'even': values % 2 == 0, 'above_three': values > 3 }, index=['a', 'b', 'c', 'd', 'e', 'f', 'g']) # Change False to True for this block of code to see what it does # groupby() without as_index if False: first_even = example_df.groupby('even').first() print first_even print first_even['even'] # Causes an error. 'even' is no longer a column in the DataFrame # groupby() with as_index=False if False: first_even = example_df.groupby('even', as_index=False).first() print first_even print first_even['even'] # Now 'even' is still a column in the DataFrame filename = 'nyc_subway_weather.csv' subway_df = pd.read_csv(filename) ## Make a plot of your choice here showing something interesting about the subway data. ## Matplotlib documentation here: http://matplotlib.org/api/pyplot_api.html ## Once you've got something you're happy with, share it on the forums! 
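# A minimal sketch of one possible plot, assuming the 'hour' and 'ENTRIESn_hourly' columns
# shown earlier in this notebook: average ridership by hour of the day.
subway_df.groupby('hour')['ENTRIESn_hourly'].mean().plot()
plt.show()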
def average_entries(entry): entry['ENTRIESn_hourly'] = entry['ENTRIESn_hourly'].mean() entry['rain'] = entry['rain'].mean() return entry # %matplotlib inline average_ridership = subway_df[['DATEn','rain','ENTRIESn_hourly']].groupby('DATEn').mean() with_rain = average_ridership.loc[average_ridership['rain'] > 0] without_rain = average_ridership.loc[average_ridership['rain'] == 0] len(with_rain) len(without_rain) # with_rain.plot() # plt.show() # without_rain.plot() # plt.show() print with_rain['ENTRIESn_hourly'].mean() print without_rain['ENTRIESn_hourly'].mean() # average_ridership = subway_df[['DATEn','rain','ENTRIESn_hourly']].groupby('DATEn').mean() # average_ridership = subway_df[['DATEn','tempi','ENTRIESn_hourly']].groupby('DATEn').mean() # print average_ridership # average_ridership.plot.scatter(x = 'tempi', y = 'ENTRIESn_hourly') average_ridership = subway_df[['UNIT','ENTRIESn_hourly','hour']].groupby(['UNIT','hour']).mean() # result = average_ridership.reset_index().pivot(index='hour',columns='UNIT', values='ENTRIESn_hourly') average_ridership = subway_df[['DATEn','rain','ENTRIESn_hourly','UNIT']].groupby(['UNIT','DATEn']).mean().reset_index() print average_ridership.describe() print average_ridership.head() # average_ridership.plot() # average_ridership.plot(x = 'DATEn',y = 'ENTRIESn_hourly') # result = average_ridership.pivot(index=['UNIT','DATEn'],columns='rain',values='ENTRIESn_hourly') # average_ridership.set_index(['UNIT','DATEn']).pivot(values='ENTRIESn_hourly', columns='rain', index=['UNIT','DATEn']) result = pd.pivot_table(average_ridership, index=['UNIT','DATEn'],columns='rain',values='ENTRIESn_hourly',aggfunc=np.mean) result.plot() result['rain' == 1]1877.83704114 1886.1876537 rain ENTRIESn_hourly count 7414.000000 7414.000000 mean 0.223091 1848.981656 std 0.416347 1951.834919 min 0.000000 0.000000 25% 0.000000 683.500000 50% 0.000000 1233.300000 75% 0.000000 2210.208333 max 1.000000 13815.833333 UNIT DATEn rain ENTRIESn_hourly 0 R003 05-01-11 0 0.000000 1 R003 05-02-11 0 246.333333 2 R003 05-03-11 0 322.600000 3 R003 05-04-11 1 260.000000 4 R003 05-05-11 0 321.600000Variables: definition *Variables are placeholders to store data values to recover them later.*Consider the following analogy: you have a water source and you want to collect some water. If you don't have a placeholder (mug, bucket, bottle,...), you will not be able to collect it as the water will drain through your fingers. Variables need to have a name and this name can be anything you want as long as you follow some [rules](https://www.w3schools.com/python/gloss_python_variable_names.asp).x = 10 y = 20.5 z = 'hello' print(x) print(y) print(z)helloData Types Python is a dynamically typed language. That means the interpreter infers the type of an object at runtime. Basic data types are:- **Integers**: stores positive or negative integer values. In Python, the integer type can be arbitrarily large. _(eg. 10, 3, -5)_- **Float**: stores decimal numbers. By default, Python interprets any number that includes a decimal point as a double precision floating point number. Like some other programming languages, in Python, there is not much difference between float and double except that float is in-built in Python, while double is imported from a library called `NumPy`. We will talk about the libraries in detail later. _(eg. 20.2, 100.2403, -5.50)_- **String**: stores a group of characters encapsulated in a single quote or double quotes _(eg. "hello", "Ironhack", "Teacher")_- **Boolean**: stores either a TRUE or FALSE To check our data types we will use the function `type()` (in Python, any name which is followed by opening and closing parentheses is a function) **Integer**x = 10 print(x) type(x)10As you can see from the example above, `print` and `type` are followed by `()` and therefore they are functions. They take an argument and then return another value. **Float**y = 20.5 print(y) type(y)20.5**String** A string can be encapsulated in single quotes or double quotes.z = "hello" print(z) type(z) x = 'hello there' print(x) type(x)hello there**Note:** As you can see from the examples above, you can define a string using double quotes `" "` or single quotes `' '`. You may wonder when you should use double quotes and single quotes. The short answer is that it depends on your string. If your string contains single quotes like `I couldn't make it`, then you must use double quotes. Otherwise, you are safe using single quotes. **Note:** in the previous two cases, the length of the string was small (they were single-line strings). What if we want a string that is more than one line long (a multi-line string)? In such cases, we use three single quotes. And every new line is represented by a '\n' character in the output.# Multiple line string x = '''Float - stores decimal numbers. By default, Python interprets any number that includes a decimal point as a double precision floating point number. Like some other programming languages, in Python there is not much difference between float and double except that float is in-built in Python, while double is imported from a library called NumPy. We will talk about the libraries in detail later. Hello There!''' print(x) type(x)Float - stores decimal numbers. By default, python interprets any number that includes a decimal point as a double precision floating point number. Like some other programming languages, in python there is not much difference between float and double except that float is in built in python while double is imported from a library called numpy. We will talk about the libraries in detail later. Hello There!**Boolean**x = True # this is highlighted in green because it is a keyword print(x) type(x) x = False print(x) type(x) x = "True" # In this case it is a string because we encapsulated it in double quotes print(x) type(x)TrueExercise: Data Types **1. For the following variables, use Python to check the data type of the variables.**x1 = 1.1 x2 = "Ironhack" x3 = "1.1" x4 = True x5 = "True" x6 = -1**2. What is the difference between `x1` and `x3`?**x3 is in quotation marks, making it a string**3. Subtract `x3` from `x1`**x3-x1**4. What is the difference between `x4` and `x5`?** x5 is in quotation marks, making it a string **5. Subtract `x4` from `x5`** Operators Operators are special symbols in Python that are used to perform certain operations such as mathematical operations, logical operations, and comparison operations. The value or the variables that the operator works on are called Operands.
We will look at some of the more common operators that we use in Python.# We will define two variables here and use operators on them x = 10 y = 5Arithmetic operators These are used to perform mathematical operations as shown below: on the left-hand side we have the operator, and on the right we can see how it can be used with variables- `+` - Addition (x + y)- `-` - Subtraction (x - y)- `*` - Multiplication (x * y)- `/` - Division (x / y)- `%` - Modulus (x % y)- `**` - Exponentiation (x ** y)- `//` - Floor division (x // y)# You can try and test these operators as shown below x+y x-y x*y x/y x%y # Gives the remainder x**y # 'y' times multiplication of 'x' x // y # Rounds off the result to the lower integer value on the number linePython also has a `math` library that you can use to perform some more complex mathematical operations. Libraries in Python contain some additional pre-defined functions that the user can use directly. We will talk about libraries in more detail later. Here we will show how to use the [math library](https://docs.python.org/3/library/math.html) for some operations that we have looked at before. import math # This is the way to have access to all the functions included in the `math` library math.floor(2.9) # This is the same as the 'floor division' operator that we used earlier # This is the same as the 'Exponentiation' operator that we used earlier. # In this case, the function takes two arguments separated by ','. The first one is the base, # and the second is the power. In the example below, we're computing: 2**3 math.pow(2,3) math.sqrt(9) #This returns the square rootAssignment Operators These are used to assign values to variables. We have already seen them before when we talked about 'Variables' and 'Data Types'. x = 10 print(x)10Some special assignment operators assign the value and perform a simple mathematical operation at the same time. They are shown below: x += 3 is equivalent to x = x + 3 x = 10 x += 3 print(x) x = 10 x -= 3 print(x) x = 10 x *= 3 print(x) x = 10 x /= 3 print(x) x = 10 x %= 3 print(x)### Comparison Operators These are used to make a comparison between two variables. They return a boolean value: `True` if the condition is met, otherwise `False`.- `==` - Equal (x == y)- `!=` - Not equal (x != y)- `>` - Greater than (x > y)- `<` - Less than (x < y)- `>=` - Greater than or equal to (x >= y)- `<=` - Less than or equal to (x <= y)x = 10 # Initialize variable `x` with a value 10 y = 5 # Initialize variable `y` with a value 5 x == y x != y x > y x >= y x <= yLogical Operators These operators are used to combine two or more comparison operations. With `and`, the result is True only if all the conditions are met and False if any one of them is not met.x = 10 # First we will initialize variable `x` with a value 10.**and** Returns True if both statements are true: x < 5 and x < 10. x < 5 and x < 10 x > 5 and x < 20**or** Returns True if one of the statements is true: x < 5 or x < 4. x < 5 or x > 12 x == 10 or x > 12**not** Reverses the result; returns False if the result is true: not(x < 5 and x < 10). not(x > 12) not(x == 10 or x > 12)## More Resources: https://www.programiz.com/python-programming/operators https://www.tutorialspoint.com/python/python_basic_operators.htm # Exercise Operators **1. In this problem we will show you how to take a user input.
Use the code and then answer the questions that follow:**x1 = input("Please enter an integer number: ")x2 = input("Please enter another integer number")Print the values of the two variables.What is the data type of `x1` and `x2`?As you will notice, the data type of `x1` and `x2` is string. Even though we entered integers, the input function converts them into strings by default. Now we do some data type conversion from string to integer:x1 = int(x1) # This converts the string into an integer and re-initializes `x1`x2 = int(x2) # This converts the string into an integer and re-initializes `x2`Print the values of the two variables.What is the data type of `x1` and `x2`?Now perform simple comparisons between `x1` and `x2`: Check if the two variables are equal. Check if `x1` is greater than `x2`. Check if `x2` is greater than `x1`. Check if `x1` is not equal to `x2`. Store the difference between the two variables `x1` and `x2` in another variable `x3` (subtract the smaller number from the larger number). Increment the smaller of the two variables (`x1` and `x2`) with the difference. Again check whether `x1` and `x2` are equal or not.**2. In the lesson we talked about the 'math' library in Python. [Here](https://docs.python.org/3/library/math.html) is a link to the documentation of this library. Go through the documentation and try any three more functions on a numerical variable that were not described in the lesson.** ## Additional MaterialThe students will not be tested on these in the pre-work assessment!# We will talk about these later in the bootcamp. The definitions are below for your reference. x = 10 y = 5**Identity Operator** **is** Returns `True` if both variables are the same object: `x` is `y`.x is y**is not** Returns `True` if both variables are not the same object: `x` is not `y`.x is not y**Membership Operator**a = 'I' b = 'Ironhack'**in** Returns `True` if a sequence with the specified value is present in the object: `x` in `y`.a in b**not in** Returns `True` if a sequence with the specified value is not present in the object: `x` not in `y`.a not in bTF-IDF Vectorizationfrom sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer # Here we build a question bank of all q1 and q2 for calculating TF-IDF questions = df['question1'].to_list() + df['question2'].to_list() questions[:5] questions_splitted=[] for sent in questions: questions_splitted.append(sent.split()) # training tfidf tfidf = TfidfVectorizer(lowercase=False,) tfidf.fit(questions) # creating a dictionary of tfidf features and their idf values word2tfidf = dict(zip(tfidf.get_feature_names(), tfidf.idf_)) len(word2tfidf) nlp=spacy.load('en_core_web_sm') doc1=nlp(df['question1'][0]) mean_vec1 = np.zeros([len(doc1), len(doc1[0].vector)]) mean_vec1.shape # en_vectors_web_lg, which includes over 1 million unique vectors. 
nlp = spacy.load('en_core_web_sm') vecs1 = [] # https://github.com/noamraph/tqdm # tqdm is used to print the progress bar for qu1 in tqdm(list(df['question1'])): doc1 = nlp(qu1) # 384 is the number of dimensions of vectors mean_vec1 = np.zeros([len(doc1), len(doc1[0].vector)]) for word1 in doc1: # word2vec vec1 = word1.vector # fetch df score try: idf = word2tfidf[str(word1)] except: idf = 0 # compute final vec mean_vec1 += vec1 * idf mean_vec1 = mean_vec1.mean(axis=0) vecs1.append(mean_vec1) df['q1_feats_m'] = list(vecs1) len(vecs1) from sklearn.preprocessing import MinMaxScaler df=pd.read_csv(r'/gdrive/MyDrive/Colab Notebooks/Datasets/nlp_features_train.csv',encoding='latin-1') dfp_subsampled = df[0:5000] dfp_subsampled.head(2) X = MinMaxScaler().fit_transform(dfp_subsampled[['cwc_min', 'cwc_max', 'csc_min', 'csc_max' , 'ctc_min' , 'ctc_max' , 'last_word_eq', 'first_word_eq' , 'abs_len_diff' , 'mean_len' , 'token_set_ratio' , 'token_sort_ratio' , 'fuzz_ratio' , 'fuzz_partial_ratio' , 'longest_substr_ratio']]) y = dfp_subsampled['is_duplicate'].values from sklearn.manifold import TSNE tsne2d = TSNE( n_components=2, init='random', # pca random_state=101, method='barnes_hut', n_iter=1000, verbose=2, angle=0.5,perplexity=40 ).fit_transform(X) df = pd.DataFrame({'x':tsne2d[:,0], 'y':tsne2d[:,1] ,'label':y}) df.head(5) import seaborn as sns sns.lmplot(data=df, x='x', y='y', hue='label', fit_reg=False, size=8,palette="Set1",markers=['s','o']) plt.title("perplexity : {} and max_iter : {}".format(30, 1000)) plt.show()CPE 695 Final Project Team 1 Data Descriptionhttps://archive.ics.uci.edu/ml/datasets/Audit+Data# Data Filepath data_filepath = 'audit_data' # Packages import pandas as pd import random import warnings from IPython.display import Image, HTML, display import pydotplus import seaborn as sns import matplotlib.pyplot as plt # Sklearn import sklearn from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn.metrics import classification_report, accuracy_score, confusion_matrix from sklearn.ensemble import RandomForestClassifier import helper warnings.filterwarnings("ignore") plt.style.use('fivethirtyeight') # Load Data df_audit = pd.read_csv('/'.join([data_filepath, 'audit_risk.csv'])) df_trial = pd.read_csv('/'.join([data_filepath, 'trial.csv']))Summarize Data Audit Data (Training Data)# Summarize print(f' Aduit shape: {df_audit.shape}') display(df_audit.head()) display(df_audit.tail()) display(df_audit.info()) df_audit.describe() df_audit.nunique()Trial Data (Test Data)# Summarize print(f'Trial shape: {df_audit.shape}') display(df_trial.head()) display(df_trial.tail()) display(df_trial.info()) df_trial.describe() df_trial.nunique()Select Data: TrialThere are two datasets included in the file audit and trial. The two datasets contain the same data, but the audit data is far more expansive. The audit data contains raw data (Para A, Para B, Money Value, etc.) and formulas (Score A, Score B, Score MV, etc.). Comptroller and Auditor General of India (CAG) derived the formulas. The trial data has fewer formulaic calculations. The purpose of this project is to create a predictive model based on the raw data. The project will use the trial data and remove the calculated scores. The model will overfit the data if the formulas are included. 
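To illustrate that selection step, here is a minimal pandas sketch (not the project's exact pipeline) of keeping only the raw columns and leaving out the CAG-derived score columns; the column names mirror the `cols_keep` list used in the cleaning code below, and the membership guard is only there in case the trial file lacks one of them:

```python
# Sketch: keep raw columns only, so the model cannot simply re-learn the
# CAG score formulas (which would amount to target leakage / overfitting).
raw_cols = ['Sector_score', 'LOCATION_ID', 'PARA_A', 'PARA_B',
            'Money_Value', 'District_Loss', 'History', 'Risk']
df_model = df_trial.loc[:, [c for c in raw_cols if c in df_trial.columns]].copy()
print(df_model.shape)
```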
Clean Data# Variables string_cols = ['LOCATION_ID'] dep_col = 'Risk' dep_col_idx = -1 drop_cols = ['Score'] cols_keep = ['Sector_score', 'LOCATION_ID', 'PARA_A', 'PARA_B', 'numbers', 'Money_Value', 'District_Loss', 'PROB', 'History', 'Prob', 'Risk'] cols_keep = ['Sector_score', 'LOCATION_ID', 'PARA_A', 'PARA_B', 'Money_Value', 'District_Loss', 'History', 'Risk'] df_audit.columns df = df_audit.copy() df = df.loc[:, cols_keep] df = helper.fill_nas_df(df) # No Location df_no_location = df.copy().drop(columns= string_cols) # Location Dummy Only df_location_dummy = pd.get_dummies(df.loc[:, string_cols]) # Location Dummy df_location_included = helper.dummy_string_var(df, string_cols)Ignore Locations and drop columnsdf = df_no_location.copy() #df = df.drop(columns = drop_cols) df.head() # Split X = df.drop(columns = [dep_col]) y = df.loc[:, dep_col] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2)Load datasetfrom sklearn.datasets import fetch_california_housing house_dataset = fetch_california_housing() # Import pandas package to format the data import pandas as pd # Extract features with their names into the a dataframe format data = pd.DataFrame(house_dataset.data, columns=house_dataset.feature_names) # Extract target with their names into a pd.Series object with name MEDV target = pd.Series(house_dataset.target, name = 'MEDV') from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, shuffle=False) X_train.shape, X_test.shapeUse LightGBM GBDT model to do regression without tuningdef build_model(hp): model = lgb.LGBMRegressor( boosting_type='gbdt', # you can also search model type such as: # boosting_type=hp.Choice("model_type", ['gbdt', 'goss'], default='gbdt'), num_leaves=hp.Int("num_leaves", 5, 50, step=1), learning_rate=hp.Float('learning_rate', 1e-3, 1, sampling='log', default=0.01), n_estimators=hp.Int('n_estimators', 5, 50, step=1) ) return modelCustomize tunerimport os import pickle import tensorflow as tf import keras_tuner as kt import lightgbm as lgb from sklearn.metrics import mean_squared_error class LightGBMTuner(kt.engine.base_tuner.BaseTuner): def run_trial(self, trial, X, y, validation_data): model = self.hypermodel.build(trial.hyperparameters) # build the model model.fit(X_train, y_train, eval_set=[validation_data], eval_metric='mse', early_stopping_rounds=5) # fit the model X_val, y_val = validation_data y_pred = model.predict(X_val, num_iteration=model.best_iteration_) # evaluate the model eval_mse = mean_squared_error(y_val, y_pred) self.oracle.update_trial(trial.trial_id, {'mse': eval_mse}) # inform the oracle of the eval result, the result is a dictionary with the metric names as the keys. 
self.save_model(trial.trial_id, model) # save the model to disk def save_model(self, trial_id, model, step=0): fname = os.path.join(self.get_trial_dir(trial_id), 'model.txt') model.booster_.save_model(fname, num_iteration=model.best_iteration_) def load_model(self, trial): fname = os.path.join(self.get_trial_dir(trial.trial_id), 'model.txt') model = lgb.Booster(model_file=fname) return modelCustomize Bayesian Optimization search algorithmimport random import numpy as np from scipy import optimize as scipy_optimize from scipy.stats import norm from sklearn import exceptions from sklearn import gaussian_process from keras_tuner.engine import hyperparameters as hp_module from keras_tuner.engine import multi_execution_tuner from keras_tuner.engine import oracle as oracle_module from keras_tuner.engine import trial as trial_lib class BayesianOptimizationOracle(oracle_module.Oracle): """Bayesian optimization oracle. It uses Bayesian optimization with a underlying Gaussian process model. The acquisition function used is upper confidence bound (UCB), which can be found in the following link: https://www.cse.wustl.edu/~garnett/cse515t/spring_2015/files/lecture_notes/12.pdf # Arguments objective: String or `kerastuner.Objective`. If a string, the direction of the optimization (min or max) will be inferred. max_trials: Int. Total number of trials (model configurations) to test at most. Note that the oracle may interrupt the search before `max_trial` models have been tested if the search space has been exhausted. num_initial_points: (Optional) Int. The number of randomly generated samples as initial training data for Bayesian optimization. (If not specified, a trick is to use the square root of the dimensionality of the hyperparameter space.) beta: Float. The balancing factor of exploration and exploitation. The larger it is, the more explorative it is. seed: Int. Random seed. hyperparameters: HyperParameters class instance. Can be used to override (or register in advance) hyperparamters in the search space. """ def __init__(self, objective, max_trials, beta=2.6, acq_type="ucb", num_initial_points=None, seed=None, hyperparameters=None, *args, **kwargs): super(BayesianOptimizationOracle, self).__init__( objective=objective, max_trials=max_trials, hyperparameters=hyperparameters, seed=seed, *args, **kwargs) # Use 2 as the initial number of random points if not presented. self.num_initial_points = num_initial_points or 2 self.beta = beta self.seed = seed or random.randint(1, 1e4) self._random_state = np.random.RandomState(self.seed) self.gpr = self._make_gpr() self.acq_type = acq_type def _make_gpr(self): return gaussian_process.GaussianProcessRegressor( kernel=gaussian_process.kernels.Matern(nu=2.5), alpha=1e-4, normalize_y=True, random_state=self.seed) def _vectorize_trials(self): x, y = [], [] for trial in self.trials.values(): # Create a vector representation of each Trial's hyperparameters. trial_hps = trial.hyperparameters vector = [] nonfixed_hp_space = [hp for hp in self.hyperparameters.space if not isinstance(hp, hp_module.Fixed)] for hp in nonfixed_hp_space: # For hyperparameters not present in the trial (either added after # the trial or inactive in the trial), set to default value. if trial_hps.is_active(hp): trial_value = trial_hps.values[hp.name] else: trial_value = hp.default # Embed an HP value into the continuous space [0, 1]. 
prob = hp_module.value_to_cumulative_prob(trial_value, hp) vector.append(prob) if trial.status == "COMPLETED": score = trial.score if self.objective.direction == "min": score = -1 * score else: continue x.append(vector) y.append(score) x = np.array(x) y = np.array(y) return x, y def _vector_to_values(self, vector): hps = hp_module.HyperParameters() vector_index = 0 for hp in self.hyperparameters.space: hps.merge([hp]) if isinstance(hp, hp_module.Fixed): value = hp.value else: prob = vector[vector_index] vector_index += 1 value = hp_module.cumulative_prob_to_value(prob, hp) if hps.is_active(hp): hps.values[hp.name] = value return hps.values def _random_populate_space(self): values = self._random_values() if values is None: return {'status': trial_lib.TrialStatus.STOPPED, 'values': None} return {'status': trial_lib.TrialStatus.RUNNING, 'values': values} def _num_completed_trials(self): return len([t for t in self.trials.values() if t.status == 'COMPLETED']) def populate_space(self, trial_id): if self._num_completed_trials() < self.num_initial_points: return self._random_populate_space() # Update Gaussian process regressor x, y = self._vectorize_trials() try: self.gpr.fit(x, y) except exceptions.ConvergenceWarning as e: raise e # Three acquisition functions def _upper_confidence_bound(x): x = x.reshape(1, -1) mu, sigma = self.gpr.predict(x, return_std=True) return -1 * (mu + self.beta * sigma) def _probability_of_improvement(x): # calculate the best surrogate score found so far x_history, _ = self._vectorize_trials() y_pred = self.gpr.predict(x_history, return_std=False) y_best = max(y_pred) # calculate mean and stdev via surrogate function x = x.reshape(1, -1) mu, sigma = self.gpr.predict(x, return_std=True) # calculate the probability of improvement z = (mu - y_best) / (sigma+1E-9) prob = norm.cdf(z) return -1 * prob def _expected_improvement(x): # calculate the best surrogate score found so far x_history, _ = self._vectorize_trials() y_pred = self.gpr.predict(x_history, return_std=False) y_best = max(y_pred) # calculate mean and stdev via surrogate function x = x.reshape(1, -1) mu, sigma = self.gpr.predict(x, return_std=True) # calculate the probability of improvement z = (mu - y_best) / (sigma+1E-9) ei = (mu - y_best) * norm.cdf(z) + sigma * norm.pdf(z) return -1 * ei acq_funcs = { "ucb": _upper_confidence_bound, "pi": _probability_of_improvement, "ei": _expected_improvement, } # Sampling based on acquisition functions optimal_val = float('inf') optimal_x = None num_restarts = 50 bounds = self._get_hp_bounds() x_seeds = self._random_state.uniform(bounds[:, 0], bounds[:, 1], size=(num_restarts, bounds.shape[0])) for x_try in x_seeds: # Sign of score is flipped when maximizing. 
result = scipy_optimize.minimize(acq_funcs[self.acq_type], x0=x_try, bounds=bounds, method='L-BFGS-B') if result.fun[0] < optimal_val: optimal_val = result.fun[0] optimal_x = result.x values = self._vector_to_values(optimal_x) return {'status': trial_lib.TrialStatus.RUNNING, 'values': values} def _get_hp_bounds(self): nonfixed_hp_space = [hp for hp in self.hyperparameters.space if not isinstance(hp, hp_module.Fixed)] bounds = [] for hp in nonfixed_hp_space: bounds.append([0, 1]) return np.array(bounds) def get_state(self): state = super(BayesianOptimizationOracle, self).get_state() state.update({ 'num_initial_points': self.num_initial_points, 'acq_type': self.acq_type, 'beta': self.beta, 'seed': self.seed, }) return state def set_state(self, state): super(BayesianOptimizationOracle, self).set_state(state) self.num_initial_points = state['num_initial_points'] self.acq_type = state['acq_type'] self.beta = state['beta'] self.seed = state['seed'] self._random_state = np.random.RandomState(self.seed) self.gpr = self._make_gpr()Use customized Bayesian Optimization search algorithm to tune modelsbo_tuner = LightGBMTuner( oracle=BayesianOptimizationOracle( objective=kt.Objective('mse', 'min'), max_trials=10, acq_type="ucb", # you can switch between different acquisition functions seed=42), hypermodel=build_model, overwrite=True, project_name='bo_tuner') bo_tuner.search(X_train, y_train, validation_data=(X_val, y_val)) from sklearn.metrics import mean_squared_error best_model = bo_tuner.get_best_models(1)[0] y_pred_test = best_model.predict(X_test) test_mse = mean_squared_error(y_test, y_pred_test) print("The prediction MSE on test set: {}".format(test_mse)) bo_tuner.results_summary(1)Plot search curvesimport matplotlib.pyplot as plt def plot_curve(x, y, xlabel, ylabel, title): plt.plot(x, y) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.show() def plot_curves(x, ys, xlabel, ylabel, title, ymin, ymax, legend, markers, linestyles, markevery=1): for i, y in enumerate(ys): plt.plot(x, y, marker=markers[i], linestyle=linestyles[i], markevery=markevery) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.ylim(ymin, ymax) plt.legend(legend) plt.show() mse_bo = [bo_tuner.oracle.get_trial(trial_id).score for trial_id in bo_tuner.oracle.end_order] ids = list(range(len(mse_bo))) plot_curve(ids, mse_bo, 'Trials in finishing order', 'Validation MSE', 'Searched results') high_value = float('inf') high_mse_bo = [] for value in mse_bo: high_value = min(high_value, value) high_mse_bo.append(high_value) plot_curve(ids, high_mse_bo, 'Trials in finishing order', 'Highest validation MSE so far', 'Searched results') random_tuner = LightGBMTuner( oracle=kt.oracles.RandomSearch( objective=kt.Objective('mse', 'min'), max_trials=100, seed=42), hypermodel=build_model, overwrite=True, project_name='random_tuner') random_tuner.search(X_train, y_train, validation_data=(X_val, y_val)) from sklearn.metrics import mean_squared_error best_model = random_tuner.get_best_models(1)[0] y_pred_test = best_model.predict(X_test) test_mse = mean_squared_error(y_test, y_pred_test) print("The prediction MSE on test set: {}".format(test_mse)) random_tuner.results_summary(1) mse_random = [random_tuner.oracle.get_trial(trial_id).score for trial_id in random_tuner.oracle.end_order] mse_bo = [bo_tuner.oracle.get_trial(trial_id).score for trial_id in bo_tuner.oracle.end_order] high_value = float('inf') high_mse_random = [] for value in mse_random: high_value = min(high_value, value) high_mse_random.append(high_value) high_value 
= float('inf') high_mse_bo = [] for value in mse_bo: high_value = min(high_value, value) high_mse_bo.append(high_value) plot_curves(ids, [mse_random, mse_bo], 'Trials in finishing order', 'Validation MSE', 'Searched results', 0, 1.5, markers=['o', '+'], linestyles=['-', '-.'], legend=['Random search', 'Bayesian optimization']) plot_curves(ids, [high_mse_random, high_mse_bo], 'Trials in finishing order', 'Highest validation MSE so far', 'Searched results', 0.2, 0.4, markers=['o', '+'], linestyles=['-', '-.'], legend=['Random search', 'Bayesian optimization'], markevery=5)- Prove the following facts: Supose $f$ is a function satisfying - $f(0) = f_{min},$ and $\lim_{x\to \infty}f(x) = f_{max}$ - $f$ is continuous - $f$ is strictly increasing then, for any $p\in (f_{min}, f_{max})$, - there exists unique $\hat \sigma$, such that $f(\hat \sigma) = p$ and $$\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p|.$$__Pf:__By Intermediate Value Theorem, there exists $\hat \sigma$, such that $f(\hat \sigma) = p$, since $f$ is continuous and $p\in (f_{min}, f_{max})$.Assuming that there exists $\hat \sigma' \ne \hat \sigma$, such that $f(\hat \sigma') = p$, then we must have $$\hat \sigma' < \hat \sigma$$ or $$\hat \sigma' > \hat \sigma.$$Without loss of generality, assuming that $\hat \sigma' < \hat \sigma$. Because $f$ is strictly increasing, we have $$p = f(\hat \sigma') > f(\hat \sigma) = p,$$which contradicts the assumption above.Therefore, there exists unique $\hat \sigma$, such that $f(\hat \sigma) = p.$Plus, since $| f(\sigma) - p| \ge 0$, we have $$\min_{\sigma\in (0,\infty)} | f(\sigma) - p| \ge 0,$$Because $\hat \sigma$ is the unique point which satisfies $$f(\hat \sigma) - p = 0 ,$$we can conclude that$$\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p|.$$__Q.E.D__ - Now we denote by $f(\sigma)$ the BSM put price with the following parameters: - vol_ratio = $\sigma$; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1. Answer the following questions: - What is $f_{min}$ and $f_{max}$? - Is $f$ strictly increasing on $(0,\infty)$? Justify your answer. - If the market put price is $10$, then what's the implied volatility?__Soln:__The put price with maturity $T$ and $K$ will be known as $P_0$ given as below:$$P_0 = \mathbb E [e^{-rT} (S(T) - K)^-] = K e^{-rT} \Phi(- d_2) - S_0 \Phi(- d_1),$$where $d_i$ are given as$$d_1 = \frac{(r + \frac 1 2 \sigma^2) T - \ln \frac{K}{S_0}}{\sigma \sqrt T},$$and$$d_2 = \frac{(r - \frac 1 2 \sigma^2) T - \ln \frac{K}{S_0}}{\sigma \sqrt T} = d_1 - \sigma \sqrt T$$- $f_{min} = f(0) = 4.8972$ , $f_{max} = \lim_{x\to \infty}f(x) = 104.8972$- $f$ is strictly increasing on $(0,\infty)$, which has been proved in Hw_3/2.- The implied volatility $\hat \sigma = 0.1787$. The main code is shown as follows. 
____from scipy.optimize import fsolve def f(x): gbm1 = Gbm(vol_ratio = x) option1 = VanillaOption(otype=-1) return(gbm1.bsm_price(option1)-10) ans_sig = fsolve(f,0.1) print(ans_sig)- Find its implied volatility with the following parameters: - BSM call price is 10.; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.import numpy as np import scipy.stats as ss class VanillaOption: def __init__( self, otype = 1, # 1: 'call' # -1: 'put' strike = 110., maturity = 1., market_price = 10.): self.otype = otype self.strike = strike self.maturity = maturity self.market_price = market_price #this will be used for calibration def payoff(self, s): #s: excercise price otype = self.otype k = self.strike maturity = self.maturity return max([0, (s - k)*otype]) class Gbm: def __init__(self, init_state = 100., drift_ratio = .0475, vol_ratio = .2 ): self.init_state = init_state self.drift_ratio = drift_ratio self.vol_ratio = vol_ratio def bsm_price(self, vanilla_option): s0 = self.init_state sigma = self.vol_ratio r = self.drift_ratio otype = vanilla_option.otype k = vanilla_option.strike maturity = vanilla_option.maturity d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2) * maturity) / (sigma * np.sqrt(maturity)) d2 = d1 - sigma * np.sqrt(maturity) return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis - otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2)) Gbm.bsm_price = bsm_price from scipy.optimize import fsolve def f(x): gbm2 = Gbm(vol_ratio = x) option2 = VanillaOption(otype=1) return(gbm2.bsm_price(option2)-10) ans_sig_2 = fsolve(f,0.1) print('>>> The implied volatility is ' + str(ans_sig_2))>>> The implied volatility is [0.30199229]MNISTfrom fastai.vision.all import * from fastbook import * path = untar_data(URLs.MNIST_SAMPLE) threes = (path/'train'/'3').ls().sorted() sevens = (path/'train'/'7').ls().sorted() three_tensors = [tensor(Image.open(img)) for img in threes] sevens_tensors = [tensor(Image.open(img)) for img in sevens] stack_threes = torch.stack(three_tensors).float()/255 stack_sevens = torch.stack(sevens_tensors).float()/255 valid_3_tns = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'3').ls()]) valid_3_tns = valid_3_tns.float()/255 valid_7_tns = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'7').ls()]) valid_7_tns = valid_7_tns.float()/255 train_x = torch.cat([stack_threes, stack_sevens]).view(-1, 28*28) train_y = tensor([1]*len(threes) + [0]*len(sevens)).unsqueeze(1) train_x.shape, train_y.shape dataset = list(zip(train_x, train_y)) x, y = dataset[0] x.shape, y valid_x = torch.cat([valid_3_tns, valid_7_tns]).view(-1, 28*28) valid_y = tensor([1]*len(valid_3_tns) + [0]*len(valid_7_tns)).unsqueeze(1) valid_dataset = list(zip(valid_x, valid_y))Now we need an (initially random) weight for every pixeldef init_params(size, std=1.0): return (torch.randn(size)*std).requires_grad_() weights = init_params(28*28, 1) weights.shape, weights[1:10] bias = init_params(1)y=ax+b -> y = weight * x + bias We can now calculate a prediction for one image:(train_x[0] * weights.T).sum() + biasIn Python, matrix multiplication is represented with the @ operatordef linear1(x_batch): return x_batch @ weights + bias preds = linear1(train_x) preds corrects = (preds > 0.5).float() == train_y corrects corrects.float().mean().item()Check change in accuracy when small change in weightsweights[0].data *= 1.0001 preds = linear1(train_x) ((preds > 0.0).float() == train_y).float().mean().item()LOSS FUNCTIONtargets = tensor([1, 0, 1]) predictions = tensor([0.9, 0.4, 
0.2]) torch.where(targets==1, 1-predictions, predictions)torch.where(a_logic, b, c) == if a: b; else c but works on CUDA/GPUdef mnist_loss(predictions, targest): return torch.where(targest==1, 1 - predictions, predictions).mean() mnist_loss(predictions, targets)SIGMOIDdef sigmoid(x): return 1/(1+torch.exp(-x)) plot_function(torch.sigmoid, title='Sigmoid', min=-4, max=4) def mnist_loss(predictions, targets): predictions = predictions.sigmoid() return torch.where(targets==1, 1-predictions, predictions).mean()SGD and MINI-BATCHES A DataLoader can take any Python collection and turn it into an iterator over many batchescoll = range(15) dl = DataLoader(coll, batch_size=5, shuffle=True) list(dl)A collection that contains tuples of independent and dependent variables is known in PyTorch as a Datasetds = L(enumerate(string.ascii_lowercase)) dsWhen we pass a Dataset to a DataLoader we will get back many batches which are themselves tuples of tensors representing batches of independent and dependent variablesdl = DataLoader(ds, batch_size=6, shuffle=True) list(dl)Put it all togetherweights = init_params((28*28, 1)) bias = init_params(1) dl = DataLoader(dataset, batch_size=256) xb, yb = first(dl) xb.shape, yb.shape valid_dl = DataLoader(valid_dataset, batch_size=256) batch = train_x[:4] batch.shape preds = linear1(batch) preds loss = mnist_loss(preds, train_y[:4]) loss loss.mean().backward() weights.grad.shape, weights.grad.mean(), bias.grad def calc_grad(xb, yb, model): preds = model(xb) loss = mnist_loss(preds, yb) loss.backward() calc_grad(batch, train_y[:4], linear1) weights.grad.mean(), bias.gradMethods in PyTorch whose names end in an underscore modify their objects in place. For instance, bias.zero_() sets all elements of the tensor bias to 0.weights.grad.zero_(), bias.grad.zero_(); def train_epoch(model, lr, params): for xb, yb in dl: calc_grad(xb, yb, model) for p in params: p.data -= p.grad*lr p.grad.zero_() (preds > 0.0).float() == train_y[:4] def batch_accuracy(xb, yb): preds = xb.sigmoid() correct = (preds > 0.5) == yb return correct.float().mean() batch_accuracy(linear1(batch), train_y[:4]) def validate_epoch(model): accs = [batch_accuracy(model(xb), yb) for xb, yb in valid_dl] return round(torch.stack(accs).mean().item(), 4) validate_epoch(linear1)Train for an epochlr = 1. 
params = weights, bias train_epoch(linear1, lr, params) validate_epoch(linear1) for i in range(20): train_epoch(linear1, lr, params) print(validate_epoch(linear1), end=' ')0.8124 0.9067 0.9355 0.9467 0.9515 0.9569 0.9569 0.9584 0.9599 0.9608 0.9613 0.9638 0.9638 0.9653 0.9662 0.9677 0.9682 0.9692 0.9696 0.9711Create an optimizerlinear_model = nn.Linear(28*28, 1) w, b = linear_model.parameters() w.shape, b.shape class BasicOptim: def __init__(self, params, lr): self.params = list(params) self.lr = lr def step(self, *args, **kwargs): for p in self.params: p.data -= p.grad.data * self.lr def zero_grad(self, *args, **kwargs): for p in self.params: p.grad = None opt = BasicOptim(linear_model.parameters(), lr) def train_epoch(model): for xb, yb in dl: calc_grad(xb, yb, model) opt.step() opt.zero_grad() validate_epoch(linear_model) def train_model(model, epochs): for i in range(epochs): train_epoch(model) print(validate_epoch(model), end=' ') train_model(linear_model, 20) linear_model = nn.Linear(28*28, 1) opt = SGD(linear_model.parameters(), lr) train_model(linear_model, 20) dls = DataLoaders(dl, valid_dl) learn = Learner(dls, nn.Linear(28*28, 1), opt_func=SGD, loss_func=mnist_loss, metrics=batch_accuracy) learn.fit(10, lr=lr)Adding non-linearitydef simple_net(xb): res = xb @ w1 + b1 res = res.max(tensor(0.0)) res = res @ w2 + b2 return res w1 = init_params((28*28,30)) b1 = init_params(30) w2 = init_params((30,1)) b2 = init_params(1) plot_function(F.relu) simple_net = nn.Sequential( nn.Linear(28*28, 30), nn.ReLU(), nn.Linear(30, 1) ) learn = Learner(dls, simple_net, opt_func=SGD, loss_func=mnist_loss, metrics=batch_accuracy) learn.fit(40, 0.1) plt.plot(L(learn.recorder.values).itemgot(2)); learn.recorder.values[-1][2] dls = ImageDataLoaders.from_folder(path) learn = cnn_learner(dls, resnet18, pretrained=False, loss_func=F.cross_entropy, metrics=accuracy) learn.fit_one_cycle(1, 0.1)Test Utilsdef print_decoded_tensors_as_dict(weapon_data, array_of_tensors): genDict = {} for tensor in array_of_tensors: decoded, _ = weapon_data.decode_processed_tensor(tensor) for key, value in decoded.items(): if key not in genDict: genDict[key] = [] genDict[key].append(value) for key, value in genDict.items(): print(key, "=", value) def get_weapon_data(): return weapons.get_data()Initial VAE TrainingInitializes all network hyperparameters and shows training debug messages of the training epoch and cost. Trains and saves the trained model in the specified folder.network_architecture = \ dict(n_input=0, #set it in with scope n_hidden_1=26, n_hidden_2=12, n_z=2) learning_rate = 0.01 optimizer = tf.train.RMSPropOptimizer(learning_rate) transfer_fct = tf.nn.elu num_epochs = 70 batch_size = 4 epoch_debug_step = 1 saved_model_folder = "trained_vae/" saved_model_full_path = saved_model_folder + "model.ckpt" with tf.Session() as sess: train_data, test_data = get_weapon_data() network_architecture['n_input'] = train_data.num_features network = vae.get_new_trained(sess, train_data, network_architecture, optimizer, transfer_fct, batch_size, num_epochs, epoch_debug_step, trained_model_save_path=saved_model_folder)Encode and Decode Testing 1 Tests the encoding and decoding functionality and outputs the inputted and generated values. 
This case uses the same size as the training batch_size.with tf.Session(graph=tf.Graph()) as sess: network = vae.get_untrained(sess, network_architecture, optimizer, transfer_fct, batch_size) network = vae.restore(network, saved_model_full_path) train_data, test_data = get_weapon_data() samples = test_data.next_batch(batch_size) x_reconstructed = network.encode_and_decode(samples, True) print_decoded_tensors_as_dict(test_data, np.concatenate((samples,x_reconstructed), axis=0))Encode and Decode Testing 2Tests the encoding and decoding functionality and outputs the inputted and generated values. This case does not use the same size as the training batch_size.with tf.Session(graph=tf.Graph()) as sess: network = vae.get_untrained(sess, network_architecture, optimizer, transfer_fct, batch_size) network = vae.restore(network, saved_model_full_path) train_data, test_data = get_weapon_data() samples = test_data.next_batch(1) x_reconstructed_mean = network.encode_and_decode(samples, False) print_decoded_tensors_as_dict(test_data, np.concatenate((samples,[x_reconstructed_mean]), axis=0))Latent Space Visualizationimport matplotlib.pyplot as plt %matplotlib inline def show_z_distribution(vae_model, title, z_mean=True): all_z = np.zeros((batch_size,network_architecture['n_z'])) train_data, test_data = get_weapon_data() total_batch = int(train_data.num_examples / batch_size) # Loop over all batches for i in range(total_batch): batch = train_data.next_batch(batch_size) z_dist = vae_model.calculate_z(batch) if z_mean: z_dist = vae_model.calculate_z_mean(batch) all_z = np.vstack((all_z, z_dist)) plt.figure(figsize=(15,5)) plt.subplot(1,2,1) plt.scatter(all_z[:,0], all_z[:,1]) plt.xlim(-3,3) plt.ylim(-3,3) plt.title(title) plt.subplot(1,2,2) plt.hist2d(all_z[:,0], all_z[:,1], (50, 50), cmap=plt.cm.jet) plt.colorbar() plt.title(title) with tf.Session(graph=tf.Graph()) as sess: network = vae.get_untrained(sess, network_architecture, optimizer, transfer_fct, batch_size) show_z_distribution(network, "Untrained Latent Space", z_mean=True) network = vae.restore(network, "trained_vae/model.ckpt") show_z_distribution(network, "Trained Latent Space - Z Mean", z_mean=True) show_z_distribution(network, "Trained Latent Space - Z", z_mean=False)Random Input Decoding Test 1This tests the decoding from latent space functionality with random input. This case does not use the same size as the training batch_size.with tf.Session(graph=tf.Graph()) as sess: network = vae.get_untrained(sess, network_architecture, optimizer, transfer_fct, batch_size) network = vae.restore(network, saved_model_full_path) generated = [] random_val = np.random.normal(size=(1,network_architecture["n_z"])) x_test = network.decode_from_latent_space(random_val, False) #[generated.append(x) for x in x_test] generated.append(x_test) train_data, test_data = get_weapon_data() print_decoded_tensors_as_dict(train_data, generated)Random Input Decoding Test 2This tests the decoding from latent space functionality with random input. 
This case uses the same size as the training batch_size.with tf.Session(graph=tf.Graph()) as sess: network = vae.get_untrained(sess, network_architecture, optimizer, transfer_fct, batch_size) network = vae.restore(network, saved_model_full_path) generated = [] random_val = np.random.normal(size=(batch_size,network_architecture["n_z"])) x_test = network.decode_from_latent_space(random_val, True) [generated.append(x) for x in x_test] train_data, test_data = get_weapon_data() print_decoded_tensors_as_dict(train_data, generated)Here's where the TensorFlow stuff starts. The "session" is the thing that will actually do that calculations.sess = tf.InteractiveSession()The way that TensorFlow works is that you define the operations on `Variable` and `placeholder` objects. Here we describe the model:T = tf.float64 # First the variables that we might want to optimize: porb_tensor = tf.Variable(porb, dtype=T) tp_tensor = tf.Variable(tp, dtype=T) nu_tensor = tf.Variable(nu_arr, dtype=T) e_param_tensor = tf.Variable(e_param, dtype=T) # This forces the ecc to be between 0 and 1 e_tensor = 1.0 / (1.0 + tf.exp(-e_param_tensor)) varpi_tensor = tf.Variable(varpi, dtype=T) log_sigma2_tensor = tf.Variable(0.0, dtype=T) # Variance from observational uncertainties and model misspecification ad_tensor = tf.Variable(a1d + np.zeros_like(nu_arr), dtype=T) # These are some placeholders for the data: times_tensor = tf.placeholder(T, times.shape) dmmags_tensor = tf.placeholder(T, dmmags.shape) # Solve Kepler's equation mean_anom = 2.0 * np.pi * (times_tensor - tp_tensor) / porb_tensor ecc_anom = kepler(mean_anom, e_tensor) true_anom = 2.0 * tf.atan2(tf.sqrt(1.0+e_tensor)*tf.tan(0.5*ecc_anom), tf.sqrt(1.0-e_tensor) + tf.zeros_like(times_tensor)) # Here we define how the time delay will be calculated: tau_tensor = -(1.0 - tf.square(e_tensor)) * tf.sin(true_anom + varpi_tensor) / (1.0 + e_tensor*tf.cos(true_anom)) # And the design matrix: arg_tensor = 2.0 * np.pi * nu_tensor[None, :] * (times_tensor[:, None] - ad_tensor[None, :] * tau_tensor[:, None]) D_tensor = tf.concat([tf.cos(arg_tensor), tf.sin(arg_tensor)], axis=1) # Define the linear solve for W_hat: DTD_tensor = tf.matmul(D_tensor, D_tensor, transpose_a=True) DTy_tensor = tf.matmul(D_tensor, dmmags_tensor[:, None], transpose_a=True) W_hat_tensor = tf.linalg.solve(DTD_tensor, DTy_tensor) # Finally, the model and the chi^2 objective: model_tensor = tf.squeeze(tf.matmul(D_tensor, W_hat_tensor)) chi2_tensor = tf.reduce_sum(tf.square(dmmags_tensor - model_tensor)) * tf.exp(-log_sigma2_tensor) chi2_tensor += len(times) * log_sigma2_tensorHere's how you could evaluate different parts of the model:# We need to initialize the variables: tf.global_variables_initializer().run() # We'll also need to pass in the data: data = {times_tensor: times, dmmags_tensor: dmmags} # Let's plot the initial time delay initial_tau = sess.run(tau_tensor, feed_dict=data) plt.plot(times+tmid, initial_tau, ".", ms=2) plt.ylabel(r"$\tau(t)$") plt.xlabel("$t$"); initial_model = sess.run(model_tensor, feed_dict=data) plt.plot(times, dmmags, ".k") plt.plot(times, initial_model) # plt.xlim(100, 102) # plt.ylim(-75, 75) plt.xlabel("t") plt.ylabel("L(t)");Now we'll fit the parameters. 
We'll iterate with different subsets a few times.old_chi2 = sess.run(chi2_tensor, feed_dict=data) for i in range(5): params = [log_sigma2_tensor, porb_tensor, tp_tensor] opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) params.append(ad_tensor) opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) params += [e_param_tensor, varpi_tensor] opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) new_chi2 = sess.run(chi2_tensor, feed_dict=data) print(old_chi2 - new_chi2) if np.abs(old_chi2 - new_chi2) < 1.0: break old_chi2 = new_chi2INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: 38819.131340 Number of iterations: 5 Number of functions evaluations: 9 INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: 38818.046748 Number of iterations: 9 Number of functions evaluations: 13 INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: 38818.046748 Number of iterations: 1 Number of functions evaluations: 4 4715.05510637 INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: 38817.963113 Number of iterations: 5 Number of functions evaluations: 9 INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: 3[...]Let's make the updated final plots.final_tau = sess.run(tau_tensor, feed_dict=data) plt.plot(times+tmid, initial_tau, ".", ms=2) plt.plot(times+tmid, final_tau, ".", ms=2) plt.ylabel(r"$\tau(t) / a$") plt.xlabel("$t$"); models = tau_tensor[:, None] * ad_tensor[None, :] plt.plot(times+tmid, sess.run(models, feed_dict=data), ".", ms=3) plt.ylabel(r"$\tau(t)$") plt.xlabel("$t$"); ivar = -np.diag(sess.run(tf.hessians(-0.5*chi2_tensor, ad_tensor), feed_dict=data)[0]) ad = sess.run(ad_tensor) ad *= np.sign(ad[0]) sig = 1.0 / np.sqrt(ivar) plt.errorbar(np.arange(len(ad)), ad, yerr=sig, fmt="."); m = np.ones_like(ad, dtype=bool) while True: var = 1.0 / np.sum(ivar[m]) mu = np.sum(ivar[m] * ad[m]) * var var2 = np.sum(ivar[m] * (mu - ad[m])**2) * var m_new = np.abs(ad - mu) / np.sqrt(var) < 7.0 if m.sum() == m_new.sum(): m = m_new break m = m_new ad = ad[m] ivar = ivar[m] m sig = 1.0 / np.sqrt(ivar) plt.errorbar(np.arange(len(ad)), ad, yerr=sig, fmt="."); ad * np.sqrt(ivar) if np.any(ad * np.sqrt(ivar) < -1.0): m2 = ad * np.sqrt(ivar) < -1.0 m1 = ~m2 ad = [ np.sum(ivar[m1]*ad[m1]) / np.sum(ivar[m1]), np.sum(ivar[m2]*ad[m2]) / np.sum(ivar[m2]), ] else: ad = [np.sum(ivar*ad) / np.sum(ivar)] ad if len(ad) > 1: inds = tf.cast(0.5 - 0.5 * (ad_tensor / tf.abs(ad_tensor)), tf.int32) else: inds = tf.zeros_like(ad_tensor, dtype=tf.int32) ad_params = tf.Variable(ad, dtype=T) sess.run(ad_params.initializer) ad_tensor = tf.gather(ad_params, inds) # And the design matrix: arg_tensor = 2.0 * np.pi * nu_tensor[None, :] * (times_tensor[:, None] - ad_tensor[None, :] * tau_tensor[:, None]) D_tensor = tf.concat([tf.cos(arg_tensor), tf.sin(arg_tensor)], axis=1) # Define the linear solve for W_hat: DTD_tensor = tf.matmul(D_tensor, D_tensor, transpose_a=True) DTy_tensor = tf.matmul(D_tensor, dmmags_tensor[:, None], transpose_a=True) W_hat_tensor = 
tf.linalg.solve(DTD_tensor, DTy_tensor) # Finally, the model and the chi^2 objective: model_tensor = tf.squeeze(tf.matmul(D_tensor, W_hat_tensor)) chi2_tensor = tf.reduce_sum(tf.square(dmmags_tensor - model_tensor)) * tf.exp(-log_sigma2_tensor) chi2_tensor += len(times) * log_sigma2_tensor old_chi2 = sess.run(chi2_tensor, feed_dict=data) for i in range(5): params = [log_sigma2_tensor, porb_tensor, tp_tensor] opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) params.append(ad_params) opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) params += [e_param_tensor, varpi_tensor] opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) params.append(nu_tensor) opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, params, method="L-BFGS-B") opt.minimize(sess, feed_dict=data) new_chi2 = sess.run(chi2_tensor, feed_dict=data) print(old_chi2 - new_chi2) if np.abs(old_chi2 - new_chi2) < 1.0: break old_chi2 = new_chi2 models = tau_tensor[:, None] * ad_tensor[None, :] plt.plot(times+tmid, 86400.0 * sess.run(models, feed_dict=data), ".", ms=2); plt.ylabel(r"$\tau(t)$") plt.xlabel("$t$"); sess.run(e_tensor), e hess_tensor = tf.hessians(-0.5*chi2_tensor, params[:-1]) hess = sess.run(hess_tensor, feed_dict=data) 1. / np.sqrt(-hess[1]) hess np.sqrt(-np.diag(np.linalg.inv(hess[3]))) sess.run(ad_tensor) a1d / np.sqrt(-np.diag(np.linalg.inv(hess[3]))) porb sess.run(porb_tensor)Finding Numerical Fluxes for DGCopyright (C) 2020 MIT LicensePermission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included inall copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS INTHE SOFTWARE.----NB: This note book uses the [Maxima-Jupyter kernel](https://github.com/robert-dodier/maxima-jupyter) to interface with [Maxima](http://maxima.sourceforge.net/), an open-source computer algebra system. 
I have found that the [Docker image](https://hub.docker.com/r/calyau/maxima-jupyter) is a fairly convenient way of using this kernel.Some Maxima version info for reproducibility:build_info();Load some packages:kill(all); load("itensor"); assert(condition):=if not condition then error("Assertion violated") else true$ norm_2_squared(v):=v.v; crossfunc(f):=makelist( sum(sum( levi_civita([i,j,k])*f(j,k), j,1,3),k,1,3),i,1,3)$ crossprod(a,b):=crossfunc(lambda([j,k], a[j]*b[k]));REDEFINITION-WITH-DEFMACRO: redefining MAXIMA::IFNOT in DEFMACRO REDEFINITION-WITH-DEFMACRO: redefining MAXIMA::M+OR*OR^P in DEFMACRO REDEFINITION-WITH-DEFUN: redefining MAXIMA::$IDUMMY-IMPL in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::$IDUMMY in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::ISPROD in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::DERAT in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::PLUSI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::MINUSI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::COVI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::CONTI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::DERI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::NAME in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::$COVI-IMPL in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::$COVI in DEFUN REDEFINITION-WITH-DEFUN: redefining MAXIMA::$CONTI-IMPL in DEFUN REDEFINITION-WITH-D[...]Simplification UtilitiesThese function simplify expressions coming from the (symbolic) simultaneous diagonalization, by letting Maxima realize that $n \cdot n =1$./* ------------------------------------------------------------------------- */ /* Simplification for expressions stemming from hyperbolic systems */ /* ------------------------------------------------------------------------- */ hypsimp(x):=ratsimp(ratsubst(1,n.n,x))$ fullhypsimp(x):=hypsimp( ratsubst( last(n)^2, 1-sum(n[i]^2,i,1,length(n)-1), x) )$ /* ------------------------------------------------------------------------- */ /* diagonalize a given hyperbolic operator A */ /* ------------------------------------------------------------------------- */ hypdiagonalize(A):=block([evA, V, invV,D], evA:hypsimp(apply(append, eigenvectors(A)[2])), V:transpose(apply(matrix, evA)), invV:hypsimp(invert(V)), assert(hypsimp(V.invV)=ident(length(A))), D:hypsimp(invV.A.V), [V, D, invV])$Wave Equation/* redefine this to change dimensionality: */ n:[nx,ny]; dims:length(n); assume(c>0); if dims = 1 then n:[1];Define the flux jacobian for the first-order form of the wave-equation:$$\begin{align*}\partial_t u &= \nabla \cdot \boldsymbol v\\\partial_t \boldsymbol v &= \nabla u\end{align*}$$projected onto a line with normal `n`:esymmatrix(n, v, i,j):=ematrix(n,n,v,i,j)+ematrix(n,n,v,j,i); wave_A:sum(n[i]*esymmatrix(dims+1, -c, 1+i,1),i,1,dims);Find the eigenvalues of the flux Jacobian:[wave_V, wave_D, wave_invV]:hypdiagonalize(wave_A);Finding the Numerical FluxThis function sets up a system of Rankine-Hugoniot conditions across the flux fan and solves for the stagnation state:/* ------------------------------------------------------------------------- */ /* compute upwind flux for a given operator with eigenvalues evs, sorted * in ascending order. * Sign assumptions for all variables occuring in evs must be in place. 
*/ /* ------------------------------------------------------------------------- */ hyp_upwind_flux(evs, D):=block([evvars, Dp, Dm, n, midstates, states, unknowns], evvars:listofvars(evs), add_evvars_suffix(suffix, x):=subst(makelist(v=concat(''v, suffix), v, evvars), x), evsm:add_evvars_suffix(m, evs), evsp:add_evvars_suffix(p, evs), Dm:add_evvars_suffix(m, D), Dp:add_evvars_suffix(p, D), midstates:makelist(makelist(concat(s,state,i), i, 1, length(D)), state, 1, length(evs)-1), states:append( [makelist(concat(sm, i), i, 1, length(D))], midstates, [makelist(concat(sp,i), i, 1, length(D))]), unknowns:apply(append, midstates), result:if member(0, evs) then block([biasedD, veceqns, eqns, soln], biasedD:makelist( if evs[i] = 0 then [Dp,Dm] else if evs[i] > 0 then [Dp,Dp] else [Dm,Dm], i, 1, length(evs)), veceqns:apply(append, makelist( -(if evs[i] > 0 then evsp[i] else evsm[i]) *(states[i+1]-states[i]) +(biasedD[i][1].states[i+1]-biasedD[i][2].states[i]), i,1,length(evs))), eqns:makelist(veceqns[i,1], i, 1, length(veceqns)), soln:solve(eqns, unknowns), assert(length(soln)=1), for i: 1 thru length(evs) do if evs[i] = 0 then return(Dp.subst(soln[1], midstates[i])) ) else block([straddle_idx, Dstates, veceqns, eqns, soln], straddle_idx:for i: 1 thru length(evs)-1 do if (evs[i] < 0) and (evs[i+1] > 0) then return(i), flux:makelist(concat(flux,i),i,1,length(D)), unknowns:append(unknowns, flux), Dstates:append( [Dm.first(states)], makelist( if i = straddle_idx then flux else if evs[i] > 0 then Dp.midstates[i] else Dm.midstates[i], i, 1, length(midstates)), [Dp.last(states)]), veceqns:apply(append, makelist( -(if evs[i] > 0 then evsp[i] else evsm[i]) *(states[i+1]-states[i]) +(Dstates[i+1]-Dstates[i]), i,1,length(evs))), eqns:makelist(veceqns[i,1], i, 1, length(veceqns)), print(covect(eqns)), soln:solve(eqns, unknowns), assert(length(soln)=1), subst(soln[1], flux) ), subst( append( makelist(concat(sm, i)=sm[i,1], i, 1, length(D)), makelist(concat(sp, i)=sp[i,1], i, 1, length(D)) ), result) )$Find an expression for the flux in characteristic variables.Note the `p` and `m` suffixes for the $+$ and $-$ sides of the interface.wave_eigenvalues:makelist(wave_D[i,i], i, 1, length(wave_D)); if member(0, wave_eigenvalues) then wave_sflux:hyp_upwind_flux([-c,0,c], wave_D) else wave_sflux:hyp_upwind_flux([-c,c], wave_D);solve: dependent equations eliminated: (1 6 8)Convert back to conserved variables:wave_wflux:ratsimp(wave_V.ev(wave_sflux, [sm=wave_sminw,sp=wave_spinw]));Maxwell's EquationsFirst, set up some parameter assumptions:assume(c>0); assume(mu>0); assume(epsilon>0); assume(epsinv>0); assume(muinv>0);Some helper functions for matrix creation:/* A hyperbolic system matrix resulting from a curl */ curlmat(coord):=genmatrix( lambda ([i,j], levi_civita([coord,j,i])), 3,3)$ vstack:append$ hstack(a,b):=transpose(append(transpose(a),transpose(b)))$ blockmat(a11,a12,a21,a22):=vstack(hstack(a11,a12),hstack(a21,a22))$ n:[nx,ny,nz];Next, write down the flux Jacobian on a line with normal `n`:max_submat(i):=blockmat( zeromatrix(3,3), -epsinv*curlmat(i), /* epsinv = 1/epsilon */ muinv*curlmat(i), /* muinv = 1/mu */ zeromatrix(3,3) )$ max_Asimp:sum(n[i]*max_submat(i),i,1,3); max_A:subst([epsinv=1/epsilon,muinv=1/mu], max_Asimp);Next, diagonalize to obtain the transformation to/from characteristic variables:max_invsubst(x):=subst([epsinv=1/epsilon, muinv=1/mu], x)$ [max_V, max_D, max_invV]:max_invsubst(hypdiagonalize(max_Asimp));Now find the flux in characteristic 
variables:max_Dinc:subst([1/(sqrt(epsilon)*sqrt(mu))=c], max_D); max_sflux:hyp_upwind_flux([-c,0,c], max_Dinc); /* FIXME: max_V should not depend on epsilon and mu, but it does For now, make cp and cm equal. */ max_sflux:subst( [cp=1/(sqrt(epsilon)*sqrt(mu)), cm=1/(sqrt(epsilon)*sqrt(mu))], max_sflux);solve: dependent equations eliminated: (1 2 11 16 15 12)And in conserved variables:max_Em:makelist(Em[i],i,1,3)$ max_Ep:makelist(Ep[i],i,1,3)$ max_Hm:makelist(Hm[i],i,1,3)$ max_Hp:makelist(Hp[i],i,1,3)$ max_wm:vstack(max_Em,max_Hm)$ max_wp:vstack(max_Ep,max_Hp)$ max_sminw:hypsimp(max_invV.max_wm)$ max_spinw:hypsimp(max_invV.max_wp)$ max_wflux:fullhypsimp(max_V.ev(max_sflux, [sm=max_sminw,sp=max_spinw])); max_stronglocalpart:max_A.max_wm; max_strongwflux:max_stronglocalpart-max_wflux;Check against value from [the literature](https://doi.org/10.1016/0010-4655(91)90199-U):max_Z:sqrt(mu/epsilon)$ max_Y:sqrt(epsilon/mu)$ max_knownstrongwflux:ratsimp(vstack( -1/(2*epsilon) *(crossprod(n,(max_Hm-max_Hp)-1/max_Z*crossprod(n,max_Em-max_Ep))), 1/(2*mu) *(crossprod(n,(max_Em-max_Ep)+1/max_Y*crossprod(n,max_Hm-max_Hp))) )); assert(norm_2_squared(hypsimp( (max_strongwflux) -max_knownstrongwflux))=0);Mean ShiftLet’s implement Mean Shift from scratch. First, we’ll have to define a Mean Shift object.import numpy as np from utils.kernels import RBF from utils.distances import euclidean class MeanShift: def __init__(self, bandwidth=1, tol=1E-7): self.bandwidth = bandwidth self.tol = 1 - tol self.kernel = RBF(gamma=self.bandwidth)The bandwidth parameter is there to parameterize the Radial Basis Function kernel. Now, let’s assumed that we have a trained model. This means that we have centers representing our clusters. Assigning a new point to a cluster comes down to assigning the point to its closest cluster. In this case we will use the Euclidean distance.def _compute_labels(self, X, centers): labels = [] for x in X: distances = np.array([euclidean(x, center) for center in centers]) label = np.argmin(distances) labels.append(label) _, labels = np.unique(labels, return_inverse=True) return np.array(labels, dtype=np.int) def predict(self, X): labels = self._compute_labels(X, self.cluster_centers_) return labelsNow, let’s look at how we can train our model. Given some data, we first start by creating a center for each point in the data. Then, until convergence, we shift and merge centers.def fit(self, X): for labels, centers in self._fit(X): self.labels_ = labels self.cluster_centers_ = centers return self def _fit(self, X): old_centers = np.array([]) new_centers = X labels = -np.ones(len(X)) # -1 represents an "orphan" while not self._has_converged(old_centers, new_centers): yield labels, new_centers old_centers = new_centers new_centers = [] for center in old_centers: shifted_center = self._shift(center, X) new_centers.append(shifted_center) new_centers = self._merge_centers(new_centers) labels = self._compute_labels(X, new_centers)An important function is the _shift function. To shift a center, we calculate the density values between the center and all points. Then, the new center is created by taking a weighted average of the data points. The difference in position between the old and new center is what is referred to as the __shift__.def _shift(self, x, X): densities = [self.kernel(x, x_) for x_ in X] shifted_center = np.average(X, weights=densities, axis=0) return shifted_centerSince all centers will eventually converge, some centers might need to be merged to speed up computation. 
Also, because of computer arithmetic, centers will rarely be exactly at the same position. Therefore, we redefine each center as the average of all centers that are within a certain high-density region around it. This way, we end up with identical centers, which we merge.def _merge_centers(self, centers): centers = np.unique(centers, axis=0) new_centers = [] for c in centers: distances = np.array([self.kernel(c, c_) for c_ in centers]) new_centers.append(np.mean(centers[distances > self.tol], axis=0)) centers = np.unique(new_centers, axis=0) return centersIn our case, we define convergence as the moment where the shifted centers are “close enough” to the old centers.def _has_converged(self, old, new): if len(old) == len(new): for i in range(len(new)): if self.kernel(old[i], new[i]) < 1.0: return False return True else: return FalseETL sandboxHere you can find the whole ETL process that transforms a .txt file of the Colombian Constitution into an index on an Elastic Search Server (ESS). In case you don't have an ESS, or want to make local loads, you can use a Docker image. Something to clarify: if you want to use this notebook to create JSON documents from .txt files, they must have the following hierarchy structure:```hierarchy = { 'TITULO' : 'headline', 'DISPOSICIONES' : 'headline', 'CAPITULO' : 'chapter', 'ARTÍCULO' : 'article'}```An important consideration to keep in mind is that this order represents the heading level of each classification. In other words, a headline is an _"h1"_ in HTML, a chapter an _"h2"_, and an article would be a _"p"_. Importing libraries and tools.The libraries needed to run the main project are, as the conventions say, listed in the requirements.txt file. Here the only library that is not installed in Python's kernel is [tabulate](https://pypi.org/project/tabulate/). To install it you can use ```pip install tabulate``` or ```pip3 install tabulate```. About support.!pip3 install tabulate from classifier import * from os import path from support import * from tabulate import tabulate import requests import jsonExtracting the data.A simple use of the `path` tools from the standard library. With this in place, it won't matter that the root path changes from one computer to another: the file with the constitution will be loaded into our program with no inconvenience.Another consideration taking place here is the removal of the main title of the text, "Constitucion Politica de Colombia", because it adds nothing relevant, only redundancy.root_folder = "LegalSearcher/ReadFiles" constitution_f = f'../ReadFiles/constitucion_colombiana.txt' filepath = path.abspath(constitution_f) with open(filepath, 'r') as f: or_text = f.readlines() f.close() print('Original len:', len(or_text)) text = [] for line in or_text: if line != '\n': if line != ' \n': text.append(line) # Drop the title "Constitucion Politica de Colombia" text.pop(0) print('Total elements:', len(text), '\n----------------------') # Particular case text[:5]Original len: 2742 Total elements: 1353 ----------------------Here we get the first metric: the original number of elements in the file. As can be seen, the headlines of the Titles do not use a final period before the end of the line. Considerations for EDA.In order to support EDA and remove noise from the data, the paragraphs of the text will be split by periods "." and semicolons ";". This was decided in the search for the best granularity for the embeddings model. 
Looking trough the metrics, the split is made by punctuation mark.dot_text = split_text_in_lines(text, delimiter=".") print('Total elements:', len(dot_text), '\n----------------------') dot_text[:5]Total elements: 3593 ----------------------And here jumps a noise. Yes, the function was made to consider this issues of the line break ("\n"). Having it before helps in Frontend at the process of renderizing, but here it brings just noice. In order to remove it, and adjusting the sentences by removing the space at the begging, result of the splitter, the next process will be executed: 1) Remove de line breaks "\n.". 2) Remove the space at the begging. 3) Remove the line breaks ('\n') on headlines and chapters. 4) Print the true len of the dot_text. 5) Generate the dcomma_text, and extract the len of it.# dot_text = [ element for element in dot_text if element != '\n.'] # dot_text = [ element[1:] if element[0] == ' ' else element for element in dot_text] # Remembering the original len print('Original elements in dot_text:', len(dot_text), '\n----------------------') ndot_text = [] index = 0 for line in dot_text: # Step 1: Remove de line breaks "\n.". if line == '\n.': pass elif len(line) == 0: pass # Step 2:Remove the space at the begging. elif line[0] == ' ': ndot_text.append(line[1:]) # Step 3:Remove the \n for headlines and chapters elif line[-1:] == '\n': # Some elements are just line breaks, and adding the condition into # an "AND" on the if doesn't remove it if len(line) > 2: ndot_text.append(line.rstrip("\n")) # Nothing to change else: ndot_text.append(line) index += 1 # Step 3: Print the true len of the dot_text. print('Total elements:', len(ndot_text), '\n----------------------') index = 0 for line in ndot_text: # Step 1: Remove de line breaks "\n.". if line == '\n.': ndot_text.pop(index) index +=1 # Step 4: Generate the dcomma_text, and extract the len of it. dcomma_text = split_text_in_lines(ndot_text, delimiter=";") print('Total elements:', len(dcomma_text), '\n----------------------')Original elements in dot_text: 3593 ---------------------- Total elements: 2458 ---------------------- Total elements: 2550 ----------------------The problem between Frontend, Elastic Search and Data Science.The problem here, is that this data will give to the frontend more work to do in order to know which element join and which add a line breaker. This problem shows the reason of not deleting the original text list. Transforming the articles into python dictionaries.For this process, the classifier filters through the hierarchy dictionary shown at the beggining. Fortunately, the constitution could be watched as an semi-structured data base if you make a fast check.The logic of this is that every headline will start with _**"TITULO"**_, except for the last one that is _**"DISPOSICIONES"**_. On the other side, every chapter and article starts with the words _**"CAPITULO"**_ and _**"Articulo"**_. 
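As a purely illustrative sketch of this prefix rule (the project's `articles_info` classifier does the real work; `classify_line` and `normalize` here are hypothetical helpers, not part of the project):

```
import unicodedata

def normalize(word):
    """Upper-case and strip accents, so 'Artículo' matches the key 'ARTÍCULO'."""
    decomposed = unicodedata.normalize('NFD', word.upper())
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')

def classify_line(line, hierarchy):
    first_word = normalize(line.split(' ', 1)[0])
    for prefix, level in hierarchy.items():
        if first_word.startswith(normalize(prefix)):
            return level      # 'headline', 'chapter' or 'article'
    return 'content'          # anything else belongs to the last article seen
```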
Considereing this, every other word (or digit of an ordered list) at the beggining of any paragraph means that it is on the last article mentioned.```hierarchy = { 'TITULO' : 'headline', 'DISPOSICIONES' : 'headline', 'CAPITULO' : 'chapter', 'ARTÍCULO' : 'article'}```const_info = { 'id': "constitucion", 'source_name': "Constitución Política de Colombia", } art_list = articles_info(const_info, text, debugging=False) print('total articles = ', len(art_list))total articles = 440Overview of the articles dictionary.Every article dictionary has the next structure:art_list[78]Because of this structure, every component of the article can be consulted following python's methods of key-value. The variable "first" is referes to the first article that wants to be checked.If last_pl has fist on it, this will return only the article numbered at firstart = 12 print(f'Article No {art+1}: \n', "id: ", art_list[art]['id'],"\n", #"lexical_diversity: ", art_list[art]['article']['lexical_diversity'],"\n", art_list[art]['headline']['title'],art_list[art]['headline']['name'],"\n", art_list[art]['chapter']['title'],art_list[art]['chapter']['name'],"\n", art_list[art]['article']['name'],art_list[art]['article']['content']) print('\n--------------------------------------\n')Article No 13: id: constitucion00000201000013 DISPOSICIONES TRANSITORIAS DE LA REFORMA DE LA CONSTITUCION CAPITULO 5 DE LOS DEBERES Y OBLIGACIONES Artículo 13. ['Artículo 13. Todas las personas nacen libres e iguales ante la ley, recibirán la misma protección y trato de las autoridades y gozarán de los mismos derechos, libertades y oportunidades sin ninguna discriminación por razones de sexo, raza, origen nacional o familiar, lengua, religión, opinión política o filosófica.\n', 'El Estado promoverá las condiciones para que la igualdad sea real y efectiva y adoptara medidas en favor de grupos discriminados o marginados.\n', 'El Estado protegerá especialmente a aquellas personas que por su condición económica, física o mental, se encuentren en circunstancia de debilidad manifiesta y sancionará los abusos o maltratos que contra ellas se cometan.\n'] --------------------------------------In case you want to explore more about the articles and check by your self, here you can use the benefit of having all the dictionaries on a list. Select your range of interest by changing the first and last article of interest. Remember that list starts at 0, so remove 1 if you have an specific article on mind.first_art = 1 last_art = 3 for n in range(len( art_list[first_art : last_art] )): article = art_list[n] art_number = n + 1 print_article(art_number, article)Article No 2: id: constitucion00000100000001 DISPOSICIONES TRANSITORIAS DE LA REFORMA DE LA CONSTITUCION None None Artículo 1. ['Artículo 1. Colombia es un Estado social de derecho, organizado en forma de República unitaria, descentralizada, con autonomía de sus entidades territoriales, democrática, participativa y pluralista, fundada en el respeto de la dignidad humana, en el trabajo y la solidaridad de las personas que la integran y en la prevalencia del interés general.\n'] -------------------------------------- Article No 3: id: constitucion00000100000002 DISPOSICIONES TRANSITORIAS DE LA REFORMA DE LA CONSTITUCION None None Artículo 2. ['Artículo 2. 
Son fines esenciales del Estado: servir a la comunidad, promover la prosperidad general y garantizar la efectividad de los principios, derechos y deberes consagrados en la Constitución; facilitar la participación de todos en las decisiones que los afectan y en la vida económica, política, administrativa y cultura[...]Here results notorious the advantage to Frontend of mantain the libe breaks. There is no problem rendering the text. But what about Data Science? How Elastic Search will know where to check with a embedding model? Adding the embeding model to the articles dictionaryUsing the same tool, the embedding model will be made with the splitted text dcomma_textembed_list = articles_info(const_info, text, debugging=False) print('total articles = ', len(embed_list)) dcomma_text[143:145]Using the zipping tool, adding the embedding to a new key in the articles dictionary will result easy.for embed, article in zip(embed_list, art_list): article['dot_comma_sep'] = embed['article']['content'] art_list[78]Elastic Search format.In case that there is something that needs to be removed, like the lexical_diversity, you will have to edit the original function of **format_articles**. This because, this function is designed _ad hoc_, and other considerations will be time invested on something that won't increment the efficiency but presentation.levels = { 'book','part', 'headline', 'chapter', 'section', 'article' } json_list = format_articles(art_list, headers_dict=levels, debugging=False)Load and storage of data.By the time this notebook is written (not finished yet according to plan), the json file that will contain all the articles transformed is storaged on the same folder. This for nothing more than cosiness of checking only the files on the same folder and no opening any other tab.dict_json = json.dumps(json_list, ensure_ascii=False) root_folder = "LegalSearcher/ReadFiles/Embeddings" embedding_f = f'../ReadFiles/Embeddings/constitucion-embedding.json' filepath = path.abspath(embedding_f) file = open(embedding_f, "w") file.write(dict_json) file.close()Quick check of the articles stateHere is observed that the lexical_diversity is removed and that objects that every cathegory has.article = json_list[1] for key, value in article.items(): print(key, ':', value)index : constitucion legal_source : Constitución Política de Colombia id : constitucion00000100000002 headline : {'title': 'DISPOSICIONES TRANSITORIAS\n', 'name': 'DE LA REFORMA DE LA CONSTITUCION\n'} section : {'title': None, 'name': None} book : {'title': None, 'name': None} chapter : {'title': None, 'name': None} part : {'title': None, 'name': None} article : {'name': 'Artículo 2.', 'content': ['Artículo 2. Son fines esenciales del Estado: servir a la comunidad, promover la prosperidad general y garantizar la efectividad de los principios, derechos y deberes consagrados en la Constitución; facilitar la participación de todos en las decisiones que los afectan y en la vida económica, política, administrativa y cultural de la Nación; defender la independencia nacional, mantener la integridad territorial y asegurar la convivencia pacifica y la vigencia de un orden justo.\n', 'Las autoridades de la República están instituidas para proteger a todas las personas residentes en Colombia, en [...]Load of articles to localhostHere it will be used the PUT method, in order to make the index in Elastic Search the same as in our Data Base. 
For it, the local_url variable would be a formated string, where the article id will be added for every iteration.In order to watch the result of every _"put request"_, log_info will be created in order to be used with the **tabulate** tool. In case there is no error, that column will be empty.log_info = {'id': None, 'status': None, 'error': None, 'message': None, }Something to consider is the fact that a succesfull load of an article, will not return a status code of 200, instead, it will return a success document with all the data of the new object created or overwrited.for article in json_list: es_article_url = f"http://localhost:9200/test_all/_doc/{article['id']}" request_response = requests.put(es_article_url, json=article) log_info = add_to_log(log_info, request_response, article) # print(tabulate(log_info, headers='keys'))Query test to Elastic SearchIn order to confirm the state of our info, here it will be made an Elastic Search query. The request will be past on a python dict trough json attribute. The query word in this case will be "Colombia", being the most obvious word to appear on the Colombian Constitution.local_test = "http://localhost:9200/test_all/_search" query_test = { "query": { "simple_query_string": { "query": "Constitucion" } } } query_test = requests.get(local_test, json=query_test)Because the query text is a little "dirty", it will be share at the end as a commented line, in case you desire to look at the content, just uncoment the last cell. A cleaner view, is to call the relevant data for now, like it will be the number of articles that match with the query, the max score, and the best article rated.result = json.loads(query_test.text) print(result['hits']['total']['value']) print(result['hits']['max_score']) best_rated = result['hits']['hits'][0] best_rated # query_test.textGeneral Insructions MIDS Machine Learning at Scale MidTerm exam, Week 8, Spring, 2016Exam location is at: https://www.dropbox.com/s/jdkkttnwd88uxkl/MIDS-MLS-MidTerm-2016-Spring-Live.txt?dl=0===Instructions for midterm===Instructions:1: Please acknowledge receipt of exam by sending a quick reply to the instructors2: Review the submission form first to scope it out (it will take a 5-10 minutes to input your answers and other information into this form)3: Please keep all your work and responses in ONE (1) notebook only (and submit via the form)4: Please make sure that the NBViewer link for your Submission notebook works5: Please do NOT discuss this exam with anyone (including your class mates) until after 8AM (West coast time) Friday, March 4, 2016 Please use your live session time from week 8 to complete this mid term (plus an additional 30 minutes if you need it). This is an open book exam meaning you can consult webpages and textbooks (but not each other or other people). Please complete this exam by yourself.Please submit your solutions and notebook via the following form: http://goo.gl/forms/ggNYfRXz0t===Exam durations (All times are in California Time)===Live Session Group 4 4:00 PM - 6:00 PM (Tuesday)Live Session Group 2 4:00 PM - 6:00 PM (Wednesday)Live Session Group 3 6:30 PM - 8:30 PM (Wednesday)=====Exam questions begins here========Map-Reduce===MT0. Which of the following statememts about map-reduce are true? 
Check all that apply.

(a) If you only have 1 computer with 1 computing core, then map-reduce is unlikely to help
(b) If we run map-reduce using N computers, then we will always get at least an N-fold speedup compared to using 1 computer
(c) Because of network latency and other overhead associated with map-reduce, if we run map-reduce using N computers, then we will get less than an N-fold speedup compared to using 1 computer
(d) When using map-reduce with gradient descent, we usually use a single machine that accumulates the gradients from each of the map-reduce machines, in order to compute the parameter update for the iteration

===Order inversion===

MT1. Suppose you wish to write a MapReduce job that creates normalized word co-occurrence data from a large input text. To ensure that all (potentially many) reducers receive appropriate normalization factors (denominators) in the correct order in their input streams (so as to minimize memory overhead), the mapper should emit according to which pattern:

(a) emit (*,word) count
(b) There is no need to use order inversion here
(c) emit (word,*) count
(d) None of the above

===Apriori principle===

MT2. When searching for frequent itemsets with the Apriori algorithm (using a threshold, N), the Apriori principle allows us to avoid tracking the occurrences of the itemset {A,B,C} provided

(a) all subsets of {A,B,C} occur less than N times.
(b) any pair of {A,B,C} occurs less than N times.
(c) any subset of {A,B,C} occurs less than N times.
(d) All of the above

===Bayesian document classification===

MT3. When building a Bayesian document classifier, Laplace smoothing serves what purpose?

(a) It allows you to use your training data as your validation data.
(b) It prevents zero-products in the posterior distribution.
(c) It accounts for words that were missed by regular expressions.
(d) None of the above

===Bias-variance tradeoff===

MT4. By increasing the complexity of a model regressed on some samples of data, it is likely that the ensemble will exhibit which of the following?

(a) Increased variance and bias
(b) Increased variance and decreased bias
(c) Decreased variance and bias
(d) Decreased variance and increased bias

===Combiners===

MT5. Combiners can be integral to the successful utilization of the Hadoop shuffle. This utility is a result of

(a) minimization of reducer workload
(b) both (a) and (c)
(c) minimization of network traffic
(d) none of the above

===Pairwise similarity using K-L divergence===

In probability theory and information theory, the Kullback–Leibler divergence (also information divergence, information gain, relative entropy, KLIC, or KL divergence) is a non-symmetric measure of the difference between two probability distributions P and Q.
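In code, this measure can be written down directly; a quick NumPy sketch for reference only, separate from the MRJob-based exam questions below:

```
import numpy as np

def kl_divergence(p, q):
    """KLD(P||Q) = sum_i P(i) * log(P(i) / Q(i)) for discrete distributions P, Q."""
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    mask = p > 0  # terms with P(i) = 0 contribute nothing
    return float(np.sum(p[mask] * np.log(p[mask] / q[mask])))
```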
Specifically, the Kullback–Leibler divergence of Q from P, denoted DKL(P‖Q), is a measure of the information lost when Q is used to approximate P:For discrete probability distributions P and Q, the Kullback–Leibler divergence of Q from P is defined to beKLDistance(P, Q) = Sum over i (P(i) log (P(i) / Q(i)) In the extreme cases, the KL Divergence is 1 when P and Q are maximally differentand is 0 when the two distributions are exactly the same (follow the same distribution).For more information on K-L Divergence see:https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergenceFor the next three question we will use an MRjob class for calculating pairwise similarity using K-L Divergence as the similarity measure:Job 1: create inverted index (assume just two objects)Job 2: calculate/accumulate the similarity of each pair of objects using K-L DivergenceDownload the following notebook and then fill in the code for the first reducer to calculate the K-L divergence of objects (letter documents) in line1 and line2, i.e., KLD(Line1||line2).Here we ignore characters which are not alphabetical. And all alphabetical characters are lower-cased in the first mapper.http://nbviewer.ipython.org/urls/dl.dropbox.com/s/9onx4c2dujtkgd7/Kullback%E2%80%93Leibler%20divergence-MIDS-Midterm.ipynbhttps://www.dropbox.com/s/zr9xfhwakrxz9hc/Kullback%E2%80%93Leibler%20divergence-MIDS-Midterm.ipynb?dl=0Questions:MT6. Which number below is the closest to the result you get for KLD(Line1||line2)?(a) 0.7(b) 0.5(c) 0.2(d) 0.1MT7. Which of the following letters are missing from these character vectors?(a) p and t(b) k and q(c) j and q(d) j and fMT8. The KL divergence on multinomials is defined only when they have nonzero entries. For zero entries, we have to smooth distributions. Suppose we smooth in this way: (ni+1)/(n+24)where ni is the count for letter i and n is the total count of all letters. After smoothing, which number below is the closest to the result you get for KLD(Line1||line2)??(a) 0.08(b) 0.71(c) 0.02(d) 0.11===Gradient descent===MT9. Which of the following are true statements with respect to gradient descent for machine learning, where alpha is the learning rate. Select all that apply(a) To make gradient descent converge, we must slowly decrease alpha over time and use a combiner in the context of Hadoop.(b) Gradient descent is guaranteed to find the global minimum for any function J() regardless of using a combiner or not in the context of Hadoop(c) Gradient descent can converge even if alpha is kept fixed. (But alpha cannot be too large, or else it may fail to converge.) Combiners will help speed up the process.(d) For the specific choice of cost function J() used in linear regression, there is no local optima (other than the global optimum).===Weighted K-means===Write a MapReduce job in MRJob to do the training at scale of a weighted K-means algorithm.You can write your own code or you can use most of the code from the following notebook:http://nbviewer.ipython.org/urls/dl.dropbox.com/s/kjtdyi10nwmk4ko/MrJobKmeans-MIDS-Midterm.ipynbhttps://www.dropbox.com/s/kjtdyi10nwmk4ko/MrJobKmeans-MIDS-Midterm.ipynb?dl=0Weight each example as follows using the inverse vector length (Euclidean norm): weight(X)= 1/||X||, where ||X|| = SQRT(X.X)= SQRT(X1^2 + X2^2)Here X is vector made up of X1 and X2.Using the following data answer the following questions:https://www.dropbox.com/s/ai1uc3q2ucverly/Kmeandata.csv?dl=0Questions:MT10. 
Which result below is the closest to the centroids you got after running your weighted K-means code for 10 iterations?(a) (-4.0,0.0), (4.0,0.0), (6.0,6.0)(b) (-4.5,0.0), (4.5,0.0), (0.0,4.5)(c) (-5.5,0.0), (0.0,0.0), (3.0,3.0)(d) (-4.5,0.0), (-4.0,0.0), (0.0,4.5)MT11. Using the result of the previous question, which number below is the closest to the average weighted distance between each example and its assigned (closest) centroid?The average weighted distance is defined as sum over i (weighted_distance_i) / sum over i (weight_i)(a) 2.5(b) 1.5(c) 0.5(d) 4.0MT12. Which of the following statements are true? Select all that apply. a) Since K-Means is an unsupervised learning algorithm, it cannot overfit the data, and thus it is always better to have as large a number of clusters as is computationally feasible. b) The standard way of initializing K-means is setting μ1=⋯=μk to be equal to a vector of zeros. c) For some datasets, the "right" or "correct" value of K (the number of clusters) can be ambiguous, and hard even for a human expert looking carefully at the data to decide. d) A good way to initialize K-means is to select K (distinct) examples from the training set and set the cluster centroids equal to these selected examples. ===Map-Reduce===MT0. Which of the following statememts about map-reduce are true? Check all that apply.(a) If you only have 1 computer with 1 computing core, then map-reduce is unlikely to help (b) If we run map-reduce using N computers, then we will always get at least an N-Fold speedup compared to using 1 computer (c) Because of network latency and other overhead associated with map-reduce, if we run map-reduce using N computers, then we will get less than N-Fold speedup compared to using 1 computer (d) When using map-reduce with gradient descent, we usually use a single machine that accumulates the gradients from each of the map-reduce machines, in order to compute the paramter update for the iterion Answers: a, c, d ===Order inversion===MT1. Suppose you wish to write a MapReduce job that creates normalized word co-occurrence data form a large input text. To ensure that all (potentially many) reducers receive appropriate normalization factors (denominators) in the correct order in their input streams (so as to minimize memory overhead), the mapper should emit according to which pattern:(a) emit (,word) count (b) There is no need to use order inversion here (c) emit (word,) count (d) None of the above Answers: c ===Apriori principle===MT2. When searching for frequent itemsets with the Apriori algorithm (using a threshold, N), the Apriori principle allows us to avoid tracking the occurrences of the itemset {A,B,C} provided(a) all subsets of {A,B,C} occur less than N times. (b) any pair of {A,B,C} occurs less than N times. (c) any subset of {A,B,C} occurs less than N times. (d) All of the above Answers : d ===Bayesian document classification===MT3. When building a Bayesian document classifier, Laplace smoothing serves what purpose?(a) It allows you to use your training data as your validation data. (b) It prevents zero-products in the posterior distribution. (c) It accounts for words that were missed by regular expressions. (d) None of the above Answers: b , c ===Bias-variance tradeoff===MT4. 
By increasing the complexity of a model regressed on some samples of data, it is likely that the ensemble will exhibit which of the following?(a) Increased variance and bias (b) Increased variance and decreased bias (c) Decreased variance and bias (d) Decreased variance and increased bias Answers: b ===Combiners===MT5. Combiners can be integral to the successful utilization of the Hadoop shuffle. This utility is as a result of(a) minimization of reducer workload (b) both (a) and (c) (c) minimization of network traffic (d) none of the above Answers: b ===Pairwise similarity using K-L divergence===In probability theory and information theory, the Kullback–Leibler divergence (also information divergence, information gain, relative entropy, KLIC, or KL divergence) is a non-symmetric measure of the difference between two probability distributions P and Q. Specifically, the Kullback–Leibler divergence of Q from P, denoted DKL(P‖Q), is a measure of the information lost when Q is used to approximate P:For discrete probability distributions P and Q, the Kullback–Leibler divergence of Q from P is defined to beKLDistance(P, Q) = Sum over i (P(i) log (P(i) / Q(i))In the extreme cases, the KL Divergence is 1 when P and Q are maximally different and is 0 when the two distributions are exactly the same (follow the same distribution).For more information on K-L Divergence see:https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergenceFor the next three question we will use an MRjob class for calculating pairwise similarity using K-L Divergence as the similarity measure:Job 1: create inverted index (assume just two objects) Job 2: calculate/accumulate the similarity of each pair of objects using K-L DivergenceDownload the following notebook and then fill in the code for the first reducer to calculate the K-L divergence of objects (letter documents) in line1 and line2, i.e., KLD(Line1||line2).Here we ignore characters which are not alphabetical. And all alphabetical characters are lower-cased in the first mapper.http://nbviewer.ipython.org/urls/dl.dropbox.com/s/9onx4c2dujtkgd7/Kullback%E2%80%93Leibler%20divergence-MIDS-Midterm.ipynb https://www.dropbox.com/s/zr9xfhwakrxz9hc/Kullback%E2%80%93Leibler%20divergence-MIDS-Midterm.ipynb?dl=0 Using the MRJob Class below calculate the KL divergence of the following two objects.%%writefile kltext.txt 1.Data Science is an interdisciplinary field about processes and systems to extract knowledge or insights from large volumes of data in various forms (data in various forms, data in various forms, data in various forms), either structured or unstructured,[1][2] which is a continuation of some of the data analysis fields such as statistics, data mining and predictive analytics, as well as Knowledge Discovery in Databases. 
2.Machine learning is a subfield of computer science[1] that evolved from the study of pattern recognition and computational learning theory in artificial intelligence.[1] Machine learning explores the study and construction of algorithms that can learn from and make predictions on data.[2] Such algorithms operate by building a model from example inputs in order to make data-driven predictions or decisions,[3]:2 rather than following strictly static program instructions.Writing kltext.txtMRjob class for calculating pairwise similarity using K-L Divergence as the similarity measureJob 1: create inverted index (assume just two objects) Job 2: calculate the similarity of each pair of objectsimport numpy as np np.log(3) %%writefile kldivergence.py from mrjob.job import MRJob import re import numpy as np import pandas as pd import math class kldivergence(MRJob): def mapper1(self, _, line): index = int(line.split('.',1)[0]) letter_list = re.sub(r"[^A-Za-z]+", '', line).lower() count = {} for l in letter_list: if count.has_key(l): count[l] += 1 else: count[l] = 1 for key in count: yield key, [index, count[key]*1.0/len(letter_list)] def reducer1(self, key, values): doc_list ={} for doc,prob in values: doc_list[doc]=prob p = doc_list[1] ## Probability for key in document 1 q = doc_list[2] ## Probability for key in document 2 kl = p * math.log(p/q) ## KL Divergence yield key, kl def reducer2(self, key, values): kl_sum = 0 for value in values: kl_sum = kl_sum + value yield None, kl_sum def steps(self): return [self.mr(mapper=self.mapper1, reducer=self.reducer1), self.mr(reducer=self.reducer2)] if __name__ == '__main__': kldivergence.run() from kldivergence import kldivergence mr_job = kldivergence(args=['kltext.txt']) with mr_job.make_runner() as runner: runner.run() # stream_output: get access of the output for line in runner.stream_output(): print mr_job.parse_output_line(line)WARNING:mrjob.runner: WARNING:mrjob.runner:PLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols WARNING:mrjob.runner: WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and will be removed in v0.6.0. Use mrjob.step.MRStep directly instead. WARNING:mrjob.job:mr() is deprecated and wi[...]This example features the segmentation of brightfield images using convolutional neural networks. The `cnn` module offers functions to generate neural networks (`models`) which can be trained to segment images. The architecture of the model is an implementation of the U-Net [1] architecture. This network extracts features from images through a series of convolutional layers, after which these features are used to form the final segmented image through a series of upsampling layers. To get started with segmentation, a training data set is needed consisting of already segmented images. 
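To make the "down then up" structure of the network concrete, here is a heavily simplified, schematic Keras sketch of such an encoder-decoder. The actual model used below is the much deeper `get_unet_256` from `ColiCoords.cnn`, so treat this only as an illustration of the idea:

```
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate

def tiny_unet(input_shape=(256, 256, 1)):
    """Toy encoder-decoder with a single skip connection, illustrating the U-Net idea."""
    inp = Input(shape=input_shape)
    # Contracting path: extract features while reducing resolution
    c1 = Conv2D(16, 3, padding='same', activation='relu')(inp)
    p1 = MaxPooling2D()(c1)
    c2 = Conv2D(32, 3, padding='same', activation='relu')(p1)
    # Expanding path: upsample and concatenate the skip connection c1
    u1 = UpSampling2D()(c2)
    u1 = concatenate([u1, c1])
    c3 = Conv2D(16, 3, padding='same', activation='relu')(u1)
    out = Conv2D(1, 1, activation='sigmoid')(c3)  # per-pixel foreground probability
    return Model(inp, out)
```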
In our experience we found that around 400 images are sufficient, although fewer images may also give a satisfactory result. A simple tool to manually segment images can be found in the `gui` module of `ColiCoords`. (An example usage script can be found __[here](https://github.com/Jhsmit/ColiCoords-Paper/blob/master/figures/Figure_6/gui_annotate.py)__.)

We have used a midrange graphics card (Nvidia GeForce GTX 1060 6GB) for training the network. Due to memory limitations the images were scaled down from 512x512 to 256x256. This might not be necessary if more GPU memory is available.

[1] , , , U-Net: Convolutional Networks for Biomedical Image Segmentation __[arXiv:1505.04597](https://arxiv.org/abs/1505.04597)__

Binary images are loaded and resized to 256x256 to save video memory:

# Modify these lines to import the training data brightfield and binary
brightfield = tifffile.imread('data/02/brightfield_stack.tif')
binary = tifffile.imread(r'data/02/binary_stack.tif')
brightfield.shape, binary.shape

brightfield_resized = resize_stack(brightfield, 0.5)
binary_resized = resize_stack((binary > 0).astype(int), 0.5, img_type='binary')

fig, axes = iter_subplots(1, 2, figsize=(6, 3))
axes[0].iter_imshow(brightfield_resized, cmap='gray')
axes[1].iter_imshow(binary_resized, cmap='gray_r')
fig.display()
plt.tight_layout()

The brightfield and binary data are used to initialize a `DefaultImgSequence` object. This is a subclass of `Sequence` from `keras`, which acts as a generator to produce more images from the input training data. The amount of data is increased 8-fold through a series of rotations and reflections, since the transformed images are also valid training images. This is done to create more training data and prevent over-fitting. The `DefaultImgSequence` object also normalizes the images in real time through tanh (Hampel) normalization.

Over-fitting is a frequently occurring problem in training neural networks. If the network is trained on too few images, it is possible for the network to 'memorize' images instead of recognizing patterns. Therefore, the training data is split into two sets: training (in-sample) and validation (out-of-sample) data. After each epoch of training the model is checked against the validation data. During training, an increasing validation loss while the in-sample loss is decreasing could be due to over-fitting.

isq = DefaultImgSequence(brightfield_resized, binary_resized)
vsq, tsq = isq.val_split(1/8., random=True)

Warning, index out of bounds, set to last element

The model is initialized by calling `get_unet_256`. The input shape is (width, height, channels). Other shapes are available in `cnn.unet`, and custom shapes can be supplied as a keyword argument.

model = get_unet_256(input_shape=(256, 256, 1))

Pretrained model weights will be made available shortly and can be loaded as:

model.load_weights('PATH TO MODEL WEIGHTS.h5')

A checkpoint is created to save the model's weights every time the validation loss decreases.

checkpoint = ModelCheckpoint('model_example', monitor='val_loss', save_weights_only=True, save_best_only=True, verbose=1, mode='min')

The last step is to start training the model! Depending on the weights used to initialize the model, the number of input images and the hardware used, this can take anywhere from minutes to hours.
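Putting the pieces together, a minimal sketch of that training call, using the `tsq` (training) and `vsq` (validation) sequences created above together with the checkpoint callback (the epoch count is illustrative only):

```
model.fit_generator(tsq,
                    steps_per_epoch=len(tsq),
                    epochs=5,
                    validation_data=vsq,
                    validation_steps=len(vsq),
                    callbacks=[checkpoint])
```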
It is recommended to start out with a small number of epochs (full iteration of all data) to see how the validation loss (val_loss) progresses.model.fit_generator(train_sq, steps_per_epoch=len(train_sq), epochs=5 validation_data=val_sq,callbacks=[checkpoint])Epoch 1/1 208/240 [=========================>....] - ETA: 34:33 - loss: 0.1466 - acc: 0.95 - ETA: 18:12 - loss: 0.1779 - acc: 0.94 - ETA: 12:45 - loss: 0.1378 - acc: 0.95 - ETA: 10:01 - loss: 0.1141 - acc: 0.96 - ETA: 8:22 - loss: 0.0971 - acc: 0.9727 - ETA: 7:16 - loss: 0.2085 - acc: 0.960 - ETA: 6:29 - loss: 0.3122 - acc: 0.951 - ETA: 5:54 - loss: 0.2795 - acc: 0.956 - ETA: 5:26 - loss: 0.2532 - acc: 0.960 - ETA: 5:04 - loss: 0.2307 - acc: 0.963 - ETA: 4:45 - loss: 0.2132 - acc: 0.966 - ETA: 4:30 - loss: 0.2004 - acc: 0.968 - ETA: 4:17 - loss: 0.1889 - acc: 0.970 - ETA: 4:06 - loss: 0.1798 - acc: 0.971 - ETA: 3:56 - loss: 0.1958 - acc: 0.967 - ETA: 3:47 - loss: 0.2316 - acc: 0.961 - ETA: 3:40 - loss: 0.2205 - acc: 0.963 - ETA: 3:33 - loss: 0.2117 - acc: 0.965 - ETA: 3:26 - loss: 0.2055 - acc: 0.965 - ETA: 3:21 - loss: 0.1973 - acc: 0.966 - ETA: 3:16 - loss: 0.1892 - acc: 0.967 - ETA: 3:11 - loss: 0.1834 - acc: 0.968 - ETA: 3:07 - loss: 0.1776 - acc: 0.969 - ETA: 3:02 - loss: 0.1727 -[...]Muchos servidores web, al recibir múltiples peticiones en un corto tiempo de una misma IP, la bloquean para evitar saturaciones y problemas de servicio. Esto puede ser un problema para los scrapers ya que generan justamente este comportamiento.Para evitar ser detectados tendríamos que cambiar nuestra dirección IP pública antes de cada request, cosa que sería extremadamente lento y en muchos casos imposible, o podemos utilizar un **proxy**. Un proxy es un intermediario entre quien hace la petición (nuestro programa) y quien la recibe (el servidor) que nos permite enmascarar la IP de donde salió la request. Utilizando un proxy, el servidor web verá la IP de ese proxy y no la nuestra. Si bien no podemos elegir con qué dirección IP hacer la petición, sí podemos elegir a través de qué proxy hacerla.El sitio www.cualesmiip.com te permite ver cuál es la IP saliente de tu red. Si estás en una LAN, seguramente tu IP local sea algo como 192.18.x.x, pero la IP con la que salís al mundo, la IP de tu router asignada por tu ISP, será diferente. Armemos una función que averigue la IP que ve este sitio:def get_my_ip(url='http://www.cualesmiip.com/', proxies=None): ########################### # DESPUES ESTO ########################### if proxies: if url.startswith('https'): if 'https' in proxies.keys(): print('Utilizando proxy', proxies['https']) else: print('No hay proxy https configurado') elif url.startswith('http:'): if 'http' in proxies.keys(): print('Utilizando proxy', proxies['http']) else: print('No hay proxy http configurado') else: print('URL incorrecta') return None else: print('No hay proxy configurado') ########################### # PRIMERO ESTO ########################### # Hacemos la request al sitio try: resp = requests.get(url, proxies=proxies) except Exception as e: print('Error haciendo la request.', e) return None # Verificamos el status_code if resp.status_code != 200: print('Error de status. 
Status code:', resp.status_code) # Compilamos la expresión regular con el patrón para la IP regex = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})') # Extraemos los strings que matcheen el patrón my_ip = regex.findall(resp.text) # y devolvemos el primero de ellos si es que lo hay return my_ip[0] if my_ip is not None else None get_my_ip()No hay proxy configuradoEsta es la IP que ve el sitio.Ahora utilicemos un PROXY para hacer la request. Debemos crear un diccionario e indicar la IP del proxy para cada protocolo. Podemos obtener una lista de proxies del sitio https://free-proxy-list.net/ o https://hidemy.name/es/proxy-list/# Actualizar ip del proxy proxy_dict = {'http':'http://172.16.58.3:3128', 'https':'https://172.16.58.3:3128'}Modifiquemos la función para que utilice e indique el proxy que está utilizandoget_my_ip(url='https://www.cualesmiip.com/', proxies=proxy_dict)Utilizando proxy https://172.16.58.3:3128 Error haciendo la request. HTTPSConnectionPool(host='www.cualesmiip.com', port=443): Max retries exceeded with url: / (Caused by ProxyError('Cannot connect to proxy.', NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out',)))Vemos que la IP que reconoce el sitio cambió y coincide con la IP del proxy que estamos usando. Otro tipo de proxy que podemos utilizar es SOCKS. El proxy HTTP solamente funciona para ese protocolo, mientras que el proxy SOCKS funciona a más bajo nivel (TCP) y lo podemos utilizar para cualquier tipo de tráfico ya sean páginas web, programas, torrents, etc.# Actualizar ip del proxy proxy_dict_socks = {'http':'socks4://192.168.3.11:50659', 'https':'socks4://192.168.3.11:50659'} get_my_ip(proxies=proxy_dict_socks)Utilizando proxy socks4://192.168.3.11:50659window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); `GiRaFFE_NRPy`: Main Driver, staggered prescription Author: **Notebook Status:** Validation in progress **Validation Notes:** This code assembles the various parts needed for GRFFE evolution in order. 
NRPy+ Source Code for this module:* [GiRaFFE_NRPy/GiRaFFE_NRPy_Main_Driver_staggered.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Main_Driver_staggered.py) Other critical files (in alphabetical order): * [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb) Genearates code to compute the $\tilde{S}_i$ source term.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Genearates code to compute the $A_i$ gauge term and $\psi^6 \Phi$ right-hand sides.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms. Introduction: Having written all the various algorithms that will go into evolving the GRFFE equations forward through time, we are ready to write a start-to-finish module to do so. However, to help keep things more organized, we will first create a dedicated module to assemble the various functions we need to run, in order, to perform the evolution. This will reduce the length of the standalone C code, improving that notebook's readability.This notebook does this for the *staggered prescription*. Table of Contents$$\label{prelim}$$During a given RK substep, we will perform the following steps in this order, based on the order used in the original `GiRaFFE`:0. [Step 0](prelim): Preliminaries1. [Step 1](rhs): Calculate the right-hand sides 1. 
[Step 1.a](source): Calculate the source terms of $\partial_t A_i$, $\partial_t \tilde{S}_i$, and $\partial_t [\sqrt{\gamma} \Phi]$ right-hand sides 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py), [**GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py) 1. [Step 1.b](flux): Calculate the Flux terms 1. In each direction: 1. Interpolate the metric gridfunctions to cell faces 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) 1. Reconstruct primitives $\bar{v}^i$ and $B^i$ on cell faces with the piecewise-parabolic method 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) 1. Compute the fluxes of $\tilde{S}_i$ and $A_i$ and add the appropriate combinations to the evolution equation right-hand sides 1. [**GiRaFFE_NRPy/Stilde_flux.py**](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py), [**GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py)1. [Step 2](poststep): Recover the primitive variables and apply boundary conditions (post-step) 1. [Step 2.a](potential_bc): Apply boundary conditions to $A_i$ and $\sqrt{\gamma} \Phi$ 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) 1. [Step 2.b](a2b): Compute $B^i$ from $A_i$ 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) 1. [Step 2.c](c2p): Run the Conservative-to-Primitive solver 1. This applies fixes to $\tilde{S}_i$, then computes $\bar{v}^i$. A current sheet prescription is then applied to $\bar{v}^i$, and $\tilde{S}_i$ is recomputed to be consistent. 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) 1. [Step 2.d](velocity_bc): Apply outflow boundary conditions to $\bar{v}^i$ 1. [**GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py**](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) 1. [Step 2.e](bssn_interp): Workaround to interpolate BSSN instead of ADM bssn_interp1. [Step 3](write_out): Write out the C code function1. [Step 3](code_validation): Self-Validation against `GiRaFFE_NRPy_Main_Drive.py`1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 0: Preliminaries \[Back to [top](toc)\]$$\label{prelim}$$We begin by importing the NRPy+ core functionality. We also import the GRHD module.# Step 0: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) from outputC import outCfunction, lhrh # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import loop as lp # NRPy+: Generate C code loops import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface thismodule = "GiRaFFE_NRPy_Main_Driver" par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2) out_dir = os.path.join("GiRaFFE_standalone_Ccodes") cmd.mkdir(out_dir) CoordSystem = "Cartesian" par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem) rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc. outCparams = "outCverbose=False,CSE_sorting=none"Step 1: Calculate the right-hand sides \[Back to [top](toc)\]$$\label{rhs}$$In the method of lines using Runge-Kutta methods, each timestep involves several "RK substeps" during which we will run the same set of function calls. These can be divided into two groups: one in which the RHSs themselves are calculated, and a second in which boundary conditions are applied and auxiliary variables updated (the post-step). Here, we focus on that first group.The gauge terms of our evolution equations consist of two derivative terms: the Lorentz gauge term of $\partial_t A_k$, which is $\partial_k (\alpha \Phi - \beta^j A_j)$ and the non-damping, flux-like term of $\partial_t [\psi^6 \Phi]$, which is $\partial_j (\alpha\sqrt{\gamma}A^j - \beta^j [\sqrt{\gamma} \Phi])$. We compute these terms first, after we register all the gridfunctions we will need for GRFFE in the staggered prescription.import GRHD.equations as GRHD # NRPy+: Generate general relativistic hydrodynamics equations gammaDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gammaDD","sym01",DIM=3) betaU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","betaU",DIM=3) alpha = gri.register_gridfunctions("AUXEVOL","alpha") ixp.register_gridfunctions_for_single_rank1("EVOL","AD") BU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","BU") ixp.register_gridfunctions_for_single_rank1("AUXEVOL","BstaggerU") ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","ValenciavU") gri.register_gridfunctions("EVOL","psi6Phi") StildeD = ixp.register_gridfunctions_for_single_rank1("EVOL","StildeD") gri.register_gridfunctions("AUXEVOL","psi6_temp") gri.register_gridfunctions("AUXEVOL","psi6center") gri.register_gridfunctions("AUXEVOL","cmax_x") gri.register_gridfunctions("AUXEVOL","cmin_x") gri.register_gridfunctions("AUXEVOL","cmax_y") gri.register_gridfunctions("AUXEVOL","cmin_y") gri.register_gridfunctions("AUXEVOL","cmax_z") gri.register_gridfunctions("AUXEVOL","cmin_z") phi = gri.register_gridfunctions("AUXEVOL","phi") # Needed only for ADM-BSSN-ADM workaround gammaUUxx,gammaUUyy,gammaUUzz = gri.register_gridfunctions("AUXEVOL",["gammaUUxx","gammaUUyy","gammaUUzz"]) gamma_faceUUxx,gamma_faceUUyy,gamma_faceUUzz = gri.register_gridfunctions("AUXEVOL",["gamma_faceUUxx","gamma_faceUUyy","gamma_faceUUzz"])Step 1.a: Calculate the source terms of $\partial_t A_i$, $\partial_t \tilde{S}_i$, and $\partial_t [\sqrt{\gamma} \Phi]$ right-hand sides \[Back to [top](toc)\]$$\label{source}$$We will now calculate the terms on the RHS of $A_i$ and $[\sqrt{\gamma} \Phi]$ that involve the divergence and gradient operators. We also compute the other term in the RHS of $[\sqrt{\gamma} \Phi]$, which is a straightforward damping term. 
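Collecting the pieces just described (this is a schematic summary, not generated code), the Lorenz gauge contribution to $\partial_t A_i$ is
$$-\partial_i\left(\alpha\Phi - \beta^j A_j\right),$$
while, writing $\xi$ for the damping parameter of the generalized Lorenz gauge,
$$\partial_t \left[\sqrt{\gamma}\,\Phi\right] = -\partial_j\left(\alpha\sqrt{\gamma}\,A^j - \beta^j\left[\sqrt{\gamma}\,\Phi\right]\right) - \xi\alpha\left[\sqrt{\gamma}\,\Phi\right],$$
where the last term is the straightforward damping term referred to above.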
Documentation for this can be found in [Tutorial-GiRaFFE_NRPy_staggered-Source_Terms](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb).import GiRaFFE_NRPy.GiRaFFE_NRPy_staggered_Source_Terms as stgsrc subdir = "RHSs" stgsrc.GiRaFFE_NRPy_Source_Terms(os.path.join(out_dir,subdir))We also need to compute the source term of the $\tilde{S}_i$ evolution equation. This term involves derivatives of the four metric, so we can save some effort here by taking advantage of the interpolations done of the metric gridfunctions to the cell faces, which will allow us to take a finite-difference derivative with the accuracy of a higher order and the computational cost of a lower order. However, it will require some more complicated coding, detailed in [Tutorial-GiRaFFE_NRPy-Source_Terms](Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb)import GiRaFFE_NRPy.GiRaFFE_NRPy_Source_Terms as source # Declare this symbol: sqrt4pi = par.Cparameters("REAL",thismodule,"sqrt4pi","sqrt(4.0*M_PI)") source.write_out_functions_for_StildeD_source_term(os.path.join(out_dir,subdir),outCparams,gammaDD,betaU,alpha, ValenciavU,BU,sqrt4pi)Output C function calculate_StildeD0_source_term() to file GiRaFFE_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.hStep 1.b: Calculate the Flux terms \[Back to [top](toc)\]$$\label{flux}$$Now, we will compute the flux terms of $\partial_t A_i$ and $\partial_t \tilde{S}_i$. To do so, we will first need to interpolate the metric gridfunctions to cell faces and to reconstruct the primitives on the cell faces using the code detailed in [Tutorial-GiRaFFE_NRPy-Metric_Face_Values](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) and in [Tutorial-GiRaFFE_NRPy-PPM](Tutorial-GiRaFFE_NRPy-PPM.ipynb).subdir = "FCVAL" cmd.mkdir(os.path.join(out_dir, subdir)) import GiRaFFE_NRPy.GiRaFFE_NRPy_Metric_Face_Values as FCVAL FCVAL.GiRaFFE_NRPy_FCVAL(os.path.join(out_dir,subdir)) subdir = "PPM" cmd.mkdir(os.path.join(out_dir, subdir)) import GiRaFFE_NRPy.GiRaFFE_NRPy_PPM as PPM PPM.GiRaFFE_NRPy_PPM(os.path.join(out_dir,subdir))Here, we will write the function to compute the electric field contribution to the induction equation RHS. This is coded with documentation in [Tutorial-GiRaFFE_NRPy_staggered-Afield_flux](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb). For the $i^{\rm th}$ component of the electric field, we will need reconstrutions in the $j^{\rm th}$ and $k^{\rm th}$ direction. These will be computed in the driver function, [below](write_out).import GiRaFFE_NRPy.GiRaFFE_NRPy_staggered_Afield_flux as Af # We will pass values of the gridfunction on the cell faces into the function. This requires us # to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix. 
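# The *_face gridfunctions registered next hold metric quantities interpolated to cell faces,
# while the *_rU and *_lU gridfunctions hold the right- and left-biased PPM reconstructions
# of the Valencia 3-velocity and magnetic field on those same faces.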
alpha_face = gri.register_gridfunctions("AUXEVOL","alpha_face") gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01") beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU") # We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU # on the right and left faces Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3) B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3) Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3) B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3) subdir = "RHSs" Af.GiRaFFE_NRPy_Afield_flux(os.path.join(out_dir, subdir))We must do something similar here, albeit much simpler. For instance, the $x$ component of $\partial_t \tilde{S}_i$ will be a finite difference of the flux throught the faces in the $\pm x$ direction; for further detail, see [Tutorial-GiRaFFE_NRPy-Stilde_flux](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb).Stilde_flux_HLLED = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Stilde_flux_HLLED") import GiRaFFE_NRPy.Stilde_flux as Sf Sf.generate_C_code_for_Stilde_flux(os.path.join(out_dir,subdir), True, alpha_face,gamma_faceDD,beta_faceU, Valenciav_rU,B_rU,Valenciav_lU,B_lU, Stilde_flux_HLLED, sqrt4pi, write_cmax_cmin=True)Output C function calculate_Stilde_rhsD() to file GiRaFFE_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.hStep 2: Recover the primitive variables and apply boundary conditions \[Back to [top](toc)\]$$\label{poststep}$$With the RHSs computed, we can now recover the primitive variables, which are the Valencia three-velocity $\bar{v}^i$ and the magnetic field $B^i$. We can also apply boundary conditions to the vector potential and velocity. By doing this at each RK substep, we can help ensure the accuracy of the following substeps. Step 2.a: Apply boundary conditions to $A_i$ and $\sqrt{\gamma} \Phi$ \[Back to [top](toc)\]$$\label{potential_bc}$$First, we will apply boundary conditions to the vector potential, $A_i$, and the scalar potential $\sqrt{\gamma} \Phi$. The file we generate here contains both functions we need for BCs, as documented in [Tutorial-GiRaFFE_NRPy-BCs](Tutorial-GiRaFFE_NRPy-BCs.ipynb).subdir = "boundary_conditions" cmd.mkdir(os.path.join(out_dir,subdir)) import GiRaFFE_NRPy.GiRaFFE_NRPy_BCs as BC BC.GiRaFFE_NRPy_BCs(os.path.join(out_dir,subdir))Step 2.b: Compute $B^i$ from $A_i$ \[Back to [top](toc)\]$$\label{a2b}$$Now, we will calculate the magnetic field as the curl of the vector potential at all points in our domain; we will need these at both cell centers and faces, as detailed in [Tutorial-GiRaFFE_NRPy_staggered-A2B](Tutorial-GiRaFFE_NRPy-A2B.ipynb).subdir = "A2B" cmd.mkdir(os.path.join(out_dir,subdir)) import GiRaFFE_NRPy.GiRaFFE_NRPy_staggered_A2B as A2B A2B.GiRaFFE_NRPy_A2B(os.path.join(out_dir,subdir))Step 2.c: Run the Conservative-to-Primitive solver \[Back to [top](toc)\]$$\label{c2p}$$With these functions, we apply fixes to the Poynting flux, and use that to update the three-velocity. Then, we apply our current sheet prescription to the velocity, and recompute the Poynting flux to agree with the now-fixed velocity. 
More detail can be found in [Tutorial-GiRaFFE_NRPy-C2P_P2C](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb).import GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C as C2P_P2C C2P_P2C.GiRaFFE_NRPy_C2P(StildeD,BU,gammaDD,betaU,alpha) values_to_print = [ lhrh(lhs=gri.gfaccess("in_gfs","StildeD0"),rhs=C2P_P2C.outStildeD[0]), lhrh(lhs=gri.gfaccess("in_gfs","StildeD1"),rhs=C2P_P2C.outStildeD[1]), lhrh(lhs=gri.gfaccess("in_gfs","StildeD2"),rhs=C2P_P2C.outStildeD[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=C2P_P2C.ValenciavU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=C2P_P2C.ValenciavU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=C2P_P2C.ValenciavU[2]) ] subdir = "C2P" cmd.mkdir(os.path.join(out_dir,subdir)) desc = "Apply fixes to \tilde{S}_i and recompute the velocity to match with current sheet prescription." name = "GiRaFFE_NRPy_cons_to_prims" outCfunction( outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *in_gfs", body = fin.FD_outputC("returnstring",values_to_print,params=outCparams).replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs", rel_path_to_Cparams=os.path.join("../")) # TINYDOUBLE = par.Cparameters("REAL",thismodule,"TINYDOUBLE",1e-100) C2P_P2C.GiRaFFE_NRPy_P2C(gammaDD,betaU,alpha, ValenciavU,BU, sqrt4pi) values_to_print = [ lhrh(lhs=gri.gfaccess("in_gfs","StildeD0"),rhs=C2P_P2C.StildeD[0]), lhrh(lhs=gri.gfaccess("in_gfs","StildeD1"),rhs=C2P_P2C.StildeD[1]), lhrh(lhs=gri.gfaccess("in_gfs","StildeD2"),rhs=C2P_P2C.StildeD[2]), ] desc = "Recompute StildeD after current sheet fix to Valencia 3-velocity to ensure consistency between conservative & primitive variables." name = "GiRaFFE_NRPy_prims_to_cons" outCfunction( outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *auxevol_gfs,REAL *in_gfs", body = fin.FD_outputC("returnstring",values_to_print,params=outCparams).replace("IDX4","IDX4S"), loopopts ="AllPoints", rel_path_to_Cparams=os.path.join("../"))Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.hStep 2.d: Apply outflow boundary conditions to $\bar{v}^i$ \[Back to [top](toc)\]$$\label{velocity_bc}$$Now, we can apply outflow boundary conditions to the Valencia three-velocity. This specific type of boundary condition helps avoid numerical error "flowing" into our grid. This function has already been generated [above](potential_bc). Step 2.e: Workaround to interpolate BSSN instead of ADM $\bar{v}^i$ \[Back to [top](toc)\]$$\label{bssn_interp}$$The original `GiRaFFE` converted its metric to BSSN, interpolated that to metric faces, and then converted back to ADM; we'll have to do the same in order to verify round-off level agreement.import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends # First calculate the conformal factor psi^4 = detgamma^(1/3) _gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) # _gammaUU unused. 
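# With psi^4 = det(gamma)^(1/3), the BSSN conformal exponent is phi = (1/4)*log(psi^4)
# = (1/12)*log(det(gamma)), and the conformally rescaled metric gammabar_ij = gamma_ij/psi^4
# has unit determinant.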
psi4 = sp.cbrt(gammaDET) phi_expression = sp.Rational(1,4)*sp.log(psi4) # Rescale gammaDD: gammabarDD = gammaDD/psi4 gammabarDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): gammabarDD[i][j] = gammaDD[i][j]/psi4 gammabarUUxx = gammaUUxx*psi4 gammabarUUyy = gammaUUyy*psi4 gammabarUUzz = gammaUUzz*psi4 # Generate a kernel to convert to BSSN: # We'll convert the metric in place to ensure compatibility with our metric face interpolator values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammabarDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammabarDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammabarDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammabarDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammabarDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammabarDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","phi"),rhs=phi_expression), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUxx"),rhs=gammabarUUxx), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUyy"),rhs=gammabarUUyy), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUzz"),rhs=gammabarUUzz), ] desc = "Convert ADM metric to BSSN" name = "Workaround_ADM_to_BSSN" outCfunction( outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params=outCparams).replace("IDX4","IDX4S"), loopopts ="AllPoints", rel_path_to_Cparams=os.path.join("./")) rescaled_gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): # Here, gammaDD actually represents gammabarDD, but recall that we converted in place. rescaled_gammaDD[i][j] = gammaDD[i][j]*sp.exp(4*phi) rescaled_gammaUUxx = gammaUUxx/sp.exp(4*phi) rescaled_gammaUUyy = gammaUUyy/sp.exp(4*phi) rescaled_gammaUUzz = gammaUUzz/sp.exp(4*phi) # We'll convert the metric in place to ensure compatibility with our metric face interpolator values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=rescaled_gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=rescaled_gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=rescaled_gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=rescaled_gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=rescaled_gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=rescaled_gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUxx"),rhs=rescaled_gammaUUxx), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUyy"),rhs=rescaled_gammaUUyy), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUUzz"),rhs=rescaled_gammaUUzz) ] C_code_kernel = fin.FD_outputC("returnstring",values_to_print,params=outCparams)\ .replace("IDX4","IDX4S") C_face_kernel = C_code_kernel.replace("GAMMA","GAMMA_FACE").replace("PHIGF","PSI6_TEMPGF") desc = "Convert BSSN metric to ADM" name = "Workaround_BSSN_to_ADM" Ccode_function = outCfunction( outfile = "returnstring", desc=desc, name=name, params ="const paramstruct *params,REAL *auxevol_gfs", body = C_code_kernel+"\n"+C_face_kernel, loopopts ="InteriorPoints", rel_path_to_Cparams=os.path.join("./")).replace("NGHOSTS+Nxx0","NGHOSTS+Nxx0+1").replace("NGHOSTS+Nxx1","NGHOSTS+Nxx1+1").replace("NGHOSTS+Nxx2","NGHOSTS+Nxx2+1") with open(os.path.join(out_dir,name+".h"),"w") as file: file.write(Ccode_function)Output C function Workaround_ADM_to_BSSN() to file GiRaFFE_standalone_Ccodes/Workaround_ADM_to_BSSN.hStep 3: Write out the C code function \[Back 
to [top](toc)\]$$\label{write_out}$$Now, we have generated all the functions we will need for the `GiRaFFE` evolution. So, we will now assemble our evolution driver. This file will first `include` all of the files we just generated for easy access. Then, we will write a function that calls these functions in the correct order, iterating over the flux directions as necessary.%%writefile $out_dir/GiRaFFE_NRPy_Main_Driver.h // Structure to track ghostzones for PPM: typedef struct __gf_and_gz_struct__ { REAL *gf; int gz_lo[4],gz_hi[4]; } gf_and_gz_struct; // Some additional constants needed for PPM: static const int VX=0,VY=1,VZ=2, BX_CENTER=3,BY_CENTER=4,BZ_CENTER=5,BX_STAGGER=6,BY_STAGGER=7,BZ_STAGGER=8, VXR=9,VYR=10,VZR=11,VXL=12,VYL=13,VZL=14; //<-- Be _sure_ to define MAXNUMVARS appropriately! const int NUM_RECONSTRUCT_GFS = 15; #define WORKAROUND_ENABLED // Include ALL functions needed for evolution #include "PPM/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c" #include "FCVAL/interpolate_metric_gfs_to_cell_faces.h" #include "RHSs/calculate_StildeD0_source_term.h" #include "RHSs/calculate_StildeD1_source_term.h" #include "RHSs/calculate_StildeD2_source_term.h" #include "RHSs/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.h" #include "RHSs/A_i_rhs_no_gauge_terms.h" #include "A2B/compute_B_and_Bstagger_from_A.h" #include "RHSs/calculate_Stilde_flux_D0.h" #include "RHSs/calculate_Stilde_flux_D1.h" #include "RHSs/calculate_Stilde_flux_D2.h" #include "RHSs/calculate_Stilde_rhsD.h" #include "boundary_conditions/GiRaFFE_boundary_conditions.h" #include "C2P/GiRaFFE_NRPy_cons_to_prims.h" #include "C2P/GiRaFFE_NRPy_prims_to_cons.h" void workaround_Valencia_to_Drift_velocity(const paramstruct *params, REAL *vU0, const REAL *alpha, const REAL *betaU0, const REAL flux_dirn) { #include "set_Cparameters.h" // Converts Valencia 3-velocities to Drift 3-velocities for testing. The variable argument // vu0 is any Valencia 3-velocity component or reconstruction thereof. #pragma omp parallel for for (int i2 = 2*(flux_dirn==3);i2 < Nxx_plus_2NGHOSTS2-1*(flux_dirn==3);i2++) for (int i1 = 2*(flux_dirn==2);i1 < Nxx_plus_2NGHOSTS1-1*(flux_dirn==2);i1++) for (int i0 = 2*(flux_dirn==1);i0 < Nxx_plus_2NGHOSTS0-1*(flux_dirn==1);i0++) { int ii = IDX3S(i0,i1,i2); // Here, we modify the velocity in place. vU0[ii] = alpha[ii]*vU0[ii]-betaU0[ii]; } } void workaround_Drift_to_Valencia_velocity(const paramstruct *params, REAL *vU0, const REAL *alpha, const REAL *betaU0, const REAL flux_dirn) { #include "set_Cparameters.h" // Converts Drift 3-velocities to Valencia 3-velocities for testing. The variable argument // vu0 is any drift (i.e. IllinoisGRMHD's definition) 3-velocity component or reconstruction thereof. #pragma omp parallel for for (int i2 = 2*(flux_dirn==3);i2 < Nxx_plus_2NGHOSTS2-1*(flux_dirn==3);i2++) for (int i1 = 2*(flux_dirn==2);i1 < Nxx_plus_2NGHOSTS1-1*(flux_dirn==2);i1++) for (int i0 = 2*(flux_dirn==1);i0 < Nxx_plus_2NGHOSTS0-1*(flux_dirn==1);i0++) { int ii = IDX3S(i0,i1,i2); // Here, we modify the velocity in place. vU0[ii] = (vU0[ii]+betaU0[ii])/alpha[ii]; } } void GiRaFFE_NRPy_RHSs(const paramstruct *restrict params,REAL *restrict auxevol_gfs,REAL *restrict in_gfs,REAL *restrict rhs_gfs) { #include "set_Cparameters.h" // First thing's first: initialize the RHSs to zero! 
#pragma omp parallel for for(int ii=0;iiMAXNUMINTERP) {CCTK_VError(VERR_DEF_PARAMS,"Error: Didn't allocate enough space for interp_vars[]."); } // We are FINISHED with v{x,y,z}{r,l} and P{r,l} so we use these 8 gridfunctions' worth of space as temp storage. Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs(params,interp_vars, in_gfs+Nxxp2NG012*PSI6PHIGF, auxevol_gfs+Nxxp2NG012*VALENCIAV_RU0GF, // WARNING: auxevol_gfs+Nxxp2NG012*VALENCIAV_RU1GF, // ALL VARIABLES auxevol_gfs+Nxxp2NG012*VALENCIAV_RU2GF, // ON THESE LINES auxevol_gfs+Nxxp2NG012*VALENCIAV_LU0GF, // ARE OVERWRITTEN auxevol_gfs+Nxxp2NG012*VALENCIAV_LU1GF, // FOR TEMP STORAGE auxevol_gfs+Nxxp2NG012*VALENCIAV_LU2GF, // . auxevol_gfs+Nxxp2NG012*VALENCIAV_RRU0GF, // . auxevol_gfs+Nxxp2NG012*VALENCIAV_RLU0GF, // . rhs_gfs+Nxxp2NG012*PSI6PHIGF, rhs_gfs+Nxxp2NG012*AD0GF, rhs_gfs+Nxxp2NG012*AD1GF, rhs_gfs+Nxxp2NG012*AD2GF); /*#pragma omp parallel for for(int k=0;kOverwriting GiRaFFE_standalone_Ccodes/GiRaFFE_NRPy_Main_Driver.hStep 4: Self-Validation against `GiRaFFE_NRPy_Main_Drive.py` \[Back to [top](toc)\]$$\label{code_validation}$$To validate the code in this tutorial we check for agreement between the files1. that were generated in this tutorial and1. those that are generated in the module `GiRaFFE_NRPy_Main_Driver.py`gri.glb_gridfcs_list = [] # Define the directory that we wish to validate against: valdir = os.path.join("GiRaFFE_validation_Ccodes") cmd.mkdir(valdir) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md md.GiRaFFE_NRPy_Main_Driver_generate_all(valdir)Output C function calculate_StildeD0_source_term() to file GiRaFFE_validation_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_validation_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_validation_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_validation_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_validation_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_validation_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.hWith both sets of codes generated, we can now compare them against each other.import difflib import sys print("Printing difference between original C code and this code...") # Open the files to compare files = ["GiRaFFE_NRPy_Main_Driver.h", "RHSs/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.h", "PPM/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c", "PPM/loop_defines_reconstruction_NRPy.h", "FCVAL/interpolate_metric_gfs_to_cell_faces.h", "RHSs/calculate_StildeD0_source_term.h", "RHSs/calculate_StildeD1_source_term.h", "RHSs/calculate_StildeD2_source_term.h", "RHSs/calculate_Stilde_flux_D0.h", "RHSs/calculate_Stilde_flux_D1.h", "RHSs/calculate_Stilde_flux_D2.h", "A2B/compute_B_and_Bstagger_from_A.h", "boundary_conditions/GiRaFFE_boundary_conditions.h", "A2B/compute_B_and_Bstagger_from_A.h", "C2P/GiRaFFE_NRPy_cons_to_prims.h", "C2P/GiRaFFE_NRPy_prims_to_cons.h"] for file in files: print("Checking file " + file) with open(os.path.join(valdir,file)) as file1, open(os.path.join(out_dir,file)) as file2: # Read the lines of each file file1_lines = file1.readlines() file2_lines = file2.readlines() num_diffs = 0 for line in difflib.unified_diff(file1_lines, file2_lines, fromfile=os.path.join(valdir,file), tofile=os.path.join(out_dir,file)): sys.stdout.writelines(line) num_diffs = num_diffs + 1 
if num_diffs == 0: print("No difference. TEST PASSED!") else: print("ERROR: Disagreement found with .py file. See differences above.") sys.exit(1)Printing difference between original C code and this code... Checking file GiRaFFE_NRPy_Main_Driver.h No difference. TEST PASSED! Checking file RHSs/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.h No difference. TEST PASSED! Checking file PPM/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c No difference. TEST PASSED! Checking file PPM/loop_defines_reconstruction_NRPy.h No difference. TEST PASSED! Checking file FCVAL/interpolate_metric_gfs_to_cell_faces.h No difference. TEST PASSED! Checking file RHSs/calculate_StildeD0_source_term.h No difference. TEST PASSED! Checking file RHSs/calculate_StildeD1_source_term.h No difference. TEST PASSED! Checking file RHSs/calculate_StildeD2_source_term.h No difference. TEST PASSED! Checking file RHSs/calculate_Stilde_flux_D0.h No difference. TEST PASSED! Checking file RHSs/calculate_Stilde_flux_D1.h No difference. TEST PASSED! Checking file RHSs/calculate_Stilde_flux_D2.h No difference. TEST PASSED! Checking file A2B/compute_B_and_Bstagger_from_A.h No dif[...]Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GiRaFFE_NRPy_Main_Driver](TTutorial-GiRaFFE_NRPy_Main_Driver.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFE_NRPy_Main_Driver")Created Tutorial-GiRaFFE_NRPy_Main_Driver.tex, and compiled LaTeX file to PDF file Tutorial-GiRaFFE_NRPy_Main_Driver.pdftqdmimport numpy as np import openmm as mm from openmm import app from openmm import unit from uibcdf_systems import TwoLJParticles from uibcdf_reporters import TQDMReporter box=[[2.5, 0.0, 0.0], [0.0, 2.5, 0.0], [0.0, 0.0, 2.5]]*unit.nanometers molecular_system = TwoLJParticles(atom_1='Ar', atom_2='Xe', box=box) integrator = mm.LangevinIntegrator(300.0*unit.kelvin, 1.0/unit.picoseconds, 0.01*unit.picoseconds) platform = mm.Platform.getPlatformByName('CUDA') simulation = app.Simulation(molecular_system.topology, molecular_system.system, integrator, platform) coordinates=[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]*unit.nanometers simulation.context.setPositions(coordinates) velocities = np.zeros([2, 3], np.float32) * unit.nanometers/unit.picoseconds simulation.context.setVelocities(velocities) n_steps = 20000 report_interval = 20 reporter = TQDMReporter(report_interval, n_steps) simulation.reporters.append(reporter) simulation.step(n_steps) reporter.finalize()MNIST For ML Beginnershttps://www.tensorflow.org/versions/r0.12/tutorials/mnist/beginners/ The MNIST data is hosted on Yann LeCun's website(http://yann.lecun.com/exdb/mnist/). 
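Once the two read/download lines in the next cell have run, the `mnist` object holds (assuming the standard split used by `read_data_sets`) 55,000 training examples, 5,000 validation examples and 10,000 test examples; each image is flattened to a 784-element vector and, because of `one_hot=True`, each label is a length-10 indicator vector. A quick shape check, to be run after that cell:

# Sanity-check the loaded dataset (assumes the read_data_sets call below has already been run):
print(mnist.train.images.shape)   # (55000, 784): 28x28 images flattened to 784 pixels
print(mnist.train.labels.shape)   # (55000, 10):  one-hot label vectors
print(mnist.test.images.shape)    # (10000, 784)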
If you are copying and pasting in the code from this tutorial, start here with these two lines of code which will download and read in the data automatically:# https://www.tensorflow.org/versions/r0.12/tutorials/mnist/beginners/ from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)Extracting MNIST_data/train-images-idx3-ubyte.gz Extracting MNIST_data/train-labels-idx1-ubyte.gz Extracting MNIST_data/t10k-images-idx3-ubyte.gz Extracting MNIST_data/t10k-labels-idx1-ubyte.gzImplementing the Regressionimport tensorflow as tf x = tf.placeholder(tf.float32, [None, 784]) W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) # y == hypothesis y = tf.nn.softmax(tf.matmul(x, W) + b)Training# y_ == label y_ = tf.placeholder(tf.float32, [None, 10]) # cross_entropy == cost/lost function cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) # parameters training_epochs = 10 batch_size = 100 total_batch = int(mnist.train.num_examples / batch_size) sess = tf.Session() sess.run(tf.global_variables_initializer()) for epoch in range(training_epochs): cost = 0 for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) c, _ = sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, y_: batch_ys}) cost += c / total_batch print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(cost)) print("Learning finished")Epoch: 0001 cost = 0.403418125 Epoch: 0002 cost = 0.310536775 Epoch: 0003 cost = 0.295193166 Epoch: 0004 cost = 0.286539707 Epoch: 0005 cost = 0.281588302 Epoch: 0006 cost = 0.277909960 Epoch: 0007 cost = 0.274699409 Epoch: 0008 cost = 0.271966946 Epoch: 0009 cost = 0.269758330 Epoch: 0010 cost = 0.267622066 Learning finishedEvaluating Our Modelcorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))0.9233Deep MNIST for Expertshttps://www.tensorflow.org/versions/r0.12/tutorials/mnist/pros/ Shapesess = tf.InteractiveSession() def show_(t): print('shape:', tf.shape(t).eval(), 'size:', tf.size(t).eval(), 'rank:', tf.rank(t).eval()) print(t.eval()) c1 = tf.constant([1, 3, 5, 7, 9, 0, 2, 4, 6, 8, 3, 7]) c4 = tf.constant([[1, 2, 3], [7, 8, 9]]) print('-----------reshape------------') show_(tf.reshape(c1, [2, -1])) # [[1 3 5 7 9 0] [2 4 6 8 3 7]] show_(tf.reshape(c1, [-1, 3])) # [[1 3 5] [7 9 0] [2 4 6] [8 3 7]] show_(tf.reshape(c4, [-1])) # [1 2 3 7 8 9] c2 = tf.reshape(c1, [2, 2, 1, 3]) c3 = tf.reshape(c1, [1, 4, 1, 3, 1]) # removes dimensions of size 1 print('-----------squeeze------------') show_(c2) # [ -> tensor # [ -> tf.reshape(c1, [2, # [ -> tf.reshape(c1, [2, 2, # [ -> tf.reshape(c1, [2, 2, 1, # 1 3 5 -> tf.reshape(c1, [2, 2, 1, 3]) # ] # ] # [ # [7 9 0] # ] # ] # [[[2 4 6]] [[8 3 7]]] # ] show_(tf.squeeze(c2)) # [[[1 3 5] [7 9 0]] [[2 4 6] [8 3 7]]] # [[[[[1] [3] [5]]] [[[7] [9] [0]]] [[[2] [4] [6]]] [[[8] [3] [7]]]]] show_(c3) show_(tf.squeeze(c3)) # [[1 3 5] [7 9 0] [2 4 6] [8 3 7]] sess.close()-----------reshape------------ shape: [2 6] size: 12 rank: 2 [[1 3 5 7 9 0] [2 4 6 8 3 7]] shape: [4 3] size: 12 rank: 2 [[1 3 5] [7 9 0] [2 4 6] [8 3 7]] shape: [6] size: 6 rank: 1 [1 2 3 7 8 9] -----------squeeze------------ shape: [2 2 1 3] size: 12 rank: 4 [[[[1 3 5]] [[7 9 0]]] [[[2 4 6]] [[8 3 7]]]] shape: [2 2 3] size: 12 
rank: 3 [[[1 3 5] [7 9 0]] [[2 4 6] [8 3 7]]] shape: [1 4 1 3 1] size: 12 rank: 5 [[[[[1] [3] [5]]] [[[7] [9] [0]]] [[[2] [4] [6]]] [[[8] [3] [7]]]]] shape: [4 3] size: 12 rank: 2 [[1 3 5] [7 9 0] [2 4 6] [8 3 7]]window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: Numerical Solution of the Scalar Wave Equation, in Curvilinear Coordinates with Loop Tiling Author: & ting improvements courtesy This module solves the scalar wave equation in *spherical coordinates* (though other coordinates, including Cartesian, may be chosen) using [loop tiling](https://en.wikipedia.org/wiki/Loop_nest_optimization).**Notebook Status:** Validated **Validation Notes:** This module has been validated to converge at the expected order to the exact solution (see [plot](convergence) at bottom). NRPy+ Source Code for this module: * [ScalarWave/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWave/ScalarWaveCurvilinear_RHSs.py) [\[**tutorial**\]](Tutorial-ScalarWaveCurvilinear.ipynb) Generates the right-hand side for the Scalar Wave Equation in curvilinear coordinates* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb) Generating C code for either plane wave or spherical Gaussian initial data for the scalar wave equation Introduction:As outlined in the [previous NRPy+ tutorial notebook](Tutorial-ScalarWaveCurvilinear.ipynb), we first use NRPy+ to generate initial data for the scalar wave equation, and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).The entire algorithm is outlined below, with NRPy+-based components highlighted in green.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.1. At the end of each iteration in time, output the relative error between numerical and exact solutions. Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](writec): Generate C code to solve the scalar wave equation in curvilinear coordinates 1. [Step 1.a](id_rhss): C code generation: Initial data and scalar wave right-hand-sides 1. [Step 1.b](boundaryconditions): C code generation: Boundary condition driver 1. [Step 1.c](cparams_rfm_and_domainsize): Generate Cparameters files; set reference metric parameters, including `domain_size` 1. [Step 1.d](cfl): C code generation: Finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep1. [Step 2](mainc): The C code `main()` function for `ScalarWaveCurvilinear_Playground`1. [Step 3](compileexec): Compile generated C codes & solve the scalar wave equation1. [Step 4](convergence): Code validation: Plot the numerical error, and confirm that it converges to zero at expected rate with increasing numerical resolution (sampling)1. 
[Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Using NRPy+ to generate necessary C code to solve the scalar wave equation in curvilinear, singular coordinates \[Back to [top](toc)\]$$\label{writec}$$ Step 1.a: C code generation: Initial data and scalar wave RHSs \[Back to [top](toc)\]$$\label{id_rhss}$$We choose simple plane wave initial data, which is documented in the [Cartesian scalar wave module](Tutorial-ScalarWave.ipynb). Specifically, we implement monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction with speed $c$$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector.The scalar wave RHSs in curvilinear coordinates (documented [in the previous module](Tutorial-ScalarWaveCurvilinear.ipynb)) are simply the right-hand sides of the scalar wave equation written in curvilinear coordinates\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \left(\hat{g}^{ij} \partial_{i} \partial_{j} u - \hat{\Gamma}^i \partial_i u\right),\end{align}where $\hat{g}^{ij}$ is the inverse reference 3-metric (i.e., the metric corresponding to the underlying coordinate system we choose$-$spherical coordinates in our example below), and $\hat{\Gamma}^i$ is the contracted Christoffel symbol $\hat{\Gamma}^\tau = \hat{g}^{\mu\nu} \hat{\Gamma}^\tau_{\mu\nu}$.Below we generate + the initial data by calling `InitialData(Type="PlaneWave")` inside the NRPy+ [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWave.ipynb)), and + the RHS expressions by calling `ScalarWaveCurvilinear_RHSs()` inside the NRPy+ [ScalarWave/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWave/ScalarWaveCurvilinear_RHSs.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWaveCurvilinear.ipynb)).# Step P1: Import needed NRPy+ core modules: import shutil, os, sys # Standard Python modules for multiplatform OS-level functions sys.path.append("..") from outputC import lhrh, add_to_Cfunction_dict # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesrootdir = os.path.join("ScalarWaveCurvilinear_Playground_Ccodes_with_loop_tiling_new_way") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty shutil.rmtree(Ccodesrootdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesrootdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesrootdir, "output") cmd.mkdir(outdir) # Step P4: Enable/disable SIMD. If enabled, code should run ~2x faster on most CPUs. enable_SIMD = False # Step P5: Enable reference metric precomputation. 
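# Note: when reference-metric precomputation is enabled, hatted-metric quantities that
# depend only on the coordinates (e.g. scale factors and their derivatives) are tabulated
# once into rfmstruct arrays and simply read back inside the RHS kernel, instead of
# re-evaluating their (often transcendental) closed-form expressions at every grid point.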
enable_rfm_precompute = True par.set_parval_from_str("reference_metric::rfm_precompute_to_Cfunctions_and_NRPy_basic_defines", "True") if enable_SIMD and not enable_rfm_precompute: print("ERROR: SIMD does not currently handle transcendental functions,\n") print(" like those found in rfmstruct (rfm_precompute).\n") print(" Therefore, enable_SIMD==True and enable_rfm_precompute==False\n") print(" is not supported.\n") sys.exit(1) # Step P6: Enable "FD functions". In other words, all finite-difference stencils # will be output as inlined static functions. This is essential for # compiling highly complex FD kernels with using certain versions of GCC; # GCC 10-ish will choke on BSSN FD kernels at high FD order, sometimes # taking *hours* to compile. Unaffected GCC versions compile these kernels # in seconds. FD functions do not slow the code performance, but do add # another header file to the C source tree. # With gcc 7.5.0, enable_FD_functions=True decreases performance by 10% enable_FD_functions = False # Step 1: Set some core parameters, including CoordSystem, boundary condition, # MoL, timestepping algorithm, FD order, # floating point precision, and CFL factor: # Step 1.a: Set the coordinate system for the numerical grid # Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical, # SymTP, SinhSymTP CoordSystem = "SinhSpherical" par.set_parval_from_str("reference_metric::CoordSystem", CoordSystem) rfm.reference_metric() # Step 1.b: Set boundary conditions # Current choices are QuadraticExtrapolation (quadratic polynomial extrapolation) or Sommerfeld BoundaryCondition = "QuadraticExtrapolation" # Step 1.c: Set defaults for Coordinate system parameters. # These are perhaps the most commonly adjusted parameters, # so we enable modifications at this high level. # domain_size sets the default value for: # * Spherical's params.RMAX # * SinhSpherical*'s params.AMAX # * Cartesians*'s -params.{x,y,z}min & .{x,y,z}max # * Cylindrical's -params.ZMIN & .{Z,RHO}MAX # * SinhCylindrical's params.AMPL{RHO,Z} # * *SymTP's params.AMAX domain_size = 10.0 # Needed for all coordinate systems. # sinh_width sets the default value for: # * SinhSpherical's params.SINHW # * SinhCylindrical's params.SINHW{RHO,Z} # * SinhSymTP's params.SINHWAA sinh_width = 0.4 # If Sinh* coordinates chosen # sinhv2_const_dr sets the default value for: # * SinhSphericalv2's params.const_dr # * SinhCylindricalv2's params.const_d{rho,z} sinhv2_const_dr = 0.05# If Sinh*v2 coordinates chosen # SymTP_bScale sets the default value for: # * SinhSymTP's params.bScale SymTP_bScale = 1.0 # If SymTP chosen # Step 1.d: Set the order of spatial and temporal derivatives; # the core data type, and the CFL factor. # RK_method choices include: Euler, "RK2 Heun", "RK2 MP", "RK2 Ralston", RK3, "RK3 Heun", "RK3 Ralston", # SSPRK3, RK4, DP5, DP5alt, CK5, DP6, L6, DP8 RK_method = "RK4" FD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable REAL = "double" # Best to use double here. CFL_FACTOR= 1.0 # Step 1.e: Tile parameters _,_,_,_,_,_ = par.Cparameters("int","ScalarWaveCurvilinear", ["tilesize0", "tilesize1", "tilesize2", "tilesize_plus_2NGHOSTS0", "tilesize_plus_2NGHOSTS1", "tilesize_plus_2NGHOSTS2"], [1,1,1,1,1,1]) # Step 2: Import the ScalarWave.InitialData module. # This command only declares ScalarWave initial data # parameters and the InitialData() function. import ScalarWave.InitialData as swid # Step 3: Import ScalarWave_RHSs module. 
# This command only declares ScalarWave RHS parameters # and the ScalarWave_RHSs function (called later) import ScalarWave.ScalarWaveCurvilinear_RHSs as swrhs # Step 4: Call the InitialData() function to set up initial data. # Options include: # "PlaneWave": monochromatic (single frequency/wavelength) plane wave # "SphericalGaussian": spherically symmetric Gaussian, with default stdev=3 swid.InitialData(CoordSystem=CoordSystem,Type="PlaneWave") # Step 5: Set the finite differencing order to FD_order (set above). par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order) # Step 6: Generate SymPy symbolic expressions for # uu_rhs and vv_rhs; the ScalarWave RHSs. # This function also declares the uu and vv # gridfunctions, which need to be declared # to output even the initial data to C file. # First get into the enable_rfm_precompute environment, if enable_rfm_precompute==True if enable_rfm_precompute: cmd.mkdir(os.path.join(Ccodesrootdir, "rfm_files/")) par.set_parval_from_str("reference_metric::enable_rfm_precompute", "True") par.set_parval_from_str("reference_metric::rfm_precompute_Ccode_outdir", os.path.join(Ccodesrootdir, "rfm_files/")) swrhs.ScalarWaveCurvilinear_RHSs() # Step 6.a: Now that we are finished with all the rfm hatted # quantities, let's restore them to their closed- # form expressions. if enable_rfm_precompute: par.set_parval_from_str("reference_metric::enable_rfm_precompute", "False") # Reset to False to disable rfm_precompute. rfm.ref_metric__hatted_quantities() # Step 7: Copy SIMD/SIMD_intrinsics.h to $Ccodesrootdir/SIMD/SIMD_intrinsics.h if enable_SIMD: cmd.mkdir(os.path.join(Ccodesrootdir,"SIMD")) shutil.copy(os.path.join("../SIMD/")+"SIMD_intrinsics.h",os.path.join(Ccodesrootdir,"SIMD/")) # Step 8: Set enable "FD functions" parameter. See above for details. par.set_parval_from_str("finite_difference::enable_FD_functions", enable_FD_functions)Step 1.b: Generate Method of Lines timestepping code \[Back to [top](toc)\]$$\label{mol}$$The Method of Lines algorithm is described in detail in the [**NRPy+ tutorial notebook on Method of Lines algorithm**](Tutorial-Method_of_Lines-C_Code_Generation.ipynb).# Step 10: Generate Runge-Kutta-based (RK-based) timestepping code. 
# As described above the Table of Contents, this is a 2-step process: # 1.b.A: Evaluate RHSs (RHS_string) # 1.b.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.MoL_new_way as MoL # from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict # RK_order = Butcher_dict[RK_method][1] RHS_string = "rhs_eval(params, rfmstruct, RK_INPUT_GFS, RK_OUTPUT_GFS);" if not enable_rfm_precompute: RHS_string = RHS_string.replace("rfmstruct", "xx") if BoundaryCondition == "QuadraticExtrapolation": # Extrapolation BCs are applied to the evolved gridfunctions themselves after the MoL update post_RHS_string = "apply_bcs_curvilinear(params, bcstruct, NUM_EVOL_GFS, evol_gf_parity, RK_OUTPUT_GFS);" elif BoundaryCondition == "Sommerfeld": # Sommerfeld BCs are applied to the gridfunction RHSs directly RHS_string += "apply_bcs_sommerfeld(params, xx, bcstruct, NUM_EVOL_GFS, evol_gf_parity, RK_INPUT_GFS, RK_OUTPUT_GFS);" post_RHS_string = "" else: print("Invalid choice of boundary condition") sys.exit(1) MoL.register_C_functions_and_NRPy_basic_defines(RK_method, RHS_string=RHS_string, post_RHS_string=post_RHS_string, enable_rfm=enable_rfm_precompute, enable_curviBCs=True, enable_SIMD=enable_SIMD) def add_to_Cfunction_dict_exact_solution_single_point(): includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"] desc = "Exact solution at a single point. params.time==0 corresponds to the initial data." c_type = "void" name = "exact_solution_single_point" params = """const paramstruct *restrict params, const REAL xx0, const REAL xx1, const REAL xx2, REAL *uu_exact, REAL *vv_exact""" body = fin.FD_outputC("returnstring",[lhrh(lhs="*uu_exact",rhs=swid.uu_ID), lhrh(lhs="*vv_exact",rhs=swid.vv_ID)], params="includebraces=False,preindent=1,outCverbose=False") add_to_Cfunction_dict( includes=includes, desc=desc, c_type=c_type, name=name, params=params, body=body, rel_path_to_Cparams=os.path.join(".")) def add_to_Cfunction_dict_exact_solution_all_points(): includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"] desc = "Exact solution at all points. params.time==0 corresponds to the initial data." c_type = "void" name = "exact_solution_all_points" params = "const paramstruct *restrict params,REAL *restrict xx[3], REAL *restrict in_gfs" body = """exact_solution_single_point(params, xx0, xx1, xx2, &in_gfs[IDX4S(UUGF,i0,i1,i2)], &in_gfs[IDX4S(VVGF,i0,i1,i2)]);""" add_to_Cfunction_dict( includes=includes, desc=desc, c_type=c_type, name=name, params=params, body=body, rel_path_to_Cparams=os.path.join("."), loopopts = "AllPoints,Read_xxs")Below we write the right-hand side evaluation function. 
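As a toy, NRPy+-free illustration of the tiling pattern used in that kernel (the 1D stencil and the `tiled_update` helper here are hypothetical, purely for illustration), the grid loop is split into an outer loop over tiles and an inner loop over the points of each tile, so that each tile's working set can stay cache-resident:

import numpy as np

def tiled_update(u, tilesize=64):
    # Toy blocked loop: apply a simple second-derivative stencil tile-by-tile.
    rhs = np.zeros_like(u)
    n = u.shape[0]
    for start in range(1, n - 1, tilesize):            # outer loop over tiles
        stop = min(start + tilesize, n - 1)
        for i in range(start, stop):                    # inner loop within one tile
            rhs[i] = u[i - 1] - 2.0 * u[i] + u[i + 1]   # standard 3-point stencil
    return rhs

u = np.sin(np.linspace(0.0, 2.0 * np.pi, 256))
print(np.allclose(tiled_update(u, tilesize=64), tiled_update(u, tilesize=8)))  # True: tiling only reorders the work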
We use [*loop tiling*](https://en.wikipedia.org/wiki/Loop_nest_optimization) for locality optimization.def add_to_Cfunction_dict_rhs_eval(): desc="Evaluate the scalar wave RHSs" includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"] if enable_FD_functions: includes += ["finite_difference_functions.h"] if enable_SIMD: includes += ["SIMD/SIMD_intrinsics.h"] prefunc = "#define IDX4ST(gf,i0,i1,i2) ( (i0) + tilesize_plus_2NGHOSTS0*( (i1) + tilesize_plus_2NGHOSTS1*( (i2) + tilesize_plus_2NGHOSTS2*(gf) ) ) )" c_type = "void" name = "rhs_eval" params ="const paramstruct *restrict params, " if enable_rfm_precompute: params += "const rfm_struct *restrict rfmstruct, " else: params += "REAL *xx[3], " params += "const REAL *restrict in_gfs, REAL *restrict rhs_gfs" body = """ const int tilesize_total = tilesize_plus_2NGHOSTS0*tilesize_plus_2NGHOSTS1*tilesize_plus_2NGHOSTS2*NUM_EVOL_GFS; const int num_strides0 = Nxx0/tilesize0; const int num_strides1 = Nxx1/tilesize1; const int num_strides2 = Nxx2/tilesize2; // Tile loop #pragma omp parallel for for(int stride2=0;stride2Step 1.b: Output needed C code for boundary condition driver \[Back to [top](toc)\]$$\label{boundaryconditions}$$import CurviBoundaryConditions.CurviBoundaryConditions_new_way as CBC CBC.CurviBoundaryConditions_register_C_functions_and_NRPy_basic_defines() # FIXME: # if BoundaryCondition == "Sommerfeld": # bcs = cbcs.sommerfeld_boundary_condition_class(fd_order=2, # vars_radial_falloff_power_default=3, # vars_speed_default=1., # vars_at_inf_default=0.) # # bcs.vars_radpower.items() # bcs.write_sommerfeld_file(Ccodesrootdir)Evolved gridfunction "uu" has parity type 0. Evolved gridfunction "vv" has parity type 0.Step 1.c: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams_rfm_and_domainsize}$$Here we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above# Step 1.c.i: Set free_parameters.h with open(os.path.join(Ccodesrootdir,"free_parameters.h"),"w") as file: file.write(""" // Free parameters related to physical system: params.time = 0.0; // Initial simulation time corresponds to exact solution at time=0. params.wavespeed = 1.0; // Free parameters related to numerical timestep: REAL CFL_FACTOR = """+str(CFL_FACTOR)+"""; // Tile parameters params.tilesize0 = Nxx[0]; params.tilesize1 = 1; params.tilesize2 = 1; params.tilesize_plus_2NGHOSTS0 = params.tilesize0 + 2*NGHOSTS; params.tilesize_plus_2NGHOSTS1 = params.tilesize1 + 2*NGHOSTS; params.tilesize_plus_2NGHOSTS2 = params.tilesize2 + 2*NGHOSTS; """) # Append to $Ccodesrootdir/free_parameters.h reference metric parameters based on generic # domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale, # parameters set above. 
rfm.out_default_free_parameters_for_rfm(os.path.join(Ccodesrootdir,"free_parameters.h"), domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale)Step 1.d: Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep \[Back to [top](toc)\]$$\label{cfl}$$In order for our explicit-timestepping numerical solution to the scalar wave equation to be stable, it must satisfy the [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673) condition:$$\Delta t \le \frac{\min(ds_i)}{c},$$where $c$ is the wavespeed, and$$ds_i = h_i \Delta x^i$$ is the proper distance between neighboring gridpoints in the $i$th direction (in 3D, there are 3 directions), $h_i$ is the $i$th reference metric scale factor, and $\Delta x^i$ is the uniform grid spacing in the $i$th direction:# Generate & register C function set_Nxx_dxx_invdx_params__and__xx() # Generate & register C function xx_to_Cart() for # (the mapping from xx->Cartesian) for the chosen # CoordSystem: # Generate & register the find_timestep() function rfm.register_C_functions_and_NRPy_basic_defines(enable_rfm_precompute=enable_rfm_precompute)Step 2: The C code `main()` function for `ScalarWaveCurvilinear_Playground` \[Back to [top](toc)\]$$\label{mainc}$$Just as in [the start-to-finish, solving the scalar wave equation in Cartesian coordinates module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will implement the scalar wave equation via the Method of Lines. As discussed above, the critical differences between this code and the Cartesian version are as follows:1. The CFL-constrained timestep depends on the proper distance between neighboring gridpoints1. The boundary conditions must account for the fact that ghost zone points lying in the domain exterior can map either to the interior of the domain, or lie on the outer boundary. In the former case, we simply copy the data from the interior. In the latter case, we apply the usual outer boundary conditions.1. The numerical grids must be staggered to avoid direct evaluation of the equations on coordinate singularities.def add_to_Cfunction_dict_main__ScalarWaveCurvilinear_Playground(): includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"] desc = """// main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Write test data to gridfunctions // Step 2: Overwrite all data in ghost zones with NaNs // Step 3: Apply curvilinear boundary conditions // Step 4: Print gridfunction data after curvilinear boundary conditions have been applied // Step 5: Free all allocated memory """ c_type = "int" name = "main" params = "int argc, const char *argv[]" body = r""" paramstruct params; set_Cparameters_to_default(¶ms); // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected one command-line argument: ./ScalarWaveCurvilinear_Playground Nx0 Nx1 Nx2,\n"); printf("where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n"); printf("Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0b: Set up numerical grid structure, first in space... 
const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) }; if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) { printf("Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n"); printf(" For example, in case of angular directions, proper symmetry zones will not exist.\n"); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" // Step 0d: Uniform coordinate grids are stored to *xx[3] REAL *xx[3]; // Step 0d.i: Set bcstruct bc_struct bcstruct; { int EigenCoord = 1; // Step 0d.ii: Call set_Nxx_dxx_invdx_params__and__xx(), which sets // params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the // chosen Eigen-CoordSystem. set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx); // Step 0e: Find ghostzone mappings; set up bcstruct driver_bcstruct(¶ms, &bcstruct, xx); // Step 0e.i: Free allocated space for xx[][] array for(int i=0;i<3;i++) free(xx[i]); } // Step 0f: Call set_Nxx_dxx_invdx_params__and__xx(), which sets // params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the // chosen (non-Eigen) CoordSystem. int EigenCoord = 0; set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx); // Step 0g: Set all C parameters "blah" for params.blah, including // Nxx_plus_2NGHOSTS0 = params.Nxx_plus_2NGHOSTS0, etc. #include "set_Cparameters-nopointer.h" // Step 0h: Time coordinate parameters const REAL t_final = 0.7*domain_size; /* Final time is set so that at t=t_final, * data at the origin have not been corrupted * by the approximate outer boundary condition */ // Step 0i: Set timestep based on smallest proper distance between gridpoints and CFL factor REAL dt = find_timestep(¶ms, xx, CFL_FACTOR); //printf("# Timestep set to = %e\n",(double)dt); int N_final = (int)(t_final / dt + 0.5); // The number of points in time. // Add 0.5 to account for C rounding down // typecasts to integers. int output_every_N = (int)((REAL)N_final/800.0); if(output_every_N == 0) output_every_N = 1; // Step 0j: Error out if the number of auxiliary gridfunctions outnumber evolved gridfunctions. // This is a limitation of the RK method. You are always welcome to declare & allocate // additional gridfunctions by hand. if(NUM_AUX_GFS > NUM_EVOL_GFS) { printf("Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\n"); printf(" or allocate (malloc) by hand storage for *diagnostic_output_gfs. \n"); exit(1); } // Step 0k: Declare struct for gridfunctions and allocate memory for y_n_gfs gridfunctions MoL_gridfunctions_struct gridfuncs; MoL_malloc_y_n_gfs(¶ms, &gridfuncs); """ if enable_rfm_precompute: body += """ // Step 0l: Set up precomputed reference metric arrays // Step 0l.i: Allocate space for precomputed reference metric arrays. rfm_struct rfmstruct; rfm_precompute_rfmstruct_malloc(¶ms, &rfmstruct); // Step 0l.ii: Define precomputed reference metric arrays. rfm_precompute_rfmstruct_define(¶ms, xx, &rfmstruct);\n""" body += r""" // Step 1: Set up initial data to be exact solution at time=0: params.time = 0.0; exact_solution_all_points(¶ms, xx, gridfuncs.y_n_gfs); // Step 1a: Allocate memory for non y_n_gfs. We do this here to free up // memory for setting up initial data (for cases in which initial // data setup is memory intensive.) MoL_malloc_non_y_n_gfs(¶ms, &gridfuncs); for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time. 
// Step 1a: Set current time to correct value & compute exact solution params.time = ((REAL)n)*dt; // Step 2: Code validation: Compute log of L2 norm of difference // between numerical and exact solutions: // log_L2_Norm = log10( sqrt[Integral( [numerical - exact]^2 * dV)] ), // where integral is within 30% of the grid outer boundary (domain_size) if(n%output_every_N == 0) { REAL integral = 0.0; REAL numpts = 0.0; #pragma omp parallel for reduction(+:integral,numpts) LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS1-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) { REAL xCart[3]; xx_to_Cart(¶ms,xx,i0,i1,i2, xCart); if(sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]) < domain_size*0.3) { REAL uu_exact,vv_exact; exact_solution_single_point(¶ms, xx[0][i0],xx[1][i1],xx[2][i2], &uu_exact,&vv_exact); double num = (double)gridfuncs.y_n_gfs[IDX4S(UUGF,i0,i1,i2)]; double exact = (double)uu_exact; integral += (num - exact)*(num - exact); numpts += 1.0; } } // Compute and output the log of the L2 norm. REAL log_L2_Norm = log10(1e-16 + sqrt(integral/numpts)); // 1e-16 + ... avoids log10(0) printf("%e %e\n",(double)params.time,log_L2_Norm); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm MoL_step_forward_in_time(¶ms, &rfmstruct, &bcstruct, &gridfuncs, dt); } // End main loop to progress forward in time. // Step 4: Free all allocated memory """ if enable_rfm_precompute: body += " rfm_precompute_rfmstruct_freemem(¶ms, &rfmstruct);\n" body += r""" freemem_bcstruct(¶ms, &bcstruct); MoL_free_memory_y_n_gfs(¶ms, &gridfuncs); MoL_free_memory_non_y_n_gfs(¶ms, &gridfuncs); for(int i=0;i<3;i++) free(xx[i]); return 0; """ # As rfmstruct stores functions of xx, when rfm_precompute is disabled, # we always pass xx to a function instead of &rfmstruct. if not enable_rfm_precompute: body = body.replace("&rfmstruct", "xx") add_to_Cfunction_dict( includes=includes, desc=desc, c_type=c_type, name=name, params=params, body=body, rel_path_to_Cparams=os.path.join("."), enableCparameters=False)Next, register all remaining C functions in `outC_function_dict`, and output `finite_difference_functions.h`. Also construct `NRPy_basic_defines.h`.def register_C_code_functions_ScalarWaveCurvilinear(): add_to_Cfunction_dict_exact_solution_single_point() add_to_Cfunction_dict_exact_solution_all_points() add_to_Cfunction_dict_rhs_eval() add_to_Cfunction_dict_main__ScalarWaveCurvilinear_Playground() import outputC as outC outC.outputC_register_C_functions_and_NRPy_basic_defines() # #define M_PI, etc. # Declare paramstruct, register set_Cparameters_to_default(), # and output declare_Cparameters_struct.h and set_Cparameters[].h: outC.NRPy_param_funcs_register_C_functions_and_NRPy_basic_defines(os.path.join(Ccodesrootdir)) gri.register_C_functions_and_NRPy_basic_defines() # #define IDX3S(), etc. fin.register_C_functions_and_NRPy_basic_defines(NGHOSTS_account_for_onezone_upwind=False) # #define NGHOSTS, etc. # all functions needed for scalar wave: register_C_code_functions_ScalarWaveCurvilinear() # Output functions for computing all finite-difference stencils. # Must be called after defining all functions depending on FD stencils. 
if enable_FD_functions: fin.output_finite_difference_functions_h(path=Ccodesrootdir) # Call this last: Set up NRPy_basic_defines.h and NRPy_function_prototypes.h. outC.construct_NRPy_basic_defines_h(Ccodesrootdir, enable_SIMD=enable_SIMD) outC.construct_NRPy_function_prototypes_h(Ccodesrootdir)Step 3: Compile generated C codes & solve the scalar wave equation \[Back to [top](toc)\]$$\label{compileexec}$$To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb).import cmdline_helper as cmd cmd.new_C_compile(Ccodesrootdir, "ScalarWaveCurvilinear_Playground", uses_free_parameters_h=True, compiler_opt_option="fast") # fastdebug or debug also supported os.chdir(Ccodesrootdir) cmd.Execute("ScalarWaveCurvilinear_Playground", "16 8 16", os.path.join("output", "out-lowresolution.txt")) cmd.Execute("ScalarWaveCurvilinear_Playground", "24 12 24", os.path.join("output", "out-medresolution.txt")) # Return to root directory os.chdir(os.path.join(".."))(EXEC): Executing `make -j10`... (BENCH): Finished executing in 2.461454153060913 seconds. Finished compilation. (EXEC): Executing `./ScalarWaveCurvilinear_Playground 16 8 16`... (BENCH): Finished executing in 1.241443157196045 seconds. (EXEC): Executing `./ScalarWaveCurvilinear_Playground 24 12 24`... (BENCH): Finished executing in 3.2527718544006348 seconds.Step 4: Code validation: Plot the numerical error, and confirm that it converges to zero at expected rate with increasing numerical resolution (sampling) \[Back to [top](toc)\]$$\label{convergence}$$The numerical solution $u_{\rm num}(x0,x1,x2,t)$ should converge to the exact solution $u_{\rm exact}(x0,x1,x2,t)$ at fourth order, which means that$$u_{\rm num}(x0,x1,x2,t) = u_{\rm exact}(x0,x1,x2,t) + \mathcal{O}\left((\Delta x0)^4\right)+ \mathcal{O}\left((\Delta x1)^4\right)+ \mathcal{O}\left((\Delta x2)^4\right)+ \mathcal{O}\left((\Delta t)^4\right).$$Thus the relative error $E_{\rm rel}$ should satisfy:$$\left|\frac{u_{\rm num}(x0,x1,x2,t) - u_{\rm exact}(x0,x1,x2,t)}{u_{\rm exact}(x0,x1,x2,t)}\right| + \mathcal{O}\left((\Delta x0)^4\right)+ \mathcal{O}\left((\Delta x1)^4\right)+ \mathcal{O}\left((\Delta x2)^4\right)+ \mathcal{O}\left((\Delta t)^4\right).$$We confirm this convergence behavior by first solving the scalar wave equation at two resolutions: $16\times 8\times 16$ (or $16^3$ if `reference_metric::CoordSystem` is set to `Cartesian`), and $24\times 12\times 24$ (or $24^3$ if `reference_metric::CoordSystem` is set to `Cartesian`) and evaluating the maximum logarithmic relative error $\log_{10} E_{\rm rel,max}$ between numerical and exact solutions within a region $R < 0.1 {\rm RMAX}$ at all iterations. 
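As an aside, two-resolution error measurements like these can be converted into an observed convergence order via $p = \log(E_{\rm coarse}/E_{\rm fine}) / \log(N_{\rm fine}/N_{\rm coarse})$, since $E \propto (\Delta x0)^p \propto N_0^{-p}$. A minimal sketch with hypothetical error values (not taken from this notebook's output):

import math

E_coarse = 3.2e-4   # hypothetical max relative error at Nx0 = 16 (NOT notebook output)
E_fine   = 6.4e-5   # hypothetical max relative error at Nx0 = 24 (NOT notebook output)

p = math.log(E_coarse / E_fine) / math.log(24.0 / 16.0)
print("observed convergence order p = %.2f" % p)                    # ~3.97 for these sample values
print("expected 4th-order error ratio = %.2f" % ((24.0 / 16.0) ** 4))  # ~5.06, i.e. log10 ~ 0.70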
Since we increase the resolution uniformly over all four coordinates $(x0,x1,x2,t)$, $E_{\rm rel}$ should drop uniformly as $(\Delta x0)^4$:$$E_{\rm rel} \propto (\Delta x0)^4.$$So at the two resolutions, we should find that$$\frac{E_{\rm rel}(16\times 8\times 16)}{E_{\rm rel}(24\times 12\times 24)} = \frac{E_{\rm rel}(16^3)}{E_{\rm rel}(24^3)} \approx \left(\frac{(\Delta x0)_{16}}{(\Delta x0)_{24}}\right)^{4} = \left(\frac{24}{16}\right)^4 \approx 5.$$Since we're measuring logarithmic relative error, this should be$$\log_{10}\left(\frac{E_{\rm rel}(16\times 8\times 16)}{E_{\rm rel}(24\times 12\times 24)}\right) = \log_{10}\left(\frac{E_{\rm rel}(16^3)}{E_{\rm rel}(24^3)}\right) \approx \log_{10}(5).$$%matplotlib inline import matplotlib.pyplot as plt import mpmath as mp import csv def file_reader(filename): with open(filename) as file: reader = csv.reader(file, delimiter=" ") data = list(zip(*reader)) # data is a tuple of strings. Tuples are immutable, and we need to perform math on # the data, so here we convert tuple to lists of floats: data0 = [] data1 = [] for i in range(len(data[0])): data0.append(float(data[0][i])) data1.append(float(data[1][i])) return data0,data1 first_col16,second_col16 = file_reader(os.path.join(outdir, 'out-lowresolution.txt')) first_col24,second_col24 = file_reader(os.path.join(outdir, 'out-medresolution.txt')) second_col16_rescaled4o = [] second_col16_rescaled5o = [] for i in range(len(second_col16)): # data16 = data24*(16/24)**4 # -> log10(data24) = log10(data24) + 4*log10(16/24) second_col16_rescaled4o.append(second_col16[i] + 4*mp.log10(16./24.)) second_col16_rescaled5o.append(second_col16[i] + 5*mp.log10(16./24.)) # https://matplotlib.org/gallery/text_labels_and_annotations/legend.html#sphx-glr-gallery-text-labels-and-annotations-legend-py fig, ax = plt.subplots() plt.title("Demonstrating 4th-order Convergence: "+par.parval_from_str("reference_metric::CoordSystem")+" Coordinates") plt.xlabel("time") plt.ylabel("log10(Max relative error)") ax.plot(first_col24, second_col24, 'k-', label='logErel(N0=24)') ax.plot(first_col16, second_col16_rescaled4o, 'k--', label='logErel(N0=16) + log((16/24)^4)') ax.set_ylim([-8.05,-1.7]) # Manually set the y-axis range case, since the log10 # relative error at t=0 could be -inf or about -16, # resulting in very different-looking plots # despite the data being the same to roundoff. if par.parval_from_str("reference_metric::CoordSystem") == "Cartesian": ax.set_ylim([-2.68,-1.62]) if par.parval_from_str("reference_metric::CoordSystem") == "Cylindrical": ax.plot(first_col16, second_col16_rescaled5o, 'k.', label='(Assuming 5th-order convergence)') legend = ax.legend(loc='lower right', shadow=True, fontsize='large') legend.get_frame().set_facecolor('C1') plt.show()Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Start_to_Finish-ScalarWaveCurvilinear_with_loop_tiling_new_way.pdf](Tutorial-Start_to_Finish-ScalarWaveCurvilinear_with_loop_tiling_new_way.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-ScalarWaveCurvilinear_with_loop_tiling_new_way")User Mappingorig_members = pd.read_csv('kad_mrgcn_public/data/ironmarch/raw_files/orig_members.csv') sorted_member_id = orig_members['member_id'].sort_values() member_id_map ={} start_id = 1 for id in sorted_member_id: member_id_map[id] = start_id start_id = start_id + 1Postsorig_posts = pd.read_csv('kad_mrgcn_public/data/ironmarch/raw_files/orig_posts.csv') orig_posts.drop(orig_posts.columns[[0,1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]], axis =1, inplace=True)Functionsdef preprocess_text(text): soup = BeautifulSoup(text, "lxml") text = soup.body.text # Get a list of sentences, removes "\n" lst = sent_tokenize(text) text = " ".join(lst) text = text.replace("\n", "") # Preprocess using NLTK text = text.lower() # Remove Punctuation text_p = "".join([char for char in text if char not in string.punctuation]) # Get a List of words words = word_tokenize(text_p) words = " ".join(words) # Remove stopwords stop_words = stopwords.words('english') filtered_words = [word for word in words if word not in stop_words] ret = " ".join(filtered_words) return ret def getBERT(post): # Get the embedding inputs = tokenizer(post, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state # Take the average n_tensor = last_hidden_states.shape[1] vector = (last_hidden_states.sum(axis=1)/n_tensor) return vector def get_word_split(text1): l_total = [] l_partial = [] if len(text1.split())//150 >0: n = len(text1.split())//150 + 1 else: n = 1 # print(n) for w in range(n): if w == 0: l_partial = text1.split()[:200] l_total.append(" ".join(l_partial)) else: l_partial = text1.split()[w*150:w*150 + 200] l_total.append(" ".join(l_partial)) return l_totalVariablesembedding = np.zeros((orig_members.shape[0], 768)) count = np.zeros(orig_members.shape[0]) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased')Iterationsfor i, row in orig_posts.iterrows(): row = orig_posts.iloc[i] author_id = row['author_id'] if author_id not in member_id_map: continue id = member_id_map[author_id] doc = row['post'] clean_doc = preprocess_text(doc) clean_chunks = get_word_split(clean_doc) embed_doc = np.zeros((1, 768)) for chunkNo, chunk in enumerate(clean_chunks): embed_chunk = getBERT(chunk) embed_chunk = embed_chunk.detach().numpy() embed_doc = (embed_doc*chunkNo + embed_chunk)/(chunkNo+1) embedding[id-1] = (count[id-1]*embedding[id-1] + embed_doc)/(count[id-1]+1) count[id-1] = count[id-1]+1 if i%5000==0: num = int(i/5000) np.save("/content/drive/MyDrive/text gcn/node_feature/ironmarch/processed/posts_embedding_chunk_v{}".format(num+1)) np.save("/content/drive/MyDrive/text gcn/node_feature/ironmarch/processed/posts_embedding_chunk_count_v{}".format(i+1), count) np.save("/content/drive/MyDrive/text gcn/node_feature/ironmarch/processed/posts_embedding_chunk_final", embedding)Two Variable Bar Plot*Visualising on a single plot the values of a 
variable that has nested (and independent) variables* Create the datadf = pd.DataFrame({ 'variable': ['gender', 'gender', 'age', 'age', 'age', 'income', 'income', 'income', 'income'], 'category': ['Female', 'Male', '1-24', '25-54', '55+', 'Lo', 'Lo-Med', 'Med', 'High'], 'value': [60, 40, 50, 30, 20, 10, 25, 25, 40], }) df['variable'] = pd.Categorical(df['variable'], categories=['gender', 'age', 'income']) df['category'] = pd.Categorical(df['category'], categories=df['category']) dfWe want to visualise this data and at a galance get an idea to how the `value` breaks down along the `category`s for the different `variable`.Note that each `variable` has different `category`s.First we make a simple plot with all this information and see what to draw from it.(ggplot(df, aes(x='variable', y='value', fill='category')) + geom_col() )All the `value`s along each variable add up to 100, but stacked together the difference within and without the groups is not clear. The solution is to `dodge` the bars.(ggplot(df, aes(x='variable', y='value', fill='category')) + geom_col(stat='identity', position='dodge')) # modifiedThis is good, it gives us the plot we want but the legend is not great. Each `variable` has a different set of `category`s, but the legend has them all clamped together. We cannot easily change the legend, but we can replicate it's purpose by labelling the individual bars.To do this, we create a `geom_text` with `position_dodge(width=0.9)` to match the ratio of the space taken up by each variable. If there was no spacing between the bars of different variables, we would have `width=1`.A minor quack, when text extends beyond the limits we have to manually make space or it would get clipped. Therefore we adjust the bottom `y` limits.dodge_text = position_dodge(width=0.9) # new (ggplot(df, aes(x='variable', y='value', fill='category')) + geom_col(stat='identity', position='dodge', show_legend=False) # modified + geom_text(aes(y=-.5, label='category'), # new position=dodge_text, color='gray', size=8, angle=45, va='top') + lims(y=(-5, 60)) # new )Would it look too crowded if we add value labels on top of the bars?dodge_text = position_dodge(width=0.9) (ggplot(df, aes(x='variable', y='value', fill='category')) + geom_col(stat='identity', position='dodge', show_legend=False) + geom_text(aes(y=-.5, label='category'), position=dodge_text, color='gray', size=8, angle=45, va='top') + geom_text(aes(label='value'), # new position=dodge_text, size=8, va='bottom', format_string='{}%') + lims(y=(-5, 60)) )That looks okay. The `value`s line up with the `category`s because we used the same `dodge` parameters. 
For the final polish, we remove the y-axis, clear out the panel and make the `variable` and `category` labels have the same color.dodge_text = position_dodge(width=0.9) ccolor = '#555555' # Gallery Plot (ggplot(df, aes(x='variable', y='value', fill='category')) + geom_col(stat='identity', position='dodge', show_legend=False) + geom_text(aes(y=-.5, label='category'), position=dodge_text, color=ccolor, size=8, angle=45, va='top') # modified + geom_text(aes(label='value'), position=dodge_text, size=8, va='bottom', format_string='{}%') + lims(y=(-5, 60)) + theme(panel_background=element_rect(fill='white'), # new axis_title_y=element_blank(), axis_line_x=element_line(color='black'), axis_line_y=element_blank(), axis_text_y=element_blank(), axis_text_x=element_text(color=ccolor), axis_ticks_major_y=element_blank(), panel_grid=element_blank(), panel_border=element_blank()) )Stress TestThe idea of this code is to see how the production Endpoint will behave when a **bunch** of requests arrive it.Let's simulate several users doing predictions at the same timeimport threading import boto3 import numpy as np import time import math from multiprocessing.pool import ThreadPool from sklearn import datasets sm = boto3.client("sagemaker-runtime") endpoint_name_mask='iris-model-%s' iris = datasets.load_iris() dataset = np.insert(iris.data, 0, iris.target,axis=1) from sagemaker.serializers import CSVSerializer def predict(payload): csv_serializer = CSVSerializer() payload = payload X = payload[1:] y = payload[0] elapsed_time = time.time() resp = sm.invoke_endpoint( EndpointName=endpoint_name_mask % env, ContentType='text/csv', Accept='text/csv', Body=csv_serializer.serialize(X) ) elapsed_time = time.time() - elapsed_time resp = float(resp['Body'].read().decode('utf-8').strip()) return (resp == y, elapsed_time) def run_test(max_threads, max_requests): num_batches = math.ceil(max_requests / len(dataset)) requests = [] for i in range(num_batches): batch = dataset.copy() np.random.shuffle(batch) requests += batch.tolist() len(requests) pool = ThreadPool(max_threads) result = pool.map(predict, requests) pool.close() pool.join() correct_random_forest=0 elapsedtime_random_forest=0 for i in result: correct_random_forest += i[0] elapsedtime_random_forest += i[1] print("Score classifier: {}".format(correct_random_forest/len(result))) print("Elapsed time: {}s".format(elapsedtime_random_forest)) env='production' %%time print("Starting test 1") run_test(10, 1000) %%time print("Starting test 2") run_test(100, 10000) %%time print("Starting test 3") run_test(150, 100000000)Marginal Gaussianization* Author: * Email: In this demonstration, we will show how we can do the marginal Gaussianization on a 2D dataset using the Histogram transformation and Inverse CDF Gaussian distribution.import os, sys cwd = os.getcwd() # sys.path.insert(0, f"{cwd}/../") sys.path.insert(0, "/home/emmanuel/code/rbig") from rbig.data import ToyData from rbig.transform.gaussianization import MarginalGaussianization # from rbig.transform.gaussianization import HistogramGaussianization, KDEGaussianization from rbig.transform import InverseGaussCDF import numpy as np from scipy import stats # Plot Functions import matplotlib.pyplot as plt import seaborn as sns sns.reset_defaults() #sns.set_style('whitegrid') #sns.set_context('talk') sns.set_context(context='talk',font_scale=0.7) %matplotlib inline %load_ext autoreload %autoreload 2DataFor this example, we are looking at a 2D dataset.def plot_2d_joint(data, color='blue', title='Original Data'): fig = 
plt.figure(figsize=(5, 5)) g = sns.jointplot(x=data[:, 0], y=data[:, 1], kind='hex', color=color) plt.xlabel('X') plt.ylabel('Y') plt.suptitle(title) plt.tight_layout() plt.show() def plot_prob(data, probs, title='Probabilities'): fig, ax = plt.subplots() h = ax.scatter(data[:, 0], data[:, 1], s=1, c=probs, cmap='Reds') ax.set_xlabel('X') ax.set_ylabel('Y') cbar = plt.colorbar(h, ) ax.set_title(title) plt.show() seed = 123 rng = np.random.RandomState(seed=seed) dataset = 'rbig' n_samples = 10_000 n_features = 2 noise = 0.25 random_state=1 clusters = 2 data = ToyData( dataset=dataset, n_samples=n_samples, n_features=n_features, noise=noise, random_state=random_state, clusters=clusters, ).generate_samples() X = data[:, 0] Y = data[:, 1] plot_2d_joint(data, title='Original Data')Uniformization Transformationfrom rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization # from rbig.density.histogram import ScipyHistogram, QuantileHistogram # from rbig.denInitialize Uniformization Algorithm# INITIALIZE UNIFORMIZATION ALGORITHM #=== # uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None) uniform_clf = KDEUniformization(n_quantiles=50, method='fft') # density_clf = KDEScipy(n_quantiles=50, bw_method='scott', support_extension=10) # density_clf = KDESklearn(n_quantiles=100, support_extension=10)Add it to Marginal Transformation Algorithmmg_uniformizer = MarginalUniformization(uniform_clf) mg_uniformizer.fit(data) X_trans = mg_uniformizer.transform(data) plot_2d_joint(X_trans, title='Transformed Data') data_approx = mg_uniformizer.inverse_transform(X_trans) plot_2d_joint(data_approx, title='Transformed Data') X_ldj = mg_uniformizer.log_abs_det_jacobian(data) plot_2d_joint(X_ldj, title='Transformed Data') plot_2d_joint(np.exp(X_ldj), title='Transformed Data') plot_prob(data, X_ldj.sum(-1), title='Log Probabilities') plot_prob(data, np.exp(X_ldj.sum(-1)), title='Probabilities')Marginal Gaussinizationfrom rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization from rbig.transform.gaussianization import MarginalGaussianization uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None) uniform_clf = KDEUniformization(n_quantiles=50, method='fft', ) mg_gaussianizer = MarginalGaussianization(uniform_clf) mg_gaussianizer.fit(data) X_trans = mg_gaussianizer.transform(data) plot_2d_joint(X_trans, title='Transformed Data') data_approx = mg_gaussianizer.inverse_transform(X_trans) plot_2d_joint(data_approx, title='Transformed Data') X_ldj = mg_gaussianizer.log_abs_det_jacobian(data) plot_2d_joint(X_ldj, title='Transformed Data') plot_2d_joint(np.exp(X_ldj), title='Transformed Data') X_lprob = mg_gaussianizer.score_samples(data) plot_prob(data, X_lprob, title='Log Probabilities') plot_prob(data, np.exp(X_lprob), title='Probabilities')Negative Log LikelihoodX_nll = mg_gaussianizer.score(data,) print(f"Negative Log-Likelihood Score: {X_nll:.4f}")Negative Log-Likelihood Score: -2.8415Marginal Histogram TransformationSo, for this transformation, we are going to transform our data from the current distribution to a marginally Gaussian distribution and then perform a rotation. 
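As a conceptual sketch of the per-marginal step (not the rbig API used below, and assuming a 2D numpy array X of samples by features): map each column through its empirical CDF and then through the inverse Gaussian CDF.

from scipy import stats

def marginal_gaussianize(X):
    # rank-based empirical CDF in (0, 1) for every column, then the
    # inverse Gaussian CDF (probit) to make each marginal standard normal
    u = (stats.rankdata(X, axis=0) - 0.5) / X.shape[0]
    return stats.norm.ppf(u)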
In theory, if we do enough of these, we will eventually convert to a Gaussian distribution.# parameters nbins = 1_000 # number of bins to do the histogram transform alpha = 1e-05 # adds some regularization (noise) support_extension = 10 # initialize the transformer mg_transformer = HistogramGaussianization( nbins=nbins, alpha=alpha ) # fit the transformer to the data mg_transformer.fit(data);1. Forward TransformationFor this transformation, we will be applying the following:$$\Psi(\mathbf{x}) = \Phi^{-1}(\mathbf{x})$$where $\Phi^{-1}(\cdot)$ is the inverse CDF of the Gaussian distribution.data_trans = mg_transformer.transform(data) plot_2d_joint(data_trans, title='Transformed Data')So clearly we can see that the transformation works. Both of the marginals are Gaussian distributed.. 2. Inverse TransformationFor this step, we will apply the inverse transformation:$$\Psi^{-1}(\mathbf{x}) = \Phi \left( \mathbf{x} \right)$$where $\Phi(\cdot)$ is the CDF of the Gaussian distribution.data_approx = mg_transformer.inverse_transform(data_trans) # check that its more or less equal np.testing.assert_array_almost_equal(data_approx, data, decimal=1e-5)We see that this transformation is very close to the original. In fact, it's close to approximately 1e-5 decimal places. The errors will definitely stem from the boundaries.# Plot results plot_2d_joint(data_approx, title='Inverse Transformed Data')Log Absolute Determinant JacobianUsing the derivative of inverse-functions theorem, we can calculate the derivative like so:$$\nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = \frac{1}{\phi (\Phi^{-1} (x)) }$$where $\phi(\cdot)$ is the PDF of the Gaussian distribution. Taking the log of these terms gives us:$$ \log \nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = - \log \phi (\Phi^{-1} (x))$$X_slogdet = mg_transformer.log_abs_det_jacobian(data) print(X_slogdet.min(), X_slogdet.max()) print(np.exp(X_slogdet).min(), np.exp(X_slogdet).max()) # plot the gradients plot_2d_joint(np.exp(X_slogdet), title='Jacobian Data')Log Probability$$\log p_\theta(\mathbf{x}) = \log p_\theta \left( \mathbf{z} \right) + \log \left| \nabla_\mathbf{x} \mathbf{z} \right|$$where $\mathbf{z} = \Psi(\mathbf{x})$# score samples log_prob = mg_transformer.score_samples(data) # score samples log_prob = mg_transformer.score_samples(data) plot_prob(data, log_prob, title='Log Probabilities')ProbabilityThis is the same as above but without the log scale:$$p_\theta(\mathbf{x}) = p_\theta \left( \mathbf{z} \right) \left| \nabla_\mathbf{x} \mathbf{z} \right|$$where $\mathbf{z} = \Psi(\mathbf{x})$plot_prob(data, np.exp(log_prob), title='Probabilities')Negative Log-LikelihoodWe need to take the expected value (mean) of all log probabilities.$$\text{nll} = \frac{1}{N} \sum_{n=1}^{N} \log p_\theta(\mathbf{x})$$score = mg_transformer.score(data) print(f"Negative Log-Likelihood Score: {score:.4f}")Negative Log-Likelihood Score: -2.0724Stack Plot Example Using Random Numbersimport numpy as np import matplotlib.pyplot as plt x = range(1,21) y1 = np.random.rand(20) y2 = np.random.rand(20) plt.plot([], [], color='r', label='y1') plt.plot([], [], color='b', label='y2') plt.stackplot(x, y1, y2, colors=['r', 'b']) plt.title("Stack Plot with Random Numbers") plt.legend() plt.show()The originals are not included in the clusters dataframe.cluster_ids, cluster_counts = np.unique(np.concatenate([clusters.cluster_1.dropna().values, clusters.cluster_2.dropna().values]), return_counts=True) #sort cluster_counts, cluster_ids = zip(*sorted(zip(cluster_counts.tolist(), 
cluster_ids.tolist()),reverse=True)) cluster_ids = list(cluster_ids) print("Number of unique clusters {0}".format(len(cluster_ids))) for i, (c, count) in enumerate(zip(cluster_ids, cluster_counts)): print("Cluster id {0}: {1} original videos".format(int(c),count)) if i >= 10: break ## Merge data meta_tmp = meta.sample(frac=1, random_state=SEED) meta_tmp = meta_tmp.merge(clusters[['original','cluster_1','cluster_2', 'both_missing']].drop_duplicates(), on='original', how='left')Add clusters to the original videos. Clusters are the same in any of their fake replicates so just copy the clusters from the first one.for original_index in tqdm(meta_tmp[meta_tmp['original'].isna()].index.values): original_video = meta_tmp.loc[original_index,'index'] # get fake replicates fakes = meta_tmp[meta_tmp['original'] == original_video] if len(fakes) > 0: cluster_1 = fakes.iloc[0]['cluster_1'] cluster_2 = fakes.iloc[0]['cluster_2'] both_missing = fakes.iloc[0]['both_missing'] # set the same clusters to the original meta_tmp.at[original_index,'cluster_1'] = cluster_1 meta_tmp.at[original_index,'cluster_2'] = cluster_2 meta_tmp.at[original_index,'both_missing'] = both_missing meta_tmp[meta_tmp['original'].isna()].head() ## Open fake types and merge columns fake_types_df = pd.read_csv('../data/fake_type_classes.csv') meta_tmp = meta_tmp.merge(fake_types_df[['index','first_person_label', 'second_person_label']], on='index') meta_tmp.head() ## To make sure there is no leak between videos related to the original and same actors ## we generate a grouper column based on the original video and, in case former is missing, the actor cluster ## the video belongs to ## If cluster_1 exist choose it, else "Y" meta_tmp['name_index'] = np.where(meta_tmp.cluster_1.notna(), meta_tmp.cluster_1.astype(str), "Y") meta_tmp['name_index'] = meta_tmp.name_index.astype(str) meta_tmp.head() name_counts = meta_tmp.name_index.value_counts() name_counts # How many error cases? name_counts['Y'] sns.distplot(name_counts.values, kde=False) plt.xlabel('Number of videos in a cluster') plt.ylabel('Number of clusters')A few of the clusters have over 1K videos but majority has less than 200. The smallest number of videos per cluster is two which means one real and one corresponding fake video.We want to split these clusters first, into train and validation sets. 
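A common off-the-shelf way to get a leakage-free split on such a grouper column is scikit-learn's `GroupShuffleSplit` (a hedged sketch, assuming `TRAIN_PCT` and `SEED` are defined in the notebook's configuration); the notebook instead searches for a split that also balances the fake-type classes, as described next.

from sklearn.model_selection import GroupShuffleSplit

# every 'name_index' group lands entirely in either train or validation
gss = GroupShuffleSplit(n_splits=1, train_size=TRAIN_PCT, random_state=SEED)
train_idx, val_idx = next(gss.split(meta_tmp, groups=meta_tmp['name_index']))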
It would be ideal to have an equal amount of each fake class in the validation part.cluster_info = pd.DataFrame({'cluster':name_counts.keys(), 'count':list(name_counts.values), 'type-0':0, 'type-1':0, 'type-2':0, 'type-3':0, 'type-4':0}) for i in tqdm(cluster_info.index.values): c = cluster_info.iloc[i]['cluster'] sample = meta_tmp[meta_tmp['name_index'] == c] for faketype in range(5): cluster_info.at[i,'type-{0}'.format(faketype)] = len(sample[sample['first_person_label'] == float(faketype)]) cluster_info.head() def plotTypeDistribution(clusters=[], return_only_values=False): df = pd.DataFrame({'type':[0,1,2,3,4],'count':[0,0,0,0,0]}) for cluster in clusters: sample = cluster_info[cluster_info['cluster']==cluster] for faketype in range(5): df.at[faketype,'count'] = df.at[faketype,'count'] + sample['type-{0}'.format(faketype)] # calculate percentage from the total set sample_count = 0 for faketype in range(5): sample_count += df.at[faketype,'count'] min_count = min(df['count'].values) max_count = max(df['count'].values) min_max_ratio = min_count/max_count percentage = sample_count/len(meta_tmp) if return_only_values: return percentage, min_max_ratio sns.barplot(x='type',y='count',data=df) plt.suptitle('{0} samples = {1:.2f} % from all. min_count/max_count: {2:.2f}'.format(sample_count, 100*percentage, min_max_ratio)) plt.show() # plot distribution from all plotTypeDistribution(name_counts.keys())Type 2 is the most common and type 4 the least common, with over ten times the difference between these two classes.For the validation set, sample from medium-sized clusters (40-200 videos) so that all classes are equally represented.from itertools import permutations import random min_val_ratio = 1.0 - TRAIN_PCT - 0.02 max_val_ratio = 1.0 - TRAIN_PCT + 0.02 print("Accept val set size from {0:.2f} to {1:.2f}".format(min_val_ratio,max_val_ratio)) highest_min_max_ratio = 0 validation_index_list = [] stopping_condition = 0.3 # brute force search for suitable combination range_list = list(range(200,460)) random.shuffle(range_list) for combination in tqdm(permutations(range_list, 160)): percentage, min_max_ratio = plotTypeDistribution(name_counts.keys()[list(combination)], return_only_values=True) if percentage > min_val_ratio and percentage < max_val_ratio: if min_max_ratio > highest_min_max_ratio: highest_min_max_ratio = min_max_ratio validation_index_list = list(combination) print("Found better ratio: {0:.3f}".format(highest_min_max_ratio)) if highest_min_max_ratio >= stopping_condition: print("Stopping condition reached") break plotTypeDistribution(name_counts.keys()[validation_index_list])This is good enough.
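A note on the brute-force search above: `permutations(range_list, 160)` enumerates an astronomically large space, so in practice the loop only ever sees a tiny, essentially arbitrary slice of it before the stopping condition triggers. A lighter-weight alternative (a sketch, reusing `name_counts`, `plotTypeDistribution`, `min_val_ratio` and `max_val_ratio` from above) is to draw candidate index sets at random with a fixed budget:

import random

best_ratio, best_indices = 0.0, []
for _ in range(10_000):  # fixed budget of random candidate splits
    candidate = random.sample(range(200, 460), 160)
    percentage, min_max_ratio = plotTypeDistribution(name_counts.keys()[candidate],
                                                     return_only_values=True)
    if min_val_ratio < percentage < max_val_ratio and min_max_ratio > best_ratio:
        best_ratio, best_indices = min_max_ratio, candidate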
The type classes are not necessary evenly spaced and this may explain why class 2 is the most common.# Validation clusters assert "Y" not in list(name_counts.keys()[validation_index_list]) _ = [print(name_counts.keys()[ind]) for ind in validation_index_list] meta_tmp['isVal'] = np.where(meta_tmp.cluster_1.isin(list(name_counts.keys()[validation_index_list])), True, False) meta_tmp.head() ## Split the grouper into train and val using numpy's array_split tmp_df = pd.DataFrame() name_df = meta_tmp[~meta_tmp['name_index'].isin(["Y"])][['name_index', 'isVal']].drop_duplicates() name_df = name_df.sample(frac=1, random_state=SEED) train_df = name_df[name_df['isVal']==False] val_df = name_df[name_df['isVal']==True] for slice_df in [train_df, val_df]: for idx, df in enumerate(np.array_split(slice_df, NUM_FOLDS)): df['cv_fold'] = idx tmp_df = tmp_df.append(df) Y_df = meta_tmp[meta_tmp['name_index']=="Y"][['name_index']].drop_duplicates() Y_df['isVal'] = True Y_df['cv_fold'] = 999 tmp_df = tmp_df.append(Y_df) meta_new = meta_tmp.merge(tmp_df[['name_index','cv_fold']], on='name_index', how='left') meta_new['split'] = meta_new['isVal'].apply(lambda x: "2nd_level" if x else "1st_level") meta_new['version'] = datetime.date.today() meta_new = meta_new.drop_duplicates() ## Visualize the counts tmp = meta_new.groupby(['split','cv_fold']).size().to_frame('counts') tmp ## Check that we didn't violate any of the rules ## A) all videos with same original must belong to exactly one cv_fold assert meta_new.groupby(['name_index']).nunique()['cv_fold'].max()==1 ## B) all videos with same cluster_1 must belong to exactly one cv_fold assert meta_new[meta_new.cv_fold!=999].groupby(['cluster_1']).nunique()['cv_fold'].max()==1 ## C) length of the new metadata must match the length of the original data assert len(meta_new)==orig_len for col in [None, 'original', 'cluster_1','cluster_2']: df = meta_new.copy() values = df[col] if col!=None else None aggfunc = 'count' if col!=None else None print("\ncounts wrt "+col if col!=None else "") display(pd.crosstab([df['split'], df['cv_fold']], [df['split'], df['cv_fold']], values, aggfunc=aggfunc ).fillna(0), ) print("- "*40) # update person count and merge classes meta_new = meta_new.merge(fake_types_df[['index', 'first_person_frame_labels', 'second_person_frame_labels']], on='index') # update person count meta_new['person_count'] = np.where(fake_types_df['first_person_label'].isna(), 0, # no persons np.where(fake_types_df['second_person_label'].isna(), 1, # only first 2)) # first and second meta_new.head() ## Save the splits meta_new.sort_index(inplace=True) meta_new = meta_new[KEEP_COLS] meta_new.to_csv('../data/metadata_tr_val_split_folds.csv', index=False) meta_new.head() sns.countplot(x='person_count',hue='isVal',data=meta_new) plt.suptitle('Person counts in train and val') plt.show() sns.countplot(x='person_count',hue='cv_fold',data=meta_new[meta_new['isVal']==False]) plt.suptitle('Person counts in training folds') plt.show() sns.countplot(x='person_count',hue='cv_fold',data=meta_new[meta_new['isVal']==True]) plt.suptitle('Person counts in validation folds') plt.show() sns.countplot(x='first_person_label',hue='isVal',data=meta_new) plt.suptitle('First person fake-label in train and val') plt.show() sns.countplot(x='first_person_label',hue='cv_fold',data=meta_new[meta_new['isVal']==False]) plt.suptitle('First person fake-label in training folds') plt.show() sns.countplot(x='first_person_label',hue='cv_fold',data=meta_new[meta_new['isVal']==True]) plt.suptitle('First person 
fake-label in validation folds') plt.show()pandas GroupBypandas groupby fun is used to split the data into groups based on some criteriaimport pandas as pd data =pd.read_csv('C:\\Users\\admin\\Desktop\\book1.csv') data gp = data.groupby( by = "Age") gp gp.groups gp = data.groupby( by = ["Age",'marks']) gp gp.groups for Age,marks in gp: print(Age) print(marks) gp= data.groupby('Age').get_group(34) gp gp.mean()Explanation of this regex: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression ^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$Breaking it down, M{0,4} specifies the thousands section and basically restrains it to between 0 and 4000. It's a relatively simple: 0: matched by M{0} 1000: M matched by M{1} 2000: MM matched by M{2} 3000: MMM matched by M{3} 4000: MMMM matched by M{4} You could, of course, use something like M* to allow any number (including zero) of thousands, if you want to allow bigger numbers.Next is (CM|CD|D?C{0,3}), slightly more complex, this is for the hundreds section and covers all the possibilities: 0: matched by D?C{0} (with D not there) 100: C matched by D?C{1} (with D not there) 200: CC matched by D?C{2} (with D not there) 300: CCC matched by D?C{3} (with D not there) 400: CD matched by CD 500: D matched by D?C{0} (with D there) 600: DC matched by D?C{1} (with D there) 700: DCC matched by D?C{2} (with D there) 800: DCCC matched by D?C{3} (with D there) 900: CM matched by CM Thirdly, (XC|XL|L?X{0,3}) follows the same rules as previous section but for the tens place: 0: matched by L?X{0} (with L not there) 10: X matched by L?X{1} (with L not there) 20: XX matched by L?X{2} (with L not there) 30: XXX matched by L?X{3} (with L not there) 40: XL matched by XL 50: L matched by L?X{0} (with L there) 60: LX matched by L?X{1} (with L there) 70: LXX matched by L?X{2} (with L there) 80: LXXX matched by L?X{3} (with L there) 90: XC matched by XC And, finally, (IX|IV|V?I{0,3}) is the units section, handling 0 through 9 and also similar to the previous two sections (Roman numerals, despite their seeming weirdness, follow some logical rules once you figure out what they are): 0: matched by V?I{0} (with V not there) 1: I matched by V?I{1} (with V not there) 2: II matched by V?I{2} (with V not there) 3: III matched by V?I{3} (with V not there) 4: IV matched by IV 5: V matched by V?I{0} (with V there) 6: VI matched by V?I{1} (with V there) 7: VII matched by V?I{2} (with V there) 8: VIII matched by V?I{3} (with V there) 9: IX matched by IX Just keep in mind that that regex will also match an empty string. If you don't want this (and your regex engine is modern enough), you can use positive look-behind and look-ahead: (?<=^)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})(?=$)import re regex_pattern = r"^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$" # same: regex_pattern = r'M{0,3}(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[XV]|V?I{0,3})$' print(str(bool(re.match(regex_pattern, input()))))CDXXI TrueVery Brief Intro to Python This is a notebook for very brief intro to Python. There is also a notebook to learn more, you may want to check [here](https://colab.research.google.com/github/yohanesnuwara/python-bootcamp-for-geoengineers/blob/master/petroweek_notebooks/petroweek2020_unit1.ipynb). To open these notebooks in Colab, click "Copy to Drive", then you'll have this notebook and freely code. Happy learning! Using libraries# import libraries. These one have been pre-installed. 
import numpy as np import matplotlib.pyplot as plt import pandas as pd # this is style for plotting. There are many, you can try: 'classic', 'seaborn' # but if you don't want any style, you can just omit this code. plt.style.use('ggplot')Very Brief Intro to Numpy# generate array from 0 to 10 into 100 numbers a = np.linspace(0, 10, 100) print(a) # generate array from 0 to 10 with increment of 0.25 b = np.arange(0, 10, 0.25) print(b) # generate 100 uniform random numbers from 0 to 1 c = np.random.random(100) print(c) # multiplying a and c arrays print(a * c) # make a function y=f(x), b as input def cubic(x, a, b, c): y = a* (x**3) + b * (x**2) + c * x return y y = cubic(b, 0.5, 0.25, 0.75) print(y) # make a 2D array and transpose it d = np.array([[1,2,3,4,5], [6,7,8,9,10], [11,12,13,14,15]]) d_trans = d.T print(d) print(d_trans)[[ 1 2 3 4 5] [ 6 7 8 9 10] [11 12 13 14 15]] [[ 1 6 11] [ 2 7 12] [ 3 8 13] [ 4 9 14] [ 5 10 15]]Very Brief Intro to Matplotlib# make a trigonometric array x = np.linspace(0,360,100) xrad = np.deg2rad(x) y1 = np.sin(xrad) y2 = np.cos(xrad) # plot this function plt.plot(xrad, y1) plt.plot(xrad, y2) plt.title('Trigonometric Function', size=20, pad=10) plt.xlabel('X') plt.ylabel('Y') # plt.xlim(min(x), max(x)) plt.show()Very Brief Intro to Pandas# convert the trigonometry result into a dataframe (a.k.a spreadsheet) trig = pd.DataFrame({'X': x, 'sin(x)': y1, 'cos(x)': y2}) # print it trig # print the first 10 rows trig.head(10) # print the last 10 rows trig.tail(10) # create another dataframe names = ['Robby', 'Alice', 'Rick', 'James', 'Alley'] ages = [20, 30, 21, 17, 18] cars = [0, 2, 3, 1, 6] data = pd.DataFrame({'Name': names, 'Age': ages, 'Cars': cars}) data # add new columns "hometown" and "gender" to our data hometown = ['Berlin', 'Madras', 'London', 'Warsaw', 'Paris'] gender = ['Male', 'Female', 'Male', 'Male', 'Female'] data['Hometown'] = hometown data['Gender'] = gender data # print only the data with "Name" column data['Name'] # or another alternative data.iloc[:,0] # print only the data in the first row data.iloc[1,:] # print only the data with "Gender" as "Female" mask_male = data['Gender'] == 'Female' data[mask_male]Entity - Regexes- Account - SID, email, NTAcct- Host - DNS- IP address - IP address (v4, v6)- URL - URL- Azure resource - ResourceID- Registry key - Registry Key- Domain name (DNS) - DNS- File - file path- File hash - MD5, SHA1, SHA256# previously defined regexes email_rgx = r"^[\w\d._%+-]+@(?:[\w\d-]+\.)+[\w]{2,}$" resourceId_rgx = r"^(\/subscriptions\/)[^/]*(\/resourcegroups\/).*$" ntacct_rgx = r"^[^\/:*?\"<>|]{2,15}\\[^\/:*?\"<>|]{2,15}$" import json def writeToJSONFile(data, path='./', fileName='regexes'): filePathNameWExt = './' + path + '/' + fileName + '.json' with open(filePathNameWExt, 'w') as fp: json.dump(data, fp) # regexes from the IoCExtract library data = {'DNS_REGEX': {'regex': r'^((?=[a-z0-9-]{1,63}\\.)[a-z0-9]+(-[a-z0-9]+)*\\.){1,126}[a-z]{2,63}$', 'priority': '1', 'entity': 'host'}, 'IPV4_REGEX': {'regex': r'^(?P(?:[0-9]{1,3}\\.){3}[0-9]{1,3})$', 'priority': '0', 'entity': 'ipaddress'}, 'IPV6_REGEX': {'regex': r'^(?(https?|ftp|telnet|ldap|file)://)\n (?P([a-z0-9-._~!$&\\\'()*+,;=:]|%[0-9A-F]{2})*@)?\n (?P([a-z0-9-._~!$&\\\'()*+,;=]|%[0-9A-F]{2})*)\n (:(?P\\d*))?\n (/(?P([^?\\#"<>\\s]|%[0-9A-F]{2})*/?))?\n (\\?(?P([a-z0-9-._~!$&\'()*+,;=:/?@]|%[0-9A-F]{2})*))?\n (\\#(?P([a-z0-9-._~!$&\'()*+,;=:/?@]|%[0-9A-F]{2})*))?$', 'priority': '0', 'entity': 'url'}, 'MD5_REGEX': {'regex': 
r'^(?:^|[^A-Fa-f0-9])(?P[A-Fa-f0-9]{32})(?:$|[^A-Fa-f0-9])$', 'priority': '1', 'entity': 'hash'}, 'SHA1_REGEX': {'regex': r'^(?:^|[^A-Fa-f0-9])(?P[A-Fa-f0-9]{40})(?:$|[^A-Fa-f0-9])$', 'priority': '1', 'entity': 'hash'}, 'SHA256_REGEX': {'regex': r'^(?:^|[^A-Fa-f0-9])(?P[A-Fa-f0-9]{64})(?:$|[^A-Fa-f0-9])$', 'priority': '1', 'entity': 'hash'}, 'LXPATH_REGEX': {'regex': r'^(?P/+||[.]+)\n (?P/(?:[^\\\\/:*?<>|\\r\\n]+/)*)\n (?P[^/\\0<>|\\r\\n ]+)$', 'priority': '2', 'entity': 'file'}, 'WINPATH_REGEX': {'regex': r'^\n (?P[a-z]:|\\\\\\\\[a-z0-9_.$-]+||[.]+)\n (?P\\\\(?:[^\\/:*?"\\\'<>|\\r\\n]+\\\\)*)\n (?P[^\\\\/*?""<>|\\r\\n ]+)$', 'priority': '1', 'entity': 'file'}} writeToJSONFile(data) def appendToJSONFile(data, fileName='regexes.json'): with open (fileName, 'w') as f: json.dump(data, f) with open ('regexes.json') as json_file: data = json.load(json_file) y = {'EMAIL_REGEX': {'regex': email_rgx, 'priority': '0', 'entity': 'account'}, 'RESOURCEID_REGEX': {'regex': resourceId_rgx, 'priority': '0', 'entity': 'azureresource'}, 'NTACCT_REGEX': {'regex': ntacct_rgx, 'priority': '0', 'entity': 'account'}} data.update(y) appendToJSONFile(data) # SID (account security identifier) regex # S-1-5-18 sid_rgx = r"^S-[\d]+-[\d]+-[\d]+$" with open ('regexes.json') as json_file: data = json.load(json_file) y = {'SID_REGEX': {'regex': sid_rgx, 'priority': '1', 'entity': 'account'}} data.update(y) appendToJSONFile(data) # Registry key regex # HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Console\Nls regkey_rgx = r"""("|'|\s)?(?PHKLM|HKCU|HKCR|HKU|HKEY_(LOCAL_MACHINE|USERS|CURRENT_USER|CURRENT_CONFIG|CLASSES_ROOT))(?P(\\[^"'\\/]+){1,}\\?)("|'|\s)?""" with open ('regexes.json') as json_file: data = json.load(json_file) y = {'REGKEY_REGEX': {'regex': regkey_rgx, 'priority': '1', 'entity': 'registrykey'}} data.update(y) appendToJSONFile(data)from numpy import dot from numpy.linalg import norm import numpy as np def cos_sim(A, B): return dot(A, B)/(norm(A)*norm(B)) doc1 = np.array([1,0,1,1,1,]) doc2 = np.array([2,1,0,1,1,]) doc3 = np.array([3,2,0,2,2,]) cos_sim(doc1, doc2) cos_sim(doc1, doc3) id : RmqV8FRJNSg2N91ykdKS secret key : Vkqv9j1hmw from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import TfidfTransformer corpus = [ 'you know I want your love', 'I like you', 'what should I do ', ] tfidf = TfidfVectorizer() tfidf.fit_transform(corpus).toarray()recommanded with cos simuliatydata from https://www.kaggle.com/rounakbanik/the-movies-dataset/version/7?select=movies_metadata.csvimport pandas as pd df_data = pd.read_csv('./movies_metadata.csv') df_data.head(3) df_data.columns data = df_data.head(10000) data = data.dropna() data.info() from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer() tfidf_matrix = tfidf.fit_transform(data['overview']) tfidf_matrix.shape len(tfidf.vocabulary_) print(tfidf_matrix[4].toarray()) from sklearn.metrics.pairwise import linear_kernel cosin_sim = linear_kernel(tfidf_matrix, tfidf_matrix) type(cosin_sim), cosin_sim.shape cosin_sim[3] indeces = pd.Series(data.index, index=data['title']).drop_duplicates() indeces.head(5) indeces['From Dusk T'] list(cosin_sim[69]) cosin_list = list(enumerate(cosin_sim[69])) cosin_list cosin_index = sorted(list(cosin_list, key=lambda x:x[1], reverse=True) # cosin_index[1:11] data.iloc[]movieBig-O DemoHere's a little code demo that may help reinforce Big-O.We're seeking to quantify an order of magnitude when describing run time performance. 
If two competing algorithms operating on big complex problems finish within a few seconds or microseconds of each other, then we'd say they both have the same run time performance (Big-O). What we're talking about is: one algorithm takes seconds or minutes, and the other takes days or longer.The code below reads an integer (`n`) from the user and creates two processes. In one process, the product of `n` * `n` is calculated through straight multiplication. In the other process, the product of `n` * `n` is calculated in a loop using repeated addition. They both come up with the same answer, so which one is better? The answer depends on how big `n` is.There are a few important notes about this little demo:1. There's no exception handling for the input, so it's easy to crash it.2. The processes are synchronized so they both start their calculations at the same time, within a few microseconds :-). The program also implements a thread lock to ensure both processes don't try to write to `stdout` at the same time. This thread locking has no impact on the elapsed time calculations that get displayed and should not influence the results. The elapsed time display is in the form: `H:MM:SS.Micro Seconds`.3. You should notice that for small-ish integer inputs (let's say up to about `100000`), the processes run in nearly the same amount of time (addition may actually be faster in some cases). Multiplication in a microprocessor is more complex than addition. As Computer Scientists like to say: it's *expensive*. Assuming that multiplication is one step (as discussed in the class notes), we would classify the process using multiplication as running in constant time `O(1)` and the process using addition as running in `O(n)` time. No matter how big the input is, multiplication always finishes in roughly the same time (even if it may lose to repeated addition for smaller inputs); however as the size of the input grows, the run time performance of repeated addition gets much worse (grows linearly), because it's proportional to the size of the input.Run the code a few times with different inputs and see what happens. What if you enter `1`? How about `1000`? How about `10000`? Try 1 followed by 8 zeros. On my machine that took the multiplication process about .03 seconds and the addition process about 3.5 seconds. Where's the crossover when multiplication runs in less time than repeated addition? What if you try a really, really big input and let it run overnight?*Performance depends on the speed of the computer running this code, scheduler loading, available memory, etc.*#! 
/usr/bin/env python3 from multiprocessing import Barrier, Lock, Process from datetime import datetime # --------------------------------------------- def cleanTime(t): hh = t.hour mm = t.minute ss = t.second us = t.microsecond return f'{hh:02d}:{mm:02d}:{ss:02d}:{us}' # --------------------------------------------- def startClock(mode, start): print(f'{cleanTime(start)} start process using {mode}') print('---------------------------------------------------') # --------------------------------------------- def stopClock(mode, result, start, stop): print(f'{cleanTime(stop)} finish process using {mode}') print(f'n squared = {result}') print(f'Elapsed time = {str(stop-start)}') print('---------------------------------------------------') # --------------------------------------------- def Addition(barrier, lock, n): start = datetime.now() with lock: startClock('Addition', start) barrier.wait() # Calculate n^2 using addition squared = 0 for i in range(n): squared += n stop = datetime.now() with lock: stopClock('Addition', squared, start, stop) # --------------------------------------------- def Multiplication(barrier, lock, n): start = datetime.now() with lock: startClock('Multiplication', start) barrier.wait() # Calculate n^2 using multiplication squared = n*n stop = datetime.now() with lock: stopClock('Multiplication', squared, start, stop) # --------------------------------------------- def main(): barrier = Barrier(2) lock = Lock() n = int(input("Enter an integer: ")) print() Process(target=Addition, args=(barrier, lock, n)).start() Process(target=Multiplication, args=(barrier, lock, n)).start() # --------------------------------------------- if __name__ == '__main__': main()TP 3 Utilisation des données textuellesimport org.apache.spark.ml.feature.{IDF, Tokenizer, RegexTokenizer, StopWordsRemover, CountVectorizer, StringIndexer, OneHotEncoder, VectorAssembler} import org.apache.spark.ml.{Pipeline, PipelineModel} import org.apache.spark.ml.classification.LogisticRegression val df : DataFrame = spark.read .parquet("data/prepared_trainingset") df.show() df.select($"text".isNull).groupBy($"(text IS NULL)").count().show() val tokenizer = new RegexTokenizer() .setPattern("\\W+") .setGaps(true) .setInputCol("text") .setOutputCol("tokens") val stopWordsRemover = new StopWordsRemover() .setInputCol("tokens") .setOutputCol("tokensWOstopwords") val cvModel = new CountVectorizer() .setInputCol("tokensWOstopwords") .setOutputCol("countedWord") .setMinDF(2) //a word has to appear 2 times to be in the vocabulary .fit(stopWordsRemover.transform(tokenizer.transform(df))) val idf = new IDF() .setInputCol("countedWord") .setOutputCol("tfidf")Conversion des variables catégorielles en variables numériquesval indexerCountry = new StringIndexer() .setInputCol("country2") .setOutputCol("country_indexed") val indexerCurrency = new StringIndexer() .setInputCol("currency2") .setOutputCol("currency_indexed") val onehotencoderCountry = new OneHotEncoder() .setInputCol("country_indexed") .setOutputCol("country_onehot") val onehotencoderCurrency = new OneHotEncoder() .setInputCol("currency_indexed") .setOutputCol("currency_onehot") val assembler = new VectorAssembler() .setInputCols(Array("tfidf", "days_campaign", "hours_prepa", "goal", "country_onehot", "currency_onehot")) .setOutputCol("features") val lr = new LogisticRegression() .setElasticNetParam(0.0) .setFitIntercept(true) .setFeaturesCol("features") .setLabelCol("final_status") .setStandardization(true) .setPredictionCol("predictions") 
.setRawPredictionCol("raw_predictions") .setThresholds(Array(0.7, 0.3)) .setTol(1.0e-6) .setMaxIter(20) val splits = df.randomSplit(Array(0.9, 0.1)) val training = splits(0).cache() val test = splits(1) val pipeline = new Pipeline() .setStages(Array(tokenizer, stopWordsRemover,cvModel,idf, indexerCountry,indexerCurrency, onehotencoderCountry, onehotencoderCurrency, assembler, lr)) val model = pipeline.fit(training) df.show() val predic = model.transform(test)predic.select("features","raw_predictions","probability","predictions","final_status").show()import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator val evaluator = new MulticlassClassificationEvaluator() .setLabelCol("final_status") .setPredictionCol("predictions") .setMetricName("f1") val result = evaluator.evaluate(predic) val sameModel = PipelineModel.load("spark-logistic-regression-model") val paramGrid = new ParamGridBuilder() .addGrid(cvModel.minDF, Array(55.0,75.0,95.0)) .addGrid(lr.regParam, Array(10e-8, 10e-6, 10e-4, 10e-2)) .build() val lrtv = TrainValidationSplit() .setEstimator(pipeline) .setEstimatorParamMaps(paramGrid) .setEvaluator(evaluator) val modelGrid = lrtv.fit(training) import org.apache.spark.ml.feature.{IDF, Tokenizer, RegexTokenizer, StopWordsRemover, CountVectorizer, StringIndexer, OneHotEncoder, VectorAssembler,CountVectorizerModel} import org.apache.spark.ml.{Pipeline, PipelineModel} import org.apache.spark.ml.classification.LogisticRegression import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit,TrainValidationSplitModel, CrossValidator, CrossValidatorModel} val paramGrid = new ParamGridBuilder() .addGrid(cvModel.minDF, Array(55.0,75.0,95.0)) .addGrid(lr.regParam, Array(10e-8, 10e-6, 10e-4, 10e-2)) .build() val lrcv = new CrossValidator() .setEstimator(pipeline) .setEstimatorParamMaps(paramGrid) .setEvaluator(evaluator) .setNumFolds(5) val modelGridCV = lrcv.fit(training)DBSCAN DBSCAN is a really cool unsupervised learning. In contrast to Gaussian Mixture, it has the capability of detecting non-linear boundaries. It does not require an assumption of the number of the clusters. Instead, it requires the minimum number of samples to construct a cluster and a parameter ε to determine the radius of the cluster. 
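In scikit-learn these two knobs map directly onto `eps` and `min_samples` (a minimal usage sketch, assuming a feature matrix `X`; the from-scratch implementation and the sklearn comparison used in this notebook follow below).

from sklearn.cluster import DBSCAN

# eps is the neighbourhood radius, min_samples the minpts requirement;
# points labelled -1 are treated as noise
labels = DBSCAN(eps=0.6, min_samples=5).fit_predict(X)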
A cluster is formed when sufficient data points are within the preset radius or reachable from the data points within the radius.For technical details of DBSCAN, you can check the original paper.https://aaai.org/Papers/KDD/1996/KDD96-037.pdf import sklearn.neighbors import sklearn.cluster import sklearn.decomposition import sklearn.datasets import matplotlib.pyplot as plt import pandas as pd import numpy as np #create dataframe xcol=['x0','x1','x2','x3'] ycol='y' iris=sklearn.datasets.load_iris() df=pd.DataFrame(iris.data,columns=xcol) #each input factor has to be float/int #sklearn has already done that for us #but if you are using the dataset inside the repository #you can use np.unique to convert categorical data to int df[ycol]=iris.target #minpts denotes the minimum requirement to form a cluster #minpts should be larger than 2 #there are different empirical rules #it can be 2 times dimensions #or it can be dimensions+1 minpts=1+len(xcol) #epsilon denotes the maximum distance of points inside the same cluster #later we will discuss how to find optimal eps via knn #right now we just assume its 0.6 eps=0.6 #we compute euclidean distance from all other data points #we check if there are minpts neighbors suffice the distance condition #the distance condition is that euclidean distance is smaller than eps def check_neighbors(data,datapoint,minpts,eps): #compute euclidean distance temp=data[xcol]-datapoint temp['euclidean distance']=[sum([j**2 for j in temp.loc[i]])**0.5 for i in range(len(temp))] #reset index to locate the neighbors temp.reset_index(inplace=True) #we use minpts+1 becuz we include the core point itself #if there are enough neighbors #output the indices of neighbors if len(temp[temp['euclidean distance']<=eps])>=minpts+1: return temp[temp['euclidean distance']<=eps].index.tolist() else: return [] #core part of the algorithm def density_based(data,xcol,eps,minpts): checked=[] cluster=-1 data['predict']=np.nan for i in data.index.tolist(): #if the selected point is unchecked #we check if it has enough neighbors to become a core point if i not in checked: datapoint=data[xcol].loc[i] neighbors=check_neighbors(data,datapoint,minpts,eps) checked.append(i) #if it is confirmed to be the core point #we remove itself from the neighbors #we increment the cluster index #and assign the cluster label to the core point if neighbors: neighbors.remove(i) cluster+=1 data.at[i,'predict']=cluster else: continue else: continue #we check if the neighbors can be become core points #the process is pretty similar to the above #this part can be written in recursion #check the link below for more details # https://github.com/je-suis-tm/recursion-and-dynamic-programming while neighbors: j=neighbors.pop() data.at[j,'predict']=cluster if j not in checked: datapoint=data[xcol].loc[j] newneighbors=check_neighbors(data,datapoint,minpts,eps) checked.append(j) neighbors+=newneighbors #we set noise at -1 data['predict']=data['predict'].fillna(-1) return data #for unsupervised learning, clf.score doesnt return the accuracy #there is no cross validation, no known labels #the only way to detect the accuracy is vote of the majority #for each label given #we check which iris type is the majority #we consider the majority as the correct classification #all we need to do is to count the minority def get_accuracy(data,class_,checked): df=data.copy() #use dictionary to keep track of everything d={} #counting for i in df['predict'][df['y']==class_].unique(): if i not in checked and i!=-1: 
d[i]=df['predict'][df['y']==class_].tolist().count(i) #comparison temp=-1 lbl=None for i in d: if d[i]>temp: lbl=i temp=d[i] return len(df['predict'][df['y']==class_][df['predict']!=lbl]) #aggregate all functions into one big main function def spatial_clustering(df,xcol,eps,minpts): #machine learning data=density_based(df,xcol,eps,minpts) #compute accuracy erreur=0 checked=[] for i in data['y'].unique(): erreur+=get_accuracy(data,i,checked) checked.append(i) accuracy=1-erreur/len(df) print('accuracy: %s'%(accuracy)) #dbscan implemented by sklearn def skl_dbscan(df,xcol,eps,minpts): #machine learning clf=sklearn.cluster.DBSCAN(eps=eps,min_samples=minpts) clf.fit(df[xcol]) df['predict']=clf.fit_predict(df[xcol]) #compute accuracy erreur=0 checked=[] for i in df['y'].unique(): erreur+=get_accuracy(df,i,checked) checked.append(i) accuracy=1-erreur/len(df) print('accuracy: %s'%(accuracy)) spatial_clustering(df,xcol,eps,minpts) skl_dbscan(df,xcol,eps,minpts) #for the purpose of visualization #we reduce 4 dimensions to 2 dims=2 x=sklearn.decomposition.PCA(n_components=dims).fit_transform(df[xcol]) data=density_based(df,xcol,eps,minpts) data['dim1']=x[:,0] data['dim2']=x[:,1] #viz fig=plt.figure(figsize=(10,5)) ax=fig.add_subplot(111) for i in set(data['predict']): if i!=-1: plt.scatter(data['dim1'][data['predict']==i], data['dim2'][data['predict']==i], label=f'Label {int(i)}',alpha=0.7) else: plt.scatter(data['dim1'][data['predict']==i], data['dim2'][data['predict']==i], label='Noise',alpha=0.7) plt.title('DBSCAN') plt.legend(loc=0) plt.show()  Selection of ε The selection of ε requires delicate balancing. If ε is too large, we will have larger but fewer clusters, vice versa. The optimal ε can be selected via Knee Method. It is quite similar to elbow method.* Use K Nearest Neighbor with K=minpts-1* Compute and plot KNN distance* Find the point where the greatest surge occurs* The underlying KNN distance is the optimal εAlternatively, we can use a modified version of DBSCAN, called OPTICS. OPTICS only requires minpts and it does not require ε. 
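In scikit-learn this variant is available as `sklearn.cluster.OPTICS`; a minimal usage sketch (with the steepness parameter left at its default), assuming the `df`, `xcol` and `minpts` defined above:

from sklearn.cluster import OPTICS

# only min_samples is required; max_eps defaults to infinity, so no eps is needed
optics_labels = OPTICS(min_samples=minpts).fit_predict(df[xcol])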
To determine the number of the clusters, OPTICS requires a new parameter called ξ to determine steep upward and downward areas in a reachability plot.The details of KNN can be found in the link below.https://github.com/je-suis-tm/machine-learning/blob/master/k%20nearest%20neighbors.ipynbThe details of Elbow Method can be found in the link below.https://github.com/je-suis-tm/machine-learning/blob/master/k%20means.ipynbThe details of OPTICS can be found in the link below.https://github.com/je-suis-tm/machine-learning/blob/master/optics.ipynb #create dual axis plot def dual_axis_plot(xaxis,data1,data2,fst_color='r', sec_color='b',fig_size=(10,5), x_label='',y_label1='',y_label2='', legend1='',legend2='',grid=False,title=''): #preset the figure fig=plt.figure(figsize=fig_size) ax=fig.add_subplot(111) #plot left hand side ax.set_xlabel(x_label) ax.set_ylabel(y_label1, color=fst_color) ax.plot(xaxis, data1, color=fst_color,label=legend1) ax.tick_params(axis='y',labelcolor=fst_color) ax.yaxis.labelpad=15 plt.legend(loc=3) #the crucial part of the function #both curves share x axis ax2=ax.twinx() #plot right hand side ax2.set_ylabel(y_label2, color=sec_color,rotation=270) ax2.plot(xaxis, data2, color=sec_color,label=legend2) ax2.tick_params(axis='y',labelcolor=sec_color) ax2.yaxis.labelpad=15 plt.legend(loc=4) #enfin fig.tight_layout() plt.grid(grid) plt.title(title) plt.show() #estimate parameters of linear equation def get_line_params(x1,y1,x2,y2): a=(y1-y2)/(x1-x2) b=y1-a*x1 return a,b #compute perpendicular distance def get_distance(x,y,a,b): temp1=y-x*a-b temp2=(a**2+1)**0.5 return np.abs(temp1/temp2) #knee method for knn to find the optimal epsilon def find_optimal_eps(distances): #compute perpendicular distance a,b=get_line_params(0,distances[0],len(distances)-1,distances[-1]) distance=[] for i in range(len(distances)): distance.append(get_distance(i,distances[i],a,b)) #viz dual_axis_plot(np.arange(1,len(distance)+1),distances,distance, x_label='Points Sampled by Distance', y_label1='NN Distance', y_label2='Perpendicular Distance', legend1='NN',legend2='Knee', title='Knee Method for KNN', fst_color='#0abda0',sec_color='#720017') #get optimal eps optimal=distances[distance.index(max(distance))] return optimal #using k distance graph to get the optimal eps def get_eps(data,xcol,minpts): #use knn to compute k distance clf=sklearn.neighbors.NearestNeighbors(n_neighbors=minpts-1) clf.fit(data[xcol]) #prepare for visualization distances,_=clf.kneighbors(data[xcol]) distances=sorted(distances.tolist(),key=lambda x:x[1]) distances=[i[1] for i in distances] #viz and get eps optimal=find_optimal_eps(distances) return optimal eps=get_eps(df,xcol,minpts) #unfortunately this seems to be a terrible choice of eps #domain knowledge is absolutely more important eps spatial_clustering(df,xcol,eps,minpts) skl_dbscan(df,xcol,eps,minpts)accuracy: 0.48Given a list of unique random intergers:- Split and merge sub lists into a sorted list PREQUISITESimport random def make(n): nums = [i for i in range(n)] for i in range(n): rnd = random.randint(0, n - 1) nums[i], nums[rnd] = nums[rnd], nums[i] return numsALGORITHMdef mergeSort(nums): if len(nums) > 1: L = nums[:len(nums) // 2] R = nums[len(nums) // 2:] mergeSort(L) mergeSort(R) idx_l, idx_r, idx = 0, 0, 0 while idx_l < len(L) and idx_r < len(R): if L[idx_l] < R[idx_r]: nums[idx] = L[idx_l] idx_l += 1 else: nums[idx] = R[idx_r] idx_r += 1 idx += 1 while idx_l < len(L): nums[idx] = L[idx_l] idx_l += 1 idx += 1 while idx_r < len(R): nums[idx] = R[idx_r] idx_r += 1 idx 
+= 1TESTnums = make(20) mergeSort(nums) print(nums) for idx, val in enumerate(nums): assert idx == valExample 4 - on the cluster==========================This example shows how to run HpBandster in a cluster environment.The actual python code does differ substantially from example 3, except for ashared directory that is used to communicate the location of the nameserver toevery worker, and the fact that the communication is done over the network insteadof just the loop back interface.To actually run it as a batch job, usually a shell script is required.Those differer slightly from scheduler to scheduler.Here we provide an example script for the Sun Grid Engine (SGE), but adapting that toany other scheduler should be easy.The script simply specifies the logging files for output (`-o`) and error `-e`),loads a virtual environment, and then executes the master for the first array taskand a worker otherwise.Array jobs execute the same source multiple times and are bundled together into one job,where each task gets a unique task ID.For SGE those IDs are positive integers and we simply say the first task is the master... code-block:: bash submit via qsub -t 1-4 -q test_core.q example_4_cluster_submit_me.sh $ -cwd $ -o $JOB_ID-$TASK_ID.o $ -e $JOB_ID-$TASK_ID.e enter the virtual environment source ~sfalkner/virtualenvs/HpBandSter_tests/bin/activate if [ $SGE_TASK_ID -eq 1] then python3 example_4_cluster.py --run_id $JOB_ID --nic_name eth0 --working_dir . else python3 example_4_cluster.py --run_id $JOB_ID --nic_name eth0 --working_dir . --worker fiYou can simply copy the above code into a file, say submit_me.sh, and tell SGE to run it via:.. code-block:: bash qsub -t 1-4 -q your_queue_name submit_me.shNow to the actual python source:import logging logging.basicConfig(level=logging.INFO) import argparse import pickle import time import hpbandster.core.nameserver as hpns import hpbandster.core.result as hpres from hpbandster.optimizers import BOHB as BOHB from hpbandster.examples.commons import MyWorker parser = argparse.ArgumentParser(description='Example 1 - sequential and local execution.') parser.add_argument('--min_budget', type=float, help='Minimum budget used during the optimization.', default=9) parser.add_argument('--max_budget', type=float, help='Maximum budget used during the optimization.', default=243) parser.add_argument('--n_iterations', type=int, help='Number of iterations performed by the optimizer', default=4) parser.add_argument('--n_workers', type=int, help='Number of workers to run in parallel.', default=2) parser.add_argument('--worker', help='Flag to turn this into a worker process', action='store_true') parser.add_argument('--run_id', type=str, help='A unique run id for this optimization run. An easy option is to use the job id of the clusters scheduler.') parser.add_argument('--nic_name',type=str, help='Which network interface to use for communication.') parser.add_argument('--shared_directory',type=str, help='A directory that is accessible for all processes, e.g. 
a NFS share.') args=parser.parse_args() # Every process has to lookup the hostname host = hpns.nic_name_to_host(args.nic_name) if args.worker: time.sleep(5) # short artificial delay to make sure the nameserver is already running w = MyWorker(sleep_interval = 0.5,run_id=args.run_id, host=host) w.load_nameserver_credentials(working_directory=args.shared_directory) w.run(background=False) exit(0) # Start a nameserver: # We now start the nameserver with the host name from above and a random open port (by setting the port to 0) NS = hpns.NameServer(run_id=args.run_id, host=host, port=0, working_directory=args.shared_directory) ns_host, ns_port = NS.start() # Most optimizers are so computationally inexpensive that we can affort to run a # worker in parallel to it. Note that this one has to run in the background to # not plock! w = MyWorker(sleep_interval = 0.5,run_id=args.run_id, host=host, nameserver=ns_host, nameserver_port=ns_port) w.run(background=True) # Run an optimizer # We now have to specify the host, and the nameserver information bohb = BOHB( configspace = MyWorker.get_configspace(), run_id = args.run_id, host=host, nameserver=ns_host, nameserver_port=ns_port, min_budget=args.min_budget, max_budget=args.max_budget ) res = bohb.run(n_iterations=args.n_iterations, min_n_workers=args.n_workers) # In a cluster environment, you usually want to store the results for later analysis. # One option is to simply pickle the Result object with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh: pickle.dump(res, fh) # Step 4: Shutdown # After the optimizer run, we must shutdown the master and the nameserver. bohb.shutdown(shutdown_workers=True) NS.shutdown()This example notebook uses the averaging functions found ins the diff_classifier msd module to find average msd profiles over input msd datasets using precision-weighted averaging. Precision is the inverse of the standard squared error. This increases the contribution of videos that have many particles and more homogeneous datasets to the final calculated MSD.import numpy as np import diff_classifier.aws as aws import diff_classifier.msd as msd folder = 'Gel_Studies/09_19_18_NP_concentration' #Folder in AWS S3 containing files to be analyzed bucket = 'ccurtis.data' #experiment = 'test' #Used for naming purposes. Should exclude XY and well information #vids = 2 to_track = [] frames = 651 fps = 100.02 umppx = 0.07 vids = 10 concs = ['1', 'pt5', 'pt1', 'pt05'] for conc in concs: for num in range(1, vids+1): #to_track.append('100x_0_4_1_2_gel_{}_bulk_vid_{}'.format(vis, num)) to_track.append('{}uL_XY{}'.format(conc, '%02d' % num)) to_track geomean = {} gSEM = {} for sample_name in to_track: # Users can toggle between using pre-calculated geomean files and calculating new values by commenting out the relevant # lines of code within the for loop. 
#aws.download_s3('{}/geomean_{}.csv'.format(folder, sample_name), 'geomean_{}.csv'.format(sample_name), bucket_name=bucket) #aws.download_s3('{}/geoSEM_{}.csv'.format(folder, sample_name), 'geoSEM_{}.csv'.format(sample_name), bucket_name=bucket) #geomean[sample_name] = np.genfromtxt('geomean_{}.csv'.format(sample_name)) #gSEM[sample_name] = np.genfromtxt('geoSEM_{}.csv'.format(sample_name)) aws.download_s3('{}/msd_{}.csv'.format(folder, sample_name), 'msd_{}.csv'.format(sample_name), bucket_name=bucket) geomean[sample_name], gSEM[sample_name] = msd.geomean_msdisp(sample_name, umppx=umppx, fps=fps, remote_folder=folder, bucket=bucket) print('Done with {}'.format(sample_name)) for conc in concs: to_avg = [] for sample in to_track: if conc in sample: to_avg.append(sample) weights, wh1 = msd.precision_weight(to_avg, gSEM) geodata = msd.precision_averaging(to_avg, geomean, gSEM, weights, bucket=bucket, folder=folder, experiment=conc)Note that in cases where two or more averaging steps are needed (for instance, if the user takes 5 videos per well with a total of four wells), averaging steps can be performed consecutively. the msd.binning function is a helpful tool by defining bins over which to average for multi-step averaging.msd.plot_all_experiments(to_track[0:10], yrange=(10**-3, 10**1), bucket=bucket, folder=folder) msd.plot_all_experiments(to_track[10:20], yrange=(10**-3, 10**1), bucket=bucket, folder=folder) msd.plot_all_experiments(to_track[20:30], yrange=(10**-3, 10**1), bucket=bucket, folder=folder) msd.plot_all_experiments(to_track[30:40], yrange=(10**-3, 10**1), bucket=bucket, folder=folder) msd.plot_all_experiments(concs, yrange=(10**-3, 10**1), bucket=bucket, folder=folder, labels=['0.005%', '0.0025%', '0.0005%', '0.00025%']) concs msd.plot_all_experiments(['2mM_5k_PEG', '2mM_5k_PEG_NH2', '2mM_PS_COOH', '2mM_PS_NH2'], yrange=(10**-3, 10**1), bucket=bucket, folder=folder)A Gentle Introduction to Programming Concepts - Using Python Introduction Play along at homeYou can follow along and through the notebooks that we will be working through by going to the GitHub repository that we manage our content in.* Repository: https://github.com/unmrds/cc-python* Introduction/Concepts (this notebook): * https://github.com/unmrds/cc-python/blob/master/1.1-Programming%20Concepts.ipynb* Variables and Data Types: * https://github.com/unmrds/cc-python/blob/master/1.2-The%20Basics.ipynb* Functions (Part 2 of the Python series): * https://github.com/unmrds/cc-python/blob/master/2.1-Functions.ipynb You can practice and play with code in Pangeo binder environment: [![Binder](https://binder.pangeo.io/badge_logo.svg)](https://binder.pangeo.io/v2/gh/unmrds/cc-python.git/master) Why learn the basic principles of programming?* Thinking algorithmically (a key element in the process used in developing programming solutions) is a powerful problem solving skill that is reinforeced with practice. Practicing programming is great practice. 
* Defining a problem with sufficient specificity that a solution can be effectively developed * Defining what the end-product of the process should be * Breaking a problem down into smaller components that interact with each other * Identifying the objects/data and actions that are needed to meet the requirements of each component * Linking components together to solve the defined problem * Identifying potential expansion points to reuse the developed capacity for solving related problems![Problem decomposition illustration](problemDecomposition.png)* Capabilities to streamline and automate routine processes through scripting are ubiquitous * Query languages built into existing tools (e.g. Excel, ArcGIS, Word) * Specialized languages for specific tasks (e.g. R, Pandoc template language, PHP) * General purpose languages for solving many problems (e.g. Bash shell, Perl, Python, C)* Repeatabilty with documentation* Scalability* Portability Why Python?* It is available as a free and [Open Source](https://opensource.org/osd-annotated) programming language that can be installed on numerous computer systems, including Windows, Linux and the Mac OS. It can even be editited and run through a web interface such as this Jupyter Notebook. * It is a modern programming language that includes many features that make it a very efficient languageto both learn programming with and write programs in.* It is readable and expressive. * It supports a variety of development models including object-oriented, procedural and functional capabilities. * It includes a standard library of functions that support significant programming capabilities including: * Handling email * Interacting with and publishing web and other online resources * Connecting with a wide variety of databases * Executing operating system commands * Developing graphical user interfaces* It is relatively easy to start to become productive in Python, though it still takes time and practice to becomean expert (as is the case with any programming language).The primary downside that is mentioned when discussing the choice of Python as a programming language is that asan interpreted language it can execute more slowly than traditional compiled languages such as C or C++. Can I Play at Home?There are a variety of ways to run Python on your computer:* You may already have a version of Python installed. Many operating systems have a version of Python installed that is used for routine processes within the operating system. You can easily check to see what version of Python might already be on your computer by typing `python` at the `Command Prompt` (Windows) or in the `Terminal` (Mac OS) and seeing what response you get. If Python is installed you will typically see information about the currently installed version and then be taken to the Python command prompt where you can start typing commands. * You can install one of the available versions directly from the Python project site: https://www.python.org/downloads/. Following this installation you will be able to execute commands from the *interactive command prompt* or you can start the *IDLE* integrated development environment (IDE). * You can install a pre-packaged python system such as the Anaconda release of Python (https://www.anaconda.com/products/individual) that has both Python 2.x and 3.x versions available for download. 
I prefer this method as it installs a copy of Python that is separate from any previous ones on your system, and allows you to execute the (enhanced) interactive Python command prompt, **and** run the Jupyter Notebook web-based environment for writing and executing Python code. The examples that we will go through today will be executed in the Jupyter Notebook environment. Running a Python EnvironmentOnce Python is installed on your computer you have a number of options for how you start up an environment where you can execute Python commands/code. 1. The most simple method is to just type `python` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If your installation was successful you will be taken to the interactive prompt. For example: UL0100MAC:~ kbene$ python Python 2.7.10 |Anaconda 2.3.0 (x86_64)| (default, May 28 2015, 17:04:42) [GCC 4.2.1 (Apple Inc. build 5577)] on darwin Type "help", "copyright", "credits" or "license" for more information. Anaconda is brought to you by Continuum Analytics. Please check out: http://continuum.io/thanks and https://binstar.org >>>2. If you would like to run the IDLE IDE you should be able to find the executable file in the folder where the Python executable installed on your system.3. If you installed the Anaconda release of Python you can type `ipython` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If you installation was successful you will be taken to an enhanced (compared with the basic Python prompt) interactive prompt. For example: (base) karl cc-python (master) >> python Python 3.8.8 (default, Apr 13 2021, 12:59:45) [Clang 10.0.0 ] :: Anaconda, Inc. on darwin Type "help", "copyright", "credits" or "license" for more information. >>>4. If you installed the Anaconda release of Python you can type `jupyter notebook` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If your installation was successful you should see some startup messages in the terminal window and your browser should open up and display the *Jupyter Notebook* interface from where you can navigate through your system's folder structure (limited to the folder that you ran the `jupyter notebook` command from and its children), and load existing notebooks or create new ones in which you can enter and execute Python commands. In more recent releases of the Anaconda Python distribution you can run the *Anaconda Navigator* from which you can run *Jupyter Notebooks* and other applications. **This is the interface that we are using for today's workshop**. ![Sample Jupyter Notebook Interface](images/jupyterNotebook.png)If, for some reason you can't run the jupyter notebook on your system, you can run the workshop content through a cloud service called Binder hosted by PanGeo: [![Binder](https://binder.pangeo.io/badge_logo.svg)](https://binder.pangeo.io/v2/gh/unmrds/cc-python.git/master) Getting HelpThere are a number of strategies that you can use for getting help with specific Python commands and syntax. First and foremost you can access the Python [documentation](https://docs.python.org/3/index.html) which will default to the most recent Python 3.x version that is in production, but from which (in the upper left corner of the page) you can select other Python versions if you are not using the version referenced by the page. Looking at and working through some of the materials in the Python [tutorial](https://docs.python.org/3/tutorial/) is also a great way to see the core Python capabilities in action. 
In some cases you can find quite a few useful and interesting resources through a resonably crafted Google search: e.g. for [`python create list`](https://www.google.com/search?client=safari&rls=en&q=python+create+list&ie=UTF-8&oe=UTF-8). You can also get targeted help for specific commands or objects from the command prompt by just using the `help()` function. Where you put the name of the command or object between the parentheses `()`. For example: >>>help(print) and >>>help(str)and >>>myVar = [1,2,3,4,5] >>>help(myVar)help(print) help(str) myVar = [1,2,3,4,5] help(myVar)Help on list object: class list(object) | list(iterable=(), /) | | Built-in mutable sequence. | | If no argument is given, the constructor creates a new empty list. | The argument must be an iterable if specified. | | Methods defined here: | | __add__(self, value, /) | Return self+value. | | __contains__(self, key, /) | Return key in self. | | __delitem__(self, key, /) | Delete self[key]. | | __eq__(self, value, /) | Return self==value. | | __ge__(self, value, /) | Return self>=value. | | __getattribute__(self, name, /) | Return getattr(self, name). | | __getitem__(...) | x.__getitem__(y) <==> x[y] | | __gt__(self, value, /) | Return self>value. | | __iadd__(self, value, /) | Implement self+=value. | | __imul__(self, value, /) | Implement self*=value. | | __init__(self, /, *args, **kwargs) | Initialize self. See help(type(self)) for accurate sign[...]Try It YourselfType in the help command in a code box in Jupyter Notebook for a few of the following commands/objects and take a look at the information you get:* `dict` - e.g. `help(dict)`* `print`* `sorted`* `float`For some commands/functions you need to import the module that that command belongs to. For example: import os help(os.path)Try this pair of commands in a code window in your Jupyter Notebook or interactive terminal.# type your help commands in the box and # execute the code in the box by typing shift-enter # (hold down the shift key while hitting the enter/return key)Setup to run on google colab (Ignore this if you are running on local machine)# Execute only on google colab # Here I will set up the way to load the dataset from google drive from google.colab import drive drive.mount('/content/gdrive') %ls /content/gdrive/MyDrive/Colab-notebooks/2039045/ # Execute only on google colab ROOT_PATH = '/content/gdrive/MyDrive/Colab-notebooks/' DATA_PATH = os.path.join(ROOT_PATH, '2039045') %ls $DATA_PATH2039045_face.npz 2039045_nose.csvDecorator to measure training timedef timeit(method): # Decorator to measure running time def timed(*args, **kwargs): start = time.time() result = method(*args, **kwargs) end = time.time() return result, round((end - start), 4) return timedDatasetDATA_PATH = './2039045' facial_data = np.load(os.path.join(DATA_PATH, '2039045_face.npz')) facial_data.files X_train = facial_data['X_train'] y_train = facial_data['y_train'] X_valid = facial_data['X_valid'] y_valid = facial_data['y_valid'] X_test = facial_data['X_test'] y_test = facial_data['y_test']Exploratory data analysis taskscat_index = dict() fig_01, ax = plt.subplots(1, 3, figsize = (7, 7)) for i in range(len(np.unique(y_train))): cat_index[i] = np.where(y_train == i)[0] # Finding out which indexes belong to each label imag = X_train[cat_index[i][0]].reshape((48, 48)) ax[i].imshow(imag, cmap = 'gray') ax[i].set_title(f'Category {i}') ax[i].axis('off') plt.show() #Combining the target labels y = np.concatenate((y_train, y_valid, y_test)) fig_02, ax = plt.subplots(1, 1, figsize = (7, 7)) 
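# (added note) The histogram below pools the train, validation and test labels so we can
# see how unevenly the three expression categories are represented.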
ax.hist(y) ax.set_xticks([0, 1, 2]) ax.set_xlabel('Category') ax.set_ylabel('Count') ax.set_title('Expression category distribution') plt.show() """The data is not balanced"""Classification tasks 1.3.1from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import classification_report import time # Train KNN with k = 1 start = time.time() knn_clf = KNeighborsClassifier(n_neighbors = 1, weights = 'uniform') knn_clf.fit(X_train, y_train) end = time.time() time_taken = end - start print('training time: ', time_taken) #Calculating accuracy for test set knn_acc_test = knn_clf.score(X_test, y_test) print(f'KNN (k = 1) accuracy on test set: {knn_acc_test}') #Displaying two mis-classified images for each class y_pred_knn_test = knn_clf.predict(X_test) mis_classified_index = np.where((y_pred_knn_test == y_test) == False) fig_03, ax = plt.subplots(3, 2, figsize = (10, 10)) for i in range(3): imgs = np.zeros((2, 48, 48)) count = 0 for mis_index in mis_classified_index[0]: if count == 2: break if (y_test[mis_index] == i): imgs[count] = X_test[mis_index].reshape((48, 48)) ax[i, count].imshow(imgs[count], cmap = 'gray') ax[i, count].set_ylabel(f'Actual class {i}') ax[i, count].set_title(f'Predicted class {y_pred_knn_test[mis_index]}') ax[i, count].tick_params(axis = 'x', which = 'both', bottom = False, labelbottom = False) ax[i, count].tick_params(axis = 'y', which = 'both', left = False, labelleft = False) count += 1 plt.show() # Providing other metrics knn_report_test = classification_report(y_test, y_pred_knn_test) print('KNN (k = 1) classification report') print(50 * '-') print(knn_report_test)training time: 0.0014481544494628906 KNN (k = 1) accuracy on test set: 0.78318584070796461.3.2# I will find the best number of neighbors knn_acc_arr = [0] for i in np.arange(1, 21): knn_clf_search = KNeighborsClassifier(n_neighbors = i, weights = 'uniform') knn_clf_search.fit(X_train, y_train) knn_acc_arr.append(knn_clf_search.score(X_test, y_test)) knn_acc_arr = np.array(knn_acc_arr) best_k = np.argmax(knn_acc_arr) print(f'Best number of neighbors: {best_k}') fig_04, ax = plt.subplots() ax.plot(knn_acc_arr[1:]) ax.set_xticks(np.arange(0, 20)) ax.set_xticklabels(list(np.arange(1, 21))) ax.set_xlabel('Number of neighbors') ax.set_ylabel('Accuracy on test set') plt.show()1.3.3X_train.shape, y.shape print(np.bincount(y_train), np.bincount(y_valid), np.bincount(y_test))[963 372 611] [117 45 83] [122 40 64]Balancing the datasetfrom imblearn.over_sampling import RandomOverSampler over_sample = RandomOverSampler(sampling_strategy = 'not majority') X_train_oversample, y_train_oversample = over_sample.fit_resample(X_train, y_train) print(X_train_oversample.shape, np.bincount(y_train_oversample))(2889, 2304) [963 963 963]Training and evaluating models Class and function to find the best hyperparametersfrom sklearn.base import BaseEstimator, TransformerMixin from sklearn.model_selection import ParameterGrid from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report class OptimizingParamaters(BaseEstimator, TransformerMixin): # A class which has a purpose of finding the best parameters def __init__(self, model, params_dict, data_transformer = None): self.data_transformer = data_transformer self.params_dict_ = params_dict self.model = model self.best_params_ = {} self.best_acc_ = float('-inf') self.best_time_ = 0 self.best_metrics_ = {} self.best_yPred_ = [] @timeit def model_training(self, params, X, y = None): #Training the model self.model.set_params(**params) 
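# (added note) set_params applies one hyperparameter combination before fitting; because of
# the @timeit decorator above, calls to model_training return a (result, elapsed_seconds)
# tuple rather than the bare result.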
self.model.fit(X, y) def fit(self, X, y = None): if (self.data_transformer is not None): X_valid_transformed = self.data_transformer.transform(X_valid) else: X_valid_transformed = X_valid for combination in list(ParameterGrid(self.params_dict_)): # Running through every combination of parameters _, computation_time = self.model_training(combination, X, y) y_pred = self.predict(X_valid_transformed) acc = accuracy_score(y_valid, y_pred) if acc > self.best_acc_: self.best_acc_ = acc self.best_params_ = combination self.best_time_ = computation_time self.best_yPred_ = list(y_pred) self.best_metrics_ = classification_report(y_valid, self.best_yPred_, output_dict = True) def transform(self, X): pass def predict(self, X): return self.model.predict(X) def predict_proba(self, X): return self.model.predict_proba(X) def score(self, X, y): return self.model.score(X, y) def full_pipeline(X_train, y_train, X_test, y_test, model, param_grid, data_transformer = None): if data_transformer is not None: #try: #X_train_transformed = data_transformer.fit_transform(X_train) #X_test_transformed = data_transformer.transform(X_test) #except ValueError: X_train_transformed = data_transformer.fit_transform(X_train, y_train) X_test_transformed = data_transformer.transform(X_test) else: X_train_transformed = X_train X_test_transformed = X_test # Finding the best hyperparameters hyperpara_optimizer = OptimizingParamaters(model, param_grid, data_transformer) hyperpara_optimizer.fit(X_train_transformed, y_train) best_params = hyperpara_optimizer.best_params_ best_time = hyperpara_optimizer.best_time_ # Metrics tested on test set best_model = model.set_params(**hyperpara_optimizer.best_params_) best_model.fit(X_train_transformed, y_train) y_pred = best_model.predict(X_test_transformed) test_metrics = classification_report(y_test, y_pred, output_dict = True) # Extract accuracy, macro-avg precison, recall, and F1 score. 
I use [:-1] because I don't want to extract support important_metrics = [test_metrics['accuracy'], *test_metrics['macro avg'].values()][:-1] important_metrics.insert(0, best_time) # [time, accuracy, macro-avg precision, recall, F1] and a best hyperparameters dict return important_metrics, best_paramsLogistic Regressionfrom sklearn.linear_model import LogisticRegression from itertools import chain from sklearn.decomposition import PCA log_clf = LogisticRegression(random_state = 42, max_iter = 1000) log_grid = {'C': list(chain.from_iterable((0.001 * (10 ** i), 0.005 * (10 ** i)) for i in range(6)))} pca = PCA(n_components = 0.95) # I want to preserve 95% of the training set variance log_metrics, log_params = full_pipeline(X_train = X_train_oversample, y_train = y_train_oversample, X_test = X_test, y_test = y_test, model = log_clf, param_grid = log_grid, data_transformer = pca) print(log_metrics) print(40 * '-') print(log_params)[1.0263, 0.8761061946902655, 0.8540272844704525, 0.8707308743169399, 0.8608392679082634] ---------------------------------------- {'C': 0.5}Decision Trees with Gradient Boostingfrom sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestClassifier gb_clf = GradientBoostingClassifier(n_estimators = 100, max_depth = 3, learning_rate = 0.1) gb_grid = {'max_depth': list(range(1, 6)), 'max_features' : [None, 'sqrt', 'log2', 0.3]} select_rndForests = SelectFromModel(RandomForestClassifier(n_estimators = 100, random_state=42), threshold = 'median') # Using random forests to choose the best features gb_metrics, gb_params = full_pipeline(X_train = X_train_oversample, y_train = y_train_oversample, X_test = X_test, y_test = y_test, model = gb_clf, param_grid = gb_grid, data_transformer = select_rndForests) print(gb_metrics) print(40 * '-') print(gb_params)[5.7657, 0.8805309734513275, 0.8598136947431302, 0.8392076502732241, 0.8485769980506822] ---------------------------------------- {'max_depth': 4, 'max_features': 'sqrt'}SVMfrom sklearn.svm import SVC from itertools import chain svc_clf = SVC(random_state = 42) svc_grid = {'C': [0.001 * (10 ** i) for i in range(6)], 'gamma': ['scale', 'auto', 0.001, 0.01, 0.1, 1, 10]} pca = PCA(n_components = 0.95) # I want to preserve 95% of the training set variance svc_metrics, svc_params = full_pipeline(X_train = X_train_oversample, y_train = y_train_oversample, X_test = X_test, y_test = y_test, model = svc_clf, param_grid = svc_grid, data_transformer = pca) print(svc_metrics) print(40 * '-') print(svc_params)[0.3717, 0.911504424778761, 0.8996499853672812, 0.8769637978142076, 0.8863141968806895] ---------------------------------------- {'C': 10.0, 'gamma': 'scale'}Voting classifierfrom sklearn.ensemble import VotingClassifier voting_clf = VotingClassifier(estimators = [('lr', log_clf), ('gb', gb_clf), ('svc', svc_clf)], voting = 'hard', n_jobs = -1) voting_grid = {} pca = PCA(n_components = 0.95) voting_metrics, voting_params = full_pipeline(X_train = X_train_oversample, y_train = y_train_oversample, X_test = X_test, y_test = y_test, model = voting_clf, param_grid = voting_grid, data_transformer = None) print(voting_metrics) print(40 * '-') print(voting_params)[34.4884, 0.9070796460176991, 0.9199125725843283, 0.8661543715846994, 0.8868675196278836] ---------------------------------------- {}Neural network# Reshape each image to 48 * 48 X_train_reshaped = X_train_oversample.reshape(-1, 48, 48) / 255 
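# (added note) dividing by 255 rescales the pixel values (assumed to be in 0-255) to [0, 1];
# the validation and test images below receive the same scaling so the CNN sees
# consistently normalised inputs.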
X_valid_reshaped = X_valid.reshape(-1, 48, 48) / 255 X_test_reshaped = X_test.reshape(-1, 48, 48) / 255 print(X_train_reshaped.shape) X_train_reshaped = X_train_reshaped[..., np.newaxis] # Reshaped to 2889*48*48*1 X_valid_reshaped = X_valid_reshaped[..., np.newaxis] X_test_reshaped = X_test_reshaped[..., np.newaxis] def build_cnn_model(learning_rate = 1e-2): # Building a CNN model = keras.models.Sequential([ keras.layers.Conv2D(64, 7, activation = 'relu', padding = 'same', input_shape = [48, 48, 1]), keras.layers.MaxPool2D(2), keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same'), keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same'), keras.layers.MaxPool2D(2), keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same'), keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same'), keras.layers.MaxPool2D(2), keras.layers.Flatten(), keras.layers.Dense(128, activation = 'relu'), keras.layers.Dropout(0.5), keras.layers.Dense(64, activation = 'relu'), keras.layers.Dropout(0.5), keras.layers.Dense(3, activation = 'softmax') ]) optimizer = keras.optimizers.Adam(learning_rate = learning_rate) model.compile(loss = 'sparse_categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'] ) return model from sklearn.metrics import classification_report from sklearn.model_selection import ParameterGrid from sklearn.metrics import accuracy_score cnn_clf = keras.wrappers.scikit_learn.KerasClassifier(build_cnn_model) cnn_grid = {'learning_rate': [0.0001, 0.001, 0.005, 0.01, 0.05, 0.1]} def cnn_pipeline(X_train, y_train, X_valid, y_valid, X_test, y_test, model, param_grid): best_acc = float('-inf') best_time = 0 best_yPred = [] best_params = {} best_metrics = {} for combination in list(ParameterGrid(param_grid)): # Running through every combination of parameters model.set_params(**combination) start = time.time() model.fit(X_train, y_train, epochs = 100, validation_data = (X_valid, y_valid), callbacks = [keras.callbacks.EarlyStopping(patience = 5, restore_best_weights = True)], ) end = time.time() computation_time = np.round(end - start, 4) y_pred = model.predict(X_valid) acc = accuracy_score(y_valid, y_pred) if acc > best_acc: #Saving parameters for the current model performs well on the validation data set best_acc = acc best_params = combination best_time = computation_time # Metrics tested on test set best_model = model.set_params(**best_params) model.fit(X_train, y_train, epochs = 100, validation_data = (X_valid, y_valid), callbacks = [keras.callbacks.EarlyStopping(patience = 10, restore_best_weights = True)], ) y_pred_test = best_model.predict(X_test) test_metrics = classification_report(y_test, y_pred_test, output_dict = True) # Extract accuracy, macro-avg precison, recall, and F1 score. 
I use [:-1] because I don't want to extract support important_metrics = [test_metrics['accuracy'], *test_metrics['macro avg'].values()][:-1] important_metrics.insert(0, best_time) # [time, accuracy, macro-avg precision, recall, F1] and a best hyperparameters dict return important_metrics, best_params cnn_metrics, cnn_params = cnn_pipeline(X_train = X_train_reshaped, y_train = y_train_oversample, X_valid = X_valid_reshaped, y_valid = y_valid, X_test = X_test_reshaped, y_test = y_test, model = cnn_clf, param_grid = cnn_grid) print(cnn_metrics) print(40 * '-') print(cnn_params)[27.7706, 0.9778761061946902, 0.968204365079365, 0.9782616120218579, 0.9730218621917625] ---------------------------------------- {'learning_rate': 0.0001}Saving figures and tablesIMAGES_PATH = os.path.join(ROOT_PATH, 'facial_images') fig_01.savefig(os.path.join(IMAGES_PATH, 'fig_01.png')) fig_02.savefig(os.path.join(IMAGES_PATH, 'fig_02.png')) fig_03.savefig(os.path.join(IMAGES_PATH, 'fig_03.png')) fig_04.savefig(os.path.join(IMAGES_PATH, 'fig_04.png')) import six df = pd.read_csv(os.path.join(ROOT_PATH, 'facial-result.csv')) def render_mpl_table(data, col_width=2.0, row_height=1.0, font_size=14, header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w', bbox=[0, 0, 1, 1], header_columns=0, ax=None, **kwargs): if ax is None: size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height]) fig, ax = plt.subplots(figsize=size) ax.axis('off') mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs) mpl_table.auto_set_font_size(False) mpl_table.set_fontsize(font_size) for k, cell in six.iteritems(mpl_table._cells): cell.set_edgecolor(edge_color) if k[0] == 0 or k[1] < header_columns: cell.set_text_props(weight='bold', color='w') cell.set_facecolor(header_color) else: cell.set_facecolor(row_colors[k[0]%len(row_colors) ]) return fig, ax fig_05, ax = render_mpl_table(df, header_columns=0, col_width=5.0) fig_05.savefig(os.path.join(IMAGES_PATH, 'fig_05.png')) df### importing drive from google.colab import drive drive.mount('/content/gdrive') # !unzip '/content/gdrive/MyDrive/BCG/stumbleupon.zip' -d '/content/gdrive/MyDrive/BCG' ### libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns ### Config File train = '/content/gdrive/MyDrive/BCG/train.tsv' test = '/content/gdrive/MyDrive/BCG/test.tsv' sample = '/content/gdrive/MyDrive/BCG/sampleSubmission.csv' train_df = pd.read_csv(train,sep='\t') train_df.head(5) test_df = pd.read_csv(test,sep='\t') test_df.head(5) ### Sample Submission sampleSub = pd.read_csv(sample) sampleSub.head(5) xtrain_df = pd.DataFrame({ 'boilerplate': train_df.boilerplate.values, 'label': train_df.label.values }) xtrain_df.head(5) xtrain_df.shape xtrain_df.isnull().sum() plt.style.use('dark_background') sns.countplot(x='label',data=xtrain_df) plt.xlabel('Value') plt.ylabel('label') plt.show() X = [] for items in xtrain_df['boilerplate']: X.append(items) from wordcloud import WordCloud ,STOPWORDS commonWord = ' '.join(X) wordcloud = WordCloud(stopwords=STOPWORDS,height=2500,width=3000).generate(commonWord) plt.figure(1,figsize=(8, 8)) plt.imshow(wordcloud) plt.axis('off') plt.show() del X, wordcloud from sklearn.model_selection import train_test_split X = xtrain_df.boilerplate.values Y = xtrain_df.label.values x_train,x_val,y_train,y_val = train_test_split(X,Y, test_size = 0.1 , random_state = 2021) import torch if torch.cuda.is_available(): device = torch.device("cuda") 
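# (added note) the line above only defines `device` when a GPU is present; a more defensive
# variant would be:
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")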
print(torch.cuda.get_device_name(0)) !pip install transformers import stringSince I can't use greater then 512 len so I decided to preprocess the else it is not needed for the Bert modelimport nltk nltk.download('stopwords') from nltk.corpus import stopwords en_stops = stopwords.words('english') def remove_stop(text): return ' '.join([i for i in text.split() if i not in en_stops]) ### Simple Preprocessing def processed_word(text): text = remove_stop(text) return ''.join([i for i in text if i not in string.punctuation ]) a = "This is for the intern" print("Not processed :" ,a) print("processed word :" ,processed_word(a)) ### using based berttokenizer from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',do_lower_case = True) Max_len = 325 def preprocess_from_bert(data): input_ids = [] attention_mask = [] for item in data: token = tokenizer.encode_plus( text = processed_word(item), add_special_tokens=True, max_length = Max_len, # padding = 'max_length', pad_to_max_length = True, truncation = True, return_attention_mask =True, ) input_ids.append(token.get('input_ids') ) attention_mask.append(token.get('attention_mask')) input_ids = torch.tensor(input_ids) attention_mask = torch.tensor(attention_mask) return input_ids , attention_mask train_inputs,train_masks = preprocess_from_bert(x_train) val_inputs,val_masks = preprocess_from_bert(x_val) train_masks.shape ##### DataLoader for train and validation dataset from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler train_labels = torch.tensor(y_train) val_labels = torch.tensor(y_val) batch_size = 32 train_data = TensorDataset(train_inputs, train_masks, train_labels) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) val_data = TensorDataset(val_inputs, val_masks, val_labels) val_sampler = SequentialSampler(val_data) val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size) import torch import torch.nn as nn from transformers import BertModel import torch.nn.functional as F**Model**#### using pretrained model for classification import torch import torch.nn as nn from transformers import BertModel # Create the BertClassfier class class BertClassifier(nn.Module): def __init__(self, freeze_bert=False): super(BertClassifier, self).__init__() self.bert = BertModel.from_pretrained('bert-base-uncased') self.fn = nn.Linear(768,80) self.drop = nn.Dropout(0.2) self.fn1 = nn.Linear(80,2) if freeze_bert: for param in self.bert.parameters(): param.requires_grad = False def forward(self, input_ids, attention_mask): outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask) last_hidden_state_cls = outputs[0][:, 0, :] x = F.relu(self.fn(last_hidden_state_cls)) x = self.drop(x) logits = self.fn1(x) return logits from transformers import AdamW, get_linear_schedule_with_warmup def initialize_model(epochs): bert_classifier = BertClassifier(freeze_bert=False) bert_classifier.to(device) optimizer = AdamW(bert_classifier.parameters(), lr=5e-5, # Default learning rate eps=1e-8 # Default epsilon value ) total_steps = len(train_dataloader) * epochs scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps) return bert_classifier, optimizer, scheduler import random import time loss_fn = nn.CrossEntropyLoss() def set_seed(seed_value=42): random.seed(seed_value) np.random.seed(seed_value) torch.manual_seed(seed_value) 
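# (added note) the next call also seeds every CUDA device so that GPU runs are reproducible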
torch.cuda.manual_seed_all(seed_value) def train(model, train_dataloader, val_dataloader=None, epochs=2, evaluation=False): for epoch_i in range(epochs): print(f"{'Epoch':^7} | {'Batch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}") print("-"*70) t0_epoch, t0_batch = time.time(), time.time() total_loss, batch_loss, batch_counts = 0, 0, 0 model.train() for step, batch in enumerate(train_dataloader): batch_counts +=1 b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch) model.zero_grad() logits = model(b_input_ids, b_attn_mask) loss = loss_fn(logits, b_labels) batch_loss += loss.item() total_loss += loss.item() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() if (step % 20 == 0 and step != 0) or (step == len(train_dataloader) - 1): time_elapsed = time.time() - t0_batch print(f"{epoch_i + 1:^7} | {step:^7} | {batch_loss / batch_counts:^12.6f} | {'-':^10} | {'-':^9} | {time_elapsed:^9.2f}") batch_loss, batch_counts = 0, 0 t0_batch = time.time() avg_train_loss = total_loss / len(train_dataloader) print("-"*70) if evaluation == True: val_loss, val_accuracy = evaluate(model, val_dataloader) time_elapsed = time.time() - t0_epoch print(f"{epoch_i + 1:^7} | {'-':^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {time_elapsed:^9.2f}") print("-"*70) def evaluate(model, val_dataloader): model.eval() val_accuracy = [] val_loss = [] for batch in val_dataloader: b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch) with torch.no_grad(): logits = model(b_input_ids, b_attn_mask) loss = loss_fn(logits, b_labels) val_loss.append(loss.item()) preds = torch.argmax(logits, dim=1).flatten() accuracy = (preds == b_labels).cpu().numpy().mean() * 100 val_accuracy.append(accuracy) val_loss = np.mean(val_loss) val_accuracy = np.mean(val_accuracy) return val_loss, val_accuracy ### training the model set_seed(42) # Set seed for reproducibility bert_classifier, optimizer, scheduler = initialize_model(epochs=2) train(bert_classifier, train_dataloader, val_dataloader, epochs=2, evaluation=True) def evaluate_roc(probs, y_true): preds = probs[:, 1] fpr, tpr, threshold = roc_curve(y_true, preds) roc_auc = auc(fpr, tpr) print(f'AUC: {roc_auc:.4f}') y_pred = np.where(preds >= 0.5, 1, 0) accuracy = accuracy_score(y_true, y_pred) print(f'Accuracy: {accuracy*100:.2f}%') plt.title('Receiver Operating Characteristic') plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() ### prediction on val dataset probs = bert_predict(bert_classifier, val_dataloader) evaluate_roc(probs, y_val) preds = probs[:, 1] preds.shape from sklearn.metrics import precision_recall_curve,confusion_matrix,precision_score,recall_score,f1_score # import plotly.express as px # import plotly.graph_objects as go precision,recall,thresolds = precision_recall_curve(y_val,preds) y_pred = np.where(preds >= 0.5, 1, 0) print("Precision: %f "%precision_score(y_val, y_pred)) print("="*40) print("Recall: %f "%recall_score(y_val, y_pred)) print("="*40) print("F1: %f"% f1_score(y_val, y_pred)) print("="*40) print("Let's see the confuision matrix:\n",confusion_matrix(y_val, y_pred)) print("="*40) plt.style.use('ggplot') conf_matrix = confusion_matrix(y_val, y_pred) plt.figure(figsize=(10,8)) sns.heatmap(conf_matrix,cmap="YlGnBu") plt.title('Confusion 
Matrix') plt.show() # fig = px.area( # x=recall, y=precision, # title=f'Precision-Recall Curve (AUC={auc(fpr, tpr):.4f})', # labels=dict(x='Recall', y='Precision'), # width=700, height=500 # ) # fig.add_shape( # type='line', line=dict(dash='dash'), # x0=0, x1=1, y0=1, y1=0 # ) # fig.update_yaxes(scaleanchor="x", scaleratio=1) # fig.update_xaxes(constrain='domain') # fig.show() # conf_matrix = confusion_matrix(y_val, y_pred) # trace1 = go.Heatmap(z = conf_matrix ,x = ["0 (pred)","1 (pred)"], # y = ["0 (true)","1 (true)"],xgap = 2, ygap = 2, # showscale = False, ) # fig = go.Figure(trace1) # fig.show() #### Loading Test DataSet def bert_predict(model, test_dataloader): model.eval() all_logits = [] for batch in test_dataloader: b_input_ids, b_attn_mask = tuple(t.to(device) for t in batch)[:2] with torch.no_grad(): logits = model(b_input_ids, b_attn_mask) all_logits.append(logits) all_logits = torch.cat(all_logits, dim=0) probs = F.softmax(all_logits, dim=1).cpu().numpy() return probs test_inputs, test_masks = preprocess_from_bert(test_df.boilerplate.values) test_dataset = TensorDataset(test_inputs, test_masks) test_sampler = SequentialSampler(test_dataset) test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=32) probs = bert_predict(bert_classifier, test_dataloader) threshold = 0.6 preds = np.where(probs[:, 1] > threshold, 1, 0) print("Number of evergreen text:", preds.sum()) sampleSub['label'] = preds sns.countplot(x='label',data=sampleSub) plt.xlabel('Classes') plt.ylabel('Number of values predicted') plt.show() sampleSub.head(5) sampleSub.set_index('urlid',inplace=True) sampleSub.to_csv('finalSubmission.csv')To convert the date, we will split it into two columns. One will be a float64 in units of months (do we ignore the year?). The other will be a float64 in units of hours.def null_values(df, return_table=True): mis_val = df.isnull().sum() mis_val_percent = 100 * mis_val / len(df) mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1) mis_val_table_ren_columns = mis_val_table.rename( columns = {0 : 'Missing Values', 1 : '% of Total Values'}) mis_val_table_ren_columns = mis_val_table_ren_columns[ mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values( '% of Total Values', ascending=False).round(4) print("There are", len(df.columns), "columns in total.") print("There are " + str(mis_val_table_ren_columns.shape[0]) + " columns that have missing values.") if return_table: return mis_val_table_ren_columnsThe only nonmissing features are the ID, the date and time, and the last name.miss_values = null_values(arrests) pd.set_option('display.max_rows', None) miss_valuesThere are 12 columns in total. There are 9 columns that have missing values.To impute missing data in 'Sex', all nan values should be replaced with 'Unknown'.arrests['Sex'].unique()The two rows without first names are interesting. In the row with ID 2262, the last name is a placeholder ''.arrests.loc[arrests['FirstName'] != arrests['FirstName']]What are 'nwt', 'doc', and 'jpg' doing in the 'Race' feature?arrests.loc[arrests['Race'] == 'nwt'] arrests.loc[arrests['Race'] == 'doc']We see that there are multiple descriptions that correspond to the same statute. Therefore, we drop the StatuteDescription feature because it does not add anything to the data.If we keep the names, it is reasonable to perform feature engineering that assigns a unique number to a full name, including the suffix if applicable, since we see that one person can and often commits more than one offense. 
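The data-cleaning step below ends up dropping the name columns, but if they were kept, a minimal sketch of this idea could look like the following; it assumes the FirstName, MiddleName, LastName and NameSuffix columns seen above and already applies the uppercase normalisation discussed in the next sentence:

# Hypothetical sketch: one integer ID per (uppercased) full name.
name_cols = ['FirstName', 'MiddleName', 'LastName', 'NameSuffix']
full_name = (arrests[name_cols]
             .fillna('')                      # guard against missing name parts
             .apply(' '.join, axis=1)         # "FIRST MIDDLE LAST SUFFIX"
             .str.upper()
             .str.strip())
arrests['PersonID'] = full_name.factorize()[0]  # same person -> same integer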
We will have to convert every name to uppercase, since the capitalization is inconsistent.There are 472 unique statutes. A OneHotEncoder will result in the number of features increasing to about 480.arrests['NameSuffix'].unique() arrests['StatuteDescription'].value_counts() arrests['Statute'].value_counts() arrests['Statute'].nunique()Data cleaning We drop the ArrestID and StatuteDescription because the former is useless and the latter is redundant with Statute. In fact, there are many descriptions that can potentially refer to the same statute. We also drop the names since there are too many unique names for an analysis to be useful with them kept.from sklearn.preprocessing import OneHotEncoder arrests = pd.read_csv('Arrests.csv') useless_and_redundant = ['ArrestID', 'StatuteDescription'] names = ['FirstName', 'LastName', 'MiddleName', 'NameSuffix'] arrests.drop(useless_and_redundant, axis=1, inplace=True) arrests.drop(names, axis=1, inplace=True) arrests['Year'] = pd.to_datetime(arrests['ArrestDatetime']).apply( lambda x: float(x.strftime('%Y')) if x == x else np.nan) arrests['Month'] = pd.to_datetime(arrests['ArrestDatetime']).apply( lambda x: float(x.strftime('%m')) if x == x else np.nan) arrests['Day'] = pd.to_datetime(arrests['ArrestDatetime']).apply( lambda x: float(x.strftime('%d')) if x == x else np.nan) arrests['Time'] = pd.to_datetime(arrests['ArrestDatetime']).apply( lambda x: float(x.strftime('%H')) + 1/60 * float(x.strftime('%M')) + 1/3600 * float(x.strftime('%S')) if x == x else np.nan) def convert_housenumber(x): if x == x: return str(int(x)) else: return '' arrests['HouseNumber'] = arrests['HouseNumber'].apply(convert_housenumber) arrests['Address'] = arrests['HouseNumber'] + ' ' + arrests['Street'] + ', CHARLOTTESVILLE, VA' arrests.drop(['ArrestDatetime', 'HouseNumber', 'Street'], axis=1, inplace=True)We will use OneHotEncoding for race and sex.onehot = OneHotEncoder() def convert_races(x): if x == 'Unknown' or x == 'nwt' or x == 'doc' or x == 'jpg' or x != x: return 'Unknown Race' else: return x def convert_sex(x): if x == 'Unknown' or x != x: return 'Unknown Sex' else: return x arrests['Race'] = arrests['Race'].apply(convert_races) arrests['Sex'] = arrests['Sex'].apply(convert_sex) cat_onehot = arrests[['Race', 'Sex']] mtx_onehot = onehot.fit_transform(cat_onehot).toarray() df_onehot = pd.DataFrame(mtx_onehot, columns=np.concatenate(onehot.categories_)) arrests.drop(['Race', 'Sex'], axis=1, inplace=True) arrests = pd.concat([arrests, df_onehot], axis=1) arrests.head() miss_values = null_values(arrests) pd.set_option('display.max_rows', None) miss_valuesThere are 14 columns in total. 
There are 2 columns that have missing values.We shall remove those entries with missing addresses and statutes.arrests.dropna(subset=['Address', 'Statute'], how='any', inplace=True) miss_values = null_values(arrests) pd.set_option('display.max_rows', None) miss_values arrests.head(10) from geopy.extra.rate_limiter import RateLimiter locator = geopy.Nominatim(user_agent="myGeocoder") geocode = RateLimiter(locator.geocode, min_delay_seconds=1) df = arrests.head(10) # location = locator.geocode(", Charlottesville, VA") df['Location'] = df['Address'].apply(geocode) df['Point'] = df['Location'].apply(lambda loc: tuple(loc.point) if loc else None) df[['Latitude', 'Longitude', 'Altitude']] = pd.DataFrame(df['Point'].tolist(), index=df.index) arrests arrests['Address']Watch Me Code 1: List Basicscolors = [] #empty list type(colors) dir(colors) help(colors.index) colors.append("orange") colors.append("blue") colors.append("green") colors.append("white") print(colors) print(len(colors)) colors.remove("green") print(colors) #lists are mutable colors.reverse() colors index = colors.index('blue') print(index) colors.index('brown') # ValueErrorInstalace potřebných knihoven%%capture pip_install !pip install git+https://github.com/honzas83/t5s tensorflow==2.3 tensorflow-text==2.3 --upgradeStažení modelu!gdown --id 1X1U7qXStJkLVsfZ7kGYuXUsrXJ6ODFom && unzip -u t5litik.zipDownloading... From: https://drive.google.com/uc?id=1X1U7qXStJkLVsfZ7kGYuXUsrXJ6ODFom To: /content/t5litik.zip 798MB [00:08, 90.7MB/s] Archive: t5litik.zip creating: t5litik/ inflating: t5litik.spm inflating: t5litik.yaml inflating: t5litik/config.json inflating: t5litik/tf_model.h5Importování knihovenimport t5s from IPython.core.display import display, HTML import reVytvoření instance třídy T5 z knihovny t5st5litik = t5s.T5("./t5litik.yaml") t5litik.load_model() def end_sentence(output): "Funkce pro odříznutí slov zbylých za poslední tečkou" return output.rsplit(".", 1)[0]+"." 
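A quick check of the helper just defined (the sample string is invented):

# end_sentence keeps everything up to and including the last full stop.
print(end_sentence("První věta. Druhá věta. nedokončený text"))
# -> První věta. Druhá věta.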
print("Po spuštění zadejte vstup do vstupního řádku, pro ukončení zadejte prázdný vstup.") while True: i = input("> ") if not i: break o = t5litik.predict([i]) o = end_sentence(o[0]) display(HTML(""+o+"")) print()Image size decreasing and Image smoothingAveraging of image pixels for decreasing its' size and using convolution for smoothing it and to better decreasing.import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np from math import floor, ceil # # Smoothing image for next better resizing it using different kernel types # def convolution(img: np.array, krl: np.array) -> np.array: new_img = np.zeros((img.shape)) for i in range(len(img)): for j in range(len(img)): new_img[i][j] = kernel(img, i, j, krl) return new_img # # highpass kernel is below # using this kernel it will be easy to show edges # 0 -1 0 # -1 4 -1 # 0 -1 0 # # averaging kernel is array of ones # 1 1 1 # 1 1 1 # 1 1 1 # # simple gaussian kernel # 1 2 1 # 2 4 2 # 1 2 1 # # # Kernel function will take any kernel # and return smoothed value for each pixel # def kernel(img: np.array, i: int, j: int, krl: np.array) -> np.array: radius = int((len(krl) - 1) / 2) average = np.zeros((3)) dividable = np.sum(krl) if np.sum(krl) > 0 else 1 for k in range((len(krl))): for l in range((len(krl))): if i + k - radius < 0 or j + l - radius < 0 or i + k - radius >= len(img) or j + l - radius >= len(img): average += np.ones((3)) else: average += img[i + k - radius][j + l - radius] * krl[k][l] return average / dividable # # Decreasing size of an image, there is also possibility to use bilinear interpolation for this # calculate the average for nearest pixels for each new pixel and that's it # # for getting better results recommends to use convolution before # you can see below that smoothed image will look better after decreasing its size # def size_decreasing(img: np.array, new_size: int) -> np.array: avg_img = np.zeros((new_size, new_size, 3)) if len(img) / new_size <= 1.9: print('This size is too big -> so it returns original image') return img steps = ceil(len(img) / new_size) for i in range(new_size): for j in range(new_size): s = 0 for k in range(i * steps, (i + 1) * steps): for l in range(j * steps, (j + 1) *steps): if k >= len(img) or l >= len(img): s += np.ones((3)) else: s += img[k][l] avg_img[i][j] = s / steps ** 2 return avg_img img = mpimg.imread('toucan.jpg') / 255 plt.figure(figsize=(8,8)) plt.imshow(img)Showing the decreased image without using convolutionplt.figure(figsize=(4, 4)) plt.imshow(size_decreasing(img, 44))Showing the smoothed image with using convolution with kernel of oneskrl = np.ones((7, 7)) #krl = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]]) #krl = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) #krl = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) conv_img = convolution(img, krl) plt.figure(figsize=(8,8)) plt.imshow(conv_img)And decreasing the size of smoothed image, you can see that it looks much better than previous decreased imageplt.figure(figsize=(4, 4)) plt.imshow(size_decreasing(conv_img, 43))人工智能编程基础(试学班) 项目:我的微信好友在这个notebook文件中,有些模板代码已经提供给你,但你还需要实现更多的功能来完成这个项目。除非有明确要求,你无须修改任何已给出的代码。以**'(练习)'**开始的标题表示接下来的代码部分中有你需要实现的功能。这些部分都配有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示。除了实现代码外,你还**需要**回答一些与项目及代码相关的问题。每个需要回答的问题都会以 **'问题 X'** 标记。请仔细阅读每个问题,并且在问题后的 **'回答'** 部分写出完整的答案。我们将根据 你对问题的回答 和 撰写代码实现的功能 来对你提交的项目进行评分。>**提示:**Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。--- 让我们开始吧在这个项目中,你将读取微信好友数据,并做一些有趣的统计和分析:P 项目内容我们将这个notebook分为不同的步骤,你将完成以下5个任务:**任务0 - 读取数据****任务1 - 
统计微信好友的男女比例****任务2 - 分析微信好友的地域分布****任务3 - 生成微信好友的签名词云图****任务4 - 对签名进行情感分析**### 配置环境,安装项目所需package,此部分代码只需运行一次即可 ### 无需修改以下代码 !pip install -r requirements.txt !conda install -c conda-forge wordcloud --yCollecting msgpack (from -r requirements.txt (line 1)) Downloading https://files.pythonhosted.org/packages/22/4e/dcf124fd97e5f5611123d6ad9f40ffd6eb979d1efdc1049e28a795672fcd/msgpack-0.5.6-cp36-cp36m-manylinux1_x86_64.whl (315kB)  100% |████████████████████████████████| 317kB 1.5MB/s ta 0:00:01 [?25hCollecting pinyin (from -r requirements.txt (line 2)) Downloading https://files.pythonhosted.org/packages/32/95/d2969f1071b7bc0afff407d1d7b4b3f445e8e6b59df7921c9c09e35ee375/pinyin-0.4.0.tar.gz (3.6MB)  100% |████████████████████████████████| 3.6MB 150kB/s eta 0:00:01 21% |███████ | 798kB 7.7MB/s eta 0:00:01 [?25hCollecting SnowNLP (from -r requirements.txt (line 3)) Downloading https://files.pythonhosted.org/packages/3d/b3/37567686662100d3bce62d3b0f2adec18ab4b9ff2b61abd7a61c39343c1d/snownlp-0.12.3.tar.gz (37.6MB)  100% |████████████████████████████████| 37.6MB 16kB/s eta 0:00:01 6% |██▎ | 2.6MB 25.0MB/s eta 0:0[...]--- 任务0. 登陆并发送打招呼信息 导入项目所需包### 以下内容无需改动,直接运行即可 # 导入项目中所需要的包 import pandas as pd import re import os import numpy as np import pinyin import matplotlib.pyplot as plt %matplotlib inline print("所有库导入成功!")所有库导入成功!---**注意:在教室中,我们仅提供假数据来完成项目,如果您倾向于探索自己微信好友的数据,您可以前往此[链接](https://github.com/udacity/AIPND-cn-trial)下载文件,并在本地运行。**### 以下内容无需改动,直接运行即可 # 读取数据 dataset = pd.read_csv('wechat_friends.csv').fillna('').to_dict('records')--- --- 任务1. 好友男女比例 根据我们希望探索的问题,需要从数据集中取出以下几个部分:- NickName:微信昵称- Sex:性别,1表示男性,2表示女性- Province:省份- City:城市- Signature:微信签名 练习:打印数据* 打印`dataset`的数据类型* 打印`dataset`中的第一条数据,以及其数据类型。* 打印`dataset`中第一条数据的微信昵称### TODO:打印dataset的数据类型 print(type(dataset)) ### TODO:打印第一条数据及数据类型 print(dataset[0]) print(type(dataset[0])) ### TODO:打印第一条数据的微信昵称 print(dataset[0]['NickName'])Myself**问题2**dataset的数据类型是什么?dataset中第一条数据的数据类型呢?根据你的理解,这两种数据类型的区别是什么? 1.list列表;2.dict字典;3.列表有序,按元素存(元素也可以是其他数据类型),查找插入删除随着列表规模增加而增加 而字典是无序,按键值对(key:value)存,查找插入删除操作更快 练习:统计男女比例- 统计好友性别,分为男性、女性与未知三种,赋值到已经定义好的`sex`字典中。提示:- `dataset`中1表示男性、2表示女性;- *注意*:数据需要从第1条开始计算,因为**第0条是自己**。# TODO:统计好友性别 sex = { 'male': 0, 'female': 0, 'unknown': 0 } # 性别判断函数:sex_dete 形参:x 实参:Sex键对应的值 格式已修改 注意了缩进 def sex_dete(x): if x == 1.0: sex['male'] += 1 elif x == 2.0: sex['female'] += 1 else: sex['unknown'] += 1 # 遍历dataset来判断性别 for i in range(1,len(dataset)): sex_dete(dataset[i]['Sex']) ### 以下内容无需改动,直接运行即可 print("我的好友中共有", sex['male'],"位男性、", sex['female'], "位女性,有", sex['unknown'], "位好友未填写。") ### 以下内容无需改动,直接运行即可 plt.figure(figsize=(8,5), dpi=80) plt.axes(aspect=1) plt.pie([sex['male'], sex['female'], sex['unknown']], labels=['Male','Female','Unknown'], labeldistance = 1.1, autopct = '%3.1f%%', shadow = False, startangle = 90, pctdistance = 0.6 ) plt.legend(loc='upper left',) plt.title("My Wechat Friends' Sex Ratio") plt.show()这段代码的作用是绘制饼图,是不是很有趣?**饼状图**是一种常见的单变量图形,用于描绘分类变量级别的相对频率。饼图中的频率用圆形的扇区表示:角度或面积越大,该分类值就越常见。在「人工智能编程基础」这门课的第二章节中,将讲解如何使用`matplotlib`和`seaborn`绘制各种统计图表,进行数据可视化。--- 任务2. 好友地域分布 练习:统计好友省份使用`list`中`append()`方法将好友省份添加至`province`中,注意**要去除空的字符串**提示:可以用`for`循环实现*注意*:数据需要从第1条开始计算,因为**第0条是自己**。### TODO:将好友所在省份(不为空)添加到 province 中 # 这里根据上次审阅建议使用 列表推导式 生成province 没有使用append()方法插入 # 相对原本多行 这次仅一行 似乎更简洁?我感觉是不是有点太长了...? 
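# (added note) the comprehension below keeps only non-empty Province values and starts at
# index 1 because index 0 is the user's own record.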
province = [dataset[i]['Province'] for i in range(1,len(dataset)) if dataset[i]['Province'] != ''] ### 以下内容无需修改,直接运行即可 province = [pinyin.get(i, format="strip", delimiter="") for i in province if i != ''] ### 以下内容无需修改,直接运行即可 province = pd.DataFrame(province) province.columns = ['Province'] province['Number of Friends'] = 1 province.groupby('Province').sum().sort_values('Number of Friends', ascending=False)[:10].plot.bar()**条形图**用于描述分类变量的分布情况。在条形图中,分类变量的每个级别用长条表示,高度表示数据在该级别的出现频率。 --- 任务3. 生成好友个性签名词云图在这里我们希望生成词云,只需要调用第三方库即可,Python有大量的库可以使用,能极大提高开发效率,是编程入门的绝佳选择。 练习:打印个性签名* 使用`print()`语句打印出最长的3个个性签名### TODO:打印最长的3个个性签名 signatures = [dataset[i]['Signature'] for i in range(len(dataset))] """ 原先助教建议我们使用 deepcopy 来防止修改数据列表时候 造成原始数据的也被修改 普通的赋值操作 在python里感觉类似曾经学C++时候老师说的 引用 操作 两个变量本质上指向的是同一个东西 当我们修改其中某个变量时候 另一个也会发生相应改变 而 deepcopy 解决这个问题 相当于做了一个“隔离”措施 复制出一个 数据一样 但是 独立互不影响 不知道这样理解是否正确!若有问题希望指出!感谢! """ """ 使用sorted方法(会建立一个新的list 不在原有list上操作 无需deepcopy了)按字符串长度排序 这里使用len(i)作为排序的关键字 reverse为True 非正序 即降序排列 lambda是一个匿名函数(懒得新建函数时候可用) lambda i:len(i) 即输入i 返回i的长 i是形参 参考了 https://blog.csdn.net/moxiaobeimm/article/details/80702496 和 https://www.cnblogs.com/hf8051/p/8085424.html """ len_descending_signatures = sorted(signatures,key = lambda i:len(i),reverse = True) for i in range(3): print("第" , i+1 , "长的签名是:" , len_descending_signatures[i]) ### 以下内容无需修改,直接运行即可 from wordcloud import WordCloud import jieba tList = [] for i in dataset: signature = i["Signature"].replace(" ", "").replace("span", "").replace("class", "").replace("emoji", "") rep = re.compile("1f\d.+") signature = rep.sub("", signature) if len(signature) > 0: tList.append(signature) text = "".join(tList) wordlist_jieba = jieba.cut(text, cut_all=True) wl_space_split = " ".join(wordlist_jieba) ### 以下内容无需修改,直接运行即可 import PIL.Image as Image alice_coloring = np.array(Image.open("wechat.jpg")) my_wordcloud = WordCloud(background_color="white", max_words=2000, mask=alice_coloring, max_font_size=40, random_state=42, font_path='./SimHei.ttf').generate(wl_space_split) plt.imshow(my_wordcloud) plt.axis("off") plt.show() my_wordcloud.to_file(os.path.join("wechatfriends_wordcloud.png"))--- 任务4. 对好友签名进行情感分析在这部分内容中,我们调用了[SnowNLP](https://github.com/isnowfy/snownlp)的情感分析,它是一个python写的类库,可以方便的处理中文文本内容,不用我们实现其中具体的代码。一般来说,情感分析的目的是为了找出作者观点的态度,是正向还是负向,或者更具体的,我们希望知道他的情绪。在这里,我们希望了解到好友签名所表达出来的情感是积极的,还是中立、负面的,比如说在以下例子中,我们对"这个商品我非常喜欢,颜色很合我意!"这句话进行了预处理,并通过训练好的模型预测其的情感。在这里,我们简单地假设大于0.66表示积极,低于0.33表示消极,其他表示中立。运行以下代码试试看!### 以下内容无需修改,直接运行即可 from snownlp import SnowNLP text = "这个商品我非常喜欢,颜色很合我意!" 
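# (added note) SnowNLP(...).sentiments returns a score in [0, 1]; this notebook treats
# values above 0.66 as positive, below 0.33 as negative, and the rest as neutral.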
sentiment = SnowNLP(text).sentiments print(sentiment)0.6851400314498396接下来,我们将好友的签名进行文本预处理,并尝试统计其中积极、中立、消极的个数。 练习:统计好友签名情感分析结果比例* 统计sentiments中**大于0.66**的个数* 统计sentiments中**大于等于0.33且小于等于0.66**的个数* 统计sentiments中**小于0.33**的个数提示:可以用循环语句或者列表表达式实现。### 以下内容无需修改,直接运行即可 sentiments = [] for i in tList: sentiments.append(SnowNLP(i).sentiments) ### TODO:统计sentiments中大于0.66的个数 positive = None positive = len([i for i in sentiments if i > 0.66]) # 列表推导式 遍历后变成列表然后算长 print("情感积极有:",positive) ### TODO:统计sentiments中大于等于0.33且小于等于0.66的个数 neutral = None neutral = len([i for i in sentiments if i <= 0.66 and i >= 0.33]) print("情感中立有:",neutral) ### TODO:统计sentiments中小于0.33的个数 negative = None negative = len([i for i in sentiments if i < 0.33]) print("情感消极有:",negative) ### 以下内容无需修改,直接运行即可 labels = [u'Negative',u'Neutral',u'Positive'] values = (negative,neutral,positive) plt.xlabel(u'Sentiment Analysis') plt.ylabel(u'Number') plt.xticks(range(3),labels) plt.bar(range(3), values) plt.title('Sentiment Analysis of Friends signature') plt.show()Read IRSM FORMirsmform, irsmout = [xml_parser.get_files(regex, 'monocurve') for regex in ['irsmform', 'irsmout']] debug_df = pd.DataFrame() main_curve, sprds = xml_parser.get_rate_curves(irsmform) dsc_curve = main_curve try: estim_curve = sprds[0] except TypeError: estim_curve = main_curve cal_basket = list(xml_parser.get_calib_basket(irsmform)) mr = xml_parser.get_hw_params(irsmform).meanRREAD IRSM OUTref_swos = list(xml_parser.get_calib_basket(irsmout)) ref_mr = mr ref_sigmas = xml_parser.get_hw_params(irsmout).hw_volatilityHernard pricer with its ref sigmacalib_premiumsH = [] for swo in cal_basket: henr_price, debug = henr.hw_swo(swo, ref_mr, ref_sigmas, dsc_curve, estim_curve) debug_df = pd.concat([debug_df, pd.DataFrame(data=debug)], sort=False) calib_premiumsH.append(henr_price) calib_premiumsH cal_basketHW Calibrationsigma_hw_henr, debug_H = hw_calib.calibrate_sigma_hw(cal_basket, ref_mr, dsc_curve, estim_curve, IsJamsh=False) sigma_hw_henr df = pd.DataFrame(debug_H) df df.mkt_var df.tvar_schrager df.var_hw from collections import namedtuple a = namedtuple('calib_debug', 'expiries sigma_hw schrager var_hw black_price') len('expiries sigma_hw schrager var_hw black_price'.split()) sigma_hw_henr.values - ref_sigmas.values fig = plt.figure() plt.figure(figsize=(15,5)) # plt.step(sigma_hw_jamsh.buckets, sigma_hw_jamsh.values, label = 'Jamshidian', marker='o', where='post') plt.step(sigma_hw_henr.buckets[:-1], sigma_hw_henr.values[:-1], label = 'Henrard', marker='o', where='pre') plt.step(ref_sigmas.buckets[:-1], ref_sigmas.values[:-1], label = 'ref log', marker='o', where='pre') # plt.ylim(0) plt.legend(loc='lower right') plt.title('calibrated Hull White piecewise constant volatility') plt.xlabel('expiries') plt.ylabel('volatility') pricesH = [] for swo in cal_basket: henr_price, debug = henr.hw_swo(swo, ref_mr, sigma_hw_henr, dsc_curve, estim_curve) pricesH.append(henr_price) pricesJ = [] for swo in cal_basket: jamsh_price, debug = jamsh.hw_swo(swo, ref_mr, sigma_hw_jamsh, dsc_curve, estim_curve) pricesJ.append(jamsh_price) fig = plt.figure() plt.figure(figsize=(20,10)) ref_calib_prices = [ref_swo.cal_premium for ref_swo in ref_swos] black_prices = pd.DataFrame(data=debug_H).black_price prices_dict = { 'Jamshidian (calibrated sigma)': array(pricesJ), 'Henrard (calibrated sigma)':array(pricesH), 'Henrard()': array(calib_premiumsH), '': array(ref_calib_prices), # 'Black': array(black_prices) } for lbl, prices in prices_dict.items(): plt.plot([swo.expiry for swo in cal_basket], 
prices - black_prices, label = lbl, linestyle=choice([':', '--', '-.']), marker = 'o') plt.title('Black difference ') plt.legend(loc='lower right') array(ref_calib_prices) - array(pricesH)ニューラルネットワークによる機械学習をあえて用いた線形回帰ニューラルネットワークを用いた教師あり学習・回帰問題について,機械学習フレームワークPyTorchを用いた,極端に簡単な例を示す.文章も含め,以下のページに記載のプログラムを流用させて頂いた上で,さらに簡単にしたものである.PyTroch で実装したニューラルネットワークで簡単な回帰問題を解く --- 回帰(1 特徴量)#@title 一般的ライブラリ import numpy as np np.random.seed(1) import matplotlib.pyplot as plt plt.style.use('dark_background') # Dark-mode from tqdm import tqdm # Progress bar #@title 機械学習フレームワーク PiTorch import torch import torch.nn.functional import torch.utils.data torch.manual_seed(1)1. 教師データ生成$x$ は $(0,10)$の間の一様分布$ y = 2x + 3 + \epsilon ,\ \epsilon \sim \mathcal{N}(0,1)$$ y= 2x+3$に標準正規分布に従う雑音項を加えた$ (x_i,y_i) $ を生成#@title 教師データ生成 x = np.random.uniform(low=0, high=10, size=100) y = 2 * x + 3 + np.random.randn(100) #@title 図描画 fig = plt.figure() ax = fig.add_subplot() ax.set_xlabel('x') ax.set_ylabel('y') ax.scatter(x, y, alpha=0.6) fig.show()機械学習では以下のように呼ぶ $(x_i)$: Inputs $(y_i)$: Labels, outputs $f : x \mapsto y $: Model $(x_i, y_i)$: Labeled data,教師データ 2. 回帰問題教師データ $(x_i, y_i)$ による,モデル $f$ の訓練を行う.ニューラルネットワークによる回帰とは,モデル $f$ としてニューラルネットワークで表される関数を用い,ニューラルネットワークのパラメータを調整するということである. 2.1 PyTorchによるニューラルネットワークの定義class Net(torch.nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = torch.nn.Linear(1, 1) def forward(self, x): x = self.fc1(x) return x f = Net()`torch.nn.Linear(1, 1)` は, 入力と出力の次元が両方 1 のニューラルネットワークのレイヤを表す.このレイヤは,後述の活性化関数を $\phi$ として次式を表す.$y=\phi(wx+b)$`x = self.fc1(x)` は活性化関数が $\phi(z)=z$ であることを表す. したがってこのレイヤは次式を表す.$y=wx+b$このモデル $f$,すなわちニューラルネットワークを訓練することは, $w$ と $b$ の推定値を定めることに他ならないなお,このニューラルネットワークは,入力層,出力層に加えて,1層の隠れ層を持つ,単純パーセプトロンと呼ばれるものである.隠れ層の数や,隠れ層のニューロンの数を増やせば(ディープニューラルネットワーク),モデル,すなわち関数 $f$ の表現力が高まる.これを使うのが深層学習である. 3. 教師データによる,ニューラルネットワークの訓練 OptimizerはStochastic gradient descent,損失関数を MSE とする#@title PiTorchを使うための準備 x_tensor = torch.from_numpy(x.reshape(-1, 1)).float() y_tensor = torch.from_numpy(y.reshape(-1, 1)).float() epoch_loss = []OptimizerはStochastic gradient descent,損失関数を MSE とする#@title Optimizer, 損失関数の設定 optimizer = torch.optim.SGD(f.parameters(), lr=0.01) criterion = torch.nn.MSELoss() #@title 訓練 f.train() num_epochs = 1000 for epoch in tqdm(range(num_epochs)): loss = criterion(f(x_tensor), y_tensor) optimizer.zero_grad() loss.backward() optimizer.step() epoch_loss.append(loss.data.numpy().tolist())100%|██████████| 1000/1000 [00:00<00:00, 2153.51it/s]訓練時における損失 `epoch_loss` は、学習の収束状況を表す.#@title 図描画 fig = plt.figure() ax = fig.add_subplot() ax.set_yscale('log') ax.set_xlabel('#epoch') ax.set_ylabel('loss') ax.plot(list(range(len(epoch_loss))), epoch_loss, linewidth=2) fig.show()4. 
ニューラルネットワークによる推定ニューラルネットワークによる回帰曲線を重ねて描く#@title 推定 f.eval() # テスト入力 # [0,10]の中の等間隔な100点 x_new = np.linspace(0, 10, 100) x_new_tensor = torch.from_numpy(x_new.reshape(-1, 1)).float() # テスト入力に対する推定値 with torch.no_grad(): y_pred_tensor = f(x_new_tensor) y_pred = y_pred_tensor.data.numpy() #@title 図描画 fig = plt.figure() ax = fig.add_subplot() ax.scatter(x, y, alpha=0.6) ax.plot(x_new, y_pred, c='tab:orange', linewidth=3) fig.show()パラメータを表示 $ y = w x + b $ の $w$.2が正解print(f.fc1.weight)Parameter containing: tensor([[1.9711]], requires_grad=True)$ y = w x + b $ の $b$.3が正解print(f.fc1.bias) # パラメータ閲覧 # https://qiita.com/mathlive/items/d9f31f8538e20a102e14 # print(net) # for param in net.parameters(): # print(param)Mini Project Three: Autonomous Vehicle Driving with CNN![title](Images\car-running.gif)from IPython.display import HTML from IPython.display import Image HTML(''' The raw code for this IPython notebook is by default hidden for easier reading. To toggle on/off the raw code, click here.''')Table of Contents - 1. [Introduction](Introduction) - 1.1 [Abstract](abstract) - 1.2 [Importing Libraries](importing_libraries) - 1.3 [Setting up the Environment](Environment)- 2.0 [Creation of the Dataset](Creation_Dataset)- 3.0 [Training](Training)- 4.0 [Driving](Driving) - 5. [Conclusion](Conclusion)- 6. [Contribution](Contribution)- 7. [Citation](Citation)- 8. [License](License) 1.0 Introduction 1.1 Abstract Welcome to Imitation Learning.[Back to top](Introduction) 1.2 Importing Libraries This is the official start to any Data Science or Machine Learning Project. A Python library is a reusable chunk of code that you may want to include in your programs/ projects. In this step we import a few libraries that are required in our program. Some major libraries that are used are Numpy, Gym, Torch, PIL, Pyglet etc.[Back to top](Introduction)# Using Numpy for working with arrays import numpy as np # The gym library is a collection of test problems — environments import gym from gym import envs # Imageio for creating and saving the image import imageio # The OS module in python provides functions for interacting with the operating system. OS import os # The sys module provides functions used to manipulate different parts of the Python runtime environment. import sys # Pyglet is a library for developing visually rich GUI, Windows may appear as floating regions import pyglet from pyglet.window import key # We use copy module for shallow and deep copy operations. import copy # PyTorch is a Python package that provides two high-level features: #- Tensor computation (like NumPy) with strong GPU acceleration #- Deep neural networks built on a tape-based autograd system import torch from torch.autograd import Variable from torch import optim, nn from torch.nn import Softmax # Python Imaging Library for simple image processing such as resizing (scaling), rotation, and trimming (partial cutout) import PIL # Importing our Python scripts from model import CustomModel from data import transform_driving_image, LEFT, RIGHT, GO, ACTIONS, CustomDataset, get_dataloader # For making a table in results from astropy.table import Table, Column # To ignore warnings import warnings; warnings.simplefilter('ignore')1.3 Setting up the Environment Before we start with the setup of our environment, we need to install a few pakages which will make our game and neural network work. 
1) Gym facilityInstall OpenAI Gym on the machineFollow the instructions at https://github.com/openai/gyminstallation for extensive and deep guide.**Summary of instructions:**- Install Python 3.5+- Clone the gym repo: git clone https://github.com/openai/gym.git- cd gym- Gym installation, with the box2d environments: pip install -e '.[box2d]'Follow the following steps to play the Car Racing Game- cd gym/envs/box2d- python car_racing.py 2) PytorchPytorch is the deep learning framework that we will be using. It makes it possible to build neural networks very simply.Follow the instructions on http://pytorch.org/ for a deep guide.**Summary of instructions:**- Install Python 3.5+- It is recommended to manage PyTorch with Anaconda. Please install Anaconda- Install PyTorch following instructions at https://pytorch.org/get-started/locally/![title](Images\Pytorch_Installation.png)For example this is the setup for my Computer> pip install torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio===0.7.0 -f https://download.pytorch.org/whl/torch_stable.html The EnvironmentFor this tutorial, we will use the gym library developed by OpenAI. It provides environments (simple games) to develop reinforcement learning algorithms.The environment we will be using is CarRacing-v0 ( https://gym.openai.com/envs/CarRacing-v0/ ). It is about driving a car on a circuit, the objective being to move forward while staying on the track, which contains many turns. The input to the algorithm (the state provided by the environment) is only the image displayed by the environment: we see the car, and the terrain around it.![title](Images\car-racing.png)The idea is to drive the car by analyzing this image.We are going to use this library in a roundabout way: It is designed for reinforcement learning. The objective is in principle to use the rewards (rewards) provided by the environment to learn the optimal strategy without user action. Here we will not be using these rewards.In addition, we will be doing end-to-end learning , which means that the neural network will directly give us the commands to navigate the car. This is not a road detection module, which will then be analyzed by another program (most true autonomous driving systems are made this way). Here, the neural network takes the field matrix as input, and issues a command to be executed (turn left, turn right, continue straight ahead), without any intermediate program.To use the environment, you need to import it like this:>import gym>env = gym.make('CarRacing-v0').envYou can then access several useful functions:- **env.reset() :** Allows you to restart the environment- **env.step(action) :** Allows you to perform the action `action`. This function returns a tuple `state`, `reward`, `done`, `info` containing the state of the game after the action, the reward obtained, doneindicates if the game is finished, and infocontains debug data.- **env.render() :** Displays the game window.Here, the state `state` that will be returned by env.step(action)is the image displayed on the screen (the pixel matrix). It is this data that we will use to steer our car.[Back to top](Introduction) 2.0 Creation of the dataset ![title](Images\Racing_start.gif)The car is controllable with the arrows of the keyboard.For the rest, we want to train a neural network which will take the game image as input, and as output, return the command to send (left, right, straight). We will focus first on controlling the direction . 
The speed control will still have to be done with the up and down keys.The first step in training our neural network is to create a dataset. It is about recording a set of images accompanied by their label . We will represent the possible actions with integers:- 0 to indicate to go left- 1 to indicate go right- 2 to indicate to go straightThus, we will save a set of 3000 images in a folder, accompanied by a file labels.txt indicating on each line label. We have 3 labels, so we save 1000 images of each label for the training set. For the testing set, we will save 600.We have created a train set, which will be used to train the network, and a test set, which will be used to evaluate its performance during training, to know when to interrupt it. Indeed, given the relatively low number of images that we use (3000), there is a risk of overfitting i.e. the network will lose in power of generalization to be better, in the special cases of the training set . This is a situation that we want to avoid, since we want to use our model subsequently in situations that it has not seen. The technique of stopping training before convergence is called early stopping. >As the epochs go by, the algorithm leans and its error on the training set naturally goes down, and so does its error on the validation set. However, after a while, the validation error stops decreasing and actually starts to go back up. This indicates that the model has started to overfit the training data. With Early Stopping, you just stop training as soon as the validation error reaches the minimum. We run this piece of code twice as we want to save different datasets for training and testing. [Back to top](Introduction)# %%capture # record_dataset.py samples_each_classes = 1000 def action_to_id(a): if all(a == [-1, 0, 0]): return LEFT elif all(a == [1, 0, 0]): return RIGHT else: return GO # is_pressed_esc = False if __name__=='__main__': quit=False if len(sys.argv) < 2: sys.exit("Usage : python record_dataset.py path") env = gym.make('CarRacing-v0').env envs.box2d.car_racing.WINDOW_H = 750 envs.box2d.car_racing.WINDOW_W = 1200 env.reset() folder = sys.argv[1] images = os.path.join(folder, "train_images") labels = os.path.join(folder, "train_labels.txt") os.makedirs(images, exist_ok=True) a = np.array([0.0, 0.0, 0.0]) def key_press(k, mod): global restart global quit if k == 65307: quit = True if k==key.LEFT: a[0] = -1.0 if k==key.RIGHT: a[0] = +1.0 if k==key.UP: a[1] = +1.0 if k==key.DOWN: a[2] = +0.8 # set 1.0 for wheels to block to zero rotation # if k==65307 : is_pressed_esc = True def key_release(k, mod): if k==key.LEFT and a[0]==-1.0: a[0] = 0 if k==key.RIGHT and a[0]==+1.0: a[0] = 0 if k==key.UP: a[1] = 0 if k==key.DOWN: a[2] = 0 env.viewer.window.on_key_press = key_press env.viewer.window.on_key_release = key_release env.reset() for i in range(100): env.step([0, 0, 0]) env.render() file_labels = open(labels, 'w') samples_saved = {a: 0 for a in ACTIONS} i = 0 # while not is_pressed_esc: while not quit : env.render(close = False) s, r, done, info = env.step(a) action_id = action_to_id(a) if samples_saved[action_id] < samples_each_classes: samples_saved[action_id] += 1 samples_each_classes imageio.imwrite(os.path.join(folder, 'train_images', 'img-%s.jpg' % i ), s) file_labels.write('%s %s\n' % ('img-%s.jpg' % i, action_id)) file_labels.flush() i += 1 print(samples_saved) # env.render() env.render(close=True)[2020-12-06 01:09:20,051] Making new env: CarRacing-v03.0 Training Model training with PyTorchPytorch is a python matrix 
computing and deep learning library. It is, on the one hand, an equivalent of numpy that can run both on CPU and on GPU; on the other hand, it records every operation performed on the data so that gradients can be computed and the backpropagation algorithm, which is at the heart of neural network training, can be applied. PyTorch also provides a set of modules that can be assembled, which makes it possible to create neural networks very simply.In PyTorch, the basic object is the module. Each module is a function, or an assembly of PyTorch functions, which takes Tensors (matrices containing data) as input and produces another tensor as output. All the operations performed in a module are recorded, because the operation graph is necessary for the backpropagation algorithm.![title](Images\Custom_Model.png)**The __init__ function :** here we define the network architecture. Our network is made up of two parts: self.convnet and self.classifier. The convnet part is the convolutional part: it is responsible for analyzing the image and recognizing shapes. It is made up of two convolution layers (pattern recognition), followed by a non-linearity (ReLU) and a pooling layer (which makes the output invariant to translations).The second part, the classifier, takes the output of the convolutional network and produces a vector of size num_classes = 3 which represents the score of each action to be performed.The call to nn.Sequential chains the layers in succession: the input passes through all these layers in order, the input of one layer being the output of the previous one.**The forward function :** This function is called by PyTorch when our module is called. Note the flattening from a 2D feature map to a 1D vector between the convnet and classifier parts, done with input = input.view(input.size(0), -1) (the first dimension being the number of images in the batch). It is a shortcut for input = input.view(input.size(0), input.size(1) * input.size(2) * input.size(3)).The input indeed has 4 dimensions: the first for the batch, the second for the number of channels (3 colors when the image enters the network), and the last two for the height and width of the image. Thus, as we go through the layers, the batch dimension stays fixed, each convolution creates new channels (so the channel dimension grows), and the spatial dimensions shrink. Data PreparationWe are going to create a Dataset class, which will be used by PyTorch to load our dataset into memory through its DataLoader class.First of all, we define the transformations that will be used to preprocess the images before giving them as input to the neural network.`from torchvision import transforms transform_driving_image = transforms.Compose([ transforms.CenterCrop(72), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),])`This transformation performs the following actions:**Crop the image :** transforms.CenterCrop(72) keeps only a square of 72 pixels, centered like the original image. Indeed, the image that we get from the environment looks like this: ![title](Images\Crop_Image.png)We can see that the bottom of the screen shows an indicator bar with the speed and the steering and acceleration controls; a short sanity check of this preprocessing applied to a raw frame follows below.
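As a quick sanity check, here is a small sketch (assuming frames arrive as 96x96x3 uint8 arrays, which is what CarRacing-v0 returns) showing the shape and value range produced by transform_driving_image:

```
import numpy as np
import PIL.Image
from torchvision import transforms

transform_driving_image = transforms.Compose([
    transforms.CenterCrop(72),                               # keep the central 72x72 square (drops the HUD bar)
    transforms.ToTensor(),                                   # HWC uint8 in [0, 255] -> CHW float in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # [0, 1] -> [-1, 1]
])

frame = np.zeros((96, 96, 3), dtype=np.uint8)                # stand-in for a frame returned by env.step
x = transform_driving_image(PIL.Image.fromarray(frame))
print(x.shape, float(x.min()), float(x.max()))               # torch.Size([3, 72, 72]) -1.0 -1.0
```

On a real frame the values would of course span most of [-1, 1] rather than being constant.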
If we do not hide this indicator bar, the CNN may learn to associate the commands we give it with these indications (it is indeed the best clue on the screen for deducing the command being issued).After cropping, the resulting image is shown below. The CNN is forced to analyze the road and the position of the car in order to make its decision.![title](Images\Crop_Image_2.png)Notice that the images provided to the CNN are of much lower quality than those displayed by the environment during the game: they are only 96 pixels on a side. This is enough for the neural network to analyze the shapes, and it makes training much faster (because far fewer neurons are needed).**Transform the matrix into a PyTorch Tensor:** transforms.ToTensor()The tensor is the basic object PyTorch uses to store data. It is the analogue of a numpy array, except that it can be stored on CPU or on GPU. We need to transform our image into a PyTorch tensor before giving it to the neural network as input.We could also use the function torch.from_numpy(numpy_array) to transform a numpy array into a Tensor.**Standardization:** transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))After ToTensor, the pixel values lie between 0 and 1. Here we subtract 0.5 and divide by 0.5 in order to obtain values between -1 and 1, which is more efficient for training a neural network (data centered at 0 with variance close to 1).**The __len__ function** should return the length of the dataset, here the total number of images.**The __getitem__(self, index) function** should return the item at position index. Here, we load the image corresponding to this index, apply the transformations to it, then return the matrix as well as the label (in the form of a Tensor).**Directions** We have encoded the directions in three variables LEFT, RIGHT and GO, which are used across the different modules. Code for training the neural networkThis code is adapted from the tutorial http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html .The general idea is as follows: at each epoch, we train on the entire training dataset, then we evaluate on the test dataset. The data is loaded using a DataLoader provided by PyTorch (we pass it the Dataset object we created previously as an argument).Some important steps:**Wrapping the `Tensors` in `Variables` :** In PyTorch, the step `data = Variable(tensor)` is necessary because it is the `Variable` object that keeps in memory the gradient of this variable with respect to the final loss. A Variable is in fact a combination of two tensors, one holding the data and one holding the gradients.**Backpropagation**To perform backpropagation in PyTorch, the following steps are necessary:- **optimizer.zero_grad() :** at each iteration of the loop.
This resets the gradients of each parameter to zero.- **loss.backward() :** this will calculate the gradients for each variable by backpropagation according to the loss, and store them in the Variable object- **optimizer.step() :** Modifies each parameter of our model (network weight) in order to minimize the loss.We have explained the intricate workings of CNN in the following notebook:[CNN](./INFO7390_Assignment_3_Mini_Project_Basics_of_Convolutional_Neural_Network.ipynb)[Back to top](Introduction)# %%capture # train.py def train(model, criterion, train_loader, test_loader, max_epochs=50, learning_rate=0.001): dataloaders = { "train":train_loader, "val": test_loader } optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) best_acc = 0 for epoch in range(max_epochs): print('Epoch {}/{}'.format(epoch, max_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['val', 'train']: if phase == 'train': model.train(True) # Set model to training mode else: model.train(False) # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 for data in dataloaders[phase]: # get the inputs inputs, labels = data labels = labels.view(labels.size(0)) inputs, labels = Variable(inputs), Variable(labels) optimizer.zero_grad() outputs = model(inputs) _, preds = torch.max(outputs.data, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.data * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / len(dataloaders[phase]) epoch_acc = running_corrects / len(dataloaders[phase]) print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) torch.save(best_model_wts, "models2/model-%s.weights" % epoch) print('Training complete') print('Best val Acc: {:4f}'.format(best_acc)) return best_model_wts if __name__=='__main__': num_classes = 3 model = CustomModel() train_path = "train_images" test_path = "test_images" train_loader = get_dataloader(train_path, batch_size=8) test_loader = get_dataloader(test_path, batch_size=30) loss = nn.CrossEntropyLoss() x=train(model, loss, train_loader, test_loader)Epoch 0/49 ---------- val Loss: 33.0593 Acc: 10.0000 train Loss: 7.5771 Acc: 4.5227 Epoch 1/49 ---------- val Loss: 14.7585 Acc: 25.7000 train Loss: 4.4853 Acc: 6.1653 Epoch 2/49 ---------- val Loss: 13.9112 Acc: 22.0500 train Loss: 4.1538 Acc: 6.3280 Epoch 3/49 ---------- val Loss: 11.4187 Acc: 26.7500 train Loss: 3.9667 Acc: 6.3493 Epoch 4/49 ---------- val Loss: 10.9690 Acc: 26.1000 train Loss: 3.9197 Acc: 6.4187 Epoch 5/49 ---------- val Loss: 11.9713 Acc: 25.9000 train Loss: 3.8055 Acc: 6.4587 Epoch 6/49 ---------- val Loss: 10.2514 Acc: 27.0000 train Loss: 3.7262 Acc: 6.5120 Epoch 7/49 ---------- val Loss: 10.5059 Acc: 26.8000 train Loss: 3.6343 Acc: 6.5333 Epoch 8/49 ---------- val Loss: 10.9338 Acc: 26.8500 train Loss: 3.5882 Acc: 6.6320 Epoch 9/49 ---------- val Loss: 10.3310 Acc: 26.6500 train Loss: 3.4162 Acc: 6.6027 Epoch 10/49 ---------- val Loss: 9.8062 Acc: 26.5000 train Loss: 3.3022 Acc: 6.6640 Epoch 11/49 ---------- val Loss: 9.2858 Acc: 26.7500 train Loss: 3.2339 Acc:[...]4.0 Driving Driving the car using our modelWe now have our model being trained. 
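Before driving with it, a side note on the metrics printed at the end of training above: len(dataloaders[phase]) is the number of batches, not the number of samples, so the reported "Acc" is the average number of correct predictions per batch rather than a fraction in [0, 1] (hence values such as 26.75 with a validation batch size of 30). If a conventional accuracy is wanted, a helper along the following lines can be used for the evaluation phase (a sketch only; evaluate is not part of the original scripts):

```
# Sketch of a hypothetical evaluation helper (not part of the original train.py):
# it normalizes by the number of samples instead of the number of batches.
import torch

def evaluate(model, criterion, loader):
    model.train(False)                       # evaluation mode, as in train()
    total_loss, total_correct, n_samples = 0.0, 0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            labels = labels.view(labels.size(0))
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            total_loss += criterion(outputs, labels).item() * inputs.size(0)
            total_correct += torch.sum(preds == labels).item()
            n_samples += inputs.size(0)
    return total_loss / n_samples, total_correct / n_samples   # accuracy is now in [0, 1]
```

The model-selection logic (keeping the weights of the epoch with the best validation score) is unaffected either way, since it only compares epochs with one another.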
We will now use this trained model to automate the steering of the car.Let's take a closer look at what happens in the loop:`s, r, done, info = env.step(a) s = s.copy() s = PIL.Image.fromarray(s)`We get the pixel matrix and read it with PIL (so that it is in the same format as the images read by the dataloader during training).`input = transform_driving_image(s)`We apply the same transformations as for the dataset (cropping of the sides of the image, conversion to Tensor, and normalization between -1 and 1).`input = Variable(input[None, :], volatile=True)`We convert the `Tensor` to a `Variable` to give it as input to the neural network. The argument `volatile=True` saves memory by telling PyTorch not to record the operations performed (useful when you do not want to train the model on these examples).`output = Softmax()(model(input)) _, index = output.max(1) # index is a tensor index = index.data[0] # get the integer inside the tensor`We give the image to the network and get the output. It is a tensor of size 3, where each entry corresponds to the score of one action (left, right or straight). The action to choose is the one with the highest score (we pass the output through a Softmax to obtain values between 0 and 1). We get the action with the function `max`, which returns the maximum value and its index.`a[0] = id_to_steer[index] * output.data[0, index] * 0.3 # lateral acceleration env.render()``a[0]` is the lateral (steering) command. It is given the sign 0, +1 or -1 depending on the action chosen by the neural network. We multiply this by a coefficient of 0.3 to avoid overly abrupt steering, and also by the probability of the action given by the network (this produces stronger corrections when the network is sure of its action, and gentler ones when it hesitates).**After launching**, you have to control the speed of the car with the up and down keys of the keyboard; a condensed sketch of this inference step is given just below.
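Here is a condensed sketch of that inference step, wrapped in a hypothetical helper (steer_from_frame is not a function from the original scripts; model, transform_driving_image and id_to_steer are assumed to be defined as in the previous sections, and torch.no_grad() is used as a stand-in for the older volatile=True flag):

```
import PIL.Image
import torch
from torch.nn import Softmax

def steer_from_frame(frame, model, transform_driving_image, id_to_steer, gain=0.3):
    """Map one raw CarRacing frame to a steering value in [-gain, gain]."""
    img = PIL.Image.fromarray(frame.copy())          # same format as the training images
    inp = transform_driving_image(img)[None, :]      # add the batch dimension
    with torch.no_grad():
        probs = Softmax(dim=1)(model(inp))           # scores -> probabilities
    prob, index = probs.max(1)                       # best action and its probability
    return id_to_steer[index.item()] * prob.item() * gain
```

With such a helper, the body of the driving loop reduces to a[0] = steer_from_frame(s, model, transform_driving_image, id_to_steer) between env.step(a) and env.render().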
The direction will be chosen by the neural network.![title](Images\car-running-auto.gif)[Back to top](Introduction)# %%capture # drive.py id_to_steer = { LEFT: -1, RIGHT: 1, GO: 0, } if __name__=='__main__': quit=False if len(sys.argv) < 2: sys.exit("Usage : python drive.py path/to/weights") # load the model #model_weights = "models2/model-1.weights" #model_weights = sys.argv[1] model = CustomModel() model.load_state_dict(x) env = gym.make('CarRacing-v0').env env.reset() a = np.array([0.0, 0.0, 0.0]) def key_press(k, mod): global restart global quit if k == 65307: quit = True if k==key.LEFT: a[0] = -1.0 if k==key.RIGHT: a[0] = +1.0 if k==key.UP: a[1] = +1.0 if k==key.DOWN: a[2] = +0.8 # set 1.0 for wheels to block to zero rotation def key_release(k, mod): if k==key.LEFT and a[0]==-1.0: a[0] = 0 if k==key.RIGHT and a[0]==+1.0: a[0] = 0 if k==key.UP: a[1] = 0 if k==key.DOWN: a[2] = 0 env.viewer.window.on_key_press = key_press env.viewer.window.on_key_release = key_release env.reset() # initialisation for i in range(50): env.step([0, 0, 0]) env.render() i = 0 while not quit : env.render(close = False) s, r, done, info = env.step(a) s = s.copy() # We transform our numpy array to PIL image # because our transformation takes an image as input s = PIL.Image.fromarray(s) input = transform_driving_image(s) input = Variable(input[None, :], volatile=True) output = Softmax()(model(input)) _, index = output.max(1) index = index.data[0].item() print(id_to_steer[index]) a[0] = id_to_steer[index] * output.data[0, index] * 0.3 # lateral acceleration # env.render() env.render(close=True) # env.close()[2020-12-12 16:05:04,564] Making new env: CarRacing-v0LIGHT GBM RESGESYON!pip install lightgbm # Yazılım dünyasında yeni çıkan herşey yüzde 90 daha iyi değildir. Yeni çıkan şeylerinde bir eskimesini beklemek gerekir. # Yukarıdaki kurulumda hata olursa - conda için # conda install -c conda-forge lightgbm import numpy as np import pandas as pd from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import mean_squared_error, r2_score import matplotlib.pyplot as plt from sklearn import model_selection df = pd.read_csv("verisetleri\Hitters.csv") df = df.dropna() dms = pd.get_dummies(df[['League', 'Division', 'NewLeague']]) y = df["Salary"] X_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64') X = pd.concat([X_, dms[['League_N', 'Division_W', 'NewLeague_N']]], axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) import lightgbm from lightgbm import LGBMRegressor lgb_model = LGBMRegressor().fit(X_train, y_train) lgb_model.get_params() y_pred = lgb_model.predict(X_test) np.sqrt(mean_squared_error(y_test, y_pred))MODEL TUNNINGlgb_model = LGBMRegressor() lgb_model.get_params() # PARAMETRELER NASIL SEÇİLİR # Öntanımlı parametrelere bakarak, buradaki değerin etrafındaki değerler denenebilir. lgbm_params = {"learning_rate" : [0.01, 0.1, 0.5, 1], "n_estimators" : [20, 40, 100, 1000], "max_depth" : [1, 2, 3, 4, 5, 6, 7 ,8 ,9, 10] } lgbm_cv_model = GridSearchCV(lgb_model, lgbm_params, cv=10, n_jobs=-1, verbose=2).fit(X_train, y_train) lgbm_cv_model.best_params_ lgbm_tuned = LGBMRegressor(learning_rate=0.1, max_depth=6, n_estimators=20).fit(X_train, y_train) y_pred = lgbm_tuned.predict(X_test) np.sqrt(mean_squared_error(y_test, y_pred))A number of common questions come up about basic numbers reporting for the final list. 
This notebook explores some ways that we can take our intermediate SGCN summary with the results of taxonomic authority consultation and answer those questions. Pandas grouping is particularly useful in this context.import pandas as pd sgcn_summary = pd.read_csv('sgcn_taxonomy_check.csv', low_memory=False)Based on the taxonomic lookup process, we end up with final identified taxa at various levels of the taxonomic hierarchy. We record that detail in a taxonomic_rank property retrieved from the matching document in ITIS or WoRMS. In many cases, we want to report only on taxa identified at the species level, which we do in subsequent steps, but we should look at the distribution of the data across ranks first.for rank, group in sgcn_summary.groupby("taxonomic_rank"): print(rank, len(group))Class 4 Family 199 Form 1 Genus 312 Order 31 Phylum 4 Species 15560 Subclass 5 Subfamily 5 Suborder 3 Subspecies 1525 Variety 526We may also want to limit our exploration to just those species that are included in the latest reporting period, 2015. This codeblock sets up a new dataframe filtered to only species reported in 2015.matched_species = sgcn_summary.loc[(sgcn_summary["taxonomic_rank"] == "Species") & (sgcn_summary["2015"].notnull())] print(len(matched_species))12202Now we can look at the distribution of species that were successfully aligned with taxonomic authorities (aka the National List) by the high level taxonomic group assigned based on the mapping of logical groups to higher level taxonomy.for tax_group, group in matched_species.groupby("taxonomic_group"): print(tax_group, len(group))Amphibians 289 Birds 772 Fish 1195 Mammals 414 Mollusks 1447 Other 19 Other Invertebrates 3932 Plants 3812 Reptiles 322We might also want to look further at what happened in the taxonomic matching process. We generated a field in the processing metadata that captures the overall method used in matching a submitted name string to a taxon identifier.* Exact Match - means that the submitted name was found to match exactly one valid ("accepted" in the case of ITIS plants) taxon* Fuzzy Match - means that the original submitted name had a misspelling of some kind but that we were able to find it with a fuzzy search* Followed Accepted TSN or Followed Valid AphiaID - means that the original submitted name string found a match to a taxon that is no longer considered valid and our process followed the taxonomic reference to retrieve a valid taxon for use* Found multiple matches - means that our search on submitted name string found multiple matches for the name (often homynyms) but that only a single valid taxon was available to give us an acceptable matchfor match_method, group in matched_species.groupby("match_method"): print(match_method, len(group))Exact Match 10823 Followed Accepted TSN 855 Followed Valid AphiaID 74 Found multiple matches 59 Fuzzy Match 391If we really want to dig into the details, we can pull just the details for those cases where the submitted name string does not match the final valid scientific name we matched to in the taxonomic authority. This codeblock outputs a subset dataframe with just the pertinent details.matched_species.loc[matched_species["lookup_name"] != matched_species["valid_scientific_name"]][["lookup_name","valid_scientific_name","match_method"]]OverviewThis is an auxiliary notebook for the [main analyses](Main.ipynb), meant for documenting the data access and cleaning processes. 
Both streams of data (prisoner counts and overall populations, respectively) require additional steps before they are ready for analysis. Some of these steps can be automated in a Python environment; certain minor adjustments must be done manually, and are also explained here.*__Note__*: The data access steps here are documented for transparency; it is not necessary to do anything other than use in-repository, cleaned files to replicate the main analyses. Table of ContentsI. [Prison Populations](data1)        [Description](d1)        [Access](a1)         [Cleaning](a2)III. [Overall Populations](data3)        [Description](d3)        [Access](a5)         [Cleaning](a6) Prison Populations Description Recall that the [Communicating With Prisoners](https://www.acrosswalls.org) project compiled counts of [Prisoners By State And Sex](https://www.acrosswalls.org/datasets/prisoners-us-state-sex-panel/) from 1880 to 2010. This is a count from federal and state correctional facilities, itself coming from the [US Bureau of Justice Statistics Prisoner Series](https://www.bjs.gov/index.cfm?ty=pbse&tid=0&dcid=0&sid=40&iid=0&sortby=&page=paging&curpg=4). Access To completely automate the data cleaning process (without using the Google Sheets API for Python) would require several steps. The URL of the public sheet hosted by the project would have to include the right tab information, range information (to exclude the extra mis-formatted parts of the CSV), and a specifier to return the result as a CSV file. HTML parsers would be needed to scrape together a CSV from the raw response of a GET request. The Google API involves OAuth setup, the Google Console, and several copy/paste operations. It is simpler to document the download of the file and additional steps (programmed or manual) to ready the data. Inclusion of the raw and clean files means future users need not perform data processing unless interested.1. We start by locating the Google Sheet hosted by the project, and visiting the relevant [panel tab](https://docs.google.com/spreadsheets/d/1V4_0T_lJPVBhKuMirhXncjWMI3C6crHgcD84qv1Y2JY/editgid=1100768042).![img1](./figures/data1_snippet1.png) 2. It suffices to download this sheet only, as a CSV file, as highlighted in the picture.![img2](./figures/data1_snippet2.png) Cleaning The result is the file store in [data/raw/prisoner_counts.csv](./data/raw). From here, we can automate data processing steps. Run all these steps in order, exactly once (it is best to restart from __*1*__ if errors are made).1. We load the file into a `pandas.DataFrame` object, taking care to handle the ill-formatted raw CSV file.import pandas as pd df_pp_raw = pd.read_csv('./data/raw/prisoner_counts.csv', skiprows=3) df_pp_raw.head()2. Drop the right-most non-tabular columns, and the unneeded ratio and aglev columns.df_pp_raw = df_pp_raw.drop(['Unnamed: 8', 'source and notes', 'sexratio', 'aglev'], axis=1) df_pp_raw.head()3. Regardless of other filters we apply, we need a column totalling male and female prisoner counts.df_pp_raw['total'] = df_pp_raw.males + df_pp_raw.females df_pp_raw.head()4. To preserve the table when transformation are applied, we replace certain missing `NaN` values. Regions are `NaN` when the row is an aggregate (e.g., an extraneous row for total federal count rather than for one state). 
We can impute this description in place of `NaN`.df_pp_raw['region'].fillna('aggregate', inplace=True) df_pp_raw.head()Observe that in some years, multiple totals are available from the `fedstate` source (US Bureeau of Justice Statistics) and the `census-total` source (Decennial Census). There are often discrepancies. While it is ideal to use the same source consistently, not all sources are available for all years, and the discrepancies imply that one source or the other is missing counts. To resolve years with multiple sources available, we can take the higher count, noting that there is a compromise in experimental consistency.5. Drop the class column. Group by year, region, and geography, then perform a maximum aggregation of the count columns.df_pp_raw = df_pp_raw.drop(['class'], axis=1) df_pp_grouped = df_pp_raw.groupby(['year', 'region', 'geography']).max().reset_index() df_pp_grouped.head()6. At this point the data can be used in the main analyses. We will save this cleaned version as a CSV at [data/clean/prisoner_counts.csv](./data/clean).df_pp_grouped.to_csv('./data/clean/prisoner_counts.csv')Overall Populations Description US population data comes primarily from the US Census Bureau: via the decennial Census, which filled decades with constant-growth estimates until 1960, and the yearly American Community Survey thereafter. The aggregator here is the international [World Bank Group](https://www.worldbank.org/). Specifically, its [World Development Indicators](https://datacatalog.worldbank.org/dataset/world-development-indicators) project collects vast development data for several countries. Its [aggregation of population over time in the US](https://data.worldbank.org/indicator/SP.POP.TOTL?locations=US) is cited as a direct combination of the Census Bureau data. State-level granularity is thus far not offered by any known aggregator for sufficient years, and impractical to self-compile. Access Since only a national-level series is required, there are several ways to access the World Development Indicators data. Third-party APIs can wrap the World Bank HTTP GET request support, but the Group's [online database](https://databank.worldbank.org/source/world-development-indicators) interface is a simpler approach. It allows us to select only the small slice of data we need, and store it in-repository so future users need not re-acquire it.1. Visit the [interface](https://databank.worldbank.org/source/world-development-indicators) and select "World Development Indicators" for "Database".       ![img](./figures/data3_snippet1.png)2. Select "United States" for "Country".       ![img](./figures/data3_snippet2.png)3. Select "Population, total" for "Series".       ![img](./figures/data3_snippet3.png)4. For "Time", click the "select all" icon.       ![img](./figures/data3_snippet4.png)5. Download the slice in its current form. ![img](./figures/data3_snippet5.png) Cleaning The result of the __Access__ step is the raw file stored in [data/raw/overall_population.xlsx](./data/raw/), ready for cleaning. Execute in order, once (restart if errors made).1. Load into `pandas.DataFrame` object, with attention to any mis-formatting.df_op_raw = pd.read_excel('./data/raw/overall_population.xlsx', nrows=1, thousands=',') df_op_raw.iloc[:,:10]2. Drop three erroneous columns (two invalid, one for the blank 2020 count).df_op_raw = df_op_raw.drop([' ', 'Unnamed: 7', '2020'], axis=1) df_op_raw.iloc[:, :10]3. 
Pivot the table and make the values numeric.df_pivot = df_op_raw.transpose().reset_index() df_pivot.columns = ['year', 'total_population'] df_pivot.year = df_pivot.year.astype(int) df_pivot.total_population = df_pivot.total_population.astype(int) df_pivot.head()4. We save this result as a clean CSV file, ready for main analyses, in [/data/clean/overall_population.csv](./data/clean).df_pivot.to_csv('./data/clean/overall_population.csv')# Google ドライブをマウントするには、このセルを実行してください。 from google.colab import drive drive.mount('/content/drive') %cd drive/My Drive %tensorflow_version 2.x import tensorflow as tf %cd /content/drive/My Drive try: import imp imp.reload(dqn) except: import dqn agent = dqn.Agent(spread=10, pip_cost=1000, leverage=500, min_lots=0.01, assets=10000, available_assets_rate=0.4, restore=not True, step_size=96, n=4, lr=1e-3) %cd /content agent.run() import numpy as np def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum() # agent.test(spread=10, pip_cost=1000, los_cut=150,test_data=not True) for _ in range(1): tree_idx, replay = agent.memory.sample(128) states = np.array([a[0][0] for a in replay], np.float32) new_states = np.array([a[0][3] for a in replay], np.float32) actions = np.array([a[0][1] for a in replay]) rewards = np.array([a[0][2] for a in replay], np.float32).reshape((-1, 1)) with tf.GradientTape() as tape: q = agent.model(states) target_q = agent.target_model(new_states).numpy() arg_q = agent.model(new_states).numpy() random = np.random.rand(actions.shape[0]) arg_q = np.argmax(arg_q, 1) # arg_q = np.array([np.argmax(arg_q[i]) if random[i] > 0.1 else np.random.randint(arg_q.shape[1]) for i in # range(arg_q.shape[0])]) q_backup = q.numpy() for i in range(len(rewards)): # q_backup[i, actions[i]] = rewards[i] if I < 1010 and not self.restore else rewards[i] + 0.2 * target_q[i, np.argmax(arg_q[i])] q_backup[i, actions[i]] = rewards[i] + 0.1 * target_q[i, arg_q[i]] mse = tf.reduce_mean(tf.reduce_sum(tf.abs(q_backup - q) ** 1.5, -1)) ae = np.array([sum(i) for i in np.abs(q_backup - q.numpy())]) agent.memory.batch_update(tree_idx, ae) gradients = tape.gradient(mse, agent.model.trainable_variables) # gradients = [(tf.clip_by_value(grad, -10.0, 10.0)) # for grad in gradients] # agent.model.optimizer.apply_gradients(zip(gradients,agent.model.trainable_variables)) # print(np.mean(ae)) # print(q[0:5]) print(mse) print(q) # print(np.mean(rewards))) rewards !pip install ta import numpy as np import pandas as pd import ta from sklearn.preprocessing import MinMaxScaler def gen_data(file_path="gbpjpy15.csv"): try: print("load file") df = pd.read_csv(file_path) except: print("Use 'python gen_data.py 'file_path''") return df["Close1"] = df["Close"] * 100 ma = np.array(ta.trend.ema(df["Close1"], 7) - ta.trend.ema(df["Close1"], 4)).reshape((-1,1)) ma2 = np.array(df["Close1"] - ta.trend.ema(df["Close1"], 7)).reshape((-1,1)) b1 = np.array(ta.volatility.bollinger_hband(df["Close1"]) - df["Close1"]).reshape((-1,1)) b2 = np.array(ta.volatility.bollinger_lband(df["Close1"]) - df["Close1"]).reshape((-1,1)) macd = np.array(ta.trend.macd_diff(df["Close1"])).reshape((-1,1)) rsi = np.array(ta.momentum.rsi(df["Close"]) - ta.momentum.rsi(df["Close"], 7)).reshape((-1,1)) stoch = np.array(ta.momentum.stoch_signal(df["High"], df["Low"], df["Close"]) - ta.momentum.stoch(df["High"], df["Low"], df["Close"])).reshape((-1,1)) x = np.concatenate([ma, macd, rsi, stoch], -1) y = np.array(df[["Open"]]) atr = 
np.array(ta.volatility.average_true_range(df["High"], df["Low"], df["Close"])) high = np.array(df[["High"]]) low = np.array(df[["Low"]]) print("gen time series data") gen = tf.keras.preprocessing.sequence.TimeseriesGenerator(x, y, 30) x = [] y = [] for i in gen: x.extend(i[0].tolist()) y.extend(i[1].tolist()) x = np.asanyarray(x)[100:] y = np.asanyarray(y)[100:] atr = atr[-len(y):].reshape((-1, 1)) scale_atr = atr high = high[-len(y):].reshape((-1, 1)) low = low[-len(y):].reshape((-1, 1)) np.save("x", x) np.save("target", np.array([y, atr, scale_atr, high, low])) print("done\n") %cd /content/drive/My Drive gen_data() def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum() a1 = np.argmax(q,1) q1 = np.abs(q) / np.sum(np.abs(q),1).reshape((-1,1)) * (np.abs(q) / q) q1 += .05 * np.random.randn(q1.shape[0], q1.shape[1]) a2 = np.argmax(q1,-1) np.mean(a1 == a2) tree_idx, replay = agent.memory.sample(128) 3 in [1,2]SETTINGSThe notebook implements stacking ensemble of predictions coming from different variants of the LightGBM models implemented in `notebook_03_modeling.ipynb` over the course of working on the project. Stacking is implemented using higher-level LightGBM models. The ensembled predictions are exported as `sub_stack_[name].csv`.##### LIBRARIES import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scipy.stats from scipy.stats import gmean import os import time import datetime import random import multiprocessing import pickle import warnings import gc from tqdm import tqdm import importlib import sys from sklearn.model_selection import KFold import lightgbm as lgb from sklearn.metrics import mean_squared_error ##### SETTINGS warnings.filterwarnings('ignore') pd.set_option('display.max_columns', None) plt.style.use('dark_background') %matplotlib inline gc.enable()IMPORT PREDICTIONS##### IMPORT OOF PREDS # version threshold min_lgb_version = 17 min_df_version = 12 # prepare model names models = os.listdir('../oof_preds') models = [m for m in models if m != '.DS_Store'] models = [m for m in models if int(m.split('_')[1][1:]) >= min_lgb_version] models = [m for m in models if int(m.split('_')[3][1:]) >= min_df_version] models = [m.replace('.npy', '') for m in models] models = sorted(models) print('OOF predictions:', len(models)) models # preprocessing loop for m in models: # load preds tmp_tr = np.load('../oof_preds/' + m + '.npy') tmp_te = pd.read_csv('../submissions/sub_' + m + '.csv', sep = '|') # split OOF preds tmp_preds_oof = tmp_tr[0] tmp_preds_oof = pd.DataFrame(tmp_preds_oof.reshape(-1)) if m == models[0]: reals_oof = tmp_tr[1] reals_oof = pd.DataFrame(reals_oof.reshape(-1)) # split ID from test preds if m == models[0]: id_test = tmp_te[['itemID']] tmp_te = tmp_te[['demandPrediction']] # rename columns reals_oof.columns = ['target'] tmp_preds_oof.columns = [m] tmp_te.columns = [m] # stack preds if m == models[0]: preds_oof = tmp_preds_oof preds_test = tmp_te else: preds_oof = pd.concat([preds_oof, tmp_preds_oof], axis = 1) preds_test = pd.concat([preds_test, tmp_te], axis = 1) # extract OOF prices and targets reals_oof = tmp_tr[1].reshape(-1) prices_oof = tmp_tr[2].reshape(-1) # display information print('- Train shape:', preds_oof.shape) print('- Test shape:', preds_test.shape) # rename objects y = pd.Series(reals_oof.copy()) X = preds_oof.copy() X_prices = pd.Series(prices_oof).copy() X_test = preds_test.copy() # read items items = 
pd.read_csv('../data/prepared/items_v1.csv', compression = 'gzip') print(items.shape)STACKING##### MODULES sys.path.append('../codes') from evaluation import asymmetric_mse, asymmetric_mse_eval, profit from postprocessing import postprocess_preds ##### LIST RELEVANT FEATURES drop_feats = [] features = [var for var in X.columns if var not in drop_feats] print(len(features), 'features') features ##### MODELING PARAMETERS # random seed seed = 777 # cross-validation num_folds = 5 shuffle = True # rounds and options cores = 4 stop_rounds = 100 verbose = 100 # LGB parameters lgb_params = { 'boosting_type': 'goss', 'objective': 'rmse',#asymmetric_mse, 'metrics': 'rmse', 'n_estimators': 1000, 'learning_rate': 0.1, 'bagging_fraction': 0.8, 'feature_fraction': 0.8, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'silent': True, 'verbosity': -1, 'nthread' : cores, 'random_state': seed, } # data partitioning folds = KFold(n_splits = num_folds, random_state = seed, shuffle = shuffle) ##### CROSS-VALIDATION LOOP # placeholders oof_profit = [] preds_test = np.zeros(items.shape[0]) preds_oof = np.zeros(X.shape[0]) reals_oof = np.zeros(X.shape[0]) oof_rmse = [] oof_profit = [] oracle_profit = [] # cross-validation for fold, (trn_idx, val_idx) in enumerate(folds.split(X, y)): # data partitioning X_train, y_train = X[features].iloc[trn_idx], y.iloc[trn_idx] X_valid, y_valid = X[features].iloc[val_idx], y.iloc[val_idx] # training clf = lgb.LGBMRegressor(**lgb_params) clf = clf.fit(X_train, y_train, eval_set = [(X_train, y_train), (X_valid, y_valid)], eval_metric = 'rmse', sample_weight = X_prices.iloc[trn_idx].values, eval_sample_weight = [X_prices.iloc[trn_idx].values, X_prices.iloc[val_idx].values], early_stopping_rounds = stop_rounds, verbose = verbose) # predictions reals_oof[val_idx] = y_valid preds_oof[val_idx] = postprocess_preds(clf.predict(X_valid)) preds_test += postprocess_preds(clf.predict(X_test)) / num_folds # evaluation oof_rmse.append(np.sqrt(mean_squared_error(reals_oof[val_idx], preds_oof[val_idx]))) oof_profit.append(profit(reals_oof[val_idx], preds_oof[val_idx], price = X_prices.iloc[val_idx].values)) oracle_profit.append(profit(reals_oof[val_idx], reals_oof[val_idx], price = X_prices.iloc[val_idx].values)) # information print('-' * 65) print('FOLD {:d}/{:d}: RMSE = {:.2f}, PROFIT = {:.0f}'.format(fold + 1, num_folds, oof_rmse[fold], oof_profit[fold])) print('-' * 65) print('') # print performance oof_rmse = np.sqrt(mean_squared_error(reals_oof, preds_oof)) oof_profit = profit(reals_oof, preds_oof, price = X_prices.values) / tmp_tr.shape[1] oracle_profit = profit(reals_oof, reals_oof, price = X_prices.values) / tmp_tr.shape[1] print('') print('-' * 65) print('- AVERAGE RMSE: {:.2f}'.format(np.mean(oof_rmse))) print('- AVERAGE PROFIT: {:.0f} ({:.2f}%)'.format(np.mean(oof_profit), 100 * np.mean(oof_profit) / np.mean(oracle_profit))) print('-' * 65)SUBMISSION##### SUBMISSION # model name sub_name = 'stack_' + str(len(models)) + 'preds' # save submissiion sub = pd.read_csv('../submissions/sample_submission.csv', sep = '|') sub['demandPrediction'] = postprocess_preds(preds_test) sub.to_csv('../submissions/sub_' + sub_name + '.csv', sep = '|', index = False) print(sub.shape) sub.head()Plot subhalosimport sys, os sys.path.append('../') import logging import numpy as np import matplotlib.pyplot as plt import astropy %matplotlib inline import pickle from simulation.wrapper import augmented_data from simulation.units import M_s import paper_settings logging.basicConfig( format='%(asctime)-5.5s %(name)-20.20s 
%(levelname)-7.7s %(message)s', datefmt='%H:%M', level=logging.INFO ) paper_settings.setup()Settingsn_cols = 4 n_rows = 2 ###data_visualization ### x_train == lens image x_train = np.load("../data/samples/x_train.npy") ### z_train == latent variable (e.g. redshift, M_halo...etc) z_train = np.load("../data/samples/z_train.npy") ### theta == parameters of interest (f_sub, beta) theta_alt_train = np.load("../data/samples/theta_alt_train.npy") theta_train = np.load("../data/samples/theta_train.npy") ### log_r_xz_train = np.load("../data/samples/log_r_xz_train.npy") t_xz_train= np.load("../data/samples/t_xz_train.npy") print("data shape:", x_train[0].shape, theta_alt_train.shape, theta_train.shape, log_r_xz_train.shape, t_xz_train.shape) #print() for i in range(10): print("theta train", theta_train[i], "theta_alt_train", theta_alt_train[i], "z_train", z_train[i]) print("log_r_xz_train", log_r_xz_train[i], "t_xz_train", t_xz_train[i]) plt.imshow(x_train[i]) plt.show()data shape: (64, 64) (10000, 2) (10000, 2) (10000,) (10000, 2) theta train [ 0.0183317 -1.74972747] theta_alt_train [ 0.17710894 -2.01112602] z_train [ 2.75936022e+76 2.14239556e+35 5.56508302e-01 2.36840786e+02 -6.02625720e-02 -1.55926643e-01 8.47578789e-01 2.10000000e+01 1.50987736e-02 6.00000000e+00 5.35253914e-04 2.00000000e+00 1.46549844e-03] log_r_xz_train 1.2657570937785695198 t_xz_train [33.83013209 -1.3093534 ]Make data#imgs, sub_latents, global_latents = pickle.load( open( "simulations_data.pickle", "rb" )) #imgs, sub_latents, global_latents = pickle.load( open( "data.pickle", "rb" )) #theta, _, imgs, _, _, _,_, sub_latents, global_latents = augmented_data( # f_sub=0.05, beta=-1.9, n_images=n_cols*n_rows - 1, mine_gold=False, # draw_alignment=True, draw_host_mass=True, draw_host_redshift=True #) pickle.dump((imgs, sub_latents, global_latents), open( "simulations_data.pickle", "wb" ) )Plot subhalosfig, caxes = paper_settings.grid_width( n_cols, n_rows, n_caxes=2, large_margin=0.025, small_margin=0.025, sep=0.025, cbar_width=0.04 ) for i in range(n_cols*n_rows - 1): ax = plt.subplot(n_rows, n_cols, i+1) im = plt.imshow( np.log10(imgs[i]), vmin=2.3, vmax=3.15, cmap='gist_gray', extent=(-3.2,3.2,-3.2,3.2), origin="lower" ) if i == 0: plt.plot([-2.9, -1.9], [-2.9, -2.9], c="white", lw=1.5, ls="-") plt.text(-2.4, -2.65, "$1''$", va="center", ha="center", color="white") sc0 = plt.scatter( sub_latents[i][:,1], sub_latents[i][:,2], s=10., c=np.log10(sub_latents[i][:,0]/M_s), cmap=paper_settings.CMAP2, # "plasma", vmin=6.8, vmax=9.0, ) if i == 0: cbar1 = plt.colorbar(sc0, cax=caxes[0]) cbar1.set_label(r'$\log_{10} \; (m_{200} / \mathrm{M}_{\odot})$') cbar1.set_ticks([7,7.5,8,8.5,9]) caxes[0].plot([14.3], [8.92], 'o', markersize=5, c=paper_settings.COLOR_ALIGN, clip_on=False, zorder=100) sc1 = plt.scatter( [global_latents[i,4]], [global_latents[i,5]], s=200., marker="+", c=[np.log10(global_latents[i,0]/M_s)], cmap=paper_settings.CMAP2, # "Greens_r", vmin=12.0, vmax=14.0, ) plt.xlim(-3.2,3.2) plt.ylim(-3.2,3.2) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if i == 0: cbar2 = plt.colorbar(sc1, cax=caxes[1]) cbar2.set_label(r'$\log_{10} \; (M_{200} / \mathrm{M}_{\odot})$') cbar2.set_ticks([12.5, 13.,13.5,14]) caxes[1].plot([19.5], [13.93], '+', markersize=10, c=paper_settings.COLOR_ALIGN, clip_on=False, zorder=100) plt.savefig("../figures/simulations.pdf", dpi=300)The Basics of StringsWatch the full [C 101 video](https://www.youtube.com/watch?v=JSpC7Cz64h0&list=PLdo4fOcmZ0oVxKLQCHpiUWun7vlJJvUiN&index=3) for this 
module. What is a String?A string is a sequence of characters. A handy metaphor is a friendship bracelet, where you string together letters to make a name. Strings and String Literals`firstFriend` and `secondFriend` are variables of strings. The line within `Console.WriteLine` is also a string. It's a **string literal**. A string literal is text what represents a constant string.> Try that out with the following code. Press play and see what comes out.>> Next, try changing the variables to see different names.string firstFriend = "Maria"; string secondFriend = "Sophia"; Console.WriteLine($"My friends are {firstFriend} and {secondFriend}");My friends are Maria and SophiaString PropertiesAs you explore more with strings, you'll find that strings are more than a collection of letters. You can find the length of a string using `Length`. `Length` is a **property** of a string and it returns the number of characters in that string.> Try that out by seeing how long the names of the friends are:Console.WriteLine($"The name {firstFriend} has {firstFriend.Length} letters."); Console.WriteLine($"The name {secondFriend} has {secondFriend.Length} letters.");The name Maria has 5 letters. The name Sophia has 6 letters.String Methods Leading and Trailing SpacesSuppose your strings have leading or trailing spaces (also called **white space**) that you don't want to display. You want to trim the spaces from the strings. The `Trim` method and related methods `TrimStart` and `TrimEnd` do that work. You can just use those methods to remove leading and trailing spaces.> Play around with trimming in the following code. The brackets are there to help you see all the white space.**Editing Note: Should this be all one thing? Or multiple breakups of code and markdown?*string greeting = " Hello World! "; Console.WriteLine($"[{greeting}]"); string trimmedGreeting = greeting.TrimStart(); Console.WriteLine($"[{trimmedGreeting}]"); trimmedGreeting = greeting.TrimEnd(); Console.WriteLine($"[{trimmedGreeting}]"); trimmedGreeting = greeting.Trim(); Console.WriteLine($"[{trimmedGreeting}]");[ Hello World! ] [Hello World! ] [ Hello World!] [Hello World!]ReplaceYou can also replace substrings with other values. For example, in the code below, you can take "Hello World!" and replace "Hello" with "Greetings", to make "Greetings World!"> Try it out. What else could you replace "Hello" with?string sayHello = "Hello World!"; Console.WriteLine(sayHello); sayHello = sayHello.Replace("Hello", "Greetings"); Console.WriteLine(sayHello);Hello World! Greetings World!Changing CaseSometimes you need your strings to be all UPPERCASE or all lowercase. `ToUpper` and `ToLower` do just that.> The following example seems a bit mixed up. Can you fix it so "whisper" is all lowercase, and "shout" is all uppercase?Console.WriteLine("WhiSPer".ToUpper()); Console.WriteLine("sHoUt".ToLower());WHISPER shoutPlaygroundNow it's your turn to play around with what you've learned. Try these exercises:> Create three variables of three different people.>> Find the length of the first person, make the second person all caps, and the third person all lowercase.>> How many letters are in "supercalifragilisticexpialidocious"?>> How many characters are taken out when you trim " friendship bracelet "? 
Does trimming take out the center space?>> What do you want to write?Console.WriteLine("Playground"); Console.WriteLine("supercalifragilisticexpialidocious"); Console.WriteLine(" friendship bracelet ");Playground supercalifragilisticexpialidocious friendship braceletNow You Code 1: Vote or Retire? Part 1Write a program to ask for your age as input, then output whether or not you can vote and whether or not you can retire. Let's assume the voting age is 18 or higher, and the retirement age is 65 or higher.**NOTE:** This program is making two separate decisions, and thus should have two separate if-else statements.Example Run:```Enter your age: 45You can vote.You cannot retire.``` Step 1: Problem AnalysisInputs:Outputs:Algorithm (Steps in Program):#Step 2: write code here age = int(input("enter age:")) if age>64: print("you can vote and you can retire") elif age<65: print("you can vote but you cannot retire work harder") elif age>17: print("you are now a voting citizen") else: print("you cannot vote yet you are too young")enter age:99 you can vote and you can retirePart 2Now that you got it working, re-write your code to handle bad input using Python's `try... except` statement:Example run:```Enter your age: threveThat's not an age!```**Note:** Exception handling is not part of our algorithm. It's a programming concern, not a problem-solving concern!## Step 2 (again): write code again but handle errors with try...except try: age = int(input("enter age:")) if age<0: print("your age is invalid you must be dead") elif age>64: print("you can vote and you can retire") elif age<65: print("you can vote but you cannot retire work harder") elif age>17: print("you are now a voting citizen") else: print("you cannot vote yet you are too young") except: print("INPUT A VALUE")enter age:-1 your age is invalid you must be deadA small model for Wannait Problem statementWe need to develop a model with a recommendation algorithm that satisfies the following criteria:* it can be run on a backend server (it cannot be too expensive to compute)* recommendations can be updated in a reasonable time (15 minutes)* the objects with the highest predictions can be found relatively quickly The idea of the current solution (the model):* A model of 2 matrices, filled with the special parameter lambda = sqrt(mean
(rating) / N) Обоснование лябмды Доказать, что lambda даст минимальный MSE на старте Цель: найти l Условие: способ оценки - MSE MSE >= 0 -> min MSE == 0 модель - константа l Решение: Вариант 1 sum((l - x) ** 2) / N -> min MSE'l = sum((l - x) ** 2 / N)' = sum(2(l - x) / N) = 0 sum(l - x) = 0 тк sum - конечный ряд, то sum(x) - l * N = 0 l = sum(x) / N Вариант 2 У MSE и у l растут из одного и того же места ноги (из задачи maximal likehood) ==> решение задачи, если данные распределены по Пуассону: l = sum(x) / N Цель: найти lambda Условие: l = [lambda for i in range(n_factors)] * [lambda for i in range(n_factors)].T Решение: l = n_factors * lambda ^ 2 lambda = sqrt(l / n_factors) ЧТД * Предсказание - скалярное перемножение векторов факторов пользователей и продуктов Идеи:* использовать доп данные и свести к решению задачи бинарной классификации (лайк / дизлайк)(фичи: результат коллаборативной фильтрации, данные о фильме, данные о пользователе)* использовать ансамбли из дешёвых алгоритмов Импорты и настроечки%config IPCompleter.greedy=True import pandas as pd import numpy as np import scipy import torch import matplotlib.pyplot as plt import math basepath = 'the-movies-dataset/' # количество записей из таблички PART = 100000 # количество данных, взятых из csv epochs = 50 #количество эпох обучения epoch_part = 1000 # количество кортежей (пользователь, продукт, оценка), используемое за эпоху n_factors = 20 # количество строк (столбцов) в матрицах факторов test_split = 1000 # количество кортежей (пользователь, продукт, оценка), используемое для тестированияАнализ и Обработка данных# берём часть данных о рейтингах (для полного охвата нужно >330 GB, что невозможно на обычной машине) ratings_df = pd.read_csv(basepath + 'ratings.csv').iloc[:PART, :] # нормализуем (наивно) рейтинг ratings_df['rating'] = ratings_df['rating'] / 5 # среднее число оценок на пользователя ratings_df.groupby('userId').count().mean() lambda_parameter = math.sqrt(ratings_df['rating'].mean() / n_factors) lambda_parameter # число пользователей и продуктов n_users, n_products = ratings_df.loc[:,['userId', 'movieId']].nunique() n_users, n_products # формируем матричку для Факторизации ratings_pivot_df = ratings_df.pivot(index='userId', columns='movieId', values='rating') ratings_pivot_df.head() ratings = ratings_pivot_df.as_matrix() indexes = np.where(~np.isnan(ratings)) order = np.arange(len(indexes[0])) np.random.shuffle(order) indexes = (indexes[0][order], indexes[1][order]) test_indexes = (indexes[0][:test_split], indexes[1][:test_split]) indexes = (indexes[0][test_split:], indexes[1][test_split:]) indexesМашинное обучение В качестве baseline используем SVDclass MatrixFactorization(torch.nn.Module): def __init__(self, n_users, n_products, n_factors=20): super().__init__() self.user_factors = torch.nn.Parameter(torch.full((n_users, n_factors), lambda_parameter, dtype=torch.float32), requires_grad=True) self.product_factors = torch.nn.Parameter(torch.full((n_factors, n_products), lambda_parameter, dtype=torch.float32), requires_grad=True) def forward(self, user, product): return torch.mm(self.user_factors[user, :], self.product_factors[:, product]) def predict(self, user, product): return self.forward(user, product) def predict_all(self) -> torch.Tensor: return torch.mm(self.user_factors, self.product_factors) model = MatrixFactorization(n_users, n_products, n_factors=n_factors) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = model.to(device) print("using device {}".format(device)) print("model 
parameters:") for parameter in model.parameters(): print(parameter) print(parameter.shape) loss_func = torch.nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) def train(): train_loss_history = [] val_loss_history = [] phases = ['train', 'val'] for epoch in range(epochs): print('\n\nEpoch {} / {}'.format(epoch, epochs)) # shuffle data order = np.arange(len(indexes[0])) np.random.shuffle(order) train_order = order[:epoch_part] val_order = order[epoch_part:2 * epoch_part] for phase in phases: if phase == 'train': optimizer.zero_grad() current_order = train_order current_indexes = (indexes[0][train_order], indexes[1][train_order]) history = train_loss_history else: current_order = val_order current_indexes = (indexes[0][val_order], indexes[1][val_order]) history = val_loss_history rating = ratings[current_indexes[0], :][:, current_indexes[1]] prediction = model.predict(current_indexes[0], current_indexes[1]).to(device) # form mask array mask = torch.zeros(rating.shape) normal_poses = np.where(~np.isnan(rating)) for pose in zip(normal_poses[0], normal_poses[1]): mask[pose[0], pose[1]] = 1 rating = np.nan_to_num(rating) rating = torch.tensor(rating, dtype=torch.float32, device=device) mask = mask.to(device) rating *= mask prediction *= mask loss = loss_func(prediction, rating) running_loss = loss.data.cpu() print('{} loss {}'.format(phase, running_loss)) history.append(running_loss) if phase == 'train': loss.backward() optimizer.step() return train_loss_history, val_loss_history history = train()Epoch 0 / 50 train loss 0.007722136564552784 val loss 0.00791983399540186 Epoch 1 / 50 train loss 0.007995340041816235 val loss 0.007522765547037125 Epoch 2 / 50 train loss 0.007843675091862679 val loss 0.007626995909959078 Epoch 3 / 50 train loss 0.007112433668226004 val loss 0.00733239995315671 Epoch 4 / 50 train loss 0.007285361178219318 val loss 0.007524856366217136 Epoch 5 / 50 train loss 0.007262481842190027 val loss 0.007518996484577656 Epoch 6 / 50 train loss 0.007249865215271711 val loss 0.007170319557189941 Epoch 7 / 50 train loss 0.006905980873852968 val loss 0.006763985846191645 Epoch 8 / 50 train loss 0.006947500631213188 val loss 0.006613616365939379 Epoch 9 / 50 train loss 0.006744941230863333 val loss 0.006455684080719948 Epoch 10 / 50 train loss 0.006553052458912134 val loss 0.006735005881637335 Epoch 11 / 50 train loss 0.006884973030537367 val loss 0.006693677511066198 Epoch 12 / 50 train loss 0.006762245669960976 val loss 0.00652963947504758[...]Визуализация результатов обученияtitles = ['Train Loss', 'Val Loss'] plt.figure(figsize=(10, 10)) for i, values in enumerate(history): plt.subplot(2, 2, i+1) plt.title(titles[i]) plt.grid() if i == 0: color = 'b' else: color = 'orange' plt.plot(values,c=color, label= 'mean: %.2f' % np.array(values).mean()) plt.legend(loc='best') plt.show()Тестирование результатовrating = torch.tensor(ratings[test_indexes[0], :][:, test_indexes[1]], dtype=torch.float32) prediction = model.predict(test_indexes[0], test_indexes[1]).to(device) # form mask array mask = torch.ones(rating.shape) nan_poses = np.where(np.isnan(rating)) for pose in zip(nan_poses[0], nan_poses[1]): mask[pose[0], pose[1]] = 0 rating[pose[0], pose[1]] = 0 rating = rating.to(device) mask = mask.to(device) rating *= mask prediction *= mask loss = loss_func(prediction, rating) test_loss = loss.data.cpu() print('Test loss {}'.format(test_loss)) print('СКО (по смещенной дисперсии (у Netflix в 2010 0.8554 для rating [1..5])): {}'.format(math.sqrt(test_loss)))СКО (по смещенной 
дисперсии (а у Netflix-то 0.8554 для [1..5])): 0.0715981797013906Предсказанияpredictions = model.predict_all() print(predictions)tensor([[0.8033, 0.6568, 0.6705, ..., 0.7390, 0.7629, 0.7629], [0.7265, 0.5940, 0.6064, ..., 0.6684, 0.6900, 0.6900], [0.7045, 0.5760, 0.5880, ..., 0.6481, 0.6690, 0.6690], ..., [0.8309, 0.6793, 0.6935, ..., 0.7644, 0.7891, 0.7891], [0.7269, 0.5943, 0.6067, ..., 0.6688, 0.6904, 0.6904], [0.6950, 0.5683, 0.5801, ..., 0.6395, 0.6601, 0.6601]], device='cuda:0', grad_fn=)Init functiondef getGradient(gray, x = 0, y = 0, useGradient = True): if useGradient: grad = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=x, dy=y, ksize=3) ''' take absolute value of gradient to use negative gradient ''' grad = np.absolute(grad) ''' Normalization of gradient ''' (minVal, maxVal) = (np.min(grad), np.max(grad)) if maxVal - minVal > 0: grad = (255 * ((grad - minVal) / float(maxVal - minVal))).astype("uint8") else: grad = np.zeros(gray.shape, dtype = "uint8") else: grad = cv2.adaptiveThreshold( gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2) return gradProcess imageimg = cv2.imread('assets/1.jpg', 0) vis = cv2.imread('assets/1.jpg') fig, ax = plt.subplots(figsize=(10, 10)) ax.imshow(img, cmap='gray') plt.show() bin_img = getGradient(img, x = 1, useGradient = True) fig, ax = plt.subplots(figsize=(15, 15)) ax.imshow(bin_img, cmap='gray') plt.show() verp = np.sum(bin_img, axis=1) / 255 drawedverp = tl.getDrawProjectionVer(img, verp) bigImg1 = tl.concat_hor((img, drawedverp)) fig, ax = plt.subplots(figsize=(15, 15)) ax.imshow(bigImg1, cmap='gray') plt.show() bigImg1 = tl.concat_hor((bin_img, img)) fig, ax = plt.subplots(figsize=(15, 15)) ax.imshow(bigImg1, cmap='gray') plt.show()Find filter sizehalf = int(np.max(verp) / 2) sliceLine = drawedverp[:,(half-1):(half+1)] contours, hierarchy = cv2.findContours(cv2.cvtColor( sliceLine, cv2.COLOR_BGR2GRAY), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) print(half) heights = [] for cnt in contours: x,y,w,h = cv2.boundingRect(cnt) heights.append(h) medianHeight = int(np.median(np.asarray(heights)) * 1.5) print("medianHeight", medianHeight) drawedverp = cv2.line(drawedverp, (half,0), (half,drawedverp.shape[0]), (0,0,255), 1)Convolve peakskernel = medianHeight verpConvolved = np.convolve(verp, np.ones((kernel,))/kernel, mode='same') drawedverpconv = tl.getDrawProjectionVer(img, verpConvolved)Find peaksbandP1ranges = [] peaks = [] c1 = 0.2 c2 = 0.3 while np.max(verpConvolved) > 40: ybm = np.argmax(verpConvolved) yb0 = tl.findb0(verpConvolved, ybm, c1 * verpConvolved[ybm]) yb1 = tl.findb1(verpConvolved, ybm, c2 * verpConvolved[ybm]) if yb1 - yb0 > medianHeight: bandP1ranges.append((yb0,yb1)) peaks.append((int(verpConvolved[ybm]), ybm)) verpConvolved[yb0:yb1] = 0 # draw peaks for peak in peaks: cv2.circle(drawedverpconv, peak, 5, (255,0,0), -1) # draw bands bandsImg = np.zeros(vis.shape, dtype = np.uint8) for band in bandP1ranges: yt, yb = band bandsImg[yt:yb] = [0,255,0] vis = cv2.addWeighted(vis, 0.6, bandsImg, 0.4, 0) drawedverpconv = cv2.addWeighted(drawedverpconv, 0.6, bandsImg[:, 0:len(drawedverpconv[0]), ...], 0.4, 0) bigImg1 = tl.concat_hor((vis, bin_img, drawedverp, drawedverpconv)) fig, ax = plt.subplots(figsize=(20, 20)) ax.imshow(bigImg1, cmap='gray') plt.show()IID case: all the clients have images of all the classes Centralized topology# IID case: all the clients have images of all the classes # Centralized graph topology (fully-connected network) # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # 
Communication matrix comm_matrix = create_mixing_matrix('centralized', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs) plt.show() graph = nx.from_numpy_matrix(comm_matrix) nx.draw(graph)Ring topology# IID case: all the clients have images of all the classes # Ring graph topology: each client is connected to two neighbours exactly # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('ring', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs1 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs1) plt.show() graph = nx.from_numpy_matrix(comm_matrix) nx.draw(graph)Grid topology# IID case: all the clients have images of all the classes # Grid graph topology: each client is connected to exactly 4 neighbours # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('grid', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs2 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs2) plt.show() graph = nx.from_numpy_matrix(comm_matrix) nx.draw(graph)Star topology# IID case: all the clients have images of all the classes # Star graph topology: each client is connected only to one central machine # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('star', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs3 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs3) plt.show() graph = nx.from_numpy_matrix(comm_matrix) nx.draw(graph)Circular ladder topology# IID case: all the clients have images of all the classes # Circular ladder topology: each client is connected to exactly 3 neighbours # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('ladder', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs4 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) 
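# consensus(global_model, client_models), printed above, is assumed to measure how far each client's
# weights are from the averaged global model (lower values suggesting tighter agreement across the topology).
# The next lines plot the accuracy curve for this ladder topology and draw its communication graph.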
axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs4) plt.show() graph = nx.from_numpy_matrix(comm_matrix) nx.draw(graph)Disconnected graph# IID case: all the clients have images of all the classes # Disconnected graph: no communication between machines # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('disconnected', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs5 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs5) plt.show()0-th round lr 0.1 | average train loss 2.29 | test loss 2.29 | test acc: 0.276 1-th round lr 0.1 | average train loss 2.24 | test loss 2.24 | test acc: 0.429 2-th round lr 0.1 | average train loss 1.81 | test loss 1.67 | test acc: 0.713 3-th round lr 0.1 | average train loss 1.12 | test loss 0.834 | test acc: 0.808 4-th round lr 0.1 | average train loss 0.791 | test loss 0.584 | test acc: 0.850 5-th round lr 0.1 | average train loss 0.608 | test loss 0.463 | test acc: 0.876 6-th round lr 0.1 | average train loss 0.485 | test loss 0.39 | test acc: 0.895 7-th round lr 0.1 | average train loss 0.359 | test loss 0.343 | test acc: 0.906 8-th round lr 0.1 | average train loss 0.314 | test loss 0.311 | test acc: 0.912 9-th round lr 0.1 | average train loss 0.197 | test loss 0.29 | test acc: 0.916 [0.381028, 0.639411, 0.398183, 0.419525, 0.429052, 0.546445, 0.40172, 0.418679, 0.394743, 0.396699, 0.59928, 0.410585, 0.442971, 0.596245, 0.427828, 0.462024, 0.442234, 0.412808, 0.413047, 0.424196, [...]Plotfig, ax = plt.subplots(1, figsize=(12, 9)) ax.set_ylim([0, 1]) x = np.array(range(10)) ax.plot(x, accs, color="blue", label="centralized") ax.plot(x, accs1, color="green", label="ring") ax.plot(x, accs2, color="purple", label="grid") ax.plot(x, accs3, color="yellow", label="star") ax.plot(x, accs4, color="orange", label="ladder") ax.plot(x, accs5, color="red", label="disconnected") # Add a legend, and position it on the lower right (with no box) plt.legend(loc="lower right", title="Graph topology") plt.title("Accuracy curve depending on the network topology, IID case") plt.xlabel("Round") plt.ylabel("Accuracy") plt.show()NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9].# NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. # Centralized graph topology (fully-connected network) # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('centralized', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs) plt.show() # NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. 
# Ring graph topology: each client is connected to two neighbours exactly # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('ring', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs1 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs1) plt.show() # NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. # Grid graph topology: each client is connected to exactly 4 neighbours # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('grid', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs2 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs2) plt.show() # NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. # Star graph topology: each client is connected only to one central machine # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('star', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs3 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs3) plt.show() # NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. # Circular ladder topology: each client is connected to exactly 3 neighbours # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('ladder', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs4 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs4) plt.show() # NON-IID case: every client has images of two categories chosen from [0, 1], [2, 3], [4, 5], [6, 7], or [8, 9]. 
# Disconnected graph: no communication between machines # Hyperparameters num_clients = 100 num_rounds = 10 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('disconnected', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients, 'non-iid') # Instantiate models and optimizers and run decentralized training global_model, client_models, accs5 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs5) plt.show() fig, ax = plt.subplots(1, figsize=(12, 9)) ax.set_ylim([0, 1]) x = np.array(range(10)) ax.plot(x, accs, color="blue", label="centralized") ax.plot(x, accs1, color="green", label="ring") ax.plot(x, accs2, color="purple", label="grid") ax.plot(x, accs3, color="yellow", label="star") ax.plot(x, accs4, color="orange", label="ladder") ax.plot(x, accs5, color="red", label="disconnected") # Add a legend, and position it on the lower right (with no box) plt.legend(loc="lower right", title="Graph topology") plt.title("Accuracy curve depending on the network topology, non-IID case") plt.xlabel("Round") plt.ylabel("Accuracy") plt.show()Topology evolving over time# IID case: all the clients have images of all the classes # Grid graph topology: each client is connected to exactly 4 neighbours # Base case with a fixed topology for comparison # Hyperparameters num_clients = 100 num_rounds = 25 epochs = 1 batch_size = 32 # Communication matrix comm_matrix = create_mixing_matrix('grid', num_clients) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs0 = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients) cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs0) plt.show() # IID case: all the clients have images of all the classes # We assign a random order to the topologies and change the topology at each round according to this order # Hyperparameters num_clients = 100 num_rounds = 25 epochs = 1 batch_size = 32 topos = ['centralized', 'ring', 'star', 'grid', 'ladder'] np.random.shuffle(topos) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs = run_topos(train_loader, test_loader, num_rounds, epochs, num_clients, topos, shuffle='modulo') cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs) plt.show() # IID case: all the clients have images of all the classes # We keep the same topology for a number of rounds before changing it # Hyperparameters num_clients = 100 num_rounds = 25 epochs = 1 batch_size = 32 topos = ['centralized', 'ring', 'star', 'grid', 'ladder'] np.random.shuffle(topos) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs1 = run_topos(train_loader, test_loader, num_rounds, epochs, num_clients, topos, shuffle='fraction') cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs1) 
plt.show() # IID case: all the clients have images of all the classes # We pick a topology at random at each round # Hyperparameters num_clients = 100 num_rounds = 25 epochs = 1 batch_size = 32 topos = ['centralized', 'ring', 'star', 'grid', 'ladder'] np.random.shuffle(topos) # Creating decentralized datasets train_loader, test_loader = load_data(batch_size, num_clients) # Instantiate models and optimizers and run decentralized training global_model, client_models, accs2 = run_topos(train_loader, test_loader, num_rounds, epochs, num_clients, topos, shuffle='random') cons = consensus(global_model, client_models) print(cons) axes = plt.gca() axes.set_ylim([0,1]) plt.plot(range(num_rounds), accs2) plt.show() fig, ax = plt.subplots(1, figsize=(12, 9)) ax.set_ylim([0, 1]) x = np.array(range(25)) ax.plot(x, accs, color="blue", label="modulo") ax.plot(x, accs1, color="green", label="fraction") ax.plot(x, accs2, color="purple", label="random") ax.plot(x, accs0, color="red", label="centralized (ref)") # Add a legend, and position it on the lower right (with no box) plt.legend(loc="lower right", title="Topology selection") plt.title("Accuracy curve when the network topology changes over time, IID case") plt.xlabel("Round") plt.ylabel("Accuracy") plt.show()T&T Lab - 20th (1828249) _Find the square and cube of a number_#Method1 n=int(input("Enter a Number:")) e=int(input("Enter it's Exponent:")) print("{} raised to the power {} is {}".format(n,e,n**e)) #Method2 n=int(input("Enter a Number:")) e=input("Type 'Square' for square and 'Cube' for cube: ") if e=='Square': r=n**2 elif e=='Cube': r=n**3 else: print("Invalid Input") print("{} of {} is {}".format(e,n,r))Enter a Number:8 Type 'Square' for square and 'Cube' for cube: Cube Cube of 8 is 512_WAP TO MATCH WHETHER YOUR GUESED VALUES ARE SAME AS THAT OF SCORED VALUES AND ALSO FIND THE GRADES OF SCORED VALUES_import pandas as pd import numpy as np import random #Report Card subjects=['BD','CC','ESDA','CG','DMDW'] scoredValues=pd.DataFrame(data=[random.randint(10,101) for x in range(1,6)],index=subjects,columns=['Percentage']) scoredValues #Guess Work print("Guess the Percentage scored in : ") for i in subjects: print(i,end=' :') if int(input(""))==scoredValues.loc[i,'Percentage']: print("\tYou Guessed Correctly!,You scored {} in {}".format(scoredValues.loc[i,'Percentage'],i)) else: print("\tYour Guess is Wrong!,You scored {} in {}".format(scoredValues.loc[i,'Percentage'],i)) #Grading def grade(percent): if 100>=percent>90: return "O" elif 90>=percent>80: return "E" elif 80>=percent>70: return "A" elif 70>=percent>60: return "B" elif 60>=percent>50: return "C" elif 50>=percent>40: return "D" else: return "Fail" scoredValues['Grade']=scoredValues['Percentage'].agg(grade) scoredValues_WAP TO PREPARE A SHOPPING LIST, ATLEAST 10 INPUTS AND TO DISPLAY THE ITEM WHICH HAS MAX PRIORITY_items=int(input('No. of Items:')) itemList=[(input("Item: "),random.randint(1,15)) for x in range(1,items+1)] shoppingList=pd.DataFrame(data=itemList,index=[x for x in range(1,len(itemList)+1)],columns=['Item','Quantity']) #Shopping List shoppingList #Higher the quantity Higher the Priority priority=shoppingList.index.values[1] for i in shoppingList.index.values: if shoppingList.loc[priority,'Quantity']Butter has the highest Priority_WAP TO CALCULATE THE COST OF FILLING A CYLINDER (USER WILL GIVE THE DIMENSION) @ 235/Cu.m and also the time to fill if it takes 14minutes to fill 1 cu. 
m_import math r=float(input("Enter Radius of the Cylinder: ")) h=float(input("Enter Height of the Cylinder: ")) volume=math.pi*(r**2)*h volume print("The cost of Filling the cylinder is {r:1.2f} Rs. ".format(r=volume*235)) print("It takes {} minutes & {} seconds to fill the cylinder".format(int(volume/14),int((volume%14)*100/60)))Enter Radius of the Cylinder: 6 Enter Height of the Cylinder: 10 The cost of Filling the cylinder is 265778.74 Rs. It takes 80 minutes & 18 seconds to fill the cylinder_Wap to store a student list with all the details of a student and give warning to students with attendance less than 60_students = [ ('Jack', 89,'CSE', 'Sydeny' , 'Australia') , ('Riti', 60,'IT', 'Delhi' , 'India' ) , ('Vikas', 92,'CSSE', 'Mumbai' , 'India' ) , ('Neelu', 46,'CSCE', 'Bangalore' , 'India' ) , ('John', 76,'IT', 'New York' , 'US') , ('Mike', 87,'EEE','las vegas' , 'US'), ('Manish',49,'CSSE','Indore','India')] studentDetails=pd.DataFrame(data=students,index=[1808200+ random.randint(0,90000) for x in range(1,len(students)+1)], columns=['Name','Attendance %','Branch','City','Country']) studentDetails for i in studentDetails.index.values: if studentDetails.loc[i,'Attendance %'] < 60: print("Warning! {}, your Attendance is below 60%".format(studentDetails.loc[i,'Name']))Warning! Neelu, your Attendance is below 60% Warning! Manish, your Attendance is below 60%Throughput Github AnalysisThis is a research project led by PhD .Different research questions are tried to be answer such as: - How do individuals and organizations use GitHub (or other public code repositories) to reference, analyze or reuse data from Data Catalogs?- Are there clear patterns of use across public repositories?- Do patterns of use differ by data/disciplinary domain, or do properties of the data resource (presence of an API, online documentation, size of user community) affect patterns of use? - Does the data reuse observed here expand our understanding of current modes of data reuse, e.g. those outlined in https://datascience.codata.org/articles/10.5334/dsj-2017-008/ ?- What are the characteristics and shape of the Earth Science research object network?- What are major nodes of connectivity?- What poorly connected islands exist? - What is the nature of data reuse in this network?- What downstream/second order grant products can be identified from this network? Current ApproachCategorizing a subset of scraped repos, with pre-defined types, which may be updated iteratively as categorization progresses (education, analysis, archiving, informational).Using ML techniques, we might be able to classify repos according to type automatically; and could consider classifying according to repository quality/completeness. Repository quality or completeness would be defined by:- presence/absence/length of readme- number of commits- number of contributorsBy using neo4j, we can construct and analyze the network graph in order to get:- Centrality and level of connection- Identification of small networks/islands within the network- What databases are highly connected and which are not?- Use database properties (has API, online search portal, has R/Python package, has user forum . . .) Objective of the Notebook This Notebook is going to be used to created an initial EDA using Neo4j to create a Recommendation System with Graph Databases. Connect to Neo4j's graph.User credentials can be input in the `config.py` script, imported as `cfg`. A `config_sample.py` script has been included. 
Add your credentials and rename the file to be `config.py` in your system.# Connect to Graph graph = Graph("bolt://localhost:7687", auth=(cfg.neo4j['auth']), bolt=True, password=cfg.neo4j['password']) graphCounting observationsgraph.run('MATCH (crt:TYPE {type:"schema:CodeRepository"})\ MATCH (crt)<-[:isType]-(ocr:OBJECT) \ RETURN COUNT(DISTINCT ocr)').to_data_frame()EDA for Github AnalysisCreating right graphs for GA project.We will analyze and graph the following:- Distribution of references to DBs <- ?? - Note 'Earth Science' databases within graph / Note particular 'Subjects' within graph. - X = DBs; y = of referenced repos - Linked repos (x) by commits (y) - Note 'ES' and 'Subjects' commits - Linked repos (x) by of contributors (y) - Linked repos (x) by of forks (y) Getting DataCatalogs and CodeRepos ![](img/01_graph.png) Getting MetaDatadata = graph.run('''MATCH (k:KEYWORD {keyword: "earth science"})\ MATCH (k)<-[:hasKeyword]-(a1:ANNOTATION)-[:Body]->(dc:dataCat)\ MATCH (dc)<-[:Target]-(a2:ANNOTATION)-[:Target]->(cr:codeRepo)\ RETURN distinct properties(dc), properties(cr)''').data()Metadata to DFmeta_df = utils.get_metadata_1(data = data)- Distribution of references to DBs <- ??ref = meta_df.copy() ref.head() ref = ref.groupby('dacat').agg({'dacat_name': 'max', 'forks':'mean', 'commits':'mean', 'contributors':'mean'}).reset_index() import plotly.io as pio @interact def histogram_plot(opt = ['commits', 'forks', 'contributors']): #df = meta_df df = ref[ref[opt] >= 1] df[opt] = np.log(df[opt]) title_str = opt.capitalize() + ' - References to DB' trace = go.Bar(x = df['dacat_name'], y = df[opt] ) # layout layout = go.Layout( title = title_str, # Graph title xaxis = dict(title = opt.capitalize() + ' - Datacatalog'), # x-axis label yaxis = dict(title = 'Count'), # y-axis label #hovermode ='closest' # handles multiple points landing on the same vertical ) # fig fig = go.Figure(trace, layout) fig.update_traces(marker_line_width = 0.5, selector=dict(type='histogram')) fig.update_traces(hovertemplate=None) fig.update_layout(hovermode='x unified') fig.show() #pio.write_html(fig, file='index.html', auto_open=True) meta_df.describe(include = 'all')Plotting by Data Catalog or Code Repo@interact(x=(0,500)) def show_dc_more_than(selection =['dacat','cr'], column=['forks', 'commits', 'contributors'], x = 1): meta_df if selection =='dacat': df = meta_df[['dacat_name', 'cr_item', 'forks', 'commits', 'contributors']] df = meta_df.groupby('dacat').agg({'dacat_name': 'max', 'cr_item' : 'count', 'forks' : 'sum', 'commits' : 'sum', 'contributors' : 'sum'}).reset_index() if selection =='cr': df = meta_df.groupby('cr_item').agg({'cr_name': 'max', 'dacat_name': 'max', 'forks' : 'sum', 'commits' : 'sum', 'contributors' : 'sum'}).reset_index() return df.loc[df[column] > x] dacat_list = meta_df['dacat_name'].unique().tolist() dacat_list.insert(0, 'All') @interact def histogram_plot(opt = ['commits', 'forks', 'contributors'], dacat = dacat_list): if dacat == 'All': df = meta_df else: df = meta_df[meta_df['dacat_name'] == dacat] # Plotting object df = df[df[opt] >= 1] df[opt] = np.log10(df[opt]) title_str = opt.capitalize() + ' - Repos with "Earth Sciences" as a Keyword' trace = go.Histogram(x = df[opt], nbinsx=40) mean = 10**(df[opt].mean()) median = 10**(df[opt].median()) per25 = 10**(np.percentile(df[opt], 25)) per75 = 10**(np.percentile(df[opt], 75)) # Layout layout = go.Layout( title = title_str, # Graph title xaxis = dict(title = opt.capitalize() + ' - Datacatalog: ' + dacat), # x-axis label yaxis = 
dict(title = 'Count'), # y-axis label # Adding stats lines shapes= [{'line': {'color': 'LightSeaGreen', 'dash': 'dot', 'width': 4}, 'type': 'line', 'x0': df[opt].mean(), 'x1': df[opt].mean(), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'LightSeaGreen', 'dash': 'dot', 'width': 4}, 'type': 'line', 'x0': df[opt].median(), 'x1': df[opt].median(), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'LightSeaGreen', 'dash': 'dot', 'width': 4}, 'type': 'line', 'x0': np.percentile(df[opt], 25), 'x1': np.percentile(df[opt], 25), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'LightSeaGreen', 'dash': 'dot', 'width': 4}, 'type': 'line', 'x0': np.percentile(df[opt], 75), 'x1': np.percentile(df[opt], 75), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}], # Annotations regarding stats lines annotations=[ dict( x=df[opt].mean(), y=.95, xref='x', yref='paper', text="Mean = {:,.0f}".format(mean), showarrow=True, font=dict( family="Sans Serif, monospace", size=18, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=df[opt].median(), y=0.85, xref='x', yref='paper', text="Median = {:,.0f}".format(median), showarrow=True, font=dict( family="Sans Serif, monospace", size=18, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=np.percentile(df[opt], 25), y=0.75, xref='x', yref='paper', text="25per = {:,.0f}".format(per25), showarrow=True, font=dict( family="Sans Serif, monospace", size=15, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=np.percentile(df[opt], 75), y=0.75, xref='x', yref='paper', text="75per = {:,.0f}".format(per75), showarrow=True, font=dict( family="Sans Serif, monospace", size=15, color="Black" ), arrowhead=8, ax=1, ay=1 )]) # Figure fig = go.Figure(trace, layout) fig.update_traces(marker_line_width = 0.5, selector=dict(type='histogram')) fig.update_traces(hovertemplate=None) fig.update_layout(hovermode='x unified') # Changing Ticks a = list(set(list(np.log10(meta_df[opt])))) a = int(len(a)/13) range_list = sorted(list(meta_df[opt])) range_list_skipped = range_list[0:len(list(meta_df[opt])):a*2] range_list_skipped.append(range_list[-1]) range_list_skipped = sorted(list(set(range_list_skipped))) log_list = sorted(list(np.log10(meta_df[opt]))) log_list_skipped = log_list[0:len(list(np.log10(meta_df[opt]))):a*2] log_list_skipped.append(log_list[-1]) log_list_skipped = sorted(list(set(log_list_skipped))) fig.update_layout( xaxis = dict( tickmode = 'array', tickvals = log_list_skipped, ticktext = range_list_skipped ) ) # Changing Ticks a = list(set(list(np.log10(meta_df[opt])))) a = int(len(a)/13) range_list = sorted(list(meta_df[opt])) range_list_skipped = range_list[0:len(list(meta_df[opt])):a*2] range_list_skipped.append(range_list[-1]) range_list_skipped = sorted(list(set(range_list_skipped))) log_list = sorted(list(np.log10(meta_df[opt]))) log_list_skipped = log_list[0:len(list(np.log10(meta_df[opt]))):a*2] log_list_skipped.append(log_list[-1]) log_list_skipped = sorted(list(set(log_list_skipped))) fig.update_layout( xaxis = dict( tickmode = 'array', tickvals = [0, .5, 1, 1.5, 2, 2.5, 3, 4], ticktext = [1, 3.15, 10, 31.5, 100, 316, 1000, 10000] ) ) fig.show() meta_df[(meta_df['contributors'] == 7) | (meta_df['contributors'] == 8)] a = int(len(list(sorted(list(np.log(meta_df['commits'])))))/10) a a = int(len(list(sorted(list(np.log(meta_df['commits'])))))/10) range_list = sorted(list(meta_df['commits'])) range_list_skipped = range_list[0:len(list(meta_df['commits'])):a] range_list_skipped.append(range_list[-1]) 
range_list_skipped = sorted(list(set(range_list))) len(range_list_skipped)Analysis checking for Subjectsubject_data = graph.run('''MATCH (s:SUBJECT)\ WHERE s.id IN [313, 314, 315, 317]\ MATCH (s)<-[:hasSubject]-(a:ANNOTATION)-[]->(dc:dataCat)\ MATCH (dc)<-[:Target]-(:ANNOTATION)-[:Target]->(cr:codeRepo)\ RETURN distinct properties(dc), properties(cr), s.id''').data() subject_df = utils.create_df_subject(subject_data = subject_data) subject_df.head(2) subject_df.to_csv('geo313_317_data_throughput.csv') subject_df.describe(include='all') df_for_dacat = subject_df.groupby('dacat_name').count().reset_index() df_for_dacat = df_for_dacat[df_for_dacat['dacat'] > 100] dacat_list = df_for_dacat['dacat_name'].unique().tolist() dacat_list.insert(0, 'All') @interact def histogram_plot(subject = list(set(subject_df['subject_str'])), option = ['commits', 'forks', 'contributors'], dacat = dacat_list): if dacat == 'All': df = subject_df else: df = subject_df[subject_df['dacat_name'] == dacat] df = df[df['subject_str'] == subject] df = df[df[option] >= 1] df[option] = np.log10(df[option]) title_str = option.capitalize() + ' - Repos with " '+ subject + '" as a Subject' trace = go.Histogram(x = df[option], nbinsx=50, marker_color = 'white') # Stats mean = 10**(df[option].mean()) median = 10**(df[option].median()) per25 = 10**(np.percentile(df[option], 25)) per75 = 10**(np.percentile(df[option], 75)) # layout layout = go.Layout( title = title_str, # Graph title xaxis = dict(title = option.capitalize() + '- Datacatalog: ' + dacat), # x-axis label yaxis = dict(title = 'Count'), # y-axis label # Adding stats lines shapes= [{'line': {'color': 'Black', 'dash': 'dashdot', 'width': 1}, 'type': 'line', 'x0': df[option].mean(), 'x1': df[option].mean(), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'Black', 'dash': 'dashdot', 'width': 1}, 'type': 'line', 'x0': df[option].median(), 'x1': df[option].median(), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'Black', 'dash': 'dashdot', 'width': 1}, 'type': 'line', 'x0': np.percentile(df[option], 25), 'x1': np.percentile(df[option], 25), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}, {'line': {'color': 'Black', 'dash': 'dashdot', 'width': 1}, 'type': 'line', 'x0': np.percentile(df[option], 75), 'x1': np.percentile(df[option], 75), 'xref': 'x', 'y0': -0.1, 'y1': 1, 'yref': 'paper'}], # Annotations regarding stats lines annotations=[ dict( x=df[option].mean()+1.5, y=.95, xref='x', yref='paper', text="Mean = {:,.0f}".format(mean), showarrow=True, font=dict( family="Times New Roman", size=14, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=df[option].mean()+1.5, y=0.87, xref='x', yref='paper', text="Median = {:,.0f}".format(median), showarrow=True, font=dict( family="Times New Roman", size=14, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=df[option].mean()+1.5, y=0.79, xref='x', yref='paper', text="25 percentile = {:,.0f}".format(per25), showarrow=True, font=dict( family="Times New Roman", size=14, color="Black" ), arrowhead=8, ax=1, ay=1 ), dict( x=df[option].mean()+1.5, y=0.71, xref='x', yref='paper', text="75 percentile = {:,.0f}".format(per75), showarrow=True, font=dict( family="Times New Roman", size=14, color="Black" ), arrowhead=8, ax=1, ay=1 )] ) # fig fig = go.Figure(trace, layout) fig.update_traces(marker_line_width = 1, selector=dict(type='histogram')) fig.update_traces(hovertemplate=None) fig.update_layout(hovermode='x unified') # Changing Ticks ticks_vals_list = None ticks_vals_list = [0, .5, 1, 1.5, 2, 
2.5, 3, 3.5, 4] log_list = sorted(list(set(np.log10(df[option])))) #ticks_vals_list.append(log_list[-2]) ticks_vals_list.append(log_list[-3]) # Getting the text list fig.update_layout( xaxis = dict( tickmode = 'array', tickvals = [0, .5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5.1, 5.7], ticktext = ['1', '3.15', '10', '31.5', '100', '316', '1,000', '3,162', '10,000', '125,892', '501,187'], # xaxis = list(rangemode = 'tozero') ), template='none', xaxis_showgrid=False, yaxis_showgrid=False ) fig.update_yaxes(zeroline=True,rangemode = 'tozero'), fig.update_xaxes( zeroline=True) file_name = 'GeoGraphs_'+option.capitalize()+'.svg' fig.write_image(file_name) fig.show() #pio.write_html(fig, file='Geo_Graphs.html', auto_open=True)![img2](img/subject_graph.png) Green: Data CatNavy blue: subjectPink: Code RepoLigh Blue: Annotation All Data Without Subjects#all_data = graph.run('''MATCH (s:SUBJECT)<-[:hasSubject]-(a:ANNOTATION)-[]->(dc:dataCat)\ #MATCH (dc)<-[:Target]-(:ANNOTATION)-[:Target]->(cr:codeRepo)\ #RETURN distinct properties(dc), properties(cr), s.id''').data() #all_data = utils.create_all_df(all_data) #all_data.to_csv('all_throughput_data_w_subject.csv') all_data = pd.read_csv('output_data/all_throughput_data_w_subject.csv', index_col = 0) all_data.head(3) all_df = all_data.copy() def find_geology(x): if x == 314: return 'Geology_Palaeontology' else: return 'Other' all_df['Geology_Palaeontology'] = all_df['subject'].apply(lambda x: find_geology(x)) all_df.head(2) all_df[all_df['Geology_Palaeontology'] == 'Geology_Palaeontology'] all_df.head() all_df_dist = all_df.groupby(['subject', 'dacat'])['cr_item'].count().reset_index() all_df_dist.head(3) geo_sel = all_df_dist[all_df_dist['subject'] == 314] other_sel = all_df_dist[all_df_dist['subject'] != 314] fig = go.Figure() fig.add_trace(go.Bar(x = geo_sel['dacat'], y = geo_sel['cr_item'], name = 'Geosci', opacity=1.0, marker_color = 'black')) fig.add_trace(go.Bar(x = other_sel['dacat'], y = other_sel['cr_item'], name = 'Other', opacity=.7, marker_color = 'silver')) fig.update_layout( title = 'Count of Repos per Data Catalog', # Graph title hovermode ='closest', # handles multiple points landing on the same vertical template='none', xaxis_showgrid=False, yaxis_showgrid=False ) fig.update_xaxes(title = 'Data Catalogs', categoryorder = 'total descending',showticklabels=False, linecolor='black', zeroline=True,) fig.show() #pio.write_html(fig, file='Count_Repos_Catalog.html', auto_open=True) file_name = 'Count_Repos_Catalog.svg' fig.write_image(file_name) import plotly.express as px df_for_dacat = all_df.groupby('dacat_name').count().reset_index() df_for_dacat = df_for_dacat[df_for_dacat['dacat'] > 10] dacat_list = df_for_dacat['dacat_name'].unique().tolist() dacat_list.insert(0, 'All') @interact def histogram_plot(opt = ['commits', 'forks', 'contributors'], dacat = dacat_list): if dacat == 'All': df = all_df else: df = all_df[all_df['dacat_name'] == dacat] df = df[df[opt] >= 1] df[opt] = np.log10(df[opt]) title_str = opt.capitalize() + '- All Repos within Throughput.' 
first_sel = df[df['Geology_Palaeontology'] == 'Geology_Palaeontology'] second_sel = df[df['Geology_Palaeontology'] == 'Other'] fig = go.Figure() fig.add_trace(go.Histogram(histfunc="sum", name = 'Other', x = second_sel[opt], nbinsx=50, opacity=0.8)) fig.add_trace(go.Histogram(histfunc="sum", name = 'Geology_Palaeontology', x = first_sel[opt], nbinsx=50, opacity=0.8)) # layout fig.update_layout( title = title_str, # Graph title xaxis = dict(title = opt.capitalize() + '- Datacatalog: ' + dacat), # x-axis label yaxis = dict(title = 'Count'), # y-axis label hovermode ='closest', # handles multiple points landing on the same vertical ) #fig.update_layout(barmode='stack') fig.update_traces(marker_line_width = 0.5, selector=dict(type='histogram')) #fig.update_yaxes(type="log") # Changing Ticks fig.update_layout( xaxis = dict( tickmode = 'array', tickvals = [0, .5, 1, 1.5, 2, 2.5, 3, 4], ticktext = [1, 3.15, 10, 31.5, 100, 316, 1000, 10000] ) ) fig.show() #pio.write_html(fig, file='Geo_Graphs.html', auto_open=True) all_df.head(3) grouped_all_df = all_df.groupby(['subject', 'dacat_name']).agg({'forks':'mean', 'commits':'mean', 'contributors':'mean'}).reset_index() grouped_all_df.sort_values(by='commits') @interact def histogram_plot(opt = ['commits', 'forks', 'contributors']): df = grouped_all_df.copy() df = df[df[opt] >= 1] title_str = opt.capitalize() + '- All Repos by DaCat within Throughput.' fig = go.Figure() # Data for Geo geo_sel = df[df['subject'] == 314] geo_sel = geo_sel.sort_values(by = 'dacat_name').reset_index() geo_sel = geo_sel.drop_duplicates(subset = ['dacat_name']) list_314 = geo_sel['dacat_name'].to_list() # Data for Other other_sel = df[df['subject'] != 314] other_sel = other_sel[other_sel['dacat_name'].isin(list_314) == False] other_sel = other_sel.sort_values(by = 'dacat_name').reset_index() other_sel = other_sel.drop_duplicates(subset = ['dacat_name']) # Trace for Other fig.add_trace(go.Histogram(histfunc = 'avg', x = other_sel['dacat_name'], y = other_sel[opt], name = 'Other', opacity=1.0, marker_color = 'silver')) # Trace for Geo fig.add_trace(go.Histogram(histfunc = 'avg', x = geo_sel['dacat_name'], y = geo_sel[opt], name = 'Geology_Palaeontology', opacity=1.0,marker_color = 'black' )) # layout fig.update_layout( title = title_str, # Graph title hovermode ='closest', # handles multiple points landing on the same vertical template='none', xaxis_showgrid=False, yaxis_showgrid=False ) fig.update_xaxes(title = 'Data Repositories', categoryorder = 'total descending',showticklabels=False, linecolor='black', zeroline=True,) if opt == 'commits': fig.update_yaxes(title = opt.capitalize(), linecolor='black', zeroline=True, range=(0,30000)) elif opt == 'forks': fig.update_yaxes(title = opt.capitalize(), linecolor='black', zeroline=True, range=(0,500)) else: fig.update_yaxes(title = opt.capitalize(), linecolor='black', zeroline=True) file_name = 'ThroughputGraphs_'+opt.capitalize()+'.svg' fig.write_image(file_name) fig.show() #pio.write_html(fig, file='All_Graphs.html', auto_open=True)There are two types of Dimensionality Reduction techniques: Feature Selection Feature Extraction Feature Selection techniques are Backward Elimination, Forward Selection, Bidirectional Elimination, Score Comparison and more. We covered these techniques in Part 2 - Regression. In this part we will cover the following Feature Extraction techniques: 1. Principal Component Analysis (PCA) 2. Linear Discriminant Analysis (LDA) 3. 
Kernel PCA Principal Component Analysis (PCA) Principal component analysis (PCA) is the process of computing the principal components and using them to perform a change of basis on the data, sometimes using only the first few principal components and ignoring the rest. PCA is used in exploratory data analysis and for making predictive models. It is commonly used for dimensionality reduction by projecting each data point onto only the first few principal components to obtain lower-dimensional data while preserving as much of the data's variation as possible. The first principal component can equivalently be defined as a direction that maximizes the variance of the projected data. The principal component can be taken as a direction orthogonal to the first i-1 principal components that maximizes the variance of the projected data. The principal components of a collection of points in a real p-space are a sequence of direction vectors, where the vector is the direction of a line that best fits the data while being orthogonal to the first i-1 vectors. Here, a best-fitting line is defined as one that minimizes the average squared distance from the points to the line. These directions constitute an orthonormal basis in which different individual dimensions of the data are linearly uncorrelated. ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATcAAAC+CAYAAABQ6kiWAAAgAElEQVR4Ae3dCby1VVU/cCsjFSEUHBIhBJRRTYwSFUQJUChCGbQQRMUJGQpFkDkRlCFJmUQFAUlQcMARBVFELVNMzTS1MKlszuZBq6fPd///63z23e855577nnPf+9zzrv35nPuc+zz72cNv7/3ba629z1736jIkAolAIjCHCNxrDuuUVUoEEoFEoEtyy06QCCQCc4lAkttcNmtWKhFIBJLcsg8kAonAXCKQ5DaXzZqVSgQSgSS37AOJQCIwlwgkuc1ls2alEoFEIMkt+0AikAjMJQJJbnPZrFmpRCARSHLLPpAIJAJziUCS21w2a1YqEUgEktyyDyQCicBcIpDkNpfNmpVKBBKBJLfsA4lAIjCXCCS5zWWzZqUSgUQgyS37QCKQCMwlAkluc9msWalEIBFIcss+kAgkAnOJQJLbXDZrVioRSASS3LIPJAKJwFwikOQ2l82alUoEEoEkt+wDiUAiMJcIrCpy+9///d+5bISVqlTiuVLIZ77rAoHekNvf/u3fdqeeemr3ghe8oLv99ttH1v3aa6/tXvKSl3Rf/vKXR8YZ9eBf/uVfujPOOKN7wxve0P3Xf/3XqGjLdl+eF110UfeUpzyle/azn9394z/+Y/eRj3yk1OdLX/pSyffv//7vu1e84hXdW9/61u4HP/jBspUlEn7f+97XvfSlL+3+4A/+IG7lNRGYCwR6Q25/8id/0m222Wbdve51r26rrbbq/D8s/Oqv/mqJc9NNNw17PPbeX/3VX3UPechDusc//vHdv/7rv46NuxwPr7jiilL2BzzgAd0RRxzR/fu//3t3+umnl3vvfOc7S5bf+c53ug022KD7xV/8xe7f/u3fyr077rijO/jgg7sPfehDMy/W8ccfX/JHchkSgXlCoDfkZlBvvfXWZaAhuF/7tV/r/vu//3sNrI866qjuR37kR7qbb755jWeL3fiP//iPDimSln74wx8uFn3mz4855phSv3e9612DtL/yla90V111Vaf+wne/+93uQQ96UHfooYcOyO3KK68s773+9a8fvDerL1/84hdL/n/2Z382qyQznUSgFwj0ity23HLLbrvttiuD+/73v3935513rgHSYuT2T//0Tx0Vd23Iy7tUxUkCyW+pauOxxx7b/eiP/mhHEhsVhpEbqQ7hX3zxxaNeK1LgUqRRUuH//M//jEyvfkDC9Fks/Od//udiUfJ5IrDOEFgyuS1XBya5bL755t0BBxzQve1tbysk8OQnP7n7m7/5mwVgjCI3ktzee+/dbbzxxt2P/diPFdX2la98ZVdLJIjL+yeeeGIX9TDA2fGe+MQndgj1Pve5T7fDDjt0r3nNa7rvf//7C/L2P+lpl1126TbZZJMOGR922GHd7/7u73bjjPN//ud/3h1++OHdwx/+8FK2xz72sd2LXvSiohrfcMMN3dOf/vTuM5/5TMkryI1NDlmddNJJ3SMf+chCbj/90z/d7bvvvt03vvGNQblIoTD7qZ/6qW7TTTftYMZeVxMdFf8Zz3hGsTXecsst3ROe8ITuSU96UscG+fa3v73k/3u/93slTXHlgUg//OEPl2cPfvCDu4c+9KHd/vvv30W8KADpGn577rlnt8UWW3Q/8zM/01144YXl3Wc+85ndxz72sYia10RgnSIwMbmRahjjGcMZ9P/0T/90pgUNcttrr73KwEQapBXG/zoMI7frr7+++/Ef//EyuBGXd572tKeV93/hF35hQJB//dd/XQjJ4I7B713S1M4779ydc845hbx+/ud/vrwrrZDOkCFVUZl+9md/tix+ICAq8gMf+MDuE5/4RF3MBd//4i/+ojvyyCPL4Ee8yJERn5p81llnlTRDVa3JjXR19tlnd8hQvttvv32xvX3rW98q6ZPofuInfqL7yZ/8yZLeCSecUEhdXN+DwKm+97vf/UodkTKbHxueBQ4TgPgf+MAHSpri3ve+9y0mAun+3M/9XHf00UcXQoy6/93f/d2gfhdccEF5n730kEMOKSRqkkDE4o+TNgeJ5JdEYBkQmJjcSBA6a3wOPPDAMvPPqkw1uRn0Vu8Y/x/2sId1X//61wfZtOT2ve99r0g24rEf1SFsXOeff365TQpEEE996lOLPYvUts8++5SBWC9gID6LDiSWIJJLL7201P1lL3vZwBYm0fe
///2FYEg7//zP/1xnv8Z37yKOkNJEOPfcc0u673nPe0r8mtxCFbzuuutKnDe+8Y2DNMWz8EKqu+uuuwb3ETgpigQahAtLUrG2s5Bxzz33DFTSU045pUiTJEAhcBf3ta997WBVGdHCykTw0Y9+tMT9/d///UKUJoa6jeQLO2m8+c1vHpQtvyQC6xKBicjNYN9tt91KZw1yY/SuO/S0ha7JjZQovO51ryt5Pv/5zx/Y0Fpyo1Ypk20krWoYgy/IrCa3sCORwkgoMbijHraaWJ2kyhrYpL1tt922q6WWiPvc5z63qKmf//zn49bQK4kX6XzqU58aPJ+E3K655ppSR9tIoo6ITr0tRrRBXTxDXALCQjY77bRTR0UWIh1xEFbUX1zSHem1reuZZ55Z0kX0AvKTzzDpjH3RsyS3AlX+WQEEJiI35QqVTIf1YVuxtWJWYRi5UQlJRPIjvQgtub385S8vqmEMzro8JCmDlMr07W9/u6inteQmbpCEPEg8BjA7Uah04nzta18rEsojHvGIjhTonfgYvOxZtURTl6H+jtyokZ/85CcHt9eW3KiVVFzSaZTF9ZJLLulOPvnk8sykgMQQFgJ/4QtfOCC1KAByo1qzrwniUiuZBdoFh/POO6+0xZve9KYSlxrKxkmVbQNbIkxtf8mQCKwEAhOTGymI0Z1atc022wwGw6wKPYzcpG3FlL3oMY95TPcP//APa5Abe5DBWKt6USYSFwO7gc0IX0tunkUwEJGodAxIH1KahQ2BustYT+pClO2HSmxxYdzmY+nMktye9axnDex9bXlIaVR62DD4B7khu5awRpGbBZB2xbklN6TOfvfVr351DdK88cYbk9yig+V1RRCYmNyUjor2hS98YaYSW9R6FLl5zrCPcKipL37xi8ugjn1uDPP3vve9B/alSM/Vbv9HP/rRxS7FqF+TWywoRHwkwL7G9oUUEMaGG25Ytm1QUX1HlOxV0q0/QQItcUTacZ0luf3yL/9ykQJJXHVZqJJB3FGetSE3trl2n2FLbuyuyG2YeSLMBSm5RevndV0jsCRyW87CjSM36iWbn4FEoqo38bJDBfG15bv11luLeuZXDUJNbhYtEBWyNBCDoCKN+DWBlVeEYXsItbReeIi4bFAI8e67745bQ69rS262WqhjbXOLXzYMs3ex/SGn+BXHcpFbTDrx64q60raBKHOSW41Kfl+XCKwKcgMISY30ZMDU5PbNb36zqGB+3VD/3tQ2BzYh8WOQt+T2l3/5l2XDMMJst7acdtpp5d3YokF9k9arX/3qBaodyYnkOMwA3zbkUsiNjTNWS5VB3vbeRbBCCg+2T/bECIjYdhrxY3vHcpHbbbfdViYPK8v13rvLL7+8mC+UIcktWiav6xqB3pAbiWijjTYqJDHqVwLPe97zyqA1aIKwAMbA7R67mO0WiOhxj3tcuUeCsVlVsE3Clohdd911sM8tVmRtqfAe1cvvOqVngcE7AmL0f9wXNxZZ5LuYvU0ayuJ9EmUEewfdY/cTSLDtb0t/53d+pyxYuK8Mf/iHf1jihprI5mdhwU/WYn8ZqYp0KiB9ediXF6pqedB1ZT+cZ6HmR1wLFq00+xu/8RslHXvbIsQkYIV19913L78wYZe1uizdXC0NpPK6rhHoDbkhDwPU6mFILC0YoUZayWPkj+0M4pGgfumXfqlsqLWCR421clivelJvDXqbdev7tlMgDQsXVj39BMy+Pj/jEiIfdjrv2lRLaiItksZIj5MEvxFF0EFO3kEq6hM7/0leCJq6GSeXIKTLLruso+ohndh7533bVZCxjcTUdr/SYMyvg19p+EUEKSrqEs9JhbayhNQLYyezkL5aIiQJyr/91YHVX6vYfmnxnOc8p/vsZz9bVreR29VXXx1Z5TURWKcI9Ibc1rbW7WBFSIhybQI11eAOUhmVBolG3FEkPOy9tpzitPfa/4elM+oeUvRZrjCsbLC2DaSeKCJ/EqlfjVBdMyQCK4HAqie3lQAt8/x/CLzjHe8oquev//qvL4DE/kcbhh/1qEeVyWLBw/wnEVhHCCS5rSOg5zEb0mv87tW+O4cKUPvD7veWt7xlDdV2HnHIOvUTgVVPbsPUpXFQLzX+uLRm/Wxc2cY9m3U5FkuvLov9gyQ3CzIWEpxOwvbW2uUWSzOfJwKzRmDVk9usAcn0JkegJrnYN9geHlDHmTzljJkITI9Aktv0GGYKiUAi0EMEktx62ChZpEQgEZgegSS36THMFBKBRKCHCCS59bBRskiJQCIwPQJJbtNjmCkkAolADxFIcutho2SREoFEYHoEktymxzBTSAQSgR4ikOTWw0bJIiUCicD0CCS5TY9hppAIJAI9RKAX5OY4aydLtOeHDcPLMTwR13dOZNqjeYa9t67uKU9dD3VzbyV26q9k3usK78wnERiFQC/IzeGUzjTjo4D39lGBgxgHRIrHYQ2foZwGh8/PUe+tq/uOWnKmHL8O4aPBmXIOcYzz2parLAi0JlFk+qpXvarjlHrS8+aWq2yZbiKwEgj0gtxUPBwo+xH2KEnsfe97Xzlix3E6fJuGnwMHOfYh8AnqRFq+UMP3Kjd7vGbVp+/OuqyIdP/99y8HVYa7ReTGgYvyfOlLX5p1lpleItB7BHpDbk5z5dPTefxxAm6LHh+lTnd1xLdAknOqLe9PfQhOyHAqxpOe9KQBuTmmnH+BkOSWo5yOUXesOic2yhDBCbyktjhuPO7nNRFYHxDoDbkZoJystE6LoxEQmAFcP2fb4sYubFy1XYtTFMfu8ATVDm62KPm19+XlFF7PIs3I3xXpOkKbv4RhXrCGkRtVURnlWacvj/bjRI0oU10XJIX8P/3pT3ff+9736iKVdJ2rxlEMckNmcTKuujgtOCThOk0OceDjpNyQ9uqEA6Mot4mEyYA5YBg29bv5PRHoAwJLIjcDhQOTULlmXQGOR0hmxx9//GBARh7vfe97yzM2pPDL6SRYzofrc/rvuOOOgXMSaflsscUWRYWN48P5X3j4wx9ezv2Pe5HPhRde2HFqXJ9HhjzY0fgoiDR54ELG1M0gjWHkdvbZZ5fDGxGT4EBHTqLrD9Ux/KQ67DECZ8cHHHBAUWsjXyoun6XhK/SjH/1ox0EMD1w80LuecMIJBT++EPiDQPQRtB9Xh9KJNDmjZhZQzwjIFLbKf+qpp5byRfztt9++u+WWWwb1jnfymgj0CYGJyY2kwhGJQcimFIN1lpUhFSAQjpTD61Skf+yxx5bB+Fu/9VtxqzgxMeAY7QVSjcH80Ic+tBOPQxNOWXirN+gRgUAC4UmK67+W3MLD0wc/+MES15/I+1d+5VeK8xULGVz8IRP5sbUJNbnFuWavfOUrS7yPf/zjJY4yHHfccYXAXdkYOVfh+Utdzj///BKPGvvUpz61uDGUhjx5/OI1XjwLKyQoZKWuPN5TifkuQEykNcSIuLgBFCzcPO1pTyvvI2s2TM5knKIrTY5iQn1G7qRkxMv5jPpypBP577LLLkMlvpJR/kkEeoDAxOR25JFHlgFgEPhw3TbKNra29TJY99tvv0
IGteREJaJ2PehBD1rgOYrbOGWJBYWQ7jgsroPBThoLv5/IjeSCIFpyE4cHLF6lBE5X+AJA6EFYkTZicvrsZz7zmXJrGLmRxBDpuAUF0pF68Cz1/e9/v6TF8Yr3wqF05EnNtULM8xZ1VYCb8pkUYCWQJkl4JMRYUOA8Wj4k45A2xZWmBQnSKBIVkLH8kdsnPvGJci/+7LPPPoWMqegZEoG+IjARubENUcEMjPiQsGoXdbOq4Bvf+MaSh20MMQARA8nroIMOGtiT5BfkZtAKsZpq8Ie0EuUitYQ6PQm5heTGBoYca8fH0lQ2UgxyCyl2bciNt3uYIm/ScdQZySGYdgKhOpJEqdXf/e53S/XUiyRlFTmkSOm05BaSHD+obXjb295WymF1VzC5KJdJrbWxUXu1xzjCbtPP/xOBdY3AROSmULFVI8jN7D3KefI0lTDA2ZB23nnngfE81MLrr79+QdItuVGpSDpRRpLMi1/84u6d73znApJYjNxIMEFukaGFibPOOqujmlrYYI+ST01uiCVWS0PKGye5kQ5JkFTKr33ta5HVgutHPvKR4kPVto4dd9yxSGLyZUeclNz4JCWBwmPUajTpk6T2whe+sOQf5MbRcxte8YpXrCFdt3Hy/0RgpRGYmNxsUGWnMcAMtOWQ2gIMBMKexWhNzSKRbLvttp0Vvjq05OaZRY/f/u3fLt7VwwsTMuDAJOxe48iNzQ25hVpKbSWpSIO0wsam/hYeqLVrS25WNbfZZptCbgisDSQ0kpd8OYsmrSHu3/zN3+wY9GvJDZGOk9youLBDoraphOpb5wkTdSGNCkFurQrrGXKDRW06qNPK74lAHxCYmNwUlpHaQGrVlFlXBDkZ1GxnBr7vjO6tfawlN8/rOBYlPvWpTxWv8AiLN3bP2YoYy3lHr+OrBzua/ILc/PqBc2HvkoBIh6E6Up3XhtyokRYL5IOsYrtFjeNrX/va8pwkdffddw8cQGuDJz7xiQvIbTG11EINQqP6koiHbf1QT+VRfyHJrW6N/L4aEVgSua2rCoa0xiemFdpRUkJLbmeeeWaRhobZlKhj9oEZ5LZGWAHkgq72Gi9fElBNbmHst1JZB+/tscceC8htEpubPWjPfvazSx62WAwLyM7CCokNMdWB7W+rrbZaQG7MA1RlEq4yCMNsbsgcyVtFbsOLXvSiUqYbbrihPEpyaxHK/1cbAr0kNyCSINiAEA3CaQ3r4rTkdu2115b41OdYNRTPap9VQ7/7tEeO9IkMNt544+7d7353ITyk8JKXvKSslNbkFnkwrEea4vq1hFVVUp2NsMIkNje/rpA+siVVIq/42H9HShOOPvroEo+aHNIlUwBC9j7b3h//8R+XuBY9/N528803L2ki8GHkxo7oXYtDVNUIiBuRus/8IAS5sbmFpBrxUy0NJPLaZwR6S27sbQaij31ew1Q3+7s8p9oJSIuk5x77kj1dtkj4n6QWJCSud9z3QRSIlB3Llgj3bCsRENmuu+5a7jHiI9oNN9ywkOMRRxxR7tuiYqX2nnvuKTY0hvtYmY2fjNnfhhwf8YhHlHci7/aq/AIbmP16nrOxSZME63nU0b2wfSL0SAshwSv2tMWP9pFUkCt12qoyNdV7tsrUWz6o5e6T6Fpyi7xCdS8Fzj+JQM8Q6C25kVYQghXS9idHgSHJxfN6mwYp5vLLLy8bdP2agaTDNvatb30rXhtckZ2VWGriaaedVlYfEdS73vWuQmoxqEmN55xzTtmK8sxnPrNsGkZe7F9IklSHQKiq9olJN+ySpDLpsXOpE9JWZuqfa/257rrrij0wiNyvEEhJFhYssoTKKC2baq1gx143P0+zjcYCAKlL2e+8886y8Tckzqg4EiOlPuMZzygbfdn3Ip2IY0HDKnPskYv7rlGn+hcN9fP8ngj0AYHektti4ATxLBZvJZ9PU8Zp3p22zuPyHvds2nzz/URglgisWnKbJQiZViKQCMwfAklu89emWaNEIBHoui7JLbtBIpAIzCUCSW5z2axZqUQgEUhyyz6QCCQCc4lAkttcNmtWKhFIBJLcsg8kAonAXCKQ5DaXzZqVSgQSgSS37AOJQCIwlwgkuc1ls2alEoFEIMkt+0AikAjMJQJJbnPZrFmpRCARSHLLPpAIJAJziUBvyM1pE47yceDjsI8jeXycr8YV3foaHIdU4wSTUXjFmXLzgJVjm9TTScYZEoFJEOgNuXEf6HBFhyY6SJGPg/rjMEmn3jpYMg5onKSCaxOHV6lvfOMbgxNw1yaN5XqHFys4OSCT5yy4DPs4aNI5bUsJJhjex5yPF+fRLeX9WcWFv/P36jJwYM0hePiInVVemc78ItAbciNlOFkWoTlE0eGM9YfnJ/9z1dcerDjL5nEApePIkQcPVX0LjgGHkwnAIZaBS42V73y8xmnCk9bBYZtO5+WLgXS4EoFkqv05wq4Pw+RWkYObL3zhCytRrMxzFSLQO3JztHb4BlgpPJEGKSH8gq5UOYbl61Rgbv44iQl/B8Pire29OHqcJL1SgXMcLhRrtZopwinLJp8MicAkCPSK3JznTy0d5aB4VIV4s3KUt2O6Ha3dhjg91gDhJV28d7zjHcULVO3DkxepO+64o9ttt92K85irr756UBZHfvM87zjvOkiTq0BHb4caxUeouCQh6XNCw/lLHdiPSFbcGHJfyHHNJAGhITc+WSchX+kqy+233158TNR5OPacpzBlc7y6I9CRirRvvPHGwSRDRVdHQZ6OH3cvcCVtST9wddR669An4kpDXB7u3/KWt3Qf/vCHB3Y0WHGaw2cFfxbKoC94l7TOqQ3XikKkJ29tGuk5mr61yfKtId3Ay5HwjndXD30nw3wisCRy45nJQNQhonPNChaz9FLJjV9SToQ5cA7nKK7IiYOVuoxsNdQazzlaifg8xyMYATFusskmxf1dPD/44IPLM6qeewigDspAheXsOCSNcOz8ute9rqiQ3gtP7gzibGG8cUUeriQxg81gHReWSm4wCLd9sCL9RAhn0xzwICa2O67/fJSJ/wZB3U06F1xwQbF5esbloYAYw4GOdoh3OeS58sorB4QvLjvavvvuu6De0tIuSAwJ80gWaXgmb3VgjmBzRVIRqKjaWrz6HZ7NatuceN597nOfW9Jr+8u55567oJyRfl5XNwITk5tOxeu7jrTRRht1pJqaPKaFIWxum266aXGwQpKoP2ZujmJCOpK3zqo8nJ3owKSr888/v3RkRBke6hHGnnvuWexUnMeYwX3UwWBiw0JSymBQcHqMfLgKJKEIPF1x5Rde66O+yIaUwclykJvBwtjPfsiFHzILiTI8dnFew1GLAU/qJC3xqkVyHBeC3OT5+c9/fuhKKdzUJ9qH/SyInRQpqIfyuQ9XEinHNiQ3HrpIlRYXhOc973mlbNz/cU594YUXFgfV0qdCmizU6zvf+U5pM1KRSUOdwlUhUg+vXUgTmVm4OOmkk0obctKjnHDiYlD9OIoO/LWx8gY+FlZgi5Df8IY3lLLK33flYTf0v3DXXXd1+hUC5EnMJEIK5LYRCffVv
loKn3/WGoGJyI2dIzomMvFh8J2lYR8xcMKsAyIGA73+MKAbeLaCCAanGdigaLcH8GSljFdccUWJayCwJb3sZS9bQzJikNfxa7XRggKbW237G0Vu1K9h5CZ/g7Q2isMLcSBeamkduAb0Dn+lQUr18/iO3BASooUJwmk/npGQalUXURj0VNovf/nL3e67717wqx00U+e4QkQMtfrNu5eyHXLIIV1ti0OKXB3CJnyrRjm9Y4JALEK4CjzwwAMXtJc84QTv8FAWCwq1atuS22WXXVbKdPLJJ0eWg2sQJsIVtC2JnPTZevMKqbZ1uj1ILL+sWgQmIjeDBDno4PEx48WsOovaIzcDz0zMkzuJoP7o8CQIRCVw16csoVLWZSARGchHHXVUGXSkvbDViIc8ECLJR546fvj2DCI32OrV0qWQGzeAynbVVVfVxSrSkPsnnnhiMYwrRxAZokDeyAKBjQqemQQQ21577TXwYxr+TF332Wef4vqvVkGlh9zlv80225SrcoQk7Ll2Jg3tsMMOC1ZL4U6tY88Soswk4prsPENyCER/qcmNk215v/Wtby1p1H9I3chX3ZQHMcdqaeRFLa0lN0QLA5JcxIk02fRMfC94wQvKLeQmrndae9wZZ5xRysVml2G+EJiI3FSZuqdzxocNqR0800CD3Eg0VktDnRyXnq0OpLwHPOABZUY2K8eHJKacBn+QGpVHHaiPVCYDj4QTacya3KTLw3sd5G/QUeupQu1HmRFtqHL1u/EdAcSCQuuPNOKMulowQV7yMYEwtNdhFLkhduUKKawmE9IV363UbG0nXuBK5Y93SHLI6dOf/nSd5RrfFyM37yu3ScCnlu4iMZKpfoHkkS1yow0MczBtiwk8ktwCvfm5TkxuOh01jxpHMqnVllnAEeSGoCZZLaXekPIOP/zwjof1+sNQ7sOgTRKjyj7hCU8onZhKR4K56KKLin2NdGggrC25kbhsLK5tbiG53XzzzQugYYRHegYd58l1mUk2DPvsc+NIK8htyy23HKz+LchkzD9f/epXix3MYH7kIx85UAPjlXHkZjIgYSG2IDcq91Oe8pSCq4mJJ3pExz57wAEHLJDcSH/3v//9u8997nOR3dDrJOSGpBG8NtVv2oDcEKsykC4RLHIj/UXZ450kt0Bi/q4Tk9tyV10nZdgnVU1Cblb+DNLWwB/lJKnFNg+rluIivHYw8CA/CblZvCB1MbrXASmyeU1CblQy5TjvvPPqJAbfkRpJpB2AgwhdV1Q3A3up5IY02BJJTxZXlMNEVatp48itVt2jPDzcS8dkEVjHs8MOO2wBuVERSXRW29vgnoUAE8Uk5KbM6kBSHGYa0UZRLnklubWIrx//94rczP6TSm7XXHPNoAO32ydIZYjo4osvLq146KGHlriM2kKQxx/90R8VSYb9kFQihM2NylgvKBx77LElDSuBdTjllFPKfT+JCuIcJbmRKJCE1dgggygLaccz0kVbnzo/5GcSsBI5TCWr49bfw7aEiOy/s1EZAdjeoc6C+xYUdtxxxwVpU0tDcqvTtOopDXsG68CswG5X29xsNRFX/SI/75iEtthii1IfEnaQG/tjXb/W5nbmmWeW9KzctoGtlYQcK8Ohlqbk1iI13/+vWnJDDtQSndgAZfy39E86sqB44zsAACAASURBVMpqcMVWACRnYDG2m8Xdt0poICNB8Q0EkotgBZbKe+mllw7SQGrSYCgnGfh9qy0fFh6UYRLJTdovf/nLSzoGGjUREXzsYx8r5aU6WQwZFwx4CwqIg1qLtNqPgY90Q0qyvQVxWqmN7R3InG1S+UMlZ0O1ciltCzUkKYFKOYzcbMKFCXubNNTFplxpWNBRH7hKl53MXkD3SdImFvWHtTSUOUidNE2FZVaIjbctuWnvzTffvGzlMNGpl/xJgCRE7RGmk0kkN3llmC8EekNu7CgkBoNt0l3j4hlIBkf9sdIW+6E0l05utTXiICMfUowNt3E/Nuja/xT3LFxYWTU4rbbFfVeD6PTTTy+rj2xPIbmxm3lup34bqJ5UtkgHufqOPJAFyWVcYHMjucX74662T1hdDrtYbI2J9KOcsAkiqPFgBxSo5MjG6nId1Bc+UQZ4+M5kEKYA/4fpwCqqCSXix5Xd1L68CJdccskgTmzitRiAGOtNvBZsbMORjmfa1HebeElrEXy32kuiIymHtOx5SIDDVnHj/byuTgR6Q25mbWTlZ0zUo8VCdFD2F538Na95Temo9ovFCmmdBtJANgY0SU4+EUhONtIijggGso2spItajUKaSMI2j9gqwkZIEgnJg8RDkgnVM9Ksr37OZPX01FNPLZuJ231vddz6u3rIT/rjPn6hYV8d0mLEV4/ALNJTL6qyn2AFMauDjbRwjH1npCLST41rpCUN6j5c2eBqUiHh3nDDDQuIixRnS4k9aMi8jh/lclV++If9lVQmbrvCq82oxXC0YKMvtKv4yu1daUS5Iy+qsLzqto9neV3dCPSG3FY3jOtf6VuSWP8QyBr3HYG5IbfVPNhWc9l18NVW/tVW3r6TSF/LNzfk1leAs1yJQCKwMgjMBbnlTLwynSdzTQT6jMBckFufAc6yJQKJwMogkOS2MrhnrolAIrDMCCS5LTPAmXwikAisDAJJbiuDe+aaCCQCy4xAktsyA5zJJwKJwMogkOS2MrhnrolAIrDMCCS5LTPAmXwikAisDAJJbiuDe+aaCCQCy4xAktsyA5zJJwKJwMogkOS2MrhnrolAIrDMCPSS3DgBcRCk89Yc7uhk2/p8tmXGZGjyjgVyDNA4/wZDX+zRTUdJOfbJ4ZVxPNO0xVvNP31zDJKz+/h5FfiEcESTo6SWEkZh4MgoPlLj6KilpJlxp0egV+Rm8B1zzDHlEEiHDjrIMQ5A9L/DIqMjRtV1UCfOxnlkcX/WVwc2Onq89Xs563yWMz1nl3Fm86xnPWtw6vAs8nMOn5ONVxvxOwPQAZdxSKlThPUzbV2f4TcJRjAw+Tk2PYIj0KXnOooAI25eZ49Ar8iN9yedweBzmKPB6OP01f3337884zmqPgSSfwQOWngQX86gwzsleB7IzTHecaT6LDAzIWk3bbaaAo3AcfJxUrADPZ2SzM3fUsnouOOOKxiEnw44OAT1Oc95ziD91YTNPJS1N+TGN8C2225bvJ3XXtoDZCel8vqkM1KrIjhb31n6cdZ+3J/1dbVIbuMGZUhusyY3p+A6Jj1OzZ019tOmNwqTltymyYcfC0eZ131zmvTy3ekR6A25Oe7aACGJjQqvfvWri3csR0kjQ8dL77bbbuW9cIhS25LY7tjteJsiDXIkwh/CsEA6RJTiKgMHKbXbu1Hk5iht3rYcSz5qEPFj4Fhyx33z0M7OIz31UW8BOfM7IO9XvepVI4/fVne+EThA4XuAlFFLstKiqsd9jmDEv+mmm4oUTC1tyc0x3Ndee20pF18GdT2om+yeTALKF0ery4ffCzaqvffeuziD4Q1L+Ub5gaC2ajNlgsNll11W2oWDb/nUgTNlXuhJQtpBe3AP+PWvf31QPriqJyw5seHk
pcUi0kS81MOjjz66XE2WHAvVkpuj3rVTSKCBgz4DH7hxckNSDf+rTCnKSaNgQoHVjTfeWDBQVuVzFSI9R6U7Yp2rRf2No5/WzidPR9+TKtXfkev8SKjrm970pqEmAHZhafGZwbcFvxDtseyBx/pwnZjc2CB4ZnJWvoEyS7UG0DxScVdH9WP0rkkoOoVr5EsNFT8cg7CdIAYdwYdzE6oSn6S8ZPHz6f9dd911gQMacUMdpt7yYs7Vn7jc34V7uWHkxtOSeNKvB33bcZCNeLy9y5+zFT433UPop512WvmfT9HI26AL71XSQwycDHuHK7y6TjvttNNgAIl74oknlngGw33ve9/ynWQxTHJDZuE8h7ObwJf/BQNWftwtcrrjO+mErwSTCEP5NttsUwgC/gY3D2ORRosDvxXSgDEsNtpoo+L1S13dR3JBTq68dcmXkxjPfW6//faSrL649dZbl3uu0b6+exZ9RmRezHji8j6/uHCG4R577LGA3BCpOPy5xvv8RyAg9zmj4dQ7HNHAAU7aNOqgP2obpIc8veca6em3NBD31c27MPU/Cdh7At8XcOc8e9999y150lCiPU3q8o7AGbZy8WjGaZJ2kab0x/XNeH8erxOT29vf/vZBBwEaSQkxzDKYVaWtg/ADyhMTu0W7iCBP0gHJgU1Dh7WaGgPj8ssvL+mYacO9n05jBVb6BiDJQTBTu7fXXnsVJy/umdXDrydpT0BuiDccy+hM3tPxFnMuQjrzrvgGKn+oUR51dZ8UhiwsjHCeEvdCCiKVuEe6DMO9CSC8VfHiFEZwRGYxRtoIzmCn6tfkJl14BbEh6joEoTC6I3htzUmMwS1tEgWCQ2TaDVEhY2WLgVyn5zubFslRPbQBHKTx7W9/e0CkPF8J2tYgNWC94z7pBm76AxOGBR5SknucwnC/aPAj++gzsOfWEPndfPPNpc4c+JBmlSPqIk99TX4wVgcf/Us8/T08hJHaEK9+xyk0yZc0B29liHjhUjL854qHUOVJcgtHNpwLuS+f8IsLR/Vwj7cz/Vuf1Z+5UnTfmBS8b1IksQXhwfX1r399iWfyjr5RXlhP/kxEbgYRsgFofMxitdPiWeBlYOtEJLLIx9VsRIqgxgQpxQDiRk7nD3uP52ZaZNIuMhjQSIyfUh6P1EuHIEnFVpNIlwcog6J2b2cmpWIgArOtRY5h9sEWi1C5t99++0HnEwdRkixJGzwzRaAebbXVVqXDG+SCLQXUv1BjI656k0SOOOKIgToY7uoMzHoCCnJDXAb4fvvtVwYkySJIVLrqThIRr77vGbVTm1C/Im2S4sYbb1wwjXINuyI3hGAgtm0jT9IVdRsJqDcfsaTB1unzeeedV8oQpFHnddJJJ5Vn8BJOOOGE8r9JrA7y0E9GkZu41GeSNZKNCSXSYK/TbxCmwJSA3Hj2itCSG/UVdkceeeQA1+hvJgvvk86Rnvz43oVr9M1IlylAOiRtIVZ59YE6wNBER6Vu27GON6/fJyI39o8nP/nJBVCg+hjo4eB31uAYhBrMICVBPexhDxvkbcavfVzqKDW5Wc3U6Z7+9KcPRPy6fDHwSUJmPCRIjWg7r85AolAWgeRmYCJTAwK5ez5JQEhU3gMPPHCBus2xs4FOeopZXHrqR2UZNqg812lJUexUVCjtQfKLDqyOSIH5oA7qotwIPSYr9TLL1yGkUpKussSHBIdM4W2SCHvOUsgNWZgU6vrKm/SFbJA66cREx/8oHGriF5dEbtCTSEnN8TGxsTPBw6D2PwxJWcP2mrF5mqRitbSW3ORDupLWySefvAZGpDPtF/1mEnKLvsd+2AaTJGmUyq4+0qWS8vMaecQ7zDbKJU9B3fRN94wH9rnot/HO+nidiNwAQ1zWMQHoo+O3g2K5AKT6sIfo7PI+66yzBmJ2K7lRGUgd1IRhojg7iTTM/gYHqQe5mMnHBbNi2FpIkm05xr0b5Ma7uokiAikQuSHs2saITLbbbrsF5AYDCxccVyNK+fsE8bfkBgP1q4MOH3YqBC0dk1QY0CMutUzasBEnPiTeyJvdKNSvpZAb9VWbDQukFqomVQ+Ba28feERAWOxNyJsKGmWLqzIru4Ukk4+2QhgWH9qgL4g/itzCsfQk3ugnITdOoU28rXNr5dK+Qe7IKsgNOYe5Jcpfk1tIfhbEvB/9QvuSfC2itO9HOvN+nZjcAKFRDDAz3DDimAYsdjIrcuP2kUWj1qt9w8jNbKxzDysjR8g6gFmZcRoJIBeSQ4ToMPG/K3LznpkRDgy2yN4K1WIhyI2aWJNbSG4kmVHkVhOI/BniETP7FnKMOrTkBoNbb711QdGQG8mI1EPVC9uk/EMK88JLX/rSUtfjjz++qF0kROqXD7VUnjCIusyK3JSD0Zy5oya3emNsqJNMBswUUa64wgWpI4i77767kCU74TC7qIE/jtzCRhu2rQVgNv9MQm7snyaIYX1GfR/72McW+xopLsgNQbXkFONAnnVfJWyY3PUPWoK89Bn2xVZSboo/l/8uidyWEwErhhrCbDoqGFA6I3ILMgq1FFEIyNGMzkYXg69Oz5YBs74BQC01SMx4reRGFWJzstImUN9IWewwgoGlvAZk+26JUP0JcpNeXaZJyI16Rv1nECe1xQJJJP/JT36yqMmTkhuV0hYEkoKysLupB7KPEBPAsLYwYSAQNsuQ3JEbondvXGBzg7fJJFToiE+yooYhIt/V23eSW01u4pPwDFyr0G1AYmxbpDZSHrXOyrSJIEIQggmQnWuU5HbFFVcUbKi4bUDyCIStUAhyi187uNfa3KyGwrq1IYpL3Ychk4FyT0pu3tW//DqilU6NBXY7E/Fy7wNVjr6F3pCbGYfIzj7CntQGgyFWuKzghVRGcmNvCJsKIzd7G4lMB6yDxqamUGuiIxx88MGFHOwpqoM9WzpirCIiNypTrJaKS9IUx/aYGOh1GvF9FLmFWjpOctPR5UnaUu5awjJI1V8ZkHYEtp1Rkhtyq39+xbBv8CPuIAv4G2hUolollD5pR37sVdEGyE3bDWu3KJMrcpOXCaomAc9ikcDKnnqNktzEjYnFKm0Qlfu+q5vyxb45Jgz/s5vVQV+ABfVtFLkhSFtG2P3qSUXZkCZijDpLX1r2PUZoyS3MIBZNWmnM+8qpv6nHUsgtVtftf2uD1XyTRqyits/n+f/ekBuQg1DYkWxGpA4gHbaPPffcszS+2bxepTX76hSI5rrrrittZeAYQIznpA/kYvXMkrq4tUGXtIHwEBc1BCHGvjd5RacIcjMoIpDi4t3o5PGsvk5Dbjo5CSv2nFEZ2cjUxyAJQzLpMzaCBrnVA015qKUtubkfhm4bP0MiRl6wsu2AVGDnvZVIdsedd955gZHfCre4Fn+0Wayi1hj4HqulJGcr4iREJg7kiIy1V0hY48iNVBf9AR5WE61SmqiUw2bfqIf2s0qNeKzwUqlJTiQabS7fYeQWpBl2N5t31c3Wk9g+YyU96hrSLjKxbcX7LbnBwCSkjKRnZdaHYA0Te9KslAtLITfqN8nexCFPE5Z+rb7yIhTUZo+
SwXrwp1fkpkPamxMbMjVMfEgG7F7tCqXZkB0pOkw0IqIjBcb7rjqzxq9VQ21ssYLKV8c1eGrCYmQn3rdbGCzHU4MZi6l6w4IfVSMVm1TrvKnFytguNFiV1NERi04ukKpiQSXKifDYb2JQkwBJUwakujIy14Etx0Cnwsbg95zat/vuu5dtKQav4LlBB/fIzxUutfQqrjKQbjy3ijoKB+RGIkSiBpwBHWnDH9FFQGDKZP9XKz2KQ5Jinoj3XRGVyS42XkdazBlsV3VcfQlOCAHhCfqSrTkm1iA37WUvofvxvvZmjwx7qHeplTF5ujJVUGvhV6v86kLaJvVFeq5whU8Ekp2FEHgi+jrYSaAM1OUopxXStr9LlyQbE3SdxvrwvVfkFoDrGNTUd7/73WWVll1pmEE4GtZAMHu1or502ERIHn6KJd6oYAZGXOK1BOodBlnphSpWp0N1lPewZ+K5L05r1HVfmu199UIQCMb3qKeBRmpDAnUZ3We/s3dNXP/Lr1WVPRuWnzKKCx/vRX7uk/bkR80zgNsQcb2nDaj7ca+Na/BaLWUnFdSBlI0cQwKKd6QBg2FEWadvZZW0LZ2QeiIN14irfvqUiSzss57XOInj/5gg63QQmXJKo7UBRjztCAN9Vb7qJL2oW5RFfHZUtktlH4Vr9IFIP65MNMPK6Z7+btxYdIDN+hx6SW7rc4P0qe71YJxFuYLcqPijJoJZ5JNpJAIQSHKbg34waxIKSNp02/8j3qTXUeQm3WnTnrQMGW/9QSDJbQ7aermIYdakg9zY3GrJbbnKPgfNmlWYEoEktykBzNcnR4At0P7BsElN/mbGTASWjkCS29IxyzcSgURgFSCQ5LYKGimLmAgkAktHIMlt6ZjlG4lAIrAKEEhyWwWNlEVMBBKBpSOQ5LZ0zPKNRCARWAUIJLmtgkbKIiYCicDSEUhyWzpm+UYikAisAgSS3FZBI2URE4FEYOkIJLktHbN8IxFIBFYBAr0nN7va43SMVYBnFjERSAR6gkCvyC1+Z+joGP5CnVMWxyQ7LNBRzu1ZYj3Bca2K4XgldWrPiFurxOboJe3vtF3e2tvjoGZVTccCOc9tEteMs8oz01m3CPSG3ILYHBceTmcdZojcHNLo7H2H7zkPzHHKEX/dwjXb3OJo7fCxuZTUnenleGnHaMeR6Ut5v89xHdCp3Yd5fppVueM03Ti9eFbpZjr9QaA35AYSB/g5KRaJOdnWYXtx4KLDIB0PHq7swhN8f6BceknCH0Hrv2GSlEg0jkF3zHn4j5jkvdUQB7nxBMVXbnsA6azK74hvJ+W2p/bOKv1MZ+UR6BW5cfiB2Fxb70gBVXjt5ixlXMdfijrj4MRJ408aL8rbXutDGsP58ShyY2v0GRV4SnL0udNyI9QSbZzYGifBRpylXOXv9N5Jg7zYSRcLcByVrnadBblJf1Qe48qnbE4lrttqXPx81k8EekNupA8+K/lDGCeJGGy8rPOYHo5iYkA7lpn3IMRn0Dt/3/n19WAz4KlyXAk6lpsnLRIQJ8hctbGDRYh0HTvNBkSSkC4P9RzPLGXgOPaZAxXOVZTd0d2jyA3Z8Y/gTHwf8dmf4vhrx3NziENq44xYfTl5iQnBcducuXCczBELaZgkXDu3iTqOujoCmws9Tpy1CZtnW2f2Ks5Y+KsgdfNfQJ3kD4G/iGHt6Mh4ZeddXdmonvxm1P4IaslNm7KP8ZcQHq3qMsOC0xUeyAIf2PKAJn3+ODhtcbx4HXimP+GEE7p77rlnYOJQB74deIvirIY5hOlg3ARTp5nf+4XAxOSmgQ1GnYwbsmHn1U9TtauvvrpIbRzXBqksll49szonzCAn+XEGYmAa3P5nX0FkggHAsTFXeQYiRyo6P2ck4nLoUROc8/LD8xSHHdLl1k1cAzMIdlxZw18lu6G0vMfvJkKVTi25ISX3PEPinMcgVPd4M6Kmf/Ob3yzl4LSEoxCDUB4CQkEc7JXqxRmMq//l2TqNGVZuE4T8kBpH0vyt7rTTTuWe8gSpc3DD0QkyYy5QTk5qSF3e907tLxNBu88py6GHHlrqZ2JxTxnDGU5NbvodFVIcbcRPQB2QmmcmK+Gqq64qHrpgAD8fRCXOm9/85oE0xncqT17hzpBfBUTIpmsS4kAmnMog49ZJS12G/N5PBCYmNxKODhIfnWaY8461rWa4h9NZ22Aw6VzxoTL41OpWuEwjXdTkGO4CeTBChsiNRyH1INnV/iiDhHi0imB2F7eWjDw7++yzy31uAGuSjffiSlK5z33uUwg13NZ5ZhAiJmkHuZEckC5Srh3isAshVj406/IOU0tjsF900UVRhHLllk5eSGBceS1OkBYNdM5OIiAZBOQkXeQqIFJevaRLUgszgXZBHu6HG0XSMxLmH9WJvHUI37G8Twm1WqrN9TOuC71bS58k1b333rssNrmvjCYukldNqjALCTY8aSFtk0Osvut/yO6mm26qi9YddthhpR6TTAoLXsx/VhyBicgtOpfOGh8Szyy963CdJ22zax3M1FQMg8jHrE+iICUEKYTndr4g20DdMXNTqwwa5MbfJ3UO8dTB/zo4X5iCQYFQxG+DgcWFm08MmDaO/7mJGzZoPCO9qHPUgzRskHHd1gbSp7qH1yt2IZIMtREpRuBNicu31lDOPybXd8oTizTxTn1Fqoj8+uuvr2+X79ROqjCJTUBum222WZEUazLxLLy1k/YF5EYiROptsB0GDsou1OQWhKlO4nDNGIE/WJMB0tWubI/wQGTt9hp+V3lBC6fWLbnRGKRfu+GTD8ncajb1NcPqQmAiciMJIRiNHx8EM0vVNBYTSF51MBuTvtjDfMzyoW4GEVo5VS4qkZmXD8f48MNJPQoSMAhIbGZ3qmwduG4jZYX3drM1tUvdLWTUH+kqB1WzlUQiTRInFZYUNMz+FFtBgtziPcSJjNzn7JcpQLkWI7daYqWGsz1RBam6oVZyJDyO3KIMrqQaeCIokjoiacmNNMUWSZKuA0LQJsrfBn5ctQ/pkpu/8AkadrFh5KZtONrmhzMWdcLpce1pPSbJBz/4wUWNV46a/KMsLbkh8+jb+hnHyraJTIpVpJvX/iAwEbkpLttEOJ3VcVrxfdoqxbaI4447boFaOSxdnVlHJB0ICLF28BudtL4iBoMqyI2RvSVnzntrckNgBlSdTvudjYZ372GB1EjCZNsbJt2FDSrIjbpo4QIZRj7IhPpJglyM3JSBtGkCqMvNYXLsHZyE3Az0cLKsHAic5IsgW3JDTFTDsMMFDsPIjeQU9jjpsm9SNy2y+H8UuQVp77fffkU1RbqhkpLKa7snlVhfigkwcJQP212k1ZKbcpOYSci808d7VHSbfdv6RT3z2l8EJiK36BBUFjPocojoOhb1jaoVqkMLW5QjZucgtyA7qgWJh6d4H4RsILARUefM+EFuDPatJ24kxQ4WkpvBppMzLrPpRLqu/qeekchG2R4Zxhm2fdq81I3kIv0gN6uC/ke8bFXyiQ26bFiLkZu6MYZLw6ofyTPsY3BAeIuRG+/rVHZkinzhGU6Ijz
nmmEJuYY5Q9yC31tDfkhvpVlwTI6lIurHIE4soi5Eb4lc35eLI2MTCJlbbXqPPaBNtRLpjrvAe/MIhc5CbOG3QV0zezBMkU++GltDGzf/7i8BE5LYuim8Qm21H2aeiDGZpKrEOF+Rm24L/reoFAUZ8Hd/MazbX4ZdCbiHJIYxh6gmJkQF/1EqaspA2ECZP8W1ghFfuIDeE5H/qbx2kQ5VuyS22poTh34APSa+VNAKjxcjtnHPOKWVAIG2gHku/JTcrwIuRG6zUjSreBqqpZ4uRG6JGNiabkPSvvfbaQXL6hgWeYcZ/9ZZH2PyC3NjtTKYWzC6//PI12vnmm28uktzxxx+/xrNBxvmllwj0htygQx1i9NaBSS61RGSA33nnnYUAQwWN2dRANtCpOQZxHWIQkPYQ3aTkxgBO9bH1g93thhtuqJPtrrzyyjJYbI0IG9CCCP//H4PPoEIMNQkqJ0nGsyA3Bnv/t0ZtditlQG4hNSvfnnvuWRZLwsBvNZZkZGW1VoO9Q62UttXfceHcc88t8VpbmdVWW0m0TdiwSDjqMAm5kdbkbxtRHUhI8dO6W2+9tTxqbW71hIXYlOEhD3lIUZPr34aSDvUNanwsREReyA2Gt9xyS7lVk5v0Yckk0U5CCFeaVpnrckS6ee0vAr0iNzCZQYO87Acj3VhssJHV4KDi2fTpe71yZmAYJD4kIJ3xoIMOKvEY9WMLBXKzXcCG4XaFT8eWrlVM8QRquD1cOr4Nq9KN7QHKEmrOqCaWTpQDAVsZRLRWGe0Pk59NsILVQuRuJZoKiAjs3bP6x94FF5txoy7Pf/7zy/twQkbyMmilaYXXyitV3SozDKRtIcUGVuQ4LHzuc58rhIXIlFMats7AIOxlJGRESpJin0KcreQWizyxtUdcaWywwQadcsORVIuMYz8ZexfTAHWerdLWkdj7FmWNCVAdEXW7rSVMFspMCr3gggsGq9ImqjB5+K7ssc8tJDQ2NmqylV0ry4iUtJq/QY0WWD3X3pEb6NjfdD4dSwc0qM3UyIWdR4c3+GOmjxn1jjvuKIbzMKaTKqg8obZJmzRmq4OOG/asaC6SCOK69NJLB7v9PdOx2bwQnEFFQjTAR62SRnpxJUUpr/p432ChKis/ya+WFqiDBrZ6y88KsAFI1bIwgajY4gRqs0GMDNVHQNhWNtmj5MXgbo8fe6lVZ/Y00o9V6FGBNMXIT9LxIdXYg8Zut+uuu5YJRNlJTYgKgbXSq60XVjbrXxWQVoNk1Q9xk4wQnzwQHymWJE69tN+wVa8tAsEBUd92221rVAHWxx57bMFEHswc2osdtZ7M2Dv1p5goJEQaZ+/U33yijKS96GNrZJg3eotAL8kt0LK9gDFbh20HT8QZdrVK6b12YAyLu5R7VB1lidl/Ke+K6z3lWqwu1Gdk1G6vQMxBSvVgG2ZQV1Z73VrJBibD7IdtXbynDK16J1/mgjr/9t1xz8RVrlYic39UunV6tu+wuSL9wCLyr+OpJ/K1aLGU9op6e6/dKxj55HV1INBrclsuCOtBMCyPxZ57Z5I4w9LOe9Mh4JgnEmm9kDBdivn2vCIwl+S2GPEs9nxeG3s114sNjP2Qqs4WR6rMkAiMQ2DuyC2Ja1xzr95nbKdscux08RvUcbXJfjAOnfXj2dyR2/rRbOtfLdnX0ga2/rX7NDVOcpsGvXw3EUgEeotAkltvmyYLlggkAtMgkOQ2DXr5biKQCPQWgSS33jZNFiwRSASmQSDJbRr08t1EIBHoLQJJbr1tmixYIpAITINAkts06OW7iUAi0FsEktx62zRZsEQgEZgGgSS3adDLdxOBRKC3CCS59bZpsmCJQCIwDQJJbtOgl+8mAolAbxFIcutt02TBEoFEYBoEktymQS/fTQQSgd4ikOTW26bJgiUCicA0CCS5TYNevpsI6e3c5gAAAKFJREFUJAK9RSDJrbdNkwVLBBKBaRBIcpsGvXw3EUgEeotAkltvmyYLlggkAtMgkOQ2DXr5biKQCPQWgSS33jZNFiwRSASmQSDJbRr08t1EIBHoLQJJbr1tmixYIpAITINAkts06OW7iUAi0FsEktx62zRZsEQgEZgGgSS3adDLdxOBRKC3CCS59bZpsmCJQCIwDQJJbtOgl+8mAolAbxH4P7fylMIHqCShAAAAAElFTkSuQmCC) ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAhYAAACuCAYAAABqQUTiAAAgAElEQVR4Ae3dB7QlRfX2YXMiqBjAhAHBhDkLRhQjYhYFAQEVRRBQMSCKAgIGFCNmxAgGEBUxgBEVFROCgijmnHPu/3r6+/ZdNT3nhhl6Zg5n3lpr5pzbp7rCr6prv7WruvsiXUIIhEAIhEAIhEAIjETgIiOlk2RCIARCIARCIARCoIuwSCcIgRAIgRAIgRAYjUCExWgok1AIhEAIhEAIhECERfpACIRACIRACITAaAQiLEZDmYRCIARCIARCIAQiLNIHQiAEQiAEQiAERiMQYTEayiQUAiEQAiEQAiEQYZE+EAIhEAIhEAIhMBqBCIvRUCahEAiBEAiBEAiBCIv0gRAIgRAIgRAIgdEIR) https://builtin.com/data-science/step-step-explanation-principal-component-analysis Importing the librariesimport numpy as np import matplotlib.pyplot as plt import pandas as pdImporting the datasetdataset = pd.read_csv('https://raw.githubusercontent.com/gauravpks/ml-repo/master/Part%209%20-%20Dimensionality%20Reduction/Principal%20Component%20Analysis%20(PCA)/Wine.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].valuesSplitting the dataset into the Training set and Test setfrom sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)Feature Scalingfrom sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) X_test[0]Applying PCAfrom sklearn.decomposition import PCA pca = PCA(n_components = 2) X_train = pca.fit_transform(X_train) X_test = 
Training the Logistic Regression model on the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)

Making the Confusion Matrix
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
[[14 0 0] [ 1 15 0] [ 0 0 6]]

Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01), np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()

*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.

Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01), np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()

*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
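The matplotlib message above is triggered by passing a ListedColormap entry through the c argument of plt.scatter. A hedged variant of the scatter loop that keeps the same colours but avoids the warning (an addition for illustration, not part of the original notebook):

# Assumes X_set, y_set, np and plt from the cells above.
colors = ('red', 'green', 'blue')
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                color = colors[i], label = j)  # color= avoids the c-vs-colormap ambiguity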
Definitions
# read each file and create an identifier for each course
def set_course_name(file_path, course_name):
    course_data = pd.read_csv(file_path, header='infer')
    course_data["Course Name"] = course_name
    print(course_data.tail(2))
    return course_data

def merging_df(dataframes_list):
    merged_df = pd.concat(dataframes_list)
    print(merged_df.shape)
    print(merged_df.tail(10))
    return merged_df

Import information

Math Course
math_data = set_course_name("./_data/math_course_data.csv", "Math")
Student Name Student Contact Grade Course Name 2 Mark 90 Math 3 Shane 87 Math

Biology Course
biology_data = set_course_name("./_data/biology_course_data.csv", "Biology")
Student Name Student Contact Grade Course Name 2 Mark 90 Biology 3 Shane 95 Biology

Chemistry Course
chemistry_data = set_course_name("./_data/chemistry_course_data.csv", "Chemistry")
Student Name Student Contact Grade Course Name 2 Mark 58 Chemistry 3 Shane 74 Chemistry

Merging Sessions Information
courses = [math_data, biology_data, chemistry_data]
merged_info = merging_df(courses)
(12, 4) Student Name Student Contact Grade Course Name 2 Mark 90 Math 3 Shane 87 Math 0 Al 90 Biology 1 Dominic 90 Biology 2 Mark 90 Biology 3 Shane 95 Biology 0 Al 80 Chemistry 1 Dominic 56 Chemistry 2 Mark 58 Chemistry 3 Shane 74 Chemistry

Exporting Data
merged_info.to_csv(path_or_buf='./_output/0_unified_courses_data.csv', index=False)

Preprocessing 2
- Discretization/binning
- Scaling
- Principal Component Analysis
  - Feature Selection
  - Feature Extraction

1. Discretization/binning
Turning continuous values into discrete or categorical ones, for example mapping ages 1-5 to the category "toddler" (balita), 6-15 to "child" (anak-anak) and 16-22 to "teenager" (remaja).
usia = [12, 15, 7, 8, 40, 60]
usia_diskrit = ["anak_muda","anak_muda","bocil","bocil","dewasa","dewasa"]
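The toy lists above assign the categories by hand; the same idea expressed with pandas, so the binning is driven by explicit cut points (an illustrative addition; the bin edges and labels are assumptions based on the age ranges mentioned above):

# Assumes pandas; pd.cut maps each age to the bin it falls into.
import pandas as pd
usia = [12, 15, 7, 8, 40, 60]
usia_kategori = pd.cut(usia,
                       bins=[0, 5, 15, 22, 120],
                       labels=["balita", "anak-anak", "remaja", "dewasa"])
print(list(usia_kategori))  # ['anak-anak', 'anak-anak', 'anak-anak', 'anak-anak', 'dewasa', 'dewasa']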
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv("california_housing_test.csv")
data

1. longitude: A measure of how far west a house is; a higher value is farther west
2. latitude: A measure of how far north a house is; a higher value is farther north
3. housingMedianAge: Median age of a house within a block; a lower number is a newer building
4. totalRooms: Total number of rooms within a block
5. totalBedrooms: Total number of bedrooms within a block
6. population: Total number of people residing within a block
7. households: Total number of households, a group of people residing within a home unit, for a block
8. medianIncome: Median income for households within a block of houses (measured in tens of thousands of US Dollars)
9. medianHouseValue: Median house value for households within a block (measured in US Dollars)

data.describe()

pandas.qcut: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.qcut.html
pandas.cut: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html

pd.qcut(data['population'], q=4).value_counts()
data['population']
pd.qcut(data['population'], q=5).value_counts().plot(kind='bar')
label = ['sedikit', 'banyak', 'agak banyak', 'biasa', 'tidak sedikit']
label_populasi = pd.qcut(data['population'], q = [0, 0.2, 0.4, 0.6, 0.8, 1], labels = label)
label_populasi
label_populasi.value_counts().plot(kind='barh')
data_umur_rumah = pd.cut(data['housing_median_age'], bins=5).value_counts()
data_umur_rumah
data_umur_rumah.plot(kind='bar')
label = ["lumayan tua", "biasa", "agak modern", "tua", "modern"]
label_umur_rumah = pd.cut(data['housing_median_age'], bins = 5, labels = label)
label_umur_rumah
label_umur_rumah.value_counts().plot(kind='barh')

2. Scaling
Rescaling data whose values vary widely so that they end up on a scale that is not too large.
data = pd.read_csv("baru.csv", index_col="Item_Identifier")
data
data.describe()
data.boxplot(['Item_MRP', 'Item_Weight'])
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(data[['Item_MRP']])
data['Item_MRP_Scalled'] = scaler.transform(data[['Item_MRP']])
data['Item_MRP_Scalled'].describe()
data.boxplot(['Item_MRP_Scalled'])
scaler.fit(data[['Item_Weight']])
data['Item_Weight_Scalled'] = scaler.transform(data[['Item_Weight']])
data['Item_Weight_Scalled'].describe()
data.boxplot(['Item_MRP_Scalled', 'Item_Weight_Scalled'])
# original min == 0
# original max == 200
awal = np.array([[25, 50, 150, 200, 75]])
# new min == 0
# new max == 1
(awal-0)*(1-0)/(200-0)+0

3.1 Feature Selection
The goal is to pick the features that have the most "influence" on the dataset.
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
bmsx = np.array([data["Item_Weight"], data["Item_Fat_Content"], data["Item_Visibility"], data['Item_MRP'], data["Outlet_Size"]])
bmsx.shape
bmsx
bmsxt=bmsx.transpose()
bmsxt.shape
bms_std=StandardScaler().fit_transform(bmsxt)
bms_std
pcafs=PCA(n_components=0.70,whiten=True)
bmsfs_pca=pcafs.fit_transform(bms_std)
print('Original number of features:', bms_std.shape[1])
print('Reduced number of features:', bmsfs_pca.shape[1])
datafs_pca=pd.DataFrame(bmsfs_pca, columns=["PC1","PC2","PC3","PC4"])
datafs_pca

3.2 Feature Extraction
The goal is to combine/group the features into just a few features.
from sklearn import decomposition
pcafe=decomposition.PCA(n_components=3)
bmsfe_pca=pcafe.fit_transform(bms_std)
print('Original number of features:', bms_std.shape[1])
print('Reduced number of features:', bmsfe_pca.shape[1])
datafe_pca=pd.DataFrame(bmsfe_pca, columns=["PC1","PC2", "PC3"])
datafe_pca

PyTorch Experiments Template
import torch
import torchvision
import pandas as pd
import numpy as np
import random
import cv2
import json
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath("phd/src"))
sys.path.insert(0, os.path.abspath("benatools/src"))
import albumentations as A
import matplotlib.pyplot
from benatools.torch.efficient_net import create_efn2
from benatools.torch.fitter import TorchFitter
from benatools.torch.loss import CategoricalCrossEntropy
from benatools.utils.tools import MultiStratifiedKFold
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from scipy.special import softmax
from
ads.labels import get_topics import ads.dataset # CONSTANTS PLATFORM = 'KAGGLE' # this could be 'COLAB' or 'LOCAL' DEVICE = 'TPU' # This could be 'GPU' or 'CPU' SEED = 42 seed_everything(SEED)Initialization Seeding everything for experiment replicability# Seed def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(42)Read Data There are normally some files linked to the dataset with metadata, contextual information, calendars, etc.# Read files # training_examples = pd.read_csv('training_examples.csv')Datasetclass TrainDataset(torch.utils.data.Dataset): def __init__(self, df, root:str, transforms=None, label_smoothing=0.0, channel_first=True, scaling_method='norm'): self.df = df # DataFrame containing self.root = root # root folder self.transforms = transforms # transforms pipeline self.label_smoothing = label_smoothing # label smoothing alpha self.channel_first = channel_first # whether to self.scaling_method = scaling_method # 'norm' normalizes the data to imagenet. 'scale' scales the data to [0,1] def get_labels(self): return np.array(self.df.columns) def _read(self, name): path = self.root + name img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (256,256)) return img def _label_smoothing(self, labels): if self.label_smoothing > 0: labels *= (1-self.label_smoothing) labels += (self.label_smoothing / labels.shape[1]) return labels def _scale(self, img): if self.scaling_method == 'norm': normalize = A.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ) img = normalize(image=img)['image'] else: img = img/255.0 return img def _adjust_channel(self, img): if self.channel_first: img = np.transpose(img, axes=(2,0,1)) return img def _transform(self, img): if self.transforms: img = self.transforms(image=img)['image'] return img def __len__(self): return len(self.df) def __getitem__(self, idx): # Get row row = self.df.iloc[idx] labels = row.values # Label smoothing labels = self._label_smoothing(labels) # Read image and reformat img = self._read(row.name) # Apply transforms img = self._transform(img) # Scale img = self._scale(img) # Adjust to C x H x W for pytorch img = self._adjust_channel(img) # Format data into a dict data = {'x': torch.from_numpy(img), 'y': torch.from_numpy(labels.astype(np.float32)) } return data def get_transforms(): """ A Function that returns a transforms pipeline """ transform = A.Compose([ A.OneOf([ A.RandomRotate90(), A.Flip(), A.Transpose() ], p=0.2), A.OneOf([ A.IAAAdditiveGaussianNoise(), A.GaussNoise(), ], p=0.2), A.OneOf([ A.MotionBlur(p=.2), A.MedianBlur(blur_limit=3, p=0.1), A.Blur(blur_limit=3, p=0.1), ], p=0.2), A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5), A.OneOf([ A.OpticalDistortion(p=0.3), A.GridDistortion(p=.1), A.IAAPiecewiseAffine(p=0.3), ], p=0.2), A.OneOf([ A.CLAHE(clip_limit=2), A.IAASharpen(), A.IAAEmboss(), A.RandomBrightnessContrast(), ], p=0.3), A.HueSaturationValue(p=0.3), A.OneOf([ A.Cutout(num_holes=100, max_h_size=6, max_w_size=6, fill_value=255, p=0.4), A.Cutout(num_holes=8, max_h_size=25, max_w_size=25, fill_value=0, p=0.4), A.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.4) ]), ]) return transform def get_dataloader(df, bs=8, shuffle=False, drop_last=False, do_aug=True): transforms = None if do_aug: transforms = get_transforms() ds = ads.dataset.ImageDataset(df, root=IMG_ROOT, 
transforms=transforms) return torch.utils.data.DataLoader(ds, batch_size=bs, shuffle=shuffle, num_workers=4, pin_memory=True, drop_last=drop_last)It is useful to take a look at the databs = 12 dl = get_dataloader(df.iloc[:bs], bs=bs, shuffle=False, drop_last=False, do_aug=True) fig, axis = plt.subplots(2,bs//2, figsize=(20,10)) axis = axis.ravel() for data in dl: for i in range(len(data)): axis[i].set_title(' | '.join( df.columns[data[i]['y'].numpy()==1] ) ) axis[i].imshow(np.transpose(data[i]['x'].numpy(), (1,2,0)))ModelWhen experimenting, many different models or variations can be tried. It is useful to have a common function to route the model creations further in the training loopclass Identity(torch.nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x class ImageClassifier(torch.nn.Module): def __init__(self, n_outs=39, trainable_base=False): super(ImageClassifier, self).__init__() self.base = torchvision.models.resnet152(pretrained=True, progress=True) self.base.fc = Identity() self.set_trainable(trainable_base) self.classifier = torch.nn.Sequential( torch.nn.Linear(in_features=2048, out_features=512), torch.nn.ReLU(), torch.nn.LayerNorm(512), torch.nn.Dropout(0.25), torch.nn.Linear(in_features=512, out_features=n_outs), ) def set_trainable(self, trainable): for param in self.base.parameters(): param.requires_grad = trainable def get_cnn_outputs(self, b): outs = [1280, 1280, 1408, 1536, 1792, 2048, 2064, 2560] return outs[b] def forward(self, x): x = self.base(x) x = self.classifier(x) return xExperiments ConfigurationN_EXPERIMENTS = 1 # Normally not more than one run per commit FOLDS = [0] * N_EXPERIMENTS # Each run should cover a single fold # DATALOADER PARAMS BS = [32] * N_EXPERIMENTS # LEARNING RATE LR = [0.001] * N_EXPERIMENTS # TRANSFORMS # Params for the transforms functions # GLOBAL PARAMETERS EPOCHS=50 DISPLAY_PLOT=True VERBOSE = 1Training Loop# Reduce data to a subsample df_sub = df #.iloc[:10000] cv = MultiStratifiedKFold(5, df_sub, df.columns.tolist(), seed=SEED) cv_dict = {i:(train_idx, val_idx) for i,(train_idx, val_idx) in enumerate(cv.split(df_sub))} for i in range(0, N_EXPERIMENTS): print(f'********** EXPERIMENT {i} **********') print(f'***** bs train {BS[i]} *****') print(f'***** LR {LR[i]} *****') print(f'**********************************\n') seed_everything(SEED) # Get Dataloader train_idx, val_idx = cv_dict[FOLDS[i]] train_df, val_df = df_sub.loc[train_idx], df_sub.loc[val_idx] print(f'Training on {len(train_df)} samples - Validating on {len(val_df)} samples') train_ds = get_dataloader(train_df, bs=BS[i], shuffle=True, drop_last=False, do_aug=True) val_ds = get_dataloader(val_df, bs=BS[i], shuffle=False, drop_last=False, do_aug=False) # Create model model = ImageClassifier(trainable_base=True) #optimizer = torch.optim.Adam(model.parameters(), lr=0.001 ) optimizer = torch.optim.SGD(model.parameters(), lr=LR[i], momentum=0.9) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=3, mode='max') #loss = torch.nn.BCEWithLogitsLoss() loss = CategoricalCrossEntropy(from_logits=True, label_smoothing=0.1, reduction='mean') model.cuda() # Fitter object fitter = TorchFitter(model, device='cuda', loss=loss, optimizer=optimizer, scheduler=scheduler ) history = fitter.fit(train_ds, val_ds, n_epochs=EPOCHS, metric=accuracy_one_hot, early_stopping_mode='max', verbose_steps=5, early_stopping=10) # Plot training plt.figure(figsize=(15,5)) plt.plot(np.arange(len(history)), 
history['train'],'-o',label='Train Loss',color='#ff7f0e') plt.plot(np.arange(len(history)), history['val'],'-o',label='Val Loss',color='#1f77b4') x = np.argmin( history['val'] ); y = np.min( history['val'] ) xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0] plt.text(x-0.03*xdist,y-0.13*ydist,'min loss\n%.2f'%y,size=14) plt.ylabel('Loss',size=14); plt.xlabel('Epoch',size=14) plt.legend(loc=2) plt2 = plt.gca().twinx() plt2.plot(np.arange(len(history)),history['val_metric'],'-o',label='Accuracy',color='#36de47') #x = np.argmax( history['val_F1'] ); y = np.max( history['val_F1'] ) #xdist = plt2.xlim()[1] - plt2.xlim()[0]; ydist = plt2.ylim()[1] - plt2.ylim()[0] #plt2.text(x-0.03*xdist,y-0.13*ydist,'max F1\n%.2f'%y,size=14) #plt2.ylabel('F1',size=14); plt2.xlabel('Epoch',size=14) plt2.legend() #plt2 = plt.gca().twinx() #plt2.plot(np.arange(len(history)),history['lr'],'-o',label='LR',color='#2ca02c') #plt.ylabel('LR',size=14) plt.title(f'Results fold {i}',size=18) plt.legend(loc=3) plt.show()**2) Sabe-se, pelas medições, que $\frac{ω_v}{V}$ = 5% e $\frac{ω_{Δt}}{Δt}$ = 1%, calcule $\frac{ω_Q}{Q}$.**A partir das respostas do exercício anterior, pela lei do produtório, tem-se que: **Calculadoras**#Calculadora de incerteza: %reset -f from uncertainties import ufloat from math import pi d = ufloat(3.906, 0.084, "diâmetro") h = ufloat(6.119, 0.019, "altura") t = ufloat(84.80, 0.2, "tempo") vazao = pi*h*d**2/(4*t) print(vazao.format('.6f')) #Calculadora t de Student: %reset -f import scipy.stats conf = 0.9 σ = 1.180411478 def t(alpha, gl): return scipy.stats.t.ppf(1-(alpha/2), gl) print(t(1 - conf,13)*σ) %reset -f from CoolProp.CoolProp import PropsSI from uncertainties import ufloat P = 96.550 #kPa w_P = 0.150 T = 28.5 + 273.15 #K w_T = 0.5 R = 8.31451 MM = PropsSI('M','P', P*1000, 'T', T, 'Air')*1000 Px = ufloat(P, w_P) Tx = ufloat(T, w_T) ρx = Px*MM/(R*Tx) print(ρx.format('.6f'))1.115046+/-0.002533Table of Contents 1  读取数据2  数据整理2.1  将str格式的日期变为 datatime2.2  筛选月薪格式为“XXXX-XXXX”的信息2.3  分割月薪字段,分别获取月薪的下限值和上限值3  对全国范围内的职位进行分析3.1  主要城市的招聘职位数量分布情况4  筛选北京和长沙的职位4.1  月薪分布情况(全国)4.2  相关技能要求5  北京5.1  月薪分布情况5.2  相关技能要求6  长沙6.1  相关技能要求import pymongo import pandas as pd import matplotlib.pyplot as plt import numpy as np % matplotlib inline plt.style.use('ggplot') # 解决matplotlib显示中文问题 plt.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题读取数据client = pymongo.MongoClient('localhost') db = client['zhilian'] keyword = 'python' table = db[keyword] columns = ['zwmc', 'gsmc', 'zwyx', 'gbsj', 'gzdd', 'fkl', 'brief', 'zw_link', '_id', 'save_date'] # url_set = set([records['zw_link'] for records in table.find()]) # print(url_set) df = pd.DataFrame([records for records in table.find()], columns=columns) # columns_update = ['职位名称', # '公司名称', # '职位月薪', # '公布时间', # '工作地点', # '反馈率', # '招聘简介', # '网页链接', # '_id', # '信息保存日期'] # df.columns = columns_update print('总行数为:{}行'.format(df.shape[0])) df.head(2)总行数为:65135行数据整理 将str格式的日期变为 datatimedf['save_date'] = pd.to_datetime(df['save_date']) print(df['save_date'].dtype) # df['save_date']datetime64[ns]筛选月薪格式为“XXXX-XXXX”的信息df_clean = df[['zwmc', 'gsmc', 'zwyx', 'gbsj', 'gzdd', 'fkl', 'brief', 'zw_link', 'save_date']] # 对月薪的数据进行筛选,选取格式为“XXXX-XXXX”的信息,方面后续分析 df_clean = df_clean[df_clean['zwyx'].str.contains('\d+-\d+', regex=True)] print('总行数为:{}行'.format(df_clean.shape[0])) # df_clean.head()总行数为:60720行分割月薪字段,分别获取月薪的下限值和上限值# 
http://stackoverflow.com/questions/14745022/pandas-dataframe-how-do-i-split-a-column-into-two # http://stackoverflow.com/questions/20602947/append-column-to-pandas-dataframe # df_temp.loc[: ,'zwyx_min'],df_temp.loc[: , 'zwyx_max'] = df_temp.loc[: , 'zwyx'].str.split('-',1).str #会有警告 s_min, s_max = df_clean.loc[: , 'zwyx'].str.split('-',1).str df_min = pd.DataFrame(s_min) df_min.columns = ['zwyx_min'] df_max = pd.DataFrame(s_max) df_max.columns = ['zwyx_max'] df_clean_concat = pd.concat([df_clean, df_min, df_max], axis=1) # df_clean['zwyx_min'].astype(int) df_clean_concat['zwyx_min'] = pd.to_numeric(df_clean_concat['zwyx_min']) df_clean_concat['zwyx_max'] = pd.to_numeric(df_clean_concat['zwyx_max']) # print(df_clean['zwyx_min'].dtype) print(df_clean_concat.dtypes) df_clean_concat.head(2)zwmc object gsmc object zwyx object gbsj object gzdd object fkl object brief object zw_link object save_date datetime64[ns] zwyx_min int64 zwyx_max int64 dtype: object* 将数据信息按职位月薪进行排序df_clean_concat.sort_values('zwyx_min',inplace=True) # df_clean_concat.tail()* 判断爬取的数据是否有重复值# 判断爬取的数据是否有重复值 print(df_clean_concat[df_clean_concat.duplicated('zw_link')==True]) # df_clean_concat.to_csv('zhilian_python_update.csv')* 从上述结果可看出,数据是没有重复的。 对全国范围内的职位进行分析 主要城市的招聘职位数量分布情况# from IPython.core.display import display, HTML ADDRESS = [ '北京', '上海', '广州', '深圳', '天津', '武汉', '西安', '成都', '大连', '长春', '沈阳', '南京', '济南', '青岛', '杭州', '苏州', '无锡', '宁波', '重庆', '郑州', '长沙', '福州', '厦门', '哈尔滨', '石家庄', '合肥', '惠州', '太原', '昆明', '烟台', '佛山', '南昌', '贵阳', '南宁'] df_city = df_clean_concat.copy() # 由于工作地点的写上,比如北京,包含许多地址为北京-朝阳区等 # 可以用替换的方式进行整理,这里用pandas的replace()方法 for city in ADDRESS: df_city['gzdd'] = df_city['gzdd'].replace([(city+'.*')],[city],regex=True) # 针对全国主要城市进行分析 df_city_main = df_city[df_city['gzdd'].isin(ADDRESS)] df_city_main_count = df_city_main.groupby('gzdd')['zwmc','gsmc'].count() df_city_main_count['gsmc'] = df_city_main_count['gsmc']/(df_city_main_count['gsmc'].sum()) df_city_main_count.columns = ['number', 'percentage'] # 按职位数量进行排序 df_city_main_count.sort_values(by='number', ascending=False, inplace=True) # 添加辅助列,标注城市和百分比,方面在后续绘图时使用 df_city_main_count['label']=df_city_main_count.index+ ' '+ ((df_city_main_count['percentage']*100).round()).astype('int').astype('str')+'%' print(type(df_city_main_count)) # 职位数量最多的Top10城市的列表 print(df_city_main_count.head(10)) number percentage label gzdd 北京 18240 0.316260 北京 32% 上海 8597 0.149062 上海 15% 深圳 5181 0.089833 深圳 9% 广州 3334 0.057808 广州 6% 成都 3168 0.054929 成都 5% 杭州 3156 0.054721 杭州 5% 南京 2049 0.035527 南京 4% 郑州 1543 0.026754 郑州 3% 武汉 1373 0.023806 武汉 2% 西安 1236 0.021431 西安 2%筛选北京和长沙的职位address_bj_cs = ['北京', '长沙'] df_city_bj_cs = df_city[df_city['gzdd'].isin(address_bj_cs)] # df_city_bj_cs.head() df_city_bj_cs.to_csv('zhilian_'+keyword+'_bj_cs.csv') from matplotlib import cm label = df_city_main_count['label'] sizes = df_city_main_count['number'] # 设置绘图区域大小 fig, axes = plt.subplots(figsize=(10,6),ncols=2) ax1, ax2 = axes.ravel() colors = cm.PiYG(np.arange(len(sizes))/len(sizes)) # colormaps: Paired, autumn, rainbow, gray,spring,Darks # 由于城市数量太多,饼图中不显示labels和百分比 patches, texts = ax1.pie(sizes,labels=None, shadow=False, startangle=0, colors=colors) ax1.axis('equal') ax1.set_title('职位数量分布', loc='center') # ax2 只显示图例(legend) ax2.axis('off') ax2.legend(patches, label, loc='center left', fontsize=9) plt.savefig('job_distribute.jpg') plt.show()月薪分布情况(全国)from matplotlib.ticker import FormatStrFormatter fig, (ax1, ax2) = plt.subplots(figsize=(10,8), nrows=2) x_pos = list(range(df_clean_concat.shape[0])) y1 
= df_clean_concat['zwyx_min'] ax1.plot(x_pos, y1) ax1.set_title('Trend of min monthly salary in China', size=14) ax1.set_xticklabels('') ax1.set_ylabel('min monthly salary(RMB)') bins = [3000,6000, 9000, 12000, 15000, 18000, 21000, 24000, 100000] counts, bins, patches = ax2.hist(y1, bins, normed=1, histtype='bar', facecolor='g', rwidth=0.8) ax2.set_title('Hist of min monthly salary in China', size=14) ax2.set_yticklabels('') # ax2.set_xlabel('min monthly salary(RMB)') # http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin ax2.set_xticks(bins) #将bins设置为xticks ax2.set_xticklabels(bins, rotation=-90) # 设置为xticklabels的方向 # Label the raw counts and the percentages below the x-axis... bin_centers = 0.5 * np.diff(bins) + bins[:-1] for count, x in zip(counts, bin_centers): # # Label the raw counts # ax2.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'), # xytext=(0, -70), textcoords='offset points', va='top', ha='center', rotation=-90) # Label the percentages percent = '%0.0f%%' % (100 * float(count) / counts.sum()) ax2.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'), xytext=(0, -40), textcoords='offset points', va='top', ha='center', rotation=-90, color='b', size=14) fig.savefig('salary_quanguo_min.jpg')**不考虑部分极值后,分析月薪分布情况**df_zwyx_adjust = df_clean_concat[df_clean_concat['zwyx_min']<=20000] fig, (ax1, ax2) = plt.subplots(figsize=(10,8), nrows=2) x_pos = list(range(df_zwyx_adjust.shape[0])) y1 = df_zwyx_adjust['zwyx_min'] ax1.plot(x_pos, y1) ax1.set_title('Trend of min monthly salary in China (adjust)', size=14) ax1.set_xticklabels('') ax1.set_ylabel('min monthly salary(RMB)') bins = [3000,6000, 9000, 12000, 15000, 18000, 21000] counts, bins, patches = ax2.hist(y1, bins, normed=1, histtype='bar', facecolor='g', rwidth=0.8) ax2.set_title('Hist of min monthly salary in China (adjust)', size=14) ax2.set_yticklabels('') # ax2.set_xlabel('min monthly salary(RMB)') # http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin ax2.set_xticks(bins) #将bins设置为xticks ax2.set_xticklabels(bins, rotation=-90) # 设置为xticklabels的方向 # Label the raw counts and the percentages below the x-axis... 
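# (added note on the annotation code that follows, based on the hist() call above)
# np.diff(bins) is the width of each bin, so bin_centers below is the midpoint of
# every bin; each percentage label is anchored at that midpoint, offset 40 points
# below the x-axis, and rotated to line up with the tick labels.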
bin_centers = 0.5 * np.diff(bins) + bins[:-1] for count, x in zip(counts, bin_centers): # # Label the raw counts # ax2.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'), # xytext=(0, -70), textcoords='offset points', va='top', ha='center', rotation=-90) # Label the percentages percent = '%0.0f%%' % (100 * float(count) / counts.sum()) ax2.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'), xytext=(0, -40), textcoords='offset points', va='top', ha='center', rotation=-90, color='b', size=14) fig.savefig('salary_quanguo_min_adjust.jpg')相关技能要求brief_list = list(df_clean_concat['brief']) brief_str = ''.join(brief_list) print(type(brief_str)) # print(brief_str) with open('brief_quanguo.txt', 'w', encoding='utf-8') as f: f.write(brief_str)北京 月薪分布情况df_beijing = df_clean_concat[df_clean_concat['gzdd'].str.contains('北京.*', regex=True)] # df_beijing.to_excel('zhilian_kw_python_bj.xlsx') print('总行数为:{}行'.format(df_beijing.shape[0])) # df_beijing.head() from matplotlib.ticker import FormatStrFormatter fig, (ax1, ax2) = plt.subplots(figsize=(10,8), nrows=2) x_pos = list(range(df_beijing.shape[0])) y1 = df_beijing['zwyx_min'] y2 = df_beijing['zwyx_max'] ax1.plot(x_pos, y1) ax1.set_title('Trend of min monthly salary in Beijing', size=14) ax1.set_xticklabels('') ax1.set_ylabel('min monthly salary(RMB)') bins = [3000,6000, 9000, 12000, 15000, 18000, 21000, 24000, 100000] counts, bins, patches = ax2.hist(y1, bins, normed=1, histtype='bar', facecolor='g', rwidth=0.8) ax2.set_title('Hist of min monthly salary in Beijing', size=14) ax2.set_yticklabels('') # ax2.set_xlabel('min monthly salary(RMB)') # http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin ax2.set_xticks(bins) #将bins设置为xticks ax2.set_xticklabels(bins, rotation=-90) # 设置为xticklabels的方向 # Label the raw counts and the percentages below the x-axis... 
bin_centers = 0.5 * np.diff(bins) + bins[:-1] for count, x in zip(counts, bin_centers): # # Label the raw counts # ax2.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'), # xytext=(0, -70), textcoords='offset points', va='top', ha='center', rotation=-90) # Label the percentages percent = '%0.0f%%' % (100 * float(count) / counts.sum()) ax2.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'), xytext=(0, -40), textcoords='offset points', va='top', ha='center', rotation=-90, color='blue', size=14) fig.savefig('salary_beijing_min.jpg')相关技能要求brief_list_bj = list(df_beijing['brief']) brief_str_bj = ''.join(brief_list_bj) print(type(brief_str_bj)) # print(brief_str_bj) # with open('brief_beijing.txt', 'w', encoding='utf-8') as f: # f.write(brief_str_bj)长沙df_changsha = df_clean_concat[df_clean_concat['gzdd'].str.contains('长沙.*', regex=True)] # df_changsha = pd.DataFrame(df_changsha, ignore_index=True) # df_changsha.to_excel('zhilian_kw_python_cs.xlsx') print('总行数为:{}行'.format(df_changsha.shape[0])) # df_changsha.tail() from matplotlib.ticker import FormatStrFormatter fig, (ax1, ax2) = plt.subplots(figsize=(10,8), nrows=2) x_pos = list(range(df_changsha.shape[0])) y1 = df_changsha['zwyx_min'] ax1.plot(x_pos, y1) ax1.set_title('Trend of min monthly salary in Changsha', size=14) ax1.set_xticklabels('') ax1.set_ylabel('min monthly salary(RMB)') bins = [3000,6000, 9000, 12000, 15000, 18000, 21000, 24000, 50000] counts, bins, patches = ax2.hist(y1, bins, normed=1, histtype='bar', facecolor='g', rwidth=0.8) ax2.set_title('Hist of min monthly salary in Changsha', size=14) ax2.set_yticklabels('') # ax2.set_xlabel('min monthly salary(RMB)') # http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin ax2.set_xticks(bins) #将bins设置为xticks ax2.set_xticklabels(bins, rotation=-90) # 设置为xticklabels的方向 # Label the raw counts and the percentages below the x-axis... bin_centers = 0.5 * np.diff(bins) + bins[:-1] for count, x in zip(counts, bin_centers): # # Label the raw counts # ax2.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'), # xytext=(0, -70), textcoords='offset points', va='top', ha='center', rotation=-90) # Label the percentages percent = '%0.0f%%' % (100 * float(count) / counts.sum()) ax2.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'), xytext=(0, -40), textcoords='offset points', va='top', ha='center', rotation=-90, color='blue', size=14) fig.savefig('salary_changsha_min.jpg')相关技能要求brief_list_cs = list(df_changsha['brief']) brief_str_cs = ''.join(brief_list_cs) print(type(brief_str_cs)) # print(brief_str_cs) # with open('brief_changsha.txt', 'w', encoding='utf-8') as f: # f.write(brief_str_cs)The Anscombe's Quartet Dataset Submitted by: G00364748 ------------------------------------------------------------------------------------------------------ Assignment: Fundamentals of Data Analysis Submission Date: 9th November 2018 ------------------------------------------------------------------------------------------------------ Table of Contents Introduction Statement of Work Technology Used Aim of Investigation Findings References ------------------------------------------------------------------------------------------------------ Introduction This repository has been created as apart of my ongoing studies in Data Analytics.My intention with this repository is to investigate the Anscombe's Quartet Dataset using the available functions within Jupyter. 
This dataset was constructed to highlight the importance of using graphs in statistical analysis ------------------------------------------------------------------------------------------------------ Statement of Work This is a statement to confirm that the work submitted within this repository is my own work. Any information sourced online or 3rd party has been referenced appropriately. I can therefore confirm that my submitted work is in line with the Quality Assurance Framework of GMIT. ------------------------------------------------------------------------------------------------------ Technology Used * Anaconda software package* Python Programming Language* Command Prompt* Jupyter Notebook* GitHub.com* LearnOnline (GMIT) ------------------------------------------------------------------------------------------------------ Aim of Investigation It is my intention to:1. Explain the background to the dataset1. Plot any interesting aspects of the dataset1. Calculate the descriptive statistics of the variables in the dataset1. Explain why the dataset is interesting based on the previously obtained info ------------------------------------------------------------------------------------------------------ Findings 1. The background to the datasetIn 1973, a statistician called constructed the dataset quartet. His philosophy of the dataset was to "demonstrate both the importance of graphing data before analyzing it and the effect of outliers on statistical properties."[1] His development questioned the opinion another staticians that "numerical calculations are exact, but graphs are rough."![anscombe](dataset.gif)The breakdown of the information contained within the datasets: There are 8 variables; representing four pairings of an outcome and a predictor.All sets have 11 observations:the same mean of x (9) and y (7.5),the same fitted regression line (y = 3 + 0.5 x),the same regression and residual sum of squares and therefore the same multiple R-squared of 0.67. But they represent very different situations, as you will see by clicking on each dataset[2]This dataset has proven to me that statistics do not always tell the entire story when it comes to results given by data. And visualising the data allows an unbiased insight into the analysing of the data.This was deemed an issue by Anscombe and for this reason that he went on to create the quartet of datasets to visually show this flaw. [3]A 'real-life' example to highlight Anscombe's work is the following study carried out in 2012.The study involved the comparison of starting wages for law graduates based on "The National Association of Law Placement (NALP)" report from 2012.In summary, the statistical data showed the starting average wage was 80,798. When the data was graphed however, it showed two peaks in the data between 35,000USD - 75000USD at again at 160000USD."A much more accurate statement would be that most law graduates make around 50,000USD on average, and those who go to one of the top law schools make 160,000USD on average."[4]In 2017, a man by the name of , inspired by Anscombe's work, tweeted an image showing how his data looked when graphed. 
It turned out to be in the shape of a dinosaur. While sharing his work, he urged people to *"never trust summary statistics alone; always visualize your data"*[5]. Cairo's work caused a ripple effect: at the end of 2017 and the start of 2018 another publication involving the Datasaurus was created, this time including an additional 12 plots with similar statistics but very different graphs when plotted. *"These 13 datasets (the Datasaurus, plus 12 others) each have the same summary statistics (x/y mean, x/y standard deviation, and Pearson's correlation) to two decimal places, while being drastically different in appearance."*[6]

2. Plot any interesting aspects of the dataset

The graph below shows the entire data of the quartet. Based on the summary properties there is no huge difference between them, showing the close relationship when it comes to the black-and-white data. Should you rely solely on the output below rather than on visual graphs, it would be easy to assume the sets were more closely related than they actually are. Once the graphs are viewed, it is easy to see just how different the outputs are based on the shapes they produce.

# http://nbviewer.jupyter.org/github/psychemedia/ou-tm351/blob/master/notebooks-RFC/Anscombe's%20Quartet%20%5Bopen%5D.ipynb [7]
import pandas as pd
import matplotlib.pyplot as plt  # needed for the plots below
aq = pd.read_csv('anscombe.csv', header=[0,1], index_col=[0])
aq

# Created 4 side-by-side plots to view the min, max, mean & variance outputs visually
plt.subplot(1,4,1)
l = aq.min()
plt.hist(l)
plt.title("Min")
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.subplot(1,4,2)
y = aq.max()
plt.hist(y)
plt.title("Max")
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.subplot(1,4,3)
z = aq.mean()
plt.hist(z)
plt.title("Mean")
plt.xlabel("x axis")
plt.ylabel("y axis")
# Variance as a visual
plt.subplot(1,4,4)
h = aq.var()
plt.hist(h)
plt.title("Variance")
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.show()

3. Calculate the descriptive statistics of the variables in the dataset

*"Descriptive statistics are used to describe the basic features of the data in a study. They provide simple summaries about the sample and the measures. Together with simple graphics analysis, they form the basis of virtually every quantitative analysis of data."* [8]

The website that gives this definition of descriptive statistics also offers some interesting examples of what happens when you rely solely on statistics rather than an overview of all available content. The first example relates to American baseball and the statistics of a batter: if you only review the times the batter hit a ball, it leaves the record open to a very critical reading of how many they missed versus hit. *"The batting average doesn't tell you whether the batter is hitting home runs or singles. It doesn't tell whether she's been in a slump or on a streak."*[8] Another example relates to a student and their result for a term. Again the result is the final number, a single piece of data defining the student's overall ability in the course. However, the other aspects of the data in this example would include:
* The level of the course being taken
* The difficulty of the subjects
* Whether these subjects were in their field of expertise or other areas
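Before computing the simple summaries below, the stronger claim from the background section, that all four sets share essentially the same correlation and the same fitted line y = 3 + 0.5x, can be checked directly. A hedged sketch using seaborn's built-in copy of the quartet (an assumption made for illustration; the notebook itself reads anscombe.csv):

import seaborn as sns
from scipy import stats

ans = sns.load_dataset("anscombe")          # columns: dataset, x, y
for name, grp in ans.groupby("dataset"):
    slope, intercept, r, p, se = stats.linregress(grp.x, grp.y)
    # each dataset should print roughly slope=0.50, intercept=3.0, r=0.82
    print(name, round(slope, 2), round(intercept, 2), round(r, 2))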
aq.mean()

In the above code, I requested the mean as an output and, as we can see:
* The average mean of the x values = 8.95
* The average mean of the y values = 7.47

# The min of the quartet dataset
aq.min()
# The max of the quartet dataset
aq.max()

Having requested the min, max and mean output it is clear to see the close relationship and almost identical data for both axes of each graph. This gives plenty of backing to the old phrase that seeing really is believing.

# The variances of the quartet dataset
aq.var()

# X Axis Min and Max dataset
data = np.array([['','1st','2nd','3rd','4th'], ['Min',4.0,4.0,4.0,8.0], ['Max',14.0,14.0,14.0,19.0]])
print(pd.DataFrame(data=data[1:,1:], index=data[1:,0], columns=data[0,1:]))
1st 2nd 3rd 4th Min 4.0 4.0 4.0 8.0 Max 14.0 14.0 14.0 19.0

Probabilistic forecasting and multi-layer perceptron.

This is the second part of our introduction to forecasting. This notebook starts with a short introduction to __maximum likelihood estimation__ and to __generalized linear models__. These concepts are used to build a model that generates a __probabilistic forecast of count data__ using a Poisson distribution. At this point you might want to take a quick look at the crash course on probability and statistics in chapter 1. The second part of this notebook is dedicated to building a __multi-layer perceptron (MLP) for forecasting__, using the same data and Poisson distribution as in the first part of this notebook.

from __future__ import print_function
import mxnet as mx
import numpy as np
from mxnet import nd, autograd, ndarray
from mxnet import gluon
from mxnet.gluon.loss import Loss
from math import factorial
import pandas as pd
%matplotlib inline
from matplotlib import pyplot as plt

Maximum likelihood estimation

In the previous notebook we used linear models of the form $Y_t = X_t\beta + \epsilon_t$. The parameters $\beta$ are estimated by minimizing the mean squared error $$\frac{1}{T}\sum_t \hat\epsilon_t^2 = \frac{1}{T}\sum_t (Y_t - X_t\hat\beta)^2.$$

The model as a distribution

By making an extra assumption we can view this model and loss function in a more principled way. Let's assume that __the innovations $\epsilon_t$ are independently drawn from a Gaussian distribution__: $\epsilon_t \sim \mathcal{N}(0,\sigma^2)$. With this extra assumption we can re-write the model as a conditional distribution, that is, a distribution that depends on variables $X$ that are determined outside the model. Since we assume the errors $\epsilon$ are Gaussian: $$y_t = X_t\beta + \epsilon_t \implies Y_t \sim \mathcal{N}(X_t\beta,\sigma^2),$$ meaning that $y_t$ __follows a Gaussian distribution__ with mean $X_t\beta$ and variance $\sigma^2$. The mean and variance of the distribution are functions of the unknown parameters $\beta$ and $\sigma$. Had we known these parameters we would have known the distribution of $Y_t$ fully, and so we could compute $$Pr(y_t<\mu \mid X_t, \beta, \sigma)=p_\mu,\ \ \mu\in\mathbb{R}.$$ $Pr(y_t<\mu \mid X_t, \beta, \sigma)=p_\mu$ is __the probability that we have observed the data $y_t$__ conditional on some value of the parameters ($\beta, \sigma$) and on $X_t$. The problem can be turned around: for what value of the parameters is the probability that we observe $y_t$ maximized?
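To make the "turning the problem around" idea concrete, here is a minimal numpy sketch (synthetic data, assumed purely for illustration) showing that the slope maximizing the Gaussian log likelihood is the same slope that minimizes the squared error:

import numpy as np

rng = np.random.RandomState(0)
T = 200
X = rng.randn(T)
beta_true, sigma = 2.0, 0.5
y = beta_true * X + sigma * rng.randn(T)

def gaussian_loglik(beta):
    # log likelihood of the sample under y_t ~ N(X_t*beta, sigma^2)
    resid = y - beta * X
    return -0.5 * T * np.log(2 * np.pi * sigma**2) - 0.5 * np.sum(resid**2) / sigma**2

grid = np.linspace(1.0, 3.0, 2001)
loglik = np.array([gaussian_loglik(b) for b in grid])
beta_mle = grid[np.argmax(loglik)]
beta_ols = np.sum(X * y) / np.sum(X**2)   # least-squares slope (no intercept)
print(beta_mle, beta_ols)                 # the two agree up to the grid resolution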
Likelihood function and Maximum Likelihood estimationThe __likelihood function__ is defined as:$$ L(\beta; y_t, X_t, \sigma) = Pr(y_t \mid X_t, \beta, \sigma).$$The __maximum likelihood estimator__ (MLE) of our parameters is the maximizer of $L(\beta; y_t, X_t, \sigma)$: $(\hat\beta,\hat\sigma)=arg\,max_{\beta,\sigma}\left(L(\beta; y_t, X_t, \sigma)\right)$, though in practice we generally minimize the opposit of the logarithm of the the likelihood function, the __log likelihood function__ $-\log L(\beta; y_t, X_t, \sigma)=-\log\left(L(\beta)\right)$. I have skipped entirely a lot of extremely important points in this very brief introduction to maximum likelihood, the goal was just to mention the main concepts. There are lots of excellent resourcres out there to read a proper discussion of these topics. MLE with GluonTo enable MLE with `gluon` we need to define the log likelihood loss function.Let us look at a concrete example. A random variable following a Bernoulli distribution takes value 0 with probability p, or 1 with probability 1-p. For a sample of $N$ independent Bernoulli distributed random variables $y_i$, the log likelihood is $L(p)=\frac{1}{N}\sum_i\left(p y_i + (1-p)(1-y_i)\right)$. Few likelihood functions are included in the current release of `gluon`, but defining a custom loss function is easy:# Define the Bernoulli loss function class Bernoulli(Loss): """Calculates the Bernoulli loss function Output and label must have the same shape. This is a scalar loss function. Parameters ---------- weight : float or None Global scalar weight for loss. sample_weight : Symbol or None Per sample weighting. Must be broadcastable to the same shape as loss. For example, if loss has shape (64, 10) and you want to weight each sample in the batch, `sample_weight` should have shape (64, 1). batch_axis : int, default 0 The axis that represents mini-batch. """ def __init__(self, weight=None, batch_axis=0, **kwargs): super(Bernoulli, self).__init__(weight, batch_axis, **kwargs) def hybrid_forward(self, F, output, label, sample_weight=None): label = _reshape_label_as_output(F, output, label) loss = -1 * (1 - output) * (1-label) - output * label loss = _apply_weighting(F, loss, self._weight, sample_weight) return F.mean(loss, axis=self._batch_axis, exclude=True) bernoulli_log_lik = Bernoulli()Gaussian MLEWhen we have a Gaussian linear model, it's quite easy to show that the values $\hat\beta$ and $\hat\sigma$, the MLEs, that minimize the log likelihood function are: 1. The value of $\hat\beta$ that minimimze $\frac{1}{T}\sum_t\left( y_t-X_t\beta\right)ˆ2$, that is, __the minimizer of the L2 loss__. 2. $\hat\sigma^2 = \frac{1}{T}\sum_t \epsilon_t^2 = \frac{1}{T}\sum_t (y_t - X_t\hat\beta)^2$, the same estimator we used earlier. In the Gaussian case the maximum likelihood estimator is equivalent to minimizing the L2 loss. This implies that to do Gaussian MLE we do not need to setup a particular function, we can just use L2 loss: `gaussian_log_lik = gluon.loss.L2Loss()`.One advantage of the likelihood framework is that we work with distributions, including in our forecasts. Another advantage is that we can use a wide class of probability distributions to describe the data. Generalized linear models Count forecastingTo forecast counts we are going to use a discrete probability distribution that takes values ${0,1,2,...}$, the __Poisson distribution__. 
A random variable following a Poisson distribution with parameter $\lambda_t>0$ takes values $y_t = 0,1,...$ with probability $Pr(y_t=\mu) = \frac{e^{-\lambda_t}\lambda_t^\mu}{\mu!}$. $\lambda$ is both the mean and the variance of the Poisson distribution.Suppose that we would like to model the parameter $\lambda_t$ of a Poisson distributed sample as a function of some explanatory variables $X_t$. We could posit $\lambda_t=X_t\beta$, with $X_t$ a column matrix of features and $\beta$ a column vector of parameters. The specification $\lambda_t=X_t\beta$ makes it possible for the mean and variance of the process to be negative, we can't have that! Instead we assume that $\lambda$ is a function of $X\beta$: $\lambda = f(X\beta)$, $f(.)$ is called the link function.A common choice for the link function is to assume that the parameter of the Poisson distribution is log-linear, $\log\lambda_t = X_t\beta$, implying that the link function is the exponential: $\lambda = f(X\beta)$. This choice can be problematic since we might end up taking the exponential of a large number leading to overflow. Instead we will use a logistic link function: $f(x) = \log(1 + exp(x))$. With link function $f$, we have $f(E(y_t|X_t))=X_t\beta$. The log-likelihood of some parameters $\beta$ at time $t$ is:$$L(\beta | X_t,y_t) = y_t \log(\lambda_t(\beta)) - \lambda_t(\beta)\\ = y_t \log(f(X_t\beta)) - f(X_t\beta).$$In practice we will minimize $-L(\beta | X_t,y_t) = y_t \log(f(X_t\beta)) - f(X_t\beta)$. We define this log likelihood function below together with some helper functions.def logistic(x): return nd.log1p(nd.exp(x,dtype='float64')) def inverse_logistic(x): return nd.log(nd.exp(x,dtype='float64')-1) def _reshape_label_as_output(F, output, label): return label.reshape(output.shape) if F is ndarray else label.reshape(()) class Poisson(Loss): def __init__(self, weight=None, batch_axis=0, **kwargs): super(Poisson, self).__init__(weight, batch_axis, **kwargs) def hybrid_forward(self, F, output, label, sample_weight=None): label = _reshape_label_as_output(F, output, label) loss = logistic(output) - F.log(logistic(output)) * label return F.mean(loss, axis=self._batch_axis, exclude=True)Data# Weekly sales for a novelty item (p.37-38: Montgomery)  # From datamarket.com df = pd.read_csv("weekly-sales-clean.csv").set_index('week') print(df.head()) ts = df.values[:,0] plt.plot(ts);FeaturesThe data clearly appears to be trending upward, so the model will include a linear trend. There also might be some autocorrelation so the model will include a lag of the target variable. 
There does not appear to be any kind of seasonal pattern, so we won't include any seasonal features.forecast_length = 10 train_length = len(ts) - forecast_length tgt_min = ts.min() ts = ts - tgt_min # droping the first observation due to lag target = nd.array(ts[:train_length]).reshape((train_length,1))[1:] # prediction target pred_target = nd.array(ts[train_length:]).reshape((forecast_length,1)) # construct lag and trend trend = nd.arange(train_length).reshape((train_length,1)) lag_sales = nd.array(ts).reshape((train_length,1)) # droping the last observation due to lag features = nd.concat(trend[:-1],lag_sales[:-1]) # standardize features_mean = features.mean(axis=0) features_std = nd.array(features.asnumpy().std(axis=0)).reshape((1,1)) features = (features - features_mean) / features_std print(features[:5,]) print(target[:5,]) batch_size = 5 train_data = gluon.data.DataLoader( gluon.data.ArrayDataset(features, target), batch_size=batch_size, shuffle=True)ModelThe GLM is still just a linear model, and so we define the network as a single dense layer with a dimension 1 output. The only difference with the Gaussian linear models of the previous notebook is that we now assume the data follows a Poisson distribution and so use the Poisson log likelihood as our loss function.# Context ctx = mx.cpu() # Network net = gluon.nn.Sequential() with net.name_scope(): net.add(gluon.nn.Dense(1)) # Parameter initialization net.collect_params().initialize(mx.init.Normal(sigma=0.1), ctx=ctx) # Trainer trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) # Loss poisson_log_lik = Poisson()Training loopepochs = 50 smoothing_constant = .05 moving_loss = 0 niter = 0 loss_seq = [] for e in range(epochs): for i, (data, label) in enumerate(train_data): data = data.as_in_context(ctx) label = label.as_in_context(ctx) with autograd.record(): output = net(data) loss = poisson_log_lik(output, label) loss.backward() trainer.step(batch_size) ########################## # Keep a moving average of the losses ########################## niter +=1 curr_loss = nd.mean(loss).asscalar() moving_loss = (1 - smoothing_constant) * moving_loss + (smoothing_constant) * curr_loss # correct the bias from the moving averages est_loss = moving_loss/(1-(1-smoothing_constant)**niter) loss_seq.append(est_loss) if e % 10 ==0: print("Epoch %s. Moving avg of log likelihood: %s" % (e, est_loss)) params = net.collect_params() # this returns a ParameterDict print('The type of "params" is a ',type(params)) # A ParameterDict is a dictionary of Parameter class objects # therefore, here is how we can read off the parameters from it. for param in params.values(): print(param.name,param.data()) # plot the convergence of the estimated loss function %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.figure(num=None,figsize=(8, 6),dpi=80, facecolor='w', edgecolor='k') plt.plot(range(niter),loss_seq, '.') # adding some additional bells and whistles to the plot plt.grid(True,which="both") plt.xlabel('iteration',fontsize=14) plt.ylabel('est loss',fontsize=14)ForecastAs was the case with the Gaussian AR(1) of the previous notebook, the specification of our Poisson model is autoregressive, a feature is the previous value of the target. We are therefore going forecast recursively.The value our model predict is $X_t\beta = f(\lambda_t)$ where $f(.)$ is the link function which in this application is the logistic function. 
To recover the parameter $\lambda$ of the Poisson distribution, which is what we are ultimately interested in estimating and predicting, we need to transform the output with $fˆ{-1}(.)$. That's what the `inverse_logistic` function is for.def forecast_poisson(net, last_obs, features_mean, features_std, forecast_length, train_length): forecast = nd.empty((forecast_length,1)) for t in range(forecast_length): if t==0: prev_obs = last_obs.reshape((1,1)) # construct features trend = nd.array([train_length + t - 1]).reshape((1,1)) #fct_feat = trend fct_feat = nd.concat(trend,prev_obs,dim=1) # normalize features fct_feat = (fct_feat - features_mean)/features_std # forecast fct = net(fct_feat) forecast[t,] = fct prev_obs = inverse_logistic(fct) return forecast fit = net(features) fit_trans = inverse_logistic(fit) fct = forecast_poisson(net,target[train_length-2], features_mean, features_std, forecast_length, train_length) fct_trans = inverse_logistic(fct) def plot_forecast(observed, fitted, forecasted): plt.plot(fitted.asnumpy(), color="r") plt.plot(observed, color="g") T = len(fitted) plt.plot(np.arange(T, T+len(forecasted)), forecasted.asnumpy(), color="b") plt.legend(["Fitted", "Observed", "Forecasted"]); plot_forecast(ts[1:],fit,fct)90% prediction intervalsLet's put a 90% prediction interval around our forecast. We proceed as previously, estimating the standard errors of the residuals and relying on the fact that the maximum likelihood estimates are asymptotically Gaussian to construct the intervals. There are several alternative ways to construct intervals relying on weaker or different assumptions, but this is a topic for another day.se_fit = (target - fit_trans).asnumpy().std() interval = np.concatenate((inverse_logistic(fct - 1.65*se_fit).asnumpy(), inverse_logistic(fct + 1.65*se_fit).asnumpy()), axis=1) def plot_forecast_interval(observed, fitted, forecasted, interval): plt.plot(fitted.asnumpy(), color="r") T = len(fitted) plt.plot(np.arange(T, T+len(forecasted)), forecasted.asnumpy(), color="b") plt.fill_between(np.arange(T, T+len(forecasted)), interval[:,0], interval[:,1], alpha=.3) plt.plot(observed, color="g") plt.legend(["Fitted", "Observed", "Forecasted"]); plot_forecast_interval(ts[1:], fit, fct, interval)Mean squared prediction errormsfe_glm = nd.mean(nd.power(fct - pred_target,2)) print(msfe_glm)Forecasting with a multilayer perceptronWe're going to use a multilayer perceptron (MLP) as in chapter 3. The network is composed of three layers, 2 dense layers with 64 hidden units and a _relu_ activation layer, and a one-dimensional output layer.num_hidden = 64 num_outputs = 1 mlp_net = gluon.nn.Sequential() with mlp_net.name_scope(): mlp_net.add(gluon.nn.Dense(num_hidden, activation="relu")) mlp_net.add(gluon.nn.Dense(num_hidden, activation="relu")) mlp_net.add(gluon.nn.Dense(num_outputs))InitializationHere we initialize the parameters using the Xavier algorithm, set up the trainer, and define the loss function. 
Since we are modeling and predicting continuous variables, I use a square Loss.# context mlp_ctx = mx.cpu() # Parameters mlp_net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=mlp_ctx) # trainer trainer = gluon.Trainer(mlp_net.collect_params(), 'sgd', {'learning_rate': .1}) # likelihood poisson_ll = Poisson()Databatch_size = 5 train_data = gluon.data.DataLoader( gluon.data.ArrayDataset(features, target), batch_size=batch_size, shuffle=True)Training loopepochs = 100 smoothing_constant = .05 for e in range(epochs): cumulative_loss = 0 for i, (data, label) in enumerate(train_data): data = data.as_in_context(mlp_ctx) label = label.as_in_context(mlp_ctx) with autograd.record(): output = mlp_net(data) #print(output) #loss = square_loss(output, label) loss = poisson_ll(output, label) loss.backward() trainer.step(data.shape[0]) cumulative_loss += nd.sum(loss).asscalar() if e % 10 ==0: print("Epoch %s. Loss: %s" % (e, cumulative_loss)) for param in params.values(): print(param.name,param.data())Forecasts and prediction intervals# fit and residual standard errors fit = mlp_net(features) se_fit_mlp = (target - fit).asnumpy().std() # forecast fct = forecast_poisson(mlp_net,target[train_length-2], features_mean, features_std, forecast_length, train_length) # prediction interval interval = np.concatenate((inverse_logistic(fct - 1.65*se_fit).asnumpy(), inverse_logistic(fct + 1.65*se_fit).asnumpy()), axis=1) plot_forecast_interval(ts[1:],inverse_logistic(fit),inverse_logistic(fct),interval) msfe_mlp = nd.mean(nd.power(fct - pred_target,2)) print("Mean squared forecast error of the GLM: %s, of the MLP: %s" % (msfe_glm, msfe_mlp))Descriptive statistics (Data Analysis + Visualization).df.describe() df.mean() lm1 = df.groupby(['sex']).count() m1 = lm1['schoolsup'] m1 m1.hist(); m1.plot(kind='bar',figsize=(5,5)); df['sex'].hist(); lm2= df.groupby(['age']).count() m2 =lm2['schoolsup'] m2 m2.hist(); m2.plot(kind='bar',figsize=(5,5)); lm3= df.groupby(['sex','studytime']).count() m3 =lm3['schoolsup'] m3 m3.hist(); m3.plot(kind='bar',figsize=(5,5)); lm4= df.groupby(['sex','failures']).count() m4 =lm4['schoolsup'] m4 m4.hist(); m4.plot(kind='bar',figsize=(5,5)); lm5= df.groupby(['failures']).count() m5 =lm5['schoolsup'] m5 m5.hist(); m5.plot(kind='bar',figsize=(5,5));Feature Engineeringdf.columns df =df[['sex','age','studytime','failures','schoolsup']] df.head()Missing values treatmentdf.isnull().sum()Label encodingdf.head() df.columns sex_encoder = LabelEncoder() df['sex'] = sex_encoder.fit_transform(df['sex'].values) schoolsup_encoder = LabelEncoder() df['schoolsup'] = schoolsup_encoder.fit_transform(df['schoolsup'].values) df.head()shrinks dataX = df.iloc[:, 0:4].values y = df.iloc[:, 4].values # Splitting the dataset into the Training set and Test set from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Fitting Logistic Regression to the Training set from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(X_train, y_train) # Predicting the Test set results y_pred = classifier.predict(X_test) y_pred # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) cm from sklearn.metrics import accuracy_score 
accuracy_score(y_test,y_pred,normalize=False) from sklearn.metrics import classification_report print(classification_report(y_test,y_pred))precision recall f1-score support 0 0.85 1.00 0.92 84 1 0.00 0.00 0.00 15 avg / total 0.72 0.85 0.78 99Importing Librariesimport pandas as pd import numpy as np import math import seaborn as sns import matplotlib.pyplot as plt from sklearn import linear_model from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import Pipeline from sklearn.preprocessing import normalize Forcast=pd.read_csv('/Users/Asus/Documents/InternCsv/Forcast_Official.csv') Forcast.head(4) #Renaming the Unnamed Column into Plants and creating and Index Forcast = Forcast.rename(columns={'Unnamed: 0': 'Plants'}) Forcast.drop('Plants',inplace=True,axis=1) #Forcast.head(2) Forcast.drop('Date',inplace=True,axis=1) Plant_Forcast=Forcast.groupby(["Plant"]) Plant_Sum=Plant_Forcast.sum() Plant_Sum["Total_Dispatch"] = Plant_Sum.sum(axis=1) Plant_Sum.head(5) Plant_Sum.shape Plant_Sum.reset_index(level=0,inplace=True) Plant_Sum.head(3) Plant_Sum.shapeDefine X & Yx=Plant_Sum.iloc[:,1:48].values y=Plant_Sum.iloc[:,49].values x ySplit the dataset in training Set & test Setfrom sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0) from sklearn.linear_model import LinearRegression ml=LinearRegression() ml.fit(x_train,y_train) reg= linear_model.LinearRegression() reg.fit(Plant_Sum.iloc[:,1:48],Plant_Sum['Total_Dispatch']) reg.coef_ reg.intercept_Evaluate Modely_pred=ml.predict(x_test) print(y_pred) from sklearn.metrics import r2_score r2_score (y_test,y_pred) #High Accurcay Rate can Be Gain From the ModelPlotting Predicting Model with Actual Modelimport matplotlib.pyplot as plt plt.figure(figsize=(8,5)) plt.scatter(y_test,y_pred) myline = np.linspace(0, 10, 100) #plt.plot(myline, my_model(myline), color ="r") plt.xlabel('Actual') plt.ylabel('Predicted') plt.title('Actual Vs. 
Predicted') #With The Accuracy is Very High Prediction and Actual Values are Almost SimilarModel Value Differancespred_y_df=pd.DataFrame({'Actual Value':y_test,'Predicted value':y_pred,'Differance':y_test-y_pred}) pred_y_df[0:20]Data Normalization in Plant_SumPlant_Sum = normalize(Plant_Sum.iloc[:,1:49] , axis=0) #Without using normalization first dataset importing mechanism can be used Plant_Sum from numpy import genfromtxt x1=Plant_Sum[1:,0:47] y1=Plant_Sum[1:,47] print(x1[0:10]) print(y1[0:10])[[3.54599849e-03 1.77120693e-03 1.18516873e-03 1.78255753e-03 1.19111467e-03 1.19434565e-03 1.19293263e-03 1.19163354e-03 1.18041800e-03 3.57079806e-03 5.68084424e-03 2.34971631e-02 2.76950084e-02 2.94150238e-02 2.73161014e-02 2.98801636e-02 3.03769989e-02 3.04999714e-02 3.05094914e-02 3.04325658e-02 3.04203222e-02 3.02994814e-02 3.01441029e-02 3.00852042e-02 3.01618750e-02 2.97668789e-02 3.04800946e-02 3.05012717e-02 3.04414533e-02 3.03297885e-02 3.03414581e-02 3.02666662e-02 3.02185715e-02 3.02356261e-02 3.02827483e-02 3.02403799e-02 2.97521541e-02 2.98137412e-02 3.11649169e-02 3.10227730e-02 3.04612171e-02 3.09093448e-02 2.97032803e-02 2.95786224e-02 2.72147585e-02 2.18781789e-02 1.59059563e-02] [9.92657952e-06 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00 6.99320331e-04 6.93404916e-04 6.94007540e-04 6.94336521e-04 6.90055533e-04 6.88558530e-04 6.832[...]Analyzing Training Curve in Plant_Sumdef gradient(x1,y1,alpha,epoch): m=x1.shape[0] #number of samples ones = np.ones((m,1)) x1=np.concatenate((ones,x1),axis=1) n=x1.shape[1] Theta = np.ones(n) #n = 5th parameter h = np.dot(x1, Theta) #Compute Hypothesis #Gradient descent Algorithm cost = np.ones(epoch) for i in range (0,epoch): Theta[0] = Theta[0] - (alpha/ x1.shape[0]) * sum(h-y1) for j in range(1,n): Theta[j] = Theta[j] - (alpha/ x1.shape[0]) * sum((h-y1) * x1[:, j]) h = np.dot(x1,Theta) cost[i] = 1/(2*m) * sum(np.square(h-y1)) #compute Cost return cost, Theta #Calcualting Theta & Cost cost, Theta = gradient(x1, y1, 0.005, 2000) print(Theta)[-4.27037795e-02 -2.78030322e-03 -7.01235062e-03 -9.14424421e-03 -1.07766942e-02 -1.07528665e-02 -1.10711805e-02 -1.24159744e-02 -1.28940119e-02 -1.23870492e-02 -1.00236121e-02 -4.88920098e-06 1.06622033e-02 1.83829834e-02 1.66703424e-02 1.19365529e-02 1.46054517e-02 2.09330776e-02 2.67902574e-02 2.96023916e-02 3.11021513e-02 3.10149870e-02 3.56007260e-02 4.07416058e-02 4.26516840e-02 4.04776422e-02 3.45633220e-02 2.86759426e-02 2.75963285e-02 3.02790451e-02 3.41207374e-02 3.30892621e-02 3.60989580e-02 3.78669990e-02 3.68915210e-02 3.25895204e-02 3.03460690e-02 4.14642363e-02 7.86035263e-02 1.02159757e-01 9.08501502e-02 7.07413664e-02 5.58070163e-02 4.01070879e-02 2.71471409e-02 1.51860597e-02 8.29495855e-03 1.49291187e-03]Training Data Cost in Each Plants in the Forcast Tableplt.plot(cost) plt.xlabel('Number of Iterations (epoch)') plt.ylabel("Cost or Lost") plt.show print("Lowest Cost ="+ str(np.min(cost))) #Computation Cost Of Data Training #print("Cost after 10 iterations =" + str(cost[-1]))Lowest Cost =0.0005811410646404037Calculating meanfor k, d in data.items(): print(k) print(f"x={np.mean(d.x):.2f}, y={np.mean(d.y):.2f}")Calculatingfor k, d in data.items(): print(k) print(f"x={np.var(d.x):.3f}, y={np.var(d.y):.3f}") for k, d in data.items(): print(k) print(f"Pearrson cov = {np.corrcoef(d.x, d.y)}")Linear Regressionfrom scipy import stats for k, d in data.items(): slope, intercept, r_value, 
p_value, std_err = stats.linregress(d.x, d.y) print(k) print(f"{slope=:.2f}, {intercept=:.2f}") fig, axs = plt.subplots(2, 2, figsize=(10, 10)) cols = ["b", "r", "g", "y"] for (k, d), ax, col in zip(data.items(), axs.flatten(), cols): ax.scatter(d.x, d.y, color=col) slope, intercept, r_value, p_value, std_err = stats.linregress(d.x, d.y) x = np.linspace(np.min(d.x), np.max(d.x)) y = slope * x + intercept ax.plot(x, y, "black") ax.set_title(k) for ax in axs.flatten(): ax.set_xlabel("x") ax.set_ylabel("y") plt.tight_layout()Part 1.2*Excercise 1.2:* Questions for the lecture* What is the difference between *data* and *metadata*? How does that relate to the GPS tracks-example? * Data is concrete, metadata is data about the data* Sune says that the human eye is a great tool for data analysis. Do you agree? Explain why/why not. Mention something that the human eye is very good at. Can you think of something that [is difficult for the human eye](http://cdn.ebaumsworld.com/mediaFiles/picture/718392/84732652.jpg). Explain why your example is difficult. * I agree, its real good. Good at spotting patterns, can be confused.* Simpson's paradox is hard to explain. Come up with your own example - or find one on line. * The schoolsystem in America* In your own words, explain the differnece between *exploratory* and *explanatory* data analysis. * Explore the data or explain the datadf = pd.read_csv("../data/Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv") focuscrimes = set(['WEAPON LAWS', 'PROSTITUTION', 'DRIVING UNDER THE INFLUENCE', 'ROBBERY', 'BURGLARY', 'ASSAULT', 'DRUNKENNESS', 'DRUG/NARCOTIC', 'TRESPASS', 'LARCENY/THEFT', 'VANDALISM', 'VEHICLE THEFT', 'STOLEN PROPERTY', 'DISORDERLY CONDUCT']) df["DayName"] = pd.to_datetime(df.Date).dt.day_name() df["WeekIdx"] = pd.to_datetime(df.Date).dt.weekday df["Month"] = pd.to_datetime(df.Date).dt.month df["Hour"] = pd.to_datetime(df.Time).dt.hour fig, axs = plt.subplots(7, 2, figsize=(10, 20)) for (idx, group), ax in zip(df[df.Category.isin(focuscrimes)].groupby("Category"), axs.flatten()): group[["WeekIdx", "DayName"]]\ .value_counts()\ .sort_index()\ .plot.bar(ax=ax, title=group.Category.values[0]) plt.tight_layout() fig, axs = plt.subplots(7, 2, figsize=(10, 20)) for (idx, group), ax in zip(df[df.Category.isin(focuscrimes)].groupby("Category"), axs.flatten()): val_counts = group[["Month"]]\ .value_counts()\ .sort_index()\ .plot.bar(ax=ax, title=group.Category.values[0]) plt.tight_layout() fig, axs = plt.subplots(7, 2, figsize=(10, 20)) for (idx, group), ax in zip(df[df.Category.isin(focuscrimes)].groupby("Category"), axs.flatten()): val_counts = group[["Hour"]]\ .value_counts()\ .sort_index()\ .plot.bar(ax=ax, title=group.Category.values[0]) plt.tight_layout() fig, axs = plt.subplots(7, 2, figsize=(10, 20)) for (_, group), ax in zip(df[df.Category.isin(focuscrimes)].groupby("Category"), axs.flatten()): val_counts = group[["WeekIdx", "Hour"]]\ .value_counts()\ .sort_index()\ .plot.bar(ax=ax, title=group.Category.values[0]) ax.set_xticks([x for x in range(0, 169, 12)]) ax.set_xticklabels([f"{x%24}" for x in range(0, 169, 12)], rotation=0) plt.tight_layout() print("10 Districts") print(df.PdDistrict.unique()) print("Number of crimes in each district") print(df.groupby("PdDistrict").size().sort_values(ascending=False)) print("Number of focus crimes") print(df[df.Category.isin(focuscrimes)].groupby("PdDistrict").size().sort_values(ascending=False)) p_crime = df.Category.value_counts(normalize=True) p_crime_districts = 
df.groupby("PdDistrict").Category.value_counts(normalize=True) fig, axs = plt.subplots(5, 2, figsize=(10, 30)) for (area, new_df), ax in zip(p_crime_districts.groupby(level=0), axs.flatten()): new_df.index = new_df.index.get_level_values(1) ps = (new_df / p_crime) ps[ps.index.isin(focuscrimes)].plot.bar(title=area, ax=ax) plt.tight_layout() randomdata = { 'CENTRAL': 0.8903601342256143, 'SOUTHERN': 0.8642882941363439, 'BAYVIEW': 0.925634097746596, 'MISSION': 0.7369022697287458, 'PARK': 0.9864113307070926, 'RICHMOND': 0.5422239624697017, 'INGLESIDE': 0.5754056712571605, 'TARAVAL': 0.5834730737348696, 'NORTHERN': 0.08148199528212985, 'TENDERLOIN': 0.37014287986350447} df_random = pd.DataFrame.from_dict(randomdata, orient="index").reset_index() df_random.columns = ["DISTRICT", "DATA"] # Normalizing between 0 and 12 df_random.DATA = df_random.DATA.transform(lambda x: (x / x.max()) * 12) df_random import json import plotly.express as px counties = json.loads(open("../files/sfpd.geojson", "r").read()) fig = px.choropleth_mapbox(df_random, geojson=counties, locations="DISTRICT", color="DATA", color_continuous_scale="Viridis", range_color=(0, 12), mapbox_style="carto-positron", zoom=10, center = {"lat": 37.773972, "lon": -122.431297}, opacity=0.5, labels={'unemp':'unemployment rate'}) fig.show() data = pd.DataFrame(df[(df.Category == "VEHICLE THEFT") & (df.WeekIdx == 1)].PdDistrict.value_counts(normalize=True)).reset_index() data.columns = ["DISTRICT", "DATA"] data.DATA = data.DATA.transform(lambda x: (x / x.max()) * 12) counties = json.loads(open("../files/sfpd.geojson", "r").read()) fig = px.choropleth_mapbox(data, geojson=counties, locations="DISTRICT", color="DATA", color_continuous_scale="Viridis", range_color=(0, 12), mapbox_style="carto-positron", zoom=10, center = {"lat": 37.773972, "lon": -122.431297}, opacity=0.5, labels={'unemp':'unemployment rate'}) fig.show()Distributions%matplotlib inline import seaborn as sns import matplotlib.pyplot as plt from scipy.stats import uniform, norm, bernoulli, binom, poisson, beta, gamma, nbinom, lognorm, geom, t, exponDiscrete data* meaning: clear spaces between values* nature: countable* values: can take only distinct and separate values* graphical representation: bar graph* classification: mutually inclusive* function graph: shows isolated points* example: days of the weekWe use probability mass function to get the distribution Bernoulli2 discrete outcomes (tail or head for a coin), the distribution over the discrete outcomes is 0 head, 1 tailprobability to get one over the other doesn't have to be 50%There is only one trial that gives us either head or taildata = bernoulli.rvs(size=1_000_000, p=0.5) ax= sns.distplot(data, kde=False, color="lightgreen", hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Bernoulli', ylabel='Frequency') data = bernoulli.rvs(size=10_000, p=0.25) ax= sns.distplot(data, kde=False, color="lightgreen", hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Bernoulli', ylabel='Frequency')BinomialBinomial distribution is a sum of independent and evenly distributed Bernoulli trials2 possible choices (tail, head), what is the probability to have X tails ouf ot 10number_of_pick = 10 number_of_trial_repetition = 1_000_000 probability = 0.5 data = binom.rvs(n=number_of_pick, p=probability, size=number_of_trial_repetition) ax = sns.distplot(data, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Number of successes (tail)', ylabel='Frequency')Uniformevery possible result is equally likely; that is, 
the probability of each occurring is the same.data = uniform.rvs(size=1_000, loc = 0, scale = 10) ax = sns.distplot(data, bins=100, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform ', ylabel='Frequency') data = uniform.rvs(size=100_000, loc = 0, scale = 10) ax = sns.distplot(data, bins=100, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform ', ylabel='Frequency') data = uniform.rvs(size=1_000_000, loc = 0, scale = 10) ax = sns.distplot(data, bins=100, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform ', ylabel='Frequency')Poissonmodels the number of times an event happened in a time interval (number of users visit of a website for a specific time frame)mu is the number of times an event happens in this intervalmu = 3 data = poisson.rvs(mu=mu, size=10_000) ax = sns.distplot(data, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Number of cars passing in the street during the day', ylabel='Frequency')Geometriccan mean two things:* probability distribution of the number X of Bernoulli trials needed to get one success* probability distribution of the number Y = X - 1 of failures before the first successp = 0.5 data = geom.rvs(p, size=1_000_000) ax = sns.distplot(data, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Geometric', ylabel='Frequency')Negative Binomialrandom variable with discerete outcome, related to binomial/bernoulli distributiondata = nbinom.rvs(10, 0.5, size=10000) ax = sns.distplot(data, kde=False, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Negative Binomial', ylabel='Frequency')Continous data* meaning: data that falls on a continuous sequence* nature: measurable* values: can take any value in some interval* graphical representation: histogram* classification: mutually exclusive* function graph: show connected points* example: market price of a productWe use probability density function to get the distribution. For a given range of value, the probability density function gives us a way of finding out the probability of that range occurring. 
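Before looking at the individual continuous distributions, it may help to set the sampled histograms above against the exact probability mass function (discrete case) and to see why a density is handled differently (continuous case). A short sketch using the same `scipy.stats` objects; the numbers printed are illustrative.

```python
# Compare sampled frequencies with the exact pmf (discrete) and use the cdf for
# probabilities in the continuous case.
import numpy as np
from scipy.stats import poisson, norm

mu = 3
samples = poisson.rvs(mu=mu, size=100_000)
values, counts = np.unique(samples, return_counts=True)
empirical = counts / counts.sum()
exact = poisson.pmf(values, mu=mu)          # P(X = k) for each observed k
for k, emp, ex in zip(values[:6], empirical[:6], exact[:6]):
    print(f"k={k}: empirical {emp:.3f}  pmf {ex:.3f}")

# For a continuous variable the pdf is a density, not a probability:
# probabilities come from integrating it, e.g. P(-1 < X < 1) for a standard normal.
print(norm.cdf(1) - norm.cdf(-1))           # ~0.683
```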
Normal (Gaussian)continuous probability distribution for a real valued random variable.Bell shape centered around a mean of 0mean = 0 variance = 1 data = norm.rvs(size=1_000_000, loc=mean, scale=variance) ax = sns.distplot(data, bins=100, kde=False, color='lightgreen', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Normal', ylabel='Frequency')Log-Normaldata = lognorm.rvs(0.1, size=10_000_000) ax = sns.distplot(data,kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Log Normal', ylabel='Frequency') data = lognorm.rvs(0.3, size=10_000_000) ax = sns.distplot(data,kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Log Normal', ylabel='Frequency') data = lognorm.rvs(0.5, size=10_000_000) ax = sns.distplot(data,kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Log Normal', ylabel='Frequency')Student's tany member of a family of continuous probability distributions that arises when estimating the mean of a normally distributed population in situations where the sample size is small and the population standard deviation is unknowndf = 5 data = t.rvs(df, size=1_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel="Student's t", ylabel='Frequency') data = t.rvs(df, size=1_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel="Student's t", ylabel='Frequency')Gammatwo parameter (shape and scale) family of continuous probability distributions. Exponential, chi-squared, Erlang are special cases of the gamma distributiondata = gamma.rvs(a=1, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma', ylabel='Frequency') data = gamma.rvs(a=5, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma', ylabel='Frequency') data = gamma.rvs(a=10, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma', ylabel='Frequency')Betadistribution for probabilities, continuous distribution taking values from 0 to 1Defined by two parameters alpha and betaalpha_param = 1 beta_param = 1 data = beta.rvs(alpha_param, beta_param, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Beta(1,1)', ylabel='Frequency') alpha_param = 10 beta_param = 1 data = beta.rvs(alpha_param, beta_param, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Beta(1,1)', ylabel='Frequency') alpha_param = 1 beta_param = 10 data = beta.rvs(alpha_param, beta_param, size=10_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Beta(1,1)', ylabel='Frequency')Exponentialprobability distribution of the time between events in a Poisson point process -> a process in which events occur continuously and independently at a constant average ratedata = expon.rvs(size=1_000_000) ax = sns.distplot(data, kde=False, bins=100, color='lightgreen', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Exponential', ylabel='Frequency')**Recuerda que una vez abierto, Da clic en "Copiar en Drive", de lo contrario no 
podras alamancenar tu progreso**# Siempre Ejecuta esta linea de codigos # esta configura librerias y ciertas funciones # necesarias para esta sesión !wget https://raw.githubusercontent.com/jdariasl/ML_2020/master/Labs/commons/utils/general.py -O general.py from general import configure_intro configure_intro() from intro import *--2021-11-18 14:17:45-- https://raw.githubusercontent.com/jdariasl/ML_2020/master/Labs/commons/utils/general.py Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 192.168.3.11, 192.168.127.12, 192.168.127.12, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|192.168.3.11|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 14632 (14K) [text/plain] Saving to: ‘general.py’ general.py 0%[ ] 0 --.-KB/s general.py 100%[===================>] 14.29K --.-KB/s in 0s 2021-11-18 14:17:45 (62.0 MB/s) - ‘general.py’ saved [14632/14632] lab configuration started installing libraries downloading files lab configuredIntrodución para los laboratorios de Machine LearningEste sección explica los laboratorios que realizarán durante el curso. 1. Los laboratorios están en Python 3.72. Son escritos usando [notebooks](https://jupyter.org)3. Estan adaptados para ser ejecutados en la herramienta [Google Colab](https://colab.research.google.com/notebooks/intro.ipynbscrollTo=5fCEDCU_qrC0).En este notebook veremos la siguiente información que sera nuestra base para todas nuestras sesiones.1. Introducción a [Google Colab](https://colab.research.google.com/notebooks/intro.ipynbscrollTo=5fCEDCU_qrC0) como usarlo para el desarrollo de las practicas2. Conceptos rapidos de Python 1. Basicos y más importantes 2. Profundización (Lectura y pratica)2. Manejo de vectores y matrices en NumPy 1. Basicos y más importantes 2. Profundización (Lectura y pratica)3. Manejo de estructuras de datos en pandas 1. Basicos y más importantes 2. Profundización (Lectura y pratica)4. Manejo de gráficas en matplotlib y pandas 1. Basicos y más importantes 2. Profundización (Lectura y pratica)5. Estructuras de los laboratoriosEn cada una de las secciones pueden encontrar diferentes ejercicios para ir practicando y afianzado los conceptos presentados. Google Colab y Jupyter NotebooksUn Jupyter notebook es un proyecto open source que permite ejecutar interactivamente varios lenjuages de programación. Su nombre es un referencia a los lenguajes que fueron principales en el inicio el proyecto: JUlia, PYThon y R. Pero en la actulidad se han expandido a muchos otras más. Este proyecto también se conocio antes bajo el nombre de **IPython Notebooks**. Los notebooks, en su formato "crudo" son un archivo [JSON](https://en.wikipedia.org/wiki/JSON) que es renderizado para permitir combinar tanto codigo, texto (Usando [Markdown](https://en.wikipedia.org/wiki/Markdown)), lenguaje matemático y graficas.Los notebooks pueden ser ejecutados en diferentes entornos. Estos entornos pueden ser locales (requieren instalación y configuración) o en la nube por medio de un navegador moderno (no requiere ninguna configuración).- Distribuición de [Anaconda](https://www.anaconda.com/). (Recomendado para Windows y macOs)- Administrador de paquetes pip. 
(Recomendado para Linux) [Tutorial](https://www.digitalocean.com/community/tutorials/how-to-set-up-jupyter-notebook-with-python-3-on-ubuntu-18-04)- Contenedor en docker- [Google Colab](https://colab.research.google.com/notebooks/intro.ipynbscrollTo=5fCEDCU_qrC0)- [Binder](https://mybinder.org/)Como se ha mencionado, este notebook y el resto de nuestros sesiones están adaptados para usar Google Colab, pero con un muy pequeño esfuerzo también pueden ser adaptados para ser ejecutados en cualquier entorno mencionado. **Esto ultimo no es recomendado y no es objetivo del curso**, y no se podrán revisar laboratorios que no sigan las instrucciones detalladas en la ultima sección de este documento.En la industria, los jupyter notebooks son un herramienta altamente adoptada y se considera un "estandar" para el desarrollo, documentación y comunicación de resultados de investigación en trabajos de ciencia de datos. Sin embargo también ha logrado una buena pouplaridad en otros entornos. inlcusive, se ha discuito que [pueden ser una buena alternativa para susbtituir el formato estandar del paper cientifico](https://www.theatlantic.com/science/archive/2018/04/the-scientific-paper-is-obsolete/556676/). De la misma manera los jupyter notebook son la base para productos comerciales de los principales proveedores de computación en la nube como lo son:- [AI Platform](https://cloud.google.com/ai-platform/?hl=es-419) De Google Computing Platform- [Amazon SageMaker](https://aws.amazon.com/es/sagemaker/) de Amazon Web Services- [Azure Notebooks](https://notebooks.azure.com/) de Microsoft Azure- Y otros servicios usados en entornos de Big Data como son [Databricks](https://databricks.com/), [Cloudera](https://www.cloudera.com/products/data-science-and-engineering/data-science-workbench.html) y [Apache Zepelin](https://zeppelin.apache.org/).Sin embargo es totalmente vital, aclarar que los notebooks son un entorno para exploración interactiva y presentar resultados que sean reproducibles. No es recomendado su uso para tareas de software engineering "más puras" (codigo de una aplicación,API, codigo de un sistema productivo, etc). Cada dia cobra mas fuerza la siguiente idea: **los notebooks son usados en las primeras etapas de desarollo de una aplicación de ML. Pero cuando el modelo cada vez esta más cerca a un entorno "productivo", el codigo de un notebook debe ser refactorizado a un codigo que sea mas sencillo de mantener y administrar** ([En este video se hace una discusión interesante y otras críticas](https://www.youtube.com/watch?v=7jiPeIFXb6U)). En las últimas sesiones de nuestro laboratorio, realizaremos una práctica donde ahondaremos un poco más en este tema. Manejo de Google ColabColaboratory, o Colab, permite escribir y ejecutar código de Python en un navegador, con las siguentes ventajas:- Sin configuración requerida (Python, Jupyter)- Acceso gratuito a GPU- Facilidad para compartir y desarrollar los notebooks de manera interactivaMira [este video introductorio sobre Colab](https://www.youtube.com/watch?v=inN8seMm7UI), para entender como Colab nos ayuda agilizar algunas tareas. El icono de *launch* en la parte superior derecha permite lanzar el notebook directamente Colab.En Colab, existen dos tipos de celdas:> Las celdas de texto: estan escrita con *Markdown*, un lenguaje de etiquetado más legible para la decoración de texto, se puede hacer desde encabezados, usar [LaTeX](http://www.latex-project.org/), símbolos matemáticos, listas enumeradas, entre otros. 
> Las celdas de código: son ejecutables, es decir, se pueden correr individualmente.Recuerda que una vez abierto, Debes dar clic en "Copiar en Drive". De lo contrario no podras alamancenar tu progreso y ni compartirlo con tu profesor. Como se menciono, cada celda de codigo se ejecuta por separado. Esto implica que **la ejecución se realiza de arriba hacia abajo en orden**. Es así como evitamos problemas de importación de librerías o variables sin definir.Por ejemplo, esta es una celda de código con una secuencia de comandos Python corta que calcula un valor, lo almacena en una variable y devuelve el resultado:(A fin de ejecutar el código en la celda anterior, haz clic en él para seleccionarlo y, luego, presiona el botón de reproducción ubicado a la izquierda del código o usa la combinación de teclas "Command/Ctrl + Intro". )seconds_in_a_day = 24 * 60 * 60 seconds_in_a_dayPara editar el código, solo haz clic en la celda y comienza a editar. Las variables que defines en una celda pueden usarse en otras:seconds_in_a_week = 7 * seconds_in_a_day seconds_in_a_week**Ahora es tu turno**Haz click en la opción "+ Texto" (en la parte de arriba o al final de la celda, tambien te debe aparecer si dirijes el puntero en la parte inferior de la celda). Añade una celda de texto y prueba escribir algo.Luego, añade una celda de código en la opción " + Codigo" (en la parte de arriba o al final de la celda, tambien te debe aparecer si dirijes el puntero en la parte inferior de la celda) y escribe las siguientes lineas y ejecutalas. ``` pythonzero_to_nine = range(0,10)for n in zero_to_nine: print(n)```¿cual es la salida? Tambien es de utilidad conocer los shorcuts o accesos rapidos de colab, explora que combinaciones de teclas te pueden servir para agilizar el trabajo en Colab.![colabshortcuts](https://miro.medium.com/max/625/1*BmBIeAJrSl47_1j3TlxQUQ.png) Conceptos rapidos de Python (Crash Course!)En esta sección se hace una presentación de conceptos utiles de Python, si bien para el desarrollo del curso no es necesario y no se busca que las practicas esten enfocadas a desarrollar habilidades especificas de Python, si es de utilidad tener claras algunas capacidades de Python. Durante nuestra sesión# Tuplas tup = (1, "hola", 3) print (tup[0]) #tup[1] = 2 #Esto es un error # Listas lista = [2, 3, 2.5, "Hola"] print (lista[2]) lista[2] = "nuevo" print (lista[2]) # Diccionarios dic = {"clave": "valor", "1": 324, "2": "Hola"} print (dic["clave"]) #Conjuntos conjunto = {1, 3, "hola"} print(conjunto) ## Estrucutra if else age=17 if age>18: print("you can enter" ) elif age==18: print("go see Pink Floyd") else: print("go see Black pink / BTS" ) print("move on") dates = [1982,1980,1973] N=len(dates) # iterar en el indice for i in range(N): print(dates[i]) # iterar en los elementos for i in dates: print(i) # usar enumerate for n,i in enumerate (dates): print(n, i) # iterar en dos listas de igual tamaño a = [1, 2, 3] b = ["one", "two", "three"] for num, letra in zip(a,b): print(num,letra) ## dates = [1982,1980,1973,2000] i=0; year=0 while(year!=1973): year=dates[i] i=i+1 print(year) print("it took ", i ,"repetitions to get out of loop")Manejo vectores y matrices en NumPyNumPy es un paquete que proporciona herramientas y técnicas para manipular estructuras de datos con matrices, es mucho mejor que las listas de Python, tiene acceso y escritura más rápida. 
Posee una amplia colección de herramientas y técnicas que se pueden utilizar para resolver problemas mátematicos, además de que contiene todas las funciones para trabajar con matrices.A continuación encontrá algunas funciones muy útiles para los laboratorios, tales como:- Creación de matrices- Suma y resta de vectores- Producto de dos vectores- Producto de dos matrices- Multiplicación matricial- Indexación de matrices Creación de matrices El paquete NumPy introdujo los arrays N-dimensionales, acontinuación se mostrará las rutinas más utilizadas en los laboratorios. (Más rutinas [aquí](https://docs.scipy.org/doc/numpy/reference/routines.array-creation.html))#Creacion de array de ceros y unos zeros=np.zeros(10) print("Array de 10 ceros:", zeros) ones=np.ones(10) print("Array de 10 unos:", ones) array1 = np.arange(5) # Array de 5 enteros contando el 0 print("Array de 5 enteros: ",array1) line = np.linspace(0.0, 1.0, 5) #start, stop, num de puntos print("Array organizados unif.: ",line) v1 = np.array([8,6,-5,76,9.7]) #Array de una lista print("Array de una lista: ",v1) ## Ejercicio Crear array de ceros con dimension 3x2** ### HINT: ¿como es el parametro a np.zeros? # Inicializar un vector manualmente de 1D #Array de una lista v1 = np.array([3,-1,2]) v2= np.array([2,-1,3]) print("v1: ",v1) print("v2: ",v2) # Suma y resta de vectores #(para la resta usamos -) suma1 = v1 + v2 print(suma1) # producto elemento a elemento prod1 = v1*v2 print ("Producto elemeto a elemento",prod1) #Producto matricial prod1 = np.dot(v1,v2) print ("Producto matricial",prod1) prod2 = v1 @ v2 print("Producto matricial",prod2)Escribe el codigo de Python usando numpy, para realizar la siguiente operacion, siendo $\odot$ la representación de la multiplicación elemento a elmento, y $\cdot$ el producto matricial.$r = (v_{1}\odot v_{2}) + v_{1} - v_{2} + (v1 \cdot v2) $Siendo v1 y v2 los vectores v1 y v2 ejecutadas en la celda anterior. Finalmente imprime el resultado `r`. En este ejercicio tambien vas observar en funcionamiento un concepto de *numpy* llamado [Broadcasting](https://machinelearningmastery.com/broadcasting-with-numpy-arrays/).## Ejercicio: Escribe aca la operación ### El resultado debe ser [20, 14, 18] r = print (r) # Indexar una matriz manualmente 2x3 # la entrada en una lista de lista. # cada lista es un renglon de la matriz # cada renglon debe tener las mismas columnas # (el mismo tamaño de lista) m1 = np.array([[1,2,3],[0.5,0.3,2.1]]) print (m1)¿Cuántas filas y columnas tiene la variable m1?print("Shape m1", ) m2 = np.array([[1,2],[2,1],[3,1]]) # [3x2] print ("dimensiones de m2",np.shape(m2)) print(m2) #Producto de dos matrices print ("Productor de dos matrices \n",np.dot(m1,m2)) print("Producto de dos matrices @\n",m1@m2) #Producto elemento a elemento m3 = np.array([[1,2],[2,1]]) # [3x2] print ("ls dimensiones es de m3", np.shape(m3)) print(m3*m3)¿Por qué el siguiente código produce error?print (m1*m2)Se debe organizar las dimensiones de la matriz para hacer la multiplicación elemento a elemento correctamente. ¿Cómo se puede solucionar?# usando la transpuesta! m1_new = m1.T print("New shape m1", m1_new.shape) print(m1_new*m2) # pero también podria hacer re-shape? 
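# Note: reshaping makes the shapes compatible, but it is not equivalent to the
# transpose used above. m1.T swaps rows and columns, whereas m1.reshape((3,2))
# refills the same elements row by row, so for m1 = [[1, 2, 3], [0.5, 0.3, 2.1]]
# the transpose is [[1, 0.5], [2, 0.3], [3, 2.1]] while the reshape gives
# [[1, 2], [3, 0.5], [0.3, 2.1]]. The element-wise products with m2 computed
# below therefore differ from the one computed with the transpose.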
m1_new2 = m1.reshape((3,2)) print("New shape m1", m1_new2.shape) print(m1_new2*m2) m1_new2 = m1.reshape((3,-1)) print("New shape m1", m1_new2.shape) print(m1_new2*m2)¿Hay alguna diferencia con la matriz m1 después de hacer el reshape, los resultados son diferentes?_Reshape_: Asigna una nueva forma a la matriz ya sea un entero para una dimensión o una tupla para N-Dimensionesa = np.arange(6).reshape((3, 2)) a b = np.arange(6).reshape((3,1,2)) b b = b.reshape((6)) b¡Cuidado con la asignación de variables! Python usa variables referenciadas. Si se requiere hacer una copia se debe usar el método ".copy".# si se usa = se crea una referencia print("m1 antes de hacer la referencia \n", m1) m4 = m1 m4[0,1] = 9 print ("M1 se modifica aunque no se 'hizo' ninguna operacion a esta variable ""m1\n",m1) print ("Ahora con el metodo copy") print("m1 antes de hacer el copy \n", m1) m5 = np.copy(m1) m5[1,1] = 9 print ("m5 es m1 con el valor modificado \n",m5) print ("Pero m1 esta vez no es modificado") print ("m1 \n",m1)IndexaciónPara hacer una indexación en matrices se accede con los corchetes y siempre se define así: [filas,columnas]. Observa que la separación se realiza con una coma ( , ) x = matrix[filas, columnas] Si deseo escoger entre la fila x hasta y debe ser separado por dos puntos **:**, de la siguiente forma: x = matrix[x:y,] De la misma forma aplica para las columnas, si quiero la primera columna: x = matrix[0:2,0:1] Acceder a los últimas posiciones: x = matrix[0:-1,0:-1]print("Original m2") print(m2) print("----") new_m2 = m2[0:2,] print("Nuevo m2, dos primeras filas") print(new_m2) new_m2 = m2[0:2,0:1] print("Nuevo m2, dos primeras filas y primera columna") print(new_m2) m4 = np.arange(5,25).reshape((10,2)) m4 ## Ejercicio: Obtener las últimas dos filas del anterior vector ## Hint: debe retonar [[21, 22], [23, 24]] last_m2 = m4[] last_m2Concatenar vectores y matricesa = np.array([[1,5,9], [2,6,10]]) b = np.array([[3,7,11], [4,8,12]]) print("a:\n ", a, "\n b: \n", b) np.concatenate((a,b), axis=0) np.concatenate((a, b), axis=1) np.vstack((a, b)) ## Algunas veces es util, obtener una lista ## de los elementos que están la matriz ## si bien el reshape puede ser usado ## np.ravel() puede ser usado para ese objetivo a = np.array([[1,1, 1], [2,2,2], [3,3,3]]) print("antes del ravel \n ", a, "\n usando ravel \n ", np.ravel(a))Otras funcionalidades de Numpy## Calculo del promedio a = np.array(range(5)) print("vector", a, "promedio", np.mean(a)) print("vector", a, "promedio con axis = 1, es equivalante al anterior", np.mean(a, axis = 0)) ## el parametro axis es mas util en una matriz a = np.array(range(6)).reshape((3,2)) print("vector \n", a, "\n promedio con axis = 0\n", np.mean(a, axis = 0)) print("vector \n ", a, "\n promedio con axis = 1\n " , np.mean(a, axis = 1)) ## equivalente con min, max y sum print ("extraer la suma: ", a.sum(), "o de esta manera", np.sum(a)) print ("igualmente el parametro axis es util, con axis = 0 \n", np.sum(a, axis = 0), "\n y axis = 1 \n ", np.sum(a, axis = 1)) # generar un valores aleatorio x = np.random.choice(2,size = 10) print(x) x = np.random.choice(10,size = 10) print(x) ## extrar los valores unicos y contar la frequencia de ## de estos mismos uniq = np.unique(x) print("devuelve los valores unicos", uniq) uniq = np.unique(x, return_counts=True) print("y con esta opciones devuelve una tupla \n ", uniq, "\nel primer elemento de la tupla son los valores", uniq[0], "\n el segundo elemento el contador \n", uniq[1]) ## Ejercicio: explorar la funcion de estos ## y que 
parametros pueden recibir ## metodos #ones : np.ones() #diag: np.diag() #linalg.inv #Inversa de matriz: np.linalg.inv() #linalg.svd #Descomposición en valores singulares: np.linalg.svd() #logical_and #Elemento a elmento: np.logical_and() #logical_or #Elemento a elelemto: np.logical_or()Recordar que el uso de numpy es preferido para la implementación, de los algoritmos, ya que al usar la libreria, los codigos son mas efecientes y se logran los beneficios de la **vectorización**, por ejemplo, al realizar la siguiente operación, es posible realizarla con ciclos **for**. pero al usar la representación matricial y las librerias de numpy tenemos algunos beneficios.$m = a \cdot b $print("shape de a", matriz_a_for.shape) print("shape de b", matriz_b_for.shape)Usando esta imagen entendemos, un poco mas la operación![vectorization](https://github.com/jdariasl/ML_2020/blob/master/Labs/commons/images/vectorization.png?raw=1)# implementacion con numpy import time results = [] for n in range(10): s = time.time() m = np.dot(matriz_a_for, matriz_b_for.T) results.append(time.time() - s) print("time en ms", np.mean(results)*1000, "\n results \n ",m) ## Implementacion la operacion mediante ciclos for m = np.zeros(shape = (10,1)) results = [] for n in range(10): s = time.time() for i in range(m.shape[0]): aa = np.sum(matriz_a_for[i, :]*matriz_b_for) m[i,0] = aa results.append(time.time() - s) print("time en ms", np.mean(results)*1000, "\n results \n ",m)Con ciclos **for** tenemos una implementación que es casi 200 % más lenta que la implementación "vectorizada". Manejo de estructuras de datos en Pandas# en la configuracion inical se hace import pandas as pd # lectura de un csv datos = pd.read_csv("bank.csv", sep = ";") # esto crea un pandas dataframe # Pandas es una libreria muy popular para el manejo de datos # y se integra con los notebooks de manera muy sencilla datos.head() # sample para explorar los datos de manera rapida datos.sample() # y se puede interpretar como una matriz de numpy: print ("puedes ver los datos de shape ", datos.shape) # pero para acceder a ellos es más similar a una tabla print("en forma de renglon 10 todas las columnas, pero retorna una serie") datos.loc[9, :] print("en forma de renglon 10 todas las columnas, pero retorna una df una tabla") datos.loc[[9], :] # puedes tambien filtrar columnas y mostrar multiples df con display print("primer filtro") display(datos.loc[[9], ['age', 'job', 'marital']]) print("segundo filtro") # mostrar los rengloes de 10 al 15 display(datos.loc[range(9,15), ['age', 'job', 'marital']]) # sin embargo esta soporta mas tipos de datos, pero se pueden seguir haciendo operaciones # similares con las columnas numericas # y puedes referenciar las columnas datos['day'] + datos['pdays']Vamos aprovechar tambien los df, para hacer tablas para nuesros experimentos. 
Podemos ir agregando resultados a la tabla# creacion de una tabla # con resultados errores = [0.1, 0.2,0.3, 0.01] parametros = [1,2,3,5] # se crea df vacio results = pd.DataFrame() i = 0 for e,p in zip (errores, parametros): results.loc[i, "parametro"] = p results.loc[i, "error"] = e results.loc[i, "tipo"] = "entrenamiento" i+=1 for e,p in zip (errores, parametros): results.loc[i, "parametro"] = p results.loc[i, "error"] = e results.loc[i, "tipo"] = "validacion" i+=1 resultsManejo de gráficas en matplotlib y pandas Ahora utilizaremos la librería matplotlib para hacer algunos ejemplos de gráficas básicas:$$ y = x $$$$ y = x^2 $$$$ y = x^3 $$Observemos que la variable x solo se carga en la primera celda de código. En adelante se puede usar sin necesidad de ser cargada de nuevo. En la gráfica de $y=x^3$ incluimos el título de la gráfica y los label para los ejes de la misma.#x = np.array([-2,-1,0,1,2]) x = np.linspace(-10,10,100) #Función y = ax + b y = 0.5*x plt.plot(x,y) plt.ylim(0,10) plt.xlim(0,10) plt.ylabel('y = x') plt.xlabel('x') plt.title(u"Gráfica de una función lineal\n") plt.show() y = x**2 plt.plot(x,y, c='green') #plt.xlim(-150,150) plt.show() y = x**3 plt.plot(y) plt.ylabel(r'$y = x^3$') plt.xlabel(u'x - 100 números entre -10 y 10') plt.title(u'Ejemplos de introducción para el curso de Simulación de Sistemas - UdeA\n') plt.show()Generando gráfica punteaday = np.sin(x) plt.plot(y, 'b-') plt.ylabel('y = seno(x)') plt.xlabel(u'x - 100 números entre -10 y 10') plt.title(r'$s(t) = \mathcal{A}\sin(2 \omega t)$', fontsize=16, color='r') plt.show()Dos gráficas en el mismo planoy2 = x y = np.sin(x) y1 = np.cos(x) plt.plot(y, 'r--', y1, 'b-') plt.ylabel('y = seno(x)') plt.xlabel(u'x - 100 números entre -10 y 10') plt.title(u'Ejemplos de introducción para el curso de Simulación de Sistemas - UdeA\n') plt.show()Agregando el legend al gráficoy = np.sin(x) y1 = np.cos(x) plt.plot(y, 'r-', label='Seno') plt.plot(y1, 'b-', label='Coseno') plt.ylabel('y = seno(x)') plt.xlabel(u'x - 100 números entre -10 y 10') plt.title(u'Ejemplos de introducción para el curso de Simulación de Sistemas - UdeA\n') plt.legend() plt.show()Graficas de barrasvalues = np.array([1, 10, 100]) ind = np.arange(3) plt.figure(1, figsize=(9, 3)) plt.subplot(131) plt.bar(ind+1, values*np.random.rand(3)) plt.subplot(132) plt.bar(ind+1, np.flipud(values)) plt.subplot(133) plt.bar(ind+1, values) plt.suptitle(u'Ejemplos - Simulación de Sistemas y Lab.') plt.show()Gráficos para problemas de clasificación en Machine learning. 
Scatter plots#Creamos los datos artificiales mu1, sigma1 = 1.5, 0.1 mu2, sigma2 = 1, 0.1 N = 100 x1 = mu1 + sigma1 * np.random.randn(N) x1line = np.linspace(0,2,N) x2 = mu2 + sigma2 * np.random.randn(N) x2line = np.linspace(3,5,N) #Decision boundary b = -2 m = 3.5 v = np.linspace(0,100,100) t = v*m + b plt.scatter(x1, x1line, c='b') plt.scatter(x2, x2line, c='r') plt.plot(v, t, 'c-') plt.ylim(-0.5, 6, 1) plt.xlim(0.5, 1.8, 0.2) plt.show()También representar gráficas para visualizar relaciones matemáticas útiles$P(x; \mu, \sigma) = \frac{1}{\sqrt{2\pi}\sigma }exp-\frac{1}{2}\frac{(x-\mu)^2}{\sigma^2}$mu, sigma = 0, 2 #Generamos un conjunto de valores x x = np.linspace(-3,3,100) #los llevamos a la función de densidad de probabilidad normal p = ( 1/(np.sqrt(2*np.pi)*sigma) ) * np.exp( (-1/2) * ( ((x - mu) ** 2) / (sigma ** 2) ) ) plt.plot(x, p, 'r--') plt.show() p = np.random.randn(1000) plt.hist(p, color = 'b') plt.show()De igual manera, si tenemos un pandas dataframe, se puede simplificar nuestros codigos, y podemos hacer plots con menos lineas de codigo.datos.plot() results.plot.bar(x ='tipo', y='error') results.groupby(['tipo'])['error'].mean().plot.bar()Ejercicio![Explora un poco más la librería de pandas](https://pandas.pydata.org/)[Explora esta librería de visualizacion que se integra con pandas](https://seaborn.pydata.org/) Estructura de los laboratorios1. Contextualización del problema y entedimiento de los datosSe busca que tenga un contexto del problema que se está abordando, de modo que les ayude a resolver los ejercicios. Es de suma importancia entender de fondo si es un problema de clasificación o regresión, y en cuanto a los datos con los que trabajaremos, el número de clases, muestras y carácteristicas, esto le ayudará para entender los resultados una vez complete los algoritmos y para hacer las gráficas, entre otros. 2. Completar el códigoHacer la implemetación del algoritmo del modelo. Son modelos que se explicaron en la teoría vista en clase, una vez comprenda su funcionamiento se completa ya sea el algoritmo o la implementación correcta de las librerías usando los recursos que ya tiene el laboratorio.3. EntrenamientoEn este punto debe hacer uso de las funciones escritas en el punto anterior para realizar el proceso de modelamiento y simulación de los datos de cargados en el punto 14. 
ResultadosCompletar la tabla de resultados y derivar conclusiones de estos mismos.En cada uno de esta sesiones, vas encontrar ejercicios de codigo similares al siguiente:#La celda comenzara con este aviso: ## Ejercicio de Codigo ## # Se te requerira completar una función cuyas especificaciones # y desables se explicán, en la documentacion de la función # puedes agregar celdas de codigo para y verificando y testeando lo que necesites # cuando estes seguro comienza a escribir tu codigo # dentro de la función # NO MODIFIQUES el nombre de la funcion def mult_matrices (matriz_a, matriz_b): """Esta funcion debera devolver la multiplicacion de matrices entre las dos matrices entrantes matriz_a, matriz_b: matrices en numpy retorna: debe retornar el resultado de multiplicar las dos entradas """ # Aqui comienza a completar tu codigo #res = np.dot(matriz_a, matriz_b, axis = 1) #res = matriz_a + matriz_b res = np.dot(matriz_a, matriz_b) # debes retornar siempre lo requerido return(res) # seguido de la celda siempre encontraras el codigo para testear si tu implementacion fue correcta ## Es muy importante que esta celda de codigo NO la modifiques GRADER_INTRO_LAB.run_test("ejercicio1", mult_matrices) ## tambien vas a encontrar ejercicios similares a este # donde tu funcion debe ejecutar el codigo del modelo ## y retornar el modelo entrenado ## Ejercicio de Codigo ## def train_model(Xtrain, ytrain, param): """ esta función debe entrenar un modelo de regression pero solo inicializando aleatorimente train_data: a matriz numpy con las muestras de entrenamiento train_labels: a matriz numpy con labels de entrenamiento param: este parametro es un dummy no debes hacer nada! retorna: la matriz W inicializada, y el error de entrenamiento """ W = np.random.rand((Xtrain.shape[1])) # np.ones((Xtrain.shape[1])) # np.ones((Xtrain.shape[1])) error = np.mean(np.dot(Xtrain, W) - ytrain) return (W,error) #return (None) ## la funcion que prueba tu implementacion GRADER_INTRO_LAB.run_test("ejercicio2", train_model) # y con las funciones debes usarla para completar los experimentos y llenar el dataframe # debes variar el parametro de 0 a 5 ## Ejercicio de Codigo ## def experimentar (Xtest, ytest, params): """Esta función debe realizar los experimentos, de manera programatica.} Debe devolver un datarame con los errrores por cada parametro. Xtest: matriz numpy con los valores del conjunto para test ytest: los valores de etiqueta reales retorna: un dataframe con dos columnas: el valor del parametro y el valor del error """ #params = range(5) resultados = pd.DataFrame() for i, param in enumerate (params): W, err = train_model(Xtest, ytest, param) resultados.loc[i,'param'] = param resultados.loc[i,'err'] = err return (resultados) ## la funcion que prueba tu implementacion GRADER_INTRO_LAB.run_test("ejercicio3", experimentar)TEST EXITOSO!De igual manera, en cada notebook van a encontrar **preguntas abiertas**, Que deberán ser respondidas con: - los resultados del laboratorio, - información vista en clase - pequeña investigación relacionada con la tematica de la sesión.#@title Pregunta Abierta #@markdown ¿es necesario siempre una inicialización aleatoria de las variables? respuesta1_1 = "" #@param {type:"string"} Recuerda seguir el código del honor y responder concientemente: - son un es espacio de aprendizaje - sirven para afianzar tus conocimientos - sirve para validar como interpretas lo que haces - son revisadas (hay algoritmos que me ayudan a detectar respuestas similares... ¡incluso de semestres pasados!) 
# en las ultimas partes del laboratorio vas encontrar esta linea de codigo # sirve para verificar que todo esta completo GRADER_INTRO_LAB.check_tests()Todo se ve ok. Asegurate de responder las preguntas en el formualario y envialo ¡buen trabajo!Finalmente al final de cada notebook, encontrarán un formulario que debe ser diligenciado con la información correspondiente.**USAR LAS CEDULAS**#@title Integrantes codigo_integrante_1 ='' #@param {type:"string"} codigo_integrante_2 = '' #@param {type:"string"}**¿como se considera un laboratorio entregado?**1. Debes descargar el archivo .ipynb2. Entrar al formulario, subir el archivo y **enviar el formulario**3. Se tomara en cuenta el ultimo envio del formulario. (esto tambien aplica si envian desde diferentes usuarios)4. El formulario sera cerrado despues del limite establecido. Recomendaciones Finales para la entrega de los laboratorios1. Los espacios para laboratorio son para **ustedes**, traten de aprovecharlos al máximo.2. Es invalido enviar los laboratorios vía correo electronico y otro medio que no haya sido especificado. Se debe seguir el proceso descrito en la anterior sección.3. **Muy buena practica**: cuando se confirma que todos los tests estan correctos, reinicar el kernel, y dar en ejecutar todas las lineas de codigo. De esta manera te aseguras que todo esta correcto para el envio del laboratorio.3. Tener en cuenta, que si bien hay tests automáticos, los notebooks serán revisados y ejecutados de **manera manual**. Hagan los ejercicios a **conciencia y con toda la dispocisión para aprender y generar habilidades**.4. Es muy importante revisar las guias de laboratorio con anterioridad. Es muy factible que si el primer vistazo que le hacen a la guia es durante la sesión conjunta, el tiempo no sera un aliado. 5. En cada sección se especificara la fecha/hora limite de entrega. Debemos respetar las reglas definidas para el grupo. **De antemano se entienden que todos podemos tener condiciones/situaciones diferentes, sin embargo no habrá excepciones si no se siguen las pautas descritas en el estatuto estudiantíl**. ---------# esta linea de codigo va fallar, es de uso exclusivo del los profesores GRADER_INTRO_LAB.grade()Using Requests to interact with the GitHub API This is a little introduction to using the [Requests](http://docs.python-requests.org/en/master/) library to interact with the [GitHub API](https://developer.github.com/v3/). The documentation for each is excellent, so the idea here is to give a few flavours of how they interact with one-another, and not to redundantly document the tools themselves. First, import some useful functionality:from pprint import pprint from pathlib import Path import requestsNext, get hold of a [GitHub personal access token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/) with the "Read:Org" scope selected:![](PAT_creation.png)You may choose to save the token in a file called token.txt, and read it in with the code below. 
Alternatively, just create a variable called ```token=''```.Note: You will not be able to get the token from the GitHub interface after it has been displayed for the first time, so it is recommended to save it somewhere (like a keychain).def api_token(): token_file = Path('token.txt') if not token_file.exists(): raise IOError('Please create a token at github.com, and save it in {}'.format(token_file)) token = token_file.read_text().strip() return token token = api_token()Now that we have a personal access token, let's define the headers that will allow us to use it:headers = {'Authorization': 'token {}'.format(token), 'Accept': 'application/vnd.github.v3+json'} API_URL = 'https://api.github.com'Finally, we are ready. Let's find out what the "login" (username) is for this personal access token.# The endpoint target. target = '/user' # The actual HTTP request resp = requests.get('{}{}'.format(API_URL, target), headers=headers) # Raise an error if we didn't get a 200 return code. resp.raise_for_status() # Assume the response is JSON, and get hold of it. content = resp.json() # Print the content so that we know what a GitHub response actually looks like. pprint(content) # Get hold of the user's login ID login = content['login']{'avatar_url': 'https://avatars3.githubusercontent.com/u/810663?v=4', 'bio': 'Scientific software engineer and problem solver.', 'blog': 'https://pelson.github.io', 'company': None, 'created_at': '2011-05-25T20:18:23Z', 'email': '', 'events_url': 'https://api.github.com/users/pelson/events{/privacy}', 'followers': 87, 'followers_url': 'https://api.github.com/users/pelson/followers', 'following': 23, 'following_url': 'https://api.github.com/users/pelson/following{/other_user}', 'gists_url': 'https://api.github.com/users/pelson/gists{/gist_id}', 'gravatar_id': '', 'hireable': True, 'html_url': 'https://github.com/pelson', 'id': 810663, 'location': 'UK', 'login': 'pelson', 'name': '', 'organizations_url': 'https://api.github.com/users/pelson/orgs', 'public_gists': 55, 'public_repos': 168, 'received_events_url': 'https://api.github.com/users/pelson/received_events', 'repos_url': 'https://api.github.com/users/pelson/repos', 'site_admin':[...]Now, let's look at all the repos that this user has access to:target = '/user/repos' resp = requests.get('{}{}'.format(API_URL, target), headers=headers) resp.raise_for_status() content = resp.json() pprint(content[0]) pprint([repo['full_name'] for repo in content][:5])['bblay/iris_logo', 'conda-forge/addict-feedstock', 'conda-forge/ads-feedstock', 'conda-forge/affine-feedstock', 'conda-forge/agate-dbf-feedstock']Similarly, let's get hold of all the repos in the user's GitHub account:target = '/users/{}/repos'.format(login) resp = requests.get('{}{}'.format(API_URL, target), headers=headers) resp.raise_for_status() content = resp.json() pprint([repo['full_name'] for repo in content][:5])['pelson/anaconda-build', 'pelson/anaconda-list-distributions', 'pelson/anaconda-recipes', 'pelson/antigrain', 'pelson/artview-feedstock']There are a number of repos starting with "a" in this user's account. 
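One practical detail worth noting here: list endpoints such as ```/user/repos``` and ```/users/{login}/repos``` are paginated, so the responses above only contain the first page (30 items by default, up to 100 with the ```per_page``` parameter). A small sketch of following the ```Link``` header, which Requests exposes as ```resp.links```; the helper ```get_all_pages``` is just an illustration, reusing the ```API_URL```, ```login``` and ```headers``` defined above.

```python
# Collect all pages of a paginated GitHub listing by following the Link header.
def get_all_pages(url, params=None, headers=None):
    items = []
    params = dict(params or {}, per_page=100)    # ask for the maximum page size
    while url is not None:
        resp = requests.get(url, params=params, headers=headers)
        resp.raise_for_status()
        items.extend(resp.json())
        # Requests parses the Link header into resp.links; 'next' is absent on the last page.
        url = resp.links.get('next', {}).get('url')
        params = None                            # the 'next' URL already carries the query string
    return items

all_repos = get_all_pages('{}/users/{}/repos'.format(API_URL, login), headers=headers)
print(len(all_repos))
```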
Let's change the sort order by modifying the request:target = '/users/{}/repos'.format(login) params = {'sort': 'updated'} resp = requests.get('{}{}'.format(API_URL, target), params=params, headers=headers) resp.raise_for_status() content = resp.json() pprint([repo['full_name'] for repo in content][:5])['pelson/intro-to-using-requests-with-github-API', 'pelson/my-first-heroku-tornado-app', 'pelson/example-supervisord-manager', 'pelson/conda-rpms', 'pelson/python-gnupg']The GitHub API is an endless trove of information. For example, one can get hold of a number of events that the authenticated user did with:target = '/users/{}/events'.format(login) resp = requests.get('{}{}'.format(API_URL, target), headers=headers) resp.raise_for_status() content = resp.json() pprint({event['type'] for event in content}){'CreateEvent', 'GollumEvent', 'IssueCommentEvent', 'PullRequestEvent', 'PullRequestReviewCommentEvent', 'PushEvent'}自动写诗 作者:郑之杰首先导入必要的库:import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader加载数据集本次实验的数据来自chinese-poetry:https://github.com/chinese-poetry/chinese-poetry实验提供预处理过的数据集,含有57580首唐诗,每首诗限定在125词,不足125词的以``````填充。数据集以npz文件形式保存,包含三个部分:- (1)data: 诗词数据,将诗词中的字转化为其在字典中的序号表示。- (2)ix2word: 序号到字的映射- (3)word2ix: 字到序号的映射预处理数据集的下载:[点击下载](https://yun.sfo2.digitaloceanspaces.com/pytorch_book/pytorch_book/tang.npz)def prepareData(): # 读入预处理的数据 datas = np.load("tang.npz") data = datas['data'] ix2word = datas['ix2word'].item() word2ix = datas['word2ix'].item() # 转为torch.Tensor data = torch.from_numpy(data) dataloader = DataLoader(data, batch_size = 16, shuffle = True, num_workers = 2) return dataloader, ix2word, word2ix dataloader, ix2word, word2ix = prepareData()构建模型模型包括Embedding层、LSTM层和输出层。class PoetryModel(nn.Module): def __init__(self, vocab_size, embedding_dim, hidden_dim): super(PoetryModel, self).__init__() self.hidden_dim = hidden_dim self.embedding = nn.Embedding(vocab_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, num_layers=2) self.linear = nn.Linear(self.hidden_dim, vocab_size) def forward(self, input, hidden = None): seq_len, batch_size = input.size() if hidden is None: h_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float() c_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float() else: h_0, c_0 = hidden embeds = self.embedding(input) output, hidden = self.lstm(embeds, (h_0, c_0)) output = self.linear(output.view(seq_len * batch_size, -1)) return output, hidden训练模型# 设置超参数 learning_rate = 5e-3 # 学习率 embedding_dim = 128 # 嵌入层维度 hidden_dim = 256 # 隐藏层维度 model_path = None # 预训练模型路径 epochs = 4 # 训练轮数 verbose = True # 打印训练过程 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') def train(dataloader, ix2word, word2ix): # 配置模型,是否继续上一次的训练 model = PoetryModel(len(word2ix), embedding_dim, hidden_dim) if model_path: model.load_state_dict(torch.load(model_path)) model.to(device) # 设置优化器 optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate) # 设置损失函数 criterion = nn.CrossEntropyLoss() # 定义训练过程 for epoch in range(epochs): for batch_idx, data in enumerate(dataloader): data = data.long().transpose(1, 0).contiguous() data = data.to(device) input, target = data[:-1, :], data[1:, :] output, _ = model(input) loss = criterion(output, target.view(-1)) if batch_idx % 900 == 0 & verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch+1, batch_idx * len(data[1]), len(dataloader.dataset), 100. 
* batch_idx / len(dataloader), loss.item())) optimizer.zero_grad() loss.backward() optimizer.step() # 保存模型 torch.save(model.state_dict(), 'model.pth') train(dataloader, ix2word, word2ix)Train Epoch: 1 [0/57580 (0%)] Loss: 8.991456 Train Epoch: 1 [14400/57580 (25%)] Loss: 2.581470 Train Epoch: 1 [28800/57580 (50%)] Loss: 2.751966 Train Epoch: 1 [43200/57580 (75%)] Loss: 2.144301 Train Epoch: 2 [0/57580 (0%)] Loss: 2.348747 Train Epoch: 2 [14400/57580 (25%)] Loss: 2.117488 Train Epoch: 2 [28800/57580 (50%)] Loss: 2.355037 Train Epoch: 2 [43200/57580 (75%)] Loss: 2.676550 Train Epoch: 3 [0/57580 (0%)] Loss: 2.454466 Train Epoch: 3 [14400/57580 (25%)] Loss: 2.279747 Train Epoch: 3 [28800/57580 (50%)] Loss: 2.265135 Train Epoch: 3 [43200/57580 (75%)] Loss: 1.936758 Train Epoch: 4 [0/57580 (0%)] Loss: 2.007303 Train Epoch: 4 [14400/57580 (25%)] Loss: 2.050078 Train Epoch: 4 [28800/57580 (50%)] Loss: 1.963969 Train Epoch: 4 [43200/57580 (75%)] Loss: 1.757161生成唐诗给定几个词,根据这几个词接着生成一首完整的唐诗。# 设置超参数 model_path = 'model.pth' # 模型路径 start_words = '湖光秋月两相和' # 唐诗的第一句 max_gen_len = 125 # 生成唐诗的最长长度 def generate(start_words, ix2word, word2ix): # 读取模型 model = PoetryModel(len(word2ix), embedding_dim, hidden_dim) model.load_state_dict(torch.load(model_path)) model.to(device) # 读取唐诗的第一句 results = list(start_words) start_word_len = len(start_words) # 设置第一个词为 input = torch.Tensor([word2ix['']]).view(1, 1).long() input = input.to(device) hidden = None # 生成唐诗 for i in range(max_gen_len): output, hidden = model(input, hidden) # 读取第一句 if i < start_word_len: w = results[i] input = input.data.new([word2ix[w]]).view(1, 1) # 生成后面的句子 else: top_index = output.data[0].topk(1)[1][0].item() w = ix2word[top_index] results.append(w) input = input.data.new([top_index]).view(1, 1) # 结束标志 if w == '': del results[-1] break return results results = generate(start_words, ix2word, word2ix) print(results)['湖', '光', '秋', '月', '两', '相', '和', ',', '一', '片', '云', '中', '一', '片', '红', '。', '一', '片', '玉', '壺', '金', '翡', '翠', ',', '一', '枝', '红', '叶', '绿', '苔', '生', '。', '玉', '阶', '金', '缕', '金', '钗', '白', ',', '玉', '笛', '声', '声', '啼', '夜', '啼', '。', '一', '夜', '风', '吹', '花', '影', '落', ',', '一', '声', '啼', '鸟', '啼', '猨', '啼', '。', '一', '夜', '风', '吹', '不', '可', '见', ',', '一', '片', '红', '绡', '满', '衣', '裳', '。', '一', '夜', '一', '声', '啼', '不', '断', ',', '一', '声', '啼', '鸟', '啼', '不', '断', '。', '一', '夜', '风', '吹', '不', '可', '见', ',', '一', '声', '啼', '鸟', '啼', '不', '断', '。', '一', '夜', '风', '吹', '不', '可', '见', ',', '一', '声', '啼', '鸟', '啼']生成藏头诗# 设置超参数 model_path = 'model.pth' # 模型路径 start_words_acrostic = '湖光秋月两相和' # 唐诗的“头” max_gen_len_acrostic = 125 # 生成唐诗的最长长度 def gen_acrostic(start_words, ix2word, word2ix): # 读取模型 model = PoetryModel(len(word2ix), embedding_dim, hidden_dim) model.load_state_dict(torch.load(model_path)) model.to(device) # 读取唐诗的“头” results = [] start_word_len = len(start_words) # 设置第一个词为 input = (torch.Tensor([word2ix['']]).view(1, 1).long()) input = input.to(device) hidden = None index = 0 # 指示已生成了多少句 pre_word = '' # 上一个词 # 生成藏头诗 for i in range(max_gen_len_acrostic): output, hidden = model(input, hidden) top_index = output.data[0].topk(1)[1][0].item() w = ix2word[top_index] # 如果遇到标志一句的结尾,喂入下一个“头” if (pre_word in {u'。', u'!', ''}): # 如果生成的诗已经包含全部“头”,则结束 if index == start_word_len: break # 把“头”作为输入喂入模型 else: w = start_words[index] index += 1 input = (input.data.new([word2ix[w]])).view(1, 1) # 否则,把上一次预测作为下一个词输入 else: input = (input.data.new([word2ix[w]])).view(1, 1) results.append(w) pre_word = w return results results_acrostic = 
gen_acrostic(start_words_acrostic, ix2word, word2ix) print(results_acrostic)['湖', '上', '春', '风', '吹', ',', '春', '风', '吹', '落', '花', '。', '光', '阴', '一', '片', '月', ',', '一', '片', '绿', '苔', '生', '。', '秋', '风', '吹', '落', '日', ',', '白', '日', '照', '寒', '烟', '。', '月', '色', '连', '天', '远', ',', '江', '云', '入', '海', '门', '。', '两', '家', '春', '草', '绿', ',', '万', '里', '雪', '霜', '开', '。', '相', '见', '无', '人', '见', ',', '长', '安', '有', '故', '人', '。', '和', '风', '吹', '落', '日', ',', '白', '日', '照', '寒', '烟', '。']Expectation Reflection for Heart Disease DiagnosisIn this work, we apply our method, Expectation Reflection (ER), to predict heart disease. We compare the performance of ER with other existing methods such as Logistic Regression, Naive Bayes, Dicision Tree, Random Forest, k-nearest neighbors, and Support Vector Machines (SVM).import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score import expectation_reflection as ER from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt %matplotlib inline np.random.seed(1) # load data df = pd.read_csv('../heartdisease_data.csv',sep= ',') df[0:10]The data contains 13 features:0) age: Age (years) --> discrete 1) sex: Sex (1: male, 0: female) --> binary 2) cp: Chest pain type (1: typical angina, 2: atypical angina, 3: non-anginal pain, 4: asymptomatic) --> categorical 3) trestbps: Resting blood pressure (mm Hg on admission to the hospital) --> continuous 4) chol: Cholesterol measurement (mg/dl) --> continuous 5) fbs: Fasting blood sugar (0: 120 mg/dl) --> binary 6) restecg: Resting electrocardiographic measurement (0: normal, 1: having ST-T wave abnormality, 2: showing probable or definite left ventricular hypertrophy by Estes' criteria) --> categorical 7) thalach: Maximum heart rate achieved --> continuous8) exang: Exercise induced angina (1: yes; 0: no) --> binary 9) oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot) --> continuous10) slope: The slope of the peak exercise ST segment (1: upsloping, 2: flat, 3: downsloping) --> categorical11) ca: The number of major vessels (0-4) --> categorical 12) thal: Thalassemia (a type of blood disorder) (1: normal; 2: fixed defect; 3: reversable defect) --> categorical and 1 target: Heart disease (0: no, 1: yes)# select features and target: df = np.array(df).astype(float) # features: X = df[:,:-1] l,n = X.shape print(l,n) # target: y = df[:,-1] # convert 1,0 to 1,-1: y = 2*y - 1303 13Convert categorical variables to one hotfrom sklearn.preprocessing import OneHotEncoder onehot_encoder = OneHotEncoder(sparse=False,categories='auto') # sex = X[:,1] = 0,1 --> 2 #x1 = onehot_encoder.fit_transform(X[:,1].reshape(-1,1)) x1 = 2*X[:,1] - 1 # 0,1 --> -1, 1 # cp = X[:,2] = 1,2,3,4 --> 4 x2 = onehot_encoder.fit_transform(X[:,2].reshape(-1,1)) # fbs = X[:,5] = 0,1 --> 2 #x5 = onehot_encoder.fit_transform(X[:,5].reshape(-1,1)) x5 = 2*X[:,5] - 1 # 0,1 --> -1, 1 # restecg = X[:,6] = 0,1,2 --> 3 x6 = onehot_encoder.fit_transform(X[:,6].reshape(-1,1)) #exang: = X[:,8] = 0,1 --> 2 #x8 = onehot_encoder.fit_transform(X[:,8].reshape(-1,1)) x8 = X[:,8] # 0,1 --> -1, 1 # X[:,10] = 0,1,2 --> 3 x10 = onehot_encoder.fit_transform(X[:,10].reshape(-1,1)) # X[:,11] = 0,1,2,3,4 --> 5 x11 = 
onehot_encoder.fit_transform(X[:,11].reshape(-1,1)) # X[:,12] = 0,1,2,3 --> 4 x12= onehot_encoder.fit_transform(X[:,12].reshape(-1,1)) Xnew = np.hstack([X[:,0][:,np.newaxis],x1[:,np.newaxis]]) Xnew = np.hstack([Xnew,x2]) Xnew = np.hstack([Xnew,X[:,3:5]]) Xnew = np.hstack([Xnew,x5[:,np.newaxis]]) Xnew = np.hstack([Xnew,x6,X[:,7][:,np.newaxis],x8[:,np.newaxis],X[:,9][:,np.newaxis],x10,x11,x12]) X = Xnew X.shape np.unique(y,return_counts=True) t1 = y == -1. X1 = X[t1] y1 = y[t1] t2 = y == 1. X2 = X[t2] y2 = y[t2] t2 = np.random.choice(len(y2),len(y1),replace=False) X2 = X2[t2] y2 = y2[t2] X = np.vstack([X1,X2]) y = np.hstack([y1,y2]) np.unique(y,return_counts=True) from sklearn.utils import shuffle X, y = shuffle(X, y) from sklearn.preprocessing import MinMaxScaler X = MinMaxScaler().fit_transform(X)Predictiondef inference(X_train,y_train,X_test,y_test,method='expectation_reflection'): if method == 'expectation_reflection': h0,w = ER.fit(X_train,y_train,niter_max=100,regu=0.1) y_pred = ER.predict(X_test,h0,w) else: if method == 'logistic_regression': model = LogisticRegression(solver='liblinear') if method == 'naive_bayes': model = GaussianNB() if method == 'random_forest': model = RandomForestClassifier(criterion = "gini", random_state = 1, max_depth=3, min_samples_leaf=5,n_estimators=100) if method == 'decision_tree': model = DecisionTreeClassifier() model.fit(X_train, y_train) y_pred = model.predict(X_test) accuracy = accuracy_score(y_test,y_pred) return accuracy def compare_inference(X,y,train_size): npred = 100 accuracy = np.zeros((len(list_methods),npred)) precision = np.zeros((len(list_methods),npred)) recall = np.zeros((len(list_methods),npred)) accuracy_train = np.zeros((len(list_methods),npred)) for ipred in range(npred): X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred) idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False) X_train,y_train = X_train0[idx_train],y_train0[idx_train] for i,method in enumerate(list_methods): accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method) return accuracy.mean(axis=1),accuracy.std(axis=1) list_train_size = [0.8,0.6,0.4,0.2] list_methods=['logistic_regression','naive_bayes','random_forest','expectation_reflection'] acc = np.zeros((len(list_train_size),len(list_methods))) acc_std = np.zeros((len(list_train_size),len(list_methods))) for i,train_size in enumerate(list_train_size): acc[i,:],acc_std[i,:] = compare_inference(X,y,train_size) print(train_size,acc[i,:]) df = pd.DataFrame(acc,columns = list_methods) df.insert(0, "train_size",list_train_size, True) df plt.figure(figsize=(4,3)) plt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression') plt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes') plt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest') plt.plot(list_train_size,acc[:,-1],'k-',marker='o',label='Expectation Reflection') plt.xlabel('train size') plt.ylabel('accuracy mean') plt.legend() plt.figure(figsize=(4,3)) plt.plot(list_train_size,acc_std[:,0],'k--',marker='o',mfc='none',label='Logistic Regression') plt.plot(list_train_size,acc_std[:,1],'b--',marker='s',mfc='none',label='Naive Bayes') plt.plot(list_train_size,acc_std[:,2],'r--',marker='^',mfc='none',label='Random Forest') plt.plot(list_train_size,acc_std[:,-1],'k-',marker='o',label='Expectation Reflection') plt.xlabel('train size') plt.ylabel('accuracy standard deviation') plt.legend() 
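Note that `compare_inference` above allocates `precision`, `recall`, and `accuracy_train` arrays but only accuracy is ever filled in. A minimal sketch of how precision and recall could be collected alongside accuracy, assuming scikit-learn's `precision_score`/`recall_score` and the +1/-1 label encoding used in this notebook (the helper name `evaluate_all` is ours, not part of the original code):

from sklearn.metrics import precision_score, recall_score

def evaluate_all(y_test, y_pred):
    # labels are encoded as +1 (disease) / -1 (no disease) above
    acc = accuracy_score(y_test, y_pred)
    prec = precision_score(y_test, y_pred, pos_label=1)
    rec = recall_score(y_test, y_pred, pos_label=1)
    return acc, prec, rec

`inference` could return this tuple instead of a single accuracy, and `compare_inference` would then fill its `precision` and `recall` arrays the same way it fills `accuracy`.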
#np.savetxt('../heart_acc.txt',acc,fmt='%f') #np.savetxt('../heart_acc_std.txt',acc_std,fmt='%f')https://colab.research.google.com/drive/1-zs3rm9ickBXd0oj6yiy5U8PpW_ywHF_!pip install selectivesearch import matplotlib.pyplot as plt %matplotlib inline import selectivesearch import cv2 !wget https://www.dropbox.com/s/l98leemr7r5stnm/Hemanvi.jpeg img = cv2.imread('/content/Hemanvi.jpeg') img_lbl, regions = selectivesearch.selective_search(img, scale=100, min_size=2000) print(len(regions)) candidates = set() for r in regions: if r['rect'] in candidates: continue # excluding regions smaller than 2000 pixels if r['size'] < 2000: continue x, y, w, h = r['rect'] candidates.add(r['rect']) import matplotlib.patches as mpatches fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6)) ax.imshow(img) for x, y, w, h in candidates: rect = mpatches.Rectangle( (x, y), w, h, fill=False, edgecolor='red', linewidth=1) ax.add_patch(rect) plt.axis('off') plt.show()Finding best model and hyper parameter tunning using GridSearchCV **For iris flower dataset in sklearn library, we are going to find out best model and best hyper parameters using GridSearchCV** **Load iris flower dataset**from sklearn import svm, datasets iris = datasets.load_iris() import pandas as pd df = pd.DataFrame(iris.data,columns=iris.feature_names) df['flower'] = iris.target df['flower'] = df['flower'].apply(lambda x: iris.target_names[x]) df[47:150]Approach 1: Use train_test_split and manually tune parameters by trial and errorfrom sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.3) model = svm.SVC(kernel='rbf',C=30,gamma='auto') model.fit(X_train,y_train) model.score(X_test, y_test)Approach 2: Use K Fold Cross validation **Manually try suppling models with different parameters to cross_val_score function with 5 fold cross validation**cross_val_score(svm.SVC(kernel='linear',C=10,gamma='auto'),iris.data, iris.target, cv=5) cross_val_score(svm.SVC(kernel='rbf',C=10,gamma='auto'),iris.data, iris.target, cv=5) cross_val_score(svm.SVC(kernel='rbf',C=20,gamma='auto'),iris.data, iris.target, cv=5)**Above approach is tiresome and very manual. We can use for loop as an alternative**kernels = ['rbf', 'linear'] C = [1,10,20] avg_scores = {} for kval in kernels: for cval in C: cv_scores = cross_val_score(svm.SVC(kernel=kval,C=cval,gamma='auto'),iris.data, iris.target, cv=5) avg_scores[kval + '_' + str(cval)] = np.average(cv_scores) avg_scores**From above results we can say that rbf with C=1 or 10 or linear with C=1 will give best performance** Approach 3: Use GridSearchCV **GridSearchCV does exactly same thing as for loop above but in a single line of code**from sklearn.model_selection import GridSearchCV clf = GridSearchCV(svm.SVC(gamma='auto'), { 'C': [1,10,20], 'kernel': ['rbf','linear'] }, cv=5, return_train_score=False) clf.fit(iris.data, iris.target) clf.cv_results_ df = pd.DataFrame(clf.cv_results_) df df[['param_C','param_kernel','mean_test_score']] clf.best_params_ clf.best_score_ dir(clf)**Use RandomizedSearchCV to reduce number of iterations and with random combination of parameters. This is useful when you have too many parameters to try and your training time is longer. 
It helps reduce the cost of computation**from sklearn.model_selection import RandomizedSearchCV rs = RandomizedSearchCV(svm.SVC(gamma='auto'), { 'C': [1,10,20], 'kernel': ['rbf','linear'] }, cv=5, return_train_score=False, n_iter=2 ) rs.fit(iris.data, iris.target) pd.DataFrame(rs.cv_results_)[['param_C','param_kernel','mean_test_score']]**How about different models with different hyperparameters?**from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression model_params = { 'svm': { 'model': svm.SVC(gamma='auto'), 'params' : { 'C': [1,10,20], 'kernel': ['rbf','linear'] } }, 'random_forest': { 'model': RandomForestClassifier(), 'params' : { 'n_estimators': [1,5,10] } }, 'logistic_regression' : { 'model': LogisticRegression(solver='liblinear',multi_class='auto'), 'params': { 'C': [1,5,10] } } } scores = [] for model_name, mp in model_params.items(): clf = GridSearchCV(mp['model'], mp['params'], cv=5, return_train_score=False) clf.fit(iris.data, iris.target) scores.append({ 'model': model_name, 'best_score': clf.best_score_, 'best_params': clf.best_params_ }) df = pd.DataFrame(scores,columns=['model','best_score','best_params']) dfExercise 3Add the specified code for each code cell, running the cells _in order_. Define a function `add_three` that takes a single argument and returns a value 3 greater than the input.def add_three(number): return number+3Create and output a variable `ten` that is the result of passing `7` to your `add_three()` function.ten=add_three(7) print(ten)10Create a variable `ten_str` that is the result of passing `"7"` to your `add_three()` function. What does this tell you about how the function should be described (e.g., in a doc string)?ten_str=add_three("7") argument should be integerDefine a function `imperial_to_metric` that takes in two arguments: a number of feet and a number of inches. The function should return the total length in meters. _Include an appropriate doc string_.def imperial_to_metric(num_feet,num_inches): """ summary parameters: argument1 : number of feet argument2 : number of inches returns: total length in meters """ num_feet=num_feet*.3048 num_inches=num_inches*.0254 return num_feet+num_inchesCreate and output variable `height_in_meters` by passing your height in imperial to the `imperial_to_metric()` function.height_in_meters=imperial_to_metric(5,4) print(height_in_meters)1.6256Define a function `compare_str_length` that takes in 2 strings, and returns a sentence of the form```"The difference in string lengths is N"```Include an appropriate doc string.#some changes def compare_str_length(str1,str2): """ parameters:str1,str2 two strings returns: difference in lengths """ N=abs(len(str1)-len(str2)) print("The difference in string lengths is ",N)Pass two strings of different lengths to your `compare_str_length()` function.compare_str_length("two","three")The difference in string lengths is 2Define a function `fraction_str()` that takes two parameters, a numerator and a denominator, and outputs a string version of that that fraction (e.g., `"3/4"`). Make the parameters be **keyword arguments** with default values of 1.def fraction_str(num=1,denom=1): """ parameters:two numbers returns:string version of the fraction """ return (str(num)+"/"+str(denom))Call the `fraction_str()` function with named arguments to produce the string `"5/11"`. Print the result.- For fun: try listing the denominator argument before the numerator argument! 
What happens?fraction_str(denom=5,num=11)Call the `fraction_str()` function only specifying a denominator of `3`. Print the resultfraction_str(denom=3)1 / 3Call the `fraction_str()` function using **positional arguments** (unnamed) to produce the string `"11/5"`. Print the result.fraction_str(11,5)11 / 5Definindo as funções para h e psi#Defining h def h(t, L, w, e_mais, e_cruzado,A): h_mais = A*cos(w*t-w*L) h_cruzado = A*sin(w*t-w*L) return h_mais*e_mais + h_cruzado*e_cruzado\begin{equation} h = h_+ + h_\times\end{equation}#função PSI(t) def PSIj(j, k, L, N, A, w, T, ep, ec): H = h(T, L[j-1], w, ep, ec,A) phij = N[j-1].dot(H.dot(N[j-1]))/2 return phij/(1-(k.dot(N[j-1]))**2) #expandir aqui\begin{equation}\Psi (t) = \frac{n^i h_{ij} n^j}{2(1 - (\hat{k}\cdot \hat{n})^2)}\end{equation} Símbolosphi, theta, t, w, L, A , psi, sigma= symbols('ϕ θ t ω L A ψ σ')Sistemas de coordenadas e vetores usando o sympyDetFrame = ReferenceFrame("Det") WaveFrame = ReferenceFrame("Wave") WaveFrame.orient(DetFrame, "body", (phi, theta, psi), 'zxz') vx = WaveFrame.x vy = WaveFrame.y vz = WaveFrame.z dbaseii = outer(vx, vx) dbaseij = outer(vx, vy) dbaseik = outer(vx, vz) dbaseji = outer(vy, vx) dbasejj = outer(vy, vy) dbasejk = outer(vy, vz) dbaseki = outer(vz, vx) dbasekj = outer(vz, vy) dbasekk = outer(vz, vz) e_plus = dbaseii - dbasejj e_cross = dbaseij + dbaseji #n no referencial do detector n2 = cos(sigma)*DetFrame.x + sin(sigma)*DetFrame.y n3 = cos(sigma)*DetFrame.x - sin(sigma)*DetFrame.y k = WaveFrame.zDefining posições dos satélitesO = Point('O') #origin O.set_vel(DetFrame, 0) #seting p1, p2, p3 p1 = Point(r'P_1') p2 = Point(r'P_2') p3 = Point(r'P_3') #r1, r2, r3, gamma1, gamma2, gamma3 = symbols(r'r_1 r_2 r_3 \gamma_1 \gamma_2 \gamma_3') #dist from org & phase angle l = Symbol('l') p1.set_pos(O, l*cos(0 )*DetFrame.x + l*sin(0 )*DetFrame.y + 0*DetFrame.z) p2.set_pos(O, l*cos(2*pi/3)*DetFrame.x + l*sin(2*pi/3)*DetFrame.y + 0*DetFrame.z) p3.set_pos(O, l*cos(4*pi/3)*DetFrame.x + l*sin(4*pi/3)*DetFrame.y + 0*DetFrame.z) P1 = p1.pos_from(O) P2 = p2.pos_from(O) P3 = p3.pos_from(O) P = [P1, P2, P3] #setting n's, according to KTV notation n1 = p2.pos_from(p3) n2 = p3.pos_from(p1) n3 = p1.pos_from(p2) L1 = n1.magnitude() L2 = n2.magnitude() L3 = n3.magnitude() N = [n1, n2, n3] L = [L1, L2, L3]Início do cálculo do interferômetroPARAMETERS = (k,L,N,P,A,w,t, e_plus, e_cross) def delay(func, D): return func.subs(w*t, w*t - L[D-1]) def ygw(i,j,k,L,N,P,A,w,T, ep, ec): m = abs(6-i-j)-1 return (1+ k.dot(N[m]))*\ (PSIj(m, k, L, N, A, w, T + k.dot(P[i-1]) - L[m], ep, ec)\ - PSIj(m,k, L, N, A, w, T + k.dot(P[j-1]), ep, ec)) # # T + k.dot(P[i]) - L[m]) , T + k.dot(P[j])) def ygwD(i,j,k,L,N,P,A,w,T, ep, ec, D): #Ygw com delay #delay = L[D] return delay(ygw(i,j,k,L,N,P,A,w,T, ep, ec), D) def yij(i,j, parms = PARAMETERS): k,L,N,P,A,w,T, ep, ec = parms return ygw(i,j,k,L,N,P,A,w,T, ep, ec) def yijD(i,j,D): return delay(yij(i,j),D) def yijDD(i,j,D, E): return delay(delay(yij(i,j),D),E) f = A*cos(w*t) f delay(f, 2) X = (yij(3,1) + yijD(1,3,2))\ + delay(delay((yij(2,1) + yijD(1,2,3)),2),2)\ - (yij(2,1) + yijD(1,2,3))\ - delay(delay(yij(3,1)+yijD(1,3,2),2),2)\ - delay(delay(delay(delay(\ (yij(3,1) + yijD(1,3,2))\ + delay(delay((yij(2,1) + yijD(1,2,3)),2),2)\ - (yij(2,1) + yijD(1,2,3))\ - delay(delay(yij(3,1)+yijD(1,3,2),2),2)\ ,2),2),3),3) #X = sympy.trigsimp(X) y1 = yijD(3,1,2) - yij(2,3) #calculando M X=sympy.trigsimp(y1) X=sympy.expand(X) X #M=sympy.trigsimp(M) F_mais=X.coeff(cos(w*t)) F_cruzado=X.coeff(sin(w*t)) F_cruzado f_mais = 
sympy.lambdify([ phi, theta, w, A, l], F_mais) f_cruzado = sympy.lambdify([phi, theta, w, A, l], F_cruzado) M_eval = sympy.lambdify([phi, theta, w, A, l], M) f_mais #defining parameters phi_value, theta_value = np.mgrid[-np.pi:np.pi:100j, 0:np.pi:100j] arm=5e9/3e8 #segundos f=10**-3 #Hz freq=2*np.pi*f a=1 #atribuindos os valores acima nas funções # [phi , theta , w , A, r1, r2, r3, gamma1, gamma2, gamma3] f_mais_data = f_mais((phi_value), (theta_value), freq, a, arm) f_cruzado_data = f_cruzado((phi_value),(theta_value), freq, a, arm) f_mais_data #plot phi, theta e F fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(phi_value, theta_value,(f_mais_data),color='b') #ax.plot_surface(phi_value, theta_value,(f_cruzado_data),color='g') #ax.plot_surface(phi_value, theta_value,(f_cruzado_data-f_mais_data),color='g') ax.set_xlabel('phi') ax.set_ylabel('theta') ax.set_zlabel('F+') plt.show() #plot x,y,z fig = plt.figure() ax = fig.gca(projection='3d') x_mais=(f_mais_data)*np.sin(theta_value)*np.sin(phi_value) y_mais=-(f_mais_data)*np.sin(theta_value)*np.cos(phi_value) z_mais=(f_mais_data)*np.cos(theta_value) x_cruzado=(f_cruzado_data)*np.sin(theta_value)*np.sin(phi_value) y_cruzado=-(f_cruzado_data)*np.sin(theta_value)*np.cos(phi_value) z_cruzado=(f_cruzado_data)*np.cos(theta_value) ax.plot_surface(x_mais,y_mais,z_mais,color='b') #ax.plot_surface(x_cruzado,y_cruzado,z_cruzado,color='g') #ax.plot_surface((x_cruzado-x_mais),(y_cruzado-y_mais),(z_cruzado-z_mais),color='g', label = 'F_cruzado') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') plt.show()Setup Install dependenciesimport os import IPython from IPython.display import Audio from hyperparams import Hyperparams as hp from synthesize import Synthesizer #! ls checkpoints/i-0db58090af9cb95af/LJ01-1 #! ls checkpoints/i-031e73172dafa5ce9/LJ01-2 checkpoint_text2mel = "checkpoints/i-0db58090af9cb95af/LJ01-1/model_gs_820k" checkpoint_ssrn = "checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_773k" synthesizer = Synthesizer(checkpoint_text2mel, checkpoint_ssrn) ! ls checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_747k* tongue_twisters = [ " picked a peck of pickled peppers", "A peck of pickled peppers picked", "If picked a peck of pickled peppers", "Where’s the peck of pickled peppers picked?", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", "He would chuck, he would, as much as he could, and chuck as much wood", "As a woodchuck would if a woodchuck could chuck wood", "She sells seashells by the seashore", "Susie works in a shoeshine shop. Where she shines she sits, and where she sits she shines", "Fuzzy Wuzzy was a bear. Fuzzy Wuzzy had no hair. Fuzzy Wuzzy wasn’t fuzzy, was he?" ] ljset = [ "The birch canoe slid on the smooth planks", "Glue the sheet to the dark blue background", "It's easy to tell the depth of a well", "These days a chicken leg is a rare dish", "Rice is often served in round bowls", "The juice of lemons makes fine punch", "The box was thrown beside the parked truck", "The hogs were fed chopped corn and garbage", "Four hours of steady work faced us", "Large size in stockings is hard to sell", "The boy was there when the sun rose" ] sentences = ljset for i, sentence in enumerate(sentences): filename = "test%d.wav" % i synthesizer.synthesize(sentence, filename) print(sentence) IPython.display.display(Audio(filename, rate=hp.sr)) ! 
lscheckpoints hyperparams.pyc README.md test2.wav test.wav data_load.py LICENSE requirements.txt test3.wav train.py data_load.pyc modules.py synthesize.py test4.wav train.pyc dctts_tf.ipynb modules.pyc synthesize.pyc test5.wav utils.py fig networks.py test0.wav test6.wav utils.pyc harvard_sentences.txt networks.pyc test10.wav test7.wav horse.ogg notebook.ipynb test11.wav test8.wav hyperparams.py prepo.py test1.wav test9.wavAppendix 6: Iterative Estimation Methods A6.1 Newton Iterationimport numpy as np def f(P): X = none return X def Jacobian(P): J = none return J def Init(): P = none return P def pseudo_inverse(J): return (J^T*J)^(-1)*J^T def solve(J, e): return -pseudo_inverse(J) * e def step(P, X): e = f(P) - X J = Jacobian(P) delta = solve(J, e) return delta # It is possible that this iteration procedure converges to a local minimum value, or does not converge at all. def newton_iterate(X): P = Init() delta = none while continue_cond(delta): delta = step(P, X) P += deltaWeighted iterationAssume the measurement X satisfies a Gaussian distribution which covariance matrix $\Sigma_X$, and wishes to minimize the Mahalanobis distance $||f(\hat{P}) - X||_\Sigma$def pseudo_inverse_mahalanobis(J, C): return (J^T * C^(-1) * J)^(-1) * J^T * C^(-1) def solve_mahalanobis(J, C, e): return -pseudo_inverse_mahalanobis(J, C)* eQGridInteractive pandas dataframes: https://github.com/quantopian/qgrid```pip install qgrid --upgrade```df2 = df[df['Mine_State'] != "Wyoming"].groupby('Mine_State').sum() df3 = df.groupby('Mine_State').sum() # have to run this from the home dir of this repo # cd insight/ # python setup.py develop %aimport insight.plotting insight.plotting.plot_prod_vs_hours(df3, color_index=1) # insight.plotting.plot_prod_vs_hours(df2, color_index=1) def plot_prod_vs_hours( df, color_index=0, output_file="../img/production-vs-hours-worked.png" ): fig, ax = plt.subplots(figsize=(10, 8)) sns.regplot( df["Labor_Hours"], df["Production_short_tons"], ax=ax, color=sns.color_palette()[color_index], ) ax.set_xlabel("Labor Hours Worked") ax.set_ylabel("Total Amount Produced") x = ax.set_xlim(-9506023.213266129, 204993853.21326613) y = ax.set_ylim(-51476801.43653282, 746280580.4034251) fig.tight_layout() fig.savefig(output_file) plot_prod_vs_hours(df2, color_index=0) plot_prod_vs_hours(df3, color_index=1) # make a change via qgrid df3 = qgrid_widget.get_changed_df()Github https://github.com/jbwhit/jupyter-tips-and-tricks/commit/d3f2c0cef4dfd28eb3b9077595f14597a3022b1c?short_path=04303fcdiff-04303fce5e9bb38bcee25d12d9def22eqgrid_widget = qgrid.show_grid( df2[["Year", "Labor_Hours", "Production_short_tons"]], show_toolbar=True, ) qgrid_widgetCorEx on Positive Articlesimport pandas as pd wri = pd.read_csv("wri.csv",index_col=0) wri = wri[wri['labels']=='positive'] wri.reset_index(drop=True, inplace=True) print(wri.head()) print(wri.info()) from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_df=.5, min_df=2, max_features=None, ngram_range=(1, 2), norm=None, binary=True, use_idf=False, sublinear_tf=False, strip_accents = 'unicode', stop_words = 'english' ) vectorizer = vectorizer.fit(wri['text']) tfidf = vectorizer.transform(wri['text']) vocab = vectorizer.get_feature_names() print(len(vocab)) print(vocab) from corextopic import corextopic as ct TOPICS = 100 NBR_OF_WORDS = 7 anchors = [] model = ct.Corex(n_hidden=TOPICS, seed=42) model = model.fit( tfidf, words=vocab )['land', 'acre','hectares', 'acquisition', 'land acquisition', 'agricultural', 'acres', 
'degradation','landslides','property','resettlement'],   ['farmer', 'farming', 'agricultural', 'produce', 'crop', 'crops', 'agrarian', 'farms','farm','field','fields','soil','sugarcane','vegetables','farmers','agriculture','tractor','prices crops', 'debt','quota','food','fruits','livestock','cow','wheat','harvest','harvesting','horticulture','loan','loans','milk','paddy','rice','plant','plants','potatoes','potato'],   ['mining', 'coal', 'miner', 'miners','sand mining', 'sand','bauxite','iron ore','limestone','manganese ore','granite'],   ['forest','forests', 'forest department', 'reserve', 'forest officials','forestry'],   ['animal','leopard','leopards', 'animals', 'wildlife', 'tiger', 'attacked', 'slaughter', 'lion','lions', 'threat', 'tigress', 'bear','birds','cat','cattle','crocodile','elephant','elephants','pangolin','pangolins','species'],   ['drought', 'droughts','monsoon', 'rain','rains','rainfall','disaster'],   ['water', 'irrigation', 'monsoon', 'rain', 'flood', 'floods', 'flooded', 'climate change','climate','dam','dams','drinking']# Anchors designed to nudge the model towards measuring specific genres anchors = [ ['land','resettlement','degradation','plot'], ['farm','Farmers','crop','agriculture','crops','agrarian','farmer','farmers''cows','tractor','acre','fields','livestock','harvest','harvesting','potato','sugarcane','paddy','rice','milk'], ['mining', 'coal', 'miner', 'miners','sand mining', 'sand','bauxite','iron ore','limestone','manganese ore','granite'], ['forest','deforestation','trees'], ['animal','attacked','leopard','leopards','tiger','tigress','crocodile'], ['drought','rain','climate change'], ['water','dams','irrigation','flood','drinking'] ] anchors = [ [a for a in topic if a in vocab] for topic in anchors ] model = ct.Corex(n_hidden=TOPICS, seed=42) model = model.fit( tfidf, words=vocab, anchors=anchors, # Pass the anchors in here anchor_strength=100 # Tell the model how much it should rely on the anchors ) for i, topic_ngrams in enumerate(model.get_topics(n_words=NBR_OF_WORDS)): topic_ngrams = [ngram[0] for ngram in topic_ngrams if ngram[1] > 0] print("Topic #{}: {}".format(i+1, ", ".join(topic_ngrams))) topic_df = pd.DataFrame( model.transform(tfidf), columns=["topic_{}".format(i+1) for i in range(TOPICS)] ).astype(float) topic_df.index = wri.index wri = pd.concat([wri, topic_df], axis=1) for i in range(TOPICS): column='topic_{}'.format(i+1) print(wri[column].value_counts(normalize=True)) #Topic Flags wri['topic']=wri['topic_1']+wri['topic_2']+wri['topic_3']+wri['topic_4']+wri['topic_5']+wri['topic_6']+wri['topic_7'] wri['topic'].value_counts(normalize=True) MisTagged = wri[wri['topic']==0] MisTagged.to_csv("MissTagged.csv") print(MisTagged) print(MisTagged.info()) for i in range(TOPICS): column='topic_{}'.format(i+1) print(MisTagged[column].value_counts(normalize=True))0.0 1.0 Name: topic_1, dtype: float64 0.0 1.0 Name: topic_2, dtype: float64 0.0 1.0 Name: topic_3, dtype: float64 0.0 1.0 Name: topic_4, dtype: float64 0.0 1.0 Name: topic_5, dtype: float64 0.0 1.0 Name: topic_6, dtype: float64 0.0 1.0 Name: topic_7, dtype: float64 0.0 0.822222 1.0 0.177778 Name: topic_8, dtype: float64 0.0 0.922222 1.0 0.077778 Name: topic_9, dtype: float64 0.0 0.833333 1.0 0.166667 Name: topic_10, dtype: float64 0.0 0.866667 1.0 0.133333 Name: topic_11, dtype: float64 0.0 0.844444 1.0 0.155556 Name: topic_12, dtype: float64 0.0 0.866667 1.0 0.133333 Name: topic_13, dtype: float64 0.0 0.944444 1.0 0.055556 Name: topic_14, dtype: float64 0.0 0.777778 1.0 0.222222 Name: topic_15, 
dtype: float64 0.0 0.777778 1.0 0.222222 Name: topic_16, dtype: float64 0.0 0.877778 1.0 0.122222 Name: topic_17, dtype: float64 0.0 0.744444 1.0 0.255556 Name: topic_18, dtype: float64 0.0 0.877778 1.0 [...]Traindevice = 'gpu' if torch.cuda.is_available() else 'cpu' device def train(epochs, train_dl, val_dl, model, criterion, hooks, lr=3e-3, metrics=None, verbose=True): for hook in hooks: model.register_backward_hook(hook) optim = variable_lr_optimizer() stats = defaultdict(list) for epoch in range(epochs): model.to(device).train() e_loss = 0.0 e_total = 0 # for i, (x, y) in enumerate(train_dl): # optim.zero_grad() # x.to(device) # y.to(device) # bs = x.shape[0] # # Forward pass # y_hat = model(x) # loss = criterion(y_hat, y, reduction='mean') # # Backward pass # loss.backward() # optim.step() # # Update mini batch stats. # e_total += bs # loss += loss * bs # # Evaluate on validation set. # val_stats = validation_metrics() # # Update epoch stats. # stats['loss'].append(e_total) # stats['val_loss'].append() # Print epoch stats. return stats def gradient_stats_hook(model, grad_in, grad_out): print(stats(grad_out)) train(3, None, None, hnet, nn.BCEWithLogitsLoss, [gradient_stats_hook])DS 1: Baseline# %%time # ds_1 = uptools.Image_Dataset("data/img_data", "imgs", 256) # ds_1.data_split((0.76,0.12,0.12),stratify=True) # ds_1.details(plot=True) # ds_1.save_arrays("ds_1")Augmentation Parametersaug = augtools.Augmenter(pwa_scale=(0.05,0.075), e_sev=3, g_sev=2, b_sev=2, contrast=True, clip_limit=(1,3), dropout_pair=(0.15,0.85), x_scale=(0.7,1.3), y_scale=(0.7,1.05), shear_range=(-20,20), x_shift=(-0.05,0.05), y_shift=(-0.1,0.1), rotate_range=(-30,30), v_flip=0.5, h_flip=0.5, sharpness=True, fill_mode="reflect", randomize_params=True)DS 2: Rebalanced Dataset%%time ds_2 = uptools.Image_Dataset("data/img_data", "imgs", 256) %%time ds_2.split_rebalance((0.76,0.12,0.12), augmenter=aug, augment_scale=700, augment_type="simple", order=[7,4,5,0,2,1,3,12,14,10,13,15,8,11,6,9]) ds_2.details(plot=True) ds_2.save_arrays("ds_2")imgo_output/uptools/save_arrays/ds_2/X_train.h5 saved successfully. imgo_output/uptools/save_arrays/ds_2/y_train.h5 saved successfully. imgo_output/uptools/save_arrays/ds_2/X_val.h5 saved successfully. imgo_output/uptools/save_arrays/ds_2/y_val.h5 saved successfully. imgo_output/uptools/save_arrays/ds_2/X_test.h5 saved successfully. 
imgo_output/uptools/save_arrays/ds_2/y_test.h5 saved successfully.$$f(x,y)=\frac{1}{2\sqrt{(\pi^2 + x^2 + (y-1)^2)^3}}$$Покомпонентные плотности распределения:$$f(x)=\frac{1}{\pi^2 + x^2}$$$$f(y)=\frac{1}{\pi^2 + (y-1)^2}$$ X - распределение Коши с $x_0=0, \gamma=\pi$ Y - распределение Коши с $y_0=1, \gamma=\pi$ Совместные плотности распределения:$$f(x|y)=\frac{f(x,y)}{f(y)}=\frac{\pi^2 + (y-1)^2}{2\sqrt{(\pi^2 + x^2 + (y-1)^2)^3}}$$$$f(y|x)=\frac{f(x,y)}{f(x)}=\frac{\pi^2 + x^2}{2\sqrt{(\pi^2 + x^2 + (y-1)^2)^3}}$$Покомпонентные функции распределения:$$F(x)=\int_{-\infty}^{x}\frac{1}{\pi^2 + t^2}dt=\frac{1}{\pi}arctg\frac{x}{\pi}+\frac{1}{2}$$$$F(y)=\int_{-\infty}^{y}\frac{1}{\pi^2 + (t-1)^2}dt=\frac{1}{\pi}arctg\frac{y-1}{\pi}+\frac{1}{2}$$$$F(x|y)=\int_{-\infty}^{x}\frac{\pi^2 + (y-1)^2}{2\sqrt{(\pi^2 + x^2 + (y-1)^2)^3}}dt=\frac{x}{2\sqrt{\pi^2 + x^2 + (y-1)^2}} + \frac{1}{2}$$Разложим функцию распределения$$F(x,y)=F(y)F(x|y)$$import numpy as np import math from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D from statlib.metrics import generate_borders from scipy.stats import chi2 from statlib.metrics import pearson_chi plt.style.use('ggplot') rng = np.arange(-6, 6, 0.1) X, Y = np.meshgrid(rng, rng) Z = 1 / (2 * (math.pi**2 + X**2 + (Y - 1)**2)**1.5) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(X, Y, Z); from statlib.rand.basic_rand import BasicRand from statlib.rand.engine import LCGПромоделируем отдельно $F(y)$ и $F(x|y)$: $$\begin{cases} \xi_1=\frac{1}{\pi}arctg\frac{y-1}{\pi} + \frac{1}{2} \\ \xi_2=\frac{x}{2\sqrt{a^2+x^2}} + \frac{1}{2} \end{cases}, a=\sqrt{\pi^2+(y-1)^2}$$$$\begin{cases} y=1+\pi tg(\pi \xi_1 - \frac{\pi}{2}) \\ x=sgn(2\xi_2 - 1) sgn(1-(2\xi_2-1)^2) \frac{a\left | 2\xi_2 - 1 \right |}{\sqrt{\left | 1-(2\xi_2-1)^2 \right |}}\end{cases}$$from statlib.metrics import get_bins_count gen = BasicRand(LCG()) n = 1000000 ranges = [[-6, 6], [-6, 6]] bins = get_bins_count(n) def build_sample(n, gen): xi1 = np.array([gen.next() for _ in range(n)]) xi2 = np.array([gen.next() for _ in range(n)]) y_p = 1 + math.pi * np.tan(math.pi * xi1 - math.pi / 2) a = np.sqrt(math.pi**2 + (y_p - 1)**2) t = 2 * xi2 - 1 x_p = np.sign(t) * np.sign(1 - t**2) * a * np.abs(t) / np.sqrt(np.abs(1 - t**2)) return x_p, y_p x_p, y_p = build_sample(n, gen) x_p[:10], y_p[:10] fig = plt.figure() ax = fig.add_subplot(111, projection='3d') hist, xedges, yedges = np.histogram2d(x_p, y_p, bins=bins, range=ranges, normed=True) xpos, ypos = np.meshgrid(xedges[:-1], yedges[:-1]) xpos = xpos.ravel() ypos = ypos.ravel() zpos = 0 dx = dy = np.ones_like(zpos) dz = hist.ravel() ax.bar3d(xpos, ypos, zpos, dx, dy, dz) ax.plot_surface(X, Y, Z) plt.show() plt.ylabel('Y') plt.xlabel('X') plt.hist2d(x_p, y_p, bins=bins, range=ranges, density=True); plt.colorbar() plt.imshow(Z.T) plt.colorbar() plt.show() plt.imshow(hist) plt.colorbar() plt.show()Границы для равновероятностной гистограммыx_p_sorted = np.array(sorted(x_p)) y_p_sorted = np.array(sorted(y_p)) borders_eq_x = generate_borders(x_p_sorted, bins) borders_eq_y = generate_borders(y_p_sorted, bins)Проверка на соответствие закону распределенияdef f(x, y): return 1 / (2 * (math.pi**2 + x**2 + (y - 1)**2)**1.5) def pearson_chi_2d(x_p, y_p, bins_x, bins_y): n, m = len(bins_x) - 1, len(bins_y) - 1 cnt = len(x_p) chi_tab, _, _ = np.histogram2d(x_p, y_p, bins=[bins_x, bins_y], density=True) chi_val = 0 for i in range(n): for j in range(m): pij = f((bins_x[i] + bins_x[i + 1]) / 2, (bins_y[j] + bins_y[j + 1]) / 2) 
chi_val += (chi_tab[i, j] - pij)**2 / pij return chi_val exp_criteria = pearson_chi_2d(x_p, y_p, borders_eq_x, borders_eq_y) theor_criteria = chi2.ppf(0.05, len(borders_eq_x) * len(borders_eq_y)) print('Экспериментальное значение критерия: {}, теоретическое значение критерия: {}'.format(exp_criteria, theor_criteria)) if exp_criteria < theor_criteria: print('Критерий выполнен')Экспериментальное значение критерия: 1.6778356458648909, теоретическое значение критерия: 2791.5338511216537 Критерий выполненГистограмма для Xvals_x, bins_x, _ = plt.hist(x_p, bins=bins, range=ranges[0], density=True); plt.xlim(ranges[0]) vals_int_x, bins_int_x, _ = plt.hist(x_p_sorted, bins=borders_eq_x, density=True)Гистограмма для Yvals_y, bins_y, _ = plt.hist(y_p, bins=bins, range=ranges[1], density=True); plt.xlim(ranges[1]) vals_int_y, bins_int_y, _ = plt.hist(y_p_sorted, bins=borders_eq_y, density=True)Функция распределения для Xdef get_val_counts(arr): counts = {} for x in arr: counts[x] = counts.get(x, 0) + 1 return counts def build_distribution_function(x_var): counts = get_val_counts(x_var) F = [0] for i in range(len(x_var) - 1): F.append(F[i] + counts[x_var[i]] / n) return F F_x = build_distribution_function(x_p_sorted) plt.xlim(ranges[0]) plt.step(x_p_sorted, F_x)Функция распределения для YF_y = build_distribution_function(y_p_sorted) plt.xlim(ranges[1]) plt.step(y_p_sorted, F_y) def F_x(x): return 1 / math.pi * math.atan(x / math.pi) + 0.5 def F_y(y): return 1 / math.pi * math.atan((y - 1) / math.pi) + 0.5 def pearson_1d(bins, vals, F): n = np.sum(vals) chi_sqr = pearson_chi(bins, vals, F, n) table_pearson = chi2.ppf(0.05, len(bins)) print('Экспериментальное значение критерия: {}, теоретическое значение критерия: {}'.format(chi_sqr, table_pearson)) if chi_sqr < table_pearson: print('Критерий принят')Критерий согласия для Xprint('Критерий согласия для X:') pearson_1d(bins_int_x, vals_int_x, F_x)Критерий согласия для X: Экспериментальное значение критерия: 16.21092202052519, теоретическое значение критерия: 38.1162180624794 Критерий принятКритерий согласия для Yprint('Критерий согласия для Y:') pearson_1d(bins_int_y, vals_int_y, F_y)Критерий согласия для Y: Экспериментальное значение критерия: 16.263968087156524, теоретическое значение критерия: 38.1162180624794 Критерий принятТочечные оценкиНайдем точеные оценки: 1. Мода для X, Y2. Медиана для X, Y3. 
Корелляция Модаdef mode(vals, bins): mode_pos = np.argmax(vals) # берем середину интервала для лучшего приближения return (bins[mode_pos] + bins[mode_pos + 1]) / 2 mode_x = mode(vals_x, bins_x) mode_y = mode(vals_y, bins_y) print('Мода X:', mode_x) print('Мода Y:', mode_y)Мода X: 0.11538461538461542 Мода Y: 1.2692307692307696Медианаx_med = np.median(x_p) y_med = np.median(y_p) print('Медиана X:', x_med) print('Медиана Y:', y_med)Медиана X: -0.0013526901442064978 Медиана Y: 1.009174335583478Проверка независимостиПроверим независимость X и Y с помощью критерия хи квадратdef chi2_criteria_tab(x_p, y_p, bins_x, bins_y): n, m = len(bins_x) - 1, len(bins_y) - 1 cnt = len(x_p) chi_tab, _, _ = np.histogram2d(x_p, y_p, bins=[bins_x, bins_y], density=True) x_probs = np.array([np.sum(chi_tab[i, :]) for i in range(n)]) y_probs = np.array([np.sum(chi_tab[:, i]) for i in range(m)]) chi_val = 0 for i in range(n): for j in range(m): pij = x_probs[i] * y_probs[j] chi_val += (chi_tab[i, j] - pij)**2 / pij return chi_val * cnt exp_criteria = chi2_criteria_tab(x_p, y_p, borders_eq_x, borders_eq_y) theor_criteria = chi2.ppf(0.05, len(borders_eq_x) * len(borders_eq_y)) print('Экспериментальное значение критерия: {}, теоретическое значение критерия: {}'.format(exp_criteria, theor_criteria)) if exp_criteria < theor_criteria: print('Критерий выполнен')Экспериментальное значение критерия: 56509664.08480907, теоретическое значение критерия: 2791.5338511216537Доверительные интервалы Модаdef bootstrap_mode(alpha, gen, nsamples, nelements): n = nsamples bins = get_bins_count(nelements) modes_x = [] modes_y = [] for i in range(nsamples): x_p, y_p = build_sample(nelements, gen) x_p_sorted = np.array(sorted(x_p)) y_p_sorted = np.array(sorted(y_p)) borders_eq_x = generate_borders(x_p_sorted, bins) borders_eq_y = generate_borders(y_p_sorted, bins) vals_x, bins_x = np.histogram(x_p, bins=borders_eq_x, density=True) vals_y, bins_y = np.histogram(y_p, bins=borders_eq_y, density=True) modes_x.append(mode(vals_x, bins_x)) modes_y.append(mode(vals_y, bins_y)) modes_x.sort() modes_y.sort() interval_x = (modes_x[int(nsamples * alpha / 2)], modes_x[int(nsamples * (1 - alpha / 2))]) interval_y = (modes_y[int(nsamples * alpha / 2)], modes_y[int(nsamples * (1 - alpha / 2))]) return interval_x, interval_y for nsamples in [50, 100, 1000]: for nelements in [100, 300, 1000]: print('Количество выборок: {}, количество элементов в выборке: {}\n'.format(nsamples, nelements)) mode_interval_x, mode_interval_y = bootstrap_mode(0.05, gen, nsamples, nelements) print('Мода X:', mode_x) print('Доверительный интервал моды для X:', mode_interval_x) print() print('Мода Y:', mode_y) print('Доверительный интервал моды для Y:', mode_interval_y)Количество выборок: 50, количество элементов в выборке: 100 Мода X: 0.11538461538461542 Доверительный интервал моды для X: (-1.7462486138330848, 1.95345719068064) Мода Y: 1.2692307692307696 Доверительный интервал моды для Y: (-1.4434781617624892, 3.795266080877864) Количество выборок: 50, количество элементов в выборке: 300 Мода X: 0.11538461538461542 Доверительный интервал моды для X: (-1.2343404798848814, 1.5643994116059758) Мода Y: 1.2692307692307696 Доверительный интервал моды для Y: (-0.6912408203975304, 2.7221121085214053) Количество выборок: 50, количество элементов в выборке: 1000 Мода X: 0.11538461538461542 Доверительный интервал моды для X: (-1.2807264412412032, 1.3401898889674946) Мода Y: 1.2692307692307696 Доверительный интервал моды для Y: (-0.43172486617995737, 1.990486825467978) Количество выборок: 
100, количество элементов в выборке: 100 Мода X: 0.11538461538461542 Доверительный интервал моды для X: (-2.037216037641965, 2.042345489566466) Мода Y: 1.2692307692307[...]Медианаdef bootstrap_median(alpha, gen, nsamples, nelements): n = nsamples bins = get_bins_count(nelements) modes_x = [] modes_y = [] for i in range(nsamples): x_p, y_p = build_sample(nelements, gen) modes_x.append(np.mediand(x_p)) modes_y.append(np.mediand(y_p)) modes_x.sort() modes_y.sort() interval_x = (modes_x[int(nelements * alpha / 2)], modes_x[int(nelements * (1 - alpha / 2))]) interval_y = (modes_y[int(nelements * alpha / 2)], modes_y[int(nelements * (1 - alpha / 2))]) return interval_x, interval_y bootstrap_median(0.05, gen, 1000, 1000)Simulating Predator and Prey RelationshipWithout a predator, rabbits will reproduce until they reach the carrying capacity of the land. When coyotes show up, they will eat the rabbits and reproduce until they can't find enough rabbits. We will explore the fluctuations in the two populations over time. Using Lotka-Volterra Model Part 1: Rabbits without predatorsAccording to [Mother Earth News](https://www.motherearthnews.com/homesteading-and-livestock/rabbits-on-pasture-intensive-grazing-with-bunnies-zbcz1504), a rabbit eats six square feet of pasture per day. Let's assume that our rabbits live in a five acre clearing in a forest: 217,800 square feet/6 square feet = 36,300 rabbit-days worth of food. For simplicity, let's assume the grass grows back in two months. Thus, the carrying capacity of five acres is 36,300/60 = 605 rabbits.Female rabbits reproduce about six to seven times per year. They have six to ten children in a litter. According to [Wikipedia](https://en.wikipedia.org/wiki/Rabbit), a wild rabbit reaches sexual maturity when it is about six months old and typically lives one to two years. 
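Before moving on to reproduction, the carrying-capacity arithmetic above can be checked directly (a quick sanity check we added, assuming the usual 43,560 square feet per acre):

acres = 5
sq_ft = acres * 43560        # 217,800 square feet of pasture
rabbit_days = sq_ft / 6      # each rabbit eats 6 square feet of pasture per day
capacity = rabbit_days / 60  # grass regrows in two months (60 days)
print(capacity)              # 605.0 rabbits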
For simplicity, let's assume that in the presence of unlimited food, a rabbit lives forever, is immediately sexually mature, and has 1.5 children every month.For our purposes, then, let $x_t$ be the number of rabbits in our five acre clearing on month $t$.$$\begin{equation*} R_t = R_{t-1} + 1.5\frac{605 - R_{t-1}}{605} R_{t-1}\end{equation*}$$The formula could be put into general form$$\begin{equation*} R_t = R_{t-1} + growth_{R} \times \big( \frac{capacity_{R} - R_{t-1}}{capacity_{R}} \big) R_{t-1}\end{equation*}$$By doing this, we allow users to interact with growth rate and the capacity value visualize different interactionfrom __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual from IPython.display import display, clear_output import ipywidgets as widgets import matplotlib.pyplot as plt import numpy as np %matplotlib inline style = {'description_width': 'initial'} capacity_R = widgets.FloatText(description="Capacity", value=605) growth_rate_R = widgets.FloatText(description="Growth rate", value=1.5) initial_R = widgets.FloatText(description="Initial population",style=style, value=1) button_R = widgets.Button(description="Plot Graph") display(initial_R, capacity_R, growth_rate_R, button_R) def plot_graph_r(b): clear_output() display(initial_R, capacity_R, growth_rate_R, button_R) fig = plt.figure() ax = fig.add_subplot(111) t = np.arange(0, 20, 1) s = np.zeros(t.shape) R = initial_R.value for i in range(t.shape[0]): s[i] = R #print(s[i]) nextvalue = growth_rate_R.value * (capacity_R.value - R)/(capacity_R.value) * R #print('nextvalue: ', nextvalue) R = R + nextvalue ax.plot(t, s) ax.set(xlabel='time (months)', ylabel='rabbits', title='Rabbits Without Predators') ax.grid() button_R.on_click(plot_graph_r)Part 2: Coyotes without preysAccording to [Huntwise](https://www.besthuntingtimes.com/blog/2020/2/3/why-you-should-coyote-hunt-how-to-get-started), coyotes need to consume about 2-3 pounds of food per day. Their diet is 90 percent mammalian. The perfect adult cottontail rabbits weigh 2.6 pounds on average. Thus, we assume the coyote eats one rabbit per day. For coyotes, the breeding season is in February and March. According to [Wikipedia](https://en.wikipedia.org/wiki/CoyoteSocial_and_reproductive_behaviors), females have a gestation period of 63 days, with an average litter size of 6, though the number fluctuates depending on coyote population density and the abundance of food. 
By fall, the pups are old enough to hunt for themselves.In the absence of rabbits, the number of coyotes will drop, as their food supply is scarce.The formula could be put into general form:$$\begin{align*} C_t & \sim (1 - death_{C}) \times C_{t-1}\\ &= C_{t-1} - death_{C} \times C_{t-1}\end{align*}$$%matplotlib inline style = {'description_width': 'initial'} initial_C=widgets.FloatText(description="Initial Population",style=style,value=200.0) declining_rate_C=widgets.FloatText(description="Death rate",value=0.5) button_C=widgets.Button(description="Plot Graph") display(initial_C, declining_rate_C, button_C) def plot_graph_c(b): clear_output() display(initial_C, declining_rate_C, button_C) fig = plt.figure() ax = fig.add_subplot(111) t1 = np.arange(0, 20, 1) s1 = np.zeros(t1.shape) C = initial_C.value for i in range(t1.shape[0]): s1[i] = C C = (1 - declining_rate_C.value)*C ax.plot(t1, s1) ax.set(xlabel='time (months)', ylabel='coyotes', title='Coyotes Without Predators') ax.grid() button_C.on_click(plot_graph_c)Part 3: Interaction Between Coyotes and RabbitWith the simple interaction from the first two parts, now we can combine both interaction and come out with simple interaction.$$\begin{align*} R_t &= R_{t-1} + growth_{R} \times \big( \frac{capacity_{R} - R_{t-1}}{capacity_{R}} \big) R_{t-1} - death_{R}(C_{t-1})\times R_{t-1}\\\\ C_t &= C_{t-1} - death_{C} \times C_{t-1} + growth_{C}(R_{t-1}) \times C_{t-1}\end{align*}$$In equations above, death rate of rabbit is a function parameterized by the amount of coyote. Similarly, the growth rate of coyotes is a function parameterized by the amount of the rabbit.The death rate of the rabbit should be $0$ if there are no coyotes, while it should approach $1$ if there are many coyotes. One of the formula fulfilling this characteristics is hyperbolic function.$$\begin{equation}death_R(C) = 1 - \frac{1}{xC + 1}\end{equation}$$where $x$ determines how quickly $death_R$ increases as the number of coyotes ($C$) increases. Similarly, the growth rate of the coyotes should be $0$ if there are no rabbits, while it should approach infinity if there are many rabbits. 
One of the formula fulfilling this characteristics is a linear function.$$\begin{equation}growth_C(R) = yC\end{equation}$$where $y$ determines how quickly $growth_C$ increases as number of rabbit ($R$) increases.Putting all together, the final equtions are$$\begin{align*} R_t &= R_{t-1} + growth_{R} \times \big( \frac{capacity_{R} - R_{t-1}}{capacity_{R}} \big) R_{t-1} - \big( 1 - \frac{1}{xC_{t-1} + 1} \big)\times R_{t-1}\\\\ C_t &= C_{t-1} - death_{C} \times C_{t-1} + yR_{t-1}C_{t-1}\end{align*}$$#### %matplotlib inline initial_rabbit = widgets.FloatText(description="Initial Rabbit", value=1) initial_coyote = widgets.FloatText(description="Initial Coyote", value=1) capacity = widgets.FloatText(description="capacity_R", value=5) growth_rate = widgets.FloatText(description="growth_R", value=1) death_rate = widgets.FloatText(description="death_C", value=1) x = widgets.FloatText(description="x", value=1) y = widgets.FloatText(description="y", value=1) button = widgets.Button(description="Plot Graph") display(initial_rabbit, initial_coyote, capacity, growth_rate, death_rate, x, y, button) def plot_graph(b): clear_output() display(initial_rabbit, initial_coyote, capacity, growth_rate, death_rate, x, y, button) fig = plt.figure() ax = fig.add_subplot(111) t = np.arange(0, 20, 0.5) s = np.zeros(t.shape) p = np.zeros(t.shape) R = initial_rabbit.value C = initial_coyote.value for i in range(t.shape[0]): s[i] = R p[i] = C R = R + growth_rate.value * (capacity.value - R)/(capacity.value) * R - (1 - 1/(x.value*C + 1))*R C = C - death_rate.value * C + y.value*s[i]*C ax.plot(t, s, label="rabit") ax.plot(t, p, label="coyote") ax.set(xlabel='time (months)', ylabel='rabbits', title='Coyotes Rabbit Predator Prey Relationship') ax.grid() ax.legend() button.on_click(plot_graph)Using ABM simulationIn ABMs of ecological and evolutionary dynamics, prey naturally grow but get eaten by predators, while the predators grow if they get prey, but naturally die off if they can't find any food. In our ABM simulation model, we will randomly choosing an agent to update the system's state in an asynchronous manner. Design the data structure to store the attributes of the the prey and predatorsThe information about about agent type must be represented in the data structure, also in order to simulate the interactions in a spacel, the information about the agents spatial location is also needed. In the code, we use r_init and c_init to represent the initial population of rabbits and coyotes. The for loop iterates r_init + c_init times, and in the first r_init iteration, the prey agents are generated, while the predator agents are generated for the rest. The rules for how prey and predators behave on their own:If a prey agent meets a predator agent, it dies with some probability because of predation. We will implement death as the removal of the prey from the preys agents list.If a predator agent can't find any prey agents nearby, it dies with some probability because of the lack of food. Otherwise, it will reproduce at a certain reproduction rate. According to [Purely Facts](http://purelyfacts.com/question/12/which-is-faster-a-coyote-or-a-rabbit?DDA=28&DDB=98), the top speed is roughly 64 kph for coyote, and 4 kph for rabbit, so let's assume coyotes are 15 times faster than rabbit. 
Furthermore, to ensure our simulation model can naturally handle situations where the size of the agent population changes rapidly, and guarantees that each agent is updated once, on average, in each unit time length, we defined a update_one_unit_time() function to address the issue. We make the unit length of time passes by in each asynchronous updating proportional to the size of the agent populationat the time of updating. This way, the progress of time will be steady in the simulation, even if the number of agents changes over time. Static Single Stepfrom __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual from IPython.display import display, clear_output import ipywidgets as widgets %matplotlib inline # matplotlib.use('TkAgg') from pylab import * import copy as cp r_init = 0 # Initial rabbit population c_init = 0 #Initial coyotes population nr = 0 # Capacity of rabbits mr = 20.0 # Magnitude of movement of rabbits in feet per day rr = 0 # Reproduction rate of rabbits (per day) dc = 0 # Death rate of coyotes when there is no rabbit (per day) rc = 0 # Reproduction rate of coyotes (per day) cd = 20.0 # Radius for collision detection in feet mc = mr * 2 # Magnitude of movement of coyotes in feet per day cc = 3 # How many unburned rabbits a coyote can contain ic = 0.04 # Likelihood that an immigrant coyote will show up on any day ir = 0.1 #Likelihood that an immigrant rabbit will show up on any day cdsq = cd**2 width = 500 # Width of pasture in feet height = 500 # Height of pasture in feet class Rabbit: pass class Coyote: pass def initialize(a): global r_init, c_init, nr, mr, rr, dc, rc, mc, cc, ic, ir, cdsq # Give default values r_init = initial_rabbit_count.value # Initial rabbit population c_init = initial_coyote_count.value #Initial coyotes population nr = rabbit_capacity.value # Capacity of rabbits mr = 20.0 # Magnitude of movement of rabbits in feet per day rr = rabbit_reproduction.value # Reproduction rate of rabbits (per day) dc = coyote_death_rate.value # Death rate of coyotes when there is no rabbit (per day) rc = coyote_birth_rate.value # Reproduction rate of coyotes (per day) cd = 20.0 # Radius for collision detection in feet mc = mr * 2 # Magnitude of movement of coyotes in feet per day cc = 3 # How many unburned rabbits a coyote can contain ic = 0.04 # Likelihood that an immigrant coyote will show up on any day ir = 0.1 #Likelihood that an immigrant rabbit will show up on any day cdsq = cd**2 ''' Created for CSE/ECE 6730 final project ''' global rabbits, coyotes, rdata, cdata rabbits = [] coyotes = [] rdata = [] cdata = [] for i in range(r_init): ag = Rabbit() ag.birthday = 0 ag.x = width * random() ag.y = height * random() rabbits.append(ag) for i in range(c_init): ag = Coyote() ag.unburned_rabbits = 0 #Born hungry ag.birthday = 0 ag.x = width * random() ag.y = height * random() coyotes.append(ag) observe() def update_one_unit_time(b): global rabbits, coyotes new_coyotes = [] new_rabbits = [] # Find the weighted center of mass for coyotes # Coyotes who see no rabbits will head in this direction center_x = width / 2.0 center_y = height / 2.0 for coyote in coyotes: sx = 0.0 sy = 0.0 n = 0 sx += coyote.x * coyote.unburned_rabbits sy += coyote.y * coyote.unburned_rabbits n += coyote.unburned_rabbits if n > 0: center_x = sx / n center_y = sy / n # Immigrant coyote? 
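# With probability ic per day (0.04 above), a coyote immigrates from outside the
# pasture: it is created at a random location and starts with one unburned rabbit.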
if random() < ic: ag = Coyote() ag.unburned_rabbits = 1 ag.birthday = 0 # FIXME ag.x = width * random() ag.y = height * random() new_coyotes.append(ag) # Immigrant rabbit? if random() < ir: ag = Rabbit() ag.birthday = 0 # FIXME ag.x = width * random() ag.y = height * random() new_rabbits.append(ag) # Move rabbits randomly for ag in rabbits: ag.x += uniform(-mr, mr) ag.y += uniform(-mr, mr) ag.x = width if ag.x > width else 0 if ag.x < 0 else ag.x ag.y = height if ag.y > height else 0 if ag.y < 0 else ag.y # Move coyotes to a nearby rabbit for coyote in coyotes: reachable_rabbits = [nb for nb in rabbits if (coyote.x - nb.x)**2 +(coyote.y - nb.y)**2 < mc**2] reachable_count = len(reachable_rabbits) # No reachable rabbits? Move toward fat coyotes if reachable_count == 0: coyote.x = 0.9 * coyote.x + 0.1 * center_x coyote.y = 0.9 * coyote.y + 0.1 * center_y else: if reachable_count == 1: target = reachable_rabbits[0] if reachable_count > 1: target = reachable_rabbits[randint(0, reachable_count - 1)] coyote.x = target.x coyote.y = target.y # Feed the coyotes for coyote in coyotes: # Burn one rabbit coyote.unburned_rabbits = coyote.unburned_rabbits - 1 # Find the rabbits that are nearby near_rabbits = [nb for nb in rabbits if (coyote.x - nb.x)**2 +(coyote.y - nb.y)**2 < cdsq] # How many rabbits will the coyote eat? to_eat = min(cc - coyote.unburned_rabbits, len(near_rabbits)) # Eat them while to_eat > 0: rabbit_to_eat = near_rabbits[0] rabbits.remove(rabbit_to_eat) del near_rabbits[0] coyote.unburned_rabbits += 1 to_eat = to_eat - 1 # Kill hungry coyotes i = 0 while i < len(coyotes): coyote = coyotes[i] # Empty belly for eight days? Coyote dies if coyote.unburned_rabbits < -8: del coyotes[i] continue # Less? Coyote has a chance if coyote.unburned_rabbits <= 0: if random() < dc: del coyotes[i] else: i += 1 else: i += 1 # Reproduce fed coyotes for coyote in coyotes: if coyote.unburned_rabbits > 0 and random() < rc: ag = Coyote() ag.unburned_rabbits = 0 #Born hungry ag.birthday = 0 # FIXME ag.x = coyote.x + 10.0 * random() ag.y = coyote.y + 10.0 * random() new_coyotes.append(ag) # Reproduce uneated rabbits growth_rate = 1 - len(rabbits)/nr; for rabbit in rabbits: if random() < rr * growth_rate: ag = Rabbit() ag.birthday = 0 # FIXME ag.x = rabbit.x + 10.0 * random() ag.y = rabbit.y + 10.0 * random() new_rabbits.append(ag) # Update arrays coyotes.extend(new_coyotes) rabbits.extend(new_rabbits) observe() def observe(): global agents, rdata, cdata, width, height #cla() clear_output() display(initial_rabbit_count, initial_coyote_count, rabbit_capacity, rabbit_reproduction, coyote_death_rate, coyote_birth_rate,button_init, button_step) fig = plt.figure(figsize=(20,10)) ax = fig.add_subplot(121) #cla() rdata.append(len(rabbits)) if len(rabbits) > 0: x = [ag.x for ag in rabbits] y = [ag.y for ag in rabbits] ax.plot(x,y,'k.') cdata.append(len(coyotes)) if len(coyotes) > 0: x = [ag.x for ag in coyotes] y = [ag.y for ag in coyotes] ax.plot(x,y,'ro') ax.axis('image') ax.axis([0,width,0,height]) summary = "Rabbits: {} Coyotes: {}".format(len(rabbits), len(coyotes)) ax = fig.add_subplot(122) #cla() ax.plot(rdata, label ='prey') ax.plot(cdata, label = 'predator') ax.set_title(summary) #print("hello") legend() style = {'description_width': 'initial'} initial_rabbit_count = widgets.IntText(description="Initial_rabbit_count", value=400, style=style) initial_coyote_count = widgets.IntText(description="Initial_coyote_count", value=12, style=style) rabbit_capacity = 
widgets.IntText(description="Rabbit_capacity",style=style, value=605) rabbit_reproduction = widgets.FloatText(description="Rabbit_reproduction",style=style, value=0.1) coyote_death_rate = widgets.FloatText(description="coyote_death_rate",style=style, value=0.07) coyote_birth_rate = widgets.FloatText(description="coyote_birth_rate",style=style, value=0.03) button_step = widgets.Button(description="Step") button_init = widgets.Button(description="Init") display(initial_rabbit_count, initial_coyote_count, rabbit_capacity, rabbit_reproduction, coyote_death_rate, coyote_birth_rate,button_init, button_step) button_init.on_click(initialize) button_step.on_click(update_one_unit_time)GUI Interactionfrom __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual from IPython.display import display, clear_output import ipywidgets as widgets import matplotlib matplotlib.use('TkAgg') from pylab import * import copy as cp import pycxsimulator # Supress warnings # import sys # oldstderr = sys.stderr # sys.stderr = open('/dev/null', 'w') # Give default values r_init = 400 # Initial rabbit population c_init = 12 #Initial coyotes population nr = 605 # Capacity of rabbits mr = 20.0 # Magnitude of movement of rabbits in feet per day rr = 0.1 # Reproduction rate of rabbits (per day) dc = 0.07 # Death rate of coyotes when there is no rabbit (per day) rc = 0.03 # Reproduction rate of coyotes (per day) cd = 20.0 # Radius for collision detection in feet mc = mr * 2 # Magnitude of movement of coyotes in feet per day cc = 3 # How many unburned rabbits a coyote can contain ic = 0.04 # Likelihood that an immigrant coyote will show up on any day ir = 0.1 #Likelihood that an immigrant rabbit will show up on any day cdsq = cd**2 width = 500 # Width of pasture in feet height = 500 # Height of pasture in feet class Rabbit: pass class Coyote: pass def initialize(): ''' Created for CSE/ECE 6730 final project ''' global rabbits, coyotes, rdata, cdata rabbits = [] coyotes = [] rdata = [] cdata = [] for i in range(r_init): ag = Rabbit() ag.birthday = 0 ag.x = width * random() ag.y = height * random() rabbits.append(ag) for i in range(c_init): ag = Coyote() ag.unburned_rabbits = 0 #Born hungry ag.birthday = 0 ag.x = width * random() ag.y = height * random() coyotes.append(ag) def update_one_unit_time(): global rabbits, coyotes new_coyotes = [] new_rabbits = [] # Find the weighted center of mass for coyotes # Coyotes who see no rabbits will head in this direction center_x = width / 2.0 center_y = height / 2.0 for coyote in coyotes: sx = 0.0 sy = 0.0 n = 0 sx += coyote.x * coyote.unburned_rabbits sy += coyote.y * coyote.unburned_rabbits n += coyote.unburned_rabbits if n > 0: center_x = sx / n center_y = sy / n # Immigrant coyote? if random() < ic: ag = Coyote() ag.unburned_rabbits = 1 ag.birthday = 0 # FIXME ag.x = width * random() ag.y = height * random() new_coyotes.append(ag) # Immigrant rabbit? if random() < ir: ag = Rabbit() ag.birthday = 0 # FIXME ag.x = width * random() ag.y = height * random() new_rabbits.append(ag) # Move rabbits randomly for ag in rabbits: ag.x += uniform(-mr, mr) ag.y += uniform(-mr, mr) ag.x = width if ag.x > width else 0 if ag.x < 0 else ag.x ag.y = height if ag.y > height else 0 if ag.y < 0 else ag.y # Move coyotes to a nearby rabbit for coyote in coyotes: reachable_rabbits = [nb for nb in rabbits if (coyote.x - nb.x)**2 +(coyote.y - nb.y)**2 < mc**2] reachable_count = len(reachable_rabbits) # No reachable rabbits? 
Move toward fat coyotes if reachable_count == 0: coyote.x = 0.9 * coyote.x + 0.1 * center_x coyote.y = 0.9 * coyote.y + 0.1 * center_y else: if reachable_count == 1: target = reachable_rabbits[0] if reachable_count > 1: target = reachable_rabbits[randint(0, reachable_count - 1)] coyote.x = target.x coyote.y = target.y # Feed the coyotes for coyote in coyotes: # Burn one rabbit coyote.unburned_rabbits = coyote.unburned_rabbits - 1 # Find the rabbits that are nearby near_rabbits = [nb for nb in rabbits if (coyote.x - nb.x)**2 +(coyote.y - nb.y)**2 < cdsq] # How many rabbits will the coyote eat? to_eat = min(cc - coyote.unburned_rabbits, len(near_rabbits)) # Eat them while to_eat > 0: rabbit_to_eat = near_rabbits[0] rabbits.remove(rabbit_to_eat) del near_rabbits[0] coyote.unburned_rabbits += 1 to_eat = to_eat - 1 # Kill hungry coyotes i = 0 while i < len(coyotes): coyote = coyotes[i] # Empty belly for eight days? Coyote dies if coyote.unburned_rabbits < -8: del coyotes[i] continue # Less? Coyote has a chance if coyote.unburned_rabbits <= 0: if random() < dc: del coyotes[i] else: i += 1 else: i += 1 # Reproduce fed coyotes for coyote in coyotes: if coyote.unburned_rabbits > 0 and random() < rc: ag = Coyote() ag.unburned_rabbits = 0 #Born hungry ag.birthday = 0 # FIXME ag.x = coyote.x + 10.0 * random() ag.y = coyote.y + 10.0 * random() new_coyotes.append(ag) # Reproduce uneated rabbits growth_rate = 1 - len(rabbits)/nr; for rabbit in rabbits: if random() < rr * growth_rate: ag = Rabbit() ag.birthday = 0 # FIXME ag.x = rabbit.x + 10.0 * random() ag.y = rabbit.y + 10.0 * random() new_rabbits.append(ag) # Update arrays coyotes.extend(new_coyotes) rabbits.extend(new_rabbits) def observe(): global agents, rdata, cdata, width, height cla() subplot(1, 2 , 1) cla() rdata.append(len(rabbits)) if len(rabbits) > 0: x = [ag.x for ag in rabbits] y = [ag.y for ag in rabbits] plot(x,y,'k.') cdata.append(len(coyotes)) if len(coyotes) > 0: x = [ag.x for ag in coyotes] y = [ag.y for ag in coyotes] plot(x,y,'ro') axis('image') axis([0,width,0,height]) summary = "Rabbits: {} Coyotes: {}".format(len(rabbits), len(coyotes)) p = subplot(1, 2, 2) cla() plot(rdata, label ='prey') plot(cdata, label = 'predator') p.set_title(summary) legend() # Parameter setters def initial_rabbit_count (val = r_init): ''' Initial rabbit population ''' global r_init r_init = int(val) # or int(val), str(val), etc. return val def initial_coyote_count (val = c_init): ''' Initial coyote population ''' global c_init c_init = int(val) # or int(val), str(val), etc. return val def rabbit_capacity (val = nr): ''' Max rabbit population ''' global nr nr = int(val) # or int(val), str(val), etc. return val def rabbit_reproduction (val = rr ): ''' Probability that a rabbit will give birth any particular day ''' global r rr = float(val) # or int(val), str(val), etc. return val def coyote_death_rate (val = dc ): ''' Probability that a coyote will die on a day it is not near any rabbits ''' global dc dc = float(val) # or int(val), str(val), etc. return val def coyote_birth_rate (val = rc): ''' Probability that a coyote will give birth on any particular day ''' global rc rc = float(val) # or int(val), str(val), etc. 
return val pycxsimulator.GUI(parameterSetters = [initial_rabbit_count, initial_coyote_count, rabbit_capacity, rabbit_reproduction, coyote_death_rate, coyote_birth_rate]).start(func = [initialize, observe, update_one_unit_time])Function to convert ALL the images in the images subfolder to 1D vectordef get_image_vector_dict(): file_list = glob.glob("images\\*.png") img_vector_dict = {} for f in file_list: try: #read the sample.png image img = imgops.grayscale(Image.open(f)) except: print(f'ERROR! Unable to open {f}') #convert the img to a vector img88 = img.resize((8,8)) arr88 = np.array(img88) flat_arr88 = arr88.ravel() #print(flat_arr88) #print(flat_arr88.shape) img_vector_dict[f] = flat_arr88 #print(img_vector_dict) return img_vector_dict def extract_digit(file_name): a = file_name.split("\\") b = a[1].split('.') return (b[0])[-1] def validate_predictions(predictions): print('predictions:', predictions) print('|Expected|Predicted|Pass/Fail|') for f_nm, pred in predictions.items(): expected = extract_digit(f_nm) pf = 'PASS' if int(expected) == int(pred) else 'FAIL' print(f'|{expected}|{pred}|{pf}') # Using the model..... # import numpy as np # from PIL import Image # from PIL import ImageOps as imgops # import joblib # from sklearn.datasets import load_digits # import glob # load the sample img_dict = get_image_vector_dict() #print(len(sample)) if len(img_dict) < 1 or len(img_dict.values()) < 1: print('Sample image error!') exit(404) # load digits dataset so we can look up our predicted target #digits = load_digits() - loaded earlier in the notebook predictions = {} # loop thru the vectors and run them through the prediction model for img_nm, smpl in img_dict.items(): p = knc.predict([smpl]) predictions[img_nm] = digits.target_names[p[0]] validate_predictions(predictions) #print('predictions:', predictions) print('The End!')predictions: {'images\\sample1.png': 0, 'images\\sample2.png': 1, 'images\\sample3.png': 9, 'images\\sample5.png': 9, 'images\\sample6.png': 9, 'images\\sample7.png': 0, 'images\\sample8.png': 9, 'images\\sample9.png': 1} |Expected|Predicted|Pass/Fail| |1|0|FAIL |2|1|FAIL |3|9|FAIL |5|9|FAIL |6|9|FAIL |7|0|FAIL |8|9|FAIL |9|1|FAIL The End!Numpyimport numpy as npThe core of the `numpy` package is the `array` class. Let's examine that first. We can make an array out of a sequence, like a list.d = [1, 2, 3, 4, 5] np.array(d)data typesUnlike lists, arrays must be homogeneous, in that the data types of each element must be the same. The data type of the array is upcast to be able to represent all of the data. So, if only one element is a float, all elements will be converted to floats.d = [1, 2, 3.1415, 4, 5] np.array(d)You can query the datatype by examining the `dtype` attribute of the array.d = [1, 2, 3.1415, 4, 5] arr = np.array(d) arr.dtypeArray types may be defined explicity in the callarr = np.array([1, 2, 3, 4, 5], dtype='float32') arrComplex numbers are noted with a lowercase `j` or uppercase `J`, like thiscmplx = np.array([1.0+2.0j, 3.0+4.0J]) print(cmplx) cmplx.dtype[ 1.+2.j 3.+4.j]As we have seen before, arrays are like multidimensional sequences. We can create a 2D array by supplying a list of lists as the argument.arr = np.array([[1., 2., 3.,], [4., 5., 6.]]) arrArray attributesArrays have a few other important attributes. Note attributes never have parentheses after them. 
Methods always do.arr.size # The number of elements in the array arr.shape # The shape of the array (i.e., the size of each dimension) arr.ndim # The number of dimensions of the arraySetting array shapeYou can set the `array.shape` attribute to change the shape of the array. This attribute does not change the elements of the array, or how it is stored in memory, just how it is seen.arr.shape = (3, 2) arr arr.shape = (6,) arrSingleton dimensions add to the dimensionality of an array. The last example was a 1D array (also called a vector), the next are 2D arrays.arr.shape = (1, 6) arr # Note that there are *two* square brackets in the output sequence. This is a row vector. arr.shape = (6, 1) arr # this is also a 2D array, like a column vectorArray indexingArrays are indexed in a similar way to sequences, with `start:stop:stride` notation, except that this is used for each dimension in the array. Colons denote all the values in a particular dimension, slices indicate some particular subset of the data in that particular dimension. A common use case is to get a single row or column from a 2D array (a table of data).arr = np.arange(60).reshape(6, 10) arr arr[:, 4] # the 5th column arr[2, :] # the 3rd row arr[2] # Trailing colons do not need to be explicitly typed. This is equivalent to the last example. arr[4, 7] # an individual element in the table--- *Exercise*> Slices can be combined in any way. Define a new array or use array `arr` and grab out every other row and the 4th column and beyond.--- Conventions concerning arrays containing spatio-temporal informationGenerally, you will want to think of arrays as representing dimensions in space and time. The conventional way to think of this is that the dimensions are $(t, z, y, x)$; missing dimensions are omitted. This will help make plotting and analysis easier. Some examples might be: temp[:, :, :, :] A 4D array (time, height, latitude, longitude) press[:, :] A 2D array (time, height) humid[:, :] A 2D array (latitude, longitude) Array methodsArrays have a number of methods. Let's take a look at the `mean` method as an example.arr = np.array([[1., 2., 3.,], [4., 5., 6.]]) # reset the array to our 2x3 array. arr.mean() # The mean of all of the elements in the array`Mean` takes the optional argument `axis` that can be used to take the mean along a single axis of the array. Just like with indexing, the axes are reference in a zero-based system; `axis=0` means the first dimension.arr.mean(axis=0) # The meanIn this case, there are two rows in the first dimension, and `arr.mean(axis=0)` takes the average in the 'row' direction, resulting in a 1D array that is the average across the rows. --- *Exercise*> Find the mean of the array in the 'column' direction, along `axis=1`.> Use the `sum` method of the array class to get the sum of the numbers in each column. The result should be a 1D array with three elements.--- You can also use the `reshape` method to change the shape of an array.arr arr.reshape(3, 2)You can find the mininum and maximum of an array with the `min` and `max` methods. Sometimes it is useful to find the indices of these minima and maxima. For this use `argmin` and `argmax`, likex = np.random.rand(10) imax = x.argmax() print(imax, x[imax], x.max())7 0.947778556457 0.947778556457Creating standard arraysThere are a few standard arrays, for example, arrays filled with zeros or ones (or empty). 
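A possible solution sketch for the two exercises above (the slicing exercise and the axis-wise mean and sum); the variable names here are illustrative:

import numpy as np

arr = np.arange(60).reshape(6, 10)
sub = arr[::2, 3:]            # every other row, 4th column and beyond
print(sub.shape)              # (3, 7)

arr2 = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr2.mean(axis=1))      # mean along axis=1, the 'column' direction -> [2. 5.]
print(arr2.sum(axis=0))       # sum of each column -> [5. 7. 9.], a 1D array with three elements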
Here are some examples of creating arrays.o = np.ones((3, 4, 5)) # The argument is a shape, so is a tuple with the length of each dimension as an argument b = np.ones((2, 3), dtype=np.bool) z = np.zeros((2, 3), dtype=np.float32) bYou can also create these arrays with the same shape and datatype of the input array using `np.ones_like` and `np.zeros_like`.zb = np.zeros_like(b) zbYou can also create a diagonal array with a given vector along the diagonal. These can be offset with an optional argument `k` (default=0). This example creates a tri-diagonal array similar to that used for finite difference calculationsnp.diag(-2*np.ones(6)) + np.diag(np.ones(5), k=-1) + np.diag(np.ones(5), k=1)There are also a number of ways to generate sequences of numbers. - `np.arange([start,] stop [[, stride]])` Create a sequence of numbers, similar to `range` - `np.linspace(min, max, length)` Create a uniform series of specified `length` between `min` and `max`, inclusive. - `np.logspace(minpow, maxpow, length)` Create a uniform series in logspace of specified `length` between `10**minpow` and `10**maxpow`, inclusive.np.arange(10.) np.arange(2, 10, 2) np.linspace(2, 4, 17)You can create arrays of random numbers easily with methods in `np.random`.* `np.random.rand(d0, d1, ..., d2)`: Create an array of the given shape `d0, ..., dn` and populate it with random samples from a uniform distribution over [0,1).* `np.random.randint(low, high=None, size=None)`: Return random integers from `low` (inclusive) to `high` (exclusive). If `high` is None then return integers from [0, `low`). `size` is an int or tuple of ints to give the output shape.* `np.randon.randn(d0, d1, ..., d2)`: Create an array of the given shape `d0, ..., dn` and populate it with random samples from the "standard normal" distribution.* `np.random.random(size=None)`: Return random floats of `size` (int or tuple of ints) in the interval [0, 1).np.random.rand(2, 4) np.random.randint(1, 50, (2, 4))--- *Exercise*> Create an array of random floats between 0 and 1 that has dimension 5 x 3. Calculate the standard deviation of the columns of the array. Then add to this a `linspace` array of the appropriate size that contains numbers between 10 and 15.--- Combining and splitting arraysGenerally, arrays can be combined with the `np.concatenate` function. The arguments are a sequence of arrays to join, and the axis along which to join them (default=0).x = np.random.rand(4, 5, 6) y = np.random.rand(4, 5, 6) print(np.concatenate((x, y)).shape) print() print(np.concatenate((x, y), axis=0).shape) print(np.concatenate((x, y), axis=1).shape) print(np.concatenate((x, y), axis=2).shape)(8, 5, 6) (8, 5, 6) (4, 10, 6) (4, 5, 12)There are a number of convenience functions that act like concatenate for specific axes: - `np.vstack` – vertical stack (stack along axis=0) - `np.hstack` – horizontal stack (stack along axis=1) - `np.dstack` – depth stack (stack along axis=2)print(np.vstack((x, y)).shape) print(np.hstack((x, y)).shape) print(np.dstack((x, y)).shape)(8, 5, 6) (4, 10, 6) (4, 5, 12)Likewise, arrays can be split with `np.split` or `np.array_split`. There are also convenience functions to split horizontally, vertically, and with depth.x = np.random.rand(12, 2, 5) [a.shape for a in np.split(x, 4, axis=0)]--- *Exercise*> Create an array, A, of shape (40, 50, 60). 
The array slices for first ten entries in the axis=1 direction of A should be filled with 1's, for the next ten filled with 2's, and on up to 5's.> Split it along axis=1 into five sections.> Concatenate two of these back together along axis 1.> What is the resulting shape of each array? _[Advanced: can you calculate this on one line?]_--- Finding valuesThere are a number of ways to find values in an array. The simplest is always to create a boolean array, likex = np.random.rand(5, 5) print(x) ind = x > 0.5 print(ind)[[ 0.11873289 0.40188555 0.96426409 0.60360203 0.54526375] [ 0.20289134 0.98457289 0.3847002 0.00733166 0.63739985] [ 0.50234835 0.25930096 0.36977497 0.05787961 0.95391643] [ 0.02016065 0.69361985 0.5945571 0.381457 0.33079702] [ 0.62571904 0.18626591 0.35990447 0.65804042 0.12489787]] [[False False True True True] [False True False False True] [ True False False False True] [False True True False False] [ True False False True False]]The boolean array can be used as an index to other arrays. Note this will return a 1D array, no matter what dimension the origial arrays are, because there is no way to know what structure the `True` values have.x = np.random.rand(5, 5) y = np.sin(x) y[x > 0.5] # or, equivalently, as two lines idx = x > 0.5 y[idx]To get the indices of the places where the conditional is true (i.e., the locations of the `True` values in the boolean array), use the `np.where` command.x = np.random.rand(5, 5) idx = np.where(x > 0.5) idxNote that `np.where` always returns a tuple of indices for each dimension. This is a little strange for 1D arrays, but is done for consistency across all input values. Often, you will want to explicitly pull out the (single) array of indices from the tuple, likex = np.random.rand(10) idx = np.where(x>0.5)[0] print(idx)_What happens with the [0] is missing behind the call to `where`?_ --- *Exercise*> You can also use these calculated indices, or boolean matrices on the left hand side for assignment.> Create a 10x10 random array, with values between 0 and 1. Replace all of the numbers smaller than 0.5 with zero.> Do this first not using `where` and then do it using `where`.--- Array viewsThe data for an array may be stored in memory using `C` or `FORTRAN` ordered memory. Typically, there is no need to think about this, some details can be found [here](http://docs.scipy.org/doc/numpy-1.10.0/reference/internals.html).However, it is important to remember that subsets of an array can produce a different 'view' of the array that addresses the same memory as the original array. This can lead to some unexpected behaviors. One way to think of this is that assignment in Python is more like a C-pointer (i.e., a reference to a memory location) than an actual value.a = np.arange(10.0) b = a[::2] print(a) print(b) a[4] = -999 # this will modify b as well... print(a) print(b) b[-1] = -888 # this will modify a as well... print(a) print(b)[ 0. 1. 2. 3. -999. 5. 6. 7. -888. 9.] [ 0. 2. -999. 6. -888.]Normally, this will not be a problem, but if you need to make sure that a subset of an array has it's own memory, make sure you make a `copy` of the array, likea = np.arange(10.0) b = a.copy()[::2] # or np.copy(a) a[4] = -999 # this will NOT modify b now print(a) print(b)[ 0. 1. 2. 3. -999. 5. 6. 7. 8. 9.] [ 0. 2. 4. 6. 
8.]Array broadcasting(Largely taken from [SciPy docs](https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html))Generally arrays should be the same shape for them to be multiplied together.a = np.array([1.0, 2.0, 3.0]) b = np.array([2.0, 2.0, 2.0]) a * bThe term broadcasting describes how `numpy` treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. Broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. It does this without making needless copies of data and usually leads to efficient algorithm implementations.For example, the simplest broadcasting example occurs when an array and a scalar value are combined in an operation:a = np.array([1.0, 2.0, 3.0]) b = 2.0 a * bThe result is equivalent to the previous example where b was an array. We can think of the scalar b being stretched during the arithmetic operation into an array with the same shape as a. The new elements in b are simply copies of the original scalar. The stretching analogy is only conceptual. NumPy is smart enough to use the original scalar value without actually making copies, so that broadcasting operations are as memory and computationally efficient as possible.The code in the second example is more efficient than that in the first because broadcasting moves less memory around during the multiplication (b is a scalar rather than an array). General Broadcasting RulesWhen operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when1. they are equal, or1. one of them is 1If these conditions are not met, a ValueError: frames are not aligned exception is thrown, indicating that the arrays have incompatible shapes. The size of the resulting array is the maximum size along each dimension of the input arrays.Arrays do not need to have the same number of dimensions. For example, if you have a 256x256x3 array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with 3 values. Lining up the sizes of the trailing axes of these arrays according to the broadcast rules, shows that they are compatible: Image (3d array): 256 x 256 x 3 Scale (1d array): 3 Result (3d array): 256 x 256 x 3When either of the dimensions compared is one, the other is used. 
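The image-scaling case just described can be checked directly; a minimal sketch with arbitrary values:

import numpy as np

image = np.random.rand(256, 256, 3)   # RGB image: 256 x 256 x 3
scale = np.array([1.0, 0.5, 0.25])    # one factor per color channel: 3
scaled = image * scale                # trailing axes match (3 and 3), so the scale broadcasts
print(scaled.shape)                   # (256, 256, 3)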
In other words, dimensions with size 1 are stretched or “copied” to match the other.In the following example, both the A and B arrays have axes with length one that are expanded to a larger size during the broadcast operation: A (4d array): 8 x 1 x 6 x 1 B (3d array): 7 x 1 x 5 Result (4d array): 8 x 7 x 6 x 5 Here are some more examples: A (2d array): 5 x 4 B (1d array): 1 Result (2d array): 5 x 4 A (2d array): 5 x 4 B (1d array): 4 Result (2d array): 5 x 4 A (3d array): 15 x 3 x 5 B (3d array): 15 x 1 x 5 Result (3d array): 15 x 3 x 5 A (3d array): 15 x 3 x 5 B (2d array): 3 x 5 Result (3d array): 15 x 3 x 5 A (3d array): 15 x 3 x 5 B (2d array): 3 x 1 Result (3d array): 15 x 3 x 5 Let's create an example with arrays of random numbers.A = np.random.rand(15, 3, 5) B = np.random.rand(3, 1) print(A.shape, B.shape) Result = A * B print(Result.shape)(15, 3, 5) (3, 1) (15, 3, 5)Here are examples of shapes that do not broadcast: A (1d array): 3 B (1d array): 4 trailing dimensions do not match A (2d array): 2 x 1 B (3d array): 8 x 4 x 3 second from last dimensions mismatched --- *Exercise* a = np.random.rand(5, 7, 1, 8) b = np.random.rand(8) c = np.random.rand(7, 3, 8) d = np.random.rand(5, 1, 3, 1)> Experiment with multiplying combinations of the arrays above together. Try to predict the resulting shape beforehand.--- Notice that the rules for broadcasting are based on the location of singleton dimensions. Singleton dimensions are implied forward (to the left), but not backward (to the right). So, the first example here works but not the second: A (2d array): 5 x 4 B (1d array): 4 Result (2d array): 5 x 4 A (2d array): 5 x 4 B (1d array): 5*Compare with large set of examples above. How can the bottom example here be fixed?* This problem can be fixed by creating new singleton dimensions in arrays. This can be done by putting `np.newaxis` in the appropriate space when indexing the array. For example:A = np.random.rand(5, 4) B = np.random.rand(5) A*B print(B.shape) print(B[:,np.newaxis].shape) (A*B[:,np.newaxis]).shape--- *Exercise*> Multiply `b = np.random.rand(8)` and `c = np.random.rand(8, 3, 7)`. What is another way you could accomplish this calculation besides using `newaxis`?--- --- *Exercise* b = np.random.rand(2) c = np.random.rand(2, 3) > Concatenate arrays `b` and `c`. Along which axis would it make sense to concatenate, given the arrays dimensions? Do you need to make any changes to the arrays to get this to work?--- Flattening arrays with `a.flat` and `a.flatten()`There are two basic ways to turn any array into a 1D array. They are slightly different.`a.flatten()` returns a copy of an array, in one dimension.a = np.arange(12).reshape(3, 4) print(a) b = a.flatten() print(b)[[ 0 1 2 3] [ 4 5 6 7] [ 8 9 10 11]] [ 0 1 2 3 4 5 6 7 8 9 10 11]the `flat` attribute on the other hand gives a view of the array in 1D. It looks like an iterator object (like `range` and `zip`). This allowsa.flat[6] = -999 print(a)[[ 0 1 2 3] [ 4 5 -999 7] [ 8 9 10 11]]In contrast, this does not work as expected. _WHY?_a.flatten()[5] = -888 print(a)[[ 0 1 2 3] [ 4 5 -999 7] [ 8 9 10 11]]Other operations can be done to the array first. 
For example, we can take a transpose of the array before we flatten it.a.T.flat[6] = -998 print(a)[[ 0 1 -998 3] [ 4 5 -999 7] [ 8 9 10 11]]Here, the `T` attribute (equivalent to the `a.transpose()` method) gives a view of the array transposed (similar to MATLAB's tick notation).print(a.T)[[ 0 4 8] [ 1 5 9] [-998 -999 10] [ 3 7 11]]Masked arraysMasked arrays are ways to create arrays with missing values. MATLAB™ uses NaNs (NaN stands for 'Not a Number'), and the NaNs are the values of the arrays at those points. This approach also works in Python. Masked arrays are preferred since they retain the masked array values, and also some plotting routines require masked arrays when plotting arrays with missing values. Masked arrays are usually created through some condition, likearr = np.random.randn(7, 8) cond = arr > 0.1 # `cond` is True for the random values greater than 0.5 marr = np.ma.masked_where(cond, arr) print(marr) marr.mean(axis=0)The mask can also be supplied explicity when creating the masked array,marr = np.ma.masked_array([1, 2, 3, 4, 5], mask=[True, True, False, False, True]) marrImporting dataOne of the basic commands in `numpy` for loading in data is the `loadtxt` command. There are other ways to do this, such as the [`genfromtxt`](http://docs.scipy.org/doc/numpy-dev/user/basics.io.genfromtxt.html) command, but `loadtxt` is sufficient for most purposes, and is easy to use.data = np.loadtxt('../data/CTD.txt', comments='*') data[:,2] # a column of data representing temperature data--- *Exercise*> Read in the oceanographic data file '../data/CTD.txt' into an array. You can look at the data file itself to see what variables are stored in each column.> Using this data, write a function to calculate the linear equation of state. This is an approximation of the density of water, as it depends on salinity, temperature, and some empirical constants. We will use the following form for the linear equation of state:> $\rho = 1027[1+7.6\times 10^{-4}(S-35) -1.7\times 10^{-4}(T-25)]$> where $\rho$ is the density, $S$ is the salinity, and $T$ is the temperature.> This is more free form than the homework, so you should set up all of the associated code to call the function, and write out the function yourself. Don't forget docstrings! For a check, the first value of your density array in order should equal 1021.7519981630001 and the last should equal 1028.0471353619998.--- Polynomial fittingThe basic function for fitting a polynomial (e.g., a straight line) is `np.polyfit(x, y, deg)`. There are a number of other functions that let you add (`np.polyadd`), multiply (`np.polymul`), find zeros (`np.roots`), and do other operations to polynomials.x = np.random.rand(100) y = 5 + 3*x + 0.1*np.random.randn(100) # A straight line with some noise p = np.polyfit(x, y, 1) # fit a straight line (order is 1) print(p) # The coefficients of the polynomial, with highest order first. (i.e,. [slope, intercept])[ 3.04891792 4.98836372]Let's plot it to make sure this makes sense:import matplotlib.pyplot as plt %matplotlib inline # plot data plt.plot(x, y, '.') # plot fitted line plt.plot(x, p[0]*x + p[1]) plt.legend(('Data', 'Fitted line'))Once you have the fit, you can use it to find other useful things, like the value of the fitted line at $x=1$:np.polyval(p, 1)You can also use the `np.polynomial.Polynomial` class to work with polynomials. Note, these define polynomials the opposite way, with the _lowest_ order first. 
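To make the ordering difference concrete, here is a small sketch that repeats the straight-line fit from above and converts the `np.polyfit` coefficients (highest order first) into a `Polynomial` (lowest order first):

import numpy as np

x = np.random.rand(100)
y = 5 + 3*x + 0.1*np.random.randn(100)
p = np.polyfit(x, y, 1)                    # [slope, intercept] -- highest order first

poly = np.polynomial.Polynomial(p[::-1])   # Polynomial expects lowest order first
print(np.allclose(np.polyval(p, 0.5), poly(0.5)))   # True -- same polynomial, two conventions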
The Polynomial class gives an excellent example of operator overloading, and the flexibility of classes.p1 = np.polynomial.Polynomial([5, 3]) # y = 5 + 3 x p2 = np.polynomial.Polynomial([3, 6, 8, 2]) # y = 3 + 6 x + 8 x**2 + 2 x**3You can use the Polynomial object to evaluate the value of the polynomial at various input values:print('Evaluation') print('p1(0.0) = ', p1(0)) print('p2(5.0) = ', p2(5))Evaluation p1(0.0) = 5.0 p2(5.0) = 483.0We can use this to make a plot to see the function:x = np.linspace(0,10) plt.plot(x, p1(x), x, p2(x)) plt.legend(['p1', 'p2'])Other things we can do:print('Roots') print('Roots of p2 = ', p2.roots()) print() print('Operations') print('p1 + p2 = ', p1 + p2) print('p1 * p2 = ', p1 * p2) print() print('Calculus') print('Derivative of p1', p1.deriv(1)) print('Integral of p2', p2.integ(4, k=[4, 3, 2, 1]))Roots Roots of p2 = [-3.21124270+0.j -0.39437865-0.55818847j -0.39437865+0.55818847j] Operations p1 + p2 = poly([ 8. 9. 8. 2.]) p1 * p2 = poly([ 15. 39. 58. 34. 6.]) Calculus Derivative of p1 poly([ 3.]) Integral of p2 poly([ 1. 2. 1.5 0.66666667 0.125 0.05 0.02222222 0.00238095])VectorizationVectorization and array broadcasting are two big reasons that `numpy` can be efficient and fast. With these tools, you can avoid writing for loops (which are slow).The best way to do mathematical operations using `numpy` arrays is to do `vector` operations. That is, mathematical operations are defined to be element by element, and this is done much faster than looping. As a rule of thumb, you should be very concerned if your code has more than one significant `for` loop in the numerical analysis section.Here is a way to do multiply 2 big arrays using for loops, which is not how you should do it. The sum at the end is included for comparison with the subsequent approach.a = np.arange(102400.0).reshape(4, 8, 1600, 2) # a 4D array using sequential numbers b = np.random.rand(4, 8, 1600, 2) # a 4D array using random numbers li, lj, lk, lm = b.shape # size of b in each dimension sol = np.zeros(b.shape) for i in range(li): for j in range(lj): for k in range(lk): for m in range(lm): sol[i,j,k,m] = a[i,j,k,m]*b[i,j,k,m] print(sol.sum())2625932085.42The better way is to directly multiply the arrays together, taking advantage of C code that Python has in the background.sol = a * b # element-by-element multiplication. This operation is about as fast as it can be on your computer. print(sol.sum())2625932085.42Basic performance evaluationWe can do some very basic perfomance testing using the `%time` special function in jupyter notebooks. Lets use this to examine the time it takes to do a singular value decomposition for different sized matrices.b = np.random.randn(5000, 2000) %time u, s, v = np.linalg.svd(b)CPU times: user 45.2 s, sys: 275 ms, total: 45.5 s Wall time: 3.99 s`%time` runs the line once and gives the time required. However, calculation times vary depending on many things including the numbers involved and the state of your computer at the moment. In this case, the `%timeit` function can be used to perform the test a number of times to get an average calculation time.%timeit b = np.random.randn(50, 20); u, s, v = np.linalg.svd(b)The slowest run took 4.78 times longer than the fastest. This could mean that an intermediate result is being cached. 
1000 loops, best of 3: 444 µs per loopFor statements that are longer than a single line, the `time.time` function can be used.import time t_start = time.time() time.sleep(0.25) # Do nothing for 0.25 seconds t_stop = time.time() print('{:6.4f} seconds have passed.'.format(t_stop-t_start))0.2506 seconds have passed.--- *Exercise*> Earlier, we discussed using array operations instead of looping because it is faster. Let's compare.> Calculate the time it takes to multiply the $a$ and $b$ arrays (with dimensions [4, 8, 1600, 2]) by both methods demonstrated: using a series of 4 `for` loops, one for each dimension of the arrays, and using array operations. Compare the times by calculating a ratio.--- Linear algebraOne of the key elements of the `numpy` package is the `numpy.linalg` subpackage that contains a number of linear algebra functions that work efficiently on arrays.a = np.random.randn(100, 100) e, v = np.linalg.eig(a) b = np.random.randn(500, 200) u, s, v = np.linalg.svd(b)Matrix multiplication is done using the `np.dot` function. In this case, matrices do _not_ need to be the same shape, but must follow the rules of matrix multiplication. E.g., the operation dot(4x5 array, 5x12 array) results in a 4x12 array; i.e., the inner dimensions must match (technically the last and the second-to-last, for arrays with more than two dimensions).x = np.random.rand(4, 5) y = np.random.rand(5, 12) res = np.dot(x, y) print(res.shape) # np.dot(y, x) # This gives an error -- order is important.(4, 12)
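One possible approach to the loop-versus-vectorization timing exercise above, reusing the `time.time` pattern and the two multiplication methods shown earlier:

import time
import numpy as np

a = np.arange(102400.0).reshape(4, 8, 1600, 2)
b = np.random.rand(4, 8, 1600, 2)

# Method 1: four nested for loops, one per dimension
t0 = time.time()
sol_loop = np.zeros(b.shape)
li, lj, lk, lm = b.shape
for i in range(li):
    for j in range(lj):
        for k in range(lk):
            for m in range(lm):
                sol_loop[i, j, k, m] = a[i, j, k, m] * b[i, j, k, m]
t_loop = time.time() - t0

# Method 2: vectorized element-by-element multiplication
t0 = time.time()
sol_vec = a * b
t_vec = time.time() - t0

print('loop: {:.3f} s, vectorized: {:.5f} s, ratio: {:.0f}x'.format(t_loop, t_vec, t_loop / t_vec))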
Chaos theoryBy means of a chaotic system known as the Clifford attractor, some relevant points of chaos theory will be demonstrated. Python tools will be used for the simulation. Objectives General objectiveLearn more about chaos theory. Demonstrate the three main characteristics that chaotic systems satisfy. Specific objectivesEvaluate different parameters in a chaotic model to verify that the same function can produce different results. Plot the results and compare them. Model representing the systemOur chaotic model will be: $x_{n+1} = \sin(a\,y_n) + c\,\cos(a\,x_n)$ and $y_{n+1} = \sin(b\,x_n) + d\,\cos(b\,y_n)$, where $x_n, y_n$ are the points preceding $x_{n+1}, y_{n+1}$, and $a$, $b$, $c$, $d$ are the parameters under our control.import matplotlib.pyplot as plt import numpy as np from numpy import sin, cos import bokeh from bokeh.io import output_file, show from bokeh.plotting import figure from bokeh.transform import linear_cmap from bokeh.util.hex import hexbin def clifford(xy,a,b,c,d): x = xy[0] y = xy[1] return sin(a*y) + c*(cos(a*x)), sin(b*x) + d*(cos(b*y)) CI = [1,1] l = [] for i in range(10000000): CI = clifford(CI,-1.4,1.6,1,.7) l.append(CI) l = np.array(l) plt.scatter(l.T[0],l.T[1]) bins = hexbin(l.T[0],l.T[1], 0.025) p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154') p.grid.visible = False p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins, fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts))) show(p) CI = [1,1] l = [] for i in range(5000000): CI = clifford(CI,1.7,1.7,0.6,1.2) l.append(CI) l = np.array(l) plt.scatter(l.T[0],l.T[1]) bins = hexbin(l.T[0],l.T[1], 0.025) p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154') p.grid.visible = False p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins, fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts))) show(p) CI = [1,1] l = [] for i in range(5000000): CI = clifford(CI,-1.7,1.3,-0.1,-1.2) l.append(CI) l = np.array(l) plt.scatter(l.T[0],l.T[1]) bins = hexbin(l.T[0],l.T[1], 0.025) p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154') p.grid.visible = False p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins, fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts))) show(p) CI = [1,1] l = [] for i in range(500000): CI = clifford(CI,-1.7,2.9,-2.1,-3.2) l.append(CI) l = np.array(l) plt.scatter(l.T[0],l.T[1]) bins = hexbin(l.T[0],l.T[1], 0.025) p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154') p.grid.visible = False p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins, fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts))) show(p) CI = [4,8] l = [] for i in range(5000000): CI = clifford(CI,-1.4,1.6,1,.7) l.append(CI) l = np.array(l) plt.scatter(l.T[0],l.T[1]) bins = hexbin(l.T[0],l.T[1], 0.025) p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154') p.grid.visible = False p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins, fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts))) show(p)Setup for EDC core API access using xcube client library InstallationFor creating an `xcube` Python environment and installing `xcube` follow the instructions given in the [xcube's README](https://github.com/dcs4cop/xcube/blob/master/README.md).For installing the `xcube_sh` plugin follow the instructions given in the [xcube-sh's README](https://github.com/dcs4cop/xcube-sh/blob/master/README.md).Before using Jupyter Lab for the first time install the `jupyterlab` package and make sure the [Jupyter GeoJSON extension](https://www.npmjs.com/package/@jupyterlab/geojson-extension) is installed too:```bash(xcube) conda install -c conda-forge jupyterlab(xcube) jupyter labextension install @jupyterlab/geojson-extension``` API access (OAuth2)For API access the following environment variables must be provided via a .env
file```SH_CLIENT_ID=SH_CLIENT_SECRET=SH_INSTANCE_ID=```You can find these values in your Euro Data Cube Dashboard in the API Access (OAuth2) section of the Euro Data Cube service. Test SetupTest whether setup was successfull by importing some important `xcube_sh` exports:# Configure data cubes using CubeConfig from xcube_sh.config import CubeConfig # Open data cubes from SH with given CubeConfig from xcube_sh.cube import open_cube # Observe SH requests made open_cube() from xcube_sh.observers import Observers # View stored cubes from xcube_sh.viewer import ViewerServer from xcube_sh.version import version versionWhat-If Tool and SHAP on COMPAS keras modelThis notebook shows:- Training of a keras model on the [COMPAS](https://www.kaggle.com/danofer/compass) dataset.- Use of What-If Tool on the trained model.- Explanation of inference results using [SHAP](https://github.com/slundberg/shap).- Use of What-If Tool to display SHAP values.Copyright 2019 Google LLC.SPDX-License-Identifier: Apache-2.0#@title Install What-If Tool Widget and SHAP library !pip install --upgrade --quiet witwidget shap #@title Read training dataset from CSV {display-mode: "form"} import pandas as pd import numpy as np import tensorflow as tf import witwidget import os import pickle from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from sklearn.utils import shuffle df = pd.read_csv('https://storage.googleapis.com/what-if-tool-resources/computefest2019/cox-violent-parsed_filt.csv') # Filter out entries with no indication of recidivism or no compass score df = df[df['is_recid'] != -1] df = df[df['decile_score'] != -1] # Rename recidivism column df['recidivism_within_2_years'] = df['is_recid'] # Make the COMPASS label column numeric (0 and 1), for use in our model df['COMPASS_determination'] = np.where(df['score_text'] == 'Low', 0, 1) df = pd.get_dummies(df, columns=['sex', 'race']) # Get list of all columns from the dataset we will use for model input or output. input_features = ['sex_Female', 'sex_Male', 'age', 'race_African-American', 'race_Caucasian', 'race_Hispanic', 'race_Native American', 'race_Other', 'priors_count', 'juv_fel_count', 'juv_misd_count', 'juv_other_count'] to_keep = input_features + ['recidivism_within_2_years', 'COMPASS_determination'] to_remove = [col for col in df.columns if col not in to_keep] df = df.drop(columns=to_remove) input_columns = df.columns.tolist() labels = df['COMPASS_determination'] df.head() df_for_training = df.drop(columns=['COMPASS_determination', 'recidivism_within_2_years']) train_size = int(len(df_for_training) * 0.8) train_data = df_for_training[:train_size] train_labels = labels[:train_size] test_data = df_for_training[train_size:] test_labels = labels[train_size:] test_data_with_labels = df[train_size:] # This is the size of the array we'll be feeding into our model for each example input_size = len(train_data.iloc[0]) model = Sequential() model.add(Dense(200, input_shape=(input_size,), activation='relu')) model.add(Dense(50, activation='relu')) model.add(Dense(25, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='mean_squared_error', optimizer='adam') model.summary() model.fit(train_data.values, train_labels.values, epochs=4, batch_size=32, validation_split=0.1) # Helper methods to convert examples to/from tf.Example and vector for model prediction. 
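# These helpers translate between the two representations used in this notebook:
# the What-If Tool consumes tf.Example protos, while the Keras model expects flat
# numeric vectors. The conversion collapses the one-hot sex_/race_ columns into
# single string features for display in WIT, and expands them back into the
# one-hot encoding when running the model for predictions.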
def df_to_tf_examples(df): examples = [] columns = df.columns.values.tolist() for index, row in df.iterrows(): example = tf.train.Example() for col in columns: if col.startswith('sex_') and row[col] == 1: example.features.feature[col[:3]].bytes_list.value.append(col[4:].encode('utf-8')) elif col.startswith('race_') and row[col] == 1: example.features.feature[col[:4]].bytes_list.value.append(col[5:].encode('utf-8')) elif df[col].dtype is np.dtype(np.int64): example.features.feature[col].int64_list.value.append(int(row[col])) elif df[col].dtype is np.dtype(np.float64): example.features.feature[col].float_list.value.append(row[col]) examples.append(example) return examples def from_tf_example(example): inp = [] for i, col in enumerate(input_columns): if col == 'recidivism_within_2_years' or col == 'COMPASS_determination': continue if col.startswith('sex'): if example.features.feature[col[:3]].bytes_list.value and example.features.feature[col[:3]].bytes_list.value[0].decode() == col[4:]: inp.append(1) else: inp.append(0) elif col.startswith('race'): if example.features.feature[col[:4]].bytes_list.value and example.features.feature[col[:4]].bytes_list.value[0].decode() == col[5:]: inp.append(1) else: inp.append(0) else: inp.append(example.features.feature[col].int64_list.value[0]) return inp # For using WIT to display SHAP values, we send each vector entry to WIT as its # own feature, as opposed to collapsing categorical features into a single # string for display in the tool. This is because each vector entry for the # one-hot encodings for the categorical features has its own SHAP value to # display. def df_to_shap_tf_examples(df): examples = [] columns = df.columns.values.tolist() for index, row in df.iterrows(): example = tf.train.Example() for col in columns: example.features.feature[col].int64_list.value.append(int(row[col])) examples.append(example) return examples def from_shap_tf_example(example): inp = [] for i, col in enumerate(input_columns): if col == 'recidivism_within_2_years' or col == 'COMPASS_determination': continue inp.append(example.features.feature[col].int64_list.value[0]) return inp # Convert data to tf.Example format for use in WIT examples_for_wit = df_to_tf_examples(test_data_with_labels) examples_for_shap_wit = df_to_shap_tf_examples(test_data_with_labels) #@title Show model results in WIT from witwidget.notebook.visualization import WitWidget, WitConfigBuilder num_datapoints = 1000 #@param {type: "number"} def custom_predict(examples_to_infer): model_inputs = [from_tf_example(ex) for ex in examples_to_infer] preds = model.predict([model_inputs]) return [[1 - pred[0], pred[0]] for pred in preds] config_builder = WitConfigBuilder(examples_for_wit[:num_datapoints]).set_custom_predict_fn( custom_predict).set_target_feature('recidivism_within_2_years') ww = WitWidget(config_builder, height=800) import shap # Create an explainer by passing a subset of our training data explainer = shap.DeepExplainer(model, train_data.values[:200]) # Explain predictions of the model on the first 5 examples from our test set shap_values = explainer.shap_values(test_data.values[:5]) shap_values #@title Show model results and SHAP values in WIT from witwidget.notebook.visualization import WitWidget, WitConfigBuilder num_datapoints = 1000 #@param {type: "number"} # Return model predictions and SHAP values for each inference. 
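# The function below returns a dict with two entries per batch: 'predictions',
# the per-class scores from the Keras model, and 'attributions', one dict per
# example mapping each input column name to its SHAP value, which is what allows
# WIT to display the attributions alongside each prediction.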
def custom_predict_with_shap(examples_to_infer): model_inputs = [from_shap_tf_example(ex) for ex in examples_to_infer] preds = model.predict([model_inputs]) preds = [[1 - pred[0], pred[0]] for pred in preds] shap_output = explainer.shap_values(np.array(model_inputs))[0] attributions = [] for shap in shap_output: attrs = {} for i, col in enumerate(df_for_training.columns): attrs[col] = shap[i] attributions.append(attrs) ret = {'predictions': preds, 'attributions': attributions} return ret config_builder = WitConfigBuilder(examples_for_shap_wit[:num_datapoints]).set_custom_predict_fn( custom_predict_with_shap).set_target_feature('recidivism_within_2_years') ww = WitWidget(config_builder, height=800)QsurfaceQsurface is a simulation package for the surface code, and is designed to modularize 3 aspects of a surface code simulation.1. The surface code2. The error model3. The used decoder Examples from the readme.The included examples in this section uses `qsurface.main.initialize` to setup the surface code and decoder, and `qsurface.main.run` to perform simulations. We'll expand these examples with more in-depth descriptions and how to perform a threshold simulation with `qsurface.main.threshold`. To simulate the toric code and simulate with bit-flip error for 10 iterations and decode with the MWPM decoder:from qsurface.main import initialize, run, BenchmarkDecoder code, decoder = initialize((6,6), "toric", "mwpm", enabled_errors=["pauli"]) run(code, decoder, iterations=10, error_rates = {"p_bitflip": 0.1})Benchmarking of decoders can be enabled by attaching a *benchmarker* object to the decoder. See the docs for the syntax and information to setup benchmarking.benchmarker = BenchmarkDecoder({"decode":"duration"}) code, decoder = initialize((6,6), "toric", "mwpm", enabled_errors=["pauli"]) run(code, decoder, iterations=10, error_rates = {"p_bitflip": 0.1}, benchmark=benchmarker)The figures in Qsurface allows for step-by-step visualization of the surface code simulation (and if supported the decoding process). Each figure logs its history such that the user can move backwards in time to view past states of the surface (and decoder). Press `h` when the figure is open for more information.The GUI of the figure is made possible by the PyQt5 or Tkinter backend. However, for Jupyter notebooks such as this one, the GUI is not available. Qsurface automatically detects this and plots each iteration inline instead. 
If you're running the notebook locally, or have proper X11 forwarding setup, you can still force the PyQt5 or Tkinter with the magic `%matplotlib qt` or `%matplotlib tk` prior to importing qsurface.# When display is available interactive plotting can be enabled by either # %matplotlib qt # or # %matplotlib tk from qsurface.main import initialize, run, BenchmarkDecoder code, decoder = initialize( (3,3), "toric", "mwpm", enabled_errors=["pauli"], plotting=True, initial_states=(0,0), ) run(code, decoder, error_rates = {"p_bitflip": 0.1, "p_phaseflip": 0.1}, decode_initial=False, iterations=1) from qsurface.main import initialize, run, BenchmarkDecoder code, decoder = initialize((3,3), "toric", "mwpm", enabled_errors=["pauli"], faulty_measurements=True, plotting=True, initial_states=(0,0)) run(code, decoder, error_rates = {"p_bitflip": 0.05, "pm_bitflip": 0.05}, decode_initial=False)Timing Redis Operations *Data Structures and Information Retrieval in Python*Copyright 2021 License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) [Click here to run this notebook on Colab](https://colab.research.google.com/github/AllenDowney/DSIRP/blob/main/chapters/time_redis.ipynb) Performance of lpush and rpushThe [Redis documentation](https://redis.io/topics/data-types) says> The main features of Redis Lists from the point of view of time complexity are the support for constant time insertion and deletion of elements near the head and tail, even with many millions of inserted items. Accessing elements is very fast near the extremes of the list but is slow if you try accessing the middle of a very big list, as it is an O(N) operation.In class recently, we saw some behavior that suggested that `lpush` might be linear (as we would expect from an array list, like Python lists) rather than constant time (as we would expect from a linked list).So let's find out.import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install redis-server !/usr/local/lib/python*/dist-packages/redis_server/bin/redis-server --daemonize yes else: !redis-server --daemonize yes try: import redis except ImportError: !pip install redis import redis r = redis.Redis() import os def etime(): """Measures user and system time this process has used. Returns the sum of user and system time.""" user, sys, chuser, chsys, real = os.times() return user+sys def time_func(func, n): """Run a function and return the elapsed time. func: function n: problem size, passed as an argument to func returns: user+sys time in seconds """ start = etime() func(n) end = etime() elapsed = end - start return elapsed def run_timing_test(func, max_time=1): """Tests the given function with a range of values for n. func: function object returns: list of ns and a list of run times. """ ns = [] ts = [] for i in range(10, 28): n = 2**i t = time_func(func, n) print(n, t) if t > 0: ns.append(n) ts.append(t) if t > max_time: break return ns, tslpushdef lpush(n): key = 'lpush_test' [r.lpush(key, x) for x in range(n)] r.delete(key) def rpush(n): key = 'rpush_test' [r.rpush(key, x) for x in range(n)] r.delete(key) ns, ts = run_timing_test(rpush) def fit(ns, ts, exp=1.0, index=-1): """Fits a curve with the given exponent. 
ns: sequence of problem sizes ts: sequence of times exp: exponent of the fitted curve index: index of the element the fitted line should go through returns: sequence of fitted times """ # Use the element with the given index as a reference point, # and scale all other points accordingly. nref = ns[index] tref = ts[index] tfit = [] for n in ns: ratio = n / nref t = ratio**exp * tref tfit.append(t) return tfit import matplotlib.pyplot as plt def plot_timing_test(ns, ts, label='', color='C0', exp=1.0, scale='log'): """Plots data and a fitted curve. ns: sequence of n (problem size) ts: sequence of t (run time) label: string label for the data curve color: string color for the data curve exp: exponent (slope) for the fitted curve scale: string passed to xscale and yscale """ ts_fit = fit(ns, ts, exp) fit_label = 'exp = %d' % exp plt.plot(ns, ts_fit, label=fit_label, color='0.7', linestyle='dashed') plt.plot(ns, ts, 'o-', label=label, color=color, alpha=0.7) plt.xlabel('Problem size (n)') plt.ylabel('Runtime (seconds)') plt.xscale(scale) plt.yscale(scale) plt.legend() plot_timing_test(ns, ts, scale='linear')Define Parameters# Implement pre-trained VGG16 CNN model vgg16 = VGG16(include_top=False, weights='imagenet', input_shape=(224, 224, 3)) plot_model(vgg16, show_shapes=True, to_file='./result/vgg16.pdf') plot_model(vgg16, show_shapes=True, to_file='./result/vgg16.png') vgg16.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) (None, 224, 224, 3) 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _________________________________________________________________ block2_poo[...]Define Triplet Network# Define base network for triplet network def base_net(input_shape=(224, 224, 3), trainable=False): """ define triplet network """ # load pre-trained VGG16 model vgg16 = VGG16(include_top=False, weights='imagenet', input_shape=input_shape) vgg16.trainable = trainable # define sequential model model = Sequential(name='base_net') model.add(vgg16) model.add(Flatten(name='flatten')) model.add(Dense(512, activation='relu', name='fc1')) model.add(Dense(128, activation=None, name='fc2')) model.add(Lambda(lambda x: K.l2_normalize(x, axis=1), name='l2_norm')) return model base_model = base_net(input_shape=(224, 224, 3), trainable=False) plot_model(base_model, show_shapes=True, to_file='./result/base_model.pdf') plot_model(base_model, show_shapes=True, to_file='./result/base_model.png') base_model.summary() # Define triplet network def triplet_net(base_model, input_shape=(224, 224, 3)): """ function to define triplet networks """ # define input: anchor, positive, negative anc_input = Input(shape=input_shape, name='anchor_input') pos_input = Input(shape=input_shape, name='positive_input') neg_input = Input(shape=input_shape, name='negative_input') # extract vector represent using CNN based model anc_output = base_model(anc_input) pos_output = 
base_model(pos_input) neg_output = base_model(neg_input) # extract vector represent using CNN based model anc_output = Lambda(lambda x: x, name='anchor_output')(anc_output) pos_output = Lambda(lambda x: x, name='positive_output')(pos_output) neg_output = Lambda(lambda x: x, name='nagative_output')(neg_output) # define inputs and outputs inputs=[anc_input, pos_input, neg_input] outputs=[anc_output, pos_output, neg_output] # define the triplet model model = Model(inputs=inputs, outputs=outputs, name='triplet_net') return model triplet_model = triplet_net(base_model=base_model, input_shape=(224, 224, 3)) plot_model(triplet_model, show_shapes=True, to_file='./result/triplet_network.pdf') plot_model(triplet_model, show_shapes=True, to_file='./result/triplet_network.png') triplet_model.summary() # Define triplet loss def triplet_loss(y_true, y_pred): """ function to compute triplet loss margin is predefined coded, manually change if needed """ # define triplet margin margin = 0.2 # get the prediction vector anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2] # compute distance pos_distance = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1) neg_distance = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1) # compute loss partial_loss = tf.subtract(pos_distance, neg_distance) + margin full_loss = tf.reduce_sum(tf.maximum(partial_loss, 0.0)) return full_lossNLP Basics: Exploring the dataset Read in text dataimport pandas as pd fullCorpus = pd.read_csv('SMSSpamCollection.tsv', sep='\t', header=None)Explore the dataset# What is the shape of the dataset? # How many spam/ham are there? # How much missing data is there?This notebook shows how to use Orchestrator APIs for user experimentsimport os from fabrictestbed.slice_manager import SliceManager, Status import json credmgr_host = os.environ['FABRIC_CREDMGR_HOST'] orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST'] print(f"CM Host: {credmgr_host} Orchestrator Host: {orchestrator_host}")Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below.slice_manager = SliceManager(oc_host=orchestrator_host, cm_host=credmgr_host, project_name='all', scope='all') # Initialize the slice manager slice_manager.initialize()Orchestrator API example to query for available resourcesstatus, advertised_topology = slice_manager.resources() print(f"Status: {status}") if status == Status.OK: print(f"Toplogy: {advertised_topology}") else: print(f"Error: {advertised_topology}") if status == Status.OK: advertised_topology.draw()Create Slicefrom fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType # Create topology t = ExperimentTopology() # Add node #n1 = t.add_node(name='n1', site='UKY') # Set capacities cap = Capacities() cap.set_fields(core=2, ram=6, disk=10) # Set Properties #n1.set_properties(capacities=cap, image_type='qcow2', image_ref='default_centos_8') # Add PCI devices #n1.add_component(ctype=ComponentType.NVME, model='P4510', name='c1') # Add node n2 = t.add_node(name='n2', site='LBNL') # Set properties n2.set_properties(capacities=cap, image_type='qcow2', image_ref='default_centos_8') #n2.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name='n2-nic1') n2.add_component(ctype=ComponentType.NVME, model='P4510', name='c1') #n2.add_component(ctype=ComponentType.GPU, model='Tesla T4', name='c3') # Generate Slice Graph slice_graph = t.serialize() 
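# The serialized slice graph above captures the requested topology (node, capacities,
# and components). It is submitted to the orchestrator below together with an SSH
# public key so the provisioned resources can be reached once the slice is active.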
ssh_key = None with open ("/home/fabric/.ssh/id_rsa.pub", "r") as myfile: ssh_key=myfile.read() ssh_key=ssh_key.strip() # Request slice from Orchestrator status, reservations = slice_manager.create(slice_name='Slice-comps-only', slice_graph=slice_graph, ssh_key=ssh_key) print("Response Status {}".format(status)) if status == Status.OK: print("Reservations created {}".format(reservations)) else: print(f"Failure: {reservations}") # Set the Slice ID from output of the above command slice_id=reservations[0].slice_idQuery Slicesstatus, slices = slice_manager.slices(state="All") print("Response Status {}".format(status)) if status == Status.OK: print("Slices {}".format(slices)) else: print(f"Failure: {slices}")Query Sliversstatus, slivers = slice_manager.slivers(slice_id=slice_id) print("Response Status {}".format(status)) if status == Status.OK: print("Slivers {}".format(slivers)) else: print(f"Failure: {slivers}")Sliver Statusfor s in slivers: status, sliver_status = slice_manager.sliver_status(slice_id=slice_id, sliver_id=s.reservation_id) print("Response Status {}".format(status)) if status == Status.OK: print() print("Sliver Status {}".format(sliver_status)) print()Delete Slicestatus, result = slice_manager.delete(slice_id=slice_id) print("Response Status {}".format(status)) print("Response received {}".format(result))CAS Connection Connect to the Cas Serverimport swat s = swat.CAS(host, port) s.session.setLocale(locale="en_US") s.sessionProp.setSessOpt(timeout=864000)The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed',)).History will not be written to the database.Document Classification Part 2: Forest Models and AutotuneIn this notebook, you will build forest models that performs the classification task. To improve the model and illustrate the power of SAS Optimization, hyperparameter tuning is automated using the autotune action. The results after autotune give significant improvement over the forest models trained with default parameters. Load DataThe Cora data set is publicly available via [this hyperlink](https://linqs.soe.ucsc.edu/data).import document_classification_scripts as scripts import importlib importlib.reload(scripts) from document_classification_scripts import AttributeDict, nClasses, nWords, targetColumn, baseFeatureList demo = scripts.Demo(s) demo.loadRawData()NOTE: Cloud Analytic Services made the uploaded file available as table CONTENT in caslib CASUSERHDFS(brrees). NOTE: The table CONTENT has been created in caslib CASUSERHDFS(brrees) from binary data uploaded to Cloud Analytic Services. NOTE: Cloud Analytic Services made the uploaded file available as table CITES in caslib CASUSERHDFS(brrees). NOTE: The table CITES has been created in caslib CASUSERHDFS(brrees) from binary data uploaded to Cloud Analytic Services.Data Preprocessing Creates a custom format definition for target labelsdemo.defineTargetVariableFormat()NOTE: Format library MYFMTLIB added. Format search update using parameter APPEND completed.Partitions data into training and testdemo.loadOrPartitionData()NOTE: Cloud Analytic Services added the caslib 'cora'. NOTE: Cloud Analytic Services made the file contentPartitioned.sashdat available as table CONTENTPARTITIONED in caslib CASUSERHDFS(brrees). NOTE: Cloud Analytic Services made the file contentTrain.sashdat available as table CONTENTTRAIN in caslib CASUSERHDFS(brrees). 
NOTE: Cloud Analytic Services made the file contentTest.sashdat available as table CONTENTTEST in caslib CASUSERHDFS(brrees).Performs Principal Component Analysis (PCA)nPca = 40 demo.performPca(nPca) pcaFeatureList = [f"pca{i}" for i in range(1,nPca)]WARNING: The variable w445 in table CONTENTTRAIN is constant.Joins citations and training data targetsdemo.joinTrainingTargets()NOTE: Table CITESTRAIN was created in caslib CASUSERHDFS(brrees) with 3562 rows returned. NOTE: Table CITESCOMBINED was created in caslib CASUSERHDFS(brrees) with 5429 rows returned.Generate Network Features%%capture networkParam=AttributeDict({ "useCentrality":True, "useNodeSimilarity":True, "useCommunity":True, "useCore":True }) tableContentNetwork, networkFeatureList = demo.addNetworkFeatures( "contentTrain", "citesTrain", networkParam) tableContentPartitionedNetwork, networkFeatureList = demo.addNetworkFeatures( "contentPartitioned", "citesCombined", networkParam) tableContentNetworkPca, networkFeatureList = demo.addNetworkFeatures( "contentTrainPca", "citesTrain", networkParam) tableContentPartitionedNetworkPca, networkFeatureList = demo.addNetworkFeatures( "contentPartitionedPca", "citesCombined", networkParam) s.datastep.runCode( code = f"data contentTestNetwork; set {tableContentPartitionedNetwork}(where=(partition=0)); run;" ) print(f"contentTestNetwork: (rows, cols) = {s.CASTable('contentTestNetwork').shape}") s.datastep.runCode( code = f"data contentTestPcaNetwork; set {tableContentPartitionedNetworkPca}(where=(partition=0)); run;" ) print(f"contentTestPcaNetwork: (rows, cols) = {s.CASTable('contentTestPcaNetwork').shape}")contentTestNetwork: (rows, cols) = (542, 1485) contentTestPcaNetwork: (rows, cols) = (542, 92)Build Forest ClassifiersUsing the Decision Tree action set, you can train a forest model which, with default hyperparameters and baseline features, predicts poorly compared to the neural networks trained in Part 1. 
Significant improvements are made by adding network features to the model and finding ideal hyperparameters using the tuneForest action in the Autotune action set.def trainForestModel(modelName, tableTrain, featureList, forestParam): return s.decisionTree.forestTrain( inputs=featureList, target=targetColumn, nominal={targetColumn}, table=tableTrain, varImp=True, seed=forestParam.randomSeed, casOut={"name": modelName, "replace": True}, saveState={"name": f"{modelName}AStore", "replace": True} ) def scoreForestModel(modelName, tableTest): r = s.aStore.score( table=tableTest, rstore=f"{modelName}AStore", copyVars={"node", "target"}, casout={"name": f"{modelName}Scored", "replace": True} ) s.datastep.runCode( single="YES", code=f""" data {modelName}Scores; set {modelName}Scored end=_end; retain correct count 0; if I_target EQ target then correct = correct+1; count=count+1; if _end then do; accuracy = correct / count; misclassification = 1 - accuracy; output; end; keep correct count accuracy misclassification; run; """ ) def bootstrapForestModel(modelName, tableTrain, tableTest, featureList, forestParam=None, n=25): accuracies = [] for i in range(n): partitionData(tableIn=tableTest, tableOut=f"{tableTest}Part_", table1Out=f"{tableTest}Boot_", table2Out=None, frac1=90, randomSeed=(i+5678), partName="bootstrap") trainForestModel(modelName, tableTrain, featureList, randomSeed=(12345+i), forestParam=forestParam) acc = scoreForestModel(modelName, f"{tableTest}Boot_") accuracies = accuracies + [acc] print(f"Bootstrap Accuracy = {np.mean(accuracies)} +- {np.std(accuracies)}") return accuraciesTrain Baseline Forest ModelbaseForestModel = "baseForestModel" %%time resultsTrainBaseForest = demo.trainForestModel( baseForestModel, "contentTrain", baseFeatureList) resultsTrainBaseForest['OutputCasTables']NOTE: 1274001 bytes were written to the table "baseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". CPU times: user 31.2 ms, sys: 0 ns, total: 31.2 ms Wall time: 32.7 sScore Baseline Forest ModelresultsScoreBaseForest=demo.scoreForestModel(baseForestModel,"contentTest")Accuracy = 0.43357933579335795Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(baseForestModel,"contentTrain", "contentTest", baseFeatureList);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 1274001 bytes were written to the table "baseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4385245901639344 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 1267373 bytes were written to the table "baseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4139344262295082 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 1269965 bytes were written to the table "baseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4323770491803279 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 1270045 bytes were written to the table "baseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.41598360655737704 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. 
NOTE: 1272049 bytes were written to the table "baseFores[...]Train PCA Forest ModelpcaForestModel = "pcaForestModel" pcaFeatureList = [f"pca{i}" for i in range(1,nPca)] %%time resultsTrainPcaForest = demo.trainForestModel( pcaForestModel, "contentTrainPca", pcaFeatureList) resultsTrainPcaForest['OutputCasTables']NOTE: 1158292 bytes were written to the table "pcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". CPU times: user 15.6 ms, sys: 0 ns, total: 15.6 ms Wall time: 2.08 sScore PCA Forest ModelresultsScorePcaForest=demo.scoreForestModel(pcaForestModel,"contentTestPca")Accuracy = 0.44095940959409596Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(pcaForestModel,"contentTrainPca", "contentTestPca", pcaFeatureList);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 1158292 bytes were written to the table "pcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4385245901639344 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 1153612 bytes were written to the table "pcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4180327868852459 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 1153708 bytes were written to the table "pcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.41598360655737704 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 1155204 bytes were written to the table "pcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.4385245901639344 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 1155108 bytes were written to the table "pcaForestMode[...]Train Network-Features-Only Forest ModelnetworkForestModel = "networkForestModel" %%time resultsTrainNetworkForest = demo.trainForestModel( networkForestModel, "contentTrainNetwork", networkFeatureList) resultsTrainNetworkForest['OutputCasTables']NOTE: 1366499 bytes were written to the table "networkForestModelAStore" in the caslib "CASUSERHDFS(brrees)". CPU times: user 31.2 ms, sys: 0 ns, total: 31.2 ms Wall time: 2.55 sScore Network-Features-Only Forest ModelresultsScoreNetworkForest=demo.scoreForestModel(networkForestModel,"contentTestPcaNetwork")Accuracy = 0.8081180811808119Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(networkForestModel,"contentTrainNetwork", "contentTestNetwork", networkFeatureList);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 1366499 bytes were written to the table "networkForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8114754098360656 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 1355451 bytes were written to the table "networkForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.805327868852459 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 1352235 bytes were written to the table "networkForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7991803278688525 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 1352267 bytes were written to the table "networkForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8012295081967213 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. 
NOTE: 1353875 bytes were written to the table [...]Train Baseline+Network Features Forest ModelnetworkBaseForestModel = "networkBaseForestModel" %%time resultsTrainNetworkBaseForest = demo.trainForestModel( networkBaseForestModel, "contentTrainNetwork", baseFeatureList+networkFeatureList) resultsTrainNetworkBaseForest['OutputCasTables']NOTE: 1608867 bytes were written to the table "networkBaseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". CPU times: user 46.9 ms, sys: 0 ns, total: 46.9 ms Wall time: 34.5 sScore Baseline+Network Forest ModelresultsScoreNetworkBaseForest=demo.scoreForestModel(networkBaseForestModel,"contentTestNetwork")Accuracy = 0.7416974169741697Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(networkBaseForestModel,"contentTrainNetwork", "contentTestNetwork", baseFeatureList+networkFeatureList);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 1608867 bytes were written to the table "networkBaseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7459016393442623 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 1615683 bytes were written to the table "networkBaseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7418032786885246 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 1616983 bytes were written to the table "networkBaseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7295081967213115 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 1612143 bytes were written to the table "networkBaseForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7295081967213115 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 1614167 bytes were writ[...]Train PCA+Network Features Forest ModelnetworkPcaForestModel = "networkPcaForestModel" %%time resultsTrainNetworkPcaForest = demo.trainForestModel( networkPcaForestModel, "contentTrainPcaNetwork", pcaFeatureList+networkFeatureList) resultsTrainNetworkPcaForest['OutputCasTables']NOTE: 1334778 bytes were written to the table "networkPcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". CPU times: user 15.6 ms, sys: 0 ns, total: 15.6 ms Wall time: 3.61 sScore PCA+Network Forest ModelresultsScoreNetworkPcaForest=demo.scoreForestModel(networkPcaForestModel,"contentTestPcaNetwork")Accuracy = 0.7915129151291513Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(networkPcaForestModel,"contentTrainPcaNetwork", "contentTestPcaNetwork", pcaFeatureList+networkFeatureList);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 1334778 bytes were written to the table "networkPcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7950819672131147 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 1336482 bytes were written to the table "networkPcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7971311475409836 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 1333346 bytes were written to the table "networkPcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.7971311475409836 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 1341178 bytes were written to the table "networkPcaForestModelAStore" in the caslib "CASUSERHDFS(brrees)". 
Accuracy = 0.7971311475409836 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 1342754 bytes were written [...]Autotune the Forest Modelsdef tuneForestModel(modelName, tableTrain, featureList, tunerOptions=None): if tunerOptions is None: tunerOptions = { "seed": 123, "objective": "MISC" } result = s.autotune.tuneForest( trainOptions={ "table": tableTrain, "inputs": featureList, "target": targetColumn, "nominal": {targetColumn}, "casout": {"name": modelName, "replace": True}, "saveState": {"name": f"{modelName}AStore", "replace": True} }, tunerOptions=tunerOptions ) return result def loadOrTuneForestModel( modelName, tableTrain, featureList, tunerOptions=None, newRun=False): coraCaslib = "cora" addCaslibIfNeeded(coraCaslib) r = s.table.fileInfo(caslib="cora") if not f"{modelName}AStore.sashdat" in r.FileInfo["Name"].unique(): newRun = True if not os.path.exists(f"../data/{modelName}Best.pkl"): newRun = True if newRun: r = resultsTrainNetworkForestAuto = tuneForestModel( modelName, tableTrain, featureList) saveTables([f"{modelName}AStore", f"{modelName}"]) r.BestConfiguration.to_pickle(f"../data/{modelName}Best.pkl") return r.BestConfiguration else: loadTables([f"{modelName}AStore", f"{modelName}"]) bestConfiguration = pd.read_pickle(f"../data/{modelName}Best.pkl") return bestConfigurationAutotune PCA Features ModelnewRun=False pcaModelAuto = "pcaModelAuto" forestParamAuto = demo.loadOrTuneForestModel(pcaModelAuto, "contentTrainPca", pcaFeatureList, newRun=newRun )NOTE: Cloud Analytic Services made the file pcaModelAutoAStore.sashdat available as table PCAMODELAUTOASTORE in caslib CASUSERHDFS(brrees). NOTE: Cloud Analytic Services made the file pcaModelAuto.sashdat available as table PCAMODELAUTO in caslib CASUSERHDFS(brrees).Score Autotuned PCA Features ModelresultsScorePcaModelAuto=demo.scoreForestModel(pcaModelAuto,"contentTestPca")Accuracy = 0.6881918819188192Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(pcaModelAuto,"contentTrainPca", "contentTestPca", pcaFeatureList, forestParamAuto, 25);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 27483492 bytes were written to the table "pcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.694672131147541 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 27513260 bytes were written to the table "pcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.6844262295081968 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 27535980 bytes were written to the table "pcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.680327868852459 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 27490660 bytes were written to the table "pcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.6844262295081968 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 27451780 bytes were written to the table "pcaModelAutoAStore"[...]Autotune Network-Features-Only ModelnetworkModelAuto = "networkModelAuto" forestParamAuto = demo.loadOrTuneForestModel(networkModelAuto, "contentTrainNetwork", networkFeatureList, newRun=newRun )NOTE: Cloud Analytic Services made the file networkModelAutoAStore.sashdat available as table NETWORKMODELAUTOASTORE in caslib CASUSERHDFS(brrees). 
NOTE: Cloud Analytic Services made the file networkModelAuto.sashdat available as table NETWORKMODELAUTO in caslib CASUSERHDFS(brrees).Score Autotuned Network-Features-Only ModelresultsScoreNetworkModelAuto=demo.scoreForestModel(networkModelAuto,"contentTestNetwork")Accuracy = 0.8523985239852399Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(networkModelAuto,"contentTrainNetwork", "contentTestNetwork", networkFeatureList, forestParamAuto, 25);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 6362659 bytes were written to the table "networkModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8463114754098361 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 6363299 bytes were written to the table "networkModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8504098360655737 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 6358395 bytes were written to the table "networkModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8442622950819673 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 6356387 bytes were written to the table "networkModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8463114754098361 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 6353107 bytes were written to the table "networ[...]Autotune PCA+Network ModelnetworkPcaModelAuto = "networkPcaModelAuto" forestParamAuto = demo.loadOrTuneForestModel(networkPcaModelAuto, "contentTrainPcaNetwork", pcaFeatureList + networkFeatureList, newRun=newRun )NOTE: Cloud Analytic Services made the file networkPcaModelAutoAStore.sashdat available as table NETWORKPCAMODELAUTOASTORE in caslib CASUSERHDFS(brrees). NOTE: Cloud Analytic Services made the file networkPcaModelAuto.sashdat available as table NETWORKPCAMODELAUTO in caslib CASUSERHDFS(brrees).Score Autotuned PCA+Network ModelresultsScoreNetworkPcaModelAuto=demo.scoreForestModel(networkPcaModelAuto,"contentTestPcaNetwork")Accuracy = 0.8523985239852399Bootstrap Runs%%time accuracies = demo.bootstrapForestModel(networkPcaModelAuto,"contentTrainPcaNetwork", "contentTestPcaNetwork", pcaFeatureList+networkFeatureList, forestParamAuto, 25);NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5678 for sampling. NOTE: 6615870 bytes were written to the table "networkPcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8545081967213115 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5679 for sampling. NOTE: 6621238 bytes were written to the table "networkPcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8565573770491803 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5680 for sampling. NOTE: 6590182 bytes were written to the table "networkPcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8504098360655737 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5681 for sampling. NOTE: 6590214 bytes were written to the table "networkPcaModelAutoAStore" in the caslib "CASUSERHDFS(brrees)". Accuracy = 0.8504098360655737 NOTE: Simple Random Sampling is in effect. NOTE: Using SEED=5682 for sampling. NOTE: 6589022 bytes were written to the t[...]Session Cleanups.terminate();Pendulum lab`2019/10/28 AISVN`We investigated the dependency of the period of a pendulum on its length. 
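For small oscillations the simple-pendulum model predicts $T = 2\pi\sqrt{L/g}$, so the period should grow with the square root of the length; this is the nonlinear relationship the measurements below are meant to reveal.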
To get the nonlinear relationship we had to extend the range of length from less than $4 \text{cm}$ to $5 \text{meters}$. Here is the raw data:import matplotlib.pyplot as plt length = [0.038,0.055,0.102,0.175,0.309,0.551,0.668,0.736,0.869,1.117,2.374,2.854,5.009] period = [0.4312,0.4866,0.6432,0.8606,1.152,1.49,1.674,1.744,1.892,2.13,3.0738,3.4012,4.524] plt.plot(length,period) plt.show()Import librariesimport pandas as pd import numpy as np # Pandas warning messages (optional) pd.options.mode.chained_assignment = None #default='warn' # Plot options import matplotlib.pyplot as plt import seaborn as sns sns.palplot(sns.color_palette('deep')) sns.set_style("whitegrid") %matplotlib inline pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) from sklearn import preprocessing from sklearn.svm import SVC data=pd.read_csv('data/listings.csv', low_memory=False, usecols=['id','name','host_id','host_name','neighbourhood','latitude','longitude', 'room_type','price','minimum_nights','number_of_reviews','last_review','reviews_per_month', 'calculated_host_listings_count','availability_365']) data.info() data.head() len(data[data['room_type'] == 'Entire home/apt']) len(data[data['room_type'] == 'Private room']) # Convert 'price' from string currency to float data['price'] = data['price'].str.replace('$', '').str.replace(',', '').astype(float, 2) # Fill missing data with mean strategy data[data==' ']=np.nan #print the number of NaNs print(data.isnull().sum()) corr=data.corr(method='kendall') plt.figure(figsize=(15,8)) sns.heatmap(corr, annot=True) data.columns sns.countplot(data['room_type'],palette='plasma') fig=plt.gcf() fig.set_size_inches(5,5) plt.title('Room Type') data.drop(['id','name','host_id','latitude','longitude', 'latitude','longitude','number_of_reviews'], axis=1, inplace=True) #examing the changes data.head(5) data.drop(['last_review','reviews_per_month','host_name'], axis=1, inplace=True) data.head(20) from numpy.random import seed def Encode(data): for column in data.columns[data.columns.isin(['neighbourhood','room_type'])]: data[column]=data[column].factorize()[0] return data data_en=Encode(data.copy()) data_en.head() np.random.seed(42) N_rep=10000 mean_replicate=np.empty(N_rep) for i in range(N_rep): samples=np.random.choice(data_en['price'], len(data_en['price'])) mean_replicate[i]=np.mean(samples) mean, std=np.mean(mean_replicate), np.std(mean_replicate) lower_bound=mean-1.64*std print(lower_bound) entire_home=data_en.price[data_en.room_type==0] private_room=data_en.price[data_en.room_type==1] private_number=len(private_room) entire_number=len(entire_home) print(entire_number) print(private_number) mean_diff_observe=np.mean(entire_home)-np.mean(private_room) print(mean_diff_observe)478.8538166947268The mean price difference between private room and entire home is $-478.85np.random.seed(42) std_diff_replicate=np.empty(N_rep) for i in range(N_rep): entire_samples=np.random.choice(entire_home, entire_number) private_samples=np.random.choice(private_room, private_number) std_diff_replicate[i]=np.std(entire_samples)-np.std(private_samples) std_diff_mean, std_diff_std=np.mean(std_diff_replicate), np.std(std_diff_replicate) conf_interval=[std_diff_mean-1.96*std_diff_std,std_diff_mean+1.96*std_diff_std] print(conf_interval) np.random.seed(42) np.random.choice(entire_home, entire_number)95% confidence interval for the difference between the private room and entire home/apt is [-1381,-981].np.random.seed(42) 
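# Under the null hypothesis the two groups share the same mean price, so entire_home is
# shifted below to have the mean of private_room before resampling; the permutation
# replicates of the mean difference are then compared against mean_diff_observe.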
entire_shifted=entire_home-np.mean(entire_home)+np.mean(private_room) def permutation_sample(data1, data2): data3=np.random.permutation(np.concatenate((data1,data2))) perm_sample_1=data3[:len(data1)] perm_sample_2=data3[len(data1):] return perm_sample_1, perm_sample_2 perm_mean_replicates=np.empty(N_rep) for i in range(N_rep): perm_private, perm_entire=permutation_sample(private_room, entire_shifted) perm_mean_replicates[i]=np.mean(perm_private)-np.mean(perm_entire) mean_diff=np.empty(N_rep) for i in range(N_rep): mean_diff[i]=np.mean(private_samples)-np.mean(np.random.choice(entire_shifted, len(entire_shifted))) p_value_permutation=np.sum(perm_mean_replicates>=mean_diff_observe)/len(perm_mean_replicates) print('p_val_permuation is %s' % p_value_permutation) p_val=(np.sum(mean_diff)>=mean_diff_observe)/len(mean_diff) print('p value is %s'% p_val)p value is 0.0001_Lambda School Data Science_ Make explanatory visualizationsTody we will reproduce this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/)from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png' example = Image(url=url, width=400) display(example)Using this data: https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel Objectives- add emphasis and annotations to transform visualizations from exploratory to explanatory- remove clutter from visualizationsLinks- [Strong Titles Are The Biggest Bang for Your Buck](http://stephanieevergreen.com/strong-titles/)- [Remove to improve (the data-ink ratio)](https://www.darkhorseanalytics.com/blog/data-looks-better-naked)- [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/) Make prototypesThis helps us understand the problem%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) fake.plot.bar(color='C1', width=0.9); fake2 = pd.Series( [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]) fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9);Annotate with textplt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) ax = fake.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) ax.text(x=-1.2, y=45.8, s="'An Inconvenient Sequal: Truth to Power' is divisive", fontsize= 15, fontweight='bold'); ax.text(x=-0.9, y=43, s='IMBd ratings for the films as of Aug.29',fontsize=12); ax.set(xlabel='Rating', ylabel='Percent of Total Votes', yticks=range(0,50,10));Reproduce with real datadf = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv') df.shape pd.options.display.max_columns = 500 df.head() df.sample().T df.timestamp = pd.to_datetime(df.timestamp) df.timestamp.describe() df = df.set_index('timestamp') df.head() df['2017-08-29'] df[df.category == 'IMDb users'] lastday = df['2017-08-29'] lastday[lastday.category == 'IMDb users'].tail() final = df.tail(1) columns = [str(i) + '_pct' for i in range(1,11)] data = final[columns].T data.plot.bar(); data.index = range(1,11) ax = data.plot.bar(color='C1', width=0.9, legend=False) 
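# Apply the same title, subtitle, and axis styling used on the fake-data prototype above,
# this time on the real IMDb rating percentages.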
ax.tick_params(labelrotation=0) ax.text(x=-1.2, y=45.8, s="'An Inconvenient Sequal: Truth to Power' is divisive", fontsize= 15, fontweight='bold'); ax.text(x=-0.9, y=43, s='IMBd ratings for the films as of Aug.29',fontsize=12); ax.set(xlabel='Rating', ylabel='Percent of Total Votes', yticks=range(0,50,10));ASSIGNMENTReplicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). STRETCH OPTIONS Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/).If you aren't sure what to choose, try:- the chart titled ["Men dominated Al Gore's IMDb movie rating"](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/)- or the tutorial, [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/)Other options include:- [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) (try the [`altair`](https://altair-viz.github.io/gallery/index.htmlmaps) library)- [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) (try the [`statsmodels`](https://www.statsmodels.org/stable/index.html) library)- or another example of your choice! Make more charts!Choose a chart you want to make, from [FT's Visual Vocabulary poster](http://ft.com/vocabulary).Find the chart in an example gallery of a Python data visualization library:- [Seaborn](http://seaborn.pydata.org/examples/index.html)- [Altair](https://altair-viz.github.io/gallery/index.html)- [Matplotlib](https://matplotlib.org/gallery.html)- [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html)Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes.Take notes. Consider sharing your work with your cohort!candy = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/candy-power-ranking/candy-data.csv') candy.shape candy.head() candy.winpercent = candy.winpercent / 100 candy['chocolate_and_caramel'] = candy.chocolate + candy.caramel candy[candy.chocolate == 1].winpercent.mean() candy[candy.caramel == 1].winpercent.mean() candy.pivot_table(index='chocolate_and_caramel', values='winpercent') mappings = {0:0, 1:0, 2:1} candy.chocolate_and_caramel = candy.chocolate_and_caramel.map(mappings) candy.pivot_table(index='chocolate_and_caramel', values='winpercent') # i want to create my own little deck of cards library to help get an intuition for OOP import random class Card(object): def __init__(self, value, suit): self.value = value self.suit = suit def show(self): print('{} of {}'.format(self.value,self.suit)) class Deck(object): def __init__(self): self.cards = [] self.build() def build(self): for s in ['spades','clubs','diamonds','hearts']: [self.cards.append(Card(v,s)) for v in range(1,14)] def show(self): [c.show() for c in self.cards] def shuffle(self): for i in range(len(self.cards)-1, 0, -1): r = random.randint(0,i) self.cards[i], self.cards[r] = self.cards[r], self.cards[i] def drawCard(self): return self.cards.pop() class Player(object): def __init__(self, name): self.name = name self.hand = [] def draw(self, deck): self.hand.append(deck.drawCard()) return self # return allows us to chain these draw methods. 
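# showHand prints every card currently in the player's hand, reusing Card.show()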
def showHand(self): [c.show() for c in self.hand] deck = Deck() mac = Player('mac') deck.shuffle() mac.draw(deck) mac.draw(deck).draw(deck) mac.showHand() # could now make a blackjack game or something but this gave me a better intuitionPolynomial InterpolationWe know that if we have a set of $n$ data points with coordinates $(x_1; y_1), (x_2; y_2), \dots, (x_n; y_n)$, we can try to figure out what function may have generated these points.Please note that **our assumptions about the data** will lead us to choosing one function over another. This means that our results are as good as our data and assumptions. Therefore, it's extremely important that we write down our assumptions (which sometimes can be difficult as we sometimes don't realize we're making them). It will be better for our readers if they know what those assumptions and models are.In this case, we'll state two assumptions:1. The points in our dataset are generated by a polynomial function2. The points are very precise, there is absolutely no error in them. This means that the function should pass **through every point**This method is called *polynomial interpolation* (*"polynomial"* captures assumption 1 and *"interpolation"* captures assumption 2).It can be proved (look at [Wikipedia](https://en.wikipedia.org/wiki/Polynomial_interpolation) for example) that if we have $n$ data points, there is only one polynomial of degree $n-1$ which passes through them. In "math speak": "the vector spaces of $n$ points and polynomials of degree $n-1$ are isomorphic (there exists a bijection mapping one to the other)".There are a lot of ways to do interpolation. We can also write the function ourselves if we want but this requires quite a lot more knowledge than we already covered in this course. So we'll use a function which does this for us. `numpy.polyfit()` is one such function. It accepts three main parameters (there are others as well, but they are optional): a list of $x$ coordinates, a list of $y$ coordinates, and a polynomial degree.Let's say we have these points:```pythonpoints = np.array([(0, 0), (1, 0.8), (2, 0.9), (3, 0.1), (4, -0.8), (5, -1.0)])```First, we need to "extract" the coordinates:```pythonx = points[:, 0]y = points[:, 1]```Then, we need to calculate the interpolating polynomial. For the degree, we'll set $n-1$:```pythoncoefficients = np.polyfit(x, y, len(points) - 1)poly = np.poly1d(coefficients)```After that, we need to plot the function. To do this, we'll create a range of $x$ values and evaluate the polynomial at each value:```pythonplot_x = np.linspace(np.min(x), np.max(x), 1000)plot_y = poly(plot_x)```Finally, we need to plot the result. We'll plot both the fitting polynomial curve (using `plt.plot()`) and the points (using `plt.scatter`). It's also nice to have different colors to make the line stand out from the points.```pythonplt.plot(plot_x, plot_y, c = "green")plt.scatter(x, y)plt.xlabel("x")plt.ylabel("y")plt.show()```Don't forget to label the axes!Your task now is to **wrap the code in a function**. It should accept a list of points, the polynomial degree, min and max value of $x$ used for plotting. 
**Be extremely careful to ensure that the function uses its parameters!**We'll use this function to try some other cases.import numpy as np import numpy.polynomial.polynomial as p import matplotlib.pyplot as plt """ Polynomial Interpolation - Interpolates a polynomial of the specified degree through the given points and plots it points - a list of points (x, y) to plot degree - the polynomial degree min_x, max_x - range of x values used to plot the interpolating polynomial """ def interpolate_polynomial(points, degree, min_x, max_x): x = points[:, 0] y = points[:, 1] coefficients = np.polyfit(x, y, degree) polynomial = np.poly1d(coefficients) plot_x = np.linspace(min_x, max_x, 1000) plot_y = polynomial(plot_x) plt.plot(plot_x, plot_y, c = "green") plt.scatter(x, y) plt.xlabel("x") plt.ylabel("y") plt.show() pass points = np.array([(0, 0), (1, 0.8), (2, 0.9), (3, 0.1), (4, -0.8), (5, -1.0)]) interpolate_polynomial(points, len(points) - 1, np.min(points[:, 0]), np.max(points[:, 0]))We see this is a very nice fit. This is expected, of course. Let's try to expand our view a little. Let's try to plot other values of $x$, further than the original ones. This is **extrapolation**.interpolate_polynomial(points, len(points) - 1, -5, 10)Hmmm... it seems our polynomial goes a little wild outside the original range. This is to show how **extrapolation can be quite dangerous**.Let's try a lower polynomial degree now. We used 4, how about 3, 2 and 1?**Note:** We can add titles to every plot so that we know what exactly we're doing. Te title may be passed as an additional parameter to our function.interpolate_polynomial(points, 3, np.min(points[:, 0]), np.max(points[:, 0])) interpolate_polynomial(points, 2, np.min(points[:, 0]), np.max(points[:, 0])) interpolate_polynomial(points, 1, np.min(points[:, 0]), np.max(points[:, 0]))We see the fitting curves (or line in the last case) struggle more and more and they don't pass through every point. This breaks our assumptions but it can be very useful.Okay, one more thing. How about increasing the degree? Let's try 5, 7 and 10. Python might complain a little, just ignore it, everything is fine... sort of :).interpolate_polynomial(points, 5, np.min(points[:, 0]), np.max(points[:, 0])) interpolate_polynomial(points, 7, np.min(points[:, 0]), np.max(points[:, 0])) interpolate_polynomial(points, 10, np.min(points[:, 0]), np.max(points[:, 0]))Those graphs look pretty much the same. But that's the point exactly. I'm being quite sneaky here. Let's try to expand our view once again and see what our results really look like.interpolate_polynomial(points, 5, -10, 10) interpolate_polynomial(points, 7, -10, 10) interpolate_polynomial(points, 10, -10, 10)Now we see there are very wild differences. Even though the first two plots look quite similar, look at the $y$ values - they're quite different.So, these are the dangers of interpolation. Use a too high degree, and you get "the polynomial wiggle". These are all meant to represent **the same** data points but they look insanely different. Here's one more comparison.interpolate_polynomial(points, len(points) - 1, -2, 7) interpolate_polynomial(points, len(points) + 1, -2, 7)Now we can see what big difference even a small change in degree can make. This is why we have to choose our interpolating functions very carefully. Generally, a lower degree means a simpler function, which is to be preferred. 
See [Occam's razor](https://en.wikipedia.org/wiki/Occam%27s_razor).And also, **we need to be very careful about our assumptions**.points = np.array([(-5, 0.03846), (-4, 0.05882), (-3, 0.1), (-2, 0.2), (-1, 0.5), (0, 1), (1, 0.5), (2, 0.2), (3, 0.1), (4, 0.05882), (5, 0.03846)]) interpolate_polynomial(points, 10, np.min(points[:, 0]), np.max(points[:, 0]))NumPy (www.numpy.org)NumPy is important in scientific computing; it is coded both in Python and C (for speed). A few important features for Numpy are:a powerful N-dimensional array objectsophisticated broadcasting functionstools for integrating C/C++ and Fortran codeuseful linear algebra, Fourier transform, and random number capabilitiesNext, we will introduce Numpy arrays, which are related to the data structures.In order to use the Numpy module, we need to import it first. A conventional way to import it is to use “np” as a shortened name using```pythonimport numpy as np```Numpy has a detailed guide for users migrating from Matlab. Just google 'Numpy for Matlab Users'import numpy as npIf the previous line produces an error, then you need to install numpy.Please type```python!pip install numpy ```#to create an array, we use the numpy function array x = np.array([[1,2,3]]) xArrays are entered by rows; each row is defined as a list. To create a 2d array, simply use nested listsy = np.array([[1,2,3],[4,5,6]]) yThe arrays created with numpy are objects and have many attributes associated with them. For example, the shape of an array can be found with *shape*, and its size with *size*y.shape y.size x.shape x.sizeYou can access the elements in the array by index. There are multiple ways to access the elements in the arrayx[0], x[1],x[2] x[3] y[0],y[0][0],y[0][1],y[0][2] y[1],y[1][0],y[1][1],y[1][2] y[0,0],y[0,1],y[0,2],y[1,0],y[1,1],y[1,2]In this form, the first index represents the row and the second index represents the column.You can also use slices to obtain a section of the array:# What result will you obtain after this operation? y[:,:2] # What result will you obtain after this operation? y[:,-2:] # you can also access multiple rows or columns by index y[:,[0,1]] y[:,[0,2]]NumPy includes methods to generate arrays that have a structure. - *arange* -> generates arrays that are in order and evenly spaced,- *linspace* -> generates an array of n equally spaced elements starting from a defined beginning and end points# np.arange requires three parameters: # The starting point, the end point, and the increment # NOTE: the end point is not inclusive np.arange(0.5, 3, 0.5) large_array = np.arange(0,2000,1) large_array large_array.size # np.linspace requires three parameters: # The starting point, the end point, and # the number of elements # NOTE: the end point is inclusive np.linspace(0.5, 3, 6) np.linspace(0, 1999, 2000)NumPy includes some predefined arrays that can make your life easiernp.zeros((5,5)) np.zeros_like(y) np.zeros((3,)) np.zeros_like(x) np.ones((5, 5)) np.empty((5, 1)) np.empty((1,5)) np.empty((5))You can use the assignment operator to modify one or multiple elements in your array# if you don't provide the increment, np.arange will # use a default value of 1 a = np.arange(1, 7) a #to change the element in the index position 4, we can do a[4] = 10 a # to change the elements from 4 to the end we can do a[4:] = [45,32] a #python will let you know if you made a mistake a[4:] = [43,32,55] # exercise # to change the elements from index 2 to 5 (inclusive) to zero. # we can do??
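# note: a scalar broadcasts across the slice, so a[2:6] = 0 would also work;
# the answer below assigns an explicit array of zeros instead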
a[2:6] = np.zeros((1,4)) aExercise:Create a zero array b with shape 2 by 2, and set $$ 𝑏=\begin{bmatrix}1&2 \\ 3& 4\end{bmatrix}$$using array indexing.b = np.zeros((2,2)) b b[0,0] = 1 b[0,1] = 2 b[1,0] = 3 b[1,1] = 4 b[0] = [1,2] b[1] = [3,4] bNumPy has powerful broadcasting abilities. You can do mathematical operation with arrays of different sizes and NumPy will take care of the operation if possible Operations with scalarsb = np.array([[0,1],[2,3]]) c = 2 b+c b-c b*c b/c b**cOperations between arraysb = np.array([[0,1],[2,3]]) d = np.array([[4,5],[6,7]]) b+d b-d b*d b/d b**dThe *, /, and ** operations are operating on an element by element basis. Operations between arrays of different sizesb = np.array([[0,2],[3,4]]) d = np.array([[4],[5]]) b+dCan you explain what is going on?b-d b*d b/d b**dMatrix Multiplicationb = np.array([[0,1],[2,3]]) d = np.array([[4,5],[6,7]]) e = np.array([[4],[5]]) f = np.array([[4,5]]) b@d, np.matmul(b,d) b@e # NumPy will tell you when you make a mistake b@f # the .T atributes computes the transpose of a matrix # it has precedence over other operations b@f.TNumPy can also apply logical operations between arrays and scalars or between two arrays of the same sizex = np.array([1, 2, 4, 5, 9, 3]) y = np.array([0, 2, 3, 1, 2, 3]) x>3Python can index elements of an array that satisfy a logical expression.x[x>3] # you can also use multiple conditions x[np.logical_or(x<3,x>=5)] x[np.logical_and(x<=9,x>=5)]you can also use the assignment operator to modify an array based on conditionsy = x[x>3] y y[y>=9] = 0 yFeature engineering Nesta fase, será realizada a feature engineering. A partir dos insights obtidos na parte de análise exploratória, serão criadas as seguintes novas features:1. **Age:** categorizar de 10 em 10 anos e utilizando árvore de decisão. Aplicar log;2. **Capital gain e loss:** criação de uma indicadora se possui ou não;3. **hours per week:** aplicar log, categorizar utilizando árvore de decisão e criar uma indicadora se trabalha acima de 40 horas ou não4. **Race:** gerar duas novas indicadoras - se é branco ou não; se é branco, negro ou outras raças5. **native country:** gerar uma nova indicadora se é dos EUA ou não.6. Para as demais categóricas aplicar label ou ordinal encoder**Nos casos 1, 2 e 3**, como foram observados alguns outliers na análise exploratória, categorizar, ao invés de simplesmente excluir essas observações, é uma forma de não perder informação. Os valores extremos poderiam, também, ser imputados, entretanto como não há uma forma de obter um entendimento mais aprofundado do negócio, este método, bem como a exclusão, poderiam incorrer em substituição/perda de valores legítimos. Nos casos específicos das features **"capital-gain"** e **"capital-loss"**, como a grande maioria dos valores são zero, optou-se por apenas indicar se a pessoa possui ou não valor diferente de zero nestas variáveis.**Nos casos 4 e 5**, como foi observado que o número de observações em uma categoria era predominante em relação às outras, resolveu-se apenas binarizar as variáveis. Importando as libraries necessáriasimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import cross_val_score from sklearn import tree from sklearn.preprocessing import LabelEncoder, OrdinalEncoder from IPython.core.interactiveshell import InteractiveShell #‘all’, ‘last’, ‘last_expr’ or ‘none’, ‘last_expr_or_assign. 
Default 'last_expr' InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('max_colwidth', None)Carregando as bases de treino e testedf_train = pd.read_csv('df_train_cleaned.csv') df_train.shape df_train.head() df_train['income'].value_counts() df_test = pd.read_csv('df_test_cleaned.csv') df_test.shape df_test.head() df_test['income'].value_counts()Categorizando a variável "age" Realizando a categorização da variável "age" por faixas de 10 anos# Definindo uma função para categorizar em faixas de 10 anos e aplicando às bases de treino e teste def age_group(x): x = int(x) x = abs(x) if( 16 < x < 31 ): return 1 #"17-30" if( 30 < x < 41 ): return 2 #"31-40" if( 40 < x < 51 ): return 3 #"41-50" if( 50 < x < 61 ): return 4 #"51-60" if( 60 < x < 71 ): return 5 #"61-70" else: return 6 #"Maior que 70" df_train['age_group'] = df_train['age'].apply(age_group) df_test['age_group'] = df_test['age'].apply(age_group)Realizando a categorização da variável "age" pelo método tree basedO método tree based utiliza uma árvore de decisão para categorizar a variável preditora. Ele possui a vantagem de levar em conta a relação da variável preditora com a variável target ao sugerir as categorias, a desvantagem é que pode sugerir algumas categorias que não fazem sentido do ponto de vista negocial, então deve ser utlizado com cautela.# Definindo uma função para encontrar a profundidade ótima (número de categorias) de acordo com algumas métricas de performance. # Lógico que neste método pode-se tentar diversas variações tanto de parâmetros a serem testados quanto métricas de performance, # contudo isto pode incorrer em custo de processamento. def optimal_depth(dataframe, variavel, target, depth=[]): score_ls = [] # aqui serão guardados os valores de roc auc score_std_ls = [] # aqui serão guardados os valores de desvio padrão do roc_auc for tree_depth in depth: tree_model = DecisionTreeClassifier(max_depth=tree_depth, random_state = 0) scores = cross_val_score(tree_model, dataframe[variavel].to_frame(), dataframe[target], cv=3, scoring='roc_auc') score_ls.append(np.mean(scores)) score_std_ls.append(np.std(scores)) temp = pd.concat([pd.Series(depth), pd.Series(score_ls), pd.Series(score_std_ls)], axis=1) temp.columns = ['depth', 'roc_auc_mean', 'roc_auc_std'] return temp # Definindo uma função para categorizar a variável de acordo com a profundidade sugerida na função acima def tree_based_categ(df_train, df_test, variavel, target, depth=1, sufix = '_cat_tree'): clf = DecisionTreeClassifier(max_depth=depth, random_state = 0) clf.fit(df_train[variavel].to_frame(), df_train[target]) plt.figure(figsize=(7,7)) tree.plot_tree(clf, filled=True); plt.title("Decision tree para a variável "+variavel) plt.show() df_train['prob_1']=clf.predict_proba(df_train[variavel].to_frame())[:,1] df_test['prob_1']=clf.predict_proba(df_test[variavel].to_frame())[:,1] categorias = pd.concat([df_train.groupby(['prob_1'])[variavel].min().astype(float), df_train.groupby(['prob_1'])[variavel].max().astype(float)], axis=1).reset_index() cat_name = variavel+sufix categorias[cat_name] = categorias.index categorias if cat_name in df_train.columns: df_train = df_train.drop(columns = [cat_name]) else: pass if cat_name in df_test.columns: df_test = df_test.drop(columns = [cat_name]) else: pass df_train = pd.merge(df_train, categorias[['prob_1',cat_name]], left_on=['prob_1'], right_on=['prob_1'], how='left') df_test = pd.merge(df_test, categorias[['prob_1',cat_name]], 
left_on=['prob_1'], right_on=['prob_1'], how='left') df_train = df_train.drop(columns = ['prob_1']) df_test = df_test.drop(columns = ['prob_1']) display(categorias) df_train.groupby([cat_name])[target].count().plot.bar(rot=0) return df_train, df_testAplicando a função para definir a melhor profundidade **(número de categorias)**. No caso abaixo, será **selecionado o depth = 3**, pois após ele não existe um ganho tão alto de aucoptimal_depth(df_train, 'age', 'income', depth=[1,2,3,4,5,6])Aplicando, na base de treino, o depth ótimo selecionado e **categorizando a variável "age"** nas bases de treino e testedf_train, df_test = tree_based_categ(df_train, df_test, 'age', 'income', depth=3)**A partir dos resultados acima, resolveu-se realizar uma outra categorização na variável "age", conforme abaixo.**# Definindo uma função para categorizar em faixas de 10 anos e aplicando às bases de treino e teste def age_group_2(x): x = int(x) x = abs(x) if( 16 < x <= 35 ): return 1 #"17-35" if( 35 < x <= 61 ): return 3 #"36-61" else: return 2 #"Maior que 61" df_train['age_group_2'] = df_train['age'].apply(age_group_2) df_test['age_group_2'] = df_test['age'].apply(age_group_2) df_train["age_log"] = np.log(1 + df_train['age']) df_test["age_log"] = np.log(1 + df_test['age']) df_train.head()Criando a variável indicadora se possui ou não capital gain e capital loss# Definindo uma função para binarizar as variáveis e aplicando às bases de treino e teste def cont2dummy(x): x = int(x) x = abs(x) if( x == 0 ): return 0 else: return 1 df_train['capital_gain_dummy'] = df_train['capital-gain'].apply(cont2dummy) df_test['capital_gain_dummy'] = df_test['capital-gain'].apply(cont2dummy) df_train['capital_loss_dummy'] = df_train['capital-loss'].apply(cont2dummy) df_test['capital_loss_dummy'] = df_test['capital-loss'].apply(cont2dummy) df_train.head()Hours per week: aplicando log, categorizando com árvore de decisão e criando uma indicadora se trabalha acima de 40 horas ou não Aplicando logdf_train["Hours_per_week_log"] = np.log(1 + df_train['hours-per-week']) df_test["Hours_per_week_log"] = np.log(1 + df_test['hours-per-week'])Realizando a categorização pelo método tree basedAplicando a função para definir a melhor profundidade **(número de categorias)**. 
No caso abaixo, será **selecionado o depth = 2**, pois após ele não existe um ganho tão alto de aucoptimal_depth(df_train, 'hours-per-week', 'income', depth=[1,2,3,4,5,6])Aplicando, na base de treino, o depth ótimo selecionado e **categorizando a variável "hours-per-week"** nas bases de treino e testedf_train, df_test = tree_based_categ(df_train, df_test, 'hours-per-week', 'income', depth=2)Criando a indicadora se trabalha acima de 40h ou nãoO insight de criar esta feature veio após analisar que 75% das pessoas que ganham acima de 50K anuais trabalham acima de 40h semanais, enquanto que 50% das pessoas que ganham abaixo de 50K trabalham acima de 40h semanais.# Definindo uma função para binarizar a variável e aplicando às bases de treino e teste def hpw2dummy(x): x = int(x) x = abs(x) if( x <= 40 ): return 0 else: return 1 df_train['hours_per_week_acima40'] = df_train['hours-per-week'].apply(hpw2dummy) df_test['hours_per_week_acima40'] = df_test['hours-per-week'].apply(hpw2dummy) df_train.head()Race: gerando duas novas indicadoras - se é branco ou não; se é branco, negro ou outras raças Gerando a indicadora se é branco ou não# Definindo uma função para binarizar a variável e aplicando às bases de treino e teste def race2dummy(x): if( x == 'White' ): return 1 else: return 0 df_train['race_is_white'] = df_train['race'].apply(race2dummy) df_test['race_is_white'] = df_test['race'].apply(race2dummy)Gerando a indicadora se é branco, negro ou outras raças# Definindo uma função para gerar o label para a variável e aplicando às bases de treino e teste def race2label(x): if( x == 'White' ): return 3 if( x == 'Black' ): return 2 else: return 1 df_train['race_is_white_black'] = df_train['race'].apply(race2label) df_test['race_is_white_black'] = df_test['race'].apply(race2label) df_train.head()Native country: gerando uma nova indicadora se é dos EUA ou não# Definindo uma função para binarizar a variável e aplicando às bases de treino e teste def country2dummy(x): if( x == 'United-States' ): return 1 else: return 0 df_train['native_country_is_eua'] = df_train['native-country'].apply(country2dummy) df_test['native_country_is_eua'] = df_test['native-country'].apply(country2dummy) df_test.head()Encoding de variáveis categóricasA variável "education" não será codificada pois já existe a feature "education-num" que é sua representação codificada. Para as outras será aplicado o label encoder pois não se observa uma ordenação natural entre suas categorias.var2encoding = ['workclass', 'marital-status', 'occupation', 'relationship', 'sex'] df_train_enc = df_train.copy() df_test_enc = df_test.copy() le = LabelEncoder() for var in var2encoding: df_train_enc[var]=le.fit_transform(df_train_enc[var]) df_test_enc[var]=le.transform(df_test_enc[var])Codificando, também, a variável target. É uma boa prática codificar como 1 a categoria do evento (ganhar acima de 50K, em nosso estudo), uma vez que será nosso evento a ser estimado. 
Assim:def codtarget(x): if( x == '<=50K' ): return 0 else: return 1 df_train_enc['income'] = df_train_enc['income'].str.replace('.','') df_test_enc['income'] = df_test_enc['income'].str.replace('.', '') df_train_enc['income'] = df_train_enc['income'].apply(codtarget) df_test_enc['income'] = df_test_enc['income'].apply(codtarget) df_test_enc.head()Excluindo variáveis que não serão testadas no desenvolvimento do modelo, seja porque não fazem sentido ou porque já foram criadas outras que a representamvars_to_drop = ['age', 'fnlwgt', 'education', 'race', 'hours-per-week', 'native-country', 'capital-gain', 'capital-loss'] df_train_fe = df_train_enc.drop(vars_to_drop, axis=1) df_test_fe = df_test_enc.drop(vars_to_drop, axis=1) df_train_fe.shape df_train_fe.head()Realizando mais uma rodada de EDA com as novas features que foram criadasplt.hist(df_train_fe['age_log']) plt.show() plt.hist(df_train_fe['Hours_per_week_log']) plt.show()A partir dos resultados acima, se observa que, após a transformação logarítimica as distribuições das variáveis "age" e "hours per week" ajustaram um pouco mais, ficando mais próximas de uma distribuição normal, embora esta última ainda apresente um pouco mais de assimetria.InteractiveShell.ast_node_interactivity = "last_expr" def graph_dist (df, variavel): sns.countplot(x=df[variavel], hue = df['income'], palette='Greens_r', order = df[variavel].value_counts().index) plt.title('Distribuição da variável alvo por '+variavel, fontsize=14) plt.xticks(rotation = 90) plt.yticks() plt.legend() sns.set(rc={"figure.figsize": (15, 23)}) plt.subplot(5,2,1) graph_dist (df_train_fe, 'age_group') plt.subplot(5,2,2) graph_dist (df_train_fe, 'age_cat_tree') plt.subplot(5,2,3) graph_dist (df_train_fe, 'age_group_2') plt.subplot(5,2,4) graph_dist (df_train_fe, 'capital_gain_dummy') plt.subplot(5,2,5) graph_dist (df_train_fe, 'capital_loss_dummy') plt.subplot(5,2,6) graph_dist (df_train_fe, 'hours-per-week_cat_tree') plt.subplot(5,2,7) graph_dist (df_train_fe, 'hours_per_week_acima40') plt.subplot(5,2,8) graph_dist (df_train_fe, 'race_is_white') plt.subplot(5,2,9) graph_dist (df_train_fe, 'race_is_white_black') plt.subplot(5,2,10) graph_dist (df_train_fe, 'native_country_is_eua') plt.tight_layout() plt.show() def graph_perc (df, variavel): x,y = variavel, 'income' count_data = (df .groupby(x)[y] .value_counts(normalize=True) .mul(100) .rename('percentual') .reset_index()) sns.barplot(x=variavel, y="percentual", hue="income", data=count_data, palette='Greens_r') plt.title('Percentual de pessoas que ganham abaixo e acima de 50K de acordo com\n as categorias da variável '+variavel, fontsize=14) plt.xticks(rotation = 90) plt.yticks() plt.legend() sns.set(rc={"figure.figsize": (15, 23)}) plt.subplot(5,2,1) graph_perc (df_train_fe, 'age_group') plt.subplot(5,2,2) graph_perc (df_train_fe, 'age_cat_tree') plt.subplot(5,2,3) graph_perc (df_train_fe, 'age_group_2') plt.subplot(5,2,4) graph_perc (df_train_fe, 'capital_gain_dummy') plt.subplot(5,2,5) graph_perc (df_train_fe, 'capital_loss_dummy') plt.subplot(5,2,6) graph_perc (df_train_fe, 'hours-per-week_cat_tree') plt.subplot(5,2,7) graph_perc (df_train_fe, 'hours_per_week_acima40') plt.subplot(5,2,8) graph_perc (df_train_fe, 'race_is_white') plt.subplot(5,2,9) graph_perc (df_train_fe, 'race_is_white_black') plt.subplot(5,2,10) graph_perc (df_train_fe, 'native_country_is_eua') plt.tight_layout() plt.show()A partir dos resultados acima, nota-se que praticamente todas as categorias apresentam mais representatividade em relação às variáveis 
originais. Ainda, a maioria das novas features parecem promissoras em diferenciar os grupos dos que ganham acima e abaixo de 50K anuais . Realizando uma última visão geral nos dados e salvando para a fase de modelagemInteractiveShell.ast_node_interactivity = "all" df_train_fe.shape df_train_fe.head() df_test_fe.shape df_test_fe.head() df_train_fe.describe() df_test_fe.describe() # Distribuição de eventos e não eventos nas bases de treino e teste perc_target_train = df_train_fe.groupby(['income']).agg({'income': 'count'}).apply(lambda x:100 * x / float(x.sum()), axis=0).transpose() print("\033[1m" + '% de eventos e não eventos na base de treino'+ "\033[0m") perc_target_train perc_target_test = df_test_fe.groupby(['income']).agg({'income': 'count'}).apply(lambda x:100 * x / float(x.sum()), axis=0).transpose() print("\033[1m" +'% de eventos e não eventos na base de teste'+ "\033[0m"+ "\033[0m") perc_target_test df_train_fe.to_csv('df_train2modelling.csv', index=False) df_test_fe.to_csv('df_test2modelling.csv', index=False)2019-12-20-coffea-demo 1. IntroductionThis demo of the new Awkward Array was presented on December 20, 2019, before the final 1.0 version was released. Some interfaces may have changed. To run this notebook, make sure you have version 0.1.36 ([GitHub](https://github.com/scikit-hep/awkward-1.0/releases/tag/0.1.36), [pip](https://pypi.org/project/awkward1/0.1.36/)) by installing```bashpip install 'awkward1==0.1.36'```The basic concepts of Awkward arrays are presented on the [old Awkward README](https://github.com/scikit-hep/awkward-0.x/tree/0.12.17readme) and the motivation for a 1.0 rewrite are presented on the [new Awkward README](https://github.com/scikit-hep/awkward-1.0/tree/0.1.32readme).# The base of the GitHub repo is two levels up from this notebook. import sys import os sys.path.insert(0, os.path.join(os.getcwd(), "..", ".."))2. High-level array classThe biggest user-facing change is that, instead of mixing NumPy arrays and `JaggedArray` objects, the new Awkward has a single `Array` class for data analysis.import numpy as np import awkward1 as ak array1 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) array1 array2 = ak.Array([{"x": 0, "y": []}, {"x": 1, "y": [1.1]}, {"x": 2, "y": [1.1, 2.2]}]) array2The same `Array` class is used for all data structures, such as the array of lists in `array1` and the array of records in `array2`. (Incidentally, the width of that string representation is exactly large enough to fit into GitHub and StackOverflow text boxes without scrolling!)There won't be any user-level functions that apply to some data types and not others. The result of an operation is likely type-dependent, but its accessibility is not. (At this time, the only existing operations are conversions like `ak.tolist` and descriptions like `ak.typeof`.)ak.tolist(array1) ak.tojson(array1) ak.tolist(array2) ak.tojson(array2) ak.typeof(array1) ak.typeof(array2)Data types are described using the [datashape language](https://datashape.readthedocs.io/en/latest/). Some Awkward features are [not expressible](https://github.com/blaze/datashape/issues/237) in the current datashape specification, so they're expressed in an extension of the language using the same _style_ of syntax. 3. Low-level array classesThe old `JaggedArray` and `Table` are still available, but you have to ask for them explicitly with `layout`. 
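Both objects support the same element access; here is a minimal sketch of what integer indexing returns, assuming the behavior matches the indexing shown later in this demo (e.g. `array4[2]`):

```python
array1[0]   # expected: the first variable-length list, [1.1, 2.2, 3.3]
array2[0]   # expected: the first record, {"x": 0, "y": []}
```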
They're not "private" or "internal implementations" (there's no underscore in `layout`): they're public for frameworks like Coffea but hidden from data analysts.As such, their string representations have more low-level detail: the contents of indexes, rather than what they mean as high-level types. (The XML formatting is just an elaboration on Python's angle-bracket convention for `repr` and the fact that we need to denote nesting.)array1.layout array2.layoutThese classes are defined in C++ and wrapped by pybind11. The `awkward1.Array` class is pure Python. Many of the same operations work for layout classes, though less attention has been paid to its interface.ak.typeof(array1) ak.typeof(array1.layout) ak.tojson(array1) ak.tojson(array1.layout) array1.layout.tojson()4. Behavioral mix-insIn the original Awkward library, we added behaviors to objects and arrays of objects, like computing `pt` or boosting for arrays of Lorentz vectors, by letting structure classes such as `JaggedArray` multiply inherit from classes providing the implementations. That technique was not fully thought-through: it was easy to lose an array's "Lorentzness" when slicing it or performing other operations. It also relies on a Python language feature that can't pass through C++.It has since become clear that behavioral mix-ins aren't an obscure use-case but a primary one, so its implementation requires more thought. Adding behaviors to arrays is now a "first-class feature," built into the array types themselves.class PointClass(ak.Record): def __repr__(self): return "".format(self["x"], self["y"]) def mag(self): return abs(np.sqrt(self["x"]**2 + self["y"]**2)) ak.namespace["Point"] = PointClass array3 = ak.Array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]) array3 array3.layout.typeTypes can have arbitrary parameters, which modify their meaning. These types are JSON-encoded and passed through C++ or wherever the arrays get sent.pointtype = array3.layout.type pointtype["__class__"] = "Point" pointtype pointtype["__str__"] = "PointType[{}, {}]".format(pointtype.field("x"), pointtype.field("y")) pointtype # There will be a better interface for assigning types... array4 = ak.Array(array3.layout, type=ak.ArrayType(pointtype, len(array3.layout))) array4 [x.mag() for x in array4]The elements of this array are `PointClass` instances because the `__class__` parameter is `"Point"`, a name that is recognized in Awkward's class namespace. The global namespace is in `ak.namespace`, but custom ones can be passed into the `Array` constructor to turn on/off or change behaviors.ak.namespaceAs you can see, variable-length strings are also implemented as mix-ins. Apart from this type annotation, a string is just a jagged array of 8-bit integers.array5 = ak.Array(["Daisy", "Daisy", "give", "me", "your", "answer", "do."]) array5 array5.layout ak.tolist(array5.layout) # Slice it! ak.tolist(array5.layout[:, 1:]) # Slice it! array5[:, 1:]Like all behavioral mix-ins, the string interpretation is _only_ applied in the high-level `Array` view, not the layout classes. Thus, a C++ function that generates or uses jagged arrays of Lorentz vectors (e.g. a nice FastJet interface?) does not depend on Python. It only has to manipulate a map of strings.The old Awkward also had an `ObjectArray`, which generated Python objects on demand, such as individual Lorentz vectors, and these had to have the same set of methods as arrays of Lorentz vectors. Keeping those coordinated was difficult. 
Now, however, the individual objects don't disinherit from the Awkward arrays they come from: the strings above are merely a view (which is why the slice worked). Instead of `Methods` and `ObjectArrays`, we now have a unified mechanism.For instance, this `PointClass` object,array4[2]is still an Awkward `Record`.array4[2].layout5. Agreement with NumPyAwkward array represents a superset of NumPy's core, so it must return the same results as NumPy. This was tricky in the old Awkward, when we restricted ourselves to vectorized functions, and this led to hidden limitations: slices were limited to depth `2`, concatenation was limited to `axis <= 1`, and `choose(n)` was limited to `n < 5`. But now that we can write compiled for loops, there are no such limitations.deepnumpy = np.arange(2*3*5*7).reshape(2, 3, 5, 7) deepawkward = ak.Array(deepnumpy) deepawkward deepnumpy[1:, :2, [4, 1, 1, -2], ::-1] deepawkward[1:, :2, [4, 1, 1, -2], ::-1] ak.tolist(deepnumpy[1:, :2, [4, 1, 1, -2], ::-1]) == ak.tolist(deepawkward[1:, :2, [4, 1, 1, -2], ::-1])6. Creating arraysA few of the examples above create arrays by passing them to the `Array` constructor. This is like old Awkward's `fromiter` function. In fact, new Awkward has a `fromiter` function, but it's implicitly called by the `Array` constructor.# Calls ak.fromiter, which converts rowwise → columnar data. ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) # Calls ak.fromjson, which deserializes. ak.Array("[[1.1, 2.2, 3.3], [], [4.4, 5.5]]") # Calls ak.fromnumpy, which views. nparray = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]) akarray = ak.Array(nparray) akarray nparray[0, 1] = 999 akarrayYou can also build these manually from the low-level layouts, but it's a lot of work!content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5])) offsets = ak.layout.Index64(np.array([0, 3, 3, 5], dtype=np.int64)) # match 64-bit to 64-bit to avoid copy listoffsetarray = ak.layout.ListOffsetArray64(offsets, content) listoffsetarray ak.Array(listoffsetarray)7. FillableArrayThe `fromiter` algorithm has been expanded into a builder interface, so that you can accumulate Awkward arrays gradually.builder = ak.FillableArray() for i in range(10): builder.beginrecord() builder.field("x") builder.real(np.random.normal()) builder.field("y") builder.beginlist() for j in range(np.random.poisson(2.5)): builder.integer(np.random.randint(0, 10)) builder.endlist() builder.endrecord() builderThis is not a regular array, but you can `snapshot` it to get one (and keep filling the `builder`). A `snapshot` does not copy array data: if you take several snapshots while filling, they _might_ share data. (And they _might_ not, if it has allocated new buffers to grow beyond its reserved space!)array6 = builder.snapshot() array6 ak.tolist(array6) ak.typeof(array6)The array that you produce can have nested structure, as shown above. 
The structure was determined by the order in which `builder` methods were called.You can write algorithms that build arrays as if you were printing out JSON: * call `beginlist()` instead of printing `"["`, * call `endlist()` instead of printing `"]"`, * call `beginrecord()` instead of printing `"{"`, * call `endrecord()` instead of printing `"}"`, * call `field(key)` instead of printing `"key":`, etc.deepbuilder = ak.FillableArray() def deepnesting(depth): if depth == 0: deepbuilder.integer(np.random.randint(0, 10)) else: deepbuilder.beginlist() for j in range(np.random.poisson(2.5)): deepnesting(depth - 1) deepbuilder.endlist() deepnesting(5) ak.tolist(deepbuilder.snapshot()) ak.typeof(deepbuilder) deepbuilder.snapshot().layoutBoth `fromiter` and `fromjson` are implemented using `FillableArray`, the latter using the RapidJSON C++ library for deserialization.# !wget https://scikit-hep.org/uproot/examples/HZZ.json hzz = ak.fromjson("HZZ.json") hzz for key in hzz.layout.keys(): print("{:18s} {}".format(key, hzz[key].type))jets 2421 * var * {"px": float64, "py": float64, "pz": float64, "E": float64, "id": bool} muons 2421 * var * {"px": float64, "py": float64, "pz": float64, "E": float64, "q": int64, "iso": float64} electrons 2421 * var * {"px": float64, "py": float64, "pz": float64, "E": float64, "q": int64, "iso": float64} photons 2421 * var * {"px": float64, "py": float64, "pz": float64, "E": float64, "iso": float64} MET 2421 * {"x": float64, "y": float64} MC_hadronic_b 2421 * {"px": float64, "py": float64, "pz": float64} MC_leptonic_b 2421 * {"px": float64, "py": float64, "pz": float64} MC_hadronicW_q 2421 * {"px": float64, "py": float64, "pz": float64} MC_hadronicW_qbar 2421 * {"px": float64, "py": float64, "pz": float64} MC_lepton 2421 * {"px": float64, "py": float64, "pz": float64, "pdgid": int64} MC_neutrino 2421 * {"px": float64, "py": float64, "pz": float64} num_PV 2421 * int64 trigger_isomu[...]The loop over Python objects or JSON nodes was moved from Python into C++, so it's faster. However, the implementation requires vtable-lookups (the type is discovered at runtime), so it's not _a lot_ faster. There's room for specialized methods when the type is known in advance. (See [src/libawkward/io/root.cpp](https://github.com/scikit-hep/awkward-1.0/blob/main/src/libawkward/io/root.cpp) for a ${\tt std::vector}^N{\tt}$ implementation.)In general, turning rowwise data into columnar data is about 10× faster than it used to be.import awkward as oldawkward import json aslist = json.load(open("HZZ.json")) * 10 asjson = json.dumps(aslist) %%timeit -r 3 ak.fromiter(aslist) # new fromiter %%timeit -r 3 oldawkward.fromiter(aslist) # old fromiter %%timeit -r 3 ak.fromjson(asjson) # new fromjson %%timeit -r 3 oldawkward.fromiter(json.loads(asjson)) # old equivalent of fromjson2.29 s ± 97.2 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)8. Awkward arrays in NumbaOne of the motivating goals of the Awkward re-write was to incorporate Numba on the same footing.import numba @numba.jit(nopython=True) def muon_sumpt(events): out = np.zeros(len(events), np.float64) i = 0 for event in events: for muon in event["muons"]: out[i] += np.sqrt(muon["px"]**2 + muon["py"]**2) i += 1 return out hzz = ak.Array(json.load(open("HZZ.json")) * 100).layout muon_sumpt(hzz)Notice that we can write for loops on event _records_ and muon _records_. 
We don't have to take apart `JaggedArrays` and write algorithms on offsets and indexes.Incidentally, my first version of the above raised segfaults because the `i += 1` was in the inner loop, rather than the outer loop (indentation error). Since it's Numba, I could debug it by running the pure Python version.muon_sumpt.py_func muon_sumpt.py_func(hzz) %%timeit -r 3 muon_sumpt(hzz) # in Numba %%timeit -r 1 muon_sumpt.py_func(hzz) # pure Python11.4 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)So, a 50× speedup over Python without changing any code. Debug in Python, accelerate with Numba. Awkward arrays could be a benefit to Numba users in general: Numba can handle complex data types by converting Python objects to and from equivalent structs, but that puts a translation burden at the entry and exit of every Numba function. Awkward leaves the data in the same form (big array buffers), transforming only its handles to the data. (JSON → Awkward arrays → Numba could become a useful workflow in industry.) Side note...It looks like there's a performance bug in the current implementation: if we remove all particles but muons from what we pass through Numba, we see a 3× speedup relative to leaving them in that scales with the size of the dataset (100× vs 1000×). That shouldn't happen: unused fields are supposed to be ignored in the compiled code. Once everything is operational, we'll investigate these performance issues.%%timeit -r 3 muon_sumpt(hzz.astype(None)[["muons"]]) # in Numba, passing only muons through %%timeit -r 3 muon_sumpt(hzz.astype(None)) # in Numba, passing everything through232 ms ± 5.31 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)9. FillableArray in NumbaThe example above took an Awkward array into a Numba function and did some processing on it. To write data out, we can use `FillableArrays`.@numba.jit(nopython=True) def make_data(builder): for i in range(10): builder.beginrecord() builder.field("x") builder.real(i*1.1) builder.field("y") builder.beginlist() for j in range(i): builder.integer(j) builder.endlist() builder.endrecord() return builder builder = ak.layout.FillableArray() make_data(builder) builder.snapshot() ak.tolist(builder.snapshot())Since you can walk over data structures and create data structures (and later, assign fields to datasets like the old `Table`), you have complete freedom to manipulate data * at compiled-code speeds, * without having to leave the Python environment, * without having to rethink your algorithm in terms of array-at-a-time functions.(This _supplements_ the array-at-a-time approach introduced last year.) 10. Awkward arrays in C++Since everything has been implemented in C++, Awkward 1.0 can be used in C++ programs. 
More importantly, we will (someday) be able to create Awkward arrays in C++ and access them in Python or vice-versa.open("test-program.cpp", "w").write(""" #include <iostream> #include "awkward/fillable/FillableArray.h" #include "awkward/fillable/FillableOptions.h" namespace ak = awkward; int main(int, char**) { ak::FillableArray builder(ak::FillableOptions(1024, 2.0)); for (int i = 0; i < 10; i++) { builder.beginrecord(); builder.field_fast("x"); // (field_fast means don't check the whole string, just its pointer) builder.real(i*1.1); builder.field_fast("y"); builder.beginlist(); for (int j = 0; j < i; j++) { builder.integer(j); } builder.endlist(); builder.endrecord(); } std::cout << builder.snapshot()->tojson(false, 1) << std::endl; return 0; } """) import pygments.formatters import pygments.lexers.c_cpp print(pygments.highlight(open("test-program.cpp").read(), pygments.lexers.c_cpp.CppLexer(), pygments.formatters.Terminal256Formatter())) !g++ -I../../include -L../../awkward1 test-program.cpp -lawkward-static -lawkward-cpu-kernels-static -o test-program !./test-program[{"x":0.0,"y":[]},{"x":1.1,"y":[0]},{"x":2.2,"y":[0,1]},{"x":3.3,"y":[0,1,2]},{"x":4.4,"y":[0,1,2,3]},{"x":5.5,"y":[0,1,2,3,4]},{"x":6.6,"y":[0,1,2,3,4,5]},{"x":7.7,"y":[0,1,2,3,4,5,6]},{"x":8.8,"y":[0,1,2,3,4,5,6,7]},{"x":9.9,"y":[0,1,2,3,4,5,6,7,8]}]11. Identities: database-like index for arraysIn the [PartiQL toy language](https://github.com/jpivarski/PartiQL#readme), it became apparent that set operations, in which unique records are identified by reference, rather than by value, are important. They provide such operations as joins and lossless unions.No set operations have been implemented, but implementing them will require an index that tracks particle identities through all other operations. This concept of an index is the primary distinction between an array library like NumPy and a relational library like Pandas. In Awkward, this index is called an `Identity` and can optionally be attached to arrays.**Note:** this interface is the most likely to change. Identities have only been implemented at this early stage so that they don't have to be painfully retrofitted later.hzzlayout = ak.fromjson("HZZ.json").layout hzzlayout.setid() hzzlayout.id hzzlayout.field("muons").content.field("px").id np.asarray(hzzlayout.field("muons").content.field("px").id)An `Identity` is a 2-dimensional array with the same structure as a Pandas row `MultiIndex` with a `fieldloc` for the nested columns. They're equivalent to paths from root (wherever you called `setid`) to the element in question.hzzlayout[1000, "muons", 1].locationAs a nice side-effect of having indexes, we can give better error messages about where an indexing error occurs. You might use `Identities` just for debugging.# Indexing error with an Identity: try: hzzlayout[1000, "muons", 2] except Exception as err: print(err) # Indexing error without an Identity: try: ak.fromjson("HZZ.json").layout[1000, "muons", 2] except Exception as err: print(err)in ListArray64 attempting to get 2, index out of rangeWhen the array goes through any kind of transformation, such as the boolean filter below, the `Identity` is similarly selected.mask = np.random.randint(0, 100, len(hzzlayout)) == 0 mask selected = hzzlayout[mask] np.asarray(selected.id)Section 1: Business UnderstandingFor this project, I was interested in using **Seattle Airbnb Data from 2016** to better understand:1. What kind of accommodations may I book?2. Which period of the year has the highest number of listings? 
By how much do prices spike?3. What are the cheapest and most expensive neighborhoods?4. What are the factors that most impact the price?You can find the full set of files related to the analyses at: https://www.kaggle.com/airbnb/seattle/data.Also, check out my medium post at: https://matsuch.medium.com/can-you-guess-the-best-time-to-visit-seattle-24025ab7da70 Opening libraries#import linear algebra and data manipulation import numpy as np import pandas as pd from collections import defaultdict #Plot import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import matplotlib.patches as mpatches #Machine Learning from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error #ignore warnings import warnings warnings.filterwarnings('ignore')Information gathering#opening dataframes df_review = pd.read_csv('./data/reviews.csv') df_listings = pd.read_csv('./data/listings.csv') df_calendar = pd.read_csv('./data/calendar.csv')Section 2: Data Understanding#============================================================================== #Splitting dataframes into categories - Host, Review, quarto, bairro #============================================================================== host = df_listings[['host_id','host_is_superhost', 'host_response_rate', 'host_response_time']] review = df_listings[['id','number_of_reviews', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value']] quarto = df_listings[['room_type', 'bathrooms', 'bedrooms', 'beds', 'bed_type', 'accommodates','property_type']] bairro = df_listings[['neighbourhood', 'neighbourhood_cleansed','neighbourhood_group_cleansed']] #============================================================== #Data treatment - NaN values and convert object columns into floats #============================================================== #host print('before % of null values : \n', host.isnull().sum()/len(host)*100, '\n') #% NaN values #transform host_is_superhost into float host['host_is_superhost'] = host['host_is_superhost'].map({'f':1,'t':0}) #map f and t to 1 and 0 #Transform the response_rate column into str and float to find the mean host['host_response_rate_num'] = host['host_response_rate'].astype(str) host['host_response_rate_num'] = host['host_response_rate_num'].str.replace("%", "").astype("float") #Change response_time into float and fill with the mean host['host_response_time'] = host['host_response_time'].map({'within a few hours':6, 'within an hour':1, 'within a day':24, 'a few days or more':48}) #Below I was trying to fill all nan values in response_rate and response_time based on the host's behavior #Since it would be the same person, I assumed that the responses would be close for each one; #however, the results were not as expected. 
#fill host activity by its mean host['host_response_rate_num'] = host['host_response_rate_num'].fillna(host.groupby(['host_id']) ['host_response_rate_num'].transform('mean')) host['host_response_time'] = host['host_response_time'].fillna(host.groupby(['host_id']) ['host_response_time'].transform('mean')) print('after % of null values : \n', host.isnull().sum()/len(host)*100, '\n') #% NaN values #Tried to fill the nan values with the mean; however, since that doesn't work, I decided to drop those rows to keep a complete dataset host = host.drop(['host_response_rate'], axis = 1) #drop the old response rate column hostnew = host.dropna() #drop the remaining nan values, since the new df has 86% of the original size print('Size comparison between the old and new host df :', len(hostnew)/len(host), '\n')before % of null values : host_id 0.000000 host_is_superhost 0.052383 host_response_rate 13.698271 host_response_time 13.698271 dtype: float64 after % of null values : host_id 0.000000 host_is_superhost 0.052383 host_response_rate 13.698271 host_response_time 13.698271 host_response_rate_num 13.698271 dtype: float64 Size comparison between the old and new host df : 0.8630172865374541Note: I tried to fill nan values with the mean of each id, however it did not make sense. I was looking at the review dataset and noticed that each id is unique. I decided to drop all nan values since the new df still has 86% of the original df size#review #fill all columns with the mean - new approach ''' review['review_scores_value'] = review.groupby('id')['review_scores_value'].transform(lambda x: x.fillna(x.mean())) review['review_scores_rating'] = review.groupby('id')['review_scores_rating'].transform(lambda x: x.fillna(x.mean())) review['review_scores_accuracy'] = review.groupby('id')['review_scores_accuracy'].transform(lambda x: x.fillna(x.mean())) review['review_scores_cleanliness'] = review.groupby('id')['review_scores_cleanliness'].transform(lambda x: x.fillna(x.mean())) review['review_scores_checkin'] = review.groupby('id')['review_scores_checkin'].transform(lambda x: x.fillna(x.mean())) review['review_scores_communication'] = review.groupby('id')['review_scores_communication'].transform(lambda x: x.fillna(x.mean())) review['review_scores_location'] = review.groupby('id')['review_scores_location'].transform(lambda x: x.fillna(x.mean())) #NEED TO FIND A FASTER WAY TO PERFORM THIS ''' print('Sum of nan values per column: \n',review.isnull().sum()) print('number of listings with no reviews: ', (review['number_of_reviews'] == 0).sum())Sum of nan values per column: id 0 number_of_reviews 0 review_scores_rating 647 review_scores_accuracy 658 review_scores_cleanliness 653 review_scores_checkin 658 review_scores_communication 651 review_scores_location 655 review_scores_value 656 dtype: int64 number of listings with no reviews: 627I tried to fill NaN values with the mean of each id, however it does not make sense: looking at the review dataset, each id is unique, so I decided to drop all NaN values. For the purpose of analysing their influence on the review score, NaN values would have a negative impact on the results.
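A quick way to verify this reasoning is sketched below (not part of the original analysis; it only reuses the `review` DataFrame defined above):

```python
# Hypothetical sanity check: if every id is unique, a per-id groupby mean
# cannot fill anything, so dropping the NaN rows is the remaining option.
print(review['id'].is_unique)                        # True -> groupby('id').mean() fills nothing
review_clean = review.dropna()
print('rows kept after dropna:', len(review_clean) / len(review))
```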
#quarto print(quarto.isnull().sum()) #count null columns quartonew=quarto.dropna() #drop null values #bairro print(quarto.isnull().sum()) #count null columns bairronew=bairro.dropna() ##drop null values, less than 10% - most of the analysis here needs neighbourhood informationroom_type 0 bathrooms 16 bedrooms 6 beds 1 bed_type 0 accommodates 0 property_type 1 dtype: int64 room_type 0 bathrooms 16 bedrooms 6 beds 1 bed_type 0 accommodates 0 property_type 1 dtype: int64Section 3: Data Preparation#=============== #Data treatment #============== #create new df df_novo = pd.concat((review, quartonew, hostnew, bairronew), axis=1) #split date information into year and month, drop the original date column df_calendar['date'] = pd.to_datetime(df_calendar['date'], format= '%Y/%m/%d') #set date as datetime df_calendar['ano'] = df_calendar['date'].dt.year #create a year column df_calendar['mês'] = df_calendar['date'].dt.month #create a month column df_calendar.drop(['date'], axis=1, inplace=True) #drop the old date column df_novo.rename(index=str, columns={'id': 'listing_id'}, inplace=True) #change the 'id' column name to be the same as the calendar df = pd.merge(df_calendar, df_novo, on='listing_id') #merge calendar and the new dataframe #Compare the listing_id size and unique count print('listing_id size: ', len(df['listing_id'])) #count how many rows the listing_id column has print('Unique values listing_id', df['listing_id'].nunique()) #count how many unique values the listing_id column has #Looking at the data we can see that the same listing sometimes has a price and sometimes doesn't. #I assumed that a listing shouldn't vary its price too much, #so I grouped by listing_id and filled the nan values with that listing's mean price. df['price'] = df['price'].str.replace("[$, ]", "").astype("float") #replacing $ by "" and converting the price column into float df['price'] = df['price'].fillna(df.groupby(['listing_id'])['price'].transform('mean')) #fill nan prices with the mean by grouping on listing_id print('NaN Price Values representative: ', df['price'].isnull().sum()/len(df)*100) #Dropping the remaining NaN price values because they represent only 2.48% of the df size df = df.dropna(subset=['price']) #drop all listings with no price df.sort_values('price', ascending=False) print('listing_id size: ', len(df['listing_id'])) print('Unique values listing_id', df['listing_id'].nunique()) df['price'] = df['price'].fillna(df.groupby(['listing_id'])['price'].transform('mean')) df.isnull().sum()/len(df)*100listing_id size: 1358895 Unique values listing_id 3723Section 4: Modeling#===================== #linearization process #===================== #turn categorical columns into dummies cat_col = list(df.select_dtypes(include=['object']).columns) def create_dummy_df(df, cat_cols, dummy_na): ''' INPUT: df - pandas dataframe with categorical variables you want to dummy cat_cols - list of strings that are associated with names of the categorical columns dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not OUTPUT: df - a new dataframe that has the following characteristics: 1. contains all columns that were not specified as categorical 2. removes all the original columns in cat_cols 3. dummy columns for each of the categorical columns in cat_cols 4. if dummy_na is True - it also contains dummy columns for the NaN values 5. 
Use a prefix of the column name with an underscore (_) for separating ''' for col in cat_cols: df = pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na)], axis=1) return df #Processo de linearização #1. Drop the rows with missing response values df_m = df.dropna(subset=['price']) #2. Drop columns with Nan for all the values df_m = df_m.dropna() #3 Apply dummy_df df_m = create_dummy_df(df_m,cat_col,dummy_na=True) #4 Split data into X matriz and response vector y X = df_m.drop(['price'], axis=1) y = df_m['price'] #5 Split into train and test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .3, random_state=42) lm_model = LinearRegression(normalize=True) # Instantiate lm_model.fit(X_train, y_train) #Fit #Predict using your model y_test_preds = lm_model.predict(X_test) y_train_preds = lm_model.predict(X_train) #Score using your model test_score = r2_score(y_test, y_test_preds) train_score = r2_score(y_train, y_train_preds) #Print training and testing score print("Training data R²: {:.4f}. Test data R²: {:.4f}.".format(train_score, test_score))Training data R²: 0.6743. Test data R²: 0.6767.Section 5: Evaluate the Results Question 1: What kind of accommodations may I book?#====================================== #which are the most requested room types #====================================== #Creating list with the room type counting (there are only 3 types) room = [df_listings['room_type'].value_counts()[0], df_listings['room_type'].value_counts()[1], df_listings['room_type'].value_counts()[2]] room_series = pd.Series(room) #Unique room types room_type = df_listings['room_type'].unique().tolist() room_type_series = pd.Series(room_type) #plot plt.figure(figsize=(10,10)) ax = room_series.plot(kind='bar') ax.set_title('Most requested room types') ax.set_xlabel('Room type') ax.set_ylabel('Number of requests') ax.set_xticklabels(room_type) def add_value_labels(ax, spacing=5): """Add labels to the end of each bar in a bar chart. Arguments: ax (matplotlib.axes.Axes): The matplotlib object containing the axes of the plot to annotate. spacing (int): The distance between the labels and the bars. """ # For each bar: Place a label for rect in ax.patches: # Get X and Y placement of label from rect. y_value = rect.get_height() x_value = rect.get_x() + rect.get_width() / 2 # Number of points between bar and label. Change to your liking. space = spacing # Vertical alignment for positive values va = 'bottom' # If value of bar is negative: Place label below bar if y_value < 0: # Invert space to place label below space *= -1 # Vertically align label at top va = 'top' # Use Y value as label and format number with one decimal place label = "{:.0f}".format(y_value) # Create annotation ax.annotate( label, # Use `label` as label (x_value, y_value), # Place label at end of the bar xytext=(0, space), # Vertically shift label by `space` textcoords="offset points", # Interpret `xytext` as offset in points ha='center', # Horizontally center label va=va) # Vertically align label differently for # positive and negative values. # Call the function above. All the magic happens there. add_value_labels(ax) plt.savefig('Most requested room type.png')These are the most requested room types around Seattle, with more than 50% being "Entire Home/apt" Question 2: Which period of the year has the highest number of listings? 
By how much do prices spike?#loop to count how many listings are available per month hospedagens_mensais = pd.Series([12]) for i in range(1, 13): hospedagens_mensais[i] = len(df[(df['mês'] == i) & (df['ano'] == 2016)]['listing_id'].unique()) #count only if the year is 2016 hospedagens_mensais = hospedagens_mensais.drop(0) #drop the placeholder value at index 0 meses = ['Jan', 'Feb', 'Mar', 'Apr', 'May' , 'Jun' , 'Jul' , 'Aug' , 'Sep' , 'Oct' , 'Nov' , 'Dec'] preco_mensal = df.groupby('mês')['price'].mean() #group by month and take the mean price #plot - Price x Listings - per month preco_mensal = df.groupby('mês')['price'].mean() plt.subplots(figsize = (15,10)) ax = plt.gca() sns.pointplot(x = meses, y = hospedagens_mensais, color='black',linestyles=['-']) ax2=plt.twinx() sns.pointplot(x = meses, y = preco_mensal, color='black', linestyles=['--']) plt.legend(labels=['AVG Monthly Price', 'Listings']) plt.title('Listings in Seattle, 2016') plt.ylabel('Avg Price') plt.xlabel('Months') plt.savefig('listings x price.png')From the chart above, April is the month with the largest number of listings and the most expensive time to visit Seattle.#======================================== #histogram of all listings with mean price #======================================== df_hist=df.groupby('listing_id')['price'].mean() plt.figure(figsize=(15,10)) plt.hist(df_hist,bins=100,color='navy') plt.ylabel('Count') plt.xlabel('Listing Price, USD') plt.title('Listing Histogram') plt.savefig('Histograma de preços.png')As we can see in the chart above, the average price varies between USD 60 and USD 200 Question 3: What are the cheapest and most expensive neighborhoods?#========================================================== #Question - which are the most expensive neighbourhoods? 
#========================================================== preco_bairros = df.groupby('neighbourhood')['price'].mean().sort_values(ascending=False).reset_index(name='price') top5_bairros_caros_nome = preco_bairros['neighbourhood'][0:5].tolist() top5_bairros_caros_preco = preco_bairros['price'][0:5].tolist() top5_bairros_caros_qtd = [df.loc[df.neighbourhood == preco_bairros['neighbourhood'][0] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][1] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][2] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][3] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][4] , 'neighbourhood'].count()] df_top5 = pd.DataFrame(list(zip(top5_bairros_caros_nome,top5_bairros_caros_qtd,top5_bairros_caros_preco)),columns=['neighbourhood','listings','price']) df_top5 = df_top5.set_index('neighbourhood') fig = plt.figure(figsize=(10,10)) # Create matplotlib figure ax = fig.add_subplot(111) # Create matplotlib axes ax2 = ax.twinx() # Create another axes that shares the same x-axis as a width = .3 df_top5.listings.plot(kind='bar',color='darkorange',ax=ax,width=width, position=0,legend=True) df_top5.price.plot(kind='bar',color='navy', ax=ax2,width = width,position=1,legend=True) ax.set_ylabel('listings') ax2.set_ylabel('avg price') ax.legend(loc='upper center') fig.autofmt_xdate(rotation=45) ax.set_xlim(-1,5) fig.savefig('Top 5 - most expensive neighbourhoods - AVG price x Listing.png') max_n_price = df_top5['price'].max() min_n_price = df_top5['price'].min() mean_n_price = df_top5['price'].mean() price_diff = max_n_price - min_n_price print('Expensive neighbourhoods - Highest AVG Price: ',"$%.0f" % max_n_price) print('Expensive neighbourhoods - Lowest AVG Price: ',"$%.0f" % min_n_price) print('Avg price: ',"$%.0f" % mean_n_price) print('Price Diff: '"$%.0f" % price_diff) #====================================================== #Question - which are the most cheapest neighbourhoods? 
#====================================================== preco_bairros = df.groupby('neighbourhood')['price'].mean().sort_values(ascending=False).reset_index(name='price') size = len(preco_bairros.index) top5_bairros_caros_nome = preco_bairros['neighbourhood'][76:81].tolist() top5_bairros_caros_preco = preco_bairros['price'][76:81].tolist() top5_bairros_caros_qtd = [df.loc[df.neighbourhood == preco_bairros['neighbourhood'][size-5] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][size-4] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][size-3] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][size-2] , 'neighbourhood'].count(), df.loc[df.neighbourhood == preco_bairros['neighbourhood'][size-1] , 'neighbourhood'].count()] df_top5 = pd.DataFrame(list(zip(top5_bairros_caros_nome,top5_bairros_caros_qtd,top5_bairros_caros_preco)),columns=['neighbourhood','listing','price']) df_top5 = df_top5.set_index('neighbourhood') fig = plt.figure(figsize=(10,10)) # Create matplotlib figure ax = fig.add_subplot(111) # Create matplotlib axes ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax width = .3 df_top5.listing.plot(kind='bar',color='darkorange',ax=ax,width=width, position=0,legend=True) df_top5.price.plot(kind='bar',color='navy', ax=ax2,width = width,position=1,legend=True) teste = ['listing','Price'] ax.set_ylabel('listing') ax2.set_ylabel('Price') ax.legend(loc='upper center') fig.autofmt_xdate(rotation=45) ax.set_xlim(-1,5) fig.savefig('Top 5 - Bairros mais baratos - Preço x Disponibilidade.png') max_n_price = df_top5['price'].max() min_n_price = df_top5['price'].min() mean_n_price = df_top5['price'].mean() price_diff = max_n_price - min_n_price print('Cheapest neighbourhoods - Highest AVG Price: ',"$%.0f" % max_n_price) print('Cheapest neighbourhoods - Lowest AVG Price: ',"$%.0f" % min_n_price) print('Avg price: ',"$%.0f" % mean_n_price) print('Price Diff: '"$%.0f" % price_diff)Cheapest neighbourhoods - Highest AVG Price: $78 Cheapest neighbourhoods - Lowest AVG Price: $60 Avg price: $71 Price Diff: $18Probably most of the biggest places in Seattle are located in the "Most Expensive Neighbourhood" chart, while the smallest places are in the "Cheapest Neighbourhood" chart Question 4: What are the factors that most impact the price?#Heatmap area df2 = df.copy() df_fisico = df2[['bathrooms', 'bedrooms', 'beds', 'accommodates','price']] fig, ax = plt.subplots(figsize=(10,10)) sns.heatmap(df_fisico.corr(), annot=True, fmt='.2f',ax=ax, cmap="YlGnBu") plt.savefig('Heatmap - Space.png')The chart above behaves as expected: with more bathrooms, bedrooms and beds, the bigger the place and the higher the price. Accommodates shows the strongest influence on the price. 
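To read the strongest driver directly off the numbers rather than the colors, the same correlations can be ranked against price. This is a small sketch, not part of the original notebook, reusing `df_fisico` from the cell above:

```python
# Hypothetical follow-up: rank the physical features by their correlation with price.
price_corr = (df_fisico.corr()['price']
              .drop('price')
              .sort_values(ascending=False))
print(price_corr)  # per the discussion above, 'accommodates' should rank first
```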
SUPPORT CHARTS AND OTHER QUESTIONSdef contagem_total(df, col1, col2, procura): ''' INPUT: df - dataframe that you are searching in col1 - column that you go through col2 - column with the values that you want procura - list containing the items you are looking for in each row of df[col1] OUTPUT: new_df - the new dataframe that has the unique counting ''' new_df = defaultdict(int) #Loop over the list of items you want to search for for val in procura: #Loop for x in range(df.shape[0]): #if the item is found, add its count if val in df[col1][x]: new_df[val] += int(df[col2][x]) new_df = pd.DataFrame(pd.Series(new_df)).reset_index() new_df.columns = [col1, col2] return new_df amenities = ["TV","Internet","Wireless Internet","Cable TV","Air Conditioning","Kitchen", "Elevator in Building","Wheelchair Accessible","Smoke Detector","Free Parking on Premises","Pool", "Pets live on this property","Breakfast","Gym","Heating","Washer","Buzzer/Wireless Intercom","Smoking Allowed", "Carbon Monoxide Detector","Pets Allowed","Indoor Fireplace","Dryer","Dog(s)","Family/Kid Friendly","Hot Tub", "Cat(s)","Essentials","Shampoo","First Aid Kit","Doorman","Fire Extinguisher","Safety Card","Washer / Dryer", "Suitable for Events","Other pet(s)","Hangers","24-Hour Check-in","Laptop Friendly Workspace", "Lock on Bedroom Door","Hair Dryer","Iron"] #Create new_df based on the 'amenities' unique counting df_amen = df_listings['amenities'].value_counts().reset_index() df_amen.rename(columns={'index': 'Count'}, inplace=True) new_df = contagem_total(df_amen,'Count','amenities',amenities) new_df.set_index('Count',inplace=True) #=========== #amenities % #=========== perc = (new_df/new_df.sum()*100) fig = plt.figure(figsize=(20,10)) # Create matplotlib figure ax = fig.add_subplot(111) # Create matplotlib axes width = .5 perc.plot(kind='bar',color='navy',width=width,ax=ax, position=0,legend=False)This chart shows the % of listings that have each amenity. Internet, kitchen, smoke detector, heating, dryer and essentials are the most common amenities in Seattle.
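The per-amenity counts produced by `contagem_total` can also be approximated with vectorized string matching. The rough sketch below is an alternative, not the notebook's approach, and assumes `df_listings['amenities']` stores each listing's amenities as a single string:

```python
# Hypothetical alternative to contagem_total: count listings whose amenities
# string mentions each item, then express it as a percentage of all listings.
amenity_counts = {a: df_listings['amenities'].str.contains(a, regex=False).sum()
                  for a in amenities}
amenity_perc = (pd.Series(amenity_counts) / len(df_listings) * 100).sort_values(ascending=False)
print(amenity_perc.head(10))
```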
#================================================ #Question - How does the price vary in Seattle? #================================================ max_n_price = df['price'].max() min_n_price = df['price'].min() mean_n_price = df['price'].mean() print('Highest Price: ',"$%.0f" % max_n_price) print('Lowest Price: ',"$%.0f" % min_n_price) print('Avg price: ',"$%.0f" % mean_n_price) #=============================================== #How many guests are allowed in those listings? #=============================================== df_acco = df['accommodates'].value_counts().reset_index().astype("float") df_acco.rename(columns={'index': 'guests'}, inplace=True) df_acco.set_index('guests') def compute_percentage(x): pct = x/df_acco['accommodates'].sum()*100 return round(pct, 2) df_acco['percentage'] = df_acco['accommodates'].apply(compute_percentage) fig = plt.figure(figsize=(10,10)) # Create matplotlib figure ax = fig.add_subplot(111) # Create matplotlib axes plt.ylabel('Listings %') plt.xlabel('Number of Guests allowed') plt.title('Guests allowed per listing') width = .5 df_acco.plot(kind='bar',x='guests',y='percentage',color='navy',width=width,ax=ax,legend=False) plt.savefig('Accommodates per listing') df_acco.head()Above, we can see that more than 40% of the listings allow just 2 guests#heatmap df_hc = df.select_dtypes(include=['float','int64']).copy() fig, ax = plt.subplots(figsize=(20,10)) sns.heatmap(df_hc.corr(), annot=True, fmt='.2f',ax=ax, cmap="YlGnBu") plt.savefig('Heatmap - Geral.png')The chart above is an overall heatmap. I made it to see how this information impacts price and review scores; after that, I decided to split it into smaller charts, since price is more related to bathrooms, bedrooms, beds and accommodates#Review and host heatmap df_review = df2[['review_scores_rating','review_scores_accuracy', 'review_scores_cleanliness','review_scores_checkin', 'review_scores_communication','review_scores_value', 'host_response_time', 'host_response_rate_num' , 'host_is_superhost']] fig, ax = plt.subplots(figsize=(20,20)) sns.heatmap(df_review.corr(), annot=True, fmt='.2f',ax=ax, cmap="YlGnBu") plt.savefig('Heatmap - Review.png')Similar to fiducial drift correction, 3D imaging based on astigmatism is implemented in B-Store in separate parts:1. the `CalibrateAstigmatism` processor that is used to launch the interactive calibration, and2. a `ComputeTrajectories` class that describes the algorithm for fitting smoothed curves to the beads' x- and y-widths. 
The PSF widths in x and y are in the columns named **sigma_x [nm]** and **sigma_y [nm]** respectively.Let's plot the localizations' x- and y-positions:plt.scatter(df['x [nm]'], -df['y [nm]'], s=2) plt.xlabel('x-position') plt.ylabel('y-position') plt.axis('equal') plt.grid(True) plt.show()Axial calibrationsIf you have already worked through the Fiducial Drift Correction notebook, then this part will look familiar. We start by defining a `CalibrateAstigmatism` processor with a few custom parameters. Then, we feed it with our localization file. A window appears that shows a 2D histogram of the density of localizations, allowing us to manually select the beads. We can select any number of regions we like by clicking and dragging a rectangle around the regions.After a region is drawn, **press the space bar to add it to the processor**. You may then select another region in the same manner. To finish searching for beads, simply close the window.Try selecting the bead at \\( x = 4.3 \, \mu m \\), \\( y = 7 \, \mu m \\) and closing the window afterward.# coordCols = ['x', 'y'] by default ca = proc.CalibrateAstigmatism(coordCols=['x [nm]', 'y [nm]'], sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'], zCol='z [nm]') ca.astigmatismComputer.smoothingWindowSize=20 ca.astigmatismComputer.smoothingFilterSize=3 processed_df = ca(df) processed_df.describe()The processed DataFrame is actually the same as the input; no changes were made. However, if all went well, the CalibrateAstigmatism processor has fit splines to the PSF widths as a function of z and computed the calibration curve.Before discussing what the `CalibrateAstigmatism` processor did, let's go over how it was used. First, we create the processor by setting some of its optional parameters:```ca = proc.CalibrateAstigmatism(coordCols=['x [nm]', 'y [nm]'], sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'], zCol='z [nm]')````coordCols` contains the names of the columns of the x- and y-positions of the localizations and `sigmaCols` contains the names of the columns containing the widths of the PSF in the x- and y-directions. Finally, `zCol` is the name of the known z-position of the beads. (This is known because it is controlled during the acquisition of the image stack of the fluorescent beads.) Some of the other optional parameters we could set are1. `startz` : where the z-fitting should begin2. `stopz` : where the z-fitting should end3. `astigmatismComputer` : this is a `ComputeTrajectories` object for calculating the spline fits to the individual bead trajectories in z. You can write your own algorithm for fitting z-trajectories and feed it to the processor using this parameter.4. `interactiveSearch` : a True/False value that determines whether a window is displayed to allow the user to select regions containing beads. You would set this to False if you already found beads but want to refit them using some different spline fitting parameters of the astigmatismComputer.5. `wobbleComputer` : this is another `ComputeTrajectories` object that calculates the PSF's centroid's position as a function of z, also known as wobble.Next, we adjust some of the smoothing spline parameters. These parameters are not part of the `CalibrateAstigmatism` processor; rather they belong to the `DefaultAstigmatismComputer` which belongs to the processor. 
The `DefaultAstigmatismComputer` is simply a type of `ComputeTrajectories` object for computing astigmatism calibration curves.```ca.astigmatismComputer.smoothingWindowSize=20ca.astigmatismComputer.smoothingFilterSize=3````smoothingWindowSize` is the size of the moving window that is used to weight the points in the trajectory during the spline fitting; `smoothingFilterSize` is the standard deviation of the Gaussian weighting function.Finally, we perform the calibration by calling the processor on the DataFrame:```processed_df = ca(df)``` Plot the calibration curves and bead localizationsJust like with the FiducialDriftCorrect processor, we can plot the individual localizations belonging to the bead we selected as a function of z, as well as the average smoothing spline.ca.astigmatismComputer.plotBeads()You should see a single window appear containing two plots. The top is a plot of the PSF width in x vs. z, and the bottom is a plot of the PSF width in y vs. z. The data points are the individual localizations and the curves are the two splines that fit to each trajectory. Modifying the bead fits Changing which beads are used in the average trajectoryLet's now rerun the processor. This time, select at least two regions containing beads. (I selected the same one as before and another at \\(x = 9.3 \, \mu m \\) and \\(y = 2.7 \, \mu m\\).processed_df = ca(df) ca.astigmatismComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...By selecting multiple beads, we tell B-Store to compute the average of the individually-fitted splines. This average spline is displayed in the plots as the solid, continuous curve plotted over the data ponts. If, for some reason, one of the beads was noisy or simply not good, then the average spline may not accurately represent the astigmatism present in the system. We can request that the `DefaultAstigmatismComputer` use only certain beads by setting its `useTrajectories` parameter.# Recompute the average spline without selecting beads first ca.interactiveSearch = False ca.astigmatismComputer.useTrajectories = [1] # Use only bead number 1 _ = ca(df) # underscore means don't bother capturing the output ca.astigmatismComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...Now the points belonging to bead number 0 will appear in gray; this indicates that they were not used in the fit. If you look closely, you will also see that the spline has changed very slightly and fits only the localizations belonging to bead number 1.If you decide that you really do want to use all the beads, we can indicate this by setting `useTrajectories` to the empty list (`[]`).ca.astigmatismComputer.useTrajectories = [] # Use all beads _ = ca(df) ca.astigmatismComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...Changing the fit rangeYou may also find that the full axial range in the data contains regions that are noisy or not well fit. 
We can select a smaller axial region to fit using the `startz` and `stopz` parameters of the `DefaultAstigmatismComputer`.ca.astigmatismComputer.startz = -300 ca.astigmatismComputer.stopz = 300 _ = ca(df) ca.astigmatismComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...You should now see gray x's corresponding to data points that are outside the fitting range. You should also see that the average spline now only covers the range \\( \left[ -300 \, \mu m, 300 \, \mu m \right] \\).You will also see a notice that startz and stopz parameters of the wobble computer were updated as well. Its startz and stopz parameters are always synchronized with the astigmatism computer to ensure that all fits are performed on the same range. Changing the spline smoothing parametersSimilarly, we can change the smoothing parameters of the cubic spline after we have already selected beads.ca.astigmatismComputer.reset() ca.astigmatismComputer.smoothingWindowSize = 50 ca.astigmatismComputer.smoothingFilterSize = 25 _ = ca(df) ca.astigmatismComputer.plotBeads() ca.astigmatismComputer.reset() _ = ca(df) ca.astigmatismComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...Adjust the wobble curvesWobble is the x- and y-position of the PSF centroid as a function of the axial position. The trajectories of the beads' centroid in x and y as a function z is a wobble curve and may be used to correct errors made by false assumptions about the aberrations present in the PSF. (See [Carlini et al., PLoS One 2015](http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0142949) for more information.)The wobble computer is tuned in much the same way as the astigmatism computer. If you don't want to correct for wobble, then simply ignore this feature.ca.wobbleComputer.plotBeads() ca.wobbleComputer.smoothingWindowSize = 10 ca.wobbleComputer.smoothingFilterSize = 2 # The following are locked to the value of startz and stopz from the # astigmatism computer and therefore do not do anything. ca.wobbleComputer.startz = -300 # Does nothing! ca.wobbleComputer.stopz = 300 # Does nothing! _ = ca(df) ca.wobbleComputer.plotBeads()Setting wobble fiting range to the match the astigmatism fit range. startz and stopz are set in the astigmatism computer. Performing spline fits... Performing spline fits...Using the calibrations to axially localize a datasetOnce calibrated, the `CalibrateAstigmatism` processor contains a property called `calibrationCurves` that holds the spline fits to \\( W_x \\) vs. \\(z\\) and \\( W_y \\) vs. z, where \\( W_x \\) and \\( W_y \\) are the PSF widths in x and y, respectively. These fits are functions, which means they that they accept a single number (the z-coordinate) as an input and produce the width in x and y as outputs.ca.calibrationCurvesWe can use these functions in B-Store's `ComputeZPosition` processor. 
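As a quick spot check of what these objects are, the two fits can be evaluated directly. The minimal sketch below assumes, as the description above suggests, that `calibrationCurves` unpacks into one callable per axis; it is not part of the original notebook:

```python
# Hypothetical spot check: evaluate the fitted width-vs-z curves at a few z values.
fWx, fWy = ca.calibrationCurves          # assumed order: (width-in-x fit, width-in-y fit)
for z in (-200.0, 0.0, 200.0):           # z in the same units as 'z [nm]'
    print(z, fWx(z), fWy(z))
```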
To initialize the processor, we need to specify which functions to use and, optionally, the names of the columns containing the z-positions and PSF widths.cz = proc.ComputeZPosition(ca.calibrationCurves, zCol='z [nm]', sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'])Though we didn't specify it above, the ComputeZPosition also accepts a parameter called `fittype` that takes one of two values: `diff` (the default value) and `huang`. `diff` computes the calibration curve by first sampling the two spline fits, taking their difference, and then reinterpolating to produce a monotonic calibration curve that transforms \\(W_x - W_y\\) into \\( z \\). In general, it is very fast. The `huang` method computes the z-position by minimizing an objective function related to the distance between the experimental and calibrated PSF widths. This method was used in the first astigmatic STORM paper [Huang, et al. Science 319, 810-813 (2008)](http://science.sciencemag.org/content/319/5864/810). Because each localization requires a call to an optimization method, it is much slower.Having created the processor, let's now load a test dataset and localize it in z.pathToData = Path('../../bstore_test_files/processor_test_files/MT0.N1.LD-AS-Exp_Localizations.csv') with open(str(pathToData), 'r') as f: locs = pd.read_csv(f) locs.head()You will notice that we already specified the correct column names when creating the `ComputeZPosition` processor.Now, we simply pass these localizations to the processor and the z-position is computed automatically.locs_z = cz(locs) locs_z.head()Correcting wobbleTo also correct for wobble, we can specify a few extra parameters to the `ComputeZPosition` processor, including the wobble curves calculated by the `CalibrateAstigmatism` processor.ca.wobbleCurves cz = proc.ComputeZPosition(ca.calibrationCurves, zCol='z [nm]', coordCols=['x [nm]', 'y [nm]'], sigmaCols=['sigma_x [nm]', 'sigma_y [nm]'], wobbleFunc=ca.wobbleCurves) locs_z_wobble = cz(locs) locs_z_wobble.head()/home/douglass/anaconda3/envs/bstore/lib/python3.5/site-packages/scipy/interpolate/interpolate.py:612: RuntimeWarning: invalid value encountered in less below_bounds = x_new < self.x[0] /home/douglass/anaconda3/envs/bstore/lib/python3.5/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in greater above_bounds = x_new > self.x[-1]Now you see that a small offset has been applied to the **x [nm]** and **y [nm]** columns to correct these localizations for wobble. The value of the offset has been saved in the **dx** and **dy** columns. Modifying the trajectory-fitting algorithm*You may skip this section if you do not want to program your own astigmatism computer.*By default, B-Store uses a curve fitting algorithm based on a cubic smoothing spline with weights determined by a Gaussian filter. The algorithm is implemented in a class called `DefaultAstigmatismComputer` which uses the `ComputeTrajectories` interface. You can write your own astigmatism computer by inheriting this interface.import inspect print(inspect.getsource(proc.ComputeTrajectories))class ComputeTrajectories(metaclass=ABCMeta): """Basic functionality for computing trajectories from localizations. This is used to compute trajectories from regions of a dataset containing localizations, such as fiducial drift trajectories (position vs. frame number) or astigmatic calibration curves (PSF width vs. z). Attributes ---------- regionLocs : Pandas DataFrame The localizations for individual regions. 
""" def __init__(self): """Initializes the trajectory computer. """ self._regionData = None def _plotCurves(self, curveNumber=None, coordCols=['x', 'y'], horizontalLabels=['', 'time'], verticalLabels=['x', 'y'], title='trajectories', splineCols=['t','x','y'], offsets=[0,0], ylims=[-100, 500, -100, 500]): """Make a plot of each region's trajectory and the average spline fit. plotCurves allows the user to check the tr[...]The `ComputeTrajectories` interface provides a property and four methods:1. `regionLocs` contains a DataFrame with all of the localizations. It must have at least one index with the label 'region_id' that identifies which region the localizations came from.2. `clearRegionLocs()` removes the localization information that is held by the computer.3. `_plotCurves()` is the code used to plot the trajectories.4. `_movingAverage()` is the sliding window Gaussian filter used to weight the datapoints for the cubic smoothing spline.5. `reset()` resets the computer to its initial state.In addition, there is one abstract method called `computeTrajectory`. Any class that implements this interface must define a function with this name.As an example, the actual implementation of this interface by the `DefaultAstigmatismComputer` is printed below:print(inspect.getsource(proc.DefaultAstigmatismComputer.computeTrajectory))def computeTrajectory(self, locs): """Computes the final drift trajectory from fiducial localizations. Parameters ---------- locs : Pandas DataFrame DataFrame containing the localizations belonging to beads. Returns ------- avgSpline : Pandas DataFrame A dataframe containing z-positions and PSF widths in x- and y- for calibrating an astigmatic imaging measurement. """ z = self.zCol if self.startz: startz = self.startz else: startz = locs[z].min() if self.stopz: stopz = self.stopz else: stopz = locs[z].max() self.clearRegionLocs() self.regionLocs = locs self._removeOutliers(startz, stopz) self.fitCurves() self.combineCurves(startz, stopz) return self.avgSplineThe method returns the averaged splines for the PSF widths in each direction. This is a Pandas DataFrame with columns named `z`, `xS` and `yS`.# Print the first five values of the DataFrame returned by the drift computer ca.astigmatismComputer.avgSpline.head()Automated MLCOLAB = True if COLAB: !sudo apt-get install git-lfs && git lfs install !rm -rf dl-projects !git clone https://github.com/mengwangk/dl-projects #!cd dl-projects && ls -l --block-size=M if COLAB: !cp dl-projects/utils* . !cp dl-projects/preprocess* . 
%reload_ext autoreload %autoreload 2 %matplotlib inline import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import scipy.stats as ss import math import matplotlib from scipy import stats from collections import Counter from pathlib import Path plt.style.use('fivethirtyeight') sns.set(style="ticks") # Automated feature engineering import featuretools as ft # Machine learning from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler from sklearn.impute import SimpleImputer from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, precision_recall_curve, roc_curve, mean_squared_error, accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, cross_val_score from sklearn.ensemble import RandomForestClassifier from IPython.display import display from utils import * from preprocess import * # The Answer to the Ultimate Question of Life, the Universe, and Everything. np.random.seed(42) %aimportModules to reload: all-except-skipped Modules to skip:Preparationif COLAB: from google.colab import drive drive.mount('/content/gdrive') GDRIVE_DATASET_FOLDER = Path('gdrive/My Drive/datasets/') if COLAB: DATASET_PATH = GDRIVE_DATASET_FOLDER ORIGIN_DATASET_PATH = Path('dl-projects/datasets') else: DATASET_PATH = Path("datasets") ORIGIN_DATASET_PATH = Path('datasets') DATASET = DATASET_PATH/"feature_matrix_2.csv" ORIGIN_DATASET = ORIGIN_DATASET_PATH/'4D.zip' if COLAB: !ls -l gdrive/"My Drive"/datasets/ --block-size=M !ls -l dl-projects/datasets --block-size=M data = pd.read_csv(DATASET, header=0, sep=',', quotechar='"', parse_dates=['time']) origin_data = format_tabular(ORIGIN_DATASET) data.info() RangeIndex: 699972 entries, 0 to 699971 Columns: 123 entries, index to year dtypes: bool(3), datetime64[ns](1), float64(36), int64(83) memory usage: 642.8 MBExploratory Data Analysisfeature_matrix = data feature_matrix.columns feature_matrix.head(4).T origin_data[origin_data['LuckyNo']==911].head(10) # feature_matrix.groupby('time')['COUNT(Results)'].mean().plot() # plt.title('Average Monthly Count of Results') # plt.ylabel('Strike Per Number')Feature Selectionfrom utils import feature_selection %load_ext autoreload %autoreload 2 feature_matrix_selection = feature_selection(feature_matrix.drop(columns = ['time', 'NumberId'])) feature_matrix_selection['time'] = feature_matrix['time'] feature_matrix_selection['NumberId'] = feature_matrix['NumberId'] feature_matrix_selection['Label'] = feature_matrix['Label'] feature_matrix_selection.columnsCorrelationsfeature_matrix_selection.shape corrs = feature_matrix_selection.corr().sort_values('TotalStrike') corrs['TotalStrike'].head() corrs['Label'].dropna().tail(8) corrs['TotalStrike'].dropna().tail(8)Visualization#pip install autoviz #from autoviz.AutoViz_Class import AutoViz_ClassXgBoostimport xgboost as xgb model = xgb.XGBClassifier() def predict_dt(dt, feature_matrix, return_probs = False): feature_matrix['date'] = feature_matrix['time'] # Subset labels test_labels = feature_matrix.loc[feature_matrix['date'] == dt, 'Label'] train_labels = feature_matrix.loc[feature_matrix['date'] < dt, 'Label'] print(f"Size of test labels {len(test_labels)}") print(f"Size of train labels {len(train_labels)}") # Features X_train = feature_matrix[feature_matrix['date'] < dt].drop(columns = ['NumberId', 'time', 'date', 'Label', 'TotalStrike', 'month', 'year', 'index'], errors='ignore') X_test = 
feature_matrix[feature_matrix['date'] == dt].drop(columns = ['NumberId', 'time', 'date', 'Label', 'TotalStrike', 'month', 'year', 'index'], errors='ignore') print(f"Size of X train {len(X_train)}") print(f"Size of X test {len(X_test)}") feature_names = list(X_train.columns) # Impute and scale features pipeline = Pipeline([('imputer', SimpleImputer(strategy = 'median')), ('scaler', MinMaxScaler())]) # Fit and transform training data X_train = pipeline.fit_transform(X_train) X_test = pipeline.transform(X_test) # Labels y_train = np.array(train_labels).reshape((-1, )) y_test = np.array(test_labels).reshape((-1, )) print('Training on {} observations.'.format(len(X_train))) print('Testing on {} observations.\n'.format(len(X_test))) # Train model.fit(X_train, y_train) # Make predictions predictions = model.predict(X_test) probs = model.predict_proba(X_test)[:, 1] # Total positive positive = np.where((predictions==1)) print('Total predicted to be positive: ', len(positive[0])) # Calculate metrics p = precision_score(y_test, predictions) r = recall_score(y_test, predictions) f = f1_score(y_test, predictions) auc = roc_auc_score(y_test, probs) a = accuracy_score(y_test, predictions) cm = confusion_matrix(y_test, predictions) print(f'Precision: {round(p, 5)}') print(f'Recall: {round(r, 5)}') print(f'F1 Score: {round(f, 5)}') print(f'ROC AUC: {round(auc, 5)}') print(f'Accuracy: {round(a, 5)}') #print('Probability') #print(len(probs), probs) # print('Probability >= Avg proba') # avg_p = np.average(probs) # print(f'Average probablity: {avg_p}') # hp = np.where((probs >= avg_p * 2) & (predictions==1) ) # print(len(hp[0]), probs[hp[0]], hp[0]) print('Confusion matrix') print(cm) # Total predicted matches print('Predicted matches') m = np.where((predictions==1)) print(len(m[0]), m) if len(positive[0]) > 0: # Matching draws print('Matched draws') m = np.where((predictions==1) & (y_test == 1)) print(len(m[0]), m) data = feature_matrix.loc[feature_matrix['date'] == dt] display(data.iloc[m[0]][ ['NumberId', 'Label', 'month', 'MODE(Results.PrizeType)_1stPrizeNo', 'MODE(Results.PrizeType)_2ndPrizeNo', 'MODE(Results.PrizeType)_3rdPrizeNo', 'MODE(Results.PrizeType)_ConsolationNo1', 'MODE(Results.PrizeType)_ConsolationNo10', 'MODE(Results.PrizeType)_ConsolationNo2', 'MODE(Results.PrizeType)_ConsolationNo3', 'MODE(Results.PrizeType)_ConsolationNo4', 'MODE(Results.PrizeType)_ConsolationNo5', 'MODE(Results.PrizeType)_ConsolationNo6', 'MODE(Results.PrizeType)_ConsolationNo7', 'MODE(Results.PrizeType)_ConsolationNo8', 'MODE(Results.PrizeType)_ConsolationNo9', 'MODE(Results.PrizeType)_SpecialNo1', 'MODE(Results.PrizeType)_SpecialNo10', 'MODE(Results.PrizeType)_SpecialNo2', 'MODE(Results.PrizeType)_SpecialNo3', 'MODE(Results.PrizeType)_SpecialNo4', 'MODE(Results.PrizeType)_SpecialNo5', 'MODE(Results.PrizeType)_SpecialNo6', 'MODE(Results.PrizeType)_SpecialNo7', 'MODE(Results.PrizeType)_SpecialNo8', 'MODE(Results.PrizeType)_SpecialNo9']].T) else: print('No luck this month') # Feature importances fi = pd.DataFrame({'feature': feature_names, 'importance': model.feature_importances_}) if return_probs: return fi, probs return fi # All the months len(feature_matrix_selection['time'].unique()), feature_matrix_selection['time'].unique()Prediction by monthsfrom utils import plot_feature_importances %time oct_2018 = predict_dt(pd.datetime(2018,10,1), feature_matrix_selection) norm_oct_2018_fi = plot_feature_importances(oct_2018) %time may_2019 = predict_dt(pd.datetime(2019,5,1), feature_matrix_selection) norm_may_2019_fi = 
plot_feature_importances(may_2019) %time june_2019 = predict_dt(pd.datetime(2019,6,1), feature_matrix_selection) norm_june_2019_fi = plot_feature_importances(june_2019) %time july_2019 = predict_dt(pd.datetime(2019,7,1), feature_matrix_selection) norm_july_2019_fi = plot_feature_importances(july_2019) %time aug_2019 = predict_dt(pd.datetime(2019,8,1), feature_matrix_selection) norm_aug_2019_fi = plot_feature_importances(aug_2019) %time oct_2019 = predict_dt(pd.datetime(2019,10,1), feature_matrix_selection) norm_oct_2019_fi = plot_feature_importances(oct_2019) %time sep_2019 = predict_dt(pd.datetime(2019,9,1), feature_matrix_selection)Size of test labels 10000 Size of train labels 679972 Size of X train 679972 Size of X test 10000 Training on 679972 observations. Testing on 10000 observations. Total predicted to be positive: 0 Precision: 0.0 Recall: 0.0 F1 Score: 0.0 ROC AUC: 0.54424 Accuracy: 0.9684 Confusion matrix [[9684 0] [ 316 0]] Predicted matches 0 (array([], dtype=int64),) No luck this month CPU times: user 2min 12s, sys: 499 ms, total: 2min 13s Wall time: 2min 13sTuning - GridSearchCV Check Raw Dataorigin_data.tail(10) origin_data[(origin_data['DrawDate'].dt.year == 2019) & (origin_data['DrawDate'].dt.month == 6)]['DrawNo'].nunique() origin_data[(origin_data['DrawDate'].dt.year == 2019) & (origin_data['DrawDate'].dt.month == 10)]['DrawNo'].nunique() print(15 * 45 + 14 * 45)1305Testingimport numpy as np import pandas as pd data = [['no_1', 1], ['no_2', 2], ['no_3', 3], ['no_4', 4], ['no_5', 5], ['no_6', 6], ['no_7', 7]] # Create the pandas DataFrame df = pd.DataFrame(data, columns = ['Name', 'Age']) a = np.array([0,0,0,1,0,1, 1]) b = np.array([0,0,0,1,0,0, 1]) print(len(a)) m = np.where((a==1) & (b ==1)) print(len(m[0]), m[0], a[m[0]]) print(df.iloc[m[0]]) probs = np.array([0.03399902, 0.03295987, 0.03078781, 0.04921166, 0.03662422, 0.03233755]) print(np.average(probs)) mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, {'a': 100, 'b': 200, 'c': 300, 'd': 400}, {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] df = pd.DataFrame(mydict) df.iloc[[0]][['a','b']]Practice 1In this notebook the challenge is to use Pandas, and appropriate ancillary modules, to load in data on passengers from the Titanic. The dataset, found on a [public repository](https://towardsdatascience.com/all-the-datasets-you-need-to-practice-data-science-skills-and-make-a-great-portfolio-857a348883b5), has these columns: - PassengerId - Survived - P-class - Name - Sex - Age - SibSp - Parch - Ticket - Fare - Cabin - Embarked. You are given no context on any aspects of the data. Your challenge is to use Pandas to explore the dataset and produce at least two outputs: - 1) Histograms on the age distribution for male and females in each class. Try to make sure each x axis has the same range. - 2) Investigate features [class, age, sex] on those who survived and those who didnt. 
The relevant class notebooks which may help here are Practical sessions 5,6,7 and 8.import pandas as pd # Read data from file # We are going to use the function 'read_csv' within the Pandas package: if 'google.colab' in str(get_ipython()): data = pd.read_csv('https://raw.githubusercontent.com/loftytopping/DEES_programming_course/master/extra_activities/data/titanic_data.csv') data.head() else: data = pd.read_csv("data/titanic_data.csv") data.head() # The following command removes any entries that are not numbers, or NaNs [Not-A-Number] data=data.dropna(subset=['Age']) # Lets take a preview of the dataframe we have created data.head() # Lets see how many entries we have data.size1) - Histograms on the age and sex distribution in each class# Befoore I create histograms for each class, I need to know how many classes there are. We can use the value_counts() function for that. data['Pclass'].value_counts() # Now produce histograms. Please take a look at the course practical notebooks if anything isnt clear. import matplotlib.pyplot as plt import numpy as np # Create a canvas for our plot fig, axs = plt.subplots(3, 2, figsize=(12, 8), sharey=False) # First class data[(data['Pclass']==1) & (data['Sex']=='male')].hist(column='Age',ax=axs[0,0]) axs[0,0].set_title('First class [male]') data[(data['Pclass']==1) & (data['Sex']=='female')].hist(column='Age',ax=axs[0,1]) axs[0,1].set_title('First class [female]') # Second class data[(data['Pclass']==2) & (data['Sex']=='male')].hist(column='Age',ax=axs[1,0]) axs[1,0].set_title('Second class [male]') data[(data['Pclass']==2) & (data['Sex']=='female')].hist(column='Age',ax=axs[1,1]) axs[1,1].set_title('Second class [female]') # Third class data[(data['Pclass']==3) & (data['Sex']=='male')].hist(column='Age',ax=axs[2,0]) axs[2,0].set_title('Third class [male]') data[(data['Pclass']==3) & (data['Sex']=='female')].hist(column='Age',ax=axs[2,1]) axs[2,1].set_title('Third class [female]') # Im also going to set the maximum and minimum limits on each x axis to be the same. # to do this Im going to work out these numbers from the age column min_age = np.min(data['Age'].values) max_age = np.max(data['Age'].values) axs[0,0].set_xlim([min_age, max_age]) axs[0,1].set_xlim([min_age, max_age]) axs[1,0].set_xlim([min_age, max_age]) axs[1,1].set_xlim([min_age, max_age]) axs[2,0].set_xlim([min_age, max_age]) axs[2,1].set_xlim([min_age, max_age])2) Investigate features [class, age, sex] on those who survived and those who didnt.I can find which column metric I need to use to define a passenger who survived. But how we do we produce a histogram for categorical variables. Specifically, we want to visualise the distribution of people in each class and also their sex. We can use a histogram for age, but we will use a bar plot for both class and sex. The code to produce a bar plot looks a little different from a histogram. 
You may replicate the following example, changing the reference to column name and axis where appropriate:data[data['Survived']==1]['Pclass'].value_counts().plot(kind='bar',ax=axs[0,1])# Create a canvas for our plot fig, axs = plt.subplots(2, 3, figsize=(12, 8), sharey=False) # Survived data[data['Survived']==1].hist(column='Age',ax=axs[0,0]) axs[0,0].set_title('Age [survived]') data[data['Survived']==1]['Pclass'].value_counts().plot(kind='bar',ax=axs[0,1]) axs[0,1].set_title('Class [Survived]') data[data['Survived']==1]['Sex'].value_counts().plot(kind='bar',ax=axs[0,2]) axs[0,2].set_title('Sex [Survived]') # Didnt survice data[data['Survived']==0].hist(column='Age',ax=axs[1,0]) axs[1,0].set_title('Age [did not survive]') data[data['Survived']==0]['Pclass'].value_counts().plot(kind='bar',ax=axs[1,1]) axs[1,1].set_title('Class [did not survive]') data[data['Survived']==0]['Sex'].value_counts().plot(kind='bar',ax=axs[1,2]) axs[1,2].set_title('Sex [did not survive]')Replace atom symbols with atom numbers, so RDKit does not raise errors when creating Mol from the SMARTSold_smarts = [] new_smarts = [] for line in lines: smarts = line.strip().split("\t")[1] old_smarts.append(smarts) new = smarts.replace("Cl", "[#17]").replace("Mg", "[#12]").replace("Al", "[#13]") new = new.replace("Li", "[#3]").replace("Si", "[#14]").replace("Br", "[#35]") new = new.replace("Se", "[#34]").replace("[As]", "[#33]").replace("As", "[#33]").replace("Na", "[#11]") new = new.replace("C", "[#6]").replace("N", "[#7]").replace("O", "[#8]") new = new.replace("B", "[#5]").replace("H", "[#1]").replace("K", "[#19]") new_smarts.append(new)Atom pairs are not sensitive to the bonds between themfor i in range(64): new_smarts[i] = new_smarts[i].replace("-", "~")The section 5 and section 6 single and double bonds of PubChem fingerprint also matches aromatic bondsfor i in range(153, 450): new_smarts[i] = new_smarts[i].replace("-", "-,:") new_smarts[i] = new_smarts[i].replace("=", "=,:")Check if all the SMARTS are convertable to RDKit Mol objectfor i, smart in enumerate(new_smarts): patt = Chem.MolFromSmarts(smart) if patt is None: print(i, smart)Write the original and transformed SMARTS to pandas dataframedata = { "PubChem_pattern": old_smarts, "SMARTS": new_smarts } df = pd.DataFrame(data) df.head(10)Create images for the SMARTSimages = [] for smart in df["SMARTS"]: patt = Chem.MolFromSmarts(smart) img = Chem.Draw.MolToImage(patt) images.append(img)Save images to disk and log their pathsimage_paths = [] os.makedirs(os.path.join("..", "data", "pubchemfp_pattern_images"), exist_ok=True) for i, img in enumerate(images): save_path = os.path.join("..", "data", "pubchemfp_pattern_images", f"{i}.png") img.save(save_path) image_paths.append(save_path)Display images by converting the dataframe to HTML with paths to the imagesdef path_to_image_html(path): return f'' df["images"] = image_paths HTML(df.to_html(escape=False ,formatters=dict(images=path_to_image_html)))Save the dataframe to data/ directorydf.to_csv(os.path.join("..", "data", "pubchemFPKeys_to_SMARTSpattern.csv"))The class to create fragment labels for each atom of a moleculeclass MolFragmentsLabel: """ Label atoms in a molecule with the fragments they belong to. The fragment library is built from PubChem fingerprint section 3 to section 7. The labels are fingerprint like vectors for each atom of the molecule. Args: ref_file (str): path to the reference file (csv format) that contains the SMARTS strings to match molecular fragments. 
""" ref_smarts = None def __init__(self, ref_file=None): if ref_file is None: self.ref_file = os.path.join("data", "pubchemFPKeys_to_SMARTSpattern.csv") else: self.ref_file = ref_file if MolFragmentsLabel.ref_smarts is None: self._build_ref(self.ref_file) @classmethod def _build_ref(cls, ref_file): df = pd.read_csv(ref_file) cls.ref_smarts = [Chem.MolFromSmarts(smarts) for smarts in df["SMARTS"]] def create_labels_for(self, mol, sparse=True): """ Create fragment labels for a molecule: Args: mol (SMILES str or RDKit Mol object): the molecule to create labels for. sparse (bool): return the matrix in sparse format. Default: True. """ if isinstance(mol, str): mol = Chem.MolFromSmiles(mol) if mol is None: raise ValueError(f"{mol} is not a valid SMILES string.") # add hydrogens to the molecule mol = Chem.AddHs(mol) # initiate the vectors labels = np.zeros((len(self.ref_smarts), mol.GetNumAtoms()), dtype=np.int) # search for the fragments in the molecule for i, pattern in enumerate(self.ref_smarts): mat_substructs = mol.GetSubstructMatches(pattern) # convert tuple of tuples to a set mat_atoms = set() for atoms in mat_substructs: mat_atoms = mat_atoms.union(set(atoms)) mat_atoms = list(mat_atoms) labels[i, mat_atoms] = 1 if sparse: labels = scipy.sparse.coo_matrix(labels) return labelsTest the classmfl = MolFragmentsLabel(ref_file=os.path.join("..", "data", "pubchemFPKeys_to_SMARTSpattern.csv")) %time labels = mfl.create_labels_for("Cl-c1cccc(C)c1", sparse=True) print(labels.todense()[[20, 21, 31, 69, 70, 79, 81, 92, 93]]) for i in range(labels.shape[0]): if labels.getrow(i).sum() > 0: print(i, end=", ")20, 21, 31, 69, 70, 79, 81, 92, 93, 99, 107, 108, 111, 121, 153, 167, 171, 178, 183, 207, 227, 238, 240, 253, 257, 261, 287, 289, 293, 301, 307, 315, 319, 321, 328, 332, 335, 336, 340, 345, 355, 371, 377, 397, 401, 405, 414, 415, 416, 425, 445, 446, 447, 475, 538,The real PubChem fingerprintpubchemfp = All_Fingerprint.cdk_fingerprint("Cl-c1cccc(C)c1", fp_type="pubchem") reduced_fp = [d - 263 for d in pubchemfp] print(reduced_fp[6:]) print("="*40) print("The fingerprint matches to the labels I generated very well.")[20, 21, 31, 69, 70, 79, 81, 92, 93, 99, 107, 108, 111, 121, 153, 167, 171, 178, 183, 207, 227, 238, 240, 253, 257, 261, 287, 289, 293, 301, 307, 315, 319, 321, 328, 332, 335, 336, 340, 345, 355, 371, 377, 397, 401, 405, 414, 415, 416, 425, 445, 446, 447, 475, 538] ======================================== The fingerprint matches to the labels I generated very well.Another testlabels = mfl.create_labels_for("CC(=O)OC1=CC=CC=C1C(=O)O", sparse=True) print(labels.todense()[[20, 21, 23, 45, 69, 70, 78, 81, 89, 92, 93]]) for i in range(labels.shape[0]): if labels.getrow(i).sum() > 0: print(i, end=", ") pubchemfp = All_Fingerprint.cdk_fingerprint("CC(=O)OC1=CC=CC=C1C(=O)O", fp_type="pubchem") reduced_fp = [d - 263 for d in pubchemfp] print(reduced_fp[9:]) print("="*40) print("The fingerprint matches to the labels I generated very well again.")[20, 21, 23, 45, 69, 70, 78, 81, 89, 92, 93, 107, 108, 111, 117, 118, 119, 121, 142, 143, 153, 157, 167, 171, 177, 178, 180, 183, 189, 207, 213, 227, 230, 235, 253, 257, 261, 272, 278, 279, 285, 289, 290, 293, 301, 302, 307, 310, 311, 312, 315, 316, 318, 319, 321, 326, 331, 332, 334, 336, 340, 341, 343, 345, 351, 354, 355, 356, 357, 359, 360, 362, 363, 369, 371, 374, 376, 377, 378, 379, 388, 392, 397, 401, 403, 404, 405, 408, 409, 414, 415, 416, 417, 421, 425, 426, 429, 435, 441, 445, 446, 447, 493, 556] ======================================== The 
fingerprint matches to the labels I generated very well again.Demonstration of AUC of ROC%matplotlib notebook import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_curve sns.set(font_scale=1.5) sns.set_style("whitegrid", {'grid.linestyle':'--'})Here we are going to use the breast cancer data to build a simple logistic regression modelcancer_data = pd.read_csv("https://raw.githubusercontent.com/changyaochen/MECE4520/master/lectures/lecture_4/breast_cancer_data.csv") cancer_data["label"] = cancer_data["diagnosis"].apply(lambda x: 0 if x == "B" else 1) cancer_data.head() # fit the logistic regression model lr_model = LogisticRegression(penalty="none", random_state=42) lr_model.fit( X=cancer_data[["radius_mean"]], y=cancer_data["label"], ) # make prediction on the training dataset prediction_results = pd.DataFrame( data={ "probability": lr_model.predict_proba(cancer_data[["radius_mean"]])[:, 1], "label": cancer_data["label"], } ) # take a look some randome samples prediction_results.sample(10, random_state=123).sort_values("probability") # calculate the values needed for the auc of roc fpr, tpr, thresholds = roc_curve( y_true=prediction_results["label"], y_score=prediction_results["probability"], ) roc_data = pd.DataFrame( data={ "fpr": fpr, "tpr": tpr, "thresholds": thresholds, }, ) roc_data.tail() # make plot plt.figure() # plot the ROC sns.lineplot(x=fpr, y=tpr) # plot the diagonal line sns.lineplot(x=[0, 1], y=[0, 1], linestyle="--") plt.gca().set_aspect("equal") plt.gca().set_xlabel("False Positive Rate") plt.gca().set_ylabel("True Positive Rate") plt.gca().set_xlim([-.1, 1.1]) plt.gca().set_ylim([-.1, 1.1]) plt.tight_layout() plt.show()Analysis of ResultsNote, this notebook requires the file `H2_exp_output.json` (which is created in the `2_efficient_graph.ipynb` notebook) to exist.Here we analyse some of the results from the DASK run for the H2 experiment.import matplotlib.pyplot as plt import pandas as pd import numpy as np import qpfasLoad `json` fileexp_df = pd.read_json ("H2_exp_output.json") exp_df.head()Compute VQE Errorvqe_energies = np.array([i["converged_energy"] for i in exp_df["vqe_output"].values]) exp_df["vqe_error_abs"] = np.abs(vqe_energies - exp_df["fci_energy"]) exp_df["hf_error"] = exp_df["hf_energy"] - exp_df["fci_energy"]Explore Data%matplotlib notebook plt.figure(figsize=(10, 6)) exp_vars = [f"{i['transformation']}\n{i['optimizer']}" for _, i in exp_df.iterrows()] exp_df["vqe_error_abs"].plot(marker='o', label='VQE') plt.xticks(np.arange(len(exp_vars)), exp_vars) plt.ylabel("Error (H)") plt.yscale("log") plt.axhline(y=exp_df["hf_error"].values[0], c="k", ls='dashed', label='HF') plt.legend() plt.tight_layout() plt.show()Graph ReconstructionWe can resconstruct the orginal dask dag from the resultsgraph = qpfas.workflow.reconstruct_graph(exp_df["node_history"].values) qpfas.workflow.DaskDAG.visualize_dag(graph)Part 1import pandas as pd df = pd.read_csv('input.txt', header=None, names=["depth"]) df["dif"] = df.diff() sum(df.dif>0)Part 2w3 = df.rolling(window=3).sum() w3d = w3.diff() sum(w3d.depth>0)**Exercise 12.1** The linear model I used in this chapter has the obvious drawback that it is linear, and there is no reason to expect prices to change linearlyover time. We can add flexibility to the model by adding a quadratic term,as we did in Section 11.3.Use a quadratic model to fit the time series of daily prices, and use the modelto generate predictions. 
You will have to write a version of RunLinearModelthat runs that quadratic model# Solution import statsmodels.formula.api as smf def RunQuadraticModel(daily): """Runs a linear model of prices versus years. daily: DataFrame of daily prices returns: model, results """ daily['years2'] = daily.years**2 model = smf.ols('ppg ~ years + years2', data=daily) results = model.fit() return model, results #The following function takes a DataFrame of transactions and compute daily averages. def GroupByDay(transactions, func=np.mean): """Groups transactions by day and compute the daily mean ppg. transactions: DataFrame of transactions returns: DataFrame of daily prices """ grouped = transactions[['date', 'ppg']].groupby('date') daily = grouped.aggregate(func) daily['date'] = daily.index start = daily.date[0] one_year = np.timedelta64(1, 'Y') daily['years'] = (daily.date - start) / one_year return daily #The following function returns a map from quality name to a DataFrame of daily averages. def GroupByQualityAndDay(transactions): """Divides transactions by quality and computes mean daily price. transaction: DataFrame of transactions returns: map from quality to time series of ppg """ groups = transactions.groupby('quality') dailies = {} for name, group in groups: dailies[name] = GroupByDay(group) return dailies def PlotFittedValues(model, results, label=''): """Plots original data and fitted values. model: StatsModel model object results: StatsModel results object """ years = model.exog[:,1] values = model.endog thinkplot.Scatter(years, values, s=15, label=label) thinkplot.Plot(years, results.fittedvalues, label='model', color='#ff7f00') def RunLinearModel(daily): model = smf.ols('ppg ~ years', data=daily) results = model.fit() return model, results def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel): """Plots predictions. daily: DataFrame of daily prices years: sequence of times (in years) to make predictions for iters: number of simulations percent: what percentile range to show func: function that fits a model to the data """ result_seq = SimulateResults(daily, iters=iters, func=func) p = (100 - percent) / 2 percents = p, 100-p predict_seq = GeneratePredictions(result_seq, years, add_resid=True) low, high = thinkstats2.PercentileRows(predict_seq, percents) thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray') predict_seq = GeneratePredictions(result_seq, years, add_resid=False) low, high = thinkstats2.PercentileRows(predict_seq, percents) thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray') #`dailies` is the map from quality name to DataFrame. transactions = pd.read_csv('mj-clean.csv', parse_dates=[5]) dailies = GroupByQualityAndDay(transactions) # Solution name = 'high' daily = dailies[name] model, results = RunQuadraticModel(daily) results.summary() # Solution PlotFittedValues(model, results, label=name) thinkplot.Config(title='Fitted values', xlabel='Years', xlim=[-0.1, 3.8], ylabel='price per gram ($)') # Solution years = np.linspace(0, 5, 101) thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name) PlotPredictions(daily, years, func=RunQuadraticModel) thinkplot.Config(title='predictions', xlabel='Years', xlim=[years[0]-0.1, years[-1]+0.1], ylabel='Price per gram ($)')**Exercise 12.2** Write a definition for a class named SerialCorrelationTestthat extends HypothesisTest from Section 9.2. 
It should take a series anda lag as data, compute the serial correlation of the series with the given lag,and then compute the p-value of the observed correlation.Use this class to test whether the serial correlation in raw price data isstatistically significant. Also test the residuals of the linear model and (ifyou did the previous exercise), the quadratic model.# Solution class SerialCorrelationTest(thinkstats2.HypothesisTest): """Tests serial correlations by permutation.""" def TestStatistic(self, data): """Computes the test statistic. data: tuple of xs and ys """ series, lag = data test_stat = abs(SerialCorr(series, lag)) return test_stat def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ series, lag = self.data permutation = series.reindex(np.random.permutation(series.index)) return permutation, lag #The following function computes serial correlation with the given lag. def SerialCorr(series, lag=1): xs = series[lag:] ys = series.shift(lag)[lag:] corr = thinkstats2.Corr(xs, ys) return corr # Solution # test the correlation between consecutive prices name = 'high' daily = dailies[name] series = daily.ppg test = SerialCorrelationTest((series, 1)) pvalue = test.PValue() print(test.actual, pvalue) # Solution # test for serial correlation in residuals of the linear model _, results = RunLinearModel(daily) series = results.resid test = SerialCorrelationTest((series, 1)) pvalue = test.PValue() print(test.actual, pvalue) # Solution # test for serial correlation in residuals of the quadratic model _, results = RunQuadraticModel(daily) series = results.resid test = SerialCorrelationTest((series, 1)) pvalue = test.PValue() print(test.actual, pvalue)0.05607308161289916 0.045New Object Hierarchyimport numpy as np import functools as fnc import pprint as ppMagmaclass Magma: def __init__(self, elems, tbl): self.__elements = elems self.__table = np.array(tbl) def __contains__(self, element): return element in self.__elements def __getitem__(self, index): return self.__elements[index] @property def elements(self): return self.__elements def set_elements(self, new_elements): if isinstance(new_elements, list): self.__elements = new_elements elif isinstance(new_elements, dict): self.__elements = [new_elements[elem] for elem in self.__elements] return self @property def table(self): return self.__table def op(self, *args): if len(args) == 1: if args[0] in self.__elements: return args[0] else: raise ValueError(f"{args[0]} is not a valid element name") elif len(args) == 2: row = self.__elements.index(args[0]) col = self.__elements.index(args[1]) index = self.__table[row, col] return self.__elements[index] else: return fnc.reduce(lambda a, b: self.op(a, b), args) def __repr__(self): return f"{self.__class__.__name__}(\n{self.__elements}, \n{self.__table})" def table_with_names(self): return [[self.__elements[index] for index in row] for row in self.__table]Magma Examples Rock-Paper-Scisors Magma This magma is obviously commutative, but not associative.See https://en.wikipedia.org/wiki/Commutative_magma* $M = \langle \{r,p,s\}, \cdot \rangle$* For all $x, y \in M$, if $x$ *beats* $y$, then $x \cdot y = y \cdot x = x$* Also, for all $x \in M$, $xx = x$rps = Magma(['r', 'p', 's'], [[0, 1, 0], [1, 1, 2], [0, 2, 2]]) rpsThe following demonstrates that the rps magma is non-associative:ps = rps.op('p', 's') rp = rps.op('r', 'p') r_ps = rps.op('r', ps) rp_s = rps.op(rp, 's') print(f" r(ps) = r{ps} = {r_ps}, \nbut (rp)s = {rp}s = {rp_s}")r(ps) = rs = r, but (rp)s = ps = sFor 
other magma examples, [see this discussion](https://math.stackexchange.com/questions/779507/can-you-give-me-some-concrete-examples-of-magmas). Also, [see this paper on groupiods](https://arxiv.org/ftp/math/papers/0304/0304490.pdf). Smarandache Groupoid This is Example 1.4.1 in the paper referenced, above.ex141_tbl = [[0, 3, 0, 3, 0, 3], [1, 4, 1, 4, 1, 4], [2, 5, 2, 5, 2, 5], [3, 0, 3, 0, 3, 0], [4, 1, 4, 1, 4, 1], [5, 2, 5, 2, 5, 2]] ex141_magma = Magma(['a', 'b', 'c', 'd', 'e', 'f'], ex141_tbl) ex141_magmaTesting Magma Methods Table and Element Accessorsrps.table rps.elements rps.table_with_names()Magma as an Iterator and Container of Elements[el for el in rps] 'r' in rpsReplacing ("Setting") Element Namesfull_names = ['rock', 'paper', 'scissors'] rps.set_elements(full_names) orig_elems = ['r', 'p', 's'] mapping = dict(zip(rps.elements, orig_elems)) print(mapping) rps.set_elements(orig_elems){'rock': 'r', 'paper': 'p', 'scissors': 's'}Table Utilitiesdef is_table_associative(table): result = True indices = range(len(table)) # [0, 1, 2, ..., n-1] for a in indices: for b in indices: for c in indices: ab = table[a][b] bc = table[b][c] if not (table[ab][c] == table[a][bc]): result = False break return result def is_table_commutative(table): result = True indices = range(len(table)) # [0, 1, 2, ..., n-1] for a in indices: for b in indices: if table[a][b] != table[b][a]: result = False break return result def table_has_identity(table): indices = range(len(table)) id = None for x in indices: if all(table[x][y] == y for y in indices): id = x break return idTesting Table Utilities# not assoc; is comm; no identity -- the RPS magma table, above tbl1 = [[0, 1, 0], [1, 1, 2], [0, 2, 2]] # is assoc; not comm; has identity (0) --- the S3 group table tbl2 = [[0, 1, 2, 3, 4, 5], [1, 2, 0, 5, 3, 4], [2, 0, 1, 4, 5, 3], [3, 4, 5, 0, 1, 2], [4, 5, 3, 2, 0, 1], [5, 3, 4, 1, 2, 0]] # is assoc; is comm; has identity (0) --- the Z4 group table tbl3 = [[0, 1, 2, 3], [1, 2, 3, 0], [2, 3, 0, 1], [3, 0, 1, 2]] # powerset(3) group table tbl4 = [[0, 1, 2, 3, 4, 5, 6, 7], [1, 0, 4, 5, 2, 3, 7, 6], [2, 4, 0, 6, 1, 7, 3, 5], [3, 5, 6, 0, 7, 1, 2, 4], [4, 2, 1, 7, 0, 6, 5, 3], [5, 3, 7, 1, 6, 0, 4, 2], [6, 7, 3, 2, 5, 4, 0, 1], [7, 6, 5, 4, 3, 2, 1, 0]] tbl5 = ex141_tbl # Defined in magma section, above test_tables = [tbl1, tbl2, tbl3, tbl4, tbl5] print(" Table Associative? Commutative? Identity?") print('-'*55) for tbl in test_tables: i = test_tables.index(tbl) + 1 is_assoc = str(is_table_associative(tbl)) is_comm = str(is_table_commutative(tbl)) tbl_id = str(table_has_identity(tbl)) print(f"{i :>{6}} {is_assoc :>{14}} {is_comm :>{12}} {tbl_id :>{12}}")Table Associative? Commutative? Identity? ------------------------------------------------------- 1 False True None 2 True False 0 3 True True 0 4 True True 0 5 True False NoneSemigroup A semigroup is an associative magma.is_table_associative(rps.table) class Semigroup(Magma): def __init__(self, elems, tbl): if is_table_associative(tbl): super().__init__(elems, tbl) else: raise ValueError("Table does not support associativity")The Semigroup constructor will fail if the table does not support associativity:try: Semigroup(['r', 'p', 's'], [[0, 1, 0], [1, 1, 2], [0, 2, 2]]) except: print("Something went wrong")Something went wrongSee p. 
67 in Pinter for a possible example Monoid A monoid is a semigroup with an identity element.class Monoid(Semigroup): def __init__(self, elems, tbl): self.identity = has_identity(tbl) if self.identity: super().__init__(elems, tbl) else: raise ValueError("Table has no identity element")Hiukkaset hukassa Aalto Junior x CMS open dataTervetuloa kiehtovan ja kummallisen hiukkasfysiikan pariin. Tämä tiedosto on oppilaan versio, joka sisältää tehtäviä ja demoja CMS:n avoimen datan parissa. Vastatkaa kaikkiin $\color{blue}{\text{kysymyksiin}}$ ja pitäkää hauskaa datan kanssa! AlustusAlla on koodisolu joka ajettaessa hakee tarvittavat python paketit. Pandas on data-analyysiä varten, numpy tieteellistä laskentaa ja matplotlib.pyplot mahdollistaa kuvaajien piirtämisen. Annetaan näille lyhyemmät nimet (pd, np ja plt), jotta myöhemmin niitä käytettäessä ei tarvitse kirjoittaa koko paketin nimeä.import pandas as pd import matplotlib.pyplot as plt import numpy as np %matplotlib inline #lataa koodin ja sitten ajaa sen: !curl -O https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/Apufunktiot/hiukkasethukassa.ipynb %run hiukkasethukassa.ipynb #%run ./../../Apufunktiot/hiukkasethukassa.ipynb% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 7616 100 7616 0 0 21698 0 --:--:-- --:--:-- --:--:-- 21698**** Hiukkasfysiikkaa - Maailmankaikkeuden pienimpiä rakennuspalikoita - Niin pieniä, ettei niitä aina voi edes havaita suoraan - Tarvitaan uusia työkaluja ja teorioita selittämään hiukkasten maailmaa - Korkeita energioita - Valtavasti dataa Ote tieteen kuvalehden artikkelista kvanttimekaniikka viidessä minuutissa :> Hiukkasten mittaamista vaikeuttaa se, että tietyt ominaisuudet muuttavat luonnettaan juuri sillä hetkellä, kun niitä aletaan mitata. Niels Bohrin mukaan tällöin ei enää voida määrittää hiukkasten paikkaa ja liikettä, koska käsitteillä ei enää ole merkitystä.Yksi esimerkki paradoksista on niin sanottu Schrödingerin kissa.[[1](tk)] Katsokaa ensimmäinen minuutti videosta. [[2](tedvideo)]from IPython.lib.display import YouTubeVideo YouTubeVideo('UjaAxUO6-Uw', width=1000, height=600) #https://youtu.be/UjaAxUO6-Uw>Tämä ei tunnu järkevältä, jos asiaa tarkastellaan klassisen fysiikan näkökulmasta. Niiden mukaan kissa ei voi olla yhtaikaa kuollut ja elävä – vaan ainoastaan jompaakumpaa.Suuri osa kvanttimekaniikasta on kuitenkin Schrödingerin kissa -ajatusleikin kaltaisia kahden eri tilan yhdistelmiä. [[1](tk)] Stadardimalli $\color{blue}{\text{Mitä tarkoittaa malli?}}$ Malli on yksinkertaistettu kuvaus todellisuudesta. Fysiikan mallit rakennetaan teorian ja kokeellisen tutkimuksen yhteistyönä. Teoreetikot rakentavat malleja joita sitten testataan kokeellisesti. __Stadardimalli__ on matemaattinen malli joka kuvaa kaikkia tunnettuja hiukkasia sekä heikon-, vahvan- ja sähkömagneettisen vuorovaikutuksen. Standardimallissa on mukana 12 alkeishiukkasta, neljä voimaa välittävää hiukkasta sekä Higgsin hiukkanen. Lisätietoa [1] Ote Tieteen Kuvalehden artikkelista "kvanttimekaniikka viidessä minuutissa" Alkeishiukkaset ovat ne fysikaaliset rakennuskivet, joista kaikki universumin atomit koostuvat. Niitä on kahtatoista eri tyyppiä, mutta luonnollisissa oloissa niistä esiintyy nykyään vain neljää – elektroneja, elektronin neutriinoja, ylöskvarkkeja ja alaskvarkkeja. 
Muita oli luonnossa vain heti alkuräjähdyksen jälkeen, mutta sittemmin niitä on luotu uudelleen hiukkaskiihdyttimissä.**Voimia välittävät hiukkaset** pitävät rakennusosaset kasassa. Ne välittävät atomeihin neljää luonnon perusvoimaa: FotonitFotonit ovat massattomia valohiukkasia, jotka välittävät sähkömagneettista voimaa. GluonitGluonit liimaavat kvarkit yhteen alkeishiukkasissa ja välittävät vahvaa ydinvoimaa.**W**-ja **Z**-hiukkaset W- ja Z-hiukkaset eli **-bosonit** välittävät heikkoa ydinvoimaa. Ne liittyvät tietynlaisiin radioaktiivisuuden muotoihin.Gravitoni Gravitonien arvellaan välittävän painovoimaa. Hiukkasten olemassaoloa ei ole vielä onnistuttu todistamaan, mutta niitä etsitään kiivaasti CERNissä Sveitsissä.Lisäksi on olemassa **Higgsin hiukkanen**. Se löytyi 99,99 prosentin todennäköisyydellä vuonna 2012. Hiukkanen antaa atomien rakennusosasille massan. Kvarkit kiinnittyvät voimakkaammin Higgsin hiukkasiin, ja siksi ne ovat raskaampia kuin esimerkiksi elektronit. $\color{blue}{\text{Mitä standardimalli ei selitä?}}$ • Onko löytämämme Higgsin bosoni standardimallin mukainen ja ainoa? • Miksi maailmankaikkeudessa on enemmän materiaa kuin antimateriaa? • Mitä on pimeä aine? • Miten gravitaatio yhdistetään muihin (kvantti)vuorovaikutuksiin? • Onko ”alkeishiukkasilla” sisäinen rakenne? • Onko luonnossa lisää symmetrioita? Supersymmetria? [[3](cernstandard)] **Työ on siis vielä kesken!** **** CERNKatsotaan lyhyt esittelyvideo CERN:stä ja CMS kokeesta.from IPython.lib.display import YouTubeVideo YouTubeVideo('S99d9BQmGB0', width=900, height=400)LHC kiihdytin ja kaikki sen koeasemat ovat nyt (2019-2020) toisella pitkällä huoltotauolla.- Suorituskykyä parannellaan.- Säteilystä kärsineitä osia vaihdetaan. _CMS on kuin sipuli_CMS-hiukkasilmaisin koostuu useasta kerroksesta, joista jokaisella on erityinen tehtävä törmäyksen mittaamisessa. Tärkeää on mitata, millaisia hiukkasia törmäyksestä sinkoilee. Tämä saadaan selville mittaamalla **varaus**, **liikemäärä** ja **energia**. Lisätietoa eri osien toiminnasta Eri hiukkaset vaikuttavat aineen kanssa eri tavoin, eli tarvitaan erilaisia kerroksia mittamaan hiukkasten energioita ja ratoja. Tässä lyhyt tiivistelmäilmaisimen toiminnasta. [[4](diat)], [[5](cms)]. - **Jälki-ilmaisin** Jälki-ilmaisin koostuu kahden tyyppisisitä pii osista. Jälki-ilmaisin havaitsee varattujen hiukkasten reitit, kun ne vuorovaikuttavat elekromagneettisesti ilmaisimen kanssa. Tarkan paikkadatan avulla voidaan määrittää protonien törmäyskohdat ja syntyneiden raskaampien ydinten hajoamispaikat. Myös hiukkasen radan kaarevuussäteen avulla voidaan laskea sen momentti.*Pixel* -ilmaisin on rakennettu pienistä piisoluista (65 miljoonaa kappaletta), jotka mittaavat varattujen hiukkasten radat hyvin tarkasti.*Tracker* -kerros on valmistettu piiliuskoista jotka niin ikään mittaavat hiukkasten ratoja.- **Sähkömagneettinen kalorimetri (ECAL)**Elektronien ja fotonien energiat saadaan mitattua hyvin tarkasti sillä niiden törmäyks ECAL-kerrokseen aiheuttaa sähkömagneettisen ryöpyn, joka mitataan tuikeilmaisimilla. Törmänneen elektronin tai fotonin energia on suoraan verrannollinen tuikeilmaisimien havaitsemaan valon määrään.- **Hadronikalorimetri (HCAL)**Hardonikalorimetri pysäyttää hardoneiksi kutsutut hiukkaset, kuten protonit ja netronit. 
HCAL-kerrokseen saapuvat hardonit menettävät liike-energiaansa hiukkasryöppyihin, joiden synnyttämien tuikevalojen avulla saadaan lasketuksi hardonin alkuperäinen energia.- **Myonijärjestelmä**CMS ilmaisin on nimensä (Compact Muon Solenoid) mukaisesti suunniteltu erityisesti havaitsemaan myoneita. Myonit ovat vaikeasti havaittavia ja ne kulkevatkin ECAL ja HCAL kerrosten läpi pysähtymättä. Kuitenkin positiivisina hiukkasina niiden kulkiessa kaasulla täytettyjen kammioiden läpi (_drift tubes_) kaasu ionisoituu ja vapautuneet elektronit sekä positiiviset ionit kulkeutuvat (_Cathode Strip Chambers_) anodeille ja katodeille. Signaalin avulla voidaan laskea myonin paikka tietyllä ajanhetkellä. _Resistive Plate Chambers_ ovat myös osa myonijärjestelmää luoden signaalia, joka voidaan siirtää eteenpäin käsiteltäväksi.Neutriinot havaitaan epäsuorasti liikemäärän säilymisen kautta. **$\color{blue}{\text{Tarkastelkaa miten eri hiukkaset käyttäytyvät CMS ilmaisimen kerroksissa.}}$****$\color{blue}{\text{Selvitä millainen varaus kuvan hiukkasilla on. }}$** Vinkki Vastaus- **Myoneita** on vaikea havaita mutta CMS on nimensä (Compact Muon Solenoid) mukaisesti erittäin hyvä myös siinä. Myonit kulkevat kaikkien kerrosten läpi pysähtymättä jättäen kuitenkin signaaleja pii-ilmaisimiin ja myoni kammioihin. Kuvassa myonin rata kääntyy ensin alaspäin ja sitten vahvan solenoid magneetin ohitettuaan alkaa kaartua ylöspäin. Oikean käden säännön mukaan myoni on siis negatiivinen hiukkanen. Huomion arvoista on että solenoidi magneetin sisällä magneettikenttä on vastakkais suuntainen magneetin ulkopuoliseen magneettikenttään verrattuna. - **'Elektronin'** rata kääntyy kuvassa ylöspäin. Tarkastellessamme tilannetta oikean käden säännön avulla päädymme ristiriitaan. Hiukkanen käyttäytyy, kuin positiivinen hiukkanen, mutta jo yläasteelta on tuttua, että elektronit ovat negatiivisesti varautuneita. Kyseessä onkin positroni eli elektronin antihiukkanen joka muistuttaa paljon elektronia, mutta onkin positiivisesti varautnut. Fyysikot saattavat käyttää välillä termejä hajamielisesti sekaisin, kunhan asiayhteydestä on selvää mistä puhutaan ;) - **Hadronit** Kuvassa on kaksi hadronia toinen merkittynä vihreällä viivalla ja toinen vihreällä katkoviivalla. Hadronit ovat vähintään kahden kvarkin muodostamia hiukkasia. **Katkoviivalla** merkityn hiukkasen rata ei taivu ollenkaan 4 Teslan, eli vahvuudeltaan noin 100,000 kertaa maan magneettikentän vahvuisessa [5](cms) magneettikentässä. Tämä tarkoittanee sitä, että hadroni on varaukseton. Kyseessä voi olla esimerkiksi neutroni. Neutronin vauhti hidastuu hadronikalorimetrissä kun se vuorovaikuttaa aineen kanssa ja aiheuttaa signaalin. **Jatkuvalla viivalla** kuvatun hadronin rata kaartuu kuvassa alaspäin. Voimme siis päätellä hiukkasen olevan varaukseltaan negatiivinen. Tälläinen hadroni on esimerkiksi Pion ($ \pi^- : d \overline{\mu} $ ), hadronin liike-energia voidaan mitata sen aiheuttamasta hiukkasryöpystä hadronikalorimetrissä. - **Fotonin** kulku CMS:n sisällä on piirretty tummansinisellä katkoviivalla. Viiva on suora ja päättyy elektrokalorimetrissä syntyvään hiukkasryöppyyn. Fotoni on siis varaukseton.Kaikkia hiukkasia ei voida CMS:n avulla havaita. Esimerkiksi neutriinot havaitaan epäsuorasti liikemäärän säilymisen kautta. 
Jos hiukkanen pyörii myötäpäivää LHC:ssa, **$\color{blue}{\text{mihin suuntaan}}$ $\color{blue}{\text{magneettikentän}}$ $\color{blue}{\text{ tulisi osoittaa?}}$** Vinkki Vastaus Magneettikentän tulee osoittaa $\color{green}{\text{ylöspäin}}$, jotta sen aiheuttama voima pitää protonin ympyräradalla. LHC kiihdyttimessä kiihdytetään kuitenkin protoneja molempiin suuntiin. Magneettikenttä osoittaa alaspäin toisessa putkessa, jossa protonit viilettävät vastapäivään. YksiköistäMeidän on helppo puhua metreistä, koska voimme mitata sillä matkaamme lähikauppaan, kilogramma on maitotölkin massa ja sekunnissa sanomme yhden numeron. Hiukkasfysiikan liikkuu kuitenkin aivan eri suuruusluokissa ja voimien suhteissa. Siksi meille luontevat yksiköt kuten metri tai kilogramma eivät sovi hiukkasfyysikon arkeen.Tutkitaan ensin millaisia suureita tulemme käytämään massa: m aika: t pituus: l Loput tarvitsemistamme suureista voidaan johtaa näistä kolmesta. $\color{blue}{\text{Miten liikemäärän voi essittää yllä mainittujen suureiden avulla?}}$ $\frac{m \cdot l}{t}$ $\color{blue}{\text{ Entä energian?}}$ $\frac{m \cdot l^2}{t^2}$ Hiukkas fyysikot usein asettavat c=1 tällä oletuksella saadaan useat kaavat siistimpään muotoon (esim $E=mc^2 \rightarrow E=m$). Muutoksen voi huoletta tehdä, sillä valonnopeus (tyhjiössä) on vakio ja sen numeerinen arvo riippuu vain valituista yksiköistä. Samalla muutamme käytettävää järjestelmää siten, että nopeudella ei ole yksikköä eli se on ns. dimensioton.Näin ollen myös liikemäärän ja energian yksiköt muuttuvat liikemäärä: [m] * [v] --> [m] energia: [m] * [v]^2 --> [m] Olemme päätyneet tilanteeseen jossa liikemäärällä, energialla ja massalla on kaikilla sama yksikkö $[m]$! Voimme siis valita lempi yksikkömme, jolla mittaamme kaikkia kolmea esim. kilogramma tai newtonsekuntti. $\color{blue}{\text{Arvatkaa mikä on hiukkasfyysikoiden lempi yksikkö!}}$ (vastaus on entuudestaan tuttu massan, liikemäärän tai energian yksikkö) eV = 1.602176634×10−19 JouleaElektronivoltti on tuttu energian yksikkönä. Hiukkasfyysikot voivat kuitenkin mitata sillä myös massaa ja liikemäärää. Tulevissa tehtävissä tulet törmäämään sen kerrannaisyksiköihin: 1MeV = 10^6 eV 1GeV = 10^9 eV **** Hiukkasten havaitseminen hiukkaskiihdyttimellä- LHC törmäyttää miltei valonnopeudella kulkevia protoneja. - Syntyy uusia hiukkasia - Mitataan vakaampia hiukkasia - Päätellään datan avulla mistä hiukkasesta mitatut hiukkaset ovat kotoisin- Yksittäiset havainnot eivät riitä- Tarvitaan tilastollisesti merkittävä määrä dataa ja havaintoja - Verrataan dataa simulaatioihin ja ennusteisiin **** $\color{purple}{\text{Esimerkki:}}$ Histogrammi eri datamäärilläKokeillaan, miten datan määrä vaikuttaa tuloksiin. Alla on koodi, joka piirtää annetulla datamäärällä (4 kertaa) histogrammin kahden suurienergisen myonin invariantista massasta. Aloitetaan tutkimalla pieniä lukuja 0-20 ja kasvatetaan datan määrää vähitellen.# Haetaan data tiedostosta ja tallennetaan se muuttujaan 'data'. data = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/Zmumu_Run2011A_massoilla.csv') # Tallennetaan datatiedostossa olevat invarianttien massojen arvot muuttujaan 'invariantti_massa'. invariantti_massa = data['M'] kuvat = 0 # Luodaan tyhjä lista 'valitut', johon tallennetaan valittujen invarianttien massojen arvot. while kuvat < 4: valitut = [] # Pyydetään käyttäjää valitsemaan tapahtumien lukumäärä. Tallennetaan luku muuttujaan 'määrä'. 
määrä = int(input('Kuinka monta tapahtumaa valitaan: ')) # Tarkastetaan, onko datassa tarpeeksi tapahtumia. Jos tapahtumia ei ole tarpeeksi, tulostetaan virheilmoitus. # Muussa tapauksessa massat valitaan ja tallennetaan 'valitut'-listaan. if määrä > len(invariantti_massa): print('Virhe: Histogrammin piirto epäonnistui. Ei tarpeeksi dataa. Maksimimäärä tapahtumia on ' + str(len(invariantti_massa)) + '.') else: for f in range(määrä): M = invariantti_massa[f] valitut.append(M) print('\n Valitsit datasta {} invariantin massan arvoa.'.format(määrä)) # Piirretään histogrammi valitusta datamäärästä. Valitaan pylväiden lukumäärä 'bins' sekä x-akselin väli 'range'. plt.hist(valitut, bins=120, range=(60,120)) # Name the axises and give the title. plt.xlabel('Invariantti massa [GeV]') plt.ylabel('Tapahtumien lukumäärä') plt.title('Kahden myonin invariantti massa\n') plt.show() # Empty the variable "selected" for the next run. kuvat += 1 valitut = []**** Mikä invariantti massa?Hiukkasfysiikan maailmassa tavallisen mekaniikan kaavat eivät enään aina päde kun massa muuttuu energiaksi ja toisin päin. Säilymislakeja toki kuitenkin on. Voimme muodostaa hiukkasten energian ja liikemäärän avulla suureen jota kutsumme **invariantiksi massaksi**. Sen arvo kuvaa hajonneelle emohiukkaselle ominaista massaa. On mahdoton ajatus punnita esimerkiksi Higgsin hiukkasta, jonka eliniänodote on luokkaa $10^{-22}$ sekunttia [[6](higs)]. CMS:n kaltainen laite voi kuitenkin mitata tytärhiukkasten liikemäärää ja energiaa, joiden avulla voidaan laskea _invariantti massa_. Se on matemaattinen arvo joka on "invariantti" mittausympäristölle, eli se voidaan laskea missä ja mille tahansa. Jos sen laskee tietystä hajoamisesta syntyneille tytärhiukkasille, saadaan arvo joka on lähellä emohiukkasen massaa. Jos taas lasketaan invariantti massa hiukkasille jotka eivät liity mitenkään toisiinsa saadaan arvo joka ei kuvaa mitään, eli taustakohinaa.**Invariantti massa siis säilyy hiukkasen hajotessa uusiksi hiukkasiksi.** Tämän ominaisuuden ansiosta se on voittamaton työkalu kokeellisen hiukkasfysiikan tutkijoille. Hiukkasilmaisin, kuten CMS, havaitsee lähinnä stabiileja hiukkasia. Määrittääkseen minkä raskaamman hiukkasen hajoamisesta stabiilit hiukkaset ovat kotoisin, tutkijoiden tulee tarkastella suuria määriä dataa. Piikit invariantin massan histogrammissa voivat viitata tietyn energiseen emohiukkaseen.[Lisätietoa englanniksi](https://profmattstrassler.com/articles-and-posts/particle-physics-basics/mass-energy-matter-etc/mass-and-energy/) **** Hiukkaset hukassaVAIHTOEHTO1:CERN:in kesäopiskelija on saanut tehtäväkseen tutkia CMS:n mittaamaa tutkimusdataa vuoden 2011 ajoilta. Dataan on valikoitu tapahtumia jossa hiukkasilmaisin on havainnut kaksi myonia. Aikaisempien tukimusten perusteella tiedetään että moni hiukkanen voi hajota juuri kahdeksi myoniksi ja ne voidaan tunnistaa invariantin massan avulla. Kiireinen kesäopiskelija pyytää teitä auttamaan suuren datamäärän analysoinnissa. Hän on jakanut datan valmiiksi 6 tiedostoon, joissa jokaisessa vaikuttaa olevan jotain kiinnostava invariantin massan arvoissa. Autetaan kesäopiskelijaa ja tutkitaan mistä tiedostoista on oikein kyse!Hypoteesi: Emohiukkasten massat voidaan selvittää invariantin massan arvojen avulla ja näin löytää hiukkaset jotka voivat hajota kahdeksi myoniksi.VAIHTOEHTO2:CERN:n kesäopiskelijalle on käynyt vahinko. Hän on unohtanut pitää tärkeän mittausdatan järjestyksessä. Nyt hänellä on käsissään kuusi oudosti nimettyä tiedostoa CMS:n mittausdataa. 
Jokaisessa tiedostossa on mitattuja arvoja kahdesta myonista jotka ovat kenties peräisin jonkun hiukkasen hajoamisesta. Kesäopiskelija pyytää teidän apua tiedostojen sisällön tutkimisessa. Tiedostot ovat suuria, joten tehokas ja hyvä tapa käsitellä tällaisia tiedostoja on käyttää Python ohjelmointikieltä. Ette tarvitse aikaisempaa ohjelmointikokemusta.#Python koodi ajetaan koodisoluista muuttuja = 10*10 print("saimme vastaukseski: " + str(muuttuja))saimme vastaukseski: 100**** Datan kimppuunEnnen kuin aloitatte muistakaa ajaa ensimmäinen koodisolu otsikon [alustus](alustus) alta. Sen avulla saatte tarvittavat Python paketit datan käsittelyyn.**\** merkkaa koodissa kommentin jota kone ei aja. Kommentit helpottavat koodin lukemista ja sen toiminnan ymmärtämistä.Poistakaa alla olevasta koodista kommentin merkki **\** ryhmänne numeron mukaiselta riviltä ja ajakaa solu. Käsky pd.read_csv() lukee suluissa olevan csv-tiedoston ja tallentaa sen sisältämän datan taulukkona muuttujaan. Annetaan muuttujalle nimeksi _pikkidata_ + _ryhmän numero_.$\color{red}{\text{Ajakaa esim. näppäimillä [ctrl] + [enter]}}$#piikkidata1 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata1.csv') #piikkidata2 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata2.csv') #piikkidata3 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata3.csv') #piikkidata4 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata4.csv') #piikkidata5 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata5.csv') #piikkidata6 = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/piikkidata6.csv')Edellisessä koodisolussa latasit tiedoston netistä, luit sen csv-muodosta ja tallensit muuttujaan. Mutta miten saamme tietää miltä lataamamme data näyttää? Lukekaa ajatuksella läpi alla oleva esimerkki. Sitten koittakaa tutkia ryhmänne piikkidataa ja löytää vastaukset ensimmäisiin kysymyksiin. Hyödyllisiä Python komentoja - tiedostonnimi.head() tulostaa tiedoston 5 ensimmäistä riviä - tiedostonnimi.sarakkeenotsikko palauttaa taulukosta tämän sarakkeen arvot. - print() tulostaa sulkujen sisällä olevan arvon - len() palauttaa sulkujen sisällä olevan listan/jonon/taulukon pituuden - min() palauttaa sulkujen sisällä olevan datan pienimmän arvon - max() palauttaa datan suurimman arvon **** $\color{purple}{\text{Esimerkki:}}$ Taulukon (_Pandas DataFrame_) tutkiminen ja käyttäminenLukekaa ensin huolella ja ajakaa sitten solut järjestyksessä.Esimerkin numerot ovat todellisten koeasemien mittoja. [[7](mitat)]# Tätä ei tarvitse ymmärtää sillä latasitte jo tutkittavan datan netistä. LHC_koeasemat=pd.DataFrame({'Nimi':['CMS', 'ATLAS', 'ALICE', 'LHCb'], 'Paino':[14000, 7000, 10000, 5600], #tonneja 'Pituus':[21, 46, 26, 21], #metrejä 'Leveys':[15, 25, 16, 13], #metrejä 'Korkeus':[15, 25, 16, 10]}) #metrejä # luodaan taulukko ja tallennetaan se muuttujaan "LHC_koeasemat" # Taulukko sisältää LHC kiihdyttimen varrella olevien koeasemien tietoja [lähdee]. 
# HEAD-komennon käyttö: LHC_koeasemat.head() #palauttaa taulukon 5 ensimmäistä riviä # Valitaan sarake "Nimi" ja tulostetaan se print(LHC_koeasemat.Nimi) #lasketaan montako riviä taulukossa on ja tulostetaan vastaus pituus=len(LHC_koeasemat) print(pituus) # LHC_koeasemat.Paino valitsee tutkittavaksi sarakkeen jossa on ilmaisimien painot # max(LHC_koeasemat.Paino ) etsii painoista suurimman # print(max(LHC_koeasemat.Paino)) tulostaa max() funktiolla saaadun suurimman massan. suurin=max(LHC_koeasemat.Paino) print(suurin) #samaan tapaan: pienin=min(LHC_koeasemat.Korkeus) print(pienin)**** Tutkikaa ladattua tiedostoa Seuraavaksi kokeilkaa kirjoittaa itse koodia.Haetussa datassa on monta tapahtumaa, joista jokaisesta on tallennettu useita arvoja. Lisäksi jokaisessa tapahtumassa syntyi 2 myonia jotenka dataan on tallennettu arvot molemmille myoneille. Numerointi 1 tai 2 kertoo kummalle myonille kyseinen arvo kuuluu. Tehtävän kannalta __oleellisia ovat__: - __px__ : $p_x$ on kyseisen myonin (1 tai 2) liikemäärän x-akselin suuntainen komponentti (GeV). - __py__ : $p_y$ on myonin (1 tai 2) liikemäärän y-komponentti (GeV). - __pz__ : $p_z$ on myonin (1 tai 2) liikemäärän z-komponentti (GeV). - __E__ : $E$ on myonin (1 tai 2) energia (GeV). Muita arvoja taulukossa - __Run__ ja __Event__ : kertovat mistä törmäyksestä kyseisen rivin data on peräisin. - __pt__ : $p_t$ (transverse momentum) myonin liikemäärän komponentti joka on kohtisuorassa hiukkassuihkua vastaan (GeV). - __eta__ : $\eta$ on pseudorapiditeetti. - __phi__ : $\phi$ (atsimuuttikulma) on kulma jonka suuntaan hiukkanen kulkee jos detektoria tarkastellaan hiukkassuihkun akselilta (rad). - __Q__ : on myonin varaus. Jokaisessa dataan valitussa törmäyksessä syntyy myoni ja antimyoni ($\mu^+ , \mu^-$) [1]. - __M__ : palataan tähän myöhemmin. Kysymykset:- $\color{blue}{\text{Kuinka monta mittausta tiedostossa on?}}$- $\color{blue}{\text{Mikä on suurin energia, jonka myonille on havaittu?}}$- $\color{blue}{\text{Mikä on pienin liikemäärän y-komponentti joka myonille on havaittu?}}$Kirjoittakaa koodi jolla saatte vastaukset kysymyksiin!#Tähänn tulee teidän itse kirjoittama koodi!$\color{purple}{\text{Tarkistakaa}}$ saamanne arvot ajamalla alla oleva solu ja vastaamalla kysymyksiin.tarkista_arvot()**** Laskuja PythonillaPython on monikäyttöinen ohjelmointikieli, jolla voidaan käsitellä suuria määriä dataa ja suorittaa teokkaasti laskutoimituksia. Aloitetaan harjoittelu perus laskuista ja käytetään yllä esitettyä esimerkkiä. Esimerkkitaulukko on jo tallennettu tietokoneen muistiin.Alussa latasimme _numpy_ kirjaston ja tallensimme sen nimellä _np_. Kirjasto sisältää valmiiksi määriteltyjä funktioita, jota helpottavat laskujen suorittamista. Esimerkiksi _np.sqrt(x)_ vastaa $\sqrt x$. Lisäksi (x)\**2 ja _np.square(x)_ vastaa vat molemmat $x^2$.**** $\color{purple}{\text{Esimerkki:}}$ Laskuja taulukon arvoillaYksinkertaisuuden vuoksi oletetaan, että kaikki listan hiukkasilmaisimet olisivat lieriöitä. 
In reality this is of course not the case.
#let's recall what the table looked like LHC_koeasemat.head()
Once we assume that every detector is a cylinder, we can easily compute - **the volumes V:** $\pi (0.5 r)^2 \cdot l$ - **the densities:** $\frac{m}{V}$. - **the diagonals x:** with the Pythagorean theorem, $\sqrt{r^2+l^2}$, where **$r$** is the diameter of the base of the cylinder, i.e. the height of the detector, **$l$** is the height of the cylinder, i.e. the length of the detector, and **$m$** is the mass of the detector.
# computing the volume tilavuus = np.pi * (LHC_koeasemat.Korkeus /2 )**2 * LHC_koeasemat.Pituus print(tilavuus) # computing the density tiheys = LHC_koeasemat.Paino / tilavuus print(tiheys) # computing the diagonal lävistäjä= np.sqrt((LHC_koeasemat.Pituus)**2 + (LHC_koeasemat.Korkeus)**2) print(lävistäjä)
**** Computing the invariant mass
The data contains observations of two muons produced in a collision. To understand better what really happened in the collision, a lot of data has to be collected, and from it we have to infer what happened where even the particle detector cannot see. From the more stable particles flying out of the collision and from their energies, the invariant mass lets us infer where they may have come from. $\color{blue}{\text{Compute the invariant mass values for your group's data}}$, perhaps something interesting will turn up? In the calculation we use the following expression for the invariant mass, $M = \sqrt{(E_1 + E_2)^2 - \|\textbf{p}_1 + \textbf{p}_2 \| ^2}$, where $\|\textbf{p}_1 + \textbf{p}_2 \|^2$ is the squared norm of the vector sum and can be computed as $\|\textbf{p}_1 + \textbf{p}_2 \|^2=(p_{x1}+p_{x2})^2+(p_{y1}+p_{y2})^2+(p_{z1}+p_{z2})^2$. It is worth doing intermediate steps and storing their results in variables. The variables can then be used when computing the final result.
#Write here the code that performs the invariant mass calculation
$\color{purple}{\text{Check}}$ the values you obtained by running the cell below and answering the questions. tarkista_inv_massat()
Psst. The sharp-eyed may already have noticed that the invariant mass had already been computed in the column 'M'. If you like, you can compare the answers you obtained with the precomputed values.
**** $\color{purple}{\text{Example:}}$ A histogram with Python
You can inspect the invariant mass values you computed with the _head()_ function, but looking at thousands of numbers as a list makes little sense. To see the physics behind the measurements, the numbers have to be presented in an illustrative form. This is where the _histogram_ steps in. The histogram is an unbeatable tool in particle physics research, where the number of events is large and they are distributed according to probabilities. For drawing the histogram we use the _matplotlib.pyplot_ package, which we stored at the beginning under the name _plt_. With it a histogram is easily drawn using the plt.hist() function.
# Create a histogram # plt.hist() takes three arguments here: ### the data from which the histogram is drawn ### bins tells into how many bars the observations are divided ### range sets the interval over which the histogram is created plt.hist(LHC_koeasemat.Korkeus, bins=4, range=(10,30)) # name the axes plt.xlabel("Height (m)") plt.ylabel("Number of observations") # give the plot a title plt.title("Histogram of the heights of the experiments") # display the plot plt.show()
With small amounts of data a histogram does not add much value over looking at the numbers directly, and is of little use. 
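Putting the formula into code, a minimal sketch of the invariant mass calculation could look like the following. It assumes group 1, i.e. the variable piikkidata1, and column names E1, px1, py1, pz1 and E2, px2, py2, pz2 following the column list above; adapt the names to your own group and file.
# intermediate sums of the energies and of the momentum components of the two muons
E_sum = piikkidata1.E1 + piikkidata1.E2
px_sum = piikkidata1.px1 + piikkidata1.px2
py_sum = piikkidata1.py1 + piikkidata1.py2
pz_sum = piikkidata1.pz1 + piikkidata1.pz2
# squared norm of the summed momentum vector
p_norm_squared = px_sum**2 + py_sum**2 + pz_sum**2
# invariant mass M = sqrt((E1+E2)^2 - ||p1+p2||^2)
invariantti_massa = np.sqrt(E_sum**2 - p_norm_squared)
print(invariantti_massa.head())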
Let us return, however, to the subject under study, and see whether a histogram brings some clarity to the _piikkidata_.
**** A histogram of the peak data
$\color{blue}{\text{Draw a histogram of the invariant mass values you computed.}}$ Try it yourselves first and then peek at the _hints_. Hints: file: You computed the invariant mass values earlier and stored them in some variable. It is enough to write the name of that variable. range: It is worth first checking with the _max()_ and _min()_ functions what kind of values your invariant mass takes. That way you can easily set the plotting interval of the histogram (_range=(min,max)_). Even after this it is worth cropping the figure a little further, so that you can see the peak well. bins: Try different options, and keep in mind how many events there were in the data. You can also check that with the _len()_ function.
#Write here the code that draws the histogram. Remember to also name the axes and the plot.
**** Reflection $\color{blue}{\text{Was some kind of pattern or distribution visible in the histogram?}}$ $\color{blue}{\text{What causes the possible pattern, or could it be a coincidence?}}$ $\color{blue}{\text{Was drawing the histogram useful?}}$ Comparing the results. OPTION 1: Now we combine the results we obtained into a larger whole and study the file of which the _piikkidata_ sets are pieces. OPTION 2: Our summer student has a vague hunch about where the _piikkidata_ sets might come from. On their computer they have another file containing muon values measured at CMS around the same time. The cell below downloads that file for you to study as well.
# store the file under the name 'dimu2' dimu2=pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/DoubleMuRun2011A.csv') #Explore the file, for example with a histogram of the invariant mass.
--- Chapter 5: Statistics --- Descriptive Statistics
Usually the primary tool for illustrating the data is the histogram, which can convey many statistical values at once. The book builds the histogram manually by splitting ``num_friends`` into 100 bins. I modify some of the code to my style.
num_friends = [100.0,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11, 10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] from collections import Counter import matplotlib.pyplot as plt friend_counts = Counter(num_friends) upper_X = int(max(num_friends) + 1) xs = range(upper_X) # largest value is 100 ys = [friend_counts[x] for x in xs] # height is just # of friends upper_Y = max(ys) + 3 plt.bar(xs, ys) plt.axis([0, upper_X, 0, upper_Y]) plt.title("Histogram of Friend Counts") plt.xlabel("# of friends") plt.ylabel("# of people") plt.show() plt.bar(xs, ys,edgecolor='purple', color='None') plt.show()
Descriptive Statistics
The book shows ways to calculate the descriptive statistics by hand. 
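The code below imports dot and sum_of_squares from the book's linear_algebra.py, which is not reproduced in this notebook; minimal stand-in definitions, consistent with how the two helpers are used here, would be:
from typing import List
Vector = List[float]

def dot(v: Vector, w: Vector) -> float:
    """Sum of componentwise products v_1*w_1 + ... + v_n*w_n."""
    assert len(v) == len(w), "vectors must be the same length"
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sum_of_squares(v: Vector) -> float:
    """v_1*v_1 + ... + v_n*v_n, i.e. dot(v, v)."""
    return dot(v, v)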
Note that ``linear_algebra.py`` from chapter four is copied to the notebook directory.* **ranking**: sort the data, then read values off by rank (smallest, second smallest, ...) * **`mean(data)`** : the sum of the data divided by the number of data points * **`median(data)`** : the middle value of the sorted data; whether the number of points is odd or even determines how it is computed * **`quantile(data, p)`**: the value below which a fraction `p` of the data lies, e.g. `p = 0.25` is the first quartile (the 25th percentile) * **`mode(data)`** : the most frequent value or values * **`data_range(data)`** : the maximum minus the minimum value * **`variance(data)`**: the sum of squared deviations from the mean divided by the number of data points minus the degrees of freedom used, usually N-1 * **`standard_deviation(data)`**: the square root of the variance * **`interquartile_range`** : the difference between the third and the first quartile, describing the spread of the middle half of the data
num_points = len(num_friends) # 204 assert num_points == 204 largest_value = max(num_friends) # 100 smallest_value = min(num_friends) # 1 range_value = largest_value - smallest_value assert largest_value == 100 assert smallest_value == 1 assert range_value == 99 # ranking sorted_values = sorted(num_friends) smallest_value = sorted_values[0] # 1 second_smallest_value = sorted_values[1] # 1 second_largest_value = sorted_values[-2] # 49 assert smallest_value == 1 assert second_smallest_value == 1 assert second_largest_value == 49 from typing import List def mean(xs: List[float]) -> float: return sum(xs) / len(xs) mean(num_friends) # 7.333333 assert 7.3333 < mean(num_friends) < 7.3334 # The underscores indicate that these are "private" functions, as they're # intended to be called by our median function but not by other people # using our statistics library. def _median_odd(xs: List[float]) -> float: """If len(xs) is odd, the median is the middle element""" return sorted(xs)[len(xs) // 2] def _median_even(xs: List[float]) -> float: """If len(xs) is even, it's the average of the middle two elements""" sorted_xs = sorted(xs) hi_midpoint = len(xs) // 2 # e.g. 
length 4 => hi_midpoint 2 return (sorted_xs[hi_midpoint - 1] + sorted_xs[hi_midpoint]) / 2 def median(v: List[float]) -> float: """Finds the 'middle-most' value of v""" return _median_even(v) if len(v) % 2 == 0 else _median_odd(v) assert median([1, 10, 2, 9, 5]) == 5 assert median([1, 9, 2, 10]) == (2 + 9) / 2 assert median(num_friends) == 6 def quantile(xs: List[float], p: float) -> float: """Returns the pth-percentile value in x""" p_index = int(p * len(xs)) return sorted(xs)[p_index] assert quantile(num_friends, 0.10) == 1 assert quantile(num_friends, 0.25) == 3 assert quantile(num_friends, 0.75) == 9 assert quantile(num_friends, 0.90) == 13 def mode(x: List[float]) -> List[float]: """Returns a list, since there might be more than one mode""" counts = Counter(x) max_count = max(counts.values()) return [x_i for x_i, count in counts.items() if count == max_count] assert set(mode(num_friends)) == {1, 6} # "range" already means something in Python, so we'll use a different name def data_range(xs: List[float]) -> float: return max(xs) - min(xs) assert data_range(num_friends) == 99 from linear_algebra import sum_of_squares def de_mean(xs: List[float]) -> List[float]: """Translate xs by subtracting its mean (so the result has mean 0)""" x_bar = mean(xs) return [x - x_bar for x in xs] def variance(xs: List[float]) -> float: """Almost the average squared deviation from the mean""" assert len(xs) >= 2, "variance requires at least two elements" n = len(xs) deviations = de_mean(xs) return sum_of_squares(deviations) / (n - 1) assert 81.54 < variance(num_friends) < 81.55 import math def standard_deviation(xs: List[float]) -> float: """The standard deviation is the square root of the variance""" return math.sqrt(variance(xs)) assert 9.02 < standard_deviation(num_friends) < 9.04 def interquartile_range(xs: List[float]) -> float: """Returns the difference between the 75%-ile and the 25%-ile""" return quantile(xs, 0.75) - quantile(xs, 0.25) assert interquartile_range(num_friends) == 6Demonstrate the statistics valuesLet's try to use it and then represent it in the graph.Ranges = data_range(num_friends) Median = median(num_friends) Mean = mean(num_friends) _, Mode = mode(num_friends) First_Quatile = quantile(num_friends,0.25) Third_Quatile = quantile(num_friends,0.75) IQR =interquartile_range(num_friends) STD_DEV = standard_deviation(num_friends) print(Third_Quatile - First_Quatile,IQR) print(STD_DEV)6 6 9.03014473623248Actually, data is not fit into normal distribution since the outline. 
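The hand-rolled values above can also be cross-checked with numpy one-liners; a small sketch (percentile conventions differ slightly between implementations, so the quartiles may not match exactly):
import numpy as np

nf = np.array(num_friends)
print(nf.mean())                    # mean
print(np.median(nf))                # median
print(np.percentile(nf, [25, 75]))  # first and third quartiles
print(nf.var(ddof=1))               # sample variance (N-1 in the denominator)
print(nf.std(ddof=1))               # sample standard deviation
q1, q3 = np.percentile(nf, [25, 75])
print(q3 - q1)                      # interquartile range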
Also, this chapter use ``list`` but some work seem too complicated, so I use numpy instead.import numpy as np x = np.array(xs) y = np.array(ys) plt.figure(figsize=(16,9)) plt.plot(x,y, 'o-',label='data') plt.plot([Median,Median],[0,upper_Y],ls='--', label='Median') plt.plot([Mean,Mean],[0,upper_Y],ls='--', label='Mean') plt.plot([Mode,Mode],[0,upper_Y],ls='--', label='Mode') plt.fill_between(x, y, 0, where = (x>First_Quatile) & (xRemove the outlinesLet see the statistics values when remove some outlines and then plot it again.# delete 100.0,49,41,40, index is 0 to 3 Data = np.delete(np.array(num_friends),range(15)) friend_counts = Counter(Data) upper_X = int(max(Data) + 1) x = np.arange(upper_X) # largest value is 100 y = np.array([friend_counts[c] for c in x] ) # height is just # of friends upper_Y = max(y) + 3 Ranges = data_range(Data) Median = median(Data) Mean = mean(Data) _, Mode = mode(Data) First_Quatile = quantile(Data,0.25) Third_Quatile = quantile(Data,0.75) IQR =interquartile_range(Data) STD_DEV = standard_deviation(Data) plt.figure(figsize=(16,9)) plt.plot(x,y, 'o-',label='data') plt.plot([Median,Median],[0,upper_Y],ls='--', label='Median') plt.plot([Mean,Mean],[0,upper_Y],ls='-', label='Mean') plt.plot([Mode,Mode],[0,upper_Y],ls='--', label='Mode') plt.fill_between(x, y, 0, where = (x>First_Quatile) & (x--- Covariance and CorrelationBesides each individual avarage and disribution, we can see two dataset comparisision by Covariance and Correlation.Let see the data from the book without remove any outline, the more number of friend may be more number hours online.Data seems not growth in the same direction (blue data) that not in line with the expected (orange)num_friends = [100.0,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11, 10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] daily_minutes = [1,68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59, 49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57, 31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94, 32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47, 30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17, 25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73, 24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59, 39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77, 20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42, 29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55, 14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35, 28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42, 17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84] daily_hours = [dm / 60 for dm in daily_minutes] plt.figure(figsize=(16,9)) #plt.plot(num_friends,daily_hours,'kp',markerfacecolor='b',markersize=12) 
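# The 'Actual' series below is the raw (friends, minutes) scatter; the 'Perfect' series
# plots sorted x against sorted y, i.e. roughly what the cloud would look like if the two
# variables increased together in a perfectly monotone way, as a visual reference.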
plt.plot(num_friends,daily_minutes,'o',markerfacecolor='b',markersize=12, alpha=0.5, label='Actual') plt.plot(sorted(num_friends),sorted(daily_minutes),'o',markerfacecolor='pink',markersize=12, alpha=0.3, label='Perfect') plt.xlabel('Number of Friend',fontsize=14) plt.ylabel('Daily Hour',fontsize=14) plt.title('Correlation with outliter', fontsize=16) plt.xticks(range(0,110,10)) plt.yticks(range(0,110,10)) plt.legend() plt.show()The **Covariance** is compare variance the dataset, positive mean the value growth in the same direction while negative is in the opposite direction and value close to zero mean no siginficant. In general with N data set, the covariance can be compute from the covariance matrix\begin{eqnarray*}\text{Covariance Matrix:} \ \ \Sigma = \begin{bmatrix} \sigma_{1}^2 \ \sigma_{12} \ \dots \ \sigma_{1N} \\ \sigma_{21} \ \sigma_{2}^2 \ \dots \ \sigma_{2N} \\ \vdots \ \vdots \ \ddots \ \vdots \\ \sigma_{N1} \ \sigma_{N2} \ \dots \ \sigma_{NM}^2 \end{bmatrix}\end{eqnarray*} In the case of Number of friend (x) and dialy minute (y) the covariance is:\begin{equation}\begin{aligned}\text{Covariance}_{x,y} &= \vec{x} \cdot \vec{y} \\ \\ &= \sum_{i=0}^{i=N} {x_i \cdot y_i} \\ \\ &= \sum_{i=0}^{i=N} {\sigma_x \cdot \sigma_y} \\ \\ &= \large \sum_{i=0}^{i=N} { \frac{ (x_i -\bar{x})(y_i - \bar{y})}{N-1} }\end{aligned}\tag{Covariance}\label{Covairance}\end{equation}from linear_algebra import dot def covariance(xs: List[float], ys: List[float]) -> float: assert len(xs) == len(ys), "xs and ys must have same number of elements" return dot(de_mean(xs), de_mean(ys)) / (len(xs) - 1) assert 22.42 < covariance(num_friends, daily_minutes) < 22.43 assert 22.42 / 60 < covariance(num_friends, daily_hours) < 22.43 / 60**Correlation** similar to the covariance but more stable.\begin{equation}\begin{aligned}\text{Correlation}_{x,y} &= \frac{\vec{x} \cdot \vec{y}}{\sigma_x \sigma_y} \\ \\ &= \sum_{i=0}^{i=N} \frac{x_i \cdot y_i}{\sigma_x \sigma_y} \\ \\ &= \large \frac{1}{N-1}\sum_{i=0}^{i=N} { \frac{ (x_i -\bar{x})(y_i - \bar{y})}{\sigma_x \sigma_y} }\end{aligned}\tag{Correlation}\label{Correlation}\end{equation} Coefficient of variance\begin{equation}\begin{aligned}\text{Coefficient of Variation} = \frac{\sigma}{\bar{x}} \end{aligned}\tag{Coefficient of Variation}\label{Coefficient of Variation}\end{equation}def correlation(xs: List[float], ys: List[float]) -> float: """Measures how much xs and ys vary in tandem about their means""" stdev_x = standard_deviation(xs) stdev_y = standard_deviation(ys) if stdev_x > 0 and stdev_y > 0: return covariance(xs, ys) / stdev_x / stdev_y else: return 0 # if no variation, correlation is zero assert 0.24 < correlation(num_friends, daily_minutes) < 0.25 assert 0.24 < correlation(num_friends, daily_hours) < 0.25 outlier = num_friends.index(100) # index of outlier num_friends_good = [x for i, x in enumerate(num_friends) if i != outlier] daily_minutes_good = [x for i, x in enumerate(daily_minutes) if i != outlier] daily_hours_good = [dm / 60 for dm in daily_minutes_good] assert 0.57 < correlation(num_friends_good, daily_minutes_good) < 0.58 assert 0.57 < correlation(num_friends_good, daily_hours_good) < 0.58 plt.figure(figsize=(16,9)) #plt.plot(num_friends,daily_hours,'kp',markerfacecolor='b',markersize=12) #plt.plot(num_friends,daily_hours,'o',markerfacecolor='b',markersize=12, alpha=0.2, label='Actual') plt.plot(num_friends_good,daily_hours_good,'o',markerfacecolor='b',markersize=12, alpha=0.5, label='Exoected') 
#plt.plot(sorted(num_friends_good),sorted(daily_hours_good),'o',markerfacecolor='pink',markersize=12, alpha=0.8, label='Exoected') plt.xlabel('Number of Friend',fontsize=14) plt.ylabel('Daily Hour',fontsize=14) #plt.xticks([]) #plt.yticks([]) plt.show()Week 4 practical - due Friday 9/27 at 11pm* Make sure to put your name in the body of the notebook, and in the filename.* You may discuss these problems with other students in class, but you must turn in your own notebook.* List anyone with whom you discussed the work.* Please document your code, label your plots, and use markdown cells to add explanatory comments to the notebook.* Save your notebook as a pdf file and submit it in Canvas before the deadline.You may not distribute this notebook beyond our class, or post it anywhere online, without permission from the instructor.import numpy as np import sympy as sp from scipy import integrate from matplotlib import rc rc('text', usetex=True) from sympy import init_printing init_printing(use_latex=True) from astropy.cosmology import WMAP9 as cosmo from astropy import constants as const import matplotlib.pyplot as plt %matplotlib inline import matplotlib matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'serif' matplotlib.rcParams['font.sans-serif'] = 'stix' matplotlib.rcParams['text.usetex'] = True import matplotlib as mplHubble diagramType Ia supernovae can be used to measure the **Hubble diagram**, which plots the luminosity distance as a function of redshift. Here you will compute the Hubble diagram for different cosmologies.Consider a universe with mass density parameter $\Omega_M$ and dark energy characterized by $\Omega_L$. Let $d_H = c/H_0$ be the Hubble distance, where the Hubble constant is $H_0 = 100 h$ km/s/Mpc. The comoving distance to redshift $z$, in units of $d_H$, is$$ {\hat d}_C(z) = \int_{0}^{z} \frac{dz}{\sqrt{\Omega_M(1+z)^3 + \Omega_\Lambda + \Omega_k (1+z)^2}}$$where$$ \Omega_k = 1 - \Omega_M - \Omega_\Lambda$$The luminosity distance is then$$ d_L(z) = (1+z) d_H \times \begin{cases} \Omega_k^{-1/2} \sinh\left(\Omega_k^{1/2} {\hat d}_C(z)\right) & \Omega_k > 0 \\ {\hat d}_C(z) & \Omega_k = 0 \\ |\Omega_k|^{-1/2} \sin\left(|\Omega_k|^{1/2} {\hat d}_C(z)\right) & \Omega_k < 0 \end{cases}$$Plot Hubble diagrams out to $z=2$ for the following cases:* flat cosmologies with $(\Omega_M,\Omega_\Lambda) = (0,1)$, $(0.3,0.7)$, $(0.7,0.3)$, and $(1,0)$* matter-only cosmologies with $\Omega_\Lambda = 0$ and $\Omega_M = 0.1$, $0.3$, and $0.5$ Note: `astropy` has functions to compute cosmological distances. In case you are interested, here is sample code that you could modify to test your results. 
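In the same spirit, here is a hedged sketch (an illustration only, not the required solution) of how the requested curves could be computed and overlaid, using scipy.integrate.quad for the comoving-distance integral and leaving distances in units of the Hubble distance $d_H = c/H_0$:
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt

def dc_hat(z, Om, OL):
    """Dimensionless comoving distance, i.e. d_C(z) in units of d_H."""
    Ok = 1.0 - Om - OL
    integrand = lambda zp: 1.0 / np.sqrt(Om*(1 + zp)**3 + OL + Ok*(1 + zp)**2)
    return integrate.quad(integrand, 0.0, z)[0]

def dl_hat(z, Om, OL):
    """Luminosity distance d_L(z) in units of d_H, handling the three curvature cases."""
    Ok = 1.0 - Om - OL
    dc = dc_hat(z, Om, OL)
    if Ok > 1e-12:
        return (1 + z) * np.sinh(np.sqrt(Ok) * dc) / np.sqrt(Ok)
    if Ok < -1e-12:
        return (1 + z) * np.sin(np.sqrt(-Ok) * dc) / np.sqrt(-Ok)
    return (1 + z) * dc   # flat case

zs = np.linspace(0.0, 2.0, 101)
# the four flat cases from the assignment; the matter-only cases work the same way
for Om, OL in [(0.0, 1.0), (0.3, 0.7), (0.7, 0.3), (1.0, 0.0)]:
    plt.plot(zs, [dl_hat(z, Om, OL) for z in zs], label=r'$\Omega_M=%.1f, \Omega_\Lambda=%.1f$' % (Om, OL))
plt.xlabel('$z$')
plt.ylabel('$d_L / d_H$')
plt.legend()
plt.show()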
**But you must write your own code here.**ztry = np.arange(0.1,1.1,0.1) Otry = 0.3 Ltry = 0.7 from astropy.cosmology import LambdaCDM cosmo = LambdaCDM(H0=100.0,Om0=Otry,Ode0=Ltry,Tcmb0=0) # note that since we set H0=100 in the previous command, we get distances in units of h^{-1} Mpc cosmo.luminosity_distance(ztry).value
def f1(Ωm, ΩΛ):
    # integrand of the comoving-distance integral
    Ωk = 1 - Ωm - ΩΛ
    f = lambda z: (Ωm * (1+z)**3 + ΩΛ + Ωk * (1+z)**2)**(-1/2)
    return f

def d_C(Ωm, ΩΛ, z):
    ## Evaluates the dimensionless comoving distance out to redshift z (in units of d_H)
    return integrate.quad(f1(Ωm, ΩΛ), 0, z)[0]

def partFunc(Ωm, ΩΛ):
    Ωk = 1 - Ωm - ΩΛ
    # Hubble distance d_H = c/H0, converted to Mpc (H0=100 above, so these are h^-1 Mpc)
    d_H = (const.c / cosmo.H(0)).to('Mpc').value
    if Ωk > 0:
        d_L = lambda z: (1+z) * d_H * Ωk**(-1/2) * np.sinh(Ωk**(1/2) * d_C(Ωm, ΩΛ, z))
    elif Ωk == 0:
        d_L = lambda z: (1+z) * d_H * d_C(Ωm, ΩΛ, z)
    else:
        d_L = lambda z: (1+z) * d_H * np.absolute(Ωk)**(-1/2) * np.sin(np.absolute(Ωk)**(1/2) * d_C(Ωm, ΩΛ, z))
    return d_L

def hPlot(Ωm, ΩΛ):
    z = np.linspace(0, 2, 100)
    f = partFunc(Ωm, ΩΛ)
    # quad needs scalar limits, so evaluate the luminosity distance point by point
    d_L = np.array([f(zi) for zi in z])
    plt.clf()
    plt.plot(z, d_L, 'r')
    plt.xlabel('$z$')
    plt.ylabel('$d_L$ [$h^{-1}$ Mpc]')
    plt.show()

hPlot(0,1) #print((const.c/cosmo.H(0)).value)_Lambda School Data Science, Unit 2_ Sprint Challenge: Predict Steph Curry's shots 🏀For your Sprint Challenge, you'll use a dataset with all Steph Curry's NBA field goal attempts. (Regular season and playoff games, from October 28, 2009, through June 5, 2019.) You'll predict whether each shot was made, using information about the shot and the game. This is hard to predict! Try to get above 60% accuracy. The dataset was collected with the [nba_api](https://github.com/swar/nba_api) Python library.import sys in_colab = 'google.colab' in sys.modules if in_colab: # Install packages in Colab !pip install category_encoders==2.0.0 !pip install pandas-profiling==2.3.0 !pip install plotly==4.1.1 import pandas as pd # Read data url = 'https://drive.google.com/uc?export=download&id=1fL7KPyxgGYfQDsuJoBWHIWwCAf-HTFpX' df = pd.read_csv('https://drive.google.com/uc?export=download&id=1fL7KPyxgGYfQDsuJoBWHIWwCAf-HTFpX') # Check data shape assert df.shape == (13958, 20) df.head(5)To demonstrate mastery on your Sprint Challenge, do all the required, numbered instructions in this notebook.To earn a score of "3", also do all the stretch goals.You are permitted and encouraged to do as much data exploration as you want.**1. Begin with baselines for classification.** Your target to predict is `shot_made_flag`. What is your baseline accuracy, if you guessed the majority class for every prediction?**2. Hold out your test set.** Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations.**3. Engineer new feature.** Engineer at least **1** new feature, from this list, or your own idea.- **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ?- **Opponent**: Who is the other team playing the Golden State Warriors?- **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period.- **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long.- **Made previous shot**: Was 's previous shot successful?**4. Decide how to validate** your model. Choose one of the following options. Any of these options are good. 
You are not graded on which you choose.- **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations.- **Train/validate/test split: random 80/20%** train/validate split.- **Cross-validation** with independent test set. You may use any scikit-learn cross-validation method.**5.** Use a scikit-learn **pipeline** to **encode categoricals** and fit a **Decision Tree** or **Random Forest** model.**6.** Get your model's **validation accuracy.** (Multiple times if you try multiple iterations.) **7.** Get your model's **test accuracy.** (One time, at the end.)**8.** Given a **confusion matrix** for a hypothetical binary classification model, **calculate accuracy, precision, and recall.** Stretch Goals- Engineer 4+ new features total, either from the list above, or your own ideas.- Make 2+ visualizations to explore relationships between features and target.- Optimize 3+ hyperparameters by trying 10+ "candidates" (possible combinations of hyperparameters). You can use `RandomizedSearchCV` or do it manually.- Get and plot your model's feature importances. 1. Begin with baselines for classification. >Your target to predict is `shot_made_flag`. What would your baseline accuracy be, if you guessed the majority class for every prediction?y_train = df['shot_made_flag'] y_train.value_counts(normalize=True) majority_class = y_train.mode()[0] y_pred = [majority_class] * len(y_train) print(len(y_pred)) #Accuracy of majority class baseline = frequency of the majority class from sklearn.metrics import accuracy_score accuracy_score(y_train, y_pred)2. Hold out your test set.>Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations.# Convert to datetime and look at the date range df['game_date'] = pd.to_datetime(df['game_date'], infer_datetime_format=True) cutoff = pd.to_datetime('2018-06-30') train = df[df.game_date < cutoff] test = df[df.game_date >= cutoff] train.shape, test.shape3. Engineer new feature.>Engineer at least **1** new feature, from this list, or your own idea.>>- **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ?>- **Opponent**: Who is the other team playing the Golden State Warriors?>- **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period.>- **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long.>- **Made previous shot**: Was 's previous shot successful?df.head() df['new_seconds'] = (df.period)*720 + df.seconds_remaining df.head()**4. Decide how to validate** your model. >Choose one of the following options. Any of these options are good. You are not graded on which you choose.>>- **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations.>- **Train/validate/test split: random 80/20%** train/validate split.>- **Cross-validation** with independent test set. 
You may use any scikit-learn cross-validation method.from sklearn.model_selection import train_test_split X = df.drop(['shot_made_flag', 'game_date'], axis=1) Y = df.shot_made_flag X_train = X y_train = df.shot_made_flag X_train.shape, y_train.shape X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, train_size = 0.80, test_size = 0.20, stratify = y_train, random_state=42 ) X_train.shape, X_val.shape, y_train.shape, y_val.shape5. Use a scikit-learn pipeline to encode categoricals and fit a Decision Tree or Random Forest model.from sklearn.tree import DecisionTreeClassifier dt = make_pipeline( ce.OneHotEncoder(use_cat_names=True), SimpleImputer(), DecisionTreeClassifier(random_state=42) ) dt.fit(X_train, y_train) score = dt.score(X_val, y_val) print('Decision Tree, Validation Accuracy', score) import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names='True'), SimpleImputer(strategy='median'), RandomForestClassifier(n_estimators=60, random_state=42, n_jobs=-1) ) pipeline.fit(X_train, y_train) print ('Validation Accuracy', pipeline.score(X_val, y_val)) y_pred = pipeline.predict(X_val) y_pred6.Get your model's validation accuracy> (Multiple times if you try multiple iterations.)from sklearn.metrics import accuracy_score model = dt.named_steps['decisiontreeclassifier'] y_pred = pipeline.predict(X_val) accuracy_score(y_val, y_pred)7. Get your model's test accuracy> (One time, at the end.)]8. Given a confusion matrix, calculate accuracy, precision, and recall.Imagine this is the confusion matrix for a binary classification model. Use the confusion matrix to calculate the model's accuracy, precision, and recall. Predicted Negative Positive Actual Negative 85 58 Positive 8 36 Calculate accuracycorrect_pred = 85 + 36 total_pred = 85 + 58 + 8 + 36 accuracy = correct_pred / total_pred print(accuracy)0.6470588235294118Calculate precisioncorrect_pred = 36 total_pred = 58 + 36 precision = correct_pred / total_pred print(precision)0.3829787234042553Calculate recallcorrect_pred = 36 actual = 8 + 36 recall = correct_pred / actual print(recall)Calculate downhole xyz from Dist,Azimuth,Inclination using minimum curvatureThe Minimum Curvature Method smooths two straight-line segments of the Balanced Tangential Method by using the Ratio Factor (RF).Where: MD = Measured Depth between surveys I1 = Inclination (angle) of upper survey in degrees I2 = Inclination (angle) of lower in degrees Az1= Azimuth direction of upper survey Az2 = Azimuth direction of lower survey RF = Ratio Factor ß is the dog leg angle. 
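Written out, the relations that the dia2xyz function below implements are:
$$\beta = \arccos\bigl(\cos(I_2 - I_1) - \sin I_1 \sin I_2\,(1 - \cos(Az_2 - Az_1))\bigr), \qquad RF = \frac{2}{\beta}\tan\frac{\beta}{2} \quad (RF = 1 \text{ when } \beta = 0)$$
$$\Delta E = \frac{MD}{2}(\sin I_1 \sin Az_1 + \sin I_2 \sin Az_2)\,RF, \qquad \Delta N = \frac{MD}{2}(\sin I_1 \cos Az_1 + \sin I_2 \cos Az_2)\,RF, \qquad \Delta Z = \frac{MD}{2}(\cos I_1 + \cos I_2)\,RF$$
with the new position given by $X_2 = X_1 + \Delta E$, $Y_2 = Y_1 + \Delta N$ and $Z_2 = Z_1 - \Delta Z$ (the vertical increment is subtracted from the elevation).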
Thanks to:http://www.drillingformulas.com/minimum-curvature-method/ and https://gis.stackexchange.com/questions/13484/how-to-convert-distance-azimuth-dip-to-xyzfrom math import acos, cos, sin, tan, radians def dia2xyz(X1,Y1,Z1,I1,Az1,Distance1,I2,Az2,Distance2): I1=radians(I1) Az1=radians(Az1) I2=radians(I2) Az2=radians(Az2) MD = Distance2 - Distance1 Beta = acos(cos(I2 - I1) - (sin(I1)*sin(I2)*(1-cos(Az2-Az1)))) if(Beta==0): RF=1 else: RF = 2 / Beta * tan(Beta / 2) dX = MD/2 * (sin(I1)*sin(Az1) + sin(I2)*sin(Az2))*RF dY = MD/2 * (sin(I1)*cos(Az1) + sin(I2)*cos(Az2))*RF dZ = MD/2 * (cos(I1) + cos(I2))*RF X2 = X1 + dX Y2 = Y1 + dY Z2 = Z1 - dZ return(X2,Y2,Z2) X1=Y1=Z1=0 Distance1=3500 # start I1=15 # Inclination start Az1=20 # Azimuth start Distance2=3600 # end I2=25 # Inclination end Az2=45 # Azimuth end X2,Y2,Z2=dia2xyz(X1,Y1,Z1,I1,Az1,Distance1,I2,Az2,Distance2) print('E,N,Z') print(X1,Y1,Z1) print(X2,Y2,Z2) import pandas as pd survey=pd.read_csv('../../drilling_test/a104970_a104970_drilling/Survey_data_file.txt', skiprows=47,sep='\t') survey.drop(survey.index[[0,1]],inplace=True) survey.drop(survey.index[len(survey)-1],inplace=True) survey.reset_index(inplace=True) display(survey.head()) location=pd.read_csv('../../drilling_test/a104970_a104970_drilling/Location_data_file.txt', skiprows=45,sep='\t') location.drop(location.index[[0,1]],inplace=True) location.drop(location.index[len(location)-1],inplace=True) location.reset_index(inplace=True) location.set_index('Hole_ID',inplace=True) display(location.head()) def convert_survey(survey,location): holeid='' for indx,interval in survey.iterrows(): if(interval['Hole_ID'] != holeid): first=True if(first): first=False lasti=float(interval['Dip']) lasta=float(interval['Azimuth_TRUE']) lastd=float(interval['Surveyed Depth']) holeid=interval['Hole_ID'] X1=float(location.loc[holeid]['MGA Easting']) Y1=float(location.loc[holeid]['MGA Northing']) Z1=float(location.loc[holeid]['Elevation']) print() print(holeid,indx+2,X1,Y1,Z1) else: if(indxMWD14001 2 420324.0 7584887.0 314.0 MWD14001 3 420333.1528705034 7584902.75763067 303.5635500581167 MWD14001 4 420338.0193951451 7584911.20382948 298.0483570185162 MWD14001 5 420352.1755308132 7584937.006610235 281.42919671408026 MWD14001 6 420354.625956783 7584941.615200808 278.4700974090309 MWD14001 7 420357.08199202496 7584946.214946074 275.5018950505092 MWD14001 8 420359.54081724666 7584950.810260398 272.52914247053315 MWD14001 9 420362.0156643821 7584955.39695688 269.5563853354977 MWD14001 10 420364.49173617474 7584959.98593733 266.5881778552873 MWD14001 11 420366.9545698436 7584964.579102712 263.6154273539461 MWD14001 12 420369.40971662744 7584969.167537559 260.62904092323237 MWD14001 13 420371.8793472038 7584973.754081886 257.6517361872421 MWD14001 14 420374.37018834933 7584978.332118366 254.67898511651987 MWD14001 15 420376.857308361 7584982.903301377 251.69259868580613 MWD14001 16 420379.3170361326 7584987.471414774 248.67900589726722 MWD14001 17 420381.77444510453 7584992.02[...]https://www.cs.cornell.edu/projects/kddcup/datasets.html!tar xzvf hep-th-2003.tar.gz !ls -hal 2003/ | wc -lload libraries# https://github.com/alvinwan/tex2py !pip install tex2py # https://github.com/alvinwan/TexSoup !pip install texsoup import re import time import glob import matplotlib.pyplot as pltload datalist_of_files = glob.glob('hep-th/2003/*') len(list_of_files)find relevant tags in latexstart_time=time.time() reslts={} number_of_eq=0 for this_file in list_of_files: with open(this_file,'rb') as f: data = f.read() resp = 
re.findall('\\\\begin{(?:eqnarray|equation|multiline)}.*?end{(?:eqnarray|equation|multiline)}',str(data),re.DOTALL) # print(this_file) for eq in resp: number_of_eq+=1 try: reslts[this_file].append(eq) except KeyError: reslts[this_file]=[] reslts[this_file].append(eq) # print(' ',eq,'\n') print(round(time.time()-start_time,2),'seconds') number_of_eq len(reslts.keys()) reslts[list(reslts.keys())[0]] reslts[list(reslts.keys())[1]][0]visualize resultsnumber_of_eq_per_file=[] for k,v in reslts.items(): number_of_eq_per_file.append(len(v)) _=plt.hist(number_of_eq_per_file,bins=100) _=plt.ylabel('number of expressions') _=plt.xlabel('number of documents') _=plt.hist(number_of_eq_per_file,bins=100) _=plt.yscale('log', nonposy='clip') _=plt.ylabel('number of expressions') _=plt.xlabel('number of documents')parse the Latexoptions: * http://plastex.sourceforge.net/plastex/sect0025.html* tex2py* texsoup# https://github.com/alvinwan/tex2py from tex2py import tex2py list_of_files[1] with open(list_of_files[1]) as f: data = f.read() toc = tex2py(data)toc.valid_tagsfrom TexSoup import TexSoup soup = TexSoup(data) soup.equation lst = list(soup.find_all('equation')) len(lst) # how many equations are in the document? lst[0] # show the first match lst[0][0] # what is the string inside the "begin{equation}"? lst[1] lst[1][0] for this_eq in soup.find_all('equation'): print(this_eq[0])\label{planewave} \begin{split} ds^2 & = 2dx^+dx^--\m^2\vec{x}^2\bigl(dx^+\bigr)^2+d\vec{x}^2\,,\\ F_5 & = 4\m dx^+\wedge\bigl(dx^1\wedge dx^2\wedge dx^3\wedge dx^4+dx^5\wedge dx^6\wedge dx^7\wedge dx^8\bigr)\,. \o_n=\sqrt{n^2+\bigl(\m\a'p^+\bigr)^2}\,,\qquad n\in\Nop\,, \label{dict} \frac{1}{\m}H = \D-J\,,\qquad \frac{1}{\bigl(\m\a'p^+\bigr)^2} = \frac{g^2_{\text{YM}}N}{J^2}\equiv \l'\,,\qquad 4\pi g_{\text{s}}\bigl(\m\a'p^+\bigr)^2 = \frac{J^2}{N}\equiv g_2 N\to\infty\quad\text{and}\quad J\to\infty\quad\text{with}\quad\frac{J^2}{N}\quad \text{fixed}\,,\quad g_{\text{YM}}\quad\text{fixed}\,, S_{\mbox{\scriptsize bos.}(r)}=\frac{e(\a_r)}{4\pi\a'}\int\,d\t\int_0^{2\pi|\a_r|} \,d\s_r\bigl[\dot{x}_r^2-x^{\prime\,2}_r-\m^2x_r^2\bigr]\,, \begin{split} x_r^I(\s_r)& = x_{0(r)}^I+\sqrt{2}\sum_{n=1}^{\infty} \bigl(x_{n(r)}^I\cos\frac{n\s_r}{|\a_r|}+x_{-n(r)}^I\sin\frac{n\s_r}{|\a_r|}\bigr)\,,\\ p_r^I(\s_r) & =\frac{1}{2\pi|\a_r|}\bigl[p_{0(r)}^I+\sqrt{2}\sum_{n=1}^{\infty} \bigl(p_{n(r[...]Factorielledef factorielleIt(n): r = 1 for i in range(1,n+1): r *=i return r factorielleIt(8) def factorielleRec(n): if n == 0: return 1 return n * factorielleRec(n-1) factorielleRec(3) def factorielleAffichage(n): print("Début appel " + str(n)) if n == 0: print ("Fin appel 0") return 1 p = factorielleAffichage(n-1) print("Valeurs prec : " + str(p)) print("Fin appel " + str(n)) return n * p factorielleAffichage(3)Début appel 3 Début appel 2 Début appel 1 Début appel 0 Fin appel 0 Valeurs prec : 1 Fin appel 1 Valeurs prec : 1 Fin appel 2 Valeurs prec : 2 Fin appel 3Syracusedef syr(n): print(n) if n == 1: return if n%2==0: syr(n//2) else: syr(3*n+1) syr(5)5 16 8 4 2 1Hanoidef print_hanoi_actions(n, t1, t2, t3): if n == 0: return print_hanoi_actions(n-1,t1,t3,t2) print("Move from " + t1 + " to " + t2) print_hanoi_actions(n-1,t3,t2,t1) print_hanoi_actions(8,"Tower 1","Tower 2","Tower 3") class Towers: def __init__(self,n): self.towers = (list(range(n,0,-1)),[],[]) self.size = n def move(self,i,j): t1,t2 = self.towers[i], self.towers[j] if len(t1) == 0 or (len(t2) != 0 and t1[-1]>t2[-1]): raise ValueError("Invalid action") print("Move from Tower " + str(i) + " 
to Tower " + str(j)) t2.append(t1.pop()) def pretty_print(self): res = "" n = self.size for i in range(n): line = "" for t in self.towers: if len(t) > i: v = t[i] else: v = 0 stars = v*"*" spaces = (n-v)*" " line+= spaces + stars + "|" + stars + spaces + " " line+="\n" res = line + res return res T = Towers(3) print(T.pretty_print()) T.move(0,1) print(T.pretty_print()) T.move(0,1) print(T.pretty_print()) def hanoi(n, T, t1, t2, t3): print("Debut appel ",n,t1,t2) if n == 0: print("Fin appel 0") return hanoi(n-1,T,t1,t3,t2) T.move(t1,t2) print(T.pretty_print()) hanoi(n-1,T,t3,t2,t1) print("Fin appel ",n,t1,t2) n = 3 T = Towers(n) print(T.pretty_print()) hanoi(n,T,0,1,2) T = Towers(4) print T.pretty_print() hanoi(4,T,0,1,2)TD Exercice 1def affiche(n,i): if n == i: print(n) print("*") else: print(i) affiche(n,i+1) affiche(5,1)1 2 3 4 5 *Exercice 2def f1(n): if n==0: return 1 return f1(n+1) f1(-6) def f2(n): if n==0: return 0 return f2(n-1) + n f2(5) def f3(n): if n==0: return 0 return f3(n-1) - n f3(5) def f4(n): if n==0: return 0 if n<0: return n + f4(-n) return n + f4(-n+1) f4(-5) def f5(n): if n<=1: return 0 return 1 + f5(n-2) f5(12) sum(range(101)) def fiboRec(n): if n == 0 or n == 1: return 1 return fiboRec(n-1) + fiboRec(n-2) fiboRec(40) def fiboIt(n): u0 = 1 u1 = 1 for i in range(2,n+1): u1, u0 = u0 + u1, u1 return u1 fiboIt(12000) def pgcd(a,b): if a < b: return pgcd(b,a) if a%b == 0: return b return pgcd(b,a%b) pgcd(2145,630) pgcd(7,15) def dicho(T, deb, fin, v): if fin - deb <= 0: return -1 m = (deb+fin)//2 if T[m] == v: return m if v < T[m]: return dicho(T,deb,m,v) return dicho(T,m+1,fin,v) dicho([2,4,6,9,12],0,5,5) def fiboC(n): if n == 0 or n == 1: return 1,1 f0,c0 = fiboC(n-1) f1,c1 = fiboC(n-2) return f0+f1, c0+c1+1 fiboC(7) def test(n): if n == 0: return 1 return 1 + test(n//2) + test(n//2) test(8) def puiss1(a,b): if b == 0: return 1 return a*puiss1(a,b-1) puiss1(3,4) 3**4 def puiss2(a,b): if b == 0: return 1 if b%2 == 1: return a * puiss2(a,b-1) c = puiss2(a, b//2) return c*c puiss2(3,4)EvaluationEvaluate model prediction for an entire play.import numpy as np import pandas as pd import matplotlib.pyplot as plt import sys, os sys.path.append('/home/ec2-user/SageMaker/helmet_detection/src') from helmet_detection_model.detector import ObjectDetector video_in = '/home/ec2-user/SageMaker/helmet_detection/input/train/57583_000082_Endzone.mp4' model_path = '/home/ec2-user/SageMaker/helmet_detection/model/model_helmet_frcnn.pt' gtfile_name = '/home/ec2-user/SageMaker/helmet_detection/input/train_labels.csv' full_video = True subset_video = 4 conf_thres=0.9 iou_threshold = 0.25 num_classes = 2 # %%time # detections, eval_det, fns, fps = ObjectDetector.run_detection_eval_video(video_in, gtfile_name, # model_path, full_video, # subset_video, conf_thres, # iou_threshold) # eval_det.describe()Draw detection errors on frames# eval_det.to_csv("/home/ec2-user/SageMaker/helmet_detection/output/eval_det.csv", index=False) # fns.to_csv("/home/ec2-user/SageMaker/helmet_detection/output/fns.csv", index=False) # fps.to_csv("/home/ec2-user/SageMaker/helmet_detection/output/fps.csv", index=False) eval_det = pd.read_csv("/home/ec2-user/SageMaker/helmet_detection/output/eval_det.csv") fns = pd.read_csv("/home/ec2-user/SageMaker/helmet_detection/output/fns.csv") fps = pd.read_csv("/home/ec2-user/SageMaker/helmet_detection/output/fps.csv") fn_thres = 3 fp_thres = 3 # # list of frames with fn>=fn_thres and fp>=fp_thres frame_list = eval_det[(eval_det['fn'] >= fn_thres) & (eval_det['fp'] >= 
fp_thres)]['frame_id'].tolist() ## frame_list = ObjectDetector.find_frames_high_fn_fp(eval_det, fn_thres, fp_thres) # # list of frames with no fn and fp # frame_list = eval_det[(eval_det['fn'] == 0) & (eval_det['fp'] == 0)]['frame_id'].tolist() # list of frames with more than 5 fn # frame_list = eval_det[(eval_det['fn'] > 5)]['frame_id'].tolist() print(frame_list) fns.shape !rm /home/ec2-user/SageMaker/helmet_detection/output/out_images/* success = ObjectDetector.draw_detect_error(video_in, gtfile_name, full_video, subset_video, frame_list, fns, fps) successGet %frame with no fn and fp, fn=1, fn between 2 and 5, and fn more than 5df_good = eval_det[(eval_det['fn'] == 0) & (eval_det['fp'] == 0)] print(df_good.shape) print(100*(df_good.shape[0]/eval_det.shape[0])) df_fn_1 = eval_det[(eval_det['fn'] == 1)] print(df_fn_1.shape) print(100*(df_fn_1.shape[0]/eval_det.shape[0])) df_fn_2_5 = eval_det[(eval_det['fn'] >= 2) & (eval_det['fn'] <= 5)] print(df_fn_2_5.shape) print(100*(df_fn_2_5.shape[0]/eval_det.shape[0])) df_fn_5 = eval_det[(eval_det['fn'] > 5)] print(df_fn_5.shape) print(100*(df_fn_5.shape[0]/eval_det.shape[0])) df_fn_5 eval_det["precision"] = eval_det.apply(lambda row: row.tp/(row.tp + row.fp), axis=1) eval_det["recall"] = eval_det.apply(lambda row: row.tp/(row.tp + row.fn), axis=1) eval_det["f1_score"] = eval_det.apply(lambda row: (2 * row.precision * row.recall)/(row.precision + row.recall), axis=1) eval_det.head() # Calculate total number of helmets, tp, fn, fp, precision, recall, and F1 score total_gt = eval_det['num_object_gt'].sum() total_tp = eval_det['tp'].sum() total_fn = eval_det['fn'].sum() total_fp = eval_det['fp'].sum() total_precision = total_tp/(total_tp+total_fp) total_recall = total_tp/(total_tp+total_fn) total_f1 = 2*total_precision*total_recall/(total_precision+total_recall) total_gt, total_tp, total_fn, total_fp, total_precision, total_recall, total_f1Plot R/R and FN/FP for each framesnap_time=10 fig, ax1 = plt.subplots() ax1.set_xlabel('Frame ID') ax1.set_ylabel('Precision / Recall', color='g') ax1.plot(eval_det['frame_id'], eval_det['precision'], color='r') ax1.plot(eval_det['frame_id'], eval_det['recall'], color='g') ax1.set_ylim([0, 1]) plt.legend(['precision', 'recall'], loc='upper left') ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.set_ylabel('FN / FP', color='b') # we already handled the x-label with ax1 ax2.plot(eval_det['frame_id'], eval_det['fn'], color='c') ax2.plot(eval_det['frame_id'], eval_det['fp'], color='b') ax2.set_ylim([0, 22]) ax2.plot(eval_det['frame_id'], eval_det['num_object_gt'], color='k') ax2.tick_params(axis='y', labelcolor='b') plt.legend(['FN', 'FP', 'Total Helmets'], loc='lower right') fig.tight_layout() # otherwise the right y-label is slightly clipped plt.axvline(x=snap_time, color='k', linestyle='--') # plt.show() plt.savefig('/home/ec2-user/SageMaker/0Artifact/helmet_detection/output/pr_fnfp.png')Plot F1 score and FN/FP for each framefig, ax1 = plt.subplots() ax1.set_xlabel('Frame ID') ax1.set_ylabel('F1 score', color='g') ax1.plot(eval_det['frame_id'], eval_det['f1_score'], color='r') ax1.set_ylim([0, 1]) plt.legend(['F1 score'], loc='upper left') # ax1.tick_params(axis='y', labelcolor=color) ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.set_ylabel('FN / FP', color='b') # we already handled the x-label with ax1 ax2.plot(eval_det['frame_id'], eval_det['fn'], color='c') ax2.plot(eval_det['frame_id'], eval_det['fp'], color='b') ax2.set_ylim([0, 22]) 
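# The black curve added next is the ground-truth helmet count per frame ('Total Helmets'
# in the legend), so the FN/FP counts on this secondary axis can be read against how many
# helmets were actually present in each frame.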
ax2.plot(eval_det['frame_id'], eval_det['num_object_gt'], color='k') ax2.tick_params(axis='y', labelcolor='b') plt.legend(['FN', 'FP', 'Total Helmets'], loc='lower right') fig.tight_layout() # otherwise the right y-label is slightly clipped plt.axvline(x=snap_time, color='k', linestyle='--')# plt.show() plt.savefig('/home/ec2-user/SageMaker/0Artifact/helmet_detection/output/f1_fnfp.png')Plot stacked bar for tp, fn and fp for each frame# pal = ["#9b59b6", "#e74c3c", "#34495e", "#2ecc71"] pal = ["g","r","b"] plt.figure(figsize=(12,8)) plt.stackplot(eval_det['frame_id'], eval_det['tp'], eval_det['fn'], eval_det['fp'], labels=['TP','FN','FP'], colors=pal) plt.plot(eval_det['frame_id'], eval_det['num_object_gt'], color='k', linewidth=6, label='Total Helmets') plt.legend(loc='best', fontsize=12) plt.xlabel('Frame ID', fontsize=12) plt.ylabel(' # of TPs, FNs, FPs', fontsize=12) plt.axvline(x=snap_time, color='k', linestyle='--') plt.savefig('/home/ec2-user/SageMaker/0Artifact/helmet_detection/output/stacked.png') detections = ObjectDetector.run_detection_video(video_in, model_path, full_video,subset_video, conf_thres) vid_title = "/home/ec2-user/SageMaker/helmet_detection/input/" + os.path.splitext(os.path.basename(video_in))[0] + '.csv' print(vid_title) detections.to_csv(vid_title, index=None) !ls /home/ec2-user/SageMaker/0Artifact/helmet_detection/input/train_labels.csv # !mkdir src/helmet_detection_metric/detections # !mkdir src/helmet_detection_metric/groundtruths # !mkdir src/helmet_detection_metric/results !python src/helmet_detection_metric/object_detection_metrics.py '/home/ec2-user/SageMaker/0Artifact/helmet_detection/input/train/57583_000082_Endzone.mp4' True 0 4000 !python src/helmet_detection_metric/pascalvoc.pyCheck botscore of friends/followers of botsusers_to_check_botscore = db_helper.getDataframeFromQuery( db_helper.connect_db(), """ select c.t_usr_id_ego, c.t_usr_id_conn, c.conn_type from bot b, connections c where b.twitter_user_id = c.t_usr_id_ego ;""" ) users_to_check_botscore.drop_duplicates(inplace=True) twitter_id_alignment_dict = { "": "Left", "": "Left", # ... "": "C.Left", "": "C.Left", # ... 
"": "Right", "": "Right", } users_to_check_botscore["seed"] = users_to_check_botscore.t_usr_id_ego.apply( twitter_id_alignment_dict.get ) users_to_check_botscore.conn_type = users_to_check_botscore.conn_type.apply( lambda x: "follower" if x else "friend" )saving bot scores to json files for later processingtw_keys = { "consumer_key": "", "consumer_secret": "", "access_token": "", "access_token_secret": "", "mashape_key": "", } check_botscore = RetrieveBotscore(tw_keys) check_botscore.check_botometer_scores(users_to_check_botscore.t_usr_id_conn.unique())Reading bot scores from json filejson_file = "" botscore_measurements = check_botscore.loadJson(json_file)[[ "user_id_str","user_screen_name","scores_english" ]] botscore_measurements = botscore_measurements.join( pd.DataFrame(users_to_check_botscore,columns=["user_id_str"]).set_index("user_id_str"), on="user_id_str", how="outer" ) botscore_measurements = botscore_measurements[~botscore_measurements.user_id_str.isna()] botscore_measurements.set_index("user_id_str", inplace=True) botscore_dict = botscore_measurements.scores_english.to_dict() connections_overall_botscores = users_to_check_botscore.groupby( ["conn_type","seed"] ).apply( lambda x: pd.Series([ botscore_dict.get(str(user_id)) for user_id in pd.Series(np.concatenate(x.t_usr_id_conn.values)).unique() ]).dropna().to_list() ) connections_overall_botscores.to_csv("../data/connections_seeds_unique_botscores.csv")My First Machine Learning Program IntroductionThere is no better way to learn Machine Learning than to experiment with it! In this notebook you are step by step guided to do this. Challenge yourself in trying to understand the code and try to change the parameters. First things first, we need a few libraries.import numpy as np import pandas as pd import sklearn as sk import matplotlib import matplotlib.pyplot as plt from IPython.display import display print('numpy version:', np.__version__) print('pandas version:', pd.__version__) print('scikit-learn version:', sk.__version__) print('matplotlib version:', matplotlib.__version__) %matplotlib inlinenumpy version: 1.19.2 pandas version: 1.1.3 scikit-learn version: 0.23.2 matplotlib version: 3.3.2The algorithm you are going to use is known as k-NN (k-Nearest Neighbors) classification. DIYIn Canvas more information is provided about kNN. Read through it and write down how you think the learning works. The supervised learning classification steps: overviewThis notebook implements all specific tasks in applying supervised machine learning techniques:* Preparing the data, * Analysing and visualising the data, * Cleaning the data, * Selecting features, * Dividing your data into a training and test set, * Training your Machine learning algorithm, * Applying the machine learning algorithm * and Evaluating its results.You will learn more on how to do each task in the weeks to come. For now, try to read the code and experiment with it. Step 1 Preparing the dataThe first step is to get the data. There are a lot of prepared datasets available on the internet. We've selected a popular dataset for you that is suitable for a classification. * Iris dataset (150 entries of 4 input features and 1 output label) DIYTake a look at the website https://archive.ics.uci.edu/ml/datasets.html and find these datasets there. What information or metadata is available? List resources that explain the content of these dataset and that used these datasets to test learning algorithmns. 
Downloadingdf_iris = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")Exploring the iris datasetdf_iris.columns = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width', 'Species'] print('Iris dataset shape: {}'.format(df_iris.shape)) df_iris.head(5)Iris dataset shape: (149, 5)Analysing and visualising the data & cleaning the dataIn this process the features should be stored in a 2D NxM array (matrix) variable separate from the label which should be stored in a 1D array (or vector) of size N. N ( rows) represents the number of data points or entries, M ( columns) represents the number of features. If necessary, textual information should be removed or transformed into numerical data. DIYTake a look at the topics we'll discuss next weeks. When will we take about visualisation? When will we discuss data cleaning? Iris datasetdf_iris['Species-id'] = df_iris['Species'].map({'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}) df_iris.head(5) iris_features = tuple(df_iris.columns[:4].values) df_iris.boxplot(column=iris_features, by='Species-id', figsize=(15,8), layout=(1,4));Selecting features (and scaling them)We will scale each feature (linearly) between 0 and 10.from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn import metricsIris dataset - step 3# Define X_iris and y_iris X_iris = df_iris[['Petal Length', 'Petal Width']] y_iris = df_iris['Species-id'] print('Iris types:', df_iris['Species-id'].unique()) # Normalize scaler_iris = StandardScaler().fit(X_iris) X_iris = scaler_iris.transform(X_iris) print('The length of X_iris: {}'.format(len(X_iris))) plt.scatter(X_iris[:,0], X_iris[:,1], edgecolors='k', c=y_iris)Iris types: [0 1 2] The length of X_iris: 149Dividing your data into a training and test setLet’s use the symbol X for the scaled 2D input feature array, and the symbol y for the 1D output label vector.We will split the data into a train set and test set. Iris dataset# Split in train and test sets #X_train_iris, X_test_iris, y_train_iris, y_test_iris = train_test_split(X_iris, y_iris, test_size=0.25, stratify=y_iris) X_train_iris, X_test_iris, y_train_iris, y_test_iris = train_test_split(X_iris, y_iris, test_size=0.25) print('Train shape:', X_train_iris.shape, y_train_iris.shape) print('Test shape:', X_test_iris.shape, y_test_iris.shape) plt.scatter(X_train_iris[:,0], X_train_iris[:,1], edgecolors='k', c=y_train_iris)Train shape: (111, 2) (111,) Test shape: (38, 2) (38,)Training your Machine learning algorithm![image.png](attachment:image.png)Figure 1: A data point is classified by majority votes from its 5 nearest neighbors. Here, the unknown point would be classified as red, since 4 out of 5 neighbors are red. The library sklearn contains an implementation of the NearestNeighbors algorithm:from sklearn.neighbors import NearestNeighbors from sklearn.neighbors import KNeighborsClassifier from matplotlib.colors import ListedColormapWe create a function that takes the features and labels of the data as parameters, as the number of neighbors. We next predict the class for all possible values in the two dimensions. 
These results are plotted, visualising the decision boundaries between predicted classes.def plot_nearest_neighbors(X_train, X_test, Y_train, Y_test, k, classlabels, featurelabels, weight): print('Number of training points: ',X_train.size) print('Number of training points: ',X_train.shape) possible_classes = Y_train.unique() nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X_train) distances, indices = nbrs.kneighbors(X_test) nb_of_classes = classlabels.unique().size h = .02 # step size in the mesh ## Create color maps cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) ## we create an instance of Neighbours Classifier and fit the data. clf_data = KNeighborsClassifier(k, weights=weight) clf_data.fit(X_train, Y_train) ## Plot the decision boundary. For that, we will assign a color to each ## possible point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1 y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z_data = clf_data.predict(np.c_[xx.ravel(), yy.ravel()]) ## Put the result into a color plot Z_data = Z_data.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z_data, cmap=cmap_light, shading='auto') ## Plot also the training points #print(Y_train) #print(X_train[0]) for i in possible_classes: x1s_data = X_train[:, 0][Y_train.values == i] x2s_data = X_train[:, 1][Y_train.values == i] plt.scatter(x1s_data, x2s_data, cmap=cmap_bold, edgecolors='k', label=i) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.title("%i-Class classification (k = %i, weights = '%s')" % (nb_of_classes, k, weight)) plt.xlabel(featurelabels[0]) plt.ylabel(featurelabels[1]) plt.legend() plt.show() return clf_dataPlotting the decision boundary for the Iris datasetclf_iris = plot_nearest_neighbors(X_train_iris, X_test_iris, y_train_iris, y_test_iris, 5, df_iris.Species, ['Petal Length', 'Petal Width'], 'distance')Number of training points: 222 Number of training points: (111, 2)Step 6 - Result of learning: accuracyThe accuracy of your KNN algorithm is based on the correct predicted labels of the hold-out test set. Iris dataset#Evaluate performance (with test set) print('Number of test points: ',X_test_iris.size) y_pred_iris = clf_iris.predict(X_test_iris)Number of test points: 76Accuracy score for the Iris Dataset:metrics.accuracy_score(y_test_iris, y_pred_iris) from sklearn.metrics import classification_report target_names = ['class 0', 'class 1', 'class 2'] print(classification_report(y_test_iris, y_pred_iris, target_names=target_names)) # Plot non-normalized confusion matrix from sklearn.metrics import plot_confusion_matrix titles_options = [("Confusion matrix, without normalization", None), ("Normalized confusion matrix", 'true')] for title, normalize in titles_options: disp = plot_confusion_matrix(clf_iris, X_test_iris, y_test_iris, display_labels=target_names, cmap=plt.cm.Blues, normalize=normalize) disp.ax_.set_title(title) print(title) print(disp.confusion_matrix) plt.show()Confusion matrix, without normalization [[16 0 0] [ 0 11 0] [ 0 3 8]] Normalized confusion matrix [[1. 0. 0. ] [0. 1. 0. ] [0. 0.27272727 0.72727273]]SNR estimation 1. Main construction SetupLet $x(t)$ be source neuronal signal for $t \in [0, T]$Let $k(t)$ be the response function of calcium indicator. 
Then the source calcium signal$$y(t) = \int_{0}^{\infty} k(\tau) x(t - \tau) d\tau$$The signal is integrated over fixed time intervals $\Delta t$ upon collection. It also experiences i.i.d white noise $\nu_i \sim \mathcal{N}(0, \sigma^2)$ with unknown variance.$$y_i = \int_{i\Delta t}^{{(i+1)\Delta t}} y(t) + \nu_i = y^{src}_i + \nu_i$$ SNR detection via autocorrelationIf kernel timescale $t_k = STD(k)$ is sufficiently slow compared to the sampling timescale $\Delta t$, we can assume that the autocorrelation of the source signal will be much more smooth than that of white noise.Autocorrelation of steps $i$ and $j$ is defined as$$AC_{ij} = \frac{\langle (y_i - \mu_i)(y_j - \mu_j) \rangle}{\sigma_i \sigma_j}$$where$$\mu_i = \langle y_i \rangle = \langle y^{src}_i + \nu_i \rangle = \mu_{src}$$and$$\sigma_i^2 = \langle (y_i - \mu_i)^2 \rangle = \langle (y^{src}_i - \mu_i)^2 + 2(y_i - \mu_i)\nu_i + \nu_i^2 \rangle = \sigma_{src}^2 + \sigma_{\nu}^2$$where the interference terms are zero because white noise is assumed to be uncorrelated with the signal. Thus we can rewrite the autocorrelation as$$AC_{ij} = \frac{\langle (y^{src}_i - \mu_i)(y^{src}_j - \mu_j) + (y^{src}_i - \mu_i)\nu_j + \nu_i(y^{src}_j - \mu_j) + \nu_i \nu_j\rangle}{\sigma_{src}^2 + \sigma_{\nu}^2} = \frac{\sigma_{src}^2 \rho^{src}_{ij}}{\sigma_{src}^2 + \sigma_{\nu}^2} = \frac{\rho^{src}_{ij}}{1 + SNR}$$where the last 3 terms are zero because white noise is uncorrelated with the signal and also is uncorrelated with itself. Thus, autocorrelation of the observed signal is proportional to autocorrelation of the raw signal and inverse-proportional to $(1 + SNR)$.Finally, as a first order approximation we will assume time-invariance of the source signal, meaning that, in a random trial, mean and variance of the signal is the same for all timesteps. Hence,$$\mu^{src}_i = \mu^{src} \forall i \; ;\;\; \sigma^{src}_i = \sigma^{src} \forall i$$and$$AC_{ij} = AC_{k, k+(j-i)} = AC_k \;\; \forall i,j,k$$ Estimation of $AC_k$In order to compute the $k$-step autocorrelation, we replace all expectation values with averages over timesteps and trials simultaneously. Note that there is no need for bias correction, because the bias multiplier is the same for the numerator and the denominator. Estimation of SNRSNR can be estimated from a single step autocorrelation1. Make a guess for $k(t)$2. Simulate expected value for $AC_k$3. Calculate expected value for SNR 2. Inequality constraintsAbove is an approximate relationship of sample and source autocorrelation and SNR. In order for this estimator to be practically useful, additional work is required:* Construct lower bound for $\rho_k$ given kernel STD. Hence construct lower bound on SNR* Estimate effects of finite sample size on $AC_{ij}$ expansion. Derive first order correcton $O(1/n)$* Estimate effects of changing trial 3. 
Extension* Can values of $AC_k$ for multiple $k$ be combined to better estimate SNR?* Can one estimate kernel timescale given its shape?import numpy as np import matplotlib.pyplot as plt def expDistr(t, tau): return np.exp(-t/tau) / tau def autocorr(x, norm=False): ac = np.correlate(x, x, mode='full')[len(x)-1:] if norm: return ac / ac[0] else: return ac T = 10 # s, simulation time dt = 0.001 # s tau = 0.4 # s, exp kernel timescale nData = int(T / dt) nKer = int(5 * tau / dt) tRaw = np.arange(nData) * dt xRaw = np.random.normal(0, 1, nData) tKer = np.arange(nKer) * dt k = expDistr(tRaw, tau)[:nKer] k /= np.sum(k) acRaw = autocorr(xRaw, norm=True) yDirect = np.convolve(k, xRaw, mode='full')[:nData] acDirect = autocorr(yDirect, norm=True) kInv = np.fft.ifft(1/np.fft.fft(k)) yInv = np.convolve(kInv, xRaw, mode='full')[:nData] acInv = autocorr(yInv, norm=True) kComposite = np.convolve(k, kInv, mode='full')[:nKer] yIndirect = np.convolve(k, yInv, mode='full')[:nData] acIndirect = autocorr(yIndirect, norm=True) fig, ax = plt.subplots(nrows=4, ncols=3, figsize=(12, 8), tight_layout=True) ax[0,1].set_title('Raw signal') ax[0,1].plot(tRaw, xRaw) ax[1,0].set_title('Kernel') ax[1,0].plot(tKer, k) ax[0,2].set_title('Autocorr of raw') ax[0,2].plot(tRaw, acRaw) ax[1,1].set_title('Exp conv to Raw') ax[1,1].plot(tRaw, yDirect) ax[1,2].set_title('Autocorr of ExpConv') ax[1,2].plot(tRaw, acDirect) ax[2,0].set_title('Inverse Kernel') ax[2,0].plot(tKer[:20], kInv[:20]) ax[2,1].set_title('Raw conv with Inverse') ax[2,1].plot(tRaw[:20], yInv[:20]) ax[2,2].set_title('Autocorr of InvConv') ax[2,2].plot(tRaw[:20], acInv[:20]) ax[3,0].set_title('Composite Kernel') ax[3,0].plot(tKer[:20], kComposite[:20]) ax[3,1].set_title('Inverse conv with Exp') ax[3,1].plot(tRaw, yIndirect) ax[3,2].set_title('Autocorr of InvConv') ax[3,2].plot(tRaw, acIndirect) plt.show() spikes = np.mean(np.random.uniform(0, 1, (1000, 200)) < 0.01, axis=1) acSpikes = autocorr(spikes, norm=True) fig, ax = plt.subplots(ncols=2, figsize=(8, 4)) ax[0].plot(spikes) ax[1].plot(acSpikes) plt.show()[![Notebook Tutorial](__code/__all/notebook_tutorial.png)](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/bragg_edge_peak_fitting) Select Your IPTSfrom __code import system from __code.bragg_edge.bragg_edge_peak_fitting_evaluation import BraggEdge, Interface system.System.select_working_dir(facility='SNS', instrument='VENUS') from __code.__all import custom_style custom_style.style() from plotly.offline import plot, init_notebook_mode, iplot init_notebook_mode()Prepare UI engine%gui qtSelect Normalized Data Input FolderData and time spectra files will be loadedo_bragg = BraggEdge(working_dir=system.System.get_working_dir()) o_bragg.select_working_folder()Select Open Beam Data Input foldero_bragg.select_ob_folder()Select sample region and peak to fit Select how many random files to use to select region to fito_bragg.how_many_data_to_use_to_select_sample_roi()fit uio_interface = Interface(o_bragg=o_bragg, spectra_file=o_bragg.spectra_file) o_interface.show()DEBUGGINGfrom __code import system from __code.bragg_edge.peak_fitting_evaluation.bragg_edge_peak_fitting import BraggEdge, Interface %gui qt import os # small data set # data_path = "/Volumes/G-DRIVE/IPTS/VENUS/shared/testing_normalized/" # spectra_file = os.path.join(data_path, "Image019_Spectra.txt") # full data set #data_path = "/Volumes/G-DRIVE/IPTS/SNAP/Si_normalized/Si_powder_1_Angs_20C_corrected_normalized" #spectra_file = os.path.join(data_path, "normalized_Spectra.txt") data_path = 
"/Volumes/G-DRIVE/IPTS/IPTS-26171-testing_ibeatles/10_InconelPowder_1.5Hrs_Corrected_normalized/" spectra_file = "/Volumes/G-DRIVE/IPTS/IPTS-26171-testing_ibeatles/10_InconelPowder_1.5Hrs_Corrected_normalized/20210910_Run_52256_InconelPowder_0008_0646026_Spectra.txt" import glob list_data = glob.glob(data_path + "*.tif") assert os.path.exists(spectra_file) o_bragg = BraggEdge(working_dir=data_path) o_bragg.load_data(data_path) o_interface = Interface(o_bragg=o_bragg, working_dir=data_path, spectra_file=spectra_file) o_interface.show()DEBUGGING using import straightfrom __code import system from __code.bragg_edge.peak_fitting_evaluation.bragg_edge_peak_fitting import BraggEdge, Interface %gui qt data_path = "/Volumes/G-Drive/IPTS/SNAP/Si_normalized/Si_powder_1_Angs_20C_corrected_normalized" o_interface = Interface(working_dir=data_path) o_interface.show()BackgammonQuelle: https://github.com/awni/backgammonErweitert um das Extrahieren von 192 Features für TD-Gammonimport random import time import numpy as np class Game: LAYOUT = "0-2-o,5-5-x,7-3-x,11-5-o,12-5-x,16-3-o,18-5-o,23-2-x" NUMCOLS = 24 QUAD = 6 OFF = 'off' ON = 'on' TOKENS = ['x', 'o'] def __init__(self, layout=LAYOUT, grid=None, off_pieces=None, bar_pieces=None, num_pieces=None, players=None): """ Define a new game object """ self.die = Game.QUAD self.layout = layout if grid: self.grid = copy.deepcopy(grid) self.off_pieces = copy.deepcopy(off_pieces) self.bar_pieces = copy.deepcopy(bar_pieces) self.num_pieces = copy.deepcopy(num_pieces) self.players = players return self.players = Game.TOKENS self.grid = [[] for _ in range(Game.NUMCOLS)] self.off_pieces = {} self.bar_pieces = {} self.num_pieces = {} for t in self.players: self.bar_pieces[t] = [] self.off_pieces[t] = [] self.num_pieces[t] = 0 @staticmethod def new(): game = Game() game.reset() return game # Methode die exakt die 198 Features liefert die in TD-Gammon 0.0 benutzt wurden # Nach "Reinforcement Learning: An Introduction", Sutton & Barto, 2017 def extractFeatures(self, player): features = [] # 196 Features kodieren den Zustand der Spielfelder, 98 für jeden Spieler for p in self.players: # 24 mögliche Brettpositionen for col in self.grid: # 4 Features kodieren eine Stelle auf dem Spielbrett feats = [0.] * 4 if len(col) > 0 and col[0] == p: # 0,1,2,3,4,5 Steine werden kodiert als # 0000, 1000, 1100, 1110, 1110.5, 1111 # (4. Bit = (n-3)/2) for i in range(len(col)): if i < 3: feats[i] += 1 else: feats[3] = (len(col)-3)/2.0 break features += feats # Anzahl der Steine auf der "Bar", n/2 features.append(float(len(self.bar_pieces[p])) / 2.) # Anzahl der Steine die bereits aus dem Spiel sind, n/15 features.append(float(len(self.off_pieces[p])) / 15.) # Zwei Features für den derzeitigen Spieler if player == self.players[0]: features += [1., 0.] else: features += [0., 1.] return np.array(features).reshape(1, -1) def roll_dice(self): return (random.randint(1, self.die), random.randint(1, self.die)) def play(self, players, draw=False): player_num = random.randint(0, 1) while not self.is_over(): self.next_step(players[player_num], player_num, draw=draw) player_num = (player_num + 1) % 2 return self.winner() def next_step(self, player, player_num, draw=False): roll = self.roll_dice() if draw: self.draw() self.take_turn(player, roll, draw=draw) def take_turn(self, player, roll, draw=False): if draw: print("Player %s rolled <%d, %d>." 
% (player.player, roll[0], roll[1])) time.sleep(1) moves = self.get_actions(roll, player.player, nodups=True) move = player.get_action(moves, self) if moves else None if move: self.take_action(move, player.player) def clone(self): """ Return an exact copy of the game. Changes can be made to the cloned version without affecting the original. """ return Game(None, self.grid, self.off_pieces, self.bar_pieces, self.num_pieces, self.players) def take_action(self, action, token): """ Makes given move for player, assumes move is valid, will remove pieces from play """ ateList = [0] * 4 for i, (s, e) in enumerate(action): if s == Game.ON: piece = self.bar_pieces[token].pop() else: piece = self.grid[s].pop() if e == Game.OFF: self.off_pieces[token].append(piece) continue if len(self.grid[e]) > 0 and self.grid[e][0] != token: bar_piece = self.grid[e].pop() self.bar_pieces[bar_piece].append(bar_piece) ateList[i] = 1 self.grid[e].append(piece) return ateList def undo_action(self, action, player, ateList): """ Reverses given move for player, assumes move is valid, will remove pieces from play """ for i, (s, e) in enumerate(reversed(action)): if e == Game.OFF: piece = self.off_pieces[player].pop() else: piece = self.grid[e].pop() if ateList[len(action) - 1 - i]: bar_piece = self.bar_pieces[self.opponent(player)].pop() self.grid[e].append(bar_piece) if s == Game.ON: self.bar_pieces[player].append(piece) else: self.grid[s].append(piece) def get_actions(self, roll, player, nodups=False): """ Get set of all possible move tuples """ moves = set() if nodups: start = 0 else: start = None r1, r2 = roll if r1 == r2: # doubles i = 4 # keep trying until we find some moves while not moves and i > 0: self.find_moves(tuple([r1]*i), player, (), moves, start) i -= 1 else: self.find_moves(roll, player, (), moves, start) self.find_moves((r2, r1), player, (), moves, start) # has no moves, try moving only one piece if not moves: for r in roll: self.find_moves((r, ), player, (), moves, start) return moves def find_moves(self, rs, player, move, moves, start=None): if len(rs)==0: moves.add(move) return r, rs = rs[0], rs[1:] # see if we can remove a piece from the bar if self.bar_pieces[player]: if self.can_onboard(player, r): piece = self.bar_pieces[player].pop() bar_piece = None if len(self.grid[r - 1]) == 1 and self.grid[r - 1][-1]!=player: bar_piece = self.grid[r - 1].pop() self.grid[r - 1].append(piece) self.find_moves(rs, player, move+((Game.ON, r - 1), ), moves, start) self.grid[r - 1].pop() self.bar_pieces[player].append(piece) if bar_piece: self.grid[r - 1].append(bar_piece) return # otherwise check each grid location for valid move using r offboarding = self.can_offboard(player) for i in range(len(self.grid)): if start is not None: start = i if self.is_valid_move(i, i + r, player): piece = self.grid[i].pop() bar_piece = None if len(self.grid[i+r]) == 1 and self.grid[i+r][-1] != player: bar_piece = self.grid[i + r].pop() self.grid[i + r].append(piece) self.find_moves(rs, player, move + ((i, i + r), ), moves, start) self.grid[i + r].pop() self.grid[i].append(piece) if bar_piece: self.grid[i + r].append(bar_piece) # If we can't move on the board can we take the piece off? if offboarding and self.remove_piece(player, i, r): piece = self.grid[i].pop() self.off_pieces[player].append(piece) self.find_moves(rs, player, move + ((i, Game.OFF), ), moves, start) self.off_pieces[player].pop() self.grid[i].append(piece) def opponent(self, token): """ Retrieve opponent players token for a given players token. 
""" for t in self.players: if t != token: return t def is_won(self, player): """ If game is over and player won, return True, else return False """ return self.is_over() and player == self.players[self.winner()] def is_lost(self, player): """ If game is over and player lost, return True, else return False """ return self.is_over() and player != self.players[self.winner()] def reverse(self): """ Reverses a game allowing it to be seen by the opponent from the same perspective """ self.grid.reverse() self.players.reverse() def reset(self): """ Resets game to original layout. """ for col in self.layout.split(','): loc, num, token = col.split('-') self.grid[int(loc)] = [token for _ in range(int(num))] for col in self.grid: for piece in col: self.num_pieces[piece] += 1 def winner(self): """ Get winner. """ return 0 if len(self.off_pieces[self.players[0]]) == self.num_pieces[self.players[0]] else 1 def is_over(self): """ Checks if the game is over. """ for t in self.players: if len(self.off_pieces[t]) == self.num_pieces[t]: return True return False def can_offboard(self, player): count = 0 for i in range(Game.NUMCOLS - self.die, Game.NUMCOLS): if len(self.grid[i]) > 0 and self.grid[i][0] == player: count += len(self.grid[i]) if count+len(self.off_pieces[player]) == self.num_pieces[player]: return True return False def can_onboard(self, player, r): """ Can we take a players piece on the bar to a position on the grid given by roll-1? """ if len(self.grid[r - 1]) <= 1 or self.grid[r - 1][0] == player: return True else: return False def remove_piece(self, player, start, r): """ Can we remove a piece from location start with roll r ? In this function we assume we are cool to offboard, i.e. no pieces on the bar and all are in the home quadrant. """ if start < Game.NUMCOLS - self.die: return False if len(self.grid[start]) == 0 or self.grid[start][0] != player: return False if start + r == Game.NUMCOLS: return True if start + r > Game.NUMCOLS: for i in range(start - 1, Game.NUMCOLS - self.die - 1, -1): if len(self.grid[i]) != 0 and self.grid[i][0] == self.players[0]: return False return True return False def is_valid_move(self, start, end, token): if len(self.grid[start]) > 0 and self.grid[start][0] == token: if end < 0 or end >= len(self.grid): return False if len(self.grid[end]) <= 1: return True if len(self.grid[end]) > 1 and self.grid[end][-1] == token: return True return False def draw_col(self,i,col): print("|", end = '') if i==-2: if col<10: print(" ", end = '') print(str(col), end = '') elif i==-1: print("--", end = '') elif len(self.grid[col])>i: print(" "+self.grid[col][i], end = '') else: print(" ", end = '') def draw(self): largest = max([len(self.grid[i]) for i in range(int(len(self.grid)/2),len(self.grid))]) for i in range(-2,largest): for col in range(int(len(self.grid)/2),len(self.grid)): self.draw_col(i,col) print("|") print() print() largest = max([len(self.grid[i]) for i in range(int(len(self.grid)/2))]) for i in range(largest-1,-3,-1): for col in range(int(len(self.grid)/2)-1,-1,-1): self.draw_col(i,col) print("|") for t in self.players: print(" Off Board : "%(t), end = '') for piece in self.off_pieces[t]: print(t+'', end = '') print(" Bar : ", end = '') for piece in self.bar_pieces[t]: print(t+'', end = '') print()Wie sieht das Spielfeld und die extrahierten Features aus?game = Game.new() print("Spielfeld:") game.draw() x = game.extractFeatures(Game.TOKENS[0]) print() print("Features aus der Sicht von x:") print(x) print() print("Form von den Features:", x.shape)Spielfeld: 
|12|13|14|15|16|17|18|19|20|21|22|23| |--|--|--|--|--|--|--|--|--|--|--|--| | x| | | | o| | o| | | | | x| | x| | | | o| | o| | | | | x| | x| | | | o| | o| | | | | | | x| | | | | | o| | | | | | | x| | | | | | o| | | | | | | o| | | | | | x| | | | | | | o| | | | | | x| | | | | | | o| | | | x| | x| | | | | | | o| | | | x| | x| | | | | o| | o| | | | x| | x| | | | | o| |--|--|--|--|--|--|--|--|--|--|--|--| |11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| Off Board : Bar : Off Board : Bar : Features aus der Sicht von x: [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0[...]Resample data and create climatologies%%time ds_sst.analysed_sst[0,:1799,:3600].plot() %%time ds_sst.mask[0,:1799,:3600].plot() %%time sst_masked = ds_sst.where(ds_sst.mask==1) sst_masked.analysed_sst[0,2500:3500,:3600].plot() masked_25km = sst_masked.interp({'lon':ds_ccmp.longitude,'lat':ds_ccmp.latitude}) cyr1,cyr2='2010-01-01','2019-12-31' #make day average ds_ccmp = ds_ccmp.resample(time='D').mean() ds_ccmp_clim = ds_ccmp.sel(time=slice(cyr1,cyr2)) ds_ccmp_clim = ds_ccmp_clim.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False) #sst 25km ds_sst_clim = sst_masked.sel(time=slice(cyr1,cyr2)) ds_sst_clim = ds_sst_clim.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False) #put data into a dictionary data_dict={'wnd':ds_ccmp, 'sst':ds_sst} clim_dict={'wnd_clim':ds_ccmp_clim, 'sst_clim':ds_sst_clim} return data_dict,clim_dict def multi_apply_along_axis(func1d, axis, arrs, *args, **kwargs): """ function from: https://climate-cms.org/2019/07/29/multi-apply-along-axis.html Given a function `func1d(A, B, C, ..., *args, **kwargs)` that acts on multiple one dimensional arrays, apply that function to the N-dimensional arrays listed by `arrs` along axis `axis` If `arrs` are one dimensional this is equivalent to:: func1d(*arrs, *args, **kwargs) If there is only one array in `arrs` this is equivalent to:: numpy.apply_along_axis(func1d, axis, arrs[0], *args, **kwargs) All arrays in `arrs` must have compatible dimensions to be able to run `numpy.concatenate(arrs, axis)` Arguments: func1d: Function that operates on `len(arrs)` 1 dimensional arrays, with signature `f(*arrs, *args, **kwargs)` axis: Axis of all `arrs` to apply the function along arrs: Iterable of numpy arrays *args: Passed to func1d after array arguments **kwargs: Passed to func1d as keyword arguments """ import numpy # Concatenate the input arrays along the calculation axis to make one big # array that can be passed in to `apply_along_axis` carrs = numpy.concatenate(arrs, axis) # We'll need to split the concatenated arrays up before we apply `func1d`, # here's the offsets to split them back into the originals offsets=[] start=0 for i in range(len(arrs)-1): start += arrs[i].shape[axis] offsets.append(start) # The helper closure splits up the concatenated array back into the components of `arrs` # and then runs `func1d` on them def helperfunc(a, *args, **kwargs): arrs = numpy.split(a, offsets) return func1d(*[*arrs, *args], **kwargs) # Run `apply_along_axis` along the concatenated array return numpy.apply_along_axis(helperfunc, axis, carrs, *args, **kwargs) data,clim=get_data() data['sst'] data['wnd'] ds_sst, ds_wnd = data['sst'],data['wnd'] 
ds_wnd['wspd']=(ds_wnd.uwnd**2+ds_wnd.vwnd**2)**.5 #interp doesn't work on chunked dims so rechunk ds_sst = ds_sst.chunk({'time':1,'lat':901,'lon':1800}) ds_new = ds_sst.interp(lat = ds_wnd.lat,lon=ds_wnd.lon) #SST #remove mean, seasonal cycle, trend before analysis clim = ds_new.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False) ds_new_tem = ds_new.groupby('time.dayofyear')-clim #detrending doesn't work with nan so fill with nan but will have to mask at end and pixels with nan in timeseries tem = ds_new_tem.fillna(0) ds_detrended_sst = signal.detrend(tem.analysed_sst,axis=0) ds_new_tem.analysed_sst.isel(lon=720,lat=310).plot() plt.plot(ds_new_tem.time,ds_detrended_sst[:,310,720]) #WSPD #remove mean, seasonal cycle, trend before analysis clim = ds_wnd.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False) ds_new_tem = ds_wnd.groupby('time.dayofyear')-clim #detrending doesn't work with nan so fill with nan but will have to mask at end and pixels with nan in timeseries tem = ds_new_tem.fillna(0) ds_detrended_wnd = signal.detrend(tem.wspd,axis=0) ds_new_tem.wspd.isel(lon=720,lat=310).plot() plt.plot(ds_new_tem.time,ds_detrended_wnd[:,310,720]) corr = multi_apply_along_axis(pearsonr, 0, [ds_detrended_sst,ds_detrended_wnd]) print(corr.shape) print(type(corr)) #not sure why by 1440 is blank acorr = corr[0,:,:] acorr2 = np.concatenate([acorr[:,720:1439],acorr[:,:1439],acorr[:,:720]],axis=1) #plt.imshow(acorr[:,:1439],vmin=-.1,vmax=.1,cmap='RdBu') #plt.imshow(acorr2,vmin=-.1,vmax=.1,cmap='viridis') data = acorr2 data = np.nan_to_num(data,0) lowpass = ndimage.gaussian_filter(data, 40) gauss_highpass = data - lowpass plt.imshow(lowpass,vmin=-.1,vmax=.1,cmap='RdBu') plt.imshow(gauss_highpass,vmin=-.1,vmax=.1,cmap='RdBu')Quiz 0102 Use a lambda function and map() to answer the following questions. 1). Add 1 to each element of the list 'a' and create another list. a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]def new(number): a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] n = len(a) answer= [] for i in range(n): lambda i : i - number answer.append(i) return answer print(new(1))[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]2). Multiply element-wise the lists 'a' and 'b'. Show the result as another list.a = [2, 4, 6, 8, 10]b = [10, 8, 6, 4, 2] Use a lambda function and filter() to answer the following questions. 3). Find the multiples of 5 in the range 1~100. Show the result as a list.answer= [] for i in range(1,101): if lambda i: i%5 == 0: answer.append(i) print(answer)4). In the following list, find the values larger than 160. Show the result as a list.a = [145.2, 160.3, 155.2, 171.6, 165.34, 167.2, 161.1, 164.9]a = [145.2, 160.3, 155.2, 171.6, 165.34, 167.2, 161.1, 164.9] new_List =[] length = len(a) for i in range(length): if lambda i : a < 165: new_List.append(a) print(new_List)5). In the following list, find the values larger than 160 and smaller than 165. Show the result as a list.a = [145.2, 160.3, 155.2, 171.6, 165.34, 167.2, 161.1, 164.9]a = [145.2, 160.3, 155.2, 171.6, 165.34, 167.2, 161.1, 164.9] new_List =[] length = len(a) for i in range(length): if lambda i : a < 165: new_List.append(a) print(new_List)6). In the following list of names, extract only those stargin with 'J'. 
Show the result as a list.a = ['Sara', 'Jack', 'Rebecca', 'Jennifer', 'Paul', 'Andrew', 'John']a = ['Sara', 'Jack', 'Rebecca', 'Jennifer', 'Paul', 'Andrew', 'John'] n = len(a) count=0 for i in range(n): if a[i] == 'J': count +=1 print("J found") a.remove('J') print(a)from sklearn.datasets import fetch_20newsgroups newsdata=fetch_20newsgroups(subset='train') print(newsdata.keys()) print( len(newsdata.data), len(newsdata.filenames), len(newsdata.target_names), len(newsdata.target)) print(newsdata.target_names) print(newsdata.target[0]) print(newsdata.target_names[7]) print(newsdata.data[0]) # 데이터를 토큰화 -> BoW 로 만듦 from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB # 다항분포 나이브 베이즈 모델 from sklearn.metrics import accuracy_score # 정확도 계산 dtmvector = CountVectorizer() X_train_dtm = dtmvector.fit_transform(newsdata.data) print(X_train_dtm.shape) tdidf_transformer = TfidfTransformer() tfidfv = tdidf_transformer.fit_transform(X_train_dtm) print(tfidfv.shape) mod = MultinomialNB() mod.fit(tfidfv, newsdata.target) # X_train , Y_train newsdata_test = fetch_20newsgroups(subset='test',shuffle=True) X_test_dtm = dtmvector.transform(newsdata_test.data) # DTM으로 변환 tfidfv_test = tdidf_transformer.transform(X_test_dtm) #Tf-idf 행렬로 변환 predicted = mod.predict(tfidfv_test) #예측 print("정확도:", accuracy_score(newsdata_test.target, predicted)) #예측값과 실제값 비교Local Linear Trend Model with the Kalman FilterThe one-state local linear trend model is expressed as:$$\begin{align*} v_t &= \mu_t +\epsilon_t \qquad &\epsilon_t \sim N(0,\sigma_{\epsilon_t}^2)\\ \mu_{t+1} &= \mu_t +\eta_t \qquad &\eta_t \sim N(0,\sigma_{\eta_t}^2)\\\end{align*} $$where $\epsilon_t$ and $\eta_t$ are assumed random indepedent Normal processes.ff_data = pd.read_excel("../data/25_Portfolios_ME_Prior_12.xlsx", sheet_name=[0, 1], header=0, index_col=0)[1] / 100 ff_data.index = pd.date_range(start='07/1963', end='03/2021', freq='m') ff_data.head() p25_data = pd.read_excel("../data/25_Portfolios_ME_Prior_12.xlsx", sheet_name=[0, 1], header=0, index_col=0)[0] p25_data.index = pd.date_range(start='01/1927', end='03/2021', freq='m') p25_data.head() df_corner = p25_data[['SMALL LoPRIOR', 'SMALL HiPRIOR', 'BIG LoPRIOR', 'BIG HiPRIOR']] / 100 df_corner = pd.concat([df_corner, ff_data], join='inner', axis=1) df_corner # CAPM fig, axes = plt.subplots(2, 2, figsize=(15, 10)) for i in range(4): y = df_corner.iloc[:, i:i+1].values - df_corner.iloc[:, -1:].values X = sm.add_constant(df_corner['Mkt-RF'].values) mod = KalmanFilter(y, X) res = mod.fit() print(res.summary()) ss = pd.DataFrame(res.smoothed_state.T, columns=['alpha', 'beta']) axes[i//2][i%2].plot(ss['alpha']) axes[i//2][i%2].set_ylabel("Smoothed estimate on alpha") ax2 = axes[i//2][i%2].twinx() ax2.set_ylabel('Smoothed estimate on beta') ax2.plot(ss['beta'], color='m') axes[i//2][i%2].set_title(f'Time-varying coefficient: {df_corner.columns[i]}') axes[i//2][i%2].autoscale(tight=True) fig.tight_layout();Statespace Model Results ============================================================================== Dep. Variable: y No. 
Observations: 692 Model: KalmanFilter Log Likelihood 1051.506 Date: Mon, 02 Aug 2021 AIC -2095.013 Time: 22:03:07 BIC -2076.855 Sample: 0 HQIC -2087.990 - 692 Covariance Type: opg ================================================================================ coef std err z P>|z| [0.025 0.975] -------------------------------------------------------------------------------- intercept 6.42e-12 6.11e-16 1.05e+04 [...]NOTES USING run.py and NOT run2.py sys path (or run2.py) may interfere with overwriting configs, NEED TO LOOK INTO MORE WILL ONLY WORK FOR SIMULATIONS WHERE N>1. CAN STILL VISUALIZE SINGLE RUN DATA AS SEEN BELOW, BUT cadCAD RUN MUST BE N>1.# import sys # sys.path.append('../') import pandas as pd import matplotlib.pyplot as plt # import run2 from src.sim import run import seaborn as sns # For analysis import numpy as np import pandas as pd sns.set_style("whitegrid") !pip list | grep cadCAD experiments = run.run() experiments experiments.dataset[0].head() experiments.dataset[0].tail() experiments.dataset[1].head() len(experiments.dataset[0]) experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # print(df['price']) df = df.groupby('timestep').agg({'price': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'Price' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Price') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('price','mean'), label='Mean', ax=ax, legend=True) ax.fill_between(df.timestep, df[('price','min')], df[('price','max')], alpha=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close()FIRST RUN ONLY!experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # print(df['price']) df = df.groupby('timestep').agg({'price': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'Price' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Price') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('price','mean'), label='Mean', ax=ax, legend=True) ax.fill_between(df.timestep, df[('price','min')], df[('price','max')], alpha=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] df = df.groupby('timestep').agg({'spot_price': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'spot_price' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) 
ax.set_ylabel('spot_price') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('spot_price','mean'), label='Mean', ax=ax, legend=True) ax.fill_between(df.timestep, df[('spot_price','min')], df[('spot_price','max')], alpha=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] df = df.groupby('timestep').agg({'alpha': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'Alpha' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Alpha') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('alpha','mean'), label='Mean', ax=ax, legend=True) ax.fill_between(df.timestep, df[('alpha','min')], df[('alpha','max')], alpha=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] df = df.groupby('timestep').agg({'reserve': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'Reserve' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('reserve','mean'), label='Mean', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('reserve','min')], df[('reserve','max')], reserve=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) # df = experiments cols = 1 rows = 1 cc_idx = 0 # len(experiments) # df = experiments.copy() # print(df.head()) while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # print(df.head()) plot_label = experiment['rules_price'] ax = axs title = 'supply' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y=('supply','mean'), label='Mean', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close()INTO AGENT LEVEL across AGENTS This plot is across agents of the same variable (this case is 
agent_attestations_1) Can be repeated for all other columns in agents dataframeexperiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) df['agent_2_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][1])) df['agent_3_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][2])) df['agent_4_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][3])) df['agent_5_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][4])) # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'agent_attest_1' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_attest_1', label='agent_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_attest_1', label='agent_2', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_attest_1', label='agent_3', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_attest_1', label='agent_4', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_attest_1', label='agent_5', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close()INTO AGENT LEVEL across columns in AGENTS dataframe THIS CASE FIRST AGENT CAN ADD MORE VARS and REPEAT FOR OTHER AGENTSexperiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) df['agent_1_supply_1'] = df.agents.apply(lambda x: np.array(x['agent_supply_1'][0])) df['agent_1_supply_0'] = df.agents.apply(lambda x: np.array(x['agent_supply_0'][0])) df['agent_1_supply_free'] = df.agents.apply(lambda x: np.array(x['agent_supply_free'][0])) df['agent_1_attest_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][0])) # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'agent_attest_1' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_reserve', label='agent_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_1_supply_1', label='agent_supply_1', ax=ax, legend=True) df.plot(x='timestep', 
y='agent_1_supply_0', label='agent_supply_0', ax=ax, legend=True) df.plot(x='timestep', y='agent_1_supply_free', label='agent_supply_free', ax=ax, legend=True) df.plot(x='timestep', y='agent_1_attest_1', label='agent_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_1_attest_0', label='agent_attestations_0', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close()DIfference in Agents to See Response This example compares reserveexperiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) df['agent_2_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][1])) df['agent_3_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][2])) df['agent_4_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][3])) df['agent_5_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) # print( df['agent_1_reserve']) df['agent_1_reserve'] = df.agent_1_reserve.diff() df['agent_2_reserve'] = df.agent_2_reserve.diff() df['agent_3_reserve'] = df.agent_3_reserve.diff() df['agent_4_reserve'] = df.agent_4_reserve.diff() df['agent_5_reserve'] = df.agent_5_reserve.diff() # hack subtracting starting amount df['reserve'] = df.reserve.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Reserve Differnce vs Global Reserve' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_reserve', label='agent_1_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_reserve', label='agent_2_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_reserve', label='agent_3_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_reserve', label='agent_4_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_reserve', label='agent_5_reserve', ax=ax, legend=True) df.plot(x='timestep', y='reserve', label='Global_reserve', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) df['agent_2_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][1])) df['agent_3_reserve'] 
= df.agents.apply(lambda x: np.array(x['agent_reserve'][2])) df['agent_4_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][3])) df['agent_5_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) # print( df['agent_1_reserve']) df['agent_1_reserve'] = df.agent_1_reserve.diff() df['agent_2_reserve'] = df.agent_2_reserve.diff() df['agent_3_reserve'] = df.agent_3_reserve.diff() df['agent_4_reserve'] = df.agent_4_reserve.diff() df['agent_5_reserve'] = df.agent_5_reserve.diff() # hack subtracting starting amount df['reserve'] = df.reserve.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Reserve Differnce vs Global Reserve' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_reserve', label='agent_1_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_reserve', label='agent_2_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_reserve', label='agent_3_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_reserve', label='agent_4_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_reserve', label='agent_5_reserve', ax=ax, legend=True) df.plot(x='timestep', y='price', label='Price', ax=ax, legend=True) df.plot(x='timestep', y='reserve', label='Global Reserve', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) df['agent_2_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][1])) df['agent_3_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][2])) df['agent_4_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][3])) df['agent_5_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) # print( df['agent_1_reserve']) df['agent_1_reserve'] = df.agent_1_reserve.diff() df['agent_2_reserve'] = df.agent_2_reserve.diff() df['agent_3_reserve'] = df.agent_3_reserve.diff() df['agent_4_reserve'] = df.agent_4_reserve.diff() df['agent_5_reserve'] = df.agent_5_reserve.diff() # hack subtracting starting amount df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Reserve Differnce vs Global Reserve' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) 
ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_reserve', label='agent_1_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_reserve', label='agent_2_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_reserve', label='agent_3_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_reserve', label='agent_4_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_reserve', label='agent_5_reserve', ax=ax, legend=True) df.plot(x='timestep', y='price', label='Price', ax=ax, legend=True) df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) df['agent_2_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][1])) df['agent_3_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][2])) df['agent_4_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][3])) df['agent_5_reserve'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_reserve'][0])) # print( df['agent_1_reserve']) df['agent_1_reserve'] = df.agent_1_reserve.diff() df['agent_2_reserve'] = df.agent_2_reserve.diff() df['agent_3_reserve'] = df.agent_3_reserve.diff() df['agent_4_reserve'] = df.agent_4_reserve.diff() df['agent_5_reserve'] = df.agent_5_reserve.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Reserve Differnce vs Global Reserve' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_reserve', label='agent_1_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_reserve', label='agent_2_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_reserve', label='agent_3_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_reserve', label='agent_4_reserve', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_reserve', label='agent_5_reserve', ax=ax, legend=True) df.plot(x='timestep', y='private_price', label='Private Price', ax=ax, legend=True) df.plot(x='timestep', y='price', label='Alpha', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by 
=['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) df['agent_2_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][1])) df['agent_3_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][2])) df['agent_4_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][3])) df['agent_5_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # print( df['agent_1_attestations_1']) df['agent_1_attestations_1'] = df.agent_1_attestations_1.diff() df['agent_2_attestations_1'] = df.agent_2_attestations_1.diff() df['agent_3_attestations_1'] = df.agent_3_attestations_1.diff() df['agent_4_attestations_1'] = df.agent_4_attestations_1.diff() df['agent_5_attestations_1'] = df.agent_5_attestations_1.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent attestations_1 Difference vs Global attestations_1' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_attestations_1', label='agent_1_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_attestations_1', label='agent_2_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_attestations_1', label='agent_3_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_attestations_1', label='agent_4_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_attestations_1', label='agent_5_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='private_alpha', label='Private Alpha', ax=ax, legend=True) df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) df['agent_2_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][1])) df['agent_3_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][2])) df['agent_4_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][3])) df['agent_5_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # print( 
df['agent_1_attestations_1']) df['agent_1_attestations_1'] = df.agent_1_attestations_1.diff() df['agent_2_attestations_1'] = df.agent_2_attestations_1.diff() df['agent_3_attestations_1'] = df.agent_3_attestations_1.diff() df['agent_4_attestations_1'] = df.agent_4_attestations_1.diff() df['agent_5_attestations_1'] = df.agent_5_attestations_1.diff() df['agent_1_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][0])) df['agent_2_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][1])) df['agent_3_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][2])) df['agent_4_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][3])) df['agent_5_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # print( df['agent_1_attestations_1']) df['agent_1_attestations_0'] = - df.agent_1_attestations_0.diff() df['agent_2_attestations_0'] = - df.agent_2_attestations_0.diff() df['agent_3_attestations_0'] = - df.agent_3_attestations_0.diff() df['agent_4_attestations_0'] = - df.agent_4_attestations_0.diff() df['agent_5_attestations_0'] = - df.agent_5_attestations_0.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent attestations_1 Difference vs Global attestations_1' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_attestations_1', label='agent_1_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_attestations_1', label='agent_2_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_attestations_1', label='agent_3_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_attestations_1', label='agent_4_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_attestations_1', label='agent_5_attestations_1', ax=ax, legend=True) df.plot(x='timestep', y='agent_1_attestations_0', label='agent_1_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='agent_2_attestations_0', label='agent_2_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='agent_3_attestations_0', label='agent_3_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='agent_4_attestations_0', label='agent_4_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='agent_5_attestations_0', label='agent_5_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='private_alpha', label='Private Alpha', ax=ax, legend=True) df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run 
== 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][0])) df['agent_2_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][1])) df['agent_3_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][2])) df['agent_4_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][3])) df['agent_5_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][4])) # # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # # print( df['agent_1_attestations_1']) # df['agent_1_attestations_1'] = df.agent_1_attestations_1.diff() # df['agent_2_attestations_1'] = df.agent_2_attestations_1.diff() # df['agent_3_attestations_1'] = df.agent_3_attestations_1.diff() # df['agent_4_attestations_1'] = df.agent_4_attestations_1.diff() # df['agent_5_attestations_1'] = df.agent_5_attestations_1.diff() # df['agent_1_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][0])) # df['agent_2_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][1])) # df['agent_3_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][2])) # df['agent_4_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][3])) # df['agent_5_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][4])) # # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # # print( df['agent_1_attestations_1']) # df['agent_1_attestations_0'] = - df.agent_1_attestations_0.diff() # df['agent_2_attestations_0'] = - df.agent_2_attestations_0.diff() # df['agent_3_attestations_0'] = - df.agent_3_attestations_0.diff() # df['agent_4_attestations_0'] = - df.agent_4_attestations_0.diff() # df['agent_5_attestations_0'] = - df.agent_5_attestations_0.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Private Alpha vs Global Alpha' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Alpha Value') colors = ['b','orange', 'g', 'magenta', 'r', 'k' ] # df['agent_1_attestations_1'] = df['agent_1_attestations_1'][0] plt.hlines(y= max(df['agent_1_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 1 Private Alpha', color = colors[0]) plt.hlines(y= max(df['agent_2_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 2 Private Alpha', color = colors[1]) plt.hlines(y= max(df['agent_3_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 3 Private Alpha', color = colors[2]) plt.hlines(y= max(df['agent_4_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 4 Private Alpha', color = colors[3]) plt.hlines(y= max(df['agent_5_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 5 Private Alpha', color = colors[4]) # df.plot(x='timestep', y='agent_1_attestations_1', label='agent_1_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_2_attestations_1', label='agent_2_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_3_attestations_1', label='agent_3_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', 
y='agent_4_attestations_1', label='agent_4_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_5_attestations_1', label='agent_5_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_1_attestations_0', label='agent_1_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_2_attestations_0', label='agent_2_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_3_attestations_0', label='agent_3_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_4_attestations_0', label='agent_4_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_5_attestations_0', label='agent_5_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='private_alpha', label='Private Alpha', ax=ax, legend=True, kind= 'scatter') df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True, color = colors[5]) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][0])) df['agent_2_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][1])) # df['agent_3_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][2])) # df['agent_4_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][3])) # df['agent_5_private_alpha'] = df.agents.apply(lambda x: np.array(x['agent_private_alpha'][4])) # # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # # print( df['agent_1_attestations_1']) # df['agent_1_attestations_1'] = df.agent_1_attestations_1.diff() # df['agent_2_attestations_1'] = df.agent_2_attestations_1.diff() # df['agent_3_attestations_1'] = df.agent_3_attestations_1.diff() # df['agent_4_attestations_1'] = df.agent_4_attestations_1.diff() # df['agent_5_attestations_1'] = df.agent_5_attestations_1.diff() # df['agent_1_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][0])) # df['agent_2_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][1])) # df['agent_3_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][2])) # df['agent_4_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][3])) # df['agent_5_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][4])) # # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # # print( df['agent_1_attestations_1']) # df['agent_1_attestations_0'] = - df.agent_1_attestations_0.diff() # df['agent_2_attestations_0'] = - df.agent_2_attestations_0.diff() # df['agent_3_attestations_0'] = - df.agent_3_attestations_0.diff() # df['agent_4_attestations_0'] = - df.agent_4_attestations_0.diff() # df['agent_5_attestations_0'] = - df.agent_5_attestations_0.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # 
df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent Private Alpha vs Global Alpha' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Alpha Value') colors = ['b','orange', 'g', 'magenta', 'r', 'k' ] # df['agent_1_attestations_1'] = df['agent_1_attestations_1'][0] plt.hlines(y= max(df['agent_1_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 1 Private Alpha', color = colors[0]) plt.hlines(y= max(df['agent_2_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 2 Private Alpha', color = colors[1]) # plt.hlines(y= max(df['agent_3_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 3 Private Alpha', color = colors[2]) # plt.hlines(y= max(df['agent_4_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 4 Private Alpha', color = colors[3]) # plt.hlines(y= max(df['agent_5_private_alpha']), xmin=0, xmax=max(df.timestep), label = 'Agent 5 Private Alpha', color = colors[4]) # df.plot(x='timestep', y='agent_1_attestations_1', label='agent_1_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_2_attestations_1', label='agent_2_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_3_attestations_1', label='agent_3_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_4_attestations_1', label='agent_4_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_5_attestations_1', label='agent_5_attestations_1', ax=ax, legend=True) # df.plot(x='timestep', y='agent_1_attestations_0', label='agent_1_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_2_attestations_0', label='agent_2_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_3_attestations_0', label='agent_3_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_4_attestations_0', label='agent_4_attestations_0', ax=ax, legend=True) # df.plot(x='timestep', y='agent_5_attestations_0', label='agent_5_attestations_0', ax=ax, legend=True) df.plot(x='timestep', y='private_alpha', label='Private Alpha', ax=ax, legend=True, kind= 'scatter') df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True, color = colors[5]) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
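# Sketch addressing the "MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS" notes in this notebook.
# This is only an assumption-based refactor of the repeated per-agent blocks: it assumes
# df.agents keeps the same per-agent lists used above, and the helper name is hypothetical.
def add_agent_diff_columns(df, key, n_agents=5, sign=1):
    """Build agent_<i>_<key> columns from df.agents and replace them with their per-step diff."""
    for i in range(n_agents):
        col = 'agent_{}_{}'.format(i + 1, key)
        df[col] = df.agents.apply(lambda x, i=i: np.array(x['agent_' + key][i]))
        df[col] = sign * df[col].diff()
    return df
# e.g. add_agent_diff_columns(df, 'attestations_1') and add_agent_diff_columns(df, 'attestations_0', sign=-1)
# would replace the repeated extraction/diff blocks above.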
ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() experiments = experiments.sort_values(by =['rules_price']).reset_index(drop=True) cols = 1 rows = 1 cc_idx = 0 # config_labels = ['RULE 1,'RULE 2'] while cc_idx 0] # FIRST RUN ONLY df = df[df.run == 1] # MAKE A FOR LOOP or FUNCTION FOR ALL AGENTS df['agent_1_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) df['agent_2_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][1])) df['agent_3_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][2])) df['agent_4_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][3])) df['agent_5_attestations_1'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # print( df['agent_1_attestations_1']) df['agent_1_attestations_1'] = df.agent_1_attestations_1.diff() df['agent_2_attestations_1'] = df.agent_2_attestations_1.diff() df['agent_3_attestations_1'] = df.agent_3_attestations_1.diff() df['agent_4_attestations_1'] = df.agent_4_attestations_1.diff() df['agent_5_attestations_1'] = df.agent_5_attestations_1.diff() df['agent_1_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][0])) df['agent_2_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][1])) df['agent_3_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][2])) df['agent_4_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][3])) df['agent_5_attestations_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_0'][4])) # df['agent_1_attest_0'] = df.agents.apply(lambda x: np.array(x['agent_attestations_1'][0])) # print( df['agent_1_attestations_1']) df['agent_1_attestations_0'] = - df.agent_1_attestations_0.diff() df['agent_2_attestations_0'] = - df.agent_2_attestations_0.diff() df['agent_3_attestations_0'] = - df.agent_3_attestations_0.diff() df['agent_4_attestations_0'] = - df.agent_4_attestations_0.diff() df['agent_5_attestations_0'] = - df.agent_5_attestations_0.diff() # hack subtracting starting amount # df['alpha'] = df.alpha.diff() # df = df.groupby('timestep').agg({'supply': ['min', 'mean', 'max']}).reset_index() # df['agent_attest_1'] = np.array(df.agent_attest_1,dtype = float) # print(df['agent_attest_1']) # print(df['agent_attest_1'][10]) plot_label = experiment['rules_price'] ax = axs title = 'Agent attestations_1 Difference vs Global attestations_1' + '\n' + 'Scenario: ' + str(cc_label) + ' rules_price' ax.set_title(title) ax.set_ylabel('Amount') colors = ['b','orange', 'g', 'magenta', 'r'] # ax.plot(df.timestep, df['price'],color = colors[0], label='Price') df.plot(x='timestep', y='agent_1_attestations_1', label='agent_1_attestations_1', ax=ax, legend=True, color = colors[0]) df.plot(x='timestep', y='agent_2_attestations_1', label='agent_2_attestations_1', ax=ax, legend=True, color = colors[1]) df.plot(x='timestep', y='agent_3_attestations_1', label='agent_3_attestations_1', ax=ax, legend=True, color = colors[2]) df.plot(x='timestep', y='agent_4_attestations_1', label='agent_4_attestations_1', ax=ax, legend=True, color = colors[3]) df.plot(x='timestep', y='agent_5_attestations_1', label='agent_5_attestations_1', ax=ax, legend=True, color = colors[4]) df.plot(x='timestep', 
y='agent_1_attestations_0', label='agent_1_attestations_0', ax=ax, legend=True, color = colors[0]) df.plot(x='timestep', y='agent_2_attestations_0', label='agent_2_attestations_0', ax=ax, legend=True, color = colors[1]) df.plot(x='timestep', y='agent_3_attestations_0', label='agent_3_attestations_0', ax=ax, legend=True, color = colors[2]) df.plot(x='timestep', y='agent_4_attestations_0', label='agent_4_attestations_0', ax=ax, legend=True, color = colors[3]) df.plot(x='timestep', y='agent_5_attestations_0', label='agent_5_attestations_0', ax=ax, legend=True, color = colors[4]) # df.plot(x='timestep', y='private_alpha', label='Private Alpha', ax=ax, legend=True) # df.plot(x='timestep', y='alpha', label='Alpha', ax=ax, legend=True) # ax.fill_between(df.timestep, df[('supply','min')], df[('supply','max')], supply=0.3) ax.legend() ax.set_xlabel('Timesteps') ax.grid(color='0.9', linestyle='-', linewidth=1) plt.tight_layout() fig.tight_layout(rect=[0, 0, 1, .97]) fig.patch.set_alpha(1) display(fig) plt.close() print(df['private_alpha'].unique()) df.private_alpha.hist()Initial data exploration---[Memotion dataset 7k](https://www.kaggle.com/williamscott701/memotion-dataset-7k): Dataset for sentiment classification of memesimport numpy as np import pandas as pd import matplotlib.pyplot as plt import os import pickle from PIL import ImageLoadprint('Current directory:', os.getcwd()) print('Changing directory: moving up') os.chdir('../') print('Current directory:', os.getcwd()) # Pickling data_raw_path = "data/raw/" with open(data_raw_path+"labels_pd_pickle","rb") as file_handle: labels_df = pickle.load(file_handle) with open(data_raw_path+"reference_df_pickle","rb") as file_handle: reference_df = pickle.load(file_handle) display(labels_df.head(10)) labels_df.shape display(reference_df.head(3)) reference_df.shapeImagesdef plot_image(image_name, figure_path=None): image = Image.open(data_raw_path+"images/"+image_name) # summarize some details about the image print(image.format, image.size, image.mode) # show the image plt.imshow(image) plt.axis('off') if figure_path: plt.savefig(figure_path, bbox_inches='tight') plt.show() plot_image(labels_df.image_name[0]) labels_df.text_corrected[0]JPEG (735, 500) RGBText and labels Missing valueslabels_df.isna().sum() labels_na = labels_df[labels_df.isna().any(axis=1)] pd.set_option('display.max_rows', labels_na.shape[0]+1) labels_na labels_na = labels_df[labels_df.text_corrected.isna()] pd.set_option('display.max_rows', labels_na.shape[0]+1) labels_na # Explore the 5 images with NaN text_corrected for i, row in labels_na.iterrows(): print(i) plot_image(row.image_name)119 JPEG (500, 480) RGBDrop NAlabels_df = labels_df.dropna()Labels distributionlabels_df.describe() colors = ['C0', 'C1', 'C2', 'C3', 'C4'] _ = labels_df.humour.value_counts().plot(kind='bar', figsize=(10,5), rot=0, color=colors) plt.savefig('reports/figures/humour_distribution.png', bbox_inches='tight') _ = labels_df.sarcasm.value_counts().plot(kind='bar', figsize=(10,5), rot=0, color=colors) plt.savefig('reports/figures/sarcasm_distribution.png', bbox_inches='tight') _ = labels_df.offensive.value_counts().plot(kind='bar', figsize=(10,5), rot=0, color=colors) plt.savefig('reports/figures/offensive_distribution.png', bbox_inches='tight') _ = labels_df.motivational.value_counts().plot(kind='bar', figsize=(10,5), rot=0, color=colors) plt.savefig('reports/figures/motivational_distribution.png', bbox_inches='tight') _ = labels_df.overall_sentiment.value_counts().plot(kind='bar', figsize=(10,5), rot=0, 
color=colors) plt.savefig('reports/figures/overall_sentiment_distribution.png', bbox_inches='tight')Observations and Insights Observation 1*Capomulin and Ramicane seem to be more effective and consistent at shrinking tumor volumes with Capomulin having an average tumor volume of 40.68 (SD=4.99) and Ramicane having an average tumor volume of 40.22 (SD=4.85). The rest of the treatments all have average tumor volumes above 52 with standard deviations over 6.* Observation 2 *As time increases, the tumor volume of mice treated with Capomulin tends to decrease.* Observation 3 *There is a strong positive relationship between Tumor Volume and Weight in mice treated with Capomulin (Pearson Correlation Coefficient = 0.88).*# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st import numpy as np from random import randint # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset df = pd.merge(mouse_metadata, study_results, on="Mouse ID") # Display the data table for preview df # Checking the number of mice. num_mice = len(df["Mouse ID"].unique()) print(f'There are {num_mice} mice in the dataset') # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. duplicate_rows = df[df.duplicated(['Mouse ID', 'Timepoint'])] mouse_id = duplicate_rows['Mouse ID'].unique() print(f'The duplicate rows are from mouse {mouse_id[0]}') # Optional: Get all the data for the duplicate mouse ID. all_duplicate_data = df[df['Mouse ID']==mouse_id[0]] all_duplicate_data # Create a clean DataFrame by dropping the duplicate mouse by its ID. clean_df = df[df['Mouse ID']!=mouse_id[0]] # Checking the number of mice in the clean DataFrame. num_mice = len(clean_df["Mouse ID"].unique()) print(f'There are {num_mice} mice in the clean dataset')There are 248 mice in the clean datasetSummary Statistics# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Use groupby and summary statistical methods to calculate the following properties of each drug regimen: # mean, median, variance, standard deviation, and SEM of the tumor volume. # Assemble the resulting series into a single summary dataframe. 
grouped_drug = clean_df.groupby('Drug Regimen').mean()['Tumor Volume (mm3)'] col_mean = clean_df.groupby('Drug Regimen').mean()['Tumor Volume (mm3)'] col_median = clean_df.groupby('Drug Regimen').median()['Tumor Volume (mm3)'] col_var = clean_df.groupby('Drug Regimen').var()['Tumor Volume (mm3)'] col_sd = clean_df.groupby('Drug Regimen').std()['Tumor Volume (mm3)'] col_sem = clean_df.groupby('Drug Regimen').sem()['Tumor Volume (mm3)'] summary_regimen = pd.DataFrame({'Mean(Tumor Vol)': col_mean, 'Median(Tumor Vol)': col_median, 'Var(Tumor Vol)': col_var, 'SD(Tumor Vol)': col_sd, 'SEM(Tumor Vol)': col_sem}) print(summary_regimen) # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Using the aggregation method, produce the same summary statistics in a single line summary_regimen2 = clean_df.groupby('Drug Regimen').agg(['mean', 'median', 'var', 'std', 'sem'])["Tumor Volume (mm3)"] summary_regimen2Bar and Pie Charts#### Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas. ## total number of measurements = count of timepoints per drug regimen measurements = clean_df.groupby("Drug Regimen").count()["Timepoint"] measurements_bar = measurements.plot(kind="bar", color = "#659ab5") plt.title("Measurements Taken per Drug") plt.xlabel("Drug Regimen") plt.ylabel("No. of Measurements") plt.xticks(rotation=45) plt.show() plt.savefig('images/measurements_pandasplot.png') # Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot. x_axis = np.arange(len(col_regimen)) #create ticks tick_locs = [value for value in x_axis] plt.bar(x_axis, measurements, color="#866b99", align="center") plt.xlabel("Drug Regimen") plt.ylabel("No. 
of Measurements") plt.xticks(ticks=tick_locs, rotation=45, labels=measurements.index) plt.title("Measurements Taken per Drug") plt.show() plt.savefig("images/measurements_matplotlib.png") # Generate a pie plot showing the distribution of female versus male mice using pandas unique_mice = clean_df.drop_duplicates(subset="Mouse ID") sexes = unique_mice["Sex"].value_counts() sexes.plot(kind="pie", legend=True, autopct="%1.1f%%", labels=None, title="Sex Distribution of Mice", colors=['#1e458e', '#Bf67c0']) plt.savefig('images/sexes_pandas.png') # Generate a pie plot showing the distribution of female versus male mice using pyplot plt.pie(sexes, labels=None, autopct="%1.1f%%", shadow=True, colors=['#1e458e', '#Bf67c0'], startangle=90) plt.title("Sex Distribution of Mice") plt.legend(["Male", "Female"]) plt.show() plt.savefig('images/sexes_matplotlib.png')Quartiles, Outliers and Boxplots# Calculate the final tumor volume of each mouse across four of the treatment regimens: # Capomulin, Ramicane, Infubinol, and Ceftamin ## Start by getting the last (greatest) timepoint for each mouse last_timepoints = clean_df.groupby("Mouse ID").max()["Timepoint"] ## Merge this group df with the original dataframe to get the tumor volume at the last timepoint final_df = pd.merge(last_timepoints, clean_df, on=['Mouse ID', 'Timepoint']) # Put treatments into a list for for loop (and later for plot labels) treatments = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin'] #individual subsets for each treatment capomulin_df = final_df[final_df['Drug Regimen']=='Capomulin'] ramicane_df = final_df[final_df['Drug Regimen']=='Ramicane'] infubinol_df = final_df[final_df['Drug Regimen']=='Infubinol'] ceftamin_df = final_df[final_df['Drug Regimen']=='Ceftamin'] #determine outliers for each treatment ##capomulin tumor_vols_capo = capomulin_df['Tumor Volume (mm3)'] q1_capo = tumor_vols_capo.quantile(0.25) q3_capo = tumor_vols_capo.quantile(0.75) iqr_capo = q3_capo - q1_capo lower_capo = q1_capo - 1.5*iqr_capo upper_capo = q3_capo + 1.5*iqr_capo potential_outliers_capo = capomulin_df[(capomulin_df['Tumor Volume (mm3)']upper_capo)] ##ramicane tumor_vols_rami = ramicane_df['Tumor Volume (mm3)'] q1_rami = tumor_vols_rami.quantile(0.25) q3_rami = tumor_vols_rami.quantile(0.75) iqr_rami = q3_rami - q1_rami lower_rami = q1_rami - 1.5*iqr_rami upper_rami = q3_rami + 1.5*iqr_rami potential_outliers_rami = ramicane_df[(ramicane_df['Tumor Volume (mm3)']upper_rami)] ##infubinol tumor_vols_infu = infubinol_df['Tumor Volume (mm3)'] q1_infu = tumor_vols_infu.quantile(0.25) q3_infu = tumor_vols_infu.quantile(0.75) iqr_infu = q3_infu - q1_infu lower_infu = q1_infu - 1.5*iqr_infu upper_infu = q3_infu + 1.5*iqr_infu potential_outliers_infu = infubinol_df[(infubinol_df['Tumor Volume (mm3)']upper_infu)] ##ceftamin tumor_vols_ceft = ceftamin_df['Tumor Volume (mm3)'] q1_ceft = tumor_vols_ceft.quantile(0.25) q3_ceft = tumor_vols_ceft.quantile(0.75) iqr_ceft = q3_ceft - q1_ceft lower_ceft = q1_ceft - 1.5*iqr_ceft upper_ceft = q3_ceft + 1.5*iqr_ceft potential_outliers_ceft = ceftamin_df[(ceftamin_df['Tumor Volume (mm3)']upper_ceft)] print(f'Tumor volumes in the Capomulin treatment group that are below {round(lower_capo,2)} or above {round(upper_capo,2)} could be considered outliers.') print(f'There are {len(potential_outliers_capo)} potential outliers in the Capomulin treatment group.') print('------------------------------------------------------------------') print(f'Tumor volumes in the Ramicane treatment group that are below {round(lower_rami,2)} or 
above {round(upper_rami,2)} could be considered outliers.') print(f'There are {len(potential_outliers_rami)} potential outliers in the Ramicane treatment group.') print('------------------------------------------------------------------') print(f'Tumor volumes in the Infubinol treatment group that are below {round(lower_infu,2)} or above {round(upper_infu,2)} could be considered outliers.') print(f'There is {len(potential_outliers_infu)} potential outlier in the Infubinol treatment group.') print('------------------------------------------------------------------') print(f'Tumor volumes in the Ceftamin treatment group that are below {round(lower_ceft,2)} or above {round(upper_ceft,2)} could be considered outliers.') print(f'There are {len(potential_outliers_ceft)} potential outliers in the Ceftamin treatment group.') # Generate a box plot of the final tumor volume of each mouse across four regimens of interest data_list = [capomulin_df['Tumor Volume (mm3)'], ramicane_df['Tumor Volume (mm3)'], infubinol_df['Tumor Volume (mm3)'], ceftamin_df['Tumor Volume (mm3)']] #boxplot bplot = plt.boxplot(x=data_list, labels=treatments, patch_artist=True) #title and axis labels plt.title("Final Tumor Volumes for Different Treatments") plt.xlabel("Treatment") plt.ylabel("Tumor Volume (mm3)") #add color colors = ["#C0E9AD","#ADDBE9", "#D5ADE9", "#E9ADCF"] for patch,color in zip(bplot['boxes'],colors): patch.set_facecolor(color) #show plot plt.show() #save plot plt.savefig('images/tumorvol_boxplots.png')Line and Scatter Plots# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin all_capomulin = clean_df[clean_df['Drug Regimen']=='Capomulin'] all_mice = all_capomulin['Mouse ID'].unique() random_num = randint(0, len(all_mice)) random_id = all_mice[random_num] random_mouse = all_capomulin[all_capomulin['Mouse ID']==random_id] plt.plot(random_mouse['Timepoint'], random_mouse['Tumor Volume (mm3)'], 'o-') plt.title(f'Tumor Volumes of Mouse {random_id}') plt.ylabel('Tumor Volume (mm3)') plt.xlabel('Timepoint') plt.show() plt.savefig('images/random_capo_mouse.png') # Generate a scatter plot of average tumor volume vs. 
mouse weight for the Capomulin regimen capomulin_avg = capomulin_df.groupby('Mouse ID').mean() #scatterplot plt.scatter(capomulin_avg['Tumor Volume (mm3)'], capomulin_avg['Weight (g)'], marker = "o", color="#C0E9AD", edgecolors="#75A75D") #title and axis labels plt.title("Avg Tumor Vol vs Weight for Capomulin Treatment") plt.xlabel("Average Tumor Volume (mm3)") plt.ylabel("Mouse Weight (g)") #show plot plt.show() #save plot plt.savefig('images/capo_tumorvol_weight_scatter.png')Correlation and Regression# Calculate the correlation coefficient and linear regression model # for mouse weight and average tumor volume for the Capomulin regimen #correlation x_values = capomulin_avg['Tumor Volume (mm3)'] y_values = capomulin_avg['Weight (g)'] correlation = st.pearsonr(x_values, y_values) print(f'The correlation between tumor volume and mouse weight is {round(correlation[0],2)}') #linear regression model (slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_values, y_values) regress_values = x_values*slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) #scatterplot plt.scatter(x_values, y_values, marker = "o", color="#C0E9AD", edgecolors="#75A75D") #regression line plt.plot(x_values, regress_values, color="#000000") #title and axis labels plt.title("Avg Tumor Vol vs Weight for Capomulin Treatment") plt.xlabel("Average Tumor Volume (mm3)") plt.ylabel("Mouse Weight (g)") #add regression line equation to plot plt.annotate(line_eq, (33,18), fontsize=12, color="#000000") plt.show() plt.savefig('images/capo_tumorvol_weight_regression.png')Find Meijer store locations.Use the Meijer API to return a list of all stores. Lessons Learned.- Using [Fake GPS location](https://play.google.com/store/apps/details?id=com.lexa.fakegps&hl=en_US) Meijer's app searches for location (0.0, 0.0).- API URL: https://mservices.meijer.com/storeinfo/api/mobile/near?latitude=0.0&longitude=0.0&miles=2000&numToReturn=250 Fred Meijer Offices:2929 Walker Ave NW, Grand Rapids, MI 4954443.016570, -85.726290latitude, longitude=43.016570, -85.726290 account_services_secret="drAqas76Re7RekeBanaMaNEMah7paDE5" basic_token=base64.encodebytes("mma:{}".format(account_services_secret).encode("UTF-8")).decode("UTF-8").strip() url = 'https://mservices.meijer.com/storeinfo/api/mobile/near' payload = { 'latitude': latitude, 'longitude': longitude, 'miles': '10000', 'numToReturn': '10000', } r = requests.get(url, params=payload) rDigging through the sniffed traffic there's an extra 'authorization header'import os os.environ["MEIJER_AUTH"]="Bearer ..." 
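# NOTE: "Bearer ..." above is left as a placeholder rather than a working token.
# Substitute the Authorization value captured from the sniffed app traffic (or export
# MEIJER_AUTH in your shell beforehand) so the request built below is accepted.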
headers = { "Authorization": os.environ["MEIJER_AUTH"], "Version": "6", "User-Agent": "okhttp/3.8.0", } url = 'https://mservices.meijer.com/storeinfo/api/mobile/near' payload = { 'latitude': Latitude, 'longitude': Longitude, 'miles': '10000', 'numToReturn': '10000', } r = requests.get(url, params=payload, headers=headers) r.status_code len(r.json()["store"])From Wiki: https://en.wikipedia.org/wiki/Meijer.> About half of the company's **242 stores** are located in Michigan, with additional locations in Illinois, Indiana, Kentucky, Ohio, and Wisconsin.(r.json()).keys() import json stores = r.json()["store"] stores with open("meijer_stores.json", "w") as fid: json.dump(stores, fid) with open("meijer_stores.json", "r") as fid: stores2 = json.load(fid) stores2todo: correlation between trap size and Tuned Ackleyta = tuned_ackley() ta.visualise3d(lim=20, n=50) ta.visulise_gradient(lim=20, n=50) class single_experiment: def set_objective(self, objective_func): self.objective_func = objective_func def set_optimise(self, optimizer): self.optimizer = optimizer def do(self): optimal, optimum, statistics = self.optimizer.optimise(self.objective_func) if np.linalg.norm(optimal - self.objective_func.get_optimal()) < 1e-1 \ or np.linalg.norm(optimum - self.objective_func.get_optimum()) < 1e-1: statistics['status'] = 'global minimum' elif statistics['status'] != 'diverge': statistics['status'] = 'local minimum' print(statistics['status']) statistics['trail'] = np.array([x,y]) self.analyser = post_analysis(statistics, self.objective_func)plot the domain edge of tuned ackley functionr = 19 angle = np.linspace(0, 2*np.pi, 100) x = np.cos(angle) * 19 y = np.sin(angle) * 19original CMAnp.random.seed(134) # setup objective function ta = tuned_ackley() # setup adjust function optimizer default = do_nothing() # setup optmizer cma = cma_es() paras = {'mean0':np.array([[19.193], [22.221]]), 'std':np.ones((2,)) * 3, 'tol':1e-5, 'adjust_func':default} cma.set_parameters(paras) # boil down to one expreiment exp1 = single_experiment() exp1.set_objective(ta) exp1.set_optimise(cma) exp1.do() exp1.analyser.plot_distance() exp1.analyser.animate_moving_cluster()line search CMAnp.random.seed(134) # setup objective function ta = tuned_ackley() # setup adjust function optimizer line = line_search(alpha=1, beta=0.1) # setup optmizer cmal = cma_es() paras = {'mean0':np.array([[19.193], [22.221]]), 'std':np.ones((2,)) * 3, 'tol':1e-5, 'adjust_func':line} cmal.set_parameters(paras) # boil down to one expreiment exp2 = single_experiment() exp2.set_objective(ta) exp2.set_optimise(cmal) exp2.do() exp2.analyser.plot_distance() exp2.analyser.animate_moving_cluster()egg holder objective line search CMAnp.random.seed(134) # setup objective function eg = eggholder() # setup adjust function optimizer line = line_search(alpha=1, beta=0.1) # setup optmizer cmal = cma_es() paras = {'mean0':np.array([[19.193], [22.221]]), 'std':np.ones((2,)) * 300, 'tol':1e-5, 'adjust_func':line} cmal.set_parameters(paras) # boil down to one expreiment exp3 = single_experiment() exp3.set_objective(eg) exp3.set_optimise(cmal) exp3.do() eg.visualise3d(lim=650, n=100) exp3.analyser.plot_distance()original CMAnp.random.seed(134) # setup objective function eg = eggholder() # setup adjust function optimizer # setup optmizer cmal = cma_es() paras = {'mean0':np.array([[19.193], [22.221]]), 'std':np.ones((2,)) * 300, 'tol':1e-5, 'adjust_func':default} cmal.set_parameters(paras) # boil down to one expreiment exp4 = single_experiment() exp4.set_objective(eg) 
exp4.set_optimise(cmal) exp4.do() exp4.analyser.plot_distance()contrast between line search and Adamad = adam() x0 = np.array([[13.193], [12.221]]) paras = {'x0':x0, 'alpha':0.01, 'beta_1':0.9, 'beta_2':0.999, 'epsilon':1e-8, 'max_iter':100000, 'tol':1e-2} ad.set_parameters(paras) %time minimum, cnt = ad.optimise(ta) print("found minimum position:{}, minimum:{}, iteration counts:{}".format(minimum.ravel(), ta.func(minimum), cnt)) line = line_search() x0 = np.array([[13.193], [12.221]]) paras = {'x0':x0, 'alpha':1, 'beta':0.1, 'max_iter':100, 'tol':1e-2} line.set_parameters(paras) %time minimum, cnt = line.optimise(ta) print("found minimum position:{}, minimum:{}, iteration counts:{}".format(minimum.ravel(), ta.func(minimum), cnt)) np.random.seed(134) # setup objective function ta = tuned_ackley() # setup adjust function optimizer line = line_search(alpha=1, beta=0.1) # setup optmizer cmal = cma_es() paras = {'mean0':np.array([[13.193], [12.221]]), 'std':np.ones((2,)) * 3, 'tol':1e-5, 'adjust_func':default} cmal.set_parameters(paras) # boil down to one expreiment exp5 = single_experiment() exp5.set_objective(ta) exp5.set_optimise(cmal) exp5.do() exp5.analyser.plot_distance() exp5.analyser.animate_scatterplot_distance()Functional programming Functional Programming Understanding to think in a *functional programming* style is almost asimportant as object orientation for building DRY, clear scientific software,and is just as conceptually difficult.Programs are composed of functions: they take data in (which we call*parameters* or *arguments*) and send data out (through `return` statements.)A conceptual trick which is often used by computer scientists to teach the coreidea of functional programming is this: to write a program,in theory, you only ever need functions with **one** argument, even when you think you need two or more. Why?Let's define a program to add two numbers:def add(a, b): return a+b add(5,6)How could we do this, in a fictional version of Python which only defined functions of one argument?In order to understand this, we'll have to understand several of the conceptsof functional programming. Let's start with a program which just adds five tosomething:def add_five(a): return a+5 add_five(6)OK, we could define lots of these, one for each number we want to add. But thatwould be infinitely repetitive. So, let's try to metaprogram that: we want afunction which returns these add_N() functions.Let's start with the easy case: a function which returns a function which adds 5 to something:def generate_five_adder(): def _add_five(a): return a+5 return _add_five coolfunction = generate_five_adder() coolfunction(7)OK, so what happened there? Well, we defined a function **inside** the other function. We can always do that:def thirty_function(): def times_three(a): return a*3 def add_seven(a): return a+7 return times_three(add_seven(3)) thirty_function()When we do this, the functions enclosed inside the outer function are **local** functions, and can't be seen outside:add_sevenThere's not really much of a difference between functions and other variablesin python. A function is just a variable which can have () put after it to callthe code!print(thirty_function) x=[thirty_function, add_five, add] for fun in x: print(fun) And we know that one of the things we can do with a variable is `return` it. 
So we can return a function, and then call it outside:def deferred_greeting(): def greet(): print("Hello") return greet friendlyfunction=deferred_greeting() # Do something else print("Just passing the time...") # OK, Go! friendlyfunction()HelloSo now, to finish this, we just need to return a function to add an arbitrary amount:def define_adder(increment): def adder(a): return a + increment return adder add_3=define_adder(3) add_3(9)We can make this even prettier: let's make another variable pointing to our define_adder() function:add = define_adderAnd now we can do the real magic:add(8)(5)Closures You may have noticed something a bit weird:In the definition of `define_adder`, `increment` is a local variable. It should have gone out of scope and died at the end of the definition. How can the amount the returned adder function is adding still be kept?This is called a **closure**. In Python, whenever a function definition references a variable in the surrounding scope, it is preserved within the function definition.You can close over global module variables as well:name = "James" def greet(): print("Hello, ", name) greet()Hello, JamesAnd note that the closure stores a reference to the variable in the surrounding scope: ("Late Binding")name="Matt" greet()Hello, MattMap and Reduce We often want to apply a function to each variable in an array, to return a new array. We can do this with a list comprehension:numbers=range(10) [add_five(i) for i in numbers]But this is sufficiently common that there's a quick built-in:list(map(add_five, numbers))This **map** operation is really important conceptually when understandingefficient parallel programming: different computers can apply the *mapped*function to their input at the same time. We call this Single Program, MultipleData. (SPMD) **map** is half of the **map-reduce** functional programmingparadigm which is key to the efficient operation of much of today's "datascience" explosion. Let's continue our functional programming mind-stretch by looking at **reduce** operations.We very often want to loop with some kind of accumulator, such as when finding a mean, or finding a maximum:def summer(data): sum = 0.0 for x in data: sum+=x return sum summer(range(10)) import sys def my_max(data): # Start with the smallest possible number highest = sys.float_info.min for x in data: if x>highest: highest=x return highest my_max([2,5,10,-11,-5])These operations, where we have some variable which is building up a result,and the result is updated with some operation, can be gathered together as afunctional program, taking in the operation to be used to combine results as anargument:def accumulate(initial, operation, data): accumulator=initial for x in data: accumulator=operation(accumulator, x) return accumulator def my_sum(data): def _add(a,b): return a+b return accumulate(0, _add, data) my_sum(range(5)) def bigger(a,b): if b>a: return b return a def my_max(data): return accumulate(sys.float_info.min, bigger, data) my_max([2,5,10,-11,-5])Now, because these operations, _bigger, and _add, are such that e.g. (a+b)+c = a+(b+c) , i.e. 
they are **associative**, we could apply our accumulationto the left half and the right half of the array, each on a different computer, and then combine the two halves:1+2+3+4=(1+2)+(3+4)Indeed, with a bigger array, we can divide-and-conquer more times:1+2+3+4+5+6+7+8=((1+2)+(3+4))+((5+6)+(7+8))So with enough parallel computers, we could do this operation on eight numbersin three steps: first, we use four computers to do one each of the pairwiseadds.Then, we use two computers to add the four totals.Then, we use one of the computers to do the final add of the two last numbers.You might be able to do the maths to see that with an N element list, thenumber of such steps is proportional to the logarithm of N.We say that with enough computers, reduction operations are O(ln N)This course isn't an introduction to algorithms, but we'll talk more about thisO() notation when we think about programming for performance.Anyway, this accumulate-under-an-operation process, is so fundamental tocomputing that it's usually in standard libraries for languages which allowfunctional programming:from functools import reduce def my_max(data): return reduce(bigger, data, sys.float_info.min) my_max([2,5,10,-11,-5])Lambda Functions When doing functional programming, we often want to be able to define a function on the fly:def most_Cs_in_any_sequence(sequences): def count_Cs(sequence): return sequence.count('C') counts=map(count_Cs, sequences) return max(counts) def most_Gs_in_any_sequence(sequences): return max(map(lambda sequence: sequence.count('G'),sequences)) data=[ "CGTA", "CGGGTAAACG", "GATTACA" ] most_Gs_in_any_sequence(data)The syntax here is that these two definitions are identical:func_name=lambda a,b,c : a+b+c def func_name(a,b,c): return a+b+clambda defines an "anonymous" function.def most_of_given_base_in_any_sequence(sequences, base): return max(map(lambda sequence: sequence.count(base), sequences)) most_of_given_base_in_any_sequence(data,'A')The above fragment defined a lambda function as a **closure** over `base`. If you understood that, you've got it! To double all elements in an array:data=range(10) list(map(lambda x: 2*x, data)) [2*x for x in data] def my_max(data): return reduce(lambda a,b: a if a>b else b, data, sys.float_info.min) my_max([2,5,10,-11,-5])Using functional programming for numerical methods Probably the most common use in research computing for functional programmingis the application of a numerical method to a function. 
For example:% matplotlib inlinefrom scipy.optimize import newton from numpy import linspace, zeros from matplotlib import pyplot as plt solve_me=lambda x: x**2-x print(newton(solve_me, 2), newton(solve_me,0.2)) xs=linspace(-1,2,50) solved=[xs,list(map(solve_me,xs)),xs,zeros(len(xs))] plt.plot(*solved)1.0 -3.4419051426429775e-21Sometimes such tools return another function:def derivative_simple(func, eps, at): return (func(at+eps)-func(at))/eps def derivative(func, eps): def _func_derived(x): return (func(x+eps)-func(x))/eps return _func_derived straight = derivative(solve_me, 0.01) straight(3) derived=(xs,list(map(solve_me,xs)),xs,list(map(derivative(solve_me,0.01),xs))) plt.plot(*derived) print(newton(derivative(solve_me,0.01),0))0.49500000000000044Of course, coding your own numerical methods is bad:import scipy.misc def derivative(func): def _func_derived(x): return scipy.misc.derivative(solve_me,x) return _func_derived newton(derivative(solve_me),0)Use RASA DSL to Configure DeepPavlov's GO-Bot At DeepPavlov, we support a variety of industry-wide and popular standards to support developing Conversational AI solutions.DSLs, known as Domain-Specific Languages, provide a rich mechanism to define the behavior, or "the what", whilethe underlying system uses the parser to transform these definitions into commands that implement this behavior, or "the how" using the system's components.Until very recently we supported two such DSLs, including industry-standard [AIML](http://docs.deeppavlov.ai/en/master/features/skills/aiml_skill.html), as well as [DSL](http://docs.deeppavlov.ai/en/master/features/skills/dsl_skill.html) designed by one of our partners, EORA.In this tutorial, you will learn how to use another industrial DSL, or, better said, set of DSLs, introduced by RASA.ai,to build simple goal-oriented chatbots using DeepPavlov's GO-bot.This is the very beginning of our work focused on supporting RASA DSLs as a way to configure DeepPavlov-based goal-oriented chatbots,and therefore not all elements of the RASA DSLs are supported. It is also worth mentioning that in 0.12.0 release we want to focus on supporting tools to define the domain logic behind the goal-oriented assistant, and files like `config.yml` and others are out of scope for this release.To configure a DeepPavlov-based goal-oriented chatbot using these DSLs, you need to have at least three basic config files:* `stories.md` (or `stories-{trn, tst, val}.md` but these are just subsamples)* `nlu.md`* `domain.yml`These files allow you to define 3 key elements of the chatbot, including product-level stories, NLU training data, and your chatbot's domain. Concepts Behind Stories.md, NLU.md, and Domain.yml `stories.md` `stories.md` is a mechanism used to teach your chatbot how to respond to user messages. It allows you to control your chatbot's dialog management.These "stories" model real conversations between a user and a chatbot. 
This Markdown-based file is used to define a list of *stories*, and each *story* can have a list of one or more *intents* with (optional) corresponding *slots*, where each *intent* has one or more corresponding *actions* taken by the chatbot. These actions, in general, can be anything, from simple message replies to programmable actions that call APIs of other services.*Note:* In this version, supported actions are limited to simple message replies. In a way, it can be seen as a *dialogues dataset*.*Note: Stories do not provide a strict prescription of how the bot should behave: it is up to the training process to infer the implicit underlying patterns controlling the dialogue.* If you are looking for a way to make the bot follow the story templates strictly as defined, there is a known hack: the more times the model sees the training data, the better the model models the data, so the desired behavior is achieved when the accuracy on the training data is 1. Such a situation is illustrated in the Basic Chatbot section. format A stories file is a markdown file of the following format:```markdown ## story_title (not used by the algorithm, but useful for humans working with the file)* user_action_label{"1st_slot_present_in_action": "slot1_value", .., "Nth_slot_present_in_action": "slotN_value"} - system_respective_utterance* another_user_action_of_the_same_format - another_system_response... ## another_story_title...```**See examples below in this tutorial** `nlu.md` `nlu.md` represents an NLU model of your chatbot. It allows you to provide training examples that show how your chatbot should understand user messages, and then train a model through these examples. While DeepPavlov's GO-bot supports the JSON-based DSTC-2 format for training data, this Markdown format introduced by RASA is the easiest one for humans to read and write. format An NLU file is a markdown file of the following format:```markdown ## intent:possible_user_action_label_1- An example of user text that has the possible_user_action_label_1 action label- Another example of user text that has the possible_user_action_label_1 action label... ## intent:possible_user_action_label_N- An example of user text that has the (possible_user_action_label_N)[action_label] action label...```**See examples below in this tutorial** `domain.yml` `domain.yml` helps you to define the universe your chatbot lives in: what user inputs it expects to get, what actions it should be able to predict, how to respond, and what information to store. This YAML format is relatively simple, and it can be seen as a dictionary of all components of your chatbot, including but not limited to intents, actions, responses, and other things. format A domain file is a YAML file of the following format:```yaml # slots section lists the possible slot names (aka slot types) that are used in the domain (i.e. relevant for the bot's tasks); currently only type: text is supported slots: slot1_name: type: text ... slotN_name: type: text # entities list now follows the slots list 2nd-level keys and is present to support upcoming features. Stay tuned for updates with this! entities:- slot1_name...- slotN_name # intents section lists the intents that can appear in the stories; kept together, they describe the user-side part of go-bot's experience intents: - user_action_label - another_user_action_of_the_same_format ... # responses section lists the system response templates.
Despite system response' titles being usually informative themselves (one could even find them more appropriate when no actual "Natural Language" is needed (e.g. for buttons actions in bot apps)) It is though extremely useful to be able to serialize the response title to text. That's what this section content is needed for.responses: system_utterance_1: - text: "The text that system responds with" another_system_response: - text: "Here some text again"```**See examples below in this tutorial** Basic Chatbot Let's build the simplest chatbot possible.This chatbot will be capable of processing three intents: *greeting*, *goodbye*, and *thanks*.DP_MIN_DEMO_DIR = "dp_minimal_demo_dir" # we will work in this folder import os %cd /content os.makedirs(DP_MIN_DEMO_DIR, exist_ok=True) %cd {DP_MIN_DEMO_DIR}/content /content/dp_minimal_demo_dirStories.md: Basic Stories Example `stories.md` is pretty straightforward in this case. In it you define 3 stories, each having its own intent and response (utterance).Take into account the fact that you can combine all of these intents under one story, or add two intents to one story, and third to another one.%%writefile stories.md ## greet * greet - utter_greet ## thank * thank - utter_noworries ## goodbye * bye - utter_byeWriting stories.mdnlu.md: Basic NLU Training Data Example `nlu.md` has an NLU training data that enables DeepPavlov to recognize user phrases as belonging to one of the intents defined in `domain.yml`.%%writefile nlu.md ## intent:greet - Hi - Hey - Hi bot - Hey bot - Hello - Good morning - hi again - hi folks ## intent:bye - goodbye - goodnight - good bye - good night - see ya - toodle-oo - bye bye - gotta go - farewell ## intent:thank - Thanks - Thank you - Thank you so much - Thanks bot - Thanks for that - cheersWriting nlu.mddomain.yml: Basic Domain Example In this demo, `domain.yml` contains the list of: * possible user action intents, and* possible system response actions*Note:* Entities and slots are omitted in this example. See the more sophisticated example below to see how they can be defined in the `domain.yml`.%%writefile domain.yml intents: - greet - bye - thank responses: utter_noworries: - text: No worries! utter_greet: - text: Hi utter_bye: - text: Bye!Writing domain.ymlThe next step is to install the `deeppavlov` library.!pip install git+https://github.com/deepmipt/DeepPavlov.git@feature/gobot-md-yaml-config !python -m deeppavlov install gobot_simple_dstc2Collecting git+https://github.com/deepmipt/DeepPavlov.git@feature/gobot-md-yaml-config Cloning https://github.com/deepmipt/DeepPavlov.git (to revision feature/gobot-md-yaml-config) to /tmp/pip-req-build-dsjafm4a Running command git clone -q https://github.com/deepmipt/DeepPavlov.git /tmp/pip-req-build-dsjafm4a Running command git checkout -b feature/gobot-md-yaml-config --track origin/feature/gobot-md-yaml-config Switched to a new branch 'feature/gobot-md-yaml-config' Branch 'feature/gobot-md-yaml-config' set up to track remote branch 'feature/gobot-md-yaml-config' from 'origin'. 
Collecting aio-pika==6.4.1 [?25l Downloading https://files.pythonhosted.org/packages/c8/07/196a4115cbef31fa0c3dabdea146f02dffe5e49998341d20dbe2278953bc/aio_pika-6.4.1-py3-none-any.whl (40kB)  |████████████████████████████████| 51kB 2.1MB/s [?25hCollecting Cython==0.29.14 [?25l Downloading https://files.pythonhosted.org/packages/df/d1/4d3f8a7a920e805488a966cc6ab55c978a712240f584445d703c08[...]Define the path to our DSL-based configuration files above (the folder we are in right now) and the folder used to store the trained bot.from deeppavlov import configs from deeppavlov.core.common.file import read_json gobot_config = read_json(configs.go_bot.gobot_md_yaml_minimal) gobot_config['metadata']['variables']['DATA_PATH'] = '.' gobot_config['metadata']['variables']['MODEL_PATH'] = '.'Since our data is basically the mock tutorial data we will use the same subsamples for all of the train (training set), test (test set) and valid (validation set) subsamples. However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.!cp stories.md stories-trn.md !cp stories.md stories-tst.md !cp stories.md stories-val.mdThe next step is to train the bot:from deeppavlov import train_model train_model(gobot_config, download=True)2020-08-07 08:18:09.343 INFO in 'deeppavlov.core.data.utils'['utils'] at line 94: Downloading from http://files.deeppavlov.ai/embeddings/glove.6B.100d.txt to /root/.deeppavlov/downloads/embeddings/glove.6B.100d.txt 347MB [01:35, 3.65MB/s] 2020-08-07 08:19:47.73 INFO in 'deeppavlov.core.data.utils'['utils'] at line 94: Downloading from http://files.deeppavlov.ai/datasets/gobot_md_yaml_minimal.tar.gz to /content/gobot_md_yaml_minimal.tar.gz 100%|██████████| 528/528 [00:00<00:00, 1.53MB/s] 2020-08-07 08:19:47.749 INFO in 'deeppavlov.core.data.utils'['utils'] at line 269: Extracting /content/gobot_md_yaml_minimal.tar.gz archive into /content/dp_minimal_demo_dir 2020-08-07 08:19:47.769 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /tmp/tmpixkz464w] 2020-08-07 08:19:47.776 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /tmp/tmpfcnmbf3s] 2020-08-07 08:19:47.784 INFO in 'deeppavlov.data[...]Finally, it's time to build our bot and experiment with it:from deeppavlov import build_model bot = build_model(gobot_config) bot.reset() bot(["start"]) bot(["Hi"])[0][0].actions_tupleOur bot answers with "greeting" to our "greeting". How will it respond to some grateful message?bot.reset() bot(["start"]) bot(["Thanks!"])[0][0].actions_tupleOk, "no worries" is an expected response. Let's check if the "goodbye" user message is processed with the corresponding reply:bot.reset() bot(["start"]) bot_response_actions = bot(["bye"])[0][0].actions_tuple import yaml system_utter2text = yaml.load(open("domain.yml"))["responses"] system_utter2text[bot_response_actions[0]][0]["text"]Advanced Chatbot: Building a Restaurants Bot inspired by the DSTC Schema-Guided Dialogue DatasetDP_BIG_DEMO_DIR = "dp_big_demo_dir" # we'll work in this directory import os %cd /content os.makedirs(DP_BIG_DEMO_DIR, exist_ok=True) %cd {DP_BIG_DEMO_DIR}/content /content/dp_big_demo_dirWhile the previous demo was focused on figuring out how to work with the very simple goal-oriented chatbot, the reality of chatbots is rarely that simple. Take, for example, the use case for restaurants. People can search for them, ask about the menus, or book tables. 
These activities require a substantially more advanced configuration.In the purpose of this more realistic demo, we decided to go through a rather unusual route. To simplify the process of defining the domain and behavior of this chatbot, we took a famous industrial research dataset provided by the Dialogue Systems Technology Challenge known as DSTC, also known as [Schema Dataset](https://github.com/google-research-datasets/dstc8-schema-guided-dialogue).This dataset contains a huge number of the annotated human-machine conversations crowdsourced in an [M2M manner](https://arxiv.org/pdf/1801.04871.pdf) for various real-life scenarios and domains.One of these domains is dedicated to *Restaurants*. In it, users are performing a variety of the goal-oriented tasks like searching for restaurants or booking tables via interaction with the bot.Given the power and elegance of the DSTC format, we took liberty to use our internal **automatic conversion tool** to directly **transform** its data into the set of _stories.md_, _nlu.md_, _domain.yml_.*Note: the dataset used is this demo is quite large. The dataset files listings are provided in form of file subset listings. Feel free to examine the files yourself.* Download the data used in this tutorial section# let's get the mentioned converted Schema-dataset subset !wget http://files.deeppavlov.ai/datasets/schema_resto_md_yaml_v2.tar.gz !tar -zxf schema_resto_md_yaml_v2.tar.gz--2020-08-07 08:22:47-- http://files.deeppavlov.ai/datasets/schema_resto_md_yaml_v2.tar.gz Resolving files.deeppavlov.ai (files.deeppavlov.ai)... 172.16.17.32 Connecting to files.deeppavlov.ai (files.deeppavlov.ai)|172.16.17.32|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 100665 (98K) [application/octet-stream] Saving to: ‘schema_resto_md_yaml_v2.tar.gz’ schema_resto_md_yam 100%[===================>] 98.31K 110KB/s in 0.9s 2020-08-07 08:22:49 (110 KB/s) - ‘schema_resto_md_yaml_v2.tar.gz’ saved [100665/100665]Technical Note: Automatic Conversion from DSTC Schema Format to RASA DSLs Schema dataset is provided in DSTC (Dialogue State Tracking Challenge) [format](https://github.com/google-research-datasets/dstc8-schema-guided-dialogue).The DSTC format has its own advantages: it is very detailed and allows for various additional info to be incorporated into the dataset itself. In it, there are two major components - Schema Representation, and Dialog Representation. The first component is dedicated to describing sets of Intents, Slots, and Entities that are used by a given service through an API. The second component is focused on describing actual dialogs that happen between users and services. It also includes actual labels for the aforementioned Intents, Slots, and Entities defined in the Schema component. However, while DSTC format is quite rich for building state-of-the-art systems that participate in the annual DSTC competitions, it takes a serious effort for the real-world developers to collect and annotate data using this format. In contrast, RASA DSLs we're illustrating here are quite different from the DSTC: they are meant to be neat and minimalistic, and to allow developers to define their domains from a rather scarce input information. As mentioned in the beginning of this part of the tutorial, we've performed an automatical conversion of the Schema Restaurants dataset from the DSTC format to RASA DSLs. Slot Filler Any typical goal-oriented chatbot system uses a standard approach to define the way it works in the form of a pipeline. 
DeepPavlov's Go-Bot is quite permissive in which components can be used in its pipeline; however, the Slot Filler is the required one. The Slot Filler, also known as the slotfiller, is necessary to recognize and normalize slot-filling information provided in the user's utterances. For example, when a user says that she wants to "book a table in *London*", the slotfiller's job is to recognize that *London* in this phrase represents the required slot `city`. For the purposes of this demo, we are providing a pretrained slotfiller for the dataset. A small notebook showing how the slotfiller was trained will be provided in one of the upcoming releases.from deeppavlov import configs, train_model, build_model from deeppavlov.core.common.file import read_json !python -m deeppavlov download schema_resto_md_yaml/ner_config.json !python -m deeppavlov download schema_resto_md_yaml/slotfiller_config.json slotfill_config = read_json("schema_resto_md_yaml/slotfiller_config.json") slotfiller = build_model(slotfill_config, download=True) slotfiller(["i'm looking for a thai food somewhere in SFO"])Seems OK. Finally, let's save our slotfiller config so we can train and evaluate the restaurants bot.import json json.dump(slotfill_config, open('slotfill_config.json', 'wt'))Known Limitations While slotfilling technology uses the power of the industry-standard Named Entity Recognition (NER) method to recognize key slots in the given phrases, the quality of slot recognition can be substantially increased by combining this process with data already known to the bot's developer. For example, having a finite list of cities that are supported by a given end-user solution (e.g., several cities in the Greater Seattle Area for a local restaurant chain) aids the slotfiller in a significant way. Typically, this information is stored in a database, though it may also be provided in loose files like CSV (comma-separated values). However, in order to focus on the support of the RASA DSLs, we made a conscious decision to omit the support of such data in this demo. An additional demo highlighting usage of such data will be provided in one of the subsequent releases. Nevertheless, our demo goal-oriented bot should still be able to generalize and use the global patterns in the conversations.
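As a rough illustration of the gazetteer idea described above (this is not part of the demo's pipeline), a fixed list of supported cities could be used to validate the `city` value returned by the slotfiller before the bot acts on it; the city list and the helper name below are hypothetical:

```python
# Hypothetical post-processing of slotfiller output against a known city list.
SUPPORTED_CITIES = {"san francisco", "oakland", "mountain view"}  # assumed, not taken from the dataset

def validate_city_slot(slots):
    """Keep the city slot only when it matches a city the bot actually supports."""
    city = slots.get("city", "").strip().lower()
    if city in SUPPORTED_CITIES:
        slots["city"] = city.title()   # normalize casing
    else:
        slots.pop("city", None)        # unknown city: drop it so the bot can re-ask
    return slots

validate_city_slot({"city": "Oakland", "cuisine": "Fish"})  # -> {'city': 'Oakland', 'cuisine': 'Fish'}
```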
Stories.md: Advanced Stories Example `stories.md`*Note: As said above, this file has been auto-generated from the DSTC schema.*Like in the Basic Demo, Stories here define a variety of interactions between user and our bot.STORIES_FPATH = "schema_resto_md_yaml/stories.md" !echo "stories file size (lines): $(wc -l {STORIES_FPATH})" !echo -e '\n\npart of stories file is listed below\n' !head -500 {STORIES_FPATH} | tail -30stories file size (lines): 4768 schema_resto_md_yaml/stories.md part of stories file is listed below - utter_INFORM_Cuisine+INFORM_StreetAddress+NOTIFY_SUCCESS * THANK_YOU+GOODBYE{} - utter_GOODBYE ## 25 * hi{} - utter_hi * INFORM_INTENT_Intent{"intent": "FindRestaurants"} - utter_REQUEST_City * INFORM_City{"city": "Oakland"} - utter_REQUEST_Cuisine * INFORM_Cuisine{"cuisine": "Fish"} - utter_OFFER_RestaurantName+OFFER_City+INFORM_COUNT_Count * REQUEST_ALTS{} - utter_OFFER_RestaurantName+OFFER_City * INFORM_PriceRange+REQUEST_ALTS{"price_range": "moderate"} - utter_OFFER_RestaurantName+OFFER_City+INFORM_COUNT_Count * REQUEST_ServesAlcohol+REQUEST_HasLiveMusic{} - utter_INFORM_HasLiveMusic+INFORM_ServesAlcohol * INFORM_INTENT_Intent+SELECT{"intent": "ReserveRestaurant"} - utter_REQUEST_Time * INFORM_PartySize+INFORM_Time{"party_size": "2", "time": "10:30 in the morning"} - utter_CONFIRM_RestaurantName+CONFIRM_City+CONFIRM_Time+CONFIRM_PartySize+CONFIRM_Date * R[...]nlu.md: Advanced NLU Training Data Example `nlu.md`*Note: As said above, this file has been auto-generated from the DSTC schema, and it's quite large. Below you can see only a part of this file. Feel free to examine the entire file.*Like in the Basic Demo, `nlu.md` shows the examples of the user utterances for the supported intent classes.The slotfilling and NER information is provided in the form of the inline mark-up.NLU_FPATH = "schema_resto_md_yaml/nlu.md" !echo "nlu file size (lines): $(wc -l {NLU_FPATH})" !echo -e '\n\npart of nlu file is listed below\n' !head -50 {NLU_FPATH} | tail -20nlu file size (lines): 2147 schema_resto_md_yaml/nlu.md part of nlu file is listed below - [Evening 7:15](time) at Mcdonald's. - [6:15 in the evening](time) at Bistro liaison. - Let's set it up for [6:30 pm](time) at Sakoon. - I would like a reservation in [Flames Eatery](restaurant_name) at [19:00](time) - I'm thinking about Rice Bowl, at [morning 11:30](time) ## intent:REQUEST_Cuisine+AFFIRM - Sounds great. Do you know if they cook [Taiwanese](cuisine) food? - yeah. what do they serve? ## intent:AFFIRM - Yes - That's great. - Sure that sounds good. - That works! - that would be great. - That sounds good. - Yes that works for me - Sure, that sounds great! - This is good for meLet's take a closer look to some specific intent examples.!grep --no-group-separator -m 10 -A 1 -P "(INFORM_Cuisine|INFORM_City)" {NLU_FPATH}## intent:INFORM_City+INFORM_INTENT_Intent - Can you find me somewhere to eat in Dublin? ## intent:INFORM_Cuisine - Anyone that is [burger](cuisine) like in nature ## intent:INFORM_City+INFORM_Cuisine - I'm looking for [European](cuisine) food in San Francisco. ## intent:INFORM_Cuisine+INFORM_INTENT_Intent - I am looking for a [Greek](cuisine) place to eat please. ## intent:INFORM_City - It's in San Francisco. ## intent:INFORM_Cuisine+REQUEST_ALTS - I'd like another kind of restaurant. I'd like Sushi. ## intent:INFORM_City+INFORM_Time - I want a restaurant in Mountain View, and a reservation at 1 o"clock in the afternoon. 
## intent:INFORM_PriceRange+INFORM_Cuisine - I would prefer some [Filipino](cuisine) food which is affordable and not too expensive. ## intent:INFORM_City+INFORM_Time+NEGATE - No. Please make it for [7:30 pm](time) at Santa Clara. ## intent:INFORM_RestaurantName+INFORM_City+INFORM_Time - the restaurant is la traviata.i want it in san fran.make it at [6:30 pm](time)domain.yml: Advanced Domain Example `domain.yml`*Note: As said above, this file has been auto-generated from the DSTC schema, and it's quite large. Below you can see only a part of this file. Feel free to examine the entire file.*The domain file now provides the list of slots and entities as defined by the DSTC schema, as well as the supported intent classes and system response text templates.DOMAIN_FPATH = "schema_resto_md_yaml/domain.yml" !echo "domain file size (lines): $(wc -l {DOMAIN_FPATH})" !echo -e '\n\nmost of domain file is listed below, just some portion of intents and response templates is skipped \n' !head -40 {DOMAIN_FPATH} && echo "..." !grep -B 1 -A 10 responses {DOMAIN_FPATH} && echo "..." !grep --no-group-separator -A 1 OFFER_City: {DOMAIN_FPATH} && echo "..." !grep --no-group-separator -A 1 CONFIRM_Time: {DOMAIN_FPATH} && echo "..."domain file size (lines): 478 schema_resto_md_yaml/domain.yml most of domain file is listed below, just some portion of intents and response templates is skipped slots: price_range: type: text cuisine: type: text date: type: text restaurant_name: type: text intent: type: text party_size: type: text time: type: text has_live_music: type: text city: type: text serves_alcohol: type: text entities: - price_range - cuisine - date - restaurant_name - intent - party_size - time - has_live_music - city - serves_alcohol intents: - INFORM_RestaurantName+INFORM_Time - THANK_YOU+GOODBYE - INFORM_Time+INFORM_RestaurantName - INFORM_PriceRange+REQUEST_ALTS - REQUEST_PhoneNumber+REQUEST_StreetAddress+AFFIRM ... responses: utter_REQUEST_Cuisine: - text: "What are you in the mood for? {cuisine}? {cuisine}" utter_REQUEST_City: - text: "What city are you interested in?" 
utter_OFFER_RestaurantName+OFFER_City: - [...]Now that we have all three key files, like in the Basic Demo, we can now proceed with our bot's training.from deeppavlov import configs from deeppavlov.core.common.file import read_json gobot_config = read_json(configs.go_bot.gobot_md_yaml_minimal) gobot_config['chainer']['pipe'][-1]['slot_filler'] = {"config_path": "slotfill_config.json"} gobot_config['metadata']['variables']['DATA_PATH'] = 'schema_resto_md_yaml' gobot_config['metadata']['variables']['MODEL_PATH'] = '.'Since our data is the tutorial data we will use the same subsamples for all of train (training set), test (test set), and valid (validation set) subsamples.However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.!cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-trn.md !cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-tst.md !cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-val.md from deeppavlov import train_model train_model(gobot_config, download=False); bot = build_model(gobot_config)2020-08-07 08:35:16.789 INFO in 'deeppavlov.core.data.simple_vocab'['simple_vocab'] at line 115: [loading vocabulary from /content/dp_big_demo_dir/word.dict] 2020-08-07 08:35:17.434 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /tmp/tmpu_18leqx] 2020-08-07 08:35:18.107 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /tmp/tmpu2tzz5gn] 2020-08-07 08:35:18.689 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /tmp/tmp4ceyk7ty] 2020-08-07 08:35:19.236 INFO in 'deeppavlov.core.data.simple_vocab'['simple_vocab'] at line 115: [loading vocabulary from /root/.deeppavlov/models/schema_ner/word.dict] 2020-08-07 08:35:19.240 INFO in 'deeppavlov.core.data.simple_vocab'['simple_vocab'] at line 115: [loading vocabulary from /root/.deeppavlov/models/schema_ner/tag.dict] 2020-08-07 08:35:19.243 INFO in 'deeppavlov.core.data.simple_vocab'['simple_[...]Let's see whether the bot works at all:bot.reset() bot(["Hey!"])[0][0].actions_tuple bot.reset()Ok, let's have a conversation:bot.reset() bot(["Hi!"])[0][0].actions_tupleAwesome. Seems like our bot performs well. Let's get some action!bot(["I'd like to find a restaurant for this evening"])[0][0].actions_tupleThe bot replies with the request to provide the necessary information, and we give it back:bot(["Somewhere in Oakland, sushi and for two people please"])[0][0].actions_tupleAnd so on.bot(["Cool! That's what I was looking for, thanks!"])[0][0].actions_tupleLet's say goodbye to our botbot(["Bye bot"])[0][0].actions_tupleWhile it'd be nice for it to reply "Good bye!", it didn't. Why?Given that the DSTC dataset doesn't support this utterance, our bot can't properly react to such user's response. So to make our bot a bit more polite we have to add the -bye -bye utterances to the training data.Notice that you will have to add it to all 3 files (note that in case of `domain.yml` you have to add one line to *intents* and another two to *responses* sections of the file): stories.md```... goodbye* bye - utter_bye...``` nlu.md```... intent:bye- goodbye- goodnight...``` domain.yml```...intents: - bye...responses: utter_bye: - text: Bye!...```You will also have to re-use our stories.md for train, test, and valid stories.md files. 
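The cells below make exactly these edits with `sed`/`echo`/`grep` one-liners. If you prefer to stay in Python, a rough equivalent for the `nlu.md` and `domain.yml` parts could look like the sketch below (it follows the lowercase `bye`/`utter_bye` names from the snippets above, while the shell cells use `utter_BYE`; appending the response template works here only because `responses` is the last section of the generated `domain.yml`). The per-story edit to `stories.md` is easiest with the `sed` one-liner that follows.

```python
# Plain-Python sketch of the nlu.md / domain.yml edits described above.
data_dir = "schema_resto_md_yaml"

# 1) training examples for the new intent
with open(f"{data_dir}/nlu.md", "a") as f:
    f.write("\n## intent:bye\n- goodbye\n- goodnight\n")

# 2) register the intent right after the `intents:` header and append
#    a response template at the end of the file (the responses section)
with open(f"{data_dir}/domain.yml") as f:
    domain = f.read()
domain = domain.replace("intents:", "intents:\n  - bye", 1)
domain += "\n  utter_bye:\n  - text: \"Bye!\"\n"
with open(f"{data_dir}/domain.yml", "w") as f:
    f.write(domain)
```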
Again, for the purposes of this demo, we use the same files for stories.md.However, for a real DeepPavlov-based goal-oriented bot you should use different train, test, and valid sample stories.md files.!sed -i -e "s|^\s*$|\* bye\n - utter_BYE\n|g" {STORIES_FPATH} # add bye to each story !echo -e " utter_BYE:\n - text: \"Bye!\"" >> {DOMAIN_FPATH} # add bye to nlu example !grep -m 1 -A 3 bye ../dp_minimal_demo_dir/nlu.md >> {NLU_FPATH} && echo "" >> {NLU_FPATH} # add bye to response templates !sed -ie "s|intents:|intents:\n - bye|g" {DOMAIN_FPATH} # add bye to domain intents !cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-trn.md !cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-tst.md !cp schema_resto_md_yaml/stories.md schema_resto_md_yaml/stories-val.mdRe-training our bot:!rm -rf model # remove the previous trained model from deeppavlov import train_model train_model(gobot_config, download=False); bot = build_model(gobot_config)Checking it:bot.reset() bot(["Hi!"])[0][0].actions_tuple bot(["I'd like to find a restaurant for this evening"])[0][0].actions_tuple bot(["Somewhere in Oakland, sushi and for two people please"])[0][0].actions_tuple bot(["Cool! That's what I was looking for, thanks!"])[0][0].actions_tuple bot(["Bye bot"])[0][0].actions_tupleComparing with RASANow that we've run through a couple of demos, let's make sure that our configs work the same way as they do using RASA framework.%cd /content !mkdir rasa_demo %cd rasa_demo/content /content/rasa_demoLet's install the RASA library!pip install rasa !python -m spacy download en_core_web_md !python -m spacy link en_core_web_md en !mkdir data !cp ../dp_big_demo_dir/schema_resto_md_yaml/{stories,nlu}.md data !cp ../dp_big_demo_dir/schema_resto_md_yaml/domain.yml .We'll also use some simple rasa environment config from their repo!wget https://raw.githubusercontent.com/RasaHQ/rasa/1.10.x/examples/moodbot/config.yml !rasa train/usr/local/lib/python3.6/dist-packages/rasa/core/domain.py:151: FutureWarning: No tracker session configuration was found in the loaded domain. Domains without a session config will automatically receive a session expiration time of 60 minutes in Rasa version 2.0 if not configured otherwise. session_config = cls._get_session_config(data.get(SESSION_CONFIG_KEY, {})) Training Core model... 2020-08-07 09:34:58 INFO  root - Generating grammar tables from /usr/lib/python3.6/lib2to3/Grammar.txt 2020-08-07 09:34:58 INFO  root - Generating grammar tables from /usr/lib/python3.6/lib2to3/PatternGrammar.txt /usr/local/lib/python3.6/dist-packages/rasa/core/domain.py:151: FutureWarning: No tracker session configuration was found in the loaded domain. Domains without a session config will automatically receive a session expiration time of 60 minutes in Rasa version 2.0 if not configured otherwise. session_config = cls._get_se[...]And when the bot is trained you can interact with it in the interactive mode:!cat | rasa shell2020-08-07 12:54:29 INFO  root - Generating grammar tables from /usr/lib/python3.6/lib2to3/Grammar.txt 2020-08-07 12:54:29 INFO  root - Generating grammar tables from /usr/lib/python3.6/lib2to3/PatternGrammar.txt 2020-08-07 12:54:29 INFO  root - Starting Rasa server on http://localhost:5005 2020-08-07 12:54:45 INFO  rasa.nlu.components - Added 'SpacyNLP' to component cache. Key 'SpacyNLP-en'. 2020-08-07 12:54:51 INFO  root - Rasa server is up and running. Bot loaded. Type a message and press enter (use '/stop' to exit):  Warning: Input is not to a terminal (fd=0). 
Hi!
hi
I'd like to find a restaurant for this evening
Do you have a preferred city and place?
Somewhere in Oakland, sushi and for [...]
Abstract Algebra: An Interactive Approach, 2e ©2015 This notebook is provided with the textbook, "Abstract Algebra: An Interactive Approach, 2nd Ed." by . Users of this notebook are encouraged to buy the textbook.

    Chapter 3Patterns Within the Cosets of GroupsInitialization: This cell MUST be evaluated first:load('absalgtext2.sage')Left and Right CosetsHow to Write a Secret MessageNormal SubgroupsQuotient GroupsSageMath Interactive ProblemsLeft and Right CosetsWe introduced subgroups in the last chapter, but left many questions unanswered. For example, is there any relationship between the size of the group and the size of one of its subgroups?In this chapter we will introduce the tool of cosets to determine many of the properties of subgroups, including what possible sizes the subgroups could be. To understand cosets, let us begin by looking at some cases where an element does notgenerate the group, in hopes of finding some patterns in the circle graphs. Let us look at the example Z10. The generators were 1, 3, 7, and 9, so let us pick an elements besides these, such as 4. Here is the circle graph of Add(4):Z = ZGroup(10) CircleGraph(Z, Add(4))We see that 4 is not a generator, for there are arrows of two colors. The red arrows connect the points {0, 2, 4, 6, 8}, while the green arrows connect the points {1, 3, 5, 7, 9}. Thus, the group is partitioned into two sets, and no arrow connects these two. We can immediately see some patterns in the way that Z10 is partitioned. One of the two sets, {0, 2, 4, 6, 8} is actually a subgroup of Z10, and is the subgroup generated by the element 4. We can check this with the command:Group(Z[4])The other set is obtained by adding 1 to each element of this subgroup. Let us look at another example to see if this pattern continues. Here is the circle graph of Add(5):CircleGraph(Z, Add(5))Here we have five different colored arrows, so the group is partitioned into five sets: {0, 5}, {1, 6}, {2, 7}, {3, 8}, and {4, 9}. One of these sets is the subgroup generated by the element 5:Group(Z[5])The others are obtained by adding some constant to every element in this subgroup. EXPERIMENT:Replace the Add(5) in the above circle graph with either Add(0), Add(2), Add(6), or Add(8). (These are the other elements of the group which are not generators.) Do we obtain any new partitions of the group? By doing this experiment, you probable noticed that the partition of the group depends only on the subgroup generated by the element. Of course this is an abelian group, so it is natural to ask if these patterns we have noticed carry over to non-abelian groups. Let us look at Terry's group.G = InitTerry(); GThis group does not have any generators, so any element of the group will produce a partition of the group. Consider the circle graph that sends every element to that element multiplied by Spin. Because this group is non-abelian, we have a choice as to how to do this. Do we multiply the element x to the left of Spin, giving x·Spin, or do we multiply on the right, giving Spin·x? These two options yield different circle graphs. The former is obtained by the commandCircleGraph(G, LeftMult(Spin))The latter is given by the commandCircleGraph(G, RightMult(Spin))Notice that these two circle graphs produce different partitions of the group! The first gives the partition {Stay, Spin}, {RotRt, FlipLft}, and {RotLft, FlipRt}. The second circle graph gives us the partition {Stay, Spin}, {RotRt, FlipRt}, and {RotLft, FlipLft}. In both cases, one of the sets in the partition is the subgroup generated by Spin:H = Group(Spin); HWe still see some patterns the two partitions. 
In the first case every set in the partition is obtained by multiplying an element such as RotRt or RotLft by each element of the subgroup H. In the second partition, every set is obtained by multiplying each element of the subgroup H by an element such as RotRt or RotLft.EXPERIMENT:Change the element Spin in the last two circle graphs to the other elements of Terry's group. Do all partitions follow the pattern mentioned above? To define these partitions mathematically, we will use the pattern that we have been observing: that every set in the partition is obtained by multiplying every element of a subgroup H by an element x, either on the right or on the left.DEFINITION 3.1Let G be a group, and let H be a subgroup of G. If x is an element of G, we define the setx H={ x·y | y ∈ H }.The set x H is called a left coset of H. We could just have easily considered multiplication the other way. This would give us the setH x = { y·x | y ∈ H }which is called a right coset of H.These definitions are very easy to work with mathematically, but it is not apparent that the right cosets and the left cosets form the partitions of the group that we have been observing. This has to be proved!SageMath has been programed to mimic this notation. Thus, we can form right cosets by multiplying each element in H by an element in the group, say RotRt.H = Group(Spin); H H * RotRtNotice that SageMath multiplies every element in the set H by RotRt. In a sense, it distributes RotRt throughout the set.We can find a left coset with respect to H in the same way:RotRt * HThe left and right cosets are different, as we observed in the circle graphs.We will denote the set of all left cosets of H by G/H, and will denote the set of all right cosets ofH by H\G. Albeit the notation is strange, but it reminds us which side the element of G is multiplied on.SageMath can help us to find all left and right cosets of G with respect to H. The commands areLftCoset(G, H) RtCoset(G, H)Note that in both commands, the first argument is the group or list of elements in the group, and the second argument is the subgroup. These give us a list of cosets. (Since each coset is displayed as a list of elements we find ourselves with a "list of lists.") These are exactly the partitions we observed in the circle graphs of LeftMult(Spin) and RightMult(Spin). In fact, we begin to see some patterns in the cosets. First of all, all of the cosets are the same size. Also, every element of the group appears once, and only once, in each of the two coset lists. However, we have yet to prove these patterns persist. The next two lemmas do this for us. We begin by showing that all cosets of H are of the same length.LEMMA 3.1Let G be a group and H be a finite subgroup of G. Then all left and right cosets of H contain |H| elements.Click here for the proof.Next we must show that every element of G is in exactly one left coset and one right coset. This can be worded as follows:LEMMA 3.2If two left or two right cosets have any elements in common, they are in fact the same coset. That is, H x ∩ H y ≠ { } implies that H x = H y,andx H ∩ y H ≠ { } implies that x H = y H.Click here for the proof.EXAMPLE:Find all of the left and right cosets of the subgroup {1, 11} of the group Z15*.Since Z15* is abelian, the left and right cosets are the same. By Lemmas 3.1 and 3.2, the cosets will be disjoint, and all have 2 elements. One of the cosets will be the subgroup {1, 11}. 
We pick an element not in the subgroup, say 2, and multiply each element of the subgroup by 2, producing the coset {2, 7}. We pick another element not yet in a coset, and repeat the process. To find the coset containing 4, we multiply the subgroup by 4, to product the coset {4, 14}. At this point, only 2 elements are unaccounted for, so they must be in their own coset, {8, 13}. So the list of cosets are {{1, 11}, {2, 7}, {4, 14}, {8, 13}}.These last two proofs were called lemmas instead of propositions, since they lead to an important theorem.THEOREM 3.1: Lagrange's TheoremLet G be a finite group, and H a subgroup of G. Then the order of H divides the order of G. That is, |G| = k·|H| for some positive integer k.Click here for the proof.Lagrange's theorem has many important consequences. We call the applications of an important theorem Corollaries.COROLLARY 3.1Let G be a finite group, and let x be an element of G. Then the order of x divides |G|.Click here for the proof.COROLLARY 3.2Let G be a finite group of order n and let x be an element of G. Then xn = e.Click here for the proof.COROLLARY 3.3A group of prime order is cyclic. Click here for the proof.COROLLARY 3.4Let n be a positive integer, and x a number coprime to n. Then x&981;(n) ≡ 1 (mod n)where &981;(n) is Euler's totient function.Click here for the proof.When n = p is prime then &981;(p) = p − 1, and Corollary 3.4 says thatxp−1 ≡ 1 (mod p).This result is known as Fermat's little theorem. Fermat had proved this without the help of group theory, which was a real challenge! Yet this result becomes a trivial consequence of a larger theorem when we look at the supporting group structure.DEFINITION 3.2If H is a subgroup of G,we can define the index of H in G, denoted [G:H], to be the number of right cosets in H\G. Of course this is the same as the number of left cosets in G/H.Notice that when G is a finite group we have by the argument in Lagrange's theorem (3.1) that |G| = |H|·[G:H].How To Write a Secret MessageIn this section, we will learn of one important application of Lagrange's Theorem. We will learn how to write a message that no one can read except for the person to whom the message is sent, even if everyone in the world knows the code! This sounds like an impossibility, but we will see that it can be done with group theory. This code has applications in internet security and secure data transmissions.EXAMPLETo introduce this code, we begin by considering the group Z33*, formed by all of the generators of Z33.The order of this group is &981;(33). We can either look at the table in the previous notebook, or use SageMath's built in EulerPhi functionEulerPhi(33)to see that there are 20 elements. It isn't hard to determine the generators of Z33:{1, 2, 4, 5, 7, 8, 10, 13, 14, 16, 17, 19, 20, 23, 25, 26, 28, 29, 31, 32}.This is the set of elements in Z33*.G = ZStar(33); GLet us consider making a circle graph that maps each element to that element raised to a certain power. For example, we can map each element to its square by the commandCircleGraph(G, Pow(2))This yields a rather perplexing graph, since some elements have many arrows point to them, for example 4 is obtained by squaring either 2, 13, 20, or 31 in this group. Those elements that have "square roots" seem to have four of them, while the majority of the elements do not have "square roots" at all. Of course a "square root of x" is defined as any element whose square is x. 
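You can confirm this observation without the circle graph. Among the 20 units modulo 33, every element that has a square root has exactly four of them, so only five of the twenty are squares at all. A small plain-Python check (independent of the SageMath helpers):

```python
from math import gcd
from collections import Counter

n = 33
units = [x for x in range(1, n) if gcd(x, n) == 1]      # the 20 elements of Z33*
square_counts = Counter(pow(x, 2, n) for x in units)    # how often each square occurs

print(len(units))                    # 20
print(set(square_counts.values()))   # {4}: each attained square has exactly four roots
print(sorted(square_counts))         # the five squares: [1, 4, 16, 25, 31]
```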
Although there are some other interesting patterns to explore here, let us look at the circle graphs of different powers. This graph maps every element to its cube:CircleGraph(G, Pow(3))This graph has a very different behavior. No two arrows heads are at the same element, indicating that no two elements have the same cube. Also, every element has some arrow that points to it. Thus, we see from the figure that the cube function is both one-to-one and onto. Hence, every element has a unique cube root.EXPERIMENT:Form different circle graphs by replacing the 3 in the last graph with higher numbers. Look at all of the circle graphs from Pow(4) to Pow(21). In these 18 circle graphs, which ones form one-to-one and onto functions? Why does Pow(20) behave the way it does? (Hint: see Corollary 3.4.) Why does Pow(21) behave like Pow(1)? Here is one explanation as to why x → x3 is one-to-one and onto. Let us plot both Pow(3) and Pow(7) on the same graph:CircleGraph(G, Pow(3), Pow(7))The green arrows representing Pow(7) go in the opposite direction as the red arrows representing Pow(3). This indicates that to take a cube root of a number modulo 33, we take its seventh power! This is because &981;(33) = 20, so using Corollary 3.4, (x3)7 = x21 = x20·x = e·x = x.Here we used the observation that Pow(21) behaved like Pow(1) because of Corollary 3.4. We say that the function Pow(7) is the inverse of the function Pow(3). The difference between this examples and the first example Pow(2) is that 3 is coprime to &981;(33) = 20, whereas 2 is not. See if you can use this idea to prove the following generalization:PROPOSITION 3.1Suppose G is a finite group of order m, and that r is some integer which is coprime to m. Then the function f(x) = xr is one-to-one and onto. In other words, we can always find the unique rth root of any element of G. Click here for the proof.EXAMPLE:Let us try another experiment. Rather than using the elements in Z33*, suppose we considered all of the numbers from 0 to 32. This will no longer by a group, since we have included some non-invertible elements. But we can still take the cube of any number, and can still plot the circle graph. Here it is:R = MultMod(33); R CircleGraph(R, Pow(3))This function is still one-to-one and onto, even with the extra elements. What about Pow(7)? Is this still the inverse of Pow(3) for this new set? Let's check by graphing both Pow(3) and Pow(7) on the same graph again:CircleGraph(R, Pow(3), Pow(7))We can see that Pow(7) is still the inverse of Pow(3), even with the extra elements. This will happen in general whenever n is a product of two distinct primes, as seen by the following proposition.PROPOSITION 3.2Suppose n is a product of two distinct primes and r·s ≡ 1 (mod &981;(n)). Then for all values of x less then n, (xr)s ≡ x (mod n).Click here for the proof.The function x → x3 is not only one-to-one and onto, but also mixes up most of the numbers 0 through 32 fairly well. This suggests an encryption scheme. For example, we could let A = 1 J = 10 S = 19 B = 2 K = 11 T = 20 C = 3 L = 12 U = 21 D = 4 M = 13 V = 22 E = 5 N = 14 W = 23 F = 6 O = 15 X = 24 G = 7 P = 16 Y = 25 H = 8 Q = 17 Z = 26 I = 9 R = 18 Space = 0 Now any message can be converted to a sequence of numbers:CAN YOU READ THISbecomes3, 1, 14, 0, 25, 15, 21, 0, 18, 5, 1, 4, 0, 20, 8, 9, 19 We replace each number with its cube, modulo 33. 
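If you would rather check this step without the circle graph, Python's built-in `pow` does modular exponentiation directly; the same call with exponent 7 undoes the cubing, as promised by Proposition 3.2:

```python
message = [3, 1, 14, 0, 25, 15, 21, 0, 18, 5, 1, 4, 0, 20, 8, 9, 19]

encoded = [pow(x, 3, 33) for x in message]   # cube each number modulo 33
decoded = [pow(y, 7, 33) for y in encoded]   # seventh powers undo the cubes

print(encoded)              # [27, 1, 5, 0, 16, 9, 21, 0, 24, 26, 1, 31, 0, 14, 17, 3, 28]
print(decoded == message)   # True
```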
Using the above circle graph, we get 27, 1, 5, 0, 16, 9, 21, 0, 24, 26, 1, 31, 0, 14, 17, 3, 28To decipher this, one would take the seventh power of each number in the sequence modulo 33, and convert back to letters in the alphabet. Voilà! There are two main drawbacks with this "modulo 33" code. The first is that, for longer messages, the letter E which encodes to 26 would appear most frequently in the encoded string. Even someone who didn't know the code might deduce that 26 stands for E even if they didn't know anything about algebra.The second problem is that anyone who knew how to encrypt the message could use Proposition 3.2 and the fact that Z33* is of order 20 to come up with the inverse of 3 in Z20* being 7, and thereby could decode the message. What we would like is a code in which anyone could encrypt a message, but only the person who originated the code could decipher.We can solve both of these problems just by picking n to be a larger value! Rather than picking n = 33, suppose that n was the product of two very large prime numbers p and q, say 80 digits each. Then n would have about 160 digits! (We will not be able to draw circle graphs on this large of a group.) By the totient function theorem, &981;(n) would be (p − 1)·(q − 1). Rather than picking r = 3, we would pick some large value of r which is coprime to (p − 1)·(q − 1). A four digit number would be sufficient. Our code will be x → y = xr mod n.Even though r and n are large, this computation is a piece of cake for SageMath.We decode this by finding the inverse of r in the group Z&981;(n)*, and call this s. In our modulo 33 code s = 7, but here s could be almost as large as n. By Proposition 3.2, we can decode the message by the operationy → x = ys mod n.This operation "undoes" the encryption, since we know that(xr)s ≡ x (mod n) if r·s ≡ 1 (mod &981;(n)).How does this solve both of the problems of the modulo 33 code? First of all, instead of encrypting a letter at a time, n is large enough for us to encrypt an entire line at a time. For example, the messageCAN YOU READ THIS can be encrypted by the single number0301140025152100180501040020080919.Notice that every two digits of this number represent one letter, according to the same code we had before. This eliminates the problem of one number having a higher occurrence frequency, as in the modulo 33 code.The other problem with the modulo 33 code is that everyone who knew how to encrypt the message could also decode a message. Is the the case when n is large? In order to decode a message, one must know the value of s, which is given by the inverse of r (mod &981;(n)). This is easy to do with SageMath once &981;(n) is known, but how difficult it is to find &981;(n)! One needs to know the prime factorization of n, which would be about 160 digits long. In fact, adding two digits to p and q makes the factorization 10 times harder. So by making the prime numbers even larger, we can be assured that the factorization connot be done within one's lifetime. Thus, only the person who originated the prime numbers p and q could come up with the value of s. This encryption scheme is called the RSA encryption, which is an abbreviation of Rivest-Shamir-Adleman, the three mathematicians who came up with this code. 
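The whole scheme fits in a few lines of ordinary Python. The sketch below uses deliberately small primes so the numbers stay readable, and it assumes Python 3.8+, where `pow(r, -1, m)` computes a modular inverse; real RSA would use primes of 80 or more digits, as discussed above.

```python
# Toy RSA with small primes -- illustrative only, certainly not secure.
p, q = 61, 53                # in practice: two random primes of 80+ digits
n = p * q                    # published modulus
phi = (p - 1) * (q - 1)      # totient of n, kept secret
r = 17                       # published exponent, coprime to phi
s = pow(r, -1, phi)          # secret exponent: inverse of r mod phi (Python 3.8+)

x = 65                       # the "message", a number less than n
y = pow(x, r, n)             # encrypt:  y = x^r mod n
assert pow(y, s, n) == x     # decrypt:  y^s mod n recovers x (Proposition 3.2)
print(n, r, s, y)            # 3233 17 2753 2790
```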
There are SageMath routines set up in this notebook that allow us to experiment with RSA encryption.EXAMPLE:Suppose that a friend wishes to send a completely confidential message, but that all correspondence between you and your friend were being monitored.The first step is for you to come up with 2 very large primes of 80 digits or more. Even though SageMath cannot factor large numbers, it does have a secret way of testing whether a number is prime. The functionNextPrime(n)uses this feature to find the next prime number after n. So by entering two large random numbers, SageMath can find two large random primes. Say you pick p to bep = NextPrime(12345678901234567890123456789012345678901234567890123456789012345678901234567890); pand pick q to beq = NextPrime(98765432109876543210987654321098765432109876543210987654321098765432109876543210); qThese numbers aren't too random, but they will do. The backslash in the output shows that the line is continued, so we have one large number. However, the backslash will not work on the input line. Note that clicking on the left side of the output toggles the format from a line that is broken to a single line with a scrollbar.SageMath uses a variation of the Agrawal, Kayal, and Saxena primality test to find the next prime number.This test can definitely determine whether a number is prime, in a time that is a polynomial function of the numberof digits in p and q. Hence, we can quickly know for certain that the numbers p and qwill be prime. Next, multiply the two numbers together, and tell your friend the product n.n = p*q; nAlso give your friend a 4 digit number r which happens to be coprime to both (p−1) and (q−1). Simply find a 4 digit prime which does not happen to be a factor of (p−1) and (q−1). Here is a random (or maybe not so random) four digit prime number:r = NextPrime(1234); rNow check to make sure this is not a factor of (p−1) and (q−1):gcd((p-1)*(q-1), r)Since the gcd is 1, r is coprime to (p−1) and (q−1). Even if this communication is monitored, only the value of n, not the p and q which generated it, would be known to any spies. Next tell your friend to convert the message to a number. There are routines in this notebook for converting messages numbers quickly. They are MessageToNumber("Message")andNumberToMessage(Number)Note we put the message in quotation marks. Therefore, we can find the number corresponding to "HERE IS A MESSAGE" by typingx = MessageToNumber("HERE IS A MESSAGE"); xWe can then convert this back to a message with the commandNumberToMessage(x)Encrypting this message is accomplished by raising x to the rth power modulo n. This is done by the SageMath command PowerMod. The format for this command isPowerMod(a, b, n) = ab mod nIt can compute such powers very quickly.y = PowerMod(x, r, n); ySuppose your friend has done this operation with a different message, giving you the answery = 695574051470244068706114266574256043827756065440747032387700788446830783525388331288538827113160595765080505966693143199918635215093570816224139063616551830794How do you decode this message?First, find the number s which is the inverse of r modulo (p−1)·(q−1). This is given bys = PowerMod(r, -1, (p-1)*(q-1) ); sThis is the secret key to the code. Because the spies only know the values of n and r, there is no way for them to factor n to come up with p and q. So this value s will still be a secret. Next, compute ys mod n using the commandx = PowerMod(y, s, n); xFinally, the commandNumberToMessage(x)puts the message into readable form. 
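`MessageToNumber` and `NumberToMessage` are routines that ship with this notebook. If you want a rough standalone equivalent of the two-digits-per-letter scheme described earlier (space = 00, A = 01, ..., Z = 26), a plain-Python sketch could look like the following; it approximates the notebook's helpers rather than reproducing their actual implementation.

```python
ALPHABET = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"    # index 0 is the space character

def message_to_number(message: str) -> int:
    """Two digits per character: 'CAN YOU READ THIS' -> 301140025152100180501040020080919."""
    return int("".join(f"{ALPHABET.index(ch):02d}" for ch in message.upper()))

def number_to_message(number: int) -> str:
    digits = str(number)
    if len(digits) % 2:                      # restore the leading zero dropped by int()
        digits = "0" + digits
    return "".join(ALPHABET[int(digits[i:i + 2])] for i in range(0, len(digits), 2))

x = message_to_number("CAN YOU READ THIS")
print(x)                                     # 301140025152100180501040020080919
print(number_to_message(x))                  # CAN YOU READ THIS
```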
With SageMath, this code becomes easy to use!There are many other applications to this code besides sending secret messages. For example, suppose to get an account at the Electronic Bank, you are asked to pick two large random prime numbers, p and q. The bank then gives you the account number n = p·q, and a number r, and publishes these. The bank then gives you the number s which will be the inverse of r in Z&981;(n)*.Now you can use the number s to decode a message such asy = MessageToNumber("Check 1034: Pay to the order of $43.50"); y x = PowerMod(y, s, n); xYou can send this number to , along with your account number. Then he, his bank, and anyone else who needs to know, can verify what this says as follows:y = PowerMod(x, r, n) NumberToMessage(y)What does this prove? The only person knowing the number s sent this message, which would be the owner of the account. The fact that the message was so encoded becomes a signature to the check. Using this method, one can send an "electronic check" (even through the e-mail) that is impossible to forge.Normal SubgroupsWhen we defined left cosets and right cosets, we were in essence defining how we could take an element of a group G and multiply it with a subgroup of G. But this definition can apply to any subset of G. That is, if X is any subset of G, we can defineX u = { x·u | x ∈ X}, u X = { u·x | x ∈ X}.We can also, using the same idea, multiply two subsets of G together.DEFINITION 3.3If X and Y are two subsets of a group G, we can defineX·Y = { x·y | x ∈ X and y ∈ Y}.We find that {u}·X is the same thing as u X, so this definition is consistent with the previous definitions. Since multiplication is associative, we also haveX·(Y·Z) = (X·Y)·Z.This raises some interesting questions. If X and Y are subgroups of G, will X·Y be a subgroup? Suppose X and Y are cosets of G with respect to a subgroup H. Will X·Y be a coset of G?EXAMPLE:One way to answer these questions is to experiment! If we have two subsets of G, (which SageMath represents as a list of elements in brackets), we can multiply the sets together using the command Mult(G, X, Y).Let's pick a group large enough to see some patterns. Execute the following command to load the octahedral group.InitGroup("e") AddGroupVar("a","b","c") Define(a^2, e) Define(b^3, e) Define(c^4, e) Define(b*a, a*b*b) Define(c*a, a*b*c) Define(c*b, a*c*c) G = Group(); GRecall that this group has order 24. There are many subgroups in this group. Let us find a few.H = Group(c); H K = Group(b*c); K L = Group(a*b*c*c); LWe can now explore what happens when we multiply two subgroups together.H * KLooks like a lot of elements! Let's count them.len(_)So H·K has 16 elements. Apparently, each element of H, when multiplied by an element in K, produces a unique element.Is H·K a subgroup of G? Lagrange's theorem (3.1) quickly tells us no! Since 16 does not divide 24, H·K is not a subgroup of G, in spite of the fact that H and K are.EXPERIMENT:Try working multiplying two other subgroups of G. Consider for example K·H, H·L, L·H, K·L, or L·K. Do any of these form subgroups of G? EXAMPLE:Suppose instead that X and Y are both cosets of G with respect to a subgroup H. We can let again let H be the group generated by the element c.H = Group(c); HHere are the right and left cosets:RtCoset(G, H) LftCoset(G, H)As expected, the left and right cosets are different, since the group is not commutative. 
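The same phenomenon is easy to reproduce outside SageMath. In the sketch below, S3 is modelled as permutations of (0, 1, 2) stored as tuples, H is the two-element subgroup generated by a transposition, and the left and right cosets of H come out different, just as they did for Terry's group.

```python
from itertools import permutations

def compose(f, g):
    """(f*g)(i) = f(g(i)) for permutations stored as tuples."""
    return tuple(f[g[i]] for i in range(len(g)))

G = list(permutations(range(3)))        # the six elements of S3
H = [(0, 1, 2), (1, 0, 2)]              # subgroup generated by the transposition (0 1)

left_cosets = {tuple(sorted(compose(x, h) for h in H)) for x in G}    # the x H
right_cosets = {tuple(sorted(compose(h, x) for h in H)) for x in G}   # the H x

print(left_cosets == right_cosets)      # False: for this H they really are different
for coset in sorted(left_cosets):
    print(coset)
```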
Let's pick two right cosets:X = H*b; X Y = H*a*c; YNow, let's see if the product gives us anything useful.X * YLet's find the length of this mess.len(_)Not only can this fail to be a subgroup, but this cannot be a coset of a subgroup. Do we have any better luck with left cosets? Let's try it.Z = a*H; Z W = b*H; WWell, here goes nothing!Z * W len(_)That didn't seem to produce anything useful either. But before we give up completely, suppose we multiply a left coset by a right coset.W * YThat looks more promising. In fact, this is a subgroup! Compare this withGroup(a*c^2)to see that they are the same. EXPERIMENT:Try other combinations of a left coset times a right coset, such as Z·X, Z·Y, and W·X. How many elements do these have? Are any of them subgroups? Are any or them cosets of some subgroup? This experiment indicates that there might be some promise in exploring the products of left cosets times right cosets. Let's try another example to see if we can determine a pattern. EXAMPLE: Enter in the groupM = Group(a*b^2*c, c^2); MThis is also a group of order 4. Let's find the right and left cosets of G with respect to this group.RtCoset(G, M) LftCoset(G, M)There is more of a pattern in the cosets with this subgroup. Look carefully. The right and left cosets are the same! This particular subgroup allows us to explore more patterns. Because any left coset is also a right coset, they are interchangeable. We saw before that a left coset times a right coset produced something that looked like a coset. But in this case, left and right cosets are the same so let's try multiplying two of these together. Here are three of the cosets:H = a * M; H K = b * M; K L = c * M; LHere is the product of the first two:H * KLook carefully. This is one of the cosets of M. Will this always happen?EXPERIMENT:Try other multiplications of H, K, and L, such as K·H, H·L, L·H, K·L, and L·K. Do all of these products form cosets? Because the subgroup M has such special properties, we will give a special name for this type of group. DEFINITION 3.4A subgroup H of the group G is said to be normal if all left cosets are also right cosets, and conversely, all right cosets are also left cosets.That is, H is normal if G/H = H\G.We need a quick way to check whether a subgroup is normal, and the following propositions provides us with one.PROPOSITION 3.3A subgroup H is a normal subgroup of G if, and only if, g H g-1 = H for all elements g in G.Click here for the proof.This gives us a way to determine if a subgroup is normal, but we can improve on this test. PROPOSITION 3.4Let H be a subgroup of G. Then H is normal if, and only if, g·h·g-1 ∈ Hfor all elements g ∈ G and h ∈ H.Click here for the proof.We have already seen one example of a normal subgroup, but there are many others. For example, if G is any group, then the subgroups {e} and G are normal. These normal subgroups are said to be trivial.If, on the other hand, G is commutative, then any subgroup will be a normal subgroup.We conclude this section with a simple observation. PROPOSITION 3.5If H is a subgroup of G with index 2, then H is a normal subgroup.Click here for the proof.When we have a normal subgroup, the set of cosets will possess more properties than for standard subgroups. We will explore these in the next section.Quotient GroupsWhen we were experimenting with multiplying cosets together, it seemed that if the subgroup N was a normal subgroup of G, then the products of two cosets gave us another coset. We will now examine why this is true. 
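Before the proof, a quick numerical sanity check is reassuring. The plain-Python sketch below takes N = A3 (the even permutations) inside S3, confirms normality with the conjugation test of Proposition 3.4, and then checks that the element-wise product of any two cosets of N is again a coset, which is exactly what Lemma 3.3 asserts.

```python
from itertools import permutations

def compose(f, g):
    return tuple(f[g[i]] for i in range(len(g)))

def inverse(f):
    inv = [0] * len(f)
    for i, fi in enumerate(f):
        inv[fi] = i
    return tuple(inv)

G = list(permutations(range(3)))
# N = A3: the permutations with an even number of inversions
N = [p for p in G if sum(1 for i in range(3) for j in range(i) if p[j] > p[i]) % 2 == 0]

# Proposition 3.4: g n g^-1 stays inside N for every g in G and n in N
print(all(compose(compose(g, n), inverse(g)) in N for g in G for n in N))   # True

# Lemma 3.3 numerically: the product of two cosets of N is again a coset of N
def left_coset(x):
    return frozenset(compose(x, n) for n in N)

cosets = {left_coset(x) for x in G}
print(all(frozenset(compose(a, b) for a in A for b in B) in cosets
          for A in cosets for B in cosets))                                 # True
```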
LEMMA 3.3If N is a normal subgroup of G, then the product of two cosets of N produces a coset of N. In fact,a N·b N = (a·b)N.Click here for the proof.This proposition is very suggestive. Since we can multiply two cosets together, can the set of all cosets form another group? This is, in fact, exactly what happens. THEOREM 3.2: The Quotient Group TheoremLet N be a normal subgroup of G. Then the set of all cosets is a group, which is denoted by G/N, called the quotient group of G with respect to N.Click here for the proof.EXAMPLE:One of the easiest groups to consider is the group of integers &8484;, under addition. A subgroup of &8484; would consist of all multiples of k, for some non-negative k. (k = 0 and k = 1 give us the two trivial subgroups.) Since &8484; is commutative, these subgroups are normal. What would the cosets look like? Each coset would consist of all numbers equivalent modulo k. So there would be k cosets of k &8484; (except when k = 0). Thus, &8484;/k &8484; is essentially the same group as Zk. In the case of Zk, we used modulo arithmetic as the group operator. In essence, the notationx ≡ y (mod k)means that x and y belong in the same coset of &8484;/k &8484;. We can extend this notation to any normal subgroup. We say that "x is equivalent mod N to y," orx ≡ y (mod N)to mean that x and y belong in the same coset of N. It is easy to see thatx ≡ y (mod N) if, and only if, x·y-1 ∈ NIn §1.2, we defined a equivalence relation as a relation satisfying the three properties 1) (Reflexive) Every element x is equivalent to itself.2) (Symmetric) If x is equivalent to y, then y is equivalent to x.3) (Transitive) If x is equivalent to y, and y in turn is equivalent to z, then x is equivalent to z.Because of the fact the two elements are equivalent if they are in the same coset, it is clear that x ≡ y (mod N) is an equivalence relation. The equivalence classes would be the cosets of N for which the relation is defined.EXAMPLE:Consider the octahedral group:InitGroup("e") AddGroupVar("a", "b", "c") Define(a^2, e) Define(b^3, e) Define(c^4, e) Define(b*a, a*b^2) Define(c*a, a*b*c) Define(c*b, a*c^2) G = Group(); GWe found one normal subgroup to this group, namelyM = Group(a*b^2*c, c^2); MThe cosets, or equivalence classes, with respect to this subgroup are:Q = LftCoset(G, M); QWe can use the MultTable command on the quotient group:MultTable(Q)Notice that since the names of the elements are too long to enter them into each square, Mathematica uses a color code for the elements. Nonetheless, we can determine much information from this table. The first thing we can observe from this table is that this group is not commutative. For example,{a, a·c2, b2·c, b2·c3} · {b, a·b·c, b·c2, a·b·c3}does not give the same thing as {b, a·b·c, b·c2, a·b·c3} · {a, a·c2, b2·c, b2·c3}We have already seen a non-commutative group of order 6, namely S3. In fact, there is a copy of S3 sitting inside of the group G, which could be seen by the commandH = Group(a, b); HWe can compare the multiplication table of this subgroup with that of the quotient group Q:MultTable(Q) MultTable([e, b, b^2, a, a*b, a*b^2])The color patterns are not the same, but this doesn't mean that these two groups are not equivalent. There might be some way to rearrange the elements in the last command so that the color patterns in the two tables match.EXPERIMENT:Can you rearrange the elements {e, b, b2, a, a·b, a·b2} in the last command so that the color patterns in the two tables match? 
Those who enjoy logic puzzles should find this an easy exercise. (There is more than one solution.) Proofs: Proof of Lemma 3.1: It is clear from the definitions that H u and u H each contains at most |H| elements. In order to prove that the number is exactly |H| we need to show that two distinct elements of H produce two different elements in the cosets. Suppose that this were not the case in a right coset. We would have two different elements x and y for whichx·u = y·u,which multiplying on the right by u-1 gives x = y, a contradiction. Similar reasoning works for left cosets. Ifu·x = u·y,multiplying on the left by u-1 shows that x = y.Return to text Proof of Lemma 3.2:We begin with right cosets. Suppose there is an element g ∈ H x ∩ H y. Then there are elements h and k in H such thatg = h·x = k·y.Therefore, x = h-1·k·y,and so (*) H x = H h-1·k·y. Since H is a subgroup, h-1·k ∈ H, so that H h-1·k ⊆ H. Moreover, if u is in H, then u = (u·k-1·h)·(h-1·k) ∈ H h-1·k.Therefore,H ⊆ H h-1·k,and we have shown that H = H h-1·k. Combining this with (*) gives us H x = H y.We can do left cosets in the same way. Suppose we have g ∈ x H ∩ y H. Then we have elements h and k in H such thatg = x·h = y·k.Therefore, x = y·k·h-1,and so x H = y·k·h-1 H = y H.Return to text Proof of Theorem 3.1:We can use either left cosets or right cosets to prove this, so let us use right cosets. Every element of x in G is contained in at least one right coset. For example, x is contained in H x. Let k be the number of distinct right cosets. Then, if the right cosets areH x1, H x2, H x3, …, H xk,we can writeG = H x1 ∪ H x2 ∪ H x3 ∪ ··· ∪ H xk.The ∪'s represent the union of the cosets. But by Lemma 3.2, there are no elements in common among these sets, and so this union defines a partition of G. By Lemma 3.1, each cosets contains |H| elements. So |G| = k·|H|.Return to text Proof of Corollary 3.1:The order of x equals the order of the subgroup [x] of G. Therefore, by Lagrange's theorem (3.1), the assertion follows.Return to text Proof of Corollary 3.2:Let m denote the order of x. By Corollary 3.1, n = m k for some integer k. Then we have xn = xm·k = (xm)k = ek = e.Return to text Proof of Corollary 3.3:Suppose G is of order p, which is prime. Then the only positive divisors of p are 1 and p, so by Lagrange's theorem (3.1) any subgroup must be of order 1 or p. If x is any element of G besides the identity, then [x] contains x as well as the identity. Thus, G = [x] so G is cyclic.Return to text Proof of Corollary 3.4:We simply apply Corollary 3.2 to the group Zn*. This group has &981;(n) elements, and if x is coprime to p then x is a generator of Zn, so x is in Zn*.Return to text Proof of Proposition 3.1:Since G is of order m, we have by Corollary 3.2 that xm = e for all x in G. If r and m are coprime, then r is a generator in the additive group Zm. But this means that r is an element of the group Zm*, and so there is an inverse element s = r-1. Thus, s·r = 1 in Zm*. Another way we could say this iss r = k m + 1for some integer k.Now we are ready to take the rth root of an element. If y is an element of G, then the rth root of y in G is merely ys. To see this, note that(ys)r = ys·r = y(k m+1) = (ym)k·y = ek·y = y.So ys is one rth root of y. But ys must be a different element for every y in G, since the rth power of ys is different. Since the rth root of every element of G is accounted for, by the pigeonhole principle there cannot be two rth roots to any element. 
Thus, ys gives the unique rth root of y in G.Return to text Proof of Proposition 3.2:The proposition is trivial if x = 0, so we will assume that x > 0.If x is coprime to n, then proposition is true by Proposition 3.1. Suppose x is not coprime to n = p·q, where p and q are the two distinct primes. By the totient function theorem (2.1), &981;(n) = (p − 1)·(q − 1). The number x would be a multiple of either p or q, say p.Then x = p·a for some integer a, and soxr·s = (p·a)r·s = pr·s·ar·swill be a multiple of p.Also, x is not a multiple of q since x is less than n. Since r·s ≡ 1 (mod (p − 1)(q − 1)),r·s ≡ 1 (mod (q − 1)). Thus, by Proposition 3.1 again, we havexr·s ≡ x (mod q).Since we also have xr·s ≡ x (mod p), by the Chinese remainder theorem (1.3), we have, since p and q are coprime,xr·s ≡ x (mod p·q = n).Return to text Proof of Proposition 3.3:First of all, suppose H is normal, and let g be an element of G. Then g H and H g both contain the element g.Since the left and right cosets are the same, we haveg H = H gMultiplying both sides on the right by g-1 givesg H g-1 = H g·g-1 = HNow, suppose that g H g-1 = H for all elements g in G. ThenH g = g H g-1·g = g H e = g H.Thus, every left coset is also a right coset, and vice versa.Return to text Proof of Proposition 3.4:If H is a normal subgroup of G, then g·h·g-1 ∈ g H g-1, which is H by Proposition 3.3.Let us suppose that for all g in G and h in H, g·h·g-1 ∈ H. Theng H g-1 ⊆ H.In particular, if we replace every g with g-1, we getg-1H(g-1)-1 ⊆ H Multiplying both sides of the equation by g on the left gives us H g ⊆ g H, and multiplying on the right by g-1gives us H ⊆ g H g-1. Since we also have that g H g-1 ⊆ H, we can conclude that g H g-1 = H. Then from Proposition 3.3, H is normal.Return to text Proof of Proposition 3.5:Since H is a subgroup of G with index 2, there are two left cosets and two right cosets. One of the left cosets is e H, which is the set of elements in H. The other left coset must then be the set of elements not in H. But the same thing is true for the right cosets, so the left and right cosets are the same. Thus, H is normal.Return to text Proof of Lemma 3.3: We simply observe that a N·b N = a·(N b)·N = a·(b N)·N = (a·b)·(N·N) = (a·b)N.Note that N b = b N because N is a normal subgroup.Return to text Proof of Theorem 3.2:We simply have to check that G/N satisfies the four requirements in Definition 1.5. The closure property is given by Lemma 3.3. To check associativity,a N·(b N·c N) = a N·(b·c) N = (a·(b·c)) N = ((a·b)·c) N = (a·b) N·c N = (a N·b N)·c N.The identity element is e N = N, and we can check thate N·a N = (e·a) N = a N, and a N·e N = (a·e) N = a N.Finally, the inverse of a N is a-1N, sincea N·a-1N = (a·a-1) N = e N = N,and a-1N·a N = (a-1·a) N = e N = N.Thus, the set of all cosets forms a group.Return to text SageMath Interactive Problems§3.1 31)Find the left and right cosets of the subgroup {e, c, c2, c3} of the octahedral group, given by:InitGroup("e") AddGroupVar("a", "b", "c") Define(a^2, e); Define(b^3, e); Define(c^4, e) Define(b*a, a*b^2); Define(c*a, a*b*c); Define(c*b, a*c^2) G = Group(); GAre the left and right cosets the same? §3.1 32)Find the left and right cosets of the subgroup {e, c2, a·b2·c, a·b2·c3} of the octahedral group, given by:InitGroup("e") AddGroupVar("a", "b", "c") Define(a^2, e); Define(b^3, e); Define(c^4, e) Define(b*a, a*b^2); Define(c*a, a*b*c); Define(c*b, a*c^2) G = Group(); GAre the left and right cosets the same? 
§3.2 21)This exercise is required in order to do the RSA encryption Problems 22 or 23. Using SageMath's NextPrime command, find two large prime numbers p and q, at least 80 digits each. This is done by the two SageMath commandsp = NextPrime( ...large number goes here... ); p q = NextPrime( ...large number goes here... ); qBefore executing these commands, replace the "...large number goes here..." with two random numbers at least 80 digits long.We will use the value r = 10007. Verify that this number is coprime to (p − 1) and (q − 1) by executing the following:gcd( (p - 1)*(q - 1), 10007)If this command yields 10007 instead of 1, go back and find new values for p and q.Once the gcd is 1, compute n = p·q, and save this to a file. To do this, entern = p*q; print('n =', n)This line can then be copied and pasted into a text file, using a text editor such as Notepad or TextEditor.Next, find the secret number s, which deciphers a message:s = PowerMod(10007, -1, (p - 1)*(q - 1))You will need to save this number for future reference. Enterprint('s =', s)and copy and paste the single line version output to the same text file. Save this file with a name of your choice.Finally, copy and paste just the n number into the body of an e-mail message, sent to the professor. Do not send the value of the secret number s, but save it for a future exercise. §3.2 22)Using the values of n and s from Problem 21, send an "electronic check" to your favorite professor for $100.00. This check will be in the form of a huge number, x. Once this number is found, enterprint('x =', x)then copy and paste the single line version of the output into the body of a letter. §3.2 23)After doing Problem 21, your instructor will send you a response with a value of y. Copy and paste this line into the input cell below and evaluate it. Also copy and paste the n and s lines from the text file you created in Problem 21, and execute these as well. You can verify that these 3 values are entered correctly into SageMath with the commandsn s yUsing these values of n and s, decode the messege y and hand in (on paper) what it says. §3.2 24) tried creating his encryption number with the two primesp = NextPrime(71587027345719754873415671567856782163741561519737155752525673649286739584756092); p q = NextPrime(p + 1); qWhen he publicized the product n = p·q, along with the value r = 6367, he received a message from a friend:y = 3092722521993064335403878476414515883199432204869058005976140725073546523106848249491531282456640454385678472107616521242043590910817888839981759972041752306977What did this message say? §3.3 19)Show that there is a group Q which is generated by two elements a and b, for whicha4 = e, b2 = a2, b·a = a3·b, a2 ≠ e.This can be entered into SageMath with the commandInitGroup("e") AddGroupVar("a", "b") Define(a^4, e) Define(b^2, a^2) Define(b*a, a^3*b) Q = Group(a, b); QFind all subgroups of this group, and show that all subgroups are normal, even though the group is non-abelian. (Write down the list of left cosets and right cosets for each subgroup found.) §3.3 20)Use SageMath, along with a bit of trial and error, to find a subgroup of order 12 of the octahedral group. Show that this subgroup is a normal subgroup. The following reloads the octahedronal group:InitGroup("e"); AddGroupVar("a", "b", "c") Define(a^2, e); Define(b^3, e); Define(c^4, e) Define(b*a, a*b^2); Define(c*a, a*b*c); Define(c*b, a*c^2) G = Group(); G§3.4 19)Define in SageMath the group Z*105. How many elements does this group have? 
Consider the subgroup H generated by the element 11. A circle graph demonstrating the cosets G/H can be obtained by the commandCircleGraph(G, Mult(11))By looking at the circle graph, determine the cosets of G with respect to H. What is the order of the element 2·H in the quotient group G/H? §3.4 20)Here is a group of order 20 from Problem 18 of §2.2:InitGroup("e") AddGroupVar("a", "b") Define(a^5, e) Define(b^4, e) Define(b*a, a^2*b) G = Group(); GTable of Contents1  Use pyresample to make a projected image2  Read the lons/lats from the MYD03 file3  get the map projection from corners.json4  Use pyresample to define a new grid in this projection5  resample the longitudes on this grid6  replace missing values with floating point nan7  Plot the image using cartopy Use pyresample to make a projected imageIn the cartopy_mapping_pyproj notebook we stored projectioncoords in a json file called corners.json. This notebookreads that information back in to plot lats/lons on a mapimport a301 import json from a301.utils.data_read import download import a301 import pprint import shutil from pyhdf.SD import SD, SDC import json import pprint import cartopy from pyresample import kd_tree read_data=False if read_data: filename_M3='MYD03.A2013222.2105.006.2013223155808.hdf' download(filename_M3) for filename in [filename_M3,filename_M2]: local_file = Path.cwd() / Path(filename) to_file = a301.data_dir / Path(filename) print(f'copy {local_file} to {to_file}') shutil.copy(local_file,to_file) import cartopy.crs as ccrs import matplotlib.pyplot as plt import cartopy from pathlib import Path import pprint import numpy as np import pdb #Read the lons/lats from the MYD03 file**substitute your filename**# Read the lats and lons from the MYD03 file filename_M3='MYD03.A2013222.2105.006.2013223155808.hdf' m3_path= a301.data_dir / Path(filename_M3) print(f'reading {m3_path}') m3_file = SD(str(m3_path), SDC.READ) lats = m3_file.select('Latitude').get() lons = m3_file.select('Longitude').get()reading /Users/phil/repos/a301_code/data/MYD03.A2013222.2105.006.2013223155808.hdfget the map projection from corners.jsonGet the map projection and extent from corners.jsonjson_file = a301.data_dir / Path('corners.json') with open(json_file,'r') as f: map_dict=json.load(f) pprint.pprint(map_dict){'extent': [-1285873.5967137816, 1561347.9917805532, -1179100.5032042824, 1297248.5261361937], 'lats': [32.13645206898284, 28.687374622563773, 45.73346985640787, 50.510827489422674, 32.13645206898284], 'lons': [-104.77089390290801, -129.005397891393, -138.038848796623, -107.001718605882, -104.77089390290801], 'proj4_params': {'datum': 'WGS84', 'ellps': 'WGS84', 'lat_0': 39.59910106367865, 'lon_0': -121.4048713497655, 'proj': 'laea', 'x_0': 0.0, 'y_0': 0.0}, 'proj4_string': '+datum=WGS84 +ellps=WGS84 +proj=laea ' '+lon_0=-121.4048713497655 +lat_0=39.59910106367865 +x_0=0.0 ' '+y_0=0.0 +no_defs', 'xcoords': [1561347.9917805532, -744961.1366254934, -1285873.5967137816, 1019738.9399581843, [...]Use pyresample to define a new grid in this projectionfrom pyresample import load_area, save_quicklook, SwathDefinition proj_params = map_dict['proj4_params'] swath_def = SwathDefinition(lons, lats) area_def=swath_def.compute_optimal_bb_area(proj_dict=proj_params) dir(area_def)resample the longitudes on this gridfill_value=-9999. 
area_name = 'modis swath 5min granule' image_lons = kd_tree.resample_nearest(swath_def, lons.ravel(), area_def, radius_of_influence=5000, nprocs=2,fill_value=fill_value) print(f'\ndump area definition:\n{area_def}\n') print((f'\nx and y pixel dimensions in meters:' f'\n{area_def.pixel_size_x}\n{area_def.pixel_size_y}\n'))dump area definition: Area ID: laea_otf Description: On-the-fly laea area Projection: {'datum': 'WGS84', 'ellps': 'WGS84', 'lat_0': '39.59910106367865', 'lon_0': '-121.4048713497655', 'proj': 'laea', 'x_0': '0.0', 'y_0': '0.0'} Number of columns: 1489 Number of rows: 2244 Area extent: (-1269560.846045296, -1177850.7780355075, 1532203.80246403, 1265820.4615973115) x and y pixel dimensions in meters: 1881.641805580474 1088.980053312308replace missing values with floating point nannan_value = np.array([np.nan],dtype=np.float32)[0] image_lons[image_lons< -9000]=nan_valuePlot the image using cartopycrs = area_def.to_cartopy_crs() ax = plt.axes(projection=crs) ax.coastlines() ax.set_global() plt.imshow(image_lons, transform=crs, extent=crs.bounds, origin='upper') plt.colorbar(); crs.globe.to_proj4_params()!pip install --upgrade -q pip jax jaxlib # Install Flax at head: !pip install --upgrade -q git+https://github.com/google/flax.git import jax from typing import Any, Callable, Sequence, Optional from jax import lax, random, numpy as jnp import flax from flax.core import freeze, unfreeze from flax import linen as nn from jax.config import config config.enable_omnistaging() # Linen requires enabling omnistaging* Class attributes are attributes of class specified outside any function. * They are same for all instances of the class.* In below syntax, ```features``` is not a class attribute. In the ```__init__()``` of parent class, it will be initialized. It is different for different objects, and must be provided during creation of object.class ExplicitMLP(nn.Module): features: Sequence[int] def setup(self): ''' This function is called automatically after __postinit__() function. Here we can register submodules, variables, parameters you will need in your model. ''' self.layers = [nn.Dense(feat) for feat in self.features] def __call__(self, inputs): ''' Is called whenever inputs are sent in the model.apply() It doesn't matter whether inputs contain params or not. Don't think about it. This function just need specifies the flow. ''' x = inputs for i, lyr in enumerate(self.layers): x = lyr(x) if i!=len(self.layers)-1: x = nn.relu(x) return x* In the above class, ```model.layers``` won't be accessible from outside the class. It seems like these layers come into existence only when ```model.apply()``` is called.* Below is an example of a neat trick done by flax. If you would like to modify/define the initialisation procedure for a module, at first sight it looks like you will have to pass in and maintain what method to use outside of class(like with ```params```). But, what flax does is that it recognizes that the initialisation method is basically just a combination of function and a random key, so, it will allow you to store and maintain the function part inside the class! (You can do so for functions, but not for shared state.) And this function will take the random key+ shapes etc. as its input and produce deterministic output based on that, which will be used to provide the initial parameters.key = random.PRNGKey(0) key1, key2 = random.split(key, 2) x = random.normal(key1, (4,4)) #First dimension will automatically be interpretted as batch-dimension. No need to use vmap. 
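# (Editorial note) model.init(key2, x) on the next line traces __call__ once with this
# example input, infers every kernel/bias shape from it, and returns the parameters as
# a frozen pytree keyed by layer name; nothing is stored on the module instance itself.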
model = ExplicitMLP(features=[3,4,5]) params = model.init(key2, x) #Would go on init-ing all the internal layers too.The ```model.apply()``` below, would have to call each of its sub-layer as specified in ```__call__``` function above. Before calling each of it's sub layers, it sets that specific layer's params properly and would also set various flags that would make sure that you can only use ```__call__``` from inside ```model.apply()``` or ```model.init()```.y = model.apply(params, x) #Can't do y = model((params,x)) print('initialized parameter shapes:\n', jax.tree_map(jnp.shape, unfreeze(params))) print('output shape:\n', y.shape)initialized parameter shapes: {'params': {'layers_0': {'bias': (3,), 'kernel': (4, 3)}, 'layers_1': {'bias': (4,), 'kernel': (3, 4)}, 'layers_2': {'bias': (5,), 'kernel': (4, 5)}}} output shape: (4, 5)Below is another easier method for specifying the flow of steps in the model. We define as well as use the layers directly, specifying only what to pass to it.class SimpleMLP(nn.Module): features: Sequence[int] @nn.compact def __call__(self, inputs): x = inputs for i, feat in enumerate(self.features): x = nn.Dense(feat, name=f'layers_{i}')(x) #No need to do init/apply etc. as we are in @nn.compact if i!=len(self.features)-1: x=nn.relu(x) return x key = random.PRNGKey(0) key1, key2 = random.split(key, 2) x = random.uniform(key1, (4,4)) model = SimpleMLP([4, 3, 5]) params = model.init(key2,x) y = model.apply(params, x) print('initialised parameter shapes:\n', jax.tree_map(jnp.shape, unfreeze(params))) print('output shape:\n', y.shape)initialised parameter shapes: {'params': {'layers_0': {'bias': (4,), 'kernel': (4, 4)}, 'layers_1': {'bias': (3,), 'kernel': (4, 3)}, 'layers_2': {'bias': (5,), 'kernel': (3, 5)}}} output shape: (4, 5)Compact notation for defining computation models from scratch, using mathematical operations(only) alongside defining any parameters that the model has. The ```self.param()``` behave differently based on whether ```__call__``` has been called by ```init()``` or ```apply()```.class SimpleDense(nn.Module): features: int kernel_init: Callable = nn.initializers.lecun_normal() bias_init: Callable = nn.initializers.zeros @nn.compact def __call__(self, inputs): kernel = self.param('kernel', self.kernel_init, (inputs.shape[-1], self.features)) y = jnp.dot(inputs, kernel) bias = self.param('bias', self.bias_init, (self.features, )) y = y+bias return y key = random.PRNGKey(0) key1, key2 = random.split(key, 2) x = random.uniform(key1, (4,4)) model = SimpleDense(features=3) params = model.init(key2, x) y = model.apply(params, x) print('initialised parameter shapes:\n', jax.tree_map(jnp.shape, unfreeze(params))) print('output shape:\n', y.shape)initialised parameter shapes: {'params': {'bias': (3,), 'kernel': (4, 3)}} output shape: (4, 3)If the above model is implemented using ```setup()``` way, it won't be able to fill in the blank below as no input is available in ```setup()``` function.class SimpleDense(nn.Module): features: int kernel_init: Callable = nn.initializers.lecun_normal() bias_init: Callable = nn.initializers.zeros def setup(self): self.kernel = self.param('kernel', self.kernel_init, (___________, self.features)) bias = self.param('bias', self.bias_init, (self.features, )) @nn.compact def __call__(self, inputs): y = jnp.dot(inputs, self.kernel)+self.bias return y* Following code shows how to define variables for a model, apart from its parameters. * The variables, like parameters, are stored in a tree. 
* And, like parameters, they are handled outside the class.
* To define a variable, specify the entire path from root to the final variable. Here we have specified ```('batch_stats', 'mean')```.
* Due to ```@nn.compact```, the variables and parameters are only initialised and defined once, but all the operations specified are performed every time ```model.apply()``` is called.
class BiasAdderWithRunningMean(nn.Module):
    decay: float = 0.99

    @nn.compact
    def __call__(self, x):
        is_initialized = self.has_variable('batch_stats', 'mean')
        ra_mean = self.variable('batch_stats', 'mean',       # variable's entire path name
                                lambda s: jnp.zeros(s),      # initialization function
                                x.shape[1:])                 # input to the initialization function
        mean = ra_mean.value
        bias = self.param('bias',
                          lambda rng, shape: jnp.zeros(shape),  # since it's a parameter, its lambda must take both rng and shape
                          x.shape[1:])
        if is_initialized:
            ra_mean.value = self.decay * ra_mean.value \
                + (1.0 - self.decay) * jnp.mean(x, axis=0, keepdims=True)
        return x - ra_mean.value + bias
* The ```model.apply()``` call has been modified below. You must specify which variable collections are mutable, and receive their updated values in the output.
* The variable ```y``` still contains the value returned by the ```__call__``` function defined above.
* ```model.init()``` returns all the initialized parameters, i.e. both variables and params. All of those are sent into the ```apply()``` call (and hence they don't need to be initialised again in ```__call__```).
* Although ```model.apply()``` returns the updated variables, ```params_n_variables``` still holds the old ones. Since variables are handled outside the class, the copy inside ```params_n_variables``` has to be refreshed here as well.
key = random.PRNGKey(0)
key1, key2 = random.split(key, 2)
x = random.uniform(key1, (5,))
model = BiasAdderWithRunningMean(decay=0.99)
params_n_variables = model.init(key2, x)
print(params_n_variables)
for i in range(10):
    x = random.normal(key2 + i, (5,))
    y, updated_variables = model.apply(params_n_variables, x, mutable=['batch_stats'])
    old_variables, params = params_n_variables.pop('params')  # remaining tree is the first output, the popped part (params) is the second
    params_n_variables = freeze({'params': params, **updated_variables})  # new tree built from the available components
    print(updated_variables)
print('initialised parameter shapes:\n', jax.tree_map(jnp.shape, unfreeze(params)))
print('output shape:\n', y.shape)
Optimizers in flax
The parameters of the model are stored in the optimizer and are available in ```optimizer.target```.
from flax import optim
optimizer_def = optim.GradientDescent(learning_rate=0.01)
optimizer = optimizer_def.create(params)   # these params are stored inside the optimizer object and need not be handled outside
loss_grad_fn = jax.value_and_grad(loss)    # assumes a scalar loss(params) function defined elsewhere in the notebook
for i in range(101):
    loss_val, grad = loss_grad_fn(optimizer.target)
    optimizer = optimizer.apply_gradient(grad)
Gaussian Processes for Timeseries Forecasting
In this notebook we run some experiments to demonstrate how we can use Gaussian Processes in the context of time series forecasting.
This material is part of a talk on [Gaussian Process for Time Series Analysis](https://de.pycon.org/program/pydata-knlnbb-gaussian-process-for-time-series-analysis-dr-juan-orduz/) presented at the [PyCon DE & PyData 2019 Conference](https://de.pycon.org/) in Berlin.**References:**- [Gaussian Processes for Machine Learning](http://www.gaussianprocess.org/gpml/), by and .- [Gaussian Processes for Timeseries Modelling](http://www.robots.ox.ac.uk/~sjrob/Pubs/philTransA_2012.pdf), by , , , , and .- [Bayesian Data Analysis](http://www.stat.columbia.edu/~gelman/book/), by , , , , , and .- [scikit-learn docs: 1.7. Gaussian Processes](https://scikit-learn.org/stable/modules/gaussian_process.html). In particular I recomend the example: [Gaussian process regression (GPR) on Mauna Loa CO2 data](https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpr_co2.htmlsphx-glr-auto-examples-gaussian-process-plot-gpr-co2-py).- Blog Posts: - [Bayesian Regression as a Gaussian Process](https://juanitorduz.github.io/reg_bayesian_regression/) - [An Introduction to Gaussian Process Regression](https://juanitorduz.github.io/gaussian_process_reg/) Prepare Notebookimport numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() %matplotlib inlineExample 1In this first example we consider a seasonal stationary time series. Generate Data- Time Variable Let us define a "time" variable $t\in \mathbb{N}$.# Number of samples. n = 1000 # Generate "time" variable. t = np.arange(n) data_df = pd.DataFrame({'t' : t})- Seasonal ComponentLet us define a function to generate seasonal components (Fourier modes).# Generate seasonal variables. def seasonal(t, amplitude, period): """Generate a sinusoidal curve.""" y1 = amplitude * np.sin((2*np.pi)*t/period) return y1 # Add two seasonal components. data_df['s1'] = data_df['t'].apply(lambda t : seasonal(t, amplitude=2, period=40)) # Define target variable. data_df['y1'] = data_df['s1']Let us plot this seasonal variable:plt.rcParams['figure.figsize'] = [13, 8] fig, ax = plt.subplots() sns.lineplot(x='t', y='s1', data=data_df, label='s1', ax=ax) ax.set(title='Seasonal Component', xlabel='t', ylabel='') ax.legend(loc='lower left');- Gaussian Noise Finally, we add some noise.# Set noise standard deviation. sigma_n = 0.3 data_df['epsilon'] = np.random.normal(loc=0, scale=sigma_n, size=n) # Add noise to target variable. data_df ['y1'] = data_df ['y1'] + data_df ['epsilon']Let us plot the resulting data:fig, ax = plt.subplots() sns.lineplot(x='t', y='y1', data=data_df, label='y1', ax=ax) ax.set(title='Sample Data 1', xlabel='t', ylabel='') ax.legend(loc='lower left');Define ModelLet us now define the kernel of the gaussian process model. We include the following kernel components ([recall](http://www.gaussianprocess.org/gpml/chapters/RW4.pdf) that the sum of kernels is againi a kernel):- [`WhiteKernel`](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.WhiteKernel.html) to account for noise. 
- [`ExpSineSquared`](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.ExpSineSquared.html) to model the periodic component.We add bounds to the kernel hyper-parameters which are optimized by maximizing the log-marginal-likelihood (see [documentation](https://scikit-learn.org/stable/modules/gaussian_process.html)).from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared, ConstantKernel k0 = WhiteKernel(noise_level=0.3**2, noise_level_bounds=(0.1**2, 0.5**2)) k1 = ConstantKernel(constant_value=2) * ExpSineSquared(length_scale=1.0, periodicity=40, periodicity_bounds=(35, 45)) kernel_1 = k0 + k1Next we initialize the [`GaussianProcessRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.htmlsklearn.gaussian_process.GaussianProcessRegressor) object.from sklearn.gaussian_process import GaussianProcessRegressor gp1 = GaussianProcessRegressor( kernel=kernel_1, n_restarts_optimizer=10, normalize_y=True, alpha=0.0 )Split Data We now prepare and split the data for the model.X = data_df['t'].values.reshape(n ,1) y = data_df['y1'].values.reshape(n ,1) prop_train = 0.7 n_train = round(prop_train*n) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:]Model Fit + PredictionsLet us fit the model and generate predictions.gp1.fit(X_train, y_train) #Generate predictions. y_pred, y_std = gp1.predict(X, return_std=True) data_df['y_pred'] = y_pred data_df['y_std'] = y_std data_df['y_pred_lwr'] = data_df['y_pred'] - data_df['y_std'] data_df['y_pred_upr'] = data_df['y_pred'] + data_df['y_std']We plot the predictions.fig, ax = plt.subplots() ax.fill_between( x=data_df['t'], y1=data_df['y_pred_lwr'], y2=data_df['y_pred_upr'], color='black', alpha=0.15, label='credible_interval' ) sns.lineplot(x='t', y='y1', data=data_df, label = 'y1', ax=ax) sns.lineplot(x='t', y='y_pred', data=data_df, label='y_pred', color='black', ax=ax) ax.axvline(n_train, color='red', linestyle='--', label='train_test_split') ax.set(title='Prediction Sample 1', xlabel='t', ylabel='') ax.legend(loc='upper left');Let us compute the $R^2$ of the prediction on the test set.gp1.score(X=X_test, y=y_test)Example 2In this example we add a linear trend component. - Linear Trend Component# Generate trend component. def linear_trend(beta, x): """Scale vector by a scalar.""" trend_comp = beta * x return trend_comp data_df['tr1'] = data_df['t'].apply(lambda x : linear_trend(0.01, x)) # Add trend to target variable y_1. data_df['y2'] = data_df['y1'] + data_df['tr1']Let us see the trend plot:fig, ax = plt.subplots() sns.lineplot(x='t', y='y2', data=data_df, label='y_2', ax=ax) ax.set(title='Sample Data 2', xlabel='t', ylabel='') ax.legend(loc='upper left');Define ModelFor this second example we add a [`RBF`](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.RBF.html) kernel to model the trend component.from sklearn.gaussian_process.kernels import RBF k0 = WhiteKernel(noise_level=0.3**2, noise_level_bounds=(0.1**2, 0.5**2)) k1 = ConstantKernel(constant_value=2) * ExpSineSquared(length_scale=1.0, periodicity=40, periodicity_bounds=(35, 45)) k2 = ConstantKernel(constant_value=10, constant_value_bounds=(1e-2, 1e3)) * RBF(length_scale=100.0, length_scale_bounds=(1, 1e4)) kernel_2 = k0 + k1 + k2 # Define GaussianProcessRegressor object. 
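# (Editorial note) n_restarts_optimizer re-runs the kernel hyper-parameter search from
# several random initialisations to avoid poor local optima of the log-marginal
# likelihood; normalize_y=True standardises the targets before fitting; and alpha=0.0
# adds no extra diagonal jitter because the WhiteKernel term already models the noise.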
gp2 = GaussianProcessRegressor( kernel=kernel_2, n_restarts_optimizer=10, normalize_y=True, alpha=0.0 )Split Data We split the data as above.y = data_df['y2'].values.reshape(n ,1) y_train = y[:n_train] y_test = y[n_train:]Model Fit + Predictionsgp2.fit(X_train, y_train) # Generate predictions. y_pred, y_std = gp2.predict(X, return_std=True) data_df['y_pred'] = y_pred data_df['y_std'] = y_std data_df['y_pred_lwr'] = data_df['y_pred'] - data_df['y_std'] data_df['y_pred_upr'] = data_df['y_pred'] + data_df['y_std']We plot the predictions.fig, ax = plt.subplots() ax.fill_between( x=data_df['t'], y1=data_df['y_pred_lwr'], y2=data_df['y_pred_upr'], color='black', alpha=0.15, label='credible_interval' ) sns.lineplot(x='t', y='y2', data=data_df, label = 'y2', ax=ax) sns.lineplot(x='t', y='y_pred', data=data_df, label='y_pred', color='black', ax=ax) ax.axvline(n_train, color='red', linestyle='--', label='train_test_split') ax.set(title='Prediction Sample 2', xlabel='t', ylabel='') ax.legend(loc='upper left');Let us compute the $R^2$ of the prediction on the test set for this second model.gp2.score(X=X_test, y=y_test)Example 3In this third example we add a second seasonal component. - Add Other Seasonal Component# Create other seasonal component. data_df['s2'] = data_df['t'].apply(lambda t : seasonal(t, amplitude=1, period=13.3)) # Add to y_2. data_df['y3'] = data_df['y2'] + data_df['s2'] fig, ax = plt.subplots() sns.lineplot(x='t', y='y3', data=data_df, label='y3', ax=ax) ax.set(title='Sample Data 3', xlabel='t', ylabel='') ax.legend(loc='upper left');Define ModelWe add another `ExpSineSquared` kernel to the one of Example 2.k0 = WhiteKernel(noise_level=0.3**2, noise_level_bounds=(0.1**2, 0.5**2)) k1 = ConstantKernel(constant_value=2) * ExpSineSquared(length_scale=1.0, periodicity=40, periodicity_bounds=(35, 45)) k2 = ConstantKernel(constant_value=10, constant_value_bounds=(1e-2, 1e3)) * RBF(length_scale=100.0, length_scale_bounds=(1, 1e4)) k3 = ConstantKernel(constant_value=1) * ExpSineSquared(length_scale=1.0, periodicity=12, periodicity_bounds=(10, 15)) kernel_3 = k0 + k1 + k2 + k3 # Define GaussianProcessRegressor object. gp3 = GaussianProcessRegressor( kernel=kernel_3, n_restarts_optimizer=10, normalize_y=True, alpha=0.0 )Split Datay = data_df['y3'].values.reshape(n ,1) y_train = y[:n_train] y_test = y[n_train:]Model Fit + Predictionsgp3.fit(X_train, y_train) y_pred, y_std = gp3.predict(X, return_std=True) data_df['y_pred'] = y_pred data_df['y_std'] = y_std data_df['y_pred_lwr'] = data_df['y_pred'] - data_df['y_std'] data_df['y_pred_upr'] = data_df['y_pred'] + data_df['y_std']We plot the predictions:fig, ax = plt.subplots() ax.fill_between( x=data_df['t'], y1=data_df['y_pred_lwr'], y2=data_df['y_pred_upr'], color='black', alpha=0.15, label='credible_interval' ) sns.lineplot(x='t', y='y3', data=data_df, label = 'y3', ax=ax) sns.lineplot(x='t', y='y_pred', data=data_df, label='y_pred', color='black', ax=ax) ax.axvline(n_train, color='red', linestyle='--', label='train_test_split') ax.set(title='Prediction Sample Data 3', xlabel='t', ylabel='') ax.legend(loc='upper left'); # Compute R-squared. gp3.score(X=X_test, y=y_test)Example 4In this last example we consider a non-linear trend component. - Non-Linear Trend Component# Generate trend component. def non_linear_trend(x): """Scale and take square root.""" trend_comp = 0.2 * np.power(x, 1/2) return trend_comp # Compute non-linear trend. data_df ['tr2'] = data_df['t'].apply(non_linear_trend) # Add trend to target variable. 
data_df['y4'] = data_df['y3'] + data_df['tr2']
Let us see the trend plot:
fig, ax = plt.subplots()
sns.lineplot(x='t', y='y4', data=data_df, ax=ax)
ax.set(title='Sample Data 4', xlabel='t', ylabel='');
Define Model
Instead of an `RBF` kernel to model the trend, we use a [`RationalQuadratic`](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.RationalQuadratic.html) which *can be seen as a scale mixture (an infinite sum) of RBF kernels with different characteristic length-scales*.
from sklearn.gaussian_process.kernels import RationalQuadratic
k0 = WhiteKernel(noise_level=0.3**2, noise_level_bounds=(0.1**2, 0.5**2))
k1 = ConstantKernel(constant_value=2) * ExpSineSquared(length_scale=1.0, periodicity=40, periodicity_bounds=(35, 45))
k2 = ConstantKernel(constant_value=10) * RationalQuadratic(length_scale=500, alpha=50.0, alpha_bounds=(1, 1e3))
k3 = ConstantKernel(constant_value=1) * ExpSineSquared(length_scale=1.0, periodicity=12, periodicity_bounds=(10, 15))
kernel_4 = k0 + k1 + k2 + k3
# Define GaussianProcessRegressor object.
gp4 = GaussianProcessRegressor(
    kernel=kernel_4,
    n_restarts_optimizer=10,
    normalize_y=True,
    alpha=0.0
)
Split Data
y = data_df['y4'].values.reshape(n, 1)
y_train = y[:n_train]
y_test = y[n_train:]
Model Fit + Predictions
gp4.fit(X_train, y_train)
y_pred, y_std = gp4.predict(X, return_std=True)
data_df['y_pred'] = y_pred
data_df['y_std'] = y_std
data_df['y_pred_lwr'] = data_df['y_pred'] - data_df['y_std']
data_df['y_pred_upr'] = data_df['y_pred'] + data_df['y_std']
Finally, let us plot the prediction:
fig, ax = plt.subplots()
ax.fill_between(
    x=data_df['t'],
    y1=data_df['y_pred_lwr'],
    y2=data_df['y_pred_upr'],
    color='black',
    alpha=0.15,
    label='credible_interval'
)
sns.lineplot(x='t', y='y4', data=data_df, label='y4', ax=ax)
sns.lineplot(x='t', y='y_pred', data=data_df, label='y_pred', color='black', ax=ax)
ax.axvline(n_train, color='red', linestyle='--', label='train_test_split')
ax.set(title='Prediction Sample Data 4', xlabel='t', ylabel='')
ax.legend(loc='upper left');
# Compute R-squared.
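# (Editorial note) GaussianProcessRegressor.score returns the coefficient of
# determination R^2 on the supplied points (here the held-out test segment); it
# evaluates only the posterior mean and ignores the predictive uncertainty y_std.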
gp4.score(X=X_test, y=y_test)Table of Contents 1  Imports and settings2  DNN3  Working with time series Imports and settingsimport scipy.stats as st import numpy as np import numpy.random as rd import pandas as pd import sklearn.datasets as dt from keras.layers import Convolution1D, Convolution2D, BatchNormalization, Dense, Dropout from keras.models import Sequential import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns %matplotlib inline sns.set_style('darkgrid') plt.style.use('dark_background') mpl.rc("figure", figsize=(10,6)) mpl.rcParams['lines.linewidth'] = 3 mpl.rcParams['axes.facecolor'] = (1,1,1,0) mpl.rcParams['figure.facecolor'] = (1,1,1,0) sns.set_palette('deep', color_codes=True) def sinplot(flip=1): x = np.linspace(0, 14, 100) for i in range(1, 7): plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)DNNcd = {0:'r',1:'b', 2:'g'} blob = dt.make_blobs(3000, cluster_std=3) c = [cd[l] for l in blob[1]] plt.scatter(blob[0][:,0], blob[0][:,1], c=c, s=10) from keras.utils.np_utils import to_categorical model = Sequential() model.add(Dense(10, input_shape=(2,) , activation= 'relu')) model.add(Dense(5 , activation= 'relu')) model.add(Dense(3, activation= 'sigmoid')) model.compile(optimizer='adam', loss='categorical_crossentropy') model.summary() model.fit(blob[0][:2000], to_categorical(blob[1][:2000]), batch_size=3) plt.scatter(blob[0][2000:][:,0], blob[0][2000:][:,1], c=model.predict(blob[0][2000:]).argmax(axis=1), s=10)Working with time seriesX, y = dt.make_friedman1(3000) plt.scatter(X[:,0], X[:,1], c=y)18DCE097 **Project title: Weather Forecast using LSTM**1. Main aim is to reduce RMSE values for accurate predictions.2. We have taken dataset from Kaggle to predict the temperature of a particular place.* Train RMSE: 1.39 RMSE* Test RMSE: 1.38 RMSEimport numpy import matplotlib.pyplot as plt from pandas import read_csv import math from keras.models import Sequential from keras.layers import Dense, Dropout, LSTM, Bidirectional, GRU from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error # convert an array of values into a dataset matrix def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back), 0] dataX.append(a) dataY.append(dataset[i + look_back, 0]) return numpy.array(dataX), numpy.array(dataY) # fix random seed for reproducibility numpy.random.seed(7) # load the dataset dataframe = read_csv('/content/farm_temperature_data.csv', usecols=[1]) dataset = dataframe.values dataset = dataset.astype('float32') dataframe.head() # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.8) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] # reshape into X=t and Y=t+1 look_back = 1 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # reshape input to be [samples, time steps, features] trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # create and fit the LSTM network model = Sequential() model.add(LSTM(64, input_shape=(1, look_back), return_sequences=True)) model.add(LSTM(16, input_shape=(1, look_back), return_sequences=True)) model.add(LSTM(4, input_shape=(1, look_back), return_sequences=False)) model.add(Dropout(0.2)) model.add(Dense(1)) # # create and 
fit the BiLSTM network # model = Sequential() # model.add(Bidirectional(LSTM(64, input_shape=(1, look_back), return_sequences=True))) # model.add(Bidirectional(LSTM(16, input_shape=(1, look_back), return_sequences=True))) # model.add(Bidirectional(LSTM(4, input_shape=(1, look_back), return_sequences=False))) # model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) model.fit(trainX, trainY, epochs=20, batch_size=1, verbose=2) # make predictions trainPredict = model.predict(trainX) testPredict = model.predict(testX) # invert predictions trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) # calculate root mean squared error trainScore = numpy.sqrt(mean_squared_error(trainY[0], trainPredict[:,0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = numpy.sqrt(mean_squared_error(testY[0], testPredict[:,0])) print('Test Score: %.2f RMSE' % (testScore)) # shift train predictions for plotting plt.figure(figsize=(20,10)) trainPredictPlot = numpy.empty_like(dataset) trainPredictPlot[:, :] = numpy.nan trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict # shift test predictions for plotting testPredictPlot = numpy.empty_like(dataset) testPredictPlot[:, :] = numpy.nan testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict # plot baseline and predictions plt.title("Weather Forecast") plt.xlabel("Days") plt.ylabel("Temperature (Celcius)") plt.plot(scaler.inverse_transform(dataset), label="Actual") plt.plot(trainPredictPlot, label="Prediction (Train)") plt.plot(testPredictPlot, label="Prediction (Test)") plt.legend(loc="upper right") plt.show()Component AnalysisData calculation and visualization work for SCC/WCC/Monads/All Components.import json import statistics as stat import numpy as np import pandas as pd import csv as csv import matplotlib.pyplot as mpl import os from tqdm import tqdm import networkx as nx from collections import defaultdict, Counter import pickle pwd = "/home//shared/caringbridge/data/projects/sna-social-support/csv_data/" dyad_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/dyad_growth/" metadata_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/user_metadata" epoch_day = 86400000 # accounting for milliseconds epoch_yr = epoch_day * 365 srt = 1104537600000 # jan 1, 2005 rng = 12 * epoch_yr # until jan 1, 2017 (cant multiply by floats) six_months = 6 * 30 * epoch_day # lets say a month is 30 days # read the user->user interactions dataframe u2u_df = pd.read_feather(os.path.join(metadata_dir,"u2u_df.feather")) len(u2u_df) # load the list of valid users data_selection_working_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/data_selection" valid_user_ids = set() with open(os.path.join(data_selection_working_dir, "valid_user_ids.txt"), 'r') as infile: for line in infile: user_id = line.strip() if user_id == "": continue else: valid_user_ids.add(int(user_id)) len(valid_user_ids) # load the list of valid sites data_selection_working_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/data_selection" valid_site_ids = set() with open(os.path.join(data_selection_working_dir, "valid_site_ids.txt"), 'r') as infile: for line in infile: site_id = line.strip() if site_id == "": continue else: valid_site_ids.add(int(site_id)) len(valid_site_ids) 
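# (Editorial aside, illustrative only) the timestamps in this notebook are Unix-epoch
# milliseconds; a quick spot-check of the constants defined above:
print(pd.to_datetime(srt, unit='ms'))        # 2005-01-01, start of the observation window
print(pd.to_datetime(srt + rng, unit='ms'))  # 2016-12-29, ~12 years later (365-day years, leap days ignored)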
author_to_site = os.path.join(metadata_dir, "interaction_metadata.h5") df = pd.read_hdf(author_to_site) sorted_df = df.sort_values(by=["user_id", "site_id", "created_at"]) journals = sorted_df[sorted_df.int_type == "journal"] firsts = journals.drop_duplicates(subset=["user_id", "site_id"], keep="first") lasts = journals.drop_duplicates(subset=["user_id", "site_id"], keep="last") len(firsts), len(lasts) first_time = {a : b for a,b in zip(firsts.user_id, firsts.created_at)} last_time = {a : b for a,b in zip(lasts.user_id, lasts.created_at)} author_ind = {a : b for a,b in zip(firsts.index, firsts.user_id)} active_users = defaultdict(list) for d in tqdm(range(srt, srt + rng, epoch_day*7*4), position=0, leave=False): for ind in firsts.index: user_id = author_ind[ind] f = first_time[user_id] l = last_time[user_id] if f < d and l + six_months > d: active_users[d].append(user_id) valid_u2u_df = u2u_df[(u2u_df.from_user_id.isin(valid_user_ids))&(u2u_df.to_user_id.isin(valid_user_ids))] inits_df = valid_u2u_df.sort_values(by='created_at', ascending=True).drop_duplicates(subset=['from_user_id', 'to_user_id'], keep='first')Fall 2019 Network Component Gen over Time I would edit your core loop to construct a single graph object that is updated at each iteration of the loop: (1) old nodes that are no longer considered active are removed, (2) new nodes that are now considered active are added, and (3) any new edges between active nodes are added.index_error = 0 key_error = 0 with open(os.path.join(pwd, "revised_1219_scc.csv"), 'w', encoding="utf-8") as strong, \ open(os.path.join(pwd, "revised_1219_wcc.csv"), 'w', encoding="utf=8") as weak: strong_w = csv.writer(strong); weak_w = csv.writer(weak); for d in tqdm(range(srt, srt + rng, epoch_day*7*4), position=0, leave=False): G = nx.DiGraph() #nodes = set(active_users[d]) nodes = valid_user_ids inits_subset = inits_df[(inits_df.created_at <= d) & (inits_df.from_user_id.isin(nodes)) & (inits_df.to_user_id.isin(nodes))] edges = [tuple(row) for row in inits_subset[["from_user_id", "to_user_id"]].values] G.add_nodes_from(nodes) G.add_edges_from(edges) scc_sizes = []; wcc_sizes = []; scc_size = 0; wcc_size = 0; for i_, connected_nodes in enumerate(sorted(nx.strongly_connected_components(G), key=len)): scc_size = len(connected_nodes) scc_sizes.append(scc_size) for i_, connected_nodes in enumerate(sorted(nx.weakly_connected_components(G), key=len)): wcc_size = len(connected_nodes) wcc_sizes.append(wcc_size) sorted(scc_sizes); sorted(wcc_sizes); try: strong_row = (d, scc_sizes[0], scc_sizes[1], len(scc_sizes) - scc_sizes.count(1), scc_sizes.count(1)) weak_row = (d, wcc_sizes[0], wcc_sizes[1], len(scc_sizes) - wcc_sizes.count(1), wcc_sizes.count(1)) strong_w.writerow(strong_row); weak_w.writerow(weak_row); except IndexError: index_error += 1 continue len(pre_subset), len(post_subset), missed------ December 2019 Revisions for CSCWwith open(os.path.join(pwd, "revised_1219_scc.csv"), 'r', encoding='utf-8') as s, \ open(os.path.join(pwd, "revised_1219_scc.csv"), 'r', encoding='utf-8') as w: sg_life = pd.read_csv(s, index_col = 0, header=None, names=("d", "Largest", "Second", "# Components", "0th-Deg", "Missed")) wk_life = pd.read_csv(w, index_col = 0, header=None, names=("d", "Largest", "Second", "# Components", "0th-Deg", "Missed")) epoch_yr = epoch_day * 365 em = 10 mpl.rcParams['figure.figsize'] = [10, 3] mpl.rcParams['figure.dpi'] = 300 mpl.rcParams['font.family'] = "sans" mpl.rcParams['font.size'] = 8 sg_lifeNumericp1, = mpl.plot("Largest", color='b', data = 
wk_life) p2, = mpl.plot("0th-Deg", color='r', data = wk_life) p3, = mpl.plot("# Components", color='y', data=sg_life) p4, = mpl.plot("# Components", color='c', data=wk_life) labels = ('2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') x_pos = np.arange(srt, srt + epoch_yr * len(labels), epoch_yr) mpl.xticks(x_pos, labels = labels) mpl.xlabel("Years", fontsize = em) mpl.ylabel("Size (Users)", fontsize = em) #mpl.yscale("log") mpl.title("Component Size over Time", fontsize = em) mpl.legend([p1,p2,p3,p4], ["Largest WCC", "Monads", "Reciprocal Dyads (SCC >2)", "WCC"]) mpl.show() fig, ax = mpl.subplots() p1, = ax.plot("Largest", color = 'b', data = wk_life) p2, = ax.plot("0th-Deg", color = 'r', data = wk_life) p4, = ax.plot("# Components", color = 'y', data = wk_life) labels = ('2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') x_pos = np.arange(srt, srt + epoch_yr * len(labels), epoch_yr) ax.set_xticks(x_pos) ax.set_xlabel(labels) ax.tick_params(axis='y') mpl.xlabel("Years", fontsize = em) mpl.ylabel("WCC/Monads",fontsize = em) mpl.title("Component Size over Time", fontsize = em) ax2 = ax.twinx() p3, = ax2.plot("# Components", color = 'g', data=sg_life) ax2.set_ylabel('Dyads', color = 'g', fontsize = em) ax2.tick_params(axis='y', labelcolor='g') mpl.setp(ax, xticks=x_pos, xticklabels=labels) fig.legend([p1,p2,p3,p4], ["Largest WCC", "Monads", "Reciprocal Dyads (SCC >2)", "WCC"], loc='upper left') mpl.show()Proportionalrng = 11.5 * 365 * epoch_day with open(os.path.join(pwd, "nw_size.csv"), 'r', encoding='utf-8') as n: nw = pd.read_csv(n, index_col = 0, header=None, names=("d", "Auth", "Int", "Life")) sg_prop = pd.DataFrame(sg_life["Largest"] / nw["Life"], columns=("Proportion",)) wk_prop = pd.DataFrame(wk_life["Largest"] / nw["Life"], columns=("Proportion",)) mo_prop = pd.DataFrame(wk_life["0th-Deg"] / nw["Life"], columns=("Proportion",)) nsg_prop = pd.DataFrame(sg_life["# Components"] / nw["Life"], columns=("Proportion",)) nwk_prop = pd.DataFrame(wk_life["# Components"] / nw["Life"], columns=("Proportion",)) sg_prop = sg_prop.dropna(); wk_prop = wk_prop.dropna(); mo_prop = mo_prop.dropna(); nsg_prop = nsg_prop.dropna(); nwk_prop = nwk_prop.dropna(); p1, = mpl.plot("Proportion", color='b', data = wk_prop) p2, = mpl.plot("Proportion", color='r', data = mo_prop) p3, = mpl.plot("Proportion", color = 'g', data = nsg_prop) p4, = mpl.plot("Proportion", color = 'c', data = nwk_prop) labels = ('2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') x_pos = np.arange(srt, srt + epoch_yr * len(labels), epoch_yr) mpl.xticks(x_pos, labels = labels) mpl.xlabel("Years", fontsize = em) mpl.ylabel("Proportion of Users", fontsize = em) mpl.ylim(bottom = 0, top = 1) mpl.xlim(right= srt + 11.5 * 365 * epoch_day) #mpl.yscale("log") mpl.title("Component Proportions on CaringBridge over Time", fontsize = em) mpl.legend([p1,p2,p3,p4], ["Largest WCC", "Monads", "Reciprocal Dyads (>2)", "WCC"]) mpl.show() fig, ax = mpl.subplots() p1, = ax.plot("Proportion", color = 'b', data = wk_prop) p2, = ax.plot("Proportion", color = 'r', data = mo_prop) p4, = ax.plot("Proportion", color = 'c', data = nwk_prop) #lazy solution to scaling labels = ('2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') x_pos = np.arange(srt, srt + epoch_yr * len(labels), epoch_yr) ax.set_xticks(x_pos) ax.set_xlabel(labels) ax.tick_params(axis='y') mpl.xlabel("Years", fontsize = em) 
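# (Editorial note) the dual-axis figure below relies on ax.twinx() to give the dyad curve
# its own right-hand y-axis (capped at 0.01), since the reciprocal-dyad proportion would
# be invisible on the 0-1 scale used for the WCC and monad proportions.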
mpl.ylabel("Non-Dyadic Proportions",fontsize = em) mpl.title("Component Proportions on CaringBridge over Time", fontsize = em) ax2 = ax.twinx() p4, = ax2.plot("Proportion", color = 'g', data=nsg_prop) ax2.set_ylabel('Dyad Proportion', color = 'g', fontsize = em) ax2.tick_params(axis='y', labelcolor='g') mpl.setp(ax, xticks=x_pos, xticklabels=labels) ax.set_ylim(bottom = 0, top = 1) ax2.set_ylim(bottom = 0, top = .01) ax.set_xlim(right= srt + 11.5 * 365 * epoch_day) mpl.legend([p1,p2,p3,p4], ["Largest WCC", "Monads", "Reciprocal Dyads (>2)", "WCC"]) mpl.show()Analyze data from split0 and split1 folders generated by noisy_student and saves all figures in experiment/figures folder.# exp_dir = "/home/henriklg/master-thesis/code/kvasir-capsule/experiments/cl_500" exp_dir = "/home/henriklg/master-thesis/code/kvasir-capsule/experiments/cl_500" split0 = exp_dir+"/split0" split1 = exp_dir+"/split1" sub_dirs = ["0_teacher", "0_student", "1_teacher", "1_student", "2_teacher", "2_student"]Functionsimport matplotlib.pyplot as plt import numpy as np import seaborn as sns sns.set() SMALL_SIZE = 12 MEDIUM_SIZE = 14 BIGGER_SIZE = 16 plt.rc('font', size=SMALL_SIZE) # controls default text sizes plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title def l2d_old(line): return " ".join(line.split()).split(" ") def l2d(line): name = (line.split(" ")) # split on double whitespace name = next(sub for sub in name if sub) # fetch first non-empty cell name = name.strip() # remove whitespace from string metrics = (" ".join(line.split()).split(" ")[-4:]) return ([name]+metrics) def parse_classification_report(path): class_m = {} tot_m = {} if "teacher" in path: model = "teacher" else: model = "student" with open(path) as file: line = file.readline() line = file.readline() # skip first line line = file.readline() while line: data = l2d(line) class_m[data[0]] = { "prec": float(data[1]), "rec": float(data[2]), "f1": float(data[3]), "model": model } line = file.readline() if len(line) == 1: line = False line = file.readline() tot_m["acc"] = l2d_old(line)[1] line = file.readline() tot_m["macro"] = l2d_old(line)[2:5] line = file.readline() tot_m["weighted"] = l2d_old(line)[2:5] return class_m, tot_m import ast def parse_history(path): with open(path) as file: # loss - acc - val_los - val_acc loss = file.readline() acc = file.readline() val_loss = file.readline() val_acc = file.readline() history = { "loss": ast.literal_eval(loss.split(":")[-1].strip()), "acc": ast.literal_eval(acc.split(":")[-1].strip()), "val_loss": ast.literal_eval(val_loss.split(":")[-1].strip()), "val_acc": ast.literal_eval(val_acc.split(":")[-1].strip()) } return history def average_history(history_list): result = [0]*int(len(history_list[0])) for history in (history_list): for idx, val in enumerate(history): result[idx] += val result = [res/(len(history_list)) for res in result] return (result)Get the data from experiment resultsmets = ["loss", "acc", "val_loss", "val_acc"] hist0 = {key: [] for key in mets} hist1 = {key: [] for key in mets} lowest_epoch = 200 for hist, split in zip([hist0, hist1], [split0, split1]): for dir_ in sub_dirs: path = "{}/{}/history.txt".format(split, dir_) history = 
parse_history(path) hist["loss"].append(history["loss"]) hist["acc"].append(history["acc"]) hist["val_loss"].append(history["val_loss"]) hist["val_acc"].append(history["val_acc"]) if (len(history["loss"]) < lowest_epoch): lowest_epoch = len(history["loss"]) # Shorten the history lists to the lowest epoch for hist in [hist0, hist1]: for key, outerlist in hist.items(): hist[key] = [sublist[0:lowest_epoch] for sublist in outerlist] import pathlib def savefig(name): path = exp_dir+"/figures/" pathlib.Path(path).mkdir(parents=True, exist_ok=True) plt.savefig(path+name+".pdf", format="pdf")Get results from training on split_0 Accuracy and lossx = range(lowest_epoch) plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) for model in hist0["acc"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Training accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) for model in hist0["loss"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Training loss') savefig("split0_history_training") plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) for model in hist0["val_acc"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Validation accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) for model in hist0["val_loss"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Validation loss') savefig("split0_history_validation") avg_val_acc_0 = average_history(hist0["val_acc"]) avg_val_loss_0 = average_history(hist0["val_loss"]) plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) plt.plot(x, avg_val_acc_0, linewidth=1.5) plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Average validation accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) plt.plot(x, avg_val_loss_0, linewidth=1.5) plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Average validation loss') savefig("split0_average_validation_history")F1-measurereport_contents_0 = [] for model in sub_dirs: report_file = "{}/{}/classification_report.txt".format(split0, model) _, tot_m = parse_classification_report(report_file) report_contents_0.append(tot_m); # Compare weighted and macro precision, recall and f1-score metrics0 = { "prec": [], "rec": [], "f1": [], "acc": [] } metric = "weighted" for idx, content in enumerate(report_contents_0): metrics0["acc"].append(float(content["acc"])) metrics0["prec"].append(float(content[metric][0])) metrics0["rec"].append(float(content[metric][1])) metrics0["f1"].append(float(content[metric][2])) x = list(range(len(sub_dirs))) plt.figure(figsize=(9,6)) plt.plot( x,metrics0["prec"],'r', x,metrics0["rec"],'b', x,metrics0["f1"],'g', linewidth=1.5, marker='o' ) plt.legend(["Precision","Recall","F1-score", "acc"]) plt.xlabel("Iteration") plt.ylabel("Weighted average score") plt.tight_layout(pad=1.5) savefig("split0_prec_rec_f1") plt.figure(figsize=(9,6)) plt.plot( x,metrics0["acc"],'r--', x,metrics0["f1"],'g-', linewidth=1.5, marker='o' ) plt.legend(["Accuracy", "F1-Score"]) plt.xlabel("Iteration") plt.ylabel("Weighted average score") plt.tight_layout(pad=1.5) savefig("split0_acc_f1")Get results from training on split_1 Accuracy and lossx = range(lowest_epoch) plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) for model in hist1["acc"]: plt.plot(x, model, linewidth=1.5) 
plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Accuracy on training data"); plt.title('Training accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) for model in hist1["loss"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Loss on training data"); plt.tight_layout() plt.title('Training loss') savefig("split1_history_training") plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) for model in hist1["val_acc"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Validation accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) for model in hist1["val_loss"]: plt.plot(x, model, linewidth=1.5) plt.legend(sub_dirs); plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Validation loss') savefig("split1_history_validation") avg_val_acc_1 = average_history(hist1["val_acc"]) avg_val_loss_1 = average_history(hist1["val_loss"]) plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) plt.plot(x, avg_val_acc_1, linewidth=1.5) plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Average validation accuracy') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) plt.plot(x, avg_val_loss_1, linewidth=1.5) plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Average validation loss') savefig("split1_average_validation_history")F1-measurereport_contents_1 = [] for model in sub_dirs: report_file = "{}/{}/classification_report.txt".format(split1, model) _, tot_m = parse_classification_report(report_file) report_contents_1.append(tot_m); # Compare weighted and macro precision, recall and f1-score metrics1 = { "prec": [], "rec": [], "f1": [], "acc": [] } metric = "weighted" for idx, content in enumerate(report_contents_1): metrics1["acc"].append(float(content["acc"])) metrics1["prec"].append(float(content[metric][0])) metrics1["rec"].append(float(content[metric][1])) metrics1["f1"].append(float(content[metric][2])) x = list(range(len(sub_dirs))) plt.figure(figsize=(9,6)) plt.plot( x,metrics1["prec"],'r', x,metrics1["rec"],'b', x,metrics1["f1"],'g', linewidth=1.5, marker='o' ) plt.legend(["Precision","Recall","F1-score", "acc"]) plt.xlabel("Iteration") plt.ylabel("Weighted average score") plt.tight_layout(pad=1.5) savefig("split1_prec_rec_f1") plt.figure(figsize=(9,6)) plt.plot( x,metrics1["acc"],'r--', x,metrics1["f1"],'g-', linewidth=1.5, marker='o' ) plt.legend(["Accuracy", "F1-Score"]) plt.xlabel("Iteration") plt.ylabel("Weighted average score") plt.tight_layout(pad=1.5) savefig("split1_acc_f1")Average the results of both splitsx = range(lowest_epoch) plt.figure(figsize=(14,6)); # Subplot 1 plt.subplot(1, 2, 1) plt.plot( x, avg_val_acc_0, x, avg_val_acc_1, linewidth=1.5 ) plt.legend(["split_0", "split_1"]) plt.xlabel("Epoch") plt.ylabel("Accuracy"); plt.title('Average validation accuracy split_0 and split_1') plt.tight_layout() # Subplot 2 plt.subplot(1, 2, 2) plt.plot( x, avg_val_loss_0, x, avg_val_loss_1, linewidth=1.5 ) plt.legend(["split_0", "split_1"]) plt.xlabel("Epoch") plt.ylabel("Loss"); plt.tight_layout() plt.title('Average validation loss split_0 and split_1') savefig("both_average_validation_history") x = range(len(sub_dirs)) plt.figure(figsize=(9,6)) plt.plot( x, metrics0["f1"], x, metrics1["f1"], linewidth=1.5 ) plt.legend(["split_0", "split_1"]) plt.title("F1 score for split_0 and split_1") plt.xlabel("Iteration") plt.ylabel("F1 score") plt.tight_layout(pad=1.5) savefig("both_f1") average_acc = 
average_history([metrics0["acc"], metrics1["acc"]]) average_f1 = average_history([metrics0["f1"], metrics1["f1"]]) plt.figure(figsize=(9,6)) plt.plot( x,average_acc,'r--', x,average_f1,'g-', linewidth=1.5, marker='o' ) plt.legend(["Accuracy", "F1-Score"]) plt.title("Average F1 and Accuracy metrics for split0 and split1") plt.xlabel("Iteration") plt.ylabel("Weighted average score") plt.tight_layout(pad=1.5) savefig("both_avg_acc_f1") for acc, f1 in zip(average_acc, average_f1): print (round(acc,3), round(f1,3))Harvard CS109b, Spring 2018 Lecture 21 Installation and Setup Install MySQL1. Download free MySQL Community Edition [https://dev.mysql.com/downloads/](https://dev.mysql.com/downloads/)2. Start MySQL. On Mac, use System Preferences, MySQL3. Install Jupyter connector [http://bigdatazone.blogspot.com/2017/05/run-sql-from-within-jupyter-notebook.html](http://bigdatazone.blogspot.com/2017/05/run-sql-from-within-jupyter-notebook.html)The iPython - SQL connector: [https://github.com/catherinedevlin/ipython-sql](https://github.com/catherinedevlin/ipython-sql)# Load the Jupyter Extension %load_ext sql try: import pymysql pymysql.install_as_MySQLdb() except ImportError: pass # Connect to the database # TODO: substitute your MySQL password here %sql mysql://root:0=^b-jLRm)2W\/anaconda/lib/python3.6/site-packages/pymysql/cursors.py:165: Warning: (1287, "'@@tx_isolation' is deprecated and will be removed in a future release. Please use '@@transaction_isolation' instead") result = self._query(query)Create a database# Create the database %sql create database school # Connect to the new database # TODO: enter your password here %sql mysql://root:0=^b-jLRm)2W\") %sql INSERT into student values(23456789, "", "") %sql select * from student %sql CREATE TABLE classroom(ID int NOT NULL, name varchar(50), location varchar(20)); %sql insert into classroom values (123,"CS109B", "NWB103"); %sql insert into classroom values (234, "AM207", "MDG125") %sql select * from classroom %%sql create table registration( student_id int NOT NULL, class_id int NOT NULL, FOREIGN KEY (student_id) REFERENCES student(ID)); %sql desc registration %sql insert into registration values (12345678, 123) %sql select * from registration; %%sql select s.name "Student", c.name "Course" from student s, registration r, classroom c where c.id = r.class_id and s.id = r.student_id %%sql SELECT s.name as Student, c.name as Course FROM student s INNER JOIN registration r ON s.id = r.student_id INNER JOIN classroom c ON r.class_id = c.id ORDER BY s.namemysql://root:***@localhost * mysql://root:***@localhost/school 1 rows affected.Use Database in Python MySQL Libraryimport mysql.connector # TODO: substitute your MySQL password here cnx = mysql.connector.connect(user='root',password='', host='localhost',database='school') query = ("SELECT s.name as Student, c.name as Course FROM student s " + "INNER JOIN registration r ON s.id = r.student_id " + "INNER JOIN classroom c ON r.class_id = c.id "+ "ORDER BY s.name") query cursor = cnx.cursor() cursor.execute((query)) for (name, course) in cursor: print (name, course)Test User CS109BFrom Pandasimport pandas as pd df = pd.read_sql(query, con=cnx) len(df) enrollment = df.sort_values(['Student','Course']).groupby('Student') enrollment.head(5)To Reset Everything# Note that dropping the database may hang because of the Pandas and MySQL Library connections. # You may need to restart your Jupyter kernel for this to have effect. 
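# (Editorial aside, untested here) explicitly closing the connector opened earlier often
# avoids that hang before dropping the database:
#     cursor.close()
#     cnx.close()
# The %sql magic holds its own connection, so a kernel restart may still be needed.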
%sql drop database school %sql show databases; %sql create database school06/29/2020KRT (GDSD6 checks)# basic packages import os, glob import pandas as pd import numpy as np; np.random.seed(0) import itertools from collections import Counter, defaultdict import time # machine learning packages from sklearn from sklearn.preprocessing import MinMaxScaler #StandardScaler from sklearn import preprocessing, metrics from sklearn.feature_selection import VarianceThreshold from sklearn.model_selection import train_test_split, KFold, cross_validate, cross_val_score, StratifiedKFold from sklearn.linear_model import LogisticRegression, Lasso, LassoCV from sklearn.naive_bayes import GaussianNB from sklearn.svm import LinearSVC from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingClassifier, AdaBoostClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.metrics import roc_auc_score, auc, roc_curve, plot_roc_curve, confusion_matrix, accuracy_score from sklearn.metrics import explained_variance_score from scipy import interp import scipy.stats as stats from subprocess import call from IPython.display import Image # for IRF from functools import reduce # Needed for the scikit-learn wrapper function import irf from irf import (irf_utils, utils, irf_jupyter_utils) from irf.ensemble.wrf import RandomForestClassifierWithWeights from math import ceil # Import our custom utilities from imp import reload # Import tools needed for visualization import seaborn as sns; sns.set() import matplotlib import matplotlib.pyplot as plt from sklearn.tree import export_graphviz import pydot %load_ext autoreload %autoreload 2 save_dir = '../data/processed/fig4_modelling/KRT_ex' if not os.path.exists(save_dir): os.makedirs(save_dir) THRES=1 normal_tissues = ['Airway','Astrocytes','Bladder','Colon','Esophageal','GDSD6','GM12878','HMEC','Melanocytes','Ovarian', 'Pancreas','Prostate','Renal','Thyroid','Uterine'] normal_tissues_dict = dict(zip(normal_tissues,range(len(normal_tissues)))) rna_df = pd.read_csv('../data/interim/rna/tissue_tpm_sym.csv',index_col=0) rna_KRT_dict = pd.Series(rna_df.GDSD6.values, index=rna_df.index.values).to_dict()0. 
Data Wrangling- import- preprocess# import data_all = pd.read_csv('/Users/mguo123/Google Drive/1_khavari/omics_project-LD/pan_omics/data/processed/tissue_crms/all_count_comb_overall.csv',index_col=0,header=0) data_all = data_all[data_all.tissue.isin(normal_tissues)] data_all = data_all[data_all.iloc[:,2:].sum(axis=1)>1e-1] # expression labels exp_label = list(np.log10(data_all.exp.values+1e-2)) labels_all = np.array(np.array(exp_label)>THRES) tissues_label = data_all.tissue.values#np.array((data_all.exp>THRES).values) tissue_num_labels = data_all.tissue.map(normal_tissues_dict).values genes_all = data_all.index.values gene_to_num_dict = dict(zip(np.unique(genes_all),range(len(np.unique(genes_all))))) genes_num_all = np.vectorize(gene_to_num_dict.get)(genes_all) print('files_loaded', data_all.shape) data_all[:5] ## only tfs data_all.drop(['tissue','exp','num_loop_counts','num_loops','num_atac_regions_pro','num_atac_regions_loop'],axis=1,inplace=True) data_all.shape selector = VarianceThreshold() data_all_varfilt = selector.fit_transform(data_all) data_all_varfilt_cols = data_all.columns[selector.get_support()] print(data_all.shape, data_all_varfilt.shape, len(data_all_varfilt_cols)) scaler = MinMaxScaler() data_all_norm = scaler.fit_transform(data_all_varfilt) data_all_norm = pd.DataFrame(data_all_norm, columns = data_all_varfilt_cols) data_all_norm[:5]1. See if there is even a correlation in expression between GDSD6 genes and some of the famourus TFs (AP2B, JUN, FOS, etc)KRT_genes = pd.read_csv('../../rnaseq/unique_gene_lists/'+'GDSD6'+'_genes.txt',header=None).loc[:,0] len(KRT_genes) KRT_tfs = sorted(set(list(pd.read_csv('../data/external/krt_tfs_063020.csv')['tfs']))) print(KRT_tfs,len(KRT_tfs)) # data_all['tissue'] = tissues_label tfs_feat_dict = defaultdict(list) for feat in data_all.columns: tfs_feat_dict[feat.split('_')[0]].append(feat) KRT_TF_feat = [] KRT_TF_dict = defaultdict(list) for tf in KRT_tfs: if tf+'_pro' in data_all.columns: KRT_TF_feat.append(tf+'_pro') KRT_TF_dict[tf].append(tf+'_pro') if tf+'_loop' in data_all.columns: KRT_TF_feat.append(tf+'_loop') KRT_TF_dict[tf].append(tf+'_loop') KRT_TF_dict['all'] = KRT_TF_feat KRT_crm = data_all[tissues_label=='GDSD6'] KRT_exp_arr = np.array(exp_label)[tissues_label=='GDSD6'] KRT_exp_genes_arr = set(list(KRT_crm.index[np.array(KRT_exp_arr)>THRES])) KRT_exp_genes_num = len(KRT_exp_genes_arr)#np.where(np.array(KRT_exp_arr)>THRES)[0]) KRT_crm_KRT_genes = KRT_crm[KRT_crm.index.isin(KRT_genes)] # num KRT expressed genes, KRT_exp_genes_num, KRT_crm.shape, KRT_exp_genes_num/KRT_crm.shape[0] KRT_crm#.loc['TP63',] KRT_tf_expr = {} not_expr_tf=[] for tf in KRT_tfs: exp = rna_KRT_dict.get(tf) if exp is None: exp=0 KRT_tf_expr[tf]=exp if expCEBPA E2F1 FOXF2 KER2 LDB2 OTX1 PBX1 PBX2 POU1F1 POU3F1 POU3F2 POU3F3 POU3F4 POU4F1 POU4F2 POU4F3 POU5F1 POU5F1B POU6F1 POU6F2 PRRX1 SOX11 TFAP2B TFAP2D TWIST2 ZEB1 26get correlation between expression and feature valuecorr_dict = {} expr_genes_set = set() for feat in data_all.columns: expr_genes = set(list(KRT_crm.index[KRT_crm[feat]>0])).intersection(KRT_exp_genes_arr) expr_genes_set = expr_genes_set.union(expr_genes) num_expr_genes = len(expr_genes) corr_dict[feat] = [np.corrcoef(KRT_exp_arr, KRT_crm[feat])[0][1], feat in KRT_TF_feat, num_expr_genes/KRT_exp_genes] corr_df = pd.DataFrame.from_dict(corr_dict,orient='index') corr_df.columns = ['corr', 'KRT_TF','frac_expr_genes'] corr_df['corr_sq'] = corr_df['corr'].apply(lambda x:x**2) corr_df = corr_df.sort_values('corr_sq',ascending=False) 
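# (Editorial note) np.corrcoef above returns the full 2x2 correlation matrix, so the
# [0][1] entry is the Pearson r between log10 expression and each footprint feature;
# corr_sq = r**2 is then the share of expression variance explained by that one feature.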
corr_df.reset_index(inplace=True) corr_df.columns = ['feat', 'corr', 'KRT_TF','frac_expr_genes', 'corr_sq'] corr_df.reset_index(inplace=True) corr_df.fillna(0,inplace=True) print('all', len(expr_genes_set)/KRT_exp_genes) corr_df.to_csv(os.path.join(save_dir, 'stats_corr_KRT.csv')) corr_df.sort_values('corr_sq',ascending=False)[:50] sns.scatterplot(x='index', y='corr_sq',alpha=1, data=corr_df[corr_df.KRT_TF])Results: we can see that the correlation between expression of KRT genes and TFs footprinting features is quite low, furthermore there seems to not be an association beween the r^2 rank and whether or not the KRT feature is contains a KRT tf (manually annotated)# KRT_crm['SOX13_loop'].describe() # corr_df#[corr_df.KRT_TF]next we see if the KRT tfs are significantly enriched in KRT genesKRT_crm.shape results = {} count_all = data_all.sum().sum() count_KRTgene = KRT_crm.sum().sum() #mat_counts.sum(axis=1)[0], sum first row for tf, feat_list in KRT_TF_dict.items(): if len(feat_list)>0: # print(tf) KRT_crm_KRT_genes = KRT_crm[feat_list] count_KRTtf_KRTgene = KRT_crm_KRT_genes.sum().sum() # A count_KRTtf = data_all[feat_list].sum().sum() #mat_counts.sum(axis=0)[0], sum down first col count_KRTtf_neg = count_KRTtf - count_KRTtf_KRTgene # B count_neg_KRTgene = count_KRTgene - count_KRTtf_KRTgene #C count_neg_neg = count_all - count_KRTgene - count_KRTtf_neg #D mat_counts = np.array([[count_KRTtf_KRTgene,count_neg_KRTgene], [count_KRTtf_neg, count_neg_neg]]).reshape((2,2)) pseudo = 1 mat_counts_pseudo = mat_counts+pseudo num_in_1 = mat_counts.sum(axis=1)[0] #count_KRTgene num_in_2 = mat_counts.sum(axis=0)[0] #count_KRTtf in_1_and_in_2 = count_KRTtf_KRTgene in_1_or_in_2 = count_KRTgene +count_KRTtf_neg in_1 = count_KRTgene in_2 = count_KRTtf observed_num = mat_counts[0][0] #count_KRTtf_KRTgene expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') jaccard = in_1_and_in_2/in_1_or_in_2 intersect_over_min = in_1_and_in_2/min(in_1,in_2) results[tf] = { 'jaccard':jaccard,'intersect_over_min':intersect_over_min, 'intersection':in_1_and_in_2, 'union':in_1_or_in_2, 'num_in_1':num_in_1,'num_in_2':num_in_2, 'observed':observed_num, 'expected':expected_num, 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} result_df = pd.DataFrame.from_dict(results,orient='index') result_df['pval_bonf'] = result_df.pval.apply(lambda x: min(1, x* sum(sum(mat_counts))))#result_df.shape[0])) result_df['log_pval_bonf'] = result_df.pval_bonf.apply(lambda x: min(100,-np.log10(x+1e-100))) result_df.to_csv(os.path.join(save_dir, 'stats_fisher_KRTtfs.csv')) display(result_df[:5]) result_df.shape result_df_filt # # for all # KRT_crm = data_all[tissues_label=='GDSD6'] # KRT_crm_KRT_genes = KRT_crm[KRT_TF_feat] # count_KRTtf_KRTgene = KRT_crm_KRT_genes.sum().sum() # A # count_all = data_all.sum().sum() # count_KRTgene = KRT_crm.sum().sum() #mat_counts.sum(axis=1)[0], sum first row # count_KRTtf = data_all[KRT_TF_feat].sum().sum() #mat_counts.sum(axis=0)[0], sum down first col # count_KRTtf_neg = count_KRTtf - count_KRTtf_KRTgene # B # count_neg_KRTgene = count_KRTgene - count_KRTtf_KRTgene #C # count_neg_neg = count_all - count_KRTgene - count_KRTtf_neg #D # mat_counts = np.array([[count_KRTtf_KRTgene,count_neg_KRTgene], # [count_KRTtf_neg, count_neg_neg]]).reshape((2,2)) # pseudo = 1 # mat_counts_pseudo = mat_counts+pseudo # num_in_1 = mat_counts.sum(axis=1)[0] #count_KRTgene # num_in_2 = mat_counts.sum(axis=0)[0] #count_KRTtf # 
in_1_and_in_2 = count_KRTtf_KRTgene # in_1_or_in_2 = count_KRTgene +count_KRTtf_neg # in_1 = count_KRTgene # in_2 = count_KRTtf # observed_num = mat_counts[0][0] #count_KRTtf_KRTgene # expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) # oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') # jaccard = in_1_and_in_2/in_1_or_in_2 # intersect_over_min = in_1_and_in_2/min(in_1,in_2) # results = { 'jaccard':jaccard,'intersect_over_min':intersect_over_min, # 'intersection':in_1_and_in_2, # 'union':in_1_or_in_2, # 'num_in_1':num_in_1,'num_in_2':num_in_2, # 'observed':observed_num, 'expected':expected_num, # 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} # # resultsdo some excel work:see pptx next we check which tf footprints are significantly associated with having KRT genes be expressed# expression KRT_exp_crm = KRT_crm.iloc[np.where(np.array(KRT_exp_arr)>THRES)[0],:] results = {} count_all = KRT_crm.sum().sum() count_KRTgene = KRT_exp_crm.sum().sum() #mat_counts.sum(axis=1)[0], sum first row for tf, feat_list in tfs_feat_dict.items(): if len(feat_list)>0: # print(tf) KRT_crm_KRT_genes = KRT_exp_crm[feat_list] count_KRTtf_KRTgene = KRT_crm_KRT_genes.sum().sum() # A if count_KRTtf_KRTgene==0: continue count_KRTtf = KRT_crm[feat_list].sum().sum() #mat_counts.sum(axis=0)[0], sum down first col count_KRTtf_neg = count_KRTtf - count_KRTtf_KRTgene # B count_neg_KRTgene = count_KRTgene - count_KRTtf_KRTgene #C count_neg_neg = count_all - count_KRTgene - count_KRTtf_neg #D mat_counts = np.array([[count_KRTtf_KRTgene,count_neg_KRTgene], [count_KRTtf_neg, count_neg_neg]]).reshape((2,2)) pseudo = 1 mat_counts_pseudo = mat_counts+pseudo num_in_1 = mat_counts.sum(axis=1)[0] #count_KRTgene num_in_2 = mat_counts.sum(axis=0)[0] #count_KRTtf in_1_and_in_2 = count_KRTtf_KRTgene in_1_or_in_2 = count_KRTgene +count_KRTtf_neg in_1 = count_KRTgene in_2 = count_KRTtf observed_num = mat_counts[0][0] #count_KRTtf_KRTgene expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') jaccard = in_1_and_in_2/in_1_or_in_2 intersect_over_min = in_1_and_in_2/min(in_1,in_2) results[tf] = { 'jaccard':jaccard,'intersect_over_min':intersect_over_min, 'intersection':in_1_and_in_2, 'union':in_1_or_in_2, 'num_in_1':num_in_1,'num_in_2':num_in_2, 'observed':observed_num, 'expected':expected_num, 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} result_df_exp = pd.DataFrame.from_dict(results,orient='index') result_df_exp['pval_bonf'] = result_df_exp.pval.apply(lambda x: min(1, x* result_df.shape[0])) result_df_exp['log_pval_bonf'] = result_df_exp.pval_bonf.apply(lambda x: min(100,-np.log10(x+1e-100))) result_df_exp['is_KRT_TF'] = result_df_exp.index.isin(KRT_tfs) result_df_exp.to_csv(os.path.join(save_dir, 'stats_fisher_KRTexp.csv')) result_df_exp.sort_values('oddsratio',ascending=False) result_df_exp.sort_values('oddsratio',ascending=False)[:10] # results[result_idx] = {'color_row':color_1, 'color_col':color_2, # 'jaccard':jaccard,'intersect_over_min':intersect_over_min, # 'intersection':len(in_1_and_in_2), # 'union':len(in_1_or_in_2), # 'num_in_1':num_in_1,'num_in_2':num_in_2, # 'observed':observed_num, 'expected':expected_num, # 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} # sns.distplot(KRT_crm_KRT_genes['JUND_loop']) # def comp_two_gene_dicts(gene_dict1, gene_dict2, pseudo=0): # """ # 1: rows # 2: columns # """ # bg = set(gene_dict1['background']) | set(gene_dict2['background']) # results 
= {} # result_idx = 0 # for color_1 in ['purple','green', 'grey', 'blue']: # for color_2 in ['purple','green', 'grey', 'blue']: # geneset_1 = set(gene_dict1[color_1]) # geneset_2 = set(gene_dict2[color_2]) # in_1_and_in_2 = geneset_1 & geneset_2 # in_1_not_2 = geneset_1 - geneset_2 # not_1_in_2 = geneset_2 - geneset_1 # in_1_or_in_2 = geneset_1 | geneset_2 # not_1_not_2 = bg - in_1_or_in_2 # mat_counts = np.array([[len(in_1_and_in_2), len(in_1_not_2)], # [len(not_1_in_2), len(not_1_not_2)]]).reshape((2,2)) # mat_counts_pseudo = mat_counts+pseudo # # if (color_1=='grey') & (color_2=='grey'): # # print(mat_counts) # num_in_1 = mat_counts.sum(axis=1)[0] # num_in_2 = mat_counts.sum(axis=0)[0] # observed_num = mat_counts[0][0] # expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) # oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') # jaccard = len(in_1_and_in_2)/len(in_1_or_in_2) # intersect_over_min = len(in_1_and_in_2)/min(num_in_1,num_in_2) # results[result_idx] = {'color_row':color_1, 'color_col':color_2, # 'jaccard':jaccard,'intersect_over_min':intersect_over_min, # 'intersection':len(in_1_and_in_2), # 'union':len(in_1_or_in_2), # 'num_in_1':num_in_1,'num_in_2':num_in_2, # 'observed':observed_num, 'expected':expected_num, # 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} # result_idx+=1 # result_df = pd.DataFrame.from_dict(results,orient='index') # result_df['pval_bonf'] = result_df.pval.apply(lambda x: min(1, x* sum(sum(mat_counts))))#result_df.shape[0])) # result_df['log_pval_bonf'] = result_df.pval_bonf.apply(lambda x: min(100,-np.log10(x+1e-100))) # return result_df2. check relationship between TFAP2C and KLF4cols_to_check=['TFAP2C_pro','TFAP2C_loop','KLF4_pro','KLF4_loop'] f = sns.pairplot(KRT_crm[cols_to_check],kind='reg') plt.savefig(os.path.join(save_dir, 'TFAP2C_KLF4_corr_check.png'))5. 
Next check would be for pairs of KRT tfs to see which are significantly enriched in KRT genestfs_w_feats = set() for col in data_all.columns: tfs_w_feats.add(col.split('_')[0]) len(tfs_w_feats) KRT_TF_pair_dict = defaultdict(list) for tf1 in KRT_tfs: for tf2 in KRT_tfs: if (tf10: counter+=1 if (counter%100)==0: print(counter, tf) # print(tf) KRT_crm_KRT_genes = KRT_crm[feat_list] count_KRTtf_KRTgene = KRT_crm_KRT_genes.sum().sum() # A count_KRTtf = data_all[feat_list].sum().sum() #mat_counts.sum(axis=0)[0], sum down first col count_KRTtf_neg = count_KRTtf - count_KRTtf_KRTgene # B count_neg_KRTgene = count_KRTgene - count_KRTtf_KRTgene #C count_neg_neg = count_all - count_KRTgene - count_KRTtf_neg #D mat_counts = np.array([[count_KRTtf_KRTgene,count_neg_KRTgene], [count_KRTtf_neg, count_neg_neg]]).reshape((2,2)) pseudo = 1 mat_counts_pseudo = mat_counts+pseudo num_in_1 = mat_counts.sum(axis=1)[0] #count_KRTgene num_in_2 = mat_counts.sum(axis=0)[0] #count_KRTtf in_1_and_in_2 = count_KRTtf_KRTgene in_1_or_in_2 = count_KRTgene +count_KRTtf_neg in_1 = count_KRTgene in_2 = count_KRTtf observed_num = mat_counts[0][0] #count_KRTtf_KRTgene expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') jaccard = in_1_and_in_2/in_1_or_in_2 intersect_over_min = in_1_and_in_2/min(in_1,in_2) results[tf] = { 'jaccard':jaccard,'intersect_over_min':intersect_over_min, 'intersection':in_1_and_in_2, 'union':in_1_or_in_2, 'num_in_1':num_in_1,'num_in_2':num_in_2, 'observed':observed_num, 'expected':expected_num, 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} result_df = pd.DataFrame.from_dict(results,orient='index') result_df['pval_bonf'] = result_df.pval.apply(lambda x: min(1, x* sum(sum(mat_counts))))#result_df.shape[0])) result_df['log_pval_bonf'] = result_df.pval_bonf.apply(lambda x: min(100,-np.log10(x+1e-100))) result_df.to_csv(os.path.join(save_dir, 'stats_fisher_KRTtfs_pairs.csv')) result_df result_df_filt = result_df[(result_df.pval_bonf<0.05) & (result_df.oddsratio>1)& (result_df.oddsratio<1000)] print(result_df_filt.shape, result_df.shape) result_df_filt.sort_values('oddsratio',ascending=False)[:10] result_mat_df = pd.DataFrame(index=KRT_tfs, columns = KRT_tfs).fillna(0) for pair in result_df_filt.index.values: tf1,tf2 = pair.split('::') result_mat_df.at[tf1,tf2]=1 result_mat_df.at[tf2,tf1]=1 result_mat_df.to_csv(os.path.join(save_dir, 'stats_fisher_KRTtfs_pairs_mat.csv'))KRT gene pairs with expressioncounter = 0 results = {} count_all = KRT_crm.sum().sum() count_KRTgene = KRT_exp_crm.sum().sum() #mat_counts.sum(axis=1)[0], sum first row for tf, feat_list in KRT_TF_pair_dict.items(): if len(feat_list)>0: counter+=1 if (counter%100)==0: print(counter, tf) # print(tf) KRT_crm_KRT_genes = KRT_exp_crm[feat_list] count_KRTtf_KRTgene = KRT_crm_KRT_genes.sum().sum() # A if count_KRTtf_KRTgene==0: continue count_KRTtf = KRT_crm[feat_list].sum().sum() #mat_counts.sum(axis=0)[0], sum down first col count_KRTtf_neg = count_KRTtf - count_KRTtf_KRTgene # B count_neg_KRTgene = count_KRTgene - count_KRTtf_KRTgene #C count_neg_neg = count_all - count_KRTgene - count_KRTtf_neg #D mat_counts = np.array([[count_KRTtf_KRTgene,count_neg_KRTgene], [count_KRTtf_neg, count_neg_neg]]).reshape((2,2)) pseudo = 1 mat_counts_pseudo = mat_counts+pseudo num_in_1 = mat_counts.sum(axis=1)[0] #count_KRTgene num_in_2 = mat_counts.sum(axis=0)[0] #count_KRTtf in_1_and_in_2 = count_KRTtf_KRTgene in_1_or_in_2 = count_KRTgene +count_KRTtf_neg in_1 = 
count_KRTgene in_2 = count_KRTtf observed_num = mat_counts[0][0] #count_KRTtf_KRTgene expected_num = num_in_1*num_in_2/sum(sum(mat_counts)) oddsratio_pseudo, pvalue_pseudo = stats.fisher_exact(mat_counts_pseudo,alternative='greater') jaccard = in_1_and_in_2/in_1_or_in_2 intersect_over_min = in_1_and_in_2/min(in_1,in_2) results[tf] = { 'jaccard':jaccard,'intersect_over_min':intersect_over_min, 'intersection':in_1_and_in_2, 'union':in_1_or_in_2, 'num_in_1':num_in_1,'num_in_2':num_in_2, 'observed':observed_num, 'expected':expected_num, 'oddsratio':oddsratio_pseudo, 'pval':pvalue_pseudo} result_df = pd.DataFrame.from_dict(results,orient='index') result_df['pval_bonf'] = result_df.pval.apply(lambda x: min(1, x* sum(sum(mat_counts))))#result_df.shape[0])) result_df['log_pval_bonf'] = result_df.pval_bonf.apply(lambda x: min(100,-np.log10(x+1e-100))) result_df.to_csv(os.path.join(save_dir, 'stats_fisher_KRTtfs_exp_pairs.csv')) result_df result_df_filt = result_df[(result_df.pval<0.05) & (result_df.oddsratio>1)& (result_df.oddsratio<1000)] print(result_df_filt.shape, result_df.shape) result_df_filt.sort_values('oddsratio',ascending=False)[:50] result_df_filt.loc['KLF4::TFAP2C',:]5. Next check stability score found interactions and annotate of KRT tfs each interaction hasscore_thres=.2 def check_interaction(interaction_str): feat_arr = interaction_str.split('::') num_KRT = 0 num_tot = len(feat_arr) for feat in feat_arr: if feat in KRT_TF_feat: num_KRT+=1 return [num_KRT,num_tot] # GDSD6_stability_df = pd.read_csv('../data/processed/fig4_modelling/irf_manual/test_GDSD6_GDSD6_boosted_stability_score.csv') GDSD6_stability_df = pd.read_csv('../data/processed/fig4_modelling/irf_manual/test_GDSD6_purple_boosted_stability_score.csv') GDSD6_stability_df GDSD6_stability_df[['num_KRT','num_in_interaction']]=GDSD6_stability_df['index'].apply(func=check_interaction).apply(pd.Series)#,result_type='expand') GDSD6_stability_df['frac_KRT'] = GDSD6_stability_df['num_KRT']/GDSD6_stability_df['num_in_interaction'] sns.pairplot(GDSD6_stability_df[['score','frac_KRT']],kind='reg') GDSD6_stability_df[(GDSD6_stability_df.score >score_thres)] GDSD6_stability_df[(GDSD6_stability_df.frac_KRT>0) & (GDSD6_stability_df.score >score_thres)]SKIN trajectory analysis# rna rna_df = pd.read_csv('../data/interim/rna/tissue_tpm_sym.csv',index_col=0) rna_df_norm = rna_df[normal_tissues] rna_D0_dict = pd.Series(rna_df.GDSD0.values, index=rna_df.index.values).to_dict() rna_D3_dict = pd.Series(rna_df.GDSD3.values, index=rna_df.index.values).to_dict() rna_D6_dict = pd.Series(rna_df.GDSD6.values, index=rna_df.index.values).to_dict() %%time D0_crm = pd.read_csv('/Users/mguo123/Google Drive/1_khavari/omics_project-LD/pan_omics/data/processed/tissue_crms/pro_loop_tissue/GDSD0_crm.csv',index_col=0,header=0) D0_crm.drop(['tissue','exp','num_loop_counts','num_loops','num_atac_regions_pro','num_atac_regions_loop'],axis=1,inplace=True) D3_crm = pd.read_csv('/Users/mguo123/Google Drive/1_khavari/omics_project-LD/pan_omics/data/processed/tissue_crms/pro_loop_tissue/GDSD3_crm.csv',index_col=0,header=0) D3_crm.drop(['tissue','exp','num_loop_counts','num_loops','num_atac_regions_pro','num_atac_regions_loop'],axis=1,inplace=True) D6_crm = pd.read_csv('/Users/mguo123/Google Drive/1_khavari/omics_project-LD/pan_omics/data/processed/tissue_crms/pro_loop_tissue/GDSD6_crm.csv',index_col=0,header=0) D6_crm.drop(['tissue','exp','num_loop_counts','num_loops','num_atac_regions_pro','num_atac_regions_loop'],axis=1,inplace=True) what genes D0_crmHomework 3 - 
Probability Estimation Before you beginRemember to:1. Make your own copy of the notebook by pressing the "Copy to drive" button.2. Expend all cells by pressing **Ctrl+[** Your IDs✍️ Fill in your IDs in the cell below:## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in your IDs (as a string) student1_id = '...' student2_id = '...' ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% print('Hello ' + student1_id + ' & ' + student2_id)Importing Packagesimport numpy as np # Numerical package (mainly multi-dimensional arrays and linear algebra) import pandas as pd # A package for working with data frames import matplotlib.pyplot as plt # A plotting package ## Setup matplotlib to output figures into the notebook %matplotlib inline ## Set some default values of the the matplotlib plots plt.rcParams['figure.figsize'] = (6.0, 6.0) # Set default plot's sizes plt.rcParams['figure.dpi'] = 120 # Set default plot's dpi (increase fonts' size) plt.rcParams['axes.grid'] = True # Show grid by default in figuresThe Ultra-Trail du Mont-BlancIn this assignment, we will examine the distribution of results in The Ultra-Trail du Mont-Blanc race.The description of the race (taken from Wikipedia):> It takes place once a year on either the last weekend in August or the first weekend of September in the Alps, and follows the route of the Tour du Mont Blanc through France, Italy and Switzerland. It has a distance of approximately 171 kilometres (106 mi), and a total elevation gain of around 10,040 metres (32,940 ft). It is widely regarded as one of the most difficult foot races in the world, and one of the largest with more than 2,500 starters. It is one race during a week-long festival based around Chamonix in France. The races have strict entry and qualification requirements attained by accumulating enough race points through qualifying trail races over the previous two-year period. In 2016 and 2017, 42% and 34% of runners did not finish the UTMB race.> While the best runners complete the loop in slightly more than 20 hours, most runners take 32 to 46 hours to reach the finish line. Most runners will have to run through two nights in order to complete the race.The original results can be found [here](https://utmbmontblanc.com/en/page/349/results.html).A processed version of the results as a CSV file can be found [here](https://technion046195.netlify.app/datasets/utmb.csv) Loading the datasetdata_file = 'https://technion046195.netlify.app/datasets/utmb.csv' ## Loading the data dataset = pd.read_csv(data_file) datasetThe Data Fields and TypesThis dataset contains the results for all the runners which finished the race over all the years in which the race took place. The data fields (the columns) for each runner are as following:- **Name**: The runner's name.- **Year**: The year of the race.- **Result**: The runner's result (the time it took him to finish the race) in hours. The race ends after about 47 hours. Therefore, these results are between 0 and 47.- **Age group**: The age group to which the runner belongs to: 0 - the youngest (10-22) and 6 - The oldest (80+)- **Gender**: The runner's gender: 0-male, 1-female.- **Rank**: The overall score rank of the runner in that specific year. 
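As a small, ungraded convenience check (an added sketch reusing the `dataset` DataFrame loaded above), the fields just described can be inspected directly before starting the exercises:

```python
## Peek at the fields described above (not part of the graded exercises).
print(dataset.dtypes)                                 # data type of each column
print(dataset['Result'].describe())                   # results should lie between 0 and 47 hours
print(dataset['Year'].min(), dataset['Year'].max())   # first and last editions present in the data
```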
To extract a column from the DataFrame as a NumPy array we can use the following code: ✍️ The == operatorCorrect the code below so that it will count and print the number of runners which have finished the race in 2014.- Use the equality operator "a==b" to create a boolean 1D array.- Use the summation function *np.sum(x)* to sum over the values of the array.## Define the array of the years: years = dataset['Year'].values ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Correct the following line: ## Example: the number of runners which have finished the race since 2010 would be: np.sum(x > 2010) number_of_runners_in_2014 = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% print('The number of runners which finished the race in 2014: ' + str(number_of_runners_in_2014))✍️ The & operatorCorrect the code below so that it will count and print the number of **male** runners which have finished the race in 2014.- Use the logical AND operator "a & b" two combine two boolean arrays## Define the array of the genders: genders = dataset['Gender'].values ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in the following line: number_of_male_runners_in_2014 = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% print('The number of male runners which finished the race in 2014: ' + str(number_of_male_runners_in_2014))✍️ Calculating the histogramIn the cell below we define a function called *calculate_hist*. We would like this function to calculates an histogram based in the data array *x* by dividing the range of *x*s into *n_bins* equal bins.Complete the code so that it will correctly calculate the histogram:def calculate_hist(x, n_bins): n_samples = len(x) # Get the number of values in x ## Define bins bins_edges = np.linspace(x.min(), x.max(), n_bins + 1) bins_width = np.diff(bins_edges) ## This is equal to running: bins[1:] - bins[:-1] ## Initialize the array of histogram values hist_values = np.zeros(n_bins) ## Loop over the bins for k in range(n_bins): bin_left_edge = bins_edges[k] bin_right_edge = bins_edges[k + 1] ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in the following line to count the number of samples in each bin: number_of_samples_in_bin = ... 
## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% hist_values[k] = number_of_samples_in_bin / n_samples / bins_width[k] return hist_values, bins_edges, bins_widthThe following code uses the function above to calculate the histogram on the list of runners' running times.## Define the array of the results: results = dataset['Result'].values ## Calculate the histogram n_bins = 150 hist_values, bins_edges, bins_width = calculate_hist(results, n_bins)The following code uses the Matplotlib package to plot the histogram.## Prepare the figure and axes fig, ax = plt.subplots() ## Plot the histogram's bars ax.bar(bins_edges[:-1], hist_values, width=bins_width, align='edge') ## Set title and axis labels ax.set_title('Histogram of Results') ax.set_ylabel('PDF') ax.set_xlabel('Result [hours]');The hist functions in NumPy and MatplotlibFrom here on we will use the NumPy function [numpy.histogram](https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html) to calculate histograms and the Matplotlib function [plt.hist](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.hist.html?highlight=histmatplotlib.axes.Axes.hist) to calculate and plot the histograms:## Calculate and plot the histogram fig, ax = plt.subplots() ax.hist(results, bins=n_bins ,density=True) ax.set_title('Historgram of Results') ax.set_ylabel('PDF') ax.set_xlabel('Result [hours]');✍️ IndexingLike in Matlab, we can use an array of booleans *x* to index an array *y* by *y[x]*.Complete the following code to calculate the mean result of male runners from 2014:## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in the following line so that indices will be a boolean array corresponding to male runners which competed in 2014 indices = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% result_of_male_in_2014 = results[indices] mean_result = np.mean(result_of_male_in_2014) print('The mean result of male runners in 2014 is {:.2f} hours'.format(mean_result))*In the last line we have used the format function to format the output string. From here on we will be using format for that. You can read about how to use the format function [here](https://pyformat.info/)* ✍️ Comparing Two Age GroupsLet us now compare the results of two groups of runers:- **Group 1**: Male runners from age group 1 in 2018- **Group 2**: Male runners from age group 3 in 2018Complete the code bellow to extract the results of these two groups and plots their histograms:## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%%\ ## Define the array of age groups: age_group = ... ## Define the indices for each group indices1 = ... indices2 = ... ## Extract the results for each group results1 = results[indices1] results2 = results[indices2] ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% ## Calculate and plot the histograms fig, ax = plt.subplots() ax.hist(results1, bins=40 ,density=True, alpha=0.3, label='Group 1') ax.hist(results2, bins=40 ,density=True, alpha=0.3, label='Group 2') ax.set_title('Historgrams for Groups 1 & 2') ax.set_ylabel('PDF') ax.set_xlabel('Result [hours]'); ax.legend();Fitting a parametric distribution modelWe will now try to fit a parametric distribution model to the results of the two age groups. 
We will do so using use the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution).The Beta distribution describes a continues distribution over the finite interval of $\left[0,1\right]$ and has the following PDF:$$p\left(x;a,b\right)=\frac{\Gamma\left(a+b\right)}{\Gamma\left(a\right)\Gamma\left(b\right)}x^{a-1}\left(1-x\right)^{b-1}$$$\Gamma$ here is the [Gamam function](https://en.wikipedia.org/wiki/Gamma_function) and is usually referred to as the extension of the factorial fuction. (since for integers inputs we get that $\Gamma\left(n\right)=\left(n-1\right)!$). The Gamma function appears in the PDFs of various known distributions and is defined as an integral over some integrand. For our needs we will only have to be able to evaluate it for different inputs, and we will do so using SciPy function [scipy.special.gamma](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.gamma.html).$a$ and $b$ are the two parameters of the distribution, which we would like to find. Scaling the distributionIn our case, the results are distributed over the interval $\left[0,47\right]$, so we will have to scale the Beta distribution to fit this range. We can do so by using the following scaled PDF:$$p\left(x;a,b,s\right)=\frac{\Gamma\left(a+b\right)}{s\cdot\Gamma\left(a\right)\Gamma\left(b\right)}\left(x/s\right)^{a-1}\left(1-\left(x/s\right)\right)^{b-1}$$In our case $s=47$. ✍️ Plotting the Beta DistributionFill in the code below to define the function *calc_beta_pdf*, which calculates the Beta distribution's PDF given some $a$ and $b$.- Use the gamma(x) to calculate $\Gamma\left(x\right)$## import the gamma function from the SciPy package from scipy.special import gamma ## Define the function calculating the PDF of the Beta distribution def calc_beta_pdf(x, a, b, s): ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% pdf_value = gamma(a + b) / .... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% return pdf_valueThe following code plots the PDF for the following values of values of $\left(a,b\right)$:$$\left(0.5,0.5\right),\left(5,1\right),\left(1,3\right),\left(2,2\right),\left(2,5\right),$$(You are expected get an "RuntimeWarning: divide by zero" error and it is OK.)## Define the scale parameter s s = 47 ## Define a grid to plot the distribution over. results_grid = np.arange(0, 47.1, 0.1) # A grid from 0 to 47 with steps of 0.1 ## Prepare the figure and axes fig, ax = plt.subplots() ## Calculate and plot the PDF for eac hset of parameters for a, b in [[0.5, 0.5], [5, 1], [1, 3], [2, 2], [2, 5]]: beta_pdf = calc_beta_pdf(results_grid, a, b, s) ax.plot(results_grid, beta_pdf, label='$a={},b={}$'.format(a, b)) ax.set_title('Beta Distributions') ax.set_ylabel('PDF') ax.set_xlabel('Result [hours]'); ax.set_ylim(0, 2.5 / s) ax.legend();✍️ Maximum Likelihood Estimation (MLE)We will Use the MLE approach to find the optimal parameters $a^*$ and $b^*$ for fitting the beta distribution to the results of group 1 and group2.To find the optimal parameters which minimizes the *Argmin* use a grid search, i.e., look for the optimal parameters over a selected grid by checking each and every point on the grid. 
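Spelled out with the scaled PDF defined above, the quantity that the grid search evaluates for every candidate pair $(a,b)$ on the grid is the log-likelihood of the $N$ observed results $x_1,\dots,x_N$; the optimal parameters maximize it, or equivalently minimize its negative:

$$\left(a^*,b^*\right)=\underset{a,b}{\arg\max}\sum_{i=1}^{N}\log p\left(x_i;a,b,s\right)=\underset{a,b}{\arg\min}\left[-\sum_{i=1}^{N}\log p\left(x_i;a,b,s\right)\right]$$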
Fill in the code below the define the functions performing the MLE.- Use np.log(x) to calculate the log of x- Use the calc_beta_pdf we have defined above to calculate the log-likelihood- Reminder: Use np.sum(x) to calculate the sum over the array x# Define the log-likelihood function def beta_log_likelyhood(a, b, s, x): ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in this line to calculate the log-likelihood. x here is the array contating the data. log_likelyhood = ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% return log_likelyhood # Define the function which searches for the optimal parameters a & b on a given grid def beta_mle(a_grid, b_grid, s, x): optimal_a = None optimal_b = None min_value_so_far = np.inf for a in a_grid: for b in b_grid: ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% current_value = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% if current_value < min_value_so_far: optimal_a = a optimal_b = b min_value_so_far = current_value return optimal_a, optimal_bThe following code uses the above functions to find the optimal parameters and plot the estimated PDF over the histogram## Define the grid a_grid = np.arange(0.1, 10, 0.1) b_grid = np.arange(0.1, 10, 0.1) ## Group 1 ## ======= ## Find optimal MLE parameters optimal_a1, optimal_b1 = beta_mle(a_grid, b_grid, s, results1) ## Calcualte the PDF beta_pdf1 = calc_beta_pdf(results_grid, optimal_a1, optimal_b1, s) ## Plot the results fig, ax = plt.subplots() ax.hist(results1, bins=40 ,density=True, label='Histogram') ax.plot(results_grid, beta_pdf1, label='Beta') ax.set_title('Group 1 - MLE') ax.set_ylabel('PDF') ax.set_xlabel('Time [min]') ax.legend(); ## Group 2 ## ======= ## Find optimal MLE parameters optimal_a2, optimal_b2 = beta_mle(a_grid, b_grid, s, results2) ## Calcualte the PDF beta_pdf2 = calc_beta_pdf(results_grid, optimal_a2, optimal_b2, s) ## Plot the results fig, ax = plt.subplots() ax.hist(results2, bins=40 ,density=True, label='Histogram') ax.plot(results_grid, beta_pdf2, label='Beta') ax.set_title('Group 2 - MLE') ax.set_ylabel('PDF') ax.set_xlabel('Time [min]') ax.legend();The [scipy.stats.beta](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html) modelSciPy has a set of object for working with a large range of distributions, one of them is *scipy.stats.beta*.A distribution with a given set of parameter can by define by:``` pythondistrib = beta(a, b)```or with a specific scale and offset:``` pythondistrib = beta(a, b, offset, scale)```The *distrib* objects can now be used to calculate the PDF using *distrib.pdf*, generate random samples using *distrib.rsv* and more.The *scipy.stats.beta* can also be used to estimate the MLE parameters given a dataset by:``` pythona, b, offset, scale = beta.fit(data, floc=offset, fscale=scale)```The following code compares our grid search calculation of the MLE parameters to SciPy's calculation.## Import the beta distribution model from SciPy from scipy.stats import beta ## Calcualte the MLE parameters for group 1 using SciPy optimal_a1_scipy, optimal_b1_scipy, _, _ = beta.fit(results1, floc=0, fscale=s) ## The _ in the line above tells Python to ignore the 3rd and 4th outputs of beta.fit print('Results using our function: a={:.2f}, b={:.2f}'.format(optimal_a1, optimal_b1)) print('Results using SciPy: a={:.2f}, b={:.2f}'.format(optimal_a1_scipy, optimal_b1_scipy))The Probability of Group 2 Being Better Then Group 1We would now like to calculate the following probability:> The probability that the 
result of a random runner from group 2 will be better (lower) then the result of a random runner from group 1We would like to do so based on the two estimated probabilities we have found.Before you start, write this probability (for yourself, not for submission) as an integral over the PDF of the joint distribution of the two results. We will calculate this integral numerically. The 2D gridThe following code creates a 2D grid using np.meshgrid (which is similar to Matlab's meshgrid function)## Define the 2D grid of results using np.meshgrid (which is similar to Matlab's meshgrid function) results_grid_2d_1, results_grid_2d_2 = np.meshgrid(np.arange(0, 47.1, 0.1), np.arange(0, 47.1, 0.1)) print('results_grid_2d_1:') print(results_grid_2d_1[:5, :5]) print() print('results_grid_2d_2:') print(results_grid_2d_2[:5, :5])✍️ The joint probabilityCalculate the PDF of the joint probability of the results of the two runners over the 2D grid of results.## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Calcualte the MLE parameters for group 2 using SciPy optimal_a2_scipy, optimal_b2_scipy, _, _ = ... ## Define two SciPy beta distribution objects based on the MLE parameters calculated earlier: beta_dist1 = beta(optimal_a1_scipy, optimal_b1_scipy, loc=0, scale=s) beta_dist2 = ... ## Calculate the marginal PDF of the two results over the grid marginal_pdf1 = beta_dist1.pdf(results_grid_2d_1) marginal_pdf2 = ... ## Calculate the joint PDF over the grid joint_pdf = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%%Plotting the joint distribution(You are expected get an "RuntimeWarning: divide by zero" error and it is OK.)## Plot the joint PDF from matplotlib import ticker # ticker is used to select the levels of the conturs map according to a logaritmic scale fig, ax = plt.subplots() ax.contour(results_grid_2d_1, results_grid_2d_2, joint_pdf, locator=ticker.LogLocator(numticks=40), colors='black', linewidths=0.2) img_obj = ax.contourf(results_grid_2d_1, results_grid_2d_2, joint_pdf, locator=ticker.LogLocator(numticks=40), cmap='jet') plt.colorbar(img_obj); ax.set_title('The Joint PDF') ax.set_ylabel('Result of Runner 2') ax.set_xlabel('Result of Runner 1') ax.legend();Calculation the integralGiven a 2D grid $\left\{x_i\right\}$, $\left\{y_j\right\}$ with step sizes of $\Delta x$ and $\Delta y$ and area $S$, we can approximate the integral:$$\underset{S}{\int\int}f\left(x,y\right)dx_1dx_2$$As:$$\sum_{x_i,y_j\in S}f\left(x_i,y_j\right) \Delta x \Delta y$$✍️ Approximate the desired probability using a numerical approximation of the relevant integral over the joint probability:dx1 = 0.1 dx2 = 0.1 ## %%%%%%%%%%%%%%% Your code here - Begin %%%%%%%%%%%%%%% ## Fill in the following line to define the points on the grid which we would like to integrate over. ## Use results_grid_2d_1 & results_grid_2d_2 to define relevat_area relevat_area = ... ## %%%%%%%%%%%%%%% Your code here - End %%%%%%%%%%%%%%%%% prob = np.sum(joint_pdf * relevat_area) * dx1 * dx2 print('The probability that the result of a random runner from group 2 will be better (lower) then the result of a random runner from group 1 is: {:.2f}'.format(prob))______Copyright For more information, visit us at www.pieriandata.com DataFramesDataFrames are the workhorse of pandas and are directly inspired by the R programming language. We can think of a DataFrame as a bunch of Series objects put together to share the same index. 
Let's use pandas to explore this topic!import pandas as pd import numpy as np from numpy.random import randn np.random.seed(101) df = pd.DataFrame(randn(5,4),index='A B C D E'.split(),columns='W X Y Z'.split()) dfSelection and IndexingLet's learn the various methods to grab data from a DataFramedf['W'] # Pass a list of column names df[['W','Z']] # SQL Syntax (NOT RECOMMENDED!) df.WDataFrame Columns are just Seriestype(df['W'])Creating a new column:df['new'] = df['W'] + df['Y'] dfRemoving Columnsdf.drop('new',axis=1) # Not inplace unless specified! df df.drop('new',axis=1,inplace=True) dfCan also drop rows this way:df.drop('E',axis=0)Selecting Rowsdf.loc['A']Or select based off of position instead of labeldf.iloc[2]Selecting subset of rows and columnsdf.loc['B','Y'] df.loc[['A','B'],['W','Y']]Conditional SelectionAn important feature of pandas is conditional selection using bracket notation, very similar to numpy:df df>0 df[df>0] df[df['W']>0] df[df['W']>0]['Y'] df[df['W']>0][['Y','X']]For two conditions you can use | and & with parenthesis:df[(df['W']>0) & (df['Y'] > 1)]More Index DetailsLet's discuss some more features of indexing, including resetting the index or setting it something else. We'll also talk about index hierarchy!df # Reset to default 0,1...n index df.reset_index() newind = 'CA NY WY OR CO'.split() df['States'] = newind df df.set_index('States') df df.set_index('States',inplace=True) dfDataFrame SummariesThere are a couple of ways to obtain summary data on DataFrames.df.describe() provides summary statistics on all numerical columns.df.info and df.dtypes displays the data type of all columns.df.describe() df.dtypes df.info() Index: 5 entries, CA to CO Data columns (total 4 columns): W 5 non-null float64 X 5 non-null float64 Y 5 non-null float64 Z 5 non-null float64 dtypes: float64(4) memory usage: 200.0+ bytesBasic querydf = pd.read_sql(""" select e.nom_ent, m.nom_mun, m.cve_ent, m.cve_mun, d.year, d.fosas, d.cuerpos, d.cuerpos_identificados, d.restos, d.restos_identificados from mapasdata d join areas_geoestadisticas_municipales m on d.cve_ent = m.cve_ent and d.cve_mun = m.cve_mun join areas_geoestadisticas_estatales e on d.cve_ent = e.cve_ent """, con) df.head() grouped = df.groupby(['nom_ent', 'nom_mun', 'year']).sum() grouped.to_dict(orient='index') grouped.to_dict(orient="index") grouped.to_dict(orient="index")Tf-Flower - Image Classification ProblemClassifying different types of flowers using a ``CNN`` with ``tensorflow`` and ``keras`` Importsimport tensorflow_datasets as tfds import tensorflow as tf import numpy as np from matplotlib import pyplot as plt import math, cv2 tfds.list_builders()[:]Data Preperationds = tfds.load(name='tf_flowers', split="train")Downloading and preparing dataset tf_flowers/3.0.1 (download: 218.21 MiB, generated: 221.83 MiB, total: 440.05 MiB) to /root/tensorflow_datasets/tf_flowers/3.0.1...Class Namesclass_names =np.array(['dandelion', 'daisy', 'tulips', 'sunflowers', 'roses'])Iterating over the ``dataset``flowers =[data for data in ds] # for data in ds: # print(data['image'], data['label']) # breakSeparating `images` and `labels`y =tf.constant([data['label'].numpy() for data in ds]) X_images =[] for data in ds: image = cv2.resize(data['image'].numpy()/255, (200, 200)) X_images.append(image) X = tf.constant(X_images) X.shape X.shape, X[0]Showing some sample Imagesdef display_images(images_and_classes, labels, cols=5): rows = 3 fig = plt.figure() fig.set_size_inches(cols * 2, rows * 2) for i, (image, label) in 
enumerate(zip(images_and_classes, labels)): plt.subplot(rows, cols, i + 1) plt.axis('off') plt.imshow(image) plt.title(class_names[label]) display_images(X[:15], y)Model Creationinput_shape = X[0].shape input_shape model = tf.keras.Sequential([ tf.keras.layers.Conv2D(128, (3, 3), strides=1, input_shape=input_shape, activation='relu') , tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (2, 2), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (2, 2), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (2, 2), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(32, activation='relu'), tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(5, activation='softmax') ], name="flowers_classifier") model.compile( optimizer = tf.keras.optimizers.Adam(lr=0.001), metrics=['accuracy'], loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) ) VAL_SPLIT = .2 BATCH_SIZE = 32 VERBOSE = 1 EPOCHS = 2 history = model.fit(X, y, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=VERBOSE, validation_split=VAL_SPLIT)Epoch 1/2 92/92 [==============================] - 353s 4s/step - loss: 1.4041 - accuracy: 0.3614 - val_loss: 1.3379 - val_accuracy: 0.3787 Epoch 2/2 92/92 [==============================] - 342s 4s/step - loss: 1.2494 - accuracy: 0.4724 - val_loss: 1.1918 - val_accuracy: 0.5150The model is undetrained, if we increase number of epochs this can result in an increase in the model accuracy as weel. Making predictionsX[0].numpy() class_names[model.predict(X[:1]).argmax()] history.historyload model & get layer (multi GPU ver.)model = load_model('/home/seanyu/project/CCP/model/model_hard_negative_thresh10_gen2_k3.h5') relu_out = K.function([model.get_layer('input_1').input],[model.get_layer('output').output, model.get_layer('activation_40').output]) test = pd.read_csv("/home/seanyu/project/CCP/res_csv/testing_hard_negative_thresh10_gen2_k3.csv") posi_thres = 0.95 nega_thres = 0.5 fn = list(test[(test["y_true"] == 1) & (test["y_pred"] <= nega_thres)].sort_values("y_pred")["png_name"]) tn = list(test[(test["y_true"] == 0) & (test["y_pred"] <= nega_thres)].sort_values("y_pred")["png_name"]) fp = list(test[(test["y_true"] == 0) & (test["y_pred"] > posi_thres)].sort_values("y_pred", ascending = False)["png_name"]) tp = list(test[(test["y_true"] == 1) & (test["y_pred"] > posi_thres)].sort_values("y_pred", ascending = False)["png_name"]) def img_combine(img,ncols=5,size=1,path=False): nimg=len(img) nrows=int(ceil(nimg/ncols)) fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=(ncols*size,nrows*size)) if nrows==0: return elif ncols == 1: for r, ax in zip(np.arange(nrows), axes): nth=r if nth < nimg: ax.imshow(img[nth],cmap='rainbow') ax.set_axis_off() elif nrows==1: for c, ax in zip(np.arange(ncols), axes): nth=c if nth < nimg: ax.imshow(img[nth],cmap='rainbow' ) ax.set_axis_off() else: for r, row in zip(np.arange(nrows), axes): for c, ax in zip(np.arange(ncols), row): nth=r*ncols+c if nth < nimg: ax.imshow(img[nth],cmap='rainbow') ax.set_axis_off() if path: plt.savefig(path, dpi = 300) plt.show() def op_img(x, crop_size, output_size): # crop size: w, h # output size, w, h, c crop_w, crop_h = crop_size out_w, out_h, out_c = output_size X_ = np.array([ np.array(img_center_crop(Image.open(i), target_size=(crop_w, crop_h)), dtype='float32') for i in x] ) X_ = np.array([scipy.misc.imresize(i, size=(out_w, out_h)) for i in X_]) # add 
this line to resize images X_ = X_.reshape((len(X_), out_w, out_h, out_c)) return X_ def img_center_crop(img, target_size): # return center cropprd image (not resizing) # img should be a PIL image object # target size should be a tuple, eg (224, 224) width, height = img.size if width <= target_size[0] and height <= target_size[1]: return img left = (width - target_size[0])/2 right = (width + target_size[0])/2 top = (height - target_size[1])/2 bottom = (height + target_size[1])/2 new_img = img.crop((left, top, right, bottom)) return new_img arr = [] suc = 0 force_binary = [1, 0] import cv2 import re for file in fn[:20]: o_img = cv2.imread(file).astype('float32') o_img = cv2.resize(o_img,(200,200)) o_img = np.array(o_img).astype('uint8') in_img = op_img([file], (100,100), (200, 200, 3)) in_img = in_img.astype('float32') in_img = preprocess_input(in_img) result = relu_out([in_img]) #output = result[0] #output = (result[0] > 0.5).astype(int) if force_binary: output = np.array([force_binary]) else: output = result[0] #weight * filters filters = result[1] gap_w = np.dot(model.get_layer('output').get_weights()[0], np.swapaxes(output,0,1)) gap_w = gap_w.reshape(1,1,1,1024) img = gap_w * filters img = img.sum(axis=3).reshape(filters.shape[1],filters.shape[2]) img = cv2.resize(img,(200,200),interpolation=cv2.INTER_CUBIC) arr.append(o_img[:,:,::-1]) arr.append(img) # show img print(file) print("score: "+str(result[0][0])) print("img") plt.figure() plt.imshow(o_img[:,:,::-1]) plt.show() print("hotmap") plt.figure() plt.imshow(img,cmap='rainbow') plt.show() img_combine(arr, ncols=4, size=4, path="/home/seanyu/project/CCP/plots/heatmap_fn")Using MsSQLSparkConnector with Integrated AD Auth This sample shows how to use the MsSQLSparkConnector with integrated AD Auth when using use principal and keytab instead of username/password. PreReq ------- - SQL Server 2019 big data cluster is deployed with AD - Have access to AD controller to create keytabs that we will use in this sample. - Download [AdultCensusIncome.csv]( https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv ) to your local machine. Upload this file to hdfs folder named *spark_data*. - The sample uses a SQL database *spark_mssql_db* to create/update tables. Refer **data-virtualization/mssql_spark_connector_user_creation.ipynb** on steps to create this database anduser. Creating KeyTab file The following section shows how to generate principal and keytab. This assumes you have a SS19 Big Data Cluster installed with Windows AD contoller for domain AZDATA.LOCAL. One of the users is and the user is part of Domain Admin group. Create KeyTab file using ktpass 1. Login to the Windows AD controller with testusera1 credentials. 2. Open command prompt in Administrator mode. 3. Use ktpass to create a key tab. Refer [here](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/ktpass) for documentation on using ktpass. ```sh ktpass -out testusera1.keytab -mapUser -pass -mapOp set +DumpSalt -crypto AES256-SHA1 -ptype KRB5_NT_PRINCIPAL -princ ``` Note that principal name in ktpass is case sensitive. The command above generates a keytab file named testusera1.keytab. Transfer this file to hdfs folder in Big Data Cluster. In this sample we transfer the file to /user/testusera1/testusera1.keytab Create KeyTab file using kinit If you are on a linux machine kinit can be used as follows to create keytab. Note that you linux machine shoud be connected to the domain controler. 
``` sh ktutil ktutil : add_entry -password -p -k 1 -e arcfour-hmac-md5 Password for usera1@myDomain: ktutil : add_entry -password -p -k 1 -e des-cbc-md4 ktutil : wkt testusera1.keytab ``` ``` sh Check if keytab generated properly. Any error implies that keytab is not generated right. kinit -kt testusera1.keytab ``` Load Keytab to HDFS for use, you can do it from ADS, or use hadoop commandline inside a BDC container: ```sh hadoop fs -mkdir -p /user/testusera1/ hadoop fs -copyFromLocal -f testusera1.keytab /user/testusera1/testusera1.keytab ``` Configure Spark applicaion to point to the key tab file Here we configure spark to use the keytab file once the keytab is created and uploaded to HDFS (/user/testusera1/testusera1.keytab). Note the usage of "spark.files" : "/user/testusera1/testusera1.keytab". As a result of this configuration Spark driver distributes the file to all executors. Run the cell below to start the spark application.%%configure -f {"conf": { "spark.files" : "/user/testusera1/testusera1.keytab", "spark.executor.memory": "4g", "spark.driver.memory": "4g", "spark.executor.cores": 2, "spark.driver.cores": 1, "spark.executor.instances": 4 } }Read CSV into a data frame In this step we read the CSV into a data frame. This dataframe would then be written to SQL table using MSSQL Spark Connector#spark = SparkSession.builder.getOrCreate() sc.setLogLevel("INFO") #Read a file and then write it to the SQL table datafile = "/spark_data/AdultCensusIncome.csv" df = spark.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile) df.show(5) #Process this data. Very simple data cleanup steps. Replacing "-" with "_" in column names columns_new = [col.replace("-", "_") for col in df.columns] df = df.toDF(*columns_new) df.show(5)Starting Spark application(Part 1) Write and READ to/from SQL Table ( using Integrated Auth) - Write dataframe to SQL table to Master instance - Read SQL Table to Spark dataframe In both scenarions here we use integrated auth with principal\keytab file rather than username\password of the user.#Write from Spark to SQL table using MSSQL Spark Connector print("MSSQL Spark Connector write(overwrite) start ") servername = "jdbc:sqlserver://mssql.azdata.local:31433" dbname = "spark_mssql_db" security_spec = ";integratedSecurity=true;authenticationScheme=JavaKerberos;" url = servername + ";" + "databaseName=" + dbname + security_spec dbtable = "AdultCensus_test" principal = "" keytab = "/user/testusera1/testusera1.keytab" try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("overwrite") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("principal", principal) \ .option("keytab", keytab) \ .save() except ValueError as error : print("MSSQL Spark Connector write(overwrite) failed", error) print("MSSQL Connector write(overwrite) done ") #Read from SQL table using MSSQ Connector print("MSSQL Spark Connector read start ") jdbcDF = spark.read \ .format("com.microsoft.sqlserver.jdbc.spark") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("url", url) \ .option("dbtable", dbtable) \ .option("principal", principal) \ .option("keytab", keytab).load() jdbcDF.show(5) print("MSSQL Spark Connector read done")(PART 2) Write and READ to/from Data Pools ( using Integrated Auth) - Write dataframe to SQL external table in Data Pools in Big Data Cluste - Read SQL external Table to Spark dataframe User creation as follows ``` ```#Write from Spark to datapools using Spark 
Connector print("MSSQL Spark Connector write(overwrite) start ") servername = "jdbc:sqlserver://mssql.azdata.local:31433" dbname = "spark_mssql_db" security_spec = ";integratedSecurity=true;authenticationScheme=JavaKerberos;" url = servername + ";" + "databaseName=" + dbname + security_spec datapool_table = "AdultCensus_DataPoolTable" principal = "" keytab = "/user/testuser/testusera1.keytab" datasource_name = "connector_ds" try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("overwrite") \ .option("url", url) \ .option("dbtable", datapool_table) \ .option("principal", principal) \ .option("keytab", keytab) \ .option("dataPoolDataSource",datasource_name) \ .save() except ValueError as error : print("MSSQL Spark Connector write(overwrite) failed", error) print("MSSQL Connector write(overwrite) done ") #Read from SQL table using MSSQ Connector print("MSSQL Spark Connector read data pool external table start ") jdbcDF = spark.read \ .format("com.microsoft.sqlserver.jdbc.spark") \ .option("url", url) \ .option("dbtable", datapool_table) \ .option("url", url) \ .option("dbtable", dbtable) \ .option("principal", principal) \ .option("keytab", keytab).load() jdbcDF.show(5) print("MSSQL Connector read from data pool external table succeeded")Example usage of the Yin-Yang datasetimport torch import numpy as np import matplotlib.pyplot as plt from dataset import ClassificationTask from torch.utils.data import DataLoader %matplotlib inlineSetup datasets (training, validation and test set)dataset_train = ClassificationTask(size=5000, seed=42) dataset_validation = ClassificationTask(size=1000, seed=41) dataset_test = ClassificationTask(size=1000, seed=40)Setup PyTorch dataloadersbatchsize_train = 20 batchsize_eval = len(dataset_test) train_loader = DataLoader(dataset_train, batch_size=batchsize_train, shuffle=True) val_loader = DataLoader(dataset_validation, batch_size=batchsize_eval, shuffle=True) test_loader = DataLoader(dataset_test, batch_size=batchsize_eval, shuffle=False)Plot datafig, axes = plt.subplots(ncols=3, sharey=True, figsize=(15, 8)) titles = ['Training set', 'Validation set', 'Test set'] for i, loader in enumerate([train_loader, val_loader, test_loader]): axes[i].set_title(titles[i]) axes[i].set_aspect('equal', adjustable='box') xs = [] ys = [] cs = [] for batch, batch_labels in loader: for j, item in enumerate(batch): x1, y1, x2, y2 = item c = int(np.where(batch_labels[j] == 1)[0]) xs.append(x1) ys.append(y1) cs.append(c) xs = np.array(xs) ys = np.array(ys) cs = np.array(cs) axes[i].scatter(xs[cs == 0], ys[cs == 0], color='C0', edgecolor='k', alpha=0.7) axes[i].scatter(xs[cs == 1], ys[cs == 1], color='C1', edgecolor='k', alpha=0.7) axes[i].scatter(xs[cs == 2], ys[cs == 2], color='C2', edgecolor='k', alpha=0.7) axes[i].set_xlabel('x1') if i == 0: axes[i].set_ylabel('y1')Setup ANNclass Net(torch.nn.Module): def __init__(self, network_layout): super(Net, self).__init__() self.n_inputs = network_layout['n_inputs'] self.n_layers = network_layout['n_layers'] self.layer_sizes = network_layout['layer_sizes'] self.layers = torch.nn.ModuleList() layer = torch.nn.Linear(self.n_inputs, self.layer_sizes[0], bias=True) self.layers.append(layer) for i in range(self.n_layers-1): layer = torch.nn.Linear(self.layer_sizes[i], self.layer_sizes[i+1], bias=True) self.layers.append(layer) return def forward(self, x): x_hidden = [] for i in range(self.n_layers): x = self.layers[i](x) if not i == (self.n_layers-1): relu = torch.nn.ReLU() x = relu(x) x_hidden.append(x) return x 
torch.manual_seed(12345) # ANN with one hidden layer (with 120 neurons) network_layout = { 'n_inputs': 4, 'n_layers': 2, 'layer_sizes': [120, 3], } net = Net(network_layout) # Linear classifier for reference shallow_network_layout = { 'n_inputs': 4, 'n_layers': 1, 'layer_sizes': [3], } linear_classifier = Net(shallow_network_layout)Train ANN# used to determine validation accuracy after each epoch in training def validation_step(net, criterion, loader): with torch.no_grad(): num_correct = 0 num_shown = 0 for j, data in enumerate(loader): inputs, labels = data # need to convert to float32 because data is in float64 inputs = inputs.float() outputs = net(inputs) winner = outputs.argmax(1) num_correct += len(outputs[winner == labels.argmax(1)]) num_shown += len(labels) accuracy = float(num_correct) / num_shown return accuracy # set training parameters n_epochs = 500 learning_rate = 0.1 val_accuracies = [] train_accuracies = [] # setup loss and optimizer criterion = torch.nn.MSELoss() optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate) # train for n_epochs for epoch in range(n_epochs): val_acc = validation_step(net, criterion, val_loader) if epoch % 25 == 0: print('Validation accuracy after {0} epochs: {1}'.format(epoch, val_acc)) val_accuracies.append(val_acc) num_correct = 0 num_shown = 0 for j, data in enumerate(train_loader): inputs, labels = data # need to convert to float32 because data is in float64 inputs = inputs.float() labels = labels.float() # zero the parameter gradients optimizer.zero_grad() # forward pass outputs = net(inputs) winner = outputs.argmax(1) num_correct += len(outputs[outputs.argmax(1) == labels.argmax(1)]) num_shown += len(labels) loss = criterion(outputs, labels) loss.backward() optimizer.step() accuracy = float(num_correct) / num_shown train_accuracies.append(accuracy) # after training evaluate on test set test_acc = validation_step(net, criterion, test_loader) print('#############################') print('Final test accuracy:', test_acc) print('#############################')Validation accuracy after 0 epochs: 0.316 Validation accuracy after 25 epochs: 0.834 Validation accuracy after 50 epochs: 0.883 Validation accuracy after 75 epochs: 0.946 Validation accuracy after 100 epochs: 0.952 Validation accuracy after 125 epochs: 0.942 Validation accuracy after 150 epochs: 0.958 Validation accuracy after 175 epochs: 0.927 Validation accuracy after 200 epochs: 0.959 Validation accuracy after 225 epochs: 0.951 Validation accuracy after 250 epochs: 0.948 Validation accuracy after 275 epochs: 0.952 Validation accuracy after 300 epochs: 0.963 Validation accuracy after 325 epochs: 0.978 Validation accuracy after 350 epochs: 0.967 Validation accuracy after 375 epochs: 0.948 Validation accuracy after 400 epochs: 0.96 Validation accuracy after 425 epochs: 0.952 Validation accuracy after 450 epochs: 0.963 Validation accuracy after 475 epochs: 0.953 ############################# Final test accuracy: 0.978 #############################Plot training resultsplt.figure(figsize=(10,8)) plt.plot(train_accuracies, label='train acc') plt.plot(val_accuracies, label='val acc') plt.axhline(test_acc, ls='--', color='grey', label='test acc') plt.xlabel('epochs') plt.ylabel('accuracy') plt.ylim(0.3, 1.05) plt.legend()Train Linear classifier as referenceval_accuracies = [] train_accuracies = [] # setup loss and optimizer criterion = torch.nn.MSELoss() optimizer = torch.optim.SGD(linear_classifier.parameters(), lr=learning_rate) # train for n_epochs for epoch in range(n_epochs): val_acc 
= validation_step(linear_classifier, criterion, val_loader) if epoch % 25 == 0: print('Validation accuracy of linear classifier after {0} epochs: {1}'.format(epoch, val_acc)) val_accuracies.append(val_acc) num_correct = 0 num_shown = 0 for j, data in enumerate(train_loader): inputs, labels = data # need to convert to float32 because data is in float64 inputs = inputs.float() labels = labels.float() # zero the parameter gradients optimizer.zero_grad() # forward pass outputs = linear_classifier(inputs) num_correct += len(outputs[outputs.argmax(1) == labels.argmax(1)]) num_shown += len(labels) loss = criterion(outputs, labels) loss.backward() optimizer.step() accuracy = float(num_correct) / num_shown train_accuracies.append(accuracy) # after training evaluate on test set test_acc = validation_step(linear_classifier, criterion, test_loader) print('#############################') print('Final test accuracy linear classifier:', test_acc) print('#############################') plt.figure(figsize=(10,8)) plt.plot(train_accuracies, label='train acc (lin classifier)') plt.plot(val_accuracies, label='val acc (lin classifier)') plt.axhline(test_acc, ls='--', color='grey', label='test acc (lin classifier)') plt.xlabel('epochs') plt.ylabel('accuracy') plt.ylim(0.3, 1.05) plt.legend()EDA Importing Librariesimport pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') %matplotlib inlineImporting Datasetdataset = pd.read_csv('Cleaned_Data.csv') dataset.head().TCrash Year Distributionsns.countplot(x='Year', palette="pastel", data=dataset) plt.gcf().set_size_inches(20,10) plt.title('Year Distribution') plt.xlabel('Year') plt.xticks(rotation='vertical') plt.ylabel('Count') plt.show()Observations- We observe that during the years 2017, 2018 we have an average of 175000 accident.- There is a decrease in the number of accidents in 2020 and 2021 due to the pandemic. Monthly Distribution of Accidentssns.catplot(y="Year", hue="Month", kind="count", palette="pastel", data=dataset) plt.title('Monthly Distribution of Accidents') plt.xlabel('Count') plt.ylabel('Monthly') plt.gcf().set_size_inches(20,20) plt.show()Observation- From the above graph we can observe that their is a sudden spike in the number of accidents in the months of May and June. Time of Accidents Distributionsns.countplot(x='Hour', palette="pastel", data=dataset) plt.gcf().set_size_inches(20,10) plt.title('Time Distribution') plt.xlabel('Time') plt.xticks(rotation='vertical') plt.ylabel('Count') plt.show()Observations- Contrast to common conception most of the accidents took place in the afternoon. Contributing factor Distributionsns.countplot(x='CONTRIBUTING_FACTOR_1', palette="pastel", data=dataset, order=dataset.CONTRIBUTING_FACTOR_1.value_counts().index) plt.gcf().set_size_inches(20,10) plt.title('Contributing factor Distribution') plt.xlabel('Contributing factors') plt.xticks(rotation='vertical') plt.ylabel('Count') plt.show()Observations- Lack of attention was the major contributor for accidents followed by following too closely, not DUI or DWI. 
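To put numbers behind the bar chart above, a short tally of the same column can be printed; this is an added sketch that reuses the `dataset` DataFrame and the `CONTRIBUTING_FACTOR_1` column already used in the plot.

```python
# Tally the contributing factors to quantify the ranking seen in the count plot:
# absolute counts and the share of all recorded crashes for the ten most common factors.
top_factors = dataset['CONTRIBUTING_FACTOR_1'].value_counts()
print(top_factors.head(10))
print((top_factors / top_factors.sum()).head(10).round(3))
```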
Point of Impact and Pre Crash action correlation.sns.catplot(y="POINT_OF_IMPACT", hue="PRE_CRASH", kind="count", data=dataset) plt.title('Point of Impact and Pre Crash action correlation') plt.xlabel('Count') plt.ylabel('Point of Impact') plt.gcf().set_size_inches(30,15) plt.show()Observations- For the accidents having damages on the front end the driver was going straight.- For the accidents having damages on the back end the driver was mostly backing the car. Point of Impact and Contributing Factor correlationsns.catplot(y="POINT_OF_IMPACT", hue="CONTRIBUTING_FACTOR_1", kind="count", data=dataset) plt.title('Point of Impact and Contributing Factor correlation') plt.xlabel('Count') plt.ylabel('Point of Impact') plt.gcf().set_size_inches(30,15) plt.show()Observations- For the accidents having damages on the front end the Contributing Factor was drivers inattention.- For the accidents having damages on the back end the Contributing Factor was the driver was backing the car unsafely.- For the accidents having damages on the sides end the Contributing Factor was unsafe lane changing. Pre Crash and Contributing Factor correlationsns.catplot(y="PRE_CRASH", hue="CONTRIBUTING_FACTOR_1", kind="count", data=dataset) plt.title('Pre Crash and Contributing Factor correlation') plt.xlabel('Count') plt.ylabel('Pre Crash Action') plt.gcf().set_size_inches(30,15) plt.show()Vehicle Make Distributionsns.countplot(x='MAKE', palette="pastel", data=dataset, order=dataset.MAKE.value_counts().index) plt.gcf().set_size_inches(20,10) plt.title('Vehicle Make Distribution') plt.xlabel('Vehicle Make') plt.xticks(rotation='vertical') plt.ylabel('Count') plt.show()How old the car is Distributionsns.countplot(x='how_old', palette="pastel", data=dataset) plt.gcf().set_size_inches(20,10) plt.title('how old the car is Distribution') plt.xlabel('years') plt.xticks(rotation='vertical') plt.ylabel('Count') plt.show()Vehicle Occupants and Contributing Factor Distribution.sns.catplot(y="VEHICLE_OCCUPANTS", hue="CONTRIBUTING_FACTOR_1", kind="count", palette="pastel", data=dataset) plt.title('Monthly Distribution') plt.xlabel('Count') plt.ylabel('Monthly') plt.gcf().set_size_inches(30,20) plt.show()Observations- When there is only one person in the vehicle lack of attention and over speeding were the causes for accidentssns.catplot(x="DRIVER_SEX", hue="CONTRIBUTING_FACTOR_1", kind="count", palette="pastel", data=dataset) plt.title('Sex and Contributing Factors') plt.xlabel('Drivers sex') plt.ylabel('Count') plt.gcf().set_size_inches(20,15) plt.show()Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) based on Jupyter notebooks by [Dynamical Systems APMA 4101](https://github.com/mspieg/dynamical-systems) and from his course [Introduction to numerical methods](https://github.com/mandli/intro-numerical-methods), notebook style sheet by , [Engineering Computations](https://github.com/engineersCode)# Execute this cell to load the notebook's style sheet, then ignore it from IPython.core.display import HTML css_file = '../style/custom.css' HTML(open(css_file, "r").read())Simplified Convection Problem: The Lorenz EquationsThe Lorenz Equations are a 3-D dynamical system that is a simplified model of Rayleigh-Benard thermal convection. 
They are derived and described in detail in Edward Lorenz' 1963 paper [Deterministic Nonperiodic Flow](http://journals.ametsoc.org/doi/pdf/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2) in the Journal of Atmospheric Science. Here we will just sketch out the key points of the derivation. A more complete derivation can be found [here](https://www.math.uni-hamburg.de/home/lauterbach/scripts/seminar03/prill.pdf)The key idea is that the Lorenz Equations result from a severely truncated spectral approximation to the 2-D equations for incompressible thermal convection in stream-function/vorticity form. These equations govern the flow of a buouyant incompressible fluid with a temperature dependent density in a layer of depth $h$, that is heated from below and cooled from the top. Governing EquationsThe full coupled set of scaled PDE's describe the coupling of incompressible 2D Navier Stokes flow with an advection-diffusion equation for temperature, and can be written in dimensionless form as,$$ \frac{1}{\mathrm{Pr}}\left[ \frac{\partial \omega}{\partial t} + \vec{v}\cdot\nabla\omega\right] = \nabla^2\omega + \mathrm{Ra}\frac{\partial T}{\partial x}$$$$ \nabla^2 \psi = -\omega$$$$ \frac{\partial T}{\partial t} + \vec{v}\cdot\nabla T = \nabla^2 T$$where$$ \vec{v}=(u,0,w) = \nabla\times\psi\hat{j}=(-\frac{\partial\psi}{\partial z}, 0, \frac{\partial\psi}{\partial x})$$ is the fluid velocity field (which in this form is exactly incompressible with $\nabla\cdot\vec{v}=0$). $\psi$ is the "Streamfunction" whose contours are tangent to the fluid trajectories at all times. The vorticity,$$ \omega = \bf{\vec{\omega} \cdot \hat{j}} = (\nabla\times\vec{v}) \cdot \hat{j}$$ measures the local rate of rotation of the fluid, and is driven by horizontal variations in temperature (actually density). $\hat{j}=(0,1,0)^T$ denotes the unit vector in y-direction. Boundary conditions for temperature are $T=1$ on the bottom of the layer and $T=0$ on the top. In the absence of any fluid motion ($\omega=\vec{v}=0$), the temperature field is just a steady conductive ramp with $$ T = 1 - z$$Thus we can also solve for the perturbation $\theta(x,z,t)$ away from this steady state by substituting$$ T = 1 - z + \theta(x,z,t)$$into the energy equation to solve for the perturbed temperature using$$ \frac{\partial \theta}{\partial t} + \vec{v}\cdot\nabla \theta = \nabla^2\theta + w$$ ParametersIn dimensionless form, these equations have two important dimensionless numbers that control the structure and behavior of the convection. **The Prandtl Number**The first is the "Prandtl Number", $\mathrm{Pr} = \frac{\nu}{\kappa}$ which is the ratio of the fluid viscosity $\nu$ to the thermal diffusivitiy $\kappa$. Since both vorticity and temperature both obey advection diffusion equations (and viscosity acts to diffuse momentum/vorticity), the Prandtl number is a measure of whether momemtum or energy is more dissipative. **The Rayleigh Number**The second key parameter is the Rayleigh number $$ \mathrm{Ra} = \frac{g\alpha(T_1 - T_0)h^3}{\nu\kappa}$$which measures the balance of forces that drive convection (i.e. gravity, or temperature differences), to those that damp convection such as viscosity and thermal diffusivity. Systems with large Rayleigh numbers are prone to vigorous convection. However, it was shown by Rayleigh, that there is a critical value of the Rayleigh Number $\mathrm{Ra}_c$ below which there is no convection. 
This value depends on the size of the convection cell and boundary conditions for stress on the fluid, however, for the simplest case of a layer with no-slip top and bottom boundary conditions and cell with aspect ratio $a=h/L$ (with $h$ the layer depth and $L$ the width of the convection cell), then the critical Ra number is$$\mathrm{Ra}_c = \pi^4 (1 + a^2)^3/a^2$$which has a minimum value for $a_{min}^2=1/2$.# load libraries import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from pylab import rcParams from pylab import rcParams from matplotlib import rc # define font size FSize = 18 font = {'color': 'black', 'weight': 'normal', 'size': FSize} mpl.rc('xtick', labelsize=FSize) mpl.rc('ytick', labelsize=FSize) # Show Ra vs a a = np.linspace(0.01,2.) Rc = np.pi**4*(1. + a**2)**3/a**2 plt.figure() plt.semilogy(a,Rc) amin = np.sqrt(1./2.) Rcmin = np.pi**4*(1. + amin**2)**3/amin**2 plt.semilogy(amin,Rcmin,'ro') plt.xlabel('a', fontdict=font) plt.ylabel('Ra$_c$', fontdict=font) plt.title('Critical Rayleigh Number', fontdict=font) plt.show()Spectral decompositionNext, we expand the streamfunction and temperature fields in terms of a highly truncated Fourier Series where the streamfunction contains one cellular mode$$ \psi(x,z,t) = X(t)\sin(a\pi x)\sin(\pi z)$$and temperature has two modes$$ \theta(x,z,t) = Y(t)\cos(a\pi x)\sin(\pi z) - Z(t)\sin(2\pi z)$$Here, $X(t)$, $Y(t)$ and $Z(t)$ are the time dependent amplitudes of each mode. Defining\begin{equation}\begin{split}\phi(x,z) &= \sin(a\pi x)\sin(\pi z)\\\theta_0(x,z) &= \cos(a\pi x)\sin(\pi z)\\\theta_1(z) &= -\sin(2\pi z)\end{split}\notag\end{equation}streamfunction and temperature fields simplify to:\begin{equation}\begin{split}\psi(x,z,t) &= X(t) \phi(x,z)\\\theta(x,z,t) &= Y(t) \theta_0(x,z) + Z(t) \theta_1(z)\\\end{split}\notag\end{equation}Using $a = a_{min} = \sqrt{0.5}$ for the minimum critical Rayleigh number $Ra_c^{min}$, the spatial components of each mode looks likea = np.sqrt(0.5) x0 = np.linspace(0,1./a) z0 = np.linspace(0.,1.) x,z = np.meshgrid(x0,z0) psi = np.sin(a*np.pi*x)*np.sin(np.pi*z) theta0 = np.cos(a*np.pi*x)*np.sin(np.pi*z) theta1 = -np.sin(2.*np.pi*z) plt.figure() plt.figure(figsize=(10,8)) plt.subplot(2,2,1) plt.contourf(x,z,psi) plt.title(r'$\Phi = sin(a \pi x)sin(\pi z)$', fontdict=font) plt.gca().set_aspect('equal') plt.subplot(2,2,3) plt.contourf(x,z,theta0) plt.title(r'$\theta_0 = cos(a \pi x)sin(\pi z)$', fontdict=font) plt.gca().set_aspect('equal') plt.subplot(2,2,4) plt.contourf(x,z,theta1) plt.title(r'$\theta_1 = -sin(2 \pi z)$', fontdict=font) plt.gca().set_aspect('equal') plt.tight_layout() plt.show()For our initial conditions $X(0) = 2$, $Y(0) = 3$, $Z(0) = 4$, the streamfunction $\psi(x,z,t)$ and temperature field $\theta(x,z,t)$\begin{equation}\begin{split}\psi(x,z,t) &= X(t)\sin(a\pi x)\sin(\pi z)\\\theta(x,z,t) & = Y(t)\cos(a\pi x)\sin(\pi z) - Z(t)\sin(2\pi z)\\\end{split}\notag\end{equation}become \begin{equation}\begin{split}\psi(x,z,0) &= 2\sin(a\pi x)\sin(\pi z)\\\theta(x,z,0) &= 3\cos(a\pi x)\sin(\pi z) - 4\sin(2\pi z)\\\end{split}\notag\end{equation}# Define figure size plt.figure() plt.figure(figsize=(20,20)) # Initial Streamfunction psi and velocity field plt.subplot(1,2,1) plt.contourf(x,z,2.*psi,cmap='viridis_r') # Velocity field U = - 2. * np.pi * np.sin(a*np.pi*x) * np.cos(np.pi*z) V = 2. 
* a * np.pi * np.cos(a*np.pi*x) * np.sin(np.pi*z) plt.quiver(x,z,U,V) plt.gca().set_aspect('equal') plt.title('Streamfunction $\psi$', fontdict=font) # Initial temperature field plt.subplot(1,2,2) plt.contourf(x,z,3*theta0 + 4*theta1,cmap='magma') plt.gca().set_aspect('equal') plt.title(r'Temperature $\theta$', fontdict=font) plt.tight_layout() plt.show()Create Model Input DataThis file queries the tokenized trajectory tables (all_tokens, vs_tokens), endpoint table (pred_endpt) and ICU-specific input data table (icu_tokens) to create tensorflow record files that can be used to generate embeddings and final models.import datetime, os, boto3, pickle, h5py, sys import numpy as np import pandas as pd import tensorflow as tf import tensorflow.keras as tfk import matplotlib.pyplot as plt from contextlib import ExitStack from itertools import product, repeat from sklearn.model_selection import train_test_split, KFold from IPython.display import display, HTML from collections import Counter, defaultdict from utils.connections import connection, cursor, gluedatabase, processed_db, upload_file, processed_data_bucket, download_file #from utils.datagen import PROCESSED_DATAPATH, MODEL_INPUT_DATAPATH, RESULTFILE_DATAPATH, strategies, targets, get_datafile from utils.datagen import PROCESSED_DATAPATH, MODEL_INPUT_DATAPATH from utils.utils import read_data, dump_data def get_vocabulary(table): query = f'select label, count(*) from {processed_db}.{table} group by label' vocab = pd.read_sql(query, connection) vocab_to_int = {v:i for i, v in enumerate(vocab[vocab._col1>=20].sort_values('_col1').label)} int_to_vocab = {i:v for i, v in enumerate(vocab[vocab._col1>=20].sort_values('_col1').label)} rare = max(vocab_to_int.values()) + 1 def rare_str(): return 'rare' def rare_int(): return rare i2v = defaultdict(rare_str, int_to_vocab) v2i = defaultdict(rare_int, vocab_to_int) return v2i, i2v def get_full_vocab(): vocabs = [] for table in ['all_tokens', 'vs_tokens']: query = f'select label, count(*) from {processed_db}.{table} group by label' vocabs.append(pd.read_sql(query, connection)) vocab = pd.concat(vocabs) vocab_to_int = {v:i for i, v in enumerate(vocab[vocab._col1>=20].sort_values('_col1').label)} int_to_vocab = {i:v for i, v in enumerate(vocab[vocab._col1>=20].sort_values('_col1').label)} rare = max(vocab_to_int.values()) + 1 def rare_str(): return 'rare' def rare_int(): return rare i2v = defaultdict(rare_str, int_to_vocab) v2i = defaultdict(rare_int, vocab_to_int) return v2i, i2v def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _int_list_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) def _float_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _timestep_feature(value): step_list = [] for v in value: step_list.append(tf.train.Feature(int64_list=tf.train.Int64List(value=v))) step_feature = tf.train.FeatureList(feature=step_list) return tf.train.FeatureLists(feature_list={'data': step_feature}) np.random.seed(42) def get_train_validation_splits(RELOAD=False): # we are splitting by subject, not admission, so as not to leak data across training and validation # sets, although in this instance it's likely that would harm sensitivity rather than help, as for # death endpoints, an earlier admission would have definitionally the opposite outcome for the # overlapped data, rather than the same 
outcome. if RELOAD: query = f'select distinct subject_id from {processed_db}.full_endpoints' subjects = np.array(pd.read_sql(query, connection)) kf = KFold(n_splits=5, shuffle=True) kfold_split = list(kf.split(subjects)) train_ids = [subjects[k[0]] for k in kfold_split] valid_ids = [subjects[k[1]] for k in kfold_split] with open(os.path.join(PROCESSED_DATAPATH, 'training_validation_splits'), 'wb') as outfile: pickle.dump((train_ids, valid_ids), outfile) else: with open(os.path.join(PROCESSED_DATAPATH, 'training_validation_splits'), 'rb') as infile: train_ids, valid_ids = pickle.load(infile) return train_ids, valid_ids vs_v2i, vs_i2v = get_full_vocab() clin_v2i, clin_i2v = get_vocabulary('all_tokens') train_ids, valid_ids = get_train_validation_splits(False) def get_oversample_dist(): # this function figures out the required multipliers to generate the correct # oversample distributions (both simple oversample and time to event oversample) # for all included endpoints query = f'select * from {processed_db}.full_endpoints' full_endpoint_df = pd.read_sql(query, connection) six_hours = np.timedelta64(6, 'h')/np.timedelta64(1, 'ns') seven_days = np.timedelta64(7, 'D')/np.timedelta64(1, 'ns') long_enough = full_endpoint_df[(full_endpoint_df.nth==1)& (full_endpoint_df.duration >= six_hours) & # duration is in ns in this df (((full_endpoint_df.intime + np.timedelta64(6, 'h')) < full_endpoint_df.deathtime) |(full_endpoint_df.hospital_expire_flag == 0))] in_hosp_death_rate = len(long_enough[long_enough.hospital_expire_flag==1])/len(long_enough) in_icu_death_rate = len(long_enough[(long_enough.hospital_expire_flag==1) & (long_enough.deathtime <= long_enough.outtime)])/len(long_enough) long_icu_stay_rate = len(long_enough[long_enough.duration >= seven_days])/len(long_enough) readmitted = full_endpoint_df[full_endpoint_df.hadm_id.isin(long_enough.hadm_id.unique()) & (full_endpoint_df.nth==2)] readmitted_rate = len(readmitted)/len(long_enough) print(f'Death in hospital: {in_hosp_death_rate}, Death in this ICU: {in_icu_death_rate}, Long ICU stay: {long_icu_stay_rate}, ICU readmission: {readmitted_rate}') # we are targeting oversample to 50% of data, so what multiplier gets us to approximately # that distribution for each endpoint? 
oversampling_rates = {'hosp_death': round(0.5/in_hosp_death_rate, 0), 'icu_death': round(0.5/in_icu_death_rate, 0), 'long_icu': round(0.5/long_icu_stay_rate, 0), 'icu_readm': round(0.5/readmitted_rate, 0)} print(oversampling_rates) first_icu_adms = full_endpoint_df[full_endpoint_df.nth == 1].copy() first_icu_adms['predtime'] = first_icu_adms.intime + np.timedelta64(6, 'h') # get distribution of time to event values in hours for weighting strategy death_times = (first_icu_adms.deathtime - first_icu_adms.predtime)[first_icu_adms.hospital_expire_flag == 1]/np.timedelta64(1, 'h') in_icu_death_times = (first_icu_adms.deathtime - first_icu_adms.predtime)[(first_icu_adms.hospital_expire_flag == 1) & (first_icu_adms.deathtime <= first_icu_adms.outtime)]/np.timedelta64(1, 'h') # distribution of admission time in weeks if > 1 week duration_dist = np.histogram([d//seven_days if d//seven_days < 8 else 8 for d in first_icu_adms.duration], bins=9)[0][1:] # getting time to readmission full_endpoint_df = full_endpoint_df[['hadm_id', 'new_icu_stay', 'intime', 'outtime', 'duration', 'subject_id', 'hospital_expire_flag', 'admittime', 'dischtime', 'deathtime', 'admission_age']].drop_duplicates() first_admissions = full_endpoint_df.sort_values('intime').groupby(['hadm_id']).first().reset_index() subsequent = full_endpoint_df[~full_endpoint_df.new_icu_stay.isin(first_admissions.new_icu_stay)] readmissions = subsequent[['hadm_id', 'intime']].sort_values('intime').groupby(['hadm_id']).first().reset_index() readmissions = pd.merge(full_endpoint_df[['hadm_id','outtime']], readmissions, how='left') full_endpoint_df['readm'] = np.where(full_endpoint_df.hadm_id.isin(subsequent.hadm_id), 1, 0) full_endpoint_df['time_to_readm'] = np.where((full_endpoint_df.readm==1)&(full_endpoint_df.new_icu_stay.isin(first_admissions.new_icu_stay)), (readmissions.intime - readmissions.outtime)//np.timedelta64(24, 'h'), -1) readmit_times = full_endpoint_df.time_to_readm # we make the assumption that once the time to event is more than 1 week in the future, the relationship between # current patient status and endpoint time (not endpoint occurrance, just timing) becomes less deterministic # so focus on over-weighting those subjects with the endpoint within 1 week most highly death_time_dist = np.histogram([d//24 if d <= 168 else 7 for d in death_times], bins=8)[0] in_icu_death_time_dist = np.histogram([d//24 if d <= 168 else 7 for d in in_icu_death_times], bins=8)[0] readmit_time_dist = np.histogram([d//24 if d <= 168 else 7 for d in readmit_times], bins=8)[0] # targeting 5* oversampling for in-hosp death target = 5*sum(death_time_dist) in_hosp_multiplier = [9,8,7,6,5,4,3,2] print(sum(in_hosp_multiplier*death_time_dist)/target) # targeting 8* oversampling for in-ICU death target = 8*sum(death_time_dist) in_icu_multiplier = [16,15,14,13,12,11,10,9] print(sum(in_icu_multiplier*in_icu_death_time_dist)/target) # targeting 3* oversampling for long-ICU but there is no 'time-to' component, so we # instead overweight by duration - oversampling the longest stays by the most target = 3*sum(duration_dist) long_stay_multiplier = [2,3,4,5,6,7,8,9] print(sum(long_stay_multiplier*duration_dist)/target) target = 8*sum(death_time_dist) in_icu_multiplier = [16,15,14,13,12,11,10,9] print(sum(in_icu_multiplier*in_icu_death_time_dist)/target) # targeting 4* oversampling for readmission target = 4*sum(readmit_time_dist) readmit_multiplier = [4,3,3,3,3,3,3,3] print(sum(readmit_multiplier*readmit_time_dist)/target) return oversampling_rates, in_hosp_multiplier, 
in_icu_multiplier, readmit_multiplier, long_stay_multiplier oversampling_rates, in_hosp_multiplier, in_icu_multiplier, readmit_multiplier, long_stay_multiplier = get_oversample_dist() # dictionary of {endpoint : associated time weighting input} variables endpoint_lookup = {'hosp_death': 'tt_dth', 'icu_death': 'tt_dth', 'long_icu': 'duration', 'icu_readm': 'tt_readm'} oversampling_multipliers = {'hosp_death': in_hosp_multiplier, 'icu_death': in_icu_multiplier, 'icu_readm': readmit_multiplier, 'long_icu': long_stay_multiplier} def get_stepped_data(data, times): # converts a serial list of events into steps of events that all occur # within the same 1-hr span evt_list_stepped = [] timesteps = list(reversed(sorted(np.unique(times))))[-MAX_TIMESTEPS:] for step in timesteps: x_stepped = data[np.where(times==step)[0]][-TIMESTEP_WIDTH:] evt_list_stepped.append(np.pad(x_stepped, (TIMESTEP_WIDTH-len(x_stepped), 0), 'constant')) if len(evt_list_stepped) > 0: return np.vstack(evt_list_stepped), timesteps else: return [], timesteps def augment(serial, stepped, aug_count): # randomly creates 10*aug_count augmentated versions of the input trajectory # by truncating / masking / shuffling combinations of events within the list try: z = [shuffle_stepped(stepped, len(serial)) for _ in range(aug_count)] augmentation_selector = np.random.choice([0, 1], 10*aug_count, p=[0.7, 0.3], replace=True) first_data = min(np.nonzero(serial)[0]) data_elems = len(serial[first_data:]) x = [mask_serial(z[i//10], first_data, data_elems) for i, x in enumerate(augmentation_selector) if x == 0] y = [truncate_serial(z[i//10], first_data, data_elems) for i, x in enumerate(augmentation_selector) if x == 1] return x + y except: return [serial.astype(int) for _ in range(aug_count*20)] def mask_serial(serial, first_data, data_elems): # removes somewhere between 1 and half the number of elements in the list - random mask mask_num = np.random.randint(1, max(2, data_elems//2)) mask = np.random.choice(list(range(first_data, len(serial))), data_elems - mask_num, replace=False) return np.hstack([[0]*(mask_num+first_data), serial[sorted(mask)]]).astype(np.int64) def shuffle_stepped(stepped, n): # shuffles events that occur within the same 1hr period and reassembles into a serial trajectory try: for i in range(stepped.shape[0]): nz = np.nonzero(stepped[i])[0] if len(nz) > 0: first_data = min(nz) np.random.shuffle(stepped[i][first_data:]) s = stepped[np.nonzero(stepped)].flatten() return np.hstack([[0]*(max(0, n-len(s))), s[-n:]]).astype(np.int64) except: return np.array(stepped) def truncate_serial(serial, first_data, data_elems): # removes somewhere between 1 and a third of the number of elements in the list - drops oldest events truncate_num = np.random.randint(1, max(2, data_elems//3)) return np.hstack([[0]*(first_data+truncate_num), serial[first_data + truncate_num:]]).astype(np.int64) def get_vocab(x, cat): if cat=='clin': return int(clin_v2i[x]) return int(vs_v2i[x]) def get_data(traj, predtime, cat): # gets portion of trajectory that was available for prediction at a given prediction time traj = traj[traj.time < predtime].sort_values('time') serial_data = np.array([get_vocab(x, cat) for x in traj.label]) serial_times = np.array([round((predtime-t)/np.timedelta64(1, 'h'), 0) for t in traj.time]) data_stepped, times_stepped = get_stepped_data(serial_data, serial_times) serial_data = np.pad(serial_data[-SERIAL_TIMESTEPS:], (SERIAL_TIMESTEPS-len(serial_data[-SERIAL_TIMESTEPS:]), 0), mode='constant') serial_times = 
np.pad(serial_times[-SERIAL_TIMESTEPS:], (SERIAL_TIMESTEPS-len(serial_times[-SERIAL_TIMESTEPS:]), 0), mode='constant') return serial_data, serial_times, data_stepped, times_stepped def get_feature_dict(ct, vt, it, endpoints): # creates feature dictionary for tfrecord file from clincal trajectory, vital signs # trajectory and endpoints at a given prediction time predtime = endpoints.predtime clin_data, clin_times, clin_stepped_data, clin_stepped_times = get_data(ct, predtime, 'clin') vs_data, vs_times, vs_stepped_data, vs_stepped_times = get_data(vt, predtime, 'vs') hist_icu = len(it[(it.time= seven_days, 1, 0) full_endpoint_df['icu_death'] = np.where((full_endpoint_df.hospital_expire_flag==1)&(full_endpoint_df.deathtime <= full_endpoint_df.outtime), 1, 0) first_admissions = full_endpoint_df.sort_values('intime').groupby(['hadm_id']).first().reset_index() subsequent = full_endpoint_df[~full_endpoint_df.new_icu_stay.isin(first_admissions.new_icu_stay)] readmissions = subsequent[['hadm_id', 'intime']].sort_values('intime').groupby(['hadm_id']).first().reset_index() readmissions = pd.merge(full_endpoint_df[['hadm_id','outtime']], readmissions, how='left') full_endpoint_df['readm'] = np.where(full_endpoint_df.hadm_id.isin(subsequent.hadm_id), 1, 0) full_endpoint_df['time_to_readm'] = np.where((full_endpoint_df.readm==1)&(full_endpoint_df.new_icu_stay.isin(first_admissions.new_icu_stay)), (readmissions.intime - readmissions.outtime)//np.timedelta64(24, 'h'), -1) full_endpoint_df['time_to_death'] = np.where(full_endpoint_df.hospital_expire_flag==1, (full_endpoint_df.deathtime - full_endpoint_df.predtime)//np.timedelta64(24, 'h'), -1) full_endpoint_df['duration_wk'] = full_endpoint_df.duration//(np.timedelta64(7, 'D')//np.timedelta64(1, 'ns'))-1 return icu_traj, full_endpoint_df SERIAL_TIMESTEPS = 500 MAX_TIMESTEPS = 200 TIMESTEP_WIDTH = 100 import copy def serialize_data(): # combines data from db into basic feature_dict that is ready for data augmentation processes offset = 500 icu_traj, full_endpoint_df = get_icu_endpt() subject_list = full_endpoint_df.subject_id.unique() # batching data into managable chunks of length (offset) subjects for i in range(0, len(subject_list), offset): data_list = {} subject_subset = tuple(subject_list[i:i+offset]) query_clin = f'select * from {processed_db}.all_tokens where subject_id in {subject_subset}' query_vs = f'select * from {processed_db}.vs_tokens where subject_id in {subject_subset}' clin_tokenized_traj = pd.read_sql(query_clin, connection) vs_tokenized_traj = pd.read_sql(query_vs, connection) print(f'selected data batch {i//offset} ({i} of {len(subject_list)})') for j, admission in enumerate(clin_tokenized_traj.hadm_id.unique()): ct = clin_tokenized_traj[clin_tokenized_traj.hadm_id == admission] vt = vs_tokenized_traj[vs_tokenized_traj.hadm_id == admission] it = icu_traj[icu_traj.hadm_id == admission] endpoints = full_endpoint_df[full_endpoint_df.hadm_id == admission].sort_values('intime') if len(endpoints)>0: endpoints = endpoints.iloc[0] data_list[admission] = (get_feature_dict(ct, pd.concat([ct, vt]), it, endpoints)) dump_data(os.path.join(PROCESSED_DATAPATH, f'serialized_{i}'), data_list) def get_feature_value(feature, label): # for convenience because tfrecord feature dictionaries are annoying return feature[label].int64_list.value[0] def get_multiplier(value, target, strategy): # lookup appropriate multiplier according to endpoint value, endpoint target # and weighting strategy if value < 0: return 1 if strategy == 'basic': return 
int(oversampling_rates[target]) lookup = min(value, len(oversampling_multipliers[target]) - 1) return int(oversampling_multipliers[target][lookup]) def do_oversampling(strategy): # strategy is either 'basic' or 'tte' - used to select the multiplier for positive # class members - this can be either weighted to time to event or a single rate # for all minority class serialized_filelist = [f for f in os.listdir(PROCESSED_DATAPATH) if 'serialized' in f] for filename in serialized_filelist: basic_data = read_data(os.path.join(PROCESSED_DATAPATH, filename)) #oversampled_data = {t:{'train':{}, 'valid':{}} for t in oversampling_rates.keys()} augmented_data = {t:{'train':{}, 'valid':{}} for t in oversampling_rates.keys()} for admission in basic_data.keys(): feature_dict, clin_data, vs_data, clin_stepped_data, vs_stepped_data = basic_data[admission] for target in oversampling_rates.keys(): # insert original data into the oversampled and augmented lists #oversampled_data[target]['train'][admission] = [feature_dict] #oversampled_data[target]['valid'][admission] = [feature_dict] # what is the value of the target feature for this record? value = get_feature_value(feature_dict, endpoint_lookup[target]) multiplier = get_multiplier(value, target, strategy) #for _ in range(int(multiplier) - 1): # # add rate-1 more copies of this record to the oversampling list # oversampled_data[target]['train'][admission].append(feature_dict) # make augmentation shuffles for both clinical and vital sign data # note: we can augment for validation set and take average prediction as, # final prediction value, but for validation we augment the same number # of times, regardless of the feature value, whereas for the training # set, we set the rate according to the multiplication factor as per # oversampling strategy clin_augmented_data = {'train': augment(clin_data, clin_stepped_data, multiplier), 'valid': augment(clin_data, clin_stepped_data, 10)} vs_augmented_data = {'train': augment(vs_data, vs_stepped_data, multiplier), 'valid': augment(vs_data, vs_stepped_data, 10)} for phase in ['train', 'valid']: augmented_data[target][phase][admission] = [feature_dict] for a, v in zip(clin_augmented_data[phase], vs_augmented_data[phase]): augmented_dict = copy.deepcopy(feature_dict) try: augmented_dict['data_clin'] = tf.train.Feature(int64_list=tf.train.Int64List(value=a)) augmented_dict['data_vs'] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) augmented_data[target][phase][admission].append(augmented_dict) except TypeError: # this will except if the augmented trajectory is a single event i.e. 
not iterable pass suffix = filename.split('_')[1] print(f'basic_rate_{suffix}') # dump_data(os.path.join(PROCESSED_DATAPATH, f'{strategy}_rate_oversample_{suffix}'), oversampled_data) dump_data(os.path.join(PROCESSED_DATAPATH, f'{strategy}_rate_augment_{suffix}'), augmented_data) def make_data_files_weighted_distribution(weighting, strategy): # now that we have the data all in the right format and appropriately oversampled, combine # them according to their k-fold assignment into the final tfrecord file that will be fed # to the models for w in weighting: for s in strategy: print(w, s) files = [f for f in os.listdir(PROCESSED_DATAPATH) if s in f and w in f] train_file_names = [os.path.join(MODEL_INPUT_DATAPATH, f'train_{s}_{w}_{fold}') for fold in range(5)] valid_file_names = [os.path.join(MODEL_INPUT_DATAPATH, f'valid_{s}_{w}_{fold}') for fold in range(5)] with ExitStack() as stack: train_files = {endpoint: [stack.enter_context(tf.io.TFRecordWriter(f'{t}_{endpoint}')) for t in train_file_names] for endpoint in endpoint_lookup.keys()} valid_files = {endpoint: [stack.enter_context(tf.io.TFRecordWriter(f'{v}_{endpoint}')) for v in valid_file_names] for endpoint in endpoint_lookup.keys()} for f in files: print(f) datafile = read_data(os.path.join(PROCESSED_DATAPATH, f)) for target, target_data in datafile.items(): for phase, phase_data in target_data.items(): for visit, visit_data in phase_data.items(): if len(phase_data) > 0: for fold in range(5): for d in visit_data: subject = get_feature_value(d, 'subject') traj_feat = tf.train.Features(feature=d) example = tf.train.Example(features=traj_feat) if (phase == 'train') and (subject in train_ids[fold]): train_files[target][fold].write(example.SerializeToString()) elif (phase == 'valid') and (subject in valid_ids[fold]): valid_files[target][fold].write(example.SerializeToString()) def make_data_files_original_distribution(): # as per make_data_files_weighted_distribution but without weightings train_file_names = [os.path.join(MODEL_INPUT_DATAPATH, f'train_original_{fold}') for fold in range(5)] valid_file_names = [os.path.join(MODEL_INPUT_DATAPATH, f'valid_original_{fold}') for fold in range(5)] with ExitStack() as stack: train_files = [stack.enter_context(tf.io.TFRecordWriter(t)) for t in train_file_names] valid_files = [stack.enter_context(tf.io.TFRecordWriter(v)) for v in valid_file_names] serialized_filelist = [f for f in os.listdir(PROCESSED_DATAPATH) if 'serialized' in f] for filename in serialized_filelist: print(filename) basic_data = read_data(os.path.join(PROCESSED_DATAPATH, filename)) for admission in basic_data.keys(): feature_dict, clin_data, vs_data, clin_stepped_data, vs_stepped_data = basic_data[admission] subject = get_feature_value(feature_dict, 'subject') traj_feat = tf.train.Features(feature=feature_dict) example = tf.train.Example(features=traj_feat) for fold in range(5): if subject in train_ids[fold]: train_files[fold].write(example.SerializeToString()) else: valid_files[fold].write(example.SerializeToString()) serialize_data() # this step produces the endpoints tt_dth and tt_readm in full days and duration in full weeks make_data_files_original_distribution() for strategy in ['basic', 'tte']: do_oversampling(strategy) # this step takes serialised data and produces augmented / oversampled data according to the endpoint weighting = ['tte', 'basic'] strategy = ['augment', 'oversample'] make_data_files_weighted_distribution(weighting, strategy)Shell APD Learning eXchange DnA Notebook Predicting power usage in the eastern 
United StatesIn this notebook, we will go through the cycle of a typical data science project, starting with data collection, cleaning, feature engineering and finally predictive modeling. We will also explore the three different categories of machine learning problems, and how they work. By the end of this exercise notebook you should have a basic understanding of how data science works. PJM Hourly Energy Consumption DataPJM is a regional transmission organization in the United States. It is part of the Eastern Interconnection grid operating an electric transmission system serving all or parts of Delaware, Illinois, Indiana, Kentucky, Maryland, Michigan, New Jersey, North Carolina, Ohio, Pennsylvania, Tennessee, Virginia, West Virginia, and the District of Columbia.We will be using hourly power consumption data from PJM's East grid. This data set ranges from 2002-2018. The data came from PJM's website and are given in megawatts (MW). Overview:0. [Introduction](intro)1. [Import packages](setup)2. [Get the data](data)3. [Explore the data](explore)4. [Split train and test sets](traintest)5. [Regression - Predict energy usage](model) - [Linear regression](linear) - [Gradient-boosted trees](xgbreg)6. [Engineer new features](featureengineer)7. [Clean the dataset](cleaning)8. [Retrain your model with new features](modelnew)REGRESSION | CLASSIFICATION | CLUSTERING- | - | - | | 1. Import some python libraries and define functions we'll need%%capture import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from matplotlib import cm import matplotlib.dates as mdates import xgboost as xgb from xgboost import plot_importance, plot_tree from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.linear_model import LinearRegression, SGDClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score from sklearn.metrics import f1_score from sklearn.cluster import KMeans import seaborn as sns import math from datetime import datetime def mean_absolute_percentage_error(y_true, y_pred): """Calculates MAPE given y_true and y_pred""" y_true, y_pred = np.array(y_true), np.array(y_pred) return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized confusion matrix' else: title = 'Confusion matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data # classes = classes[unique_labels(y_true, y_pred)] if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) fig, ax = plt.subplots(figsize=(7,7)) # fig.patch.set_facecolor('white') im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) plt.figure(figsize=(10, 10)) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. 
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return ax color_pal = ["#F8766D", "#D39200", "#93AA00", "#00BA38", "#00C19F", "#00B9E3", "#619CFF", "#DB72FB"] np.set_printoptions(precision=2) pd.set_option('display.max_columns', None) plt.style.use('fivethirtyeight') plt.rcParams.update({'font.size': 22})2. Download the data to our notebookurl = 'https://github.com/robmoratore/ShellDnA/raw/master/data/PJM_Load.csv' df_full = pd.read_csv(url, sep = ',', date_parser=["date"]) df_full.index = pd.DatetimeIndex(df_full["date"]) df_full = df_full.drop(columns=['date']) pjme = df_full[['load', 'dayofyear']]3. Explore the data Let's look at the first few rows of the datasetpjme.head()Plot energy usage over time_ = pjme["load"].plot(style='.', figsize=(15,5), color=color_pal[0], title='Energy consumption') _.set(xlabel="Time", ylabel="Energy (MW)")4. Split the train and testing sets We do that to ensure our model is able to generalize. That means, perform well on unseen data. If we train and test using the same data, the model will memorize that specific output and not learn it.We will use the data from 2015 on as our test set.![alt](https://github.com/robmoratore/ShellDnA/blob/master/data/images/train_test.png?raw=true)split_date = '2015-01-01' pjme_train = pjme.loc[pjme.index <= split_date].copy() pjme_test = pjme.loc[pjme.index > split_date].copy() ax = pd.DataFrame(pjme_test["load"]) \ .rename(columns={'load': 'TEST SET'}) \ .join(pd.DataFrame(pjme_train["load"]).rename(columns={'load': 'TRAINING SET'}), how='outer') \ .plot(figsize=(15,5), title='Energy consumption', style='.') _ = ax.set(xlabel="Time", ylabel="Energy (MW)")5. Regression - Can we predict energy use for a given hour? 
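Before fitting any models, here is a quick sanity check of the `mean_absolute_percentage_error` helper defined in step 1, using made-up numbers (the values below are purely illustrative):

# Hedged sketch: each toy prediction is off by exactly 10%, so MAPE should come out as 10.0
y_true_demo = np.array([100.0, 200.0, 300.0])
y_pred_demo = np.array([110.0, 180.0, 330.0])
print(mean_absolute_percentage_error(y_true_demo, y_pred_demo))  # expected: 10.0 (percent)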
5.1 Start with a simple model - Linear Regressionlin_reg = LinearRegression() lin_reg.fit(pjme_train["dayofyear"].values.reshape(-1, 1), pjme_train["load"].values.reshape(-1, 1)) pjme_test['load_Prediction_lin'] = lin_reg.predict(pjme_test["dayofyear"].values.reshape(-1, 1)) _ = pjme_test[['load','load_Prediction_lin']].plot(figsize=(15, 5)) _.set(xlabel="Time", ylabel="Energy (MW)")Check how well our model performsWhich error metrics would you choose?mse = mean_squared_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_lin']) print ("The Mean Squared Error (MSE) is ", round(mse, 2) , "MW\u00b2") print ("The Root Mean Squared Error (RMSE) is ", round(np.sqrt(mse), 2), "MW") print ("The Mean Absolute Error (MAE) is ", round(mean_absolute_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_lin']), 2), "MW") print ("The Mean Absolute Percentage Error is ", round(mean_absolute_percentage_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_lin']), 2), "%") print ("The R\u00b2 (coefficient of determination) regression score is ", round(r2_score(y_true=pjme_test['load'],y_pred=pjme_test['load_Prediction_lin']), 2))The Mean Squared Error (MSE) is 42420922.87 MW² The Root Mean Squared Error (RMSE) is 6513.13 MW The Mean Absolute Error (MAE) is 5242.46 MW The Mean Absolute Percentage Error is 17.6 % The R² (coefficient of determination) regression score is -0.025.2 Get fancy with some Machine Learning - Gradient-boosted trees This is one of the cleverest ways of combining multiple decision trees. Here, instead of having one single tree responsible for all the decisions, you use many trees on sequence. That means, the next tree does not try to predict the final result but the error of the previous tree. When you add all trees together, the results are generally very goodinclude_variables = "dayofyear" reg = xgb.XGBRegressor(n_estimators=50) reg.fit(pjme_train[include_variables].values.reshape(-1, 1), pjme_train["load"].values.reshape(-1, 1)) pjme_test['load_Prediction_xgb'] = reg.predict(pjme_test[include_variables].values.reshape(-1, 1)) _ = pjme_test[['load','load_Prediction_xgb']].plot(figsize=(15, 5)) _.set(xlabel="Time", ylabel="Energy (MW)")Check how well our model performsmse = mean_squared_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_xgb']) print ("The Mean Squared Error (MSE) is ", round(mse, 2), "MW\u00b2") print ("The Root Mean Squared Error (RMSE) is ", round(np.sqrt(mse), 2), "MW") print ("The Mean Absolute Error (MAE) is ", round(mean_absolute_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_xgb']), 2), "MW") print ("The Mean Absolute Percentage Error is ", round(mean_absolute_percentage_error(y_true=pjme_test['load'], y_pred=pjme_test['load_Prediction_xgb']), 2), "%") print ("The R\u00b2 (coefficient of determination) regression score is ", round(r2_score(y_true=pjme_test['load'],y_pred=pjme_test['load_Prediction_xgb']), 2))The Mean Squared Error (MSE) is 31031470.06 MW² The Root Mean Squared Error (RMSE) is 5570.59 MW The Mean Absolute Error (MAE) is 4354.54 MW The Mean Absolute Percentage Error is 14.6 % The R² (coefficient of determination) regression score is 0.256. Engineer New Features Which features would you create to help predict load?df_full.head().round(2) _ = df_full.hist(bins=61, figsize=(20,20))7. 
Clean the dataset Outliers_ = df_full["temp"].plot(style='k.', figsize=(15,5)) _.set(xlabel="Time", ylabel="Temperature (C)") df_full = df_full[(df_full['temp']>=-30) & (df_full['temp']<=55)] _ = df_full["temp"].plot(style='k.', figsize=(15,5)) _.set(xlabel="Time", ylabel="Temperature (C)")Missing valueslen(df_full.index)-df_full.count() df_full = df_full.dropna() len(df_full.index)-df_full.count()8. Retrain Gradient-boosted trees model with new features We can now retrain the Gradient-boosted trees model using the new features to see if performance improves.Here is a list of available features that can be used in the model.df_full.drop(columns=['load']).columnsSelect which features you would like to include in the model, and add them to the `include_variables` list bellow.Then check how the model performs.# SELECT A COMBINATION OF FEATURES HERE include_variables = ['hour', 'dayofyear', 'quarter'] # split_date = '2015-01-01' df_full_train = df_full.loc[df_full.index <= split_date].copy() df_full_test = df_full.loc[df_full.index > split_date].copy() df_full_test.index = pd.DatetimeIndex(df_full_test.index) df_full_train.index = pd.DatetimeIndex(df_full_train.index) reg = xgb.XGBRegressor(n_estimators=50) reg.fit(df_full_train[include_variables], df_full_train["load"].values.reshape(-1, 1)) df_full_test['load_Prediction_xgb'] = reg.predict(df_full_test[include_variables]) _ = df_full_test[['load','load_Prediction_xgb']].plot(figsize=(15, 5)) _.set(xlabel="Time", ylabel="Energy (MW)")Check how well our model performsWhich Feature gives you the lowest MAE and highest R$^{2}$?mse = mean_squared_error(y_true=df_full_test['load'], y_pred=df_full_test['load_Prediction_xgb']) print ("The Mean Squared Error (MSE) is ", round(mse, 2), "MW\u00b2") print ("The Root Mean Squared Error (RMSE) is ", round(np.sqrt(mse), 2), "MW") print ("The Mean Absolute Error (MAE) is ", round(mean_absolute_error(y_true=df_full_test['load'], y_pred=df_full_test['load_Prediction_xgb']), 2), "MW") print ("The Mean Absolute Percentage Error is ", round(mean_absolute_percentage_error(y_true=df_full_test['load'], y_pred=df_full_test['load_Prediction_xgb']), 2), "%") print ("The R\u00b2 (coefficient of determination) regression score is ", round(r2_score(y_true=df_full_test['load'],y_pred=df_full_test['load_Prediction_xgb']), 2))The Mean Squared Error (MSE) is 16502377.28 MW² The Root Mean Squared Error (RMSE) is 4062.31 MW The Mean Absolute Error (MAE) is 3209.28 MW The Mean Absolute Percentage Error is 10.45 % The R² (coefficient of determination) regression score is 0.6Feature ImportancesFeature importance is a great way to get a general idea about which features the model is relying on most to make the prediction. This is a metric that simply sums up how many times each feature is split on.We can see that the day of year was most commonly used to split trees, while hour came in next. 
Quarter has low importance (never used in any splits even) due to the fact that it could be created by different dayofyear splits._ = plot_importance(reg, height=0.9)8.1 Look at Worst and Best Predicted Daysdf_full_test['error'] = df_full_test['load'] - df_full_test['load_Prediction_xgb'] df_full_test['abs_error'] = df_full_test['error'].apply(np.abs) error_by_day = df_full_test.groupby(['year','month','dayofmonth']) \ .mean()[['load','load_Prediction_xgb','error','abs_error']]Worst dayserror_by_day.sort_values('abs_error', ascending=False).round(2).head(10) f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(10) _ = df_full_test[['load','load_Prediction_xgb']][(df_full_test[['load','load_Prediction_xgb']].index>'2015-02-20') & (df_full_test[['load','load_Prediction_xgb']].index<'2015-02-21')].plot(ax=ax, style=['-','.']) plot = plt.suptitle('Worst Predicted Day') _.set(xlabel="Time", ylabel="Energy (MW)")Temperature in worst predicted daydf_full[df_full.index=='2015-02-20 00:00:00']['temp'].head()Best dayserror_by_day.sort_values('abs_error', ascending=True).round(2).head(10) f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(10) _ = df_full_test[['load','load_Prediction_xgb']][(df_full_test[['load','load_Prediction_xgb']].index>'2018-05-17') & (df_full_test[['load','load_Prediction_xgb']].index<'2018-05-18')].plot(ax=ax, style=['-','.']) plot = plt.suptitle('Best Predicted Day') _.set(xlabel="Time", ylabel="Energy (MW)")Sample 3 Revision Sampling===Includes all but the first 2018 revision in namespace 0.We're sampling only in 2018 (so that we have all of 2019 to observe reverts)import mwapi import mwxml import mwxml.utilities import mwcli import mwreverts import oresapi import mwparserfromhell import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib import os from tqdm import tqdm import bz2 import gzip import json import re import hashlib from datetime import datetime import nltk import scipy.stats import para from itertools import groupby from collections import Counter git_root_dir = !git rev-parse --show-toplevel git_root_dir = git_root_dir[0] git_root_dir raw_data_dir = "/export/scratch2/wiki_data" derived_data_dir = os.path.join(git_root_dir, "data", "derived") raw_data_dir, derived_data_dir working_dir = os.path.join(derived_data_dir, 'revision_sample') os.makedirs(working_dir, exist_ok=True) working_dir # NOTE TIMESTAMP BUG # These incorrectly use CT, rather than UTC, as the boundaries for the data collection period # This is completely fine, but it's probably not what most analysts would think start_date = datetime.fromisoformat('2018-01-01') start_timestamp = int(start_date.timestamp()) end_date = datetime.fromisoformat('2020-01-01') end_timestamp = int(end_date.timestamp()) start_timestamp, end_timestamp sample_start_timestamp = start_timestamp sample_end_date = datetime.fromisoformat('2019-01-01') sample_end_timestamp = int(sample_end_date.timestamp())Load in all revisionsWhich excludes revisions outside of 2018-2020 and only for pages in ns0 and non-redirects.start = datetime.now() stub_history_reverts_dir = os.path.join(derived_data_dir, 'stub-history-reverts') rev_ids_filepath = os.path.join(stub_history_reverts_dir, 'rev_ids_sorted.csv') df = pd.read_csv(rev_ids_filepath, header=None, names=['page_id', 'rev_id', 'rev_timestamp', 'is_revert_target', 'is_reverted', 'is_reverting'] ) print(f"{datetime.now() - start}") len(df) df.head() # number of unique pages in this sample page_ids_set = set(df.page_id) len(page_ids_set) # Read 
the revert info stub_history_reverts_dir = os.path.join(derived_data_dir, 'stub-history-reverts') revert_df_filepath = os.path.join(stub_history_reverts_dir, 'revert_df.pkl') revert_df = pd.read_pickle(revert_df_filepath) len(revert_df) revert_df.head(3) start = datetime.now() page_df = df.groupby('page_id').agg({ 'rev_id': len, 'is_reverted': np.sum, 'is_reverting': np.sum }).rename(columns={ 'rev_id': 'revision_count', 'is_reverted': 'reverted_count', 'is_reverting': 'revert_count' }) print(f"{datetime.now() - start}") len(page_df) page_df.sample(3) eligible_for_sampling = [] curr_page_id = -1 first_page_omitted_in_2018_count = 0 for tup in tqdm(df.itertuples(), total=len(df)): if tup.page_id != curr_page_id: curr_page_id = tup.page_id # can never sample the FIRST post-2018 revision eligible_for_sampling.append(False) # keep track of the number of revisions that are omitted entirely because they are the first # (should be <= the number of unique pages) if tup.rev_timestamp <= sample_end_timestamp: first_page_omitted_in_2018_count += 1 else: is_eligible = tup.rev_timestamp <= sample_end_timestamp eligible_for_sampling.append(is_eligible) first_page_omitted_in_2018_count df['is_sample_eligible'] = eligible_for_sampling # after filtering, only 43.9% are actually eligible... np.sum(df.is_sample_eligible), np.sum(df.is_sample_eligible) / len(df) df = df.reset_index().rename(columns={'index': 'full_index'})Sample version 3Includes all revisions that meet the sampling criteria.sample = df[df.is_sample_eligible] len(sample) # 11.9% of the sample are reverted np.sum(sample.is_reverted), np.sum(sample.is_reverted) / len(sample) # 9.0% of the sample are reverts np.sum(sample.is_reverting), np.sum(sample.is_reverting) / len(sample) # 6.1% of pages with 1+ revision are included in the sample len(set(sample.page_id)), len(set(sample.page_id)) / len(page_ids_set) matched_sample = df.loc[sample.index - 1] assert len(matched_sample) == len(sample) assert np.all(sample.page_id.reset_index(drop=True) == matched_sample.page_id.reset_index(drop=True)) sample = sample.assign(prev_rev_id=matched_sample.rev_id.tolist()) sample.head(3) matched_sample = df.loc[sample.index + 1] assert len(matched_sample) == len(sample) sample['next_rev_id'] = -1 idx = np.array(sample.page_id.tolist()) == np.array(matched_sample.page_id.tolist()) print(f"{np.sum(idx)} / {len(sample)} sampled revisions have 1+ subsequent revision in 2018 or 2019.") sample.loc[idx, 'next_rev_id'] = matched_sample[idx].rev_id.tolist() # get the timestamp of the previous and next revisions rev_id_timestamp_dict = {tup.rev_id: tup.rev_timestamp for tup in tqdm(df.itertuples(), total=len(df))} sample['prev_rev_timestamp'] = sample.prev_rev_id.map(lambda prev_rev_id: rev_id_timestamp_dict[prev_rev_id]) sample['next_rev_timestamp'] = sample.next_rev_id.map(lambda next_rev_id: rev_id_timestamp_dict[next_rev_id] if next_rev_id != -1 else -1) no_next_rev_count = np.sum(sample.next_rev_timestamp == -1) print(f"{no_next_rev_count} sample revisions ({no_next_rev_count / len(sample)*100:.2f}%) have no next revision in the collected data range.") sample.head() sample_reverting_rev_ids = set(sample[sample.is_reverting==1].rev_id) reverting_rev_id_to_reverted_ids_dict = { row.reverting_rev_id: row.reverted_rev_ids for row in tqdm(revert_df.itertuples(), total=len(revert_df)) if row.reverting_rev_id in sample_reverting_rev_ids } # for reverting revisions in the sample, merge in the list of reverted_rev_ids # using the dictionary computed in the cell above 
reverted_rev_ids_list = [] for tup in sample.itertuples(): if tup.is_reverting == 1: reverted_rev_ids = reverting_rev_id_to_reverted_ids_dict[tup.rev_id] reverted_rev_ids_list.append(reverted_rev_ids) else: reverted_rev_ids_list.append([]) sample['reverted_rev_ids'] = reverted_rev_ids_list reverted_to_reverting_rev_id_dict = {} for tup in tqdm(revert_df.itertuples(), total=len(revert_df)): for rev_id in tup.reverted_rev_ids: reverted_to_reverting_rev_id_dict[rev_id] = tup.reverting_rev_id # grab the reverting id for reverted revisions sample['reverting_rev_id'] = -1 sample.loc[sample.is_reverted==1,'reverting_rev_id'] = [ reverted_to_reverting_rev_id_dict[rev_id] for rev_id in sample.loc[sample.is_reverted==1].rev_id ] # merge in the time of the reverting revision sample['reverting_rev_timestamp'] = -1 reverting_rev_timestamp_dict = {tup.rev_id: tup.rev_timestamp for tup in df[df.is_reverting==1].itertuples()} sample.loc[sample.is_reverted==1,'reverting_rev_timestamp'] = [ reverting_rev_timestamp_dict[rev_id] for rev_id in sample.loc[sample.is_reverted==1].reverting_rev_id ] sample[sample.is_reverted==1].head() sample = sample.drop(columns='full_index') sample.head(1) # save the sample sample_filepath = os.path.join(working_dir, 'sample3_all.pkl') sample.to_pickle(sample_filepath) print("Finished.") # read in the sample dataframe revision_sample_dir = os.path.join(derived_data_dir, 'revision_sample') sample_filepath = os.path.join(revision_sample_dir, 'sample3_all.pkl') rev_df = pd.read_pickle(sample_filepath) len(rev_df) rev_df.head()Chapter 4 Question 10A comparison of logistic regression, LDA, QDA and KNN on stock market dataimport statsmodels.api as sm import seaborn as sns import matplotlib.pyplot as plt import statsmodels.graphics.tsaplots import numpy as np import sklearn.linear_model import sklearn.metrics import sklearn.discriminant_analysis import sklearn.neighbors import pandas as pd sns.set(style="whitegrid") # This dataset contains weekly returns for 21 years, from 1990 to 2010. weekly = sm.datasets.get_rdataset("Weekly", "ISLR").data(a) Produce some numerical and graphical summaries of the `weekly` datasetweekly.describe() sns.pairplot(weekly) print(f"Market goes up on average {sum(weekly.Direction=='Up')/len(weekly)*100:.2f}% of the time")Market goes up on average 55.56% of the timePatterns: - Volume goes steeply up with time - Nothing else obvious# Convert this to a time series and do some averages / plots of total change / downsampling? plt.plot(weekly.Today) # Autocorrelation of the "Today" data # We don't really expect significant correlation fig = statsmodels.graphics.tsaplots.plot_acf(weekly.Today, lags=25, title="Autocorrelation in weekly stock prices")(b) Use the full data set to perform a logistic regression with `Direction` as the response and the five lag variables plus `Volume` as the predictors.# Could use sklearn.linear_model.LogisticRegression y = weekly.Direction=="Up" # Have to encode this ourselves, apparently. X = sm.add_constant(weekly.drop(columns=["Direction","Year","Today"])) logistic_model = sm.Logit(y, X) results = logistic_model.fit() print(results.summary()) model = sklearn.linear_model.LogisticRegression() model.fit(weekly.drop(columns=["Direction","Year","Today"]), weekly.Direction) print(model.intercept_) print(model.coef_)[0.26484745] [[-0.04117292 0.05846974 -0.01599122 -0.02769998 -0.01440289 -0.02212844]]The two regressors agree, but only the intercept is significant (i.e. 
there is non-zero probability of "Up" when the lags and volume are all zero). `Lag2` is significant at the p<0.05 level but needs further investigation; with a Bonferroni correction it would not be. (c) Compute the confusion matrix and overall fraction of correct predictionsy_true = weekly.Direction y_pred = model.predict(weekly.drop(columns=["Direction","Year","Today"])) confusion_matrix = sklearn.metrics.confusion_matrix(y_true, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}")[[ 55 429] [ 47 558]] fraction correct: 0.5629017447199265So we are right 56.3% of the time, versus a model that always predicts "Up" which would be right 55.6% of the time. We don't have many false negatives, but we have an awful lot of false positives!# Check against the statsmodels fit for comparison y_pred_sm = np.where(results.fittedvalues < 0.5, "Up", "Down") print(sklearn.metrics.confusion_matrix(y_true, y_pred_sm))[[ 19 465] [ 42 563]](d) Now fit the model using training data from 1990-2008, with `Lag2` as the only predictor. Use the 2009-2010 data as test data, and get the confusion matrix & overall fraction correct.train = weekly[weekly.Year < 2009] test = weekly[weekly.Year >= 2009] x_train = train.Lag2 y_train = train.Direction x_test = test.Lag2 y_test = test.Direction model = sklearn.linear_model.LogisticRegression() model.fit(x_train.to_numpy().reshape(-1,1), y_train) y_pred = model.predict(x_test.to_numpy().reshape(-1,1)) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}")[[ 9 34] [ 5 56]] fraction correct: 0.625(e) Repeat (d) using LDAlda_model = sklearn.discriminant_analysis.LinearDiscriminantAnalysis() lda_model.fit(x_train.to_numpy().reshape(-1,1), y_train) # reshape required to cast the training data to a 2d array y_pred = lda_model.predict(x_test.to_numpy().reshape(-1,1)) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}")[[ 9 34] [ 5 56]] fraction correct: 0.625(f) Repeat (d) using QDAqda_model = sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis() qda_model.fit(x_train.to_numpy().reshape(-1,1), y_train) # reshape required to cast the training data to a 2d array y_pred = qda_model.predict(x_test.to_numpy().reshape(-1,1)) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}")[[ 0 43] [ 0 61]] fraction correct: 0.5865384615384616(g) Repeat (d) using KNN with K = 1knn_model = sklearn.neighbors.KNeighborsClassifier(n_neighbors=1) knn_model.fit(x_train.to_numpy().reshape(-1,1), y_train) # reshape required to cast the training data to a 2d array y_pred = knn_model.predict(x_test.to_numpy().reshape(-1,1)) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}")[[21 22] [31 30]] fraction correct: 0.49038461538461536(h) Which of these methods appears to provide the best results on this data?
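For reference, here is a small summary of the test-set accuracies reported in the cell outputs above (a minimal sketch; the numbers are copied, with rounding, from parts (d)-(g)):

# Hedged sketch: collect the reported test accuracies for comparison (values copied from the outputs above)
results_summary = pd.Series({
    'Logistic regression (Lag2)': 0.625,
    'LDA (Lag2)': 0.625,
    'QDA (Lag2)': 0.587,
    'KNN, k=1 (Lag2)': 0.490,
})
print(results_summary.sort_values(ascending=False))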
Somewhat surprisingly, it seems like a linear fit performs best on this data, better than QDA and KNN (suggesting that both classes have the same variance in `Lag2`). (i) Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the best results and parameters.# The p-value for Lag2 suggests it is predictive - plot its distribution by Direction with FacetGrid # fig, ax = plt.subplots(figsize=(10,10)) g = sns.FacetGrid(weekly, hue="Direction", height=5) g = g.map(sns.distplot, "Lag2").add_legend() import itertools for i in range(1,len(train.columns.drop(["Direction","Year","Today"]))): for col in itertools.combinations(train.columns.drop(["Direction","Year","Today"]), i): col = list(col) print(col) x_train = train[col] y_train = train.Direction x_test = test[col] y_test = test.Direction lda_model = sklearn.discriminant_analysis.LinearDiscriminantAnalysis() lda_model.fit(x_train, y_train) y_pred = lda_model.predict(x_test) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}") # With `Lag2`, we get an accuracy of 0.625. With other predictors, the results get worse. What about with QDA? for i in range(1,len(train.columns.drop(["Direction","Year","Today"]))): for col in itertools.combinations(train.columns.drop(["Direction","Year","Today"]), i): col = list(col) print(col) x_train = train[col] y_train = train.Direction x_test = test[col] y_test = test.Direction qda_model = sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis() qda_model.fit(x_train, y_train) y_pred = qda_model.predict(x_test) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() # print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}") # QDA strictly worse. And KNN? for i in range(2,len(train.columns.drop(["Direction","Year","Today"]))): for col in itertools.combinations(train.columns.drop(["Direction","Year","Today"]), i): col = list(col) print(col) x_train = train[col] y_train = train.Direction x_test = test[col] y_test = test.Direction for j in range(1,6): print(j) knn_model = sklearn.neighbors.KNeighborsClassifier(n_neighbors=j) knn_model.fit(x_train, y_train) y_pred = knn_model.predict(x_test) confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred) tn, fp, fn, tp = confusion_matrix.ravel() # print(confusion_matrix) fraction_correct = (tn+tp)/(tn+tp+fn+fp) print(f"fraction correct: {fraction_correct}") # And with interactions? transforms?K-Nearest Neighbor (kNN) Exercise*Complete this worksheet and submit it (including the outputs and any supporting code not included in the worksheet). More details are available on the course website's [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html).*The kNN classifier consists of the following two stages:- During training, the classifier takes the training data and simply remembers it.- During testing, kNN classifies every test image by comparing it to all training images and transferring the label of the k most similar training examples.- The value of k is cross-validated.In this exercise we will implement these steps, understand the basic image-classification pipeline and cross-validation, and learn how to write efficient, vectorized code.# Run some setup code for this notebook. import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt # A bit of magic to make matplotlib figures appear inline in this notebook rather than in a new window.
%matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 # Load the raw CIFAR-10 data. cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. print 'Training data shape: ', X_train.shape print 'Training labels shape: ', y_train.shape print 'Test data shape: ', X_test.shape print 'Test labels shape: ', y_test.shape # Visualize some examples from the dataset. # We show a few training images from each class. classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # Subsample the data for more efficient code execution in this exercise num_training = 5000 mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] num_test = 500 mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] # Reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) print X_train.shape, X_test.shape from cs231n.classifiers import KNearestNeighbor # Create a kNN classifier instance. # Remember that training a kNN classifier is a no-op: # the classifier simply remembers the data and does no further processing. classifier = KNearestNeighbor() classifier.train(X_train, y_train)We would now like to classify the test data with the kNN classifier. Recall that this process can be broken down into two steps:1. First we must compute the distances between all test examples and all training examples.2. Given these distances, for each test example we find the k nearest examples and have them vote for the label.Let's begin by computing the distance matrix between all test and training examples. Given **Ntr** training examples and **Nte** test examples, the result is an **Nte x Ntr** matrix where each element (i, j) is the distance between the i-th test example and the j-th training example.First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement `compute_distances_two_loops`, which uses a (very inefficient) double loop over all pairs of (test, train) examples.# Open cs231n/classifiers/k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print dists.shape # We can visualize the distance matrix: each row is a single test example and its distances to the training examples plt.imshow(dists, interpolation='none') plt.show()**Inline Question 1** Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)- What in the data is the cause behind the distinctly bright rows?- What causes the columns? **Your Answer**: *fill this in here*# Now implement the function predict_labels and run the code below: # We use k = 1 (which is nearest neighbor). y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)You should expect to see approximately `27%` accuracy. Now let's also try a larger `k`, say `k = 5`:y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)You should expect to see slightly better performance than with `k = 1`.# Now let's speed up distance matrix computation by using partial vectorization with a single loop. # Implement compute_distances_one_loop and run the code below:
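# Illustrative sketch -- not the official assignment solution. One common way to
# write a one-loop version is to broadcast each test row against the whole
# training matrix and reduce over the pixel axis:
def compute_distances_one_loop_sketch(X_train, X_test):
    # returns an (Nte x Ntr) matrix of Euclidean distances using a single loop
    dists = np.zeros((X_test.shape[0], X_train.shape[0]))
    for i in range(X_test.shape[0]):
        dists[i, :] = np.sqrt(np.sum((X_train - X_test[i]) ** 2, axis=1))
    return dists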
dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we check that it agrees with the naive implementation. # There are many ways to decide whether two matrices are similar; one of the simplest is the Frobenius norm. # The Frobenius norm of the difference of two matrices is the square root of the sum of the squared differences of all elements; # in other words, reshape the matrices into vectors and compute the Euclidean distance between them. difference = np.linalg.norm(dists - dists_one, ord='fro') print 'Difference was: %f' % (difference, ) if difference < 0.001: print 'Good! The distance matrices are the same' else: print 'Uh-oh! The distance matrices are different' # Now implement the fully vectorized version inside compute_distances_no_loops and run the code below: dists_two = classifier.compute_distances_no_loops(X_test) # Check that the distance matrix agrees with the one we computed before: difference = np.linalg.norm(dists - dists_two, ord='fro') print 'Difference was: %f' % (difference, ) if difference < 0.001: print 'Good! The distance matrices are the same' else: print 'Uh-oh! The distance matrices are different' # Let's compare how fast the implementations are. def time_function(f, *args): """ Call a function f with args and return the time (in seconds) that it took to execute. """ import time tic = time.time() f(*args) toc = time.time() return toc - tic two_loop_time = time_function(classifier.compute_distances_two_loops, X_test) print 'Two loop version took %f seconds' % two_loop_time one_loop_time = time_function(classifier.compute_distances_one_loop, X_test) print 'One loop version took %f seconds' % one_loop_time no_loop_time = time_function(classifier.compute_distances_no_loops, X_test) print 'No loop version took %f seconds' % no_loop_time # You should see significantly faster performance with the fully vectorized implementation.Cross-validationWe have implemented the k-Nearest Neighbor classifier, but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.num_folds = 5 k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100] X_train_folds = [] y_train_folds = [] #################################################################################### # TODO: # # Split up the training data into folds. After splitting, X_train_folds and # # y_train_folds should each be lists of length num_folds, where # # y_train_folds[i] is the label vector for the points in X_train_folds[i]. # # Hint: Look up the numpy array_split function. # #################################################################################### pass ################################################################################ # END OF YOUR CODE # ################################################################################ # A dictionary holding the accuracies for different values of k that we find when running cross-validation. # After running cross-validation, k_to_accuracies[k] should be a list of length 'num_folds' # giving the different accuracy values we found when using that value of k. k_to_accuracies = {} #################################################################################### # TODO: # # Perform k-fold cross-validation to find the best value of k. For each # # possible value of k, run the k-nearest-neighbor algorithm num_folds times; # # in each case use all but one of the folds as training data and the # # held-out fold as the validation set. # #################################################################################### pass ################################################################################ # END OF YOUR CODE # ################################################################################ # Print out the computed accuracies for k in sorted(k_to_accuracies): for accuracy in k_to_accuracies[k]: print 'k = %d, accuracy = %f' % (k, accuracy) # plot the raw observations for k in k_choices: accuracies = k_to_accuracies[k] plt.scatter([k] * len(accuracies), accuracies) # plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())]) accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())]) plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std) plt.title('Cross-validation on k') plt.xlabel('k') plt.ylabel('Cross-validation accuracy') plt.show() # Based on the cross-validation results above, choose the best value for k, # retrain the classifier using all the training data, and test it on the test data. # You should be able to get above 28% accuracy on the test data. best_k = 1 classifier = KNearestNeighbor() classifier.train(X_train, y_train) y_test_pred = classifier.predict(X_test, k=best_k) # Compute and display the accuracy num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)Tensorflow Project ExerciseLet's wrap up this Deep Learning section by taking a quick look at the effectiveness of Neural Nets!We'll use the [Bank Authentication Data Set](https://archive.ics.uci.edu/ml/datasets/banknote+authentication) from the UCI repository.The data consists of 5 columns:* variance of Wavelet Transformed image (continuous)* skewness of Wavelet Transformed image (continuous)* curtosis of Wavelet Transformed image (continuous)* entropy of image (continuous)* class (integer)Where class indicates whether or not a Bank Note was authentic.This sort of task is perfectly suited for Neural Networks and Deep Learning! Just follow the instructions below to get started!import pandas as pd data = pd.read_csv('bank_note_data.csv') data.head() import seaborn as sns %matplotlib inline sns.countplot(x='Class',data=data) sns.pairplot(data,hue='Class') from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(data.drop('Class',axis=1)) scaled_features = scaler.fit_transform(data.drop('Class',axis=1)) df_feat = pd.DataFrame(scaled_features,columns=data.columns[:-1]) df_feat.head() X = df_feat y = data['Class'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) import tensorflow as tf df_feat.columns image_var = tf.feature_column.numeric_column("Image.Var") image_skew = tf.feature_column.numeric_column('Image.Skew') image_curt = tf.feature_column.numeric_column('Image.Curt') entropy = tf.feature_column.numeric_column('Entropy') feat_cols = [image_var,image_skew,image_curt,entropy] classifier = tf.estimator.DNNClassifier(hidden_units=[10, 20, 10], n_classes=2,feature_columns=feat_cols) input_func = tf.estimator.inputs.pandas_input_fn(x=X_train,y=y_train,batch_size=20,shuffle=True) classifier.train(input_fn=input_func,steps=500) pred_fn = tf.estimator.inputs.pandas_input_fn(x=X_test,batch_size=len(X_test),shuffle=False) note_predictions = list(classifier.predict(input_fn=pred_fn)) note_predictions[0] final_preds = [] for pred in note_predictions: final_preds.append(pred['class_ids'][0]) from sklearn.metrics import classification_report,confusion_matrix print(confusion_matrix(y_test,final_preds)) print(classification_report(y_test,final_preds)) from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=200) rfc.fit(X_train,y_train) rfc_preds = rfc.predict(X_test) print(classification_report(y_test,rfc_preds)) print(confusion_matrix(y_test,rfc_preds))[[214 1] [ 2 195]]Modeling *Executive Summary*This section represents the **Data Preparation** and **Modeling** sections of the CRISP-DM process. *Objectives*1.
**[Feature Selection]** To prototype the selection of features and data processing required before modeling.2. **[Modeling]** To prototype various models (regression, correlation, clustering) as a means to better understand the data. Setup# Import libraries import os import math import pprint import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from ipywidgets import * from sklearn.cluster import KMeans, AgglomerativeClustering from sklearn.linear_model import LinearRegression from sklearn import preprocessing from sklearn.metrics import mean_squared_error from sklearn.tree import DecisionTreeClassifier, plot_tree from scipy.cluster.hierarchy import dendrogram # Declare global variables DATA_DIR = os.path.join('../data/processed') DATA_FILE = os.path.join(DATA_DIR, 'processed.csv') plt.style.use('ggplot') CLUSTERING_COLS = ['leaid', 'year', 'lea_name', 'fips', 'number_of_schools', 'teachers_total_fte', 'spec_ed_students', 'enrollment_x', 'read_test_num_valid', 'read_test_pct_prof_midpt', 'math_test_num_valid', 'math_test_pct_prof_midpt', 'rev_total', 'exp_total'] ## Set a target year for early analysis TGT_YEAR = 2016 # Useful functions def null_counter(df): record_nulls = [] for col in df.columns: nulls = df[col].isnull().sum() percent_null = round((nulls / df.shape[0]) * 100, 2) record_nulls.append([col, nulls, percent_null]) output = pd.DataFrame(record_nulls, columns=['Attribute', 'Null Count', '% Null']) return output def get_year_range(df): year_range = list(df['year'].unique()) year_range.sort() return year_range def subset_by_states_only(df): df = df[df['fips'] <= 56] return df def sound_off(df): nRow, nCol = cluster_df.shape print(f'There are {nRow} rows and {nCol} columns.') print('') YEAR_RANGE = get_year_range(cluster_df) print(f'Data spans the years {YEAR_RANGE[0]} to {YEAR_RANGE[-1]}.') print('') print('Available columns include:') display(null_counter(cluster_df)) def filter_out_factor(df, column_name): ## Identify records with null values in column bad_records = df[df[column_name].isnull()] bad_records.to_csv(f'missing_{column_name}.csv') ## Drop records with null values in column df = df[df[column_name].notnull()] return dfData PreparationIn this section we load the data, running various scripts to format the contents properly.***High-Level Overview***We tried to choose a subset of columns in which the data was mostly complete. That meant disqualifying rows that were:* ... not states (i.e. territories).* ... did not have reported scores for standardized tests.* ... were outliers (three school districts with >500 schools).* ... had flagged data (i.e. a "-2" value indicating that results could not be provided for privacy reasons)We were especially disappointed to have to remove "english_language_learners" from the modeling data. In literature this factor is frequently referred to as significant. But, more than 6,000 records in our limited set simply have no reported value for this metric. Interpolation was briefly considered but is not likely to be an accurate approach.**Normalization** was also performed in the last few cells. 
Given the large differences between districts with varying school counts, this was judged to be a necessary step.# Load and preview data ## Isolate by specific columns cluster_df = pd.read_csv(DATA_FILE)[CLUSTERING_COLS] ## Filter out non-state records cluster_df = subset_by_states_only(cluster_df) ## Filter by year cluster_df = cluster_df[cluster_df['year'] == TGT_YEAR] sound_off(cluster_df) # Convert negative values (flags) to Null for col in cluster_df.columns[3:]: cluster_df[col] = cluster_df[col].apply(lambda x: np.nan if x<0 else x) # sound_off(cluster_df) # Remove records with missing values for col in cluster_df.columns: cluster_df = filter_out_factor(cluster_df, col) sound_off(cluster_df) # Identify and remove outliers plt.scatter(cluster_df['leaid'], cluster_df['number_of_schools']) ## Somewhat arbitrary threshold of >500 schools outliers = cluster_df[cluster_df['number_of_schools'] > 500] display(outliers) print(cluster_df.shape) cluster_df = cluster_df.drop(outliers.index) print(cluster_df.shape) # Remove the columns that won't be used as features cluster_prepared_df = cluster_df.drop(['leaid', 'year', 'lea_name', 'fips'], axis=1)ClusteringThe purpose of this tool is specifically *descriptive* analytics. In short, we are looking to understand our underlying data, rather than build predictions. To that end, we try a few different setups to produce varying results. ***High-Level Overview**** An elbow chart of sum-of-squared-distances shows that a k value of 6 is well-suited for this clustering.* The resulting cluster is fairly uneven in size (13,126, 133, 38, 1,969, 11, 492). I wonder if it would be meaningful to run a second clustering on the largest category?* Clusterings seem to naturally correspond to number_of_schools. This seems reasonable, given that number_of_schools is going to influence most of the metrics used (enrollment, revenue, expenditure).# Build an elbow chart to find a good cluster range def build_elbow_chart(df, min_clusters=2, max_clusters=10, random_seed=777): ## Calculate sum of squared distances for each cluster ssd = [] for k in range(min_clusters, max_clusters+1): kmeans_learner = KMeans(n_clusters=k, random_state=random_seed) kmeans_learner.fit(df) ssd.append(kmeans_learner.inertia_) ## Plot sum of squared distances plt.plot(range(min_clusters, max_clusters+1), ssd) plt.title('Elbow Chart') plt.xlabel('K') plt.ylabel('Sum of Squared Distances') # Build a good cluster and view the resulting data def build_cluster(df, k=6, random_seed=777): kmeans_learner = KMeans(n_clusters=k, random_state=random_seed) results = kmeans_learner.fit_predict(df) return results # View the characteristics of each labeled dataset def view_cluster(df, results, k=6): df['labels'] = results for i in range(0, k): subset = df[df['labels'] == i] subset.to_csv(f'labeled_{i}.csv') print(i) display(subset.describe()) print() # normed_data = pd.DataFrame(preprocessing.StandardScaler().fit_transform(cluster_prepared_df)) # build_elbow_chart(normed_data) # results = build_cluster(normed_data, k=7) # view_cluster(cluster_prepared_df, results, k=7) # Place data into four categories, to prevent clustering from emphasizing size single_df = cluster_prepared_df[cluster_prepared_df['number_of_schools'] == 1] small_df = cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 1) & (cluster_prepared_df['number_of_schools'] <= 3)] medium_df = cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 3) & (cluster_prepared_df['number_of_schools'] <= 10)] large_df = 
cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 10)] df_list = [single_df, small_df, medium_df, large_df] counts = [x['number_of_schools'].count() for x in df_list] print(counts) # Normalize within clusters to detect patterns besides size normed_df_list = [] for df in df_list: normed_df_list.append(preprocessing.StandardScaler().fit_transform(df)) build_elbow_chart(normed_df_list[0]) build_elbow_chart(normed_df_list[1]) build_elbow_chart(normed_df_list[2]) build_elbow_chart(normed_df_list[3]) results = [] for df in normed_df_list: results.append(build_cluster(df, k=4)) # Districts w/ One School view_cluster(single_df, results[0], k=4) # Districts w/ 2-3 Schools view_cluster(small_df, results[1], k=4) # Districts w/ 4-10 Schools view_cluster(medium_df, results[2], k=4) # Districts w/ One School view_cluster(large_df, results[3], k=4)0Appendix A: Idea Graveyard ***Decision Tree***# cluster_prepared_df['avg_midpoint'] = \ # cluster_prepared_df[['read_test_pct_prof_midpt', 'math_test_pct_prof_midpt']].mean(axis=1) # features = cluster_prepared_df.drop(['math_test_pct_prof_midpt', 'read_test_pct_prof_midpt', 'avg_midpoint'], axis=1) # labels = cluster_prepared_df['avg_midpoint'] # display(features.describe()) # tree_model = DecisionTreeClassifier(random_state=777, max_depth=3) # discrete_labels = preprocessing.KBinsDiscretizer(n_bins=3, encode='ordinal').fit_transform(np.asarray(labels).reshape(-1, 1)) # tree_model.fit(features, discrete_labels) # plt.figure(figsize=(15,15)) # plot_tree(tree_model) # plt.show()***Nested Clustering***# # Build an elbow chart to find a good cluster range # def build_elbow_chart(df): # ## Set parameters # min_clusters = 2 # max_clusters = 10 # random_seed = 777 # ## Calculate sum of squared distances for each cluster # ssd = [] # for k in range(min_clusters, max_clusters+1): # kmeans_learner = KMeans(n_clusters=k, random_state=random_seed) # kmeans_learner.fit(df) # ssd.append(kmeans_learner.inertia_) # ## Plot sum of squared distances # plt.plot(range(min_clusters, max_clusters+1), ssd) # plt.title('Elbow Chart') # plt.xlabel('K') # plt.ylabel('Sum of Squared Distances') # build_elbow_chart(cluster_prepared_df) # # Build a good cluster and view the resulting data # def build_cluster(df): # chosen_k = 6 # random_seed = 777 # kmeans_learner = KMeans(n_clusters=chosen_k, random_state=random_seed) # results = kmeans_learner.fit_predict(df) # return results # results = build_cluster(cluster_prepared_df) # # Attach our new clustering labels to the original dataframe # cluster_df['labels'] = results # # View the characteristics of each labeled dataset # def view_cluster(df, chosen_k=6): # for i in range(0, chosen_k): # subset = df[df['labels'] == i] # subset.to_csv(f'labeled_{i}.csv') # print(i) # display(subset.describe()) # print() # view_cluster(cluster_df) # ## Try a second clustering on the largest category # sub_cluster_df = cluster_prepared_df[cluster_df['labels'] == 0] # build_elbow_chart(sub_cluster_df) # results = build_cluster(sub_cluster_df) # sub_cluster_df['labels'] = results # view_cluster(sub_cluster_df)***Hierarchical Clustering***# # Build a hierarchical cluster # n = 10 # h_cluster = AgglomerativeClustering(n_clusters=10, compute_distances=True) # # Fit it to normalized data # norm_data = preprocessing.StandardScaler().fit_transform(cluster_prepared_df) # results = h_cluster.fit_predict(norm_data) # # Visualize # def plot_dendrogram(model, **kwargs): # # Create linkage matrix and then plot the dendrogram # # create the counts of 
samples under each node # counts = np.zeros(model.children_.shape[0]) # n_samples = len(model.labels_) # for i, merge in enumerate(model.children_): # current_count = 0 # for child_idx in merge: # if child_idx < n_samples: # current_count += 1 # leaf node # else: # current_count += counts[child_idx - n_samples] # counts[i] = current_count # linkage_matrix = np.column_stack([model.children_, model.distances_, # counts]).astype(float) # # Plot the corresponding dendrogram # dendrogram(linkage_matrix, **kwargs) # plt.figure(figsize=(8,8)) # plot_dendrogram(h_cluster, truncate_mode='level', p=3) # plt.xticks(rotation=-90) # plt.savefig('dendro.png') # # View the characteristics of each labeled dataset # def view_cluster(df, results, k=6): # df['labels'] = results # for i in range(0, k): # subset = df[df['labels'] == i] # subset.to_csv(f'labeled_{i}.csv') # print(i) # display(subset.describe()) # print() # k = len(np.unique(results)) # view_cluster(cluster_df, results, k)Example 1: ZARC circuit 1) Synthesize data:The impedance of a ZARC can be written as$$Z^{\rm exact}(f) = R_\infty + \displaystyle \frac{1}{\displaystyle \frac{1}{R_{\rm ct}}+C \left(i 2\pi f\right)^\phi}$$where $\displaystyle C = \frac{\tau_0^\phi}{R_{\rm ct}}$.The analytical DRT can be computed analytically as$$\gamma(\log \tau) = \displaystyle \frac{\displaystyle R_{\rm ct}}{\displaystyle 2\pi} \displaystyle \frac{\displaystyle \sin\left((1-\phi)\pi\right)}{\displaystyle \cosh(\phi \log(\tau/\tau_0))-\cos(\pi(1-\phi))}$$Random noise ($\sigma_n$) is added to impedance response.# Define the frequency range: N_freqs = 81 freq_vec = np.logspace(-4., 4., num=N_freqs, endpoint=True) tau = 1/freq_vec # Parameters for ZARC model: R_inf = 10 R_ct = 50 phi = 0.8 tau_0 = 1. C = tau_0**phi/R_ct # Compute impedance data: Z_exact = R_inf+1./(1./R_ct+C*(1j*2.*pi*freq_vec)**phi) # Add noise to the impedance data: rng = np.random.seed(214975) sigma_n_exp = 0.1 Z_exp = Z_exact + sigma_n_exp*(np.random.normal(0, 1, N_freqs)+1j*np.random.normal(0, 1, N_freqs)) # Finer mesh (100 ppd) for plotting analytical DRT result: freq_vec_plot = np.logspace(-4., 4., num=10*(N_freqs-1), endpoint=True) tau_plot = 1/freq_vec_plot gamma_fct_plot = (R_ct)/(2.*pi)*sin((1.-phi)*pi)/(np.cosh(phi*np.log(tau_plot/tau_0))-cos((1.-phi)*pi))2) Compute DRT: a. Deep-Prior DRT:Tunable hyperparameters:* lambda_limit ($\lambda_{lim}$): criteria for early stopping: $\lambda_{lim}=\frac{|MSE_{n+1}-MSE_n|}{MSE_n}$. Default: $1e^{-4}$* learning_rate: neural network learning rate. Default: $1e^{-5}$gamma1, R_inf1 = DRT_Lib.DP_DRT(freq_vec, Z_exp, display=True)Early stop. Number of iteration: 17249b. Gaussian Process DRT:Tunable hyperparameters:* sigma_n ($\sigma_n$): Standard deviation of experimental impedance noise. Default: $0.1$gamma2, R_inf2, sigma_gamma2 = DRT_Lib.GP_DRT(freq_vec, Z_exp, display=True)c. Tikhonov Regularization:Tunable hyperparameters:* el ($\lambda$): Tikhonov regularization factor. Default: $1e^{-2}$* method: Optimization method, select from "SLSQP", "L-BFGS-B", and "TNC". Default: "SLSQP"gamma3, R_inf3 = DRT_Lib.TR_DRT(freq_vec, Z_exp, display=True)Optimization terminated successfully (Exit mode 0) Current function value: 16.07702307906185 Iterations: 83 Function evaluations: 6896 Gradient evaluations: 833. 
Compare Results:fig, ax = plt.subplots(figsize=(7,5)) ax.semilogx(freq_vec_plot, gamma_fct_plot, color='black', linewidth=2, label="$Exact$") ax.semilogx(freq_vec, gamma1, 'r-', label="$DP-DRT$") ax.semilogx(freq_vec, gamma2, 'b-', label="$GP-DRT$") ax.semilogx(freq_vec, gamma3, 'g-',label="$TR-DRT$") ax.legend(frameon=False, fontsize = 15) ax.set_xlabel(r'$Real[Z_{11}]$', fontsize = 15) ax.set_ylabel(r'$-Imag[Z_{11}]$', fontsize = 15) plt.show()Example 2: Double ZARC circuit 1) Synthesize data:The impedance has the format of $$Z^{\rm exact}(f) = 2R_\infty + \displaystyle \frac{1}{\displaystyle \frac{1}{R_{\rm ct}}+C_1 \left(i 2\pi f\right)^{\phi}} + \displaystyle \frac{1}{\displaystyle\frac{1}{R_{\rm ct}}+C_2 \left(i 2\pi f\right)^{\phi}}$$ where $\displaystyle C_1 = \frac{\tau_1^\phi}{R_{\rm ct}}$ and $\displaystyle C_2 = \frac{\tau_2^\phi}{R_{\rm ct}}$In this tutorial, $\tau_1=0.1$ and $\tau_2=10$The analytical DRT is calculated as $$\gamma(\log \tau) = \displaystyle \frac{\displaystyle R_{\rm ct}}{\displaystyle 2\pi} \sin\left((1-\phi)\pi\right) \displaystyle \left(\frac{1 }{\displaystyle \cosh(\phi \log(\tau/\tau_1))-\cos(\pi(1-\phi))} + \displaystyle \frac{1}{\displaystyle \cosh(\phi \log(\tau/\tau_2))-\cos(\pi(1-\phi))}\right)$$Random noise ($\sigma_n$) is added to impedance response.# Define the frequency range: N_freqs = 81 freq_vec = np.logspace(-4., 4., num=N_freqs, endpoint=True) xi_vec = np.log(freq_vec) tau = 1/freq_vec # Parameters for two ZARC elements in series model: R_inf = 10 R_ct = 50 phi = 0.8 tau_1 = 0.1 tau_2 = 10 C_1 = tau_1**phi/R_ct C_2 = tau_2**phi/R_ct # Compute impedance data: Z_exact = 2*R_inf + 1./(1./R_ct+C_1*(1j*2.*pi*freq_vec)**phi) + 1./(1./R_ct+C_2*(1j*2.*pi*freq_vec)**phi) # Add noise to the impedance data: rng = np.random.seed(214975) sigma_n_exp = 0.1 Z_exp = Z_exact + sigma_n_exp*(np.random.normal(0, 1, N_freqs)+1j*np.random.normal(0, 1, N_freqs)) # Finer mesh (100 ppd) for plotting analytical DRT result: freq_vec_plot = np.logspace(-4., 4., num=10*(N_freqs-1), endpoint=True) tau_plot = 1/freq_vec_plot gamma_fct_plot = (R_ct)/(2.*pi)*sin((1.-phi)*pi)*(1/(np.cosh(phi*np.log(tau_plot/tau_1))-cos((1.-phi)*pi)) +\ 1/(np.cosh(phi*np.log(tau_plot/tau_2))-cos((1.-phi)*pi)))2) Compute DRT: a. Deep-Prior DRT:Tunable hyperparameters:* lambda_limit ($\lambda_{lim}$): criteria for early stopping: $\lambda_{lim}=\frac{|MSE_{n+1}-MSE_n|}{MSE_n}$. Default: $1e^{-4}$* learning_rate: neural network learning rate. Default: $1e^{-5}$gamma1, R_inf1 = DRT_Lib.DP_DRT(freq_vec, Z_exp, display=True)Early stop. Number of iteration: 17301b. Gaussian Process DRT:Tunable hyperparameters:* sigma_n ($\sigma_n$): Standard deviation of experimental impedance noise. Default: $0.1$gamma2, R_inf2, sigma_gamma2 = DRT_Lib.GP_DRT(freq_vec, Z_exp, display=True)c. Tikhonov Regularization:Tunable hyperparameters:* el ($\lambda$): Tikhonov regularization factor. Default: $1e^{-2}$* method: Optimization method, select from "SLSQP", "L-BFGS-B", and "TNC". Default: "SLSQP"gamma3, R_inf3 = DRT_Lib.TR_DRT(freq_vec, Z_exp, display=True)Optimization terminated successfully (Exit mode 0) Current function value: 32.33814498853992 Iterations: 68 Function evaluations: 5650 Gradient evaluations: 683. 
Compare Results:fig, ax = plt.subplots(figsize=(7,5)) ax.semilogx(freq_vec_plot, gamma_fct_plot, color='black', linewidth=2, label="$Exact$") ax.semilogx(freq_vec, gamma1, 'r-', label="$DP-DRT$") ax.semilogx(freq_vec, gamma2, 'b-', label="$GP-DRT$") ax.semilogx(freq_vec, gamma3, 'g-',label="$TR-DRT$") ax.legend(frameon=False, fontsize = 15) ax.set_xlabel(r'$f / Hz$', fontsize = 15) ax.set_ylabel(r'$\gamma/\Omega$', fontsize = 15) plt.show()Convert Datasets to BayesFusion FormatSmile (and PxS) needs headers, otherwise it crashes. This script quickly converts. Imports# imports import json import pandas as pd import os import sys from os.path import dirname # Custom imports root_dir = dirname(dirname(os.getcwd())) src_dir = os.path.join(root_dir, 'src') resc_dir = os.path.join(root_dir, 'resc') data_dir = os.path.join(resc_dir, 'data', 'tidy') datasets =os.listdir(data_dir) #datasetsActions Read - Modify -Write FileJust hardcoded edits of the files we care about.def read_modify_write(in_fname): if 'bayesfusion' in in_fname: return elif '.csv' not in in_fname: return else: # Read df = pd.read_csv(in_fname, header=None) # Modify bf_columns = ["att_{}".format(x) for x in df.columns.values] df.columns = bf_columns # Write base, ext = os.path.splitext(in_fname) out_fname = base+"_bayesfusion"+ext df.to_csv(out_fname, index=False) msg = """ Succesful modification of file: {} Results written to: {} """.format(in_fname, out_fname) #print(msg) returnModify entire dirds = 'nltcs' ds_dir = os.path.join(data_dir, ds) ds_dir def bayesfusionize_dir(directory): ds_fnames = [os.path.join(directory, x) for x in os.listdir(directory)] ds_fnames.sort() for f in ds_fnames: read_modify_write(f) return "Yessir"bayesfusionize_dir(ds_dir)## All datasetsdatasets = os.listdir(data_dir)datasets.sort()ds_dirs = [os.path.join(data_dir, ds) for ds in datasets]ds_dirs import joblibfrom joblib import Parallel, delayedParallel(n_jobs=8)(delayed(bayesfusionize_dir)(ds) for ds in datasets)for ds in datasets: ds_dir = os.path.join(data_dir, ds) bayesfusionize_dir(ds_dir) msg = """ Dataset: {} done. """.format(ds) print(msg)Matplotlib: Plotting Subplots in a loop**17th June 2020**[See accompanying blog post](https://engineeringfordatascience.com/posts/matplotlib_subplots/) on Engineering for Data Science# install requirements if necessary # ! pip install matplotlib pandas ffn nb_black %load_ext nb_blackProblem StatementYou can't directly loop through the subplots if both `nrows` and `ncols` are greater than 1. This is because you are returned a list of lists, rather than a list of subplot objects.import matplotlib.pyplot as plt %matplotlib inline # create subplots fig, axs = plt.subplots(nrows=3, ncols=2) print(axs.shape) axs(3, 2)How can you loop through subplots? 
Load Example DataIn this notebook we will use daily stock data for 5 stocks collected using a finance library called **[ffn](https://pypi.org/project/ffn/)** and then demonstrate methods for plotting the daily time series of each stock on a different subplot# library to get stock data import ffn # load daily stock prices for selected stocks from ffn tickers = ["aapl", "msft", "tsla", "nvda", "intc"] prices = ffn.get(tickers, start="2017-01-01") # convert data into a 'long' table for this plotting exercise df = prices.melt(ignore_index=False, var_name="ticker", value_name="closing_price") df.head()Method 1: `axs.ravel()`# define subplot grid fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(15, 12)) plt.subplots_adjust(hspace=0.5) fig.suptitle("Daily closing prices", fontsize=18, y=0.95) # loop through tickers and axes for ticker, ax in zip(tickers, axs.ravel()): # filter df for ticker and plot on specified axes df[df["ticker"] == ticker].plot(ax=ax) # chart formatting ax.set_title(ticker.upper()) ax.get_legend().remove() ax.set_xlabel("") plt.show()Method 2: `plt.subplot`plt.figure(figsize=(15, 12)) plt.subplots_adjust(hspace=0.5) plt.suptitle("Daily closing prices", fontsize=18, y=0.95) # loop through the length of tickers and keep track of index for n, ticker in enumerate(tickers): # add a new subplot iteratively ax = plt.subplot(3, 2, n + 1) # filter df and plot ticker on the new subplot axis df[df["ticker"] == ticker].plot(ax=ax) # chart formatting ax.set_title(ticker.upper()) ax.get_legend().remove() ax.set_xlabel("")**Pros of Method 2**- do not get an empty plot at the end if the number of charts is an odd number- simple syntax**Cons of Method 2**- still need to predefine the subplot grid (i.e. number of rows and columns) which means you need to know the length of the tickers in advance - this normally should not be a problem as you are unlikely to be plotting a generator instead of a list - you could calculate the subplot grid dynamically Dynamic Grid SizingThe snippet below can be used to dynamically calculate the number of rows in a grid# find minimium required rows given we want 2 columns ncols = 2 nrows = len(tickers) // ncols + (len(tickers) % ncols > 0) nrows plt.figure(figsize=(15, 12)) plt.subplots_adjust(hspace=0.2) plt.suptitle("Daily closing prices", fontsize=18, y=0.95) # set number of columns (use 3 to demonstrate the change) ncols = 3 # calculate number of rows nrows = len(tickers) // ncols + (len(tickers) % ncols > 0) # loop through the length of tickers and keep track of index for n, ticker in enumerate(tickers): # add a new subplot iteratively using nrows and cols ax = plt.subplot(nrows, ncols, n + 1) # filter df and plot ticker on the new subplot axis df[df["ticker"] == ticker].plot(ax=ax) # chart formatting ax.set_title(ticker.upper()) ax.get_legend().remove() ax.set_xlabel("")ResNet-50 comparison: compiled vs uncompiled, featuring InferentiaIn this notebook we will see how to deploy a pretrained model from the PyTorch Vision library, in particular a ResNet50, to Amazon SageMaker. We will also test how it performs on different hardware configurations, and the effects of model compilation with Amazon SageMaker Neo. 
In a nutshell, we will test:- ResNet50 on a ml.c5.xlarge, uncompiled- ResNet50 on a ml.g4dn.xlarge, uncompiled- ResNet50 on a ml.c5.xlarge, compiled- ResNet50 on a ml.g4dn.xlarge, compiled- ResNet50 on a ml.inf1.xlarge, compiled Set-up model and SageMaker helper functionsimport sagemaker from sagemaker import Session, get_execution_role from sagemaker.pytorch.model import PyTorchModel from sagemaker.utils import name_from_base print(sagemaker.__version__) sess = Session() bucket = sess.default_bucket() role = get_execution_role() endpoints = {}2.47.2.post0Let's download the model for the PyTorch Hub, and create an archive that can be used by SageMaker to deploy this model. For using PyTorch in Script Mode, Amazon SageMaker expects a single archive file in `.tar.gz` format, containing a model file and the code for inference in a `code` folder. The structure of the archive will be as follows: ```/model.tar.gz/--- model.pth/--- code//--- /--- inference.py/--- /--- requirements.txt (optional)```By setting the variable `download_the_model=False`, you can skip the download phase and provide your own path to S3 in the `model_data` variable.download_the_model = False if download_the_model: import torch, tarfile # Load the model model = torch.hub.load('pytorch/vision:v0.9.0', 'resnet50', pretrained=True) inp = torch.rand(1, 3, 224, 224) model_trace = torch.jit.trace(model, inp) # Save your model. The following code saves it with the .pth file extension model_trace.save('model.pth') with tarfile.open('model.tar.gz', 'w:gz') as f: f.add('model.pth') f.add('code/uncompiled-inference.py', 'code/inference.py') f.close() pytorch_resnet50_prefix = 'pytorch/resnet50' model_data = sess.upload_data('model.tar.gz', bucket, pytorch_resnet50_prefix) else: pytorch_resnet50_prefix = 'pytorch/resnet50' model_data = f's3://{bucket}/{pytorch_resnet50_prefix}/model.tar.gz' print(f'Model stored in {model_data}')Model stored in s3://sagemaker-us-east-1-859755744029/pytorch/resnet50/model.tar.gzDeploy and test on CPUIn our first test, we will deploy the model on a `ml.c5.xlarge` instance, without compiling the model. Although this is a CNN, it is still possible to run it on CPU, although the performances won't be that good. This can give us a nice baseline of the performances of our model.pth_model = PyTorchModel(model_data=model_data, entry_point='uncompiled-inference.py', source_dir='code', role=role, framework_version='1.7', py_version='py3' ) predictor = pth_model.deploy(1, 'ml.c5.xlarge') endpoints['cpu_uncompiled'] = predictor.endpoint_name predictor.endpoint_nameDeploy and test on GPUThe instance chosen this time is a `ml.g4dn.xlarge`. It has great throughput and the cheapest way of running GPU inferences on the AWS cloud.pth_model = PyTorchModel(model_data=model_data, entry_point='uncompiled-inference.py', source_dir='code', role=role, framework_version='1.6', py_version='py3' ) predictor = pth_model.deploy(1, 'ml.g4dn.xlarge') endpoints['gpu_uncompiled'] = predictor.endpoint_name predictor.endpoint_nameCompiled ModelsA common tactic in more advanced use cases is to improve model performances, in terms of latency and throughput, by compiling the model. 
Amazon SageMaker features its own compiler, Amazon SageMaker Neo, that enables data scientists to optimize ML models for inference on SageMaker in the cloud and supported devices at the edge.You start with a machine learning model already built with DarkNet, Keras, MXNet, PyTorch, TensorFlow, TensorFlow-Lite, ONNX, or XGBoost and trained in Amazon SageMaker or anywhere else. Then you choose your target hardware platform, which can be a SageMaker hosting instance or an edge device based on processors from Ambarella, Apple, ARM, Intel, MediaTek, Nvidia, NXP, Qualcomm, RockChip, Texas Instruments, or Xilinx. With a single click, SageMaker Neo optimizes the trained model and compiles it into an executable. For inference in the cloud, SageMaker Neo speeds up inference and saves cost by creating an inference optimized container in SageMaker hosting. For inference at the edge, SageMaker Neo saves developers months of manual tuning by automatically tuning the model for the selected operating system and processor hardware. Create the `model_data` for compilationTo compile the model, we need to provide a `tar.gz` archive just like before, with very few changes. In particular, since SageMaker will use a different DL runtime for running compiled models, we will let it use the default function for serving the model, and only provide a script containing how to preprocess data. Let's create this archive and uplaod it to S3.import tarfile with tarfile.open('model-to-compile.tar.gz', 'w:gz') as f: f.add('model.pth') f.add('code/compiled-inference.py', 'code/inference.py') f.close() model_data = sess.upload_data('model-to-compile.tar.gz', bucket, pytorch_resnet50_prefix) output_path = f's3://{bucket}/{pytorch_resnet50_prefix}/compiled'Compile for CPULet's run the same baseline test from before, and compile and deploy for CPU instances.pth_model = PyTorchModel(model_data=model_data, entry_point='compiled-inference.py', source_dir='code', role=role, framework_version='1.6', py_version='py3' ) output_path = f's3://{bucket}/{pytorch_resnet50_prefix}/compiled' compiled_model = pth_model.compile( target_instance_family='ml_c5', input_shape={"input0": [1, 3, 224, 224]}, output_path=output_path, role=role, job_name=name_from_base(f'pytorch-resnet50-c5') ) predictor = compiled_model.deploy(1, 'ml.c5.xlarge') endpoints['cpu_compiled'] = predictor.endpoint_name predictor.endpoint_nameCompile for GPUpth_model = PyTorchModel(model_data=model_data, entry_point='compiled-inference.py', source_dir='code', role=role, framework_version='1.6', py_version='py3' ) output_path = f's3://{bucket}/{pytorch_resnet50_prefix}/compiled' compiled_model = pth_model.compile( target_instance_family='ml_g4dn', input_shape={"input0": [1, 3, 224, 224]}, output_path=output_path, role=role, job_name=name_from_base(f'pytorch-resnet50-g4dn') ) predictor = compiled_model.deploy(1, 'ml.g4dn.xlarge') endpoints['gpu_compiled'] = predictor.endpoint_name predictor.endpoint_name print(1)1Compile for Inferentia instancesThere is one more thing we can try to improve our model performances: using Inferentia instances.Amazon EC2 Inf1 instances deliver high-performance ML inference at the lowest cost in the cloud. They deliver up to 2.3x higher throughput and up to 70% lower cost per inference than comparable current generation GPU-based Amazon EC2 instances. Inf1 instances are built from the ground up to support machine learning inference applications. 
They feature up to 16 AWS Inferentia chips, high-performance machine learning inference chips designed and built by AWS.pth_model = PyTorchModel( model_data=model_data, entry_point='compiled-inference.py', source_dir='code', role=role, framework_version='1.7', py_version='py3' ) compiled_model = pth_model.compile( target_instance_family='ml_inf1', input_shape={"input0": [1, 3, 224, 224]}, output_path=output_path, role=role, job_name=name_from_base(f'pytorch-resnet50-inf1'), compile_max_run=1000 ) predictor = compiled_model.deploy(1, 'ml.inf1.xlarge') endpoints['inferentia'] = predictor.endpoint_name predictor.endpoint_nameTest For testing our models and endpoints, we will use the following picture of a beagle pup, freely available on [Wikimedia](https://commons.wikimedia.org/wiki/File:Beagle_puppy_sitting_on_grass.jpg). We will pass it to our endpoints as `application/x-image`, and no particular pre-processing is needed on the client side.from IPython.display import Image Image('doggo.jpg')Finally, we will use the following function to benchmark our SageMaker endpoints, measuring the latency of our predictions. This specific version uses both the `Predictor` from the SageMaker Python SDK and boto3's `invoke_endpoint()` function - just change the last parameter from `boto3` to `sm` if you want to use the Python SDK.from load_test import load_tester num_thread = 10 from IPython.display import JSON JSON(endpoints) # CPU - Uncompiled load_tester(num_thread, endpoints['cpu_uncompiled'], 'doggo.jpg', 'boto3')Using boto3 for requests. TPS | P50 | P90 | P95 | P99 | err 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 4.100|154.09832|170.55326|171.40527|179.54471 | 0 5.900|167.96876|171.36371|171.82379|185.01005 | 0Let's discuss the above results: as we can see from the latency tests, the model takes far too long to generate an inference, averaging about 6 transactions per second (TPS). This may be sufficient for some low-throughput or rarely invoked endpoints, but for most use cases it won't be enough. Let's see how those numbers change when using a GPU.# GPU - Uncompiled load_tester(num_thread, endpoints['gpu_uncompiled'], 'doggo.jpg', 'boto3')Using boto3 for requests.
TPS | P50 | P90 | P95 | P99 | err 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 71.900|11.80012|14.37088|15.16426|71.54423 | 0 74.200|11.80911|14.11828|15.55170|80.74821 | 0 73.600|11.86891|13.91335|14.91341|74.74922 | 0 75.700|11.81529|13.47131|14.11105|79.58128 | 0 75.500|11.76775|13.63597|14.26841|65.81278 | 0 73.000|11.74483|13.89458|15.24351|75.62139 | 0 75.300|11.57395|13.56684|14.60281|95.03487 | 0 74.100|11.69744|13.78759|16.07176|77.24368 | 0 75.100|11.82399|13.74409|16.19095|77.27810 | 0 76.300|11.68339|13.35336|15.39730|82.90827 | 0 76.400|12.08593|13.35388|14.11228|70.84170 | 0 75.900|12.08845|13.37607|14.13612|64.37122 | 0 76.500|11.65937|13.01402|13.38935|73.97340 | 0 76.800|11.45495|13.07725|13.75206|79.20948 | 0 76.200|11.65578|13.37109|14.84504|68.58632 | 0 76.300|11.56737|13.29634|13.75376|84.87934 | 0 76.200|11.7[...]Now we're talking! The GPU helps us achieve 77 TPS on average, with a much lower latency percentile over the board. Nice!# CPU - Compiled load_tester(num_thread, endpoints['cpu_compiled'], 'doggo.jpg', 'boto3')Using boto3 for requests. TPS | P50 | P90 | P95 | P99 | err 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 1.400|27.34789|43.17109|45.49675|46.66742 | 0 13.200|75.15947|77.39340|77.70184|78.02003 | 0 13.100|75.52688|77.68237|78.34686|79.13477 | 0 13.300|75.38521|77.44209|77.88107|79.08387 | 0 13.200|75.31815|77.52130|78.14982|79.18169 | 0 13.200|75.35261|77.46928|78.06024|79.13503 | 0 13.200|75.35843|77.41700|77.87167|79.08827 | 0 13.200|75.29122|77.43950|78.07704|79.71710 | 0 13.100|75.37787|77.48869|78.26608|81.16619 | 0 13.200|75.24554|77.36055|78.05424|81.42112 | 0 13.200|75.21513|77.36815|78.14131|81.42112 | 0 13.200|75.25020|77.41456|78.15042|81.[...]With a simple compilation job, we more than DOUBLED the performances of our model on our c5 instance, achieving 13 TPS and half the latency percentiles. Let's see if the same results can be seen on GPU.# GPU - Compiled load_tester(num_thread, endpoints['gpu_compiled'], 'doggo.jpg', 'boto3')Using boto3 for requests. TPS | P50 | P90 | P95 | P99 | err 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 127.200|6.18587|7.55885|7.92916|46.42042 | 0 134.800|6.25129|7.63310|8.23157|43.91295 | 0 140.900|6.20703|7.22852|7.78228|53.26434 | 0 138.300|6.19562|6.83635|7.07245|58.19304 | 0 142.400|6.08956|6.92130|7.23731|46.92067 | 0 142.500|6.20135|7.51865|8.06668|51.33103 | 0 143.200|6.24374|6.97204|7.34329|30.09887 | 0 144.000|6.15883|6.95482|7.13942|48.93987 | 0 143.200|6.18212|6.93569|7.06721|47.13597 | 0 144.300|5.55439|6.83386|6.90111|56.21996 | 0 144.400|6.20829|6.94591|7.13983|9.06043 | 0 143.700|6.07319|6.87563|7.07919|73.52560 | 0 144.200|6.09211|6.83380|6.92895|49.54501 | 0 144.700|5.55294|6.85225|6.91850|63.36337 | 0 144.300|6.19800|6.91495|7.19666|29.78611 | 0 144.400|5.77911|6.87983|7.04087|61.14207 | 0 144.600|6.13239|6.87113|6.99401|52.21058 | 0 144.400|5.83435|6.88874|7.07376|53.57051 [...]Results are also consistent in the compiled version of the model on GPU. Almost double the TPS, with 7 ms latency. 
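As an aside, the P50/P90/P95/P99 columns printed by `load_tester` are standard latency percentiles. The following is only a minimal sketch of how such figures can be derived from raw per-request timings with NumPy; the numbers are made up and `load_tester`'s internals are not shown here.

```python
import numpy as np

# hypothetical per-request latencies in milliseconds collected during a load test
latencies_ms = np.array([6.1, 6.3, 5.9, 7.2, 6.8, 48.9, 6.0, 6.4, 6.2, 7.0])

p50, p90, p95, p99 = np.percentile(latencies_ms, [50, 90, 95, 99])
print(f"P50={p50:.3f} | P90={p90:.3f} | P95={p95:.3f} | P99={p99:.3f}")
# Throughput (TPS) additionally depends on the measurement window and the number
# of concurrent client threads, so it cannot be derived from the latencies alone.
```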
Let's take it one step further and test Inferentia.# Inferentia load_tester(num_thread, endpoints['inferentia'], 'doggo.jpg', 'boto3')Using boto3 for requests. TPS | P50 | P90 | P95 | P99 | err 0.000|0.00000|0.00000|0.00000|0.00000 | 0 0.000|0.00000|0.00000|0.00000|0.00000 | 0 121.200|4.15289|5.93244|6.44113|7.80767 | 0 227.400|3.49829|4.67301|5.47904|8.46626 | 0 220.600|3.69167|5.06214|5.70987|7.27710 | 0 259.000|3.41871|4.28656|4.66167|8.99143 | 0 259.000|3.41253|4.19616|4.50734|5.42752 | 0 247.200|3.53762|4.24550|4.48812|5.24465 | 0 270.800|3.35592|4.15566|4.44887|5.29431 | 0 274.700|3.45520|4.20523|4.41165|4.96067 | 0 260.300|3.36056|4.15711|4.68477|9.28876 | 0 264.000|3.30304|4.11524|4.47264|9.47070 | 0 252.500|3.27674|3.91348|4.23365|5.38313 | 0 266.600|3.23882|3.94166|4.29280|5.42367 | 0 277.300|3.34935|4.16940|4.50558|5.50479 | 0 277.100|3.38901|4.08751|4.33727|4.99565 | 0 280.400|3.35106|4.01841|4.37606|5.20386 | 0 275.700|3.27066|3.95396|4.20007|4.99382 | 0 281.400|3.34144|4.08846|4.33117|4.84749 | 0 275.700|3.26554|3.98474|4.24265|4.85687 | 0The best results so far! Up to 290 TPS at the same latency percentiles than GPU, or lower. All of this for a fraction of the cost. Reviewing the resultsLet's plot the results obtained from the previous tests, by taking into account the last printed line of each load testing. We will ignore the error rate, and take into account the TPS obtained with our tests dividing it by the cost of the machine in the region of testing (DUB) - a metric we will call "throughput per dollar".import matplotlib.pyplot as plt from io import StringIO import pandas as pd data = StringIO(''' experiment|TPS|p50|cost/hour c5|5.900|167.96876|0.23 c5 + Neo|13.200|75.25020|0.23 g4dn|76.200|11.75612|0.821 g4dn + Neo|144.400|5.83435|0.821 inf1|275.700|3.26554|0.33 ''') df = pd.read_csv(data, sep='|') df['throughput per dollar'] = df['TPS']/df['cost/hour'] ax = df.plot(x="experiment", y="TPS", legend=False, linewidth=3) df.plot(x="experiment", y="p50", ax=ax, legend=False, color="r", linewidth=3) df.plot(x="experiment", y="throughput per dollar", ax=ax, legend=False, color="g", linewidth=3) for x in df.experiment: plt.axvline(x=x, ls='--', color='black', linewidth=1) ax.figure.legend() plt.rcParams["figure.figsize"] = (8,5) plt.show()The results speak for themselves. The `inf1` instance proves extremely competitive, thanks to its high TPS, incredibly low latency and undeniable value. Compiling the model for a GPU just like the `g4dn` is also a very interesting approach. 
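Another way to read the same table is cost per million inferences. A small follow-up sketch, reusing the `df` built in the cell above and assuming the measured TPS can be sustained:

```python
# cost per 1M inferences = hourly price / (TPS * 3600 requests per hour) * 1e6
df["cost per 1M inferences ($)"] = df["cost/hour"] / (df["TPS"] * 3600) * 1e6
print(df[["experiment", "TPS", "cost/hour", "cost per 1M inferences ($)"]].round(2))
```

On these figures the inf1 endpoint also comes out cheapest per inference (roughly $0.33 per million, versus about $1.6 for the compiled g4dn and over $10 for the uncompiled c5), which matches the conclusion above.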
Clean-upfor endpoint in endpoints: pred = sagemaker.predictor.Predictor(endpoint_name=endpoints[endpoint]) pred.delete_endpoint()Give me stuff by this identifierdef get_by_id(id_: str): rv = requests.get(f"{BASE_URL}/nmdcschema/ids/{id_}") return rv.json() doc = get_by_id("gold:Gs0135149") print(doc["type"]) print(doc["name"]) print( [(ca["applies_to_person"]["name"], ca["applies_to_person"]["orcid"]) for ca in doc["has_credit_associations"] ]) doc = get_by_id("igsn:IEWFS000Q") print(doc["type"]) print(doc["name"]) print(doc["lat_lon"]) print(doc["part_of"])nmdc:Biosample Bulk soil microbial communities from the East River watershed near Crested Butte, Colorado, United States - ER_153 {'has_raw_value': '38.920576363 -106.948748019', 'latitude': 38.920576363, 'longitude': -106.948748019} ['gold:Gs0135149']Give me all the MetaG/MAGS/metaT from Study ABCimport json def get( collection: str, filter_: dict, max_page_size=20, page_token=None, limit=0 ): params = { "filter": json.dumps(filter_), "max_page_size": max_page_size, } getmore = True results = [] while getmore: response = requests.get( f"{BASE_URL}/nmdcschema/{collection}", params=params ).json() if "resources" not in response: return response results.extend(response["resources"]) if limit and len(results) > limit: getmore = False elif response.get("next_page_token"): print(f"Got {len(results)} so far. Getting more...") params["page_token"] = response["next_page_token"] else: getmore = False print(f"Got {len(results)}") return results get("lollipops", {}) biosamples = get( "biosample_set", {"part_of": "gold:Gs0135149"}, max_page_size=100 ) def ids_of(documents): return [d["id"] for d in documents] processings = get( "omics_processing_set", {"has_input": {"$in": ids_of(biosamples)}}, max_page_size=100 ) """ Request is too large with 831 IDs, so breaking it up here. """ docs = [] for i in range(0, 1000, 100): ids = ids_of(processings)[i:i+100] docs.extend(get( "metatranscriptome_activity_set", {"was_informed_by": {"$in": ids}}, max_page_size=100)) len(docs) def get_turbo( collection: str, filter_: dict, max_page_size=1000, limit=0, authorization=None ): json_in = { "find": collection, "filter": filter_, "limit": limit or max_page_size, "sort": {"id": 1} } getmore = True results = [] headers = {"Authorization": authorization} if authorization else None while getmore: response = requests.post( f"{BASE_URL}/queries:run", json=json_in, headers=headers ).json() if not response.get("ok"): return response batch = response["cursor"]["firstBatch"] for d in batch: d.pop("_id") results.extend(batch) if limit and len(results) > limit: getmore = False elif len(batch) == max_page_size: print(f"Got {len(results)} so far. 
Getting more...") json_in["find"]["_id"] = {"$gt": str(batch[-1]["_id"])} else: getmore = False print(f"Got {len(results)}") return results def get_authorization(username, password): rv = requests.post( f"{BASE_URL}/token", data={ "grant_type": "password", "username": username, "password": password, }) if not str(rv.status_code).startswith("2"): raise Exception(rv.text) response = rv.json() print("Expires", response["expires"]) return f'Bearer {response["access_token"]}' authorization = get_authorization("", "") docs = get_turbo( "metatranscriptome_activity_set", {"was_informed_by": {"$in": ids_of(processings)}}, authorization=authorization ) docs = get_turbo( "metagenome_assembly_set", {"was_informed_by": {"$in": ids_of(processings)}}, authorization=authorization ) docs = get_turbo( "metagenome_annotation_activity_set", {"was_informed_by": {"$in": ids_of(processings)}}, authorization=authorization ) docs = get_turbo( "nom_analysis_activity_set", {"was_informed_by": {"$in": ids_of(processings)}}, authorization=authorization )Got 101A simple one-dimensional regression example computed in two different ways:1. A noise-free case2. A noisy case with known noise-level per datapointIn both cases, the kernel’s parameters are estimated using the maximum likelihood principle.The figures illustrate the interpolating property of the Gaussian Process model as well as its probabilistic nature in the form of a pointwise 95% confidence interval.Note that the parameter alpha is applied as a Tikhonov regularization of the assumed covariance between the training points. New to Plotly?Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online).We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! 
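To make the role of `alpha` mentioned above concrete: scikit-learn adds it to the diagonal of the kernel matrix, so with training inputs $X$, targets $y$, kernel matrix $K = K(X, X)$ and a test point $x_*$, the predictive mean is

$$\bar{f}(x_*) = k(x_*, X)\,\bigl[K + \operatorname{diag}(\alpha)\bigr]^{-1} y$$

(a standard Gaussian process identity, sketched here for reference). This is why the noise-free case below can rely on the small default jitter, while the noisy case passes a per-point `alpha=(dy / y) ** 2`.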
Versionimport sklearn sklearn.__version__Imports This tutorial imports [GaussianProcessRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.htmlsklearn.gaussian_process.GaussianProcessRegressor) and [RBF](http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.RBF.htmlsklearn.gaussian_process.kernels.RBF).import plotly.plotly as py import plotly.graph_objs as go import numpy as np from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, ConstantKernel as CCalculationsnp.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) def data_to_plotly(x): k = [] for i in range(0, len(x)): k.append(x[i][0]) return kThe Noiseless CaseX = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T # Observations y = f(X).ravel() # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2)) gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, sigma = gp.predict(x, return_std=True)Plot the function, the prediction and the 95% confidence interval based on the MSEp1 = go.Scatter(x=data_to_plotly(x), y=data_to_plotly(f(x)), mode='lines', line=dict(color='red', dash='dot'), name=u'f(x) = xsin(x)') p2 = go.Scatter(x=data_to_plotly(X), y=y, mode='markers', marker=dict(color='red'), name=u'Observations') p3 = go.Scatter(x=data_to_plotly(x), y=y_pred, mode='lines', line=dict(color='blue'), name=u'Prediction', ) p4 = go.Scatter(x=data_to_plotly(np.concatenate([x, x[::-1]])), y=np.concatenate([y_pred - 1.9600 * sigma,]), mode='lines', line=dict(color='blue'), fill='tonexty', name='95% confidence interval') data = [p3, p4, p1, p2] layout = go.Layout(xaxis=dict(title='x'), yaxis=dict(title='f(x)'), ) fig = go.Figure(data=data, layout=layout) py.iplot(fig)The Noisy CaseX = np.linspace(0.1, 9.9, 20) X = np.atleast_2d(X).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise # Instanciate a Gaussian Process model gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2, n_restarts_optimizer=10) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, sigma = gp.predict(x, return_std=True)Plot the function, the prediction and the 95% confidence interval based on the MSEp1 = go.Scatter(x=data_to_plotly(x), y=data_to_plotly(f(x)), mode='lines', line=dict(color='red', dash='dot'), name=u'f(x) = xsin(x)') p2 = go.Scatter(x=X.ravel(), y=y, mode='markers', marker=dict(color='red'), error_y=dict(visible=True, arrayminus=dy), name=u'Observations') p3 = go.Scatter(x=data_to_plotly(x), y=y_pred, mode='lines', line=dict(color='blue'), name=u'Prediction', ) p4 = go.Scatter(x=data_to_plotly(np.concatenate([x, x[::-1]])), y=np.concatenate([y_pred - 1.9600 * sigma,]), mode='lines', line=dict(color='blue'), fill='tonexty', name='95% confidence interval') data = [p3, p4, p1, p2] layout = go.Layout(xaxis=dict(title='x'), yaxis=dict(title='f(x)'), ) fig = go.Figure(data=data, layout=layout) py.iplot(fig)LicenseAuthor: License: BSD 3 clausefrom IPython.display import display, HTML 
display(HTML('')) display(HTML('')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Gaussian Processes Regression Basic Introductory Example.ipynb', 'scikit-learn/plot-gpr-noisy-targets/', 'Gaussian Processes Regression Basic Introductory Example | plotly', ' ', title = 'Gaussian Processes Regression Basic Introductory Example | plotly', name = 'Gaussian Processes Regression Basic Introductory Example', has_thumbnail='true', thumbnail='thumbnail/gpr-noisy.jpg', language='scikit-learn', page_type='example_index', display_as='gaussian-process', order=8, ipynb= '~Diksha_Gabha/3161')TODOs - Add factory class which prepares object for XGBoost and LightGBM.- Add example for classification example. Local Feature Importance> Give GBDT model ( XGBoost or LightGBM ) and a specific instance this class would calculate the relationship of different features with a specific instance.#hide from nbdev.showdoc import * #export class XGBoostLFI: "Wrapper around for `XGBoost` models" def __init__(self, model): self.model = model def get_tree(self, trees, tree_index): "Return tree for a specific index." mask = trees.Tree == tree_index return trees.loc[mask] def get_booster(self): "Returns booster." return self.model.get_booster() def get_num_trees(self, trees_df): "Returns number of number of estimators." return trees_df.Tree.nunique() def get_node(self, tree, node_id): "Returns a particular node in a tree." mask = tree.ID == node_id return tree.loc[mask] def get_node_index(self, node, branch): "Returns index of a particular node in a tree." if branch == 'left': return node['Yes'].values[0] else: return node['No'].values[0] def get_node_id(self, node): "Returns id of a particular node in a tree. It is different from a node-index." return node['ID'].values[0] def next_node(self, tree, curr_node, branch): "Returns next node from a current node based on which sub-branch one wants to navigate." if branch == 'left': return self.get_node(tree, self.get_node_index(curr_node, branch)) else: return self.get_node(tree, self.get_node_index(curr_node, branch)) def get_split(self, node): "Returns split value" return node['Split'].values[0] def get_feature(self, node): "Returns feature that was used to make the split." return node['Feature'].values[0] def node_score(self, node): "Returns gain for a particular decision node." return node['Gain'].values[0] def node_parent_score(self, node): "Returns parent score for a particular node" return node['Parent_Score'].values[0] def node_cover(self, node): "Returns how many samples are there in a terminal node." return node['Cover'].values[0] def average(self, left_node, right_node): "Returns average of scores of children of a node." return (left_node['score'] + right_node['score']) / 2 def weighted_sum(self, left_node, right_node): "Returns weighted average of children of a node." return ((left_node['cover'] * left_node['score'] + right_node['cover'] * right_node['score'])) /\ (left_node['cover'] + right_node['cover']) def propagation_strategy(self, left_node, right_node, strategy='average'): "Returns parent score from left and right children. It is based on propagation strategy." if strategy == 'average': return self.average(left_node, right_node) else: return self.weighted_sum(left_node, right_node) #export class LocalFeatureImportance: "Calculates `Feature Importance` and provides explanability. 
It implements (http://www.cs.sjtu.edu.cn/~kzhu/papers/kzhu-infocode.pdf)" def __init__(self, model, strategy='average'): self.strategy = strategy model_category = self.get_model_category(self.get_model_type(model)) if model_category == 'xgboost': self.model = XGBoostLFI(model) def trees_to_df(self): "Convert internal tree reprensentation to a Pandas DataFrame" if self.get_model_type(self.model) == xgb.core.Booster: return self.model.trees_to_dataframe() else: return self.model.get_booster().trees_to_dataframe() def get_model_type(self, model): return type(model) def get_model_category(self, model_type): if model_type in [xgb.core.Booster, xgb.sklearn.XGBRegressor, xgb.sklearn.XGBClassifier]: return 'xgboost' else: raise ValueError(f'{model_type} is not supported.') def make_node(self, node): "Create dict representation of a node which contains score and cover based on strategy." if self.strategy == 'average': return {'score': self.model.node_parent_score(node)} else: return {'score': self.model.node_parent_score(node), 'cover': self.model.node_cover(node)} def parse(self, tree, node_id): "Calculates and assigns scores for a particular node in the tree." current_node = self.model.get_node(tree, node_id) if self.model.get_feature(current_node) == 'Leaf': return self.model.node_score(current_node) left_child = self.model.next_node(tree, current_node, branch='left') right_child = self.model.next_node(tree, current_node, branch='right') left_branch_score = self.parse(tree, self.model.get_node_id(left_child)) tree.loc[tree.ID == left_child.ID.values[0], 'Parent_Score'] = left_branch_score right_branch_score = self.parse(tree, self.model.get_node_id(right_child)) tree.loc[tree.ID == right_child.ID.values[0], 'Parent_Score'] = right_branch_score root_score = self.model.propagation_strategy(self.make_node(self.model.get_node(tree, self.model.get_node_index(current_node, branch='left') )), self.make_node(self.model.get_node(tree, self.model.get_node_index(current_node, branch='right') ))) tree.loc[tree.ID == current_node.ID.values[0], 'Parent_Score'] = root_score return root_score def propagate_scores(self): "Parse and calculates scores for all nodes for all trees." trees_df = self.trees_to_df() parsed_trees = [] num_trees = self.model.get_num_trees(trees_df) for tree_index in range(num_trees): tree = self.model.get_tree(trees_df, tree_index) self.parse(tree, f'{tree_index}-0') parsed_trees.append(tree) return pd.concat(parsed_trees, axis=0) def split_decision(self, feature, split_value): "How to decide whether to go left or right in a tree." if pd.isnull(feature): return True elif feature <= split_value: return True else: return False def walk(self, tree, node_id, test, feature_contribution): "Walks through the tree for a specific instance and returns relationship between features and instance." 
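# Descriptive note: starting from the given node, follow the decision path the instance `test` would take:
# go left when the feature value is missing or <= the split value, otherwise go right. At every step,
# record Parent_Score(chosen child) - Parent_Score(current node) under the splitting feature in
# `feature_contribution`; the recursion stops once a leaf node is reached.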
current_node = self.model.get_node(tree, node_id) feature = self.model.get_feature(current_node) if feature == 'Leaf': return if self.split_decision(test[feature].values[0], self.model.get_split(current_node)): left_child = self.model.next_node(tree, current_node, branch='left') t = self.model.node_parent_score(left_child) - self.model.node_parent_score(current_node) feature_contribution[feature].append(t) self.walk(tree, self.model.get_node_id(left_child), test, feature_contribution) else: right_child = self.model.next_node(tree, current_node, branch='right') t = self.model.node_parent_score(right_child) - self.model.node_parent_score(current_node) feature_contribution[feature].append(t) self.walk(tree, self.model.get_node_id(right_child), test, feature_contribution) return feature_contribution def index_to_feat(self, feats): "Maps indices to features." return {i:f for i, f in enumerate(feats)} def get_fi(self, trees, x_test): "Calculates feature importance by aggregating over all samples over all trees." itof = self.index_to_feat(x_test.columns.tolist()) fc_over_sample = np.zeros(shape=(x_test.shape[1], x_test.shape[0])) for index in range(x_test.shape[0]): num_trees = self.model.get_num_trees(trees) contribution = defaultdict(int) for tree_index in range(num_trees): feature_contribution = defaultdict(list) tree = self.model.get_tree(trees, tree_index) fc = self.walk(tree, f'{tree_index}-0', x_test[index:index+1], feature_contribution) for f, l in fc.items(): contribution[f] += (np.sum(l) / num_trees) fc_over_sample[:, index] = np.array([contribution[itof[i]] if itof[i] in contribution else 0 for i in range(x_test.shape[1])]) fc_overall = np.apply_along_axis(np.median, axis=1, arr=fc_over_sample) return pd.DataFrame({'feature': x_test.columns.tolist(), 'importance': fc_overall})Usagex,y = make_regression(n_samples=1000,n_features=6,n_informative=3) xtr, xval, ytr, yval = train_test_split(x, y, test_size=0.5, random_state=41) model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=10, max_depth=4) model.fit(xtr,ytr) fig, ax = plt.subplots(1, figsize=(16, 10)) xgb.plot_importance(model, importance_type='gain', ax=ax); lfi = LocalFeatureImportance(model) scores = lfi.propagate_scores() scores.head()**Sample instance**si = pd.DataFrame(xval[5:6, :], columns=['f0', 'f1', 'f2', 'f3', 'f4', 'f5']); si model.predict(si) fc = lfi.get_fi(scores, si) fcCompare with global feature importancex,y = make_regression(n_samples=1000,n_features=6,n_informative=3) xtr, xval, ytr, yval = train_test_split(x, y, test_size=0.5, random_state=41) model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=50, max_depth=2) model.fit(xtr,ytr) fig, ax = plt.subplots(1, figsize=(16, 10)) xgb.plot_importance(model, importance_type='gain', ax=ax); lfi = LocalFeatureImportance(model) scores = lfi.propagate_scores() fc = lfi.get_fi(scores, pd.DataFrame(xval, columns=[f'f{i}' for i in range(6)])) fig, ax = plt.subplots(1, figsize=(16, 10)) fc.reindex(fc.importance.abs().sort_values(ascending=True).index).set_index('feature')['importance']\ .map(np.abs).plot(kind='barh', ax=ax) plt.ylabel('Feature Contribution');使用随即森林填补缺失值dataset = load_boston() dataset.data.shape #总共506*13=6578个数据 X_full, y_full = dataset.data, dataset.target n_samples = X_full.shape[0] n_features = X_full.shape[1]添加缺失值#首先确定我们希望放入的缺失数据的比例,在这里我们假设是50%,那总共就要有3289个数据缺失 rng = np.random.RandomState(0) missing_rate = 0.5 n_missing_samples = int(np.floor(n_samples * n_features * missing_rate)) #np.floor向下取整,返回.0格式的浮点数 
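# Note: missing_samples and missing_features below are drawn independently with replacement,
# so some (row, column) pairs can repeat and the actual number of NaN cells may end up slightly
# below n_missing_samples.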
#The missing entries should be scattered randomly across the rows and columns of the dataset, and each missing entry needs one row index and one column index #If we create an array of 3289 row indices drawn from 0~506 and 3289 column indices drawn from 0~13, we can #use those indices to set any 3289 positions in the data to NaN #We then fill these missing values with 0, with the mean, and with a random forest, and compare how the regression results differ missing_features = rng.randint(0,n_features,n_missing_samples) missing_samples = rng.randint(0,n_samples,n_missing_samples) #missing_samples = rng.choice(dataset.data.shape[0],n_missing_samples,replace=False) #We are sampling 3289 positions here, far more than our sample size of 506, so we use randint, which draws with replacement. If we needed #fewer draws than the 506 samples, we could use np.random.choice instead, which samples without repetition #and therefore spreads the selection out, making sure the missing entries are not concentrated in a few rows X_missing = X_full.copy() y_missing = y_full.copy() X_missing[missing_samples,missing_features] = np.nan X_missing = pd.DataFrame(X_missing) #Converting to a DataFrame makes the later steps more convenient; numpy is much faster for matrix arithmetic, but pandas is easier to use for indexing and similar operationsFilling with 0 and with the mean#Impute with the mean from sklearn.impute import SimpleImputer imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') X_missing_mean = imp_mean.fit_transform(X_missing) #Impute with 0 imp_0 = SimpleImputer(missing_values=np.nan, strategy="constant",fill_value=0) X_missing_0 = imp_0.fit_transform(X_missing)Filling missing values with a random forest""" Imputing missing values with random forest regression Any regression learns from a feature matrix and then predicts a continuous label y; this works because the regression algorithm assumes some relationship between the feature matrix and the label. In fact, labels and features are interchangeable: in a problem that predicts "house price" from "district, environment, number of nearby schools", we can just as well use "environment", "number of nearby schools" and "house price" to predict "district". Regression imputation exploits exactly this idea. For a dataset with n features in which feature T has missing values, we treat T as the label and combine the other n-1 features with the original label into a new feature matrix. The rows where T is not missing have both features and a label and form our training data, while the rows where T is missing have features but no label and are what we need to predict: Other n-1 features + original label, for rows where T is not missing: X_train Values of T that are not missing: Y_train Other n-1 features + original label, for rows where T is missing: X_test Values of T that are missing: unknown, the Y_test we need to predict This approach works very well when one feature has many missing values while the other features are largely complete. What if features other than T also contain missing values? The answer is to iterate over all features, starting with the one that has the fewest missing values (filling the feature with the fewest missing values requires the least accurate information). While filling one feature, the missing values of the other features are temporarily replaced with 0; after each regression prediction, the predicted values are written back into the original feature matrix before moving on to the next feature. After each round one more feature is complete, so fewer and fewer features need the temporary 0 fill. By the time we reach the last feature (which should be the one with the most missing values), no other feature needs to be filled with 0 any more, and the large amount of information already imputed for the other features can be used to fill this one. Once all features have been visited, the data is complete and contains no missing values. """ X_missing_reg = X_missing.copy() # Sort the features by their number of missing values, from fewest to most sortindex = np.argsort(X_missing_reg.isnull().sum(axis=0)).values for i in sortindex: #Build the new feature matrix and the new label df = X_missing_reg fillc = df.iloc[:,i] df = pd.concat([df.iloc[:,df.columns != i],pd.DataFrame(y_full)],axis=1) #In the new feature matrix, fill the columns that still contain missing values with 0 df_0 =SimpleImputer(missing_values=np.nan,strategy='constant',fill_value=0).fit_transform(df) #Split into training and test sets Ytrain = fillc[fillc.notnull()] Ytest = fillc[fillc.isnull()] Xtrain = df_0[Ytrain.index,:] Xtest = df_0[Ytest.index,:] #Use random forest regression to fill the missing values rfc = RandomForestRegressor(n_estimators=100) rfc = rfc.fit(Xtrain, Ytrain) Ypredict = rfc.predict(Xtest) #Write the imputed feature back into the original feature matrix X_missing_reg.loc[X_missing_reg.iloc[:,i].isnull(),i] = YpredictModeling on the imputed data#Fit a model on every version of the data and collect the MSE X = [X_full,X_missing_mean,X_missing_0,X_missing_reg] mse = [] std = [] for x in X: estimator = RandomForestRegressor(random_state=0, n_estimators=100) scores = cross_val_score(estimator,x,y_full,scoring='neg_mean_squared_error',cv=5).mean() mse.append(scores * -1) x_labels = ['Full data', 'Zero Imputation', 'Mean Imputation', 'Regressor Imputation'] colors = ['r', 'g', 'b', 'orange'] plt.figure(figsize=(12, 6)) ax = plt.subplot(111) for i in np.arange(len(mse)): ax.barh(i, mse[i],color=colors[i], alpha=0.6, align='center') ax.set_title('Imputation Techniques with Boston Data') ax.set_xlim(left=np.min(mse) * 0.9, right=np.max(mse) * 1.1) ax.set_yticks(np.arange(len(mse))) ax.set_xlabel('MSE') ax.set_yticklabels(x_labels) plt.show()Heating degree days*Heating degree days* (HDD) provide a simple measure of energy use.
The assumption is that people start heating their homes once temperatures drop below a certain threshold. By counting the number of such days one can anticipate energy use.This notebook calculates heating degree days from the GFS forecast for a number of different threshold values. Note that we use layer `49173` which contains daily aggregates of the GFS forecast. Consult the data explorer (https://ibmpairs.mybluemix.net/data-explorer) for more details on this dataset.from datetime import datetime, timedelta import re, os, pandas as pd, numpy as np, logging from matplotlib import pyplot as plt from scipy import ndimage from ibmpairs import paw, authenticationWe set some auxiliary variables and configure the logging system. E.g. `iso8601` helps when converting datetime objects to strings in ISO 8601-compliant format.oneSec = timedelta(seconds=1) iso8601 = '%Y-%m-%dT%H:%M:%SZ' logging.basicConfig(level=logging.WARNING) pawLogger = logging.getLogger('ibmpairs.paw') pawLogger.setLevel(logging.ERROR)We use our API key to authenticate. To do so, we generate an authentication object `pairs_credentials`. When using the `paw` library, this can be used directly when instantiating a query via `paw.PAIRSQuery`. However, when submitting an API request directly (via e.g. python's `requests` module), we need to use a suitable HTTP header. We construct this directly as well. See `pairs_auth_headers`.**Please replace `` with your PAIRS API key.**pairs_api_key = ''Specifically, we use `authentication.OAuth2` to retrieve the access token:pairs_credentials = authentication.OAuth2(api_key=pairs_api_key) pairs_auth_headers={ 'authorization' : f'Bearer {pairs_credentials.jwt_token}', 'content-type' : 'application/json' }The token will expire after an hour. This notebook will run in less time, so this is no issue. For longer running jobs use `pairs_credentials.refresh_auth_token()` to refresh. See https://pairs.res.ibm.com/tutorial/tutorials/api/paw.htmlrefreshing-a-token for details. Step 1: Run a point queryWhen working with PAIRS, it is generally good practice to start with a point query. Point queries are fast and create immediate responses, allowing users to ensure that they understand the data they are accessing. In this case we are interested in layer `49173`, which contains daily aggregates of the GFS temperature forecast. The layer has two dimensions, *issuetime* and *horizon*. *horizon* gives the forecast horizon in days, i.e. the time difference between the day the forecast was made and the one it is for, with the convention (for *this dataset only*) that the first day in the forecast has a horizon of 1 instead of 0. 
The GFS is issued several times a day; we use the 0:00 UTC forecast.(Note that the conventions for this dataset are somewhat different from the way most weather forecasts are stored in PAIRS.)As mentioned before, consult the data explorer (https://ibmpairs.mybluemix.net/data-explorer) for details.pointQueryJson = { 'layers' : [ { 'type' : 'raster', 'id' : '49173', 'dimensions' : [{'name' : 'issuetime', 'value' : '0'}, {'name' : 'horizon', 'value' : '16'}] } ], 'spatial' : {'type' : 'point', 'coordinates' : ['40', '-100']}, 'temporal' : {'intervals' : [{ 'start' : datetime(2019, 10, 1).strftime(iso8601), 'end' : datetime(2019, 12, 1).strftime(iso8601) }]} } pointQuery = paw.PAIRSQuery(pointQueryJson, auth=pairs_credentials, authType='api-key') pointQuery.submit() pointQuery.vdf.head()Step 2: Calculating HDDs on the flyIn what follows we calculate heating degree days for the next two weeks using a user-defined function (UDF). Here we will implement simple logic that allows us to adjust the threshold value dynamically. See the tutorials and documentation at https://pairs.res.ibm.com/tutorial for further details on these.Since we want to calculate the HDDs for the next two weeks, we need yesterday's date.utcToday = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=1)The following function generates a suitable query JSON to be submitted to PAIRS. Note that it is generally advisable to write functions that write UDF strings instead of writing them by hand.def getQueryJson(today, thresholds = [18]): queryJson = { 'layers' : [ { 'alias' : 'L{0}DA{1}'.format(lID, str(daysAhead).zfill(2)), 'aggregation' : 'Mean', 'type' : 'raster', 'id' : lID, 'dimensions' : [{'name' : 'issuetime', 'value' : '0'}, {'name' : 'horizon', 'value' : str(daysAhead + 1)}], 'temporal' : {'intervals' : [{ 'start' : (today + timedelta(days = daysAhead)).strftime(iso8601), 'end' : (today + timedelta(days = daysAhead)).strftime(iso8601) }]}, 'output' : False } for daysAhead in range(0, 16) for lID in ['49173', '49174']], 'spatial' : {'type' : 'poly', 'aoi' : 24}, 'temporal' : {'intervals' : [{'snapshot' : today.strftime(iso8601)}]}, #'publish' : False } queryJson['layers'].extend([{ 'alias' : 'hdd{0}C'.format(t), 'expression' : '+'.join([ '(($L49173DA{0} + $L49174DA{0}) / 2 < {1} ?
1 : 0)'.format(str(daysAhead).zfill(2), format(t)) for daysAhead in range(0, 16) ]) } for t in thresholds]) return queryJson queryJson = getQueryJson(utcToday, thresholds = [0, 5, 10, 15]) query = paw.PAIRSQuery(queryJson, auth=pairs_credentials, authType='api-key') query.submit() query.poll_till_finished() query.download() query.create_layers()Step 3: ResultsThe following shows a spatial distribution of heating degree days for the next two weeks depending on different threshold temperatures.bBox = query.metadata['Expression-hdd5C[hdd5C]-Exp']['details']['boundingBox'] extent = [bBox['minLongitude'], bBox['maxLongitude'], bBox['minLatitude'], bBox['maxLatitude']] fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize = (24, 12)) axesImage = axes[0, 0].imshow(query.data['Expression-hdd0C[hdd0C]-Exp'], cmap = 'coolwarm', vmin = 0, vmax = 16, extent = extent) axes[0, 1].imshow(query.data['Expression-hdd5C[hdd5C]-Exp'], cmap = 'coolwarm', vmin = 0, vmax = 16, extent = extent) axes[1, 0].imshow(query.data['Expression-hdd10C[hdd10C]-Exp'], cmap = 'coolwarm', vmin = 0, vmax = 16, extent = extent) axes[1, 1].imshow(query.data['Expression-hdd15C[hdd15C]-Exp'], cmap = 'coolwarm', vmin = 0, vmax = 16, extent = extent) axes[0, 0].set_title('Heating degree days 0C') axes[0, 1].set_title('Heating degree days 5C') axes[1, 0].set_title('Heating degree days 10C') axes[1, 1].set_title('Heating degree days 15C') plt.colorbar(axesImage, ax = axes, label = 'Days') plt.savefig('hdd.png', dpi = 160, bbox_inches = 'tight', transparent = True) plt.show()Questão 1Crie um array x com 200 pontos, começando de -10 até 10. Depois calcule as seguintes funções sobre esse array:y1 = sen(x) / xy2 = 1 / (1-e^(-x))Por fim, plot essas duas funções, em função do x, ambas em uma mesma figura, porém, em dois axes distintos (um abaixo do outro).import numpy as np import math import matplotlib.pyplot as plt def f1(x): return math.sin(x) / x def f2(x): return 1 / (1 - math.e ** (-x)) numbers = np.random.uniform(size = 200, low = -10, high = 10) result_y1 = np.array([f1(x) for x in numbers]) result_y2 = np.array([f2(x) for x in numbers]) fig, axs = plt.subplots(2) axs[0].plot(result_y1) axs[1].plot(result_y2)Questão 2Carregue o arquivo avocado.csv e, em seguida, siga as instruções abaixo:(a) Obtenha apenas os dados desse dataset que são referentes à região da Califórnia (California), no ano de 2017; e salva-os em um novo dataframe.(b) A partir do dataframe obtido no item (a), obtenha apenas os dados referentes à abacates do tipo convencional; em seguida, salve em um novo dataframe.(c) Utilizando o dataframe salvo no item (b), crie um gráfico de linha que apresente a variação do preço médio do abacate ao longo do ano de 2017 (datas) na região da Califórnia.(d) Utilizando o dataframe salvo no item (a), crie um gráfico de pizza que apresente o percentual do volume total de abacates vendidos para cada tipo: convencional e orgânico.import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates df = pd.read_csv('avocado.csv') # (a) avocados_from_california = df.loc[df['region'] == 'California'] # (b) avocados_from_california_conventional = avocados_from_california.loc[df['type'] == 'conventional'] # (c) data = avocados_from_california_conventional.loc[ (avocados_from_california_conventional['year'] == 2017) ] fig, ax = plt.subplots() ax.plot('Date', 'AveragePrice', data=data) ax.xaxis.set_major_locator(mdates.MonthLocator())Questão 3Carregue o dataset contido no arquivo cholera-dataset.csv em um objeto 
DataFrame. Esse dataset contém dados referentes ao número de casos reportados de cólera em cada país, desde 1950 até 2016, além do número de mortes provocadas pela cólera; dentre outras informações.Após carregar esse dataset, obtenha:(a) Apenas os dados da Índia.(b) Crie um gráfico de barras que apresente o número de casos reportados e de mortes causadas pela cólera na Índia, ambos em um mesmo axes, em função do ano.import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('cholera-dataset.csv') #A cholera_india = df.loc[df['Country'] == 'India'] #B fig, ax = plt.subplots() ax.bar(cholera_india['Year'], cholera_india['Number of reported cases of cholera'], label = 'Reported cases') ax.bar(cholera_india['Year'], cholera_india['Number of reported deaths from cholera'], label = 'Reported deaths') plt.legend() plt.xlabel('Year') plt.ylabel('Number of cases and deaths') plt.title('Number of cases and deaths in India') plt.show()Questão 4Carregue os dados do arquivo googleplaystore.csv e, em seguida:(a) Crie um gráfico de pizza que apresente o percentual dos gêneros (Genres) dos aplicativos. Como existem muitos gêneros, considere apenas os 10 primeiros.(b) Crie um gráfico de barras que contenha a quantidade de aplicativos que foram desenvolvidos para cada versão específica (ou conjunto de versões) do Android, especificadas na coluna Android Ver.import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('googleplaystore.csv') #A games_gender = df.groupby(['Genres']).size() games_gender = games_gender.apply(lambda x: x / len(df) * 100) games_gender.sort_values(ascending=False, inplace=True) games_gender = games_gender[:9] games_gender['Others'] = 100 - games_gender.sum() fig, ax = plt.subplots() ax.pie(games_gender) plt.legend(title = 'Genres', labels = games_gender.index, loc='right', bbox_to_anchor=(1.5, 0.5)) plt.show() #B number_versions = df['Android Ver'].value_counts() total = number_versions.sum() number_versions = number_versions.sort_values(ascending= False) # Show only 03 versions with most apps, and the rest stays on column "Others" number_versions = number_versions[:3] number_versions['Others'] = total - number_versions.sum() fig, ax = plt.subplots() ax.bar(number_versions.index, number_versions) plt.show()Questão 5Carregue o arquivo houses_to_rent.csv. 
Em seguida:(a) Crie um gráfico de dispersão para visualizar uma relação entre a área e o preço total do aluguel do imóvel.(b) Visualize os outliers presentes na coluna da área utilizando o boxplot.(c) Faça o mesmo que foi solicitado no item anterior, porém, para a coluna do preço total do aluguel.(d) Elimine os outliers de ambas as colunas utilizando o método quantile.(e) Crie novamente um gráfico de dispersão dos dados da área e preço total do aluguel do imóvel; dessa vez, após a remoção dos outliers de ambas as colunas.import pandas as pd import matplotlib.pyplot as plt import numpy as np df = pd.read_csv('houses_to_rent.csv') # (a) plt.scatter(df['area'], df['total (R$)']) plt.show() # (b) fig1, ax1 = plt.subplots() ax1.boxplot(df['area'], showfliers=False, vert=False) plt.show() # (c) fig1, ax1 = plt.subplots() ax1.boxplot(df['total (R$)'], showfliers=False, vert=False) plt.show() # (d) e (e) pd_series_adjusted = df['area'][df['area'].between(df['area'].quantile(.1), df['area'].quantile(.9))] fig1, ax1 = plt.subplots() ax1.boxplot(pd_series_adjusted, showfliers=False, vert=False) plt.show() pd_series_adjusted = df['total (R$)'][df['total (R$)'].between(df['total (R$)'].quantile(.1), df['total (R$)'].quantile(.9))] fig1, ax1 = plt.subplots() ax1.boxplot(pd_series_adjusted, showfliers=False, vert=False) plt.show()Data Generation Process The first step in the End2You pipeline is to generate `.hdf5` of the raw input data. The main file that needs to defined is the `input_file.csv` which is a comma separated file with paths of the raw modality information (e.\g. .wav, .mp4 etc.), and the corresponding label, with a header `raw_file,label_file`. The raw file and the corresponding label file, must have the same name. Example of an `input_file.csv`:``` raw_file,label_file/path/to/data/file1.wav,/path/to/labels/file1.csv/path/to/data/file2.wav,/path/to/labels/file2.csv```The label_file is a file containing a header of the form timestamp,label_1,label_2, where timestamp is the time segment of the raw sequence with the corresponding labels (e.g. label_1, label_2,...).```*Label File example - file1.csv*time,label1,label20.00,0.24,0.140.04,0.20,0.18```Let's see how we can create the file if we have the path to the audio raw files and the labels. Create `input_file.csv`import numpy as np from pathlib import Path audio_path = Path('/path/to/raw_files') label_path = Path('/path/to/label/files') audio_files = np.array([str(x) for x in sorted(list(audio_path.glob('*')))]).reshape(-1, 1) label_files = np.array([str(x) for x in sorted(list(label_path.glob('*')))]).reshape(-1, 1) files_array = np.hstack([audio_files, label_files]) path2save_file = Path('/path/to/save/input_file.csv') np.savetxt(str(path2save_file), files_array, delimiter=',', fmt='%s', header='raw_file,label_file')Run generatorfrom end2you.generation_process import GenerationProcess from end2you.utils import ParamsWe use the class `Params` to elegantly define the parameters required for the generation process.The parameters for the generation process are the following:``` - save_data_folder: The batch size to load. - modality : Modality to be used for. Values [`audio`, `visual`, `audiovisual`]. - input_file : Path to input_file.csv. - delimiter : Label file delimiter. - fieldnames : If not provider, it assumes label_files have a header, otherwise provide a header with this parameter. - exclude_cols : If columns needs to be excluded from process. Takes a string with comma separated integers of columns to be excluded (starting from 0) e.g. 
'0, 2' - exclude first and third columns. - root_dir : Path to save the output files of end2you. - log_file : Name of log file.```Example:generator_params = Params(dict_params={ 'save_data_folder': '/path/to/save/hdf5/files', 'modality': 'audio', 'input_file': '/path/to/input_file.csv', 'exclude_cols': '0', 'delimiter': ';', 'fieldnames': 'file, timestamp, arousal, valence, liking', 'log_file': 'generation.log', 'root_dir': '/path/to/save/output/files/of/end2you' }) generation = GenerationProcess(generator_params) generation.start()Introduction to Diffusion MRI data Diffusion Weighted Imaging (DWI)Diffusion imaging probes the random, microscopic motion of water protons by employing MRI sequences which are sensitive to the geometry and environmental organization surrounding the water protons. This is a popular technique for studying the white matter of the brain. The diffusion within biological structures, such as the brain, are often restricted due to barriers (eg. cell membranes), resulting in a preferred direction of diffusion (anisotropy). A typical diffusion MRI scan will acquire multiple volumes that are sensitive to a particular diffusion direction and result in diffusion-weighted images (DWI). Diffusion that exhibits directionality in the same direction result in an attenuated signal. With further processing (to be discussed later in the lesson), the acquired images can provide measurements which are related to the microscopic changes and estimate white matter trajectories. Images with no diffusion weighting are also acquired as part of the acquisition protocol.![fiber_configurations](DiffusionDirections.png) \Diffusion along X, Y, and Z directions b-values & b-vectorsIn addition to the acquired diffusion images, two files are collected as part of the diffusion dataset. These files correspond to the gradient amplitude (b-values) and directions (b-vectors) of the diffusion measurement and are named with the extensions `.bval` and `.bvec` respectively. The b-value is the diffusion-sensitizing factor, and reflects the timing & strength of the gradients used to acquire the diffusion-weighted images. The b-vector corresponds to the direction of the diffusion sensitivity. Together these two files define the diffusion MRI measurement as a set of gradient directions and corresponding amplitudes. DatasetFor the rest of this tutorial, we will make use of a subset of publicly available dataset, ds000030, from [openneuro.org](https://openneuro.org/datasets/ds000030) The dataset is structured according to the Brain Imaging Data Structure ([BIDS](https://bids-specification.readthedocs.io/en/etable/)). Below is a tree diagram showing the folder structure of a single MR session within ds000030. This was obtained by using the bash command `tree`. `!tree ../data/ds000030````ds000030├── CHANGES├── code├── dataset_description.json├── README└── sub-10788/    ├── anat    │   ├── sub-10788_T1w.json    │   └── sub-10788_T1w.nii.gz    └── dwi       ├── sub-10788_dwi.bval       │── sub-10788_dwi.bvec       │── sub-10788_dwi.json       └── sub-10788_dwi.nii.gz```!aws s3 sync --no-sign-request \ s3://openneuro/ds000030/ds000030_R1.0.5/uncompressed/sub-10788 \ ../../data/ds000030/sub-10788 \ --exclude '*' \ --include '*T1w*' \ --include '*dwi*'Querying a BIDS Dataset[`pybids`](https://bids-standard.github.io/pybids/) is a Python API for querying, summarizing and manipulating the BIDS folder structure. We will make use of `pybids` to query the necessary files. 
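Before querying anything with `pybids`, it can help to peek at the two gradient files described above. A minimal sketch (the paths follow the BIDS layout of sub-10788 shown earlier and are assumptions, not code from the original lesson):

```python
import numpy as np

# .bval: one gradient amplitude (s/mm^2) per acquired volume -> shape (n_volumes,)
bvals = np.loadtxt("../../data/ds000030/sub-10788/dwi/sub-10788_dwi.bval")
# .bvec: unit gradient directions, one column per volume -> shape (3, n_volumes)
bvecs = np.loadtxt("../../data/ds000030/sub-10788/dwi/sub-10788_dwi.bvec")
print(bvals.shape, bvecs.shape, (bvals == 0).sum(), "b=0 volumes")
```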
Lets first pull the metadata from its associated JSON file using the `get_metadata()` function for the first run.from bids.layout import BIDSLayout layout = BIDSLayout("../../data/ds000030", validate=False) dwi = layout.get(subject='10788', suffix='dwi', extension='nii.gz', return_type='file')[0] layout.get_metadata(dwi)[`dipy`](http://dipy.org)For this lesson, we will use the `Dipy` (Diffusion Imaging in Python) package for processing and analysing diffusion MRI. Why `dipy`? - Fully free and open source- Implemented in Python. Easy to understand, and easy to use.- Implementations of many state-of-the art algorithms- High performance. Many algorithms implemented in [`cython`](http://cython.org/) Installing `dipy`The easiest way to install `Dipy` is to use `pip`! Additionally, `Dipy` makes use of the FURY library for visualization. We will also install this using `pip`!We can install it by entering the following in a terminal `pip install dipy`. We will do so using Jupyter Magic in the following cell! Defining a measurement: `GradientTable``Dipy` has a built-in function that allows us to read in `bval` and `bvec` files named `read_bvals_bvecs` under the `dipy.io.gradients` module. Let's first grab the path to our gradient directions and amplitude files and load them into memory.dwi = layout.get(subject='10788', suffix='dwi', extension='.nii.gz', return_type='file')[0] bvec = layout.get(subject='10788', suffix='dwi', extension='bvec', return_type='file')[0] bval = layout.get(subject='10788', suffix='dwi', extension='bval', return_type='file')[0] import numpy as np import nibabel as nib from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt %matplotlib notebook data = nib.load(dwi).get_fdata() data.shape x_slice = data[58, :, :, 0] y_slice = data[:, 58, :, 0] z_slice = data[:, :, 30, 0] slices = [x_slice, y_slice, z_slice] fig, axes = plt.subplots(1, len(slices)) for i, slice in enumerate(slices): axes[i].imshow(slice.T, cmap="gray", origin="lower") x_slice = data[58, :, :, 2] y_slice = data[:, 58, :, 2] z_slice = data[:, :, 30, 2] slices = [x_slice, y_slice, z_slice] fig, axes = plt.subplots(1, len(slices)) for i, slice in enumerate(slices): axes[i].imshow(slice.T, cmap="gray", origin="lower") bvec_txt = np.genfromtxt(bvec) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(bvec_txt[0], bvec_txt[1], bvec_txt[2]) plt.show from dipy.io.gradients import read_bvals_bvecs from dipy.core.gradients import gradient_table gt_bvals, gt_bvecs = read_bvals_bvecs(bval, bvec)There is a also simple `GradientTable` object implemented in the `dipy.core.gradients` module. The input to the `GradientTable` should be our the values for our gradient directions and amplitudes we just read.gtab = gradient_table(gt_bvals, gt_bvecs)We will need this gradient table later on to process our data and generate diffusion tensor images (DTI)! There is also a built in function for gradient tables, `b0s_mask` that can be used to separate diffusion weighted measurements from non-diffusion weighted measurements (b=0s/mm^2). Try to extract the vector corresponding to diffusion weighted measurements in the following cell!gtab.bvecs[~gtab.b0s_mask]It is also important to know where our diffusion weighting free measurements are as we need them for registration in our preprocessing, (our next notebook). 
The gtab.b0s_mask shows that this is our first volume of our dataset.gtab.b0s_mask b0 = data[:, :, :, gtab.b0s_mask] b0.shape b0 = np.squeeze(b0) x_slice = b0[58, :, :] y_slice = b0[:, 58, :] z_slice = b0[:, :, 30] slices = [x_slice, y_slice, z_slice] fig, axes = plt.subplots(1, len(slices)) for i, slice in enumerate(slices): axes[i].imshow(slice.T, cmap="gray", origin="lower")2.2 MLP Baselineimport csv import numpy as np def csv_read(csv_path): """ Reading the results of k fold cross validation on the test dataset Args: csv_file: the path of the csv file Return: best_r2: best r2 score best_n_hidden: the number of hidden neurons coresponding to the best r2 score Remark: - The layout of each is that [mse_ave, rmse_ave, mae_ave, r2_ave] - In each column, the results are sorted by the number of hidden neurons. """ with open(csv_path, 'r') as f: reader = csv.reader(f) mse_ave, rmse_ave, mae_ave, r2_ave = [], [], [], [] for line in reader: mse_ave.append(float(line[0])) rmse_ave.append(float(line[1])) mae_ave.append(float(line[2])) r2_ave.append(float(line[3])) best_mse = mse_ave[np.argsort(mse_ave)[0]] best_rmse = rmse_ave[np.argsort(rmse_ave)[0]] best_mae = mae_ave[np.argsort(mae_ave)[0]] best_r2 = r2_ave[-2] best_n_hidden = (int(reader.line_num)-1) * 64 return best_mse, best_rmse, best_mae, best_r2, best_n_hidden log_path = "/home/ubuntu16/catkin_ws/src/sonar_navigation/Output/track-I-II/Best_Length/" prefix = "2019_08_07_mlp_" suffix = "_kfold_log.csv" n=45078 mse, rmse, mae, r2 , ar2, n_hidden = [], [], [], [], [], [] for i in range(1,49,1): csv_path = log_path + prefix + str(i) + suffix best_mse, best_rmse, best_mae, best_r2, best_n_hidden = csv_read(csv_path) # compute adj_r2 p = i*16 best_ar2 = 1 - ((1-best_r2)*(n-1))/(n-p-1) mse.append(best_mse) rmse.append(best_rmse) mae.append(best_mae) r2.append(best_r2) ar2.append(best_ar2) n_hidden.append(best_n_hidden) print(" rmse: {} \n mae: {} \n r2: {} \n ar2: {} \n".format( rmse[11], mae[11], r2[11], ar2[11])) r2=0.845505571365 n=45078 p=12*16 print 1 - (1-r2)*(n-1)/(n-1-p) r2=0.854160110402551 n=45078 p=12*16 print 1 - (1-r2)*(n-1)/(n-1-p) r2=0.878546745246136 n=45078 p=36*16 print 1 - (1-r2)*(n-1)/(n-1-p) from keras.models import Model from keras.layers import Input, Dense, Dropout def mlp(input_dim, n_hidden): """ Define a mlp model with one hidden layer Args: - input_dim: dimensions of input tensor - n_hidden: number of hidden layer neurons return: - A model instance """ # Input x = Input(shape=(input_dim, )) # hidden layer h1 = Dense(n_hidden, activation='relu')(x) h1 = Dropout(0.5)(h1) # output layer y = Dense(1)(h1) # model model = Model(inputs=x, outputs=y) return model mlp_model = mlp(192,320) mlp_model.summary() import matplotlib.gridspec as gridspec fig = plt.figure(figsize=(6,4), dpi=300) gs1 = gridspec.GridSpec(2, 2) #gs1.update(left=0.05, right=0.48, wspace=0.05) ax1 = fig.add_subplot(gs1[0, 0]) ax2 = fig.add_subplot(gs1[0, 1]) ax3 = fig.add_subplot(gs1[1, :]) x = np.arange(1,49,1) ax1.plot(x, rmse, label="RMSE") ax1.scatter(x[11], rmse[11], c='red', marker='*') ax1.set_xlim(0,49) ax1.set_xticks(np.arange(0, 49, 6)) ax1.set_xlabel("Sequence Length") ax1.set_ylabel("RMSE") ax1.legend(prop={'size':8}) #ax12.plot(x, baseline_r_square) ax2.plot(x, r2, label='R2') ax2.plot(x, ar2, label='R2_adjust') ax2.scatter(x[11], r2[11], c='red', marker='*') ax2.set_xlim(0,49) ax2.set_xticks(np.arange(0, 49, 6)) ax2.set_xlabel("Sequence Length") ax2.set_ylabel("R2 & R2_adjust") ax2.legend(loc="lower right", prop={'size':8}) ax3.stem(x, 
n_hidden, bottom=64) ax3.set_xlim(0,49) ax3.set_xticks(np.arange(0, 49, 4)) ax3.set_yticks(np.arange(0, 576, 64)) ax3.set_xlabel("Sequence Length") ax3.set_ylabel("Hidden Neurons") fig.tight_layout() fig.savefig('best_sequence.png') plt.show() print np.argsort(mse)+1 print print np.argsort(rmse)+1 print print np.argsort(mae)+1 print print np.argsort(r2)[::-1]+1 print print np.argsort(ar2)[::-1]+1 print print n_hidden[12 11 13 8 34 35 23 36 37 15 40 10 22 44 4 5 33 17 3 14 28 31 25 47 18 9 38 41 24 7 20 39 46 30 16 27 19 32 42 26 2 48 29 6 43 21 45 1] [12 11 13 8 34 35 23 36 37 15 40 10 22 44 4 5 33 17 3 14 28 31 25 47 18 9 38 41 24 7 20 39 46 30 16 27 19 32 42 26 2 48 29 6 43 21 45 1] [35 37 34 13 12 22 23 36 14 40 38 11 44 15 8 10 33 25 32 42 31 4 18 41 28 26 24 27 47 20 17 46 48 16 39 29 30 9 5 19 43 3 21 7 45 6 2 1] [12 13 8 23 11 34 35 22 37 15 36 4 17 5 40 33 18 10 14 20 3 28 31 19 25 16 38 24 44 30 7 27 41 39 26 9 32 21 47 42 6 29 46 2 43 48 45 1] [12 13 8 11 23 34 22 15 35 37 4 5 17 36 33 18 10 40 3 14 20 28 31 19 25 16 24 7 38 9 27 44 30 26 41 39 21 32 6 2 42 47 29 46 43 48 45 1] [512, 320, 256, 384, 384, 128, 128, 448, 128, 128, 256, 320, 320, 256, 384, 128, 192, 256, 256, 256, 128, 320, 320, 192, 384, 320, 384, 256, 192, 256, 384, 192, 256, 384, 320, 320, 320, 256, 192, 384, 192, 320, 320, 320, 128, 320, 192, 320]Load the data from the csv, ignores lines with comments.The result is a simple python arrayimport csv data = [] with open('enzymes_5.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in reader: if len(row) > 0 and not row[0].startswith('#'): data.append([float(r) for r in row]) #data !ls def normalize_dataset(data): for column_id in range(len(data[0])): column_max = max([x[column_id] for x in data]) for i, entry in enumerate([x[column_id] for x in data]): data[i][column_id] = entry/column_max return data #data = normalize_dataset(data) #for d in data: # print(d[0], d[1]) from scipy.stats import chisquare,chi2 import numpy as np import pandas as pd import math import matplotlib.pyplot as plt from scipy.spatial import distance import scipy as sp import sys from sklearn import mixture #data = np.random.uniform(size=[50000, 10]) from sklearn.datasets.samples_generator import make_blobs centers = [(0.2, 0.2), (0.2, 0.8), (0.5, 0.5)] cluster_std = [0.05, 0.05, 0.05] #data, y = make_blobs(n_samples=1000, cluster_std=cluster_std, centers=centers, n_features=3, random_state=1) #plt.scatter(data[y == 0, 0], data[y == 0, 1], color="red", s=10, label="Cluster1") #plt.scatter(data[y == 1, 0], data[y == 1, 1], color="blue", s=10, label="Cluster2") #plt.scatter(data[y == 2, 0], data[y == 2, 1], color="green", s=10, label="Cluster3") import csv with open('synthetic.csv', mode='w') as employee_file: employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) for l in data: employee_writer.writerow(l)Class to store data about bins.Makes it easier to keep track of marked bins and merging them.Holds the index of the bin in the initial array, mapping to the dataset.Holds an array of all subsequent bins that should be merged to itself.# TODO create datapoint class class Bin: def __init__(self, index, interval, dim): self.interval = interval self.marked = False self.support = 0 self.index = index self.merge_with = [] self.assigned_points = [] self.dimension = dim def add_point(self, point): self.support += 1 self.assigned_points.append(point) def get_width(self): return self.interval.length def 
__str__(self): return 'Interval: {0}; Marked: {1}; Support: {2}; Index: {3}; \ Merge-with {4}; # of assigned points {5}; Dimension: {6}'.format( self.interval, self.marked, self.support, self.index, self.merge_with, len(self.assigned_points), self.dimension ) class Interval: def __init__(self, start, end): self.start = start self.end = end self.length = end - start def __str__(self): return 'Start: {0}; End: {1}; Length {2}'.format(self.start, self.end, self.length)Calculates the interval, splits the data into bins.TODO: Works on one dimension only, should be generalised to an arbitrary number of dimensions according to the datasetdef split_into_bins(data): bins = [] for col_idx in range(len(data[0])): column_bins = [] column_data = [x[col_idx] for x in data] col_min = min(column_data) col_max = max(column_data) interval_length = col_max - col_min # divide interval 1 + log2(n) bins nr_of_bins = math.floor(1 + math.log2(len(column_data))) column_bins = [] b = interval_length / nr_of_bins for i in range(nr_of_bins): # adds a bin with a start and end interval bin_interval = Interval(col_min + b * i, col_min + b * (i + 1)) column_bins.append(Bin(i, bin_interval, col_idx)) # compute support for each bin for i, datapoint in enumerate(column_data): bin_index = int((datapoint - col_min) / b) bin_index = bin_index if bin_index < nr_of_bins else bin_index - 1 column_bins[bin_index].add_point(data[i]) bins.append(column_bins) return bins, nr_of_bins bins, nr_of_bins = split_into_bins(data) #plt.scatter([x[0] for x in data], [y[1] for y in data]) #plt.scatter([b.interval[0] for b in bins[0]], [0.5 for _ in range(len(bins[0]))], c='green', s=100) #plt.scatter([b.interval[1] for b in bins[0]], [0.5 for _ in range(len(bins[0]))], c='red') #plt.show()"On the attributes deemed non-uniform, the bin with the largest support is marked. The remaining un-marked bins are tested again using the Chi-square test for uniform distribution. If the Chi-square test indicates that the un-marked bins “look” uniform, then we stop. Otherwise, the bin with the second-largest support is marked. 
Then, we repeat testing the remaining un-marked bins for the uniform distribution and marking bins in decreasing order of support, until the current set of un-marked bins satisfies the Chi-square test for uniform distribution."# find the bin with the highest support and mark it def mark_highest_support(column_bins): max_support = 0 max_index = 0 for _bin in column_bins: if _bin.marked: continue if _bin.support > max_support: max_support = _bin.support max_index = _bin.index #print(max_support, max_index) column_bins[max_index].marked = True # perform chisquared for the support def mark_bins(column_bins, alpha=0.001, stat=1): while (stat > alpha): # support list of all *unmarked* bins support_list = [column_bins[i].support for i in range(nr_of_bins) if not column_bins[i].marked] #print(support_list) # if there are no unmarked bins, end the process if len(support_list) == 0: break (stat, p) = chisquare(support_list) #print('stat', stat) #print('p', p) if (stat > alpha): mark_highest_support(column_bins) #print(list(column_bins[i].support for i in range(nr_of_bins) if not column_bins[i].marked)) for column_bins in bins: mark_bins(column_bins)F:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\stats.py:5745: RuntimeWarning: invalid value encountered in true_divide terms = (f_obs - f_exp)**2 / f_exp F:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in greater return (a < x) & (x < b) F:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in less return (a < x) & (x < b) F:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:1912: RuntimeWarning: invalid value encountered in less_equal cond2 = cond0 & (x <= _a)Find the adjacent bins that have the same marked status. Add all following bins with the same status to `merge_with` array in the Bin objects# merge bins def mark_merge_bins(column_bins): for i, _bin1 in enumerate(column_bins): for j, _bin2 in enumerate(column_bins[i+1:]): if _bin1.marked == _bin2.marked: _bin1.merge_with.append(_bin2.index) else: break for column_bins in bins: mark_merge_bins(column_bins)Merges the bins according to the merge list. 
New bins in a new array.# merge each bin in the list by extending the interval and combining support def merge_bin(all_column_bins, column_bin): for bin_index in column_bin.merge_with: column_bin.interval.end = all_column_bins[bin_index].interval.end column_bin.support += all_column_bins[bin_index].support column_bin.assigned_points.extend(all_column_bins[bin_index].assigned_points) # merge bins of a single column def merge_column_bins(column_bins): i = 0 new_bins = [] while i < len(column_bins): # if bin has no following bins to merge with, keep it as is, and go to the next one if len(column_bins[i].merge_with) == 0: new_bins.append(column_bins[i]) i += 1 continue merge_bin(column_bins, column_bins[i]) new_bins.append(column_bins[i]) # skip all of the bins that were included in the current one i = max(column_bins[i].merge_with) + 1 return new_bins def merge_all_bins(bins): new_bins = [] for column_bins in bins: new_bins.append(merge_column_bins(column_bins)) return new_bins new_bins = merge_all_bins(bins) dim1 = 17 dim2 = 22 dims = [i for i, x in enumerate(new_bins) if len(x) > 3] for i in range(0, len(dims)-1, 2): dim1 = dims[i] dim2 = dims[i + 1] plt.scatter([x[dim1] for x in data], [y[dim2] for y in data]) plt.scatter([b.interval.start for b in new_bins[dim1]], [0.5 for _ in range(len(new_bins[dim1]))], c='green', s=100) plt.scatter([b.interval.end for b in new_bins[dim2]], [0.5 for _ in range(len(new_bins[dim2]))], c='red') plt.scatter([0.5 for _ in range(len(new_bins[dim1]))], [b.interval.start for b in new_bins[dim1]], c='yellow', s=100) plt.scatter([0.5 for _ in range(len(new_bins[dim2]))], [b.interval.end for b in new_bins[dim2]], c='purple') plt.show() sum(x.support for x in new_bins[0]) # confirm all points are kept from scipy.stats import poisson def create_new_candidate(candidate, dim_bin, reevaluated_points): current_bins_list = [] current_bins_list.extend(candidate.bins) current_bins_list.append(dim_bin) return PSignature(current_bins_list, reevaluated_points) def generate_candidate_list(candidate_list, current_dim_bins, threshold, current_dim): new_candidates = [] for candidate in candidate_list: for dim_bin in current_dim_bins: if dim_bin.marked: expected_sup = candidate.get_support() * dim_bin.get_width() reevaluated_points = candidate.reevaluate_assigned_points(dim_bin, current_dim) r_support = len(reevaluated_points) if r_support == 0: continue print('R support {0}, expected support {1}'.format(r_support, expected_sup)) print('Poisson distribution:', poisson.pmf(r_support, expected_sup), r_support, expected_sup) if poisson.pmf(r_support, expected_sup) < threshold: new_candidate = create_new_candidate(candidate, dim_bin, reevaluated_points) new_candidates.append(new_candidate) print("Length of new candidates after poisson", len(new_candidates)) return new_candidates class PSignature: def __init__(self, bins, assigned_points=[]): self.bins = bins self.assigned_points = assigned_points def get_support(self): return len(self.assigned_points) def add_bin(self, _bin): self.bins.append(_bin) self.assigned_points.append(_bin.assigned_support) self.reevaluate_assigned_points(current_dim) def reevaluate_assigned_points(self, _bin, current_dim): evaluated_points = [] current_interval = _bin.interval for point in self.assigned_points: if point[current_dim] > current_interval.start and point[current_dim] <= current_interval.end: evaluated_points.append(point) return evaluated_points def get_means(self): return np.average(np.array(self.assigned_points), axis = 0) candidate_list = [] 
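# Seed the candidate p-signatures with the marked (dense) bins of dimension 0, then extend them one
# dimension at a time: generate_candidate_list keeps an extension only if the re-evaluated support is
# improbable (low Poisson pmf) under a Poisson model whose mean is the current support times the new
# bin's width, i.e. the count expected if that dimension were uniformly distributed.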
for _bin in new_bins[0]: if _bin.marked: candidate_list.append(PSignature([_bin], assigned_points=_bin.assigned_points)) poisson_threshold = 1e-4 for dim in range(1, len(data[0])): candidate_list = generate_candidate_list(candidate_list, new_bins[dim], poisson_threshold, dim) for c in candidate_list: print("---") for b in c.bins: print(b.index, b.dimension, len(b.assigned_points), b.marked) print(b.interval) print(len(c.assigned_points)) for bin in new_bins[1]: print(bin.marked) for candidate in candidate_list: for _bin in candidate.bins: print('start {0}, end {1}, length {2}'.format(_bin.interval.start, _bin.interval.end, _bin.interval.length)) class DataPoint: def __init__(self, coords): self.coords = coords self.assigned_clusters = [] def __eq__(self, other): if len(other.coords) != len(self.coords): return False for i, x in enumerate(self.coords): if x - other.coords[i] > 1e-9: return False return True for c in candidate_list: print(c.get_means()) inv_cov_cluster_dict = dict() for i,can in enumerate(candidate_list): cov = np.cov(np.array(can.assigned_points).T) inv_covmat= np.linalg.inv(cov) inv_cov_cluster_dict[i] = inv_covmat #fuzzy membership matrix #initialize matrix with datapoints in one column and found cluster (e.g. 1,2,3) in other column #initialize clusterpoints with a 1 at the matrix intersection matrix = np.zeros(dtype='float', shape=(len(data), len(candidate_list))) dps = [] print(matrix.shape) cov_dat = np.cov(np.array(data).T) inv_covmat_dat= np.linalg.inv(cov_dat) for i, point in enumerate(data): if i % 100 == 0: print(i) data_point = DataPoint(point) for j, candidate in enumerate(candidate_list): candidate_data_points = [DataPoint(p) for p in candidate.assigned_points] if data_point in candidate_data_points: matrix[i][j] = 1 data_point.assigned_clusters.append(j) fraction = 1 if len(data_point.assigned_clusters) == 0 else 1 / len(data_point.assigned_clusters) for r in range(len(candidate_list)): if matrix[i][r] == 1: matrix[i][r] = fraction #""" if len(data_point.assigned_clusters) == 0: closest = sys.maxsize closest_candidate_idx = 0 for idx, c in enumerate(candidate_list): mh_distance = distance.mahalanobis(data_point.coords, c.get_means(), inv_cov_cluster_dict[idx]) if mh_distance < closest: closest = mh_distance closest_candidate_idx = idx data_point.assigned_clusters.append(closest_candidate_idx) matrix[i][closest_candidate_idx] = 1 #""" dps.append(data_point) #compute mean of support set of cluster #compute the shortest mahalanobis distance(scipy.spatial.distance.mahalanobis) # of unassigned points to cluster core and assign # EM -> probably need to implement ourself means_before = np.array([c.get_means() for c in candidate_list]) gmm = mixture.BayesianGaussianMixture(n_components=len(candidate_list), covariance_type='full').fit(matrix) #gmm2 = mixture.BayesianGaussianMixture(n_components=len(candidate_list), covariance_type='full').fit(data) result = gmm.predict(matrix) #result2 = gmm2.predict(data) clustered_points = list() projected_cluster_dict = dict() for c in range(len(candidate_list)): projected_cluster_dict[c] = [] for assigned_cluster, p in list(zip(result, data)): clustered_points.append((assigned_cluster,p)) if assigned_cluster in projected_cluster_dict: projected_cluster_dict[assigned_cluster].append(p) means_after_bgm = list() for pj in projected_cluster_dict.keys(): means_after_bgm.append(np.mean(np.array(projected_cluster_dict[pj]), axis = 0)) amount = 0 for pj in projected_cluster_dict.keys(): amount += len(projected_cluster_dict[pj]) 
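# Sanity check: every data point receives exactly one predicted cluster above, so `amount`
# should equal len(data).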
print(amount) plt.scatter([x[0] for x in data], [y[1] for y in data], c=result, s=20) plt.scatter([x[0] for x in means_after_bgm], [y[1] for y in means_after_bgm], c="green") plt.scatter([x[0] for x in means_before], [y[1] for y in means_before], c="red") plt.show() print(means_after_bgm) inv_cov_dict = dict() for key in projected_cluster_dict.keys(): cov = np.cov(np.array(projected_cluster_dict[key]).T) inv_covmat= np.linalg.inv(cov) inv_cov_dict[key] = inv_covmat degree_of_freedom = (len(data[0])-1) *(len(data[1])-1) #degree_of_freedom = 10 chi_crit = chi2.ppf(0.001, df=degree_of_freedom) noise_cluster_idx = len(candidate_list) print(chi_crit) for i, c in enumerate(clustered_points): cluster_mean = means_after_bgm[c[0]] md = distance.mahalanobis(c[1],cluster_mean,inv_cov_dict[c[0]]) if md > chi_crit: clustered_points[i] = (noise_cluster_idx,c[1]) print(md, clustered_points[i], chi_crit) plt.scatter([x[1][0] for x in clustered_points], [y[1][1] for y in clustered_points], c = [z[0] for z in clustered_points]) plt.scatter([x[0] for x in means_after_bgm], [y[1] for y in means_after_bgm], c="green") plt.show() print(means_after_bgm) print(gmm.means_)[[3.99514325e-01 6.00485675e-01] [9.99899443e-01 1.00557106e-04]]Null modelsIn this notebook we show several basic results concerning the behavior of quadrangular structural coefficients in standard null models. Global $q$-clustering in Erdős–Rényi (ER) random graphsIt is a well-known fact that in ER random graphs the expected global clustering is equal to $p$, that is, to the edge existence probability. It can be deduced simply from the fact that for any $2$-path:```i -- j -- k```the probability that `i` and `k` are also connected is still equal to $p$, as it is constant for all possible edges. Hence, each $2$-path is closed to make a triangle with probability of exactly $p$, resulting in the global clustering equal to $p$. Similarly, for any $3$-path:``` i l | | j ---- k```the probability that it is closed and forms a strict quadrangle is equal to the probability that `i` and `k` as well as `j` and `l` are not connected, which is $(1-p)^2$, times the probability that `i` and `l` are connected, which is $p$. So this gives expected quadrangular clustering (global complementarity) equal to $p(1-p)^2$. Note that for graphs with a low value of $p$ we have that $(1-p) \approx 1$, so both expected clustering and $q$-clustering are comparable.
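As a quick numerical check of that last remark (simple arithmetic, not taken from the notebook): for example, at $p = 0.005$,

$$ p\,(1-p)^2 = 0.005 \times 0.995^2 \approx 0.00495, $$

which is within about 1% of $p$, so for sparse graphs the two baselines nearly coincide.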
SimulationWe check the above assertion by simulating $100$ ER random graphs with$2000$ nodes and different values of $p = .01, .02, .05$.**NOTE.** This may take a while to execute.import random import pandas as pd import igraph as ig from tqdm import tqdm from pathcensus import PathCensus random.seed(101) P = (.001, .002, .005, .01) N = 5000 R = 20 er_null = [] for p in P: for _ in tqdm(range(R)): graph = ig.Graph.Erdos_Renyi(N, p=p, directed=False) paths = PathCensus(graph) index = dict(p=p, q=p*(1-p)**2) df = paths.coefs("global") for k, v in index.items(): df[k] = v index["q"] = p er_null.append(df) data = pd.concat(er_null, axis=0, ignore_index=True) sim = data[["p", "sim_g", "sim", "tclust", "tclosure"]] \ .reset_index() \ .melt(id_vars=["index", "p"]) \ .assign(relerr=lambda df: abs(df["value"] - df["p"]) / df["p"]) comp = data[[ "q", "comp_g", "comp", "qclust", "qclosure" ]] \ .reset_index() \ .melt(id_vars=["index", "q"]) \ .assign(relerr=lambda df: abs(df["value"] - df["q"]) / df["q"])Similarity coefficients vis-a-vis density in ER random graph ($p$)sim.groupby(["p", "variable"])["value"].describe()Complementarity coefficients vis-a-vis $p(1-p)^2$ in ER random graphscomp.groupby(["q", "variable"])["value"].describe().round(6)Read in similarity matrix and format as floatsdef read_similarity_matrix(filepath): # Read in data correctly into columns similarities = ( read_with_nulls(filepath) .reset_index(drop=True).T .reset_index().T .reset_index(drop=True) ) # Get column headers correctly and format PERSID according to apps similarities.columns = ["PERSID", *similarities.columns[1:]] similarities["PERSID"] = similarities["PERSID"].str.replace("nr:", "") similarities.columns = ["PERSID", *similarities["PERSID"]] # Change all similarity column types to float similarity_matrix = similarities.loc[:, similarities.columns != 'PERSID'].astype(float) # Reconstruct similarity matrix with correct formatting and datatypes final_similarities = pd.concat((similarities["PERSID"], similarity_matrix), axis=1) return final_similarities similarities_strict = read_similarity_matrix(f"../graph_data/{files[2]}") similarities_strict.head() ( similarities_strict.loc[similarities_strict["PERSID"].isin(persid_list)].iloc[:, :6] ).to_csv(f"../graph_data_small/{files[2]}", index=False, header=True) similarities_strict["PERSID"][0] == apps["PERSID"][0] print(apps["PERSID"][0])E4FFAE5299FA01F7ADC0E4115B9A6514Obtain AHD data with App PERSIDsahd_hits = read_with_nulls(f"../graph_data/{files[3]}") ahd_hits["PERSID"] = ahd_hits["PERSID"].str.replace("nr:", "") ahd_hits.loc[apps["PERSID"].isin(persid_list)].head() ahd_hits.loc[apps["PERSID"].isin(persid_list)].to_csv(f"../graph_data_small/{files[3]}", index=False, header=True)Obtain OS instances dataos_instances = read_with_nulls(f"../graph_data/{files[4]}") # Rename first column as PERSID os_instances.rename({u"os_\ufeffPersID": "PERSID"}, axis=1, inplace=True) os_instances.head() os_instances.iloc[:5, :].to_csv(f"../graph_data_small/{files[4]}", index=False, header=True)Process similarity data for connected componentssimilarities_connected_components = read_with_nulls(f"../graph_data/{files[5]}") similarities_connected_components.rename( {"PersID-1": "PERSID_1", "PersID-2": "PERSID_2"}, axis=1, inplace=True ) similarities_connected_components["PERSID_2"] = similarities_connected_components[ "PERSID_2" ].str.replace("nr:", "") similarities_connected_components.head() similarities_connected_components.iloc[:5, :].to_csv(f"../graph_data_small/{files[5]}", index=False, 
header=True) similarities_all = read_similarity_matrix(f"../graph_data/{files[6]}") similarities_all.head() ( similarities_all.loc[similarities_all["PERSID"].isin(persid_list)].iloc[:, :6] ).to_csv(f"../graph_data_small/{files[6]}", index=False, header=True) similarities_max = read_with_nulls(f"../graph_data/{files[7]}") similarities_max.rename( {"PersID-1": "PERSID_1", "PersID-2": "PERSID_2"}, axis=1, inplace=True ) similarities_max["PERSID_2"] = similarities_max[ "PERSID_2" ].str.replace("nr:", "") similarities_max.head() similarities_max.loc[similarities_max["PERSID_1"].isin(persid_list)].to_csv(f"../graph_data_small/{files[7]}", index=False, header=True) db_all = read_with_nulls(f"../graph_data/{files[8]}") len(db_all) db_all.columns def clean_strings(df, colnames=[]): for col in colnames: df[col] = df[col].str.replace("nr:", "") return df db_all_clean = clean_strings(db_all, colnames=["EAR_MAPPED_CMDB_ID", "CMDB_CI_PERSID", "CMDB_CI_ID_OF_PARENT_CI"]) ( db_all_clean.loc[db_all_clean["EAR_MAPPED_CMDB_ID"].isin(persid_list)] ).to_csv(f"../graph_data_small/{files[8]}", index=False, header=True) # cond1 = os_instances.loc[os_instances["PERSID"].isin(db_all_clean["CMDB_CI_PERSID"])] # os_instances.loc[cond1.index] # cond2 = apps.loc[apps["PERSID"].isin(db_all_clean["CMDB_CI_PERSID"])] # apps.loc[cond2.index] db_all_clean.head()Check for presence of IDs in the applications and organizations CSVsany(db_all_clean["CMDB_CI_PERSID"].isin(orgs["PERSID"])) any(db_all_clean["CMDB_CI_ID_OF_PARENT_CI"].isin(orgs["PERSID"])) any(db_all_clean["CMDB_CI_PERSID"].isin(apps["PERSID"])) any(db_all_clean["CMDB_CI_ID_OF_PARENT_CI"].isin(apps["PERSID"])) any(db_all_clean["CMDB_CI_PERSID"].isin(os_instances["PERSID"])) any(db_all_clean["CMDB_CI_ID_OF_PARENT_CI"].isin(os_instances["PERSID"]))Nearest Neighbour# df = rating_data.groupby(['user_id', 'content_id']).max('rating').reset_index() df = rating_data.pivot(index='content_id', columns='user_id', values='rating').fillna(0) df from sklearn.neighbors import NearestNeighbors knn = NearestNeighbors(metric='cosine', algorithm='brute') knn.fit(df.values) distances, indices = knn.kneighbors(df.values, n_neighbors=len(df)) neighbours = {} for i in range(0, len(indices)): nn = indices[i] dist = distances[i] e = nn[0] e_content = df.index[e] neighbours[e_content] = {"nn": [df.index[n] for n in nn[1:]], "dist": [1 - x for x in dist[1:]]} indices recom4 = content[content['ID'].isin(indices[][1:])] recom4 def predict_rating(user_id, content_id, neighbours): if content_id not in neighbours: print("no data for content_id") neighbours = neighbours[content_id] nn = neighbours['nn'] dist = neighbours['dist'] numerator = 0 denominator = 0 for i in range(0, len(nn)): content_id = nn[i] user_rating = df.loc[content_id, user_id] if user_rating > 0: numerator += user_rating * dist[i] denominator += dist[i] if denominator > 0: return numerator / denominator else: return 0 predict_rating('1', '2', 5)Given: A collection of at most 10 symbols defining an ordered alphabet, and a positive integer n (n≤10).Return: All strings of length n that can be formed from the alphabet, ordered lexicographically (use the standard order of symbols in the English alphabet).def indexing(s: List[str], n: int) -> str: s.sort() for p in itertools.product(s, repeat=n): print(''.join(p)) # Try sample dataset s = ['A', 'C', 'G', 'T'] n = 2 indexing(s, n) # Try Rosalind dataset with open(data_dir/"rosalind_lexf.txt", 'r') as f: s = list(f.readline().rstrip().split()) n = int(f.readline().rstrip()) indexing(s, 
n)AAA AAB AAC AAD AAE AAF ABA ABB ABC ABD ABE ABF ACA ACB ACC ACD ACE ACF ADA ADB ADC ADD ADE ADF AEA AEB AEC AED AEE AEF AFA AFB AFC AFD AFE AFF BAA BAB BAC BAD BAE BAF BBA BBB BBC BBD BBE BBF BCA BCB BCC BCD BCE BCF BDA BDB BDC BDD BDE BDF BEA BEB BEC BED BEE BEF BFA BFB BFC BFD BFE BFF CAA CAB CAC CAD CAE CAF CBA CBB CBC CBD CBE CBF CCA CCB CCC CCD CCE CCF CDA CDB CDC CDD CDE CDF CEA CEB CEC CED CEE CEF CFA CFB CFC CFD CFE CFF DAA DAB DAC DAD DAE DAF DBA DBB DBC DBD DBE DBF DCA DCB DCC DCD DCE DCF DDA DDB DDC DDD DDE DDF DEA DEB DEC DED DEE DEF DFA DFB DFC DFD DFE DFF EAA EAB EAC EAD EAE EAF EBA EBB EBC EBD EBE EBF ECA ECB ECC ECD ECE ECF EDA EDB EDC EDD EDE EDF EEA EEB EEC EED EEE EEF EFA EFB EFC EFD EFE EFF FAA FAB FAC FAD FAE FAF FBA FBB FBC FBD FBE FBF FCA FCB FCC FCD FCE FCF FDA FDB FDC FDD FDE FDF FEA FEB FEC FED FEE FEF FFA FFB FFC FFD FFE FFFAll the IPython Notebooks in **Data Science Interview Questions** series by Dr. are available @ **[GitHub](https://github.com/milaan9/DataScience_Interview_Questions)** Python Basics ➞ 120 Questions 1. What is Python?Solution- Python is a high-level, interpreted, interactive and object-oriented scripting language. Python is designed to be highly readable. It uses English keywords frequently where as other languages use punctuation, and it has fewer syntactical constructions than other languages. 2. Name some of the features of Python.SolutionFollowing are some of the salient features of python −* It supports functional and structured programming methods as well as OOP.* It can be used as a scripting language or can be compiled to byte-code for building large applications.* It provides very high-level dynamic data types and supports dynamic type checking.* It supports automatic garbage collection.* It can be easily integrated with C, C++, COM, ActiveX, CORBA, and Java. 3. What is the purpose of PYTHONPATH environment variable?Solution- PYTHONPATH - It has a role similar to PATH. This variable tells the Python interpreter where to locate the module files imported into a program. It should include the Python source library directory and the directories containing Python source code. PYTHONPATH is sometimes preset by the Python installer. 4. What is the purpose of PYTHONSTARTUP environment variable?Solution- PYTHONSTARTUP - It contains the path of an initialization file containing Python source code. It is executed every time you start the interpreter. It is named as .pythonrc.py in Unix and it contains commands that load utilities or modify PYTHONPATH. 5. What is the purpose of PYTHONCASEOK environment variable?Solution- PYTHONCASEOK − It is used in Windows to instruct Python to find the first case-insensitive match in an import statement. Set this variable to any value to activate it. 6. What is the purpose of PYTHONHOME environment variable?Solution- PYTHONHOME − It is an alternative module search path. It is usually embedded in the PYTHONSTARTUP or PYTHONPATH directories to make switching module libraries easy. 7. Is python a case sensitive language?Solution- Yes! Python is a case sensitive programming language. 8. What are the supported data types in Python?Solution- Python has five standard data types: 1. Numbers 2. String 3. List 4. Tuple 5. Dictionary 9. What is the output of print `str` if `str = 'Hello World!'`?Solution- It will print complete string. - Output would be `Hello World!` 10. What is the output of print `str[0]` if `str = 'Hello World!'`?Solution- It will print first character of the string. Output would be H. 11. 
What is the output of print `str[2:5]` if `str = 'Hello World!'`?Solution- It will print characters starting from 3rd to 5th. - Output would be `llo` 12. What is the output of print `str[2:]` if `str = 'Hello World!'`?Solution- It will print characters starting from 3rd character. - Output would be `llo World!` 13. What is the output of print `str * 2` if `str = 'Hello World!'`?Solution- It will print string two times. - Output would be `Hello World!Hello World!` 14. What is the output of print `str + "TEST"` if `str = 'Hello World!'`?Solution- It will print concatenated string. - Output would be `Hello World!TEST` 15. What is the output of print `list` if `list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]`?Solution- It will print complete list. - Output would be `['abcd', 786, 2.23, 'john', 70.200000000000003]` 16. What is the output of print `list[0]` if `list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]`?Solution- It will print first element of the list. - Output would be `abcd` 17. What is the output of print `list[1:3]` if `list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]`?Solution- It will print elements starting from 2nd till 3rd. - Output would be `[786, 2.23]` 18. What is the output of print `list[2:]` if `list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]`?Solution- It will print elements starting from 3rd element. - Output would be `[2.23, 'john', 70.200000000000003]` 19. What is the output of print `tinylist * 2` if `tinylist = [123, 'john']`?Solution- It will print list two times. - Output would be `[123, 'john', 123, 'john']` 20. What is the output of print `list1 + list2`, if `list1 = [ 'abcd', 786 , 2.23, 'john', 70.2 ] and ist2 = [123, 'john']`?Solution- It will print concatenated lists. - Output would be `['abcd', 786, 2.23, 'john', 70.2, 123, 'john']` 21. What are tuples in Python?Solution- A tuple is another sequence data type that is similar to the list. - A tuple consists of a number of values separated by commas. - Unlike lists, however, tuples are enclosed within parentheses. 22. What is the difference between tuples and lists in Python?Solution- The main differences between lists and tuples are: - Lists are enclosed in brackets `[ ]` and their elements and size can be changed, while tuples are enclosed in parentheses `( )` and cannot be updated. - Tuples can be thought of as read-only lists. 23. What is the output of print `tuple` if `tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )`?Solution- It will print complete tuple. - Output would be `('abcd', 786, 2.23, 'john', 70.200000000000003)` 24. What is the output of print `tuple[0]` if `tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )`?Solution- It will print first element of the tuple. - Output would be `abcd` 25. What is the output of print `tuple[1:3]` if `tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )`?Solution- It will print elements starting from 2nd till 3rd. - Output would be `(786, 2.23)` 26. What is the output of print `tuple[2:]` if `tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )`?Solution- It will print elements starting from 3rd element. - Output would be `(2.23, 'john', 70.200000000000003)` 27. What is the output of print `tinytuple * 2` if `tinytuple = (123, 'john')`?Solution- It will print tuple two times. - Output would be `(123, 'john', 123, 'john')` 28. What is the output of print `tuple + tinytuple` if `tuple = ( 'abcd', 786, 2.23, 'john', 70.2 )` and `tinytuple = (123, 'john')`?Solution- It will print concatenated tuples. - Output would be `('abcd', 786, 2.23, 'john', 70.200000000000003, 123, 'john')` 29. 
What are Python's dictionaries?Solution- Python's dictionaries are kind of hash table type. - They work like associative arrays or hashes found in Perl and consist of key-value pairs. - A dictionary key can be almost any Python type, but are usually numbers or strings. - Values, on the other hand, can be any arbitrary Python object. 30. How will you create a dictionary in python?Solution- Dictionaries are enclosed by curly braces `{ }` and values can be assigned and accessed using square braces `[]`.```pythondict = {}dict['one'] = "This is one"dict[2] = "This is two"tinydict = {'name': 'john','code':6734, 'dept': 'sales'}``` 31. How will you get all the keys from the dictionary?Solution- Using `dictionary.keys()` function, we can get all the keys from the dictionary object.```pythonprint dict.keys() Prints all the keys``` 32. How will you get all the values from the dictionary?Solution- Using `dictionary.values()` function, we can get all the values from the dictionary object.```pythonprint dict.values() Prints all the values``` 33. How will you convert a string to an int in python?Solution- `int(x [,base])` - Converts `x` to an integer. `base` specifies the base if `x` is a string. 34. How will you convert a string to a long in python?Solution- `long(x [,base] )` - Converts `x` to a long integer. `base` specifies the base if `x` is a string. 35. How will you convert a string to a float in python?Solution- `float(x)` − Converts `x` to a floating-point number. 36. How will you convert a object to a string in python?Solution- `str(x)` − Converts object `x` to a string representation. 37. How will you convert a object to a regular expression in python?Solution- `repr(x)` − Converts object `x` to an expression string. 38. How will you convert a String to an object in python?Solution- `eval(str)` − Evaluates a string and returns an object. 39. How will you convert a string to a tuple in python?Solution- `tuple(s)` − Converts `s` to a tuple. 40. How will you convert a string to a list in python?Solution- `list(s)` − Converts `s` to a list. 41. How will you convert a string to a set in python?Solution- `set(s)` − Converts `s` to a set. 42. How will you create a dictionary using tuples in python?Solution- `dict(d)` − Creates a dictionary. `d` must be a sequence of (key,value) tuples. 43. How will you convert a string to a frozen set in python?Solution- `frozenset(s)` − Converts `s` to a frozen set. 44. How will you convert an integer to a character in python?Solution- `chr(x)` − Converts an integer to a character. 45. How will you convert an integer to an unicode character in python?Solution- `unichr(x)` − Converts an integer to a Unicode character. 46. How will you convert a single character to its integer value in python?Solution- `ord(x)` − Converts a single character to its integer value. 47. How will you convert an integer to hexadecimal string in python?Solution- `hex(x)` − Converts an integer to a hexadecimal string. 48. How will you convert an integer to octal string in python?Solution- `oct(x)` − Converts an integer to an octal string. 49. What is the purpose of `**` operator?Solution- `**` Exponent − Performs exponential (power) calculation on operators. - `a**b` = 10 to the power 20 if `a = 10` and `b = 20` 50. What is the purpose of `//` operator?Solution- `//` Floor Division − The division of operands where the result is the quotient in which the digits after the decimal point are removed. 51. 
What is the purpose of `is` operator?Solution- `is` − Evaluates to `True` if the variables on either side of the operator point to the same object and false otherwise. `x` is `y`, here is results in 1 if `id(x)` equals `id(y)`. 52. What is the purpose of `not in` operator?Solution- `not in` − Evaluates to `True` if it does not finds a variable in the specified sequence and false otherwise. `x` not in `y`, here not in results in a 1 if `x` is not a member of sequence `y`. 53. What is the purpose `break` statement in python?Solution- `break` statement − Terminates the loop statement and transfers execution to the statement immediately following the loop. 54. What is the purpose `continue` statement in python?Solution- `continue` statement − Causes the loop to skip the remainder of its body and immediately retest its condition prior to reiterating. 55. What is the purpose `pass` statement in python?Solution- `pass` statement − The `pass` statement in Python is used when a statement is required syntactically but you do not want any command or code to execute. 56. How can you pick a random item from a list or tuple?Solution- `choice(seq)` − Returns a random item from a list, tuple, or string. 57. How can you pick a random item from a range?Solution- `randrange ([start,] stop [,step])` − returns a randomly selected element from range(start, stop, step). 58. How can you get a random number in python?Solution- `random()` − returns a random float `r`, such that 0 is less than or equal to `r` and `r` is less than 1. 59. How will you set the starting value in generating random numbers?Solution- `seed([x])` − Sets the integer starting value used in generating random numbers. Call this function before calling any other random module function. Returns `None`. 60. How will you randomizes the items of a list in place?Solution- `shuffle(lst)` − Randomizes the items of a list in place. Returns `None`. 61. How will you capitalizes first letter of string?Solution- `capitalize()` − Capitalizes first letter of string. 62. How will you check in a string that all characters are alphanumeric?Solution- `isalnum()` − Returns `True` if string has at least 1 character and all characters are alphanumeric and `False` otherwise. 63. How will you check in a string that all characters are digits?Solution- `isdigit()` − Returns `True` if string contains only digits and `False` otherwise. 64. How will you check in a string that all characters are in lowercase?Solution- `islower()` − Returns `True` if string has at least 1 cased character and all cased characters are in lowercase and `False` otherwise. 65. How will you check in a string that all characters are numerics?Solution- `isnumeric()` − Returns `True` if a unicode string contains only numeric characters and `False` otherwise. 66. How will you check in a string that all characters are whitespaces?Solution- `isspace()` − Returns `True` if string contains only whitespace characters and `False` otherwise. 67. How will you check in a string that it is properly titlecased?Solution- `istitle()` − Returns `True` if string is properly "titlecased" and `False` otherwise. 68. How will you check in a string that all characters are in uppercase?Solution- `isupper()` − Returns `True` if string has at least one cased character and all cased characters are in uppercase and `False` otherwise. 69. How will you merge elements in a sequence?Solution- `join(seq)` − Merges (concatenates) the string representations of elements in sequence `seq` into a string, with separator string. 70. 
How will you get the length of the string?Solution- `len(string)` − Returns the length of the string. 71. How will you get a space-padded string with the original string left-justified to a total of width columns?Solution- `ljust(width[, fillchar])` − Returns a space-padded string with the original string left-justified to a total of width columns. 72. How will you convert a string to all lowercase?Solution- `lower()` − Converts all uppercase letters in string to lowercase. 73. How will you remove all leading whitespace in string?Solution- `lstrip()` − Removes all leading whitespace in string. 74. How will you get the max alphabetical character from the string?Solution- `max(str)` − Returns the `max` alphabetical character from the string `str`. 75. How will you get the min alphabetical character from the string?Solution- ``min(str)` − Returns the `min` alphabetical character from the string `str`. 76. How will you replaces all occurrences of old substring in string with new string?Solution- `replace(old, new [, max])` − Replaces all occurrences of old in string with new or at most max occurrences if `max` given. 77. How will you remove all leading and trailing whitespace in string?Solution- `strip([chars])` − Performs both `lstrip()` and `rstrip()` on string. 78. How will you change case for all letters in string?Solution- `swapcase()` − Inverts case for all letters in string. 79. How will you get titlecased version of string?Solution- `title()` − Returns "titlecased" version of string, that is, all words begin with uppercase and the rest are lowercase. 80. How will you convert a string to all uppercase?Solution- `upper()` − Converts all lowercase letters in string to uppercase. 81. How will you check in a string that all characters are decimal?Solution- `isdecimal()` − Returns `True` if a unicode string contains only decimal characters and `False` otherwise. 82. What is the difference between `del()` and `remove()` methods of list?Solution- To remove a list element, you can use either the `del` statement if you know exactly which element(s) you are deleting or the `remove()` method if you do not know. 83. What is the output of `len([1, 2, 3])`?Solution- `3` 84. What is the output of `[1, 2, 3] + [4, 5, 6]`?Solution- `[1, 2, 3, 4, 5, 6]` 85. What is the output of `['Hi!'] * 4`?Solution- `['Hi!', 'Hi!', 'Hi!', 'Hi!']` 86. What is the output of 3 in `[1, 2, 3]`?Solution- `True` 87. What is the output of for `x in [1, 2, 3]: print x`?Solution```python123``` 88. What is the output of `L[2]` if `L = [1,2,3]`?Solution- `3`, Offsets start at zero. 89. What is the output of `L[-2]` if `L = [1,2,3]`?Solution- `1`, Negative: count from the right. 90. What is the output of `L[1:]` if `L = [1,2,3]`?Solution- `2, 3`, Slicing fetches sections. 91. How will you compare two lists?Solution- `cmp(list1, list2)` − Compares elements of both lists. 92. How will you get the length of a list?Solution- `len(list)` − Gives the total length of the list. 93. How will you get the max valued item of a list?Solution- `max(list)` − Returns item from the list with max value. 94. How will you get the min valued item of a list?Solution- `min(list)` − Returns item from the list with min value. 95. How will you get the index of an object in a list?Solution- `list.index(obj)` − Returns the lowest index in list that `obj` appears. 96. How will you insert an object at given index in a list?Solution- `list.insert(index, obj)` − Inserts object `obj` into list at offset index. 97. 
How will you remove last object from a list?Solution`list.pop(obj=list[-1])` − Removes and returns last object or obj from list. 98. How will you remove an object from a list?Solution- `list.remove(obj)` − Removes object `obj` from list. 99. How will you reverse a list?Solution- `list.reverse()` − Reverses objects of list in place. 100. How will you sort a list?Solution- `list.sort([func])` − Sorts objects of list, use compare `func` if given. 101. What is lambda function in python?Solution- `‘lambda’` is a keyword in python which creates an anonymous function. Lambda does not contain block of statements. It does not contain return statements. 102. What we call a function which is incomplete version of a function?Solution- `Stub`. 103. When a function is defined then the system stores parameters and local variables in an area of memory. What this memory is known as?Solution- `Stack`. 104. A canvas can have a foreground color? (Yes/No)Solution- `Yes`. 105. Is Python platform independent?Solution- No. There are some modules and functions in python that can only run on certain platforms. 106. Do you think Python has a complier?Solution- Yes. Python complier which works automatically so we don’t notice the compiler of python. 107. What are the applications of Python?Solution1. Django (Web framework of Python).2. Micro Frame work such as Flask and Bottle.3. Plone and Django CMS for advanced content Management. 108. What is the basic difference between Python ver 2 and Python ver 3?Solution- Table below explains the difference between Python version 2 and Python version 3.| S.No | Section | Python Version 2 | Python Version 3 | |:-------|:---------------| :------ |:--------|| 1. | Print Function | Print command can be used without parentheses. | Python 3 needs parentheses to print any string. It will raise error without parentheses. | | 2. | Unicode | ASCII str() types and separate Unicode() but there is no byte type code in Python 2. | Unicode (utf-8) and it has two byte classes − Byte, Bytearray S. || 3. | Exceptions | Python 2 accepts both new and old notations of syntax. | Python 3 raises a SyntaxError in turn when we don’t enclose the exception argument in parentheses. || 4. | Comparing Unorderable | It does not raise any error. | It raises ‘TypeError’ as warning if we try to compare unorderable types. | 109. Which programming Language is an implementation of Python programming language designed to run on Java Platform?Solution- `Jython`. (Jython is successor of Jpython.) 110. Is there any double data type in Python?Solution- `No`. 111. Is String in Python are immutable? (Yes/No)Solution- `Yes`. 112. Can `True = False` be possible in Python?Solution- `No`. 113. Which module of python is used to apply the methods related to OS.?Solution- `OS`. 114. When does a new block begin in python?Solution- A block begins when the line is intended by 4 spaces. 115. Write a function in python which detects whether the given two strings are anagrams or not.Solutiondef check(a,b): if(len(a)!=len(b)): return False else: if(sorted(list(a)) == sorted(list(b))): return True else: return False116. Name the python Library used for Machine learning.Solution- Scikit-learn python Library used for Machine learning 117. What does `pass` operation do?Solution- `pass` indicates that nothing is to be done i.e., it signifies a no operation. 118. Name the tools which python uses to find bugs (if any).Solution- `Pylint` and `pychecker`. 119. 
Write a function to give the sum of all the numbers in list?SolutionSample list − (100, 200, 300, 400, 0, 500)Expected output − 1500# Program for sum of all the numbers in list is − def sum(numbers): total = 0 for num in numbers: total+=num print("Sum of the numbers: ", total) sum((100, 200, 300, 400, 0, 500)) # We define a function ‘sum’ with numbers as parameter. #The in for loop we store the sum of all the values of list.Sum of the numbers: 1500120. Write a program in Python to reverse a string without using inbuilt function reverse string?Solution# Reverse a string without using reverse() function def string_reverse(string): i = len(string) - 1 print ("The length of string is: ", len(string)) sNew = '' while i >= 0: sNew = sNew + str(string[i]) i = i -1 return sNew print(string_reverse("1tniop")) # First we declare a variable to store the reverse string. # Then using while loop and indexing of string (index is calculated by string length) # we reverse the string. While loop starts when index is greater than zero. # Index is reduced to value 1 each time. When index reaches zero we obtain the reverse of string.The length of string is: 6 point1121. Write a program to test whether the number is in the defined range or not?Solution# Program is − def test_range(num): if num in range(0, 101): print("%s is in range"%str(num)) else: print("%s is not in range"%str(num)) # print("The number is outside the given range.") test_range(99) # To test any number in a particular range we make use of the method ‘if..in’ and else condition.99 is in range122. Write a program to calculate number of upper case letters and number of lower case letters?SolutionTest on String: 'The quick Brown Fox'# Program is − def string_test(s): d={"UPPER_CASE":0, "LOWER_CASE":0} for c in s: if c.isupper(): d["UPPER_CASE"]+=1 elif c.islower(): d["LOWER_CASE"]+=1 else: pass print ("String in testing is: ", s) print ("Number of Lower Case characters in String: ", d["UPPER_CASE"]) print ("Number of Upper Case characters in String: ", d["LOWER_CASE"]) string_test('The quick Brown Fox') # We make use of the methods .isupper() and .islower(). We initialise the count for lower and upper. 
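# Note: the two print statements above have their labels swapped —
# d["UPPER_CASE"] actually holds the count of upper-case characters and
# d["LOWER_CASE"] the count of lower-case ones, so in the printed output below
# the numbers appear under the opposite headings.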
# Using if and else condition we calculate total number of lower and upper case characters.String in testing is: The quick Brown Fox Number of Lower Case characters in String: 3 Number of Upper Case characters in String: 133 - Train Recommenderfrom sklearn.model_selection import train_test_split import pandas as pd import re import numpy as np from keras.layers import * from keras.models import Model, load_model from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.preprocessing import LabelEncoder, OneHotEncoder from livelossplot import PlotLossesKerasHelper functions# df: dataframe containing features to be encoded # columns: list of columns to be encoded def one_hot_encode(df, columns): ohe = OneHotEncoder() ohe_features = pd.DataFrame(ohe.fit_transform(df[columns]).toarray()) ohe_features.columns = ohe.get_feature_names() df = pd.concat([df, ohe_features], axis=1) df = df.drop(columns = categorical_features) return df # df: dataframe containing features to be encoded # columns: list of columns to be encoded def label_encode(df, columns): le = LabelEncoder() df[columns] = df[columns].apply(le.fit_transform) return df # df: dataframe containing text to be vectorized # column: string name of text column # vectorizer: scikit learn vectorizer - CountVectorizer or TfidfVectorizer def vectorize_text(df, column, vectorizer): text = df[column].replace(np.nan, ' ').tolist() X = vectorizer.fit_transform(text) df[column+'_features'] = list(X.toarray()) # word_vecs = pd.DataFrame(X.toarray()) df.drop(columns=column, inplace=True) # df = pd.concat([df, word_vecs], axis = 1) return df # vectorizes columns that include a list that should be broken out into one-hot-encoded features # for example, a column containing lists like ["red", "green", "blue"] will be transformed into 3 columns with 0/1 indicators # df: dataframe containing column to be vectorized # column: column containing list of features def vectorize_columns(df, columns): for column in columns: df[column] = df[column].fillna('[]') df[column] = df[column].apply(lambda x: x.strip('][').split(', ')) features = df[column].apply(frozenset).to_frame(name='features') for feature in frozenset.union(*features.features): new_col = feature.strip('\'').lower() df[new_col] = features.apply(lambda _: int(feature in _.features), axis=1) df = df.drop(columns = [column]) return df # feature_columns: list of column names that contain single features values # embedding_columns: list of column names that contain vector embeddings (image or text embeddings) def create_metadata_df(df, feature_columns, embedding_columns): features = df[feature_columns].reset_index(drop=True) embeddings = pd.DataFrame() for column in embedding_columns: embeddings = pd.concat([embeddings, pd.DataFrame(np.vstack(df[column]))], axis=1) result = pd.concat([features,embeddings],axis=1) return result # recommender with only user-item ratings and no user-item features def create_basic_network(n_items, n_users, n_factors): item_input = Input(shape=[1], name="Item-Input") item_embedding = Embedding(n_items, n_factors, name="Item-Embedding")(item_input) item_vec = Flatten(name="Flatten-Items")(item_embedding) user_input = Input(shape=[1], name="User-Input") user_embedding = Embedding(n_users, n_factors, name="User-Embedding")(user_input) user_vec = Flatten(name="Flatten-Users")(user_embedding) prod = Dot(name="Dot-Product", axes=1)([item_vec, user_vec]) model = 
Model([user_input, item_input], prod) model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001)) return modelLoad and preprocess data# user-item-ratings user_item_ratings_file = 'path to csv with schema: item_id, user_id, rating' ratings = pd.read_csv(user_item_ratings_file) # item features items_file = 'path to csv with schema: item_id, item_feature1, item_feature2, ..., item_featureN' items = pd.read_csv(items_file) items = items[['item_id','color','category','item_gender','description']] # sample columns in our dataset # user features user_file = 'path to csv with schema: user_id, user_feature1, user_feature2, ..., user_featureN' users = users[['user_id','user_gender','colors','user_description']] # sample columns in our dataset users = pd.read_csv(user_file) # item image encoded vectors images = pd.read_pickle('../data/image_vecs_encoded.pkl')Prepare item features# add image features to item data items = pd.merge(items,images,on='item_id') # encode item categorical features from strings to ints item_cat_features = ['color', 'category', 'item_gender'] # TODO: replace with your categorical string features items = label_encode(items, item_cat_features) # vectorize item text descriptions tf_vectorizer = TfidfVectorizer() items = vectorize_text(items, 'description', tf_vectorizer)Prepare user features# encode user categorical features user_cat_features = ['user_gender'] # TODO: replace with your categorical string features users = label_encode(users, user_cat_features) # vectorize user features - split lists into one hot encoded columns users = vectorize_columns(users, ['colors']) # sample column that contains lists in our dataset, e.g. ['blue', 'purple'] # if there is text associated with the user, vectorize it here (like a user request, profile description, or other) users = vectorize_text(users, 'user_description', tf_vectorizer)Add all metadata to ratings dfratings = pd.merge(ratings, items, on='item_id') ratings = pd.merge(ratings, users, on='user_id')Train modeltrain, test = train_test_split(ratings, test_size=0.15, random_state=42) n_users = len(ratings.user_id.unique()) n_items = len(ratings.item_id.unique()) def hybrid_recommender_v1(n_item_features, n_user_features, embedding_size): user_id_input = Input(shape=[1], name='user') item_id_input = Input(shape=[1], name='item') item_meta_input = Input(shape=[n_item_features], name='item_features') user_meta_input = Input(shape=[n_user_features], name='user_features') user_embedding = Embedding(output_dim=embedding_size, input_dim=n_users, name='user_embedding')(user_id_input) item_embedding = Embedding(output_dim=embedding_size, input_dim=n_items, name='item_embedding')(item_id_input) item_metadata = Dense(units=embedding_size, name='item_metadata')(item_meta_input) user_metadata = Dense(units=embedding_size, name='user_metadata')(user_meta_input) user_vec = Flatten()(user_embedding) item_vec = Flatten()(item_embedding) item_vec = Add()([item_vec, item_metadata]) user_vec = Add()([user_vec, user_metadata]) input_vec = Concatenate()([user_vec, item_vec])#, item_metadata, user_metadata]) x = Dense(128, activation='relu')(input_vec) x = Dropout(0.5)(x) y = Dense(1)(x) model = Model(inputs=[user_id_input, item_id_input, item_meta_input, user_meta_input], outputs=y) model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mae']) return model def hybrid_recommender_v2(n_item_features, n_user_features, embedding_size): # users user_id_input = Input(shape=[1], name='user') user_meta_input = Input(shape=[n_user_features], 
name='user_features') user_embedding = Embedding(output_dim=embedding_size, input_dim=n_users, name='user_embedding')(user_id_input) user_vec = Flatten()(user_embedding) user_vec = Dropout(0.5)(user_vec) user_metadata = Dense(units=embedding_size, name='user_metadata')(user_meta_input) # items item_id_input = Input(shape=[1], name='item') item_meta_input = Input(shape=[n_item_features], name='item_features') item_img_input = Input(shape=[embedding_size], name='item_image_features') # autoencoded image features item_embedding = Embedding(output_dim=embedding_size, input_dim=n_items, name='item_embedding')(item_id_input) item_vec = Flatten()(item_embedding) item_vec = Dropout(0.5)(item_vec) item_metadata = Dense(units=embedding_size, name='item_metadata')(item_meta_input) # join features item_vec = Add()([item_vec, item_metadata, item_img_input]) user_vec = Add()([user_vec, user_metadata]) input_vec = Concatenate()([user_vec, item_vec])#, item_metadata, user_metadata]) x = Dropout(0.5)(x) x = Dense(128, activation='relu')(input_vec) x = Dropout(0.5)(x) y = Dense(1)(x) model = Model(inputs=[user_id_input, item_id_input, item_meta_input, user_meta_input, item_img_input], outputs=y) model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mae']) return modelNo autoencoded features - raw VGG16 embeddings# metadata cols item_feature_cols = ['color','category','item_gender'] # item feature columns that contain a single value item_embedding_cols = ['image_features','description_features'] # item feature columns that contain a list of embeddings - applicable to image or text embeddings user_feature_cols = ['user_gender','rose gold','white','black','gray','gold','red','orange','natural','blue light'] # gender plus additional one-hot-encoded features user_embedding_cols = ['user_description_features'] # prepare train & test inputs train_item_metadata = create_metadata_df(train, item_feature_cols, item_embedding_cols) test_item_metadata = create_metadata_df(test, item_feature_cols, item_embedding_cols) train_user_metadata = create_metadata_df(train, user_feature_cols, user_embedding_cols) test_user_metadata = create_metadata_df(test, user_feature_cols, user_embedding_cols) # architecture v1 n_item_features = 6534 n_user_features = 2797 embedding_size = 256 model = hybrid_recommender_v1(n_item_features, n_user_features, embedding_size) history = model.fit([train.user_id, train.item_id, train_item_metadata, train_user_metadata] , train.rating , batch_size=32, epochs=50 , validation_split=0.1 , validation_data=([test.user_id, test.item_id, test_item_metadata, test_user_metadata], test.rating) , callbacks = [PlotLossesKeras()] , shuffle=True)Using autoencoded features# Architecture v1 item_embedding_cols = ['image_features_encoded','description_features'] train_item_metadata = create_metadata_df(train, item_feature_cols, item_embedding_cols) test_item_metadata = create_metadata_df(test, item_feature_cols, item_embedding_cols) train_user_metadata = create_metadata_df(train, user_feature_cols, user_embedding_cols) test_user_metadata = create_metadata_df(test, user_feature_cols, user_embedding_cols) n_item_features = 2694 model = hybrid_recommender_v1(n_item_features, n_user_features, embedding_size) history = model.fit([train.user_id, train.item_id, train_item_metadata, train_user_metadata] , train.rating , batch_size=32, epochs=100 , validation_split=0.1 , validation_data=([test.user_id, test.item_id, test_item_metadata, test_user_metadata], test.rating) , callbacks = [PlotLossesKeras()] , 
shuffle=True) # Architecture v2 n_item_features = 2438 item_embedding_cols = ['description_features'] train_item_metadata = create_metadata_df(train, item_feature_cols, item_embedding_cols) test_item_metadata = create_metadata_df(test, item_feature_cols, item_embedding_cols) train_user_metadata = create_metadata_df(train, user_feature_cols, user_embedding_cols) test_user_metadata = create_metadata_df(test, user_feature_cols, user_embedding_cols) model = hybrid_recommender_v2(n_item_features, n_user_features, embedding_size) best = ModelCheckpoint('../models/recommender.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto') history = model.fit([train.user_id, train.item_id, train_item_metadata, train_user_metadata, np.vstack(train.image_features_encoded)] , train.rating , batch_size=32, epochs=100 , validation_split=0.2 , validation_data=([test.user_id, test.item_id, test_item_metadata, test_user_metadata, np.vstack(test.image_features_encoded)], test.rating) , callbacks = [PlotLossesKeras(), best] , shuffle=True)Generate Predictions# load trained model model = load_model('../models/recommender.h5') # model generated from v2 architecture # prep model inputs item_ids = items.item_id # all item ids num_items = len(item_ids) user_idx = users.sample(1).user_id.index[0] # select random user index user_data = users.loc[[user_idx]] # get data for selected user user_id = user_data.user_id.values[0] # get user id for selected user user_ids = np.array([user_id for i in range(num_items)]) # array of user id repeated to match number of items item_embedding_cols = ['description_features'] item_metadata = create_metadata_df(items, item_feature_cols, item_embedding_cols) user_metadata = create_metadata_df(user_data, user_feature_cols, user_embedding_cols) user_metadata = user_metadata.loc[user_metadata.index.repeat(num_items)] # repeat user features by number of items # get predictions preds = model.predict([user_ids, item_ids, item_metadata, user_metadata, np.vstack(ratings.image_features_encoded)]) preds = np.array([x[0] for x in preds]) # sort and get top N recommendations (indices of items recommended) num_recs = 10 rec_ids = (-preds).argsort()[:num_recs] # get recommended item ids and ratings from indices above recs = [(item_ids[x],preds[x]) for x in rec_ids] # list of tuples - (item id, predicted rating)Deep Neural Network in TensorFlow In this notebook, we improve on our [intermediate neural net](https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/intermediate_net_in_tensorflow.ipynb) by incorporating the theory we've covered since. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/jonkrohn/DLTFpT/blob/master/notebooks/deep_net_in_tensorflow.ipynb) Load dependenciesimport tensorflow from tensorflow.keras.datasets import mnist from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout # new! from tensorflow.keras.layers import BatchNormalization # new! 
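# Dropout and BatchNormalization are the two layers marked "new!" relative to the
# intermediate net: Dropout randomly zeroes a fraction of activations during
# training to reduce overfitting, while BatchNormalization standardizes each
# layer's inputs, which tends to stabilize and speed up training.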
from tensorflow.keras.optimizers import SGD from matplotlib import pyplot as pltLoad data(X_train, y_train), (X_valid, y_valid) = mnist.load_data()Preprocess dataX_train = X_train.reshape(60000, 784).astype('float32') X_valid = X_valid.reshape(10000, 784).astype('float32') X_train /= 255 X_valid /= 255 n_classes = 10 y_train = to_categorical(y_train, n_classes) y_valid = to_categorical(y_valid, n_classes)Design neural network architecturemodel = Sequential() model.add(Dense(64, activation='relu', input_shape=(784,))) model.add(BatchNormalization()) model.add(Dense(64, activation='relu')) model.add(BatchNormalization()) model.add(Dense(64, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) model.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 64) 50240 _________________________________________________________________ batch_normalization (BatchNo (None, 64) 256 _________________________________________________________________ dense_1 (Dense) (None, 64) 4160 _________________________________________________________________ batch_normalization_1 (Batch (None, 64) 256 _________________________________________________________________ dense_2 (Dense) (None, 64) 4160 _________________________________________________________________ batch_normalization_2 (Batch (None, 64) 256 ________________________________________________________[...]Configure modelmodel.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])Train!model.fit(X_train, y_train, batch_size=128, epochs=20, verbose=1, validation_data=(X_valid, y_valid))Train on 60000 samples, validate on 10000 samples Epoch 1/20 60000/60000 [==============================] - 3s 45us/sample - loss: 0.3792 - accuracy: 0.8857 - val_loss: 0.1684 - val_accuracy: 0.9479 Epoch 2/20 60000/60000 [==============================] - 2s 31us/sample - loss: 0.1519 - accuracy: 0.9542 - val_loss: 0.1251 - val_accuracy: 0.9596 Epoch 3/20 60000/60000 [==============================] - 2s 26us/sample - loss: 0.1110 - accuracy: 0.9663 - val_loss: 0.1087 - val_accuracy: 0.9669 Epoch 4/20 60000/60000 [==============================] - 2s 30us/sample - loss: 0.0885 - accuracy: 0.9726 - val_loss: 0.0986 - val_accuracy: 0.9711 Epoch 5/20 60000/60000 [==============================] - 1s 19us/sample - loss: 0.0748 - accuracy: 0.9765 - val_loss: 0.0900 - val_accuracy: 0.9735 Epoch 6/20 60000/60000 [==============================] - 1s 23us/sample - loss: 0.0624 - accuracy: 0.9804 - val_loss: 0.0964 - val_accuracy: 0.9718 Epoch 7/20 60000/60000 [==============================] [...]Performing inferencevalid_0 = X_valid[0].reshape(1, 784) model.predict(valid_0) model.predict_classes(valid_0)Setup%matplotlib inline import numpy as np import scipy.signal as sig import scipy.stats as stat import matplotlib.pyplot as plt import seaborn as sns import os import h5py import datetime import pandas as pdGeneral infosavePlots = True # whether or not to save plots saveData = True # whether or not to save csv files saveAsPath = './Fig 09/' if not os.path.exists(saveAsPath): os.mkdir(saveAsPath) saveAsName = 'Fig9c_' #path = '/Users/svcanavan/Dropbox/Coding in progress/00_BudgieSleep/Data_copies/' birdPaths = ['../data_copies/01_PreprocessedData/01_BudgieFemale_green1/00_Baseline_night/', 
'../data_copies/01_PreprocessedData/02_BudgieMale_yellow1/00_Baseline_night/', '../data_copies/01_PreprocessedData/03_BudgieFemale_white1/00_Baseline_night/', '../data_copies/01_PreprocessedData/04_BudgieMale_yellow2/00_Baseline_night/', '../data_copies/01_PreprocessedData/05_BudgieFemale_green2/00_Baseline_night/'] arfFilePaths = ['EEG 2 scored/', 'EEG 3 scored/', 'EEG 3 scored/', 'EEG 4 scored/', 'EEG 4 scored/'] ### load BEST EEG channels - as determined during manual scoring #### channelsToLoadEEG_best = [['6 LEEGm-LEEGp', '5 LEEGf-LEEGp'], #, '9 REEGp-LEEGp'], # extra channel to represent R hemisphere ['5 LEEGf-LEEGm', '4 LEEGf-Fgr'], #, '9 REEGf-REEGm'], # extra channel to represent R hemisphere ['9REEGm-REEGp', '4LEEGf-LEEGp'], ['6LEEGm-LEEGf', '9REEGf-REEGp'], ['7REEGf-REEGp', '4LEEGf-LEEGp']] ### load ALL of EEG channels #### channelsToLoadEEG = [['4 LEEGf-Fgr', '5 LEEGf-LEEGp', '6 LEEGm-LEEGp', '7 LEEGp-Fgr', '8 REEGp-Fgr','9 REEGp-LEEGp'], ['4 LEEGf-Fgr','5 LEEGf-LEEGm', '6 LEEGm-LEEGp', '7 REEGf-Fgr', '8 REEGm-Fgr', '9 REEGf-REEGm'], ['4LEEGf-LEEGp', '5LEEGf-LEEGm', '6LEEGm-LEEGp', '7REEGf-REEGp', '8REEGf-REEGm', '9REEGm-REEGp'], ['4LEEGf-LEEGp', '5LEEGm-LEEGp', '6LEEGm-LEEGf', '7REEGf-Fgr', '8REEGf-REEGm','9REEGf-REEGp',], ['4LEEGf-LEEGp', '5LEEGf-LEEGm', '6LEEGm-LEEGp', '7REEGf-REEGp', '8REEGf-REEGm', '9REEGm-REEGp']] channelsToLoadEOG = [['1 LEOG-Fgr', '2 REOG-Fgr'], ['2 LEOG-Fgr', '3 REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr']] birds_LL = [1,2,3] nBirds_LL = len(birds_LL) birdPaths_LL = ['../data_copies/01_PreprocessedData/02_BudgieMale_yellow1/01_Constant_light/', '../data_copies/01_PreprocessedData/03_BudgieFemale_white1/01_Constant_light/', '../data_copies/01_PreprocessedData/04_BudgieMale_yellow2/01_Constant_light/',] arfFilePaths_LL = ['EEG 2 preprocessed/', 'EEG 2 preprocessed/', 'EEG 2 preprocessed/'] lightsOffSec = np.array([7947, 9675, 9861 + 8*3600, 9873, 13467]) # lights off times in seconds from beginning of file lightsOnSec = np.array([46449, 48168, 48375+ 8*3600, 48381, 52005]) # Bird 3 gets 8 hours added b/c file starts at 8:00 instead of 16:00 epochLength = 3 sr = 200 scalingFactor = (2**15)*0.195 # scaling/conversion factor from amplitude to uV (when recording arf from jrecord) stages = ['w','d','u','i','s','r'] # wake, drowsy, unihem sleep, intermediate sleep, SWS, REM stagesSleep = ['u','i','s','r'] stagesVideo = ['m','q','d','s','u'] # moving wake, quiet wake, drowsy, sleep, unclear ## Path to scores formatted as CSVs formatted_scores_path = '../formatted_scores/' ## Path to detect SW ands EM events: use folder w/ EMs and EM artifacts detected during non-sleep events_path = '../data_copies/SWs_EMs_and_EMartifacts/'colorpalette from iWantHue# PSG scores colors = sns.color_palette(np.array([[234,103,99], [218,142,60], [174,174,62], [97,188,101], [140,133,232], [225,113,190]]) /255) sns.palplot(colors) # colorpalette from iWantHue # video scores colors_video = sns.color_palette(np.array([[244,133,129], [255,153,120], [248,172,90], [204,204,92], [204,204,92], [124, 203, 204], [200, 200, 200]]) /(255)) sns.palplot(colors_video) colors_birds = [np.repeat(.4, 3), np.repeat(.5, 3), np.repeat(.6, 3), np.repeat(.7, 3), np.repeat(.8, 3)] sns.palplot(colors_birds) LD_color=[0, 102/255, 1] LL_color=[1, 99/255, 99/255] sns.palplot([LD_color, LL_color])Plot-specific infosns.set_context("notebook", font_scale=1.5) sns.set_style("white") # Markers for legends of EEG scoring colors legendMarkersEEG = [] for stage in 
range(len(stages)): legendMarkersEEG.append(plt.Line2D([0],[0], color=colors[stage], marker='o', linestyle='', alpha=0.7))Calculate general variableslightsOffEp = lightsOffSec / epochLength lightsOnEp = lightsOnSec / epochLength nBirds = len(birdPaths) epochLengthPts = epochLength*sr nStages = len(stagesSleep) ## Calculate lights off in Zeitgeber time (s and hrs) ## Lights on is 0 lightsOffZeit_s = lightsOffSec - lightsOnSec lightsOffZeit_hr = lightsOffZeit_s / 3600Plot formattingfigsize = (8,6) axis_label_fontsize = 24 # Line formatting linewidth = 5 linealpha = .7 marker = 'o' markersize = 10 err_capsize = 3 err_capthick = 3 elinewidth = 3 # Bar formatting bar_linewidth = 4Load data Read in filesAllScores = {} for b in range(nBirds): bird_name = 'Bird ' + str(b+1) file = formatted_scores_path + 'All_scores_' + bird_name + '.csv' data = pd.read_csv(file, index_col=0) # if any epochs missing, mark as unclear date_index = pd.date_range(data.index.values[0], end=data.index.values[-1], freq='3s') data_reindexed = data.reindex(date_index.astype('str')) data_reindexed['Video Label'].fillna('u', inplace=True) # interpolate Time-related columns time_columns = ['Time (s)', 'Time (h)', 'Zeitgeber time (s)', 'Zeitgeber time (h)', 'Zeitgeber time (ep)', 'Epoch #'] data_interpolated = data_reindexed.copy() data_interpolated[time_columns] = data_reindexed[time_columns].interpolate() AllScores[bird_name] = data_interpolated # LL for b in [1,2,3]: bird_name = 'Bird ' + str(b+1) file = formatted_scores_path + 'All_scores_' + bird_name + 'LL.csv' data = pd.read_csv(file, index_col=0) AllScores[bird_name + 'LL'] = dataConvert video labels to numbersbirds = list(AllScores.keys()) # LD # for b_name in birds: b_num = int(b_name[5]) - 1 Scores = AllScores[b_name] Label_num = -1 * np.ones_like(Scores['Video Label']) for st in range(len(stagesVideo)): stage_inds = [x for x in range(len(Scores['Video Label'])) if stagesVideo[st] in Scores['Video Label'].iloc[x]] Label_num[stage_inds] = st # Preening/singing labels stage_inds = [x for x in range(len(Scores['Video Label'])) if ('p' in Scores['Video Label'].iloc[x])|('si' in Scores['Video Label'].iloc[x])] Label_num[stage_inds] = 0 # Unihem labels stage_inds = [x for x in range(len(Scores['Video Label'])) if ('r' in Scores['Video Label'].iloc[x])] Label_num[stage_inds] = 4 # Save to dataframe AllScores[b_name]['Video Label (#)'] = Label_num stagesVideoGet lengths of sleep episodes - based on video recording only Merge continuous epochs of the same stage, same length & start timeScoresMerged = {} for bird in birds: scores = AllScores[bird] # add a "stop" column scores['Stop (s)'] = scores['Time (s)'] + epochLength original_end_s = scores['Stop (s)'].iloc[-1] # add a column to keep track of consecutive epochs of the same stage scores['episode #'] = (scores['Video Label'] != scores['Video Label'].shift(1)).astype(int).cumsum() # don't worry about the gaps where the video recording restarted # if the behavior is the same before and after the gap, count it as a continuous episode # if you want to split episodes where the gap occurs, add: # (scores['Stop (s)'] == scores['Time (s)'].shift(-1)) # combine all epochs of the same episode # and use the values from the first epoch of that episode merged_scores = scores.groupby(scores['episode #']).aggregate('first') # calculate length of each episode lengths = merged_scores['Time (s)'].shift(-1) - merged_scores['Time (s)'] lengths.iloc[-1] = original_end_s - merged_scores['Time (s)'].iloc[-1] merged_scores['Length (s)'] = lengths 
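# At this point each row of merged_scores is one continuous behavioural episode:
# 'Length (s)' is the time from an episode's start to the start of the next
# episode, with the final episode extended to the original end of the recording.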
ScoresMerged[bird] = merged_scoresCheck lengthsbird = 'Bird 4' print(len(AllScores[bird]) * epochLength/3600) # original print(ScoresMerged[bird]['Length (s)'].sum()/3600) # merged print(ScoresMerged[bird]['Length (s)'].sum()/3600 - 36/3600) # merged, but subtract 6 seconds # for each of the 6 gaps where the video restarted print(len(AllScores[bird + 'LL']) * epochLength/3600) # original print(ScoresMerged[bird + 'LL']['Length (s)'].sum()/3600) # merged print(ScoresMerged[bird + 'LL']['Length (s)'].sum()/3600 - 30/3600) # merged, but subtract 6 seconds for each of the 5 gaps where the video restarted23.9225 24.016666666666666 24.008333333333333Total numbers of continuous episodes of each stageSleepDurationStats = pd.DataFrame([]) n_sleep_pds = pd.Series([]) for bird in birds: n_episodes = ScoresMerged[bird]['Video Label (#)'].value_counts() #print(bird, '\t', n_episodes[5]) n_sleep_pds.loc[bird] = n_episodes[5] SleepDurationStats['# sleep pds'] = n_sleep_pds n_sleep_pdsOverall average lengthssleep_durations = pd.Series([]) sleep_durations_SD = pd.Series([]) for n,bird in enumerate(birds): scores = ScoresMerged[bird] avg_lengths = scores.groupby('Video Label (#)')['Length (s)'].mean() sd_lengths = scores.groupby('Video Label (#)')['Length (s)'].std() sleep_durations.loc[bird] = avg_lengths[5] sleep_durations_SD.loc[bird] = sd_lengths[5] SleepDurationStats['sleep pd duration mean'] = sleep_durations SleepDurationStats['sleep pd duration SD'] = sleep_durations_SD sleep_durationsREM latencyREM_Latencies = {} for bird in birds: if 'LL' not in bird: REM_latencies_bird = [] scores = AllScores[bird] # Go through each sleep episode, see if there is REM, and calculate the latency for episode_num in np.unique(scores['episode #']): episode_epochs = scores[scores['episode #']==episode_num] if episode_epochs.iloc[0]['Video Label (#)']==5: # sleep episodes only REM_epoch_inds = episode_epochs['Label (#)']==5 if any(REM_epoch_inds): # find REM epochs episode_start_s = episode_epochs.iloc[0]['Time (s)'] REM_start_s = episode_epochs[REM_epoch_inds].iloc[0]['Time (s)'] REM_latency = REM_start_s - episode_start_s REM_latencies_bird.append(REM_latency) REM_Latencies[bird] = REM_latencies_bird mean_REM_latencies = pd.Series([]) sd_REM_latencies = pd.Series([]) plt.figure(figsize=(15,3)) for n, bird in enumerate(birds): if 'LL' not in bird: plt.subplot(1,5,n+1) plt.hist(REM_Latencies[bird], range=(0,200), bins=50, histtype='step', color=colors[5]) plt.xlabel('REM latency (s)') mean_REM_latencies.loc[bird] = np.mean(REM_Latencies[bird]) sd_REM_latencies.loc[bird] = np.std(REM_Latencies[bird]) SleepDurationStats['REM latencies mean'] = mean_REM_latencies SleepDurationStats['REM latencies SD'] = sd_REM_latencies mean_REM_latenciesFIGURE 9C: Sleep durations vs REM latenciesplot_colors = [LD_color, colors[5], LL_color] axis_color = [.8,.8,.8] with plt.rc_context({'axes.edgecolor': 'k'}): # set color of plot outline plt.figure(figsize=(4,4.5)) # Bar graph of means sleep_pd_durations = SleepDurationStats['sleep pd duration mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values REM_latencies = SleepDurationStats['REM latencies mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values sleep_pd_durations_LL = SleepDurationStats['sleep pd duration mean'][['Bird 2LL','Bird 3LL','Bird 4LL']].values plt.bar([0], np.mean(sleep_pd_durations), width=.75, color='none', edgecolor=plot_colors[0], linewidth=4, alpha=linealpha) plt.bar([1], np.mean(sleep_pd_durations_LL), width=.75, color='none', 
edgecolor=plot_colors[2], linewidth=4, alpha=linealpha) # Line plot of each bird for bird in ['Bird 2', 'Bird 3', 'Bird 4']: # LL birds stats_LD = SleepDurationStats.loc[bird] stats_LL = SleepDurationStats.loc[bird + 'LL'] plt.plot([0, 1], [stats_LD['sleep pd duration mean'], stats_LL['sleep pd duration mean']], alpha=linealpha, color=colors_birds[int(bird[5])-1], lw=linewidth/1.5) for bird in ['Bird 1', 'Bird 5']: # LD-only birds stats_LD = SleepDurationStats.loc[bird] # Scatter plot of each plot in corresponding color plt.scatter(np.repeat(0,5), sleep_pd_durations, 5*markersize, color=plot_colors[0], alpha=linealpha) plt.scatter(np.repeat(1,3), sleep_pd_durations_LL, 5*markersize, color=plot_colors[2], alpha=linealpha) plt.xlim(-.7, 1.7) plt.ylim(0, 180) plt.xticks([0,1], [("LD mean\n sleep pd\n duration"), ('LL mean\n sleep pd\n duration')], fontsize=14) for ticklabel, tickcolor in zip(plt.gca().get_xticklabels(), [plot_colors[0], plot_colors[2]]): ticklabel.set_color(tickcolor) plt.ylabel('seconds', fontsize=axis_label_fontsize) # y label only on leftmost plot sns.despine() plt.tight_layout() if savePlots: plt.savefig(saveAsPath + saveAsName + "sleep_pd_durations_1_LLvLD.pdf") plot_colors = [LD_color, colors[5], LL_color] axis_color = [.8,.8,.8] with plt.rc_context({'axes.edgecolor': 'k'}): # set color of plot outline plt.figure(figsize=(4,4.5)) # Bar graph of means sleep_pd_durations = SleepDurationStats['sleep pd duration mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values REM_latencies = SleepDurationStats['REM latencies mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values sleep_pd_durations_LL = SleepDurationStats['sleep pd duration mean'][['Bird 2LL','Bird 3LL','Bird 4LL']].values plt.bar([0], np.mean(sleep_pd_durations), width=.75, color='none', edgecolor=plot_colors[0], linewidth=4, alpha=linealpha) plt.bar([1], np.mean(REM_latencies), width=.75, color='none', edgecolor=plot_colors[1], linewidth=4, alpha=linealpha) # Line plot of each bird for bird in ['Bird 2', 'Bird 3', 'Bird 4']: # LL birds stats_LD = SleepDurationStats.loc[bird] stats_LL = SleepDurationStats.loc[bird + 'LL'] plt.plot([0, 1], [stats_LD['sleep pd duration mean'], stats_LD['REM latencies mean']], alpha=linealpha, color=colors_birds[int(bird[5])-1], lw=linewidth/1.5) for bird in ['Bird 1', 'Bird 5']: # LD-only birds stats_LD = SleepDurationStats.loc[bird] plt.plot([0, 1], [stats_LD['sleep pd duration mean'], stats_LD['REM latencies mean']], alpha=linealpha, color=colors_birds[int(bird[5])-1], lw=linewidth/1.5) # Scatter plot of each plot in corresponding color plt.scatter(np.repeat(0,5), sleep_pd_durations, 5*markersize, color=plot_colors[0], alpha=linealpha) plt.scatter(np.repeat(1,5), REM_latencies, 5*markersize, color=plot_colors[1], alpha=linealpha) plt.xlim(-.7, 1.7) plt.ylim(0, 180) plt.xticks([0,1], [("LD mean\n sleep pd\n duration"), ('LD\n REM latency')], fontsize=14) for ticklabel, tickcolor in zip(plt.gca().get_xticklabels(), plot_colors[0:2]): ticklabel.set_color(tickcolor) plt.ylabel('seconds', fontsize=axis_label_fontsize) # y label only on leftmost plot sns.despine() plt.tight_layout() if savePlots: plt.savefig(saveAsPath + saveAsName + "sleep_pd_durations_2_LD_vs_REM_latency.pdf") plot_colors = [LD_color, colors[5], LL_color] axis_color = [.8,.8,.8] with plt.rc_context({'axes.edgecolor': 'k'}): # set color of plot outline plt.figure(figsize=(4,4.5)) # Bar graph of means sleep_pd_durations = SleepDurationStats['sleep pd duration mean'][['Bird 1', 'Bird 2','Bird 3','Bird 
4','Bird 5']].values REM_latencies = SleepDurationStats['REM latencies mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values sleep_pd_durations_LL = SleepDurationStats['sleep pd duration mean'][['Bird 2LL','Bird 3LL','Bird 4LL']].values plt.bar([0], np.mean(sleep_pd_durations_LL), width=.75, color='none', edgecolor=plot_colors[2], linewidth=4, alpha=linealpha) plt.bar([1], np.mean(REM_latencies), width=.75, color='none', edgecolor=plot_colors[1], linewidth=4, alpha=linealpha) # Line plot of each bird for bird in ['Bird 2', 'Bird 3', 'Bird 4']: # LL birds stats_LD = SleepDurationStats.loc[bird] stats_LL = SleepDurationStats.loc[bird + 'LL'] plt.plot([0, 1], [stats_LL['sleep pd duration mean'], stats_LD['REM latencies mean']], alpha=linealpha, color=colors_birds[int(bird[5])-1], lw=linewidth/1.5) for bird in ['Bird 1', 'Bird 5']: # LD-only birds stats_LD = SleepDurationStats.loc[bird] # Scatter plot of each plot in corresponding color plt.scatter(np.repeat(0,3), sleep_pd_durations_LL, 5*markersize, color=plot_colors[2], alpha=linealpha) plt.scatter(np.repeat(1,5), REM_latencies, 5*markersize, color=plot_colors[1], alpha=linealpha) plt.xlim(-.7, 1.7) plt.ylim(0, 180) plt.xticks([0,1], [("LL mean\n sleep pd\n duration"), ('LD\n REM latency')], fontsize=14) for ticklabel, tickcolor in zip(plt.gca().get_xticklabels(), [plot_colors[2], plot_colors[1]]): ticklabel.set_color(tickcolor) plt.ylabel('seconds', fontsize=axis_label_fontsize) # y label only on leftmost plot sns.despine() plt.tight_layout() if savePlots: plt.savefig(saveAsPath + saveAsName + "sleep_pd_durations_3_LL_vs_REM_latency.pdf")STATS: Fig 9c# all birds sleep_pd_durations = SleepDurationStats['sleep pd duration mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values REM_latencies = SleepDurationStats['REM latencies mean'][['Bird 1', 'Bird 2','Bird 3','Bird 4','Bird 5']].values print('LD sleep pd durations vs REM latencies (n = 5)') stat.ttest_rel(sleep_pd_durations, REM_latencies) # include only birds with both LL and LD sleep_pd_durations = SleepDurationStats['sleep pd duration mean'][['Bird 2','Bird 3','Bird 4']].values REM_latencies = SleepDurationStats['REM latencies mean'][['Bird 2','Bird 3','Bird 4']].values sleep_pd_durations_LL = SleepDurationStats['sleep pd duration mean'][['Bird 2LL','Bird 3LL','Bird 4LL']].values print('LD sleep pd durations vs REM latencies (n = 3)') stat.ttest_rel(sleep_pd_durations, REM_latencies) print('LD sleep pd durations vs LL sleep pd durations (n = 3)') stat.ttest_rel(sleep_pd_durations, sleep_pd_durations_LL) print('LL sleep pd durations vs REM latency (n = 3)') stat.ttest_rel(sleep_pd_durations_LL, REM_latencies)LL sleep pd durations vs REM latency (n = 3)save to csvif saveData: SleepDurationStats.to_csv(saveAsPath + saveAsName + 'sleep_durations_REM_latencies.csv')Example of using LinUCB on a recommendation environment with parametric actionsimport os import time from matplotlib import pyplot as plt import pandas as pd from ray import tune from ray.rllib.contrib.bandits.agents import LinUCBTrainer from ray.rllib.contrib.bandits.agents.lin_ucb import UCB_CONFIG from ray.rllib.contrib.bandits.envs import ParametricItemRecoEnv %matplotlib inline UCB_CONFIG["env"] = ParametricItemRecoEnv # Actual training_iterations will be 20 * timesteps_per_iteration (100 by default) = 2,000 training_iterations = 20 print("Running training for %s time steps" % training_iterations) start_time = time.time() # first argument should no longer be `LinUCBTrainer` analysis = tune.run( 
"contrib/LinUCB", config=UCB_CONFIG, stop={"training_iteration": training_iterations}, num_samples=5, checkpoint_at_end=False ) print("The trials took", time.time() - start_time, "seconds\n") # Analyze cumulative regrets of the trials frame = pd.DataFrame() for key, df in analysis.trial_dataframes.items(): frame = frame.append(df, ignore_index=True) x = frame.groupby("num_steps_trained")[ "learner/cumulative_regret"].aggregate(["mean", "max", "min", "std"]) x plt.plot(x["mean"]) plt.fill_between( x.index, x["mean"] - x["std"], x["mean"] + x["std"], color="b", alpha=0.2 ) plt.title("Cumulative Regret") plt.xlabel("Training steps") plt.show()Compare functions using timeit Compare the performance of different functions to append custom string to items in a list.# performance checks # timeit results # matlib plots # refer https://stackoverflow.com/questions/50312305/what-is-the-best-way-to-interleave-two-lists # input l=['a','b','c'] # output out_l = ['a','a_1','b','b_2','c','c_3'] # import libraries from timeit import timeit from itertools import chain import tkinter import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # function 1 def fun1(l): return [ l[int(i / 2)] + "_" + str(int(i / 2) + 1) if i % 2 != 0 else l[int(i/2)] for i in range(0,2*len(l)) ] # function 2 def fun2(l): return [ i for b in [[a, '{}_{}'.format(a, i)] for i, a in enumerate(l, start=1)] for i in b ] # function 3 def fun3(l): return [j for i, a in enumerate(l, 1) for j in [a, '{}_{}'.format(a, i)]] # function 4 def fun4(l): return [ val for pair in zip(l, [f'{k}_{j+1}' for j, k in enumerate(l)]) for val in pair ] # function 5 def fun5(l): def _cs1(l): for i, x in enumerate(l, 1): yield x yield f'{x}_{i}' return list(_cs1(l)) # function 6 def fun6(l): return list(chain.from_iterable( zip(l, [f'{x}_{i}' for i, x in enumerate(l, 1)])) ) # function 7 def fun7(l): out_l = [None] * (len(l) * 2) out_l[::2] = l out_l[1::2] = [f'{x}_{i}' for i, x in enumerate(l, 1)] return out_l # create a pandas dataframe # index has the list of functions # columns has the multiplication factor - # to increase input list size (thereby complexity) res = pd.DataFrame( index=['fun1', 'fun2', 'fun3', 'fun4', 'fun5', 'fun6', 'fun7'], columns=[10, 50, 100, 500, 1000, 5000], # 10000, 50000, 100000], # uncomment to test larger lists dtype=float ) res # each function to be looped over the mul.factors # timeit is used and output to dataframe for f in res.index: for c in res.columns: l = ['a', 'b', 'c', 'd'] * c stmt = '{}(l)'.format(f) # f(l) setp = 'from __main__ import l, {}'.format(f) res.at[f, c] = timeit(stmt, setp, number=50) res # using matplotlib to plot ax = res.div(res.min()).T.plot(loglog=True) ax.set_xlabel("N"); ax.set_ylabel("time (relative)"); # plt.show()Quora Questions Deduplication **Imports** **Aim:** Create a baseline model in PyTorch to solve the Deduplication Taskimport os,re,zipfile import pandas as pd import numpy as np from types import SimpleNamespace from matplotlib import pyplot as plt import itertools plt.style.use('dark_background') plt.style.use('seaborn') # Torch import torch import torch.nn as nn import torch.nn.functional as F torch.backends.cudnn.deterministic = True # metrics from sklearn import metrics # Data processing from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from tqdm.notebook import tqdm**Set PyTorch to use GPU if there is one**dev = torch.device('cuda') dev**Download the dataset using Kaggle credentials**%%time zip_name = 'train.csv.zip' 
if not os.path.exists(zip_name): os.environ['KAGGLE_USERNAME'] = "" # username from the json file os.environ['KAGGLE_KEY'] = "" # key from the json file !kaggle competitions download -c quora-question-pairsCPU times: user 238 µs, sys: 55 µs, total: 293 µs Wall time: 227 µs**Load data**df = pd.read_csv('train.csv.zip')[['question1','question2','is_duplicate']].dropna() dups = df[df.is_duplicate == 1].copy() no_dups = df[df.is_duplicate == 0].copy() split_fact = 1.3 df = pd.concat([dups[:int(len(dups) / split_fact)], no_dups[:int(len(no_dups) / split_fact)]],ignore_index=True) df**Initialize a class for the USE**import tensorflow as tf import tensorflow_hub as hub class UniversalSentenceEncoder: def __init__(self, encoder='universal-sentence-encoder', version='4'): self.version = version self.encoder = encoder self.embd = hub.load(f"https://tfhub.dev/google/{encoder}/{version}",) def embed(self, sentences): return self.embd(sentences) def squized(self, sentences): return np.array(self.embd(tf.squeeze(tf.cast(sentences, tf.string)))) ue = UniversalSentenceEncoder()**Train/Test split the data**train,test = train_test_split(df,test_size=0.33,random_state=42,stratify=df['is_duplicate'])**Convert the arrays to Tensors and specify the usage of GPU**%%time x_q1_train = torch.from_numpy(ue.squized(train['question1'].values)).type(torch.FloatTensor).to(dev) x_q2_train = torch.from_numpy(ue.squized(train['question2'].values)).type(torch.FloatTensor).to(dev) y_train = torch.from_numpy(train['is_duplicate'].values).type(torch.LongTensor).to(dev) x_q1_test = torch.from_numpy(ue.squized(test['question1'].values)).type(torch.FloatTensor).to(dev) x_q2_test = torch.from_numpy(ue.squized(test['question2'].values)).type(torch.FloatTensor).to(dev) y_test = torch.from_numpy(test['is_duplicate'].values).type(torch.LongTensor).to(dev)CPU times: user 1min 47s, sys: 2.95 s, total: 1min 50s Wall time: 1min 39s**Use the data loader to make things easier on training**b_size = 256 train_dl = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_q1_train,x_q2_train,y_train), batch_size=b_size, shuffle=True) test_dl = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_q1_test,x_q2_test,y_test), batch_size=b_size, shuffle=True)**Finally we do all the training in one class in the same cell for the ease of making chagnes**class Net(nn.Module): def __init__(self): super(Net,self).__init__() self.q1_lin = nn.Linear(in_features=512,out_features=1024) self.q2_lin = nn.Linear(in_features=512,out_features=1024) self.lin1 = nn.Linear(in_features=2048,out_features=1024) self.lin2 = nn.Linear(in_features=1024,out_features=512) self.lin3 = nn.Linear(in_features=512,out_features=256) self.lin4 = nn.Linear(in_features=256,out_features=128) self.lin5 = nn.Linear(in_features=128,out_features=2) # here we take the input data and pass it through the chain of layers def forward(self,q1,q2): q1 = self.q1_lin(q1) q2 = self.q2_lin(q2) x = torch.cat((q1,q2),dim=1) # print(x.size()) x = self.lin1(x) x = self.lin2(x) x = self.lin3(x) x = self.lin4(x) x = self.lin5(x) return x # instance our model model = Net().to(dev) # set the number of epochs epochs = 100 # criterion aka loss function -> find more on pytorch doc criterion = nn.CrossEntropyLoss() # optimizer optimizer = torch.optim.Adam(model.parameters(), lr=0.003) # create 3 lists to store the losses and accuracy at each epoch train_losses, test_losses, accuracy = [0]*epochs, [0]*epochs,[0]*epochs # in this current case we don't use batches for training and we pass the whole 
data at each epoch for e in tqdm(range(epochs)): for q1,q2,label in train_dl: optimizer.zero_grad() q1 = q1.float() q2 = q2.float() # Comput train loss y_pred = model(q1,q2) loss = criterion(y_pred, label) loss.backward() optimizer.step() # store train loss train_losses[e] = loss.item() for q1,q2,label in test_dl: # Compute the test stats with torch.no_grad(): # Turn on all the nodes model.eval() q1 = q1.float() q2 = q2.float() # Comput test loss ps = model(q1,q2) loss = criterion(ps, label) # store test loss test_losses[e] = loss.item() # # Compute accuracy top_p, top_class = ps.topk(1, dim=1) equals = (top_class == label.view(*top_class.shape)) # # store accuracy accuracy[e] = torch.mean(equals.type(torch.FloatTensor)) # Print the final information print(f'Accuracy : {100*accuracy[-1].item():0.2f}%') print(f'Train loss: {train_losses[-1]}') print(f'Test loss : {test_losses[-1]}') # Plot the results fig,ax = plt.subplots(1,2,figsize=(20,5)) ax[0].set_ylabel('Accuracy') ax[0].set_xlabel('Epochs') ax[0].set_title('Model Accuracy') ax[0].plot(accuracy) ax[1].set_ylabel('Loss') ax[1].set_xlabel('Epochs') ax[1].set_title('Train/Test Losses') ax[1].plot(train_losses, label='train') ax[1].plot(test_losses, label='test') ax[1].legend() plt.tight_layout()Jitter and BER *, April 2021*This Notebook contains an introduction into Jitter, its correlation to BER (bit error ratio) and how to measure Jitter, particularly using Xilinx FPGA's with IBERT. Jupyter setupRequired Python imports and helper functionsimport os, sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy import special from scipy.stats import norm`helper_func` import path depends on execution in **Colab** or local **Jupyter**if 'google.colab' in sys.modules: if "BER_tail_fit" in os.listdir(): !git -C BER_tail_fit pull else: !git clone https://github.com/munich-ml/BER_tail_fit/ from BER_tail_fit.lib.helper_funcs import JitterEstimator, plot_jitter_fit, plot_jitter_overlay filesDir = os.path.join(os.getcwd(), "BER_tail_fit", "datasets") print("executing in Google Colab") else: from lib.helper_funcs import JitterEstimator, plot_jitter_fit, plot_jitter_overlay filesDir = os.path.join(os.getcwd(), "datasets") print("executing in Jupyter") np.random.seed(22) def get_url(image_name): return "https://github.com/munich-ml/BER_tail_fit/blob/main/images/{}.png?raw=true".format(image_name)Introduction to Jitter **Jitter** is the **timing uncertainty** of signal edges at the crossing point with their reference level (0V for differential signaling).Since **Noise** describes a level uncertainty, **timing noise** or **phase noise** (usually used in frequency domain).![nice](https://github.com/munich-ml/BER_tail_fit/blob/main/images/DJeye_dscr.png?raw=true) The **Total Jitter (TJ)** consists of 2 major components:**Random Jitter (RJ)**- unbounded --> increases over time- Gaussian distribution**Deterministic Jitter (DJ)**- bounded --> saturates over time - can be split into sub-components (e.g. PJ, DCD, ISI) Jitter in a transmission system ![Tx+channel+Rx](https://github.com/munich-ml/BER_tail_fit/blob/main/images/tx_channel_rx.png?raw=true)The Jitter needs to be small enough for the receiver to sample the `rx_data`, while satisfying its setup- and hold-requirements. 
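To put a number on that timing budget, RJ and DJ are commonly combined into a single peak-to-peak total jitter at a target BER. The sketch below uses the widely quoted dual-Dirac rule of thumb TJ(BER) ≈ DJ + 2·Q(BER)·σ_RJ; it is not part of this notebook's helper library, and the example RJ/DJ numbers are assumptions for illustration only.

import numpy as np
from scipy.special import erfcinv

# Illustrative sketch (not from this notebook's lib): dual-Dirac style
# total-jitter estimate TJ(BER) = DJ + 2 * Q(BER) * sigma_RJ
def q_factor(ber):
    # number of RJ sigmas per eye edge for a given target BER
    return np.sqrt(2) * erfcinv(2 * ber)

def total_jitter_ui(rj_sigma_ui, dj_pp_ui, ber=1e-12):
    # peak-to-peak total jitter estimate in unit intervals (UI)
    return dj_pp_ui + 2 * q_factor(ber) * rj_sigma_ui

tj = total_jitter_ui(rj_sigma_ui=0.02, dj_pp_ui=0.15, ber=1e-12)  # assumed example values
print(f"TJ @ BER=1e-12: {tj:.3f} UI -> remaining eye opening ~ {1 - tj:.3f} UI")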
Measure Jitter using a Scope A scope (realtime oscilloscope) measures jitter directly with the following basic procedure: - **wavetrace acquisition** (voltage over time bitstream)- **edge detection** (signal crossings with the reference voltage)- **clock recovery from data** (or usage of a strobe for source synchronous clocking schemas)- **data eye creation** (see Tektronix Primer)- **jitter** (or TIE: time interval error) is now given as edge distribution (e.g. Gaussian shaped)![eye_creation](https://github.com/munich-ml/BER_tail_fit/blob/main/images/eye_creation.png?raw=true) Disadvantages of Jitter measurements using ScopesAlthough (realtime-) scopes are a very useful tool when analysing communication systems with respect to Jitter, their usage comes with some disadvantages:- scopes and probes are expensive- measurements are only available on individual samples and/or only during test - the probe changes the channel when being applied- the probe is placed somewhere on the channel, not at the receiverThe **in-system FPGA-based measurement approach** proposed further down can potentially mitigate or even solve those issues. How Jitter relates to the BER (bit error ratio) With higher jitter it is more likely for the receiver to sample too early or too late.x = np.linspace(-5/6, 5/6, num=500) scale = 0.05 # sigma value of the gaussian distribution norm_pdf = norm.pdf(x, loc=-0.5, scale=scale) + norm.pdf(x, loc=0.5, scale=scale) too_early = 1 - norm.cdf(x, loc=-0.5, scale=scale) too_late = norm.cdf(x, loc=0.5, scale=scale) plt.figure(figsize=(8, 9)), plt.subplot(2,1,1) plt.imshow(plt.imread(get_url("RJeye"))), plt.axis("off") plt.subplot(2,1,2) plt.fill_between(x, norm_pdf / norm_pdf.max(), color="orange", label="normalized gaussian PDF") plt.plot(x, too_early, "k-.", label="gaussian CDF @mu=0: sampling too early") plt.plot(x, too_late, "b-.", label="gaussian CDF @mu=1: sampling too late") plt.xlim([min(x), max(x)]), plt.xticks(np.linspace(min(x), max(x), num=11)) plt.xlabel("time [UI]"), plt.grid(), plt.legend();The example above shows a data eye together with the distribution of its crossings (the jitter distribution, PDF).Integrating the PDF provides the likelihood of sampling too early or too late (CDF). BER definitionThe **bit error ratio ($BER$)** is a *figure of merit* for link quality, commonly used in communications engineering.The $BER$ describes how many bit errors there are (on average) within the received data stream: $BER=\frac{error\_bits}{received\_bits}$A typical specification is: $BER < 10^{-12}$ BER tail fittingThe basic idea of BER tail fitting is to fit BER samples from measurements to a Jitter model, consisting of:- $\sigma$, `sigma`: Standard deviation of the Gaussian corresponding to the **RJ** (random jitter)- $\mu$, `mu`: Mean value of the Gaussian corresponding to the **DJ** (deterministic jitter)The **Gaussian Model** is fitted only to those BER samples that are below a certain BER threshold $BERt$ (below means *later* in test time).
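The actual fitting in this repository is done by the `JitterEstimator` class from `lib/helper_funcs.py`. As a minimal standalone sketch of the same idea (threshold and example values are assumptions), the tail samples can be linearized with the inverse complementary error function, using the relation derived below, and fitted with a straight line that directly yields `sigma` and `mu`:

import numpy as np
from scipy.special import erfcinv

def fit_left_tail(x_ui, ber, ber_threshold=1e-4):
    # Fit sigma/mu of the left eye edge from (sample position, BER) pairs:
    # with x = erfcinv(4*BER) * sigma * sqrt(2) + mu (derived below), the tail
    # samples form a straight line when plotted over erfcinv(4*BER).
    x_ui, ber = np.asarray(x_ui), np.asarray(ber)
    mask = (ber > 0) & (ber < ber_threshold)      # keep only the tail samples
    q = erfcinv(4 * ber[mask])
    slope, intercept = np.polyfit(q, x_ui[mask], deg=1)
    return slope / np.sqrt(2), intercept          # sigma, mu

# Synthetic round-trip check with assumed sigma=0.03 UI, mu=-0.45 UI
ber_grid = np.logspace(-12, -2, 30)
x_grid = erfcinv(4 * ber_grid) * 0.03 * np.sqrt(2) + (-0.45)
print(fit_left_tail(x_grid, ber_grid))            # ~ (0.03, -0.45)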
The $BERt$ is chosen such that ideally all deterministic jitter sources completed one cycle.mu = -0.48 # mean value of the distribution sigma = 0.04 # standard deviation ui_pos = -0.42 # sample position (example) x = np.linspace(-0.5, 0, 500) pdf = norm.pdf(x, loc=mu, scale=sigma) # compute pdf for variable x def cdf(x): # define the CDF (cumulative density function) using the erf (error function) return 0.5 * (1+special.erf( (x-mu)/(np.sqrt(2)*sigma) )) plt.figure(figsize=(10,8)), plt.subplot(3,1,1) plt.plot(x, pdf, "k", label="PDF(x)") plt.stem([ui_pos], [max(pdf)], markerfmt='D', use_line_collection=True, label="sample position") plt.fill_between(x[x <= ui_pos], pdf[x <= ui_pos], color="green", alpha=0.4, label="P1") plt.fill_between(x[x > ui_pos], pdf[x > ui_pos], color="red", alpha=0.4, label="P2") plt.title(f"Edge probability 'left side data eye' with mu={mu}, sigma={sigma}") plt.ylabel("probability density"), plt.legend(), plt.grid() plt.subplot(3,1,2) plt.plot(x, cdf(x), "g", label="CDF(x) = P1(x)") plt.plot(x, 1-cdf(x), "r", label="1-CDF(x) = P2(x)") plt.plot(2*[ui_pos], [cdf(ui_pos), 1-cdf(ui_pos)], "bD", label="sample position") plt.ylabel("probability"), plt.legend(), plt.grid(); plt.subplot(3,1,3) plt.semilogy(x, 1-cdf(x), "r", label="1-CDF(x) = P2(x)") plt.semilogy([ui_pos], [1-cdf(ui_pos)], "bD", label="sample position") plt.ylabel("probability"), plt.ylim([1e-12, 1]) plt.xlabel("x [UI]"), plt.legend(), plt.grid();The **edge distribution** at $\pm\frac{1}{2}UI$ is assumed to have a **gaussian distribution** according to > $PDF(x) = \frac{1}{\sigma\sqrt{2\pi} }e^{-\frac{(x - \mu)^{2}}{2\sigma^2}}$> > with> - $PDF$ = normal probability density function, available in [scipy.stats.norm](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html?highlight=stats%20normscipy.stats.norm)> - $\sigma$ (*sigma*) = standard deviation> - $\mu$ (*mu*) = mean value When looking for the `bit error count`, - all edges **left** from the `sample position` within $-\frac{1}{2}UI \cdots 0UI$ provide good data, - while all edges **right** from the `sample position` provide data from the previous Unit Interval, thus **bit errors** at a rate of 0.5 (because every other bit is statistically right, if there are just *ones* and *zeros*)Therefore, the area $P2$ represents the $BER$ (Bit Error Ratio) with> $BER = \frac{1}{2}{P2}$The **integration of the Gaussian** can be done by means of the **Error Function** $erf(x)$, which is nicely described in [Integration of Gaussian between limits](https://www.youtube.com/watch?v=26QbWYBCw7Y): > $CDF(x) = \frac{1}{2}[1+erf(\frac{x-\mu}{\sigma\sqrt2})]$>> with> - $CDF$ = cumulative density function of a Gaussian> - $erf$ = error function> - $\sigma$ (*sigma*) = standard deviation> - $\mu$ (*mu*) = mean value Just for reference, the Error Function is defined as:> $erf(x)=\frac2{\sqrt{\pi}}\int_0^x e^{-t^2} \,dt$Returning to the **data eye problem**, the $CDF(sample\_position)$ equals the area $P1$, and therefore:> $BER = \frac{1}{2}[1-CDF(x)] = \frac{1}{4}[1-erf(\frac{x-\mu}{\sigma\sqrt2})]$With the **[complementary error function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfc.html)** $erfc(x) = 1-erf(x)$, we get:> $BER = \frac{1}{4} erfc(\frac{x-\mu}{\sigma\sqrt2})$This equation needs to be resolved for $x$, because the we need to find the $sample\_position$ for a given $BER$. 
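As a quick numeric sanity check of this relation (added here for illustration, reusing the example values mu=-0.48, sigma=0.04 and ui_pos=-0.42 from the cell above), the closed form ¼·erfc(·) agrees with ½·P2 computed from the Gaussian CDF:

import numpy as np
from scipy.special import erfc
from scipy.stats import norm

mu, sigma, ui_pos = -0.48, 0.04, -0.42           # same example values as above
ber_at_sample = 0.25 * erfc((ui_pos - mu) / (sigma * np.sqrt(2)))
p2 = 1 - norm.cdf(ui_pos, loc=mu, scale=sigma)   # area right of the sample position
print(f"BER via erfc formula: {ber_at_sample:.4f}")   # ~0.033
print(f"BER via 0.5 * P2    : {0.5 * p2:.4f}")        # same value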
Fortunately, there is an **[inverse compementary error function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfcinv.htmlscipy.special.erfcinv)** $erfcinv$, which leads to the final equation:> $x = erfcinv(4BER)\ \sigma\sqrt2 + \mu$ >> again with> - $x$ = sample position (on the left half of the unit interval)> - $erfcinv$ = inverse complementary error function> - $BER$ = Bit Error Ratio, at which the distribution is evaluated> - $\sigma$ (*sigma*) = standard deviation of the Gaussian> - $\mu$ (*mu*) = mean value of the Gaussian FPGA build-in BERT FPGA-maker XILINX offers **Integrated Bit Error Ratio Tester IBERT** for their [7-Series GTP transceivers](https://www.xilinx.com/support/documentation/white_papers/wp428-7Series-Serial-Link-Signal-Analysis.pdf) and [UltraScale/UltraScale+ GTX and GTY transceivers](https://www.xilinx.com/products/intellectual-property/ibert_ultrascale_gth.htmloverview).![WP482](https://github.com/munich-ml/BER_tail_fit/blob/main/images/wp482.png?raw=true) Evaluating test dataThe following example data has been acquired on an [Avnet AES-KU040-DB-G evaluation board](https://github.com/munich-ml/BER_tail_fit/blob/main/literature/FPGA_EvalBoard.pdf), populated with an Xilinx Kintex® UltraScale™ FPGA. The data is provided in the `datasets` directory in JSON format and library function are also provided in the `lib` directory.fns = os.listdir(filesDir) fnsThe **Jitter fits** shown below appear reasonable. The two channels `C9` and `C10` are quite different in terms of Jitter performance which is expected, because their channels are different:- `C9`: FPGA GTX transceiver connected via 10G SFP+ transceivers and a fiber-loopback- `C10`: FPGA GTX transceiver connected via SMA to 2x 0.5m cheap RG58 copper-loopback cablesplot_jitter_fit(os.listdir(filesDir)[0], filesDir, exclude_chs=[8, 11])The RJ and TJ peak-2-peak values are estimated for $BERs=10^{-12}$.The images above show, that the **Gaussian model** fits well for low $BER$ below ~$10^{-4}$ ($BERt$ = threshold).The next image is an overlay of the examples above:plot_jitter_overlay(os.listdir(filesDir)[0], filesDir, exclude_chs=[8, 11], figsize=(12,3))Jitter extrapolation to $BER=10^{-12}$ Modeling the RJ behavior is helpful to estimate the *long-term* Jitter performance with a *short* test. As an example, the same set of channels (C8..C11) have been tested twice with different `targetBER` (testtime):- `jitter_long_` with `targetBER=1E-12`- `jitter_short_` with `targetBER=1E-8`, thus a difference of factor 10.000 in testtime!plot_jitter_overlay(os.listdir(filesDir), filesDir, exclude_chs=[])Evaluation:- Within each trace, the fitted samples (`X`) are well on the trace. Thus, the method of fitting a Gaussian seems valid.- If short-term / long-term measurements differ in $sigma$/`RJrms`, the short-term measurement is worse. Thus, extrapolating from short-term is conservative.- Some short-term / long-term measurements differ in $mu$/`DJ`> - todo: Verify reproducibility (incl. 
tester warm-up) ConclusionAppendix: Jitter and BER Simulation Setting-up the simulationN = int(4e5) # number of simulated bits DR = 1e9 # data rate [bits/s] UI = 1/DR # unit interval [s] RJ_SIGMA = 0.025 # simulated random jitter's sigma PJ_FREQ = 3e5 # frequency of the periodic jitter PJ_AMPL = 0.1 # periodic jitter amplitude [UI] t = np.linspace(start=0, stop=(N-1)*UI, num=N) # time vector dj = PJ_AMPL * np.sin(2 * np.pi * PJ_FREQ * t) # deterministic jitter, consisting of PJ only rj = RJ_SIGMA * np.random.randn(N) # random jitter tj = rj + dj # total jitter plt.figure(figsize=(12, 4)) plt.plot(tj, ".", label="TJ"); plt.plot(rj, ".", label="RJ"); plt.plot(dj, ".", label="DJ"); plt.xlabel("time [UI]"), plt.ylabel("jitter [UI]") plt.xlim([0, 7000]) plt.legend(loc="best"), plt.grid(), plt.tight_layout(); bins = np.linspace(-0.5, 0.5, 300) plt.figure(figsize=(12, 4)) plt.hist(tj, bins=bins, histtype="stepfilled", label="TJ") plt.hist(rj, bins=bins, histtype="step", linewidth=4, label="RJ") plt.hist(dj, bins=bins, histtype="step", linewidth=4, label="DJ") plt.yscale("log") plt.ylabel("counts per bin"), plt.xlabel("jitter [UI]") plt.legend(loc="best"), plt.grid(), plt.tight_layout();Random bit sequence as datadata = np.random.randint(0, 2, N) plt.figure(figsize=(14, 1.2)) n = 100 # number of bits shown sns.lineplot(x=t[:n]*1e9, y=data[:n], drawstyle='steps-post') plt.title(f"first {n} bits of the data") plt.xlabel("t [ns]"), plt.ylabel('level ["arbitrary"]');**Data sampling and error checking**Create a receiver sampler with `65` steps within the unit intervalRX_PI_STEPS = 65 # step count of the receiver phase interpolator rx_pi = np.linspace(0, 1, num=RX_PI_STEPS)Checking for errorserrors = [] for rx_pi_step in rx_pi: errors.append(0) # start with 0 errors at each new RX PI step for i, tj_sample in enumerate(tj): if 0 < i < N-1: # allows sampling data[i-1], data[i+1] if tj_sample > rx_pi_step: # checking left side of the eye errors[-1] += int(np.logical_xor(data[i-1], data[i])) if 1 + tj_sample < rx_pi_step: # checking right side of the eye errors[-1] += int(np.logical_xor(data[i+1], data[i]))Compute and plot BERber = np.array(errors) / N plt.figure(figsize=(12, 4)) plt.semilogy(rx_pi, ber, "rX", label="measured BER") plt.semilogy([0, 1], [1/N, 1/N], "-b", label="targetBER") plt.semilogy([0, 1], [1e-12, 1e-12], "-g", label="BERs") plt.xlabel("RX PI position [UI]"), plt.ylabel("BER") plt.xlim([0, 1]), plt.ylim([1e-12, 1]) plt.legend(loc="upper center"), plt.grid();Visualizing Streaming k-means on IPython + Lightning LightningDATA VISUALIZATION SERVERLightning is a data-visualization server providing API-based access to reproducible, web-based, interactive visualizations. It includes a core set of visualization types, but is built for extendability and customization. Lightning supports modern libraries like d3.js and three.js, and is designed for interactivity over large data sets and continuously updating data streams. http://lightning-viz.org API: node.js, Python, Scala Extension support for custom charts (e.g.
d3.js) Lightning requirements: - Postgres recommended (SQLlite ok) - node.js (npm , gulp)from IPython.display import IFrame IFrame('https://lightning-docs.herokuapp.com/visualizations/4/iframe/', 1155, 673)The Freeman Lab at Janelia Research Campus uses Lightning to visualize large-scale neural recordings from zebrafish, in collaboration with the Ahrens Labfrom IPython.display import Image Image('http://lightning-viz.org/images/featured-graphic.png')Let's get started Spark Streaming k-means demo Environment requires: numpy, scipy, scikit-learn IPython/Python requires: lightning-python package Demo consists of 3 parts- Python driver script, data generator- Scala job - Spark Streaming - Streaming k-means- IPython client to process result, visualize with LightningOriginally this was part of the Python driver script - it has been modified for this talk to run within IPython First, some helper functionsimport os import glob from numpy import loadtxt from StringIO import StringIO def loadrecent(filename, oldtime, oldoutput): try: fname = max(glob.iglob(filename), key=os.path.getctime) except: #print('No file found') return [], oldtime newtime = os.path.getctime(fname) if not (newtime > oldtime): #print('File is not new') return oldoutput, oldtime try: f = open(fname) if os.fstat(f.fileno()).st_size == 0: print('File is empty') return [], oldtime except: print('Cannot load file') return [], oldtime prediction = loadtxt(fname, delimiter=',') return prediction, newtime def readcenters(i): """ Read integer labels """ #print (os.path.join(datainlabels, 'labels%g.txt' % i)) try: centers = loadtxt(os.path.join(datainlabels, 'labels%g.txt' % i), dtype=int) except: print('Cannot load cluster membership labels', sys.exc_info()) return array([]) return centers def readpts(i): """ Read original points """ try: with open (os.path.join(datain, 'batch%g.txt' % i), 'r') as ptsfile: ptscontent=ptsfile.read().replace('[', '').replace(']', '') pts = loadtxt(StringIO(ptscontent), delimiter=',') except: print('Cannot load points', sys.exc_info()) return array([]) return ptsProcessing k-means model 1. read model computed by Spark Streaming k-means 2. compute results with numpy 3. 
add to Lightning's streaming viz, displaying it as an iFrame in IPythonimport time from numpy import asarray, array, vstack, hstack, size, random, argsort, ones, argmin, sin, cos, pi from IPython.display import display, IFrame, HTML def run(dataout, nbatches, ncenters, ndims, lgn=None, npoints = 50): viz = None modeltime = 0 model = [] # loop over batches for i in range(1, nbatches): time.sleep(1) # get the latest model (after waiting) oldtime = modeltime model, modeltime = loadrecent(dataout + '/*-model.txt', modeltime, model) # plot an update (if we got a valid model) if len(model) == ncenters: if (oldtime != modeltime): centers = readcenters(i) pts = readpts(i) # pts.size should == len(pts) * 2 if (len(centers) == npoints & len(pts) == npoints): clrs = centers order = argsort(centers) clrs = clrs[order] pts = pts[order] s = ones(npoints) * 10 if ndims == 1: pts = vstack((pts, model[:,None])) else: pts = vstack((pts, model)) clrs = hstack((clrs, ones(ncenters) * 5)) s = hstack((s, ones(ncenters) * 10)) # wait a few iterations before plotting if (i > 5): # scatter plot for two dimensions if ndims == 2: if viz is None: viz = lgn.scatterstreaming(pts[:, 0], pts[:, 1], label=clrs, size=s) display(IFrame(viz.get_permalink(), width=1280, height=1280)) else: viz.append(pts[:, 0], pts[:, 1], label=clrs, size=s) # line plot for one dimension elif ndims == 1: if viz is None: viz = lgn.linestreaming(pts, label=clrs, size=s/2) display(IFrame(viz.get_permalink(), width=1280, height=1280)) else: viz.append(pts, label=clrs, size=s/2) else: raise Exception('Plotting only supported with 1 or 2 dimensions')Now, let's create the Lightning session and kick start the loop above To run, - ensure the Lightning server is running - start the Python driver script and wait 2-3 seconds for it to clean up the input/output data directory - then execute the cell belowimport os import tempfile from lightning import Lightning ncenters = 4 # 9 ndims = 2 # 1 nbatches = 100 # nb 100, 40 lightninghost = 'http://192.168.128.1:3000' path = os.path.join(tempfile.gettempdir(), 'streamkmeans') dataout = os.path.join(path, 'output') datain = os.path.join(path, 'input') datainlabels = os.path.join(path, 'inputlbl') lgn = Lightning(lightninghost) #, ipython=True) lgn.create_session('streaming-kmeans') run(dataout, nbatches, ncenters, ndims, lgn)Spatial queries: Point in Polygon & IntersectFinding out if a certain point is located inside or outside of an area, or finding out if a line intersects with another line or polygon are fundamental geospatial operations that are often used e.g. to select data based on location. Such spatial queries are one of the typical first steps of the workflow when doing spatial analysis. Performing a spatial join (will be introduced later) between two spatial datasets is one of the most typical applications where Point in Polygon (PIP) query is used. How to check if point is inside a polygon?Computationally, detecting if a point is inside a Polygon is most commonly done using a specific formula called [Ray Casting algorithm](https://en.wikipedia.org/wiki/Point_in_polygonRay_casting_algorithm). Luckily, we do not need to create such a function ourselves for conducting the Point in Polygon (PIP) query. Instead, we can take advantage of [Shapely's binary predicates](http://toblerity.org/shapely/manual.htmlbinary-predicates) that can evaluate the topolocical relationships between geographical objects, such as the PIP as we're interested here. There are basically two ways of conducting PIP in Shapely: 1. 
using a function called [.within()](http://toblerity.org/shapely/manual.html#object.within) that checks if a point is within a polygon 2. using a function called [.contains()](http://toblerity.org/shapely/manual.html#object.contains) that checks if a polygon contains a point Notice: even though we are talking here about the **Point** in Polygon operation, it is also possible to check if a LineString or Polygon is inside another Polygon. - Let's first create a Polygon using a list of coordinate-tuples and a couple of Point objectsfrom shapely.geometry import Point, Polygon # Create Point objects p1 = Point(24.952242, 60.1696017) p2 = Point(24.976567, 60.1612500) # Create a Polygon coords = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)] poly = Polygon(coords) # Let's check what we have print(p1) print(p2) print(poly)POINT (24.952242 60.1696017) POINT (24.976567 60.16125) POLYGON ((24.950899 60.169158, 24.953492 60.169158, 24.95351 60.170104, 24.950958 60.16999, 24.950899 60.169158))- Let's check if those points are `within` the polygon# Check if p1 is within the polygon using the within function p1_within = p1.within(poly) # Check if p2 is within the polygon p2_within = p2.within(poly) # Print the results print("Is p1 within the polygon?: ", p1_within) print("Is p2 within the polygon?: ", p2_within)Is p1 within the polygon?: True Is p2 within the polygon?: FalseOkay, so we can see that the first point seems to be inside that polygon and the other one doesn't. - In fact, the first point is close to the center of the polygon as we can see:print(p1) print(poly.centroid)POINT (24.952242 60.1696017) POINT (24.95224242849236 60.16960179038188)- It is also possible to do PIP the other way around, i.e. to check if a polygon contains a point:# Does polygon contain point 1 print("Does polygon contain p1?: ", poly.contains(p1)) # What about the other point? print("Does polygon contain p2?: ", poly.contains(p2))Does polygon contain p1?: True Does polygon contain p2?: FalseThus, both ways give the same result. Which one should you use then? Well, it depends: - if you have many points and just one polygon and you try to find out which one of them is inside the polygon: - you need to iterate over the points and check one at a time if it is **within()** the polygon specified - if you have many polygons and just one point and you want to find out which polygon contains the point - you need to iterate over the polygons until you find a polygon that **contains()** the point specified (assuming there are no overlapping polygons) IntersectAnother typical geospatial operation is to see if a geometry [intersects](http://toblerity.org/shapely/manual.html#object.intersects) or [touches](http://toblerity.org/shapely/manual.html#object.touches) another one. The difference between these two is that: - if objects intersect, the boundary and interior of an object need to intersect in any way with those of the other. - If an object touches the other one, it is only necessary to have (at least) a single point of their boundaries in common but their interiors should NOT intersect.Let's try these out.
- Let's create two LineStringsfrom shapely.geometry import LineString, MultiLineString # Create two lines line_a = LineString([(0, 0), (1, 1)]) line_b = LineString([(1, 1), (0, 2)])- Let's see if they intersectline_a.intersects(line_b)- Do they also touch each other?line_a.touches(line_b)Indeed, they do and we can see this by plotting the features together# Create a MultiLineString multi_line = MultiLineString([line_a, line_b]) multi_lineThus, the line_b continues from the same node ( (1,1) ) where line_a ends. - However, if the lines overlap fully, they don't touch, as we can see:# Check if line_a touches itself print("Touches?: ", line_a.touches(line_a)) # However, it does intersect print("Intersects?: ", line_a.intersects(line_a))Touches?: False Intersects?: TrueLab and Chart Events Data Extraction and Preparation SetupWe are keeping the MIMIC III tables in AWS Athena to use standard SQL queries to extract the data.The outputs are saved into *pickle* files for further use.Here we setup the connection to Athena by indicating the S3 buckets were the MIMIC III files (in *parquet* format).!pip install PyAthena from pyathena import connect from pyathena.pandas.util import as_pandas # Import libraries import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt import os import boto3 from botocore.client import ClientError from IPython.display import display, HTML import pickle %matplotlib inline s3 = boto3.resource('s3') client = boto3.client("sts") account_id = client.get_caller_identity()["Account"] my_session = boto3.session.Session() region = my_session.region_name athena_query_results_bucket = 'aws-athena-query-results-'+account_id+'-'+region try: s3.meta.client.head_bucket(Bucket=athena_query_results_bucket) except ClientError: bucket = s3.create_bucket(Bucket=athena_query_results_bucket) print('Creating bucket '+athena_query_results_bucket) cursor = connect(s3_staging_dir='s3://'+athena_query_results_bucket+'/athena/temp').cursor()Collecting PyAthena Downloading PyAthena-2.2.0-py3-none-any.whl (37 kB) Requirement already satisfied: botocore>=1.5.52 in /home/ec2-user/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from PyAthena) (1.20.55) Collecting tenacity>=4.1.0 Downloading tenacity-7.0.0-py2.py3-none-any.whl (23 kB) Requirement already satisfied: boto3>=1.4.4 in /home/ec2-user/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from PyAthena) (1.17.55) Requirement already satisfied: s3transfer<0.5.0,>=0.4.0 in /home/ec2-user/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from boto3>=1.4.4->PyAthena) (0.4.1) Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /home/ec2-user/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from boto3>=1.4.4->PyAthena) (0.10.0) Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in /home/ec2-user/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from botocore>=1.5.52->PyAthena) (2.8.1) Requirement already satisfied: urllib3<1.27,[...]Diabetic patients cohortWe have two cohorts. 
The diabetic patients in the MIMIC III database have class imbalanced for 'dead' (1217) and 'alive' (8605) outcomes.To overcome this issue we created a *balanced* cohort by means of oversampling (see details in `sql_queries.ipynb`).cohort_table = "default.diabetic_patients_cohort" ## FULL COHORT subject_id_col = "subject_id" ## FULL COHORT prefix = "" ## FULL COHORT #cohort_table = "default.train_cohort2" ## BALANCED COHORT - TRAINING #prefix = "balanced_train_" ## BALANCED COHORT - TRAINING #subject_id_col = "new_subject_id" ## BALANCED COHORT - TRAINING #cohort_table = "default.test_cohort" ## BALANCED COHORT - TESTING #prefix = "balanced_test_" ## BALANCED COHORT - TESTING #subject_id_col = "subject_id" ## BALANCED COHORT - TESTING query='select * from default.diabetic_patients_cohort order by subject_id' cursor.execute(query) patients = as_pandas(cursor) query='select mortality_flag, count (distinct ' + subject_id_col + ') from ' + cohort_table +' group by mortality_flag' cursor.execute(query) mf = as_pandas(cursor) mf.head()Selected ICD9 codesThe 17 APACHE III features and the 3 diabetic specific features (HbA1c, blood glucose and serum creatinine) map to many ICD9 codes.Also, using the attention weights we selected a subset of the features that got the most attention from the model (named *relevant* here).chartevents_codes = [ ['Capillary refill rate', 3348, 224308, 223951, 8377, 115], ['Diastolic blood pressure', 8364, 225310, 228151, 8555, 8368, 220051, 8502, 8503, 8504, 8505, 8506, 8507, 8508, 153, 8440, 224643, 227242, 8441, 220180, 8444, 8445, 8446, 8448, 220060], ['Fraction inspired oxygen', 7146, 226767, 227035, 228192, 228193, 228232], ['Glascow coma scale eye opening', 184, 220739], ['Glascow coma scale motor response', 223901, 226757], ['Glascow coma scale total', 198], ['Glascow coma scale verbal response', 223900, 226758], ['Glucose', 3744, 3745, 1310, 807, 1529, 811, 220621, 226537, 3447, 225664], ['Heart Rate', 211, 220045], ['Height', 226730], ['Mean blood pressure', 225312, 52, 6702, 220052, 6927, 3312, 3314, 3316, 7618, 3318, 3320, 3322, 7620, 7622, 3324, 5702, 443, 456, 220181], ['Oxygen saturation', 0], ['Respiratory rate', 220210, 618, 224688, 224690, 224689, 619], ['Systolic blood pressure', 51, 225309, 220050, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 442, 224167, 227243, 455, 220179, 480, 482, 484 ], ['Temperature', 224027, 645, 8537, 676, 677, 223762, 678, 679, 223761], ['Weight', 581], ['pH', 1126, 780, 223830, 220274, 220734, 4753, 4202, 1365, 7717, 3839] ] labevents_codes = [ ['Oxygen Saturation', 50817], ['Temperature', 50825], ['pH', 50820], ['% Hemoglobin A1c', 50852, 50854], ['Blood Glucose', 50931, 51529], ['Serum Creatinine', 50912] ] relevant_chartevents_codes = [ ['Glucose', 3744, 3745, 1310, 807, 1529, 811, 220621, 226537, 3447, 225664], ['Heart Rate', 211, 220045], ['Mean blood pressure', 225312, 52, 6702, 220052, 6927, 3312, 3314, 3316, 7618, 3318, 3320, 3322, 7620, 7622, 3324, 5702, 443, 456, 220181], ['Oxygen Saturation', 0], ['Systolic blood pressure', 51, 225309, 220050, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 442, 224167, 227243, 455, 220179, 480, 482, 484 ], ['Temperature', 224027, 645, 8537, 676, 677, 223762, 678, 679, 223761], ['Weight', 581] ] relevant_labevents_codes = [ ['Oxygen Saturation', 50817], ['Temperature', 50825], ['Blood Glucose', 50931, 51529], ] def create_codes_table(): cursor.execute("SHOW TABLES LIKE 'featurescodes'") if (cursor.fetchone() is None): try: query = "create external table default.featurescodes (code int, 
mimiciiitable string, feature string, icd9code int) stored as PARQUET location 's3://aws-athena-query-results-067114122515-us-east-1/featurescodes'" cursor.execute(query) cnt = 0 for f in labevents_codes: fn = f[0] for c in f[1:]: query = "insert into featurescodes values (" + str(cnt) + ",'labevents','" + fn +"'," + str(c) +")" cursor.execute(query) cnt += 1 for f in chartevents_codes: fn = f[0] for c in f[1:]: query = "insert into featurescodes values (" + str(cnt) + ",'chartevents','" + fn +"'," + str(c) +")" cursor.execute(query) cnt += 1 print ("featurescodes table created!") except Exception as e: print (e) else: print ("featurescodes table already exists.")Featurescodes table creation#chartevents_codes = relevant_chartevents_codes #labevents_codes = relevant_labevents_codes create_codes_table()featurescodes table created!Item codes are unique among chart and lab events.cursor.execute("select count(*) from featurescodes f, featurescodes g where g.icd9code=f.icd9code and g.feature <> f.feature") assert 0 == cursor.fetchone()[0], "Different features have same code in featurescodes table!!!"Create joint events tableThis table holds the chart events AND the lab events that correspond to the selected codes from the patients in the cohort.We only keep here the events that were taken ** *48 hours before discharge* **.def create_joint_events(): cursor.execute("""CREATE TABLE events AS SELECT p.""" + subject_id_col + """ as subject_id, e.itemid, e.charttime, e.valuenum, p.admit_time, p.discharge_time, p.mortality_flag FROM """ + cohort_table + """ p LEFT OUTER JOIN (SELECT subject_id, itemid, charttime, valuenum FROM mimiciii.labevents l INNER JOIN default.featurescodes f ON f.icd9code = l.itemid WHERE f.mimiciiitable='labevents' UNION SELECT subject_id, itemid, charttime, valuenum FROM mimiciii.chartevents c INNER JOIN default.featurescodes f ON f.icd9code = c.itemid WHERE f.mimiciiitable='chartevents') e ON e.subject_id=p.subject_id AND e.charttime < p.discharge_time - INTERVAL '48' HOUR ORDER BY p.""" + subject_id_col + """, e.itemid, e.charttime """) cursor.execute("SHOW TABLES LIKE 'events'") if (cursor.fetchone() is None): create_joint_events()Create events_daystodischarge tableThis table adds the days to discharge date of the patients for each event. 
This is needed for further grouping of the events into individual dates.def create_events_daystodischarge(): cursor.execute("""CREATE TABLE events_daystodischarge AS SELECT subject_id, -date_diff('day', discharge_time, charttime) daystodischarge, itemid, valuenum, mortality_flag FROM events e ORDER BY subject_id, daystodischarge desc, itemid""") cursor.execute("SHOW TABLES LIKE 'events_daystodischarge'") if (cursor.fetchone() is None): create_events_daystodischarge()Create events_feature tableThis table holds the average of the measurements taken for the same feature on the same day for each patient.We took the average of the valuenum for each itemid on each daydef create_events_features(): cursor.execute("""CREATE TABLE events_features AS SELECT subject_id, daystodischarge, f.code, avg(valuenum) value, mortality_flag FROM events_daystodischarge e LEFT JOIN featurescodes f ON e.itemid=f.icd9code GROUP BY subject_id, daystodischarge, f.code, mortality_flag ORDER BY subject_id, daystodischarge desc, code""") cursor.execute("SHOW TABLES LIKE 'events_features'") if (cursor.fetchone() is None): create_events_features()Sanity checkcursor.execute("""SELECT feature, avg(value) avg, stddev(value) dev, min(value) min, max(value) max, count(value) cnt FROM events_features AS e INNER JOIN featurescodes AS f ON e.code=f.code WHERE value>0 and value<1000 GROUP BY feature ORDER BY dev DESC """) df = as_pandas(cursor) df cursor.execute(""" SELECT min(daystodischarge) minDays, max(daystodischarge) maxDays, avg(daystodischarge) avgDays, stddev(daystodischarge) stddevDays FROM events_features""") df = as_pandas(cursor) df cursor.execute(""" SELECT subject_id, count(distinct daystodischarge) cntDays FROM events_features GROUP BY subject_id""") df = as_pandas(cursor) cursor.execute(""" SELECT mortality_flag, count(distinct """ +subject_id_col + """) cnt FROM """ + cohort_table + """ group by mortality_flag""") df = as_pandas(cursor) df cursor.execute(""" SELECT mortality_flag, count(*) cnt FROM events_features group by mortality_flag""") df = as_pandas(cursor) dfConstruction of Events dataframeThe data from the events_feature table are loaded into two dataframes.The `events_item` dataframe contains for each patient and for each value of days-to-discharge a list of the features codes that were taken on that day.The `events_values` dataframe contains form each patient and for each value of days-to-discharge a list of the values corresponding to the features in the `events_item`.cursor.execute("select * from events_features order by subject_id, daystodischarge desc, code") print ("Query finished") events = as_pandas(cursor) num_pat = len(events['subject_id'].unique())Non numbers are replaced with 0.events['code'] = events['code'].fillna(0).astype(int) events['daystodischarge'] = events['daystodischarge'].fillna(0).astype(int) events['value'] = events['value'].fillna(0) events_item = events.groupby([events['subject_id'], events['daystodischarge']])['code'].apply(list).reset_index(name='codes') events_values = events.groupby([events['subject_id'], events['daystodischarge']])['value'].apply(list).reset_index(name='values') assert len(events_item['subject_id'].unique()) == num_pat, 'Wrong number of patients in events_item' assert len(events_values['subject_id'].unique()) == num_pat, 'Wrong number of patients in events_values'Dataframes are *pickled*.pickle.dump( events_item, open(prefix + "events_item.p", "wb" ) ) pickle.dump( events_values, open(prefix + "events_value.p", "wb")) pickle.dump( patients, open(prefix + 
"patients.p", "wb"))Just some assertions to make sure we saved what we expected.ei = pickle.load( open( prefix+"events_item.p", "rb" ) ) assert len(ei[ei['subject_id']==13]['codes']) == len(events_item[events_item['subject_id']==13]['codes']), "Wrong serialization!!" assert len(ei)==len(events_item, ), "Wrong serialization!!" ei = pickle.load(open(prefix+"events_value.p", "rb") ) #assert ei[ei['subject_id']==13]['values'][0] == events_values[events_values['subject_id']==13]['values'][0], "Wrong serialization!!" assert len(ei)==len(events_item), "Wrong serialization!!" ei = pickle.load(open(prefix+'patients.p', 'rb')) assert int(ei[ei['subject_id']==2511]['mortality_flag']) == int(patients[patients['subject_id']==2511]['mortality_flag'])Auxiliary datacursor.execute("select f.code, mimiciiitable, feature, avg(value) mean, stddev(value) stddev, count(value) cnt from featurescodes f left outer join events_features e on e.code=f.code group by f.code, mimiciiitable, feature order by f.code") print ("Features list") features = as_pandas(cursor) features.to_csv('featurescodes.csv') cursor.execute('select max(code) from featurescodes') max_code = int(cursor.fetchone()[0]) pickle.dump(max_code, open("events_maxcode.p", "wb")) max_codePatients without data Probably because they were discharge in less than 48 hours.dtod[dtod['daystodischarge']==0].sort_values(['subject_id']) print ("## Number of patients without data: ", len(dtod[dtod['daystodischarge']==0]))## Number of patients without data: 23Process DataImportant: Before running this notebook, make sure that you have downloaded the data from RegDocs (or you have pickled the PDFs and placed them in `data/interim/pickled_pdfs`.)This notebook will grab the text from each PDF, do some preprocessing so that the data is clean to use with elasticsearch and haystack, and then store the pieces of texts in `data/processed/pdf_texts` so that we can grab the text for question-answering.from haystack.utils import clean_wiki_text, convert_files_to_dicts, fetch_archive_from_http, print_answers from haystack.nodes import FARMReader, TransformersReaderC:\Users\ThibJacq\Anaconda3\envs\cer-qa\lib\site-packages\ray\autoscaler\_private\cli_logger.py:61: FutureWarning: Not all Ray CLI dependencies were found. In Ray 1.4+, the Ray CLI, autoscaler, and dashboard will only be usable via `pip install 'ray[default]'`. Please update your install command. "update your install command.", FutureWarning) C:\Users\ThibJacq\Anaconda3\envs\cer-qa\lib\site-packages\torchaudio\extension\extension.py:13: UserWarning: torchaudio C++ extension is not available. warnings.warn('torchaudio C++ extension is not available.') C:\Users\ThibJacq\Anaconda3\envs\cer-qa\lib\site-packages\torchaudio\backend\utils.py:89: UserWarning: No audio backend is available. 
warnings.warn('No audio backend is available.')imports# imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import metrics %matplotlib inlinedata# import the pg dataset data=pd.read_csv("C:/Users/sonny/Desktop/pg.csv") # see head of the pg dataset data.head() # check the shape of dataset data.shape data.columns data.dtypes # number of values data.nunique() # check the missing values data.isnull().sum() # view the data statistics data.describe() # find the correlation between the features corr=data.corr() corr.shape # Plotting the heatmap of correlation between features plt.figure(figsize=(20,20)) sns.heatmap(corr, cbar=True, square= True, fmt='.1f', annot=True, annot_kws={'size':15}, cmap='Greens') # overlook print("-"*40) data.info() print("-"*40) # look at the relation between obejective and explainotary; logpgv figure=plt.figure() sns.pairplot(x_vars=['Date','Mw','Distance','Depth','EqNo','StationNo'],y_vars=['logpgv'],data=data,dropna=True) plt.show() # look at the relation between obejective and explainotary; PGV* figure=plt.figure() sns.pairplot(x_vars=['Date','Mw','Distance','Depth','EqNo','StationNo'],y_vars=['PGV*'],data=data,dropna=True) plt.show() sns.distplot(data['PGV*']) sns.distplot(data['logpgv'])the reason why logpgv, not PGV# split objective variable and explainatory variables column_sels=['Mw','Distance','Depth'] X=data.loc[:,column_sels] y=data['logpgv'] # Splitting to training and testing data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 4) print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)(7787, 3) (1947, 3) (7787,) (1947,)Model: LM# Import library for Linear Regression from sklearn.linear_model import LinearRegression # Create a Linear regressor lm = LinearRegression() # Train the model using the training sets lm.fit(X_train, y_train) lm.intercept_ #Converting the coefficient values to a dataframe coeffcients = pd.DataFrame([X_train.columns,lm.coef_]).T coeffcients = coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'}) coeffcients # Model prediction on train data y_pred = lm.predict(X_train) print('R^2:',metrics.r2_score(y_train, y_pred)) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_train, y_pred))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_train, y_pred)) print('MSE:',metrics.mean_squared_error(y_train, y_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_train, y_pred))) # Visualizing the differences between obs and pre; train data plt.scatter(y_train, y_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre") plt.show() # Checking residuals plt.scatter(y_pred,y_train-y_pred) plt.title("Predicted vs residuals") plt.xlabel("Predicted") plt.ylabel("Residuals") plt.show() # Checking Normality of errors sns.distplot(y_train-y_pred) plt.title("Histogram of Residuals") plt.xlabel("Residuals") plt.ylabel("Frequency") plt.show() # Predicting Test data with the model y_test_pred = lm.predict(X_test) # Visualizing the differences between obs and pre; test data plt.scatter(y_test, y_test_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of test data") plt.show() # Model Evaluation acc_linreg = metrics.r2_score(y_test, y_test_pred) print('R^2:', acc_linreg) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_test, y_test_pred))*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)) 
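# --- Illustrative helper, not part of the original notebook ------------------
# The adjusted R^2 printed above is recomputed inline for every model in this
# notebook; wrapping it once makes the formula explicit:
#   adj_R2 = 1 - (1 - R^2) * (n - 1) / (n - p - 1),  n = samples, p = predictors
def adjusted_r2(y_true, y_hat, n_features):
    n = len(y_true)
    r2 = metrics.r2_score(y_true, y_hat)   # sklearn.metrics imported above
    return 1 - (1 - r2) * (n - 1) / (n - n_features - 1)
# e.g. adjusted_r2(y_test, y_test_pred, X_test.shape[1]) reproduces the value above.
# -----------------------------------------------------------------------------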
print('MAE:',metrics.mean_absolute_error(y_test, y_test_pred)) print('MSE:',metrics.mean_squared_error(y_test, y_test_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test, y_test_pred)))R^2: 0.5715758259963973 Adjusted R^2: 0.5709143373077659 MAE: 0.25897643120910047 MSE: 0.10526698876091573 RMSE: 0.32444874596909096Model:RF# Import Random Forest Regressor from sklearn.ensemble import RandomForestRegressor # Create a Random Forest Regressor reg = RandomForestRegressor() # Train the model using the training sets reg.fit(X_train, y_train) # Model prediction on train data y_pred = reg.predict(X_train) # Model Evaluation print('R^2:',metrics.r2_score(y_train, y_pred)) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_train, y_pred))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_train, y_pred)) print('MSE:',metrics.mean_squared_error(y_train, y_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_train, y_pred))) # Visualizing the differences between obs and pre; train data plt.scatter(y_train, y_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of train data") plt.show() # Checking residuals plt.scatter(y_pred,y_train-y_pred) plt.title("Predicted vs residuals") plt.xlabel("Predicted") plt.ylabel("Residuals") plt.show() # Predicting Test data with the model y_test_pred = reg.predict(X_test) # Model Evaluation acc_rf = metrics.r2_score(y_test, y_test_pred) print('R^2:', acc_rf) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_test, y_test_pred))*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_test, y_test_pred)) print('MSE:',metrics.mean_squared_error(y_test, y_test_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test, y_test_pred))) # Visualizing the differences between obs and pre; test data plt.scatter(y_test, y_test_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of test data") plt.show()Model:XGBoost Regressor# Import XGBoost Regressor from xgboost import XGBRegressor #Create a XGBoost Regressor reg = XGBRegressor() # Train the model using the training sets reg.fit(X_train, y_train) # Model prediction on train data y_pred = reg.predict(X_train) # Model Evaluation print('R^2:',metrics.r2_score(y_train, y_pred)) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_train, y_pred))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_train, y_pred)) print('MSE:',metrics.mean_squared_error(y_train, y_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_train, y_pred))) # Visualizing the differences between obs and pre; train data plt.scatter(y_train, y_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of train data") plt.show() # Checking residuals plt.scatter(y_pred,y_train-y_pred) plt.title("Predicted vs residuals") plt.xlabel("Predicted") plt.ylabel("Residuals") plt.show() #Predicting Test data with the model y_test_pred = reg.predict(X_test) # Model Evaluation acc_xgb = metrics.r2_score(y_test, y_test_pred) print('R^2:', acc_xgb) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_test, y_test_pred))*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_test, y_test_pred)) print('MSE:',metrics.mean_squared_error(y_test, y_test_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test, y_test_pred))) # Visualizing the differences between obs and pre; test data plt.scatter(y_test, y_test_pred) 
plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of test data") plt.show()Model:SVM Regressor# Creating scaled set to be used in model to improve our results from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Import SVM Regressor from sklearn import svm # Create a SVM Regressor reg = svm.SVR() # Train the model using the training sets reg.fit(X_train, y_train) # Model prediction on train data y_pred = reg.predict(X_train) # Model Evaluation print('R^2:',metrics.r2_score(y_train, y_pred)) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_train, y_pred))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_train, y_pred)) print('MSE:',metrics.mean_squared_error(y_train, y_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_train, y_pred))) # Visualizing the differences between obs and pre; train data plt.scatter(y_train, y_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of train data") plt.show() # Checking residuals plt.scatter(y_pred,y_train-y_pred) plt.title("Predicted vs residuals") plt.xlabel("Predicted") plt.ylabel("Residuals") plt.show() # Predicting Test data with the model y_test_pred = reg.predict(X_test) # Model Evaluation acc_svm = metrics.r2_score(y_test, y_test_pred) print('R^2:', acc_svm) print('Adjusted R^2:',1 - (1-metrics.r2_score(y_test, y_test_pred))*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)) print('MAE:',metrics.mean_absolute_error(y_test, y_test_pred)) print('MSE:',metrics.mean_squared_error(y_test, y_test_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test, y_test_pred))) # Visualizing the differences between obs and pre; test data plt.scatter(y_test, y_test_pred) plt.xlabel("obs_logpgv") plt.ylabel("Pre_logpgv") plt.title("obs and pre of test data") plt.show()R-squared scoremodels = pd.DataFrame({ 'Model': ['Linear Regression', 'Random Forest', 'XGBoost', 'Support Vector Machines'], 'R-squared Score': [acc_linreg*100, acc_rf*100, acc_xgb*100, acc_svm*100]}) models.sort_values(by='R-squared Score', ascending=False)Predicting the rating of a wine considering its variety and description This notebook presents a neural network devised to predict the rating from a taster by considering as inputs their reviews and the wine variety. The code is based on the presented in https://github.com/sararob/keras-wine-model. However, with modifications: Keras is only used for pre-processing and TensorFlow is used to train and test the model. The architecture is also slightly different: L2 regularization and dropout are used and more processing units are used in the final layer of the network (after the concatenation prodecure), whilst the loss function used is cross-entropy instead of MSE, since we tackle the problem as a classification one. The ratings are in one-hot encoding format. 
Imports%matplotlib nbagg import csv, random, collections import keras import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import mean_squared_errorAuxiliary Functionsdef iterate_minibatches(num_samples, batch_size, shuffle=False): batches_indices = [] if shuffle: indices = np.arange(num_samples) np.random.shuffle(indices) for start_idx in range(0, num_samples - batch_size + 1, batch_size): if shuffle: excerpt = indices[start_idx:start_idx+batch_size] else: excerpt = slice(start_idx, start_idx+batch_size) yield excerpt def shuffle_indices(num_samples): idx = np.arange(num_samples) np.random.shuffle(idx) return idxLoad data# We use one list for each column of the .csv description = [] variety = [] rating = [] filename = '../wine_data.csv' # You can find the dataset in: https://www.kaggle.com/zynicide/wine-reviews/data with open(filename, 'rb') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='"') next(spamreader) for row in spamreader: if (row[5] != '') and (row[1] != ''): description.append(row[2]) rating.append(row[4]) variety.append(row[9]) # Remove all wine varieties that do not appear more than 500 times # You can do this using pandas although here it is done in a more "low-level" manner occurences = collections.Counter(variety) to_remove = [] for key in occurences: if occurences[key] <= 500: to_remove.append(key) for i in range(len(to_remove)): del occurences[to_remove[i]] # Grab indices of varietyies that are not within occurences to_remove = [] for i in range(len(variety)): if variety[i] not in occurences: to_remove.append(i) # Actually delete the items from the lists for i in sorted(to_remove, reverse=True): del variety[i] del description[i] del rating[i] # Shufle lists labels = [] new_variety = [] new_description = [] for i in shuffle_indices(num_samples=len(variety)): labels.append(rating[i]) # Or labels.append(price[i]) if we are going to classify prices new_variety.append(variety[i]) new_description.append(description[i]) variety = new_variety description = new_description del new_variety del new_description del rating # Split data into train and test train_size = int(len(variety) * .8) print 'Dataset info: ' print "Train size: %d" % train_size print "Test size: %d" % (len(variety) - train_size) # Train description_train = description[:train_size] variety_train = variety[:train_size] labels_train = labels[:train_size] # Test description_test = description[train_size:] variety_test = variety[train_size:] labels_test = labels[train_size:]Dataset info: Train size: 95646 Test size: 23912Prepare data to feed it to the model# Tokenize words vocabulary_size = 10000 # Tokenize with KERAS tokenize = keras.preprocessing.text.Tokenizer(num_words=vocabulary_size, char_level=False) tokenize.fit_on_texts(description_train) # ----------------------------- Bag of words ----------------------------- # description_bow_train = tokenize.texts_to_matrix(description_train) description_bow_test = tokenize.texts_to_matrix(description_test) # ----------------------------- Variety ----------------------------- # # Use sklearn utility to convert label strings to numbered index encoder = LabelEncoder().fit(variety_train) variety_train = encoder.transform(variety_train).reshape((len(variety_train), 1)) variety_test = encoder.transform(variety_test).reshape((len(variety_test), 1)) num_classes = 1 # Convert variety to one hot num_classes += 
np.max(variety_train) variety_train = keras.utils.to_categorical(variety_train, num_classes) variety_test = keras.utils.to_categorical(variety_test, num_classes) # ----------------------------- Embeddings ----------------------------- # sequence_length = 150 embedding_size = 10 train_embedding = tokenize.texts_to_sequences(description_train) test_embedding = tokenize.texts_to_sequences(description_test) # Fill with zeros the sequences that are shorter than 150 train_embedding = keras.preprocessing.sequence.pad_sequences(train_embedding, maxlen=sequence_length, padding="post") test_embedding = keras.preprocessing.sequence.pad_sequences(test_embedding, maxlen=sequence_length, padding="post") # ----------------------------- Convert data to numpy arrays ----------------------------- # variety_train = np.asarray(variety_train) variety_test = np.asarray(variety_test) description_bow_train = np.asarray(description_bow_train) description_bow_test = np.asarray(description_bow_test) train_embedding = np.asarray(train_embedding) test_embedding = np.asarray(test_embedding) # Convert labels to one-hot encoding lb = LabelBinarizer() lb.fit(range(101)) Y_train = lb.transform(labels_train) Y_test = lb.transform(labels_test)Define the modelsess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Auxiliary functions to create TF Variables def embedding_variable(name, shape, minval, maxval): return tf.Variable(name=name, initial_value=tf.random_uniform(shape=shape, minval=minval, maxval=maxval)) def weight_variable(shape, name): return tf.get_variable(name=name, shape=shape, initializer=tf.contrib.layers.xavier_initializer()) def bias_variable(shape, name): return tf.Variable(tf.ones(shape), name=name) # Actual model def model(vocabulary_size, sequence_length, embedding_size, num_classes): # ----------------------- INPUT ----------------------- # with tf.name_scope('input'): x_description = tf.placeholder(tf.float32, shape=[None, vocabulary_size], name='x_description') x_variety = tf.placeholder(tf.float32, shape=[None, num_classes], name='x_variety') x_embedding = tf.placeholder(tf.int32, shape=[None, sequence_length], name='x_embedding') y_ = tf.placeholder(tf.float32, shape=[None, 101], name='y_') keep_prob = tf.placeholder(tf.float32, name='keep_prob') # ----------------------- NET 1 (EMBEDDING) ----------------------- # with tf.name_scope('net1'): with tf.name_scope('net1_embedding_layer'): W_embeddings = embedding_variable(shape=[vocabulary_size, embedding_size], name="W_embeddings", minval=-1.0, maxval=1.0) lookup = tf.nn.embedding_lookup(W_embeddings, x_embedding) embed = tf.reshape(lookup, [-1, embedding_size*sequence_length]) # Flatten lookup with tf.name_scope('net1_out'): input_size = embedding_size*sequence_length output_size = 100 W_net1 = weight_variable(shape=[input_size, output_size], name='W_net1') b_net1 = bias_variable(shape=[output_size], name='b_net1') h_net1 = tf.matmul(embed, W_net1) + b_net1 # ----------------------- NET 2 (BOW FOR DESCRIPTION + ONE-HOT FOR VARIETY) ----------------------- # with tf.name_scope('net2'): with tf.name_scope('net2_fc1'): x_net1 = tf.concat([x_description, x_variety], axis=1) input_size = x_net1.get_shape().as_list()[1] output_size = 256 W_net2_fc1 = weight_variable(shape=[input_size, output_size], name='W_net2_fc1') b_net2_fc1 = bias_variable(shape=[output_size], name='b_net2_fc1') h_net2_fc1 = tf.nn.relu(tf.matmul(x_net1, W_net2_fc1) + b_net2_fc1) h_net2_fc1 = tf.nn.dropout(h_net2_fc1, keep_prob=keep_prob) with tf.name_scope('net2_out'): 
input_size = output_size output_size = 100 W_net2 = weight_variable(shape=[input_size, output_size], name='W_net2') b_net2 = bias_variable(shape=[output_size], name='b_net2') h_net2 = tf.matmul(h_net2_fc1, W_net2) + b_net2 # ----------------------- MERGE OUTPUTS OF NET1 AND NET2 ----------------------- # with tf.name_scope('merge_nets'): x = tf.concat([h_net1, h_net2], axis=1) input_size = h_net1.get_shape().as_list()[1] + h_net2.get_shape().as_list()[1] output_size = 101 W = weight_variable(shape=[input_size, output_size], name='W') b = bias_variable(shape=[output_size], name='b') y = tf.matmul(x, W) + b scores = tf.nn.softmax(y) # ----------------------- LOSS FUNCTION ----------------------- # with tf.name_scope('l2_loss'): l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'W' in v.name]) * 0.001 with tf.name_scope('loss'): loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) + l2_loss # ----------------------- RETURN DICTIONARY ----------------------- # return { 'x_description': x_description, 'x_variety': x_variety, 'x_embedding': x_embedding, 'x': x, 'y_': y_, 'keep_prob': keep_prob, 'loss': loss, 'scores': scores} with tf.device('/gpu:0'): net = model(vocabulary_size=vocabulary_size, sequence_length=sequence_length, embedding_size=embedding_size, num_classes=num_classes) optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(net['loss'])WARNING:tensorflow:From /home/marcelo/Escritorio/Marcelo/Proyectos/env/local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version. Instructions for updating: Use the retry module or similar alternatives. WARNING:tensorflow:From :68: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version. Instructions for updating: Future major versions of TensorFlow will allow gradients to flow into the labels input on backprop by default. See tf.nn.softmax_cross_entropy_with_logits_v2.Train model# Init variables sess.run(tf.global_variables_initializer()) # Train the network epochs = 100 batch_size = 128 # For plotting the losses p_train_loss = [] p_test_loss = [] print '\nTraining Network...' 
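The second deprecation warning above already names the replacement op; if needed, the loss line inside `model()` could be swapped for the `_v2` variant (a sketch against the TF 1.x API used here, assuming gradients should not flow into the labels):
```py
# Sketch: same cross-entropy + L2 objective, using the op the warning recommends
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y)) + l2_loss
```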
for epoch in range(epochs): train_loss = 0.0 train_batches = 0.0 train_mse = 0.0 for indices in iterate_minibatches(num_samples=Y_train.shape[0], batch_size=batch_size, shuffle=True): y_ = Y_train[indices,:] x_description = description_bow_train[indices,:] x_variety = variety_train[indices,:] x_embedding = train_embedding[indices,:] sess.run(optimizer, feed_dict={ net['x_description']: x_description, net['x_variety']: x_variety, net['x_embedding']: x_embedding, net['y_']: y_, net['keep_prob']: 0.5}) pred, loss = sess.run([net['scores'], net['loss']], feed_dict={net['x_description']: x_description, net['x_variety']: x_variety, net['x_embedding']: x_embedding, net['y_']: y_, net['keep_prob']: 1.0}) train_loss += loss train_mse += mean_squared_error(np.argmax(y_, axis=1), np.argmax(pred, axis=1)) train_batches += 1.0 train_loss = train_loss / train_batches train_mse = train_mse / train_batches # Test test_loss = 0.0 test_batches = 0.0 test_mse = 0.0 for indices in iterate_minibatches(num_samples=Y_test.shape[0], batch_size=batch_size, shuffle=False): y_= Y_test[indices,:] x_description = description_bow_test[indices,:] x_variety = variety_test[indices,:] x_embedding = test_embedding[indices,:] feed_dict = {net['x_description']: x_description, net['x_variety']: x_variety, net['x_embedding']: x_embedding, net['y_']: y_, net['keep_prob']: 1.0} pred, loss = sess.run([net['scores'], net['loss']], feed_dict=feed_dict) test_loss += loss test_mse += mean_squared_error(np.argmax(y_, axis=1), np.argmax(pred, axis=1)) test_batches += 1.0 test_loss = test_loss / test_batches test_mse = test_mse / test_batches # For the plot p_train_loss.append(train_loss) p_test_loss.append(test_loss) # Print losses each 10 epochs if (epoch % 10 == 0): print '\nEpoch %d/%d' % (epoch+1, epochs) print 'Train loss: ', train_loss print 'Test loss: ', test_loss print 'Train MSE: ', train_mse print 'Test MSE: ', test_mseTraining Network... Epoch 1/100 Train loss: 3.719155812359239 Test loss: 2.7373269860462477 Train MSE: 7.554969879518072 Test MSE: 7.178259408602151 Epoch 11/100 Train loss: 2.136696351420289 Test loss: 2.2075398352838334 Train MSE: 6.111539909638554 Test MSE: 6.581359206989247 Epoch 21/100 Train loss: 2.0958176516146065 Test loss: 2.1671837235009797 Train MSE: 6.105003346720214 Test MSE: 6.540112567204301 Epoch 31/100 Train loss: 2.097976459716378 Test loss: 2.1794783761424403 Train MSE: 6.128650016733601 Test MSE: 6.983366935483871 Epoch 41/100 Train loss: 2.097583308117776 Test loss: 2.1678501188114123 Train MSE: 6.129329819277109 Test MSE: 6.732190860215054 Epoch 51/100 Train loss: 2.095266655267003 Test loss: 2.1627769149759764 Train MSE: 6.115325886880857 Test MSE: 6.56804435483871 Epoch 61/100 Train loss: 2.098294240882598 Test loss: 2.159907894749795 Train MSE: 6.126526941097724 Test MSE: 6.653897849462366 Epoch 71/100 Train loss: 2.0972243[...]Plot lossp1, = plt.plot(np.arange(100), p_train_loss, linewidth=1, color='b', label="entrenamiento") p2, = plt.plot(np.arange(100), p_test_loss, linewidth=1, color='g', label="prueba") plt.xlabel('Epoca') plt.ylabel('Costo') plt.legend([p1, p2], ["entrenamiento", "prueba"]) plt.savefig('losses.png', dpi=200)Test modelprint '\nTesting Network...' 
test_loss = 0.0 test_batches = 0.0 mse = 0.0 for indices in iterate_minibatches(num_samples=Y_test.shape[0], batch_size=batch_size, shuffle=False): y_= Y_test[indices,:] x_description = description_bow_test[indices,:] x_variety = variety_test[indices,:] x_embedding = test_embedding[indices,:] feed_dict = {net['x_description']: x_description, net['x_variety']: x_variety, net['x_embedding']: x_embedding, net['y_']: y_, net['keep_prob']: 1.0} pred, loss = sess.run([net['scores'], net['loss']], feed_dict=feed_dict) test_loss += loss mse += mean_squared_error(np.argmax(y_, axis=1), np.argmax(pred, axis=1)) test_batches += 1.0 test_loss = test_loss / test_batches mse = mse / test_batches print '\nLoss for the test set: ', test_loss print 'MSE for the test set: ', mse # Predict for num_samples wines from the test set idx = shuffle_indices(Y_test.shape[0]) num_samples = 5 idx = idx[0:num_samples] y_ = Y_test[idx,:] x_description = description_bow_test[idx,:] x_variety = variety_test[idx,:] x_embedding = test_embedding[idx,:] feed_dict = {net['x_description']: x_description, net['x_variety']: x_variety, net['x_embedding']: x_embedding, net['y_']: y_, net['keep_prob']: 1.0} loss, prediction = sess.run([net['loss'], net['scores']], feed_dict=feed_dict) print '\nLoss for the first 5 test samples: ', loss for i in range(num_samples): print description_test[i] print 'Actual: ', np.argmax(y_[i,:]), ' Predicted: ', np.argmax(prediction[i,:]), '\n'
Testing Network... Loss for the test set: 2.157536515625574 MSE for the test set: 6.768607190860215 Loss for the first 5 test samples: 2.056796 An everyday Zin, soft and candied, with blackberry, cherry, licorice and cocoa flavors. The tannin structure is rich. Drink now. Actual: 91 Predicted: 92 Forward and immediately delicious, this has a mix of peach, pear, apple and the more expected red fruits; it's a wild panoply of fruit flavor, with balancing acids and tannins. Round, forward, full bodied and textural, it is a uniquely flavorful wine with distinctive fruit-laden profile. Actual: 92 Predicted: 92 Zelma Long's tradition of excellence continues with the Series M: Its deep chocolate, cinnamon and berry aromas and full, complex waves of delicious fruit and spice are exactly what a Bordeaux blend should be. Restrained but memorable, the wine has firm structure and will age beautifully. Actual: 83 Predicted: 85 Lots of astringent tannins in this dry, young blend [...]
Indexing a numpy array with a boolean array
import numpy as np arr = np.array([[1,2,3], [4,np.nan,np.nan], [np.nan,np.nan,5]]) arr np.isnan(arr) ~np.isnan(arr) arr[~np.isnan(arr)]
Map Reduce Algorithm
Map reduce is a very functional algorithm whose three parts can easily be executed on different machines. In this assignment, we will try to apply this algorithm to a dataset of more than 100 million rows. For comparison, we will have two ways of getting counts of carriers (a toy sketch of the map/reduce stages appears below, within the Serial Way section):
1. Serial way - Looping through each record and counting each airline's flights
2. Map reduce way - map, reduce and sort, and collect way to count the flights
import os import glob import pandas as pd import utils import data_handler
Data Preparation
data_handler.download_dataset()
data downloaded. you can skip this step or delete data folder to download again.
Variables
# all files under data folder file_list = sorted(glob.glob(os.path.join('data', '*.csv.bz2'))) carrier_counts = {}
Serial Way
Here, we are getting a list of files under the data folder. The serial way requires looping through all of the records and updating the counts one by one. 
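As promised above, here is a toy sketch of the map -> shuffle/sort -> reduce stages for counting flights per carrier (the mini list of carrier codes is made up, and this is not the assignment solution):
```py
from collections import defaultdict

records = ['AA', 'DL', 'AA', 'WN', 'DL', 'AA']   # made-up carrier code per flight

# map: emit a (carrier, 1) pair per record
mapped = [(carrier, 1) for carrier in records]

# shuffle/sort: group the emitted values by key
grouped = defaultdict(list)
for carrier, one in sorted(mapped):
    grouped[carrier].append(one)

# reduce: sum the grouped values for each carrier
toy_counts = {carrier: sum(ones) for carrier, ones in grouped.items()}
print(toy_counts)   # {'AA': 3, 'DL': 2, 'WN': 1}
```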
If you think your computer is fast enough, you can also try `read_as_dataframe` in the data_handler module. You can simply use the following code rather than looping through each file.
```py
# initial value dictionary
carrier_counts = {}
# read all datasets into one
df = data_handler.read_as_dataframe()
# unique carriers
carriers = df.UniqueCarrier.unique()
# get the counts
for carrier in df.UniqueCarrier:
    carrier_counts[carrier] += 1
```
Since my computer wasn't able to finish it, I preferred the loop. Hopefully, you won't run into these issues thanks to the Map Reduce Algorithm!
for ind, data_file in enumerate(file_list): # read current data df = pd.read_csv(data_file, encoding='ISO-8859-1', memory_map=True, low_memory=False) # unique airlines in dataset carriers = df.UniqueCarrier.unique() # update the global carrier_count for key in carriers: if key not in carrier_counts: carrier_counts.update({key: 0}) # loop through each row in dataframe for carrier in df.UniqueCarrier: carrier_counts[carrier] += 1 # info prefix ='Shape: {} ; {} Mb'.format( df.shape, round(df.memory_usage().sum() / 1e+6,2)) # track the progress utils.progressbar(len(file_list), ind + 1, prefix=prefix) # results carrier_counts
Map Reduce Way
# todo: your code here
Conclusion
# todo: your conclusion here
Training the Model
df.head() # separating the features X =df.drop('test_result', axis = 1) X.head() y = df['test_result'] from sklearn.model_selection import train_test_split df.shape X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=101) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_X_train = scaler.fit_transform(X_train) scaled_X_test = scaler.transform(X_test) # scale the test set with the training-set statistics (transform, not fit_transform)
from sklearn.linear_model import LogisticRegression log_model = LogisticRegression() log_model.fit(scaled_X_train, y_train) log_model.coef_ y_pred = log_model.predict(scaled_X_test) y_pred y_pred[0] df.head() #y_pred = log_model.predict_proba(scaled_X_test) #y_pred
Classification Metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report accuracy_score(y_test, y_pred) confusion_matrix(y_test, y_pred) from sklearn.metrics import plot_confusion_matrix plot_confusion_matrix(log_model, scaled_X_test, y_test) print(classification_report(y_test, y_pred)) from sklearn.metrics import precision_score, recall_score precision_score(y_test, y_pred) recall_score(y_test, y_pred) # y_true comes first in these metric calls
from sklearn.metrics import plot_roc_curve, plot_precision_recall_curve plot_roc_curve(log_model, scaled_X_test, y_test) plot_precision_recall_curve(log_model, scaled_X_test, y_test)
C:\Users\saura\anaconda3\envs\mltest1\lib\site-packages\sklearn\utils\deprecation.py:87: FutureWarning: Function plot_precision_recall_curve is deprecated; Function `plot_precision_recall_curve` is deprecated in 1.0 and will be removed in 1.2. Use one of the class methods: PrecisionRecallDisplay.from_predictions or PrecisionRecallDisplay.from_estimator. 
warnings.warn(msg, category=FutureWarning)
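The FutureWarning above already points at the replacement API; on scikit-learn 1.0+ the last two plots can be drawn with the Display classes instead (a sketch reusing the fitted model and scaled test set from above):
```py
# Non-deprecated equivalents suggested by the warning (scikit-learn >= 1.0)
from sklearn.metrics import RocCurveDisplay, PrecisionRecallDisplay

RocCurveDisplay.from_estimator(log_model, scaled_X_test, y_test)
PrecisionRecallDisplay.from_estimator(log_model, scaled_X_test, y_test)
```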